commit | cc8ff82a4d1fde9950bd0081bfcb2670331f3ad4 | [log] [tgz] |
---|---|---|
author | Googler <noreply@google.com> | Wed Sep 14 14:41:45 2022 +0800 |
committer | Googler <noreply@google.com> | Thu Jan 05 16:27:27 2023 -0800 |
tree | dabf0a5fb8b3cc6a988ad501033755b430aa23b0 |
Project import generated by Copybara. GitOrigin-RevId: df5bf87eca1b9de24d3afd9ee49b125f95dc1351
diff --git a/Makefile b/Makefile new file mode 100755 index 0000000..7481eee --- /dev/null +++ b/Makefile
@@ -0,0 +1,51 @@ +mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST))) +MEDIA_MODULE_PATH := $(dir $(mkfile_path)) +VERSION_CONTROL_CFLAGS := $(shell ${MEDIA_MODULE_PATH}/version_control.sh) + +CONFIGS := CONFIG_AMLOGIC_MEDIA_VDEC_MPEG12=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_MPEG2_MULTI=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_MPEG4=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_MPEG4_MULTI=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_VC1=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_H264=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_H264_MULTI=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_H264_MVC=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_H265=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_VP9=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_MJPEG=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_MJPEG_MULTI=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_REAL=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_AVS=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_AVS_MULTI=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_AVS2=m \ + CONFIG_AMLOGIC_MEDIA_VENC_H264=m \ + CONFIG_AMLOGIC_MEDIA_VENC_H265=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_AV1=m \ + CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION=y \ + CONFIG_AMLOGIC_MEDIA_GE2D=y \ + CONFIG_AMLOGIC_MEDIA_VENC_MULTI=m \ + CONFIG_AMLOGIC_MEDIA_VENC_JPEG=m + +EXTRA_INCLUDE := -I$(KERNEL_SRC)/$(M)/drivers/include + +CONFIGS_BUILD := -Wno-parentheses-equality -Wno-pointer-bool-conversion \ + -Wno-unused-const-variable -Wno-typedef-redefinition \ + -Wno-logical-not-parentheses -Wno-sometimes-uninitialized \ + -Wno-frame-larger-than= + +KBUILD_CFLAGS_MODULE += $(GKI_EXT_MODULE_PREDEFINE) + +modules: + $(MAKE) -C $(KERNEL_SRC) M=$(M)/drivers modules "EXTRA_CFLAGS+=-I$(INCLUDE) -Wno-error $(CONFIGS_BUILD) $(EXTRA_INCLUDE) $(KBUILD_CFLAGS_MODULE) ${VERSION_CONTROL_CFLAGS}" $(CONFIGS) + +all: modules + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 M=$(M)/drivers -C $(KERNEL_SRC) modules_install + mkdir -p ${OUT_DIR}/../vendor_lib/modules + cd ${OUT_DIR}/$(M)/; find -name "*.ko" -exec cp {} ${OUT_DIR}/../vendor_lib/modules/ \; + mkdir -p ${OUT_DIR}/../vendor_lib/firmware/video + cp $(KERNEL_SRC)/$(M)/firmware/* 
${OUT_DIR}/../vendor_lib/firmware/video/ + +clean: + $(MAKE) -C $(KERNEL_SRC) M=$(M) clean
diff --git a/Media.mk b/Media.mk new file mode 100644 index 0000000..58db63b --- /dev/null +++ b/Media.mk
@@ -0,0 +1,110 @@ +ifeq ($(KERNEL_A32_SUPPORT), true) +KERNEL_ARCH := arm +else +KERNEL_ARCH := arm64 +endif + +CONFIGS := CONFIG_AMLOGIC_MEDIA_VDEC_MPEG12=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_MPEG2_MULTI=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_MPEG4=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_MPEG4_MULTI=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_VC1=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_H264=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_H264_MULTI=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_H264_MVC=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_H265=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_VP9=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_MJPEG=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_MJPEG_MULTI=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_REAL=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_AVS=m \ + CONFIG_AMLOGIC_MEDIA_VDEC_AVS2=m \ + CONFIG_AMLOGIC_MEDIA_VENC_H264=m \ + CONFIG_AMLOGIC_MEDIA_VENC_H265=m \ + CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION=y \ + CONFIG_AMLOGIC_MEDIA_GE2D=y \ + CONFIG_AMLOGIC_MEDIA_VENC_MULTI=m \ + CONFIG_AMLOGIC_MEDIA_VENC_JPEG=m + +define copy-media-modules +$(foreach m, $(shell find $(strip $(1)) -name "*.ko"),\ + $(shell cp $(m) $(strip $(2)) -rfa)) +endef + +ifneq (,$(TOP)) +KDIR := $(shell pwd)/$(PRODUCT_OUT)/obj/KERNEL_OBJ/ + +MEDIA_DRIVERS := $(TOP)/hardware/amlogic/media_modules/drivers +ifeq (,$(wildcard $(MEDIA_DRIVERS))) +$(error No find the dir of drivers.) +endif + +INCLUDE := $(MEDIA_DRIVERS)/include +ifeq (,$(wildcard $(INCLUDE))) +$(error No find the dir of include.) 
+endif + +MEDIA_MODULES := $(shell pwd)/$(PRODUCT_OUT)/obj/media_modules +ifeq (,$(wildcard $(MEDIA_MODULES))) +$(shell mkdir $(MEDIA_MODULES) -p) +endif + +MODS_OUT := $(shell pwd)/$(PRODUCT_OUT)/obj/lib_vendor +ifeq (,$(wildcard $(MODS_OUT))) +$(shell mkdir $(MODS_OUT) -p) +endif + +UCODE_OUT := $(shell pwd)/$(PRODUCT_OUT)/$(TARGET_COPY_OUT_VENDOR)/lib/firmware/video +ifeq (,$(wildcard $(UCODE_OUT))) +$(shell mkdir $(UCODE_OUT) -p) +endif + +$(shell cp $(MEDIA_DRIVERS)/../firmware/* $(UCODE_OUT) -rfa) +$(shell cp $(MEDIA_DRIVERS)/* $(MEDIA_MODULES) -rfa) + +define media-modules + PATH=$(KERNEL_TOOLPATHS):$$PATH \ + $(MAKE) -C $(KDIR) M=$(MEDIA_MODULES) $(KERNEL_ARGS) $(CONFIGS) \ + "EXTRA_CFLAGS+=-I$(INCLUDE) -Wno-error" modules; \ + find $(MEDIA_MODULES) -name "*.ko" | PATH=$$(cd ./$(TARGET_HOST_TOOL_PATH); pwd):$$PATH xargs -i cp {} $(MODS_OUT) +endef + +else +KDIR := $(PWD)/kernel +ifeq (,$(wildcard $(KDIR))) +$(error No find the dir of kernel.) +endif + +MEDIA_DRIVERS := $(PWD)/media_modules/drivers +ifeq (,$(wildcard $(MEDIA_DRIVERS))) +$(error No find the dir of drivers.) +endif + +INCLUDE := $(MEDIA_DRIVERS)/include +ifeq (,$(wildcard $(INCLUDE))) +$(error No find the dir of include.) +endif + +MODS_OUT ?= $(MEDIA_DRIVERS)/../modules +ifeq (,$(wildcard $(MODS_OUT))) +$(shell mkdir $(MODS_OUT) -p) +endif + +modules: + CCACHE_NODIRECT="true" PATH=$(KERNEL_TOOLPATHS):$$PATH \ + $(MAKE) -C $(KDIR) M=$(MEDIA_DRIVERS) ARCH=$(KERNEL_ARCH) $(KERNEL_ARGS) $(CONFIGS) \ + EXTRA_CFLAGS+=-I$(INCLUDE) -j64 + +copy-modules: + @echo "start copying media modules." + mkdir -p $(MODS_OUT) + $(call copy-media-modules, $(MEDIA_DRIVERS), $(MODS_OUT)) + +all: modules copy-modules + + +clean: + PATH=$(KERNEL_TOOLPATHS):$$PATH \ + $(MAKE) -C $(KDIR) M=$(MEDIA_DRIVERS) $(KERNEL_ARGS) clean + +endif
diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..4b1504d --- /dev/null +++ b/VERSION
@@ -0,0 +1,76 @@ +Major_V=5 +Minor_V=3 +BaseChangeId=Ifc17e92ef9c9e211b8f01ebae1e97c5855fea084 + + +#history version +#V5.2.104-g795aee00.003025 Release Notes +#Release_ChangeId_V5_2=I34801cba60d54bfd9f459f7445d0362989c23496 +#V5.1.77-g0cdebf27.003009 Release Notes +#Release_ChangeId_V5_1=Id7e315bbf379d166ca4b335cef13c983a8ca5078 +#Feature develop total count: 6 +# critical feature develop count: 0 +# important feature develop count: 4 +# normal feature develop count: 2 +#Feature develop detail information: +# improve pip channel play +# support decoder fence +# fixed can't get hdr information correctly. +# add multi-vdec info for new format av1 after revert +# change ptsadjust and threshold from s64 to s32. +# support dw 0x100 +#Fixed BUG total count: 30 +# fixed critical BUG count: 3 +# fixed important feature develop count: 15 +# fixed normal feature develop count: 12 +#Fixed BUG detail information: +# add VIDTYPE_COMPRESS in vf for dw mode with afbc buffer +# fix some the crash caused by null pointer +# fixed vpp wrapper memory leak. +# metadata lose the last byte every frame in frame mode +# fix avs color abnormal. +# fix gst secure v4l2 decoder not work +# fix the playback stuck when resolution changed +# h265 ucode send 2 interrupts cause playback stuck +# fix av1 freeze when burn-in test. +# fixed playback stuck after seek. +# fix 8k display abnormal. +# fixed irq-vdec-0 takes more cpu slice. +# fixed AV1 seek freezing. +# fixed failed to allocate tvp memory +# fixed issue of reports resolution change. 
+#V5.0 Release Notes +#Release_ChangeId_V5_0=I6053e02900215d9006469c38ca375ace498b849f +#upgrade Kernel version to 5.4 +#Android R + v4l2dec (no vpp) xts clean +#v4ldec driver seperate from amports drivers +# +#V4.0.0 Release Notes +#upgrade Kernel version to 4.19 +#v4l2 support for h264/h265/vp9 +#add fra support in decoder driver +# +#V3.0.0 Release Notes +#upgrade Kernel version to 4.9 +#media_module remove from kernel +#new firmware management +#mjpeg/mpeg12/mpeg2 multi-instance decoder support +#h264 4k afbc support +#AVS2 decoder support +#vdec double write support +#add av1 decoder support +#add decoder QOS info report +#upgrade TA ucode to 0.2 version +# +#V2.0.0 Release Notes +#upgrade Kernel version to 3.14 +#Introduce codec_mm memory managment +#add afbc scatter memory support +#add vp9 decoder support +#add 264/265/vp9 multi-instance decoder support +# +#V1.0.0 Release Notes +#based kernel to 3.10 +#add H264 4K decoder support +#add H265 video decoder support +#H265 decoder support afbc output
diff --git a/drivers/Makefile b/drivers/Makefile new file mode 100644 index 0000000..e96ba44 --- /dev/null +++ b/drivers/Makefile
@@ -0,0 +1,8 @@ +obj-y += common/ +obj-y += frame_provider/ +obj-y += frame_sink/ +obj-y += stream_input/ +obj-y += amvdec_ports/ +obj-y += framerate_adapter/ +obj-y += media_sync/ +obj-$(CONFIG_AMLOGIC_MEDIA_V4L_DEC) += amvdec_ports/
diff --git a/drivers/amvdec_ports/Makefile b/drivers/amvdec_ports/Makefile new file mode 100644 index 0000000..2e7cff4 --- /dev/null +++ b/drivers/amvdec_ports/Makefile
@@ -0,0 +1,26 @@ +obj-m += amvdec_ports.o +amvdec_ports-objs += aml_vcodec_dec_drv.o +amvdec_ports-objs += aml_vcodec_dec.o +amvdec_ports-objs += aml_vcodec_util.o +amvdec_ports-objs += aml_vcodec_adapt.o +amvdec_ports-objs += aml_vcodec_vpp.o +amvdec_ports-objs += aml_vcodec_ge2d.o +amvdec_ports-objs += vdec_drv_if.o +amvdec_ports-objs += aml_task_chain.o +amvdec_ports-objs += decoder/vdec_h264_if.o +amvdec_ports-objs += decoder/vdec_hevc_if.o +amvdec_ports-objs += decoder/vdec_vp9_if.o +amvdec_ports-objs += decoder/vdec_mpeg12_if.o +amvdec_ports-objs += decoder/vdec_mpeg4_if.o +amvdec_ports-objs += decoder/vdec_mjpeg_if.o +amvdec_ports-objs += decoder/vdec_av1_if.o +ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +amvdec_ports-objs += decoder/aml_h264_parser.o +amvdec_ports-objs += decoder/aml_hevc_parser.o +amvdec_ports-objs += decoder/aml_vp9_parser.o +amvdec_ports-objs += decoder/aml_mpeg12_parser.o +amvdec_ports-objs += decoder/aml_mpeg4_parser.o +amvdec_ports-objs += decoder/aml_mjpeg_parser.o +amvdec_ports-objs += utils/golomb.o +endif +amvdec_ports-objs += utils/common.o
diff --git a/drivers/amvdec_ports/aml_task_chain.c b/drivers/amvdec_ports/aml_task_chain.c new file mode 100644 index 0000000..8dfe014 --- /dev/null +++ b/drivers/amvdec_ports/aml_task_chain.c
@@ -0,0 +1,365 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ + +#include <linux/list.h> +#include <linux/spinlock.h> + +#include "aml_vcodec_drv.h" +#include "aml_task_chain.h" + +#define KERNEL_ATRACE_TAG KERNEL_ATRACE_TAG_V4L2 +#include <trace/events/meson_atrace.h> + +struct task_item_name_s { + enum task_type_e type; + const u8 *name; +}; + +static const struct task_item_name_s iname[] = { + {TASK_TYPE_DEC, "dec"}, + {TASK_TYPE_VPP, "vpp"}, + {TASK_TYPE_V4L_SINK, "v4l-sink"}, + {TASK_TYPE_GE2D, "ge2d"}, + {TASK_TYPE_MAX, "unknown"}, +}; + +static const u8 *type_to_name(enum task_type_e type) +{ + const u8 *name = "unknown"; + int i, size = ARRAY_SIZE(iname); + + for (i = 0; i < size; i++) { + if (type == iname[i].type) + name = iname[i].name; + } + + return name; +} + +static struct task_item_s *find_task_item(struct task_chain_s *task, + enum task_type_e type) +{ + struct task_item_s *item = NULL; + ulong flags; + + spin_lock_irqsave(&task->slock, flags); + + if (!list_empty(&task->list_item)) { + struct task_item_s *p; + + list_for_each_entry(p, &task->list_item, node) { + if (p->ops->type == type) { + item = p; + break; + } + } + } + + if (item) + kref_get(&item->ref); + + spin_unlock_irqrestore(&task->slock, 
flags); + + return item; +} + +static void task_item_release(struct kref *kref); + +static void task_item_vframe_push(struct task_item_s *item, struct vframe_s *vframe) +{ + int i = 0; + + for (i = 0 ; i < 3; i++) { + if (item->vframe[i] == NULL) { + item->vframe[i] = vframe; + break; + } + } +} + +static struct vframe_s *task_item_vframe_pop(struct task_item_s *item) +{ + struct vframe_s *vframe = NULL; + int i = 0; + + for (i = 0 ; i < 3; i++) { + if (item->vframe[i] != NULL) { + vframe = item->vframe[i]; + item->vframe[i] = NULL; + break; + } + } + + return vframe; +} + +static struct task_item_s *task_item_get(struct task_chain_s *task, + enum task_type_e type) +{ + struct task_item_s *item = NULL; + + item = find_task_item(task, type); + if (!item) { + v4l_dbg(task->ctx, V4L_DEBUG_CODEC_ERROR, + "TSK(%px):%d get item:%d fail.\n", task, task->id, type); + } + + return item; +} + +static int task_item_put(struct task_item_s *item) +{ + return kref_put(&item->ref, task_item_release); +} + +static void task_buffer_submit(struct task_chain_s *task, + enum task_type_e type) +{ + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *)task->obj; + struct task_item_s *item = NULL; + struct task_item_s *item2 = NULL; + struct vframe_s *vf = NULL; + + item = task_item_get(task, type); + if (item) { + item->ops->get_vframe(item->caller, &vf); + fb->vframe = (void *)vf; + task_item_vframe_push(item, vf); + item->is_active = false; + + item2 = task_item_get(task, task->map[0][type]); + if (item2) { + item2->is_active = true; + item2->ops->fill_buffer(task->ctx, fb); + + v4l_dbg(task->ctx, V4L_DEBUG_TASK_CHAIN, + "TSK(%px):%d, vf:%px, phy:%lx, submit %d => %d.\n", + task, task->id, vf, fb->m.mem[0].addr, + type, task->map[0][type]); + + task->direction = TASK_DIR_SUBMIT; + task_item_put(item2); + } + task_item_put(item); + } +} + +static void task_buffer_recycle(struct task_chain_s *task, + enum task_type_e type) +{ + struct vdec_v4l2_buffer *fb = + (struct 
vdec_v4l2_buffer *)task->obj; + struct task_item_s *item = NULL; + struct task_item_s *item2 = NULL; + + item = task_item_get(task, type); + if (item) { + item->is_active = false; + + item2 = task_item_get(task, task->map[1][type]); + if (item2) { + struct vframe_s *vf = NULL; + + item2->is_active = true; + + vf = task_item_vframe_pop(item2); + item2->ops->put_vframe(item2->caller, vf); + + v4l_dbg(task->ctx, V4L_DEBUG_TASK_CHAIN, + "TSK(%px):%d, vf:%px, phy:%lx, recycle %d => %d.\n", + task, task->id, vf, fb->m.mem[0].addr, + type, task->map[1][type]); + + task->direction = TASK_DIR_RECYCLE; + task_item_put(item2); + } + task_item_put(item); + } +} + +void task_chain_show(struct task_chain_s *task) +{ + struct task_item_s *item = NULL; + char buf[128] = {0}; + char *pbuf = buf; + ulong flags; + + if (!task || !task->ctx) + return; + + spin_lock_irqsave(&task->slock, flags); + + if (!list_empty(&task->list_item)) { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *)task->obj; + + list_for_each_entry(item, &task->list_item, node) { + pbuf += sprintf(pbuf, "%s(%d)", + item->name, item->is_active); + if (item->node.next != &task->list_item) { + if (task->direction == TASK_DIR_SUBMIT) + pbuf += sprintf(pbuf, " ==> "); + else + pbuf += sprintf(pbuf, " <== "); + } + } + v4l_dbg(task->ctx, V4L_DEBUG_CODEC_PRINFO, + "vb:%2d, phy:%lx %s\n", + task->id, fb->m.mem[0].addr, buf); + } + + spin_unlock_irqrestore(&task->slock, flags); +} +EXPORT_SYMBOL(task_chain_show); + +static void task_chain_destroy(struct kref *kref) +{ + struct task_chain_s *task; + + task = container_of(kref, struct task_chain_s, ref); + + task->cur_type = TASK_TYPE_MAX; + memset(task->map, 0, sizeof(task->map)); + + v4l_dbg(task->ctx, V4L_DEBUG_TASK_CHAIN, + "TSK(%px):%d task chain destroyed.\n", task, task->id); + + kfree(task); +} + +static void task_item_release(struct kref *kref) +{ + struct task_item_s *item; + + item = container_of(kref, struct task_item_s, ref); + list_del(&item->node); + 
+ v4l_dbg(item->task->ctx, V4L_DEBUG_TASK_CHAIN, + "TSK(%px):%d task item:(%px,%d) released.\n", + item->task, item->task->id, item, item->ops->type); + + kref_put(&item->task->ref, task_chain_destroy); + + kfree(item); +} + +void task_chain_clean(struct task_chain_s *task) +{ + struct task_item_s *item, *tmp; + + v4l_dbg(task->ctx, V4L_DEBUG_TASK_CHAIN, + "TSK(%px):%d task chain clean.\n", task, task->id); + + if (!list_empty(&task->list_item)) { + list_for_each_entry_safe(item, tmp, &task->list_item, node) + kref_put(&item->ref, task_item_release); + } +} +EXPORT_SYMBOL(task_chain_clean); + +void task_chain_release(struct task_chain_s *task) +{ + v4l_dbg(task->ctx, V4L_DEBUG_TASK_CHAIN, + "TSK(%px):%d task chain release.\n", task, task->id); + + kref_put(&task->ref, task_chain_destroy); +} +EXPORT_SYMBOL(task_chain_release); + +void task_order_attach(struct task_chain_s *task, + struct task_ops_s *ops, + void *caller) +{ + struct task_item_s *item; + + item = kzalloc(sizeof(struct task_item_s), GFP_ATOMIC); + if (!item) { + v4l_dbg(task->ctx, V4L_DEBUG_CODEC_ERROR, + "TSK(%px):%d alloc item fail.\n", task, task->id); + return; + } + + item->task = task; + item->ops = ops; + item->caller = caller; + item->name = type_to_name(ops->type); + kref_init(&item->ref); + + task->map[0][ops->type] = task->cur_type; + task->map[1][task->cur_type] = ops->type; + task->cur_type = ops->type; + kref_get(&task->ref); + + list_add(&item->node, &task->list_item); + + v4l_dbg(task->ctx, V4L_DEBUG_TASK_CHAIN, + "TSK(%px):%d attach item:(%px,%d).\n", + task, task->id, item, ops->type); +} +EXPORT_SYMBOL(task_order_attach); + +void task_chain_update_object(struct task_chain_s *task, void *obj) +{ + /* + * Note: have to invoke this funtion + * if the task object has been changed. 
+ */ + task->obj = obj; + + v4l_dbg(task->ctx, V4L_DEBUG_TASK_CHAIN, + "TSK(%px):%d update task obj:%px.\n", + task, task->id, obj); +} +EXPORT_SYMBOL(task_chain_update_object); + +int task_chain_init(struct task_chain_s **task_out, + void *v4l_ctx, + void *obj, + int vb_idx) +{ + struct task_chain_s *task; + + task = kzalloc(sizeof(struct task_chain_s), GFP_ATOMIC); + if (!task) { + v4l_dbg(task->ctx, V4L_DEBUG_CODEC_ERROR, + "TSK(%px):%d alloc task fail.\n", task, task->id); + return -ENOMEM; + } + + task->id = vb_idx; + task->obj = obj; + task->ctx = v4l_ctx; + kref_init(&task->ref); + spin_lock_init(&task->slock); + INIT_LIST_HEAD(&task->list_item); + + task->attach = task_order_attach; + task->submit = task_buffer_submit; + task->recycle = task_buffer_recycle; + + *task_out = task; + + v4l_dbg(task->ctx, V4L_DEBUG_TASK_CHAIN, + "TSK(%px):%d task chain creat success.\n", task, task->id); + return 0; +} +EXPORT_SYMBOL(task_chain_init); +
diff --git a/drivers/amvdec_ports/aml_task_chain.h b/drivers/amvdec_ports/aml_task_chain.h new file mode 100644 index 0000000..fdbe2fb --- /dev/null +++ b/drivers/amvdec_ports/aml_task_chain.h
@@ -0,0 +1,125 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ + +#ifndef AML_TASK_CHAIN_H +#define AML_TASK_CHAIN_H + +#include <linux/amlogic/media/vfm/vframe.h> + +enum task_type_e { + TASK_TYPE_DEC, + TASK_TYPE_VPP, + TASK_TYPE_V4L_SINK, + TASK_TYPE_GE2D, + TASK_TYPE_MAX +}; + +enum task_dir_e { + TASK_DIR_SUBMIT, + TASK_DIR_RECYCLE, + TASK_DIR_MAX +}; + +struct task_chain_s; + +/* + * struct task_ops_s - interface of the task item. + * @type : type of task ops involves dec, vpp, v4l sink etc. + * @get_vframe : get the video frame from caller's fifo. + * @put_vframe : put the video frame to caller's fifo. + * @fill_buffer : submit the buffer into next process module. + */ +struct task_ops_s { + enum task_type_e type; + void (*get_vframe) (void *caller, struct vframe_s **vf); + void (*put_vframe) (void *caller, struct vframe_s *vf); + void (*fill_buffer) (void *v4l_ctx, void *fb_ctx); +}; + +/* + * struct task_item_s - items of the task chain. + * @node : list node of the specific task item. + * @ref : reference count of item be used by others. + * @name : name of task item, map with task type. + * @is_active : indicate this item whether is active. + * @vframe[3] : store the vframes that get from caller. 
+ * @task : the context of the task chain. + * @caller : it's the handle, meght it's dec, vpp or v4l-sink etc. + * @ops : sets of interface which attach from task item. + */ +struct task_item_s { + struct list_head node; + struct kref ref; + const u8 *name; + bool is_active; + void *vframe[3]; + struct task_chain_s *task; + void *caller; + struct task_ops_s *ops; +}; + +/* + * struct task_chain_s - the manager struct of the task chain. + * @list_item : all task items be attached are store in the list. + * @node : will register to the task chain pool. + * @ref : reference count of task chain be used by others. + * @slock : used for list item write and read safely. + * @id : it's vb index to be a mark used for task chain. + * @ctx : the context of the v4l driver. + * @obj : the object managed by task chain. + * @direction : direction incluse 2 flows submit & recycle. + * @cur_type : the latest item type before a new item be attached. + * @map : the map store the pipeline information. + * @attach : attach a new item to task chain. + * @submit : submit the finish item to next item module. + * @recycle : if item's date was consumed will be recycled to item. 
+ */ +struct task_chain_s { + struct list_head list_item; + struct list_head node; + struct kref ref; + spinlock_t slock; + int id; + void *ctx; + void *obj; + enum task_dir_e direction; + enum task_type_e cur_type; + u8 map[2][8]; + + void (*attach) (struct task_chain_s *, struct task_ops_s *, void *); + void (*submit) (struct task_chain_s *, enum task_type_e); + void (*recycle) (struct task_chain_s *, enum task_type_e); +}; + + +int task_chain_init(struct task_chain_s **task_out, + void *v4l_ctx, + void *obj, + int vb_idx); +void task_order_attach(struct task_chain_s *task, + struct task_ops_s *ops, + void *caller); +void task_chain_clean(struct task_chain_s *task); +void task_chain_release(struct task_chain_s *task); +void task_chain_show(struct task_chain_s *task); +void task_chain_update_object(struct task_chain_s *task, void *obj); + +#endif //AML_TASK_CHAIN_H +
diff --git a/drivers/amvdec_ports/aml_vcodec_adapt.c b/drivers/amvdec_ports/aml_vcodec_adapt.c new file mode 100644 index 0000000..7e08371 --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_adapt.c
@@ -0,0 +1,591 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/types.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/utils/aformat.h> +#include <linux/amlogic/media/frame_sync/tsync.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/frame_sync/timestamp.h> +#include <linux/amlogic/media/utils/amports_config.h> +#include <linux/amlogic/media/frame_sync/tsync_pcr.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/utils/aformat.h> +#include <linux/amlogic/media/registers/register.h> +#include "../stream_input/amports/adec.h" +#include "../stream_input/amports/streambuf.h" +#include "../stream_input/amports/streambuf_reg.h" +#include "../stream_input/parser/tsdemux.h" +#include "../stream_input/parser/psparser.h" +#include "../stream_input/parser/esparser.h" +#include "../frame_provider/decoder/utils/vdec.h" +#include "../common/media_clock/switch/amports_gate.h" +#include <linux/delay.h> +#include "aml_vcodec_adapt.h" +#include <linux/crc32.h> + +#define 
DEFAULT_VIDEO_BUFFER_SIZE (1024 * 1024 * 3) +#define DEFAULT_VIDEO_BUFFER_SIZE_4K (1024 * 1024 * 6) +#define DEFAULT_VIDEO_BUFFER_SIZE_TVP (1024 * 1024 * 10) +#define DEFAULT_VIDEO_BUFFER_SIZE_4K_TVP (1024 * 1024 * 15) +#define DEFAULT_AUDIO_BUFFER_SIZE (1024*768*2) +#define DEFAULT_SUBTITLE_BUFFER_SIZE (1024*256) + +#define PTS_OUTSIDE (1) +#define SYNC_OUTSIDE (2) + +//#define DATA_DEBUG + +extern int dump_output_frame; +extern u32 dump_output_start_position; +extern void aml_recycle_dma_buffers(struct aml_vcodec_ctx *ctx, u32 handle); + +static int slow_input = 0; + +static struct stream_buf_s bufs[BUF_MAX_NUM] = { + { + .reg_base = VLD_MEM_VIFIFO_REG_BASE, + .type = BUF_TYPE_VIDEO, + .buf_start = 0, + .buf_size = DEFAULT_VIDEO_BUFFER_SIZE, + .default_buf_size = DEFAULT_VIDEO_BUFFER_SIZE, + .first_tstamp = INVALID_PTS + }, + { + .reg_base = AIU_MEM_AIFIFO_REG_BASE, + .type = BUF_TYPE_AUDIO, + .buf_start = 0, + .buf_size = DEFAULT_AUDIO_BUFFER_SIZE, + .default_buf_size = DEFAULT_AUDIO_BUFFER_SIZE, + .first_tstamp = INVALID_PTS + }, + { + .reg_base = 0, + .type = BUF_TYPE_SUBTITLE, + .buf_start = 0, + .buf_size = DEFAULT_SUBTITLE_BUFFER_SIZE, + .default_buf_size = DEFAULT_SUBTITLE_BUFFER_SIZE, + .first_tstamp = INVALID_PTS + }, + { + .reg_base = 0, + .type = BUF_TYPE_USERDATA, + .buf_start = 0, + .buf_size = 0, + .first_tstamp = INVALID_PTS + }, + { + .reg_base = HEVC_STREAM_REG_BASE, + .type = BUF_TYPE_HEVC, + .buf_start = 0, + .buf_size = DEFAULT_VIDEO_BUFFER_SIZE_4K, + .default_buf_size = DEFAULT_VIDEO_BUFFER_SIZE_4K, + .first_tstamp = INVALID_PTS + }, +}; + +extern int aml_set_vfm_path, aml_set_vdec_type; +extern bool aml_set_vfm_enable, aml_set_vdec_type_enable; + +static void set_default_params(struct aml_vdec_adapt *vdec) +{ + ulong sync_mode = (PTS_OUTSIDE | SYNC_OUTSIDE); + + vdec->dec_prop.param = (void *)sync_mode; + vdec->dec_prop.format = vdec->format; + vdec->dec_prop.width = 1920; + vdec->dec_prop.height = 1088; + vdec->dec_prop.rate = 3200; +} + 
+static int enable_hardware(struct stream_port_s *port) +{ + if (get_cpu_type() < MESON_CPU_MAJOR_ID_M6) + return -1; + + amports_switch_gate("demux", 1); + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8) + amports_switch_gate("parser_top", 1); + + if (port->type & PORT_TYPE_VIDEO) { + amports_switch_gate("vdec", 1); + + if (has_hevc_vdec()) { + if (port->type & PORT_TYPE_HEVC) + vdec_poweron(VDEC_HEVC); + else + vdec_poweron(VDEC_1); + } else { + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8) + vdec_poweron(VDEC_1); + } + } + + return 0; +} + +static int disable_hardware(struct stream_port_s *port) +{ + if (get_cpu_type() < MESON_CPU_MAJOR_ID_M6) + return -1; + + if (port->type & PORT_TYPE_VIDEO) { + if (has_hevc_vdec()) { + if (port->type & PORT_TYPE_HEVC) + vdec_poweroff(VDEC_HEVC); + else + vdec_poweroff(VDEC_1); + } + + amports_switch_gate("vdec", 0); + } + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8) + amports_switch_gate("parser_top", 0); + + amports_switch_gate("demux", 0); + + return 0; +} + +static void user_buffer_init(void) +{ + struct stream_buf_s *pubuf = &bufs[BUF_TYPE_USERDATA]; + + pubuf->buf_size = 0; + pubuf->buf_start = 0; + pubuf->buf_wp = 0; + pubuf->buf_rp = 0; +} + +static void video_component_release(struct stream_port_s *port) +{ + struct aml_vdec_adapt *ada_ctx + = container_of(port, struct aml_vdec_adapt, port); + struct vdec_s *vdec = ada_ctx->vdec; + + vdec_release(vdec); + +} + +static int video_component_init(struct stream_port_s *port, + struct stream_buf_s *pbuf) +{ + int ret = -1; + struct aml_vdec_adapt *ada_ctx + = container_of(port, struct aml_vdec_adapt, port); + struct vdec_s *vdec = ada_ctx->vdec; + + if ((vdec->port_flag & PORT_FLAG_VFORMAT) == 0) { + v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "vformat not set\n"); + return -EPERM; + } + + if ((vdec->sys_info->height * vdec->sys_info->width) > 1920 * 1088 + || port->vformat == VFORMAT_H264_4K2K) { + port->is_4k = true; + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXLX + && 
(port->vformat == VFORMAT_H264)) + vdec_poweron(VDEC_HEVC); + } else + port->is_4k = false; + + if (port->type & PORT_TYPE_FRAME || + (port->type & PORT_TYPE_ES)) { + ret = vdec_init(vdec, port->is_4k, true); + if (ret < 0) { + v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "failed\n"); + video_component_release(port); + return ret; + } + } + + return 0; +} + +static int vdec_ports_release(struct stream_port_s *port) +{ + struct stream_buf_s *pvbuf = &bufs[BUF_TYPE_VIDEO]; + + if (has_hevc_vdec()) { + if (port->vformat == VFORMAT_HEVC || + port->vformat == VFORMAT_VP9) + pvbuf = &bufs[BUF_TYPE_HEVC]; + } + + if (port->type & PORT_TYPE_MPTS) { + tsync_pcr_stop(); + tsdemux_release(); + } + + if (port->type & PORT_TYPE_MPPS) + psparser_release(); + + if (port->type & PORT_TYPE_VIDEO) + video_component_release(port); + + port->pcr_inited = 0; + port->flag = 0; + + return 0; +} + +static void set_vdec_properity(struct vdec_s *vdec, + struct aml_vdec_adapt *ada_ctx) +{ + vdec->sys_info = &ada_ctx->dec_prop; + vdec->port = &ada_ctx->port; + vdec->format = ada_ctx->video_type; + vdec->sys_info_store = ada_ctx->dec_prop; + + /* binding v4l2 ctx to vdec. 
*/ + vdec->private = ada_ctx->ctx; + + /* set video format, sys info and vfm map.*/ + vdec->port->vformat = vdec->format; + vdec->port->type |= PORT_TYPE_VIDEO; + vdec->port_flag |= (vdec->port->flag | PORT_FLAG_VFORMAT); + if (vdec->slave) { + vdec->slave->format = ada_ctx->dec_prop.format; + vdec->slave->port_flag |= PORT_FLAG_VFORMAT; + } + + vdec->type = VDEC_TYPE_FRAME_BLOCK; + vdec->port->type |= PORT_TYPE_FRAME; + vdec->frame_base_video_path = FRAME_BASE_PATH_V4L_OSD; + + if (aml_set_vdec_type_enable) { + if (aml_set_vdec_type == VDEC_TYPE_STREAM_PARSER) { + vdec->type = VDEC_TYPE_STREAM_PARSER; + vdec->port->type &= ~PORT_TYPE_FRAME; + vdec->port->type |= PORT_TYPE_ES; + } else if (aml_set_vdec_type == VDEC_TYPE_FRAME_BLOCK) { + vdec->type = VDEC_TYPE_FRAME_BLOCK; + vdec->port->type &= ~PORT_TYPE_ES; + vdec->port->type |= PORT_TYPE_FRAME; + } + } + + if (aml_set_vfm_enable) + vdec->frame_base_video_path = aml_set_vfm_path; + + vdec->port->flag = vdec->port_flag; + + vdec->config_len = ada_ctx->config.length > + PAGE_SIZE ? 
PAGE_SIZE : ada_ctx->config.length; + memcpy(vdec->config, ada_ctx->config.buf, vdec->config_len); + + ada_ctx->vdec = vdec; +} + +static int vdec_ports_init(struct aml_vdec_adapt *ada_ctx) +{ + int ret = -1; + struct stream_buf_s *pvbuf = &bufs[BUF_TYPE_VIDEO]; + struct vdec_s *vdec = NULL; + + /* create the vdec instance.*/ + vdec = vdec_create(&ada_ctx->port, NULL); + if (IS_ERR_OR_NULL(vdec)) + return -1; + + vdec->disable_vfm = true; + set_vdec_properity(vdec, ada_ctx); + + /* init hw and gate*/ + ret = enable_hardware(vdec->port); + if (ret < 0) { + v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "enable hw fail.\n"); + return ret; + } + + stbuf_fetch_init(); + user_buffer_init(); + + if ((vdec->port->type & PORT_TYPE_VIDEO) + && (vdec->port_flag & PORT_FLAG_VFORMAT)) { + vdec->port->is_4k = false; + if (has_hevc_vdec()) { + if (vdec->port->vformat == VFORMAT_HEVC || + vdec->port->vformat == VFORMAT_VP9) + pvbuf = &bufs[BUF_TYPE_HEVC]; + } + + ret = video_component_init(vdec->port, pvbuf); + if (ret < 0) { + v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "video_component_init failed\n"); + return ret; + } + + /* connect vdec at the end after all HW initialization */ + vdec_connect(vdec); + } + + return 0; +} + +int video_decoder_init(struct aml_vdec_adapt *vdec) +{ + int ret = -1; + + /* sets configure data */ + set_default_params(vdec); + + /* init the buffer work space and connect vdec.*/ + ret = vdec_ports_init(vdec); + if (ret < 0) { + v4l_dbg(vdec->ctx, V4L_DEBUG_CODEC_ERROR, "vdec ports init fail.\n"); + goto out; + } +out: + return ret; +} + +int video_decoder_release(struct aml_vdec_adapt *vdec) +{ + int ret = -1; + struct stream_port_s *port = &vdec->port; + + ret = vdec_ports_release(port); + if (ret < 0) { + v4l_dbg(vdec->ctx, V4L_DEBUG_CODEC_ERROR, "vdec ports release fail.\n"); + goto out; + } + + /* disable gates */ + ret = disable_hardware(port); + if (ret < 0) { + v4l_dbg(vdec->ctx, V4L_DEBUG_CODEC_ERROR, "disable hw fail.\n"); + goto out; + } 
+out: + return ret; +} + +void dump(const char* path, const char *data, unsigned int size) +{ + struct file *fp; + + fp = filp_open(path, + O_CREAT | O_RDWR | O_LARGEFILE | O_APPEND, 0600); + if (!IS_ERR(fp)) { + kernel_write(fp, data, size, 0); + filp_close(fp, NULL); + } else { + pr_info("Dump ES fail, should check RW permission, size:%x\n", size); + } +} + +int vdec_vbuf_write(struct aml_vdec_adapt *ada_ctx, + const char *buf, unsigned int count) +{ + int ret = -1; + int try_cnt = 100; + struct stream_port_s *port = &ada_ctx->port; + struct vdec_s *vdec = ada_ctx->vdec; + struct stream_buf_s *pbuf = NULL; + + if (has_hevc_vdec()) { + pbuf = (port->type & PORT_TYPE_HEVC) ? &bufs[BUF_TYPE_HEVC] : + &bufs[BUF_TYPE_VIDEO]; + } else + pbuf = &bufs[BUF_TYPE_VIDEO]; + + /*if (!(port_get_inited(priv))) { + r = video_decoder_init(priv); + if (r < 0) + return r; + }*/ + + do { + if (vdec->port_flag & PORT_FLAG_DRM) + ret = drm_write(ada_ctx->filp, pbuf, buf, count); + else + ret = esparser_write(ada_ctx->filp, pbuf, buf, count); + + if (ret == -EAGAIN) + msleep(30); + } while (ret == -EAGAIN && try_cnt--); + + if (slow_input) { + v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO, + "slow_input: es codec write size %x\n", ret); + msleep(10); + } + +#ifdef DATA_DEBUG + /* dump to file */ + //dump_write(vbuf, size); + //v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO, "vbuf: %p, size: %u, ret: %d\n", vbuf, size, ret); +#endif + + return ret; +} + +bool vdec_input_full(struct aml_vdec_adapt *ada_ctx) +{ + struct vdec_s *vdec = ada_ctx->vdec; + + return (vdec->input.have_frame_num > 60) ? 
true : false; +} + +int vdec_vframe_write(struct aml_vdec_adapt *ada_ctx, + const char *buf, unsigned int count, u64 timestamp, ulong meta_ptr) +{ + int ret = -1; + struct vdec_s *vdec = ada_ctx->vdec; + + /* set timestamp */ + vdec_set_timestamp(vdec, timestamp); + + /* set metadata */ + vdec_set_metadata(vdec, meta_ptr); + + ret = vdec_write_vframe(vdec, buf, count); + + if (slow_input) { + v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO, + "slow_input: frame codec write size %d\n", ret); + msleep(30); + } + + if (dump_output_frame > 0 && + (!dump_output_start_position || + (dump_output_start_position == crc32_le(0, buf, count)))) { + dump("/data/es.data", buf, count); + dump_output_frame--; + dump_output_start_position = 0; + } + + v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_INPUT, + "write frames, vbuf: %p, size: %u, ret: %d, crc: %x, ts: %llu\n", + buf, count, ret, crc32_le(0, buf, count), timestamp); + + return ret; +} + +void vdec_vframe_input_free(void *priv, u32 handle) +{ + struct aml_vcodec_ctx *ctx = priv; + + aml_recycle_dma_buffers(ctx, handle); +} + +int vdec_vframe_write_with_dma(struct aml_vdec_adapt *ada_ctx, + ulong addr, u32 count, u64 timestamp, u32 handle, + chunk_free free, void* priv) +{ + int ret = -1; + struct vdec_s *vdec = ada_ctx->vdec; + + /* set timestamp */ + vdec_set_timestamp(vdec, timestamp); + + ret = vdec_write_vframe_with_dma(vdec, addr, count, + handle, free, priv); + + if (slow_input) { + v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO, + "slow_input: frame codec write size %d\n", ret); + msleep(30); + } + + v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_INPUT, + "write frames, vbuf: %lx, size: %u, ret: %d, ts: %llu\n", + addr, count, ret, timestamp); + + return ret; +} + +void aml_decoder_flush(struct aml_vdec_adapt *ada_ctx) +{ + struct vdec_s *vdec = ada_ctx->vdec; + + if (vdec) + vdec_set_eos(vdec, true); +} + +int aml_codec_reset(struct aml_vdec_adapt *ada_ctx, int *mode) +{ + struct vdec_s *vdec = ada_ctx->vdec; + int ret = 0; + + if (vdec) 
{ + if (ada_ctx->ctx->v4l_resolution_change) + *mode = V4L_RESET_MODE_LIGHT; + else + vdec_set_eos(vdec, false); + + v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO, + "reset mode: %d, es frames buffering: %d\n", + *mode, vdec_frame_number(ada_ctx)); + + ret = vdec_v4l2_reset(vdec, *mode); + *mode = V4L_RESET_MODE_NORMAL; + } + + return ret; +} + +bool is_input_ready(struct aml_vdec_adapt *ada_ctx) +{ + struct vdec_s *vdec = ada_ctx->vdec; + int state = VDEC_STATUS_UNINITIALIZED; + + if (vdec) { + state = vdec_get_status(vdec); + + if (state == VDEC_STATUS_CONNECTED + || state == VDEC_STATUS_ACTIVE) + return true; + } + + return false; +} + +int vdec_frame_number(struct aml_vdec_adapt *ada_ctx) +{ + struct vdec_s *vdec = ada_ctx->vdec; + + if (vdec) + return vdec_get_frame_num(vdec); + else + return -1; +} + +int vdec_get_instance_num(void) +{ + return vdec_get_core_nr(); +} + +void v4l2_config_vdec_parm(struct aml_vdec_adapt *ada_ctx, u8 *data, u32 len) +{ + struct vdec_s *vdec = ada_ctx->vdec; + + vdec->config_len = len > PAGE_SIZE ? PAGE_SIZE : len; + memcpy(vdec->config, data, vdec->config_len); +} + +void vdec_set_duration(s32 duration) +{ + vdec_frame_rate_uevent(duration); +}
diff --git a/drivers/amvdec_ports/aml_vcodec_adapt.h b/drivers/amvdec_ports/aml_vcodec_adapt.h new file mode 100644 index 0000000..c8641ce --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_adapt.h
/*
* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Description:
*	Adaptation layer between the V4L2 decoder (aml_vcodec_dec) and the
*	Amlogic vdec core: one struct aml_vdec_adapt wraps one vdec_s
*	instance together with its stream port and configuration.
*/
#ifndef VDEC_ADAPT_H
#define VDEC_ADAPT_H

#include <linux/amlogic/media/utils/vformat.h>
#include <linux/amlogic/media/utils/amstream.h>
#include "../stream_input/amports/streambuf.h"
#include "../frame_provider/decoder/utils/vdec_input.h"
#include "aml_vcodec_drv.h"

/*
 * Per-instance state of the vdec adaptation layer.
 * Created/owned by the V4L2 context (see aml_vcodec_ctx::ada_ctx).
 */
struct aml_vdec_adapt {
	/* video format id; presumably a VFORMAT_* value — not set in the
	 * code visible here, TODO confirm against the caller. */
	int format;
	/* not referenced in the visible code — confirm before removing. */
	void *vsi;
	/* not referenced in the visible code. */
	int32_t failure;
	/* not referenced in the visible code. */
	uint32_t inst_addr;
	/* not referenced in the visible code. */
	unsigned int signaled;
	/* owning V4L2 codec context; also stored in vdec->private and
	 * used for all v4l_dbg() logging. */
	struct aml_vcodec_ctx *ctx;
	/* not referenced in the visible code. */
	wait_queue_head_t wq;
	/* file handle passed to drm_write()/esparser_write() by
	 * vdec_vbuf_write(). */
	struct file *filp;
	/* underlying decoder instance, created by vdec_ports_init() via
	 * vdec_create() and cached here by set_vdec_properity(). */
	struct vdec_s *vdec;
	/* stream port handed to vdec_create(); released through
	 * vdec_ports_release() in video_decoder_release(). */
	struct stream_port_s port;
	/* decoder system info; dec_prop.format is propagated to the
	 * slave vdec when one exists. */
	struct dec_sysinfo dec_prop;
	/* userspace configuration blob; config.buf is copied into
	 * vdec->config, truncated to PAGE_SIZE. */
	struct v4l2_config_parm config;
	/* not referenced in the visible code. */
	int video_type;
	/* instance name used in log output (e.g. OUT_BUFF traces). */
	char *frm_name;
};

/* Set default params and init buffer workspace, then create and connect
 * the vdec instance. Returns 0 on success, negative on failure. */
int video_decoder_init(struct aml_vdec_adapt *ada_ctx);

/* Release the vdec ports and disable the decoder hardware/gates.
 * Returns 0 on success, negative on failure. */
int video_decoder_release(struct aml_vdec_adapt *ada_ctx);

/* Write an ES chunk into the stream buffer (DRM or esparser path),
 * retrying on -EAGAIN. Returns bytes written or negative error. */
int vdec_vbuf_write(struct aml_vdec_adapt *ada_ctx,
	const char *buf, unsigned int count);

/* Write one frame-based chunk with its timestamp and metadata pointer.
 * Returns the vdec_write_vframe() result. */
int vdec_vframe_write(struct aml_vdec_adapt *ada_ctx,
	const char *buf, unsigned int count, u64 timestamp, ulong meta_ptr);

/* chunk_free callback: recycles the DMA buffer identified by handle
 * back to the V4L2 context (priv). */
void vdec_vframe_input_free(void *priv, u32 handle);

/* Same as vdec_vframe_write() but sourced from a DMA address; free/priv
 * form the recycle callback for the buffer handle. */
int vdec_vframe_write_with_dma(struct aml_vdec_adapt *ada_ctx,
	ulong addr, u32 count, u64 timestamp, u32 handle,
	chunk_free free, void *priv);

/* True once the vdec input holds more than 60 queued frames. */
bool vdec_input_full(struct aml_vdec_adapt *ada_ctx);

/* Signal EOS to the underlying vdec (no-op if it does not exist). */
void aml_decoder_flush(struct aml_vdec_adapt *ada_ctx);

/* Reset the decoder; *flag selects the reset mode and is set back to
 * V4L_RESET_MODE_NORMAL afterwards. Returns vdec_v4l2_reset() result. */
int aml_codec_reset(struct aml_vdec_adapt *ada_ctx, int *flag);

extern void dump_write(const char __user *buf, size_t count);

/* True when the vdec status is CONNECTED or ACTIVE. */
bool is_input_ready(struct aml_vdec_adapt *ada_ctx);

/* Number of frames buffered in the vdec input, or -1 without a vdec. */
int vdec_frame_number(struct aml_vdec_adapt *ada_ctx);

/* Number of active vdec core instances (vdec_get_core_nr()). */
int vdec_get_instance_num(void);

/* Report the stream frame duration via a frame-rate uevent. */
void vdec_set_duration(s32 duration);

#endif /* VDEC_ADAPT_H */
diff --git a/drivers/amvdec_ports/aml_vcodec_dec.c b/drivers/amvdec_ports/aml_vcodec_dec.c new file mode 100644 index 0000000..2bbfb6e --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_dec.c
@@ -0,0 +1,4593 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <media/v4l2-event.h> +#include <media/v4l2-mem2mem.h> +#include <media/videobuf2-dma-contig.h> +#include <media/videobuf2-dma-sg.h> + +#include <linux/delay.h> +#include <linux/atomic.h> +#include <linux/crc32.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/amlogic/meson_uvm_core.h> +#include <linux/scatterlist.h> +#include <linux/sched/clock.h> +#include <linux/highmem.h> +#include <uapi/linux/sched/types.h> + +#include "aml_vcodec_drv.h" +#include "aml_vcodec_dec.h" +#include "aml_vcodec_util.h" +#include "vdec_drv_if.h" +#include "aml_vcodec_adapt.h" +#include "aml_vcodec_vpp.h" +#include "aml_vcodec_ge2d.h" + +#include "../frame_provider/decoder/utils/decoder_bmmu_box.h" +#include "../frame_provider/decoder/utils/decoder_mmu_box.h" +#include "../common/chips/decoder_cpu_ver_info.h" +#include "utils/common.h" +#include "../frame_provider/decoder/utils/vdec_sync.h" + + +#define KERNEL_ATRACE_TAG KERNEL_ATRACE_TAG_V4L2 +#include <trace/events/meson_atrace.h> + + +#define OUT_FMT_IDX (0) //default h264 +#define CAP_FMT_IDX (9) //capture nv21 +#define CAP_FMT_I420_IDX (12) //use for mjpeg + +#define AML_VDEC_MIN_W 64U +#define 
AML_VDEC_MIN_H 64U +#define DFT_CFG_WIDTH AML_VDEC_MIN_W +#define DFT_CFG_HEIGHT AML_VDEC_MIN_H + +#define V4L2_CID_USER_AMLOGIC_BASE (V4L2_CID_USER_BASE + 0x1100) +#define AML_V4L2_SET_DRMMODE (V4L2_CID_USER_AMLOGIC_BASE + 0) +#define AML_V4L2_GET_INPUT_BUFFER_NUM (V4L2_CID_USER_AMLOGIC_BASE + 1) +#define AML_V4L2_SET_DURATION (V4L2_CID_USER_AMLOGIC_BASE + 2) +#define AML_V4L2_GET_FILMGRAIN_INFO (V4L2_CID_USER_AMLOGIC_BASE + 3) + +#define WORK_ITEMS_MAX (32) +#define MAX_DI_INSTANCE (2) + +//#define USEC_PER_SEC 1000000 + +#define call_void_memop(vb, op, args...) \ + do { \ + if ((vb)->vb2_queue->mem_ops->op) \ + (vb)->vb2_queue->mem_ops->op(args); \ + } while (0) + +static struct aml_video_fmt aml_video_formats[] = { + { + .name = "H.264", + .fourcc = V4L2_PIX_FMT_H264, + .type = AML_FMT_DEC, + .num_planes = 1, + }, + { + .name = "H.265", + .fourcc = V4L2_PIX_FMT_HEVC, + .type = AML_FMT_DEC, + .num_planes = 1, + }, + { + .name = "VP9", + .fourcc = V4L2_PIX_FMT_VP9, + .type = AML_FMT_DEC, + .num_planes = 1, + }, + { + .name = "MPEG1", + .fourcc = V4L2_PIX_FMT_MPEG1, + .type = AML_FMT_DEC, + .num_planes = 1, + }, + { + .name = "MPEG2", + .fourcc = V4L2_PIX_FMT_MPEG2, + .type = AML_FMT_DEC, + .num_planes = 1, + }, + { + .name = "MPEG4", + .fourcc = V4L2_PIX_FMT_MPEG4, + .type = AML_FMT_DEC, + .num_planes = 1, + }, + { + .name = "MJPEG", + .fourcc = V4L2_PIX_FMT_MJPEG, + .type = AML_FMT_DEC, + .num_planes = 1, + }, + { + .name = "AV1", + .fourcc = V4L2_PIX_FMT_AV1, + .type = AML_FMT_DEC, + .num_planes = 1, + }, + { + .name = "NV21", + .fourcc = V4L2_PIX_FMT_NV21, + .type = AML_FMT_FRAME, + .num_planes = 1, + }, + { + .name = "NV21M", + .fourcc = V4L2_PIX_FMT_NV21M, + .type = AML_FMT_FRAME, + .num_planes = 2, + }, + { + .name = "NV12", + .fourcc = V4L2_PIX_FMT_NV12, + .type = AML_FMT_FRAME, + .num_planes = 1, + }, + { + .name = "NV12M", + .fourcc = V4L2_PIX_FMT_NV12M, + .type = AML_FMT_FRAME, + .num_planes = 2, + }, + { + .name = "YUV420", + .fourcc = 
V4L2_PIX_FMT_YUV420, + .type = AML_FMT_FRAME, + .num_planes = 1, + }, + { + .name = "YUV420M", + .fourcc = V4L2_PIX_FMT_YUV420M, + .type = AML_FMT_FRAME, + .num_planes = 2, + }, +}; + +static const struct aml_codec_framesizes aml_vdec_framesizes[] = { + { + .fourcc = V4L2_PIX_FMT_H264, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_HEVC, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_VP9, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_MPEG1, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_MPEG2, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_MPEG4, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_MJPEG, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_AV1, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_NV21, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_NV21M, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_NV12, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_NV12M, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_YUV420, + .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, + { + .fourcc = V4L2_PIX_FMT_YUV420M, + .stepwise = { 
AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2, + AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2}, + }, +}; + +#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(aml_vdec_framesizes) +#define NUM_FORMATS ARRAY_SIZE(aml_video_formats) + +extern bool multiplanar; +extern int dump_capture_frame; +extern int bypass_vpp; +extern int bypass_ge2d; +extern bool support_format_I420; +extern bool support_mjpeg; +extern int bypass_progressive; +extern int force_enable_nr; +extern int force_enable_di_local_buffer; +extern int max_di_instance; +extern int bypass_nr_flag; + +extern int dmabuf_fd_install_data(int fd, void* data, u32 size); +extern bool is_v4l2_buf_file(struct file *file); +extern int get_double_write_ratio(int dw_mode); +static void box_release(struct kref *kref); +static struct internal_comp_buf* vb_to_comp(struct aml_vcodec_ctx *ctx, + struct vb2_buffer *vb); +static void update_ctx_dimension(struct aml_vcodec_ctx *ctx, u32 type); +static void copy_v4l2_format_dimention(struct v4l2_pix_format_mplane *pix_mp, + struct v4l2_pix_format *pix, + struct aml_q_data *q_data, + u32 type); + +static ulong aml_vcodec_ctx_lock(struct aml_vcodec_ctx *ctx) +{ + ulong flags; + + spin_lock_irqsave(&ctx->slock, flags); + + return flags; +} + +static void aml_vcodec_ctx_unlock(struct aml_vcodec_ctx *ctx, ulong flags) +{ + spin_unlock_irqrestore(&ctx->slock, flags); +} + +static ulong dmabuf_contiguous_size(struct sg_table *sgt) +{ + struct scatterlist *s; + dma_addr_t expected = sg_dma_address(sgt->sgl); + ulong size = 0; + u32 i; + + for_each_sg(sgt->sgl, s, sgt->nents, i) { + if (sg_dma_address(s) != expected) + break; + expected = sg_dma_address(s) + sg_dma_len(s); + size += sg_dma_len(s); + } + + return size; +} + +static struct aml_video_fmt *aml_vdec_find_format(struct v4l2_format *f) +{ + struct aml_video_fmt *fmt; + unsigned int k; + + for (k = 0; k < NUM_FORMATS; k++) { + fmt = &aml_video_formats[k]; + if (fmt->fourcc == f->fmt.pix_mp.pixelformat) + return fmt; + } + + return NULL; +} + +static struct 
aml_q_data *aml_vdec_get_q_data(struct aml_vcodec_ctx *ctx, + enum v4l2_buf_type type) +{ + if (V4L2_TYPE_IS_OUTPUT(type)) + return &ctx->q_data[AML_Q_DATA_SRC]; + + return &ctx->q_data[AML_Q_DATA_DST]; +} + +void aml_vdec_dispatch_event(struct aml_vcodec_ctx *ctx, u32 changes) +{ + struct v4l2_event event = {0}; + + switch (changes) { + case V4L2_EVENT_SRC_CH_RESOLUTION: + case V4L2_EVENT_SRC_CH_HDRINFO: + case V4L2_EVENT_REQUEST_RESET: + case V4L2_EVENT_REQUEST_EXIT: + event.type = V4L2_EVENT_SOURCE_CHANGE; + event.u.src_change.changes = changes; + break; + case V4L2_EVENT_SEND_EOS: + event.type = V4L2_EVENT_EOS; + break; + default: + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "unsupport dispatch event %x\n", changes); + return; + } + + v4l2_event_queue_fh(&ctx->fh, &event); + if (changes != V4L2_EVENT_SRC_CH_HDRINFO) + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "changes: %x\n", changes); + else + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "changes: %x\n", changes); +} + +static void aml_vdec_flush_decoder(struct aml_vcodec_ctx *ctx) +{ + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "%s\n", __func__); + + aml_decoder_flush(ctx->ada_ctx); +} + +/* Conditions: + * Always connect VPP for mpeg2 and h264 when the stream size is under 2K. + * Always connect VPP for hevc/av1/vp9 when color space is not SDR and + * stream size is under 2K. + * For DV, need application to notify V4L2 driver to enforce the color space + * conversion. Plan to do it through a system node. + * Do not connect VPP in other cases. 
+ */ +static bool vpp_needed(struct aml_vcodec_ctx *ctx, u32* mode) +{ + int width = ctx->picinfo.coded_width; + int height = ctx->picinfo.coded_height; + int size = 1920 * 1088; + + if (bypass_vpp) + return false; + + if (!ctx->vpp_cfg.enable_nr && + (ctx->picinfo.field == V4L2_FIELD_NONE)) { + return false; + } + + if (!ctx->vpp_cfg.enable_nr && + (ctx->output_pix_fmt == V4L2_PIX_FMT_HEVC)) { + if (is_over_size(width, height, size)) { + return false; + } + } + + if ((ctx->output_pix_fmt == V4L2_PIX_FMT_H264) && + (ctx->picinfo.field != V4L2_FIELD_NONE)) { + if (is_over_size(width, height, size)) { + return false; + } + } + + if (ctx->vpp_cfg.enable_nr) { + if (ctx->vpp_cfg.enable_local_buf) + *mode = VPP_MODE_NOISE_REDUC_LOCAL; + else + *mode = VPP_MODE_NOISE_REDUC; + } else { + if (ctx->vpp_cfg.enable_local_buf) + *mode = VPP_MODE_DI_LOCAL; + else + *mode = VPP_MODE_DI; + } + +#if 0//enable later + if (ctx->colorspace != V4L2_COLORSPACE_DEFAULT && + !is_over_size(width, height, size)) { + if (ctx->vpp_cfg.enable_local_buf) + *mode = VPP_MODE_COLOR_CONV_LOCAL; + else + *mode = VPP_MODE_COLOR_CONV; + } +#endif + + return true; +} + +static bool ge2d_needed(struct aml_vcodec_ctx *ctx, u32* mode) +{ + bool enable_fence = (ctx->config.parm.dec.cfg.low_latency_mode & 2) ? 
1 : 0; + + if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) && enable_fence) { + return false; + } + + if (bypass_ge2d) + return false; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) { + if ((ctx->output_pix_fmt != V4L2_PIX_FMT_H264) && + (ctx->output_pix_fmt != V4L2_PIX_FMT_MPEG1) && + (ctx->output_pix_fmt != V4L2_PIX_FMT_MPEG2) && + (ctx->output_pix_fmt != V4L2_PIX_FMT_MPEG4) && + (ctx->output_pix_fmt != V4L2_PIX_FMT_MJPEG)) { + return false; + } + } else if (ctx->output_pix_fmt != V4L2_PIX_FMT_MJPEG) { + return false; + } + + if (ctx->picinfo.field != V4L2_FIELD_NONE) { + return false; + } + + if ((ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + *mode = GE2D_MODE_CONVERT_NV12; + else if ((ctx->cap_pix_fmt == V4L2_PIX_FMT_NV21) || + (ctx->cap_pix_fmt == V4L2_PIX_FMT_NV21M)) + *mode = GE2D_MODE_CONVERT_NV21; + else + *mode = GE2D_MODE_CONVERT_NV21; + + *mode |= GE2D_MODE_CONVERT_LE; + + return true; +} + +static u32 v4l_buf_size_decision(struct aml_vcodec_ctx *ctx) +{ + u32 mode, total_size; + struct vdec_pic_info *picinfo = &ctx->picinfo; + struct aml_vpp_cfg_infos *vpp = &ctx->vpp_cfg; + struct aml_ge2d_cfg_infos *ge2d = &ctx->ge2d_cfg; + + if (vpp_needed(ctx, &mode)) { + vpp->mode = mode; + vpp->fmt = ctx->cap_pix_fmt; + vpp->is_drm = ctx->is_drm_mode; + vpp->buf_size = aml_v4l2_vpp_get_buf_num(vpp->mode) + + picinfo->vpp_margin; + + if (picinfo->field == V4L2_FIELD_NONE) { + vpp->is_prog = true; + vpp->buf_size = 0; + } else { + vpp->is_prog = false; + /* for between with dec & vpp. 
*/ + picinfo->dpb_margin = 2; + } + + if (vpp->is_prog && + !vpp->enable_local_buf && + bypass_progressive) { + vpp->is_bypass_p = true; + } + ctx->vpp_is_need = true; + } else { + vpp->buf_size = 0; + ctx->vpp_is_need = false; + } + + if (ge2d_needed(ctx, &mode)) { + ge2d->mode = mode; + ge2d->buf_size = 4 + picinfo->dpb_margin; + ctx->ge2d_is_need = true; + picinfo->dpb_margin = 2; + } else { + ge2d->buf_size = 0; + ctx->ge2d_is_need = false; + } + + ctx->dpb_size = picinfo->dpb_frames + picinfo->dpb_margin; + ctx->vpp_size = vpp->buf_size; + ctx->ge2d_size = ge2d->buf_size; + + total_size = ctx->dpb_size + ctx->vpp_size + ctx->ge2d_size; + + if (total_size > V4L_CAP_BUFF_MAX) { + if (ctx->ge2d_size) { + ctx->dpb_size = V4L_CAP_BUFF_MAX - ctx->ge2d_size - ctx->vpp_size; + } else if (ctx->vpp_size) { + ctx->dpb_size = V4L_CAP_BUFF_MAX - ctx->vpp_size; + } else { + ctx->dpb_size = V4L_CAP_BUFF_MAX; + } + picinfo->dpb_margin = ctx->dpb_size - picinfo->dpb_frames; + total_size = V4L_CAP_BUFF_MAX; + } + vdec_if_set_param(ctx, SET_PARAM_PIC_INFO, picinfo); + + return total_size; +} + +void aml_vdec_pic_info_update(struct aml_vcodec_ctx *ctx) +{ + if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->last_decoded_picinfo)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Cannot get param : GET_PARAM_PICTURE_INFO ERR\n"); + return; + } + + if (ctx->last_decoded_picinfo.visible_width == 0 || + ctx->last_decoded_picinfo.visible_height == 0 || + ctx->last_decoded_picinfo.coded_width == 0 || + ctx->last_decoded_picinfo.coded_height == 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Cannot get correct pic info\n"); + return; + } + + /*if ((ctx->last_decoded_picinfo.visible_width == ctx->picinfo.visible_width) || + (ctx->last_decoded_picinfo.visible_height == ctx->picinfo.visible_height)) + return;*/ + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "new(%d,%d), old(%d,%d), real(%d,%d)\n", + ctx->last_decoded_picinfo.visible_width, + ctx->last_decoded_picinfo.visible_height, + 
ctx->picinfo.visible_width, ctx->picinfo.visible_height, + ctx->last_decoded_picinfo.coded_width, + ctx->last_decoded_picinfo.coded_width); + + ctx->picinfo = ctx->last_decoded_picinfo; + + if (ctx->vpp_is_need) + ctx->vpp_cfg.is_vpp_reset = true; + + v4l_buf_size_decision(ctx); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "Update picture buffer count: dec:%u, vpp:%u, ge2d:%u, margin:%u, total:%u\n", + ctx->picinfo.dpb_frames, ctx->vpp_size, ctx->ge2d_size, + ctx->picinfo.dpb_margin, + CTX_BUF_TOTAL(ctx)); +} + +void vdec_frame_buffer_release(void *data) +{ + struct file_private_data *priv_data = + (struct file_private_data *) data; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *) + priv_data->v4l_dec_ctx; + struct aml_video_dec_buf *vb = (struct aml_video_dec_buf *) + priv_data->vb_handle; + struct uvm_hook_mod_info *uvm = NULL; + + if (ctx && ctx->uvm_proxy) { + uvm = &ctx->uvm_proxy[vb->internal_index]; + uvm->free(uvm->arg); + } + + memset(data, 0, sizeof(struct file_private_data)); + kfree(data); +} + +static void v4l2_buff_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state) +{ + struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(buf->vb2_buf.vb2_queue); + + mutex_lock(&ctx->buff_done_lock); + if (buf->vb2_buf.state != VB2_BUF_STATE_ACTIVE) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "vb is not active state = %d!\n", + buf->vb2_buf.state); + mutex_unlock(&ctx->buff_done_lock); + return; + } + v4l2_m2m_buf_done(buf, state); + mutex_unlock(&ctx->buff_done_lock); +} + +static void comp_buf_set_vframe(struct aml_vcodec_ctx *ctx, + struct vb2_buffer *vb, + struct vframe_s *vf) +{ + dmabuf_set_vframe(vb->planes[0].dbuf, vf, VF_SRC_DECODER); +} + +static void fb_map_table_clean(struct aml_vcodec_ctx *ctx) +{ + int i; + ulong flags; + + flags = aml_vcodec_ctx_lock(ctx); + + for (i = 0; i < ARRAY_SIZE(ctx->fb_map); i++) { + ctx->fb_map[i].addr = 0; + ctx->fb_map[i].vframe = NULL; + ctx->fb_map[i].task = NULL; + ctx->fb_map[i].icomp = 0; + } + + 
aml_vcodec_ctx_unlock(ctx, flags); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "%s done\n", __func__); +} + +static void fb_map_table_hold(struct aml_vcodec_ctx *ctx, + struct vb2_buffer *vb, + struct vframe_s *vf, + struct task_chain_s *task, + u32 icomp) +{ + int i; + ulong addr, flags; + + flags = aml_vcodec_ctx_lock(ctx); + + addr = vb2_dma_contig_plane_dma_addr(vb, 0); + + for (i = 0; i < ARRAY_SIZE(ctx->fb_map); i++) { + if (!ctx->fb_map[i].addr || + (addr == ctx->fb_map[i].addr)) { + ctx->fb_map[i].task = task; + ctx->fb_map[i].addr = addr; + ctx->fb_map[i].vframe = vf; + ctx->fb_map[i].icomp = icomp; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "%s, task:%px, vf:%px, addr:%lx, icomp:%u\n", + __func__, task, vf, addr, icomp); + break; + } + } + + aml_vcodec_ctx_unlock(ctx, flags); + + if (i >= ARRAY_SIZE(ctx->fb_map)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "%s, table is full. addr:%lx, vf:%px\n", + __func__, addr, vf); + } +} + +static void fb_map_table_fetch(struct aml_vcodec_ctx *ctx, + struct vb2_buffer *vb, + struct vframe_s **vf, + struct task_chain_s **task, + u32 *icomp) +{ + int i; + ulong addr, flags; + + flags = aml_vcodec_ctx_lock(ctx); + + addr = vb2_dma_contig_plane_dma_addr(vb, 0); + + for (i = 0; i < ARRAY_SIZE(ctx->fb_map); i++) { + if (addr == ctx->fb_map[i].addr) { + *task = ctx->fb_map[i].task; + *vf = ctx->fb_map[i].vframe; + *icomp = ctx->fb_map[i].icomp; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "%s, task:%px, vf:%px, addr:%lx, icomp:%u\n", + __func__, task, vf, addr, *icomp); + + ctx->fb_map[i].task = NULL; + ctx->fb_map[i].vframe = NULL; + ctx->fb_map[i].addr = 0; + ctx->fb_map[i].icomp = 0; + break; + } + } + + aml_vcodec_ctx_unlock(ctx, flags); + + if (i >= ARRAY_SIZE(ctx->fb_map)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "%s, there is new addr:%lx.\n", + __func__, addr); + } +} + +static bool is_fb_mapped(struct aml_vcodec_ctx *ctx, ulong addr) +{ + int i; + ulong flags; + + flags = aml_vcodec_ctx_lock(ctx); + + for (i = 0; i < 
ARRAY_SIZE(ctx->fb_map); i++) { + if (addr == ctx->fb_map[i].addr) + break; + } + + aml_vcodec_ctx_unlock(ctx, flags); + + if (i >= ARRAY_SIZE(ctx->fb_map)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "%s, addr:%lx isn't output.\n", + __func__, addr); + return false; + } + + return true; +} + + static void post_frame_to_upper(struct aml_vcodec_ctx *ctx, + struct vdec_v4l2_buffer *fb) +{ + struct aml_video_dec_buf *dstbuf = + container_of(fb, struct aml_video_dec_buf, frame_buffer); + struct vb2_buffer *vb2_buf = &dstbuf->vb.vb2_buf; + struct vframe_s *vf = fb->vframe; + struct vb2_v4l2_buffer *vb2_v4l2 = NULL; + + vf->index_disp = ctx->index_disp; + ctx->index_disp++; + ctx->post_to_upper_done = false; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_OUTPUT, + "OUT_BUFF (%s, st:%d, seq:%d) vb:(%d, %px), vf:(%d, %px), ts:%lld, " + "Y:(%lx, %u) C/U:(%lx, %u) V:(%lx, %u)\n", + ctx->ada_ctx->frm_name, fb->status, vf->index_disp, + vb2_buf->index, vb2_buf, + vf->index & 0xff, vf, + vf->timestamp, + fb->m.mem[0].addr, fb->m.mem[0].size, + fb->m.mem[1].addr, fb->m.mem[1].size, + fb->m.mem[2].addr, fb->m.mem[2].size); + + vb2_v4l2 = container_of(vb2_buf, struct vb2_v4l2_buffer, vb2_buf); + + if (dstbuf->frame_buffer.num_planes == 1) { + vb2_set_plane_payload(vb2_buf, 0, fb->m.mem[0].bytes_used); + } else if (dstbuf->frame_buffer.num_planes == 2) { + vb2_set_plane_payload(vb2_buf, 0, fb->m.mem[0].bytes_used); + vb2_set_plane_payload(vb2_buf, 1, fb->m.mem[1].bytes_used); + } + vb2_buf->timestamp = vf->timestamp; + dstbuf->vb.flags |= vf->frame_type; + + if ((ctx->picinfo.field == V4L2_FIELD_INTERLACED) && (!ctx->vpp_is_need)) { + vb2_v4l2->field = V4L2_FIELD_INTERLACED; + } + + do { + unsigned int dw_mode = VDEC_DW_NO_AFBC; + struct file *fp; + char file_name[32] = {0}; + + if (!dump_capture_frame || ctx->is_drm_mode) + break; + if (vdec_if_get_param(ctx, GET_PARAM_DW_MODE, &dw_mode)) + break; + if (dw_mode == VDEC_DW_AFBC_ONLY) + break; + + snprintf(file_name, 32, 
"/data/dec_dump_%ux%u.raw", vf->width, vf->height); + + fp = filp_open(file_name, + O_CREAT | O_RDWR | O_LARGEFILE | O_APPEND, 0600); + + if (!IS_ERR(fp)) { + struct vb2_buffer *vb = vb2_buf; + + kernel_write(fp,vb2_plane_vaddr(vb, 0),vb->planes[0].length, 0); + if (dstbuf->frame_buffer.num_planes == 2) + kernel_write(fp,vb2_plane_vaddr(vb, 1), + vb->planes[1].length, 0); + pr_info("dump idx: %d %dx%d\n", dump_capture_frame, vf->width, vf->height); + dump_capture_frame--; + filp_close(fp, NULL); + } + } while(0); + + ATRACE_COUNTER("VC_OUT_VSINK-1.submit", vb2_buf->index); + ATRACE_COUNTER("V_ST_VSINK-input_buffering", vdec_frame_number(ctx->ada_ctx)); + + if (vf->flag & VFRAME_FLAG_EMPTY_FRAME_V4L) { + dstbuf->vb.flags = V4L2_BUF_FLAG_LAST; + if (dstbuf->frame_buffer.num_planes == 1) { + vb2_set_plane_payload(vb2_buf, 0, 0); + } else if (dstbuf->frame_buffer.num_planes == 2) { + vb2_set_plane_payload(vb2_buf, 0, 0); + vb2_set_plane_payload(vb2_buf, 1, 0); + } + ctx->has_receive_eos = true; + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "recevie a empty frame. idx: %d, state: %d\n", + vb2_buf->index, vb2_buf->state); + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "receive vbuf idx: %d, state: %d\n", + vb2_buf->index, vb2_buf->state); + + if (vf->flag & VFRAME_FLAG_EMPTY_FRAME_V4L) { + if (ctx->v4l_resolution_change) { + /* make the run to stanby until new buffs to enque. */ + ctx->v4l_codec_dpb_ready = false; + ctx->reset_flag = V4L_RESET_MODE_LIGHT; + ctx->vpp_cfg.res_chg = true; + + /* + * After all buffers containing decoded frames from + * before the resolution change point ready to be + * dequeued on the CAPTURE queue, the driver sends a + * V4L2_EVENT_SOURCE_CHANGE event for source change + * type V4L2_EVENT_SRC_CH_RESOLUTION, also the upper + * layer will get new information from cts->picinfo. 
+ */ + aml_vdec_dispatch_event(ctx, V4L2_EVENT_SRC_CH_RESOLUTION); + } else + aml_vdec_dispatch_event(ctx, V4L2_EVENT_SEND_EOS); + } + + if (dstbuf->vb.vb2_buf.state == VB2_BUF_STATE_ACTIVE) { + /* binding vframe handle. */ + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) { + if (vf->canvas0_config[0].block_mode == CANVAS_BLKMODE_LINEAR) { + if ((ctx->output_pix_fmt != V4L2_PIX_FMT_H264) && + (ctx->output_pix_fmt != V4L2_PIX_FMT_MPEG1) && + (ctx->output_pix_fmt != V4L2_PIX_FMT_MPEG2) && + (ctx->output_pix_fmt != V4L2_PIX_FMT_MPEG4) && + (ctx->output_pix_fmt != V4L2_PIX_FMT_MJPEG)) { + vf->flag |= VFRAME_FLAG_VIDEO_LINEAR; + } + else { + if (fb->status == FB_ST_GE2D) + vf->flag |= VFRAME_FLAG_VIDEO_LINEAR; + } + } + } else { + if (vf->canvas0_config[0].block_mode == CANVAS_BLKMODE_LINEAR) + vf->flag |= VFRAME_FLAG_VIDEO_LINEAR; + } + + vf->omx_index = vf->index_disp; + dstbuf->privdata.vf = *vf; + + if (vb2_buf->memory == VB2_MEMORY_DMABUF) { + struct dma_buf * dma; + + dma = dstbuf->vb.vb2_buf.planes[0].dbuf; + if (dmabuf_is_uvm(dma)) { + /* only Y will contain vframe */ + comp_buf_set_vframe(ctx, vb2_buf, vf); + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "set vf(%px) into %dth buf\n", + vf, vb2_buf->index); + } + } + + fb_map_table_hold(ctx, vb2_buf, vf, fb->task, dstbuf->internal_index); + + v4l2_buff_done(&dstbuf->vb, VB2_BUF_STATE_DONE); + + fb->status = FB_ST_DISPLAY; + } + + mutex_lock(&ctx->state_lock); + if (ctx->state == AML_STATE_FLUSHING && + ctx->has_receive_eos) { + ctx->state = AML_STATE_FLUSHED; + ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state (AML_STATE_FLUSHED)\n"); + } + mutex_unlock(&ctx->state_lock); + + if (ctx->post_to_upper_done == false) { + ctx->post_to_upper_done = true; + wake_up_interruptible(&ctx->post_done_wq); + } + + ctx->decoded_frame_cnt++; +} + +static void fill_capture_done_cb(void *v4l_ctx, void *fb_ctx) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)v4l_ctx; 
+ struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *)fb_ctx; + struct aml_video_dec_buf *aml_buff = + container_of(fb, struct aml_video_dec_buf, frame_buffer); + struct vb2_v4l2_buffer *vb = &aml_buff->vb; + + if (ctx->is_stream_off) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_INPUT, + "ignore buff idx: %d streamoff\n", fb->buf_idx); + return; + } + + ATRACE_COUNTER("VC_OUT_VSINK-0.receive", vb->vb2_buf.index); + + mutex_lock(&ctx->capture_buffer_lock); + kfifo_put(&ctx->capture_buffer, vb); + mutex_unlock(&ctx->capture_buffer_lock); + + aml_thread_post_task(ctx, AML_THREAD_CAPTURE); +} + +static void update_vdec_buf_plane(struct aml_vcodec_ctx *ctx, + struct vdec_v4l2_buffer *fb, + struct vb2_buffer *vb) +{ + int i; + char plane_n[3] = {'Y','U','V'}; + + fb->num_planes = vb->num_planes; + fb->buf_idx = vb->index; + + for (i = 0 ; i < vb->num_planes ; i++) { + fb->m.mem[i].addr = vb2_dma_contig_plane_dma_addr(vb, i); + fb->m.mem[i].dbuf = vb->planes[i].dbuf; + if (i == 0) { + //Y + if (vb->num_planes == 1) { + fb->m.mem[0].size = ctx->picinfo.y_len_sz + + ctx->picinfo.c_len_sz; + fb->m.mem[0].offset = ctx->picinfo.y_len_sz; + } else { + fb->m.mem[0].size = ctx->picinfo.y_len_sz; + fb->m.mem[0].offset = 0; + } + } else { + if (vb->num_planes == 2) { + //UV + fb->m.mem[1].size = ctx->picinfo.c_len_sz; + fb->m.mem[1].offset = ctx->picinfo.c_len_sz >> 1; + } else { + fb->m.mem[i].size = ctx->picinfo.c_len_sz >> 1; + fb->m.mem[i].offset = 0; + } + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "idx: %u, %c:(0x%lx, %d)\n", vb->index, + plane_n[i], fb->m.mem[i].addr, fb->m.mem[i].size); + } +} + +static bool fb_token_insert(struct aml_vcodec_ctx *ctx, + ulong *token) +{ + ulong vb_handle; + int i; + + for (i = 0; i < ARRAY_SIZE(ctx->token_table); i++) { + if (ctx->token_table[i] && + (ctx->token_table[i] == *token)) { + return true; + } + } + + if (!v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) + return false; + + vb_handle = (ulong)v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); + + for 
(i = 0; i < ARRAY_SIZE(ctx->token_table); i++) { + if (!ctx->token_table[i]) { + ctx->token_table[i] = vb_handle; + break; + } + } + + if (i >= ARRAY_SIZE(ctx->token_table)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "%s, table is full. token:%lx\n", + __func__, vb_handle); + return false; + } + + *token = vb_handle; + + return true; +} + +static void fb_token_remove(struct aml_vcodec_ctx *ctx, + ulong token) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ctx->token_table); i++) { + if (token == ctx->token_table[i]) { + ctx->token_table[i] = 0; + break; + } + } + + if (i >= ARRAY_SIZE(ctx->token_table)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "%s, remove token err, token:%lx.\n", + __func__, token); + } +} + +static void fb_token_clean(struct aml_vcodec_ctx *ctx) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ctx->token_table); i++) { + ctx->token_table[i] = 0; + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "%s done\n", __func__); +} + +static bool fb_buff_query(struct aml_fb_ops *fb, ulong *token) +{ + struct aml_vcodec_ctx *ctx = + container_of(fb, struct aml_vcodec_ctx, fb_ops); + struct vb2_queue * que = v4l2_m2m_get_dst_vq(ctx->m2m_ctx); + bool ret = false; + ulong flags; + + if (!que->streaming) + return false; + + flags = aml_vcodec_ctx_lock(ctx); + + ret = fb_token_insert(ctx, token); + + aml_vcodec_ctx_unlock(ctx, flags); + + return ret; +} + +static void aml_task_chain_remove(struct aml_vcodec_ctx *ctx) +{ + struct task_chain_s *task, *tmp; + + list_for_each_entry_safe(task, tmp, &ctx->task_chain_pool, node) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "remove task chain:%d, %px\n", task->id, task); + list_del(&task->node); + task_chain_clean(task); + task_chain_release(task); + } +} + +static struct task_ops_s *get_v4l_sink_ops(void); + +static void aml_creat_pipeline(struct aml_vcodec_ctx *ctx, + struct vdec_v4l2_buffer *fb, + u32 requester) +{ + struct task_chain_s *task = fb->task; + /* + * line 1: dec <==> vpp <==> v4l-sink, for P / P + DI.NR. 
+ * line 2: dec <==> vpp, vpp <==> v4l-sink, for I / I + DI.NR. + * line 3: dec <==> v4l-sink, only for P. + * line 4: dec <==> ge2d, ge2d <==> v4l-sink, used for fmt convert. + * line 5: dec <==> ge2d, ge2d <==>vpp, vpp <==> v4l-sink. + * line 6: dec <==> ge2d, ge2d <==> vpp <==> v4l-sink. + */ + + switch (requester) { + case AML_FB_REQ_DEC: + if (ctx->ge2d) { + /* dec <==> ge2d. */ + task->attach(task, get_ge2d_ops(), ctx->ge2d); + } else if (ctx->vpp) { + if (ctx->vpp->is_prog) { + /* dec <==> vpp <==> v4l-sink. */ + task->attach(task, get_v4l_sink_ops(), ctx); + task->attach(task, get_vpp_ops(), ctx->vpp); + } else { + /* dec <==> vpp. */ + task->attach(task, get_vpp_ops(), ctx->vpp); + } + } else { + /* dec <==> v4l-sink. */ + task->attach(task, get_v4l_sink_ops(), ctx); + } + break; + + case AML_FB_REQ_GE2D: + if (ctx->vpp) { + if (ctx->vpp->is_prog) { + /* ge2d <==> vpp <==> v4l-sink. */ + task->attach(task, get_v4l_sink_ops(), ctx); + task->attach(task, get_vpp_ops(), ctx->vpp); + task->attach(task, get_ge2d_ops(), ctx->ge2d); + } else { + /* ge2d <==> vpp. */ + task->attach(task, get_vpp_ops(), ctx->vpp); + task->attach(task, get_ge2d_ops(), ctx->ge2d); + } + } else { + /* ge2d <==> v4l-sink. */ + task->attach(task, get_v4l_sink_ops(), ctx); + task->attach(task, get_ge2d_ops(), ctx->ge2d); + } + break; + + case AML_FB_REQ_VPP: + /* vpp <==> v4l-sink. 
*/ + task->attach(task, get_v4l_sink_ops(), ctx); + task->attach(task, get_vpp_ops(), ctx->vpp); + break; + + default: + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "unsupport requester %x\n", requester); + } +} + +static int fb_buff_from_queue(struct aml_fb_ops *fb_ops, + ulong token, struct vdec_v4l2_buffer **out_fb, + u32 requester) +{ + struct aml_vcodec_ctx *ctx = + container_of(fb_ops, struct aml_vcodec_ctx, fb_ops); + struct aml_video_dec_buf *aml_buf = NULL; + struct vb2_v4l2_buffer *v4l_buf = NULL; + struct vdec_v4l2_buffer *fb = NULL; + u32 buf_status = 0; + ulong flags; + + flags = aml_vcodec_ctx_lock(ctx); + + if (ctx->is_stream_off) { + aml_vcodec_ctx_unlock(ctx, flags); + return -1; + } + + v4l_buf = (struct vb2_v4l2_buffer *) token; + if (!v4l_buf) { + aml_vcodec_ctx_unlock(ctx, flags); + return -1; + } + + aml_buf = container_of(v4l_buf, struct aml_video_dec_buf, vb); + + fb = &aml_buf->frame_buffer; + fb->buf_idx = v4l_buf->vb2_buf.index; + aml_buf->used = true; + ctx->buf_used_count++; + + if (requester == AML_FB_REQ_VPP) { + buf_status = V4L_CAP_BUFF_IN_VPP; + ctx->cap_pool.vpp++; + } else if (requester == AML_FB_REQ_DEC) { + buf_status = V4L_CAP_BUFF_IN_DEC; + ctx->cap_pool.dec++; + } else if (requester == AML_FB_REQ_GE2D) { + buf_status = V4L_CAP_BUFF_IN_GE2D; + ctx->cap_pool.ge2d++; + } + + ctx->cap_pool.seq[ctx->cap_pool.out++] = + (buf_status << 16 | fb->buf_idx); + + update_vdec_buf_plane(ctx, fb, &v4l_buf->vb2_buf); + + aml_creat_pipeline(ctx, fb, requester); + + fb_token_remove(ctx, token); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "vid:%d, task:%px, phy:%lx, state:%d, ready:%d, requester:%d\n", + fb->buf_idx, fb->task, fb->m.mem[0].addr, v4l_buf->vb2_buf.state, + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx), requester); + + ATRACE_COUNTER("VC_IN_VSINK-3.require", v4l_buf->vb2_buf.index); + + *out_fb = fb; + + aml_vcodec_ctx_unlock(ctx, flags); + + return 0; +} + +static struct task_ops_s v4l_sink_ops = { + .type = TASK_TYPE_V4L_SINK, + 
.fill_buffer = fill_capture_done_cb, +}; + +static struct task_ops_s *get_v4l_sink_ops(void) +{ + return &v4l_sink_ops; +} + +void aml_vdec_basic_information(struct aml_vcodec_ctx *ctx) +{ + struct aml_q_data *outq = NULL; + struct aml_q_data *capq = NULL; + struct vdec_pic_info pic; + + if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &pic)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "get pic info err\n"); + return; + } + + outq = aml_vdec_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); + capq = aml_vdec_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + + pr_info("\n==== Show Basic Information ==== \n"); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "Format : %s\n", + outq->fmt->name); + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "Color space: %s\n", + capq->fmt->name); + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "Scan type : %s\n", + (pic.field == V4L2_FIELD_NONE) ? + "Progressive" : "Interlaced"); + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "Resolution : visible(%dx%d), coded(%dx%d)\n", + pic.visible_width, pic.visible_height, + pic.coded_width, pic.coded_height); + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "Buffer num : dec:%d, vpp:%d, ge2d:%d, margin:%d, total:%d\n", + ctx->picinfo.dpb_frames, ctx->vpp_size, ctx->ge2d_size, + ctx->picinfo.dpb_margin, CTX_BUF_TOTAL(ctx)); + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "Config : dw:%d, drm:%d, byp:%d, lc:%d, nr:%d, ge2d:%x\n", + ctx->config.parm.dec.cfg.double_write_mode, + ctx->is_drm_mode, + ctx->vpp_cfg.is_bypass_p, + ctx->vpp_cfg.enable_local_buf, + ctx->vpp_cfg.enable_nr, + ctx->ge2d_cfg.mode); +} + +void aml_buffer_status(struct aml_vcodec_ctx *ctx) +{ + struct vb2_v4l2_buffer *vb = NULL; + struct aml_video_dec_buf *aml_buff = NULL; + struct vdec_v4l2_buffer *fb = NULL; + struct vb2_queue *q = NULL; + ulong flags; + int i; + + flags = aml_vcodec_ctx_lock(ctx); + + q = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (!q->streaming) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "can't achieve buffers status before start 
streaming.\n"); + } + + pr_info("\n==== Show Buffer Status ======== \n"); + for (i = 0; i < q->num_buffers; ++i) { + vb = to_vb2_v4l2_buffer(q->bufs[i]); + aml_buff = container_of(vb, struct aml_video_dec_buf, vb); + fb = &aml_buff->frame_buffer; + + /* print out task chain status. */ + task_chain_show(fb->task); + } + + aml_vcodec_ctx_unlock(ctx, flags); +} + +static void aml_check_dpb_ready(struct aml_vcodec_ctx *ctx) +{ + if (!ctx->v4l_codec_dpb_ready) { + /* + * make sure enough dst bufs for decoding. + */ + if ((ctx->dpb_size) && (ctx->cap_pool.in >= ctx->dpb_size)) + ctx->v4l_codec_dpb_ready = true; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "dpb: %d, vpp: %d, ready: %d, used: %d, dpb is ready: %s\n", + ctx->dpb_size, ctx->vpp_size, + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx), + ctx->cap_pool.out, ctx->v4l_codec_dpb_ready ? "yes" : "no"); + } +} + +static void reconfig_vpp_status(struct aml_vcodec_ctx *ctx) +{ + if (bypass_nr_flag && + !ctx->vpp_cfg.is_prog && + ((ctx->vpp_cfg.mode == VPP_MODE_NOISE_REDUC_LOCAL) || + (ctx->vpp_cfg.mode == VPP_MODE_NOISE_REDUC))) { + ctx->vpp_cfg.enable_nr = 0; + ctx->vpp_cfg.enable_local_buf = 0; + + ctx->vpp_cfg.mode = VPP_MODE_DI; + } +} + +static int is_vdec_ready(struct aml_vcodec_ctx *ctx) +{ + struct aml_vcodec_dev *dev = ctx->dev; + + if (!is_input_ready(ctx->ada_ctx)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "the decoder input has not ready.\n"); + v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx); + return 0; + } + + if (ctx->state == AML_STATE_PROBE) { + mutex_lock(&ctx->state_lock); + if (ctx->state == AML_STATE_PROBE) { + ctx->state = AML_STATE_READY; + ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state (AML_STATE_READY)\n"); + } + mutex_unlock(&ctx->state_lock); + } + + mutex_lock(&ctx->state_lock); + if (ctx->state == AML_STATE_READY) { + if (ctx->m2m_ctx->out_q_ctx.q.streaming && + ctx->m2m_ctx->cap_q_ctx.q.streaming) { + ctx->state = AML_STATE_ACTIVE; + 
ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state (AML_STATE_ACTIVE)\n"); + } + } + mutex_unlock(&ctx->state_lock); + + /* check dpb ready */ + //aml_check_dpb_ready(ctx); + + return 1; +} + +static bool is_enough_work_items(struct aml_vcodec_ctx *ctx) +{ + struct aml_vcodec_dev *dev = ctx->dev; + + if (vdec_frame_number(ctx->ada_ctx) >= WORK_ITEMS_MAX) { + v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx); + return false; + } + + return true; +} + +static void aml_wait_buf_ready(struct aml_vcodec_ctx *ctx) +{ + ulong expires; + + expires = jiffies + msecs_to_jiffies(1000); + while (!ctx->v4l_codec_dpb_ready) { + u32 ready_num = 0; + + if (time_after(jiffies, expires)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "the DPB state has not ready.\n"); + break; + } + + ready_num = v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx); + if ((ready_num + ctx->buf_used_count) >= CTX_BUF_TOTAL(ctx)) + ctx->v4l_codec_dpb_ready = true; + } +} + +void dmabuff_recycle_worker(struct work_struct *work) +{ + struct aml_vcodec_ctx *ctx = + container_of(work, struct aml_vcodec_ctx, dmabuff_recycle_work); + struct vb2_v4l2_buffer *vb = NULL; + struct aml_video_dec_buf *buf = NULL; + unsigned long flags; + + for (;;) { + spin_lock_irqsave(&ctx->dmabuff_recycle_lock, flags); + if (!kfifo_get(&ctx->dmabuff_recycle, &vb)) { + spin_unlock_irqrestore(&ctx->dmabuff_recycle_lock, flags); + break; + } + spin_unlock_irqrestore(&ctx->dmabuff_recycle_lock, flags); + + buf = container_of(vb, struct aml_video_dec_buf, vb); + + if (ctx->is_out_stream_off) + continue; + + if (wait_event_interruptible_timeout + (ctx->wq, buf->used == false, + msecs_to_jiffies(200)) == 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "wait recycle dma buff timeout.\n"); + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_INPUT, + "recycle buff idx: %d, vbuf: %lx\n", vb->vb2_buf.index, + (ulong)sg_dma_address(buf->out_sgt->sgl)); + + ATRACE_COUNTER("VO_OUT_VSINK-2.write_secure_end", 
vb->vb2_buf.index); + + if (vb->vb2_buf.state != VB2_BUF_STATE_ERROR) + v4l2_buff_done(vb, buf->error ? VB2_BUF_STATE_ERROR : + VB2_BUF_STATE_DONE); + } +} + +void aml_recycle_dma_buffers(struct aml_vcodec_ctx *ctx, u32 handle) +{ + struct aml_vcodec_dev *dev = ctx->dev; + struct vb2_v4l2_buffer *vb = NULL; + struct vb2_queue *q = NULL; + int index = handle & 0xf; + unsigned long flags; + + if (ctx->is_out_stream_off) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_INPUT, + "ignore buff idx: %d streamoff\n", index); + return; + } + + q = v4l2_m2m_get_vq(ctx->m2m_ctx, + V4L2_BUF_TYPE_VIDEO_OUTPUT); + + vb = to_vb2_v4l2_buffer(q->bufs[index]); + + spin_lock_irqsave(&ctx->dmabuff_recycle_lock, flags); + kfifo_put(&ctx->dmabuff_recycle, vb); + spin_unlock_irqrestore(&ctx->dmabuff_recycle_lock, flags); + + queue_work(dev->decode_workqueue, &ctx->dmabuff_recycle_work); +} + +static void aml_vdec_worker(struct work_struct *work) +{ + struct aml_vcodec_ctx *ctx = + container_of(work, struct aml_vcodec_ctx, decode_work); + struct aml_vcodec_dev *dev = ctx->dev; + struct aml_video_dec_buf *aml_buf; + struct vb2_v4l2_buffer *vb2_v4l2; + struct vb2_buffer *vb; + struct aml_vcodec_mem buf; + bool res_chg = false; + int ret; + + if (ctx->state < AML_STATE_INIT || + ctx->state > AML_STATE_FLUSHED) { + v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx); + goto out; + } + + if (!is_vdec_ready(ctx)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "the decoder has not ready.\n"); + goto out; + } + + vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx); + if (vb2_v4l2 == NULL) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "src_buf empty.\n"); + goto out; + } + + vb = (struct vb2_buffer *)vb2_v4l2; + + /*this case for google, but some frames are droped on ffmpeg, so disabled temp.*/ + if (0 && !is_enough_work_items(ctx)) + goto out; + + aml_buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb); + if (aml_buf->lastframe) { + ulong expires; + + /*the empty data use to flushed the decoder.*/ + v4l_dbg(ctx, 
V4L_DEBUG_CODEC_BUFMGR, + "Got empty flush input buffer.\n"); + + /* + * when inputs a small amount of src buff, then soon to + * switch state FLUSHING, must to wait the DBP to be ready. + * (!ctx->v4l_codec_dpb_ready) change to only need one buf + * for run ready in new version. + */ + expires = jiffies + msecs_to_jiffies(5000); + while ((vdec_frame_number(ctx->ada_ctx) > 0) && + (ctx->cap_pool.in < 1)) { + if (time_after(jiffies, expires)) { + aml_vdec_flush_decoder(ctx); + v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx); + aml_vdec_dispatch_event(ctx, V4L2_EVENT_REQUEST_EXIT); + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "capture buffer waiting timeout.\n"); + goto out; + } + usleep_range(5000, 5500); + } + + mutex_lock(&ctx->state_lock); + if (ctx->state == AML_STATE_ACTIVE) { + ctx->state = AML_STATE_FLUSHING;// prepare flushing + ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state (AML_STATE_FLUSHING-LASTFRM)\n"); + } + mutex_unlock(&ctx->state_lock); + + v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx); + + /* sets eos data for vdec input. 
*/ + aml_vdec_flush_decoder(ctx); + + goto out; + } + + buf.index = vb->index; + buf.vaddr = vb2_plane_vaddr(vb, 0); + buf.addr = sg_dma_address(aml_buf->out_sgt->sgl); + buf.size = vb->planes[0].bytesused; + buf.model = vb->memory; + buf.timestamp = vb->timestamp; + buf.meta_ptr = (ulong)aml_buf->meta_data; + + if (!buf.vaddr && !buf.addr) { + v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx); + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "id=%d src_addr is NULL.\n", vb->index); + goto out; + } + + aml_buf->used = true; + + /* v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "size: 0x%zx, crc: 0x%x\n", + buf.size, crc32(0, buf.va, buf.size));*/ + + /* pts = (time / 10e6) * (90k / fps) */ + /*v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "timestamp: 0x%llx\n", src_buf->timestamp);*/ + + if (ctx->is_drm_mode && + (buf.model == VB2_MEMORY_DMABUF)) { + ATRACE_COUNTER("VO_IN_VSINK-2.write_secure", buf.size); + } else { + ATRACE_COUNTER("VO_IN_VSINK-2.write", buf.size); + } + + ATRACE_COUNTER("V_ST_VSINK-input_buffering", vdec_frame_number(ctx->ada_ctx)); + + ret = vdec_if_decode(ctx, &buf, &res_chg); + if (ret > 0) { + /* + * we only return src buffer with VB2_BUF_STATE_DONE + * when decode success without resolution change. + */ + aml_buf->used = false; + v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + + if (ctx->is_drm_mode && + (buf.model == VB2_MEMORY_DMABUF)) { + wake_up_interruptible(&ctx->wq); + } else { + ATRACE_COUNTER("VO_OUT_VSINK-0.wrtie_end", buf.size); + v4l2_buff_done(&aml_buf->vb, + VB2_BUF_STATE_DONE); + } + } else if (ret && ret != -EAGAIN) { + aml_buf->used = false; + v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + + if (ctx->is_drm_mode && + (buf.model == VB2_MEMORY_DMABUF)) { + wake_up_interruptible(&ctx->wq); + } else { + ATRACE_COUNTER("VO_OUT_VSINK-3.write_error", buf.size); + v4l2_buff_done(&aml_buf->vb, + VB2_BUF_STATE_ERROR); + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "error processing src data. %d.\n", ret); + } else if (res_chg) { + /* wait the DPB state to be ready. 
*/ + aml_wait_buf_ready(ctx); + + aml_buf->used = false; + aml_vdec_pic_info_update(ctx); + /* + * On encountering a resolution change in the stream. + * The driver must first process and decode all + * remaining buffers from before the resolution change + * point, so call flush decode here + */ + mutex_lock(&ctx->state_lock); + if (ctx->state == AML_STATE_ACTIVE) { + ctx->state = AML_STATE_FLUSHING;// prepare flushing + ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state (AML_STATE_FLUSHING-RESCHG)\n"); + } + mutex_unlock(&ctx->state_lock); + + ctx->v4l_resolution_change = true; + while (ctx->m2m_ctx->job_flags & TRANS_RUNNING) { + v4l2_m2m_job_pause(dev->m2m_dev_dec, ctx->m2m_ctx); + } + + aml_vdec_flush_decoder(ctx); + + goto out; + } else { + ATRACE_COUNTER("VO_OUT_VSINK-1.write_again", buf.size); + /* decoder is lack of resource, retry after short delay */ + if (vdec_get_instance_num() < 2) + usleep_range(2000, 4000); + } + + v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx); +out: + return; +} + +static void aml_vdec_reset(struct aml_vcodec_ctx *ctx) +{ + if (ctx->state == AML_STATE_ABORT) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "the decoder will be exited.\n"); + goto out; + } + + if (aml_codec_reset(ctx->ada_ctx, &ctx->reset_flag)) { + ctx->state = AML_STATE_ABORT; + ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state (AML_STATE_ABORT).\n"); + } +out: + complete(&ctx->comp); + return; +} + +void wait_vcodec_ending(struct aml_vcodec_ctx *ctx) +{ + /* disable queue output item to worker. */ + ctx->output_thread_ready = false; + ctx->is_stream_off = true; + + /* flush output buffer worker. */ + cancel_work_sync(&ctx->decode_work); + cancel_work_sync(&ctx->dmabuff_recycle_work); + + /* clean output cache and decoder status . */ + if (ctx->state > AML_STATE_INIT) + aml_vdec_reset(ctx); + + /* pause the job and clean trans status. 
*/ + while (ctx->m2m_ctx->job_flags & TRANS_RUNNING) { + v4l2_m2m_job_pause(ctx->dev->m2m_dev_dec, ctx->m2m_ctx); + } + + ctx->v4l_codec_dpb_ready = false; +} + +void aml_thread_capture_worker(struct aml_vcodec_ctx *ctx) +{ + struct vb2_v4l2_buffer *vb = NULL; + struct aml_video_dec_buf *aml_buff = NULL; + struct vdec_v4l2_buffer *fb = NULL; + + for (;;) { + mutex_lock(&ctx->capture_buffer_lock); + if (!kfifo_get(&ctx->capture_buffer, &vb)) { + mutex_unlock(&ctx->capture_buffer_lock); + break; + } + mutex_unlock(&ctx->capture_buffer_lock); + + aml_buff = container_of(vb, struct aml_video_dec_buf, vb); + fb = &aml_buff->frame_buffer; + + if (ctx->is_stream_off) + continue; + + post_frame_to_upper(ctx, fb); + } +} +EXPORT_SYMBOL_GPL(aml_thread_capture_worker); + +static int vdec_capture_thread(void *data) +{ + struct aml_vdec_thread *thread = + (struct aml_vdec_thread *) data; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *) thread->priv; + + for (;;) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "%s, state: %d\n", __func__, ctx->state); + + if (down_interruptible(&thread->sem)) + break; + + if (thread->stop) + break; + + /* handle event. 
*/ + thread->func(ctx); + } + + while (!kthread_should_stop()) { + usleep_range(1000, 2000); + } + + return 0; +} + +void aml_thread_post_task(struct aml_vcodec_ctx *ctx, + enum aml_thread_type type) +{ + struct aml_vdec_thread *thread = NULL; + ulong flags; + + spin_lock_irqsave(&ctx->tsplock, flags); + list_for_each_entry(thread, &ctx->vdec_thread_list, node) { + if (thread->task == NULL) + continue; + + if (thread->type == type) + up(&thread->sem); + } + spin_unlock_irqrestore(&ctx->tsplock, flags); +} +EXPORT_SYMBOL_GPL(aml_thread_post_task); + +int aml_thread_start(struct aml_vcodec_ctx *ctx, aml_thread_func func, + enum aml_thread_type type, const char *thread_name) +{ + struct aml_vdec_thread *thread; + struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; + int ret = 0; + + thread = kzalloc(sizeof(*thread), GFP_KERNEL); + if (thread == NULL) + return -ENOMEM; + + thread->type = type; + thread->func = func; + thread->priv = ctx; + sema_init(&thread->sem, 0); + + thread->task = kthread_run(vdec_capture_thread, thread, "aml-%s-%d", thread_name, ctx->id); + if (IS_ERR(thread->task)) { + ret = PTR_ERR(thread->task); + thread->task = NULL; + goto err; + } + sched_setscheduler_nocheck(thread->task, SCHED_FIFO, ¶m); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "%s, policy is:%d priority is:%d\n", + __func__, thread->task->policy, thread->task->rt_priority); + + list_add(&thread->node, &ctx->vdec_thread_list); + + return 0; + +err: + kfree(thread); + + return ret; +} +EXPORT_SYMBOL_GPL(aml_thread_start); + +void aml_thread_stop(struct aml_vcodec_ctx *ctx) +{ + struct aml_vdec_thread *thread = NULL; + ulong flags; + + while (!list_empty(&ctx->vdec_thread_list)) { + thread = list_entry(ctx->vdec_thread_list.next, + struct aml_vdec_thread, node); + spin_lock_irqsave(&ctx->tsplock, flags); + list_del(&thread->node); + spin_unlock_irqrestore(&ctx->tsplock, flags); + + thread->stop = true; + up(&thread->sem); + kthread_stop(thread->task); + thread->task = NULL; 
+ kfree(thread); + } +} +EXPORT_SYMBOL_GPL(aml_thread_stop); + +static int vidioc_try_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *cmd) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, cmd: %u\n", __func__, cmd->cmd); + + switch (cmd->cmd) { + case V4L2_DEC_CMD_STOP: + case V4L2_DEC_CMD_START: + if (cmd->cmd == V4L2_DEC_CMD_START) { + if (cmd->start.speed == ~0) + cmd->start.speed = 0; + if (cmd->start.format == ~0) + cmd->start.format = 0; + } + + if (cmd->flags == ~0) + cmd->flags = 0; + + if ((cmd->flags != 0) && (cmd->flags != ~0)) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "cmd->flags=%u\n", cmd->flags); + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static int vidioc_decoder_cmd(struct file *file, void *priv, + struct v4l2_decoder_cmd *cmd) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + struct vb2_queue *src_vq, *dst_vq; + int ret; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, cmd: %u\n", __func__, cmd->cmd); + + ret = vidioc_try_decoder_cmd(file, priv, cmd); + if (ret) + return ret; + + switch (cmd->cmd) { + case V4L2_DEC_CMD_STOP: + src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + if (!vb2_is_streaming(src_vq)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Output stream is off. No need to flush.\n"); + return 0; + } + + dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, + multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : + V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (!vb2_is_streaming(dst_vq)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Capture stream is off. 
No need to flush.\n"); + return 0; + } + + /* flush pipeline */ + v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf->vb); + v4l2_m2m_try_schedule(ctx->m2m_ctx);//pay attention + ctx->receive_cmd_stop = true; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "%s, receive cmd stop and prepare flush pipeline.\n", __func__); + break; + + case V4L2_DEC_CMD_START: + dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, + multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : + V4L2_BUF_TYPE_VIDEO_CAPTURE); + vb2_clear_last_buffer_dequeued(dst_vq);//pay attention + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "%s, receive cmd start.\n", __func__); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static void aml_wait_resource(struct aml_vcodec_ctx *ctx) +{ + ulong expires = jiffies + msecs_to_jiffies(1000); + + while (atomic_read(&ctx->dev->vpp_count) >= max_di_instance) { + if (time_after(jiffies, expires)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "wait resource timeout.\n"); + break; + } + usleep_range(2000, 4000); + } +} + +static int vidioc_decoder_streamon(struct file *file, void *priv, + enum v4l2_buf_type i) +{ + struct v4l2_fh *fh = file->private_data; + struct aml_vcodec_ctx *ctx = fh_to_ctx(fh); + struct vb2_queue *q; + + q = v4l2_m2m_get_vq(fh->m2m_ctx, i); + if (!V4L2_TYPE_IS_OUTPUT(q->type) && + ctx->is_stream_off) { + if (ctx->vpp_is_need) { + int ret; + + if (ctx->vpp_cfg.fmt == 0) + ctx->vpp_cfg.fmt = ctx->cap_pix_fmt; + + if (ctx->vpp == NULL) + aml_wait_resource(ctx); + + if ((atomic_read(&ctx->dev->vpp_count) < max_di_instance) || + (ctx->vpp != NULL)) { + if (ctx->vpp && ctx->vpp_cfg.is_vpp_reset && + (ctx->vpp->is_prog == ctx->vpp_cfg.is_prog) && + (ctx->vpp->is_bypass_p == ctx->vpp_cfg.is_bypass_p) && + (ctx->vpp->work_mode == ctx->vpp_cfg.mode)) { + aml_v4l2_vpp_reset(ctx->vpp); + } else { + if (ctx->vpp) { + atomic_dec(&ctx->dev->vpp_count); + aml_v4l2_vpp_destroy(ctx->vpp); + ctx->vpp = NULL; + } + + ret = aml_v4l2_vpp_init(ctx, &ctx->vpp_cfg, &ctx->vpp); 
+ if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "vpp_wrapper init err:%d vpp_cfg.fmt: %d\n", + ret, ctx->vpp_cfg.fmt); + return ret; + } + + atomic_inc(&ctx->dev->vpp_count); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "vpp_wrapper instance count: %d\n", + atomic_read(&ctx->dev->vpp_count)); + } + } else { + ctx->vpp_cfg.enable_local_buf = 0; + ctx->vpp_cfg.enable_nr = 0; + ctx->picinfo.dpb_margin += ctx->vpp_size; + ctx->dpb_size = ctx->picinfo.dpb_margin + ctx->picinfo.dpb_frames; + ctx->vpp_size = 0; + vdec_if_set_param(ctx, SET_PARAM_PIC_INFO, &ctx->picinfo); + ctx->vpp_is_need = false; + } + ctx->vpp_cfg.is_vpp_reset = false; + } else { + if (ctx->vpp) { + atomic_dec(&ctx->dev->vpp_count); + aml_v4l2_vpp_destroy(ctx->vpp); + ctx->vpp = NULL; + } + } + + if (ctx->ge2d_is_need) { + int ret; + + if (ctx->ge2d) { + aml_v4l2_ge2d_destroy(ctx->ge2d); + ctx->ge2d = NULL; + } + + ret = aml_v4l2_ge2d_init(ctx, &ctx->ge2d_cfg, &ctx->ge2d); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "ge2d_wrapper init err:%d\n", ret); + return ret; + } + } + + ctx->is_stream_off = false; + } else + ctx->is_out_stream_off = false; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d\n", __func__, q->type); + + return v4l2_m2m_ioctl_streamon(file, priv, i); +} + +static int vidioc_decoder_streamoff(struct file *file, void *priv, + enum v4l2_buf_type i) +{ + struct v4l2_fh *fh = file->private_data; + struct aml_vcodec_ctx *ctx = fh_to_ctx(fh); + struct vb2_queue *q; + ulong flags; + + q = v4l2_m2m_get_vq(fh->m2m_ctx, i); + + flags = aml_vcodec_ctx_lock(ctx); + + if (V4L2_TYPE_IS_OUTPUT(q->type)) + ctx->is_out_stream_off = true; + else + ctx->is_stream_off = true; + + aml_vcodec_ctx_unlock(ctx, flags); + + if (!V4L2_TYPE_IS_OUTPUT(q->type)) { + if (ctx->vpp) { + reconfig_vpp_status(ctx); + } + } else { + ctx->index_disp = 0; + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d\n", __func__, q->type); + + return v4l2_m2m_ioctl_streamoff(file, priv, i); +} + +static int 
vidioc_decoder_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *rb) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + struct v4l2_fh *fh = file->private_data; + struct vb2_queue *q; + + q = v4l2_m2m_get_vq(fh->m2m_ctx, rb->type); + + if (!rb->count) { + if (!V4L2_TYPE_IS_OUTPUT(rb->type)) { + if (wait_event_interruptible_timeout + (ctx->post_done_wq, ctx->post_to_upper_done == true, + msecs_to_jiffies(200)) == 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "wait post frame to upper finish timeout.\n"); + } + } + vb2_queue_release(q); + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d, count: %d\n", + __func__, q->type, rb->count); + + if (!V4L2_TYPE_IS_OUTPUT(rb->type)) { + /* driver needs match v4l buffer number with total size*/ + if (rb->count > CTX_BUF_TOTAL(ctx)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "reqbufs (st:%d) %d -> %d\n", + ctx->state, rb->count, CTX_BUF_TOTAL(ctx)); + ctx->picinfo.dpb_margin += (rb->count - CTX_BUF_TOTAL(ctx)); + ctx->dpb_size = ctx->picinfo.dpb_frames + ctx->picinfo.dpb_margin; + vdec_if_set_param(ctx, SET_PARAM_PIC_INFO, &ctx->picinfo); + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s buf updated, dec: %d (%d + %d), vpp %d\n", + __func__, + ctx->dpb_size, + ctx->picinfo.dpb_frames, + ctx->picinfo.dpb_margin, + ctx->vpp_size); + //rb->count = ctx->dpb_size; + } + } else { + ctx->output_dma_mode = + (rb->memory == VB2_MEMORY_DMABUF) ? 
1 : 0; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_INPUT, + "output buffer memory mode is %d\n", rb->memory); + } + + return v4l2_m2m_ioctl_reqbufs(file, priv, rb); +} + +static int vidioc_vdec_querybuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d\n", __func__, buf->type); + + return v4l2_m2m_ioctl_querybuf(file, priv, buf); +} + +static int vidioc_vdec_expbuf(struct file *file, void *priv, + struct v4l2_exportbuffer *eb) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d\n", __func__, eb->type); + + return v4l2_m2m_ioctl_expbuf(file, priv, eb); +} + +void aml_vcodec_dec_release(struct aml_vcodec_ctx *ctx) +{ + ulong flags; + + if (kref_read(&ctx->box_ref)) + kref_put(&ctx->box_ref, box_release); + + flags = aml_vcodec_ctx_lock(ctx); + ctx->state = AML_STATE_ABORT; + ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state (AML_STATE_ABORT)\n"); + aml_vcodec_ctx_unlock(ctx, flags); + + vdec_if_deinit(ctx); +} + +void aml_vcodec_dec_set_default_params(struct aml_vcodec_ctx *ctx) +{ + struct aml_q_data *q_data; + + ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex; + ctx->fh.m2m_ctx = ctx->m2m_ctx; + ctx->fh.ctrl_handler = &ctx->ctrl_hdl; + INIT_WORK(&ctx->decode_work, aml_vdec_worker); + ctx->colorspace = V4L2_COLORSPACE_REC709; + ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + ctx->quantization = V4L2_QUANTIZATION_DEFAULT; + ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT; + ctx->dev->dec_capability = 0;//VCODEC_CAPABILITY_4K_DISABLED;//disable 4k + + q_data = &ctx->q_data[AML_Q_DATA_SRC]; + memset(q_data, 0, sizeof(struct aml_q_data)); + q_data->visible_width = DFT_CFG_WIDTH; + q_data->visible_height = DFT_CFG_HEIGHT; + q_data->coded_width = DFT_CFG_WIDTH; + q_data->coded_height = DFT_CFG_HEIGHT; + q_data->fmt = &aml_video_formats[OUT_FMT_IDX]; + q_data->field = V4L2_FIELD_NONE; 
+ + q_data->sizeimage[0] = (1024 * 1024);//DFT_CFG_WIDTH * DFT_CFG_HEIGHT; //1m + q_data->bytesperline[0] = 0; + + q_data = &ctx->q_data[AML_Q_DATA_DST]; + memset(q_data, 0, sizeof(struct aml_q_data)); + q_data->visible_width = DFT_CFG_WIDTH; + q_data->visible_height = DFT_CFG_HEIGHT; + q_data->coded_width = DFT_CFG_WIDTH; + q_data->coded_height = DFT_CFG_HEIGHT; + q_data->fmt = &aml_video_formats[CAP_FMT_IDX]; + if (support_format_I420) + q_data->fmt = &aml_video_formats[CAP_FMT_I420_IDX]; + + q_data->field = V4L2_FIELD_NONE; + + v4l_bound_align_image(&q_data->coded_width, + AML_VDEC_MIN_W, + AML_VDEC_MAX_W, 4, + &q_data->coded_height, + AML_VDEC_MIN_H, + AML_VDEC_MAX_H, 5, 6); + + q_data->sizeimage[0] = q_data->coded_width * q_data->coded_height; + q_data->bytesperline[0] = q_data->coded_width; + q_data->sizeimage[1] = q_data->sizeimage[0] / 2; + q_data->bytesperline[1] = q_data->coded_width; + ctx->reset_flag = V4L_RESET_MODE_NORMAL; + + ctx->fb_ops.query = fb_buff_query; + ctx->fb_ops.alloc = fb_buff_from_queue; + + ctx->state = AML_STATE_IDLE; + ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state (AML_STATE_IDLE)\n"); +} + +static int vidioc_vdec_qbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + int ret; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d\n", __func__, buf->type); + + if (ctx->state == AML_STATE_ABORT) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Call on QBUF after unrecoverable error, type = %s\n", + V4L2_TYPE_IS_OUTPUT(buf->type) ? 
"OUT" : "IN"); + return -EIO; + } + + ret = v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); + + if (V4L2_TYPE_IS_OUTPUT(buf->type)) { + if (V4L2_TYPE_IS_MULTIPLANAR(buf->type)) { + if (ret == -EAGAIN) + ATRACE_COUNTER("VO_IN_VSINK-1.que_again", buf->m.planes[0].bytesused); + else + ATRACE_COUNTER("VO_IN_VSINK-0.que", buf->m.planes[0].bytesused); + } else { + if (ret == -EAGAIN) + ATRACE_COUNTER("VO_IN_VSINK-1.que_again", buf->length); + else + ATRACE_COUNTER("VO_IN_VSINK-0.que", buf->length); + } + } else { + if (ret == -EAGAIN) + ATRACE_COUNTER("VC_IN_VSINK-1.que_again", buf->index); + else + ATRACE_COUNTER("VC_IN_VSINK-0.que", buf->index); + } + + return ret; +} + +static int vidioc_vdec_dqbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + int ret; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d\n", __func__, buf->type); + + if (ctx->state == AML_STATE_ABORT) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Call on DQBUF after unrecoverable error, type = %s\n", + V4L2_TYPE_IS_OUTPUT(buf->type) ? 
"OUT" : "IN"); + if (!V4L2_TYPE_IS_OUTPUT(buf->type)) + return -EIO; + } + + ret = v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); + if (!ret && !V4L2_TYPE_IS_OUTPUT(buf->type)) { + struct vb2_queue *vq; + struct vb2_v4l2_buffer *vb2_v4l2 = NULL; + struct aml_video_dec_buf *aml_buf = NULL; + struct file *file = NULL; + + vq = v4l2_m2m_get_vq(ctx->m2m_ctx, buf->type); + vb2_v4l2 = to_vb2_v4l2_buffer(vq->bufs[buf->index]); + aml_buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb); + aml_buf->privdata.vb_handle = (ulong) aml_buf; + aml_buf->privdata.v4l_dec_ctx = (ulong) ctx; + + file = fget(vb2_v4l2->private); + if (file && is_v4l2_buf_file(file)) { + dmabuf_fd_install_data(vb2_v4l2->private, + (void*)&aml_buf->privdata, + sizeof(struct file_private_data)); + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "disp: %d, vf: %lx\n", + aml_buf->privdata.vf.index_disp, + (ulong) v4l_get_vf_handle(vb2_v4l2->private)); + fput(file); + } + } + + if (V4L2_TYPE_IS_OUTPUT(buf->type)) { + if (V4L2_TYPE_IS_MULTIPLANAR(buf->type)) { + if (ret == -EAGAIN) + ATRACE_COUNTER("VO_OUT_VSINK-5.deque_again", buf->m.planes[0].bytesused); + else + ATRACE_COUNTER("VO_OUT_VSINK-4.deque", buf->m.planes[0].bytesused); + } else { + if (ret == -EAGAIN) + ATRACE_COUNTER("VO_OUT_VSINK-5.deque_again", buf->length); + else + ATRACE_COUNTER("VO_OUT_VSINK-4.deque", buf->length); + } + } else { + if (ret == -EAGAIN) + ATRACE_COUNTER("VC_OUT_VSINK-3.deque_again", buf->index); + else + ATRACE_COUNTER("VC_OUT_VSINK-2.deque", buf->index); + } + + return ret; +} + +static int vidioc_vdec_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + struct video_device *vfd_dec = video_devdata(file); + + strlcpy(cap->driver, AML_VCODEC_DEC_NAME, sizeof(cap->driver)); + strlcpy(cap->bus_info, AML_PLATFORM_STR, sizeof(cap->bus_info)); + strlcpy(cap->card, AML_PLATFORM_STR, sizeof(cap->card)); + cap->device_caps = vfd_dec->device_caps; + + v4l_dbg(ctx, 
V4L_DEBUG_CODEC_PROT, "%s, %s\n", __func__, cap->card); + + return 0; +} + +static int vidioc_vdec_subscribe_evt(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(fh); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d\n", __func__, sub->type); + + switch (sub->type) { + case V4L2_EVENT_EOS: + return v4l2_event_subscribe(fh, sub, 2, NULL); + case V4L2_EVENT_SOURCE_CHANGE: + return v4l2_src_change_event_subscribe(fh, sub); + default: + return v4l2_ctrl_subscribe_event(fh, sub); + } +} + +static int vidioc_vdec_event_unsubscribe(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(fh); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, type: %d\n", + __func__, sub->type); + + return v4l2_event_unsubscribe(fh, sub); +} + +static int vidioc_try_fmt(struct v4l2_format *f, struct aml_video_fmt *fmt) +{ + int i; + struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp; + struct v4l2_pix_format *pix = &f->fmt.pix; + + if (V4L2_TYPE_IS_MULTIPLANAR(f->type)) { + if (V4L2_TYPE_IS_OUTPUT(f->type)) { + pix_mp->num_planes = 1; + pix_mp->plane_fmt[0].bytesperline = 0; + + if ((pix_mp->pixelformat != V4L2_PIX_FMT_MPEG2) && + (pix_mp->pixelformat != V4L2_PIX_FMT_H264) && + (pix_mp->pixelformat != V4L2_PIX_FMT_MPEG1)) { + pix_mp->field = V4L2_FIELD_NONE; + } else if (pix_mp->field != V4L2_FIELD_NONE) { + if (pix_mp->field == V4L2_FIELD_ANY) + pix_mp->field = V4L2_FIELD_NONE; + + pr_info("%s, field: %u, fmt: %x\n", + __func__, pix_mp->field, + pix_mp->pixelformat); + } + } else { + if (pix_mp->field != V4L2_FIELD_INTERLACED) + pix_mp->field = V4L2_FIELD_NONE; + pix_mp->height = clamp(pix_mp->height, + AML_VDEC_MIN_H, + AML_VDEC_MAX_H); + pix_mp->width = clamp(pix_mp->width, + AML_VDEC_MIN_W, + AML_VDEC_MAX_W); + + pix_mp->num_planes = fmt->num_planes; + + pix_mp->plane_fmt[0].bytesperline = pix_mp->width; + pix_mp->plane_fmt[0].sizeimage = + pix_mp->width * pix_mp->height; 
+ + pix_mp->plane_fmt[1].bytesperline = pix_mp->width; + pix_mp->plane_fmt[1].sizeimage = + pix_mp->width * pix_mp->height / 2; + } + + for (i = 0; i < pix_mp->num_planes; i++) { + memset(&(pix_mp->plane_fmt[i].reserved[0]), 0x0, + sizeof(pix_mp->plane_fmt[0].reserved)); + } + memset(&pix_mp->reserved, 0x0, sizeof(pix_mp->reserved)); + + pix_mp->flags = 0; + } else { + if (V4L2_TYPE_IS_OUTPUT(f->type)) { + pix->bytesperline = 0; + if ((pix->pixelformat != V4L2_PIX_FMT_MPEG2) && + (pix->pixelformat != V4L2_PIX_FMT_H264) && + (pix->pixelformat != V4L2_PIX_FMT_MPEG1)) { + pix->field = V4L2_FIELD_NONE; + } else if (pix->field != V4L2_FIELD_NONE) { + if (pix->field == V4L2_FIELD_ANY) + pix->field = V4L2_FIELD_NONE; + + pr_info("%s, field: %u, fmt: %x\n", + __func__, pix->field, + pix->pixelformat); + } + } else { + if (pix->field != V4L2_FIELD_INTERLACED) + pix->field = V4L2_FIELD_NONE; + + pix->height = clamp(pix->height, + AML_VDEC_MIN_H, + AML_VDEC_MAX_H); + pix->width = clamp(pix->width, + AML_VDEC_MIN_W, + AML_VDEC_MAX_W); + + pix->bytesperline = pix->width; + pix->sizeimage = pix->width * pix->height; + } + pix->flags = 0; + } + + return 0; +} + +static int vidioc_try_fmt_vid_cap_out(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp; + struct v4l2_pix_format *pix = &f->fmt.pix; + struct aml_q_data *q_data = NULL; + struct aml_video_fmt *fmt = NULL; + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + struct vb2_queue *dst_vq; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %u, planes: %u, fmt: %x\n", + __func__, f->type, + V4L2_TYPE_IS_MULTIPLANAR(f->type) ? 
+ f->fmt.pix_mp.num_planes : 1, + f->fmt.pix_mp.pixelformat); + + dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (!dst_vq) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "no vb2 queue for type=%d\n", V4L2_BUF_TYPE_VIDEO_CAPTURE); + return -EINVAL; + } + + if (!V4L2_TYPE_IS_MULTIPLANAR(f->type) && dst_vq->is_multiplanar) + return -EINVAL; + + fmt = aml_vdec_find_format(f); + if (!fmt) { + if (V4L2_TYPE_IS_OUTPUT(f->type)) + f->fmt.pix.pixelformat = aml_video_formats[OUT_FMT_IDX].fourcc; + else + f->fmt.pix.pixelformat = aml_video_formats[CAP_FMT_IDX].fourcc; + fmt = aml_vdec_find_format(f); + } + + vidioc_try_fmt(f, fmt); + + q_data = aml_vdec_get_q_data(ctx, f->type); + if (!q_data) + return -EINVAL; + + if (ctx->state >= AML_STATE_PROBE) + update_ctx_dimension(ctx, f->type); + copy_v4l2_format_dimention(pix_mp, pix, q_data, f->type); + + if (!V4L2_TYPE_IS_OUTPUT(f->type)) + return 0; + + if (V4L2_TYPE_IS_MULTIPLANAR(f->type)) { + if (pix_mp->plane_fmt[0].sizeimage == 0) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "sizeimage of output format must be given\n"); + return -EINVAL; + } + } else { + if (pix->sizeimage == 0) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "sizeimage of output format must be given\n"); + return -EINVAL; + } + } + + return 0; +} + +static int vidioc_vdec_g_selection(struct file *file, void *priv, + struct v4l2_selection *s) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + struct aml_q_data *q_data; + int ratio = 1; + + if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && + (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) + return -EINVAL; + + if (ctx->internal_dw_scale) { + if (ctx->state >= AML_STATE_PROBE) { + unsigned int dw_mode = VDEC_DW_NO_AFBC; + if (vdec_if_get_param(ctx, GET_PARAM_DW_MODE, &dw_mode)) + return -EBUSY; + ratio = get_double_write_ratio(dw_mode); + } + } + + q_data = &ctx->q_data[AML_Q_DATA_DST]; + + switch (s->target) { + case V4L2_SEL_TGT_COMPOSE_DEFAULT: + case V4L2_SEL_TGT_COMPOSE: + s->r.left = 0; + 
s->r.top = 0; + s->r.width = ctx->picinfo.visible_width / ratio; + s->r.height = ctx->picinfo.visible_height / ratio; + break; + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + s->r.left = 0; + s->r.top = 0; + s->r.width = ctx->picinfo.coded_width / ratio; + s->r.height = ctx->picinfo.coded_height / ratio; + break; + default: + return -EINVAL; + } + + if (ctx->state < AML_STATE_PROBE) { + /* set to default value if header info not ready yet*/ + s->r.left = 0; + s->r.top = 0; + s->r.width = q_data->visible_width; + s->r.height = q_data->visible_height; + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, type: %d\n", + __func__, s->type); + + return 0; +} + +static int vidioc_vdec_s_selection(struct file *file, void *priv, + struct v4l2_selection *s) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + int ratio = 1; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, type: %d\n", + __func__, s->type); + + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + if (ctx->internal_dw_scale) { + if (ctx->state >= AML_STATE_PROBE) { + unsigned int dw_mode = VDEC_DW_NO_AFBC; + if (vdec_if_get_param(ctx, GET_PARAM_DW_MODE, &dw_mode)) + return -EBUSY; + ratio = get_double_write_ratio(dw_mode); + } + } + + switch (s->target) { + case V4L2_SEL_TGT_COMPOSE: + s->r.left = 0; + s->r.top = 0; + s->r.width = ctx->picinfo.visible_width / ratio; + s->r.height = ctx->picinfo.visible_height / ratio; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* called when it is beyong AML_STATE_PROBE */ +static void update_ctx_dimension(struct aml_vcodec_ctx *ctx, u32 type) +{ + struct aml_q_data *q_data; + unsigned int dw_mode = VDEC_DW_NO_AFBC; + int ratio = 1; + + q_data = aml_vdec_get_q_data(ctx, type); + + if (ctx->internal_dw_scale) { + if (vdec_if_get_param(ctx, GET_PARAM_DW_MODE, &dw_mode)) + return; + ratio = get_double_write_ratio(dw_mode); + } + + if (V4L2_TYPE_IS_MULTIPLANAR(type)) { + q_data->sizeimage[0] = ctx->picinfo.y_len_sz; + q_data->sizeimage[1] = ctx->picinfo.c_len_sz; + + 
q_data->coded_width = ALIGN(ctx->picinfo.coded_width / ratio, 64); + q_data->coded_height = ALIGN(ctx->picinfo.coded_height / ratio, 64); + + q_data->bytesperline[0] = ALIGN(ctx->picinfo.coded_width / ratio, 64); + q_data->bytesperline[1] = ALIGN(ctx->picinfo.coded_width / ratio, 64); + } else { + q_data->coded_width = ALIGN(ctx->picinfo.coded_width / ratio, 64); + q_data->coded_height = ALIGN(ctx->picinfo.coded_height / ratio, 64); + q_data->sizeimage[0] = ctx->picinfo.y_len_sz; + q_data->sizeimage[0] += ctx->picinfo.c_len_sz; + q_data->bytesperline[0] = ALIGN(ctx->picinfo.coded_width / ratio, 64); + } +} + +static void copy_v4l2_format_dimention(struct v4l2_pix_format_mplane *pix_mp, + struct v4l2_pix_format *pix, + struct aml_q_data *q_data, + u32 type) +{ + int i; + + if (!pix || !pix_mp || !q_data) + return; + + if (V4L2_TYPE_IS_MULTIPLANAR(type)) { + pix_mp->width = q_data->coded_width; + pix_mp->height = q_data->coded_height; + pix_mp->num_planes = q_data->fmt->num_planes; + pix_mp->pixelformat = q_data->fmt->fourcc; + + for (i = 0; i < q_data->fmt->num_planes; i++) { + pix_mp->plane_fmt[i].bytesperline = q_data->bytesperline[i]; + pix_mp->plane_fmt[i].sizeimage = q_data->sizeimage[i]; + } + } else { + pix->width = q_data->coded_width; + pix->height = q_data->coded_height; + pix->pixelformat = q_data->fmt->fourcc; + pix->bytesperline = q_data->bytesperline[0]; + pix->sizeimage = q_data->sizeimage[0]; + } +} + +static int vidioc_vdec_s_fmt(struct file *file, void *priv, + struct v4l2_format *f) +{ + int ret = 0; + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp; + struct v4l2_pix_format *pix = &f->fmt.pix; + struct aml_q_data *q_data = NULL; + struct aml_video_fmt *fmt; + struct vb2_queue *dst_vq; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %u, planes: %u, fmt: %x\n", + __func__, f->type, + V4L2_TYPE_IS_MULTIPLANAR(f->type) ? 
+ f->fmt.pix_mp.num_planes : 1, + f->fmt.pix_mp.pixelformat); + + dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (!dst_vq) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "no vb2 queue for type=%d\n", V4L2_BUF_TYPE_VIDEO_CAPTURE); + return -EINVAL; + } + + if (!V4L2_TYPE_IS_MULTIPLANAR(f->type) && dst_vq->is_multiplanar) + return -EINVAL; + + q_data = aml_vdec_get_q_data(ctx, f->type); + if (!q_data) + return -EINVAL; + + if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) && + vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "out_q_ctx buffers already requested\n"); + ret = -EBUSY; + } + + if ((!V4L2_TYPE_IS_OUTPUT(f->type)) && + vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "cap_q_ctx buffers already requested\n"); + ret = -EBUSY; + } + + fmt = aml_vdec_find_format(f); + if (fmt == NULL) { + if (V4L2_TYPE_IS_OUTPUT(f->type)) + fmt = &aml_video_formats[OUT_FMT_IDX]; + else + fmt = &aml_video_formats[CAP_FMT_IDX]; + f->fmt.pix.pixelformat = fmt->fourcc; + } + + q_data->fmt = fmt; + vidioc_try_fmt(f, q_data->fmt); + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + q_data->sizeimage[0] = pix_mp->plane_fmt[0].sizeimage; + q_data->coded_width = pix_mp->width; + q_data->coded_height = pix_mp->height; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "w: %d, h: %d, size: %d\n", + pix_mp->width, pix_mp->height, + pix_mp->plane_fmt[0].sizeimage); + + ctx->output_pix_fmt = pix_mp->pixelformat; + ctx->colorspace = f->fmt.pix_mp.colorspace; + ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc; + ctx->quantization = f->fmt.pix_mp.quantization; + ctx->xfer_func = f->fmt.pix_mp.xfer_func; + + mutex_lock(&ctx->state_lock); + if (ctx->state == AML_STATE_IDLE) { + ret = vdec_if_init(ctx, q_data->fmt->fourcc); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "vdec_if_init() fail ret=%d\n", ret); + mutex_unlock(&ctx->state_lock); + return -EINVAL; + } + ctx->state = AML_STATE_INIT; + 
ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state (AML_STATE_INIT)\n"); + } + mutex_unlock(&ctx->state_lock); + } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { + q_data->sizeimage[0] = pix->sizeimage; + q_data->coded_width = pix->width; + q_data->coded_height = pix->height; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "w: %d, h: %d, size: %d\n", + pix->width, pix->height, + pix->sizeimage); + + ctx->output_pix_fmt = pix->pixelformat; + ctx->colorspace = f->fmt.pix.colorspace; + ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc; + ctx->quantization = f->fmt.pix.quantization; + ctx->xfer_func = f->fmt.pix.xfer_func; + + mutex_lock(&ctx->state_lock); + if (ctx->state == AML_STATE_IDLE) { + ret = vdec_if_init(ctx, q_data->fmt->fourcc); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "vdec_if_init() fail ret=%d\n", ret); + mutex_unlock(&ctx->state_lock); + return -EINVAL; + } + ctx->state = AML_STATE_INIT; + ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state (AML_STATE_INIT)\n"); + } + mutex_unlock(&ctx->state_lock); + } + + if (!V4L2_TYPE_IS_OUTPUT(f->type)) { + ctx->cap_pix_fmt = V4L2_TYPE_IS_MULTIPLANAR(f->type) ? 
+ pix_mp->pixelformat : pix->pixelformat; + if (ctx->state >= AML_STATE_PROBE) { + update_ctx_dimension(ctx, f->type); + copy_v4l2_format_dimention(pix_mp, pix, q_data, f->type); + v4l_buf_size_decision(ctx); + } + } + + return 0; +} + +static int vidioc_enum_framesizes(struct file *file, void *priv, + struct v4l2_frmsizeenum *fsize) +{ + int i = 0; + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, idx: %d, pix fmt: %x\n", + __func__, fsize->index, fsize->pixel_format); + + if (fsize->index != 0) + return -EINVAL; + + for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) { + if (fsize->pixel_format != aml_vdec_framesizes[i].fourcc) + continue; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + fsize->stepwise = aml_vdec_framesizes[i].stepwise; + if (!(ctx->dev->dec_capability & + VCODEC_CAPABILITY_4K_DISABLED)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "4K is enabled\n"); + fsize->stepwise.max_width = + VCODEC_DEC_4K_CODED_WIDTH; + fsize->stepwise.max_height = + VCODEC_DEC_4K_CODED_HEIGHT; + } + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "%x, %d %d %d %d %d %d\n", + ctx->dev->dec_capability, + fsize->stepwise.min_width, + fsize->stepwise.max_width, + fsize->stepwise.step_width, + fsize->stepwise.min_height, + fsize->stepwise.max_height, + fsize->stepwise.step_height); + return 0; + } + + return -EINVAL; +} + +static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue) +{ + struct aml_video_fmt *fmt; + int i = 0, j = 0; + + /* I420 only used for mjpeg. 
*/ + if (!output_queue && support_mjpeg && support_format_I420) { + for (i = 0; i < NUM_FORMATS; i++) { + fmt = &aml_video_formats[i]; + if ((fmt->fourcc == V4L2_PIX_FMT_YUV420) || + (fmt->fourcc == V4L2_PIX_FMT_YUV420M)) { + break; + } + } + } + + for (; i < NUM_FORMATS; i++) { + fmt = &aml_video_formats[i]; + if (output_queue && (fmt->type != AML_FMT_DEC)) + continue; + if (!output_queue && (fmt->type != AML_FMT_FRAME)) + continue; + if (support_mjpeg && !support_format_I420 && + ((fmt->fourcc == V4L2_PIX_FMT_YUV420) || + (fmt->fourcc == V4L2_PIX_FMT_YUV420M))) + continue; + + if (j == f->index) { + f->pixelformat = fmt->fourcc; + return 0; + } + ++j; + } + + return -EINVAL; +} + +static int vidioc_vdec_enum_fmt_vid_cap_mplane(struct file *file, + void *priv, struct v4l2_fmtdesc *f) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s\n", __func__); + + return vidioc_enum_fmt(f, false); +} + +static int vidioc_vdec_enum_fmt_vid_out_mplane(struct file *file, + void *priv, struct v4l2_fmtdesc *f) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s\n", __func__); + + return vidioc_enum_fmt(f, true); +} + +static int vidioc_vdec_g_fmt(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp; + struct v4l2_pix_format *pix = &f->fmt.pix; + struct vb2_queue *vq; + struct vb2_queue *dst_vq; + struct aml_q_data *q_data; + int ret = 0; + + vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); + if (!vq) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "no vb2 queue for type=%d\n", f->type); + return -EINVAL; + } + + dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (!dst_vq) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "no vb2 queue for type=%d\n", V4L2_BUF_TYPE_VIDEO_CAPTURE); + return -EINVAL; + } + + if (!V4L2_TYPE_IS_MULTIPLANAR(f->type) && dst_vq->is_multiplanar) + return 
-EINVAL; + + q_data = aml_vdec_get_q_data(ctx, f->type); + + ret = vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "GET_PARAM_PICTURE_INFO err\n"); + } else { + if ((ctx->picinfo.visible_height < 16 && ctx->picinfo.visible_height > 0) || + (ctx->picinfo.visible_width < 16 && ctx->picinfo.visible_width > 0)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "The width or height of the stream is less than 16\n"); + return -EPERM; + } + } + + if (V4L2_TYPE_IS_MULTIPLANAR(f->type)) { + pix_mp->field = ret ? V4L2_FIELD_NONE : ctx->picinfo.field; + pix_mp->colorspace = ctx->colorspace; + pix_mp->ycbcr_enc = ctx->ycbcr_enc; + pix_mp->quantization = ctx->quantization; + pix_mp->xfer_func = ctx->xfer_func; + } else { + pix->field = ret ? V4L2_FIELD_NONE : ctx->picinfo.field; + pix->colorspace = ctx->colorspace; + pix->ycbcr_enc = ctx->ycbcr_enc; + pix->quantization = ctx->quantization; + pix->xfer_func = ctx->xfer_func; + } + + if ((!V4L2_TYPE_IS_OUTPUT(f->type)) && + (ctx->state >= AML_STATE_PROBE)) { + update_ctx_dimension(ctx, f->type); + copy_v4l2_format_dimention(pix_mp, pix, q_data, f->type); + } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + /* + * This is run on OUTPUT + * The buffer contains compressed image + * so width and height have no meaning. + * Assign value here to pass v4l2-compliance test + */ + copy_v4l2_format_dimention(pix_mp, pix, q_data, f->type); + } else { + copy_v4l2_format_dimention(pix_mp, pix, q_data, f->type); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "type=%d state=%d Format information could not be read, not ready yet!\n", + f->type, ctx->state); + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %u, planes: %u, fmt: %x\n", + __func__, f->type, + V4L2_TYPE_IS_MULTIPLANAR(f->type) ? 
+ q_data->fmt->num_planes : 1, + q_data->fmt->fourcc); + + return 0; +} + +static int vidioc_vdec_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *create) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(priv); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %u, count: %u\n", + __func__, create->format.type, create->count); + + return v4l2_m2m_ioctl_create_bufs(file, priv, create); +} + +/*int vidioc_vdec_g_ctrl(struct file *file, void *fh, + struct v4l2_control *a) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(fh); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, id: %d\n", __func__, a->id); + + if (a->id == V4L2_CID_MIN_BUFFERS_FOR_CAPTURE) + a->value = 4; + else if (a->id == V4L2_CID_MIN_BUFFERS_FOR_OUTPUT) + a->value = 8; + + return 0; +}*/ + +static int vb2ops_vdec_queue_setup(struct vb2_queue *vq, + unsigned int *nbuffers, + unsigned int *nplanes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(vq); + struct aml_q_data *q_data; + unsigned int i; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, type: %d\n", + __func__, vq->type); + + q_data = aml_vdec_get_q_data(ctx, vq->type); + if (q_data == NULL) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "vq->type=%d err\n", vq->type); + return -EINVAL; + } + + if (*nplanes) { + for (i = 0; i < *nplanes; i++) { + if (sizes[i] < q_data->sizeimage[i]) + return -EINVAL; + alloc_devs[i] = &ctx->dev->plat_dev->dev; + + if (!V4L2_TYPE_IS_OUTPUT(vq->type)) + alloc_devs[i] = v4l_get_dev_from_codec_mm(); + } + } else { + int dw_mode = VDEC_DW_NO_AFBC; + + if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + *nplanes = 2; + else + *nplanes = 1; + + if (!vdec_if_get_param(ctx, GET_PARAM_DW_MODE, &dw_mode)) { + if (dw_mode == VDEC_DW_AFBC_ONLY) + *nplanes = 1; + } + + for (i = 0; i < *nplanes; i++) { + sizes[i] = q_data->sizeimage[i]; + if (V4L2_TYPE_IS_OUTPUT(vq->type) && ctx->output_dma_mode) + sizes[i] = 1; + alloc_devs[i] = &ctx->dev->plat_dev->dev; + + if 
(!V4L2_TYPE_IS_OUTPUT(vq->type)) + alloc_devs[i] = v4l_get_dev_from_codec_mm(); + } + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "type: %d, plane: %d, buf cnt: %d, size: [Y: %u, C: %u]\n", + vq->type, *nplanes, *nbuffers, sizes[0], sizes[1]); + + return 0; +} + +static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb) +{ + struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct aml_q_data *q_data; + struct vb2_v4l2_buffer *vb2_v4l2 = NULL; + struct aml_video_dec_buf *buf = NULL; + int i; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d, idx: %d\n", + __func__, vb->vb2_queue->type, vb->index); + + if (vb->memory == VB2_MEMORY_DMABUF + && V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) + return 0; + + q_data = aml_vdec_get_q_data(ctx, vb->vb2_queue->type); + + for (i = 0; i < q_data->fmt->num_planes; i++) { + if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "data will not fit into plane %d (%lu < %d)\n", + i, vb2_plane_size(vb, i), + q_data->sizeimage[i]); + } + } + + vb2_v4l2 = to_vb2_v4l2_buffer(vb); + buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb); + + if (vb2_v4l2->meta_ptr && (copy_from_user(buf->meta_data, + (void *)vb2_v4l2->meta_ptr, META_DATA_SIZE + 4))) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "%s:copy meta data error. ptr: %lx\n", __func__, vb2_v4l2->meta_ptr); + } + + return 0; +} + +static int init_mmu_bmmu_box(struct aml_vcodec_ctx *ctx) +{ + int i; + int mmu_flag = ctx->is_drm_mode? 
CODEC_MM_FLAGS_TVP:0; + int bmmu_flag = mmu_flag; + u32 dw_mode = VDEC_DW_NO_AFBC; + + ctx->comp_bufs = vzalloc(sizeof(*ctx->comp_bufs) * V4L_CAP_BUFF_MAX); + if (!ctx->comp_bufs) + return -ENOMEM; + + if (vdec_if_get_param(ctx, GET_PARAM_DW_MODE, &dw_mode)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "invalid dw_mode\n"); + goto free_comp_bufs; + } + + /* init mmu box */ + ctx->mmu_box = decoder_mmu_box_alloc_box("v4l2_dec", + ctx->id, V4L_CAP_BUFF_MAX, + ctx->comp_info.max_size * SZ_1M, mmu_flag); + if (!ctx->mmu_box) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "fail to create mmu box\n"); + goto free_comp_bufs; + } + + /* init bmmu box */ + bmmu_flag |= CODEC_MM_FLAGS_CMA_CLEAR | CODEC_MM_FLAGS_FOR_VDECODER; + ctx->bmmu_box = decoder_bmmu_box_alloc_box("v4l2_dec", + ctx->id, V4L_CAP_BUFF_MAX, + 4 + PAGE_SHIFT, bmmu_flag); + if (!ctx->bmmu_box) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "fail to create bmmu box\n"); + goto free_mmubox; + } + + if (dw_mode & 0x20) { + /* init mmu box dw*/ + ctx->mmu_box_dw = decoder_mmu_box_alloc_box("v4l2_dec", + ctx->id, V4L_CAP_BUFF_MAX, + ctx->comp_info.max_size * SZ_1M, mmu_flag); + if (!ctx->mmu_box_dw) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "fail to create mmu box dw\n"); + goto free_bmmubox; + } + + /* init bmmu box dw*/ + bmmu_flag |= CODEC_MM_FLAGS_CMA_CLEAR | CODEC_MM_FLAGS_FOR_VDECODER; + ctx->bmmu_box_dw = decoder_bmmu_box_alloc_box("v4l2_dec", + ctx->id, V4L_CAP_BUFF_MAX, + 4 + PAGE_SHIFT, bmmu_flag); + if (!ctx->bmmu_box_dw) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "fail to create bmmu box dw\n"); + goto free_mmubox_dw; + } + } + + kref_init(&ctx->box_ref); + for (i = 0; i < V4L_CAP_BUFF_MAX; i++) { + struct internal_comp_buf *buf; + buf = &ctx->comp_bufs[i]; + buf->index = i; + buf->ref = 0; + buf->box_ref = &ctx->box_ref; + buf->mmu_box = ctx->mmu_box; + buf->bmmu_box = ctx->bmmu_box; + buf->mmu_box_dw = ctx->mmu_box_dw; + buf->bmmu_box_dw = ctx->bmmu_box_dw; + } + kref_get(&ctx->ctx_ref); + + ctx->uvm_proxy = 
vzalloc(sizeof(*ctx->uvm_proxy) * V4L_CAP_BUFF_MAX); + if (!ctx->uvm_proxy) + goto free_mmubox; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "box init, bmmu: %px, mmu: %px, mmu_dw: %px bmmu_dw: %px\n", + ctx->bmmu_box, ctx->mmu_box, ctx->mmu_box_dw, ctx->bmmu_box_dw); + + return 0; +free_mmubox_dw: + decoder_mmu_box_free(ctx->mmu_box_dw); + ctx->mmu_box_dw = NULL; + +free_bmmubox: + decoder_bmmu_box_free(ctx->bmmu_box); + ctx->bmmu_box = NULL; + +free_mmubox: + decoder_mmu_box_free(ctx->mmu_box); + ctx->mmu_box = NULL; + +free_comp_bufs: + vfree(ctx->comp_bufs); + ctx->comp_bufs = NULL; + + return -1; +} + +void aml_alloc_buffer(struct aml_vcodec_ctx *ctx, int flag) +{ + int i = 0; + + if (flag & DV_TYPE) { + for (i = 0; i < V4L_CAP_BUFF_MAX; i++) { + ctx->aux_infos.bufs[i].md_buf = vzalloc(MD_BUF_SIZE); + if (ctx->aux_infos.bufs[i].md_buf == NULL) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "v4l2 alloc %dth dv md buffer fail\n", i); + } + + ctx->aux_infos.bufs[i].comp_buf = vzalloc(COMP_BUF_SIZE); + if (ctx->aux_infos.bufs[i].comp_buf == NULL) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "v4l2 alloc %dth dv comp buffer fail\n", i); + } + } + } + + if (flag & SEI_TYPE) { + for (i = 0; i < V4L_CAP_BUFF_MAX; i++) { + ctx->aux_infos.bufs[i].sei_buf = vzalloc(SEI_BUF_SIZE); + if (ctx->aux_infos.bufs[i].sei_buf) { + ctx->aux_infos.bufs[i].sei_size = 0; + ctx->aux_infos.bufs[i].sei_state = 1; + ctx->aux_infos.sei_need_free = false; + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "v4l2 alloc %dth aux buffer:%px\n", + i, ctx->aux_infos.bufs[i].sei_buf); + } else { + ctx->aux_infos.bufs[i].sei_buf = NULL; + ctx->aux_infos.bufs[i].sei_state = 0; + ctx->aux_infos.bufs[i].sei_size = 0; + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "v4l2 alloc %dth aux buffer fail\n", i); + } + } + } +} + +void aml_free_buffer(struct aml_vcodec_ctx *ctx, int flag) +{ + int i = 0; + + if (flag & DV_TYPE) { + for (i = 0; i < V4L_CAP_BUFF_MAX; i++) { + if (ctx->aux_infos.bufs[i].md_buf != NULL) { + 
vfree(ctx->aux_infos.bufs[i].md_buf); + ctx->aux_infos.bufs[i].md_buf = NULL; + } + + if (ctx->aux_infos.bufs[i].comp_buf != NULL) { + vfree(ctx->aux_infos.bufs[i].comp_buf); + ctx->aux_infos.bufs[i].comp_buf = NULL; + } + } + } + + if (flag & SEI_TYPE) { + for (i = 0; i < V4L_CAP_BUFF_MAX; i++) { + if (ctx->aux_infos.bufs[i].sei_buf != NULL) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "v4l2 free %dth aux buffer:%px\n", + i, ctx->aux_infos.bufs[i].sei_buf); + vfree(ctx->aux_infos.bufs[i].sei_buf); + ctx->aux_infos.bufs[i].sei_state = 0; + ctx->aux_infos.bufs[i].sei_size = 0; + ctx->aux_infos.bufs[i].sei_buf = NULL; + } + } + } +} + +void aml_free_one_sei_buffer(struct aml_vcodec_ctx *ctx, char **addr, int *size, int idx) +{ + if (ctx->aux_infos.bufs[idx].sei_buf != NULL) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "v4l2 free %dth aux buffer:%px\n", + idx, ctx->aux_infos.bufs[idx].sei_buf); + + vfree(ctx->aux_infos.bufs[idx].sei_buf); + ctx->aux_infos.bufs[idx].sei_state = 0; + ctx->aux_infos.bufs[idx].sei_size = 0; + ctx->aux_infos.bufs[idx].sei_buf = NULL; + *addr = NULL; + *size = 0; + ctx->aux_infos.sei_need_free = true; + } +} + +void aml_bind_sei_buffer(struct aml_vcodec_ctx *ctx, char **addr, int *size, int *idx) +{ + int index = ctx->aux_infos.sei_index; + int count = 0; + + if (ctx->aux_infos.sei_need_free) { + for (count = 0; count < V4L_CAP_BUFF_MAX; count++) { + if ((ctx->aux_infos.bufs[index].sei_buf != NULL) && + (ctx->aux_infos.bufs[index].sei_state == 1)) { + break; + } + index = (index + 1) % V4L_CAP_BUFF_MAX; + } + } else { + for (count = 0; count < V4L_CAP_BUFF_MAX; count++) { + if ((ctx->aux_infos.bufs[index].sei_buf != NULL) && + ((ctx->aux_infos.bufs[index].sei_state == 1) || + (ctx->aux_infos.bufs[index].sei_state == 2))) { + memset(ctx->aux_infos.bufs[index].sei_buf, 0, SEI_BUF_SIZE); + ctx->aux_infos.bufs[index].sei_size = 0; + break; + } + index = (index + 1) % V4L_CAP_BUFF_MAX; + } + } + + if (count == V4L_CAP_BUFF_MAX) { + *addr = NULL; + 
*size = 0; + } else { + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "v4l2 bind %dth aux buffer:%px, count = %d\n", + index, ctx->aux_infos.bufs[index].sei_buf, count); + *addr = ctx->aux_infos.bufs[index].sei_buf; + *size = ctx->aux_infos.bufs[index].sei_size; + *idx = index; + ctx->aux_infos.bufs[index].sei_state = 2; + ctx->aux_infos.sei_index = (index + 1) % V4L_CAP_BUFF_MAX; + } +} + +void aml_bind_dv_buffer(struct aml_vcodec_ctx *ctx, char **comp_buf, char **md_buf) +{ + int index = ctx->aux_infos.dv_index; + + if ((ctx->aux_infos.bufs[index].comp_buf != NULL) && + (ctx->aux_infos.bufs[index].md_buf != NULL)) { + *comp_buf = ctx->aux_infos.bufs[index].comp_buf; + *md_buf = ctx->aux_infos.bufs[index].md_buf; + ctx->aux_infos.dv_index = (index + 1) % V4L_CAP_BUFF_MAX; + } +} + +void aml_v4l_ctx_release(struct kref *kref) +{ + struct aml_vcodec_ctx * ctx; + + ctx = container_of(kref, struct aml_vcodec_ctx, ctx_ref); + + if (ctx->vpp) { + atomic_dec(&ctx->dev->vpp_count); + aml_v4l2_vpp_destroy(ctx->vpp); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "vpp destory inst count:%d.\n", + atomic_read(&ctx->dev->vpp_count)); + } + + if (ctx->ge2d) { + aml_v4l2_ge2d_destroy(ctx->ge2d); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "ge2d destory.\n"); + } + + v4l2_m2m_ctx_release(ctx->m2m_ctx); + aml_task_chain_remove(ctx); + + vfree(ctx->meta_infos.meta_bufs); + ctx->aux_infos.free_buffer(ctx, SEI_TYPE | DV_TYPE); + ctx->aux_infos.free_buffer(ctx, 1); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "v4ldec has been destroyed.\n"); + + if (ctx->sync) { + vdec_clean_all_fence(ctx->sync); + } + + kfree(ctx); +} + +static void box_release(struct kref *kref) +{ + struct aml_vcodec_ctx * ctx + = container_of(kref, struct aml_vcodec_ctx, box_ref); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "%s, bmmu: %px, mmu: %px mmu_dw: %pu\n", + __func__, ctx->bmmu_box, ctx->mmu_box,ctx->mmu_box_dw); + + decoder_bmmu_box_free(ctx->bmmu_box); + decoder_mmu_box_free(ctx->mmu_box); + + if 
(ctx->config.parm.dec.cfg.double_write_mode & 0x20) { + decoder_mmu_box_free(ctx->mmu_box_dw); + decoder_bmmu_box_free(ctx->bmmu_box_dw); + } + vfree(ctx->comp_bufs); + vfree(ctx->uvm_proxy); + kref_put(&ctx->ctx_ref, aml_v4l_ctx_release); +} + +static void internal_buf_free(void *arg) +{ + struct internal_comp_buf* ibuf = + (struct internal_comp_buf*)arg; + struct aml_vcodec_ctx * ctx + = container_of(ibuf->box_ref,struct aml_vcodec_ctx, box_ref); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "%s, idx:%d\n", __func__, ibuf->index); + + mutex_lock(&ctx->comp_lock); + + if (!(ibuf->ref & 0xff00)) { + decoder_mmu_box_free_idx(ibuf->mmu_box, ibuf->index); + decoder_bmmu_box_free_idx(ibuf->bmmu_box, ibuf->index); + + if (ctx->config.parm.dec.cfg.double_write_mode & 0x20) { + decoder_mmu_box_free_idx(ibuf->mmu_box_dw, ibuf->index); + decoder_bmmu_box_free_idx(ibuf->bmmu_box_dw, ibuf->index); + } + } + ibuf->ref = 0; + + mutex_unlock(&ctx->comp_lock); + + kref_put(ibuf->box_ref, box_release); +} + +static void internal_buf_free2(void *arg) +{ + struct internal_comp_buf *ibuf = + container_of(arg, struct internal_comp_buf, priv_data); + struct aml_vcodec_ctx * ctx + = container_of(ibuf->box_ref, struct aml_vcodec_ctx, box_ref); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "%s, idx: %d\n", __func__, ibuf->index); + + mutex_lock(&ctx->comp_lock); + + if (!(ibuf->ref & 0xff00)) { + decoder_mmu_box_free_idx(ibuf->mmu_box, ibuf->index); + decoder_bmmu_box_free_idx(ibuf->bmmu_box, ibuf->index); + + if (ctx->config.parm.dec.cfg.double_write_mode & 0x20) { + decoder_mmu_box_free_idx(ibuf->mmu_box_dw, ibuf->index); + decoder_bmmu_box_free_idx(ibuf->bmmu_box_dw, ibuf->index); + } + } + ibuf->ref = 0; + + mutex_unlock(&ctx->comp_lock); + + kref_put(ibuf->box_ref, box_release); +} + +static void aml_uvm_buf_free(void *arg) +{ + struct aml_uvm_buff_ref * ubuf = + (struct aml_uvm_buff_ref*)arg; + struct aml_vcodec_ctx * ctx + = container_of(ubuf->ref, struct aml_vcodec_ctx, ctx_ref); + + 
v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "%s, vb:%d, dbuf:%px, ino:%lu\n", + __func__, ubuf->index, ubuf->dbuf, + file_inode(ubuf->dbuf->file)->i_ino); + + kref_put(ubuf->ref, aml_v4l_ctx_release); + vfree(ubuf); +} + +static int uvm_attach_hook_mod_local(struct aml_vcodec_ctx *ctx, + struct uvm_hook_mod_info *uvm) +{ + struct internal_comp_buf* ibuf = uvm->arg; + + ctx->uvm_proxy[ibuf->index] = *uvm; + + return 0; +} + +static int update_comp_buffer_to_reuse(struct aml_vcodec_ctx *ctx, + struct aml_video_dec_buf *buf) +{ + struct internal_comp_buf* ibuf = NULL; + + mutex_lock(&ctx->comp_lock); + + ibuf = vb_to_comp(ctx, &buf->vb.vb2_buf); + if (!ibuf) { + mutex_unlock(&ctx->comp_lock); + return 0; + } + + if (ibuf->ref & 0xff) { + buf->internal_index = ibuf->index; + ibuf->frame_buffer_size = ctx->comp_info.frame_buffer_size; + + if (ctx->comp_info.header_size != ibuf->header_size) { + decoder_bmmu_box_free_idx(ctx->bmmu_box, ibuf->index); + if (decoder_bmmu_box_alloc_buf_phy(ctx->bmmu_box, + ibuf->index, ctx->comp_info.header_size, + "v4l2_dec", &ibuf->header_addr) < 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "fail to alloc %dth bmmu\n", + ibuf->index); + mutex_unlock(&ctx->comp_lock); + return -ENOMEM; + } + ibuf->header_size = ctx->comp_info.header_size; + } + + ibuf->ref |= (1 << 8); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "%s, reuse comp buffer vb2:%d <--> internal: %d, header_addr 0x%lx, size: %u\n", + __func__, buf->vb.vb2_buf.index, + buf->internal_index, + ibuf->header_addr, + ibuf->header_size); + } + + mutex_unlock(&ctx->comp_lock); + + return (ibuf->ref & 0xff00) ? 
1 : 0; +} + +static int bind_comp_buffer_to_uvm(struct aml_vcodec_ctx *ctx, + struct aml_video_dec_buf *buf) +{ + int ret, i; + struct dma_buf * dma = buf->vb.vb2_buf.planes[0].dbuf; + struct aml_dec_params *parms = &ctx->config.parm.dec; + struct uvm_hook_mod_info u_info; + struct internal_comp_buf* ibuf; + u32 dw_mode = VDEC_DW_NO_AFBC; + + /* get header and page size */ + if (vdec_if_get_param(ctx, GET_PARAM_COMP_BUF_INFO, &ctx->comp_info)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "fail to get comp info\n"); + return -EINVAL; + } + + if (!ctx->bmmu_box || !ctx->mmu_box) + if (init_mmu_bmmu_box(ctx)) + return -EINVAL; + + ret = update_comp_buffer_to_reuse(ctx, buf); + if (ret < 0) + return ret; + + if (ret == 1 /*reused*/) + return 0; + + for (i = 0; i < V4L_CAP_BUFF_MAX; i++) { + if (!ctx->comp_bufs[i].ref) + break; + } + + if (i == V4L_CAP_BUFF_MAX) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "out of internal buf\n"); + return -EINVAL; + } + + if (vdec_if_get_param(ctx, GET_PARAM_DW_MODE, &dw_mode)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "invalid dw_mode\n"); + return -EINVAL; + } + + buf->internal_index = i; + ibuf = &ctx->comp_bufs[i]; + ibuf->frame_buffer_size = ctx->comp_info.frame_buffer_size; + ibuf->header_size = ctx->comp_info.header_size; + + /* allocate header */ + ret = decoder_bmmu_box_alloc_buf_phy(ctx->bmmu_box, + ibuf->index, ctx->comp_info.header_size, + "v4l2_dec", &ibuf->header_addr); + if (ret < 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "fail to alloc %dth bmmu\n", i); + return -ENOMEM; + } + + if (dw_mode & 0x20) { + ret = decoder_bmmu_box_alloc_buf_phy(ctx->bmmu_box_dw, + ibuf->index, ctx->comp_info.header_size, + "v4l2_dec", &ibuf->header_dw_addr); + if (ret < 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "fail to alloc %dth bmmu dw\n", i); + return -ENOMEM; + } + } + + kref_get(&ctx->box_ref); + ibuf->ref = 1; + + /* frame SG buffer need to be realloc inside decoder, + * just before slice decoding to save memory + */ + u_info.type = 
VF_SRC_DECODER; + u_info.arg = ibuf; + u_info.free = internal_buf_free; + + if (parms->cfg.uvm_hook_type == VF_PROCESS_V4LVIDEO) { + /* adapted video composer to use for hwc. */ + ibuf->priv_data.v4l_inst_id = ctx->id; + u_info.type = VF_PROCESS_V4LVIDEO; + u_info.arg = &ibuf->priv_data; + u_info.free = internal_buf_free2; + } + + ret = dmabuf_is_uvm(dma) ? + uvm_attach_hook_mod(dma, &u_info) : + uvm_attach_hook_mod_local(ctx, &u_info); + if (ret < 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "fail to set dmabuf priv buf\n"); + goto bmmu_box_free; + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "%s, bind vb2:(%d, %px) <--> internal: (%d, %px) header_addr 0x%lx, size: %u\n", + __func__, buf->vb.vb2_buf.index, + dma, i, ibuf, ibuf->header_addr, + ctx->comp_info.header_size); + + return 0; + +bmmu_box_free: + if (dw_mode & 0x20) { + decoder_bmmu_box_free_idx(ibuf->bmmu_box_dw, ibuf->index); + } + decoder_bmmu_box_free_idx(ibuf->bmmu_box, ibuf->index); + kref_put(&ctx->box_ref, box_release); + ibuf->ref = 0; + return -EINVAL; +} + +static int aml_uvm_buff_attach(struct vb2_buffer * vb) +{ + int ret = 0; + struct dma_buf *dbuf = vb->planes[0].dbuf; + struct uvm_hook_mod_info u_info; + struct aml_vcodec_ctx *ctx = + vb2_get_drv_priv(vb->vb2_queue); + struct aml_uvm_buff_ref *ubuf = NULL; + + if (vb->memory != VB2_MEMORY_DMABUF || !dmabuf_is_uvm(dbuf)) + return 0; + + ubuf = vzalloc(sizeof(struct aml_uvm_buff_ref)); + if (ubuf == NULL) + return -ENOMEM; + + ubuf->index = vb->index; + ubuf->addr = vb2_dma_contig_plane_dma_addr(vb, 0); + ubuf->dbuf = dbuf; + ubuf->ref = &ctx->ctx_ref; + + u_info.type = VF_PROCESS_DECODER; + u_info.arg = (void *)ubuf; + u_info.free = aml_uvm_buf_free; + ret = uvm_attach_hook_mod(dbuf, &u_info); + if (ret < 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "aml uvm buffer %d attach fail.\n", + ubuf->index); + return ret; + } + + kref_get(ubuf->ref); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "%s, vb:%d, dbuf:%px, ino:%lu\n", + __func__, ubuf->index, 
ubuf->dbuf, + file_inode(ubuf->dbuf->file)->i_ino); + + return ret; +} + +static struct internal_comp_buf* vb_to_comp(struct aml_vcodec_ctx *ctx, + struct vb2_buffer *vb) +{ + struct aml_dec_params *parms = &ctx->config.parm.dec; + bool is_v4lvideo = (parms->cfg.uvm_hook_type == VF_PROCESS_V4LVIDEO); + enum uvm_hook_mod_type u_type = + is_v4lvideo ? VF_PROCESS_V4LVIDEO : VF_SRC_DECODER; + struct dma_buf *dbuf = vb->planes[0].dbuf; + struct internal_comp_buf *ibuf = NULL; + struct uvm_hook_mod *uhmod = NULL; + + uhmod = uvm_get_hook_mod(dbuf, u_type); + if (IS_ERR_OR_NULL(uhmod)) + return NULL; + + ibuf = !is_v4lvideo ? (struct internal_comp_buf *) uhmod->arg : + container_of(uhmod->arg, struct internal_comp_buf, priv_data); + + uvm_put_hook_mod(dbuf, u_type); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "%s, vb2: (%d, %px) --> comp: (%d, %px)\n", + __func__, vb->index, dbuf, ibuf->index, ibuf); + + return ibuf; +} + +static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb) +{ + struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vb2_v4l2 = to_vb2_v4l2_buffer(vb); + struct aml_video_dec_buf *buf = + container_of(vb2_v4l2, struct aml_video_dec_buf, vb); + struct vdec_v4l2_buffer *fb = &buf->frame_buffer; + struct aml_vcodec_mem src_mem; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, vb: %lx, type: %d, idx: %d, state: %d, used: %d, ts: %llu\n", + __func__, (ulong) vb, vb->vb2_queue->type, + vb->index, vb->state, buf->used, vb->timestamp); + + /* + * check if this buffer is ready to be used after decode + */ + if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { + u32 dw_mode = VDEC_DW_NO_AFBC; + + if (vdec_if_get_param(ctx, GET_PARAM_DW_MODE, &dw_mode)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "invalid dw_mode\n"); + return; + + } + + if (!buf->que_in_m2m) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "enque capture buf idx %d, vf: %lx\n", + vb->index, (ulong) v4l_get_vf_handle(vb2_v4l2->private)); + + /* bind compressed buffer to uvm */ + if 
((dw_mode != VDEC_DW_NO_AFBC) && + vb->memory == VB2_MEMORY_DMABUF && + bind_comp_buffer_to_uvm(ctx, buf)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "fail to bind comp buffer\n"); + return; + } + + /* DI hook must be detached if the dmabuff be reused. */ + if (ctx->vpp_cfg.enable_local_buf) { + struct dma_buf *dma = vb->planes[0].dbuf; + + if (dmabuf_is_uvm(dma) && + uvm_detach_hook_mod(dma, VF_PROCESS_DI) < 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "dmabuf without attach DI hook.\n"); + } + } + + task_chain_clean(fb->task); + + ctx->cap_pool.seq[ctx->cap_pool.in++] = + (V4L_CAP_BUFF_IN_M2M << 16 | vb->index); + v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2); + buf->que_in_m2m = true; + + fb->status = FB_ST_INIT; + ATRACE_COUNTER("VC_IN_VSINK-2.storage", vb->index); + + /* check dpb ready */ + aml_check_dpb_ready(ctx); + } else { + struct vframe_s *vf = fb->vframe; + struct task_chain_s *task = fb->task; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_OUTPUT, + "IN__BUFF (%s, st:%d, seq:%d) vb:(%d, %px), vf:(%d, %px), ts:%lld, " + "Y:(%lx, %u) C/U:(%lx, %u) V:(%lx, %u)\n", + ctx->ada_ctx->frm_name, fb->status, vf ? vf->index_disp : -1, + vb->index, vb, + vf ? vf->index & 0xff : -1, vf, + vf ? vf->timestamp : 0, + fb->m.mem[0].addr, fb->m.mem[0].size, + fb->m.mem[1].addr, fb->m.mem[1].size, + fb->m.mem[2].addr, fb->m.mem[2].size); + + ATRACE_COUNTER("VC_IN_VSINK-4.recycle", vb->index); + + task->recycle(task, TASK_TYPE_V4L_SINK); + } + + wake_up_interruptible(&ctx->cap_wq); + return; + } + + v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb)); + + if (ctx->state != AML_STATE_INIT) { + return; + } + + buf->used = true; + vb2_v4l2 = to_vb2_v4l2_buffer(vb); + buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb); + if (buf->lastframe) { + /* This shouldn't happen. Just in case. 
*/ + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Invalid flush buffer.\n"); + buf->used = false; + v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + if (ctx->is_drm_mode && (vb->memory == VB2_MEMORY_DMABUF)) + wake_up_interruptible(&ctx->wq); + + return; + } + + src_mem.index = vb->index; + src_mem.vaddr = vb2_plane_vaddr(vb, 0); + src_mem.addr = sg_dma_address(buf->out_sgt->sgl); + src_mem.size = vb->planes[0].bytesused; + src_mem.model = vb->memory; + src_mem.timestamp = vb->timestamp; + src_mem.meta_ptr = (ulong)buf->meta_data; + + if (vdec_if_probe(ctx, &src_mem, NULL)) { + buf->used = false; + v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + + if (ctx->is_drm_mode && + (src_mem.model == VB2_MEMORY_DMABUF)) { + wake_up_interruptible(&ctx->wq); + } else { + v4l2_buff_done(to_vb2_v4l2_buffer(vb), + VB2_BUF_STATE_DONE); + } + + return; + } + + /* + * If on model dmabuf must remove the buffer + * because this data has been consumed by hw. + */ + buf->used = false; + v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + + if (ctx->is_drm_mode && + (src_mem.model == VB2_MEMORY_DMABUF)) { + wake_up_interruptible(&ctx->wq); + } else if (ctx->param_sets_from_ucode) { + v4l2_buff_done(to_vb2_v4l2_buffer(vb), + VB2_BUF_STATE_DONE); + } + + if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "GET_PARAM_PICTURE_INFO err\n"); + return; + } + + if (!ctx->picinfo.dpb_frames) + return; + + v4l_buf_size_decision(ctx); + ctx->last_decoded_picinfo = ctx->picinfo; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "Picture buffer count: dec:%u, vpp:%u, ge2d:%u, margin:%u, total:%u\n", + ctx->picinfo.dpb_frames, ctx->vpp_size, ctx->ge2d_size, + ctx->picinfo.dpb_margin, + CTX_BUF_TOTAL(ctx)); + + aml_vdec_dispatch_event(ctx, V4L2_EVENT_SRC_CH_RESOLUTION); + + mutex_lock(&ctx->state_lock); + if (ctx->state == AML_STATE_INIT) { + ctx->state = AML_STATE_PROBE; + ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state 
(AML_STATE_PROBE)\n"); + } + mutex_unlock(&ctx->state_lock); +} + +static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb) +{ + struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vb2_v4l2 = NULL; + struct aml_video_dec_buf *buf = NULL; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d, idx: %d\n", + __func__, vb->vb2_queue->type, vb->index); + + vb2_v4l2 = to_vb2_v4l2_buffer(vb); + buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb); + + if (buf->error) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Unrecoverable error on buffer.\n"); + ctx->state = AML_STATE_ABORT; + ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE, + "vcodec state (AML_STATE_ABORT)\n"); + } +} + +static int vb2ops_vdec_buf_init(struct vb2_buffer *vb) +{ + struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb, + struct vb2_v4l2_buffer, vb2_buf); + struct aml_video_dec_buf *buf = container_of(vb2_v4l2, + struct aml_video_dec_buf, vb); + struct vdec_v4l2_buffer *fb = &buf->frame_buffer; + u32 size, phy_addr = 0; + int i; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, type: %d, idx: %d\n", + __func__, vb->vb2_queue->type, vb->index); + + if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { + buf->lastframe = false; + } + + /* codec_mm buffers count */ + if (!V4L2_TYPE_IS_OUTPUT(vb->type)) { + if (vb->memory == VB2_MEMORY_MMAP) { + char *owner = __getname(); + + snprintf(owner, PATH_MAX, "%s-%d", "v4l-output", ctx->id); + strncpy(buf->mem_onwer, owner, sizeof(buf->mem_onwer)); + buf->mem_onwer[sizeof(buf->mem_onwer) - 1] = '\0'; + __putname(owner); + + for (i = 0; i < vb->num_planes; i++) { + size = vb->planes[i].length; + phy_addr = vb2_dma_contig_plane_dma_addr(vb, i); + buf->mem[i] = v4l_reqbufs_from_codec_mm(buf->mem_onwer, + phy_addr, size, vb->index); + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "OUT %c alloc, addr: %x, size: %u, idx: %u\n", + (i == 0? 
'Y':'C'), phy_addr, size, vb->index); + } + } else if (vb->memory == VB2_MEMORY_DMABUF) { + unsigned int dw_mode = VDEC_DW_NO_AFBC; + + for (i = 0; i < vb->num_planes; i++) { + struct dma_buf * dma; + + if (vdec_if_get_param(ctx, GET_PARAM_DW_MODE, &dw_mode)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "invalid dw_mode\n"); + return -EINVAL; + } + /* None-DW mode means single layer */ + if (dw_mode == VDEC_DW_AFBC_ONLY && i > 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "only support single plane in dw mode 0\n"); + return -EINVAL; + } + size = vb->planes[i].length; + dma = vb->planes[i].dbuf; + + if (!dmabuf_is_uvm(dma)) + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "non-uvm dmabuf\n"); + } + } + } + + if (!V4L2_TYPE_IS_OUTPUT(vb->type)) { + struct vframe_s *vf = NULL; + struct task_chain_s *task = NULL; + struct task_chain_s *task_pre = fb->task; + u32 icomp = -1; + + fb_map_table_fetch(ctx, vb, &vf, &task, &icomp); + if (vf) { + fb->task = task; + fb->vframe = vf; + vf->v4l_mem_handle = (ulong)fb; + buf->internal_index = icomp; + task_chain_update_object(task, fb); + } else { + buf->que_in_m2m = false; + + if (aml_uvm_buff_attach(vb)) + return -EFAULT; + + if (task_chain_init(&fb->task, ctx, fb, vb->index)) + return -EFAULT; + + list_add(&fb->task->node, &ctx->task_chain_pool); + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "init buffer(%s), vb idx:%d, task:(%px -> %px), addr:(%lx -> %lx), icomp:%d\n", + vf ? 
"update" : "idel", + vb->index, task_pre, fb->task, + fb->m.mem[0].addr, + (ulong) vb2_dma_contig_plane_dma_addr(vb, 0), + (int)icomp); + + update_vdec_buf_plane(ctx, fb, vb); + } + + if (V4L2_TYPE_IS_OUTPUT(vb->type)) { + ulong contig_size; + + buf->out_sgt = vb2_dma_sg_plane_desc(vb, 0); + + contig_size = dmabuf_contiguous_size(buf->out_sgt); + if (contig_size < vb->planes[0].bytesused) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "contiguous mapping is too small %lu/%u\n", + contig_size, size); + return -EFAULT; + } + } + + return 0; +} + +static void vb2ops_vdec_buf_cleanup(struct vb2_buffer *vb) +{ + struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb, + struct vb2_v4l2_buffer, vb2_buf); + struct aml_video_dec_buf *buf = container_of(vb2_v4l2, + struct aml_video_dec_buf, vb); + struct vdec_v4l2_buffer *fb = &buf->frame_buffer;; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, type: %d, idx: %d\n", + __func__, vb->vb2_queue->type, vb->index); + + if (!V4L2_TYPE_IS_OUTPUT(vb->type)) { + if (vb->memory == VB2_MEMORY_MMAP) { + int i; + + for (i = 0; i < vb->num_planes ; i++) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR, + "OUT %c clean, addr: %lx, size: %u, idx: %u\n", + (i == 0)? 'Y':'C', + buf->mem[i]->phy_addr, buf->mem[i]->buffer_size, vb->index); + v4l_freebufs_back_to_codec_mm(buf->mem_onwer, buf->mem[i]); + buf->mem[i] = NULL; + } + } + if (ctx->output_thread_ready) { + if (!is_fb_mapped(ctx, fb->m.mem[0].addr)) { + list_del(&fb->task->node); + task_chain_clean(fb->task); + task_chain_release(fb->task); + } + } + } +} + +static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(q); + + ctx->has_receive_eos = false; + ctx->v4l_resolution_change = false; + + /* vdec has ready to decode subsequence data of new resolution. 
*/ + v4l2_m2m_job_resume(ctx->dev->m2m_dev_dec, ctx->m2m_ctx); + + v4l2_m2m_set_dst_buffered(ctx->fh.m2m_ctx, true); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d\n", __func__, q->type); + + return 0; +} + +static void vb2ops_vdec_stop_streaming(struct vb2_queue *q) +{ + struct aml_video_dec_buf *buf = NULL; + struct vb2_v4l2_buffer *vb2_v4l2 = NULL; + struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(q); + int i; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, type: %d, state: %x, frame_cnt: %d\n", + __func__, q->type, ctx->state, ctx->decoded_frame_cnt); + + if (V4L2_TYPE_IS_OUTPUT(q->type)) + ctx->is_out_stream_off = true; + else + ctx->is_stream_off = true; + + if (V4L2_TYPE_IS_OUTPUT(q->type)) { + struct vb2_queue * que = v4l2_m2m_get_dst_vq(ctx->m2m_ctx); + unsigned long flags; + + cancel_work_sync(&ctx->dmabuff_recycle_work); + spin_lock_irqsave(&ctx->dmabuff_recycle_lock, flags); + INIT_KFIFO(ctx->dmabuff_recycle); + spin_unlock_irqrestore(&ctx->dmabuff_recycle_lock, flags); + + while ((vb2_v4l2 = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) + v4l2_buff_done(vb2_v4l2, VB2_BUF_STATE_ERROR); + + for (i = 0; i < q->num_buffers; ++i) { + vb2_v4l2 = to_vb2_v4l2_buffer(q->bufs[i]); + if (vb2_v4l2->vb2_buf.state == VB2_BUF_STATE_ACTIVE) + v4l2_buff_done(vb2_v4l2, VB2_BUF_STATE_ERROR); + } + + /* + * drop es frame was stored in the vdec_input + * if the capture queue have not start streaming. + */ + if (!que->streaming && + (vdec_frame_number(ctx->ada_ctx) > 0) && + (ctx->state < AML_STATE_ACTIVE)) { + ctx->state = AML_STATE_INIT; + ATRACE_COUNTER("V_ST_VSINK-state", ctx->state); + ctx->v4l_resolution_change = false; + ctx->reset_flag = V4L_RESET_MODE_NORMAL; + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "force reset to drop es frames.\n"); + wake_up_interruptible(&ctx->cap_wq); + aml_vdec_reset(ctx); + } + } else { + /* clean output cache and decoder status . 
*/ + if (ctx->state > AML_STATE_INIT) { + wake_up_interruptible(&ctx->cap_wq); + aml_vdec_reset(ctx); + } + + cancel_work_sync(&ctx->decode_work); + mutex_lock(&ctx->capture_buffer_lock); + INIT_KFIFO(ctx->capture_buffer); + mutex_unlock(&ctx->capture_buffer_lock); + + while ((vb2_v4l2 = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) + v4l2_buff_done(vb2_v4l2, VB2_BUF_STATE_ERROR); + + for (i = 0; i < q->num_buffers; ++i) { + vb2_v4l2 = to_vb2_v4l2_buffer(q->bufs[i]); + buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb); + buf->frame_buffer.status = FB_ST_FREE; + buf->frame_buffer.vframe = NULL; + buf->que_in_m2m = false; + buf->used = false; + buf->vb.flags = 0; + ctx->cap_pool.seq[i] = 0; + + if (vb2_v4l2->vb2_buf.state == VB2_BUF_STATE_ACTIVE) + v4l2_buff_done(vb2_v4l2, VB2_BUF_STATE_ERROR); + + /*v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "idx: %d, state: %d\n", + q->bufs[i]->index, q->bufs[i]->state);*/ + } + + fb_map_table_clean(ctx); + + fb_token_clean(ctx); + + ctx->buf_used_count = 0; + ctx->cap_pool.in = 0; + ctx->cap_pool.out = 0; + ctx->cap_pool.dec = 0; + ctx->cap_pool.vpp = 0; + } +} + +static void m2mops_vdec_device_run(void *priv) +{ + struct aml_vcodec_ctx *ctx = priv; + struct aml_vcodec_dev *dev = ctx->dev; + + if (ctx->output_thread_ready) + queue_work(dev->decode_workqueue, &ctx->decode_work); +} + +static int m2mops_vdec_job_ready(void *m2m_priv) +{ + struct aml_vcodec_ctx *ctx = m2m_priv; + + if (ctx->state < AML_STATE_PROBE || + ctx->state > AML_STATE_FLUSHED) + return 0; + + return 1; +} + +static void m2mops_vdec_job_abort(void *priv) +{ + struct aml_vcodec_ctx *ctx = priv; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "%s\n", __func__); +} + +static int aml_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl) +{ + struct aml_vcodec_ctx *ctx = ctrl_to_ctx(ctrl); + int ret = 0; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, + "%s, id: %d\n", __func__, ctrl->id); + + switch (ctrl->id) { + case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: + if (ctx->state >= AML_STATE_PROBE) { + 
ctrl->val = CTX_BUF_TOTAL(ctx); + } else { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Seqinfo not ready.\n"); + ctrl->val = 0; + } + break; + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: + ctrl->val = 4; + break; + case AML_V4L2_GET_INPUT_BUFFER_NUM: + if (ctx->ada_ctx != NULL) + ctrl->val = vdec_frame_number(ctx->ada_ctx); + break; + case AML_V4L2_GET_FILMGRAIN_INFO: + ctrl->val = ctx->film_grain_present; + break; + default: + ret = -EINVAL; + } + return ret; +} + +static int aml_vdec_try_s_v_ctrl(struct v4l2_ctrl *ctrl) +{ + struct aml_vcodec_ctx *ctx = ctrl_to_ctx(ctrl); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s\n", __func__); + + if (ctrl->id == AML_V4L2_SET_DRMMODE) { + ctx->is_drm_mode = ctrl->val; + ctx->param_sets_from_ucode = true; + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "set stream mode: %x\n", ctrl->val); + } else if (ctrl->id == AML_V4L2_SET_DURATION) { + vdec_set_duration(ctrl->val); + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "set duration: %x\n", ctrl->val); + } + + return 0; +} + +static const struct v4l2_ctrl_ops aml_vcodec_dec_ctrl_ops = { + .g_volatile_ctrl = aml_vdec_g_v_ctrl, + .try_ctrl = aml_vdec_try_s_v_ctrl, +}; + +static const struct v4l2_ctrl_config ctrl_st_mode = { + .name = "drm mode", + .id = AML_V4L2_SET_DRMMODE, + .ops = &aml_vcodec_dec_ctrl_ops, + .type = V4L2_CTRL_TYPE_BOOLEAN, + .flags = V4L2_CTRL_FLAG_WRITE_ONLY, + .min = 0, + .max = 1, + .step = 1, + .def = 0, +}; + +static const struct v4l2_ctrl_config ctrl_gt_input_buffer_number = { + .name = "input buffer number", + .id = AML_V4L2_GET_INPUT_BUFFER_NUM, + .ops = &aml_vcodec_dec_ctrl_ops, + .type = V4L2_CTRL_TYPE_INTEGER, + .flags = V4L2_CTRL_FLAG_VOLATILE, + .min = 0, + .max = 128, + .step = 1, + .def = 0, +}; + +static const struct v4l2_ctrl_config ctrl_st_duration = { + .name = "duration", + .id = AML_V4L2_SET_DURATION, + .ops = &aml_vcodec_dec_ctrl_ops, + .type = V4L2_CTRL_TYPE_INTEGER, + .flags = V4L2_CTRL_FLAG_WRITE_ONLY, + .min = 0, + .max = 96000, + .step = 1, + .def = 0, +}; + 
+static const struct v4l2_ctrl_config ctrl_gt_filmgrain_info = { + .name = "filmgrain info", + .id = AML_V4L2_GET_FILMGRAIN_INFO, + .ops = &aml_vcodec_dec_ctrl_ops, + .type = V4L2_CTRL_TYPE_INTEGER, + .flags = V4L2_CTRL_FLAG_VOLATILE, + .min = 0, + .max = 1, + .step = 1, + .def = 0, +}; + +int aml_vcodec_dec_ctrls_setup(struct aml_vcodec_ctx *ctx) +{ + int ret; + struct v4l2_ctrl *ctrl; + + v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 3); + ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl, + &aml_vcodec_dec_ctrl_ops, + V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, + 0, 32, 1, 2); + if ((ctrl == NULL) || (ctx->ctrl_hdl.error)) { + ret = ctx->ctrl_hdl.error; + goto err; + } + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl, + &aml_vcodec_dec_ctrl_ops, + V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, + 0, 32, 1, 8); + if ((ctrl == NULL) || (ctx->ctrl_hdl.error)) { + ret = ctx->ctrl_hdl.error; + goto err; + } + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_custom(&ctx->ctrl_hdl, &ctrl_st_mode, NULL); + if ((ctrl == NULL) || (ctx->ctrl_hdl.error)) { + ret = ctx->ctrl_hdl.error; + goto err; + } + + ctrl = v4l2_ctrl_new_custom(&ctx->ctrl_hdl, &ctrl_gt_input_buffer_number, NULL); + if ((ctrl == NULL) || (ctx->ctrl_hdl.error)) { + ret = ctx->ctrl_hdl.error; + goto err; + } + + ctrl = v4l2_ctrl_new_custom(&ctx->ctrl_hdl, &ctrl_st_duration, NULL); + if ((ctrl == NULL) || (ctx->ctrl_hdl.error)) { + ret = ctx->ctrl_hdl.error; + goto err; + } + + ctrl = v4l2_ctrl_new_custom(&ctx->ctrl_hdl, &ctrl_gt_filmgrain_info, NULL); + if ((ctrl == NULL) || (ctx->ctrl_hdl.error)) { + ret = ctx->ctrl_hdl.error; + goto err; + } + + v4l2_ctrl_handler_setup(&ctx->ctrl_hdl); + + return 0; +err: + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Adding control failed %d\n", + ctx->ctrl_hdl.error); + v4l2_ctrl_handler_free(&ctx->ctrl_hdl); + return ret; +} + +static int vidioc_vdec_g_parm(struct file *file, void *fh, + struct v4l2_streamparm *a) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(fh); 
+ struct vb2_queue *dst_vq; + + dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (!dst_vq) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "no vb2 queue for type=%d\n", V4L2_BUF_TYPE_VIDEO_CAPTURE); + return -EINVAL; + } + + if (!V4L2_TYPE_IS_MULTIPLANAR(a->type) && dst_vq->is_multiplanar) + return -EINVAL; + + if ((a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) || (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) { + if (vdec_if_get_param(ctx, GET_PARAM_CONFIG_INFO, &ctx->config.parm.dec)) + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "GET_PARAM_CONFIG_INFO err\n"); + else + memcpy(a->parm.raw_data, ctx->config.parm.data, + sizeof(a->parm.raw_data)); + } + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s\n", __func__); + + return 0; +} + +static int check_dec_cfginfo(struct aml_vdec_cfg_infos *cfg) +{ + if (cfg->double_write_mode != 0 && + cfg->double_write_mode != 1 && + cfg->double_write_mode != 2 && + cfg->double_write_mode != 3 && + cfg->double_write_mode != 4 && + cfg->double_write_mode != 16 && + cfg->double_write_mode != 0x21 && + cfg->double_write_mode != 0x100 && + cfg->double_write_mode != 0x200) { + pr_err("invalid double write mode %d\n", cfg->double_write_mode); + return -1; + } + if (cfg->ref_buf_margin > 20) { + pr_err("invalid margin %d\n", cfg->ref_buf_margin); + return -1; + } + + if (mandatory_dw_mmu) { + cfg->double_write_mode = 0x21; + } + + pr_info("double write mode %d margin %d\n", + cfg->double_write_mode, cfg->ref_buf_margin); + return 0; +} + +static int vidioc_vdec_s_parm(struct file *file, void *fh, + struct v4l2_streamparm *a) +{ + struct aml_vcodec_ctx *ctx = fh_to_ctx(fh); + struct vb2_queue *dst_vq; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s\n", __func__); + + dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (!dst_vq) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "no vb2 queue for type=%d\n", V4L2_BUF_TYPE_VIDEO_CAPTURE); + return -EINVAL; + } + + if (!V4L2_TYPE_IS_MULTIPLANAR(a->type) && dst_vq->is_multiplanar) + 
return -EINVAL; + + if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT || + a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + struct aml_dec_params *in = + (struct aml_dec_params *) a->parm.raw_data; + struct aml_dec_params *dec = &ctx->config.parm.dec; + + ctx->config.type = V4L2_CONFIG_PARM_DECODE; + + if (in->parms_status & V4L2_CONFIG_PARM_DECODE_CFGINFO) { + if (check_dec_cfginfo(&in->cfg)) + return -EINVAL; + dec->cfg = in->cfg; + } + if (in->parms_status & V4L2_CONFIG_PARM_DECODE_PSINFO) + dec->ps = in->ps; + if (in->parms_status & V4L2_CONFIG_PARM_DECODE_HDRINFO) + dec->hdr = in->hdr; + if (in->parms_status & V4L2_CONFIG_PARM_DECODE_CNTINFO) + dec->cnt = in->cnt; + + dec->parms_status |= in->parms_status; + + /* aml v4l driver parms config. */ + ctx->vpp_cfg.enable_nr = + (dec->cfg.metadata_config_flag & (1 << 15)); + if (force_enable_nr) + ctx->vpp_cfg.enable_nr = true; + + ctx->vpp_cfg.enable_local_buf = + (dec->cfg.metadata_config_flag & (1 << 14)); + if (force_enable_di_local_buffer) + ctx->vpp_cfg.enable_local_buf = true; + + ctx->internal_dw_scale = dec->cfg.metadata_config_flag & (1 << 13); + ctx->second_field_pts_mode = dec->cfg.metadata_config_flag & (1 << 12); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s parms:%x metadata_config_flag: 0x%x\n", + __func__, in->parms_status, dec->cfg.metadata_config_flag); + + memset(a->parm.output.reserved, 0, sizeof(a->parm.output.reserved)); + } else { + memset(a->parm.capture.reserved, 0, sizeof(a->parm.capture.reserved)); + } + + return 0; +} + + +const struct v4l2_m2m_ops aml_vdec_m2m_ops = { + .device_run = m2mops_vdec_device_run, + .job_ready = m2mops_vdec_job_ready, + .job_abort = m2mops_vdec_job_abort, +}; + +static const struct vb2_ops aml_vdec_vb2_ops = { + .queue_setup = vb2ops_vdec_queue_setup, + .buf_prepare = vb2ops_vdec_buf_prepare, + .buf_queue = vb2ops_vdec_buf_queue, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .buf_init = vb2ops_vdec_buf_init, + .buf_cleanup = 
vb2ops_vdec_buf_cleanup, + .buf_finish = vb2ops_vdec_buf_finish, + .start_streaming = vb2ops_vdec_start_streaming, + .stop_streaming = vb2ops_vdec_stop_streaming, +}; + +const struct v4l2_ioctl_ops aml_vdec_ioctl_ops = { + .vidioc_streamon = vidioc_decoder_streamon, + .vidioc_streamoff = vidioc_decoder_streamoff, + .vidioc_reqbufs = vidioc_decoder_reqbufs, + .vidioc_querybuf = vidioc_vdec_querybuf, + .vidioc_expbuf = vidioc_vdec_expbuf, + //.vidioc_g_ctrl = vidioc_vdec_g_ctrl, + + .vidioc_qbuf = vidioc_vdec_qbuf, + .vidioc_dqbuf = vidioc_vdec_dqbuf, + + .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_out, + .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap_out, + .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_cap_out, + .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_cap_out, + + .vidioc_s_fmt_vid_cap_mplane = vidioc_vdec_s_fmt, + .vidioc_s_fmt_vid_cap = vidioc_vdec_s_fmt, + .vidioc_s_fmt_vid_out_mplane = vidioc_vdec_s_fmt, + .vidioc_s_fmt_vid_out = vidioc_vdec_s_fmt, + .vidioc_g_fmt_vid_cap_mplane = vidioc_vdec_g_fmt, + .vidioc_g_fmt_vid_cap = vidioc_vdec_g_fmt, + .vidioc_g_fmt_vid_out_mplane = vidioc_vdec_g_fmt, + .vidioc_g_fmt_vid_out = vidioc_vdec_g_fmt, + + .vidioc_create_bufs = vidioc_vdec_create_bufs, + + //fixme + //.vidioc_enum_fmt_vid_cap_mplane = vidioc_vdec_enum_fmt_vid_cap_mplane, + //.vidioc_enum_fmt_vid_out_mplane = vidioc_vdec_enum_fmt_vid_out_mplane, + .vidioc_enum_fmt_vid_cap = vidioc_vdec_enum_fmt_vid_cap_mplane, + .vidioc_enum_fmt_vid_out = vidioc_vdec_enum_fmt_vid_out_mplane, + .vidioc_enum_framesizes = vidioc_enum_framesizes, + + .vidioc_querycap = vidioc_vdec_querycap, + .vidioc_subscribe_event = vidioc_vdec_subscribe_evt, + .vidioc_unsubscribe_event = vidioc_vdec_event_unsubscribe, + .vidioc_g_selection = vidioc_vdec_g_selection, + .vidioc_s_selection = vidioc_vdec_s_selection, + + .vidioc_decoder_cmd = vidioc_decoder_cmd, + .vidioc_try_decoder_cmd = vidioc_try_decoder_cmd, + + .vidioc_g_parm = vidioc_vdec_g_parm, + .vidioc_s_parm = 
vidioc_vdec_s_parm, +}; + +int aml_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq, + struct vb2_queue *dst_vq) +{ + struct aml_vcodec_ctx *ctx = priv; + int ret = 0; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "%s\n", __func__); + + src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR; + src_vq->drv_priv = ctx; + src_vq->buf_struct_size = sizeof(struct aml_video_dec_buf); + src_vq->ops = &aml_vdec_vb2_ops; + src_vq->mem_ops = &vb2_dma_sg_memops; + src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + src_vq->lock = &ctx->dev->dev_mutex; + ret = vb2_queue_init(src_vq); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Failed to initialize videobuf2 queue(output)\n"); + return ret; + } + + dst_vq->type = multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : + V4L2_BUF_TYPE_VIDEO_CAPTURE; + dst_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR; + dst_vq->drv_priv = ctx; + dst_vq->buf_struct_size = sizeof(struct aml_video_dec_buf); + dst_vq->ops = &aml_vdec_vb2_ops; + dst_vq->mem_ops = &vb2_dma_contig_memops; + dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + dst_vq->lock = &ctx->dev->dev_mutex; + dst_vq->min_buffers_needed = 1; + ret = vb2_queue_init(dst_vq); + if (ret) { + vb2_queue_release(src_vq); + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "Failed to initialize videobuf2 queue(capture)\n"); + } + + return ret; +} +
diff --git a/drivers/amvdec_ports/aml_vcodec_dec.h b/drivers/amvdec_ports/aml_vcodec_dec.h new file mode 100644 index 0000000..a23522c --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_dec.h
/*
* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Description: decoder-side buffer/context declarations shared by the
* amvdec_ports V4L2 decoder implementation.
*/
#ifndef _AML_VCODEC_DEC_H_
#define _AML_VCODEC_DEC_H_

#include <linux/kref.h>
#include <linux/scatterlist.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-v4l2.h>
#include <linux/amlogic/media/codec_mm/codec_mm.h>
#include <linux/amlogic/media/video_sink/v4lvideo_ext.h>
#include "aml_vcodec_util.h"
#include "aml_task_chain.h"

#define VCODEC_CAPABILITY_4K_DISABLED	0x10
#define VCODEC_DEC_4K_CODED_WIDTH	4096U
#define VCODEC_DEC_4K_CODED_HEIGHT	2304U
/* default (non-4K) maximum coded dimensions */
#define AML_VDEC_MAX_W			2048U
#define AML_VDEC_MAX_H			1088U

#define AML_VDEC_IRQ_STATUS_DEC_SUCCESS	0x10000
/* NOTE(review): shadows the uapi V4L2_BUF_FLAG_LAST definition — confirm
 * it matches the kernel's videodev2.h value on the target branch. */
#define V4L2_BUF_FLAG_LAST		0x00100000

/* frame-buffer memory layouts, see vdec_v4l2_buffer.mem_type */
#define VDEC_GATHER_MEMORY_TYPE		0
#define VDEC_SCATTER_MEMORY_TYPE	1

#define META_DATA_SIZE	(256)
#define MD_BUF_SIZE	(1024)
/* NOTE(review): 8196 looks like a typo for 8192 — confirm intended size */
#define COMP_BUF_SIZE	(8196)
#define SEI_BUF_SIZE	(2 * 12 * 1024)
/* auxiliary-buffer type tags used by the aux_infos callbacks */
#define SEI_TYPE	(1)
#define DV_TYPE	(2)


/*
 * struct vdec_v4l2_buffer - decoder frame buffer
 * @mem_type	: gather or scatter memory.
 * @num_planes	: used number of the plane
 * @m.mem[4]	: array mem for used planes,
 *		  mem[0]: Y, mem[1]: C/U, mem[2]: V
 * @m.vf_fd	: the file handle of video frame
 * @status	: frame buffer status (vdec_fb_status)
 * @buf_idx	: the index from vb2 index.
 * @vframe	: store the vframe that get from caller.
 * @task	: the context of task chain manager.
 */

struct vdec_v4l2_buffer {
	int	mem_type;
	int	num_planes;
	/* plane memory and the dma-buf fd are mutually exclusive views */
	union {
		struct aml_vcodec_mem mem[4];
		u32 vf_fd;
	} m;
	u32	status;
	u32	buf_idx;
	void	*vframe;

	struct task_chain_s *task;
};

/**
 * struct aml_video_dec_buf - Private data related to each VB2 buffer.
 * @vb:		VB2 buffer
 * @list:	link list
 * @used:	Capture buffer contain decoded frame data and keep in
 *			codec data structure
 * @lastframe:		Input buffer is last buffer - EOS
 * @error:		An unrecoverable error occurs on this buffer.
 * @frame_buffer:	Decode status, and buffer information of Capture buffer
 *
 * Note : These status information help us track and debug buffer state
 */
struct aml_video_dec_buf {
	struct vb2_v4l2_buffer vb;
	struct list_head list;

	struct vdec_v4l2_buffer frame_buffer;
	/* per-buffer data handed to userspace via the anon-inode fd */
	struct file_private_data privdata;
	struct codec_mm_s *mem[2];
	/* NOTE(review): "onwer" is a typo for "owner" kept for ABI/name
	 * stability — referenced elsewhere in the driver. */
	char mem_onwer[32];
	bool used;
	bool que_in_m2m;
	bool lastframe;
	bool error;

	/* internal compressed buffer */
	unsigned int internal_index;

	ulong vpp_buf_handle;
	ulong ge2d_buf_handle;

	/*4 bytes data for data len*/
	char meta_data[META_DATA_SIZE + 4];

	/* scatter lists for the OUTPUT and CAPTURE sides respectively */
	struct sg_table	*out_sgt;
	struct sg_table	*cap_sgt;
};

extern const struct v4l2_ioctl_ops aml_vdec_ioctl_ops;
extern const struct v4l2_m2m_ops aml_vdec_m2m_ops;

/*
 * aml_vdec_lock/aml_vdec_unlock are for ctx instance to
 * get/release lock before/after access decoder hw.
 * aml_vdec_lock get decoder hw lock and set curr_ctx
 * to ctx instance that get lock
 */
void aml_vdec_unlock(struct aml_vcodec_ctx *ctx);
void aml_vdec_lock(struct aml_vcodec_ctx *ctx);
/* m2m queue-init callback: sets up OUTPUT/CAPTURE vb2 queues */
int aml_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
	struct vb2_queue *dst_vq);
void aml_vcodec_dec_set_default_params(struct aml_vcodec_ctx *ctx);
void aml_vcodec_dec_release(struct aml_vcodec_ctx *ctx);
int aml_vcodec_dec_ctrls_setup(struct aml_vcodec_ctx *ctx);
void wait_vcodec_ending(struct aml_vcodec_ctx *ctx);
void vdec_frame_buffer_release(void *data);
/* queue a V4L2_EVENT_* change notification to userspace */
void aml_vdec_dispatch_event(struct aml_vcodec_ctx *ctx, u32 changes);
/* resolve the vframe behind a v4l2 meta fd; NULL if fd is not ours */
void* v4l_get_vf_handle(int fd);
void aml_v4l_ctx_release(struct kref *kref);
void dmabuff_recycle_worker(struct work_struct *work);
void aml_buffer_status(struct aml_vcodec_ctx *ctx);
void aml_vdec_basic_information(struct aml_vcodec_ctx *ctx);

/* SEI / DolbyVision auxiliary buffer management (flag = SEI_TYPE/DV_TYPE) */
void aml_alloc_buffer(struct aml_vcodec_ctx *ctx, int flag);
void aml_free_buffer(struct aml_vcodec_ctx *ctx, int flag);
void aml_free_one_sei_buffer(struct aml_vcodec_ctx *ctx, char **addr, int *size, int idx);
void aml_bind_sei_buffer(struct aml_vcodec_ctx *v4l, char **addr, int *size, int *idx);
void aml_bind_dv_buffer(struct aml_vcodec_ctx *v4l, char **comp_buf, char **md_buf);

#endif /* _AML_VCODEC_DEC_H_ */
diff --git a/drivers/amvdec_ports/aml_vcodec_dec_drv.c b/drivers/amvdec_ports/aml_vcodec_dec_drv.c new file mode 100644 index 0000000..18f0254 --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_dec_drv.c
/*
* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Description: platform driver and file operations for the amvdec_ports
* V4L2 mem2mem decoder device node.
*/

#define DEBUG
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
#include <linux/kthread.h>
#include <linux/compat.h>
#include "aml_vcodec_drv.h"
#include "aml_vcodec_dec.h"
#include "aml_vcodec_util.h"
#include "aml_vcodec_vpp.h"
#include <linux/file.h>
#include <linux/anon_inodes.h>

#define VDEC_HW_ACTIVE	0x10
#define VDEC_IRQ_CFG	0x11
#define VDEC_IRQ_CLR	0x10
#define VDEC_IRQ_CFG_REG	0xa4

/* private ioctls served by v4l2_vcodec_ioctl() below, shared with the
 * v4lvideo sink; ALLOC_FD/CHECK_FD manage anon-inode meta-data fds. */
#define V4LVIDEO_IOC_MAGIC  'I'
#define V4LVIDEO_IOCTL_ALLOC_FD				_IOW(V4LVIDEO_IOC_MAGIC, 0x02, int)
#define V4LVIDEO_IOCTL_CHECK_FD				_IOW(V4LVIDEO_IOC_MAGIC, 0x03, int)
#define V4LVIDEO_IOCTL_SET_CONFIG_PARAMS	_IOWR(V4LVIDEO_IOC_MAGIC, 0x04, struct v4l2_config_parm)
#define V4LVIDEO_IOCTL_GET_CONFIG_PARAMS	_IOWR(V4LVIDEO_IOC_MAGIC, 0x05, struct v4l2_config_parm)

/* module params (declared at the bottom of this file) */
bool param_sets_from_ucode = 1;
bool enable_drm_mode;
extern void aml_vdec_pic_info_update(struct aml_vcodec_ctx *ctx);

/*
 * fops_vcodec_open() - .open of the decoder video node.
 *
 * Allocates and wires up one decoder context: the v4l2 file handle, the
 * m2m context (which in turn initializes both vb2 queues through
 * aml_vcodec_dec_queue_init), the controls, the auxiliary (SEI/DV)
 * buffer callbacks, and the capture worker thread. On any failure the
 * partially constructed context is torn down via the labels at the end.
 */
static int fops_vcodec_open(struct file *file)
{
	struct aml_vcodec_dev *dev = video_drvdata(file);
	struct aml_vcodec_ctx *ctx = NULL;
	struct aml_video_dec_buf *aml_buf = NULL;
	int ret = 0;
	struct vb2_queue *src_vq;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	kref_init(&ctx->ctx_ref);

	/* placeholder buffer queued to signal EOS (see lastframe below) */
	aml_buf = kzalloc(sizeof(*aml_buf), GFP_KERNEL);
	if (!aml_buf) {
		kfree(ctx);
		return -ENOMEM;
	}

	ctx->meta_infos.meta_bufs = vzalloc(sizeof(struct meta_data) * V4L_CAP_BUFF_MAX);
	if (ctx->meta_infos.meta_bufs == NULL) {
		kfree(aml_buf);
		kfree(ctx);
		return -ENOMEM;
	}

	mutex_lock(&dev->dev_mutex);
	ctx->empty_flush_buf = aml_buf;
	ctx->id = dev->id_counter++;
	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);
	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->vdec_thread_list);
	INIT_LIST_HEAD(&ctx->task_chain_pool);
	dev->filp = file;
	ctx->dev = dev;
	init_waitqueue_head(&ctx->queue);
	mutex_init(&ctx->capture_buffer_lock);
	mutex_init(&ctx->buff_done_lock);
	mutex_init(&ctx->state_lock);
	mutex_init(&ctx->comp_lock);
	spin_lock_init(&ctx->slock);
	spin_lock_init(&ctx->tsplock);
	spin_lock_init(&ctx->dmabuff_recycle_lock);
	init_completion(&ctx->comp);
	init_waitqueue_head(&ctx->wq);
	init_waitqueue_head(&ctx->cap_wq);
	init_waitqueue_head(&ctx->post_done_wq);
	INIT_WORK(&ctx->dmabuff_recycle_work, dmabuff_recycle_worker);
	INIT_KFIFO(ctx->dmabuff_recycle);
	INIT_KFIFO(ctx->capture_buffer);

	ctx->post_to_upper_done = true;
	ctx->param_sets_from_ucode = param_sets_from_ucode ? 1 : 0;

	/* DRM (secure) playback forces ucode-parsed parameter sets */
	if (enable_drm_mode) {
		ctx->is_drm_mode = true;
		ctx->param_sets_from_ucode = true;
	}

	ctx->type = AML_INST_DECODER;
	ret = aml_vcodec_dec_ctrls_setup(ctx);
	if (ret) {
		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
			"Failed to setup vcodec controls\n");
		goto err_ctrls_setup;
	}
	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_dec, ctx,
		&aml_vcodec_dec_queue_init);
	if (IS_ERR((__force void *)ctx->m2m_ctx)) {
		ret = PTR_ERR((__force void *)ctx->m2m_ctx);
		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
			"Failed to v4l2_m2m_ctx_init() (%d)\n", ret);
		goto err_m2m_ctx_init;
	}
	src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
		V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
	ctx->output_thread_ready = true;
	/* the EOS placeholder must look like it belongs to the OUTPUT queue */
	ctx->empty_flush_buf->vb.vb2_buf.vb2_queue = src_vq;
	ctx->empty_flush_buf->lastframe = true;
	ctx->vdec_pic_info_update = aml_vdec_pic_info_update;
	aml_vcodec_dec_set_default_params(ctx);
	ctx->is_stream_off = true;

	/* auxiliary buffer (SEI / DolbyVision metadata) hooks */
	ctx->aux_infos.dv_index = 0;
	ctx->aux_infos.sei_index = 0;
	ctx->aux_infos.alloc_buffer = aml_alloc_buffer;
	ctx->aux_infos.free_buffer = aml_free_buffer;
	ctx->aux_infos.bind_sei_buffer = aml_bind_sei_buffer;
	ctx->aux_infos.bind_dv_buffer = aml_bind_dv_buffer;
	ctx->aux_infos.free_one_sei_buffer = aml_free_one_sei_buffer;

	ret = aml_thread_start(ctx, aml_thread_capture_worker, AML_THREAD_CAPTURE, "cap");
	if (ret) {
		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
			"Failed to creat capture thread.\n");
		goto err_creat_thread;
	}

	list_add(&ctx->list, &dev->ctx_list);

	mutex_unlock(&dev->dev_mutex);
	v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "%s decoder %lx\n",
		dev_name(&dev->plat_dev->dev), (ulong)ctx);

	return 0;

	/* Deinit when failure occurred */
err_creat_thread:
	v4l2_m2m_ctx_release(ctx->m2m_ctx);
err_m2m_ctx_init:
	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
err_ctrls_setup:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	vfree(ctx->meta_infos.meta_bufs);
	kfree(ctx->empty_flush_buf);
	kfree(ctx);
mutex_unlock(&dev->dev_mutex); + + return ret; +} + +static int fops_vcodec_release(struct file *file) +{ + struct aml_vcodec_dev *dev = video_drvdata(file); + struct aml_vcodec_ctx *ctx = fh_to_ctx(file->private_data); + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "release decoder %lx\n", (ulong) ctx); + mutex_lock(&dev->dev_mutex); + + aml_thread_stop(ctx); + wait_vcodec_ending(ctx); + vb2_queue_release(&ctx->m2m_ctx->cap_q_ctx.q); + vb2_queue_release(&ctx->m2m_ctx->out_q_ctx.q); + + aml_vcodec_dec_release(ctx); + v4l2_fh_del(&ctx->fh); + v4l2_fh_exit(&ctx->fh); + v4l2_ctrl_handler_free(&ctx->ctrl_hdl); + + list_del_init(&ctx->list); + + kfree(ctx->empty_flush_buf); + kref_put(&ctx->ctx_ref, aml_v4l_ctx_release); + mutex_unlock(&dev->dev_mutex); + return 0; +} + +static int v4l2video_file_release(struct inode *inode, struct file *file) +{ + v4l_dbg(0, V4L_DEBUG_CODEC_BUFMGR, "file: %lx, data: %lx\n", + (ulong) file, (ulong) file->private_data); + + if (file->private_data) + vdec_frame_buffer_release(file->private_data); + + return 0; +} + +const struct file_operations v4l2_file_fops = { + .release = v4l2video_file_release, +}; + +int v4l2_alloc_fd(int *fd) +{ + struct file *file = NULL; + int file_fd = get_unused_fd_flags(O_CLOEXEC); + + if (file_fd < 0) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "get unused fd fail\n"); + return -ENODEV; + } + + file = anon_inode_getfile("v4l2_meta_file", &v4l2_file_fops, NULL, 0); + if (IS_ERR(file)) { + put_unused_fd(file_fd); + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "anon_inode_getfile fail\n"); + return -ENODEV; + } + + file->private_data = + kzalloc(sizeof(struct file_private_data), GFP_KERNEL); + if (!file->private_data) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "alloc priv data faild.\n"); + return -ENOMEM; + } + + v4l_dbg(0, V4L_DEBUG_CODEC_BUFMGR, "fd %d, file %lx, data: %lx\n", + file_fd, (ulong) file, (ulong) file->private_data); + + fd_install(file_fd, file); + *fd = file_fd; + + return 0; +} + +extern const struct 
file_operations v4l2_file_fops; +bool is_v4l2_buf_file(struct file *file) +{ + return file->f_op == &v4l2_file_fops; +} + +int v4l2_check_fd(int fd) +{ + struct file *file; + + file = fget(fd); + + if (!file) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "fget fd %d fail!\n", fd); + return -EBADF; + } + + if (!is_v4l2_buf_file(file)) { + fput(file); + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "is_v4l2_buf_file fail!\n"); + return -1; + } + + fput(file); + + v4l_dbg(0, V4L_DEBUG_CODEC_EXINFO, + "ioctl ok, comm %s, pid %d\n", + current->comm, current->pid); + + return 0; +} + +int dmabuf_fd_install_data(int fd, void* data, u32 size) +{ + struct file *file; + + file = fget(fd); + + if (!file) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "fget fd %d fail!, comm %s, pid %d\n", + fd, current->comm, current->pid); + return -EBADF; + } + + if (!is_v4l2_buf_file(file)) { + fput(file); + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "the buf file checked fail!\n"); + return -EBADF; + } + + memcpy(file->private_data, data, size); + + fput(file); + + return 0; +} + +void* v4l_get_vf_handle(int fd) +{ + struct file *file; + struct file_private_data *data = NULL; + void *vf_handle = 0; + + file = fget(fd); + + if (!file) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "fget fd %d fail!, comm %s, pid %d\n", + fd, current->comm, current->pid); + return NULL; + } + + if (!is_v4l2_buf_file(file)) { + fput(file); +#if 0 + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "the buf file checked fail!\n"); +#endif + return NULL; + } + + data = (struct file_private_data*) file->private_data; + if (data) { + vf_handle = &data->vf; + v4l_dbg(0, V4L_DEBUG_CODEC_BUFMGR, "file: %lx, data: %lx\n", + (ulong) file, (ulong) data); + } + + fput(file); + + return vf_handle; +} + + +static long v4l2_vcodec_ioctl(struct file *file, + unsigned int cmd, + ulong arg) +{ + long ret = 0; + void __user *argp = (void __user *)arg; + + switch (cmd) { + case V4LVIDEO_IOCTL_ALLOC_FD: + { + u32 v4lvideo_fd = 0; + + ret = v4l2_alloc_fd(&v4lvideo_fd); + if (ret 
!= 0) + break; + put_user(v4lvideo_fd, (u32 __user *)argp); + v4l_dbg(0, V4L_DEBUG_CODEC_EXINFO, + "V4LVIDEO_IOCTL_ALLOC_FD fd %d\n", + v4lvideo_fd); + break; + } + case V4LVIDEO_IOCTL_CHECK_FD: + { + u32 v4lvideo_fd = 0; + + get_user(v4lvideo_fd, (u32 __user *)argp); + ret = v4l2_check_fd(v4lvideo_fd); + if (ret != 0) + break; + v4l_dbg(0, V4L_DEBUG_CODEC_EXINFO, + "V4LVIDEO_IOCTL_CHECK_FD fd %d\n", + v4lvideo_fd); + break; + } + case V4LVIDEO_IOCTL_SET_CONFIG_PARAMS: + { + struct aml_vcodec_ctx *ctx = NULL; + + if (is_v4l2_buf_file(file)) + break; + + ctx = fh_to_ctx(file->private_data); + if (copy_from_user((void *)&ctx->config, + (void *)argp, sizeof(ctx->config))) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "set config parm err\n"); + return -EFAULT; + } + break; + } + case V4LVIDEO_IOCTL_GET_CONFIG_PARAMS: + { + struct aml_vcodec_ctx *ctx = NULL; + + if (is_v4l2_buf_file(file)) + break; + + ctx = fh_to_ctx(file->private_data); + if (copy_to_user((void *)argp, + (void *)&ctx->config, sizeof(ctx->config))) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "get config parm err\n"); + return -EFAULT; + } + break; + } + default: + return video_ioctl2(file, cmd, arg); + } + return ret; +} + +#ifdef CONFIG_COMPAT +static long v4l2_compat_ioctl(struct file *file, + unsigned int cmd, ulong arg) +{ + long ret = 0; + + ret = v4l2_vcodec_ioctl(file, cmd, (ulong)compat_ptr(arg)); + return ret; +} +#endif + +static const struct v4l2_file_operations aml_vcodec_fops = { + .owner = THIS_MODULE, + .open = fops_vcodec_open, + .release = fops_vcodec_release, + .poll = v4l2_m2m_fop_poll, + .unlocked_ioctl = v4l2_vcodec_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = v4l2_compat_ioctl, +#endif + .mmap = v4l2_m2m_fop_mmap, +}; + +static ssize_t status_show(struct class *cls, + struct class_attribute *attr, char *buf) +{ + struct aml_vcodec_dev *dev = container_of(cls, + struct aml_vcodec_dev, v4ldec_class); + struct aml_vcodec_ctx *ctx = NULL; + char *pbuf = buf; + + 
mutex_lock(&dev->dev_mutex); + + if (list_empty(&dev->ctx_list)) { + pbuf += sprintf(pbuf, "No v4ldec context.\n"); + goto out; + } + + list_for_each_entry(ctx, &dev->ctx_list, list) { + /* basic information. */ + aml_vdec_basic_information(ctx); + + /* buffers status. */ + aml_buffer_status(ctx); + } +out: + mutex_unlock(&dev->dev_mutex); + + return pbuf - buf; +} + +static CLASS_ATTR_RO(status); + +static struct attribute *v4ldec_class_attrs[] = { + &class_attr_status.attr, + NULL +}; + +ATTRIBUTE_GROUPS(v4ldec_class); + +static int aml_vcodec_probe(struct platform_device *pdev) +{ + struct aml_vcodec_dev *dev; + struct video_device *vfd_dec; + int ret = 0; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + INIT_LIST_HEAD(&dev->ctx_list); + dev->plat_dev = pdev; + atomic_set(&dev->vpp_count, 0); + + mutex_init(&dev->dec_mutex); + mutex_init(&dev->dev_mutex); + spin_lock_init(&dev->irqlock); + + snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s", + "[/AML_V4L2_VDEC]"); + + ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); + if (ret) { + dev_err(&pdev->dev, "v4l2_device_register err=%d\n", ret); + goto err_res; + } + + init_waitqueue_head(&dev->queue); + + vfd_dec = video_device_alloc(); + if (!vfd_dec) { + dev_err(&pdev->dev, "Failed to allocate video device\n"); + ret = -ENOMEM; + goto err_dec_alloc; + } + + vfd_dec->fops = &aml_vcodec_fops; + vfd_dec->ioctl_ops = &aml_vdec_ioctl_ops; + vfd_dec->release = video_device_release; + vfd_dec->lock = &dev->dev_mutex; + vfd_dec->v4l2_dev = &dev->v4l2_dev; + vfd_dec->vfl_dir = VFL_DIR_M2M; + vfd_dec->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | + V4L2_CAP_STREAMING; + + snprintf(vfd_dec->name, sizeof(vfd_dec->name), "%s", + AML_VCODEC_DEC_NAME); + video_set_drvdata(vfd_dec, dev); + dev->vfd_dec = vfd_dec; + platform_set_drvdata(pdev, dev); + + dev->m2m_dev_dec = v4l2_m2m_init(&aml_vdec_m2m_ops); + if (IS_ERR((__force void *)dev->m2m_dev_dec)) { + 
dev_err(&pdev->dev, "Failed to init mem2mem dec device\n"); + ret = PTR_ERR((__force void *)dev->m2m_dev_dec); + goto err_dec_mem_init; + } + + dev->decode_workqueue = + alloc_ordered_workqueue("output-worker", + __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_HIGHPRI); + if (!dev->decode_workqueue) { + dev_err(&pdev->dev, "Failed to create decode workqueue\n"); + ret = -EINVAL; + goto err_event_workq; + } + + //dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num); + + ret = video_register_device(vfd_dec, VFL_TYPE_GRABBER, 26); + if (ret) { + dev_err(&pdev->dev, "Failed to register video device\n"); + goto err_dec_reg; + } + + /*init class*/ + dev->v4ldec_class.name = "v4ldec"; + dev->v4ldec_class.owner = THIS_MODULE; + dev->v4ldec_class.class_groups = v4ldec_class_groups; + ret = class_register(&dev->v4ldec_class); + if (ret) { + dev_err(&pdev->dev, "v4l dec class create fail.\n"); + goto err_reg_class; + } + + dev_info(&pdev->dev, "v4ldec registered as /dev/video%d\n", vfd_dec->num); + + return 0; + +err_reg_class: + class_unregister(&dev->v4ldec_class); +err_dec_reg: + destroy_workqueue(dev->decode_workqueue); +err_event_workq: + v4l2_m2m_release(dev->m2m_dev_dec); +err_dec_mem_init: + video_unregister_device(vfd_dec); +err_dec_alloc: + v4l2_device_unregister(&dev->v4l2_dev); +err_res: + + return ret; +} + +static int aml_vcodec_dec_remove(struct platform_device *pdev) +{ + struct aml_vcodec_dev *dev = platform_get_drvdata(pdev); + + flush_workqueue(dev->decode_workqueue); + destroy_workqueue(dev->decode_workqueue); + + class_unregister(&dev->v4ldec_class); + + if (dev->m2m_dev_dec) + v4l2_m2m_release(dev->m2m_dev_dec); + + if (dev->vfd_dec) + video_unregister_device(dev->vfd_dec); + + v4l2_device_unregister(&dev->v4l2_dev); + + dev_info(&pdev->dev, "v4ldec removed.\n"); + + return 0; +} + +static const struct of_device_id aml_vcodec_match[] = { + {.compatible = "amlogic, vcodec-dec",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, aml_vcodec_match); + +static struct platform_driver 
aml_vcodec_dec_driver = {
	.probe	= aml_vcodec_probe,
	.remove	= aml_vcodec_dec_remove,
	.driver	= {
		.name	= AML_VCODEC_DEC_NAME,
		.of_match_table = aml_vcodec_match,
	},
};

/* Module entry point: register the platform driver. */
static int __init amvdec_ports_init(void)
{
	pr_info("v4l dec module init\n");

	if (platform_driver_register(&aml_vcodec_dec_driver)) {
		pr_err("failed to register v4l dec driver\n");
		return -ENODEV;
	}

	return 0;
}

/* Module exit point: unregister the platform driver. */
static void __exit amvdec_ports_exit(void)
{
	pr_info("v4l dec module exit\n");

	platform_driver_unregister(&aml_vcodec_dec_driver);
}

module_init(amvdec_ports_init);
module_exit(amvdec_ports_exit);

/*
 * Module parameters / exported tunables. All are 0/false by default
 * unless initialized otherwise below; each is exported so sibling
 * decoder modules can read them.
 */

/* debug verbosity mask consumed by v4l_dbg() */
u32 debug_mode;
EXPORT_SYMBOL(debug_mode);
module_param(debug_mode, uint, 0644);

u32 mandatory_dw_mmu;
EXPORT_SYMBOL(mandatory_dw_mmu);
module_param(mandatory_dw_mmu, uint, 0644);

/* optional vfm path override (used only when *_enable is set) */
bool aml_set_vfm_enable;
EXPORT_SYMBOL(aml_set_vfm_enable);
module_param(aml_set_vfm_enable, bool, 0644);

int aml_set_vfm_path;
EXPORT_SYMBOL(aml_set_vfm_path);
module_param(aml_set_vfm_path, int, 0644);

/* optional vdec type override (used only when *_enable is set) */
bool aml_set_vdec_type_enable;
EXPORT_SYMBOL(aml_set_vdec_type_enable);
module_param(aml_set_vdec_type_enable, bool, 0644);

int aml_set_vdec_type;
EXPORT_SYMBOL(aml_set_vdec_type);
module_param(aml_set_vdec_type, int, 0644);

int vp9_need_prefix;
EXPORT_SYMBOL(vp9_need_prefix);
module_param(vp9_need_prefix, int, 0644);

int av1_need_prefix;
EXPORT_SYMBOL(av1_need_prefix);
module_param(av1_need_prefix, int, 0644);

/* when set, the CAPTURE queue uses the multi-planar buffer type
 * (see aml_vcodec_dec_queue_init) */
bool multiplanar;
EXPORT_SYMBOL(multiplanar);
module_param(multiplanar, bool, 0644);

/* debug frame-dump switches */
int dump_capture_frame;
EXPORT_SYMBOL(dump_capture_frame);
module_param(dump_capture_frame, int, 0644);

int dump_vpp_input;
EXPORT_SYMBOL(dump_vpp_input);
module_param(dump_vpp_input, int, 0644);

int dump_ge2d_input;
EXPORT_SYMBOL(dump_ge2d_input);
module_param(dump_ge2d_input, int, 0644);

int dump_output_frame;
EXPORT_SYMBOL(dump_output_frame);
module_param(dump_output_frame, int, 0644);

u32 dump_output_start_position;
EXPORT_SYMBOL(dump_output_start_position);
module_param(dump_output_start_position, uint, 0644);

/* defined (default true) near the top of this file */
EXPORT_SYMBOL(param_sets_from_ucode);
module_param(param_sets_from_ucode, bool, 0644);

/* defined near the top of this file; forces ucode parameter sets */
EXPORT_SYMBOL(enable_drm_mode);
module_param(enable_drm_mode, bool, 0644);

/* post-processing bypass switches */
int bypass_vpp;
EXPORT_SYMBOL(bypass_vpp);
module_param(bypass_vpp, int, 0644);

int bypass_ge2d;
EXPORT_SYMBOL(bypass_ge2d);
module_param(bypass_ge2d, int, 0644);

int max_di_instance = 2;
EXPORT_SYMBOL(max_di_instance);
module_param(max_di_instance, int, 0644);

int bypass_progressive = 1;
EXPORT_SYMBOL(bypass_progressive);
module_param(bypass_progressive, int, 0644);

bool support_mjpeg;
EXPORT_SYMBOL(support_mjpeg);
module_param(support_mjpeg, bool, 0644);

bool support_format_I420;
EXPORT_SYMBOL(support_format_I420);
module_param(support_format_I420, bool, 0644);

int force_enable_nr;
EXPORT_SYMBOL(force_enable_nr);
module_param(force_enable_nr, int, 0644);

int force_enable_di_local_buffer;
EXPORT_SYMBOL(force_enable_di_local_buffer);
module_param(force_enable_di_local_buffer, int, 0644);

int vpp_bypass_frames;
EXPORT_SYMBOL(vpp_bypass_frames);
module_param(vpp_bypass_frames, int, 0644);

int bypass_nr_flag;
EXPORT_SYMBOL(bypass_nr_flag);
module_param(bypass_nr_flag, int, 0644);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("AML video codec V4L2 decoder driver");
diff --git a/drivers/amvdec_ports/aml_vcodec_drv.h b/drivers/amvdec_ports/aml_vcodec_drv.h new file mode 100644 index 0000000..522fe67 --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_drv.h
@@ -0,0 +1,788 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef _AML_VCODEC_DRV_H_ +#define _AML_VCODEC_DRV_H_ + +#include <linux/kref.h> +#include <linux/platform_device.h> +#include <linux/videodev2.h> +#include <linux/kfifo.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> +#include <media/v4l2-ioctl.h> +#include <media/videobuf2-core.h> +#include <media/videobuf2-v4l2.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/video_sink/v4lvideo_ext.h> +#include "aml_vcodec_util.h" +#include "aml_vcodec_dec.h" + +#define AML_VCODEC_DRV_NAME "aml_vcodec_drv" +#define AML_VCODEC_DEC_NAME "aml-vcodec-dec" +#define AML_VCODEC_ENC_NAME "aml-vcodec-enc" +#define AML_PLATFORM_STR "platform:amlogic" + +#define AML_VCODEC_MAX_PLANES 3 +#define AML_V4L2_BENCHMARK 0 +#define WAIT_INTR_TIMEOUT_MS 1000 + +/* codec types of get/set parms. */ +#define V4L2_CONFIG_PARM_ENCODE (0) +#define V4L2_CONFIG_PARM_DECODE (1) + +/* types of decode parms. */ +#define V4L2_CONFIG_PARM_DECODE_CFGINFO (1 << 0) +#define V4L2_CONFIG_PARM_DECODE_PSINFO (1 << 1) +#define V4L2_CONFIG_PARM_DECODE_HDRINFO (1 << 2) +#define V4L2_CONFIG_PARM_DECODE_CNTINFO (1 << 3) + +/* amlogic event define. 
 */
/* #define V4L2_EVENT_SRC_CH_RESOLUTION (1 << 0) */
#define V4L2_EVENT_SRC_CH_HDRINFO (1 << 1)
#define V4L2_EVENT_SRC_CH_PSINFO (1 << 2)
#define V4L2_EVENT_SRC_CH_CNTINFO (1 << 3)

/* exception handling */
#define V4L2_EVENT_REQUEST_RESET (1 << 8)
#define V4L2_EVENT_REQUEST_EXIT (1 << 9)

/* eos event */
#define V4L2_EVENT_SEND_EOS (1 << 16)

/* v4l buffer pool: per-buffer ownership states recorded in v4l_buff_pool.seq */
#define V4L_CAP_BUFF_MAX (32)
#define V4L_CAP_BUFF_INVALID (0)
#define V4L_CAP_BUFF_IN_M2M (1)
#define V4L_CAP_BUFF_IN_DEC (2)
#define V4L_CAP_BUFF_IN_VPP (3)
#define V4L_CAP_BUFF_IN_GE2D (4)

/* v4l reset mode */
#define V4L_RESET_MODE_NORMAL (1 << 0) /* reset vdec_input and decoder. */
#define V4L_RESET_MODE_LIGHT (1 << 1) /* just only reset decoder. */

/* m2m job queue's status */
/* Instance is already queued on the job_queue */
#define TRANS_QUEUED (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT (1 << 2)

/* total capture buffers = decoder DPB + vpp + ge2d extras */
#define CTX_BUF_TOTAL(ctx) (ctx->dpb_size + ctx->vpp_size + ctx->ge2d_size)
/**
 * enum aml_hw_reg_idx - AML hw register base index
 */
enum aml_hw_reg_idx {
	VDEC_SYS,
	VDEC_MISC,
	VDEC_LD,
	VDEC_TOP,
	VDEC_CM,
	VDEC_AD,
	VDEC_AV,
	VDEC_PP,
	VDEC_HWD,
	VDEC_HWQ,
	VDEC_HWB,
	VDEC_HWG,
	NUM_MAX_VDEC_REG_BASE,
	/* h264 encoder */
	VENC_SYS = NUM_MAX_VDEC_REG_BASE,
	/* vp8 encoder */
	VENC_LT_SYS,
	NUM_MAX_VCODEC_REG_BASE
};

/**
 * enum aml_instance_type - The type of an AML Vcodec instance.
 */
enum aml_instance_type {
	AML_INST_DECODER = 0,
	AML_INST_ENCODER = 1,
};

/**
 * enum aml_instance_state - The state of an AML Vcodec instance.
 * @AML_STATE_IDLE - default state when instance is created
 * @AML_STATE_INIT - vcodec instance is initialized
 * @AML_STATE_PROBE - vdec/venc had sps/pps header parsed/encoded
 * @AML_STATE_READY - (undocumented upstream) state between PROBE and
 *                    ACTIVE — presumably buffers negotiated; confirm.
 * @AML_STATE_ACTIVE - vdec is ready for work.
 * @AML_STATE_FLUSHING - vdec is flushing. Only used by decoder
 * @AML_STATE_FLUSHED - decoder has transacted the last frame.
 * @AML_STATE_ABORT - vcodec should be aborted
 */
enum aml_instance_state {
	AML_STATE_IDLE,
	AML_STATE_INIT,
	AML_STATE_PROBE,
	AML_STATE_READY,
	AML_STATE_ACTIVE,
	AML_STATE_FLUSHING,
	AML_STATE_FLUSHED,
	AML_STATE_ABORT,
};

/**
 * enum aml_encode_param - General encoding parameters type (bit flags)
 */
enum aml_encode_param {
	AML_ENCODE_PARAM_NONE = 0,
	AML_ENCODE_PARAM_BITRATE = (1 << 0),
	AML_ENCODE_PARAM_FRAMERATE = (1 << 1),
	AML_ENCODE_PARAM_INTRA_PERIOD = (1 << 2),
	AML_ENCODE_PARAM_FORCE_INTRA = (1 << 3),
	AML_ENCODE_PARAM_GOP_SIZE = (1 << 4),
};

enum aml_fmt_type {
	AML_FMT_DEC = 0,
	AML_FMT_ENC = 1,
	AML_FMT_FRAME = 2,
};

/**
 * struct aml_video_fmt - Structure used to store information about pixelformats
 */
struct aml_video_fmt {
	u32 fourcc;
	enum aml_fmt_type type;
	u32 num_planes;
	const u8 *name;
};

/**
 * struct aml_codec_framesizes - Structure used to store information about
 * framesizes
 */
struct aml_codec_framesizes {
	u32 fourcc;
	struct v4l2_frmsize_stepwise stepwise;
};

/**
 * enum aml_q_type - Type of queue
 */
enum aml_q_type {
	AML_Q_DATA_SRC = 0,
	AML_Q_DATA_DST = 1,
};


/**
 * struct aml_q_data - Structure used to store information about queue
 */
struct aml_q_data {
	u32 visible_width;
	u32 visible_height;
	u32 coded_width;
	u32 coded_height;
	enum v4l2_field field;
	u32 bytesperline[AML_VCODEC_MAX_PLANES];
	u32 sizeimage[AML_VCODEC_MAX_PLANES];
	struct aml_video_fmt *fmt;
	bool resolution_changed;
};

/**
 * struct aml_enc_params - General encoding parameters
 * @bitrate: target bitrate in bits per second
 * @num_b_frame: number of b frames between p-frame
 * @rc_frame: frame based rate control
 * @rc_mb: macroblock based rate control
 * @seq_hdr_mode: H.264 sequence header is encoded separately or joined
 *		  with the first frame
 * @intra_period: I frame period
 * @gop_size: group of picture size, it's used as the intra frame period
 * @framerate_num: frame rate numerator. ex: framerate_num=30 and
 *		   framerate_denom=1 means FPS is 30
 * @framerate_denom: frame rate denominator. ex: framerate_num=30 and
 *		     framerate_denom=1 means FPS is 30
 * @h264_max_qp: Max value for H.264 quantization parameter
 * @h264_profile: V4L2 defined H.264 profile
 * @h264_level: V4L2 defined H.264 level
 * @force_intra: force/insert intra frame
 */
struct aml_enc_params {
	u32 bitrate;
	u32 num_b_frame;
	u32 rc_frame;
	u32 rc_mb;
	u32 seq_hdr_mode;
	u32 intra_period;
	u32 gop_size;
	u32 framerate_num;
	u32 framerate_denom;
	u32 h264_max_qp;
	u32 h264_profile;
	u32 h264_level;
	u32 force_intra;
};

/**
 * struct vdec_pic_info - picture size information
 * @visible_width: picture width
 * @visible_height: picture height
 * @coded_width: picture buffer width (64 aligned up from pic_w)
 * @coded_height: picture buffer height (64 aligned up from pic_h)
 * @y_bs_sz: Y bitstream size
 * @c_bs_sz: CbCr bitstream size
 * @y_len_sz: additional size required to store decompress information for y
 *	      plane
 * @c_len_sz: additional size required to store decompress information for cbcr
 *	      plane
 * E.g. suppose picture size is 176x144,
 *      buffer size will be aligned to 176x160.
 * @profile_idc: source profile level
 * @field: frame/field information.
 * @dpb_frames: used for DPB size of calculation.
 * @dpb_margin: extra buffers for decoder.
 * @vpp_margin: extra buffers for vpp.
 */
struct vdec_pic_info {
	u32 visible_width;
	u32 visible_height;
	u32 coded_width;
	u32 coded_height;
	u32 y_bs_sz;
	u32 c_bs_sz;
	u32 y_len_sz;
	u32 c_len_sz;
	int profile_idc;
	enum v4l2_field field;
	u32 dpb_frames;
	u32 dpb_margin;
	u32 vpp_margin;
};

/**
 * struct vdec_comp_buf_info - compressed buffer info
 * @max_size: max size needed for MMU Box in MB
 * @header_size: contiguous size for the compressed header
 * @frame_buffer_size: SG page number to store the frame
 */
struct vdec_comp_buf_info {
	u32 max_size;
	u32 header_size;
	u32 frame_buffer_size;
};

struct aml_vdec_cfg_infos {
	u32 double_write_mode;
	u32 init_width;
	u32 init_height;
	u32 ref_buf_margin;
	u32 canvas_mem_mode;
	u32 canvas_mem_endian;
	u32 low_latency_mode;
	u32 uvm_hook_type;
	/*
	 * bit layout of metadata_config_flag:
	 * bit 16 : force progressive output flag.
	 * bit 15 : enable nr.
	 * bit 14 : enable di local buff.
	 * bit 13 : report downscale yuv buffer size flag.
	 * bit 12 : for second field pts mode.
	 * bit 11 : disable error policy
	 * bit 1 : Non-standard dv flag.
	 * bit 0 : dv two layer flag.
	 */
	u32 metadata_config_flag; // for metadata config flag
	u32 duration;
	u32 data[4];
};

struct aml_vdec_hdr_infos {
	/*
	 * bit 29 : present_flag
	 * bit 28-26: video_format "component", "PAL", "NTSC", "SECAM", "MAC", "unspecified"
	 * bit 25 : range "limited", "full_range"
	 * bit 24 : color_description_present_flag
	 * bit 23-16: color_primaries "unknown", "bt709", "undef", "bt601",
	 *	      "bt470m", "bt470bg", "smpte170m", "smpte240m", "film", "bt2020"
	 * bit 15-8 : transfer_characteristic "unknown", "bt709", "undef", "bt601",
	 *	      "bt470m", "bt470bg", "smpte170m", "smpte240m",
	 *	      "linear", "log100", "log316", "iec61966-2-4",
	 *	      "bt1361e", "iec61966-2-1", "bt2020-10", "bt2020-12",
	 *	      "smpte-st-2084", "smpte-st-428"
	 * bit 7-0 : matrix_coefficient "GBR", "bt709", "undef", "bt601",
	 *	     "fcc", "bt470bg", "smpte170m", "smpte240m",
	 *	     "YCgCo", "bt2020nc", "bt2020c"
	 */
	u32 signal_type;
	struct vframe_master_display_colour_s color_parms;
};

struct aml_vdec_ps_infos {
	u32 visible_width;
	u32 visible_height;
	u32 coded_width;
	u32 coded_height;
	u32 profile;
	u32 mb_width;
	u32 mb_height;
	u32 dpb_size;
	u32 ref_frames;
	u32 dpb_frames;
	u32 dpb_margin;
	u32 field;
	u32 data[3];
};

struct aml_vdec_cnt_infos {
	u32 bit_rate;
	u32 frame_count;
	u32 error_frame_count;
	u32 drop_frame_count;
	u32 total_data;
};

struct aml_dec_params {
	u32 parms_status;
	struct aml_vdec_cfg_infos cfg;
	struct aml_vdec_ps_infos ps;
	struct aml_vdec_hdr_infos hdr;
	struct aml_vdec_cnt_infos cnt;
};

struct v4l2_config_parm {
	u32 type;
	u32 length;
	union {
		struct aml_dec_params dec;
		struct aml_enc_params enc;
		u8 data[200];
	} parm;
	u8 buf[4096];
};

struct v4l_buff_pool {
	/*
	 * bit 31-16: buffer state
	 * bit 15- 0: buffer index
	 */
	u32 seq[V4L_CAP_BUFF_MAX];
	u32 in, out;
	u32 dec, vpp, ge2d;
};

enum aml_thread_type {
	AML_THREAD_OUTPUT,
	AML_THREAD_CAPTURE,
};

/* worker body executed by an aml_vdec_thread */
typedef void (*aml_thread_func)(struct aml_vcodec_ctx *ctx);

struct aml_vdec_thread {
	struct list_head node;
	spinlock_t lock;
	struct semaphore sem;
	struct task_struct *task;
	enum aml_thread_type type;
	void *priv;
	int stop;

	aml_thread_func func;
};

/* struct internal_comp_buf - compressed buffer
 * @index: index of this buf within (B)MMU BOX
 * @ref: [0-7]: reference number of this buf
 *	 [8-15]: use for reuse.
 * @mmu_box: mmu_box of context
 * @bmmu_box: bmmu_box of context
 * @box_ref: box_ref of context
 * @header_addr: header for compressed buffer
 * @frame_buffer_size: SG buffer page number from
 *		       struct vdec_comp_buf_info
 * @priv_data: use for video composer
 */
struct internal_comp_buf {
	u32 index;
	u32 ref;
	void *mmu_box;
	void *bmmu_box;
	struct kref *box_ref;

	ulong header_addr;
	u32 header_size;
	u32 frame_buffer_size;
	struct file_private_data priv_data;
	ulong header_dw_addr;
	void *mmu_box_dw;
	void *bmmu_box_dw;
};

/*
 * struct aml_uvm_buff_ref - uvm buff is used to reserve ctx ref count.
 * @index : index of video buffer.
 * @addr : physical address of video buffer.
 * @ref : reference of v4ldec context.
 * @dbuf : dma buf associated with vb.
 */
struct aml_uvm_buff_ref {
	int index;
	ulong addr;
	struct kref *ref;
	struct dma_buf *dbuf;
};

/*
 * enum aml_fb_requester - indicate which module request fb buffers.
 */
enum aml_fb_requester {
	AML_FB_REQ_DEC,
	AML_FB_REQ_VPP,
	AML_FB_REQ_GE2D,
	AML_FB_REQ_MAX
};

/*
 * struct aml_fb_ops - frame buffer allocation callbacks.
 * @query: try to achieve a fb token.
 * @alloc: used for allocating a fb buffer.
 */
struct aml_fb_ops {
	bool (*query)(struct aml_fb_ops *, ulong *);
	int (*alloc)(struct aml_fb_ops *, ulong, struct vdec_v4l2_buffer **, u32);
};

/*
 * struct aml_fb_map_table - record some buffer map infos
 * @addr : yuv linear buffer address.
 * @header_addr : used for compress buffer.
 * @vframe : which is from decoder or vpp vf pool.
 * @task : context of task chain.
 * @icomp : compress buffer index.
 */
struct aml_fb_map_table {
	ulong addr;
	ulong header_addr;
	struct vframe_s *vframe;
	struct task_chain_s *task;
	u32 icomp;
};

/*
 * struct aux_data - record sei data and dv data
 * @sei_size: sei data size.
 * @sei_buf: sei data addr.
 * @sei_state: sei buffer state. (0 free, 1 not used, 2 used)
 * @comp_buf: stores comp data parsed from sei data.
 * @md_buf: stores md data parsed from sei data.
 */
struct aux_data {
	int sei_size;
	char* sei_buf;
	int sei_state;
	char* comp_buf;
	char* md_buf;
};

/*
 * struct aux_info - record aux data infos
 * @sei_index: sei data index.
 * @dv_index: dv data index.
 * @sei_need_free: sei buffer need to free.
 * @bufs: stores aux data.
 * @alloc_buffer: alloc aux buffer functions.
 * @free_buffer: free aux buffer functions.
 * @free_one_sei_buffer: free sei buffer with index functions.
 * @bind_sei_buffer: bind sei buffer functions.
 * @bind_dv_buffer: bind dv buffer functions.
 */
struct aux_info {
	int sei_index;
	int dv_index;
	bool sei_need_free;
	struct aux_data bufs[V4L_CAP_BUFF_MAX];
	void (*alloc_buffer)(struct aml_vcodec_ctx *ctx, int flag);
	void (*free_buffer)(struct aml_vcodec_ctx *ctx, int flag);
	void (*free_one_sei_buffer)(struct aml_vcodec_ctx *ctx, char **addr, int *size, int idx);
	void (*bind_sei_buffer)(struct aml_vcodec_ctx *ctx, char **addr, int *size, int *idx);
	void (*bind_dv_buffer)(struct aml_vcodec_ctx *ctx, char **comp_buf, char **md_buf);
};

/*
 * struct meta_data - record meta data.
 * @buf[META_DATA_SIZE]: meta data information.
 */
struct meta_data {
	char buf[META_DATA_SIZE];
};

/*
 * struct meta_info - record some meta data infos
 * @index: meta data index.
 * @meta_bufs: record meta data.
 */
struct meta_info {
	int index;
	struct meta_data *meta_bufs;
};

/*
 * struct aml_vpp_cfg_infos - config vpp init param
 * @mode : vpp work mode
 * @fmt : picture format used to switch nv21 or nv12.
 * @buf_size: config buffer size for vpp
 * @is_drm : is drm mode
 * @is_prog : is a progressive source.
 * @is_bypass_p : to set progressive bypass in vpp
 * @enable_nr : enable noise reduce.
 * @enable_local_buf: DI used buff alloc by itself.
 * @res_chg : indicate resolution changed.
 * @is_vpp_reset: vpp reset just used to res chg.
 */
struct aml_vpp_cfg_infos {
	u32 mode;
	u32 fmt;
	u32 buf_size;
	bool is_drm;
	bool is_prog;
	bool is_bypass_p;
	bool enable_nr;
	bool enable_local_buf;
	bool res_chg;
	bool is_vpp_reset;
};

struct aml_ge2d_cfg_infos {
	u32 mode;
	u32 buf_size;
	bool is_drm;
};

/*
 * struct aml_vcodec_ctx - Context (instance) private data.
 * @id: index of the context that this structure describes.
 * @ctx_ref: for deferred free of this context.
 * @type: type of the instance - decoder or encoder.
 * @dev: pointer to the aml_vcodec_dev of the device.
 * @m2m_ctx: pointer to the v4l2_m2m_ctx of the context.
 * @ada_ctx: pointer to the aml_vdec_adapt of the context.
 * @vpp: pointer to video post processor
 * @dec_if: hooked decoder driver interface.
 * @drv_handle: driver handle for specific decode instance
 * @fh: struct v4l2_fh.
 * @ctrl_hdl: handler for v4l2 framework.
 * @slock: protect v4l2 codec context.
 * @tsplock: protect the vdec thread context.
 * @empty_flush_buf: a fake size-0 capture buffer that indicates flush.
 * @list: link to ctx_list of aml_vcodec_dev.
 * @q_data: store information of input and output queue of the context.
 * @queue: waitqueue that can be used to wait for this context to finish.
 * @state_lock: protect the codec status.
 * @state: state of the context.
 * @decode_work: decoder work be used to output buffer.
 * @output_thread_ready: indicate the output thread ready.
 * @cap_pool: capture buffers are remark in the pool.
 * @vdec_thread_list: vdec thread be used to capture.
 * @dpb_size: store dpb count after header parsing
 * @vpp_size: store vpp buffer count after header parsing
 * @param_change: indicate encode parameter type
 * @param_sets_from_ucode: if true indicate ps from ucode.
 * @v4l_codec_dpb_ready: queue buffer number greater than dpb.
 * @v4l_resolution_change: indicate resolution change happened.
 * @comp: comp be used for sync picture information with decoder.
 * @config: used to set or get parms for application.
 * @picinfo: store picture info after header parsing.
 * @last_decoded_picinfo: pic information get from latest decode.
 * @colorspace: enum v4l2_colorspace; supplemental to pixelformat.
 * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding.
 * @quantization: enum v4l2_quantization, colorspace quantization.
 * @xfer_func: enum v4l2_xfer_func, colorspace transfer function.
 * @cap_pix_fmt: the picture format used to switch nv21 or nv12.
 * @has_receive_eos: if receive last frame of capture that be set.
 * @is_drm_mode: decoding work on drm mode if that set.
 * @is_stream_mode: vdec input used to stream mode, default frame mode.
 * @is_stream_off: the value used to handle reset active.
 * @is_out_stream_off: streamoff called for output port.
 * @receive_cmd_stop: if receive the cmd flush decoder.
 * @reset_flag: reset mode includes lightly and normal mode.
 * @decoded_frame_cnt: the capture buffer deque number to be count.
 * @buf_used_count: means that decode allocate how many buffs from v4l.
 * @wq: wait recycle dma buffer finish.
 * @cap_wq: the wq used for wait capture buffer.
 * @dmabuff_recycle_lock: protect the lock dmabuff free.
 * @dmabuff_recycle_work: used for recycle dmabuff.
 * @dmabuff_recycle: kfifo used for store vb buff.
 * @capture_buffer: kfifo used for store capture vb buff.
 * @mmu_box: mmu_box of context.
 * @bmmu_box: bmmu_box of context.
 * @box_ref: box_ref of context.
 * @comp_info: compress buffer information.
 * @comp_bufs: compress buffer describe.
 * @comp_lock: used for lock ibuf free cb.
 * @fb_ops: frame buffer ops interface.
 * @dv_infos: dv data information.
 * @vpp_cfg: vpp init parms of configuration.
 * @vdec_pic_info_update: update pic info cb.
 * @vpp_is_need: the instance is need vpp.
 * @task_chain_pool: used to store task chain inst.
 * @index_disp: the number of frames output.
 *
 * NOTE(review): some documented members (@param_change, @dv_infos) have no
 * matching field below, and several fields (e.g. @output_dma_mode, @sync,
 * @aux_infos) are undocumented — confirm against the full header.
 */
struct aml_vcodec_ctx {
	int id;
	struct kref ctx_ref;
	enum aml_instance_type type;
	struct aml_vcodec_dev *dev;
	struct v4l2_m2m_ctx *m2m_ctx;
	struct aml_vdec_adapt *ada_ctx;
	struct aml_v4l2_vpp *vpp;
	const struct vdec_common_if *dec_if;
	ulong drv_handle;
	struct v4l2_fh fh;
	struct v4l2_ctrl_handler ctrl_hdl;
	spinlock_t slock;
	spinlock_t tsplock;
	struct aml_video_dec_buf *empty_flush_buf;
	struct list_head list;

	struct aml_q_data q_data[2];
	wait_queue_head_t queue;
	struct mutex state_lock;
	enum aml_instance_state state;
	struct work_struct decode_work;
	bool output_thread_ready;
	struct v4l_buff_pool cap_pool;
	struct list_head vdec_thread_list;

	int dpb_size;
	int vpp_size;
	int ge2d_size;
	bool param_sets_from_ucode;
	bool v4l_codec_dpb_ready;
	bool v4l_resolution_change;
	struct completion comp;
	struct v4l2_config_parm config;
	struct vdec_pic_info picinfo;
	struct vdec_pic_info last_decoded_picinfo;
	enum v4l2_colorspace colorspace;
	enum v4l2_ycbcr_encoding ycbcr_enc;
	enum v4l2_quantization quantization;
	enum v4l2_xfer_func xfer_func;
	u32 cap_pix_fmt;
	u32 output_pix_fmt;

	bool has_receive_eos;
	bool is_drm_mode;
	bool output_dma_mode;
	bool is_stream_off;
	bool is_out_stream_off;
	bool receive_cmd_stop;
	int reset_flag;
	int decoded_frame_cnt;
	int buf_used_count;
	wait_queue_head_t wq, cap_wq, post_done_wq;
	struct mutex capture_buffer_lock;
	spinlock_t dmabuff_recycle_lock;
	struct mutex buff_done_lock;
	struct work_struct dmabuff_recycle_work;
	DECLARE_KFIFO(dmabuff_recycle, struct vb2_v4l2_buffer *, 32);
	DECLARE_KFIFO(capture_buffer, struct vb2_v4l2_buffer *, 32);

	/* compressed buffer support */
	void *bmmu_box;
	void *mmu_box;
	struct kref box_ref;
	struct vdec_comp_buf_info comp_info;
	struct internal_comp_buf *comp_bufs;
	struct uvm_hook_mod_info *uvm_proxy;
	struct mutex comp_lock;

	struct aml_fb_ops fb_ops;
	ulong token_table[32];

	struct aml_fb_map_table fb_map[32];
	struct aml_vpp_cfg_infos vpp_cfg;
	void (*vdec_pic_info_update)(struct aml_vcodec_ctx *ctx);
	bool vpp_is_need;
	struct list_head task_chain_pool;
	struct meta_info meta_infos;
	struct vdec_sync *sync;
	u32 internal_dw_scale;

	/* ge2d field. */
	struct aml_v4l2_ge2d *ge2d;
	struct aml_ge2d_cfg_infos ge2d_cfg;
	bool ge2d_is_need;

	bool second_field_pts_mode;
	struct aux_info aux_infos;
	u32 index_disp;
	bool post_to_upper_done;
	bool film_grain_present;
	void *bmmu_box_dw;
	void *mmu_box_dw;
};

/**
 * struct aml_vcodec_dev - driver data.
 * @v4l2_dev : V4L2 device to register video devices for.
 * @vfd_dec : Video device for decoder.
 * @plat_dev : platform device.
 * @m2m_dev_dec : m2m device for decoder.
 * @curr_ctx : The context that is waiting for codec hardware.
 * @id_counter : used to identify current opened instance.
 * @dec_capability : used to identify decode capability, ex: 4k
 * @decode_workqueue : the worker used to output buffer schedule.
 * @ctx_list : list of struct aml_vcodec_ctx.
 * @irqlock : protect data access by irq handler and work thread.
 * @dev_mutex : video_device lock.
 * @dec_mutex : decoder hardware lock.
 * @queue : waitqueue for waiting for completion of device commands.
 * @vpp_count : count the number of open vpp.
 * @v4ldec_class : create class sysfs used to show some information.
 */
struct aml_vcodec_dev {
	struct v4l2_device v4l2_dev;
	struct video_device *vfd_dec;
	struct platform_device *plat_dev;
	struct v4l2_m2m_dev *m2m_dev_dec;
	struct aml_vcodec_ctx *curr_ctx;
	ulong id_counter;
	u32 dec_capability;
	struct workqueue_struct *decode_workqueue;
	struct list_head ctx_list;
	struct file *filp; /* NOTE(review): undocumented in the kernel-doc above — confirm its role */
	spinlock_t irqlock;
	struct mutex dev_mutex;
	struct mutex dec_mutex;
	wait_queue_head_t queue;
	atomic_t vpp_count;
	struct class v4ldec_class;
};

/* Map a v4l2_fh embedded in an aml_vcodec_ctx back to its context. */
static inline struct aml_vcodec_ctx *fh_to_ctx(struct v4l2_fh *fh)
{
	return container_of(fh, struct aml_vcodec_ctx, fh);
}

/* Map a v4l2_ctrl (via its handler) back to the owning context. */
static inline struct aml_vcodec_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
{
	return container_of(ctrl->handler, struct aml_vcodec_ctx, ctrl_hdl);
}

void aml_thread_capture_worker(struct aml_vcodec_ctx *ctx);
void aml_thread_post_task(struct aml_vcodec_ctx *ctx, enum aml_thread_type type);
int aml_thread_start(struct aml_vcodec_ctx *ctx, aml_thread_func func,
	enum aml_thread_type type, const char *thread_name);
void aml_thread_stop(struct aml_vcodec_ctx *ctx);

#endif /* _AML_VCODEC_DRV_H_ */
diff --git a/drivers/amvdec_ports/aml_vcodec_ge2d.c b/drivers/amvdec_ports/aml_vcodec_ge2d.c new file mode 100644 index 0000000..2d331ed --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_ge2d.c
@@ -0,0 +1,968 @@ +/* +* Copyright (C) 2020 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/videodev2.h> +#include <linux/sched/clock.h> +#include <uapi/linux/sched/types.h> +#include <linux/amlogic/meson_uvm_core.h> +#include <linux/amlogic/media/ge2d/ge2d.h> +#include <linux/amlogic/media/canvas/canvas_mgr.h> + +#include "../common/chips/decoder_cpu_ver_info.h" +#include "aml_vcodec_ge2d.h" +#include "aml_vcodec_adapt.h" +#include "vdec_drv_if.h" + +#define KERNEL_ATRACE_TAG KERNEL_ATRACE_TAG_V4L2 +#include <trace/events/meson_atrace.h> + +#define GE2D_BUF_GET_IDX(ge2d_buf) (ge2d_buf->aml_buf->vb.vb2_buf.index) +#define INPUT_PORT 0 +#define OUTPUT_PORT 1 + +extern int dump_ge2d_input; +extern int ge2d_bypass_frames; + +enum GE2D_FLAG { + GE2D_FLAG_P = 0x1, + GE2D_FLAG_I = 0x2, + GE2D_FLAG_EOS = 0x4, + GE2D_FLAG_BUF_BY_PASS = 0x8, + GE2D_FLAG_MAX = 0x7FFFFFFF, +}; + +enum videocom_source_type { + DECODER_8BIT_NORMAL = 0, + DECODER_8BIT_BOTTOM, + DECODER_8BIT_TOP, + DECODER_10BIT_NORMAL, + DECODER_10BIT_BOTTOM, + DECODER_10BIT_TOP +}; + +#ifndef CONFIG_AMLOGIC_MEDIA_GE2D +inline void stretchblt_noalpha(struct ge2d_context_s *wq, + int src_x, int src_y, int src_w, int src_h, + int 
dst_x, int dst_y, int dst_w, int dst_h) { return; } +inline int ge2d_context_config_ex(struct ge2d_context_s *context, + struct config_para_ex_s *ge2d_config) { return -1; } +inline struct ge2d_context_s *create_ge2d_work_queue(void) { return NULL; } +inline int destroy_ge2d_work_queue(struct ge2d_context_s *ge2d_work_queue) { return -1; } +#endif + +static int get_source_type(struct vframe_s *vf) +{ + enum videocom_source_type ret; + int interlace_mode; + + interlace_mode = vf->type & VIDTYPE_TYPEMASK; + + if ((vf->bitdepth & BITDEPTH_Y10) && + (!(vf->type & VIDTYPE_COMPRESS)) && + (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL)) { + if (interlace_mode == VIDTYPE_INTERLACE_TOP) + ret = DECODER_10BIT_TOP; + else if (interlace_mode == VIDTYPE_INTERLACE_BOTTOM) + ret = DECODER_10BIT_BOTTOM; + else + ret = DECODER_10BIT_NORMAL; + } else { + if (interlace_mode == VIDTYPE_INTERLACE_TOP) + ret = DECODER_8BIT_TOP; + else if (interlace_mode == VIDTYPE_INTERLACE_BOTTOM) + ret = DECODER_8BIT_BOTTOM; + else + ret = DECODER_8BIT_NORMAL; + } + + return ret; +} + +static int get_input_format(struct vframe_s *vf) +{ + int format = GE2D_FORMAT_M24_YUV420; + enum videocom_source_type soure_type; + + soure_type = get_source_type(vf); + + switch (soure_type) { + case DECODER_8BIT_NORMAL: + if (vf->type & VIDTYPE_VIU_422) + format = GE2D_FORMAT_S16_YUV422; + else if (vf->type & VIDTYPE_VIU_NV21) + format = GE2D_FORMAT_M24_NV21; + else if (vf->type & VIDTYPE_VIU_NV12) + format = GE2D_FORMAT_M24_NV12; + else if (vf->type & VIDTYPE_VIU_444) + format = GE2D_FORMAT_S24_YUV444; + else + format = GE2D_FORMAT_M24_YUV420; + break; + case DECODER_8BIT_BOTTOM: + if (vf->type & VIDTYPE_VIU_422) + format = GE2D_FORMAT_S16_YUV422 + | (GE2D_FORMAT_S16_YUV422B & (3 << 3)); + else if (vf->type & VIDTYPE_VIU_NV21) + format = GE2D_FORMAT_M24_NV21 + | (GE2D_FORMAT_M24_NV21B & (3 << 3)); + else if (vf->type & VIDTYPE_VIU_NV12) + format = GE2D_FORMAT_M24_NV12 + | (GE2D_FORMAT_M24_NV12B & (3 << 3)); + else if 
(vf->type & VIDTYPE_VIU_444) + format = GE2D_FORMAT_S24_YUV444 + | (GE2D_FORMAT_S24_YUV444B & (3 << 3)); + else + format = GE2D_FORMAT_M24_YUV420 + | (GE2D_FMT_M24_YUV420B & (3 << 3)); + break; + case DECODER_8BIT_TOP: + if (vf->type & VIDTYPE_VIU_422) + format = GE2D_FORMAT_S16_YUV422 + | (GE2D_FORMAT_S16_YUV422T & (3 << 3)); + else if (vf->type & VIDTYPE_VIU_NV21) + format = GE2D_FORMAT_M24_NV21 + | (GE2D_FORMAT_M24_NV21T & (3 << 3)); + else if (vf->type & VIDTYPE_VIU_NV12) + format = GE2D_FORMAT_M24_NV12 + | (GE2D_FORMAT_M24_NV12T & (3 << 3)); + else if (vf->type & VIDTYPE_VIU_444) + format = GE2D_FORMAT_S24_YUV444 + | (GE2D_FORMAT_S24_YUV444T & (3 << 3)); + else + format = GE2D_FORMAT_M24_YUV420 + | (GE2D_FMT_M24_YUV420T & (3 << 3)); + break; + case DECODER_10BIT_NORMAL: + if (vf->type & VIDTYPE_VIU_422) { + if (vf->bitdepth & FULL_PACK_422_MODE) + format = GE2D_FORMAT_S16_10BIT_YUV422; + else + format = GE2D_FORMAT_S16_12BIT_YUV422; + } + break; + case DECODER_10BIT_BOTTOM: + if (vf->type & VIDTYPE_VIU_422) { + if (vf->bitdepth & FULL_PACK_422_MODE) + format = GE2D_FORMAT_S16_10BIT_YUV422 + | (GE2D_FORMAT_S16_10BIT_YUV422B + & (3 << 3)); + else + format = GE2D_FORMAT_S16_12BIT_YUV422 + | (GE2D_FORMAT_S16_12BIT_YUV422B + & (3 << 3)); + } + break; + case DECODER_10BIT_TOP: + if (vf->type & VIDTYPE_VIU_422) { + if (vf->bitdepth & FULL_PACK_422_MODE) + format = GE2D_FORMAT_S16_10BIT_YUV422 + | (GE2D_FORMAT_S16_10BIT_YUV422T + & (3 << 3)); + else + format = GE2D_FORMAT_S16_12BIT_YUV422 + | (GE2D_FORMAT_S16_12BIT_YUV422T + & (3 << 3)); + } + break; + default: + format = GE2D_FORMAT_M24_YUV420; + } + return format; +} + +static int v4l_ge2d_empty_input_done(struct aml_v4l2_ge2d_buf *buf) +{ + struct aml_v4l2_ge2d *ge2d = buf->caller_data; + struct vdec_v4l2_buffer *fb = NULL; + bool eos = false; + + if (!ge2d || !ge2d->ctx) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "fatal %s %d ge2d:%px\n", + __func__, __LINE__, ge2d); + return -1; + } + + fb = 
&buf->aml_buf->frame_buffer; + eos = (buf->flag & GE2D_FLAG_EOS); + + v4l_dbg(ge2d->ctx, V4L_DEBUG_GE2D_BUFMGR, + "ge2d_input done: vf:%px, idx: %d, flag(vf:%x ge2d:%x) %s, ts:%lld, " + "in:%d, out:%d, vf:%d, in done:%d, out done:%d\n", + buf->vf, + buf->vf->index, + buf->vf->flag, + buf->flag, + eos ? "eos" : "", + buf->vf->timestamp, + kfifo_len(&ge2d->input), + kfifo_len(&ge2d->output), + kfifo_len(&ge2d->frame), + kfifo_len(&ge2d->in_done_q), + kfifo_len(&ge2d->out_done_q)); + + fb->task->recycle(fb->task, TASK_TYPE_GE2D); + + kfifo_put(&ge2d->input, buf); + + ATRACE_COUNTER("VC_IN_GE2D-1.recycle", fb->buf_idx); + + return 0; +} + +static int v4l_ge2d_fill_output_done(struct aml_v4l2_ge2d_buf *buf) +{ + struct aml_v4l2_ge2d *ge2d = buf->caller_data; + struct vdec_v4l2_buffer *fb = NULL; + bool bypass = false; + bool eos = false; + + if (!ge2d || !ge2d->ctx) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "fatal %s %d ge2d:%px\n", + __func__, __LINE__, ge2d); + return -1; + } + + fb = &buf->aml_buf->frame_buffer; + eos = (buf->flag & GE2D_FLAG_EOS); + bypass = (buf->flag & GE2D_FLAG_BUF_BY_PASS); + + /* recovery fb handle. */ + buf->vf->v4l_mem_handle = (ulong)fb; + + kfifo_put(&ge2d->out_done_q, buf); + + v4l_dbg(ge2d->ctx, V4L_DEBUG_GE2D_BUFMGR, + "ge2d_output done: vf:%px, idx:%d, flag(vf:%x ge2d:%x) %s, ts:%lld, " + "in:%d, out:%d, vf:%d, in done:%d, out done:%d, wxh:%ux%u\n", + buf->vf, + buf->vf->index, + buf->vf->flag, + buf->flag, + eos ? 
"eos" : "", + buf->vf->timestamp, + kfifo_len(&ge2d->input), + kfifo_len(&ge2d->output), + kfifo_len(&ge2d->frame), + kfifo_len(&ge2d->in_done_q), + kfifo_len(&ge2d->out_done_q), + buf->vf->width, buf->vf->height); + + ATRACE_COUNTER("VC_OUT_GE2D-2.submit", fb->buf_idx); + + fb->task->submit(fb->task, TASK_TYPE_GE2D); + + ge2d->out_num[OUTPUT_PORT]++; + + return 0; +} + +static void ge2d_vf_get(void *caller, struct vframe_s **vf_out) +{ + struct aml_v4l2_ge2d *ge2d = (struct aml_v4l2_ge2d *)caller; + struct aml_v4l2_ge2d_buf *buf = NULL; + struct vdec_v4l2_buffer *fb = NULL; + struct vframe_s *vf = NULL; + bool bypass = false; + bool eos = false; + + if (!ge2d || !ge2d->ctx) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "fatal %s %d ge2d:%px\n", + __func__, __LINE__, ge2d); + return; + } + + if (kfifo_get(&ge2d->out_done_q, &buf)) { + fb = &buf->aml_buf->frame_buffer; + eos = (buf->flag & GE2D_FLAG_EOS); + bypass = (buf->flag & GE2D_FLAG_BUF_BY_PASS); + vf = buf->vf; + + if (eos) { + v4l_dbg(ge2d->ctx, V4L_DEBUG_GE2D_DETAIL, + "%s %d got eos\n", + __func__, __LINE__); + vf->type |= VIDTYPE_V4L_EOS; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + } + + *vf_out = vf; + + ATRACE_COUNTER("VC_OUT_GE2D-3.vf_get", fb->buf_idx); + + v4l_dbg(ge2d->ctx, V4L_DEBUG_GE2D_BUFMGR, + "%s: vf:%px, index:%d, flag(vf:%x ge2d:%x), ts:%lld, type:%x, wxh:%ux%u\n", + __func__, vf, + vf->index, + vf->flag, + buf->flag, + vf->timestamp, vf->type, vf->width, vf->height); + } +} + +static void ge2d_vf_put(void *caller, struct vframe_s *vf) +{ + struct aml_v4l2_ge2d *ge2d = (struct aml_v4l2_ge2d *)caller; + struct vdec_v4l2_buffer *fb = NULL; + struct aml_video_dec_buf *aml_buf = NULL; + struct aml_v4l2_ge2d_buf *buf = NULL; + bool bypass = false; + bool eos = false; + + fb = (struct vdec_v4l2_buffer *) vf->v4l_mem_handle; + aml_buf = container_of(fb, struct aml_video_dec_buf, frame_buffer); + buf = (struct aml_v4l2_ge2d_buf *) aml_buf->ge2d_buf_handle; + eos = (buf->flag & GE2D_FLAG_EOS); + bypass = 
(buf->flag & GE2D_FLAG_BUF_BY_PASS); + + v4l_dbg(ge2d->ctx, V4L_DEBUG_GE2D_BUFMGR, + "%s: vf:%px, index:%d, flag(vf:%x ge2d:%x), ts:%lld\n", + __func__, vf, + vf->index, + vf->flag, + buf->flag, + vf->timestamp); + + ATRACE_COUNTER("VC_IN_GE2D-0.vf_put", fb->buf_idx); + + mutex_lock(&ge2d->output_lock); + kfifo_put(&ge2d->frame, vf); + kfifo_put(&ge2d->output, buf); + mutex_unlock(&ge2d->output_lock); + up(&ge2d->sem_out); +} + +static int aml_v4l2_ge2d_thread(void* param) +{ + struct aml_v4l2_ge2d* ge2d = param; + struct aml_vcodec_ctx *ctx = ge2d->ctx; + struct config_para_ex_s ge2d_config; + u32 src_fmt = 0, dst_fmt = 0; + struct canvas_s cd; + ulong start_time; + + v4l_dbg(ctx, V4L_DEBUG_GE2D_DETAIL, "enter ge2d thread\n"); + while (ge2d->running) { + struct aml_v4l2_ge2d_buf *in_buf; + struct aml_v4l2_ge2d_buf *out_buf = NULL; + struct vframe_s *vf_out = NULL; + struct vdec_v4l2_buffer *fb; + + if (down_interruptible(&ge2d->sem_in)) + goto exit; +retry: + if (!ge2d->running) + break; + + if (kfifo_is_empty(&ge2d->output)) { + if (down_interruptible(&ge2d->sem_out)) + goto exit; + goto retry; + } + + mutex_lock(&ge2d->output_lock); + if (!kfifo_get(&ge2d->output, &out_buf)) { + mutex_unlock(&ge2d->output_lock); + v4l_dbg(ctx, 0, "ge2d can not get output\n"); + goto exit; + } + mutex_unlock(&ge2d->output_lock); + + /* bind v4l2 buffers */ + if (!out_buf->aml_buf) { + struct vdec_v4l2_buffer *out; + + if (!ctx->fb_ops.query(&ctx->fb_ops, &ge2d->fb_token)) { + usleep_range(500, 550); + mutex_lock(&ge2d->output_lock); + kfifo_put(&ge2d->output, out_buf); + mutex_unlock(&ge2d->output_lock); + goto retry; + } + + if (ctx->fb_ops.alloc(&ctx->fb_ops, ge2d->fb_token, &out, AML_FB_REQ_GE2D)) { + usleep_range(5000, 5500); + mutex_lock(&ge2d->output_lock); + kfifo_put(&ge2d->output, out_buf); + mutex_unlock(&ge2d->output_lock); + goto retry; + } + + out_buf->aml_buf = container_of(out, + struct aml_video_dec_buf, frame_buffer); + out_buf->aml_buf->ge2d_buf_handle = (ulong) 
out_buf; + v4l_dbg(ctx, V4L_DEBUG_GE2D_BUFMGR, + "ge2d bind buf:%d to ge2d_buf:%px\n", + GE2D_BUF_GET_IDX(out_buf), out_buf); + + out->m.mem[0].bytes_used = out->m.mem[0].size; + out->m.mem[1].bytes_used = out->m.mem[1].size; + } + + /* safe to pop in_buf */ + if (!kfifo_get(&ge2d->in_done_q, &in_buf)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "ge2d can not get input\n"); + goto exit; + } + + mutex_lock(&ge2d->output_lock); + if (!kfifo_get(&ge2d->frame, &vf_out)) { + mutex_unlock(&ge2d->output_lock); + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "ge2d can not get frame\n"); + goto exit; + } + mutex_unlock(&ge2d->output_lock); + + fb = &out_buf->aml_buf->frame_buffer; + fb->status = FB_ST_GE2D; + + /* fill output vframe information. */ + memcpy(vf_out, in_buf->vf, sizeof(*vf_out)); + memcpy(vf_out->canvas0_config, + in_buf->vf->canvas0_config, + 2 * sizeof(struct canvas_config_s)); + + vf_out->canvas0_config[0].phy_addr = fb->m.mem[0].addr; + if (fb->num_planes == 1) { + vf_out->canvas0_config[1].phy_addr = + fb->m.mem[0].addr + fb->m.mem[0].offset; + vf_out->canvas0_config[2].phy_addr = + fb->m.mem[0].addr + fb->m.mem[0].offset + + (fb->m.mem[0].offset >> 2); + } else { + vf_out->canvas0_config[1].phy_addr = + fb->m.mem[1].addr; + vf_out->canvas0_config[2].phy_addr = + fb->m.mem[2].addr; + } + + /* fill outbuf parms. */ + out_buf->vf = vf_out; + out_buf->flag = 0; + out_buf->caller_data = ge2d; + + /* fill inbuf parms. 
*/ + in_buf->caller_data = ge2d; + + memset(&ge2d_config, 0, sizeof(ge2d_config)); + + src_fmt = get_input_format(in_buf->vf); + if (in_buf->vf->canvas0_config[0].endian == 7) + src_fmt |= GE2D_BIG_ENDIAN; + else + src_fmt |= GE2D_LITTLE_ENDIAN; + + /* negotiate format of destination */ + dst_fmt = get_input_format(in_buf->vf); + if (ge2d->work_mode & GE2D_MODE_CONVERT_NV12) + dst_fmt |= GE2D_FORMAT_M24_NV12; + else if (ge2d->work_mode & GE2D_MODE_CONVERT_NV21) + dst_fmt |= GE2D_FORMAT_M24_NV21; + + if (ge2d->work_mode & GE2D_MODE_CONVERT_LE) + dst_fmt |= GE2D_LITTLE_ENDIAN; + else + dst_fmt |= GE2D_BIG_ENDIAN; + + if ((dst_fmt & GE2D_COLOR_MAP_MASK) == GE2D_COLOR_MAP_NV12) { + vf_out->type |= VIDTYPE_VIU_NV12; + vf_out->type &= ~VIDTYPE_VIU_NV21; + } else if ((dst_fmt & GE2D_COLOR_MAP_MASK) == GE2D_COLOR_MAP_NV21) { + vf_out->type |= VIDTYPE_VIU_NV21; + vf_out->type &= ~VIDTYPE_VIU_NV12; + } + if ((dst_fmt & GE2D_ENDIAN_MASK) == GE2D_LITTLE_ENDIAN) { + vf_out->canvas0_config[0].endian = 0; + vf_out->canvas0_config[1].endian = 0; + vf_out->canvas0_config[2].endian = 0; + } else if ((dst_fmt & GE2D_ENDIAN_MASK) == GE2D_BIG_ENDIAN){ + vf_out->canvas0_config[0].endian = 7; + vf_out->canvas0_config[1].endian = 7; + vf_out->canvas0_config[2].endian = 7; + } + + start_time = local_clock(); + /* src canvas configure. 
*/ + if ((in_buf->vf->canvas0Addr == 0) || + (in_buf->vf->canvas0Addr == (u32)-1)) { + canvas_config_config(ge2d->src_canvas_id[0], &in_buf->vf->canvas0_config[0]); + canvas_config_config(ge2d->src_canvas_id[1], &in_buf->vf->canvas0_config[1]); + canvas_config_config(ge2d->src_canvas_id[2], &in_buf->vf->canvas0_config[2]); + ge2d_config.src_para.canvas_index = + ge2d->src_canvas_id[0] | + ge2d->src_canvas_id[1] << 8 | + ge2d->src_canvas_id[2] << 16; + + ge2d_config.src_planes[0].addr = + in_buf->vf->canvas0_config[0].phy_addr; + ge2d_config.src_planes[0].w = + in_buf->vf->canvas0_config[0].width; + ge2d_config.src_planes[0].h = + in_buf->vf->canvas0_config[0].height; + ge2d_config.src_planes[1].addr = + in_buf->vf->canvas0_config[1].phy_addr; + ge2d_config.src_planes[1].w = + in_buf->vf->canvas0_config[1].width; + ge2d_config.src_planes[1].h = + in_buf->vf->canvas0_config[1].height; + ge2d_config.src_planes[2].addr = + in_buf->vf->canvas0_config[2].phy_addr; + ge2d_config.src_planes[2].w = + in_buf->vf->canvas0_config[2].width; + ge2d_config.src_planes[2].h = + in_buf->vf->canvas0_config[2].height; + } else { + ge2d_config.src_para.canvas_index = in_buf->vf->canvas0Addr; + } + ge2d_config.src_para.mem_type = CANVAS_TYPE_INVALID; + ge2d_config.src_para.format = src_fmt; + ge2d_config.src_para.fill_color_en = 0; + ge2d_config.src_para.fill_mode = 0; + ge2d_config.src_para.x_rev = 0; + ge2d_config.src_para.y_rev = 0; + ge2d_config.src_para.color = 0xffffffff; + ge2d_config.src_para.top = 0; + ge2d_config.src_para.left = 0; + ge2d_config.src_para.width = in_buf->vf->width; + if (in_buf->vf->type & VIDTYPE_INTERLACE) + ge2d_config.src_para.height = in_buf->vf->height >> 1; + else + ge2d_config.src_para.height = in_buf->vf->height; + + /* dst canvas configure. 
*/ + canvas_config_config(ge2d->dst_canvas_id[0], &vf_out->canvas0_config[0]); + if ((ge2d_config.src_para.format & 0xfffff) == GE2D_FORMAT_M24_YUV420) { + vf_out->canvas0_config[1].width <<= 1; + } + canvas_config_config(ge2d->dst_canvas_id[1], &vf_out->canvas0_config[1]); + canvas_config_config(ge2d->dst_canvas_id[2], &vf_out->canvas0_config[2]); + ge2d_config.dst_para.canvas_index = + ge2d->dst_canvas_id[0] | + ge2d->dst_canvas_id[1] << 8; + canvas_read(ge2d->dst_canvas_id[0], &cd); + ge2d_config.dst_planes[0].addr = cd.addr; + ge2d_config.dst_planes[0].w = cd.width; + ge2d_config.dst_planes[0].h = cd.height; + canvas_read(ge2d->dst_canvas_id[1], &cd); + ge2d_config.dst_planes[1].addr = cd.addr; + ge2d_config.dst_planes[1].w = cd.width; + ge2d_config.dst_planes[1].h = cd.height; + + ge2d_config.dst_para.format = dst_fmt; + ge2d_config.dst_para.width = in_buf->vf->width; + ge2d_config.dst_para.height = in_buf->vf->height; + ge2d_config.dst_para.mem_type = CANVAS_TYPE_INVALID; + ge2d_config.dst_para.fill_color_en = 0; + ge2d_config.dst_para.fill_mode = 0; + ge2d_config.dst_para.x_rev = 0; + ge2d_config.dst_para.y_rev = 0; + ge2d_config.dst_para.color = 0; + ge2d_config.dst_para.top = 0; + ge2d_config.dst_para.left = 0; + + /* other ge2d parameters configure. 
*/ + ge2d_config.src_key.key_enable = 0; + ge2d_config.src_key.key_mask = 0; + ge2d_config.src_key.key_mode = 0; + ge2d_config.alu_const_color = 0; + ge2d_config.bitmask_en = 0; + ge2d_config.src1_gb_alpha = 0; + ge2d_config.dst_xy_swap = 0; + ge2d_config.src2_para.mem_type = CANVAS_TYPE_INVALID; + + ATRACE_COUNTER("VC_OUT_GE2D-1.handle_start", + in_buf->aml_buf->frame_buffer.buf_idx); + + v4l_dbg(ctx, V4L_DEBUG_GE2D_BUFMGR, + "ge2d_handle start: dec vf:%px/%d, ge2d vf:%px/%d, iphy:%lx/%lx %dx%d ophy:%lx/%lx %dx%d, vf:%ux%u, fmt(src:%x, dst:%x), " + "in:%d, out:%d, vf:%d, in done:%d, out done:%d\n", + in_buf->vf, in_buf->vf->index, + out_buf->vf, GE2D_BUF_GET_IDX(out_buf), + in_buf->vf->canvas0_config[0].phy_addr, + in_buf->vf->canvas0_config[1].phy_addr, + in_buf->vf->canvas0_config[0].width, + in_buf->vf->canvas0_config[0].height, + vf_out->canvas0_config[0].phy_addr, + vf_out->canvas0_config[1].phy_addr, + vf_out->canvas0_config[0].width, + vf_out->canvas0_config[0].height, + vf_out->width, vf_out->height, + src_fmt, dst_fmt, + kfifo_len(&ge2d->input), + kfifo_len(&ge2d->output), + kfifo_len(&ge2d->frame), + kfifo_len(&ge2d->in_done_q), + kfifo_len(&ge2d->out_done_q)); + + if (ge2d_context_config_ex(ge2d->ge2d_context, &ge2d_config) < 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "ge2d_context_config_ex error.\n"); + goto exit; + } + + if (!(in_buf->flag & GE2D_FLAG_EOS)) { + if (in_buf->vf->type & VIDTYPE_INTERLACE) { + stretchblt_noalpha(ge2d->ge2d_context, + 0, 0, in_buf->vf->width, in_buf->vf->height / 2, + 0, 0, in_buf->vf->width, in_buf->vf->height); + } else { + stretchblt_noalpha(ge2d->ge2d_context, + 0, 0, in_buf->vf->width, in_buf->vf->height, + 0, 0, in_buf->vf->width, in_buf->vf->height); + } + } + + //pr_info("consume time %d us\n", div64_u64(local_clock() - start_time, 1000)); + + v4l_ge2d_fill_output_done(out_buf); + v4l_ge2d_empty_input_done(in_buf); + + ge2d->in_num[INPUT_PORT]++; + ge2d->out_num[INPUT_PORT]++; + } +exit: + while 
(!kthread_should_stop()) { + usleep_range(1000, 2000); + } + + v4l_dbg(ctx, V4L_DEBUG_GE2D_DETAIL, "exit ge2d thread\n"); + + return 0; +} + +int aml_v4l2_ge2d_get_buf_num(u32 mode) +{ + return 4; +} + +int aml_v4l2_ge2d_init( + struct aml_vcodec_ctx *ctx, + struct aml_ge2d_cfg_infos *cfg, + struct aml_v4l2_ge2d** ge2d_handle) +{ + struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; + struct aml_v4l2_ge2d *ge2d; + u32 work_mode = cfg->mode; + u32 buf_size; + int i, ret; + + if (!cfg || !ge2d_handle) + return -EINVAL; + + ge2d = kzalloc(sizeof(*ge2d), GFP_KERNEL); + if (!ge2d) + return -ENOMEM; + + ge2d->work_mode = work_mode; + + /* default convert little endian. */ + if (!ge2d->work_mode) { + ge2d->work_mode = GE2D_MODE_CONVERT_LE; + } + + ge2d->ge2d_context = create_ge2d_work_queue(); + if (!ge2d->ge2d_context) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "ge2d_create_instance fail\n"); + ret = -EINVAL; + goto error; + } + + INIT_KFIFO(ge2d->input); + INIT_KFIFO(ge2d->output); + INIT_KFIFO(ge2d->frame); + INIT_KFIFO(ge2d->out_done_q); + INIT_KFIFO(ge2d->in_done_q); + + ge2d->ctx = ctx; + buf_size = cfg->buf_size; + ge2d->buf_size = buf_size; + + /* setup output fifo */ + ret = kfifo_alloc(&ge2d->output, buf_size, GFP_KERNEL); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc output fifo fail.\n"); + ret = -ENOMEM; + goto error2; + } + + ge2d->ovbpool = vzalloc(buf_size * sizeof(*ge2d->ovbpool)); + if (!ge2d->ovbpool) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc output vb pool fail.\n"); + ret = -ENOMEM; + goto error3; + } + + /* setup vframe fifo */ + ret = kfifo_alloc(&ge2d->frame, buf_size, GFP_KERNEL); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc ge2d vframe fifo fail.\n"); + ret = -ENOMEM; + goto error4; + } + + ge2d->vfpool = vzalloc(buf_size * sizeof(*ge2d->vfpool)); + if (!ge2d->vfpool) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc vf pool fail.\n"); + ret = -ENOMEM; + goto error5; + } + + ret = 
kfifo_alloc(&ge2d->input, GE2D_FRAME_SIZE, GFP_KERNEL); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc input fifo fail.\n"); + ret = -ENOMEM; + goto error6; + } + + ge2d->ivbpool = vzalloc(GE2D_FRAME_SIZE * sizeof(*ge2d->ivbpool)); + if (!ge2d->ivbpool) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc input vb pool fail.\n"); + ret = -ENOMEM; + goto error7; + } + + for (i = 0 ; i < GE2D_FRAME_SIZE ; i++) { + kfifo_put(&ge2d->input, &ge2d->ivbpool[i]); + } + + for (i = 0 ; i < buf_size ; i++) { + kfifo_put(&ge2d->output, &ge2d->ovbpool[i]); + kfifo_put(&ge2d->frame, &ge2d->vfpool[i]); + } + + ge2d->src_canvas_id[0] = canvas_pool_map_alloc_canvas("v4ldec-ge2d"); + ge2d->src_canvas_id[1] = canvas_pool_map_alloc_canvas("v4ldec-ge2d"); + ge2d->src_canvas_id[2] = canvas_pool_map_alloc_canvas("v4ldec-ge2d"); + ge2d->dst_canvas_id[0] = canvas_pool_map_alloc_canvas("v4ldec-ge2d"); + ge2d->dst_canvas_id[1] = canvas_pool_map_alloc_canvas("v4ldec-ge2d"); + ge2d->dst_canvas_id[2] = canvas_pool_map_alloc_canvas("v4ldec-ge2d"); + if ((ge2d->src_canvas_id[0] <= 0) || + (ge2d->src_canvas_id[1] <= 0) || + (ge2d->src_canvas_id[2] <= 0) || + (ge2d->dst_canvas_id[0] <= 0) || + (ge2d->dst_canvas_id[1] <= 0) || + (ge2d->dst_canvas_id[2] <= 0)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "canvas pool alloc fail. 
src(%d, %d, %d) dst(%d, %d, %d).\n", + ge2d->src_canvas_id[0], + ge2d->src_canvas_id[1], + ge2d->src_canvas_id[2], + ge2d->dst_canvas_id[0], + ge2d->dst_canvas_id[1], + ge2d->dst_canvas_id[2]); + goto error8; + } + + mutex_init(&ge2d->output_lock); + sema_init(&ge2d->sem_in, 0); + sema_init(&ge2d->sem_out, 0); + + ge2d->running = true; + ge2d->task = kthread_run(aml_v4l2_ge2d_thread, ge2d, + "%s", "aml-v4l2-ge2d"); + if (IS_ERR(ge2d->task)) { + ret = PTR_ERR(ge2d->task); + goto error9; + } + sched_setscheduler_nocheck(ge2d->task, SCHED_FIFO, ¶m); + + *ge2d_handle = ge2d; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "GE2D_CFG bsize:%d, wkm:%x, bm:%x, drm:%d\n", + ge2d->buf_size, + ge2d->work_mode, + ge2d->buffer_mode, + cfg->is_drm); + + return 0; + +error9: + if (ge2d->src_canvas_id[0] > 0) + canvas_pool_map_free_canvas(ge2d->src_canvas_id[0]); + if (ge2d->src_canvas_id[1] > 0) + canvas_pool_map_free_canvas(ge2d->src_canvas_id[1]); + if (ge2d->src_canvas_id[2] > 0) + canvas_pool_map_free_canvas(ge2d->src_canvas_id[2]); + if (ge2d->dst_canvas_id[0] > 0) + canvas_pool_map_free_canvas(ge2d->dst_canvas_id[0]); + if (ge2d->dst_canvas_id[1] > 0) + canvas_pool_map_free_canvas(ge2d->dst_canvas_id[1]); + if (ge2d->dst_canvas_id[2] > 0) + canvas_pool_map_free_canvas(ge2d->dst_canvas_id[2]); +error8: + vfree(ge2d->ivbpool); +error7: + kfifo_free(&ge2d->input); +error6: + vfree(ge2d->vfpool); +error5: + kfifo_free(&ge2d->frame); +error4: + vfree(ge2d->ovbpool); +error3: + kfifo_free(&ge2d->output); +error2: + destroy_ge2d_work_queue(ge2d->ge2d_context); +error: + kfree(ge2d); + + return ret; +} +EXPORT_SYMBOL(aml_v4l2_ge2d_init); + +int aml_v4l2_ge2d_destroy(struct aml_v4l2_ge2d* ge2d) +{ + v4l_dbg(ge2d->ctx, V4L_DEBUG_GE2D_DETAIL, + "ge2d destroy begin\n"); + + ge2d->running = false; + up(&ge2d->sem_in); + up(&ge2d->sem_out); + kthread_stop(ge2d->task); + + destroy_ge2d_work_queue(ge2d->ge2d_context); + /* no more ge2d callback below this line */ + + kfifo_free(&ge2d->frame); + 
vfree(ge2d->vfpool); + kfifo_free(&ge2d->output); + vfree(ge2d->ovbpool); + kfifo_free(&ge2d->input); + vfree(ge2d->ivbpool); + mutex_destroy(&ge2d->output_lock); + + canvas_pool_map_free_canvas(ge2d->src_canvas_id[0]); + canvas_pool_map_free_canvas(ge2d->src_canvas_id[1]); + canvas_pool_map_free_canvas(ge2d->src_canvas_id[2]); + canvas_pool_map_free_canvas(ge2d->dst_canvas_id[0]); + canvas_pool_map_free_canvas(ge2d->dst_canvas_id[1]); + canvas_pool_map_free_canvas(ge2d->dst_canvas_id[2]); + + v4l_dbg(ge2d->ctx, V4L_DEBUG_GE2D_DETAIL, + "ge2d destroy done\n"); + + kfree(ge2d); + + return 0; +} +EXPORT_SYMBOL(aml_v4l2_ge2d_destroy); + +static int aml_v4l2_ge2d_push_vframe(struct aml_v4l2_ge2d* ge2d, struct vframe_s *vf) +{ + struct aml_v4l2_ge2d_buf* in_buf; + struct vdec_v4l2_buffer *fb = NULL; + + if (!ge2d) + return -EINVAL; + + if (!kfifo_get(&ge2d->input, &in_buf)) { + v4l_dbg(ge2d->ctx, V4L_DEBUG_CODEC_ERROR, + "cat not get free input buffer.\n"); + return -1; + } + + if (vf->type & VIDTYPE_V4L_EOS) + in_buf->flag |= GE2D_FLAG_EOS; + + v4l_dbg(ge2d->ctx, V4L_DEBUG_GE2D_BUFMGR, + "ge2d_push_vframe: vf:%px, idx:%d, type:%x, ts:%lld\n", + vf, vf->index, vf->type, vf->timestamp); + + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + in_buf->aml_buf = container_of(fb, struct aml_video_dec_buf, frame_buffer); + in_buf->vf = vf; + + do { + unsigned int dw_mode = VDEC_DW_NO_AFBC; + struct file *fp; + + if (!dump_ge2d_input || ge2d->ctx->is_drm_mode) + break; + + if (vdec_if_get_param(ge2d->ctx, GET_PARAM_DW_MODE, &dw_mode)) + break; + + if (dw_mode == VDEC_DW_AFBC_ONLY) + break; + + fp = filp_open("/data/dec_dump_before.raw", + O_CREAT | O_RDWR | O_LARGEFILE | O_APPEND, 0600); + if (!IS_ERR(fp)) { + struct vb2_buffer *vb = &in_buf->aml_buf->vb.vb2_buf; + + kernel_write(fp,vb2_plane_vaddr(vb, 0),vb->planes[0].length, 0); + if (in_buf->aml_buf->frame_buffer.num_planes == 2) + kernel_write(fp,vb2_plane_vaddr(vb, 1), + vb->planes[1].length, 0); + dump_ge2d_input--; + 
filp_close(fp, NULL); + } + } while(0); + + ATRACE_COUNTER("VC_OUT_GE2D-0.receive", fb->buf_idx); + + kfifo_put(&ge2d->in_done_q, in_buf); + up(&ge2d->sem_in); + + return 0; +} + +static void fill_ge2d_buf_cb(void *v4l_ctx, void *fb_ctx) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)v4l_ctx; + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *)fb_ctx; + int ret = -1; + + ret = aml_v4l2_ge2d_push_vframe(ctx->ge2d, fb->vframe); + if (ret < 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "ge2d push vframe err, ret: %d\n", ret); + } +} + +static struct task_ops_s ge2d_ops = { + .type = TASK_TYPE_GE2D, + .get_vframe = ge2d_vf_get, + .put_vframe = ge2d_vf_put, + .fill_buffer = fill_ge2d_buf_cb, +}; + +struct task_ops_s *get_ge2d_ops(void) +{ + return &ge2d_ops; +} +EXPORT_SYMBOL(get_ge2d_ops); +
diff --git a/drivers/amvdec_ports/aml_vcodec_ge2d.h b/drivers/amvdec_ports/aml_vcodec_ge2d.h new file mode 100644 index 0000000..a12931d --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_ge2d.h
@@ -0,0 +1,89 @@ +/* +* Copyright (C) 2020 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef _AML_VCODEC_GE2D_H_ +#define _AML_VCODEC_GE2D_H_ + +#include <linux/kfifo.h> +#include "aml_vcodec_drv.h" +#include "aml_vcodec_dec.h" + +/* define ge2d work mode. */ +#define GE2D_MODE_CONVERT_NV12 (1 << 0) +#define GE2D_MODE_CONVERT_NV21 (1 << 1) +#define GE2D_MODE_CONVERT_LE (1 << 2) +#define GE2D_MODE_CONVERT_BE (1 << 3) +#define GE2D_MODE_SEPARATE_FIELD (1 << 4) +#define GE2D_MODE_422_TO_420 (1 << 5) + +#define GE2D_FRAME_SIZE 64 + +struct aml_v4l2_ge2d_buf { + u32 flag; + struct vframe_s *vf; + void *caller_data; + struct aml_video_dec_buf *aml_buf; +}; + +struct aml_v4l2_ge2d { + struct ge2d_context_s *ge2d_context; /* handle of GE2D */ + u32 buf_size; /* buffer size for ge2d */ + u32 work_mode; /* enum ge2d_work_mode */ + u32 buffer_mode; + struct aml_vcodec_ctx *ctx; + + DECLARE_KFIFO_PTR(input, typeof(struct aml_v4l2_ge2d_buf*)); + DECLARE_KFIFO_PTR(output, typeof(struct aml_v4l2_ge2d_buf*)); + DECLARE_KFIFO_PTR(frame, typeof(struct vframe_s *)); + DECLARE_KFIFO(out_done_q, struct aml_v4l2_ge2d_buf *, GE2D_FRAME_SIZE); + DECLARE_KFIFO(in_done_q, struct aml_v4l2_ge2d_buf *, GE2D_FRAME_SIZE); + + struct vframe_s *vfpool; + struct aml_v4l2_ge2d_buf 
*ovbpool; + struct aml_v4l2_ge2d_buf *ivbpool; + struct task_struct *task; + bool running; + struct semaphore sem_in, sem_out; + + /* In p to i transition, output/frame can be multi writer */ + struct mutex output_lock; + + /* for debugging */ + /* + * in[0] --> ge2d <-- in[1] + * out[0]<-- ge2d --> out[1] + */ + int in_num[2]; + int out_num[2]; + ulong fb_token; + + int src_canvas_id[3]; + int dst_canvas_id[3]; +}; + +struct task_ops_s *get_ge2d_ops(void); + +int aml_v4l2_ge2d_init( + struct aml_vcodec_ctx *ctx, + struct aml_ge2d_cfg_infos *cfg, + struct aml_v4l2_ge2d** ge2d_handle); + +int aml_v4l2_ge2d_destroy(struct aml_v4l2_ge2d* ge2d); + +#endif
diff --git a/drivers/amvdec_ports/aml_vcodec_util.c b/drivers/amvdec_ports/aml_vcodec_util.c new file mode 100644 index 0000000..54b0d06 --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_util.c
@@ -0,0 +1,46 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/module.h> + +#include "aml_vcodec_drv.h" +#include "aml_vcodec_util.h" + +void aml_vcodec_set_curr_ctx(struct aml_vcodec_dev *dev, + struct aml_vcodec_ctx *ctx) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->irqlock, flags); + dev->curr_ctx = ctx; + spin_unlock_irqrestore(&dev->irqlock, flags); +} +EXPORT_SYMBOL(aml_vcodec_set_curr_ctx); + +struct aml_vcodec_ctx *aml_vcodec_get_curr_ctx(struct aml_vcodec_dev *dev) +{ + unsigned long flags; + struct aml_vcodec_ctx *ctx; + + spin_lock_irqsave(&dev->irqlock, flags); + ctx = dev->curr_ctx; + spin_unlock_irqrestore(&dev->irqlock, flags); + return ctx; +} +EXPORT_SYMBOL(aml_vcodec_get_curr_ctx);
diff --git a/drivers/amvdec_ports/aml_vcodec_util.h b/drivers/amvdec_ports/aml_vcodec_util.h new file mode 100644 index 0000000..96c5453 --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_util.h
@@ -0,0 +1,106 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef _AML_VCODEC_UTIL_H_ +#define _AML_VCODEC_UTIL_H_ + +#include <linux/types.h> +#include <linux/dma-direction.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +/* +typedef unsigned long long u64; +typedef signed long long s64; +typedef unsigned int u32; +typedef unsigned short int u16; +typedef short int s16; +typedef unsigned char u8; +*/ +#define CODEC_MODE(a, b, c, d)\ + (((u8)(a) << 24) | ((u8)(b) << 16) | ((u8)(c) << 8) | (u8)(d)) + +#define BUFF_IDX(h, i)\ + (((ulong)(h) << 8) | (u8)(i)) + +struct aml_vcodec_mem { + int index; + ulong addr; + u32 size; + void *vaddr; + u32 bytes_used; + u32 offset; + u64 timestamp; + u32 model; + ulong meta_ptr; + struct dma_buf *dbuf; +}; + +struct aml_vcodec_ctx; +struct aml_vcodec_dev; + +extern u32 debug_mode; +extern u32 mandatory_dw_mmu; + +#ifdef v4l_dbg +#undef v4l_dbg +#endif + +/* v4l debug define. 
*/ +#define V4L_DEBUG_CODEC_ERROR (0) +#define V4L_DEBUG_CODEC_PRINFO (1 << 0) +#define V4L_DEBUG_CODEC_STATE (1 << 1) +#define V4L_DEBUG_CODEC_BUFMGR (1 << 2) +#define V4L_DEBUG_CODEC_INPUT (1 << 3) +#define V4L_DEBUG_CODEC_OUTPUT (1 << 4) +#define V4L_DEBUG_CODEC_COUNT (1 << 5) +#define V4L_DEBUG_CODEC_PARSER (1 << 6) +#define V4L_DEBUG_CODEC_PROT (1 << 7) +#define V4L_DEBUG_CODEC_EXINFO (1 << 8) +#define V4L_DEBUG_VPP_BUFMGR (1 << 9) +#define V4L_DEBUG_VPP_DETAIL (1 << 10) +#define V4L_DEBUG_TASK_CHAIN (1 << 11) +#define V4L_DEBUG_GE2D_BUFMGR (1 << 12) +#define V4L_DEBUG_GE2D_DETAIL (1 << 13) + +#define __v4l_dbg(h, id, fmt, args...) \ + do { \ + if (h) \ + pr_info("[%d]: " fmt, id, ##args); \ + else \ + pr_info(fmt, ##args); \ + } while (0) + +#define v4l_dbg(h, flags, fmt, args...) \ + do { \ + struct aml_vcodec_ctx *__ctx = (struct aml_vcodec_ctx *) h; \ + if ((flags == V4L_DEBUG_CODEC_ERROR) || \ + (flags == V4L_DEBUG_CODEC_PRINFO) || \ + (debug_mode & flags)) { \ + if (flags == V4L_DEBUG_CODEC_ERROR) { \ + __v4l_dbg(h, __ctx->id, "[ERR]: " fmt, ##args); \ + } else { \ + __v4l_dbg(h, __ctx->id, fmt, ##args); \ + } \ + } \ + } while (0) + +void aml_vcodec_set_curr_ctx(struct aml_vcodec_dev *dev, + struct aml_vcodec_ctx *ctx); +struct aml_vcodec_ctx *aml_vcodec_get_curr_ctx(struct aml_vcodec_dev *dev); + +#endif /* _AML_VCODEC_UTIL_H_ */
diff --git a/drivers/amvdec_ports/aml_vcodec_vpp.c b/drivers/amvdec_ports/aml_vcodec_vpp.c new file mode 100644 index 0000000..9f4e960 --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_vpp.c
@@ -0,0 +1,1102 @@ +/* +* Copyright (C) 2020 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/videodev2.h> +#include <uapi/linux/sched/types.h> +#include <linux/amlogic/meson_uvm_core.h> + +#include "aml_vcodec_vpp.h" +#include "aml_vcodec_adapt.h" +#include "vdec_drv_if.h" +#include "../common/chips/decoder_cpu_ver_info.h" + +#define KERNEL_ATRACE_TAG KERNEL_ATRACE_TAG_V4L2 +#include <trace/events/meson_atrace.h> + +#define VPP_BUF_GET_IDX(vpp_buf) (vpp_buf->aml_buf->vb.vb2_buf.index) +#define INPUT_PORT 0 +#define OUTPUT_PORT 1 + +extern int dump_vpp_input; +extern int vpp_bypass_frames; + +static void di_release_keep_buf_wrap(void *arg) +{ + struct di_buffer *buf = (struct di_buffer *)arg; + + v4l_dbg(0, V4L_DEBUG_VPP_BUFMGR, + "%s release di local buffer %px, vf:%px, comm:%s, pid:%d\n", + __func__ , buf, buf->vf, + current->comm, current->pid); + + di_release_keep_buf(buf); + + ATRACE_COUNTER("VC_OUT_VPP_LC-2.lc_release", buf->mng.index); +} + +static int attach_DI_buffer(struct aml_v4l2_vpp_buf *vpp_buf) +{ + struct aml_v4l2_vpp *vpp = vpp_buf->di_buf.caller_data; + struct dma_buf *dma = NULL; + struct aml_video_dec_buf *aml_buf = NULL; + struct uvm_hook_mod_info u_info; + int 
ret; + + aml_buf = vpp_buf->aml_buf; + if (!aml_buf) + return -EINVAL; + + dma = aml_buf->vb.vb2_buf.planes[0].dbuf; + if (!dma || !dmabuf_is_uvm(dma)) { + v4l_dbg(vpp->ctx, V4L_DEBUG_CODEC_ERROR, + "attach_DI_buffer err\n"); + return -EINVAL; + } + + if (!vpp_buf->di_local_buf) { + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "attach_DI_buffer nothing\n"); + return 0; + } + + if (uvm_get_hook_mod(dma, VF_PROCESS_DI)) { + uvm_put_hook_mod(dma, VF_PROCESS_DI); + v4l_dbg(vpp->ctx, V4L_DEBUG_CODEC_ERROR, + "attach_DI_buffer exist hook\n"); + return -EINVAL; + } + u_info.type = VF_PROCESS_DI; + u_info.arg = (void *)vpp_buf->di_local_buf; + u_info.free = di_release_keep_buf_wrap; + + ret = uvm_attach_hook_mod(dma, &u_info); + if (ret < 0) { + v4l_dbg(vpp->ctx, V4L_DEBUG_CODEC_ERROR, + "fail to set dmabuf DI hook\n"); + } + + ATRACE_COUNTER("VC_OUT_VPP_LC-0.lc_attach", vpp_buf->di_local_buf->mng.index); + + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "%s attach di local buffer %px, dbuf:%px\n", + __func__ , vpp_buf->di_local_buf, dma); + + return ret; +} + +static int detach_DI_buffer(struct aml_v4l2_vpp_buf *vpp_buf) +{ + struct aml_v4l2_vpp *vpp = vpp_buf->di_buf.caller_data; + struct dma_buf *dma = NULL; + struct aml_video_dec_buf *aml_buf = NULL; + int ret; + + aml_buf = vpp_buf->aml_buf; + if (!aml_buf) + return -EINVAL; + + dma = aml_buf->vb.vb2_buf.planes[0].dbuf; + if (!dma || !dmabuf_is_uvm(dma)) { + v4l_dbg(vpp->ctx, V4L_DEBUG_CODEC_ERROR, + "detach_DI_buffer err\n"); + return -EINVAL; + } + + if (!vpp_buf->di_local_buf) { + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "detach_DI_buffer nothing\n"); + return 0; + } + + ATRACE_COUNTER("VC_OUT_VPP_LC-1.lc_detach", vpp_buf->di_local_buf->mng.index); + + ret = uvm_detach_hook_mod(dma, VF_PROCESS_DI); + if (ret < 0) { + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "fail to remove dmabuf DI hook\n"); + } + + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "%s detach di local buffer %px, dbuf:%px\n", + __func__ , 
vpp_buf->di_local_buf, dma); + + return ret; +} + +static void release_DI_buff(struct aml_v4l2_vpp* vpp) +{ + struct aml_v4l2_vpp_buf *vpp_buf = NULL; + + while (kfifo_get(&vpp->out_done_q, &vpp_buf)) { + if (vpp_buf->di_buf.private_data) { + di_release_keep_buf(vpp_buf->di_local_buf); + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "%s release di local buffer %px\n", + __func__ , vpp_buf->di_local_buf); + } + } +} + +static int is_di_input_buff_full(struct aml_v4l2_vpp *vpp) +{ + return ((vpp->in_num[INPUT_PORT] - vpp->in_num[OUTPUT_PORT]) + > vpp->di_ibuf_num) ? true : false; +} + +static int is_di_output_buff_full(struct aml_v4l2_vpp *vpp) +{ + return ((vpp->out_num[INPUT_PORT] - vpp->out_num[OUTPUT_PORT]) + > vpp->di_obuf_num) ? true : false; +} + +static enum DI_ERRORTYPE + v4l_vpp_fill_output_done_alloc_buffer(struct di_buffer *buf) +{ + struct aml_v4l2_vpp *vpp = buf->caller_data; + struct aml_v4l2_vpp_buf *vpp_buf = NULL; + struct vdec_v4l2_buffer *fb = NULL; + bool bypass = false; + bool eos = false; + + if (!vpp || !vpp->ctx) { + pr_err("fatal %s %d vpp:%p\n", + __func__, __LINE__, vpp); + di_release_keep_buf_wrap(buf); + return DI_ERR_UNDEFINED; + } + + if (vpp->ctx->is_stream_off) { + v4l_dbg(vpp->ctx, V4L_DEBUG_CODEC_EXINFO, + "vpp discard submit frame %s %d vpp:%p\n", + __func__, __LINE__, vpp); + di_release_keep_buf_wrap(buf); + return DI_ERR_UNDEFINED; + } + + if (!kfifo_get(&vpp->processing, &vpp_buf)) { + v4l_dbg(vpp->ctx, V4L_DEBUG_CODEC_EXINFO, + "vpp doesn't get output %s %d vpp:%p\n", + __func__, __LINE__, vpp); + di_release_keep_buf_wrap(buf); + return DI_ERR_UNDEFINED; + } + + fb = &vpp_buf->aml_buf->frame_buffer; + eos = (buf->flag & DI_FLAG_EOS); + bypass = (buf->flag & DI_FLAG_BUF_BY_PASS); + + vpp_buf->di_buf.vf->timestamp = buf->vf->timestamp; + vpp_buf->di_buf.private_data = buf->private_data; + vpp_buf->di_buf.vf->vf_ext = buf->vf; + vpp_buf->di_buf.flag = buf->flag; + vpp_buf->di_buf.vf->v4l_mem_handle = (ulong)fb; + + if (!eos && 
!bypass) { + vpp_buf->di_local_buf = buf; + vpp_buf->di_buf.vf->vf_ext = buf->vf; + vpp_buf->di_buf.vf->flag |= VFRAME_FLAG_CONTAIN_POST_FRAME; + } + + kfifo_put(&vpp->out_done_q, vpp_buf); + + if (vpp->is_prog) + kfifo_put(&vpp->input, vpp_buf->inbuf); + + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "vpp_output local done: idx:%d, vf:%px, ext vf:%px, idx:%d, flag(vf:%x di:%x) %s %s, ts:%lld, " + "in:%d, out:%d, vf:%d, in done:%d, out done:%d\n", + fb->buf_idx, + vpp_buf->di_buf.vf, + vpp_buf->di_buf.vf->vf_ext, + vpp_buf->di_buf.vf->index, + vpp_buf->di_buf.vf->flag, + buf->flag, + vpp->is_prog ? "P" : "I", + eos ? "eos" : "", + vpp_buf->di_buf.vf->timestamp, + kfifo_len(&vpp->input), + kfifo_len(&vpp->output), + kfifo_len(&vpp->frame), + kfifo_len(&vpp->in_done_q), + kfifo_len(&vpp->out_done_q)); + + ATRACE_COUNTER("VC_OUT_VPP-2.lc_submit", fb->buf_idx); + + fb->task->submit(fb->task, TASK_TYPE_VPP); + + vpp->out_num[OUTPUT_PORT]++; + vpp->in_num[OUTPUT_PORT]++; + + return DI_ERR_NONE; +} + +static enum DI_ERRORTYPE + v4l_vpp_empty_input_done(struct di_buffer *buf) +{ + struct aml_v4l2_vpp *vpp = buf->caller_data; + struct aml_v4l2_vpp_buf *vpp_buf; + struct vdec_v4l2_buffer *fb = NULL; + bool eos = false; + + if (!vpp || !vpp->ctx) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "fatal %s %d vpp:%px\n", + __func__, __LINE__, vpp); + return DI_ERR_UNDEFINED; + } + + if (vpp->ctx->is_stream_off) { + v4l_dbg(vpp->ctx, V4L_DEBUG_CODEC_EXINFO, + "vpp discard recycle frame %s %d vpp:%p\n", + __func__, __LINE__, vpp); + return DI_ERR_UNDEFINED; + } + + vpp_buf = container_of(buf, struct aml_v4l2_vpp_buf, di_buf); + fb = &vpp_buf->aml_buf->frame_buffer; + eos = (buf->flag & DI_FLAG_EOS); + + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "vpp_input done: idx:%d, vf:%px, idx: %d, flag(vf:%x di:%x) %s %s, ts:%lld, " + "in:%d, out:%d, vf:%d, in done:%d, out done:%d\n", + fb->buf_idx, + buf->vf, + buf->vf->index, + buf->vf->flag, + buf->flag, + vpp->is_prog ? "P" : "I", + eos ? 
"eos" : "", + buf->vf->timestamp, + kfifo_len(&vpp->input), + kfifo_len(&vpp->output), + kfifo_len(&vpp->frame), + kfifo_len(&vpp->in_done_q), + kfifo_len(&vpp->out_done_q)); + + if (!vpp->is_prog) { + /* recycle vf only in non-bypass mode */ + fb->task->recycle(fb->task, TASK_TYPE_VPP); + + kfifo_put(&vpp->input, vpp_buf); + } + + if (vpp->buffer_mode != BUFFER_MODE_ALLOC_BUF) + vpp->in_num[OUTPUT_PORT]++; + + ATRACE_COUNTER("VC_IN_VPP-1.recycle", fb->buf_idx); + + return DI_ERR_NONE; +} + +static enum DI_ERRORTYPE + v4l_vpp_fill_output_done(struct di_buffer *buf) +{ + struct aml_v4l2_vpp *vpp = buf->caller_data; + struct aml_v4l2_vpp_buf *vpp_buf = NULL; + struct vdec_v4l2_buffer *fb = NULL; + bool bypass = false; + bool eos = false; + + if (!vpp || !vpp->ctx) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "fatal %s %d vpp:%px\n", + __func__, __LINE__, vpp); + return DI_ERR_UNDEFINED; + } + + if (vpp->ctx->is_stream_off) { + v4l_dbg(vpp->ctx, V4L_DEBUG_CODEC_EXINFO, + "vpp discard submit frame %s %d vpp:%p\n", + __func__, __LINE__, vpp); + return DI_ERR_UNDEFINED; + } + + vpp_buf = container_of(buf, struct aml_v4l2_vpp_buf, di_buf); + fb = &vpp_buf->aml_buf->frame_buffer; + eos = (buf->flag & DI_FLAG_EOS); + bypass = (buf->flag & DI_FLAG_BUF_BY_PASS); + + /* recovery fb handle. */ + buf->vf->v4l_mem_handle = (ulong)fb; + + kfifo_put(&vpp->out_done_q, vpp_buf); + + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "vpp_output done: idx:%d, vf:%px, idx:%d, flag(vf:%x di:%x) %s %s, ts:%lld, " + "in:%d, out:%d, vf:%d, in done:%d, out done:%d\n", + fb->buf_idx, + buf->vf, + buf->vf->index, + buf->vf->flag, + buf->flag, + vpp->is_prog ? "P" : "I", + eos ? 
"eos" : "", + buf->vf->timestamp, + kfifo_len(&vpp->input), + kfifo_len(&vpp->output), + kfifo_len(&vpp->frame), + kfifo_len(&vpp->in_done_q), + kfifo_len(&vpp->out_done_q)); + + ATRACE_COUNTER("VC_OUT_VPP-2.submit", fb->buf_idx); + + fb->task->submit(fb->task, TASK_TYPE_VPP); + + vpp->out_num[OUTPUT_PORT]++; + + /* count for bypass nr */ + if (vpp->buffer_mode == BUFFER_MODE_ALLOC_BUF) + vpp->in_num[OUTPUT_PORT]++; + + return DI_ERR_NONE; +} + +static void vpp_vf_get(void *caller, struct vframe_s **vf_out) +{ + struct aml_v4l2_vpp *vpp = (struct aml_v4l2_vpp *)caller; + struct aml_v4l2_vpp_buf *vpp_buf = NULL; + struct vdec_v4l2_buffer *fb = NULL; + struct di_buffer *buf = NULL; + struct vframe_s *vf = NULL; + bool bypass = false; + bool eos = false; + + if (!vpp || !vpp->ctx) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "fatal %s %d vpp:%px\n", + __func__, __LINE__, vpp); + return; + } + + if (kfifo_get(&vpp->out_done_q, &vpp_buf)) { + fb = &vpp_buf->aml_buf->frame_buffer; + buf = &vpp_buf->di_buf; + eos = (buf->flag & DI_FLAG_EOS); + bypass = (buf->flag & DI_FLAG_BUF_BY_PASS); + vf = buf->vf; + + if (eos) { + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_DETAIL, + "%s %d got eos\n", + __func__, __LINE__); + vf->type |= VIDTYPE_V4L_EOS; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + } + + if (!eos && !bypass) { + if (vpp->buffer_mode == BUFFER_MODE_ALLOC_BUF) { + attach_DI_buffer(vpp_buf); + } + } + + *vf_out = vf; + + ATRACE_COUNTER("VC_OUT_VPP-3.vf_get", fb->buf_idx); + + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "%s: vf:%px, index:%d, flag(vf:%x di:%x), ts:%lld\n", + __func__, vf, + vf->index, + vf->flag, + buf->flag, + vf->timestamp); + } +} + +static void vpp_vf_put(void *caller, struct vframe_s *vf) +{ + struct aml_v4l2_vpp *vpp = (struct aml_v4l2_vpp *)caller; + struct vdec_v4l2_buffer *fb = NULL; + struct aml_video_dec_buf *aml_buf = NULL; + struct aml_v4l2_vpp_buf *vpp_buf = NULL; + struct di_buffer *buf = NULL; + bool bypass = false; + bool eos = false; + + fb = (struct 
vdec_v4l2_buffer *) vf->v4l_mem_handle; + aml_buf = container_of(fb, struct aml_video_dec_buf, frame_buffer); + + + vpp_buf = (struct aml_v4l2_vpp_buf *) aml_buf->vpp_buf_handle; + buf = &vpp_buf->di_buf; + eos = (buf->flag & DI_FLAG_EOS); + bypass = (buf->flag & DI_FLAG_BUF_BY_PASS); + + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "%s: vf:%px, index:%d, flag(vf:%x di:%x), ts:%lld\n", + __func__, vf, + vf->index, + vf->flag, + buf->flag, + vf->timestamp); + + ATRACE_COUNTER("VC_IN_VPP-0.vf_put", fb->buf_idx); + + if (vpp->is_prog) { + ATRACE_COUNTER("VC_IN_VPP-1.recycle", fb->buf_idx); + fb->task->recycle(fb->task, TASK_TYPE_VPP); + } + + if (!eos && !bypass) { + if (vpp->buffer_mode == BUFFER_MODE_ALLOC_BUF) { + detach_DI_buffer(vpp_buf); + } + } + + mutex_lock(&vpp->output_lock); + kfifo_put(&vpp->frame, vf); + kfifo_put(&vpp->output, vpp_buf); + mutex_unlock(&vpp->output_lock); + up(&vpp->sem_out); +} + +static int aml_v4l2_vpp_thread(void* param) +{ + struct aml_v4l2_vpp* vpp = param; + struct aml_vcodec_ctx *ctx = vpp->ctx; + + v4l_dbg(ctx, V4L_DEBUG_VPP_DETAIL, "enter vpp thread\n"); + while (vpp->running) { + struct aml_v4l2_vpp_buf *in_buf; + struct aml_v4l2_vpp_buf *out_buf = NULL; + struct vframe_s *vf_out = NULL; + struct vdec_v4l2_buffer *fb; + + if (down_interruptible(&vpp->sem_in)) + goto exit; +retry: + if (!vpp->running) + break; + + if (kfifo_is_empty(&vpp->output)) { + if (down_interruptible(&vpp->sem_out)) + goto exit; + goto retry; + } + + if ((vpp->buffer_mode == BUFFER_MODE_ALLOC_BUF) && + (is_di_input_buff_full(vpp) || is_di_output_buff_full(vpp))) { + usleep_range(500, 550); + goto retry; + } + + mutex_lock(&vpp->output_lock); + if (!kfifo_get(&vpp->output, &out_buf)) { + mutex_unlock(&vpp->output_lock); + v4l_dbg(ctx, 0, "vpp can not get output\n"); + goto exit; + } + mutex_unlock(&vpp->output_lock); + + /* bind v4l2 buffers */ + if (!vpp->is_prog && !out_buf->aml_buf) { + struct vdec_v4l2_buffer *out; + + if (!ctx->fb_ops.query(&ctx->fb_ops, 
&vpp->fb_token)) { + usleep_range(500, 550); + mutex_lock(&vpp->output_lock); + kfifo_put(&vpp->output, out_buf); + mutex_unlock(&vpp->output_lock); + goto retry; + } + + if (ctx->fb_ops.alloc(&ctx->fb_ops, vpp->fb_token, &out, AML_FB_REQ_VPP)) { + usleep_range(5000, 5500); + mutex_lock(&vpp->output_lock); + kfifo_put(&vpp->output, out_buf); + mutex_unlock(&vpp->output_lock); + goto retry; + } + + out_buf->aml_buf = container_of(out, + struct aml_video_dec_buf, frame_buffer); + out_buf->aml_buf->vpp_buf_handle = (ulong) out_buf; + v4l_dbg(ctx, V4L_DEBUG_VPP_BUFMGR, + "vpp bind buf:%d to vpp_buf:%px\n", + VPP_BUF_GET_IDX(out_buf), out_buf); + + out->m.mem[0].bytes_used = out->m.mem[0].size; + out->m.mem[1].bytes_used = out->m.mem[1].size; + } + + /* safe to pop in_buf */ + if (!kfifo_get(&vpp->in_done_q, &in_buf)) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "vpp can not get input\n"); + goto exit; + } + + mutex_lock(&vpp->output_lock); + if (!kfifo_get(&vpp->frame, &vf_out)) { + mutex_unlock(&vpp->output_lock); + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "vpp can not get frame\n"); + goto exit; + } + mutex_unlock(&vpp->output_lock); + + if (!vpp->is_prog) { + /* submit I to DI. */ + fb = &out_buf->aml_buf->frame_buffer; + fb->status = FB_ST_VPP; + + memcpy(vf_out, in_buf->di_buf.vf, sizeof(*vf_out)); + memcpy(vf_out->canvas0_config, + in_buf->di_buf.vf->canvas0_config, + 2 * sizeof(struct canvas_config_s)); + + vf_out->canvas0_config[0].phy_addr = fb->m.mem[0].addr; + if (fb->num_planes == 1) + vf_out->canvas0_config[1].phy_addr = + fb->m.mem[0].addr + fb->m.mem[0].offset; + else + vf_out->canvas0_config[1].phy_addr = + fb->m.mem[1].addr; + + if (in_buf->di_buf.flag & DI_FLAG_EOS) + memset(vf_out, 0, sizeof(*vf_out)); + + vf_out->meta_data_size = in_buf->di_buf.vf->meta_data_size; + vf_out->meta_data_buf = in_buf->di_buf.vf->meta_data_buf; + } else { + /* submit P to DI. 
*/ + out_buf->aml_buf = in_buf->aml_buf; + out_buf->aml_buf->vpp_buf_handle = (ulong) out_buf; + + memcpy(vf_out, in_buf->di_buf.vf, sizeof(*vf_out)); + } + + /* fill outbuf parms. */ + out_buf->di_buf.vf = vf_out; + out_buf->di_buf.flag = 0; + out_buf->di_local_buf = NULL; + out_buf->di_buf.caller_data = vpp; + + /* fill inbuf parms. */ + in_buf->di_buf.caller_data = vpp; + + /* + * HWC or SF should hold di buffres refcnt after resolution changed + * that might cause stuck, thus sumbit 10 frames from dec to display directly. + * then frames will be pushed out from these buffer queuen and + * recycle local buffers to DI module. + */ + if (/*(ctx->vpp_cfg.res_chg) && */(vpp->is_prog) && + (vpp->buffer_mode == BUFFER_MODE_ALLOC_BUF)) { + if (vpp->in_num[INPUT_PORT] < vpp_bypass_frames) { + vpp->is_bypass_p = true; + } else { + vpp->is_bypass_p = false; + ctx->vpp_cfg.res_chg = false; + } + } + + v4l_dbg(ctx, V4L_DEBUG_VPP_BUFMGR, + "vpp_handle start: idx:(%d, %d), dec vf:%px/%d, vpp vf:%px/%d, iphy:%lx/%lx %dx%d ophy:%lx/%lx %dx%d, %s %s " + "in:%d, out:%d, vf:%d, in done:%d, out done:%d", + in_buf->aml_buf->frame_buffer.buf_idx, + out_buf->aml_buf->frame_buffer.buf_idx, + in_buf->di_buf.vf, in_buf->di_buf.vf->index, + out_buf->di_buf.vf, VPP_BUF_GET_IDX(out_buf), + in_buf->di_buf.vf->canvas0_config[0].phy_addr, + in_buf->di_buf.vf->canvas0_config[1].phy_addr, + in_buf->di_buf.vf->canvas0_config[0].width, + in_buf->di_buf.vf->canvas0_config[0].height, + vf_out->canvas0_config[0].phy_addr, + vf_out->canvas0_config[1].phy_addr, + vf_out->canvas0_config[0].width, + vf_out->canvas0_config[0].height, + vpp->is_prog ? "P" : "", + vpp->is_bypass_p ? 
"bypass-prog" : "", + kfifo_len(&vpp->input), + kfifo_len(&vpp->output), + kfifo_len(&vpp->frame), + kfifo_len(&vpp->in_done_q), + kfifo_len(&vpp->out_done_q)); + + if (vpp->is_bypass_p) { + ATRACE_COUNTER("V4L_OUT_VPP-1.direct_handle_start", + in_buf->aml_buf->frame_buffer.buf_idx); + out_buf->di_buf.flag = in_buf->di_buf.flag; + out_buf->di_buf.vf->vf_ext = in_buf->di_buf.vf; + + v4l_vpp_fill_output_done(&out_buf->di_buf); + v4l_vpp_empty_input_done(&in_buf->di_buf); + } else { + if (vpp->buffer_mode == BUFFER_MODE_ALLOC_BUF) { + /* + * the flow of DI local buffer: + * empty input -> output done cb -> fetch processing fifo. + */ + ATRACE_COUNTER("VC_OUT_VPP-1.lc_handle_start", + in_buf->aml_buf->frame_buffer.buf_idx); + out_buf->inbuf = in_buf; + kfifo_put(&vpp->processing, out_buf); + + di_empty_input_buffer(vpp->di_handle, &in_buf->di_buf); + } else { + ATRACE_COUNTER("VC_OUT_VPP-1.fill_output_start", + out_buf->aml_buf->frame_buffer.buf_idx); + di_fill_output_buffer(vpp->di_handle, &out_buf->di_buf); + + ATRACE_COUNTER("VC_OUT_VPP-1.empty_input_start", + in_buf->aml_buf->frame_buffer.buf_idx); + di_empty_input_buffer(vpp->di_handle, &in_buf->di_buf); + } + } + vpp->in_num[INPUT_PORT]++; + vpp->out_num[INPUT_PORT]++; + } +exit: + while (!kthread_should_stop()) { + usleep_range(1000, 2000); + } + + v4l_dbg(ctx, V4L_DEBUG_VPP_DETAIL, "exit vpp thread\n"); + + return 0; +} + +int aml_v4l2_vpp_get_buf_num(u32 mode) +{ + if ((mode == VPP_MODE_DI) || + (mode == VPP_MODE_COLOR_CONV) || + (mode == VPP_MODE_NOISE_REDUC)) { + return 4; + } + //TODO: support more modes + return 2; +} + +int aml_v4l2_vpp_reset(struct aml_v4l2_vpp *vpp) +{ + int i; + struct sched_param param = + { .sched_priority = MAX_RT_PRIO - 1 }; + + vpp->running = false; + up(&vpp->sem_in); + up(&vpp->sem_out); + kthread_stop(vpp->task); + + kfifo_reset(&vpp->input); + kfifo_reset(&vpp->output); + kfifo_reset(&vpp->frame); + kfifo_reset(&vpp->out_done_q); + kfifo_reset(&vpp->in_done_q); + 
kfifo_reset(&vpp->processing); + + for (i = 0 ; i < VPP_FRAME_SIZE ; i++) { + memset(&vpp->ivbpool[i], 0, sizeof(struct aml_v4l2_vpp_buf)); + + kfifo_put(&vpp->input, &vpp->ivbpool[i]); + } + + for (i = 0 ; i < vpp->buf_size ; i++) { + memset(&vpp->ovbpool[i], 0, sizeof(struct aml_v4l2_vpp_buf)); + memset(&vpp->vfpool[i], 0, sizeof(struct vframe_s)); + + kfifo_put(&vpp->output, &vpp->ovbpool[i]); + kfifo_put(&vpp->frame, &vpp->vfpool[i]); + } + + vpp->in_num[0] = 0; + vpp->in_num[1] = 0; + vpp->out_num[0] = 0; + vpp->out_num[1] = 0; + vpp->fb_token = 0; + sema_init(&vpp->sem_in, 0); + sema_init(&vpp->sem_out, 0); + + vpp->running = true; + vpp->task = kthread_run(aml_v4l2_vpp_thread, vpp, + "%s", "aml-v4l2-vpp"); + if (IS_ERR(vpp->task)) { + return PTR_ERR(vpp->task); + } + + sched_setscheduler_nocheck(vpp->task, SCHED_FIFO, ¶m); + + v4l_dbg(vpp->ctx, V4L_DEBUG_CODEC_PRINFO, "vpp wrapper reset.\n"); + + return 0; + +} +EXPORT_SYMBOL(aml_v4l2_vpp_reset); + +int aml_v4l2_vpp_init( + struct aml_vcodec_ctx *ctx, + struct aml_vpp_cfg_infos *cfg, + struct aml_v4l2_vpp** vpp_handle) +{ + struct di_init_parm init; + u32 buf_size; + int i, ret; + struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; + struct aml_v4l2_vpp *vpp; + u32 work_mode = cfg->mode; + + if (!cfg || work_mode > VPP_MODE_MAX || !ctx || !vpp_handle) + return -EINVAL; + if (cfg->fmt != V4L2_PIX_FMT_NV12 && cfg->fmt != V4L2_PIX_FMT_NV12M && + cfg->fmt != V4L2_PIX_FMT_NV21 && cfg->fmt != V4L2_PIX_FMT_NV21M) + return -EINVAL; + + vpp = kzalloc(sizeof(*vpp), GFP_KERNEL); + if (!vpp) + return -ENOMEM; + + vpp->work_mode = work_mode; + if (vpp->work_mode >= VPP_MODE_DI_LOCAL && + vpp->work_mode <= VPP_MODE_NOISE_REDUC_LOCAL) + vpp->buffer_mode = BUFFER_MODE_ALLOC_BUF; + else + vpp->buffer_mode = BUFFER_MODE_USE_BUF; + + init.work_mode = WORK_MODE_PRE_POST; + init.buffer_mode = vpp->buffer_mode; + init.ops.fill_output_done = v4l_vpp_fill_output_done; + init.ops.empty_input_done = 
v4l_vpp_empty_input_done; + init.caller_data = (void *)vpp; + + if (vpp->buffer_mode == BUFFER_MODE_ALLOC_BUF) { + init.ops.fill_output_done = + v4l_vpp_fill_output_done_alloc_buffer; + } + + if (vpp->buffer_mode == BUFFER_MODE_ALLOC_BUF) + init.output_format = DI_OUTPUT_BY_DI_DEFINE; + else if ((vpp->buffer_mode == BUFFER_MODE_USE_BUF) && + ((cfg->fmt == V4L2_PIX_FMT_NV21M) || (cfg->fmt == V4L2_PIX_FMT_NV21))) + init.output_format = DI_OUTPUT_NV21 | DI_OUTPUT_LINEAR; + else if ((vpp->buffer_mode == BUFFER_MODE_USE_BUF) && + ((cfg->fmt == V4L2_PIX_FMT_NV12M) || (cfg->fmt == V4L2_PIX_FMT_NV12))) + init.output_format = DI_OUTPUT_NV12 | DI_OUTPUT_LINEAR; + else /* AFBC deocde case, NV12 as default */ + init.output_format = DI_OUTPUT_NV12 | DI_OUTPUT_LINEAR; + + if (cfg->is_drm) + init.output_format |= DI_OUTPUT_TVP; + + vpp->di_handle = di_create_instance(init); + if (vpp->di_handle < 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "di_create_instance fail\n"); + ret = -EINVAL; + goto error; + } + + INIT_KFIFO(vpp->input); + INIT_KFIFO(vpp->output); + INIT_KFIFO(vpp->frame); + INIT_KFIFO(vpp->out_done_q); + INIT_KFIFO(vpp->in_done_q); + INIT_KFIFO(vpp->processing); + + vpp->ctx = ctx; + vpp->is_prog = cfg->is_prog; + vpp->is_bypass_p = cfg->is_bypass_p; + + buf_size = vpp->is_prog ? 
16 : cfg->buf_size; + vpp->buf_size = buf_size; + + /* setup output fifo */ + ret = kfifo_alloc(&vpp->output, buf_size, GFP_KERNEL); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc output fifo fail.\n"); + ret = -ENOMEM; + goto error2; + } + + vpp->ovbpool = vzalloc(buf_size * sizeof(*vpp->ovbpool)); + if (!vpp->ovbpool) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc output vb pool fail.\n"); + ret = -ENOMEM; + goto error3; + } + + /* setup vframe fifo */ + ret = kfifo_alloc(&vpp->frame, buf_size, GFP_KERNEL); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc vpp vframe fifo fail.\n"); + ret = -ENOMEM; + goto error4; + } + + vpp->vfpool = vzalloc(buf_size * sizeof(*vpp->vfpool)); + if (!vpp->vfpool) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc vf pool fail.\n"); + ret = -ENOMEM; + goto error5; + } + + /* setup processing fifo */ + ret = kfifo_alloc(&vpp->processing, buf_size, GFP_KERNEL); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc processing fifo fail.\n"); + ret = -ENOMEM; + goto error6; + } + + ret = kfifo_alloc(&vpp->input, VPP_FRAME_SIZE, GFP_KERNEL); + if (ret) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc input fifo fail.\n"); + ret = -ENOMEM; + goto error7; + } + + vpp->ivbpool = vzalloc(VPP_FRAME_SIZE * sizeof(*vpp->ivbpool)); + if (!vpp->ivbpool) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "alloc input vb pool fail.\n"); + ret = -ENOMEM; + goto error8; + } + + for (i = 0 ; i < VPP_FRAME_SIZE ; i++) { + kfifo_put(&vpp->input, &vpp->ivbpool[i]); + } + + for (i = 0 ; i < buf_size ; i++) { + kfifo_put(&vpp->output, &vpp->ovbpool[i]); + kfifo_put(&vpp->frame, &vpp->vfpool[i]); + } + + mutex_init(&vpp->output_lock); + sema_init(&vpp->sem_in, 0); + sema_init(&vpp->sem_out, 0); + + vpp->running = true; + vpp->task = kthread_run(aml_v4l2_vpp_thread, vpp, + "aml-%s", "aml-v4l2-vpp"); + if (IS_ERR(vpp->task)) { + ret = PTR_ERR(vpp->task); + goto error9; + } + sched_setscheduler_nocheck(vpp->task, SCHED_FIFO, ¶m); + + 
vpp->di_ibuf_num = di_get_input_buffer_num(vpp->di_handle); + vpp->di_obuf_num = di_get_output_buffer_num(vpp->di_handle); + + *vpp_handle = vpp; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, + "vpp_wrapper init bsize:%d, di(i:%d, o:%d), wkm:%x, bm:%x, fmt:%x, drm:%d, prog:%d, byp:%d, local:%d, NR:%d\n", + vpp->buf_size, + vpp->di_ibuf_num, + vpp->di_obuf_num, + vpp->work_mode, + vpp->buffer_mode, + init.output_format, + cfg->is_drm, + cfg->is_prog, + cfg->is_bypass_p, + cfg->enable_local_buf, + cfg->enable_nr); + + return 0; + +error9: + vfree(vpp->ivbpool); +error8: + kfifo_free(&vpp->input); +error7: + kfifo_free(&vpp->processing); +error6: + vfree(vpp->vfpool); +error5: + kfifo_free(&vpp->frame); +error4: + vfree(vpp->ovbpool); +error3: + kfifo_free(&vpp->output); +error2: + di_destroy_instance(vpp->di_handle); +error: + kfree(vpp); + return ret; +} +EXPORT_SYMBOL(aml_v4l2_vpp_init); + +int aml_v4l2_vpp_destroy(struct aml_v4l2_vpp* vpp) +{ + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_DETAIL, + "vpp destroy begin\n"); + vpp->running = false; + up(&vpp->sem_in); + up(&vpp->sem_out); + kthread_stop(vpp->task); + + di_destroy_instance(vpp->di_handle); + /* no more vpp callback below this line */ + + if (vpp->buffer_mode == BUFFER_MODE_ALLOC_BUF) + release_DI_buff(vpp); + + kfifo_free(&vpp->processing); + kfifo_free(&vpp->frame); + vfree(vpp->vfpool); + kfifo_free(&vpp->output); + vfree(vpp->ovbpool); + kfifo_free(&vpp->input); + vfree(vpp->ivbpool); + mutex_destroy(&vpp->output_lock); + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_DETAIL, + "vpp_wrapper destroy done\n"); + kfree(vpp); + + return 0; +} +EXPORT_SYMBOL(aml_v4l2_vpp_destroy); + +static int aml_v4l2_vpp_push_vframe(struct aml_v4l2_vpp* vpp, struct vframe_s *vf) +{ + struct aml_v4l2_vpp_buf *in_buf; + struct vdec_v4l2_buffer *fb = NULL; + + if (!vpp) + return -EINVAL; + + if (!kfifo_get(&vpp->input, &in_buf)) { + v4l_dbg(vpp->ctx, V4L_DEBUG_CODEC_ERROR, + "cat not get free input buffer.\n"); + return -1; + } + +#if 0 //to debug di 
by frame + if (vpp->in_num[INPUT_PORT] > 2) + return 0; + if (vpp->in_num[INPUT_PORT] == 2) + vf->type |= VIDTYPE_V4L_EOS; +#endif + + in_buf->di_buf.vf = vf; + in_buf->di_buf.flag = 0; + if (vf->type & VIDTYPE_V4L_EOS) { + u32 dw_mode = VDEC_DW_NO_AFBC; + + in_buf->di_buf.flag |= DI_FLAG_EOS; + + if (vdec_if_get_param(vpp->ctx, GET_PARAM_DW_MODE, &dw_mode)) + return -1; + + vf->type |= vpp->ctx->vpp_cfg.is_prog ? + VIDTYPE_PROGRESSIVE : + VIDTYPE_INTERLACE; + + if (dw_mode != VDEC_DW_NO_AFBC) + vf->type |= VIDTYPE_COMPRESS; + } + + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + in_buf->aml_buf = container_of(fb, struct aml_video_dec_buf, frame_buffer); + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) { + if (vf->canvas0_config[0].block_mode == CANVAS_BLKMODE_LINEAR) { + if ((vpp->ctx->output_pix_fmt != V4L2_PIX_FMT_H264) && + (vpp->ctx->output_pix_fmt != V4L2_PIX_FMT_MPEG1) && + (vpp->ctx->output_pix_fmt != V4L2_PIX_FMT_MPEG2) && + (vpp->ctx->output_pix_fmt != V4L2_PIX_FMT_MPEG4) && + (vpp->ctx->output_pix_fmt != V4L2_PIX_FMT_MJPEG)) { + vf->flag |= VFRAME_FLAG_VIDEO_LINEAR; + } + else { + if (fb->status == FB_ST_GE2D) + vf->flag |= VFRAME_FLAG_VIDEO_LINEAR; + } + } + } else { + if (vf->canvas0_config[0].block_mode == CANVAS_BLKMODE_LINEAR) + vf->flag |= VFRAME_FLAG_VIDEO_LINEAR; + } + + v4l_dbg(vpp->ctx, V4L_DEBUG_VPP_BUFMGR, + "vpp_push_vframe: idx:%d, vf:%px, idx:%d, type:%x, ts:%lld\n", + fb->buf_idx, vf, vf->index, vf->type, vf->timestamp); + + do { + unsigned int dw_mode = VDEC_DW_NO_AFBC; + struct file *fp; + + if (!dump_vpp_input || vpp->ctx->is_drm_mode) + break; + if (vdec_if_get_param(vpp->ctx, GET_PARAM_DW_MODE, &dw_mode)) + break; + if (dw_mode == VDEC_DW_AFBC_ONLY) + break; + + fp = filp_open("/data/dec_dump_before.raw", + O_CREAT | O_RDWR | O_LARGEFILE | O_APPEND, 0600); + if (!IS_ERR(fp)) { + struct vb2_buffer *vb = &in_buf->aml_buf->vb.vb2_buf; + + kernel_write(fp,vb2_plane_vaddr(vb, 0),vb->planes[0].length, 0); + if 
(in_buf->aml_buf->frame_buffer.num_planes == 2) + kernel_write(fp,vb2_plane_vaddr(vb, 1), + vb->planes[1].length, 0); + dump_vpp_input--; + filp_close(fp, NULL); + } + } while(0); + + ATRACE_COUNTER("VC_OUT_VPP-0.receive", fb->buf_idx); + + kfifo_put(&vpp->in_done_q, in_buf); + up(&vpp->sem_in); + + return 0; +} + +static void fill_vpp_buf_cb(void *v4l_ctx, void *fb_ctx) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)v4l_ctx; + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *)fb_ctx; + int ret = -1; + + ret = aml_v4l2_vpp_push_vframe(ctx->vpp, fb->vframe); + if (ret < 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "vpp push vframe err, ret: %d\n", ret); + } +} + +static struct task_ops_s vpp_ops = { + .type = TASK_TYPE_VPP, + .get_vframe = vpp_vf_get, + .put_vframe = vpp_vf_put, + .fill_buffer = fill_vpp_buf_cb, +}; + +struct task_ops_s *get_vpp_ops(void) +{ + return &vpp_ops; +} +EXPORT_SYMBOL(get_vpp_ops); +
diff --git a/drivers/amvdec_ports/aml_vcodec_vpp.h b/drivers/amvdec_ports/aml_vcodec_vpp.h new file mode 100644 index 0000000..7a5771d --- /dev/null +++ b/drivers/amvdec_ports/aml_vcodec_vpp.h
@@ -0,0 +1,112 @@ +/* +* Copyright (C) 2020 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef _AML_VCODEC_VPP_H_ +#define _AML_VCODEC_VPP_H_ + +#define SUPPORT_V4L_VPP + +#include <linux/kfifo.h> +#ifdef SUPPORT_V4L_VPP +#include <linux/amlogic/media/di/di_interface.h> +#endif +#include "aml_vcodec_drv.h" +#include "aml_vcodec_dec.h" + +enum vpp_work_mode { + VPP_MODE_DI, + VPP_MODE_COLOR_CONV, + VPP_MODE_NOISE_REDUC, + VPP_MODE_DI_LOCAL = 0x81, + VPP_MODE_COLOR_CONV_LOCAL = 0x82, + VPP_MODE_NOISE_REDUC_LOCAL = 0x83, + VPP_MODE_MAX = 0xff +}; + +#define VPP_FRAME_SIZE 64 + +struct aml_v4l2_vpp_buf { +#ifdef SUPPORT_V4L_VPP + struct di_buffer di_buf; + struct di_buffer *di_local_buf; +#endif + struct aml_video_dec_buf *aml_buf; + struct aml_v4l2_vpp_buf *inbuf; +}; + +struct aml_v4l2_vpp { + int di_handle; /* handle of DI */ + struct aml_vcodec_ctx *ctx; + u32 buf_size; /* buffer size for vpp */ + u32 work_mode; /* enum vpp_work_mode */ + u32 buffer_mode; + + DECLARE_KFIFO_PTR(input, typeof(struct aml_v4l2_vpp_buf*)); + DECLARE_KFIFO_PTR(output, typeof(struct aml_v4l2_vpp_buf*)); + DECLARE_KFIFO_PTR(processing, typeof(struct aml_v4l2_vpp_buf*)); + DECLARE_KFIFO_PTR(frame, typeof(struct vframe_s *)); + DECLARE_KFIFO(out_done_q, struct 
aml_v4l2_vpp_buf *, VPP_FRAME_SIZE); + DECLARE_KFIFO(in_done_q, struct aml_v4l2_vpp_buf *, VPP_FRAME_SIZE); + + struct vframe_s *vfpool; + struct aml_v4l2_vpp_buf *ovbpool; + struct aml_v4l2_vpp_buf *ivbpool; + struct task_struct *task; + bool running; + struct semaphore sem_in, sem_out; + + /* In p to i transition, output/frame can be multi writer */ + struct mutex output_lock; + + /* for debugging */ + /* + * in[0] --> vpp <-- in[1] + * out[0]<-- vpp --> out[1] + */ + int in_num[2]; + int out_num[2]; + ulong fb_token; + + bool is_prog; + bool is_bypass_p; + int di_ibuf_num; + int di_obuf_num; +}; + +struct task_ops_s *get_vpp_ops(void); + +#ifdef SUPPORT_V4L_VPP +/* get number of buffer needed for a working mode */ +int aml_v4l2_vpp_get_buf_num(u32 mode); +int aml_v4l2_vpp_init( + struct aml_vcodec_ctx *ctx, + struct aml_vpp_cfg_infos *cfg, + struct aml_v4l2_vpp** vpp_handle); +int aml_v4l2_vpp_destroy(struct aml_v4l2_vpp* vpp); +int aml_v4l2_vpp_reset(struct aml_v4l2_vpp *vpp); +#else +static inline int aml_v4l2_vpp_get_buf_num(u32 mode) { return -1; } +static inline int aml_v4l2_vpp_init( + struct aml_vcodec_ctx *ctx, + struct aml_vpp_cfg_infos *cfg, + struct aml_v4l2_vpp** vpp_handle) { return -1; } +static inline int aml_v4l2_vpp_destroy(struct aml_v4l2_vpp* vpp) { return -1; } +#endif + +#endif
diff --git a/drivers/amvdec_ports/decoder/aml_h264_parser.c b/drivers/amvdec_ports/decoder/aml_h264_parser.c new file mode 100644 index 0000000..d0d0198 --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_h264_parser.c
@@ -0,0 +1,713 @@ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/string.h> + +#include "aml_h264_parser.h" +#include "../utils/get_bits.h" +#include "../utils/put_bits.h" +#include "../utils/golomb.h" +#include "../utils/common.h" +#include "utils.h" + +#define MAX_DELAYED_PIC_COUNT (16) +#define MAX_LOG2_MAX_FRAME_NUM (12 + 4) +#define MIN_LOG2_MAX_FRAME_NUM (4) +#define MAX_SPS_COUNT (32) +#define EXTENDED_SAR (255) + +static const struct rational h264_pixel_aspect[17] = { + { 0, 1 }, + { 1, 1 }, + { 12, 11 }, + { 10, 11 }, + { 16, 11 }, + { 40, 33 }, + { 24, 11 }, + { 20, 11 }, + { 32, 11 }, + { 80, 33 }, + { 18, 11 }, + { 15, 11 }, + { 64, 33 }, + { 160, 99 }, + { 4, 3 }, + { 3, 2 }, + { 2, 1 }, +}; + +/* maximum number of MBs in the DPB for a given level */ +static const int level_max_dpb_mbs[][2] = { + { 10, 396 }, + { 11, 900 }, + { 12, 2376 }, + { 13, 2376 }, + { 20, 2376 }, + { 21, 4752 }, + { 22, 8100 }, + { 30, 8100 }, + { 31, 18000 }, + { 32, 20480 }, + { 40, 32768 }, + { 41, 32768 }, + { 42, 34816 }, + { 50, 110400 }, + { 51, 184320 }, + { 52, 184320 }, +}; + +static const u8 default_scaling4[2][16] = { + { 6, 13, 20, 28, 13, 20, 28, 32, + 20, 28, 32, 37, 28, 32, 37, 42}, + { 10, 14, 20, 24, 14, 20, 24, 27, + 20, 24, 27, 30, 24, 27, 30, 34 } +}; + +static const u8 default_scaling8[2][64] = { + { 6, 10, 13, 16, 18, 23, 25, 27, + 10, 11, 16, 18, 23, 25, 27, 29, + 13, 16, 18, 23, 25, 27, 29, 31, + 16, 18, 23, 25, 27, 29, 31, 33, + 18, 23, 25, 27, 29, 31, 33, 36, + 23, 25, 27, 29, 31, 33, 36, 38, + 25, 27, 29, 31, 33, 36, 38, 40, + 27, 29, 31, 33, 36, 38, 40, 42 }, + { 9, 13, 15, 17, 19, 21, 22, 24, + 13, 13, 17, 19, 21, 22, 24, 25, + 15, 17, 19, 21, 22, 24, 25, 27, + 17, 19, 21, 22, 24, 25, 27, 28, + 19, 21, 22, 24, 25, 27, 28, 30, + 21, 22, 24, 25, 27, 28, 30, 32, + 22, 24, 25, 27, 28, 30, 32, 33, + 24, 25, 27, 28, 30, 32, 33, 35 } +}; + +extern const u8 ff_zigzag_scan[16 + 1]; 
+extern const u8 ff_zigzag_direct[64]; + +static int decode_scaling_list(struct get_bits_context *gb, + u8 *factors, int size, + const u8 *jvt_list, + const u8 *fallback_list) +{ + int i, last = 8, next = 8; + const u8 *scan = size == 16 ? ff_zigzag_scan : ff_zigzag_direct; + + if (!get_bits1(gb)) /* matrix not written, we use the predicted one */ + memcpy(factors, fallback_list, size * sizeof(u8)); + else + for (i = 0; i < size; i++) { + if (next) { + int v = get_se_golomb(gb); + /*if (v < -128 || v > 127) { //JM19 has not check. + pr_err( "delta scale %d is invalid\n", v); + return -1; + }*/ + next = (last + v) & 0xff; + } + if (!i && !next) { /* matrix not written, we use the preset one */ + memcpy(factors, jvt_list, size * sizeof(u8)); + break; + } + last = factors[scan[i]] = next ? next : last; + } + return 0; +} + +/* returns non zero if the provided SPS scaling matrix has been filled */ +static int decode_scaling_matrices(struct get_bits_context *gb, + const struct h264_SPS_t *sps, + const struct h264_PPS_t *pps, int is_sps, + u8(*scaling_matrix4)[16], + u8(*scaling_matrix8)[64]) +{ + int ret = 0; + int fallback_sps = !is_sps && sps->scaling_matrix_present; + const u8 *fallback[4] = { + fallback_sps ? sps->scaling_matrix4[0] : default_scaling4[0], + fallback_sps ? sps->scaling_matrix4[3] : default_scaling4[1], + fallback_sps ? sps->scaling_matrix8[0] : default_scaling8[0], + fallback_sps ? 
sps->scaling_matrix8[3] : default_scaling8[1] + }; + + if (get_bits1(gb)) { + ret |= decode_scaling_list(gb, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]); // Intra, Y + ret |= decode_scaling_list(gb, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr + ret |= decode_scaling_list(gb, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb + ret |= decode_scaling_list(gb, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]); // Inter, Y + ret |= decode_scaling_list(gb, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr + ret |= decode_scaling_list(gb, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb + if (is_sps || pps->transform_8x8_mode) { + ret |= decode_scaling_list(gb, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y + ret |= decode_scaling_list(gb, scaling_matrix8[3], 64, default_scaling8[1], fallback[3]); // Inter, Y + if (sps->chroma_format_idc == 3) { + ret |= decode_scaling_list(gb, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr + ret |= decode_scaling_list(gb, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr + ret |= decode_scaling_list(gb, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb + ret |= decode_scaling_list(gb, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb + } + } + if (!ret) + ret = is_sps; + } + + return ret; +} + +static int decode_hrd_parameters(struct get_bits_context *gb, + struct h264_SPS_t *sps) +{ + int cpb_count, i; + + cpb_count = get_ue_golomb_31(gb) + 1; + if (cpb_count > 32U) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "cpb_count %d invalid\n", cpb_count); + return -1; + } + + get_bits(gb, 4); /* bit_rate_scale */ + get_bits(gb, 4); /* cpb_size_scale */ + for (i = 0; i < cpb_count; i++) { + get_ue_golomb_long(gb); /* bit_rate_value_minus1 */ + get_ue_golomb_long(gb); 
/* cpb_size_value_minus1 */ + get_bits1(gb); /* cbr_flag */ + } + + sps->initial_cpb_removal_delay_length = get_bits(gb, 5) + 1; + sps->cpb_removal_delay_length = get_bits(gb, 5) + 1; + sps->dpb_output_delay_length = get_bits(gb, 5) + 1; + sps->time_offset_length = get_bits(gb, 5); + sps->cpb_cnt = cpb_count; + + return 0; +} + +static int decode_vui_parameters(struct get_bits_context *gb, struct h264_SPS_t *sps) +{ + int aspect_ratio_info_present_flag; + u32 aspect_ratio_idc; + + aspect_ratio_info_present_flag = get_bits1(gb); + + if (aspect_ratio_info_present_flag) { + aspect_ratio_idc = get_bits(gb, 8); + if (aspect_ratio_idc == EXTENDED_SAR) { + sps->sar.num = get_bits(gb, 16); + sps->sar.den = get_bits(gb, 16); + } else if (aspect_ratio_idc < ARRAY_SIZE(h264_pixel_aspect)) { + sps->sar = h264_pixel_aspect[aspect_ratio_idc]; + } else { + return -1; + } + } else { + sps->sar.num = + sps->sar.den = 0; + } + + if (get_bits1(gb)) /* overscan_info_present_flag */ + get_bits1(gb); /* overscan_appropriate_flag */ + + sps->video_signal_type_present_flag = get_bits1(gb); + if (sps->video_signal_type_present_flag) { + get_bits(gb, 3); /* video_format */ + sps->full_range = get_bits1(gb); /* video_full_range_flag */ + + sps->colour_description_present_flag = get_bits1(gb); + if (sps->colour_description_present_flag) { + sps->color_primaries = get_bits(gb, 8); /* colour_primaries */ + sps->color_trc = get_bits(gb, 8); /* transfer_characteristics */ + sps->colorspace = get_bits(gb, 8); /* matrix_coefficients */ + + // Set invalid values to "unspecified" + if (!av_color_primaries_name(sps->color_primaries)) + sps->color_primaries = AVCOL_PRI_UNSPECIFIED; + if (!av_color_transfer_name(sps->color_trc)) + sps->color_trc = AVCOL_TRC_UNSPECIFIED; + if (!av_color_space_name(sps->colorspace)) + sps->colorspace = AVCOL_SPC_UNSPECIFIED; + } + } + + /* chroma_location_info_present_flag */ + if (get_bits1(gb)) { + /* chroma_sample_location_type_top_field */ + 
//avctx->chroma_sample_location = get_ue_golomb(gb) + 1; + get_ue_golomb(gb); /* chroma_sample_location_type_bottom_field */ + } + + if (show_bits1(gb) && get_bits_left(gb) < 10) { + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Truncated VUI\n"); + return 0; + } + + sps->timing_info_present_flag = get_bits1(gb); + if (sps->timing_info_present_flag) { + unsigned num_units_in_tick = get_bits_long(gb, 32); + unsigned time_scale = get_bits_long(gb, 32); + if (!num_units_in_tick || !time_scale) { + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, + "time_scale/num_units_in_tick invalid or unsupported (%u/%u)\n", + time_scale, num_units_in_tick); + sps->timing_info_present_flag = 0; + } else { + sps->num_units_in_tick = num_units_in_tick; + sps->time_scale = time_scale; + } + sps->fixed_frame_rate_flag = get_bits1(gb); + } + + sps->nal_hrd_parameters_present_flag = get_bits1(gb); + if (sps->nal_hrd_parameters_present_flag) + if (decode_hrd_parameters(gb, sps) < 0) + return -1; + sps->vcl_hrd_parameters_present_flag = get_bits1(gb); + if (sps->vcl_hrd_parameters_present_flag) + if (decode_hrd_parameters(gb, sps) < 0) + return -1; + if (sps->nal_hrd_parameters_present_flag || + sps->vcl_hrd_parameters_present_flag) + get_bits1(gb); /* low_delay_hrd_flag */ + sps->pic_struct_present_flag = get_bits1(gb); + if (!get_bits_left(gb)) + return 0; + sps->bitstream_restriction_flag = get_bits1(gb); + if (sps->bitstream_restriction_flag) { + get_bits1(gb); /* motion_vectors_over_pic_boundaries_flag */ + get_ue_golomb(gb); /* max_bytes_per_pic_denom */ + get_ue_golomb(gb); /* max_bits_per_mb_denom */ + get_ue_golomb(gb); /* log2_max_mv_length_horizontal */ + get_ue_golomb(gb); /* log2_max_mv_length_vertical */ + sps->num_reorder_frames = get_ue_golomb(gb); + sps->max_dec_frame_buffering = get_ue_golomb(gb); /*max_dec_frame_buffering*/ + + if (get_bits_left(gb) < 0) { + sps->num_reorder_frames = 0; + sps->bitstream_restriction_flag = 0; + } + + if (sps->num_reorder_frames > 16U + /* max_dec_frame_buffering 
|| max_dec_frame_buffering > 16 */) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "Clipping illegal num_reorder_frames %d\n", + sps->num_reorder_frames); + sps->num_reorder_frames = 16; + return -1; + } + } + + return 0; +} + +static int aml_h264_parser_sps(struct get_bits_context *gb, struct h264_SPS_t *sps) +{ + int ret; + u32 sps_id; + int profile_idc, level_idc, constraint_set_flags = 0; + int i, log2_max_frame_num_minus4; + + profile_idc = get_bits(gb, 8); + constraint_set_flags |= get_bits1(gb) << 0; // constraint_set0_flag + constraint_set_flags |= get_bits1(gb) << 1; // constraint_set1_flag + constraint_set_flags |= get_bits1(gb) << 2; // constraint_set2_flag + constraint_set_flags |= get_bits1(gb) << 3; // constraint_set3_flag + constraint_set_flags |= get_bits1(gb) << 4; // constraint_set4_flag + constraint_set_flags |= get_bits1(gb) << 5; // constraint_set5_flag + skip_bits(gb, 2); // reserved_zero_2bits + level_idc = get_bits(gb, 8); + sps_id = get_ue_golomb_31(gb); + + if (sps_id >= MAX_SPS_COUNT) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "sps_id %u out of range\n", sps_id); + goto fail; + } + + sps->sps_id = sps_id; + sps->time_offset_length = 24; + sps->profile_idc = profile_idc; + sps->constraint_set_flags = constraint_set_flags; + sps->level_idc = level_idc; + sps->full_range = -1; + + memset(sps->scaling_matrix4, 16, sizeof(sps->scaling_matrix4)); + memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8)); + sps->scaling_matrix_present = 0; + sps->colorspace = 2; //AVCOL_SPC_UNSPECIFIED + + if (sps->profile_idc == 100 || // High profile + sps->profile_idc == 110 || // High10 profile + sps->profile_idc == 122 || // High422 profile + sps->profile_idc == 244 || // High444 Predictive profile + sps->profile_idc == 44 || // Cavlc444 profile + sps->profile_idc == 83 || // Scalable Constrained High profile (SVC) + sps->profile_idc == 86 || // Scalable High Intra profile (SVC) + sps->profile_idc == 118 || // Stereo High profile (MVC) + sps->profile_idc == 
128 || // Multiview High profile (MVC) + sps->profile_idc == 138 || // Multiview Depth High profile (MVCD) + sps->profile_idc == 144) { // old High444 profile + sps->chroma_format_idc = get_ue_golomb_31(gb); + + if (sps->chroma_format_idc > 3U) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "chroma_format_idc %u\n", sps->chroma_format_idc); + goto fail; + } else if (sps->chroma_format_idc == 3) { + sps->residual_color_transform_flag = get_bits1(gb); + if (sps->residual_color_transform_flag) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "separate color planes are not supported\n"); + goto fail; + } + } + + sps->bit_depth_luma = get_ue_golomb(gb) + 8; + sps->bit_depth_chroma = get_ue_golomb(gb) + 8; + if (sps->bit_depth_chroma != sps->bit_depth_luma) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "Different chroma and luma bit depth\n"); + goto fail; + } + + if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 || + sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "illegal bit depth value (%d, %d)\n", + sps->bit_depth_luma, sps->bit_depth_chroma); + goto fail; + } + + sps->transform_bypass = get_bits1(gb); + ret = decode_scaling_matrices(gb, sps, NULL, 1, + sps->scaling_matrix4, sps->scaling_matrix8); + if (ret < 0) + goto fail; + sps->scaling_matrix_present |= ret; + } else { + sps->chroma_format_idc = 1; + sps->bit_depth_luma = 8; + sps->bit_depth_chroma = 8; + } + + log2_max_frame_num_minus4 = get_ue_golomb(gb); + if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 || + log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "log2_max_frame_num_minus4 out of range (0-12): %d\n", + log2_max_frame_num_minus4); + goto fail; + } + sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4; + + sps->poc_type = get_ue_golomb_31(gb); + if (sps->poc_type == 0) { // FIXME #define + u32 t = get_ue_golomb(gb); + if (t > 12) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "log2_max_poc_lsb (%d) is out of 
range\n", t); + goto fail; + } + sps->log2_max_poc_lsb = t + 4; + } else if (sps->poc_type == 1) { // FIXME #define + sps->delta_pic_order_always_zero_flag = get_bits1(gb); + sps->offset_for_non_ref_pic = get_se_golomb_long(gb); + sps->offset_for_top_to_bottom_field = get_se_golomb_long(gb); + + sps->poc_cycle_length = get_ue_golomb(gb); + if ((u32)sps->poc_cycle_length >= ARRAY_SIZE(sps->offset_for_ref_frame)) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "poc_cycle_length overflow %d\n", sps->poc_cycle_length); + goto fail; + } + + for (i = 0; i < sps->poc_cycle_length; i++) + sps->offset_for_ref_frame[i] = get_se_golomb_long(gb); + } else if (sps->poc_type != 2) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "illegal POC type %d\n", sps->poc_type); + goto fail; + } + + sps->ref_frame_count = get_ue_golomb_31(gb); + if (sps->ref_frame_count > MAX_DELAYED_PIC_COUNT) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "too many reference frames %d\n", sps->ref_frame_count); + goto fail; + } + sps->gaps_in_frame_num_allowed_flag = get_bits1(gb); + sps->mb_width = get_ue_golomb(gb) + 1; + sps->mb_height = get_ue_golomb(gb) + 1; + + sps->frame_mbs_only_flag = get_bits1(gb); + + if (sps->mb_height >= INT_MAX / 2U) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "height overflow\n"); + goto fail; + } + sps->mb_height *= 2 - sps->frame_mbs_only_flag; + + if (!sps->frame_mbs_only_flag) + sps->mb_aff = get_bits1(gb); + else + sps->mb_aff = 0; + + if ((u32)sps->mb_width >= INT_MAX / 16 || + (u32)sps->mb_height >= INT_MAX / 16) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "mb_width/height overflow\n"); + goto fail; + } + + sps->direct_8x8_inference_flag = get_bits1(gb); + + sps->crop = get_bits1(gb); + if (sps->crop) { + u32 crop_left = get_ue_golomb(gb); + u32 crop_right = get_ue_golomb(gb); + u32 crop_top = get_ue_golomb(gb); + u32 crop_bottom = get_ue_golomb(gb); + int width = 16 * sps->mb_width; + int height = 16 * sps->mb_height; + int vsub = (sps->chroma_format_idc == 1) ? 
1 : 0; + int hsub = (sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2) ? 1 : 0; + int step_x = 1 << hsub; + int step_y = (2 - sps->frame_mbs_only_flag) << vsub; + + if (crop_left > (u32)INT_MAX / 4 / step_x || + crop_right > (u32)INT_MAX / 4 / step_x || + crop_top > (u32)INT_MAX / 4 / step_y || + crop_bottom > (u32)INT_MAX / 4 / step_y || + (crop_left + crop_right ) * step_x >= width || + (crop_top + crop_bottom) * step_y >= height) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "crop values invalid %u %u %u %u / %d %d\n", + crop_left, crop_right, crop_top, crop_bottom, width, height); + goto fail; + } + + sps->crop_left = crop_left * step_x; + sps->crop_right = crop_right * step_x; + sps->crop_top = crop_top * step_y; + sps->crop_bottom = crop_bottom * step_y; + } else { + sps->crop_left = + sps->crop_right = + sps->crop_top = + sps->crop_bottom = + sps->crop = 0; + } + + sps->vui_parameters_present_flag = get_bits1(gb); + if (sps->vui_parameters_present_flag) { + int ret = decode_vui_parameters(gb, sps); + if (ret < 0) + goto fail; + } + + if (get_bits_left(gb) < 0) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "Overread %s by %d bits\n", + sps->vui_parameters_present_flag ? 
"VUI" : "SPS", -get_bits_left(gb)); + /*goto out;*/ + } + +#if 0 + /* if the maximum delay is not stored in the SPS, derive it based on the level */ + if (!sps->bitstream_restriction_flag && sps->ref_frame_count) { + sps->num_reorder_frames = MAX_DELAYED_PIC_COUNT - 1; + for (i = 0; i < ARRAY_SIZE(level_max_dpb_mbs); i++) { + if (level_max_dpb_mbs[i][0] == sps->level_idc) { + sps->num_reorder_frames = + MIN(level_max_dpb_mbs[i][1] / (sps->mb_width * sps->mb_height), + sps->num_reorder_frames); + break; + } + } + } +#endif + + sps->num_reorder_frames = MAX_DELAYED_PIC_COUNT - 1; + for (i = 0; i < ARRAY_SIZE(level_max_dpb_mbs); i++) { + if (level_max_dpb_mbs[i][0] == sps->level_idc) { + sps->num_reorder_frames = + MIN(level_max_dpb_mbs[i][1] / (sps->mb_width * sps->mb_height), + sps->num_reorder_frames); + sps->num_reorder_frames += 1; + if (sps->max_dec_frame_buffering > sps->num_reorder_frames) + sps->num_reorder_frames = sps->max_dec_frame_buffering; + break; + } + } + + if ((sps->bitstream_restriction_flag) && + (sps->max_dec_frame_buffering < + sps->num_reorder_frames)) { + sps->num_reorder_frames = sps->max_dec_frame_buffering; + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, + "set reorder_pic_num to %d\n", + sps->num_reorder_frames); + } + + if (!sps->sar.den) + sps->sar.den = 1; +/*out:*/ + if (1) { + static const char csp[4][5] = { "Gray", "420", "422", "444" }; + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, + "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%u/%u/%u/%u %s %s %d/%d b%d reo:%d\n", + sps_id, sps->profile_idc, sps->level_idc, + sps->poc_type, + sps->ref_frame_count, + sps->mb_width, sps->mb_height, + sps->frame_mbs_only_flag ? "FRM" : (sps->mb_aff ? "MB-AFF" : "PIC-AFF"), + sps->direct_8x8_inference_flag ? "8B8" : "", + sps->crop_left, sps->crop_right, + sps->crop_top, sps->crop_bottom, + sps->vui_parameters_present_flag ? "VUI" : "", + csp[sps->chroma_format_idc], + sps->timing_info_present_flag ? sps->num_units_in_tick : 0, + sps->timing_info_present_flag ? 
sps->time_scale : 0, + sps->bit_depth_luma, + sps->bitstream_restriction_flag ? sps->num_reorder_frames : -1); + } + + return 0; + +fail: + return -1; +} + +static const char *h264_nal_type_name[32] = { + "Unspecified 0", //H264_NAL_UNSPECIFIED + "Coded slice of a non-IDR picture", // H264_NAL_SLICE + "Coded slice data partition A", // H264_NAL_DPA + "Coded slice data partition B", // H264_NAL_DPB + "Coded slice data partition C", // H264_NAL_DPC + "IDR", // H264_NAL_IDR_SLICE + "SEI", // H264_NAL_SEI + "SPS", // H264_NAL_SPS + "PPS", // H264_NAL_PPS + "AUD", // H264_NAL_AUD + "End of sequence", // H264_NAL_END_SEQUENCE + "End of stream", // H264_NAL_END_STREAM + "Filler data", // H264_NAL_FILLER_DATA + "SPS extension", // H264_NAL_SPS_EXT + "Prefix", // H264_NAL_PREFIX + "Subset SPS", // H264_NAL_SUB_SPS + "Depth parameter set", // H264_NAL_DPS + "Reserved 17", // H264_NAL_RESERVED17 + "Reserved 18", // H264_NAL_RESERVED18 + "Auxiliary coded picture without partitioning", // H264_NAL_AUXILIARY_SLICE + "Slice extension", // H264_NAL_EXTEN_SLICE + "Slice extension for a depth view or a 3D-AVC texture view", // H264_NAL_DEPTH_EXTEN_SLICE + "Reserved 22", // H264_NAL_RESERVED22 + "Reserved 23", // H264_NAL_RESERVED23 + "Unspecified 24", // H264_NAL_UNSPECIFIED24 + "Unspecified 25", // H264_NAL_UNSPECIFIED25 + "Unspecified 26", // H264_NAL_UNSPECIFIED26 + "Unspecified 27", // H264_NAL_UNSPECIFIED27 + "Unspecified 28", // H264_NAL_UNSPECIFIED28 + "Unspecified 29", // H264_NAL_UNSPECIFIED29 + "Unspecified 30", // H264_NAL_UNSPECIFIED30 + "Unspecified 31", // H264_NAL_UNSPECIFIED31 +}; + +static const char *h264_nal_unit_name(int nal_type) +{ + return h264_nal_type_name[nal_type]; +} + +static int decode_extradata_ps(u8 *data, int size, struct h264_param_sets *ps) +{ + int ret = 0; + struct get_bits_context gb; + u32 src_len, rbsp_size = 0; + u8 *rbsp_buf = NULL; + int ref_idc, nalu_pos; + u32 nal_type; + u8 *p = data; + u32 len = size; + + nalu_pos = find_start_code(p, 
len); + if (nalu_pos < 0) + return -1; + + src_len = calc_nal_len(p + nalu_pos, size - nalu_pos); + rbsp_buf = nal_unit_extract_rbsp(p + nalu_pos, src_len, &rbsp_size); + if (rbsp_buf == NULL) + return -ENOMEM; + + ret = init_get_bits8(&gb, rbsp_buf, rbsp_size); + if (ret < 0) + goto out; + + if (get_bits1(&gb) != 0) { + ret = -1; + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "invalid h264 data,return!\n"); + goto out; + } + + ref_idc = get_bits(&gb, 2); + nal_type = get_bits(&gb, 5); + + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, + "nal_unit_type: %d(%s), nal_ref_idc: %d\n", + nal_type, h264_nal_unit_name(nal_type), ref_idc); + + switch (nal_type) { + case H264_NAL_SPS: + ret = aml_h264_parser_sps(&gb, &ps->sps); + if (ret < 0) + goto out; + ps->sps_parsed = true; + break; + /*case H264_NAL_PPS: + ret = ff_h264_decode_picture_parameter_set(&gb, &ps->pps, rbsp_size); + if (ret < 0) + goto fail; + ps->pps_parsed = true; + break;*/ + default: + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "Unsupport parser nal type (%s).\n", + h264_nal_unit_name(nal_type)); + break; + } + +out: + vfree(rbsp_buf); + + return ret; +} + +int h264_decode_extradata_ps(u8 *buf, int size, struct h264_param_sets *ps) +{ + int ret = 0, i = 0, j = 0; + u8 *p = buf; + int len = size; + + for (i = 4; i < size; i++) { + j = find_start_code(p, len); + if (j > 0) { + len = size - (p - buf); + ret = decode_extradata_ps(p, len, ps); + if (ret) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "parse extra data failed. err: %d\n", ret); + return ret; + } + + if (ps->sps_parsed) + break; + + p += j; + } + p++; + } + + return ret; +} + +
diff --git a/drivers/amvdec_ports/decoder/aml_h264_parser.h b/drivers/amvdec_ports/decoder/aml_h264_parser.h new file mode 100644 index 0000000..def00dd --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_h264_parser.h
@@ -0,0 +1,210 @@ +/* + * drivers/amvdec_ports/decoder/aml_h264_parser.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef AML_H264_PARSER_H +#define AML_H264_PARSER_H + +#include "../aml_vcodec_drv.h" +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +#include "../utils/pixfmt.h" +#endif + +#define QP_MAX_NUM (51 + 6 * 6) // The maximum supported qp + +/* NAL unit types */ +enum { + H264_NAL_SLICE = 1, + H264_NAL_DPA = 2, + H264_NAL_DPB = 3, + H264_NAL_DPC = 4, + H264_NAL_IDR_SLICE = 5, + H264_NAL_SEI = 6, + H264_NAL_SPS = 7, + H264_NAL_PPS = 8, + H264_NAL_AUD = 9, + H264_NAL_END_SEQUENCE = 10, + H264_NAL_END_STREAM = 11, + H264_NAL_FILLER_DATA = 12, + H264_NAL_SPS_EXT = 13, + H264_NAL_AUXILIARY_SLICE = 19, +}; + +enum { + // 7.4.2.1.1: seq_parameter_set_id is in [0, 31]. + H264_MAX_SPS_COUNT = 32, + // 7.4.2.2: pic_parameter_set_id is in [0, 255]. + H264_MAX_PPS_COUNT = 256, + + // A.3: MaxDpbFrames is bounded above by 16. + H264_MAX_DPB_FRAMES = 16, + // 7.4.2.1.1: max_num_ref_frames is in [0, MaxDpbFrames], and + // each reference frame can have two fields. + H264_MAX_REFS = 2 * H264_MAX_DPB_FRAMES, + + // 7.4.3.1: modification_of_pic_nums_idc is not equal to 3 at most + // num_ref_idx_lN_active_minus1 + 1 times (that is, once for each + // possible reference), then equal to 3 once. + H264_MAX_RPLM_COUNT = H264_MAX_REFS + 1, + + // 7.4.3.3: in the worst case, we begin with a full short-term + // reference picture list. 
Each picture in turn is moved to the + // long-term list (type 3) and then discarded from there (type 2). + // Then, we set the length of the long-term list (type 4), mark + // the current picture as long-term (type 6) and terminate the + // process (type 0). + H264_MAX_MMCO_COUNT = H264_MAX_REFS * 2 + 3, + + // A.2.1, A.2.3: profiles supporting FMO constrain + // num_slice_groups_minus1 to be in [0, 7]. + H264_MAX_SLICE_GROUPS = 8, + + // E.2.2: cpb_cnt_minus1 is in [0, 31]. + H264_MAX_CPB_CNT = 32, + + // A.3: in table A-1 the highest level allows a MaxFS of 139264. + H264_MAX_MB_PIC_SIZE = 139264, + // A.3.1, A.3.2: PicWidthInMbs and PicHeightInMbs are constrained + // to be not greater than sqrt(MaxFS * 8). Hence height/width are + // bounded above by sqrt(139264 * 8) = 1055.5 macroblocks. + H264_MAX_MB_WIDTH = 1055, + H264_MAX_MB_HEIGHT = 1055, + H264_MAX_WIDTH = H264_MAX_MB_WIDTH * 16, + H264_MAX_HEIGHT = H264_MAX_MB_HEIGHT * 16, +}; + +/** + * Rational number (pair of numerator and denominator). 
+ */ +struct rational{ + int num; ///< Numerator + int den; ///< Denominator +}; + +/** + * Sequence parameter set + */ +struct h264_SPS_t { + u32 sps_id; + int profile_idc; + int level_idc; + int chroma_format_idc; + int transform_bypass; ///< qpprime_y_zero_transform_bypass_flag + int log2_max_frame_num; ///< log2_max_frame_num_minus4 + 4 + int poc_type; ///< pic_order_cnt_type + int log2_max_poc_lsb; ///< log2_max_pic_order_cnt_lsb_minus4 + int delta_pic_order_always_zero_flag; + int offset_for_non_ref_pic; + int offset_for_top_to_bottom_field; + int poc_cycle_length; ///< num_ref_frames_in_pic_order_cnt_cycle + int ref_frame_count; ///< num_ref_frames + int gaps_in_frame_num_allowed_flag; + int mb_width; ///< pic_width_in_mbs_minus1 + 1 + ///< (pic_height_in_map_units_minus1 + 1) * (2 - frame_mbs_only_flag) + int mb_height; + int frame_mbs_only_flag; + int mb_aff; ///< mb_adaptive_frame_field_flag + int direct_8x8_inference_flag; + int crop; ///< frame_cropping_flag + + /* those 4 are already in luma samples */ + u32 crop_left; ///< frame_cropping_rect_left_offset + u32 crop_right; ///< frame_cropping_rect_right_offset + u32 crop_top; ///< frame_cropping_rect_top_offset + u32 crop_bottom; ///< frame_cropping_rect_bottom_offset + int vui_parameters_present_flag; + struct rational sar; + int video_signal_type_present_flag; + int full_range; + int colour_description_present_flag; +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER + enum AVColorPrimaries color_primaries; + enum AVColorTransferCharacteristic color_trc; + enum AVColorSpace colorspace; +#endif + int timing_info_present_flag; + u32 num_units_in_tick; + u32 time_scale; + int fixed_frame_rate_flag; + int32_t offset_for_ref_frame[256]; + int bitstream_restriction_flag; + int num_reorder_frames; + int max_dec_frame_buffering; + int scaling_matrix_present; + u8 scaling_matrix4[6][16]; + u8 scaling_matrix8[6][64]; + int nal_hrd_parameters_present_flag; + int vcl_hrd_parameters_present_flag; + int 
pic_struct_present_flag; + int time_offset_length; + int cpb_cnt; ///< See H.264 E.1.2 + int initial_cpb_removal_delay_length; ///< initial_cpb_removal_delay_length_minus1 + 1 + int cpb_removal_delay_length; ///< cpb_removal_delay_length_minus1 + 1 + int dpb_output_delay_length; ///< dpb_output_delay_length_minus1 + 1 + int bit_depth_luma; ///< bit_depth_luma_minus8 + 8 + int bit_depth_chroma; ///< bit_depth_chroma_minus8 + 8 + int residual_color_transform_flag; ///< residual_colour_transform_flag + int constraint_set_flags; ///< constraint_set[0-3]_flag +} ; + +/** + * Picture parameter set + */ +struct h264_PPS_t { + u32 sps_id; + int cabac; ///< entropy_coding_mode_flag + int pic_order_present; ///< pic_order_present_flag + int slice_group_count; ///< num_slice_groups_minus1 + 1 + int mb_slice_group_map_type; + u32 ref_count[2]; ///< num_ref_idx_l0/1_active_minus1 + 1 + int weighted_pred; ///< weighted_pred_flag + int weighted_bipred_idc; + int init_qp; ///< pic_init_qp_minus26 + 26 + int init_qs; ///< pic_init_qs_minus26 + 26 + int chroma_qp_index_offset[2]; + int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag + int constrained_intra_pred; ///< constrained_intra_pred_flag + int redundant_pic_cnt_present; ///< redundant_pic_cnt_present_flag + int transform_8x8_mode; ///< transform_8x8_mode_flag + u8 scaling_matrix4[6][16]; + u8 scaling_matrix8[6][64]; + u8 chroma_qp_table[2][87+1]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table + int chroma_qp_diff; + u8 data[4096]; + int data_size; + + u32 dequant4_buffer[6][87 + 1][16]; + u32 dequant8_buffer[6][87 + 1][64]; + u32(*dequant4_coeff[6])[16]; + u32(*dequant8_coeff[6])[64]; +} ; + +struct h264_param_sets { + bool sps_parsed; + bool pps_parsed; + struct h264_SPS_t sps; + struct h264_PPS_t pps; +}; + + +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +int h264_decode_extradata_ps(u8 *data, int size, struct h264_param_sets *ps); +#else +inline int 
h264_decode_extradata_ps(u8 *data, int size, struct h264_param_sets *ps) { return -1; } +#endif + +#endif /* AML_H264_PARSER_H */ +
diff --git a/drivers/amvdec_ports/decoder/aml_hevc_parser.c b/drivers/amvdec_ports/decoder/aml_hevc_parser.c new file mode 100644 index 0000000..24977a8 --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_hevc_parser.c
@@ -0,0 +1,1282 @@ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/string.h> + +#include "aml_hevc_parser.h" +#include "../utils/get_bits.h" +#include "../utils/put_bits.h" +#include "../utils/golomb.h" +#include "../utils/common.h" +#include "utils.h" + +const u8 ff_hevc_diag_scan4x4_x[16] = { + 0, 0, 1, 0, + 1, 2, 0, 1, + 2, 3, 1, 2, + 3, 2, 3, 3, +}; + +const u8 ff_hevc_diag_scan4x4_y[16] = { + 0, 1, 0, 2, + 1, 0, 3, 2, + 1, 0, 3, 2, + 1, 3, 2, 3, +}; + +const u8 ff_hevc_diag_scan8x8_x[64] = { + 0, 0, 1, 0, + 1, 2, 0, 1, + 2, 3, 0, 1, + 2, 3, 4, 0, + 1, 2, 3, 4, + 5, 0, 1, 2, + 3, 4, 5, 6, + 0, 1, 2, 3, + 4, 5, 6, 7, + 1, 2, 3, 4, + 5, 6, 7, 2, + 3, 4, 5, 6, + 7, 3, 4, 5, + 6, 7, 4, 5, + 6, 7, 5, 6, + 7, 6, 7, 7, +}; + +const u8 ff_hevc_diag_scan8x8_y[64] = { + 0, 1, 0, 2, + 1, 0, 3, 2, + 1, 0, 4, 3, + 2, 1, 0, 5, + 4, 3, 2, 1, + 0, 6, 5, 4, + 3, 2, 1, 0, + 7, 6, 5, 4, + 3, 2, 1, 0, + 7, 6, 5, 4, + 3, 2, 1, 7, + 6, 5, 4, 3, + 2, 7, 6, 5, + 4, 3, 7, 6, + 5, 4, 7, 6, + 5, 7, 6, 7, +}; + +static const u8 default_scaling_list_intra[] = { + 16, 16, 16, 16, 17, 18, 21, 24, + 16, 16, 16, 16, 17, 19, 22, 25, + 16, 16, 17, 18, 20, 22, 25, 29, + 16, 16, 18, 21, 24, 27, 31, 36, + 17, 17, 20, 24, 30, 35, 41, 47, + 18, 19, 22, 27, 35, 44, 54, 65, + 21, 22, 25, 31, 41, 54, 70, 88, + 24, 25, 29, 36, 47, 65, 88, 115 +}; + +static const u8 default_scaling_list_inter[] = { + 16, 16, 16, 16, 17, 18, 20, 24, + 16, 16, 16, 17, 18, 20, 24, 25, + 16, 16, 17, 18, 20, 24, 25, 28, + 16, 17, 18, 20, 24, 25, 28, 33, + 17, 18, 20, 24, 25, 28, 33, 41, + 18, 20, 24, 25, 28, 33, 41, 54, + 20, 24, 25, 28, 33, 41, 54, 71, + 24, 25, 28, 33, 41, 54, 71, 91 +}; + +static const struct AVRational vui_sar[] = { + { 0, 1 }, + { 1, 1 }, + { 12, 11 }, + { 10, 11 }, + { 16, 11 }, + { 40, 33 }, + { 24, 11 }, + { 20, 11 }, + { 32, 11 }, + { 80, 33 }, + { 18, 11 }, + { 15, 11 }, + { 64, 33 }, + { 160, 99 }, + { 4, 3 }, + { 3, 2 }, + { 
2, 1 }, +}; + +static const u8 hevc_sub_width_c[] = { + 1, 2, 2, 1 +}; + +static const u8 hevc_sub_height_c[] = { + 1, 2, 1, 1 +}; + +static int decode_profile_tier_level(struct get_bits_context *gb, struct PTLCommon *ptl) +{ + int i; + + if (get_bits_left(gb) < 2+1+5 + 32 + 4 + 16 + 16 + 12) + return -1; + + ptl->profile_space = get_bits(gb, 2); + ptl->tier_flag = get_bits1(gb); + ptl->profile_idc = get_bits(gb, 5); + if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN) + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Main profile bitstream\n"); + else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_10) + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Main 10 profile bitstream\n"); + else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_STILL_PICTURE) + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Main Still Picture profile bitstream\n"); + else if (ptl->profile_idc == FF_PROFILE_HEVC_REXT) + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Range Extension profile bitstream\n"); + else + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Unknown HEVC profile: %d\n", ptl->profile_idc); + + for (i = 0; i < 32; i++) { + ptl->profile_compatibility_flag[i] = get_bits1(gb); + + if (ptl->profile_idc == 0 && i > 0 && ptl->profile_compatibility_flag[i]) + ptl->profile_idc = i; + } + ptl->progressive_source_flag = get_bits1(gb); + ptl->interlaced_source_flag = get_bits1(gb); + ptl->non_packed_constraint_flag = get_bits1(gb); + ptl->frame_only_constraint_flag = get_bits1(gb); + + skip_bits(gb, 16); // XXX_reserved_zero_44bits[0..15] + skip_bits(gb, 16); // XXX_reserved_zero_44bits[16..31] + skip_bits(gb, 12); // XXX_reserved_zero_44bits[32..43] + + return 0; +} + +static int parse_ptl(struct get_bits_context *gb, struct PTL *ptl, int max_num_sub_layers) +{ + int i; + if (decode_profile_tier_level(gb, &ptl->general_ptl) < 0 || + get_bits_left(gb) < 8 + (8*2 * (max_num_sub_layers - 1 > 0))) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "PTL information too short\n"); + return -1; + } + + ptl->general_ptl.level_idc = get_bits(gb, 8); + + for (i = 0; i < 
max_num_sub_layers - 1; i++) { + ptl->sub_layer_profile_present_flag[i] = get_bits1(gb); + ptl->sub_layer_level_present_flag[i] = get_bits1(gb); + } + + if (max_num_sub_layers - 1> 0) + for (i = max_num_sub_layers - 1; i < 8; i++) + skip_bits(gb, 2); // reserved_zero_2bits[i] + for (i = 0; i < max_num_sub_layers - 1; i++) { + if (ptl->sub_layer_profile_present_flag[i] && + decode_profile_tier_level(gb, &ptl->sub_layer_ptl[i]) < 0) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "PTL information for sublayer %i too short\n", i); + return -1; + } + if (ptl->sub_layer_level_present_flag[i]) { + if (get_bits_left(gb) < 8) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Not enough data for sublayer %i level_idc\n", i); + return -1; + } else + ptl->sub_layer_ptl[i].level_idc = get_bits(gb, 8); + } + } + + return 0; +} + +static void decode_sublayer_hrd(struct get_bits_context *gb, + u32 nb_cpb, int subpic_params_present) +{ + int i; + + for (i = 0; i < nb_cpb; i++) { + get_ue_golomb_long(gb); // bit_rate_value_minus1 + get_ue_golomb_long(gb); // cpb_size_value_minus1 + + if (subpic_params_present) { + get_ue_golomb_long(gb); // cpb_size_du_value_minus1 + get_ue_golomb_long(gb); // bit_rate_du_value_minus1 + } + skip_bits1(gb); // cbr_flag + } +} + +static int decode_hrd(struct get_bits_context *gb, + int common_inf_present, int max_sublayers) +{ + int nal_params_present = 0, vcl_params_present = 0; + int subpic_params_present = 0; + int i; + + if (common_inf_present) { + nal_params_present = get_bits1(gb); + vcl_params_present = get_bits1(gb); + + if (nal_params_present || vcl_params_present) { + subpic_params_present = get_bits1(gb); + + if (subpic_params_present) { + skip_bits(gb, 8); // tick_divisor_minus2 + skip_bits(gb, 5); // du_cpb_removal_delay_increment_length_minus1 + skip_bits(gb, 1); // sub_pic_cpb_params_in_pic_timing_sei_flag + skip_bits(gb, 5); // dpb_output_delay_du_length_minus1 + } + + skip_bits(gb, 4); // bit_rate_scale + skip_bits(gb, 4); // cpb_size_scale + + if 
(subpic_params_present) + skip_bits(gb, 4); // cpb_size_du_scale + + skip_bits(gb, 5); // initial_cpb_removal_delay_length_minus1 + skip_bits(gb, 5); // au_cpb_removal_delay_length_minus1 + skip_bits(gb, 5); // dpb_output_delay_length_minus1 + } + } + + for (i = 0; i < max_sublayers; i++) { + int low_delay = 0; + u32 nb_cpb = 1; + int fixed_rate = get_bits1(gb); + + if (!fixed_rate) + fixed_rate = get_bits1(gb); + + if (fixed_rate) + get_ue_golomb_long(gb); // elemental_duration_in_tc_minus1 + else + low_delay = get_bits1(gb); + + if (!low_delay) { + nb_cpb = get_ue_golomb_long(gb) + 1; + if (nb_cpb < 1 || nb_cpb > 32) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "nb_cpb %d invalid\n", nb_cpb); + return -1; + } + } + + if (nal_params_present) + decode_sublayer_hrd(gb, nb_cpb, subpic_params_present); + if (vcl_params_present) + decode_sublayer_hrd(gb, nb_cpb, subpic_params_present); + } + return 0; +} + +int ff_hevc_parse_vps(struct get_bits_context *gb, struct h265_VPS_t *vps) +{ + int i,j; + int vps_id = 0; + + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Decoding VPS\n"); + + vps_id = get_bits(gb, 4); + if (vps_id >= HEVC_MAX_VPS_COUNT) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "VPS id out of range: %d\n", vps_id); + goto err; + } + + if (get_bits(gb, 2) != 3) { // vps_reserved_three_2bits + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_reserved_three_2bits is not three\n"); + goto err; + } + + vps->vps_max_layers = get_bits(gb, 6) + 1; + vps->vps_max_sub_layers = get_bits(gb, 3) + 1; + vps->vps_temporal_id_nesting_flag = get_bits1(gb); + + if (get_bits(gb, 16) != 0xffff) { // vps_reserved_ffff_16bits + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_reserved_ffff_16bits is not 0xffff\n"); + goto err; + } + + if (vps->vps_max_sub_layers > HEVC_MAX_SUB_LAYERS) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_max_sub_layers out of range: %d\n", + vps->vps_max_sub_layers); + goto err; + } + + if (parse_ptl(gb, &vps->ptl, vps->vps_max_sub_layers) < 0) + goto err; + + 
vps->vps_sub_layer_ordering_info_present_flag = get_bits1(gb); + + i = vps->vps_sub_layer_ordering_info_present_flag ? 0 : vps->vps_max_sub_layers - 1; + for (; i < vps->vps_max_sub_layers; i++) { + vps->vps_max_dec_pic_buffering[i] = get_ue_golomb_long(gb) + 1; + vps->vps_num_reorder_pics[i] = get_ue_golomb_long(gb); + vps->vps_max_latency_increase[i] = get_ue_golomb_long(gb) - 1; + + if (vps->vps_max_dec_pic_buffering[i] > HEVC_MAX_DPB_SIZE || !vps->vps_max_dec_pic_buffering[i]) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_max_dec_pic_buffering_minus1 out of range: %d\n", + vps->vps_max_dec_pic_buffering[i] - 1); + goto err; + } + if (vps->vps_num_reorder_pics[i] > vps->vps_max_dec_pic_buffering[i] - 1) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_max_num_reorder_pics out of range: %d\n", + vps->vps_num_reorder_pics[i]); + goto err; + } + } + + vps->vps_max_layer_id = get_bits(gb, 6); + vps->vps_num_layer_sets = get_ue_golomb_long(gb) + 1; + if (vps->vps_num_layer_sets < 1 || vps->vps_num_layer_sets > 1024 || + (vps->vps_num_layer_sets - 1LL) * (vps->vps_max_layer_id + 1LL) > get_bits_left(gb)) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "too many layer_id_included_flags\n"); + goto err; + } + + for (i = 1; i < vps->vps_num_layer_sets; i++) + for (j = 0; j <= vps->vps_max_layer_id; j++) + skip_bits(gb, 1); // layer_id_included_flag[i][j] + + vps->vps_timing_info_present_flag = get_bits1(gb); + if (vps->vps_timing_info_present_flag) { + vps->vps_num_units_in_tick = get_bits_long(gb, 32); + vps->vps_time_scale = get_bits_long(gb, 32); + vps->vps_poc_proportional_to_timing_flag = get_bits1(gb); + if (vps->vps_poc_proportional_to_timing_flag) + vps->vps_num_ticks_poc_diff_one = get_ue_golomb_long(gb) + 1; + vps->vps_num_hrd_parameters = get_ue_golomb_long(gb); + if (vps->vps_num_hrd_parameters > (u32)vps->vps_num_layer_sets) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_num_hrd_parameters %d is invalid\n", vps->vps_num_hrd_parameters); + goto err; + } + for (i = 0; i < 
vps->vps_num_hrd_parameters; i++) { + int common_inf_present = 1; + + get_ue_golomb_long(gb); // hrd_layer_set_idx + if (i) + common_inf_present = get_bits1(gb); + decode_hrd(gb, common_inf_present, vps->vps_max_sub_layers); + } + } + get_bits1(gb); /* vps_extension_flag */ + + if (get_bits_left(gb) < 0) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Overread VPS by %d bits\n", -get_bits_left(gb)); + goto err; + } + + return 0; +err: + return -1; +} + +static int map_pixel_format(struct h265_SPS_t *sps) +{ + /*const AVPixFmtDescriptor *desc;*/ + switch (sps->bit_depth) { + case 8: + if (sps->chroma_format_idc == 0) sps->pix_fmt = AV_PIX_FMT_GRAY8; + if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P; + if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P; + if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P; + break; + case 9: + if (sps->chroma_format_idc == 0) sps->pix_fmt = AV_PIX_FMT_GRAY9; + if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P9; + if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P9; + if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P9; + break; + case 10: + if (sps->chroma_format_idc == 0) sps->pix_fmt = AV_PIX_FMT_GRAY10; + if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P10; + if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P10; + if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P10; + break; + case 12: + if (sps->chroma_format_idc == 0) sps->pix_fmt = AV_PIX_FMT_GRAY12; + if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P12; + if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P12; + if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P12; + break; + default: + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "The following bit-depths are currently specified: 8, 9, 10 and 12 bits, " + "chroma_format_idc is %d, depth is %d\n", + sps->chroma_format_idc, sps->bit_depth); + return -1; 
+ } + + /*desc = av_pix_fmt_desc_get(sps->pix_fmt); + if (!desc) + return AVERROR(EINVAL); + + sps->hshift[0] = sps->vshift[0] = 0; + sps->hshift[2] = sps->hshift[1] = desc->log2_chroma_w; + sps->vshift[2] = sps->vshift[1] = desc->log2_chroma_h;*/ + + sps->pixel_shift = sps->bit_depth > 8; + + return 0; +} + +static void set_default_scaling_list_data(struct ScalingList *sl) +{ + int matrixId; + + for (matrixId = 0; matrixId < 6; matrixId++) { + // 4x4 default is 16 + memset(sl->sl[0][matrixId], 16, 16); + sl->sl_dc[0][matrixId] = 16; // default for 16x16 + sl->sl_dc[1][matrixId] = 16; // default for 32x32 + } + memcpy(sl->sl[1][0], default_scaling_list_intra, 64); + memcpy(sl->sl[1][1], default_scaling_list_intra, 64); + memcpy(sl->sl[1][2], default_scaling_list_intra, 64); + memcpy(sl->sl[1][3], default_scaling_list_inter, 64); + memcpy(sl->sl[1][4], default_scaling_list_inter, 64); + memcpy(sl->sl[1][5], default_scaling_list_inter, 64); + memcpy(sl->sl[2][0], default_scaling_list_intra, 64); + memcpy(sl->sl[2][1], default_scaling_list_intra, 64); + memcpy(sl->sl[2][2], default_scaling_list_intra, 64); + memcpy(sl->sl[2][3], default_scaling_list_inter, 64); + memcpy(sl->sl[2][4], default_scaling_list_inter, 64); + memcpy(sl->sl[2][5], default_scaling_list_inter, 64); + memcpy(sl->sl[3][0], default_scaling_list_intra, 64); + memcpy(sl->sl[3][1], default_scaling_list_intra, 64); + memcpy(sl->sl[3][2], default_scaling_list_intra, 64); + memcpy(sl->sl[3][3], default_scaling_list_inter, 64); + memcpy(sl->sl[3][4], default_scaling_list_inter, 64); + memcpy(sl->sl[3][5], default_scaling_list_inter, 64); +} + +static int scaling_list_data(struct get_bits_context *gb, + struct ScalingList *sl, struct h265_SPS_t *sps) +{ + u8 scaling_list_pred_mode_flag; + int scaling_list_dc_coef[2][6]; + int size_id, matrix_id, pos; + int i; + + for (size_id = 0; size_id < 4; size_id++) + for (matrix_id = 0; matrix_id < 6; matrix_id += ((size_id == 3) ? 
3 : 1)) { + scaling_list_pred_mode_flag = get_bits1(gb); + if (!scaling_list_pred_mode_flag) { + u32 delta = get_ue_golomb_long(gb); + /* Only need to handle non-zero delta. Zero means default, + * which should already be in the arrays. */ + if (delta) { + // Copy from previous array. + delta *= (size_id == 3) ? 3 : 1; + if (matrix_id < delta) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid delta in scaling list data: %d.\n", delta); + return -1; + } + + memcpy(sl->sl[size_id][matrix_id], + sl->sl[size_id][matrix_id - delta], + size_id > 0 ? 64 : 16); + if (size_id > 1) + sl->sl_dc[size_id - 2][matrix_id] = sl->sl_dc[size_id - 2][matrix_id - delta]; + } + } else { + int next_coef, coef_num; + int scaling_list_delta_coef; + + next_coef = 8; + coef_num = FFMIN(64, 1 << (4 + (size_id << 1))); + if (size_id > 1) { + scaling_list_dc_coef[size_id - 2][matrix_id] = get_se_golomb(gb) + 8; + next_coef = scaling_list_dc_coef[size_id - 2][matrix_id]; + sl->sl_dc[size_id - 2][matrix_id] = next_coef; + } + for (i = 0; i < coef_num; i++) { + if (size_id == 0) + pos = 4 * ff_hevc_diag_scan4x4_y[i] + + ff_hevc_diag_scan4x4_x[i]; + else + pos = 8 * ff_hevc_diag_scan8x8_y[i] + + ff_hevc_diag_scan8x8_x[i]; + + scaling_list_delta_coef = get_se_golomb(gb); + next_coef = (next_coef + 256U + scaling_list_delta_coef) % 256; + sl->sl[size_id][matrix_id][pos] = next_coef; + } + } + } + + if (sps->chroma_format_idc == 3) { + for (i = 0; i < 64; i++) { + sl->sl[3][1][i] = sl->sl[2][1][i]; + sl->sl[3][2][i] = sl->sl[2][2][i]; + sl->sl[3][4][i] = sl->sl[2][4][i]; + sl->sl[3][5][i] = sl->sl[2][5][i]; + } + sl->sl_dc[1][1] = sl->sl_dc[0][1]; + sl->sl_dc[1][2] = sl->sl_dc[0][2]; + sl->sl_dc[1][4] = sl->sl_dc[0][4]; + sl->sl_dc[1][5] = sl->sl_dc[0][5]; + } + + return 0; +} + +int ff_hevc_decode_short_term_rps(struct get_bits_context *gb, + struct ShortTermRPS *rps, const struct h265_SPS_t *sps, int is_slice_header) +{ + u8 rps_predict = 0; + int delta_poc; + int k0 = 0; + int k1 = 0; + int k = 0; + 
int i; + + if (rps != sps->st_rps && sps->nb_st_rps) + rps_predict = get_bits1(gb); + + if (rps_predict) { + const struct ShortTermRPS *rps_ridx; + int delta_rps; + u32 abs_delta_rps; + u8 use_delta_flag = 0; + u8 delta_rps_sign; + + if (is_slice_header) { + u32 delta_idx = get_ue_golomb_long(gb) + 1; + if (delta_idx > sps->nb_st_rps) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value of delta_idx in slice header RPS: %d > %d.\n", + delta_idx, sps->nb_st_rps); + return -1; + } + rps_ridx = &sps->st_rps[sps->nb_st_rps - delta_idx]; + rps->rps_idx_num_delta_pocs = rps_ridx->num_delta_pocs; + } else + rps_ridx = &sps->st_rps[rps - sps->st_rps - 1]; + + delta_rps_sign = get_bits1(gb); + abs_delta_rps = get_ue_golomb_long(gb) + 1; + if (abs_delta_rps < 1 || abs_delta_rps > 32768) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value of abs_delta_rps: %d\n", + abs_delta_rps); + return -1; + } + delta_rps = (1 - (delta_rps_sign << 1)) * abs_delta_rps; + for (i = 0; i <= rps_ridx->num_delta_pocs; i++) { + int used = rps->used[k] = get_bits1(gb); + + if (!used) + use_delta_flag = get_bits1(gb); + + if (used || use_delta_flag) { + if (i < rps_ridx->num_delta_pocs) + delta_poc = delta_rps + rps_ridx->delta_poc[i]; + else + delta_poc = delta_rps; + rps->delta_poc[k] = delta_poc; + if (delta_poc < 0) + k0++; + else + k1++; + k++; + } + } + + if (k >= ARRAY_SIZE(rps->used)) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid num_delta_pocs: %d\n", k); + return -1; + } + + rps->num_delta_pocs = k; + rps->num_negative_pics = k0; + // sort in increasing order (smallest first) + if (rps->num_delta_pocs != 0) { + int used, tmp; + for (i = 1; i < rps->num_delta_pocs; i++) { + delta_poc = rps->delta_poc[i]; + used = rps->used[i]; + for (k = i - 1; k >= 0; k--) { + tmp = rps->delta_poc[k]; + if (delta_poc < tmp) { + rps->delta_poc[k + 1] = tmp; + rps->used[k + 1] = rps->used[k]; + rps->delta_poc[k] = delta_poc; + rps->used[k] = used; + } + } + } + } + if ((rps->num_negative_pics >> 1) != 0) 
{ + int used; + k = rps->num_negative_pics - 1; + // flip the negative values to largest first + for (i = 0; i < rps->num_negative_pics >> 1; i++) { + delta_poc = rps->delta_poc[i]; + used = rps->used[i]; + rps->delta_poc[i] = rps->delta_poc[k]; + rps->used[i] = rps->used[k]; + rps->delta_poc[k] = delta_poc; + rps->used[k] = used; + k--; + } + } + } else { + u32 prev, nb_positive_pics; + rps->num_negative_pics = get_ue_golomb_long(gb); + nb_positive_pics = get_ue_golomb_long(gb); + + if (rps->num_negative_pics >= HEVC_MAX_REFS || + nb_positive_pics >= HEVC_MAX_REFS) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Too many refs in a short term RPS.\n"); + return -1; + } + + rps->num_delta_pocs = rps->num_negative_pics + nb_positive_pics; + if (rps->num_delta_pocs) { + prev = 0; + for (i = 0; i < rps->num_negative_pics; i++) { + delta_poc = get_ue_golomb_long(gb) + 1; + if (delta_poc < 1 || delta_poc > 32768) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value of delta_poc: %d\n", + delta_poc); + return -1; + } + prev -= delta_poc; + rps->delta_poc[i] = prev; + rps->used[i] = get_bits1(gb); + } + prev = 0; + for (i = 0; i < nb_positive_pics; i++) { + delta_poc = get_ue_golomb_long(gb) + 1; + if (delta_poc < 1 || delta_poc > 32768) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value of delta_poc: %d\n", + delta_poc); + return -1; + } + prev += delta_poc; + rps->delta_poc[rps->num_negative_pics + i] = prev; + rps->used[rps->num_negative_pics + i] = get_bits1(gb); + } + } + } + return 0; +} + +static void decode_vui(struct get_bits_context *gb, struct h265_SPS_t *sps) +{ + struct VUI backup_vui, *vui = &sps->vui; + struct get_bits_context backup; + int sar_present, alt = 0; + + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Decoding VUI\n"); + + sar_present = get_bits1(gb); + if (sar_present) { + u8 sar_idx = get_bits(gb, 8); + if (sar_idx < ARRAY_SIZE(vui_sar)) + vui->sar = vui_sar[sar_idx]; + else if (sar_idx == 255) { + vui->sar.num = get_bits(gb, 16); + vui->sar.den = get_bits(gb, 
16); + } else + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, + "Unknown SAR index: %u.\n", sar_idx); + } + + vui->overscan_info_present_flag = get_bits1(gb); + if (vui->overscan_info_present_flag) + vui->overscan_appropriate_flag = get_bits1(gb); + + vui->video_signal_type_present_flag = get_bits1(gb); + if (vui->video_signal_type_present_flag) { + vui->video_format = get_bits(gb, 3); + vui->video_full_range_flag = get_bits1(gb); + vui->colour_description_present_flag = get_bits1(gb); + if (vui->video_full_range_flag && sps->pix_fmt == AV_PIX_FMT_YUV420P) + sps->pix_fmt = AV_PIX_FMT_YUVJ420P; + if (vui->colour_description_present_flag) { + vui->colour_primaries = get_bits(gb, 8); + vui->transfer_characteristic = get_bits(gb, 8); + vui->matrix_coeffs = get_bits(gb, 8); + + // Set invalid values to "unspecified" + if (!av_color_primaries_name(vui->colour_primaries)) + vui->colour_primaries = AVCOL_PRI_UNSPECIFIED; + if (!av_color_transfer_name(vui->transfer_characteristic)) + vui->transfer_characteristic = AVCOL_TRC_UNSPECIFIED; + if (!av_color_space_name(vui->matrix_coeffs)) + vui->matrix_coeffs = AVCOL_SPC_UNSPECIFIED; + if (vui->matrix_coeffs == AVCOL_SPC_RGB) { + switch (sps->pix_fmt) { + case AV_PIX_FMT_YUV444P: + sps->pix_fmt = AV_PIX_FMT_GBRP; + break; + case AV_PIX_FMT_YUV444P10: + sps->pix_fmt = AV_PIX_FMT_GBRP10; + break; + case AV_PIX_FMT_YUV444P12: + sps->pix_fmt = AV_PIX_FMT_GBRP12; + break; + } + } + } + } + + vui->chroma_loc_info_present_flag = get_bits1(gb); + if (vui->chroma_loc_info_present_flag) { + vui->chroma_sample_loc_type_top_field = get_ue_golomb_long(gb); + vui->chroma_sample_loc_type_bottom_field = get_ue_golomb_long(gb); + } + + vui->neutra_chroma_indication_flag = get_bits1(gb); + vui->field_seq_flag = get_bits1(gb); + vui->frame_field_info_present_flag = get_bits1(gb); + + // Backup context in case an alternate header is detected + memcpy(&backup, gb, sizeof(backup)); + memcpy(&backup_vui, vui, sizeof(backup_vui)); + if (get_bits_left(gb) >= 68 && 
show_bits_long(gb, 21) == 0x100000) { + vui->default_display_window_flag = 0; + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Invalid default display window\n"); + } else + vui->default_display_window_flag = get_bits1(gb); + + if (vui->default_display_window_flag) { + int vert_mult = hevc_sub_height_c[sps->chroma_format_idc]; + int horiz_mult = hevc_sub_width_c[sps->chroma_format_idc]; + vui->def_disp_win.left_offset = get_ue_golomb_long(gb) * horiz_mult; + vui->def_disp_win.right_offset = get_ue_golomb_long(gb) * horiz_mult; + vui->def_disp_win.top_offset = get_ue_golomb_long(gb) * vert_mult; + vui->def_disp_win.bottom_offset = get_ue_golomb_long(gb) * vert_mult; + } + +timing_info: + vui->vui_timing_info_present_flag = get_bits1(gb); + + if (vui->vui_timing_info_present_flag) { + if (get_bits_left(gb) < 66 && !alt) { + // The alternate syntax seem to have timing info located + // at where def_disp_win is normally located + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Strange VUI timing information, retrying...\n"); + memcpy(vui, &backup_vui, sizeof(backup_vui)); + memcpy(gb, &backup, sizeof(backup)); + alt = 1; + goto timing_info; + } + vui->vui_num_units_in_tick = get_bits_long(gb, 32); + vui->vui_time_scale = get_bits_long(gb, 32); + if (alt) { + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Retry got %u/%u fps\n", + vui->vui_time_scale, vui->vui_num_units_in_tick); + } + vui->vui_poc_proportional_to_timing_flag = get_bits1(gb); + if (vui->vui_poc_proportional_to_timing_flag) + vui->vui_num_ticks_poc_diff_one_minus1 = get_ue_golomb_long(gb); + vui->vui_hrd_parameters_present_flag = get_bits1(gb); + if (vui->vui_hrd_parameters_present_flag) + decode_hrd(gb, 1, sps->max_sub_layers); + } + + vui->bitstream_restriction_flag = get_bits1(gb); + if (vui->bitstream_restriction_flag) { + if (get_bits_left(gb) < 8 && !alt) { + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Strange VUI bitstream restriction information, retrying" + " from timing information...\n"); + memcpy(vui, &backup_vui, 
sizeof(backup_vui)); + memcpy(gb, &backup, sizeof(backup)); + alt = 1; + goto timing_info; + } + vui->tiles_fixed_structure_flag = get_bits1(gb); + vui->motion_vectors_over_pic_boundaries_flag = get_bits1(gb); + vui->restricted_ref_pic_lists_flag = get_bits1(gb); + vui->min_spatial_segmentation_idc = get_ue_golomb_long(gb); + vui->max_bytes_per_pic_denom = get_ue_golomb_long(gb); + vui->max_bits_per_min_cu_denom = get_ue_golomb_long(gb); + vui->log2_max_mv_length_horizontal = get_ue_golomb_long(gb); + vui->log2_max_mv_length_vertical = get_ue_golomb_long(gb); + } + + if (get_bits_left(gb) < 1 && !alt) { + // XXX: Alternate syntax when sps_range_extension_flag != 0? + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Overread in VUI, retrying from timing information...\n"); + memcpy(vui, &backup_vui, sizeof(backup_vui)); + memcpy(gb, &backup, sizeof(backup)); + alt = 1; + goto timing_info; + } +} + +int ff_hevc_parse_sps(struct get_bits_context *gb, struct h265_SPS_t *sps) +{ + int i, ret = 0; + int log2_diff_max_min_transform_block_size; + int bit_depth_chroma, start, vui_present, sublayer_ordering_info; + struct HEVCWindow *ow; + + sps->vps_id = get_bits(gb, 4); + if (sps->vps_id >= HEVC_MAX_VPS_COUNT) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "VPS id out of range: %d\n", sps->vps_id); + return -1; + } + + sps->max_sub_layers = get_bits(gb, 3) + 1; + if (sps->max_sub_layers > HEVC_MAX_SUB_LAYERS) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "sps_max_sub_layers out of range: %d\n", + sps->max_sub_layers); + return -1; + } + + sps->temporal_id_nesting_flag = get_bits(gb, 1); + + if ((ret = parse_ptl(gb, &sps->ptl, sps->max_sub_layers)) < 0) + return ret; + + sps->sps_id = get_ue_golomb_long(gb); + if (sps->sps_id >= HEVC_MAX_SPS_COUNT) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "SPS id out of range: %d\n", sps->sps_id); + return -1; + } + + sps->chroma_format_idc = get_ue_golomb_long(gb); + if (sps->chroma_format_idc > 3U) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "chroma_format_idc %d is 
invalid\n", sps->chroma_format_idc); + return -1; + } + + if (sps->chroma_format_idc == 3) + sps->separate_colour_plane_flag = get_bits1(gb); + + if (sps->separate_colour_plane_flag) + sps->chroma_format_idc = 0; + + sps->width = get_ue_golomb_long(gb); + sps->height = get_ue_golomb_long(gb); + if (sps->width > 8192 || sps->height > 8192) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "width or height oversize.\n"); + return -1; + } + + if (get_bits1(gb)) { // pic_conformance_flag + int vert_mult = hevc_sub_height_c[sps->chroma_format_idc]; + int horiz_mult = hevc_sub_width_c[sps->chroma_format_idc]; + sps->pic_conf_win.left_offset = get_ue_golomb_long(gb) * horiz_mult; + sps->pic_conf_win.right_offset = get_ue_golomb_long(gb) * horiz_mult; + sps->pic_conf_win.top_offset = get_ue_golomb_long(gb) * vert_mult; + sps->pic_conf_win.bottom_offset = get_ue_golomb_long(gb) * vert_mult; + sps->output_window = sps->pic_conf_win; + } + + sps->bit_depth = get_ue_golomb_long(gb) + 8; + bit_depth_chroma = get_ue_golomb_long(gb) + 8; + if (sps->chroma_format_idc && bit_depth_chroma != sps->bit_depth) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Luma bit depth (%d) is different from chroma bit depth (%d), this is unsupported.\n", + sps->bit_depth, bit_depth_chroma); + return -1; + } + sps->bit_depth_chroma = bit_depth_chroma; + + ret = map_pixel_format(sps); + if (ret < 0) + return ret; + + sps->log2_max_poc_lsb = get_ue_golomb_long(gb) + 4; + if (sps->log2_max_poc_lsb > 16) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "log2_max_pic_order_cnt_lsb_minus4 out range: %d\n", + sps->log2_max_poc_lsb - 4); + return -1; + } + + sublayer_ordering_info = get_bits1(gb); + start = sublayer_ordering_info ? 
0 : sps->max_sub_layers - 1; + for (i = start; i < sps->max_sub_layers; i++) { + sps->temporal_layer[i].max_dec_pic_buffering = get_ue_golomb_long(gb) + 1; + sps->temporal_layer[i].num_reorder_pics = get_ue_golomb_long(gb); + sps->temporal_layer[i].max_latency_increase = get_ue_golomb_long(gb) - 1; + if (sps->temporal_layer[i].max_dec_pic_buffering > (u32)HEVC_MAX_DPB_SIZE) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "sps_max_dec_pic_buffering_minus1 out of range: %d\n", + sps->temporal_layer[i].max_dec_pic_buffering - 1U); + return -1; + } + if (sps->temporal_layer[i].num_reorder_pics > sps->temporal_layer[i].max_dec_pic_buffering - 1) { + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "sps_max_num_reorder_pics out of range: %d\n", + sps->temporal_layer[i].num_reorder_pics); + if (sps->temporal_layer[i].num_reorder_pics > HEVC_MAX_DPB_SIZE - 1) { + return -1; + } + sps->temporal_layer[i].max_dec_pic_buffering = sps->temporal_layer[i].num_reorder_pics + 1; + } + } + + if (!sublayer_ordering_info) { + for (i = 0; i < start; i++) { + sps->temporal_layer[i].max_dec_pic_buffering = sps->temporal_layer[start].max_dec_pic_buffering; + sps->temporal_layer[i].num_reorder_pics = sps->temporal_layer[start].num_reorder_pics; + sps->temporal_layer[i].max_latency_increase = sps->temporal_layer[start].max_latency_increase; + } + } + + sps->log2_min_cb_size = get_ue_golomb_long(gb) + 3; + sps->log2_diff_max_min_coding_block_size = get_ue_golomb_long(gb); + sps->log2_min_tb_size = get_ue_golomb_long(gb) + 2; + log2_diff_max_min_transform_block_size = get_ue_golomb_long(gb); + sps->log2_max_trafo_size = log2_diff_max_min_transform_block_size + sps->log2_min_tb_size; + + if (sps->log2_min_cb_size < 3 || sps->log2_min_cb_size > 30) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value %d for log2_min_cb_size", sps->log2_min_cb_size); + return -1; + } + + if (sps->log2_diff_max_min_coding_block_size > 30) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value %d for log2_diff_max_min_coding_block_size", 
sps->log2_diff_max_min_coding_block_size); + return -1; + } + + if (sps->log2_min_tb_size >= sps->log2_min_cb_size || sps->log2_min_tb_size < 2) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value for log2_min_tb_size"); + return -1; + } + + if (log2_diff_max_min_transform_block_size < 0 || log2_diff_max_min_transform_block_size > 30) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value %d for log2_diff_max_min_transform_block_size", log2_diff_max_min_transform_block_size); + return -1; + } + + sps->max_transform_hierarchy_depth_inter = get_ue_golomb_long(gb); + sps->max_transform_hierarchy_depth_intra = get_ue_golomb_long(gb); + + sps->scaling_list_enable_flag = get_bits1(gb); + if (sps->scaling_list_enable_flag) { + set_default_scaling_list_data(&sps->scaling_list); + + if (get_bits1(gb)) { + ret = scaling_list_data(gb, &sps->scaling_list, sps); + if (ret < 0) + return ret; + } + } + + sps->amp_enabled_flag = get_bits1(gb); + sps->sao_enabled = get_bits1(gb); + + sps->pcm_enabled_flag = get_bits1(gb); + if (sps->pcm_enabled_flag) { + sps->pcm.bit_depth = get_bits(gb, 4) + 1; + sps->pcm.bit_depth_chroma = get_bits(gb, 4) + 1; + sps->pcm.log2_min_pcm_cb_size = get_ue_golomb_long(gb) + 3; + sps->pcm.log2_max_pcm_cb_size = sps->pcm.log2_min_pcm_cb_size + + get_ue_golomb_long(gb); + if (FFMAX(sps->pcm.bit_depth, sps->pcm.bit_depth_chroma) > sps->bit_depth) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "PCM bit depth (%d, %d) is greater than normal bit depth (%d)\n", + sps->pcm.bit_depth, sps->pcm.bit_depth_chroma, sps->bit_depth); + return -1; + } + + sps->pcm.loop_filter_disable_flag = get_bits1(gb); + } + + sps->nb_st_rps = get_ue_golomb_long(gb); + if (sps->nb_st_rps > HEVC_MAX_SHORT_TERM_REF_PIC_SETS) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Too many short term RPS: %d.\n", sps->nb_st_rps); + return -1; + } + for (i = 0; i < sps->nb_st_rps; i++) { + if ((ret = ff_hevc_decode_short_term_rps(gb, &sps->st_rps[i], sps, 0)) < 0) + return ret; + } + + 
sps->long_term_ref_pics_present_flag = get_bits1(gb); + if (sps->long_term_ref_pics_present_flag) { + sps->num_long_term_ref_pics_sps = get_ue_golomb_long(gb); + if (sps->num_long_term_ref_pics_sps > HEVC_MAX_LONG_TERM_REF_PICS) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Too many long term ref pics: %d.\n", + sps->num_long_term_ref_pics_sps); + return -1; + } + for (i = 0; i < sps->num_long_term_ref_pics_sps; i++) { + sps->lt_ref_pic_poc_lsb_sps[i] = get_bits(gb, sps->log2_max_poc_lsb); + sps->used_by_curr_pic_lt_sps_flag[i] = get_bits1(gb); + } + } + + sps->sps_temporal_mvp_enabled_flag = get_bits1(gb); + sps->sps_strong_intra_smoothing_enable_flag = get_bits1(gb); + sps->vui.sar = (struct AVRational){0, 1}; + vui_present = get_bits1(gb); + if (vui_present) + decode_vui(gb, sps); + + if (get_bits1(gb)) { // sps_extension_flag + sps->sps_range_extension_flag = get_bits1(gb); + skip_bits(gb, 7); //sps_extension_7bits = get_bits(gb, 7); + if (sps->sps_range_extension_flag) { + sps->transform_skip_rotation_enabled_flag = get_bits1(gb); + sps->transform_skip_context_enabled_flag = get_bits1(gb); + sps->implicit_rdpcm_enabled_flag = get_bits1(gb); + sps->explicit_rdpcm_enabled_flag = get_bits1(gb); + sps->extended_precision_processing_flag = get_bits1(gb); + if (sps->extended_precision_processing_flag) + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "extended_precision_processing_flag not yet implemented\n"); + + sps->intra_smoothing_disabled_flag = get_bits1(gb); + sps->high_precision_offsets_enabled_flag = get_bits1(gb); + if (sps->high_precision_offsets_enabled_flag) + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "high_precision_offsets_enabled_flag not yet implemented\n"); + + sps->persistent_rice_adaptation_enabled_flag = get_bits1(gb); + sps->cabac_bypass_alignment_enabled_flag = get_bits1(gb); + if (sps->cabac_bypass_alignment_enabled_flag) + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "cabac_bypass_alignment_enabled_flag not yet implemented\n"); + } + } + + ow = &sps->output_window; + if 
(ow->left_offset >= INT_MAX - ow->right_offset || + ow->top_offset >= INT_MAX - ow->bottom_offset || + ow->left_offset + ow->right_offset >= sps->width || + ow->top_offset + ow->bottom_offset >= sps->height) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid cropping offsets: %u/%u/%u/%u\n", + ow->left_offset, ow->right_offset, ow->top_offset, ow->bottom_offset); + return -1; + } + + // Inferred parameters + sps->log2_ctb_size = sps->log2_min_cb_size + + sps->log2_diff_max_min_coding_block_size; + sps->log2_min_pu_size = sps->log2_min_cb_size - 1; + + if (sps->log2_ctb_size > HEVC_MAX_LOG2_CTB_SIZE) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "CTB size out of range: 2^%d\n", sps->log2_ctb_size); + return -1; + } + if (sps->log2_ctb_size < 4) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "log2_ctb_size %d differs from the bounds of any known profile\n", sps->log2_ctb_size); + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "log2_ctb_size %d", sps->log2_ctb_size); + return -1; + } + + sps->ctb_width = (sps->width + (1 << sps->log2_ctb_size) - 1) >> sps->log2_ctb_size; + sps->ctb_height = (sps->height + (1 << sps->log2_ctb_size) - 1) >> sps->log2_ctb_size; + sps->ctb_size = sps->ctb_width * sps->ctb_height; + + sps->min_cb_width = sps->width >> sps->log2_min_cb_size; + sps->min_cb_height = sps->height >> sps->log2_min_cb_size; + sps->min_tb_width = sps->width >> sps->log2_min_tb_size; + sps->min_tb_height = sps->height >> sps->log2_min_tb_size; + sps->min_pu_width = sps->width >> sps->log2_min_pu_size; + sps->min_pu_height = sps->height >> sps->log2_min_pu_size; + sps->tb_mask = (1 << (sps->log2_ctb_size - sps->log2_min_tb_size)) - 1; + sps->qp_bd_offset = 6 * (sps->bit_depth - 8); + + if (av_mod_uintp2(sps->width, sps->log2_min_cb_size) || + av_mod_uintp2(sps->height, sps->log2_min_cb_size)) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid coded frame dimensions.\n"); + return -1; + } + + if (sps->max_transform_hierarchy_depth_inter > sps->log2_ctb_size - sps->log2_min_tb_size) { + v4l_dbg(0, 
V4L_DEBUG_CODEC_ERROR, "max_transform_hierarchy_depth_inter out of range: %d\n", + sps->max_transform_hierarchy_depth_inter); + return -1; + } + if (sps->max_transform_hierarchy_depth_intra > sps->log2_ctb_size - sps->log2_min_tb_size) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "max_transform_hierarchy_depth_intra out of range: %d\n", + sps->max_transform_hierarchy_depth_intra); + return -1; + } + if (sps->log2_max_trafo_size > FFMIN(sps->log2_ctb_size, 5)) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "max transform block size out of range: %d\n", + sps->log2_max_trafo_size); + return -1; + } + + if (get_bits_left(gb) < 0) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Overread SPS by %d bits\n", -get_bits_left(gb)); + return -1; + } + + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Parsed SPS: id %d; ref: %d, coded wxh: %dx%d, cropped wxh: %dx%d; pix_fmt: %d.\n", + sps->sps_id, sps->temporal_layer[0].num_reorder_pics, sps->width, sps->height, + sps->width - (sps->output_window.left_offset + sps->output_window.right_offset), + sps->height - (sps->output_window.top_offset + sps->output_window.bottom_offset), + sps->pix_fmt); + + return 0; +} + +const char *hevc_nal_type_name[64] = { + "TRAIL_N", // HEVC_NAL_TRAIL_N + "TRAIL_R", // HEVC_NAL_TRAIL_R + "TSA_N", // HEVC_NAL_TSA_N + "TSA_R", // HEVC_NAL_TSA_R + "STSA_N", // HEVC_NAL_STSA_N + "STSA_R", // HEVC_NAL_STSA_R + "RADL_N", // HEVC_NAL_RADL_N + "RADL_R", // HEVC_NAL_RADL_R + "RASL_N", // HEVC_NAL_RASL_N + "RASL_R", // HEVC_NAL_RASL_R + "RSV_VCL_N10", // HEVC_NAL_VCL_N10 + "RSV_VCL_R11", // HEVC_NAL_VCL_R11 + "RSV_VCL_N12", // HEVC_NAL_VCL_N12 + "RSV_VLC_R13", // HEVC_NAL_VCL_R13 + "RSV_VCL_N14", // HEVC_NAL_VCL_N14 + "RSV_VCL_R15", // HEVC_NAL_VCL_R15 + "BLA_W_LP", // HEVC_NAL_BLA_W_LP + "BLA_W_RADL", // HEVC_NAL_BLA_W_RADL + "BLA_N_LP", // HEVC_NAL_BLA_N_LP + "IDR_W_RADL", // HEVC_NAL_IDR_W_RADL + "IDR_N_LP", // HEVC_NAL_IDR_N_LP + "CRA_NUT", // HEVC_NAL_CRA_NUT + "IRAP_IRAP_VCL22", // HEVC_NAL_IRAP_VCL22 + "IRAP_IRAP_VCL23", // 
HEVC_NAL_IRAP_VCL23 + "RSV_VCL24", // HEVC_NAL_RSV_VCL24 + "RSV_VCL25", // HEVC_NAL_RSV_VCL25 + "RSV_VCL26", // HEVC_NAL_RSV_VCL26 + "RSV_VCL27", // HEVC_NAL_RSV_VCL27 + "RSV_VCL28", // HEVC_NAL_RSV_VCL28 + "RSV_VCL29", // HEVC_NAL_RSV_VCL29 + "RSV_VCL30", // HEVC_NAL_RSV_VCL30 + "RSV_VCL31", // HEVC_NAL_RSV_VCL31 + "VPS", // HEVC_NAL_VPS + "SPS", // HEVC_NAL_SPS + "PPS", // HEVC_NAL_PPS + "AUD", // HEVC_NAL_AUD + "EOS_NUT", // HEVC_NAL_EOS_NUT + "EOB_NUT", // HEVC_NAL_EOB_NUT + "FD_NUT", // HEVC_NAL_FD_NUT + "SEI_PREFIX", // HEVC_NAL_SEI_PREFIX + "SEI_SUFFIX", // HEVC_NAL_SEI_SUFFIX + "RSV_NVCL41", // HEVC_NAL_RSV_NVCL41 + "RSV_NVCL42", // HEVC_NAL_RSV_NVCL42 + "RSV_NVCL43", // HEVC_NAL_RSV_NVCL43 + "RSV_NVCL44", // HEVC_NAL_RSV_NVCL44 + "RSV_NVCL45", // HEVC_NAL_RSV_NVCL45 + "RSV_NVCL46", // HEVC_NAL_RSV_NVCL46 + "RSV_NVCL47", // HEVC_NAL_RSV_NVCL47 + "UNSPEC48", // HEVC_NAL_UNSPEC48 + "UNSPEC49", // HEVC_NAL_UNSPEC49 + "UNSPEC50", // HEVC_NAL_UNSPEC50 + "UNSPEC51", // HEVC_NAL_UNSPEC51 + "UNSPEC52", // HEVC_NAL_UNSPEC52 + "UNSPEC53", // HEVC_NAL_UNSPEC53 + "UNSPEC54", // HEVC_NAL_UNSPEC54 + "UNSPEC55", // HEVC_NAL_UNSPEC55 + "UNSPEC56", // HEVC_NAL_UNSPEC56 + "UNSPEC57", // HEVC_NAL_UNSPEC57 + "UNSPEC58", // HEVC_NAL_UNSPEC58 + "UNSPEC59", // HEVC_NAL_UNSPEC59 + "UNSPEC60", // HEVC_NAL_UNSPEC60 + "UNSPEC61", // HEVC_NAL_UNSPEC61 + "UNSPEC62", // HEVC_NAL_UNSPEC62 + "UNSPEC63", // HEVC_NAL_UNSPEC63 +}; + +static const char *hevc_nal_unit_name(int nal_type) +{ + return hevc_nal_type_name[nal_type]; +} + +/** +* Parse NAL units of found picture and decode some basic information. +* +* @param s parser context. +* @param avctx codec context. +* @param buf buffer with field/frame data. +* @param buf_size size of the buffer. 
+*/ +static int decode_extradata_ps(u8 *data, int size, struct h265_param_sets *ps) +{ + int ret = 0; + struct get_bits_context gb; + u32 src_len, rbsp_size = 0; + u8 *rbsp_buf = NULL; + int nalu_pos, nuh_layer_id, temporal_id; + u32 nal_type; + u8 *p = data; + u32 len = size; + + nalu_pos = find_start_code(p, len); + if (nalu_pos < 0) + return -1; + + src_len = calc_nal_len(p + nalu_pos, size - nalu_pos); + rbsp_buf = nal_unit_extract_rbsp(p + nalu_pos, src_len, &rbsp_size); + if (rbsp_buf == NULL) + return -ENOMEM; + + ret = init_get_bits8(&gb, rbsp_buf, rbsp_size); + if (ret < 0) + goto out; + + if (get_bits1(&gb) != 0) { + ret = -1; + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "invalid data, return!\n"); + goto out; + } + + nal_type = get_bits(&gb, 6); + nuh_layer_id = get_bits(&gb, 6); + temporal_id = get_bits(&gb, 3) - 1; + if (temporal_id < 0) { + ret = -1; + goto out; + } + + /*pr_info("nal_unit_type: %d(%s), nuh_layer_id: %d, temporal_id: %d\n", + nal_type, hevc_nal_unit_name(nal_type), + nuh_layer_id, temporal_id);*/ + + switch (nal_type) { + case HEVC_NAL_VPS: + ret = ff_hevc_parse_vps(&gb, &ps->vps); + if (ret < 0) + goto out; + ps->vps_parsed = true; + break; + case HEVC_NAL_SPS: + ret = ff_hevc_parse_sps(&gb, &ps->sps); + if (ret < 0) + goto out; + ps->sps_parsed = true; + break; + /*case HEVC_NAL_PPS: + ret = ff_hevc_decode_nal_pps(&gb, NULL, ps); + if (ret < 0) + goto out; + ps->pps_parsed = true; + break;*/ + default: + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Unsupport parser nal type (%s).\n", + hevc_nal_unit_name(nal_type)); + break; + } + +out: + vfree(rbsp_buf); + + return ret; +} + +int h265_decode_extradata_ps(u8 *buf, int size, struct h265_param_sets *ps) +{ + int ret = 0, i = 0, j = 0; + u8 *p = buf; + int len = size; + + for (i = 4; i < size; i++) { + j = find_start_code(p, len); + if (j > 0) { + len = size - (p - buf); + ret = decode_extradata_ps(p, len, ps); + if (ret) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "parse extra data failed. 
err: %d\n", ret); + return ret; + } + + if (ps->sps_parsed) + break; + + p += j; + } + p++; + } + + return ret; +} +
diff --git a/drivers/amvdec_ports/decoder/aml_hevc_parser.h b/drivers/amvdec_ports/decoder/aml_hevc_parser.h new file mode 100644 index 0000000..9223639 --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_hevc_parser.h
@@ -0,0 +1,562 @@ +/* + * drivers/amvdec_ports/decoder/aml_hevc_parser.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + + +#ifndef AML_HEVC_PARSER_H +#define AML_HEVC_PARSER_H + +#include "../aml_vcodec_drv.h" +#include "../utils/common.h" + +#define MAX_DPB_SIZE 16 // A.4.1 +#define MAX_REFS 16 + +#define MAX_NB_THREADS 16 +#define SHIFT_CTB_WPP 2 + +/** + * 7.4.2.1 + */ +#define MAX_SUB_LAYERS 7 +#define MAX_VPS_COUNT 16 +#define MAX_SPS_COUNT 32 +#define MAX_PPS_COUNT 256 +#define MAX_SHORT_TERM_RPS_COUNT 64 +#define MAX_CU_SIZE 128 + +//TODO: check if this is really the maximum +#define MAX_TRANSFORM_DEPTH 5 + +#define MAX_TB_SIZE 32 +#define MAX_PB_SIZE 64 +#define MAX_LOG2_CTB_SIZE 6 +#define MAX_QP 51 +#define DEFAULT_INTRA_TC_OFFSET 2 + +#define HEVC_CONTEXTS 183 + +#define MRG_MAX_NUM_CANDS 5 + +#define L0 0 +#define L1 1 + +#define EPEL_EXTRA_BEFORE 1 +#define EPEL_EXTRA_AFTER 2 +#define EPEL_EXTRA 3 + +#define FF_PROFILE_HEVC_MAIN 1 +#define FF_PROFILE_HEVC_MAIN_10 2 +#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3 +#define FF_PROFILE_HEVC_REXT 4 + +/** + * Value of the luma sample at position (x, y) in the 2D array tab. 
+ */ +#define SAMPLE(tab, x, y) ((tab)[(y) * s->sps->width + (x)]) +#define SAMPLE_CTB(tab, x, y) ((tab)[(y) * min_cb_width + (x)]) +#define SAMPLE_CBF(tab, x, y) ((tab)[((y) & ((1<<log2_trafo_size)-1)) * MAX_CU_SIZE + ((x) & ((1<<log2_trafo_size)-1))]) + +#define IS_IDR(s) (s->nal_unit_type == NAL_IDR_W_RADL || s->nal_unit_type == NAL_IDR_N_LP) +#define IS_BLA(s) (s->nal_unit_type == NAL_BLA_W_RADL || s->nal_unit_type == NAL_BLA_W_LP || \ + s->nal_unit_type == NAL_BLA_N_LP) +#define IS_IRAP(s) (s->nal_unit_type >= 16 && s->nal_unit_type <= 23) + +/** + * Table 7-3: NAL unit type codes + */ +enum HEVCNALUnitType { + HEVC_NAL_TRAIL_N = 0, + HEVC_NAL_TRAIL_R = 1, + HEVC_NAL_TSA_N = 2, + HEVC_NAL_TSA_R = 3, + HEVC_NAL_STSA_N = 4, + HEVC_NAL_STSA_R = 5, + HEVC_NAL_RADL_N = 6, + HEVC_NAL_RADL_R = 7, + HEVC_NAL_RASL_N = 8, + HEVC_NAL_RASL_R = 9, + HEVC_NAL_VCL_N10 = 10, + HEVC_NAL_VCL_R11 = 11, + HEVC_NAL_VCL_N12 = 12, + HEVC_NAL_VCL_R13 = 13, + HEVC_NAL_VCL_N14 = 14, + HEVC_NAL_VCL_R15 = 15, + HEVC_NAL_BLA_W_LP = 16, + HEVC_NAL_BLA_W_RADL = 17, + HEVC_NAL_BLA_N_LP = 18, + HEVC_NAL_IDR_W_RADL = 19, + HEVC_NAL_IDR_N_LP = 20, + HEVC_NAL_CRA_NUT = 21, + HEVC_NAL_IRAP_VCL22 = 22, + HEVC_NAL_IRAP_VCL23 = 23, + HEVC_NAL_RSV_VCL24 = 24, + HEVC_NAL_RSV_VCL25 = 25, + HEVC_NAL_RSV_VCL26 = 26, + HEVC_NAL_RSV_VCL27 = 27, + HEVC_NAL_RSV_VCL28 = 28, + HEVC_NAL_RSV_VCL29 = 29, + HEVC_NAL_RSV_VCL30 = 30, + HEVC_NAL_RSV_VCL31 = 31, + HEVC_NAL_VPS = 32, + HEVC_NAL_SPS = 33, + HEVC_NAL_PPS = 34, + HEVC_NAL_AUD = 35, + HEVC_NAL_EOS_NUT = 36, + HEVC_NAL_EOB_NUT = 37, + HEVC_NAL_FD_NUT = 38, + HEVC_NAL_SEI_PREFIX = 39, + HEVC_NAL_SEI_SUFFIX = 40, +}; + +enum HEVCSliceType { + HEVC_SLICE_B = 0, + HEVC_SLICE_P = 1, + HEVC_SLICE_I = 2, +}; + +enum { + // 7.4.3.1: vps_max_layers_minus1 is in [0, 62]. + HEVC_MAX_LAYERS = 63, + // 7.4.3.1: vps_max_sub_layers_minus1 is in [0, 6]. + HEVC_MAX_SUB_LAYERS = 7, + // 7.4.3.1: vps_num_layer_sets_minus1 is in [0, 1023]. 
+ HEVC_MAX_LAYER_SETS = 1024, + + // 7.4.2.1: vps_video_parameter_set_id is u(4). + HEVC_MAX_VPS_COUNT = 16, + // 7.4.3.2.1: sps_seq_parameter_set_id is in [0, 15]. + HEVC_MAX_SPS_COUNT = 16, + // 7.4.3.3.1: pps_pic_parameter_set_id is in [0, 63]. + HEVC_MAX_PPS_COUNT = 64, + + // A.4.2: MaxDpbSize is bounded above by 16. + HEVC_MAX_DPB_SIZE = 16, + // 7.4.3.1: vps_max_dec_pic_buffering_minus1[i] is in [0, MaxDpbSize - 1]. + HEVC_MAX_REFS = HEVC_MAX_DPB_SIZE, + + // 7.4.3.2.1: num_short_term_ref_pic_sets is in [0, 64]. + HEVC_MAX_SHORT_TERM_REF_PIC_SETS = 64, + // 7.4.3.2.1: num_long_term_ref_pics_sps is in [0, 32]. + HEVC_MAX_LONG_TERM_REF_PICS = 32, + + // A.3: all profiles require that CtbLog2SizeY is in [4, 6]. + HEVC_MIN_LOG2_CTB_SIZE = 4, + HEVC_MAX_LOG2_CTB_SIZE = 6, + + // E.3.2: cpb_cnt_minus1[i] is in [0, 31]. + HEVC_MAX_CPB_CNT = 32, + + // A.4.1: in table A.6 the highest level allows a MaxLumaPs of 35 651 584. + HEVC_MAX_LUMA_PS = 35651584, + // A.4.1: pic_width_in_luma_samples and pic_height_in_luma_samples are + // constrained to be not greater than sqrt(MaxLumaPs * 8). Hence height/ + // width are bounded above by sqrt(8 * 35651584) = 16888.2 samples. + HEVC_MAX_WIDTH = 16888, + HEVC_MAX_HEIGHT = 16888, + + // A.4.1: table A.6 allows at most 22 tile rows for any level. + HEVC_MAX_TILE_ROWS = 22, + // A.4.1: table A.6 allows at most 20 tile columns for any level. + HEVC_MAX_TILE_COLUMNS = 20, + + // 7.4.7.1: in the worst case (tiles_enabled_flag and + // entropy_coding_sync_enabled_flag are both set), entry points can be + // placed at the beginning of every Ctb row in every tile, giving an + // upper bound of (num_tile_columns_minus1 + 1) * PicHeightInCtbsY - 1. + // Only a stream with very high resolution and perverse parameters could + // get near that, though, so set a lower limit here with the maximum + // possible value for 4K video (at most 135 16x16 Ctb rows). 
+ HEVC_MAX_ENTRY_POINT_OFFSETS = HEVC_MAX_TILE_COLUMNS * 135, +}; + +struct ShortTermRPS { + u32 num_negative_pics; + int num_delta_pocs; + int rps_idx_num_delta_pocs; + int delta_poc[32]; + u8 used[32]; +}; + +struct LongTermRPS { + int poc[32]; + u8 used[32]; + u8 nb_refs; +}; + +struct SliceHeader { + u32 pps_id; + + ///< address (in raster order) of the first block in the current slice segment + u32 slice_segment_addr; + ///< address (in raster order) of the first block in the current slice + u32 slice_addr; + + enum HEVCSliceType slice_type; + + int pic_order_cnt_lsb; + + u8 first_slice_in_pic_flag; + u8 dependent_slice_segment_flag; + u8 pic_output_flag; + u8 colour_plane_id; + + ///< RPS coded in the slice header itself is stored here + int short_term_ref_pic_set_sps_flag; + int short_term_ref_pic_set_size; + struct ShortTermRPS slice_rps; + const struct ShortTermRPS *short_term_rps; + int long_term_ref_pic_set_size; + struct LongTermRPS long_term_rps; + u32 list_entry_lx[2][32]; + + u8 rpl_modification_flag[2]; + u8 no_output_of_prior_pics_flag; + u8 slice_temporal_mvp_enabled_flag; + + u32 nb_refs[2]; + + u8 slice_sample_adaptive_offset_flag[3]; + u8 mvd_l1_zero_flag; + + u8 cabac_init_flag; + u8 disable_deblocking_filter_flag; ///< slice_header_disable_deblocking_filter_flag + u8 slice_loop_filter_across_slices_enabled_flag; + u8 collocated_list; + + u32 collocated_ref_idx; + + int slice_qp_delta; + int slice_cb_qp_offset; + int slice_cr_qp_offset; + + u8 cu_chroma_qp_offset_enabled_flag; + + int beta_offset; ///< beta_offset_div2 * 2 + int tc_offset; ///< tc_offset_div2 * 2 + + u32 max_num_merge_cand; ///< 5 - 5_minus_max_num_merge_cand + + u8 *entry_point_offset; + int * offset; + int * size; + int num_entry_point_offsets; + + char slice_qp; + + u8 luma_log2_weight_denom; + s16 chroma_log2_weight_denom; + + s16 luma_weight_l0[16]; + s16 chroma_weight_l0[16][2]; + s16 chroma_weight_l1[16][2]; + s16 luma_weight_l1[16]; + + s16 luma_offset_l0[16]; + s16 
chroma_offset_l0[16][2]; + + s16 luma_offset_l1[16]; + s16 chroma_offset_l1[16][2]; + + int slice_ctb_addr_rs; +}; + +struct HEVCWindow { + u32 left_offset; + u32 right_offset; + u32 top_offset; + u32 bottom_offset; +}; + +struct VUI { +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER + struct AVRational sar; +#endif + int overscan_info_present_flag; + int overscan_appropriate_flag; + + int video_signal_type_present_flag; + int video_format; + int video_full_range_flag; + int colour_description_present_flag; + u8 colour_primaries; + u8 transfer_characteristic; + u8 matrix_coeffs; + + int chroma_loc_info_present_flag; + int chroma_sample_loc_type_top_field; + int chroma_sample_loc_type_bottom_field; + int neutra_chroma_indication_flag; + + int field_seq_flag; + int frame_field_info_present_flag; + + int default_display_window_flag; + struct HEVCWindow def_disp_win; + + int vui_timing_info_present_flag; + u32 vui_num_units_in_tick; + u32 vui_time_scale; + int vui_poc_proportional_to_timing_flag; + int vui_num_ticks_poc_diff_one_minus1; + int vui_hrd_parameters_present_flag; + + int bitstream_restriction_flag; + int tiles_fixed_structure_flag; + int motion_vectors_over_pic_boundaries_flag; + int restricted_ref_pic_lists_flag; + int min_spatial_segmentation_idc; + int max_bytes_per_pic_denom; + int max_bits_per_min_cu_denom; + int log2_max_mv_length_horizontal; + int log2_max_mv_length_vertical; +}; + +struct PTLCommon { + u8 profile_space; + u8 tier_flag; + u8 profile_idc; + u8 profile_compatibility_flag[32]; + u8 level_idc; + u8 progressive_source_flag; + u8 interlaced_source_flag; + u8 non_packed_constraint_flag; + u8 frame_only_constraint_flag; +}; + +struct PTL { + struct PTLCommon general_ptl; + struct PTLCommon sub_layer_ptl[HEVC_MAX_SUB_LAYERS]; + + u8 sub_layer_profile_present_flag[HEVC_MAX_SUB_LAYERS]; + u8 sub_layer_level_present_flag[HEVC_MAX_SUB_LAYERS]; +}; + +struct h265_VPS_t { + u8 vps_temporal_id_nesting_flag; + int vps_max_layers; + int 
vps_max_sub_layers; ///< vps_max_temporal_layers_minus1 + 1 + + struct PTL ptl; + int vps_sub_layer_ordering_info_present_flag; + u32 vps_max_dec_pic_buffering[HEVC_MAX_SUB_LAYERS]; + u32 vps_num_reorder_pics[HEVC_MAX_SUB_LAYERS]; + u32 vps_max_latency_increase[HEVC_MAX_SUB_LAYERS]; + int vps_max_layer_id; + int vps_num_layer_sets; ///< vps_num_layer_sets_minus1 + 1 + u8 vps_timing_info_present_flag; + u32 vps_num_units_in_tick; + u32 vps_time_scale; + u8 vps_poc_proportional_to_timing_flag; + int vps_num_ticks_poc_diff_one; ///< vps_num_ticks_poc_diff_one_minus1 + 1 + int vps_num_hrd_parameters; +}; + +struct ScalingList { + /* This is a little wasteful, since sizeID 0 only needs 8 coeffs, + * and size ID 3 only has 2 arrays, not 6. */ + u8 sl[4][6][64]; + u8 sl_dc[2][6]; +}; + +struct h265_SPS_t { + u8 vps_id; + u8 sps_id; + int chroma_format_idc; + u8 separate_colour_plane_flag; + + struct HEVCWindow output_window; + struct HEVCWindow pic_conf_win; + + int bit_depth; + int bit_depth_chroma; + int pixel_shift; + int pix_fmt; + + u32 log2_max_poc_lsb; + int pcm_enabled_flag; + + int max_sub_layers; + struct { + int max_dec_pic_buffering; + int num_reorder_pics; + int max_latency_increase; + } temporal_layer[HEVC_MAX_SUB_LAYERS]; + u8 temporal_id_nesting_flag; + + struct VUI vui; + struct PTL ptl; + + u8 scaling_list_enable_flag; + struct ScalingList scaling_list; + + u32 nb_st_rps; + struct ShortTermRPS st_rps[HEVC_MAX_SHORT_TERM_REF_PIC_SETS]; + + u8 amp_enabled_flag; + u8 sao_enabled; + + u8 long_term_ref_pics_present_flag; + u16 lt_ref_pic_poc_lsb_sps[HEVC_MAX_LONG_TERM_REF_PICS]; + u8 used_by_curr_pic_lt_sps_flag[HEVC_MAX_LONG_TERM_REF_PICS]; + u8 num_long_term_ref_pics_sps; + + struct { + u8 bit_depth; + u8 bit_depth_chroma; + u32 log2_min_pcm_cb_size; + u32 log2_max_pcm_cb_size; + u8 loop_filter_disable_flag; + } pcm; + u8 sps_temporal_mvp_enabled_flag; + u8 sps_strong_intra_smoothing_enable_flag; + + u32 log2_min_cb_size; + u32 
log2_diff_max_min_coding_block_size; + u32 log2_min_tb_size; + u32 log2_max_trafo_size; + u32 log2_ctb_size; + u32 log2_min_pu_size; + + int max_transform_hierarchy_depth_inter; + int max_transform_hierarchy_depth_intra; + + int sps_range_extension_flag; + int transform_skip_rotation_enabled_flag; + int transform_skip_context_enabled_flag; + int implicit_rdpcm_enabled_flag; + int explicit_rdpcm_enabled_flag; + int extended_precision_processing_flag; + int intra_smoothing_disabled_flag; + int high_precision_offsets_enabled_flag; + int persistent_rice_adaptation_enabled_flag; + int cabac_bypass_alignment_enabled_flag; + + ///< coded frame dimension in various units + int width; + int height; + int ctb_width; + int ctb_height; + int ctb_size; + int min_cb_width; + int min_cb_height; + int min_tb_width; + int min_tb_height; + int min_pu_width; + int min_pu_height; + int tb_mask; + + int hshift[3]; + int vshift[3]; + + int qp_bd_offset; + + u8 data[4096]; + int data_size; +}; + +struct h265_PPS_t { + u32 sps_id; ///< seq_parameter_set_id + + u8 sign_data_hiding_flag; + + u8 cabac_init_present_flag; + + int num_ref_idx_l0_default_active; ///< num_ref_idx_l0_default_active_minus1 + 1 + int num_ref_idx_l1_default_active; ///< num_ref_idx_l1_default_active_minus1 + 1 + int pic_init_qp_minus26; + + u8 constrained_intra_pred_flag; + u8 transform_skip_enabled_flag; + + u8 cu_qp_delta_enabled_flag; + int diff_cu_qp_delta_depth; + + int cb_qp_offset; + int cr_qp_offset; + u8 pic_slice_level_chroma_qp_offsets_present_flag; + u8 weighted_pred_flag; + u8 weighted_bipred_flag; + u8 output_flag_present_flag; + u8 transquant_bypass_enable_flag; + + u8 dependent_slice_segments_enabled_flag; + u8 tiles_enabled_flag; + u8 entropy_coding_sync_enabled_flag; + + int num_tile_columns; ///< num_tile_columns_minus1 + 1 + int num_tile_rows; ///< num_tile_rows_minus1 + 1 + u8 uniform_spacing_flag; + u8 loop_filter_across_tiles_enabled_flag; + + u8 seq_loop_filter_across_slices_enabled_flag; + + 
u8 deblocking_filter_control_present_flag; + u8 deblocking_filter_override_enabled_flag; + u8 disable_dbf; + int beta_offset; ///< beta_offset_div2 * 2 + int tc_offset; ///< tc_offset_div2 * 2 + + u8 scaling_list_data_present_flag; + struct ScalingList scaling_list; + + u8 lists_modification_present_flag; + int log2_parallel_merge_level; ///< log2_parallel_merge_level_minus2 + 2 + int num_extra_slice_header_bits; + u8 slice_header_extension_present_flag; + u8 log2_max_transform_skip_block_size; + u8 cross_component_prediction_enabled_flag; + u8 chroma_qp_offset_list_enabled_flag; + u8 diff_cu_chroma_qp_offset_depth; + u8 chroma_qp_offset_list_len_minus1; + char cb_qp_offset_list[6]; + char cr_qp_offset_list[6]; + u8 log2_sao_offset_scale_luma; + u8 log2_sao_offset_scale_chroma; + + // Inferred parameters + u32 *column_width; ///< ColumnWidth + u32 *row_height; ///< RowHeight + u32 *col_bd; ///< ColBd + u32 *row_bd; ///< RowBd + int *col_idxX; + + int *ctb_addr_rs_to_ts; ///< CtbAddrRSToTS + int *ctb_addr_ts_to_rs; ///< CtbAddrTSToRS + int *tile_id; ///< TileId + int *tile_pos_rs; ///< TilePosRS + int *min_tb_addr_zs; ///< MinTbAddrZS + int *min_tb_addr_zs_tab;///< MinTbAddrZS +}; + +struct h265_param_sets { + bool vps_parsed; + bool sps_parsed; + bool pps_parsed; + /* currently active parameter sets */ + struct h265_VPS_t vps; + struct h265_SPS_t sps; + struct h265_PPS_t pps; +}; + +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +int h265_decode_extradata_ps(u8 *data, int size, struct h265_param_sets *ps); +#else +inline int h265_decode_extradata_ps(u8 *data, int size, struct h265_param_sets *ps) { return -1; } +#endif + +#endif /* AML_HEVC_PARSER_H */ +
diff --git a/drivers/amvdec_ports/decoder/aml_mjpeg_parser.c b/drivers/amvdec_ports/decoder/aml_mjpeg_parser.c new file mode 100644 index 0000000..c582ab0 --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_mjpeg_parser.c
@@ -0,0 +1,397 @@ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/string.h> + +#include "aml_mjpeg_parser.h" +#include "../utils/get_bits.h" +#include "../utils/put_bits.h" +#include "../utils/golomb.h" +#include "../utils/common.h" +#include "utils.h" + +/* return the 8 bit start code value and update the search +state. Return -1 if no start code found */ +static int find_marker(const u8 **pbuf_ptr, const u8 *buf_end) +{ + const u8 *buf_ptr; + u32 v, v2; + int val; + int skipped = 0; + + buf_ptr = *pbuf_ptr; + while (buf_end - buf_ptr > 1) { + v = *buf_ptr++; + v2 = *buf_ptr; + if ((v == 0xff) && (v2 >= 0xc0) && (v2 <= 0xfe) && buf_ptr < buf_end) { + val = *buf_ptr++; + goto found; + } + skipped++; + } + buf_ptr = buf_end; + val = -1; +found: + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "find_marker skipped %d bytes\n", skipped); + *pbuf_ptr = buf_ptr; + + return val; +} + +int ff_mjpeg_find_marker(struct MJpegDecodeContext *s, + const u8 **buf_ptr, const u8 *buf_end, + const u8 **unescaped_buf_ptr, + int *unescaped_buf_size) +{ + int start_code; + + start_code = find_marker(buf_ptr, buf_end); + + /* unescape buffer of SOS, use special treatment for JPEG-LS */ + if (start_code == SOS && !s->ls) { + const u8 *src = *buf_ptr; + const u8 *ptr = src; + u8 *dst = s->buffer; + + #define copy_data_segment(skip) do { \ + int length = (ptr - src) - (skip); \ + if (length > 0) { \ + memcpy(dst, src, length); \ + dst += length; \ + src = ptr; \ + } \ + } while (0) + + + while (ptr < buf_end) { + u8 x = *(ptr++); + + if (x == 0xff) { + int skip = 0; + while (ptr < buf_end && x == 0xff) { + x = *(ptr++); + skip++; + } + + /* 0xFF, 0xFF, ... 
*/ + if (skip > 1) { + copy_data_segment(skip); + + /* decrement src as it is equal to ptr after the + * copy_data_segment macro and we might want to + * copy the current value of x later on */ + src--; + } + + if (x < 0xd0 || x > 0xd7) { + copy_data_segment(1); + if (x) + break; + } + } + if (src < ptr) + copy_data_segment(0); + } + #undef copy_data_segment + + *unescaped_buf_ptr = s->buffer; + *unescaped_buf_size = dst - s->buffer; + memset(s->buffer + *unescaped_buf_size, 0, + AV_INPUT_BUFFER_PADDING_SIZE); + + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "escaping removed %d bytes\n", + (int)((buf_end - *buf_ptr) - (dst - s->buffer))); + } else if (start_code == SOS && s->ls) { + const u8 *src = *buf_ptr; + u8 *dst = s->buffer; + int bit_count = 0; + int t = 0, b = 0; + struct put_bits_context pb; + + /* find marker */ + while (src + t < buf_end) { + u8 x = src[t++]; + if (x == 0xff) { + while ((src + t < buf_end) && x == 0xff) + x = src[t++]; + if (x & 0x80) { + t -= FFMIN(2, t); + break; + } + } + } + bit_count = t * 8; + init_put_bits(&pb, dst, t); + + /* unescape bitstream */ + while (b < t) { + u8 x = src[b++]; + put_bits(&pb, 8, x); + if (x == 0xFF && b < t) { + x = src[b++]; + if (x & 0x80) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid escape sequence\n"); + x &= 0x7f; + } + put_bits(&pb, 7, x); + bit_count--; + } + } + flush_put_bits(&pb); + + *unescaped_buf_ptr = dst; + *unescaped_buf_size = (bit_count + 7) >> 3; + memset(s->buffer + *unescaped_buf_size, 0, + AV_INPUT_BUFFER_PADDING_SIZE); + } else { + *unescaped_buf_ptr = *buf_ptr; + *unescaped_buf_size = buf_end - *buf_ptr; + } + + return start_code; +} + + +int ff_mjpeg_decode_sof(struct MJpegDecodeContext *s) +{ + int len, nb_components, i, width, height, bits, size_change; + int h_count[MAX_COMPONENTS] = { 0 }; + int v_count[MAX_COMPONENTS] = { 0 }; + + s->cur_scan = 0; + memset(s->upscale_h, 0, sizeof(s->upscale_h)); + memset(s->upscale_v, 0, sizeof(s->upscale_v)); + + /* XXX: verify len field validity */ 
+ len = get_bits(&s->gb, 16); + bits = get_bits(&s->gb, 8); + + if (bits > 16 || bits < 1) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "bits %d is invalid\n", bits); + return -1; + } + + height = get_bits(&s->gb, 16); + width = get_bits(&s->gb, 16); + + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "sof0: picture: %dx%d\n", width, height); + + nb_components = get_bits(&s->gb, 8); + if (nb_components <= 0 || + nb_components > MAX_COMPONENTS) + return -1; + + s->nb_components = nb_components; + s->h_max = 1; + s->v_max = 1; + for (i = 0; i < nb_components; i++) { + /* component id */ + s->component_id[i] = get_bits(&s->gb, 8) - 1; + h_count[i] = get_bits(&s->gb, 4); + v_count[i] = get_bits(&s->gb, 4); + /* compute hmax and vmax (only used in interleaved case) */ + if (h_count[i] > s->h_max) + s->h_max = h_count[i]; + if (v_count[i] > s->v_max) + s->v_max = v_count[i]; + s->quant_index[i] = get_bits(&s->gb, 8); + if (s->quant_index[i] >= 4) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "quant_index is invalid\n"); + return -1; + } + if (!h_count[i] || !v_count[i]) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid sampling factor in component %d %d:%d\n", + i, h_count[i], v_count[i]); + return -1; + } + + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "component %d %d:%d id: %d quant:%d\n", + i, h_count[i], v_count[i], + s->component_id[i], s->quant_index[i]); + } + if (nb_components == 4 + && s->component_id[0] == 'C' - 1 + && s->component_id[1] == 'M' - 1 + && s->component_id[2] == 'Y' - 1 + && s->component_id[3] == 'K' - 1) + s->adobe_transform = 0; + + /* if different size, realloc/alloc picture */ + if (width != s->width || height != s->height || bits != s->bits || + memcmp(s->h_count, h_count, sizeof(h_count)) || + memcmp(s->v_count, v_count, sizeof(v_count))) { + size_change = 1; + + s->width = width; + s->height = height; + s->bits = bits; + memcpy(s->h_count, h_count, sizeof(h_count)); + memcpy(s->v_count, v_count, sizeof(v_count)); + s->interlaced = 0; + s->got_picture = 0; + } else { + size_change 
= 0; + } + + return 0; +} + +static int ff_mjpeg_decode_frame(u8 *buf, int buf_size, struct MJpegDecodeContext *s) +{ + const u8 *buf_end, *buf_ptr; + const u8 *unescaped_buf_ptr; + int unescaped_buf_size; + int start_code; + int ret = 0; + + buf_ptr = buf; + buf_end = buf + buf_size; + while (buf_ptr < buf_end) { + /* find start next marker */ + start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end, + &unescaped_buf_ptr, + &unescaped_buf_size); + /* EOF */ + if (start_code < 0) { + break; + } else if (unescaped_buf_size > INT_MAX / 8) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n", + start_code, unescaped_buf_size, buf_size); + return -1; + } + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "marker=%x avail_size_in_buf=%d\n", + start_code, (int)(buf_end - buf_ptr)); + + ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size); + if (ret < 0) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "invalid buffer\n"); + goto fail; + } + + s->start_code = start_code; + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "startcode: %X\n", start_code); + + switch (start_code) { + case SOF0: + case SOF1: + case SOF2: + case SOF3: + case SOF48: + case SOI: + case SOS: + case EOI: + break; + default: + goto skip; + } + + switch (start_code) { + case SOI: + s->restart_interval = 0; + s->restart_count = 0; + s->raw_image_buffer = buf_ptr; + s->raw_image_buffer_size = buf_end - buf_ptr; + /* nothing to do on SOI */ + break; + case SOF0: + case SOF1: + if (start_code == SOF0) + s->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT; + else + s->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT; + s->lossless = 0; + s->ls = 0; + s->progressive = 0; + if ((ret = ff_mjpeg_decode_sof(s)) < 0) + goto fail; + break; + case SOF2: + s->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT; + s->lossless = 0; + s->ls = 0; + s->progressive = 1; + if ((ret = ff_mjpeg_decode_sof(s)) < 0) + goto fail; + break; + case SOF3: + s->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS; + 
s->properties |= FF_CODEC_PROPERTY_LOSSLESS; + s->lossless = 1; + s->ls = 0; + s->progressive = 0; + if ((ret = ff_mjpeg_decode_sof(s)) < 0) + goto fail; + break; + case SOF48: + s->profile = FF_PROFILE_MJPEG_JPEG_LS; + s->properties |= FF_CODEC_PROPERTY_LOSSLESS; + s->lossless = 1; + s->ls = 1; + s->progressive = 0; + if ((ret = ff_mjpeg_decode_sof(s)) < 0) + goto fail; + break; + case EOI: + goto the_end; + case DHT: + case LSE: + case SOS: + case DRI: + case SOF5: + case SOF6: + case SOF7: + case SOF9: + case SOF10: + case SOF11: + case SOF13: + case SOF14: + case SOF15: + case JPG: + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "mjpeg: unsupported coding type (%x)\n", start_code); + break; + } +skip: + /* eof process start code */ + buf_ptr += (get_bits_count(&s->gb) + 7) / 8; + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "marker parser used %d bytes (%d bits)\n", + (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb)); + } + + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "No JPEG data found in image\n"); + return -1; +fail: + s->got_picture = 0; + return ret; +the_end: + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "decode frame unused %d bytes\n", (int)(buf_end - buf_ptr)); + + return 0; +} + +int mjpeg_decode_extradata_ps(u8 *buf, int size, struct mjpeg_param_sets *ps) +{ + int ret; + + ps->head_parsed = false; + + ps->dec_ps.buf_size = size; + ps->dec_ps.buffer = vzalloc(size + AV_INPUT_BUFFER_PADDING_SIZE); + if (!ps->dec_ps.buffer) + return -1; + + ret = ff_mjpeg_decode_frame(buf, size, &ps->dec_ps); + if (ret) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "parse extra data failed. err: %d\n", ret); + vfree(ps->dec_ps.buffer); + return ret; + } + + if (ps->dec_ps.width && ps->dec_ps.height) + ps->head_parsed = true; + + vfree(ps->dec_ps.buffer); + + return 0; +} +
diff --git a/drivers/amvdec_ports/decoder/aml_mjpeg_parser.h b/drivers/amvdec_ports/decoder/aml_mjpeg_parser.h new file mode 100644 index 0000000..4704ba0 --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_mjpeg_parser.h
@@ -0,0 +1,170 @@ +#ifndef AML_MJPEG_PARSER_H +#define AML_MJPEG_PARSER_H + +#include "../aml_vcodec_drv.h" +#include "../utils/common.h" +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +#include "../utils/get_bits.h" +#endif + +#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT 0xc0 +#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1 +#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT 0xc2 +#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS 0xc3 +#define FF_PROFILE_MJPEG_JPEG_LS 0xf7 + +#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001 +#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002 + +#define MAX_COMPONENTS 4 + +/* JPEG marker codes */ +enum JpegMarker { + /* start of frame */ + SOF0 = 0xc0, /* baseline */ + SOF1 = 0xc1, /* extended sequential, huffman */ + SOF2 = 0xc2, /* progressive, huffman */ + SOF3 = 0xc3, /* lossless, huffman */ + + SOF5 = 0xc5, /* differential sequential, huffman */ + SOF6 = 0xc6, /* differential progressive, huffman */ + SOF7 = 0xc7, /* differential lossless, huffman */ + JPG = 0xc8, /* reserved for JPEG extension */ + SOF9 = 0xc9, /* extended sequential, arithmetic */ + SOF10 = 0xca, /* progressive, arithmetic */ + SOF11 = 0xcb, /* lossless, arithmetic */ + + SOF13 = 0xcd, /* differential sequential, arithmetic */ + SOF14 = 0xce, /* differential progressive, arithmetic */ + SOF15 = 0xcf, /* differential lossless, arithmetic */ + + DHT = 0xc4, /* define huffman tables */ + + DAC = 0xcc, /* define arithmetic-coding conditioning */ + + /* restart with modulo 8 count "m" */ + RST0 = 0xd0, + RST1 = 0xd1, + RST2 = 0xd2, + RST3 = 0xd3, + RST4 = 0xd4, + RST5 = 0xd5, + RST6 = 0xd6, + RST7 = 0xd7, + + SOI = 0xd8, /* start of image */ + EOI = 0xd9, /* end of image */ + SOS = 0xda, /* start of scan */ + DQT = 0xdb, /* define quantization tables */ + DNL = 0xdc, /* define number of lines */ + DRI = 0xdd, /* define restart interval */ + DHP = 0xde, /* define hierarchical progression */ + EXP = 0xdf, /* expand reference components */ + + APP0 = 0xe0, + 
APP1 = 0xe1, + APP2 = 0xe2, + APP3 = 0xe3, + APP4 = 0xe4, + APP5 = 0xe5, + APP6 = 0xe6, + APP7 = 0xe7, + APP8 = 0xe8, + APP9 = 0xe9, + APP10 = 0xea, + APP11 = 0xeb, + APP12 = 0xec, + APP13 = 0xed, + APP14 = 0xee, + APP15 = 0xef, + + JPG0 = 0xf0, + JPG1 = 0xf1, + JPG2 = 0xf2, + JPG3 = 0xf3, + JPG4 = 0xf4, + JPG5 = 0xf5, + JPG6 = 0xf6, + SOF48 = 0xf7, ///< JPEG-LS + LSE = 0xf8, ///< JPEG-LS extension parameters + JPG9 = 0xf9, + JPG10 = 0xfa, + JPG11 = 0xfb, + JPG12 = 0xfc, + JPG13 = 0xfd, + + COM = 0xfe, /* comment */ + + TEM = 0x01, /* temporary private use for arithmetic coding */ + + /* 0x02 -> 0xbf reserved */ +}; + +struct VLC { + int bits; + short (*table)[2]; ///< code, bits + int table_size, table_allocated; +}; + +struct MJpegDecodeContext { +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER + struct get_bits_context gb; +#endif + int buf_size; + + int start_code; /* current start code */ + int buffer_size; + u8 *buffer; + + u16 quant_matrixes[4][64]; + struct VLC vlcs[3][4]; + int qscale[4]; ///< quantizer scale calculated from quant_matrixes + + int first_picture; /* true if decoding first picture */ + int interlaced; /* true if interlaced */ + int bottom_field; /* true if bottom field */ + int lossless; + int ls; + int progressive; + u8 upscale_h[4]; + u8 upscale_v[4]; + int bits; /* bits per component */ + int adobe_transform; + + int width, height; + int mb_width, mb_height; + int nb_components; + int block_stride[MAX_COMPONENTS]; + int component_id[MAX_COMPONENTS]; + int h_count[MAX_COMPONENTS]; /* horizontal and vertical count for each component */ + int v_count[MAX_COMPONENTS]; + int h_scount[MAX_COMPONENTS]; + int v_scount[MAX_COMPONENTS]; + int h_max, v_max; /* maximum h and v counts */ + int quant_index[4]; /* quant table index for each component */ + int got_picture; ///< we found a SOF and picture is valid, too. + int restart_interval; + int restart_count; + int cur_scan; /* current scan, used by JPEG-LS */ + + // Raw stream data for hwaccel use. 
+ const u8 *raw_image_buffer; + int raw_image_buffer_size; + + int profile; + u32 properties; +}; + +struct mjpeg_param_sets { + bool head_parsed; + /* currently active parameter sets */ + struct MJpegDecodeContext dec_ps; +}; + +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +int mjpeg_decode_extradata_ps(u8 *buf, int size, struct mjpeg_param_sets *ps); +#else +inline int mjpeg_decode_extradata_ps(u8 *buf, int size, struct mjpeg_param_sets *ps) { return -1; } +#endif + +#endif
diff --git a/drivers/amvdec_ports/decoder/aml_mpeg12_parser.c b/drivers/amvdec_ports/decoder/aml_mpeg12_parser.c new file mode 100644 index 0000000..748a83f --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_mpeg12_parser.c
@@ -0,0 +1,198 @@ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/string.h> + +#include "aml_mpeg12_parser.h" +#include "../utils/get_bits.h" +#include "../utils/put_bits.h" +#include "../utils/golomb.h" +#include "../utils/common.h" +#include "utils.h" + +const struct AVRational ff_mpeg12_frame_rate_tab[16] = { + { 0, 0}, + {24000, 1001}, + { 24, 1}, + { 25, 1}, + {30000, 1001}, + { 30, 1}, + { 50, 1}, + {60000, 1001}, + { 60, 1}, + // Xing's 15fps: (9) + { 15, 1}, + // libmpeg3's "Unofficial economy rates": (10-13) + { 5, 1}, + { 10, 1}, + { 12, 1}, + { 15, 1}, + { 0, 0}, +}; + +const u8 *avpriv_find_start_code(const u8 *p, const u8 *end, u32 *state) +{ + int i; + + if (p >= end) + return end; + + for (i = 0; i < 3; i++) { + u32 tmp = *state << 8; + *state = tmp + *(p++); + if (tmp == 0x100 || p == end) + return p; + } + + while (p < end) { + if (p[-1] > 1 ) p += 3; + else if (p[-2] ) p += 2; + else if (p[-3]|(p[-1]-1)) p++; + else { + p++; + break; + } + } + + p = FFMIN(p, end) - 4; + *state = AV_RB32(p); + + return p + 4; +} + +static void mpegvideo_extract_headers(const u8 *buf, int buf_size, + struct mpeg12_param_sets *ps) +{ + struct MpvParseContext *pc = &ps->dec_ps; + const u8 *buf_end = buf + buf_size; + u32 start_code; + int frame_rate_index, ext_type, bytes_left; + int frame_rate_ext_n, frame_rate_ext_d; + int top_field_first, repeat_first_field, progressive_frame; + int horiz_size_ext, vert_size_ext, bit_rate_ext; + int bit_rate = 0; + int vbv_delay = 0; + int chroma_format; + enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE; + //FIXME replace the crap with get_bits() + pc->repeat_pict = 0; + + while (buf < buf_end) { + start_code= -1; + buf= avpriv_find_start_code(buf, buf_end, &start_code); + bytes_left = buf_end - buf; + switch (start_code) { + case PICTURE_START_CODE: + if (bytes_left >= 2) { + pc->pict_type = (buf[1] >> 3) & 7; + if (bytes_left >= 4) + vbv_delay = ((buf[1] & 
0x07) << 13) | (buf[2] << 5) | (buf[3] >> 3); + } + break; + case SEQ_START_CODE: + if (bytes_left >= 7) { + pc->width = (buf[0] << 4) | (buf[1] >> 4); + pc->height = ((buf[1] & 0x0f) << 8) | buf[2]; + + pix_fmt = AV_PIX_FMT_YUV420P; + frame_rate_index = buf[3] & 0xf; + pc->frame_rate = ff_mpeg12_frame_rate_tab[frame_rate_index]; + bit_rate = (buf[4]<<10) | (buf[5]<<2) | (buf[6]>>6); + pc->ticks_per_frame = 1; + } + break; + case EXT_START_CODE: + if (bytes_left >= 1) { + ext_type = (buf[0] >> 4); + switch (ext_type) { + case 0x1: /* sequence extension */ + if (bytes_left >= 6) { + horiz_size_ext = ((buf[1] & 1) << 1) | (buf[2] >> 7); + vert_size_ext = (buf[2] >> 5) & 3; + bit_rate_ext = ((buf[2] & 0x1F)<<7) | (buf[3]>>1); + frame_rate_ext_n = (buf[5] >> 5) & 3; + frame_rate_ext_d = (buf[5] & 0x1f); + pc->progressive_sequence = buf[1] & (1 << 3); + pc->has_b_frames= !(buf[5] >> 7); + + chroma_format = (buf[1] >> 1) & 3; + switch (chroma_format) { + case 1: pix_fmt = AV_PIX_FMT_YUV420P; break; + case 2: pix_fmt = AV_PIX_FMT_YUV422P; break; + case 3: pix_fmt = AV_PIX_FMT_YUV444P; break; + } + + pc->width = (pc->width & 0xFFF) | (horiz_size_ext << 12); + pc->height = (pc->height& 0xFFF) | ( vert_size_ext << 12); + bit_rate = (bit_rate&0x3FFFF) | (bit_rate_ext << 18); + //if(did_set_size) + //set_dim_ret = ff_set_dimensions(avctx, pc->width, pc->height); + pc->framerate.num = pc->frame_rate.num * (frame_rate_ext_n + 1); + pc->framerate.den = pc->frame_rate.den * (frame_rate_ext_d + 1); + pc->ticks_per_frame = 2; + } + break; + case 0x8: /* picture coding extension */ + if (bytes_left >= 5) { + top_field_first = buf[3] & (1 << 7); + repeat_first_field = buf[3] & (1 << 1); + progressive_frame = buf[4] & (1 << 7); + + /* check if we must repeat the frame */ + pc->repeat_pict = 1; + if (repeat_first_field) { + if (pc->progressive_sequence) { + if (top_field_first) + pc->repeat_pict = 5; + else + pc->repeat_pict = 3; + } else if (progressive_frame) { + pc->repeat_pict = 2; 
+ } + } + + if (!pc->progressive_sequence && !progressive_frame) { + if (top_field_first) + pc->field_order = AV_FIELD_TT; + else + pc->field_order = AV_FIELD_BB; + } else + pc->field_order = AV_FIELD_PROGRESSIVE; + } + break; + } + } + break; + case -1: + goto the_end; + default: + /* we stop parsing when we encounter a slice. It ensures + that this function takes a negligible amount of time */ + if (start_code >= SLICE_MIN_START_CODE && + start_code <= SLICE_MAX_START_CODE) + goto the_end; + break; + } + } +the_end: + + if (pix_fmt != AV_PIX_FMT_NONE) { + pc->format = pix_fmt; + pc->coded_width = ALIGN(pc->width, 16); + pc->coded_height = ALIGN(pc->height, 16); + } +} + +int mpeg12_decode_extradata_ps(u8 *buf, int size, struct mpeg12_param_sets *ps) +{ + ps->head_parsed = false; + + mpegvideo_extract_headers(buf, size, ps); + + if (ps->dec_ps.width && ps->dec_ps.height) + ps->head_parsed = true; + + return 0; +} +
diff --git a/drivers/amvdec_ports/decoder/aml_mpeg12_parser.h b/drivers/amvdec_ports/decoder/aml_mpeg12_parser.h new file mode 100644 index 0000000..c06d632 --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_mpeg12_parser.h
@@ -0,0 +1,81 @@ +#ifndef AML_MPEG12_PARSER_H +#define AML_MPEG12_PARSER_H + +#include "../aml_vcodec_drv.h" +#include "../utils/common.h" +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +#include "../utils/pixfmt.h" +#endif + +/* Start codes. */ +#define SEQ_END_CODE 0x000001b7 +#define SEQ_START_CODE 0x000001b3 +#define GOP_START_CODE 0x000001b8 +#define PICTURE_START_CODE 0x00000100 +#define SLICE_MIN_START_CODE 0x00000101 +#define SLICE_MAX_START_CODE 0x000001af +#define EXT_START_CODE 0x000001b5 +#define USER_START_CODE 0x000001b2 +#define SLICE_START_CODE 0x000001b7 + +enum AVFieldOrder { + AV_FIELD_UNKNOWN, + AV_FIELD_PROGRESSIVE, + AV_FIELD_TT, //< Top coded_first, top displayed first + AV_FIELD_BB, //< Bottom coded first, bottom displayed first + AV_FIELD_TB, //< Top coded first, bottom displayed first + AV_FIELD_BT, //< Bottom coded first, top displayed first +}; + +struct MpvParseContext { + struct AVRational frame_rate; + int progressive_sequence; + int width, height; + + int repeat_pict; /* XXX: Put it back in AVCodecContext. */ + int pict_type; /* XXX: Put it back in AVCodecContext. */ + enum AVFieldOrder field_order; + int format; + /** + * Dimensions of the coded video. + */ + int coded_width; + int coded_height; + /** + * For some codecs, the time base is closer to the field rate than the frame rate. + * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration + * if no telecine is used ... + * + * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2. + */ + int ticks_per_frame; + /** + * Size of the frame reordering buffer in the decoder. + * For MPEG-2 it is 1 IPB or 0 low delay IP. + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int has_b_frames; + /** + * - decoding: For codecs that store a framerate value in the compressed + * bitstream, the decoder may export it here. { 0, 1} when + * unknown. 
+ * - encoding: May be used to signal the framerate of CFR content to an + * encoder. + */ + struct AVRational framerate; +}; + +struct mpeg12_param_sets { + bool head_parsed; + /* currently active parameter sets */ + struct MpvParseContext dec_ps; +}; + +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +int mpeg12_decode_extradata_ps(u8 *buf, int size, struct mpeg12_param_sets *ps); +#else +inline int mpeg12_decode_extradata_ps(u8 *buf, int size, struct mpeg12_param_sets *ps) { return -1; } +#endif + +#endif
diff --git a/drivers/amvdec_ports/decoder/aml_mpeg4_parser.c b/drivers/amvdec_ports/decoder/aml_mpeg4_parser.c new file mode 100644 index 0000000..9c47c08 --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_mpeg4_parser.c
@@ -0,0 +1,1231 @@ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/string.h> + +#include "aml_mpeg4_parser.h" +#include "../utils/get_bits.h" +#include "../utils/put_bits.h" +#include "../utils/golomb.h" +#include "../utils/common.h" +#include "utils.h" + +const u8 ff_mpeg4_dc_threshold[8]={ + 99, 13, 15, 17, 19, 21, 23, 0 +}; + +/* these matrixes will be permuted for the idct */ +const int16_t ff_mpeg4_default_intra_matrix[64] = { + 8, 17, 18, 19, 21, 23, 25, 27, + 17, 18, 19, 21, 23, 25, 27, 28, + 20, 21, 22, 23, 24, 26, 28, 30, + 21, 22, 23, 24, 26, 28, 30, 32, + 22, 23, 24, 26, 28, 30, 32, 35, + 23, 24, 26, 28, 30, 32, 35, 38, + 25, 26, 28, 30, 32, 35, 38, 41, + 27, 28, 30, 32, 35, 38, 41, 45, +}; + +const int16_t ff_mpeg4_default_non_intra_matrix[64] = { + 16, 17, 18, 19, 20, 21, 22, 23, + 17, 18, 19, 20, 21, 22, 23, 24, + 18, 19, 20, 21, 22, 23, 24, 25, + 19, 20, 21, 22, 23, 24, 26, 27, + 20, 21, 22, 23, 25, 26, 27, 28, + 21, 22, 23, 24, 26, 27, 28, 30, + 22, 23, 24, 26, 27, 28, 30, 31, + 23, 24, 25, 27, 28, 30, 31, 33, +}; + +const struct AVRational ff_h263_pixel_aspect[16] = { + { 0, 1 }, + { 1, 1 }, + { 12, 11 }, + { 10, 11 }, + { 16, 11 }, + { 40, 33 }, + { 0, 1 }, + { 0, 1 }, + { 0, 1 }, + { 0, 1 }, + { 0, 1 }, + { 0, 1 }, + { 0, 1 }, + { 0, 1 }, + { 0, 1 }, + { 0, 1 }, +}; + +/* As per spec, studio start code search isn't the same as the old type of start code */ +static void next_start_code_studio(struct get_bits_context *gb) +{ + align_get_bits(gb); + + while (get_bits_left(gb) >= 24 && show_bits_long(gb, 24) != 0x1) { + get_bits(gb, 8); + } +} + +static int read_quant_matrix_ext(struct MpegEncContext *s, struct get_bits_context *gb) +{ + int i, /*j,*/ v; + + if (get_bits1(gb)) { + if (get_bits_left(gb) < 64*8) + return -1; + /* intra_quantiser_matrix */ + for (i = 0; i < 64; i++) { + v = get_bits(gb, 8); + //j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; + 
//s->intra_matrix[j] = v; + //s->chroma_intra_matrix[j] = v; + } + } + + if (get_bits1(gb)) { + if (get_bits_left(gb) < 64*8) + return -1; + /* non_intra_quantiser_matrix */ + for (i = 0; i < 64; i++) { + get_bits(gb, 8); + } + } + + if (get_bits1(gb)) { + if (get_bits_left(gb) < 64*8) + return -1; + /* chroma_intra_quantiser_matrix */ + for (i = 0; i < 64; i++) { + v = get_bits(gb, 8); + //j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; + //s->chroma_intra_matrix[j] = v; + } + } + + if (get_bits1(gb)) { + if (get_bits_left(gb) < 64*8) + return -1; + /* chroma_non_intra_quantiser_matrix */ + for (i = 0; i < 64; i++) { + get_bits(gb, 8); + } + } + + next_start_code_studio(gb); + return 0; +} + +static void extension_and_user_data(struct MpegEncContext *s, struct get_bits_context *gb, int id) +{ + u32 startcode; + u8 extension_type; + + startcode = show_bits_long(gb, 32); + if (startcode == USER_DATA_STARTCODE || startcode == EXT_STARTCODE) { + if ((id == 2 || id == 4) && startcode == EXT_STARTCODE) { + skip_bits_long(gb, 32); + extension_type = get_bits(gb, 4); + if (extension_type == QUANT_MATRIX_EXT_ID) + read_quant_matrix_ext(s, gb); + } + } +} + + +static int decode_studio_vol_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb) +{ + struct MpegEncContext *s = &ctx->m; + int width, height; + int bits_per_raw_sample; + + // random_accessible_vol and video_object_type_indication have already + // been read by the caller decode_vol_header() + skip_bits(gb, 4); /* video_object_layer_verid */ + ctx->shape = get_bits(gb, 2); /* video_object_layer_shape */ + skip_bits(gb, 4); /* video_object_layer_shape_extension */ + skip_bits1(gb); /* progressive_sequence */ + if (ctx->shape != BIN_ONLY_SHAPE) { + ctx->rgb = get_bits1(gb); /* rgb_components */ + s->chroma_format = get_bits(gb, 2); /* chroma_format */ + if (!s->chroma_format) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "illegal chroma format\n"); + return -1; + } + + bits_per_raw_sample = get_bits(gb, 4); /* 
bit_depth */ + if (bits_per_raw_sample == 10) { + if (ctx->rgb) { + ctx->pix_fmt = AV_PIX_FMT_GBRP10; + } else { + ctx->pix_fmt = s->chroma_format == CHROMA_422 ? AV_PIX_FMT_YUV422P10 : AV_PIX_FMT_YUV444P10; + } + } + else { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "MPEG-4 Studio profile bit-depth %u", bits_per_raw_sample); + return -1; + } + ctx->bits_per_raw_sample = bits_per_raw_sample; + } + if (ctx->shape == RECT_SHAPE) { + check_marker(gb, "before video_object_layer_width"); + width = get_bits(gb, 14); /* video_object_layer_width */ + check_marker(gb, "before video_object_layer_height"); + height = get_bits(gb, 14); /* video_object_layer_height */ + check_marker(gb, "after video_object_layer_height"); + + /* Do the same check as non-studio profile */ + if (width && height) { + if (s->width && s->height && + (s->width != width || s->height != height)) + s->context_reinit = 1; + s->width = width; + s->height = height; + } + } + s->aspect_ratio_info = get_bits(gb, 4); + if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) { + ctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width + ctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height + } else { + ctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info]; + } + skip_bits(gb, 4); /* frame_rate_code */ + skip_bits(gb, 15); /* first_half_bit_rate */ + check_marker(gb, "after first_half_bit_rate"); + skip_bits(gb, 15); /* latter_half_bit_rate */ + check_marker(gb, "after latter_half_bit_rate"); + skip_bits(gb, 15); /* first_half_vbv_buffer_size */ + check_marker(gb, "after first_half_vbv_buffer_size"); + skip_bits(gb, 3); /* latter_half_vbv_buffer_size */ + skip_bits(gb, 11); /* first_half_vbv_buffer_size */ + check_marker(gb, "after first_half_vbv_buffer_size"); + skip_bits(gb, 15); /* latter_half_vbv_occupancy */ + check_marker(gb, "after latter_half_vbv_occupancy"); + s->low_delay = get_bits1(gb); + s->mpeg_quant = get_bits1(gb); /* mpeg2_stream */ + + next_start_code_studio(gb); + 
extension_and_user_data(s, gb, 2); + + return 0; +} + +static int decode_vol_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb) +{ + struct MpegEncContext *s = &ctx->m; + int width, height, vo_ver_id; + + /* vol header */ + skip_bits(gb, 1); /* random access */ + s->vo_type = get_bits(gb, 8); + + /* If we are in studio profile (per vo_type), check if its all consistent + * and if so continue pass control to decode_studio_vol_header(). + * elIf something is inconsistent, error out + * else continue with (non studio) vol header decpoding. + */ + if (s->vo_type == CORE_STUDIO_VO_TYPE || + s->vo_type == SIMPLE_STUDIO_VO_TYPE) { + if (ctx->profile != FF_PROFILE_UNKNOWN && ctx->profile != FF_PROFILE_MPEG4_SIMPLE_STUDIO) + return -1; + s->studio_profile = 1; + ctx->profile = FF_PROFILE_MPEG4_SIMPLE_STUDIO; + return decode_studio_vol_header(ctx, gb); + } else if (s->studio_profile) { + return -1; + } + + if (get_bits1(gb) != 0) { /* is_ol_id */ + vo_ver_id = get_bits(gb, 4); /* vo_ver_id */ + skip_bits(gb, 3); /* vo_priority */ + } else { + vo_ver_id = 1; + } + s->aspect_ratio_info = get_bits(gb, 4); + if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) { + ctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width + ctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height + } else { + ctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info]; + } + + if ((ctx->vol_control_parameters = get_bits1(gb))) { /* vol control parameter */ + int chroma_format = get_bits(gb, 2); + if (chroma_format != CHROMA_420) + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "illegal chroma format\n"); + + s->low_delay = get_bits1(gb); + if (get_bits1(gb)) { /* vbv parameters */ + get_bits(gb, 15); /* first_half_bitrate */ + check_marker(gb, "after first_half_bitrate"); + get_bits(gb, 15); /* latter_half_bitrate */ + check_marker(gb, "after latter_half_bitrate"); + get_bits(gb, 15); /* first_half_vbv_buffer_size */ + check_marker(gb, "after first_half_vbv_buffer_size"); + 
get_bits(gb, 3); /* latter_half_vbv_buffer_size */ + get_bits(gb, 11); /* first_half_vbv_occupancy */ + check_marker(gb, "after first_half_vbv_occupancy"); + get_bits(gb, 15); /* latter_half_vbv_occupancy */ + check_marker(gb, "after latter_half_vbv_occupancy"); + } + } else { + /* is setting low delay flag only once the smartest thing to do? + * low delay detection will not be overridden. */ + if (s->picture_number == 0) { + switch (s->vo_type) { + case SIMPLE_VO_TYPE: + case ADV_SIMPLE_VO_TYPE: + s->low_delay = 1; + break; + default: + s->low_delay = 0; + } + } + } + + ctx->shape = get_bits(gb, 2); /* vol shape */ + if (ctx->shape != RECT_SHAPE) + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "only rectangular vol supported\n"); + if (ctx->shape == GRAY_SHAPE && vo_ver_id != 1) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Gray shape not supported\n"); + skip_bits(gb, 4); /* video_object_layer_shape_extension */ + } + + check_marker(gb, "before time_increment_resolution"); + + ctx->framerate.num = get_bits(gb, 16); + if (!ctx->framerate.num) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "framerate==0\n"); + return -1; + } + + ctx->time_increment_bits = av_log2(ctx->framerate.num - 1) + 1; + if (ctx->time_increment_bits < 1) + ctx->time_increment_bits = 1; + + check_marker(gb, "before fixed_vop_rate"); + + if (get_bits1(gb) != 0) /* fixed_vop_rate */ + ctx->framerate.den = get_bits(gb, ctx->time_increment_bits); + else + ctx->framerate.den = 1; + + //ctx->time_base = av_inv_q(av_mul_q(ctx->framerate, (AVRational){ctx->ticks_per_frame, 1})); + + ctx->t_frame = 0; + + if (ctx->shape != BIN_ONLY_SHAPE) { + if (ctx->shape == RECT_SHAPE) { + check_marker(gb, "before width"); + width = get_bits(gb, 13); + check_marker(gb, "before height"); + height = get_bits(gb, 13); + check_marker(gb, "after height"); + if (width && height && /* they should be non zero but who knows */ + !(s->width && s->codec_tag == AV_RL32("MP4S"))) { + if (s->width && s->height && + (s->width != width || s->height != 
height)) + s->context_reinit = 1; + s->width = width; + s->height = height; + } + } + + s->progressive_sequence = + s->progressive_frame = get_bits1(gb) ^ 1; + s->interlaced_dct = 0; + if (!get_bits1(gb)) /* OBMC Disable */ + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "MPEG-4 OBMC not supported (very likely buggy encoder)\n"); + if (vo_ver_id == 1) + ctx->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */ + else + ctx->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */ + + if (ctx->vol_sprite_usage == STATIC_SPRITE) + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Static Sprites not supported\n"); + if (ctx->vol_sprite_usage == STATIC_SPRITE || + ctx->vol_sprite_usage == GMC_SPRITE) { + if (ctx->vol_sprite_usage == STATIC_SPRITE) { + skip_bits(gb, 13); // sprite_width + check_marker(gb, "after sprite_width"); + skip_bits(gb, 13); // sprite_height + check_marker(gb, "after sprite_height"); + skip_bits(gb, 13); // sprite_left + check_marker(gb, "after sprite_left"); + skip_bits(gb, 13); // sprite_top + check_marker(gb, "after sprite_top"); + } + ctx->num_sprite_warping_points = get_bits(gb, 6); + if (ctx->num_sprite_warping_points > 3) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "%d sprite_warping_points\n", + ctx->num_sprite_warping_points); + ctx->num_sprite_warping_points = 0; + return -1; + } + s->sprite_warping_accuracy = get_bits(gb, 2); + ctx->sprite_brightness_change = get_bits1(gb); + if (ctx->vol_sprite_usage == STATIC_SPRITE) + skip_bits1(gb); // low_latency_sprite + } + // FIXME sadct disable bit if verid!=1 && shape not rect + + if (get_bits1(gb) == 1) { /* not_8_bit */ + s->quant_precision = get_bits(gb, 4); /* quant_precision */ + if (get_bits(gb, 4) != 8) /* bits_per_pixel */ + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "N-bit not supported\n"); + if (s->quant_precision != 5) + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "quant precision %d\n", s->quant_precision); + if (s->quant_precision<3 || s->quant_precision>9) { + s->quant_precision = 5; + } + } else { + s->quant_precision = 
5; + } + + // FIXME a bunch of grayscale shape things + + if ((s->mpeg_quant = get_bits1(gb))) { /* vol_quant_type */ + int i, v; + + //mpeg4_load_default_matrices(s); + + /* load custom intra matrix */ + if (get_bits1(gb)) { + int last = 0; + for (i = 0; i < 64; i++) { + //int j; + if (get_bits_left(gb) < 8) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "insufficient data for custom matrix\n"); + return -1; + } + v = get_bits(gb, 8); + if (v == 0) + break; + + last = v; + //j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; + //s->intra_matrix[j] = last; + //s->chroma_intra_matrix[j] = last; + } + + /* replicate last value */ + //for (; i < 64; i++) { + //int j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; + //s->intra_matrix[j] = last; + //s->chroma_intra_matrix[j] = last; + //} + } + + /* load custom non intra matrix */ + if (get_bits1(gb)) { + int last = 0; + for (i = 0; i < 64; i++) { + //int j; + if (get_bits_left(gb) < 8) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "insufficient data for custom matrix\n"); + return -1; + } + v = get_bits(gb, 8); + if (v == 0) + break; + + last = v; + //j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; + //s->inter_matrix[j] = v; + //s->chroma_inter_matrix[j] = v; + } + + /* replicate last value */ + //for (; i < 64; i++) { + //int j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; + //s->inter_matrix[j] = last; + //s->chroma_inter_matrix[j] = last; + //} + } + + // FIXME a bunch of grayscale shape things + } + + if (vo_ver_id != 1) + s->quarter_sample = get_bits1(gb); + else + s->quarter_sample = 0; + + if (get_bits_left(gb) < 4) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "VOL Header truncated\n"); + return -1; + } + + if (!get_bits1(gb)) { + int pos = get_bits_count(gb); + int estimation_method = get_bits(gb, 2); + if (estimation_method < 2) { + if (!get_bits1(gb)) { + ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* opaque */ + ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* transparent */ + ctx->cplx_estimation_trash_i += 8 * 
get_bits1(gb); /* intra_cae */ + ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* inter_cae */ + ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* no_update */ + ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* upsampling */ + } + if (!get_bits1(gb)) { + ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_blocks */ + ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter_blocks */ + ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter4v_blocks */ + ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* not coded blocks */ + } + if (!check_marker(gb, "in complexity estimation part 1")) { + skip_bits_long(gb, pos - get_bits_count(gb)); + goto no_cplx_est; + } + if (!get_bits1(gb)) { + ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_coeffs */ + ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_lines */ + ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* vlc_syms */ + ctx->cplx_estimation_trash_i += 4 * get_bits1(gb); /* vlc_bits */ + } + if (!get_bits1(gb)) { + ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* apm */ + ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* npm */ + ctx->cplx_estimation_trash_b += 8 * get_bits1(gb); /* interpolate_mc_q */ + ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* forwback_mc_q */ + ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel2 */ + ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel4 */ + } + if (!check_marker(gb, "in complexity estimation part 2")) { + skip_bits_long(gb, pos - get_bits_count(gb)); + goto no_cplx_est; + } + if (estimation_method == 1) { + ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* sadct */ + ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* qpel */ + } + } else + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid Complexity estimation method %d\n", + estimation_method); + } else { + +no_cplx_est: + ctx->cplx_estimation_trash_i = + ctx->cplx_estimation_trash_p = + ctx->cplx_estimation_trash_b = 0; + } + + 
ctx->resync_marker = !get_bits1(gb); /* resync_marker_disabled */ + + s->data_partitioning = get_bits1(gb); + if (s->data_partitioning) + ctx->rvlc = get_bits1(gb); + + if (vo_ver_id != 1) { + ctx->new_pred = get_bits1(gb); + if (ctx->new_pred) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "new pred not supported\n"); + skip_bits(gb, 2); /* requested upstream message type */ + skip_bits1(gb); /* newpred segment type */ + } + if (get_bits1(gb)) // reduced_res_vop + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "reduced resolution VOP not supported\n"); + } else { + ctx->new_pred = 0; + } + + ctx->scalability = get_bits1(gb); + + if (ctx->scalability) { + struct get_bits_context bak = *gb; + int h_sampling_factor_n; + int h_sampling_factor_m; + int v_sampling_factor_n; + int v_sampling_factor_m; + + skip_bits1(gb); // hierarchy_type + skip_bits(gb, 4); /* ref_layer_id */ + skip_bits1(gb); /* ref_layer_sampling_dir */ + h_sampling_factor_n = get_bits(gb, 5); + h_sampling_factor_m = get_bits(gb, 5); + v_sampling_factor_n = get_bits(gb, 5); + v_sampling_factor_m = get_bits(gb, 5); + ctx->enhancement_type = get_bits1(gb); + + if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 || + v_sampling_factor_n == 0 || v_sampling_factor_m == 0) { + /* illegal scalability header (VERY broken encoder), + * trying to workaround */ + ctx->scalability = 0; + *gb = bak; + } else + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "scalability not supported\n"); + + // bin shape stuff FIXME + } + } + + if (1) { + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "tb %d/%d, tincrbits:%d, qp_prec:%d, ps:%d, low_delay:%d %s%s%s%s\n", + ctx->framerate.den, ctx->framerate.num, + ctx->time_increment_bits, + s->quant_precision, + s->progressive_sequence, + s->low_delay, + ctx->scalability ? "scalability " :"" , s->quarter_sample ? "qpel " : "", + s->data_partitioning ? "partition " : "", ctx->rvlc ? "rvlc " : ""); + } + + return 0; +} + + +/** + * Decode the user data stuff in the header. + * Also initializes divx/xvid/lavc_version/build. 
 */
/*
 * Parse a user-data chunk and detect the encoder that produced the stream
 * (DivX / libavcodec "Lavc" / Xvid).  Detected version/build numbers are
 * stored in @ctx and are used later to work around encoder-specific bugs.
 * Always returns 0.
 */
static int decode_user_data(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
{
	struct MpegEncContext *s = &ctx->m;
	char buf[256];
	int i;
	int e;
	int ver = 0, build = 0, ver2 = 0, ver3 = 0;
	char last;

	/* Copy user data into a NUL-terminated buffer; stop at the next
	 * start-code prefix (23 zero bits) or at end of bitstream. */
	for (i = 0; i < 255 && get_bits_count(gb) < gb->size_in_bits; i++) {
		if (show_bits(gb, 23) == 0)
			break;
		buf[i] = get_bits(gb, 8);
	}
	buf[i] = 0;

	/* divx detection */
	e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last);
	if (e < 2)
		e = sscanf(buf, "DivX%db%d%c", &ver, &build, &last);
	if (e >= 2) {
		ctx->divx_version = ver;
		ctx->divx_build = build;
		/* trailing 'p' after the build number marks a "packed bitstream" file */
		s->divx_packed = e == 3 && last == 'p';
	}

	/* libavcodec detection */
	e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3;
	if (e != 4)
		e = sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build);
	if (e != 4) {
		e = sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3) + 1;
		if (e > 1) {
			if (ver > 0xFFU || ver2 > 0xFFU || ver3 > 0xFFU) {
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Unknown Lavc version string encountered, %d.%d.%d; "
					"clamping sub-version values to 8-bits.\n",
					ver, ver2, ver3);
			}
			/* pack major/minor/micro into a single build number */
			build = ((ver & 0xFF) << 16) + ((ver2 & 0xFF) << 8) + (ver3 & 0xFF);
		}
	}
	if (e != 4) {
		/* bare "ffmpeg" string: very old build without a version number */
		if (strcmp(buf, "ffmpeg") == 0)
			ctx->lavc_build = 4600;
	}
	if (e == 4)
		ctx->lavc_build = build;

	/* Xvid detection */
	e = sscanf(buf, "XviD%d", &build);
	if (e == 1)
		ctx->xvid_build = build;

	return 0;
}

/*
 * Parse a Group-of-VOP header: derive the GOP time base (seconds) from
 * the embedded HH:MM:SS time code.  Returns -1 on an invalid header.
 */
static int mpeg4_decode_gop_header(struct MpegEncContext *s, struct get_bits_context *gb)
{
	int hours, minutes, seconds;

	/* a start-code prefix here means the header carries no payload */
	if (!show_bits(gb, 23)) {
		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "GOP header invalid\n");
		return -1;
	}

	hours = get_bits(gb, 5);
	minutes = get_bits(gb, 6);
	check_marker(gb, "in gop_header");
	seconds = get_bits(gb, 6);

	s->time_base = seconds + 60*(minutes + 60*hours);

	/* two 1-bit flags, skipped (presumably closed_gov / broken_link — confirm vs spec) */
	skip_bits1(gb);
	skip_bits1(gb);

	return 0;
}

/*
 * Read the 4-bit profile and 4-bit level from a Visual Object Sequence
 * header.  Always returns 0; @s is currently unused.
 */
static int mpeg4_decode_profile_level(struct MpegEncContext *s,
	struct get_bits_context *gb, int *profile, int *level)
{

	*profile = get_bits(gb, 4);
	*level = get_bits(gb, 4);

	// for Simple profile, level 0
	if (*profile == 0 && *level == 8) {
		*level = 0;
	}

	return 0;
}

/*
 * Studio-profile visual object header: only plain video objects
 * (VOT_VIDEO_ID) are supported; any other type is rejected with -1.
 */
static int decode_studiovisualobject(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
{
	struct MpegEncContext *s = &ctx->m;
	int visual_object_type;

	skip_bits(gb, 4); /* visual_object_verid */
	visual_object_type = get_bits(gb, 4);
	if (visual_object_type != VOT_VIDEO_ID) {
		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "VO type %u", visual_object_type);
		return -1;
	}

	next_start_code_studio(gb);
	extension_and_user_data(s, gb, 1);

	return 0;
}

/*
 * Parse a (non-studio) visual object header.  When a video signal type
 * is present, extracts color range and (optionally) color primaries /
 * transfer characteristic / matrix into the decoder context.
 * Always returns 0.
 */
static int mpeg4_decode_visual_object(struct MpegEncContext *s, struct get_bits_context *gb)
{
	int visual_object_type;
	int is_visual_object_identifier = get_bits1(gb);

	if (is_visual_object_identifier) {
		skip_bits(gb, 4+3);
	}
	visual_object_type = get_bits(gb, 4);

	if (visual_object_type == VOT_VIDEO_ID ||
		visual_object_type == VOT_STILL_TEXTURE_ID) {
		int video_signal_type = get_bits1(gb);
		if (video_signal_type) {
			int video_range, color_description;
			skip_bits(gb, 3); // video_format
			video_range = get_bits1(gb);
			color_description = get_bits1(gb);

			s->ctx->color_range = video_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;

			if (color_description) {
				s->ctx->color_primaries = get_bits(gb, 8);
				s->ctx->color_trc = get_bits(gb, 8);
				s->ctx->colorspace = get_bits(gb, 8);
			}
		}
	}

	return 0;
}

/*
 * Skip the 64-bit SMPTE time code (with interleaved marker bits) plus
 * 4 reserved bits at the start of a studio-profile VOP.
 */
static void decode_smpte_tc(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
{
	skip_bits(gb, 16); /* Time_code[63..48] */
	check_marker(gb, "after Time_code[63..48]");
	skip_bits(gb, 16); /* Time_code[47..32] */
	check_marker(gb, "after Time_code[47..32]");
	skip_bits(gb, 16); /* Time_code[31..16] */
	check_marker(gb, "after Time_code[31..16]");
	skip_bits(gb, 16); /* Time_code[15..0] */
	check_marker(gb, "after Time_code[15..0]");
	skip_bits(gb, 4); /* reserved_bits */
}

/*
 * Reset the three DC predictors to the mid-scale value derived from the
 * configured bit depth and DC precision (studio profile).
 */
static void reset_studio_dc_predictors(struct MpegEncContext *s)
{
	/* Reset DC Predictors */
	s->last_dc[0] =
	s->last_dc[1] =
	s->last_dc[2] = 1 << (s->ctx->bits_per_raw_sample + s->dct_precision + s->intra_dc_precision - 1);
}

/**
 * Decode the next studio vop header.
 * @return <0 if something went wrong
 */
static int decode_studio_vop_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
{
	struct MpegEncContext *s = &ctx->m;

	/* not enough bits left for a meaningful VOP header */
	if (get_bits_left(gb) <= 32)
		return 0;

	//s->decode_mb = mpeg4_decode_studio_mb;

	decode_smpte_tc(ctx, gb);

	skip_bits(gb, 10); /* temporal_reference */
	skip_bits(gb, 2); /* vop_structure */
	s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* vop_coding_type */
	if (get_bits1(gb)) { /* vop_coded */
		skip_bits1(gb); /* top_field_first */
		skip_bits1(gb); /* repeat_first_field */
		s->progressive_frame = get_bits1(gb) ^ 1; /* progressive_frame */
	}

	if (s->pict_type == AV_PICTURE_TYPE_I) {
		if (get_bits1(gb))
			reset_studio_dc_predictors(s);
	}

	if (ctx->shape != BIN_ONLY_SHAPE) {
		s->alternate_scan = get_bits1(gb);
		s->frame_pred_frame_dct = get_bits1(gb);
		s->dct_precision = get_bits(gb, 2);
		s->intra_dc_precision = get_bits(gb, 2);
		s->q_scale_type = get_bits1(gb);
	}

	//if (s->alternate_scan) { }

	//mpeg4_load_default_matrices(s);

	next_start_code_studio(gb);
	extension_and_user_data(s, gb, 4);

	return 0;
}

/*
 * Skip the new_pred fields of a VOP header (vop_id and, when the
 * "prediction indication" bit is set, vop_id_for_prediction).
 * Field width is derived from time_increment_bits, capped at 15.
 */
static int decode_new_pred(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
{
	int len = FFMIN(ctx->time_increment_bits + 3, 15);

	get_bits(gb, len); /* vop_id */
	if (get_bits1(gb))
		get_bits(gb, len); /* vop_id_for_prediction */
	check_marker(gb, "after new_pred");

	return 0;
}

/*
 * Decode a (non-studio) VOP header: picture type, timestamps, optional
 * sprite/shape fields, qscale and f/b codes.
 * @return 0 on success, FRAME_SKIPPED for non-coded/mis-ordered VOPs,
 *         -1 on a damaged header.
 */
static int decode_vop_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
{
	struct MpegEncContext *s = &ctx->m;
	int time_incr, time_increment;
	int64_t pts;

	s->mcsel = 0;
	s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0 , P = 1 */
	if (s->pict_type == AV_PICTURE_TYPE_B && s->low_delay &&
		ctx->vol_control_parameters == 0) {
		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "low_delay flag set incorrectly, clearing it\n");
		s->low_delay = 0;
	}

	s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B;

	/*if (s->partitioned_frame)
		s->decode_mb = mpeg4_decode_partitioned_mb;
	else
		s->decode_mb = mpeg4_decode_mb;*/

	/* modulo_time_base: count of '1' bits = seconds elapsed */
	time_incr = 0;
	while (get_bits1(gb) != 0)
		time_incr++;

	check_marker(gb, "before time_increment");

	/* If the VOL header was missing (time_increment_bits == 0) or the
	 * marker bit after the increment is not where it should be, probe
	 * plausible widths from the bits that follow. */
	if (ctx->time_increment_bits == 0 ||
		!(show_bits(gb, ctx->time_increment_bits + 1) & 1)) {
		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "time_increment_bits %d is invalid in relation to the current bitstream, this is likely caused by a missing VOL header\n", ctx->time_increment_bits);

		for (ctx->time_increment_bits = 1;
			ctx->time_increment_bits < 16;
			ctx->time_increment_bits++) {
			if (s->pict_type == AV_PICTURE_TYPE_P ||
				(s->pict_type == AV_PICTURE_TYPE_S &&
				ctx->vol_sprite_usage == GMC_SPRITE)) {
				if ((show_bits(gb, ctx->time_increment_bits + 6) & 0x37) == 0x30)
					break;
			} else if ((show_bits(gb, ctx->time_increment_bits + 5) & 0x1F) == 0x18)
				break;
		}

		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "time_increment_bits set to %d bits, based on bitstream analysis\n", ctx->time_increment_bits);
		if (ctx->framerate.num && 4*ctx->framerate.num < 1<<ctx->time_increment_bits) {
			ctx->framerate.num = 1<<ctx->time_increment_bits;
			//ctx->time_base = av_inv_q(av_mul_q(ctx->framerate, (AVRational){ctx->ticks_per_frame, 1}));
		}
	}

	if (IS_3IV1)
		time_increment = get_bits1(gb); // FIXME investigate further
	else
		time_increment = get_bits(gb, ctx->time_increment_bits);

	if (s->pict_type != AV_PICTURE_TYPE_B) {
		s->last_time_base = s->time_base;
		s->time_base += time_incr;
		s->time = s->time_base * (int64_t)ctx->framerate.num + time_increment;
		//if (s->workaround_bugs & FF_BUG_UMP4) { }
		s->pp_time = s->time - s->last_non_b_time;
		s->last_non_b_time = s->time;
	} else {
		s->time = (s->last_time_base + time_incr) * (int64_t)ctx->framerate.num + time_increment;
		s->pb_time = s->pp_time - (s->last_non_b_time - s->time);
		if (s->pp_time <= s->pb_time ||
			s->pp_time <= s->pp_time - s->pb_time ||
			s->pp_time <= 0) {
			/* messed up order, maybe after seeking? skipping current B-frame */
			return FRAME_SKIPPED;
		}
		//ff_mpeg4_init_direct_mv(s);

		if (ctx->t_frame == 0)
			ctx->t_frame = s->pb_time;
		if (ctx->t_frame == 0)
			ctx->t_frame = 1; // 1/0 protection
		s->pp_field_time = (ROUNDED_DIV(s->last_non_b_time, ctx->t_frame) -
			ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
		s->pb_field_time = (ROUNDED_DIV(s->time, ctx->t_frame) -
			ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
		if (s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1) {
			s->pb_field_time = 2;
			s->pp_field_time = 4;
			if (!s->progressive_sequence)
				return FRAME_SKIPPED;
		}
	}

	if (ctx->framerate.den)
		pts = ROUNDED_DIV(s->time, ctx->framerate.den);
	else
		pts = AV_NOPTS_VALUE;
	v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "MPEG4 PTS: %lld\n", pts);

	check_marker(gb, "before vop_coded");

	/* vop coded */
	if (get_bits1(gb) != 1) {
		if (1)
			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vop not coded\n");
		return FRAME_SKIPPED;
	}
	if (ctx->new_pred)
		decode_new_pred(ctx, gb);

	if (ctx->shape != BIN_ONLY_SHAPE &&
		(s->pict_type == AV_PICTURE_TYPE_P ||
		(s->pict_type == AV_PICTURE_TYPE_S &&
		ctx->vol_sprite_usage == GMC_SPRITE))) {
		/* rounding type for motion estimation */
		s->no_rounding = get_bits1(gb);
	} else {
		s->no_rounding = 0;
	}
	// FIXME reduced res stuff

	if (ctx->shape != RECT_SHAPE) {
		if (ctx->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) {
			skip_bits(gb, 13); /* width */
			check_marker(gb, "after width");
			skip_bits(gb, 13); /* height */
			check_marker(gb, "after height");
			skip_bits(gb, 13); /* hor_spat_ref */
			check_marker(gb, "after hor_spat_ref");
			skip_bits(gb, 13); /* ver_spat_ref */
		}
		skip_bits1(gb); /* change_CR_disable */

		if (get_bits1(gb) != 0)
			skip_bits(gb, 8); /* constant_alpha_value */
	}

	// FIXME complexity estimation stuff

	if (ctx->shape != BIN_ONLY_SHAPE) {
		skip_bits_long(gb, ctx->cplx_estimation_trash_i);
		if (s->pict_type != AV_PICTURE_TYPE_I)
			skip_bits_long(gb, ctx->cplx_estimation_trash_p);
		if (s->pict_type == AV_PICTURE_TYPE_B)
			skip_bits_long(gb, ctx->cplx_estimation_trash_b);

		if (get_bits_left(gb) < 3) {
			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Header truncated\n");
			return -1;
		}
		ctx->intra_dc_threshold = ff_mpeg4_dc_threshold[get_bits(gb, 3)];
		if (!s->progressive_sequence) {
			s->top_field_first = get_bits1(gb);
			s->alternate_scan = get_bits1(gb);
		} else
			s->alternate_scan = 0;
	}

	/*if (s->alternate_scan) { } */

	if (s->pict_type == AV_PICTURE_TYPE_S) {
		if((ctx->vol_sprite_usage == STATIC_SPRITE ||
			ctx->vol_sprite_usage == GMC_SPRITE)) {
			//if (mpeg4_decode_sprite_trajectory(ctx, gb) < 0)
				//return -1;
			if (ctx->sprite_brightness_change)
				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "sprite_brightness_change not supported\n");
			if (ctx->vol_sprite_usage == STATIC_SPRITE)
				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "static sprite not supported\n");
		} else {
			memset(s->sprite_offset, 0, sizeof(s->sprite_offset));
			memset(s->sprite_delta, 0, sizeof(s->sprite_delta));
		}
	}

	if (ctx->shape != BIN_ONLY_SHAPE) {
		s->chroma_qscale = s->qscale = get_bits(gb, s->quant_precision);
		if (s->qscale == 0) {
			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Error, header damaged or not MPEG-4 header (qscale=0)\n");
			return -1; // makes no sense to continue, as there is nothing left from the image then
		}

		if (s->pict_type != AV_PICTURE_TYPE_I) {
			s->f_code = get_bits(gb, 3); /* fcode_for */
			if (s->f_code == 0) {
				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Error, header damaged or not MPEG-4 header (f_code=0)\n");
				s->f_code = 1;
				return -1; // makes no sense to continue, as there is nothing left from the image then
			}
		} else
			s->f_code = 1;

		if (s->pict_type == AV_PICTURE_TYPE_B) {
			s->b_code = get_bits(gb, 3);
			if (s->b_code == 0) {
				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Error, header damaged or not MPEG4 header (b_code=0)\n");
				s->b_code=1;
				return -1; // makes no sense to continue, as the MV decoding will break very quickly
			}
		} else
			s->b_code = 1;

		if (1) {
			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d time:%ld tincr:%d\n",
				s->qscale, s->f_code, s->b_code,
				s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
				gb->size_in_bits,s->progressive_sequence, s->alternate_scan,
				s->top_field_first, s->quarter_sample ? "q" : "h",
				s->data_partitioning, ctx->resync_marker,
				ctx->num_sprite_warping_points, s->sprite_warping_accuracy,
				1 - s->no_rounding, s->vo_type,
				ctx->vol_control_parameters ? " VOLC" : " ", ctx->intra_dc_threshold,
				ctx->cplx_estimation_trash_i, ctx->cplx_estimation_trash_p,
				ctx->cplx_estimation_trash_b,
				s->time,
				time_increment);
		}

		if (!ctx->scalability) {
			if (ctx->shape != RECT_SHAPE && s->pict_type != AV_PICTURE_TYPE_I)
				skip_bits1(gb); // vop shape coding type
		} else {
			if (ctx->enhancement_type) {
				int load_backward_shape = get_bits1(gb);
				if (load_backward_shape)
					v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "load backward shape isn't supported\n");
			}
			skip_bits(gb, 2); // ref_select_code
		}
	}
	/* detect buggy encoders which don't set the low_delay flag
	 * (divx4/xvid/opendivx). Note we cannot detect divx5 without B-frames
	 * easily (although it's buggy too) */
	if (s->vo_type == 0 && ctx->vol_control_parameters == 0 &&
		ctx->divx_version == -1 && s->picture_number == 0) {
		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n");
		s->low_delay = 1;
	}

	s->picture_number++; // better than pic number==0 always ;)

	// FIXME add short header support
	//s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table;
	//s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table;

	return 0;
}

/**
 * Decode MPEG-4 headers.
 * @return <0 if no VOP found (or a damaged one)
 * FRAME_SKIPPED if a not coded VOP is found
 * 0 if a VOP is found
 */
int ff_mpeg4_decode_picture_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
{
	struct MpegEncContext *s = &ctx->m;

	unsigned startcode, v;
	int ret;
	int vol = 0;
	/*
	 * NOTE(review): this is a LOCAL that is never set to anything but 0,
	 * so the studio-profile path below always fails with "Missing VOL
	 * header".  Upstream FFmpeg uses avctx->bits_per_raw_sample here, and
	 * struct mpeg4_dec_param has a bits_per_raw_sample field — this looks
	 * like a porting slip; confirm whether ctx->bits_per_raw_sample was
	 * intended.
	 */
	int bits_per_raw_sample = 0;

	s->ctx = ctx;

	/* search next start code */
	align_get_bits(gb);

	// If we have not switched to studio profile than we also did not switch bps
	// that means something else (like a previous instance) outside set bps which
	// would be inconsistant with the currect state, thus reset it
	if (!s->studio_profile && bits_per_raw_sample != 8)
		bits_per_raw_sample = 0;

	/* "WV0" + 0xF0: short-circuit straight to the VOP-header path */
	if (show_bits(gb, 24) == 0x575630) {
		skip_bits(gb, 24);
		if (get_bits(gb, 8) == 0xF0)
			goto end;
	}

	startcode = 0xff;
	for (;;) {
		if (get_bits_count(gb) >= gb->size_in_bits) {
			if (gb->size_in_bits == 8) {
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "frame skip %d\n", gb->size_in_bits);
				return FRAME_SKIPPED; // divx bug
			} else
				return -1; // end of stream
		}

		/* use the bits after the test */
		v = get_bits(gb, 8);
		startcode = ((startcode << 8) | v) & 0xffffffff;

		if ((startcode & 0xFFFFFF00) != 0x100)
			continue; // no startcode

		if (1) { //debug
			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "startcode: %3X \n", startcode);
			if (startcode <= 0x11F)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Video Object Start\n");
			else if (startcode <= 0x12F)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Video Object Layer Start\n");
			else if (startcode <= 0x13F)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Reserved\n");
			else if (startcode <= 0x15F)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "FGS bp start\n");
			else if (startcode <= 0x1AF)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Reserved\n");
			else if (startcode == 0x1B0)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Visual Object Seq Start\n");
			else if (startcode == 0x1B1)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Visual Object Seq End\n");
			else if (startcode == 0x1B2)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "User Data\n");
			else if (startcode == 0x1B3)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Group of VOP start\n");
			else if (startcode == 0x1B4)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Video Session Error\n");
			else if (startcode == 0x1B5)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Visual Object Start\n");
			else if (startcode == 0x1B6)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Video Object Plane start\n");
			else if (startcode == 0x1B7)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "slice start\n");
			else if (startcode == 0x1B8)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "extension start\n");
			else if (startcode == 0x1B9)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "fgs start\n");
			else if (startcode == 0x1BA)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "FBA Object start\n");
			else if (startcode == 0x1BB)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "FBA Object Plane start\n");
			else if (startcode == 0x1BC)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Mesh Object start\n");
			else if (startcode == 0x1BD)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Mesh Object Plane start\n");
			else if (startcode == 0x1BE)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Still Texture Object start\n");
			else if (startcode == 0x1BF)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Texture Spatial Layer start\n");
			else if (startcode == 0x1C0)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Texture SNR Layer start\n");
			else if (startcode == 0x1C1)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Texture Tile start\n");
			else if (startcode == 0x1C2)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Texture Shape Layer start\n");
			else if (startcode == 0x1C3)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "stuffing start\n");
			else if (startcode <= 0x1C5)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "reserved\n");
			else if (startcode <= 0x1FF)
				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "System start\n");
		}

		if (startcode >= 0x120 && startcode <= 0x12F) {
			if (vol) {
				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Ignoring multiple VOL headers\n");
				continue;
			}
			vol++;
			if ((ret = decode_vol_header(ctx, gb)) < 0)
				return ret;
		} else if (startcode == USER_DATA_STARTCODE) {
			decode_user_data(ctx, gb);
		} else if (startcode == GOP_STARTCODE) {
			mpeg4_decode_gop_header(s, gb);
		} else if (startcode == VOS_STARTCODE) {
			int profile, level;
			mpeg4_decode_profile_level(s, gb, &profile, &level);
			if (profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO &&
				(level > 0 && level < 9)) {
				s->studio_profile = 1;
				next_start_code_studio(gb);
				extension_and_user_data(s, gb, 0);
			} else if (s->studio_profile) {
				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Mixes studio and non studio profile\n");
				return -1;
			}
			ctx->profile = profile;
			ctx->level = level;
		} else if (startcode == VISUAL_OBJ_STARTCODE) {
			if (s->studio_profile) {
				if ((ret = decode_studiovisualobject(ctx, gb)) < 0)
					return ret;
			} else
				mpeg4_decode_visual_object(s, gb);
		} else if (startcode == VOP_STARTCODE) {
			break;
		}

		align_get_bits(gb);
		startcode = 0xff;
	}

end:
	if (s->studio_profile) {
		if (!bits_per_raw_sample) {
			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Missing VOL header\n");
			return -1;
		}
		return decode_studio_vop_header(ctx, gb);
	} else
		return decode_vop_header(ctx, gb);
}

/*
 * Parse codec extradata for MPEG-4 parameter sets.  On success,
 * ps->head_parsed is set when a valid width/height pair was recovered.
 * Returns 0 unless the header parser reported a hard failure.
 */
int mpeg4_decode_extradata_ps(u8 *buf, int size, struct mpeg4_param_sets *ps)
{
	int ret = 0;
	struct get_bits_context gb;

	ps->head_parsed = false;

	init_get_bits8(&gb, buf, size);

	ret = ff_mpeg4_decode_picture_header(&ps->dec_ps, &gb);
	/* NOTE(review): the header parser returns -1 on damage, so "ret < -1"
	 * can never be true and parse failures fall through as success with
	 * head_parsed == false; confirm whether "ret < 0" was intended. */
	if (ret < -1) {
		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Failed to parse extradata\n");
		return ret;
	}

	if (ps->dec_ps.m.width && ps->dec_ps.m.height)
		ps->head_parsed = true;

	return 0;
}
diff --git a/drivers/amvdec_ports/decoder/aml_mpeg4_parser.h b/drivers/amvdec_ports/decoder/aml_mpeg4_parser.h new file mode 100644 index 0000000..f9b71cf --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_mpeg4_parser.h
@@ -0,0 +1,259 @@ +#ifndef AVCODEC_MPEG4VIDEO_H +#define AVCODEC_MPEG4VIDEO_H + +#include "../aml_vcodec_drv.h" +#include "../utils/common.h" +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +#include "../utils/pixfmt.h" +#endif + +//mpeg4 profile +#define FF_PROFILE_MPEG4_SIMPLE 0 +#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1 +#define FF_PROFILE_MPEG4_CORE 2 +#define FF_PROFILE_MPEG4_MAIN 3 +#define FF_PROFILE_MPEG4_N_BIT 4 +#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5 +#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6 +#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7 +#define FF_PROFILE_MPEG4_HYBRID 8 +#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9 +#define FF_PROFILE_MPEG4_CORE_SCALABLE 10 +#define FF_PROFILE_MPEG4_ADVANCED_CODING 11 +#define FF_PROFILE_MPEG4_ADVANCED_CORE 12 +#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13 +#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14 +#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15 + +// shapes +#define RECT_SHAPE 0 +#define BIN_SHAPE 1 +#define BIN_ONLY_SHAPE 2 +#define GRAY_SHAPE 3 + +#define SIMPLE_VO_TYPE 1 +#define CORE_VO_TYPE 3 +#define MAIN_VO_TYPE 4 +#define NBIT_VO_TYPE 5 +#define ARTS_VO_TYPE 10 +#define ACE_VO_TYPE 12 +#define SIMPLE_STUDIO_VO_TYPE 14 +#define CORE_STUDIO_VO_TYPE 15 +#define ADV_SIMPLE_VO_TYPE 17 + +#define VOT_VIDEO_ID 1 +#define VOT_STILL_TEXTURE_ID 2 + +#define FF_PROFILE_UNKNOWN -99 +#define FF_PROFILE_RESERVED -100 + +// aspect_ratio_info +#define EXTENDED_PAR 15 + +//vol_sprite_usage / sprite_enable +#define STATIC_SPRITE 1 +#define GMC_SPRITE 2 + +#define MOTION_MARKER 0x1F001 +#define DC_MARKER 0x6B001 + +#define VOS_STARTCODE 0x1B0 +#define USER_DATA_STARTCODE 0x1B2 +#define GOP_STARTCODE 0x1B3 +#define VISUAL_OBJ_STARTCODE 0x1B5 +#define VOP_STARTCODE 0x1B6 +#define SLICE_STARTCODE 0x1B7 +#define EXT_STARTCODE 0x1B8 + +#define QUANT_MATRIX_EXT_ID 0x3 + +/* smaller packets likely don't contain a real frame */ +#define MAX_NVOP_SIZE 19 + +#define IS_3IV1 0 + +#define CHROMA_420 1 +#define 
CHROMA_422 2 +#define CHROMA_444 3 + +#define FF_ASPECT_EXTENDED 15 + +#define AV_NOPTS_VALUE (LONG_MIN) + +/** + * Return value for header parsers if frame is not coded. + * */ +#define FRAME_SKIPPED 100 + +enum AVPictureType { + AV_PICTURE_TYPE_NONE = 0, ///< Undefined + AV_PICTURE_TYPE_I, ///< Intra + AV_PICTURE_TYPE_P, ///< Predicted + AV_PICTURE_TYPE_B, ///< Bi-dir predicted + AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG-4 + AV_PICTURE_TYPE_SI, ///< Switching Intra + AV_PICTURE_TYPE_SP, ///< Switching Predicted + AV_PICTURE_TYPE_BI, ///< BI type +}; + +struct VLC { + int bits; + short (*table)[2]; ///< code, bits + int table_size, table_allocated; +}; + +/** + * MpegEncContext. + */ +struct MpegEncContext { + struct mpeg4_dec_param *ctx; + + /* the following parameters must be initialized before encoding */ + int width, height;///< picture size. must be a multiple of 16 + int codec_tag; ///< internal codec_tag upper case converted from avctx codec_tag + int picture_number; //FIXME remove, unclear definition + + /** matrix transmitted in the bitstream */ + u16 intra_matrix[64]; + u16 chroma_intra_matrix[64]; + u16 inter_matrix[64]; + u16 chroma_inter_matrix[64]; + + /* MPEG-4 specific */ + int studio_profile; + int time_base; ///< time in seconds of last I,P,S Frame + int quant_precision; + int quarter_sample; ///< 1->qpel, 0->half pel ME/MC + int aspect_ratio_info; //FIXME remove + int sprite_warping_accuracy; + int data_partitioning; ///< data partitioning flag from header + int low_delay; ///< no reordering needed / has no B-frames + int vo_type; + int mpeg_quant; + + /* divx specific, used to workaround (many) bugs in divx5 */ + int divx_packed; + + /* MPEG-2-specific - I wished not to have to support this mess. 
*/ + int progressive_sequence; + + int progressive_frame; + int interlaced_dct; + + int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replication) + const u8 *y_dc_scale_table; ///< qscale -> y_dc_scale table + const u8 *c_dc_scale_table; ///< qscale -> c_dc_scale table + int qscale; ///< QP + int chroma_qscale; ///< chroma QP + int pict_type; ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ... + int f_code; ///< forward MV resolution + int b_code; ///< backward MV resolution for B-frames (MPEG-4) + int no_rounding; /**< apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) + for B-frames rounding mode is always 0 */ + int last_time_base; + long time; ///< time of current frame + long last_non_b_time; + u16 pp_time; ///< time distance between the last 2 p,s,i frames + u16 pb_time; ///< time distance between the last b and p,s,i frame + u16 pp_field_time; + u16 pb_field_time; ///< like above, just for interlaced + int real_sprite_warping_points; + int sprite_offset[2][2]; ///< sprite offset[isChroma][isMVY] + int sprite_delta[2][2]; ///< sprite_delta [isY][isMVY] + int mcsel; + int partitioned_frame; ///< is current frame partitioned + int top_field_first; + int alternate_scan; + int last_dc[3]; ///< last DC values for MPEG-1 + int dct_precision; + int intra_dc_precision; + int frame_pred_frame_dct; + int q_scale_type; + int context_reinit; + int chroma_format; +}; + +struct mpeg4_dec_param { + struct MpegEncContext m; + + /// number of bits to represent the fractional part of time + int time_increment_bits; + int shape; + int vol_sprite_usage; + int sprite_brightness_change; + int num_sprite_warping_points; + /// sprite trajectory points + u16 sprite_traj[4][2]; + /// sprite shift [isChroma] + int sprite_shift[2]; + + // reversible vlc + int rvlc; + /// could this stream contain resync markers + int resync_marker; + /// time distance of first I -> B, used for interlaced B-frames + int t_frame; + + 
int new_pred; + int enhancement_type; + int scalability; + int use_intra_dc_vlc; + + /// QP above which the ac VLC should be used for intra dc + int intra_dc_threshold; + + /* bug workarounds */ + int divx_version; + int divx_build; + int xvid_build; + int lavc_build; + + /// flag for having shown the warning about invalid Divx B-frames + int showed_packed_warning; + /** does the stream contain the low_delay flag, + * used to work around buggy encoders. */ + int vol_control_parameters; + int cplx_estimation_trash_i; + int cplx_estimation_trash_p; + int cplx_estimation_trash_b; + + struct VLC studio_intra_tab[12]; + struct VLC studio_luma_dc; + struct VLC studio_chroma_dc; + + int rgb; + + struct AVRational time_base; + int ticks_per_frame; + struct AVRational sample_aspect_ratio; + enum AVColorPrimaries color_primaries; + enum AVColorTransferCharacteristic color_trc; + enum AVColorSpace colorspace; +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER + enum AVPixelFormat pix_fmt; + enum AVColorRange color_range; + enum AVChromaLocation chroma_sample_location; +#endif + int err_recognition; + int idct_algo; + int bits_per_raw_sample; + int profile; + int level; + struct AVRational framerate; + int flags; +}; + +struct mpeg4_param_sets { + bool head_parsed; + /* currently active parameter sets */ + struct mpeg4_dec_param dec_ps; +}; + +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +int mpeg4_decode_extradata_ps(u8 *buf, int size, struct mpeg4_param_sets *ps); +#else +inline int mpeg4_decode_extradata_ps(u8 *buf, int size, struct mpeg4_param_sets *ps) { return -1; } +#endif + +#endif +
diff --git a/drivers/amvdec_ports/decoder/aml_vp9_parser.c b/drivers/amvdec_ports/decoder/aml_vp9_parser.c new file mode 100644 index 0000000..21d5283 --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_vp9_parser.c
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/string.h>

#include "aml_vp9_parser.h"
#include "../utils/get_bits.h"
#include "../utils/put_bits.h"
#include "../utils/golomb.h"
#include "../utils/common.h"
#include "utils.h"

#define VP9_SYNCCODE 0x498342

/*
 * Parse the color-config portion of a VP9 uncompressed header: bit
 * depth (by profile), color space, color range and chroma subsampling.
 * Fills s->bpp_index, s->s.h.bpp, s->bytesperpixel, s->ss_h/ss_v and
 * s->pix_fmt.  Returns 0 on success, -1 on reserved/unsupported
 * combinations (RGB in profile 0/2, 4:2:0 in profile 1/3, reserved bits).
 */
static int read_colorspace_details(struct VP9Context *s, int profile)
{
	static const enum AVColorSpace colorspaces[8] = {
		AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_BT470BG, AVCOL_SPC_BT709, AVCOL_SPC_SMPTE170M,
		AVCOL_SPC_SMPTE240M, AVCOL_SPC_BT2020_NCL, AVCOL_SPC_RESERVED, AVCOL_SPC_RGB,
	};

	enum AVColorSpace colorspace;
	int color_range;
	int bits = profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12

	s->bpp_index = bits;
	s->s.h.bpp = 8 + bits * 2;
	s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
	colorspace = colorspaces[get_bits(&s->gb, 3)];
	if (colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
		if (profile & 1) {
			if (get_bits1(&s->gb)) {
				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Reserved bit set in RGB\n");
				return -1;
			}
		} else {
			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "RGB not supported in profile %d\n", profile);
			return -1;
		}
	} else {
		static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
			{ { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P },
			  { AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV420P } },
			{ { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10 },
			  { AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV420P10 } },
			{ { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12 },
			  { AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12 } }};
		color_range = get_bits1(&s->gb) ? 2 : 1;
		if (profile & 1) {
			/* profiles 1/3 signal subsampling explicitly */
			s->ss_h = get_bits1(&s->gb);
			s->ss_v = get_bits1(&s->gb);
			s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
			if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "YUV 4:2:0 not supported in profile %d\n", profile);
				return -1;
			} else if (get_bits1(&s->gb)) {
				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Profile %d color details reserved bit set\n", profile);
				return -1;
			}
		} else {
			/* profiles 0/2 are always 4:2:0 */
			s->ss_h = s->ss_v = 1;
			s->pix_fmt = pix_fmt_for_ss[bits][1][1];
		}
	}

	return 0;
}

/*
 * Parse the VP9 uncompressed frame header far enough to recover
 * profile, key/intra flags, bit depth, color config and frame size.
 * For a show-existing-frame header only *ref is filled.
 * @return 0 on success; -1 on invalid/unsupported headers (inter
 *         frames deliberately return -1 after reading refidx fields).
 */
int decode_frame_header(const u8 *data, int size, struct VP9Context *s, int *ref)
{
	int ret, last_invisible, profile;

	/* general header */
	if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Failed to initialize bitstream reader\n");
		return ret;
	}

	if (get_bits(&s->gb, 2) != 0x2) { // frame marker
		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid frame marker\n");
		return -1;
	}

	/* profile is low bit then high bit, plus a reserved bit when == 3 */
	profile = get_bits1(&s->gb);
	profile |= get_bits1(&s->gb) << 1;
	if (profile == 3)
		profile += get_bits1(&s->gb);

	if (profile > 3) {
		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Profile %d is not yet supported\n", profile);
		return -1;
	}

	s->s.h.profile = profile;
	if (get_bits1(&s->gb)) {
		/* show_existing_frame: just report which ref slot to display */
		*ref = get_bits(&s->gb, 3);
		return 0;
	}

	s->last_keyframe = s->s.h.keyframe;
	s->s.h.keyframe = !get_bits1(&s->gb);

	last_invisible = s->s.h.invisible;
	s->s.h.invisible = !get_bits1(&s->gb);
	s->s.h.errorres = get_bits1(&s->gb);
	s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;

	if (s->s.h.keyframe) {
		if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid sync code\n");
			return -1;
		}
		if ((ret = read_colorspace_details(s,profile)) < 0)
			return ret;
		// for profile 1, here follows the subsampling bits
		s->s.h.refreshrefmask = 0xff;
		s->width = get_bits(&s->gb, 16) + 1;
		s->height = get_bits(&s->gb, 16) + 1;
		if (get_bits1(&s->gb)) { // has scaling
			s->render_width = get_bits(&s->gb, 16) + 1;
			s->render_height = get_bits(&s->gb, 16) + 1;
		} else {
			s->render_width = s->width;
			s->render_height = s->height;
		}
		/*pr_info("keyframe res: (%d x %d), render size: (%d x %d)\n",
			s->width, s->height, s->render_width, s->render_height);*/
	} else {
		s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
		s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
		if (s->s.h.intraonly) {
			if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid sync code\n");
				return -1;
			}
			if (profile >= 1) {
				if ((ret = read_colorspace_details(s, profile)) < 0)
					return ret;
			} else {
				/* profile 0 intra-only frames are fixed 8-bit 4:2:0 */
				s->ss_h = s->ss_v = 1;
				s->s.h.bpp = 8;
				s->bpp_index = 0;
				s->bytesperpixel = 1;
				s->pix_fmt = AV_PIX_FMT_YUV420P;
			}
			s->s.h.refreshrefmask = get_bits(&s->gb, 8);
			s->width = get_bits(&s->gb, 16) + 1;
			s->height = get_bits(&s->gb, 16) + 1;
			if (get_bits1(&s->gb)) { // has scaling
				s->render_width = get_bits(&s->gb, 16) + 1;
				s->render_height = get_bits(&s->gb, 16) + 1;
			} else {
				s->render_width = s->width;
				s->render_height = s->height;
			}
			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "intra res: (%d x %d), render size: (%d x %d)\n",
				s->width, s->height, s->render_width, s->render_height);
		} else {
			s->s.h.refreshrefmask = get_bits(&s->gb, 8);
			s->s.h.refidx[0] = get_bits(&s->gb, 3);
			s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
			s->s.h.refidx[1] = get_bits(&s->gb, 3);
			s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
			s->s.h.refidx[2] = get_bits(&s->gb, 3);
			s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;

			/*refresh_frame_flags;
			for (i = 0; i < REFS_PER_FRAME; ++i) {
				frame_refs[i];
				ref_frame_sign_biases[i];
			}
			frame_size_from_refs();
			high_precision_mv;
			interp_filter();*/

			/* inter frames are not parsed further by this helper */
			return -1;
		}
	}

	return 0;
}

/*
 * Split a (possibly) VP9 superframe into its sub-frame sizes using the
 * trailing superframe index.  Fills s->nb_frames, s->sizes[], s->size.
 * A 16-byte "AMLV" prefix added by the driver is detected and recorded
 * in s->prefix_size.  Returns 0 on success, negative on bad input.
 */
int vp9_superframe_split_filter(struct vp9_superframe_split *s)
{
	int i, j, ret, marker;
	bool is_superframe = false;
	/* NOTE(review): s->data is cast to int* and prefix[3] is read below;
	 * this assumes s->data_size >= 16 and int-aligned data — confirm
	 * callers guarantee both (an unaligned/short buffer would fault). */
	int *prefix = (int *)s->data;

	if (!s->data)
		return -1;

	#define AML_PREFIX ('V' << 24 | 'L' << 16 | 'M' << 8 | 'A')
	if (prefix[3] == AML_PREFIX) {
		s->prefix_size = 16;
		/*pr_info("the frame data has beed added header\n");*/
	}

	/* superframe marker byte lives in the very last byte of the buffer */
	marker = s->data[s->data_size - 1];
	if ((marker & 0xe0) == 0xc0) {
		int length_size = 1 + ((marker >> 3) & 0x3);
		int nb_frames = 1 + (marker & 0x7);
		int idx_size = 2 + nb_frames * length_size;

		if (s->data_size >= idx_size &&
			s->data[s->data_size - idx_size] == marker) {
			s64 total_size = 0;
			int idx = s->data_size + 1 - idx_size;

			for (i = 0; i < nb_frames; i++) {
				int frame_size = 0;
				/* little-endian length field, length_size bytes */
				for (j = 0; j < length_size; j++)
					frame_size |= s->data[idx++] << (j * 8);

				total_size += frame_size;
				if (frame_size < 0 ||
					total_size > s->data_size - idx_size) {
					v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid frame size in a sframe: %d\n",
						frame_size);
					ret = -EINVAL;
					goto fail;
				}
				s->sizes[i] = frame_size;
			}

			s->nb_frames = nb_frames;
			s->size = total_size;
			s->next_frame = 0;
			s->next_frame_offset = 0;
			is_superframe = true;
		}
	}else {
		/* no superframe index: treat the buffer as a single frame */
		s->nb_frames = 1;
		s->sizes[0] = s->data_size;
		s->size = s->data_size;
	}

	/*pr_info("sframe: %d, frames: %d, IN: %x, OUT: %x\n",
		is_superframe, s->nb_frames,
		s->data_size, s->size);*/

	/* parse uncompressed header. */
	if (is_superframe) {
		/* bitstream profile. */
		/* frame type. (intra or inter) */
		/* colorspace descriptor */
		/* ... */

		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "the frame is a superframe.\n");
	}

	/*pr_err("in: %x, %d, out: %x, sizes %d,%d,%d,%d,%d,%d,%d,%d\n",
		s->data_size,
		s->nb_frames,
		s->size,
		s->sizes[0],
		s->sizes[1],
		s->sizes[2],
		s->sizes[3],
		s->sizes[4],
		s->sizes[5],
		s->sizes[6],
		s->sizes[7]);*/

	return 0;
fail:
	return ret;
}

/*
 * Split @data into sub-frames and run the uncompressed-header parser on
 * each until one parses cleanly.  ps->head_parsed is set when a full
 * header (not a show-existing-frame) was decoded.
 */
int vp9_decode_extradata_ps(u8 *data, int size, struct vp9_param_sets *ps)
{
	int i, ref = -1, ret = 0;
	struct vp9_superframe_split s = {0};

	/*parse superframe.*/
	s.data = data;
	s.data_size = size;
	ret = vp9_superframe_split_filter(&s);
	if (ret) {
		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "parse frames failed.\n");
		return ret;
	}

	for (i = 0; i < s.nb_frames; i++) {
		u32 len = s.sizes[i] - s.prefix_size;
		u8 *buf = s.data + s.next_frame_offset + s.prefix_size;

		ret = decode_frame_header(buf, len, &ps->ctx, &ref);
		if (!ret) {
			ps->head_parsed = ref < 0 ? true : false;
			return 0;
		}

		/* NOTE(review): this SETS the offset instead of accumulating
		 * (+=), so for nb_frames > 2 the third frame's offset is wrong;
		 * harmless today because the loop returns on first success —
		 * confirm intent before relying on later iterations. */
		s.next_frame_offset = len + s.prefix_size;
	}

	return ret;
}
diff --git a/drivers/amvdec_ports/decoder/aml_vp9_parser.h b/drivers/amvdec_ports/decoder/aml_vp9_parser.h new file mode 100644 index 0000000..ddeddec --- /dev/null +++ b/drivers/amvdec_ports/decoder/aml_vp9_parser.h
@@ -0,0 +1,184 @@ +/* + * drivers/amvdec_ports/decoder/aml_vp9_parser.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef AML_VP9_PARSER_H +#define AML_VP9_PARSER_H + +#include "../aml_vcodec_drv.h" +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +#include "../utils/pixfmt.h" +#include "../utils/get_bits.h" +#endif + +#define MAX_SEGMENT 8 + +struct VP9BitstreamHeader { + // bitstream header + u8 profile; + u8 bpp; + u8 keyframe; + u8 invisible; + u8 errorres; + u8 intraonly; + u8 resetctx; + u8 refreshrefmask; + u8 highprecisionmvs; + u8 allowcompinter; + u8 refreshctx; + u8 parallelmode; + u8 framectxid; + u8 use_last_frame_mvs; + u8 refidx[3]; + u8 signbias[3]; + u8 fixcompref; + u8 varcompref[2]; + struct { + u8 level; + char sharpness; + } filter; + struct { + u8 enabled; + u8 updated; + char mode[2]; + char ref[4]; + } lf_delta; + u8 yac_qi; + char ydc_qdelta, uvdc_qdelta, uvac_qdelta; + u8 lossless; + struct { + u8 enabled; + u8 temporal; + u8 absolute_vals; + u8 update_map; + u8 prob[7]; + u8 pred_prob[3]; + struct { + u8 q_enabled; + u8 lf_enabled; + u8 ref_enabled; + u8 skip_enabled; + u8 ref_val; + int16_t q_val; + char lf_val; + int16_t qmul[2][2]; + u8 lflvl[4][2]; + } feat[MAX_SEGMENT]; + } segmentation; + struct { + u32 log2_tile_cols, log2_tile_rows; + u32 tile_cols, tile_rows; + } tiling; + + int uncompressed_header_size; + int compressed_header_size; +}; + +struct VP9SharedContext { + struct VP9BitstreamHeader 
h; + + //struct ThreadFrame refs[8]; +#define CUR_FRAME 0 +#define REF_FRAME_MVPAIR 1 +#define REF_FRAME_SEGMAP 2 + //struct VP9Frame frames[3]; +}; + +struct VP9Context { + struct VP9SharedContext s; +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER + struct get_bits_context gb; +#endif + int pass, active_tile_cols; + + u8 ss_h, ss_v; + u8 last_bpp, bpp_index, bytesperpixel; + u8 last_keyframe; + // sb_cols/rows, rows/cols and last_fmt are used for allocating all internal + // arrays, and are thus per-thread. w/h and gf_fmt are synced between threads + // and are therefore per-stream. pix_fmt represents the value in the header + // of the currently processed frame. + int width; + int height; + + int render_width; + int render_height; +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER + enum AVPixelFormat pix_fmt, last_fmt, gf_fmt; +#endif + u32 sb_cols, sb_rows, rows, cols; + + struct { + u8 lim_lut[64]; + u8 mblim_lut[64]; + } filter_lut; + struct { + u8 coef[4][2][2][6][6][3]; + } prob_ctx[4]; + struct { + u8 coef[4][2][2][6][6][11]; + } prob; + + // contextual (above) cache + u8 *above_partition_ctx; + u8 *above_mode_ctx; + // FIXME maybe merge some of the below in a flags field? 
+ u8 *above_y_nnz_ctx; + u8 *above_uv_nnz_ctx[2]; + u8 *above_skip_ctx; // 1bit + u8 *above_txfm_ctx; // 2bit + u8 *above_segpred_ctx; // 1bit + u8 *above_intra_ctx; // 1bit + u8 *above_comp_ctx; // 1bit + u8 *above_ref_ctx; // 2bit + u8 *above_filter_ctx; + + // whole-frame cache + u8 *intra_pred_data[3]; + + // block reconstruction intermediates + int block_alloc_using_2pass; + uint16_t mvscale[3][2]; + u8 mvstep[3][2]; +}; + +struct vp9_superframe_split { + /*in data*/ + u8 *data; + u32 data_size; + + /*out data*/ + int nb_frames; + int size; + int next_frame; + u32 next_frame_offset; + int prefix_size; + int sizes[8]; +}; + +struct vp9_param_sets { + bool head_parsed; + struct VP9Context ctx; +}; + +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +int vp9_superframe_split_filter(struct vp9_superframe_split *s); +int vp9_decode_extradata_ps(u8 *data, int size, struct vp9_param_sets *ps); +#else +inline int vp9_decode_extradata_ps(u8 *data, int size, struct vp9_param_sets *ps) { return -1; } +#endif + +#endif //AML_VP9_PARSER_H
diff --git a/drivers/amvdec_ports/decoder/utils.h b/drivers/amvdec_ports/decoder/utils.h new file mode 100644 index 0000000..26b1552 --- /dev/null +++ b/drivers/amvdec_ports/decoder/utils.h
/*
 * drivers/amlogic/media_modules/amvdec_ports/decoder/utils.h
 *
 * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#ifndef _UTILS_H
#define _UTILS_H

/*
 * Arithmetic helper macros.  NOTE: each argument may be evaluated more
 * than once — do not pass expressions with side effects.
 */
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define CLAMP(x, low, high) \
	(((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x)))

/*
 * BITAT(x, n): 1 if bit @n of @x is set, 0 otherwise.
 * Fix: arguments are now parenthesized; the original expanded to
 * "x & (1 << n)", so compound arguments such as BITAT(a | b, i + 1)
 * were grouped wrongly by operator precedence.
 */
#define BITAT(x, n) (((x) & (1 << (n))) == (1 << (n)))

/*
 * Local fixed-width aliases.
 * NOTE(review): these shadow the <stdint.h> names — confirm no user of
 * this header also pulls in the standard definitions.
 */
typedef unsigned char uint8_t;
typedef int int32_t;
typedef unsigned int uint32_t;

#endif /* _UTILS_H */
diff --git a/drivers/amvdec_ports/decoder/vdec_av1_if.c b/drivers/amvdec_ports/decoder/vdec_av1_if.c new file mode 100644 index 0000000..c28ca10 --- /dev/null +++ b/drivers/amvdec_ports/decoder/vdec_av1_if.c
@@ -0,0 +1,1374 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <uapi/linux/swab.h> +#include "../vdec_drv_if.h" +#include "../aml_vcodec_util.h" +#include "../aml_vcodec_dec.h" +#include "../aml_vcodec_drv.h" +#include "../aml_vcodec_adapt.h" +#include "../vdec_drv_base.h" +#include "../utils/common.h" + +#define KERNEL_ATRACE_TAG KERNEL_ATRACE_TAG_V4L2 +#include <trace/events/meson_atrace.h> + +#define PREFIX_SIZE (16) + +#define HEADER_BUFFER_SIZE (32 * 1024) +#define SYNC_CODE (0x498342) + +extern int av1_need_prefix; + +/** + * struct av1_fb - av1 decode frame buffer information + * @vdec_fb_va : virtual address of struct vdec_fb + * @y_fb_dma : dma address of Y frame buffer (luma) + * @c_fb_dma : dma address of C frame buffer (chroma) + * @poc : picture order count of frame buffer + * @reserved : for 8 bytes alignment + */ +struct av1_fb { + uint64_t vdec_fb_va; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + int32_t poc; + uint32_t reserved; +}; + +/** + * struct vdec_av1_dec_info - decode information + * @dpb_sz : decoding picture buffer size + * @resolution_changed : resoltion 
change happen + * @reserved : for 8 bytes alignment + * @bs_dma : Input bit-stream buffer dma address + * @y_fb_dma : Y frame buffer dma address + * @c_fb_dma : C frame buffer dma address + * @vdec_fb_va : VDEC frame buffer struct virtual address + */ +struct vdec_av1_dec_info { + uint32_t dpb_sz; + uint32_t resolution_changed; + uint32_t reserved; + uint64_t bs_dma; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + uint64_t vdec_fb_va; +}; + +/** + * struct vdec_av1_vsi - shared memory for decode information exchange + * between VPU and Host. + * The memory is allocated by VPU then mapping to Host + * in vpu_dec_init() and freed in vpu_dec_deinit() + * by VPU. + * AP-W/R : AP is writer/reader on this item + * VPU-W/R: VPU is write/reader on this item + * @hdr_buf : Header parsing buffer (AP-W, VPU-R) + * @list_free : free frame buffer ring list (AP-W/R, VPU-W) + * @list_disp : display frame buffer ring list (AP-R, VPU-W) + * @dec : decode information (AP-R, VPU-W) + * @pic : picture information (AP-R, VPU-W) + * @crop : crop information (AP-R, VPU-W) + */ +struct vdec_av1_vsi { + char *header_buf; + int sps_size; + int pps_size; + int sei_size; + int head_offset; + struct vdec_av1_dec_info dec; + struct vdec_pic_info pic; + struct vdec_pic_info cur_pic; + struct v4l2_rect crop; + bool is_combine; + int nalu_pos; +}; + +/** + * struct vdec_av1_inst - av1 decoder instance + * @num_nalu : how many nalus be decoded + * @ctx : point to aml_vcodec_ctx + * @vsi : VPU shared information + */ +struct vdec_av1_inst { + unsigned int num_nalu; + struct aml_vcodec_ctx *ctx; + struct aml_vdec_adapt vdec; + struct vdec_av1_vsi *vsi; + struct aml_dec_params parms; + struct completion comp; + struct vdec_comp_buf_info comp_info; +}; + +/*!\brief OBU types. 
*/ +enum OBU_TYPE { + OBU_SEQUENCE_HEADER = 1, + OBU_TEMPORAL_DELIMITER = 2, + OBU_FRAME_HEADER = 3, + OBU_TILE_GROUP = 4, + OBU_METADATA = 5, + OBU_FRAME = 6, + OBU_REDUNDANT_FRAME_HEADER = 7, + OBU_TILE_LIST = 8, + OBU_PADDING = 15, +}; + +/*!\brief OBU metadata types. */ +enum OBU_METADATA_TYPE { + OBU_METADATA_TYPE_RESERVED_0 = 0, + OBU_METADATA_TYPE_HDR_CLL = 1, + OBU_METADATA_TYPE_HDR_MDCV = 2, + OBU_METADATA_TYPE_SCALABILITY = 3, + OBU_METADATA_TYPE_ITUT_T35 = 4, + OBU_METADATA_TYPE_TIMECODE = 5, +}; + +struct ObuHeader { + size_t size; // Size (1 or 2 bytes) of the OBU header (including the + // optional OBU extension header) in the bitstream. + enum OBU_TYPE type; + int has_size_field; + int has_extension; + // The following fields come from the OBU extension header and therefore are + // only used if has_extension is true. + int temporal_layer_id; + int spatial_layer_id; +}; + +static const size_t kMaximumLeb128Size = 8; +static const u8 kLeb128ByteMask = 0x7f; // Binary: 01111111 + +// Disallow values larger than 32-bits to ensure consistent behavior on 32 and +// 64 bit targets: value is typically used to determine buffer allocation size +// when decoded. 
+static const u64 kMaximumLeb128Value = ULONG_MAX; + +char obu_type_name[16][32] = { + "UNKNOWN", + "OBU_SEQUENCE_HEADER", + "OBU_TEMPORAL_DELIMITER", + "OBU_FRAME_HEADER", + "OBU_TILE_GROUP", + "OBU_METADATA", + "OBU_FRAME", + "OBU_REDUNDANT_FRAME_HEADER", + "OBU_TILE_LIST", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "OBU_PADDING" +}; + +char meta_type_name[6][32] = { + "OBU_METADATA_TYPE_RESERVED_0", + "OBU_METADATA_TYPE_HDR_CLL", + "OBU_METADATA_TYPE_HDR_MDCV", + "OBU_METADATA_TYPE_SCALABILITY", + "OBU_METADATA_TYPE_ITUT_T35", + "OBU_METADATA_TYPE_TIMECODE" +}; + +struct read_bit_buffer { + const u8 *bit_buffer; + const u8 *bit_buffer_end; + u32 bit_offset; +}; + +struct DataBuffer { + const u8 *data; + size_t size; +}; + +static int vdec_write_nalu(struct vdec_av1_inst *inst, + u8 *buf, u32 size, u64 ts); +static int vdec_get_dw_mode(struct vdec_av1_inst *inst, int dw_mode); + +static void get_pic_info(struct vdec_av1_inst *inst, + struct vdec_pic_info *pic) +{ + *pic = inst->vsi->pic; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "pic(%d, %d), buf(%d, %d)\n", + pic->visible_width, pic->visible_height, + pic->coded_width, pic->coded_height); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "Y(%d, %d), C(%d, %d)\n", + pic->y_bs_sz, pic->y_len_sz, + pic->c_bs_sz, pic->c_len_sz); +} + +static void get_crop_info(struct vdec_av1_inst *inst, struct v4l2_rect *cr) +{ + cr->left = inst->vsi->crop.left; + cr->top = inst->vsi->crop.top; + cr->width = inst->vsi->crop.width; + cr->height = inst->vsi->crop.height; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "l=%d, t=%d, w=%d, h=%d\n", + cr->left, cr->top, cr->width, cr->height); +} + +static void get_dpb_size(struct vdec_av1_inst *inst, unsigned int *dpb_sz) +{ + *dpb_sz = inst->vsi->dec.dpb_sz; + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, "sz=%d\n", *dpb_sz); +} + +static u32 vdec_config_default_parms(u8 *parm) +{ + u8 *pbuf = parm; + + pbuf += sprintf(pbuf, 
"parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:7;"); + pbuf += sprintf(pbuf, "av1_double_write_mode:1;"); + pbuf += sprintf(pbuf, "av1_buf_width:1920;"); + pbuf += sprintf(pbuf, "av1_buf_height:1088;"); + pbuf += sprintf(pbuf, "av1_max_pic_w:4096;"); + pbuf += sprintf(pbuf, "av1_max_pic_h:2304;"); + pbuf += sprintf(pbuf, "save_buffer_mode:0;"); + pbuf += sprintf(pbuf, "no_head:0;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:0;"); + + return parm - pbuf; +} + +static void vdec_parser_parms(struct vdec_av1_inst *inst) +{ + struct aml_vcodec_ctx *ctx = inst->ctx; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "%s:parms_status = 0x%x, present_flag = %d\n", + __func__, ctx->config.parm.dec.parms_status, + ctx->config.parm.dec.hdr.color_parms.present_flag); + if (ctx->config.parm.dec.parms_status & + V4L2_CONFIG_PARM_DECODE_CFGINFO) { + u8 *pbuf = ctx->config.buf; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;", + ctx->config.parm.dec.cfg.ref_buf_margin); + pbuf += sprintf(pbuf, "av1_double_write_mode:%d;", + ctx->config.parm.dec.cfg.double_write_mode); + pbuf += sprintf(pbuf, "av1_buf_width:1920;"); + pbuf += sprintf(pbuf, "av1_buf_height:1088;"); + pbuf += sprintf(pbuf, "save_buffer_mode:0;"); + pbuf += sprintf(pbuf, "no_head:0;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;", + ctx->config.parm.dec.cfg.canvas_mem_mode); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:%d;", + ctx->config.parm.dec.cfg.canvas_mem_endian); + pbuf += sprintf(pbuf, "parm_v4l_low_latency_mode:%d;", + ctx->config.parm.dec.cfg.low_latency_mode); + pbuf += sprintf(pbuf, "parm_v4l_duration:%d;", + ctx->config.parm.dec.cfg.duration); + ctx->config.length = pbuf - ctx->config.buf; + } else { + ctx->config.parm.dec.cfg.double_write_mode = 1; + ctx->config.parm.dec.cfg.ref_buf_margin = 7; + ctx->config.length = 
vdec_config_default_parms(ctx->config.buf); + } + + if ((ctx->config.parm.dec.parms_status & + V4L2_CONFIG_PARM_DECODE_HDRINFO) && + ctx->config.parm.dec.hdr.color_parms.present_flag) { + u8 *pbuf = ctx->config.buf + ctx->config.length; + + pbuf += sprintf(pbuf, "HDRStaticInfo:%d;", 1); + pbuf += sprintf(pbuf, "signal_type:%d;", + ctx->config.parm.dec.hdr.signal_type); + pbuf += sprintf(pbuf, "mG.x:%d;", + ctx->config.parm.dec.hdr.color_parms.primaries[0][0]); + pbuf += sprintf(pbuf, "mG.y:%d;", + ctx->config.parm.dec.hdr.color_parms.primaries[0][1]); + pbuf += sprintf(pbuf, "mB.x:%d;", + ctx->config.parm.dec.hdr.color_parms.primaries[1][0]); + pbuf += sprintf(pbuf, "mB.y:%d;", + ctx->config.parm.dec.hdr.color_parms.primaries[1][1]); + pbuf += sprintf(pbuf, "mR.x:%d;", + ctx->config.parm.dec.hdr.color_parms.primaries[2][0]); + pbuf += sprintf(pbuf, "mR.y:%d;", + ctx->config.parm.dec.hdr.color_parms.primaries[2][1]); + pbuf += sprintf(pbuf, "mW.x:%d;", + ctx->config.parm.dec.hdr.color_parms.white_point[0]); + pbuf += sprintf(pbuf, "mW.y:%d;", + ctx->config.parm.dec.hdr.color_parms.white_point[1]); + pbuf += sprintf(pbuf, "mMaxDL:%d;", + ctx->config.parm.dec.hdr.color_parms.luminance[0] * 1000); + pbuf += sprintf(pbuf, "mMinDL:%d;", + ctx->config.parm.dec.hdr.color_parms.luminance[1]); + pbuf += sprintf(pbuf, "mMaxCLL:%d;", + ctx->config.parm.dec.hdr.color_parms.content_light_level.max_content); + pbuf += sprintf(pbuf, "mMaxFALL:%d;", + ctx->config.parm.dec.hdr.color_parms.content_light_level.max_pic_average); + ctx->config.length = pbuf - ctx->config.buf; + inst->parms.hdr = ctx->config.parm.dec.hdr; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_HDRINFO; + } + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "config.buf = %s\n", ctx->config.buf); + + inst->vdec.config = ctx->config; + inst->parms.cfg = ctx->config.parm.dec.cfg; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO; +} + +static int vdec_av1_init(struct aml_vcodec_ctx *ctx, unsigned long 
*h_vdec) +{ + struct vdec_av1_inst *inst = NULL; + int ret = -1; + + inst = kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + inst->vdec.frm_name = "AV1"; + inst->vdec.video_type = VFORMAT_AV1; + inst->vdec.filp = ctx->dev->filp; + inst->vdec.ctx = ctx; + inst->ctx = ctx; + + vdec_parser_parms(inst); + + /* set play mode.*/ + if (ctx->is_drm_mode) + inst->vdec.port.flag |= PORT_FLAG_DRM; + + /* to eable av1 hw.*/ + inst->vdec.port.type = PORT_TYPE_HEVC; + + /* probe info from the stream */ + inst->vsi = kzalloc(sizeof(struct vdec_av1_vsi), GFP_KERNEL); + if (!inst->vsi) { + ret = -ENOMEM; + goto err; + } + + /* alloc the header buffer to be used cache sps or spp etc.*/ + inst->vsi->header_buf = vzalloc(HEADER_BUFFER_SIZE); + if (!inst->vsi->header_buf) { + ret = -ENOMEM; + goto err; + } + + init_completion(&inst->comp); + + ctx->ada_ctx = &inst->vdec; + *h_vdec = (unsigned long)inst; + + /* init decoder. */ + ret = video_decoder_init(&inst->vdec); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "vdec_av1 init err=%d\n", ret); + goto err; + } + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "av1 Instance >> %lx\n", (ulong) inst); + + return 0; +err: + if (inst && inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + if (inst && inst->vsi) + kfree(inst->vsi); + if (inst) + kfree(inst); + *h_vdec = 0; + + return ret; +} + +static int parse_stream_ucode(struct vdec_av1_inst *inst, + u8 *buf, u32 size, u64 timestamp) +{ + int ret = 0; + + ret = vdec_write_nalu(inst, buf, size, timestamp); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write data failed. size: %d, err: %d\n", size, ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 
0 : -1; +} + +static int parse_stream_ucode_dma(struct vdec_av1_inst *inst, + ulong buf, u32 size, u64 timestamp, u32 handle) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle, + vdec_vframe_input_free, inst->ctx); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 0 : -1; +} + +static int parse_stream_cpu(struct vdec_av1_inst *inst, u8 *buf, u32 size) +{ + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "can not suppport parse stream by cpu.\n"); + + return -1; +} + +static int vdec_av1_probe(unsigned long h_vdec, + struct aml_vcodec_mem *bs, void *out) +{ + struct vdec_av1_inst *inst = + (struct vdec_av1_inst *)h_vdec; + u8 *buf = (u8 *)bs->vaddr; + u32 size = bs->size; + int ret = 0; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if ((s->magic != AML_VIDEO_MAGIC) && + (s->type != V4L_STREAM_TYPE_MATEDATA)) + return -1; + + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, s->data, + s->len, bs->timestamp); + } else { + ret = parse_stream_cpu(inst, s->data, s->len); + } + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = parse_stream_ucode_dma(inst, bs->addr, size, + bs->timestamp, BUFF_IDX(bs, bs->index)); + } + } else { + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, buf, size, bs->timestamp); + } else { + ret = parse_stream_cpu(inst, buf, size); + } + } + + inst->vsi->cur_pic = inst->vsi->pic; + + return ret; +} + +static void vdec_av1_deinit(unsigned long h_vdec) +{ + struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec; + struct aml_vcodec_ctx *ctx = inst->ctx; + + 
video_decoder_release(&inst->vdec); + + if (inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + + if (inst->vsi) + kfree(inst->vsi); + + kfree(inst); + + ctx->drv_handle = 0; +} + +// Returns 1 when OBU type is valid, and 0 otherwise. +static int valid_obu_type(int obu_type) +{ + int valid_type = 0; + + switch (obu_type) { + case OBU_SEQUENCE_HEADER: + case OBU_TEMPORAL_DELIMITER: + case OBU_FRAME_HEADER: + case OBU_TILE_GROUP: + case OBU_METADATA: + case OBU_FRAME: + case OBU_REDUNDANT_FRAME_HEADER: + case OBU_TILE_LIST: + case OBU_PADDING: + valid_type = 1; + break; + default: + break; + } + + return valid_type; +} + +size_t uleb_size_in_bytes(u64 value) +{ + size_t size = 0; + + do { + ++size; + } while ((value >>= 7) != 0); + + return size; +} + +int uleb_decode(const u8 *buffer, size_t available, + u64 *value, size_t *length) +{ + int i; + + if (buffer && value) { + *value = 0; + + for (i = 0; i < kMaximumLeb128Size && i < available; ++i) { + const u8 decoded_byte = *(buffer + i) & kLeb128ByteMask; + + *value |= ((u64)decoded_byte) << (i * 7); + if ((*(buffer + i) >> 7) == 0) { + if (length) { + *length = i + 1; + } + + // Fail on values larger than 32-bits to ensure consistent behavior on + // 32 and 64 bit targets: value is typically used to determine buffer + // allocation size. + if (*value > ULONG_MAX) + return -1; + + return 0; + } + } + } + + // If we get here, either the buffer/value pointers were invalid, + // or we ran over the available space + return -1; +} + +int uleb_encode(u64 value, size_t available, + u8 *coded_value, size_t *coded_size) +{ + int i; + const size_t leb_size = uleb_size_in_bytes(value); + + if (value > kMaximumLeb128Value || leb_size > kMaximumLeb128Size || + leb_size > available || !coded_value || !coded_size) { + return -1; + } + + for (i = 0; i < leb_size; ++i) { + u8 byte = value & 0x7f; + + value >>= 7; + if (value != 0) byte |= 0x80; // Signal that more bytes follow. 
+ + *(coded_value + i) = byte; + } + + *coded_size = leb_size; + + return 0; +} + +static int rb_read_bit(struct read_bit_buffer *rb) +{ + const u32 off = rb->bit_offset; + const u32 p = off >> 3; + const int q = 7 - (int)(off & 0x7); + + if (rb->bit_buffer + p < rb->bit_buffer_end) { + const int bit = (rb->bit_buffer[p] >> q) & 1; + + rb->bit_offset = off + 1; + return bit; + } else { + return 0; + } +} + +static int rb_read_literal(struct read_bit_buffer *rb, int bits) +{ + int value = 0, bit; + + for (bit = bits - 1; bit >= 0; bit--) + value |= rb_read_bit(rb) << bit; + + return value; +} + +static int read_obu_size(const u8 *data, + size_t bytes_available, + size_t *const obu_size, + size_t *const length_field_size) +{ + u64 u_obu_size = 0; + + if (uleb_decode(data, bytes_available, &u_obu_size, length_field_size) != 0) { + return -1; + } + + if (u_obu_size > ULONG_MAX) + return -1; + + *obu_size = (size_t) u_obu_size; + + return 0; +} + +// Parses OBU header and stores values in 'header'. +static int read_obu_header(struct read_bit_buffer *rb, + int is_annexb, struct ObuHeader *header) +{ + const int bit_buffer_byte_length = + rb->bit_buffer_end - rb->bit_buffer; + + if (!rb || !header) + return -1; + + if (bit_buffer_byte_length < 1) + return -1; + + header->size = 1; + + if (rb_read_bit(rb) != 0) { + // Forbidden bit. Must not be set. + return -1; + } + + header->type = (enum OBU_TYPE) rb_read_literal(rb, 4); + if (!valid_obu_type(header->type)) + return -1; + + header->has_extension = rb_read_bit(rb); + header->has_size_field = rb_read_bit(rb); + + if (!header->has_size_field && !is_annexb) { + // section 5 obu streams must have obu_size field set. + return -1; + } + + if (rb_read_bit(rb) != 0) { + // obu_reserved_1bit must be set to 0. 
+ return -1; + } + + if (header->has_extension) { + if (bit_buffer_byte_length == 1) + return -1; + + header->size += 1; + header->temporal_layer_id = rb_read_literal(rb, 3); + header->spatial_layer_id = rb_read_literal(rb, 2); + if (rb_read_literal(rb, 3) != 0) { + // extension_header_reserved_3bits must be set to 0. + return -1; + } + } + + return 0; +} + +int read_obu_header_and_size(const u8 *data, + size_t bytes_available, + int is_annexb, + struct ObuHeader *obu_header, + size_t *const payload_size, + size_t *const bytes_read) +{ + size_t length_field_size_obu = 0; + size_t length_field_size_payload = 0; + size_t obu_size = 0; + int status = 0; + struct read_bit_buffer rb = { data + length_field_size_obu, + data + bytes_available, 0}; + + if (is_annexb) { + // Size field comes before the OBU header, and includes the OBU header + status = read_obu_size(data, bytes_available, &obu_size, &length_field_size_obu); + if (status != 0) + return status; + } + + status = read_obu_header(&rb, is_annexb, obu_header); + if (status != 0) + return status; + + if (!obu_header->has_size_field) { + // Derive the payload size from the data we've already read + if (obu_size < obu_header->size) + return -1; + + *payload_size = obu_size - obu_header->size; + } else { + // Size field comes after the OBU header, and is just the payload size + status = read_obu_size(data + length_field_size_obu + obu_header->size, + bytes_available - length_field_size_obu - obu_header->size, + payload_size, &length_field_size_payload); + if (status != 0) + return status; + } + + *bytes_read = length_field_size_obu + obu_header->size + length_field_size_payload; + + return 0; +} + +int parser_frame(int is_annexb, u8 *data, const u8 *data_end, + u8 *dst_data, u32 *frame_len, u8 *meta_buf, u32 *meta_len) +{ + int frame_decoding_finished = 0; + u32 obu_size = 0; + int seen_frame_header = 0; + int next_start_tile = 0; + struct DataBuffer obu_size_hdr; + u8 header[20] = {0}; + u8 *p = NULL; + u32 rpu_size 
= 0; + struct ObuHeader obu_header; + + memset(&obu_header, 0, sizeof(obu_header)); + + // decode frame as a series of OBUs + while (!frame_decoding_finished) { + // struct read_bit_buffer rb; + size_t payload_size = 0; + size_t header_size = 0; + size_t bytes_read = 0; + const size_t bytes_available = data_end - data; + enum OBU_METADATA_TYPE meta_type; + int status; + u64 type; + u32 i; + + if (bytes_available == 0 && !seen_frame_header) { + break; + } + + status = read_obu_header_and_size(data, bytes_available, is_annexb, + &obu_header, &payload_size, &bytes_read); + if (status != 0) { + return -1; + } + + // Record obu size header information. + obu_size_hdr.data = data + obu_header.size; + obu_size_hdr.size = bytes_read - obu_header.size; + + // Note: read_obu_header_and_size() takes care of checking that this + // doesn't cause 'data' to advance past 'data_end'. + + if ((size_t)(data_end - data - bytes_read) < payload_size) { + return -1; + } + + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "obu %s len %zu+%zu\n", + obu_type_name[obu_header.type], + bytes_read, payload_size); + + if (!is_annexb) { + obu_size = bytes_read + payload_size + 4; + header_size = 20; + } else { + obu_size = bytes_read + payload_size; + header_size = 16; + } + + header[0] = ((obu_size + 4) >> 24) & 0xff; + header[1] = ((obu_size + 4) >> 16) & 0xff; + header[2] = ((obu_size + 4) >> 8) & 0xff; + header[3] = ((obu_size + 4) >> 0) & 0xff; + header[4] = header[0] ^ 0xff; + header[5] = header[1] ^ 0xff; + header[6] = header[2] ^ 0xff; + header[7] = header[3] ^ 0xff; + header[8] = 0; + header[9] = 0; + header[10] = 0; + header[11] = 1; + header[12] = 'A'; + header[13] = 'M'; + header[14] = 'L'; + header[15] = 'V'; + + // put new size to here as annexb + header[16] = (obu_size & 0xff) | 0x80; + header[17] = ((obu_size >> 7) & 0xff) | 0x80; + header[18] = ((obu_size >> 14) & 0xff) | 0x80; + header[19] = ((obu_size >> 21) & 0xff) | 0x00; + + memcpy(dst_data, header, header_size); + dst_data += 
header_size; + memcpy(dst_data, data, bytes_read + payload_size); + dst_data += (bytes_read + payload_size); + + data += bytes_read; + *frame_len += (header_size + bytes_read + payload_size); + + switch (obu_header.type) { + case OBU_TEMPORAL_DELIMITER: + seen_frame_header = 0; + next_start_tile = 0; + break; + case OBU_SEQUENCE_HEADER: + // The sequence header should not change in the middle of a frame. + if (seen_frame_header) { + return -1; + } + break; + case OBU_FRAME_HEADER: + if (data_end == data + payload_size) { + frame_decoding_finished = 1; + } else { + seen_frame_header = 1; + } + break; + case OBU_REDUNDANT_FRAME_HEADER: + case OBU_FRAME: + if (obu_header.type == OBU_REDUNDANT_FRAME_HEADER) { + if (!seen_frame_header) { + return -1; + } + } else { + // OBU_FRAME_HEADER or OBU_FRAME. + if (seen_frame_header) { + return -1; + } + } + if (obu_header.type == OBU_FRAME) { + if (data_end == data + payload_size) { + frame_decoding_finished = 1; + seen_frame_header = 0; + } + } + break; + case OBU_TILE_GROUP: + if (!seen_frame_header) { + return -1; + } + if (data + payload_size == data_end) + frame_decoding_finished = 1; + if (frame_decoding_finished) + seen_frame_header = 0; + break; + case OBU_METADATA: + uleb_decode(data, 8, &type, &bytes_read); + if (type < 6) + meta_type = type; + else + meta_type = 0; + p = data + bytes_read; + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, + "meta type %s %zu+%zu\n", + meta_type_name[type], + bytes_read, + payload_size - bytes_read); + + if (meta_type == OBU_METADATA_TYPE_ITUT_T35) { +#if 0 /* for dumping original obu payload */ + for (i = 0; i < payload_size - bytes_read; i++) { + pr_info("%02x ", p[i]); + if (i % 16 == 15) + pr_info("\n"); + } + if (i % 16 != 0) + pr_info("\n"); +#endif + if ((p[0] == 0xb5) /* country code */ + && ((p[1] == 0x00) && (p[2] == 0x3b)) /* terminal_provider_code */ + && ((p[3] == 0x00) && (p[4] == 0x00) && (p[5] == 0x08) && (p[6] == 0x00))) { /* terminal_provider_oriented_code */ + v4l_dbg(0, 
V4L_DEBUG_CODEC_PARSER, + "dolbyvison rpu\n"); + meta_buf[0] = meta_buf[1] = meta_buf[2] = 0; + meta_buf[3] = 0x01; + meta_buf[4] = 0x19; + /* Dolby Vision RPU payload: two size encodings, selected by bit 4 of p[11]. */ + if (p[11] & 0x10) { + rpu_size = 0x100; + rpu_size |= (p[11] & 0x0f) << 4; + rpu_size |= (p[12] >> 4) & 0x0f; + if (p[12] & 0x08) { + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, + "meta rpu in obu exceed 512 bytes\n"); + break; + } + for (i = 0; i < rpu_size; i++) { + meta_buf[5 + i] = (p[12 + i] & 0x07) << 5; + meta_buf[5 + i] |= (p[13 + i] >> 3) & 0x1f; + } + rpu_size += 5; + } else { + rpu_size = (p[10] & 0x1f) << 3; + rpu_size |= (p[11] >> 5) & 0x07; + for (i = 0; i < rpu_size; i++) { + meta_buf[5 + i] = (p[11 + i] & 0x0f) << 4; + meta_buf[5 + i] |= (p[12 + i] >> 4) & 0x0f; + } + rpu_size += 5; + } + *meta_len = rpu_size; + } + } else if (meta_type == OBU_METADATA_TYPE_HDR_CLL) { + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "hdr10 cll:\n"); + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "max_cll = %x\n", (p[0] << 8) | p[1]); + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "max_fall = %x\n", (p[2] << 8) | p[3]); + } else if (meta_type == OBU_METADATA_TYPE_HDR_MDCV) { + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "hdr10 primaries[r,g,b] = \n"); + for (i = 0; i < 3; i++) { + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, " %x, %x\n", + (p[i * 4] << 8) | p[i * 4 + 1], + (p[i * 4 + 2] << 8) | p[i * 4 + 3]); + } + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, + "white point = %x, %x\n", (p[12] << 8) | p[13], (p[14] << 8) | p[15]); + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, + "maxl = %x\n", (p[16] << 24) | (p[17] << 16) | (p[18] << 8) | p[19]); + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, + "minl = %x\n", (p[20] << 24) | (p[21] << 16) | (p[22] << 8) | p[23]); + } + break; + case OBU_TILE_LIST: + break; + case OBU_PADDING: + break; + default: + // Skip unrecognized OBUs + break; + } + + data += payload_size; + } + + return 0; +} + +/* Write one AV1 frame to the decoder. When av1_need_prefix, the frame is first re-emitted via parser_frame() into a scratch buffer (size + 0x1000) and the rewritten stream is written instead of the raw input. */ +static int vdec_write_nalu(struct vdec_av1_inst *inst, + u8 *buf, u32 size, u64 ts) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + u8 *data = NULL; + u32 length = 0;
+ bool need_prefix = av1_need_prefix; + + if (need_prefix) { + u8 meta_buffer[1024] = {0}; + u32 meta_size = 0; + u8 *src = buf; + + data = vzalloc(size + 0x1000); + if (!data) + return -ENOMEM; + + parser_frame(0, src, src + size, data, &length, meta_buffer, &meta_size); + /* length == 0 means parser_frame produced nothing usable. */ + if (length) + ret = vdec_vframe_write(vdec, data, length, ts, 0); + else + ret = -1; + + vfree(data); + } else { + ret = vdec_vframe_write(vdec, buf, size, ts, 0); + } + + return ret; +} + +/* Re-parse the stream header and report true when the coded resolution changed; on change, cur_pic is updated to the newly parsed pic. The sync-code offset depends on whether a prefix is prepended (av1_need_prefix). */ +static bool monitor_res_change(struct vdec_av1_inst *inst, u8 *buf, u32 size) +{ + int ret = -1; + u8 *p = buf; + int len = size; + u32 synccode = av1_need_prefix ? + ((p[1] << 16) | (p[2] << 8) | p[3]) : + ((p[17] << 16) | (p[18] << 8) | p[19]); + + if (synccode == SYNC_CODE) { + ret = parse_stream_cpu(inst, p, len); + if (!ret && (inst->vsi->cur_pic.coded_width != + inst->vsi->pic.coded_width || + inst->vsi->cur_pic.coded_height != + inst->vsi->pic.coded_height)) { + inst->vsi->cur_pic = inst->vsi->pic; + return true; + } + } + + return false; +} + +static int vdec_av1_decode(unsigned long h_vdec, + struct aml_vcodec_mem *bs, bool *res_chg) +{ + struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec; + struct aml_vdec_adapt *vdec = &inst->vdec; + u8 *buf = (u8 *) bs->vaddr; /* NOTE(review): bs is dereferenced here, before the bs == NULL check below — the check is dead/too late; hoist it above these reads. */ + u32 size = bs->size; + int ret = -1; + + if (bs == NULL) + return -1; + + if (vdec_input_full(vdec)) { + return -EAGAIN; + } + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if (s->magic != AML_VIDEO_MAGIC) + return -1; + + if (!inst->ctx->param_sets_from_ucode && + (s->type == V4L_STREAM_TYPE_MATEDATA)) { + if ((*res_chg = monitor_res_change(inst, + s->data, s->len))) + return 0; + } + + ret = vdec_vframe_write(vdec, + s->data, + s->len, + bs->timestamp, + 0); + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = vdec_vframe_write_with_dma(vdec, + bs->addr, size, bs->timestamp, + BUFF_IDX(bs,
bs->index), + vdec_vframe_input_free, inst->ctx); + } + } else { + /*checked whether the resolution changes.*/ + if ((!inst->ctx->param_sets_from_ucode) && + (*res_chg = monitor_res_change(inst, buf, size))) + return 0; + + ret = vdec_write_nalu(inst, buf, size, bs->timestamp); + } + + return ret; +} + + /* Copy out only the parameter groups flagged in parms_status; dw mode always refreshed from the v4l config. */ + static void get_param_config_info(struct vdec_av1_inst *inst, + struct aml_dec_params *parms) + { + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CFGINFO) { + /* dw use v4l cfg */ + inst->parms.cfg.double_write_mode = + inst->ctx->config.parm.dec.cfg.double_write_mode; + parms->cfg = inst->parms.cfg; + } + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_PSINFO) + parms->ps = inst->parms.ps; + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_HDRINFO) + parms->hdr = inst->parms.hdr; + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CNTINFO) + parms->cnt = inst->parms.cnt; + + parms->parms_status |= inst->parms.parms_status; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "parms status: %u\n", parms->parms_status); + } + +static void get_param_comp_buf_info(struct vdec_av1_inst *inst, + struct vdec_comp_buf_info *params) +{ + memcpy(params, &inst->comp_info, sizeof(*params)); +} + +static int vdec_av1_get_param(unsigned long h_vdec, + enum vdec_get_param_type type, void *out) +{ + int ret = 0; + struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, /* NOTE(review): inst is NULL on this path, so inst->ctx is a NULL-pointer dereference; log without inst->ctx here. */ + "the av1 inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case GET_PARAM_PIC_INFO: + get_pic_info(inst, out); + break; + + case GET_PARAM_DPB_SIZE: + get_dpb_size(inst, out); + break; + + case GET_PARAM_CROP_INFO: + get_crop_info(inst, out); + break; + + case GET_PARAM_CONFIG_INFO: + get_param_config_info(inst, out); + break; + + case GET_PARAM_DW_MODE: + { + u32 *mode = out; + u32 m = inst->ctx->config.parm.dec.cfg.double_write_mode; + /* m <= 16: a literal dw mode, pass through; larger values (0x100/0x200/0x300) are policies resolved by vdec_get_dw_mode(). */ + if (m <= 16) + *mode =
inst->ctx->config.parm.dec.cfg.double_write_mode; + else + *mode = vdec_get_dw_mode(inst, 0); + break; + } + case GET_PARAM_COMP_BUF_INFO: + get_param_comp_buf_info(inst, out); + break; + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid get parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static void set_param_write_sync(struct vdec_av1_inst *inst) +{ + complete(&inst->comp); +} + +/* Resolve the configured double-write policy into a concrete dw ratio based on coded size (0x100/0x200/0x300 are resolution-conditional policies; anything else is already a literal mode). The dw_mode argument is unused here. */ +static int vdec_get_dw_mode(struct vdec_av1_inst *inst, int dw_mode) +{ + u32 valid_dw_mode = inst->ctx->config.parm.dec.cfg.double_write_mode; + int w = inst->vsi->pic.coded_width; + int h = inst->vsi->pic.coded_height; + u32 dw = 0x1; /*1:1*/ + + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + default: + dw = valid_dw_mode; + break; + } + + return dw; +} + +/* Scale a luma/chroma length by the resolved double-write mode's downsample factor. */ +static int vdec_pic_scale(struct vdec_av1_inst *inst, int length, int dw_mode) +{ + int ret = 64; + + switch (vdec_get_dw_mode(inst, dw_mode)) { + case 0x0: /* only afbc, output afbc */ + case 0x21: /* only afbc, output afbc */ + ret = 64; + break; + case 0x1: /* afbc and (w x h), output YUV420 */ + ret = length; + break; + case 0x2: /* afbc and (w/4 x h/4), output YUV420 */ + case 0x3: /* afbc and (w/4 x h/4), output afbc and YUV420 */ + ret = length >> 2; + break; + case 0x4: /* afbc and (w/2 x h/2), output YUV420 */ + ret = length >> 1; + break; + case 0x10: /* (w x h), output YUV420-8bit) */ + default: + ret = length; + break; + } + + return ret; +} + +static void set_param_ps_info(struct vdec_av1_inst *inst, + struct aml_vdec_ps_infos *ps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct vdec_av1_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + int dw = inst->parms.cfg.double_write_mode; + + /* fill visible area size that be used for EGL.
*/ + pic->visible_width = ps->visible_width; + pic->visible_height = ps->visible_height; + + /* calc visible ares. */ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. */ + pic->coded_width = ps->coded_width; + pic->coded_height = ps->coded_height; + + pic->y_len_sz = ALIGN(vdec_pic_scale(inst, pic->coded_width, dw), 64) * + ALIGN(vdec_pic_scale(inst, pic->coded_height, dw), 64); + pic->c_len_sz = pic->y_len_sz >> 1; + + /* calc DPB size */ + pic->dpb_frames = ps->dpb_frames; + pic->dpb_margin = ps->dpb_margin; + pic->vpp_margin = ps->dpb_margin; + dec->dpb_sz = ps->dpb_size; + pic->field = ps->field; + + inst->parms.ps = *ps; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_PSINFO; + + /*wake up*/ + complete(&inst->comp); + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "Parse from ucode, visible(%d x %d), coded(%d x %d)\n", + ps->visible_width, ps->visible_height, + ps->coded_width, ps->coded_height); +} + +static void set_param_comp_buf_info(struct vdec_av1_inst *inst, + struct vdec_comp_buf_info *info) +{ + memcpy(&inst->comp_info, info, sizeof(*info)); +} + +/* Latch HDR infos (only for a non-zero signal_type) and notify userspace via a src-change event. */ +static void set_param_hdr_info(struct vdec_av1_inst *inst, + struct aml_vdec_hdr_infos *hdr) +{ + if (hdr->signal_type != 0) { + inst->parms.hdr = *hdr; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_HDRINFO; + aml_vdec_dispatch_event(inst->ctx, + V4L2_EVENT_SRC_CH_HDRINFO); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "av1 set HDR infos\n"); + } +} + +static void set_param_post_event(struct vdec_av1_inst *inst, u32 *event) +{ + aml_vdec_dispatch_event(inst->ctx, *event); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "av1 post event: %d\n", *event); +} + +static void set_pic_info(struct vdec_av1_inst *inst, + struct vdec_pic_info *pic) +{ + inst->vsi->pic = *pic; +} + +static int vdec_av1_set_param(unsigned long h_vdec, + enum vdec_set_param_type type, void *in) +{ + int ret =
0; + struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, /* NOTE(review): inst is NULL on this path, so inst->ctx is a NULL-pointer dereference; log without inst->ctx here. */ + "the av1 inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case SET_PARAM_WRITE_FRAME_SYNC: + set_param_write_sync(inst); + break; + + case SET_PARAM_PS_INFO: + set_param_ps_info(inst, in); + break; + + case SET_PARAM_COMP_BUF_INFO: + set_param_comp_buf_info(inst, in); + break; + + case SET_PARAM_HDR_INFO: + set_param_hdr_info(inst, in); + break; + + case SET_PARAM_POST_EVENT: + set_param_post_event(inst, in); + break; + + case SET_PARAM_PIC_INFO: + set_pic_info(inst, in); + break; + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid set parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static struct vdec_common_if vdec_av1_if = { + .init = vdec_av1_init, + .probe = vdec_av1_probe, + .decode = vdec_av1_decode, + .get_param = vdec_av1_get_param, + .set_param = vdec_av1_set_param, + .deinit = vdec_av1_deinit, +}; + +struct vdec_common_if *get_av1_dec_comm_if(void); + +struct vdec_common_if *get_av1_dec_comm_if(void) +{ + return &vdec_av1_if; +} +
diff --git a/drivers/amvdec_ports/decoder/vdec_h264_if.c b/drivers/amvdec_ports/decoder/vdec_h264_if.c new file mode 100644 index 0000000..911c91d --- /dev/null +++ b/drivers/amvdec_ports/decoder/vdec_h264_if.c
@@ -0,0 +1,1066 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <uapi/linux/swab.h> + +#include "../vdec_drv_if.h" +#include "../aml_vcodec_util.h" +#include "../aml_vcodec_dec.h" +#include "../aml_vcodec_drv.h" +#include "../aml_vcodec_adapt.h" +#include "../vdec_drv_base.h" +#include "aml_h264_parser.h" +#include "../utils/common.h" + +/* h264 NALU type */ +#define NAL_NON_IDR_SLICE 0x01 +#define NAL_IDR_SLICE 0x05 +#define NAL_H264_SEI 0x06 +#define NAL_H264_SPS 0x07 +#define NAL_H264_PPS 0x08 +#define NAL_H264_AUD 0x09 + +/* low 5 bits of the NAL header byte = nal_unit_type */ +#define AVC_NAL_TYPE(value) ((value) & 0x1F) + +#define BUF_PREDICTION_SZ (64 * 1024)//(32 * 1024) + +#define MB_UNIT_LEN 16 + +/* motion vector size (bytes) for every macro block */ +#define HW_MB_STORE_SZ 64 + +#define H264_MAX_FB_NUM 17 +#define HDR_PARSING_BUF_SZ 1024 + +/* cache for SPS/PPS/SEI NALs accumulated before the first slice */ +#define HEADER_BUFFER_SIZE (128 * 1024) + +/** + * struct h264_fb - h264 decode frame buffer information + * @vdec_fb_va : virtual address of struct vdec_fb + * @y_fb_dma : dma address of Y frame buffer (luma) + * @c_fb_dma : dma address of C frame buffer (chroma) + * @poc : picture order count
of frame buffer + * @reserved : for 8 bytes alignment + */ +struct h264_fb { + uint64_t vdec_fb_va; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + int32_t poc; + uint32_t reserved; +}; + +/** + * struct h264_ring_fb_list - ring frame buffer list + * @fb_list : frame buffer arrary + * @read_idx : read index + * @write_idx : write index + * @count : buffer count in list + */ +struct h264_ring_fb_list { + struct h264_fb fb_list[H264_MAX_FB_NUM]; + unsigned int read_idx; + unsigned int write_idx; + unsigned int count; + unsigned int reserved; +}; + +/** + * struct vdec_h264_dec_info - decode information + * @dpb_sz : decoding picture buffer size + * @realloc_mv_buf : flag to notify driver to re-allocate mv buffer + * @reserved : for 8 bytes alignment + * @bs_dma : Input bit-stream buffer dma address + * @y_fb_dma : Y frame buffer dma address + * @c_fb_dma : C frame buffer dma address + * @vdec_fb_va : VDEC frame buffer struct virtual address + */ +struct vdec_h264_dec_info { + uint32_t dpb_sz; + uint32_t realloc_mv_buf; + uint32_t reserved; + uint64_t bs_dma; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + uint64_t vdec_fb_va; +}; + +/** + * struct vdec_h264_vsi - shared memory for decode information exchange + * between VPU and Host. + * The memory is allocated by VPU then mapping to Host + * in vpu_dec_init() and freed in vpu_dec_deinit() + * by VPU.
+ * AP-W/R : AP is writer/reader on this item + * VPU-W/R: VPU is write/reader on this item + * @dec : decode information (AP-R, VPU-W) + * @pic : picture information (AP-R, VPU-W) + * @crop : crop information (AP-R, VPU-W) + */ +struct vdec_h264_vsi { + unsigned char hdr_buf[HDR_PARSING_BUF_SZ]; + char *header_buf; + int sps_size; + int pps_size; + int sei_size; + int head_offset; + struct vdec_h264_dec_info dec; + struct vdec_pic_info pic; + struct vdec_pic_info cur_pic; + struct v4l2_rect crop; + bool is_combine; + int nalu_pos; +}; + +/** + * struct vdec_h264_inst - h264 decoder instance + * @num_nalu : how many nalus be decoded + * @ctx : point to aml_vcodec_ctx + * @pred_buf : HW working predication buffer + * @mv_buf : HW working motion vector buffer + * @vpu : VPU instance + * @vsi : VPU shared information + */ +struct vdec_h264_inst { + unsigned int num_nalu; + struct aml_vcodec_ctx *ctx; + struct aml_vcodec_mem pred_buf; + struct aml_vcodec_mem mv_buf[H264_MAX_FB_NUM]; + struct aml_vdec_adapt vdec; + struct vdec_h264_vsi *vsi; + struct aml_dec_params parms; + struct completion comp; +}; + +/* NOTE(review): dead debug/dump helpers kept under #if 0 — consider removing rather than carrying commented-out code. */ +#if 0 +#define DUMP_FILE_NAME "/data/dump/dump.tmp" +static struct file *filp; +static loff_t file_pos; + +void dump_write(const char __user *buf, size_t count) +{ + mm_segment_t old_fs; + + if (!filp) + return; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + + if (count != vfs_write(filp, buf, count, &file_pos)) + pr_err("Failed to write file\n"); + + set_fs(old_fs); +} + +void dump_init(void) +{ + filp = filp_open(DUMP_FILE_NAME, O_CREAT | O_RDWR, 0644); + if (IS_ERR(filp)) { + pr_err("open dump file failed\n"); + filp = NULL; + } +} + +void dump_deinit(void) +{ + if (filp) { + filp_close(filp, current->files); + filp = NULL; + file_pos = 0; + } +} + +void swap_uv(void *uv, int size) +{ + int i; + __u16 *p = uv; + + size /= 2; + + for (i = 0; i < size; i++, p++) + *p = __swab16(*p); +} +#endif + +static void get_pic_info(struct vdec_h264_inst *inst, + struct
vdec_pic_info *pic) +{ + *pic = inst->vsi->pic; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "pic(%d, %d), buf(%d, %d)\n", + pic->visible_width, pic->visible_height, + pic->coded_width, pic->coded_height); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "Y(%d, %d), C(%d, %d)\n", pic->y_bs_sz, + pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz); +} + +static void get_crop_info(struct vdec_h264_inst *inst, struct v4l2_rect *cr) +{ + cr->left = inst->vsi->crop.left; + cr->top = inst->vsi->crop.top; + cr->width = inst->vsi->crop.width; + cr->height = inst->vsi->crop.height; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "l=%d, t=%d, w=%d, h=%d\n", + cr->left, cr->top, cr->width, cr->height); +} + +static void get_dpb_size(struct vdec_h264_inst *inst, unsigned int *dpb_sz) +{ + *dpb_sz = inst->vsi->dec.dpb_sz; + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, "sz=%d\n", *dpb_sz); +} + +/* Advance *data past a leading AUD NAL (type 0x9 with payload byte 0xf0), shrinking *size accordingly, so parsing starts at the next NAL. */ +static void skip_aud_data(u8 **data, u32 *size) +{ + int i; + + i = find_start_code(*data, *size); + if (i > 0 && (*data)[i++] == 0x9 && (*data)[i++] == 0xf0) { + *size -= i; + *data += i; + } +} + +/* Fill parm with the default config string; returns the written length. */ +static u32 vdec_config_default_parms(u8 *parm) +{ + u8 *pbuf = parm; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "mh264_double_write_mode:16;"); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:7;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:0;"); + + return parm - pbuf; /* NOTE(review): pbuf was advanced past parm, so parm - pbuf is negative; the written length is pbuf - parm — this looks inverted (compare ctx->config.length = pbuf - ctx->config.buf in vdec_parser_parms). */ +} + +/* Build the decoder config string from v4l2 CFGINFO when present, else defaults; also latches cfg into inst->parms. */ +static void vdec_parser_parms(struct vdec_h264_inst *inst) +{ + struct aml_vcodec_ctx *ctx = inst->ctx; + + if (ctx->config.parm.dec.parms_status & + V4L2_CONFIG_PARM_DECODE_CFGINFO) { + u8 *pbuf = ctx->config.buf; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "mh264_double_write_mode:%d;", + ctx->config.parm.dec.cfg.double_write_mode); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;", + ctx->config.parm.dec.cfg.ref_buf_margin); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;", +
ctx->config.parm.dec.cfg.canvas_mem_mode); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:%d;", + ctx->config.parm.dec.cfg.canvas_mem_endian); + pbuf += sprintf(pbuf, "parm_v4l_low_latency_mode:%d;", + ctx->config.parm.dec.cfg.low_latency_mode); + pbuf += sprintf(pbuf, "parm_v4l_metadata_config_flag:%d;", + ctx->config.parm.dec.cfg.metadata_config_flag); + pbuf += sprintf(pbuf, "parm_v4l_duration:%d;", + ctx->config.parm.dec.cfg.duration); + ctx->config.length = pbuf - ctx->config.buf; + } else { + ctx->config.parm.dec.cfg.double_write_mode = 16; + ctx->config.parm.dec.cfg.ref_buf_margin = 7; + ctx->config.length = vdec_config_default_parms(ctx->config.buf); + } + + inst->vdec.config = ctx->config; + inst->parms.cfg = ctx->config.parm.dec.cfg; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO; +} + +/* Allocate and initialise an H.264 decoder instance; on success *h_vdec holds the instance pointer. */ +static int vdec_h264_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec) +{ + struct vdec_h264_inst *inst = NULL; + int ret = -1; + + inst = vzalloc(sizeof(*inst)); + if (!inst) + return -ENOMEM; + + inst->vdec.frm_name = "H.264"; + inst->vdec.video_type = VFORMAT_H264; + inst->vdec.filp = ctx->dev->filp; + inst->vdec.ctx = ctx; + inst->ctx = ctx; + + vdec_parser_parms(inst); + + /* set play mode.*/ + if (ctx->is_drm_mode) + inst->vdec.port.flag |= PORT_FLAG_DRM; + + /* probe info from the stream */ + inst->vsi = vzalloc(sizeof(struct vdec_h264_vsi)); + if (!inst->vsi) { + ret = -ENOMEM; + goto err; + } + + /* alloc the header buffer to be used cache sps or spp etc.*/ + inst->vsi->header_buf = vzalloc(HEADER_BUFFER_SIZE); + if (!inst->vsi->header_buf) { + ret = -ENOMEM; + goto err; + } + + init_completion(&inst->comp); + + ctx->ada_ctx = &inst->vdec; + *h_vdec = (unsigned long)inst; + + ret = video_decoder_init(&inst->vdec); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "vdec_h264 init err=%d\n", ret); + goto err; + } + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "H264 Instance >> %lx", (ulong) inst); + + return 0; +err: + if (inst
&& inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + if (inst && inst->vsi) + vfree(inst->vsi); + if (inst) + vfree(inst); + *h_vdec = 0; + + return ret; +} + +#if 0 +static int refer_buffer_num(int level_idc, int max_poc_cnt, + int mb_width, int mb_height) +{ + int size; + int pic_size = mb_width * mb_height * 384; + + switch (level_idc) { + case 9: + size = 152064; + break; + case 10: + size = 152064; + break; + case 11: + size = 345600; + break; + case 12: + size = 912384; + break; + case 13: + size = 912384; + break; + case 20: + size = 912384; + break; + case 21: + size = 1824768; + break; + case 22: + size = 3110400; + break; + case 30: + size = 3110400; + break; + case 31: + size = 6912000; + break; + case 32: + size = 7864320; + break; + case 40: + size = 12582912; + break; + case 41: + size = 12582912; + break; + case 42: + size = 13369344; + break; + case 50: + size = 42393600; + break; + case 51: + case 52: + default: + size = 70778880; + break; + } + + size /= pic_size; + size = size + 1; /* need more buffers */ + + if (size > max_poc_cnt) + size = max_poc_cnt; + + return size; +} +#endif + +/* Enlarge the canvas/plane sizes for double-write modes 1 and 2 (extra half-width / half-width-and-height planes). */ +static void vdec_config_dw_mode(struct vdec_pic_info *pic, int dw_mode) +{ + switch (dw_mode) { + case 0x1: /* (w x h) + (w/2 x h) */ + pic->coded_width += pic->coded_width >> 1; + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz >> 1; + break; + case 0x2: /* (w x h) + (w/2 x h/2) */ + pic->coded_width += pic->coded_width >> 1; + pic->coded_height += pic->coded_height >> 1; + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz >> 1; + break; + default: /* nothing to do */ + break; + } +} + +/* Derive pic/crop/DPB parameters from a parsed SPS (CPU parsing path) and latch them into inst->parms.ps. */ +static void fill_vdec_params(struct vdec_h264_inst *inst, struct h264_SPS_t *sps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct vdec_h264_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + int dw = inst->parms.cfg.double_write_mode; + int margin =
inst->parms.cfg.ref_buf_margin; + u32 mb_w, mb_h, width, height; + + mb_w = sps->mb_width; + mb_h = sps->mb_height; + + width = mb_w << 4; + height = mb_h << 4; + + width -= (sps->crop_left + sps->crop_right); + height -= (sps->crop_top + sps->crop_bottom); + + /* fill visible area size that be used for EGL. */ + pic->visible_width = width; + pic->visible_height = height; + + /* calc visible ares. */ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. */ + pic->coded_width = ALIGN(mb_w, 4) << 4; + pic->coded_height = ALIGN(mb_h, 4) << 4; + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz >> 1; + pic->profile_idc = sps->profile_idc; + /* calc DPB size */ + dec->dpb_sz = sps->num_reorder_frames + margin; + + inst->parms.ps.visible_width = pic->visible_width; + inst->parms.ps.visible_height = pic->visible_height; + inst->parms.ps.coded_width = pic->coded_width; + inst->parms.ps.coded_height = pic->coded_height; + inst->parms.ps.profile = sps->profile_idc; + inst->parms.ps.mb_width = sps->mb_width; + inst->parms.ps.mb_height = sps->mb_height; + inst->parms.ps.ref_frames = sps->ref_frame_count; + inst->parms.ps.dpb_frames = sps->num_reorder_frames; + inst->parms.ps.dpb_size = dec->dpb_sz; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_PSINFO; + + vdec_config_dw_mode(pic, dw); + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_BUFMGR, + "The stream infos, dw: %d, coded:(%d x %d), visible:(%d x %d), DPB: %d, margin: %d\n", + dw, pic->coded_width, pic->coded_height, + pic->visible_width, pic->visible_height, + dec->dpb_sz - margin, margin); +} + +/* Return true when buf holds more than one start code (CSD + slice combined in one packet); *pos is set to the offset just past the first start code. NOTE(review): find_start_code(p, 7) scans a fixed 7-byte window while the loop walks to buf + size — near the buffer tail this may read past the end; confirm find_start_code bounds. */ +static bool check_frame_combine(u8 *buf, u32 size, int *pos) +{ + bool combine = false; + int i = 0, j = 0, cnt = 0; + u8 *p = buf; + + for (i = 4; i < size; i++) { + j = find_start_code(p, 7); + if (j > 0) { + if (++cnt > 1) { + combine = true; + break; + } + + *pos = p - buf + j; + p += j;
+ i += j; + } + p++; + } + + //pr_info("nal pos: %d, is_combine: %d\n",*pos, *is_combine); + return combine; +} + +/* Find the first start code within range bytes; returns the offset just past it, or -1 if none. */ +static int vdec_search_startcode(u8 *buf, u32 range) +{ + int pos = -1; + int i = 0, j = 0; + u8 *p = buf; + + for (i = 4; i < range; i++) { + j = find_start_code(p, 7); + if (j > 0) { + pos = p - buf + j; + break; + } + p++; + } + + return pos; +} + +/* Write the probe frame and block (up to 1s) until the ucode reports picture info; fails when dpb_frames stays 0. */ +static int parse_stream_ucode(struct vdec_h264_inst *inst, + u8 *buf, u32 size, u64 timestamp) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write(vdec, buf, size, timestamp, 0); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 0 : -1; +} + +/* DMA-buffer variant of parse_stream_ucode. */ +static int parse_stream_ucode_dma(struct vdec_h264_inst *inst, + ulong buf, u32 size, u64 timestamp, u32 handle) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle, + vdec_vframe_input_free, inst->ctx); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 0 : -1; +} + +/* CPU-side SPS parse: records combine state, extracts the SPS via h264_decode_extradata_ps() and fills vdec params; -1 when no SPS found. */ +static int parse_stream_cpu(struct vdec_h264_inst *inst, u8 *buf, u32 size) +{ + int ret = 0; + struct h264_param_sets *ps; + int nal_idx = 0; + bool is_combine = false; + + is_combine = check_frame_combine(buf, size, &nal_idx); + if (nal_idx < 0) + return -1; + + /* if the st compose from csd + slice that is the combine data.
*/ + inst->vsi->is_combine = is_combine; + inst->vsi->nalu_pos = nal_idx; + + ps = vzalloc(sizeof(struct h264_param_sets)); + if (ps == NULL) + return -ENOMEM; + + ret = h264_decode_extradata_ps(buf, size, ps); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "parse extra data failed. err: %d\n", ret); + goto out; + } + + if (ps->sps_parsed) + fill_vdec_params(inst, &ps->sps); + + ret = ps->sps_parsed ? 0 : -1; +out: + vfree(ps); + + return ret; +} + +/* Probe stream parameters from the first buffer, via ucode or CPU parsing depending on mode; snapshots pic into cur_pic for later res-change detection. */ +static int vdec_h264_probe(unsigned long h_vdec, + struct aml_vcodec_mem *bs, void *out) +{ + struct vdec_h264_inst *inst = + (struct vdec_h264_inst *)h_vdec; + u8 *buf = (u8 *) bs->vaddr; + u32 size = bs->size; + int ret = 0; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if ((s->magic != AML_VIDEO_MAGIC) && + (s->type != V4L_STREAM_TYPE_MATEDATA)) + return -1; + + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, s->data, + s->len, bs->timestamp); + } else { + skip_aud_data((u8 **)&s->data, &s->len); + ret = parse_stream_cpu(inst, s->data, s->len); + } + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = parse_stream_ucode_dma(inst, bs->addr, size, + bs->timestamp, BUFF_IDX(bs, bs->index)); + } + } else { + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, buf, size, bs->timestamp); + } else { + skip_aud_data(&buf, &size); + ret = parse_stream_cpu(inst, buf, size); + } + } + + inst->vsi->cur_pic = inst->vsi->pic; + + return ret; +} + +static void vdec_h264_deinit(unsigned long h_vdec) +{ + struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec; + struct aml_vcodec_ctx *ctx = inst->ctx; + + video_decoder_release(&inst->vdec); + + if (inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + + if (inst->vsi) + vfree(inst->vsi); + + vfree(inst); + + ctx->drv_handle = 0; +} + +/* Cache standalone SPS/PPS/SEI NALs in header_buf; when a non-header NAL arrives, prepend the cached headers and write the whole thing to the decoder, then reset the cache. */ +static int vdec_write_nalu(struct
vdec_h264_inst *inst, + u8 *buf, u32 size, u64 ts) +{ + int ret = -1; + struct aml_vdec_adapt *vdec = &inst->vdec; + bool is_combine = inst->vsi->is_combine; + int nalu_pos; + u32 nal_type; + + /*print_hex_debug(buf, size, 32);*/ + + nalu_pos = vdec_search_startcode(buf, 16); + if (nalu_pos < 0) + goto err; + + nal_type = AVC_NAL_TYPE(buf[nalu_pos]); + //v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, "NALU type: %d, size: %u\n", nal_type, size); + + if (nal_type == NAL_H264_SPS && !is_combine) { + if (inst->vsi->head_offset + size > HEADER_BUFFER_SIZE) { + ret = -EILSEQ; + goto err; + } + inst->vsi->sps_size = size; + memcpy(inst->vsi->header_buf + inst->vsi->head_offset, buf, size); + inst->vsi->head_offset += inst->vsi->sps_size; + ret = size; + } else if (nal_type == NAL_H264_PPS && !is_combine) { + //buf_sz -= nal_start_idx; + if (inst->vsi->head_offset + size > HEADER_BUFFER_SIZE) { + ret = -EILSEQ; + goto err; + } + inst->vsi->pps_size = size; + memcpy(inst->vsi->header_buf + inst->vsi->head_offset, buf, size); + inst->vsi->head_offset += inst->vsi->pps_size; + ret = size; + } else if (nal_type == NAL_H264_SEI && !is_combine) { + if (inst->vsi->head_offset + size > HEADER_BUFFER_SIZE) { + ret = -EILSEQ; + goto err; + } + inst->vsi->sei_size = size; + memcpy(inst->vsi->header_buf + inst->vsi->head_offset, buf, size); + inst->vsi->head_offset += inst->vsi->sei_size; + ret = size; + } else if (inst->vsi->head_offset == 0) { + /* no cached headers: write the buffer through unchanged. */ + ret = vdec_vframe_write(vdec, buf, size, ts, 0); + } else { + /* flush cached headers + this NAL as one write, then reset the cache. */ + char *write_buf = vmalloc(inst->vsi->head_offset + size); + if (!write_buf) { + ret = -ENOMEM; + goto err; + } + + memcpy(write_buf, inst->vsi->header_buf, inst->vsi->head_offset); + memcpy(write_buf + inst->vsi->head_offset, buf, size); + + ret = vdec_vframe_write(vdec, write_buf, + inst->vsi->head_offset + size, ts, 0); + + memset(inst->vsi->header_buf, 0, HEADER_BUFFER_SIZE); + inst->vsi->head_offset = 0; + inst->vsi->sps_size = 0; + inst->vsi->pps_size = 0; + inst->vsi->sei_size
= 0; + + vfree(write_buf); + } + + return ret; +err: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, "err(%d)", ret); + return ret; +} + +/* Walk the NALs, re-parsing each SPS; report true when coded size or profile differs from the last snapshot (and update the snapshot). */ +static bool monitor_res_change(struct vdec_h264_inst *inst, u8 *buf, u32 size) +{ + int ret = 0, i = 0, j = 0; + u8 *p = buf; + int len = size; + u32 type; + + for (i = 4; i < size; i++) { + j = find_start_code(p, len); + if (j > 0) { + len = size - (p - buf); + type = AVC_NAL_TYPE(p[j]); + /* stop at the first non-AUD, non-SEI/SPS/PPS NAL (i.e. slice data). */ + if (type != NAL_H264_AUD && + (type > NAL_H264_PPS || type < NAL_H264_SEI)) + break; + + if (type == NAL_H264_SPS) { + ret = parse_stream_cpu(inst, p, len); + if (ret) + break; + } + p += j; + } + p++; + } + + if (!ret && ((inst->vsi->cur_pic.coded_width != + inst->vsi->pic.coded_width || + inst->vsi->cur_pic.coded_height != + inst->vsi->pic.coded_height) || + (inst->vsi->pic.profile_idc != + inst->vsi->cur_pic.profile_idc))) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, "res change\n"); + inst->vsi->cur_pic = inst->vsi->pic; + return true; + } + + return false; +} + +static int vdec_h264_decode(unsigned long h_vdec, + struct aml_vcodec_mem *bs, bool *res_chg) +{ + struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec; + struct aml_vdec_adapt *vdec = &inst->vdec; + u8 *buf = (u8 *) bs->vaddr; /* NOTE(review): bs is dereferenced here, before the bs == NULL check below — the check is dead/too late; hoist it above these reads. */ + u32 size = bs->size; + int ret = -1; + + if (bs == NULL) + return -1; + + if (vdec_input_full(vdec)) + return -EAGAIN; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if (s->magic != AML_VIDEO_MAGIC) + return -1; + + if (!inst->ctx->param_sets_from_ucode && + (s->type == V4L_STREAM_TYPE_MATEDATA)) { + if ((*res_chg = monitor_res_change(inst, + s->data, s->len))) + return 0; + } + + ret = vdec_vframe_write(vdec, + s->data, + s->len, + bs->timestamp, + 0); + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = vdec_vframe_write_with_dma(vdec, + bs->addr, size, bs->timestamp, + BUFF_IDX(bs, bs->index), +
vdec_vframe_input_free, inst->ctx); + } + } else { + if (inst->ctx->param_sets_from_ucode) { + int nal_idx = 0; + /* if the st compose from csd + slice that is the combine data. */ + inst->vsi->is_combine = check_frame_combine(buf, size, &nal_idx); + /*if (nal_idx < 0) + return -1;*/ + } else { + /*checked whether the resolution changes.*/ + if ((*res_chg = monitor_res_change(inst, buf, size))) { + return 0; + } + } + ret = vdec_write_nalu(inst, buf, size, bs->timestamp); + } + + return ret; +} + +/* Copy out only the parameter groups flagged in parms_status. */ +static void get_param_config_info(struct vdec_h264_inst *inst, + struct aml_dec_params *parms) +{ + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CFGINFO) + parms->cfg = inst->parms.cfg; + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_PSINFO) + parms->ps = inst->parms.ps; + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_HDRINFO) + parms->hdr = inst->parms.hdr; + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CNTINFO) + parms->cnt = inst->parms.cnt; + + parms->parms_status |= inst->parms.parms_status; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "parms status: %u\n", parms->parms_status); +} + +static int vdec_h264_get_param(unsigned long h_vdec, + enum vdec_get_param_type type, void *out) +{ + int ret = 0; + struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, /* NOTE(review): inst is NULL on this path, so inst->ctx is a NULL-pointer dereference; log without inst->ctx here. */ + "the h264 inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case GET_PARAM_PIC_INFO: + get_pic_info(inst, out); + break; + + case GET_PARAM_DPB_SIZE: + get_dpb_size(inst, out); + break; + + case GET_PARAM_CROP_INFO: + get_crop_info(inst, out); + break; + + case GET_PARAM_CONFIG_INFO: + get_param_config_info(inst, out); + break; + + case GET_PARAM_DW_MODE: + { + unsigned int* mode = out; + *mode = inst->ctx->config.parm.dec.cfg.double_write_mode; + break; + } + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid get parameter type=%d\n", type); + ret = -EINVAL; + } + + return
ret; +} + +static void set_param_write_sync(struct vdec_h264_inst *inst) +{ + complete(&inst->comp); +} + +static void set_param_ps_info(struct vdec_h264_inst *inst, + struct aml_vdec_ps_infos *ps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct vdec_h264_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + int dw = inst->parms.cfg.double_write_mode; + + /* fill visible area size that be used for EGL. */ + pic->visible_width = ps->visible_width; + pic->visible_height = ps->visible_height; + + /* calc visible ares. */ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. */ + pic->coded_width = ps->coded_width; + pic->coded_height = ps->coded_height; + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz >> 1; + pic->profile_idc = ps->profile; + pic->field = ps->field; + pic->dpb_frames = ps->dpb_frames; + pic->dpb_margin = ps->dpb_margin; + pic->vpp_margin = ps->dpb_margin; + dec->dpb_sz = ps->dpb_size; + + inst->parms.ps = *ps; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_PSINFO; + + vdec_config_dw_mode(pic, dw); + + /*wake up*/ + complete(&inst->comp); + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "Parse from ucode, visible(%d x %d), coded(%d x %d), scan:%s\n", + ps->visible_width, ps->visible_height, + ps->coded_width, ps->coded_height, + ps->field == V4L2_FIELD_NONE ? 
"P" : "I"); +} + +static void set_param_hdr_info(struct vdec_h264_inst *inst, + struct aml_vdec_hdr_infos *hdr) +{ + inst->parms.hdr = *hdr; + if (!(inst->parms.parms_status & + V4L2_CONFIG_PARM_DECODE_HDRINFO)) { + inst->parms.hdr = *hdr; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_HDRINFO; + aml_vdec_dispatch_event(inst->ctx, + V4L2_EVENT_SRC_CH_HDRINFO); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "H264 set HDR infos\n"); + } +} + +static void set_pic_info(struct vdec_h264_inst *inst, + struct vdec_pic_info *pic) +{ + inst->vsi->pic = *pic; +} + +static void set_param_post_event(struct vdec_h264_inst *inst, u32 *event) +{ + aml_vdec_dispatch_event(inst->ctx, *event); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "H264 post event: %d\n", *event); +} + +static int vdec_h264_set_param(unsigned long h_vdec, + enum vdec_set_param_type type, void *in) +{ + int ret = 0; + struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "the h264 inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case SET_PARAM_WRITE_FRAME_SYNC: + set_param_write_sync(inst); + break; + + case SET_PARAM_PS_INFO: + set_param_ps_info(inst, in); + break; + + case SET_PARAM_HDR_INFO: + set_param_hdr_info(inst, in); + break; + + case SET_PARAM_POST_EVENT: + set_param_post_event(inst, in); + break; + + case SET_PARAM_PIC_INFO: + set_pic_info(inst, in); + break; + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid set parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static struct vdec_common_if vdec_h264_if = { + .init = vdec_h264_init, + .probe = vdec_h264_probe, + .decode = vdec_h264_decode, + .get_param = vdec_h264_get_param, + .set_param = vdec_h264_set_param, + .deinit = vdec_h264_deinit, +}; + +struct vdec_common_if *get_h264_dec_comm_if(void); + +struct vdec_common_if *get_h264_dec_comm_if(void) +{ + return &vdec_h264_if; +}
diff --git a/drivers/amvdec_ports/decoder/vdec_hevc_if.c b/drivers/amvdec_ports/decoder/vdec_hevc_if.c new file mode 100644 index 0000000..56d492a --- /dev/null +++ b/drivers/amvdec_ports/decoder/vdec_hevc_if.c
@@ -0,0 +1,866 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <uapi/linux/swab.h> +#include "../vdec_drv_if.h" +#include "../aml_vcodec_util.h" +#include "../aml_vcodec_dec.h" +#include "../aml_vcodec_drv.h" +#include "../aml_vcodec_adapt.h" +#include "../vdec_drv_base.h" +#include "aml_hevc_parser.h" + +#define HEVC_NAL_TYPE(value) ((value >> 1) & 0x3F) +#define HEADER_BUFFER_SIZE (32 * 1024) + +/** + * struct hevc_fb - hevc decode frame buffer information + * @vdec_fb_va : virtual address of struct vdec_fb + * @y_fb_dma : dma address of Y frame buffer (luma) + * @c_fb_dma : dma address of C frame buffer (chroma) + * @poc : picture order count of frame buffer + * @reserved : for 8 bytes alignment + */ +struct hevc_fb { + uint64_t vdec_fb_va; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + int32_t poc; + uint32_t reserved; +}; + +/** + * struct vdec_hevc_dec_info - decode information + * @dpb_sz : decoding picture buffer size + * @resolution_changed : resoltion change happen + * @reserved : for 8 bytes alignment + * @bs_dma : Input bit-stream buffer dma address + * @y_fb_dma : Y frame 
buffer dma address + * @c_fb_dma : C frame buffer dma address + * @vdec_fb_va : VDEC frame buffer struct virtual address + */ +struct vdec_hevc_dec_info { + uint32_t dpb_sz; + uint32_t resolution_changed; + uint32_t reserved; + uint64_t bs_dma; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + uint64_t vdec_fb_va; +}; + +/** + * struct vdec_hevc_vsi - shared memory for decode information exchange + * between VPU and Host. + * The memory is allocated by VPU then mapping to Host + * in vpu_dec_init() and freed in vpu_dec_deinit() + * by VPU. + * AP-W/R : AP is writer/reader on this item + * VPU-W/R: VPU is write/reader on this item + * @hdr_buf : Header parsing buffer (AP-W, VPU-R) + * @list_free : free frame buffer ring list (AP-W/R, VPU-W) + * @list_disp : display frame buffer ring list (AP-R, VPU-W) + * @dec : decode information (AP-R, VPU-W) + * @pic : picture information (AP-R, VPU-W) + * @crop : crop information (AP-R, VPU-W) + */ +struct vdec_hevc_vsi { + char *header_buf; + int sps_size; + int pps_size; + int sei_size; + int head_offset; + struct vdec_hevc_dec_info dec; + struct vdec_pic_info pic; + struct vdec_pic_info cur_pic; + struct v4l2_rect crop; + bool is_combine; + int nalu_pos; + struct h265_param_sets ps; +}; + +/** + * struct vdec_hevc_inst - hevc decoder instance + * @num_nalu : how many nalus be decoded + * @ctx : point to aml_vcodec_ctx + * @vsi : VPU shared information + */ +struct vdec_hevc_inst { + unsigned int num_nalu; + struct aml_vcodec_ctx *ctx; + struct aml_vdec_adapt vdec; + struct vdec_hevc_vsi *vsi; + struct aml_dec_params parms; + struct completion comp; + struct vdec_comp_buf_info comp_info; +}; + +static void get_pic_info(struct vdec_hevc_inst *inst, + struct vdec_pic_info *pic) +{ + *pic = inst->vsi->pic; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "pic(%d, %d), buf(%d, %d)\n", + pic->visible_width, pic->visible_height, + pic->coded_width, pic->coded_height); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "Y(%d, %d), C(%d, 
%d)\n", pic->y_bs_sz, + pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz); +} + +static void get_crop_info(struct vdec_hevc_inst *inst, struct v4l2_rect *cr) +{ + cr->left = inst->vsi->crop.left; + cr->top = inst->vsi->crop.top; + cr->width = inst->vsi->crop.width; + cr->height = inst->vsi->crop.height; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "l=%d, t=%d, w=%d, h=%d\n", + cr->left, cr->top, cr->width, cr->height); +} + +static void get_dpb_size(struct vdec_hevc_inst *inst, unsigned int *dpb_sz) +{ + *dpb_sz = inst->vsi->dec.dpb_sz; + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, "sz=%d\n", *dpb_sz); +} + +static u32 vdec_config_default_parms(u8 *parm) +{ + u8 *pbuf = parm; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:7;"); + pbuf += sprintf(pbuf, "hevc_double_write_mode:1;"); + pbuf += sprintf(pbuf, "hevc_buf_width:4096;"); + pbuf += sprintf(pbuf, "hevc_buf_height:2304;"); + pbuf += sprintf(pbuf, "save_buffer_mode:0;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:0;"); + + return parm - pbuf; +} + +static void vdec_parser_parms(struct vdec_hevc_inst *inst) +{ + struct aml_vcodec_ctx *ctx = inst->ctx; + + if (ctx->config.parm.dec.parms_status & + V4L2_CONFIG_PARM_DECODE_CFGINFO) { + u8 *pbuf = ctx->config.buf; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;", + ctx->config.parm.dec.cfg.ref_buf_margin); + pbuf += sprintf(pbuf, "hevc_double_write_mode:%d;", + ctx->config.parm.dec.cfg.double_write_mode); + pbuf += sprintf(pbuf, "hevc_buf_width:4096;"); + pbuf += sprintf(pbuf, "hevc_buf_height:2304;"); + pbuf += sprintf(pbuf, "save_buffer_mode:0;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;", + ctx->config.parm.dec.cfg.canvas_mem_mode); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:%d;", + ctx->config.parm.dec.cfg.canvas_mem_endian); + pbuf += sprintf(pbuf, 
"parm_v4l_low_latency_mode:%d;", + ctx->config.parm.dec.cfg.low_latency_mode); + pbuf += sprintf(pbuf, "parm_v4l_metadata_config_flag:%d;", + ctx->config.parm.dec.cfg.metadata_config_flag); + pbuf += sprintf(pbuf, "parm_v4l_duration:%d;", + ctx->config.parm.dec.cfg.duration); + ctx->config.length = pbuf - ctx->config.buf; + } else { + ctx->config.parm.dec.cfg.double_write_mode = 1; + ctx->config.parm.dec.cfg.ref_buf_margin = 7; + ctx->config.length = vdec_config_default_parms(ctx->config.buf); + } + + inst->vdec.config = ctx->config; + inst->parms.cfg = ctx->config.parm.dec.cfg; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO; +} + +static int vdec_hevc_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec) +{ + struct vdec_hevc_inst *inst = NULL; + int ret = -1; + + inst = kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + inst->vdec.frm_name = "H.265"; + inst->vdec.video_type = VFORMAT_HEVC; + inst->vdec.filp = ctx->dev->filp; + inst->vdec.ctx = ctx; + inst->ctx = ctx; + + vdec_parser_parms(inst); + + /* set play mode.*/ + if (ctx->is_drm_mode) + inst->vdec.port.flag |= PORT_FLAG_DRM; + + /* to eable hevc hw.*/ + inst->vdec.port.type = PORT_TYPE_HEVC; + + /* probe info from the stream */ + inst->vsi = kzalloc(sizeof(struct vdec_hevc_vsi), GFP_KERNEL); + if (!inst->vsi) { + ret = -ENOMEM; + goto err; + } + + /* alloc the header buffer to be used cache sps or spp etc.*/ + inst->vsi->header_buf = vzalloc(HEADER_BUFFER_SIZE); + if (!inst->vsi->header_buf) { + ret = -ENOMEM; + goto err; + } + + init_completion(&inst->comp); + + ctx->ada_ctx = &inst->vdec; + *h_vdec = (unsigned long)inst; + + ret = video_decoder_init(&inst->vdec); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "vdec_hevc init err=%d\n", ret); + goto err; + } + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "hevc Instance >> %lx\n", (ulong) inst); + + return 0; +err: + if (inst && inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + if 
(inst && inst->vsi) + kfree(inst->vsi); + if (inst) + kfree(inst); + *h_vdec = 0; + + return ret; +} + + +static int refer_buffer_num(struct h265_SPS_t *sps) +{ + int used_buf_num = 0; + int sps_pic_buf_diff = 0; + + if ((!sps->temporal_layer[0].num_reorder_pics) && + (sps->temporal_layer[0].max_dec_pic_buffering)) { + /* the range of sps_num_reorder_pics_0 is in + [0, sps_max_dec_pic_buffering_minus1_0] */ + used_buf_num = sps->temporal_layer[0].max_dec_pic_buffering; + } else + used_buf_num = sps->temporal_layer[0].num_reorder_pics; + + sps_pic_buf_diff = sps->temporal_layer[0].max_dec_pic_buffering - + sps->temporal_layer[0].num_reorder_pics - 1; + + if (sps_pic_buf_diff >= 4) + used_buf_num += 1; + + /*need one more for multi instance, as + apply_ref_pic_set() has no chanch to run to + to clear referenced flag in some case */ + used_buf_num++; + + /* for eos add more buffer to flush.*/ + used_buf_num++; + + return used_buf_num; +} + +static int vdec_get_dw_mode(struct vdec_hevc_inst *inst, int dw_mode) +{ + u32 valid_dw_mode = inst->parms.cfg.double_write_mode; + int w = inst->vsi->pic.coded_width; + int h = inst->vsi->pic.coded_height; + u32 dw = 0x1; /*1:1*/ + + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + default: + dw = valid_dw_mode; + break; + } + + return dw; +} + +static int vdec_pic_scale(struct vdec_hevc_inst *inst, int length, int dw_mode) +{ + int ret = 64; + + switch (vdec_get_dw_mode(inst, dw_mode)) { + case 0x0: /* only afbc, output afbc */ + ret = 64; + break; + case 0x1: /* afbc and (w x h), output YUV420 */ + ret = length; + break; + case 0x2: /* afbc and (w/4 x h/4), output YUV420 */ + case 0x3: /* afbc and (w/4 x h/4), output afbc and YUV420 */ + ret = length >> 2; + break; + case 0x4: /* afbc and (w/2 x h/2), output YUV420 */ + ret = length >> 1; + 
break; + case 0x10: /* (w x h), output YUV420-8bit)*/ + default: + ret = length; + break; + } + + return ret; +} + +static void fill_vdec_params(struct vdec_hevc_inst *inst, struct h265_SPS_t *sps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct vdec_hevc_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + int dw = inst->parms.cfg.double_write_mode; + int margin = inst->parms.cfg.ref_buf_margin; + + /* fill visible area size that be used for EGL. */ + pic->visible_width = sps->width - (sps->output_window.left_offset + + sps->output_window.right_offset); + pic->visible_height = sps->height - (sps->output_window.top_offset + + sps->output_window.bottom_offset); + pic->visible_width = vdec_pic_scale(inst, pic->visible_width, dw); + pic->visible_height = vdec_pic_scale(inst, pic->visible_height, dw); + + /* calc visible ares. */ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. 
*/ + pic->coded_width = vdec_pic_scale(inst, ALIGN(sps->width, 32), dw); + pic->coded_height = vdec_pic_scale(inst, ALIGN(sps->height, 32), dw); + + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz >> 1; + + /* calc DPB size */ + dec->dpb_sz = refer_buffer_num(sps) + margin; + + inst->parms.ps.visible_width = pic->visible_width; + inst->parms.ps.visible_height = pic->visible_height; + inst->parms.ps.coded_width = pic->coded_width; + inst->parms.ps.coded_height = pic->coded_height; + inst->parms.ps.dpb_size = dec->dpb_sz; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_PSINFO; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_BUFMGR, + "The stream infos, dw: %d, coded:(%d x %d), visible:(%d x %d), DPB: %d, margin: %d\n", + dw, pic->coded_width, pic->coded_height, + pic->visible_width, pic->visible_height, + dec->dpb_sz - margin, margin); +} + +static int parse_stream_ucode(struct vdec_hevc_inst *inst, + u8 *buf, u32 size, u64 timestamp) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write(vdec, buf, size, timestamp, 0); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 0 : -1; +} + +static int parse_stream_ucode_dma(struct vdec_hevc_inst *inst, + ulong buf, u32 size, u64 timestamp, u32 handle) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle, + vdec_vframe_input_free, inst->ctx); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 
0 : -1; +} + +static int parse_stream_cpu(struct vdec_hevc_inst *inst, u8 *buf, u32 size) +{ + int ret = 0; + struct h265_param_sets *ps = NULL; + + ps = vzalloc(sizeof(struct h265_param_sets)); + if (ps == NULL) + return -ENOMEM; + + ret = h265_decode_extradata_ps(buf, size, ps); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "parse extra data failed. err: %d\n", ret); + goto out; + } + + if (ps->sps_parsed) + fill_vdec_params(inst, &ps->sps); + + ret = ps->sps_parsed ? 0 : -1; +out: + vfree(ps); + + return ret; +} + +static int vdec_hevc_probe(unsigned long h_vdec, + struct aml_vcodec_mem *bs, void *out) +{ + struct vdec_hevc_inst *inst = + (struct vdec_hevc_inst *)h_vdec; + u8 *buf = (u8 *)bs->vaddr; + u32 size = bs->size; + int ret = 0; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if ((s->magic != AML_VIDEO_MAGIC) && + (s->type != V4L_STREAM_TYPE_MATEDATA)) + return -1; + + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, s->data, + s->len, bs->timestamp); + } else { + ret = parse_stream_cpu(inst, s->data, s->len); + } + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = parse_stream_ucode_dma(inst, bs->addr, size, + bs->timestamp, BUFF_IDX(bs, bs->index)); + } + } else { + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, buf, size, bs->timestamp); + } else { + ret = parse_stream_cpu(inst, buf, size); + } + } + + inst->vsi->cur_pic = inst->vsi->pic; + + return ret; +} + +static void vdec_hevc_deinit(unsigned long h_vdec) +{ + struct vdec_hevc_inst *inst = (struct vdec_hevc_inst *)h_vdec; + struct aml_vcodec_ctx *ctx = inst->ctx; + + video_decoder_release(&inst->vdec); + + if (inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + + if (inst->vsi) + kfree(inst->vsi); + + kfree(inst); + + ctx->drv_handle = 0; +} + +static int vdec_write_nalu(struct 
vdec_hevc_inst *inst, + u8 *buf, u32 size, u64 ts) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write(vdec, buf, size, ts, 0); + + return ret; +} + +static bool monitor_res_change(struct vdec_hevc_inst *inst, u8 *buf, u32 size) +{ + int ret = 0, i = 0, j = 0; + u8 *p = buf; + int len = size; + u32 type; + + for (i = 4; i < size; i++) { + j = find_start_code(p, len); + if (j > 0) { + len = size - (p - buf); + type = HEVC_NAL_TYPE(p[j]); + if (type != HEVC_NAL_AUD && + (type > HEVC_NAL_PPS || type < HEVC_NAL_VPS)) + break; + + if (type == HEVC_NAL_SPS) { + ret = parse_stream_cpu(inst, p, len); + if (ret) + break; + } + p += j; + } + p++; + } + + if (!ret && (inst->vsi->cur_pic.coded_width != + inst->vsi->pic.coded_width || + inst->vsi->cur_pic.coded_height != + inst->vsi->pic.coded_height)) { + inst->vsi->cur_pic = inst->vsi->pic; + return true; + } + + return false; +} + +static int vdec_hevc_decode(unsigned long h_vdec, + struct aml_vcodec_mem *bs, bool *res_chg) +{ + struct vdec_hevc_inst *inst = (struct vdec_hevc_inst *)h_vdec; + struct aml_vdec_adapt *vdec = &inst->vdec; + u8 *buf = (u8 *) bs->vaddr; + u32 size = bs->size; + int ret = -1; + + if (bs == NULL) + return -1; + + if (vdec_input_full(vdec)) + return -EAGAIN; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if (s->magic != AML_VIDEO_MAGIC) + return -1; + + if (!inst->ctx->param_sets_from_ucode && + (s->type == V4L_STREAM_TYPE_MATEDATA)) { + if ((*res_chg = monitor_res_change(inst, + s->data, s->len))) + return 0; + } + + ret = vdec_vframe_write(vdec, + s->data, + s->len, + bs->timestamp, + 0); + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = vdec_vframe_write_with_dma(vdec, + bs->addr, size, bs->timestamp, + BUFF_IDX(bs, bs->index), + vdec_vframe_input_free, inst->ctx); + } + } else { + if (!inst->ctx->param_sets_from_ucode) { + 
/*checked whether the resolution changes.*/ + if ((*res_chg = monitor_res_change(inst, buf, size))) + return 0; + } + ret = vdec_write_nalu(inst, buf, size, bs->timestamp); + } + + return ret; +} + + static void get_param_config_info(struct vdec_hevc_inst *inst, + struct aml_dec_params *parms) + { + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CFGINFO) + parms->cfg = inst->parms.cfg; + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_PSINFO) + parms->ps = inst->parms.ps; + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_HDRINFO) + parms->hdr = inst->parms.hdr; + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CNTINFO) + parms->cnt = inst->parms.cnt; + + parms->parms_status |= inst->parms.parms_status; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "parms status: %u\n", parms->parms_status); + } + +static void get_param_comp_buf_info(struct vdec_hevc_inst *inst, + struct vdec_comp_buf_info *params) +{ + memcpy(params, &inst->comp_info, sizeof(*params)); +} + +static int vdec_hevc_get_param(unsigned long h_vdec, + enum vdec_get_param_type type, void *out) +{ + int ret = 0; + struct vdec_hevc_inst *inst = (struct vdec_hevc_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "the hevc inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case GET_PARAM_PIC_INFO: + get_pic_info(inst, out); + break; + + case GET_PARAM_DPB_SIZE: + get_dpb_size(inst, out); + break; + + case GET_PARAM_CROP_INFO: + get_crop_info(inst, out); + break; + + case GET_PARAM_CONFIG_INFO: + get_param_config_info(inst, out); + break; + + case GET_PARAM_DW_MODE: + { + u32 *mode = out; + u32 m = inst->ctx->config.parm.dec.cfg.double_write_mode; + if (m <= 16) + *mode = inst->ctx->config.parm.dec.cfg.double_write_mode; + else + *mode = vdec_get_dw_mode(inst, 0); + break; + } + case GET_PARAM_COMP_BUF_INFO: + get_param_comp_buf_info(inst, out); + break; + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid get parameter 
type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static void set_param_write_sync(struct vdec_hevc_inst *inst) +{ + complete(&inst->comp); +} + +static void set_param_ps_info(struct vdec_hevc_inst *inst, + struct aml_vdec_ps_infos *ps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct vdec_hevc_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + int dw = inst->parms.cfg.double_write_mode; + + /* fill visible area size that be used for EGL. */ + pic->visible_width = ps->visible_width; + pic->visible_height = ps->visible_height; + + /* calc visible ares. */ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. */ + + pic->coded_width = ps->coded_width; + pic->coded_height = ps->coded_height; + + pic->y_len_sz = ALIGN(vdec_pic_scale(inst, pic->coded_width, dw), 64) * + ALIGN(vdec_pic_scale(inst, pic->coded_height, dw), 64); + pic->c_len_sz = pic->y_len_sz >> 1; + + pic->dpb_frames = ps->dpb_frames; + pic->dpb_margin = ps->dpb_margin; + pic->vpp_margin = ps->dpb_margin; + dec->dpb_sz = ps->dpb_size; + pic->field = ps->field; + + inst->parms.ps = *ps; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_PSINFO; + + /*wake up*/ + complete(&inst->comp); + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "Parse from ucode, visible(%d x %d), coded(%d x %d), scan:%s\n", + pic->visible_width, pic->visible_height, + pic->coded_width, pic->coded_height, + pic->field == V4L2_FIELD_NONE ? 
"P" : "I"); +} + +static void set_cfg_info(struct vdec_hevc_inst *inst, + struct aml_vdec_cfg_infos *cfg) +{ + memcpy(&inst->ctx->config.parm.dec.cfg, + cfg, sizeof(struct aml_vdec_cfg_infos)); + memcpy(&inst->parms.cfg, + cfg, sizeof(struct aml_vdec_cfg_infos)); +} + +static void set_param_comp_buf_info(struct vdec_hevc_inst *inst, + struct vdec_comp_buf_info *info) +{ + memcpy(&inst->comp_info, info, sizeof(*info)); +} + +static void set_param_hdr_info(struct vdec_hevc_inst *inst, + struct aml_vdec_hdr_infos *hdr) +{ + if (!(inst->parms.parms_status & + V4L2_CONFIG_PARM_DECODE_HDRINFO)) { + inst->parms.hdr = *hdr; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_HDRINFO; + aml_vdec_dispatch_event(inst->ctx, + V4L2_EVENT_SRC_CH_HDRINFO); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "H265 set HDR infos\n"); + } +} + +static void set_param_post_event(struct vdec_hevc_inst *inst, u32 *event) +{ + aml_vdec_dispatch_event(inst->ctx, *event); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "H265 post event: %d\n", *event); +} + +static void set_pic_info(struct vdec_hevc_inst *inst, + struct vdec_pic_info *pic) +{ + inst->vsi->pic = *pic; +} + +static int vdec_hevc_set_param(unsigned long h_vdec, + enum vdec_set_param_type type, void *in) +{ + int ret = 0; + struct vdec_hevc_inst *inst = (struct vdec_hevc_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "the hevc inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case SET_PARAM_WRITE_FRAME_SYNC: + set_param_write_sync(inst); + break; + + case SET_PARAM_CFG_INFO: + set_cfg_info(inst, in); + break; + + case SET_PARAM_PS_INFO: + set_param_ps_info(inst, in); + break; + + case SET_PARAM_COMP_BUF_INFO: + set_param_comp_buf_info(inst, in); + break; + + case SET_PARAM_HDR_INFO: + set_param_hdr_info(inst, in); + break; + + case SET_PARAM_POST_EVENT: + set_param_post_event(inst, in); + break; + + case SET_PARAM_PIC_INFO: + set_pic_info(inst, in); + break; + default: + 
v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid set parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static struct vdec_common_if vdec_hevc_if = { + .init = vdec_hevc_init, + .probe = vdec_hevc_probe, + .decode = vdec_hevc_decode, + .get_param = vdec_hevc_get_param, + .set_param = vdec_hevc_set_param, + .deinit = vdec_hevc_deinit, +}; + +struct vdec_common_if *get_hevc_dec_comm_if(void); + +struct vdec_common_if *get_hevc_dec_comm_if(void) +{ + return &vdec_hevc_if; +}
diff --git a/drivers/amvdec_ports/decoder/vdec_mjpeg_if.c b/drivers/amvdec_ports/decoder/vdec_mjpeg_if.c new file mode 100644 index 0000000..8332956 --- /dev/null +++ b/drivers/amvdec_ports/decoder/vdec_mjpeg_if.c
@@ -0,0 +1,620 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <uapi/linux/swab.h> +#include "../vdec_drv_if.h" +#include "../aml_vcodec_util.h" +#include "../aml_vcodec_dec.h" +#include "../aml_vcodec_adapt.h" +#include "../vdec_drv_base.h" +#include "aml_mjpeg_parser.h" +#include <media/v4l2-mem2mem.h> + +#define NAL_TYPE(value) ((value) & 0x1F) +#define HEADER_BUFFER_SIZE (32 * 1024) + +/** + * struct mjpeg_fb - mjpeg decode frame buffer information + * @vdec_fb_va : virtual address of struct vdec_fb + * @y_fb_dma : dma address of Y frame buffer (luma) + * @c_fb_dma : dma address of C frame buffer (chroma) + * @poc : picture order count of frame buffer + * @reserved : for 8 bytes alignment + */ +struct mjpeg_fb { + uint64_t vdec_fb_va; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + int32_t poc; + uint32_t reserved; +}; + +/** + * struct vdec_mjpeg_dec_info - decode information + * @dpb_sz : decoding picture buffer size + * @resolution_changed : resoltion change happen + * @reserved : for 8 bytes alignment + * @bs_dma : Input bit-stream buffer dma address + * @y_fb_dma : Y frame buffer 
dma address + * @c_fb_dma : C frame buffer dma address + * @vdec_fb_va : VDEC frame buffer struct virtual address + */ +struct vdec_mjpeg_dec_info { + uint32_t dpb_sz; + uint32_t resolution_changed; + uint32_t reserved; + uint64_t bs_dma; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + uint64_t vdec_fb_va; +}; + +/** + * struct vdec_mjpeg_vsi - shared memory for decode information exchange + * between VPU and Host. + * The memory is allocated by VPU then mapping to Host + * in vpu_dec_init() and freed in vpu_dec_deinit() + * by VPU. + * AP-W/R : AP is writer/reader on this item + * VPU-W/R: VPU is write/reader on this item + * @hdr_buf : Header parsing buffer (AP-W, VPU-R) + * @list_free : free frame buffer ring list (AP-W/R, VPU-W) + * @list_disp : display frame buffer ring list (AP-R, VPU-W) + * @dec : decode information (AP-R, VPU-W) + * @pic : picture information (AP-R, VPU-W) + * @crop : crop information (AP-R, VPU-W) + */ +struct vdec_mjpeg_vsi { + char *header_buf; + int sps_size; + int pps_size; + int sei_size; + int head_offset; + struct vdec_mjpeg_dec_info dec; + struct vdec_pic_info pic; + struct vdec_pic_info cur_pic; + struct v4l2_rect crop; + bool is_combine; + int nalu_pos; + //struct mjpeg_param_sets ps; +}; + +/** + * struct vdec_mjpeg_inst - mjpeg decoder instance + * @num_nalu : how many nalus be decoded + * @ctx : point to aml_vcodec_ctx + * @vsi : VPU shared information + */ +struct vdec_mjpeg_inst { + unsigned int num_nalu; + struct aml_vcodec_ctx *ctx; + struct aml_vdec_adapt vdec; + struct vdec_mjpeg_vsi *vsi; + struct aml_dec_params parms; + struct completion comp; +}; + +static void get_pic_info(struct vdec_mjpeg_inst *inst, + struct vdec_pic_info *pic) +{ + *pic = inst->vsi->pic; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "pic(%d, %d), buf(%d, %d)\n", + pic->visible_width, pic->visible_height, + pic->coded_width, pic->coded_height); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "Y(%d, %d), C(%d, %d)\n", + pic->y_bs_sz, pic->y_len_sz, + 
pic->c_bs_sz, pic->c_len_sz); +} + +static void get_crop_info(struct vdec_mjpeg_inst *inst, struct v4l2_rect *cr) +{ + cr->left = inst->vsi->crop.left; + cr->top = inst->vsi->crop.top; + cr->width = inst->vsi->crop.width; + cr->height = inst->vsi->crop.height; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "l=%d, t=%d, w=%d, h=%d\n", + cr->left, cr->top, cr->width, cr->height); +} + +static void get_dpb_size(struct vdec_mjpeg_inst *inst, unsigned int *dpb_sz) +{ + *dpb_sz = inst->vsi->dec.dpb_sz; + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "sz=%d\n", *dpb_sz); +} + +static u32 vdec_config_default_parms(u8 *parm) +{ + u8 *pbuf = parm; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;"); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:0;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:0;"); + + return pbuf - parm; +} + +static void vdec_parser_parms(struct vdec_mjpeg_inst *inst) +{ + struct aml_vcodec_ctx *ctx = inst->ctx; + + if (ctx->config.parm.dec.parms_status & + V4L2_CONFIG_PARM_DECODE_CFGINFO) { + u8 *pbuf = ctx->config.buf; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;", + ctx->config.parm.dec.cfg.canvas_mem_mode); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;", + ctx->config.parm.dec.cfg.ref_buf_margin); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:%d;", + ctx->config.parm.dec.cfg.canvas_mem_endian); + pbuf += sprintf(pbuf, "parm_v4l_duration:%d;", + ctx->config.parm.dec.cfg.duration); + ctx->config.length = pbuf - ctx->config.buf; + } else { + ctx->config.length = vdec_config_default_parms(ctx->config.buf); + } + + inst->vdec.config = ctx->config; + inst->parms.cfg = ctx->config.parm.dec.cfg; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO; +} + + +static int vdec_mjpeg_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec) +{ + struct vdec_mjpeg_inst *inst = NULL; + int ret = -1; + + inst = 
kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + inst->vdec.frm_name = "MJPEG"; + inst->vdec.video_type = VFORMAT_MJPEG; + inst->vdec.filp = ctx->dev->filp; + inst->vdec.config = ctx->config; + inst->vdec.ctx = ctx; + inst->ctx = ctx; + + vdec_parser_parms(inst); + /* set play mode.*/ + if (ctx->is_drm_mode) + inst->vdec.port.flag |= PORT_FLAG_DRM; + + /* to eable mjpeg hw.*/ + inst->vdec.port.type = PORT_TYPE_VIDEO; + + /* probe info from the stream */ + inst->vsi = kzalloc(sizeof(struct vdec_mjpeg_vsi), GFP_KERNEL); + if (!inst->vsi) { + ret = -ENOMEM; + goto err; + } + + /* alloc the header buffer to be used cache sps or spp etc.*/ + inst->vsi->header_buf = vzalloc(HEADER_BUFFER_SIZE); + if (!inst->vsi->header_buf) { + ret = -ENOMEM; + goto err; + } + + init_completion(&inst->comp); + ctx->ada_ctx = &inst->vdec; + *h_vdec = (unsigned long)inst; + + ret = video_decoder_init(&inst->vdec); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "vdec_mjpeg init err=%d\n", ret); + goto err; + } + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "mjpeg Instance >> %lx\n", (ulong) inst); + + return 0; + +err: + if (inst && inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + if (inst && inst->vsi) + kfree(inst->vsi); + if (inst) + kfree(inst); + *h_vdec = 0; + + return ret; +} + +#if 0 +static int refer_buffer_num(int level_idc, int poc_cnt, + int mb_width, int mb_height) +{ + return 20; +} +#endif + +static void fill_vdec_params(struct vdec_mjpeg_inst *inst, + struct MJpegDecodeContext *ps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct vdec_mjpeg_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + + /* fill visible area size that be used for EGL. */ + pic->visible_width = ps->width; + pic->visible_height = ps->height; + + /* calc visible ares. 
*/ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. */ + pic->coded_width = ALIGN(ps->width, 64); + pic->coded_height = ALIGN(ps->height, 64); + + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz; + + /*8(DECODE_BUFFER_NUM_DEF) */ + dec->dpb_sz = 8; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_BUFMGR, + "The stream infos, coded:(%d x %d), visible:(%d x %d)\n", + pic->coded_width, pic->coded_height, + pic->visible_width, pic->visible_height); +} + +static int parse_stream_ucode(struct vdec_mjpeg_inst *inst, + u8 *buf, u32 size, u64 timestamp) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write(vdec, buf, size, timestamp, 0); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 0 : -1; +} + +static int parse_stream_ucode_dma(struct vdec_mjpeg_inst *inst, + ulong buf, u32 size, u64 timestamp, u32 handle) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle, + vdec_vframe_input_free, inst->ctx); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 
0 : -1; +} + +static int parse_stream_cpu(struct vdec_mjpeg_inst *inst, u8 *buf, u32 size) +{ + int ret = 0; + struct mjpeg_param_sets *ps = NULL; + + ps = kzalloc(sizeof(struct mjpeg_param_sets), GFP_KERNEL); + if (ps == NULL) + return -ENOMEM; + + ret = mjpeg_decode_extradata_ps(buf, size, ps); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "parse extra data failed. err: %d\n", ret); + goto out; + } + + if (ps->head_parsed) + fill_vdec_params(inst, &ps->dec_ps); + + ret = ps->head_parsed ? 0 : -1; +out: + kfree(ps); + + return ret; +} + +static int vdec_mjpeg_probe(unsigned long h_vdec, + struct aml_vcodec_mem *bs, void *out) +{ + struct vdec_mjpeg_inst *inst = + (struct vdec_mjpeg_inst *)h_vdec; + u8 *buf = (u8 *)bs->vaddr; + u32 size = bs->size; + int ret = 0; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if ((s->magic != AML_VIDEO_MAGIC) && + (s->type != V4L_STREAM_TYPE_MATEDATA)) + return -1; + + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, s->data, + s->len, bs->timestamp); + } else { + ret = parse_stream_cpu(inst, s->data, s->len); + } + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = parse_stream_ucode_dma(inst, bs->addr, size, + bs->timestamp, BUFF_IDX(bs, bs->index)); + } + } else { + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, buf, size, bs->timestamp); + } else { + ret = parse_stream_cpu(inst, buf, size); + } + } + + inst->vsi->cur_pic = inst->vsi->pic; + + return ret; +} + +static void vdec_mjpeg_deinit(unsigned long h_vdec) +{ + struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec; + + if (!inst) + return; + + video_decoder_release(&inst->vdec); + + if (inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + + if (inst->vsi) + kfree(inst->vsi); + + kfree(inst); +} + +static int vdec_write_nalu(struct vdec_mjpeg_inst *inst, 
+ u8 *buf, u32 size, u64 ts) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write(vdec, buf, size, ts, 0); + + return ret; +} + +static int vdec_mjpeg_decode(unsigned long h_vdec, + struct aml_vcodec_mem *bs, bool *res_chg) +{ + struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec; + struct aml_vdec_adapt *vdec = &inst->vdec; + u8 *buf = (u8 *) bs->vaddr; + u32 size = bs->size; + int ret = -1; + + if (vdec_input_full(vdec)) + return -EAGAIN; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if (s->magic != AML_VIDEO_MAGIC) + return -1; + + ret = vdec_vframe_write(vdec, + s->data, + s->len, + bs->timestamp, + 0); + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = vdec_vframe_write_with_dma(vdec, + bs->addr, size, bs->timestamp, + BUFF_IDX(bs, bs->index), + vdec_vframe_input_free, inst->ctx); + } + } else { + ret = vdec_write_nalu(inst, buf, size, bs->timestamp); + } + + return ret; +} + +static int vdec_mjpeg_get_param(unsigned long h_vdec, + enum vdec_get_param_type type, void *out) +{ + int ret = 0; + struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "the mjpeg inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case GET_PARAM_PIC_INFO: + get_pic_info(inst, out); + break; + + case GET_PARAM_DPB_SIZE: + get_dpb_size(inst, out); + break; + + case GET_PARAM_CROP_INFO: + get_crop_info(inst, out); + break; + + case GET_PARAM_DW_MODE: + { + unsigned int* mode = out; + *mode = VDEC_DW_NO_AFBC; + break; + } + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid get parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static void set_param_ps_info(struct vdec_mjpeg_inst *inst, + struct aml_vdec_ps_infos *ps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct 
vdec_mjpeg_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, "%s in\n", __func__); + /* fill visible area size that be used for EGL. */ + pic->visible_width = ps->visible_width; + pic->visible_height = ps->visible_height; + + /* calc visible ares. */ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. */ + pic->coded_width = ps->coded_width; + pic->coded_height = ps->coded_height; + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz >> 1; + + pic->dpb_frames = ps->dpb_frames; + pic->dpb_margin = ps->dpb_margin; + pic->vpp_margin = ps->dpb_margin; + dec->dpb_sz = ps->dpb_size; + pic->field = ps->field; + + inst->parms.ps = *ps; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_PSINFO; + + /*wake up*/ + complete(&inst->comp); + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "Parse from ucode, visible(%d x %d), coded(%d x %d), scan:%s\n", + ps->visible_width, ps->visible_height, + ps->coded_width, ps->coded_height, + pic->field == V4L2_FIELD_NONE ? 
"P" : "I"); +} + +static void set_param_write_sync(struct vdec_mjpeg_inst *inst) +{ + complete(&inst->comp); +} + +static void set_pic_info(struct vdec_mjpeg_inst *inst, + struct vdec_pic_info *pic) +{ + inst->vsi->pic = *pic; +} + +static int vdec_mjpeg_set_param(unsigned long h_vdec, + enum vdec_set_param_type type, void *in) +{ + int ret = 0; + struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "the mjpeg inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case SET_PARAM_WRITE_FRAME_SYNC: + set_param_write_sync(inst); + break; + + case SET_PARAM_PS_INFO: + set_param_ps_info(inst, in); + break; + + case SET_PARAM_PIC_INFO: + set_pic_info(inst, in); + break; + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid set parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static struct vdec_common_if vdec_mjpeg_if = { + .init = vdec_mjpeg_init, + .probe = vdec_mjpeg_probe, + .decode = vdec_mjpeg_decode, + .get_param = vdec_mjpeg_get_param, + .set_param = vdec_mjpeg_set_param, + .deinit = vdec_mjpeg_deinit, +}; + +struct vdec_common_if *get_mjpeg_dec_comm_if(void); + +struct vdec_common_if *get_mjpeg_dec_comm_if(void) +{ + return &vdec_mjpeg_if; +}
diff --git a/drivers/amvdec_ports/decoder/vdec_mpeg12_if.c b/drivers/amvdec_ports/decoder/vdec_mpeg12_if.c new file mode 100644 index 0000000..2472ac1 --- /dev/null +++ b/drivers/amvdec_ports/decoder/vdec_mpeg12_if.c
@@ -0,0 +1,645 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <uapi/linux/swab.h> +#include "../vdec_drv_if.h" +#include "../aml_vcodec_util.h" +#include "../aml_vcodec_dec.h" +#include "../aml_vcodec_adapt.h" +#include "../vdec_drv_base.h" +#include "aml_mpeg12_parser.h" + +#define NAL_TYPE(value) ((value) & 0x1F) +#define HEADER_BUFFER_SIZE (32 * 1024) + +/** + * struct mpeg12_fb - mpeg12 decode frame buffer information + * @vdec_fb_va : virtual address of struct vdec_fb + * @y_fb_dma : dma address of Y frame buffer (luma) + * @c_fb_dma : dma address of C frame buffer (chroma) + * @poc : picture order count of frame buffer + * @reserved : for 8 bytes alignment + */ +struct mpeg12_fb { + uint64_t vdec_fb_va; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + int32_t poc; + uint32_t reserved; +}; + +/** + * struct vdec_mpeg12_dec_info - decode information + * @dpb_sz : decoding picture buffer size + * @resolution_changed : resoltion change happen + * @reserved : for 8 bytes alignment + * @bs_dma : Input bit-stream buffer dma address + * @y_fb_dma : Y frame buffer dma address + * @c_fb_dma : C 
frame buffer dma address + * @vdec_fb_va : VDEC frame buffer struct virtual address + */ +struct vdec_mpeg12_dec_info { + uint32_t dpb_sz; + uint32_t resolution_changed; + uint32_t reserved; + uint64_t bs_dma; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + uint64_t vdec_fb_va; +}; + +/** + * struct vdec_mpeg12_vsi - shared memory for decode information exchange + * between VPU and Host. + * The memory is allocated by VPU then mapping to Host + * in vpu_dec_init() and freed in vpu_dec_deinit() + * by VPU. + * AP-W/R : AP is writer/reader on this item + * VPU-W/R: VPU is write/reader on this item + * @hdr_buf : Header parsing buffer (AP-W, VPU-R) + * @list_free : free frame buffer ring list (AP-W/R, VPU-W) + * @list_disp : display frame buffer ring list (AP-R, VPU-W) + * @dec : decode information (AP-R, VPU-W) + * @pic : picture information (AP-R, VPU-W) + * @crop : crop information (AP-R, VPU-W) + */ +struct vdec_mpeg12_vsi { + char *header_buf; + int sps_size; + int pps_size; + int sei_size; + int head_offset; + struct vdec_mpeg12_dec_info dec; + struct vdec_pic_info pic; + struct vdec_pic_info cur_pic; + struct v4l2_rect crop; + bool is_combine; + int nalu_pos; + //struct mpeg12_param_sets ps; +}; + +/** + * struct vdec_mpeg12_inst - mpeg12 decoder instance + * @num_nalu : how many nalus be decoded + * @ctx : point to aml_vcodec_ctx + * @vsi : VPU shared information + */ +struct vdec_mpeg12_inst { + unsigned int num_nalu; + struct aml_vcodec_ctx *ctx; + struct aml_vdec_adapt vdec; + struct vdec_mpeg12_vsi *vsi; + struct aml_dec_params parms; + struct completion comp; +}; + +static void get_pic_info(struct vdec_mpeg12_inst *inst, + struct vdec_pic_info *pic) +{ + *pic = inst->vsi->pic; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "pic(%d, %d), buf(%d, %d)\n", + pic->visible_width, pic->visible_height, + pic->coded_width, pic->coded_height); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "Y(%d, %d), C(%d, %d)\n", + pic->y_bs_sz, pic->y_len_sz, + pic->c_bs_sz, 
pic->c_len_sz); +} + +static void get_crop_info(struct vdec_mpeg12_inst *inst, struct v4l2_rect *cr) +{ + cr->left = inst->vsi->crop.left; + cr->top = inst->vsi->crop.top; + cr->width = inst->vsi->crop.width; + cr->height = inst->vsi->crop.height; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "l=%d, t=%d, w=%d, h=%d\n", + cr->left, cr->top, cr->width, cr->height); +} + +static void get_dpb_size(struct vdec_mpeg12_inst *inst, unsigned int *dpb_sz) +{ + *dpb_sz = inst->vsi->dec.dpb_sz; + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, "sz=%d\n", *dpb_sz); +} + +static u32 vdec_config_default_parms(u8 *parm) +{ + u8 *pbuf = parm; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;"); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:0;"); + + return pbuf - parm; +} + +static void vdec_parser_parms(struct vdec_mpeg12_inst *inst) +{ + struct aml_vcodec_ctx *ctx = inst->ctx; + + if (ctx->config.parm.dec.parms_status & + V4L2_CONFIG_PARM_DECODE_CFGINFO) { + u8 *pbuf = ctx->config.buf; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;", + ctx->config.parm.dec.cfg.canvas_mem_mode); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;", + ctx->config.parm.dec.cfg.ref_buf_margin); + pbuf += sprintf(pbuf, "parm_v4l_metadata_config_flag:%d;", + ctx->config.parm.dec.cfg.metadata_config_flag); + pbuf += sprintf(pbuf, "parm_v4l_duration:%d;", + ctx->config.parm.dec.cfg.duration); + ctx->config.length = pbuf - ctx->config.buf; + } else { + ctx->config.length = vdec_config_default_parms(ctx->config.buf); + } + + inst->vdec.config = ctx->config; + inst->parms.cfg = ctx->config.parm.dec.cfg; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO; +} + +static int vdec_mpeg12_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec) +{ + struct vdec_mpeg12_inst *inst = NULL; + int ret = -1; + + inst = kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + 
inst->vdec.frm_name = "MPEG2"; + inst->vdec.video_type = VFORMAT_MPEG12; + inst->vdec.filp = ctx->dev->filp; + inst->vdec.config = ctx->config; + inst->vdec.ctx = ctx; + inst->ctx = ctx; + + vdec_parser_parms(inst); + + /* set play mode.*/ + if (ctx->is_drm_mode) + inst->vdec.port.flag |= PORT_FLAG_DRM; + + /* to eable mpeg12 hw.*/ + inst->vdec.port.type = PORT_TYPE_VIDEO; + + /* probe info from the stream */ + inst->vsi = kzalloc(sizeof(struct vdec_mpeg12_vsi), GFP_KERNEL); + if (!inst->vsi) { + ret = -ENOMEM; + goto err; + } + + /* alloc the header buffer to be used cache sps or spp etc.*/ + inst->vsi->header_buf = vzalloc(HEADER_BUFFER_SIZE); + if (!inst->vsi->header_buf) { + ret = -ENOMEM; + goto err; + } + + init_completion(&inst->comp); + ctx->ada_ctx = &inst->vdec; + *h_vdec = (unsigned long)inst; + + ret = video_decoder_init(&inst->vdec); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "vdec_mpeg12 init err=%d\n", ret); + goto err; + } + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "mpeg12 Instance >> %lx\n", (ulong) inst); + + return 0; + +err: + if (inst && inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + if (inst && inst->vsi) + kfree(inst->vsi); + if (inst) + kfree(inst); + *h_vdec = 0; + + return ret; +} + +static void fill_vdec_params(struct vdec_mpeg12_inst *inst, + struct MpvParseContext *dec_ps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct vdec_mpeg12_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + + /* fill visible area size that be used for EGL. */ + pic->visible_width = dec_ps->width; + pic->visible_height = dec_ps->height; + + /* calc visible ares. */ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. 
*/ + pic->coded_width = ALIGN(dec_ps->coded_width, 64); + pic->coded_height = ALIGN(dec_ps->coded_height, 32); + + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz >> 1; + + /*7(parm_v4l_buffer_margin) + 8(DECODE_BUFFER_NUM_DEF)*/ + dec->dpb_sz = 15; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_BUFMGR, + "The stream infos, coded:(%d x %d), visible:(%d x %d), DPB: %d\n", + pic->coded_width, pic->coded_height, + pic->visible_width, pic->visible_height, dec->dpb_sz); +} + +static int parse_stream_ucode(struct vdec_mpeg12_inst *inst, + u8 *buf, u32 size, u64 timestamp) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write(vdec, buf, size, timestamp, 0); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 0 : -1; +} + +static int parse_stream_ucode_dma(struct vdec_mpeg12_inst *inst, + ulong buf, u32 size, u64 timestamp, u32 handle) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle, + vdec_vframe_input_free, inst->ctx); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 0 : -1; +} + +static int parse_stream_cpu(struct vdec_mpeg12_inst *inst, u8 *buf, u32 size) +{ + int ret = 0; + struct mpeg12_param_sets *ps = NULL; + + ps = kzalloc(sizeof(struct mpeg12_param_sets), GFP_KERNEL); + if (ps == NULL) + return -ENOMEM; + + ret = mpeg12_decode_extradata_ps(buf, size, ps); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "parse extra data failed. 
err: %d\n", ret); + goto out; + } + + if (ps->head_parsed) + fill_vdec_params(inst, &ps->dec_ps); + + ret = ps->head_parsed ? 0 : -1; +out: + kfree(ps); + + return ret; +} + +static int vdec_mpeg12_probe(unsigned long h_vdec, + struct aml_vcodec_mem *bs, void *out) +{ + struct vdec_mpeg12_inst *inst = + (struct vdec_mpeg12_inst *)h_vdec; + u8 *buf = (u8 *)bs->vaddr; + u32 size = bs->size; + int ret = 0; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if ((s->magic != AML_VIDEO_MAGIC) && + (s->type != V4L_STREAM_TYPE_MATEDATA)) + return -1; + + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, s->data, + s->len, bs->timestamp); + } else { + ret = parse_stream_cpu(inst, s->data, s->len); + } + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = parse_stream_ucode_dma(inst, bs->addr, size, + bs->timestamp, BUFF_IDX(bs, bs->index)); + } + } else { + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, buf, size, bs->timestamp); + } else { + ret = parse_stream_cpu(inst, buf, size); + } + } + + inst->vsi->cur_pic = inst->vsi->pic; + + return ret; +} + +static void vdec_mpeg12_deinit(unsigned long h_vdec) +{ + struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec; + + if (!inst) + return; + + video_decoder_release(&inst->vdec); + + if (inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + + if (inst->vsi) + kfree(inst->vsi); + + kfree(inst); +} + +static int vdec_write_nalu(struct vdec_mpeg12_inst *inst, + u8 *buf, u32 size, u64 ts) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write(vdec, buf, size, ts, 0); + + return ret; +} + +static int vdec_mpeg12_decode(unsigned long h_vdec, + struct aml_vcodec_mem *bs, bool *res_chg) +{ + struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec; + struct aml_vdec_adapt *vdec = 
&inst->vdec; + u8 *buf = (u8 *) bs->vaddr; + u32 size = bs->size; + int ret = -1; + + if (vdec_input_full(vdec)) + return -EAGAIN; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if (s->magic != AML_VIDEO_MAGIC) + return -1; + + ret = vdec_vframe_write(vdec, + s->data, + s->len, + bs->timestamp, + 0); + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = vdec_vframe_write_with_dma(vdec, + bs->addr, size, bs->timestamp, + BUFF_IDX(bs, bs->index), + vdec_vframe_input_free, inst->ctx); + } + } else { + ret = vdec_write_nalu(inst, buf, size, bs->timestamp); + } + + return ret; +} + +static void get_param_config_info(struct vdec_mpeg12_inst *inst, + struct aml_dec_params *parms) +{ + + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_HDRINFO) + parms->hdr = inst->parms.hdr; + + parms->parms_status |= inst->parms.parms_status; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "parms status: %u\n", parms->parms_status); +} + +static int vdec_mpeg12_get_param(unsigned long h_vdec, + enum vdec_get_param_type type, void *out) +{ + int ret = 0; + struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "the mpeg12 inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case GET_PARAM_PIC_INFO: + get_pic_info(inst, out); + break; + + case GET_PARAM_DPB_SIZE: + get_dpb_size(inst, out); + break; + + case GET_PARAM_CROP_INFO: + get_crop_info(inst, out); + break; + + case GET_PARAM_CONFIG_INFO: + get_param_config_info(inst, out); + break; + + case GET_PARAM_DW_MODE: + { + unsigned int* mode = out; + *mode = VDEC_DW_NO_AFBC; + break; + } + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid get parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static void set_param_write_sync(struct vdec_mpeg12_inst *inst) +{ + 
complete(&inst->comp); +} + +static void set_pic_info(struct vdec_mpeg12_inst *inst, + struct vdec_pic_info *pic) +{ + inst->vsi->pic = *pic; +} + +static void set_param_ps_info(struct vdec_mpeg12_inst *inst, + struct aml_vdec_ps_infos *ps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct vdec_mpeg12_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + + /* fill visible area size that be used for EGL. */ + pic->visible_width = ps->visible_width; + pic->visible_height = ps->visible_height; + + /* calc visible ares. */ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. */ + pic->coded_width = ps->coded_width; + pic->coded_height = ps->coded_height; + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz >> 1; + + pic->dpb_frames = ps->dpb_frames; + pic->dpb_margin = ps->dpb_margin; + pic->vpp_margin = ps->dpb_margin; + dec->dpb_sz = ps->dpb_size; + pic->field = ps->field; + + inst->parms.ps = *ps; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_PSINFO; + + /*wake up*/ + complete(&inst->comp); + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "Parse from ucode, visible(%d x %d), coded(%d x %d), scan:%s\n", + ps->visible_width, ps->visible_height, + ps->coded_width, ps->coded_height, + pic->field == V4L2_FIELD_NONE ? 
"P" : "I"); +} + +static void set_param_hdr_info(struct vdec_mpeg12_inst *inst, + struct aml_vdec_hdr_infos *hdr) +{ + inst->parms.hdr = *hdr; + if (!(inst->parms.parms_status & + V4L2_CONFIG_PARM_DECODE_HDRINFO)) { + inst->parms.hdr = *hdr; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_HDRINFO; + aml_vdec_dispatch_event(inst->ctx, + V4L2_EVENT_SRC_CH_HDRINFO); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "mpeg12 set HDR infos\n"); + } +} + +static int vdec_mpeg12_set_param(unsigned long h_vdec, + enum vdec_set_param_type type, void *in) +{ + int ret = 0; + struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "the mpeg12 inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case SET_PARAM_WRITE_FRAME_SYNC: + set_param_write_sync(inst); + break; + + case SET_PARAM_HDR_INFO: + set_param_hdr_info(inst, in); + break; + + case SET_PARAM_PS_INFO: + set_param_ps_info(inst, in); + break; + + case SET_PARAM_PIC_INFO: + set_pic_info(inst, in); + break; + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid set parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static struct vdec_common_if vdec_mpeg12_if = { + .init = vdec_mpeg12_init, + .probe = vdec_mpeg12_probe, + .decode = vdec_mpeg12_decode, + .get_param = vdec_mpeg12_get_param, + .set_param = vdec_mpeg12_set_param, + .deinit = vdec_mpeg12_deinit, +}; + +struct vdec_common_if *get_mpeg12_dec_comm_if(void); + +struct vdec_common_if *get_mpeg12_dec_comm_if(void) +{ + return &vdec_mpeg12_if; +}
diff --git a/drivers/amvdec_ports/decoder/vdec_mpeg4_if.c b/drivers/amvdec_ports/decoder/vdec_mpeg4_if.c new file mode 100644 index 0000000..bc0af24 --- /dev/null +++ b/drivers/amvdec_ports/decoder/vdec_mpeg4_if.c
@@ -0,0 +1,615 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <uapi/linux/swab.h> +#include "../vdec_drv_if.h" +#include "../aml_vcodec_util.h" +#include "../aml_vcodec_dec.h" +#include "../aml_vcodec_adapt.h" +#include "../vdec_drv_base.h" +#include "aml_mpeg4_parser.h" + +#define NAL_TYPE(value) ((value) & 0x1F) +#define HEADER_BUFFER_SIZE (32 * 1024) + +/** + * struct mpeg4_fb - mpeg4 decode frame buffer information + * @vdec_fb_va : virtual address of struct vdec_fb + * @y_fb_dma : dma address of Y frame buffer (luma) + * @c_fb_dma : dma address of C frame buffer (chroma) + * @poc : picture order count of frame buffer + * @reserved : for 8 bytes alignment + */ +struct mpeg4_fb { + uint64_t vdec_fb_va; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + int32_t poc; + uint32_t reserved; +}; + +/** + * struct vdec_mpeg4_dec_info - decode information + * @dpb_sz : decoding picture buffer size + * @resolution_changed : resoltion change happen + * @reserved : for 8 bytes alignment + * @bs_dma : Input bit-stream buffer dma address + * @y_fb_dma : Y frame buffer dma address + * @c_fb_dma : C 
frame buffer dma address + * @vdec_fb_va : VDEC frame buffer struct virtual address + */ +struct vdec_mpeg4_dec_info { + uint32_t dpb_sz; + uint32_t resolution_changed; + uint32_t reserved; + uint64_t bs_dma; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + uint64_t vdec_fb_va; +}; + +/** + * struct vdec_mpeg4_vsi - shared memory for decode information exchange + * between VPU and Host. + * The memory is allocated by VPU then mapping to Host + * in vpu_dec_init() and freed in vpu_dec_deinit() + * by VPU. + * AP-W/R : AP is writer/reader on this item + * VPU-W/R: VPU is write/reader on this item + * @hdr_buf : Header parsing buffer (AP-W, VPU-R) + * @list_free : free frame buffer ring list (AP-W/R, VPU-W) + * @list_disp : display frame buffer ring list (AP-R, VPU-W) + * @dec : decode information (AP-R, VPU-W) + * @pic : picture information (AP-R, VPU-W) + * @crop : crop information (AP-R, VPU-W) + */ +struct vdec_mpeg4_vsi { + char *header_buf; + int sps_size; + int pps_size; + int sei_size; + int head_offset; + struct vdec_mpeg4_dec_info dec; + struct vdec_pic_info pic; + struct vdec_pic_info cur_pic; + struct v4l2_rect crop; + bool is_combine; + int nalu_pos; + //struct mpeg4ParamSets ps; +}; + +/** + * struct vdec_mpeg4_inst - mpeg4 decoder instance + * @num_nalu : how many nalus be decoded + * @ctx : point to aml_vcodec_ctx + * @vsi : VPU shared information + */ +struct vdec_mpeg4_inst { + unsigned int num_nalu; + struct aml_vcodec_ctx *ctx; + struct aml_vdec_adapt vdec; + struct vdec_mpeg4_vsi *vsi; + struct aml_dec_params parms; + struct completion comp; +}; + +static void get_pic_info(struct vdec_mpeg4_inst *inst, + struct vdec_pic_info *pic) +{ + *pic = inst->vsi->pic; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "pic(%d, %d), buf(%d, %d)\n", + pic->visible_width, pic->visible_height, + pic->coded_width, pic->coded_height); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "Y(%d, %d), C(%d, %d)\n", + pic->y_bs_sz, pic->y_len_sz, + pic->c_bs_sz, pic->c_len_sz); 
+} + +static void get_crop_info(struct vdec_mpeg4_inst *inst, struct v4l2_rect *cr) +{ + cr->left = inst->vsi->crop.left; + cr->top = inst->vsi->crop.top; + cr->width = inst->vsi->crop.width; + cr->height = inst->vsi->crop.height; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "l=%d, t=%d, w=%d, h=%d\n", + cr->left, cr->top, cr->width, cr->height); +} + +static void get_dpb_size(struct vdec_mpeg4_inst *inst, unsigned int *dpb_sz) +{ + *dpb_sz = inst->vsi->dec.dpb_sz; + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, "sz=%d\n", *dpb_sz); +} + +static u32 vdec_config_default_parms(u8 *parm) +{ + u8 *pbuf = parm; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;"); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:0;"); + + return pbuf - parm; +} + +static void vdec_parser_parms(struct vdec_mpeg4_inst *inst) +{ + struct aml_vcodec_ctx *ctx = inst->ctx; + + if (ctx->config.parm.dec.parms_status & + V4L2_CONFIG_PARM_DECODE_CFGINFO) { + u8 *pbuf = ctx->config.buf; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;", + ctx->config.parm.dec.cfg.canvas_mem_mode); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;", + ctx->config.parm.dec.cfg.ref_buf_margin); + pbuf += sprintf(pbuf, "parm_v4l_duration:%d;", + ctx->config.parm.dec.cfg.duration); + ctx->config.length = pbuf - ctx->config.buf; + } else { + ctx->config.length = vdec_config_default_parms(ctx->config.buf); + } + + inst->vdec.config = ctx->config; + inst->parms.cfg = ctx->config.parm.dec.cfg; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO; +} + + +static int vdec_mpeg4_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec) +{ + struct vdec_mpeg4_inst *inst = NULL; + int ret = -1; + + inst = kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + inst->vdec.frm_name = "MPEG4"; + inst->vdec.video_type = VFORMAT_MPEG4; + inst->vdec.format = VIDEO_DEC_FORMAT_MPEG4_5; + 
inst->vdec.filp = ctx->dev->filp; + inst->vdec.config = ctx->config; + inst->vdec.ctx = ctx; + inst->ctx = ctx; + + vdec_parser_parms(inst); + /* set play mode.*/ + if (ctx->is_drm_mode) + inst->vdec.port.flag |= PORT_FLAG_DRM; + + /* to eable mpeg4 hw.*/ + inst->vdec.port.type = PORT_TYPE_VIDEO; + + /* probe info from the stream */ + inst->vsi = kzalloc(sizeof(struct vdec_mpeg4_vsi), GFP_KERNEL); + if (!inst->vsi) { + ret = -ENOMEM; + goto err; + } + + /* alloc the header buffer to be used cache sps or spp etc.*/ + inst->vsi->header_buf = vzalloc(HEADER_BUFFER_SIZE); + if (!inst->vsi->header_buf) { + ret = -ENOMEM; + goto err; + } + + init_completion(&inst->comp); + ctx->ada_ctx = &inst->vdec; + *h_vdec = (unsigned long)inst; + + ret = video_decoder_init(&inst->vdec); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "vdec_mpeg4 init err=%d\n", ret); + goto err; + } + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "mpeg4 Instance >> %lx\n", (ulong) inst); + + return 0; + +err: + if (inst && inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + if (inst && inst->vsi) + kfree(inst->vsi); + if (inst) + kfree(inst); + *h_vdec = 0; + + return ret; +} + +#if 0 +static int refer_buffer_num(int level_idc, int poc_cnt, + int mb_width, int mb_height) +{ + return 20; +} +#endif + +static void fill_vdec_params(struct vdec_mpeg4_inst *inst, + struct mpeg4_dec_param *dec_ps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct vdec_mpeg4_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + + /* fill visible area size that be used for EGL. */ + pic->visible_width = dec_ps->m.width; + pic->visible_height = dec_ps->m.height; + + /* calc visible ares. */ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. 
*/ + pic->coded_width = ALIGN(dec_ps->m.width, 64); + pic->coded_height = ALIGN(dec_ps->m.height, 64); + + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz >> 1; + + /*8(DECODE_BUFFER_NUM_DEF) */ + dec->dpb_sz = 8; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_BUFMGR, + "The stream infos, coded:(%d x %d), visible:(%d x %d), DPB: %d\n", + pic->coded_width, pic->coded_height, + pic->visible_width, pic->visible_height, dec->dpb_sz); +} + +static int parse_stream_ucode(struct vdec_mpeg4_inst *inst, + u8 *buf, u32 size, u64 timestamp) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write(vdec, buf, size, timestamp, 0); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 0 : -1; +} + +static int parse_stream_ucode_dma(struct vdec_mpeg4_inst *inst, + ulong buf, u32 size, u64 timestamp, u32 handle) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle, + vdec_vframe_input_free, inst->ctx); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 0 : -1; +} + +static int parse_stream_cpu(struct vdec_mpeg4_inst *inst, u8 *buf, u32 size) +{ + int ret = 0; + struct mpeg4_param_sets *ps = NULL; + + ps = kzalloc(sizeof(struct mpeg4_param_sets), GFP_KERNEL); + if (ps == NULL) + return -ENOMEM; + + ret = mpeg4_decode_extradata_ps(buf, size, ps); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "parse extra data failed. 
err: %d\n", ret); + goto out; + } + + if (ps->head_parsed) + fill_vdec_params(inst, &ps->dec_ps); + + ret = ps->head_parsed ? 0 : -1; +out: + kfree(ps); + + return ret; +} + +static int vdec_mpeg4_probe(unsigned long h_vdec, + struct aml_vcodec_mem *bs, void *out) +{ + struct vdec_mpeg4_inst *inst = + (struct vdec_mpeg4_inst *)h_vdec; + u8 *buf = (u8 *)bs->vaddr; + u32 size = bs->size; + int ret = 0; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if ((s->magic != AML_VIDEO_MAGIC) && + (s->type != V4L_STREAM_TYPE_MATEDATA)) + return -1; + + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, s->data, + s->len, bs->timestamp); + } else { + ret = parse_stream_cpu(inst, s->data, s->len); + } + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = parse_stream_ucode_dma(inst, bs->addr, size, + bs->timestamp, BUFF_IDX(bs, bs->index)); + } + } else { + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, buf, size, bs->timestamp); + } else { + ret = parse_stream_cpu(inst, buf, size); + } + } + + inst->vsi->cur_pic = inst->vsi->pic; + + return ret; +} + +static void vdec_mpeg4_deinit(unsigned long h_vdec) +{ + struct vdec_mpeg4_inst *inst = (struct vdec_mpeg4_inst *)h_vdec; + + if (!inst) + return; + + video_decoder_release(&inst->vdec); + + if (inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + + if (inst->vsi) + kfree(inst->vsi); + + kfree(inst); +} + +static int vdec_write_nalu(struct vdec_mpeg4_inst *inst, + u8 *buf, u32 size, u64 ts) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write(vdec, buf, size, ts, 0); + + return ret; +} + +static int vdec_mpeg4_decode(unsigned long h_vdec, + struct aml_vcodec_mem *bs, bool *res_chg) +{ + struct vdec_mpeg4_inst *inst = (struct vdec_mpeg4_inst *)h_vdec; + struct aml_vdec_adapt *vdec = &inst->vdec; + u8 
*buf = (u8 *) bs->vaddr; + u32 size = bs->size; + int ret = -1; + + if (vdec_input_full(vdec)) + return -EAGAIN; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if (s->magic != AML_VIDEO_MAGIC) + return -1; + + ret = vdec_vframe_write(vdec, + s->data, + s->len, + bs->timestamp, + 0); + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = vdec_vframe_write_with_dma(vdec, + bs->addr, size, bs->timestamp, + BUFF_IDX(bs, bs->index), + vdec_vframe_input_free, inst->ctx); + } + } else { + ret = vdec_write_nalu(inst, buf, size, bs->timestamp); + } + + return ret; +} + +static int vdec_mpeg4_get_param(unsigned long h_vdec, + enum vdec_get_param_type type, void *out) +{ + int ret = 0; + struct vdec_mpeg4_inst *inst = (struct vdec_mpeg4_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "the mpeg4 inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case GET_PARAM_PIC_INFO: + get_pic_info(inst, out); + break; + + case GET_PARAM_DPB_SIZE: + get_dpb_size(inst, out); + break; + + case GET_PARAM_CROP_INFO: + get_crop_info(inst, out); + break; + case GET_PARAM_DW_MODE: + { + unsigned int* mode = out; + *mode = VDEC_DW_NO_AFBC; + break; + } + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "invalid get parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static void set_param_ps_info(struct vdec_mpeg4_inst *inst, + struct aml_vdec_ps_infos *ps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct vdec_mpeg4_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, "%s in\n", __func__); + /* fill visible area size that be used for EGL. */ + pic->visible_width = ps->visible_width; + pic->visible_height = ps->visible_height; + + /* calc visible ares. 
*/ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. */ + pic->coded_width = ps->coded_width; + pic->coded_height = ps->coded_height; + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz >> 1; + + pic->dpb_frames = ps->dpb_frames; + pic->dpb_margin = ps->dpb_margin; + pic->vpp_margin = ps->dpb_margin; + dec->dpb_sz = ps->dpb_size; + pic->field = ps->field; + + inst->parms.ps = *ps; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_PSINFO; + + /*wake up*/ + complete(&inst->comp); + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "Parse from ucode, visible(%d x %d), coded(%d x %d), scan:%s\n", + ps->visible_width, ps->visible_height, + ps->coded_width, ps->coded_height, + pic->field == V4L2_FIELD_NONE ? "P" : "I"); +} + +static void set_param_write_sync(struct vdec_mpeg4_inst *inst) +{ + complete(&inst->comp); +} + +static void set_pic_info(struct vdec_mpeg4_inst *inst, + struct vdec_pic_info *pic) +{ + inst->vsi->pic = *pic; +} + +static int vdec_mpeg4_set_param(unsigned long h_vdec, + enum vdec_set_param_type type, void *in) +{ + int ret = 0; + struct vdec_mpeg4_inst *inst = (struct vdec_mpeg4_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "the mpeg4 inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case SET_PARAM_WRITE_FRAME_SYNC: + set_param_write_sync(inst); + break; + + case SET_PARAM_PS_INFO: + set_param_ps_info(inst, in); + break; + + case SET_PARAM_PIC_INFO: + set_pic_info(inst, in); + break; + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid set parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static struct vdec_common_if vdec_mpeg4_if = { + .init = vdec_mpeg4_init, + .probe = vdec_mpeg4_probe, + .decode = vdec_mpeg4_decode, + .get_param = vdec_mpeg4_get_param, + .set_param = vdec_mpeg4_set_param, + .deinit = 
vdec_mpeg4_deinit, +}; + +struct vdec_common_if *get_mpeg4_dec_comm_if(void); + +struct vdec_common_if *get_mpeg4_dec_comm_if(void) +{ + return &vdec_mpeg4_if; +}
diff --git a/drivers/amvdec_ports/decoder/vdec_vp9_if.c b/drivers/amvdec_ports/decoder/vdec_vp9_if.c new file mode 100644 index 0000000..fe81ddd --- /dev/null +++ b/drivers/amvdec_ports/decoder/vdec_vp9_if.c
@@ -0,0 +1,1065 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <uapi/linux/swab.h> +#include "../vdec_drv_if.h" +#include "../aml_vcodec_util.h" +#include "../aml_vcodec_dec.h" +#include "../aml_vcodec_drv.h" +#include "../aml_vcodec_adapt.h" +#include "../vdec_drv_base.h" +#include "aml_vp9_parser.h" +#include "vdec_vp9_trigger.h" + +#define KERNEL_ATRACE_TAG KERNEL_ATRACE_TAG_V4L2 +#include <trace/events/meson_atrace.h> + +#define PREFIX_SIZE (16) + +#define NAL_TYPE(value) ((value) & 0x1F) +#define HEADER_BUFFER_SIZE (32 * 1024) +#define SYNC_CODE (0x498342) + +extern int vp9_need_prefix; +bool need_trigger; +int dump_cnt = 0; + +/** + * struct vp9_fb - vp9 decode frame buffer information + * @vdec_fb_va : virtual address of struct vdec_fb + * @y_fb_dma : dma address of Y frame buffer (luma) + * @c_fb_dma : dma address of C frame buffer (chroma) + * @poc : picture order count of frame buffer + * @reserved : for 8 bytes alignment + */ +struct vp9_fb { + uint64_t vdec_fb_va; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + int32_t poc; + uint32_t reserved; +}; + +/** + * struct 
vdec_vp9_dec_info - decode information + * @dpb_sz : decoding picture buffer size + * @resolution_changed : resoltion change happen + * @reserved : for 8 bytes alignment + * @bs_dma : Input bit-stream buffer dma address + * @y_fb_dma : Y frame buffer dma address + * @c_fb_dma : C frame buffer dma address + * @vdec_fb_va : VDEC frame buffer struct virtual address + */ +struct vdec_vp9_dec_info { + uint32_t dpb_sz; + uint32_t resolution_changed; + uint32_t reserved; + uint64_t bs_dma; + uint64_t y_fb_dma; + uint64_t c_fb_dma; + uint64_t vdec_fb_va; +}; + +/** + * struct vdec_vp9_vsi - shared memory for decode information exchange + * between VPU and Host. + * The memory is allocated by VPU then mapping to Host + * in vpu_dec_init() and freed in vpu_dec_deinit() + * by VPU. + * AP-W/R : AP is writer/reader on this item + * VPU-W/R: VPU is write/reader on this item + * @hdr_buf : Header parsing buffer (AP-W, VPU-R) + * @list_free : free frame buffer ring list (AP-W/R, VPU-W) + * @list_disp : display frame buffer ring list (AP-R, VPU-W) + * @dec : decode information (AP-R, VPU-W) + * @pic : picture information (AP-R, VPU-W) + * @crop : crop information (AP-R, VPU-W) + */ +struct vdec_vp9_vsi { + char *header_buf; + int sps_size; + int pps_size; + int sei_size; + int head_offset; + struct vdec_vp9_dec_info dec; + struct vdec_pic_info pic; + struct vdec_pic_info cur_pic; + struct v4l2_rect crop; + bool is_combine; + int nalu_pos; + struct vp9_param_sets ps; +}; + +/** + * struct vdec_vp9_inst - vp9 decoder instance + * @num_nalu : how many nalus be decoded + * @ctx : point to aml_vcodec_ctx + * @vsi : VPU shared information + */ +struct vdec_vp9_inst { + unsigned int num_nalu; + struct aml_vcodec_ctx *ctx; + struct aml_vdec_adapt vdec; + struct vdec_vp9_vsi *vsi; + struct aml_dec_params parms; + struct completion comp; + struct vdec_comp_buf_info comp_info; +}; + +static int vdec_write_nalu(struct vdec_vp9_inst *inst, + u8 *buf, u32 size, u64 ts, ulong meta_ptr); + 
+static void get_pic_info(struct vdec_vp9_inst *inst, + struct vdec_pic_info *pic) +{ + *pic = inst->vsi->pic; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "pic(%d, %d), buf(%d, %d)\n", + pic->visible_width, pic->visible_height, + pic->coded_width, pic->coded_height); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "Y(%d, %d), C(%d, %d)\n", + pic->y_bs_sz, pic->y_len_sz, + pic->c_bs_sz, pic->c_len_sz); +} + +static void get_crop_info(struct vdec_vp9_inst *inst, struct v4l2_rect *cr) +{ + cr->left = inst->vsi->crop.left; + cr->top = inst->vsi->crop.top; + cr->width = inst->vsi->crop.width; + cr->height = inst->vsi->crop.height; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "l=%d, t=%d, w=%d, h=%d\n", + cr->left, cr->top, cr->width, cr->height); +} + +static void get_dpb_size(struct vdec_vp9_inst *inst, unsigned int *dpb_sz) +{ + *dpb_sz = inst->vsi->dec.dpb_sz; + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, "sz=%d\n", *dpb_sz); +} + +static u32 vdec_config_default_parms(u8 *parm) +{ + u8 *pbuf = parm; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:7;"); + pbuf += sprintf(pbuf, "vp9_double_write_mode:1;"); + pbuf += sprintf(pbuf, "vp9_buf_width:1920;"); + pbuf += sprintf(pbuf, "vp9_buf_height:1088;"); + pbuf += sprintf(pbuf, "vp9_max_pic_w:4096;"); + pbuf += sprintf(pbuf, "vp9_max_pic_h:2304;"); + pbuf += sprintf(pbuf, "save_buffer_mode:0;"); + pbuf += sprintf(pbuf, "no_head:0;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:0;"); + + return parm - pbuf; +} + +static void vdec_parser_parms(struct vdec_vp9_inst *inst) +{ + struct aml_vcodec_ctx *ctx = inst->ctx; + + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "%s:parms_status = 0x%x, present_flag = %d\n", + __func__, ctx->config.parm.dec.parms_status, + ctx->config.parm.dec.hdr.color_parms.present_flag); + if (ctx->config.parm.dec.parms_status & + V4L2_CONFIG_PARM_DECODE_CFGINFO) { + u8 *pbuf = 
ctx->config.buf; + + pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;"); + pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;", + ctx->config.parm.dec.cfg.ref_buf_margin); + pbuf += sprintf(pbuf, "vp9_double_write_mode:%d;", + ctx->config.parm.dec.cfg.double_write_mode); + pbuf += sprintf(pbuf, "vp9_buf_width:%d;", + ctx->config.parm.dec.cfg.init_width); + pbuf += sprintf(pbuf, "vp9_buf_height:%d;", + ctx->config.parm.dec.cfg.init_height); + pbuf += sprintf(pbuf, "save_buffer_mode:0;"); + pbuf += sprintf(pbuf, "no_head:0;"); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;", + ctx->config.parm.dec.cfg.canvas_mem_mode); + pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:%d;", + ctx->config.parm.dec.cfg.canvas_mem_endian); + pbuf += sprintf(pbuf, "parm_v4l_low_latency_mode:%d;", + ctx->config.parm.dec.cfg.low_latency_mode); + pbuf += sprintf(pbuf, "parm_v4l_duration:%d;", + ctx->config.parm.dec.cfg.duration); + ctx->config.length = pbuf - ctx->config.buf; + } else { + ctx->config.parm.dec.cfg.double_write_mode = 1; + ctx->config.parm.dec.cfg.ref_buf_margin = 7; + ctx->config.length = vdec_config_default_parms(ctx->config.buf); + } + + if ((ctx->config.parm.dec.parms_status & + V4L2_CONFIG_PARM_DECODE_HDRINFO) && + ctx->config.parm.dec.hdr.color_parms.present_flag) { + u8 *pbuf = ctx->config.buf + ctx->config.length; + + pbuf += sprintf(pbuf, "HDRStaticInfo:%d;", 1); + pbuf += sprintf(pbuf, "signal_type:%d;", + ctx->config.parm.dec.hdr.signal_type); + pbuf += sprintf(pbuf, "mG.x:%d;", + ctx->config.parm.dec.hdr.color_parms.primaries[0][0]); + pbuf += sprintf(pbuf, "mG.y:%d;", + ctx->config.parm.dec.hdr.color_parms.primaries[0][1]); + pbuf += sprintf(pbuf, "mB.x:%d;", + ctx->config.parm.dec.hdr.color_parms.primaries[1][0]); + pbuf += sprintf(pbuf, "mB.y:%d;", + ctx->config.parm.dec.hdr.color_parms.primaries[1][1]); + pbuf += sprintf(pbuf, "mR.x:%d;", + ctx->config.parm.dec.hdr.color_parms.primaries[2][0]); + pbuf += sprintf(pbuf, "mR.y:%d;", + 
ctx->config.parm.dec.hdr.color_parms.primaries[2][1]); + pbuf += sprintf(pbuf, "mW.x:%d;", + ctx->config.parm.dec.hdr.color_parms.white_point[0]); + pbuf += sprintf(pbuf, "mW.y:%d;", + ctx->config.parm.dec.hdr.color_parms.white_point[1]); + pbuf += sprintf(pbuf, "mMaxDL:%d;", + ctx->config.parm.dec.hdr.color_parms.luminance[0] * 10000); + pbuf += sprintf(pbuf, "mMinDL:%d;", + ctx->config.parm.dec.hdr.color_parms.luminance[1]); + pbuf += sprintf(pbuf, "mMaxCLL:%d;", + ctx->config.parm.dec.hdr.color_parms.content_light_level.max_content); + pbuf += sprintf(pbuf, "mMaxFALL:%d;", + ctx->config.parm.dec.hdr.color_parms.content_light_level.max_pic_average); + ctx->config.length = pbuf - ctx->config.buf; + inst->parms.hdr = ctx->config.parm.dec.hdr; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_HDRINFO; + } + v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, + "config.buf = %s\n", ctx->config.buf); + + inst->vdec.config = ctx->config; + inst->parms.cfg = ctx->config.parm.dec.cfg; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO; +} + +static int vdec_vp9_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec) +{ + struct vdec_vp9_inst *inst = NULL; + int ret = -1; + + inst = kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + inst->vdec.frm_name = "VP9"; + inst->vdec.video_type = VFORMAT_VP9; + inst->vdec.filp = ctx->dev->filp; + inst->vdec.ctx = ctx; + inst->ctx = ctx; + + vdec_parser_parms(inst); + + /* set play mode.*/ + if (ctx->is_drm_mode) + inst->vdec.port.flag |= PORT_FLAG_DRM; + + /* to eable vp9 hw.*/ + inst->vdec.port.type = PORT_TYPE_HEVC; + + /* probe info from the stream */ + inst->vsi = kzalloc(sizeof(struct vdec_vp9_vsi), GFP_KERNEL); + if (!inst->vsi) { + ret = -ENOMEM; + goto err; + } + + /* alloc the header buffer to be used cache sps or spp etc.*/ + inst->vsi->header_buf = vzalloc(HEADER_BUFFER_SIZE); + if (!inst->vsi->header_buf) { + ret = -ENOMEM; + goto err; + } + + init_completion(&inst->comp); + + v4l_dbg(inst->ctx, 
V4L_DEBUG_CODEC_PRINFO, + "vp9 Instance >> %lx\n", (ulong) inst); + + ctx->ada_ctx = &inst->vdec; + *h_vdec = (unsigned long)inst; + + /* init decoder. */ + ret = video_decoder_init(&inst->vdec); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "vdec_vp9 init err=%d\n", ret); + goto err; + } + + return 0; +err: + if (inst && inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + if (inst && inst->vsi) + kfree(inst->vsi); + if (inst) + kfree(inst); + *h_vdec = 0; + + return ret; +} + +#if 0 +static int refer_buffer_num(int level_idc, int poc_cnt, + int mb_width, int mb_height) +{ + return 20; +} +#endif + +static int vdec_get_dw_mode(struct vdec_vp9_inst *inst, int dw_mode) +{ + u32 valid_dw_mode = inst->parms.cfg.double_write_mode; + int w = inst->vsi->pic.coded_width; + int h = inst->vsi->pic.coded_height; + u32 dw = 0x1; /*1:1*/ + + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + default: + dw = valid_dw_mode; + break; + } + + return dw; +} + +static int vdec_pic_scale(struct vdec_vp9_inst *inst, int length, int dw_mode) +{ + int ret = 64; + + switch (vdec_get_dw_mode(inst, dw_mode)) { + case 0x0: /* only afbc, output afbc */ + ret = 64; + break; + case 0x1: /* afbc and (w x h), output YUV420 */ + ret = length; + break; + case 0x2: /* afbc and (w/4 x h/4), output YUV420 */ + case 0x3: /* afbc and (w/4 x h/4), output afbc and YUV420 */ + ret = length >> 2; + break; + case 0x4: /* afbc and (w/2 x h/2), output YUV420 */ + ret = length >> 1; + break; + case 0x10: /* (w x h), output YUV420-8bit) */ + default: + ret = length; + break; + } + + return ret; +} + +static void fill_vdec_params(struct vdec_vp9_inst *inst, + struct VP9Context *vp9_ctx) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct vdec_vp9_dec_info *dec = &inst->vsi->dec; + struct 
v4l2_rect *rect = &inst->vsi->crop; + int dw = inst->parms.cfg.double_write_mode; + int margin = inst->parms.cfg.ref_buf_margin; + + /* fill visible area size that be used for EGL. */ + pic->visible_width = vdec_pic_scale(inst, vp9_ctx->render_width, dw); + pic->visible_height = vdec_pic_scale(inst, vp9_ctx->render_height, dw); + + /* calc visible ares. */ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. */ + pic->coded_width = vdec_pic_scale(inst, ALIGN(vp9_ctx->width, 32), dw); + pic->coded_height = vdec_pic_scale(inst, ALIGN(vp9_ctx->height, 32), dw); + + pic->y_len_sz = pic->coded_width * pic->coded_height; + pic->c_len_sz = pic->y_len_sz >> 1; + + /* calc DPB size */ + dec->dpb_sz = 5 + margin;//refer_buffer_num(sps->level_idc, poc_cnt, mb_w, mb_h); + + inst->parms.ps.visible_width = pic->visible_width; + inst->parms.ps.visible_height = pic->visible_height; + inst->parms.ps.coded_width = pic->coded_width; + inst->parms.ps.coded_height = pic->coded_height; + inst->parms.ps.dpb_size = dec->dpb_sz; + inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_PSINFO; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_BUFMGR, + "The stream infos, dw: %d, coded:(%d x %d), visible:(%d x %d), DPB: %d, margin: %d\n", + dw, pic->coded_width, pic->coded_height, + pic->visible_width, pic->visible_height, + dec->dpb_sz - margin, margin); +} + +static int parse_stream_ucode(struct vdec_vp9_inst *inst, + u8 *buf, u32 size, u64 timestamp, ulong meta_ptr) +{ + int ret = 0; + + ret = vdec_write_nalu(inst, buf, size, timestamp, meta_ptr); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 
0 : -1; +} + +static int parse_stream_ucode_dma(struct vdec_vp9_inst *inst, + ulong buf, u32 size, u64 timestamp, u32 handle) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + + ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle, + vdec_vframe_input_free, inst->ctx); + if (ret < 0) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "write frame data failed. err: %d\n", ret); + return ret; + } + + /* wait ucode parse ending. */ + wait_for_completion_timeout(&inst->comp, + msecs_to_jiffies(1000)); + + return inst->vsi->pic.dpb_frames ? 0 : -1; +} + +static int parse_stream_cpu(struct vdec_vp9_inst *inst, u8 *buf, u32 size) +{ + int ret = 0; + struct vp9_param_sets *ps = NULL; + + ps = vzalloc(sizeof(struct vp9_param_sets)); + if (ps == NULL) + return -ENOMEM; + + ret = vp9_decode_extradata_ps(buf, size, ps); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "parse extra data failed. err: %d\n", ret); + goto out; + } + + if (ps->head_parsed) + fill_vdec_params(inst, &ps->ctx); + + ret = ps->head_parsed ? 
0 : -1; +out: + vfree(ps); + + return ret; +} + +static int vdec_vp9_probe(unsigned long h_vdec, + struct aml_vcodec_mem *bs, void *out) +{ + struct vdec_vp9_inst *inst = + (struct vdec_vp9_inst *)h_vdec; + u8 *buf = (u8 *)bs->vaddr; + u32 size = bs->size; + int ret = 0; + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if ((s->magic != AML_VIDEO_MAGIC) && + (s->type != V4L_STREAM_TYPE_MATEDATA)) + return -1; + + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, s->data, + s->len, bs->timestamp, 0); + } else { + ret = parse_stream_cpu(inst, s->data, s->len); + } + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = parse_stream_ucode_dma(inst, bs->addr, size, + bs->timestamp, BUFF_IDX(bs, bs->index)); + } + } else { + if (inst->ctx->param_sets_from_ucode) { + ret = parse_stream_ucode(inst, buf, size, bs->timestamp, bs->meta_ptr); + } else { + ret = parse_stream_cpu(inst, buf, size); + } + } + + inst->vsi->cur_pic = inst->vsi->pic; + + return ret; +} + +static void vdec_vp9_deinit(unsigned long h_vdec) +{ + struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec; + struct aml_vcodec_ctx *ctx = inst->ctx; + + video_decoder_release(&inst->vdec); + + if (inst->vsi && inst->vsi->header_buf) + vfree(inst->vsi->header_buf); + + if (inst->vsi) + kfree(inst->vsi); + + kfree(inst); + + ctx->drv_handle = 0; + + need_trigger = false; + dump_cnt = 0; +} + +static void add_prefix_data(struct vp9_superframe_split *s, + u8 **out, u32 *out_size) +{ + int i; + u8 *p = NULL; + u32 length; + + length = s->size + s->nb_frames * PREFIX_SIZE; + if (!length) + return; + p = vzalloc(length); + if (!p) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, + "alloc size %d failed.\n" ,length); + return; + } + + memcpy(p, s->data, s->size); + p += s->size; + + for (i = s->nb_frames; i > 0; i--) { + u32 frame_size = s->sizes[i - 1]; + u8 *prefix = NULL; + 
+ p -= frame_size; + memmove(p + PREFIX_SIZE * i, p, frame_size); + prefix = p + PREFIX_SIZE * (i - 1); + + /*add amlogic frame headers.*/ + frame_size += 16; + prefix[0] = (frame_size >> 24) & 0xff; + prefix[1] = (frame_size >> 16) & 0xff; + prefix[2] = (frame_size >> 8 ) & 0xff; + prefix[3] = (frame_size >> 0 ) & 0xff; + prefix[4] = ((frame_size >> 24) & 0xff) ^ 0xff; + prefix[5] = ((frame_size >> 16) & 0xff) ^ 0xff; + prefix[6] = ((frame_size >> 8 ) & 0xff) ^ 0xff; + prefix[7] = ((frame_size >> 0 ) & 0xff) ^ 0xff; + prefix[8] = 0; + prefix[9] = 0; + prefix[10] = 0; + prefix[11] = 1; + prefix[12] = 'A'; + prefix[13] = 'M'; + prefix[14] = 'L'; + prefix[15] = 'V'; + frame_size -= 16; + } + + *out = p; + *out_size = length; +} + +#ifndef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +static int vp9_superframe_split_filter(struct vp9_superframe_split *s) +{ + int i, j, ret, marker; + bool is_superframe = false; + int *prefix = (int *)s->data; + + if (!s->data) + return -1; + +#define AML_PREFIX ('V' << 24 | 'L' << 16 | 'M' << 8 | 'A') + if (prefix[3] == AML_PREFIX) { + s->prefix_size = 16; + /*pr_info("the frame data has beed added header\n");*/ + } + + marker = s->data[s->data_size - 1]; + if ((marker & 0xe0) == 0xc0) { + int length_size = 1 + ((marker >> 3) & 0x3); + int nb_frames = 1 + (marker & 0x7); + int idx_size = 2 + nb_frames * length_size; + + if (s->data_size >= idx_size && + s->data[s->data_size - idx_size] == marker) { + s64 total_size = 0; + int idx = s->data_size + 1 - idx_size; + + for (i = 0; i < nb_frames; i++) { + int frame_size = 0; + for (j = 0; j < length_size; j++) + frame_size |= s->data[idx++] << (j * 8); + + total_size += frame_size; + if (frame_size < 0 || + total_size > s->data_size - idx_size) { + v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid frame size in a sframe: %d\n", + frame_size); + ret = -EINVAL; + goto fail; + } + s->sizes[i] = frame_size; + } + + s->nb_frames = nb_frames; + s->size = total_size; + s->next_frame = 0; + 
s->next_frame_offset = 0; + is_superframe = true; + } + }else { + s->nb_frames = 1; + s->sizes[0] = s->data_size; + s->size = s->data_size; + } + + /*pr_info("sframe: %d, frames: %d, IN: %x, OUT: %x\n", + is_superframe, s->nb_frames, + s->data_size, s->size);*/ + + /* parse uncompressed header. */ + if (is_superframe) { + /* bitstream profile. */ + /* frame type. (intra or inter) */ + /* colorspace descriptor */ + /* ... */ + + v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "the frame is a superframe.\n"); + } + + /*pr_err("in: %x, %d, out: %x, sizes %d,%d,%d,%d,%d,%d,%d,%d\n", + s->data_size, + s->nb_frames, + s->size, + s->sizes[0], + s->sizes[1], + s->sizes[2], + s->sizes[3], + s->sizes[4], + s->sizes[5], + s->sizes[6], + s->sizes[7]);*/ + + return 0; +fail: + return ret; +} +#endif + +static void trigger_decoder(struct aml_vdec_adapt *vdec) +{ + int i, ret; + u32 frame_size = 0; + u8 *p = vp9_trigger_header; + + for (i = 0; i < ARRAY_SIZE(vp9_trigger_framesize); i++) { + frame_size = vp9_trigger_framesize[i]; + ret = vdec_vframe_write(vdec, p, + frame_size, 0, 0); + v4l_dbg(vdec->ctx, V4L_DEBUG_CODEC_ERROR, + "write trigger frame %d\n", ret); + p += frame_size; + } +} + +static int vdec_write_nalu(struct vdec_vp9_inst *inst, + u8 *buf, u32 size, u64 ts, ulong meta_ptr) +{ + int ret = 0; + struct aml_vdec_adapt *vdec = &inst->vdec; + struct vp9_superframe_split s; + u8 *data = NULL; + u32 length = 0; + bool need_prefix = vp9_need_prefix; + + memset(&s, 0, sizeof(s)); + + /*trigger.*/ + if (0 && !need_trigger) { + trigger_decoder(vdec); + need_trigger = true; + } + + if (need_prefix) { + /*parse superframe.*/ + s.data = buf; + s.data_size = size; + ret = vp9_superframe_split_filter(&s); + if (ret) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "parse frames failed.\n"); + return ret; + } + + /*add headers.*/ + add_prefix_data(&s, &data, &length); + ret = vdec_vframe_write(vdec, data, length, ts, 0); + vfree(data); + } else { + ret = vdec_vframe_write(vdec, buf, size, ts, 
meta_ptr); + } + + return ret; +} + +static bool monitor_res_change(struct vdec_vp9_inst *inst, u8 *buf, u32 size) +{ + int ret = -1; + u8 *p = buf; + int len = size; + u32 synccode = vp9_need_prefix ? + ((p[1] << 16) | (p[2] << 8) | p[3]) : + ((p[17] << 16) | (p[18] << 8) | p[19]); + + if (synccode == SYNC_CODE) { + ret = parse_stream_cpu(inst, p, len); + if (!ret && (inst->vsi->cur_pic.coded_width != + inst->vsi->pic.coded_width || + inst->vsi->cur_pic.coded_height != + inst->vsi->pic.coded_height)) { + inst->vsi->cur_pic = inst->vsi->pic; + return true; + } + } + + return false; +} + +static int vdec_vp9_decode(unsigned long h_vdec, + struct aml_vcodec_mem *bs, bool *res_chg) +{ + struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec; + struct aml_vdec_adapt *vdec = &inst->vdec; + u8 *buf = (u8 *) bs->vaddr; + u32 size = bs->size; + int ret = -1; + + if (bs == NULL) + return -1; + + if (vdec_input_full(vdec)) { + return -EAGAIN; + } + + if (inst->ctx->is_drm_mode) { + if (bs->model == VB2_MEMORY_MMAP) { + struct aml_video_stream *s = + (struct aml_video_stream *) buf; + + if (s->magic != AML_VIDEO_MAGIC) + return -1; + + if (!inst->ctx->param_sets_from_ucode && + (s->type == V4L_STREAM_TYPE_MATEDATA)) { + if ((*res_chg = monitor_res_change(inst, + s->data, s->len))) + return 0; + } + + ret = vdec_vframe_write(vdec, + s->data, + s->len, + bs->timestamp, + 0); + } else if (bs->model == VB2_MEMORY_DMABUF || + bs->model == VB2_MEMORY_USERPTR) { + ret = vdec_vframe_write_with_dma(vdec, + bs->addr, size, bs->timestamp, + BUFF_IDX(bs, bs->index), + vdec_vframe_input_free, inst->ctx); + } + } else { + /*checked whether the resolution changes.*/ + if ((!inst->ctx->param_sets_from_ucode) && + (*res_chg = monitor_res_change(inst, buf, size))) + return 0; + ret = vdec_write_nalu(inst, buf, size, bs->timestamp, bs->meta_ptr); + } + + return ret; +} + + static void get_param_config_info(struct vdec_vp9_inst *inst, + struct aml_dec_params *parms) + { + if 
(inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CFGINFO) + parms->cfg = inst->parms.cfg; + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_PSINFO) + parms->ps = inst->parms.ps; + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_HDRINFO) + parms->hdr = inst->parms.hdr; + if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CNTINFO) + parms->cnt = inst->parms.cnt; + + parms->parms_status |= inst->parms.parms_status; + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "parms status: %u\n", parms->parms_status); + } + +static void get_param_comp_buf_info(struct vdec_vp9_inst *inst, + struct vdec_comp_buf_info *params) +{ + memcpy(params, &inst->comp_info, sizeof(*params)); +} + +static int vdec_vp9_get_param(unsigned long h_vdec, + enum vdec_get_param_type type, void *out) +{ + int ret = 0; + struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "the vp9 inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case GET_PARAM_PIC_INFO: + get_pic_info(inst, out); + break; + + case GET_PARAM_DPB_SIZE: + get_dpb_size(inst, out); + break; + + case GET_PARAM_CROP_INFO: + get_crop_info(inst, out); + break; + + case GET_PARAM_CONFIG_INFO: + get_param_config_info(inst, out); + break; + + case GET_PARAM_DW_MODE: + { + u32 *mode = out; + u32 m = inst->ctx->config.parm.dec.cfg.double_write_mode; + if (m <= 16) + *mode = inst->ctx->config.parm.dec.cfg.double_write_mode; + else + *mode = vdec_get_dw_mode(inst, 0); + break; + } + case GET_PARAM_COMP_BUF_INFO: + get_param_comp_buf_info(inst, out); + break; + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid get parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static void set_param_write_sync(struct vdec_vp9_inst *inst) +{ + complete(&inst->comp); +} + +static void set_param_ps_info(struct vdec_vp9_inst *inst, + struct aml_vdec_ps_infos *ps) +{ + struct vdec_pic_info *pic = &inst->vsi->pic; + struct 
vdec_vp9_dec_info *dec = &inst->vsi->dec; + struct v4l2_rect *rect = &inst->vsi->crop; + int dw = inst->parms.cfg.double_write_mode; + + /* fill visible area size that be used for EGL. */ + pic->visible_width = ps->visible_width; + pic->visible_height = ps->visible_height; + + /* calc visible ares. */ + rect->left = 0; + rect->top = 0; + rect->width = pic->visible_width; + rect->height = pic->visible_height; + + /* config canvas size that be used for decoder. */ + pic->coded_width = ps->coded_width; + pic->coded_height = ps->coded_height; + + pic->y_len_sz = ALIGN(vdec_pic_scale(inst, pic->coded_width, dw), 64) * + ALIGN(vdec_pic_scale(inst, pic->coded_height, dw), 64); + pic->c_len_sz = pic->y_len_sz >> 1; + + /* calc DPB size */ + pic->dpb_frames = ps->dpb_frames; + pic->dpb_margin = ps->dpb_margin; + pic->vpp_margin = ps->dpb_margin; + dec->dpb_sz = ps->dpb_size; + pic->field = ps->field; + + inst->parms.ps = *ps; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_PSINFO; + + /*wake up*/ + complete(&inst->comp); + + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "Parse from ucode, visible(%d x %d), coded(%d x %d)\n", + pic->visible_width, pic->visible_height, + pic->coded_width, pic->coded_height); +} + +static void set_param_comp_buf_info(struct vdec_vp9_inst *inst, + struct vdec_comp_buf_info *info) +{ + memcpy(&inst->comp_info, info, sizeof(*info)); +} + +static void set_param_hdr_info(struct vdec_vp9_inst *inst, + struct aml_vdec_hdr_infos *hdr) +{ + if ((inst->parms.parms_status & + V4L2_CONFIG_PARM_DECODE_HDRINFO)) { + inst->parms.hdr = *hdr; + inst->parms.parms_status |= + V4L2_CONFIG_PARM_DECODE_HDRINFO; + aml_vdec_dispatch_event(inst->ctx, + V4L2_EVENT_SRC_CH_HDRINFO); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, + "VP9 set HDR infos\n"); + } +} + +static void set_param_post_event(struct vdec_vp9_inst *inst, u32 *event) +{ + aml_vdec_dispatch_event(inst->ctx, *event); + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, + "VP9 post event: %d\n", *event); +} 
+ +static void set_pic_info(struct vdec_vp9_inst *inst, + struct vdec_pic_info *pic) +{ + inst->vsi->pic = *pic; +} + +static int vdec_vp9_set_param(unsigned long h_vdec, + enum vdec_set_param_type type, void *in) +{ + int ret = 0; + struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec; + + if (!inst) { + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "the vp9 inst of dec is invalid.\n"); + return -1; + } + + switch (type) { + case SET_PARAM_WRITE_FRAME_SYNC: + set_param_write_sync(inst); + break; + + case SET_PARAM_PS_INFO: + set_param_ps_info(inst, in); + break; + + case SET_PARAM_COMP_BUF_INFO: + set_param_comp_buf_info(inst, in); + break; + + case SET_PARAM_HDR_INFO: + set_param_hdr_info(inst, in); + break; + + case SET_PARAM_POST_EVENT: + set_param_post_event(inst, in); + break; + + case SET_PARAM_PIC_INFO: + set_pic_info(inst, in); + break; + + default: + v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, + "invalid set parameter type=%d\n", type); + ret = -EINVAL; + } + + return ret; +} + +static struct vdec_common_if vdec_vp9_if = { + .init = vdec_vp9_init, + .probe = vdec_vp9_probe, + .decode = vdec_vp9_decode, + .get_param = vdec_vp9_get_param, + .set_param = vdec_vp9_set_param, + .deinit = vdec_vp9_deinit, +}; + +struct vdec_common_if *get_vp9_dec_comm_if(void); + +struct vdec_common_if *get_vp9_dec_comm_if(void) +{ + return &vdec_vp9_if; +} +
diff --git a/drivers/amvdec_ports/decoder/vdec_vp9_trigger.h b/drivers/amvdec_ports/decoder/vdec_vp9_trigger.h new file mode 100644 index 0000000..0097690 --- /dev/null +++ b/drivers/amvdec_ports/decoder/vdec_vp9_trigger.h
@@ -0,0 +1,860 @@ +/* + * drivers/amvdec_ports/decoder/vdec_vp9_trigger.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef _VDEC_VP9_TRIG_ +#define _VDEC_VP9_TRIG_ + +#define VP9_USE_TRIGGER_BIG_SIZE 1 + +static u8 vp9_trigger_header[] = { +#if VP9_USE_TRIGGER_BIG_SIZE + 0x00, 0x00, 0x15, 0x29, 0xff, 0xff, 0xea, 0xd6, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, + 0x82, 0x49, 0x83, 0x42, 0x00, 0x0c, 0x30, 0x0c, 0x34, 0x24, 0x38, 0x24, 0x1c, 0x19, 0x28, 0x00, + 0x02, 0x90, 0x7c, 0x52, 0xff, 0x19, 0x05, 0xfa, 0xbe, 0xa9, 0xed, 0x30, 0x72, 0xdd, 0x1b, 0xad, + 0xe8, 0xdd, 0xdc, 0xe2, 0x39, 0xb2, 0xb7, 0xd3, 0x37, 0xea, 0x4b, 0xe7, 0xd1, 0xfe, 0x57, 0xf0, + 0x7a, 0x9f, 0x51, 0xa4, 0x5e, 0xb7, 0x18, 0x80, 0x77, 0x60, 0x00, 0x7f, 0xdb, 0x4d, 0xea, 0xbb, + 0x00, 0x92, 0xed, 0xbc, 0xf6, 0x6e, 0x24, 0x2b, 0x3f, 0xd0, 0xb8, 0x09, 0x77, 0x11, 0x43, 0x65, + 0x62, 0x94, 0xed, 0x33, 0xe1, 0x6e, 0xe8, 0x1a, 0xd0, 0x4e, 0xb1, 0x72, 0x34, 0x8e, 0x24, 0xe5, + 0x1b, 0x26, 0x58, 0xdf, 0x19, 0x8d, 0x59, 0x82, 0x19, 0xae, 0xea, 0x12, 0x63, 0x6c, 0x89, 0x9c, + 0x7f, 0xc2, 0x67, 0x40, 0xed, 0xf2, 0x8c, 0xaa, 0x9f, 0xe1, 0x1e, 0x27, 0x12, 0xdb, 0x01, 0x86, + 0xcc, 0x47, 0xee, 0xaf, 0x5b, 0xaf, 0x44, 0x3d, 0x8e, 0x67, 0x2c, 0x57, 0xd9, 0xb9, 0xb5, 0x7a, + 0x83, 0x28, 0xab, 0xf4, 0xe0, 0x86, 0xf0, 0x14, 0xd0, 0x5d, 0xa4, 0x73, 0x28, 0x04, 0x3e, 0x99, + 0xa3, 0xd7, 0xb5, 0xc6, 0xf7, 0xb1, 0xd9, 0x11, 0x06, 0x34, 0x1b, 0x91, 0x6d, 0x31, 0xee, 0x26, + 0x95, 
0xde, 0x0e, 0x55, 0x8f, 0xf1, 0x59, 0xe5, 0xc8, 0xb7, 0x74, 0x8a, 0x56, 0x99, 0xe5, 0xf7, + 0x04, 0x49, 0xa5, 0x66, 0xa2, 0x15, 0xaf, 0x3c, 0xe5, 0xce, 0x26, 0xb0, 0x66, 0x63, 0xae, 0x7b, + 0xf1, 0x09, 0xd2, 0x62, 0xb5, 0xe1, 0x4f, 0x0c, 0x54, 0xad, 0xe5, 0x00, 0x86, 0x14, 0x4b, 0x06, + 0x82, 0xad, 0x62, 0x95, 0x6d, 0x3a, 0x99, 0x67, 0x1d, 0x1b, 0x85, 0xf6, 0xe7, 0x69, 0xd8, 0x00, + 0xde, 0x63, 0xb5, 0x35, 0xf1, 0x44, 0x42, 0x21, 0xd3, 0xf3, 0xc9, 0x07, 0x23, 0x67, 0xe5, 0xea, + 0x5e, 0xd2, 0x63, 0x78, 0xb9, 0x7b, 0xeb, 0xd7, 0x2d, 0x4c, 0x5e, 0x44, 0x6a, 0x46, 0x68, 0xeb, + 0x5d, 0x61, 0xaa, 0xc7, 0xce, 0xb5, 0xe3, 0x01, 0xc8, 0x24, 0xca, 0x72, 0xcc, 0xdf, 0x89, 0x34, + 0xb6, 0xab, 0xfd, 0x7b, 0xb9, 0xbd, 0xef, 0x33, 0xb0, 0x2b, 0x1e, 0xd3, 0x20, 0xa4, 0xcd, 0x8a, + 0x51, 0x2e, 0x9d, 0x2e, 0x5c, 0xc7, 0x52, 0xed, 0xb8, 0x29, 0x68, 0x5d, 0x63, 0xe1, 0x79, 0x98, + 0x2d, 0xb1, 0xf4, 0xa0, 0x01, 0x30, 0x2d, 0x10, 0xec, 0xe4, 0x6c, 0xed, 0x55, 0xb1, 0xf9, 0x0f, + 0xd4, 0xae, 0x0f, 0x75, 0x5d, 0x81, 0x76, 0xfe, 0x94, 0x44, 0x1c, 0xcc, 0x8c, 0x7e, 0x0f, 0x4c, + 0xda, 0x88, 0x61, 0x6a, 0x17, 0x70, 0x14, 0xf5, 0x0d, 0x7c, 0xd1, 0xf8, 0x0f, 0x19, 0xa2, 0x05, + 0xe3, 0x98, 0xdc, 0xe1, 0xb4, 0x6a, 0x74, 0xa0, 0x8f, 0x1b, 0xc8, 0x12, 0xb4, 0xde, 0x62, 0x88, + 0xd4, 0x0d, 0xed, 0x0b, 0x76, 0xae, 0xe0, 0x92, 0xa2, 0x13, 0x70, 0x03, 0x08, 0x26, 0x8f, 0xed, + 0xa7, 0x5c, 0x5a, 0x55, 0x6e, 0x92, 0x76, 0xd0, 0xc2, 0x9c, 0x24, 0x77, 0x78, 0x9b, 0x33, 0xe5, + 0x88, 0xc6, 0x08, 0x8a, 0x28, 0x46, 0x9f, 0xb4, 0xd9, 0xd7, 0x86, 0x82, 0xe4, 0xba, 0x97, 0x3d, + 0x36, 0xd5, 0x31, 0x61, 0x7f, 0x8e, 0xb9, 0xa1, 0xab, 0x68, 0xa6, 0x8c, 0xa8, 0x47, 0x8d, 0x5c, + 0x97, 0xe9, 0xf0, 0x8f, 0xa4, 0xe8, 0x13, 0x2b, 0x9c, 0x4d, 0xff, 0x95, 0x8e, 0x98, 0x08, 0x75, + 0xd4, 0xed, 0x5e, 0x86, 0xe5, 0x68, 0x4b, 0x01, 0x0b, 0xd2, 0x13, 0xb0, 0x94, 0xd1, 0x28, 0x22, + 0x13, 0x6b, 0x95, 0x30, 0x79, 0xb6, 0xd9, 0x55, 0x2e, 0x3b, 0x36, 0x18, 0xef, 0x39, 0x16, 0x97, + 0x1e, 0xc4, 0x03, 0xb4, 0x75, 0xbe, 0xfd, 
0x04, 0x2e, 0xd5, 0xac, 0x95, 0xac, 0x70, 0x40, 0xb3, + 0x1a, 0x61, 0x03, 0x9a, 0x9f, 0xbf, 0x93, 0x14, 0xcc, 0xc0, 0x28, 0xf8, 0x93, 0xa6, 0x7f, 0x07, + 0x12, 0xe1, 0xc2, 0x86, 0xe3, 0x87, 0x0d, 0x4d, 0x20, 0x75, 0xf7, 0xa0, 0x14, 0x49, 0x6f, 0x52, + 0xc6, 0x6e, 0x9d, 0xa8, 0x8e, 0x14, 0x3a, 0x9f, 0xa6, 0xac, 0xdc, 0x56, 0x9c, 0xdf, 0xf4, 0x75, + 0x6a, 0x31, 0x94, 0x50, 0x9c, 0x43, 0xd5, 0x6b, 0x20, 0xe4, 0xbc, 0x20, 0xd0, 0x9a, 0x7f, 0x84, + 0x6e, 0xd5, 0x7c, 0x2e, 0x93, 0xf8, 0x25, 0x50, 0x4f, 0xcb, 0x13, 0x74, 0x78, 0x08, 0x82, 0x0d, + 0xd0, 0x39, 0xaf, 0x4c, 0x6e, 0x9e, 0x25, 0x37, 0xbf, 0x7d, 0xe3, 0x93, 0xbd, 0x91, 0xb9, 0x52, + 0xac, 0x6d, 0xa6, 0xcd, 0x78, 0x50, 0x78, 0x3a, 0xc0, 0xc4, 0x13, 0xc0, 0x4e, 0xa5, 0x09, 0x09, + 0x80, 0xd8, 0x08, 0xfc, 0x63, 0xd4, 0x28, 0x3d, 0xef, 0xfd, 0xf8, 0x30, 0x3e, 0x09, 0x3a, 0x56, + 0xf8, 0x11, 0xa3, 0x67, 0xdd, 0x51, 0x15, 0xde, 0x20, 0x8d, 0xd8, 0x66, 0xac, 0x08, 0x70, 0x9c, + 0x8e, 0xb2, 0xff, 0x26, 0x5a, 0x82, 0x14, 0x1d, 0xf5, 0xf3, 0x1c, 0xf9, 0x6a, 0x00, 0x25, 0x84, + 0x94, 0xc9, 0x6f, 0x16, 0x75, 0xf6, 0xd2, 0x13, 0xa2, 0x70, 0xe0, 0x94, 0x4b, 0xe7, 0xe4, 0x6d, + 0xf1, 0xd2, 0xa3, 0x94, 0x65, 0x25, 0x38, 0xb4, 0x31, 0x8b, 0xdf, 0x1c, 0xb6, 0x22, 0x2b, 0x4b, + 0x03, 0xd2, 0x92, 0xcc, 0xa1, 0xcf, 0xb6, 0xf5, 0x28, 0x02, 0xb8, 0xe0, 0x44, 0x69, 0x34, 0xa8, + 0xb3, 0xaa, 0xea, 0x48, 0x6e, 0xf0, 0x67, 0x39, 0x0e, 0xbd, 0xfd, 0x20, 0x9d, 0x8d, 0xc8, 0x0b, + 0xa3, 0x7e, 0x66, 0x5b, 0xde, 0x21, 0xcc, 0x19, 0xae, 0xfd, 0x82, 0x73, 0x75, 0x09, 0x35, 0x2f, + 0xbf, 0x91, 0xaa, 0xfa, 0x1d, 0xa4, 0x44, 0x7b, 0xf9, 0xac, 0xa9, 0x02, 0xb4, 0x39, 0xf8, 0x96, + 0x90, 0x01, 0xff, 0x73, 0xe6, 0x52, 0xa1, 0x22, 0xfe, 0x04, 0xe3, 0x3e, 0x8a, 0x6d, 0xa0, 0x19, + 0x4e, 0x11, 0x90, 0x9a, 0x05, 0xb2, 0x7a, 0x8d, 0x98, 0xfc, 0xda, 0x1c, 0x5f, 0x86, 0x94, 0x7f, + 0x58, 0x20, 0xdf, 0xb6, 0xc0, 0x11, 0xd0, 0x8c, 0xc2, 0x11, 0x61, 0x75, 0x47, 0xbe, 0xec, 0x92, + 0x82, 0xa3, 0xfe, 0xcd, 0x13, 0xdd, 0xe0, 0xe6, 0x5a, 0x0a, 0xc1, 0x9f, 0x31, 
0x6d, 0x78, 0x31, + 0xb6, 0x60, 0xbe, 0x0b, 0xd5, 0x81, 0x24, 0xe6, 0xc7, 0xe7, 0xe8, 0x08, 0x53, 0x27, 0xf3, 0x9a, + 0xf2, 0x7e, 0xb7, 0xc8, 0xd9, 0x74, 0x72, 0x45, 0xe6, 0xf8, 0xba, 0xb9, 0x40, 0xff, 0xa4, 0xfb, + 0x6a, 0xd0, 0x98, 0x4d, 0x4d, 0xcc, 0x4a, 0x38, 0xcb, 0xa0, 0xf0, 0x08, 0x7d, 0xd7, 0x70, 0xca, + 0xdf, 0xe6, 0x16, 0xa2, 0xd1, 0x9d, 0xaf, 0xcc, 0xd7, 0x6c, 0x5a, 0xfd, 0xac, 0x42, 0xab, 0x16, + 0x33, 0xc0, 0x2a, 0x68, 0xdd, 0x58, 0xb2, 0x41, 0xc5, 0x05, 0x61, 0x09, 0x60, 0xc8, 0x72, 0x29, + 0xb8, 0x1c, 0x90, 0xc5, 0x02, 0x76, 0xdc, 0xcb, 0x45, 0x5c, 0x6c, 0x16, 0x37, 0xe6, 0x11, 0xce, + 0x4e, 0x2e, 0xfa, 0xf4, 0x2c, 0x4f, 0x80, 0x64, 0x85, 0xf4, 0xbd, 0x03, 0x03, 0xd2, 0x86, 0x3e, + 0x97, 0xbb, 0x07, 0x22, 0x82, 0x3f, 0xc8, 0xc5, 0xc4, 0x8d, 0x4f, 0x66, 0x18, 0xc7, 0x74, 0xe6, + 0x19, 0x5e, 0xe7, 0xc8, 0xc8, 0xfd, 0xb1, 0xc5, 0x51, 0xc4, 0x25, 0xec, 0x2d, 0x0b, 0xed, 0xd0, + 0x53, 0x5b, 0x5d, 0x80, 0x2c, 0x28, 0xd0, 0x19, 0xe2, 0x1d, 0xd8, 0x25, 0x1b, 0xb1, 0xb2, 0x99, + 0x26, 0x93, 0xec, 0x08, 0x14, 0x16, 0x60, 0x28, 0xeb, 0x88, 0x0a, 0x84, 0x2d, 0xde, 0x41, 0xe3, + 0x67, 0x0f, 0x74, 0x7c, 0xf9, 0xcc, 0x38, 0xea, 0xf8, 0xa7, 0x13, 0x53, 0xfb, 0xea, 0x8b, 0x50, + 0x9c, 0x37, 0xff, 0x23, 0x4a, 0xdf, 0xc5, 0xe0, 0x04, 0x72, 0x8d, 0x2b, 0xca, 0x1c, 0x2c, 0x33, + 0x7a, 0x3d, 0x25, 0xa3, 0x76, 0x15, 0xcb, 0x8d, 0xb8, 0x24, 0xa4, 0xa3, 0xf8, 0xc2, 0x69, 0x33, + 0x27, 0x58, 0x51, 0xd9, 0x3c, 0x4c, 0x3b, 0x4a, 0xd7, 0x4e, 0x0b, 0xb5, 0xe2, 0x68, 0xeb, 0xa3, + 0xf8, 0x93, 0xc1, 0x92, 0x58, 0xf4, 0xc2, 0xf6, 0x1e, 0x7d, 0xa3, 0x13, 0x20, 0x50, 0x5b, 0xb3, + 0x3f, 0x07, 0xb3, 0x7e, 0xf5, 0x71, 0x1e, 0xf0, 0x23, 0x97, 0x95, 0x64, 0x70, 0xc2, 0xb9, 0x4a, + 0x16, 0x9b, 0xbb, 0xaf, 0xd2, 0x1d, 0xc6, 0xae, 0x3f, 0xa2, 0x7e, 0x23, 0x55, 0xdc, 0x68, 0x64, + 0x56, 0x33, 0xf8, 0xd4, 0x4e, 0xf4, 0x94, 0x0c, 0x09, 0xea, 0xae, 0xaf, 0xfb, 0x12, 0x31, 0x72, + 0xda, 0xc1, 0x23, 0x72, 0xb7, 0x69, 0xe6, 0x2b, 0x36, 0x8b, 0xe5, 0xdc, 0xd5, 0xcd, 0x3e, 0xdd, + 0x2c, 0x8e, 0x72, 
0x23, 0xc8, 0x1f, 0x52, 0xea, 0x95, 0x21, 0xeb, 0xc6, 0x19, 0x9b, 0x6d, 0x80, + 0x4e, 0x3a, 0x5b, 0x2f, 0x3b, 0x81, 0x91, 0x12, 0xed, 0xbc, 0x45, 0x4f, 0x93, 0x5c, 0xdf, 0xf0, + 0xb7, 0x5a, 0xfd, 0x35, 0x54, 0xea, 0x68, 0x70, 0x4d, 0x4c, 0xb3, 0x56, 0x6c, 0x38, 0xbf, 0xaa, + 0x29, 0xe6, 0x9c, 0x78, 0x7f, 0x5e, 0x7b, 0xa3, 0x04, 0xeb, 0x3d, 0x25, 0xce, 0x56, 0x5e, 0x62, + 0x69, 0x87, 0xee, 0x7d, 0xf1, 0x1d, 0xb6, 0x1d, 0x7b, 0x4f, 0x47, 0x70, 0x5e, 0x06, 0x7b, 0x48, + 0x02, 0x1d, 0x01, 0xfd, 0xbb, 0xa6, 0xa3, 0x6f, 0x90, 0xe3, 0xb2, 0x10, 0xa1, 0xc9, 0x40, 0x96, + 0x6c, 0x4e, 0x35, 0x47, 0x71, 0x22, 0x80, 0x40, 0x52, 0xa8, 0x8f, 0x02, 0x62, 0x6a, 0xb5, 0x72, + 0xa0, 0x65, 0x55, 0xdc, 0x69, 0x63, 0x2e, 0xae, 0x9f, 0xcd, 0xa7, 0x3a, 0x32, 0x4a, 0x76, 0x03, + 0xc7, 0xf4, 0x7a, 0xde, 0x29, 0x1d, 0x7c, 0xad, 0x46, 0xe9, 0x90, 0x3b, 0xff, 0x4f, 0xa3, 0xe1, + 0x40, 0xe1, 0xe7, 0x2a, 0xd6, 0x2d, 0x6b, 0x23, 0x42, 0x1b, 0xe8, 0xdf, 0x76, 0xe6, 0x11, 0x7d, + 0xb2, 0xbe, 0xd1, 0x83, 0x81, 0x86, 0xb6, 0x5d, 0xc2, 0x29, 0xdf, 0xf4, 0xfe, 0x82, 0x14, 0x81, + 0xed, 0xe3, 0x77, 0xbc, 0xe3, 0x42, 0xba, 0x14, 0x82, 0x85, 0x84, 0xca, 0x00, 0x37, 0x0e, 0xbc, + 0x88, 0xa6, 0xa9, 0x63, 0x67, 0x3b, 0x9f, 0x42, 0xa2, 0x7a, 0xe4, 0x71, 0x11, 0xd0, 0x0a, 0xd8, + 0x2e, 0xcb, 0x95, 0xe9, 0x8f, 0xb3, 0x85, 0x17, 0x78, 0x4e, 0xa7, 0xce, 0x0d, 0xc4, 0x56, 0xf0, + 0x22, 0x0b, 0x65, 0xc0, 0xf4, 0x13, 0x55, 0x89, 0x00, 0x99, 0x7f, 0x19, 0xba, 0xa6, 0xe0, 0xa6, + 0xa0, 0x60, 0x27, 0xd0, 0x24, 0xb7, 0x69, 0x33, 0x95, 0xc8, 0x9b, 0x18, 0x38, 0x62, 0xc8, 0xde, + 0xef, 0xbe, 0x88, 0x5f, 0x21, 0x42, 0x0b, 0x59, 0x10, 0x0c, 0x9e, 0x9e, 0x66, 0x4a, 0xb7, 0xd6, + 0x4f, 0x7a, 0xa8, 0xcd, 0x20, 0x6a, 0x70, 0x3a, 0x3e, 0xc9, 0x3c, 0x4d, 0x35, 0xfe, 0xaa, 0xad, + 0x4f, 0x15, 0x77, 0x39, 0x29, 0x20, 0xac, 0x8a, 0x19, 0xdc, 0xd5, 0x61, 0x24, 0x59, 0x2a, 0x33, + 0xa5, 0xdb, 0x05, 0xcf, 0x93, 0x70, 0x77, 0xb3, 0x0e, 0xdf, 0xeb, 0x58, 0x78, 0xd4, 0x6c, 0xc3, + 0xe0, 0x7a, 0x09, 0xcf, 0xaa, 0x09, 0xaf, 0xbd, 0x2c, 
0x01, 0x09, 0x11, 0x20, 0x00, 0x57, 0x8a, + 0x32, 0xd7, 0xf9, 0x20, 0x19, 0xe8, 0x80, 0xf6, 0x96, 0xcf, 0xad, 0xf9, 0x2c, 0xe8, 0x4d, 0x6d, + 0xe3, 0xd4, 0xfc, 0x2e, 0x8a, 0xce, 0x4a, 0x06, 0x51, 0x20, 0x23, 0x58, 0xe5, 0x8a, 0xcb, 0xa1, + 0xcc, 0x12, 0x9f, 0x34, 0x17, 0x1e, 0x69, 0x66, 0x02, 0xeb, 0x2e, 0x71, 0x6b, 0x25, 0xde, 0x7c, + 0x96, 0x17, 0xca, 0xac, 0x43, 0x41, 0x22, 0x3b, 0x87, 0xb9, 0x46, 0x85, 0x20, 0xac, 0x75, 0xbb, + 0x0b, 0x48, 0x6c, 0x7f, 0xfe, 0x1b, 0xa5, 0x6c, 0x98, 0xfd, 0xb5, 0x8d, 0x93, 0x7a, 0xfb, 0x5b, + 0x22, 0x26, 0x25, 0xda, 0x92, 0x96, 0x41, 0xe7, 0x75, 0xaf, 0xf0, 0x32, 0xea, 0xaa, 0xad, 0xc1, + 0x5a, 0xb0, 0x78, 0xa7, 0x03, 0xdf, 0x57, 0xaf, 0xac, 0x69, 0xb3, 0xa3, 0xa9, 0x02, 0x9f, 0x31, + 0xd5, 0xcf, 0x39, 0xc0, 0xc4, 0x83, 0xda, 0xc2, 0xa4, 0x5f, 0x9a, 0x31, 0x90, 0xc8, 0xd6, 0x29, + 0x57, 0xf8, 0x31, 0xa6, 0x4a, 0x51, 0x80, 0x70, 0x12, 0x7f, 0x5d, 0xc0, 0x6f, 0x0e, 0x62, 0x99, + 0xc0, 0x03, 0xdb, 0x16, 0x0a, 0x06, 0x79, 0x9a, 0xd2, 0x84, 0xc5, 0x4e, 0xb9, 0x05, 0x0b, 0xb5, + 0x2a, 0xd2, 0x98, 0x8c, 0xf0, 0xd3, 0x43, 0xc6, 0xfd, 0x73, 0x3d, 0x96, 0x28, 0xe3, 0x18, 0xbc, + 0x6c, 0x2f, 0xfd, 0x10, 0x9a, 0x90, 0x13, 0x8c, 0x17, 0xfe, 0xe1, 0xec, 0xb8, 0x44, 0xe7, 0xed, + 0xcf, 0x01, 0xbb, 0x47, 0x08, 0xc1, 0x0c, 0x49, 0x22, 0xc4, 0x8b, 0x2a, 0xe8, 0x89, 0x6d, 0x01, + 0x17, 0xdc, 0x58, 0x94, 0x1c, 0x52, 0xa7, 0x7f, 0x19, 0xda, 0x79, 0x92, 0x40, 0xdb, 0x28, 0x93, + 0x1b, 0xdf, 0xdb, 0x4f, 0xc0, 0x10, 0x95, 0x6d, 0x81, 0x62, 0x8d, 0x0a, 0xbe, 0x3e, 0x3b, 0x53, + 0x59, 0xca, 0x9f, 0x70, 0xc8, 0x32, 0xc3, 0x39, 0xbe, 0x44, 0x99, 0x96, 0x02, 0x46, 0xa9, 0xa9, + 0xe4, 0xe2, 0xa6, 0x1f, 0xce, 0xf0, 0x3a, 0xdc, 0x42, 0xae, 0x6b, 0xa7, 0x95, 0xa1, 0x2a, 0x1f, + 0xa2, 0xd5, 0x44, 0x2a, 0x85, 0xd4, 0x43, 0x0d, 0xf6, 0xa6, 0xbd, 0xcc, 0xb0, 0xab, 0xd0, 0xf6, + 0x2f, 0xac, 0x2c, 0x61, 0xb0, 0x52, 0xba, 0xcf, 0x3f, 0xb5, 0xea, 0xdf, 0x9f, 0x46, 0xbf, 0x58, + 0x1b, 0xf9, 0x16, 0xdb, 0x60, 0xce, 0xea, 0xf5, 0x72, 0xc2, 0x74, 0x32, 0xae, 0x7b, 0x41, 
0x4d, + 0xa2, 0x33, 0x88, 0xf8, 0x7b, 0x89, 0xe0, 0x18, 0xe4, 0x7d, 0x6c, 0xab, 0xce, 0x9e, 0xb4, 0xcd, + 0xd2, 0x9a, 0xa5, 0x55, 0xfb, 0x83, 0x05, 0x9b, 0x06, 0x5a, 0xcf, 0xb7, 0x1a, 0xbe, 0xb9, 0x6a, + 0xe1, 0x0a, 0x48, 0x98, 0x25, 0xcd, 0xb8, 0xa6, 0x7e, 0x95, 0x22, 0xb4, 0x55, 0x2c, 0x21, 0x1c, + 0x07, 0xe7, 0x94, 0xe4, 0x78, 0x92, 0x09, 0x89, 0x05, 0xec, 0xf0, 0xce, 0x3f, 0x4f, 0x31, 0x30, + 0xb5, 0x61, 0x38, 0xce, 0x55, 0x54, 0x96, 0xf6, 0x5e, 0x42, 0xa0, 0xd7, 0xd4, 0x41, 0xd6, 0x4f, + 0x71, 0xc0, 0xc7, 0x45, 0x12, 0x89, 0x2c, 0x0d, 0x7e, 0xd2, 0xf9, 0x43, 0xaa, 0xa9, 0xeb, 0xc2, + 0x46, 0xa4, 0x97, 0xd9, 0x16, 0xb6, 0xa4, 0xd2, 0xeb, 0xfe, 0xbd, 0xcd, 0x62, 0xab, 0xbc, 0xc2, + 0xc4, 0x39, 0x07, 0x9f, 0x03, 0xed, 0x5c, 0x13, 0x5e, 0x92, 0x7c, 0x1a, 0xf3, 0xa6, 0x7f, 0x9a, + 0x07, 0x5a, 0xff, 0xa6, 0xbf, 0x57, 0xf9, 0xeb, 0xd2, 0x56, 0x78, 0x3f, 0x74, 0xb3, 0x2d, 0xbe, + 0xc9, 0x2d, 0xb2, 0x52, 0x5b, 0x7b, 0x79, 0x32, 0xb8, 0xfb, 0x5f, 0xfc, 0x3f, 0x62, 0x90, 0xe6, + 0x22, 0xe8, 0x5e, 0xed, 0x41, 0x4c, 0xb0, 0xf9, 0xe4, 0x7d, 0x6e, 0x96, 0x97, 0x8c, 0xa7, 0xf4, + 0xf1, 0xad, 0x3c, 0xa2, 0xdb, 0xa7, 0x8f, 0x81, 0xc0, 0xe5, 0xf6, 0x06, 0xd7, 0xae, 0xf5, 0x8b, + 0x66, 0xf9, 0x84, 0xec, 0x3f, 0xe6, 0x76, 0xce, 0x91, 0x64, 0xce, 0x1d, 0x78, 0x8b, 0x3e, 0x85, + 0xa5, 0x75, 0xd7, 0xcd, 0x6c, 0x57, 0x28, 0xd5, 0x6f, 0x62, 0x3d, 0x03, 0x47, 0x9e, 0xb5, 0xf8, + 0x12, 0x83, 0xdb, 0xf7, 0x3b, 0xf2, 0x8d, 0x03, 0x1e, 0x70, 0x53, 0x9e, 0x62, 0x54, 0x9d, 0xf6, + 0x94, 0x46, 0xdf, 0x68, 0xb8, 0xaa, 0x02, 0x19, 0xad, 0xfd, 0x3b, 0xf9, 0xdc, 0xb7, 0xe0, 0x78, + 0xf8, 0x83, 0x18, 0x1a, 0x42, 0x8c, 0x5b, 0x5e, 0x64, 0x28, 0x0c, 0xe5, 0xa7, 0x80, 0x8b, 0x07, + 0xdd, 0x93, 0x39, 0x76, 0x5b, 0x4a, 0x5c, 0x92, 0xab, 0xa6, 0xf6, 0xf4, 0x1e, 0x22, 0xc9, 0xb3, + 0x94, 0x55, 0x81, 0x32, 0xc2, 0xe0, 0x19, 0x64, 0x14, 0xe0, 0xd5, 0x4a, 0xb8, 0x0c, 0x21, 0xd1, + 0x7b, 0x38, 0x4b, 0x99, 0xff, 0x8e, 0xbb, 0x1d, 0xbb, 0xcc, 0xa4, 0xb7, 0x64, 0x30, 0xc7, 0x2b, + 0x11, 0x8f, 0xfc, 0xba, 0xb6, 
0xae, 0xf1, 0xbc, 0x24, 0x1b, 0x0e, 0x7e, 0x06, 0xd6, 0xbc, 0x27, + 0x3b, 0x7e, 0x3b, 0x08, 0xcb, 0xbb, 0x23, 0x51, 0x0a, 0x6e, 0xce, 0xf6, 0x07, 0x0b, 0xd1, 0x1a, + 0x04, 0xfc, 0x88, 0xb5, 0xf3, 0x01, 0x17, 0xfc, 0x99, 0xef, 0x2c, 0x20, 0x2f, 0x50, 0x9f, 0xd0, + 0xe6, 0xec, 0x46, 0x9e, 0xf2, 0x25, 0xed, 0x99, 0x84, 0x26, 0x64, 0xce, 0xca, 0xb9, 0x2e, 0xf3, + 0x45, 0xe7, 0x1e, 0x56, 0x87, 0x1a, 0x1f, 0x40, 0xd2, 0x5f, 0x9c, 0x46, 0x6a, 0x0b, 0xda, 0x6a, + 0x57, 0xbd, 0x74, 0x76, 0x0b, 0xbf, 0x5b, 0x5b, 0xcd, 0x6c, 0x4a, 0x34, 0x73, 0x18, 0x57, 0xa3, + 0x1b, 0x32, 0x44, 0xd9, 0x76, 0x53, 0x5b, 0xde, 0x92, 0x2d, 0xb4, 0xab, 0x90, 0xa3, 0x58, 0xc2, + 0x1e, 0x7d, 0xdf, 0x9e, 0x98, 0xdf, 0x70, 0x66, 0x88, 0xa5, 0x1c, 0xc7, 0xb8, 0x65, 0x12, 0x62, + 0x3e, 0x7f, 0x00, 0x14, 0xf9, 0x3f, 0x70, 0x90, 0xfa, 0x94, 0x4c, 0x6e, 0x32, 0x26, 0xc3, 0x97, + 0x98, 0xe2, 0xa5, 0x33, 0xb9, 0xa8, 0xec, 0x9e, 0x41, 0x16, 0xf6, 0xa4, 0x8b, 0x14, 0x61, 0x35, + 0xf2, 0xc5, 0xb4, 0xca, 0x90, 0xd1, 0xac, 0xef, 0x9a, 0x4c, 0x24, 0x19, 0x5d, 0x9b, 0x15, 0xa5, + 0xca, 0xd7, 0x1c, 0x7e, 0x8e, 0xc5, 0x50, 0x86, 0x64, 0x13, 0xbc, 0x2c, 0xf3, 0x77, 0xb2, 0x59, + 0xa8, 0x6e, 0x3f, 0x75, 0xb4, 0x8d, 0x1c, 0xad, 0xad, 0xf5, 0x76, 0x54, 0xc6, 0x00, 0x76, 0x94, + 0xfc, 0x88, 0x71, 0x33, 0xbc, 0xf4, 0xed, 0xa4, 0x31, 0x76, 0x66, 0x7f, 0x05, 0x57, 0xeb, 0xe8, + 0xb9, 0x25, 0xc0, 0x30, 0x2b, 0x0f, 0xe7, 0xa0, 0x96, 0xaf, 0x7e, 0x6a, 0xc4, 0x5a, 0x39, 0x4a, + 0xbc, 0x14, 0x7c, 0x6e, 0x00, 0xdf, 0x53, 0x8d, 0x97, 0x5a, 0xe2, 0x49, 0xe9, 0x89, 0x74, 0xff, + 0xec, 0x94, 0x22, 0xa5, 0x3a, 0xc5, 0xae, 0x14, 0xcd, 0xc3, 0x46, 0xf6, 0x17, 0x53, 0x2c, 0xcd, + 0x59, 0x94, 0xc7, 0x3c, 0xad, 0xdb, 0x43, 0xb0, 0x1d, 0x8e, 0x0d, 0xae, 0x1a, 0x04, 0xad, 0xa2, + 0x94, 0xe4, 0x90, 0x5c, 0x80, 0xa1, 0x42, 0xa2, 0x08, 0x61, 0xe3, 0x5a, 0x9e, 0x7c, 0xc4, 0x4d, + 0x18, 0x1b, 0x8d, 0x0f, 0x61, 0x09, 0x78, 0xbb, 0xc5, 0x98, 0xb1, 0xe0, 0x1d, 0x8d, 0x09, 0x74, + 0x7d, 0x26, 0xcb, 0x13, 0x21, 0x2d, 0x13, 0x2b, 0xd1, 0xc8, 0x05, 
0x2b, 0xf8, 0x29, 0x27, 0xb0, + 0xf9, 0x94, 0xbb, 0xa4, 0xaf, 0xf7, 0xea, 0x51, 0x47, 0x04, 0x86, 0x4e, 0x14, 0x01, 0xdb, 0xfa, + 0x9b, 0xee, 0x0c, 0x9f, 0x77, 0x8d, 0xb2, 0x2d, 0xb6, 0x30, 0x02, 0x91, 0x6e, 0x8f, 0x53, 0xe0, + 0x44, 0x8f, 0xee, 0xd8, 0x35, 0x0b, 0x94, 0xa1, 0x6a, 0x8b, 0xf5, 0xd3, 0x2c, 0xd1, 0x3d, 0xe3, + 0xfb, 0x56, 0xb9, 0x02, 0x7a, 0x85, 0xc7, 0x3d, 0x64, 0x64, 0x46, 0x47, 0x14, 0x5c, 0xe4, 0xcc, + 0xb0, 0x16, 0xb3, 0x0d, 0xa7, 0x8d, 0xf5, 0xc9, 0xa5, 0x83, 0xc9, 0x66, 0x64, 0x19, 0x0d, 0x32, + 0x3d, 0x10, 0xc2, 0xc0, 0x8b, 0x12, 0xb3, 0x90, 0xf4, 0x6c, 0x34, 0x39, 0x24, 0x89, 0x93, 0x26, + 0x49, 0x79, 0xd8, 0x9f, 0x6c, 0x44, 0x02, 0x8f, 0xd8, 0x22, 0x1b, 0x6f, 0xf3, 0xb7, 0xf1, 0x8b, + 0x99, 0x90, 0x0f, 0x95, 0xb8, 0x92, 0x23, 0x1a, 0x20, 0xa3, 0x74, 0x6f, 0x40, 0x8a, 0xaf, 0x6a, + 0x33, 0xf1, 0xf0, 0x5a, 0xe9, 0x50, 0x58, 0x0b, 0x76, 0x87, 0xe7, 0x42, 0x34, 0x3a, 0x50, 0xff, + 0x10, 0x0d, 0x91, 0xc2, 0x63, 0x35, 0x51, 0xde, 0x67, 0xaa, 0x41, 0xdc, 0x0d, 0x0a, 0x6a, 0xf4, + 0x07, 0xe6, 0xd8, 0xe1, 0xff, 0x01, 0x33, 0x10, 0x7f, 0xc8, 0x1d, 0x30, 0x3a, 0xc5, 0xce, 0x72, + 0xcb, 0x05, 0x9d, 0x2f, 0xcb, 0x48, 0xf2, 0xc5, 0x7d, 0xfb, 0x4c, 0xe5, 0x64, 0x63, 0x26, 0x18, + 0x95, 0x6c, 0x87, 0x13, 0xcd, 0x44, 0x26, 0x9b, 0x31, 0x02, 0xcf, 0xee, 0x65, 0xf6, 0x1c, 0x49, + 0x1e, 0xd3, 0xb5, 0x91, 0xc1, 0x1e, 0xe9, 0xf2, 0x81, 0x87, 0x55, 0x6c, 0x18, 0xaf, 0xaf, 0x93, + 0x8b, 0x86, 0xf2, 0xe9, 0x69, 0x13, 0xe2, 0x25, 0x1f, 0x32, 0xc4, 0x36, 0xa2, 0xfd, 0xdb, 0x6b, + 0x93, 0x2d, 0x15, 0xec, 0x80, 0x55, 0xa8, 0x58, 0x4f, 0x1f, 0xd8, 0xbe, 0x2b, 0x8e, 0x26, 0x06, + 0xf3, 0x73, 0x2a, 0xae, 0x87, 0xe4, 0x95, 0x7b, 0xb3, 0x8b, 0xb1, 0x0f, 0xe3, 0x9d, 0x47, 0x67, + 0x4f, 0x1d, 0xae, 0xd8, 0xe0, 0x76, 0x1a, 0xc2, 0x0b, 0x3e, 0x89, 0x22, 0x62, 0xdc, 0x15, 0x05, + 0x15, 0x51, 0x22, 0x2c, 0xce, 0x2f, 0xe2, 0x99, 0x74, 0x75, 0xc3, 0x7e, 0xcd, 0x66, 0x4e, 0xdf, + 0x97, 0x95, 0xea, 0xfa, 0x54, 0xae, 0x01, 0x58, 0x4a, 0xa6, 0x90, 0xfe, 0x6a, 0xe5, 0xf8, 0xce, + 0x78, 
0x13, 0x1b, 0x20, 0x55, 0x33, 0xaf, 0xc7, 0x0a, 0x96, 0x14, 0x99, 0xb4, 0x22, 0xec, 0xcf, + 0x3c, 0x6a, 0x5c, 0x9d, 0x46, 0x92, 0x81, 0xee, 0x72, 0x7a, 0x6c, 0x5c, 0xe5, 0xa1, 0xcc, 0x5c, + 0x7b, 0x99, 0xae, 0x53, 0x3d, 0x05, 0xaf, 0x21, 0xf2, 0x4b, 0x6a, 0xf8, 0xd6, 0xc5, 0xce, 0xf9, + 0x15, 0xce, 0xc8, 0xa5, 0x37, 0x58, 0x3c, 0xe1, 0x83, 0xd4, 0xbe, 0x3e, 0x1e, 0x7a, 0x6e, 0x9e, + 0x6a, 0x94, 0x03, 0xa7, 0x25, 0x9c, 0x1c, 0x26, 0x84, 0x8e, 0xc4, 0xf1, 0x52, 0x8d, 0xc7, 0x76, + 0xd7, 0xa4, 0x7f, 0xc2, 0x52, 0x5c, 0x6b, 0x3a, 0xb3, 0xb2, 0xa9, 0x9a, 0x4b, 0xff, 0xc1, 0x89, + 0x99, 0xc5, 0x77, 0xac, 0x0d, 0x09, 0x69, 0xde, 0x50, 0x49, 0x03, 0xd2, 0xf7, 0x7a, 0xc9, 0xe9, + 0x48, 0x9f, 0x66, 0xa3, 0x91, 0x0d, 0x8e, 0x4f, 0xe1, 0x70, 0xc8, 0x74, 0x93, 0xd8, 0x76, 0x2b, + 0x9f, 0x4f, 0x15, 0xd5, 0xff, 0xb0, 0x5a, 0x4f, 0x06, 0xaa, 0xe0, 0xca, 0xdd, 0x0b, 0xd7, 0x6d, + 0x28, 0xa7, 0x20, 0x32, 0x6b, 0x20, 0x57, 0x51, 0x15, 0xbc, 0xc0, 0xc7, 0xa2, 0x21, 0xfa, 0x92, + 0x45, 0xf0, 0x24, 0x88, 0xc3, 0x22, 0x65, 0x32, 0x27, 0x45, 0x96, 0x1b, 0x6f, 0xdb, 0x8b, 0x22, + 0x17, 0x78, 0xa0, 0x78, 0xe1, 0xd5, 0x6a, 0x9e, 0x6a, 0xc9, 0xde, 0xe3, 0x71, 0x1b, 0x9d, 0x31, + 0x27, 0xb2, 0x25, 0x80, 0xfd, 0x47, 0x85, 0xa7, 0xb7, 0xcd, 0x63, 0xb3, 0x54, 0xc7, 0xf2, 0x53, + 0xbf, 0x22, 0x58, 0x95, 0xc4, 0x39, 0x19, 0x9d, 0xce, 0xbe, 0x54, 0xd5, 0x58, 0x68, 0x01, 0xf8, + 0x0d, 0x28, 0xaa, 0xf8, 0x27, 0x71, 0x68, 0x3b, 0x13, 0x53, 0x07, 0xd4, 0x42, 0xb0, 0x02, 0x66, + 0x35, 0x2b, 0xec, 0x62, 0x84, 0x85, 0x2b, 0x2c, 0xe7, 0x09, 0xa5, 0xe6, 0x1a, 0x77, 0x18, 0x28, + 0x94, 0xff, 0x1b, 0x3e, 0xcf, 0xdd, 0x21, 0x2a, 0xe2, 0x49, 0xa4, 0x27, 0xcf, 0x3a, 0x72, 0xcc, + 0x3e, 0xbe, 0x24, 0x61, 0xe2, 0x43, 0x4b, 0x3e, 0xcb, 0xe5, 0x18, 0x63, 0xfc, 0xd0, 0xb3, 0x49, + 0xcc, 0xd1, 0xce, 0xd5, 0x1d, 0x38, 0x72, 0x07, 0xbc, 0xa5, 0x68, 0xa5, 0xb1, 0x30, 0xc7, 0x5b, + 0xfc, 0x15, 0xcf, 0xf5, 0xa0, 0xf7, 0xe9, 0x38, 0x7d, 0xd3, 0xcb, 0xc0, 0x77, 0x16, 0x2a, 0x37, + 0xff, 0x62, 0x09, 0x5c, 0xe7, 0x5e, 0x5b, 
0xfc, 0xaf, 0xcc, 0xe4, 0xcf, 0x63, 0x13, 0xb0, 0x53, + 0xbf, 0xf2, 0x94, 0x76, 0xb5, 0xd3, 0x60, 0x72, 0x0c, 0xf1, 0x71, 0x43, 0xa0, 0x04, 0xaa, 0xe5, + 0x87, 0x8c, 0x57, 0x66, 0x20, 0xe2, 0x9c, 0x39, 0xb4, 0xc0, 0xb4, 0x40, 0x55, 0x34, 0xe6, 0x31, + 0x75, 0x03, 0xdf, 0xf3, 0x5c, 0xd8, 0x15, 0x16, 0x35, 0x40, 0xc5, 0xcf, 0xc7, 0x51, 0x3b, 0x03, + 0xb4, 0x8f, 0x21, 0x96, 0x3d, 0x4f, 0x32, 0xb8, 0x05, 0xdf, 0x66, 0xb4, 0xcd, 0x42, 0xd4, 0x36, + 0x2e, 0x2d, 0x73, 0x76, 0xc5, 0x59, 0x92, 0xe7, 0x0f, 0xe6, 0x42, 0x1f, 0x34, 0xc6, 0x9c, 0x28, + 0x5e, 0xee, 0x14, 0x24, 0xd6, 0x66, 0xa9, 0x1a, 0xd0, 0xd5, 0x60, 0xa2, 0xc0, 0x73, 0x30, 0x1a, + 0x40, 0xc3, 0xf3, 0x77, 0x8b, 0x96, 0xef, 0xcb, 0x30, 0x83, 0x09, 0x62, 0x08, 0x1e, 0x50, 0x2d, + 0x26, 0xde, 0xaa, 0xa4, 0x74, 0x85, 0x5b, 0xd3, 0x4d, 0xbe, 0x70, 0x03, 0x26, 0xf6, 0x59, 0x0c, + 0x3e, 0x06, 0x5a, 0xfb, 0xfb, 0xd1, 0x3b, 0x32, 0xc9, 0x71, 0x67, 0x89, 0x38, 0x07, 0xce, 0x24, + 0xe3, 0x9e, 0x26, 0x06, 0x66, 0x06, 0xbb, 0x90, 0x1e, 0x67, 0xd5, 0x00, 0x3e, 0x8b, 0xeb, 0x49, + 0xce, 0xdf, 0x3d, 0xb6, 0x54, 0x4c, 0xef, 0xd2, 0x98, 0x7c, 0x49, 0x49, 0xd9, 0xb8, 0x06, 0xac, + 0x89, 0xa8, 0x94, 0x78, 0xe5, 0x83, 0x04, 0x49, 0xfb, 0x5a, 0x87, 0x7d, 0x10, 0x9c, 0x56, 0x3f, + 0x42, 0xd6, 0x6f, 0x97, 0x66, 0xe1, 0x18, 0xbc, 0x71, 0xc1, 0x03, 0x24, 0xaf, 0xdf, 0x36, 0x04, + 0x66, 0x02, 0x55, 0xd8, 0xae, 0x19, 0x96, 0x48, 0xc5, 0x6f, 0x4c, 0x7c, 0x34, 0x2c, 0x01, 0x24, + 0xd0, 0x49, 0xf5, 0x85, 0x76, 0xcc, 0x69, 0x56, 0x7a, 0x2f, 0x57, 0x3c, 0x5c, 0x81, 0x76, 0x73, + 0x13, 0x84, 0xa5, 0xd7, 0x3a, 0xfa, 0x3c, 0x84, 0x67, 0x3e, 0x11, 0x1d, 0x34, 0xe9, 0x33, 0x0b, + 0x47, 0x96, 0x02, 0x92, 0x4e, 0x43, 0x0b, 0xab, 0x56, 0x64, 0x53, 0xdf, 0x1d, 0x37, 0x1a, 0x57, + 0x00, 0x7d, 0x0d, 0x99, 0x1a, 0x7c, 0x6d, 0x68, 0xf7, 0xb2, 0x7e, 0x07, 0xeb, 0x65, 0xf4, 0x4c, + 0xbb, 0x2d, 0xe7, 0xd9, 0xc7, 0xa7, 0x52, 0x58, 0x36, 0x27, 0x2a, 0x51, 0xd9, 0x0e, 0x6b, 0x70, + 0xfe, 0xb9, 0xa2, 0x34, 0x41, 0x72, 0x68, 0xc5, 0x9c, 0xcc, 0xd4, 0x7a, 0x90, 
0xf0, 0x62, 0xa0, + 0xf6, 0x05, 0x4b, 0xd6, 0x70, 0x9c, 0x04, 0xd9, 0x76, 0xde, 0xb6, 0x09, 0xb4, 0xc5, 0x24, 0x4b, + 0x8e, 0x79, 0x11, 0x91, 0xaf, 0x89, 0x10, 0x68, 0x8c, 0xed, 0xb5, 0xf2, 0x39, 0x8d, 0xe8, 0x0d, + 0xed, 0xb9, 0x22, 0x20, 0xe0, 0x45, 0x8a, 0xc2, 0x7d, 0x23, 0xb2, 0xb0, 0xb2, 0xde, 0xdb, 0x0f, + 0xa1, 0x6b, 0x8b, 0xf0, 0x94, 0x8b, 0xa5, 0x40, 0x1b, 0x2b, 0xcb, 0x41, 0x35, 0x39, 0x28, 0x3d, + 0x4e, 0x13, 0x6b, 0x2c, 0xbf, 0xa7, 0x6d, 0xd0, 0x11, 0xdf, 0x43, 0xd6, 0xf3, 0xc5, 0x54, 0x79, + 0x86, 0x07, 0x7c, 0xef, 0x1a, 0x51, 0xc3, 0xb2, 0xc6, 0xaa, 0x04, 0x68, 0xfb, 0xcb, 0xf0, 0x1b, + 0x1f, 0xf3, 0x45, 0xe0, 0x6e, 0x6d, 0xab, 0xb7, 0x7c, 0x42, 0x58, 0xc9, 0xbb, 0x35, 0xd9, 0x1f, + 0x9a, 0x88, 0x26, 0x12, 0x54, 0xda, 0x1d, 0x0d, 0xc4, 0x3e, 0x50, 0xd5, 0x17, 0x00, 0x08, 0x54, + 0xd3, 0x11, 0x01, 0xea, 0xb4, 0x47, 0xd7, 0x5c, 0x8f, 0x7a, 0x58, 0xfb, 0x07, 0x2a, 0xb7, 0x53, + 0xd6, 0x2b, 0x59, 0x13, 0xaf, 0x78, 0x22, 0x70, 0x1f, 0x10, 0xe3, 0x48, 0xae, 0x4f, 0xda, 0x98, + 0xca, 0xdd, 0x53, 0xab, 0xb8, 0x02, 0xa5, 0x95, 0xc8, 0xe0, 0x1c, 0x99, 0xf6, 0x9c, 0x18, 0x55, + 0x18, 0xcf, 0x67, 0x91, 0x46, 0xbb, 0x21, 0x1b, 0xea, 0x26, 0xbd, 0x5f, 0x90, 0x00, 0x7b, 0xbc, + 0xe5, 0x6f, 0xa0, 0xaf, 0xd8, 0xe6, 0xcd, 0x18, 0x75, 0x50, 0x3c, 0x08, 0x98, 0x56, 0x67, 0xfd, + 0x1a, 0x58, 0x64, 0xc0, 0x89, 0x11, 0xf0, 0x39, 0x65, 0x8a, 0x5f, 0x8c, 0x04, 0xd3, 0x93, 0x83, + 0xf0, 0x7d, 0xd8, 0xdf, 0xee, 0x0a, 0x2e, 0x8b, 0xc2, 0x12, 0x0e, 0x21, 0xe0, 0x75, 0xe2, 0x1d, + 0x6c, 0x22, 0x08, 0x5a, 0xaf, 0xdb, 0x17, 0x5a, 0x29, 0xc7, 0x76, 0xa8, 0xfc, 0x9a, 0x74, 0x1a, + 0xbd, 0xfe, 0x89, 0xc6, 0x23, 0xf8, 0x09, 0x58, 0xfe, 0xf2, 0x9d, 0xf0, 0xc3, 0x3e, 0xa9, 0x06, + 0x57, 0x2d, 0x5f, 0x41, 0x76, 0xd7, 0xa9, 0xec, 0x3b, 0x08, 0xac, 0x3d, 0x62, 0x0e, 0x66, 0x25, + 0xca, 0x2f, 0x10, 0xc5, 0xc7, 0x47, 0x2b, 0xc2, 0x3a, 0xda, 0x69, 0x55, 0xe6, 0x88, 0xb6, 0x4d, + 0x82, 0x0e, 0xe7, 0x40, 0x95, 0x2e, 0xe6, 0x6b, 0x4f, 0xb3, 0xc3, 0x30, 0x1e, 0x90, 0x44, 0x42, + 0xef, 0x46, 0x53, 
0xf2, 0x46, 0xe7, 0xb5, 0x3b, 0x12, 0xae, 0x20, 0x99, 0xa0, 0xfc, 0x65, 0x3c, + 0x80, 0xec, 0x5c, 0xf5, 0x56, 0x9c, 0x94, 0x15, 0xb1, 0xa8, 0xe3, 0xa1, 0xde, 0xd7, 0xdc, 0x67, + 0xa0, 0x89, 0x4a, 0x04, 0x00, 0x4f, 0x8f, 0xb4, 0xe6, 0x81, 0xb5, 0x4f, 0x36, 0xcb, 0xa8, 0x71, + 0xcd, 0x33, 0xc6, 0x16, 0x0c, 0x9e, 0xa8, 0x2e, 0x4b, 0x56, 0x04, 0x5f, 0x24, 0x6b, 0x0a, 0x02, + 0x92, 0x36, 0x67, 0xa2, 0x01, 0xb8, 0xde, 0x46, 0x20, 0x27, 0x69, 0x1c, 0x3c, 0x96, 0x82, 0x60, + 0x01, 0xda, 0xa7, 0x19, 0xfd, 0x00, 0xab, 0x54, 0x9b, 0x66, 0xf8, 0xa8, 0xe3, 0x7d, 0xe8, 0x94, + 0x5e, 0xc4, 0xc2, 0x34, 0x5d, 0xa9, 0x91, 0x41, 0x7e, 0xa6, 0xe5, 0x84, 0xcb, 0x3d, 0x10, 0xbf, + 0xab, 0x02, 0xd1, 0x9d, 0xc3, 0xae, 0xe3, 0x0b, 0x03, 0x08, 0x19, 0x77, 0x4e, 0xb5, 0x55, 0x35, + 0x42, 0xc1, 0x03, 0x99, 0x3c, 0xd6, 0x33, 0x7a, 0x58, 0xb9, 0xcc, 0x23, 0x71, 0x3c, 0x67, 0xab, + 0x33, 0x26, 0xf5, 0x68, 0xe6, 0xb2, 0x23, 0x89, 0x1b, 0xd9, 0xb6, 0xf3, 0x5b, 0xba, 0xea, 0x41, + 0xb1, 0xd9, 0x7b, 0xc5, 0xe9, 0xeb, 0xec, 0x45, 0x09, 0x4b, 0x1f, 0x6c, 0x17, 0x54, 0x3f, 0x2a, + 0x68, 0xf5, 0xe9, 0xdb, 0xcb, 0xa4, 0x99, 0x0a, 0xae, 0x4b, 0xe4, 0x9f, 0x09, 0x84, 0xd1, 0x82, + 0x79, 0xf3, 0x8c, 0xd5, 0x10, 0x6d, 0x79, 0xc9, 0x2d, 0xe3, 0x47, 0xfd, 0x81, 0x6a, 0x9a, 0x38, + 0x3d, 0x7b, 0x25, 0x5d, 0x96, 0x35, 0x3a, 0x59, 0xbb, 0xb3, 0x6b, 0x31, 0x03, 0x43, 0xf0, 0x9b, + 0x72, 0xe5, 0xed, 0x2a, 0x18, 0x2f, 0xb4, 0x5f, 0x87, 0x54, 0x0e, 0x06, 0xc4, 0x58, 0x85, 0x80, + 0x29, 0xda, 0x85, 0x21, 0x2f, 0x11, 0x8b, 0x3c, 0x9f, 0xde, 0xb3, 0x53, 0x7e, 0x12, 0x19, 0x01, + 0xc9, 0xb7, 0x51, 0x35, 0x7d, 0x79, 0x38, 0x0e, 0xc2, 0xc6, 0x66, 0xab, 0x14, 0xba, 0x94, 0xff, + 0x64, 0x0b, 0xa7, 0x0e, 0x92, 0x6c, 0x55, 0x0a, 0xd0, 0x7a, 0xae, 0x88, 0x99, 0xaa, 0x52, 0x06, + 0x43, 0x3b, 0xa6, 0xef, 0x2b, 0x4e, 0xa1, 0xc9, 0xdf, 0x47, 0x26, 0xc1, 0x62, 0x7d, 0xe3, 0x66, + 0x40, 0x9c, 0x87, 0x2f, 0xf8, 0xd6, 0xe9, 0x3a, 0x51, 0xff, 0xd6, 0x68, 0xf8, 0x72, 0xf1, 0xcc, + 0xb7, 0x37, 0x95, 0x19, 0xdf, 0x4e, 0x39, 0x6f, 0x5a, 
0x73, 0xe6, 0xc5, 0x37, 0x94, 0xc9, 0xb3, + 0xf1, 0x39, 0xfa, 0x1b, 0x15, 0x28, 0xd6, 0x25, 0xaf, 0x71, 0x5f, 0x51, 0x65, 0x3a, 0xd1, 0xc6, + 0xa4, 0x87, 0x88, 0x9d, 0xee, 0x9c, 0x56, 0x8a, 0xd5, 0xe2, 0xd6, 0x40, 0x4a, 0xfb, 0x2b, 0x7c, + 0xb4, 0x6f, 0xef, 0x21, 0x5d, 0x40, 0x74, 0x0b, 0xee, 0x59, 0x53, 0xa3, 0x45, 0x5e, 0x33, 0x97, + 0x59, 0xec, 0x8c, 0x6b, 0x97, 0xf8, 0xa3, 0x4c, 0xb4, 0xea, 0x0c, 0x27, 0x04, 0xc9, 0xb7, 0xb6, + 0xe2, 0x1b, 0xd6, 0x6a, 0xec, 0x60, 0x89, 0x10, 0xc2, 0xd1, 0x77, 0xc1, 0x26, 0xd6, 0xf5, 0x15, + 0xa2, 0x5c, 0x83, 0xbd, 0xe2, 0x92, 0x29, 0x18, 0x51, 0xc8, 0x2c, 0x61, 0xef, 0x90, 0x9c, 0xfa, + 0x2c, 0xd2, 0xee, 0x4b, 0x9f, 0x4b, 0xea, 0x13, 0x06, 0xde, 0x84, 0x43, 0x83, 0x4e, 0x65, 0xbf, + 0x12, 0x02, 0x54, 0xf5, 0xb8, 0x7e, 0x87, 0x6a, 0x98, 0xd9, 0xb3, 0x0d, 0xd9, 0xd1, 0x87, 0x96, + 0x9a, 0xaa, 0x93, 0x05, 0xc6, 0x13, 0x9b, 0xba, 0x23, 0x63, 0x41, 0x80, 0xf9, 0x91, 0x3b, 0xa2, + 0xfd, 0xd7, 0xa3, 0x0c, 0x33, 0x7c, 0x3b, 0xc9, 0x34, 0x54, 0x74, 0xf4, 0xb5, 0x9e, 0xd5, 0x2b, + 0xb1, 0xaa, 0x0f, 0x86, 0x53, 0x1c, 0x66, 0x6f, 0x6a, 0x38, 0x3d, 0x02, 0x20, 0xb7, 0xab, 0x9e, + 0x53, 0x15, 0x19, 0x2e, 0xdd, 0xd3, 0x82, 0xf5, 0xb5, 0x69, 0x6a, 0x97, 0x47, 0xe9, 0x04, 0xea, + 0x34, 0x2d, 0x67, 0xec, 0x82, 0x0f, 0x36, 0xd1, 0x79, 0x96, 0x89, 0xa1, 0x4d, 0x73, 0x1c, 0x7a, + 0x78, 0xf5, 0xe9, 0x62, 0x9d, 0x87, 0x93, 0x50, 0x00, 0x97, 0x75, 0x46, 0xc1, 0x9c, 0x66, 0x16, + 0x72, 0xab, 0x67, 0x22, 0xb2, 0x6b, 0x0c, 0x23, 0x88, 0x01, 0xcc, 0xc3, 0xf0, 0x1e, 0xf0, 0x9d, + 0x9e, 0x5a, 0xd3, 0xe2, 0x0f, 0x59, 0x18, 0x38, 0xb5, 0x38, 0x6d, 0x9c, 0x5c, 0xf8, 0xe0, 0xa9, + 0x2d, 0x1a, 0x72, 0x38, 0x35, 0xf3, 0x8a, 0x92, 0x4a, 0xc7, 0xba, 0x5a, 0xf1, 0x5a, 0x85, 0x2f, + 0x13, 0x2a, 0x93, 0xaf, 0x12, 0xd3, 0x83, 0xcb, 0xb0, 0x40, 0x3d, 0xee, 0x3b, 0x6a, 0x6e, 0x3e, + 0xb2, 0xd2, 0x7e, 0xb7, 0x07, 0x1e, 0x54, 0xc8, 0x8e, 0xc0, 0xf7, 0x23, 0xb1, 0xa0, 0xbd, 0x78, + 0xb7, 0x8b, 0x83, 0x59, 0x2d, 0xd5, 0x3b, 0x5f, 0x44, 0x3a, 0x63, 0x8b, 0x88, 0x84, 0x69, 
0x3c, + 0x7b, 0xae, 0xfe, 0x21, 0xe8, 0xff, 0x03, 0xd8, 0x23, 0xc3, 0x83, 0x31, 0x06, 0xe2, 0x3f, 0x8a, + 0x21, 0x14, 0xee, 0x4b, 0xd3, 0xca, 0xb1, 0x0a, 0xd9, 0xc7, 0x31, 0xbf, 0xbc, 0xab, 0x35, 0x16, + 0x0e, 0x13, 0x5d, 0x40, 0xc2, 0xb1, 0xe5, 0xd3, 0xb2, 0x76, 0xc5, 0x38, 0x48, 0x49, 0xcc, 0x97, + 0x70, 0x35, 0xa1, 0x0d, 0x6e, 0xc6, 0xa8, 0x29, 0xe6, 0xaa, 0xc2, 0xe1, 0x63, 0x95, 0x5b, 0x77, + 0x63, 0x7f, 0x09, 0xf5, 0x66, 0xd1, 0xe7, 0x60, 0x97, 0x53, 0x05, 0x22, 0x1d, 0x77, 0x07, 0xc6, + 0x30, 0xa0, 0x9e, 0x78, 0x7f, 0x92, 0xd2, 0x76, 0x9c, 0x63, 0x9d, 0xf3, 0xa3, 0x6f, 0xb0, 0xb8, + 0x30, 0x73, 0xda, 0xbd, 0x50, 0xad, 0xa8, 0x30, 0xa4, 0x17, 0x38, 0x6d, 0x57, 0x17, 0x9d, 0x09, + 0xa6, 0x7c, 0x4a, 0x30, 0xcc, 0xcc, 0x89, 0x16, 0x11, 0x81, 0x61, 0x29, 0x9b, 0x61, 0x07, 0x89, + 0x82, 0x28, 0x1c, 0xc9, 0x6b, 0xca, 0x75, 0x83, 0x55, 0xe8, 0x30, 0xe1, 0x63, 0xab, 0xa8, 0x7e, + 0x6e, 0x6a, 0xa8, 0xdc, 0x89, 0x03, 0x57, 0xfe, 0x32, 0x0a, 0xfa, 0xdd, 0x87, 0xa3, 0xc2, 0x1d, + 0x41, 0xc7, 0x55, 0x94, 0x38, 0x5d, 0xec, 0x4c, 0x06, 0xf8, 0xd5, 0x29, 0xa6, 0x64, 0x4b, 0x93, + 0x38, 0x14, 0x63, 0x51, 0x53, 0x03, 0x09, 0x4b, 0x67, 0x74, 0x3d, 0x41, 0xc6, 0x5d, 0x69, 0xc9, + 0xd2, 0x76, 0xba, 0x1a, 0x07, 0x6f, 0x72, 0x85, 0xb1, 0xb2, 0x1a, 0x9c, 0xe7, 0xed, 0xf5, 0x1e, + 0xd6, 0xe7, 0x5f, 0xc7, 0x7d, 0x64, 0x35, 0xc7, 0x10, 0xc7, 0xae, 0x93, 0x87, 0x54, 0x49, 0xdb, + 0x1e, 0x7d, 0xaa, 0x94, 0x3e, 0x5b, 0xf3, 0x79, 0x72, 0xcf, 0xba, 0xa2, 0x59, 0x42, 0x7c, 0xad, + 0x0f, 0x19, 0x6b, 0xe8, 0xef, 0xa7, 0x01, 0x98, 0xce, 0x34, 0xf6, 0xf0, 0xfc, 0x8d, 0xef, 0x32, + 0x10, 0x57, 0x25, 0xdc, 0xb6, 0x43, 0x46, 0xd3, 0xf7, 0xf4, 0x5c, 0xd1, 0x6c, 0xc8, 0x64, 0xc5, + 0x44, 0x55, 0x28, 0x10, 0x3d, 0x19, 0x40, 0xf1, 0xba, 0xd2, 0x47, 0x9d, 0xf0, 0x8e, 0x0a, 0x61, + 0x04, 0x71, 0x2f, 0x32, 0xec, 0x9b, 0x91, 0xa9, 0x6a, 0x73, 0x1c, 0xc5, 0xd7, 0x25, 0x07, 0xaf, + 0x8e, 0x6e, 0x98, 0x09, 0xd4, 0xa7, 0xa7, 0xf7, 0x27, 0x55, 0x42, 0xe3, 0xf7, 0xf5, 0xf1, 0x15, + 0x3b, 0x3c, 0x0e, 0x4d, 0x99, 
0x86, 0xb1, 0xd4, 0x9c, 0x2e, 0x28, 0x6a, 0xb5, 0xa9, 0xe1, 0x92, + 0xfe, 0x9d, 0xb7, 0x4f, 0x86, 0x4b, 0x21, 0x75, 0xee, 0xb8, 0xc6, 0xff, 0xe5, 0x0a, 0x9b, 0x1b, + 0x1e, 0xf6, 0x8a, 0x9b, 0x6d, 0xb1, 0x76, 0x93, 0x2d, 0x8c, 0xf7, 0x61, 0x51, 0x34, 0x14, 0x16, + 0xd3, 0xb9, 0xde, 0x38, 0x50, 0x03, 0xc2, 0x3c, 0x5d, 0xf9, 0xc2, 0xac, 0x27, 0x14, 0x65, 0x4f, + 0xbd, 0x56, 0x67, 0xc5, 0x1a, 0xe6, 0xb4, 0x95, 0x1a, 0x73, 0x1f, 0x44, 0x4d, 0xc1, 0x96, 0xba, + 0xef, 0x59, 0x2d, 0x05, 0x7c, 0xda, 0xbc, 0x3d, 0x4d, 0x34, 0x5d, 0x2f, 0xfb, 0xa3, 0x9e, 0xe9, + 0xa8, 0x76, 0x86, 0x05, 0xbd, 0xa2, 0xdc, 0xca, 0x83, 0x01, 0xf5, 0xb7, 0x6f, 0x50, 0xf9, 0xa7, + 0xbe, 0xc1, 0xe1, 0xb4, 0x24, 0x1e, 0x42, 0xdd, 0xb2, 0xc2, 0x35, 0x12, 0xb4, 0xab, 0x60, 0x20, + 0x9e, 0x10, 0x22, 0x98, 0xc6, 0xdf, 0x87, 0xcb, 0xd8, 0x9b, 0xab, 0xae, 0x9d, 0xd9, 0xa8, 0x63, + 0x51, 0x29, 0x17, 0xdb, 0x28, 0xe5, 0x89, 0xc9, 0x9f, 0x3d, 0x8b, 0xbe, 0x4f, 0x54, 0xf2, 0xf0, + 0xfd, 0xa5, 0x61, 0xa7, 0x06, 0x69, 0xe4, 0x37, 0x12, 0x86, 0x5a, 0x37, 0x3a, 0x1f, 0x04, 0xb1, + 0x2e, 0xa3, 0x8b, 0x03, 0x5f, 0xfa, 0xc1, 0xdd, 0x4c, 0xf8, 0x64, 0xa7, 0x9e, 0xb6, 0x45, 0x6a, + 0x93, 0xb2, 0xc0, 0x0f, 0x60, 0x31, 0x1d, 0xe7, 0xf5, 0xf6, 0x2d, 0x38, 0xf6, 0x72, 0xe5, 0xcf, + 0x2d, 0xe7, 0x44, 0x96, 0x74, 0x19, 0xd9, 0x70, 0x9e, 0x76, 0x7b, 0x74, 0x8a, 0x8b, 0x5a, 0x88, + 0xf1, 0x85, 0xc3, 0x87, 0x87, 0x27, 0x0e, 0x24, 0x03, 0x09, 0x15, 0x30, 0x8c, 0x15, 0x09, 0x5f, + 0x80, 0x4f, 0xdb, 0xa7, 0x93, 0x6c, 0x8b, 0x26, 0xba, 0x6e, 0xc6, 0xba, 0xf8, 0x68, 0x2b, 0xe6, + 0x52, 0x34, 0x00, 0x40, 0x9e, 0xe2, 0x33, 0xdf, 0xb6, 0xd2, 0xac, 0x24, 0x02, 0x72, 0x3b, 0xc6, + 0xbc, 0xeb, 0xf8, 0x2c, 0x95, 0x07, 0x65, 0x76, 0xda, 0xd9, 0x0c, 0x02, 0x72, 0xa8, 0xdb, 0x16, + 0x12, 0x5a, 0xb1, 0x51, 0x73, 0x5d, 0x76, 0x37, 0xd8, 0xdb, 0x60, 0x4f, 0xa2, 0x8e, 0x90, 0x96, + 0xe4, 0x78, 0x96, 0xcb, 0x67, 0xd2, 0x32, 0xca, 0xf7, 0x2f, 0x66, 0xbf, 0x82, 0x0f, 0x2a, 0x17, + 0x68, 0xb3, 0x95, 0x38, 0x4c, 0x08, 0x86, 0x00, 0xb4, 0x66, 0xa0, 
0x20, 0xa4, 0x02, 0x97, 0x71, + 0x8c, 0x59, 0x04, 0xe9, 0x98, 0xba, 0x08, 0xf2, 0xd9, 0xcf, 0xaf, 0x73, 0x2e, 0x88, 0xb1, 0x68, + 0x30, 0x06, 0xdd, 0x05, 0xf4, 0x1f, 0x55, 0x13, 0x04, 0xe6, 0xb1, 0x40, 0x1f, 0xcf, 0x46, 0x83, + 0x26, 0x96, 0x8f, 0x41, 0x20, 0xec, 0x1c, 0x00, 0x78, 0x1d, 0xb4, 0x8c, 0x25, 0xb3, 0xe4, 0x2f, + 0x94, 0x2f, 0xfe, 0x98, 0x92, 0x3a, 0x12, 0xaf, 0x34, 0x37, 0xee, 0xaa, 0x51, 0x60, 0x6e, 0x9c, + 0x0a, 0x1f, 0xa9, 0x38, 0x00, 0x01, 0x17, 0xdd, 0x4c, 0x60, 0xbf, 0x0d, 0x08, 0x6f, 0xd3, 0xcf, + 0x4a, 0x23, 0x81, 0x5f, 0x3b, 0xca, 0x66, 0x4f, 0xf1, 0xf8, 0x15, 0x51, 0xf7, 0xe1, 0x6d, 0x9f, + 0x55, 0xd3, 0xd8, 0xb2, 0xb3, 0x94, 0x67, 0xa1, 0x24, 0xda, 0xfe, 0x60, 0xe9, 0x73, 0xf6, 0x93, + 0x49, 0x22, 0x51, 0x4a, 0xaf, 0x47, 0x77, 0xfa, 0xce, 0x03, 0x69, 0xb1, 0x65, 0x8c, 0xce, 0x48, + 0x62, 0xcc, 0x88, 0x61, 0xfb, 0x07, 0xcc, 0xdb, 0x4d, 0x01, 0x9d, 0x21, 0x65, 0x33, 0x1a, 0x72, + 0x93, 0x67, 0x90, 0xff, 0x45, 0xa1, 0x50, 0x33, 0x74, 0xaf, 0x62, 0xf3, 0x61, 0xcf, 0xdb, 0x7a, + 0xd5, 0x78, 0xce, 0x56, 0xbf, 0xee, 0xd6, 0x6b, 0x03, 0xd2, 0x7d, 0x69, 0xcd, 0x89, 0x81, 0xb1, + 0xc1, 0xb2, 0x21, 0xf5, 0xd8, 0xcf, 0x15, 0x79, 0xfb, 0xbd, 0x56, 0xf6, 0x06, 0x90, 0xa1, 0x98, + 0xc1, 0x6a, 0xb8, 0x70, 0xa7, 0xee, 0xbc, 0x02, 0xe3, 0x8a, 0xda, 0xac, 0x1f, 0x2b, 0x42, 0xa8, + 0xc4, 0x39, 0xcc, 0xa8, 0x2d, 0x6d, 0x62, 0x5b, 0xa7, 0x55, 0x83, 0x4a, 0x7f, 0x1c, 0xbd, 0x16, + 0xec, 0x9c, 0x03, 0x4d, 0xfb, 0x63, 0x33, 0x90, 0xab, 0x65, 0x2d, 0xa8, 0x7c, 0x55, 0x8b, 0xc8, + 0x89, 0x97, 0x97, 0x8a, 0x33, 0x26, 0x03, 0xe2, 0x97, 0x42, 0x87, 0xf5, 0xc9, 0x6d, 0xae, 0x00, + 0x00, 0x10, 0xc4, 0xc4, 0x6b, 0xfd, 0xcd, 0xb7, 0x58, 0x00, 0xee, 0xca, 0x51, 0x3d, 0x6d, 0x04, + 0xb0, 0x7d, 0x36, 0x2f, 0x54, 0xf2, 0xa6, 0xd5, 0x35, 0xf9, 0xa4, 0x28, 0x56, 0xfd, 0x08, 0xc6, + 0x7b, 0xd8, 0x0e, 0x38, 0x28, 0xd2, 0x67, 0x66, 0x51, 0x30, 0x06, 0x4f, 0x51, 0xcc, 0xa3, 0x46, + 0x8e, 0xcb, 0x95, 0xee, 0x48, 0x2f, 0xac, 0xd0, 0x31, 0x5b, 0xbe, 0xb1, 0xf0, 0xf1, 0x78, 0x13, + 0xd2, 
0x2c, 0x16, 0x48, 0x18, 0x59, 0xe4, 0xee, 0x40, 0x7a, 0x4d, 0x9b, 0x10, 0x68, 0x0c, 0x89, + 0xc5, 0x0c, 0xfa, 0x00, 0x00, 0x00, 0x00, 0x02, 0x43, 0xff, 0xff, 0xfd, 0xbc, 0x00, 0x00, 0x00, + 0x01, 0x41, 0x4d, 0x4c, 0x56, 0x86, 0x00, 0x40, 0x92, 0xe1, 0x81, 0x56, 0x00, 0x00, 0x18, 0x70, + 0x5a, 0x9a, 0x74, 0x00, 0x00, 0x7f, 0xe8, 0x2b, 0xfc, 0x0c, 0x29, 0x71, 0x4d, 0x17, 0x17, 0xe7, + 0xd9, 0x45, 0x16, 0x69, 0x89, 0x6b, 0x68, 0x73, 0xfe, 0x3a, 0xe9, 0x08, 0x96, 0xe2, 0x0f, 0xc5, + 0x5d, 0x8c, 0xb1, 0x77, 0xec, 0x64, 0x9e, 0xce, 0x03, 0x26, 0x59, 0xee, 0x37, 0xe8, 0xc1, 0x2a, + 0x2a, 0xdc, 0xfa, 0xa1, 0xb2, 0x5a, 0x4b, 0x59, 0xde, 0xa6, 0x91, 0xd1, 0x63, 0xa0, 0xa1, 0x81, + 0xb1, 0x8d, 0xab, 0x38, 0xb0, 0xa5, 0x45, 0xcf, 0x41, 0xe5, 0x6c, 0x54, 0x13, 0xac, 0x85, 0x72, + 0xd3, 0x7a, 0x0c, 0x7d, 0x47, 0xd0, 0xe5, 0x6c, 0xd5, 0x81, 0xb7, 0xf6, 0x8e, 0x53, 0x81, 0x42, + 0x9e, 0x74, 0xb0, 0x3b, 0x1c, 0x7d, 0xbc, 0x04, 0xfe, 0x15, 0xc6, 0x00, 0x01, 0x9c, 0x05, 0x97, + 0x44, 0x0e, 0xd6, 0xf0, 0x85, 0x0f, 0x28, 0x5b, 0x4d, 0x4b, 0x67, 0x07, 0x25, 0x16, 0xfe, 0x59, + 0x69, 0xe2, 0x28, 0x42, 0xc6, 0xd0, 0xb4, 0x2c, 0x4e, 0xcf, 0x78, 0x8c, 0x9c, 0x85, 0x02, 0x09, + 0xa3, 0x53, 0x53, 0xed, 0x0a, 0x52, 0xa7, 0x00, 0x48, 0x46, 0x12, 0x0f, 0x17, 0x7c, 0x6d, 0x99, + 0x0f, 0x06, 0x14, 0xac, 0x68, 0xa4, 0x09, 0x01, 0x1d, 0x97, 0x7d, 0x2b, 0x81, 0xc2, 0x02, 0xad, + 0x6e, 0x10, 0x82, 0xed, 0x3c, 0x51, 0xae, 0x23, 0xc6, 0xfb, 0x77, 0x54, 0x42, 0x7d, 0x46, 0x7e, + 0x71, 0xfa, 0xa9, 0x63, 0xda, 0x8b, 0x4b, 0xc7, 0x85, 0x61, 0x9a, 0x5e, 0x08, 0xab, 0x74, 0x27, + 0x35, 0x63, 0x21, 0x19, 0x09, 0xf4, 0xa3, 0x40, 0x19, 0x1a, 0xbd, 0x87, 0x00, 0x99, 0xd7, 0xd7, + 0xda, 0xcf, 0x01, 0xdf, 0xc2, 0x9b, 0x35, 0xd8, 0x89, 0x58, 0xdb, 0xf5, 0x0c, 0xfb, 0x67, 0xb1, + 0x54, 0x6f, 0x16, 0xc2, 0xed, 0x20, 0x2d, 0x67, 0xbb, 0xb7, 0xb5, 0x0b, 0xb7, 0xde, 0x2c, 0xfa, + 0xa9, 0xe5, 0x0f, 0xf1, 0x10, 0x0e, 0x80, 0x7f, 0x2d, 0x25, 0x79, 0x3a, 0xd2, 0x80, 0x23, 0x07, + 0x85, 0xc2, 0x80, 0x4f, 0x4f, 0x99, 0x6e, 
0xf5, 0xd4, 0x4d, 0x9a, 0x23, 0x61, 0xa9, 0xe7, 0x6c, + 0x8e, 0x3d, 0xc1, 0x34, 0x7d, 0xbc, 0x47, 0xea, 0x7d, 0x36, 0x9d, 0x92, 0x7f, 0xe8, 0x10, 0xf0, + 0x6b, 0x63, 0x72, 0xee, 0xe7, 0x46, 0x35, 0xa6, 0xfc, 0xeb, 0x32, 0x27, 0xbe, 0x8e, 0x13, 0x58, + 0x6e, 0xa9, 0xe1, 0x71, 0x9f, 0x1a, 0xf5, 0xb1, 0x5e, 0x3e, 0x08, 0xdb, 0xb8, 0x61, 0xef, 0xdd, + 0xbb, 0x5c, 0x1b, 0x61, 0xe0, 0x12, 0xb6, 0xea, 0xfe, 0xfe, 0x75, 0xae, 0x71, 0x4f, 0x71, 0x97, + 0x3e, 0x04, 0x02, 0x2e, 0x7a, 0xfd, 0xbf, 0x2d, 0x36, 0xd5, 0x45, 0x57, 0x80, 0xd0, 0xe6, 0xc8, + 0x36, 0x63, 0x22, 0xda, 0x89, 0xc5, 0x90, 0xb3, 0x44, 0xff, 0x75, 0x4f, 0x25, 0xa7, 0xc1, 0xa9, + 0x0a, 0xe6, 0x0b, 0x3d, 0x7c, 0x90, 0xfa, 0x59, 0x11, 0x7b, 0xd1, 0xbb, 0xa0, 0x1e, 0x38, 0x7e, + 0xdd, 0xd2, 0x9a, 0xa5, 0xfb, 0x59, 0x63, 0x2a, 0x37, 0xc6, 0x06, 0x70, 0x54, 0x9e, 0x83, 0x35, + 0xc5, 0xfb, 0xa4, 0x7f, 0xa4, 0xb7, 0x48, 0xf3, 0x16, 0xe1, 0x4c, 0xc6, 0x66, 0x9f, 0x8d, 0xb4, + 0x3a, 0xb2, 0x8b, 0xb2, 0x9a, 0x88, 0x2f, 0xb7, 0x57, 0xd3, 0x91, 0xd1, 0x94, 0x1f, 0xfb, 0x85, + 0x0d, 0x86, 0x39, 0x65, 0xcd, 0xfa, 0xa3, 0x25, 0x32, 0x54, 0x81, 0xdf, 0x1e, 0x3c, 0xcf, 0x9e, + 0xe8, 0x74, 0xb1, 0x94, 0xdf, 0xae, 0x01, 0x10, 0x52, 0x43, 0xa4, 0x2c, 0xe7, 0xdd, 0x7f, 0x82, + 0x18, 0xbf, 0xc3, 0x8c, 0xb8, 0xf2, 0xab, 0x16, 0xe8, 0xe9, 0xc5, 0x1d, 0xe2, 0xc1, 0x58, 0xdc, + 0x93, 0x4e, 0x2e, 0x19, 0xc5, 0x83, 0x0f, 0xf7, 0x25, 0x28, 0x70, 0x1b, 0x70, 0xca, 0x53, 0x86, + 0xed, 0x59, 0x96, 0xf0, 0x04, 0xba, 0x47, 0x26, 0xe8, 0xae, 0x5a, 0xa8, 0xc8, 0x6b, 0x4a, 0xe4, + 0x46, 0xaf, 0x51, 0xf5, 0x2f, 0xb9, 0x03, 0xc5, 0xdb, 0x85, 0x28, 0x4f, 0xa2, 0xa9, 0x5c, 0xc4, + 0x4d, 0xc7, 0x58, 0xce, 0x6f, 0xc9, 0xa9, 0x21, 0x04, 0x57, 0x91, 0x61, 0x7f, 0x6b, 0xb5, 0x61, + 0x36, 0xd7, 0x90, 0x00, 0x00, 0x00, 0x03, 0x13, 0xff, 0xff, 0xfc, 0xec, 0x00, 0x00, 0x00, 0x01, + 0x41, 0x4d, 0x4c, 0x56, 0x86, 0x00, 0x40, 0x92, 0xe1, 0x81, 0x56, 0x00, 0x00, 0x14, 0x70, 0x12, + 0x20, 0x00, 0x00, 0x7f, 0xdb, 0xf4, 0x80, 0x58, 0xaf, 0x60, 0x9f, 0x03, 0x27, 
0x50, 0xe2, 0x89, + 0x62, 0x11, 0xa5, 0x5a, 0x21, 0x51, 0x4f, 0xdb, 0x8c, 0x95, 0x4f, 0xfd, 0xec, 0x37, 0xd3, 0x3b, + 0x6e, 0x73, 0x15, 0x60, 0x6e, 0xfa, 0x66, 0xcd, 0x61, 0xff, 0xb3, 0xc7, 0x3d, 0x0e, 0xd5, 0x61, + 0x3e, 0xe1, 0x5b, 0x03, 0x7f, 0x08, 0xb7, 0x66, 0x6b, 0x57, 0xab, 0x77, 0x0c, 0x84, 0xed, 0xc7, + 0x81, 0xfe, 0x20, 0x80, 0x1f, 0x11, 0xf2, 0x02, 0x8d, 0x5c, 0x91, 0x5c, 0xeb, 0x61, 0x46, 0xe6, + 0x06, 0x2d, 0x90, 0x43, 0x76, 0xca, 0x4b, 0x01, 0x8f, 0x02, 0x01, 0x41, 0x07, 0x9a, 0xee, 0x97, + 0x99, 0x88, 0x5d, 0xa2, 0xbf, 0x93, 0x02, 0x8a, 0x02, 0x8a, 0x61, 0x08, 0x4f, 0xbc, 0xc4, 0x7e, + 0x40, 0x15, 0x8e, 0xaa, 0x14, 0x7e, 0xbe, 0xdb, 0x86, 0x18, 0xf1, 0xcc, 0xd8, 0x56, 0x50, 0xe5, + 0x14, 0xa0, 0x2c, 0x6b, 0x06, 0x30, 0xc4, 0x9f, 0x45, 0xe0, 0x8b, 0x33, 0x18, 0xe2, 0x47, 0x12, + 0x5a, 0x22, 0xb4, 0x68, 0x33, 0x8a, 0xe6, 0x75, 0x41, 0x5a, 0xcf, 0x0f, 0xc8, 0xed, 0xa7, 0x4e, + 0x90, 0xd0, 0x38, 0xe8, 0x51, 0x14, 0x37, 0x9c, 0x8e, 0x96, 0xe0, 0x22, 0x0a, 0xf8, 0xab, 0x63, + 0x67, 0xaa, 0x96, 0x47, 0x55, 0xbd, 0x86, 0x33, 0x0f, 0x57, 0x07, 0x3b, 0xae, 0x42, 0x44, 0x84, + 0x59, 0x36, 0x33, 0x4f, 0x41, 0xc7, 0x89, 0x35, 0x81, 0xba, 0x58, 0xc4, 0xd3, 0x97, 0x9b, 0xe6, + 0x48, 0x92, 0xf3, 0x5f, 0xf9, 0xe3, 0xd5, 0xa4, 0xfd, 0x35, 0x47, 0x9c, 0xdd, 0x68, 0xca, 0xbd, + 0x59, 0x36, 0xd5, 0x40, 0x65, 0xd7, 0xfa, 0x19, 0x6a, 0xcc, 0x17, 0xc0, 0x11, 0x2c, 0x72, 0x69, + 0x0a, 0x7a, 0x0f, 0x27, 0x21, 0x21, 0xca, 0xc7, 0x7f, 0x59, 0x27, 0x21, 0x87, 0x64, 0x8d, 0xc4, + 0xfd, 0x3b, 0x12, 0xfa, 0x96, 0xbe, 0x66, 0xd9, 0x4a, 0xf8, 0xa3, 0xd4, 0x32, 0x47, 0x81, 0x70, + 0xe1, 0x6c, 0x99, 0xe4, 0xec, 0xac, 0x2c, 0xc5, 0xb8, 0x7e, 0x2a, 0x61, 0x31, 0x1b, 0x5e, 0xfb, + 0x44, 0xbc, 0xab, 0x4c, 0x41, 0x32, 0xb1, 0xfc, 0xfa, 0x39, 0x04, 0xa9, 0x4a, 0x40, 0x50, 0x74, + 0xa3, 0x6f, 0x90, 0x1e, 0x60, 0x71, 0xb0, 0xfd, 0x6e, 0x51, 0x6b, 0xef, 0xea, 0x2f, 0xe7, 0xba, + 0x30, 0xa6, 0xe4, 0x29, 0x8b, 0x0b, 0xa2, 0xe4, 0xbf, 0x54, 0xc7, 0x38, 0xbf, 0x59, 0xab, 0x19, + 0xf2, 0xe4, 0x92, 
0x88, 0x9c, 0x22, 0x4b, 0x20, 0x69, 0x83, 0x17, 0xee, 0x7b, 0x10, 0x19, 0x92, + 0xcf, 0x24, 0xd4, 0x01, 0xb5, 0x36, 0x9e, 0x93, 0x96, 0xe9, 0xe8, 0x5d, 0x77, 0x1a, 0x24, 0x9e, + 0x79, 0xef, 0x3e, 0x63, 0xe5, 0x53, 0xa7, 0x57, 0xdd, 0xab, 0x0e, 0x62, 0xa2, 0x8e, 0xc7, 0xb4, + 0xfb, 0xab, 0xa0, 0x9a, 0xdd, 0xb9, 0xe0, 0x1b, 0x66, 0x3c, 0x12, 0x9e, 0xb1, 0x9f, 0xf0, 0x5d, + 0xe0, 0x53, 0xd4, 0xb2, 0xd3, 0xeb, 0x6c, 0x86, 0xed, 0xdc, 0xe8, 0xbb, 0xa8, 0xf9, 0x92, 0x63, + 0xb2, 0xe7, 0xd9, 0x6e, 0x7b, 0x17, 0x0e, 0x7e, 0x6b, 0x84, 0x6f, 0x7d, 0x3c, 0x37, 0xe5, 0x50, + 0xef, 0x16, 0x87, 0xf5, 0xb1, 0x92, 0x9b, 0x58, 0x03, 0xef, 0x99, 0x6e, 0x3b, 0x79, 0x16, 0xb2, + 0x6c, 0x10, 0xb2, 0xa9, 0xcd, 0x6b, 0x0b, 0x71, 0x23, 0x95, 0xa9, 0xe3, 0xa7, 0x6e, 0x34, 0x31, + 0x29, 0xec, 0x0d, 0x72, 0x3d, 0xa7, 0xb9, 0x01, 0xd4, 0xcb, 0xbb, 0x11, 0xda, 0x71, 0xd1, 0xbe, + 0xbd, 0x3e, 0x53, 0xe6, 0xf8, 0x87, 0x0f, 0x2e, 0x63, 0xfe, 0x24, 0x46, 0xa9, 0x19, 0x94, 0xf5, + 0x4a, 0xdc, 0x65, 0x61, 0x7b, 0x8b, 0xce, 0xf7, 0x07, 0x3b, 0x25, 0xa3, 0x4c, 0x5c, 0x01, 0x88, + 0xd1, 0x3f, 0x5f, 0x21, 0x33, 0x26, 0xe9, 0xcd, 0x87, 0xb9, 0x69, 0xee, 0x8e, 0x45, 0xf1, 0x03, + 0x7d, 0xb5, 0x73, 0x97, 0x62, 0x9a, 0x3e, 0x80, 0xf1, 0x84, 0xdd, 0xea, 0xaa, 0x8b, 0xfd, 0x30, + 0x0e, 0x4c, 0xbb, 0xff, 0xfa, 0x74, 0x86, 0xe4, 0x35, 0x9f, 0x12, 0x04, 0x4f, 0xf7, 0x37, 0x14, + 0x05, 0xef, 0x9f, 0x56, 0xba, 0x9b, 0xb7, 0x75, 0xb5, 0x63, 0x6b, 0xa8, 0x8f, 0x75, 0x63, 0x01, + 0x10, 0x19, 0xb0, 0xea, 0xf7, 0xf4, 0x8f, 0xcb, 0x2c, 0x21, 0x35, 0x19, 0x15, 0x8d, 0xef, 0x10, + 0x4a, 0x61, 0xec, 0x09, 0xa3, 0xe0, 0xe5, 0x8b, 0x60, 0xa1, 0x04, 0xf5, 0x11, 0xe6, 0xc6, 0xba, + 0x66, 0x75, 0xa7, 0xe2, 0xed, 0x60, 0x7f, 0x53, 0xb4, 0xa7, 0x0a, 0xeb, 0x94, 0xa7, 0x33, 0x99, + 0x22, 0x36, 0xa7, 0xc1, 0xef, 0x0d, 0xfe, 0xde, 0x0d, 0x3c, 0xa6, 0x9e, 0xe7, 0x89, 0x94, 0xfd, + 0x2f, 0x40, 0xbf, 0x44, 0x42, 0xc3, 0x05, 0xcd, 0x3c, 0x8d, 0x4b, 0x70, 0xa2, 0x0a, 0x1f, 0x64, + 0x7c, 0x5e, 0x34, 0xb2, 0x49, 0x2d, 0xd2, 0x3c, 0xac, 
0x6a, 0xcd, 0x8e, 0x4c, 0x1e, 0x23, 0xc6, + 0x71, 0x1d, 0x10, 0xcb, 0x61, 0x9b, 0xe8, 0xcb, 0xc8, 0x48, 0xb2, 0x7d, 0xf5, 0x0d, 0xa5, 0x6f, + 0x37, 0x2c, 0x98, 0xa6, 0x78, 0xa3, 0x87, 0x39, 0xd4, 0x19, 0x4e, 0x22, 0xb5, 0x7c, 0x09, 0xed, + 0xbe, 0x06, 0x33, 0x88, 0x57, 0xd8, 0x23, 0x25, 0xbb, 0xb4, 0xa0, 0xf2, 0xbb, 0x72, 0xa0, 0x96, + 0x25, 0xb7, 0xa1, 0x45, 0x34, 0x8c, 0x8d, 0x9d, 0x5c, 0x8f, 0xb1, 0x14, 0x88, 0x1d, 0x5c, 0xdf, + 0x7f, 0xfb, 0xa2, 0xae, 0x9c, 0x3a, 0x02, 0x92, 0xeb, 0x01, 0xf4, 0x0a, 0x55, 0x32, 0xb3, 0xdb, + 0xaa, 0xb2, 0x00, 0x42, 0xb4, 0xd7, 0x9b, 0x55, 0x64, 0x26, 0x07, 0xe9, 0x2c, 0x55, 0x88, 0xd2, + 0xea, 0x00, 0x00, 0x00, 0x00, 0x02, 0x83, 0xff, 0xff, 0xfd, 0x7c, 0x00, 0x00, 0x00, 0x01, 0x41, + 0x4d, 0x4c, 0x56, 0x86, 0x00, 0x40, 0x92, 0xe1, 0xa1, 0x56, 0x00, 0x00, 0x0c, 0x70, 0x00, 0x00, + 0x7b, 0xfb, 0xea, 0x59, 0x62, 0x8e, 0xaa, 0x5d, 0x87, 0x99, 0x92, 0xc1, 0xf4, 0x5b, 0xb2, 0xe7, + 0x85, 0x2b, 0xe7, 0x5d, 0x58, 0x57, 0xe1, 0x4e, 0xd8, 0xf4, 0xd2, 0x03, 0x54, 0x5a, 0x28, 0x1a, + 0x87, 0x5e, 0x10, 0xbb, 0x88, 0x87, 0x33, 0xba, 0x96, 0x1d, 0x8b, 0x44, 0x44, 0x27, 0x97, 0xaa, + 0x34, 0xe0, 0x4f, 0x25, 0x70, 0x75, 0x42, 0x8c, 0x03, 0x31, 0x68, 0xf4, 0x81, 0xc3, 0xb7, 0x6f, + 0x49, 0xf6, 0x75, 0xa0, 0x46, 0x0c, 0x6b, 0xb7, 0x62, 0x82, 0xda, 0xb5, 0xbf, 0x7a, 0xb7, 0xd6, + 0x3b, 0x77, 0x71, 0x44, 0xfd, 0xc2, 0xad, 0x8d, 0xfb, 0xba, 0xad, 0xab, 0xd8, 0x5c, 0x47, 0x1e, + 0x62, 0x4f, 0xfc, 0x8b, 0x02, 0xe6, 0x1a, 0x68, 0x88, 0xcc, 0x07, 0xb1, 0x56, 0xb8, 0x97, 0xba, + 0xd8, 0x66, 0x7e, 0xf1, 0xc0, 0x97, 0x20, 0xdf, 0x1d, 0x3f, 0xbc, 0xff, 0x59, 0xd5, 0xc8, 0xd6, + 0xec, 0xfe, 0xe1, 0xd8, 0x09, 0x10, 0xc4, 0x9d, 0xbf, 0x70, 0x6e, 0xe8, 0xeb, 0xa3, 0xef, 0xee, + 0xf4, 0x40, 0x70, 0x73, 0xe0, 0x16, 0x9c, 0x31, 0x3d, 0x86, 0xc8, 0x08, 0x37, 0x30, 0x4c, 0x80, + 0xf5, 0x6d, 0x57, 0x74, 0xb9, 0xd7, 0xda, 0x11, 0xa2, 0xd1, 0x1c, 0xf8, 0x04, 0xc0, 0xbc, 0x2e, + 0x51, 0x0e, 0xbb, 0xe8, 0x4b, 0x4e, 0xf8, 0xc2, 0xb1, 0xa4, 0x00, 0x93, 0xd1, 0x3f, 0xa1, 
0x04, + 0x98, 0xcc, 0x12, 0xbf, 0x72, 0xa3, 0x2d, 0x27, 0x02, 0x17, 0x36, 0xc9, 0x90, 0x27, 0x91, 0xb4, + 0xe0, 0x40, 0xe0, 0xd4, 0x1d, 0xd1, 0x6b, 0xc0, 0x55, 0x9a, 0xc6, 0xdf, 0xed, 0x7c, 0x12, 0xc6, + 0x3a, 0xda, 0x65, 0xf8, 0xd3, 0x3b, 0x14, 0x02, 0xc4, 0x80, 0x9e, 0x9c, 0xed, 0x6c, 0xee, 0x1f, + 0x09, 0x9b, 0x32, 0x6b, 0xd7, 0x04, 0x06, 0xd4, 0xec, 0xcc, 0xf9, 0xf8, 0x06, 0xd7, 0xf7, 0x5a, + 0xfa, 0x24, 0x72, 0x8b, 0xd3, 0xfc, 0x2d, 0x9e, 0xea, 0x7a, 0xc2, 0x4c, 0xfc, 0xd2, 0xd0, 0x0f, + 0x49, 0x5a, 0x34, 0xf3, 0x25, 0xde, 0xaa, 0x99, 0xad, 0xba, 0x55, 0xcd, 0xa9, 0x5d, 0x15, 0x85, + 0xc1, 0x3f, 0x5a, 0x7c, 0x00, 0xe6, 0x26, 0x9a, 0x99, 0xa5, 0xad, 0x8f, 0xe3, 0x6a, 0xbc, 0xb4, + 0xae, 0x28, 0xa6, 0x9f, 0x66, 0xd2, 0x92, 0xb8, 0x9a, 0x16, 0x54, 0x8b, 0x9e, 0x50, 0xd6, 0xde, + 0xbd, 0x63, 0x75, 0x5c, 0x46, 0x69, 0xe1, 0x84, 0xa6, 0x8a, 0x78, 0x18, 0x9d, 0xbc, 0x3f, 0xeb, + 0xe9, 0x9f, 0xe2, 0x27, 0xd8, 0x1d, 0x99, 0x6e, 0xc4, 0x55, 0x2c, 0x48, 0x57, 0x84, 0xf0, 0x86, + 0x6c, 0x65, 0x05, 0x3c, 0xcb, 0x56, 0x0e, 0x68, 0xed, 0x71, 0x70, 0x98, 0x74, 0x6d, 0x2f, 0xf7, + 0xcf, 0x30, 0x2d, 0x48, 0x65, 0x9f, 0x06, 0xcc, 0x24, 0xe6, 0x3a, 0x3a, 0x36, 0x8d, 0xd0, 0xcd, + 0x2c, 0xd3, 0x63, 0x52, 0x83, 0x54, 0xf2, 0xe9, 0x7b, 0x89, 0x62, 0xe5, 0x3a, 0x3f, 0x54, 0xa4, + 0x2f, 0x08, 0xff, 0xae, 0x20, 0xf1, 0xae, 0xb6, 0xb2, 0xb0, 0x7b, 0xbc, 0x50, 0xda, 0xd9, 0xb7, + 0xed, 0x9b, 0xf0, 0xc7, 0xd2, 0x9d, 0xb9, 0x29, 0x9e, 0x11, 0x99, 0x4f, 0xa0, 0xce, 0x21, 0x47, + 0x2e, 0x79, 0x60, 0x1c, 0x17, 0x54, 0xbd, 0x54, 0xa4, 0x6f, 0xcb, 0x77, 0x39, 0x9b, 0x20, 0xc5, + 0x6b, 0x92, 0xae, 0x6d, 0x8b, 0xce, 0xb2, 0x1f, 0x75, 0x7e, 0x7a, 0x55, 0x9f, 0x42, 0xa4, 0xb1, + 0x02, 0xf2, 0xbc, 0x5a, 0x5b, 0x9c, 0xe2, 0xf1, 0x91, 0x93, 0x2f, 0x48, 0xeb, 0x46, 0x9c, 0xa1, + 0xa0, 0x27, 0x32, 0xde, 0xa3, 0x00, 0xfa, 0x6a, 0x1d, 0x7c, 0x56, 0x40, 0x37, 0xa7, 0xec, 0x1d, + 0xff, 0x12, 0x9a, 0xc4, 0xe8, 0xce, 0xfc, 0x57, 0xae, 0xeb, 0x17, 0x09, 0xe9, 0xab, 0xb8, 0x37, + 0x61, 0x30, 0x6d, 0xd4, 0xad, 
0xa2, 0x47, 0xc1, 0x97, 0xd4, 0xd5, 0x94, 0x1a, 0x39, 0x3e, 0x45, + 0x43, 0x47, 0x8a, 0x67, 0xf4, 0x12, 0x5d, 0x52, 0xf1, 0xbf, 0x53, 0x55, 0x69, 0x82, 0x0f, 0x0a, + 0x05, 0xa5, 0x79, 0x57, 0xd4, 0xd5, 0x12, 0x26, 0x7e, 0xa5, 0xe6, 0x3c, 0xc1, 0x09, 0x31, 0x28, + 0x3e, 0x5c, 0xea, 0x0e, 0x95, 0xb3, 0x22, 0xc0, 0x0d, 0x37, 0x3f, 0x2b, 0x19, 0xc4, 0xe0, 0xab, + 0x66, 0xc9, 0x40, 0x02, 0x77, 0x6e, 0x99, 0x69, 0xdd, 0x70, 0x67, 0x83, 0xc9, 0x28, 0xe4, 0xb1, + 0x24, 0xf9, 0xb3, 0x5b, 0x43, 0x54, 0xa0, 0xac, 0x6f, 0x97, 0x8e, 0x5e, 0x22, 0x81, 0x4b, 0xb8, + 0xcb, 0x68, 0x3d, 0xda, 0xa1, 0xde, 0xa1, 0x67, 0x2a, 0x1b, 0x79, 0xc1, 0xdd, 0x70, 0x5c, 0x0a, + 0xd4, 0x00, 0x00, 0x00, 0x02, 0x83, 0xff, 0xff, 0xfd, 0x7c, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, + 0x4c, 0x56, 0x86, 0x00, 0x40, 0x92, 0xe1, 0x91, 0x56, 0x00, 0x00, 0x0c, 0x70, 0x00, 0x00, 0x7f, + 0xd3, 0x15, 0x8a, 0x8a, 0xcd, 0xb2, 0xee, 0x76, 0x8b, 0xe2, 0x04, 0x22, 0xe9, 0x74, 0xee, 0x1d, + 0x6e, 0x7f, 0xa1, 0x7f, 0x27, 0x4e, 0xa8, 0x2c, 0x9a, 0xb9, 0x53, 0x00, 0x68, 0xcc, 0xf9, 0xb3, + 0xf8, 0xb6, 0xea, 0xfc, 0xe9, 0x7b, 0xac, 0x1f, 0x96, 0x4e, 0x4f, 0xdc, 0x18, 0x70, 0xfc, 0x16, + 0x66, 0x5d, 0x74, 0x5b, 0xe6, 0x5e, 0x28, 0x76, 0xbe, 0xb8, 0xc8, 0xaf, 0x1d, 0x9a, 0xa4, 0x55, + 0x26, 0x71, 0x80, 0x88, 0x5f, 0x0c, 0xbd, 0x88, 0xd5, 0x8d, 0x91, 0x54, 0x8f, 0xcb, 0xeb, 0x24, + 0xb4, 0x39, 0x99, 0xaa, 0xf2, 0x0a, 0x90, 0xe4, 0x41, 0x69, 0xe8, 0xf4, 0xce, 0x21, 0x67, 0xcc, + 0xdf, 0x86, 0x29, 0x47, 0x26, 0xf0, 0xf2, 0x5a, 0x81, 0x32, 0xe9, 0xc3, 0x86, 0x4d, 0xc9, 0xef, + 0xd6, 0x77, 0x51, 0xcc, 0x6b, 0x79, 0x45, 0x9c, 0xda, 0x96, 0x07, 0xa5, 0x0d, 0x03, 0x04, 0x43, + 0x07, 0x54, 0x56, 0xde, 0xb3, 0x6a, 0xf9, 0xbb, 0x3f, 0xf3, 0xf2, 0x8b, 0xb9, 0x2f, 0x98, 0x69, + 0xc8, 0xd3, 0x50, 0x2c, 0x3d, 0xdd, 0xab, 0x5c, 0x8c, 0x56, 0xc7, 0x61, 0xaf, 0xbd, 0x55, 0xe1, + 0x2e, 0xe2, 0x6f, 0x0f, 0x4b, 0x10, 0x07, 0xc1, 0x13, 0x14, 0xd6, 0x8a, 0xab, 0x6f, 0x9a, 0x52, + 0x6a, 0x41, 0x91, 0x30, 0xf3, 0x45, 0xce, 0x47, 0xf0, 0x16, 0x90, 
0xb3, 0x06, 0xc6, 0xf7, 0x23, + 0x9c, 0x61, 0xad, 0xc6, 0x07, 0x5f, 0xef, 0x64, 0x2f, 0x73, 0x47, 0x4e, 0xe1, 0x23, 0xa7, 0xf6, + 0xa8, 0x3a, 0x79, 0xa1, 0x5f, 0xd5, 0x6e, 0xdb, 0x40, 0x6b, 0x26, 0xb9, 0x7e, 0x15, 0xc1, 0xf1, + 0x1b, 0xeb, 0x3c, 0x10, 0xbd, 0x53, 0xa6, 0x27, 0xdf, 0xb9, 0x8c, 0x09, 0xd2, 0x8d, 0x1d, 0x67, + 0xfd, 0xfb, 0xee, 0x5c, 0x9b, 0x4b, 0xc7, 0x68, 0x08, 0x02, 0x5d, 0x3d, 0xf2, 0xab, 0x8f, 0xf9, + 0x32, 0xac, 0x2c, 0x85, 0x14, 0x17, 0xae, 0x95, 0x7d, 0xea, 0x92, 0xf8, 0x45, 0x28, 0xa3, 0x84, + 0x93, 0x7a, 0xe2, 0x03, 0x07, 0x2f, 0x80, 0x18, 0xc7, 0x4f, 0xf0, 0x23, 0xe8, 0x1f, 0x20, 0x25, + 0x1d, 0xa3, 0x2b, 0x9b, 0xde, 0x0f, 0x35, 0x1b, 0x59, 0x93, 0x06, 0xab, 0x8a, 0xbf, 0x30, 0x04, + 0xae, 0xfb, 0xfa, 0x65, 0x4f, 0xab, 0x65, 0xe1, 0x9c, 0x57, 0x4e, 0x2f, 0xd7, 0x37, 0xfc, 0x95, + 0x64, 0xf4, 0x02, 0x84, 0xa0, 0x51, 0xae, 0xd4, 0x54, 0xff, 0xae, 0x16, 0x9d, 0xa5, 0x68, 0x94, + 0x15, 0xf2, 0xe1, 0xca, 0x57, 0x80, 0x83, 0x89, 0xcf, 0xfe, 0x66, 0x73, 0x26, 0x0c, 0x8e, 0xdc, + 0xad, 0x93, 0xa6, 0x98, 0x96, 0xf0, 0xe7, 0xcd, 0xeb, 0xc2, 0x78, 0x33, 0x05, 0x05, 0x98, 0x28, + 0xf5, 0x81, 0xba, 0xb6, 0xe7, 0xa7, 0x41, 0x9c, 0xb9, 0x96, 0xf1, 0xa4, 0x13, 0xb1, 0xbc, 0xd8, + 0x77, 0x1b, 0x98, 0x60, 0x35, 0x45, 0xb8, 0x54, 0x6c, 0x36, 0xb4, 0x38, 0xe2, 0xaa, 0xb9, 0x39, + 0x99, 0xbc, 0x59, 0x65, 0xfe, 0x01, 0x21, 0x15, 0x65, 0x8d, 0x2c, 0x88, 0x08, 0x5f, 0x7f, 0x38, + 0x73, 0x55, 0x7e, 0x3e, 0x06, 0x81, 0xf6, 0xa0, 0x5c, 0xa3, 0xfd, 0xce, 0xe2, 0x40, 0x42, 0xdf, + 0x8a, 0x9e, 0x72, 0x13, 0xe6, 0xd1, 0x71, 0x4e, 0x1c, 0x96, 0x59, 0x0a, 0x5c, 0xeb, 0xc9, 0x79, + 0x79, 0xc9, 0x5e, 0xfc, 0xd9, 0x65, 0xad, 0x55, 0x5f, 0x19, 0xd1, 0xe7, 0x20, 0x10, 0xe2, 0xb7, + 0x54, 0x46, 0x96, 0x92, 0x64, 0x79, 0x85, 0xc9, 0xb9, 0xdb, 0x02, 0x4b, 0xb3, 0xc6, 0x2c, 0xf0, + 0x0f, 0xc9, 0x2d, 0xa7, 0x8b, 0x32, 0x20, 0x30, 0xfd, 0xbd, 0x65, 0x1b, 0x86, 0xaf, 0x27, 0xc7, + 0x3c, 0xb0, 0xdb, 0xd1, 0x60, 0x94, 0xc2, 0x99, 0xa7, 0x37, 0xfb, 0x6a, 0x92, 0x21, 0x22, 0x61, + 0x7e, 
0x59, 0xb1, 0x74, 0xe2, 0xcd, 0x98, 0x90, 0x2c, 0xf2, 0x48, 0x51, 0x3c, 0xec, 0xfa, 0xf7, + 0x7c, 0xd1, 0xa1, 0x00, 0x98, 0x30, 0x33, 0x02, 0x6d, 0xf3, 0xfd, 0x2a, 0x92, 0x8a, 0x76, 0x55, + 0x84, 0x99, 0x9d, 0xd1, 0x23, 0x80, 0x40, 0xde, 0x90, 0x0a, 0x57, 0xfe, 0x48, 0x05, 0x6e, 0x47, + 0x51, 0xa6, 0xd4, 0x7e, 0xd0, 0x43, 0x6f, 0x61, 0x18, 0xea, 0x05, 0xe3, 0x66, 0x6e, 0xdc, 0xed, + 0x48, 0x4d, 0x53, 0xfa, 0x3d, 0x27, 0xb6, 0xbb, 0x54, 0xb0, 0x14, 0xe7, 0xde, 0xb8, 0x02, 0xe2, + 0xf2, 0x75, 0x2e, 0xee, 0x47, 0x2b, 0x9e, 0xe6, 0x8a, 0xb6, 0x77, 0x95, 0xbb, 0xf1, 0x67, 0x0f, + 0x96, 0xbc, 0x06, 0x46, 0x0b, 0xa0, 0x4b, 0xef, 0x76, 0x84, 0x16, 0x14, 0x0c, 0x74, 0xd6, 0xf1, + 0x00, 0x00, 0x00, 0x02, 0x7b, 0xff, 0xff, 0xfd, 0x84, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, + 0x56, 0x86, 0x00, 0x40, 0x92, 0xe1, 0x91, 0x56, 0x00, 0x00, 0x0c, 0x70, 0x00, 0x00, 0x7f, 0x76, + 0x8b, 0xd1, 0xc9, 0x38, 0x2b, 0x3f, 0xac, 0xf0, 0xee, 0x99, 0x34, 0xcc, 0x6e, 0xcb, 0x35, 0x6b, + 0xd1, 0xb6, 0x68, 0x0f, 0x76, 0x12, 0x2d, 0x32, 0x86, 0xce, 0x9b, 0x22, 0xaa, 0xe8, 0x2c, 0x54, + 0xe3, 0x30, 0xf3, 0x0a, 0xbe, 0xbb, 0x1c, 0x43, 0x8a, 0x4f, 0x5c, 0x89, 0xd0, 0x18, 0x0b, 0x74, + 0xc9, 0xf6, 0xe5, 0x16, 0xb8, 0xb5, 0x47, 0x23, 0x64, 0xb8, 0x38, 0xa8, 0x9d, 0xb8, 0x3f, 0xe5, + 0xa3, 0x36, 0xd0, 0x62, 0x2a, 0xd7, 0x6e, 0x5b, 0xff, 0x4c, 0x4a, 0xc8, 0xfa, 0x76, 0xc3, 0xe1, + 0x8c, 0x7b, 0xdd, 0xa8, 0xe8, 0x24, 0xbf, 0x2b, 0x9b, 0xcd, 0x6c, 0xa0, 0x82, 0x17, 0xef, 0x2c, + 0xf4, 0xb2, 0x97, 0xee, 0x99, 0x29, 0xd7, 0x0e, 0x54, 0x6a, 0xfe, 0xe1, 0x88, 0x3a, 0x16, 0x9c, + 0x1f, 0x1d, 0x91, 0xe4, 0x6f, 0xfc, 0xa4, 0x5a, 0xfd, 0x24, 0x57, 0x3d, 0x2d, 0x42, 0xb6, 0x64, + 0xda, 0x55, 0xfd, 0x28, 0x5c, 0x93, 0x2b, 0x8c, 0xa0, 0x0e, 0x2b, 0x93, 0x42, 0x2a, 0xe1, 0xcd, + 0xee, 0x10, 0xc1, 0x03, 0xdc, 0xa4, 0xc0, 0xf5, 0xf9, 0x7f, 0x33, 0xe2, 0xf4, 0x90, 0x27, 0x92, + 0x11, 0x20, 0xa1, 0x14, 0x42, 0xe4, 0xff, 0x0d, 0x07, 0x28, 0x6e, 0xbc, 0x3b, 0xbc, 0xe8, 0x72, + 0xe2, 0xe5, 0xbe, 0xa6, 0x27, 0x5b, 0x02, 
0x5c, 0xcf, 0x9c, 0x09, 0x32, 0x25, 0xca, 0xf8, 0x03, + 0x94, 0xbd, 0x5c, 0x6f, 0x41, 0xff, 0x6d, 0x29, 0x8e, 0xfa, 0x05, 0xfe, 0xf0, 0x42, 0x2e, 0xa8, + 0xfa, 0x30, 0x4a, 0x22, 0x13, 0x21, 0x5e, 0xc5, 0xa0, 0x9e, 0xfe, 0xb2, 0x93, 0xb5, 0x6a, 0xb3, + 0x44, 0xf7, 0x51, 0x22, 0xdf, 0x02, 0x01, 0x0a, 0x28, 0xd8, 0x9f, 0xae, 0xf0, 0x9e, 0x89, 0x82, + 0x7b, 0x94, 0x3c, 0x50, 0x43, 0x5a, 0xc5, 0xbe, 0x43, 0x4e, 0x3e, 0xa3, 0xe0, 0x33, 0xa0, 0x99, + 0x7c, 0x4e, 0x3b, 0x5a, 0x85, 0x06, 0x94, 0xc7, 0xfd, 0xe8, 0x6a, 0x7d, 0xa2, 0x8b, 0x4d, 0x14, + 0xd9, 0xd8, 0xbf, 0x86, 0x53, 0xc0, 0xa0, 0x3d, 0xe3, 0x0f, 0x85, 0xa6, 0xe0, 0xc2, 0xc3, 0x73, + 0x6e, 0x27, 0xff, 0xbe, 0x30, 0x53, 0x3f, 0x76, 0x2d, 0x85, 0xb9, 0xf4, 0x39, 0x51, 0xb7, 0xae, + 0x4c, 0x08, 0x08, 0x5b, 0xd8, 0xe1, 0x6d, 0x8f, 0x01, 0x0f, 0x98, 0x70, 0xdb, 0x49, 0x21, 0x18, + 0x15, 0x4e, 0xce, 0xc4, 0xb6, 0xd5, 0xa0, 0x72, 0xc6, 0x4b, 0x6b, 0x82, 0x8b, 0x7b, 0x85, 0x39, + 0xdb, 0x97, 0xf2, 0xd4, 0x03, 0x07, 0x44, 0xc5, 0xbd, 0x9d, 0xc3, 0xb6, 0x86, 0x76, 0xca, 0xd1, + 0xd6, 0x5a, 0xf8, 0x41, 0x68, 0xe3, 0xa8, 0x5a, 0xa2, 0xe8, 0x0d, 0xf8, 0x3e, 0xcd, 0x28, 0x7d, + 0x5f, 0x9c, 0x38, 0x4d, 0x7c, 0x63, 0xd3, 0x8e, 0xa7, 0x5a, 0xa7, 0x21, 0x1f, 0xc4, 0xde, 0x64, + 0xf0, 0x7f, 0x05, 0xf5, 0xa8, 0xdd, 0x6c, 0xd5, 0x4f, 0x8c, 0x14, 0x65, 0x9b, 0xe9, 0x3e, 0xcb, + 0x0c, 0xf4, 0x94, 0x3b, 0x12, 0xb1, 0xec, 0xf5, 0x15, 0x22, 0x1e, 0x47, 0x02, 0x6d, 0xa4, 0x8f, + 0x08, 0xe1, 0x84, 0x2a, 0x26, 0xec, 0x95, 0x29, 0x7a, 0xf3, 0xcd, 0x48, 0x61, 0x65, 0xb0, 0xff, + 0xba, 0x0e, 0xd8, 0x56, 0x6b, 0x5f, 0x5e, 0xdd, 0xdc, 0x43, 0x12, 0x54, 0x1f, 0xa6, 0xea, 0x27, + 0x5d, 0x97, 0x5c, 0xfe, 0xd6, 0xb3, 0xaa, 0xc1, 0xd6, 0x37, 0x19, 0xdc, 0xa8, 0xfc, 0x76, 0xdb, + 0x81, 0x54, 0x10, 0xa6, 0xb7, 0xc1, 0xb1, 0xb9, 0x42, 0x54, 0xb8, 0x69, 0xd6, 0x5b, 0xdf, 0x8c, + 0xd2, 0x85, 0x4b, 0xdf, 0x80, 0x36, 0x1e, 0x31, 0x4d, 0xa1, 0x1a, 0x56, 0xb8, 0x2a, 0x6c, 0x59, + 0x33, 0x11, 0x1c, 0xe6, 0x36, 0x1b, 0xba, 0x6b, 0x55, 0x82, 0x8c, 0x69, 0x89, 
0xe2, 0x7a, 0xf6, + 0x95, 0x53, 0xd9, 0x29, 0xd0, 0x06, 0xfb, 0x38, 0xf2, 0x38, 0xf6, 0x12, 0x8e, 0x54, 0x16, 0xd8, + 0x3b, 0xfa, 0x5a, 0x7e, 0x63, 0x01, 0xd7, 0x98, 0x7e, 0x74, 0xdf, 0x47, 0xc7, 0x84, 0x72, 0x2d, + 0x5d, 0x0a, 0xff, 0xac, 0xdf, 0x7f, 0x31, 0xa7, 0x6b, 0xf8, 0x63, 0x95, 0x4a, 0xfe, 0x81, 0x06, + 0x3a, 0x23, 0xba, 0x30, 0x9d, 0x2f, 0x00, 0x27, 0xd7, 0x54, 0xa3, 0xbd, 0xfa, 0x9f, 0xc8, 0x35, + 0x1d, 0x1f, 0x63, 0x1c, 0xe7, 0x67, 0xf2, 0xf4, 0xf2, 0x47, 0x7d, 0x9d, 0xce, 0xa5, 0xe6, 0x28, + 0x7d, 0x93, 0x5a, 0x85, 0xdc, 0x51, 0x4c, 0x63, 0x52, 0xfe, 0xe9, 0x46, 0x87, 0x42, 0x6f, 0xe2, + 0x59, 0x85, 0xb2, 0x51, 0xda, 0x55, 0x40, 0x00, 0x00, 0x00, 0x02, 0xf5, 0xff, 0xff, 0xfd, 0x0a, + 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, 0x86, 0x00, 0x40, 0x92, 0xe1, 0x41, 0x56, 0x00, + 0x00, 0x0c, 0x70, 0x00, 0x00, 0x7f, 0x5f, 0x5a, 0x76, 0xc1, 0xa8, 0xce, 0xa1, 0x96, 0xe3, 0x34, + 0xa0, 0x35, 0x06, 0xd0, 0xd8, 0x43, 0x47, 0xf4, 0xe8, 0xed, 0xe6, 0xea, 0x69, 0xfa, 0xaa, 0x31, + 0xab, 0x53, 0x27, 0x82, 0x67, 0xc4, 0x30, 0xff, 0x82, 0xcf, 0xaa, 0xb8, 0x66, 0x4d, 0xdb, 0xd9, + 0xe4, 0xf1, 0x55, 0x11, 0x36, 0x9f, 0xa8, 0x11, 0x6a, 0xcb, 0x50, 0xdd, 0x95, 0x67, 0xf5, 0xba, + 0x2c, 0x69, 0x04, 0xb0, 0xb0, 0x11, 0xad, 0x21, 0xc6, 0xec, 0x3e, 0x2e, 0xe5, 0x28, 0x7f, 0x53, + 0x06, 0xd2, 0x5d, 0x66, 0x5e, 0x81, 0x21, 0x13, 0xa7, 0x62, 0x12, 0x84, 0x3d, 0x1e, 0xcc, 0x49, + 0x1c, 0x99, 0xb4, 0x33, 0xcf, 0xfb, 0xb1, 0x41, 0x5e, 0x88, 0x26, 0x2f, 0xd3, 0xbf, 0xcf, 0x2e, + 0x9d, 0x1c, 0x81, 0x9d, 0xbe, 0xcc, 0x8c, 0x0d, 0x5b, 0xd0, 0xb2, 0xb7, 0xc0, 0xb8, 0x53, 0xb0, + 0x7d, 0x3a, 0xb7, 0x63, 0x66, 0xa2, 0xc0, 0xd8, 0x08, 0x76, 0x26, 0xc7, 0xe0, 0x85, 0x98, 0x2b, + 0x39, 0xa6, 0x65, 0xe5, 0x1c, 0xad, 0xe4, 0x9e, 0x82, 0xa4, 0x7c, 0x71, 0x31, 0x07, 0xca, 0xec, + 0x47, 0xb4, 0x75, 0x3e, 0x2b, 0x65, 0x0c, 0x9b, 0x1c, 0x2d, 0xcc, 0xa3, 0x40, 0x40, 0xd6, 0xdd, + 0x21, 0xa1, 0x10, 0x76, 0x35, 0x13, 0xb3, 0x1a, 0xd3, 0x43, 0x87, 0xf2, 0xbc, 0x28, 0xde, 0x82, + 0x59, 0x4c, 0x3d, 
0xad, 0x33, 0x8d, 0xc5, 0x96, 0x25, 0x86, 0xa3, 0x33, 0x95, 0x36, 0x85, 0x08, + 0xb1, 0x95, 0xd5, 0xb6, 0x5a, 0xaa, 0xb3, 0x77, 0xf9, 0xbd, 0xd0, 0xb4, 0x4a, 0xc7, 0x7c, 0x00, + 0x38, 0x72, 0x90, 0x37, 0xc8, 0x40, 0x00, 0xc8, 0xbb, 0x51, 0x0e, 0x77, 0x71, 0x48, 0x4f, 0xf9, + 0x92, 0x54, 0x01, 0xc9, 0x02, 0xe2, 0x36, 0xac, 0x41, 0x54, 0x64, 0xcd, 0xc5, 0x16, 0x64, 0x2e, + 0x6c, 0x8e, 0x63, 0xe8, 0xea, 0x48, 0x61, 0x74, 0x6a, 0xcb, 0xb6, 0x11, 0x97, 0x19, 0x68, 0xc3, + 0x86, 0x18, 0x8e, 0x1e, 0x28, 0x21, 0xe7, 0xa3, 0x4c, 0xed, 0x23, 0x3f, 0xbc, 0x26, 0xe5, 0x92, + 0xda, 0xc1, 0x9b, 0x63, 0xab, 0xda, 0xd9, 0xa3, 0x5a, 0x17, 0x48, 0xbd, 0x89, 0xd2, 0x3e, 0x14, + 0x3b, 0x1b, 0xc0, 0x6d, 0xe7, 0x3d, 0x86, 0x85, 0x45, 0xe2, 0x9e, 0x7f, 0xff, 0x63, 0x07, 0xe6, + 0x12, 0x23, 0xa9, 0x1c, 0x53, 0x24, 0xc8, 0xe1, 0x89, 0xee, 0xe7, 0x72, 0x07, 0x04, 0x11, 0x8c, + 0xcb, 0x66, 0x61, 0x23, 0x6d, 0x68, 0xe2, 0xaa, 0xb7, 0xf8, 0xb6, 0xd8, 0xb4, 0x6c, 0x13, 0xc4, + 0xd6, 0xba, 0x08, 0xa0, 0x05, 0x23, 0xdc, 0xad, 0xed, 0xff, 0x6d, 0x1b, 0x03, 0x3e, 0xf2, 0x1c, + 0xf6, 0xdd, 0x2a, 0xf1, 0x18, 0x76, 0x2e, 0x82, 0x38, 0xa4, 0xb7, 0x3e, 0xab, 0x74, 0x24, 0x79, + 0x83, 0x0b, 0x2e, 0x8e, 0x0b, 0x19, 0x05, 0x52, 0x20, 0x78, 0x9a, 0xe8, 0x57, 0x77, 0xde, 0xd5, + 0x36, 0xab, 0x60, 0x2b, 0xd5, 0x58, 0x6b, 0xf8, 0x64, 0xdf, 0xeb, 0x52, 0xad, 0x4b, 0xe5, 0x8b, + 0x05, 0x6a, 0x90, 0xcc, 0x72, 0x8a, 0x3a, 0x95, 0xd0, 0x31, 0xa5, 0x75, 0x3a, 0xdd, 0x08, 0xf5, + 0xd5, 0x3f, 0x76, 0x9e, 0x29, 0x7d, 0x4e, 0xb7, 0x93, 0x75, 0xc3, 0xd5, 0x38, 0xdf, 0x97, 0x7c, + 0xf0, 0x15, 0x4a, 0x5c, 0x52, 0x64, 0x3a, 0xf6, 0x1e, 0x26, 0x2f, 0xc6, 0xc8, 0xa3, 0x5a, 0x9f, + 0x40, 0x53, 0x7a, 0x4b, 0x5c, 0x94, 0xe2, 0x79, 0x07, 0x23, 0x06, 0x3e, 0xe5, 0xee, 0x4f, 0xed, + 0x0f, 0x37, 0x19, 0xd8, 0x84, 0xdb, 0x02, 0x69, 0x20, 0x57, 0x4d, 0x08, 0x81, 0xf9, 0x44, 0x13, + 0x41, 0x02, 0x79, 0x4b, 0x3f, 0xc9, 0x07, 0x16, 0xe2, 0xb1, 0x73, 0x43, 0x9d, 0x04, 0xea, 0x8c, + 0xda, 0x4f, 0x85, 0x30, 0x12, 0xc4, 0x87, 0xb4, 0x18, 
0x0a, 0x18, 0x32, 0x0c, 0x77, 0xc4, 0x1e, + 0xa2, 0x23, 0xfa, 0xcf, 0xbd, 0x8b, 0x35, 0xf4, 0x4c, 0x4e, 0x75, 0x1a, 0x80, 0xf9, 0x2c, 0xc5, + 0x0b, 0x81, 0x7a, 0x40, 0x36, 0xa0, 0x58, 0x86, 0xf3, 0xdd, 0x2d, 0x71, 0xc7, 0x8f, 0x05, 0xd0, + 0x81, 0xf2, 0xb9, 0xb2, 0x7e, 0xd9, 0x65, 0x73, 0x4f, 0x8d, 0x1c, 0xed, 0x09, 0x9b, 0xcd, 0xdd, + 0xdd, 0x9a, 0xa0, 0x22, 0x5c, 0x5b, 0xc5, 0xf5, 0xda, 0x8d, 0x01, 0x87, 0x8e, 0x01, 0xe3, 0x12, + 0x5c, 0xb2, 0x23, 0x2a, 0x94, 0x65, 0xa6, 0x9a, 0x87, 0xf8, 0x63, 0x5f, 0x4c, 0xf0, 0x18, 0xe2, + 0x0c, 0xb8, 0x8d, 0xdc, 0x4d, 0x7c, 0x50, 0xa2, 0xe1, 0x87, 0x49, 0x86, 0xb4, 0x38, 0xec, 0xd3, + 0x5d, 0x22, 0x27, 0x42, 0xdc, 0xae, 0x8b, 0x7f, 0xbe, 0x4e, 0x1d, 0xad, 0x06, 0xc9, 0xd0, 0x98, + 0x1f, 0x67, 0x2a, 0x22, 0x19, 0x90, 0xbb, 0x8f, 0xef, 0x42, 0x23, 0xc1, 0xd2, 0x51, 0xd1, 0x9c, + 0x5b, 0xa0, 0x3a, 0x56, 0xae, 0x94, 0x4e, 0x0c, 0x17, 0x5e, 0x82, 0xe7, 0x21, 0xfd, 0x0e, 0xa6, + 0x66, 0xb7, 0x8e, 0x9c, 0x8d, 0x4b, 0x02, 0x63, 0xdf, 0x1c, 0x7a, 0x0c, 0xd9, 0xca, 0x24, 0xc5, + 0x1d, 0xb3, 0x29, 0xff, 0x48, 0x16, 0x48, 0x3d, 0x75, 0xd2, 0xb9, 0x44, 0x00, 0xdc, 0x3e, 0xcb, + 0x37, 0xd9, 0xee, 0x16, 0xce, 0xb7, 0x50, 0x10, 0xad, 0xa8, 0x1f, 0xa0, 0xdb, 0x2f, 0x57, 0xd1, + 0xb6, 0x44, 0x87, 0x00, 0x79, 0xc2, 0x25, 0x03, 0xa8, 0x7a, 0x94, 0xea, 0x02, 0x6b, 0x56, 0x6c, + 0x1c, 0x77, 0x35, 0xd8, 0x85, 0x82, 0xbb, 0x6e, 0x80, 0x00, 0x00, 0x03, 0x2a, 0xff, 0xff, 0xfc, + 0xd5, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, 0x86, 0x00, 0x40, 0x92, 0xe1, 0x81, 0x56, + 0x00, 0x00, 0x14, 0x70, 0x96, 0xc0, 0x00, 0x00, 0x7f, 0xfa, 0x45, 0x6a, 0x6e, 0xe1, 0x2e, 0x61, + 0x4a, 0x49, 0x88, 0x35, 0x59, 0x20, 0x91, 0xb6, 0x0d, 0xe2, 0xdf, 0xa8, 0xab, 0xf2, 0xb1, 0xb8, + 0xf6, 0x9c, 0x9e, 0xc6, 0x50, 0x06, 0xec, 0x2f, 0x9a, 0x9e, 0x91, 0x94, 0x36, 0x4d, 0x13, 0x75, + 0xf0, 0x4e, 0x63, 0xdc, 0x85, 0xa1, 0xdc, 0x3f, 0x09, 0x4a, 0x4b, 0xd4, 0x08, 0x70, 0x94, 0xc9, + 0x73, 0x5b, 0x50, 0x71, 0xa3, 0x32, 0x65, 0xe6, 0x9e, 0xe5, 0xd7, 0xc5, 0x4d, 0xa5, 0x02, 
0x5a, + 0x00, 0x75, 0xd9, 0x80, 0xc9, 0x95, 0x79, 0x47, 0xb1, 0x30, 0xac, 0x8b, 0xf0, 0x3c, 0xd1, 0x57, + 0xe1, 0xf3, 0x6f, 0x39, 0xe0, 0x5c, 0x6c, 0x89, 0x7c, 0x80, 0x78, 0x32, 0x0e, 0x2a, 0x95, 0xf0, + 0x52, 0x88, 0x63, 0x30, 0xfc, 0x2f, 0xe2, 0x2d, 0x0a, 0xb6, 0x1e, 0xa4, 0xd5, 0x59, 0x32, 0x99, + 0x82, 0xc4, 0x2c, 0x77, 0x89, 0x5d, 0x28, 0xab, 0x0f, 0x07, 0xed, 0xa3, 0x02, 0xc2, 0x0c, 0x95, + 0x45, 0xdc, 0x0b, 0x2e, 0xa6, 0x96, 0x1d, 0x2a, 0x36, 0x90, 0xa9, 0x03, 0xcd, 0x09, 0x2e, 0xc9, + 0x95, 0x28, 0x73, 0xf2, 0x1b, 0x2b, 0x3a, 0x5e, 0x66, 0xf0, 0xf8, 0x50, 0xac, 0xf5, 0x23, 0x98, + 0xa0, 0xb2, 0xc0, 0xe7, 0x0b, 0xc4, 0x93, 0x97, 0x0a, 0xbe, 0x83, 0x82, 0x56, 0x4c, 0xa9, 0xd9, + 0x32, 0x0f, 0x88, 0xc5, 0xd8, 0x30, 0xac, 0x6f, 0x92, 0x42, 0x94, 0x64, 0xb1, 0x33, 0x95, 0x27, + 0x0b, 0x3a, 0x69, 0x4a, 0xbd, 0x60, 0x75, 0x9d, 0xb5, 0xc5, 0x82, 0x15, 0xa7, 0x0d, 0x27, 0xe9, + 0x5b, 0xf0, 0x1e, 0x40, 0x6c, 0x6b, 0x6d, 0x48, 0xff, 0x77, 0x4e, 0xfc, 0x58, 0x33, 0xc0, 0x00, + 0x58, 0x62, 0xfc, 0xe3, 0x0e, 0xa2, 0xc5, 0xf7, 0xd9, 0x38, 0x5d, 0xbb, 0x80, 0x4b, 0x1d, 0x36, + 0x3d, 0x63, 0xf0, 0x3c, 0xaa, 0x83, 0x9f, 0x12, 0x7b, 0x48, 0x98, 0xad, 0x67, 0xda, 0x18, 0x4a, + 0x4e, 0x79, 0x7c, 0xd9, 0xb9, 0xda, 0x2d, 0x32, 0xe3, 0x6a, 0x6b, 0x5c, 0xf4, 0xbb, 0xfe, 0x52, + 0x3e, 0xee, 0xe6, 0x37, 0xc3, 0x10, 0xfc, 0xd5, 0xf2, 0x7d, 0xca, 0xef, 0x60, 0x25, 0xa0, 0x0e, + 0x3e, 0xe2, 0xb3, 0x26, 0xba, 0xac, 0xea, 0x87, 0x66, 0xd8, 0xa6, 0xe6, 0x2d, 0x3b, 0x77, 0x25, + 0xe2, 0x84, 0x32, 0x85, 0x27, 0x7d, 0x20, 0x97, 0xd5, 0x5c, 0x49, 0x71, 0x16, 0x95, 0x10, 0x30, + 0x27, 0x67, 0xa3, 0xd4, 0x5e, 0x5a, 0x8c, 0xcf, 0x56, 0x37, 0xa9, 0x04, 0xb1, 0xec, 0xd8, 0x69, + 0x90, 0xca, 0x37, 0x0d, 0x4f, 0x82, 0x91, 0x58, 0x0e, 0xf6, 0xfe, 0x84, 0xa5, 0x7b, 0xa8, 0xac, + 0x99, 0x23, 0x01, 0xa6, 0x37, 0x8a, 0x50, 0x0e, 0x27, 0x47, 0x96, 0x37, 0xb6, 0x13, 0x47, 0xb1, + 0x6f, 0xe9, 0xb1, 0xeb, 0xe6, 0x73, 0x6b, 0xf2, 0xd8, 0x5a, 0x3c, 0x55, 0xb4, 0x87, 0x3f, 0x1c, + 0x96, 0x94, 0x84, 0xd9, 0x3d, 
0x6f, 0x51, 0xc6, 0x06, 0xc6, 0x40, 0x5f, 0xde, 0x9a, 0x4a, 0x72, + 0x9b, 0x8d, 0x3e, 0x1e, 0x1d, 0x7d, 0xb7, 0x46, 0x90, 0x48, 0x7b, 0x30, 0x38, 0xef, 0x6b, 0x02, + 0x92, 0x10, 0x94, 0x81, 0x15, 0x66, 0x9b, 0xc3, 0xb0, 0x62, 0xab, 0xd6, 0x6c, 0xd8, 0xbd, 0x04, + 0xad, 0x69, 0x93, 0xb9, 0x0a, 0xd2, 0xc5, 0x39, 0xb7, 0xde, 0x20, 0xa6, 0x27, 0x58, 0x0a, 0x79, + 0x4a, 0xd5, 0xb0, 0xa9, 0x0a, 0x9f, 0x5d, 0x05, 0x20, 0xdd, 0x11, 0xa8, 0x8d, 0x82, 0xb3, 0xa9, + 0x28, 0x5f, 0xb7, 0x13, 0xa6, 0x7d, 0xf5, 0x48, 0x09, 0x5c, 0xdb, 0x4d, 0x92, 0xb7, 0x68, 0x57, + 0xd0, 0x0c, 0x94, 0x03, 0x99, 0xe4, 0xae, 0xb3, 0x5c, 0x9d, 0xe7, 0x73, 0x36, 0x16, 0x4e, 0x9f, + 0xea, 0x2e, 0x8c, 0x83, 0xfd, 0x7a, 0xb4, 0xef, 0xdd, 0x5d, 0x60, 0x24, 0x70, 0xa4, 0x1d, 0x11, + 0xd7, 0x44, 0xa1, 0x51, 0x18, 0x12, 0x78, 0x20, 0xf7, 0x22, 0x68, 0x32, 0xd8, 0x50, 0xd9, 0x73, + 0xa2, 0x00, 0xc8, 0x52, 0xfe, 0xb6, 0x06, 0x03, 0x05, 0xd3, 0xa0, 0x47, 0xc4, 0x2c, 0x9a, 0x7f, + 0x6a, 0x8f, 0xdc, 0x03, 0x7a, 0x4a, 0x96, 0x16, 0x86, 0x8b, 0x09, 0x73, 0x90, 0x22, 0x99, 0x9e, + 0x79, 0x01, 0xf9, 0xe8, 0x26, 0xcb, 0x80, 0x7e, 0x2f, 0xf7, 0x92, 0x56, 0xfa, 0xa1, 0x22, 0xd3, + 0x5d, 0x64, 0xa6, 0xe1, 0x14, 0x73, 0x3e, 0xa1, 0x67, 0x34, 0xc5, 0xc9, 0xac, 0xd4, 0xef, 0xd5, + 0x09, 0xc4, 0x9d, 0x38, 0xa8, 0xe9, 0x7a, 0xdd, 0xfc, 0x3c, 0xb8, 0x5d, 0x84, 0x55, 0xcc, 0x75, + 0xfd, 0x11, 0x12, 0x72, 0xe1, 0x46, 0x06, 0xd9, 0x8e, 0x6a, 0xee, 0xa3, 0xbd, 0xa3, 0xc5, 0x89, + 0x9e, 0x9b, 0x56, 0xb2, 0xe7, 0xd6, 0x13, 0x44, 0x96, 0xbd, 0x01, 0xae, 0xbb, 0xf6, 0xe5, 0x56, + 0x8d, 0xc0, 0x75, 0xd1, 0x7f, 0x85, 0x25, 0xb3, 0x98, 0xee, 0x97, 0xbf, 0xa2, 0x50, 0x47, 0x5a, + 0x2b, 0x31, 0x32, 0x42, 0x08, 0x80, 0x72, 0x70, 0x52, 0xb9, 0xb9, 0x76, 0x71, 0x02, 0x18, 0x3b, + 0xa0, 0xc3, 0xee, 0xbb, 0x44, 0x38, 0x4d, 0x25, 0xcb, 0xc9, 0x54, 0xfa, 0x26, 0x3c, 0xff, 0x5d, + 0xac, 0xa7, 0x30, 0x56, 0xfd, 0x81, 0xd7, 0xbb, 0x7c, 0x44, 0xf1, 0x48, 0x51, 0xc0, 0x09, 0x00, + 0x15, 0xdd, 0xf6, 0x4d, 0xbb, 0x59, 0x8a, 0x19, 0xb7, 0xab, 0x6b, 
0x95, 0xb9, 0x46, 0x35, 0x03, + 0x38, 0x95, 0x51, 0x42, 0x9c, 0x10, 0x31, 0x98, 0xfd, 0x84, 0x65, 0xf0, 0xa7, 0x67, 0x51, 0xea, + 0x02, 0x20, 0xf8, 0xd0, 0xc1, 0x5a, 0x2c, 0xcc, 0x04, 0x1f, 0xf5, 0x2d, 0x58, 0x75, 0xe4, 0x56, + 0xda, 0xc5, 0x90, 0xd2, 0xc3, 0x08, 0x64, 0x4e, 0x3f, 0x48, 0x60, 0xf3, 0xa6, 0x52, 0x66, 0xec, + 0x77, 0xac, 0xf5, 0x0a, 0xae, 0xd0, 0x86, 0x9f, 0x28, 0x5f, 0x10, 0x95, 0xf3, 0xf4, 0x80, 0x00, + 0x00, 0x03, 0x57, 0xff, 0xff, 0xfc, 0xa8, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, 0x86, + 0x00, 0x40, 0x92, 0xe1, 0x61, 0x56, 0x00, 0x00, 0x0c, 0x70, 0x00, 0x00, 0x7f, 0xdf, 0xbf, 0x28, + 0xf1, 0xca, 0x4e, 0xe8, 0xe2, 0x0b, 0xc7, 0x0d, 0xe4, 0x38, 0xbe, 0xfa, 0xb0, 0xd9, 0x0f, 0x0f, + 0x63, 0x4f, 0x57, 0x00, 0x2b, 0x01, 0x35, 0x8f, 0xe1, 0x82, 0xaf, 0xbd, 0x69, 0x53, 0x0c, 0x2d, + 0x6b, 0x68, 0xab, 0x4b, 0xf4, 0x60, 0xae, 0x73, 0x82, 0x1c, 0xe8, 0x95, 0x84, 0xed, 0xc2, 0x4e, + 0x81, 0x9a, 0xe5, 0xf5, 0x8e, 0x3f, 0xe2, 0x55, 0x6a, 0x1e, 0x18, 0xb5, 0x1b, 0xf5, 0xea, 0x3f, + 0x94, 0x82, 0xce, 0xbf, 0x35, 0xea, 0xd6, 0x8d, 0xb0, 0xaf, 0x9a, 0xf9, 0xdd, 0x4f, 0xff, 0xea, + 0x07, 0x63, 0x93, 0x8d, 0x17, 0x65, 0x54, 0xd3, 0x1d, 0x45, 0x70, 0x68, 0xf2, 0xbc, 0x40, 0x84, + 0xac, 0x8b, 0xff, 0x42, 0xa1, 0xa1, 0x29, 0x77, 0xc9, 0xc6, 0x81, 0xf9, 0x46, 0x45, 0xf7, 0x10, + 0x50, 0x85, 0x67, 0xe6, 0xab, 0xd6, 0x1e, 0xe0, 0xa9, 0xeb, 0x1b, 0x9f, 0x1a, 0xe3, 0xc2, 0x41, + 0x87, 0x71, 0x97, 0xc4, 0xc8, 0x6a, 0x2d, 0x64, 0xde, 0x4d, 0x02, 0x33, 0x42, 0xf7, 0xa9, 0xc2, + 0x6e, 0x20, 0x7f, 0xf3, 0x1a, 0xd3, 0x33, 0x92, 0x3e, 0xd8, 0xe3, 0x77, 0x24, 0x5f, 0x70, 0x4d, + 0x50, 0x0d, 0xc7, 0x18, 0x82, 0x82, 0x65, 0x1d, 0xe4, 0x49, 0x3e, 0x9f, 0x8e, 0xec, 0xd8, 0x29, + 0x8c, 0xd4, 0x67, 0xbf, 0xe6, 0x51, 0xc3, 0x66, 0x74, 0x0f, 0x8a, 0x8c, 0xae, 0xd4, 0x8f, 0xab, + 0xc3, 0x37, 0xee, 0xbe, 0xec, 0x81, 0x24, 0x77, 0x73, 0x4b, 0x84, 0x7e, 0x24, 0x62, 0x62, 0x6d, + 0x79, 0x5d, 0x68, 0x64, 0x3f, 0x98, 0xdd, 0x7e, 0x82, 0xa8, 0x0f, 0x45, 0x0e, 0x93, 0xcd, 0x3d, + 0x1d, 
0x50, 0x16, 0xf5, 0xf5, 0x53, 0x29, 0xc0, 0xbb, 0xbb, 0xd6, 0x68, 0x87, 0x47, 0x5d, 0xda, + 0xdf, 0xe4, 0x84, 0xfe, 0x40, 0x21, 0xd4, 0x0b, 0xf0, 0x3d, 0x19, 0x32, 0x03, 0x85, 0x1b, 0xa8, + 0xa7, 0xea, 0x6f, 0xd9, 0xf5, 0x54, 0x58, 0xa3, 0xdc, 0xd7, 0xc0, 0xd5, 0x26, 0x2d, 0x39, 0xed, + 0xcc, 0xb7, 0xfb, 0x36, 0xeb, 0xc6, 0x21, 0x58, 0xe4, 0x52, 0xea, 0x81, 0xa0, 0xa2, 0x63, 0x8a, + 0xae, 0x0c, 0xb1, 0x1d, 0x79, 0xb9, 0xcd, 0x49, 0xb8, 0x31, 0x88, 0xe5, 0xe2, 0x5c, 0xbd, 0x7a, + 0xbe, 0xcc, 0x74, 0x47, 0xe7, 0x65, 0x2d, 0x3d, 0xe9, 0x41, 0x64, 0x67, 0x01, 0x76, 0xc8, 0x41, + 0x3b, 0x7e, 0xde, 0x3c, 0x65, 0xc6, 0x36, 0x8a, 0xeb, 0xe1, 0x77, 0xe8, 0x4b, 0x8f, 0x6b, 0xb0, + 0x09, 0x8b, 0xc3, 0xf9, 0x9e, 0x9c, 0xdb, 0x26, 0x45, 0x00, 0x52, 0x4d, 0xc1, 0xfb, 0x33, 0x0f, + 0xfc, 0x01, 0x31, 0xe1, 0x1a, 0x92, 0x5c, 0x57, 0x5d, 0xce, 0x65, 0x51, 0x16, 0x62, 0xa0, 0x4b, + 0x4f, 0x86, 0xf4, 0x7d, 0xb4, 0x3c, 0x01, 0x19, 0x32, 0x7a, 0x88, 0x4a, 0x50, 0x96, 0xbd, 0x99, + 0xe7, 0x3c, 0x9e, 0x38, 0xd8, 0x08, 0x42, 0x03, 0xae, 0xbc, 0x19, 0xf1, 0x2b, 0xe1, 0x7f, 0x2f, + 0xd6, 0x1e, 0xda, 0x8f, 0xf2, 0x0e, 0x88, 0x9e, 0x07, 0x76, 0x6a, 0xd8, 0xde, 0xfa, 0xac, 0x08, + 0x06, 0x34, 0x15, 0x18, 0xf4, 0x09, 0x47, 0x4e, 0x76, 0x91, 0xfc, 0x53, 0x03, 0xed, 0xc7, 0x53, + 0x9e, 0xbc, 0xc7, 0x5e, 0x17, 0xd4, 0x18, 0x29, 0x04, 0xb6, 0xe9, 0x57, 0x97, 0x5a, 0x36, 0x61, + 0xf7, 0x3a, 0x47, 0x62, 0x24, 0xb8, 0x53, 0x44, 0x41, 0x22, 0xb8, 0x4d, 0x75, 0x89, 0xe2, 0xce, + 0x01, 0x3a, 0xce, 0x33, 0xd0, 0xa6, 0x18, 0xd4, 0x3e, 0x51, 0x02, 0xbc, 0x02, 0x01, 0x26, 0x8e, + 0x44, 0x97, 0xa6, 0x2a, 0xd3, 0xe1, 0x43, 0xbb, 0xa6, 0xea, 0x15, 0x3b, 0xeb, 0xab, 0xf6, 0xfe, + 0x5d, 0xe7, 0x4c, 0x8f, 0x16, 0x47, 0xdd, 0xf6, 0xdd, 0xe4, 0x5a, 0x47, 0x8c, 0x6d, 0x1c, 0x66, + 0xfb, 0x6c, 0x00, 0xe2, 0xf6, 0x79, 0xae, 0x35, 0x4e, 0x81, 0xc8, 0xdd, 0xd6, 0xb8, 0x31, 0x52, + 0x4d, 0xee, 0x6b, 0x66, 0xf1, 0x17, 0x1c, 0x34, 0xa2, 0x9c, 0x1b, 0x9d, 0x0f, 0x29, 0x23, 0x9e, + 0xf1, 0xa8, 0x4a, 0x44, 0x5d, 0x8d, 0x6d, 
0x75, 0x15, 0xe6, 0x29, 0x3d, 0xf9, 0x10, 0x7d, 0x2c, + 0x02, 0xb3, 0x42, 0x28, 0xfe, 0x70, 0x3d, 0x0f, 0x5a, 0xac, 0x5d, 0x81, 0x6e, 0xea, 0xb9, 0x05, + 0x94, 0x2a, 0xaf, 0xc5, 0x6a, 0x0d, 0x90, 0x72, 0xbf, 0x17, 0x34, 0x28, 0x4c, 0x92, 0x3b, 0xcf, + 0xbc, 0x45, 0xc1, 0xb5, 0xac, 0x1d, 0x60, 0x6f, 0xce, 0x7c, 0xe6, 0x81, 0x33, 0x5c, 0x30, 0xdf, + 0xae, 0x80, 0x8d, 0x1d, 0xc3, 0x66, 0x96, 0xf4, 0x59, 0x08, 0x15, 0x13, 0x09, 0x28, 0x3b, 0x15, + 0x42, 0x78, 0x15, 0xa7, 0x14, 0x73, 0x25, 0x98, 0xe5, 0x1d, 0x93, 0xf0, 0xa2, 0x7e, 0x3d, 0xdf, + 0x53, 0xcc, 0x32, 0xb1, 0x54, 0x55, 0xe1, 0xb5, 0x88, 0x46, 0x75, 0xcc, 0x61, 0x71, 0x71, 0x11, + 0xd5, 0x74, 0x99, 0x2c, 0xf9, 0x67, 0x4a, 0x7d, 0x70, 0xfb, 0xa4, 0x8b, 0x0e, 0xa8, 0x4d, 0x0b, + 0x5b, 0xd5, 0x8d, 0xa5, 0xb3, 0xb8, 0xda, 0x2d, 0x36, 0xf4, 0x80, 0x3c, 0xde, 0xbd, 0x29, 0xa2, + 0xe6, 0x57, 0xc7, 0x0b, 0x2a, 0x8f, 0x67, 0x37, 0xfb, 0xd6, 0x5f, 0xcb, 0xde, 0xb5, 0x4e, 0x2b, + 0x4b, 0x1a, 0x1b, 0x67, 0xdc, 0x4b, 0x3e, 0xd5, 0x2b, 0x3f, 0x96, 0x57, 0x00, 0xa3, 0xb4, 0x5a, + 0x30, 0x5d, 0x25, 0x1a, 0xa6, 0x49, 0x3e, 0xd6, 0xd5, 0x4e, 0x18, 0x9d, 0xe0, 0x88, 0xe7, 0xd1, + 0xc0, 0x09, 0x98, 0x2d, 0x1a, 0x7b, 0xee, 0xda, 0xc4, 0x35, 0xba, 0x60, 0x66, 0x72, 0x20, 0x1f, + 0x16, 0x9f, 0xa7, 0xc3, 0x96, 0x94, 0x22, 0x70, 0x05, 0x1e, 0x08, 0x63, 0x8f, 0xd8, 0x85, 0x71, + 0x94, 0xb4, 0x26, 0x4d, 0x7c, 0xcd, 0x46, 0x90, 0x6a, 0x07, 0x2c, 0x08, 0xe5, 0xa7, 0x04, 0x05, + 0xed, 0x2d, 0xea, 0x0e, 0xc1, 0x69, 0x6a, 0xa5, 0x57, 0x41, 0xb0, 0x91, 0xdf, 0xe2, 0x52, 0xad, + 0x68, 0x19, 0xef, 0x2f, 0x8f, 0x4b, 0x65, 0xea, 0x4a, 0xe7, 0x00, 0x2e, 0xfd, 0x7c, 0xd8, 0x0b, + 0xf9, 0x1e, 0x98, 0x44, 0x1b, 0xdc, 0x7e, 0x5e, 0xfe, 0xf3, 0x3d, 0xa5, 0xba, 0x5f, 0x3b, 0x37, + 0x64, 0x80, 0x00, 0x00, 0x03, 0xa5, 0xff, 0xff, 0xfc, 0x5a, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, + 0x4c, 0x56, 0x86, 0x00, 0x40, 0x92, 0xe1, 0x41, 0x56, 0x00, 0x00, 0x0c, 0x70, 0x00, 0x00, 0x7d, + 0x9c, 0xe1, 0xf6, 0x1d, 0x70, 0x67, 0xf6, 0xf6, 0xf8, 0xc3, 0xff, 0x6e, 0xea, 
0xb7, 0x4e, 0x1d, + 0xa0, 0xf6, 0xa7, 0x65, 0x73, 0x97, 0xd9, 0xd1, 0xdc, 0x5f, 0xc1, 0x5a, 0xf3, 0x6c, 0x5b, 0xf9, + 0x6a, 0x5a, 0x87, 0xdc, 0xd5, 0x6d, 0x6c, 0xd6, 0x48, 0x29, 0x2a, 0x94, 0x99, 0xea, 0xc9, 0xd0, + 0xf7, 0x06, 0x13, 0xf7, 0xcd, 0xda, 0x03, 0xc2, 0x95, 0x2e, 0x8c, 0xa8, 0x4c, 0xf3, 0xd5, 0x11, + 0x38, 0x31, 0x6f, 0xd8, 0x16, 0xeb, 0xd5, 0x72, 0xac, 0x62, 0xc2, 0xd9, 0xf7, 0x6f, 0xbb, 0xfc, + 0x4b, 0x12, 0x46, 0xd7, 0xc6, 0x23, 0x00, 0xb4, 0x2a, 0xe4, 0x18, 0x45, 0xa6, 0xf2, 0x0a, 0x1c, + 0xa6, 0x8a, 0xc7, 0x30, 0x3d, 0x73, 0xa1, 0xfb, 0x47, 0x07, 0x18, 0xf4, 0x63, 0x33, 0x44, 0xa7, + 0x7e, 0xdb, 0xd6, 0x25, 0xd5, 0x1b, 0x5a, 0x67, 0x46, 0x6b, 0x8d, 0x91, 0x09, 0x7d, 0xf4, 0x2c, + 0xc7, 0x45, 0xa1, 0xce, 0x6e, 0x90, 0x0f, 0x36, 0xa4, 0x57, 0x49, 0x7c, 0x03, 0x1b, 0x62, 0x33, + 0x09, 0x05, 0x59, 0xca, 0xdd, 0xfb, 0x13, 0x4a, 0x58, 0xfc, 0xdc, 0x8f, 0x5e, 0xda, 0x20, 0x7d, + 0x1f, 0x6a, 0x80, 0x01, 0x01, 0xea, 0x4d, 0xfc, 0xe4, 0x8d, 0x90, 0xb0, 0xf1, 0xa1, 0x16, 0x47, + 0x19, 0x4f, 0xab, 0x4f, 0x38, 0x32, 0xc8, 0x17, 0xb2, 0x99, 0xed, 0x71, 0x5b, 0x5c, 0x72, 0xda, + 0x64, 0x6e, 0xc5, 0x13, 0x5b, 0x51, 0xf3, 0x46, 0x04, 0x26, 0xcc, 0x47, 0xa0, 0x44, 0x91, 0x4c, + 0x75, 0x57, 0xbe, 0xb6, 0x88, 0x49, 0x6c, 0x4a, 0xec, 0xd0, 0x65, 0x3f, 0xf0, 0xc2, 0x15, 0x28, + 0x51, 0x28, 0x83, 0xfd, 0xfc, 0x86, 0x0f, 0x6b, 0xc1, 0x20, 0x60, 0x57, 0xbb, 0x15, 0x4c, 0x01, + 0x07, 0xc9, 0x2f, 0x8e, 0x19, 0xc1, 0xef, 0x73, 0x65, 0x3a, 0x9e, 0x00, 0x4f, 0x03, 0x88, 0x7c, + 0xc3, 0x04, 0x2b, 0x16, 0xee, 0xf1, 0x37, 0x42, 0x6e, 0x05, 0x93, 0x98, 0x65, 0x51, 0xef, 0x5b, + 0xb3, 0xe6, 0x0a, 0x08, 0xad, 0x02, 0xc2, 0xfa, 0xfc, 0xc7, 0x0e, 0x39, 0x74, 0x54, 0xef, 0xf0, + 0xdc, 0x7b, 0x19, 0xce, 0x35, 0x74, 0xe0, 0x35, 0x5c, 0xf3, 0x23, 0xe8, 0xc5, 0x71, 0x9f, 0x63, + 0x4c, 0x3f, 0xb9, 0x64, 0xcd, 0x37, 0x7d, 0x59, 0x28, 0x37, 0x83, 0x06, 0xcb, 0x4f, 0x22, 0xf9, + 0x03, 0x85, 0x2a, 0x18, 0xc7, 0xb0, 0xb7, 0x27, 0x3e, 0xc4, 0x8a, 0xa1, 0xbe, 0xf6, 0xe0, 0x31, + 0x75, 0x4b, 0xfe, 
0x70, 0x96, 0x82, 0x8e, 0xfe, 0x82, 0x8c, 0xd6, 0x64, 0x02, 0x96, 0xed, 0xdf, + 0x14, 0x04, 0x27, 0xa2, 0x7b, 0x2c, 0xe0, 0xd4, 0x1e, 0x61, 0x0e, 0x0f, 0x2d, 0x5f, 0x93, 0xfc, + 0x89, 0xf1, 0xfe, 0xaf, 0xa4, 0xda, 0x53, 0x18, 0x12, 0xc4, 0x0c, 0xd9, 0x15, 0x47, 0x60, 0x31, + 0xc8, 0xea, 0x9d, 0x33, 0x78, 0x91, 0xbf, 0xe3, 0xc9, 0xe1, 0xdc, 0x26, 0xe9, 0xf8, 0x70, 0x55, + 0x8f, 0x2b, 0xa8, 0x5b, 0x4c, 0x11, 0x7d, 0xd1, 0xa1, 0xe2, 0xd0, 0xba, 0xe6, 0x62, 0x36, 0x67, + 0xe9, 0x5f, 0x01, 0x05, 0x71, 0x29, 0xba, 0x31, 0x36, 0xbe, 0x4b, 0xc0, 0x43, 0xa4, 0xb2, 0x9f, + 0x09, 0xa6, 0xc4, 0x9b, 0xfd, 0x1f, 0x46, 0x3c, 0x44, 0x48, 0x3c, 0xc7, 0x58, 0x34, 0x7e, 0xfa, + 0x8b, 0x73, 0xee, 0x0e, 0x04, 0x9e, 0x01, 0x10, 0x0a, 0xe1, 0x9f, 0x15, 0x92, 0xb3, 0x8b, 0x9c, + 0x10, 0x2f, 0xfa, 0xb3, 0x8d, 0x21, 0x48, 0x4d, 0x82, 0x45, 0x0b, 0x89, 0x47, 0xa1, 0xd2, 0x7c, + 0xeb, 0x30, 0x44, 0x51, 0xdd, 0x64, 0xb8, 0x7e, 0x3c, 0xce, 0xc9, 0x5b, 0xf1, 0x8a, 0xf9, 0xad, + 0x62, 0xe9, 0x7f, 0x06, 0x56, 0x31, 0xb3, 0xae, 0xb3, 0x7d, 0x8e, 0x11, 0xa4, 0x4e, 0xbd, 0x46, + 0xc3, 0x01, 0xce, 0x13, 0xb0, 0x3c, 0x2d, 0x6c, 0x7c, 0xbd, 0xa3, 0x00, 0x1e, 0x59, 0x1f, 0x92, + 0x49, 0x16, 0xbd, 0x4b, 0x1e, 0x24, 0x23, 0x4c, 0x91, 0xb9, 0xb5, 0xf2, 0x58, 0x76, 0x2f, 0xcb, + 0xda, 0xd4, 0xe4, 0xe0, 0x5b, 0x32, 0x93, 0x53, 0x40, 0x94, 0xe1, 0x7b, 0x12, 0xb7, 0xaa, 0xba, + 0x70, 0x93, 0x26, 0x93, 0x7d, 0x68, 0x5f, 0xda, 0x4f, 0x33, 0xac, 0xc4, 0xf9, 0x4b, 0xcb, 0xb0, + 0x8e, 0x7a, 0xda, 0x65, 0xdf, 0x5f, 0x31, 0xbb, 0x36, 0x0d, 0xdf, 0xd4, 0x8c, 0xf0, 0xd5, 0xa8, + 0x9d, 0x3c, 0x89, 0x3d, 0x1c, 0x0e, 0x25, 0x8e, 0x5a, 0xc9, 0x68, 0x38, 0xcd, 0x74, 0x5c, 0x2a, + 0xb7, 0x06, 0x7b, 0x8f, 0x0c, 0x11, 0x43, 0x9e, 0x13, 0x61, 0xdc, 0x72, 0x09, 0x92, 0x3e, 0x5f, + 0x9e, 0xaf, 0x5f, 0x5d, 0x69, 0x25, 0xf5, 0x23, 0x74, 0x74, 0xab, 0xc2, 0x2f, 0x94, 0xac, 0xa9, + 0xef, 0xcd, 0xf6, 0x48, 0x77, 0x91, 0xbb, 0x4f, 0xc8, 0x57, 0x8f, 0x1c, 0xc6, 0x09, 0xde, 0xcb, + 0x4f, 0xdc, 0x6f, 0xf9, 0xcc, 0xcc, 0xd1, 0xce, 0x9a, 
0xb6, 0x4a, 0x16, 0xeb, 0x5c, 0x54, 0x26, + 0x48, 0xe8, 0x85, 0x9a, 0x53, 0xdf, 0xb3, 0xad, 0x9d, 0x59, 0x77, 0xdc, 0xca, 0xa7, 0x1d, 0xc2, + 0x39, 0x60, 0xd3, 0xb8, 0xb9, 0xd9, 0x51, 0xf8, 0x34, 0x26, 0xce, 0x87, 0xb8, 0x87, 0x9a, 0xb0, + 0xa6, 0x6f, 0xba, 0xe6, 0xff, 0xa0, 0xff, 0x57, 0x4b, 0xde, 0x20, 0x5f, 0x71, 0xad, 0xc3, 0xad, + 0xd5, 0x12, 0xee, 0xad, 0xd3, 0x3b, 0xdf, 0x6e, 0xa3, 0x38, 0xc0, 0x87, 0x39, 0x3e, 0xce, 0xfe, + 0xfe, 0x72, 0x84, 0x1c, 0xe7, 0xfd, 0xac, 0x88, 0xab, 0x79, 0x73, 0x97, 0xc4, 0x2d, 0x2b, 0xf3, + 0xa3, 0x69, 0x55, 0x64, 0x25, 0x1a, 0x32, 0xe0, 0x57, 0xc1, 0x10, 0x18, 0x0d, 0xdb, 0x20, 0x6c, + 0xa2, 0x3d, 0xc9, 0xbf, 0x93, 0x2e, 0x6a, 0x6f, 0xac, 0xa3, 0x9a, 0xc9, 0xe8, 0x6a, 0xd3, 0xfe, + 0x43, 0x05, 0x9d, 0xa0, 0x21, 0xa9, 0xbe, 0xe9, 0x87, 0x06, 0x75, 0xe3, 0x9e, 0xee, 0x40, 0x93, + 0x9d, 0x21, 0xec, 0xe9, 0xdc, 0x17, 0x62, 0xf4, 0xfc, 0x74, 0x4f, 0xf2, 0xf2, 0x5b, 0xed, 0x73, + 0x93, 0x79, 0x66, 0xf8, 0x6b, 0x38, 0xb1, 0xbe, 0x29, 0x11, 0x3f, 0x52, 0x78, 0x7c, 0xab, 0xb7, + 0x9a, 0x9e, 0x0a, 0xfa, 0x29, 0x0f, 0x41, 0x44, 0xd2, 0xd6, 0x38, 0xbb, 0x11, 0x83, 0x25, 0xf9, + 0xf8, 0x2a, 0x7c, 0x4a, 0x72, 0xa1, 0x42, 0x54, 0x41, 0xbd, 0x3a, 0x6f, 0x7e, 0x3a, 0xc6, 0xee, + 0x45, 0xf0, 0x90, 0xe5, 0x86, 0x6e, 0x91, 0xbc, 0x35, 0x21, 0x47, 0xa0, 0x0a, 0x2d, 0xd2, 0x5c, + 0xbc, 0x83, 0x21, 0x8b, 0x74, 0xb1, 0x11, 0x86, 0x60, 0x00, 0xab, 0x5b, 0x4f, 0x08, 0x26, 0xce, + 0xce, 0xe5, 0x36, 0x02, 0x0d, 0x42, 0x35, 0x1e, 0x4d, 0x28, 0x6e, 0xc0, 0xe4, 0x4a, 0x1c, 0x2c, + 0x7b, 0x93, 0x80 +#else + 0x00, 0x00, 0x01, 0x34, 0xff, 0xff, 0xfe, 0xcb, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, + 0x82, 0x49, 0x83, 0x42, 0x00, 0x04, 0x10, 0x04, 0x14, 0x0e, 0x38, 0x24, 0x1c, 0x19, 0xba, 0x00, + 0x00, 0x90, 0x7c, 0x11, 0xb4, 0xcc, 0x7e, 0x19, 0x86, 0x00, 0x00, 0x7f, 0xc3, 0xb6, 0x35, 0x54, + 0x45, 0x38, 0x12, 0xe2, 0xd7, 0xa9, 0x1b, 0x49, 0xef, 0xc6, 0xfc, 0x14, 0x34, 0x41, 0xc7, 0xa3, + 0x91, 0xf4, 0x76, 0x8a, 0x19, 0x6b, 0xb0, 0xa4, 0xa0, 0x04, 0x13, 
0xb0, 0xbe, 0x10, 0x72, 0x80, + 0x0a, 0x58, 0xbc, 0x18, 0xf2, 0xd0, 0x4f, 0x62, 0x1b, 0xea, 0xc2, 0x0b, 0xba, 0xfc, 0x7b, 0xbc, + 0xb3, 0x6a, 0x97, 0x15, 0x1f, 0x3c, 0x21, 0x4e, 0x3f, 0xa2, 0xe9, 0xe9, 0xfc, 0x92, 0xc3, 0xe7, + 0x7d, 0xb2, 0x08, 0x87, 0x98, 0x8e, 0x77, 0x0f, 0x09, 0x7b, 0xa7, 0x41, 0x42, 0xff, 0x14, 0xa1, + 0x0e, 0xf3, 0x28, 0x2d, 0xe1, 0x13, 0x73, 0x49, 0x26, 0xed, 0x88, 0x22, 0x82, 0x6a, 0x02, 0x87, + 0xa5, 0xbe, 0x9c, 0xe5, 0x3f, 0xc7, 0xb5, 0x65, 0x3f, 0x7e, 0xa2, 0x82, 0x3e, 0x22, 0xa1, 0x03, + 0xe1, 0xcd, 0x89, 0xe6, 0xf0, 0x47, 0x62, 0x81, 0x89, 0xaa, 0x7e, 0xbc, 0x4a, 0xc1, 0x7c, 0x26, + 0xe4, 0xc0, 0xd4, 0xbc, 0xb8, 0xcc, 0xb8, 0x44, 0x07, 0x51, 0xb4, 0xb3, 0xb9, 0xf5, 0x04, 0xaf, + 0x3f, 0x41, 0x49, 0x56, 0x6a, 0x87, 0x46, 0x95, 0xa1, 0xe7, 0x69, 0xe7, 0x3c, 0x32, 0x06, 0xc2, + 0xa2, 0x66, 0x48, 0x1f, 0x14, 0x43, 0x81, 0xf0, 0xa0, 0x3c, 0xa4, 0x82, 0x7f, 0x18, 0x9b, 0xe4, + 0x22, 0x2d, 0x64, 0x2a, 0xd0, 0x4d, 0xb4, 0xfd, 0x14, 0x2d, 0x6d, 0x68, 0xaf, 0x19, 0x7d, 0x0f, + 0x0f, 0x60, 0xc4, 0x92, 0x73, 0x34, 0xc9, 0x51, 0x9a, 0xb6, 0xac, 0x06, 0x90, 0xaf, 0x11, 0x21, + 0x0c, 0xb0, 0x02, 0xd9, 0xd7, 0xd1, 0x06, 0xa0, 0x05, 0xb6, 0x75, 0x70, 0x5d, 0xbc, 0x84, 0x99, + 0x08, 0xfd, 0x8b, 0x60, 0x33, 0xf5, 0x0f, 0xab, 0x42, 0xab, 0x63, 0x1a, 0x37, 0x1c, 0x5a, 0x89, + 0xc1, 0x5f, 0x43, 0x5c, 0x65, 0x63, 0x60, 0xc4, 0xca, 0xe9, 0x59, 0x72, 0xea, 0x93, 0xf9, 0xcb, + 0x0d, 0x96, 0x5e, 0x33, 0x21, 0xa9, 0xe1, 0xf1, 0x37, 0xcc, 0x1b, 0x3c, 0x99, 0x36, 0x78, 0x40, + 0x00, 0x00, 0x00, 0x76, 0xff, 0xff, 0xff, 0x89, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, + 0x84, 0x00, 0x80, 0x49, 0x72, 0x58, 0xba, 0x00, 0x00, 0x06, 0x70, 0x00, 0x00, 0x7f, 0x0a, 0x55, + 0x64, 0x1f, 0x77, 0x22, 0x11, 0x64, 0x86, 0x41, 0x2e, 0x75, 0xe3, 0xba, 0x24, 0xf7, 0x02, 0xaa, + 0x1f, 0x9a, 0x97, 0x84, 0x24, 0x83, 0xa8, 0xa3, 0x92, 0x1c, 0x9c, 0xe7, 0x85, 0x71, 0x18, 0x49, + 0xc5, 0x09, 0x36, 0xf0, 0x9e, 0x04, 0x84, 0x88, 0xa0, 0xad, 0x9c, 0x8e, 0x75, 0x9d, 0x08, 0xfb, + 0xab, 
0xfd, 0x3d, 0x68, 0xdd, 0x14, 0x93, 0x50, 0xa3, 0x48, 0x96, 0xf7, 0xe6, 0xa4, 0x54, 0x62, + 0x3b, 0x31, 0x18, 0x57, 0xef, 0x3b, 0xb7, 0x98, 0x8e, 0xe9, 0x3b, 0xdf, 0x63, 0x0c, 0xfa, 0x5d, + 0x30, 0x51, 0x4e, 0x61, 0x06, 0xf9, 0x1f, 0xe5, 0xc5, 0x90, 0xb0, 0x80, 0x7c, 0xa0, 0x04, 0x98, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x30, 0xff, 0xff, 0xff, 0xcf, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, + 0x4c, 0x56, 0x86, 0x00, 0x40, 0x96, 0x61, 0xf1, 0x78, 0x00, 0x00, 0x0c, 0x23, 0x18, 0x00, 0x7c, + 0xc1, 0x46, 0xab, 0x45, 0x25, 0x66, 0x9d, 0x6f, 0xa7, 0x18, 0x82, 0xbc, 0xc3, 0xd5, 0xa0, 0xba, + 0x42, 0x04, 0x49, 0x7d, 0x20, 0xb3, 0x0f, 0x4c, 0x78, 0x4c, 0xae, 0x9b, 0x20, 0x00, 0x00, 0x00, + 0x00, 0x29, 0xff, 0xff, 0xff, 0xd6, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, 0x86, 0x00, + 0x40, 0x96, 0x61, 0x21, 0x78, 0x00, 0x00, 0x0c, 0x70, 0x50, 0x40, 0x78, 0xfc, 0x21, 0xff, 0xe1, + 0x45, 0x85, 0x50, 0x5b, 0xfa, 0x35, 0xc8, 0x10, 0x5f, 0x78, 0x01, 0xb7, 0x6b, 0x58, 0x7e, 0x58, + 0x2f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0xff, 0xff, 0xff, 0xc7, 0x00, 0x00, 0x00, 0x01, 0x41, + 0x4d, 0x4c, 0x56, 0x86, 0x00, 0x40, 0x96, 0x62, 0x51, 0x78, 0x00, 0x00, 0x0c, 0x60, 0x63, 0x00, + 0x7e, 0x6c, 0x4b, 0x84, 0x3e, 0x1d, 0xe4, 0x3e, 0x25, 0x9f, 0x3f, 0x5a, 0x2a, 0x19, 0xb6, 0xdd, + 0x80, 0x97, 0xf7, 0x65, 0x1e, 0xa1, 0x17, 0xe2, 0xff, 0xac, 0xb3, 0x4c, 0x5a, 0xfa, 0x5f, 0x60, + 0xcf, 0x78, 0xc3, 0x28, 0x6f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0xff, 0xff, 0xff, 0xd8, 0x00, + 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, 0x86, 0x00, 0x40, 0x96, 0x18, 0x70, 0x5e, 0x00, 0x00, + 0x03, 0x23, 0xa8, 0x00, 0x76, 0x0f, 0xb4, 0x03, 0xdc, 0x4e, 0xaf, 0x53, 0xde, 0xfe, 0x91, 0xd9, + 0x66, 0x0b, 0xab, 0x76, 0xbb, 0x44, 0x2c, 0xa6, 0x90, 0x80, 0x00, 0x00, 0x00, 0x23, 0xff, 0xff, + 0xff, 0xdc, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, 0x86, 0x00, 0x40, 0x96, 0x18, 0x70, + 0x5e, 0x00, 0x00, 0x03, 0x23, 0xa8, 0x00, 0x5d, 0xfc, 0x15, 0x9d, 0x57, 0xb5, 0x81, 0x63, 0x84, + 0xbd, 0x2b, 0xee, 0x37, 0x7c, 0xa0, 0xb6, 
0x2f, 0x00, 0x00, 0x00, 0x00, 0x18, 0xff, 0xff, 0xff, + 0xe7, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, 0x86, 0x01, 0x00, 0x96, 0x18, 0x48, 0x5f, + 0xe0, 0x00, 0x03, 0x20, 0x00, 0x00, 0x46, 0x74, 0x8f, 0xef, 0xb7, 0x09, 0x80, 0x00, 0x00, 0x00, + 0x31, 0xff, 0xff, 0xff, 0xce, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, 0x86, 0x00, 0x41, + 0x0a, 0x61, 0xa1, 0x78, 0x00, 0x00, 0x0c, 0x20, 0x00, 0x00, 0x72, 0x25, 0x0b, 0x41, 0x5b, 0x2e, + 0x3e, 0x8d, 0x64, 0x6d, 0xf0, 0x11, 0x8f, 0xd9, 0x9f, 0x4f, 0x28, 0x72, 0xf6, 0xd5, 0x1e, 0x5f, + 0x92, 0x69, 0xb8, 0xb0, 0xf6, 0xe0, 0xc8, 0xd6, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x3e, 0xff, 0xff, + 0xff, 0xc1, 0x00, 0x00, 0x00, 0x01, 0x41, 0x4d, 0x4c, 0x56, 0x86, 0x00, 0x41, 0x0a, 0x62, 0x01, + 0x78, 0x00, 0x00, 0x0c, 0x70, 0x00, 0x00, 0x7d, 0x80, 0x9f, 0xc5, 0x19, 0x81, 0x7a, 0xcd, 0xc4, + 0xc5, 0x08, 0x1c, 0x79, 0x94, 0xce, 0xb4, 0x6f, 0xc4, 0xd8, 0x8c, 0x45, 0x0a, 0xcf, 0xcb, 0xb2, + 0x21, 0x84, 0xe2, 0x7e, 0x84, 0xeb, 0x73, 0xd9, 0x4c, 0xad, 0x10, 0x50, 0x48, 0x96, 0xc2, 0x17, + 0x24, 0xa2, 0x4c, 0x90, 0x00, 0x00, 0x00, 0x45, 0xff, 0xff, 0xff, 0xba, 0x00, 0x00, 0x00, 0x01, + 0x41, 0x4d, 0x4c, 0x56, 0x86, 0x00, 0x41, 0x0a, 0x62, 0x61, 0x78, 0x00, 0x00, 0x0c, 0x20, 0x00, + 0x00, 0x7b, 0xea, 0xae, 0x37, 0x81, 0xba, 0xc1, 0x88, 0x4f, 0xbd, 0xf1, 0x0c, 0xc5, 0xf3, 0x80, + 0x6c, 0x69, 0x9f, 0xee, 0xd1, 0x8d, 0x03, 0x08, 0x49, 0x19, 0x41, 0x50, 0x0f, 0xa8, 0x85, 0xbd, + 0x27, 0x49, 0xf0, 0xfa, 0x1e, 0x96, 0x3a, 0x4d, 0x54, 0xf6, 0x11, 0xfc, 0x1e, 0x10, 0xe3, 0x75, + 0x67, 0xe5, 0x33, 0x73, 0xb0 +#endif + }; + +#if VP9_USE_TRIGGER_BIG_SIZE + static u32 vp9_trigger_framesize[] = {5429,591,799,655,655,647,769,822,867,945}; +#else + static u32 vp9_trigger_framesize[] = {320,130,60,53,68,51,47,36,61,74,81}; +#endif + +#endif //_VDEC_VP9_TRIG_ +
diff --git a/drivers/amvdec_ports/test/Android.mk b/drivers/amvdec_ports/test/Android.mk new file mode 100644 index 0000000..d9652fb --- /dev/null +++ b/drivers/amvdec_ports/test/Android.mk
@@ -0,0 +1,23 @@ +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) +LOCAL_MODULE := vcode_m2m +LOCAL_MODULE_TAGS := optional +LOCAL_SRC_FILES := vcodec_m2m_test.c +LOCAL_ARM_MODE := arm + +LOCAL_C_INCLUDES := \ + $(JNI_H_INCLUDE) \ + $(BOARD_AML_VENDOR_PATH)/vendor/amlogic/external/ffmpeg + +LOCAL_SHARED_LIBRARIES := \ + libamffmpeg + +include $(BUILD_EXECUTABLE) + +include $(CLEAR_VARS) + +LOCAL_PREBUILT_LIBS:= \ +# libavcodec:ffmpeg/lib/libavcodec.so \ + +include $(BUILD_MULTI_PREBUILT)
diff --git a/drivers/amvdec_ports/test/vcodec_m2m_test.c b/drivers/amvdec_ports/test/vcodec_m2m_test.c new file mode 100644 index 0000000..bec040b --- /dev/null +++ b/drivers/amvdec_ports/test/vcodec_m2m_test.c
@@ -0,0 +1,343 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <libavcodec/avcodec.h> +#include <libavformat/avformat.h> +#include <pthread.h> +#include <unistd.h> +#include <sys/stat.h> +#include <pthread.h> + +#define INBUF_SIZE (4096) +#define DUMP_DIR "/data/video_frames" +static int dump; + +typedef struct VcodecCtx { + AVFormatContext *fmt_ctx; + AVStream *stream; + int nb_streams; + int vst_idx; + char *filename; + pthread_t tid; + pthread_mutex_t pthread_mutex; + pthread_cond_t pthread_cond; +} VcodecCtx; + +static void dump_yuv(AVFrame *frame, char *filename) +{ + FILE *f; + + printf("name: %s, resolution: %dx%d, size: %d\n", + filename, frame->width, frame->height, + frame->buf[0]->size + frame->buf[1]->size); + + f = fopen(filename,"w+"); + + fwrite(frame->buf[0]->data, 1, frame->buf[0]->size, f); + fwrite(frame->buf[1]->data, 1, frame->buf[1]->size, f); + + fclose(f); +} + +static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt, + const char *filename) +{ + char buf[1024]; + int ret; + + ret = avcodec_send_packet(dec_ctx, pkt); + if (ret < 0) { + fprintf(stderr, "Error sending a packet for decoding\n"); + return; + } + + 
while (ret >= 0) { + ret = avcodec_receive_frame(dec_ctx, frame); + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) + return; + else if (ret < 0) { + fprintf(stderr, "Error during decoding, ret: %s\n", av_err2str(ret)); + break; + } + + //fprintf(stderr, "saving frame %3d\n", dec_ctx->frame_number); + fflush(stdout); + + /* the picture is allocated by the decoder. no need to free it */ + snprintf(buf, sizeof(buf), "%s/frame-%d", filename, dec_ctx->frame_number); + + if (dump) + dump_yuv(frame, buf); + } +} + +static void* read_thread(void *arg) +{ + int ret, err; + AVFormatContext *ic = NULL; + AVCodecContext *dec_ctx = NULL; + AVStream *stream = NULL; + AVCodec *codec = NULL; + AVPacket pkt1, *pkt = &pkt1; + AVFrame *frame = NULL; + int vst_idx = 0; + int has_video = 0; + unsigned int st_idx = 0; + VcodecCtx *vctx = arg; + const char *forced_codec_name = NULL; + + printf("entry read thread, tid: %ld.\n", vctx->tid); + + ic = avformat_alloc_context(); + if (!ic) { + fprintf(stderr, "Could not allocate avformat context.\n"); + goto out; + } + + err = avformat_open_input(&ic, vctx->filename, NULL, NULL); + if (err < 0) { + fprintf(stderr, "Could not open avformat input.\n"); + goto out; + } + + err = avformat_find_stream_info(ic, NULL); + if (err < 0) { + fprintf(stderr, "find stream info err.\n"); + goto out; + } + + for (st_idx = 0; st_idx < ic->nb_streams; st_idx++) { + AVStream *st = ic->streams[st_idx]; + + enum AVMediaType type = st->codecpar->codec_type; + st->discard = AVDISCARD_ALL; + + if (type == AVMEDIA_TYPE_VIDEO) { + st->discard = AVDISCARD_NONE; + vctx->vst_idx = st_idx; + has_video = 1; + break; + } + } + + if (!has_video) { + fprintf(stderr, "no video stream.\n"); + goto out; + } + + stream = ic->streams[vctx->vst_idx]; + + //codec = avcodec_find_decoder(stream->codecpar->codec_id); + switch (stream->codecpar->codec_id) { + case AV_CODEC_ID_H264: + forced_codec_name = "h264_v4l2m2m"; + break; + case AV_CODEC_ID_HEVC: + forced_codec_name = 
"hevc_v4l2m2m"; + break; + case AV_CODEC_ID_VP9: + forced_codec_name = "vp9_v4l2m2m"; + break; + case AV_CODEC_ID_MPEG1VIDEO: + forced_codec_name = "mpeg1_v4l2m2m"; + break; + case AV_CODEC_ID_MPEG2VIDEO: + forced_codec_name = "mpeg2_v4l2m2m"; + break; + case AV_CODEC_ID_VC1: + forced_codec_name = "vc1_v4l2m2m"; + break; + case AV_CODEC_ID_H263: + forced_codec_name = "h263_v4l2m2m"; + break; + case AV_CODEC_ID_MPEG4: + forced_codec_name = "mpeg4_v4l2m2m"; + break; + case AV_CODEC_ID_MJPEG: + forced_codec_name = "mjpeg_v4l2m2m"; + break; + } + + codec = avcodec_find_decoder_by_name(forced_codec_name); + if (!codec) { + fprintf(stderr, "Unsupported codec with id %d for input stream %d\n", + stream->codecpar->codec_id, stream->index); + goto out; + } + + dec_ctx = avcodec_alloc_context3(codec); + if (!dec_ctx) { + fprintf(stderr, "Could not allocate video codec context\n"); + goto out; + } + + err = avcodec_parameters_to_context(dec_ctx, stream->codecpar); + if (err < 0) { + fprintf(stderr, "Could not set paras to context\n"); + goto out; + } + + av_codec_set_pkt_timebase(dec_ctx, stream->time_base); + dec_ctx->framerate = stream->avg_frame_rate; + + if (avcodec_open2(dec_ctx, codec, NULL) < 0) { + fprintf(stderr, "Could not open codec for input stream %d\n", + stream->index); + goto out; + } + + printf("fmt ctx: %p, stream: %p, video st idx: %d, num: %d\n", + ic, stream, vst_idx, ic->nb_streams); + printf("format: %s\n",ic->iformat->name); + + ic->flags |= AVFMT_FLAG_GENPTS; + ic->debug = 0xff; + + //if (ic->pb) + // ic->pb->eof_reached = 0; + + frame = av_frame_alloc(); + if (!frame) { + fprintf(stderr, "Could not allocate video frame\n"); + goto out; + } + + for (;;) { + ret = av_read_frame(ic, pkt); + if (ret < 0) { + if (ret == AVERROR_EOF || avio_feof(ic->pb)) { + printf("read data end, ret: %d.\n", ret); + break; + } + + if (ic->pb && ic->pb->error) + break; + + printf("read data fail, ret: %d.\n", ret); + continue; + } + + if (pkt->stream_index == 
vctx->vst_idx) { + //packet_queue_put(&is->audioq, pkt); + //printf("read video data size: %d.\n", pkt->size); + if (pkt->size) + decode(dec_ctx, frame, pkt, DUMP_DIR); + } + + av_packet_unref(pkt); + + usleep(8 * 1000); + } + + /* flush the decoder */ + decode(dec_ctx, frame, NULL, DUMP_DIR); +out: + if (dec_ctx) + avcodec_free_context(&dec_ctx); + + if (frame) + av_frame_free(&frame); + + if (ic) { + avformat_close_input(&ic); + avformat_free_context(ic); + } + + printf("read thread exit.\n"); + + pthread_mutex_lock(&vctx->pthread_mutex); + pthread_cond_signal(&vctx->pthread_cond); + pthread_mutex_unlock(&vctx->pthread_mutex); + + return NULL; +} + +static int open_input_file(const char *filename) +{ + int ret; + VcodecCtx *vctx; + pthread_t pid = pthread_self(); + + vctx = av_mallocz(sizeof(VcodecCtx)); + if (!vctx) + return -1; + + vctx->filename = av_strdup(filename); + if (!vctx->filename) { + av_free(vctx); + return -1; + } + + pthread_mutex_init(&vctx->pthread_mutex, NULL); + pthread_cond_init(&vctx->pthread_cond, NULL); + + ret = pthread_create(&vctx->tid, NULL, read_thread, (void *)vctx); + if (ret == 0) { + pthread_setname_np(pid, "read_thread"); + + pthread_mutex_lock(&vctx->pthread_mutex); + pthread_cond_wait(&vctx->pthread_cond, &vctx->pthread_mutex); + pthread_join(vctx->tid, NULL); + pthread_mutex_unlock(&vctx->pthread_mutex); + } + + av_free(vctx->filename); + av_free(vctx); + + printf("creat the read thread, ret: %d.\n", ret); + + return 0; +} + +int main(int argc, char **argv) +{ + int ret; + const char *filename; + int log_level = 0; + + if (argc < 2) { + fprintf(stderr, "Usage: %s <input file>\n ==> %s/frame-123\n", argv[0], DUMP_DIR); + exit(0); + } + + filename = argv[1]; + if (argv[2]) { + if (!strcmp(argv[2], "dump")) + dump = 1; + else + log_level = atoi(argv[2]); + } + + mkdir(DUMP_DIR, 0664); + + /*set debug level*/ + av_log_set_level(log_level); //AV_LOG_DEBUG + + /* register all the codecs */ + avcodec_register_all(); + + ret = 
open_input_file(filename); + if (ret < 0) + fprintf(stderr, "open input file fail.\n"); + + return 0; +}
diff --git a/drivers/amvdec_ports/utils/common.c b/drivers/amvdec_ports/utils/common.c new file mode 100644 index 0000000..67cf93b --- /dev/null +++ b/drivers/amvdec_ports/utils/common.c
@@ -0,0 +1,232 @@ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/string.h> + +#include "common.h" +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +#include "pixfmt.h" +#endif + +const u8 ff_zigzag_direct[64] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63 +}; + +const u8 ff_zigzag_scan[16 + 1] = { + 0 + 0 * 4, 1 + 0 * 4, 0 + 1 * 4, 0 + 2 * 4, + 1 + 1 * 4, 2 + 0 * 4, 3 + 0 * 4, 2 + 1 * 4, + 1 + 2 * 4, 0 + 3 * 4, 1 + 3 * 4, 2 + 2 * 4, + 3 + 1 * 4, 3 + 2 * 4, 2 + 3 * 4, 3 + 3 * 4, +}; + +const char * const color_space_names[] = { + [AVCOL_SPC_RGB] = "gbr", + [AVCOL_SPC_BT709] = "bt709", + [AVCOL_SPC_UNSPECIFIED] = "unknown", + [AVCOL_SPC_RESERVED] = "reserved", + [AVCOL_SPC_FCC] = "fcc", + [AVCOL_SPC_BT470BG] = "bt470bg", + [AVCOL_SPC_SMPTE170M] = "smpte170m", + [AVCOL_SPC_SMPTE240M] = "smpte240m", + [AVCOL_SPC_YCGCO] = "ycgco", + [AVCOL_SPC_BT2020_NCL] = "bt2020nc", + [AVCOL_SPC_BT2020_CL] = "bt2020c", + [AVCOL_SPC_SMPTE2085] = "smpte2085", + [AVCOL_SPC_CHROMA_DERIVED_NCL] = "chroma-derived-nc", + [AVCOL_SPC_CHROMA_DERIVED_CL] = "chroma-derived-c", + [AVCOL_SPC_ICTCP] = "ictcp", +}; + +const char *av_color_space_name(enum AVColorSpace space) +{ + return (unsigned) space < AVCOL_SPC_NB ? 
+ color_space_names[space] : NULL; +} + +const char * const color_primaries_names[AVCOL_PRI_NB] = { + [AVCOL_PRI_RESERVED0] = "reserved", + [AVCOL_PRI_BT709] = "bt709", + [AVCOL_PRI_UNSPECIFIED] = "unknown", + [AVCOL_PRI_RESERVED] = "reserved", + [AVCOL_PRI_BT470M] = "bt470m", + [AVCOL_PRI_BT470BG] = "bt470bg", + [AVCOL_PRI_SMPTE170M] = "smpte170m", + [AVCOL_PRI_SMPTE240M] = "smpte240m", + [AVCOL_PRI_FILM] = "film", + [AVCOL_PRI_BT2020] = "bt2020", + [AVCOL_PRI_SMPTE428] = "smpte428", + [AVCOL_PRI_SMPTE431] = "smpte431", + [AVCOL_PRI_SMPTE432] = "smpte432", + [AVCOL_PRI_JEDEC_P22] = "jedec-p22", +}; + +const char *av_color_primaries_name(enum AVColorPrimaries primaries) +{ + return (unsigned) primaries < AVCOL_PRI_NB ? + color_primaries_names[primaries] : NULL; +} + +const char * const color_transfer_names[] = { + [AVCOL_TRC_RESERVED0] = "reserved", + [AVCOL_TRC_BT709] = "bt709", + [AVCOL_TRC_UNSPECIFIED] = "unknown", + [AVCOL_TRC_RESERVED] = "reserved", + [AVCOL_TRC_GAMMA22] = "bt470m", + [AVCOL_TRC_GAMMA28] = "bt470bg", + [AVCOL_TRC_SMPTE170M] = "smpte170m", + [AVCOL_TRC_SMPTE240M] = "smpte240m", + [AVCOL_TRC_LINEAR] = "linear", + [AVCOL_TRC_LOG] = "log100", + [AVCOL_TRC_LOG_SQRT] = "log316", + [AVCOL_TRC_IEC61966_2_4] = "iec61966-2-4", + [AVCOL_TRC_BT1361_ECG] = "bt1361e", + [AVCOL_TRC_IEC61966_2_1] = "iec61966-2-1", + [AVCOL_TRC_BT2020_10] = "bt2020-10", + [AVCOL_TRC_BT2020_12] = "bt2020-12", + [AVCOL_TRC_SMPTE2084] = "smpte2084", + [AVCOL_TRC_SMPTE428] = "smpte428", + [AVCOL_TRC_ARIB_STD_B67] = "arib-std-b67", +}; + +const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer) +{ + return (unsigned) transfer < AVCOL_TRC_NB ? 
+ color_transfer_names[transfer] : NULL; +} + +//math +const u8 ff_log2_tab[256]={ + 0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 +}; + +int av_log2(u32 v) +{ + int n = 0; + + if (v & 0xffff0000) { + v >>= 16; + n += 16; + } + if (v & 0xff00) { + v >>= 8; + n += 8; + } + n += ff_log2_tab[v]; + + return n; +} + +//bitstream +int find_start_code(u8 *data, int data_sz) +{ + if (data_sz > 3 && data[0] == 0 && data[1] == 0 && data[2] == 1) + return 3; + + if (data_sz > 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 && data[3] == 1) + return 4; + + return -1; +} + +int calc_nal_len(u8 *data, int len) +{ + int i; + + for (i = 0; i < len - 4; i++) { + if (data[i]) + continue; + + if ((data[i] == 0 && data[i + 1] == 0 && data[i + 2] == 1) || + (data[i] == 0 && data[i + 1] == 0 && + data[i + 2]==0 && data[i + 3] == 1)) + return i; + } + return len; //Not find the end of nalu +} + +u8 *nal_unit_extract_rbsp(const u8 *src, u32 src_len, u32 *dst_len) +{ + u8 *dst; + u32 i, len; + + dst = vmalloc(src_len + AV_INPUT_BUFFER_PADDING_SIZE); + if (!dst) + return NULL; + + /* NAL unit header (2 bytes) */ + i = len = 0; + while (i < 2 && i < src_len) + dst[len++] = src[i++]; + + while (i + 2 < src_len) + if (!src[i] && !src[i + 1] && src[i + 2] == 3) { + dst[len++] = src[i++]; + dst[len++] = src[i++]; + i++; // remove emulation_prevention_three_byte + } else + dst[len++] = src[i++]; + + while (i < src_len) + dst[len++] = src[i++]; + + memset(dst + len, 0, AV_INPUT_BUFFER_PADDING_SIZE); + + *dst_len = 
len; + + return dst; +} + +//debug +static void _pr_hex(const char *fmt, ...) +{ + u8 buf[512]; + int len = 0; + + va_list args; + va_start(args, fmt); + vsnprintf(buf + len, 512 - len, fmt, args); + printk("%s", buf); + va_end(args); +} + +void print_hex_debug(u8 *data, u32 len, int max) +{ + int i, l; + + l = len > max ? max : len; + + for (i = 0; i < l; i++) { + if ((i & 0xf) == 0) + _pr_hex("%06x:", i); + _pr_hex("%02x ", data[i]); + if ((((i + 1) & 0xf) == 0) || ((i + 1) == l)) + _pr_hex("\n"); + } + + _pr_hex("print hex ending. len %d\n\n", l); +} + +bool is_over_size(int w, int h, int size) +{ + if (h != 0 && (w > size / h)) + return true; + + return false; +} + +
diff --git a/drivers/amvdec_ports/utils/common.h b/drivers/amvdec_ports/utils/common.h new file mode 100644 index 0000000..89ae50b --- /dev/null +++ b/drivers/amvdec_ports/utils/common.h
@@ -0,0 +1,155 @@ +#ifndef UTILS_COMMON_H +#define UTILS_COMMON_H + +#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +#include "pixfmt.h" +#endif + +#define AV_INPUT_BUFFER_PADDING_SIZE 64 +#define MIN_CACHE_BITS 64 + +#define FFMAX(a,b) ((a) > (b) ? (a) : (b)) +#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c) +#define FFMIN(a,b) ((a) > (b) ? (b) : (a)) +#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c) + +#define AV_WL32(p, val) \ + do { \ + u32 d = (val); \ + ((u8*)(p))[0] = (d); \ + ((u8*)(p))[1] = (d) >> 8; \ + ((u8*)(p))[2] = (d) >> 16; \ + ((u8*)(p))[3] = (d) >> 24; \ + } while(0) + +#define AV_WB32(p, val) \ + do { u32 d = (val); \ + ((u8*)(p))[3] = (d); \ + ((u8*)(p))[2] = (d) >> 8; \ + ((u8*)(p))[1] = (d) >> 16; \ + ((u8*)(p))[0] = (d) >> 24; \ + } while(0) + +#define AV_RB32(x) \ + (((u32)((const u8*)(x))[0] << 24) | \ + (((const u8*)(x))[1] << 16) | \ + (((const u8*)(x))[2] << 8) | \ + ((const u8*)(x))[3]) + +#define AV_RL32(x) \ + (((u32)((const u8*)(x))[3] << 24) | \ + (((const u8*)(x))[2] << 16) | \ + (((const u8*)(x))[1] << 8) | \ + ((const u8*)(x))[0]) + +#define NEG_SSR32(a, s) (((int)(a)) >> ((s < 32) ? (32 - (s)) : 0)) +#define NEG_USR32(a, s) (((u32)(a)) >> ((s < 32) ? (32 - (s)) : 0)) + +//rounded division & shift +#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b)) +/* assume b>0 */ +#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) + +struct AVRational{ + int num; ///< numerator + int den; ///< denominator +}; + +#ifndef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER +/** + * YUV colorspace type. + * These values match the ones defined by ISO/IEC 23001-8_2013 ¡ì 7.3. 
+ */ +enum AVColorSpace { + AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB) + AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B + AVCOL_SPC_UNSPECIFIED = 2, + AVCOL_SPC_RESERVED = 3, + AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 + AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_SPC_SMPTE240M = 7, ///< functionally identical to above + AVCOL_SPC_YCGCO = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO, + AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system + AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system + AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x + AVCOL_SPC_CHROMA_DERIVED_NCL = 12, ///< Chromaticity-derived non-constant luminance system + AVCOL_SPC_CHROMA_DERIVED_CL = 13, ///< Chromaticity-derived constant luminance system + AVCOL_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp + AVCOL_SPC_NB ///< Not part of ABI +}; + +/** + * Chromaticity coordinates of the source primaries. + * These values match the ones defined by ISO/IEC 23001-8_2013 ¡ì 7.1. 
+ */ +enum AVColorPrimaries { + AVCOL_PRI_RESERVED0 = 0, + AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B + AVCOL_PRI_UNSPECIFIED = 2, + AVCOL_PRI_RESERVED = 3, + AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + + AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM + AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above + AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C + AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020 + AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ) + AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428, + AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3 + AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3 + AVCOL_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors + AVCOL_PRI_NB ///< Not part of ABI +}; + +/** + * Color Transfer Characteristic. + * These values match the ones defined by ISO/IEC 23001-8_2013 ¡ì 7.2. 
+ */ +enum AVColorTransferCharacteristic { + AVCOL_TRC_RESERVED0 = 0, + AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 + AVCOL_TRC_UNSPECIFIED = 2, + AVCOL_TRC_RESERVED = 3, + AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG + AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC + AVCOL_TRC_SMPTE240M = 7, + AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics" + AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)" + AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)" + AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4 + AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut + AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC) + AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system + AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system + AVCOL_TRC_SMPTE2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems + AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084, + AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1 + AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428, + AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma" + AVCOL_TRC_NB ///< Not part of ABI +}; +#endif + +//fmt +const char *av_color_space_name(enum AVColorSpace space); +const char *av_color_primaries_name(enum AVColorPrimaries primaries); +const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer); + +//math +int av_log2(u32 v); + +//bitstream +int find_start_code(u8 *data, int data_sz); +int calc_nal_len(u8 *data, int len); +u8 *nal_unit_extract_rbsp(const u8 *src, u32 src_len, u32 *dst_len); + +//debug +void print_hex_debug(u8 *data, u32 len, int max); + +bool is_over_size(int w, int h, int size); + +#endif \ No newline at end of file
diff --git a/drivers/amvdec_ports/utils/get_bits.h b/drivers/amvdec_ports/utils/get_bits.h new file mode 100644 index 0000000..bb98ebd --- /dev/null +++ b/drivers/amvdec_ports/utils/get_bits.h
@@ -0,0 +1,590 @@ +#ifndef AVCODEC_GET_BITS_H +#define AVCODEC_GET_BITS_H + +#include <linux/kernel.h> +#include <linux/types.h> +#include "common.h" + +/* + * Safe bitstream reading: + * optionally, the get_bits API can check to ensure that we + * don't read past input buffer boundaries. This is protected + * with CONFIG_SAFE_BITSTREAM_READER at the global level, and + * then below that with UNCHECKED_BITSTREAM_READER at the per- + * decoder level. This means that decoders that check internally + * can "#define UNCHECKED_BITSTREAM_READER 1" to disable + * overread checks. + * Boundary checking causes a minor performance penalty so for + * applications that won't want/need this, it can be disabled + * globally using "#define CONFIG_SAFE_BITSTREAM_READER 0". + */ + +struct get_bits_context { + const u8 *buffer; + const u8 *buffer_end; + int index; + int size_in_bits; + int size_in_bits_plus8; +}; + +/* Bitstream reader API docs: + * name + * arbitrary name which is used as prefix for the internal variables + * + * gb + * struct get_bits_context + * + * OPEN_READER(name, gb) + * load gb into local variables + * + * CLOSE_READER(name, gb) + * store local vars in gb + * + * UPDATE_CACHE(name, gb) + * Refill the internal cache from the bitstream. + * After this call at least MIN_CACHE_BITS will be available. + * + * GET_CACHE(name, gb) + * Will output the contents of the internal cache, + * next bit is MSB of 32 or 64 bits (FIXME 64 bits). + * + * SHOW_UBITS(name, gb, num) + * Will return the next num bits. + * + * SHOW_SBITS(name, gb, num) + * Will return the next num bits and do sign extension. + * + * SKIP_BITS(name, gb, num) + * Will skip over the next num bits. + * Note, this is equivalent to SKIP_CACHE; SKIP_COUNTER. + * + * SKIP_CACHE(name, gb, num) + * Will remove the next num bits from the cache (note SKIP_COUNTER + * MUST be called before UPDATE_CACHE / CLOSE_READER). 
+ * + * SKIP_COUNTER(name, gb, num) + * Will increment the internal bit counter (see SKIP_CACHE & SKIP_BITS). + * + * LAST_SKIP_BITS(name, gb, num) + * Like SKIP_BITS, to be used if next call is UPDATE_CACHE or CLOSE_READER. + * + * BITS_LEFT(name, gb) + * Return the number of bits left + * + * For examples see get_bits, show_bits, skip_bits, get_vlc. + */ + +#define OPEN_READER_NOSIZE(name, gb) \ + u32 name ## _index = (gb)->index; \ + u32 name ## _cache + +#define OPEN_READER(name, gb) OPEN_READER_NOSIZE(name, gb) +#define BITS_AVAILABLE(name, gb) 1 + +#define CLOSE_READER(name, gb) (gb)->index = name ## _index + +#define UPDATE_CACHE_LE(name, gb) name ##_cache = \ + AV_RL32((gb)->buffer + (name ## _index >> 3)) >> (name ## _index & 7) + +#define UPDATE_CACHE_BE(name, gb) name ## _cache = \ + AV_RB32((gb)->buffer + (name ## _index >> 3)) << (name ## _index & 7) + +#define SKIP_COUNTER(name, gb, num) name ## _index += (num) + +#define BITS_LEFT(name, gb) ((int)((gb)->size_in_bits - name ## _index)) + +#define SKIP_BITS(name, gb, num) \ + do { \ + SKIP_CACHE(name, gb, num); \ + SKIP_COUNTER(name, gb, num); \ + } while (0) + +#define GET_CACHE(name, gb) ((u32) name ## _cache) + +#define LAST_SKIP_BITS(name, gb, num) SKIP_COUNTER(name, gb, num) + +#define SHOW_UBITS_LE(name, gb, num) zero_extend(name ## _cache, num) +#define SHOW_SBITS_LE(name, gb, num) sign_extend(name ## _cache, num) + +#define SHOW_UBITS_BE(name, gb, num) NEG_USR32(name ## _cache, num) +#define SHOW_SBITS_BE(name, gb, num) NEG_SSR32(name ## _cache, num) + +#ifdef BITSTREAM_READER_LE +#define UPDATE_CACHE(name, gb) UPDATE_CACHE_LE(name, gb) +#define SKIP_CACHE(name, gb, num) name ## _cache >>= (num) + +#define SHOW_UBITS(name, gb, num) SHOW_UBITS_LE(name, gb, num) +#define SHOW_SBITS(name, gb, num) SHOW_SBITS_LE(name, gb, num) +#else +#define UPDATE_CACHE(name, gb) UPDATE_CACHE_BE(name, gb) +#define SKIP_CACHE(name, gb, num) name ## _cache <<= (num) + +#define SHOW_UBITS(name, gb, num) 
SHOW_UBITS_BE(name, gb, num) +#define SHOW_SBITS(name, gb, num) SHOW_SBITS_BE(name, gb, num) +#endif + +static inline const int sign_extend(int val, u32 bits) +{ + u32 shift = 8 * sizeof(int) - bits; + + union { u32 u; int s; } v = { (u32) val << shift }; + return v.s >> shift; +} + +static inline u32 zero_extend(u32 val, u32 bits) +{ + return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits); +} + +static inline int get_bits_count(const struct get_bits_context *s) +{ + return s->index; +} + +/** + * Skips the specified number of bits. + * @param n the number of bits to skip, + * For the UNCHECKED_BITSTREAM_READER this must not cause the distance + * from the start to overflow int. Staying within the bitstream + padding + * is sufficient, too. + */ +static inline void skip_bits_long(struct get_bits_context *s, int n) +{ + s->index += n; +} + +/** + * Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB). + * if MSB not set it is negative + * @param n length in bits + */ +static inline int get_xbits(struct get_bits_context *s, int n) +{ + register int sign; + register int cache; + + OPEN_READER(re, s); + UPDATE_CACHE(re, s); + cache = GET_CACHE(re, s); + sign = ~cache >> 31; + LAST_SKIP_BITS(re, s, n); + CLOSE_READER(re, s); + + return (NEG_USR32(sign ^ cache, n) ^ sign) - sign; +} + + +static inline int get_xbits_le(struct get_bits_context *s, int n) +{ + register int sign; + register int cache; + + OPEN_READER(re, s); + UPDATE_CACHE_LE(re, s); + cache = GET_CACHE(re, s); + sign = sign_extend(~cache, n) >> 31; + LAST_SKIP_BITS(re, s, n); + CLOSE_READER(re, s); + + return (zero_extend(sign ^ cache, n) ^ sign) - sign; +} + +static inline int get_sbits(struct get_bits_context *s, int n) +{ + register int tmp; + + OPEN_READER(re, s); + UPDATE_CACHE(re, s); + tmp = SHOW_SBITS(re, s, n); + LAST_SKIP_BITS(re, s, n); + CLOSE_READER(re, s); + + return tmp; +} + +/** + * Read 1-25 bits. 
+ */ +static inline u32 get_bits(struct get_bits_context *s, int n) +{ + register u32 tmp; + + OPEN_READER(re, s); + UPDATE_CACHE(re, s); + tmp = SHOW_UBITS(re, s, n); + LAST_SKIP_BITS(re, s, n); + CLOSE_READER(re, s); + + return tmp; +} + +/** + * Read 0-25 bits. + */ +static inline int get_bitsz(struct get_bits_context *s, int n) +{ + return n ? get_bits(s, n) : 0; +} + +static inline u32 get_bits_le(struct get_bits_context *s, int n) +{ + register int tmp; + + OPEN_READER(re, s); + UPDATE_CACHE_LE(re, s); + tmp = SHOW_UBITS_LE(re, s, n); + LAST_SKIP_BITS(re, s, n); + CLOSE_READER(re, s); + + return tmp; +} + +/** + * Show 1-25 bits. + */ +static inline u32 show_bits(struct get_bits_context *s, int n) +{ + register u32 tmp; + + OPEN_READER_NOSIZE(re, s); + UPDATE_CACHE(re, s); + tmp = SHOW_UBITS(re, s, n); + + return tmp; +} + +static inline void skip_bits(struct get_bits_context *s, int n) +{ + u32 re_index = s->index; + LAST_SKIP_BITS(re, s, n); + CLOSE_READER(re, s); +} + +static inline u32 get_bits1(struct get_bits_context *s) +{ + u32 index = s->index; + u8 result = s->buffer[index >> 3]; + +#ifdef BITSTREAM_READER_LE + result >>= index & 7; + result &= 1; +#else + result <<= index & 7; + result >>= 8 - 1; +#endif + + index++; + s->index = index; + + return result; +} + +static inline u32 show_bits1(struct get_bits_context *s) +{ + return show_bits(s, 1); +} + +static inline void skip_bits1(struct get_bits_context *s) +{ + skip_bits(s, 1); +} + +/** + * Read 0-32 bits. + */ +static inline u32 get_bits_long(struct get_bits_context *s, int n) +{ + if (!n) { + return 0; + } else if (n <= MIN_CACHE_BITS) { + return get_bits(s, n); + } else { +#ifdef BITSTREAM_READER_LE + u32 ret = get_bits(s, 16); + return ret | (get_bits(s, n - 16) << 16); +#else + u32 ret = get_bits(s, 16) << (n - 16); + return ret | get_bits(s, n - 16); +#endif + } +} + +/** + * Read 0-64 bits. 
+ */ +static inline u64 get_bits64(struct get_bits_context *s, int n) +{ + if (n <= 32) { + return get_bits_long(s, n); + } else { +#ifdef BITSTREAM_READER_LE + u64 ret = get_bits_long(s, 32); + return ret | (u64) get_bits_long(s, n - 32) << 32; +#else + u64 ret = (u64) get_bits_long(s, n - 32) << 32; + return ret | get_bits_long(s, 32); +#endif + } +} + +/** + * Read 0-32 bits as a signed integer. + */ +static inline int get_sbits_long(struct get_bits_context *s, int n) +{ + if (!n) + return 0; + + return sign_extend(get_bits_long(s, n), n); +} + +/** + * Show 0-32 bits. + */ +static inline u32 show_bits_long(struct get_bits_context *s, int n) +{ + if (n <= MIN_CACHE_BITS) { + return show_bits(s, n); + } else { + struct get_bits_context gb = *s; + + return get_bits_long(&gb, n); + } +} + +static inline int check_marker(struct get_bits_context *s, const char *msg) +{ + int bit = get_bits1(s); + + if (!bit) + pr_err("Marker bit missing at %d of %d %s\n", + get_bits_count(s) - 1, s->size_in_bits, msg); + return bit; +} + +static inline int init_get_bits_xe(struct get_bits_context *s, + const u8 *buffer, int bit_size, int is_le) +{ + int buffer_size; + int ret = 0; + + if (bit_size >= INT_MAX - FFMAX(7, AV_INPUT_BUFFER_PADDING_SIZE * 8) || + bit_size < 0 || !buffer) { + bit_size = 0; + buffer = NULL; + ret = -1; + } + + buffer_size = (bit_size + 7) >> 3; + + s->buffer = buffer; + s->size_in_bits = bit_size; + s->size_in_bits_plus8 = bit_size + 8; + s->buffer_end = buffer + buffer_size; + s->index = 0; + + return ret; +} + +/** + * Initialize struct get_bits_context. + * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes + * larger than the actual read bits because some optimized bitstream + * readers read 32 or 64 bit at once and could read over the end + * @param bit_size the size of the buffer in bits + * @return 0 on success, -1 if the buffer_size would overflow. 
+ */ +static inline int init_get_bits(struct get_bits_context *s, + const u8 *buffer, int bit_size) +{ +#ifdef BITSTREAM_READER_LE + return init_get_bits_xe(s, buffer, bit_size, 1); +#else + return init_get_bits_xe(s, buffer, bit_size, 0); +#endif +} + +/** + * Initialize struct get_bits_context. + * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes + * larger than the actual read bits because some optimized bitstream + * readers read 32 or 64 bit at once and could read over the end + * @param byte_size the size of the buffer in bytes + * @return 0 on success, -1 if the buffer_size would overflow. + */ +static inline int init_get_bits8(struct get_bits_context *s, + const u8 *buffer, int byte_size) +{ + if (byte_size > INT_MAX / 8 || byte_size < 0) + byte_size = -1; + return init_get_bits(s, buffer, byte_size * 8); +} + +static inline int init_get_bits8_le(struct get_bits_context *s, + const u8 *buffer, int byte_size) +{ + if (byte_size > INT_MAX / 8 || byte_size < 0) + byte_size = -1; + return init_get_bits_xe(s, buffer, byte_size * 8, 1); +} + +static inline const u8 *align_get_bits(struct get_bits_context *s) +{ + int n = -get_bits_count(s) & 7; + + if (n) + skip_bits(s, n); + return s->buffer + (s->index >> 3); +} + +/** + * If the vlc code is invalid and max_depth=1, then no bits will be removed. + * If the vlc code is invalid and max_depth>1, then the number of bits removed + * is undefined. 
+ */ +#define GET_VLC(code, name, gb, table, bits, max_depth) \ + do { \ + int n, nb_bits; \ + u32 index; \ + \ + index = SHOW_UBITS(name, gb, bits); \ + code = table[index][0]; \ + n = table[index][1]; \ + \ + if (max_depth > 1 && n < 0) { \ + LAST_SKIP_BITS(name, gb, bits); \ + UPDATE_CACHE(name, gb); \ + \ + nb_bits = -n; \ + \ + index = SHOW_UBITS(name, gb, nb_bits) + code; \ + code = table[index][0]; \ + n = table[index][1]; \ + if (max_depth > 2 && n < 0) { \ + LAST_SKIP_BITS(name, gb, nb_bits); \ + UPDATE_CACHE(name, gb); \ + \ + nb_bits = -n; \ + \ + index = SHOW_UBITS(name, gb, nb_bits) + code; \ + code = table[index][0]; \ + n = table[index][1]; \ + } \ + } \ + SKIP_BITS(name, gb, n); \ + } while (0) + +#define GET_RL_VLC(level, run, name, gb, table, bits, \ + max_depth, need_update) \ + do { \ + int n, nb_bits; \ + u32 index; \ + \ + index = SHOW_UBITS(name, gb, bits); \ + level = table[index].level; \ + n = table[index].len; \ + \ + if (max_depth > 1 && n < 0) { \ + SKIP_BITS(name, gb, bits); \ + if (need_update) { \ + UPDATE_CACHE(name, gb); \ + } \ + \ + nb_bits = -n; \ + \ + index = SHOW_UBITS(name, gb, nb_bits) + level; \ + level = table[index].level; \ + n = table[index].len; \ + if (max_depth > 2 && n < 0) { \ + LAST_SKIP_BITS(name, gb, nb_bits); \ + if (need_update) { \ + UPDATE_CACHE(name, gb); \ + } \ + nb_bits = -n; \ + \ + index = SHOW_UBITS(name, gb, nb_bits) + level; \ + level = table[index].level; \ + n = table[index].len; \ + } \ + } \ + run = table[index].run; \ + SKIP_BITS(name, gb, n); \ + } while (0) + +/* Return the LUT element for the given bitstream configuration. */ +static inline int set_idx(struct get_bits_context *s, + int code, int *n, int *nb_bits, int (*table)[2]) +{ + u32 idx; + + *nb_bits = -*n; + idx = show_bits(s, *nb_bits) + code; + *n = table[idx][1]; + + return table[idx][0]; +} + +/** + * Parse a vlc code. 
+ * @param bits is the number of bits which will be read at once, must be + * identical to nb_bits in init_vlc() + * @param max_depth is the number of times bits bits must be read to completely + * read the longest vlc code + * = (max_vlc_length + bits - 1) / bits + * @returns the code parsed or -1 if no vlc matches + */ +static inline int get_vlc2(struct get_bits_context *s, + int (*table)[2], int bits, int max_depth) +{ + int code; + + OPEN_READER(re, s); + UPDATE_CACHE(re, s); + + GET_VLC(code, re, s, table, bits, max_depth); + + CLOSE_READER(re, s); + + return code; +} + +static inline int decode012(struct get_bits_context *gb) +{ + int n; + + n = get_bits1(gb); + if (n == 0) + return 0; + else + return get_bits1(gb) + 1; +} + +static inline int decode210(struct get_bits_context *gb) +{ + if (get_bits1(gb)) + return 0; + else + return 2 - get_bits1(gb); +} + +static inline int get_bits_left(struct get_bits_context *gb) +{ + return gb->size_in_bits - get_bits_count(gb); +} + +static inline int skip_1stop_8data_bits(struct get_bits_context *gb) +{ + if (get_bits_left(gb) <= 0) + return -1; + + while (get_bits1(gb)) { + skip_bits(gb, 8); + if (get_bits_left(gb) <= 0) + return -1; + } + + return 0; +} + +#endif /* AVCODEC_GET_BITS_H */ +
diff --git a/drivers/amvdec_ports/utils/golomb.c b/drivers/amvdec_ports/utils/golomb.c new file mode 100644 index 0000000..21fcb6a --- /dev/null +++ b/drivers/amvdec_ports/utils/golomb.c
#include <linux/kernel.h>
#include <linux/types.h>

/*
 * Lookup tables for fast Exp-Golomb decoding.  Each table is indexed by the
 * top 9 bits (512-entry tables) or top 8 bits (256-entry tables) of the
 * bitstream cache; consumers in golomb.h use them to resolve short codes in
 * a single table read instead of bit-by-bit parsing.
 */

/* Code length (in bits) of the ue(v)/se(v) code starting with these 9 bits. */
const u8 ff_golomb_vlc_len[512]={
	19,17,15,15,13,13,13,13,11,11,11,11,11,11,11,11,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
	7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
	5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
	5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
};

/*
 * Decoded value of the ue(v) code starting with these 9 bits (valid when
 * ff_golomb_vlc_len[] <= 9; longer codes take the slow path in golomb.h).
 */
const u8 ff_ue_golomb_vlc_code[512]={
	32,32,32,32,32,32,32,32,31,32,32,32,32,32,32,32,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,
	 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9,10,10,10,10,11,11,11,11,12,12,12,12,13,13,13,13,14,14,14,14,
	 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

/* Decoded (signed) value of the se(v) code starting with these 9 bits. */
const char ff_se_golomb_vlc_code[512]={
	 17, 17, 17, 17, 17, 17, 17, 17, 16, 17, 17, 17, 17, 17, 17, 17,  8, -8,  9, -9, 10,-10, 11,-11, 12,-12, 13,-13, 14,-14, 15,-15,
	  4,  4,  4,  4, -4, -4, -4, -4,  5,  5,  5,  5, -5, -5, -5, -5,  6,  6,  6,  6, -6, -6, -6, -6,  7,  7,  7,  7, -7, -7, -7, -7,
	  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3,
	  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
	  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
	 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
};

/* Encoded length of ue(v) for values 0..255 (used by the writer side). */
const u8 ff_ue_golomb_len[256]={
	 1, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,11,
	11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,13,
	13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,
	13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,15,
	15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
	15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
	15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
	15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,17,
};

/*
 * Interleaved ("Elias/Dirac style") Exp-Golomb tables, indexed by the top
 * 8 bits of the cache.  A stored length of 9 marks a code longer than the
 * 8-bit window (the reader then continues in its slow path).
 */
const u8 ff_interleaved_golomb_vlc_len[256]={
	9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
	9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
	9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
	9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
	3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
};

/* Decoded value of the interleaved ue code starting with these 8 bits. */
const u8 ff_interleaved_ue_golomb_vlc_code[256]={
	15,16,7, 7, 17,18,8, 8, 3, 3, 3, 3, 3, 3, 3, 3,
	19,20,9, 9, 21,22,10,10,4, 4, 4, 4, 4, 4, 4, 4,
	 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	23,24,11,11,25,26,12,12,5, 5, 5, 5, 5, 5, 5, 5,
	27,28,13,13,29,30,14,14,6, 6, 6, 6, 6, 6, 6, 6,
	 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/* Decoded (signed) value of the interleaved se code for these 8 bits. */
const char ff_interleaved_se_golomb_vlc_code[256]={
	  8, -8,  4,  4,  9, -9, -4, -4,  2,  2,  2,  2,  2,  2,  2,  2,
	 10,-10,  5,  5, 11,-11, -5, -5, -2, -2, -2, -2, -2, -2, -2, -2,
	  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
	  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
	 12,-12,  6,  6, 13,-13, -6, -6,  3,  3,  3,  3,  3,  3,  3,  3,
	 14,-14,  7,  7, 15,-15, -7, -7, -3, -3, -3, -3, -3, -3, -3, -3,
	 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
};

/* Partial-value bits of the (Dirac) interleaved ue code for these 8 bits. */
const u8 ff_interleaved_dirac_golomb_vlc_code[256]={
	 0, 1, 0, 0, 2, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
	 4, 5, 2, 2, 6, 7, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 8, 9, 4, 4, 10,11,5, 5, 2, 2, 2, 2, 2, 2, 2, 2,
	12,13,6, 6, 14,15,7, 7, 3, 3, 3, 3, 3, 3, 3, 3,
	 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
diff --git a/drivers/amvdec_ports/utils/golomb.h b/drivers/amvdec_ports/utils/golomb.h new file mode 100644 index 0000000..d66c182 --- /dev/null +++ b/drivers/amvdec_ports/utils/golomb.h
#ifndef AVCODEC_GOLOMB_H
#define AVCODEC_GOLOMB_H

#include <linux/kernel.h>
#include <linux/types.h>

#include "get_bits.h"
#include "put_bits.h"
#include "common.h"

/* Sentinel returned by get_interleaved_se_golomb() on malformed input. */
#define INVALID_VLC 0x80000000

/* Lookup tables defined in golomb.c (indexed by 9 or 8 bitstream bits). */
extern const u8 ff_golomb_vlc_len[512];
extern const u8 ff_ue_golomb_vlc_code[512];
extern const char ff_se_golomb_vlc_code[512];
extern const u8 ff_ue_golomb_len[256];

extern const u8 ff_interleaved_golomb_vlc_len[256];
extern const u8 ff_interleaved_ue_golomb_vlc_code[256];
extern const char ff_interleaved_se_golomb_vlc_code[256];
extern const u8 ff_interleaved_dirac_golomb_vlc_code[256];

/**
 * Read an u32 Exp-Golomb code in the range 0 to 8190.
 *
 * Codes of up to 9 bits are resolved with a single table lookup; longer
 * codes are decoded arithmetically from the leading-zero count.
 *
 * @returns the read value or a negative error code.
 */
static inline int get_ue_golomb(struct get_bits_context *gb)
{
	u32 buf;

	OPEN_READER(re, gb);
	UPDATE_CACHE(re, gb);
	buf = GET_CACHE(re, gb);

	if (buf >= (1 << 27)) {
		/* fast path: at most 4 leading zeros -> code fits in 9 bits */
		buf >>= 32 - 9;
		LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
		CLOSE_READER(re, gb);

		return ff_ue_golomb_vlc_code[buf];
	} else {
		/* slow path: log = 31 - 2*leading_zeros */
		int log = 2 * av_log2(buf) - 31;
		LAST_SKIP_BITS(re, gb, 32 - log);
		CLOSE_READER(re, gb);
		if (log < 7) {
			/* > 12 leading zeros: value would exceed 8190 */
			pr_err("Invalid UE golomb code\n");
			return -1;
		}
		buf >>= log;
		buf--;

		return buf;
	}
}

/**
 * Read an u32 Exp-Golomb code in the range 0 to UINT_MAX-1.
 */
static inline u32 get_ue_golomb_long(struct get_bits_context *gb)
{
	u32 buf, log;

	buf = show_bits_long(gb, 32);
	log = 31 - av_log2(buf);
	skip_bits_long(gb, log);

	return get_bits_long(gb, log + 1) - 1;
}

/**
 * read u32 exp golomb code, constraint to a max of 31.
 * the return value is undefined if the stored value exceeds 31.
 */
static inline int get_ue_golomb_31(struct get_bits_context *gb)
{
	u32 buf;

	OPEN_READER(re, gb);
	UPDATE_CACHE(re, gb);
	buf = GET_CACHE(re, gb);

	/* values <= 31 always fit in 9 bits, so the LUT alone suffices */
	buf >>= 32 - 9;
	LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
	CLOSE_READER(re, gb);

	return ff_ue_golomb_vlc_code[buf];
}

/*
 * Read an interleaved ("Elias/Dirac") unsigned Exp-Golomb code, where
 * stop bits and data bits alternate in the stream.
 */
static inline u32 get_interleaved_ue_golomb(struct get_bits_context *gb)
{
	u32 buf;

	OPEN_READER(re, gb);
	UPDATE_CACHE(re, gb);
	buf = GET_CACHE(re, gb);

	if (buf & 0xAA800000) {
		/* fast path: code terminates within the top 8 bits */
		buf >>= 32 - 8;
		LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
		CLOSE_READER(re, gb);

		return ff_interleaved_ue_golomb_vlc_code[buf];
	} else {
		/* slow path: accumulate 4 value bits per 8-bit window */
		u32 ret = 1;

		do {
			buf >>= 32 - 8;
			LAST_SKIP_BITS(re, gb,
				FFMIN(ff_interleaved_golomb_vlc_len[buf], 8));

			if (ff_interleaved_golomb_vlc_len[buf] != 9) {
				/* code ended inside this window */
				ret <<= (ff_interleaved_golomb_vlc_len[buf] - 1) >> 1;
				ret |= ff_interleaved_dirac_golomb_vlc_code[buf];
				break;
			}
			ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf];
			UPDATE_CACHE(re, gb);
			buf = GET_CACHE(re, gb);
		} while (ret<0x8000000U && BITS_AVAILABLE(re, gb));

		CLOSE_READER(re, gb);
		return ret - 1;
	}
}

/**
 * read u32 truncated exp golomb code.
 * With range 1 no bits are consumed; with range 2 it is a single
 * inverted bit; otherwise a regular ue code.
 */
static inline int get_te0_golomb(struct get_bits_context *gb, int range)
{
	if (range == 1)
		return 0;
	else if (range == 2)
		return get_bits1(gb) ^ 1;
	else
		return get_ue_golomb(gb);
}

/**
 * read u32 truncated exp golomb code.
 * Like get_te0_golomb() but without the range==1 shortcut.
 */
static inline int get_te_golomb(struct get_bits_context *gb, int range)
{
	if (range == 2)
		return get_bits1(gb) ^ 1;
	else
		return get_ue_golomb(gb);
}

/**
 * read signed exp golomb code.
 * Short codes (<= 9 bits) come from the signed LUT; longer codes are
 * decoded as ue and then zigzag-unmapped.
 */
static inline int get_se_golomb(struct get_bits_context *gb)
{
	u32 buf;

	OPEN_READER(re, gb);
	UPDATE_CACHE(re, gb);
	buf = GET_CACHE(re, gb);

	if (buf >= (1 << 27)) {
		buf >>= 32 - 9;
		LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
		CLOSE_READER(re, gb);

		return ff_se_golomb_vlc_code[buf];
	} else {
		int log = av_log2(buf), sign;
		LAST_SKIP_BITS(re, gb, 31 - log);
		UPDATE_CACHE(re, gb);
		buf = GET_CACHE(re, gb);

		buf >>= log;

		LAST_SKIP_BITS(re, gb, 32 - log);
		CLOSE_READER(re, gb);

		/* zigzag unmap: lsb is the sign, remaining bits the magnitude */
		sign = -(buf & 1);
		buf = ((buf >> 1) ^ sign) - sign;

		return buf;
	}
}

/* Signed Exp-Golomb over the full 32-bit ue range. */
static inline int get_se_golomb_long(struct get_bits_context *gb)
{
	u32 buf = get_ue_golomb_long(gb);
	int sign = (buf & 1) - 1;

	return ((buf >> 1) ^ sign) + 1;
}

/*
 * Read an interleaved signed Exp-Golomb code.
 * Returns INVALID_VLC when no terminating pattern is found.
 */
static inline int get_interleaved_se_golomb(struct get_bits_context *gb)
{
	u32 buf;

	OPEN_READER(re, gb);
	UPDATE_CACHE(re, gb);
	buf = GET_CACHE(re, gb);

	if (buf & 0xAA800000) {
		buf >>= 32 - 8;
		LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
		CLOSE_READER(re, gb);

		return ff_interleaved_se_golomb_vlc_code[buf];
	} else {
		int log;
		LAST_SKIP_BITS(re, gb, 8);
		UPDATE_CACHE(re, gb);
		/* merge next cache window; low 1 guarantees termination below */
		buf |= 1 | (GET_CACHE(re, gb) >> 8);

		if ((buf & 0xAAAAAAAA) == 0)
			return INVALID_VLC;

		/* de-interleave: squeeze value bits together, drop stop bits */
		for (log = 31; (buf & 0x80000000) == 0; log--)
			buf = (buf << 2) - ((buf << log) >> (log - 1)) + (buf >> 30);

		LAST_SKIP_BITS(re, gb, 63 - 2 * log - 8);
		CLOSE_READER(re, gb);
		return (signed) (((((buf << log) >> log) - 1) ^ -(buf & 0x1)) + 1) >> 1;
	}
}

/* Dirac-style signed code: magnitude as interleaved ue, then a sign bit. */
static inline int dirac_get_se_golomb(struct get_bits_context *gb)
{
	u32 ret = get_interleaved_ue_golomb(gb);

	if (ret) {
		int sign = -get_bits1(gb);
		ret = (ret ^ sign) - sign;
	}

	return ret;
}

/**
 * read u32 golomb rice code (ffv1).
 * @param k       Rice parameter (number of raw suffix bits)
 * @param limit   max quotient before the escape encoding kicks in
 * @param esc_len bit length of the escaped value
 */
static inline int get_ur_golomb(struct get_bits_context *gb,
	int k, int limit, int esc_len)
{
	u32 buf;
	int log;

	OPEN_READER(re, gb);
	UPDATE_CACHE(re, gb);
	buf = GET_CACHE(re, gb);

	log = av_log2(buf);

	if (log > 31 - limit) {
		/* normal case: quotient (leading zeros) below the limit */
		buf >>= log - k;
		buf += (30U - log) << k;
		LAST_SKIP_BITS(re, gb, 32 + k - log);
		CLOSE_READER(re, gb);

		return buf;
	} else {
		/* escape: skip 'limit' zero bits, read raw esc_len value */
		LAST_SKIP_BITS(re, gb, limit);
		UPDATE_CACHE(re, gb);

		buf = SHOW_UBITS(re, gb, esc_len);

		LAST_SKIP_BITS(re, gb, esc_len);
		CLOSE_READER(re, gb);

		return buf + limit - 1;
	}
}

/**
 * read u32 golomb rice code (jpegls).
 * Same scheme as get_ur_golomb() but with JPEG-LS escape semantics and
 * support for quotients longer than the reader cache.
 */
static inline int get_ur_golomb_jpegls(struct get_bits_context *gb,
	int k, int limit, int esc_len)
{
	u32 buf;
	int log;

	OPEN_READER(re, gb);
	UPDATE_CACHE(re, gb);
	buf = GET_CACHE(re, gb);

	log = av_log2(buf);

	if (log - k >= 32 - MIN_CACHE_BITS + (MIN_CACHE_BITS == 32) &&
		32 - log < limit) {
		/* fast path: whole code is inside the current cache */
		buf >>= log - k;
		buf += (30U - log) << k;
		LAST_SKIP_BITS(re, gb, 32 + k - log);
		CLOSE_READER(re, gb);

		return buf;
	} else {
		int i;
		/* count zero bits a cache-load at a time, then bit by bit;
		 * re_index is the raw bit position kept by OPEN_READER */
		for (i = 0; i + MIN_CACHE_BITS <= limit && SHOW_UBITS(re, gb, MIN_CACHE_BITS) == 0; i += MIN_CACHE_BITS) {
			if (gb->size_in_bits <= re_index) {
				/* ran off the end of the buffer */
				CLOSE_READER(re, gb);
				return -1;
			}
			LAST_SKIP_BITS(re, gb, MIN_CACHE_BITS);
			UPDATE_CACHE(re, gb);
		}
		for (; i < limit && SHOW_UBITS(re, gb, 1) == 0; i++) {
			SKIP_BITS(re, gb, 1);
		}
		LAST_SKIP_BITS(re, gb, 1);
		UPDATE_CACHE(re, gb);

		if (i < limit - 1) {
			if (k) {
				if (k > MIN_CACHE_BITS - 1) {
					/* suffix wider than the cache: read in two parts */
					buf = SHOW_UBITS(re, gb, 16) << (k-16);
					LAST_SKIP_BITS(re, gb, 16);
					UPDATE_CACHE(re, gb);
					buf |= SHOW_UBITS(re, gb, k-16);
					LAST_SKIP_BITS(re, gb, k-16);
				} else {
					buf = SHOW_UBITS(re, gb, k);
					LAST_SKIP_BITS(re, gb, k);
				}
			} else {
				buf = 0;
			}
			buf += ((u32)i << k);
		} else if (i == limit - 1) {
			/* JPEG-LS escape: raw esc_len bits, value offset by one */
			buf = SHOW_UBITS(re, gb, esc_len);
			LAST_SKIP_BITS(re, gb, esc_len);

			buf ++;
		} else {
			buf = -1;
		}

		CLOSE_READER(re, gb);
		return buf;
	}
}

/**
 * read signed golomb rice code (ffv1).
 */
static inline int get_sr_golomb(struct get_bits_context *gb,
	int k, int limit, int esc_len)
{
	u32 v = get_ur_golomb(gb, k, limit, esc_len);

	/* zigzag unmap */
	return (v >> 1) ^ -(v & 1);
}

/**
 * read signed golomb rice code (flac).
 */
static inline int get_sr_golomb_flac(struct get_bits_context *gb,
	int k, int limit, int esc_len)
{
	u32 v = get_ur_golomb_jpegls(gb, k, limit, esc_len);

	return (v >> 1) ^ -(v & 1);
}

/**
 * read u32 golomb rice code (shorten).
 */
static inline u32 get_ur_golomb_shorten(struct get_bits_context *gb, int k)
{
	return get_ur_golomb_jpegls(gb, k, INT_MAX, 0);
}

/**
 * read signed golomb rice code (shorten).
 */
static inline int get_sr_golomb_shorten(struct get_bits_context *gb, int k)
{
	int uvar = get_ur_golomb_jpegls(gb, k + 1, INT_MAX, 0);

	return (uvar >> 1) ^ -(uvar & 1);
}

/**
 * write u32 exp golomb code. 2^16 - 2 at most
 */
static inline void set_ue_golomb(struct put_bits_context *pb, int i)
{
	if (i < 256)
		put_bits(pb, ff_ue_golomb_len[i], i + 1);
	else {
		int e = av_log2(i + 1);
		put_bits(pb, 2 * e + 1, i + 1);
	}
}

/**
 * write u32 exp golomb code. 2^32-2 at most.
 */
static inline void set_ue_golomb_long(struct put_bits_context *pb, u32 i)
{
	if (i < 256)
		put_bits(pb, ff_ue_golomb_len[i], i + 1);
	else {
		/* may exceed 32 bits -> 64-bit writer */
		int e = av_log2(i + 1);
		put_bits64(pb, 2 * e + 1, i + 1);
	}
}

/**
 * write truncated u32 exp golomb code.
 */
static inline void set_te_golomb(struct put_bits_context *pb, int i, int range)
{
	if (range == 2)
		put_bits(pb, 1, i ^ 1);
	else
		set_ue_golomb(pb, i);
}

/**
 * write signed exp golomb code. 16 bits at most.
 */
static inline void set_se_golomb(struct put_bits_context *pb, int i)
{
	/* zigzag map: i -> 2i-1 for positive, -2i for non-positive */
	i = 2 * i - 1;

	if (i < 0)
		i ^= -1; //FIXME check if gcc does the right thing
	set_ue_golomb(pb, i);
}

/**
 * write u32 golomb rice code (ffv1).
 * Mirrors get_ur_golomb(): quotient in unary + k suffix bits, or the
 * escape form once the quotient reaches 'limit'.
 */
static inline void set_ur_golomb(struct put_bits_context *pb, int i, int k, int limit,
	int esc_len)
{
	int e;

	e = i >> k;
	if (e < limit)
		put_bits(pb, e + k + 1, (1 << k) + av_mod_uintp2(i, k));
	else
		put_bits(pb, limit + esc_len, i - limit + 1);
}

/**
 * write u32 golomb rice code (jpegls).
 */
static inline void set_ur_golomb_jpegls(struct put_bits_context *pb,
	int i, int k, int limit, int esc_len)
{
	int e;

	e = (i >> k) + 1;
	if (e < limit) {
		/* put_bits() handles at most 31 bits per call */
		while (e > 31) {
			put_bits(pb, 31, 0);
			e -= 31;
		}
		put_bits(pb, e, 1);
		if (k)
			put_sbits(pb, k, i);
	} else {
		while (limit > 31) {
			put_bits(pb, 31, 0);
			limit -= 31;
		}
		put_bits(pb, limit, 1);
		put_bits(pb, esc_len, i - 1);
	}
}

/**
 * write signed golomb rice code (ffv1).
 */
static inline void set_sr_golomb(struct put_bits_context *pb,
	int i, int k, int limit, int esc_len)
{
	int v;

	/* zigzag map to unsigned */
	v = -2 * i - 1;
	v ^= (v >> 31);

	set_ur_golomb(pb, v, k, limit, esc_len);
}

/**
 * write signed golomb rice code (flac).
 */
static inline void set_sr_golomb_flac(struct put_bits_context *pb,
	int i, int k, int limit, int esc_len)
{
	int v;

	v = -2 * i - 1;
	v ^= (v >> 31);

	set_ur_golomb_jpegls(pb, v, k, limit, esc_len);
}

#endif /* AVCODEC_GOLOMB_H */
diff --git a/drivers/amvdec_ports/utils/pixfmt.h b/drivers/amvdec_ports/utils/pixfmt.h new file mode 100644 index 0000000..f13411e --- /dev/null +++ b/drivers/amvdec_ports/utils/pixfmt.h
@@ -0,0 +1,470 @@ +#ifndef AVUTIL_PIXFMT_H +#define AVUTIL_PIXFMT_H + +/** + * Pixel format. + * + * @note + * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA + * color is put together as: + * (A << 24) | (R << 16) | (G << 8) | B + * This is stored as BGRA on little-endian CPU architectures and ARGB on + * big-endian CPUs. + * + * @note + * If the resolution is not a multiple of the chroma subsampling factor + * then the chroma plane resolution must be rounded up. + * + * @par + * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized + * image data is stored in AVFrame.data[0]. The palette is transported in + * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is + * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is + * also endian-specific). Note also that the individual RGB32 palette + * components stored in AVFrame.data[1] should be in the range 0..255. + * This is important as many custom PAL8 video codecs that were designed + * to run on the IBM VGA graphics adapter use 6-bit palette components. + * + * @par + * For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like + * for pal8. This palette is filled in automatically by the function + * allocating the picture. + */ +enum AVPixelFormat { + AV_PIX_FMT_NONE = -1, + AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... 
+ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + AV_PIX_FMT_GRAY8, ///< Y , 8bpp + AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_PAL8, ///< 8 bits with AV_PIX_FMT_RGB32 palette + AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range + AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range + AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range + AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and 
the following byte V) + AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped + + AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... + AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... + + AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian + AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian + AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range + AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) + AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined + AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined + + AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined + AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined + +#ifdef FF_API_VAAPI + /** @name Deprecated pixel formats */ + /**@{*/ + AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, 
Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a VASurfaceID + /**@}*/ + AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD, +#else + /** + * Hardware acceleration through VA-API, data[3] contains a + * VASurfaceID. + */ + AV_PIX_FMT_VAAPI, +#endif + + AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined + AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined + AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined + AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined + AV_PIX_FMT_YA8, ///< 8 bits gray, 8 bits alpha + + AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8 + AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8 + + AV_PIX_FMT_BGR48BE, ///< packed RGB 
16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + /** + * The following 12 formats have the disadvantage of needing 1 format for each bit depth. + * Notice that each 9/10 bits sample is stored in 16 bits with extra padding. + * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better. + */ + AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP + AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian + 
AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian + AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian + AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian + AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian + AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian + AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian + AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 
Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + + AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface + + AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + + AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + + AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb + + AV_PIX_FMT_YA16BE, ///< 16 bits gray, 16 bits alpha (big-endian) + 
AV_PIX_FMT_YA16LE, ///< 16 bits gray, 16 bits alpha (little-endian) + + AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp + AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian + AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian + /** + * HW acceleration through QSV, data[3] contains a pointer to the + * mfxFrameSurface1 structure. + */ + AV_PIX_FMT_QSV, + /** + * HW acceleration though MMAL, data[3] contains a pointer to the + * MMAL_BUFFER_HEADER_T structure. + */ + AV_PIX_FMT_MMAL, + + AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer + + /** + * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers + * exactly as for system memory frames. + */ + AV_PIX_FMT_CUDA, + + AV_PIX_FMT_0RGB, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined + AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined + AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined + AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... 
X=unused/undefined + + AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian + AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian + AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian + AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian + AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range + + AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */ + 
AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */ + + AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing + + AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian + AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian + AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + + AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox + + AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian + AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian 
+ + AV_PIX_FMT_GBRAP12BE, ///< planar GBR 4:4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRAP12LE, ///< planar GBR 4:4:4:4 48bpp, little-endian + + AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian + AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian + + AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec + + AV_PIX_FMT_GRAY12BE, ///< Y , 12bpp, big-endian + AV_PIX_FMT_GRAY12LE, ///< Y , 12bpp, little-endian + AV_PIX_FMT_GRAY10BE, ///< Y , 10bpp, big-endian + AV_PIX_FMT_GRAY10LE, ///< Y , 10bpp, little-endian + + AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component, little-endian + AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian + + /** + * Hardware surfaces for Direct3D11. + * + * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11 + * hwaccel API and filtering support AV_PIX_FMT_D3D11 only. + * + * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the + * texture array index of the frame as intptr_t if the ID3D11Texture2D is + * an array texture (or always 0 if it's a normal texture). + */ + AV_PIX_FMT_D3D11, + + AV_PIX_FMT_GRAY9BE, ///< Y , 9bpp, big-endian + AV_PIX_FMT_GRAY9LE, ///< Y , 9bpp, little-endian + + AV_PIX_FMT_GBRPF32BE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian + AV_PIX_FMT_GBRPF32LE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian + AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian + AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian + + /** + * DRM-managed buffers exposed through PRIME buffer sharing. + * + * data[0] points to an AVDRMFrameDescriptor. + */ + AV_PIX_FMT_DRM_PRIME, + /** + * Hardware surfaces for OpenCL. + * + * data[i] contain 2D image objects (typed in C as cl_mem, used + * in OpenCL as image2d_t) for each plane of the surface. 
+ */ + AV_PIX_FMT_OPENCL, + + AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian + AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian + + AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp, big-endian + AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp, little-endian + + AV_PIX_FMT_YUVA422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian + AV_PIX_FMT_YUVA422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian + AV_PIX_FMT_YUVA444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian + AV_PIX_FMT_YUVA444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian + + AV_PIX_FMT_NV24, ///< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + AV_PIX_FMT_NV42, ///< as above, but U and V bytes are swapped + + AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions +}; + +#ifdef AV_HAVE_BIGENDIAN +#define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be +#else +#define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le +#endif + +#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE) +#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE) +#define AV_PIX_FMT_YUV440P10 AV_PIX_FMT_NE(YUV440P10BE, YUV440P10LE) +#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE) +#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE) +#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE) +#define AV_PIX_FMT_YUV440P12 AV_PIX_FMT_NE(YUV440P12BE, YUV440P12LE) +#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE) + + +#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE) +#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE) +#define 
AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE) +#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE) +#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE) +#define AV_PIX_FMT_GBRAP10 AV_PIX_FMT_NE(GBRAP10BE, GBRAP10LE) +#define AV_PIX_FMT_GBRAP12 AV_PIX_FMT_NE(GBRAP12BE, GBRAP12LE) +#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE) + +#define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE) +#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE) +#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE) + +#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE) +#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE) +#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE) + +/** + * Chromaticity coordinates of the source primaries. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.1. + */ +enum AVColorPrimaries { + AVCOL_PRI_RESERVED0 = 0, + AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B + AVCOL_PRI_UNSPECIFIED = 2, + AVCOL_PRI_RESERVED = 3, + AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + + AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM + AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above + AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C + AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020 + AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ) + AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428, + AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3 + AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3 + AVCOL_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors + AVCOL_PRI_NB ///< Not part of ABI +}; + +/** + * Color Transfer Characteristic. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.2. 
+ */ +enum AVColorTransferCharacteristic { + AVCOL_TRC_RESERVED0 = 0, + AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 + AVCOL_TRC_UNSPECIFIED = 2, + AVCOL_TRC_RESERVED = 3, + AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG + AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC + AVCOL_TRC_SMPTE240M = 7, + AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics" + AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)" + AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)" + AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4 + AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut + AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC) + AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system + AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system + AVCOL_TRC_SMPTE2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems + AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084, + AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1 + AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428, + AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma" + AVCOL_TRC_NB ///< Not part of ABI +}; + +/** + * YUV colorspace type. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.3. 
+ */ +enum AVColorSpace { + AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB) + AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B + AVCOL_SPC_UNSPECIFIED = 2, + AVCOL_SPC_RESERVED = 3, + AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 + AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_SPC_SMPTE240M = 7, ///< functionally identical to above + AVCOL_SPC_YCGCO = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO, + AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system + AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system + AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x + AVCOL_SPC_CHROMA_DERIVED_NCL = 12, ///< Chromaticity-derived non-constant luminance system + AVCOL_SPC_CHROMA_DERIVED_CL = 13, ///< Chromaticity-derived constant luminance system + AVCOL_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp + AVCOL_SPC_NB ///< Not part of ABI +}; + +/** + * MPEG vs JPEG YUV range. + */ +enum AVColorRange { + AVCOL_RANGE_UNSPECIFIED = 0, + AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges + AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges + AVCOL_RANGE_NB ///< Not part of ABI +}; + +/** + * Location of chroma samples. + * + * Illustration showing the location of the first (top left) chroma sample of the + * image, the left shows only luma, the right + * shows the location of the chroma sample, the 2 could be imagined to overlay + * each other but are drawn separately due to limitations of ASCII + * + * 1st 2nd 1st 2nd horizontal luma sample positions + * v v v v + * ______ ______ + *1st luma line > |X X ... |3 4 X ... 
X are luma samples, + * | |1 2 1-6 are possible chroma positions + *2nd luma line > |X X ... |5 6 X ... 0 is undefined/unknown position + */ +enum AVChromaLocation { + AVCHROMA_LOC_UNSPECIFIED = 0, + AVCHROMA_LOC_LEFT = 1, ///< MPEG-2/4 4:2:0, H.264 default for 4:2:0 + AVCHROMA_LOC_CENTER = 2, ///< MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0 + AVCHROMA_LOC_TOPLEFT = 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2 + AVCHROMA_LOC_TOP = 4, + AVCHROMA_LOC_BOTTOMLEFT = 5, + AVCHROMA_LOC_BOTTOM = 6, + AVCHROMA_LOC_NB ///< Not part of ABI +}; + +#endif /* AVUTIL_PIXFMT_H */ +
diff --git a/drivers/amvdec_ports/utils/put_bits.h b/drivers/amvdec_ports/utils/put_bits.h new file mode 100644 index 0000000..8b2aa15 --- /dev/null +++ b/drivers/amvdec_ports/utils/put_bits.h
@@ -0,0 +1,323 @@ +#ifndef AVCODEC_PUT_BITS_H +#define AVCODEC_PUT_BITS_H + +#include <linux/kernel.h> +#include <linux/types.h> +#include "common.h" + +struct put_bits_context { + u32 bit_buf; + int bit_left; + u8 *buf; + u8 *buf_ptr; + u8 *buf_end; + int size_in_bits; +}; + +/** + * Initialize the struct put_bits_context s. + * + * @param buffer the buffer where to put bits + * @param buffer_size the size in bytes of buffer + */ +static inline void init_put_bits(struct put_bits_context *s, + u8 *buffer, int buffer_size) +{ + if (buffer_size < 0) { + buffer_size = 0; + buffer = NULL; + } + + s->size_in_bits = 8 * buffer_size; + s->buf = buffer; + s->buf_end = s->buf + buffer_size; + s->buf_ptr = s->buf; + s->bit_left = 32; + s->bit_buf = 0; +} + +/** + * Rebase the bit writer onto a reallocated buffer. + * + * @param buffer the buffer where to put bits + * @param buffer_size the size in bytes of buffer, + * must be larger than the previous size + */ +static inline void rebase_put_bits(struct put_bits_context *s, + u8 *buffer, int buffer_size) +{ + s->buf_end = buffer + buffer_size; + s->buf_ptr = buffer + (s->buf_ptr - s->buf); + s->buf = buffer; + s->size_in_bits = 8 * buffer_size; +} + +/** + * @return the total number of bits written to the bitstream. + */ +static inline int put_bits_count(struct put_bits_context *s) +{ + return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left; +} + +/** + * @return the number of bits available in the bitstream. + */ +static inline int put_bits_left(struct put_bits_context* s) +{ + return (s->buf_end - s->buf_ptr) * 8 - 32 + s->bit_left; +} + +/** + * Pad the end of the output stream with zeros. 
+ */ +static inline void flush_put_bits(struct put_bits_context *s) +{ +#ifndef BITSTREAM_WRITER_LE + if (s->bit_left < 32) + s->bit_buf <<= s->bit_left; +#endif + while (s->bit_left < 32) { +#ifdef BITSTREAM_WRITER_LE + *s->buf_ptr++ = s->bit_buf; + s->bit_buf >>= 8; +#else + *s->buf_ptr++ = s->bit_buf >> 24; + s->bit_buf <<= 8; +#endif + s->bit_left += 8; + } + s->bit_left = 32; + s->bit_buf = 0; +} + +static inline void flush_put_bits_le(struct put_bits_context *s) +{ + while (s->bit_left < 32) { + *s->buf_ptr++ = s->bit_buf; + s->bit_buf >>= 8; + s->bit_left += 8; + } + s->bit_left = 32; + s->bit_buf = 0; +} + +#ifdef BITSTREAM_WRITER_LE +#define avpriv_align_put_bits align_put_bits_unsupported_here +#define avpriv_put_string ff_put_string_unsupported_here +#define avpriv_copy_bits avpriv_copy_bits_unsupported_here +#else +/** + * Pad the bitstream with zeros up to the next byte boundary. + */ +void avpriv_align_put_bits(struct put_bits_context *s); + +/** + * Put the string string in the bitstream. + * + * @param terminate_string 0-terminates the written string if value is 1 + */ +void avpriv_put_string(struct put_bits_context *pb, + const char *string, int terminate_string); + +/** + * Copy the content of src to the bitstream. + * + * @param length the number of bits of src to copy + */ +void avpriv_copy_bits(struct put_bits_context *pb, const u8 *src, int length); +#endif + +/** + * Write up to 31 bits into a bitstream. + * Use put_bits32 to write 32 bits. 
+ */ +static inline void put_bits(struct put_bits_context *s, int n, u32 value) +{ + u32 bit_buf; + int bit_left; + + bit_buf = s->bit_buf; + bit_left = s->bit_left; + + /* XXX: optimize */ +#ifdef BITSTREAM_WRITER_LE + bit_buf |= value << (32 - bit_left); + if (n >= bit_left) { + if (3 < s->buf_end - s->buf_ptr) { + AV_WL32(s->buf_ptr, bit_buf); + s->buf_ptr += 4; + } else { + pr_err("Internal error, put_bits buffer too small\n"); + } + bit_buf = value >> bit_left; + bit_left += 32; + } + bit_left -= n; +#else + if (n < bit_left) { + bit_buf = (bit_buf << n) | value; + bit_left -= n; + } else { + bit_buf <<= bit_left; + bit_buf |= value >> (n - bit_left); + if (3 < s->buf_end - s->buf_ptr) { + AV_WB32(s->buf_ptr, bit_buf); + s->buf_ptr += 4; + } else { + pr_err("Internal error, put_bits buffer too small\n"); + } + bit_left += 32 - n; + bit_buf = value; + } +#endif + s->bit_buf = bit_buf; + s->bit_left = bit_left; +} + +static inline void put_bits_le(struct put_bits_context *s, int n, u32 value) +{ + u32 bit_buf; + int bit_left; + + bit_buf = s->bit_buf; + bit_left = s->bit_left; + + bit_buf |= value << (32 - bit_left); + if (n >= bit_left) { + if (3 < s->buf_end - s->buf_ptr) { + AV_WL32(s->buf_ptr, bit_buf); + s->buf_ptr += 4; + } else { + pr_err("Internal error, put_bits buffer too small\n"); + } + bit_buf = value >> bit_left; + bit_left += 32; + } + bit_left -= n; + + s->bit_buf = bit_buf; + s->bit_left = bit_left; +} + +static inline u32 av_mod_uintp2(u32 a, u32 p) +{ + return a & ((1 << p) - 1); +} + +static inline void put_sbits(struct put_bits_context *pb, int n, int32_t value) +{ + put_bits(pb, n, av_mod_uintp2(value, n)); +} + +/** + * Write exactly 32 bits into a bitstream. 
+ */ +static void put_bits32(struct put_bits_context *s, u32 value) +{ + u32 bit_buf; + int bit_left; + + bit_buf = s->bit_buf; + bit_left = s->bit_left; + +#ifdef BITSTREAM_WRITER_LE + bit_buf |= value << (32 - bit_left); + if (3 < s->buf_end - s->buf_ptr) { + AV_WL32(s->buf_ptr, bit_buf); + s->buf_ptr += 4; + } else { + pr_err("Internal error, put_bits buffer too small\n"); + } + bit_buf = (uint64_t)value >> bit_left; +#else + bit_buf = (uint64_t)bit_buf << bit_left; + bit_buf |= value >> (32 - bit_left); + if (3 < s->buf_end - s->buf_ptr) { + AV_WB32(s->buf_ptr, bit_buf); + s->buf_ptr += 4; + } else { + pr_err("Internal error, put_bits buffer too small\n"); + } + bit_buf = value; +#endif + + s->bit_buf = bit_buf; + s->bit_left = bit_left; +} + +/** + * Write up to 64 bits into a bitstream. + */ +static inline void put_bits64(struct put_bits_context *s, int n, uint64_t value) +{ + if (n < 32) + put_bits(s, n, value); + else if (n == 32) + put_bits32(s, value); + else if (n < 64) { + u32 lo = value & 0xffffffff; + u32 hi = value >> 32; +#ifdef BITSTREAM_WRITER_LE + put_bits32(s, lo); + put_bits(s, n - 32, hi); +#else + put_bits(s, n - 32, hi); + put_bits32(s, lo); +#endif + } else { + u32 lo = value & 0xffffffff; + u32 hi = value >> 32; +#ifdef BITSTREAM_WRITER_LE + put_bits32(s, lo); + put_bits32(s, hi); +#else + put_bits32(s, hi); + put_bits32(s, lo); +#endif + } +} + +/** + * Return the pointer to the byte where the bitstream writer will put + * the next bit. + */ +static inline u8 *put_bits_ptr(struct put_bits_context *s) +{ + return s->buf_ptr; +} + +/** + * Skip the given number of bytes. + * struct put_bits_context must be flushed & aligned to a byte boundary before calling this. + */ +static inline void skip_put_bytes(struct put_bits_context *s, int n) +{ + s->buf_ptr += n; +} + +/** + * Skip the given number of bits. + * Must only be used if the actual values in the bitstream do not matter. + * If n is 0 the behavior is undefined. 
+ */ +static inline void skip_put_bits(struct put_bits_context *s, int n) +{ + s->bit_left -= n; + s->buf_ptr -= 4 * (s->bit_left >> 5); + s->bit_left &= 31; +} + +/** + * Change the end of the buffer. + * + * @param size the new size in bytes of the buffer where to put bits + */ +static inline void set_put_bits_buffer_size(struct put_bits_context *s, int size) +{ + s->buf_end = s->buf + size; + s->size_in_bits = 8*size; +} + +#endif /* AVCODEC_PUT_BITS_H */ +
diff --git a/drivers/amvdec_ports/vdec_drv_base.h b/drivers/amvdec_ports/vdec_drv_base.h new file mode 100644 index 0000000..990d406 --- /dev/null +++ b/drivers/amvdec_ports/vdec_drv_base.h
@@ -0,0 +1,73 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef _VDEC_DRV_BASE_ +#define _VDEC_DRV_BASE_ + +#include "aml_vcodec_drv.h" + +#include "vdec_drv_if.h" + +struct vdec_common_if { + /** + * (*init)() - initialize decode driver + * @ctx : [in] aml v4l2 context + * @h_vdec : [out] driver handle + */ + int (*init)(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec); + + int (*probe)(unsigned long h_vdec, + struct aml_vcodec_mem *bs, void *out); + + /** + * (*decode)() - trigger decode + * @h_vdec : [in] driver handle + * @bs : [in] input bitstream + * @fb : [in] frame buffer to store decoded frame + * @res_chg : [out] resolution change happen + */ + int (*decode)(unsigned long h_vdec, struct aml_vcodec_mem *bs, + bool *res_chg); + + /** + * (*get_param)() - get driver's parameter + * @h_vdec : [in] driver handle + * @type : [in] input parameter type + * @out : [out] buffer to store query result + */ + int (*get_param)(unsigned long h_vdec, + enum vdec_get_param_type type, void *out); + + /** + * (*set_param)() - set driver's parameter + * @h_vdec : [in] driver handle + * @type : [in] input parameter type + * @in : [in] buffer to store query result + */ + int (*set_param)(unsigned long h_vdec, + enum 
vdec_set_param_type type, void *in); + + /** + * (*deinit)() - deinitialize driver. + * @h_vdec : [in] driver handle to be deinit + */ + void (*deinit)(unsigned long h_vdec); +}; + +#endif
diff --git a/drivers/amvdec_ports/vdec_drv_if.c b/drivers/amvdec_ports/vdec_drv_if.c new file mode 100644 index 0000000..01510c5 --- /dev/null +++ b/drivers/amvdec_ports/vdec_drv_if.c
@@ -0,0 +1,139 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "vdec_drv_if.h" +#include "aml_vcodec_dec.h" +#include "vdec_drv_base.h" + +const struct vdec_common_if *get_h264_dec_comm_if(void); +const struct vdec_common_if *get_hevc_dec_comm_if(void); +const struct vdec_common_if *get_vp9_dec_comm_if(void); +const struct vdec_common_if *get_mpeg12_dec_comm_if(void); +const struct vdec_common_if *get_mpeg4_dec_comm_if(void); +const struct vdec_common_if *get_mjpeg_dec_comm_if(void); +const struct vdec_common_if *get_av1_dec_comm_if(void); + +int vdec_if_init(struct aml_vcodec_ctx *ctx, unsigned int fourcc) +{ + int ret = 0; + + switch (fourcc) { + case V4L2_PIX_FMT_H264: + ctx->dec_if = get_h264_dec_comm_if(); + break; + case V4L2_PIX_FMT_HEVC: + ctx->dec_if = get_hevc_dec_comm_if(); + break; + case V4L2_PIX_FMT_VP9: + ctx->dec_if = get_vp9_dec_comm_if(); + break; + case V4L2_PIX_FMT_MPEG: + case V4L2_PIX_FMT_MPEG1: + case V4L2_PIX_FMT_MPEG2: + ctx->dec_if = get_mpeg12_dec_comm_if(); + break; + case V4L2_PIX_FMT_MPEG4: + ctx->dec_if = get_mpeg4_dec_comm_if(); + break; + case V4L2_PIX_FMT_MJPEG: + ctx->dec_if = 
get_mjpeg_dec_comm_if(); + break; + case V4L2_PIX_FMT_AV1: + ctx->dec_if = get_av1_dec_comm_if(); + break; + default: + return -EINVAL; + } + + ret = ctx->dec_if->init(ctx, &ctx->drv_handle); + + return ret; +} + +int vdec_if_probe(struct aml_vcodec_ctx *ctx, + struct aml_vcodec_mem *bs, void *out) +{ + int ret = 0; + + ret = ctx->dec_if->probe(ctx->drv_handle, bs, out); + + return ret; +} + +int vdec_if_decode(struct aml_vcodec_ctx *ctx, + struct aml_vcodec_mem *bs, bool *res_chg) +{ + int ret = 0; + + if (bs) { + if ((bs->addr & 63) != 0) { + v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, + "bs dma_addr should 64 byte align\n"); + return -EINVAL; + } + } + + if (ctx->drv_handle == 0) + return -EIO; + + aml_vcodec_set_curr_ctx(ctx->dev, ctx); + ret = ctx->dec_if->decode(ctx->drv_handle, bs, res_chg); + aml_vcodec_set_curr_ctx(ctx->dev, NULL); + + return ret; +} + +int vdec_if_get_param(struct aml_vcodec_ctx *ctx, + enum vdec_get_param_type type, void *out) +{ + int ret = 0; + + if (ctx->drv_handle == 0) + return -EIO; + + ret = ctx->dec_if->get_param(ctx->drv_handle, type, out); + + return ret; +} + +int vdec_if_set_param(struct aml_vcodec_ctx *ctx, + enum vdec_set_param_type type, void *in) +{ + int ret = 0; + + if (ctx->drv_handle == 0) + return -EIO; + + ret = ctx->dec_if->set_param(ctx->drv_handle, type, in); + + return ret; +} + +void vdec_if_deinit(struct aml_vcodec_ctx *ctx) +{ + if (ctx->drv_handle == 0) + return; + + ctx->dec_if->deinit(ctx->drv_handle); + ctx->drv_handle = 0; +}
diff --git a/drivers/amvdec_ports/vdec_drv_if.h b/drivers/amvdec_ports/vdec_drv_if.h new file mode 100644 index 0000000..29911f8 --- /dev/null +++ b/drivers/amvdec_ports/vdec_drv_if.h
@@ -0,0 +1,165 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef _VDEC_DRV_IF_H_ +#define _VDEC_DRV_IF_H_ + +#include "aml_vcodec_drv.h" +#include "aml_vcodec_dec.h" +#include "aml_vcodec_util.h" +#include "../stream_input/amports/streambuf.h" + +#define AML_VIDEO_MAGIC CODEC_MODE('A', 'M', 'L', 'V') + +#define V4L_STREAM_TYPE_MATEDATA (0) +#define V4L_STREAM_TYPE_FRAME (1) + +struct stream_info { + u32 stream_width; + u32 stream_height; + u32 stream_field; + u32 stream_dpb; +}; + +struct aml_video_stream { + u32 magic; + u32 type; + union { + struct stream_info s; + u8 buf[64]; + } m; + u32 len; + u8 data[0]; +}; + +/** + * struct vdec_fb_status - decoder frame buffer status + * @FB_ST_INIT : initial state + * @FB_ST_DECODER : frame buffer be allocted by decoder. + * @FB_ST_VPP : frame buffer be allocate by vpp. + * @FB_ST_DISPLAY : frame buffer is ready to be displayed. 
+ * @FB_ST_FREE : frame buffer is not used by decoder any more + */ +enum vdec_fb_status { + FB_ST_INIT, + FB_ST_DECODER, + FB_ST_VPP, + FB_ST_GE2D, + FB_ST_DISPLAY, + FB_ST_FREE +}; + +enum vdec_dw_mode { + VDEC_DW_AFBC_ONLY = 0, + VDEC_DW_AFBC_1_1_DW = 1, + VDEC_DW_AFBC_1_4_DW = 2, + VDEC_DW_AFBC_x2_1_4_DW = 3, + VDEC_DW_AFBC_1_2_DW = 4, + VDEC_DW_NO_AFBC = 16, + VDEC_DW_AFBC_AUTO_1_2 = 0x100, + VDEC_DW_AFBC_AUTO_1_4 = 0x200, +}; + +/* + * the caller does not own the returned buffer. The buffer will not be + * released before vdec_if_deinit. + * GET_PARAM_PIC_INFO : get picture info, struct vdec_pic_info* + * GET_PARAM_CROP_INFO : get crop info, struct v4l2_crop* + * GET_PARAM_DPB_SIZE : get dpb size, unsigned int* + * GET_PARAM_DW_MODE: : get double write mode, unsigned int* + * GET_PARAM_COMP_BUF_INFO : get compressed buf info, struct vdec_comp_buf_info* + */ +enum vdec_get_param_type { + GET_PARAM_PIC_INFO, + GET_PARAM_CROP_INFO, + GET_PARAM_DPB_SIZE, + GET_PARAM_CONFIG_INFO, + GET_PARAM_DW_MODE, + GET_PARAM_COMP_BUF_INFO +}; + +/* + * SET_PARAM_PS_INFO : set picture parms, data parsed from ucode. + */ +enum vdec_set_param_type { + SET_PARAM_WRITE_FRAME_SYNC, + SET_PARAM_PS_INFO, + SET_PARAM_COMP_BUF_INFO, + SET_PARAM_HDR_INFO, + SET_PARAM_POST_EVENT, + SET_PARAM_PIC_INFO, + SET_PARAM_CFG_INFO +}; + +/** + * struct vdec_fb_node - decoder frame buffer node + * @list : list to hold this node + * @fb : point to frame buffer (vdec_fb), fb could point to frame buffer and + * working buffer this is for maintain buffers in different state + */ +struct vdec_fb_node { + struct list_head list; + struct vdec_fb *fb; +}; + +/** + * vdec_if_init() - initialize decode driver + * @ctx : [in] v4l2 context + * @fourcc : [in] video format fourcc, V4L2_PIX_FMT_H264/VP8/VP9.. 
+ */ +int vdec_if_init(struct aml_vcodec_ctx *ctx, unsigned int fourcc); + +int vdec_if_probe(struct aml_vcodec_ctx *ctx, + struct aml_vcodec_mem *bs, void *out); + +/** + * vdec_if_deinit() - deinitialize decode driver + * @ctx : [in] v4l2 context + * + */ +void vdec_if_deinit(struct aml_vcodec_ctx *ctx); + +/** + * vdec_if_decode() - trigger decode + * @ctx : [in] v4l2 context + * @bs : [in] input bitstream + * @fb : [in] frame buffer to store decoded frame, when null menas parse + * header only + * @res_chg : [out] resolution change happens if current bs have different + * picture width/height + * Note: To flush the decoder when reaching EOF, set input bitstream as NULL. + * + * Return: 0 on success. -EIO on unrecoverable error. + */ +int vdec_if_decode(struct aml_vcodec_ctx *ctx, + struct aml_vcodec_mem *bs, bool *res_chg); + +/** + * vdec_if_get_param() - get driver's parameter + * @ctx : [in] v4l2 context + * @type : [in] input parameter type + * @out : [out] buffer to store query result + */ +int vdec_if_get_param(struct aml_vcodec_ctx *ctx, + enum vdec_get_param_type type, void *out); + +int vdec_if_set_param(struct aml_vcodec_ctx *ctx, + enum vdec_set_param_type type, void *in); + +#endif
diff --git a/drivers/common/Makefile b/drivers/common/Makefile new file mode 100644 index 0000000..77ce080 --- /dev/null +++ b/drivers/common/Makefile
@@ -0,0 +1,2 @@ +obj-y += media_clock/ +obj-y += firmware/
diff --git a/drivers/common/chips/chips.c b/drivers/common/chips/chips.c new file mode 100644 index 0000000..158e32c --- /dev/null +++ b/drivers/common/chips/chips.c
@@ -0,0 +1,198 @@ +/* + * drivers/amlogic/media/common/arch/chips/chips.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> + +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/registers/cpu_version.h> +#include "../../stream_input/amports/amports_priv.h" +#include "../../frame_provider/decoder/utils/vdec.h" +#include "chips.h" +#include <linux/amlogic/media/utils/log.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "decoder_cpu_ver_info.h" + +#define VIDEO_FIRMWARE_FATHER_NAME "video" + +/* + *#define MESON_CPU_MAJOR_ID_M6 0x16 + *#define MESON_CPU_MAJOR_ID_M6TV 0x17 + *#define MESON_CPU_MAJOR_ID_M6TVL 0x18 + *#define MESON_CPU_MAJOR_ID_M8 0x19 + *#define MESON_CPU_MAJOR_ID_MTVD 0x1A + *#define MESON_CPU_MAJOR_ID_M8B 0x1B + *#define MESON_CPU_MAJOR_ID_MG9TV 0x1C + *#define MESON_CPU_MAJOR_ID_M8M2 0x1D + *#define MESON_CPU_MAJOR_ID_GXBB 0x1F + *#define MESON_CPU_MAJOR_ID_GXTVBB 0x20 + *#define MESON_CPU_MAJOR_ID_GXL 0x21 + *#define MESON_CPU_MAJOR_ID_GXM 0x22 + *#define MESON_CPU_MAJOR_ID_TXL 0x23 + */ +struct type_name { + + int type; + + const char *name; +}; +static const struct type_name cpu_type_name[] = { + {AM_MESON_CPU_MAJOR_ID_M6, "m6"}, + {AM_MESON_CPU_MAJOR_ID_M6TV, "m6tv"}, + 
{AM_MESON_CPU_MAJOR_ID_M6TVL, "m6tvl"}, + {AM_MESON_CPU_MAJOR_ID_M8, "m8"}, + {AM_MESON_CPU_MAJOR_ID_MTVD, "mtvd"}, + {AM_MESON_CPU_MAJOR_ID_M8B, "m8b"}, + {AM_MESON_CPU_MAJOR_ID_MG9TV, "mg9tv"}, + {AM_MESON_CPU_MAJOR_ID_M8M2, "m8"}, + {AM_MESON_CPU_MAJOR_ID_GXBB, "gxbb"}, + {AM_MESON_CPU_MAJOR_ID_GXTVBB, "gxtvbb"}, + {AM_MESON_CPU_MAJOR_ID_GXL, "gxl"}, + {AM_MESON_CPU_MAJOR_ID_GXM, "gxm"}, + {AM_MESON_CPU_MAJOR_ID_TXL, "txl"}, + {AM_MESON_CPU_MAJOR_ID_TXLX, "txlx"}, + {AM_MESON_CPU_MAJOR_ID_GXLX, "gxlx"}, + {AM_MESON_CPU_MAJOR_ID_G12A, "g12a"}, + {AM_MESON_CPU_MAJOR_ID_G12B, "g12b"}, + {AM_MESON_CPU_MAJOR_ID_SM1, "sm1"}, + {AM_MESON_CPU_MAJOR_ID_TL1, "tl1"}, + {AM_MESON_CPU_MAJOR_ID_TM2, "tm2"}, + {AM_MESON_CPU_MAJOR_ID_SC2, "sc2"}, + {AM_MESON_CPU_MAJOR_ID_T5, "t5"}, + {AM_MESON_CPU_MAJOR_ID_T5D, "t5d"}, + {AM_MESON_CPU_MAJOR_ID_T7, "t7"}, + {AM_MESON_CPU_MAJOR_ID_S4, "s4"}, + {AM_MESON_CPU_MAJOR_ID_T3, "t3"}, + {AM_MESON_CPU_MAJOR_ID_P1, "p1"}, + {AM_MESON_CPU_MAJOR_ID_S4D, "s4d"}, + {AM_MESON_CPU_MAJOR_ID_T5W, "t5w"}, + {0, NULL}, +}; + +static const char *get_type_name(const struct type_name *typename, int size, + int type) +{ + + const char *name = "unknown"; + + int i; + + for (i = 0; i < size; i++) { + + if (type == typename[i].type) + + name = typename[i].name; + + } + + return name; +} + +const char *get_cpu_type_name(void) +{ + + return get_type_name(cpu_type_name, + sizeof(cpu_type_name) / sizeof(struct type_name), + get_cpu_major_id()); +} +EXPORT_SYMBOL(get_cpu_type_name); + +/* + *enum vformat_e { + * VFORMAT_MPEG12 = 0, + * VFORMAT_MPEG4, + * VFORMAT_H264, + * VFORMAT_MJPEG, + * VFORMAT_REAL, + * VFORMAT_JPEG, + * VFORMAT_VC1, + * VFORMAT_AVS, + * VFORMAT_YUV, + * VFORMAT_H264MVC, + * VFORMAT_H264_4K2K, + * VFORMAT_HEVC, + * VFORMAT_H264_ENC, + * VFORMAT_JPEG_ENC, + * VFORMAT_VP9, +* VFORMAT_AVS2, + * VFORMAT_MAX + *}; + */ +static const struct type_name vformat_type_name[] = { + {VFORMAT_MPEG12, "mpeg12"}, + {VFORMAT_MPEG4, "mpeg4"}, + 
{VFORMAT_H264, "h264"}, + {VFORMAT_MJPEG, "mjpeg"}, + {VFORMAT_REAL, "real"}, + {VFORMAT_JPEG, "jpeg"}, + {VFORMAT_VC1, "vc1"}, + {VFORMAT_AVS, "avs"}, + {VFORMAT_YUV, "yuv"}, + {VFORMAT_H264MVC, "h264mvc"}, + {VFORMAT_H264_4K2K, "h264_4k"}, + {VFORMAT_HEVC, "hevc"}, + {VFORMAT_H264_ENC, "h264_enc"}, + {VFORMAT_JPEG_ENC, "jpeg_enc"}, + {VFORMAT_VP9, "vp9"}, + {VFORMAT_AVS2, "avs2"}, + {VFORMAT_AV1, "av1"}, + {VFORMAT_YUV, "yuv"}, + {0, NULL}, +}; + +const char *get_video_format_name(enum vformat_e type) +{ + + return get_type_name(vformat_type_name, + sizeof(vformat_type_name) / sizeof(struct type_name), type); +} +EXPORT_SYMBOL(get_video_format_name); + +static struct chip_vdec_info_s current_chip_info; + +struct chip_vdec_info_s *get_current_vdec_chip(void) +{ + + return ¤t_chip_info; +} +EXPORT_SYMBOL(get_current_vdec_chip); + +bool check_efuse_chip(int vformat) +{ + unsigned int status, i = 0; + int type[] = {15, 14, 11, 2}; /* avs2, vp9, h265, h264 */ + + status = (READ_EFUSE_REG(EFUSE_LIC2) >> 8 & 0xf); + if (!status) + return false; + + do { + if ((status & 1) && (type[i] == vformat)) + return true; + i++; + } while (status >>= 1); + + return false; +} +EXPORT_SYMBOL(check_efuse_chip); +
diff --git a/drivers/common/chips/chips.h b/drivers/common/chips/chips.h new file mode 100644 index 0000000..003e9d2 --- /dev/null +++ b/drivers/common/chips/chips.h
@@ -0,0 +1,40 @@ +/* + * drivers/amlogic/media/common/arch/chips/chips.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef UCODE_MANAGER_HEADER +#define UCODE_MANAGER_HEADER +#include "../media_clock/clk/clk_priv.h" + +struct chip_vdec_info_s { + + int cpu_type; + + struct video_firmware_s *firmware; + + struct chip_vdec_clk_s *clk_mgr[VDEC_MAX]; + + struct clk_set_setting *clk_setting_array; +}; + +const char *get_cpu_type_name(void); +const char *get_video_format_name(enum vformat_e type); + +struct chip_vdec_info_s *get_current_vdec_chip(void); + +bool check_efuse_chip(int vformat); + +#endif
diff --git a/drivers/common/chips/decoder_cpu_ver_info.c b/drivers/common/chips/decoder_cpu_ver_info.c new file mode 100644 index 0000000..af162b8 --- /dev/null +++ b/drivers/common/chips/decoder_cpu_ver_info.c
@@ -0,0 +1,271 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/kernel.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/amlogic/media/registers/cpu_version.h> +#include "decoder_cpu_ver_info.h" + +#define DECODE_CPU_VER_ID_NODE_NAME "cpu_ver_name" +#define AM_SUCESS 0 +#define MAJOR_ID_START AM_MESON_CPU_MAJOR_ID_M6 + +static enum AM_MESON_CPU_MAJOR_ID cpu_ver_id = AM_MESON_CPU_MAJOR_ID_MAX; +static int cpu_sub_id = 0; + +static enum AM_MESON_CPU_MAJOR_ID cpu_ver_info[AM_MESON_CPU_MAJOR_ID_MAX - MAJOR_ID_START]= +{ + AM_MESON_CPU_MAJOR_ID_M6, + AM_MESON_CPU_MAJOR_ID_M6TV, + AM_MESON_CPU_MAJOR_ID_M6TVL, + AM_MESON_CPU_MAJOR_ID_M8, + AM_MESON_CPU_MAJOR_ID_MTVD, + AM_MESON_CPU_MAJOR_ID_M8B, + AM_MESON_CPU_MAJOR_ID_MG9TV, + AM_MESON_CPU_MAJOR_ID_M8M2, + AM_MESON_CPU_MAJOR_ID_UNUSE, + AM_MESON_CPU_MAJOR_ID_GXBB, + AM_MESON_CPU_MAJOR_ID_GXTVBB, + AM_MESON_CPU_MAJOR_ID_GXL, + AM_MESON_CPU_MAJOR_ID_GXM, + AM_MESON_CPU_MAJOR_ID_TXL, + AM_MESON_CPU_MAJOR_ID_TXLX, + AM_MESON_CPU_MAJOR_ID_AXG, + AM_MESON_CPU_MAJOR_ID_GXLX, + AM_MESON_CPU_MAJOR_ID_TXHD, + 
AM_MESON_CPU_MAJOR_ID_G12A, + AM_MESON_CPU_MAJOR_ID_G12B, + AM_MESON_CPU_MAJOR_ID_GXLX2, + AM_MESON_CPU_MAJOR_ID_SM1, + AM_MESON_CPU_MAJOR_ID_A1, + AM_MESON_CPU_MAJOR_ID_RES_0x2d, + AM_MESON_CPU_MAJOR_ID_TL1, + AM_MESON_CPU_MAJOR_ID_TM2, + AM_MESON_CPU_MAJOR_ID_C1, + AM_MESON_CPU_MAJOR_ID_RES_0x31, + AM_MESON_CPU_MAJOR_ID_SC2, + AM_MESON_CPU_MAJOR_ID_C2, + AM_MESON_CPU_MAJOR_ID_T5, + AM_MESON_CPU_MAJOR_ID_T5D, + AM_MESON_CPU_MAJOR_ID_T7, + AM_MESON_CPU_MAJOR_ID_S4, + AM_MESON_CPU_MAJOR_ID_T3, + AM_MESON_CPU_MAJOR_ID_P1, + AM_MESON_CPU_MAJOR_ID_S4D, + AM_MESON_CPU_MAJOR_ID_T5W, +}; + +static const struct of_device_id cpu_ver_of_match[] = { + { + .compatible = "amlogic, cpu-major-id-axg", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_AXG - MAJOR_ID_START], + }, + + { + .compatible = "amlogic, cpu-major-id-g12a", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_G12A - MAJOR_ID_START], + }, + + { + .compatible = "amlogic, cpu-major-id-gxl", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_GXL - MAJOR_ID_START], + }, + + { + .compatible = "amlogic, cpu-major-id-gxm", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_GXM - MAJOR_ID_START], + }, + + { + .compatible = "amlogic, cpu-major-id-txl", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_TXL - MAJOR_ID_START], + }, + + { + .compatible = "amlogic, cpu-major-id-txlx", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_TXLX - MAJOR_ID_START], + }, + + { + .compatible = "amlogic, cpu-major-id-sm1", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_SM1 - MAJOR_ID_START], + }, + + { + .compatible = "amlogic, cpu-major-id-tl1", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_TL1 - MAJOR_ID_START], + }, + { + .compatible = "amlogic, cpu-major-id-tm2", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_TM2 - MAJOR_ID_START], + }, + { + .compatible = "amlogic, cpu-major-id-sc2", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_SC2 - MAJOR_ID_START], + }, + { + .compatible = "amlogic, cpu-major-id-t5", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_T5 - 
MAJOR_ID_START], + }, + { + .compatible = "amlogic, cpu-major-id-t5d", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_T5D - MAJOR_ID_START], + }, + { + .compatible = "amlogic, cpu-major-id-t7", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_T7 - MAJOR_ID_START], + }, + { + .compatible = "amlogic, cpu-major-id-s4", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_S4 - MAJOR_ID_START], + }, + { + .compatible = "amlogic, cpu-major-id-t3", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_T3 - MAJOR_ID_START], + }, + { + .compatible = "amlogic, cpu-major-id-p1", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_P1 - MAJOR_ID_START], + }, + { + .compatible = "amlogic, cpu-major-id-s4d", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_S4D - MAJOR_ID_START], + }, + { + .compatible = "amlogic, cpu-major-id-t5w", + .data = &cpu_ver_info[AM_MESON_CPU_MAJOR_ID_T5W - MAJOR_ID_START], + }, + {}, +}; + +static const int cpu_sub_info[] = { + AM_MESON_CPU_MINOR_ID_REVB_G12B, + AM_MESON_CPU_MINOR_ID_REVB_TM2, + AM_MESON_CPU_MINOR_ID_S4_S805X2, +}; + +static const struct of_device_id cpu_sub_id_of_match[] = { + { + .compatible = "amlogic, cpu-major-id-g12b-b", + .data = &cpu_sub_info[0], + }, + { + .compatible = "amlogic, cpu-major-id-tm2-b", + .data = &cpu_sub_info[1], + }, + { + .compatible = "amlogic, cpu-major-id-s4-805x2", + .data = &cpu_sub_info[2], + }, +}; + +static bool get_cpu_id_from_dtb(enum AM_MESON_CPU_MAJOR_ID *pid_type, int *sub_id) +{ + struct device_node *pnode = NULL; + struct platform_device *pdev = NULL; + const struct of_device_id *pmatch = NULL; + + pnode = of_find_node_by_name(NULL, DECODE_CPU_VER_ID_NODE_NAME); + if (NULL == pnode) { + pr_err("No find node.\n"); + return -EINVAL; + } + + pdev = of_find_device_by_node(pnode); + if (NULL == pdev) + return -EINVAL; + + pmatch = of_match_device(cpu_ver_of_match, &pdev->dev); + if (NULL == pmatch) { + pmatch = of_match_device(cpu_sub_id_of_match, &pdev->dev); + if (NULL == pmatch) { + pr_err("No find of_match_device\n"); + 
return -EINVAL; + } + } + + *pid_type = (enum AM_MESON_CPU_MAJOR_ID)(*(int *)pmatch->data) & (MAJOY_ID_MASK); + + *sub_id = ((*(int *)pmatch->data) & (SUB_ID_MASK)) >> 8; + + return AM_SUCESS; +} + +static void initial_cpu_id(void) +{ + enum AM_MESON_CPU_MAJOR_ID id_type = AM_MESON_CPU_MAJOR_ID_MAX; + int sub_id = 0; + + if (AM_SUCESS == get_cpu_id_from_dtb(&id_type, &sub_id)) { + cpu_ver_id = id_type; + cpu_sub_id = sub_id; + } else { + cpu_ver_id = (enum AM_MESON_CPU_MAJOR_ID)get_cpu_type(); + cpu_sub_id = (is_meson_rev_b()) ? CHIP_REVB : CHIP_REVA; + } + + if ((AM_MESON_CPU_MAJOR_ID_G12B == cpu_ver_id) && (CHIP_REVB == cpu_sub_id)) + cpu_ver_id = AM_MESON_CPU_MAJOR_ID_TL1; + + pr_info("vdec init cpu id: 0x%x(%d)", cpu_ver_id, cpu_sub_id); +} + +enum AM_MESON_CPU_MAJOR_ID get_cpu_major_id(void) +{ + if (AM_MESON_CPU_MAJOR_ID_MAX == cpu_ver_id) + initial_cpu_id(); + + return cpu_ver_id; +} +EXPORT_SYMBOL(get_cpu_major_id); + +int get_cpu_sub_id(void) +{ + return cpu_sub_id; +} +EXPORT_SYMBOL(get_cpu_sub_id); + +bool is_cpu_meson_revb(void) +{ + if (AM_MESON_CPU_MAJOR_ID_MAX == cpu_ver_id) + initial_cpu_id(); + + return (cpu_sub_id == CHIP_REVB); +} +EXPORT_SYMBOL(is_cpu_meson_revb); + +bool is_cpu_tm2_revb(void) +{ + return ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_TM2) + && (is_cpu_meson_revb())); +} +EXPORT_SYMBOL(is_cpu_tm2_revb); + +bool is_cpu_s4_s805x2(void) +{ + return ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_S4) + && (get_cpu_sub_id() == CHIP_REVX)); +} +EXPORT_SYMBOL(is_cpu_s4_s805x2); +
diff --git a/drivers/common/chips/decoder_cpu_ver_info.h b/drivers/common/chips/decoder_cpu_ver_info.h new file mode 100644 index 0000000..6129244 --- /dev/null +++ b/drivers/common/chips/decoder_cpu_ver_info.h
@@ -0,0 +1,95 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef DECODER_CPU_VER_INFO_H +#define DECODER_CPU_VER_INFO_H +#include <linux/amlogic/media/registers/cpu_version.h> +/* majoy chip id define */ +#define MAJOY_ID_MASK (0x000000ff) + +enum AM_MESON_CPU_MAJOR_ID { + AM_MESON_CPU_MAJOR_ID_M6 = 0x16, + AM_MESON_CPU_MAJOR_ID_M6TV = 0x17, + AM_MESON_CPU_MAJOR_ID_M6TVL = 0x18, + AM_MESON_CPU_MAJOR_ID_M8 = 0x19, + AM_MESON_CPU_MAJOR_ID_MTVD = 0x1A, + AM_MESON_CPU_MAJOR_ID_M8B = 0x1B, + AM_MESON_CPU_MAJOR_ID_MG9TV = 0x1C, + AM_MESON_CPU_MAJOR_ID_M8M2 = 0x1D, + AM_MESON_CPU_MAJOR_ID_UNUSE = 0x1E, + AM_MESON_CPU_MAJOR_ID_GXBB = 0x1F, + AM_MESON_CPU_MAJOR_ID_GXTVBB = 0x20, + AM_MESON_CPU_MAJOR_ID_GXL = 0x21, + AM_MESON_CPU_MAJOR_ID_GXM = 0x22, + AM_MESON_CPU_MAJOR_ID_TXL = 0x23, + AM_MESON_CPU_MAJOR_ID_TXLX = 0x24, + AM_MESON_CPU_MAJOR_ID_AXG = 0x25, + AM_MESON_CPU_MAJOR_ID_GXLX = 0x26, + AM_MESON_CPU_MAJOR_ID_TXHD = 0x27, + AM_MESON_CPU_MAJOR_ID_G12A = 0x28, + AM_MESON_CPU_MAJOR_ID_G12B = 0x29, + AM_MESON_CPU_MAJOR_ID_GXLX2 = 0x2a, + AM_MESON_CPU_MAJOR_ID_SM1 = 0x2b, + AM_MESON_CPU_MAJOR_ID_A1 = 0x2c, + AM_MESON_CPU_MAJOR_ID_RES_0x2d, + AM_MESON_CPU_MAJOR_ID_TL1 = 0x2e, + AM_MESON_CPU_MAJOR_ID_TM2 = 0x2f, + 
AM_MESON_CPU_MAJOR_ID_C1 = 0x30, + AM_MESON_CPU_MAJOR_ID_RES_0x31, + AM_MESON_CPU_MAJOR_ID_SC2 = 0x32, + AM_MESON_CPU_MAJOR_ID_C2 = 0x33, + AM_MESON_CPU_MAJOR_ID_T5 = 0x34, + AM_MESON_CPU_MAJOR_ID_T5D = 0x35, + AM_MESON_CPU_MAJOR_ID_T7 = 0x36, + AM_MESON_CPU_MAJOR_ID_S4 = 0x37, + AM_MESON_CPU_MAJOR_ID_T3 = 0x38, + AM_MESON_CPU_MAJOR_ID_P1 = 0x39, + AM_MESON_CPU_MAJOR_ID_S4D = 0x3a, + AM_MESON_CPU_MAJOR_ID_T5W = 0x3b, + AM_MESON_CPU_MAJOR_ID_MAX, +}; + +/* chips sub id define */ +#define CHIP_REVA 0x0 +#define CHIP_REVB 0x1 +#define CHIP_REVC 0x2 +#define CHIP_REVX 0x10 + +#define REVB_MASK (CHIP_REVB << 8) +#define REVC_MASK (CHIP_REVC << 8) +#define REVX_MASK (CHIP_REVX << 8) + +#define SUB_ID_MASK (REVB_MASK | REVC_MASK | REVX_MASK) + +#define AM_MESON_CPU_MINOR_ID_REVB_G12B (REVB_MASK | AM_MESON_CPU_MAJOR_ID_G12B) +#define AM_MESON_CPU_MINOR_ID_REVB_TM2 (REVB_MASK | AM_MESON_CPU_MAJOR_ID_TM2) +#define AM_MESON_CPU_MINOR_ID_S4_S805X2 (REVX_MASK | AM_MESON_CPU_MAJOR_ID_S4) + +/* export functions */ +enum AM_MESON_CPU_MAJOR_ID get_cpu_major_id(void); + +bool is_cpu_meson_revb(void); + +bool is_cpu_tm2_revb(void); + +int get_cpu_sub_id(void); + +bool is_cpu_s4_s805x2(void); + +#endif
diff --git a/drivers/common/firmware/Makefile b/drivers/common/firmware/Makefile new file mode 100644 index 0000000..748039c --- /dev/null +++ b/drivers/common/firmware/Makefile
@@ -0,0 +1,3 @@ +obj-m += firmware.o +firmware-objs += firmware_drv.o +firmware-objs += firmware_type.o
diff --git a/drivers/common/firmware/firmware_cfg.h b/drivers/common/firmware/firmware_cfg.h new file mode 100644 index 0000000..e23ac2a --- /dev/null +++ b/drivers/common/firmware/firmware_cfg.h
@@ -0,0 +1,32 @@ +/* + * drivers/amlogic/media/common/firmware/firmware_cfg.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +/*all firmwares in one bin.*/ +{VIDEO_MISC, VIDEO_PACKAGE, "video_ucode.bin"}, + +/* Note: if the addition of new package has the same name */ +/* as the firmware in the video_ucode.bin, the firmware */ +/* in the video_ucode.bin will be ignored yet, because the */ +/* video_ucode.bin will always be processed in the end */ +{VIDEO_ENCODE, VIDEO_PACKAGE, "h264_enc.bin"}, + + +/*firmware for a special format, to replace the format in the package.*/ +/*{VIDEO_DECODE, VIDEO_FW_FILE, "h265.bin"},*/ +/*{VIDEO_DECODE, VIDEO_FW_FILE, "h264.bin"},*/ +/*{VIDEO_DECODE, VIDEO_FW_FILE, "h264_multi.bin"},*/ +
diff --git a/drivers/common/firmware/firmware_drv.c b/drivers/common/firmware/firmware_drv.c new file mode 100644 index 0000000..9422198 --- /dev/null +++ b/drivers/common/firmware/firmware_drv.c
@@ -0,0 +1,1056 @@ +/* + * drivers/amlogic/media/common/firmware/firmware.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/slab.h> + +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/registers/cpu_version.h> +#include "../../stream_input/amports/amports_priv.h" +#include "../../frame_provider/decoder/utils/vdec.h" +#include "firmware_priv.h" +#include "../chips/chips.h" +#include <linux/string.h> +#include <linux/amlogic/media/utils/log.h> +#include <linux/firmware.h> +#include <linux/amlogic/tee.h> +//#include <linux/amlogic/major.h> //if kernel is 4.9 then use this one +#include <uapi/linux/major.h> +#include <linux/cdev.h> +#include <linux/crc32.h> +#include "../chips/decoder_cpu_ver_info.h" + +/* major.minor */ +#define PACK_VERS "v0.3" + +#define CLASS_NAME "firmware_codec" +#define DEV_NAME "firmware_vdec" +#define DIR "video" +#define FRIMWARE_SIZE (64 * 1024) /*64k*/ +#define BUFF_SIZE (1024 * 1024 * 2) + +#define FW_LOAD_FORCE (0x1) +#define FW_LOAD_TRY (0X2) + +/*the first 256 bytes are signature data*/ +#define SEC_OFFSET (256) + +#define TRY_PARSE_MAX (256) + +#define PACK ('P' << 24 | 'A' << 16 | 'C' << 8 | 'K') +#define CODE ('C' << 24 | 'O' << 16 | 'D' << 8 | 'E') + +#ifndef 
FIRMWARE_MAJOR +#define FIRMWARE_MAJOR AMSTREAM_MAJOR +#endif + +static DEFINE_MUTEX(mutex); + +static struct ucode_file_info_s ucode_info[] = { +#include "firmware_cfg.h" +}; + +static const struct file_operations fw_fops = { + .owner = THIS_MODULE +}; + +struct fw_mgr_s *g_mgr; +struct fw_dev_s *g_dev; +struct package_head_s package_head; + +static u32 debug; +static u32 detail; + +int get_firmware_data(unsigned int format, char *buf) +{ + int data_len, ret = -1; + struct fw_mgr_s *mgr = g_mgr; + struct fw_info_s *info; + + pr_info("[%s], the fw (%s) will be loaded...\n", + tee_enabled() ? "TEE" : "LOCAL", + get_fw_format_name(format)); + + if (tee_enabled()) + return 0; + + mutex_lock(&mutex); + + if (list_empty(&mgr->fw_head)) { + pr_info("the info list is empty.\n"); + goto out; + } + + list_for_each_entry(info, &mgr->fw_head, node) { + if (format != info->format) + continue; + + data_len = info->data->head.data_size; + memcpy(buf, info->data->data, data_len); + ret = data_len; + + break; + } + +out: + mutex_unlock(&mutex); + + return ret; +} +EXPORT_SYMBOL(get_firmware_data); + +int get_data_from_name(const char *name, char *buf) +{ + int data_len, ret = -1; + struct fw_mgr_s *mgr = g_mgr; + struct fw_info_s *info; + char *fw_name = __getname(); + int len; + + if (fw_name == NULL) + return -ENOMEM; + + len = snprintf(fw_name, PATH_MAX, "%s.bin", name); + if (len >= PATH_MAX) { + __putname(fw_name); + return -ENAMETOOLONG; + } + + mutex_lock(&mutex); + + if (list_empty(&mgr->fw_head)) { + pr_info("the info list is empty.\n"); + goto out; + } + + list_for_each_entry(info, &mgr->fw_head, node) { + if (strcmp(fw_name, info->name)) + continue; + + data_len = info->data->head.data_size; + memcpy(buf, info->data->data, data_len); + ret = data_len; + + break; + } +out: + mutex_unlock(&mutex); + + __putname(fw_name); + + return ret; +} +EXPORT_SYMBOL(get_data_from_name); + +static int fw_probe(char *buf) +{ + int magic = 0; + + memcpy(&magic, buf, sizeof(int)); + 
return magic; +} + +static int request_firmware_from_sys(const char *file_name, + char *buf, int size) +{ + int ret = -1; + const struct firmware *fw; + int magic, offset = 0; + + pr_info("Try to load %s ...\n", file_name); + + ret = request_firmware(&fw, file_name, g_dev->dev); + if (ret < 0) { + pr_info("Error : %d can't load the %s.\n", ret, file_name); + goto err; + } + + if (fw->size > size) { + pr_info("Not enough memory size for ucode.\n"); + ret = -ENOMEM; + goto release; + } + + magic = fw_probe((char *)fw->data); + if (magic != PACK && magic != CODE) { + if (fw->size < SEC_OFFSET) { + pr_info("This is an invalid firmware file.\n"); + goto release; + } + + magic = fw_probe((char *)fw->data + SEC_OFFSET); + if (magic != PACK) { + pr_info("The firmware file is not packet.\n"); + goto release; + } + + offset = SEC_OFFSET; + } + + memcpy(buf, (char *)fw->data + offset, fw->size - offset); + + pr_info("load firmware size : %zd, Name : %s.\n", + fw->size, file_name); + ret = fw->size; +release: + release_firmware(fw); +err: + return ret; +} + +int request_decoder_firmware_on_sys(enum vformat_e format, + const char *file_name, char *buf, int size) +{ + int ret; + + ret = get_data_from_name(file_name, buf); + if (ret < 0) + pr_info("Get firmware fail.\n"); + + if (ret > size) { + pr_info("Not enough memory.\n"); + return -ENOMEM; + } + + return ret; +} +int get_decoder_firmware_data(enum vformat_e format, + const char *file_name, char *buf, int size) +{ + int ret; + + ret = request_decoder_firmware_on_sys(format, file_name, buf, size); + if (ret < 0) + pr_info("get_decoder_firmware_data %s for format %d failed!\n", + file_name, format); + + return ret; +} +EXPORT_SYMBOL(get_decoder_firmware_data); + +static unsigned long fw_mgr_lock(struct fw_mgr_s *mgr) +{ + unsigned long flags; + + spin_lock_irqsave(&mgr->lock, flags); + return flags; +} + +static void fw_mgr_unlock(struct fw_mgr_s *mgr, unsigned long flags) +{ + spin_unlock_irqrestore(&mgr->lock, flags); +} + 
+static void fw_add_info(struct fw_info_s *info) +{ + unsigned long flags; + struct fw_mgr_s *mgr = g_mgr; + + flags = fw_mgr_lock(mgr); + list_add(&info->node, &mgr->fw_head); + fw_mgr_unlock(mgr, flags); +} + +static void fw_del_info(struct fw_info_s *info) +{ + unsigned long flags; + struct fw_mgr_s *mgr = g_mgr; + + flags = fw_mgr_lock(mgr); + list_del(&info->node); + kfree(info); + fw_mgr_unlock(mgr, flags); +} + +static void fw_info_walk(void) +{ + struct fw_mgr_s *mgr = g_mgr; + struct fw_info_s *info; + + if (list_empty(&mgr->fw_head)) { + pr_info("the info list is empty.\n"); + return; + } + + list_for_each_entry(info, &mgr->fw_head, node) { + if (IS_ERR_OR_NULL(info->data)) + continue; + + pr_info("name : %s.\n", info->name); + pr_info("ver : %s.\n", + info->data->head.version); + pr_info("crc : 0x%x.\n", + info->data->head.checksum); + pr_info("size : %d.\n", + info->data->head.data_size); + pr_info("maker: %s.\n", + info->data->head.maker); + pr_info("from : %s.\n", info->src_from); + pr_info("date : %s.\n", + info->data->head.date); + if (info->data->head.duplicate) + pr_info("NOTE : Dup from %s.\n", + info->data->head.dup_from); + pr_info("\n"); + } +} + +static void fw_files_info_walk(void) +{ + struct fw_mgr_s *mgr = g_mgr; + struct fw_files_s *files; + + if (list_empty(&mgr->files_head)) { + pr_info("the file list is empty.\n"); + return; + } + + list_for_each_entry(files, &mgr->files_head, node) { + pr_info("type : %s.\n", !files->fw_type ? + "VIDEO_DECODE" : files->fw_type == 1 ? + "VIDEO_ENCODE" : "VIDEO_MISC"); + pr_info("from : %s.\n", !files->file_type ? 
+ "VIDEO_PACKAGE" : "VIDEO_FW_FILE");
+ pr_info("path : %s.\n", files->path);
+ pr_info("name : %s.\n\n", files->name);
+ }
+}
+
/*
 * sysfs 'info' show handler: prints driver/package versions, change-id
 * history, and per-firmware details (verbose when 'detail' is set).
 * NOTE(review): almost all output goes to the kernel log via pr_info(),
 * not into the sysfs buffer — reading the attribute returns only the
 * "No firmware." text (or nothing); confirm this is intended.
 */
+static ssize_t info_show(struct class *class,
+ struct class_attribute *attr, char *buf)
+{
+ char *pbuf = buf;
+ struct fw_mgr_s *mgr = g_mgr;
+ struct fw_info_s *info;
+ unsigned int secs = 0;
+ struct tm tm;
/* 6 id chars + NUL; ids are packed 6-wide in package_head.history_change_id */
+ char history_change_id[7] = {0};
+ int i;
+
+ mutex_lock(&mutex);
+
+ if (list_empty(&mgr->fw_head)) {
+ pbuf += sprintf(pbuf, "No firmware.\n");
+ goto out;
+ }
+
+ /* shows version of driver. */
+ pr_info("The ucode driver version is %s\n", PACK_VERS);
+
+ pr_info("The firmware version is %d.%d.%d-g%s\n",
+ (package_head.version >> 16) & 0xff,
+ package_head.version & 0xff,
+ package_head.submit_count,
+ package_head.commit);
+
+ pr_info("change id history:\n");
+ for (i = 0; i < 5; i++) {
+ memset(history_change_id, 0, sizeof(history_change_id));
+ strncpy(history_change_id, &(package_head.history_change_id[i * 6]), 6);
+ pr_info("\t%s\n", history_change_id);
+
+ }
+
+ list_for_each_entry(info, &mgr->fw_head, node) {
+ if (IS_ERR_OR_NULL(info->data))
+ continue;
+
+ if (detail) {
+ pr_info("%-5s: %s\n", "name", info->name);
+ pr_info("%-5s: %s\n", "ver",
+ info->data->head.version);
+ pr_info("%-5s: 0x%x\n", "sum",
+ info->data->head.checksum);
+ pr_info("%-5s: %d\n", "size",
+ info->data->head.data_size);
+ pr_info("%-5s: %s\n", "maker",
+ info->data->head.maker);
+ pr_info("%-5s: %s\n", "from",
+ info->src_from);
+ pr_info("%-5s: %s\n\n", "date",
+ info->data->head.date);
+ continue;
+ }
+
/* Convert the build timestamp to local wall-clock time. */
+ secs = info->data->head.time
+ - sys_tz.tz_minuteswest * 60;
+ //time_to_tm(secs, 0, &tm);//kernel4.9
+ time64_to_tm(secs, 0, &tm);
+
+ pr_info("%s %-16s, %02d:%02d:%02d %d/%d/%ld, %s %-8s, %s %-8s, %s %s\n",
+ "fmt:", info->data->head.format,
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
+ "cmtid:", info->data->head.commit,
+ "chgid:", info->data->head.change_id,
+ "mk:", info->data->head.maker);
+
}
+out:
+ mutex_unlock(&mutex);
+
+ return pbuf - buf;
+}
+
/* sysfs 'info' store handler: parses an integer into the 'detail'
 * verbosity flag used by info_show(). */
+static ssize_t info_store(struct class *cls,
+ struct class_attribute *attr, const char *buf, size_t count)
+{
+ if (kstrtoint(buf, 0, &detail) < 0)
+ return -EINVAL;
+
+ return count;
+}
+
/*
 * Populate mgr->files_head from the static ucode_info[] table: one
 * fw_files_s per named entry, with its full "DIR/name" path.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
+static int fw_info_fill(void)
+{
+ int ret = 0, i, len;
+ struct fw_mgr_s *mgr = g_mgr;
+ struct fw_files_s *files;
+ int info_size = ARRAY_SIZE(ucode_info);
+ char *path = __getname();
+ const char *name;
+
+ if (path == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < info_size; i++) {
+ name = ucode_info[i].name;
+ if (IS_ERR_OR_NULL(name))
+ break;
+
+ len = snprintf(path, PATH_MAX, "%s/%s", DIR,
+ ucode_info[i].name);
+ if (len >= PATH_MAX)
+ continue;
+
+ files = kzalloc(sizeof(struct fw_files_s), GFP_KERNEL);
+ if (files == NULL) {
+ __putname(path);
+ return -ENOMEM;
+ }
+
+ files->file_type = ucode_info[i].file_type;
+ files->fw_type = ucode_info[i].fw_type;
/* files->path is only 64 bytes — truncation is tolerated, NUL forced. */
+ strncpy(files->path, path, sizeof(files->path));
+ files->path[sizeof(files->path) - 1] = '\0';
+ strncpy(files->name, name, sizeof(files->name));
+ files->name[sizeof(files->name) - 1] = '\0';
+
+ list_add(&files->node, &mgr->files_head);
+ }
+
+ __putname(path);
+
+ if (debug)
+ fw_files_info_walk();
+
+ return ret;
+}
+
/* CRC32 verification. Returns 1 when the header checksum matches the
 * payload, 0 on mismatch (note: inverted vs the usual 0-on-success). */
+static int fw_data_check_sum(struct firmware_s *fw)
+{
+ unsigned int crc;
+
+ crc = crc32_le(~0U, fw->data, fw->head.data_size);
+
+ /*pr_info("firmware crc result : 0x%x\n", crc ^ ~0U);*/
+
+ return fw->head.checksum != (crc ^ ~0U) ? 0 : 1;
+}
+
/*
 * Decide whether a newly parsed firmware should be kept.
 * Returns -1 (fw targets a newer cpu than ours; fw/fw_info freed),
 * 1 (filtered out in favor of an existing entry; fw/fw_info freed),
 * or 0 (keep; any stale duplicate entry is dropped from the list).
 * Continues on the next source line.
 */
+static int fw_data_filter(struct firmware_s *fw,
+ struct fw_info_s *fw_info)
+{
+ struct fw_mgr_s *mgr = g_mgr;
+ struct fw_info_s *info, *tmp;
+ int cpu = fw_get_cpu(fw->head.cpu);
+
+ if (mgr->cur_cpu < cpu) {
+ kfree(fw_info);
+ kfree(fw);
+ return -1;
+ }
+
+ /* the encode fw need to ignoring filtering rules.
*/
+ if (fw_info->format == FIRMWARE_MAX)
+ return 0;
+
+ list_for_each_entry_safe(info, tmp, &mgr->fw_head, node) {
+ if (info->format != fw_info->format)
+ continue;
+
+ if (IS_ERR_OR_NULL(info->data)) {
+ fw_del_info(info);
+ return 0;
+ }
+
+ /* high priority of VIDEO_FW_FILE */
+ if (info->file_type == VIDEO_FW_FILE) {
+ pr_info("the %s need to priority proc.\n",info->name);
+ kfree(fw_info);
+ kfree(fw);
+ return 1;
+ }
+
+ /* the cpu ver is lower and needs to be filtered */
+ if (cpu < fw_get_cpu(info->data->head.cpu)) {
+ if (debug)
+ pr_info("keep the newer fw (%s) and ignore the older fw (%s).\n",
+ info->name, fw_info->name);
+ kfree(fw_info);
+ kfree(fw);
+ return 1;
+ }
+
+ /* removes not match fw from info list */
+ if (debug)
+ pr_info("drop the old fw (%s) will be load the newer fw (%s).\n",
+ info->name, fw_info->name);
+ kfree(info->data);
+ fw_del_info(info);
+ }
+
+ return 0;
+}
+
/*
 * For every list entry marked 'duplicate', find the package entry with the
 * matching checksum and replace the entry's payload with a fresh copy of
 * that data, preserving the entry's own 512-byte header.
 * Returns 0 on success or -ENOMEM.
 * NOTE(review): bounded by TRY_PARSE_MAX iterations rather than the
 * package's 'total' count — presumably a parse-safety cap; confirm.
 */
+static int fw_replace_dup_data(char *buf)
+{
+ int ret = 0;
+ struct fw_mgr_s *mgr = g_mgr;
+ struct package_s *pkg =
+ (struct package_s *) buf;
+ struct package_info_s *pinfo =
+ (struct package_info_s *) pkg->data;
+ struct fw_info_s *info = NULL;
+ char *pdata = pkg->data;
+ int try_cnt = TRY_PARSE_MAX;
+
+ do {
+ if (!pinfo->head.length)
+ break;
+ list_for_each_entry(info, &mgr->fw_head, node) {
+ struct firmware_s *comp = NULL;
+ struct firmware_s *data = NULL;
+ int len = 0;
+
+ comp = (struct firmware_s *)pinfo->data;
+ if (comp->head.duplicate)
+ break;
+
+ if (!info->data->head.duplicate ||
+ comp->head.checksum !=
+ info->data->head.checksum)
+ continue;
+
+ len = pinfo->head.length;
+ data = kzalloc(len, GFP_KERNEL);
+ if (data == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(data, pinfo->data, len);
+
+ /* update header information. */
+ memcpy(data, info->data, sizeof(*data));
+
+ /* if replaced success need to update real size.
*/
+ data->head.data_size = comp->head.data_size;
+
+ kfree(info->data);
+ info->data = data;
+ }
+ pdata += (pinfo->head.length + sizeof(*pinfo));
+ pinfo = (struct package_info_s *)pdata;
+ } while (try_cnt--);
+out:
+ return ret;
+}
+
/*
 * Compare the driver's compiled-in PACK_VERS ("vMAJ.MIN", hex fields)
 * against the package header version. Also snapshots the header into the
 * global package_head. Returns 0 when compatible, -1 when the package's
 * major version exceeds the driver's (or PACK_VERS fails to parse);
 * a newer minor version only logs a warning.
 */
+static int fw_check_pack_version(char *buf)
+{
+ struct package_s *pack = NULL;
+ int major, minor, major_fw, minor_fw;
+ int ret;
+
+ pack = (struct package_s *) buf;
+ ret = sscanf(PACK_VERS, "v%x.%x", &major, &minor);
+ if (ret != 2)
+ return -1;
+
+ package_head = pack->head;
+ major_fw = (pack->head.version >> 16) & 0xff;
+ minor_fw = pack->head.version & 0xff;
+
+ if (major < major_fw) {
+ pr_info("the pack ver v%d.%d too higher to unsupport.\n",
+ major_fw, minor_fw);
+ return -1;
+ }
+
+ if (minor < minor_fw) {
+ pr_info("The fw driver version (v%d.%d) is lower than the pkg version (v%d.%d).\n",
+ major, minor, major_fw, minor_fw);
+ pr_info("The driver version is too low that may affect the work please update asap.\n");
+ }
+
+ if (debug) {
+ pr_info("The package has %d fws totally.\n", pack->head.total);
+ pr_info("The driver ver is v%d.%d\n", major, minor);
+ pr_info("The firmware ver is v%d.%d.%d\n", major_fw, minor_fw, pack->head.submit_count);
+ }
+
+ return 0;
+}
+
/*
 * Walk a PACK-format buffer entry by entry, allocating an fw_info_s +
 * firmware_s per entry, checksum-verifying non-duplicate payloads, running
 * fw_data_filter(), and adding survivors to the fw list. Finishes by
 * resolving 'duplicate' entries via fw_replace_dup_data().
 * NOTE(review): a checksum failure jumps to 'out' with ret still 0, so the
 * caller sees success and remaining entries are skipped — confirm intended.
 * Continues on the next source line.
 */
+static int fw_package_parse(struct fw_files_s *files,
+ char *buf, int size)
+{
+ int ret = 0;
+ struct package_info_s *pack_info;
+ struct fw_info_s *info;
+ struct firmware_s *data;
+ char *pack_data;
+ int info_len, len;
+ int try_cnt = TRY_PARSE_MAX;
+ char *path = __getname();
+
+ if (path == NULL)
+ return -ENOMEM;
+
+ pack_data = ((struct package_s *)buf)->data;
+ pack_info = (struct package_info_s *)pack_data;
+ info_len = sizeof(struct package_info_s);
+
+ do {
+ if (!pack_info->head.length)
+ break;
+
+ len = snprintf(path, PATH_MAX, "%s/%s", DIR,
+ pack_info->head.name);
+ if (len >= PATH_MAX)
+ continue;
+
+ info = kzalloc(sizeof(struct fw_info_s), GFP_KERNEL);
+ if (info == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ data =
kzalloc(FRIMWARE_SIZE, GFP_KERNEL);
+ if (data == NULL) {
+ kfree(info);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ info->file_type = files->file_type;
+ strncpy(info->src_from, files->name,
+ sizeof(info->src_from));
+ info->src_from[sizeof(info->src_from) - 1] = '\0';
+ strncpy(info->name, pack_info->head.name,
+ sizeof(info->name));
+ info->name[sizeof(info->name) - 1] = '\0';
+ info->format = get_fw_format(pack_info->head.format);
+
+ len = pack_info->head.length;
+ memcpy(data, pack_info->data, len);
+
/* Advance to the next package entry before validating this one. */
+ pack_data += (pack_info->head.length + info_len);
+ pack_info = (struct package_info_s *)pack_data;
+
+ if (!data->head.duplicate &&
+ !fw_data_check_sum(data)) {
+ pr_info("check sum fail !\n");
+ kfree(data);
+ kfree(info);
+ goto out;
+ }
+
/* fw_data_filter() != 0 means it already freed data/info. */
+ if (fw_data_filter(data, info))
+ continue;
+
+ if (debug)
+ pr_info("adds %s to the fw list.\n", info->name);
+
+ info->data = data;
+ fw_add_info(info);
+ } while (try_cnt--);
+
+ /* process the fw of dup attribute. */
+ ret = fw_replace_dup_data(buf);
+ if (ret)
+ pr_err("replace dup fw failed.\n");
+out:
+ __putname(path);
+
+ return ret;
+}
+
/*
 * Parse a single raw CODE-format firmware blob into one fw list entry.
 * Returns 0 on success, -ENOMEM or -1 (checksum failure).
 * NOTE(review): info->name is never populated here (kzalloc leaves it
 * empty), so the debug "adds %s" message prints an empty name — confirm
 * whether the name should be copied from files->name.
 */
+static int fw_code_parse(struct fw_files_s *files,
+ char *buf, int size)
+{
+ struct fw_info_s *info;
+
+ info = kzalloc(sizeof(struct fw_info_s), GFP_KERNEL);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->data = kzalloc(FRIMWARE_SIZE, GFP_KERNEL);
+ if (info->data == NULL) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
+ info->file_type = files->file_type;
+ strncpy(info->src_from, files->name,
+ sizeof(info->src_from));
+ info->src_from[sizeof(info->src_from) - 1] = '\0';
+ memcpy(info->data, buf, size);
+
+ if (!fw_data_check_sum(info->data)) {
+ pr_info("check sum fail !\n");
+ kfree(info->data);
+ kfree(info);
+ return -1;
+ }
+
+ if (debug)
+ pr_info("adds %s to the fw list.\n", info->name);
+
+ fw_add_info(info);
+
+ return 0;
+}
+
/* Thin logging wrapper around request_firmware_from_sys().
 * Continues on the next source line. */
+static int get_firmware_from_sys(const char *path,
+ char *buf, int size)
+{
+ int len = 0;
+
+ len = request_firmware_from_sys(path, buf, size);
+ if (len < 0)
+ pr_info("get data from fsys fail.\n");
+
+ return len;
+}
+
/*
 * Load every registered firmware file into a scratch buffer and dispatch
 * to the package or raw-code parser based on its magic; entries with an
 * unexpected type/magic combination are dropped from the file list.
 * NOTE(review): a negative 'size' from a failed load is not checked before
 * parsing — fw_probe() then reads the zeroed buffer and the entry falls
 * into the "invaild file type" branch; works, but an explicit size check
 * would be clearer.
 */
+static int fw_data_binding(void)
+{
+ int ret = 0, magic = 0;
+ struct fw_mgr_s *mgr = g_mgr;
+ struct fw_files_s *files, *tmp;
+ char *buf = NULL;
+ int size;
+
+ if (list_empty(&mgr->files_head)) {
+ pr_info("the file list is empty.\n");
+ return 0;
+ }
+
+ buf = vmalloc(BUFF_SIZE);
+ if (IS_ERR_OR_NULL(buf))
+ return -ENOMEM;
+
+ memset(buf, 0, BUFF_SIZE);
+
+ list_for_each_entry_safe(files, tmp, &mgr->files_head, node) {
+ size = get_firmware_from_sys(files->path, buf, BUFF_SIZE);
+ magic = fw_probe(buf);
+
+ if (files->file_type == VIDEO_PACKAGE && magic == PACK) {
+ if (!fw_check_pack_version(buf))
+ ret = fw_package_parse(files, buf, size);
+ } else if (files->file_type == VIDEO_FW_FILE && magic == CODE) {
+ ret = fw_code_parse(files, buf, size);
+ } else {
+ list_del(&files->node);
+ kfree(files);
+ pr_info("invaild file type.\n");
+ }
+
+ memset(buf, 0, BUFF_SIZE);
+ }
+
+ if (debug)
+ fw_info_walk();
+
+ vfree(buf);
+
+ return ret;
+}
+
/* Full load sequence: build the file list, then parse all firmware data.
 * Returns 0 on success, -1 on either step failing. */
+static int fw_pre_load(void)
+{
+ if (fw_info_fill() < 0) {
+ pr_info("Get path fail.\n");
+ return -1;
+ }
+
+ if (fw_data_binding() < 0) {
+ pr_info("Set data fail.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/* Allocate and initialize the global firmware manager state. */
+static int fw_mgr_init(void)
+{
+ g_mgr = kzalloc(sizeof(struct fw_mgr_s), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(g_mgr))
+ return -ENOMEM;
+
+ g_mgr->cur_cpu = get_cpu_major_id();
+ INIT_LIST_HEAD(&g_mgr->files_head);
+ INIT_LIST_HEAD(&g_mgr->fw_head);
+ spin_lock_init(&g_mgr->lock);
+
+ return 0;
+}
+
/* Free every file entry and firmware entry under the manager spinlock,
 * leaving both lists empty. Continues on the next source line. */
+static void fw_ctx_clean(void)
+{
+ struct fw_mgr_s *mgr = g_mgr;
+ struct fw_files_s *files;
+ struct fw_info_s *info;
+ unsigned long flags;
+
+ flags = fw_mgr_lock(mgr);
+ while (!list_empty(&mgr->files_head)) {
+ files = list_entry(mgr->files_head.next,
+ struct fw_files_s, node);
+ list_del(&files->node);
+ kfree(files);
+ }
+
+ while (!list_empty(&mgr->fw_head)) {
+ info = list_entry(mgr->fw_head.next,
+ struct fw_info_s, node);
+
list_del(&info->node);
+ kfree(info->data);
+ kfree(info);
+ }
+ fw_mgr_unlock(mgr, flags);
+}
+
/*
 * Exported reload entry point. FW_LOAD_FORCE drops all loaded firmware and
 * reloads; FW_LOAD_TRY loads only if nothing is loaded yet. A no-op (0)
 * when TEE is enabled, since firmware is then handled by the secure side.
 */
+int video_fw_reload(int mode)
+{
+ int ret = 0;
+ struct fw_mgr_s *mgr = g_mgr;
+
+ if (tee_enabled())
+ return 0;
+
+ mutex_lock(&mutex);
+
+ if (mode & FW_LOAD_FORCE) {
+ fw_ctx_clean();
+
+ ret = fw_pre_load();
+ if (ret < 0)
+ pr_err("The fw reload fail.\n");
+ } else if (mode & FW_LOAD_TRY) {
+ if (!list_empty(&mgr->fw_head)) {
+ pr_info("The fw has been loaded.\n");
+ goto out;
+ }
+
+ ret = fw_pre_load();
+ if (ret < 0)
+ pr_err("The fw try to reload fail.\n");
+ }
+out:
+ mutex_unlock(&mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(video_fw_reload);
+
/* sysfs 'reload' show handler: prints usage help into the sysfs buffer. */
+static ssize_t reload_show(struct class *class,
+ struct class_attribute *attr, char *buf)
+{
+ char *pbuf = buf;
+
+ pbuf += sprintf(pbuf, "The fw reload usage.\n");
+ pbuf += sprintf(pbuf, "> set 1 means that the fw is forced to update\n");
+ pbuf += sprintf(pbuf, "> set 2 means that the fw is try to reload\n");
+
+ return pbuf - buf;
+}
+
/*
 * sysfs 'reload' store handler: parses a mode value and triggers a reload.
 * NOTE(review): kstrtoint() is called with a pointer to 'unsigned int val'
 * — type-mismatched (kstrtouint() would be correct); same bit width, so it
 * works in practice, but confirm and fix.
 */
+static ssize_t reload_store(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = -1;
+ unsigned int val;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret != 0)
+ return -EINVAL;
+
+ ret = video_fw_reload(val);
+ if (ret < 0)
+ pr_err("fw reload fail.\n");
+
+ return size;
+}
+
/* sysfs 'debug' show/store: expose the module's debug verbosity flag. */
+static ssize_t debug_show(struct class *cls,
+ struct class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%x\n", debug);
+}
+
+static ssize_t debug_store(struct class *cls,
+ struct class_attribute *attr, const char *buf, size_t count)
+{
+ if (kstrtoint(buf, 0, &debug) < 0)
+ return -EINVAL;
+
+ return count;
+}
+
/* Legacy (kernel 4.9) class attribute table, kept for reference only. */
+#if 0 //kernel4.9
+static struct class_attribute fw_class_attrs[] = {
+ __ATTR(info, 0664, info_show, info_store),
+ __ATTR(reload, 0664, reload_show, reload_store),
+ __ATTR(debug, 0664, debug_show, debug_store),
+ __ATTR_NULL
+};
+
+static struct class fw_class = {
+ .name = CLASS_NAME,
+ .class_attrs = fw_class_attrs,
+};
+#else //below is
for kernel 4.19 and 5.4
+static CLASS_ATTR_RW(info);
+static CLASS_ATTR_RW(reload);
+static CLASS_ATTR_RW(debug);
+
+static struct attribute *fw_class_attrs[] = {
+ &class_attr_info.attr,
+ &class_attr_reload.attr,
+ &class_attr_debug.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(fw_class);
+
+static struct class fw_class = {
+ .name = CLASS_NAME,
+ .class_groups = fw_class_groups,
+};
+
+#endif
/*
 * Register the char device and sysfs class for the firmware driver.
 * NOTE(review): error paths do not unwind — a failure in cdev_add(),
 * class_register() or device_create() leaks the chrdev region / cdev /
 * class registered before it, and g_dev is never freed on failure.
 * Consider staged error labels (unreg_region, del_cdev, unreg_class).
 */
+static int fw_driver_init(void)
+{
+ int ret = -1;
+
+ g_dev = kzalloc(sizeof(struct fw_dev_s), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(g_dev))
+ return -ENOMEM;
+
+ g_dev->dev_no = MKDEV(FIRMWARE_MAJOR, 100);
+
+ ret = register_chrdev_region(g_dev->dev_no, 1, DEV_NAME);
+ if (ret < 0) {
+ pr_info("Can't get major number %d.\n", FIRMWARE_MAJOR);
+ goto err;
+ }
+
+ cdev_init(&g_dev->cdev, &fw_fops);
+ g_dev->cdev.owner = THIS_MODULE;
+
+ ret = cdev_add(&g_dev->cdev, g_dev->dev_no, 1);
+ if (ret) {
+ pr_info("Error %d adding cdev fail.\n", ret);
+ goto err;
+ }
+
+ ret = class_register(&fw_class);
+ if (ret < 0) {
+ pr_info("Failed in creating class.\n");
+ goto err;
+ }
+
+ g_dev->dev = device_create(&fw_class, NULL,
+ g_dev->dev_no, NULL, DEV_NAME);
+ if (IS_ERR_OR_NULL(g_dev->dev)) {
+ pr_info("Create device failed.\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ pr_info("Registered firmware driver success.\n");
+err:
+ return ret;
+}
+
/* Tear down everything fw_driver_init()/fw_mgr_init() created. */
+static void fw_driver_exit(void)
+{
+ cdev_del(&g_dev->cdev);
+ device_destroy(&fw_class, g_dev->dev_no);
+ class_unregister(&fw_class);
+ unregister_chrdev_region(g_dev->dev_no, 1);
+ kfree(g_dev);
+ kfree(g_mgr);
+}
+
/*
 * Module entry: driver registration, manager init, then initial fw load.
 * NOTE(review): on a mid-sequence failure nothing already initialized is
 * rolled back (e.g. fw_mgr_init() failing leaves the char device and class
 * registered) — confirm and add unwind paths.
 */
+static int __init fw_module_init(void)
+{
+ int ret = -1;
+
+ ret = fw_driver_init();
+ if (ret) {
+ pr_info("Error %d firmware driver init fail.\n", ret);
+ goto err;
+ }
+
+ ret = fw_mgr_init();
+ if (ret) {
+ pr_info("Error %d firmware mgr init fail.\n", ret);
+ goto err;
+ }
+
+ ret = fw_pre_load();
+ if (ret) {
+ pr_info("Error %d firmware pre load fail.\n", ret);
+ goto err;
+ }
+err:
+ return ret;
+}
+
+static void __exit
/* Module exit: free all firmware/file entries, then unregister the driver. */
fw_module_exit(void)
+{
+ fw_ctx_clean();
+ fw_driver_exit();
+ pr_info("Firmware driver cleaned up.\n");
+}
+
+module_init(fw_module_init);
+module_exit(fw_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nanxin Qin <nanxin.qin@amlogic.com>");
diff --git a/drivers/common/firmware/firmware_priv.h b/drivers/common/firmware/firmware_priv.h new file mode 100644 index 0000000..d901f9d --- /dev/null +++ b/drivers/common/firmware/firmware_priv.h
@@ -0,0 +1,124 @@
+/*
+ * drivers/amlogic/media/common/firmware/firmware.h
+ *
+ * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+*/
+
+#ifndef __VIDEO_FIRMWARE_PRIV_HEAD_
+#define __VIDEO_FIRMWARE_PRIV_HEAD_
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include "firmware_type.h"
+
/* Global firmware manager: the loaded-firmware list (fw_head), the
 * candidate file list (files_head), their spinlock, and the current
 * SoC cpu major id used for filtering. */
+struct fw_mgr_s {
+ struct list_head fw_head;
+ struct list_head files_head;
+ spinlock_t lock;
+ int cur_cpu;
+};
+
/* One candidate firmware file on disk (type, origin, path). */
+struct fw_files_s {
+ struct list_head node;
+ int fw_type;
+ int file_type;
+ char name[32];
+ char path[64];
+};
+
/* Static table entry describing a known ucode file to look for. */
+struct ucode_file_info_s {
+ int fw_type;
+ int file_type;
+ const char *name;
+};
+
/* One loaded firmware: list node, identity, origin, and payload. */
+struct fw_info_s {
+ struct list_head node;
+ char name[32];
+ char src_from[32];
+ int file_type;
+ unsigned int format;
+ struct firmware_s *data;
+};
+
/* On-media per-firmware header. Field layout is the binary wire format —
 * do not reorder or resize (reserved[] pads the header to 512 bytes,
 * matching the union in firmware_s below). */
+struct fw_head_s {
+ int magic;
+ int checksum;
+ char name[32];
+ char cpu[16];
+ char format[32];
+ char version[32];
+ char maker[32];
+ char date[32];
+ char commit[16];
+ int data_size;
+ unsigned int time;
+ char change_id[16];
+ int duplicate;
+ char dup_from[16];
+ char reserved[92];
+};
+
/* A firmware blob: 512-byte header followed by the code payload. */
+struct firmware_s {
+ union {
+ struct fw_head_s head;
+ char buf[512];
+ };
+ char data[0];
+};
+
/* On-media package header (PACK container of multiple firmwares);
 * padded to 256 bytes by reserved[]. */
+struct package_head_s {
+ int magic;
+ int size;
+ int checksum;
+ int total;
+ int version;
+ int submit_count;
+ char change_id[16];
+ char commit[16];
+ char history_change_id[30];
+ char reserved[62];
+};
+
/* A package: 256-byte header followed by package_info_s entries. */
+struct package_s {
+ union {
+ struct package_head_s head;
+ char buf[256];
+ };
+ char data[0];
+};
+
/* Per-entry header inside a package (name/format/cpu + payload length). */
+struct info_head_s {
+ char name[32];
+ char format[32];
+ char cpu[32];
+ int length;
+};
+
/* A package entry: 256-byte header followed by a firmware_s payload. */
+struct package_info_s {
+ union {
+ struct info_head_s head;
+ char buf[256];
+ };
+ char data[0];
+};
+
/* Char-device bookkeeping for the firmware driver. */
+struct fw_dev_s {
+ struct cdev cdev;
+ struct device *dev;
+ dev_t dev_no;
+};
+
+#endif
diff --git a/drivers/common/firmware/firmware_type.c b/drivers/common/firmware/firmware_type.c new file mode 100644 index 0000000..8d95c12 --- /dev/null +++ b/drivers/common/firmware/firmware_type.c
@@ -0,0 +1,129 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include "firmware_type.h" +#include "../chips/decoder_cpu_ver_info.h" + +static const struct format_name_s format_name[] = { + {VIDEO_DEC_MPEG12, "mpeg12"}, + {VIDEO_DEC_MPEG12_MULTI, "mpeg12_multi"}, + {VIDEO_DEC_MPEG4_3, "mpeg4_3"}, + {VIDEO_DEC_MPEG4_4, "mpeg4_4"}, + {VIDEO_DEC_MPEG4_4_MULTI, "mpeg4_4_multi"}, + {VIDEO_DEC_MPEG4_5, "xvid"}, + {VIDEO_DEC_MPEG4_5_MULTI, "xvid_multi"}, + {VIDEO_DEC_H263, "h263"}, + {VIDEO_DEC_H263_MULTI, "h263_multi"}, + {VIDEO_DEC_MJPEG, "mjpeg"}, + {VIDEO_DEC_MJPEG_MULTI, "mjpeg_multi"}, + {VIDEO_DEC_REAL_V8, "real_v8"}, + {VIDEO_DEC_REAL_V9, "real_v9"}, + {VIDEO_DEC_VC1, "vc1"}, + {VIDEO_DEC_VC1_G12A, "vc1_g12a"}, + {VIDEO_DEC_AVS, "avs"}, + {VIDEO_DEC_AVS_GXM, "avs_gxm"}, + {VIDEO_DEC_AVS_NOCABAC, "avs_no_cabac"}, + {VIDEO_DEC_AVS_MULTI, "avs_multi"}, + {VIDEO_DEC_H264, "h264"}, + {VIDEO_DEC_H264_MVC, "h264_mvc"}, + {VIDEO_DEC_H264_MVC_GXM, "h264_mvc_gxm"}, + {VIDEO_DEC_H264_MULTI, "h264_multi"}, + {VIDEO_DEC_H264_MULTI_MMU, "h264_multi_mmu"}, + {VIDEO_DEC_H264_MULTI_GXM, "h264_multi_gxm"}, + {VIDEO_DEC_HEVC, "hevc"}, + {VIDEO_DEC_HEVC_MMU, "hevc_mmu"}, + {VIDEO_DEC_HEVC_MMU_SWAP, "hevc_mmu_swap"}, + {VIDEO_DEC_HEVC_G12A, 
"hevc_g12a"}, + {VIDEO_DEC_VP9, "vp9"}, + {VIDEO_DEC_VP9_MMU, "vp9_mmu"}, + {VIDEO_DEC_VP9_G12A, "vp9_g12a"}, + {VIDEO_DEC_AVS2, "avs2"}, + {VIDEO_DEC_AVS2_MMU, "avs2_mmu"}, + {VIDEO_DEC_AV1_MMU, "av1_mmu"}, + {VIDEO_ENC_H264, "h264_enc"}, + {VIDEO_ENC_JPEG, "jpeg_enc"}, + {FIRMWARE_MAX, "unknown"}, +}; + +static const struct cpu_type_s cpu_type[] = { + {AM_MESON_CPU_MAJOR_ID_GXL, "gxl"}, + {AM_MESON_CPU_MAJOR_ID_GXM, "gxm"}, + {AM_MESON_CPU_MAJOR_ID_TXL, "txl"}, + {AM_MESON_CPU_MAJOR_ID_TXLX, "txlx"}, + {AM_MESON_CPU_MAJOR_ID_AXG, "axg"}, + {AM_MESON_CPU_MAJOR_ID_GXLX, "gxlx"}, + {AM_MESON_CPU_MAJOR_ID_TXHD, "txhd"}, + {AM_MESON_CPU_MAJOR_ID_G12A, "g12a"}, + {AM_MESON_CPU_MAJOR_ID_G12B, "g12b"}, + {AM_MESON_CPU_MAJOR_ID_GXLX2, "gxlx2"}, + {AM_MESON_CPU_MAJOR_ID_SM1, "sm1"}, + {AM_MESON_CPU_MAJOR_ID_TL1, "tl1"}, + {AM_MESON_CPU_MAJOR_ID_TM2, "tm2"}, + {AM_MESON_CPU_MAJOR_ID_SC2, "sc2"}, + {AM_MESON_CPU_MAJOR_ID_T5, "t5"}, + {AM_MESON_CPU_MAJOR_ID_T5D, "t5d"}, + {AM_MESON_CPU_MAJOR_ID_T7, "t7"}, + {AM_MESON_CPU_MAJOR_ID_S4, "s4"}, + {AM_MESON_CPU_MAJOR_ID_T3, "t3"}, + {AM_MESON_CPU_MAJOR_ID_P1, "p1"}, + {AM_MESON_CPU_MAJOR_ID_S4D, "s4d"}, +}; + +const char *get_fw_format_name(unsigned int format) +{ + const char *name = "unknown"; + int i, size = ARRAY_SIZE(format_name); + + for (i = 0; i < size; i++) { + if (format == format_name[i].format) + name = format_name[i].name; + } + + return name; +} +EXPORT_SYMBOL(get_fw_format_name); + +unsigned int get_fw_format(const char *name) +{ + unsigned int format = FIRMWARE_MAX; + int i, size = ARRAY_SIZE(format_name); + + for (i = 0; i < size; i++) { + if (!strcmp(name, format_name[i].name)) + format = format_name[i].format; + } + + return format; +} +EXPORT_SYMBOL(get_fw_format); + +int fw_get_cpu(const char *name) +{ + int type = 0; + int i, size = ARRAY_SIZE(cpu_type); + + for (i = 0; i < size; i++) { + if (!strcmp(name, cpu_type[i].name)) + type = cpu_type[i].type; + } + + return type; +} +EXPORT_SYMBOL(fw_get_cpu); +
diff --git a/drivers/common/firmware/firmware_type.h b/drivers/common/firmware/firmware_type.h new file mode 100644 index 0000000..4615baf --- /dev/null +++ b/drivers/common/firmware/firmware_type.h
@@ -0,0 +1,99 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#ifndef __VIDEO_FIRMWARE_FORMAT_
+#define __VIDEO_FIRMWARE_FORMAT_
+
+#include <linux/slab.h>
+
/* Packs four ASCII chars into a 32-bit tag; used for newer firmware ids so
 * they never collide with the small sequential legacy ids below. */
+/* example: #define VIDEO_DEC_AV1 TAG('A', 'V', '1', '-')*/
+#define TAG(a, b, c, d)\
+ ((a << 24) | (b << 16) | (c << 8) | d)
+
/* Legacy sequential firmware format ids (values are ABI — do not renumber;
 * note 22 is intentionally unassigned). */
+/* fws define */
+#define VIDEO_DEC_MPEG12 (0)
+#define VIDEO_DEC_MPEG4_3 (1)
+#define VIDEO_DEC_MPEG4_4 (2)
+#define VIDEO_DEC_MPEG4_5 (3)
+#define VIDEO_DEC_H263 (4)
+#define VIDEO_DEC_MJPEG (5)
+#define VIDEO_DEC_MJPEG_MULTI (6)
+#define VIDEO_DEC_REAL_V8 (7)
+#define VIDEO_DEC_REAL_V9 (8)
+#define VIDEO_DEC_VC1 (9)
+#define VIDEO_DEC_AVS (10)
+#define VIDEO_DEC_H264 (11)
+#define VIDEO_DEC_H264_4k2K (12)
+#define VIDEO_DEC_H264_4k2K_SINGLE (13)
+#define VIDEO_DEC_H264_MVC (14)
+#define VIDEO_DEC_H264_MULTI (15)
+#define VIDEO_DEC_HEVC (16)
+#define VIDEO_DEC_HEVC_MMU (17)
+#define VIDEO_DEC_VP9 (18)
+#define VIDEO_DEC_VP9_MMU (19)
+#define VIDEO_ENC_H264 (20)
+#define VIDEO_ENC_JPEG (21)
+#define VIDEO_DEC_H264_MULTI_MMU (23)
+#define VIDEO_DEC_HEVC_G12A (24)
+#define VIDEO_DEC_VP9_G12A (25)
+#define VIDEO_DEC_AVS2 (26)
+#define VIDEO_DEC_AVS2_MMU (27)
+#define VIDEO_DEC_AVS_GXM (28)
+#define VIDEO_DEC_AVS_NOCABAC (29)
+#define VIDEO_DEC_H264_MULTI_GXM (30)
+#define VIDEO_DEC_H264_MVC_GXM (31)
+#define VIDEO_DEC_VC1_G12A (32)
/* Newer formats use 4-char TAG ids. */
+#define VIDEO_DEC_MPEG12_MULTI TAG('M', '1', '2', 'M')
+#define VIDEO_DEC_MPEG4_4_MULTI TAG('M', '4', '4', 'M')
+#define VIDEO_DEC_MPEG4_5_MULTI TAG('M', '4', '5', 'M')
+#define VIDEO_DEC_H263_MULTI TAG('2', '6', '3', 'M')
+#define VIDEO_DEC_HEVC_MMU_SWAP TAG('2', '6', '5', 'S')
+#define VIDEO_DEC_AVS_MULTI TAG('A', 'V', 'S', 'M')
+#define VIDEO_DEC_AV1_MMU TAG('A', 'V', '1', 'M')
+
/* Sentinel "unknown/any" format id. */
+/* ... */
+#define FIRMWARE_MAX (UINT_MAX)
+
/* File-type values for fw_files_s.file_type. */
+#define VIDEO_PACKAGE (0)
+#define VIDEO_FW_FILE (1)
+
/* Firmware-type values for fw_files_s.fw_type. */
+#define VIDEO_DECODE (0)
+#define VIDEO_ENCODE (1)
+#define VIDEO_MISC (2)
+
/* OP-TEE firmware load targets — presumably secure-world decoder cores;
 * confirm against the TEE loader. */
+#define OPTEE_VDEC_LEGENCY (0)
+#define OPTEE_VDEC (1)
+#define OPTEE_VDEC_HEVC (2)
+#define OPTEE_VDEC_HCDEC (3)
+
/* Format-id to name mapping entry (see firmware_type.c tables). */
+struct format_name_s {
+ unsigned int format;
+ const char *name;
+};
+
/* Cpu major-id to name mapping entry. */
+struct cpu_type_s {
+ int type;
+ const char *name;
+};
+
+const char *get_fw_format_name(unsigned int format);
+unsigned int get_fw_format(const char *name);
+int fw_get_cpu(const char *name);
+
+#endif
diff --git a/drivers/common/media_clock/Makefile b/drivers/common/media_clock/Makefile new file mode 100644 index 0000000..975b5e5 --- /dev/null +++ b/drivers/common/media_clock/Makefile
@@ -0,0 +1,6 @@
# Kbuild fragment: build media_clock.ko from the clock/gate sources plus
# the shared chips/ objects (referenced out-of-tree via ../).
+obj-m += media_clock.o
+media_clock-objs += ../chips/chips.o
+media_clock-objs += clk/clkg12.o
+media_clock-objs += clk/clk.o
+media_clock-objs += switch/amports_gate.o
+media_clock-objs += ../chips/decoder_cpu_ver_info.o
diff --git a/drivers/common/media_clock/clk/clk.c b/drivers/common/media_clock/clk/clk.c new file mode 100644 index 0000000..6340c1a --- /dev/null +++ b/drivers/common/media_clock/clk/clk.c
@@ -0,0 +1,473 @@ +/* + * drivers/amlogic/media/common/arch/clk/clk.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> + +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/registers/cpu_version.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../../../frame_provider/decoder/utils/vdec.h" +#include "../../chips/chips.h" +#include "clk_priv.h" +#include <linux/amlogic/media/utils/log.h> +#include "../../chips/decoder_cpu_ver_info.h" + +#define p_vdec() (get_current_vdec_chip()->clk_mgr[VDEC_1]) +#define p_vdec2() (get_current_vdec_chip()->clk_mgr[VDEC_2]) +#define p_vdec_hcodec() (get_current_vdec_chip()->clk_mgr[VDEC_HCODEC]) +#define p_vdec_hevc() (get_current_vdec_chip()->clk_mgr[VDEC_HEVC]) +#define p_vdec_hevc_back() (get_current_vdec_chip()->clk_mgr[VDEC_HEVCB]) + +static int clock_source_wxhxfps_saved[VDEC_MAX + 1]; + +#define IF_HAVE_RUN(p, fn)\ + do {\ + if (p && p->fn)\ + p->fn();\ + } while (0) +/* + *#define IF_HAVE_RUN_P1_RET(p, fn, p1)\ + * do {\ + * pr_debug("%s-----%d\n", __func__, clk);\ + * if (p && p->fn)\ + * return p->fn(p1);\ + * else\ + * return -1;\ + * } while (0) + * + *#define IF_HAVE_RUN_RET(p, fn)\ + * do {\ 
+ * if (p && p->fn)\ + * return p->fn();\ + * else\ + * return 0;\ + * } while (0) + */ + +int vdec_clock_init(void) +{ + if (p_vdec() && p_vdec()->clock_init) + return p_vdec()->clock_init(); + else + return 0; +} +EXPORT_SYMBOL(vdec_clock_init); + +/* + *clk ==0 : + * to be release. + * released shared clk, + *clk ==1 :default low clk + *clk ==2 :default high clk + */ +int vdec_clock_set(int clk) +{ + pr_debug("%s-----%d\n", __func__, clk); + if (p_vdec() && p_vdec()->clock_set) + return p_vdec()->clock_set(clk); + else + return -1; +} +EXPORT_SYMBOL(vdec_clock_set); + +void vdec_clock_enable(void) +{ + vdec_clock_set(1); +} +EXPORT_SYMBOL(vdec_clock_enable); + +void vdec_clock_hi_enable(void) +{ + vdec_clock_set(2); +} +EXPORT_SYMBOL(vdec_clock_hi_enable); + +void vdec_clock_on(void) +{ + IF_HAVE_RUN(p_vdec(), clock_on); +} +EXPORT_SYMBOL(vdec_clock_on); + +void vdec_clock_off(void) +{ + IF_HAVE_RUN(p_vdec(), clock_off); + clock_source_wxhxfps_saved[VDEC_1] = 0; +} +EXPORT_SYMBOL(vdec_clock_off); + +int vdec2_clock_set(int clk) +{ + pr_debug("%s-----%d\n", __func__, clk); + if (p_vdec2() && p_vdec2()->clock_set) + return p_vdec2()->clock_set(clk); + else + return -1; +} +EXPORT_SYMBOL(vdec2_clock_set); + +void vdec2_clock_enable(void) +{ + vdec2_clock_set(1); +} +EXPORT_SYMBOL(vdec2_clock_enable); + +void vdec2_clock_hi_enable(void) +{ + vdec2_clock_set(2); +} +EXPORT_SYMBOL(vdec2_clock_hi_enable); + +void vdec2_clock_on(void) +{ + IF_HAVE_RUN(p_vdec2(), clock_on); +} +EXPORT_SYMBOL(vdec2_clock_on); + +void vdec2_clock_off(void) +{ + IF_HAVE_RUN(p_vdec2(), clock_off); + clock_source_wxhxfps_saved[VDEC_2] = 0; +} +EXPORT_SYMBOL(vdec2_clock_off); + +int hcodec_clock_set(int clk) +{ + pr_debug("%s-----%d\n", __func__, clk); + if (p_vdec_hcodec() && p_vdec_hcodec()->clock_set) + return p_vdec_hcodec()->clock_set(clk); + else + return -1; +} +EXPORT_SYMBOL(hcodec_clock_set); + +void hcodec_clock_enable(void) +{ + hcodec_clock_set(667); +} 
+EXPORT_SYMBOL(hcodec_clock_enable); + +void hcodec_clock_hi_enable(void) +{ + hcodec_clock_set(2); +} +EXPORT_SYMBOL(hcodec_clock_hi_enable); + +void hcodec_clock_on(void) +{ + IF_HAVE_RUN(p_vdec_hcodec(), clock_on); +} +EXPORT_SYMBOL(hcodec_clock_on); + +void hcodec_clock_off(void) +{ + IF_HAVE_RUN(p_vdec_hcodec(), clock_off); + clock_source_wxhxfps_saved[VDEC_HCODEC] = 0; +} +EXPORT_SYMBOL(hcodec_clock_off); + +int hevc_back_clock_init(void) +{ + if (p_vdec_hevc_back() && p_vdec_hevc_back()->clock_init) + return p_vdec_hevc_back()->clock_init(); + else + return 0; +} +EXPORT_SYMBOL(hevc_back_clock_init); + +int hevc_back_clock_set(int clk) +{ + pr_debug("%s-----%d\n", __func__, clk); + if (p_vdec_hevc_back() && p_vdec_hevc_back()->clock_set) + return p_vdec_hevc_back()->clock_set(clk); + else + return -1; +} +EXPORT_SYMBOL(hevc_back_clock_set); + +void hevc_back_clock_enable(void) +{ + hevc_back_clock_set(1); +} +EXPORT_SYMBOL(hevc_back_clock_enable); + +void hevc_back_clock_hi_enable(void) +{ + hevc_back_clock_set(2); +} +EXPORT_SYMBOL(hevc_back_clock_hi_enable); + +int hevc_clock_init(void) +{ + if (p_vdec_hevc() && p_vdec_hevc()->clock_init) + return p_vdec_hevc()->clock_init(); + else + return 0; +} +EXPORT_SYMBOL(hevc_clock_init); + +int hevc_clock_set(int clk) +{ + pr_debug("%s-----%d\n", __func__, clk); + if (p_vdec_hevc() && p_vdec_hevc()->clock_set) + return p_vdec_hevc()->clock_set(clk); + else + return -1; +} +EXPORT_SYMBOL(hevc_clock_set); + +void hevc_clock_enable(void) +{ + hevc_clock_set(1); +} +EXPORT_SYMBOL(hevc_clock_enable); + +void hevc_clock_hi_enable(void) +{ + hevc_clock_set(2); +} +EXPORT_SYMBOL(hevc_clock_hi_enable); + +void hevc_back_clock_on(void) +{ + IF_HAVE_RUN(p_vdec_hevc_back(), clock_on); +} +EXPORT_SYMBOL(hevc_back_clock_on); + +void hevc_back_clock_off(void) +{ + IF_HAVE_RUN(p_vdec_hevc_back(), clock_off); + clock_source_wxhxfps_saved[VDEC_HEVCB] = 0; +} +EXPORT_SYMBOL(hevc_back_clock_off); + +void hevc_clock_on(void) +{ + 
IF_HAVE_RUN(p_vdec_hevc(), clock_on); +} +EXPORT_SYMBOL(hevc_clock_on); + +void hevc_clock_off(void) +{ + IF_HAVE_RUN(p_vdec_hevc(), clock_off); + clock_source_wxhxfps_saved[VDEC_HEVC] = 0; +} +EXPORT_SYMBOL(hevc_clock_off); + +int vdec_source_get(enum vdec_type_e core) +{ + return clock_source_wxhxfps_saved[core]; +} +EXPORT_SYMBOL(vdec_source_get); + +int vdec_clk_get(enum vdec_type_e core) +{ + return get_current_vdec_chip()->clk_mgr[core]->clock_get(core); +} +EXPORT_SYMBOL(vdec_clk_get); + +int get_clk_with_source(int format, int w_x_h_fps) +{ + struct clk_set_setting *p_setting; + int i; + int clk = -2; + + p_setting = get_current_vdec_chip()->clk_setting_array; + if (!p_setting || format < 0 || format > VFORMAT_MAX) { + pr_info("error on get_clk_with_source ,%p,%d\n", + p_setting, format); + return -1; /*no setting found. */ + } + p_setting = &p_setting[format]; + for (i = 0; i < MAX_CLK_SET; i++) { + if (p_setting->set[i].wh_X_fps > w_x_h_fps) { + clk = p_setting->set[i].clk_Mhz; + break; + } + } + return clk; +} +EXPORT_SYMBOL(get_clk_with_source); + +bool is_hevc_front_back_clk_combined(void) +{ + int cpu_id = get_cpu_major_id(); + + if (cpu_id == AM_MESON_CPU_MAJOR_ID_T5 || + (cpu_id == AM_MESON_CPU_MAJOR_ID_T5D) || + (cpu_id == AM_MESON_CPU_MAJOR_ID_S4) || + (cpu_id == AM_MESON_CPU_MAJOR_ID_S4D) || + (cpu_id == AM_MESON_CPU_MAJOR_ID_T5W)) + return true; + + return false; +} +EXPORT_SYMBOL(is_hevc_front_back_clk_combined); + +int vdec_source_changed_for_clk_set(int format, int width, int height, int fps) +{ + int clk = get_clk_with_source(format, width * height * fps); + int ret_clk; + + if (clk < 0) { + pr_info("can't get valid clk for source ,%d,%d,%d\n", + width, height, fps); + if (format >= 1920 && width >= 1080 && fps >= 30) + clk = 2; /*default high clk */ + else + clk = 0; /*default clk. */ + } + if (width * height * fps == 0) + clk = 0; + /* + *clk == 0 + *is used for set default clk; + *if used supper clk. + *changed to default min clk. 
+ */ + + if (format == VFORMAT_HEVC || format == VFORMAT_VP9 + || format == VFORMAT_AVS2 + || format == VFORMAT_AV1) { + ret_clk = hevc_clock_set(clk); + clock_source_wxhxfps_saved[VDEC_HEVC] = width * height * fps; + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A && + !is_hevc_front_back_clk_combined()) { + ret_clk = hevc_back_clock_set(clk); + clock_source_wxhxfps_saved[VDEC_HEVCB] = width * height * fps; + } + } else if (format == VFORMAT_H264_ENC || format == VFORMAT_JPEG_ENC) { + ret_clk = hcodec_clock_set(clk); + clock_source_wxhxfps_saved[VDEC_HCODEC] = width * height * fps; + } else if (format == VFORMAT_H264_4K2K && + get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_M8) { + ret_clk = vdec2_clock_set(clk); + clock_source_wxhxfps_saved[VDEC_2] = width * height * fps; + ret_clk = vdec_clock_set(clk); + clock_source_wxhxfps_saved[VDEC_1] = width * height * fps; + } else { + ret_clk = vdec_clock_set(clk); + clock_source_wxhxfps_saved[VDEC_1] = width * height * fps; + } + return ret_clk; +} +EXPORT_SYMBOL(vdec_source_changed_for_clk_set); + +static int register_vdec_clk_mgr_per_cpu(int cputype, + enum vdec_type_e vdec_type, struct chip_vdec_clk_s *t_mgr) +{ + + struct chip_vdec_clk_s *mgr; + + if (cputype != get_cpu_major_id() || vdec_type >= VDEC_MAX) { + /* + *pr_info("ignore vdec clk mgr for vdec[%d] cpu=%d\n", + *vdec_type, cputype); + */ + return 0; /* ignore don't needed firmare. 
*/ + } + mgr = kmalloc(sizeof(struct chip_vdec_clk_s), GFP_KERNEL); + if (!mgr) + return -ENOMEM; + *mgr = *t_mgr; + /* + *pr_info("register vdec clk mgr for vdec[%d]\n", vdec_type); + */ + if (mgr->clock_init) { + if (mgr->clock_init()) { + kfree(mgr); + return -ENOMEM; + } + } + get_current_vdec_chip()->clk_mgr[vdec_type] = mgr; + return 0; +} + +int register_vdec_clk_mgr(int cputype[], enum vdec_type_e vdec_type, + struct chip_vdec_clk_s *t_mgr) +{ + int i = 0; + + while (cputype[i] > 0) { + register_vdec_clk_mgr_per_cpu(cputype[i], vdec_type, t_mgr); + i++; + } + return 0; +} +EXPORT_SYMBOL(register_vdec_clk_mgr); + +int unregister_vdec_clk_mgr(enum vdec_type_e vdec_type) +{ + kfree(get_current_vdec_chip()->clk_mgr[vdec_type]); + + return 0; +} +EXPORT_SYMBOL(unregister_vdec_clk_mgr); + +static int register_vdec_clk_setting_per_cpu(int cputype, + struct clk_set_setting *setting, int size) +{ + + struct clk_set_setting *p_setting; + + if (cputype != get_cpu_major_id()) { + /* + *pr_info("ignore clk_set_setting for cpu=%d\n", + *cputype); + */ + return 0; /* ignore don't needed this setting . */ + } + p_setting = kmalloc(size, GFP_KERNEL); + if (!p_setting) + return -ENOMEM; + memcpy(p_setting, setting, size); + + pr_info("register clk_set_setting cpu[%d]\n", cputype); + + get_current_vdec_chip()->clk_setting_array = p_setting; + return 0; +} + +int register_vdec_clk_setting(int cputype[], + struct clk_set_setting *p_seting, int size) +{ + int i = 0; + + while (cputype[i] > 0) { + register_vdec_clk_setting_per_cpu(cputype[i], p_seting, size); + i++; + } + return 0; +} +EXPORT_SYMBOL(register_vdec_clk_setting); + +int unregister_vdec_clk_setting(void) +{ + kfree(get_current_vdec_chip()->clk_setting_array); + + return 0; +} +EXPORT_SYMBOL(unregister_vdec_clk_setting); +
diff --git a/drivers/common/media_clock/clk/clk.h b/drivers/common/media_clock/clk/clk.h new file mode 100644 index 0000000..bbc3bee --- /dev/null +++ b/drivers/common/media_clock/clk/clk.h
@@ -0,0 +1,177 @@ +/* + * drivers/amlogic/media/common/arch/clk/clk.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef VDEC_CHIP_CLK_HEADER +#define VDEC_CHIP_CLK_HEADER +#include <linux/types.h> +#include <linux/init.h> +#include <linux/module.h> +#include "clk_priv.h" +#include <linux/amlogic/media/clk/gp_pll.h> + +#ifndef INCLUDE_FROM_ARCH_CLK_MGR +int vdec_clock_init(void); +int vdec_clock_set(int clk); +int vdec2_clock_set(int clk); + +int hcodec_clock_set(int clk); +int hevc_clock_init(void); +int hevc_clock_set(int clk); + +void vdec_clock_on(void); +void vdec_clock_off(void); +void vdec2_clock_on(void); + +void vdec2_clock_off(void); +void hcodec_clock_on(void); +void hcodec_clock_off(void); +void hevc_clock_on(void); +void hevc_clock_off(void); + +int hevc_back_clock_init(void); +void hevc_back_clock_on(void); +void hevc_back_clock_off(void); +int hevc_back_clock_set(int clk); +void hevc_back_clock_enable(void); +void hevc_back_clock_hi_enable(void); + +int vdec_source_get(enum vdec_type_e core); +int vdec_clk_get(enum vdec_type_e core); + +bool is_hevc_front_back_clk_combined(void); + +int vdec_source_changed_for_clk_set(int format, int width, int height, int fps); +int get_clk_with_source(int format, int w_x_h_fps); + +void vdec_clock_enable(void); +void vdec_clock_hi_enable(void); +void hcodec_clock_enable(void); +void hcodec_clock_hi_enable(void); +void hevc_clock_enable(void); +void hevc_clock_hi_enable(void); +void 
vdec2_clock_enable(void); +void vdec2_clock_hi_enable(void); +void set_clock_gate(struct gate_switch_node *nodes, int num); + +#endif +int register_vdec_clk_mgr(int cputype[], + enum vdec_type_e vdec_type, struct chip_vdec_clk_s *t_mgr); + +int unregister_vdec_clk_mgr(enum vdec_type_e vdec_type); + +int register_vdec_clk_setting(int cputype[], + struct clk_set_setting *p_seting, int size); + +int unregister_vdec_clk_setting(void); + +#ifdef INCLUDE_FROM_ARCH_CLK_MGR +static struct chip_vdec_clk_s vdec_clk_mgr __initdata = { + .clock_init = vdec_clock_init, + .clock_set = vdec_clock_set, + .clock_on = vdec_clock_on, + .clock_off = vdec_clock_off, + .clock_get = vdec_clock_get, +}; + +#ifdef VDEC_HAS_VDEC2 +static struct chip_vdec_clk_s vdec2_clk_mgr __initdata = { + .clock_set = vdec2_clock_set, + .clock_on = vdec2_clock_on, + .clock_off = vdec2_clock_off, + .clock_get = vdec_clock_get, +}; +#endif + +#ifdef VDEC_HAS_HEVC +static struct chip_vdec_clk_s vdec_hevc_clk_mgr __initdata = { + .clock_init = hevc_clock_init, + .clock_set = hevc_clock_set, + .clock_on = hevc_clock_on, + .clock_off = hevc_clock_off, + .clock_get = vdec_clock_get, +}; +static struct chip_vdec_clk_s vdec_hevc_back_clk_mgr __initdata = { + .clock_init = hevc_back_clock_init, + .clock_set = hevc_back_clock_set, + .clock_on = hevc_back_clock_on, + .clock_off = hevc_back_clock_off, + .clock_get = vdec_clock_get, +}; +#endif + +#ifdef VDEC_HAS_VDEC_HCODEC +static struct chip_vdec_clk_s vdec_hcodec_clk_mgr __initdata = { + .clock_set = hcodec_clock_set, + .clock_on = hcodec_clock_on, + .clock_off = hcodec_clock_off, + .clock_get = vdec_clock_get, +}; +#endif + +static int __init vdec_init_clk(void) +{ + int cpus[] = CLK_FOR_CPU; + + register_vdec_clk_mgr(cpus, VDEC_1, &vdec_clk_mgr); +#ifdef VDEC_HAS_VDEC2 + register_vdec_clk_mgr(cpus, VDEC_2, &vdec2_clk_mgr); +#endif +#ifdef VDEC_HAS_HEVC + register_vdec_clk_mgr(cpus, VDEC_HEVC, &vdec_hevc_clk_mgr); + if (get_cpu_major_id() >= 
AM_MESON_CPU_MAJOR_ID_G12A) + register_vdec_clk_mgr(cpus, VDEC_HEVCB, &vdec_hevc_back_clk_mgr); +#endif +#ifdef VDEC_HAS_VDEC_HCODEC + register_vdec_clk_mgr(cpus, VDEC_HCODEC, &vdec_hcodec_clk_mgr); +#endif + +#ifdef VDEC_HAS_CLK_SETTINGS + register_vdec_clk_setting(cpus, + clks_for_formats, sizeof(clks_for_formats)); +#endif + return 0; +} + +static void __exit vdec_clk_exit(void) +{ + unregister_vdec_clk_mgr(VDEC_1); +#ifdef VDEC_HAS_VDEC2 + unregister_vdec_clk_mgr(VDEC_2); +#endif +#ifdef VDEC_HAS_HEVC + unregister_vdec_clk_mgr(VDEC_HEVC); +#endif +#ifdef VDEC_HAS_VDEC_HCODEC + unregister_vdec_clk_mgr(VDEC_HCODEC); +#endif +#ifdef VDEC_HAS_CLK_SETTINGS + unregister_vdec_clk_setting(); +#endif + pr_info("media clock exit.\n"); +} + +#define ARCH_VDEC_CLK_INIT()\ + module_init(vdec_init_clk) + +#define ARCH_VDEC_CLK_EXIT()\ + module_exit(vdec_clk_exit) + +MODULE_DESCRIPTION("AMLOGIC clk Driver"); +MODULE_LICENSE("GPL"); + +#endif +#endif
diff --git a/drivers/common/media_clock/clk/clk_priv.h b/drivers/common/media_clock/clk/clk_priv.h new file mode 100644 index 0000000..60b7be0 --- /dev/null +++ b/drivers/common/media_clock/clk/clk_priv.h
@@ -0,0 +1,38 @@ +/* + * drivers/amlogic/media/common/arch/clk/clk_priv.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef AMPORTS_CLK_PRIV_HEADER +#define AMPORTS_CLK_PRIV_HEADER + +struct clk_set { + u32 wh_X_fps; /* [x*y*fps */ + u32 clk_Mhz; /*min MHZ */ +}; +#define MAX_CLK_SET 6 +struct clk_set_setting { + struct clk_set set[MAX_CLK_SET]; +}; + +struct chip_vdec_clk_s { + int (*clock_get)(enum vdec_type_e core); + int (*clock_init)(void); + int (*clock_set)(int clk); + void (*clock_on)(void); + void (*clock_off)(void); + void (*clock_prepare_switch)(void); +}; +#endif
diff --git a/drivers/common/media_clock/clk/clkg12.c b/drivers/common/media_clock/clk/clkg12.c new file mode 100644 index 0000000..c49d150 --- /dev/null +++ b/drivers/common/media_clock/clk/clkg12.c
@@ -0,0 +1,1071 @@ +/* + * drivers/amlogic/media/common/arch/clk/clkgx.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/amlogic/media/clk/gp_pll.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include <linux/amlogic/media/utils/amports_config.h> +#include "../../../frame_provider/decoder/utils/vdec.h" +#include <linux/amlogic/media/registers/register.h> +#include "clk_priv.h" +#include <linux/amlogic/media/utils/log.h> + +#include <linux/amlogic/media/registers/register_ops.h> +#include "../switch/amports_gate.h" +#include "../../chips/decoder_cpu_ver_info.h" + +#define MHz (1000000) +#define debug_print pr_info +#define TL1_HEVC_MAX_CLK (800) + +//#define NO_CLKTREE + +/* set gp0 648M vdec use gp0 clk*/ +#define VDEC1_648M() \ + WRITE_HHI_REG_BITS(HHI_VDEC_CLK_CNTL, (6 << 9) | (0), 0, 16) + +#define HEVC_648M() \ + WRITE_HHI_REG_BITS(HHI_VDEC2_CLK_CNTL, (6 << 9) | (0), 16, 16) + +/*set gp0 1296M vdec use gp0 clk div2*/ +#define VDEC1_648M_DIV() \ + WRITE_HHI_REG_BITS(HHI_VDEC_CLK_CNTL, (6 << 9) | (1), 0, 16) + +#define HEVC_648M_DIV() \ + WRITE_HHI_REG_BITS(HHI_VDEC2_CLK_CNTL, (6 << 9) | (1), 16, 16) + +#define VDEC1_WITH_GP_PLL() \ + ((READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0xe00) == 0xc00) +#define HEVC_WITH_GP_PLL() \ + ((READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0xe000000) == 
0xc000000) + +#define VDEC1_CLOCK_ON() \ + do { if (is_meson_m8_cpu()) { \ + WRITE_HHI_REG_BITS(HHI_VDEC_CLK_CNTL, 1, 8, 1); \ + WRITE_VREG_BITS(DOS_GCLK_EN0, 0x3ff, 0, 10); \ + } else { \ + WRITE_HHI_REG_BITS(HHI_VDEC_CLK_CNTL, 1, 8, 1); \ + WRITE_HHI_REG_BITS(HHI_VDEC3_CLK_CNTL, 0, 15, 1); \ + WRITE_HHI_REG_BITS(HHI_VDEC3_CLK_CNTL, 0, 8, 1); \ + WRITE_VREG_BITS(DOS_GCLK_EN0, 0x3ff, 0, 10); \ + } \ + } while (0) + +#define VDEC2_CLOCK_ON() do {\ + WRITE_HHI_REG_BITS(HHI_VDEC2_CLK_CNTL, 1, 8, 1); \ + WRITE_VREG(DOS_GCLK_EN1, 0x3ff);\ + } while (0) + +#define HCODEC_CLOCK_ON() do {\ + WRITE_HHI_REG_BITS(HHI_VDEC_CLK_CNTL, 1, 24, 1); \ + WRITE_VREG_BITS(DOS_GCLK_EN0, 0x7fff, 12, 15);\ + } while (0) +#define HEVC_CLOCK_ON() do {\ + WRITE_HHI_REG_BITS(HHI_VDEC2_CLK_CNTL, 1, 24, 1); \ + WRITE_HHI_REG_BITS(HHI_VDEC2_CLK_CNTL, 1, 8, 1); \ + WRITE_HHI_REG_BITS(HHI_VDEC4_CLK_CNTL, 0, 31, 1); \ + WRITE_HHI_REG_BITS(HHI_VDEC4_CLK_CNTL, 0, 15, 1); \ + WRITE_HHI_REG_BITS(HHI_VDEC4_CLK_CNTL, 0, 24, 1); \ + WRITE_VREG(DOS_GCLK_EN3, 0xffffffff);\ + } while (0) +#define VDEC1_SAFE_CLOCK() do {\ + WRITE_HHI_REG_BITS(HHI_VDEC3_CLK_CNTL, \ + READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x7f, 0, 7); \ + WRITE_HHI_REG_BITS(HHI_VDEC3_CLK_CNTL, 1, 8, 1); \ + WRITE_HHI_REG_BITS(HHI_VDEC3_CLK_CNTL, 1, 15, 1);\ + } while (0) + +#define VDEC1_CLOCK_OFF() \ + WRITE_HHI_REG_BITS(HHI_VDEC_CLK_CNTL, 0, 8, 1) +#define VDEC2_CLOCK_OFF() \ + WRITE_HHI_REG_BITS(HHI_VDEC2_CLK_CNTL, 0, 8, 1) +#define HCODEC_CLOCK_OFF() \ + WRITE_HHI_REG_BITS(HHI_VDEC_CLK_CNTL, 0, 24, 1) +#define HEVC_SAFE_CLOCK() do { \ + WRITE_HHI_REG_BITS(HHI_VDEC4_CLK_CNTL, \ + (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) >> 16) & 0x7f, 16, 7);\ + WRITE_HHI_REG_BITS(HHI_VDEC4_CLK_CNTL, \ + (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) >> 25) & 0x7f, 25, 7);\ + WRITE_HHI_REG_BITS(HHI_VDEC4_CLK_CNTL, 1, 24, 1); \ + WRITE_HHI_REG_BITS(HHI_VDEC4_CLK_CNTL, 1, 31, 1);\ + WRITE_HHI_REG_BITS(HHI_VDEC4_CLK_CNTL, 1, 15, 1);\ + } while (0) + +#define HEVC_CLOCK_OFF() do {\ + 
WRITE_HHI_REG_BITS(HHI_VDEC2_CLK_CNTL, 0, 24, 1);\ + WRITE_HHI_REG_BITS(HHI_VDEC2_CLK_CNTL, 0, 8, 1);\ +}while(0) + +static int clock_real_clk[VDEC_MAX + 1]; + +static unsigned int set_frq_enable, vdec_frq, hevc_frq, hevcb_frq; + +#ifdef NO_CLKTREE +static struct gp_pll_user_handle_s *gp_pll_user_vdec, *gp_pll_user_hevc; +static bool is_gp0_div2 = true; + +static int gp_pll_user_cb_vdec(struct gp_pll_user_handle_s *user, + int event) +{ + debug_print("gp_pll_user_cb_vdec call\n"); + if (event == GP_PLL_USER_EVENT_GRANT) { + struct clk *clk = clk_get(NULL, "gp0_pll"); + if (!IS_ERR(clk)) { + if (is_gp0_div2) + clk_set_rate(clk, 1296000000UL); + else + clk_set_rate(clk, 648000000UL); + VDEC1_SAFE_CLOCK(); + VDEC1_CLOCK_OFF(); + if (is_gp0_div2) + VDEC1_648M_DIV(); + else + VDEC1_648M(); + + VDEC1_CLOCK_ON(); + debug_print("gp_pll_user_cb_vdec call set\n"); + } + } + return 0; +} + +static int gp_pll_user_cb_hevc(struct gp_pll_user_handle_s *user, + int event) +{ + debug_print("gp_pll_user_cb_hevc callback\n"); + if (event == GP_PLL_USER_EVENT_GRANT) { + struct clk *clk = clk_get(NULL, "gp0_pll"); + if (!IS_ERR(clk)) { + if (is_gp0_div2) + clk_set_rate(clk, 1296000000UL); + else + clk_set_rate(clk, 648000000UL); +// HEVC_SAFE_CLOCK(); + HEVC_CLOCK_OFF(); + if (is_gp0_div2) + HEVC_648M_DIV(); + else + HEVC_648M(); + HEVC_CLOCK_ON(); + debug_print("gp_pll_user_cb_hevc callback2\n"); + } + } + + return 0; +} + + +#endif + +struct clk_mux_s { + struct gate_switch_node *vdec_mux_node; + struct gate_switch_node *hcodec_mux_node; + struct gate_switch_node *hevc_mux_node; + struct gate_switch_node *hevc_back_mux_node; +}; + +struct clk_mux_s gclk; + +void vdec1_set_clk(int source, int div) +{ + pr_debug("vdec1_set_clk %d, %d\n", source, div); + WRITE_HHI_REG_BITS(HHI_VDEC_CLK_CNTL, (source << 9) | (div - 1), 0, 16); +} +EXPORT_SYMBOL(vdec1_set_clk); + +void hcodec_set_clk(int source, int div) +{ + WRITE_HHI_REG_BITS(HHI_VDEC_CLK_CNTL, + (source << 9) | (div - 1), 16, 16); +} 
+EXPORT_SYMBOL(hcodec_set_clk); + +void vdec2_set_clk(int source, int div) +{ + WRITE_HHI_REG_BITS(HHI_VDEC2_CLK_CNTL, + (source << 9) | (div - 1), 0, 16); +} +EXPORT_SYMBOL(vdec2_set_clk); + +//extern uint force_hevc_clock_cntl; +uint force_hevc_clock_cntl = 0; +void hevc_set_clk(int source, int div) +{ + if (force_hevc_clock_cntl) { + pr_info("%s, write force clock cntl %x\n", __func__, force_hevc_clock_cntl); + WRITE_HHI_REG(HHI_VDEC2_CLK_CNTL, force_hevc_clock_cntl); + } else { + pr_debug("hevc_set_clk %d, %d\n", source, div); + WRITE_HHI_REG_BITS(HHI_VDEC2_CLK_CNTL, + (source << 9) | (div - 1), 16, 16); + WRITE_HHI_REG_BITS(HHI_VDEC2_CLK_CNTL, (source << 9) | (div - 1), 0, 16); + } +} +EXPORT_SYMBOL(hevc_set_clk); + +void vdec_get_clk_source(int clk, int *source, int *div, int *rclk) +{ +#define source_div4 (0) +#define source_div3 (1) +#define source_div5 (2) +#define source_div7 (3) + if (clk > 500) { + *source = source_div3; + *div = 1; + *rclk = 667; + } else if (clk >= 500) { + *source = source_div4; + *div = 1; + *rclk = 500; + } else if (clk >= 400) { + *source = source_div5; + *div = 1; + *rclk = 400; + } else if (clk >= 333) { + *source = source_div3; + *div = 2; + *rclk = 333; + } else if (clk >= 200) { + *source = source_div5; + *div = 2; + *rclk = 200; + } else if (clk >= 166) { + *source = source_div4; + *div = 3; + *rclk = 166; + } else if (clk >= 133) { + *source = source_div5; + *div = 3; + *rclk = 133; + } else if (clk >= 100) { + *source = source_div5; + *div = 4; + *rclk = 100; + } else if (clk >= 50) { + *source = source_div5; + *div = 8; + *rclk = 50; + } else { + *source = source_div5; + *div = 20; + *rclk = 10; + } +} +EXPORT_SYMBOL(vdec_get_clk_source); + + +/* + *enum vformat_e { + * VFORMAT_MPEG12 = 0, + * VFORMAT_MPEG4, + * VFORMAT_H264, + * VFORMAT_MJPEG, + * VFORMAT_REAL, + * VFORMAT_JPEG, + * VFORMAT_VC1, + * VFORMAT_AVS, + * VFORMAT_YUV, + * VFORMAT_H264MVC, + * VFORMAT_H264_4K2K, + * VFORMAT_HEVC, + * VFORMAT_H264_ENC, + * 
VFORMAT_JPEG_ENC, + * VFORMAT_VP9, + * VFORMAT_MAX + *}; + *sample: + *{{1280*720*30, 100}, {1920*1080*30, 166}, {1920*1080*60, 333}, + * {4096*2048*30, 600}, {4096*2048*60, 600}, {INT_MAX, 600},} + *mean: + *width * height * fps + *<720p30fps clk=100MHZ + *>=720p30fps & < 1080p30fps clk=166MHZ + *>=1080p 30fps & < 1080p60fps clk=333MHZ + */ +static struct clk_set_setting clks_for_formats[] = { + { /*[VFORMAT_MPEG12] */ + {{1280 * 720 * 30, 100}, {1920 * 1080 * 30, 166}, + {1920 * 1080 * 60, 333}, + {4096 * 2048 * 30, 600}, {4096 * 2048 * 60, + 600}, {INT_MAX, 600}, + } + }, + { /*[VFORMAT_MPEG4] */ + {{1280 * 720 * 30, 100}, {1920 * 1080 * 30, 166}, + {1920 * 1080 * 60, 333}, + {4096 * 2048 * 30, 600}, {4096 * 2048 * 60, + 600}, {INT_MAX, 600}, + } + }, + { /*[VFORMAT_H264] */ + {{1280 * 720 * 30, 100}, {1920 * 1080 * 21, 166}, + {1920 * 1080 * 30, 333}, + {1920 * 1080 * 60, 600}, {4096 * 2048 * 60, + 600}, {INT_MAX, 600}, + } + }, + { /*[VFORMAT_MJPEG] */ + {{1280 * 720 * 30, 200}, {1920 * 1080 * 30, 200}, + {1920 * 1080 * 60, 333}, + {4096 * 2048 * 30, 600}, {4096 * 2048 * 60, + 600}, {INT_MAX, 600}, + } + }, + { /*[VFORMAT_REAL] */ + {{1280 * 720 * 20, 200}, {1920 * 1080 * 30, 500}, + {1920 * 1080 * 60, 500}, + {4096 * 2048 * 30, 600}, {4096 * 2048 * 60, + 600}, {INT_MAX, 600}, + } + }, + { /*[VFORMAT_JPEG] */ + {{1280 * 720 * 30, 100}, {1920 * 1080 * 30, 166}, + {1920 * 1080 * 60, 333}, + {4096 * 2048 * 30, 600}, {4096 * 2048 * 60, + 600}, {INT_MAX, 600}, + } + }, + { /*[VFORMAT_VC1] */ + {{1280 * 720 * 30, 100}, {1920 * 1080 * 30, 166}, + {1920 * 1080 * 60, 333}, + {4096 * 2048 * 30, 600}, {4096 * 2048 * 60, + 600}, {INT_MAX, 600}, + } + }, + { /*[VFORMAT_AVS] */ + {{1280 * 720 * 30, 100}, {1920 * 1080 * 30, 166}, + {1920 * 1080 * 60, 333}, + {4096 * 2048 * 30, 600}, {4096 * 2048 * 60, + 600}, {INT_MAX, 600}, + } + }, + { /*[VFORMAT_YUV] */ + {{1280 * 720 * 30, 100}, {INT_MAX, 100}, + {0, 0}, {0, 0}, {0, 0}, {0, 0}, + } + }, + { /*VFORMAT_H264MVC */ + {{1280 
* 720 * 30, 333}, {1920 * 1080 * 30, 333}, + {4096 * 2048 * 60, 600}, + {INT_MAX, 630}, {0, 0}, {0, 0}, + } + }, + { /*VFORMAT_H264_4K2K */ + {{1280 * 720 * 30, 600}, {4096 * 2048 * 60, 630}, + {INT_MAX, 630}, + {0, 0}, {0, 0}, {0, 0}, + } + }, + { /*VFORMAT_HEVC */ + {{1280 * 720 * 30, 100}, {1920 * 1080 * 60, 600}, + {4096 * 2048 * 25, 630}, + {4096 * 2048 * 30, 630}, {4096 * 2048 * 60, + 630}, {INT_MAX, 630}, + } + }, + { /*VFORMAT_H264_ENC */ + {{1280 * 720 * 30, 0}, {INT_MAX, 0}, + {0, 0}, {0, 0}, {0, 0}, {0, 0}, + } + }, + { /*VFORMAT_JPEG_ENC */ + {{1280 * 720 * 30, 0}, {INT_MAX, 0}, + {0, 0}, {0, 0}, {0, 0}, {0, 0}, + } + }, + { /*VFORMAT_VP9 */ + {{1280 * 720 * 30, 100}, {1920 * 1080 * 30, 100}, + {1920 * 1080 * 60, 166}, + {4096 * 2048 * 30, 333}, {4096 * 2048 * 60, + 630}, {INT_MAX, 630}, + } + }, + {/*VFORMAT_AVS2*/ + {{1280*720*30, 100}, {1920*1080*30, 100}, + {1920*1080*60, 166}, {4096*2048*30, 333}, + {4096*2048*60, 630}, {INT_MAX, 630},} + }, + {/*VFORMAT_AV1*/ + {{1280*720*30, 100}, {1920*1080*30, 100}, + {1920*1080*60, 166}, {4096*2048*30, 333}, + {4096*2048*60, 630}, {INT_MAX, 630},} + }, + +}; + +void set_clock_gate(struct gate_switch_node *nodes, int num) +{ + struct gate_switch_node *node = NULL; + char *hevc_mux_str = NULL; + + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SC2) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D)) + hevc_mux_str = "clk_hevc_mux"; + else + hevc_mux_str = "clk_hevcf_mux"; + + do { + node = &nodes[num - 1]; + if (IS_ERR_OR_NULL(node) || (IS_ERR_OR_NULL(node->clk))) + pr_info("get mux clk err.\n"); + + if (!strcmp(node->name, "clk_vdec_mux")) + gclk.vdec_mux_node = node; + else if (!strcmp(node->name, "clk_hcodec_mux")) + gclk.hcodec_mux_node = node; + else if (!strcmp(node->name, hevc_mux_str)) + gclk.hevc_mux_node = node; + else if (!strcmp(node->name, "clk_hevcb_mux")) + gclk.hevc_back_mux_node = node; + } while(--num); +} 
+EXPORT_SYMBOL(set_clock_gate); +#ifdef NO_CLKTREE +int vdec_set_clk(int dec, int source, int div) +{ + + if (dec == VDEC_1) + vdec1_set_clk(source, div); + else if (dec == VDEC_2) + vdec2_set_clk(source, div); + else if (dec == VDEC_HEVC) + hevc_set_clk(source, div); + else if (dec == VDEC_HCODEC) + hcodec_set_clk(source, div); + return 0; +} + +#else +static int vdec_set_clk(int dec, int rate) +{ + struct clk *clk = NULL; + + switch (dec) { + case VDEC_1: + clk = gclk.vdec_mux_node->clk; + WRITE_VREG_BITS(DOS_GCLK_EN0, 0x3ff, 0, 10); + break; + + case VDEC_HCODEC: + clk = gclk.hcodec_mux_node->clk; + WRITE_VREG_BITS(DOS_GCLK_EN0, 0x7fff, 12, 15); + break; + + case VDEC_2: + clk = gclk.vdec_mux_node->clk; + WRITE_VREG(DOS_GCLK_EN1, 0x3ff); + break; + + case VDEC_HEVC: + clk = gclk.hevc_mux_node->clk; + WRITE_VREG(DOS_GCLK_EN3, 0xffffffff); + break; + + case VDEC_HEVCB: + clk = gclk.hevc_back_mux_node->clk; + WRITE_VREG(DOS_GCLK_EN3, 0xffffffff); + break; + + case VDEC_MAX: + break; + + default: + pr_info("invaild vdec type.\n"); + } + + if (IS_ERR_OR_NULL(clk)) { + pr_info("the mux clk err.\n"); + return -1; + } + + clk_set_rate(clk, rate); + + return 0; +} + +static int vdec_clock_init(void) +{ + return 0; +} + +#endif +#ifdef NO_CLKTREE +static int vdec_clock_init(void) +{ + gp_pll_user_vdec = gp_pll_user_register("vdec", 0, + gp_pll_user_cb_vdec); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) + is_gp0_div2 = false; + else + is_gp0_div2 = true; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) { + pr_info("used fix clk for vdec clk source!\n"); + //update_vdec_clk_config_settings(1); + } + return (gp_pll_user_vdec) ? 
0 : -ENOMEM; +} + + + +static void update_clk_with_clk_configs( + int clk, int *source, int *div, int *rclk) +{ + unsigned int config = 0;//get_vdec_clk_config_settings(); + + if (!config) + return; + if (config >= 10) { + int wantclk; + wantclk = config; + vdec_get_clk_source(wantclk, source, div, rclk); + } + return; +} +#define NO_GP0_PLL 0//(get_vdec_clk_config_settings() == 1) +#define ALWAYS_GP0_PLL 0//(get_vdec_clk_config_settings() == 2) + +#define NO_GP0_PLL 0//(get_vdec_clk_config_settings() == 1) +#define ALWAYS_GP0_PLL 0//(get_vdec_clk_config_settings() == 2) + +static int vdec_clock_set(int clk) +{ + int use_gpll = 0; + int source, div, rclk; + int clk_seted = 0; + int gp_pll_wait = 0; + if (clk == 1) + clk = 200; + else if (clk == 2) { + if (clock_real_clk[VDEC_1] != 648) + clk = 500; + else + clk = 648; + } else if (clk == 0) { + /*used for release gp pull. + if used, release it. + if not used gp pll + do nothing. + */ + if (clock_real_clk[VDEC_1] == 667 || + (clock_real_clk[VDEC_1] == 648) || + clock_real_clk[VDEC_1] <= 0) + clk = 200; + else + clk = clock_real_clk[VDEC_1]; + } + vdec_get_clk_source(clk, &source, &div, &rclk); + update_clk_with_clk_configs(clk, &source, &div, &rclk); + + if (clock_real_clk[VDEC_1] == rclk) + return rclk; + if (NO_GP0_PLL) { + use_gpll = 0; + clk_seted = 0; + } else if ((rclk > 500 && clk != 667) || ALWAYS_GP0_PLL) { + if (clock_real_clk[VDEC_1] == 648) + return 648; + use_gpll = 1; + gp_pll_request(gp_pll_user_vdec); + while (!VDEC1_WITH_GP_PLL() && gp_pll_wait++ < 1000000) + udelay(1); + if (VDEC1_WITH_GP_PLL()) { + clk_seted = 1; + rclk = 648; + } else { + use_gpll = 0; + rclk = 667; + /*gp_pull request failed,used default 500Mhz*/ + pr_info("get gp pll failed used fix pull\n"); + } + } + if (!clk_seted) {/*if 648 not set,*/ + VDEC1_SAFE_CLOCK(); + VDEC1_CLOCK_OFF(); + vdec_set_clk(VDEC_1, source, div); + VDEC1_CLOCK_ON(); + } + + if (!use_gpll) + gp_pll_release(gp_pll_user_vdec); + clock_real_clk[VDEC_1] = rclk; 
+ debug_print("vdec_clock_set 2 to %d\n", rclk); + return rclk; +} +static int hevc_clock_init(void) +{ + gp_pll_user_hevc = gp_pll_user_register("hevc", 0, + gp_pll_user_cb_hevc); + + return (gp_pll_user_hevc) ? 0 : -ENOMEM; +} +static int hevc_back_clock_init(void) +{ + return 0; +} + +static int hevc_back_clock_set(int clk) +{ + return 0; +} + +static int hevc_clock_set(int clk) +{ + int use_gpll = 0; + int source, div, rclk; + int gp_pll_wait = 0; + int clk_seted = 0; + + debug_print("hevc_clock_set 1 to clk %d\n", clk); + if (clk == 1) + clk = 200; + else if (clk == 2) { + if (clock_real_clk[VDEC_HEVC] != 648) + clk = 500; + else + clk = 648; + } else if (clk == 0) { + /*used for release gp pull. + if used, release it. + if not used gp pll + do nothing. + */ + if ((clock_real_clk[VDEC_HEVC] == 667) || + (clock_real_clk[VDEC_HEVC] == 648) || + (clock_real_clk[VDEC_HEVC] <= 0)) + clk = 200; + else + clk = clock_real_clk[VDEC_HEVC]; + } + vdec_get_clk_source(clk, &source, &div, &rclk); + update_clk_with_clk_configs(clk, &source, &div, &rclk); + + if (rclk == clock_real_clk[VDEC_HEVC]) + return rclk;/*clk not changed,*/ + if (NO_GP0_PLL) { + use_gpll = 0; + clk_seted = 0; + } else if ((rclk > 500 && clk != 667) || ALWAYS_GP0_PLL) { + if (clock_real_clk[VDEC_HEVC] == 648) + return 648; + use_gpll = 1; + gp_pll_request(gp_pll_user_hevc); + while (!HEVC_WITH_GP_PLL() && gp_pll_wait++ < 1000000) + udelay(1); + if (HEVC_WITH_GP_PLL()) { + clk_seted = 1; + rclk = 648; + } else { + rclk = 667; + /*gp_pull request failed,used default 500Mhz*/ + pr_info("get gp pll failed used fix pull\n"); + } + } + if (!clk_seted) {/*if 648 not set,*/ +// HEVC_SAFE_CLOCK(); + HEVC_CLOCK_OFF(); + vdec_set_clk(VDEC_HEVC, source, div); + HEVC_CLOCK_ON(); + } + if (!use_gpll) + gp_pll_release(gp_pll_user_hevc); + clock_real_clk[VDEC_HEVC] = rclk; + /*debug_print("hevc_clock_set 2 to rclk=%d, configs=%d\n", + rclk, + get_vdec_clk_config_settings());*/ //DEBUG_TMP + return rclk; +} + +static 
int hcodec_clock_set(int clk) +{ + int source, div, rclk; + HCODEC_CLOCK_OFF(); + vdec_get_clk_source(200, &source, &div, &rclk); + vdec_set_clk(VDEC_HCODEC, source, div); + HCODEC_CLOCK_ON(); + clock_real_clk[VDEC_HCODEC] = rclk; + return rclk; +} + + +#else +static int vdec_clock_set(int clk) +{ + if (clk == 1) + clk = 200; + else if (clk == 2) { + if (clock_real_clk[VDEC_1] != 648) + clk = 500; + else + clk = 648; + } else if (clk == 0) { + if (clock_real_clk[VDEC_1] == 667 || + (clock_real_clk[VDEC_1] == 648) || + clock_real_clk[VDEC_1] <= 0) + clk = 200; + else + clk = clock_real_clk[VDEC_1]; + } + + if ((clk > 500 && clk != 667)) { + if (clock_real_clk[VDEC_1] == 648) + return 648; + clk = 667; + } + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1 && + get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1 && + get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5 && + get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D) + clk = 800; + + if (is_cpu_s4_s805x2()) + clk = 500; + + if (set_frq_enable && vdec_frq) { + pr_info("Set the vdec frq is %u MHz\n", vdec_frq); + clk = vdec_frq; + } + + vdec_set_clk(VDEC_1, clk * MHz); + + clock_real_clk[VDEC_1] = clk; + + pr_debug("vdec mux clock is %lu Hz\n", + clk_get_rate(gclk.vdec_mux_node->clk)); + + return clk; +} + +static int hevc_clock_init(void) +{ + return 0; +} + +static int hevc_back_clock_init(void) +{ + return 0; +} + +static int hevc_back_clock_set(int clk) +{ + if (clk == 1) + clk = 200; + else if (clk == 2) { + if (clock_real_clk[VDEC_HEVCB] != 648) + clk = 500; + else + clk = 648; + } else if (clk == 0) { + if (clock_real_clk[VDEC_HEVCB] == 667 || + (clock_real_clk[VDEC_HEVCB] == 648) || + clock_real_clk[VDEC_HEVCB] <= 0) + clk = 200; + else + clk = clock_real_clk[VDEC_HEVCB]; + } + + if ((clk > 500 && clk != 667)) { + if (clock_real_clk[VDEC_HEVCB] == 648) + return 648; + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + clk = TL1_HEVC_MAX_CLK; + else + clk = 667; + } + + if (set_frq_enable && hevcb_frq) { + 
pr_info("Set the hevcb frq is %u MHz\n", hevcb_frq); + clk = hevcb_frq; + } + + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TXLX) && + (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SC2)) { + if ((READ_EFUSE_REG(EFUSE_LIC1) >> 28 & 0x1) && clk > 333) { + pr_info("The hevcb clock limit to 333MHz.\n"); + clk = 333; + } + } + + vdec_set_clk(VDEC_HEVCB, clk * MHz); + + clock_real_clk[VDEC_HEVCB] = clk; + pr_debug("hevc back mux clock is %lu Hz\n", + clk_get_rate(gclk.hevc_back_mux_node->clk)); + + return clk; +} + +static int hevc_clock_set(int clk) +{ + if (clk == 1) + clk = 200; + else if (clk == 2) { + if (clock_real_clk[VDEC_HEVC] != 648) + clk = 500; + else + clk = 648; + } else if (clk == 0) { + if (clock_real_clk[VDEC_HEVC] == 667 || + (clock_real_clk[VDEC_HEVC] == 648) || + clock_real_clk[VDEC_HEVC] <= 0) + clk = 200; + else + clk = clock_real_clk[VDEC_HEVC]; + } + + if ((clk > 500 && clk != 667)) { + if (clock_real_clk[VDEC_HEVC] == 648) + return 648; + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D)) + clk = TL1_HEVC_MAX_CLK; + else + clk = 667; + + if (is_cpu_s4_s805x2()) + clk = 500; + } + + if (set_frq_enable && hevc_frq) { + pr_info("Set the hevc frq is %u MHz\n", hevc_frq); + clk = hevc_frq; + } + + vdec_set_clk(VDEC_HEVC, clk * MHz); + + clock_real_clk[VDEC_HEVC] = clk; + + pr_debug("hevc mux clock is %lu Hz\n", + clk_get_rate(gclk.hevc_mux_node->clk)); + + return clk; +} + +static int hcodec_clock_set(int clk) +{ + if (clk == 1) + clk = 200; + else if (clk == 2) { + if (clock_real_clk[VDEC_HCODEC] != 648) + clk = 500; + else + clk = 648; + } else if (clk == 0) { + if (clock_real_clk[VDEC_HCODEC] == 667 || + (clock_real_clk[VDEC_HCODEC] == 648) || + clock_real_clk[VDEC_HCODEC] <= 0) + clk = 200; + else + clk = clock_real_clk[VDEC_HCODEC]; + } + + if ((clk > 500 && clk != 667)) { + if (clock_real_clk[VDEC_HCODEC] == 648) + return 648; + 
clk = 667; + } + + vdec_set_clk(VDEC_HCODEC, clk * MHz); + + clock_real_clk[VDEC_HCODEC] = clk; + + pr_debug("hcodec mux clock is %lu Hz\n", + clk_get_rate(gclk.hcodec_mux_node->clk)); + + return clk; +} +#endif + +static void vdec_clock_on(void) +{ + mutex_lock(&gclk.vdec_mux_node->mutex); + if (!gclk.vdec_mux_node->ref_count) + clk_prepare_enable(gclk.vdec_mux_node->clk); + + gclk.vdec_mux_node->ref_count++; + mutex_unlock(&gclk.vdec_mux_node->mutex); + + pr_debug("the %-15s clock on, ref cnt: %d\n", + gclk.vdec_mux_node->name, + gclk.vdec_mux_node->ref_count); +} + +static void vdec_clock_off(void) +{ + mutex_lock(&gclk.vdec_mux_node->mutex); + gclk.vdec_mux_node->ref_count--; + if (!gclk.vdec_mux_node->ref_count) + clk_disable_unprepare(gclk.vdec_mux_node->clk); + + clock_real_clk[VDEC_1] = 0; + mutex_unlock(&gclk.vdec_mux_node->mutex); + + pr_debug("the %-15s clock off, ref cnt: %d\n", + gclk.vdec_mux_node->name, + gclk.vdec_mux_node->ref_count); +} + +static void hcodec_clock_on(void) +{ + mutex_lock(&gclk.hcodec_mux_node->mutex); + if (!gclk.hcodec_mux_node->ref_count) + clk_prepare_enable(gclk.hcodec_mux_node->clk); + + gclk.hcodec_mux_node->ref_count++; + mutex_unlock(&gclk.hcodec_mux_node->mutex); + + pr_debug("the %-15s clock on, ref cnt: %d\n", + gclk.hcodec_mux_node->name, + gclk.hcodec_mux_node->ref_count); +} + +static void hcodec_clock_off(void) +{ + mutex_lock(&gclk.hcodec_mux_node->mutex); + gclk.hcodec_mux_node->ref_count--; + if (!gclk.hcodec_mux_node->ref_count) + clk_disable_unprepare(gclk.hcodec_mux_node->clk); + + mutex_unlock(&gclk.hcodec_mux_node->mutex); + + pr_debug("the %-15s clock off, ref cnt: %d\n", + gclk.hcodec_mux_node->name, + gclk.hcodec_mux_node->ref_count); +} + +static void hevc_clock_on(void) +{ + mutex_lock(&gclk.hevc_mux_node->mutex); + if (!gclk.hevc_mux_node->ref_count) + clk_prepare_enable(gclk.hevc_mux_node->clk); + + gclk.hevc_mux_node->ref_count++; + WRITE_VREG(DOS_GCLK_EN3, 0xffffffff); + 
mutex_unlock(&gclk.hevc_mux_node->mutex); + + pr_debug("the %-15s clock on, ref cnt: %d\n", + gclk.hevc_mux_node->name, + gclk.hevc_mux_node->ref_count); +} + +static void hevc_clock_off(void) +{ + mutex_lock(&gclk.hevc_mux_node->mutex); + gclk.hevc_mux_node->ref_count--; + if (!gclk.hevc_mux_node->ref_count) + clk_disable_unprepare(gclk.hevc_mux_node->clk); + + clock_real_clk[VDEC_HEVC] = 0; + mutex_unlock(&gclk.hevc_mux_node->mutex); + + pr_debug("the %-15s clock off, ref cnt: %d\n", + gclk.hevc_mux_node->name, + gclk.hevc_mux_node->ref_count); +} + +static void hevc_back_clock_on(void) +{ + mutex_lock(&gclk.hevc_back_mux_node->mutex); + if (!gclk.hevc_back_mux_node->ref_count) + clk_prepare_enable(gclk.hevc_back_mux_node->clk); + + gclk.hevc_back_mux_node->ref_count++; + WRITE_VREG(DOS_GCLK_EN3, 0xffffffff); + mutex_unlock(&gclk.hevc_back_mux_node->mutex); + + pr_debug("the %-15s clock on, ref cnt: %d\n", + gclk.hevc_back_mux_node->name, + gclk.hevc_back_mux_node->ref_count); +} + +static void hevc_back_clock_off(void) +{ + mutex_lock(&gclk.hevc_back_mux_node->mutex); + gclk.hevc_back_mux_node->ref_count--; + if (!gclk.hevc_back_mux_node->ref_count) + clk_disable_unprepare(gclk.hevc_back_mux_node->clk); + + clock_real_clk[VDEC_HEVC] = 0; + mutex_unlock(&gclk.hevc_back_mux_node->mutex); + + pr_debug("the %-15s clock off, ref cnt: %d\n", + gclk.hevc_back_mux_node->name, + gclk.hevc_back_mux_node->ref_count); +} + +static int vdec_clock_get(enum vdec_type_e core) +{ + if (core >= VDEC_MAX) + return 0; + + return clock_real_clk[core]; +} + +#define INCLUDE_FROM_ARCH_CLK_MGR + +/*#define VDEC_HAS_VDEC2*/ +#define VDEC_HAS_HEVC +#define VDEC_HAS_VDEC_HCODEC +#define VDEC_HAS_CLK_SETTINGS +#define CLK_FOR_CPU {\ + AM_MESON_CPU_MAJOR_ID_GXBB,\ + AM_MESON_CPU_MAJOR_ID_GXTVBB,\ + AM_MESON_CPU_MAJOR_ID_GXL,\ + AM_MESON_CPU_MAJOR_ID_GXM,\ + AM_MESON_CPU_MAJOR_ID_TXL,\ + AM_MESON_CPU_MAJOR_ID_TXLX,\ + AM_MESON_CPU_MAJOR_ID_GXLX,\ + AM_MESON_CPU_MAJOR_ID_G12A,\ + 
AM_MESON_CPU_MAJOR_ID_G12B,\ + AM_MESON_CPU_MAJOR_ID_SM1,\ + AM_MESON_CPU_MAJOR_ID_TL1,\ + AM_MESON_CPU_MAJOR_ID_TM2,\ + AM_MESON_CPU_MAJOR_ID_SC2,\ + AM_MESON_CPU_MAJOR_ID_T5,\ + AM_MESON_CPU_MAJOR_ID_T5D,\ + AM_MESON_CPU_MAJOR_ID_T7,\ + AM_MESON_CPU_MAJOR_ID_S4,\ + AM_MESON_CPU_MAJOR_ID_T3,\ + AM_MESON_CPU_MAJOR_ID_P1,\ + AM_MESON_CPU_MAJOR_ID_S4D,\ + AM_MESON_CPU_MAJOR_ID_T5W,\ + 0} +#include "clk.h" + +module_param(set_frq_enable, uint, 0664); +MODULE_PARM_DESC(set_frq_enable, "\n set frequency enable\n"); + +module_param(vdec_frq, uint, 0664); +MODULE_PARM_DESC(vdec_frq, "\n set vdec frequency\n"); + +module_param(hevc_frq, uint, 0664); +MODULE_PARM_DESC(hevc_frq, "\n set hevc frequency\n"); + +module_param(hevcb_frq, uint, 0664); +MODULE_PARM_DESC(hevcb_frq, "\n set hevcb frequency\n"); + +ARCH_VDEC_CLK_INIT(); +ARCH_VDEC_CLK_EXIT(); + +MODULE_LICENSE("GPL");
diff --git a/drivers/common/media_clock/switch/amports_gate.c b/drivers/common/media_clock/switch/amports_gate.c new file mode 100644 index 0000000..58a0289 --- /dev/null +++ b/drivers/common/media_clock/switch/amports_gate.c
@@ -0,0 +1,204 @@ +/* + * drivers/amlogic/media/common/arch/switch/amports_gate.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/compiler.h> +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/clk.h> +#include "amports_gate.h" +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../../../frame_provider/decoder/utils/vdec.h" +#include "../clk/clk.h" + + +#define DEBUG_REF 1 +#define GATE_RESET_OK + +#ifdef GATE_RESET_OK + +struct gate_switch_node gates[] = { + { + .name = "demux", + }, + { + .name = "parser_top", + }, + { + .name = "vdec", + }, + { + .name = "clk_81", + }, + { + .name = "clk_vdec_mux", + }, + { + .name = "clk_hcodec_mux", + }, + { + .name = "clk_hevc_mux", + }, + { + .name = "clk_hevcb_mux", + }, + { + .name = "ahbarb0", + }, + { + .name = "asyncfifo", + }, + { + .name = "clk_hevcf_mux", + }, +}; + +/* + *mesonstream { + * compatible = "amlogic, codec, streambuf"; + * dev_name = "mesonstream"; + * status = "okay"; + * clocks = <&clkc CLKID_DOS_PARSER + * &clkc CLKID_DEMUX + * &clkc CLKID_DOS + * &clkc CLKID_VDEC_MUX + * &clkc CLKID_HCODEC_MUX + * &clkc CLKID_HEVCF_MUX + * &clkc CLKID_HEVC_MUX>; + * clock-names = "parser_top", + * "demux", + * "vdec", + * "clk_vdec_mux", + * "clk_hcodec_mux", + * "clk_hevc_mux", + * "clk_hevcb_mux"; + *}; + */ + +int 
amports_clock_gate_init(struct device *dev) +{ + int i; + + for (i = 0; i < sizeof(gates) / sizeof(struct gate_switch_node); i++) { + gates[i].clk = devm_clk_get(dev, gates[i].name); + if (IS_ERR_OR_NULL(gates[i].clk)) { + gates[i].clk = NULL; + pr_info("get gate %s control failed %px\n", + gates[i].name, + gates[i].clk); + } else { + pr_info("get gate %s control ok %px\n", + gates[i].name, + gates[i].clk); + } + gates[i].ref_count = 0; + mutex_init(&gates[i].mutex); + } + + set_clock_gate(gates, ARRAY_SIZE(gates)); + + return 0; +} +EXPORT_SYMBOL(amports_clock_gate_init); + +static int amports_gate_clk(struct gate_switch_node *gate_node, int enable) +{ + mutex_lock(&gate_node->mutex); + if (enable) { + if (gate_node->ref_count == 0) + clk_prepare_enable(gate_node->clk); + + gate_node->ref_count++; + + if (DEBUG_REF) + pr_debug("the %-15s clock on, ref cnt: %d\n", + gate_node->name, gate_node->ref_count); + } else { + gate_node->ref_count--; + if (gate_node->ref_count == 0) + clk_disable_unprepare(gate_node->clk); + + if (DEBUG_REF) + pr_debug("the %-15s clock off, ref cnt: %d\n", + gate_node->name, gate_node->ref_count); + } + mutex_unlock(&gate_node->mutex); + + return 0; +} + +int amports_switch_gate(const char *name, int enable) +{ + int i; + + for (i = 0; i < sizeof(gates) / sizeof(struct gate_switch_node); i++) { + if (!strcmp(name, gates[i].name)) { + + /*pr_info("openclose:%d gate %s control\n", enable, + * gates[i].name); + */ + + if (gates[i].clk) + amports_gate_clk(&gates[i], enable); + } + } + return 0; +} +EXPORT_SYMBOL(amports_switch_gate); + +#else +/* + *can used for debug. + *on chip bringup. 
+ */ +int amports_clock_gate_init(struct device *dev) +{ + static int gate_inited; + + if (gate_inited) + return 0; +/* + *#define HHI_GCLK_MPEG0 0x1050 + *#define HHI_GCLK_MPEG1 0x1051 + *#define HHI_GCLK_MPEG2 0x1052 + *#define HHI_GCLK_OTHER 0x1054 + *#define HHI_GCLK_AO 0x1055 + */ + WRITE_HHI_REG_BITS(HHI_GCLK_MPEG0, 1, 1, 1);/*dos*/ + WRITE_HHI_REG_BITS(HHI_GCLK_MPEG1, 1, 25, 1);/*U_parser_top()*/ + WRITE_HHI_REG_BITS(HHI_GCLK_MPEG1, 0xff, 6, 8);/*aiu()*/ + WRITE_HHI_REG_BITS(HHI_GCLK_MPEG1, 1, 4, 1);/*demux()*/ + WRITE_HHI_REG_BITS(HHI_GCLK_MPEG1, 1, 2, 1);/*audio in()*/ + WRITE_HHI_REG_BITS(HHI_GCLK_MPEG2, 1, 25, 1);/*VPU Interrupt*/ + gate_inited++; + + + + return 0; +} +EXPORT_SYMBOL(amports_clock_gate_init); + + +int amports_switch_gate(const char *name, int enable) +{ + return 0; +} +EXPORT_SYMBOL(amports_switch_gate); + +#endif
diff --git a/drivers/common/media_clock/switch/amports_gate.h b/drivers/common/media_clock/switch/amports_gate.h new file mode 100644 index 0000000..58abc92 --- /dev/null +++ b/drivers/common/media_clock/switch/amports_gate.h
@@ -0,0 +1,32 @@ +/* + * drivers/amlogic/media/common/arch/switch/amports_gate.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef AMPORT_GATE_H +#define AMPORT_GATE_H +#include <linux/device.h> + +struct gate_switch_node { + struct clk *clk; + const char *name; + struct mutex mutex; + int ref_count; +}; + +extern int amports_clock_gate_init(struct device *dev); +extern int amports_switch_gate(const char *name, int enable); + +#endif
diff --git a/drivers/frame_provider/Makefile b/drivers/frame_provider/Makefile new file mode 100644 index 0000000..f30c4f1 --- /dev/null +++ b/drivers/frame_provider/Makefile
@@ -0,0 +1,2 @@ +obj-y += decoder/ +obj-y += decoder_v4l/
diff --git a/drivers/frame_provider/decoder/Makefile b/drivers/frame_provider/decoder/Makefile new file mode 100644 index 0000000..bb0079e --- /dev/null +++ b/drivers/frame_provider/decoder/Makefile
@@ -0,0 +1,13 @@ +obj-y += utils/ +obj-y += mpeg12/ +obj-y += mpeg4/ +obj-y += vc1/ +obj-y += h264/ +obj-y += h264_multi/ +obj-y += h265/ +obj-y += vp9/ +obj-y += mjpeg/ +obj-y += avs/ +obj-y += avs2/ +obj-y += avs_multi/ +obj-y += vav1/
diff --git a/drivers/frame_provider/decoder/avs/Makefile b/drivers/frame_provider/decoder/avs/Makefile new file mode 100644 index 0000000..1d56236 --- /dev/null +++ b/drivers/frame_provider/decoder/avs/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_AVS) += amvdec_avs.o +amvdec_avs-objs += avs.o avsp_trans.o
diff --git a/drivers/frame_provider/decoder/avs/avs.c b/drivers/frame_provider/decoder/avs/avs.c new file mode 100644 index 0000000..5869eac --- /dev/null +++ b/drivers/frame_provider/decoder/avs/avs.c
@@ -0,0 +1,1988 @@ +/* + * drivers/amlogic/amports/vavs.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../../stream_input/amports/streambuf_reg.h" +#include "../utils/amvdec.h" +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/dma-mapping.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/slab.h> +#include "avs.h" +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include "../utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> + +#define DRIVER_NAME "amvdec_avs" +#define MODULE_NAME "amvdec_avs" + + +#if 1/* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +#define NV21 +#endif + +#define 
USE_AVS_SEQ_INFO +#define HANDLE_AVS_IRQ +#define DEBUG_PTS + +#define I_PICTURE 0 +#define P_PICTURE 1 +#define B_PICTURE 2 + +/* #define ORI_BUFFER_START_ADDR 0x81000000 */ +#define ORI_BUFFER_START_ADDR 0x80000000 + +#define INTERLACE_FLAG 0x80 +#define TOP_FIELD_FIRST_FLAG 0x40 + +/* protocol registers */ +#define AVS_PIC_RATIO AV_SCRATCH_0 +#define AVS_PIC_WIDTH AV_SCRATCH_1 +#define AVS_PIC_HEIGHT AV_SCRATCH_2 +#define AVS_FRAME_RATE AV_SCRATCH_3 + +#define AVS_ERROR_COUNT AV_SCRATCH_6 +#define AVS_SOS_COUNT AV_SCRATCH_7 +#define AVS_BUFFERIN AV_SCRATCH_8 +#define AVS_BUFFEROUT AV_SCRATCH_9 +#define AVS_REPEAT_COUNT AV_SCRATCH_A +#define AVS_TIME_STAMP AV_SCRATCH_B +#define AVS_OFFSET_REG AV_SCRATCH_C +#define MEM_OFFSET_REG AV_SCRATCH_F +#define AVS_ERROR_RECOVERY_MODE AV_SCRATCH_G + +#define VF_POOL_SIZE 32 +#define PUT_INTERVAL (HZ/100) + +#if 1 /*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8*/ +#define INT_AMVENCODER INT_DOS_MAILBOX_1 +#else +/* #define AMVENC_DEV_VERSION "AML-MT" */ +#define INT_AMVENCODER INT_MAILBOX_1A +#endif + + +#define DEC_CONTROL_FLAG_FORCE_2500_1080P_INTERLACE 0x0001 +static u32 dec_control = DEC_CONTROL_FLAG_FORCE_2500_1080P_INTERLACE; + + +#define VPP_VD1_POSTBLEND (1 << 10) + +static int debug_flag; + +/******************************** +firmware_sel + 0: use avsp_trans long cabac ucode; + 1: not use avsp_trans long cabac ucode +********************************/ +static int firmware_sel; +static int disable_longcabac_trans = 1; + +static int support_user_data = 1; + +int avs_get_debug_flag(void) +{ + return debug_flag; +} + +static struct vframe_s *vavs_vf_peek(void *); +static struct vframe_s *vavs_vf_get(void *); +static void vavs_vf_put(struct vframe_s *, void *); +static int vavs_event_cb(int type, void *data, void *private_data); +static int vavs_vf_states(struct vframe_states *states, void *); + +static const char vavs_dec_id[] = "vavs-dev"; + +#define PROVIDER_NAME "decoder.avs" +static DEFINE_SPINLOCK(lock); +static 
DEFINE_MUTEX(vavs_mutex); + +static const struct vframe_operations_s vavs_vf_provider = { + .peek = vavs_vf_peek, + .get = vavs_vf_get, + .put = vavs_vf_put, + .event_cb = vavs_event_cb, + .vf_states = vavs_vf_states, +}; +static void *mm_blk_handle; +static struct vframe_provider_s vavs_vf_prov; + +#define VF_BUF_NUM_MAX 16 +#define WORKSPACE_SIZE (4 * SZ_1M) + +#ifdef AVSP_LONG_CABAC +#define MAX_BMMU_BUFFER_NUM (VF_BUF_NUM_MAX + 2) +#define WORKSPACE_SIZE_A (MAX_CODED_FRAME_SIZE + LOCAL_HEAP_SIZE) +#else +#define MAX_BMMU_BUFFER_NUM (VF_BUF_NUM_MAX + 1) +#endif + +#define RV_AI_BUFF_START_ADDR 0x01a00000 +#define LONG_CABAC_RV_AI_BUFF_START_ADDR 0x00000000 + +static u32 vf_buf_num = 8; +static u32 vf_buf_num_used; +static u32 canvas_base = 128; +#ifdef NV21 + int canvas_num = 2; /*NV21*/ +#else + int canvas_num = 3; +#endif + +static struct vframe_s vfpool[VF_POOL_SIZE]; +/*static struct vframe_s vfpool2[VF_POOL_SIZE];*/ +static struct vframe_s *cur_vfpool; +static unsigned char recover_flag; +static s32 vfbuf_use[VF_BUF_NUM_MAX]; +static u32 saved_resolution; +static u32 frame_width, frame_height, frame_dur, frame_prog; +static struct timer_list recycle_timer; +static u32 stat; +static u32 buf_size = 32 * 1024 * 1024; +static u32 buf_offset; +static u32 avi_flag; +static u32 vavs_ratio; +static u32 pic_type; +static u32 pts_by_offset = 1; +static u32 total_frame; +static u32 next_pts; +static unsigned char throw_pb_flag; +#ifdef DEBUG_PTS +static u32 pts_hit, pts_missed, pts_i_hit, pts_i_missed; +#endif + +static u32 radr, rval; +static struct dec_sysinfo vavs_amstream_dec_info; +static struct vdec_info *gvs; +static u32 fr_hint_status; +static struct work_struct notify_work; +static struct work_struct set_clk_work; +static bool is_reset; + +static struct vdec_s *vdec = NULL; + +#ifdef AVSP_LONG_CABAC +static struct work_struct long_cabac_wd_work; +void *es_write_addr_virt; +dma_addr_t es_write_addr_phy; + +void *bitstream_read_tmp; +dma_addr_t 
bitstream_read_tmp_phy; +void *avsp_heap_adr; +static uint long_cabac_busy; +#endif + + +static void *user_data_buffer; +static dma_addr_t user_data_buffer_phys; + +static DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(recycle_q, struct vframe_s *, VF_POOL_SIZE); + +static inline u32 index2canvas(u32 index) +{ + const u32 canvas_tab[VF_BUF_NUM_MAX] = { + 0x010100, 0x030302, 0x050504, 0x070706, + 0x090908, 0x0b0b0a, 0x0d0d0c, 0x0f0f0e, + 0x111110, 0x131312, 0x151514, 0x171716, + 0x191918, 0x1b1b1a, 0x1d1d1c, 0x1f1f1e, + }; + const u32 canvas_tab_3[4] = { + 0x010100, 0x040403, 0x070706, 0x0a0a09 + }; + + if (canvas_num == 2) + return canvas_tab[index] + (canvas_base << 16) + + (canvas_base << 8) + canvas_base; + + return canvas_tab_3[index] + (canvas_base << 16) + + (canvas_base << 8) + canvas_base; +} + +static const u32 frame_rate_tab[16] = { + 96000 / 30, /* forbidden */ + 96000000 / 23976, /* 24000/1001 (23.967) */ + 96000 / 24, + 96000 / 25, + 9600000 / 2997, /* 30000/1001 (29.97) */ + 96000 / 30, + 96000 / 50, + 9600000 / 5994, /* 60000/1001 (59.94) */ + 96000 / 60, + /* > 8 reserved, use 24 */ + 96000 / 24, 96000 / 24, 96000 / 24, 96000 / 24, + 96000 / 24, 96000 / 24, 96000 / 24 +}; + +static void set_frame_info(struct vframe_s *vf, unsigned int *duration) +{ + int ar = 0; + + unsigned int pixel_ratio = READ_VREG(AVS_PIC_RATIO); +#ifndef USE_AVS_SEQ_INFO + if (vavs_amstream_dec_info.width > 0 + && vavs_amstream_dec_info.height > 0) { + vf->width = vavs_amstream_dec_info.width; + vf->height = vavs_amstream_dec_info.height; + } else +#endif + { + vf->width = READ_VREG(AVS_PIC_WIDTH); + vf->height = READ_VREG(AVS_PIC_HEIGHT); + frame_width = vf->width; + frame_height = vf->height; + /* pr_info("%s: (%d,%d)\n", __func__,vf->width, vf->height);*/ + } + +#ifndef USE_AVS_SEQ_INFO + if (vavs_amstream_dec_info.rate > 0) + *duration = vavs_amstream_dec_info.rate; + else 
+#endif + { + *duration = frame_rate_tab[READ_VREG(AVS_FRAME_RATE) & 0xf]; + /* pr_info("%s: duration = %d\n", __func__, *duration); */ + frame_dur = *duration; + schedule_work(¬ify_work); + } + + if (vavs_ratio == 0) { + /* always stretch to 16:9 */ + vf->ratio_control |= (0x90 << + DISP_RATIO_ASPECT_RATIO_BIT); + } else { + switch (pixel_ratio) { + case 1: + ar = (vf->height * vavs_ratio) / vf->width; + break; + case 2: + ar = (vf->height * 3 * vavs_ratio) / (vf->width * 4); + break; + case 3: + ar = (vf->height * 9 * vavs_ratio) / (vf->width * 16); + break; + case 4: + ar = (vf->height * 100 * vavs_ratio) / (vf->width * + 221); + break; + default: + ar = (vf->height * vavs_ratio) / vf->width; + break; + } + } + + ar = min(ar, DISP_RATIO_ASPECT_RATIO_MAX); + + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + /*vf->ratio_control |= DISP_RATIO_FORCECONFIG | DISP_RATIO_KEEPRATIO; */ + + vf->flag = 0; +} + + +static struct work_struct userdata_push_work; +/* +#define DUMP_LAST_REPORTED_USER_DATA +*/ +static void userdata_push_do_work(struct work_struct *work) +{ + unsigned int user_data_flags; + unsigned int user_data_wp; + unsigned int user_data_length; + struct userdata_poc_info_t user_data_poc; +#ifdef DUMP_LAST_REPORTED_USER_DATA + int user_data_len; + int wp_start; + unsigned char *pdata; + int nLeft; +#endif + + user_data_flags = READ_VREG(AV_SCRATCH_N); + user_data_wp = (user_data_flags >> 16) & 0xffff; + user_data_length = user_data_flags & 0x7fff; + +#ifdef DUMP_LAST_REPORTED_USER_DATA + dma_sync_single_for_cpu(amports_get_dma_device(), + user_data_buffer_phys, USER_DATA_SIZE, + DMA_FROM_DEVICE); + + if (user_data_length & 0x07) + user_data_len = (user_data_length + 8) & 0xFFFFFFF8; + else + user_data_len = user_data_length; + + if (user_data_wp >= user_data_len) { + wp_start = user_data_wp - user_data_len; + + pdata = (unsigned char *)user_data_buffer; + pdata += wp_start; + nLeft = user_data_len; + while (nLeft >= 8) { + pr_info("%02x %02x %02x 
%02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + } else { + wp_start = user_data_wp + + USER_DATA_SIZE - user_data_len; + + pdata = (unsigned char *)user_data_buffer; + pdata += wp_start; + nLeft = USER_DATA_SIZE - wp_start; + + while (nLeft >= 8) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + + pdata = (unsigned char *)user_data_buffer; + nLeft = user_data_wp; + while (nLeft >= 8) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + } +#endif + +/* + pr_info("pocinfo 0x%x, poc %d, wp 0x%x, len %d\n", + READ_VREG(AV_SCRATCH_L), READ_VREG(AV_SCRATCH_M), + user_data_wp, user_data_length); +*/ + user_data_poc.poc_info = READ_VREG(AV_SCRATCH_L); + user_data_poc.poc_number = READ_VREG(AV_SCRATCH_M); + + WRITE_VREG(AV_SCRATCH_N, 0); +/* + wakeup_userdata_poll(user_data_poc, user_data_wp, + (unsigned long)user_data_buffer, + USER_DATA_SIZE, user_data_length); +*/ +} + +static void UserDataHandler(void) +{ + unsigned int user_data_flags; + + user_data_flags = READ_VREG(AV_SCRATCH_N); + if (user_data_flags & (1 << 15)) { /* data ready */ + schedule_work(&userdata_push_work); + } +} + + +#ifdef HANDLE_AVS_IRQ +static irqreturn_t vavs_isr(int irq, void *dev_id) +#else +static void vavs_isr(void) +#endif +{ + u32 reg; + struct vframe_s *vf; + u32 dur; + u32 repeat_count; + u32 picture_type; + u32 buffer_index; + u32 frame_size; + bool force_interlaced_frame = false; + unsigned int pts, pts_valid = 0, offset = 0; + u64 pts_us64; + if (debug_flag & AVS_DEBUG_UCODE) { + if (READ_VREG(AV_SCRATCH_E) != 0) { + pr_info("dbg%x: %x\n", READ_VREG(AV_SCRATCH_E), + READ_VREG(AV_SCRATCH_D)); + WRITE_VREG(AV_SCRATCH_E, 0); + } + } +#ifdef 
AVSP_LONG_CABAC + if (firmware_sel == 0 && READ_VREG(LONG_CABAC_REQ)) { +#ifdef PERFORMANCE_DEBUG + pr_info("%s:schedule long_cabac_wd_work\r\n", __func__); +#endif + pr_info("schedule long_cabac_wd_work and requested from %d\n", + (READ_VREG(LONG_CABAC_REQ) >> 8)&0xFF); + schedule_work(&long_cabac_wd_work); + } +#endif + + + UserDataHandler(); + + reg = READ_VREG(AVS_BUFFEROUT); + + if (reg) { + if (debug_flag & AVS_DEBUG_PRINT) + pr_info("AVS_BUFFEROUT=%x\n", reg); + if (pts_by_offset) { + offset = READ_VREG(AVS_OFFSET_REG); + if (debug_flag & AVS_DEBUG_PRINT) + pr_info("AVS OFFSET=%x\n", offset); + if (pts_lookup_offset_us64(PTS_TYPE_VIDEO, offset, &pts, + &frame_size, + 0, &pts_us64) == 0) { + pts_valid = 1; +#ifdef DEBUG_PTS + pts_hit++; +#endif + } else { +#ifdef DEBUG_PTS + pts_missed++; +#endif + } + } + + repeat_count = READ_VREG(AVS_REPEAT_COUNT); + if (firmware_sel == 0) + buffer_index = + ((reg & 0x7) + + (((reg >> 8) & 0x3) << 3) - 1) & 0x1f; + else + buffer_index = + ((reg & 0x7) - 1) & 7; + + picture_type = (reg >> 3) & 7; +#ifdef DEBUG_PTS + if (picture_type == I_PICTURE) { + /* pr_info("I offset 0x%x, pts_valid %d\n", + * offset, pts_valid); + */ + if (!pts_valid) + pts_i_missed++; + else + pts_i_hit++; + } +#endif + + if ((dec_control & DEC_CONTROL_FLAG_FORCE_2500_1080P_INTERLACE) + && frame_width == 1920 && frame_height == 1080) { + force_interlaced_frame = true; + } + + if (throw_pb_flag && picture_type != I_PICTURE) { + + if (debug_flag & AVS_DEBUG_PRINT) { + pr_info("picture type %d throwed\n", + picture_type); + } + WRITE_VREG(AVS_BUFFERIN, ~(1 << buffer_index)); + } else if (reg & INTERLACE_FLAG || force_interlaced_frame) { /* interlace */ + throw_pb_flag = 0; + + if (debug_flag & AVS_DEBUG_PRINT) { + pr_info("interlace, picture type %d\n", + picture_type); + } + + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + set_frame_info(vf, &dur); + vf->bufWidth = 1920; + 
pic_type = 2; + if ((picture_type == I_PICTURE) && pts_valid) { + vf->pts = pts; + vf->pts_us64 = pts_us64; + if ((repeat_count > 1) && avi_flag) { + /* next_pts = pts + + * (vavs_amstream_dec_info.rate * + * repeat_count >> 1)*15/16; + */ + next_pts = + pts + + (dur * repeat_count >> 1) * + 15 / 16; + } else + next_pts = 0; + } else { + vf->pts = next_pts; + if (vf->pts == 0) { + vf->pts_us64 = 0; + } + if ((repeat_count > 1) && avi_flag) { + /* vf->duration = + * vavs_amstream_dec_info.rate * + * repeat_count >> 1; + */ + vf->duration = dur * repeat_count >> 1; + if (next_pts != 0) { + next_pts += + ((vf->duration) - + ((vf->duration) >> 4)); + } + } else { + /* vf->duration = + * vavs_amstream_dec_info.rate >> 1; + */ + vf->duration = dur >> 1; + next_pts = 0; + } + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->duration_pulldown = 0; + if (force_interlaced_frame) { + vf->type = VIDTYPE_INTERLACE_TOP; + }else{ + vf->type = + (reg & TOP_FIELD_FIRST_FLAG) + ? VIDTYPE_INTERLACE_TOP + : VIDTYPE_INTERLACE_BOTTOM; + } +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + + if (debug_flag & AVS_DEBUG_PRINT) { + pr_info("buffer_index %d, canvas addr %x\n", + buffer_index, vf->canvas0Addr); + } + + vf->pts = (pts_valid)?pts:0; + /* + *vf->pts_us64 = (pts_valid) ? 
pts_us64 : 0; + */ + vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + buffer_index); + decoder_do_frame_check(NULL, vf); + kfifo_put(&display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + set_frame_info(vf, &dur); + vf->bufWidth = 1920; + if (force_interlaced_frame) + vf->pts = 0; + else + vf->pts = next_pts; + if (vf->pts == 0) { + vf->pts_us64 = 0; + } + if ((repeat_count > 1) && avi_flag) { + /* vf->duration = vavs_amstream_dec_info.rate * + * repeat_count >> 1; + */ + vf->duration = dur * repeat_count >> 1; + if (next_pts != 0) { + next_pts += + ((vf->duration) - + ((vf->duration) >> 4)); + } + } else { + /* vf->duration = vavs_amstream_dec_info.rate + * >> 1; + */ + vf->duration = dur >> 1; + next_pts = 0; + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->duration_pulldown = 0; + if (force_interlaced_frame) { + vf->type = VIDTYPE_INTERLACE_BOTTOM; + } else { + vf->type = + (reg & TOP_FIELD_FIRST_FLAG) ? 
+ VIDTYPE_INTERLACE_BOTTOM : + VIDTYPE_INTERLACE_TOP; + } +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + vf->pts_us64 = 0; + vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + buffer_index); + + kfifo_put(&display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + total_frame++; + } else { /* progressive */ + throw_pb_flag = 0; + + if (debug_flag & AVS_DEBUG_PRINT) { + pr_info("progressive picture type %d\n", + picture_type); + } + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + set_frame_info(vf, &dur); + vf->bufWidth = 1920; + pic_type = 1; + + if ((picture_type == I_PICTURE) && pts_valid) { + vf->pts = pts; + if ((repeat_count > 1) && avi_flag) { + /* next_pts = pts + + * (vavs_amstream_dec_info.rate * + * repeat_count)*15/16; + */ + next_pts = + pts + + (dur * repeat_count) * 15 / 16; + } else + next_pts = 0; + } else { + vf->pts = next_pts; + if (vf->pts == 0) { + vf->pts_us64 = 0; + } + if ((repeat_count > 1) && avi_flag) { + /* vf->duration = + * vavs_amstream_dec_info.rate * + * repeat_count; + */ + vf->duration = dur * repeat_count; + if (next_pts != 0) { + next_pts += + ((vf->duration) - + ((vf->duration) >> 4)); + } + } else { + /* vf->duration = + * vavs_amstream_dec_info.rate; + */ + vf->duration = dur; + next_pts = 0; + } + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->duration_pulldown = 0; + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + vf->pts = (pts_valid)?pts:0; + /* + *vf->pts_us64 = (pts_valid) ? 
pts_us64 : 0; + */ + if (debug_flag & AVS_DEBUG_PRINT) { + pr_info("buffer_index %d, canvas addr %x\n", + buffer_index, vf->canvas0Addr + ); + pr_info("PicType = %d, PTS = 0x%x\n", + picture_type, vf->pts); + } + + vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + buffer_index); + decoder_do_frame_check(NULL, vf); + kfifo_put(&display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + total_frame++; + } + + /*count info*/ + gvs->frame_dur = frame_dur; + vdec_count_info(gvs, 0, offset); + + /* pr_info("PicType = %d, PTS = 0x%x\n", + * picture_type, vf->pts); + */ + WRITE_VREG(AVS_BUFFEROUT, 0); + } + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + +#ifdef HANDLE_AVS_IRQ + return IRQ_HANDLED; +#else + return; +#endif +} +/* + *static int run_flag = 1; + *static int step_flag; + */ +static int error_recovery_mode; /*0: blocky 1: mosaic*/ +/* + *static uint error_watchdog_threshold=10; + *static uint error_watchdog_count; + *static uint error_watchdog_buf_threshold = 0x4000000; + */ + +static struct vframe_s *vavs_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + + if (recover_flag) + return NULL; + + if (kfifo_peek(&display_q, &vf)) + return vf; + + return NULL; + +} + +static struct vframe_s *vavs_vf_get(void *op_arg) +{ + struct vframe_s *vf; + + if (recover_flag) + return NULL; + + if (kfifo_get(&display_q, &vf)) + return vf; + + return NULL; + +} + +static void vavs_vf_put(struct vframe_s *vf, void *op_arg) +{ + int i; + + if (recover_flag) + return; + + for (i = 0; i < VF_POOL_SIZE; i++) { + if (vf == &cur_vfpool[i]) + break; + } + if (i < VF_POOL_SIZE) + kfifo_put(&recycle_q, (const struct vframe_s *)vf); + +} + +static int vavs_event_cb(int type, void *data, void *private_data) +{ + if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if 
(req->req_type == REQ_STATE_SECURE && vdec) + req->req_result[0] = vdec_secure(vdec); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +int vavs_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + if (!(stat & STAT_VDEC_RUN)) + return -1; + + vstatus->frame_width = frame_width; + vstatus->frame_height = frame_height; + if (frame_dur != 0) + vstatus->frame_rate = 96000 / frame_dur; + else + vstatus->frame_rate = -1; + vstatus->error_count = READ_VREG(AV_SCRATCH_C); + vstatus->status = stat; + vstatus->bit_rate = gvs->bit_rate; + vstatus->frame_dur = frame_dur; + vstatus->frame_data = gvs->frame_data; + vstatus->total_data = gvs->total_data; + vstatus->frame_count = gvs->frame_count; + vstatus->error_frame_count = gvs->error_frame_count; + vstatus->drop_frame_count = gvs->drop_frame_count; + vstatus->total_data = gvs->total_data; + vstatus->samp_cnt = gvs->samp_cnt; + vstatus->offset = gvs->offset; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + +int vavs_set_isreset(struct vdec_s *vdec, int isreset) +{ + is_reset = isreset; + return 0; +} + +static int vavs_vdec_info_init(void) +{ + gvs = kzalloc(sizeof(struct vdec_info), GFP_KERNEL); + if (NULL == gvs) { + pr_info("the struct of vdec status malloc failed.\n"); + return -ENOMEM; + } + return 0; +} +/****************************************/ +static int vavs_canvas_init(void) +{ + int i, ret; + u32 canvas_width, canvas_height; + u32 decbuf_size, decbuf_y_size, decbuf_uv_size; + unsigned long buf_start; + int need_alloc_buf_num; + u32 endian; + + vf_buf_num_used = vf_buf_num; + if (buf_size <= 0x00400000) { + /* SD only */ + canvas_width = 768; + canvas_height = 576; + decbuf_y_size = 0x80000; + decbuf_uv_size = 0x20000; + decbuf_size = 0x100000; + } else { + /* HD & SD */ + canvas_width = 1920; + canvas_height = 1088; + decbuf_y_size = 0x200000; + decbuf_uv_size = 0x80000; + decbuf_size = 0x300000; + } + +#ifdef AVSP_LONG_CABAC + 
need_alloc_buf_num = vf_buf_num_used + 2; +#else + need_alloc_buf_num = vf_buf_num_used + 1; +#endif + for (i = 0; i < need_alloc_buf_num; i++) { + + if (i == (need_alloc_buf_num - 1)) + decbuf_size = WORKSPACE_SIZE; +#ifdef AVSP_LONG_CABAC + else if (i == (need_alloc_buf_num - 2)) + decbuf_size = WORKSPACE_SIZE_A; +#endif + ret = decoder_bmmu_box_alloc_buf_phy(mm_blk_handle, i, + decbuf_size, DRIVER_NAME, &buf_start); + if (ret < 0) + return ret; + if (i == (need_alloc_buf_num - 1)) { + if (firmware_sel == 1) + buf_offset = buf_start - + RV_AI_BUFF_START_ADDR; + else + buf_offset = buf_start - + LONG_CABAC_RV_AI_BUFF_START_ADDR; + continue; + } +#ifdef AVSP_LONG_CABAC + else if (i == (need_alloc_buf_num - 2)) { + avsp_heap_adr = codec_mm_phys_to_virt(buf_start); + continue; + } +#endif + if (vdec->canvas_mode == CANVAS_BLKMODE_LINEAR) + endian = 7; + else + endian = 0; +#ifdef NV21 + config_cav_lut_ex(canvas_base + canvas_num * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, + vdec->canvas_mode, endian, VDEC_1); + config_cav_lut_ex(canvas_base + canvas_num * i + 1, + buf_start + + decbuf_y_size, canvas_width, + canvas_height / 2, + CANVAS_ADDR_NOWRAP, + vdec->canvas_mode, endian, VDEC_1); +#else + config_cav_lut_ex(canvas_num * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, + vdec->canvas_mode, endian, VDEC_1); + config_cav_lut_ex(canvas_num * i + 1, + buf_start + + decbuf_y_size, canvas_width / 2, + canvas_height / 2, + CANVAS_ADDR_NOWRAP, + vdec->canvas_mode, endian, VDEC_1); + config_cav_lut_ex(canvas_num * i + 2, + buf_start + + decbuf_y_size + decbuf_uv_size, + canvas_width / 2, canvas_height / 2, + CANVAS_ADDR_NOWRAP, + vdec->canvas_mode, endian, VDEC_1); +#endif + if (debug_flag & AVS_DEBUG_PRINT) { + pr_info("canvas config %d, addr %p\n", i, + (void *)buf_start); + } + + } + return 0; +} + +void vavs_recover(void) +{ + vavs_canvas_init(); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + 
WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 9) | (1 << 8)); + WRITE_VREG(DOS_SW_RESET0, 0); + WRITE_VREG(AV_SCRATCH_H, 0); + if (firmware_sel == 1) { + WRITE_VREG(POWER_CTL_VLD, 0x10); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 2, + MEM_FIFO_CNT_BIT, 2); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 8, + MEM_LEVEL_CNT_BIT, 6); + WRITE_VREG(AV_SCRATCH_H, 1); // 8 buf flag to ucode + } + + + if (firmware_sel == 0) { + /* fixed canvas index */ + WRITE_VREG(AV_SCRATCH_0, canvas_base); + WRITE_VREG(AV_SCRATCH_1, vf_buf_num_used); + } else { + int ii; + + for (ii = 0; ii < 4; ii++) { + WRITE_VREG(AV_SCRATCH_0 + ii, + (canvas_base + canvas_num * ii) | + ((canvas_base + canvas_num * ii + 1) + << 8) | + ((canvas_base + canvas_num * ii + 1) + << 16) + ); + } + } + + /* notify ucode the buffer offset */ + WRITE_VREG(AV_SCRATCH_F, buf_offset); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + WRITE_VREG(AVS_SOS_COUNT, 0); + WRITE_VREG(AVS_BUFFERIN, 0); + WRITE_VREG(AVS_BUFFEROUT, 0); + if (error_recovery_mode) + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 0); + else + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 1); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); +#if 1 /* def DEBUG_UCODE */ + WRITE_VREG(AV_SCRATCH_D, 0); +#endif + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + +#ifdef PIC_DC_NEED_CLEAR + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 31); +#endif + +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) { + WRITE_VREG(LONG_CABAC_DES_ADDR, es_write_addr_phy); + WRITE_VREG(LONG_CABAC_REQ, 0); + WRITE_VREG(LONG_CABAC_PIC_SIZE, 0); + WRITE_VREG(LONG_CABAC_SRC_ADDR, 0); + } +#endif + WRITE_VREG(AV_SCRATCH_N, (u32)(user_data_buffer_phys - buf_offset)); + pr_info("support_user_data = %d\n", support_user_data); + if 
(support_user_data) + WRITE_VREG(AV_SCRATCH_M, 1); + else + WRITE_VREG(AV_SCRATCH_M, 0); + + WRITE_VREG(AV_SCRATCH_5, 0); + +} + +static int vavs_prot_init(void) +{ + int r; +#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 9) | (1 << 8)); + WRITE_VREG(DOS_SW_RESET0, 0); + +#else + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + READ_RESET_REG(RESET0_REGISTER); + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + + WRITE_RESET_REG(RESET2_REGISTER, RESET_PIC_DC | RESET_DBLK); +#endif + + /***************** reset vld **********************************/ + WRITE_VREG(POWER_CTL_VLD, 0x10); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 2, MEM_FIFO_CNT_BIT, 2); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 8, MEM_LEVEL_CNT_BIT, 6); + /*************************************************************/ + + r = vavs_canvas_init(); + WRITE_VREG(AV_SCRATCH_H, 0); +#ifdef NV21 + if (firmware_sel == 0) { + /* fixed canvas index */ + WRITE_VREG(AV_SCRATCH_0, canvas_base); + WRITE_VREG(AV_SCRATCH_1, vf_buf_num_used); + } else { + int ii; + + for (ii = 0; ii < 4; ii++) { + WRITE_VREG(AV_SCRATCH_0 + ii, + (canvas_base + canvas_num * ii) | + ((canvas_base + canvas_num * ii + 1) + << 8) | + ((canvas_base + canvas_num * ii + 1) + << 16) + ); + } + WRITE_VREG(AV_SCRATCH_H, 1); // 8 buf flag to ucode + } +#else + /* index v << 16 | u << 8 | y */ + WRITE_VREG(AV_SCRATCH_0, 0x020100); + WRITE_VREG(AV_SCRATCH_1, 0x050403); + WRITE_VREG(AV_SCRATCH_2, 0x080706); + WRITE_VREG(AV_SCRATCH_3, 0x0b0a09); +#endif + /* notify ucode the buffer offset */ + WRITE_VREG(AV_SCRATCH_F, buf_offset); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + WRITE_VREG(AVS_SOS_COUNT, 0); 
+ WRITE_VREG(AVS_BUFFERIN, 0); + WRITE_VREG(AVS_BUFFEROUT, 0); + if (error_recovery_mode) + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 0); + else + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 1); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); +#if 1 /* def DEBUG_UCODE */ + WRITE_VREG(AV_SCRATCH_D, 0); +#endif + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + +#ifdef PIC_DC_NEED_CLEAR + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 31); +#endif + +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) { + WRITE_VREG(LONG_CABAC_DES_ADDR, es_write_addr_phy); + WRITE_VREG(LONG_CABAC_REQ, 0); + WRITE_VREG(LONG_CABAC_PIC_SIZE, 0); + WRITE_VREG(LONG_CABAC_SRC_ADDR, 0); + } +#endif + + WRITE_VREG(AV_SCRATCH_N, (u32)(user_data_buffer_phys - buf_offset)); + pr_info("support_user_data = %d\n", support_user_data); + if (support_user_data) + WRITE_VREG(AV_SCRATCH_M, 1); + else + WRITE_VREG(AV_SCRATCH_M, 0); + + return r; +} + +#ifdef AVSP_LONG_CABAC +static unsigned char es_write_addr[MAX_CODED_FRAME_SIZE] __aligned(64); +#endif +static void vavs_local_init(bool is_reset) +{ + int i; + + is_reset = 0; + vavs_ratio = vavs_amstream_dec_info.ratio; + + avi_flag = (unsigned long) vavs_amstream_dec_info.param; + + frame_width = frame_height = frame_dur = frame_prog = 0; + + throw_pb_flag = 1; + + total_frame = 0; + saved_resolution = 0; + next_pts = 0; + +#ifdef DEBUG_PTS + pts_hit = pts_missed = pts_i_hit = pts_i_missed = 0; +#endif + + if (!is_reset) { + INIT_KFIFO(display_q); + INIT_KFIFO(recycle_q); + INIT_KFIFO(newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &vfpool[i]; + + vfpool[i].index = vf_buf_num; + vfpool[i].bufWidth = 1920; + kfifo_put(&newframe_q, vf); + } + + for (i = 0; i < vf_buf_num; i++) + vfbuf_use[i] = 0; + } + + cur_vfpool = vfpool; + + if (recover_flag == 1) + return; + + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + 
mm_blk_handle = NULL; + } + + mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER); + +} + +static int vavs_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + + spin_lock_irqsave(&lock, flags); + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&newframe_q); + states->buf_avail_num = kfifo_len(&display_q); + states->buf_recycle_num = kfifo_len(&recycle_q); + spin_unlock_irqrestore(&lock, flags); + return 0; +} + +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER +static void vavs_ppmgr_reset(void) +{ + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_RESET, NULL); + + vavs_local_init(true); + + pr_info("vavs: vf_ppmgr_reset\n"); +} +#endif + +static void vavs_local_reset(void) +{ + mutex_lock(&vavs_mutex); + //recover_flag = 1; + pr_info("error, local reset\n"); + amvdec_stop(); + msleep(100); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_RESET, NULL); + vavs_local_init(true); + vavs_recover(); + + + reset_userdata_fifo(1); + + + amvdec_start(); + recover_flag = 0; +#if 0 + error_watchdog_count = 0; + + pr_info("pc %x stream buf wp %x rp %x level %x\n", + READ_VREG(MPC_E), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); +#endif + + + + mutex_unlock(&vavs_mutex); +} + +static struct work_struct fatal_error_wd_work; +static struct work_struct notify_work; +static atomic_t error_handler_run = ATOMIC_INIT(0); +static void vavs_fatal_error_handler(struct work_struct *work) +{ + if (debug_flag & AVS_DEBUG_OLD_ERROR_HANDLE) { + mutex_lock(&vavs_mutex); + pr_info("vavs fatal error reset !\n"); + amvdec_stop(); +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vavs_ppmgr_reset(); +#else + vf_light_unreg_provider(&vavs_vf_prov); + vavs_local_init(true); + vf_reg_provider(&vavs_vf_prov); +#endif + vavs_recover(); + amvdec_start(); + 
mutex_unlock(&vavs_mutex); + } else { + pr_info("avs fatal_error_handler\n"); + vavs_local_reset(); + } + atomic_set(&error_handler_run, 0); +} + +static void vavs_notify_work(struct work_struct *work) +{ + if (fr_hint_status == VDEC_NEED_HINT) { + vf_notify_receiver(PROVIDER_NAME , + VFRAME_EVENT_PROVIDER_FR_HINT , + (void *)((unsigned long)frame_dur)); + fr_hint_status = VDEC_HINTED; + } + return; +} + +static void avs_set_clk(struct work_struct *work) +{ + int fps = 96000 / frame_dur; + + saved_resolution = frame_width * frame_height * fps; + if (firmware_sel == 0 && + (debug_flag & AVS_DEBUG_USE_FULL_SPEED)) { + vdec_source_changed(VFORMAT_AVS, + 4096, 2048, 60); + } else { + vdec_source_changed(VFORMAT_AVS, + frame_width, frame_height, fps); + } +} + +static void vavs_put_timer_func(struct timer_list *timer) +{ +#ifndef HANDLE_AVS_IRQ + vavs_isr(); +#endif + + if (READ_VREG(AVS_SOS_COUNT)) { + if (!error_recovery_mode) { +#if 0 + if (debug_flag & AVS_DEBUG_OLD_ERROR_HANDLE) { + mutex_lock(&vavs_mutex); + pr_info("vavs fatal error reset !\n"); + amvdec_stop(); +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vavs_ppmgr_reset(); +#else + vf_light_unreg_provider(&vavs_vf_prov); + vavs_local_init(true); + vf_reg_provider(&vavs_vf_prov); +#endif + vavs_recover(); + amvdec_start(); + mutex_unlock(&vavs_mutex); + } else { + vavs_local_reset(); + } +#else + if (!atomic_read(&error_handler_run)) { + atomic_set(&error_handler_run, 1); + pr_info("AVS_SOS_COUNT = %d\n", + READ_VREG(AVS_SOS_COUNT)); + pr_info("WP = 0x%x, RP = 0x%x, LEVEL = 0x%x, AVAIL = 0x%x, CUR_PTR = 0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL), + READ_VREG(VLD_MEM_VIFIFO_CURR_PTR)); + schedule_work(&fatal_error_wd_work); + } +#endif + } + } +#if 0 + if (long_cabac_busy == 0 && + error_watchdog_threshold > 0 && + kfifo_len(&display_q) == 0 && + READ_VREG(VLD_MEM_VIFIFO_LEVEL) > + 
error_watchdog_buf_threshold) { + pr_info("newq %d dispq %d recyq %d\r\n", + kfifo_len(&newframe_q), + kfifo_len(&display_q), + kfifo_len(&recycle_q)); + pr_info("pc %x stream buf wp %x rp %x level %x\n", + READ_VREG(MPC_E), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + error_watchdog_count++; + if (error_watchdog_count >= error_watchdog_threshold) + vavs_local_reset(); + } else + error_watchdog_count = 0; +#endif + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + + if (!kfifo_is_empty(&recycle_q) && (READ_VREG(AVS_BUFFERIN) == 0)) { + struct vframe_s *vf; + + if (kfifo_get(&recycle_q, &vf)) { + if ((vf->index < vf_buf_num) && + (--vfbuf_use[vf->index] == 0)) { + WRITE_VREG(AVS_BUFFERIN, ~(1 << vf->index)); + vf->index = vf_buf_num; + } + kfifo_put(&newframe_q, + (const struct vframe_s *)vf); + } + } + + if (frame_dur > 0 && saved_resolution != + frame_width * frame_height * (96000 / frame_dur)) + schedule_work(&set_clk_work); + + timer->expires = jiffies + PUT_INTERVAL; + + add_timer(timer); +} + +#ifdef AVSP_LONG_CABAC + +static void long_cabac_do_work(struct work_struct *work) +{ + int status = 0; +#ifdef PERFORMANCE_DEBUG + pr_info("enter %s buf level (new %d, display %d, recycle %d)\r\n", + __func__, + kfifo_len(&newframe_q), + kfifo_len(&display_q), + kfifo_len(&recycle_q) + ); +#endif + mutex_lock(&vavs_mutex); + long_cabac_busy = 1; + while (READ_VREG(LONG_CABAC_REQ)) { + if (process_long_cabac() < 0) { + status = -1; + break; + } + } + long_cabac_busy = 0; + mutex_unlock(&vavs_mutex); +#ifdef PERFORMANCE_DEBUG + pr_info("exit %s buf level (new %d, display %d, recycle %d)\r\n", + __func__, + kfifo_len(&newframe_q), + kfifo_len(&display_q), + kfifo_len(&recycle_q) + ); +#endif + if (status < 0) { + pr_info("transcoding error, local reset\r\n"); + 
vavs_local_reset(); + } + +} +#endif + +#ifdef AVSP_LONG_CABAC +static void init_avsp_long_cabac_buf(void) +{ +#if 0 + es_write_addr_phy = (unsigned long)codec_mm_alloc_for_dma( + "vavs", + PAGE_ALIGN(MAX_CODED_FRAME_SIZE)/PAGE_SIZE, + 0, CODEC_MM_FLAGS_DMA_CPU); + es_write_addr_virt = codec_mm_phys_to_virt(es_write_addr_phy); + +#elif 0 + es_write_addr_virt = + (void *)dma_alloc_coherent(amports_get_dma_device(), + MAX_CODED_FRAME_SIZE, &es_write_addr_phy, + GFP_KERNEL); +#else + /*es_write_addr_virt = kmalloc(MAX_CODED_FRAME_SIZE, GFP_KERNEL); + * es_write_addr_virt = (void *)__get_free_pages(GFP_KERNEL, + * get_order(MAX_CODED_FRAME_SIZE)); + */ + es_write_addr_virt = &es_write_addr[0]; + if (es_write_addr_virt == NULL) { + pr_err("%s: failed to alloc es_write_addr_virt buffer\n", + __func__); + return; + } + + es_write_addr_phy = dma_map_single(amports_get_dma_device(), + es_write_addr_virt, + MAX_CODED_FRAME_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(amports_get_dma_device(), + es_write_addr_phy)) { + pr_err("%s: failed to map es_write_addr_virt buffer\n", + __func__); + /*kfree(es_write_addr_virt);*/ + es_write_addr_virt = NULL; + return; + } +#endif + + +#ifdef BITSTREAM_READ_TMP_NO_CACHE + bitstream_read_tmp = + (void *)dma_alloc_coherent(amports_get_dma_device(), + SVA_STREAM_BUF_SIZE, &bitstream_read_tmp_phy, + GFP_KERNEL); + +#else + + bitstream_read_tmp = kmalloc(SVA_STREAM_BUF_SIZE, GFP_KERNEL); + /*bitstream_read_tmp = (void *)__get_free_pages(GFP_KERNEL, + *get_order(MAX_CODED_FRAME_SIZE)); + */ + if (bitstream_read_tmp == NULL) { + pr_err("%s: failed to alloc bitstream_read_tmp buffer\n", + __func__); + return; + } + + bitstream_read_tmp_phy = dma_map_single(amports_get_dma_device(), + bitstream_read_tmp, + SVA_STREAM_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(amports_get_dma_device(), + bitstream_read_tmp_phy)) { + pr_err("%s: failed to map rpm buffer\n", __func__); + kfree(bitstream_read_tmp); + bitstream_read_tmp = NULL; + return; 
+ } +#endif +} +#endif + + +static s32 vavs_init(void) +{ + int ret, size = -1; + char *buf = vmalloc(0x1000 * 16); + + if (IS_ERR_OR_NULL(buf)) + return -ENOMEM; + + pr_info("vavs_init\n"); + + stat |= STAT_TIMER_INIT; + + amvdec_enable(); + vavs_local_init(false); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) + size = get_firmware_data(VIDEO_DEC_AVS, buf); + else { + if (firmware_sel == 1) + size = get_firmware_data(VIDEO_DEC_AVS_NOCABAC, buf); +#ifdef AVSP_LONG_CABAC + else { + init_avsp_long_cabac_buf(); + size = get_firmware_data(VIDEO_DEC_AVS, buf); + } +#endif + } + + if (size < 0) { + amvdec_disable(); + pr_err("get firmware fail."); + vfree(buf); + return -1; + } + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) + ret = amvdec_loadmc_ex(VFORMAT_AVS, NULL, buf); + else if (firmware_sel == 1) + ret = amvdec_loadmc_ex(VFORMAT_AVS, "avs_no_cabac", buf); + else + ret = amvdec_loadmc_ex(VFORMAT_AVS, NULL, buf); + + if (ret < 0) { + amvdec_disable(); + vfree(buf); + pr_err("AVS: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + return -EBUSY; + } + + vfree(buf); + + stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + ret = vavs_prot_init(); + if (ret < 0) + return ret; + +#ifdef HANDLE_AVS_IRQ + if (vdec_request_irq(VDEC_IRQ_1, vavs_isr, + "vavs-irq", (void *)vavs_dec_id)) { + amvdec_disable(); + pr_info("vavs irq register error.\n"); + return -ENOENT; + } +#endif + + stat |= STAT_ISR_REG; + +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_provider_init(&vavs_vf_prov, PROVIDER_NAME, &vavs_vf_provider, NULL); + vf_reg_provider(&vavs_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); +#else + vf_provider_init(&vavs_vf_prov, PROVIDER_NAME, &vavs_vf_provider, NULL); + vf_reg_provider(&vavs_vf_prov); +#endif + + if (vavs_amstream_dec_info.rate != 0) { + if (!is_reset) { + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *)((unsigned long) + vavs_amstream_dec_info.rate)); + fr_hint_status = VDEC_HINTED; + } + } else + fr_hint_status = VDEC_NEED_HINT; + + stat |= STAT_VF_HOOK; + + timer_setup(&recycle_timer, vavs_put_timer_func, 0); + recycle_timer.expires = jiffies + PUT_INTERVAL; + add_timer(&recycle_timer); + + stat |= STAT_TIMER_ARM; + +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) + INIT_WORK(&long_cabac_wd_work, long_cabac_do_work); +#endif + vdec_source_changed(VFORMAT_AVS, + 1920, 1080, 30); + amvdec_start(); + + stat |= STAT_VDEC_RUN; + + return 0; +} + +static int amvdec_avs_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + + if (pdata == NULL) { + pr_info("amvdec_avs memory resource undefined.\n"); + return -EFAULT; + } + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM || disable_longcabac_trans) + firmware_sel = 1; + + if (firmware_sel == 1) { + vf_buf_num = 8; + canvas_base = 0; + canvas_num = 2; + } else { + + canvas_base = 128; + canvas_num = 2; /*NV21*/ + } + + + if (pdata->sys_info) + vavs_amstream_dec_info = 
*pdata->sys_info; + + pr_info("%s (%d,%d) %d\n", __func__, vavs_amstream_dec_info.width, + vavs_amstream_dec_info.height, vavs_amstream_dec_info.rate); + + pdata->dec_status = vavs_dec_status; + pdata->set_isreset = vavs_set_isreset; + is_reset = 0; + + pdata->user_data_read = NULL; + pdata->reset_userdata_fifo = NULL; + + vavs_vdec_info_init(); + + + if (NULL == user_data_buffer) { + user_data_buffer = + dma_alloc_coherent(amports_get_dma_device(), + USER_DATA_SIZE, + &user_data_buffer_phys, GFP_KERNEL); + if (!user_data_buffer) { + pr_info("%s: Can not allocate user_data_buffer\n", + __func__); + return -ENOMEM; + } + pr_debug("user_data_buffer = 0x%p, user_data_buffer_phys = 0x%x\n", + user_data_buffer, (u32)user_data_buffer_phys); + } + + INIT_WORK(&set_clk_work, avs_set_clk); + vdec = pdata; + + INIT_WORK(&fatal_error_wd_work, vavs_fatal_error_handler); + atomic_set(&error_handler_run, 0); + + INIT_WORK(&userdata_push_work, userdata_push_do_work); + INIT_WORK(¬ify_work, vavs_notify_work); + + if (vavs_init() < 0) { + pr_info("amvdec_avs init failed.\n"); + kfree(gvs); + gvs = NULL; + pdata->dec_status = NULL; + return -ENODEV; + } + return 0; +} + +static int amvdec_avs_remove(struct platform_device *pdev) +{ + if (stat & STAT_TIMER_ARM) { + del_timer_sync(&recycle_timer); + stat &= ~STAT_TIMER_ARM; + } + cancel_work_sync(&fatal_error_wd_work); + atomic_set(&error_handler_run, 0); + + cancel_work_sync(&userdata_push_work); + + cancel_work_sync(¬ify_work); + if (stat & STAT_VDEC_RUN) { + amvdec_stop(); + stat &= ~STAT_VDEC_RUN; + } + + if (stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)vavs_dec_id); + stat &= ~STAT_ISR_REG; + } + +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) { + mutex_lock(&vavs_mutex); + cancel_work_sync(&long_cabac_wd_work); + mutex_unlock(&vavs_mutex); + + if (es_write_addr_virt) { +#if 0 + codec_mm_free_for_dma("vavs", es_write_addr_phy); +#else + dma_unmap_single(amports_get_dma_device(), + es_write_addr_phy, + 
MAX_CODED_FRAME_SIZE, DMA_FROM_DEVICE); + /*kfree(es_write_addr_virt);*/ + es_write_addr_virt = NULL; +#endif + } + +#ifdef BITSTREAM_READ_TMP_NO_CACHE + if (bitstream_read_tmp) { + dma_free_coherent(amports_get_dma_device(), + SVA_STREAM_BUF_SIZE, bitstream_read_tmp, + bitstream_read_tmp_phy); + bitstream_read_tmp = NULL; + } +#else + if (bitstream_read_tmp) { + dma_unmap_single(amports_get_dma_device(), + bitstream_read_tmp_phy, + SVA_STREAM_BUF_SIZE, DMA_FROM_DEVICE); + kfree(bitstream_read_tmp); + bitstream_read_tmp = NULL; + } +#endif + } +#endif + if (stat & STAT_VF_HOOK) { + if (fr_hint_status == VDEC_HINTED) + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_END_HINT, NULL); + fr_hint_status = VDEC_NO_NEED_HINT; + vf_unreg_provider(&vavs_vf_prov); + stat &= ~STAT_VF_HOOK; + } + + + if (user_data_buffer != NULL) { + dma_free_coherent( + amports_get_dma_device(), + USER_DATA_SIZE, + user_data_buffer, + user_data_buffer_phys); + user_data_buffer = NULL; + user_data_buffer_phys = 0; + } + + + amvdec_disable(); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TM2) + vdec_reset_core(NULL); + pic_type = 0; + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } +#ifdef DEBUG_PTS + pr_debug("pts hit %d, pts missed %d, i hit %d, missed %d\n", pts_hit, + pts_missed, pts_i_hit, pts_i_missed); + pr_debug("total frame %d, avi_flag %d, rate %d\n", total_frame, avi_flag, + vavs_amstream_dec_info.rate); +#endif + kfree(gvs); + gvs = NULL; + vdec = NULL; + + cancel_work_sync(&set_clk_work); + return 0; +} + +/****************************************/ +#ifdef CONFIG_PM +static int avs_suspend(struct device *dev) +{ + amvdec_suspend(to_platform_device(dev), dev->power.power_state); + return 0; +} + +static int avs_resume(struct device *dev) +{ + amvdec_resume(to_platform_device(dev)); + return 0; +} + +static const struct dev_pm_ops avs_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(avs_suspend, avs_resume) +}; +#endif + +static struct 
platform_driver amvdec_avs_driver = { + .probe = amvdec_avs_probe, + .remove = amvdec_avs_remove, + .driver = { + .name = DRIVER_NAME, +#ifdef CONFIG_PM + .pm = &avs_pm_ops, +#endif + } +}; + +static struct codec_profile_t amvdec_avs_profile = { + .name = "avs", + .profile = "" +}; + +static struct mconfig avs_configs[] = { + MC_PU32("stat", &stat), + MC_PU32("debug_flag", &debug_flag), + MC_PU32("error_recovery_mode", &error_recovery_mode), + MC_PU32("pic_type", &pic_type), + MC_PU32("radr", &radr), + MC_PU32("vf_buf_num", &vf_buf_num), + MC_PU32("vf_buf_num_used", &vf_buf_num_used), + MC_PU32("canvas_base", &canvas_base), + MC_PU32("firmware_sel", &firmware_sel), +}; +static struct mconfig_node avs_node; + + +static int __init amvdec_avs_driver_init_module(void) +{ + pr_debug("amvdec_avs module init\n"); + + if (platform_driver_register(&amvdec_avs_driver)) { + pr_info("failed to register amvdec_avs driver\n"); + return -ENODEV; + } + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB) + amvdec_avs_profile.profile = "avs+"; + + vcodec_profile_register(&amvdec_avs_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &avs_node, + "avs", avs_configs, CONFIG_FOR_RW); + return 0; +} + +static void __exit amvdec_avs_driver_remove_module(void) +{ + pr_debug("amvdec_avs module remove.\n"); + + platform_driver_unregister(&amvdec_avs_driver); +} + +/****************************************/ + +module_param(stat, uint, 0664); +MODULE_PARM_DESC(stat, "\n amvdec_avs stat\n"); + +/****************************************** + *module_param(run_flag, uint, 0664); + *MODULE_PARM_DESC(run_flag, "\n run_flag\n"); + * + *module_param(step_flag, uint, 0664); + *MODULE_PARM_DESC(step_flag, "\n step_flag\n"); + ******************************************* + */ + +module_param(debug_flag, uint, 0664); +MODULE_PARM_DESC(debug_flag, "\n debug_flag\n"); + +module_param(error_recovery_mode, uint, 0664); +MODULE_PARM_DESC(error_recovery_mode, "\n error_recovery_mode\n"); + 
+/****************************************** + *module_param(error_watchdog_threshold, uint, 0664); + *MODULE_PARM_DESC(error_watchdog_threshold, "\n error_watchdog_threshold\n"); + * + *module_param(error_watchdog_buf_threshold, uint, 0664); + *MODULE_PARM_DESC(error_watchdog_buf_threshold, + * "\n error_watchdog_buf_threshold\n"); + ******************************************* + */ + +module_param(pic_type, uint, 0444); +MODULE_PARM_DESC(pic_type, "\n amdec_vas picture type\n"); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\nrval\n"); + +module_param(vf_buf_num, uint, 0664); +MODULE_PARM_DESC(vf_buf_num, "\nvf_buf_num\n"); + +module_param(vf_buf_num_used, uint, 0664); +MODULE_PARM_DESC(vf_buf_num_used, "\nvf_buf_num_used\n"); + +module_param(canvas_base, uint, 0664); +MODULE_PARM_DESC(canvas_base, "\ncanvas_base\n"); + + +module_param(firmware_sel, uint, 0664); +MODULE_PARM_DESC(firmware_sel, "\n firmware_sel\n"); + +module_param(disable_longcabac_trans, uint, 0664); +MODULE_PARM_DESC(disable_longcabac_trans, "\n disable_longcabac_trans\n"); + +module_param(dec_control, uint, 0664); +MODULE_PARM_DESC(dec_control, "\n amvdec_vavs decoder control\n"); + +module_param(support_user_data, uint, 0664); +MODULE_PARM_DESC(support_user_data, "\n support_user_data\n"); + +module_init(amvdec_avs_driver_init_module); +module_exit(amvdec_avs_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC AVS Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Qi Wang <qi.wang@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/avs/avs.h b/drivers/frame_provider/decoder/avs/avs.h new file mode 100644 index 0000000..8277d20 --- /dev/null +++ b/drivers/frame_provider/decoder/avs/avs.h
@@ -0,0 +1,91 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef AVS_H_ +#define AVS_H_ + +#ifdef CONFIG_AMLOGIC_AVSP_LONG_CABAC +#define AVSP_LONG_CABAC +#endif +/*#define BITSTREAM_READ_TMP_NO_CACHE*/ + +#ifdef AVSP_LONG_CABAC +#define MAX_CODED_FRAME_SIZE 1500000 /*!< bytes for one frame*/ +#define LOCAL_HEAP_SIZE (1024*1024*10) +/* + *#define MAX_CODED_FRAME_SIZE 240000 + *#define MAX_CODED_FRAME_SIZE 700000 + */ +#define SVA_STREAM_BUF_SIZE 1024 + +extern void *es_write_addr_virt; +extern dma_addr_t es_write_addr_phy; + +extern void *bitstream_read_tmp; +extern dma_addr_t bitstream_read_tmp_phy; +extern void *avsp_heap_adr; + +int avs_get_debug_flag(void); + +int process_long_cabac(void); + +/* bit [6] - skip_mode_flag + * bit [5:4] - picture_type + * bit [3] - picture_structure (0-Field, 1-Frame) + * bit [2] - fixed_picture_qp + * bit [1] - progressive_sequence + * bit [0] - active + */ +#define LONG_CABAC_REQ AV_SCRATCH_K +#define LONG_CABAC_SRC_ADDR AV_SCRATCH_H +#define LONG_CABAC_DES_ADDR AV_SCRATCH_I +/* bit[31:16] - vertical_size + * bit[15:0] - horizontal_size + */ +#define LONG_CABAC_PIC_SIZE AV_SCRATCH_J + +#endif + +/* + *#define PERFORMANCE_DEBUG + *#define DUMP_DEBUG + */ +#define AVS_DEBUG_PRINT 0x01 
+#define AVS_DEBUG_UCODE 0x02 +#define AVS_DEBUG_OLD_ERROR_HANDLE 0x10 +#define AVS_DEBUG_USE_FULL_SPEED 0x80 +#define AEC_DUMP 0x100 +#define STREAM_INFO_DUMP 0x200 +#define SLICE_INFO_DUMP 0x400 +#define MB_INFO_DUMP 0x800 +#define MB_NUM_DUMP 0x1000 +#define BLOCK_NUM_DUMP 0x2000 +#define COEFF_DUMP 0x4000 +#define ES_DUMP 0x8000 +#define DQUANT_DUMP 0x10000 +#define STREAM_INFO_DUMP_MORE 0x20000 +#define STREAM_INFO_DUMP_MORE2 0x40000 + +extern void *es_write_addr_virt; +extern void *bitstream_read_tmp; +extern dma_addr_t bitstream_read_tmp_phy; +int read_bitstream(unsigned char *Buf, int size); +int u_v(int LenInBits, char *tracestring); + +#endif
diff --git a/drivers/frame_provider/decoder/avs/avsp_trans.c b/drivers/frame_provider/decoder/avs/avsp_trans.c new file mode 100644 index 0000000..a92dbb9 --- /dev/null +++ b/drivers/frame_provider/decoder/avs/avsp_trans.c
@@ -0,0 +1,5065 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/dma-mapping.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/slab.h> +/* #include <mach/am_regs.h> */ +#include <linux/module.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../../stream_input/amports/streambuf_reg.h" +#include "../utils/amvdec.h" +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" + +#include "avs.h" +#ifdef AVSP_LONG_CABAC + +#define DECODING_SANITY_CHECK + +#define TRACE 0 +#define LIWR_FIX 0 +#define pow2(a, b) (1<<b) +#define io_printf pr_info + +static unsigned char *local_heap_adr; +static int local_heap_size; 
+static int local_heap_pos; +static int transcoding_error_flag; + +unsigned char *local_alloc(int num, int size) +{ + unsigned char *ret_buf = NULL; + int alloc_size = num * size; + + if ((local_heap_pos + alloc_size) <= local_heap_size) { + ret_buf = local_heap_adr + local_heap_pos; + local_heap_pos += alloc_size; + } else { + pr_info( + "!!!local_alloc(%d) error, local_heap (size %d) is not enough\r\n", + alloc_size, local_heap_size); + } + return ret_buf; +} + +int local_heap_init(int size) +{ + /*local_heap_adr = &local_heap[0];*/ + local_heap_adr = (unsigned char *)(avsp_heap_adr + + MAX_CODED_FRAME_SIZE); + memset(local_heap_adr, 0, LOCAL_HEAP_SIZE); + + local_heap_size = LOCAL_HEAP_SIZE; + local_heap_pos = 0; + return 0; +} + +void local_heap_uninit(void) +{ + local_heap_adr = NULL; + local_heap_size = 0; + local_heap_pos = 0; +} + +#define CODE2D_ESCAPE_SYMBOL 59 + +const int vlc_golomb_order[3][7][2] = + +{{{2, 9}, {2, 9}, {2, 9}, {2, 9}, {2, 9}, {2, 9}, {2, 9}, }, {{3, 9}, {2, 9}, { + 2, 9}, {2, 9}, {2, 9}, {2, 9}, {2, 9}, }, {{2, 9}, {0, 9}, + {1, 9}, {1, 9}, {0, 9}, {-1, -1}, {-1, -1}, }, }; + +const int MaxRun[3][7] = {{22, 14, 9, 6, 4, 2, 1}, {25, 18, 13, 9, 6, 4, 3}, { + 24, 19, 10, 7, 4, -1, -1} }; + +const int refabslevel[19][26] = {{4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, -1, -1, -1}, {7, 4, 4, 3, 3, 3, 3, 3, 2, + 2, 2, 2, 2, 2, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + 10, 6, 4, 4, 3, 3, 3, 2, 2, 2, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1}, {13, 7, 5, 4, 3, 2, 2, -1, -1, + -1 - 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {18, 8, 4, 2, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {22, 7, 3, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {27, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {4, + 3, 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2}, {5, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, -1, -1, -1, -1, -1, -1, -1}, {7, 5, 4, 4, 3, 3, 3, 2, 2, + 2, 2, 2, 2, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + {10, 6, 5, 4, 3, 3, 2, 2, 2, 2, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1}, {13, 7, 5, 4, + 3, 2, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {17, 8, 4, + 3, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + 22, 6, 3, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {5, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -1}, {6, 4, 3, + 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, -1, -1, -1, -1, -1, -1}, {10, 6, 4, 4, 3, 3, + 2, 2, 2, 2, 2, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {14, 7, 4, 3, 3, 2, + 2, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1}, {20, 7, 3, 2, + 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1} }; + +static const int incvlc_intra[7] = {0, 1, 2, 4, 7, 10, 3000}; +static const int incvlc_chroma[5] = {0, 1, 2, 4, 3000}; + +const int AVS_2DVLC_INTRA[7][26][27] = {{{0, 22, 38, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {2, 32, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {4, 44, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {6, 50, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {8, 54, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {10, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {12, -1, -1, 
-1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {14, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {16, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {18, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {20, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {24, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {26, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {28, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {30, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {34, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {36, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {40, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {42, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {46, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {48, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {52, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {56, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, + -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, }, {{8, 0, 4, 15, 27, 41, + 55, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, 2, 17, 35, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + {-1, 6, 25, 53, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, 9, 33, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 11, 39, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, 13, 45, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, 19, 49, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 21, 51, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, 23, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, 29, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 31, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, 37, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, 43, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 47, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, 57, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, }, {{8, 0, 2, 6, + 13, 17, 27, 35, 45, 55, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, 4, 11, 21, 33, 49, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, 9, 23, 37, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 15, + 29, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 19, 39, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, 25, 43, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, 31, 53, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 41, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 47, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, 57, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, + -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, }, {{8, 0, 2, 4, 9, 11, 17, 21, 25, 33, 39, 45, 55, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 6, 13, 19, + 29, 35, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, 15, 27, 41, 57, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 23, 37, 53, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, 31, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 43, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, 49, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, }, {{6, 0, 2, 4, 7, 9, 11, 15, 17, + 21, 23, 29, 33, 35, 43, 47, 49, 57, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, 13, 19, 27, 31, 37, 45, 55, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + 25, 41, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 39, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, 53, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, }, {{0, + 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 23, 25, 27, 31, 33, 37, 41, + 45, 49, 51, 55, -1, -1, -1, -1, -1}, {-1, 21, 29, 35, 43, 47, + 53, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, 39, 57, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 
-1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, }, {{0, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, + 21, 23, 25, 27, 29, 31, 35, 37, 39, 41, 43, 47, 49, 51, 53, 57}, + {-1, 33, 45, 55, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1} } }; + +const int AVS_2DVLC_CHROMA[5][26][27] = {{{0, 14, 32, 56, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {2, 48, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {4, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1}, {6, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {10, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {12, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {16, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {18, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {20, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {22, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {24, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {26, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {28, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {30, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {34, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {36, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1}, {38, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {40, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {42, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {44, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {46, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {50, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {52, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {54, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, }, {{0, 1, 5, 15, 29, + 43, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, 3, 21, 45, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1}, {-1, 7, 37, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 9, 41, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, 11, 53, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, 17, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 19, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, 23, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 25, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 31, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, 33, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 47, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, 49, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, 55, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 57, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, }, + 
{{2, 0, 3, 7, 11, 17, 27, 33, 47, 53, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, { + -1, 5, 13, 21, 37, 55, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 9, 23, 41, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, 15, 31, 57, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + 19, 43, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, 25, 45, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, 29, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, 39, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, 49, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, }, {{0, 1, 3, 5, 7, 11, 15, 19, 23, 29, + 35, 43, 47, 53, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}, {-1, 9, 13, 21, 31, 39, 51, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1}, {-1, 17, 27, + 37, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + {-1, 25, 41, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, { + -1, 33, 55, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 45, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, { + -1, 49, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, 57, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 
-1, -1, + -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1}, { + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1}, {-1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 
-1, -1, -1, -1, -1}, }, + {{0, 1, 3, 5, 7, 9, 11, 13, 15, 19, 21, 23, 27, 29, 33, 37, 41, + 43, 51, 55, -1, -1, -1, -1, -1, -1, -1}, {-1, + 17, 25, 31, 39, 45, 53, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, 35, 49, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, 47, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + 57, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, 
-1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, {-1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1}, } }; + +const int UE[64][2] = {{1, 1}, {2, 3}, {3, 3}, {4, 5}, {5, 5}, {6, 5}, {7, 5}, { + 8, 7}, {9, 7}, {10, 7}, {11, 7}, {12, 7}, {13, 7}, {14, 7}, {15, + 7}, {16, 9}, {17, 9}, {18, 9}, {19, 9}, {20, 9}, {21, 9}, + {22, 9}, {23, 9}, {24, 9}, {25, 9}, {26, 9}, {27, 9}, {28, 9}, { + 29, 9}, {30, 9}, {31, 9}, {32, 11}, {33, 11}, { + 34, 11}, {35, 11}, {36, 11}, {37, 11}, {38, 11}, + {39, 11}, {40, 11}, {41, 11}, {42, 11}, {43, 11}, {44, 11}, {45, + 11}, {46, 11}, {47, 11}, {48, 11}, {49, 11}, { + 50, 11}, {51, 11}, {52, 11}, {53, 11}, {54, 11}, + {55, 11}, {56, 11}, {57, 11}, {58, 11}, {59, 11}, {60, 11}, {61, + 11}, {62, 11}, {63, 11}, {64, 13} }; + +unsigned int src_start; +unsigned int des_start; + +#ifdef AVSP_LONG_CABAC + +unsigned char *es_buf; +unsigned int es_buf_ptr; +unsigned int es_buf_is_overflow; + +#else +FILE *f_es; +#endif +unsigned int es_ptr; +unsigned int es_res; +unsigned int es_res_ptr; +unsigned int previous_es; + +void init_es(void) +{ + +#ifdef 
AVSP_LONG_CABAC + es_buf_is_overflow = 0; + + es_buf[0] = 0x00; + es_buf[1] = 0x00; + es_buf[2] = 0x01; + es_buf_ptr = 3; + es_ptr = 3; +#else + f_es = fopen("es.out", "wb"); + if (f_es == NULL) + io_printf(" ERROR : Can not open es.out for write\n"); + putc(0x00, f_es); + putc(0x00, f_es); + putc(0x01, f_es); + + es_ptr = 3; +#endif + es_res = 0; + es_res_ptr = 0; + previous_es = 0xff; + +} + +void push_es(int value, int num) +{ + unsigned char wr_es_data; + int push_num; + int push_value; + +#ifdef DUMP_DEBUG + if (avs_get_debug_flag() & ES_DUMP) + io_printf(" push_es : value : 0x%x, num : %d\n", value, num); +#endif + while (num > 0) { + if (num >= 8) + push_num = 8; + else + push_num = num; + + num = num - push_num; + push_value = (value >> num); + + es_res = (es_res << push_num) | push_value; + es_res_ptr = es_res_ptr + push_num; + +#ifdef DUMP_DEBUG + if (avs_get_debug_flag() & ES_DUMP) + io_printf(" #### es_res : 0x%X, es_res_ptr : %d\n", + es_res, es_res_ptr); +#endif + + while (es_res_ptr >= 8) { + es_res_ptr = es_res_ptr & 7; + wr_es_data = (es_res >> es_res_ptr) & 0xff; + if ((previous_es == 0) & (wr_es_data < 4)) { + io_printf( + " Insert 2'b10 for emu at position : %d\n", + es_ptr); + + es_res_ptr = es_res_ptr + 2; + wr_es_data = 2; + } +#ifdef AVSP_LONG_CABAC +#ifdef DUMP_DEBUG + if (avs_get_debug_flag() & ES_DUMP) + pr_info("es_buf[%d] = 0x%02x\r\n", + es_buf_ptr, wr_es_data); +#endif + if (!es_buf_is_overflow) { + es_buf[es_buf_ptr++] = wr_es_data; + if (es_buf_ptr >= MAX_CODED_FRAME_SIZE) + es_buf_is_overflow = 1; + } +#else + putc(wr_es_data, f_es); +#endif + es_ptr++; + previous_es = ((previous_es << 8) | wr_es_data) + & 0xffff; + } + + } +} + +#ifdef BLOCK_SIZE +#undef BLOCK_SIZE +#endif + +#define MIN_QP 0 +#define MAX_QP 63 + +#define BLOCK_SIZE 4 +#define B8_SIZE 8 +#define MB_BLOCK_SIZE 16 + +#define BLOCK_MULTIPLE (MB_BLOCK_SIZE/(BLOCK_SIZE*2)) + +#define DECODE_COPY_MB 0 +#define DECODE_MB 1 + +#define NO_INTRA_PMODE 5 +#define 
INTRA_PMODE_4x4 10 +#define NO_INTRA_PMODE_4x4 19 +/* 8x8 intra prediction modes */ +#define VERT_PRED 0 +#define HOR_PRED 1 +#define DC_PRED 2 +#define DOWN_LEFT_PRED 3 +#define DOWN_RIGHT_PRED 4 + +#define VERT_PRED_4x4 0 +#define HOR_PRED_4x4 1 +#define DC_PRED_4x4 2 +#define DOWN_LEFT_PRED_4x4 3 +#define DOWN_RIGHT_PRED_4x4 4 + +#define HOR_DOWN_PRED_4x4 5 +#define VERT_LEFT_PRED_4x4 6 +#define HOR_UP_PRED_4x4 7 +#define VERT_RIGHT_PRED_4x4 8 + +#define DC_PRED_8 0 +#define HOR_PRED_8 1 +#define VERT_PRED_8 2 +#define PLANE_8 3 + +#define LUMA_16DC 0 +#define LUMA_16AC 1 +#define LUMA_8x8 2 +#define LUMA_8x4 3 +#define LUMA_4x8 4 +#define LUMA_4x4 5 +#define CHROMA_DC 6 +#define CHROMA_AC 7 +#define NUM_BLOCK_TYPES 8 + +#define I_PICTURE_START_CODE 0xB3 +#define PB_PICTURE_START_CODE 0xB6 +#define SLICE_START_CODE_MIN 0x00 +#define SLICE_START_CODE_MAX 0xAF +#define USER_DATA_START_CODE 0xB2 +#define SEQUENCE_HEADER_CODE 0xB0 +#define EXTENSION_START_CODE 0xB5 +#define SEQUENCE_END_CODE 0xB1 +#define VIDEO_EDIT_CODE 0xB7 + +#define EOS 1 +#define SOP 2 +#define SOS 3 +#define P8x8 8 +#define I8MB 9 +#define I4MB 10 +#define IBLOCK 11 +#define SI4MB 12 +#define MAXMODE 13 + +#define IS_INTRA(MB) ((MB)->mb_type == I8MB || (MB)->mb_type == I4MB) +#define IS_NEWINTRA(MB) ((MB)->mb_type == I4MB) +#define IS_OLDINTRA(MB) ((MB)->mb_type == I8MB) +#define IS_INTER(MB) ((MB)->mb_type != I8MB && (MB)->mb_type != I4MB) +#define IS_INTERMV(MB) ((MB)->mb_type != I8MB && (MB)->mb_type != I4MB\ + && (MB)->mb_type != 0) + +#define IS_DIRECT(MB) ((MB)->mb_type == 0 && (img->type == B_IMG)) +#define IS_COPY(MB) ((MB)->mb_type == 0 && (img->type == P_IMG)) +#define IS_P8x8(MB) ((MB)->mb_type == P8x8) + +#define P_IMG 0 +#define B_IMG 1 +#define I_IMG 2 + +#define FIELD 0 +#define FRAME 1 + +#define SE_CABP 21 +struct decoding_environment_s { + unsigned int dbuffer; + int dbits_to_go; + unsigned char *dcodestrm; + int *dcodestrm_len; +}; + +struct bi_context_type_s { + unsigned 
char MPS; + unsigned int LG_PMPS; + unsigned char cycno; +}; + + +/********************************************************************** + * C O N T E X T S F O R R M S Y N T A X E L E M E N T S + ********************************************************************** + */ + +#define NUM_MB_TYPE_CTX 11 +#define NUM_B8_TYPE_CTX 9 +#define NUM_MV_RES_CTX 10 +#define NUM_REF_NO_CTX 6 +#define NUM_DELTA_QP_CTX 4 +#define NUM_MB_AFF_CTX 4 + +struct motion_info_contexts_s { + struct bi_context_type_s mb_type_contexts[4][NUM_MB_TYPE_CTX]; + struct bi_context_type_s b8_type_contexts[2][NUM_B8_TYPE_CTX]; + struct bi_context_type_s mv_res_contexts[2][NUM_MV_RES_CTX]; + struct bi_context_type_s ref_no_contexts[2][NUM_REF_NO_CTX]; + struct bi_context_type_s delta_qp_contexts[NUM_DELTA_QP_CTX]; + struct bi_context_type_s mb_aff_contexts[NUM_MB_AFF_CTX]; +#ifdef TEST_WEIGHTING_AEC +struct bi_context_type_s mb_weighting_pred; +#endif +}; + +#define NUM_IPR_CTX 2 +#define NUM_CIPR_CTX 4 +#define NUM_CBP_CTX 4 +#define NUM_BCBP_CTX 4 +#define NUM_MAP_CTX 16 +#define NUM_LAST_CTX 16 + +#define NUM_ONE_CTX 5 +#define NUM_ABS_CTX 5 + +struct texture_info_contexts { + struct bi_context_type_s ipr_contexts[NUM_IPR_CTX]; + struct bi_context_type_s cipr_contexts[NUM_CIPR_CTX]; + struct bi_context_type_s cbp_contexts[3][NUM_CBP_CTX]; + struct bi_context_type_s bcbp_contexts[NUM_BLOCK_TYPES][NUM_BCBP_CTX]; + struct bi_context_type_s one_contexts[NUM_BLOCK_TYPES][NUM_ONE_CTX]; + struct bi_context_type_s abs_contexts[NUM_BLOCK_TYPES][NUM_ABS_CTX]; + struct bi_context_type_s fld_map_contexts[NUM_BLOCK_TYPES][NUM_MAP_CTX]; + struct bi_context_type_s fld_last_contexts + [NUM_BLOCK_TYPES][NUM_LAST_CTX]; + struct bi_context_type_s map_contexts[NUM_BLOCK_TYPES][NUM_MAP_CTX]; + struct bi_context_type_s last_contexts[NUM_BLOCK_TYPES][NUM_LAST_CTX]; +}; +struct img_par; + +struct syntaxelement { + int type; + int value1; + int value2; + int len; + int inf; + unsigned int bitpattern; + int context; + 
int k; + int golomb_grad; + int golomb_maxlevels; +#if TRACE +#define TRACESTRING_SIZE 100 + char tracestring[TRACESTRING_SIZE]; +#endif + + void (*mapping)(int len, int info, int *value1, int *value2); + + void (*reading)(struct syntaxelement *, struct img_par *, + struct decoding_environment_s *); + +}; + +struct bitstream_s { + + int read_len; + int code_len; + + int frame_bitoffset; + int bitstream_length; + + unsigned char *stream_buffer; +}; + +struct datapartition { + + struct bitstream_s *bitstream; + struct decoding_environment_s de_aec; + + int (*read_syntax_element)(struct syntaxelement *, struct img_par *, + struct datapartition *); +/*!< virtual function; + * actual method depends on chosen data partition and + * entropy coding method + */ +}; + +struct slice_s { + int picture_id; + int qp; + int picture_type; + int start_mb_nr; + int max_part_nr; + int num_mb; + + struct datapartition *part_arr; + struct motion_info_contexts_s *mot_ctx; + struct texture_info_contexts *tex_ctx; + int field_ctx[3][2]; +}; + +struct img_par { + int number; + int current_mb_nr; + int max_mb_nr; + int current_slice_nr; + int tr; + int qp; + int type; + + int typeb; + + int width; + int height; + int width_cr; + int height_cr; + int source_bitdepth; + int mb_y; + int mb_x; + int block_y; + int pix_y; + int pix_x; + int pix_c_y; + int block_x; + int pix_c_x; + + int ***mv; + int mpr[16][16]; + + int m7[16][16]; + int m8[/*2*/4][8][8]; + int cof[4][/*6*/8][4][4]; + int cofu[4]; + int **ipredmode; + int quad[256]; + int cod_counter; + + int ***dfmv; + int ***dbmv; + int **fw_reffrarr; + int **bw_reffrarr; + + int ***mv_frm; + int **fw_reffrarr_frm; + int **bw_reffrarr_frm; + int imgtr_next_p; + int imgtr_last_p; + int tr_frm; + int tr_fld; + int imgtr_last_prev_p; + + int no_forward_reference; + int seq_header_indicate; + int b_discard_flag; + + int ***fw_mv; + int ***bw_mv; + int subblock_x; + int subblock_y; + + int buf_cycle; + + int direct_type; + + int ***mv_top; + int 
***mv_bot; + int **fw_reffrarr_top; + int **bw_reffrarr_top; + int **fw_reffrarr_bot; + int **bw_reffrarr_bot; + + int **ipredmode_top; + int **ipredmode_bot; + int ***fw_mv_top; + int ***fw_mv_bot; + int ***bw_mv_top; + int ***bw_mv_bot; + int ***dfmv_top; + int ***dbmv_top; + int ***dfmv_bot; + int ***dbm_bot; + + int toppoc; + int bottompoc; + int framepoc; + unsigned int frame_num; + + unsigned int pic_distance; + int delta_pic_order_cnt_bottom; + + signed int pic_distance_msb; + unsigned int prev_pic_distance_lsb; + signed int curr_pic_distance_msb; + unsigned int this_poc; + + int pic_width_inmbs; + int pic_height_inmbs; + int pic_size_inmbs; + + int block8_x, block8_y; + int structure; + int pn; + int buf_used; + int buf_size; + int picture_structure; + int advanced_pred_mode_disable; + int types; + int current_mb_nr_fld; + + int p_field_enhanced; + int b_field_enhanced; + + int slice_weighting_flag; + int lum_scale[4]; + int lum_shift[4]; + int chroma_scale[4]; + int chroma_shift[4]; + int mb_weighting_flag; + int weighting_prediction; + int mpr_weight[16][16]; + int top_bot; + int bframe_number; + + int auto_crop_right; + int auto_crop_bottom; + + struct slice_s *current_slice; + int is_v_block; + int is_intra_block; + + int new_seq_header_flag; + int new_sequence_flag; + int last_pic_bbv_delay; + + int sequence_end_flag; + int is_top_field; + + int abt_flag; + int qp_shift; + +#ifdef EIGHTH +int eighth_subpixel_flag; +int subpixel_precision; +int unit_length; +int subpixel_mask; + +int max_mvd; +int min_mvd; +#endif + +}; + +struct macroblock { + int qp; + int slice_nr; + int delta_quant; + struct macroblock *mb_available[3][3]; + /*!< pointer to neighboring MBs in a 3x3 window of current MB, + *which is located at [1][1] + * NULL pointer identifies neighboring MBs which are unavailable + */ + + int mb_type; + int mvd[2][BLOCK_MULTIPLE][BLOCK_MULTIPLE][2]; + int cbp, cbp_blk, cbp01; + unsigned long cbp_bits; + + int b8mode[4]; + int b8pdir[4]; + int 
mb_type_2; + int c_ipred_mode_2; + int dct_mode; + + int c_ipred_mode; + int lf_disable; + int lf_alpha_c0_offset; + int lf_beta_offset; + + int CABT[4]; + int CABP[4]; + int cbp_4x4[4]; + + int skip_flag; + + struct macroblock *mb_available_up; + struct macroblock *mb_available_left; + unsigned int mbaddr_a, mbaddr_b, mbaddr_c, mbaddr_d; + unsigned int mbavail_a, mbavail_b, mbavail_c, mbavail_d; + +}; + +struct macroblock *mb_data; + +struct img_par *img; + +struct bitstream_s *curr_stream; + +struct datapartition *alloc_partition(int n); + +unsigned int vld_mem_start_addr; +unsigned int vld_mem_end_addr; + +int marker_bit; + +int progressive_sequence; +int horizontal_size; +int vertical_size; + +int second_ifield; +int pre_img_type; + +/* slice_header() */ +int slice_vertical_position; +int slice_vertical_position_extension; +int fixed_picture_qp; +int fixed_slice_qp; +int slice_qp; + +/* + ************************************************************************* + * Function:ue_v, reads an u(v) syntax element, the length in bits is stored in + the global UsedBits variable + * Input: + tracestring + the string for the trace file + bitstream + the stream to be read from + * Output: + * Return: the value of the coded syntax element + * Attention: + ************************************************************************* + */ +/*! + * definition of AVS syntaxelements + * order of elements follow dependencies for picture reconstruction + */ +/*! 
+ * \brief Assignment of old TYPE partition elements to new + * elements + * + * old element | new elements + * TYPE_HEADER | SE_HEADER, SE_PTYPE + * TYPE_MBHEADER | SE_MBTYPE, SE_REFFRAME, SE_INTRAPREDMODE + * TYPE_MVD | SE_MVD + * TYPE_CBP | SE_CBP_INTRA, SE_CBP_INTER * SE_DELTA_QUANT_INTER + * SE_DELTA_QUANT_INTRA + * TYPE_COEFF_Y | SE_LUM_DC_INTRA, SE_LUM_AC_INTRA, + SE_LUM_DC_INTER, SE_LUM_AC_INTER + * TYPE_2x2DC | SE_CHR_DC_INTRA, SE_CHR_DC_INTER + * TYPE_COEFF_C | SE_CHR_AC_INTRA, SE_CHR_AC_INTER + * TYPE_EOS | SE_EOS + */ + +#define SE_HEADER 0 +#define SE_PTYPE 1 +#define SE_MBTYPE 2 +#define SE_REFFRAME 3 +#define SE_INTRAPREDMODE 4 +#define SE_MVD 5 +#define SE_CBP_INTRA 6 +#define SE_LUM_DC_INTRA 7 +#define SE_CHR_DC_INTRA 8 +#define SE_LUM_AC_INTRA 9 +#define SE_CHR_AC_INTRA 10 +#define SE_CBP_INTER 11 +#define SE_LUM_DC_INTER 12 +#define SE_CHR_DC_INTER 13 +#define SE_LUM_AC_INTER 14 +#define SE_CHR_AC_INTER 15 +#define SE_DELTA_QUANT_INTER 16 +#define SE_DELTA_QUANT_INTRA 17 +#define SE_BFRAME 18 +#define SE_EOS 19 +#define SE_MAX_ELEMENTS 20 +#define SE_CBP01 21 +int chroma_format; +/* + ************************************************************************* + * Function:Reads bits from the bitstream buffer + * Input: + byte buffer[] + containing VLC-coded data bits + int totbitoffset + bit offset from start of partition + int bytecount + total bytes in bitstream + int numbits + number of bits to read + * Output: + * Return: + * Attention: + ************************************************************************* + */ + +int get_bits(unsigned char buffer[], int totbitoffset, int *info, int bytecount, + int numbits) +{ + register int inf; + long byteoffset; + int bitoffset; + + int bitcounter = numbits; + + byteoffset = totbitoffset / 8; + bitoffset = 7 - (totbitoffset % 8); + + inf = 0; + while (numbits) { + inf <<= 1; + inf |= (buffer[byteoffset] & (0x01 << bitoffset)) >> bitoffset; + numbits--; + bitoffset--; + if (bitoffset < 0) { + 
byteoffset++; + bitoffset += 8; + if (byteoffset > bytecount) + return -1; + } + } + + *info = inf; + + + return bitcounter; +} + +/* + ************************************************************************* + * Function:read FLC codeword from UVLC-partition + * Input: + * Output: + * Return: + * Attention: + ************************************************************************* + */ + +int read_syntaxelement_flc(struct syntaxelement *sym) +{ + int frame_bitoffset = curr_stream->frame_bitoffset; + unsigned char *buf = curr_stream->stream_buffer; + int bitstreamlengthinbytes = curr_stream->bitstream_length; + + if ((get_bits(buf, frame_bitoffset, &(sym->inf), bitstreamlengthinbytes, + sym->len)) < 0) + return -1; + + curr_stream->frame_bitoffset += sym->len; + sym->value1 = sym->inf; + +#if TRACE + tracebits2(sym->tracestring, sym->len, sym->inf); +#endif + + return 1; +} + +/* + ************************************************************************* + * Function:ue_v, reads an u(1) syntax element, the length in bits is stored in + the global UsedBits variable + * Input: + tracestring + the string for the trace file + bitstream + the stream to be read from + * Output: + * Return: the value of the coded syntax element + * Attention: + ************************************************************************* + */ +int u_1(char *tracestring) +{ + return u_v(1, tracestring); +} + +/* + ************************************************************************* + * Function:mapping rule for ue(v) syntax elements + * Input:length and info + * Output:number in the code table + * Return: + * Attention: + ************************************************************************* + */ +void linfo_ue(int len, int info, int *value1, int *dummy) +{ + *value1 = (int)pow2(2, (len / 2)) + info - 1; +} + +int u_v(int leninbits, char *tracestring) +{ + struct syntaxelement symbol, *sym = &symbol; + +#ifdef AVSP_LONG_CABAC +#else + assert(curr_stream->stream_buffer != NULL); 
+#endif + sym->type = SE_HEADER; + sym->mapping = linfo_ue; + sym->len = leninbits; + read_syntaxelement_flc(sym); + + return sym->inf; +} + +/* + ************************************************************************* + * Function:mapping rule for se(v) syntax elements + * Input:length and info + * Output:signed mvd + * Return: + * Attention: + ************************************************************************* + */ + +void linfo_se(int len, int info, int *value1, int *dummy) +{ + int n; + + n = (int)pow2(2, (len / 2)) + info - 1; + *value1 = (n + 1) / 2; + if ((n & 0x01) == 0) + *value1 = -*value1; + +} + +/* + ************************************************************************* + * Function:length and info + * Input: + * Output:cbp (intra) + * Return: + * Attention: + ************************************************************************* + */ + +void linfo_cbp_intra(int len, int info, int *cbp, int *dummy) +{ +} + +const int NCBP[64][2] = {{4, 0}, {16, 19}, {17, 16}, {19, 15}, {14, 18}, + {9, 11}, {22, 31}, {8, 13}, {11, 17}, {21, 30}, {10, 12}, + {7, 9}, {12, 10}, {6, 7}, {5, 8}, {1, 1}, {35, 4}, {47, 42}, { + 48, 38}, {38, 27}, {46, 39}, {36, 33}, {50, 59}, + {26, 26}, {45, 40}, {52, 58}, {41, 35}, {28, 25}, {37, 29}, {23, + 24}, {31, 28}, {2, 3}, {43, 5}, {51, 51}, {56, + 52}, {39, 37}, {55, 50}, {33, 43}, {62, 63}, { + 27, 44}, {54, 53}, {60, 62}, {40, 48}, {32, 47}, + {42, 34}, {24, 45}, {29, 49}, {3, 6}, {49, 14}, {53, 55}, {57, + 56}, {25, 36}, {58, 54}, {30, 41}, {59, 60}, { + 15, 21}, {61, 57}, {63, 61}, {44, 46}, {18, 22}, + {34, 32}, {13, 20}, {20, 23}, {0, 2} }; + +unsigned int s1, t1, value_s, value_t; +unsigned char dec_bypass, dec_final; + +#define get_byte() { \ + dbuffer = dcodestrm[(*dcodestrm_len)++];\ + dbits_to_go = 7; \ +} + +#define dbuffer (dep->dbuffer) +#define dbits_to_go (dep->dbits_to_go) +#define dcodestrm (dep->dcodestrm) +#define dcodestrm_len (dep->dcodestrm_len) + +#define B_BITS 10 + +#define LG_PMPS_SHIFTNO 2 + 
/* B_BITS-wide arithmetic-coder interval constants. */
#define HALF (1 << (B_BITS-1))
#define QUARTER (1 << (B_BITS-2))

/*
 * Decode one binary symbol with the AVS AEC arithmetic decoder.
 *
 * bi_ct holds the adaptive context: LG_PMPS (logarithmic LPS probability
 * estimate), MPS (most probable symbol) and cycno (adaptation-rate state,
 * which selects the update window cwr).  The decoder range/offset state
 * lives in the file-scope globals s1/t1 (range) and value_s/value_t
 * (offset); fresh bits are pulled from the global bit buffer via
 * get_byte()/dbuffer/dbits_to_go.
 *
 * Returns the decoded bit.  When the global dec_bypass flag is set
 * (equiprobable/bypass mode, see biari_decode_symbol_eq_prob) the context
 * is NOT adapted.  NOTE(review): exact update constants (197/95/46 and the
 * 256/512 << LG_PMPS_SHIFTNO bounds) presumably follow the AVS1 spec —
 * confirm against GB/T 20090.2 before touching them.
 */
unsigned int biari_decode_symbol(struct decoding_environment_s *dep,
	struct bi_context_type_s *bi_ct)
{
	register unsigned char bit;
	register unsigned char s_flag;
	register unsigned char is_lps = 0;
	register unsigned char cwr;
	register unsigned char cycno = bi_ct->cycno;
	register unsigned int lg_pmps = bi_ct->LG_PMPS;
	register unsigned int t_rlps;
	register unsigned int s2, t2;

#ifdef DUMP_DEBUG
	if (avs_get_debug_flag() & AEC_DUMP)
		io_printf("LG_PMPS : %03X, MPS : %d, cycno : %d -- %p\n",
			bi_ct->LG_PMPS, bi_ct->MPS, bi_ct->cycno, bi_ct);
#endif

	bit = bi_ct->MPS;

	/* Adaptation window: faster updates while cycno is small. */
	cwr = (cycno <= 1) ? 3 : (cycno == 2) ? 4 : 5;

	/* Tentatively subtract the LPS width from the range (s2,t2);
	 * s_flag records a borrow out of the t component.
	 */
	if (t1 >= (lg_pmps >> LG_PMPS_SHIFTNO)) {
		s2 = s1;
		t2 = t1 - (lg_pmps >> LG_PMPS_SHIFTNO);
		s_flag = 0;
	} else {
		s2 = s1 + 1;
		t2 = 256 + t1 - (lg_pmps >> LG_PMPS_SHIFTNO);
		s_flag = 1;
	}

#ifdef DUMP_DEBUG
	if (avs_get_debug_flag() & AEC_DUMP)
		io_printf("	s2 : %d, t2 : %03X\n", s2, t2);
#endif

	if (s2 > value_s || (s2 == value_s && value_t >= t2)) {
		/* Offset fell in the LPS sub-interval: decode !MPS. */
		is_lps = 1;
		bit = !bit;

		t_rlps = (s_flag == 0) ?
			(lg_pmps >> LG_PMPS_SHIFTNO) :
			(t1 + (lg_pmps >> LG_PMPS_SHIFTNO));

		if (s2 == value_s)
			value_t = (value_t - t2);
		else {
			if (--dbits_to_go < 0)
				get_byte();

			value_t = (value_t << 1)
				| ((dbuffer >> dbits_to_go) & 0x01);
			value_t = 256 + value_t - t2;

		}

		/* Renormalize the LPS range, shifting in stream bits. */
		while (t_rlps < QUARTER) {
			t_rlps = t_rlps << 1;
			if (--dbits_to_go < 0)
				get_byte();

			value_t = (value_t << 1)
				| ((dbuffer >> dbits_to_go) & 0x01);
		}

		s1 = 0;
		t1 = t_rlps & 0xff;

		/* Renormalize the offset the same way. */
		value_s = 0;
		while (value_t < QUARTER) {
			int j;

			if (--dbits_to_go < 0)
				get_byte();
			j = (dbuffer >> dbits_to_go) & 0x01;

			value_t = (value_t << 1) | j;
			value_s++;
		}
		value_t = value_t & 0xff;
	} else {
		/* MPS path: the reduced range becomes current. */
		s1 = s2;
		t1 = t2;
	}

	/* Bypass mode: return the bit without adapting the context. */
	if (dec_bypass)
		return bit;

	/* cycno saturates at 3; it only grows on LPS decisions. */
	if (is_lps)
		cycno = (cycno <= 2) ?
			(cycno + 1) : 3;
	else if (cycno == 0)
		cycno = 1;
	bi_ct->cycno = cycno;

	if (is_lps) {
		/* LPS seen: raise the LPS probability estimate. */
		switch (cwr) {
		case 3:
			lg_pmps = lg_pmps + 197;
			break;
		case 4:
			lg_pmps = lg_pmps + 95;
			break;
		default:
			lg_pmps = lg_pmps + 46;
		}

		/* Estimate crossed 1/2: swap MPS and mirror the estimate. */
		if (lg_pmps >= (256 << LG_PMPS_SHIFTNO)) {
			lg_pmps = (512 << LG_PMPS_SHIFTNO) - 1 - lg_pmps;
			bi_ct->MPS = !(bi_ct->MPS);
		}
	} else {
#ifdef DUMP_DEBUG
		if (avs_get_debug_flag() & AEC_DUMP)
			io_printf("	- lg_pmps_MPS : %X (%X - %X - %X)\n",
				lg_pmps - (unsigned int)(lg_pmps>>cwr)
				- (unsigned int)(lg_pmps>>(cwr+2)),
				lg_pmps,
				(unsigned int)(lg_pmps>>cwr),
				(unsigned int)(lg_pmps>>(cwr+2))
				);
#endif
		/* MPS seen: exponentially decay the LPS estimate. */
		lg_pmps = lg_pmps - (unsigned int)(lg_pmps >> cwr)
			- (unsigned int)(lg_pmps >> (cwr + 2));
	}

	bi_ct->LG_PMPS = lg_pmps;

	return bit;
}

/*
 * Decode one binary symbol using a *pair* of contexts (weighted/"w" mode).
 *
 * The effective probability is derived from both contexts: averaged when
 * their MPS values agree, otherwise mirrored around the midpoint using the
 * smaller estimate's MPS.  The interval arithmetic mirrors
 * biari_decode_symbol (same s1/t1/value_s/value_t globals); afterwards
 * BOTH contexts are adapted independently against the decoded bit.
 * Returns the decoded bit.
 */
unsigned int biari_decode_symbolw(struct decoding_environment_s *dep,
	struct bi_context_type_s *bi_ct1,
	struct bi_context_type_s *bi_ct2)
{
	register unsigned char bit1, bit2;
	register unsigned char pred_mps, bit;
	register unsigned int lg_pmps;
	register unsigned char cwr1, cycno1 = bi_ct1->cycno;
	register unsigned char cwr2, cycno2 = bi_ct2->cycno;
	register unsigned int lg_pmps1 = bi_ct1->LG_PMPS;
	register unsigned int lg_pmps2 =
		bi_ct2->LG_PMPS;
	register unsigned int t_rlps;
	register unsigned char s_flag, is_lps = 0;
	register unsigned int s2, t2;


	bit1 = bi_ct1->MPS;
	bit2 = bi_ct2->MPS;

	cwr1 = (cycno1 <= 1) ? 3 : (cycno1 == 2) ? 4 : 5;
	cwr2 = (cycno2 <= 1) ? 3 : (cycno2 == 2) ?
		4 : 5;

	/* Combine the two context estimates into one prediction. */
	if (bit1 == bit2) {
		pred_mps = bit1;
		lg_pmps = (lg_pmps1 + lg_pmps2) / 2;
	} else {
		if (lg_pmps1 < lg_pmps2) {
			pred_mps = bit1;
			lg_pmps = (256 << LG_PMPS_SHIFTNO) - 1
				- ((lg_pmps2 - lg_pmps1) >> 1);
		} else {
			pred_mps = bit2;
			lg_pmps = (256 << LG_PMPS_SHIFTNO) - 1
				- ((lg_pmps1 - lg_pmps2) >> 1);
		}
	}

#ifdef DUMP_DEBUG
	if (avs_get_debug_flag() & AEC_DUMP)
		io_printf(" - Begin - LG_PMPS : %03X, MPS : %d\n",
			lg_pmps, pred_mps);
#endif
	/* Same range subtraction as biari_decode_symbol. */
	if (t1 >= (lg_pmps >> LG_PMPS_SHIFTNO)) {
		s2 = s1;
		t2 = t1 - (lg_pmps >> LG_PMPS_SHIFTNO);
		s_flag = 0;
	} else {
		s2 = s1 + 1;
		t2 = 256 + t1 - (lg_pmps >> LG_PMPS_SHIFTNO);
		s_flag = 1;
	}

	bit = pred_mps;
	if (s2 > value_s || (s2 == value_s && value_t >= t2)) {
		/* LPS sub-interval hit. */
		is_lps = 1;
		bit = !bit;
		t_rlps = (s_flag == 0) ?
			(lg_pmps >> LG_PMPS_SHIFTNO) :
			(t1 + (lg_pmps >> LG_PMPS_SHIFTNO));

		if (s2 == value_s)
			value_t = (value_t - t2);
		else {
			if (--dbits_to_go < 0)
				get_byte();

			value_t = (value_t << 1)
				| ((dbuffer >> dbits_to_go) & 0x01);
			value_t = 256 + value_t - t2;
		}

		/* Renormalize range and offset. */
		while (t_rlps < QUARTER) {
			t_rlps = t_rlps << 1;
			if (--dbits_to_go < 0)
				get_byte();

			value_t = (value_t << 1)
				| ((dbuffer >> dbits_to_go) & 0x01);
		}
		s1 = 0;
		t1 = t_rlps & 0xff;

		value_s = 0;
		while (value_t < QUARTER) {
			int j;

			if (--dbits_to_go < 0)
				get_byte();
			j = (dbuffer >> dbits_to_go) & 0x01;

			value_t = (value_t << 1) | j;
			value_s++;
		}
		value_t = value_t & 0xff;
	} else {
		s1 = s2;
		t1 = t2;
	}

	/* Adapt cycno of each context against the decoded bit. */
	if (bit != bit1) {
		cycno1 = (cycno1 <= 2) ? (cycno1 + 1) : 3;
	} else {
		if (cycno1 == 0)
			cycno1 = 1;
	}

	if (bit != bit2) {
		cycno2 = (cycno2 <= 2) ?
			(cycno2 + 1) : 3;
	} else {
		if (cycno2 == 0)
			cycno2 = 1;
	}
	bi_ct1->cycno = cycno1;
	bi_ct2->cycno = cycno2;

	{
		/* Probability update of context 1 (decay on MPS,
		 * boost + possible MPS flip on LPS).
		 */
		if (bit == bit1) {
			lg_pmps1 =
				lg_pmps1
				- (unsigned int)(lg_pmps1
				>> cwr1)
				- (unsigned int)(lg_pmps1
				>> (cwr1
				+ 2));
		} else {
			switch (cwr1) {
			case 3:
				lg_pmps1 = lg_pmps1 + 197;
				break;
			case 4:
				lg_pmps1 = lg_pmps1 + 95;
				break;
			default:
				lg_pmps1 = lg_pmps1 + 46;
			}

			if (lg_pmps1 >= (256 << LG_PMPS_SHIFTNO)) {
				lg_pmps1 = (512 << LG_PMPS_SHIFTNO) - 1
					- lg_pmps1;
				bi_ct1->MPS = !(bi_ct1->MPS);
			}
		}
		bi_ct1->LG_PMPS = lg_pmps1;

		/* Identical update for context 2. */
		if (bit == bit2) {
			lg_pmps2 =
				lg_pmps2
				- (unsigned int)(lg_pmps2
				>> cwr2)
				- (unsigned int)(lg_pmps2
				>> (cwr2
				+ 2));
		} else {
			switch (cwr2) {
			case 3:
				lg_pmps2 = lg_pmps2 + 197;
				break;
			case 4:
				lg_pmps2 = lg_pmps2 + 95;
				break;
			default:
				lg_pmps2 = lg_pmps2 + 46;
			}

			if (lg_pmps2 >= (256 << LG_PMPS_SHIFTNO)) {
				lg_pmps2 = (512 << LG_PMPS_SHIFTNO) - 1
					- lg_pmps2;
				bi_ct2->MPS = !(bi_ct2->MPS);
			}
		}
		bi_ct2->LG_PMPS = lg_pmps2;
	}


	return bit;
}

/*!
+ ************************************************************************ + * \brief + * biari_decode_symbol_eq_prob(): + * \return + * the decoded symbol + ************************************************************************ + */ +unsigned int biari_decode_symbol_eq_prob(struct decoding_environment_s *dep) +{ + unsigned char bit; + struct bi_context_type_s octx; + struct bi_context_type_s *ctx = &octx; + + ctx->LG_PMPS = (QUARTER << LG_PMPS_SHIFTNO) - 1; + ctx->MPS = 0; + ctx->cycno = 0xfe; + dec_bypass = 1; + bit = biari_decode_symbol(dep, ctx); + dec_bypass = 0; + return bit; +} + +unsigned int biari_decode_final(struct decoding_environment_s *dep) +{ + unsigned char bit; + struct bi_context_type_s octx; + struct bi_context_type_s *ctx = &octx; + + ctx->LG_PMPS = 1 << LG_PMPS_SHIFTNO; + ctx->MPS = 0; + ctx->cycno = 0xff; + dec_final = 1; + bit = biari_decode_symbol(dep, ctx); + dec_final = 0; + return bit; +} + +int i_8(char *tracestring) +{ + int frame_bitoffset = curr_stream->frame_bitoffset; + unsigned char *buf = curr_stream->stream_buffer; + int bitstreamlengthinbytes = curr_stream->bitstream_length; + struct syntaxelement symbol, *sym = &symbol; +#ifdef AVSP_LONG_CABAC +#else + assert(curr_stream->stream_buffer != NULL); +#endif + + sym->len = 8; + sym->type = SE_HEADER; + sym->mapping = linfo_ue; + + if ((get_bits(buf, frame_bitoffset, &(sym->inf), bitstreamlengthinbytes, + sym->len)) < 0) + return -1; + curr_stream->frame_bitoffset += sym->len; + sym->value1 = sym->inf; + if (sym->inf & 0x80) + sym->inf = -(~((int)0xffffff00 | sym->inf) + 1); +#if TRACE + tracebits2(sym->tracestring, sym->len, sym->inf); +#endif + return sym->inf; +} + +/*! 
 ************************************************************************
 * \brief
 *    arideco_bits_read
 ************************************************************************
 */
/*
 * Number of bitstream bits consumed so far by the arithmetic decoder,
 * derived from the global byte counter (*dcodestrm_len) and the unread
 * bits remaining in the current byte (dbits_to_go).  'dep' is unused;
 * the decoder state is global.
 */
int arideco_bits_read(struct decoding_environment_s *dep)
{

	return 8 * ((*dcodestrm_len) - 1) + (8 - dbits_to_go);
}

/*!
 ************************************************************************
 * \brief
 *    arithmetic decoding
 ************************************************************************
 */
/*
 * Decode one syntax element via its se->reading() callback and record in
 * se->len how many bitstream bits that decode consumed.  Returns se->len.
 */
int read_syntaxelement_aec(struct syntaxelement *se, struct img_par *img,
	struct datapartition *this_data_part)
{
	int curr_len;
	struct decoding_environment_s *dep_dp = &(this_data_part->de_aec);

	curr_len = arideco_bits_read(dep_dp);

	se->reading(se, img, dep_dp);

	se->len = (arideco_bits_read(dep_dp) - curr_len);
	return se->len;
}

/*!
 ************************************************************************
 * \brief
 *    This function is used to arithmetically decode the
 *    run length info of the skip mb
 ************************************************************************
 */
/*
 * Unary-decode the skip-MB run length into se->value1: count leading
 * zero bits using one_contexts[0], with the context index clamped to 3.
 * (Function name keeps the original's "lenght" spelling — it is the
 * public symbol callers use.)
 */
void readrunlenghtfrombuffer_aec(struct syntaxelement *se, struct img_par *img,
	struct decoding_environment_s *dep_dp)
{
	struct bi_context_type_s *pctx;
	int ctx, symbol;

	pctx = img->current_slice->tex_ctx->one_contexts[0];
	symbol = 0;
	ctx = 0;
	while (biari_decode_symbol(dep_dp, pctx + ctx) == 0) {
		symbol += 1;
		ctx++;
		if (ctx >= 3)
			ctx = 3;
	}
	se->value1 = symbol;
#if TRACE
	fprintf(p_trace, "@%d%s\t\t\t%d\n",
		symbol_count++, se->tracestring, se->value1);
	fflush(p_trace);
#endif
}

/*!
 ************************************************************************
 * \brief
 *    This function is used to arithmetically decode a pair of
 *    intra prediction modes of a given MB.
 ************************************************************************
 */
/* Remaps the unary symbol count (0..4) to an intra prediction mode. */
int mapd_intrap[5] = {0, 2, 3, 4, 1};
/*
 * Unary-decode the luma intra prediction mode (at most 4 leading zero
 * bits) using one_contexts[1], context index clamped to 3, then remap
 * through mapd_intrap and store (mode - 1) in se->value1 — so the result
 * ranges from -1 (symbol 0) upward.
 */
void read_intrapredmode_aec(struct syntaxelement *se, struct img_par *img,
	struct decoding_environment_s *dep_dp)
{
	struct bi_context_type_s *pctx;
	int ctx, symbol;

	pctx = img->current_slice->tex_ctx->one_contexts[1];
	symbol = 0;
	ctx = 0;
#ifdef DUMP_DEBUG
	if (avs_get_debug_flag() & AEC_DUMP)
		io_printf(" -- read_intrapredmode_aec ctx : %d\n", ctx);
#endif
	while (biari_decode_symbol(dep_dp, pctx + ctx) == 0) {
		symbol += 1;
		ctx++;
		if (ctx >= 3)
			ctx = 3;
#ifdef DUMP_DEBUG
		if (avs_get_debug_flag() & AEC_DUMP)
			io_printf(" -- read_intrapredmode_aec ctx : %d\n", ctx);
#endif
		/* The unary code is capped at 4 bins. */
		if (symbol == 4)
			break;
	}
	se->value1 = mapd_intrap[symbol] - 1;

#if TRACE
	fprintf(p_trace, "@%d %s\t\t\t%d\n",
		symbol_count++, se->tracestring, se->value1);
	fflush(p_trace);
#endif
}

/*!
 ************************************************************************
 * \brief
 *    decoding of unary binarization using one or 2 distinct
 *    models for the first and all remaining bins; no terminating
 *    "0" for max_symbol
 ***********************************************************************
 */
/*
 * Truncated-unary decode: first bin uses *ctx, remaining bins use
 * ctx[ctx_offset]; counting stops at max_symbol - 1 bins (no terminating
 * zero at the cap).  Returns the decoded symbol value.
 */
unsigned int unary_bin_max_decode(struct decoding_environment_s *dep_dp,
	struct bi_context_type_s *ctx,
	int ctx_offset, unsigned int max_symbol)
{
	unsigned int l;
	unsigned int symbol;
	struct bi_context_type_s *ictx;

	symbol = biari_decode_symbol(dep_dp, ctx);

	if (symbol == 0)
		return 0;

	if (max_symbol == 1)
		return symbol;
	symbol = 0;
	ictx = ctx + ctx_offset;
	do {
		l = biari_decode_symbol(dep_dp, ictx);
		symbol++;
	} while ((l != 0) && (symbol < max_symbol - 1));
	/* Hitting the cap with a still-set bin counts one more symbol. */
	if ((l != 0) && (symbol == max_symbol - 1))
		symbol++;
	return symbol;
}

/*!
 ************************************************************************
 * \brief
 *    decoding of unary binarization using one or 2 distinct
 *    models for the first and all remaining bins
 ***********************************************************************
 */
/*
 * Unterminated unary decode (inverted bins): first bin uses *ctx, the
 * rest use ctx[ctx_offset]; counts until a decoded 1-bin appears.
 */
unsigned int unary_bin_decode(struct decoding_environment_s *dep_dp,
	struct bi_context_type_s *ctx, int ctx_offset)
{
	unsigned int l;
	unsigned int symbol;
	struct bi_context_type_s *ictx;

	symbol = 1 - biari_decode_symbol(dep_dp, ctx);

	if (symbol == 0)
		return 0;
	symbol = 0;
	ictx = ctx + ctx_offset;
	do {
		l = 1 - biari_decode_symbol(dep_dp, ictx);
		symbol++;
	} while (l != 0);
	return symbol;
}

/*!
 ************************************************************************
 * \brief
 *    This function is used to arithmetically decode the chroma
 *    intra prediction mode of a given MB.
 ************************************************************************
 */
/*
 * Decode the chroma intra prediction mode into se->value1.  The context
 * index is the count of available up/left neighbor MBs whose chroma
 * prediction mode is nonzero; a nonzero first bin triggers a truncated-
 * unary refinement (max 2) offset by +1.
 */
void read_cipredmode_aec(struct syntaxelement *se,
	struct img_par *img,
	struct decoding_environment_s *dep_dp)
{
	struct texture_info_contexts *ctx = img->current_slice->tex_ctx;
	struct macroblock *curr_mb = &mb_data[img->current_mb_nr];
	int act_ctx, a, b;
	int act_sym = se->value1;

	if (curr_mb->mb_available_up == NULL)
		b = 0;
	else {
		/*if ( (curr_mb->mb_available_up)->mb_type==IPCM)
		 *	b=0;
		 * else
		 */
		b = (((curr_mb->mb_available_up)->c_ipred_mode != 0) ? 1 : 0);
	}

	if (curr_mb->mb_available_left == NULL)
		a = 0;
	else {
		/* if ( (curr_mb->mb_available_left)->mb_type==IPCM)
		 *	a=0;
		 * else
		 */
		a = (((curr_mb->mb_available_left)->c_ipred_mode != 0) ?
			1 : 0);
	}

	act_ctx = a + b;


	act_sym = biari_decode_symbol(dep_dp, ctx->cipr_contexts + act_ctx);

	if (act_sym != 0)
		act_sym = unary_bin_max_decode(dep_dp, ctx->cipr_contexts + 3,
			0, 2) + 1;

	se->value1 = act_sym;

#if TRACE
	fprintf(p_trace, "@%d %s\t\t%d\n",
		symbol_count++, se->tracestring, se->value1);
	fflush(p_trace);
#endif

}

/*
 * Parse an AVS slice header starting at 'startcodepos' in 'buf'
 * (length bytes).  Copies the payload into curr_stream, reads the slice
 * vertical position (plus extension for >2800-line video), derives the
 * slice's first MB index, optionally reads slice QP and the weighting-
 * prediction parameters for P/B pictures, and mirrors consumed fields to
 * the hardware via push_es().  Returns the computed MB index.
 * NOTE(review): also flips img->type to P_IMG for the second I-field of
 * an interlaced I picture — side effect callers appear to rely on.
 */
int slice_header(char *buf, int startcodepos, int length)
{
	int i;

	int weight_para_num = 0;
	int mb_row;
	int mb_column;
	int mb_index;
	int mb_width, mb_height;

	mb_column = 0;

	memcpy(curr_stream->stream_buffer, buf, length);
	curr_stream->code_len = curr_stream->bitstream_length = length;

	curr_stream->read_len =
		curr_stream->frame_bitoffset = (startcodepos) * 8;
	slice_vertical_position = u_v(8, "slice vertical position");

	push_es(slice_vertical_position, 8);

#ifdef DUMP_DEBUG
	if (avs_get_debug_flag() & SLICE_INFO_DUMP)
		io_printf("    * 8-bits slice_vertical_position : %d\n",
			slice_vertical_position);
#endif

	if (vertical_size > 2800) {
		slice_vertical_position_extension = u_v(3,
			"slice vertical position extension");
		push_es(slice_vertical_position_extension, 3);

	}

	if (vertical_size > 2800)
		mb_row = (slice_vertical_position_extension << 7)
			+ slice_vertical_position;
	else
		mb_row = slice_vertical_position;

	mb_width = (horizontal_size + 15) / 16;
	/* Interlaced content rounds the MB height to field pairs. */
	if (!progressive_sequence)
		mb_height = 2 * ((vertical_size + 31) / 32);
	else
		mb_height = (vertical_size + 15) / 16;


	mb_index = mb_row * mb_width + mb_column;

	/* Second field of a field-coded I picture decodes as P. */
	if (!img->picture_structure && img->type == I_IMG
		&& (mb_index >= mb_width * mb_height / 2)) {
		second_ifield = 1;
		img->type = P_IMG;
		pre_img_type = P_IMG;
	}

	{
		if (!fixed_picture_qp) {
			fixed_slice_qp = u_v(1, "fixed_slice_qp");
			push_es(fixed_slice_qp, 1);
#ifdef DUMP_DEBUG
			if (avs_get_debug_flag() & SLICE_INFO_DUMP)
				io_printf("    * 1-bit fixed_slice_qp : %d\n",
					fixed_slice_qp);
#endif
			slice_qp =
u_v(6, "slice_qp");
			push_es(slice_qp, 6);
#ifdef DUMP_DEBUG
			if (avs_get_debug_flag() & SLICE_INFO_DUMP)
				io_printf("    * 6-bits slice_qp : %d\n",
					slice_qp);
#endif

			img->qp = slice_qp;
		}

		/* Weighted-prediction parameters exist only for P/B. */
		if (img->type != I_IMG) {
			img->slice_weighting_flag = u_v(1,
				"slice weighting flag");

			if (img->slice_weighting_flag) {

				/* Number of reference weights depends on
				 * picture type and frame/field structure.
				 */
				if (second_ifield && !img->picture_structure)
					weight_para_num = 1;
				else if (img->type == P_IMG
					&& img->picture_structure)
					weight_para_num = 2;
				else if (img->type == P_IMG
					&& !img->picture_structure)
					weight_para_num = 4;
				else if (img->type == B_IMG
					&& img->picture_structure)
					weight_para_num = 2;
				else if (img->type == B_IMG
					&& !img->picture_structure)
					weight_para_num = 4;

#ifdef DUMP_DEBUG
				if (avs_get_debug_flag() & SLICE_INFO_DUMP)
					io_printf("   - weight_para_num : %d\n",
						weight_para_num);
#endif
				for (i = 0; i < weight_para_num; i++) {
					img->lum_scale[i] = u_v(8,
						"luma scale");

					img->lum_shift[i] = i_8("luma shift");

					marker_bit = u_1("insert bit");


					{
						img->chroma_scale[i] = u_v(8,
							"chroma scale");

						img->chroma_shift[i] = i_8(
							"chroma shift");

						marker_bit = u_1("insert bit");

					}
				}
				img->mb_weighting_flag = u_v(1,
					"MB weighting flag");

			}
		}
	}


#if 1
	return mb_index;
#endif
}

/* Log an allocation-failure message; does not actually exit. */
void no_mem_exit(char *where)
{
	io_printf("%s\r\n", where);
}

/* Bit masks for bit 7 (MSB) down to bit 0. */
unsigned char bit[8] = {0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01};

/*
 * Raw bitstream reader state: a refillable byte buffer plus the
 * emulation-prevention ("demulation") bit accumulator.
 */
struct inputstream_s {
	/*FILE *f;*/
	unsigned char buf[SVA_STREAM_BUF_SIZE];	/* refill window */
	unsigned int uclear_bits;	/* de-stuffed bit accumulator */
	unsigned int upre_3bytes;	/* last 3 raw bytes (stuffing detect) */
	int ibyte_position;		/* read cursor into buf */
	int ibuf_bytesnum;		/* valid bytes in buf */
	int iclear_bitsnum;		/* valid bits in uclear_bits */
	int istuff_bitsnum;
	int ibits_count;
};

struct inputstream_s IRABS;
struct inputstream_s *p_irabs = &IRABS;

/* Per-stream bit-accounting counters (emulation bytes, bitrate, …). */
struct stat_bits {
	int curr_frame_bits;
	int prev_frame_bits;
	int emulate_bits;
	int prev_emulate_bits;
	int last_unit_bits;
	int bitrate;
	int total_bitrate[1000];
	int coded_pic_num;
	int time_s;
};

struct
stat_bits *stat_bits_ptr;

/* Parsing scratch state shared across the stream-reading helpers. */
unsigned char *temp_slice_buf;
int start_codeposition;
int first_slice_length;
int first_slice_startpos;

int bitstream_buf_used;		/* refill count since open_irabs() */
int startcode_offset;

int bitstream_read_ptr;		/* hardware-side read address/offset */

int demulate_enable;		/* strip emulation-prevention bits? */

int last_dquant;

int total_mb_count;

int current_mb_skip;

int skip_mode_flag;

int current_mb_intra;

/*
 *************************************************************************
 * Function: Check start code's type
 * Input:
 * Output:
 * Return:
 * Author: XZHENG, 20080515
 *************************************************************************
 */
/*
 * Enable/disable emulation-prevention stripping based on the start code:
 * 0xb0/0xb2/0xb5 units (sequence header, user data, extension — per the
 * case values; confirm against the AVS start-code table) are read raw.
 */
void check_type(int startcode)
{
	startcode = startcode & 0x000000ff;
	switch (startcode) {
	case 0xb0:
	case 0xb2:
	case 0xb5:
		demulate_enable = 0;
		break;
	default:
		demulate_enable = 1;
		break;
	}

}
/*
 *************************************************************************
 * Function:
 * Input:
 * Output:
 * Return: 0 : OK
 *	-1 : arrive at stream end
 *	-2 : meet another start code
 * Attention:
 *************************************************************************
 */
/*
 * Consume the next raw byte at p->ibyte_position into the de-stuffed bit
 * accumulator (6 bits if it is an emulation-prevention byte — upre_3bytes
 * < 4 — else 8), refilling buf from read_bitstream() when fewer than 3
 * bytes remain.  Returns 0 on success, -1 at stream end, -2 when the next
 * 3 bytes form a new 00 00 01 start code.
 */
int clear_nextbyte(struct inputstream_s *p)
{
	int i, k, j;
	unsigned char temp[3];

	i = p->ibyte_position;
	k = p->ibuf_bytesnum - i;
	if (k < 3) {
		/* Save the unread tail, then refill behind it. */
		for (j = 0; j < k; j++)
			temp[j] = p->buf[i + j];

		p->ibuf_bytesnum = read_bitstream(p->buf + k,
			SVA_STREAM_BUF_SIZE - k);
		bitstream_buf_used++;
		if (p->ibuf_bytesnum == 0) {
			if (k > 0) {
				/* No more data: drain the saved tail. */
				while (k > 0) {
					p->upre_3bytes = ((p->upre_3bytes << 8)
						| p->buf[i])
						& 0x00ffffff;
					if (p->upre_3bytes < 4
						&& demulate_enable) {
						p->uclear_bits =
							(p->uclear_bits
							<< 6)
							| (p->buf[i]
							>> 2);
						p->iclear_bitsnum += 6;
						stat_bits_ptr->emulate_bits
							+= 2;
					} else {
						p->uclear_bits = (p->uclear_bits
							<< 8)
							| p->buf[i];
						p->iclear_bitsnum += 8;
					}
					p->ibyte_position++;
					k--;
					i++;
				}
				return 0;
			} else {
				return -1;
			}
		} else {
			/* Restore the saved tail at the buffer front. */
			for (j = 0; j < k; j++)
				p->buf[j] = temp[j];
			p->ibuf_bytesnum += k;
			i = p->ibyte_position = 0;
		}
	}
	/* A fresh 00 00 01 means a new unit begins here. */
	if (p->buf[i] == 0 && p->buf[i + 1] == 0 && p->buf[i + 2] == 1)
		return -2;
	p->upre_3bytes = ((p->upre_3bytes << 8) | p->buf[i]) & 0x00ffffff;
	if (p->upre_3bytes < 4 && demulate_enable) {
		/* Emulation-prevention byte: keep only its top 6 bits. */
		p->uclear_bits = (p->uclear_bits << 6) | (p->buf[i] >> 2);
		p->iclear_bitsnum += 6;
		stat_bits_ptr->emulate_bits += 2;
	} else {
		p->uclear_bits = (p->uclear_bits << 8) | p->buf[i];
		p->iclear_bitsnum += 8;
	}
	p->ibyte_position++;
	return 0;
}

/*
 *************************************************************************
 * Function:
 * Input:
 * Output:
 * Return: 0 : OK
 *	-1 : arrive at stream end
 *	-2 : meet another start code
 * Attention:
 *************************************************************************
 */
/*
 * Extract the next n de-stuffed bits into *v (MSB-first), pulling bytes
 * through clear_nextbyte() as needed.  Returns 0 on success or the
 * clear_nextbyte() error code (-1 stream end, -2 new start code).
 * Assumes n <= 32 and the accumulator holds at most 32 valid bits.
 */
int read_n_bit(struct inputstream_s *p, int n, int *v)
{
	int r;
	unsigned int t;

	while (n > p->iclear_bitsnum) {
		r = clear_nextbyte(p);
		if (r) {
			if (r == -1) {
				if (p->ibuf_bytesnum - p->ibyte_position > 0)
					break;
			}
			return r;
		}
	}
	t = p->uclear_bits;
	r = 32 - p->iclear_bitsnum;
	/* Shift the wanted field to the top, then down to n bits. */
	*v = (t << r) >> (32 - n);
	p->iclear_bitsnum -= n;
	return 0;
}

#ifdef AVSP_LONG_CABAC
/* Byte-unswizzled staging FIFO fed by read_bitstream(). */
unsigned char TMP_BUF[2 * SVA_STREAM_BUF_SIZE];
int tmp_buf_wr_ptr;
int tmp_buf_rd_ptr;
int tmp_buf_count;
#endif
/*
 * Reset the raw-bitstream reader state and position the hardware read
 * pointer 16 bytes (aligned) before src_start.
 */
void open_irabs(struct inputstream_s *p)
{
	p->uclear_bits = 0xffffffff;
	p->ibyte_position = 0;
	p->ibuf_bytesnum = 0;
	p->iclear_bitsnum = 0;
	p->istuff_bitsnum = 0;
	p->ibits_count = 0;
	p->upre_3bytes = 0;

	bitstream_buf_used = 0;
	bitstream_read_ptr = (src_start - 16) & 0xfffffff0;

#ifdef AVSP_LONG_CABAC
	tmp_buf_count = 0;
	tmp_buf_wr_ptr = 0;
	tmp_buf_rd_ptr = 0;
#endif

}

/*
 * Copy move_size bytes of bitstream through the LMEM DMA engine from
 * move_from_addr (wrapping within [vld_mem_start_addr, vld_mem_end_addr])
 * to move_to_addr, in chunks of up to 512 bytes, polling the DMA-busy and
 * write-response registers between transfers.
 */
void move_bitstream(unsigned int move_from_addr, unsigned int move_to_addr,
	int move_size)
{
	int move_bytes_left = move_size;
	unsigned int move_read_addr;
	unsigned int move_write_addr = move_to_addr;

	int move_byte;
	unsigned int data32;

	while
 (move_from_addr > vld_mem_end_addr) {
		/* Wrap the source address back into the VLD ring buffer. */
		move_from_addr = move_from_addr + vld_mem_start_addr
			- vld_mem_end_addr - 8;
	}
	move_read_addr = move_from_addr;
	while (move_bytes_left > 0) {
		move_byte = move_bytes_left;
		if (move_byte > 512)
			move_byte = 512;
		/* Clip the chunk at the ring-buffer end. */
		if ((move_read_addr + move_byte) > vld_mem_end_addr)
			move_byte = (vld_mem_end_addr + 8) - move_read_addr;

		/* DMA read from the ring buffer... */
		WRITE_VREG(LMEM_DMA_ADR, move_read_addr);
		WRITE_VREG(LMEM_DMA_COUNT, move_byte / 2);
		WRITE_VREG(LMEM_DMA_CTRL, 0xc200);

		data32 = 0x8000;
		while (data32 & 0x8000)
			data32 = READ_VREG(LMEM_DMA_CTRL);

		/* ...then DMA write to the destination. */
		WRITE_VREG(LMEM_DMA_ADR, move_write_addr);
		WRITE_VREG(LMEM_DMA_COUNT, move_byte / 2);
		WRITE_VREG(LMEM_DMA_CTRL, 0x8200);

		data32 = 0x8000;
		while (data32 & 0x8000)
			data32 = READ_VREG(LMEM_DMA_CTRL);

		/* Wait for all write responses to retire. */
		data32 = 0x0fff;
		while (data32 & 0x0fff)
			data32 = READ_VREG(WRRSP_LMEM);

#ifdef DUMP_DEBUG
		if (avs_get_debug_flag() & STREAM_INFO_DUMP)
			io_printf(" 2 MOVE %d Bytes from 0x%x to 0x%x\n",
				move_byte, move_read_addr, move_write_addr);
#endif

		move_read_addr = move_read_addr + move_byte;
		if (move_read_addr > vld_mem_end_addr)
			move_read_addr = vld_mem_start_addr;
		move_write_addr = move_write_addr + move_byte;
		move_bytes_left = move_bytes_left - move_byte;
	}

}

/*
 * Fill buf with 'size' bytes of bitstream.  In the AVSP_LONG_CABAC build
 * this DMAs a block from the VLD ring buffer into a staging area, then
 * un-swizzles each 8-byte group (the hardware stores words big-endian,
 * high word second) into the TMP_BUF circular FIFO before copying out.
 * Otherwise it reads directly from tmp_stream.  Returns size.
 */
int read_bitstream(unsigned char *buf, int size)
{
	int i;

#ifdef AVSP_LONG_CABAC

	unsigned int *TMP_BUF_32 = (unsigned int *)bitstream_read_tmp;

	if (tmp_buf_count < size) {
		dma_sync_single_for_cpu(amports_get_dma_device(),
			bitstream_read_tmp_phy, SVA_STREAM_BUF_SIZE,
			DMA_FROM_DEVICE);

		move_bitstream(bitstream_read_ptr, bitstream_read_tmp_phy,
			SVA_STREAM_BUF_SIZE);

		/* Un-swizzle: word 1 bytes MSB-first, then word 0. */
		for (i = 0; i < SVA_STREAM_BUF_SIZE / 8; i++) {
			TMP_BUF[tmp_buf_wr_ptr++] =
				(TMP_BUF_32[2 * i + 1] >> 24) & 0xff;
			if (tmp_buf_wr_ptr >= (2 * SVA_STREAM_BUF_SIZE))
				tmp_buf_wr_ptr = 0;
			TMP_BUF[tmp_buf_wr_ptr++] =
				(TMP_BUF_32[2 * i + 1] >> 16) & 0xff;
			if (tmp_buf_wr_ptr >= (2 *
SVA_STREAM_BUF_SIZE))
				tmp_buf_wr_ptr = 0;
			TMP_BUF[tmp_buf_wr_ptr++] = (TMP_BUF_32[2 * i + 1] >> 8)
				& 0xff;
			if (tmp_buf_wr_ptr >= (2 * SVA_STREAM_BUF_SIZE))
				tmp_buf_wr_ptr = 0;
			TMP_BUF[tmp_buf_wr_ptr++] = (TMP_BUF_32[2 * i + 1] >> 0)
				& 0xff;
			if (tmp_buf_wr_ptr >= (2 * SVA_STREAM_BUF_SIZE))
				tmp_buf_wr_ptr = 0;
			TMP_BUF[tmp_buf_wr_ptr++] =
				(TMP_BUF_32[2 * i + 0] >> 24) & 0xff;
			if (tmp_buf_wr_ptr >= (2 * SVA_STREAM_BUF_SIZE))
				tmp_buf_wr_ptr = 0;
			TMP_BUF[tmp_buf_wr_ptr++] =
				(TMP_BUF_32[2 * i + 0] >> 16) & 0xff;
			if (tmp_buf_wr_ptr >= (2 * SVA_STREAM_BUF_SIZE))
				tmp_buf_wr_ptr = 0;
			TMP_BUF[tmp_buf_wr_ptr++] = (TMP_BUF_32[2 * i + 0] >> 8)
				& 0xff;
			if (tmp_buf_wr_ptr >= (2 * SVA_STREAM_BUF_SIZE))
				tmp_buf_wr_ptr = 0;
			TMP_BUF[tmp_buf_wr_ptr++] = (TMP_BUF_32[2 * i + 0] >> 0)
				& 0xff;
			if (tmp_buf_wr_ptr >= (2 * SVA_STREAM_BUF_SIZE))
				tmp_buf_wr_ptr = 0;
		}
		tmp_buf_count = tmp_buf_count + SVA_STREAM_BUF_SIZE;
		bitstream_read_ptr = bitstream_read_ptr + SVA_STREAM_BUF_SIZE;
	}

#ifdef DUMP_DEBUG
	if (avs_get_debug_flag() & STREAM_INFO_DUMP)
		io_printf(" Read %d bytes from %d, size left : %d\n",
			size, tmp_buf_rd_ptr, tmp_buf_count);
#endif
	/* Drain 'size' bytes out of the circular staging FIFO. */
	for (i = 0; i < size; i++) {
		buf[i] = TMP_BUF[tmp_buf_rd_ptr++];
		if (tmp_buf_rd_ptr >= (2 * SVA_STREAM_BUF_SIZE))
			tmp_buf_rd_ptr = 0;
	}
	tmp_buf_count = tmp_buf_count - size;

#else
	for (i = 0; i < size; i++)
		buf[i] = tmp_stream[bitstream_read_ptr + i];
	bitstream_read_ptr = bitstream_read_ptr + size;
#endif

	return size;
}

/*
 * Advance the reader to just past the next 00 00 01 start-code prefix.
 * Handles prefixes that straddle a buffer refill via the saved bytes
 * a/b and the carry count m.  Returns 0 when positioned after a start
 * code, -1 at stream end, -2 on a cursor error.
 */
int next_startcode(struct inputstream_s *p)
{
	int i, m;
	unsigned char a = 0, b = 0;

	m = 0;

	while (1) {
		if (p->ibyte_position >= p->ibuf_bytesnum - 2) {
			/* Fewer than 3 bytes left: remember the tail
			 * (m bytes) and refill the whole buffer.
			 */
			m = p->ibuf_bytesnum - p->ibyte_position;
			if (m < 0)
				return -2;
			if (m == 1)
				b = p->buf[p->ibyte_position + 1];
			if (m == 2) {
				b = p->buf[p->ibyte_position + 1];
				a = p->buf[p->ibyte_position];
			}
			p->ibuf_bytesnum = read_bitstream(p->buf,
				SVA_STREAM_BUF_SIZE);
p->ibyte_position = 0;
			bitstream_buf_used++;
		}

		if (p->ibuf_bytesnum + m < 3)
			return -1;

		/* Start-code prefix split across the refill boundary:
		 * one carried byte...
		 */
		if (m == 1 && b == 0 && p->buf[0] == 0 && p->buf[1] == 1) {
			p->ibyte_position = 2;
			p->iclear_bitsnum = 0;
			p->istuff_bitsnum = 0;
			p->ibits_count += 24;
			p->upre_3bytes = 1;
			return 0;
		}

		/* ...or two carried bytes (both split positions). */
		if (m == 2 && b == 0 && a == 0 && p->buf[0] == 1) {
			p->ibyte_position = 1;
			p->iclear_bitsnum = 0;
			p->istuff_bitsnum = 0;
			p->ibits_count += 24;
			p->upre_3bytes = 1;
			return 0;
		}

		if (m == 2 && b == 0 && p->buf[0] == 0 && p->buf[1] == 1) {
			p->ibyte_position = 2;
			p->iclear_bitsnum = 0;
			p->istuff_bitsnum = 0;
			p->ibits_count += 24;
			p->upre_3bytes = 1;
			return 0;
		}

		/* Scan for 00 00 01 wholly inside the buffer. */
		for (i = p->ibyte_position; i < p->ibuf_bytesnum - 2; i++) {
			if (p->buf[i] == 0 && p->buf[i + 1] == 0
				&& p->buf[i + 2] == 1) {
				p->ibyte_position = i + 3;
				p->iclear_bitsnum = 0;
				p->istuff_bitsnum = 0;
				p->ibits_count += 24;
				p->upre_3bytes = 1;
				return 0;
			}
			p->ibits_count += 8;
		}
		p->ibyte_position = i;
	}
}

/*
 * Extract one complete unit (start code + payload) into buf.
 * Seeks the next start code, records its absolute stream offset in
 * startcode_offset, writes a synthetic 00 00 01 prefix, then copies
 * payload bytes until the next start code, stream end, or
 * MAX_CODED_FRAME_SIZE.  Sets *startcodepos to 3 and *length to the unit
 * size; returns that size, or -1 for SEQUENCE_END_CODE.
 */
int get_oneunit(char *buf, int *startcodepos, int *length)
{
	int i, j, k;

	i = next_startcode(p_irabs);

	if (i != 0) {
		if (i == -1)
			io_printf(
			"\narrive at stream end and start code is not found!");
		if (i == -2)
			io_printf("\np->ibyte_position error!");

	}
	startcode_offset =
		p_irabs->ibyte_position
		- 3 + (bitstream_buf_used-1)
		* SVA_STREAM_BUF_SIZE;
	buf[0] = 0;
	buf[1] = 0;
	buf[2] = 1;
	*startcodepos = 3;
	i = read_n_bit(p_irabs, 8, &j);
	buf[3] = (char)j;

	check_type(buf[3]);
	if (buf[3] == SEQUENCE_END_CODE) {
		*length = 4;
		return -1;
	}
	k = 4;
	while (1) {
		i = read_n_bit(p_irabs, 8, &j);
		if (i < 0)
			break;
		buf[k++] = (char)j;
		if (k >= (MAX_CODED_FRAME_SIZE - 1))
			break;
	}
	/* Flush any leftover partial byte, left-aligned. */
	if (p_irabs->iclear_bitsnum > 0) {
		int shift;

		shift = 8 - p_irabs->iclear_bitsnum;
		i = read_n_bit(p_irabs, p_irabs->iclear_bitsnum, &j);

		if (j != 0)
			buf[k++] = (char)(j << shift);
stat_bits_ptr->last_unit_bits += shift; + } + *length = k; + return k; +} + +/*unsigned char tmp_buf[MAX_CODED_FRAME_SIZE] __attribute__ ((aligned(64)));*/ +/*unsigned char tmp_buf[MAX_CODED_FRAME_SIZE] __aligned(64);*/ +int header(void) +{ + unsigned char *buf; + int startcodepos, length; + + unsigned char *tmp_buf; + + tmp_buf = (unsigned char *)avsp_heap_adr; + + buf = &tmp_buf[0]; + while (1) { + start_codeposition = get_oneunit(buf, &startcodepos, &length); + + switch (buf[startcodepos]) { + case SEQUENCE_HEADER_CODE: + io_printf( + "# SEQUENCE_HEADER_CODE (0x%02x) found at offset %d (0x%x)\n", + buf[startcodepos], startcode_offset, + startcode_offset); + break; + case EXTENSION_START_CODE: + io_printf( + "# EXTENSION_START_CODE (0x%02x) found at offset %d (0x%x)\n", + buf[startcodepos], startcode_offset, + startcode_offset); + break; + case USER_DATA_START_CODE: + io_printf( + "# USER_DATA_START_CODE (0x%02x) found at offset %d (0x%x)\n", + buf[startcodepos], startcode_offset, + startcode_offset); + break; + case VIDEO_EDIT_CODE: + io_printf( + "# VIDEO_EDIT_CODE (0x%02x) found at offset %d (0x%x)\n", + buf[startcodepos], startcode_offset, + startcode_offset); + break; + case I_PICTURE_START_CODE: + io_printf( + "# I_PICTURE_START_CODE (0x%02x) found at offset %d (0x%x)\n", + buf[startcodepos], startcode_offset, + startcode_offset); + break; + case PB_PICTURE_START_CODE: + io_printf( + "# PB_PICTURE_START_CODE (0x%02x) found at offset %d (0x%x)\n", + buf[startcodepos], startcode_offset, + startcode_offset); + break; + case SEQUENCE_END_CODE: + io_printf( + "# SEQUENCE_END_CODE (0x%02x) found at offset %d (0x%x)\n", + buf[startcodepos], startcode_offset, + startcode_offset); + break; + default: + io_printf( + "# SLICE_START_CODE (0x%02x) found at offset %d (0x%x)\n", + buf[startcodepos], startcode_offset, + startcode_offset); +#if 0 + io_printf("VLD_MEM_VIFIFO_START_PTR %x\r\n", + READ_VREG(VLD_MEM_VIFIFO_START_PTR)); + io_printf("VLD_MEM_VIFIFO_CURR_PTR 
%x\r\n", + READ_VREG(VLD_MEM_VIFIFO_CURR_PTR)); + io_printf("VLD_MEM_VIFIFO_END_PTR %x\r\n", + READ_VREG(VLD_MEM_VIFIFO_END_PTR)); + io_printf("VLD_MEM_VIFIFO_WP %x\r\n" + READ_VREG(VLD_MEM_VIFIFO_WP)); + io_printf("VLD_MEM_VIFIFO_RP %x\r\n", + READ_VREG(VLD_MEM_VIFIFO_RP)); + io_printf("VLD_MEM_VBUF_RD_PTR %x\r\n" + READ_VREG(VLD_MEM_VBUF_RD_PTR)); + io_printf("VLD_MEM_VIFIFO_BUF_CNTL %x\r\n", + READ_VREG(VLD_MEM_VIFIFO_BUF_CNTL)); + io_printf("PARSER_VIDEO_HOLE %x\r\n", + READ_MPEG_REG(PARSER_VIDEO_HOLE)); +#endif + if ((buf[startcodepos] >= SLICE_START_CODE_MIN + && buf[startcodepos] + <= SLICE_START_CODE_MAX) + && ((!img->seq_header_indicate) + || (img->type == B_IMG + && img->b_discard_flag + == 1 + && !img->no_forward_reference))) { + break; + } else if (buf[startcodepos] >= SLICE_START_CODE_MIN) { + + first_slice_length = length; + first_slice_startpos = startcodepos; + + temp_slice_buf = &tmp_buf[0]; + return SOP; + } else { + io_printf("Can't find start code"); + return -EOS; + } + } + } + +} + +/* + ************************************************************************* + * Function:Allocates a Bitstream + * Input: + * Output:allocated Bitstream point + * Return: + * Attention: + ************************************************************************* + */ +struct bitstream_s *alloc_bitstream(void) +{ + struct bitstream_s *bitstream; + + bitstream = (struct bitstream_s *)local_alloc(1, + sizeof(struct bitstream_s)); + if (bitstream == NULL) { + io_printf( + "AllocBitstream: Memory allocation for Bitstream failed"); + return NULL; + } + bitstream->stream_buffer = (unsigned char *)local_alloc( + MAX_CODED_FRAME_SIZE, + sizeof(unsigned char)); + if (bitstream->stream_buffer == NULL) { + io_printf( + "AllocBitstream: Memory allocation for streamBuffer failed"); + return NULL; + } + + return bitstream; +} + +void biari_init_context_logac(struct bi_context_type_s *ctx) +{ + ctx->LG_PMPS = (QUARTER << LG_PMPS_SHIFTNO) - 1; + ctx->MPS = 0; + ctx->cycno = 0; +} 

/* Reset every context in a 1-D/2-D/3-D/4-D context array.  The loop
 * indices (i, j, k, l) must be declared by the caller.
 */
#define BIARI_CTX_INIT1_LOG(jj, ctx)\
{\
	for (j = 0; j < jj; j++)\
		biari_init_context_logac(&(ctx[j]));\
}

#define BIARI_CTX_INIT2_LOG(ii, jj, ctx)\
{\
	for (i = 0; i < ii; i++)\
		for (j = 0; j < jj; j++)\
			biari_init_context_logac(&(ctx[i][j]));\
}

#define BIARI_CTX_INIT3_LOG(ii, jj, kk, ctx)\
{\
	for (i = 0; i < ii; i++)\
		for (j = 0; j < jj; j++)\
			for (k = 0; k < kk; k++)\
				biari_init_context_logac(&(ctx[i][j][k]));\
}

#define BIARI_CTX_INIT4_LOG(ii, jj, kk, ll, ctx)\
{\
	for (i = 0; i < ii; i++)\
		for (j = 0; j < jj; j++)\
			for (k = 0; k < kk; k++)\
				for (l = 0; l < ll; l++)\
					biari_init_context_logac\
						(&(ctx[i][j][k][l]));\
}

/*
 * Reset all motion and texture AEC context models of the current slice
 * to their initial (equiprobable) state.  Called at slice start.
 */
void init_contexts(struct img_par *img)
{
	struct motion_info_contexts_s *mc = img->current_slice->mot_ctx;
	struct texture_info_contexts *tc = img->current_slice->tex_ctx;
	int i, j;

#ifdef DUMP_DEBUG
	if (avs_get_debug_flag() & SLICE_INFO_DUMP)
		io_printf(" ---- init_contexts ----\n");
#endif

	BIARI_CTX_INIT2_LOG(3, NUM_MB_TYPE_CTX, mc->mb_type_contexts);
	BIARI_CTX_INIT2_LOG(2, NUM_B8_TYPE_CTX, mc->b8_type_contexts);
	BIARI_CTX_INIT2_LOG(2, NUM_MV_RES_CTX, mc->mv_res_contexts);
	BIARI_CTX_INIT2_LOG(2, NUM_REF_NO_CTX, mc->ref_no_contexts);
	BIARI_CTX_INIT1_LOG(NUM_DELTA_QP_CTX, mc->delta_qp_contexts);
	BIARI_CTX_INIT1_LOG(NUM_MB_AFF_CTX, mc->mb_aff_contexts);

	BIARI_CTX_INIT1_LOG(NUM_IPR_CTX, tc->ipr_contexts);
	BIARI_CTX_INIT1_LOG(NUM_CIPR_CTX, tc->cipr_contexts);
	BIARI_CTX_INIT2_LOG(3, NUM_CBP_CTX, tc->cbp_contexts);
	BIARI_CTX_INIT2_LOG(NUM_BLOCK_TYPES, NUM_BCBP_CTX, tc->bcbp_contexts);
	BIARI_CTX_INIT2_LOG(NUM_BLOCK_TYPES, NUM_ONE_CTX, tc->one_contexts);
	BIARI_CTX_INIT2_LOG(NUM_BLOCK_TYPES, NUM_ABS_CTX, tc->abs_contexts);
	BIARI_CTX_INIT2_LOG(NUM_BLOCK_TYPES, NUM_MAP_CTX, tc->fld_map_contexts);
	BIARI_CTX_INIT2_LOG(NUM_BLOCK_TYPES, NUM_LAST_CTX,
		tc->fld_last_contexts);
	BIARI_CTX_INIT2_LOG(NUM_BLOCK_TYPES, NUM_MAP_CTX, tc->map_contexts);
	BIARI_CTX_INIT2_LOG(NUM_BLOCK_TYPES, NUM_LAST_CTX, tc->last_contexts);
#ifdef TEST_WEIGHTING_AEC
	biari_init_context_logac(&mc->mb_weighting_pred);
#endif
}

/*!
 ************************************************************************
 * \brief
 *    Allocation of contexts models for the motion info
 *    used for arithmetic decoding
 *
 ************************************************************************
 */
/* Allocate the motion-info context set; logs on failure, may return NULL. */
struct motion_info_contexts_s *create_contexts_motioninfo(void)
{
	struct motion_info_contexts_s *deco_ctx;

	deco_ctx = (struct motion_info_contexts_s *)local_alloc(1,
		sizeof(struct motion_info_contexts_s));
	if (deco_ctx == NULL)
		no_mem_exit("create_contexts_motioninfo: deco_ctx");

	return deco_ctx;
}

/*!
 ************************************************************************
 * \brief
 *    Allocates of contexts models for the texture info
 *    used for arithmetic decoding
 ************************************************************************
 */
/* Allocate the texture-info context set; logs on failure, may return NULL. */
struct texture_info_contexts *create_contexts_textureinfo(void)
{
	struct texture_info_contexts *deco_ctx;

	deco_ctx = (struct texture_info_contexts *)local_alloc(1,
		sizeof(struct texture_info_contexts));
	if (deco_ctx == NULL)
		no_mem_exit("create_contexts_textureinfo: deco_ctx");

	return deco_ctx;
}

/*
 * Allocate an array of n data partitions; with LIWR_FIX the per-partition
 * bitstream is left NULL (filled in elsewhere), otherwise each partition
 * gets its own bitstream_s.  Returns NULL on allocation failure.
 */
struct datapartition *alloc_partition(int n)
{
	struct datapartition *part_arr, *datapart;
	int i;

	part_arr =
	(struct datapartition *)local_alloc(n, sizeof(struct datapartition));
	if (part_arr == NULL) {
		no_mem_exit(
		"alloc_partition: Memory allocation for Data Partition failed");
		return NULL;
	}

#if LIWR_FIX
	part_arr[0].bitstream = NULL;
#else
	for (i = 0; i < n; i++) {
		datapart = &(part_arr[i]);
		datapart->bitstream = (struct bitstream_s *)local_alloc(1,
			sizeof(struct bitstream_s));
		if (datapart->bitstream == NULL) {
			no_mem_exit(
			"alloc_partition: Memory allocation for Bitstream failed");
			return NULL;
		}
	}
#endif
	return
 part_arr;
}

/*
 * Allocate the current slice object with its motion/texture context sets
 * and data partitions.  Returns 1 on success, 0 on any allocation failure
 * (earlier allocations are not rolled back — project allocator appears to
 * be arena-style; confirm before changing).
 */
int malloc_slice(struct img_par *img)
{
	struct slice_s *currslice;

	img->current_slice =
		(struct slice_s *)local_alloc(1, sizeof(struct slice_s));
	currslice = img->current_slice;
	if (currslice == NULL) {
		no_mem_exit(
			"Memory allocation for struct slice_s datastruct Failed"
			);
		return 0;
	}
	if (1) {

		currslice->mot_ctx = create_contexts_motioninfo();
		if (currslice->mot_ctx == NULL)
			return 0;

		currslice->tex_ctx = create_contexts_textureinfo();
		if (currslice->tex_ctx == NULL)
			return 0;
	}
#if LIWR_FIX
	currslice->max_part_nr = 1;
#else
	currslice->max_part_nr = 3;
#endif
	currslice->part_arr = alloc_partition(currslice->max_part_nr);
	if (currslice->part_arr == NULL)
		return 0;
	return 1;
}

/* Precompute the squares table img->quad[i] = i*i for i in [0,255]. */
void init(struct img_par *img)
{
	int i;

	for (i = 0; i < 256; i++)
		img->quad[i] = i * i;
}

/*
 *************************************************************************
 * Function:Allocate 2D memory array -> int array2D[rows][columns]
 * Input:
 * Output: memory size in bytes
 * Return:
 * Attention:
 *************************************************************************
 */

/*
 * Allocate a rows x columns int matrix as one contiguous payload plus a
 * row-pointer array.  Returns the payload size in bytes, or -1 on failure.
 */
int get_mem2Dint(int ***array2D, int rows, int columns)
{
	int i;

	*array2D = (int **)local_alloc(rows, sizeof(int *));
	if (*array2D == NULL) {
		no_mem_exit("get_mem2Dint: array2D");
		return -1;
	}
	(*array2D)[0] = (int *)local_alloc(rows * columns, sizeof(int));
	if ((*array2D)[0] == NULL) {
		no_mem_exit("get_mem2Dint: array2D");
		return -1;
	}

	for (i = 1; i < rows; i++)
		(*array2D)[i] = (*array2D)[i - 1] + columns;

	return rows * columns * sizeof(int);
}

/*
 * One-time decoder setup: allocate the slice, the per-MB array and the
 * intra-prediction-mode map (sized differently for progressive vs
 * interlaced sequences), initialize ipredmode to -1 and reset the picture
 * state.  Returns 1 on success, 0 on failure.
 * NOTE(review): the -1 init loop is bounded by img->height while the
 * interlaced allocation is sized from vertical_size — verify these always
 * agree, otherwise the loop could overrun the allocation.
 */
int initial_decode(void)
{
	int i, j;
	int ret;
	int img_height = (vertical_size + img->auto_crop_bottom);
	int memory_size = 0;

	ret = malloc_slice(img);
	if (ret == 0)
		return 0;

	mb_data = (struct macroblock *)local_alloc(
		(img->width / MB_BLOCK_SIZE)
		* (img_height /*vertical_size*/
		/ MB_BLOCK_SIZE), sizeof(struct macroblock));
	if (mb_data == NULL) {
		no_mem_exit("init_global_buffers: mb_data");
		return 0;
	}

	if (progressive_sequence) {
		int size;
		size = get_mem2Dint(&(img->ipredmode),
			img->width / B8_SIZE * 2 + 4,
			vertical_size / B8_SIZE * 2 + 4);
		if (size == -1)
			return 0;

		memory_size += size;
	} else {
		int size;
		size = get_mem2Dint(&(img->ipredmode),
			img->width / B8_SIZE * 2 + 4,
			(vertical_size + 32) / (2 * B8_SIZE) * 4 + 4);
		if (size == -1)
			return 0;

		memory_size += size;
	}

	/* -1 marks "no intra prediction mode decoded yet". */
	for (i = 0; i < img->width / (B8_SIZE) * 2 + 4; i++) {
		for (j = 0; j < img->height / (B8_SIZE) * 2 + 4; j++)
			img->ipredmode[i][j] = -1;
	}

	init(img);
	img->number = 0;
	img->type = I_IMG;
	img->imgtr_last_p = 0;
	img->imgtr_next_p = 0;

	img->new_seq_header_flag = 1;
	img->new_sequence_flag = 1;

	return 1;
}

/* Per-slice AEC reset: clear the delta-QP predictor. */
void aec_new_slice(void)
{
	last_dquant = 0;
}

/*!
 ************************************************************************
 * \brief
 *    Initializes the DecodingEnvironment for the arithmetic coder
 ************************************************************************
 */

/*
 * Prime the global arithmetic-decoder state for a new slice: point the
 * byte source at cpixcode starting at 'firstbyte', reset the range
 * (s1/t1), then preload B_BITS-1 bits into value_t and renormalize.
 * 'dep' and 'slice_type' are unused; all state is global.
 */
void arideco_start_decoding(struct decoding_environment_s *dep,
	unsigned char *cpixcode,
	int firstbyte, int *cpixcode_len, int slice_type)
{

	dcodestrm = cpixcode;
	dcodestrm_len = cpixcode_len;
	*dcodestrm_len = firstbyte;

	s1 = 0;
	t1 = QUARTER - 1;
	value_s = 0;

	value_t = 0;

	{
		int i;

		dbits_to_go = 0;
		for (i = 0; i < B_BITS - 1; i++) {
			if (--dbits_to_go < 0)
				get_byte();

			value_t = (value_t << 1)
				| ((dbuffer >> dbits_to_go) & 0x01);
		}
	}

	while (value_t < QUARTER) {
		if (--dbits_to_go < 0)
			get_byte();

		value_t = (value_t << 1) | ((dbuffer >> dbits_to_go) & 0x01);
		value_s++;
	}
	value_t = value_t & 0xff;

	dec_final = dec_bypass = 0;



}

/*
 *************************************************************************
 * Function:Checks the availability of neighboring macroblocks of
+ the current macroblock for prediction and context determination; + marks the unavailable MBs for intra prediction in the + ipredmode-array by -1. Only neighboring MBs in the causal + past of the current MB are checked. + * Input: + * Output: + * Return: + * Attention: + ************************************************************************* + */ + +void checkavailabilityofneighbors(struct img_par *img) +{ + int i, j; + const int mb_width = img->width / MB_BLOCK_SIZE; + const int mb_nr = img->current_mb_nr; + struct macroblock *curr_mb = &mb_data[mb_nr]; + int check_value; + int remove_prediction; + + curr_mb->mb_available_up = NULL; + curr_mb->mb_available_left = NULL; + + for (i = 0; i < 3; i++) + for (j = 0; j < 3; j++) + mb_data[mb_nr].mb_available[i][j] = NULL; + + mb_data[mb_nr].mb_available[1][1] = curr_mb; + + if (img->pix_x >= MB_BLOCK_SIZE) { + remove_prediction = curr_mb->slice_nr + != mb_data[mb_nr - 1].slice_nr; + + if (remove_prediction) + + { + + img->ipredmode[(img->block_x + 1) * 2 - 1][(img->block_y + + 1) * 2] = -1; + img->ipredmode[(img->block_x + 1) * 2 - 1][(img->block_y + + 1) * 2 + 1] = -1; + img->ipredmode[(img->block_x + 1) * 2 - 1][(img->block_y + + 2) * 2] = -1; + img->ipredmode[(img->block_x + 1) * 2 - 1][(img->block_y + + 2) * 2 + 1] = -1; + } + if (!remove_prediction) + curr_mb->mb_available[1][0] = &(mb_data[mb_nr - 1]); + + } + + check_value = (img->pix_y >= MB_BLOCK_SIZE); + if (check_value) { + remove_prediction = curr_mb->slice_nr + != mb_data[mb_nr - mb_width].slice_nr; + + if (remove_prediction) { + img->ipredmode + [(img->block_x + 1) * 2][(img->block_y + 1) + * 2 - 1] = -1; + img->ipredmode[(img->block_x + 1) * 2 + 1][(img->block_y + + 1) * 2 - 1] = -1; + img->ipredmode[(img->block_x + 1) * 2 + 2][(img->block_y + + 1) * 2 - 1] = -1; + img->ipredmode[(img->block_x + 1) * 2 + 3][(img->block_y + + 1) * 2 - 1] = -1; + } + + if (!remove_prediction) { + curr_mb->mb_available[0][1] = + &(mb_data[mb_nr - mb_width]); + } + } + + if 
(img->pix_y >= MB_BLOCK_SIZE && img->pix_x >= MB_BLOCK_SIZE) { + remove_prediction = curr_mb->slice_nr + != mb_data[mb_nr - mb_width - 1].slice_nr; + + if (remove_prediction) { + img->ipredmode[img->block_x * 2 + 1][img->block_y * 2 + + 1] = -1; + } + if (!remove_prediction) { + curr_mb->mb_available[0][0] = &(mb_data[mb_nr - mb_width + - 1]); + } + } + + if (img->pix_y >= MB_BLOCK_SIZE + && img->pix_x < (img->width - MB_BLOCK_SIZE)) { + if (curr_mb->slice_nr == mb_data[mb_nr - mb_width + 1].slice_nr) + curr_mb->mb_available[0][2] = &(mb_data[mb_nr - mb_width + + 1]); + } + + if (1) { + curr_mb->mbaddr_a = mb_nr - 1; + curr_mb->mbaddr_b = mb_nr - img->pic_width_inmbs; + curr_mb->mbaddr_c = mb_nr - img->pic_width_inmbs + 1; + curr_mb->mbaddr_d = mb_nr - img->pic_width_inmbs - 1; + + curr_mb->mbavail_a = + (curr_mb->mb_available[1][0] != NULL) ? 1 : 0; + curr_mb->mbavail_b = + (curr_mb->mb_available[0][1] != NULL) ? 1 : 0; + curr_mb->mbavail_c = + (curr_mb->mb_available[0][2] != NULL) ? 1 : 0; + curr_mb->mbavail_d = + (curr_mb->mb_available[0][0] != NULL) ? 
1 : 0; + + } + +} + +void checkavailabilityofneighborsaec(void) +{ + + int i, j; + const int mb_width = img->width / MB_BLOCK_SIZE; + const int mb_nr = img->current_mb_nr; + struct macroblock *curr_mb = &(mb_data[mb_nr]); + int check_value; + + for (i = 0; i < 3; i++) + for (j = 0; j < 3; j++) + mb_data[mb_nr].mb_available[i][j] = NULL; + mb_data[mb_nr].mb_available[1][1] = &(mb_data[mb_nr]); + + if (img->pix_x >= MB_BLOCK_SIZE) { + int remove_prediction = curr_mb->slice_nr + != mb_data[mb_nr - 1].slice_nr; + if (!remove_prediction) + curr_mb->mb_available[1][0] = &(mb_data[mb_nr - 1]); + } + + check_value = (img->pix_y >= MB_BLOCK_SIZE); + if (check_value) { + int remove_prediction = curr_mb->slice_nr + != mb_data[mb_nr - mb_width].slice_nr; + + if (!remove_prediction) { + curr_mb->mb_available[0][1] = + &(mb_data[mb_nr - mb_width]); + } + } + + if (img->pix_y >= MB_BLOCK_SIZE && img->pix_x >= MB_BLOCK_SIZE) { + int remove_prediction = curr_mb->slice_nr + != mb_data[mb_nr - mb_width - 1].slice_nr; + if (!remove_prediction) { + curr_mb->mb_available[0][0] = &(mb_data[mb_nr - mb_width + - 1]); + } + } + + if (img->pix_y >= MB_BLOCK_SIZE + && img->pix_x < (img->width - MB_BLOCK_SIZE)) { + if (curr_mb->slice_nr == mb_data[mb_nr - mb_width + 1].slice_nr) + curr_mb->mb_available[0][2] = &(mb_data[mb_nr - mb_width + + 1]); + } + curr_mb->mb_available_left = curr_mb->mb_available[1][0]; + curr_mb->mb_available_up = curr_mb->mb_available[0][1]; + curr_mb->mbaddr_a = mb_nr - 1; + curr_mb->mbaddr_b = mb_nr - img->pic_width_inmbs; + curr_mb->mbaddr_c = mb_nr - img->pic_width_inmbs + 1; + curr_mb->mbaddr_d = mb_nr - img->pic_width_inmbs - 1; + + curr_mb->mbavail_a = (curr_mb->mb_available[1][0] != NULL) ? 1 : 0; + curr_mb->mbavail_b = (curr_mb->mb_available[0][1] != NULL) ? 1 : 0; + curr_mb->mbavail_c = (curr_mb->mb_available[0][2] != NULL) ? 1 : 0; + curr_mb->mbavail_d = (curr_mb->mb_available[0][0] != NULL) ? 
1 : 0; +} + +/* + ************************************************************************* + * Function:initializes the current macroblock + * Input: + * Output: + * Return: + * Attention: + ************************************************************************* + */ + +void start_macroblock(struct img_par *img) +{ + int i, j, k, l; + struct macroblock *curr_mb; + +#ifdef AVSP_LONG_CABAC +#else + +#endif + + curr_mb = &mb_data[img->current_mb_nr]; + + /* Update coordinates of the current macroblock */ + img->mb_x = (img->current_mb_nr) % (img->width / MB_BLOCK_SIZE); + img->mb_y = (img->current_mb_nr) / (img->width / MB_BLOCK_SIZE); + +#ifdef DUMP_DEBUG + if (avs_get_debug_flag() & MB_NUM_DUMP) + io_printf(" #Begin MB : %d, (%x, %x) es_ptr %d\n", + img->current_mb_nr, img->mb_x, img->mb_y, es_ptr); +#endif + + + total_mb_count = total_mb_count + 1; + + /* Define vertical positions */ + img->block_y = img->mb_y * BLOCK_SIZE / 2; /* luma block position */ + img->block8_y = img->mb_y * BLOCK_SIZE / 2; + img->pix_y = img->mb_y * MB_BLOCK_SIZE; /* luma macroblock position */ + if (chroma_format == 2) + img->pix_c_y = img->mb_y * + MB_BLOCK_SIZE; /* chroma macroblock position */ + else + img->pix_c_y = img->mb_y * + MB_BLOCK_SIZE / 2; /* chroma macroblock position */ + + /* Define horizontal positions */ + img->block_x = img->mb_x * BLOCK_SIZE / 2; /* luma block position */ + img->block8_x = img->mb_x * BLOCK_SIZE / 2; + img->pix_x = img->mb_x * MB_BLOCK_SIZE; /* luma pixel position */ + img->pix_c_x = img->mb_x * + MB_BLOCK_SIZE / 2; /* chroma pixel position */ + + checkavailabilityofneighbors(img); + + /*<!*******EDIT START BY lzhang ******************/ + + if (1) + checkavailabilityofneighborsaec(); + /*<!*******EDIT end BY lzhang ******************/ + + curr_mb->qp = img->qp; + curr_mb->mb_type = 0; + curr_mb->delta_quant = 0; + curr_mb->cbp = 0; + curr_mb->cbp_blk = 0; + curr_mb->c_ipred_mode = DC_PRED_8; + curr_mb->c_ipred_mode_2 = DC_PRED_8; + + for (l = 0; l < 
2; l++) + for (j = 0; j < BLOCK_MULTIPLE; j++) + for (i = 0; i < BLOCK_MULTIPLE; i++) + for (k = 0; k < 2; k++) + curr_mb->mvd[l][j][i][k] = 0; + + curr_mb->cbp_bits = 0; + + for (j = 0; j < MB_BLOCK_SIZE; j++) + for (i = 0; i < MB_BLOCK_SIZE; i++) + img->m7[i][j] = 0; + + for (j = 0; j < 2 * BLOCK_SIZE; j++) + for (i = 0; i < 2 * BLOCK_SIZE; i++) { + img->m8[0][i][j] = 0; + img->m8[1][i][j] = 0; + img->m8[2][i][j] = 0; + img->m8[3][i][j] = 0; + } + + curr_mb->lf_disable = 1; + + img->weighting_prediction = 0; +} + +/* + ************************************************************************* + * Function:init macroblock I and P frames + * Input: + * Output: + * Return: + * Attention: + ************************************************************************* + */ + +void init_macroblock(struct img_par *img) +{ + int i, j; + + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + img->ipredmode[img->block_x * 2 + i + 2][img->block_y + * 2 + j + 2] = -1; + } + } + +} + +/* + ************************************************************************* + * Function:Interpret the mb mode for I-Frames + * Input: + * Output: + * Return: + * Attention: + ************************************************************************* + */ + +void interpret_mb_mode_i(struct img_par *img) +{ + int i; + + struct macroblock *curr_mb = &mb_data[img->current_mb_nr]; + int num = 4; + + curr_mb->mb_type = I8MB; + + + current_mb_intra = 1; + + for (i = 0; i < 4; i++) { + curr_mb->b8mode[i] = IBLOCK; + curr_mb->b8pdir[i] = -1; + } + + for (i = num; i < 4; i++) { + curr_mb->b8mode[i] = + curr_mb->mb_type_2 == P8x8 ? 
+ 4 : curr_mb->mb_type_2; + curr_mb->b8pdir[i] = 0; + } +} + +const int pred_4x4[9][9] = {{0, 0, 0, 0, 0, 0, 0, 0, 0}, {1, 1, 1, 1, 1, 1, 1, + 1, 1}, {0, 1, 2, 3, 4, 5, 6, 7, 8}, {0, 0, 0, 3, 3, 3, 3, 3, 3}, + {0, 1, 4, 4, 4, 4, 4, 4, 4}, {0, 1, 5, 5, 5, 5, 5, 5, 5}, {0, 0, + 0, 0, 0, 0, 6, 0, 0}, + {0, 1, 7, 7, 7, 7, 7, 7, 7}, {0, 0, 0, 0, 4, 5, 6, 7, 8} + +}; + +const int pred_4x4to8x8[9] = {0, 1, 2, 3, 4, 1, 0, 1, 0 + +}; + +const int pred_8x8to4x4[5] = {0, 1, 2, 3, 4}; + +void read_ipred_block_modes(struct img_par *img, int b8) +{ + int bi, bj, dec; + struct syntaxelement curr_se; + struct macroblock *curr_mb; + int j2; + int mostprobableintrapredmode; + int upintrapredmode; + int uprightintrapredmode; + int leftintrapredmode; + int leftdownintrapredmode; + int intrachromapredmodeflag; + + struct slice_s *currslice = img->current_slice; + struct datapartition *dp; + + curr_mb = mb_data + img->current_mb_nr; + intrachromapredmodeflag = IS_INTRA(curr_mb); + + curr_se.type = SE_INTRAPREDMODE; +#if TRACE + strncpy(curr_se.tracestring, "Ipred Mode", TRACESTRING_SIZE); +#endif + + if (b8 < 4) { + if (curr_mb->b8mode[b8] == IBLOCK) { + intrachromapredmodeflag = 1; + + if (1) { + dp = &(currslice->part_arr[0]); + curr_se.reading = read_intrapredmode_aec; + dp->read_syntax_element(&curr_se, img, dp); + + if (curr_se.value1 == -1) + push_es(1, 1); + else + push_es(curr_se.value1, 3); + + + } + bi = img->block_x + (b8 & 1); + bj = img->block_y + (b8 / 2); + + upintrapredmode = img->ipredmode[(bi + 1) * 2][(bj) * 2 + + 1]; + uprightintrapredmode = + img->ipredmode[(bi + 1) * 2 + 1][(bj) + * 2 + 1]; + leftintrapredmode = + img->ipredmode[(bi) * 2 + 1][(bj + 1) + * 2]; + leftdownintrapredmode = img->ipredmode[(bi) * 2 + 1][(bj + + 1) * 2 + 1]; + + if ((upintrapredmode < 0) || (leftintrapredmode < 0)) { + mostprobableintrapredmode = DC_PRED; + } else if ((upintrapredmode < NO_INTRA_PMODE) + && (leftintrapredmode < + NO_INTRA_PMODE)) { + mostprobableintrapredmode = + 
upintrapredmode + < leftintrapredmode ? + upintrapredmode : + leftintrapredmode; + } else if (upintrapredmode < NO_INTRA_PMODE) { + mostprobableintrapredmode = upintrapredmode; + } else if (leftintrapredmode < NO_INTRA_PMODE) { + mostprobableintrapredmode = leftintrapredmode; + } else { + mostprobableintrapredmode = + pred_4x4[leftintrapredmode + - INTRA_PMODE_4x4][upintrapredmode + - INTRA_PMODE_4x4]; + mostprobableintrapredmode = + pred_4x4to8x8[mostprobableintrapredmode]; + } + + + + dec = + (curr_se.value1 == -1) ? + mostprobableintrapredmode : + curr_se.value1 + + (curr_se.value1 + >= mostprobableintrapredmode); + +#ifdef DUMP_DEBUG + if (avs_get_debug_flag() & MB_INFO_DUMP) + io_printf(" - ipredmode[%d] : %d\n", b8, dec); +#endif + + img->ipredmode[(1 + bi) * 2][(1 + bj) * 2] = dec; + img->ipredmode[(1 + bi) * 2 + 1][(1 + bj) * 2] = dec; + img->ipredmode[(1 + bi) * 2][(1 + bj) * 2 + 1] = dec; + img->ipredmode[(1 + bi) * 2 + 1][(1 + bj) * 2 + 1] = + dec; + + j2 = bj; + } + } else if (b8 == 4 && curr_mb->b8mode[b8 - 3] == IBLOCK) { + + curr_se.type = SE_INTRAPREDMODE; +#if TRACE + strncpy(curr_se.tracestring, + "Chroma intra pred mode", TRACESTRING_SIZE); +#endif + + if (1) { + dp = &(currslice->part_arr[0]); + curr_se.reading = read_cipredmode_aec; + dp->read_syntax_element(&curr_se, img, dp); + } else + + { + } + curr_mb->c_ipred_mode = curr_se.value1; + + push_es(UE[curr_se.value1][0], UE[curr_se.value1][1]); + +#ifdef DUMP_DEBUG + if (avs_get_debug_flag() & MB_INFO_DUMP) + io_printf(" * UE c_ipred_mode read : %d\n", + curr_mb->c_ipred_mode); +#endif + + if (curr_se.value1 < DC_PRED_8 || curr_se.value1 > PLANE_8) { +#ifdef DUMP_DEBUG + if (avs_get_debug_flag() & MB_INFO_DUMP) + io_printf("%d\n", img->current_mb_nr); +#endif + pr_info("illegal chroma intra pred mode!\n"); + } + } +} + +/*! 
+ ************************************************************************ + * \brief + * This function is used to arithmetically decode the coded + * block pattern of a given MB. + ************************************************************************ + */ +void readcp_aec(struct syntaxelement *se, struct img_par *img, + struct decoding_environment_s *dep_dp) +{ + struct texture_info_contexts *ctx = img->current_slice->tex_ctx; + struct macroblock *curr_mb = &mb_data[img->current_mb_nr]; + + int mb_x, mb_y; + int a, b; + int curr_cbp_ctx, curr_cbp_idx; + int cbp = 0; + int cbp_bit; + int mask; + + for (mb_y = 0; mb_y < 4; mb_y += 2) { + for (mb_x = 0; mb_x < 4; mb_x += 2) { + if (curr_mb->b8mode[mb_y + (mb_x / 2)] == IBLOCK) + curr_cbp_idx = 0; + else + curr_cbp_idx = 1; + + if (mb_y == 0) { + if (curr_mb->mb_available_up == NULL) + b = 0; + else { + b = ((((curr_mb->mb_available_up)->cbp + & (1 << (2 + mb_x / 2))) + == 0) ? 1 : 0); + } + + } else + b = (((cbp & (1 << (mb_x / 2))) == 0) ? 1 : 0); + + if (mb_x == 0) { + if (curr_mb->mb_available_left == NULL) + a = 0; + else { + a = + ((((curr_mb->mb_available_left)->cbp + & (1 + << (2 + * (mb_y + / 2) + + 1))) + == 0) ? + 1 : 0); + } + } else + a = (((cbp & (1 << mb_y)) == 0) ? 1 : 0); + curr_cbp_ctx = a + 2 * b; + mask = (1 << (mb_y + mb_x / 2)); + cbp_bit = biari_decode_symbol(dep_dp, + ctx->cbp_contexts[0] + curr_cbp_ctx); + + if (cbp_bit) + cbp += mask; + } + } + curr_cbp_ctx = 0; + cbp_bit = biari_decode_symbol(dep_dp, + ctx->cbp_contexts[1] + curr_cbp_ctx); + + if (cbp_bit) { + curr_cbp_ctx = 1; + cbp_bit = biari_decode_symbol(dep_dp, + ctx->cbp_contexts[1] + curr_cbp_ctx); + if (cbp_bit) { + cbp += 48; + + } else { + curr_cbp_ctx = 1; + cbp_bit = biari_decode_symbol(dep_dp, + ctx->cbp_contexts[1] + curr_cbp_ctx); + cbp += (cbp_bit == 1) ? 32 : 16; + + } + } + + se->value1 = cbp; + if (!cbp) + last_dquant = 0; + + + +} + +/*! 
+ ************************************************************************ + * \brief + * This function is used to arithmetically decode the delta qp + * of a given MB. + ************************************************************************ + */ +void readdquant_aec(struct syntaxelement *se, struct img_par *img, + struct decoding_environment_s *dep_dp) +{ + struct motion_info_contexts_s *ctx = img->current_slice->mot_ctx; + + int act_ctx; + int act_sym; + int dquant; + + + act_ctx = ((last_dquant != 0) ? 1 : 0); + + act_sym = 1 + - biari_decode_symbol(dep_dp, + ctx->delta_qp_contexts + act_ctx); + if (act_sym != 0) { + act_ctx = 2; + act_sym = unary_bin_decode(dep_dp, + ctx->delta_qp_contexts + act_ctx, 1); + act_sym++; + } + act_sym &= 0x3f; + push_es(UE[act_sym][0], UE[act_sym][1]); + + dquant = (act_sym + 1) / 2; + if ((act_sym & 0x01) == 0) + dquant = -dquant; + se->value1 = dquant; + + last_dquant = dquant; + +} + +int csyntax; + +#define CHECKDELTAQP {\ + if (img->qp+curr_mb->delta_quant > 63\ + || img->qp+curr_mb->delta_quant < 0) {\ + csyntax = 0;\ + transcoding_error_flag = 1;\ + io_printf("error(0) (%3d|%3d) @ MB%d\n",\ + curr_mb->delta_quant,\ + img->qp+curr_mb->delta_quant,\ + img->picture_structure == 0 \ + ? 
img->current_mb_nr_fld : img->current_mb_nr);\ + } } + +int dct_level[65]; +int dct_run[65]; +int pair_pos; +int dct_pairs = -1; +const int t_chr[5] = {0, 1, 2, 4, 3000}; + +void readrunlevel_aec_ref(struct syntaxelement *se, struct img_par *img, + struct decoding_environment_s *dep_dp) +{ + int pairs, rank, pos; + int run, level, abslevel, symbol; + int sign; + + if (dct_pairs < 0) { + struct bi_context_type_s (*primary)[NUM_MAP_CTX]; + struct bi_context_type_s *pctx; + struct bi_context_type_s *pCTX2; + int ctx, ctx2, offset; + + if (se->context == LUMA_8x8) { + if (img->picture_structure == 0) { + primary = + img->current_slice->tex_ctx->fld_map_contexts; + } else { + primary = + img->current_slice->tex_ctx->map_contexts; + } + } else { + if (img->picture_structure == 0) { + primary = + img->current_slice->tex_ctx->fld_last_contexts; + } else { + primary = + img->current_slice->tex_ctx->last_contexts; + } + } + + rank = 0; + pos = 0; + for (pairs = 0; pairs < 65; pairs++) { +#ifdef DECODING_SANITY_CHECK + /*max index is NUM_BLOCK_TYPES - 1*/ + pctx = primary[rank & 0x7]; +#else + pctx = primary[rank]; +#endif + if (rank > 0) { +#ifdef DECODING_SANITY_CHECK + /*max index is NUM_BLOCK_TYPES - 1*/ + pCTX2 = primary[(5 + (pos >> 5)) & 0x7]; +#else + pCTX2 = primary[5 + (pos >> 5)]; +#endif + ctx2 = (pos >> 1) & 0x0f; + ctx = 0; + + + if (biari_decode_symbolw(dep_dp, pctx + ctx, + pCTX2 + ctx2)) { + break; + } + } + + ctx = 1; + symbol = 0; + while (biari_decode_symbol(dep_dp, pctx + ctx) == 0) { + symbol += 1; + ctx++; + if (ctx >= 2) + ctx = 2; + } + abslevel = symbol + 1; + + if (biari_decode_symbol_eq_prob(dep_dp)) { + level = -abslevel; + sign = 1; + } else { + level = abslevel; + sign = 0; + } +#if TRACE + tracebits2("level", 1, level); +#endif + + if (abslevel == 1) + offset = 4; + else + offset = 6; + symbol = 0; + ctx = 0; + while (biari_decode_symbol(dep_dp, pctx + ctx + offset) + == 0) { + symbol += 1; + ctx++; + if (ctx >= 1) + ctx = 1; + } + run = 
symbol; + +#if TRACE + tracebits2("run", 1, run); +#endif + dct_level[pairs] = level; + dct_run[pairs] = run; + if (abslevel > t_chr[rank]) { + if (abslevel <= 2) + rank = abslevel; + else if (abslevel <= 4) + rank = 3; + else + rank = 4; + } + pos += (run + 1); + if (pos >= 64) + pos = 63; + } + dct_pairs = pairs; + pair_pos = dct_pairs; + } + + if (dct_pairs > 0) { + se->value1 = dct_level[pair_pos - 1]; + se->value2 = dct_run[pair_pos - 1]; + pair_pos--; + } else { + + se->value1 = se->value2 = 0; + } + + if ((dct_pairs--) == 0) + pair_pos = 0; +} + +int b8_ctr; +#if 0 +int curr_residual_chroma[4][16][16]; +int curr_residual_luma[16][16]; +#endif + +const int SCAN[2][64][2] = {{{0, 0}, {0, 1}, {0, 2}, {1, 0}, {0, 3}, {0, 4}, {1, + 1}, {1, 2}, {0, 5}, {0, 6}, {1, 3}, {2, 0}, {2, 1}, {0, 7}, {1, + 4}, {2, 2}, {3, 0}, {1, 5}, {1, 6}, {2, 3}, {3, 1}, {3, 2}, {4, + 0}, {1, 7}, {2, 4}, {4, 1}, {2, 5}, {3, 3}, {4, 2}, {2, 6}, {3, + 4}, {4, 3}, {5, 0}, {5, 1}, {2, 7}, {3, 5}, {4, 4}, {5, 2}, {6, + 0}, {5, 3}, {3, 6}, {4, 5}, {6, 1}, {6, 2}, {5, 4}, {3, 7}, {4, + 6}, {6, 3}, {5, 5}, {4, 7}, {6, 4}, {5, 6}, {6, 5}, {5, 7}, {6, + 6}, {7, 0}, {6, 7}, {7, 1}, {7, 2}, {7, 3}, {7, 4}, {7, 5}, {7, + 6}, {7, 7} }, {{0, 0}, {1, 0}, {0, 1}, {0, 2}, {1, 1}, {2, 0}, { + 3, 0}, {2, 1}, {1, 2}, {0, 3}, {0, 4}, {1, 3}, {2, 2}, {3, 1}, { + 4, 0}, {5, 0}, {4, 1}, {3, 2}, {2, 3}, {1, 4}, {0, 5}, {0, 6}, { + 1, 5}, {2, 4}, {3, 3}, {4, 2}, {5, 1}, {6, 0}, {7, 0}, {6, 1}, { + 5, 2}, {4, 3}, {3, 4}, {2, 5}, {1, 6}, {0, 7}, {1, 7}, {2, 6}, { + 3, 5}, {4, 4}, {5, 3}, {6, 2}, {7, 1}, {7, 2}, {6, 3}, {5, 4}, { + 4, 5}, {3, 6}, {2, 7}, {3, 7}, {4, 6}, {5, 5}, {6, 4}, {7, 3}, { + 7, 4}, {6, 5}, {5, 6}, {4, 7}, {5, 7}, {6, 6}, {7, 5}, {7, 6}, { + 6, 7}, {7, 7} } }; + +const int SCAN_4x4[16][2] = {{0, 0}, {1, 0}, {0, 1}, {0, 2}, {1, 1}, {2, 0}, {3, + 0}, {2, 1}, {1, 2}, {0, 3}, {1, 3}, {2, 2}, {3, 1}, {3, 2}, {2, + 3}, {3, 3} }; + +/* + 
************************************************************************* + * Function: + * Input: + * Output: + * Return: + * Attention: + ************************************************************************* + */ + +void encode_golomb_word(unsigned int symbol, unsigned int grad0, + unsigned int max_levels, unsigned int *res_bits, + unsigned int *res_len) +{ + unsigned int level, res, numbits; + + res = 1UL << grad0; + level = 1UL; + numbits = 1UL + grad0; + + while (symbol >= res && level < max_levels) { + symbol -= res; + res = res << 1; + level++; + numbits += 2UL; + } + + if (level >= max_levels) { + if (symbol >= res) + symbol = res - 1UL; + } + + *res_bits = res | symbol; + *res_len = numbits; +} + +/* + ************************************************************************* + * Function: + * Input: + * Output: + * Return: + * Attention: + ************************************************************************* + */ + +void encode_multilayer_golomb_word(unsigned int symbol, + const unsigned int *grad, const unsigned int *max_levels, + unsigned int *res_bits, unsigned int *res_len) +{ + unsigned int accbits, acclen, bits, len, tmp; + + accbits = acclen = 0UL; + + while (1) { + encode_golomb_word(symbol, *grad, *max_levels, &bits, &len); + accbits = (accbits << len) | bits; + acclen += len; +#ifdef AVSP_LONG_CABAC +#else + assert(acclen <= 32UL); +#endif + tmp = *max_levels - 1UL; + + if (!((len == (tmp << 1) + (*grad)) + && (bits == (1UL << (tmp + *grad)) - 1UL))) + break; + + tmp = *max_levels; + symbol -= (((1UL << tmp) - 1UL) << (*grad)) - 1UL; + grad++; + max_levels++; + } + *res_bits = accbits; + *res_len = acclen; +} + +/* + ************************************************************************* + * Function: + * Input: + * Output: + * Return: + * Attention: + ************************************************************************* + */ + +int writesyntaxelement_golomb(struct syntaxelement *se, int write_to_stream) +{ + unsigned int bits, len, 
i; + unsigned int grad[4], max_lev[4]; + + if (!(se->golomb_maxlevels & ~0xFF)) + encode_golomb_word(se->value1, se->golomb_grad, + se->golomb_maxlevels, &bits, &len); + else { + for (i = 0UL; i < 4UL; i++) { + grad[i] = (se->golomb_grad >> (i << 3)) & 0xFFUL; + max_lev[i] = (se->golomb_maxlevels >> (i << 3)) + & 0xFFUL; + } + encode_multilayer_golomb_word(se->value1, grad, max_lev, &bits, + &len); + } + + se->len = len; + se->bitpattern = bits; + + if (write_to_stream) + push_es(bits, len); + return se->len; +} + +/* + ************************************************************************* + * Function:Get coded block pattern and coefficients (run/level) + from the bitstream + * Input: + * Output: + * Return: + * Attention: + ************************************************************************* + */ + +void read_cbpandcoeffsfrom_nal(struct img_par *img) +{ + + int tablenum; + int inumblk; + int inumcoeff; + int symbol2D; + int escape_level_diff; + const int (*AVS_2DVLC_table_intra)[26][27]; + const int (*AVS_2DVLC_table_chroma)[26][27]; + int write_to_stream; + struct syntaxelement currse_enc; + struct syntaxelement *e_currse = &currse_enc; + + int coeff_save[65][2]; + int coeff_ptr; + + int ii, jj; + int mb_nr = img->current_mb_nr; + + int m2, jg2; + struct macroblock *curr_mb = &mb_data[mb_nr]; + + int block8x8; + + int block_x, block_y; + + struct slice_s *currslice = img->current_slice; + int level, run, coef_ctr, len, k, i0, j0, uv, qp; + + int boff_x, boff_y, start_scan; + struct syntaxelement curr_se; + struct datapartition *dp; + + AVS_2DVLC_table_intra = AVS_2DVLC_INTRA; + AVS_2DVLC_table_chroma = AVS_2DVLC_CHROMA; + write_to_stream = 1; + + dct_pairs = -1; + + curr_mb->qp = img->qp; + qp = curr_mb->qp; + + + for (block_y = 0; block_y < 4; block_y += 2) {/* all modes */ + for (block_x = 0; block_x < 4; block_x += 2) { + block8x8 = 2 * (block_y / 2) + block_x / 2; + if (curr_mb->cbp & (1 << block8x8)) { + tablenum = 0; + inumblk = 1; + inumcoeff = 
65; + coeff_save[0][0] = 0; + coeff_save[0][1] = 0; + coeff_ptr = 1; + + b8_ctr = block8x8; + + boff_x = (block8x8 % 2) << 3; + boff_y = (block8x8 / 2) << 3; + + img->subblock_x = boff_x >> 2; + img->subblock_y = boff_y >> 2; + + start_scan = 0; + coef_ctr = start_scan - 1; + level = 1; + img->is_v_block = 0; + img->is_intra_block = IS_INTRA(curr_mb); + for (k = start_scan; + (k < 65) && (level != 0); + k++) { + + curr_se.context = LUMA_8x8; + curr_se.type = + (IS_INTRA(curr_mb)) ? + SE_LUM_AC_INTRA : + SE_LUM_AC_INTER; + + dp = &(currslice->part_arr[0]); + curr_se.reading = + readrunlevel_aec_ref; + dp-> + read_syntax_element(&curr_se, + img, dp); + level = curr_se.value1; + run = curr_se.value2; + len = curr_se.len; + + if (level != 0) { + coeff_save[coeff_ptr][0] = + run; + coeff_save[coeff_ptr][1] = + level; + coeff_ptr++; + } + + + + if (level != 0) {/* leave if len = 1 */ + coef_ctr += run + 1; + if ((img->picture_structure + == FRAME)) { + ii = + SCAN[img->picture_structure] + [coef_ctr][0]; + jj = + SCAN[img->picture_structure] + [coef_ctr][1]; + } else { + ii = + SCAN[img->picture_structure] + [coef_ctr][0]; + jj = + SCAN[img->picture_structure] + [coef_ctr][1]; + } + + } + } + + while (coeff_ptr > 0) { + run = + coeff_save[coeff_ptr + - 1][0]; + level = + coeff_save[coeff_ptr + - 1][1]; + + coeff_ptr--; + + symbol2D = CODE2D_ESCAPE_SYMBOL; + if (level > -27 && level < 27 + && run < 26) { + if (tablenum == 0) + + symbol2D = + AVS_2DVLC_table_intra + [tablenum] + [run][abs( + level) + - 1]; + else + + symbol2D = + AVS_2DVLC_table_intra + [tablenum] + [run][abs( + level)]; + if (symbol2D >= 0 + && level + < 0) + symbol2D++; + if (symbol2D < 0) + + symbol2D = + (CODE2D_ESCAPE_SYMBOL + + (run + << 1) + + ((level + > 0) ? + 1 : + 0)); + } + + else { + + symbol2D = + (CODE2D_ESCAPE_SYMBOL + + (run + << 1) + + ((level + > 0) ? 
+ 1 : + 0)); + } + + + + e_currse->type = SE_LUM_AC_INTER; + e_currse->value1 = symbol2D; + e_currse->value2 = 0; + + e_currse->golomb_grad = + vlc_golomb_order + [0][tablenum][0]; + e_currse->golomb_maxlevels = + vlc_golomb_order + [0][tablenum][1]; + + writesyntaxelement_golomb( + e_currse, + write_to_stream); + + if (symbol2D + >= CODE2D_ESCAPE_SYMBOL) { + + e_currse->type = + SE_LUM_AC_INTER; + e_currse->golomb_grad = + 1; + e_currse->golomb_maxlevels = + 11; + escape_level_diff = + abs( + level) + - ((run + > MaxRun[0][tablenum]) ? + 1 : + refabslevel[tablenum][run]); + e_currse->value1 = + escape_level_diff; + + writesyntaxelement_golomb( + e_currse, + write_to_stream); + + } + + if (abs(level) + > incvlc_intra[tablenum]) { + if (abs(level) <= 2) + tablenum = + abs( + level); + else if (abs(level) <= 4) + tablenum = 3; + else if (abs(level) <= 7) + tablenum = 4; + else if (abs(level) + <= 10) + tablenum = 5; + else + tablenum = 6; + } + } + + + } + } + } + + + + m2 = img->mb_x * 2; + jg2 = img->mb_y * 2; + + + uv = -1; + block_y = 4; +#if 0 + qp = QP_SCALE_CR[curr_mb->qp]; +#endif + for (block_x = 0; block_x < 4; block_x += 2) { + + uv++; + + + b8_ctr = (uv + 4); + if ((curr_mb->cbp >> (uv + 4)) & 0x1) { + + tablenum = 0; + inumblk = 1; + inumcoeff = 65; + coeff_save[0][0] = 0; + coeff_save[0][1] = 0; + coeff_ptr = 1; + + coef_ctr = -1; + level = 1; + img->subblock_x = 0; + img->subblock_y = 0; + curr_se.context = CHROMA_AC; + curr_se.type = (IS_INTRA(curr_mb) ? 
+ SE_CHR_AC_INTRA : + SE_CHR_AC_INTER); + dp = &(currslice->part_arr[0]); + curr_se.reading = readrunlevel_aec_ref; + img->is_v_block = uv; + img->is_intra_block = IS_INTRA(curr_mb); + for (k = 0; (k < 65) && (level != 0); k++) { + + dp->read_syntax_element + (&curr_se, img, dp); + level = curr_se.value1; + run = curr_se.value2; + len = curr_se.len; + + if (level != 0) { + coeff_save[coeff_ptr][0] = run; + coeff_save[coeff_ptr][1] = + level; + coeff_ptr++; + } + + + if (level != 0) { + coef_ctr = coef_ctr + run + 1; + if ((img->picture_structure + == FRAME) + /*&& (!curr_mb->mb_field)*/) { + i0 = + SCAN[img->picture_structure] + [coef_ctr][0]; + j0 = + SCAN[img->picture_structure] + [coef_ctr][1]; + } else { + i0 = + SCAN[img->picture_structure] + [coef_ctr][0]; + j0 = + SCAN[img->picture_structure] + [coef_ctr][1]; + } + + } + } + + while (coeff_ptr > 0) { + + run = coeff_save[coeff_ptr - 1][0]; + level = coeff_save[coeff_ptr - 1][1]; + + coeff_ptr--; + + symbol2D = CODE2D_ESCAPE_SYMBOL; + if (level > -27 && level < 27 + && run < 26) { + if (tablenum == 0) + + symbol2D = + AVS_2DVLC_table_chroma + [tablenum][run][abs( + level) + - 1]; + else + symbol2D = + AVS_2DVLC_table_chroma + [tablenum][run][abs( + level)]; + if (symbol2D >= 0 + && level < 0) + symbol2D++; + if (symbol2D < 0) + symbol2D = + (CODE2D_ESCAPE_SYMBOL + + (run + << 1) + + ((level + > 0) ? + 1 : + 0)); + } + + else { + symbol2D = + (CODE2D_ESCAPE_SYMBOL + + (run + << 1) + + ((level + > 0) ? 
+ 1 : + 0)); + } + + e_currse->type = SE_LUM_AC_INTER; + e_currse->value1 = symbol2D; + e_currse->value2 = 0; + e_currse->golomb_grad = + vlc_golomb_order[2] + [tablenum][0]; + e_currse->golomb_maxlevels = + vlc_golomb_order[2] + [tablenum][1]; + + writesyntaxelement_golomb(e_currse, + write_to_stream); + + /* + * if (write_to_stream) + * { + * bitCount[BITS_COEFF_UV_MB]+=e_currse->len; + * e_currse++; + * curr_mb->currSEnr++; + * } + * no_bits+=e_currse->len; + + + * if (icoef == 0) break; + */ + + if (symbol2D >= CODE2D_ESCAPE_SYMBOL) { + + e_currse->type = SE_LUM_AC_INTER; + e_currse->golomb_grad = 0; + e_currse->golomb_maxlevels = 11; + escape_level_diff = + abs(level) + - ((run + > MaxRun[2][tablenum]) ? + 1 : + refabslevel[tablenum + + 14][run]); + e_currse->value1 = + escape_level_diff; + + writesyntaxelement_golomb( + e_currse, + write_to_stream); + + } + + if (abs(level) + > incvlc_chroma[tablenum]) { + if (abs(level) <= 2) + tablenum = abs(level); + else if (abs(level) <= 4) + tablenum = 3; + else + tablenum = 4; + } + } + + } + } +} + +/* + ************************************************************************* + * Function:Get the syntax elements from the NAL + * Input: + * Output: + * Return: + * Attention: + ************************************************************************* + */ + +int read_one_macroblock(struct img_par *img) +{ + int i, j; + + struct syntaxelement curr_se; + struct macroblock *curr_mb = &mb_data[img->current_mb_nr]; + + int cabp_flag; + + int tempcbp; + int fixqp; + + struct slice_s *currslice = img->current_slice; + struct datapartition *dp; + + fixqp = (fixed_picture_qp || fixed_slice_qp); + + for (i = 0; i < 8; i++) + for (j = 0; j < 8; j++) { + img->m8[0][i][j] = 0; + img->m8[1][i][j] = 0; + img->m8[2][i][j] = 0; + img->m8[3][i][j] = 0; + } + + current_mb_skip = 0; + + curr_mb->qp = img->qp; + curr_se.type = SE_MBTYPE; + curr_se.mapping = linfo_ue; + + curr_mb->mb_type_2 = 0; + + if (img->type == I_IMG) + 
curr_mb->mb_type = 0; + + interpret_mb_mode_i(img); + + init_macroblock(img); + + if ((IS_INTRA(curr_mb)) && (img->abt_flag)) { + +#if TRACE + strncpy(curr_se.tracestring, "cabp_flag", TRACESTRING_SIZE); +#endif + + curr_se.len = 1; + curr_se.type = SE_CABP; + read_syntaxelement_flc(&curr_se); + cabp_flag = curr_se.value1; + if (cabp_flag == 0) { + curr_mb->CABP[0] = 0; + curr_mb->CABP[1] = 0; + curr_mb->CABP[2] = 0; + curr_mb->CABP[3] = 0; + } else { + for (i = 0; i < 4; i++) { + curr_se.len = 1; + curr_se.type = SE_CABP; + read_syntaxelement_flc(&curr_se); + curr_mb->CABP[i] = curr_se.value1; + } + } + + } else { + curr_mb->CABP[0] = 0; + curr_mb->CABP[1] = 0; + curr_mb->CABP[2] = 0; + curr_mb->CABP[3] = 0; + + } + + if (IS_INTRA(curr_mb)) { + for (i = 0; i < /*5*/(chroma_format + 4); i++) + + read_ipred_block_modes(img, i); + } + + curr_se.type = SE_CBP_INTRA; + curr_se.mapping = linfo_cbp_intra; + +#if TRACE + snprintf(curr_se.tracestring, TRACESTRING_SIZE, "CBP"); +#endif + + if (img->type == I_IMG || IS_INTER(curr_mb)) { + curr_se.golomb_maxlevels = 0; + + if (1) { + dp = &(currslice->part_arr[0]); + curr_se.reading = readcp_aec; + dp->read_syntax_element(&curr_se, img, dp); + } + + + curr_mb->cbp = curr_se.value1; + push_es(UE[NCBP[curr_se.value1][0]][0], + UE[NCBP[curr_se.value1][0]][1]); + + } + +# if 1 + if (curr_mb->cbp != 0) + tempcbp = 1; + else + tempcbp = 0; +#else + + if (chroma_format == 2) { +#if TRACE + snprintf(curr_se.tracestring, TRACESTRING_SIZE, "CBP422"); +#endif + curr_se.mapping = /*linfo_se*/linfo_ue; + curr_se.type = SE_CBP_INTRA; + readsyntaxelement_uvlc(&curr_se, inp); + curr_mb->cbp01 = curr_se.value1; + io_printf(" * UE cbp01 read : 0x%02X\n", curr_mb->cbp01); + } + + if (chroma_format == 2) { + if (curr_mb->cbp != 0 || curr_mb->cbp01 != 0) + tempcbp = 1; + else + tempcbp = 0; + + } else { + if (curr_mb->cbp != 0) + tempcbp = 1; + else + tempcbp = 0; + } + +#endif + + if (IS_INTRA(curr_mb) && (img->abt_flag) && (curr_mb->cbp & 
(0xF))) { + curr_mb->CABT[0] = curr_mb->CABP[0]; + curr_mb->CABT[1] = curr_mb->CABP[1]; + curr_mb->CABT[2] = curr_mb->CABP[2]; + curr_mb->CABT[3] = curr_mb->CABP[3]; + } else { + + curr_mb->CABT[0] = 0; + curr_mb->CABT[1] = 0; + curr_mb->CABT[2] = 0; + curr_mb->CABT[3] = 0; + + if (!fixqp && (tempcbp)) { + if (IS_INTER(curr_mb)) + curr_se.type = SE_DELTA_QUANT_INTER; + else + curr_se.type = SE_DELTA_QUANT_INTRA; + +#if TRACE + snprintf(curr_se.tracestring, + TRACESTRING_SIZE, "Delta quant "); +#endif + + if (1) { + dp = &(currslice->part_arr[0]); + curr_se.reading = readdquant_aec; + dp->read_syntax_element(&curr_se, img, dp); + } + + curr_mb->delta_quant = curr_se.value1; +#ifdef DUMP_DEBUG + if (avs_get_debug_flag() & MB_INFO_DUMP) { + io_printf(" * SE delta_quant read : %d\n", + curr_mb->delta_quant); + } +#endif + CHECKDELTAQP + + if (transcoding_error_flag) + return -1; + + img->qp = (img->qp - MIN_QP + curr_mb->delta_quant + + (MAX_QP - MIN_QP + 1)) + % (MAX_QP - MIN_QP + 1) + MIN_QP; + } + + if (fixqp) { + curr_mb->delta_quant = 0; + img->qp = (img->qp - MIN_QP + curr_mb->delta_quant + + (MAX_QP - MIN_QP + 1)) + % (MAX_QP - MIN_QP + 1) + MIN_QP; + + } +#ifdef DUMP_DEBUG + if (avs_get_debug_flag() & MB_INFO_DUMP) + io_printf(" - img->qp : %d\n", img->qp); +#endif + } + + read_cbpandcoeffsfrom_nal(img); + return DECODE_MB; +} + +/*! + ************************************************************************ + * \brief + * finding end of a slice in case this is not the end of a frame + * + * Unsure whether the "correction" below actually solves an off-by-one + * problem or whether it introduces one in some cases :-( Anyway, + * with this change the bit stream format works with AEC again. 
+ * StW, 8.7.02 + ************************************************************************ + */ +int aec_startcode_follows(struct img_par *img, int eos_bit) +{ + struct slice_s *currslice = img->current_slice; + struct datapartition *dp; + unsigned int bit; + struct decoding_environment_s *dep_dp; + + dp = &(currslice->part_arr[0]); + dep_dp = &(dp->de_aec); + + if (eos_bit) + bit = biari_decode_final(dep_dp); + else + bit = 0; + + return bit == 1 ? 1 : 0; +} + +#ifdef AVSP_LONG_CABAC +int process_long_cabac(void) +#else +void main(void) +#endif +{ + int data32; + int current_header; + int i; + int tmp; + int ret; + + int byte_startposition; + int aec_mb_stuffing_bit; + struct slice_s *currslice; +#ifdef PERFORMANCE_DEBUG + pr_info("enter %s\r\n", __func__); +#endif + transcoding_error_flag = 0; + ret = 0; + es_buf = es_write_addr_virt; + + if (local_heap_init(MAX_CODED_FRAME_SIZE * 4) < 0) { + ret = -1; + goto End; + } + + img = (struct img_par *)local_alloc(1, sizeof(struct img_par)); + if (img == NULL) { + no_mem_exit("main: img"); + ret = -1; + goto End; + } + stat_bits_ptr = (struct stat_bits *)local_alloc(1, + sizeof(struct stat_bits)); + if (stat_bits_ptr == NULL) { + no_mem_exit("main: stat_bits"); + ret = -1; + goto End; + } + + curr_stream = alloc_bitstream(); + if (curr_stream == NULL) { + io_printf("alloc bitstream failed\n"); + ret = -1; + goto End; + } + + chroma_format = 1; + demulate_enable = 0; + img->seq_header_indicate = 1; + +#ifdef AVSP_LONG_CABAC + data32 = READ_VREG(LONG_CABAC_REQ); + progressive_sequence = (data32 >> 1) & 1; + fixed_picture_qp = (data32 >> 2) & 1; + img->picture_structure = (data32 >> 3) & 1; + img->type = (data32 >> 4) & 3; + skip_mode_flag = (data32 >> 6) & 1; + + src_start = READ_VREG(LONG_CABAC_SRC_ADDR); + des_start = READ_VREG(LONG_CABAC_DES_ADDR); + + data32 = READ_VREG(LONG_CABAC_PIC_SIZE); + horizontal_size = (data32 >> 0) & 0xffff; + vertical_size = (data32 >> 16) & 0xffff; + if (horizontal_size * vertical_size > 
1920 * 1080) { + io_printf("pic size check failed: width = %d, height = %d\n", + horizontal_size, vertical_size); + ret = -1; + goto End; + } + + vld_mem_start_addr = READ_VREG(VLD_MEM_VIFIFO_START_PTR); + vld_mem_end_addr = READ_VREG(VLD_MEM_VIFIFO_END_PTR); + +#else + progressive_sequence = 0; + fixed_picture_qp = 0; + img->picture_structure = 0; + img->type = I_IMG; + skip_mode_flag = 1; + horizontal_size = 1920; + vertical_size = 1080; + + src_start = 0; +#endif + + if (horizontal_size % 16 != 0) + img->auto_crop_right = 16 - (horizontal_size % 16); + else + img->auto_crop_right = 0; + + if (!progressive_sequence) { + if (vertical_size % 32 != 0) + img->auto_crop_bottom = 32 - (vertical_size % 32); + else + img->auto_crop_bottom = 0; + } else { + if (vertical_size % 16 != 0) + img->auto_crop_bottom = 16 - (vertical_size % 16); + else + img->auto_crop_bottom = 0; + } + + img->width = (horizontal_size + img->auto_crop_right); + if (img->picture_structure) + img->height = (vertical_size + img->auto_crop_bottom); + else + img->height = (vertical_size + img->auto_crop_bottom) / 2; + img->width_cr = (img->width >> 1); + + img->pic_width_inmbs = img->width / MB_BLOCK_SIZE; + img->pic_height_inmbs = img->height / MB_BLOCK_SIZE; + img->pic_size_inmbs = img->pic_width_inmbs * img->pic_height_inmbs; + + io_printf( + "[LONG CABAC] Start Transcoding from 0x%x to 0x%x Size : %d x %d\r\n", + src_start, des_start, horizontal_size, vertical_size); +#if 0 + io_printf("VLD_MEM_VIFIFO_START_PTR %x\r\n", + READ_VREG(VLD_MEM_VIFIFO_START_PTR)); + io_printf("VLD_MEM_VIFIFO_CURR_PTR %x\r\n", + READ_VREG(VLD_MEM_VIFIFO_CURR_PTR)); + io_printf("VLD_MEM_VIFIFO_END_PTR %x\r\n", + READ_VREG(VLD_MEM_VIFIFO_END_PTR)); + io_printf("VLD_MEM_VIFIFO_WP %x\r\n", + READ_VREG(VLD_MEM_VIFIFO_WP)); + io_printf("VLD_MEM_VIFIFO_RP %x\r\n", + READ_VREG(VLD_MEM_VIFIFO_RP)); + io_printf("VLD_MEM_VBUF_RD_PTR %x\r\n", + READ_VREG(VLD_MEM_VBUF_RD_PTR)); + io_printf("VLD_MEM_VIFIFO_BUF_CNTL %x\r\n", + 
READ_VREG(VLD_MEM_VIFIFO_BUF_CNTL)); +#endif + io_printf( + "[LONG CABAC] progressive_sequence : %d, fixed_picture_qp : %d, skip_mode_flag : %d\r\n", + progressive_sequence, fixed_picture_qp, skip_mode_flag); + io_printf("[LONG CABAC] picture_structure : %d, picture_type : %d\r\n", + img->picture_structure, img->type); + + open_irabs(p_irabs); + + + if (initial_decode() == 0) { + io_printf("initial_decode failed\n"); + ret = -1; + goto End; + } + + init_es(); + + current_header = header(); + io_printf("[LONG CABAC] header Return : %d\n", current_header); + + tmp = slice_header(temp_slice_buf, first_slice_startpos, + first_slice_length); + + init_contexts(img); + aec_new_slice(); + byte_startposition = (curr_stream->frame_bitoffset) / 8; + + currslice = img->current_slice; + + if (1) { + for (i = 0; i < 1; i++) { + img->current_slice->part_arr[i].read_syntax_element = + read_syntaxelement_aec; + img->current_slice->part_arr[i].bitstream = curr_stream; + } + curr_stream = currslice->part_arr[0].bitstream; + } + if ((curr_stream->frame_bitoffset) % 8 != 0) + byte_startposition++; + + arideco_start_decoding(&img->current_slice->part_arr[0].de_aec, + curr_stream->stream_buffer, (byte_startposition), + &(curr_stream->read_len), img->type); + + img->current_mb_nr = 0; + total_mb_count = 0; + while (img->current_mb_nr < img->pic_size_inmbs) + + { + start_macroblock(img); + if (-1 == read_one_macroblock(img)) { + ret = -1; + pr_info("macroblock trans failed, exit\n"); + goto End; + } + if (img->cod_counter <= 0) + aec_mb_stuffing_bit = aec_startcode_follows(img, 1); + img->current_mb_nr++; + } + + push_es(0xff, 8); + io_printf(" Total ES_LENGTH : %d\n", es_ptr); + +#ifdef AVSP_LONG_CABAC + push_es(0xff, 64); + if (es_buf_is_overflow) { + io_printf("fatal error: es_buf_is_overflow\n"); + ret = -1; + goto End; + } + + if (transcoding_error_flag == 0) { +#if 1 + dma_sync_single_for_device(amports_get_dma_device(), + es_write_addr_phy, + es_ptr, DMA_TO_DEVICE); + + wmb(); /**/ 
+#endif + } +#else + fclose(f_es); +#endif + +End: +#ifdef AVSP_LONG_CABAC + WRITE_VREG(LONG_CABAC_REQ, 0); +#endif + local_heap_uninit(); +#ifdef PERFORMANCE_DEBUG + pr_info("exit %s\r\n", __func__); +#endif + return ret; +} +#endif
diff --git a/drivers/frame_provider/decoder/avs2/Makefile b/drivers/frame_provider/decoder/avs2/Makefile new file mode 100644 index 0000000..5fe8566 --- /dev/null +++ b/drivers/frame_provider/decoder/avs2/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_AVS2) += amvdec_avs2.o +amvdec_avs2-objs += vavs2.o avs2_bufmgr.o
diff --git a/drivers/frame_provider/decoder/avs2/avs2_bufmgr.c b/drivers/frame_provider/decoder/avs2/avs2_bufmgr.c new file mode 100644 index 0000000..de9a3d2 --- /dev/null +++ b/drivers/frame_provider/decoder/avs2/avs2_bufmgr.c
@@ -0,0 +1,2205 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/slab.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include "avs2_global.h" + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../utils/vdec.h" +#include 
"../utils/amvdec.h" + +#undef pr_info +#define pr_info printk + +#define assert(chk_cond) {\ + if (!(chk_cond))\ + pr_info("error line %d\n", __LINE__);\ + while (!(chk_cond))\ + ;\ +} + +int16_t get_param(uint16_t value, int8_t *print_info) +{ + if (is_avs2_print_param()) + pr_info("%s = %x\n", print_info, value); + return (int16_t)value; +} + +void readAlfCoeff(struct avs2_decoder *avs2_dec, struct ALFParam_s *Alfp) +{ + int32_t pos; + union param_u *rpm_param = &avs2_dec->param; + + int32_t f = 0, symbol, pre_symbole; + const int32_t numCoeff = (int32_t)ALF_MAX_NUM_COEF; + + switch (Alfp->componentID) { + case ALF_Cb: + case ALF_Cr: { + for (pos = 0; pos < numCoeff; pos++) { + if (Alfp->componentID == ALF_Cb) + Alfp->coeffmulti[0][pos] = + get_param( + rpm_param->alf.alf_cb_coeffmulti[pos], + "Chroma ALF coefficients"); + else + Alfp->coeffmulti[0][pos] = + get_param( + rpm_param->alf.alf_cr_coeffmulti[pos], + "Chroma ALF coefficients"); +#if Check_Bitstream + if (pos <= 7) + assert(Alfp->coeffmulti[0][pos] >= -64 + && Alfp->coeffmulti[0][pos] <= 63); + if (pos == 8) + assert(Alfp->coeffmulti[0][pos] >= -1088 + && Alfp->coeffmulti[0][pos] <= 1071); +#endif + } + } + break; + case ALF_Y: { + int32_t region_distance_idx = 0; + Alfp->filters_per_group = + get_param(rpm_param->alf.alf_filters_num_m_1, + "ALF_filter_number_minus_1"); +#if Check_Bitstream + assert(Alfp->filters_per_group >= 0 + && Alfp->filters_per_group <= 15); +#endif + Alfp->filters_per_group = Alfp->filters_per_group + 1; + + memset(Alfp->filterPattern, 0, NO_VAR_BINS * sizeof(int32_t)); + pre_symbole = 0; + symbol = 0; + for (f = 0; f < Alfp->filters_per_group; f++) { + if (f > 0) { + if (Alfp->filters_per_group != 16) { + symbol = + get_param(rpm_param->alf.region_distance + [region_distance_idx++], + "Region distance"); + } else { + symbol = 1; + } + Alfp->filterPattern[symbol + pre_symbole] = 1; + pre_symbole = symbol + pre_symbole; + } + + for (pos = 0; pos < numCoeff; pos++) { + 
Alfp->coeffmulti[f][pos] = + get_param( + rpm_param->alf.alf_y_coeffmulti[f][pos], + "Luma ALF coefficients"); +#if Check_Bitstream + if (pos <= 7) + assert( + Alfp->coeffmulti[f][pos] + >= -64 && + Alfp->coeffmulti[f][pos] + <= 63); + if (pos == 8) + assert( + Alfp->coeffmulti[f][pos] + >= -1088 && + Alfp->coeffmulti[f][pos] + <= 1071); +#endif + + } + } + +#if Check_Bitstream + assert(pre_symbole >= 0 && pre_symbole <= 15); + +#endif + } + break; + default: { + pr_info("Not a legal component ID\n"); + assert(0); + return; /* exit(-1);*/ + } + } +} + +void Read_ALF_param(struct avs2_decoder *avs2_dec) +{ + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + union param_u *rpm_param = &avs2_dec->param; + int32_t compIdx; + if (input->alf_enable) { + img->pic_alf_on[0] = + get_param( + rpm_param->alf.picture_alf_enable_Y, + "alf_pic_flag_Y"); + img->pic_alf_on[1] = + get_param( + rpm_param->alf.picture_alf_enable_Cb, + "alf_pic_flag_Cb"); + img->pic_alf_on[2] = + get_param( + rpm_param->alf.picture_alf_enable_Cr, + "alf_pic_flag_Cr"); + + avs2_dec->m_alfPictureParam[ALF_Y].alf_flag + = img->pic_alf_on[ALF_Y]; + avs2_dec->m_alfPictureParam[ALF_Cb].alf_flag + = img->pic_alf_on[ALF_Cb]; + avs2_dec->m_alfPictureParam[ALF_Cr].alf_flag + = img->pic_alf_on[ALF_Cr]; + if (img->pic_alf_on[0] + || img->pic_alf_on[1] + || img->pic_alf_on[2]) { + for (compIdx = 0; + compIdx < NUM_ALF_COMPONENT; + compIdx++) { + if (img->pic_alf_on[compIdx]) { + readAlfCoeff( + avs2_dec, + &avs2_dec->m_alfPictureParam[compIdx]); + } + } + } + } + +} + +void Get_SequenceHeader(struct avs2_decoder *avs2_dec) +{ + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + union param_u *rpm_param = &avs2_dec->param; + /*int32_t i, j;*/ + + /*fpr_info(stdout, "Sequence Header\n");*/ + /*memcpy(currStream->streamBuffer, buf, 
length);*/ + /*currStream->code_len = currStream->bitstream_length = length;*/ + /*currStream->read_len = currStream->frame_bitoffset = (startcodepos + + 1) * 8;*/ + + input->profile_id = + get_param(rpm_param->p.profile_id, "profile_id"); + input->level_id = + get_param(rpm_param->p.level_id, "level_id"); + hd->progressive_sequence = + get_param( + rpm_param->p.progressive_sequence, + "progressive_sequence"); +#if INTERLACE_CODING + hd->is_field_sequence = + get_param( + rpm_param->p.is_field_sequence, + "field_coded_sequence"); +#endif +#if HALF_PIXEL_COMPENSATION || HALF_PIXEL_CHROMA + img->is_field_sequence = hd->is_field_sequence; +#endif + hd->horizontal_size = + get_param(rpm_param->p.horizontal_size, "horizontal_size"); + hd->vertical_size = + get_param(rpm_param->p.vertical_size, "vertical_size"); + input->chroma_format = + get_param(rpm_param->p.chroma_format, "chroma_format"); + input->output_bit_depth = 8; + input->sample_bit_depth = 8; + hd->sample_precision = 1; + if (input->profile_id == BASELINE10_PROFILE) { /* 10bit profile (0x52)*/ + input->output_bit_depth = + get_param(rpm_param->p.sample_precision, + "sample_precision"); + input->output_bit_depth = + 6 + (input->output_bit_depth) * 2; + input->sample_bit_depth = + get_param(rpm_param->p.encoding_precision, + "encoding_precision"); + input->sample_bit_depth = + 6 + (input->sample_bit_depth) * 2; + } else { /* other profile*/ + hd->sample_precision = + get_param(rpm_param->p.sample_precision, + "sample_precision"); + } + hd->aspect_ratio_information = + get_param(rpm_param->p.aspect_ratio_information, + "aspect_ratio_information"); + hd->frame_rate_code = + get_param(rpm_param->p.frame_rate_code, "frame_rate_code"); + + hd->bit_rate_lower = + get_param(rpm_param->p.bit_rate_lower, "bit_rate_lower"); + /*hd->marker_bit = get_param(rpm_param->p.marker_bit, + * "marker bit");*/ + /*CHECKMARKERBIT*/ + hd->bit_rate_upper = + get_param(rpm_param->p.bit_rate_upper, "bit_rate_upper"); + hd->low_delay = + 
get_param(rpm_param->p.low_delay, "low_delay"); + /*hd->marker_bit = + get_param(rpm_param->p.marker_bit2, + "marker bit");*/ + /*CHECKMARKERBIT*/ +#if M3480_TEMPORAL_SCALABLE + hd->temporal_id_exist_flag = + get_param(rpm_param->p.temporal_id_exist_flag, + "temporal_id exist flag"); /*get + Extention Flag*/ +#endif + /*u_v(18, "bbv buffer size");*/ + input->g_uiMaxSizeInBit = + get_param(rpm_param->p.g_uiMaxSizeInBit, + "Largest Coding Block Size"); + + + /*hd->background_picture_enable = 0x01 ^ + (get_param(rpm_param->p.avs2_seq_flags, + "background_picture_disable") + >> BACKGROUND_PICTURE_DISABLE_BIT) & 0x1;*/ + /*rain???*/ + hd->background_picture_enable = 0x01 ^ + ((get_param(rpm_param->p.avs2_seq_flags, + "background_picture_disable") + >> BACKGROUND_PICTURE_DISABLE_BIT) & 0x1); + + + hd->b_dmh_enabled = 1; + + hd->b_mhpskip_enabled = + get_param(rpm_param->p.avs2_seq_flags >> B_MHPSKIP_ENABLED_BIT, + "mhpskip enabled") & 0x1; + hd->dhp_enabled = + get_param(rpm_param->p.avs2_seq_flags >> DHP_ENABLED_BIT, + "dhp enabled") & 0x1; + hd->wsm_enabled = + get_param(rpm_param->p.avs2_seq_flags >> WSM_ENABLED_BIT, + "wsm enabled") & 0x1; + + img->inter_amp_enable = + get_param(rpm_param->p.avs2_seq_flags >> INTER_AMP_ENABLE_BIT, + "Asymmetric Motion Partitions") & 0x1; + input->useNSQT = + get_param(rpm_param->p.avs2_seq_flags >> USENSQT_BIT, + "useNSQT") & 0x1; + input->useSDIP = + get_param(rpm_param->p.avs2_seq_flags >> USESDIP_BIT, + "useNSIP") & 0x1; + + hd->b_secT_enabled = + get_param(rpm_param->p.avs2_seq_flags >> B_SECT_ENABLED_BIT, + "secT enabled") & 0x1; + + input->sao_enable = + get_param(rpm_param->p.avs2_seq_flags >> SAO_ENABLE_BIT, + "SAO Enable Flag") & 0x1; + input->alf_enable = + get_param(rpm_param->p.avs2_seq_flags >> ALF_ENABLE_BIT, + "ALF Enable Flag") & 0x1; + hd->b_pmvr_enabled = + get_param(rpm_param->p.avs2_seq_flags >> B_PMVR_ENABLED_BIT, + "pmvr enabled") & 0x1; + + + hd->gop_size = get_param(rpm_param->p.num_of_RPS, + "num_of_RPS"); 
+#if Check_Bitstream + /*assert(hd->gop_size<=32);*/ +#endif + + if (hd->low_delay == 0) { + hd->picture_reorder_delay = + get_param(rpm_param->p.picture_reorder_delay, + "picture_reorder_delay"); + } + + input->crossSliceLoopFilter = + get_param(rpm_param->p.avs2_seq_flags + >> CROSSSLICELOOPFILTER_BIT, + "Cross Loop Filter Flag") & 0x1; + +#if BCBR + if ((input->profile_id == SCENE_PROFILE || + input->profile_id == SCENE10_PROFILE) && + hd->background_picture_enable) { + hd->bcbr_enable = u_v(1, + "block_composed_background_picture_enable"); + u_v(1, "reserved bits"); + } else { + hd->bcbr_enable = 0; + u_v(2, "reserved bits"); + } +#else + /*u_v(2, "reserved bits");*/ +#endif + + img->width = hd->horizontal_size; + img->height = hd->vertical_size; + img->width_cr = (img->width >> 1); + + if (input->chroma_format == 1) { + img->height_cr + = (img->height >> 1); + } + + img->PicWidthInMbs = img->width / MIN_CU_SIZE; + img->PicHeightInMbs = img->height / MIN_CU_SIZE; + img->PicSizeInMbs = img->PicWidthInMbs * img->PicHeightInMbs; + img->buf_cycle = input->buf_cycle + 1; + img->max_mb_nr = (img->width * img->height) + / (MIN_CU_SIZE * MIN_CU_SIZE); + +#ifdef AML +avs2_dec->lcu_size = + get_param(rpm_param->p.lcu_size, "lcu_size"); +avs2_dec->lcu_size = 1<<(avs2_dec->lcu_size); +#endif +hc->seq_header++; +} + + +void Get_I_Picture_Header(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + union param_u *rpm_param = &avs2_dec->param; + +#if RD1501_FIX_BG /*//Longfei.Wang@mediatek.com*/ + hd->background_picture_flag = 0; + hd->background_picture_output_flag = 0; + img->typeb = 0; +#endif + + hd->time_code_flag = + get_param(rpm_param->p.time_code_flag, + "time_code_flag"); + + if (hd->time_code_flag) { + hd->time_code = + get_param(rpm_param->p.time_code, + "time_code"); + } + if (hd->background_picture_enable) { + hd->background_picture_flag = + 
get_param(rpm_param->p.background_picture_flag, + "background_picture_flag"); + + if (hd->background_picture_flag) { + img->typeb = + BACKGROUND_IMG; + } else { + img->typeb = 0; + } + + if (img->typeb == BACKGROUND_IMG) { + hd->background_picture_output_flag = + get_param( + rpm_param->p.background_picture_output_flag, + "background_picture_output_flag"); + } + } + + + { + img->coding_order = + get_param(rpm_param->p.coding_order, + "coding_order"); + + + +#if M3480_TEMPORAL_SCALABLE + if (hd->temporal_id_exist_flag == 1) { + hd->cur_layer = + get_param(rpm_param->p.cur_layer, + "temporal_id"); + } +#endif +#if RD1501_FIX_BG /*Longfei.Wang@mediatek.com*/ + if (hd->low_delay == 0 + && !(hd->background_picture_flag && + !hd->background_picture_output_flag)) { /*cdp*/ +#else + if (hd->low_delay == 0 && + !(hd->background_picture_enable && + !hd->background_picture_output_flag)) { /*cdp*/ +#endif + hd->displaydelay = + get_param(rpm_param->p.displaydelay, + "picture_output_delay"); + } + + } + { + int32_t RPS_idx;/* = (img->coding_order-1) % gop_size;*/ + int32_t predict; + int32_t j; + predict = + get_param(rpm_param->p.predict, + "use RCS in SPS"); + /*if (predict) {*/ + RPS_idx = + get_param(rpm_param->p.RPS_idx, + "predict for RCS"); + /* hd->curr_RPS = hd->decod_RPS[RPS_idx];*/ + /*} else {*/ + /*gop size16*/ + hd->curr_RPS.referd_by_others = + get_param(rpm_param->p.referd_by_others_cur, + "refered by others"); + hd->curr_RPS.num_of_ref = + get_param(rpm_param->p.num_of_ref_cur, + "num of reference picture"); + for (j = 0; j < hd->curr_RPS.num_of_ref; j++) { + hd->curr_RPS.ref_pic[j] = + get_param(rpm_param->p.ref_pic_cur[j], + "delta COI of ref pic"); + } + hd->curr_RPS.num_to_remove = + get_param(rpm_param->p.num_to_remove_cur, + "num of removed picture"); +#ifdef SANITY_CHECK + if (hd->curr_RPS.num_to_remove > MAXREF) { + hd->curr_RPS.num_to_remove = MAXREF; + pr_info("Warning, %s: num_to_remove %d beyond range, force to MAXREF\n", + __func__, 
hd->curr_RPS.num_to_remove); + } +#endif + + for (j = 0; j < hd->curr_RPS.num_to_remove; j++) { + hd->curr_RPS.remove_pic[j] = + get_param( + rpm_param->p.remove_pic_cur[j], + "delta COI of removed pic"); + } + /*u_v(1, "marker bit");*/ + + /*}*/ + } + /*xyji 12.23*/ + if (hd->low_delay) { + /*ue_v( + "bbv check times");*/ + } + + hd->progressive_frame = + get_param(rpm_param->p.progressive_frame, + "progressive_frame"); + + if (!hd->progressive_frame) { + img->picture_structure = + get_param(rpm_param->p.picture_structure, + "picture_structure"); + } else { + img->picture_structure + = 1; + } + + hd->top_field_first = + get_param(rpm_param->p.top_field_first, + "top_field_first"); + hd->repeat_first_field = + get_param(rpm_param->p.repeat_first_field, + "repeat_first_field"); +#if INTERLACE_CODING + if (hd->is_field_sequence) { + hd->is_top_field = + get_param(rpm_param->p.is_top_field, + "is_top_field"); +#if HALF_PIXEL_COMPENSATION || HALF_PIXEL_CHROMA + img->is_top_field = hd->is_top_field; +#endif + } +#endif + + + img->qp = hd->picture_qp; + + img->type = I_IMG; + +} + +/* + * Function:pb picture header + * Input: + * Output: + * Return: + * Attention: + */ + +void Get_PB_Picture_Header(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + union param_u *rpm_param = &avs2_dec->param; + + + /*u_v(32, "bbv delay");*/ + + hd->picture_coding_type = + get_param(rpm_param->p.picture_coding_type, + "picture_coding_type"); + + if (hd->background_picture_enable && + (hd->picture_coding_type == 1 || + hd->picture_coding_type == 3)) { + if (hd->picture_coding_type == 1) { + hd->background_pred_flag = + get_param( + rpm_param->p.background_pred_flag, + "background_pred_flag"); + } else { + hd->background_pred_flag = 0; + } + if (hd->background_pred_flag == 0) { + + hd->background_reference_enable = + get_param( + rpm_param-> + p.background_reference_enable, + "background_reference_enable"); + 
+ } else { +#if RD170_FIX_BG + hd->background_reference_enable = 1; +#else + hd->background_reference_enable = 0; +#endif + } + + } else { + hd->background_pred_flag = 0; + hd->background_reference_enable = 0; + } + + + + if (hd->picture_coding_type == 1) { + img->type = + P_IMG; + } else if (hd->picture_coding_type == 3) { + img->type = + F_IMG; + } else { + img->type = + B_IMG; + } + + + if (hd->picture_coding_type == 1 && + hd->background_pred_flag) { + img->typeb = BP_IMG; + } else { + img->typeb = 0; + } + + + { + img->coding_order = + get_param( + rpm_param->p.coding_order, + "coding_order"); + + +#if M3480_TEMPORAL_SCALABLE + if (hd->temporal_id_exist_flag == 1) { + hd->cur_layer = + get_param(rpm_param->p.cur_layer, + "temporal_id"); + } +#endif + + if (hd->low_delay == 0) { + hd->displaydelay = + get_param(rpm_param->p.displaydelay, + "displaydelay"); + } + } + { + int32_t RPS_idx;/* = (img->coding_order-1) % gop_size;*/ + int32_t predict; + predict = + get_param(rpm_param->p.predict, + "use RPS in SPS"); + if (predict) { + RPS_idx = + get_param(rpm_param->p.RPS_idx, + "predict for RPS"); + hd->curr_RPS = hd->decod_RPS[RPS_idx]; + } /*else*/ + { + /*gop size16*/ + int32_t j; + hd->curr_RPS.referd_by_others = + get_param( + rpm_param->p.referd_by_others_cur, + "refered by others"); + hd->curr_RPS.num_of_ref = + get_param( + rpm_param->p.num_of_ref_cur, + "num of reference picture"); + for (j = 0; j < hd->curr_RPS.num_of_ref; j++) { + hd->curr_RPS.ref_pic[j] = + get_param( + rpm_param->p.ref_pic_cur[j], + "delta COI of ref pic"); + } + hd->curr_RPS.num_to_remove = + get_param( + rpm_param->p.num_to_remove_cur, + "num of removed picture"); +#ifdef SANITY_CHECK + if (hd->curr_RPS.num_to_remove > MAXREF) { + hd->curr_RPS.num_to_remove = MAXREF; + pr_info("Warning, %s: num_to_remove %d beyond range, force to MAXREF\n", + __func__, hd->curr_RPS.num_to_remove); + } +#endif + for (j = 0; + j < hd->curr_RPS.num_to_remove; j++) { + hd->curr_RPS.remove_pic[j] = + 
get_param( + rpm_param->p.remove_pic_cur[j], + "delta COI of removed pic"); + } + /*u_v(1, "marker bit");*/ + + } + } + /*xyji 12.23*/ + if (hd->low_delay) { + /*ue_v( + "bbv check times");*/ + } + + hd->progressive_frame = + get_param(rpm_param->p.progressive_frame, + "progressive_frame"); + + if (!hd->progressive_frame) { + img->picture_structure = + get_param(rpm_param->p.picture_structure, + "picture_structure"); + } else { + img->picture_structure = 1; + } + + hd->top_field_first = + get_param(rpm_param->p.top_field_first, + "top_field_first"); + hd->repeat_first_field = + get_param(rpm_param->p.repeat_first_field, + "repeat_first_field"); +#if INTERLACE_CODING + if (hd->is_field_sequence) { + hd->is_top_field = + get_param(rpm_param->p.is_top_field, + "is_top_field"); +#if HALF_PIXEL_COMPENSATION || HALF_PIXEL_CHROMA + img->is_top_field = hd->is_top_field; +#endif + /*u_v(1, "reserved bit for interlace coding");*/ + } +#endif + +#if Check_Bitstream + /*assert(hd->picture_qp>=0&&hd->picture_qp<=(63 + 8 * + (input->sample_bit_depth - 8)));*/ +#endif + + img->random_access_decodable_flag = + get_param(rpm_param->p.random_access_decodable_flag, + "random_access_decodable_flag"); + + img->qp = hd->picture_qp; +} + + + + +void calc_picture_distance(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + /* + union param_u *rpm_param = &avs2_dec->param; + + for POC mode 0: + uint32_t MaxPicDistanceLsb = (1 << 8); + */ + if (img->coding_order < img->PrevPicDistanceLsb) + + { + int32_t i, j; + + hc->total_frames++; + for (i = 0; i < avs2_dec->ref_maxbuffer; i++) { + if ( + avs2_dec->fref[i]->imgtr_fwRefDistance + >= 0) { + avs2_dec->fref[i]-> + imgtr_fwRefDistance -= 256; + avs2_dec->fref[i]-> + imgcoi_ref -= 256; + } +#if RD170_FIX_BG + for (j = 0; j < MAXREF; j++) { +#else + for (j = 0; j < 4; j++) { +#endif + avs2_dec->fref[i]->ref_poc[j] -= 
256; + } + } + for (i = 0; i < avs2_dec->outprint.buffer_num; i++) { + avs2_dec->outprint.stdoutdata[i].framenum -= 256; + avs2_dec->outprint.stdoutdata[i].tr -= 256; + } + + hd->last_output -= 256; + hd->curr_IDRtr -= 256; + hd->curr_IDRcoi -= 256; + hd->next_IDRtr -= 256; + hd->next_IDRcoi -= 256; + } + if (hd->low_delay == 0) { + img->tr = img->coding_order + + hd->displaydelay - hd->picture_reorder_delay; + } else { + img->tr = + img->coding_order; + } + +#if REMOVE_UNUSED + img->pic_distance = img->tr; +#else + img->pic_distance = img->tr % 256; +#endif + hc->picture_distance = img->pic_distance; + +} + +int32_t avs2_init_global_buffers(struct avs2_decoder *avs2_dec) +{ + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + + int32_t refnum; + + int32_t memory_size = 0; + /* +int32_t img_height = (hd->vertical_size + img->auto_crop_bottom); + */ + img->buf_cycle = input->buf_cycle + 1; + + img->buf_cycle *= 2; + + hc->background_ref = hc->backgroundReferenceFrame; + + for (refnum = 0; refnum < REF_MAXBUFFER; refnum++) { + avs2_dec->fref[refnum] = &avs2_dec->frm_pool[refnum]; + + /*//avs2_dec->fref[i] memory allocation*/ + if (is_avs2_print_bufmgr_detail()) + pr_info("[t] avs2_dec->fref[%d]@0x%p\n", + refnum, avs2_dec->fref[refnum]); + avs2_dec->fref[refnum]->imgcoi_ref = -257; + avs2_dec->fref[refnum]->is_output = -1; + avs2_dec->fref[refnum]->refered_by_others = -1; + avs2_dec->fref[refnum]-> + imgtr_fwRefDistance = -256; + init_frame_t(avs2_dec->fref[refnum]); +#ifdef AML + avs2_dec->fref[refnum]->index = refnum; +#endif + } +#ifdef AML + avs2_dec->f_bg = NULL; + + avs2_dec->m_bg = &avs2_dec->frm_pool[REF_MAXBUFFER]; + /*///avs2_dec->fref[i] memory allocation*/ + if (is_avs2_print_bufmgr_detail()) + pr_info("[t] avs2_dec->m_bg@0x%p\n", + avs2_dec->m_bg); + avs2_dec->m_bg->imgcoi_ref = -257; + avs2_dec->m_bg->is_output = -1; + avs2_dec->m_bg->refered_by_others = -1; + 
avs2_dec->m_bg->imgtr_fwRefDistance = -256; + init_frame_t(avs2_dec->m_bg); + avs2_dec->m_bg->index = refnum; +#endif + +#if BCBR + /*init BCBR related*/ + img->iNumCUsInFrame = + ((img->width + MAX_CU_SIZE - 1) / MAX_CU_SIZE) + * ((img->height + MAX_CU_SIZE - 1) + / MAX_CU_SIZE); + /*img->BLCUidx = (int32_t*) calloc( + img->iNumCUsInFrame, sizeof(int32_t));*/ + /*memset( img->BLCUidx, 0, img->iNumCUsInFrame);*/ +#endif + return memory_size; +} + +#ifdef AML +static void free_unused_buffers(struct avs2_decoder *avs2_dec) +{ + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + + int32_t refnum; + + img->buf_cycle = input->buf_cycle + 1; + + img->buf_cycle *= 2; + + hc->background_ref = hc->backgroundReferenceFrame; + + for (refnum = 0; refnum < REF_MAXBUFFER; refnum++) { +#ifndef NO_DISPLAY + if (avs2_dec->fref[refnum]->vf_ref > 0 || + avs2_dec->fref[refnum]->to_prepare_disp) + continue; +#endif + if (is_avs2_print_bufmgr_detail()) + pr_info("%s[t] avs2_dec->fref[%d]@0x%p\n", + __func__, refnum, avs2_dec->fref[refnum]); + avs2_dec->fref[refnum]->imgcoi_ref = -257; + avs2_dec->fref[refnum]->is_output = -1; + avs2_dec->fref[refnum]->refered_by_others = -1; + avs2_dec->fref[refnum]-> + imgtr_fwRefDistance = -256; + memset(avs2_dec->fref[refnum]->ref_poc, 0, + sizeof(avs2_dec->fref[refnum]->ref_poc)); + } + avs2_dec->f_bg = NULL; + + if (is_avs2_print_bufmgr_detail()) + pr_info("%s[t] avs2_dec->m_bg@0x%p\n", + __func__, avs2_dec->m_bg); + avs2_dec->m_bg->imgcoi_ref = -257; + avs2_dec->m_bg->is_output = -1; + avs2_dec->m_bg->refered_by_others = -1; + avs2_dec->m_bg->imgtr_fwRefDistance = -256; + memset(avs2_dec->m_bg->ref_poc, 0, + sizeof(avs2_dec->m_bg->ref_poc)); + +#if BCBR + /*init BCBR related*/ + img->iNumCUsInFrame = + ((img->width + MAX_CU_SIZE - 1) / MAX_CU_SIZE) + * ((img->height + MAX_CU_SIZE - 1) + / MAX_CU_SIZE); + /*img->BLCUidx = (int32_t*) calloc( + 
img->iNumCUsInFrame, sizeof(int32_t));*/ + /*memset( img->BLCUidx, 0, img->iNumCUsInFrame);*/ +#endif +} +#endif + +void init_frame_t(struct avs2_frame_s *currfref) +{ + memset(currfref, 0, sizeof(struct avs2_frame_s)); + currfref->imgcoi_ref = -257; + currfref->is_output = -1; + currfref->refered_by_others = -1; + currfref->imgtr_fwRefDistance = -256; + memset(currfref->ref_poc, 0, sizeof(currfref->ref_poc)); +} + +void get_reference_list_info(struct avs2_decoder *avs2_dec, int8_t *str) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + + int8_t str_tmp[16]; + int32_t i; + /* int32_t poc = hc->f_rec->imgtr_fwRefDistance; + fred.chiu@mediatek.com*/ + + if (img->num_of_references > 0) { + strcpy(str, "["); + for (i = 0; i < img->num_of_references; i++) { +#if RD1510_FIX_BG + if (img->type == B_IMG) { + sprintf(str_tmp, "%4d ", + hc->f_rec-> + ref_poc[ + img->num_of_references - 1 - i]); + } else { + sprintf(str_tmp, "%4d ", + hc->f_rec->ref_poc[i]); + } +#else + sprintf(str_tmp, "%4d ", + avs2_dec->fref[i]->imgtr_fwRefDistance); +#endif + + str_tmp[5] = '\0'; + strcat(str, str_tmp); + } + strcat(str, "]"); + } else { + str[0] = '\0'; + } +} + +void prepare_RefInfo(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + + int32_t i, j; + int32_t ii; + struct avs2_frame_s *tmp_fref; + + /*update IDR frame*/ + if (img->tr > hd->next_IDRtr && hd->curr_IDRtr != hd->next_IDRtr) { + hd->curr_IDRtr = hd->next_IDRtr; + hd->curr_IDRcoi = hd->next_IDRcoi; + } + /* re-order the ref buffer according to RPS*/ + img->num_of_references = hd->curr_RPS.num_of_ref; + +#if 1 + /*rain*/ + if (is_avs2_print_bufmgr_detail()) { + pr_info("%s: coding_order is %d, curr_IDRcoi is %d\n", + __func__, img->coding_order, hd->curr_IDRcoi); + for (ii = 0; ii < MAXREF; ii++) { + pr_info("ref_pic(%d)=%d\n", + ii, 
hd->curr_RPS.ref_pic[ii]); + } + for (ii = 0; ii < avs2_dec->ref_maxbuffer; ii++) { + pr_info( + "fref[%d]: index %d imgcoi_ref %d imgtr_fwRefDistance %d\n", + ii, avs2_dec->fref[ii]->index, + avs2_dec->fref[ii]->imgcoi_ref, + avs2_dec->fref[ii]->imgtr_fwRefDistance); + } + } +#endif + + for (i = 0; i < hd->curr_RPS.num_of_ref; i++) { + /*int32_t accumulate = 0;*/ + /* copy tmp_fref from avs2_dec->fref[i] */ + tmp_fref = avs2_dec->fref[i]; + +#if REMOVE_UNUSED + for (j = i; j < avs2_dec->ref_maxbuffer; j++) { + /*/////////////to be modified IDR*/ + if (avs2_dec->fref[j]->imgcoi_ref == + img->coding_order - + hd->curr_RPS.ref_pic[i]) { + break; + } + } +#else + + for (j = i; j < avs2_dec->ref_maxbuffer; j++) { + /*/////////////to be modified IDR*/ + int32_t k , tmp_tr; + for (k = 0; k < avs2_dec->ref_maxbuffer; k++) { + if (((int32_t)img->coding_order - + (int32_t)hd->curr_RPS.ref_pic[i]) == + avs2_dec->fref[k]->imgcoi_ref && + avs2_dec->fref[k]->imgcoi_ref >= -256) { + break; + } + } + if (k == avs2_dec->ref_maxbuffer) { + tmp_tr = + -1-1; + } else { + tmp_tr = + avs2_dec->fref[k]->imgtr_fwRefDistance; + } + if (tmp_tr < hd->curr_IDRtr) { + hd->curr_RPS.ref_pic[i] = + img->coding_order - hd->curr_IDRcoi; + + for (k = 0; k < i; k++) { + if (hd->curr_RPS.ref_pic[k] == + hd->curr_RPS.ref_pic[i]) { + accumulate++; + break; + } + } + } + if (avs2_dec->fref[j]->imgcoi_ref == + img->coding_order - hd->curr_RPS.ref_pic[i]) { + break; + } + } + if (j == avs2_dec->ref_maxbuffer || accumulate) + img->num_of_references--; +#endif + if (j != avs2_dec->ref_maxbuffer) { + /* copy avs2_dec->fref[i] from avs2_dec->fref[j] */ + avs2_dec->fref[i] = avs2_dec->fref[j]; + /* copy avs2_dec->fref[j] from ferf[tmp] */ + avs2_dec->fref[j] = tmp_fref; + if (is_avs2_print_bufmgr_detail()) { + pr_info("%s, switch %d %d: ", __func__, i, j); + for (ii = 0; ii < hd->curr_RPS.num_of_ref + || ii <= j; ii++) + pr_info("%d ", + avs2_dec->fref[ii]->index); + pr_info("\n"); + } + } + } + if (img->type 
== B_IMG && + (avs2_dec->fref[0]->imgtr_fwRefDistance <= img->tr + || avs2_dec->fref[1]->imgtr_fwRefDistance >= img->tr)) { + + pr_info("wrong reference configuration for B frame\n"); + pr_info( + "fref0 imgtr_fwRefDistance %d, fref1 imgtr_fwRefDistance %d, img->tr %d\n", + avs2_dec->fref[0]->imgtr_fwRefDistance, + avs2_dec->fref[1]->imgtr_fwRefDistance, + img->tr); + hc->f_rec->error_mark = 1; + avs2_dec->bufmgr_error_flag = 1; + return; /* exit(-1);*/ + /*******************************************/ + } + +#if !FIX_PROFILE_LEVEL_DPB_RPS_1 + /* delete the frame that will never be used*/ + for (i = 0; i < hd->curr_RPS.num_to_remove; i++) { + for (j = 0; j < avs2_dec->ref_maxbuffer; j++) { + if (avs2_dec->fref[j]->imgcoi_ref >= -256 + && avs2_dec->fref[j]->imgcoi_ref + == img->coding_order - + hd->curr_RPS.remove_pic[i]) { + break; + } + } + if (j < avs2_dec->ref_maxbuffer && + j >= img->num_of_references) { + avs2_dec->fref[j]->imgcoi_ref = -257; +#if M3480_TEMPORAL_SCALABLE + avs2_dec->fref[j]->temporal_id = -1; +#endif + if (avs2_dec->fref[j]->is_output == -1) { + avs2_dec->fref[j]-> + imgtr_fwRefDistance = -256; + } + } + } +#endif + + /* add inter-view reference picture*/ + + /* add current frame to ref buffer*/ + for (i = 0; i < avs2_dec->ref_maxbuffer; i++) { + if ((avs2_dec->fref[i]->imgcoi_ref < -256 + || abs(avs2_dec->fref[i]-> + imgtr_fwRefDistance - img->tr) >= 128) + && avs2_dec->fref[i]->is_output == -1 + && avs2_dec->fref[i]->bg_flag == 0 +#ifndef NO_DISPLAY + && avs2_dec->fref[i]->vf_ref == 0 + && avs2_dec->fref[i]->to_prepare_disp == 0 +#endif + ) { + break; + } + } + if (i == avs2_dec->ref_maxbuffer) { + pr_info( + "%s, warning, no enough buf\n", + __func__); + i--; + } + + hc->f_rec = avs2_dec->fref[i]; + hc->currentFrame = hc->f_rec->ref; + hc->f_rec->imgtr_fwRefDistance = img->tr; + hc->f_rec->imgcoi_ref = img->coding_order; +#if M3480_TEMPORAL_SCALABLE + hc->f_rec->temporal_id = hd->cur_layer; +#endif + hc->f_rec->is_output = 1; +#ifdef AML + 
hc->f_rec->error_mark = 0; + hc->f_rec->decoded_lcu = 0; + hc->f_rec->slice_type = img->type; +#endif + hc->f_rec->refered_by_others = hd->curr_RPS.referd_by_others; + if (is_avs2_print_bufmgr_detail()) + pr_info( + "%s, set f_rec (cur_pic) <= fref[%d] img->tr %d coding_order %d img_type %d\n", + __func__, i, img->tr, img->coding_order, + img->type); + + if (img->type != B_IMG) { + for (j = 0; + j < img->num_of_references; j++) { + hc->f_rec->ref_poc[j] = + avs2_dec->fref[j]->imgtr_fwRefDistance; + } + } else { + hc->f_rec->ref_poc[0] = + avs2_dec->fref[1]->imgtr_fwRefDistance; + hc->f_rec->ref_poc[1] = + avs2_dec->fref[0]->imgtr_fwRefDistance; + } + +#if M3480_TEMPORAL_SCALABLE + + for (j = img->num_of_references; + j < 4; j++) { + /**/ + hc->f_rec->ref_poc[j] = 0; + } + + if (img->type == INTRA_IMG) { + int32_t l; + for (l = 0; l < 4; l++) { + hc->f_rec->ref_poc[l] + = img->tr; + } + } + +#endif + +/*////////////////////////////////////////////////////////////////////////*/ + /* updata ref pointer*/ + + if (img->type != I_IMG) { + + img->imgtr_next_P = img->type == B_IMG ? 
+ avs2_dec->fref[0]->imgtr_fwRefDistance : img->tr; + if (img->type == B_IMG) { + hd->trtmp = avs2_dec->fref[0]->imgtr_fwRefDistance; + avs2_dec->fref[0]->imgtr_fwRefDistance = + avs2_dec->fref[1]->imgtr_fwRefDistance; + } + } +#if 1 + /*rain*/ + if (is_avs2_print_bufmgr_detail()) { + for (ii = 0; ii < avs2_dec->ref_maxbuffer; ii++) { + pr_info( + "fref[%d]: index %d imgcoi_ref %d imgtr_fwRefDistance %d refered %d, is_out %d, bg %d, vf_ref %d ref_pos(%d,%d,%d,%d,%d,%d,%d)\n", + ii, avs2_dec->fref[ii]->index, + avs2_dec->fref[ii]->imgcoi_ref, + avs2_dec->fref[ii]->imgtr_fwRefDistance, + avs2_dec->fref[ii]->refered_by_others, + avs2_dec->fref[ii]->is_output, + avs2_dec->fref[ii]->bg_flag, + avs2_dec->fref[ii]->vf_ref, + avs2_dec->fref[ii]->ref_poc[0], + avs2_dec->fref[ii]->ref_poc[1], + avs2_dec->fref[ii]->ref_poc[2], + avs2_dec->fref[ii]->ref_poc[3], + avs2_dec->fref[ii]->ref_poc[4], + avs2_dec->fref[ii]->ref_poc[5], + avs2_dec->fref[ii]->ref_poc[6] + ); + } + } +#endif +} + +int32_t init_frame(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + + +#if RD1510_FIX_BG + if (img->type == I_IMG && + img->typeb == BACKGROUND_IMG) { /*G/GB frame*/ + img->num_of_references = 0; + } else if (img->type == P_IMG && img->typeb == BP_IMG) { + /* only one reference frame(G\GB) for S frame*/ + img->num_of_references = 1; + } +#endif + + if (img->typeb == BACKGROUND_IMG && + hd->background_picture_output_flag == 0) { + hc->currentFrame = hc->background_ref; +#ifdef AML + hc->cur_pic = avs2_dec->m_bg; +#endif + } else { + prepare_RefInfo(avs2_dec); +#ifdef AML + hc->cur_pic = hc->f_rec; +#endif + } + + +#ifdef FIX_CHROMA_FIELD_MV_BK_DIST + if (img->typeb == BACKGROUND_IMG + && img->is_field_sequence) { + avs2_dec->bk_img_is_top_field + = img->is_top_field; + } +#endif + return 0; +} + +void delete_trbuffer(struct outdata_s *data, int32_t pos) +{ + int32_t 
i; + for (i = pos; + i < data->buffer_num - 1; i++) { + data->stdoutdata[i] = + data->stdoutdata[i + 1]; + } + data->buffer_num--; +} + +#if RD170_FIX_BG +void flushDPB(struct avs2_decoder *avs2_dec) +{ + struct Video_Dec_data_s *hd = &avs2_dec->hd; + int j, tmp_min, i, pos = -1; + int search_times = avs2_dec->outprint.buffer_num; + + tmp_min = 1 << 20; + i = 0, j = 0; + pos = -1; + + for (j = 0; j < search_times; j++) { + pos = -1; + tmp_min = (1 << 20); + /*search for min poi picture to display*/ + for (i = 0; i < avs2_dec->outprint.buffer_num; i++) { + if (avs2_dec->outprint.stdoutdata[i].tr < tmp_min) { + pos = i; + tmp_min = avs2_dec->outprint.stdoutdata[i].tr; + } + } + + if (pos != -1) { + hd->last_output = avs2_dec->outprint.stdoutdata[pos].tr; + report_frame(avs2_dec, &avs2_dec->outprint, pos); + if (avs2_dec->outprint.stdoutdata[pos].typeb + == BACKGROUND_IMG && + avs2_dec->outprint.stdoutdata[pos]. + background_picture_output_flag + == 0) { + /*write_GB_frame(hd->p_out_background);*/ + } else { + write_frame(avs2_dec, + avs2_dec->outprint.stdoutdata[pos].tr); + } + + delete_trbuffer(&avs2_dec->outprint, pos); + } + } + + /*clear dpb info*/ + for (j = 0; j < REF_MAXBUFFER; j++) { + avs2_dec->fref[j]->imgtr_fwRefDistance = -256; + avs2_dec->fref[j]->imgcoi_ref = -257; + avs2_dec->fref[j]->temporal_id = -1; + avs2_dec->fref[j]->refered_by_others = 0; + } +} +#endif + + + +#if M3480_TEMPORAL_SCALABLE +void cleanRefMVBufRef(int pos) +{ +#if 0 + int k, x, y; + /*re-init mvbuf*/ + for (k = 0; k < 2; k++) { + for (y = 0; y < img->height / MIN_BLOCK_SIZE; y++) { + for (x = 0; x < img->width / MIN_BLOCK_SIZE; x++) + fref[pos]->mvbuf[y][x][k] = 0; + + } + } + /*re-init refbuf*/ + for (y = 0; y < img->height / MIN_BLOCK_SIZE; y++) { + for (x = 0; x < img->width / MIN_BLOCK_SIZE ; x++) + fref[pos]->refbuf[y][x] = -1; + + } +#endif +} +#endif + +static int frame_postprocessing(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct 
Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + + int32_t pointer_tmp = avs2_dec->outprint.buffer_num; + int32_t i; + struct STDOUT_DATA_s *p_outdata; +#if RD160_FIX_BG + int32_t j, tmp_min, output_cur_dec_pic, pos = -1; + int32_t search_times = avs2_dec->outprint.buffer_num; +#endif + /*pic dist by Grandview Semi. @ [06-07-20 15:25]*/ + img->PrevPicDistanceLsb = (img->coding_order % 256); + + pointer_tmp = avs2_dec->outprint.buffer_num; + p_outdata = &avs2_dec->outprint.stdoutdata[pointer_tmp]; + + p_outdata->type = img->type; + p_outdata->typeb = img->typeb; + p_outdata->framenum = img->tr; + p_outdata->tr = img->tr; +#if 0 /*def ORI*/ + p_outdata->qp = img->qp; +#else + p_outdata->qp = 0; +#endif + /*p_outdata->snr_y = snr->snr_y;*/ + /*p_outdata->snr_u = snr->snr_u;*/ + /*p_outdata->snr_v = snr->snr_v;*/ + p_outdata->tmp_time = hd->tmp_time; + p_outdata->picture_structure = img->picture_structure; + /*p_outdata->curr_frame_bits = + StatBitsPtr->curr_frame_bits;*/ + /*p_outdata->emulate_bits = StatBitsPtr->emulate_bits;*/ +#if RD1501_FIX_BG + p_outdata->background_picture_output_flag + = hd->background_picture_output_flag; + /*Longfei.Wang@mediatek.com*/ +#endif + +#if RD160_FIX_BG + p_outdata->picture_reorder_delay = hd->picture_reorder_delay; +#endif + avs2_dec->outprint.buffer_num++; + +#if RD170_FIX_BG + search_times = avs2_dec->outprint.buffer_num; +#endif + /* record the reference list*/ + strcpy(p_outdata->str_reference_list, hc->str_list_reference); + +#if !REF_OUTPUT + #error "!!!REF_OUTPUT should be 1" + for (i = 0; i < avs2_dec->outprint.buffer_num; i++) { + min_tr(avs2_dec->outprint, &pos); + if (avs2_dec->outprint.stdoutdata[pos].tr < img->tr + || avs2_dec->outprint.stdoutdata[pos].tr + == (hd->last_output + 1)) { + hd->last_output = avs2_dec->outprint.stdoutdata[pos].tr; + report_frame(avs2_dec, &avs2_dec->outprint, pos); +#if 0 /*def ORI*/ + write_frame(hd->p_out, + avs2_dec->outprint.stdoutdata[pos].tr); 
+#endif + delete_trbuffer(&avs2_dec->outprint, pos); + i--; + } else { + break; + } + } +#else +#if RD160_FIX_BG /*Longfei.Wang@mediatek.com*/ + tmp_min = 1 << 20; + i = 0, j = 0; + output_cur_dec_pic = 0; + pos = -1; + for (j = 0; j < search_times; j++) { + pos = -1; + tmp_min = (1 << 20); + /*search for min poi picture to display*/ + for (i = 0; i < avs2_dec->outprint.buffer_num; i++) { + if ((avs2_dec->outprint.stdoutdata[i].tr < tmp_min) && + ((avs2_dec->outprint.stdoutdata[i].tr + + avs2_dec->outprint.stdoutdata[i]. + picture_reorder_delay) + <= (int32_t)img->coding_order)) { + pos = i; + tmp_min = avs2_dec->outprint.stdoutdata[i].tr; + } + } + + if ((0 == hd->displaydelay) && (0 == output_cur_dec_pic)) { + if (img->tr <= tmp_min) {/*fred.chiu@mediatek.com*/ + /*output current decode picture + right now*/ + pos = avs2_dec->outprint.buffer_num - 1; + output_cur_dec_pic = 1; + } + } + if (pos != -1) { + hd->last_output = avs2_dec->outprint.stdoutdata[pos].tr; + report_frame(avs2_dec, &avs2_dec->outprint, pos); +#if 1 /*def ORI*/ + if (avs2_dec->outprint.stdoutdata[pos].typeb + == BACKGROUND_IMG && + avs2_dec->outprint.stdoutdata[pos]. 
+ background_picture_output_flag == 0) { + /**/ + /**/ + } else { + write_frame(avs2_dec, + avs2_dec->outprint.stdoutdata[pos].tr); + } +#endif + delete_trbuffer(&avs2_dec->outprint, pos); + } + + } + +#else + #error "!!!RD160_FIX_BG should be defined" + if (img->coding_order + + (uint32_t)hc->total_frames * 256 >= + (uint32_t)hd->picture_reorder_delay) { + int32_t tmp_min, pos = -1; + tmp_min = 1 << 20; + + for (i = 0; i < + avs2_dec->outprint.buffer_num; i++) { + if (avs2_dec->outprint.stdoutdata[i].tr + < tmp_min && + avs2_dec->outprint.stdoutdata[i].tr + >= hd->last_output) { + /*GB has the same "tr" with "last_output"*/ + pos = i; + tmp_min = + avs2_dec->outprint.stdoutdata[i].tr; + } + } + + if (pos != -1) { + hd->last_output = avs2_dec->outprint.stdoutdata[pos].tr; + report_frame(avs2_dec, &avs2_dec->outprint, pos); +#if RD1501_FIX_BG + if (avs2_dec->outprint.stdoutdata[pos].typeb + == BACKGROUND_IMG && avs2_dec-> + outprint.stdoutdata[pos]. + background_picture_output_flag == 0) { +#else + if (avs2_dec->outprint.stdoutdata[pos].typeb + == BACKGROUND_IMG && + hd->background_picture_output_flag + == 0) { +#endif + write_GB_frame( + hd->p_out_background); + } else { + write_frame(avs2_dec, + avs2_dec->outprint.stdoutdata[pos].tr); + } + delete_trbuffer(&avs2_dec->outprint, pos); + + } + + } +#endif +#endif + return pos; + + } + +void write_frame(struct avs2_decoder *avs2_dec, int32_t pos) +{ + int32_t j; + + if (is_avs2_print_bufmgr_detail()) + pr_info("%s(pos = %d)\n", __func__, pos); + + for (j = 0; j < avs2_dec->ref_maxbuffer; j++) { + if (avs2_dec->fref[j]->imgtr_fwRefDistance == pos) { + avs2_dec->fref[j]->imgtr_fwRefDistance_bak = pos; + avs2_dec->fref[j]->is_output = -1; + avs2_dec->fref[j]->to_prepare_disp = + avs2_dec->to_prepare_disp_count++; + if (avs2_dec->fref[j]->refered_by_others == 0 + || avs2_dec->fref[j]->imgcoi_ref + == -257) { + avs2_dec->fref[j]->imgtr_fwRefDistance + = -256; + avs2_dec->fref[j]->imgcoi_ref = -257; +#if 
M3480_TEMPORAL_SCALABLE + avs2_dec->fref[j]->temporal_id = -1; +#endif + if (is_avs2_print_bufmgr_detail()) + pr_info("%s, fref index %d\n", + __func__, j); + } + break; + } + } +} + +/*rain???, outdata *data*/ +void report_frame(struct avs2_decoder *avs2_dec, + struct outdata_s *data, int32_t pos) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + + int8_t *Frmfld; + int8_t Frm[] = "FRM"; + int8_t Fld[] = "FLD"; + struct STDOUT_DATA_s *p_stdoutdata + = &data->stdoutdata[pos]; + const int8_t *typ; + +#if 0 + if (input->MD5Enable & 0x02) { + sprintf(MD5str, "%08X%08X%08X%08X\0", + p_stdoutdata->DecMD5Value[0], + p_stdoutdata->DecMD5Value[1], + p_stdoutdata->DecMD5Value[2], + p_stdoutdata->DecMD5Value[3]); + } else { + memset(MD5val, 0, 16); + memset(MD5str, 0, 33); + } +#endif + + if (p_stdoutdata-> + picture_structure) { + Frmfld = Frm; + } else { + Frmfld = Fld; + } +#if INTERLACE_CODING + if (img->is_field_sequence) { /*rcs??*/ + Frmfld = Fld; + } +#endif + if ((p_stdoutdata->tr + hc->total_frames * 256) + == hd->end_SeqTr) { /* I picture*/ + /*if ( img->new_sequence_flag == 1 )*/ + { + img->sequence_end_flag = 0; + /*fprintf(stdout, "Sequence + End\n\n");*/ + } + } + if ((p_stdoutdata->tr + hc->total_frames * 256) + == hd->next_IDRtr) { +#if !RD170_FIX_BG + if (hd->vec_flag) /**/ +#endif + { + hd->vec_flag = 0; + /*fprintf(stdout, "Video Edit + Code\n");*/ + } + } + + if (p_stdoutdata->typeb == BACKGROUND_IMG) { + typ = (hd->background_picture_output_flag != 0) ? "G" : "GB"; + } else { +#if REMOVE_UNUSED + typ = (p_stdoutdata->type == INTRA_IMG) + ? "I" : (p_stdoutdata->type == INTER_IMG) ? + ((p_stdoutdata->typeb == BP_IMG) ? "S" : "P") + : (p_stdoutdata->type == F_IMG ? "F" : "B"); +#else + typ = (p_stdoutdata->type == INTRA_IMG) ? "I" : + (p_stdoutdata->type == INTER_IMG) ? + ((p_stdoutdata->type == BP_IMG) ? "S" : "P") + : (p_stdoutdata->type == F_IMG ? 
"F" : "B"); +#endif + } + +#if 0 + /*rain???*/ + pr_info("%3d(%s) %3d %5d %7.4f %7.4f %7.4f %5d\t\t%s %8d %6d\t%s", + p_stdoutdata->framenum + hc->total_frames * 256, + typ, p_stdoutdata->tr + hc->total_frames * 256, + p_stdoutdata->qp, p_stdoutdata->snr_y, + p_stdoutdata->snr_u, p_stdoutdata->snr_v, + p_stdoutdata->tmp_time, Frmfld, + p_stdoutdata->curr_frame_bits, + p_stdoutdata->emulate_bits, + ""); +#endif + if (is_avs2_print_bufmgr_detail()) + pr_info(" %s\n", p_stdoutdata->str_reference_list); + + /*fflush(stdout);*/ + hd->FrameNum++; +} + +void avs2_prepare_header(struct avs2_decoder *avs2_dec, int32_t start_code) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + + switch (start_code) { + case SEQUENCE_HEADER_CODE: + img->new_sequence_flag = 1; + if (is_avs2_print_bufmgr_detail()) + pr_info("SEQUENCE\n"); +#ifdef TO_CHECK +#if SEQ_CHANGE_CHECKER + if (seq_checker_buf == NULL) { + seq_checker_buf = malloc(length); + seq_checker_length = length; + memcpy(seq_checker_buf, Buf, length); + } else { + if ((seq_checker_length != length) || + (memcmp(seq_checker_buf, Buf, length) != 0)) { + free(seq_checker_buf); + /*fprintf(stdout, + "Non-conformance + stream: sequence + header cannot change + !!\n");*/ +#if RD170_FIX_BG + seq_checker_buf = NULL; + seq_checker_length = 0; + seq_checker_buf = malloc(length); + seq_checker_length = length; + memcpy(seq_checker_buf, Buf, length); +#endif + } + + + } +#endif +#if RD170_FIX_BG + if (input->alf_enable + && alfParAllcoated == 1) { + ReleaseAlfGlobalBuffer(); + alfParAllcoated = 0; + } +#endif +/*TO_CHECK*/ +#endif +#if FIX_FLUSH_DPB_BY_LF + if (hd->vec_flag) { + int32_t k; + if (is_avs2_print_bufmgr_detail()) + pr_info("vec_flag is 1, flushDPB and reinit bugmgr\n"); + + flushDPB(avs2_dec); + for (k = 0; k < avs2_dec->ref_maxbuffer; k++) + cleanRefMVBufRef(k); + + hd->vec_flag = 0; +#ifdef AML + free_unused_buffers(avs2_dec); +#else + free_global_buffers(avs2_dec); 
+#endif + img->number = 0; + img->PrevPicDistanceLsb = 0; + avs2_dec->init_hw_flag = 0; + } +#endif + +#if FIX_SEQ_END_FLUSH_DPB_BY_LF + if (img->new_sequence_flag + && img->sequence_end_flag) { + int32_t k; + if (is_avs2_print_bufmgr_detail()) + pr_info( + "new_sequence_flag after sequence_end_flag, flushDPB and reinit bugmgr\n"); + flushDPB(avs2_dec); + for (k = 0; k < avs2_dec->ref_maxbuffer; k++) + cleanRefMVBufRef(k); + +#ifdef AML + free_unused_buffers(avs2_dec); +#else + free_global_buffers(avs2_dec); +#endif + img->number = 0; + img->PrevPicDistanceLsb = 0; + avs2_dec->init_hw_flag = 0; + } +#endif + img->seq_header_indicate = 1; + break; + case I_PICTURE_START_CODE: + if (is_avs2_print_bufmgr_detail()) + pr_info("PIC-I\n"); + Get_SequenceHeader(avs2_dec); + Get_I_Picture_Header(avs2_dec); + calc_picture_distance(avs2_dec); + Read_ALF_param(avs2_dec); + if (!img->seq_header_indicate) { + img->B_discard_flag = 1; + /*fprintf(stdout, " I + %3d\t\tDIDSCARD!!\n", + img->tr);*/ + break; + } + break; + case PB_PICTURE_START_CODE: + if (is_avs2_print_bufmgr_detail()) + pr_info("PIC-PB\n"); + Get_SequenceHeader(avs2_dec); + Get_PB_Picture_Header(avs2_dec); + calc_picture_distance(avs2_dec); + Read_ALF_param(avs2_dec); + /* xiaozhen zheng, 20071009*/ + if (!img->seq_header_indicate) { + img->B_discard_flag = 1; + + if (img->type == P_IMG) { + /*fprintf(stdout, " P + %3d\t\tDIDSCARD!!\n", + img->tr);*/ + } + if (img->type == F_IMG) { + /*fprintf(stdout, " F + %3d\t\tDIDSCARD!!\n", + img->tr);*/ + } else { + /*fprintf(stdout, " B + %3d\t\tDIDSCARD!!\n", + img->tr);*/ + } + + break; + } + + if (img->seq_header_indicate == 1 + && img->type != B_IMG) { + img->B_discard_flag = 0; + } + if (img->type == B_IMG && img->B_discard_flag == 1 + && !img->random_access_decodable_flag) { + /*fprintf(stdout, " B + %3d\t\tDIDSCARD!!\n", + img->tr);*/ + break; + } + + break; + case SEQUENCE_END_CODE: + if (is_avs2_print_bufmgr_detail()) + pr_info("SEQUENCE_END_CODE\n"); +#ifdef 
TO_CHECK +#if SEQ_CHANGE_CHECKER + if (seq_checker_buf != NULL) { + free(seq_checker_buf); + seq_checker_buf = NULL; + seq_checker_length = 0; + } +#endif +#endif +img->new_sequence_flag = 1; +img->sequence_end_flag = 1; +break; + case VIDEO_EDIT_CODE: + if (is_avs2_print_bufmgr_detail()) + pr_info("VIDEO_EDIT_CODE\n"); + /*video_edit_code_data(Buf, startcodepos, length);*/ + hd->vec_flag = 1; +#ifdef TO_CHECK +#if SEQ_CHANGE_CHECKER + if (seq_checker_buf != NULL) { + free(seq_checker_buf); + seq_checker_buf = NULL; + seq_checker_length = 0; + } +#endif +#endif + +break; + } +} + +#ifdef AML +static uint32_t log2i(uint32_t val) +{ + uint32_t ret = -1; + while (val != 0) { + val >>= 1; + ret++; + } + return ret; +} +#endif + +int32_t avs2_process_header(struct avs2_decoder *avs2_dec) +{ + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + int32_t lcu_x_num_div; + int32_t lcu_y_num_div; + + int32_t N8_SizeScale; + /*pr_info("%s\n", __func__);*/ + { + N8_SizeScale = 1; + + if (hd->horizontal_size % + (MIN_CU_SIZE * N8_SizeScale) != 0) { + img->auto_crop_right = + (MIN_CU_SIZE * N8_SizeScale) - + (hd->horizontal_size % + (MIN_CU_SIZE * N8_SizeScale)); + } else + img->auto_crop_right = 0; + +#if !INTERLACE_CODING + if (hd->progressive_sequence) /**/ +#endif + { + if (hd->vertical_size % + (MIN_CU_SIZE * N8_SizeScale) != 0) { + img->auto_crop_bottom = + (MIN_CU_SIZE * N8_SizeScale) - + (hd->vertical_size % + (MIN_CU_SIZE * N8_SizeScale)); + } else + img->auto_crop_bottom = 0; + } + + /* Reinit parameters (NOTE: need to do + before init_frame //*/ + img->width = + (hd->horizontal_size + img->auto_crop_right); + img->height = + (hd->vertical_size + img->auto_crop_bottom); + img->width_cr = (img->width >> 1); + + if (input->chroma_format == 1) + img->height_cr = (img->height >> 1); + + img->PicWidthInMbs = img->width / MIN_CU_SIZE; + img->PicHeightInMbs = img->height / MIN_CU_SIZE; + 
img->PicSizeInMbs = img->PicWidthInMbs * img->PicHeightInMbs; + img->max_mb_nr = (img->width * img->height) / + (MIN_CU_SIZE * MIN_CU_SIZE); + } + + if (img->new_sequence_flag && img->sequence_end_flag) { +#if 0/*RD170_FIX_BG //*/ + int32_t k; + flushDPB(); + for (k = 0; k < avs2_dec->ref_maxbuffer; k++) + cleanRefMVBufRef(k); + + free_global_buffers(); + img->number = 0; +#endif + hd->end_SeqTr = img->tr; + img->sequence_end_flag = 0; + } + if (img->new_sequence_flag) { + hd->next_IDRtr = img->tr; + hd->next_IDRcoi = img->coding_order; + img->new_sequence_flag = 0; + } +#if 0/*RD170_FIX_BG*/ + if (hd->vec_flag) { + int32_t k; + flushDPB(); + for (k = 0; k < avs2_dec->ref_maxbuffer; k++) + cleanRefMVBufRef(k); + + hd->vec_flag = 0; + free_global_buffers(); + img->number = 0; + } +#endif +/* allocate memory for frame buffers*/ +#if 0 +/* called in vavs2.c*/ + if (img->number == 0) + avs2_init_global_buffers(avs2_dec); +#endif + img->current_mb_nr = 0; + + init_frame(avs2_dec); + + img->types = img->type; /* jlzheng 7.15*/ + + if (img->type != B_IMG) { + hd->pre_img_type = img->type; + hd->pre_img_types = img->types; + } + +#ifdef AML + avs2_dec->lcu_size_log2 = log2i(avs2_dec->lcu_size); + lcu_x_num_div = (img->width/avs2_dec->lcu_size); + lcu_y_num_div = (img->height/avs2_dec->lcu_size); + avs2_dec->lcu_x_num = ((img->width % avs2_dec->lcu_size) == 0) ? + lcu_x_num_div : lcu_x_num_div+1; + avs2_dec->lcu_y_num = ((img->height % avs2_dec->lcu_size) == 0) ? 
+ lcu_y_num_div : lcu_y_num_div+1; + avs2_dec->lcu_total = avs2_dec->lcu_x_num*avs2_dec->lcu_y_num; +#endif + return SOP; +} + +int avs2_post_process(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + int32_t i; + int ret; + if (img->typeb == BACKGROUND_IMG && hd->background_picture_enable) { +#ifdef AML + for (i = 0; i < avs2_dec->ref_maxbuffer; i++) { + if (avs2_dec->fref[i]->bg_flag != 0) { + avs2_dec->fref[i]->bg_flag = 0; + if (is_avs2_print_bufmgr_detail()) + pr_info( + "clear old BACKGROUND_IMG for index %d\r\n", + avs2_dec->fref[i]->index); + } + } + if (is_avs2_print_bufmgr_detail()) + pr_info( + "post_process: set BACKGROUND_IMG flag for %d\r\n", + hc->cur_pic->index); + avs2_dec->f_bg = hc->cur_pic; + hc->cur_pic->bg_flag = 1; +#endif + } + +#if BCBR + if (hd->background_picture_enable + && hd->bcbr_enable && img->number > 0) + updateBgReference(); +#endif + + if (img->typeb == BACKGROUND_IMG && + hd->background_picture_output_flag == 0) + hd->background_number++; + + if (img->type == B_IMG) { + avs2_dec->fref[0]->imgtr_fwRefDistance + = hd->trtmp; + } + + /* record the reference list information*/ + get_reference_list_info(avs2_dec, avs2_dec->hc.str_list_reference); + + /*pr_info("%s\n", __func__);*/ + ret = frame_postprocessing(avs2_dec); + +#if FIX_PROFILE_LEVEL_DPB_RPS_1 + /* delete the frame that will never be used*/ + { + int32_t i, j; + if (is_avs2_print_bufmgr_detail()) { + pr_info( + "%s, coding_order %d to remove %d buf: ", + __func__, + img->coding_order, + hd->curr_RPS.num_to_remove); + for (i = 0; i < hd->curr_RPS.num_to_remove; i++) + pr_info("%d ", hd->curr_RPS.remove_pic[i]); + pr_info("\n"); + } + for (i = 0; i < hd->curr_RPS.num_to_remove; i++) { + for (j = 0; j < avs2_dec->ref_maxbuffer; j++) { + + if (avs2_dec->fref[j]->imgcoi_ref >= -256 + && avs2_dec->fref[j]->imgcoi_ref == + img->coding_order - + 
hd->curr_RPS.remove_pic[i]) + break; + } + if (j < avs2_dec->ref_maxbuffer) { /**/ +#if FIX_RPS_PICTURE_REMOVE +/* Label new frames as "un-referenced" */ + avs2_dec->fref[j]->refered_by_others = 0; + + /* remove frames which have been outputted */ + if (avs2_dec->fref[j]->is_output == -1) { + avs2_dec->fref[j]-> + imgtr_fwRefDistance = -256; + avs2_dec->fref[j]->imgcoi_ref = -257; + avs2_dec->fref[j]->temporal_id = -1; + + } +#else + avs2_dec->fref[j]->imgcoi_ref = -257; +#if M3480_TEMPORAL_SCALABLE + avs2_dec->fref[j]->temporal_id = -1; +#endif + if (avs2_dec->fref[j]->is_output == -1) { + avs2_dec->fref[j]->imgtr_fwRefDistance + = -256; + } +#endif + } + } + } +#endif + + + /*! TO 19.11.2001 Known Problem: for init_frame + * we have to know the picture type of the + * actual frame*/ + /*! in case the first slice of the P-Frame + * following the I-Frame was lost we decode this + * P-Frame but! do not write it because it + * was + * assumed to be an I-Frame in init_frame.So we + * force the decoder to*/ + /*! guess the right picture type. This is a hack + * a should be removed by the time there is a + * clean*/ + /*! solution where we do not have to know the + * picture type for the function init_frame.*/ + /*! 
End TO 19.11.2001//Lou*/ + + { + if (img->type == I_IMG || + img->type == P_IMG || + img->type == F_IMG) + img->number++; + else { + hc->Bframe_ctr++; /* B + pictures*/ + } + } + return ret; +} + +void init_avs2_decoder(struct avs2_decoder *avs2_dec) +{ + int32_t i, j, k; + + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + if (is_avs2_print_bufmgr_detail()) + pr_info("[t] struct avs2_dec @0x%p\n", avs2_dec); + memset(avs2_dec, 0, sizeof(struct avs2_decoder)); +#ifdef AML + avs2_dec->to_prepare_disp_count = 1; +#endif + /* + * ALFParam init + */ + for (i = 0; i < 3; i++) { + avs2_dec->m_alfPictureParam[i].alf_flag = 0; /*1*/ + avs2_dec->m_alfPictureParam[i].num_coeff = 9; /*1*/ + avs2_dec->m_alfPictureParam[i].filters_per_group = 3; /*1*/ + avs2_dec->m_alfPictureParam[i].componentID = i; /*1*/ + for (j = 0; j < 16; j++) { + avs2_dec->m_alfPictureParam[i].filterPattern[j] = 0; + /*16*/ + } + for (j = 0; j < 16; j++) { + for (k = 0; k < 9; k++) { + avs2_dec-> + m_alfPictureParam[i].coeffmulti[j][k] = 0; + /*16*9*/ + } + } + } + + img->seq_header_indicate = 0; + img->B_discard_flag = 0; + + hd->eos = 0; + + if (input->ref_pic_order) { /*ref order*/ + hd->dec_ref_num = 0; + } + + /* + memset(g_log2size, -1, MAX_CU_SIZE + 1); + c = 2; + for (k = 4; k <= MAX_CU_SIZE; k *= 2) { + g_log2size[k] = c; + c++; + } + */ + + avs2_dec->outprint.buffer_num = 0; + + hd->last_output = -1; + hd->end_SeqTr = -1; + hd->curr_IDRtr = 0; + hd->curr_IDRcoi = 0; + hd->next_IDRtr = 0; + hd->next_IDRcoi = 0; + /* Allocate Slice data struct*/ + img->number = 0; + img->type = I_IMG; + + img->imgtr_next_P = 0; + + img->imgcoi_next_ref = 0; + + + img->num_of_references = 0; + hc->seq_header = 0; + + img->new_sequence_flag = 1; + + hd->vec_flag = 0; + + hd->FrameNum = 0; + + /* B pictures*/ + hc->Bframe_ctr = 0; + hc->total_frames = 0; + + /* time for total 
decoding session*/ + hc->tot_time = 0; + +} +
diff --git a/drivers/frame_provider/decoder/avs2/avs2_global.h b/drivers/frame_provider/decoder/avs2/avs2_global.h new file mode 100644 index 0000000..be35a5e --- /dev/null +++ b/drivers/frame_provider/decoder/avs2/avs2_global.h
@@ -0,0 +1,1690 @@ +/* The copyright in this software is being made available under the BSD + * License, included below. This software may be subject to other third party + * and contributor rights, including patent rights, and no such rights are + * granted under this license. + * + * Copyright (c) 2002-2016, Audio Video coding Standard Workgroup of China + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Audio Video coding Standard Workgroup of China + * nor the names of its contributors maybe + * used to endorse or promote products + * derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + + + + +/* + * File name: global.h + * Function: global definitions for for AVS decoder. + * + */ + +#ifndef _GLOBAL_H_ +#define _GLOBAL_H_ + +/* #include <stdio.h> //!< for FILE */ +/* #include <stdlib.h> */ + +#define AML +#define SANITY_CHECK +#undef NO_DISPLAY + +/* #include "define.h" */ +#define RD "19.2" +#define VERSION "19.2" + +#define RESERVED_PROFILE_ID 0x24 +#define BASELINE_PICTURE_PROFILE 18 +#define BASELINE_PROFILE 32 /* 0x20 */ +#define BASELINE10_PROFILE 34 /* 0x22 */ + + +#define SCENE_PROFILE 48 /* 0x21 */ +#define SCENE10_PROFILE 50 /* 0x23 */ + +#define TRACE 0 /* !< 0:Trace off 1:Trace on */ + + +/* Type definitions and file operation for Windows/Linux + * All file operations for windows are replaced with native (FILE *) operations + * Falei LUO (falei.luo@vipl.ict.ac.cn) + * */ + +#define _FILE_OFFSET_BITS 64 /* for 64 bit fseeko */ +#define fseek fseeko + +#define int16 int16_t +#define int64 int64_t + +/* ////////////////// bug fix ///////////////////////////// */ +#define ALFSliceFix 1 +#define WRITENBIT_FIX 1 +#define FIX_PROFILE_LEVEL_DPB_RPS_1 1 +#define FIX_PROFILE_LEVEL_DPB_RPS_2 1 +#define FIX_RPS_PICTURE_REMOVE 1 /* flluo@pku.edu.cn */ +#define Mv_Clip 1 /* yuquanhe@hisilicon.com */ +#define REMOVE_UNUSED 1 /* yuquanhe@hisilicon.com */ +#define SAO_Height_Fix 1 /* yuquanhe@hisilicon.com */ +#define B_BACKGROUND_Fix 1 /* yuquanhe@hisilicon.com */ +#define Check_Bitstream 1 /* yuquanhe@hisilicon.com */ +#define Wq_param_Clip 1 /* yuquanhe@hisilicon.com */ + /* luofalei flluo@pku.edu.cn , wlq15@mails.tsinghua.edu.cn , + Longfei.Wang@mediatek.com */ +#define RD1501_FIX_BG 1 + /* yuquanhe@hisilicon.com ; he-yuan.lin@mstarsemi.com */ +#define Mv_Rang 1 + /* Longfei.Wang@mediatek.com ;fred.chiu@mediatek.com + jie1222.chen@samsung.com */ +#define RD160_FIX_BG 1 + /* Y_K_Tu@novatek.com.tw, he-yuan.lin@mstarsemi.com, + victor.huang@montage-tech.com M4041 */ +#define RD1601_FIX_BG 1 +#define SEQ_CHANGE_CHECKER 1 /* 
he-yuan.lin@mstarsemi.com */ +#define M4140_END_OF_SLICE_CHECKER 1 /* he-yuan.lin@mstarsemi.com */ + /* wlq15@mails.tsinghua.edu.cn */ +#define Mv_check_bug 1 +#define SAO_ASSERTION_FIX 1 /* fred.chiu@mediatek.com */ +#define FIELD_HORI_MV_NO_SCALE_FIX 1 /* fred.chiu@mediatek.com */ +#define RD170_FIX_BG 1 +#define FIX_CHROMA_FIELD_MV_BK_DIST 1 +#define FIX_LUMA_FIELD_MV_BK_DIST 1 +#define FIX_CHROMA_FIELD_MV_CLIP 1 +#if 1 +#define FIX_FLUSH_DPB_BY_LF 1 /* fred.chiu@mediatek.com */ +#define FIX_SEQ_END_FLUSH_DPB_BY_LF 1 /* fred.chiu@mediatek.com */ +#else +#define FIX_FLUSH_DPB_BY_LF 0 /* fred.chiu@mediatek.com */ +#define FIX_SEQ_END_FLUSH_DPB_BY_LF 0 /* fred.chiu@mediatek.com */ +#endif +#define RD191_FIX_BUG 1 /* yuquanhe@hsilicon.com */ +#define SYM_MV_SCALE_FIX 1/* peisong.chen@broadcom.com */ +#define BUG_10BIT_REFINEQP 0 /* wangzhenyu */ + + + +#if RD191_FIX_BUG +#endif + +/************************ + * AVS2 macros start + **************************/ + +#define INTERLACE_CODING 1 +#if INTERLACE_CODING /* M3531: MV scaling compensation */ +/* Luma component */ +#define HALF_PIXEL_COMPENSATION 1 /* common functions definition */ +#define HALF_PIXEL_COMPENSATION_PMV 1 /* spacial MV prediction */ +#define HALF_PIXEL_COMPENSATION_DIRECT 1 /* B direct mode */ + /* MV derivation method 1, weighted P_skip mode */ +#define HALF_PIXEL_COMPENSATION_M1 1 + /* M1 related with mv-scaling function */ +#define HALF_PIXEL_COMPENSATION_M1_FUCTION 1 +#define HALF_PIXEL_COMPENSATION_MVD 1 /* MV scaling from FW->BW */ +/* Chroma components */ + /* chroma MV is scaled with luma MV for 4:2:0 format */ +#define HALF_PIXEL_CHROMA 1 + /* half pixel compensation for p skip/direct */ +#define HALF_PIXEL_PSKIP 1 +#define INTERLACE_CODING_FIX 1 /* HLS fix */ +#define OUTPUT_INTERLACE_MERGED_PIC 1 + +#endif +/* + ******************************* +AVS2 10bit/12bit profile + ******************************** + */ + +#define DBFIX_10bit 1 + +#define BUG_10bit 1 + +/* + 
*************************************** +AVS2 HIGH LEVEL SYNTAX + *************************************** + */ +#define AVS2_HDR_HLS 1 + /* AVS2 HDR technology //yuquanhe@hisilicon.com */ +#define AVS2_HDR_Tec 1 +#if AVS2_HDR_Tec +#define HDR_CHROMA_DELTA_QP 1 /* M3905 */ +#define HDR_ADPTIVE_UV_DELTA 1 +#endif +/* + ************************************* +AVS2 S2 + ************************************* + */ +#define AVS2_S2_FASTMODEDECISION 1 +#define RD1510_FIX_BG 1 /* 20160714, flluo@pku.edu.cn */ + + +/* ////////////////// prediction techniques ///////////////////////////// */ +#define LAM_2Level_TU 0.8 + + +#define DIRECTION 4 +#define DS_FORWARD 4 +#define DS_BACKWARD 2 +#define DS_SYM 3 +#define DS_BID 1 + +#define MH_PSKIP_NUM 4 +#define NUM_OFFSET 0 +#define BID_P_FST 1 +#define BID_P_SND 2 +#define FW_P_FST 3 +#define FW_P_SND 4 +#define WPM_NUM 3 + /* M3330 changes it to 2, the original value is 3 */ +#define MAX_MVP_CAND_NUM 2 + +#define DMH_MODE_NUM 5 /* Number of DMH mode */ +#define TH_ME 0 /* Threshold of ME */ + +#define MV_SCALE 1 + +/* ///// reference picture management // */ +#define FIX_MAX_REF 1 /* Falei LUO, flluo@pku.edu.cn */ +#if FIX_MAX_REF + /* maximum number of reference frame for each frame */ +#define MAXREF 7 +#define MAXGOP 32 +#endif + +/* #define REF_MAXBUFFER 7 */ +/* more bufferes for displaying and background */ +/* #define REF_MAXBUFFER 15 */ +#if 1 +#define REF_MAXBUFFER 23 +#define REF_BUFFER 16 +#else +#if RD170_FIX_BG +#define REF_MAXBUFFER 16 +#else +#define REF_MAXBUFFER 7 +#endif +#endif + +#ifdef TO_PORTING + /* block-composed background reference, fangdong@mail.ustc.edu.cn */ +#define BCBR 1 +#else +#define BCBR 0 +#endif +/* one more buffer for background when background_picture_output_flag is 0*/ +#define AVS2_MAX_BUFFER_NUM (REF_MAXBUFFER + 1) + +/* /////////////////Adaptive Loop Filter////////////////////////// */ +#define NUM_ALF_COEFF_CTX 1 +#define NUM_ALF_LCU_CTX 4 + +#define LAMBDA_SCALE_LUMA (1.0) +#define 
LAMBDA_SCALE_CHROMA (1.0) + + + +/* ////////////////// entropy coding ///////////////////////////// */ + /* M3090: Make sure rs1 will not overflow for 8-bit unsign char */ +#define NUN_VALUE_BOUND 254 +#define Encoder_BYPASS_Final 1 /* M3484 */ +#define Decoder_Bypass_Annex 0 /* M3484 */ +#define Decoder_Final_Annex 0 /* M3540 */ + + +/* ////////////////// coefficient coding ///// */ + /* M3035 size of an coefficient group, 4x4 */ +#define CG_SIZE 16 + +#define SWAP(x, y) {\ + (y) = (y) ^ (x);\ + (x) = (y) ^ (x);\ + (y) = (x) ^ (y);\ +} + +/* ////////////////// encoder optimization /////// */ +#define TH 2 + +#define M3624MDLOG /* reserved */ + +#define TDRDO 1 /* M3528 */ +/* #define FIX_TDRDO_BG 1 // flluo@pku.edu.cn, 20160318// */ +#define RATECONTROL 1 /* M3580 M3627 M3689 */ +#define AQPO 1 /* M3623 */ +#define AQPOM3694 0 +#define AQPOM4063 1 +#define AQPOM3762 1 +#define BGQPO 1 /* M4061 */ +#if BGQPO +#define LONGREFERENCE 32 +#endif + +/* #define REPORT */ +/* ////////////////// Quantization /////////////////////////////////////// */ + /* Adaptive frequency weighting quantization */ +#define FREQUENCY_WEIGHTING_QUANTIZATION 1 +#if FREQUENCY_WEIGHTING_QUANTIZATION +#define CHROMA_DELTA_QP 1 +#define AWQ_WEIGHTING 1 +#define AWQ_LARGE_BLOCK_ENABLE 1 +#define COUNT_BIT_OVERHEAD 0 +#define AWQ_LARGE_BLOCK_EXT_MAPPING 1 +#endif + +#define QuantClip 1 +#define QuantMatrixClipFix 1 /* 20160418, fllu@pku.edu.cn */ + +#define WQ_MATRIX_FCD 1 +#if !WQ_MATRIX_FCD +#define WQ_FLATBASE_INBIT 7 +#else +#define WQ_FLATBASE_INBIT 6 +#endif + + +#define REFINED_QP 1 + + +/* ////////////////// delta QP ///// */ + /* M3122: the minimum dQP unit is Macro block */ +#define MB_DQP 1 + /* M3122: 1 represents left prediction + and 0 represents previous prediction */ +#define LEFT_PREDICTION 1 + + +/* //////////////////////SAO///////// */ +#define NUM_BO_OFFSET 32 +#define MAX_NUM_SAO_CLASSES 32 +#define NUM_SAO_BO_CLASSES_LOG2 5 +#define NUM_SAO_BO_CLASSES_IN_BIT 5 +#define 
MAX_DOUBLE (1.7e + 308) +#define NUM_SAO_EO_TYPES_LOG2 2 +#define NUM_SAO_BO_CLASSES (1<<NUM_SAO_BO_CLASSES_LOG2) +#define SAO_RATE_THR 0.75 +#define SAO_RATE_CHROMA_THR 1 +#define SAO_SHIFT_PIX_NUM 4 + +#define SAO_PARA_CROSS_SLICE 1 +#define SAO_MULSLICE_FTR_FIX 1 + +/* /////////////////// Transform ///////////////////// */ +#define SEC_TR_SIZE 4 + /* apply secT to greater than or equal to 8x8 block, */ +#define SEC_TR_MIN_BITSIZE 3 + +#define BUGFIXED_COMBINED_ST_BD 1 + +/* /////////////////// Scalable ///////////////////// */ +#define M3480_TEMPORAL_SCALABLE 1 +#define TEMPORAL_MAXLEVEL 8 +#define TEMPORAL_MAXLEVEL_BIT 3 + + + + +/* + ************************************* + * AVS2 macros end + * + ************************************* + */ + +#define CHROMA 1 +#define LUMA_8x8 2 +#define NUM_BLOCK_TYPES 8 + +#if (!defined clamp) + /* !< clamp a to the range of [b;c] */ +#define clamp(a, b, c) ((a) < (b) ? (b) : ((a) > (c) ? (c) : (a))) +#endif + + /* POC200301 moved from defines.h */ +#define LOG2_MAX_FRAME_NUM_MINUS4 4 + /* !< bytes for one frame */ +#define MAX_CODED_FRAME_SIZE 15000000 + +/* ----------------------- */ +/* FLAGS and DEFINES for new chroma intra prediction, Dzung Hoang */ +/* Threshold values to zero out quantized transform coefficients. */ +/* Recommend that _CHROMA_COEFF_COST_ be low to improve chroma quality */ +#define _LUMA_COEFF_COST_ 4 /* !< threshold for luma coeffs */ + /* !< Number of pixels padded around the reference frame (>=4) */ +#define IMG_PAD_SIZE 64 + +#define OUTSTRING_SIZE 255 + + /* !< abs macro, faster than procedure */ +#define absm(A) ((A) < (0) ? (-(A)) : (A)) + /* !< used for start value for some variables */ +#define MAX_VALUE 999999 + +#define Clip1(a) ((a) > 255 ? 255:((a) < 0 ? 0 : (a))) +#define Clip3(min, max, val) (((val) < (min)) ?\ + (min) : (((val) > (max)) ? 
(max) : (val))) + +/* --------------------------------------------- */ + +/* block size of block transformed by AVS */ +#define PSKIPDIRECT 0 +#define P2NX2N 1 +#define P2NXN 2 +#define PNX2N 3 +#define PHOR_UP 4 +#define PHOR_DOWN 5 +#define PVER_LEFT 6 +#define PVER_RIGHT 7 +#define PNXN 8 +#define I8MB 9 +#define I16MB 10 +#define IBLOCK 11 +#define InNxNMB 12 +#define INxnNMB 13 +#define MAXMODE 14 /* add yuqh 20130824 */ +#define LAMBDA_ACCURACY_BITS 16 +#define LAMBDA_FACTOR(lambda) ((int)((double)(1 << LAMBDA_ACCURACY_BITS)\ + * lambda + 0.5)) +#define WEIGHTED_COST(factor, bits) (((factor) * (bits))\ + >> LAMBDA_ACCURACY_BITS) +#define MV_COST(f, s, cx, cy, px, py) (WEIGHTED_COST(f, mvbits[((cx) << (s))\ + - px] + mvbits[((cy) << (s)) - py])) +#define REF_COST(f, ref) (WEIGHTED_COST(f, refbits[(ref)])) + +#define BWD_IDX(ref) (((ref) < 2) ? 1 - (ref) : (ref)) +#define REF_COST_FWD(f, ref) (WEIGHTED_COST(f,\ + ((img->num_ref_pic_active_fwd_minus1 == 0) ?\ + 0 : refbits[(ref)]))) +#define REF_COST_BWD(f, ef) (WEIGHTED_COST(f,\ + ((img->num_ref_pic_active_bwd_minus1 == 0) ?\ + 0 : BWD_IDX(refbits[ref])))) + +#define IS_INTRA(MB) ((MB)->cuType == I8MB ||\ + (MB)->cuType == I16MB ||\ + (MB)->cuType == InNxNMB || (MB)->cuType == INxnNMB) +#define IS_INTER(MB) ((MB)->cuType != I8MB &&\ + (MB)->cuType != I16MB && (MB)->cuType != InNxNMB\ + && (MB)->cuType != INxnNMB) +#define IS_INTERMV(MB) ((MB)->cuType != I8MB &&\ + (MB)->cuType != I16MB && (MB)->cuType != InNxNMB &&\ + (MB)->cuType != INxnNMB && (MB)->cuType != 0) + + +#define IS_DIRECT(MB) ((MB)->cuType == PSKIPDIRECT && (img->type == B_IMG)) +#define IS_P_SKIP(MB) ((MB)->cuType == PSKIPDIRECT &&\ + (((img->type == F_IMG)) || ((img->type == P_IMG)))) +#define IS_P8x8(MB) ((MB)->cuType == PNXN) + +/* Quantization parameter range */ +#define MIN_QP 0 +#define MAX_QP 63 +#define SHIFT_QP 11 + +/* Picture types */ +#define INTRA_IMG 0 /* !< I frame */ +#define INTER_IMG 1 /* !< P frame */ +#define B_IMG 2 /* !< B 
frame */ +#define I_IMG 0 /* !< I frame */ +#define P_IMG 1 /* !< P frame */ +#define F_IMG 4 /* !< F frame */ + +#define BACKGROUND_IMG 3 + +#define BP_IMG 5 + + +/* Direct Mode types */ +#define MIN_CU_SIZE 8 +#define MIN_BLOCK_SIZE 4 +#define MIN_CU_SIZE_IN_BIT 3 +#define MIN_BLOCK_SIZE_IN_BIT 2 +#define BLOCK_MULTIPLE (MIN_CU_SIZE/(MIN_BLOCK_SIZE)) +#define MAX_CU_SIZE 64 +#define MAX_CU_SIZE_IN_BIT 6 +#define B4X4_IN_BIT 2 +#define B8X8_IN_BIT 3 +#define B16X16_IN_BIT 4 +#define B32X32_IN_BIT 5 +#define B64X64_IN_BIT 6 + /* !< # luma intra prediction modes */ +#define NUM_INTRA_PMODE 33 + /* number of luma modes for full RD search */ +#define NUM_MODE_FULL_RD 9 + /* !< #chroma intra prediction modes */ +#define NUM_INTRA_PMODE_CHROMA 5 + +/* luma intra prediction modes */ + +#define DC_PRED 0 +#define PLANE_PRED 1 +#define BI_PRED 2 +#define VERT_PRED 12 +#define HOR_PRED 24 + + +/* chroma intra prediction modes */ +#define DM_PRED_C 0 +#define DC_PRED_C 1 +#define HOR_PRED_C 2 +#define VERT_PRED_C 3 +#define BI_PRED_C 4 + +#define EOS 1 /* !< End Of Sequence */ + /* !< Start Of Picture */ +#define SOP 2 + +#define DECODING_OK 0 +#define SEARCH_SYNC 1 +#define DECODE_MB 1 + +#ifndef max + /* !< Macro returning max value */ +#define max(a, b) ((a) > (b) ? (a) : (b)) + /* !< Macro returning min value */ +#define min(a, b) ((a) < (b) ? 
(a) : (b)) +#endif + + +#define XY_MIN_PMV 1 +#if XY_MIN_PMV +#define MVPRED_xy_MIN 0 +#else +#define MVPRED_MEDIAN 0 +#endif +#define MVPRED_L 1 +#define MVPRED_U 2 +#define MVPRED_UR 3 + +#define DUAL 4 +#define FORWARD 0 +#define BACKWARD 1 +#define SYM 2 +#define BID 3 +#define INTRA -1 + +#define BUF_CYCLE 5 + +#define ROI_M3264 1 /* ROI Information Encoding */ + +#define PicExtensionData 1 + + +#define REF_OUTPUT 1 /* M3337 */ + + +/* MV scaling 14 bit */ +#define MULTI 16384 +#define HALF_MULTI 8192 +#define OFFSET 14 +/* end of MV scaling */ + /* store the middle pixel's mv in a motion information unit */ +#define MV_DECIMATION_FACTOR 4 + +/* BUGFIX_AVAILABILITY_INTRA */ +#define NEIGHBOR_INTRA_LEFT 0 +#define NEIGHBOR_INTRA_UP 1 +#define NEIGHBOR_INTRA_UP_RIGHT 2 +#define NEIGHBOR_INTRA_UP_LEFT 3 +#define NEIGHBOR_INTRA_LEFT_DOWN 4 +/* end of BUGFIX_AVAILABILITY_INTRA */ + +/* end #include "define.h" */ + +/*#include "commonStructures.h"*/ + +/*typedef uint16_t byte;*/ /* !< byte type definition */ +#define byte uint16_t +#define pel_t byte + +enum BitCountType_e { + BITS_HEADER, + BITS_TOTAL_MB, + BITS_MB_MODE, + BITS_INTER_MB, + BITS_CBP_MB, + BITS_CBP01_MB, + BITS_COEFF_Y_MB, + BITS_COEFF_UV_MB, + BITS_DELTA_QUANT_MB, + BITS_SAO_MB, + MAX_BITCOUNTER_MB +}; + + +enum SAOEOClasses { +/* EO Groups, the assignments depended on +how you implement the edgeType calculation */ + SAO_CLASS_EO_FULL_VALLEY = 0, + SAO_CLASS_EO_HALF_VALLEY = 1, + SAO_CLASS_EO_PLAIN = 2, + SAO_CLASS_EO_HALF_PEAK = 3, + SAO_CLASS_EO_FULL_PEAK = 4, + SAO_CLASS_BO = 5, + NUM_SAO_EO_CLASSES = SAO_CLASS_BO, + NUM_SAO_OFFSET +}; + +struct SAOstatdata { + int32_t diff[MAX_NUM_SAO_CLASSES]; + int32_t count[MAX_NUM_SAO_CLASSES]; +}; + +struct CopyRight_s { + int32_t extension_id; + int32_t copyright_flag; + int32_t copyright_id; + int32_t original_or_copy; + int32_t reserved; + int32_t copyright_number; +}; + +struct CameraParamters_s { + int32_t reserved; + int32_t camera_id; + int32_t 
height_of_image_device; + int32_t focal_length; + int32_t f_number; + int32_t vertical_angle_of_view; + int32_t camera_position_x; + int32_t camera_position_y; + int32_t camera_position_z; + int32_t camera_direction_x; + int32_t camera_direction_y; + int32_t camera_direction_z; + int32_t image_plane_vertical_x; + int32_t image_plane_vertical_y; + int32_t image_plane_vertical_z; +}; + +/* ! SNRParameters */ +struct SNRParameters_s { + double snr_y; /* !< current Y SNR */ + double snr_u; /* !< current U SNR */ + double snr_v; /* !< current V SNR */ + double snr_y1; /* !< SNR Y(dB) first frame */ + double snr_u1; /* !< SNR U(dB) first frame */ + double snr_v1; /* !< SNR V(dB) first frame */ + double snr_ya; /* !< Average SNR Y(dB) remaining frames */ + double snr_ua; /* !< Average SNR U(dB) remaining frames */ + double snr_va; /* !< Average SNR V(dB) remaining frames */ +#if INTERLACE_CODING + double i_snr_ya; /* !< current Y SNR */ + double i_snr_ua; /* !< current U SNR */ + double i_snr_va; /* !< current V SNR */ +#endif +}; + +/* signal to noise ratio parameters */ + +/* ! 
codingUnit */ +struct codingUnit { + uint32_t ui_MbBitSize; + int32_t uiBitSize; /* size of MB */ + /* !< number of current syntax element */ + int32_t currSEnr; + int32_t slice_nr; + int32_t delta_quant; /* !< for rate control */ + int32_t delta_qp; + int32_t qp; + int32_t bitcounter[MAX_BITCOUNTER_MB]; + struct codingUnit + *mb_available[3][3]; /*!< pointer to neighboring MBs + in a 3x3 window of current MB, which is located at [1][1] \n + NULL pointer identifies neighboring MBs which are unavailable */ + /* some storage of codingUnit syntax elements for global access */ + int32_t cuType; + int32_t weighted_skipmode; + + int32_t md_directskip_mode; + + int32_t trans_size; + int + /* !< indices correspond to [forw,backw][block_y][block_x][x,y, dmh] */ + mvd[2][BLOCK_MULTIPLE][BLOCK_MULTIPLE][3]; + + int32_t intra_pred_modes[BLOCK_MULTIPLE * BLOCK_MULTIPLE]; + int32_t real_intra_pred_modes[BLOCK_MULTIPLE * BLOCK_MULTIPLE]; + int32_t l_ipred_mode; + int32_t cbp, cbp_blk; + uint32_t cbp_bits; + + int32_t b8mode[4]; + int32_t b8pdir[4]; + /* !< chroma intra prediction mode */ + int32_t c_ipred_mode; + + /* !< pointer to neighboring MB (AEC) */ + struct codingUnit *mb_available_up; + /* !< pointer to neighboring MB (AEC) */ + struct codingUnit *mb_available_left; + int32_t mbAddrA, mbAddrB, mbAddrC, mbAddrD; + /* !<added by mz, 2008.04 */ + int32_t slice_set_index; + /* added by mz, 2008.04 */ + int32_t slice_header_flag; + int32_t sliceqp; /* added by mz, 2008.04 */ +#if MB_DQP + int32_t previouse_qp; + int32_t left_cu_qp; +#endif + int32_t block_available_up; + int32_t block_available_left; + +}; + + +/* image parameters */ +struct syntaxelement; +struct slice; +struct alfdatapart; +struct SAOBlkParam_s { + int32_t modeIdc; /* NEW, MERGE, OFF */ + /* NEW: EO_0, EO_90, EO_135, EO_45, BO. 
MERGE: left, above */ + int32_t typeIdc; + int32_t startBand; /* BO: starting band index */ + int32_t startBand2; + int32_t deltaband; + int32_t offset[MAX_NUM_SAO_CLASSES]; +}; +struct ALFParam_s { + int32_t alf_flag; + int32_t num_coeff; + int32_t filters_per_group; + int32_t componentID; + int32_t filterPattern[16]; /* *filterPattern; */ + int32_t coeffmulti[16][9]; /* **coeffmulti; */ +}; + +enum ALFComponentID { + ALF_Y = 0, + ALF_Cb, + ALF_Cr, + NUM_ALF_COMPONENT +}; +struct ALF_APS_s { + int32_t usedflag; + int32_t cur_number; + int32_t max_number; + struct ALFParam_s alf_par[NUM_ALF_COMPONENT]; +}; + + +/* ------------------------------------------------------ + * frame data + */ +struct avs2_frame_s { + int32_t imgcoi_ref; + byte * *referenceFrame[3]; + int32_t **refbuf; + int32_t ***mvbuf; +#if 0 + double saorate[NUM_SAO_COMPONENTS]; +#endif + byte ***ref; + + int32_t imgtr_fwRefDistance; + int32_t refered_by_others; + int32_t is_output; + int32_t to_prepare_disp; +#if M3480_TEMPORAL_SCALABLE + /* temporal level setted in configure file */ + int32_t temporal_id; +#endif + byte **oneForthRefY; +#if FIX_MAX_REF + int32_t ref_poc[MAXREF]; +#else + int32_t ref_poc[4]; +#endif +#ifdef AML + int32_t index; + int32_t mmu_alloc_flag; + int32_t lcu_size_log2; + /*uint32_t header_adr;*/ + uint32_t mc_y_adr; + uint32_t mc_u_v_adr; + uint32_t mc_canvas_y; + uint32_t mc_canvas_u_v; + uint32_t mpred_mv_wr_start_addr; + uint8_t bg_flag; + /**/ + unsigned long header_adr; + /*AVS2_10B_MMU_DW*/ + unsigned long dw_header_adr; + + int buf_size; + int lcu_total; + int comp_body_size; + uint32_t dw_y_adr; + uint32_t dw_u_v_adr; + int y_canvas_index; + int uv_canvas_index; + struct canvas_config_s canvas_config[2]; + int double_write_mode; + int bit_depth; + unsigned long cma_alloc_addr; + int BUF_index; + int pic_w; + int pic_h; + int stream_offset; + u32 pts; + u64 pts64; + /**/ + int vf_ref; + int decode_idx; + int slice_type; + int32_t imgtr_fwRefDistance_bak; + int32_t 
error_mark; + int32_t decoded_lcu; +#endif +#ifndef MV_USE_FIXED_BUF + int mv_buf_index; +#endif + + /* picture qos infomation*/ + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; + + u32 hw_decode_time; + u32 frame_size; // For frame base mode + + char *cuva_data_buf; + int cuva_data_size; +}; + + +struct ImageParameters_s { + struct codingUnit *mb_data; + int32_t number; /* <! frame number */ + int32_t numIPFrames; + + int32_t type; + int32_t typeb; + int32_t typeb_before; + + int32_t qp; /* <! quant for the current frame */ + int32_t current_mb_nr; /* bitstream order */ + int32_t current_slice_nr; + int32_t tr; /* <! temporal reference, 8 bit, */ + + int32_t width; /* !< Number of pels */ + int32_t width_cr; /* !< Number of pels chroma */ + int32_t height; /* !< Number of lines */ + int32_t height_cr; /* !< Number of lines chroma */ + int32_t PicWidthInMbs; + int32_t PicSizeInMbs; + int32_t block8_x, block8_y; + int32_t subblock_x; + int32_t subblock_y; + + int32_t num_of_references; + /* <! Bug Fix: correct picture size for outputted reconstructed pictures */ + int32_t auto_crop_right; + int32_t auto_crop_bottom; + int32_t buf_cycle; + int32_t picture_structure; + /* <! 
pointer to current Slice data struct */ + struct slice *currentSlice; + + int32_t **predBlock; /* !< current best prediction mode */ + int32_t **predBlockTmp; + /* !< the diff pixel values between orginal image and prediction */ + int32_t **resiY; + /* !< Array containing square values,used for snr computation */ + int32_t *quad; + + /* //location of current MB////// */ + int32_t mb_y; /* !< current MB vertical */ + int32_t mb_x; /* !< current MB horizontal */ + int32_t pix_y; /* !< current pixel vertical */ + int32_t pix_x; /* !< current pixel horizontal */ + int32_t pix_c_y; /* !< current pixel chroma vertical */ + int32_t pix_c_x; /* !< current pixel chroma horizontal */ + + int32_t imgtr_next_P; + + int32_t imgcoi_next_ref; + + /* !< GH ipredmode[90][74];prediction mode for inter frames */ + /* fix from ver 4.1 */ + int32_t **ipredmode; + int32_t **rec_ipredmode; + + + /* //////////////decoder////////////////////////// */ + int32_t max_mb_nr; + int32_t **intra_block; + + int32_t block_y; + int32_t block_x; + /* <! final 4x4 block. Extended to 16x16 for AVS */ + int32_t resiUV[2][MAX_CU_SIZE][MAX_CU_SIZE]; + + int32_t **fw_refFrArr; /* <! [72][88]; */ + int32_t **bw_refFrArr; /* <! [72][88]; */ + + int32_t random_access_decodable_flag; + + int32_t seq_header_indicate; + int32_t B_discard_flag; + + /* B pictures */ + uint32_t pic_distance; + + uint32_t coding_order; + + uint32_t PrevPicDistanceLsb; + int32_t CurrPicDistanceMsb; + + int32_t PicHeightInMbs; + + int32_t types; + + int32_t new_sequence_flag; + int32_t sequence_end_flag; /* <! rm52k_r2 */ + + int32_t current_slice_set_index; /* <! added by mz, 2008.04 */ + int32_t current_slice_header_flag; /* <! added by mz, 2008.04 */ + int32_t slice_set_qp[64]; /* <! 
added by mz, 2008.04 */ + + + int32_t inter_amp_enable; + + /* ////////////////////////encoder////////////////////////// */ + + /* int32_t nb_references; //!< replaced by "num_of_references" */ + + int32_t framerate; + + int32_t ***predBlockY; /* !< all 9 prediction modes */ + /* !< new chroma 8x8 intra prediction modes */ + int32_t ****predBlockUV; + + int32_t **Coeff_all;/* qyu 0821 */ + + struct syntaxelement *MB_SyntaxElements; /* !< by oliver 0612 */ + + /* B pictures */ + + int32_t b_frame_to_code; + int32_t num_ref_pic_active_fwd_minus1; + int32_t num_ref_pic_active_bwd_minus1; + int32_t mv_range_flag; + + uint32_t frame_num; /* frame_num for this frame */ + int32_t slice_offset; + /* the following are sent in the slice header */ + int32_t NoResidueDirect; + int32_t coded_mb_nr; + int32_t progressive_frame; + int32_t tc_reserve_bit; + /* the last MB no in current slice. Yulj 2004.07.15 */ + int32_t mb_no_currSliceLastMB; + int32_t Seqheader_flag; /* Added by cjw, 20070327 */ + int32_t EncodeEnd_flag; /* Carmen, 2007/12/19 */ + + uint16_t bbv_delay; + + int32_t tmp_fwBSkipMv[DIRECTION + 1][2]; + int32_t tmp_bwBSkipMv[DIRECTION + 1][2]; + + int32_t tmp_pref_fst[MH_PSKIP_NUM + NUM_OFFSET + 1]; + int32_t tmp_pref_snd[MH_PSKIP_NUM + NUM_OFFSET + 1]; + int32_t tmp_fstPSkipMv[MH_PSKIP_NUM + NUM_OFFSET + 1][3]; + int32_t tmp_sndPSkipMv[MH_PSKIP_NUM + NUM_OFFSET + 1][3]; +#if BCBR +byte *org_ref_y; +byte *org_ref_u; +byte *org_ref_v; +int32_t *BLCUidx; +int32_t *DQPList; +int32_t iNumCUsInFrame; + +byte *org_ref2_y; +byte *org_ref2_u; +byte *org_ref2_v; +int32_t ref2Num; +#endif +/* //////////////SAO parameter////////////////// */ +double *cur_saorate; +#if 0 +int32_t slice_sao_on[NUM_SAO_COMPONENTS]; +#endif +int32_t pic_alf_on[NUM_ALF_COMPONENT]; +struct alfdatapart *dp_ALF; + +#if INTERLACE_CODING +int32_t is_field_sequence; +int32_t is_top_field; +#endif + + +}; + + + +/* ! 
struct for context management */ +struct BiContextType_s { + uint8_t MPS; /* 1 bit */ + uint32_t LG_PMPS; /* 10 bits */ + uint8_t cycno; /* 2 bits */ +}; + +/*********************************************************************** + * D a t a t y p e s f o r A E C + ************************************************************************/ + + + +struct pix_pos { + int32_t available; /* ABCD */ + int32_t mb_addr; /* MB position */ + int32_t x; + int32_t y; + int32_t pos_x; /* 4x4 x-pos */ + int32_t pos_y; +}; + + + +struct STDOUT_DATA_s { + int32_t type; + int32_t typeb; + + int32_t framenum; + int32_t tr; + int32_t qp; + double snr_y; + double snr_u; + double snr_v; + int32_t tmp_time; + int32_t picture_structure; + int32_t curr_frame_bits; + int32_t emulate_bits; + + uint32_t DecMD5Value[4]; +#if RD1501_FIX_BG +int32_t background_picture_output_flag;/* Longfei.Wang@mediatek.com */ +#endif +#if RD160_FIX_BG +int32_t picture_reorder_delay; +#endif +int8_t str_reference_list[128]; /* reference list information */ +}; + +/********************************************************************** + * C O N T E X T S F O R T M L S Y N T A X E L E M E N T S + ********************************************************************** + */ +#define NUM_CuType_CTX (11 + 10) +#define NUM_B8_TYPE_CTX 9 +#define NUM_MVD_CTX 15 +#define NUM_PMV_IDX_CTX 10 +#define NUM_REF_NO_CTX 6 +#define NUM_DELTA_QP_CTX 4 +#define NUM_INTER_DIR_CTX 18 +#define NUM_INTER_DIR_DHP_CTX 3 +#define NUM_B8_TYPE_DHP_CTX 1 +#define NUM_AMP_CTX 2 +#define NUM_C_INTRA_MODE_CTX 4 +#define NUM_CBP_CTX 4 +#define NUM_BCBP_CTX 4 +#define NUM_MAP_CTX 17 +#define NUM_LAST_CTX 17 + +#define NUM_INTRA_MODE_CTX 7 + +#define NUM_ABS_CTX 5 +#define NUM_TU_CTX 3 +#define NUM_SPLIT_CTX 8 /* CU depth */ +#if BCBR +#define NUM_BGBLCOK_CTX 1 +#endif + +#define NUM_BRP_CTX 8 + + +#define NUM_LAST_CG_CTX_LUMA 12 +#define NUM_LAST_CG_CTX_CHROMA 6 +#define NUM_SIGCG_CTX_LUMA 2 +#define NUM_SIGCG_CTX_CHROMA 1 +#define 
NUM_LAST_POS_CTX_LUMA 56 +#define NUM_LAST_POS_CTX_CHROMA 16 +#define NUM_LAST_CG_CTX (NUM_LAST_CG_CTX_LUMA + NUM_LAST_CG_CTX_CHROMA) +#define NUM_SIGCG_CTX (NUM_SIGCG_CTX_LUMA + NUM_SIGCG_CTX_CHROMA) +#define NUM_LAST_POS_CTX (NUM_LAST_POS_CTX_LUMA + NUM_LAST_POS_CTX_CHROMA) +#define NUM_SAO_MERGE_FLAG_CTX 3 +#define NUM_SAO_MODE_CTX 1 +#define NUM_SAO_OFFSET_CTX 2 +#define NUM_INTER_DIR_MIN_CTX 2 + +/*end #include "commonStructures.h"*/ + +/*#include "commonVariables.h"*/ + +/* +extern struct CameraParamters_s *camera; +extern struct SNRParameters_s *snr; +extern struct ImageParameters_s *img; + */ + +/* avs2_frame_t *fref[REF_MAXBUFFER]; */ + + +#define ET_SIZE 300 /* !< size of error text buffer */ + + +/* ------------------------------------------------------ + * common data + */ +struct Video_Com_data_s { + int32_t Bframe_ctr; + + /* FILE *p_log; //!< SNR file */ + /* FILE *p_trace; //!< Trace file */ + + int32_t tot_time; + + /* Tsinghua for picture_distance 200701 */ + int32_t picture_distance; + + /* M3178 PKU Reference Manage */ + int32_t coding_order; + /* !< current encoding/decoding frame pointer */ + struct avs2_frame_s *f_rec; + int32_t seq_header; + /* !< Array for reference frames of each block */ + int32_t **refFrArr; + int32_t **p_snd_refFrArr; + + byte ***currentFrame; /* [yuv][height][width] */ +#ifdef AML + struct avs2_frame_s *cur_pic; /*either f_rec or m_bg*/ +#endif + byte **backgroundReferenceFrame[3]; + byte ***background_ref; + + + int32_t total_frames; + + /* mv_range, 20071009 */ + int32_t Min_V_MV; + int32_t Max_V_MV; + int32_t Min_H_MV; + int32_t Max_H_MV; + /* !< buffer for error message for exit with error(void) */ + int8_t errortext[ET_SIZE]; + int8_t str_list_reference[128]; + + +}; +/* extern Video_Com_data *hc; */ + + +/*end #include "commonVariables.h"*/ +/* #define USE_PARAM_TXT */ +/* +#if FIX_CHROMA_FIELD_MV_BK_DIST +int8_t bk_img_is_top_field; +#endif +*/ +/* void write_GB_frame(FILE *p_dec); */ + +#if !FIX_MAX_REF 
+#define MAXREF 4 +#define MAXGOP 32 +#endif + +struct StatBits { + int32_t curr_frame_bits; + int32_t prev_frame_bits; + int32_t emulate_bits; + int32_t prev_emulate_bits; + int32_t last_unit_bits; + int32_t bitrate; + int32_t total_bitrate[1000]; + int32_t coded_pic_num; + int32_t time_s; +}; + +struct reference_management { + int32_t poc; + int32_t qp_offset; + int32_t num_of_ref; + int32_t referd_by_others; + int32_t ref_pic[MAXREF]; + int32_t predict; + int32_t deltaRPS; + int32_t num_to_remove; + int32_t remove_pic[MAXREF]; +}; + + +/* ------------------------------------------------------ + * dec data + */ +struct Video_Dec_data_s { + byte **background_frame[3]; + int32_t background_reference_enable; + + int32_t background_picture_flag; + int32_t background_picture_output_flag; + int32_t background_picture_enable; + + int32_t background_number; + +#if BCBR + int32_t bcbr_enable; +#endif + + int32_t demulate_enable; + int32_t currentbitoffset; + + int32_t aspect_ratio_information; + int32_t frame_rate_code; + int32_t bit_rate_lower; + int32_t bit_rate_upper; + int32_t marker_bit; + + int32_t video_format; + int32_t color_description; + int32_t color_primaries; + int32_t transfer_characteristics; + int32_t matrix_coefficients; + + int32_t progressive_sequence; +#if INTERLACE_CODING +int32_t is_field_sequence; +#endif +int32_t low_delay; +int32_t horizontal_size; +int32_t vertical_size; +int32_t sample_precision; +int32_t video_range; + +int32_t display_horizontal_size; +int32_t display_vertical_size; +int32_t TD_mode; +int32_t view_packing_mode; +int32_t view_reverse; + +int32_t b_pmvr_enabled; +int32_t dhp_enabled; +int32_t b_dmh_enabled; +int32_t b_mhpskip_enabled; +int32_t wsm_enabled; +int32_t b_secT_enabled; + +int32_t tmp_time; +int32_t FrameNum; +int32_t eos; +int32_t pre_img_type; +int32_t pre_img_types; +/* int32_t pre_str_vec; */ +int32_t pre_img_tr; +int32_t pre_img_qp; +int32_t pre_tmp_time; +int32_t RefPicExist; /* 20071224 */ +int32_t 
BgRefPicExist; +int32_t dec_ref_num; /* ref order */ + +/* video edit code */ /* M1956 by Grandview 2006.12.12 */ +int32_t vec_flag; + +/* Copyright_extension(void) header */ +int32_t copyright_flag; +int32_t copyright_identifier; +int32_t original_or_copy; +int64_t copyright_number_1; +int64_t copyright_number_2; +int64_t copyright_number_3; +/* Camera_parameters_extension */ +int32_t camera_id; +int32_t height_of_image_device; +int32_t focal_length; +int32_t f_number; +int32_t vertical_angle_of_view; +int32_t camera_position_x_upper; +int32_t camera_position_x_lower; +int32_t camera_position_y_upper; +int32_t camera_position_y_lower; +int32_t camera_position_z_upper; +int32_t camera_position_z_lower; +int32_t camera_direction_x; +int32_t camera_direction_y; +int32_t camera_direction_z; +int32_t image_plane_vertical_x; +int32_t image_plane_vertical_y; +int32_t image_plane_vertical_z; + +#if AVS2_HDR_HLS +/* mastering_display_and_content_metadata_extension(void) header */ +int32_t display_primaries_x0; +int32_t display_primaries_y0; +int32_t display_primaries_x1; +int32_t display_primaries_y1; +int32_t display_primaries_x2; +int32_t display_primaries_y2; +int32_t white_point_x; +int32_t white_point_y; +int32_t max_display_mastering_luminance; +int32_t min_display_mastering_luminance; +int32_t maximum_content_light_level; +int32_t maximum_frame_average_light_level; +#endif + +/* I_pictures_header(void) */ +int32_t top_field_first; +int32_t repeat_first_field; +int32_t progressive_frame; +#if INTERLACE_CODING +int32_t is_top_field; +#endif +/* int32_t fixed_picture_qp; //qyu 0927 */ +int32_t picture_qp; +int32_t fixed_picture_qp; +int32_t time_code_flag; +int32_t time_code; +int32_t loop_filter_disable; +int32_t loop_filter_parameter_flag; +/* int32_t alpha_offset; */ +/* int32_t beta_offset; */ + +/* Pb_picture_header(void) */ +int32_t picture_coding_type; + +/*picture_display_extension(void)*/ +int32_t frame_centre_horizontal_offset[4]; +int32_t 
frame_centre_vertical_offset[4]; + +/* slice_header(void) */ +int32_t img_width; +int32_t slice_vertical_position; +int32_t slice_vertical_position_extension; +int32_t fixed_slice_qp; +int32_t slice_qp; +int32_t slice_horizontal_positon; /* added by mz, 2008.04 */ +int32_t slice_horizontal_positon_extension; + +int32_t StartCodePosition; +int32_t background_pred_flag; + + +/* Reference Manage */ +int32_t displaydelay; +int32_t picture_reorder_delay; +#if M3480_TEMPORAL_SCALABLE +int32_t temporal_id_exist_flag; +#endif + +int32_t gop_size; +struct reference_management decod_RPS[MAXGOP]; +struct reference_management curr_RPS; +int32_t last_output; +int32_t trtmp; +#if M3480_TEMPORAL_SCALABLE +int32_t cur_layer; +#endif + +/* Adaptive frequency weighting quantization */ +#if FREQUENCY_WEIGHTING_QUANTIZATION +int32_t weight_quant_enable_flag; +int32_t load_seq_weight_quant_data_flag; + +int32_t pic_weight_quant_enable_flag; +int32_t pic_weight_quant_data_index; +int32_t weighting_quant_param; +int32_t weighting_quant_model; +int16_t quant_param_undetail[6]; /* M2148 2007-09 */ +int16_t quant_param_detail[6]; /* M2148 2007-09 */ +int32_t WeightQuantEnable; /* M2148 2007-09 */ +int32_t mb_adapt_wq_disable; /* M2331 2008-04 */ +int32_t mb_wq_mode; /* M2331 2008-04 */ +#if CHROMA_DELTA_QP +int32_t chroma_quant_param_disable; +int32_t chroma_quant_param_delta_u; +int32_t chroma_quant_param_delta_v; +#endif + +int32_t b_pre_dec_intra_img; +int32_t pre_dec_img_type; +int32_t CurrentSceneModel; +#endif + +int32_t curr_IDRcoi; +int32_t curr_IDRtr; +int32_t next_IDRtr; +int32_t next_IDRcoi; +int32_t end_SeqTr; + +#if MB_DQP +int32_t lastQP; +/* FILE * testQP; */ +#endif + +}; +/* extern Video_Dec_data *hd; */ + +struct DecodingEnvironment_s { + uint32_t Dbuffer; + int32_t Dbits_to_go; + uint8_t *Dcodestrm; + int32_t *Dcodestrm_len; +}; + +/* added at rm52k version */ + +struct inp_par; + + + +/* ! 
Slice */ +struct slice { + int32_t picture_id; + int32_t qp; + int32_t picture_type; /* !< picture type */ + int32_t start_mb_nr; + /* !< number of different partitions */ + int32_t max_part_nr; + + /* added by lzhang */ + /* !< pointer to struct of context models for use in AEC */ + struct SyntaxInfoContexts_s *syn_ctx; +}; + +struct alfdatapart { + struct Bitstream_s *bitstream; + struct DecodingEnvironment_s de_AEC; + struct SyntaxInfoContexts_s *syn_ctx; +}; +/* static int32_t alfParAllcoated = 0; */ + +/* input parameters from configuration file */ +struct inp_par { + int32_t buf_cycle; /* <! Frame buffer size */ + int32_t ref_pic_order; /* <! ref order */ + int32_t output_dec_pic; /* <! output_dec_pic */ + int32_t profile_id; + int32_t level_id; + int32_t chroma_format; + int32_t g_uiMaxSizeInBit; + int32_t alpha_c_offset; + int32_t beta_offset; + int32_t useNSQT; +#if MB_DQP + int32_t useDQP; +#endif + int32_t useSDIP; + int32_t sao_enable; +#if M3480_TEMPORAL_SCALABLE + int32_t temporal_id_exist_flag; +#endif + int32_t alf_enable; + + int32_t crossSliceLoopFilter; + + int32_t sample_bit_depth; /* sample bit depth */ + /* decoded file bit depth (assuming output_bit_depth is + less or equal to sample_bit_depth) */ + int32_t output_bit_depth; + + + int32_t MD5Enable; + +#if OUTPUT_INTERLACE_MERGED_PIC + int32_t output_interlace_merged_picture; +#endif + +}; + +/* extern struct inp_par *input; */ + +struct outdata_s { +#if RD170_FIX_BG + struct STDOUT_DATA_s stdoutdata[REF_MAXBUFFER]; +#else + struct STDOUT_DATA_s stdoutdata[8]; +#endif + int32_t buffer_num; +}; +/* outdata outprint; */ + +#define PAYLOAD_TYPE_IDERP 8 + +struct Bitstream_s *AllocBitstream(void); +void FreeBitstream(void); +#if TRACE +void tracebits2(const int8_t *trace_str, int32_t len, int32_t info); +#endif + +/* int32_t direct_mv[45][80][4][4][3]; // only to verify result */ + +#define I_PICTURE_START_CODE 0xB3 +#define PB_PICTURE_START_CODE 0xB6 +#define SLICE_START_CODE_MIN 0x00 +#define 
SLICE_START_CODE_MAX 0x8F +#define USER_DATA_START_CODE 0xB2 +#define SEQUENCE_HEADER_CODE 0xB0 +#define EXTENSION_START_CODE 0xB5 +#define SEQUENCE_END_CODE 0xB1 +#define VIDEO_EDIT_CODE 0xB7 + + +#define SEQUENCE_DISPLAY_EXTENSION_ID 2 +#define COPYRIGHT_EXTENSION_ID 4 +#define CAMERAPARAMETERS_EXTENSION_ID 11 +#define PICTURE_DISPLAY_EXTENSION_ID 7 +#if M3480_TEMPORAL_SCALABLE +#define TEMPORAL_SCALABLE_EXTENSION_ID 3 +#endif + +#if ROI_M3264 +#if RD1501_FIX_BG +#define LOCATION_DATA_EXTENSION_ID 12 +#else +#define LOCATION_DATA_EXTENSION_ID 15 +#endif +#endif + +#if AVS2_HDR_HLS +#define MASTERING_DISPLAY_AND_CONTENT_METADATA_EXTENSION 10 +#endif + +void malloc_slice(void); +void free_slice(void); + + +void read_ipred_modes(void); + +int32_t AEC_startcode_follows(int32_t eos_bit); + +/* extern uint32_t max_value_s; */ + +/*ComAdaptiveLoopFilter.h*/ +#define ALF_MAX_NUM_COEF 9 +#define NO_VAR_BINS 16 + + +#define RPM_BEGIN 0x100 +#define ALF_BEGIN 0x180 +#define RPM_END 0x280 + +union param_u { + struct { + uint16_t data[RPM_END - RPM_BEGIN]; + } l; + struct { + /*sequence*/ + uint16_t profile_id; + uint16_t level_id; + uint16_t progressive_sequence; + uint16_t is_field_sequence; + uint16_t horizontal_size; + uint16_t vertical_size; + uint16_t chroma_format; + uint16_t sample_precision; + uint16_t encoding_precision; + uint16_t aspect_ratio_information; + uint16_t frame_rate_code; + uint16_t bit_rate_lower; + uint16_t bit_rate_upper; + uint16_t low_delay; + uint16_t temporal_id_exist_flag; + uint16_t g_uiMaxSizeInBit; + +#define BACKGROUND_PICTURE_DISABLE_BIT 11 +#define B_MHPSKIP_ENABLED_BIT 10 +#define DHP_ENABLED_BIT 9 +#define WSM_ENABLED_BIT 8 +#define INTER_AMP_ENABLE_BIT 7 +#define USENSQT_BIT 6 +#define USESDIP_BIT 5 +#define B_SECT_ENABLED_BIT 4 +#define SAO_ENABLE_BIT 3 +#define ALF_ENABLE_BIT 2 +#define B_PMVR_ENABLED_BIT 1 +#define CROSSSLICELOOPFILTER_BIT 0 + uint16_t avs2_seq_flags; + + uint16_t num_of_RPS; + uint16_t picture_reorder_delay; + 
/*PIC*/ + uint16_t time_code_flag; + uint16_t time_code; + uint16_t background_picture_flag; + uint16_t background_picture_output_flag; + uint16_t coding_order; + uint16_t cur_layer; + uint16_t displaydelay; /*???*/ + uint16_t predict; /*???*/ + uint16_t RPS_idx; /*???*/ + uint16_t referd_by_others_cur; + uint16_t num_of_ref_cur; + uint16_t ref_pic_cur[8]; + uint16_t num_to_remove_cur; + uint16_t remove_pic_cur[8]; + uint16_t progressive_frame; + uint16_t picture_structure; + uint16_t top_field_first; + uint16_t repeat_first_field; + uint16_t is_top_field; + + uint16_t picture_coding_type; + uint16_t background_pred_flag; + uint16_t background_reference_enable; + uint16_t random_access_decodable_flag; + uint16_t lcu_size; + uint16_t alpha_c_offset; + uint16_t beta_offset; + uint16_t chroma_quant_param_delta_cb; + uint16_t chroma_quant_param_delta_cr; + uint16_t loop_filter_disable; + + uint16_t video_signal_type; + uint16_t color_description; + uint16_t display_primaries_x[3]; + uint16_t display_primaries_y[3]; + uint16_t white_point_x; + uint16_t white_point_y; + uint16_t max_display_mastering_luminance; + uint16_t min_display_mastering_luminance; + uint16_t max_content_light_level; + uint16_t max_picture_average_light_level; + } p; + struct { + uint16_t padding[ALF_BEGIN - RPM_BEGIN]; + uint16_t picture_alf_enable_Y; + uint16_t picture_alf_enable_Cb; + uint16_t picture_alf_enable_Cr; + uint16_t alf_filters_num_m_1; + uint16_t region_distance[16]; + uint16_t alf_cb_coeffmulti[9]; + uint16_t alf_cr_coeffmulti[9]; + uint16_t alf_y_coeffmulti[16][9]; + } alf; +}; + + +struct avs2_decoder { + uint8_t init_hw_flag; + struct inp_par input; + struct ImageParameters_s img; + struct Video_Com_data_s hc; + struct Video_Dec_data_s hd; + union param_u param; + struct avs2_frame_s frm_pool[AVS2_MAX_BUFFER_NUM]; + struct avs2_frame_s *fref[REF_MAXBUFFER]; +#ifdef AML + /*used for background + when background_picture_output_flag is 0*/ + struct avs2_frame_s *m_bg; + /*current 
background picture, ether m_bg or fref[..]*/ + struct avs2_frame_s *f_bg; +#endif + struct outdata_s outprint; + uint32_t cm_header_start; + struct ALFParam_s m_alfPictureParam[NUM_ALF_COMPONENT]; +#ifdef FIX_CHROMA_FIELD_MV_BK_DIST + int8_t bk_img_is_top_field; +#endif +#ifdef AML + int32_t lcu_size; + int32_t lcu_size_log2; + int32_t lcu_x_num; + int32_t lcu_y_num; + int32_t lcu_total; + int32_t ref_maxbuffer; + int32_t to_prepare_disp_count; + int8_t bufmgr_error_flag; +#endif +}; + + +extern void write_frame(struct avs2_decoder *avs2_dec, int32_t pos); +extern void init_frame_t(struct avs2_frame_s *currfref); +extern void report_frame(struct avs2_decoder *avs2_dec, + struct outdata_s *data, int32_t pos); + +extern int avs2_post_process(struct avs2_decoder *avs2_dec); +extern void avs2_prepare_header(struct avs2_decoder *avs2_dec, + int32_t start_code); +extern int32_t avs2_process_header(struct avs2_decoder *avs2_dec); + +extern void init_avs2_decoder(struct avs2_decoder *avs2_dec); + +extern int32_t avs2_init_global_buffers(struct avs2_decoder *avs2_dec); + +extern bool is_avs2_print_param(void); +extern bool is_avs2_print_bufmgr_detail(void); +#endif +
diff --git a/drivers/frame_provider/decoder/avs2/vavs2.c b/drivers/frame_provider/decoder/avs2/vavs2.c new file mode 100644 index 0000000..e21dc55 --- /dev/null +++ b/drivers/frame_provider/decoder/avs2/vavs2.c
@@ -0,0 +1,8623 @@ + /* + * drivers/amlogic/amports/avs2.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * +*/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/frame_sync/tsync.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/slab.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include "avs2_global.h" + +#define MEM_NAME "codec_avs2" +/* #include <mach/am_regs.h> */ +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../utils/vdec.h" +#include "../utils/amvdec.h" + +#include 
<linux/amlogic/media/video_sink/video.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../utils/config_parser.h" +#include "../utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../utils/vdec_feature.h" + +#define I_ONLY_SUPPORT +#define MIX_STREAM_SUPPORT +#define CONSTRAIN_MAX_BUF_NUM + +#define CO_MV_COMPRESS + +#include "vavs2.h" +#define HEVC_SHIFT_LENGTH_PROTECT 0x313a +#define HEVC_MPRED_CTRL4 0x324c +#define HEVC_MPRED_CTRL9 0x325b +#define HEVC_DBLK_CFGD 0x350d +#define HEVC_CM_HEADER_START_ADDR 0x3628 +#define HEVC_DBLK_CFGB 0x350b +#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464 +#define HEVC_SAO_MMU_VH1_ADDR 0x363b +#define HEVC_SAO_MMU_VH0_ADDR 0x363a + +#define HEVC_CM_BODY_LENGTH2 0x3663 +#define HEVC_CM_HEADER_OFFSET2 0x3664 +#define HEVC_CM_HEADER_LENGTH2 0x3665 + +#define HEVC_ASSIST_MMU_MAP_ADDR 0x3009 + +#define HEVC_CM_HEADER_START_ADDR 0x3628 +#define HEVC_CM_HEADER_START_ADDR2 0x364a +#define HEVC_SAO_MMU_VH1_ADDR 0x363b +#define HEVC_SAO_MMU_VH0_ADDR 0x363a +#define HEVC_SAO_MMU_VH0_ADDR2 0x364d +#define HEVC_SAO_MMU_VH1_ADDR2 0x364e + +#define HEVC_SAO_MMU_DMA_CTRL2 0x364c +#define HEVC_SAO_MMU_STATUS2 0x3650 +#define HEVC_DW_VH0_ADDDR 0x365e +#define HEVC_DW_VH1_ADDDR 0x365f + +#define HEVC_SAO_CTRL9 0x362d + + + +/* + * AVS2_DEC_STATUS define +*/ +/*internal*/ +#define AVS2_DEC_IDLE 0 +#define AVS2_SEQUENCE 1 +#define AVS2_I_PICTURE 2 +#define AVS2_PB_PICTURE 3 +#define AVS2_DISCARD_STARTCODE 4 +#define AVS2_DISCARD_NAL 4 + +#define AVS2_SLICE_DECODING 6 + +#define SWAP_IN_CMD 0x10 +#define SWAP_OUT_CMD 0x11 +#define SWAP_OUTIN_CMD 0x12 +#define SWAP_DONE 0x13 +#define SWAP_POST_INIT 0x14 + +/*head*/ +#define AVS2_HEAD_SEQ_READY 0x21 +#define AVS2_HEAD_PIC_I_READY 0x22 +#define AVS2_HEAD_PIC_PB_READY 0x23 +#define AVS2_HEAD_SEQ_END_READY 0x24 +#define AVS2_STARTCODE_SEARCH_DONE 0x25 + +/*pic done*/ +#define HEVC_DECPIC_DATA_DONE 0x30 +#define HEVC_DECPIC_DATA_ERROR 0x31 +#define 
HEVC_NAL_DECODE_DONE 0x32 +#define AVS2_DECODE_BUFEMPTY 0x33 +#define AVS2_DECODE_TIMEOUT 0x34 +#define AVS2_DECODE_OVER_SIZE 0x35 +#define AVS2_EOS 0x36 + +/*cmd*/ +#define AVS2_10B_DISCARD_NAL 0xf0 +#define AVS2_SEARCH_NEW_PIC 0xf1 +#define AVS2_ACTION_ERROR 0xfe +#define HEVC_ACTION_ERROR 0xfe +#define AVS2_ACTION_DONE 0xff +/*AVS2_DEC_STATUS end*/ + + +#define VF_POOL_SIZE 32 + +#undef pr_info +#define pr_info printk + +#define DECODE_MODE_SINGLE (0 | (0x80 << 24)) +#define DECODE_MODE_MULTI_STREAMBASE (1 | (0x80 << 24)) +#define DECODE_MODE_MULTI_FRAMEBASE (2 | (0x80 << 24)) + + +#define VP9_TRIGGER_FRAME_DONE 0x100 +#define VP9_TRIGGER_FRAME_ENABLE 0x200 + +/*#define MV_MEM_UNIT 0x240*/ +#define MV_MEM_UNIT 0x200 +/*--------------------------------------------------- + Include "parser_cmd.h" +---------------------------------------------------*/ +#define PARSER_CMD_SKIP_CFG_0 0x0000090b + +#define PARSER_CMD_SKIP_CFG_1 0x1b14140f + +#define PARSER_CMD_SKIP_CFG_2 0x001b1910 + + +#define PARSER_CMD_NUMBER 37 + +static unsigned short parser_cmd[PARSER_CMD_NUMBER] = { +0x0401, +0x8401, +0x0800, +0x0402, +0x9002, +0x1423, +0x8CC3, +0x1423, +0x8804, +0x9825, +0x0800, +0x04FE, +0x8406, +0x8411, +0x1800, +0x8408, +0x8409, +0x8C2A, +0x9C2B, +0x1C00, +0x840F, +0x8407, +0x8000, +0x8408, +0x2000, +0xA800, +0x8410, +0x04DE, +0x840C, +0x840D, +0xAC00, +0xA000, +0x08C0, +0x08E0, +0xA40E, +0xFC00, +0x7C00 +}; + +static int32_t g_WqMDefault4x4[16] = { + 64, 64, 64, 68, + 64, 64, 68, 72, + 64, 68, 76, 80, + 72, 76, 84, 96 +}; + + +static int32_t g_WqMDefault8x8[64] = { + 64, 64, 64, 64, 68, 68, 72, 76, + 64, 64, 64, 68, 72, 76, 84, 92, + 64, 64, 68, 72, 76, 80, 88, 100, + 64, 68, 72, 80, 84, 92, 100, 112, + 68, 72, 80, 84, 92, 104, 112, 128, + 76, 80, 84, 92, 104, 116, 132, 152, + 96, 100, 104, 116, 124, 140, 164, 188, + 104, 108, 116, 128, 152, 172, 192, 216 +}; +/*#define HEVC_PIC_STRUCT_SUPPORT*/ +/* to remove, fix build error */ + +/*#define CODEC_MM_FLAGS_FOR_VDECODER 0*/ 
+ +#define MULTI_INSTANCE_SUPPORT +/* #define ERROR_HANDLE_DEBUG */ + +#ifndef STAT_KTHREAD +#define STAT_KTHREAD 0x40 +#endif + +#ifdef MULTI_INSTANCE_SUPPORT +#define MAX_DECODE_INSTANCE_NUM 12 +#define MULTI_DRIVER_NAME "ammvdec_avs2" + +#define lock_buffer(dec, flags) \ + spin_lock_irqsave(&dec->buffer_lock, flags) + +#define unlock_buffer(dec, flags) \ + spin_unlock_irqrestore(&dec->buffer_lock, flags) + +static u32 debug_mask = 0xffffffff; +#define get_dbg_flag(dec) ((debug_mask & (1 << dec->index)) ? debug : 0) + +static unsigned int max_decode_instance_num + = MAX_DECODE_INSTANCE_NUM; +static unsigned int decode_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int display_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int max_process_time[MAX_DECODE_INSTANCE_NUM]; +static unsigned int run_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int input_empty[MAX_DECODE_INSTANCE_NUM]; +static unsigned int not_run_ready[MAX_DECODE_INSTANCE_NUM]; + +static u32 decode_timeout_val = 200; + +static int start_decode_buf_level = 0x8000; + +static u32 work_buf_size; + +static u32 mv_buf_margin; +static int pre_decode_buf_level = 0x1000; +static u32 again_threshold; + + +/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */ +/* double_write_mode: + * 0, no double write; + * 1, 1:1 ratio; + * 2, (1/4):(1/4) ratio; + * 3, (1/4):(1/4) ratio, with both compressed frame included + * 4, (1/2):(1/2) ratio; + * 8, (1/8):(1/8) ratio; + * 0x10, double write only + * 0x100, if > 1080p,use mode 4,else use mode 1; + * 0x200, if > 1080p,use mode 2,else use mode 1; + * 0x300, if > 720p, use mode 4, else use mode 1; + */ +static u32 double_write_mode; +static u32 without_display_mode; + +static u32 mv_buf_dynamic_alloc; + +#define DRIVER_NAME "amvdec_avs2" +#define DRIVER_HEADER_NAME "amvdec_avs2_header" + + +#define PUT_INTERVAL (HZ/100) +#define ERROR_SYSTEM_RESET_COUNT 200 + +#define PTS_NORMAL 0 +#define PTS_NONE_REF_USE_DURATION 1 + +#define 
PTS_MODE_SWITCHING_THRESHOLD 3 +#define PTS_MODE_SWITCHING_RECOVERY_THREASHOLD 3 + +#define DUR2PTS(x) ((x)*90/96) + +struct AVS2Decoder_s; +static int vavs2_vf_states(struct vframe_states *states, void *); +static struct vframe_s *vavs2_vf_peek(void *); +static struct vframe_s *vavs2_vf_get(void *); +static void vavs2_vf_put(struct vframe_s *, void *); +static int vavs2_event_cb(int type, void *data, void *private_data); +static void set_vframe(struct AVS2Decoder_s *dec, + struct vframe_s *vf, struct avs2_frame_s *pic, u8 dummy); + +static int vavs2_stop(struct AVS2Decoder_s *dec); +static s32 vavs2_init(struct vdec_s *vdec); +static void vavs2_prot_init(struct AVS2Decoder_s *dec); +static int vavs2_local_init(struct AVS2Decoder_s *dec); +static void vavs2_put_timer_func(struct timer_list *timer); +static void dump_data(struct AVS2Decoder_s *dec, int size); +static unsigned char get_data_check_sum + (struct AVS2Decoder_s *dec, int size); +static void dump_pic_list(struct AVS2Decoder_s *dec); + +static const char vavs2_dec_id[] = "vavs2-dev"; + +#define PROVIDER_NAME "decoder.avs2" +#define MULTI_INSTANCE_PROVIDER_NAME "vdec.avs2" + +static const struct vframe_operations_s vavs2_vf_provider = { + .peek = vavs2_vf_peek, + .get = vavs2_vf_get, + .put = vavs2_vf_put, + .event_cb = vavs2_event_cb, + .vf_states = vavs2_vf_states, +}; + +static struct vframe_provider_s vavs2_vf_prov; + +static u32 bit_depth_luma; +static u32 bit_depth_chroma; +static u32 frame_width; +static u32 frame_height; +static u32 video_signal_type; +static u32 pts_unstable; +static u32 on_no_keyframe_skiped; + +static u32 force_video_signal_type; +static u32 enable_force_video_signal_type; +#define VIDEO_SIGNAL_TYPE_AVAILABLE_MASK 0x20000000 +#define HDR_CUVA_MASK 0x40000000 + + +static const char * const video_format_names[] = { + "component", "PAL", "NTSC", "SECAM", + "MAC", "unspecified", "Reserved", "Reserved" +}; + +static inline int div_r32(int64_t m, int n) +{ +/* +return (int)(m/n) +*/ 
+#ifndef CONFIG_ARM64 + int64_t qu = 0; + qu = div_s64(m, n); + return (int)qu; +#else + return (int)(m/n); +#endif +} + +enum vpx_bit_depth_t { + AVS2_BITS_8 = 8, /**< 8 bits */ + AVS2_BITS_10 = 10, /**< 10 bits */ + AVS2_BITS_12 = 12, /**< 12 bits */ +}; + +/*USE_BUF_BLOCK*/ +struct BUF_s { + int index; + unsigned int alloc_flag; + /*buffer */ + unsigned int cma_page_count; + unsigned long alloc_addr; + unsigned long start_adr; + unsigned int size; + + unsigned int free_start_adr; +} /*BUF_t */; + +struct MVBUF_s { + unsigned long start_adr; + unsigned int size; + int used_flag; +} /*MVBUF_t */; + + /* #undef BUFMGR_ONLY to enable hardware configuration */ + +/*#define TEST_WR_PTR_INC*/ +#define WR_PTR_INC_NUM 128 + +#define SIMULATION +#define DOS_PROJECT +#undef MEMORY_MAP_IN_REAL_CHIP + +/*#undef DOS_PROJECT*/ +/*#define MEMORY_MAP_IN_REAL_CHIP*/ + +/*#define BUFFER_MGR_ONLY*/ +/*#define CONFIG_HEVC_CLK_FORCED_ON*/ +/*#define ENABLE_SWAP_TEST*/ + +#ifdef AVS2_10B_NV21 +#define MEM_MAP_MODE 2 /* 0:linear 1:32x32 2:64x32*/ +#else +#define MEM_MAP_MODE 0 /* 0:linear 1:32x32 2:64x32*/ +#endif + +#ifdef AVS2_10B_NV21 +#else +#define LOSLESS_COMPRESS_MODE +#endif + +#define DOUBLE_WRITE_YSTART_TEMP 0x02000000 +#define DOUBLE_WRITE_CSTART_TEMP 0x02900000 + +#define AVS2_DBG_BUFMGR 0x01 +#define AVS2_DBG_BUFMGR_MORE 0x02 +#define AVS2_DBG_BUFMGR_DETAIL 0x04 +#define AVS2_DBG_IRQ_EVENT 0x08 +#define AVS2_DBG_OUT_PTS 0x10 +#define AVS2_DBG_PRINT_SOURCE_LINE 0x20 +#define AVS2_DBG_PRINT_PARAM 0x40 +#define AVS2_DBG_PRINT_PIC_LIST 0x80 +#define AVS2_DBG_SEND_PARAM_WITH_REG 0x100 +#define AVS2_DBG_MERGE 0x200 +#define AVS2_DBG_DBG_LF_PRINT 0x400 +#define AVS2_DBG_REG 0x800 +#define AVS2_DBG_PIC_LEAK 0x1000 +#define AVS2_DBG_PIC_LEAK_WAIT 0x2000 +#define AVS2_DBG_HDR_INFO 0x4000 +#define AVS2_DBG_HDR_DATA 0x8000 +#define AVS2_DBG_DIS_LOC_ERROR_PROC 0x10000 +#define AVS2_DBG_DIS_SYS_ERROR_PROC 0x20000 +#define AVS2_DBG_DUMP_PIC_LIST 0x40000 +#define 
AVS2_DBG_TRIG_SLICE_SEGMENT_PROC 0x80000 +#define AVS2_DBG_FORCE_UNCOMPRESS 0x100000 +#define AVS2_DBG_LOAD_UCODE_FROM_FILE 0x200000 +#define AVS2_DBG_FORCE_SEND_AGAIN 0x400000 +#define AVS2_DBG_DUMP_DATA 0x800000 +#define AVS2_DBG_DUMP_LMEM_BUF 0x1000000 +#define AVS2_DBG_DUMP_RPM_BUF 0x2000000 +#define AVS2_DBG_CACHE 0x4000000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 +/*MULTI_INSTANCE_SUPPORT*/ +#define PRINT_FLAG_ERROR 0 +#define PRINT_FLAG_VDEC_STATUS 0x20000000 +#define PRINT_FLAG_VDEC_DETAIL 0x40000000 +#define PRINT_FLAG_VDEC_DATA 0x80000000 + +#define PRINT_LINE() \ + do { \ + if (debug & AVS2_DBG_PRINT_SOURCE_LINE)\ + pr_info("%s line %d\n", __func__, __LINE__);\ + } while (0) + +static u32 debug; + +static u32 debug_again; + +bool is_avs2_print_param(void) +{ + bool ret = false; + if (debug & AVS2_DBG_PRINT_PARAM) + ret = true; + return ret; +} + +bool is_avs2_print_bufmgr_detail(void) +{ + bool ret = false; + if (debug & AVS2_DBG_BUFMGR_DETAIL) + ret = true; + return ret; +} +static bool is_reset; +/*for debug*/ +static u32 force_bufspec; +/* + udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode detail print + bit [31:16] not 0, pos to dump lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 udebug_pause_decode_idx; + +static u32 force_disp_pic_index; + +#define AUX_BUF_ALIGN(adr) ((adr + 0xf) & (~0xf)) +static u32 cuva_buf_size = 512; + +#define DEBUG_REG +#ifdef DEBUG_REG +static void WRITE_VREG_DBG2(unsigned adr, unsigned val) +{ + if (debug & AVS2_DBG_REG) + pr_info("%s(%x, %x)\n", __func__, adr, val); + if (adr != 0) + WRITE_VREG(adr, val); +} + +#undef WRITE_VREG +#define WRITE_VREG 
WRITE_VREG_DBG2 +#endif + +#define MMU_COMPRESS_HEADER_SIZE_1080P 0x10000 +#define MMU_COMPRESS_HEADER_SIZE_4K 0x48000 +#define MMU_COMPRESS_HEADER_SIZE_8K 0x120000 + +#define INVALID_IDX -1 /* Invalid buffer index.*/ + + +#define FRAME_BUFFERS (AVS2_MAX_BUFFER_NUM) +#define HEADER_FRAME_BUFFERS (FRAME_BUFFERS) +#define MAX_BUF_NUM (FRAME_BUFFERS) + +#define FRAME_CONTEXTS_LOG2 2 +#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2) +/*buffer + header buffer + workspace*/ + +#ifdef MV_USE_FIXED_BUF +#define MAX_BMMU_BUFFER_NUM ((FRAME_BUFFERS + HEADER_FRAME_BUFFERS + 1)+1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n+1) +#define WORK_SPACE_BUF_ID (FRAME_BUFFERS + HEADER_FRAME_BUFFERS+1) +#else +#define MAX_BMMU_BUFFER_NUM (((FRAME_BUFFERS*2)+HEADER_FRAME_BUFFERS+1)+1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n+1) +#define MV_BUFFER_IDX(n) ((FRAME_BUFFERS * 2) + n+1) +#define WORK_SPACE_BUF_ID ((FRAME_BUFFERS * 2) + HEADER_FRAME_BUFFERS+1) +//#define DW_HEADER_BUFFER_IDX(n) ((FRAME_BUFFERS * 3) + n+1) +#endif + +#define CO_MV_BUF_SIZE_1080P 0x3fc00 +#define CO_MV_BUF_SIZE_4K 0x120000 +#define CO_MV_BUF_SIZE_8K 0x480000 +/* +static void set_canvas(struct AVS2Decoder_s *dec, + struct avs2_frame_s *pic); +int avs2_prepare_display_buf(struct AVS2Decoder_s *dec, + int pos); +*/ + + +struct buff_s { + u32 buf_start; + u32 buf_size; + u32 buf_end; +}; + +struct BuffInfo_s { + u32 max_width; + u32 max_height; + u32 start_adr; + u32 end_adr; + struct buff_s ipp; + struct buff_s sao_abv; + struct buff_s sao_vb; + struct buff_s short_term_rps; + struct buff_s rcs; + struct buff_s sps; + struct buff_s pps; + struct buff_s sao_up; + struct buff_s swap_buf; + struct buff_s swap_buf2; + struct buff_s scalelut; + struct buff_s dblk_para; + struct buff_s dblk_data; + struct buff_s dblk_data2; +#ifdef AVS2_10B_MMU + struct buff_s mmu_vbh; + struct buff_s cm_header; +#endif +#ifdef AVS2_10B_MMU_DW + struct buff_s 
mmu_vbh_dw; + struct buff_s cm_header_dw; +#endif + struct buff_s mpred_above; +#ifdef MV_USE_FIXED_BUF + struct buff_s mpred_mv; +#endif + struct buff_s rpm; + struct buff_s lmem; +}; + +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_CONFIG_PARAM 3 +#define DEC_RESULT_ERROR 4 +#define DEC_INIT_PICLIST 5 +#define DEC_UNINIT_PICLIST 6 +#define DEC_RESULT_GET_DATA 7 +#define DEC_RESULT_GET_DATA_RETRY 8 +#define DEC_RESULT_EOS 9 +#define DEC_RESULT_FORCE_EXIT 10 + +static void avs2_work(struct work_struct *work); +struct loop_filter_info_n; +struct loopfilter; +struct segmentation; + +struct AVS2Decoder_s { + int pic_list_init_flag; + unsigned char index; + spinlock_t buffer_lock; + struct device *cma_dev; + struct platform_device *platform_dev; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + struct vframe_chunk_s *chunk; + int dec_result; + struct work_struct work; + u32 start_shift_bytes; + + struct BuffInfo_s work_space_buf_store; + unsigned long buf_start; + u32 buf_size; + u32 cma_alloc_count; + unsigned long cma_alloc_addr; + uint8_t eos; + unsigned long int start_process_time; + unsigned last_lcu_idx; + int decode_timeout_count; + unsigned timeout_num; + + int double_write_mode; + + unsigned char m_ins_flag; + char *provider_name; + int frame_count; + u32 stat; + struct timer_list timer; + u32 frame_dur; + u32 frame_ar; + u32 vavs2_ratio; + int fatal_error; + uint8_t init_flag; + uint8_t first_sc_checked; + uint8_t process_busy; +#define PROC_STATE_INIT 0 +#define PROC_STATE_HEAD_DONE 1 +#define PROC_STATE_DECODING 2 +#define PROC_STATE_HEAD_AGAIN 3 +#define PROC_STATE_DECODE_AGAIN 4 +#define PROC_STATE_TEST1 5 + uint8_t process_state; + u32 ucode_pause_pos; + + int show_frame_num; +#ifndef AVS2_10B_MMU + struct buff_s mc_buf_spec; +#endif + struct dec_sysinfo vavs2_amstream_dec_info; + void *rpm_addr; + void *lmem_addr; + dma_addr_t rpm_phy_addr; + dma_addr_t lmem_phy_addr; + unsigned 
short *lmem_ptr; + unsigned short *debug_ptr; + +#ifdef AVS2_10B_MMU + bool mmu_enable; + + u32 cuva_size; + void *cuva_addr; + dma_addr_t cuva_phy_addr; + + void *frame_mmu_map_addr; + dma_addr_t frame_mmu_map_phy_addr; +#endif +#ifdef AVS2_10B_MMU_DW + bool dw_mmu_enable; + void *dw_mmu_box; + void *dw_frame_mmu_map_addr; + dma_addr_t dw_frame_mmu_map_phy_addr; +#endif + unsigned int use_cma_flag; + + struct BUF_s m_BUF[MAX_BUF_NUM]; + struct MVBUF_s m_mv_BUF[MAX_BUF_NUM]; + u32 used_buf_num; + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(pending_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + u32 vf_pre_count; + u32 vf_get_count; + u32 vf_put_count; + int buf_num; + unsigned int losless_comp_body_size; + + u32 video_signal_type; + u32 video_ori_signal_type; + + int pts_mode; + int last_lookup_pts; + int last_pts; + u64 last_lookup_pts_us64; + u64 last_pts_us64; + u64 shift_byte_count; + u32 shift_byte_count_lo; + u32 shift_byte_count_hi; + int pts_mode_switching_count; + int pts_mode_recovery_count; + + bool get_frame_dur; + u32 saved_resolution; + + /**/ + int refresh_frame_flags; + uint8_t hold_ref_buf; + struct BuffInfo_s *work_space_buf; +#ifndef AVS2_10B_MMU + struct buff_s *mc_buf; +#endif + unsigned int frame_width; + unsigned int frame_height; + + unsigned short *rpm_ptr; + int init_pic_w; + int init_pic_h; + + int slice_type; + + int decode_idx; + int slice_idx; + uint8_t wait_buf; + uint8_t error_flag; + unsigned int bufmgr_error_count; + + /* bit 0, for decoding; bit 1, for displaying */ + uint8_t ignore_bufmgr_error; + uint8_t skip_PB_before_I; + int PB_skip_mode; + int PB_skip_count_after_decoding; + /*hw*/ + + /**/ + struct vdec_info *gvs; + + + unsigned int dec_status; + u32 last_put_idx; + int new_frame_displayed; + void *mmu_box; + void *bmmu_box; + struct vframe_master_display_colour_s vf_dp; + struct firmware_s *fw; 
+#ifdef AVS2_10B_MMU + int cur_fb_idx_mmu; + long used_4k_num; +#endif + struct avs2_decoder avs2_dec; +#define ALF_NUM_BIT_SHIFT 6 +#define NO_VAR_BINS 16 + int32_t m_filterCoeffSym[16][9]; + int32_t m_varIndTab[NO_VAR_BINS]; + + struct vframe_s vframe_dummy; + /* start_decoding_flag, + bit 0, SEQ ready + bit 1, I ready + */ + unsigned char start_decoding_flag; + uint32_t mpred_abv_start_addr; + uint32_t mpred_abv_start_addr_bak; + u8 next_again_flag; + u32 pre_parser_wr_ptr; + int need_cache_size; + u64 sc_start_time; +#ifdef I_ONLY_SUPPORT + u32 i_only; +#endif + int frameinfo_enable; + struct vframe_qos_s vframe_qos; + u32 dynamic_buf_margin; + int sidebind_type; + int sidebind_channel_id; + u32 endian; + char vdec_name[32]; + char pts_name[32]; + char new_q_name[32]; + char disp_q_name[32]; + dma_addr_t rdma_phy_adr; + unsigned *rdma_adr; + int hdr_flag; +}; + +static int compute_losless_comp_body_size( + struct AVS2Decoder_s *dec, int width, int height, + uint8_t is_bit_depth_10); + +static int avs2_print(struct AVS2Decoder_s *dec, + int flag, const char *fmt, ...) +{ +#define HEVC_PRINT_BUF 256 + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + if (dec == NULL || + (flag == 0) || + (debug & flag)) { + va_list args; + va_start(args, fmt); + if (dec) + len = sprintf(buf, "[%d]", dec->index); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; +} + +static int avs2_print_cont(struct AVS2Decoder_s *dec, + int flag, const char *fmt, ...) 
{
	unsigned char buf[HEVC_PRINT_BUF];
	int len = 0;

	/* Continuation-style debug print: unlike avs2_print() above, no
	 * "[index]" prefix is written (len stays 0), so the text appends
	 * to a previously printed line.  Prints when dec is NULL, flag is
	 * 0 (unconditional), or the flag bit is set in the debug mask.
	 */
	if (dec == NULL ||
		(flag == 0) ||
		(debug & flag)) {
		va_list args;

		va_start(args, fmt);
		vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args);
		pr_info("%s", buf);
		va_end(args);
	}
	return 0;
}

#define PROB_SIZE	(496 * 2 * 4)
#define PROB_BUF_SIZE	(0x5000)
#define COUNT_BUF_SIZE	(0x300 * 4 * 4)
/*compute_losless_comp_body_size(4096, 2304, 1) = 18874368(0x1200000)*/
#define MAX_FRAME_4K_NUM	0x1200
#define MAX_FRAME_8K_NUM	0x4800
#define MAX_SIZE_4K		(4096 * 2304)
/* NOTE(review): IS_8K_SIZE/IS_4K_SIZE are #defined again, identically,
 * later in this file (just before get_mv_buf_size) - redundant but benign.
 */
#define IS_8K_SIZE(w, h)	(((w) * (h)) > MAX_SIZE_4K)
#define IS_4K_SIZE(w, h)	(((w) * (h)) > (1920*1088))

/*
 * Byte size of the frame MMU map buffer: 4 bytes per 4K-page entry.
 * SM1-and-later chips decoding 8K streams get the larger table.
 */
static int get_frame_mmu_map_size(struct AVS2Decoder_s *dec)
{
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		(IS_8K_SIZE(dec->init_pic_w, dec->init_pic_h)))
		return (MAX_FRAME_8K_NUM * 4);
	return (MAX_FRAME_4K_NUM * 4);
}

/*
 * Compressed-frame header buffer size keyed off the initial picture
 * dimensions: 8K (SM1+ chips only) > 4K > 1080p.
 */
static int get_compress_header_size(struct AVS2Decoder_s *dec)
{
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		(IS_8K_SIZE(dec->init_pic_w, dec->init_pic_h)))
		return MMU_COMPRESS_HEADER_SIZE_8K;
	else if (IS_4K_SIZE(dec->init_pic_w, dec->init_pic_h))
		return MMU_COMPRESS_HEADER_SIZE_4K;
	return MMU_COMPRESS_HEADER_SIZE_1080P;
}

/*
 * Close the per-picture decode timing window: convert the elapsed
 * jiffies to milliseconds and track the per-instance maximum in
 * max_process_time[].
 */
static void reset_process_time(struct AVS2Decoder_s *dec)
{
	if (dec->start_process_time) {
		unsigned process_time =
			1000 * (jiffies - dec->start_process_time) / HZ;
		dec->start_process_time = 0;
		if (process_time > max_process_time[dec->index])
			max_process_time[dec->index] = process_time;
	}
}

/* Open the decode timing window and reset timeout bookkeeping. */
static void start_process_time(struct AVS2Decoder_s *dec)
{
	dec->start_process_time = jiffies;
	dec->decode_timeout_count = 0;
	dec->last_lcu_idx = 0;
}

static void update_decoded_pic(struct AVS2Decoder_s *dec);

/*
 * Decode-timeout handler: stops the amhevc core (amhevc_stop()), marks
 * the current picture as errored and finishes the decode through the
 * work queue.  (Function body continues on the next line of this file.)
 */
static void timeout_process(struct AVS2Decoder_s *dec)
{
	struct avs2_decoder *avs2_dec = &dec->avs2_dec;
	struct avs2_frame_s *cur_pic = avs2_dec->hc.cur_pic;

	dec->timeout_num++;
	amhevc_stop();
	avs2_print(dec,
		0, "%s decoder timeout\n", __func__);
	if (cur_pic)
		cur_pic->error_mark = 1;
	dec->dec_result = DEC_RESULT_DONE;
	update_decoded_pic(dec);
	reset_process_time(dec);
	vdec_schedule_work(&dec->work);
}

/*
 * Resolve the effective double-write mode.  The per-instance setting is
 * used unless bit31 of the double_write_mode module parameter forces the
 * global value.  Mode bit 0x20 with submode 2/3 is only honoured from
 * T3 chips on; earlier chips fall back to 0.
 */
static u32 get_valid_double_write_mode(struct AVS2Decoder_s *dec)
{
	u32 dw_mode;

	dw_mode = (dec->m_ins_flag &&
		((double_write_mode & 0x80000000) == 0)) ?
		dec->double_write_mode :
		(double_write_mode & 0x7fffffff);
	if (dw_mode & 0x20) {
		if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T3)
			&& ((dw_mode & 0xf) == 2 || (dw_mode & 0xf) == 3))
			dw_mode = 0;
	}

	return dw_mode;
}

/*
 * Map the configured mode to the actual double-write ratio for the
 * *current* picture size.  0x100/0x200/0x300 are "auto" modes that only
 * downscale above 1080p (0x100/0x200) resp. 720p (0x300); any other
 * value is passed through as-is.
 */
static int get_double_write_mode(struct AVS2Decoder_s *dec)
{
	u32 valid_dw_mode = get_valid_double_write_mode(dec);
	int w = dec->avs2_dec.img.width;
	int h = dec->avs2_dec.img.height;
	u32 dw = 0x1; /*1:1*/

	switch (valid_dw_mode) {
	case 0x100:
		if (w > 1920 && h > 1088)
			dw = 0x4; /*1:2*/
		break;
	case 0x200:
		if (w > 1920 && h > 1088)
			dw = 0x2; /*1:4*/
		break;
	case 0x300:
		if (w > 1280 && h > 720)
			dw = 0x4; /*1:2*/
		break;
	default:
		dw = valid_dw_mode;
		break;
	}
	return dw;
}

/* for double write buf alloc */
/*
 * Same mapping as get_double_write_mode(), but evaluated against the
 * initial picture size (init_pic_w/h) so double-write buffers can be
 * sized before the first frame is decoded.
 */
static int get_double_write_mode_init(struct AVS2Decoder_s *dec)
{
	u32 valid_dw_mode = get_valid_double_write_mode(dec);
	u32 dw;
	int w = dec->init_pic_w;
	int h = dec->init_pic_h;

	dw = 0x1; /*1:1*/
	switch (valid_dw_mode) {
	case 0x100:
		if (w > 1920 && h > 1088)
			dw = 0x4; /*1:2*/
		break;
	case 0x200:
		if (w > 1920 && h > 1088)
			dw = 0x2; /*1:4*/
		break;
	case 0x300:
		if (w > 1280 && h > 720)
			dw = 0x4; /*1:2*/
		break;
	default:
		dw = valid_dw_mode;
		break;
	}
	return dw;
}

//#define MAX_4K_NUM 0x1200
#ifdef AVS2_10B_MMU
/*
 * Allocate MMU map entries for one frame buffer: one 4K page per 4KB of
 * losslessly-compressed body data, via the shared mmu_box.
 * (Function body continues on the next line of this file.)
 */
int avs2_alloc_mmu(
	struct AVS2Decoder_s *dec,
	int cur_buf_idx,
	int pic_width,
	int pic_height,
	unsigned short bit_depth,
	unsigned int *mmu_index_adr)
{
	int bit_depth_10 = (bit_depth == AVS2_BITS_10);
	int picture_size;
	int
cur_mmu_4k_number, max_frame_num;

	picture_size = compute_losless_comp_body_size(
		dec, pic_width, pic_height,
		bit_depth_10);
	/* round the compressed body size up to whole 4K pages */
	cur_mmu_4k_number = ((picture_size + (1 << 12) - 1) >> 12);
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)
		max_frame_num = MAX_FRAME_8K_NUM;
	else
		max_frame_num = MAX_FRAME_4K_NUM;
	if (cur_mmu_4k_number > max_frame_num) {
		pr_err("over max !! cur_mmu_4k_number 0x%x width %d height %d\n",
			cur_mmu_4k_number, pic_width, pic_height);
		return -1;
	}
	return decoder_mmu_box_alloc_idx(
		dec->mmu_box,
		cur_buf_idx,
		cur_mmu_4k_number,
		mmu_index_adr);
}
#endif

#ifdef AVS2_10B_MMU_DW
/*
 * Double-write twin of avs2_alloc_mmu(): identical page math, but the
 * indices are allocated from the dedicated dw_mmu_box.
 */
int avs2_alloc_dw_mmu(
	struct AVS2Decoder_s *dec,
	int cur_buf_idx,
	int pic_width,
	int pic_height,
	unsigned short bit_depth,
	unsigned int *mmu_index_adr)
{
	int bit_depth_10 = (bit_depth == AVS2_BITS_10);
	int picture_size;
	int cur_mmu_4k_number, max_frame_num;

	picture_size = compute_losless_comp_body_size(
		dec, pic_width, pic_height,
		bit_depth_10);
	cur_mmu_4k_number = ((picture_size + (1 << 12) - 1) >> 12);
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)
		max_frame_num = MAX_FRAME_8K_NUM;
	else
		max_frame_num = MAX_FRAME_4K_NUM;
	if (cur_mmu_4k_number > max_frame_num) {
		pr_err("over max !! cur_mmu_4k_number 0x%x width %d height %d\n",
			cur_mmu_4k_number, pic_width, pic_height);
		return -1;
	}
	return decoder_mmu_box_alloc_idx(
		dec->dw_mmu_box,
		cur_buf_idx,
		cur_mmu_4k_number,
		mmu_index_adr);
}
#endif

/*
 * Count reference-pool entries that are fully idle: not referenced
 * (imgcoi_ref < -256), not pending output, not a background frame and -
 * when display is compiled in - neither held by nor queued for display.
 */
static int get_free_buf_count(struct AVS2Decoder_s *dec)
{
	struct avs2_decoder *avs2_dec = &dec->avs2_dec;
	int i;
	int count = 0;

	for (i = 0; i < avs2_dec->ref_maxbuffer; i++) {
		if ((avs2_dec->fref[i]->imgcoi_ref < -256
#if 0
			|| abs(avs2_dec->fref[i]->
				imgtr_fwRefDistance - img->tr) >= 128
#endif
			) && avs2_dec->fref[i]->is_output == -1
			&& avs2_dec->fref[i]->bg_flag == 0
#ifndef NO_DISPLAY
			&& avs2_dec->fref[i]->vf_ref == 0
			&& avs2_dec->fref[i]->to_prepare_disp == 0
#endif
			) {
			count++;
		}
	}

	return count;
}

#ifdef CONSTRAIN_MAX_BUF_NUM
/*
 * Like get_free_buf_count(), but counts buffers whose only remaining
 * user is the display path (vf_ref > 0, everything else idle).
 */
static int get_vf_ref_only_buf_count(struct AVS2Decoder_s *dec)
{
	struct avs2_decoder *avs2_dec = &dec->avs2_dec;
	int i;
	int count = 0;

	for (i = 0; i < avs2_dec->ref_maxbuffer; i++) {
		if ((avs2_dec->fref[i]->imgcoi_ref < -256
#if 0
			|| abs(avs2_dec->fref[i]->
				imgtr_fwRefDistance - img->tr) >= 128
#endif
			) && avs2_dec->fref[i]->is_output == -1
			&& avs2_dec->fref[i]->bg_flag == 0
#ifndef NO_DISPLAY
			&& avs2_dec->fref[i]->vf_ref > 0
			&& avs2_dec->fref[i]->to_prepare_disp == 0
#endif
			) {
			count++;
		}
	}

	return count;
}

/*
 * Count in-use buffers: this predicate is the exact logical negation
 * (De Morgan) of the get_free_buf_count() "idle" condition.
 */
static int get_used_buf_count(struct AVS2Decoder_s *dec)
{
	struct avs2_decoder *avs2_dec = &dec->avs2_dec;
	int i;
	int count = 0;

	for (i = 0; i < avs2_dec->ref_maxbuffer; i++) {
		if ((avs2_dec->fref[i]->imgcoi_ref >= -256
#if 0
			|| abs(avs2_dec->fref[i]->
				imgtr_fwRefDistance - img->tr) >= 128
#endif
			) || avs2_dec->fref[i]->is_output != -1
			|| avs2_dec->fref[i]->bg_flag != 0
#ifndef NO_DISPLAY
			|| avs2_dec->fref[i]->vf_ref != 0
			|| avs2_dec->fref[i]->to_prepare_disp != 0
#endif
			) {
			count++;
		}
	}

	return count;
}
#endif

/*
 * One-time software/bufmgr state init for this decoder instance.
 * (Signature and body continue on the next line of this file.)
 */
int avs2_bufmgr_init(struct AVS2Decoder_s *dec, struct BuffInfo_s
*buf_spec_i, + struct buff_s *mc_buf_i) { + + dec->frame_count = 0; +#ifdef AVS2_10B_MMU + dec->used_4k_num = -1; + dec->cur_fb_idx_mmu = INVALID_IDX; +#endif + + + /* private init */ + dec->work_space_buf = buf_spec_i; +#ifndef AVS2_10B_MMU + dec->mc_buf = mc_buf_i; +#endif + dec->rpm_addr = NULL; + dec->lmem_addr = NULL; + + dec->use_cma_flag = 0; + dec->decode_idx = 0; + dec->slice_idx = 0; + /*int m_uiMaxCUWidth = 1<<7;*/ + /*int m_uiMaxCUHeight = 1<<7;*/ + dec->wait_buf = 0; + dec->error_flag = 0; + dec->skip_PB_before_I = 0; + + dec->pts_mode = PTS_NORMAL; + dec->last_pts = 0; + dec->last_lookup_pts = 0; + dec->last_pts_us64 = 0; + dec->last_lookup_pts_us64 = 0; + dec->shift_byte_count = 0; + dec->shift_byte_count_lo = 0; + dec->shift_byte_count_hi = 0; + dec->pts_mode_switching_count = 0; + dec->pts_mode_recovery_count = 0; + + dec->buf_num = 0; + + dec->bufmgr_error_count = 0; + return 0; +} + + + +#define HEVC_CM_BODY_START_ADDR 0x3626 +#define HEVC_CM_BODY_LENGTH 0x3627 +#define HEVC_CM_HEADER_LENGTH 0x3629 +#define HEVC_CM_HEADER_OFFSET 0x362b + +#define LOSLESS_COMPRESS_MODE + +/*#define DECOMP_HEADR_SURGENT*/ + +static u32 mem_map_mode; /* 0:linear 1:32x32 2:64x32 ; m8baby test1902 */ +static u32 enable_mem_saving = 1; +static u32 force_w_h; + +static u32 force_fps; + + +const u32 avs2_version = 201602101; +static u32 radr; +static u32 rval; +static u32 pop_shorts; +static u32 dbg_cmd; +static u32 dbg_skip_decode_index; +/* + * bit 0~3, for HEVCD_IPP_AXIIF_CONFIG endian config + * bit 8~23, for HEVC_SAO_CTRL1 endian config + */ +static u32 endian; +#define HEVC_CONFIG_BIG_ENDIAN ((0x880 << 8) | 0x8) +#define HEVC_CONFIG_LITTLE_ENDIAN ((0xff0 << 8) | 0xf) + +#ifdef ERROR_HANDLE_DEBUG +static u32 dbg_nal_skip_flag; + /* bit[0], skip vps; bit[1], skip sps; bit[2], skip pps */ +static u32 dbg_nal_skip_count; +#endif +/*for debug*/ +static u32 decode_pic_begin; +static uint slice_parse_begin; +static u32 step; +#ifdef MIX_STREAM_SUPPORT +static u32 
buf_alloc_width = 4096; +static u32 buf_alloc_height = 2304; + +static u32 dynamic_buf_num_margin; +#else +static u32 buf_alloc_width; +static u32 buf_alloc_height; +static u32 dynamic_buf_num_margin = 7; +#endif +#ifdef CONSTRAIN_MAX_BUF_NUM +static u32 run_ready_max_vf_only_num; +static u32 run_ready_display_q_num; + /*0: not check + 0xff: avs2_dec.ref_maxbuffer + */ +static u32 run_ready_max_buf_num = 0xff; +#endif +static u32 buf_alloc_depth = 10; +static u32 buf_alloc_size; +/* +bit[0]: 0, + bit[1]: 0, always release cma buffer when stop + bit[1]: 1, never release cma buffer when stop +bit[0]: 1, when stop, release cma buffer if blackout is 1; +do not release cma buffer is blackout is not 1 + +bit[2]: 0, when start decoding, check current displayed buffer + (only for buffer decoded by vp9) if blackout is 0 + 1, do not check current displayed buffer + +bit[3]: 1, if blackout is not 1, do not release current + displayed cma buffer always. +*/ +/* set to 1 for fast play; + set to 8 for other case of "keep last frame" +*/ +static u32 buffer_mode = 1; +/* buffer_mode_dbg: debug only*/ +static u32 buffer_mode_dbg = 0xffff0000; +/**/ + +/* +bit 0, 1: only display I picture; +bit 1, 1: only decode I picture; +*/ +static u32 i_only_flag; + + +static u32 max_decoding_time; +/* +error handling +*/ +/*error_handle_policy: +bit 0: search seq again if buffer mgr error occur + (buffer mgr error count need big than + re_search_seq_threshold) +bit 1: 1, display from I picture; + 0, display from any correct pic +*/ + +static u32 error_handle_policy = 1; +/* +re_search_seq_threshold: + bit 7~0: buffer mgr error research seq count + bit 15~8: frame count threshold +*/ +static u32 re_search_seq_threshold = 0x800; /*0x8;*/ +/*static u32 parser_sei_enable = 1;*/ + +static u32 max_buf_num = (REF_BUFFER + 1); + +static u32 run_ready_min_buf_num = 2; + +static DEFINE_MUTEX(vavs2_mutex); + +#define HEVC_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0 +#define HEVC_RPM_BUFFER HEVC_ASSIST_SCRATCH_1 
+#define AVS2_ALF_SWAP_BUFFER HEVC_ASSIST_SCRATCH_2 +#define HEVC_RCS_BUFFER HEVC_ASSIST_SCRATCH_3 +#define HEVC_SPS_BUFFER HEVC_ASSIST_SCRATCH_4 +#define HEVC_PPS_BUFFER HEVC_ASSIST_SCRATCH_5 +//#define HEVC_SAO_UP HEVC_ASSIST_SCRATCH_6 +#ifdef AVS2_10B_MMU +#define AVS2_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_7 +#else +#define HEVC_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7 +#endif +#define HEVC_STREAM_SWAP_BUFFER2 HEVC_ASSIST_SCRATCH_8 +/* +#define VP9_PROB_SWAP_BUFFER HEVC_ASSIST_SCRATCH_9 +#define VP9_COUNT_SWAP_BUFFER HEVC_ASSIST_SCRATCH_A +#define VP9_SEG_MAP_BUFFER HEVC_ASSIST_SCRATCH_B +*/ +#define AVS2_CUVA_ADR HEVC_ASSIST_SCRATCH_A +#define AVS2_CUVA_DATA_SIZE HEVC_ASSIST_SCRATCH_B + +//#define HEVC_SCALELUT HEVC_ASSIST_SCRATCH_D +#define HEVC_WAIT_FLAG HEVC_ASSIST_SCRATCH_E +#define RPM_CMD_REG HEVC_ASSIST_SCRATCH_F +#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_9 +#define HEVC_STREAM_SWAP_TEST HEVC_ASSIST_SCRATCH_L +/*!!!*/ +#define HEVC_DECODE_COUNT HEVC_ASSIST_SCRATCH_M +#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N +#define DEBUG_REG1 HEVC_ASSIST_SCRATCH_G +#define DEBUG_REG2 HEVC_ASSIST_SCRATCH_H + + +/* +ucode parser/search control +bit 0: 0, header auto parse; 1, header manual parse +bit 1: 0, auto skip for noneseamless stream; 1, no skip +bit [3:2]: valid when bit1==0; +0, auto skip nal before first vps/sps/pps/idr; +1, auto skip nal before first vps/sps/pps +2, auto skip nal before first vps/sps/pps, + and not decode until the first I slice (with slice address of 0) + +3, auto skip before first I slice (nal_type >=16 && nal_type<=21) +bit [15:4] nal skip count (valid when bit0 == 1 (manual mode) ) +bit [16]: for NAL_UNIT_EOS when bit0 is 0: + 0, send SEARCH_DONE to arm ; 1, do not send SEARCH_DONE to arm +bit [17]: for NAL_SEI when bit0 is 0: + 0, do not parse SEI in ucode; 1, parse SEI in ucode +bit [31:20]: used by ucode for debug purpose +*/ +#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I + /*DECODE_MODE: set before start decoder + bit 7~0: decode mode 
+ bit 23~16: start_decoding_flag + bit [0] - SEQ_ready + bit [2:1] - I Picture Count + bit 31~24: chip feature + */ +#define DECODE_MODE HEVC_ASSIST_SCRATCH_J +#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K + /*read only*/ +#define CUR_NAL_UNIT_TYPE HEVC_ASSIST_SCRATCH_J + +#define RPM_BUF_SIZE (0x600 * 2) +#define LMEM_BUF_SIZE (0x600 * 2) + + /*mmu_vbh buf is used by HEVC_SAO_MMU_VH0_ADDR, HEVC_SAO_MMU_VH1_ADDR*/ +#define VBH_BUF_SIZE_1080P 0x3000 +#define VBH_BUF_SIZE_4K 0x5000 +#define VBH_BUF_SIZE_8K 0xa000 +#define VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh.buf_size / 2) + /*mmu_vbh_dw buf is used by HEVC_SAO_MMU_VH0_ADDR2,HEVC_SAO_MMU_VH1_ADDR2, + HEVC_DW_VH0_ADDDR, HEVC_DW_VH1_ADDDR*/ +#define DW_VBH_BUF_SIZE_1080P (VBH_BUF_SIZE_1080P * 2) +#define DW_VBH_BUF_SIZE_4K (VBH_BUF_SIZE_4K * 2) +#define DW_VBH_BUF_SIZE_8K (VBH_BUF_SIZE_8K * 2) +#define DW_VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh_dw.buf_size / 4) + +/* necessary 4K page size align for t7/t3 decoder and after */ +#define WORKBUF_ALIGN(addr) (ALIGN(addr, PAGE_SIZE)) + +#define WORK_BUF_SPEC_NUM 6 +static struct BuffInfo_s amvavs2_workbuff_spec[WORK_BUF_SPEC_NUM] = { + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = { + /* IPP work space calculation : + 4096 * (Y+CbCr+Flags) = 12k, round to 16k */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x30000, + }, + .sao_vb = { + .buf_size = 0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800) */ + .buf_size = 0x800, + }, + .rcs = { + /* RCS STORE AREA - Max 32 RCS, each has 32 bytes, + total 0x0400 bytes */ + .buf_size = 0x400, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes*/ + .buf_size = 0x800, + }, + .pps = { + /*PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + total 0x2000 bytes*/ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + each has 16 bytes total 0x2800 bytes 
*/ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + (only 144 cycles valid) */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = + 32Kbytes (0x8000) */ + .buf_size = 0x8000, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x40000, + }, + .dblk_data = { + .buf_size = 0x40000, + }, + .dblk_data2 = { + .buf_size = 0x40000, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = 0x5000, /*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x8000, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = {/* 1080p, 0x40000 per buffer */ + .buf_size = 0x40000 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096, + .max_height = 2304, + .ipp = { + /* IPP work space calculation : + 4096 * (Y+CbCr+Flags) = 12k, round to 16k */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x30000, + }, + .sao_vb = { + .buf_size = 0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800) */ + .buf_size = 0x800, + }, + .rcs = { + /* RCS STORE AREA - Max 16 RCS, each has 32 bytes, + total 0x0400 bytes */ + .buf_size = 0x400, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + total 0x2000 bytes */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + each has 16 bytes total 0x2800 bytes */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 
2K bytes 0x800 + (only 144 cycles valid) */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + (0x8000) */ + .buf_size = 0x8000, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x80000, + }, + .dblk_data = { + /*DBLK -> Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x80000, + }, + .dblk_data2 = { + .buf_size = 0x80000, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = 0x5000,/*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x10000, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = 0x120000 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096 * 2, + .max_height = 2304 * 2, + .ipp = { + /*IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, + round to 16k*/ + .buf_size = 0x4000 * 2, + }, + .sao_abv = { + .buf_size = 0x30000 * 2, + }, + .sao_vb = { + .buf_size = 0x30000 * 2, + }, + .short_term_rps = { + /*SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800)*/ + .buf_size = 0x800, + }, + .rcs = { + /*RCS STORE AREA - Max 16 RCS, each has 32 bytes, + total 0x0400 bytes*/ + .buf_size = 0x400, + }, + .sps = { + /*SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes*/ + .buf_size = 0x800, + }, + .pps = { + /*PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total + 0x2000 bytes*/ + .buf_size = 0x2000, + }, + .sao_up = { + /*SAO UP STORE AREA - Max 
640(10240/16) LCU, each has 16 bytes i + total 0x2800 bytes*/ + .buf_size = 0x2800 * 2, + }, + .swap_buf = { + /*256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid)*/ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /*support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000)*/ + .buf_size = 0x8000 * 2, + }, + .dblk_para = { + .buf_size = 0x40000 * 2, + }, + .dblk_data = { + .buf_size = 0x80000 * 2, + }, + .dblk_data2 = { + .buf_size = 0x80000 * 2, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = 0x5000 * 2, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = { + /*0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_8K_HEADER_SIZE * 17, + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x8000 * 2, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /*4k2k , 0x100000 per buffer*/ + .buf_size = 0x120000 * FRAME_BUFFERS * 4, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = { + /* IPP work space calculation : + 4096 * (Y+CbCr+Flags) = 12k, round to 16k */ + .buf_size = 0x1e00, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800) */ + .buf_size = 0x800, + }, + .rcs = { + /* RCS STORE AREA - Max 32 RCS, each has 32 bytes, + total 0x0400 bytes */ + .buf_size = 0x400, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes*/ + .buf_size = 0x800, + }, + .pps = { + /*PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + total 0x2000 bytes*/ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + each has 16 bytes total 0x2800 bytes */ + .buf_size = 0, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + (only 144 cycles valid) */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 
0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = + 32Kbytes (0x8000) */ + .buf_size = 0, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x3d00, //0x3c80, + }, + .dblk_data = { + .buf_size = 0x62800, + }, + .dblk_data2 = { + .buf_size = 0x62800, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_1080P, /*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif +#endif +#ifdef AVS2_10B_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_1080P, //2*16*2304/4, 4K + }, +#if 0 + .cm_header_dw = { + .buf_size = MMU_COMPRESS_HEADER_SIZE_DW*17, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x1e00, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = {/* 1080p, 0x40000 per buffer */ + .buf_size = CO_MV_BUF_SIZE_1080P * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096, + .max_height = 2304, + .ipp = { + /* IPP work space calculation : + 4096 * (Y+CbCr+Flags) = 12k, round to 16k */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800) */ + .buf_size = 0x800, + }, + .rcs = { + /* RCS STORE AREA - Max 16 RCS, each has 32 bytes, + total 0x0400 bytes */ + .buf_size = 0x400, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + total 0x2000 bytes */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + each has 16 bytes 
total 0x2800 bytes */ + .buf_size = 0, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + (only 144 cycles valid) */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + (0x8000) */ + .buf_size = 0, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x8100, //0x8080, + }, + .dblk_data = { + /*DBLK -> Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x88800, + }, + .dblk_data2 = { + .buf_size = 0x88800, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_4K,/*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif +#endif +#ifdef AVS2_10B_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_4K, //2*16*2304/4, 4K + }, +#if 0 + .cm_header_dw = { + .buf_size = MMU_COMPRESS_HEADER_SIZE_DW*17, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x4000, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = CO_MV_BUF_SIZE_4K * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096 * 2, + .max_height = 2304 * 2, + .ipp = { + /*IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, + round to 16k*/ + .buf_size = 0x4000 * 2, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + /*SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800)*/ + .buf_size = 0x800, + }, + .rcs = { + /*RCS STORE AREA - Max 16 RCS, each has 32 bytes, + 
total 0x0400 bytes*/ + .buf_size = 0x400, + }, + .sps = { + /*SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes*/ + .buf_size = 0x800, + }, + .pps = { + /*PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total + 0x2000 bytes*/ + .buf_size = 0x2000, + }, + .sao_up = { + /*SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes i + total 0x2800 bytes*/ + .buf_size = 0, + }, + .swap_buf = { + /*256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid)*/ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /*support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000)*/ + .buf_size = 0, + }, + .dblk_para = { + .buf_size = 0x10100, //0x10080, + }, + .dblk_data = { + .buf_size = 0x110800, + }, + .dblk_data2 = { + .buf_size = 0x110800, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_8K, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = { + /*0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_8K_HEADER_SIZE * 17, + }, +#endif +#endif +#ifdef AVS2_10B_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_8K, //2*16*2304/4, 4K + }, +#if 0 + .cm_header_dw = { + .buf_size = MMU_COMPRESS_HEADER_SIZE_DW*17, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x8000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /*4k2k , 0x100000 per buffer*/ + .buf_size = CO_MV_BUF_SIZE_8K * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + } +}; + +#define IS_8K_SIZE(w, h) (((w) * (h)) > MAX_SIZE_4K) +#define IS_4K_SIZE(w, h) (((w) * (h)) > (1920*1088)) +#ifndef MV_USE_FIXED_BUF +static uint32_t get_mv_buf_size(struct AVS2Decoder_s *dec, int width, int height) { + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + uint32_t size; + if (mv_buf_dynamic_alloc == 1) { + int mv_mem_unit = + avs2_dec->lcu_size_log2 == 6 ? 0x200 : avs2_dec->lcu_size_log2 == + 5 ? 
0x80 : 0x20; + int extended_pic_width = (width + avs2_dec->lcu_size -1) + & (~(avs2_dec->lcu_size - 1)); + int extended_pic_height = (height + avs2_dec->lcu_size -1) + & (~(avs2_dec->lcu_size - 1)); + int lcu_x_num = extended_pic_width / avs2_dec->lcu_size; + int lcu_y_num = extended_pic_height / avs2_dec->lcu_size; + int new_size = lcu_x_num * lcu_y_num * mv_mem_unit; + size = (new_size + 0xffff) & (~0xffff); + + } else { + if (IS_8K_SIZE(width, height)) + size = CO_MV_BUF_SIZE_8K; + else if (IS_4K_SIZE(width, height)) + size = CO_MV_BUF_SIZE_4K; + else + size = CO_MV_BUF_SIZE_1080P; + } + return size; +} +#endif + +/*Losless compression body buffer size 4K per 64x32 (jt)*/ +static int compute_losless_comp_body_size(struct AVS2Decoder_s *dec, + int width, int height, + uint8_t is_bit_depth_10) +{ + int width_x64; + int height_x32; + int bsize; + width_x64 = width + 63; + width_x64 >>= 6; + height_x32 = height + 31; + height_x32 >>= 5; +#ifdef AVS2_10B_MMU + bsize = (is_bit_depth_10 ? 4096 : 3200) + * width_x64 * height_x32; +#else + bsize = (is_bit_depth_10 ? 
4096 : 3072) + * width_x64 * height_x32; +#endif + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s(%d,%d,%d)=>%d\n", + __func__, width, height, + is_bit_depth_10, bsize); + + return bsize; +} + +/* Losless compression header buffer size 32bytes per 128x64 (jt)*/ +static int compute_losless_comp_header_size(struct AVS2Decoder_s *dec, + int width, int height) +{ + int width_x128; + int height_x64; + int hsize; + width_x128 = width + 127; + width_x128 >>= 7; + height_x64 = height + 63; + height_x64 >>= 6; + + hsize = 32 * width_x128 * height_x64; + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s(%d,%d)=>%d\n", + __func__, width, height, + hsize); + + return hsize; +} + +static void init_buff_spec(struct AVS2Decoder_s *dec, + struct BuffInfo_s *buf_spec) +{ + void *mem_start_virt; + buf_spec->ipp.buf_start = + WORKBUF_ALIGN(buf_spec->start_adr); + buf_spec->sao_abv.buf_start = + WORKBUF_ALIGN(buf_spec->ipp.buf_start + buf_spec->ipp.buf_size); + buf_spec->sao_vb.buf_start = + WORKBUF_ALIGN(buf_spec->sao_abv.buf_start + buf_spec->sao_abv.buf_size); + buf_spec->short_term_rps.buf_start = + WORKBUF_ALIGN(buf_spec->sao_vb.buf_start + buf_spec->sao_vb.buf_size); + buf_spec->rcs.buf_start = + WORKBUF_ALIGN(buf_spec->short_term_rps.buf_start + buf_spec->short_term_rps.buf_size); + buf_spec->sps.buf_start = + WORKBUF_ALIGN(buf_spec->rcs.buf_start + buf_spec->rcs.buf_size); + buf_spec->pps.buf_start = + WORKBUF_ALIGN(buf_spec->sps.buf_start + buf_spec->sps.buf_size); + buf_spec->sao_up.buf_start = + WORKBUF_ALIGN(buf_spec->pps.buf_start + buf_spec->pps.buf_size); + buf_spec->swap_buf.buf_start = + WORKBUF_ALIGN(buf_spec->sao_up.buf_start + buf_spec->sao_up.buf_size); + buf_spec->swap_buf2.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf.buf_start + buf_spec->swap_buf.buf_size); + buf_spec->scalelut.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf2.buf_start + buf_spec->swap_buf2.buf_size); + buf_spec->dblk_para.buf_start = + WORKBUF_ALIGN(buf_spec->scalelut.buf_start + 
buf_spec->scalelut.buf_size); + buf_spec->dblk_data.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_para.buf_start + buf_spec->dblk_para.buf_size); + buf_spec->dblk_data2.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data.buf_start + buf_spec->dblk_data.buf_size); +#ifdef AVS2_10B_MMU + buf_spec->mmu_vbh.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data2.buf_start + buf_spec->dblk_data2.buf_size); + #ifdef AVS2_10B_MMU_DW + buf_spec->mmu_vbh_dw.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size); + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh_dw.buf_start + buf_spec->mmu_vbh_dw.buf_size); + #else + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size); + #endif +#else /* AVS2_10B_MMU */ + #ifdef AVS2_10B_MMU_DW + buf_spec->mmu_vbh_dw.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data2.buf_start + buf_spec->dblk_data2.buf_size); + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh_dw.buf_start + buf_spec->mmu_vbh_dw.buf_size); + #else + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data2.buf_start + buf_spec->dblk_data2.buf_size); + #endif +#endif /* AVS2_10B_MMU */ +#ifdef MV_USE_FIXED_BUF + buf_spec->mpred_mv.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_mv.buf_start + buf_spec->mpred_mv.buf_size); +#else + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); +#endif + buf_spec->lmem.buf_start = + WORKBUF_ALIGN(buf_spec->rpm.buf_start + buf_spec->rpm.buf_size); + buf_spec->end_adr = + WORKBUF_ALIGN(buf_spec->lmem.buf_start + buf_spec->lmem.buf_size); + + if (dec) { + mem_start_virt = + codec_mm_phys_to_virt(buf_spec->dblk_para.buf_start); + if (mem_start_virt) { + memset(mem_start_virt, 0, buf_spec->dblk_para.buf_size); + codec_mm_dma_flush(mem_start_virt, + 
buf_spec->dblk_para.buf_size, + DMA_TO_DEVICE); + } else { + /*not virt for tvp playing, + may need clear on ucode.*/ + pr_err("mem_start_virt failed\n"); + } + if (debug) { + pr_info("%s workspace (%x %x) size = %x\n", __func__, + buf_spec->start_adr, buf_spec->end_adr, + buf_spec->end_adr - buf_spec->start_adr); + } + if (debug) { + pr_info("ipp.buf_start :%x\n", + buf_spec->ipp.buf_start); + pr_info("sao_abv.buf_start :%x\n", + buf_spec->sao_abv.buf_start); + pr_info("sao_vb.buf_start :%x\n", + buf_spec->sao_vb.buf_start); + pr_info("short_term_rps.buf_start :%x\n", + buf_spec->short_term_rps.buf_start); + pr_info("rcs.buf_start :%x\n", + buf_spec->rcs.buf_start); + pr_info("sps.buf_start :%x\n", + buf_spec->sps.buf_start); + pr_info("pps.buf_start :%x\n", + buf_spec->pps.buf_start); + pr_info("sao_up.buf_start :%x\n", + buf_spec->sao_up.buf_start); + pr_info("swap_buf.buf_start :%x\n", + buf_spec->swap_buf.buf_start); + pr_info("swap_buf2.buf_start :%x\n", + buf_spec->swap_buf2.buf_start); + pr_info("scalelut.buf_start :%x\n", + buf_spec->scalelut.buf_start); + pr_info("dblk_para.buf_start :%x\n", + buf_spec->dblk_para.buf_start); + pr_info("dblk_data.buf_start :%x\n", + buf_spec->dblk_data.buf_start); + pr_info("dblk_data2.buf_start :%x\n", + buf_spec->dblk_data2.buf_start); + #ifdef AVS2_10B_MMU + pr_info("mmu_vbh.buf_start :%x\n", + buf_spec->mmu_vbh.buf_start); + #endif + #ifdef AVS2_10B_MMU_DW + pr_info("mmu_vbh_dw.buf_start :%x\n", + buf_spec->mmu_vbh_dw.buf_start); + #endif + pr_info("mpred_above.buf_start :%x\n", + buf_spec->mpred_above.buf_start); +#ifdef MV_USE_FIXED_BUF + pr_info("mpred_mv.buf_start :%x\n", + buf_spec->mpred_mv.buf_start); +#endif + if ((debug & AVS2_DBG_SEND_PARAM_WITH_REG) == 0) { + pr_info("rpm.buf_start :%x\n", + buf_spec->rpm.buf_start); + } + } + } + +} + +static void uninit_mmu_buffers(struct AVS2Decoder_s *dec) +{ +#ifdef AVS2_10B_MMU_DW + if (dec->dw_mmu_enable && dec->dw_mmu_box) { + decoder_mmu_box_free(dec->dw_mmu_box); + 
dec->dw_mmu_box = NULL; + } +#endif + decoder_mmu_box_free(dec->mmu_box); + dec->mmu_box = NULL; + + if (dec->bmmu_box) + decoder_bmmu_box_free(dec->bmmu_box); + dec->bmmu_box = NULL; +} + +#ifndef AVS2_10B_MMU +static void init_buf_list(struct AVS2Decoder_s *dec) +{ + int i; + int buf_size; + int mc_buffer_end = dec->mc_buf->buf_start + dec->mc_buf->buf_size; + dec->used_buf_num = max_buf_num; + + if (dec->used_buf_num > MAX_BUF_NUM) + dec->used_buf_num = MAX_BUF_NUM; + if (buf_alloc_size > 0) { + buf_size = buf_alloc_size; + avs2_print(dec, AVS2_DBG_BUFMGR, + "[Buffer Management] init_buf_list:\n"); + } else { + int pic_width = dec->init_pic_w; + int pic_height = dec->init_pic_h; + + /*SUPPORT_10BIT*/ + int losless_comp_header_size = compute_losless_comp_header_size + (dec, pic_width, pic_height); + int losless_comp_body_size = compute_losless_comp_body_size + (dec, pic_width, pic_height, buf_alloc_depth == 10); + int mc_buffer_size = losless_comp_header_size + + losless_comp_body_size; + int mc_buffer_size_h = (mc_buffer_size + 0xffff)>>16; + + int dw_mode = get_double_write_mode_init(dec); + + if (dw_mode) { + int pic_width_dw = pic_width / + get_double_write_ratio(dw_mode); + int pic_height_dw = pic_height / + get_double_write_ratio(dw_mode); + int lcu_size = 64; /*fixed 64*/ + int pic_width_64 = (pic_width_dw + 63) & (~0x3f); + int pic_height_32 = (pic_height_dw + 31) & (~0x1f); + int pic_width_lcu = + (pic_width_64 % lcu_size) ? pic_width_64 / lcu_size + + 1 : pic_width_64 / lcu_size; + int pic_height_lcu = + (pic_height_32 % lcu_size) ? 
pic_height_32 / lcu_size + + 1 : pic_height_32 / lcu_size; + int lcu_total = pic_width_lcu * pic_height_lcu; + int mc_buffer_size_u_v = lcu_total * lcu_size * lcu_size / 2; + int mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16; + /*64k alignment*/ + buf_size = ((mc_buffer_size_u_v_h << 16) * 3); + } else + buf_size = 0; + + if (mc_buffer_size & 0xffff) { /*64k alignment*/ + mc_buffer_size_h += 1; + } + if ((dw_mode & 0x10) == 0) + buf_size += (mc_buffer_size_h << 16); + avs2_print(dec, AVS2_DBG_BUFMGR, + "init_buf_list num %d (width %d height %d):\n", + dec->used_buf_num, pic_width, pic_height); + } + + for (i = 0; i < dec->used_buf_num; i++) { + if (((i + 1) * buf_size) > dec->mc_buf->buf_size) + dec->use_cma_flag = 1; + + dec->m_BUF[i].alloc_flag = 0; + dec->m_BUF[i].index = i; + + dec->use_cma_flag = 1; + if (dec->use_cma_flag) { + dec->m_BUF[i].cma_page_count = + PAGE_ALIGN(buf_size) / PAGE_SIZE; + if (decoder_bmmu_box_alloc_buf_phy(dec->bmmu_box, + VF_BUFFER_IDX(i), buf_size, DRIVER_NAME, + &dec->m_BUF[i].alloc_addr) < 0) { + dec->m_BUF[i].cma_page_count = 0; + if (i <= 5) { + dec->fatal_error |= + DECODER_FATAL_ERROR_NO_MEM; + } + break; + } + dec->m_BUF[i].start_adr = dec->m_BUF[i].alloc_addr; + } else { + dec->m_BUF[i].cma_page_count = 0; + dec->m_BUF[i].alloc_addr = 0; + dec->m_BUF[i].start_adr = + dec->mc_buf->buf_start + i * buf_size; + } + dec->m_BUF[i].size = buf_size; + dec->m_BUF[i].free_start_adr = dec->m_BUF[i].start_adr; + + if (((dec->m_BUF[i].start_adr + buf_size) > mc_buffer_end) + && (dec->m_BUF[i].alloc_addr == 0)) { + if (debug) { + avs2_print(dec, 0, + "Max mc buffer or mpred_mv buffer is used\n"); + } + break; + } + + avs2_print(dec, AVS2_DBG_BUFMGR, + "Buffer %d: start_adr %p size %x\n", i, + (void *)dec->m_BUF[i].start_adr, + dec->m_BUF[i].size); + } + dec->buf_num = i; +} +#endif + +static int config_pic(struct AVS2Decoder_s *dec, + struct avs2_frame_s *pic, int32_t lcu_size_log2) +{ + int ret = -1; + int i; + int pic_width = 
dec->init_pic_w; + int pic_height = dec->init_pic_h; + /*struct avs2_decoder *avs2_dec = &dec->avs2_dec; + int32_t lcu_size_log2 = avs2_dec->lcu_size_log2;*/ + int32_t lcu_size = 1 << lcu_size_log2; + int pic_width_64 = (pic_width + 63) & (~0x3f); + int pic_height_32 = (pic_height + 31) & (~0x1f); + int pic_width_lcu = (pic_width_64 % lcu_size) ? + pic_width_64 / lcu_size + 1 + : pic_width_64 / lcu_size; + int pic_height_lcu = (pic_height_32 % lcu_size) ? + pic_height_32 / lcu_size + 1 + : pic_height_32 / lcu_size; + int lcu_total = pic_width_lcu * pic_height_lcu; + + u32 y_adr = 0; + int buf_size = 0; + + int losless_comp_header_size = + compute_losless_comp_header_size( + dec, pic_width, pic_height); + int losless_comp_body_size = compute_losless_comp_body_size( + dec, pic_width, + pic_height, buf_alloc_depth == 10); + int mc_buffer_size = losless_comp_header_size + losless_comp_body_size; + int mc_buffer_size_h = (mc_buffer_size + 0xffff) >> 16; + int mc_buffer_size_u_v = 0; + int mc_buffer_size_u_v_h = 0; + int dw_mode = get_double_write_mode_init(dec); + + if (dw_mode && ((dw_mode & 0x20) == 0)) { + int pic_width_dw = pic_width / + get_double_write_ratio(dw_mode); + int pic_height_dw = pic_height / + get_double_write_ratio(dw_mode); + int pic_width_64_dw = (pic_width_dw + 63) & (~0x3f); + int pic_height_32_dw = (pic_height_dw + 31) & (~0x1f); + int pic_width_lcu_dw = (pic_width_64_dw % lcu_size) ? + pic_width_64_dw / lcu_size + 1 + : pic_width_64_dw / lcu_size; + int pic_height_lcu_dw = (pic_height_32_dw % lcu_size) ? 
+ pic_height_32_dw / lcu_size + 1 + : pic_height_32_dw / lcu_size; + int lcu_total_dw = pic_width_lcu_dw * pic_height_lcu_dw; + + mc_buffer_size_u_v = lcu_total_dw * lcu_size * lcu_size / 2; + mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16; + /*64k alignment*/ + buf_size = ((mc_buffer_size_u_v_h << 16) * 3); + buf_size = ((buf_size + 0xffff) >> 16) << 16; + } + if (mc_buffer_size & 0xffff) /*64k alignment*/ + mc_buffer_size_h += 1; + + +#ifdef AVS2_10B_MMU + if (dec->mmu_enable) { + pic->header_adr = decoder_bmmu_box_get_phy_addr( + dec->bmmu_box, HEADER_BUFFER_IDX(pic->index)); + +#ifdef AVS2_10B_MMU_DW + if (dec->dw_mmu_enable) { + pic->dw_header_adr = pic->header_adr + + get_compress_header_size(dec); + } +#endif + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "buf_size %d, MMU header_adr %d: %ld\n", + buf_size, pic->index, pic->header_adr); + } +#else + if ((dw_mode & 0x10) == 0) + buf_size += (mc_buffer_size_h << 16); +#endif + + i = pic->index; + +#ifndef AVS2_10B_MMU + if (debug) { + pr_err("start %x .size=%d\n", + dec->mc_buf_spec.buf_start + i * buf_size, buf_size); + } + for (i = 0; i < dec->buf_num; i++) { + y_adr = ((dec->m_BUF[i].free_start_adr + + 0xffff) >> 16) << 16; + /*64k alignment*/ + if ((y_adr+buf_size) <= (dec->m_BUF[i].start_adr+ + dec->m_BUF[i].size)) { + dec->m_BUF[i].free_start_adr = + y_adr + buf_size; + break; + } + } + if (i < dec->buf_num) +#else + /*if ((dec->mc_buf->buf_start + (i + 1) * buf_size) < + dec->mc_buf->buf_end) + y_adr = dec->mc_buf->buf_start + i * buf_size; + else {*/ + if (buf_size > 0 && pic->cma_alloc_addr == 0) { + ret = decoder_bmmu_box_alloc_buf_phy(dec->bmmu_box, + VF_BUFFER_IDX(i), + buf_size, DRIVER_NAME, + &pic->cma_alloc_addr); + if (ret < 0) { + avs2_print(dec, 0, + "decoder_bmmu_box_alloc_buf_phy idx %d size %d fail\n", + VF_BUFFER_IDX(i), + buf_size + ); + return ret; + } + + if (pic->cma_alloc_addr) + y_adr = pic->cma_alloc_addr; + else { + avs2_print(dec, 0, + "decoder_bmmu_box_alloc_buf_phy idx 
%d size %d return null\n", + VF_BUFFER_IDX(i), + buf_size + ); + return -1; + } + } +#endif + { + /*ensure get_pic_by_POC() + not get the buffer not decoded*/ + pic->BUF_index = i; + pic->lcu_total = lcu_total; + + pic->comp_body_size = losless_comp_body_size; + pic->buf_size = buf_size; + pic->mc_canvas_y = pic->index; + pic->mc_canvas_u_v = pic->index; +#ifndef AVS2_10B_MMU + pic->mc_y_adr = y_adr; + if (dw_mode & 0x10) { + pic->mc_u_v_adr = y_adr + + ((mc_buffer_size_u_v_h << 16) << 1); + + pic->mc_canvas_y = + (pic->index << 1); + pic->mc_canvas_u_v = + (pic->index << 1) + 1; + + pic->dw_y_adr = y_adr; + pic->dw_u_v_adr = pic->mc_u_v_adr; + } else +#endif + if (dw_mode) { +#ifdef AVS2_10B_MMU + pic->dw_y_adr = y_adr; + pic->dw_u_v_adr = pic->dw_y_adr + + ((mc_buffer_size_u_v_h << 16) << 1); + pic->mc_y_adr = pic->dw_y_adr; + pic->mc_u_v_adr = pic->dw_u_v_adr; +#else + pic->dw_y_adr = y_adr + (mc_buffer_size_h << 16); + pic->dw_u_v_adr = pic->dw_y_adr + + ((mc_buffer_size_u_v_h << 16) << 1); +#endif + } +#ifdef MV_USE_FIXED_BUF + pic->mpred_mv_wr_start_addr = + dec->work_space_buf->mpred_mv.buf_start + + pic->index * (dec->work_space_buf->mpred_mv.buf_size / FRAME_BUFFERS); + if (pic->mpred_mv_wr_start_addr > + (dec->work_space_buf->mpred_mv.buf_start + + dec->work_space_buf->mpred_mv.buf_size)) { + avs2_print(dec, 0, "err: fixed mv buf out of size, 0x0%x\n", + pic->mpred_mv_wr_start_addr); + pic->mpred_mv_wr_start_addr = + dec->work_space_buf->mpred_mv.buf_start; + } +#endif + if (debug) { + avs2_print(dec, AVS2_DBG_BUFMGR, + "%s index %d BUF_index %d mc_y_adr %x ", + __func__, pic->index, + pic->BUF_index, + pic->mc_y_adr); + avs2_print_cont(dec, AVS2_DBG_BUFMGR, + "comp_body_size %x comp_buf_size %x ", + pic->comp_body_size, + pic->buf_size); + avs2_print_cont(dec, AVS2_DBG_BUFMGR, + "mpred_mv_wr_start_adr %d\n", + pic->mpred_mv_wr_start_addr); + avs2_print_cont(dec, AVS2_DBG_BUFMGR, + "dw_y_adr %d, pic->dw_u_v_adr =%d\n", + pic->dw_y_adr, + pic->dw_u_v_adr); 
+ } + ret = 0; + } + + return ret; +} + +static void init_pic_list(struct AVS2Decoder_s *dec, + int32_t lcu_size_log2) +{ + int i; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *pic; +#ifdef AVS2_10B_MMU + if (dec->mmu_enable) { + for (i = 0; i < dec->used_buf_num; i++) { + unsigned long buf_addr; + u32 header_size = get_compress_header_size(dec); +#ifdef AVS2_10B_MMU_DW + if (dec->dw_mmu_enable) + header_size <<= 1; +#endif + if (decoder_bmmu_box_alloc_buf_phy + (dec->bmmu_box, + HEADER_BUFFER_IDX(i), header_size, + DRIVER_HEADER_NAME, + &buf_addr) < 0){ + avs2_print(dec, 0, + "%s malloc compress header failed %d\n", + DRIVER_HEADER_NAME, i); + dec->fatal_error |= DECODER_FATAL_ERROR_NO_MEM; + return; + } + } + } +#endif + dec->frame_height = avs2_dec->img.height; + dec->frame_width = avs2_dec->img.width; + + for (i = 0; i < dec->used_buf_num; i++) { + if (i == (dec->used_buf_num - 1)) + pic = avs2_dec->m_bg; + else + pic = avs2_dec->fref[i]; + pic->index = i; + pic->BUF_index = -1; + pic->mv_buf_index = -1; + if (config_pic(dec, pic, lcu_size_log2) < 0) { + if (debug) + avs2_print(dec, 0, + "Config_pic %d fail\n", + pic->index); + pic->index = -1; + break; + } + pic->pic_w = avs2_dec->img.width; + pic->pic_h = avs2_dec->img.height; + } + for (; i < dec->used_buf_num; i++) { + if (i == (dec->used_buf_num - 1)) + pic = avs2_dec->m_bg; + else + pic = avs2_dec->fref[i]; + pic->index = -1; + pic->BUF_index = -1; + pic->mv_buf_index = -1; + } + avs2_print(dec, AVS2_DBG_BUFMGR, + "%s ok, used_buf_num = %d\n", + __func__, dec->used_buf_num); + dec->pic_list_init_flag = 1; +} + + +static void init_pic_list_hw(struct AVS2Decoder_s *dec) +{ + int i; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *pic; + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x0);*/ +#if 0 + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (0x1 << 2)); + +#ifdef DUAL_CORE_64 + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | 
(0x1 << 2)); +#endif +#endif + for (i = 0; i < dec->used_buf_num; i++) { + if (i == (dec->used_buf_num - 1)) + pic = avs2_dec->m_bg; + else + pic = avs2_dec->fref[i]; + if (pic->index < 0) + break; +#ifdef AVS2_10B_MMU + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + pic->header_adr + | (pic->mc_canvas_y << 8)|0x1);*/ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (pic->index << 8)); + +#ifdef DUAL_CORE_64 + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXLX2) + WRITE_VREG(HEVC2_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (pic->index << 8)); + else + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (pic->index << 8)); +#endif + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, pic->header_adr >> 5); +#else + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + pic->mc_y_adr + | (pic->mc_canvas_y << 8) | 0x1);*/ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, pic->mc_y_adr >> 5); +#endif +#ifndef LOSLESS_COMPRESS_MODE + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + pic->mc_u_v_adr + | (pic->mc_canvas_u_v << 8)| 0x1);*/ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, pic->mc_u_v_adr >> 5); +#endif +#ifdef DUAL_CORE_64 +#ifdef AVS2_10B_MMU + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_DATA, + pic->header_adr >> 5); +#else + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_DATA, + pic->mc_y_adr >> 5); +#endif +#ifndef LOSLESS_COMPRESS_MODE + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_DATA, + pic->mc_u_v_adr >> 5); +#endif +/*DUAL_CORE_64*/ +#endif + } + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1); +#ifdef DUAL_CORE_64 + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + 0x1); +#endif + /*Zero out canvas registers in IPP -- avoid simulation X*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 1); + for (i = 0; i < 32; i++) { + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); +#ifdef DUAL_CORE_64 + WRITE_VREG(HEVC2_HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); +#endif + } +} + + +static void dump_pic_list(struct AVS2Decoder_s *dec) +{ + int ii; + struct avs2_decoder 
*avs2_dec = &dec->avs2_dec; + for (ii = 0; ii < avs2_dec->ref_maxbuffer; ii++) { + avs2_print(dec, 0, + "fref[%d]: index %d decode_id %d mvbuf %d imgcoi_ref %d imgtr_fwRefDistance %d refered %d, pre %d is_out %d, bg %d, vf_ref %d error %d lcu %d ref_pos(%d,%d,%d,%d,%d,%d,%d)\n", + ii, avs2_dec->fref[ii]->index, + avs2_dec->fref[ii]->decode_idx, + avs2_dec->fref[ii]->mv_buf_index, + avs2_dec->fref[ii]->imgcoi_ref, + avs2_dec->fref[ii]->imgtr_fwRefDistance, + avs2_dec->fref[ii]->refered_by_others, + avs2_dec->fref[ii]->to_prepare_disp, + avs2_dec->fref[ii]->is_output, + avs2_dec->fref[ii]->bg_flag, + avs2_dec->fref[ii]->vf_ref, + avs2_dec->fref[ii]->error_mark, + avs2_dec->fref[ii]->decoded_lcu, + avs2_dec->fref[ii]->ref_poc[0], + avs2_dec->fref[ii]->ref_poc[1], + avs2_dec->fref[ii]->ref_poc[2], + avs2_dec->fref[ii]->ref_poc[3], + avs2_dec->fref[ii]->ref_poc[4], + avs2_dec->fref[ii]->ref_poc[5], + avs2_dec->fref[ii]->ref_poc[6] + ); + } + return; +} + +static int config_mc_buffer(struct AVS2Decoder_s *dec) +{ + int32_t i; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *pic; + struct avs2_frame_s *cur_pic = avs2_dec->hc.cur_pic; + + /*if (avs2_dec->img.type == I_IMG) + return 0; + */ + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "Entered config_mc_buffer....\n"); + if (avs2_dec->f_bg != NULL) { + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "config_mc_buffer for background (canvas_y %d, canvas_u_v %d)\n", + avs2_dec->f_bg->mc_canvas_y, avs2_dec->f_bg->mc_canvas_u_v); + /*WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (7 << 8) | (0<<1) | 1); L0:BG */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (15 << 8) | (0<<1) | 1); /* L0:BG*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (avs2_dec->f_bg->mc_canvas_u_v << 16) | + (avs2_dec->f_bg->mc_canvas_u_v << 8) | + avs2_dec->f_bg->mc_canvas_y); + /*WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (23 << 8) | (0<<1) | 1); L1:BG*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (31 << 8) | 
(0<<1) | 1); /* L1:BG*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (avs2_dec->f_bg->mc_canvas_u_v << 16) | + (avs2_dec->f_bg->mc_canvas_u_v << 8) | + avs2_dec->f_bg->mc_canvas_y); + } + + if (avs2_dec->img.type == I_IMG) + return 0; + + if (avs2_dec->img.type == P_IMG) { + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "config_mc_buffer for P_IMG, img type %d\n", + avs2_dec->img.type); + /*refer to prepare_RefInfo()*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0<<1) | 1); + for (i = 0; i < avs2_dec->img.num_of_references; i++) { + pic = avs2_dec->fref[i]; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v << 16) | + (pic->mc_canvas_u_v << 8) | + pic->mc_canvas_y); + + if (pic->error_mark) + cur_pic->error_mark = 1; + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "refid %x mc_canvas_u_v %x mc_canvas_y %x error_mark %x\n", + i, pic->mc_canvas_u_v, pic->mc_canvas_y, + pic->error_mark); + } + } else if (avs2_dec->img.type == F_IMG) { + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "config_mc_buffer for F_IMG, img type %d\n", + avs2_dec->img.type); + /*refer to prepare_RefInfo()*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0<<1) | 1); + for (i = 0; i < avs2_dec->img.num_of_references; i++) { + pic = avs2_dec->fref[i]; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v << 16) | + (pic->mc_canvas_u_v << 8) | + pic->mc_canvas_y); + + if (pic->error_mark) + cur_pic->error_mark = 1; + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "refid %x mc_canvas_u_v %x mc_canvas_y %x error_mark %x\n", + i, pic->mc_canvas_u_v, pic->mc_canvas_y, + pic->error_mark); + } + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (0<<1) | 1); + for (i = 0; i < avs2_dec->img.num_of_references; i++) { + pic = avs2_dec->fref[i]; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v << 16) | + (pic->mc_canvas_u_v << 8) | + pic->mc_canvas_y); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "refid %x mc_canvas_u_v %x 
mc_canvas_y %x\n", + i, pic->mc_canvas_u_v, pic->mc_canvas_y); + } + } else { + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "config_mc_buffer for B_IMG\n"); + /*refer to prepare_RefInfo()*/ + pic = avs2_dec->fref[1]; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0<<1) | 1); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v << 16) | + (pic->mc_canvas_u_v << 8) | + pic->mc_canvas_y); + + if (pic->error_mark) + cur_pic->error_mark = 1; + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "refid %x mc_canvas_u_v %x mc_canvas_y %x error_mark %x\n", + 1, pic->mc_canvas_u_v, pic->mc_canvas_y, + pic->error_mark); + + pic = avs2_dec->fref[0]; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (0<<1) | 1); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v<<16) | + (pic->mc_canvas_u_v<<8) | + pic->mc_canvas_y); + + if (pic->error_mark) + cur_pic->error_mark = 1; + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "refid %x mc_canvas_u_v %x mc_canvas_y %x error_mark %x\n", + 0, pic->mc_canvas_u_v, pic->mc_canvas_y, + pic->error_mark); + } + return 0; +} +#if 0 +static void mcrcc_get_hitrate(void) +{ + u32 tmp; + u32 raw_mcr_cnt; + u32 hit_mcr_cnt; + u32 byp_mcr_cnt_nchoutwin; + u32 byp_mcr_cnt_nchcanv; + int hitrate; + + if (debug & AVS2_DBG_CACHE) + pr_info("[cache_util.c] Entered mcrcc_get_hitrate...\n"); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x0<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x1<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x2<<1)); + byp_mcr_cnt_nchoutwin = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x3<<1)); + byp_mcr_cnt_nchcanv = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + + if (debug & AVS2_DBG_CACHE) { + pr_info("raw_mcr_cnt_total: %d\n",raw_mcr_cnt); + pr_info("hit_mcr_cnt_total: %d\n",hit_mcr_cnt); + 
pr_info("byp_mcr_cnt_nchoutwin_total: %d\n",byp_mcr_cnt_nchoutwin); + pr_info("byp_mcr_cnt_nchcanv_total: %d\n",byp_mcr_cnt_nchcanv); + } + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x4<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & AVS2_DBG_CACHE) + pr_info("miss_mcr_0_cnt_total: %d\n", tmp); + + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x5<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & AVS2_DBG_CACHE) + pr_info("miss_mcr_1_cnt_total: %d\n", tmp); + + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x6<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & AVS2_DBG_CACHE) + pr_info("hit_mcr_0_cnt_total: %d\n",tmp); + + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x7<<1)); + tmp= READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & AVS2_DBG_CACHE) + pr_info("hit_mcr_1_cnt_total: %d\n",tmp); + + if (raw_mcr_cnt != 0) { + hitrate = (hit_mcr_cnt / raw_mcr_cnt) * 100; + if (debug & AVS2_DBG_CACHE) + pr_info("MCRCC_HIT_RATE : %d\n", hitrate); + hitrate = ((byp_mcr_cnt_nchoutwin + byp_mcr_cnt_nchcanv) + /raw_mcr_cnt) * 100; + if (debug & AVS2_DBG_CACHE) + pr_info("MCRCC_BYP_RATE : %d\n", hitrate); + } else if (debug & AVS2_DBG_CACHE) { + pr_info("MCRCC_HIT_RATE : na\n"); + pr_info("MCRCC_BYP_RATE : na\n"); + } + return; +} + + +static void decomp_get_hitrate(void) +{ + u32 raw_mcr_cnt; + u32 hit_mcr_cnt; + int hitrate; + + if (debug & AVS2_DBG_CACHE) + pr_info("[cache_util.c] Entered decomp_get_hitrate...\n"); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x0<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x1<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + + if (debug & AVS2_DBG_CACHE) { + pr_info("hcache_raw_cnt_total: %d\n",raw_mcr_cnt); + pr_info("hcache_hit_cnt_total: %d\n",hit_mcr_cnt); + } + if (raw_mcr_cnt != 0) { + hitrate = (hit_mcr_cnt / raw_mcr_cnt) * 100; + if (debug & AVS2_DBG_CACHE) 
+ pr_info("DECOMP_HCACHE_HIT_RATE : %d\n", hitrate); + } else { + if (debug & AVS2_DBG_CACHE) + pr_info("DECOMP_HCACHE_HIT_RATE : na\n"); + } + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x2<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x3<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + + if (debug & AVS2_DBG_CACHE) { + pr_info("dcache_raw_cnt_total: %d\n", raw_mcr_cnt); + pr_info("dcache_hit_cnt_total: %d\n", hit_mcr_cnt); + } + if (raw_mcr_cnt != 0) { + hitrate = (hit_mcr_cnt / raw_mcr_cnt) * 100; + if (debug & AVS2_DBG_CACHE) + pr_info("DECOMP_DCACHE_HIT_RATE : %d\n", hitrate); + } else if (debug & AVS2_DBG_CACHE) { + pr_info("DECOMP_DCACHE_HIT_RATE : na\n"); + } +return; +} + +static void decomp_get_comprate(void) +{ + u32 raw_ucomp_cnt; + u32 fast_comp_cnt; + u32 slow_comp_cnt; + int comprate; + + if (debug & AVS2_DBG_CACHE) + pr_info("[cache_util.c] Entered decomp_get_comprate...\n"); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x4<<1)); + fast_comp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x5<<1)); + slow_comp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x6<<1)); + raw_ucomp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + if (debug & AVS2_DBG_CACHE) { + pr_info("decomp_fast_comp_total: %d\n", fast_comp_cnt); + pr_info("decomp_slow_comp_total: %d\n", slow_comp_cnt); + pr_info("decomp_raw_uncomp_total: %d\n", raw_ucomp_cnt); + } + + if (raw_ucomp_cnt != 0) { + comprate = ((fast_comp_cnt + slow_comp_cnt) + / raw_ucomp_cnt) * 100; + if (debug & AVS2_DBG_CACHE) + pr_info("DECOMP_COMP_RATIO : %d\n", comprate); + } else if (debug & AVS2_DBG_CACHE) { + pr_info("DECOMP_COMP_RATIO : na\n"); + } + return; +} +#endif + +static void config_mcrcc_axi_hw(struct AVS2Decoder_s *dec) +{ + uint32_t rdata32; + uint32_t rdata32_2; + 
struct avs2_decoder *avs2_dec = &dec->avs2_dec; + + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); /* reset mcrcc*/ + + if (avs2_dec->img.type == I_IMG) { /* I-PIC*/ + /* remove reset -- disables clock */ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x0); + return; + } +/* + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + mcrcc_get_hitrate(); + decomp_get_hitrate(); + decomp_get_comprate(); + } +*/ + if ((avs2_dec->img.type == B_IMG) || + (avs2_dec->img.type == F_IMG)) { /*B-PIC or F_PIC*/ + /*Programme canvas0 */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 0); + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + /*Programme canvas1 */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (1 << 1) | 0); + rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32_2 = rdata32_2 & 0xffff; + rdata32_2 = rdata32_2 | (rdata32_2 << 16); + if (rdata32 == rdata32_2) { + rdata32_2 = + READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32_2 = rdata32_2 & 0xffff; + rdata32_2 = rdata32_2 | (rdata32_2 << 16); + } + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32_2); + } else { /* P-PIC */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (1 << 1) | 0); + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + /*Programme canvas1*/ + rdata32 = + READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32); + } + /*enable mcrcc progressive-mode */ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); + return; +} + +static void config_mpred_hw(struct AVS2Decoder_s *dec) +{ + uint32_t data32; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *cur_pic = avs2_dec->hc.cur_pic; + struct avs2_frame_s *col_pic = avs2_dec->fref[0]; + 
int32_t mpred_mv_rd_start_addr; + int32_t mpred_curr_lcu_x; + int32_t mpred_curr_lcu_y; + int32_t mpred_mv_rd_end_addr; + int32_t above_en; + int32_t mv_wr_en; + int32_t mv_rd_en; + int32_t col_isIntra; + int mv_mem_unit; + if (avs2_dec->img.type != I_IMG) { + above_en = 1; + mv_wr_en = 1; + mv_rd_en = 1; + col_isIntra = 0; + } else { + above_en = 1; + mv_wr_en = 1; + mv_rd_en = 0; + col_isIntra = 0; + } + + mpred_mv_rd_start_addr = + col_pic->mpred_mv_wr_start_addr; + data32 = READ_VREG(HEVC_MPRED_CURR_LCU); + mpred_curr_lcu_x = data32 & 0xffff; + mpred_curr_lcu_y = (data32 >> 16) & 0xffff; + + mv_mem_unit = avs2_dec->lcu_size_log2 == 6 ? + 0x200 : (avs2_dec->lcu_size_log2 == 5 ? + 0x80 : 0x20); + + mpred_mv_rd_end_addr = + mpred_mv_rd_start_addr + + ((avs2_dec->lcu_x_num * + avs2_dec->lcu_y_num) * mv_mem_unit); + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "cur pic index %d col pic index %d\n", + cur_pic->index, col_pic->index); + + + WRITE_VREG(HEVC_MPRED_MV_WR_START_ADDR, + cur_pic->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_RD_START_ADDR, + col_pic->mpred_mv_wr_start_addr); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[MPRED CO_MV] write 0x%x read 0x%x\n", + cur_pic->mpred_mv_wr_start_addr, + col_pic->mpred_mv_wr_start_addr); + + data32 = + ((avs2_dec->bk_img_is_top_field) << 13) | + ((avs2_dec->hd.background_picture_enable & 1) << 12) | + ((avs2_dec->hd.curr_RPS.num_of_ref & 7) << 8) | + ((avs2_dec->hd.b_pmvr_enabled & 1) << 6) | + ((avs2_dec->img.is_top_field & 1) << 5) | + ((avs2_dec->img.is_field_sequence & 1) << 4) | + ((avs2_dec->img.typeb & 7) << 1) | + (avs2_dec->hd.background_reference_enable & 0x1); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "HEVC_MPRED_CTRL9 <= 0x%x(num of ref %d)\n", + data32, avs2_dec->hd.curr_RPS.num_of_ref); + WRITE_VREG(HEVC_MPRED_CTRL9, data32); + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "%s: dis %d %d %d %d %d %d %d fref0_ref_poc %d %d %d %d %d %d %d\n", + __func__, + avs2_dec->fref[0]->imgtr_fwRefDistance, + 
avs2_dec->fref[1]->imgtr_fwRefDistance, + avs2_dec->fref[2]->imgtr_fwRefDistance, + avs2_dec->fref[3]->imgtr_fwRefDistance, + avs2_dec->fref[4]->imgtr_fwRefDistance, + avs2_dec->fref[5]->imgtr_fwRefDistance, + avs2_dec->fref[6]->imgtr_fwRefDistance, + avs2_dec->fref[0]->ref_poc[0], + avs2_dec->fref[0]->ref_poc[1], + avs2_dec->fref[0]->ref_poc[2], + avs2_dec->fref[0]->ref_poc[3], + avs2_dec->fref[0]->ref_poc[4], + avs2_dec->fref[0]->ref_poc[5], + avs2_dec->fref[0]->ref_poc[6] + ); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "pic_distance %d, imgtr_next_P %d\n", + avs2_dec->img.pic_distance, avs2_dec->img.imgtr_next_P); + + + WRITE_VREG(HEVC_MPRED_CUR_POC, avs2_dec->img.pic_distance); + WRITE_VREG(HEVC_MPRED_COL_POC, avs2_dec->img.imgtr_next_P); + + /*below MPRED Ref_POC_xx_Lx registers + must follow Ref_POC_xx_L0 -> + Ref_POC_xx_L1 in pair write order!!!*/ + WRITE_VREG(HEVC_MPRED_L0_REF00_POC, + avs2_dec->fref[0]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF00_POC, + avs2_dec->fref[0]->ref_poc[0]); + + WRITE_VREG(HEVC_MPRED_L0_REF01_POC, + avs2_dec->fref[1]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF01_POC, + avs2_dec->fref[0]->ref_poc[1]); + + WRITE_VREG(HEVC_MPRED_L0_REF02_POC, + avs2_dec->fref[2]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF02_POC, + avs2_dec->fref[0]->ref_poc[2]); + + WRITE_VREG(HEVC_MPRED_L0_REF03_POC, + avs2_dec->fref[3]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF03_POC, + avs2_dec->fref[0]->ref_poc[3]); + + WRITE_VREG(HEVC_MPRED_L0_REF04_POC, + avs2_dec->fref[4]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF04_POC, + avs2_dec->fref[0]->ref_poc[4]); + + WRITE_VREG(HEVC_MPRED_L0_REF05_POC, + avs2_dec->fref[5]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF05_POC, + avs2_dec->fref[0]->ref_poc[5]); + + WRITE_VREG(HEVC_MPRED_L0_REF06_POC, + avs2_dec->fref[6]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF06_POC, + avs2_dec->fref[0]->ref_poc[6]); + + + WRITE_VREG(HEVC_MPRED_MV_RD_END_ADDR, + 
mpred_mv_rd_end_addr); +} + +static void config_dblk_hw(struct AVS2Decoder_s *dec) +{ + /* + * Picture level de-block parameter configuration here + */ + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + union param_u *rpm_param = &avs2_dec->param; + uint32_t data32; + + data32 = READ_VREG(HEVC_DBLK_CFG1); + data32 = (((data32 >> 20) & 0xfff) << 20) | + (((avs2_dec->input.sample_bit_depth == 10) + ? 0xa : 0x0) << 16) | /*[16 +: 4]: {luma_bd[1:0], + chroma_bd[1:0]}*/ + (((data32 >> 2) & 0x3fff) << 2) | + (((rpm_param->p.lcu_size == 6) + ? 0 : (rpm_param->p.lcu_size == 5) + ? 1 : 2) << 0);/*[ 0 +: 2]: lcu_size*/ + WRITE_VREG(HEVC_DBLK_CFG1, data32); + + data32 = (avs2_dec->img.height << 16) | + avs2_dec->img.width; + WRITE_VREG(HEVC_DBLK_CFG2, data32); + /* + [27 +: 1]: cross_slice_loopfilter_enable_flag + [26 +: 1]: loop_filter_disable + [25 +: 1]: useNSQT + [22 +: 3]: imgtype + [17 +: 5]: alpha_c_offset (-8~8) + [12 +: 5]: beta_offset (-8~8) + [ 6 +: 6]: chroma_quant_param_delta_u (-16~16) + [ 0 +: 6]: chroma_quant_param_delta_v (-16~16) + */ + data32 = ((avs2_dec->input.crossSliceLoopFilter + & 0x1) << 27) | + ((rpm_param->p.loop_filter_disable & 0x1) << 26) | + ((avs2_dec->input.useNSQT & 0x1) << 25) | + ((avs2_dec->img.type & 0x7) << 22) | + ((rpm_param->p.alpha_c_offset & 0x1f) << 17) | + ((rpm_param->p.beta_offset & 0x1f) << 12) | + ((rpm_param->p.chroma_quant_param_delta_cb & 0x3f) << 6) | + ((rpm_param->p.chroma_quant_param_delta_cr & 0x3f) << 0); + + WRITE_VREG(HEVC_DBLK_CFG9, data32); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[c] cfgDBLK: crossslice(%d),lfdisable(%d),bitDepth(%d),lcuSize(%d),NSQT(%d)\n", + avs2_dec->input.crossSliceLoopFilter, + rpm_param->p.loop_filter_disable, + avs2_dec->input.sample_bit_depth, + avs2_dec->lcu_size, + avs2_dec->input.useNSQT); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[c] cfgDBLK: alphaCOffset(%d),betaOffset(%d),quantDeltaCb(%d),quantDeltaCr(%d)\n", + rpm_param->p.alpha_c_offset, + rpm_param->p.beta_offset, + 
rpm_param->p.chroma_quant_param_delta_cb, + rpm_param->p.chroma_quant_param_delta_cr); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[c] cfgDBLK: .done.\n"); +} + +static void config_sao_hw(struct AVS2Decoder_s *dec) +{ + uint32_t data32; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *cur_pic = avs2_dec->hc.cur_pic; + + int lcu_size = 64; + int mc_buffer_size_u_v = + cur_pic->lcu_total * lcu_size*lcu_size/2; + int mc_buffer_size_u_v_h = + (mc_buffer_size_u_v + 0xffff) >> 16;/*64k alignment*/ + + data32 = READ_VREG(HEVC_SAO_CTRL0); + data32 &= (~0xf); + data32 |= avs2_dec->lcu_size_log2; + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "%s, lcu_size_log2 = %d, config HEVC_SAO_CTRL0 0x%x\n", + __func__, + avs2_dec->lcu_size_log2, + data32); + + WRITE_VREG(HEVC_SAO_CTRL0, data32); + +#ifndef AVS2_10B_MMU + if ((get_double_write_mode(dec) & 0x10) == 0) + WRITE_VREG(HEVC_CM_BODY_START_ADDR, cur_pic->mc_y_adr); +#endif + if ((get_double_write_mode(dec) & 0x20) == 0) { + WRITE_VREG(HEVC_SAO_Y_START_ADDR, cur_pic->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_START_ADDR, cur_pic->dw_u_v_adr); + WRITE_VREG(HEVC_SAO_Y_WPTR, cur_pic->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_WPTR, cur_pic->dw_u_v_adr); + } else { + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0xffffffff); + WRITE_VREG(HEVC_SAO_C_START_ADDR, 0xffffffff); + } +#ifdef AVS2_10B_MMU + WRITE_VREG(HEVC_CM_HEADER_START_ADDR, cur_pic->header_adr); +#endif +#ifdef AVS2_10B_MMU_DW + if (dec->dw_mmu_enable) { + WRITE_VREG(HEVC_CM_HEADER_START_ADDR2, cur_pic->dw_header_adr); + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0); + WRITE_VREG(HEVC_SAO_C_START_ADDR, 0); + } +#endif + + data32 = (mc_buffer_size_u_v_h << 16) << 1; + /*pr_info("data32=%x,mc_buffer_size_u_v_h=%x,lcu_total=%x\n", + data32, mc_buffer_size_u_v_h, cur_pic->lcu_total);*/ + WRITE_VREG(HEVC_SAO_Y_LENGTH, data32); + + data32 = (mc_buffer_size_u_v_h << 16); + WRITE_VREG(HEVC_SAO_C_LENGTH, data32); + +#ifdef AVS2_10B_NV21 +#ifdef DOS_PROJECT + data32 = 
READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + /*[13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32*/ + data32 |= (MEM_MAP_MODE << 12); + data32 &= (~0x3); + data32 |= 0x1; /* [1]:dw_disable [0]:cm_disable*/ + + /* + * [31:24] ar_fifo1_axi_thred + * [23:16] ar_fifo0_axi_thred + * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes + * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + * [11:08] axi_lendian_C + * [07:04] axi_lendian_Y + * [3] reserved + * [2] clk_forceon + * [1] dw_disable:disable double write output + * [0] cm_disable:disable compress output + */ + data32 &= (~(3 << 14)); + data32 |= (2 << 14); + + WRITE_VREG(HEVC_SAO_CTRL1, data32); + /*[23:22] dw_v1_ctrl [21:20] dw_v0_ctrl [19:18] dw_h1_ctrl + [17:16] dw_h0_ctrl*/ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /*set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + ata32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /*[5:4] address_format 00:linear 01:32x32 10:64x32*/ + data32 |= (MEM_MAP_MODE << 4); + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#else + /*m8baby test1902*/ + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + /*[13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32*/ + data32 |= (MEM_MAP_MODE << 12); + data32 &= (~0xff0); + /*data32 |= 0x670;*/ /*Big-Endian per 64-bit*/ + data32 |= 0x880; /*.Big-Endian per 64-bit */ + data32 &= (~0x3); + data32 |= 0x1; /*[1]:dw_disable [0]:cm_disable*/ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + /* [23:22] dw_v1_ctrl [21:20] dw_v0_ctrl + [19:18] dw_h1_ctrl [17:16] dw_h0_ctrl*/ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /* set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + /* + * [3:0] little_endian + * [5:4] address_format 00:linear 01:32x32 10:64x32 + * [7:6] reserved + * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte + * [11:10] reserved + * [12] CbCr_byte_swap + * [31:13] reserved + */ + + data32 = 
READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /*[5:4] address_format 00:linear 01:32x32 10:64x32*/ + data32 |= (MEM_MAP_MODE << 4); + data32 &= (~0xF); + data32 |= 0x8; /*Big-Endian per 64-bit*/ + + data32 &= (~(3 << 8)); + data32 |= (2 << 8); + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#endif +#else + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~(3 << 14)); + data32 |= (2 << 14); /* line align with 64*/ + data32 &= (~0x3000); + data32 |= (MEM_MAP_MODE << 12); /* [13:12] axi_aformat, 0-Linear, + 1-32x32, 2-64x32 */ + data32 &= (~0xff0); +#ifdef AVS2_10B_MMU_DW + if (dec->dw_mmu_enable == 0) + data32 |= ((dec->endian >> 8) & 0xfff); +#else + data32 |= ((dec->endian >> 8) & 0xfff); /* data32 |= 0x670; Big-Endian per 64-bit */ +#endif + data32 &= (~0x3); /*[1]:dw_disable [0]:cm_disable*/ +#if 0 + if (get_cpu_major_id() < MESON_CPU_MAJOR_ID_G12A) { + if (get_double_write_mode(dec) == 0) + data32 |= 0x2; /*disable double write*/ +#ifndef AVS2_10B_MMU + else + if (get_double_write_mode(dec) & 0x10) + data32 |= 0x1; /*disable cm*/ +#endif + } +#endif + if (get_double_write_mode(dec) == 0) + data32 |= 0x2; /*disable double write*/ + else if (get_double_write_mode(dec) & 0x10) + data32 |= 0x1; /*disable cm*/ + + /* + * [31:24] ar_fifo1_axi_thred + * [23:16] ar_fifo0_axi_thred + * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes + * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + * [11:08] axi_lendian_C + * [07:04] axi_lendian_Y + * [3] reserved + * [2] clk_forceon + * [1] dw_disable:disable double write output + * [0] cm_disable:disable compress output + */ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + + if (get_double_write_mode(dec) & 0x10) { + /* [23:22] dw_v1_ctrl + [21:20] dw_v0_ctrl + [19:18] dw_h1_ctrl + [17:16] dw_h0_ctrl + */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /*set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } else { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) + 
WRITE_VREG(HEVC_SAO_CTRL26, 0); + + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 &= (~(0xff << 16)); + if ((get_double_write_mode(dec) & 0xf) == 8 || + (get_double_write_mode(dec) & 0xf) == 9) { + data32 |= (0xff<<16); + WRITE_VREG(HEVC_SAO_CTRL26, 0xf); + } else if ((get_double_write_mode(dec) & 0xf) == 2 || + (get_double_write_mode(dec) & 0xf) == 3) + data32 |= (0xff<<16); + else if ((get_double_write_mode(dec) & 0xf) == 4) + data32 |= (0x33<<16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } + + /* + * [3:0] little_endian + * [5:4] address_format 00:linear 01:32x32 10:64x32 + * [7:6] reserved + * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte + * [11:10] reserved + * [12] CbCr_byte_swap + * [31:13] reserved + */ + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /* [5:4] -- address_format 00:linear 01:32x32 10:64x32 */ + data32 |= (mem_map_mode << 4); + data32 &= (~0xF); + data32 |= (dec->endian & 0xf); /* valid only when double write only */ + /*data32 |= 0x8;*/ /* Big-Endian per 64-bit */ + data32 &= (~(3 << 8)); + data32 |= (2 << 8); /* line align with 64 for dw only */ + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#endif +#ifndef AVS2_10B_NV21 +#ifdef AVS2_10B_MMU_DW + if (dec->dw_mmu_enable) { + struct BuffInfo_s *buf_spec = NULL; + buf_spec = &dec->work_space_buf_store; + WRITE_VREG(HEVC_DW_VH0_ADDDR, buf_spec->mmu_vbh_dw.buf_start + (2 * DW_VBH_BUF_SIZE(buf_spec))); + WRITE_VREG(HEVC_DW_VH1_ADDDR, buf_spec->mmu_vbh_dw.buf_start + (3 * DW_VBH_BUF_SIZE(buf_spec))); + } +#endif +#endif + +} + +static void reconstructCoefficients(struct AVS2Decoder_s *dec, + struct ALFParam_s *alfParam) +{ + int32_t g, sum, i, coeffPred; + for (g = 0; g < alfParam->filters_per_group; g++) { + sum = 0; + for (i = 0; i < alfParam->num_coeff - 1; i++) { + sum += (2 * alfParam->coeffmulti[g][i]); + dec->m_filterCoeffSym[g][i] = + alfParam->coeffmulti[g][i]; + /*pr_info("[t] dec->m_filterCoeffSym[%d][%d]=0x%x\n", + g, i, dec->m_filterCoeffSym[g][i]);*/ + 
} + coeffPred = (1 << ALF_NUM_BIT_SHIFT) - sum; + dec->m_filterCoeffSym[g][alfParam->num_coeff - 1] + = coeffPred + + alfParam->coeffmulti[g][alfParam->num_coeff - 1]; + /*pr_info("[t] dec->m_filterCoeffSym[%d][%d]=0x%x\n", + g, (alfParam->num_coeff - 1), + dec->m_filterCoeffSym[g][alfParam->num_coeff - 1]);*/ + } +} + +static void reconstructCoefInfo(struct AVS2Decoder_s *dec, + int32_t compIdx, struct ALFParam_s *alfParam) +{ + int32_t i; + if (compIdx == ALF_Y) { + if (alfParam->filters_per_group > 1) { + for (i = 1; i < NO_VAR_BINS; ++i) { + if (alfParam->filterPattern[i]) + dec->m_varIndTab[i] = + dec->m_varIndTab[i - 1] + 1; + else + dec->m_varIndTab[i] = + dec->m_varIndTab[i - 1]; + } + } + } + reconstructCoefficients(dec, alfParam); +} + +static void config_alf_hw(struct AVS2Decoder_s *dec) +{ + /* + * Picture level ALF parameter configuration here + */ + uint32_t data32; + int32_t i, j; + int32_t m_filters_per_group; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct ALFParam_s *m_alfPictureParam_y = + &avs2_dec->m_alfPictureParam[0]; + struct ALFParam_s *m_alfPictureParam_cb = + &avs2_dec->m_alfPictureParam[1]; + struct ALFParam_s *m_alfPictureParam_cr = + &avs2_dec->m_alfPictureParam[2]; + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[t]alfy,cidx(%d),flag(%d),filters_per_group(%d),filterPattern[0]=0x%x,[15]=0x%x\n", + m_alfPictureParam_y->componentID, + m_alfPictureParam_y->alf_flag, + m_alfPictureParam_y->filters_per_group, + m_alfPictureParam_y->filterPattern[0], + m_alfPictureParam_y->filterPattern[15]); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[t]alfy,num_coeff(%d),coeffmulti[0][0]=0x%x,[0][1]=0x%x,[1][0]=0x%x,[1][1]=0x%x\n", + m_alfPictureParam_y->num_coeff, + m_alfPictureParam_y->coeffmulti[0][0], + m_alfPictureParam_y->coeffmulti[0][1], + m_alfPictureParam_y->coeffmulti[1][0], + m_alfPictureParam_y->coeffmulti[1][1]); + + /*Cr*/ + for (i = 0; i < 16; i++) + dec->m_varIndTab[i] = 0; + for (j = 0; j < 16; j++) + for (i = 0; i < 9; i++) + 
dec->m_filterCoeffSym[j][i] = 0; + reconstructCoefInfo(dec, 2, m_alfPictureParam_cr); + data32 = + ((dec->m_filterCoeffSym[0][4] & 0xf) << 28) | + ((dec->m_filterCoeffSym[0][3] & 0x7f) << 21) | + ((dec->m_filterCoeffSym[0][2] & 0x7f) << 14) | + ((dec->m_filterCoeffSym[0][1] & 0x7f) << 7) | + ((dec->m_filterCoeffSym[0][0] & 0x7f) << 0); + WRITE_VREG(HEVC_DBLK_CFGD, data32); + data32 = + ((dec->m_filterCoeffSym[0][8] & 0x7f) << 24) | + ((dec->m_filterCoeffSym[0][7] & 0x7f) << 17) | + ((dec->m_filterCoeffSym[0][6] & 0x7f) << 10) | + ((dec->m_filterCoeffSym[0][5] & 0x7f) << 3) | + (((dec->m_filterCoeffSym[0][4] >> 4) & 0x7) << 0); + WRITE_VREG(HEVC_DBLK_CFGD, data32); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[c] pic_alf_on_cr(%d), alf_cr_coef(%d %d %d %d %d %d %d %d %d)\n", + m_alfPictureParam_cr->alf_flag, + dec->m_filterCoeffSym[0][0], + dec->m_filterCoeffSym[0][1], + dec->m_filterCoeffSym[0][2], + dec->m_filterCoeffSym[0][3], + dec->m_filterCoeffSym[0][4], + dec->m_filterCoeffSym[0][5], + dec->m_filterCoeffSym[0][6], + dec->m_filterCoeffSym[0][7], + dec->m_filterCoeffSym[0][8]); + + /* Cb*/ + for (j = 0; j < 16; j++) + for (i = 0; i < 9; i++) + dec->m_filterCoeffSym[j][i] = 0; + reconstructCoefInfo(dec, 1, m_alfPictureParam_cb); + data32 = + ((dec->m_filterCoeffSym[0][4] & 0xf) << 28) | + ((dec->m_filterCoeffSym[0][3] & 0x7f) << 21) | + ((dec->m_filterCoeffSym[0][2] & 0x7f) << 14) | + ((dec->m_filterCoeffSym[0][1] & 0x7f) << 7) | + ((dec->m_filterCoeffSym[0][0] & 0x7f) << 0); + WRITE_VREG(HEVC_DBLK_CFGD, data32); + data32 = + ((dec->m_filterCoeffSym[0][8] & 0x7f) << 24) | + ((dec->m_filterCoeffSym[0][7] & 0x7f) << 17) | + ((dec->m_filterCoeffSym[0][6] & 0x7f) << 10) | + ((dec->m_filterCoeffSym[0][5] & 0x7f) << 3) | + (((dec->m_filterCoeffSym[0][4] >> 4) & 0x7) << 0); + WRITE_VREG(HEVC_DBLK_CFGD, data32); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[c] pic_alf_on_cb(%d), alf_cb_coef(%d %d %d %d %d %d %d %d %d)\n", + m_alfPictureParam_cb->alf_flag, + 
dec->m_filterCoeffSym[0][0], + dec->m_filterCoeffSym[0][1], + dec->m_filterCoeffSym[0][2], + dec->m_filterCoeffSym[0][3], + dec->m_filterCoeffSym[0][4], + dec->m_filterCoeffSym[0][5], + dec->m_filterCoeffSym[0][6], + dec->m_filterCoeffSym[0][7], + dec->m_filterCoeffSym[0][8]); + + /* Y*/ + for (j = 0; j < 16; j++) + for (i = 0; i < 9; i++) + dec->m_filterCoeffSym[j][i] = 0; + reconstructCoefInfo(dec, 0, m_alfPictureParam_y); + data32 = + ((dec->m_varIndTab[7] & 0xf) << 28) | + ((dec->m_varIndTab[6] & 0xf) << 24) | + ((dec->m_varIndTab[5] & 0xf) << 20) | + ((dec->m_varIndTab[4] & 0xf) << 16) | + ((dec->m_varIndTab[3] & 0xf) << 12) | + ((dec->m_varIndTab[2] & 0xf) << 8) | + ((dec->m_varIndTab[1] & 0xf) << 4) | + ((dec->m_varIndTab[0] & 0xf) << 0); + WRITE_VREG(HEVC_DBLK_CFGD, data32); + data32 = ((dec->m_varIndTab[15] & 0xf) << 28) | + ((dec->m_varIndTab[14] & 0xf) << 24) | + ((dec->m_varIndTab[13] & 0xf) << 20) | + ((dec->m_varIndTab[12] & 0xf) << 16) | + ((dec->m_varIndTab[11] & 0xf) << 12) | + ((dec->m_varIndTab[10] & 0xf) << 8) | + ((dec->m_varIndTab[9] & 0xf) << 4) | + ((dec->m_varIndTab[8] & 0xf) << 0); + WRITE_VREG(HEVC_DBLK_CFGD, data32); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[c] pic_alf_on_y(%d), alf_y_tab(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)\n", + m_alfPictureParam_y->alf_flag, + dec->m_varIndTab[0], + dec->m_varIndTab[1], + dec->m_varIndTab[2], + dec->m_varIndTab[3], + dec->m_varIndTab[4], + dec->m_varIndTab[5], + dec->m_varIndTab[6], + dec->m_varIndTab[7], + dec->m_varIndTab[8], + dec->m_varIndTab[9], + dec->m_varIndTab[10], + dec->m_varIndTab[11], + dec->m_varIndTab[12], + dec->m_varIndTab[13], + dec->m_varIndTab[14], + dec->m_varIndTab[15]); + + m_filters_per_group = + (m_alfPictureParam_y->alf_flag == 0) ? 
+ 1 : m_alfPictureParam_y->filters_per_group; + for (i = 0; i < m_filters_per_group; i++) { + data32 = + ((dec->m_filterCoeffSym[i][4] & 0xf) << 28) | + ((dec->m_filterCoeffSym[i][3] & 0x7f) << 21) | + ((dec->m_filterCoeffSym[i][2] & 0x7f) << 14) | + ((dec->m_filterCoeffSym[i][1] & 0x7f) << 7) | + ((dec->m_filterCoeffSym[i][0] & 0x7f) << 0); + WRITE_VREG(HEVC_DBLK_CFGD, data32); + data32 = + /*[31] last indication*/ + ((i == m_filters_per_group-1) << 31) | + ((dec->m_filterCoeffSym[i][8] & 0x7f) << 24) | + ((dec->m_filterCoeffSym[i][7] & 0x7f) << 17) | + ((dec->m_filterCoeffSym[i][6] & 0x7f) << 10) | + ((dec->m_filterCoeffSym[i][5] & 0x7f) << 3) | + (((dec->m_filterCoeffSym[i][4] >> 4) & 0x7) << 0); + WRITE_VREG(HEVC_DBLK_CFGD, data32); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[c] alf_y_coef[%d](%d %d %d %d %d %d %d %d %d)\n", + i, dec->m_filterCoeffSym[i][0], + dec->m_filterCoeffSym[i][1], + dec->m_filterCoeffSym[i][2], + dec->m_filterCoeffSym[i][3], + dec->m_filterCoeffSym[i][4], + dec->m_filterCoeffSym[i][5], + dec->m_filterCoeffSym[i][6], + dec->m_filterCoeffSym[i][7], + dec->m_filterCoeffSym[i][8]); + } + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[c] cfgALF .done.\n"); +} + +static void config_other_hw(struct AVS2Decoder_s *dec) +{ + uint32_t data32; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *cur_pic = avs2_dec->hc.cur_pic; + int bit_depth = cur_pic->bit_depth; + int losless_comp_header_size = + compute_losless_comp_header_size( + dec, cur_pic->pic_w, + cur_pic->pic_h); + int losless_comp_body_size = + compute_losless_comp_body_size( + dec, cur_pic->pic_w, + cur_pic->pic_h, (bit_depth == AVS2_BITS_10)); + cur_pic->comp_body_size = losless_comp_body_size; + +#ifdef LOSLESS_COMPRESS_MODE + data32 = READ_VREG(HEVC_SAO_CTRL5); + if (bit_depth == AVS2_BITS_10) + data32 &= ~(1 << 9); + else + data32 |= (1 << 9); + + WRITE_VREG(HEVC_SAO_CTRL5, data32); + +#ifdef AVS2_10B_MMU + /*bit[4] : paged_mem_mode*/ + 
WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4)); +#else + /*bit[3] smem mdoe*/ + if (bit_depth == AVS2_BITS_10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0 << 3)); + else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (1 << 3)); +#endif + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, (losless_comp_body_size >> 5)); + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,(0xff<<20) | (0xff<<10) | 0xff);*/ + WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size); +#else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#endif +} + +static u32 init_cuva_size; +static int cuva_data_is_avaible(struct AVS2Decoder_s *dec) +{ + u32 reg_val; + + reg_val = READ_VREG(AVS2_CUVA_DATA_SIZE); + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s:reg_val: %u \n", + __func__, reg_val); + if (reg_val != 0 && reg_val != init_cuva_size) + return 1; + else + return 0; +} + +static void config_cuva_buf(struct AVS2Decoder_s *dec) +{ + WRITE_VREG(AVS2_CUVA_ADR, dec->cuva_phy_addr); + init_cuva_size = (dec->cuva_size >> 4) << 16; + WRITE_VREG(AVS2_CUVA_DATA_SIZE, init_cuva_size); +} + +static void set_cuva_data(struct AVS2Decoder_s *dec) +{ + int i; + unsigned short *cuva_adr; + unsigned int size_reg_val = + READ_VREG(AVS2_CUVA_DATA_SIZE); + unsigned int cuva_count = 0; + int cuva_size = 0; + struct avs2_frame_s *pic = dec->avs2_dec.hc.cur_pic; + if (pic == NULL || 0 == cuva_data_is_avaible(dec)) { + avs2_print(dec, AVS2_DBG_HDR_INFO, + "%s:pic 0x%p or data not avaible\n", + __func__, pic); + return; + } + + cuva_adr = (unsigned short *)dec->cuva_addr; + cuva_count = ((size_reg_val >> 16) << 4) >> 1; + cuva_size = dec->cuva_size; + dec->hdr_flag |= HDR_CUVA_MASK; + + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s:pic 0x%p cuva_count(%d) cuva_size(%d) hdr_flag 0x%x\n", + __func__, pic, cuva_count, cuva_size, dec->hdr_flag); + if (cuva_size > 0 && cuva_count > 0) { + int new_size; + char *new_buf; + + new_size = cuva_size; + 
new_buf = vzalloc(new_size); + if (new_buf) { + unsigned char *p = new_buf; + int len = 0; + pic->cuva_data_buf = new_buf; + + for (i = 0; i < cuva_count; i += 4) { + int j; + + for (j = 0; j < 4; j++) { + unsigned short aa = cuva_adr[i + 3 - j]; + *p = aa & 0xff; + p++; + len++; + } + } + if (len > 0) { + pic->cuva_data_size = len; + } + + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "cuva: (size %d)\n", + pic->cuva_data_size); + if (get_dbg_flag(dec) & AVS2_DBG_HDR_DATA) { + for (i = 0; i < pic->cuva_data_size; i++) { + pr_info("%02x ", pic->cuva_data_buf[i]); + if (((i + 1) & 0xf) == 0) + pr_info("\n"); + } + pr_info("\n"); + } + + } else { + avs2_print(dec, 0, "new buf alloc failed\n"); + if (pic->cuva_data_buf) + vfree(pic->cuva_data_buf); + pic->cuva_data_buf = NULL; + pic->cuva_data_size = 0; + } + } +} + +static void release_cuva_data(struct avs2_frame_s *pic) +{ + if (pic == NULL) + return; + if (pic->cuva_data_buf) { + vfree(pic->cuva_data_buf); + } + pic->cuva_data_buf = NULL; + pic->cuva_data_size = 0; +} + +static void avs2_config_work_space_hw(struct AVS2Decoder_s *dec) +{ + struct BuffInfo_s *buf_spec = dec->work_space_buf; +#ifdef LOSLESS_COMPRESS_MODE + int losless_comp_header_size = + compute_losless_comp_header_size( + dec, dec->init_pic_w, + dec->init_pic_h); + int losless_comp_body_size = + compute_losless_comp_body_size(dec, + dec->init_pic_w, + dec->init_pic_h, buf_alloc_depth == 10); +#endif +#ifdef AVS2_10B_MMU + unsigned int data32; +#endif + if (debug && dec->init_flag == 0) + avs2_print(dec, 0, + "%s %x %x %x %x %x %x %x %x %x %x %x %x %x\n", + __func__, + buf_spec->ipp.buf_start, + buf_spec->start_adr, + buf_spec->short_term_rps.buf_start, + buf_spec->rcs.buf_start, + buf_spec->sps.buf_start, + buf_spec->pps.buf_start, + buf_spec->sao_up.buf_start, + buf_spec->swap_buf.buf_start, + buf_spec->swap_buf2.buf_start, + buf_spec->scalelut.buf_start, + buf_spec->dblk_para.buf_start, + buf_spec->dblk_data.buf_start, + 
buf_spec->dblk_data2.buf_start); + WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, buf_spec->ipp.buf_start); + if ((debug & AVS2_DBG_SEND_PARAM_WITH_REG) == 0) + WRITE_VREG(HEVC_RPM_BUFFER, (u32)dec->rpm_phy_addr); + WRITE_VREG(AVS2_ALF_SWAP_BUFFER, buf_spec->short_term_rps.buf_start); + WRITE_VREG(HEVC_RCS_BUFFER, buf_spec->rcs.buf_start); + WRITE_VREG(HEVC_SPS_BUFFER, buf_spec->sps.buf_start); + WRITE_VREG(HEVC_PPS_BUFFER, buf_spec->pps.buf_start); + //WRITE_VREG(HEVC_SAO_UP, buf_spec->sao_up.buf_start); +#ifdef AVS2_10B_MMU + WRITE_VREG(AVS2_MMU_MAP_BUFFER, dec->frame_mmu_map_phy_addr); +#else + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER, buf_spec->swap_buf.buf_start); +#endif +#ifdef AVS2_10B_MMU_DW + if (dec->dw_mmu_enable) { + //WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR2, FRAME_MMU_MAP_ADDR_DW); + WRITE_VREG(HEVC_SAO_MMU_DMA_CTRL2, dec->dw_frame_mmu_map_phy_addr); + } +#endif + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, buf_spec->swap_buf2.buf_start); + //WRITE_VREG(HEVC_SCALELUT, buf_spec->scalelut.buf_start); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + if (buf_spec->max_width <= 4096 && buf_spec->max_height <= 2304) + WRITE_VREG(HEVC_DBLK_CFG3, 0x404010); //default value + else + WRITE_VREG(HEVC_DBLK_CFG3, 0x808020); // make left storage 2 x 4k] + avs2_print(dec, AVS2_DBG_BUFMGR, + "HEVC_DBLK_CFG3 = %x\n", READ_VREG(HEVC_DBLK_CFG3)); + } + + /* cfg_p_addr */ + WRITE_VREG(HEVC_DBLK_CFG4, buf_spec->dblk_para.buf_start); + /* cfg_d_addr */ + WRITE_VREG(HEVC_DBLK_CFG5, buf_spec->dblk_data.buf_start); + + WRITE_VREG(HEVC_DBLK_CFGE, buf_spec->dblk_data2.buf_start); + +#ifdef LOSLESS_COMPRESS_MODE + data32 = READ_VREG(HEVC_SAO_CTRL5); +#if 1 + data32 &= ~(1<<9); +#else + if (params->p.bit_depth != 0x00) + data32 &= ~(1<<9); + else + data32 |= (1<<9); +#endif + WRITE_VREG(HEVC_SAO_CTRL5, data32); +#ifdef AVS2_10B_MMU + /*bit[4] : paged_mem_mode*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4)); + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0); +#else + /* bit[3] smem mode*/ + 
WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0<<3)); + + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, (losless_comp_body_size >> 5)); +#endif + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL2,(losless_comp_body_size >> 5));*/ + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,(0xff<<20) | (0xff<<10) | 0xff);*/ +/*8-bit mode */ + WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size); +#else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#endif + +#ifdef AVS2_10B_MMU + WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR, buf_spec->mmu_vbh.buf_start); + WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR, buf_spec->mmu_vbh.buf_start + + VBH_BUF_SIZE(buf_spec)); + /*data32 = READ_VREG(HEVC_SAO_CTRL9);*/ + /*data32 |= 0x1;*/ + /*WRITE_VREG(HEVC_SAO_CTRL9, data32);*/ + + /* use HEVC_CM_HEADER_START_ADDR */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 |= (1<<10); +#if 1 + if (debug & AVS2_DBG_FORCE_UNCOMPRESS) + data32 |= 0x80; +#endif + WRITE_VREG(HEVC_SAO_CTRL5, data32); + +#endif + +#ifdef AVS2_10B_MMU_DW + if (dec->dw_mmu_enable) { + u32 data_tmp; + data_tmp = READ_VREG(HEVC_SAO_CTRL9); + data_tmp |= (1<<10); + WRITE_VREG(HEVC_SAO_CTRL9, data_tmp); + + WRITE_VREG(HEVC_CM_BODY_LENGTH2,losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET2,losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH2,losless_comp_header_size); + + WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR2, buf_spec->mmu_vbh_dw.buf_start); + WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR2, buf_spec->mmu_vbh_dw.buf_start + DW_VBH_BUF_SIZE(buf_spec)); + + /* use HEVC_CM_HEADER_START_ADDR */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 |= (1<<15); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } +#endif + + + WRITE_VREG(LMEM_DUMP_ADR, (u32)dec->lmem_phy_addr); + + WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, buf_spec->mpred_above.buf_start); + +#ifdef CO_MV_COMPRESS + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) { + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 |= (1 << 1); + 
/* Tail of the CO_MV_COMPRESS mpred setup started above: commit bit 1 of
 * HEVC_MPRED_CTRL4 (set only on T7 and later — see the guard above). */
	WRITE_VREG(HEVC_MPRED_CTRL4, data32);
	}
#endif
}

/*
 * Pulse the MPP decompression performance-monitor control (1 then 0) to
 * reset its counters. Debug aid, gated by AVS2_DBG_CACHE logging.
 */
static void decomp_perfcount_reset(void)
{
	if (debug & AVS2_DBG_CACHE)
		pr_info("[cache_util.c] Entered decomp_perfcount_reset...\n");
	WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x1);
	WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x0);
	return;
}

/*
 * Pulse the MC reference-cache (MCRCC) performance-monitor control
 * (1 then 0) to reset its counters. Debug aid, mirrors
 * decomp_perfcount_reset().
 */
static void mcrcc_perfcount_reset(void)
{
	if (debug & AVS2_DBG_CACHE)
		pr_info("[cache_util.c] Entered mcrcc_perfcount_reset...\n");
	WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x1);
	WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x0);
	return;
}

/*
 * Program the HEVC/AVS2 hardware front end for a decode session:
 * parser interrupt enables, stream-shift and CABAC engines, IQIT scale
 * LUT, decode mode word, parser command list, mpred block reset, the
 * default AVS2 weighting-quant matrices, and the IPP/deblocking blocks.
 * Also re-arms sequence search after repeated buffer-manager errors
 * (error_handle_policy bit 0). The order of register writes here follows
 * the hardware bring-up sequence — do not reorder.
 */
static void avs2_init_decoder_hw(struct AVS2Decoder_s *dec)
{
	unsigned int data32;
	unsigned int decode_mode;
	int i;
	/*if (debug & AVS2_DBG_BUFMGR_MORE)
		pr_info("%s\n", __func__);*/
	data32 = READ_VREG(HEVC_PARSER_INT_CONTROL);
#if 1
	/* set bit 31~29 to 3 if HEVC_STREAM_FIFO_CTL[29] is 1 */
	data32 &= ~(7 << 29);
	data32 |= (3 << 29);
#endif
	data32 = data32 |
		(1 << 24) |/*stream_buffer_empty_int_amrisc_enable*/
		(1 << 22) |/*stream_fifo_empty_int_amrisc_enable*/
		(1 << 7) |/*dec_done_int_cpu_enable*/
		(1 << 4) |/*startcode_found_int_cpu_enable*/
		(0 << 3) |/*startcode_found_int_amrisc_enable*/
		(1 << 0) /*parser_int_enable*/
		;
	WRITE_VREG(HEVC_PARSER_INT_CONTROL, data32);

	data32 = READ_VREG(HEVC_SHIFT_STATUS);
	data32 = data32 |
		(0 << 1) |/*emulation_check_off VP9
			do not have emulation*/
		(1 << 0)/*startcode_check_on*/
		;
	WRITE_VREG(HEVC_SHIFT_STATUS, data32);
	WRITE_VREG(HEVC_SHIFT_CONTROL,
		(6 << 20) | /* emu_push_bits  (6-bits for AVS2)*/
		(0 << 19) | /* emu_3_enable, maybe turned on in microcode*/
		(0 << 18) | /* emu_2_enable, maybe turned on in microcode*/
		(0 << 17) | /* emu_1_enable, maybe turned on in microcode*/
		(0 << 16) | /* emu_0_enable, maybe turned on in microcode*/
		(0 << 14) | /*disable_start_code_protect*/
		(3 << 6) | /* sft_valid_wr_position*/
		(2 << 4) | /* emulate_code_length_sub_1*/
		(2 << 1) | /* start_code_length_sub_1*/
		(1 << 0) /* stream_shift_enable*/
		);

	WRITE_VREG(HEVC_SHIFT_LENGTH_PROTECT,
		(0 << 30) | /*data_protect_fill_00_enable*/
		(1 << 29) /*data_protect_fill_ff_enable*/
		);
	WRITE_VREG(HEVC_CABAC_CONTROL,
		(1 << 0)/*cabac_enable*/
		);

	WRITE_VREG(HEVC_PARSER_CORE_CONTROL,
		(1 << 0)/* hevc_parser_core_clk_en*/
		);


	WRITE_VREG(HEVC_DEC_STATUS_REG, 0);

	/*Initial IQIT_SCALELUT memory -- just to avoid X in simulation*/
	/* Either batch-load the LUT over RDMA or zero all 1024 entries by
	 * CPU writes. */
	if (is_rdma_enable())
		rdma_back_end_work(dec->rdma_phy_adr, RDMA_SIZE);
	else {
		WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0);/*cfg_p_addr*/
		for (i = 0; i < 1024; i++)
			WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, 0);
	}

#ifdef ENABLE_SWAP_TEST
	WRITE_VREG(HEVC_STREAM_SWAP_TEST, 100);
#else
	WRITE_VREG(HEVC_STREAM_SWAP_TEST, 0);
#endif
	/* Select single / multi-framebase / multi-streambase operation. */
	if (!dec->m_ins_flag)
		decode_mode = DECODE_MODE_SINGLE;
	else if (vdec_frame_based(hw_to_vdec(dec)))
		decode_mode = DECODE_MODE_MULTI_FRAMEBASE;
	else
		decode_mode = DECODE_MODE_MULTI_STREAMBASE;
	/* After enough buffer-manager errors (thresholds packed into
	 * re_search_seq_threshold: low byte = error count, next byte =
	 * frame count), restart sequence-header search and drop P/B
	 * frames until the next I frame. */
	if (dec->avs2_dec.bufmgr_error_flag &&
		(error_handle_policy & 0x1)) {
		dec->bufmgr_error_count++;
		dec->avs2_dec.bufmgr_error_flag = 0;
		if (dec->bufmgr_error_count >
			(re_search_seq_threshold & 0xff)
			&& dec->frame_count >
			((re_search_seq_threshold >> 8) & 0xff)) {
			struct avs2_decoder *avs2_dec = &dec->avs2_dec;
			dec->start_decoding_flag = 0;
			avs2_dec->hd.vec_flag = 1;
			dec->skip_PB_before_I = 1;
			avs2_print(dec, 0,
				"!!Bufmgr error, search seq again (0x%x %d %d)\n",
				error_handle_policy,
				dec->frame_count,
				dec->bufmgr_error_count);
			dec->bufmgr_error_count = 0;
		}
	}
	decode_mode |= (dec->start_decoding_flag << 16);

	WRITE_VREG(DECODE_MODE, decode_mode);
	WRITE_VREG(HEVC_DECODE_SIZE, 0);
	WRITE_VREG(HEVC_DECODE_COUNT, 0);

	/*Send parser_cmd*/
	WRITE_VREG(HEVC_PARSER_CMD_WRITE, (1 << 16) | (0 << 0));
	for (i = 0; i < PARSER_CMD_NUMBER; i++)
		WRITE_VREG(HEVC_PARSER_CMD_WRITE, parser_cmd[i]);
	WRITE_VREG(HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0);
	WRITE_VREG(HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1);
	WRITE_VREG(HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2);


	WRITE_VREG(HEVC_PARSER_IF_CONTROL,
		(1 << 9) | /* parser_alf_if_en*/
		/*  (1 << 8) |*/ /*sao_sw_pred_enable*/
		(1 << 5) | /*parser_sao_if_en*/
		(1 << 2) | /*parser_mpred_if_en*/
		(1 << 0) /*parser_scaler_if_en*/
		);

#ifdef MULTI_INSTANCE_SUPPORT
	WRITE_VREG(HEVC_MPRED_INT_STATUS, (1<<31));

	WRITE_VREG(HEVC_PARSER_RESULT_3, 0xffffffff);

	/* Read back HEVC_MPRED_ABV_START_ADDR several times, reset the
	 * mpred block via DOS_SW_RESET3 bit 18, then restore the saved
	 * address (written three times). NOTE(review): the repeated
	 * reads/writes look like a hardware settling workaround —
	 * confirm against the SoC datasheet before touching. */
	for (i = 0; i < 8; i++)
		data32 = READ_VREG(HEVC_MPRED_ABV_START_ADDR);

	WRITE_VREG(DOS_SW_RESET3, (1<<18)); /* reset mpred */
	WRITE_VREG(DOS_SW_RESET3, 0);
	WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, data32);
	WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, data32);
	WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, data32);
#endif
	/*End of Multi-instance*/
	/*Changed to Start MPRED in microcode*/
	/*
	pr_info("[test.c] Start MPRED\n");
	WRITE_VREG(HEVC_MPRED_INT_STATUS,
		(1<<31)
		);
	*/

	/*AVS2 default seq_wq_matrix config*/

	avs2_print(dec, AVS2_DBG_BUFMGR_MORE,
		"Config AVS2 default seq_wq_matrix ...\n");
	/*4x4*/
	/* default seq_wq_matrix_4x4 begin address*/
	WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 64);
	for (i = 0; i < 16; i++)
		WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, g_WqMDefault4x4[i]);

	/*8x8*/
	/*default seq_wq_matrix_8x8 begin address*/
	WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0);
	for (i = 0; i < 64; i++)
		WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, g_WqMDefault8x8[i]);


	/* Pulse software reset on IPP/MPP, then enable IPP. */
	WRITE_VREG(HEVCD_IPP_TOP_CNTL,
		(0 << 1) | /*enable ipp*/
		(1 << 0) /*software reset ipp and mpp*/
		);
	WRITE_VREG(HEVCD_IPP_TOP_CNTL,
		(1 << 1) | /*enable ipp*/
		(0 << 0) /*software reset ipp and mpp*/
		);
#if 0
/*AVS2_10B_NV21*/
	/*Enable NV21 reference read mode for MC*/
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31);
#endif
	/* Init dblk*/
	data32 = READ_VREG(HEVC_DBLK_CFGB);
	data32 |= (2 << 0);
	/* [3:0] cfg_video_type -> AVS2*/

	data32 &= (~0x300); /*[8]:first write enable (compress)
		[9]:double write enable (uncompress)*/
	/* Select compressed (first) write, uncompressed (double) write,
	 * or both, based on the configured double-write mode. */
	if (get_double_write_mode(dec) == 0)
		data32 |= (0x1 << 8); /*enable first write*/
	else if (get_double_write_mode(dec) == 0x10)
		data32 |= (0x1 << 9); /*double write only*/
	else
		data32 |= ((0x1 << 8) | (0x1 << 9));
	WRITE_VREG(HEVC_DBLK_CFGB, data32);

	WRITE_VREG(HEVC_DBLK_CFG0, (1 << 0)); /* [0] rst_sync*/
	avs2_print(dec, AVS2_DBG_BUFMGR_MORE,
		"Bitstream level Init for DBLK .Done.\n");

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) {
		mcrcc_perfcount_reset();
		decomp_perfcount_reset();
	}

	return;
}


#ifdef CONFIG_HEVC_CLK_FORCED_ON
/*
 * Force the internal clocks of the decoder sub-blocks (IQIT, DBLK, SAO,
 * MPRED, parser stages, IPP, MCRCC) permanently on by setting each
 * block's clock-force bit. Debug/bring-up aid only — compiled in only
 * with CONFIG_HEVC_CLK_FORCED_ON.
 */
static void config_avs2_clk_forced_on(void)
{
	unsigned int rdata32;
	/*IQIT*/
	rdata32 = READ_VREG(HEVC_IQIT_CLK_RST_CTRL);
	WRITE_VREG(HEVC_IQIT_CLK_RST_CTRL, rdata32 | (0x1 << 2));

	/* DBLK*/
	rdata32 = READ_VREG(HEVC_DBLK_CFG0);
	WRITE_VREG(HEVC_DBLK_CFG0, rdata32 | (0x1 << 2));

	/* SAO*/
	rdata32 = READ_VREG(HEVC_SAO_CTRL1);
	WRITE_VREG(HEVC_SAO_CTRL1, rdata32 | (0x1 << 2));

	/*MPRED*/
	rdata32 = READ_VREG(HEVC_MPRED_CTRL1);
	WRITE_VREG(HEVC_MPRED_CTRL1, rdata32 | (0x1 << 24));

	/* PARSER*/
	rdata32 = READ_VREG(HEVC_STREAM_CONTROL);
	WRITE_VREG(HEVC_STREAM_CONTROL, rdata32 | (0x1 << 15));
	rdata32 = READ_VREG(HEVC_SHIFT_CONTROL);
	WRITE_VREG(HEVC_SHIFT_CONTROL, rdata32 | (0x1 << 15));
	rdata32 = READ_VREG(HEVC_CABAC_CONTROL);
	WRITE_VREG(HEVC_CABAC_CONTROL, rdata32 | (0x1 << 13));
	rdata32 = READ_VREG(HEVC_PARSER_CORE_CONTROL);
	WRITE_VREG(HEVC_PARSER_CORE_CONTROL, rdata32 | (0x1 << 15));
	rdata32 = READ_VREG(HEVC_PARSER_INT_CONTROL);
	WRITE_VREG(HEVC_PARSER_INT_CONTROL, rdata32 | (0x1 << 15));
	rdata32 = READ_VREG(HEVC_PARSER_IF_CONTROL);
	WRITE_VREG(HEVC_PARSER_IF_CONTROL,
		rdata32 | (0x1 << 6) | (0x1 << 3) | (0x1 << 1));

	/*IPP*/
	rdata32 = READ_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG);
	WRITE_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG, rdata32 | 0xffffffff);

	/* MCRCC*/
	rdata32 =
READ_VREG(HEVCD_MCRCC_CTL1);
	WRITE_VREG(HEVCD_MCRCC_CTL1, rdata32 | (0x1 << 3));
}
#endif

/* Single static decoder instance (single-instance legacy path). */
static struct AVS2Decoder_s gAVS2Decoder;

/*
 * Release all coherent-DMA working buffers owned by the decoder
 * instance (RPM, CUVA aux, LMEM, and the frame MMU map(s)), and free
 * the vdec status block. Safe to call with partially-initialized state:
 * each pointer is checked before freeing and NULLed afterwards.
 */
static void avs2_local_uninit(struct AVS2Decoder_s *dec)
{
	dec->rpm_ptr = NULL;
	dec->lmem_ptr = NULL;
	if (dec->rpm_addr) {
		dma_free_coherent(amports_get_dma_device(),
			RPM_BUF_SIZE, dec->rpm_addr,
			dec->rpm_phy_addr);
		dec->rpm_addr = NULL;
	}

	if (dec->cuva_addr) {
		dma_free_coherent(amports_get_dma_device(),
			dec->cuva_size, dec->cuva_addr,
			dec->cuva_phy_addr);
		dec->cuva_addr = NULL;
	}

	if (dec->lmem_addr) {
		if (dec->lmem_phy_addr)
			dma_free_coherent(amports_get_dma_device(),
				LMEM_BUF_SIZE, dec->lmem_addr,
				dec->lmem_phy_addr);
		dec->lmem_addr = NULL;
	}

#ifdef AVS2_10B_MMU
	if (dec->frame_mmu_map_addr) {
		if (dec->frame_mmu_map_phy_addr)
			dma_free_coherent(amports_get_dma_device(),
				get_frame_mmu_map_size(dec), dec->frame_mmu_map_addr,
				dec->frame_mmu_map_phy_addr);
		dec->frame_mmu_map_addr = NULL;
	}
#endif

#ifdef AVS2_10B_MMU_DW
	if (dec->dw_frame_mmu_map_addr) {
		if (dec->dw_frame_mmu_map_phy_addr)
			dma_free_coherent(amports_get_dma_device(),
				get_frame_mmu_map_size(dec), dec->dw_frame_mmu_map_addr,
				dec->dw_frame_mmu_map_phy_addr);
		dec->dw_frame_mmu_map_addr = NULL;
	}
#endif

	if (dec->gvs)
		vfree(dec->gvs);
	dec->gvs = NULL;
}

/*
 * Allocate and initialize the per-instance software state:
 *  - pick a workspace buffer spec by chip generation / 4K support (or
 *    the module-param override force_bufspec),
 *  - initialize the AVS2 buffer manager,
 *  - derive the initial picture size and usable frame-buffer count,
 *  - allocate the RPM, CUVA aux, LMEM, and frame MMU-map DMA buffers.
 * Returns 0 on success, -1 on any allocation failure (buffers already
 * allocated are released later by avs2_local_uninit()).
 */
static int avs2_local_init(struct AVS2Decoder_s *dec)
{
	int ret = -1;
	/*int losless_comp_header_size, losless_comp_body_size;*/

	struct BuffInfo_s *cur_buf_info = NULL;

	cur_buf_info = &dec->work_space_buf_store;
	if (force_bufspec) {
		/* Debug override: low nibble selects the workbuff spec. */
		memcpy(cur_buf_info, &amvavs2_workbuff_spec[force_bufspec & 0xf],
			sizeof(struct BuffInfo_s));
		pr_info("force buffer spec %d\n", force_bufspec & 0xf);
	} else {
		if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) {
			if (vdec_is_support_4k()) {
				if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)
					memcpy(cur_buf_info, &amvavs2_workbuff_spec[2], /* 8k */
						sizeof(struct BuffInfo_s));
				else
					memcpy(cur_buf_info, &amvavs2_workbuff_spec[1], /* 4k */
						sizeof(struct BuffInfo_s));
			} else
				memcpy(cur_buf_info, &amvavs2_workbuff_spec[0],/* 1080p */
					sizeof(struct BuffInfo_s));
		} else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb()
			if (vdec_is_support_4k()) {
				memcpy(cur_buf_info, &amvavs2_workbuff_spec[5], /* 8k */
					sizeof(struct BuffInfo_s));
			} else
				memcpy(cur_buf_info, &amvavs2_workbuff_spec[3],/* 1080p */
					sizeof(struct BuffInfo_s));
		}
	}

	cur_buf_info->start_adr = dec->buf_start;
#ifndef AVS2_10B_MMU
	dec->mc_buf_spec.buf_end = dec->buf_start + dec->buf_size;
#endif

	init_buff_spec(dec, cur_buf_info);

	init_avs2_decoder(&dec->avs2_dec);

#ifdef AVS2_10B_MMU
	avs2_bufmgr_init(dec, cur_buf_info, NULL);
#else
	/* Non-MMU path: carve the MC buffer out of the region following
	 * the workspace, 64KiB-aligned. */
	dec->mc_buf_spec.buf_start = (cur_buf_info->end_adr + 0xffff)
		& (~0xffff);
	dec->mc_buf_spec.buf_size = (dec->mc_buf_spec.buf_end
		- dec->mc_buf_spec.buf_start);
	if (debug) {
		pr_err("dec->mc_buf_spec.buf_start %x-%x\n",
			dec->mc_buf_spec.buf_start,
			dec->mc_buf_spec.buf_start +
			dec->mc_buf_spec.buf_size);
	}
	avs2_bufmgr_init(dec, cur_buf_info, &dec->mc_buf_spec);
#endif
	/* If either allocation dimension is unset, clamp/derive defaults
	 * from chip capability. NOTE(review): `&` here is a bitwise AND
	 * of the two dimensions used as a both-nonzero test — it can be 0
	 * even when both are nonzero; presumably intentional legacy
	 * behaviour, confirm before changing. */
	if ((buf_alloc_width & buf_alloc_height) == 0) {
		if (!vdec_is_support_4k()
			&& (buf_alloc_width > 1920 && buf_alloc_height > 1088)) {
			buf_alloc_width = 1920;
			buf_alloc_height = 1088;
		} else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) {
			buf_alloc_width = 3840;
			buf_alloc_height = 2160;
		}
	}
	dec->init_pic_w = buf_alloc_width ? buf_alloc_width :
		(dec->vavs2_amstream_dec_info.width ?
		dec->vavs2_amstream_dec_info.width :
		dec->work_space_buf->max_width);
	dec->init_pic_h = buf_alloc_height ? buf_alloc_height :
		(dec->vavs2_amstream_dec_info.height ?
		dec->vavs2_amstream_dec_info.height :
		dec->work_space_buf->max_height);

#ifndef AVS2_10B_MMU
	init_buf_list(dec);
#else
	dec->used_buf_num = max_buf_num + dec->dynamic_buf_margin;
	if (dec->used_buf_num > MAX_BUF_NUM)
		dec->used_buf_num = MAX_BUF_NUM;
	if (dec->used_buf_num > FRAME_BUFFERS)
		dec->used_buf_num = FRAME_BUFFERS;
#endif
	dec->avs2_dec.ref_maxbuffer = dec->used_buf_num - 1;
	/*init_pic_list(dec);*/

	/* Bit 6 of the amstream param flags an unstable PTS source. */
	pts_unstable = ((unsigned long)(dec->vavs2_amstream_dec_info.param)
		& 0x40) >> 6;

	if ((debug & AVS2_DBG_SEND_PARAM_WITH_REG) == 0) {
		dec->rpm_addr = dma_alloc_coherent(amports_get_dma_device(),
			RPM_BUF_SIZE,
			&dec->rpm_phy_addr, GFP_KERNEL);
		if (dec->rpm_addr == NULL) {
			pr_err("%s: failed to alloc rpm buffer\n", __func__);
			return -1;
		}
		avs2_print(dec, AVS2_DBG_BUFMGR,
			"rpm_phy_addr %x\n", (u32) dec->rpm_phy_addr);
		dec->rpm_ptr = dec->rpm_addr;
	}

	if (cuva_buf_size > 0) {
		dec->cuva_size = AUX_BUF_ALIGN(cuva_buf_size);

		dec->cuva_addr = dma_alloc_coherent(amports_get_dma_device(),
			dec->cuva_size, &dec->cuva_phy_addr, GFP_KERNEL);
		avs2_print(dec, AVS2_DBG_BUFMGR,
			"%s, cuva_size = %d cuva_phy_addr %x dec->cuva_addr = %px\n",
			__func__, dec->cuva_size, (u32)dec->cuva_phy_addr, dec->cuva_addr);
		if (dec->cuva_addr == NULL) {
			pr_err("%s: failed to alloc cuva buffer\n", __func__);
			return -1;
		}
	}

	dec->lmem_addr = dma_alloc_coherent(amports_get_dma_device(),
		LMEM_BUF_SIZE,
		&dec->lmem_phy_addr, GFP_KERNEL);
	if (dec->lmem_addr == NULL) {
		pr_err("%s: failed to alloc lmem buffer\n", __func__);
		return -1;
	} else
		avs2_print(dec, AVS2_DBG_BUFMGR,
			"%s, lmem_phy_addr %x\n",
			__func__, (u32)dec->lmem_phy_addr);
	dec->lmem_ptr = dec->lmem_addr;

#ifdef AVS2_10B_MMU
	if (dec->mmu_enable) {
		dec->frame_mmu_map_addr = dma_alloc_coherent(amports_get_dma_device(),
			get_frame_mmu_map_size(dec),
			&dec->frame_mmu_map_phy_addr, GFP_KERNEL);
		if (dec->frame_mmu_map_addr == NULL) {
			pr_err("%s: failed to alloc count_buffer\n", __func__);
			return -1;
		}
		memset(dec->frame_mmu_map_addr, 0, get_frame_mmu_map_size(dec));
	}
#endif

#ifdef AVS2_10B_MMU_DW
	if (dec->dw_mmu_enable) {
		dec->dw_frame_mmu_map_addr = dma_alloc_coherent(amports_get_dma_device(),
			get_frame_mmu_map_size(dec),
			&dec->dw_frame_mmu_map_phy_addr, GFP_KERNEL);
		if (dec->dw_frame_mmu_map_addr == NULL) {
			pr_err("%s: failed to alloc count_buffer\n", __func__);
			return -1;
		}
		memset(dec->dw_frame_mmu_map_addr, 0, get_frame_mmu_map_size(dec));
	}
#endif
	ret = 0;
	return ret;
}

/********************************************
 *  Mailbox command
 ********************************************/
/* Commands exchanged with the decoder microcode via the mailbox. */
#define CMD_FINISHED               0
#define CMD_ALLOC_VIEW             1
#define CMD_FRAME_DISPLAY          3
#define CMD_DEBUG                  10


#define DECODE_BUFFER_NUM_MAX    32
#define DISPLAY_BUFFER_NUM       6

/* Strip the top bit: physical addresses seen by the video domain. */
#define video_domain_addr(adr) (adr&0x7fffffff)
#define DECODER_WORK_SPACE_SIZE 0x800000

/* Pack y/uv canvas indices into the canvas-address word (uv used for
 * both the U and V planes — NV21 layout). */
#define spec2canvas(x)  \
	(((x)->uv_canvas_index << 16) | \
	((x)->uv_canvas_index << 8)  | \
	((x)->y_canvas_index << 0))


/*
 * Configure display canvases for one decoded picture. For double-write
 * modes, the canvas geometry is the picture size divided by the
 * double-write ratio (64/32 aligned); canvas indices come either from
 * the vdec core (parallel decode) or from a fixed 128+index scheme.
 * The non-double-write branch is only used without AVS2_10B_MMU.
 */
static void set_canvas(struct AVS2Decoder_s *dec,
	struct avs2_frame_s *pic)
{
	int canvas_w = ALIGN(pic->pic_w, 64)/4;
	int canvas_h = ALIGN(pic->pic_h, 32)/4;
	int blkmode = mem_map_mode;
	struct vdec_s *vdec = hw_to_vdec(dec);
	/*CANVAS_BLKMODE_64X32*/
	if (pic->double_write_mode) {
		canvas_w = pic->pic_w /
			get_double_write_ratio(pic->double_write_mode);
		canvas_h = pic->pic_h /
			get_double_write_ratio(pic->double_write_mode);
		/*sao_crtl1 aligned with 64*/
		canvas_w = ALIGN(canvas_w, 64);
		canvas_h = ALIGN(canvas_h, 32);

		if (vdec->parallel_dec == 1) {
			if (pic->y_canvas_index == -1)
				pic->y_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id);
			if (pic->uv_canvas_index == -1)
				pic->uv_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id);
		} else {
			pic->y_canvas_index = 128 + pic->index * 2;
			pic->uv_canvas_index = 128 + pic->index * 2 +
1;
		}

		config_cav_lut_ex(pic->y_canvas_index,
			pic->dw_y_adr, canvas_w, canvas_h,
			CANVAS_ADDR_NOWRAP, blkmode, 0x7, VDEC_HEVC);
		config_cav_lut_ex(pic->uv_canvas_index,
			pic->dw_u_v_adr, canvas_w, canvas_h,
			CANVAS_ADDR_NOWRAP, blkmode, 0x7, VDEC_HEVC);
#ifdef MULTI_INSTANCE_SUPPORT
		/* Mirror the canvas geometry into pic->canvas_config so the
		 * multi-instance vframe path can carry it per-plane. */
		pic->canvas_config[0].phy_addr = pic->dw_y_adr;
		pic->canvas_config[0].width = canvas_w;
		pic->canvas_config[0].height = canvas_h;
		pic->canvas_config[0].block_mode = blkmode;
		pic->canvas_config[0].endian = 7;

		pic->canvas_config[1].phy_addr = pic->dw_u_v_adr;
		pic->canvas_config[1].width = canvas_w;
		pic->canvas_config[1].height = canvas_h;
		pic->canvas_config[1].block_mode = blkmode;
		pic->canvas_config[1].endian = 7;
#endif
	} else {
	#ifndef AVS2_10B_MMU
		if (vdec->parallel_dec == 1) {
			if (pic->y_canvas_index == -1)
				pic->y_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id);
			if (pic->uv_canvas_index == -1)
				pic->uv_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id);
		} else {
			pic->y_canvas_index = 128 + pic->index;
			pic->uv_canvas_index = 128 + pic->index;
		}

		config_cav_lut_ex(pic->y_canvas_index,
			pic->mc_y_adr, canvas_w, canvas_h,
			CANVAS_ADDR_NOWRAP, blkmode, 0x7, VDEC_HEVC);
		config_cav_lut_ex(pic->uv_canvas_index,
			pic->mc_u_v_adr,canvas_w, canvas_h,
			CANVAS_ADDR_NOWRAP, blkmode, 0x7, VDEC_HEVC);
	#endif
	}
}

/*
 * Fill display-related vframe fields: duration, HDR signal type,
 * sample-aspect-ratio / display ratio_control (from the stream's pixel
 * ratio and the user-supplied vavs2_ratio), and sidebind info.
 */
static void set_frame_info(struct AVS2Decoder_s *dec, struct vframe_s *vf)
{
	unsigned int ar = 0;
	unsigned int pixel_ratio = 0;; /* NOTE(review): stray double ';' — harmless */

	vf->duration = dec->frame_dur;
	vf->duration_pulldown = 0;
	vf->flag = 0;
	vf->prop.master_display_colour = dec->vf_dp;
	/* Bit 31 of signal_type flags CUVA HDR presence. */
	if (dec->hdr_flag & HDR_CUVA_MASK)
		dec->video_signal_type |= 1 << 31;
	vf->signal_type = dec->video_signal_type;

	avs2_print(dec, AVS2_DBG_HDR_INFO,
		"signal_typesignal_type 0x%x \n",
		vf->signal_type);

	pixel_ratio = dec->vavs2_amstream_dec_info.ratio;

	if (dec->vavs2_ratio == 0) {
		/* always stretch to 16:9 */
		vf->ratio_control |= (0x90 <<
				DISP_RATIO_ASPECT_RATIO_BIT);
		vf->sar_width = 1;
		vf->sar_height = 1;
	} else {
		switch (pixel_ratio) {
		case 1:
			vf->sar_width = 1;
			vf->sar_height = 1;
			ar = (vf->height * dec->vavs2_ratio) / vf->width;
			break;
		case 2:
			vf->sar_width = 4;
			vf->sar_height = 3;
			ar = (vf->height * 3 * dec->vavs2_ratio) / (vf->width * 4);
			break;
		case 3:
			vf->sar_width = 16;
			vf->sar_height = 9;
			ar = (vf->height * 9 * dec->vavs2_ratio) / (vf->width * 16);
			break;
		case 4:
			vf->sar_width = 221;
			vf->sar_height = 100;
			ar = (vf->height * 100 * dec->vavs2_ratio) / (vf->width *
				221);
			break;
		default:
			vf->sar_width = 1;
			vf->sar_height = 1;
			ar = (vf->height * dec->vavs2_ratio) / vf->width;
			break;
		}
	}

	ar = min_t(u32, ar, DISP_RATIO_ASPECT_RATIO_MAX);
	/* NOTE(review): this assignment overwrites ratio_control, so the
	 * 0x90 "stretch to 16:9" value set above is lost (ar is 0 on that
	 * path). Looks like a bug — confirm intended behaviour before
	 * fixing. */
	vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT);

	vf->sidebind_type = dec->sidebind_type;
	vf->sidebind_channel_id = dec->sidebind_channel_id;

	return;
}

/*
 * vframe provider .vf_states op: report pool size and queue fill
 * levels. While single-stepping (step == 2) no frames are advertised.
 */
static int vavs2_vf_states(struct vframe_states *states, void *op_arg)
{
	struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)op_arg;

	states->vf_pool_size = VF_POOL_SIZE;
	states->buf_free_num = kfifo_len(&dec->newframe_q);
	states->buf_avail_num = kfifo_len(&dec->display_q);

	if (step == 2)
		states->buf_avail_num = 0;
	return 0;
}

/*
 * vframe provider .vf_peek op: return the next displayable frame
 * without dequeuing it. Honors the step debug mode and the
 * force_disp_pic_index debug override (bit 8 = active, bit 9 =
 * already consumed).
 */
static struct vframe_s *vavs2_vf_peek(void *op_arg)
{
	struct vframe_s *vf;
	struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)op_arg;
	if (step == 2)
		return NULL;

	if (force_disp_pic_index & 0x100) {
		if (force_disp_pic_index & 0x200)
			return NULL;
		return &dec->vframe_dummy;
	}

	/* Sanity check: display queue should never exceed the pool. */
	if (kfifo_len(&dec->display_q) > VF_POOL_SIZE) {
		avs2_print(dec, AVS2_DBG_BUFMGR,
			"kfifo len:%d invaild, peek error\n",
			kfifo_len(&dec->display_q));
		return NULL;
	}

	if (kfifo_peek(&dec->display_q, &vf))
		return vf;

	return NULL;
}

/*
 * Map a buffer index to its avs2_frame_s. Index used_buf_num-1 is the
 * background frame (m_bg); other indices are looked up in fref[].
 * Returns NULL when no frame carries that index.
 * NOTE(review): the fref[] scan has no break, so the LAST match wins —
 * presumably indices are unique; confirm.
 */
static struct avs2_frame_s *get_pic_by_index(
	struct AVS2Decoder_s *dec, int index)
{
	int i;
	struct avs2_frame_s *pic = NULL;
	if (index == (dec->used_buf_num - 1))
		pic = dec->avs2_dec.m_bg;
	else if (index >= 0 && index < dec->used_buf_num) {
		for (i = 0; i < dec->used_buf_num; i++) {
			if (dec->avs2_dec.fref[i]->index == index)
				pic = dec->avs2_dec.fref[i];
		}
	}
	return pic;
}

/*
 * vframe provider .vf_get op: dequeue the next frame for display.
 * Handles the step debug mode (step 1 -> 2 after one frame), the
 * force_disp_pic_index override (returns the dummy vframe bound to the
 * forced picture exactly once), and a diagnostic path that dumps the
 * picture list when the dequeued index has no backing picture.
 */
static struct vframe_s *vavs2_vf_get(void *op_arg)
{
	struct vframe_s *vf;
	struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)op_arg;
	if (step == 2)
		return NULL;
	else if (step == 1)
		step = 2;

	if (force_disp_pic_index & 0x100) {
		int idx = force_disp_pic_index & 0xff;
		struct avs2_frame_s *pic = NULL;
		if (idx >= 0
			&& idx < dec->avs2_dec.ref_maxbuffer)
			pic = get_pic_by_index(dec, idx);
		if (pic == NULL)
			return NULL;
		if (force_disp_pic_index & 0x200)
			return NULL;

		vf = &dec->vframe_dummy;

		set_vframe(dec, vf, pic, 1);

		force_disp_pic_index |= 0x200;
		return vf;
	}

	if (kfifo_get(&dec->display_q, &vf)) {
		uint8_t index = vf->index & 0xff;
		ATRACE_COUNTER(dec->disp_q_name, kfifo_len(&dec->display_q));
		if (index < dec->used_buf_num) {
			struct avs2_frame_s *pic = get_pic_by_index(dec, index);
			if (pic == NULL &&
				(debug & AVS2_DBG_PIC_LEAK)) {
				int i;
				avs2_print(dec, 0,
					"%s error index 0x%x pic not exist\n",
					__func__, index);
				dump_pic_list(dec);
				for (i = 0; i < 10; i++) {
					pic = get_pic_by_index(dec, index);
					pr_info("pic = %p\n", pic);
				}

				if (debug & AVS2_DBG_PIC_LEAK)
					debug |= AVS2_DBG_PIC_LEAK_WAIT;
				return NULL;
			}
			dec->vf_get_count++;
			if (pic)
				avs2_print(dec, AVS2_DBG_BUFMGR,
					"%s index 0x%x pos %d getcount %d type 0x%x w/h %d/%d, pts %d, %lld\n",
					__func__, index,
					pic->imgtr_fwRefDistance_bak,
					dec->vf_get_count,
					vf->type,
					vf->width, vf->height,
					vf->pts,
					vf->pts_us64);
			return vf;
		}
	}
	return NULL;
}

/*
 * vframe provider .vf_put op: receiver returns a displayed frame.
 * Recycles the vframe into newframe_q and, under the buffer lock,
 * drops the backing picture's vf_ref and kicks the decoder mailbox if
 * it was waiting for a free buffer. The dummy vframe is ignored.
 */
static void vavs2_vf_put(struct vframe_s *vf, void *op_arg)
{
	struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)op_arg;
	uint8_t index;

	if (vf ==
(&dec->vframe_dummy))
		return;

	if (!vf)
		return;

	index = vf->index & 0xff;

	kfifo_put(&dec->newframe_q, (const struct vframe_s *)vf);
	ATRACE_COUNTER(dec->new_q_name, kfifo_len(&dec->newframe_q));
	dec->vf_put_count++;
	avs2_print(dec, AVS2_DBG_BUFMGR,
		"%s index putcount 0x%x %d\n",
		__func__, vf->index,
		dec->vf_put_count);

	if (index < dec->used_buf_num) {
		unsigned long flags;
		struct avs2_frame_s *pic;

		lock_buffer(dec, flags);
		pic = get_pic_by_index(dec, index);
		if (pic && pic->vf_ref > 0)
			pic->vf_ref--;
		else {
			if (pic)
				avs2_print(dec, 0,
					"%s, error pic (index %d) vf_ref is %d\n",
					__func__, index, pic->vf_ref);
			else
				avs2_print(dec, 0,
					"%s, error pic (index %d) is NULL\n",
					__func__, index);
		}
		/* Decoder is stalled waiting for a free buffer: ring the
		 * mailbox IRQ so it re-checks. */
		if (dec->wait_buf)
			WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG,
				0x1);
		dec->last_put_idx = index;
		dec->new_frame_displayed++;
		unlock_buffer(dec, flags);
	}

}

/*
 * vframe provider .event_cb op. Handles two receiver events:
 *  - REQ_STATE: report secure-mode state (or 0xffffffff for other
 *    request types);
 *  - GET_AUX_DATA: hand back the CUVA aux buffer of the picture backing
 *    the given vframe (with no vframe, returns vf_put_count in
 *    aux_size — apparently a query convention used by the receiver).
 * NOTE(review): in the GET_AUX_DATA path `pic` from get_pic_by_index()
 * may be NULL (dereferenced for cuva_data_buf), and when index is out
 * of range `pic` is read uninitialized by the trailing debug print —
 * latent bugs worth fixing with a NULL check/initializer.
 */
static int vavs2_event_cb(int type, void *data, void *private_data)
{
	struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)private_data;

	if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) {
		struct provider_state_req_s *req =
			(struct provider_state_req_s *)data;
		if (req->req_type == REQ_STATE_SECURE)
			req->req_result[0] = vdec_secure(hw_to_vdec(dec));
		else
			req->req_result[0] = 0xffffffff;
	} else if (type & VFRAME_EVENT_RECEIVER_GET_AUX_DATA) {
		struct provider_aux_req_s *req =
			(struct provider_aux_req_s *)data;
		unsigned char index;
		unsigned long flags;
		struct avs2_frame_s *pic;

		if (!req->vf) {
			req->aux_size = dec->vf_put_count;
			return 0;
		}
		lock_buffer(dec, flags);
		index = req->vf->index & 0xff;
		req->aux_buf = NULL;
		req->aux_size = 0;
		req->format = VFORMAT_AVS2;
		if (index < dec->used_buf_num) {
			pic = get_pic_by_index(dec, index);
			req->aux_buf = pic->cuva_data_buf;
			req->aux_size = pic->cuva_data_size;
		}
		unlock_buffer(dec, flags);

		avs2_print(dec, PRINT_FLAG_VDEC_STATUS,
			"%s pic 0x%p index %d =>size %d\n",
			__func__, pic, index, req->aux_size);
	}

	return 0;
}

/*
 * Pick the next picture to present: scan fref[] for the nonzero
 * to_prepare_disp with the smallest value (earliest display order),
 * clear its flag, and return it. NULL when nothing is pending.
 */
static struct avs2_frame_s *get_disp_pic(struct AVS2Decoder_s *dec)
{
	struct avs2_decoder *avs2_dec = &dec->avs2_dec;
	struct avs2_frame_s *pic = NULL;
	int32_t j;
	int32_t pre_disp_count_min = 0x7fffffff;
	for (j = 0; j < avs2_dec->ref_maxbuffer; j++) {
		if (avs2_dec->fref[j]->to_prepare_disp &&
			avs2_dec->fref[j]->to_prepare_disp <
			pre_disp_count_min) {
			pre_disp_count_min =
				avs2_dec->fref[j]->to_prepare_disp;
			pic = avs2_dec->fref[j];
		}
	}
	if (pic)
		pic->to_prepare_disp = 0;

	return pic;

}



/*
 * Populate the per-frame QoS record (dec->vframe_qos) from the decoded
 * picture: frame type (1/2/3 = I/P/B), size, pts, and the min/avg/max
 * motion-vector, QP and skip statistics gathered by the decoder.
 * The SHOW_QOS_INFO prints are compiled out by default.
 */
static void fill_frame_info(struct AVS2Decoder_s *dec,
	struct avs2_frame_s *pic, unsigned int framesize, unsigned int pts)
{
	struct vframe_qos_s *vframe_qos = &dec->vframe_qos;

	if (pic->slice_type == I_IMG)
		vframe_qos->type = 1;
	else if (pic->slice_type == P_IMG)
		vframe_qos->type = 2;
	else if (pic->slice_type == B_IMG)
		vframe_qos->type = 3;
/*
#define SHOW_QOS_INFO
*/
	if (input_frame_based(hw_to_vdec(dec)))
		vframe_qos->size = pic->frame_size;
	else
		vframe_qos->size = framesize;
	vframe_qos->pts = pts;
#ifdef SHOW_QOS_INFO
	avs2_print(dec, 0, "slice:%d\n", pic->slice_type);
#endif


	vframe_qos->max_mv = pic->max_mv;
	vframe_qos->avg_mv = pic->avg_mv;
	vframe_qos->min_mv = pic->min_mv;
#ifdef SHOW_QOS_INFO
	avs2_print(dec, 0, "mv: max:%d,  avg:%d, min:%d\n",
		vframe_qos->max_mv,
		vframe_qos->avg_mv,
		vframe_qos->min_mv);
#endif

	vframe_qos->max_qp = pic->max_qp;
	vframe_qos->avg_qp = pic->avg_qp;
	vframe_qos->min_qp = pic->min_qp;
#ifdef SHOW_QOS_INFO
	avs2_print(dec, 0, "qp: max:%d,  avg:%d, min:%d\n",
		vframe_qos->max_qp,
		vframe_qos->avg_qp,
		vframe_qos->min_qp);
#endif

	vframe_qos->max_skip = pic->max_skip;
	vframe_qos->avg_skip = pic->avg_skip;
	vframe_qos->min_skip = pic->min_skip;
#ifdef SHOW_QOS_INFO
	avs2_print(dec, 0, "skip: max:%d,  avg:%d, min:%d\n",
		vframe_qos->max_skip,
		vframe_qos->avg_skip,
		vframe_qos->min_skip);
#endif

	vframe_qos->num++;

}

/*
 * Bind a decoded picture to a vframe for display.
 * Fills PTS (frame-based from the pic, stream-based via pts-server
 * lookup, with the PTS_NONE_REF_USE_DURATION fallback state machine),
 * compressed/double-write buffer addresses, canvas info, vframe type
 * flags, bit depth, geometry, and memory handles. With dummy != 0
 * (forced-display debug path) the PTS/vf_ref bookkeeping is skipped.
 */
static void set_vframe(struct AVS2Decoder_s *dec,
	struct vframe_s *vf, struct avs2_frame_s *pic, u8 dummy)
{
	unsigned long flags;
	int stream_offset;
	unsigned int frame_size = 0;
	int pts_discontinue;
	struct vdec_s *vdec = hw_to_vdec(dec);
	stream_offset = pic->stream_offset;
	avs2_print(dec, AVS2_DBG_BUFMGR,
		"%s index = %d pos = %d\r\n",
		__func__, pic->index,
		pic->imgtr_fwRefDistance);

	if (pic->double_write_mode && (pic->double_write_mode & 0x20) == 0)
		set_canvas(dec, pic);

	display_frame_count[dec->index]++;

	if (!dummy) {
#ifdef MULTI_INSTANCE_SUPPORT
		if (vdec_frame_based(vdec)) {
			vf->pts = pic->pts;
			vf->pts_us64 = pic->pts64;
		} else {
#endif
			if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) {
				/* if (pts_lookup_offset(PTS_TYPE_VIDEO,
					stream_offset, &vf->pts, 0) != 0) { */
				if (pts_lookup_offset_us64
					(PTS_TYPE_VIDEO, stream_offset,
					&vf->pts, &frame_size, 0,
					&vf->pts_us64) != 0) {
#ifdef DEBUG_PTS
					dec->pts_missed++;
#endif
					vf->pts = 0;
					vf->pts_us64 = 0;
				}
			}
		}
#ifdef DEBUG_PTS
		else
			dec->pts_hit++;
#endif
		if (pts_unstable)
			dec->pts_mode = PTS_NONE_REF_USE_DURATION;

		fill_frame_info(dec, pic, frame_size, vf->pts);

		/* PTS mode hysteresis: repeated backwards-going lookups switch
		 * to duration-based PTS; enough forward ones reset the count. */
		if ((dec->pts_mode == PTS_NORMAL) && (vf->pts != 0)
			&& dec->get_frame_dur) {
			int pts_diff = (int)vf->pts - dec->last_lookup_pts;

			if (pts_diff < 0) {
				dec->pts_mode_switching_count++;
				dec->pts_mode_recovery_count = 0;

				if (dec->pts_mode_switching_count >=
					PTS_MODE_SWITCHING_THRESHOLD) {
					dec->pts_mode =
						PTS_NONE_REF_USE_DURATION;
					pr_info
					("HEVC: switch to n_d mode.\n");
				}

			} else {
				int p = PTS_MODE_SWITCHING_RECOVERY_THREASHOLD;
				dec->pts_mode_recovery_count++;
				if (dec->pts_mode_recovery_count > p) {
					dec->pts_mode_switching_count = 0;
					dec->pts_mode_recovery_count = 0;
				}
			}
		}

		pts_discontinue =
			(abs(dec->last_pts - vf->pts) >=
tsync_vpts_discontinuity_margin());

		if (vf->pts != 0)
			dec->last_lookup_pts = vf->pts;

		/* In duration mode, synthesize the PTS from the previous one
		 * except at an I frame that coincides with a discontinuity
		 * (or when audio PTS has checked in). */
		if ((dec->pts_mode == PTS_NONE_REF_USE_DURATION)
			&& ((pic->slice_type != I_IMG) || (!pts_discontinue &&
			!first_pts_checkin_complete(PTS_TYPE_AUDIO))))
			vf->pts = dec->last_pts + DUR2PTS(dec->frame_dur);
		dec->last_pts = vf->pts;

		if (vf->pts_us64 != 0)
			dec->last_lookup_pts_us64 = vf->pts_us64;

		if ((dec->pts_mode == PTS_NONE_REF_USE_DURATION)
			&& ((pic->slice_type != I_IMG) || (!pts_discontinue &&
			!first_pts_checkin_complete(PTS_TYPE_AUDIO)))) {
			vf->pts_us64 =
				dec->last_pts_us64 +
				(DUR2PTS(dec->frame_dur) * 100 / 9);
		}

		dec->last_pts_us64 = vf->pts_us64;
		avs2_print(dec, AVS2_DBG_OUT_PTS,
			"avs2 dec out pts: vf->pts=%d, vf->pts_us64 = %lld\n",
			vf->pts, vf->pts_us64);
	}

	/* High byte 0xff00 tags the index as belonging to this provider. */
	vf->index = 0xff00 | pic->index;

	if (pic->double_write_mode & 0x10) {
		/* double write only */
		vf->compBodyAddr = 0;
		vf->compHeadAddr = 0;
	} else {
#ifdef AVS2_10B_MMU
		vf->compBodyAddr = 0;
		vf->compHeadAddr = pic->header_adr;
#ifdef AVS2_10B_MMU_DW
		vf->dwBodyAddr = 0;
		vf->dwHeadAddr = 0;
		if (pic->double_write_mode & 0x20) {
			u32 mode = pic->double_write_mode & 0xf;
			if (mode == 5 || mode == 3)
				vf->dwHeadAddr = pic->dw_header_adr;
			else if ((mode == 1 || mode == 2 || mode == 4)
				&& ((debug & AVS2_DBG_OUT_PTS) == 0)) {
				vf->compHeadAddr = pic->dw_header_adr;
				pr_info("Use dw mmu for display\n");
			}
		}
#endif

#else
		vf->compBodyAddr = pic->mc_y_adr; /*body adr*/
		vf->compHeadAddr = pic->mc_y_adr + pic->comp_body_size;
#endif
	}
	if (pic->double_write_mode &&
		((pic->double_write_mode & 0x20) == 0)) {
		vf->type = VIDTYPE_PROGRESSIVE |
			VIDTYPE_VIU_FIELD;
		vf->type |= VIDTYPE_VIU_NV21;
		if (pic->double_write_mode == 3) {
			vf->type |= VIDTYPE_COMPRESS;
#ifdef AVS2_10B_MMU
			vf->type |= VIDTYPE_SCATTER;
#endif
		}
#ifdef MULTI_INSTANCE_SUPPORT
		if (dec->m_ins_flag) {
			/* Multi-instance: carry per-plane canvas configs
			 * instead of global canvas indices. */
			vf->canvas0Addr = vf->canvas1Addr = -1;
			vf->plane_num = 2;
			vf->canvas0_config[0] =
				pic->canvas_config[0];
			vf->canvas0_config[1] =
				pic->canvas_config[1];

			vf->canvas1_config[0] =
				pic->canvas_config[0];
			vf->canvas1_config[1] =
				pic->canvas_config[1];

		} else
#endif
			vf->canvas0Addr = vf->canvas1Addr =
				spec2canvas(pic);
	} else {
		vf->canvas0Addr = vf->canvas1Addr = 0;
		vf->type = VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD;
#ifdef AVS2_10B_MMU
		vf->type |= VIDTYPE_SCATTER;
#endif
	}

	switch (pic->bit_depth) {
	case AVS2_BITS_8:
		vf->bitdepth = BITDEPTH_Y8 |
			BITDEPTH_U8 | BITDEPTH_V8;
		break;
	case AVS2_BITS_10:
	case AVS2_BITS_12:
		vf->bitdepth = BITDEPTH_Y10 |
			BITDEPTH_U10 | BITDEPTH_V10;
		break;
	default:
		vf->bitdepth = BITDEPTH_Y10 |
			BITDEPTH_U10 | BITDEPTH_V10;
		break;
	}
	if ((vf->type & VIDTYPE_COMPRESS) == 0)
		vf->bitdepth =
			BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8;
	if (pic->bit_depth == AVS2_BITS_8)
		vf->bitdepth |= BITDEPTH_SAVING_MODE;

	set_frame_info(dec, vf);
	/* if((vf->width!=pic->width)|
		(vf->height!=pic->height)) */
	/* pr_info("aaa: %d/%d, %d/%d\n",
		vf->width,vf->height, pic->width,
		pic->height); */
	vf->width = pic->pic_w /
		get_double_write_ratio(pic->double_write_mode);
	vf->height = pic->pic_h /
		get_double_write_ratio(pic->double_write_mode);
	if (force_w_h != 0) {
		vf->width = (force_w_h >> 16) & 0xffff;
		vf->height = force_w_h & 0xffff;
	}
	if ((pic->double_write_mode & 0x20) &&
		((pic->double_write_mode & 0xf) == 2 ||
		(pic->double_write_mode & 0xf) == 4)) {
		vf->compWidth = pic->pic_w /
			get_double_write_ratio(
				pic->double_write_mode & 0xf);
		vf->compHeight = pic->pic_h /
			get_double_write_ratio(
				pic->double_write_mode & 0xf);
	} else {
		vf->compWidth = pic->pic_w;
		vf->compHeight = pic->pic_h;
	}
	/* Debug override: force output frame rate (bit 8 = enable,
	 * low byte = fps). */
	if (force_fps & 0x100) {
		u32 rate = force_fps & 0xff;
		if (rate)
			vf->duration = 96000/rate;
		else
			vf->duration = 0;
	}
#ifdef AVS2_10B_MMU
	if (vf->type & VIDTYPE_SCATTER) {
#ifdef AVS2_10B_MMU_DW
		if (pic->double_write_mode & 0x20) {
			vf->mem_handle =
				decoder_mmu_box_get_mem_handle(
					dec->dw_mmu_box, pic->index);
			vf->mem_head_handle =
				decoder_bmmu_box_get_mem_handle(
					dec->bmmu_box,
					HEADER_BUFFER_IDX(pic->BUF_index));
			vf->mem_dw_handle = NULL;
		} else
#endif
		{
			vf->mem_handle = decoder_mmu_box_get_mem_handle(
				dec->mmu_box,
				pic->index);
			vf->mem_head_handle = decoder_bmmu_box_get_mem_handle(
				dec->bmmu_box,
				HEADER_BUFFER_IDX(pic->index));
		}
	} else {
		vf->mem_handle = decoder_bmmu_box_get_mem_handle(
			dec->bmmu_box,
			VF_BUFFER_IDX(pic->index));
		vf->mem_head_handle = decoder_bmmu_box_get_mem_handle(
			dec->bmmu_box,
			HEADER_BUFFER_IDX(pic->index));
	}
#else
	vf->mem_handle = decoder_bmmu_box_get_mem_handle(
		dec->bmmu_box,
		VF_BUFFER_IDX(pic->index));
#endif
	/* Without pts-server on a stream input, smuggle the stream offset
	 * through pts_us64 for the receiver. */
	if (!vdec->vbuf.use_ptsserv && vdec_stream_based(vdec)) {
		vf->pts_us64 = stream_offset;
		vf->pts = 0;
	}
	if (!dummy) {
		lock_buffer(dec, flags);
		pic->vf_ref = 1;
		unlock_buffer(dec, flags);
	}
	dec->vf_pre_count++;
}

/*
 * Refresh the vdec status block (dec->gvs) from current decoder state:
 * frame geometry, duration, a rounded frame rate derived from the
 * 96 kHz duration units, and the status word.
 */
static inline void dec_update_gvs(struct AVS2Decoder_s *dec)
{
	if (dec->gvs->frame_height != dec->frame_height) {
		dec->gvs->frame_width = dec->frame_width;
		dec->gvs->frame_height = dec->frame_height;
	}
	if (dec->gvs->frame_dur != dec->frame_dur) {
		dec->gvs->frame_dur = dec->frame_dur;
		/* Round 96000/frame_dur to the nearest integer fps. */
		if (dec->frame_dur != 0)
			dec->gvs->frame_rate = ((96000 * 10 / dec->frame_dur) % 10) < 5 ?
96000 / dec->frame_dur : (96000 / dec->frame_dur +1);
		else
			dec->gvs->frame_rate = -1;
	}
	dec->gvs->status = dec->stat | dec->fatal_error;
}

/*
 * Drain pictures ready for display: for each picture from
 * get_disp_pic(), skip error-marked frames and (while re-searching the
 * sequence) P/B frames before the next I frame, bind the picture to a
 * free vframe, push it onto display_q, update decode statistics, and
 * notify the receiver (or immediately get/put the frame in
 * without_display_mode). Returns -1 only when newframe_q is
 * unexpectedly empty. Compiled out entirely with NO_DISPLAY.
 */
static int avs2_prepare_display_buf(struct AVS2Decoder_s *dec)
{
#ifndef NO_DISPLAY
	struct vframe_s *vf = NULL;
	/*unsigned short slice_type;*/
	struct avs2_frame_s *pic;
	struct vdec_s *pvdec = hw_to_vdec(dec);
	while (1) {
		pic = get_disp_pic(dec);
		if (pic == NULL)
			break;

		if (force_disp_pic_index & 0x100) {
			/*recycle directly*/
			continue;
		}

		if (pic->error_mark) {
			avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
				"!!!error pic, skip\n",
				0);
			continue;
		}

		if (dec->start_decoding_flag != 0) {
			if (dec->skip_PB_before_I &&
				pic->slice_type != I_IMG) {
				/* NOTE(review): extra leading `0` argument —
				 * the %d prints 0, not the slice type. */
				avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
					"!!!slice type %d (not I) skip\n",
					0, pic->slice_type);
				continue;
			}
			dec->skip_PB_before_I = 0;
		}

		if (kfifo_get(&dec->newframe_q, &vf) == 0) {
			pr_info("fatal error, no available buffer slot.");
			return -1;
		}

		if (vf) {
			struct vdec_info tmp4x;
			int stream_offset = pic->stream_offset;
			set_vframe(dec, vf, pic, 0);
			decoder_do_frame_check(pvdec, vf);
			vdec_vframe_ready(pvdec, vf);
			kfifo_put(&dec->display_q, (const struct vframe_s *)vf);
			ATRACE_COUNTER(dec->pts_name, vf->pts);
			ATRACE_COUNTER(dec->new_q_name, kfifo_len(&dec->newframe_q));
			ATRACE_COUNTER(dec->disp_q_name, kfifo_len(&dec->display_q));

			dec_update_gvs(dec);
			/*count info*/
			vdec_count_info(dec->gvs, 0, stream_offset);
			if (stream_offset) {
				if (pic->slice_type == I_IMG) {
					dec->gvs->i_decoded_frames++;
				} else if (pic->slice_type == P_IMG) {
					dec->gvs->p_decoded_frames++;
				} else if (pic->slice_type == B_IMG) {
					dec->gvs->b_decoded_frames++;
				}
			}
			memcpy(&tmp4x, dec->gvs, sizeof(struct vdec_info));
			tmp4x.bit_depth_luma = bit_depth_luma;
			tmp4x.bit_depth_chroma = bit_depth_chroma;
			tmp4x.double_write_mode = pic->double_write_mode;
			vdec_fill_vdec_frame(pvdec, &dec->vframe_qos, &tmp4x,
				vf, pic->hw_decode_time);
			pvdec->vdec_fps_detec(pvdec->id);
			if (without_display_mode == 0) {
				vf_notify_receiver(dec->provider_name,
					VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL);
			} else
				vavs2_vf_put(vavs2_vf_get(dec), dec);
		}
	}
/*!NO_DISPLAY*/
#endif
	return 0;
}

/*
 * Read the RPM parameter block from the decoder microcode: poll
 * RPM_CMD_REG until bit 16 signals valid data, take the low 16 bits,
 * and ack by writing 0 — once per parameter word.
 */
static void get_rpm_param(union param_u *params)
{
	int i;
	unsigned int data32;
	if (debug & AVS2_DBG_BUFMGR)
		pr_info("enter %s\r\n", __func__);
	for (i = 0; i < (RPM_END - RPM_BEGIN); i++) {
		do {
			data32 = READ_VREG(RPM_CMD_REG);
			/*pr_info("%x\n", data32);*/
		} while ((data32 & 0x10000) == 0);
		params->l.data[i] = data32&0xffff;
		/*pr_info("%x\n", data32);*/
		WRITE_VREG(RPM_CMD_REG, 0);
	}
	if (debug & AVS2_DBG_BUFMGR)
		pr_info("leave %s\r\n", __func__);
}
/*
 * Hex-dump the received RPM parameter block, 16 words per row.
 * Active only with AVS2_DBG_BUFMGR_MORE.
 */
static void debug_buffer_mgr_more(struct AVS2Decoder_s *dec)
{
	int i;
	if (!(debug & AVS2_DBG_BUFMGR_MORE))
		return;
	pr_info("avs2_param: (%d)\n", dec->avs2_dec.img.number);
	for (i = 0; i < (RPM_END-RPM_BEGIN); i++) {
		pr_info("%04x ", dec->avs2_dec.param.l.data[i]);
		if (((i + 1) & 0xf) == 0)
			pr_info("\n");
	}
}

#ifdef AVS2_10B_MMU
/*
 * Release the unused tail of the current frame's MMU allocation once
 * the hardware reports how many 4K pages it actually used
 * (HEVC_SAO_MMU_STATUS bits 31:16). No-op if already released.
 */
static void avs2_recycle_mmu_buf_tail(struct AVS2Decoder_s *dec)
{
	if (dec->cur_fb_idx_mmu != INVALID_IDX) {
		if (dec->used_4k_num == -1) {
			dec->used_4k_num =
			(READ_VREG(HEVC_SAO_MMU_STATUS) >> 16);
			if (dec->m_ins_flag)
				hevc_mmu_dma_check(hw_to_vdec(dec));
			decoder_mmu_box_free_idx_tail(dec->mmu_box,
				dec->cur_fb_idx_mmu, dec->used_4k_num);
		}
		dec->cur_fb_idx_mmu = INVALID_IDX;
		dec->used_4k_num = -1;
	}
}

/*
 * Release the whole MMU allocation of the current frame (e.g. on an
 * aborted decode), then invalidate the bookkeeping state.
 */
static void avs2_recycle_mmu_buf(struct AVS2Decoder_s *dec)
{
	if (dec->cur_fb_idx_mmu != INVALID_IDX) {
		decoder_mmu_box_free_idx(dec->mmu_box,
			dec->cur_fb_idx_mmu);

		dec->cur_fb_idx_mmu = INVALID_IDX;
		dec->used_4k_num = -1;
	}
}
#endif

/* Stop the HEVC core and flag DEC_RESULT_AGAIN so the current chunk is
 * retried later (continues past this chunk of the file). */
static void dec_again_process(struct AVS2Decoder_s *dec)
{
	amhevc_stop();
	dec->dec_result = DEC_RESULT_AGAIN;
	if (dec->process_state ==
		PROC_STATE_DECODING) {
		dec->process_state =
+ PROC_STATE_DECODE_AGAIN; + } else if (dec->process_state == + PROC_STATE_HEAD_DONE) { + dec->process_state = + PROC_STATE_HEAD_AGAIN; + } + dec->next_again_flag = 1; + reset_process_time(dec); + vdec_schedule_work(&dec->work); +} + +static uint32_t log2i(uint32_t val) +{ + uint32_t ret = -1; + while (val != 0) { + val >>= 1; + ret++; + } + return ret; +} + +static void check_pic_error(struct AVS2Decoder_s *dec, + struct avs2_frame_s *pic) +{ + if (pic->decoded_lcu == 0) { + pic->decoded_lcu = + (READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff) + 1; + } + if (pic->decoded_lcu != dec->avs2_dec.lcu_total) { + avs2_print(dec, AVS2_DBG_BUFMGR, + "%s error pic(index %d imgtr_fwRefDistance %d) decoded lcu %d (total %d)\n", + __func__, pic->index, pic->imgtr_fwRefDistance, + pic->decoded_lcu, dec->avs2_dec.lcu_total); + pic->error_mark = 1; + } else { + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s pic(index %d imgtr_fwRefDistance %d) decoded lcu %d (total %d)\n", + __func__, pic->index, pic->imgtr_fwRefDistance, + pic->decoded_lcu, dec->avs2_dec.lcu_total); + + } +} +static void update_decoded_pic(struct AVS2Decoder_s *dec) +{ + struct avs2_frame_s *pic = dec->avs2_dec.hc.cur_pic; + if (pic) { + dec->avs2_dec.hc.cur_pic->decoded_lcu = + (READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff) + 1; + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s pic(index %d imgtr_fwRefDistance %d) decoded lcu %d (total %d)\n", + __func__, pic->index, pic->imgtr_fwRefDistance, + pic->decoded_lcu, dec->avs2_dec.lcu_total); + } +} +/* +[SE] [BUG][BUG-171463][chuanqi.wang]: get frame rate by video sequeue*/ +static int get_frame_rate(union param_u *params, struct AVS2Decoder_s *dec) +{ + int tmp = 0; + + switch (params->p.frame_rate_code) { + case 1: + case 2: + tmp = 24; + break; + case 3: + tmp = 25; + break; + case 4: + case 5: + tmp = 30; + break; + case 6: + tmp = 50; + break; + case 7: + case 8: + tmp = 60; + break; + case 9: + tmp = 100; + break; + case 10: + tmp = 120; + break; + default: + tmp = 
25; + break; + } + + if (!params->p.progressive_sequence) + tmp = tmp / 2; + dec->frame_dur = div_u64(96000ULL, tmp); + dec->get_frame_dur = true; + /*avs2_print(dec, 0, "avs2 frame_dur:%d,progressive:%d\n", dec->frame_dur, params->p.progressive_sequence);*/ + return 0; +} + + +#define HEVC_MV_INFO 0x310d +#define HEVC_QP_INFO 0x3137 +#define HEVC_SKIP_INFO 0x3136 + +/* only when we decoded one field or one frame, +we can call this function to get qos info*/ +static void get_picture_qos_info(struct AVS2Decoder_s *dec) +{ + struct avs2_frame_s *picture = dec->avs2_dec.hc.cur_pic; + struct vdec_s *vdec = hw_to_vdec(dec); + if (!picture) { + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s decode picture is none exist\n"); + + return; + } + if (vdec->mvfrm) { + picture->frame_size = vdec->mvfrm->frame_size; + picture->hw_decode_time = + local_clock() - vdec->mvfrm->hw_decode_start; + } + +/* +#define DEBUG_QOS +*/ + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + unsigned char a[3]; + unsigned char i, j, t; + unsigned long data; + + data = READ_VREG(HEVC_MV_INFO); + if (picture->slice_type == I_IMG) + data = 0; + a[0] = data & 0xff; + a[1] = (data >> 8) & 0xff; + a[2] = (data >> 16) & 0xff; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_mv = a[2]; + picture->avg_mv = a[1]; + picture->min_mv = a[0]; +#ifdef DEBUG_QOS + avs2_print(dec, 0, "mv data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); +#endif + + data = READ_VREG(HEVC_QP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_qp = a[2]; + picture->avg_qp = a[1]; + 
picture->min_qp = a[0]; +#ifdef DEBUG_QOS + avs2_print(dec, 0, "qp data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); +#endif + + data = READ_VREG(HEVC_SKIP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_skip = a[2]; + picture->avg_skip = a[1]; + picture->min_skip = a[0]; + +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "skip data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); +#endif + } else { + uint32_t blk88_y_count; + uint32_t blk88_c_count; + uint32_t blk22_mv_count; + uint32_t rdata32; + int32_t mv_hi; + int32_t mv_lo; + uint32_t rdata32_l; + uint32_t mvx_L0_hi; + uint32_t mvy_L0_hi; + uint32_t mvx_L1_hi; + uint32_t mvy_L1_hi; + int64_t value; + uint64_t temp_value; +#ifdef DEBUG_QOS + int pic_number = 0; +#endif + + picture->max_mv = 0; + picture->avg_mv = 0; + picture->min_mv = 0; + + picture->max_skip = 0; + picture->avg_skip = 0; + picture->min_skip = 0; + + picture->max_qp = 0; + picture->avg_qp = 0; + picture->min_qp = 0; + + + +#ifdef DEBUG_QOS + avs2_print(dec, 0, "slice_type:%d, poc:%d\n", + picture->slice_type, + pic_number); +#endif + /* set rd_idx to 0 */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, 0); + + blk88_y_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_y_count == 0) { +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_y_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] Y QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_y_count, + rdata32, blk88_y_count); +#endif + picture->avg_qp = rdata32/blk88_y_count; + /* intra_y_count */ + rdata32 = 
READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] Y intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif + /* skipped_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] Y skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif + picture->avg_skip = rdata32*100/blk88_y_count; + /* coeff_non_zero_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] Y ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_y_count*1)), + '%', rdata32); +#endif + /* blk66_c_count */ + blk88_c_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_c_count == 0) { +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_c_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] C QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_c_count, + rdata32, blk88_c_count); +#endif + /* intra_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] C intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* skipped_cu_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] C skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* coeff_non_zero_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] C ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_c_count*1)), + '%', rdata32); +#endif + + /* 1'h0, qp_c_max[6:0], 1'h0, qp_c_min[6:0], + 
1'h0, qp_y_max[6:0], 1'h0, qp_y_min[6:0] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + avs2_print(dec, 0, "[Picture %d Quality] Y QP min : %d\n", + pic_number, (rdata32>>0)&0xff); +#endif + picture->min_qp = (rdata32>>0)&0xff; + +#ifdef DEBUG_QOS + avs2_print(dec, 0, "[Picture %d Quality] Y QP max : %d\n", + pic_number, (rdata32>>8)&0xff); +#endif + picture->max_qp = (rdata32>>8)&0xff; + +#ifdef DEBUG_QOS + avs2_print(dec, 0, "[Picture %d Quality] C QP min : %d\n", + pic_number, (rdata32>>16)&0xff); + avs2_print(dec, 0, "[Picture %d Quality] C QP max : %d\n", + pic_number, (rdata32>>24)&0xff); +#endif + + /* blk22_mv_count */ + blk22_mv_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk22_mv_count == 0) { +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] NO MV Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* mvy_L1_count[39:32], mvx_L1_count[39:32], + mvy_L0_count[39:32], mvx_L0_count[39:32] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + /* should all be 0x00 or 0xff */ +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] MV AVG High Bits: 0x%X\n", + pic_number, rdata32); +#endif + mvx_L0_hi = ((rdata32>>0)&0xff); + mvy_L0_hi = ((rdata32>>8)&0xff); + mvx_L1_hi = ((rdata32>>16)&0xff); + mvy_L1_hi = ((rdata32>>24)&0xff); + + /* mvx_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvx_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + value = div_s64(value, blk22_mv_count); +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] MVX_L0 AVG : %d (%lld/%d)\n", + pic_number, (int)value, + value, blk22_mv_count); +#endif + picture->avg_mv = value; + + /* mvy_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if 
(mvy_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] MVY_L0 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvx_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvx_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] MVX_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvy_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvy_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] MVY_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* {mvx_L0_max, mvx_L0_min} // format : {sign, abs[14:0]} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; +#ifdef DEBUG_QOS + avs2_print(dec, 0, "[Picture %d Quality] MVX_L0 MAX : %d\n", + pic_number, mv_hi); +#endif + picture->max_mv = mv_hi; + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; +#ifdef DEBUG_QOS + avs2_print(dec, 0, "[Picture %d Quality] MVX_L0 MIN : %d\n", + pic_number, mv_lo); +#endif + picture->min_mv = mv_lo; + +#ifdef DEBUG_QOS + /* {mvy_L0_max, mvy_L0_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + avs2_print(dec, 0, "[Picture %d Quality] MVY_L0 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + avs2_print(dec, 
0, "[Picture %d Quality] MVY_L0 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvx_L1_max, mvx_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + avs2_print(dec, 0, "[Picture %d Quality] MVX_L1 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + avs2_print(dec, 0, "[Picture %d Quality] MVX_L1 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvy_L1_max, mvy_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + avs2_print(dec, 0, "[Picture %d Quality] MVY_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + avs2_print(dec, 0, "[Picture %d Quality] MVY_L1 MIN : %d\n", + pic_number, mv_lo); +#endif + + rdata32 = READ_VREG(HEVC_PIC_QUALITY_CTRL); +#ifdef DEBUG_QOS + avs2_print(dec, 0, + "[Picture %d Quality] After Read : VDEC_PIC_QUALITY_CTRL : 0x%x\n", + pic_number, rdata32); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + } +} + +static irqreturn_t vavs2_isr_thread_fn(int irq, void *data) +{ + struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)data; + unsigned int dec_status = dec->dec_status; + int i, ret; + int32_t start_code = 0; + + /*if (dec->wait_buf) + pr_info("set wait_buf to 0\r\n"); + */ + + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s decode_status 0x%x process_state %d lcu 0x%x\n", + __func__, dec_status, dec->process_state, + READ_VREG(HEVC_PARSER_LCU_START)); + +#ifndef G12A_BRINGUP_DEBUG + if (dec->eos) { + PRINT_LINE(); + goto irq_handled_exit; + } +#endif + dec->wait_buf = 0; + if (dec_status == AVS2_DECODE_BUFEMPTY) { + PRINT_LINE(); + if (dec->m_ins_flag) { + reset_process_time(dec); + if (!vdec_frame_based(hw_to_vdec(dec))) + dec_again_process(dec); + else { + dec->dec_result = DEC_RESULT_DONE; + 
reset_process_time(dec); + amhevc_stop(); + vdec_schedule_work(&dec->work); + } + } + goto irq_handled_exit; + } else if (dec_status == HEVC_DECPIC_DATA_DONE) { + PRINT_LINE(); + dec->start_decoding_flag |= 0x3; + if (dec->m_ins_flag) { + set_cuva_data(dec); + update_decoded_pic(dec); + get_picture_qos_info(dec); + reset_process_time(dec); + dec->dec_result = DEC_RESULT_DONE; + amhevc_stop(); +#if 0 /*def AVS2_10B_MMU*/ + if (dec->m_ins_flag) { + /*avs2_recycle_mmu_buf_tail(dec);*/ + dec->used_4k_num = + (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16); + } +#endif + +#if 0 + /*keep hardware state*/ + WRITE_VREG(HEVC_MPRED_INT_STATUS, (1<<31)); + WRITE_VREG(HEVC_PARSER_RESULT_3, 0xffffffff); + dec->mpred_abv_start_addr = + READ_VREG(HEVC_MPRED_ABV_START_ADDR); + /**/ +#endif + vdec_schedule_work(&dec->work); + } + goto irq_handled_exit; + } + PRINT_LINE(); +#if 0 + if (dec_status == AVS2_EOS) { + if (dec->m_ins_flag) + reset_process_time(dec); + + avs2_print(dec, AVS2_DBG_BUFMGR, + "AVS2_EOS, flush buffer\r\n"); + + avs2_post_process(&dec->avs2_dec); + avs2_prepare_display_buf(dec); + + avs2_print(dec, AVS2_DBG_BUFMGR, + "send AVS2_10B_DISCARD_NAL\r\n"); + WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_10B_DISCARD_NAL); + if (dec->m_ins_flag) { + update_decoded_pic(dec); + dec->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + vdec_schedule_work(&dec->work); + } + goto irq_handled_exit; + } else +#endif + if (dec_status == AVS2_DECODE_OVER_SIZE) { + avs2_print(dec, 0, + "avs2 decode oversize !!\n"); + debug |= (AVS2_DBG_DIS_LOC_ERROR_PROC | + AVS2_DBG_DIS_SYS_ERROR_PROC); + dec->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + if (dec->m_ins_flag) + reset_process_time(dec); + goto irq_handled_exit; + } + PRINT_LINE(); + + if (dec->m_ins_flag) + reset_process_time(dec); + + if (dec_status == AVS2_HEAD_SEQ_READY) + start_code = SEQUENCE_HEADER_CODE; + else if (dec_status == AVS2_HEAD_PIC_I_READY) + start_code = I_PICTURE_START_CODE; + else if (dec_status == AVS2_HEAD_PIC_PB_READY) + 
start_code = PB_PICTURE_START_CODE; + else if (dec_status == AVS2_STARTCODE_SEARCH_DONE) + /*SEQUENCE_END_CODE, VIDEO_EDIT_CODE*/ + start_code = READ_VREG(CUR_NAL_UNIT_TYPE); + + if (dec->process_state == + PROC_STATE_HEAD_AGAIN + ) { + if ((start_code == I_PICTURE_START_CODE) + || (start_code == PB_PICTURE_START_CODE)) { + avs2_print(dec, 0, + "PROC_STATE_HEAD_AGAIN error, start_code 0x%x!!!\r\n", + start_code); + goto irq_handled_exit; + } else { + avs2_print(dec, AVS2_DBG_BUFMGR, + "PROC_STATE_HEAD_AGAIN, start_code 0x%x\r\n", + start_code); + dec->process_state = PROC_STATE_HEAD_DONE; + WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_ACTION_DONE); + goto irq_handled_exit; + } + } else if (dec->process_state == + PROC_STATE_DECODE_AGAIN) { + if ((start_code == I_PICTURE_START_CODE) + || (start_code == PB_PICTURE_START_CODE)) { + avs2_print(dec, AVS2_DBG_BUFMGR, + "PROC_STATE_DECODE_AGAIN=> decode_slice, start_code 0x%x\r\n", + start_code); + goto decode_slice; + } else { + avs2_print(dec, 0, + "PROC_STATE_DECODE_AGAIN, start_code 0x%x!!!\r\n", + start_code); + WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_ACTION_DONE); + goto irq_handled_exit; + } + } + + if ((start_code == I_PICTURE_START_CODE) + || (start_code == PB_PICTURE_START_CODE) + || (start_code == SEQUENCE_END_CODE) + || (start_code == VIDEO_EDIT_CODE)) { + PRINT_LINE(); + + if (dec->avs2_dec.hc.cur_pic != NULL) { + int32_t ii; +#ifdef AVS2_10B_MMU + avs2_recycle_mmu_buf_tail(dec); +#endif + check_pic_error(dec, dec->avs2_dec.hc.cur_pic); + avs2_post_process(&dec->avs2_dec); + + if (debug & AVS2_DBG_PRINT_PIC_LIST) + dump_pic_list(dec); + + avs2_prepare_display_buf(dec); + dec->avs2_dec.hc.cur_pic = NULL; + for (ii = 0; ii < dec->avs2_dec.ref_maxbuffer; + ii++) { + struct avs2_frame_s *pic = + dec->avs2_dec.fref[ii]; + if (pic->bg_flag == 0 && + pic->is_output == -1 && + pic->mmu_alloc_flag && + pic->vf_ref == 0) { + if (pic->refered_by_others == 0) { +#ifdef AVS2_10B_MMU + pic->mmu_alloc_flag = 0; + /*release_buffer_4k( 
+ dec->avs2_dec.fref[ii]->index);*/ + decoder_mmu_box_free_idx(dec->mmu_box, + pic->index); +#endif +#ifndef MV_USE_FIXED_BUF + decoder_bmmu_box_free_idx( + dec->bmmu_box, + MV_BUFFER_IDX(pic->index)); + pic->mpred_mv_wr_start_addr = 0; +#endif + } + } + } + } + } + + if ((dec_status == AVS2_HEAD_PIC_I_READY) + || (dec_status == AVS2_HEAD_PIC_PB_READY)) { + PRINT_LINE(); + + if (debug & AVS2_DBG_SEND_PARAM_WITH_REG) { + get_rpm_param( + &dec->avs2_dec.param); + } else { + + for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) { + int ii; + for (ii = 0; ii < 4; ii++) + dec->avs2_dec.param.l.data[i + ii] = + dec->rpm_ptr[i + 3 - ii]; + } + } +#ifdef SANITY_CHECK + if (dec->avs2_dec.param.p.num_of_ref_cur > + dec->avs2_dec.ref_maxbuffer) { + pr_info("Warning: Wrong num_of_ref_cur %d, force to %d\n", + dec->avs2_dec.param.p.num_of_ref_cur, + dec->avs2_dec.ref_maxbuffer); + dec->avs2_dec.param.p.num_of_ref_cur = + dec->avs2_dec.ref_maxbuffer; + } +#endif + PRINT_LINE(); + + debug_buffer_mgr_more(dec); + get_frame_rate(&dec->avs2_dec.param, dec); + +#if 1 // The video_signal_type is type of uint16_t and result false, so comment it out. 
+ if (dec->avs2_dec.param.p.video_signal_type + & (1<<30)) { + union param_u *pPara; + + avs2_print(dec, 0, + "avs2 HDR meta data present\n"); + pPara = &dec->avs2_dec.param; + + /*clean this flag*/ + pPara->p.video_signal_type + &= ~(1<<30); + + dec->vf_dp.present_flag = 1; + + dec->vf_dp.white_point[0] + = pPara->p.white_point_x; + avs2_print(dec, AVS2_DBG_HDR_INFO, + "white_point[0]:0x%x\n", + dec->vf_dp.white_point[0]); + + dec->vf_dp.white_point[1] + = pPara->p.white_point_y; + avs2_print(dec, AVS2_DBG_HDR_INFO, + "white_point[1]:0x%x\n", + dec->vf_dp.white_point[1]); + + for (i = 0; i < 3; i++) { + dec->vf_dp.primaries[i][0] + = pPara->p.display_primaries_x[i]; + avs2_print(dec, AVS2_DBG_HDR_INFO, + "primaries[%d][0]:0x%x\n", + i, + dec->vf_dp.primaries[i][0]); + } + + for (i = 0; i < 3; i++) { + dec->vf_dp.primaries[i][1] + = pPara->p.display_primaries_y[i]; + avs2_print(dec, AVS2_DBG_HDR_INFO, + "primaries[%d][1]:0x%x\n", + i, + dec->vf_dp.primaries[i][1]); + } + + dec->vf_dp.luminance[0] + = pPara->p.max_display_mastering_luminance; + avs2_print(dec, AVS2_DBG_HDR_INFO, + "luminance[0]:0x%x\n", + dec->vf_dp.luminance[0]); + + dec->vf_dp.luminance[1] + = pPara->p.min_display_mastering_luminance; + avs2_print(dec, AVS2_DBG_HDR_INFO, + "luminance[1]:0x%x\n", + dec->vf_dp.luminance[1]); + + + dec->vf_dp.content_light_level.present_flag + = 1; + dec->vf_dp.content_light_level.max_content + = pPara->p.max_content_light_level; + avs2_print(dec, AVS2_DBG_HDR_INFO, + "max_content:0x%x\n", + dec->vf_dp.content_light_level.max_content); + + dec->vf_dp.content_light_level.max_pic_average + = pPara->p.max_picture_average_light_level; + + avs2_print(dec, AVS2_DBG_HDR_INFO, + "max_pic_average:0x%x\n", + dec->vf_dp.content_light_level.max_pic_average); + } +#endif + + + if (dec->video_ori_signal_type != + ((dec->avs2_dec.param.p.video_signal_type << 16) + | dec->avs2_dec.param.p.color_description)) { + u32 v = dec->avs2_dec.param.p.video_signal_type; + u32 c = 
dec->avs2_dec.param.p.color_description; + u32 convert_c = c; + + if (v & 0x2000) { + avs2_print(dec, AVS2_DBG_HDR_INFO, + "video_signal_type present:\n"); + avs2_print(dec, AVS2_DBG_HDR_INFO, + " %s %s\n", + video_format_names[(v >> 10) & 7], + ((v >> 9) & 1) ? + "full_range" : "limited"); + if (v & 0x100) { + u32 transfer; + u32 maxtrix; + + avs2_print(dec, AVS2_DBG_HDR_INFO, + "color_description present:\n"); + avs2_print(dec, AVS2_DBG_HDR_INFO, + "color_primarie = %d\n", + v & 0xff); + avs2_print(dec, AVS2_DBG_HDR_INFO, + "transfer_characteristic = %d\n", + (c >> 8) & 0xff); + avs2_print(dec, AVS2_DBG_HDR_INFO, + " matrix_coefficient = %d\n", + c & 0xff); + + transfer = (c >> 8) & 0xFF; + if (transfer >= 15) + avs2_print(dec, AVS2_DBG_HDR_INFO, + "unsupport transfer_characteristic\n"); + else if (transfer == 14) + transfer = 18; /* HLG */ + else if (transfer == 13) + transfer = 32; + else if (transfer == 12) + transfer = 16; + else if (transfer == 11) + transfer = 15; + + maxtrix = c & 0xFF; + if (maxtrix >= 10) + avs2_print(dec, AVS2_DBG_HDR_INFO, + "unsupport matrix_coefficient\n"); + else if (maxtrix == 9) + maxtrix = 10; + else if (maxtrix == 8) + maxtrix = 9; + + convert_c = (transfer << 8) | (maxtrix); + + avs2_print(dec, AVS2_DBG_HDR_INFO, + " convered c:0x%x\n", + convert_c); + } + } + + if (enable_force_video_signal_type) + dec->video_signal_type + = force_video_signal_type; + else { + dec->video_signal_type + = (v << 16) | convert_c; + + dec->video_ori_signal_type + = (v << 16) | c; + } + + video_signal_type = dec->video_signal_type; + } + } +#if 0 + if ((debug_again & 0x4) && + dec->process_state == + PROC_STATE_INIT) { + if (start_code == PB_PICTURE_START_CODE) { + dec->process_state = PROC_STATE_TEST1; + dec_again_process(dec); + goto irq_handled_exit; + } + } +#endif + PRINT_LINE(); + avs2_prepare_header(&dec->avs2_dec, start_code); + + if (start_code == SEQUENCE_HEADER_CODE || + start_code == VIDEO_EDIT_CODE || + start_code == SEQUENCE_END_CODE) 
{ + if (dec->m_ins_flag && + vdec_frame_based(hw_to_vdec(dec))) + dec->start_decoding_flag |= 0x1; + dec->process_state = PROC_STATE_HEAD_DONE; + WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_ACTION_DONE); + } else if (start_code == I_PICTURE_START_CODE || + start_code == PB_PICTURE_START_CODE) { + ret = 0; + if (dec->pic_list_init_flag == 0) { + int32_t lcu_size_log2 = + log2i(dec->avs2_dec.param.p.lcu_size); + + avs2_init_global_buffers(&dec->avs2_dec); + /*avs2_dec->m_bg->index is + set to dec->used_buf_num - 1*/ + init_pic_list(dec, lcu_size_log2); + init_pic_list_hw(dec); + } + ret = avs2_process_header(&dec->avs2_dec); + if (!dec->m_ins_flag) + dec->slice_idx++; + + if (dec->m_ins_flag && ret + && dec->avs2_dec.hc.cur_pic->cuva_data_buf != NULL) + release_cuva_data(dec->avs2_dec.hc.cur_pic); + + PRINT_LINE(); +#ifdef I_ONLY_SUPPORT + if ((start_code == PB_PICTURE_START_CODE) && + (dec->i_only & 0x2)) + ret = -2; +#endif + + if (ret >= 0) { +#ifdef AVS2_10B_MMU + if (dec->mmu_enable) { + ret = avs2_alloc_mmu(dec, + dec->avs2_dec.hc.cur_pic->index, + dec->avs2_dec.img.width, + dec->avs2_dec.img.height, + dec->avs2_dec.input.sample_bit_depth, + dec->frame_mmu_map_addr); + if (ret >= 0) { + dec->cur_fb_idx_mmu = + dec->avs2_dec.hc.cur_pic->index; + dec->avs2_dec.hc.cur_pic->mmu_alloc_flag = 1; + } else + pr_err("can't alloc need mmu1,idx %d ret =%d\n", + dec->avs2_dec.hc.cur_pic->index, + ret); + } +#endif +#ifdef AVS2_10B_MMU_DW + if (dec->dw_mmu_enable) { + ret = avs2_alloc_dw_mmu(dec, + dec->avs2_dec.hc.cur_pic->index, + dec->avs2_dec.img.width, + dec->avs2_dec.img.height, + dec->avs2_dec.input.sample_bit_depth, + dec->dw_frame_mmu_map_addr); + if (ret >= 0) { + dec->cur_fb_idx_mmu = + dec->avs2_dec.hc.cur_pic->index; + dec->avs2_dec.hc.cur_pic->mmu_alloc_flag = 1; + } else + pr_err("can't alloc need dw mmu1,idx %d ret =%d\n", + dec->avs2_dec.hc.cur_pic->index, + ret); + } +#endif + } + +#ifndef MV_USE_FIXED_BUF + if (ret >= 0 && dec->avs2_dec.hc.cur_pic-> + 
mpred_mv_wr_start_addr == 0) { + unsigned long buf_addr; + unsigned mv_buf_size = get_mv_buf_size( + dec, + dec->avs2_dec.hc.cur_pic->pic_w, + dec->avs2_dec.hc.cur_pic->pic_h); + int i = dec->avs2_dec.hc.cur_pic->index; + /*if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + mv_buf_size = 0x120000 * 4;*/ + if (decoder_bmmu_box_alloc_buf_phy + (dec->bmmu_box, + MV_BUFFER_IDX(i), + mv_buf_size, + DRIVER_NAME, + &buf_addr) < 0) + ret = -1; + else + dec->avs2_dec.hc.cur_pic-> + mpred_mv_wr_start_addr + = buf_addr; + } +#endif + if (ret < 0) { + avs2_print(dec, AVS2_DBG_BUFMGR, + "avs2_bufmgr_process=> %d, AVS2_10B_DISCARD_NAL\r\n", + ret); + WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_10B_DISCARD_NAL); + #ifdef AVS2_10B_MMU + if (dec->mmu_enable) + avs2_recycle_mmu_buf(dec); + #endif + if (dec->m_ins_flag) { + dec->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + vdec_schedule_work(&dec->work); + } + + goto irq_handled_exit; + } else { + PRINT_LINE(); + dec->avs2_dec.hc.cur_pic->stream_offset = + READ_VREG(HEVC_SHIFT_BYTE_COUNT); + /* + struct PIC_BUFFER_CONFIG_s *cur_pic + = &cm->cur_frame->buf; + cur_pic->decode_idx = dec->frame_count; + */ + if (!dec->m_ins_flag) { + dec->frame_count++; + decode_frame_count[dec->index] + = dec->frame_count; + } + /*MULTI_INSTANCE_SUPPORT*/ + if (dec->chunk) { + dec->avs2_dec.hc.cur_pic->pts = + dec->chunk->pts; + dec->avs2_dec.hc.cur_pic->pts64 = + dec->chunk->pts64; + } + /**/ + dec->avs2_dec.hc.cur_pic->bit_depth + = dec->avs2_dec.input.sample_bit_depth; + dec->avs2_dec.hc.cur_pic->double_write_mode + = get_double_write_mode(dec); +decode_slice: + PRINT_LINE(); + + config_mc_buffer(dec); + config_mcrcc_axi_hw(dec); + config_mpred_hw(dec); + config_dblk_hw(dec); + config_sao_hw(dec); + config_alf_hw(dec); + config_other_hw(dec); + + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "=>fref0 imgtr_fwRefDistance %d, fref1 imgtr_fwRefDistance %d, dis2/dis3/dis4 %d %d %d img->tr %d\n", + dec->avs2_dec.fref[0]->imgtr_fwRefDistance, + 
dec->avs2_dec.fref[1]->imgtr_fwRefDistance, + dec->avs2_dec.fref[2]->imgtr_fwRefDistance, + dec->avs2_dec.fref[3]->imgtr_fwRefDistance, + dec->avs2_dec.fref[4]->imgtr_fwRefDistance, + dec->avs2_dec.img.tr); + + if ((debug_again & 0x2) && + dec->process_state == + PROC_STATE_INIT) { + dec->process_state = PROC_STATE_DECODING; + dec_again_process(dec); + goto irq_handled_exit; + } + + dec->process_state = PROC_STATE_DECODING; + + WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_ACTION_DONE); + + } + + if (dec->m_ins_flag) + start_process_time(dec); + } +irq_handled_exit: + PRINT_LINE(); + dec->process_busy = 0; + return IRQ_HANDLED; +} + +static irqreturn_t vavs2_isr(int irq, void *data) +{ + int i; + unsigned int dec_status; + struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)data; + uint debug_tag; + + WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + + dec_status = READ_VREG(HEVC_DEC_STATUS_REG); + + if (!dec) + return IRQ_HANDLED; + if (dec->init_flag == 0) + return IRQ_HANDLED; + if (dec->process_busy)/*on process.*/ + return IRQ_HANDLED; + dec->dec_status = dec_status; + dec->process_busy = 1; + if (debug & AVS2_DBG_IRQ_EVENT) + avs2_print(dec, 0, + "avs2 isr dec status = 0x%x, lcu 0x%x shiftbyte 0x%x (%x %x lev %x, wr %x, rd %x)\n", + dec_status, READ_VREG(HEVC_PARSER_LCU_START), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR) + ); + + debug_tag = READ_HREG(DEBUG_REG1); + if (debug_tag & 0x10000) { + dma_sync_single_for_cpu( + amports_get_dma_device(), + dec->lmem_phy_addr, + LMEM_BUF_SIZE, + DMA_FROM_DEVICE); + + pr_info("LMEM<tag %x>:\n", READ_HREG(DEBUG_REG1)); + for (i = 0; i < 0x400; i += 4) { + int ii; + if ((i & 0xf) == 0) + pr_info("%03x: ", i); + for (ii = 0; ii < 4; ii++) { + pr_info("%04x ", + dec->lmem_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + pr_info("\n"); + } + + if (((udebug_pause_pos & 0xffff) 
+ == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == dec->decode_idx) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_HREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + dec->ucode_pause_pos = udebug_pause_pos; + } else if (debug_tag & 0x20000) + dec->ucode_pause_pos = 0xffffffff; + if (dec->ucode_pause_pos) + reset_process_time(dec); + else + WRITE_HREG(DEBUG_REG1, 0); + } else if (debug_tag != 0) { + pr_info( + "dbg%x: %x lcu %x\n", READ_HREG(DEBUG_REG1), + READ_HREG(DEBUG_REG2), + READ_VREG(HEVC_PARSER_LCU_START)); + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == dec->decode_idx) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_HREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + dec->ucode_pause_pos = udebug_pause_pos; + } + if (dec->ucode_pause_pos) + reset_process_time(dec); + else + WRITE_HREG(DEBUG_REG1, 0); + dec->process_busy = 0; + return IRQ_HANDLED; + } + + if (!dec->m_ins_flag) { + if (dec->error_flag == 1) { + dec->error_flag = 2; + dec->process_busy = 0; + return IRQ_HANDLED; + } else if (dec->error_flag == 3) { + dec->process_busy = 0; + return IRQ_HANDLED; + } + + if ((dec->pic_list_init_flag) && + get_free_buf_count(dec) <= 0) { + /* + if (dec->wait_buf == 0) + pr_info("set wait_buf to 1\r\n"); + */ + dec->wait_buf = 1; + dec->process_busy = 0; + if (debug & AVS2_DBG_IRQ_EVENT) + avs2_print(dec, 0, "wait_buf\n"); + return IRQ_HANDLED; + } else if (force_disp_pic_index) { + dec->process_busy = 0; + return IRQ_HANDLED; + } + } + return IRQ_WAKE_THREAD; +} + +static void vavs2_put_timer_func(struct timer_list *timer) +{ + struct AVS2Decoder_s *dec = container_of(timer, + struct AVS2Decoder_s, timer); + uint8_t empty_flag; + unsigned int buf_level; + + enum receviver_start_e state = RECEIVER_INACTIVE; + if (dec->m_ins_flag) { + if (hw_to_vdec(dec)->next_status + == VDEC_STATUS_DISCONNECTED) { + dec->dec_result = 
DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&dec->work); + avs2_print(dec, AVS2_DBG_BUFMGR, + "vdec requested to be disconnected\n"); + return; + } + } + if (dec->init_flag == 0) { + if (dec->stat & STAT_TIMER_ARM) { + timer->expires = jiffies + PUT_INTERVAL; + add_timer(&dec->timer); + } + return; + } + if (dec->m_ins_flag == 0) { + if (vf_get_receiver(dec->provider_name)) { + state = + vf_notify_receiver(dec->provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + if ((state == RECEIVER_STATE_NULL) + || (state == RECEIVER_STATE_NONE)) + state = RECEIVER_INACTIVE; + } else + state = RECEIVER_INACTIVE; + + empty_flag = (READ_VREG(HEVC_PARSER_INT_STATUS) >> 6) & 0x1; + /* error watchdog */ + if (empty_flag == 0) { + /* decoder has input */ + if ((debug & AVS2_DBG_DIS_LOC_ERROR_PROC) == 0) { + + buf_level = READ_VREG(HEVC_STREAM_LEVEL); + /* receiver has no buffer to recycle */ + if ((state == RECEIVER_INACTIVE) && + (kfifo_is_empty(&dec->display_q) && + buf_level > 0x200) + ) { + WRITE_VREG + (HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + } + } + + if ((debug & AVS2_DBG_DIS_SYS_ERROR_PROC) == 0) { + /* receiver has no buffer to recycle */ + /*if ((state == RECEIVER_INACTIVE) && + (kfifo_is_empty(&dec->display_q))) { + pr_info("avs2 something error,need reset\n"); + }*/ + } + } + } else { + if ( + (decode_timeout_val > 0) && + (dec->start_process_time > 0) && + ((1000 * (jiffies - dec->start_process_time) / HZ) + > decode_timeout_val) + ) { + int current_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff; + if (dec->last_lcu_idx == current_lcu_idx) { + if (dec->decode_timeout_count > 0) + dec->decode_timeout_count--; + if (dec->decode_timeout_count == 0) { + if (input_frame_based( + hw_to_vdec(dec)) || + (READ_VREG(HEVC_STREAM_LEVEL) > 0x200)) + timeout_process(dec); + else { + avs2_print(dec, 0, + "timeout & empty, again\n"); + dec_again_process(dec); + } + } + } else { + start_process_time(dec); + dec->last_lcu_idx = current_lcu_idx; + } + } + } + + if 
((dec->ucode_pause_pos != 0) && + (dec->ucode_pause_pos != 0xffffffff) && + udebug_pause_pos != dec->ucode_pause_pos) { + dec->ucode_pause_pos = 0; + WRITE_HREG(DEBUG_REG1, 0); + } + if (debug & AVS2_DBG_DUMP_DATA) { + debug &= ~AVS2_DBG_DUMP_DATA; + avs2_print(dec, 0, + "%s: chunk size 0x%x off 0x%x sum 0x%x\n", + __func__, + dec->chunk->size, + dec->chunk->offset, + get_data_check_sum(dec, dec->chunk->size) + ); + dump_data(dec, dec->chunk->size); + } + if (debug & AVS2_DBG_DUMP_PIC_LIST) { + dump_pic_list(dec); + debug &= ~AVS2_DBG_DUMP_PIC_LIST; + } + if (debug & AVS2_DBG_TRIG_SLICE_SEGMENT_PROC) { + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); + debug &= ~AVS2_DBG_TRIG_SLICE_SEGMENT_PROC; + } + if (debug & AVS2_DBG_DUMP_RPM_BUF) { + int i; + + pr_info("RPM:\n"); + for (i = 0; i < RPM_BUF_SIZE; i += 4) { + int ii; + if ((i & 0xf) == 0) + pr_info("%03x: ", i); + for (ii = 0; ii < 4; ii++) { + pr_info("%04x ", + dec->lmem_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + pr_info("\n"); + } + debug &= ~AVS2_DBG_DUMP_RPM_BUF; + } + if (debug & AVS2_DBG_DUMP_LMEM_BUF) { + int i; + + pr_info("LMEM:\n"); + for (i = 0; i < LMEM_BUF_SIZE; i += 4) { + int ii; + if ((i & 0xf) == 0) + pr_info("%03x: ", i); + for (ii = 0; ii < 4; ii++) { + pr_info("%04x ", + dec->lmem_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + pr_info("\n"); + } + debug &= ~AVS2_DBG_DUMP_LMEM_BUF; + } + /*if (debug & AVS2_DBG_HW_RESET) { + }*/ + + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + if (pop_shorts != 0) { + int i; + u32 sum = 0; + pr_info("pop stream 0x%x shorts\r\n", pop_shorts); + for (i = 0; i < pop_shorts; i++) { + u32 data = + (READ_HREG(HEVC_SHIFTED_DATA) >> 16); + WRITE_HREG(HEVC_SHIFT_COMMAND, + (1<<7)|16); + if ((i & 0xf) == 0) + pr_info("%04x:", i); + pr_info("%04x ", data); + if (((i + 1) & 0xf) == 0) + pr_info("\r\n"); 
+ sum += data; + } + pr_info("\r\nsum = %x\r\n", sum); + pop_shorts = 0; + } + if (dbg_cmd != 0) { + if (dbg_cmd == 1) { + u32 disp_laddr; + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB && + get_double_write_mode(dec) == 0) { + disp_laddr = + READ_VCBUS_REG(AFBC_BODY_BADDR) << 4; + } else { + struct canvas_s cur_canvas; + canvas_read((READ_VCBUS_REG(VD1_IF0_CANVAS0) + & 0xff), &cur_canvas); + disp_laddr = cur_canvas.addr; + } + pr_info("current displayed buffer address %x\r\n", + disp_laddr); + } + dbg_cmd = 0; + } + /*don't changed at start.*/ + if (dec->get_frame_dur && dec->show_frame_num > 60 && + dec->frame_dur > 0 && dec->saved_resolution != + frame_width * frame_height * + (96000 / dec->frame_dur)) { + int fps = 96000 / dec->frame_dur; + if (hevc_source_changed(VFORMAT_AVS2, + frame_width, frame_height, fps) > 0) + dec->saved_resolution = frame_width * + frame_height * fps; + } + + timer->expires = jiffies + PUT_INTERVAL; + add_timer(timer); +} + + +int vavs2_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + + if (!dec) + return -1; + + vstatus->frame_width = dec->frame_width; + vstatus->frame_height = dec->frame_height; + + if (dec->frame_dur != 0) + vstatus->frame_rate = ((96000 * 10 / dec->frame_dur) % 10) < 5 ? 
+ 96000 / dec->frame_dur : (96000 / dec->frame_dur +1); + else + vstatus->frame_rate = -1; + vstatus->error_count = 0; + vstatus->status = dec->stat | dec->fatal_error; + vstatus->frame_dur = dec->frame_dur; + vstatus->bit_rate = dec->gvs->bit_rate; + vstatus->frame_data = dec->gvs->frame_data; + vstatus->total_data = dec->gvs->total_data; + vstatus->frame_count = dec->gvs->frame_count; + vstatus->error_frame_count = dec->gvs->error_frame_count; + vstatus->drop_frame_count = dec->gvs->drop_frame_count; + vstatus->i_decoded_frames = dec->gvs->i_decoded_frames; + vstatus->i_lost_frames = dec->gvs->i_lost_frames; + vstatus->i_concealed_frames = dec->gvs->i_concealed_frames; + vstatus->p_decoded_frames = dec->gvs->p_decoded_frames; + vstatus->p_lost_frames = dec->gvs->p_lost_frames; + vstatus->p_concealed_frames = dec->gvs->p_concealed_frames; + vstatus->b_decoded_frames = dec->gvs->b_decoded_frames; + vstatus->b_lost_frames = dec->gvs->b_lost_frames; + vstatus->b_concealed_frames = dec->gvs->b_concealed_frames; + vstatus->total_data = dec->gvs->total_data; + vstatus->samp_cnt = dec->gvs->samp_cnt; + vstatus->offset = dec->gvs->offset; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + return 0; +} + +int vavs2_set_isreset(struct vdec_s *vdec, int isreset) +{ + is_reset = isreset; + return 0; +} + +static void vavs2_prot_init(struct AVS2Decoder_s *dec) +{ + unsigned int data32; + + avs2_config_work_space_hw(dec); + if (dec->pic_list_init_flag) + init_pic_list_hw(dec); + + avs2_init_decoder_hw(dec); + +#if 1 + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s\n", __func__); + data32 = READ_VREG(HEVC_STREAM_CONTROL); + data32 = data32 | + (1 << 0)/*stream_fetch_enable*/ + ; + WRITE_VREG(HEVC_STREAM_CONTROL, data32); +#if 0 + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x00000100) { + pr_info("avs2 prot init error %d\n", __LINE__); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x00000300) { + 
pr_info("avs2 prot init error %d\n", __LINE__); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x12345678); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x9abcdef0); + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x12345678) { + pr_info("avs2 prot init error %d\n", __LINE__); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x9abcdef0) { + pr_info("avs2 prot init error %d\n", __LINE__); + return; + } +#endif + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x00000100); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x00000000); +#endif + + + + WRITE_VREG(HEVC_WAIT_FLAG, 1); + + /* WRITE_VREG(HEVC_MPSR, 1); */ + + /* clear mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 1); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(HEVC_PSCALE_CTRL, 0); + + WRITE_VREG(DEBUG_REG1, 0x0); + /*check vps/sps/pps/i-slice in ucode*/ + WRITE_VREG(NAL_SEARCH_CTL, 0x8); + + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + + config_cuva_buf(dec); +} + +#ifdef I_ONLY_SUPPORT +static int vavs2_set_trickmode(struct vdec_s *vdec, unsigned long trickmode) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + if (i_only_flag & 0x100) + return 0; + if (trickmode == TRICKMODE_I || trickmode == TRICKMODE_I_HEVC) + dec->i_only = 0x3; + else if (trickmode == TRICKMODE_NONE) + dec->i_only = 0x0; + return 0; +} +#endif + +static int vavs2_local_init(struct AVS2Decoder_s *dec) +{ + int i; + int ret; + int width, height; + + dec->vavs2_ratio = dec->vavs2_amstream_dec_info.ratio; + + dec->gvs = vzalloc(sizeof(struct vdec_info)); + if (NULL == dec->gvs) { + avs2_print(dec, 0, + "the struct of vdec status malloc failed.\n"); + return -1; + } +#ifdef DEBUG_PTS + dec->pts_missed = 0; + dec->pts_hit = 0; +#endif + dec->new_frame_displayed = 0; + dec->last_put_idx = -1; + dec->saved_resolution = 0; + dec->get_frame_dur = false; + on_no_keyframe_skiped = 0; + width = 
dec->vavs2_amstream_dec_info.width; + height = dec->vavs2_amstream_dec_info.height; + dec->frame_dur = + (dec->vavs2_amstream_dec_info.rate == + 0) ? 3600 : dec->vavs2_amstream_dec_info.rate; + if (width && height) + dec->frame_ar = height * 0x100 / width; +/* +TODO:FOR VERSION +*/ + avs2_print(dec, AVS2_DBG_BUFMGR, + "avs2: ver (%d,%d) decinfo: %dx%d rate=%d\n", avs2_version, + 0, width, height, dec->frame_dur); + + if (dec->frame_dur == 0) + dec->frame_dur = 96000 / 24; +#ifdef I_ONLY_SUPPORT + if (i_only_flag & 0x100) + dec->i_only = i_only_flag & 0xff; + else if ((unsigned long) dec->vavs2_amstream_dec_info.param + & 0x08) + dec->i_only = 0x7; + else + dec->i_only = 0x0; +#endif + INIT_KFIFO(dec->display_q); + INIT_KFIFO(dec->newframe_q); + + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &dec->vfpool[i]; + dec->vfpool[i].index = -1; + kfifo_put(&dec->newframe_q, vf); + } + + + ret = avs2_local_init(dec); + + return ret; +} + + +static s32 vavs2_init(struct vdec_s *vdec) +{ + int ret = -1, size = -1; + int fw_size = 0x1000 * 16; + struct firmware_s *fw = NULL; + struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)vdec->private; + + timer_setup(&dec->timer, vavs2_put_timer_func, 0); + + dec->stat |= STAT_TIMER_INIT; + if (vavs2_local_init(dec) < 0) + return -EBUSY; + + vdec_set_vframe_comm(vdec, DRIVER_NAME); + + fw = vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + size = get_firmware_data(VIDEO_DEC_AVS2_MMU, fw->data); + if (size < 0) { + pr_err("get firmware fail.\n"); + vfree(fw); + return -1; + } + + fw->len = fw_size; + + if (dec->m_ins_flag) { + dec->timer.expires = jiffies + PUT_INTERVAL; + + /*add_timer(&dec->timer); + + dec->stat |= STAT_TIMER_ARM; + dec->stat |= STAT_ISR_REG;*/ + + INIT_WORK(&dec->work, avs2_work); + dec->fw = fw; + + return 0; + } + + amhevc_enable(); + + ret = amhevc_loadmc_ex(VFORMAT_AVS2, NULL, fw->data); + if (ret < 0) { + amhevc_disable(); + vfree(fw); + 
pr_err("AVS2: the %s fw loading failed, err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + return -EBUSY; + } + + vfree(fw); + + dec->stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + vavs2_prot_init(dec); + + if (vdec_request_threaded_irq(VDEC_IRQ_0, + vavs2_isr, + vavs2_isr_thread_fn, + IRQF_ONESHOT,/*run thread on this irq disabled*/ + "vavs2-irq", (void *)dec)) { + pr_info("vavs2 irq register error.\n"); + amhevc_disable(); + return -ENOENT; + } + + dec->stat |= STAT_ISR_REG; + + dec->provider_name = PROVIDER_NAME; + vf_provider_init(&vavs2_vf_prov, PROVIDER_NAME, + &vavs2_vf_provider, dec); + vf_reg_provider(&vavs2_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); + if (dec->frame_dur != 0) { + if (!is_reset) + vf_notify_receiver(dec->provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)dec->frame_dur)); + } + dec->stat |= STAT_VF_HOOK; + + dec->timer.expires = jiffies + PUT_INTERVAL; + add_timer(&dec->timer); + + dec->stat |= STAT_TIMER_ARM; + + /* dec->stat |= STAT_KTHREAD; */ + dec->process_busy = 0; + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%d, vavs2_init, RP=0x%x\n", + __LINE__, READ_VREG(HEVC_STREAM_RD_PTR)); + return 0; +} + +static int vmavs2_stop(struct AVS2Decoder_s *dec) +{ + dec->init_flag = 0; + dec->first_sc_checked = 0; + if (dec->stat & STAT_TIMER_ARM) { + del_timer_sync(&dec->timer); + dec->stat &= ~STAT_TIMER_ARM; + } + + if (dec->stat & STAT_VF_HOOK) { + if (!is_reset) + vf_notify_receiver(dec->provider_name, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + + vf_unreg_provider(&vavs2_vf_prov); + dec->stat &= ~STAT_VF_HOOK; + } + avs2_local_uninit(dec); + reset_process_time(dec); + cancel_work_sync(&dec->work); + uninit_mmu_buffers(dec); + if (dec->fw) { + vfree(dec->fw); + dec->fw = NULL; + } + + return 0; +} + + +static int vavs2_stop(struct AVS2Decoder_s *dec) +{ + + dec->init_flag = 0; + dec->first_sc_checked = 0; + if (dec->stat & STAT_VDEC_RUN) { + amhevc_stop(); + 
dec->stat &= ~STAT_VDEC_RUN; + } + + if (dec->stat & STAT_ISR_REG) { + if (!dec->m_ins_flag) + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0); + vdec_free_irq(VDEC_IRQ_0, (void *)dec); + dec->stat &= ~STAT_ISR_REG; + } + + if (dec->stat & STAT_TIMER_ARM) { + del_timer_sync(&dec->timer); + dec->stat &= ~STAT_TIMER_ARM; + } + + if (dec->stat & STAT_VF_HOOK) { + if (!is_reset) + vf_notify_receiver(dec->provider_name, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + + vf_unreg_provider(&vavs2_vf_prov); + dec->stat &= ~STAT_VF_HOOK; + } + avs2_local_uninit(dec); + + if (dec->m_ins_flag) + cancel_work_sync(&dec->work); + else + amhevc_disable(); + uninit_mmu_buffers(dec); + + return 0; +} + +static int amvdec_avs2_mmu_init(struct AVS2Decoder_s *dec) +{ + int tvp_flag = vdec_secure(hw_to_vdec(dec)) ? + CODEC_MM_FLAGS_TVP : 0; + int buf_size = 48; + + dec->need_cache_size = buf_size * SZ_1M; + dec->sc_start_time = get_jiffies_64(); +#ifdef AVS2_10B_MMU + if (dec->mmu_enable) { + dec->mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME, + dec->index, FRAME_BUFFERS, + dec->need_cache_size, + tvp_flag + ); + if (!dec->mmu_box) { + pr_err("avs2 alloc mmu box failed!!\n"); + return -1; + } + } +#endif +#ifdef AVS2_10B_MMU_DW + if (dec->dw_mmu_enable) { + dec->dw_mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME, + dec->index, FRAME_BUFFERS, + dec->need_cache_size, + tvp_flag + ); + if (!dec->dw_mmu_box) { + pr_err("avs2 alloc dw mmu box failed!!\n"); + dec->dw_mmu_enable = 0; + } + } +#endif + dec->bmmu_box = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + dec->index, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + tvp_flag); + if (!dec->bmmu_box) { + pr_err("avs2 alloc bmmu box failed!!\n"); + return -1; + } + return 0; +} + +static int amvdec_avs2_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + /*struct BUF_s BUF[MAX_BUF_NUM];*/ + struct AVS2Decoder_s *dec = &gAVS2Decoder; + int 
ret; + pr_info("%s\n", __func__); + + dec = vzalloc(sizeof(struct AVS2Decoder_s)); + if (!dec) + return -ENOMEM; + + pdata->private = dec; + platform_set_drvdata(pdev, pdata); + + mutex_lock(&vavs2_mutex); + + dec->init_flag = 0; + dec->first_sc_checked = 0; + dec->eos = 0; + dec->start_process_time = 0; + dec->timeout_num = 0; + dec->fatal_error = 0; + dec->show_frame_num = 0; + if (pdata == NULL) { + avs2_print(dec, 0, + "\namvdec_avs2 memory resource undefined.\n"); + mutex_unlock(&vavs2_mutex); + return -EFAULT; + } + dec->m_ins_flag = 0; + dec->platform_dev = pdev; + platform_set_drvdata(pdev, pdata); + +#ifdef AVS2_10B_MMU_DW + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) { + dec->dw_mmu_enable = + (get_double_write_mode(dec) & 0x20) ? 1 : 0; + } else { + dec->dw_mmu_enable = 0; + } +#endif + if (amvdec_avs2_mmu_init(dec) < 0) { + mutex_unlock(&vavs2_mutex); + pr_err("avs2 alloc bmmu box failed!!\n"); + return -1; + } + + ret = decoder_bmmu_box_alloc_buf_phy(dec->bmmu_box, WORK_SPACE_BUF_ID, + work_buf_size, DRIVER_NAME, &pdata->mem_start); + if (ret < 0) { + uninit_mmu_buffers(dec); + mutex_unlock(&vavs2_mutex); + return ret; + } + dec->buf_size = work_buf_size; + + dec->buf_start = pdata->mem_start; + + + if (debug) { + avs2_print(dec, 0, + "===AVS2 decoder mem resource 0x%lx size 0x%x\n", + pdata->mem_start, dec->buf_size); + } + + if (pdata->sys_info) { + dec->vavs2_amstream_dec_info = *pdata->sys_info; + dec->frame_width = dec->vavs2_amstream_dec_info.width; + dec->frame_height = dec->vavs2_amstream_dec_info.height; + } else { + dec->vavs2_amstream_dec_info.width = 0; + dec->vavs2_amstream_dec_info.height = 0; + dec->vavs2_amstream_dec_info.rate = 30; + } + dec->cma_dev = pdata->cma_dev; + + dec->endian = HEVC_CONFIG_LITTLE_ENDIAN; + if (is_support_vdec_canvas()) + dec->endian = HEVC_CONFIG_BIG_ENDIAN; + if (endian) + dec->endian = endian; + + pdata->private = dec; + pdata->dec_status = vavs2_dec_status; + /*pdata->set_isreset = 
vavs2_set_isreset;*/ + is_reset = 0; + if (vavs2_init(pdata) < 0) { + pr_info("\namvdec_avs2 init failed.\n"); + avs2_local_uninit(dec); + uninit_mmu_buffers(dec); + pdata->dec_status = NULL; + mutex_unlock(&vavs2_mutex); + return -ENODEV; + } + /*set the max clk for smooth playing...*/ + hevc_source_changed(VFORMAT_AVS2, + 4096, 2048, 60); + mutex_unlock(&vavs2_mutex); + + return 0; +} + +static int amvdec_avs2_remove(struct platform_device *pdev) +{ + struct AVS2Decoder_s *dec = &gAVS2Decoder; + if (debug) + pr_info("amvdec_avs2_remove\n"); + + mutex_lock(&vavs2_mutex); + + vavs2_stop(dec); + + + hevc_source_changed(VFORMAT_AVS2, 0, 0, 0); + + +#ifdef DEBUG_PTS + pr_info("pts missed %ld, pts hit %ld, duration %d\n", + dec->pts_missed, dec->pts_hit, dec->frame_dur); +#endif + + mutex_unlock(&vavs2_mutex); + + return 0; +} + +/****************************************/ + +static struct platform_driver amvdec_avs2_driver = { + .probe = amvdec_avs2_probe, + .remove = amvdec_avs2_remove, +#ifdef CONFIG_PM + .suspend = amhevc_suspend, + .resume = amhevc_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t amvdec_avs2_profile = { + .name = "avs2", + .profile = "" +}; + +static struct codec_profile_t amvdec_avs2_profile_mult; + +static unsigned char get_data_check_sum + (struct AVS2Decoder_s *dec, int size) +{ + int jj; + int sum = 0; + u8 *data = NULL; + + if (!dec->chunk->block->is_mapped) + data = codec_mm_vmap(dec->chunk->block->start + + dec->chunk->offset, size); + else + data = ((u8 *)dec->chunk->block->start_virt) + + dec->chunk->offset; + + for (jj = 0; jj < size; jj++) + sum += data[jj]; + + if (!dec->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static void dump_data(struct AVS2Decoder_s *dec, int size) +{ + int jj; + u8 *data = NULL; + int padding_size = dec->chunk->offset & + (VDEC_FIFO_ALIGN - 1); + + if (!dec->chunk->block->is_mapped) + data = codec_mm_vmap(dec->chunk->block->start + + 
dec->chunk->offset, size); + else + data = ((u8 *)dec->chunk->block->start_virt) + + dec->chunk->offset; + + avs2_print(dec, 0, "padding: "); + for (jj = padding_size; jj > 0; jj--) + avs2_print_cont(dec, + 0, + "%02x ", *(data - jj)); + avs2_print_cont(dec, 0, "data adr %p\n", + data); + + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + avs2_print(dec, + 0, + "%06x:", jj); + avs2_print_cont(dec, + 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + avs2_print(dec, + 0, + "\n"); + } + avs2_print(dec, + 0, + "\n"); + + if (!dec->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); +} + +static void avs2_work(struct work_struct *work) +{ + struct AVS2Decoder_s *dec = container_of(work, + struct AVS2Decoder_s, work); + struct vdec_s *vdec = hw_to_vdec(dec); + /* finished decoding one frame or error, + * notify vdec core to switch context + */ + avs2_print(dec, PRINT_FLAG_VDEC_DETAIL, + "%s dec_result %d %x %x %x\n", + __func__, + dec->dec_result, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR)); + + if (((dec->dec_result == DEC_RESULT_GET_DATA) || + (dec->dec_result == DEC_RESULT_GET_DATA_RETRY)) + && (hw_to_vdec(dec)->next_status != + VDEC_STATUS_DISCONNECTED)) { + if (!vdec_has_more_input(vdec)) { + dec->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&dec->work); + return; + } + + if (dec->dec_result == DEC_RESULT_GET_DATA) { + avs2_print(dec, PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA %x %x %x\n", + __func__, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR)); + vdec_vframe_dirty(vdec, dec->chunk); + vdec_clean_input(vdec); + } + + if (get_free_buf_count(dec) >= + run_ready_min_buf_num) { + int r; + int decode_size; + r = vdec_prepare_input(vdec, &dec->chunk); + if (r < 0) { + dec->dec_result = DEC_RESULT_GET_DATA_RETRY; + + avs2_print(dec, + PRINT_FLAG_VDEC_DETAIL, + "amvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&dec->work); + 
return; + } + dec->dec_result = DEC_RESULT_NONE; + avs2_print(dec, PRINT_FLAG_VDEC_STATUS, + "%s: chunk size 0x%x sum 0x%x\n", + __func__, r, + (debug & PRINT_FLAG_VDEC_STATUS) ? + get_data_check_sum(dec, r) : 0 + ); + if (debug & PRINT_FLAG_VDEC_DATA) + dump_data(dec, dec->chunk->size); + + decode_size = dec->chunk->size + + (dec->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + + WRITE_VREG(HEVC_DECODE_SIZE, + READ_VREG(HEVC_DECODE_SIZE) + decode_size); + + vdec_enable_input(vdec); + + WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_ACTION_DONE); + + start_process_time(dec); + + } else{ + dec->dec_result = DEC_RESULT_GET_DATA_RETRY; + + avs2_print(dec, PRINT_FLAG_VDEC_DETAIL, + "amvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&dec->work); + } + return; + } else if (dec->dec_result == DEC_RESULT_DONE) { + /* if (!dec->ctx_valid) + dec->ctx_valid = 1; */ + dec->slice_idx++; + dec->frame_count++; + dec->process_state = PROC_STATE_INIT; + decode_frame_count[dec->index] = dec->frame_count; + +#ifdef AVS2_10B_MMU + if (dec->mmu_enable) + dec->used_4k_num = (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16); +#endif + avs2_print(dec, PRINT_FLAG_VDEC_STATUS, + "%s (===> %d) dec_result %d %x %x %x shiftbytes 0x%x decbytes 0x%x\n", + __func__, + dec->frame_count, + dec->dec_result, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_BYTE_COUNT) - + dec->start_shift_bytes + ); + vdec_vframe_dirty(hw_to_vdec(dec), dec->chunk); + } else if (dec->dec_result == DEC_RESULT_AGAIN) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + if (!vdec_has_more_input(vdec)) { + dec->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&dec->work); + return; + } + } else if (dec->dec_result == DEC_RESULT_EOS) { + avs2_print(dec, 0, + "%s: end of stream\n", + __func__); + dec->eos = 1; + if ( dec->avs2_dec.hc.cur_pic != NULL) { + check_pic_error(dec, 
dec->avs2_dec.hc.cur_pic); + avs2_post_process(&dec->avs2_dec); + avs2_prepare_display_buf(dec); + } + vdec_vframe_dirty(hw_to_vdec(dec), dec->chunk); + } else if (dec->dec_result == DEC_RESULT_FORCE_EXIT) { + avs2_print(dec, PRINT_FLAG_VDEC_STATUS, + "%s: force exit\n", + __func__); + if (dec->stat & STAT_VDEC_RUN) { + amhevc_stop(); + dec->stat &= ~STAT_VDEC_RUN; + } + + if (dec->stat & STAT_ISR_REG) { + if (!dec->m_ins_flag) + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0); + vdec_free_irq(VDEC_IRQ_0, (void *)dec); + dec->stat &= ~STAT_ISR_REG; + } + } + + if (dec->stat & STAT_TIMER_ARM) { + del_timer_sync(&dec->timer); + dec->stat &= ~STAT_TIMER_ARM; + } + /* mark itself has all HW resource released and input released */ + if (vdec->parallel_dec ==1) + vdec_core_finish_run(vdec, CORE_MASK_HEVC); + else + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + if (dec->vdec_cb) + dec->vdec_cb(hw_to_vdec(dec), dec->vdec_cb_arg); +} + +static int avs2_hw_ctx_restore(struct AVS2Decoder_s *dec) +{ + /* new to do ... */ + vavs2_prot_init(dec); + return 0; +} + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + int tvp = vdec_secure(hw_to_vdec(dec)) ? 
+ CODEC_MM_FLAGS_TVP : 0; + unsigned long ret = 0; + avs2_print(dec, + PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__); + if (debug & AVS2_DBG_PIC_LEAK_WAIT) + return ret; + + if (dec->eos) + return ret; + if (!dec->first_sc_checked) { + int size = decoder_mmu_box_sc_check(dec->mmu_box, tvp); + dec->first_sc_checked = 1; + avs2_print(dec, 0, "vavs2 cached=%d need_size=%d speed= %d ms\n", + size, (dec->need_cache_size >> PAGE_SHIFT), + (int)(get_jiffies_64() - dec->sc_start_time) * 1000/HZ); + } + + if (dec->next_again_flag && + (!vdec_frame_based(vdec))) { + u32 parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + if (parser_wr_ptr >= dec->pre_parser_wr_ptr && + (parser_wr_ptr - dec->pre_parser_wr_ptr) < + again_threshold) { + int r = vdec_sync_input(vdec); + avs2_print(dec, + PRINT_FLAG_VDEC_DETAIL, "%s buf lelvel:%x\n", __func__, r); + return 0; + } + } +/* + if (vdec_stream_based(vdec) && (dec->pic_list_init_flag == 0) + && pre_decode_buf_level != 0) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = vdec->input.size + wp - rp; + else + level = wp - rp; + + if (level < pre_decode_buf_level) + return 0; + } +*/ + + if ((dec->pic_list_init_flag == 0) || + get_free_buf_count(dec) >= + run_ready_min_buf_num) + ret = 1; +#ifdef CONSTRAIN_MAX_BUF_NUM + if (dec->pic_list_init_flag) { + if (run_ready_max_vf_only_num > 0 && + get_vf_ref_only_buf_count(dec) >= + run_ready_max_vf_only_num + ) + ret = 0; + if (run_ready_display_q_num > 0 && + kfifo_len(&dec->display_q) >= + run_ready_display_q_num) + ret = 0; + + if (run_ready_max_buf_num == 0xff && + get_used_buf_count(dec) >= + dec->avs2_dec.ref_maxbuffer) + ret = 0; + else if (run_ready_max_buf_num && + get_used_buf_count(dec) >= + run_ready_max_buf_num) + ret = 0; + } +#endif + if (ret) + not_run_ready[dec->index] = 0; + else + not_run_ready[dec->index]++; + + if (vdec->parallel_dec == 1) + return ret ? CORE_MASK_HEVC : 0; + else + return ret ? 
(CORE_MASK_VDEC_1 | CORE_MASK_HEVC) : 0; +} + +static void run(struct vdec_s *vdec, unsigned long mask, + void (*callback)(struct vdec_s *, void *), void *arg) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + int r; + + run_count[dec->index]++; + dec->vdec_cb_arg = arg; + dec->vdec_cb = callback; + /* dec->chunk = vdec_prepare_input(vdec); */ + hevc_reset_core(vdec); + + if (vdec_stream_based(vdec)) { + dec->pre_parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + dec->next_again_flag = 0; + } + + r = vdec_prepare_input(vdec, &dec->chunk); + if (r < 0) { + input_empty[dec->index]++; + + dec->dec_result = DEC_RESULT_AGAIN; + + avs2_print(dec, PRINT_FLAG_VDEC_DETAIL, + "ammvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&dec->work); + return; + } + input_empty[dec->index] = 0; + dec->dec_result = DEC_RESULT_NONE; + dec->start_shift_bytes = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + + if (debug & PRINT_FLAG_VDEC_STATUS) { + int ii; + avs2_print(dec, 0, + "%s (%d): size 0x%x (0x%x 0x%x) sum 0x%x (%x %x %x %x %x) bytes 0x%x", + __func__, + dec->frame_count, r, + dec->chunk ? dec->chunk->size : 0, + dec->chunk ? dec->chunk->offset : 0, + dec->chunk ? ((vdec_frame_based(vdec) && + (debug & PRINT_FLAG_VDEC_STATUS)) ? 
+ get_data_check_sum(dec, r) : 0) : 0, + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + dec->start_shift_bytes); + if (vdec_frame_based(vdec) && dec->chunk) { + u8 *data = NULL; + if (!dec->chunk->block->is_mapped) + data = codec_mm_vmap(dec->chunk->block->start + + dec->chunk->offset, 8); + else + data = ((u8 *)dec->chunk->block->start_virt) + + dec->chunk->offset; + + avs2_print_cont(dec, 0, "data adr %p:", + data); + for (ii = 0; ii < 8; ii++) + avs2_print_cont(dec, 0, "%02x ", + data[ii]); + if (!dec->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + avs2_print_cont(dec, 0, "\r\n"); + } + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else if (amhevc_loadmc_ex(VFORMAT_AVS2, NULL, dec->fw->data) < 0) { + vdec->mc_loaded = 0; + amhevc_disable(); + avs2_print(dec, 0, + "%s: Error amvdec_loadmc fail\n", __func__); + dec->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&dec->work); + return; + } else { + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_AVS2; + } + + + if (avs2_hw_ctx_restore(dec) < 0) { + vdec_schedule_work(&dec->work); + return; + } + + vdec_enable_input(vdec); + + WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_SEARCH_NEW_PIC); + + if (vdec_frame_based(vdec) && dec->chunk) { + if (debug & PRINT_FLAG_VDEC_DATA) + dump_data(dec, dec->chunk->size); + + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, 0); + r = dec->chunk->size + + (dec->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = dec->chunk->size; + } + + WRITE_VREG(HEVC_DECODE_SIZE, r); + WRITE_VREG(HEVC_DECODE_COUNT, dec->slice_idx); + dec->init_flag = 1; + + avs2_print(dec, PRINT_FLAG_VDEC_DETAIL, + "%s: start hevc (%x %x %x)\n", + __func__, + READ_VREG(HEVC_DEC_STATUS_REG), + READ_VREG(HEVC_MPC_E), + READ_VREG(HEVC_MPSR)); + + start_process_time(dec); + mod_timer(&dec->timer, 
jiffies); + dec->stat |= STAT_TIMER_ARM; + dec->stat |= STAT_ISR_REG; + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + amhevc_start(); + dec->stat |= STAT_VDEC_RUN; +} + +static void reset(struct vdec_s *vdec) +{ + + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + + avs2_print(dec, + PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__); + +} + +static irqreturn_t avs2_irq_cb(struct vdec_s *vdec, int irq) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + return vavs2_isr(0, dec); +} + +static irqreturn_t avs2_threaded_irq_cb(struct vdec_s *vdec, int irq) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + return vavs2_isr_thread_fn(0, dec); +} + +static void avs2_dump_state(struct vdec_s *vdec) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + int i; + avs2_print(dec, 0, "====== %s\n", __func__); + + avs2_print(dec, 0, + "width/height (%d/%d), used_buf_num %d\n", + dec->avs2_dec.img.width, + dec->avs2_dec.img.height, + dec->used_buf_num + ); + + avs2_print(dec, 0, + "is_framebase(%d), eos %d, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d\n", + input_frame_based(vdec), + dec->eos, + dec->dec_result, + decode_frame_count[dec->index], + display_frame_count[dec->index], + run_count[dec->index], + not_run_ready[dec->index], + input_empty[dec->index] + ); + + if (vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + avs2_print(dec, 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + + avs2_print(dec, 0, + "%s, newq(%d/%d), dispq(%d/%d), vf prepare/get/put (%d/%d/%d), free_buf_count %d (min %d for run_ready)\n", + __func__, + kfifo_len(&dec->newframe_q), + VF_POOL_SIZE, + kfifo_len(&dec->display_q), + VF_POOL_SIZE, + dec->vf_pre_count, + dec->vf_get_count, + dec->vf_put_count, + 
get_free_buf_count(dec), + run_ready_min_buf_num + ); + + dump_pic_list(dec); + + for (i = 0; i < MAX_BUF_NUM; i++) { + avs2_print(dec, 0, + "mv_Buf(%d) start_adr 0x%x size 0x%x used %d\n", + i, + dec->m_mv_BUF[i].start_adr, + dec->m_mv_BUF[i].size, + dec->m_mv_BUF[i].used_flag); + } + + avs2_print(dec, 0, + "HEVC_DEC_STATUS_REG=0x%x\n", + READ_VREG(HEVC_DEC_STATUS_REG)); + avs2_print(dec, 0, + "HEVC_MPC_E=0x%x\n", + READ_VREG(HEVC_MPC_E)); + avs2_print(dec, 0, + "DECODE_MODE=0x%x\n", + READ_VREG(DECODE_MODE)); + avs2_print(dec, 0, + "NAL_SEARCH_CTL=0x%x\n", + READ_VREG(NAL_SEARCH_CTL)); + avs2_print(dec, 0, + "HEVC_PARSER_LCU_START=0x%x\n", + READ_VREG(HEVC_PARSER_LCU_START)); + avs2_print(dec, 0, + "HEVC_DECODE_SIZE=0x%x\n", + READ_VREG(HEVC_DECODE_SIZE)); + avs2_print(dec, 0, + "HEVC_SHIFT_BYTE_COUNT=0x%x\n", + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + avs2_print(dec, 0, + "HEVC_STREAM_START_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_START_ADDR)); + avs2_print(dec, 0, + "HEVC_STREAM_END_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_END_ADDR)); + avs2_print(dec, 0, + "HEVC_STREAM_LEVEL=0x%x\n", + READ_VREG(HEVC_STREAM_LEVEL)); + avs2_print(dec, 0, + "HEVC_STREAM_WR_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_WR_PTR)); + avs2_print(dec, 0, + "HEVC_STREAM_RD_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_RD_PTR)); + avs2_print(dec, 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + avs2_print(dec, 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (input_frame_based(vdec) && + (debug & PRINT_FLAG_VDEC_DATA) + ) { + int jj; + if (dec->chunk && dec->chunk->block && + dec->chunk->size > 0) { + u8 *data = NULL; + if (!dec->chunk->block->is_mapped) + data = codec_mm_vmap(dec->chunk->block->start + + dec->chunk->offset, dec->chunk->size); + else + data = ((u8 *)dec->chunk->block->start_virt) + + dec->chunk->offset; + avs2_print(dec, 0, + "frame data size 0x%x\n", + dec->chunk->size); + for (jj = 0; jj < dec->chunk->size; jj++) { + if ((jj & 0xf) == 0) + 
avs2_print(dec, 0, + "%06x:", jj); + avs2_print_cont(dec, 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + avs2_print_cont(dec, 0, + "\n"); + } + + if (!dec->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } + +} + +static int ammvdec_avs2_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + int ret; + int config_val; + int i; + struct vframe_content_light_level_s content_light_level; + struct vframe_master_display_colour_s vf_dp; + /*struct BUF_s BUF[MAX_BUF_NUM];*/ + struct AVS2Decoder_s *dec = NULL; + + pr_info("%s\n", __func__); + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) { + pr_info("%s, chip id %d is not support avs2\n", + __func__, get_cpu_major_id()); + return -1; + } + if (pdata == NULL) { + pr_info("\nammvdec_avs2 memory resource undefined.\n"); + return -EFAULT; + } + /*dec = (struct AVS2Decoder_s *)devm_kzalloc(&pdev->dev, + sizeof(struct AVS2Decoder_s), GFP_KERNEL);*/ + memset(&vf_dp, 0, sizeof(struct vframe_master_display_colour_s)); + dec = vzalloc(sizeof(struct AVS2Decoder_s)); + if (dec == NULL) { + pr_info("\nammvdec_avs2 device data allocation failed\n"); + return -ENOMEM; + } + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < AVS2_MAX_BUFFER_NUM; i++) { + dec->avs2_dec.frm_pool[i].y_canvas_index = -1; + dec->avs2_dec.frm_pool[i].uv_canvas_index = -1; + } + } + pdata->private = dec; + pdata->dec_status = vavs2_dec_status; +#ifdef I_ONLY_SUPPORT + pdata->set_trickmode = vavs2_set_trickmode; +#endif + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = avs2_irq_cb; + pdata->threaded_irq_handler = avs2_threaded_irq_cb; + pdata->dump_state = avs2_dump_state; + + /* + * memcpy(&BUF[0], &dec->m_BUF[0], sizeof(struct BUF_s) * MAX_BUF_NUM); + * memset(dec, 0, sizeof(struct AVS2Decoder_s)); + * memcpy(&dec->m_BUF[0], &BUF[0], sizeof(struct BUF_s) * MAX_BUF_NUM); + */ + + dec->index = pdev->id; + dec->m_ins_flag = 
1; + if (is_rdma_enable()) { + dec->rdma_adr = dma_alloc_coherent(amports_get_dma_device(), RDMA_SIZE, &dec->rdma_phy_adr, GFP_KERNEL); + for (i = 0; i < SCALELUT_DATA_WRITE_NUM; i++) { + dec->rdma_adr[i * 4] = HEVC_IQIT_SCALELUT_WR_ADDR & 0xfff; + dec->rdma_adr[i * 4 + 1] = i; + dec->rdma_adr[i * 4 + 2] = HEVC_IQIT_SCALELUT_DATA & 0xfff; + dec->rdma_adr[i * 4 + 3] = 0; + if (i == SCALELUT_DATA_WRITE_NUM - 1) { + dec->rdma_adr[i * 4 + 2] = (HEVC_IQIT_SCALELUT_DATA & 0xfff) | 0x20000; + } + } + } + + snprintf(dec->vdec_name, sizeof(dec->vdec_name), + "avs2-%d", dec->index); + snprintf(dec->pts_name, sizeof(dec->pts_name), + "%s-pts", dec->vdec_name); + snprintf(dec->new_q_name, sizeof(dec->new_q_name), + "%s-newframe_q", dec->vdec_name); + snprintf(dec->disp_q_name, sizeof(dec->disp_q_name), + "%s-dispframe_q", dec->vdec_name); + + if (pdata->use_vfm_path) { + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + dec->frameinfo_enable = 1; + } else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vavs2_vf_provider, dec); + + dec->provider_name = pdata->vf_provider_name; + platform_set_drvdata(pdev, pdata); + + dec->platform_dev = pdev; + dec->video_signal_type = 0; + dec->video_ori_signal_type = 0; + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_TXLX) + dec->stat |= VP9_TRIGGER_FRAME_ENABLE; + + if ((debug & IGNORE_PARAM_FROM_CONFIG) == 0 && pdata->config_len) { + /*use ptr config for doubel_write_mode, etc*/ + avs2_print(dec, 0, "pdata->config=%s\n", pdata->config); + if (get_config_int(pdata->config, "avs2_double_write_mode", + &config_val) == 0) + dec->double_write_mode = config_val; + else + dec->double_write_mode = double_write_mode; + + if (get_config_int(pdata->config, "parm_v4l_buffer_margin", + &config_val) == 0) + dec->dynamic_buf_margin = config_val; + else + 
dec->dynamic_buf_margin = 0; + + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + dec->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + dec->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, "HDRStaticInfo", + &vf_dp.present_flag) == 0 + && vf_dp.present_flag == 1) { + get_config_int(pdata->config, "mG.x", + &vf_dp.primaries[0][0]); + get_config_int(pdata->config, "mG.y", + &vf_dp.primaries[0][1]); + get_config_int(pdata->config, "mB.x", + &vf_dp.primaries[1][0]); + get_config_int(pdata->config, "mB.y", + &vf_dp.primaries[1][1]); + get_config_int(pdata->config, "mR.x", + &vf_dp.primaries[2][0]); + get_config_int(pdata->config, "mR.y", + &vf_dp.primaries[2][1]); + get_config_int(pdata->config, "mW.x", + &vf_dp.white_point[0]); + get_config_int(pdata->config, "mW.y", + &vf_dp.white_point[1]); + get_config_int(pdata->config, "mMaxDL", + &vf_dp.luminance[0]); + get_config_int(pdata->config, "mMinDL", + &vf_dp.luminance[1]); + vf_dp.content_light_level.present_flag = 1; + get_config_int(pdata->config, "mMaxCLL", + &content_light_level.max_content); + get_config_int(pdata->config, "mMaxFALL", + &content_light_level.max_pic_average); + vf_dp.content_light_level = content_light_level; + dec->video_signal_type = (1 << 29) + | (5 << 26) /* unspecified */ + | (0 << 25) /* limit */ + | (1 << 24) /* color available */ + | (9 << 16) /* 2020 */ + | (16 << 8) /* 2084 */ + | (9 << 0); /* 2020 */ + } + dec->vf_dp = vf_dp; + } else { + /*dec->vavs2_amstream_dec_info.width = 0; + dec->vavs2_amstream_dec_info.height = 0; + dec->vavs2_amstream_dec_info.rate = 30;*/ + dec->double_write_mode = double_write_mode; + dec->dynamic_buf_margin = dynamic_buf_num_margin; + } + video_signal_type = dec->video_signal_type; + + if (double_write_mode) { + dec->double_write_mode = get_double_write_mode(dec); + } + + if ((dec->double_write_mode & 0x10) == 0) + dec->mmu_enable = 1; + +#ifdef 
AVS2_10B_MMU_DW + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) { + dec->dw_mmu_enable = + (get_double_write_mode(dec) & 0x20) ? 1 : 0; + } else { + dec->dw_mmu_enable = 0; + } +#endif + if (amvdec_avs2_mmu_init(dec) < 0) { + pr_err("avs2 alloc bmmu box failed!!\n"); + /* devm_kfree(&pdev->dev, (void *)dec); */ + vfree((void *)dec); + return -1; + } + dec->cma_alloc_count = PAGE_ALIGN(work_buf_size) / PAGE_SIZE; + ret = decoder_bmmu_box_alloc_buf_phy(dec->bmmu_box, WORK_SPACE_BUF_ID, + dec->cma_alloc_count * PAGE_SIZE, DRIVER_NAME, + &dec->cma_alloc_addr); + if (ret < 0) { + uninit_mmu_buffers(dec); + /* devm_kfree(&pdev->dev, (void *)dec); */ + vfree((void *)dec); + return ret; + } + dec->buf_start = dec->cma_alloc_addr; + dec->buf_size = work_buf_size; + + dec->init_flag = 0; + dec->first_sc_checked = 0; + dec->fatal_error = 0; + dec->show_frame_num = 0; + + if (debug) { + pr_info("===AVS2 decoder mem resource 0x%lx size 0x%x\n", + dec->buf_start, + dec->buf_size); + } + + if (pdata->sys_info) { + dec->vavs2_amstream_dec_info = *pdata->sys_info; + dec->frame_width = dec->vavs2_amstream_dec_info.width; + dec->frame_height = dec->vavs2_amstream_dec_info.height; + } else { + dec->vavs2_amstream_dec_info.width = 0; + dec->vavs2_amstream_dec_info.height = 0; + dec->vavs2_amstream_dec_info.rate = 30; + } + + dec->endian = HEVC_CONFIG_LITTLE_ENDIAN; + if (is_support_vdec_canvas()) + dec->endian = HEVC_CONFIG_BIG_ENDIAN; + if (endian) + dec->endian = endian; + + dec->cma_dev = pdata->cma_dev; + if (vavs2_init(pdata) < 0) { + pr_info("\namvdec_avs2 init failed.\n"); + avs2_local_uninit(dec); + uninit_mmu_buffers(dec); + /* devm_kfree(&pdev->dev, (void *)dec); */ + vfree((void *)dec); + pdata->dec_status = NULL; + return -ENODEV; + } + vdec_set_prepare_level(pdata, start_decode_buf_level); + hevc_source_changed(VFORMAT_AVS2, + 4096, 2048, 60); + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_HEVC); + else { + vdec_core_request(pdata, 
CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + + return 0; +} + +static int ammvdec_avs2_remove(struct platform_device *pdev) +{ + struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *pic; + int i; + + if (debug) + pr_info("amvdec_avs2_remove\n"); + + vmavs2_stop(dec); + + if (pdata->parallel_dec == 1) + vdec_core_release(hw_to_vdec(dec), CORE_MASK_HEVC); + else + vdec_core_release(hw_to_vdec(dec), CORE_MASK_HEVC); + + vdec_set_status(hw_to_vdec(dec), VDEC_STATUS_DISCONNECTED); + if (pdata->parallel_dec == 1) { + for (i = 0; i < AVS2_MAX_BUFFER_NUM; i++) { + pdata->free_canvas_ex(dec->avs2_dec.frm_pool[i].y_canvas_index, pdata->id); + pdata->free_canvas_ex(dec->avs2_dec.frm_pool[i].uv_canvas_index, pdata->id); + } + } + + for (i = 0; i < dec->used_buf_num; i++) { + if (i == (dec->used_buf_num - 1)) + pic = avs2_dec->m_bg; + else + pic = avs2_dec->fref[i]; + release_cuva_data(pic); + } + + +#ifdef DEBUG_PTS + pr_info("pts missed %ld, pts hit %ld, duration %d\n", + dec->pts_missed, dec->pts_hit, dec->frame_dur); +#endif + if (is_rdma_enable()) + dma_free_coherent(amports_get_dma_device(), RDMA_SIZE, dec->rdma_adr, dec->rdma_phy_adr); + /* devm_kfree(&pdev->dev, (void *)dec); */ + vfree((void *)dec); + return 0; +} + +static struct platform_driver ammvdec_avs2_driver = { + .probe = ammvdec_avs2_probe, + .remove = ammvdec_avs2_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = MULTI_DRIVER_NAME, + } +}; +#endif +static struct mconfig avs2_configs[] = { + MC_PU32("bit_depth_luma", &bit_depth_luma), + MC_PU32("bit_depth_chroma", &bit_depth_chroma), + MC_PU32("frame_width", &frame_width), + MC_PU32("frame_height", &frame_height), + MC_PU32("debug", &debug), + MC_PU32("radr", &radr), + 
MC_PU32("rval", &rval), + MC_PU32("pop_shorts", &pop_shorts), + MC_PU32("dbg_cmd", &dbg_cmd), + MC_PU32("dbg_skip_decode_index", &dbg_skip_decode_index), + MC_PU32("endian", &endian), + MC_PU32("step", &step), + MC_PU32("udebug_flag", &udebug_flag), + MC_PU32("decode_pic_begin", &decode_pic_begin), + MC_PU32("slice_parse_begin", &slice_parse_begin), + MC_PU32("i_only_flag", &i_only_flag), + MC_PU32("error_handle_policy", &error_handle_policy), + MC_PU32("buf_alloc_width", &buf_alloc_width), + MC_PU32("buf_alloc_height", &buf_alloc_height), + MC_PU32("buf_alloc_depth", &buf_alloc_depth), + MC_PU32("buf_alloc_size", &buf_alloc_size), + MC_PU32("buffer_mode", &buffer_mode), + MC_PU32("buffer_mode_dbg", &buffer_mode_dbg), + MC_PU32("max_buf_num", &max_buf_num), + MC_PU32("dynamic_buf_num_margin", &dynamic_buf_num_margin), + MC_PU32("mem_map_mode", &mem_map_mode), + MC_PU32("double_write_mode", &double_write_mode), + MC_PU32("enable_mem_saving", &enable_mem_saving), + MC_PU32("force_w_h", &force_w_h), + MC_PU32("force_fps", &force_fps), + MC_PU32("max_decoding_time", &max_decoding_time), + MC_PU32("on_no_keyframe_skiped", &on_no_keyframe_skiped), + MC_PU32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("decode_timeout_val", &decode_timeout_val), +}; +static struct mconfig_node avs2_node; + +static int __init amvdec_avs2_driver_init_module(void) +{ + +#ifdef AVS2_10B_MMU + struct BuffInfo_s *p_buf_info; + + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + p_buf_info = &amvavs2_workbuff_spec[2]; + else + p_buf_info = &amvavs2_workbuff_spec[1]; + } else + p_buf_info = &amvavs2_workbuff_spec[0]; + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) { + p_buf_info = &amvavs2_workbuff_spec[5]; + } else + p_buf_info = &amvavs2_workbuff_spec[3]; + } + + init_buff_spec(NULL, p_buf_info); + 
work_buf_size = + (p_buf_info->end_adr - p_buf_info->start_adr + + 0xffff) & (~0xffff); + +#endif + pr_debug("amvdec_avs2 module init\n"); + +#ifdef ERROR_HANDLE_DEBUG + dbg_nal_skip_flag = 0; + dbg_nal_skip_count = 0; +#endif + udebug_flag = 0; + decode_pic_begin = 0; + slice_parse_begin = 0; + step = 0; + buf_alloc_size = 0; + if (platform_driver_register(&ammvdec_avs2_driver)) + pr_err("failed to register ammvdec_avs2 driver\n"); + + if (platform_driver_register(&amvdec_avs2_driver)) { + pr_err("failed to register amvdec_avs2 driver\n"); + return -ENODEV; + } + + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D)) { + amvdec_avs2_profile.name = "avs2_unsupport"; + } else if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SM1) { + if (vdec_is_support_4k()) + amvdec_avs2_profile.profile = + "4k, 10bit, dwrite, compressed"; + else + amvdec_avs2_profile.profile = + "10bit, dwrite, compressed"; + } else { + /* cpu id larger than sm1 support 8k */ + amvdec_avs2_profile.profile = + "8k, 10bit, dwrite, compressed"; + } + + vcodec_profile_register(&amvdec_avs2_profile); + amvdec_avs2_profile_mult = amvdec_avs2_profile; + amvdec_avs2_profile_mult.name = "mavs2"; + vcodec_profile_register(&amvdec_avs2_profile_mult); + + INIT_REG_NODE_CONFIGS("media.decoder", &avs2_node, + "avs2", avs2_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_AVS2, 0); + + return 0; +} + +static void __exit amvdec_avs2_driver_remove_module(void) +{ + pr_debug("amvdec_avs2 module remove.\n"); + platform_driver_unregister(&ammvdec_avs2_driver); + platform_driver_unregister(&amvdec_avs2_driver); +} + +/****************************************/ + +module_param(bit_depth_luma, uint, 0664); +MODULE_PARM_DESC(bit_depth_luma, "\n amvdec_avs2 bit_depth_luma\n"); + +module_param(bit_depth_chroma, uint, 0664); +MODULE_PARM_DESC(bit_depth_chroma, "\n amvdec_avs2 bit_depth_chroma\n"); + +module_param(frame_width, uint, 0664); 
+MODULE_PARM_DESC(frame_width, "\n amvdec_avs2 frame_width\n"); + +module_param(frame_height, uint, 0664); +MODULE_PARM_DESC(frame_height, "\n amvdec_avs2 frame_height\n"); + +module_param(debug, uint, 0664); +MODULE_PARM_DESC(debug, "\n amvdec_avs2 debug\n"); + +module_param(debug_again, uint, 0664); +MODULE_PARM_DESC(debug_again, "\n amvdec_avs2 debug_again\n"); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\nrval\n"); + +module_param(pop_shorts, uint, 0664); +MODULE_PARM_DESC(pop_shorts, "\nrval\n"); + +module_param(dbg_cmd, uint, 0664); +MODULE_PARM_DESC(dbg_cmd, "\ndbg_cmd\n"); + +module_param(dbg_skip_decode_index, uint, 0664); +MODULE_PARM_DESC(dbg_skip_decode_index, "\ndbg_skip_decode_index\n"); + +module_param(endian, uint, 0664); +MODULE_PARM_DESC(endian, "\nrval\n"); + +module_param(step, uint, 0664); +MODULE_PARM_DESC(step, "\n amvdec_avs2 step\n"); + +module_param(decode_pic_begin, uint, 0664); +MODULE_PARM_DESC(decode_pic_begin, "\n amvdec_avs2 decode_pic_begin\n"); + +module_param(slice_parse_begin, uint, 0664); +MODULE_PARM_DESC(slice_parse_begin, "\n amvdec_avs2 slice_parse_begin\n"); + +module_param(i_only_flag, uint, 0664); +MODULE_PARM_DESC(i_only_flag, "\n amvdec_avs2 i_only_flag\n"); + +module_param(error_handle_policy, uint, 0664); +MODULE_PARM_DESC(error_handle_policy, "\n amvdec_avs2 error_handle_policy\n"); + +module_param(re_search_seq_threshold, uint, 0664); +MODULE_PARM_DESC(re_search_seq_threshold, "\n amvdec_avs2 re_search_seq_threshold\n"); + +module_param(buf_alloc_width, uint, 0664); +MODULE_PARM_DESC(buf_alloc_width, "\n buf_alloc_width\n"); + +module_param(buf_alloc_height, uint, 0664); +MODULE_PARM_DESC(buf_alloc_height, "\n buf_alloc_height\n"); + +module_param(buf_alloc_depth, uint, 0664); +MODULE_PARM_DESC(buf_alloc_depth, "\n buf_alloc_depth\n"); + +module_param(buf_alloc_size, uint, 0664); +MODULE_PARM_DESC(buf_alloc_size, "\n 
buf_alloc_size\n"); + +module_param(buffer_mode, uint, 0664); +MODULE_PARM_DESC(buffer_mode, "\n buffer_mode\n"); + +module_param(buffer_mode_dbg, uint, 0664); +MODULE_PARM_DESC(buffer_mode_dbg, "\n buffer_mode_dbg\n"); +/*USE_BUF_BLOCK*/ +module_param(max_buf_num, uint, 0664); +MODULE_PARM_DESC(max_buf_num, "\n max_buf_num\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +#ifdef CONSTRAIN_MAX_BUF_NUM +module_param(run_ready_max_vf_only_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_vf_only_num, "\n run_ready_max_vf_only_num\n"); + +module_param(run_ready_display_q_num, uint, 0664); +MODULE_PARM_DESC(run_ready_display_q_num, "\n run_ready_display_q_num\n"); + +module_param(run_ready_max_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_buf_num, "\n run_ready_max_buf_num\n"); +#endif + +module_param(mv_buf_margin, uint, 0664); +MODULE_PARM_DESC(mv_buf_margin, "\n mv_buf_margin\n"); + +module_param(run_ready_min_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_min_buf_num, "\n run_ready_min_buf_num\n"); + +/**/ + +module_param(mem_map_mode, uint, 0664); +MODULE_PARM_DESC(mem_map_mode, "\n mem_map_mode\n"); + +module_param(double_write_mode, uint, 0664); +MODULE_PARM_DESC(double_write_mode, "\n double_write_mode\n"); + +module_param(enable_mem_saving, uint, 0664); +MODULE_PARM_DESC(enable_mem_saving, "\n enable_mem_saving\n"); + +module_param(force_w_h, uint, 0664); +MODULE_PARM_DESC(force_w_h, "\n force_w_h\n"); + +module_param(force_fps, uint, 0664); +MODULE_PARM_DESC(force_fps, "\n force_fps\n"); + +module_param(max_decoding_time, uint, 0664); +MODULE_PARM_DESC(max_decoding_time, "\n max_decoding_time\n"); + +module_param(on_no_keyframe_skiped, uint, 0664); +MODULE_PARM_DESC(on_no_keyframe_skiped, "\n on_no_keyframe_skiped\n"); + + +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n avs2 start_decode_buf_level\n"); + 
+module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, + "\n avs2 decode_timeout_val\n"); + +module_param_array(decode_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(display_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, + &max_decode_instance_num, 0664); + +module_param_array(run_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(input_empty, uint, + &max_decode_instance_num, 0664); + +module_param_array(not_run_ready, uint, + &max_decode_instance_num, 0664); + +module_param(video_signal_type, uint, 0664); +MODULE_PARM_DESC(video_signal_type, "\n amvdec_avs2 video_signal_type\n"); + +module_param(force_video_signal_type, uint, 0664); +MODULE_PARM_DESC(force_video_signal_type, "\n amvdec_avs2 force_video_signal_type\n"); + +module_param(enable_force_video_signal_type, uint, 0664); +MODULE_PARM_DESC(enable_force_video_signal_type, "\n amvdec_avs2 enable_force_video_signal_type\n"); + +module_param(force_bufspec, uint, 0664); +MODULE_PARM_DESC(force_bufspec, "\n amvdec_h265 force_bufspec\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_h265 udebug_flag\n"); + +module_param(udebug_pause_pos, uint, 0664); +MODULE_PARM_DESC(udebug_pause_pos, "\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, + "\n amvdec_avs2 pre_decode_buf_level\n"); + +module_param(again_threshold, uint, 0664); +MODULE_PARM_DESC(again_threshold, "\n again_threshold\n"); + + +module_param(force_disp_pic_index, int, 0664); +MODULE_PARM_DESC(force_disp_pic_index, + "\n amvdec_h265 force_disp_pic_index\n"); + 
+module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n without_display_mode\n"); + +module_param(mv_buf_dynamic_alloc, uint, 0664); +MODULE_PARM_DESC(mv_buf_dynamic_alloc, "\n mv_buf_dynamic_alloc\n"); + +module_init(amvdec_avs2_driver_init_module); +module_exit(amvdec_avs2_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC avs2 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <tim.yao@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/avs2/vavs2.h b/drivers/frame_provider/decoder/avs2/vavs2.h new file mode 100644 index 0000000..071bfb3 --- /dev/null +++ b/drivers/frame_provider/decoder/avs2/vavs2.h
@@ -0,0 +1,26 @@ +/* + * drivers/amlogic/amports/vavs2.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * +*/ + +#ifndef VAVS2_H +#define VAVS2_H + +#define AVS2_10B_MMU +#define AVS2_10B_MMU_DW + +void adapt_coef_probs(int pic_count, int prev_kf, int cur_kf, int pre_fc, +unsigned int *prev_prob, unsigned int *cur_prob, unsigned int *count); +#endif
diff --git a/drivers/frame_provider/decoder/avs_multi/Makefile b/drivers/frame_provider/decoder/avs_multi/Makefile new file mode 100644 index 0000000..638cec0 --- /dev/null +++ b/drivers/frame_provider/decoder/avs_multi/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_AVS_MULTI) += amvdec_mavs.o +amvdec_mavs-objs += avs_multi.o
diff --git a/drivers/frame_provider/decoder/avs_multi/avs_multi.c b/drivers/frame_provider/decoder/avs_multi/avs_multi.c new file mode 100644 index 0000000..7dd20bb --- /dev/null +++ b/drivers/frame_provider/decoder/avs_multi/avs_multi.c
@@ -0,0 +1,5030 @@ +/* + * drivers/amlogic/amports/vavs.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../../stream_input/amports/streambuf_reg.h" +#include "../utils/amvdec.h" +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/dma-mapping.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/slab.h> +#include "avs_multi.h" +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include "../utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include <linux/amlogic/tee.h> +#include "../utils/vdec_feature.h" + +#define DEBUG_MULTI_FLAG 0 +/* +#define DEBUG_WITH_SINGLE_MODE +#define DEBUG_MULTI_WITH_AUTOMODE +#define DEBUG_MULTI_FRAME_INS 
+*/ + + +#define USE_DYNAMIC_BUF_NUM + +#ifdef DEBUG_WITH_SINGLE_MODE +#define DRIVER_NAME "amvdec_avs" +#else +#define DRIVER_NAME "ammvdec_avs" +#endif + +#define MULTI_DRIVER_NAME "ammvdec_avs" + +#define ENABLE_USER_DATA + +#if 1/* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +#define NV21 +#endif + +#define USE_AVS_SEQ_INFO +#define HANDLE_AVS_IRQ +#define DEBUG_PTS + +#define CHECK_INTERVAL (HZ/100) + +#define I_PICTURE 0 +#define P_PICTURE 1 +#define B_PICTURE 2 + +#define LMEM_BUF_SIZE (0x500 * 2) + +/* #define ORI_BUFFER_START_ADDR 0x81000000 */ +#define ORI_BUFFER_START_ADDR 0x80000000 + +#define INTERLACE_FLAG 0x80 +#define TOP_FIELD_FIRST_FLAG 0x40 + +/* protocol registers */ +#define AVS_PIC_RATIO AV_SCRATCH_0 +#define AVS_PIC_WIDTH AV_SCRATCH_1 +#define AVS_PIC_HEIGHT AV_SCRATCH_2 +#define AVS_FRAME_RATE AV_SCRATCH_3 + +/*#define AVS_ERROR_COUNT AV_SCRATCH_6*/ +#define AVS_SOS_COUNT AV_SCRATCH_7 +#define AVS_BUFFERIN AV_SCRATCH_8 +#define AVS_BUFFEROUT AV_SCRATCH_9 +#define AVS_REPEAT_COUNT AV_SCRATCH_A +#define AVS_TIME_STAMP AV_SCRATCH_B +#define AVS_OFFSET_REG AV_SCRATCH_C +#define MEM_OFFSET_REG AV_SCRATCH_F +#define AVS_ERROR_RECOVERY_MODE AV_SCRATCH_G +#define DECODE_PIC_COUNT AV_SCRATCH_G + +#define DECODE_MODE AV_SCRATCH_6 +#define DECODE_MODE_SINGLE 0x0 +#define DECODE_MODE_MULTI_FRAMEBASE 0x1 +#define DECODE_MODE_MULTI_STREAMBASE 0x2 +#define DECODE_MODE_MULTI_STREAMBASE_CONT 0x3 + +#define DECODE_STATUS AV_SCRATCH_H +#define DECODE_STATUS_PIC_DONE 0x1 +#define DECODE_STATUS_DECODE_BUF_EMPTY 0x2 +#define DECODE_STATUS_SEARCH_BUF_EMPTY 0x3 +#define DECODE_STATUS_SKIP_PIC_DONE 0x4 +#define DECODE_SEARCH_HEAD 0xff + +#define DECODE_STOP_POS AV_SCRATCH_J + +#define DECODE_LMEM_BUF_ADR AV_SCRATCH_I + +#define DECODE_CFG AV_SCRATCH_K + +#define VF_POOL_SIZE 64 +#define PUT_INTERVAL (HZ/100) + +#if 1 /*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8*/ +#define INT_AMVENCODER INT_DOS_MAILBOX_1 +#else +/* #define AMVENC_DEV_VERSION "AML-MT" */ +#define 
INT_AMVENCODER INT_MAILBOX_1A +#endif + +#ifdef USE_DYNAMIC_BUF_NUM +static unsigned int buf_spec_reg[] = { + AV_SCRATCH_0, + AV_SCRATCH_1, + AV_SCRATCH_2, + AV_SCRATCH_3, + AV_SCRATCH_7, /*AVS_SOS_COUNT*/ + AV_SCRATCH_D, /*DEBUG_REG2*/ + AV_SCRATCH_E, /*DEBUG_REG1*/ + AV_SCRATCH_M /*user_data_poc_number*/ +}; +#endif + +#define DEBUG_REG1 AV_SCRATCH_E +#define DEBUG_REG2 AV_SCRATCH_D + + +static void check_timer_func(struct timer_list *timer); +static void vavs_work(struct work_struct *work); + +#define DEC_CONTROL_FLAG_FORCE_2500_1080P_INTERLACE 0x0001 +static u32 dec_control = DEC_CONTROL_FLAG_FORCE_2500_1080P_INTERLACE; + + +#define VPP_VD1_POSTBLEND (1 << 10) + +static int debug; +static unsigned int debug_mask = 0xff; + +/*for debug*/ +/* + udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode more print + bit 3, enable ucdode detail print + bit [31:16] not 0, pos to dump lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) + + avs only: + bit [8], disable empty muitl-instance handling + bit [9], enable writting of VC1_CONTROL_REG in ucode +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 udebug_pause_decode_idx; + +static u32 udebug_pause_ins_id; + +static u32 force_fps; + +#ifdef DEBUG_MULTI_FRAME_INS +static u32 delay; +#endif + +static u32 step; + +static u32 start_decoding_delay; + +#define AVS_DEV_NUM 9 +static unsigned int max_decode_instance_num = AVS_DEV_NUM; +static unsigned int max_process_time[AVS_DEV_NUM]; +static unsigned int max_get_frame_interval[AVS_DEV_NUM]; +static unsigned int run_count[AVS_DEV_NUM]; +static unsigned int ins_udebug_flag[AVS_DEV_NUM]; +#ifdef DEBUG_MULTI_FRAME_INS +static unsigned int max_run_count[AVS_DEV_NUM]; +#endif 
+/* +error_handle_policy: +*/ +static unsigned int error_handle_policy = 3; + +static u32 again_threshold = 0; /*0x40;*/ + +static unsigned int decode_timeout_val = 200; +static unsigned int start_decode_buf_level = 0x8000; + +/******************************** +firmware_sel + 0: use avsp_trans long cabac ucode; + 1: not use avsp_trans long cabac ucode + in ucode: + #define USE_EXT_BUFFER_ASSIGNMENT + #undef USE_DYNAMIC_BUF_NUM +********************************/ +static int firmware_sel; +static int disable_longcabac_trans = 1; +static int pre_decode_buf_level = 0x800; + + +static struct vframe_s *vavs_vf_peek(void *); +static struct vframe_s *vavs_vf_get(void *); +static void vavs_vf_put(struct vframe_s *, void *); +static int vavs_vf_states(struct vframe_states *states, void *); +static int vavs_event_cb(int type, void *data, void *private_data); + +static const char vavs_dec_id[] = "vavs-dev"; + +#define PROVIDER_NAME "decoder.avs" +static DEFINE_SPINLOCK(lock); +static DEFINE_MUTEX(vavs_mutex); + +static const struct vframe_operations_s vavs_vf_provider = { + .peek = vavs_vf_peek, + .get = vavs_vf_get, + .put = vavs_vf_put, + .event_cb = vavs_event_cb, + .vf_states = vavs_vf_states, +}; +/* +static void *mm_blk_handle; +*/ +static struct vframe_provider_s vavs_vf_prov; + +#define VF_BUF_NUM_MAX 16 +#ifdef DEBUG_MULTI_FRAME_INS +#define WORKSPACE_SIZE (16 * SZ_1M) +#else +#define WORKSPACE_SIZE (4 * SZ_1M) +#endif +#ifdef AVSP_LONG_CABAC +#define MAX_BMMU_BUFFER_NUM (VF_BUF_NUM_MAX + 2) +#define WORKSPACE_SIZE_A (MAX_CODED_FRAME_SIZE + LOCAL_HEAP_SIZE) +#else +#define MAX_BMMU_BUFFER_NUM (VF_BUF_NUM_MAX + 1) +#endif + +#define RV_AI_BUFF_START_ADDR 0x01a00000 +#define LONG_CABAC_RV_AI_BUFF_START_ADDR 0x00000000 + +/* 4 buffers not enough for multi inc*/ +static u32 vf_buf_num = 8; +/*static u32 vf_buf_num_used;*/ +static u32 canvas_base = 128; +#ifdef NV21 +static int canvas_num = 2; /*NV21*/ +#else +static int canvas_num = 3; +#endif + +#if 0 +static struct 
vframe_s vfpool[VF_POOL_SIZE]; +/*static struct vframe_s vfpool2[VF_POOL_SIZE];*/ +static struct vframe_s *cur_vfpool; +static unsigned char recover_flag; +static s32 vfbuf_use[VF_BUF_NUM_MAX]; +static u32 saved_resolution; +static u32 frame_width, frame_height, frame_dur, frame_prog; +static struct timer_list recycle_timer; +static u32 stat; +#endif +static u32 buf_size = 32 * 1024 * 1024; +#if 0 +static u32 buf_offset; +static u32 avi_flag; +static u32 vavs_ratio; +static u32 pic_type; +#endif +static u32 pts_by_offset = 1; +#if 0 +static u32 total_frame; +static u32 next_pts; +static unsigned char throw_pb_flag; +#ifdef DEBUG_PTS +static u32 pts_hit, pts_missed, pts_i_hit, pts_i_missed; +#endif +#endif +static u32 radr, rval; +static u32 dbg_cmd; +#if 0 +static struct dec_sysinfo vavs_amstream_dec_info; +static struct vdec_info *gvs; +static u32 fr_hint_status; +static struct work_struct notify_work; +static struct work_struct set_clk_work; +static bool is_reset; +#endif +/*static struct vdec_s *vdec;*/ + +#ifdef AVSP_LONG_CABAC +static struct work_struct long_cabac_wd_work; +void *es_write_addr_virt; +dma_addr_t es_write_addr_phy; + +void *bitstream_read_tmp; +dma_addr_t bitstream_read_tmp_phy; +void *avsp_heap_adr; +static uint long_cabac_busy; +#endif + +#if 0 +#ifdef ENABLE_USER_DATA +static void *user_data_buffer; +static dma_addr_t user_data_buffer_phys; +#endif +static DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(recycle_q, struct vframe_s *, VF_POOL_SIZE); +#endif +static inline u32 index2canvas(u32 index) +{ + const u32 canvas_tab[VF_BUF_NUM_MAX] = { + 0x010100, 0x030302, 0x050504, 0x070706, + 0x090908, 0x0b0b0a, 0x0d0d0c, 0x0f0f0e, + 0x111110, 0x131312, 0x151514, 0x171716, + 0x191918, 0x1b1b1a, 0x1d1d1c, 0x1f1f1e, + }; + const u32 canvas_tab_3[4] = { + 0x010100, 0x040403, 0x070706, 0x0a0a09 + }; + + if (canvas_num == 2) + return canvas_tab[index] + 
(canvas_base << 16) + + (canvas_base << 8) + canvas_base; + + return canvas_tab_3[index] + (canvas_base << 16) + + (canvas_base << 8) + canvas_base; +} + +static const u32 frame_rate_tab[16] = { + 96000 / 30, /* forbidden */ + 96000000 / 23976, /* 24000/1001 (23.967) */ + 96000 / 24, + 96000 / 25, + 9600000 / 2997, /* 30000/1001 (29.97) */ + 96000 / 30, + 96000 / 50, + 9600000 / 5994, /* 60000/1001 (59.94) */ + 96000 / 60, + /* > 8 reserved, use 24 */ + 96000 / 24, 96000 / 24, 96000 / 24, 96000 / 24, + 96000 / 24, 96000 / 24, 96000 / 24 +}; + +#define DECODE_BUFFER_NUM_MAX VF_BUF_NUM_MAX +#define PIC_PTS_NUM 64 +struct buf_pool_s { + unsigned detached; + struct vframe_s vf; +}; + +#define buf_of_vf(vf) container_of(vf, struct buf_pool_s, vf) + +struct pic_pts_s { + u32 pts; + u64 pts64; + u64 timestamp; + unsigned short decode_pic_count; +}; + +struct vdec_avs_hw_s { + spinlock_t lock; + unsigned char m_ins_flag; + struct platform_device *platform_dev; + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(recycle_q, struct vframe_s *, VF_POOL_SIZE); + struct buf_pool_s vfpool[VF_POOL_SIZE]; + s32 vfbuf_use[VF_BUF_NUM_MAX]; + unsigned char again_flag; + unsigned char recover_flag; + u32 frame_width; + u32 frame_height; + u32 frame_dur; + u32 frame_prog; + u32 saved_resolution; + u32 avi_flag; + u32 vavs_ratio; + u32 pic_type; + + u32 vf_buf_num_used; + u32 total_frame; + u32 next_pts; + unsigned char throw_pb_flag; + struct pic_pts_s pic_pts[PIC_PTS_NUM]; + int pic_pts_wr_pos; + +#ifdef DEBUG_PTS + u32 pts_hit; + u32 pts_missed; + u32 pts_i_hit; + u32 pts_i_missed; +#endif +#ifdef ENABLE_USER_DATA + struct work_struct userdata_push_work; + void *user_data_buffer; + dma_addr_t user_data_buffer_phys; +#endif + dma_addr_t lmem_addr; + ulong lmem_phy_addr; + + u32 buf_offset; + + struct dec_sysinfo vavs_amstream_dec_info; + struct vdec_info *gvs; + u32 fr_hint_status; + struct 
work_struct set_clk_work; + bool is_reset; + + /*debug*/ + u32 ucode_pause_pos; + /**/ + u32 decode_pic_count; + u8 reset_decode_flag; + u32 display_frame_count; + u32 buf_status; + u32 pre_parser_wr_ptr; + /* + buffer_status &= ~buf_recycle_status + */ + u32 buf_recycle_status; + u32 seqinfo; + u32 ctx_valid; + u32 dec_control; + void *mm_blk_handle; + struct vframe_chunk_s *chunk; + u32 stat; + u8 init_flag; + unsigned long buf_start; + u32 buf_size; + + u32 reg_scratch_0; + u32 reg_scratch_1; + u32 reg_scratch_2; + u32 reg_scratch_3; + u32 reg_scratch_4; + u32 reg_scratch_5; + u32 reg_scratch_6; + u32 reg_scratch_7; + u32 reg_scratch_8; + u32 reg_scratch_9; + u32 reg_scratch_A; + u32 reg_scratch_B; + u32 reg_scratch_C; + u32 reg_scratch_D; + u32 reg_scratch_E; + u32 reg_scratch_F; + u32 reg_scratch_G; + u32 reg_scratch_H; + u32 reg_scratch_I; + u32 reg_mb_width; + u32 reg_viff_bit_cnt; + u32 reg_canvas_addr; + u32 reg_dbkr_canvas_addr; + u32 reg_dbkw_canvas_addr; + u32 reg_anc2_canvas_addr; + u32 reg_anc0_canvas_addr; + u32 reg_anc1_canvas_addr; + u32 reg_anc3_canvas_addr; + u32 reg_anc4_canvas_addr; + u32 reg_anc5_canvas_addr; + u32 slice_ver_pos_pic_type; + u32 vc1_control_reg; + u32 avs_co_mb_wr_addr; + u32 slice_start_byte_01; + u32 slice_start_byte_23; + u32 vcop_ctrl_reg; + u32 iqidct_control; + u32 rv_ai_mb_count; + u32 slice_qp; + u32 dc_scaler; + u32 avsp_iq_wq_param_01; + u32 avsp_iq_wq_param_23; + u32 avsp_iq_wq_param_45; + u32 avs_co_mb_rd_addr; + u32 dblk_mb_wid_height; + u32 mc_pic_w_h; + u32 avs_co_mb_rw_ctl; + u32 vld_decode_control; + + struct timer_list check_timer; + u32 decode_timeout_count; + unsigned long int start_process_time; + u32 last_vld_level; + u32 eos; + u32 canvas_spec[DECODE_BUFFER_NUM_MAX]; + struct canvas_config_s canvas_config[DECODE_BUFFER_NUM_MAX][2]; + + s32 refs[2]; + int dec_result; + struct timer_list recycle_timer; + struct work_struct work; + struct work_struct notify_work; + atomic_t error_handler_run; + struct 
work_struct fatal_error_wd_work; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; +/* for error handling */ + u32 run_count; + u32 not_run_ready; + u32 input_empty; + u32 prepare_num; + u32 put_num; + u32 peek_num; + u32 get_num; + u32 drop_frame_count; + u32 buffer_not_ready; + int frameinfo_enable; + struct firmware_s *fw; + u32 old_udebug_flag; + u32 decode_status_skip_pic_done_flag; + u32 decode_decode_cont_start_code; + int vdec_pg_enable_flag; + char vdec_name[32]; + char pts_name[32]; + char new_q_name[32]; + char disp_q_name[32]; +}; + +static void reset_process_time(struct vdec_avs_hw_s *hw); +static void start_process_time(struct vdec_avs_hw_s *hw); +static void vavs_save_regs(struct vdec_avs_hw_s *hw); + +struct vdec_avs_hw_s *ghw; + +#define MULTI_INSTANCE_PROVIDER_NAME "vdec.avs" + +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_ERROR 3 +#define DEC_RESULT_FORCE_EXIT 4 +#define DEC_RESULT_EOS 5 +#define DEC_RESULT_GET_DATA 6 +#define DEC_RESULT_GET_DATA_RETRY 7 +#define DEC_RESULT_USERDATA 8 + +#define DECODE_ID(hw) (hw->m_ins_flag? 
hw_to_vdec(hw)->id : 0) + +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_RUN_FLOW 0X0001 +#define PRINT_FLAG_DECODING 0x0002 +#define PRINT_FLAG_PTS 0x0004 +#define PRINT_FLAG_VFRAME_DETAIL 0x0010 +#define PRINT_FLAG_VLD_DETAIL 0x0020 +#define PRINT_FLAG_DEC_DETAIL 0x0040 +#define PRINT_FLAG_BUFFER_DETAIL 0x0080 +#define PRINT_FLAG_FORCE_DONE 0x0100 +#define PRINT_FLAG_COUNTER 0X0200 +#define PRINT_FRAMEBASE_DATA 0x0400 +#define PRINT_FLAG_PARA_DATA 0x1000 +#define DEBUG_FLAG_PREPARE_MORE_INPUT 0x2000 +#define DEBUG_FLAG_PRINT_REG 0x4000 +#define DEBUG_FLAG_DISABLE_TIMEOUT 0x10000 +#define DEBUG_WAIT_DECODE_DONE_WHEN_STOP 0x20000 +#define DEBUG_PIC_DONE_WHEN_UCODE_PAUSE 0x40000 + + +#undef DEBUG_REG +#ifdef DEBUG_REG +static void WRITE_VREG_DBG2(unsigned adr, unsigned val) +{ + if (debug & DEBUG_FLAG_PRINT_REG) + pr_info("%s(%x, %x)\n", __func__, adr, val); + if (adr != 0) + WRITE_VREG(adr, val); +} + +#undef WRITE_VREG +#define WRITE_VREG WRITE_VREG_DBG2 +#endif + +#undef pr_info +#define pr_info printk +static int debug_print(struct vdec_avs_hw_s *hw, + int flag, const char *fmt, ...) +{ +#define AVS_PRINT_BUF 256 + unsigned char buf[AVS_PRINT_BUF]; + int len = 0; + int index = 0; + if (hw) + index = hw->m_ins_flag ? DECODE_ID(hw) : 0; + if (hw == NULL || + (flag == 0) || + ((debug_mask & + (1 << index)) + && (debug & flag))) { + va_list args; + + va_start(args, fmt); + if (hw) + len = sprintf(buf, "[%d]", index); + vsnprintf(buf + len, AVS_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; +} + +static int debug_print_cont(struct vdec_avs_hw_s *hw, + int flag, const char *fmt, ...) +{ + unsigned char buf[AVS_PRINT_BUF]; + int len = 0; + int index = 0; + if (hw) + index = hw->m_ins_flag ? 
DECODE_ID(hw) : 0; + if (hw == NULL || + (flag == 0) || + ((debug_mask & + (1 << index)) + && (debug & flag))) { + va_list args; + + va_start(args, fmt); + vsnprintf(buf + len, AVS_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; +} + +static void avs_pts_check_in(struct vdec_avs_hw_s *hw, + unsigned short decode_pic_count, struct vframe_chunk_s *chunk) +{ + if (chunk) + debug_print(hw, PRINT_FLAG_PTS, + "%s %d (wr pos %d), pts %d pts64 %ld timestamp %ld\n", + __func__, decode_pic_count, hw->pic_pts_wr_pos, + chunk->pts, (u64)(chunk->pts64), (u64)(chunk->timestamp)); + else + debug_print(hw, PRINT_FLAG_PTS, + "%s %d, chunk is null\n", + __func__, decode_pic_count); + + if (chunk) { + hw->pic_pts[hw->pic_pts_wr_pos].pts = chunk->pts; + hw->pic_pts[hw->pic_pts_wr_pos].pts64 = chunk->pts64; + hw->pic_pts[hw->pic_pts_wr_pos].timestamp = chunk->timestamp; + } else { + hw->pic_pts[hw->pic_pts_wr_pos].pts = 0; + hw->pic_pts[hw->pic_pts_wr_pos].pts64 = 0; + hw->pic_pts[hw->pic_pts_wr_pos].timestamp = 0; + } + hw->pic_pts[hw->pic_pts_wr_pos].decode_pic_count + = decode_pic_count; + hw->pic_pts_wr_pos++; + if (hw->pic_pts_wr_pos >= PIC_PTS_NUM) + hw->pic_pts_wr_pos = 0; + return; +} + +static void clear_pts_buf(struct vdec_avs_hw_s *hw) +{ + int i; + debug_print(hw, PRINT_FLAG_PTS, + "%s\n", __func__); + hw->pic_pts_wr_pos = 0; + for (i = 0; i < PIC_PTS_NUM; i++) { + hw->pic_pts[hw->pic_pts_wr_pos].pts = 0; + hw->pic_pts[hw->pic_pts_wr_pos].pts64 = 0; + hw->pic_pts[hw->pic_pts_wr_pos].timestamp = 0; + hw->pic_pts[hw->pic_pts_wr_pos].decode_pic_count = 0; + } +} + +static int set_vframe_pts(struct vdec_avs_hw_s *hw, + unsigned short decode_pic_count, struct vframe_s *vf) +{ + int i; + int ret = -1; + for (i = 0; i < PIC_PTS_NUM; i++) { + if (hw->pic_pts[i].decode_pic_count == decode_pic_count) { + vf->pts = hw->pic_pts[i].pts; + vf->pts_us64 = hw->pic_pts[i].pts64; + vf->timestamp = hw->pic_pts[i].timestamp; + ret = 0; + debug_print(hw, 
PRINT_FLAG_PTS, + "%s %d (rd pos %d), pts %d pts64 %ld timestamp %ld\n", + __func__, decode_pic_count, i, + vf->pts, vf->pts_us64, vf->timestamp); + + break; + } + } + return ret; +} + +static void avs_vf_notify_receiver(struct vdec_avs_hw_s *hw, + const char *provider_name, int event_type, void *data) +{ + if (hw->m_ins_flag) + vf_notify_receiver(hw_to_vdec(hw)->vf_provider_name, + event_type, data); + else + vf_notify_receiver(provider_name, event_type, data); +} + +static void set_frame_info(struct vdec_avs_hw_s *hw, struct vframe_s *vf, + unsigned int *duration) +{ + int ar = 0; + + unsigned int pixel_ratio = READ_VREG(AVS_PIC_RATIO); + hw->prepare_num++; +#ifndef USE_AVS_SEQ_INFO + if (hw->vavs_amstream_dec_info.width > 0 + && hw->vavs_amstream_dec_info.height > 0) { + vf->width = hw->vavs_amstream_dec_info.width; + vf->height = hw->vavs_amstream_dec_info.height; + } else +#endif + { + vf->width = READ_VREG(AVS_PIC_WIDTH); + vf->height = READ_VREG(AVS_PIC_HEIGHT); + hw->frame_width = vf->width; + hw->frame_height = vf->height; + /* pr_info("%s: (%d,%d)\n", __func__,vf->width, vf->height);*/ + } + +#ifndef USE_AVS_SEQ_INFO + if (hw->vavs_amstream_dec_info.rate > 0) + *duration = hw->vavs_amstream_dec_info.rate; + else +#endif + { + *duration = frame_rate_tab[READ_VREG(AVS_FRAME_RATE) & 0xf]; + /* pr_info("%s: duration = %d\n", __func__, *duration); */ + hw->frame_dur = *duration; + schedule_work(&hw->notify_work); + } + + if (hw->vavs_ratio == 0) { + /* always stretch to 16:9 */ + vf->ratio_control |= (0x90 << + DISP_RATIO_ASPECT_RATIO_BIT); + vf->sar_width = 1; + vf->sar_height = 1; + } else { + switch (pixel_ratio) { + case 1: + vf->sar_width = 1; + vf->sar_height = 1; + ar = (vf->height * hw->vavs_ratio) / vf->width; + break; + case 2: + vf->sar_width = 4; + vf->sar_height = 3; + ar = (vf->height * 3 * hw->vavs_ratio) / (vf->width * 4); + break; + case 3: + vf->sar_width = 16; + vf->sar_height = 9; + ar = (vf->height * 9 * hw->vavs_ratio) / (vf->width * 16); 
+ break; + case 4: + vf->sar_width = 221; + vf->sar_height = 100; + ar = (vf->height * 100 * hw->vavs_ratio) / (vf->width * + 221); + break; + default: + vf->sar_width = 1; + vf->sar_height = 1; + ar = (vf->height * hw->vavs_ratio) / vf->width; + break; + } + } + + ar = min(ar, DISP_RATIO_ASPECT_RATIO_MAX); + + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + /*vf->ratio_control |= DISP_RATIO_FORCECONFIG | DISP_RATIO_KEEPRATIO; */ + + vf->flag = 0; + buf_of_vf(vf)->detached = 0; + +} + +#ifdef ENABLE_USER_DATA + +/*static struct work_struct userdata_push_work;*/ +/* +#define DUMP_LAST_REPORTED_USER_DATA +*/ +static void userdata_push_process(struct vdec_avs_hw_s *hw) +{ + unsigned int user_data_flags; + unsigned int user_data_wp; + unsigned int user_data_length; + struct userdata_poc_info_t user_data_poc; +#ifdef DUMP_LAST_REPORTED_USER_DATA + int user_data_len; + int wp_start; + unsigned char *pdata; + int nLeft; +#endif + + user_data_flags = READ_VREG(AV_SCRATCH_N); + user_data_wp = (user_data_flags >> 16) & 0xffff; + user_data_length = user_data_flags & 0x7fff; + +#ifdef DUMP_LAST_REPORTED_USER_DATA + dma_sync_single_for_cpu(amports_get_dma_device(), + hw->user_data_buffer_phys, USER_DATA_SIZE, + DMA_FROM_DEVICE); + + if (user_data_length & 0x07) + user_data_len = (user_data_length + 8) & 0xFFFFFFF8; + else + user_data_len = user_data_length; + + if (user_data_wp >= user_data_len) { + wp_start = user_data_wp - user_data_len; + + pdata = (unsigned char *)hw->user_data_buffer; + pdata += wp_start; + nLeft = user_data_len; + while (nLeft >= 8) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + } else { + wp_start = user_data_wp + + USER_DATA_SIZE - user_data_len; + + pdata = (unsigned char *)hw->user_data_buffer; + pdata += wp_start; + nLeft = USER_DATA_SIZE - wp_start; + + while (nLeft >= 8) { + pr_info("%02x %02x %02x %02x %02x 
%02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + + pdata = (unsigned char *)hw->user_data_buffer; + nLeft = user_data_wp; + while (nLeft >= 8) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + } +#endif + +/* + pr_info("pocinfo 0x%x, poc %d, wp 0x%x, len %d\n", + READ_VREG(AV_SCRATCH_L), READ_VREG(AV_SCRATCH_M), + user_data_wp, user_data_length); +*/ + user_data_poc.poc_info = READ_VREG(AV_SCRATCH_L); + user_data_poc.poc_number = READ_VREG(AV_SCRATCH_M); + + WRITE_VREG(AV_SCRATCH_N, 0); +/* + wakeup_userdata_poll(user_data_poc, user_data_wp, + (unsigned long)hw->user_data_buffer, + USER_DATA_SIZE, user_data_length); +*/ +} + +static void userdata_push_do_work(struct work_struct *work) +{ + struct vdec_avs_hw_s *hw = + container_of(work, struct vdec_avs_hw_s, userdata_push_work); + userdata_push_process(hw); +} + +static u8 UserDataHandler(struct vdec_avs_hw_s *hw) +{ + unsigned int user_data_flags; + + user_data_flags = READ_VREG(AV_SCRATCH_N); + if (user_data_flags & (1 << 15)) { /* data ready */ + if (hw->m_ins_flag) { + hw->dec_result = DEC_RESULT_USERDATA; + vdec_schedule_work(&hw->work); + return 1; + } else + schedule_work(&hw->userdata_push_work); + } + return 0; +} +#endif + + +static inline void avs_update_gvs(struct vdec_avs_hw_s *hw) +{ + if (hw->gvs->frame_height != hw->frame_height) { + hw->gvs->frame_width = hw->frame_width; + hw->gvs->frame_height = hw->frame_height; + } + if (hw->gvs->frame_dur != hw->frame_dur) { + hw->gvs->frame_dur = hw->frame_dur; + if (hw->frame_dur != 0) + hw->gvs->frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ? 
+ 96000 / hw->frame_dur : (96000 / hw->frame_dur +1); + else + hw->gvs->frame_rate = -1; + } + + hw->gvs->status = hw->stat; + hw->gvs->error_count = READ_VREG(AV_SCRATCH_C); + hw->gvs->drop_frame_count = hw->drop_frame_count; + +} + +#ifdef HANDLE_AVS_IRQ +static irqreturn_t vavs_isr(int irq, void *dev_id) +#else +static void vavs_isr(void) +#endif +{ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + return IRQ_WAKE_THREAD; +} +/* + *static int run_flag = 1; + *static int step_flag; + */ +static int error_recovery_mode; /*0: blocky 1: mosaic*/ +/* + *static uint error_watchdog_threshold=10; + *static uint error_watchdog_count; + *static uint error_watchdog_buf_threshold = 0x4000000; + */ + +static struct vframe_s *vavs_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)op_arg; + hw->peek_num++; + if (step == 2) + return NULL; + if (hw->recover_flag) + return NULL; + + if (kfifo_len(&hw->display_q) > VF_POOL_SIZE) { + debug_print(hw, PRINT_FLAG_RUN_FLOW, + "kfifo len:%d invaild, peek error\n", + kfifo_len(&hw->display_q)); + return NULL; + } + + if (kfifo_peek(&hw->display_q, &vf)) { + if (vf) { + if (force_fps & 0x100) { + u32 rate = force_fps & 0xff; + + if (rate) + vf->duration = 96000/rate; + else + vf->duration = 0; + } + + } + return vf; + } + + return NULL; + +} + +static struct vframe_s *vavs_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)op_arg; + unsigned long flags; + + if (hw->recover_flag) + return NULL; + + if (step == 2) + return NULL; + else if (step == 1) + step = 2; + + spin_lock_irqsave(&lock, flags); + if (kfifo_get(&hw->display_q, &vf)) { + if (vf) { + hw->get_num++; + if (force_fps & 0x100) { + u32 rate = force_fps & 0xff; + + if (rate) + vf->duration = 96000/rate; + else + vf->duration = 0; + } + + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "%s, index = %d, w %d h %d, type 0x%x detached %d\n", + __func__, + vf->index, + vf->width, + 
vf->height, + vf->type, + buf_of_vf(vf)->detached); + } + spin_unlock_irqrestore(&lock, flags); + return vf; + } + spin_unlock_irqrestore(&lock, flags); + return NULL; + +} + +static void vavs_vf_put(struct vframe_s *vf, void *op_arg) +{ + int i; + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)op_arg; + + if (vf) { + hw->put_num++; + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "%s, index = %d, w %d h %d, type 0x%x detached 0x%x\n", + __func__, + vf->index, + vf->width, + vf->height, + vf->type, + buf_of_vf(vf)->detached); + } + if (hw->recover_flag) + return; + + for (i = 0; i < VF_POOL_SIZE; i++) { + if (vf == &hw->vfpool[i].vf) + break; + } + if (i < VF_POOL_SIZE) + + kfifo_put(&hw->recycle_q, (const struct vframe_s *)vf); + +} + +static int vavs_event_cb(int type, void *data, void *private_data) +{ + struct vdec_avs_hw_s *hw = (struct vdec_avs_hw_s *)private_data; + + if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(hw_to_vdec(hw)); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +static int vavs_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + /*if (!(hw->stat & STAT_VDEC_RUN)) + return -1;*/ + if (!hw) + return -1; + + vstatus->frame_width = hw->frame_width; + vstatus->frame_height = hw->frame_height; + if (hw->frame_dur != 0) + vstatus->frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ? 
+ 96000 / hw->frame_dur : (96000 / hw->frame_dur +1); + else + vstatus->frame_rate = -1; + vstatus->error_count = READ_VREG(AV_SCRATCH_C); + vstatus->status = hw->stat; + vstatus->bit_rate = hw->gvs->bit_rate; + vstatus->frame_dur = hw->frame_dur; + vstatus->frame_data = hw->gvs->frame_data; + vstatus->total_data = hw->gvs->total_data; + vstatus->frame_count = hw->gvs->frame_count; + vstatus->error_frame_count = hw->gvs->error_frame_count; + vstatus->drop_frame_count = hw->gvs->drop_frame_count; + vstatus->i_decoded_frames = hw->gvs->i_decoded_frames; + vstatus->i_lost_frames = hw->gvs->i_lost_frames; + vstatus->i_concealed_frames = hw->gvs->i_concealed_frames; + vstatus->p_decoded_frames = hw->gvs->p_decoded_frames; + vstatus->p_lost_frames = hw->gvs->p_lost_frames; + vstatus->p_concealed_frames = hw->gvs->p_concealed_frames; + vstatus->b_decoded_frames = hw->gvs->b_decoded_frames; + vstatus->b_lost_frames = hw->gvs->b_lost_frames; + vstatus->b_concealed_frames = hw->gvs->b_concealed_frames; + vstatus->total_data = hw->gvs->total_data; + vstatus->samp_cnt = hw->gvs->samp_cnt; + vstatus->offset = hw->gvs->offset; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + +static int vavs_set_isreset(struct vdec_s *vdec, int isreset) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + + hw->is_reset = isreset; + return 0; +} + +static int vavs_vdec_info_init(struct vdec_avs_hw_s *hw) +{ + + hw->gvs = kzalloc(sizeof(struct vdec_info), GFP_KERNEL); + if (NULL == hw->gvs) { + pr_info("the struct of vdec status malloc failed.\n"); + return -ENOMEM; + } + + return 0; +} +/****************************************/ +static int vavs_canvas_init(struct vdec_avs_hw_s *hw) +{ + int i, ret; + u32 canvas_width, canvas_height; + u32 decbuf_size, decbuf_y_size, decbuf_uv_size; + unsigned long buf_start; + int need_alloc_buf_num; + struct vdec_s *vdec = NULL; + + if (hw->m_ins_flag) + vdec = hw_to_vdec(hw); + + if 
(buf_size <= 0x00400000) { + /* SD only */ + canvas_width = 768; + canvas_height = 576; + decbuf_y_size = 0x80000; + decbuf_uv_size = 0x20000; + decbuf_size = 0x100000; + } else { + /* HD & SD */ + canvas_width = 1920; + canvas_height = 1088; + decbuf_y_size = 0x200000; + decbuf_uv_size = 0x80000; + decbuf_size = 0x300000; + } + +#ifdef AVSP_LONG_CABAC + need_alloc_buf_num = hw->vf_buf_num_used + 2; +#else + need_alloc_buf_num = hw->vf_buf_num_used + 1; +#endif + for (i = 0; i < need_alloc_buf_num; i++) { + + if (i == (need_alloc_buf_num - 1)) + decbuf_size = WORKSPACE_SIZE; +#ifdef AVSP_LONG_CABAC + else if (i == (need_alloc_buf_num - 2)) + decbuf_size = WORKSPACE_SIZE_A; +#endif + ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, i, + decbuf_size, DRIVER_NAME, &buf_start); + if (ret < 0) + return ret; + if (i == (need_alloc_buf_num - 1)) { + if (firmware_sel == 1) + hw->buf_offset = buf_start - + RV_AI_BUFF_START_ADDR; + else + hw->buf_offset = buf_start - + LONG_CABAC_RV_AI_BUFF_START_ADDR; + continue; + } +#ifdef AVSP_LONG_CABAC + else if (i == (need_alloc_buf_num - 2)) { + avsp_heap_adr = codec_mm_phys_to_virt(buf_start); + continue; + } +#endif + if (hw->m_ins_flag) { + unsigned canvas; + + if (vdec->parallel_dec == 1) { + unsigned tmp; + if (canvas_u(hw->canvas_spec[i]) == 0xff) { + tmp = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~(0xffff << 8); + hw->canvas_spec[i] |= tmp << 8; + hw->canvas_spec[i] |= tmp << 16; + } + if (canvas_y(hw->canvas_spec[i]) == 0xff) { + tmp = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~0xff; + hw->canvas_spec[i] |= tmp; + } + canvas = hw->canvas_spec[i]; + } else { + canvas = vdec->get_canvas(i, 2); + hw->canvas_spec[i] = canvas; + } + + hw->canvas_config[i][0].phy_addr = + buf_start; + hw->canvas_config[i][0].width = + canvas_width; + hw->canvas_config[i][0].height = + canvas_height; + hw->canvas_config[i][0].block_mode = + CANVAS_BLKMODE_32X32; + + 
hw->canvas_config[i][1].phy_addr = + buf_start + decbuf_y_size; + hw->canvas_config[i][1].width = + canvas_width; + hw->canvas_config[i][1].height = + canvas_height / 2; + hw->canvas_config[i][1].block_mode = + CANVAS_BLKMODE_32X32; + + } else { +#ifdef NV21 + config_cav_lut_ex(canvas_base + canvas_num * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(canvas_base + canvas_num * i + 1, + buf_start + + decbuf_y_size, canvas_width, + canvas_height / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); +#else + config_cav_lut_ex(canvas_num * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(canvas_num * i + 1, + buf_start + + decbuf_y_size, canvas_width / 2, + canvas_height / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(canvas_num * i + 2, + buf_start + + decbuf_y_size + decbuf_uv_size, + canvas_width / 2, canvas_height / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); +#endif + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "canvas config %d, addr %p\n", i, + (void *)buf_start); + } + } + return 0; +} + +static void vavs_recover(struct vdec_avs_hw_s *hw) +{ + vavs_canvas_init(hw); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 9) | (1 << 8)); + WRITE_VREG(DOS_SW_RESET0, 0); + + if (firmware_sel == 1) { + WRITE_VREG(POWER_CTL_VLD, 0x10); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 2, + MEM_FIFO_CNT_BIT, 2); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 8, + MEM_LEVEL_CNT_BIT, 6); + } + + + if (firmware_sel == 0) { + /* fixed canvas index */ + WRITE_VREG(AV_SCRATCH_0, canvas_base); + WRITE_VREG(AV_SCRATCH_1, hw->vf_buf_num_used); + } else { + int ii; +#ifndef 
USE_DYNAMIC_BUF_NUM + for (ii = 0; ii < 4; ii++) { + WRITE_VREG(AV_SCRATCH_0 + ii, + (canvas_base + canvas_num * ii) | + ((canvas_base + canvas_num * ii + 1) + << 8) | + ((canvas_base + canvas_num * ii + 1) + << 16) + ); + } +#else + for (ii = 0; ii < hw->vf_buf_num_used; ii += 2) { + WRITE_VREG(buf_spec_reg[ii >> 1], + (canvas_base + canvas_num * ii) | + ((canvas_base + canvas_num * ii + 1) + << 8) | + ((canvas_base + canvas_num * ii + 2) + << 16) | + ((canvas_base + canvas_num * ii + 3) + << 24) + ); + } +#endif + } + + /* notify ucode the buffer offset */ + WRITE_VREG(AV_SCRATCH_F, hw->buf_offset); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + +#ifndef USE_DYNAMIC_BUF_NUM + WRITE_VREG(AVS_SOS_COUNT, 0); +#endif + WRITE_VREG(AVS_BUFFERIN, 0); + WRITE_VREG(AVS_BUFFEROUT, 0); + if (error_recovery_mode) + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 0); + else + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 1); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); +#ifndef USE_DYNAMIC_BUF_NUM /* def DEBUG_UCODE */ + WRITE_VREG(AV_SCRATCH_D, 0); +#endif + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + +#ifdef PIC_DC_NEED_CLEAR + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 31); +#endif + +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) { + WRITE_VREG(LONG_CABAC_DES_ADDR, es_write_addr_phy); + WRITE_VREG(LONG_CABAC_REQ, 0); + WRITE_VREG(LONG_CABAC_PIC_SIZE, 0); + WRITE_VREG(LONG_CABAC_SRC_ADDR, 0); + } +#endif + WRITE_VREG(AV_SCRATCH_5, 0); + +} + +#define MBY_MBX MB_MOTION_MODE /*0xc07*/ +#define AVS_CO_MB_WR_ADDR 0xc38 +#define AVS_CO_MB_RW_CTL 0xc3d +#define AVS_CO_MB_RD_ADDR 0xc39 +#define AVSP_IQ_WQ_PARAM_01 0x0e19 +#define AVSP_IQ_WQ_PARAM_23 0x0e1a +#define AVSP_IQ_WQ_PARAM_45 0x0e1b + +static void vavs_save_regs(struct vdec_avs_hw_s *hw) +{ + hw->reg_scratch_0 = READ_VREG(AV_SCRATCH_0); + 
hw->reg_scratch_1 = READ_VREG(AV_SCRATCH_1); + hw->reg_scratch_2 = READ_VREG(AV_SCRATCH_2); + hw->reg_scratch_3 = READ_VREG(AV_SCRATCH_3); + hw->reg_scratch_4 = READ_VREG(AV_SCRATCH_4); + hw->reg_scratch_5 = READ_VREG(AV_SCRATCH_5); + hw->reg_scratch_6 = READ_VREG(AV_SCRATCH_6); + hw->reg_scratch_7 = READ_VREG(AV_SCRATCH_7); + hw->reg_scratch_8 = READ_VREG(AV_SCRATCH_8); + hw->reg_scratch_9 = READ_VREG(AV_SCRATCH_9); + hw->reg_scratch_A = READ_VREG(AV_SCRATCH_A); + hw->reg_scratch_B = READ_VREG(AV_SCRATCH_B); + hw->reg_scratch_C = READ_VREG(AV_SCRATCH_C); + hw->reg_scratch_D = READ_VREG(AV_SCRATCH_D); + hw->reg_scratch_E = READ_VREG(AV_SCRATCH_E); + hw->reg_scratch_F = READ_VREG(AV_SCRATCH_F); + hw->reg_scratch_G = READ_VREG(AV_SCRATCH_G); + hw->reg_scratch_H = READ_VREG(AV_SCRATCH_H); + hw->reg_scratch_I = READ_VREG(AV_SCRATCH_I); + + hw->reg_mb_width = READ_VREG(MB_WIDTH); + hw->reg_viff_bit_cnt = READ_VREG(VIFF_BIT_CNT); + + hw->reg_canvas_addr = READ_VREG(REC_CANVAS_ADDR); + hw->reg_dbkr_canvas_addr = READ_VREG(DBKR_CANVAS_ADDR); + hw->reg_dbkw_canvas_addr = READ_VREG(DBKW_CANVAS_ADDR); + hw->reg_anc2_canvas_addr = READ_VREG(ANC2_CANVAS_ADDR); + hw->reg_anc0_canvas_addr = READ_VREG(ANC0_CANVAS_ADDR); + hw->reg_anc1_canvas_addr = READ_VREG(ANC1_CANVAS_ADDR); + hw->reg_anc3_canvas_addr = READ_VREG(ANC3_CANVAS_ADDR); + hw->reg_anc4_canvas_addr = READ_VREG(ANC4_CANVAS_ADDR); + hw->reg_anc5_canvas_addr = READ_VREG(ANC5_CANVAS_ADDR); + + hw->slice_ver_pos_pic_type = READ_VREG(SLICE_VER_POS_PIC_TYPE); + + hw->vc1_control_reg = READ_VREG(VC1_CONTROL_REG); + hw->avs_co_mb_wr_addr = READ_VREG(AVS_CO_MB_WR_ADDR); + hw->slice_start_byte_01 = READ_VREG(SLICE_START_BYTE_01); + hw->slice_start_byte_23 = READ_VREG(SLICE_START_BYTE_23); + hw->vcop_ctrl_reg = READ_VREG(VCOP_CTRL_REG); + hw->iqidct_control = READ_VREG(IQIDCT_CONTROL); + hw->rv_ai_mb_count = READ_VREG(RV_AI_MB_COUNT); + hw->slice_qp = READ_VREG(SLICE_QP); + + hw->dc_scaler = READ_VREG(DC_SCALER); + 
hw->avsp_iq_wq_param_01 = READ_VREG(AVSP_IQ_WQ_PARAM_01); + hw->avsp_iq_wq_param_23 = READ_VREG(AVSP_IQ_WQ_PARAM_23); + hw->avsp_iq_wq_param_45 = READ_VREG(AVSP_IQ_WQ_PARAM_45); + hw->avs_co_mb_rd_addr = READ_VREG(AVS_CO_MB_RD_ADDR); + hw->dblk_mb_wid_height = READ_VREG(DBLK_MB_WID_HEIGHT); + hw->mc_pic_w_h = READ_VREG(MC_PIC_W_H); + hw->avs_co_mb_rw_ctl = READ_VREG(AVS_CO_MB_RW_CTL); + + hw->vld_decode_control = READ_VREG(VLD_DECODE_CONTROL); +} + +static void vavs_restore_regs(struct vdec_avs_hw_s *hw) +{ + debug_print(hw, PRINT_FLAG_DECODING, + "%s scratch_8 (AVS_BUFFERIN) 0x%x, decode_pic_count = %d\n", + __func__, hw->reg_scratch_8, hw->decode_pic_count); + + WRITE_VREG(AV_SCRATCH_0, hw->reg_scratch_0); + WRITE_VREG(AV_SCRATCH_1, hw->reg_scratch_1); + WRITE_VREG(AV_SCRATCH_2, hw->reg_scratch_2); + WRITE_VREG(AV_SCRATCH_3, hw->reg_scratch_3); + WRITE_VREG(AV_SCRATCH_4, hw->reg_scratch_4); + WRITE_VREG(AV_SCRATCH_5, hw->reg_scratch_5); + WRITE_VREG(AV_SCRATCH_6, hw->reg_scratch_6); + WRITE_VREG(AV_SCRATCH_7, hw->reg_scratch_7); + WRITE_VREG(AV_SCRATCH_8, hw->reg_scratch_8); + WRITE_VREG(AV_SCRATCH_9, hw->reg_scratch_9); + WRITE_VREG(AV_SCRATCH_A, hw->reg_scratch_A); + WRITE_VREG(AV_SCRATCH_B, hw->reg_scratch_B); + WRITE_VREG(AV_SCRATCH_C, hw->reg_scratch_C); + WRITE_VREG(AV_SCRATCH_D, hw->reg_scratch_D); + WRITE_VREG(AV_SCRATCH_E, hw->reg_scratch_E); + WRITE_VREG(AV_SCRATCH_F, hw->reg_scratch_F); + WRITE_VREG(AV_SCRATCH_G, hw->reg_scratch_G); + WRITE_VREG(AV_SCRATCH_H, hw->reg_scratch_H); + WRITE_VREG(AV_SCRATCH_I, hw->reg_scratch_I); + + WRITE_VREG(MB_WIDTH, hw->reg_mb_width); + WRITE_VREG(VIFF_BIT_CNT, hw->reg_viff_bit_cnt); + + WRITE_VREG(REC_CANVAS_ADDR, hw->reg_canvas_addr); + WRITE_VREG(DBKR_CANVAS_ADDR, hw->reg_dbkr_canvas_addr); + WRITE_VREG(DBKW_CANVAS_ADDR, hw->reg_dbkw_canvas_addr); + WRITE_VREG(ANC2_CANVAS_ADDR, hw->reg_anc2_canvas_addr); + WRITE_VREG(ANC0_CANVAS_ADDR, hw->reg_anc0_canvas_addr); + WRITE_VREG(ANC1_CANVAS_ADDR, 
hw->reg_anc1_canvas_addr); + WRITE_VREG(ANC3_CANVAS_ADDR, hw->reg_anc3_canvas_addr); + WRITE_VREG(ANC4_CANVAS_ADDR, hw->reg_anc4_canvas_addr); + WRITE_VREG(ANC5_CANVAS_ADDR, hw->reg_anc5_canvas_addr); + + WRITE_VREG(SLICE_VER_POS_PIC_TYPE, hw->slice_ver_pos_pic_type); + + WRITE_VREG(VC1_CONTROL_REG, hw->vc1_control_reg); + WRITE_VREG(AVS_CO_MB_WR_ADDR, hw->avs_co_mb_wr_addr); + WRITE_VREG(SLICE_START_BYTE_01, hw->slice_start_byte_01); + WRITE_VREG(SLICE_START_BYTE_23, hw->slice_start_byte_23); + WRITE_VREG(VCOP_CTRL_REG, hw->vcop_ctrl_reg); + WRITE_VREG(IQIDCT_CONTROL, hw->iqidct_control); + WRITE_VREG(RV_AI_MB_COUNT, hw->rv_ai_mb_count); + WRITE_VREG(SLICE_QP, hw->slice_qp); + + WRITE_VREG(DC_SCALER, hw->dc_scaler); + WRITE_VREG(AVSP_IQ_WQ_PARAM_01, hw->avsp_iq_wq_param_01); + WRITE_VREG(AVSP_IQ_WQ_PARAM_23, hw->avsp_iq_wq_param_23); + WRITE_VREG(AVSP_IQ_WQ_PARAM_45, hw->avsp_iq_wq_param_45); + WRITE_VREG(AVS_CO_MB_RD_ADDR, hw->avs_co_mb_rd_addr); + WRITE_VREG(DBLK_MB_WID_HEIGHT, hw->dblk_mb_wid_height); + WRITE_VREG(MC_PIC_W_H, hw->mc_pic_w_h); + WRITE_VREG(AVS_CO_MB_RW_CTL, hw->avs_co_mb_rw_ctl); + + WRITE_VREG(VLD_DECODE_CONTROL, hw->vld_decode_control); + +} + +static int vavs_prot_init(struct vdec_avs_hw_s *hw) +{ + int r = 0; +#if DEBUG_MULTI_FLAG > 0 + if (hw->decode_pic_count == 0) { +#endif +#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 9) | (1 << 8)); + WRITE_VREG(DOS_SW_RESET0, 0); + +#else + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + READ_RESET_REG(RESET0_REGISTER); + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + + WRITE_RESET_REG(RESET2_REGISTER, RESET_PIC_DC | RESET_DBLK); +#endif +#if DEBUG_MULTI_FLAG > 0 + } +#endif + 
/***************** reset vld **********************************/ + WRITE_VREG(POWER_CTL_VLD, 0x10); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 2, MEM_FIFO_CNT_BIT, 2); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 8, MEM_LEVEL_CNT_BIT, 6); + /*************************************************************/ + if (hw->m_ins_flag) { + int i; + if (hw->decode_pic_count == 0) { + r = vavs_canvas_init(hw); +#ifndef USE_DYNAMIC_BUF_NUM + for (i = 0; i < 4; i++) { + WRITE_VREG(AV_SCRATCH_0 + i, + hw->canvas_spec[i] + ); + } +#else + for (i = 0; i < hw->vf_buf_num_used; i++) + WRITE_VREG(buf_spec_reg[i], 0); + for (i = 0; i < hw->vf_buf_num_used; i += 2) { + WRITE_VREG(buf_spec_reg[i >> 1], + (hw->canvas_spec[i] & 0xffff) | + ((hw->canvas_spec[i + 1] & 0xffff) + << 16) + ); + debug_print(hw, PRINT_FLAG_DECODING, + "%s WRITE_VREG(0x%x, 0x%x)\n", + __func__, buf_spec_reg[i >> 1], READ_VREG(buf_spec_reg[i >> 1])); + } +#endif + } else + vavs_restore_regs(hw); + + for (i = 0; i < hw->vf_buf_num_used; i++) { + config_cav_lut_ex(canvas_y(hw->canvas_spec[i]), + hw->canvas_config[i][0].phy_addr, + hw->canvas_config[i][0].width, + hw->canvas_config[i][0].height, + CANVAS_ADDR_NOWRAP, + hw->canvas_config[i][0].block_mode, + 0, VDEC_1); + + config_cav_lut_ex(canvas_u(hw->canvas_spec[i]), + hw->canvas_config[i][1].phy_addr, + hw->canvas_config[i][1].width, + hw->canvas_config[i][1].height, + CANVAS_ADDR_NOWRAP, + hw->canvas_config[i][1].block_mode, + 0, VDEC_1); + } + } else { + r = vavs_canvas_init(hw); +#ifdef NV21 + if (firmware_sel == 0) { + /* fixed canvas index */ + WRITE_VREG(AV_SCRATCH_0, canvas_base); + WRITE_VREG(AV_SCRATCH_1, hw->vf_buf_num_used); + } else { + int ii; +#ifndef USE_DYNAMIC_BUF_NUM + for (ii = 0; ii < 4; ii++) { + WRITE_VREG(AV_SCRATCH_0 + ii, + (canvas_base + canvas_num * ii) | + ((canvas_base + canvas_num * ii + 1) + << 8) | + ((canvas_base + canvas_num * ii + 1) + << 16) + ); + } +#else + for (ii = 0; ii < hw->vf_buf_num_used; ii += 2) { + 
WRITE_VREG(buf_spec_reg[ii >> 1], + (canvas_base + canvas_num * ii) | + ((canvas_base + canvas_num * ii + 1) + << 8) | + ((canvas_base + canvas_num * ii + 2) + << 16) | + ((canvas_base + canvas_num * ii + 3) + << 24) + ); + } +#endif + /* + *WRITE_VREG(AV_SCRATCH_0, 0x010100); + *WRITE_VREG(AV_SCRATCH_1, 0x040403); + *WRITE_VREG(AV_SCRATCH_2, 0x070706); + *WRITE_VREG(AV_SCRATCH_3, 0x0a0a09); + */ + } +#else + /* index v << 16 | u << 8 | y */ + WRITE_VREG(AV_SCRATCH_0, 0x020100); + WRITE_VREG(AV_SCRATCH_1, 0x050403); + WRITE_VREG(AV_SCRATCH_2, 0x080706); + WRITE_VREG(AV_SCRATCH_3, 0x0b0a09); +#endif + } + /* notify ucode the buffer offset */ + if (hw->decode_pic_count == 0) + WRITE_VREG(AV_SCRATCH_F, hw->buf_offset); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + if (hw->decode_pic_count == 0) { +#ifndef USE_DYNAMIC_BUF_NUM + WRITE_VREG(AVS_SOS_COUNT, 0); +#endif + WRITE_VREG(AVS_BUFFERIN, 0); + WRITE_VREG(AVS_BUFFEROUT, 0); + } + if (error_recovery_mode) + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 0); + else + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 1); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); +#ifndef USE_DYNAMIC_BUF_NUM /* def DEBUG_UCODE */ + if (hw->decode_pic_count == 0) + WRITE_VREG(AV_SCRATCH_D, 0); +#endif + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + +#ifdef PIC_DC_NEED_CLEAR + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 31); +#endif + if (hw->m_ins_flag && start_decoding_delay > 0) + msleep(start_decoding_delay); + + //pr_info("+++++++++++++++++++++++++++++++\n"); + //pr_info("+++++++++++++++++++++++++++++++\n"); + //pr_info("+++++++++++++++++++++++++++++++\n"); +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) { + WRITE_VREG(LONG_CABAC_DES_ADDR, es_write_addr_phy); + WRITE_VREG(LONG_CABAC_REQ, 0); + WRITE_VREG(LONG_CABAC_PIC_SIZE, 0); + WRITE_VREG(LONG_CABAC_SRC_ADDR, 
0); + } +#endif + +#ifdef ENABLE_USER_DATA + if (hw->decode_pic_count == 0) { + WRITE_VREG(AV_SCRATCH_N, (u32)(hw->user_data_buffer_phys - hw->buf_offset)); + pr_debug("AV_SCRATCH_N = 0x%x\n", READ_VREG(AV_SCRATCH_N)); + } else + WRITE_VREG(AV_SCRATCH_N, 0); +#endif + if (hw->m_ins_flag) { + if (vdec_frame_based(hw_to_vdec(hw))) + WRITE_VREG(DECODE_MODE, DECODE_MODE_MULTI_FRAMEBASE); + else { + if (hw->decode_status_skip_pic_done_flag) { + WRITE_VREG(DECODE_CFG, hw->decode_decode_cont_start_code); + WRITE_VREG(DECODE_MODE, DECODE_MODE_MULTI_STREAMBASE_CONT); + } else + WRITE_VREG(DECODE_MODE, DECODE_MODE_MULTI_STREAMBASE); + } + WRITE_VREG(DECODE_LMEM_BUF_ADR, (u32)hw->lmem_phy_addr); + } else + WRITE_VREG(DECODE_MODE, DECODE_MODE_SINGLE); + + if (ins_udebug_flag[DECODE_ID(hw)] && + (ins_udebug_flag[DECODE_ID(hw)] >> 16) == hw->decode_pic_count) { + WRITE_VREG(DECODE_STOP_POS, + ins_udebug_flag[DECODE_ID(hw)] & 0xffff); + } + else + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + hw->old_udebug_flag = udebug_flag; + + return r; +} + + +#ifdef AVSP_LONG_CABAC +static unsigned char es_write_addr[MAX_CODED_FRAME_SIZE] __aligned(64); +#endif +static void vavs_local_init(struct vdec_avs_hw_s *hw) +{ + int i; + + hw->vf_buf_num_used = vf_buf_num; + + hw->vavs_ratio = hw->vavs_amstream_dec_info.ratio; + + hw->avi_flag = (unsigned long) hw->vavs_amstream_dec_info.param; + + hw->frame_width = hw->frame_height = hw->frame_dur = hw->frame_prog = 0; + + hw->throw_pb_flag = 1; + + hw->total_frame = 0; + hw->saved_resolution = 0; + hw->next_pts = 0; + +#ifdef DEBUG_PTS + hw->pts_hit = hw->pts_missed = hw->pts_i_hit = hw->pts_i_missed = 0; +#endif + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->recycle_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &hw->vfpool[i].vf; + + hw->vfpool[i].vf.index = hw->vf_buf_num_used; + hw->vfpool[i].vf.bufWidth = 1920; + hw->vfpool[i].detached = 0; + kfifo_put(&hw->newframe_q, vf); + } + for (i = 
0; i < hw->vf_buf_num_used; i++) + hw->vfbuf_use[i] = 0; + + /*cur_vfpool = vfpool;*/ + + if (hw->recover_flag == 1) + return; + + if (hw->mm_blk_handle) { + pr_info("decoder_bmmu_box_free\n"); + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + + hw->mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER); + if (hw->mm_blk_handle == NULL) + pr_info("Error, decoder_bmmu_box_alloc_box fail\n"); + +} + +static int vavs_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)op_arg; + + + spin_lock_irqsave(&lock, flags); + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&hw->newframe_q); + states->buf_avail_num = kfifo_len(&hw->display_q); + states->buf_recycle_num = kfifo_len(&hw->recycle_q); + if (step == 2) + states->buf_avail_num = 0; + spin_unlock_irqrestore(&lock, flags); + return 0; +} + +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER +static void vavs_ppmgr_reset(void) +{ + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_RESET, NULL); + + vavs_local_init(ghw); + + pr_info("vavs: vf_ppmgr_reset\n"); +} +#endif + +static void vavs_local_reset(struct vdec_avs_hw_s *hw) +{ + mutex_lock(&vavs_mutex); + hw->recover_flag = 1; + pr_info("error, local reset\n"); + amvdec_stop(); + msleep(100); + avs_vf_notify_receiver(hw, PROVIDER_NAME, VFRAME_EVENT_PROVIDER_RESET, NULL); + vavs_local_init(hw); + vavs_recover(hw); + +#ifdef ENABLE_USER_DATA + reset_userdata_fifo(1); +#endif + + amvdec_start(); + hw->recover_flag = 0; +#if 0 + error_watchdog_count = 0; + + pr_info("pc %x stream buf wp %x rp %x level %x\n", + READ_VREG(MPC_E), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); +#endif + + + + mutex_unlock(&vavs_mutex); +} + +#if 0 +static struct work_struct fatal_error_wd_work; +static 
struct work_struct notify_work; +static atomic_t error_handler_run = ATOMIC_INIT(0); +#endif +static void vavs_fatal_error_handler(struct work_struct *work) +{ + struct vdec_avs_hw_s *hw = + container_of(work, struct vdec_avs_hw_s, fatal_error_wd_work); + if (debug & AVS_DEBUG_OLD_ERROR_HANDLE) { + mutex_lock(&vavs_mutex); + pr_info("vavs fatal error reset !\n"); + amvdec_stop(); +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vavs_ppmgr_reset(); +#else + vf_light_unreg_provider(&vavs_vf_prov); + vavs_local_init(hw); + vf_reg_provider(&vavs_vf_prov); +#endif + vavs_recover(hw); + amvdec_start(); + mutex_unlock(&vavs_mutex); + } else { + pr_info("avs fatal_error_handler\n"); + vavs_local_reset(hw); + } + atomic_set(&hw->error_handler_run, 0); +} + +static void vavs_notify_work(struct work_struct *work) +{ + struct vdec_avs_hw_s *hw = + container_of(work, struct vdec_avs_hw_s, notify_work); + if (hw->fr_hint_status == VDEC_NEED_HINT) { + avs_vf_notify_receiver(hw, PROVIDER_NAME , + VFRAME_EVENT_PROVIDER_FR_HINT , + (void *)((unsigned long)hw->frame_dur)); + hw->fr_hint_status = VDEC_HINTED; + } + return; +} + +static void avs_set_clk(struct work_struct *work) +{ + struct vdec_avs_hw_s *hw = + container_of(work, struct vdec_avs_hw_s, set_clk_work); + if (hw->frame_dur > 0 && hw->saved_resolution != + hw->frame_width * hw->frame_height * (96000 / hw->frame_dur)) { + int fps = 96000 / hw->frame_dur; + + hw->saved_resolution = hw->frame_width * hw->frame_height * fps; + if (firmware_sel == 0 && + (debug & AVS_DEBUG_USE_FULL_SPEED)) { + vdec_source_changed(VFORMAT_AVS, + 4096, 2048, 60); + } else { + vdec_source_changed(VFORMAT_AVS, + hw->frame_width, hw->frame_height, fps); + } + + } +} + +#ifdef DEBUG_MULTI_WITH_AUTOMODE +int delay_count = 0; +#endif +static void vavs_put_timer_func(struct timer_list *arg) +{ + struct vdec_avs_hw_s *hw = container_of(arg, + struct vdec_avs_hw_s, recycle_timer); + struct timer_list *timer = &hw->recycle_timer; + +#ifndef HANDLE_AVS_IRQ + 
vavs_isr(); +#endif +#ifdef DEBUG_MULTI_WITH_AUTOMODE + if (delay_count > 0) { + if (delay_count == 1) + amvdec_start(); + delay_count--; + } +#endif + if (READ_VREG(AVS_SOS_COUNT)) { + if (!error_recovery_mode) { +#if 0 + if (debug & AVS_DEBUG_OLD_ERROR_HANDLE) { + mutex_lock(&vavs_mutex); + pr_info("vavs fatal error reset !\n"); + amvdec_stop(); +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vavs_ppmgr_reset(); +#else + vf_light_unreg_provider(&vavs_vf_prov); + vavs_local_init(); + vf_reg_provider(&vavs_vf_prov); +#endif + vavs_recover(); + amvdec_start(); + mutex_unlock(&vavs_mutex); + } else { + vavs_local_reset(); + } +#else + if (!atomic_read(&hw->error_handler_run)) { + atomic_set(&hw->error_handler_run, 1); + pr_info("AVS_SOS_COUNT = %d\n", + READ_VREG(AVS_SOS_COUNT)); + pr_info("WP = 0x%x, RP = 0x%x, LEVEL = 0x%x, AVAIL = 0x%x, CUR_PTR = 0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL), + READ_VREG(VLD_MEM_VIFIFO_CURR_PTR)); + schedule_work(&hw->fatal_error_wd_work); + } +#endif + } + } +#if 0 + if (long_cabac_busy == 0 && + error_watchdog_threshold > 0 && + kfifo_len(&hw->display_q) == 0 && + READ_VREG(VLD_MEM_VIFIFO_LEVEL) > + error_watchdog_buf_threshold) { + pr_info("newq %d dispq %d recyq %d\r\n", + kfifo_len(&hw->newframe_q), + kfifo_len(&hw->display_q), + kfifo_len(&hw->recycle_q)); + pr_info("pc %x stream buf wp %x rp %x level %x\n", + READ_VREG(MPC_E), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + error_watchdog_count++; + if (error_watchdog_count >= error_watchdog_threshold) + vavs_local_reset(); + } else + error_watchdog_count = 0; +#endif + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + if ((hw->ucode_pause_pos != 0) && + 
(hw->ucode_pause_pos != 0xffffffff) && + udebug_pause_pos != hw->ucode_pause_pos) { + hw->ucode_pause_pos = 0; + WRITE_VREG(DEBUG_REG1, 0); + } + + if (!kfifo_is_empty(&hw->recycle_q) && (READ_VREG(AVS_BUFFERIN) == 0)) { + struct vframe_s *vf; + + if (kfifo_get(&hw->recycle_q, &vf)) { + if ((vf->index < hw->vf_buf_num_used) && + (--hw->vfbuf_use[vf->index] == 0)) { + debug_print(hw, PRINT_FLAG_DECODING, + "%s WRITE_VREG(AVS_BUFFERIN, 0x%x) for vf index of %d\n", + __func__, + ~(1 << vf->index), vf->index); + WRITE_VREG(AVS_BUFFERIN, ~(1 << vf->index)); + vf->index = hw->vf_buf_num_used; + } + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + } + + } + + schedule_work(&hw->set_clk_work); + + timer->expires = jiffies + PUT_INTERVAL; + + add_timer(timer); +} + +#ifdef AVSP_LONG_CABAC + +static void long_cabac_do_work(struct work_struct *work) +{ + int status = 0; + struct vdec_avs_hw_s *hw = gw; +#ifdef PERFORMANCE_DEBUG + pr_info("enter %s buf level (new %d, display %d, recycle %d)\r\n", + __func__, + kfifo_len(&hw->newframe_q), + kfifo_len(&hw->display_q), + kfifo_len(&hw->recycle_q) + ); +#endif + mutex_lock(&vavs_mutex); + long_cabac_busy = 1; + while (READ_VREG(LONG_CABAC_REQ)) { + if (process_long_cabac() < 0) { + status = -1; + break; + } + } + long_cabac_busy = 0; + mutex_unlock(&vavs_mutex); +#ifdef PERFORMANCE_DEBUG + pr_info("exit %s buf level (new %d, display %d, recycle %d)\r\n", + __func__, + kfifo_len(&hw->newframe_q), + kfifo_len(&hw->display_q), + kfifo_len(&hw->recycle_q) + ); +#endif + if (status < 0) { + pr_info("transcoding error, local reset\r\n"); + vavs_local_reset(hw); + } + +} +#endif + +#ifdef AVSP_LONG_CABAC +static void init_avsp_long_cabac_buf(void) +{ +#if 0 + es_write_addr_phy = (unsigned long)codec_mm_alloc_for_dma( + "vavs", + PAGE_ALIGN(MAX_CODED_FRAME_SIZE)/PAGE_SIZE, + 0, CODEC_MM_FLAGS_DMA_CPU); + es_write_addr_virt = codec_mm_phys_to_virt(es_write_addr_phy); + +#elif 0 + es_write_addr_virt = + (void 
*)dma_alloc_coherent(amports_get_dma_device(), + MAX_CODED_FRAME_SIZE, &es_write_addr_phy, + GFP_KERNEL); +#else + /*es_write_addr_virt = kmalloc(MAX_CODED_FRAME_SIZE, GFP_KERNEL); + * es_write_addr_virt = (void *)__get_free_pages(GFP_KERNEL, + * get_order(MAX_CODED_FRAME_SIZE)); + */ + es_write_addr_virt = &es_write_addr[0]; + if (es_write_addr_virt == NULL) { + pr_err("%s: failed to alloc es_write_addr_virt buffer\n", + __func__); + return; + } + + es_write_addr_phy = dma_map_single(amports_get_dma_device(), + es_write_addr_virt, + MAX_CODED_FRAME_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(amports_get_dma_device(), + es_write_addr_phy)) { + pr_err("%s: failed to map es_write_addr_virt buffer\n", + __func__); + /*kfree(es_write_addr_virt);*/ + es_write_addr_virt = NULL; + return; + } +#endif + + +#ifdef BITSTREAM_READ_TMP_NO_CACHE + bitstream_read_tmp = + (void *)dma_alloc_coherent(amports_get_dma_device(), + SVA_STREAM_BUF_SIZE, &bitstream_read_tmp_phy, + GFP_KERNEL); + +#else + + bitstream_read_tmp = kmalloc(SVA_STREAM_BUF_SIZE, GFP_KERNEL); + /*bitstream_read_tmp = (void *)__get_free_pages(GFP_KERNEL, + *get_order(MAX_CODED_FRAME_SIZE)); + */ + if (bitstream_read_tmp == NULL) { + pr_err("%s: failed to alloc bitstream_read_tmp buffer\n", + __func__); + return; + } + + bitstream_read_tmp_phy = dma_map_single(amports_get_dma_device(), + bitstream_read_tmp, + SVA_STREAM_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(amports_get_dma_device(), + bitstream_read_tmp_phy)) { + pr_err("%s: failed to map rpm buffer\n", __func__); + kfree(bitstream_read_tmp); + bitstream_read_tmp = NULL; + return; + } +#endif +} +#endif + + +static s32 vavs_init(struct vdec_avs_hw_s *hw) +{ + int ret, size = -1; + struct firmware_s *fw; + u32 fw_size = 0x1000 * 16; + /*char *buf = vmalloc(0x1000 * 16); + + if (IS_ERR_OR_NULL(buf)) + return -ENOMEM; + */ + fw = vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + pr_info("vavs_init\n"); + 
//init_timer(&hw->recycle_timer); + + //hw->stat |= STAT_TIMER_INIT; + + //amvdec_enable(); + + //vdec_enable_DMC(NULL); + + vavs_local_init(hw); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) + size = get_firmware_data(VIDEO_DEC_AVS_MULTI, fw->data); + else { + if (firmware_sel == 1) + size = get_firmware_data(VIDEO_DEC_AVS_NOCABAC, fw->data); +#ifdef AVSP_LONG_CABAC + else { + init_avsp_long_cabac_buf(); + size = get_firmware_data(VIDEO_DEC_AVS_MULTI, fw->data); + } +#endif + } + + if (size < 0) { + amvdec_disable(); + pr_err("get firmware fail."); + vfree(fw); + return -1; + } + + fw->len = size; + hw->fw = fw; + + if (hw->m_ins_flag) { + timer_setup(&hw->check_timer, check_timer_func, 0); + //init_timer(&hw->check_timer); + //hw->check_timer.data = (ulong) hw; + //hw->check_timer.function = check_timer_func; + hw->check_timer.expires = jiffies + CHECK_INTERVAL; + + + //add_timer(&hw->check_timer); + hw->stat |= STAT_TIMER_ARM; + + INIT_WORK(&hw->work, vavs_work); + + hw->fw = fw; + return 0; + } + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) + ret = amvdec_loadmc_ex(VFORMAT_AVS, NULL, fw->data); + else if (firmware_sel == 1) + ret = amvdec_loadmc_ex(VFORMAT_AVS, "avs_no_cabac", fw->data); + else + ret = amvdec_loadmc_ex(VFORMAT_AVS, NULL, fw->data); + + if (ret < 0) { + amvdec_disable(); + /*vfree(buf);*/ + pr_err("AVS: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + return -EBUSY; + } + + /*vfree(buf);*/ + + hw->stat |= STAT_MC_LOAD; + + + /* enable AMRISC side protocol */ + ret = vavs_prot_init(hw); + if (ret < 0) + return ret; + +#ifdef HANDLE_AVS_IRQ + if (vdec_request_irq(VDEC_IRQ_1, vavs_isr, + "vavs-irq", (void *)hw)) { + amvdec_disable(); + pr_info("vavs irq register error.\n"); + return -ENOENT; + } +#endif + + hw->stat |= STAT_ISR_REG; + +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_provider_init(&vavs_vf_prov, PROVIDER_NAME, &vavs_vf_provider, hw); + vf_reg_provider(&vavs_vf_prov); + avs_vf_notify_receiver(hw, PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); +#else + vf_provider_init(&vavs_vf_prov, PROVIDER_NAME, &vavs_vf_provider, hw); + vf_reg_provider(&vavs_vf_prov); +#endif + + if (hw->vavs_amstream_dec_info.rate != 0) { + if (!hw->is_reset) + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *)((unsigned long) + hw->vavs_amstream_dec_info.rate)); + hw->fr_hint_status = VDEC_HINTED; + } else + hw->fr_hint_status = VDEC_NEED_HINT; + + hw->stat |= STAT_VF_HOOK; + + timer_setup(&hw->recycle_timer, vavs_put_timer_func, 0); + //hw->recycle_timer.data = (ulong)(hw); + //hw->recycle_timer.function = vavs_put_timer_func; + hw->recycle_timer.expires = jiffies + PUT_INTERVAL; + + add_timer(&hw->recycle_timer); + + hw->stat |= STAT_TIMER_ARM; + +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) + INIT_WORK(&long_cabac_wd_work, long_cabac_do_work); +#endif + vdec_source_changed(VFORMAT_AVS, + 1920, 1080, 30); +#ifdef DEBUG_MULTI_WITH_AUTOMODE + if (start_decoding_delay == 0) + amvdec_start(); + else + delay_count = start_decoding_delay/10; +#else + amvdec_start(); +#endif + hw->stat |= STAT_VDEC_RUN; + return 0; +} + +static int amvdec_avs_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_avs_hw_s *hw = NULL; + + if (pdata == NULL) { + pr_info("amvdec_avs memory resource undefined.\n"); + 
return -EFAULT; + } + + hw = (struct vdec_avs_hw_s *)vzalloc(sizeof(struct vdec_avs_hw_s)); + if (hw == NULL) { + pr_info("\nammvdec_avs decoder driver alloc failed\n"); + return -ENOMEM; + } + pdata->private = hw; + ghw = hw; + atomic_set(&hw->error_handler_run, 0); + hw->m_ins_flag = 0; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM || disable_longcabac_trans) + firmware_sel = 1; + + if (firmware_sel == 1) { +#ifndef USE_DYNAMIC_BUF_NUM + vf_buf_num = 4; +#endif + canvas_base = 0; + canvas_num = 3; + } else { + + canvas_base = 128; + canvas_num = 2; /*NV21*/ + } + + + if (pdata->sys_info) + hw->vavs_amstream_dec_info = *pdata->sys_info; + + pr_info("%s (%d,%d) %d\n", __func__, hw->vavs_amstream_dec_info.width, + hw->vavs_amstream_dec_info.height, hw->vavs_amstream_dec_info.rate); + + pdata->dec_status = vavs_dec_status; + pdata->set_isreset = vavs_set_isreset; + hw->is_reset = 0; + + pdata->user_data_read = NULL; + pdata->reset_userdata_fifo = NULL; + + vavs_vdec_info_init(hw); + +#ifdef ENABLE_USER_DATA + if (NULL == hw->user_data_buffer) { + hw->user_data_buffer = + dma_alloc_coherent(amports_get_dma_device(), + USER_DATA_SIZE, + &hw->user_data_buffer_phys, GFP_KERNEL); + if (!hw->user_data_buffer) { + pr_info("%s: Can not allocate hw->user_data_buffer\n", + __func__); + return -ENOMEM; + } + pr_debug("hw->user_data_buffer = 0x%p, hw->user_data_buffer_phys = 0x%x\n", + hw->user_data_buffer, (u32)hw->user_data_buffer_phys); + } +#endif + INIT_WORK(&hw->set_clk_work, avs_set_clk); + if (vavs_init(hw) < 0) { + pr_info("amvdec_avs init failed.\n"); + kfree(hw->gvs); + hw->gvs = NULL; + pdata->dec_status = NULL; + if (hw->fw) + vfree(hw->fw); + hw->fw = NULL; + return -ENODEV; + } + /*vdec = pdata;*/ + + INIT_WORK(&hw->fatal_error_wd_work, vavs_fatal_error_handler); + atomic_set(&hw->error_handler_run, 0); +#ifdef ENABLE_USER_DATA + INIT_WORK(&hw->userdata_push_work, userdata_push_do_work); +#endif + INIT_WORK(&hw->notify_work, vavs_notify_work); + + return 
0; +} + +static int amvdec_avs_remove(struct platform_device *pdev) +{ + struct vdec_avs_hw_s *hw = ghw; + + cancel_work_sync(&hw->fatal_error_wd_work); + atomic_set(&hw->error_handler_run, 0); +#ifdef ENABLE_USER_DATA + cancel_work_sync(&hw->userdata_push_work); +#endif + cancel_work_sync(&hw->notify_work); + cancel_work_sync(&hw->set_clk_work); + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)vavs_dec_id); + hw->stat &= ~STAT_ISR_REG; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->recycle_timer); + hw->stat &= ~STAT_TIMER_ARM; + } +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) { + mutex_lock(&vavs_mutex); + cancel_work_sync(&long_cabac_wd_work); + mutex_unlock(&vavs_mutex); + + if (es_write_addr_virt) { +#if 0 + codec_mm_free_for_dma("vavs", es_write_addr_phy); +#else + dma_unmap_single(amports_get_dma_device(), + es_write_addr_phy, + MAX_CODED_FRAME_SIZE, DMA_FROM_DEVICE); + /*kfree(es_write_addr_virt);*/ + es_write_addr_virt = NULL; +#endif + } + +#ifdef BITSTREAM_READ_TMP_NO_CACHE + if (bitstream_read_tmp) { + dma_free_coherent(amports_get_dma_device(), + SVA_STREAM_BUF_SIZE, bitstream_read_tmp, + bitstream_read_tmp_phy); + bitstream_read_tmp = NULL; + } +#else + if (bitstream_read_tmp) { + dma_unmap_single(amports_get_dma_device(), + bitstream_read_tmp_phy, + SVA_STREAM_BUF_SIZE, DMA_FROM_DEVICE); + kfree(bitstream_read_tmp); + bitstream_read_tmp = NULL; + } +#endif + } +#endif + if (hw->stat & STAT_VF_HOOK) { + if (hw->fr_hint_status == VDEC_HINTED && !hw->is_reset) + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_END_HINT, NULL); + hw->fr_hint_status = VDEC_NO_NEED_HINT; + vf_unreg_provider(&vavs_vf_prov); + hw->stat &= ~STAT_VF_HOOK; + } + +#ifdef ENABLE_USER_DATA + if (hw->user_data_buffer != NULL) { + dma_free_coherent( + amports_get_dma_device(), + USER_DATA_SIZE, + hw->user_data_buffer, + 
hw->user_data_buffer_phys); + hw->user_data_buffer = NULL; + hw->user_data_buffer_phys = 0; + } +#endif + + if (hw->fw) { + vfree(hw->fw); + hw->fw = NULL; + } + + //amvdec_disable(); + //vdec_disable_DMC(NULL); + + hw->pic_type = 0; + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } +#ifdef DEBUG_PTS + pr_debug("pts hit %d, pts missed %d, i hit %d, missed %d\n", hw->pts_hit, + hw->pts_missed, hw->pts_i_hit, hw->pts_i_missed); + pr_debug("total frame %d, hw->avi_flag %d, rate %d\n", hw->total_frame, hw->avi_flag, + hw->vavs_amstream_dec_info.rate); +#endif + kfree(hw->gvs); + hw->gvs = NULL; + vfree(hw); + return 0; +} + +/****************************************/ +#if 0 +static struct platform_driver amvdec_avs_driver = { + .probe = amvdec_avs_probe, + .remove = amvdec_avs_remove, + .driver = { + .name = DRIVER_NAME, + } +}; +#endif + +static void recycle_frames(struct vdec_avs_hw_s *hw); + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + int ret = 1; + unsigned buf_busy_mask = (1 << hw->vf_buf_num_used) - 1; +#ifdef DEBUG_MULTI_FRAME_INS + if ((DECODE_ID(hw) == 0) && run_count[0] > run_count[1] && + run_count[1] < max_run_count[1]) + return 0; + + if ((DECODE_ID(hw) == 1) && run_count[1] >= run_count[0] && + run_count[0] < max_run_count[0]) + return 0; + + if (max_run_count[DECODE_ID(hw)] > 0 && + run_count[DECODE_ID(hw)] >= max_run_count[DECODE_ID(hw)]) + return 0; +#endif + if (vdec_stream_based(vdec) && (hw->init_flag == 0) + && pre_decode_buf_level != 0) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = vdec->input.size + wp - rp; + else + level = wp - rp; + + if (level < pre_decode_buf_level) { + hw->not_run_ready++; + return 0; + } + } + + if (hw->reset_decode_flag == 0 && + hw->again_flag == 0 && + (hw->buf_status & 
buf_busy_mask) == buf_busy_mask) { + recycle_frames(hw); + if (hw->buf_recycle_status == 0) + ret = 0; + } + + if (again_threshold > 0 && + hw->pre_parser_wr_ptr != 0 && + hw->again_flag && + (!vdec_frame_based(vdec))) { + u32 parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_rp); + if (parser_wr_ptr >= hw->pre_parser_wr_ptr && + (parser_wr_ptr - hw->pre_parser_wr_ptr) < + again_threshold) { + int r = vdec_sync_input(vdec); + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "%s buf lelvel:%x\n", __func__, r); + ret = 0; + } + } + + if (ret) + hw->not_run_ready = 0; + else + hw->not_run_ready++; + + if (ret != 0) { + if (vdec->parallel_dec == 1) + return (unsigned long)(CORE_MASK_VDEC_1); + else + return (unsigned long)(CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + } else + return 0; +} + +static void vavs_work(struct work_struct *work) +{ + struct vdec_avs_hw_s *hw = + container_of(work, struct vdec_avs_hw_s, work); + struct vdec_s *vdec = hw_to_vdec(hw); + if (hw->dec_result != DEC_RESULT_AGAIN) + debug_print(hw, PRINT_FLAG_RUN_FLOW, + "ammvdec_avs: vavs_work,result=%d,status=%d\n", + hw->dec_result, hw_to_vdec(hw)->next_status); + hw->again_flag = 0; + if (hw->dec_result == DEC_RESULT_USERDATA) { + userdata_push_process(hw); + return; + } else if (hw->dec_result == DEC_RESULT_DONE) { + + if (!hw->ctx_valid) + hw->ctx_valid = 1; +#ifdef DEBUG_MULTI_FRAME_INS + msleep(delay); +#endif + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + } else if (hw->dec_result == DEC_RESULT_AGAIN + && (hw_to_vdec(hw)->next_status != + VDEC_STATUS_DISCONNECTED)) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + hw->again_flag = 1; + if (!vdec_has_more_input(hw_to_vdec(hw))) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } + } else if (hw->dec_result == DEC_RESULT_GET_DATA + && (hw_to_vdec(hw)->next_status != + VDEC_STATUS_DISCONNECTED)) { + if (!vdec_has_more_input(hw_to_vdec(hw))) { + hw->dec_result = DEC_RESULT_EOS; + 
vdec_schedule_work(&hw->work); + return; + } + debug_print(hw, PRINT_FLAG_VLD_DETAIL, + "%s DEC_RESULT_GET_DATA %x %x %x\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP)); + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + vdec_clean_input(hw_to_vdec(hw)); + return; + } else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) { + debug_print(hw, PRINT_FLAG_ERROR, + "%s: force exit\n", __func__); + if (hw->stat & STAT_ISR_REG) { + amvdec_stop(); + /*disable mbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 0); + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + } else if (hw->dec_result == DEC_RESULT_EOS) { + debug_print(hw, PRINT_FLAG_DECODING, + "%s: end of stream\n", __func__); + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + hw->eos = 1; + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + vdec_clean_input(hw_to_vdec(hw)); + } + if (hw->stat & STAT_VDEC_RUN) { +#if DEBUG_MULTI_FLAG == 1 +#else + amvdec_stop(); +#endif + hw->stat &= ~STAT_VDEC_RUN; + } + /*wait_vmmpeg12_search_done(hw);*/ + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + if (hw->dec_result == DEC_RESULT_DONE) + hw->buf_recycle_status = 0; + debug_print(hw, PRINT_FLAG_RUN_FLOW, "work end %d\n", hw->dec_result); + if (vdec->parallel_dec == 1) + vdec_core_finish_run(hw_to_vdec(hw), CORE_MASK_VDEC_1); + else + vdec_core_finish_run(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + if (hw->vdec_cb) { + hw->vdec_cb(hw_to_vdec(hw), hw->vdec_cb_arg); + debug_print(hw, 0x80000, + "%s:\n", __func__); + } +} + + +static void reset_process_time(struct vdec_avs_hw_s *hw) +{ + if (!hw->m_ins_flag) + return; + if (hw->start_process_time) { + unsigned process_time = + 1000 * (jiffies - hw->start_process_time) / HZ; + hw->start_process_time = 0; + if (process_time > max_process_time[DECODE_ID(hw)]) + max_process_time[DECODE_ID(hw)] = 
process_time; + } +} +static void start_process_time(struct vdec_avs_hw_s *hw) +{ + hw->decode_timeout_count = 2; + hw->start_process_time = jiffies; +} + +static void handle_decoding_error(struct vdec_avs_hw_s *hw) +{ + int i; + unsigned long flags; + struct vframe_s *vf; + spin_lock_irqsave(&lock, flags); + for (i = 0; i < VF_POOL_SIZE; i++) { + vf = &hw->vfpool[i].vf; + if (vf->index < hw->vf_buf_num_used) { + hw->vfpool[i].detached = 1; + hw->vfbuf_use[vf->index] = 0; + } + } + if (error_handle_policy & 0x2) { + while (!kfifo_is_empty(&hw->display_q)) { + if (kfifo_get(&hw->display_q, &vf)) { + if (buf_of_vf(vf)->detached !=0) { + debug_print(hw, PRINT_FLAG_DECODING, + "%s recycle %d => newframe_q\n", + __func__, + vf->index); + vf->index = hw->vf_buf_num_used; + buf_of_vf(vf)->detached = 0; + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + } + } + + } + } + clear_pts_buf(hw); + hw->decode_pic_count = 0; + hw->reset_decode_flag = 1; + hw->pre_parser_wr_ptr = 0; + hw->buf_status = 0; + hw->throw_pb_flag = 1; + spin_unlock_irqrestore(&lock, flags); +} + +static void timeout_process(struct vdec_avs_hw_s *hw) +{ + struct vdec_s *vdec = hw_to_vdec(hw); + amvdec_stop(); + if (error_handle_policy & 0x1) { + handle_decoding_error(hw); + } else { + vavs_save_regs(hw); + + //if (hw->decode_pic_count == 0) + hw->decode_pic_count++; + if ((hw->decode_pic_count & 0xffff) == 0) { + /*make ucode do not handle it as first picture*/ + hw->decode_pic_count++; + } + } + hw->dec_result = DEC_RESULT_DONE; + + debug_print(hw, PRINT_FLAG_ERROR, + "%s decoder timeout, status=%d, level=%d, bit_cnt=0x%x\n", + __func__, vdec->status, READ_VREG(VLD_MEM_VIFIFO_LEVEL), READ_VREG(VIFF_BIT_CNT)); + reset_process_time(hw); + vdec_schedule_work(&hw->work); +} + + +static void recycle_frame_bufferin(struct vdec_avs_hw_s *hw) +{ + if (!kfifo_is_empty(&hw->recycle_q) && (READ_VREG(AVS_BUFFERIN) == 0)) { + struct vframe_s *vf; + + if (kfifo_get(&hw->recycle_q, &vf)) { + if 
(buf_of_vf(vf)->detached) { + debug_print(hw, 0, + "%s recycle detached vf, index=%d detched %d used %d\n", + __func__, vf->index, + buf_of_vf(vf)->detached, + hw->vfbuf_use[vf->index]); + } + if ((vf->index < hw->vf_buf_num_used) && + (buf_of_vf(vf)->detached == 0) && + (--hw->vfbuf_use[vf->index] == 0)) { + hw->buf_recycle_status |= (1 << vf->index); + WRITE_VREG(AVS_BUFFERIN, ~(1 << vf->index)); + debug_print(hw, PRINT_FLAG_DECODING, + "%s WRITE_VREG(AVS_BUFFERIN, 0x%x) for vf index of %d => buf_recycle_status 0x%x\n", + __func__, + READ_VREG(AVS_BUFFERIN), vf->index, + hw->buf_recycle_status); + } + vf->index = hw->vf_buf_num_used; + buf_of_vf(vf)->detached = 0; + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + } + + } + +} + +static void recycle_frames(struct vdec_avs_hw_s *hw) +{ + while (!kfifo_is_empty(&hw->recycle_q)) { + struct vframe_s *vf; + + if (kfifo_get(&hw->recycle_q, &vf)) { + if (buf_of_vf(vf)->detached) { + debug_print(hw, 0, + "%s recycle detached vf, index=%d detched %d used %d\n", + __func__, vf->index, + buf_of_vf(vf)->detached, + hw->vfbuf_use[vf->index]); + } + + + if ((vf->index < hw->vf_buf_num_used) && + (buf_of_vf(vf)->detached == 0) && + (--hw->vfbuf_use[vf->index] == 0)) { + hw->buf_recycle_status |= (1 << vf->index); + debug_print(hw, PRINT_FLAG_DECODING, + "%s for vf index of %d => buf_recycle_status 0x%x\n", + __func__, + vf->index, + hw->buf_recycle_status); + } + vf->index = hw->vf_buf_num_used; + buf_of_vf(vf)->detached = 0; + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + } + + } + +} + + +static void check_timer_func(struct timer_list *timer) +{ + struct vdec_avs_hw_s *hw = container_of(timer, + struct vdec_avs_hw_s, check_timer); + struct vdec_s *vdec = hw_to_vdec(hw); + unsigned int timeout_val = decode_timeout_val; + unsigned long flags; + + if (hw->m_ins_flag && + (debug & + DEBUG_WAIT_DECODE_DONE_WHEN_STOP) == 0 && + vdec->next_status == + VDEC_STATUS_DISCONNECTED) { + hw->dec_result = 
DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + debug_print(hw, + 0, "vdec requested to be disconnected\n"); + return; + } + + /*recycle*/ + if (!hw->m_ins_flag) { + spin_lock_irqsave(&lock, flags); + recycle_frame_bufferin(hw); + spin_unlock_irqrestore(&lock, flags); + } + + if (hw->m_ins_flag) { + if ((READ_VREG(AV_SCRATCH_5) & 0xf) != 0 && + (READ_VREG(AV_SCRATCH_5) & 0xff00) != 0){ + /*ucode buffer empty*/ + if ((kfifo_len(&hw->recycle_q) == 0) && + (kfifo_len(&hw->display_q) == 0)) { + debug_print(hw, + 0, "AV_SCRATCH_5=0x%x, recover ucode buffer_status\n", + READ_VREG(AV_SCRATCH_5)); + WRITE_VREG(AV_SCRATCH_5, 0x10); + /*let ucode to recover buffer_status*/ + } + } + } + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + + if (udebug_flag != hw->old_udebug_flag) { + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + hw->old_udebug_flag = udebug_flag; + } + if (dbg_cmd != 0) { + if (dbg_cmd == 1) { + int r = vdec_sync_input(vdec); + dbg_cmd = 0; + pr_info( + "vdec_sync_input=>0x%x, (lev %x, wp %x rp %x, prp %x, pwp %x)\n", + r, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + STBUF_READ(&vdec->vbuf, get_rp), + STBUF_READ(&vdec->vbuf, get_wp)); + } + } + + if ((debug & DEBUG_FLAG_DISABLE_TIMEOUT) == 0 && + (timeout_val > 0) && + (hw->start_process_time > 0) && + ((1000 * (jiffies - hw->start_process_time) / HZ) + > timeout_val)) { + if (hw->last_vld_level == READ_VREG(VLD_MEM_VIFIFO_LEVEL)) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + timeout_process(hw); + } + hw->last_vld_level = READ_VREG(VLD_MEM_VIFIFO_LEVEL); + } + + if (READ_VREG(AVS_SOS_COUNT)) { + if (!error_recovery_mode) { + amvdec_stop(); + if (error_handle_policy & 0x1) { + handle_decoding_error(hw); + } else { + vavs_save_regs(hw); + 
+ //if (hw->decode_pic_count == 0) + hw->decode_pic_count++; + if ((hw->decode_pic_count & 0xffff) == 0) { + /*make ucode do not handle it as first picture*/ + hw->decode_pic_count++; + } + } + hw->dec_result = DEC_RESULT_DONE; + + debug_print(hw, PRINT_FLAG_ERROR, + "%s decoder error, status=%d, level=%d, AVS_SOS_COUNT=0x%x\n", + __func__, vdec->status, READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(AVS_SOS_COUNT)); + reset_process_time(hw); + vdec_schedule_work(&hw->work); + } + } + + if ((hw->ucode_pause_pos != 0) && + (hw->ucode_pause_pos != 0xffffffff) && + udebug_pause_pos != hw->ucode_pause_pos) { + hw->ucode_pause_pos = 0; + WRITE_VREG(DEBUG_REG1, 0); + } + + if (vdec->next_status == VDEC_STATUS_DISCONNECTED) { + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + pr_info("vdec requested to be disconnected\n"); + return; + } + + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static int avs_hw_ctx_restore(struct vdec_avs_hw_s *hw) +{ + /*int r = 0;*/ + vavs_prot_init(hw); + + return 0; +} + +static unsigned char get_data_check_sum + (struct vdec_avs_hw_s *hw, int size) +{ + int jj; + int sum = 0; + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + for (jj = 0; jj < size; jj++) + sum += data[jj]; + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static void run(struct vdec_s *vdec, unsigned long mask, +void (*callback)(struct vdec_s *, void *), + void *arg) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + int save_reg; + int size, ret; + if (!hw->vdec_pg_enable_flag) { + hw->vdec_pg_enable_flag = 1; + amvdec_enable(); + } + save_reg = READ_VREG(POWER_CTL_VLD); + /* reset everything except DOS_TOP[1] and APB_CBUS[0]*/ + debug_print(hw, PRINT_FLAG_RUN_FLOW,"run in\n"); + if (vdec_stream_based(vdec)) { + 
hw->pre_parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + } +#if 1 +#if DEBUG_MULTI_FLAG > 0 + if (hw->decode_pic_count == 0) { +#endif + WRITE_VREG(DOS_SW_RESET0, 0xfffffff0); + WRITE_VREG(DOS_SW_RESET0, 0); + WRITE_VREG(POWER_CTL_VLD, save_reg); + hw->run_count++; + run_count[DECODE_ID(hw)] = hw->run_count; + vdec_reset_core(vdec); +#if DEBUG_MULTI_FLAG > 0 + } +#endif +#else + vdec_reset_core(vdec); +#endif + hw->vdec_cb_arg = arg; + hw->vdec_cb = callback; + + size = vdec_prepare_input(vdec, &hw->chunk); + if (debug & DEBUG_FLAG_PREPARE_MORE_INPUT) { + if (size < start_decode_buf_level) { + /*debug_print(hw, PRINT_FLAG_VLD_DETAIL, + "DEC_RESULT_AGAIN %x %x %x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP));*/ + + hw->input_empty++; + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + return; + } + } else { + if (size < 0) { + hw->input_empty++; + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + return; + } + } + if (input_frame_based(vdec)) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + if (debug & PRINT_FLAG_RUN_FLOW + ) { + debug_print(hw, 0, + "%s decode_pic_count %d buf_recycle_status 0x%x: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. 
%02x %02x %02x %02x\n", + __func__, hw->decode_pic_count, + hw->buf_recycle_status, + size, get_data_check_sum(hw, size), + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + } + if (debug & PRINT_FRAMEBASE_DATA + ) { + int jj; + + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + debug_print(hw, + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + debug_print(hw, + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + debug_print(hw, + PRINT_FRAMEBASE_DATA, + "\n"); + } + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } else + debug_print(hw, PRINT_FLAG_RUN_FLOW, + "%s decode_pic_count %d buf_recycle_status 0x%x: %x %x %x %x %x size 0x%x\n", + __func__, + hw->decode_pic_count, + hw->buf_recycle_status, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + STBUF_READ(&vdec->vbuf, get_rp), + STBUF_READ(&vdec->vbuf, get_wp), + size); + + + hw->input_empty = 0; + debug_print(hw, PRINT_FLAG_RUN_FLOW, + "%s,%d, size=%d\n", __func__, __LINE__, size); + + /*vdec_enable_input(vdec); + need run after VC1_CONTROL_REG is configured + */ + hw->init_flag = 1; + + if (hw->chunk) + debug_print(hw, PRINT_FLAG_RUN_FLOW, + "input chunk offset %d, size %d\n", + hw->chunk->offset, hw->chunk->size); + + hw->dec_result = DEC_RESULT_NONE; + /*vdec->mc_loaded = 0;*/ + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else { + ret = amvdec_vdec_loadmc_buf_ex(VFORMAT_AVS, "avs_multi", vdec, + hw->fw->data, hw->fw->len); + if (ret < 0) { + pr_err("[%d] %s: the %s fw loading failed, err: %x\n", vdec->id, + hw->fw->name, tee_enabled() ? 
"TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_AVS; + } + if (avs_hw_ctx_restore(hw) < 0) { + hw->dec_result = DEC_RESULT_ERROR; + debug_print(hw, PRINT_FLAG_ERROR, + "ammvdec_avs: error HW context restore\n"); + vdec_schedule_work(&hw->work); + return; + } + + /* + This configureation of VC1_CONTROL_REG will + pop bits (even no data in the stream buffer) if input is enabled, + so it can only be configured before vdec_enable_input() is called. + So move this code from ucode to here + */ +#define DISABLE_DBLK_HCMD 0 +#define DISABLE_MC_HCMD 0 + WRITE_VREG(VC1_CONTROL_REG, (DISABLE_DBLK_HCMD<<6) | + (DISABLE_MC_HCMD<<5) | (1 << 7) | (0xc <<8) | (1<<14)); + if (vdec_frame_based(vdec)) { + size = hw->chunk->size + + (hw->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + } + + + vdec_enable_input(vdec); + /**/ + + /*wmb();*/ + hw->stat |= STAT_MC_LOAD; + hw->last_vld_level = 0; + + debug_print(hw, PRINT_FLAG_DECODING, + "%s READ_VREG(AVS_BUFFERIN)=0x%x, recycle_q num %d\n", + __func__, READ_VREG(AVS_BUFFERIN), + kfifo_len(&hw->recycle_q)); + + WRITE_VREG(VIFF_BIT_CNT, size * 8); + if (hw->reset_decode_flag) + WRITE_VREG(DECODE_STATUS, 0); + else { + recycle_frames(hw); + avs_pts_check_in(hw, + hw->decode_pic_count & 0xffff, + hw->chunk); + + WRITE_VREG(DECODE_STATUS, + (hw->decode_pic_count & 0xffff) | + ((~hw->buf_recycle_status) << 16)); + } + + hw->reset_decode_flag = 0; + //hw->decode_status_skip_pic_done_flag = 0; + start_process_time(hw); +#if DEBUG_MULTI_FLAG == 1 + if (hw->decode_pic_count > 0) + WRITE_VREG(DECODE_STATUS, 0xff); + else +#endif + amvdec_start(); + hw->stat |= STAT_VDEC_RUN; + + hw->stat |= STAT_TIMER_ARM; + + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static void reset(struct vdec_s *vdec) +{ +} + +static irqreturn_t vmavs_isr_thread_fn(struct vdec_s *vdec, int irq) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s 
*)vdec->private; + u32 reg; + struct vframe_s *vf = NULL; + u32 dur; + u32 repeat_count; + u32 picture_type; + u32 buffer_index; + u32 frame_size; + bool force_interlaced_frame = false; + unsigned int pts, pts_valid = 0, offset = 0; + u64 pts_us64; + u32 debug_tag; + u32 buffer_status_debug; + //struct vdec_avs_hw_s *hw = (struct vdec_avs_hw_s *)dev_id; + + /*if (debug & AVS_DEBUG_UCODE) { + if (READ_VREG(AV_SCRATCH_E) != 0) { + pr_info("dbg%x: %x\n", READ_VREG(AV_SCRATCH_E), + READ_VREG(AV_SCRATCH_D)); + WRITE_VREG(AV_SCRATCH_E, 0); + } + }*/ + + debug_print(hw, PRINT_FLAG_RUN_FLOW, "READ_VREG(AVS_BUFFEROUT) 0x%x, READ_VREG(DECODE_STATUS) 0x%x READ_VREG(AV_SCRATCH_N) 0x%x, READ_VREG(DEBUG_REG1) 0x%x\n", + READ_VREG(AVS_BUFFEROUT),READ_VREG(DECODE_STATUS), READ_VREG(AV_SCRATCH_N), READ_VREG(DEBUG_REG1)); + + debug_tag = READ_VREG(DEBUG_REG1); + buffer_status_debug = debug_tag >> 16; + debug_tag &= 0xffff; + /* if (debug_tag & 0x10000) { + int i; + dma_sync_single_for_cpu( + amports_get_dma_device(), + hw->lmem_phy_addr, + LMEM_BUF_SIZE, + DMA_FROM_DEVICE); + + debug_print(hw, 0, + "LMEM<tag %x>:\n", debug_tag); + + for (i = 0; i < 0x400; i += 4) { + int ii; + unsigned short *lmem_ptr = hw->lmem_addr; + if ((i & 0xf) == 0) + debug_print_cont(hw, 0, "%03x: ", i); + for (ii = 0; ii < 4; ii++) { + debug_print_cont(hw, 0, "%04x ", + lmem_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + debug_print_cont(hw, 0, "\n"); + } + + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == hw->decode_pic_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_VREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + } + else if (debug_tag & 0x20000) + hw->ucode_pause_pos = 0xffffffff; + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_VREG(DEBUG_REG1, 0); + } else*/ if (debug_tag != 0) { + debug_print(hw, 1, + "dbg%x: %x buffer_status 0x%x l/w/r 
%x %x %x bitcnt %x AVAIL %x\n", + debug_tag, + READ_VREG(DEBUG_REG2), + buffer_status_debug, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VIFF_BIT_CNT), + READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL)); + + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == hw->decode_pic_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_VREG(DEBUG_REG2)) && + (udebug_pause_ins_id == 0 || + DECODE_ID(hw) == (udebug_pause_ins_id -1))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + if (debug & DEBUG_PIC_DONE_WHEN_UCODE_PAUSE) { + hw->decode_pic_count++; + if ((hw->decode_pic_count & 0xffff) == 0) { + /*make ucode do not handle it as first picture*/ + hw->decode_pic_count++; + } + reset_process_time(hw); + hw->dec_result = DEC_RESULT_DONE; + amvdec_stop(); + vavs_save_regs(hw); + debug_print(hw, PRINT_FLAG_DECODING, + "%s ucode pause, force done, decode_pic_count = %d, bit_cnt=0x%x\n", + __func__, + hw->decode_pic_count, + READ_VREG(VIFF_BIT_CNT)); + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + } + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_VREG(DEBUG_REG1, 0); + return IRQ_HANDLED; + } else { + debug_print(hw, PRINT_FLAG_DECODING, + "%s decode_status 0x%x, buffer_status 0x%x\n", + __func__, + READ_VREG(DECODE_STATUS), + buffer_status_debug); + } + +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0 && READ_VREG(LONG_CABAC_REQ)) { +#ifdef PERFORMANCE_DEBUG + pr_info("%s:schedule long_cabac_wd_work\r\n", __func__); +#endif + pr_info("schedule long_cabac_wd_work and requested from %d\n", + (READ_VREG(LONG_CABAC_REQ) >> 8)&0xFF); + schedule_work(&long_cabac_wd_work); + } +#endif + +#ifdef ENABLE_USER_DATA + if (UserDataHandler(hw)) + return IRQ_HANDLED; +#endif + reg = READ_VREG(AVS_BUFFEROUT); + if (reg) { + unsigned short decode_pic_count + = READ_VREG(DECODE_PIC_COUNT); + 
debug_print(hw, PRINT_FLAG_DECODING, "AVS_BUFFEROUT=0x%x decode_pic_count %d\n", + reg, decode_pic_count); + if (pts_by_offset) { + offset = READ_VREG(AVS_OFFSET_REG); + debug_print(hw, PRINT_FLAG_DECODING, "AVS OFFSET=%x\n", offset); + if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) { + if (pts_lookup_offset_us64(PTS_TYPE_VIDEO, offset, &pts, + &frame_size, 0, &pts_us64) == 0) { + pts_valid = 1; +#ifdef DEBUG_PTS + hw->pts_hit++; +#endif + } else { +#ifdef DEBUG_PTS + hw->pts_missed++; +#endif + } + } + } + + repeat_count = READ_VREG(AVS_REPEAT_COUNT); +#ifdef USE_DYNAMIC_BUF_NUM + buffer_index = + ((reg & 0x7) + + (((reg >> 8) & 0x3) << 3) - 1) & 0x1f; +#else + if (firmware_sel == 0) + buffer_index = + ((reg & 0x7) + + (((reg >> 8) & 0x3) << 3) - 1) & 0x1f; + else + buffer_index = + ((reg & 0x7) - 1) & 3; +#endif + picture_type = (reg >> 3) & 7; +#ifdef DEBUG_PTS + if (picture_type == I_PICTURE) { + /* pr_info("I offset 0x%x, pts_valid %d\n", + * offset, pts_valid); + */ + if (!pts_valid) + hw->pts_i_missed++; + else + hw->pts_i_hit++; + } +#endif + + if ((dec_control & DEC_CONTROL_FLAG_FORCE_2500_1080P_INTERLACE) + && hw->frame_width == 1920 && hw->frame_height == 1080) { + force_interlaced_frame = true; + } + + if (hw->throw_pb_flag && picture_type != I_PICTURE) { + + debug_print(hw, PRINT_FLAG_DECODING, + "%s WRITE_VREG(AVS_BUFFERIN, 0x%x) for throwing picture with type of %d\n", + __func__, + ~(1 << buffer_index), picture_type); + + WRITE_VREG(AVS_BUFFERIN, ~(1 << buffer_index)); + } else if (reg & INTERLACE_FLAG || force_interlaced_frame) { /* interlace */ + hw->throw_pb_flag = 0; + + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "interlace, picture type %d\n", + picture_type); + + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + set_frame_info(hw, vf, &dur); + vf->bufWidth = 1920; + hw->pic_type = 2; + if ((picture_type == I_PICTURE) && pts_valid) { + vf->pts = pts; + 
vf->pts_us64 = pts_us64; + if ((repeat_count > 1) && hw->avi_flag) { + /* hw->next_pts = pts + + * (hw->vavs_amstream_dec_info.rate * + * repeat_count >> 1)*15/16; + */ + hw->next_pts = + pts + + (dur * repeat_count >> 1) * + 15 / 16; + } else + hw->next_pts = 0; + } else { + vf->pts = hw->next_pts; + if (vf->pts == 0) { + vf->pts_us64 = 0; + } + if ((repeat_count > 1) && hw->avi_flag) { + /* vf->duration = + * hw->vavs_amstream_dec_info.rate * + * repeat_count >> 1; + */ + vf->duration = dur * repeat_count >> 1; + if (hw->next_pts != 0) { + hw->next_pts += + ((vf->duration) - + ((vf->duration) >> 4)); + } + } else { + /* vf->duration = + * hw->vavs_amstream_dec_info.rate >> 1; + */ + vf->duration = dur >> 1; + hw->next_pts = 0; + } + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->duration_pulldown = 0; + if (force_interlaced_frame) { + vf->type = VIDTYPE_INTERLACE_TOP; + }else{ + vf->type = + (reg & TOP_FIELD_FIRST_FLAG) + ? VIDTYPE_INTERLACE_TOP + : VIDTYPE_INTERLACE_BOTTOM; + } +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + if (hw->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + + vf->canvas0_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas0_config[1] = hw->canvas_config[buffer_index][1]; + + vf->canvas1_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas1_config[1] = hw->canvas_config[buffer_index][1]; + } else + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "buffer_index %d, canvas addr %x\n", + buffer_index, vf->canvas0Addr); + vf->pts = (pts_valid)?pts:0; + //vf->pts_us64 = (pts_valid) ? 
pts_us64 : 0; + hw->vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, + buffer_index); + + if (hw->m_ins_flag && vdec_frame_based(hw_to_vdec(hw))) + set_vframe_pts(hw, decode_pic_count, vf); + + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = + (((u64)vf->duration << 32) & 0xffffffff00000000) | offset; + vf->pts = 0; + } + + debug_print(hw, PRINT_FLAG_PTS, + "interlace1 vf->pts = %d, vf->pts_us64 = %lld, pts_valid = %d\n", vf->pts, vf->pts_us64, pts_valid); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->pts); + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + pr_info("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + set_frame_info(hw, vf, &dur); + vf->bufWidth = 1920; + if (force_interlaced_frame) + vf->pts = 0; + else + vf->pts = hw->next_pts; + + if (vf->pts == 0) { + vf->pts_us64 = 0; + } + + if ((repeat_count > 1) && hw->avi_flag) { + /* vf->duration = hw->vavs_amstream_dec_info.rate * + * repeat_count >> 1; + */ + vf->duration = dur * repeat_count >> 1; + if (hw->next_pts != 0) { + hw->next_pts += + ((vf->duration) - + ((vf->duration) >> 4)); + } + } else { + /* vf->duration = hw->vavs_amstream_dec_info.rate + * >> 1; + */ + vf->duration = dur >> 1; + hw->next_pts = 0; + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->duration_pulldown = 0; + if (force_interlaced_frame) { + vf->type = VIDTYPE_INTERLACE_BOTTOM; + } else { + vf->type = + (reg & TOP_FIELD_FIRST_FLAG) ? 
+ VIDTYPE_INTERLACE_BOTTOM : + VIDTYPE_INTERLACE_TOP; + } +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + if (hw->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + + vf->canvas0_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas0_config[1] = hw->canvas_config[buffer_index][1]; + + vf->canvas1_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas1_config[1] = hw->canvas_config[buffer_index][1]; + } else + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + vf->pts_us64 = 0; + hw->vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, + buffer_index); + + if (hw->m_ins_flag && vdec_frame_based(hw_to_vdec(hw))) + set_vframe_pts(hw, decode_pic_count, vf); + + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = (u64)-1; + vf->pts = 0; + } + debug_print(hw, PRINT_FLAG_PTS, + "interlace2 vf->pts = %d, vf->pts_us64 = %lld, pts_valid = %d\n", vf->pts, vf->pts_us64, pts_valid); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->pts); + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + hw->total_frame++; + } else { /* progressive */ + hw->throw_pb_flag = 0; + + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "progressive picture type %d\n", + picture_type); + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + set_frame_info(hw, vf, &dur); + vf->bufWidth = 1920; + hw->pic_type = 1; + + if ((picture_type == I_PICTURE) && pts_valid) { + vf->pts = pts; + if ((repeat_count > 1) && hw->avi_flag) { + /* hw->next_pts = pts + + * (hw->vavs_amstream_dec_info.rate * + * repeat_count)*15/16; + */ + hw->next_pts = + pts + + (dur * repeat_count) * 15 / 16; + } else + hw->next_pts = 0; + } else { + vf->pts = hw->next_pts; + if (vf->pts 
== 0) { + vf->pts_us64 = 0; + } + if ((repeat_count > 1) && hw->avi_flag) { + /* vf->duration = + * hw->vavs_amstream_dec_info.rate * + * repeat_count; + */ + vf->duration = dur * repeat_count; + if (hw->next_pts != 0) { + hw->next_pts += + ((vf->duration) - + ((vf->duration) >> 4)); + } + } else { + /* vf->duration = + * hw->vavs_amstream_dec_info.rate; + */ + vf->duration = dur; + hw->next_pts = 0; + } + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->duration_pulldown = 0; + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + if (hw->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + + vf->canvas0_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas0_config[1] = hw->canvas_config[buffer_index][1]; + + vf->canvas1_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas1_config[1] = hw->canvas_config[buffer_index][1]; + } else + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + + vf->pts = (pts_valid)?pts:0; + //vf->pts_us64 = (pts_valid) ? 
pts_us64 : 0; + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "buffer_index %d, canvas addr %x\n", + buffer_index, vf->canvas0Addr); + debug_print(hw, PRINT_FLAG_PTS, + "progressive vf->pts = %d, vf->pts_us64 = %lld, pts_valid = %d\n", vf->pts, vf->pts_us64, pts_valid); + hw->vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, + buffer_index); + + if (hw->m_ins_flag && vdec_frame_based(hw_to_vdec(hw))) + set_vframe_pts(hw, decode_pic_count, vf); + + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = + (((u64)vf->duration << 32) & 0xffffffff00000000) | offset; + vf->pts = 0; + } + decoder_do_frame_check(hw_to_vdec(hw), vf); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->pts); + ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q)); + ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q)); + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + hw->total_frame++; + } + + /*count info*/ + vdec_count_info(hw->gvs, 0, offset); + if (offset) { + if (picture_type == I_PICTURE) { + hw->gvs->i_decoded_frames++; + } else if (picture_type == P_PICTURE) { + hw->gvs->p_decoded_frames++; + } else if (picture_type == B_PICTURE) { + hw->gvs->b_decoded_frames++; + } + } + avs_update_gvs(hw); + vdec_fill_vdec_frame(hw_to_vdec(hw), NULL, hw->gvs, vf, 0); + + /* pr_info("PicType = %d, PTS = 0x%x\n", + * picture_type, vf->pts); + */ + WRITE_VREG(AVS_BUFFEROUT, 0); + } + //WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + + if (hw->m_ins_flag) { + u32 status_reg = READ_VREG(DECODE_STATUS); + u32 decode_status = status_reg & 0xff; + if (hw->dec_result == DEC_RESULT_DONE || + hw->dec_result == DEC_RESULT_AGAIN) { + debug_print(hw, PRINT_FLAG_DECODING, + "%s !!! 
READ_VREG(DECODE_STATUS) = 0x%x, decode_status 0x%x, buf_status 0x%x, dec_result = 0x%x, decode_pic_count = %d bit_cnt=0x%x\n", + __func__, status_reg, decode_status, + hw->buf_status, + hw->dec_result, hw->decode_pic_count, + READ_VREG(VIFF_BIT_CNT)); + return IRQ_HANDLED; + } else if (decode_status == DECODE_STATUS_PIC_DONE || + decode_status == DECODE_STATUS_SKIP_PIC_DONE) { + hw->buf_status = (status_reg >> 16) & 0xffff; + if (decode_status == DECODE_STATUS_SKIP_PIC_DONE) { + hw->decode_status_skip_pic_done_flag = 1; + hw->decode_decode_cont_start_code = (status_reg >> 8) & 0xff; + } else + hw->decode_status_skip_pic_done_flag = 0; + hw->decode_pic_count++; + if ((hw->decode_pic_count & 0xffff) == 0) { + /*make ucode do not handle it as first picture*/ + hw->decode_pic_count++; + } + reset_process_time(hw); + hw->dec_result = DEC_RESULT_DONE; +#if DEBUG_MULTI_FLAG == 1 + WRITE_VREG(DECODE_STATUS, 0); +#else + amvdec_stop(); +#endif + vavs_save_regs(hw); + debug_print(hw, PRINT_FLAG_DECODING, + "%s %s, READ_VREG(DECODE_STATUS) = 0x%x, decode_status 0x%x, buf_status 0x%x, dec_result = 0x%x, decode_pic_count = %d, bit_cnt=0x%x\n", + __func__, + (decode_status == DECODE_STATUS_PIC_DONE) ? 
+ "DECODE_STATUS_PIC_DONE" : "DECODE_STATUS_SKIP_PIC_DONE", + status_reg, decode_status, + hw->buf_status, + hw->dec_result, hw->decode_pic_count, + READ_VREG(VIFF_BIT_CNT)); + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } else if (decode_status == DECODE_STATUS_DECODE_BUF_EMPTY || + decode_status == DECODE_STATUS_SEARCH_BUF_EMPTY) { + hw->buf_status = (status_reg >> 16) & 0xffff; + reset_process_time(hw); +#if DEBUG_MULTI_FLAG == 1 + WRITE_VREG(DECODE_STATUS, 0); +#else + amvdec_stop(); +#endif + if (vdec_frame_based(hw_to_vdec(hw))) { + hw->dec_result = DEC_RESULT_DONE; + //if (hw->decode_pic_count == 0) { + hw->decode_pic_count++; + //} + if ((hw->decode_pic_count & 0xffff) == 0) { + /*make ucode do not handle it as first picture*/ + hw->decode_pic_count++; + } + vavs_save_regs(hw); + } else + hw->dec_result = DEC_RESULT_AGAIN; + + debug_print(hw, PRINT_FLAG_DECODING, + "%s BUF_EMPTY, READ_VREG(DECODE_STATUS) = 0x%x, decode_status 0x%x, buf_status 0x%x, scratch_8 (AVS_BUFFERIN) 0x%x, dec_result = 0x%x, decode_pic_count = %d, bit_cnt=0x%x, hw->decode_status_skip_pic_done_flag = %d, hw->decode_decode_cont_start_code = 0x%x\n", + __func__, status_reg, decode_status, + hw->buf_status, + hw->reg_scratch_8, + hw->dec_result, hw->decode_pic_count, + READ_VREG(VIFF_BIT_CNT), hw->decode_status_skip_pic_done_flag, hw->decode_decode_cont_start_code); + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + } + + +#ifdef HANDLE_AVS_IRQ + return IRQ_HANDLED; +#else + return; +#endif +} + +static irqreturn_t vmavs_isr(struct vdec_s *vdec, int irq) +{ + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + return IRQ_WAKE_THREAD; + //return vavs_isr(0, hw); + +} + +static void vmavs_dump_state(struct vdec_s *vdec) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + int i; + debug_print(hw, 0, + "====== %s\n", __func__); + + debug_print(hw, 0, + "width/height (%d/%d), dur %d\n", + hw->frame_width, + hw->frame_height, + hw->frame_dur + ); + + 
debug_print(hw, 0, + "is_framebase(%d), decode_status 0x%x, buf_status 0x%x, buf_recycle_status 0x%x, throw %d, eos %d, state 0x%x, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d\n", + vdec_frame_based(vdec), + READ_VREG(DECODE_STATUS) & 0xff, + hw->buf_status, + hw->buf_recycle_status, + hw->throw_pb_flag, + hw->eos, + hw->stat, + hw->dec_result, + hw->decode_pic_count, + hw->display_frame_count, + hw->run_count, + hw->not_run_ready, + hw->input_empty + ); + + if (vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + debug_print(hw, 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + + debug_print(hw, 0, + "%s, newq(%d/%d), dispq(%d/%d)recycleq(%d/%d) drop %d vf peek %d, prepare/get/put (%d/%d/%d)\n", + __func__, + kfifo_len(&hw->newframe_q), + VF_POOL_SIZE, + kfifo_len(&hw->display_q), + VF_POOL_SIZE, + kfifo_len(&hw->recycle_q), + VF_POOL_SIZE, + hw->drop_frame_count, + hw->peek_num, + hw->prepare_num, + hw->get_num, + hw->put_num + ); + + debug_print(hw, 0, "vfbuf_use:\n"); + for (i = 0; i < hw->vf_buf_num_used; i++) + debug_print(hw, 0, "%d: vf_buf_use %d\n", + i, hw->vfbuf_use[i]); + + debug_print(hw, 0, + "DECODE_STATUS=0x%x\n", + READ_VREG(DECODE_STATUS)); + debug_print(hw, 0, + "MPC_E=0x%x\n", + READ_VREG(MPC_E)); + debug_print(hw, 0, + "DECODE_MODE=0x%x\n", + READ_VREG(DECODE_MODE)); + debug_print(hw, 0, + "wait_buf_status, AV_SCRATCH_5=0x%x\n", + READ_VREG(AV_SCRATCH_5)); + debug_print(hw, 0, + "MBY_MBX=0x%x\n", + READ_VREG(MBY_MBX)); + debug_print(hw, 0, + "VIFF_BIT_CNT=0x%x\n", + READ_VREG(VIFF_BIT_CNT)); + debug_print(hw, 0, + "VLD_MEM_VIFIFO_LEVEL=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + debug_print(hw, 0, + "VLD_MEM_VIFIFO_WP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP)); + debug_print(hw, 0, + "VLD_MEM_VIFIFO_RP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_RP)); + debug_print(hw, 0, + 
"PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + debug_print(hw, 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (vdec_frame_based(vdec) && + (debug & PRINT_FRAMEBASE_DATA) + ) { + int jj; + if (hw->chunk && hw->chunk->block && + hw->chunk->size > 0) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, hw->chunk->size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + debug_print(hw, 0, + "frame data size 0x%x\n", + hw->chunk->size); + for (jj = 0; jj < hw->chunk->size; jj++) { + if ((jj & 0xf) == 0) + debug_print(hw, + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + debug_print_cont(hw, + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + debug_print_cont(hw, + PRINT_FRAMEBASE_DATA, + "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } + +} + + int ammvdec_avs_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_avs_hw_s *hw = NULL; + int r = 0; + + if (vdec_get_debug_flags() & 0x8) + return amvdec_avs_probe(pdev); + + pr_info("ammvdec_avs probe start.\n"); + + if (pdata == NULL) { + pr_info("ammvdec_avs platform data undefined.\n"); + return -EFAULT; + } + + hw = (struct vdec_avs_hw_s *)vzalloc(sizeof(struct vdec_avs_hw_s)); + if (hw == NULL) { + pr_info("\nammvdec_avs decoder driver alloc failed\n"); + return -ENOMEM; + } + /*atomic_set(&hw->error_handler_run, 0);*/ + hw->m_ins_flag = 1; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM || disable_longcabac_trans) + firmware_sel = 1; + + if (firmware_sel == 1) { +#ifndef USE_DYNAMIC_BUF_NUM + vf_buf_num = 4; +#endif + canvas_base = 0; + canvas_num = 3; + } else { + pr_info("Error, do not support longcabac work around!!!"); + r = -ENOMEM; + goto error1; + } + + if (pdata->sys_info) + hw->vavs_amstream_dec_info = *pdata->sys_info; + + 
hw->is_reset = 0; + pdata->user_data_read = NULL; + pdata->reset_userdata_fifo = NULL; + + pdata->private = hw; + pdata->dec_status = vavs_dec_status; + pdata->set_isreset = vavs_set_isreset; + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vmavs_isr; + pdata->threaded_irq_handler = vmavs_isr_thread_fn; + pdata->dump_state = vmavs_dump_state; + + snprintf(hw->vdec_name, sizeof(hw->vdec_name), + "avs-%d", pdev->id); + snprintf(hw->pts_name, sizeof(hw->pts_name), + "%s-pts", hw->vdec_name); + snprintf(hw->new_q_name, sizeof(hw->new_q_name), + "%s-newframe_q", hw->vdec_name); + snprintf(hw->disp_q_name, sizeof(hw->disp_q_name), + "%s-dispframe_q", hw->vdec_name); + + vavs_vdec_info_init(hw); + +#ifdef ENABLE_USER_DATA + if (NULL == hw->user_data_buffer) { + hw->user_data_buffer = + dma_alloc_coherent(amports_get_dma_device(), + USER_DATA_SIZE, + &hw->user_data_buffer_phys, GFP_KERNEL); + if (!hw->user_data_buffer) { + pr_info("%s: Can not allocate hw->user_data_buffer\n", + __func__); + r = -ENOMEM; + goto error2; + } + pr_debug("hw->user_data_buffer = 0x%p, hw->user_data_buffer_phys = 0x%x\n", + hw->user_data_buffer, (u32)hw->user_data_buffer_phys); + } +#endif + /*hw->lmem_addr = kmalloc(LMEM_BUF_SIZE, GFP_KERNEL); + if (hw->lmem_addr == NULL) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + return -1; + } + hw->lmem_phy_addr = dma_map_single(amports_get_dma_device(), + hw->lmem_addr, LMEM_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(amports_get_dma_device(), + hw->lmem_phy_addr)) { + pr_err("%s: failed to map lmem buffer\n", __func__); + kfree(hw->lmem_addr); + hw->lmem_addr = NULL; + return -1; + }*/ + /*INIT_WORK(&hw->set_clk_work, avs_set_clk);*/ + hw->lmem_addr = (dma_addr_t)dma_alloc_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, (dma_addr_t *)&hw->lmem_phy_addr, GFP_KERNEL); + if (hw->lmem_addr == 0) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + r = -1; + goto error3; + } 
+ + if (vavs_init(hw) < 0) { + pr_info("amvdec_avs init failed.\n"); + r = -ENODEV; + goto error4; + } + + /*INIT_WORK(&hw->fatal_error_wd_work, vavs_fatal_error_handler); + atomic_set(&hw->error_handler_run, 0);*/ +#if 0 +#ifdef ENABLE_USER_DATA + INIT_WORK(&hw->userdata_push_work, userdata_push_do_work); +#endif +#endif + INIT_WORK(&hw->notify_work, vavs_notify_work); + + if (pdata->use_vfm_path) { + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + hw->frameinfo_enable = 1; + } + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->canvas_spec[i] = 0xffffff; + } + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vavs_vf_provider, hw); + + platform_set_drvdata(pdev, pdata); + + hw->platform_dev = pdev; + + vdec_set_prepare_level(pdata, start_decode_buf_level); + + vdec_set_vframe_comm(pdata, DRIVER_NAME); + + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + + /*INIT_WORK(&hw->userdata_push_work, userdata_push_do_work);*/ + + return 0; + +error4: + dma_free_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, (void *)hw->lmem_addr, + hw->lmem_phy_addr); +error3: + dma_free_coherent( + amports_get_dma_device(), + USER_DATA_SIZE, + hw->user_data_buffer, + hw->user_data_buffer_phys); +error2: + kfree(hw->gvs); + hw->gvs = NULL; + pdata->dec_status = NULL; +error1: + vfree(hw); + return r; +} + + int ammvdec_avs_remove(struct platform_device *pdev) +{ + + if (vdec_get_debug_flags() & 0x8) + return amvdec_avs_remove(pdev); + else { + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec = hw_to_vdec(hw); + int i; + + if (hw->stat & 
STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + + cancel_work_sync(&hw->work); + cancel_work_sync(&hw->notify_work); + + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1); + else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + + if (vdec->parallel_dec == 1) { + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + vdec->free_canvas_ex(canvas_y(hw->canvas_spec[i]), vdec->id); + vdec->free_canvas_ex(canvas_u(hw->canvas_spec[i]), vdec->id); + } + } + #ifdef ENABLE_USER_DATA + if (hw->user_data_buffer != NULL) { + dma_free_coherent( + amports_get_dma_device(), + USER_DATA_SIZE, + hw->user_data_buffer, + hw->user_data_buffer_phys); + hw->user_data_buffer = NULL; + hw->user_data_buffer_phys = 0; + } + #endif + /*if (hw->lmem_addr) { + dma_unmap_single(amports_get_dma_device(), + hw->lmem_phy_addr, LMEM_BUF_SIZE, DMA_FROM_DEVICE); + kfree(hw->lmem_addr); + hw->lmem_addr = NULL; + }*/ + if (hw->lmem_addr) { + dma_free_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, (void *)hw->lmem_addr, + hw->lmem_phy_addr); + hw->lmem_addr = 0; + } + + if (hw->fw) { + vfree(hw->fw); + hw->fw = NULL; + } + + pr_info("ammvdec_avs removed.\n"); + if (hw->gvs) { + kfree(hw->gvs); + hw->gvs = NULL; + } + + vfree(hw); + return 0; + } +} + + +#ifdef DEBUG_MULTI_WITH_AUTOMODE +struct stream_buf_s *get_vbuf(void); +s32 esparser_init(struct stream_buf_s *buf, struct vdec_s *vdec); + + +static s32 vavs_init2(struct vdec_avs_hw_s *hw) +{ + int size = -1; + struct firmware_s *fw; + u32 fw_size = 0x1000 * 16; + + fw = vmalloc(sizeof(struct 
firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + pr_info("vavs_init\n"); + + amvdec_enable(); + + + vavs_local_init(hw); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) + size = get_firmware_data(VIDEO_DEC_AVS_MULTI, fw->data); + else { + if (firmware_sel == 1) + size = get_firmware_data(VIDEO_DEC_AVS_NOCABAC, fw->data); +#ifdef AVSP_LONG_CABAC + else { + init_avsp_long_cabac_buf(); + size = get_firmware_data(VIDEO_DEC_AVS_MULTI, fw->data); + } +#endif + } + + if (size < 0) { + amvdec_disable(); + pr_err("get firmware fail."); + /*vfree(buf);*/ + return -1; + } + + fw->len = size; + hw->fw = fw; + if (hw->m_ins_flag) { + init_timer(&hw->check_timer); + hw->check_timer.data = (ulong) hw; + hw->check_timer.function = check_timer_func; + hw->check_timer.expires = jiffies + CHECK_INTERVAL; + + + //add_timer(&hw->check_timer); + hw->stat |= STAT_TIMER_ARM; + + INIT_WORK(&hw->work, vavs_work); + + hw->fw = fw; + } + return 0; +} + +unsigned int debug_flag2; +static int vavs_prot_init2(struct vdec_avs_hw_s *hw, unsigned char post_flag) +{ + int r = 0; + /* + * 2: assist + * 3: vld_reset + * 4: vld_part_reset + * 5: vfifo reset + * 6: iqidct + * 7: mc + * 8: dblk + * 9: pic_dc + * 10: psc + * 11: mcpu + * 12: ccpu + * 13: ddr + * 14: afifo + */ + unsigned char run_flag; +#ifdef OOO + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) /*| (1 << 4)*/); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) /*| (1 << 4)*/); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 9) | (1 << 8)); + WRITE_VREG(DOS_SW_RESET0, 0); +#endif + /***************** reset vld **********************************/ +#ifdef OOO + WRITE_VREG(POWER_CTL_VLD, 0x10); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 2, MEM_FIFO_CNT_BIT, 2); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 8, MEM_LEVEL_CNT_BIT, 6); +#endif + if (start_decoding_delay & 0x80000) + msleep(start_decoding_delay&0xffff); + +if (debug_flag2 & 
0x1) + run_flag = post_flag; +else + run_flag = !post_flag; +if (run_flag) { + if (hw->m_ins_flag) { + int i; + if (hw->decode_pic_count == 0) { + r = vavs_canvas_init(hw); +#ifndef USE_DYNAMIC_BUF_NUM + for (i = 0; i < 4; i++) { + WRITE_VREG(AV_SCRATCH_0 + i, + hw->canvas_spec[i] + ); + } +#else + for (i = 0; i < hw->vf_buf_num_used; i += 2) { + WRITE_VREG(buf_spec_reg[i >> 1], + (hw->canvas_spec[i] & 0xffff) | + ((hw->canvas_spec[i + 1] & 0xffff) + << 16) + ); + } +#endif + } else + vavs_restore_regs(hw); + + for (i = 0; i < hw->vf_buf_num_used; i++) { + config_cav_lut_ex(canvas_y(hw->canvas_spec[i]), + hw->canvas_config[i][0].phy_addr, + hw->canvas_config[i][0].width, + hw->canvas_config[i][0].height, + CANVAS_ADDR_NOWRAP, + hw->canvas_config[i][0].block_mode, + 0, VDEC_1); + + config_cav_lut_ex(canvas_u(hw->canvas_spec[i]), + hw->canvas_config[i][1].phy_addr, + hw->canvas_config[i][1].width, + hw->canvas_config[i][1].height, + CANVAS_ADDR_NOWRAP, + hw->canvas_config[i][1].block_mode, + 0, VDEC_1); + } + } +} + +if (debug_flag2 & 0x2) + run_flag = post_flag; +else + run_flag = !post_flag; +if (run_flag) { + + /* notify ucode the buffer offset */ + if (hw->decode_pic_count == 0) + WRITE_VREG(AV_SCRATCH_F, hw->buf_offset); +#ifdef OOO + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); +#endif + } + if (start_decoding_delay & 0x40000) + msleep(start_decoding_delay&0xffff); + + if (debug_flag2 & 0x4) + run_flag = post_flag; + else + run_flag = !post_flag; + if (run_flag) { + if (hw->decode_pic_count == 0) { +#ifndef USE_DYNAMIC_BUF_NUM + WRITE_VREG(AVS_SOS_COUNT, 0); +#endif + WRITE_VREG(AVS_BUFFERIN, 0); + WRITE_VREG(AVS_BUFFEROUT, 0); + } + if (error_recovery_mode) + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 0); + else + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 1); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); +} + +if (debug_flag2 & 0x8) + run_flag = 
post_flag; +else + run_flag = !post_flag; +if (run_flag) { + +#ifndef USE_DYNAMIC_BUF_NUM /* def DEBUG_UCODE */ + if (hw->decode_pic_count == 0) + WRITE_VREG(AV_SCRATCH_D, 0); +#endif + if (start_decoding_delay & 0x10000) + msleep(start_decoding_delay&0xffff); +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + + if (start_decoding_delay & 0x20000) + msleep(start_decoding_delay&0xffff); + + +#ifdef PIC_DC_NEED_CLEAR + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 31); +#endif +} +if (debug_flag2 & 0x10) + run_flag = post_flag; +else + run_flag = !post_flag; +if (run_flag) { +#ifdef ENABLE_USER_DATA + if (firmware_sel == 0) { + pr_info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! firmware_sel is 0\n"); + WRITE_VREG(AV_SCRATCH_N, (u32)(hw->user_data_buffer_phys - hw->buf_offset)); + pr_debug("AV_SCRATCH_N = 0x%x\n", READ_VREG(AV_SCRATCH_N)); + } +#endif +} + +if (debug_flag2 & 0x20) + run_flag = post_flag; +else + run_flag = !post_flag; +if (run_flag) { + if (hw->m_ins_flag) { + if (vdec_frame_based(hw_to_vdec(hw))) + WRITE_VREG(DECODE_MODE, DECODE_MODE_MULTI_FRAMEBASE); + else + WRITE_VREG(DECODE_MODE, DECODE_MODE_MULTI_STREAMBASE); + WRITE_VREG(DECODE_LMEM_BUF_ADR, (u32)hw->lmem_phy_addr); + } else + WRITE_VREG(DECODE_MODE, DECODE_MODE_SINGLE); + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + hw->old_udebug_flag = udebug_flag; +} + return r; +} + +static void init_hw(struct vdec_s *vdec) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + int ret; + pr_info("%s, %d\n", __func__, __LINE__); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) + ret = amvdec_loadmc_ex(VFORMAT_AVS, NULL, hw->fw->data); + else if (firmware_sel == 1) + ret = amvdec_loadmc_ex(VFORMAT_AVS, "avs_no_cabac", hw->fw->data); + else + ret = amvdec_loadmc_ex(VFORMAT_AVS, NULL, hw->fw->data); + + if (ret < 0) { + amvdec_disable(); + /*vfree(buf);*/ + pr_err("AVS: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + } + pr_info("%s, %d\n", __func__, __LINE__); + + /*vfree(buf);*/ + + hw->stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + ret = vavs_prot_init2(hw, 0); + if (ret < 0) + return; + pr_info("%s, %d\n", __func__, __LINE__); + +} + + +static unsigned long run_ready2(struct vdec_s *vdec, unsigned long mask) +{ + return 1; +} + +static void run2(struct vdec_s *vdec, unsigned long mask, +void (*callback)(struct vdec_s *, void *), + void *arg) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + pr_info("%s, %d\n", __func__, __LINE__); + + vavs_prot_init2(hw, 1); + + vdec_source_changed(VFORMAT_AVS, + 1920, 1080, 30); + + amvdec_start(); + + hw->stat |= STAT_VDEC_RUN; + pr_info("%s %d\n", __func__, __LINE__); + +} + +static int ammvdec_avs_probe2(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_avs_hw_s *hw = NULL; + + pr_info("ammvdec_avs probe start.\n"); + + if (pdata == NULL) { + pr_info("ammvdec_avs platform data undefined.\n"); + return -EFAULT; + } + pr_info("%s %d\n", __func__, __LINE__); + + hw = (struct vdec_avs_hw_s *)vzalloc(sizeof(struct vdec_avs_hw_s)); + if (hw == NULL) { + pr_info("\nammvdec_avs decoder driver alloc failed\n"); + return -ENOMEM; + } + pr_info("%s %d\n", __func__, __LINE__); + /*atomic_set(&hw->error_handler_run, 0);*/ + hw->m_ins_flag = 1; + pr_info("%s %d\n", __func__, __LINE__); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM || disable_longcabac_trans) + firmware_sel = 1; + pr_info("%s %d\n", __func__, __LINE__); + + if (firmware_sel == 1) { +#ifndef USE_DYNAMIC_BUF_NUM + vf_buf_num = 4; +#endif + canvas_base = 0; + canvas_num = 3; + } else { + pr_info("Error, do not support longcabac work around!!!"); + return -ENOMEM; + } + pr_info("%s %d\n", __func__, __LINE__); + + if (pdata->sys_info) + hw->vavs_amstream_dec_info = *pdata->sys_info; + pr_info("%s %d\n", __func__, __LINE__); + + hw->is_reset = 0; + 
pdata->user_data_read = NULL; + pdata->reset_userdata_fifo = NULL; + + pr_info("%s %d\n", __func__, __LINE__); + + pdata->private = hw; + pdata->dec_status = vavs_dec_status; + pdata->set_isreset = vavs_set_isreset; + pdata->run_ready = run_ready2; + pdata->run = run2; + pdata->reset = reset; + pdata->irq_handler = vmavs_isr; + pdata->threaded_irq_handler = vmavs_isr_thread_fn; + pdata->dump_state = vmavs_dump_state; + + pr_info("%s %d\n", __func__, __LINE__); + + vavs_vdec_info_init(hw); + + pr_info("%s %d\n", __func__, __LINE__); + +#ifdef ENABLE_USER_DATA + if (NULL == hw->user_data_buffer) { + hw->user_data_buffer = + dma_alloc_coherent(amports_get_dma_device(), + USER_DATA_SIZE, + &hw->user_data_buffer_phys, GFP_KERNEL); + if (!hw->user_data_buffer) { + pr_info("%s: Can not allocate hw->user_data_buffer\n", + __func__); + return -ENOMEM; + } + pr_debug("hw->user_data_buffer = 0x%p, hw->user_data_buffer_phys = 0x%x\n", + hw->user_data_buffer, (u32)hw->user_data_buffer_phys); + } +#endif + hw->lmem_addr = kmalloc(LMEM_BUF_SIZE, GFP_KERNEL); + if (hw->lmem_addr == NULL) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + return -1; + } + hw->lmem_phy_addr = dma_map_single(amports_get_dma_device(), + hw->lmem_addr, LMEM_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(amports_get_dma_device(), + hw->lmem_phy_addr)) { + pr_err("%s: failed to map lmem buffer\n", __func__); + kfree(hw->lmem_addr); + hw->lmem_addr = NULL; + return -1; + } + + pr_info("%s %d\n", __func__, __LINE__); + + /*INIT_WORK(&hw->set_clk_work, avs_set_clk);*/ + + pr_info("%s %d\n", __func__, __LINE__); + + if (vavs_init2(hw) < 0) { + pr_info("amvdec_avs init failed.\n"); + kfree(hw->gvs); + hw->gvs = NULL; + pdata->dec_status = NULL; + return -ENODEV; + } + /*vdec = pdata;*/ + pr_info("%s, %d\n", __func__, __LINE__); + +if (hw->m_ins_flag) { + INIT_WORK(&hw->notify_work, vavs_notify_work); +#if 1 + if (pdata->use_vfm_path) { + snprintf(pdata->vf_provider_name, 
VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + hw->frameinfo_enable = 1; + } + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->canvas_spec[i] = 0xffffff; + } + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vavs_vf_provider, hw); + + platform_set_drvdata(pdev, pdata); + + hw->platform_dev = pdev; + + vdec_set_prepare_level(pdata, start_decode_buf_level); + + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + pr_info("%s, %d\n", __func__, __LINE__); +#endif +}else{ + /*INIT_WORK(&hw->fatal_error_wd_work, vavs_fatal_error_handler); + atomic_set(&hw->error_handler_run, 0);*/ +#ifdef ENABLE_USER_DATA + INIT_WORK(&hw->userdata_push_work, userdata_push_do_work); +#endif + INIT_WORK(&hw->notify_work, vavs_notify_work); +} + + init_hw(pdata); + return 0; +} + +static int ammvdec_avs_remove2(struct platform_device *pdev) +{ + struct vdec_avs_hw_s *hw = ghw; + + cancel_work_sync(&hw->fatal_error_wd_work); + atomic_set(&hw->error_handler_run, 0); +#ifdef ENABLE_USER_DATA + cancel_work_sync(&hw->userdata_push_work); +#endif + cancel_work_sync(&hw->notify_work); + cancel_work_sync(&hw->set_clk_work); + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)vavs_dec_id); + hw->stat &= ~STAT_ISR_REG; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->recycle_timer); + hw->stat &= ~STAT_TIMER_ARM; + } +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) { + mutex_lock(&vavs_mutex); + cancel_work_sync(&long_cabac_wd_work); + mutex_unlock(&vavs_mutex); + + if (es_write_addr_virt) { +#if 0 + codec_mm_free_for_dma("vavs", es_write_addr_phy); +#else + 
dma_unmap_single(amports_get_dma_device(), + es_write_addr_phy, + MAX_CODED_FRAME_SIZE, DMA_FROM_DEVICE); + /*kfree(es_write_addr_virt);*/ + es_write_addr_virt = NULL; +#endif + } + +#ifdef BITSTREAM_READ_TMP_NO_CACHE + if (bitstream_read_tmp) { + dma_free_coherent(amports_get_dma_device(), + SVA_STREAM_BUF_SIZE, bitstream_read_tmp, + bitstream_read_tmp_phy); + bitstream_read_tmp = NULL; + } +#else + if (bitstream_read_tmp) { + dma_unmap_single(amports_get_dma_device(), + bitstream_read_tmp_phy, + SVA_STREAM_BUF_SIZE, DMA_FROM_DEVICE); + kfree(bitstream_read_tmp); + bitstream_read_tmp = NULL; + } +#endif + } +#endif + if (hw->stat & STAT_VF_HOOK) { + if (hw->fr_hint_status == VDEC_HINTED && !hw->is_reset) + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_END_HINT, NULL); + hw->fr_hint_status = VDEC_NO_NEED_HINT; + vf_unreg_provider(&vavs_vf_prov); + hw->stat &= ~STAT_VF_HOOK; + } + +#ifdef ENABLE_USER_DATA + if (hw->user_data_buffer != NULL) { + dma_free_coherent( + amports_get_dma_device(), + USER_DATA_SIZE, + hw->user_data_buffer, + hw->user_data_buffer_phys); + hw->user_data_buffer = NULL; + hw->user_data_buffer_phys = 0; + } +#endif + + if (hw->fw) { + vfree(hw->fw); + hw->fw = NULL; + } + + amvdec_disable(); + /*vdec_disable_DMC(NULL);*/ + + hw->pic_type = 0; + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } +#ifdef DEBUG_PTS + pr_debug("pts hit %d, pts missed %d, i hit %d, missed %d\n", hw->pts_hit, + hw->pts_missed, hw->pts_i_hit, hw->pts_i_missed); + pr_debug("total frame %d, hw->avi_flag %d, rate %d\n", hw->total_frame, hw->avi_flag, + hw->vavs_amstream_dec_info.rate); +#endif + kfree(hw->gvs); + hw->gvs = NULL; + vfree(hw); + return 0; +} +#endif + +static struct platform_driver ammvdec_avs_driver = { +#ifdef DEBUG_MULTI_WITH_AUTOMODE + .probe = ammvdec_avs_probe2, + .remove = ammvdec_avs_remove2, +#else + .probe = ammvdec_avs_probe, + .remove = ammvdec_avs_remove, +#endif +#ifdef 
CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = MULTI_DRIVER_NAME, + } +}; + +static struct codec_profile_t ammvdec_avs_profile = { + .name = "mavs", + .profile = "" +}; + +static struct mconfig mavs_configs[] = { + /*MC_PU32("stat", &stat), + MC_PU32("debug_flag", &debug_flag), + MC_PU32("error_recovery_mode", &error_recovery_mode), + MC_PU32("hw->pic_type", &hw->pic_type), + MC_PU32("radr", &radr), + MC_PU32("vf_buf_num", &vf_buf_num), + MC_PU32("vf_buf_num_used", &vf_buf_num_used), + MC_PU32("canvas_base", &canvas_base), + MC_PU32("firmware_sel", &firmware_sel), + */ +}; +static struct mconfig_node mavs_node; + + +static int __init ammvdec_avs_driver_init_module(void) +{ + pr_debug("ammvdec_avs module init\n"); + + if (platform_driver_register(&ammvdec_avs_driver)) + pr_err("failed to register ammvdec_avs driver\n"); +#ifdef DEBUG_WITH_SINGLE_MODE + if (platform_driver_register(&amvdec_avs_driver)) { + pr_info("failed to register amvdec_avs driver\n"); + return -ENODEV; + } +#else + //amvdec_avs_driver = amvdec_avs_driver; +#endif + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB) + ammvdec_avs_profile.profile = "mavs+"; + + vcodec_profile_register(&ammvdec_avs_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &mavs_node, + "mavs", mavs_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_AVS, 0); + return 0; +} + + + +static void __exit ammvdec_avs_driver_remove_module(void) +{ + pr_debug("ammvdec_avs module remove.\n"); + + platform_driver_unregister(&ammvdec_avs_driver); +#ifdef DEBUG_WITH_SINGLE_MODE + platform_driver_unregister(&amvdec_avs_driver); +#endif +} + +/****************************************/ +/* +module_param(stat, uint, 0664); +MODULE_PARM_DESC(stat, "\n amvdec_avs stat\n"); +*/ +/****************************************** + *module_param(run_flag, uint, 0664); + *MODULE_PARM_DESC(run_flag, "\n run_flag\n"); + * + *module_param(step_flag, uint, 0664); + *MODULE_PARM_DESC(step_flag, 
"\n step_flag\n"); + ******************************************* + */ +module_param(step, uint, 0664); +MODULE_PARM_DESC(step, "\n step\n"); + +module_param(debug, uint, 0664); +MODULE_PARM_DESC(debug, "\n debug\n"); + +module_param(debug_mask, uint, 0664); +MODULE_PARM_DESC(debug_mask, "\n debug_mask\n"); + +module_param(error_recovery_mode, uint, 0664); +MODULE_PARM_DESC(error_recovery_mode, "\n error_recovery_mode\n"); + +/****************************************** + *module_param(error_watchdog_threshold, uint, 0664); + *MODULE_PARM_DESC(error_watchdog_threshold, "\n error_watchdog_threshold\n"); + * + *module_param(error_watchdog_buf_threshold, uint, 0664); + *MODULE_PARM_DESC(error_watchdog_buf_threshold, + * "\n error_watchdog_buf_threshold\n"); + ******************************************* + */ +/* +module_param(pic_type, uint, 0444); +MODULE_PARM_DESC(pic_type, "\n amdec_vas picture type\n"); +*/ +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\nrval\n"); + +module_param(dbg_cmd, uint, 0664); +MODULE_PARM_DESC(dbg_cmd, "\n dbg_cmd\n"); + +module_param(vf_buf_num, uint, 0664); +MODULE_PARM_DESC(vf_buf_num, "\nvf_buf_num\n"); + +/* +module_param(vf_buf_num_used, uint, 0664); +MODULE_PARM_DESC(vf_buf_num_used, "\nvf_buf_num_used\n"); +*/ +module_param(canvas_base, uint, 0664); +MODULE_PARM_DESC(canvas_base, "\ncanvas_base\n"); + + +module_param(firmware_sel, uint, 0664); +MODULE_PARM_DESC(firmware_sel, "\n firmware_sel\n"); + +module_param(disable_longcabac_trans, uint, 0664); +MODULE_PARM_DESC(disable_longcabac_trans, "\n disable_longcabac_trans\n"); + +module_param(dec_control, uint, 0664); +MODULE_PARM_DESC(dec_control, "\n amvdec_vavs decoder control\n"); + +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n avs start_decode_buf_level\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, + 
"\n avs decode_timeout_val\n"); + +module_param(error_handle_policy, uint, 0664); +MODULE_PARM_DESC(error_handle_policy, + "\n avs error_handle_policy\n"); + +module_param(again_threshold, uint, 0664); +MODULE_PARM_DESC(again_threshold, "\n again_threshold\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_h265 udebug_flag\n"); + +module_param(udebug_pause_pos, uint, 0664); +MODULE_PARM_DESC(udebug_pause_pos, "\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +module_param(udebug_pause_ins_id, uint, 0664); +MODULE_PARM_DESC(udebug_pause_ins_id, "\n udebug_pause_ins_id\n"); + +module_param(start_decoding_delay, uint, 0664); +MODULE_PARM_DESC(start_decoding_delay, "\n start_decoding_delay\n"); + +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, + "\n ammvdec_mavs pre_decode_buf_level\n"); + + +#ifdef DEBUG_MULTI_WITH_AUTOMODE +module_param(debug_flag2, uint, 0664); +MODULE_PARM_DESC(debug_flag2, "\n debug_flag2\n"); +#endif +module_param(force_fps, uint, 0664); +MODULE_PARM_DESC(force_fps, "\n force_fps\n"); + +#ifdef DEBUG_MULTI_FRAME_INS +module_param(delay, uint, 0664); +MODULE_PARM_DESC(delay, "\n delay\n"); + +module_param_array(max_run_count, uint, &max_decode_instance_num, 0664); + +#endif + +module_param_array(ins_udebug_flag, uint, &max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, &max_decode_instance_num, 0664); + +module_param_array(run_count, uint, &max_decode_instance_num, 0664); + +module_param_array(max_get_frame_interval, uint, + &max_decode_instance_num, 0664); + + +module_init(ammvdec_avs_driver_init_module); +module_exit(ammvdec_avs_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC AVS Video Decoder Driver"); 
+MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Qi Wang <qi.wang@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/avs_multi/avs_multi.h b/drivers/frame_provider/decoder/avs_multi/avs_multi.h new file mode 100644 index 0000000..8922b40 --- /dev/null +++ b/drivers/frame_provider/decoder/avs_multi/avs_multi.h
@@ -0,0 +1,90 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef AVS_H_ +#define AVS_H_ + +#ifdef CONFIG_AMLOGIC_AVSP_LONG_CABAC +#define AVSP_LONG_CABAC +#endif +/*#define BITSTREAM_READ_TMP_NO_CACHE*/ + +#ifdef AVSP_LONG_CABAC +#define MAX_CODED_FRAME_SIZE 1500000 /*!< bytes for one frame*/ +#define LOCAL_HEAP_SIZE (1024*1024*10) +/* + *#define MAX_CODED_FRAME_SIZE 240000 + *#define MAX_CODED_FRAME_SIZE 700000 + */ +#define SVA_STREAM_BUF_SIZE 1024 + +extern void *es_write_addr_virt; +extern dma_addr_t es_write_addr_phy; + +extern void *bitstream_read_tmp; +extern dma_addr_t bitstream_read_tmp_phy; +extern void *avsp_heap_adr; + +int avs_get_debug_flag(void); + +int process_long_cabac(void); + +/* bit [6] - skip_mode_flag + * bit [5:4] - picture_type + * bit [3] - picture_structure (0-Field, 1-Frame) + * bit [2] - fixed_picture_qp + * bit [1] - progressive_sequence + * bit [0] - active + */ +#define LONG_CABAC_REQ AV_SCRATCH_K +#define LONG_CABAC_SRC_ADDR AV_SCRATCH_H +#define LONG_CABAC_DES_ADDR AV_SCRATCH_I +/* bit[31:16] - vertical_size + * bit[15:0] - horizontal_size + */ +#define LONG_CABAC_PIC_SIZE AV_SCRATCH_J + +#endif + +/* + *#define PERFORMANCE_DEBUG + *#define DUMP_DEBUG + */ +#define AVS_DEBUG_PRINT 0x01 
+#define AVS_DEBUG_OLD_ERROR_HANDLE 0x10 +#define AVS_DEBUG_USE_FULL_SPEED 0x80 +#define AEC_DUMP 0x100 +#define STREAM_INFO_DUMP 0x200 +#define SLICE_INFO_DUMP 0x400 +#define MB_INFO_DUMP 0x800 +#define MB_NUM_DUMP 0x1000 +#define BLOCK_NUM_DUMP 0x2000 +#define COEFF_DUMP 0x4000 +#define ES_DUMP 0x8000 +#define DQUANT_DUMP 0x10000 +#define STREAM_INFO_DUMP_MORE 0x20000 +#define STREAM_INFO_DUMP_MORE2 0x40000 + +extern void *es_write_addr_virt; +extern void *bitstream_read_tmp; +extern dma_addr_t bitstream_read_tmp_phy; +int read_bitstream(unsigned char *Buf, int size); +int u_v(int LenInBits, char *tracestring); + +#endif
diff --git a/drivers/frame_provider/decoder/h264/Makefile b/drivers/frame_provider/decoder/h264/Makefile new file mode 100644 index 0000000..b7c85ee --- /dev/null +++ b/drivers/frame_provider/decoder/h264/Makefile
@@ -0,0 +1,6 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_H264) += amvdec_h264.o +amvdec_h264-objs += vh264.o + +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_H264_MVC) += amvdec_h264mvc.o +amvdec_h264mvc-objs += vh264_mvc.o +
diff --git a/drivers/frame_provider/decoder/h264/vh264.c b/drivers/frame_provider/decoder/h264/vh264.c new file mode 100644 index 0000000..55f0581 --- /dev/null +++ b/drivers/frame_provider/decoder/h264/vh264.c
@@ -0,0 +1,4509 @@ +/* + * drivers/amlogic/media/frame_provider/decoder/h264/vh264.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/tsync.h> +#include <linux/workqueue.h> +#include <linux/dma-mapping.h> +#include <linux/atomic.h> +#include <linux/module.h> +#include <linux/slab.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/canvas/canvas.h> +#include "../utils/vdec.h" +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../utils/amvdec.h" +#include "vh264.h" +#include "../../../stream_input/amports/streambuf.h" +#include <linux/delay.h> +#include <linux/amlogic/media/video_sink/video.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/amlogic/media/ge2d/ge2d.h> +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include 
<linux/amlogic/media/codec_mm/configs.h> +#include "../utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include <linux/uaccess.h> + +#define DRIVER_NAME "amvdec_h264" +#define MODULE_NAME "amvdec_h264" +#define MEM_NAME "codec_264" +#define HANDLE_H264_IRQ + +#if 0 +/* currently, only iptv supports this function*/ +#define SUPPORT_BAD_MACRO_BLOCK_REDUNDANCY +#endif + +/* #define DEBUG_PTS */ +#if 0 /* MESON_CPU_TYPE <= MESON_CPU_TYPE_MESON6TV */ +#define DROP_B_FRAME_FOR_1080P_50_60FPS +#endif +#define RATE_MEASURE_NUM 8 +#define RATE_CORRECTION_THRESHOLD 5 +#define RATE_24_FPS 4004 /* 23.97 */ +#define RATE_25_FPS 3840 /* 25 */ +#define DUR2PTS(x) ((x)*90/96) +#define PTS2DUR(x) ((x)*96/90) +#define DUR2PTS_REM(x) (x*90 - DUR2PTS(x)*96) +#define FIX_FRAME_RATE_CHECK_IDRFRAME_NUM 2 +#define VDEC_CLOCK_ADJUST_FRAME 30 + +static inline bool close_to(int a, int b, int m) +{ + return (abs(a - b) < m) ? true : false; +} + +static DEFINE_MUTEX(vh264_mutex); +#define DEF_BUF_START_ADDR 0x1000000 +#define V_BUF_ADDR_OFFSET_NEW (0x1ee000) +#define V_BUF_ADDR_OFFSET (0x13e000) + +#define PIC_SINGLE_FRAME 0 +#define PIC_TOP_BOT_TOP 1 +#define PIC_BOT_TOP_BOT 2 +#define PIC_DOUBLE_FRAME 3 +#define PIC_TRIPLE_FRAME 4 +#define PIC_TOP_BOT 5 +#define PIC_BOT_TOP 6 +#define PIC_INVALID 7 + +#define EXTEND_SAR 0xff + +#define VF_POOL_SIZE 64 +#define VF_BUF_NUM 24 +#define WORKSPACE_BUF_NUM 2 +#define PUT_INTERVAL (HZ/100) +#define NO_DISP_WD_COUNT (3 * HZ / PUT_INTERVAL) + +#define SWITCHING_STATE_OFF 0 +#define SWITCHING_STATE_ON_CMD3 1 +#define SWITCHING_STATE_ON_CMD1 2 +#define SWITCHING_STATE_ON_CMD1_PENDING 3 + + +#define DEC_CONTROL_FLAG_FORCE_2997_1080P_INTERLACE 0x0001 +#define DEC_CONTROL_FLAG_FORCE_2500_576P_INTERLACE 0x0002 +#define DEC_CONTROL_FLAG_DISABLE_FAST_POC 0x0004 + +#define INCPTR(p) ptr_atomic_wrap_inc(&p) + +#define SLICE_TYPE_I 2 +#define SLICE_TYPE_P 5 +#define SLICE_TYPE_B 6 + +struct buffer_spec_s { + unsigned int y_addr; + 
unsigned int u_addr; + unsigned int v_addr; + + int y_canvas_index; + int u_canvas_index; + int v_canvas_index; + + unsigned int y_canvas_width; + unsigned int u_canvas_width; + unsigned int v_canvas_width; + + unsigned int y_canvas_height; + unsigned int u_canvas_height; + unsigned int v_canvas_height; + + unsigned long phy_addr; + int alloc_count; +}; + +#define spec2canvas(x) \ + (((x)->v_canvas_index << 16) | \ + ((x)->u_canvas_index << 8) | \ + ((x)->y_canvas_index << 0)) + +static struct vframe_s *vh264_vf_peek(void *); +static struct vframe_s *vh264_vf_get(void *); +static void vh264_vf_put(struct vframe_s *, void *); +static int vh264_vf_states(struct vframe_states *states, void *); +static int vh264_event_cb(int type, void *data, void *private_data); + +static void vh264_prot_init(void); +static int vh264_local_init(void); +static void vh264_put_timer_func(struct timer_list *timer); +static void stream_switching_done(void); + +static const char vh264_dec_id[] = "vh264-dev"; + +#define PROVIDER_NAME "decoder.h264" + +static const struct vframe_operations_s vh264_vf_provider_ops = { + .peek = vh264_vf_peek, + .get = vh264_vf_get, + .put = vh264_vf_put, + .event_cb = vh264_event_cb, + .vf_states = vh264_vf_states, +}; + +static struct vframe_provider_s vh264_vf_prov; +/*TODO irq*/ +#if 1 +static u32 frame_width, frame_height, frame_dur, frame_prog, frame_packing_type, + last_duration; +static u32 saved_resolution; +static u32 last_mb_width, last_mb_height; +#else +static u32 frame_buffer_size; +static u32 frame_width, frame_height, frame_dur, frame_prog, last_duration; +static u32 last_mb_width, last_mb_height; +static u32 frame_packing_type; +#endif +static DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(recycle_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(delay_display_q, struct vframe_s *, VF_POOL_SIZE); + +static struct vframe_s 
vfpool[VF_POOL_SIZE]; +static s32 vfbuf_use[VF_BUF_NUM]; +static struct buffer_spec_s buffer_spec[VF_BUF_NUM]; +static struct buffer_spec_s fense_buffer_spec[2]; +/* disp buf + keep buf+ fense buf + workspace */ + +#define MAX_BLK_BUFFERS (VF_BUF_NUM + 2 + WORKSPACE_BUF_NUM) +#define VF_BUFFER_IDX(n) (WORKSPACE_BUF_NUM + n) +#define FENSE_BUFFER_IDX(n) (WORKSPACE_BUF_NUM + VF_BUF_NUM + n) + +#define USER_DATA_RUND_SIZE (USER_DATA_SIZE + 4096) +static struct vframe_s fense_vf[2]; + +static struct timer_list recycle_timer; +static u32 stat; +static s32 buf_offset; +static u32 pts_outside; +static u32 sync_outside; +static u32 dec_control; +static u32 vh264_ratio; +static u32 vh264_rotation; +static u32 use_idr_framerate; +static u32 high_bandwidth; + +static u32 seq_info; +static u32 timing_info_present_flag; +static u32 fixed_frame_rate_flag; +static u32 fixed_frame_rate_check_count; +static u32 aspect_ratio_info; +static u32 num_units_in_tick; +static u32 time_scale; +static u32 h264_ar; +static u32 decoder_debug_flag; +static u32 dpb_size_adj = 6; +static u32 fr_hint_status; + +#ifdef DROP_B_FRAME_FOR_1080P_50_60FPS +static u32 last_interlaced; +#endif +static bool is_4k; +static unsigned char h264_first_pts_ready; +static bool h264_first_valid_pts_ready; +static u32 h264pts1, h264pts2; +static u32 h264_pts_count, duration_from_pts_done, duration_on_correcting; +static u32 vh264_error_count; +static u32 vh264_no_disp_count; +static u32 fatal_error_flag; +static u32 fatal_error_reset; +static u32 max_refer_buf = 1; +static u32 decoder_force_reset; +static unsigned int no_idr_error_count; +static unsigned int no_idr_error_max = 60; +static unsigned int canvas_mode; + +#ifdef SUPPORT_BAD_MACRO_BLOCK_REDUNDANCY +/* 0~128*/ +static u32 bad_block_scale; +#endif +static u32 enable_userdata_debug; + +static unsigned int enable_switch_fense = 1; +#define EN_SWITCH_FENCE() (enable_switch_fense && !is_4k) +static struct vframe_qos_s s_vframe_qos; +static int frame_count; + 
+#if 0 +static u32 vh264_no_disp_wd_count; +#endif +static u32 vh264_running; +static s32 vh264_stream_switching_state; +static s32 vh264_eos; +static struct vframe_s *p_last_vf; +static s32 iponly_early_mode; +static void *mm_blk_handle; +static int tvp_flag; +static bool is_reset; + +/*TODO irq*/ +#if 1 +static u32 last_pts, last_pts_remainder; +#else +static u32 last_pts; +#endif +static bool check_pts_discontinue; +static u32 wait_buffer_counter; +static u32 video_signal_from_vui; + +static uint error_recovery_mode; +static uint error_recovery_mode_in = 3; +static uint error_recovery_mode_use = 3; + +static uint mb_total = 0, mb_width = 0, mb_height; +static uint saved_idc_level; +#define UCODE_IP_ONLY 2 +#define UCODE_IP_ONLY_PARAM 1 +static uint ucode_type; + +#ifdef DEBUG_PTS +static unsigned long pts_missed, pts_hit; +#endif +static uint debugfirmware; + +static atomic_t vh264_active = ATOMIC_INIT(0); +static int vh264_reset; +static struct work_struct error_wd_work; +static struct work_struct stream_switching_work; +static struct work_struct set_parameter_work; +static struct work_struct notify_work; +static struct work_struct set_clk_work; +static struct work_struct userdata_push_work; + +struct h264_qos_data_node_t { + struct list_head list; + + uint32_t b_offset; + int poc; + /* picture qos infomation*/ + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; +}; + +/*qos data records list waiting for match with picture that be display*/ +static struct list_head picture_qos_list; +/*free qos data records list*/ +static struct list_head free_qos_nodes_list; +#define MAX_FREE_QOS_NODES 64 +static struct h264_qos_data_node_t free_nodes[MAX_FREE_QOS_NODES]; +static struct work_struct qos_work; +static struct dec_sysinfo vh264_amstream_dec_info; +static dma_addr_t mc_dma_handle; +static void *mc_cpu_addr; +static u32 first_offset; +static u32 first_pts; +static u32 first_frame_size; 
+static u64 first_pts64; +static bool first_pts_cached; +static void *sei_data_buffer; +static dma_addr_t sei_data_buffer_phys; +static int clk_adj_frame_count; + +#define MC_OFFSET_HEADER 0x0000 +#define MC_OFFSET_DATA 0x1000 +#define MC_OFFSET_MMCO 0x2000 +#define MC_OFFSET_LIST 0x3000 +#define MC_OFFSET_SLICE 0x4000 + +#define MC_TOTAL_SIZE (20*SZ_1K) +#define MC_SWAP_SIZE (4*SZ_1K) + +#define MODE_ERROR 0 +#define MODE_FULL 1 + +static DEFINE_SPINLOCK(lock); +static DEFINE_SPINLOCK(prepare_lock); +static DEFINE_SPINLOCK(recycle_lock); + +static bool block_display_q; +static int vh264_stop(int mode); +static s32 vh264_init(void); + + +#define DFS_HIGH_THEASHOLD 3 + +static bool pts_discontinue; + +static struct ge2d_context_s *ge2d_videoh264_context; + +static struct vdec_info *gvs; + +static struct vdec_s *vdec_h264; + +static int ge2d_videoh264task_init(void) +{ + if (ge2d_videoh264_context == NULL) + ge2d_videoh264_context = create_ge2d_work_queue(); + + if (ge2d_videoh264_context == NULL) { + pr_info("create_ge2d_work_queue video task failed\n"); + return -1; + } + return 0; +} + +static int ge2d_videoh264task_release(void) +{ + if (ge2d_videoh264_context) { + destroy_ge2d_work_queue(ge2d_videoh264_context); + ge2d_videoh264_context = NULL; + } + return 0; +} + +static int ge2d_canvas_dup(struct canvas_s *srcy, struct canvas_s *srcu, + struct canvas_s *des, int format, u32 srcindex, + u32 desindex) +{ + + struct config_para_ex_s ge2d_config; + /* pr_info("[%s]h264 ADDR srcy[0x%lx] srcu[0x%lx] des[0x%lx]\n", + * __func__, srcy->addr, srcu->addr, des->addr); + */ + memset(&ge2d_config, 0, sizeof(struct config_para_ex_s)); + + ge2d_config.alu_const_color = 0; + ge2d_config.bitmask_en = 0; + ge2d_config.src1_gb_alpha = 0; + + ge2d_config.src_planes[0].addr = srcy->addr; + ge2d_config.src_planes[0].w = srcy->width; + ge2d_config.src_planes[0].h = srcy->height; + + ge2d_config.src_planes[1].addr = srcu->addr; + ge2d_config.src_planes[1].w = srcu->width; + 
ge2d_config.src_planes[1].h = srcu->height; + + ge2d_config.dst_planes[0].addr = des->addr; + ge2d_config.dst_planes[0].w = des->width; + ge2d_config.dst_planes[0].h = des->height; + + ge2d_config.src_para.canvas_index = srcindex; + ge2d_config.src_para.mem_type = CANVAS_TYPE_INVALID; + ge2d_config.src_para.format = format; + ge2d_config.src_para.fill_color_en = 0; + ge2d_config.src_para.fill_mode = 0; + ge2d_config.src_para.color = 0; + ge2d_config.src_para.top = 0; + ge2d_config.src_para.left = 0; + ge2d_config.src_para.width = srcy->width; + ge2d_config.src_para.height = srcy->height; + + ge2d_config.dst_para.canvas_index = desindex; + ge2d_config.dst_para.mem_type = CANVAS_TYPE_INVALID; + ge2d_config.dst_para.format = format; + ge2d_config.dst_para.fill_color_en = 0; + ge2d_config.dst_para.fill_mode = 0; + ge2d_config.dst_para.color = 0; + ge2d_config.dst_para.top = 0; + ge2d_config.dst_para.left = 0; + ge2d_config.dst_para.width = srcy->width; + ge2d_config.dst_para.height = srcy->height; + + if (ge2d_context_config_ex(ge2d_videoh264_context, &ge2d_config) < 0) { + pr_info("ge2d_context_config_ex failed\n"); + return -1; + } + + stretchblt_noalpha(ge2d_videoh264_context, 0, 0, srcy->width, + srcy->height, 0, 0, srcy->width, srcy->height); + + return 0; +} + +static inline int fifo_level(void) +{ + return VF_POOL_SIZE - kfifo_len(&newframe_q); +} + + +void spec_set_canvas(struct buffer_spec_s *spec, + unsigned int width, unsigned int height) +{ + int endian; + + endian = (canvas_mode == CANVAS_BLKMODE_LINEAR)?7:0; + config_cav_lut_ex(spec->y_canvas_index, + spec->y_addr, + width, height, + CANVAS_ADDR_NOWRAP, canvas_mode, endian, VDEC_1); + + config_cav_lut_ex(spec->u_canvas_index, + spec->u_addr, + width, height / 2, + CANVAS_ADDR_NOWRAP, canvas_mode, endian, VDEC_1); + +} + +static void vh264_notify_work(struct work_struct *work) +{ + pr_info("frame duration changed %d\n", frame_dur); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_FR_HINT, + (void 
*)((unsigned long)frame_dur)); + + return; +} + +static void prepare_display_q(void) +{ + unsigned long flags; + int count; + + spin_lock_irqsave(&prepare_lock, flags); + + if (block_display_q) { + spin_unlock_irqrestore(&prepare_lock, flags); + return; + } + + spin_unlock_irqrestore(&prepare_lock, flags); + + count = (int)VF_POOL_SIZE - + kfifo_len(&delay_display_q) - + kfifo_len(&display_q) - + kfifo_len(&recycle_q) - + kfifo_len(&newframe_q); + + if ((vh264_stream_switching_state != SWITCHING_STATE_OFF) + || !EN_SWITCH_FENCE()) + count = 0; + else + count = (count < 2) ? 0 : 2; + + while (kfifo_len(&delay_display_q) > count) { + struct vframe_s *vf; + + if (kfifo_get(&delay_display_q, &vf)) { + kfifo_put(&display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } +} + +static struct vframe_s *vh264_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + + if (kfifo_peek(&display_q, &vf)) + return vf; + + return NULL; +} + +static struct vframe_s *vh264_vf_get(void *op_arg) +{ + struct vframe_s *vf; + + if (kfifo_get(&display_q, &vf)) + return vf; + + return NULL; +} +static bool vf_valid_check(struct vframe_s *vf) { + int i; + for (i = 0; i < VF_POOL_SIZE; i++) { + if (vf == &vfpool[i]) + return true; + } + pr_info(" invalid vf been put, vf = %p\n", vf); + for (i = 0; i < VF_POOL_SIZE; i++) { + pr_info("www valid vf[%d]= %p \n", i, &vfpool[i]); + } + return false; +} + +static void vh264_vf_put(struct vframe_s *vf, void *op_arg) +{ + unsigned long flags; + + spin_lock_irqsave(&recycle_lock, flags); + + if ((vf != &fense_vf[0]) && (vf != &fense_vf[1])) { + if (vf && (vf_valid_check(vf) == true)) + kfifo_put(&recycle_q, (const struct vframe_s *)vf); + } + spin_unlock_irqrestore(&recycle_lock, flags); +} + +static int vh264_event_cb(int type, void *data, void *private_data) +{ + if (type & VFRAME_EVENT_RECEIVER_RESET) { + unsigned long flags; + + 
amvdec_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_light_unreg_provider(&vh264_vf_prov); +#endif + spin_lock_irqsave(&lock, flags); + vh264_local_init(); + vh264_prot_init(); + spin_unlock_irqrestore(&lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vh264_vf_prov); +#endif + amvdec_start(); + } + return 0; +} + +static int vh264_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + + spin_lock_irqsave(&lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&newframe_q); + states->buf_avail_num = kfifo_len(&display_q) + + kfifo_len(&delay_display_q); + states->buf_recycle_num = kfifo_len(&recycle_q); + + spin_unlock_irqrestore(&lock, flags); + + return 0; +} + +#if 0 +static tvin_trans_fmt_t convert_3d_format(u32 type) +{ + const tvin_trans_fmt_t conv_tab[] = { + 0, /* checkerboard */ + 0, /* column alternation */ + TVIN_TFMT_3D_LA, /* row alternation */ + TVIN_TFMT_3D_LRH_OLER, /* side by side */ + TVIN_TFMT_3D_FA /* top bottom */ + }; + + return (type <= 4) ? 
conv_tab[type] : 0; +} +#endif + + + +#define DUMP_CC_AS_ASCII + +#ifdef DUMP_CC_AS_ASCII +static int vbi_to_ascii(int c) +{ + if (c < 0) + return '?'; + + c &= 0x7F; + + if (c < 0x20 || c >= 0x7F) + return '.'; + + return c; +} + +static void dump_cc_ascii(const uint8_t *buf, unsigned int vpts, int poc) +{ + int cc_flag; + int cc_count; + int i; + int szAscii[32]; + int index = 0; + + cc_flag = buf[1] & 0x40; + if (!cc_flag) { + pr_info("### cc_flag is invalid\n"); + return; + } + cc_count = buf[1] & 0x1f; + + for (i = 0; i < cc_count; ++i) { + unsigned int b0; + unsigned int cc_valid; + unsigned int cc_type; + unsigned char cc_data1; + unsigned char cc_data2; + + b0 = buf[3 + i * 3]; + cc_valid = b0 & 4; + cc_type = b0 & 3; + cc_data1 = buf[4 + i * 3]; + cc_data2 = buf[5 + i * 3]; + + + if (cc_type == 0) { + /* NTSC pair, Line 21 */ + szAscii[index++] = vbi_to_ascii(cc_data1); + szAscii[index++] = vbi_to_ascii(cc_data2); + if ((!cc_valid) || (i >= 3)) + break; + } + } + + if (index > 0 && index <= 8) { + char pr_buf[128]; + int len; + + sprintf(pr_buf, "push vpts:0x%x, poc:%d :", vpts, poc); + len = strlen(pr_buf); + for (i=0;i<index;i++) + sprintf(pr_buf + len + i*2, "%c ", szAscii[i]); + pr_info("%s\n", pr_buf); + } + +} +#endif + +/* +#define DUMP_USER_DATA_HEX +*/ +#ifdef DUMP_USER_DATA_HEX +static void print_data(unsigned char *pdata, int len) +{ + int nLeft; + char buf[128]; + + nLeft = len; + while (nLeft >= 16) { + int i; + + for (i=0;i<16;i++) + sprintf(buf+i*3, "%02x ", pdata[i]); + + pr_info("%s\n", buf); + nLeft -= 16; + pdata += 16; + } + + while (nLeft >= 8) { + int i; + for (i=0;i<nLeft;i++) + sprintf(buf+i*3, "%02x ", pdata[i]); + + pr_info("%s\n", buf); + nLeft -= 8; + pdata += 8; + } +} +#endif + + + +static void aml_swap_data(uint8_t *user_data, int ud_size) +{ + int swap_blocks, i, j, k, m; + unsigned char c_temp; + + /* swap byte order */ + swap_blocks = ud_size / 8; + for (i = 0; i < swap_blocks; i++) { + j = i * 8; + k = j + 7; + for (m = 
0; m < 4; m++) { + c_temp = user_data[j]; + user_data[j++] = user_data[k]; + user_data[k--] = c_temp; + } + } +} + + +static void udr_dump_data(unsigned int user_data_wp, + unsigned int user_data_length, + unsigned int pts, + int poc) +{ + unsigned char *pdata; + int user_data_len; + int wp_start; + int nLeft; + unsigned char szBuf[256]; + int nOffset; + + dma_sync_single_for_cpu(amports_get_dma_device(), + sei_data_buffer_phys, USER_DATA_SIZE, + DMA_FROM_DEVICE); + + if (user_data_length & 0x07) + user_data_len = (user_data_length + 8) & 0xFFFFFFF8; + else + user_data_len = user_data_length; + + if (user_data_wp >= user_data_len) { + wp_start = user_data_wp - user_data_len; + + pdata = (unsigned char *)sei_data_buffer; + pdata += wp_start; + nLeft = user_data_len; + + memset(szBuf, 0, 256); + memcpy(szBuf, pdata, user_data_len); + } else { + wp_start = user_data_wp + + USER_DATA_SIZE - user_data_len; + + pdata = (unsigned char *)sei_data_buffer; + pdata += wp_start; + nLeft = USER_DATA_SIZE - wp_start; + + memset(szBuf, 0, 256); + memcpy(szBuf, pdata, nLeft); + nOffset = nLeft; + + pdata = (unsigned char *)sei_data_buffer; + nLeft = user_data_wp; + memcpy(szBuf+nOffset, pdata, nLeft); + } + + aml_swap_data(szBuf, user_data_len); + +#ifdef DUMP_USER_DATA_HEX + print_data(szBuf, user_data_len); +#endif + +#ifdef DUMP_CC_AS_ASCII + dump_cc_ascii(szBuf+7, pts, poc); +#endif +} + + +struct vh264_userdata_recored_t { + struct userdata_meta_info_t meta_info; + u32 rec_start; + u32 rec_len; +}; + +#define USERDATA_FIFO_NUM 256 + +struct vh264_userdata_info_t { + struct vh264_userdata_recored_t records[USERDATA_FIFO_NUM]; + u8 *data_buf; + u8 *data_buf_end; + u32 buf_len; + u32 read_index; + u32 write_index; + u32 last_wp; +}; + +static struct vh264_userdata_info_t *p_userdata_mgr; + +static DEFINE_MUTEX(userdata_mutex); + + +void vh264_crate_userdata_manager(u8 *userdata_buf, int buf_len) +{ + p_userdata_mgr = (struct vh264_userdata_info_t *) + vmalloc(sizeof(struct 
vh264_userdata_info_t)); + if (p_userdata_mgr) { + memset(p_userdata_mgr, 0, + sizeof(struct vh264_userdata_info_t)); + p_userdata_mgr->data_buf = userdata_buf; + p_userdata_mgr->buf_len = buf_len; + p_userdata_mgr->data_buf_end = userdata_buf + buf_len; + } +} + +void vh264_destroy_userdata_manager(void) +{ + if (p_userdata_mgr) { + vfree(p_userdata_mgr); + p_userdata_mgr = NULL; + } +} + +/* +#define DUMP_USER_DATA +*/ +#ifdef DUMP_USER_DATA + +#define MAX_USER_DATA_SIZE 3145728 +static void *user_data_buf; +static unsigned char *pbuf_start; +static int total_len; +static int bskip; +static int n_userdata_id; + + +static void print_mem_data(unsigned char *pdata, + int len, + unsigned int flag, + unsigned int duration, + unsigned int vpts, + unsigned int vpts_valid, + int rec_id) +{ + int nLeft; + + nLeft = len; +#if 0 + pr_info("%d len = %d, flag = %d, duration = %d, vpts = 0x%x, vpts_valid = %d\n", + rec_id, len, flag, + duration, vpts, vpts_valid); +#endif + pr_info("%d len = %d, flag = %d, vpts = 0x%x\n", + rec_id, len, flag, vpts); + + + while (nLeft >= 16) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7], + pdata[8], pdata[9], pdata[10], pdata[11], + pdata[12], pdata[13], pdata[14], pdata[15]); + nLeft -= 16; + pdata += 16; + } + + + while (nLeft > 0) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } +} + + +static void dump_data(u8 *pdata, + unsigned int user_data_length, + unsigned int flag, + unsigned int duration, + unsigned int vpts, + unsigned int vpts_valid, + int rec_id) +{ + unsigned char szBuf[256]; + + + memset(szBuf, 0, 256); + memcpy(szBuf, pdata, user_data_length); +/* + aml_swap_data(szBuf, user_data_length); +*/ + + print_mem_data(szBuf, user_data_length, + flag, duration, vpts, + vpts_valid, 
rec_id); + +#ifdef DEBUG_CC_DUMP_ASCII + dump_cc_ascii(szBuf+7); +#endif +} + +static void push_to_buf(u8 *pdata, int len, struct userdata_meta_info_t *pmeta) +{ + u32 *pLen; + int info_cnt; + u8 *pbuf_end; + + if (!user_data_buf) + return; + + if (bskip) { + pr_info("over size, skip\n"); + return; + } + info_cnt = 0; + pLen = (u32 *)pbuf_start; + + *pLen = len; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->duration; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->flags; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts_valid; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + + *pLen = n_userdata_id; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + + + pbuf_end = (u8 *)sei_data_buffer + USER_DATA_SIZE; + if (pdata + len > pbuf_end) { + int first_section_len; + + first_section_len = pbuf_end - pdata; + memcpy(pbuf_start, pdata, first_section_len); + pdata = (u8 *)sei_data_buffer; + pbuf_start += first_section_len; + memcpy(pbuf_start, pdata, len - first_section_len); + pbuf_start += len - first_section_len; + } else { + memcpy(pbuf_start, pdata, len); + pbuf_start += len; + } + + total_len += len + info_cnt * sizeof(u32); + if (total_len >= MAX_USER_DATA_SIZE-4096) + bskip = 1; +} + + +static void dump_userdata_info( + void *puser_data, + int len, + struct userdata_meta_info_t *pmeta) +{ + u8 *pstart; + + pstart = (u8 *)puser_data; + + + push_to_buf(pstart, len, pmeta); +} + +static void show_user_data_buf(void) +{ + u8 *pbuf; + int len; + unsigned int flag; + unsigned int duration; + unsigned int vpts; + unsigned int vpts_valid; + int rec_id; + + pr_info("show user data buf\n"); + pbuf = user_data_buf; + + while (pbuf < pbuf_start) { + u32 *pLen; + + pLen = (u32 *)pbuf; + + len = *pLen; + pLen++; + pbuf += sizeof(u32); + + duration = *pLen; + pLen++; + pbuf += sizeof(u32); + + flag = *pLen; + 
pLen++; + pbuf += sizeof(u32); + + vpts = *pLen; + pLen++; + pbuf += sizeof(u32); + + vpts_valid = *pLen; + pLen++; + pbuf += sizeof(u32); + + rec_id = *pLen; + pLen++; + pbuf += sizeof(u32); + + dump_data(pbuf, len, flag, duration, vpts, vpts_valid, rec_id); + pbuf += len; + msleep(30); + } +} + +static int vh264_init_userdata_dump(void) +{ + user_data_buf = kmalloc(MAX_USER_DATA_SIZE, GFP_KERNEL); + if (user_data_buf) + return 1; + else + return 0; +} + +static void vh264_dump_userdata(void) +{ + if (user_data_buf) { + show_user_data_buf(); + kfree(user_data_buf); + user_data_buf = NULL; + } +} + +static void vh264_reset_user_data_buf(void) +{ + total_len = 0; + pbuf_start = user_data_buf; + bskip = 0; + n_userdata_id = 0; +} +#endif + +static void vh264_add_userdata(struct userdata_meta_info_t meta_info, int wp) +{ + struct vh264_userdata_recored_t *p_userdata_rec; + int data_length; + + mutex_lock(&userdata_mutex); + + if (p_userdata_mgr) { + if (wp > p_userdata_mgr->last_wp) + data_length = wp - p_userdata_mgr->last_wp; + else + data_length = wp + p_userdata_mgr->buf_len - + p_userdata_mgr->last_wp; + + if (data_length & 0x7) + data_length = (((data_length + 8) >> 3) << 3); +#if 0 + pr_info("wakeup_push: ri:%d, wi:%d, data_len:%d, last_wp:%d, wp:%d, id = %d\n", + p_userdata_mgr->read_index, + p_userdata_mgr->write_index, + data_length, + p_userdata_mgr->last_wp, + wp, + n_userdata_id); +#endif + p_userdata_rec = p_userdata_mgr->records + + p_userdata_mgr->write_index; + p_userdata_rec->meta_info = meta_info; + p_userdata_rec->rec_start = p_userdata_mgr->last_wp; + p_userdata_rec->rec_len = data_length; + p_userdata_mgr->last_wp = wp; + +#ifdef DUMP_USER_DATA + dump_userdata_info(p_userdata_mgr->data_buf + + p_userdata_rec->rec_start, + data_length, + &meta_info); + n_userdata_id++; +#endif + + p_userdata_mgr->write_index++; + if (p_userdata_mgr->write_index >= USERDATA_FIFO_NUM) + p_userdata_mgr->write_index = 0; + } + mutex_unlock(&userdata_mutex); + + 
vdec_wakeup_userdata_poll(vdec_h264); +} + +static int vh264_user_data_read(struct vdec_s *vdec, + struct userdata_param_t *puserdata_para) +{ + int rec_ri, rec_wi; + int rec_len; + u8 *rec_data_start; + u8 *pdest_buf; + struct vh264_userdata_recored_t *p_userdata_rec; + u32 data_size; + u32 res; + int copy_ok = 1; + + + pdest_buf = puserdata_para->pbuf_addr; + + + mutex_lock(&userdata_mutex); + + if (!p_userdata_mgr) { + mutex_unlock(&userdata_mutex); + return 0; + } +/* + pr_info("ri = %d, wi = %d\n", + p_userdata_mgr->read_index, + p_userdata_mgr->write_index); +*/ + rec_ri = p_userdata_mgr->read_index; + rec_wi = p_userdata_mgr->write_index; + + if (rec_ri == rec_wi) { + mutex_unlock(&userdata_mutex); + return 0; + } + + p_userdata_rec = p_userdata_mgr->records + rec_ri; + + rec_len = p_userdata_rec->rec_len; + rec_data_start = p_userdata_rec->rec_start + p_userdata_mgr->data_buf; +/* + pr_info("rec_len:%d, rec_start:%d, buf_len:%d\n", + p_userdata_rec->rec_len, + p_userdata_rec->rec_start, + puserdata_para->buf_len); +*/ + if (rec_len <= puserdata_para->buf_len) { + /* dvb user data buffer is enought to copy the whole recored. 
*/ + data_size = rec_len; + if (rec_data_start + data_size + > p_userdata_mgr->data_buf_end) { + int first_section_len; + + first_section_len = p_userdata_mgr->buf_len - + p_userdata_rec->rec_start; + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + first_section_len); + if (res) { + pr_info("p1 read not end res=%d, request=%d\n", + res, first_section_len); + copy_ok = 0; + + p_userdata_rec->rec_len -= + first_section_len - res; + p_userdata_rec->rec_start += + first_section_len - res; + puserdata_para->data_size = + first_section_len - res; + } else { + res = (u32)copy_to_user( + (void *)(pdest_buf+first_section_len), + (void *)p_userdata_mgr->data_buf, + data_size - first_section_len); + if (res) { + pr_info("p2 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + p_userdata_rec->rec_len -= + data_size - res; + p_userdata_rec->rec_start = + data_size - first_section_len - res; + puserdata_para->data_size = + data_size - res; + } + } else { + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + data_size); + if (res) { + pr_info("p3 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start += data_size - res; + puserdata_para->data_size = data_size - res; + } + + if (copy_ok) { + p_userdata_mgr->read_index++; + if (p_userdata_mgr->read_index >= USERDATA_FIFO_NUM) + p_userdata_mgr->read_index = 0; + } + } else { + /* dvb user data buffer is not enought + to copy the whole recored. 
*/ + data_size = puserdata_para->buf_len; + if (rec_data_start + data_size + > p_userdata_mgr->data_buf_end) { + int first_section_len; + + first_section_len = p_userdata_mgr->buf_len + - p_userdata_rec->rec_start; + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + first_section_len); + if (res) { + pr_info("p4 read not end res=%d, request=%d\n", + res, first_section_len); + copy_ok = 0; + p_userdata_rec->rec_len -= + first_section_len - res; + p_userdata_rec->rec_start += + first_section_len - res; + puserdata_para->data_size = + first_section_len - res; + } else { + /* first secton copy is ok*/ + res = (u32)copy_to_user( + (void *)(pdest_buf+first_section_len), + (void *)p_userdata_mgr->data_buf, + data_size - first_section_len); + if (res) { + pr_info("p5 read not end res=%d, request=%d\n", + res, + data_size - first_section_len); + copy_ok = 0; + } + + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start = + data_size - first_section_len - res; + puserdata_para->data_size = data_size - res; + } + } else { + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + data_size); + if (res) { + pr_info("p6 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start += data_size - res; + puserdata_para->data_size = data_size - res; + } + + if (copy_ok) { + p_userdata_mgr->read_index++; + if (p_userdata_mgr->read_index + >= USERDATA_FIFO_NUM) + p_userdata_mgr->read_index = 0; + } + + } + puserdata_para->meta_info = p_userdata_rec->meta_info; + + if (p_userdata_mgr->read_index <= p_userdata_mgr->write_index) + puserdata_para->meta_info.records_in_que = + p_userdata_mgr->write_index - + p_userdata_mgr->read_index; + else + puserdata_para->meta_info.records_in_que = + p_userdata_mgr->write_index + + USERDATA_FIFO_NUM - + p_userdata_mgr->read_index; + + puserdata_para->version = (0<<24|0<<16|0<<8|1); + + 
mutex_unlock(&userdata_mutex); + + return 1; +} + +static void vh264_wakeup_userdata_poll(struct vdec_s *vdec) +{ + amstream_wakeup_userdata_poll(vdec); +} + +static void vh264_reset_userdata_fifo(struct vdec_s *vdec, int bInit) +{ + mutex_lock(&userdata_mutex); + + if (p_userdata_mgr) { + pr_info("h264_reset_userdata_fifo: bInit: %d, ri: %d, wi: %d\n", + bInit, p_userdata_mgr->read_index, + p_userdata_mgr->write_index); + p_userdata_mgr->read_index = 0; + p_userdata_mgr->write_index = 0; + + if (bInit) + p_userdata_mgr->last_wp = 0; + } + + mutex_unlock(&userdata_mutex); +} + +static void h264_reset_qos_mgr(void) +{ + int i; + + pr_info("h264_reset_qos_mgr\n"); + + INIT_LIST_HEAD(&free_qos_nodes_list); + INIT_LIST_HEAD(&picture_qos_list); + + for (i = 0; i < MAX_FREE_QOS_NODES; i++) { + free_nodes[i].b_offset = 0xFFFFFFFF; + + list_add_tail(&free_nodes[i].list, + &free_qos_nodes_list); + } +} + + +static void load_qos_data(int pic_number, uint32_t b_offset) +{ + uint32_t blk88_y_count; + uint32_t blk88_c_count; + uint32_t blk22_mv_count; + uint32_t rdata32; + int32_t mv_hi; + int32_t mv_lo; + uint32_t rdata32_l; + uint32_t mvx_L0_hi; + uint32_t mvy_L0_hi; + uint32_t mvx_L1_hi; + uint32_t mvy_L1_hi; + int64_t value; + uint64_t temp_value; +/* +#define DEBUG_QOS +*/ +#define SUPPORT_NODE + +#ifdef SUPPORT_NODE + struct h264_qos_data_node_t *node; + struct h264_qos_data_node_t *tmp; + int bFoundNode = 0; + + node = NULL; + if (!list_empty(&picture_qos_list)) { + list_for_each_entry_safe(node, tmp, &picture_qos_list, list) { + if (node->b_offset == b_offset) { + bFoundNode = 1; + break; + } + } + } + /* + pr_info("bFoundNode = %d, node:0x%p\n", bFoundNode, node); + */ + if (!bFoundNode) { + if (!list_empty(&free_qos_nodes_list)) { + node = list_entry( + free_qos_nodes_list.next, + struct h264_qos_data_node_t, + list); + /* + pr_info("get a node:0x%p\n", node); + */ + } else { + pr_info("there is no qos data node avaible\n"); + + return; + } + } + + node->b_offset = 
b_offset; + node->poc = pic_number; + + node->max_mv = 0; + node->avg_mv = 0; + node->min_mv = 0; + + node->max_skip = 0; + node->avg_skip = 0; + node->min_skip = 0; + + node->max_qp = 0; + node->avg_qp = 0; + node->min_qp = 0; +#endif + + + + + + + /* set rd_idx to 0 */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, 0); + blk88_y_count = READ_VREG(VDEC_PIC_QUALITY_DATA); + if (blk88_y_count == 0) { +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8)); + +#ifdef SUPPORT_NODE + list_move(&node->list, &picture_qos_list); +#endif + return; + } + /* qp_y_sum */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_y_count, + rdata32, blk88_y_count); +#endif +#ifdef SUPPORT_NODE + node->avg_qp = rdata32/blk88_y_count; +#endif + + /* intra_y_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif + /* skipped_y_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif +#ifdef SUPPORT_NODE + node->avg_skip = rdata32*100/blk88_y_count; +#endif + /* coeff_non_zero_y_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_y_count*1)), + '%', rdata32); +#endif + /* blk66_c_count */ + blk88_c_count = READ_VREG(VDEC_PIC_QUALITY_DATA); + if (blk88_c_count == 0) { +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8)); + +#ifdef SUPPORT_NODE + list_move(&node->list, 
&picture_qos_list); +#endif + return; + } + /* qp_c_sum */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_c_count, + rdata32, blk88_c_count); +#endif + /* intra_c_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* skipped_cu_c_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* coeff_non_zero_c_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_c_count*1)), + '%', rdata32); +#endif + + /* 1'h0, qp_c_max[6:0], 1'h0, qp_c_min[6:0], + 1'h0, qp_y_max[6:0], 1'h0, qp_y_min[6:0] */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y QP min : %d\n", + pic_number, (rdata32>>0)&0xff); +#endif +#ifdef SUPPORT_NODE + node->min_qp = (rdata32>>0)&0xff; +#endif + +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y QP max : %d\n", + pic_number, (rdata32>>8)&0xff); +#endif +#ifdef SUPPORT_NODE + node->max_qp = (rdata32>>8)&0xff; +#endif + +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C QP min : %d\n", + pic_number, (rdata32>>16)&0xff); + pr_info(" [Picture %d Quality] C QP max : %d\n", + pic_number, (rdata32>>24)&0xff); +#endif + + /* blk22_mv_count */ + blk22_mv_count = READ_VREG(VDEC_PIC_QUALITY_DATA); + if (blk22_mv_count == 0) { +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] NO MV Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8)); +#ifdef SUPPORT_NODE + list_move(&node->list, &picture_qos_list); +#endif + return; + } + /* 
mvy_L1_count[39:32], mvx_L1_count[39:32], + mvy_L0_count[39:32], mvx_L0_count[39:32] */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + /* should all be 0x00 or 0xff */ +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MV AVG High Bits: 0x%X\n", + pic_number, rdata32); +#endif + mvx_L0_hi = ((rdata32>>0)&0xff); + mvy_L0_hi = ((rdata32>>8)&0xff); + mvx_L1_hi = ((rdata32>>16)&0xff); + mvy_L1_hi = ((rdata32>>24)&0xff); + + /* mvx_L0_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvx_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvx_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + value = div_s64(value, blk22_mv_count); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L0 AVG : %d (%lld/%d)\n", + pic_number, (int)(value), + value, blk22_mv_count); +#endif +#ifdef SUPPORT_NODE + node->avg_mv = value; +#endif + + /* mvy_L0_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvy_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvy_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVY_L0 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvx_L1_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvx_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvx_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvy_L1_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvy_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvy_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + pr_info(" [Picture %d 
Quality] MVY_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* {mvx_L0_max, mvx_L0_min} // format : {sign, abs[14:0]} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L0 MAX : %d\n", + pic_number, mv_hi); +#endif +#ifdef SUPPORT_NODE + node->max_mv = mv_hi; +#endif + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L0 MIN : %d\n", + pic_number, mv_lo); +#endif +#ifdef SUPPORT_NODE + node->min_mv = mv_lo; +#endif + +#ifdef DEBUG_QOS + /* {mvy_L0_max, mvy_L0_min} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + pr_info(" [Picture %d Quality] MVY_L0 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + pr_info(" [Picture %d Quality] MVY_L0 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvx_L1_max, mvx_L1_min} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + pr_info(" [Picture %d Quality] MVX_L1 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + pr_info(" [Picture %d Quality] MVX_L1 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvy_L1_max, mvy_L1_min} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + pr_info(" [Picture %d Quality] MVY_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + pr_info(" [Picture %d Quality] MVY_L1 MIN : %d\n", + pic_number, mv_lo); +#endif + + rdata32 = READ_VREG(VDEC_PIC_QUALITY_CTRL); +#ifdef DEBUG_QOS + pr_info(" [Picture 
%d Quality] After Read : VDEC_PIC_QUALITY_CTRL : 0x%x\n",
		pic_number, rdata32);
#endif
	/* reset all counts */
	WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8));
#ifdef SUPPORT_NODE
	/* stats captured for this picture: publish the node so
	 * search_qos_node() can match it later by byte offset
	 */
	list_move(&node->list, &picture_qos_list);
#endif
}

/*
 * search_qos_node() - look up the per-picture QoS stats cached for the
 * picture whose bitstream byte offset is @b_offset and copy them into
 * @picture_qos (mv/skip/qp min-avg-max).
 *
 * On a hit the node is invalidated (b_offset = 0xFFFFFFFF) and moved
 * back to free_qos_nodes_list for reuse.  On a miss @picture_qos is
 * left untouched.
 */
void search_qos_node(struct vframe_qos_s *picture_qos, uint32_t b_offset)
{
	struct h264_qos_data_node_t *node;
	struct h264_qos_data_node_t *tmp;

	if (!list_empty(&picture_qos_list)) {
		/* _safe variant: we list_move() the matched node while
		 * iterating
		 */
		list_for_each_entry_safe(node, tmp, &picture_qos_list, list) {
			if (node->b_offset == b_offset) {

				picture_qos->avg_mv = node->avg_mv;
				picture_qos->min_mv = node->min_mv;
				picture_qos->max_mv = node->max_mv;

				picture_qos->avg_skip = node->avg_skip;
				picture_qos->min_skip = node->min_skip;
				picture_qos->max_skip = node->max_skip;

				picture_qos->avg_qp = node->avg_qp;
				picture_qos->min_qp = node->min_qp;
				picture_qos->max_qp = node->max_qp;

#if 0
				pr_info("POC:%d, mv: max:%d, avg:%d, min:%d\n"
					"qp: max:%d, avg:%d, min:%d\n"
					"skip: max:%d, avg:%d, min:%d\n",
					node->poc,
					picture_qos->max_mv,
					picture_qos->avg_mv,
					picture_qos->min_mv,
					picture_qos->max_qp,
					picture_qos->avg_qp,
					picture_qos->min_qp,
					picture_qos->max_skip,
					picture_qos->avg_skip,
					picture_qos->min_skip);
#endif
				/* mark consumed and recycle the node */
				node->b_offset = 0xFFFFFFFF;
				list_move(&node->list, &free_qos_nodes_list);

				break;
			}
		}
	}
}

/*
 * qos_do_work() - workqueue handler for a firmware QoS notification.
 *
 * Reads the picture's POC (AV_SCRATCH_M) and bitstream byte offset
 * (AV_SCRATCH_L) posted by the decoder firmware, harvests the QoS
 * counters via load_qos_data(), then clears AV_SCRATCH_0
 * (NOTE(review): presumably the ack back to firmware — confirm against
 * the ucode protocol).
 */
static void qos_do_work(struct work_struct *work)
{
	uint32_t poc;
	uint32_t bOffset;


	poc = READ_VREG(AV_SCRATCH_M);
	bOffset = READ_VREG(AV_SCRATCH_L);
/*
	pr_info("poc:%d, bOffset:0x%x\n", poc, bOffset);
*/
	load_qos_data(poc, bOffset);


	WRITE_VREG(AV_SCRATCH_0, 0);
}

/*
 * userdata_push_do_work() - workqueue handler that forwards SEI
 * ITU-T T.35 userdata to the userdata consumer.
 *
 * Builds a userdata_meta_info_t from firmware scratch registers:
 *  - AV_SCRATCH_M: POC (27-bit, sign-extended when the top pattern is
 *    0x7FF), slice type (bits 29-31), picture struct (bits 27-28);
 *  - AV_SCRATCH_L: bitstream offset used to pick out the frame PTS;
 *  - AV_SCRATCH_J: SEI write pointer (hi 16 bits) and data length
 *    (low 15 bits).
 * Finishes by clearing AV_SCRATCH_J to acknowledge the firmware.
 */
static void userdata_push_do_work(struct work_struct *work)
{
	unsigned int sei_itu35_flags;
	unsigned int sei_itu35_wp;
	unsigned int sei_itu35_data_length;

	struct userdata_meta_info_t meta_info;
	u32 offset, pts;
	u64 pts_us64 = 0;
	u32 slice_type;
	u32 reg;
	u32 poc_number;
	u32 picture_struct;

	memset(&meta_info, 0,
		sizeof(meta_info));

	meta_info.duration = frame_dur;

	reg = READ_VREG(AV_SCRATCH_M);
	poc_number = reg & 0x7FFFFFF;
	/* sign-extend negative 27-bit POCs */
	if ((poc_number >> 16) == 0x7FF)
		poc_number = (reg & 0x7FFFFFF) - 0x8000000;

	slice_type = (reg >> 29) & 0x7;
	switch (slice_type) {
	case SLICE_TYPE_I:
		meta_info.flags |= 1<<7;
		break;
	case SLICE_TYPE_P:
		meta_info.flags |= 3<<7;
		break;
	case SLICE_TYPE_B:
		meta_info.flags |= 2<<7;
		break;
	}
	meta_info.poc_number = poc_number;
	picture_struct = (reg >> 27) & 0x3;

	meta_info.flags |= (VFORMAT_H264 << 3) | (picture_struct << 12);


	offset = READ_VREG(AV_SCRATCH_L);

	/* PTS lookup by stream offset; on failure mark vpts invalid */
	if (pts_pickout_offset_us64
		(PTS_TYPE_VIDEO, offset, &pts, 0, &pts_us64) != 0) {
		pr_info("pts pick outfailed, offset:0x%x\n", offset);
		pts = -1;
		meta_info.vpts_valid = 0;
	} else
		meta_info.vpts_valid = 1;
	meta_info.vpts = pts;
/*
	pr_info("offset:0x%x, vpts:0x%x, slice:%d, poc:%d\n",
		offset, pts, slice_type,
		poc_number);
*/
	sei_itu35_flags = READ_VREG(AV_SCRATCH_J);
	sei_itu35_wp = (sei_itu35_flags >> 16) & 0xffff;
	sei_itu35_data_length = sei_itu35_flags & 0x7fff;

	if (enable_userdata_debug)
		udr_dump_data(sei_itu35_wp,
			sei_itu35_data_length,
			pts, poc_number);


	vh264_add_userdata(meta_info, sei_itu35_wp);

	/* ack the firmware so it can post the next SEI payload */
	WRITE_VREG(AV_SCRATCH_J, 0);
}


/*
 * set_frame_info() - fill a vframe with the decoder's current frame
 * geometry, duration, aspect ratio and rotation (all from globals).
 */
static void set_frame_info(struct vframe_s *vf)
{
	vf->width = frame_width;
	vf->height = frame_height;
	vf->duration = frame_dur;
	vf->ratio_control =
		(min(h264_ar, (u32) DISP_RATIO_ASPECT_RATIO_MAX)) <<
		DISP_RATIO_ASPECT_RATIO_BIT;
	vf->orientation = vh264_rotation;
	vf->flag = 0;

#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER_3D_PROCESS
	/* NOTE(review): trans_fmt is forced to 0 right before being
	 * tested, so both 3D branches below are currently dead code —
	 * looks like a leftover; confirm intent
	 */
	vf->trans_fmt = 0;
	if ((vf->trans_fmt == TVIN_TFMT_3D_LRF) ||
		(vf->trans_fmt == TVIN_TFMT_3D_LA)) {
		vf->left_eye.start_x = 0;
		vf->left_eye.start_y = 0;
		vf->left_eye.width = frame_width / 2;
		vf->left_eye.height = frame_height;

		vf->right_eye.start_x = 0;
		vf->right_eye.start_y = 0;
		vf->right_eye.width =
frame_width / 2;
		vf->right_eye.height = frame_height;
	} else if ((vf->trans_fmt == TVIN_TFMT_3D_LRH_OLER) ||
		(vf->trans_fmt == TVIN_TFMT_3D_TB)) {
		vf->left_eye.start_x = 0;
		vf->left_eye.start_y = 0;
		vf->left_eye.width = frame_width / 2;
		vf->left_eye.height = frame_height;

		vf->right_eye.start_x = 0;
		vf->right_eye.start_y = 0;
		vf->right_eye.width = frame_width / 2;
		vf->right_eye.height = frame_height;
	}
#endif

}

#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER
/*
 * vh264_ppmgr_reset() - notify the vframe receiver of a provider reset
 * and re-run the decoder's local init.
 */
static void vh264_ppmgr_reset(void)
{
	vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_RESET, NULL);

	vh264_local_init();

	pr_info("vh264dec: vf_ppmgr_reset\n");
}
#endif

/*
 * get_max_dpb_size() - derive the DPB frame count for a given H.264
 * level and picture size in macroblocks.
 *
 * NOTE(review): the per-level constants appear to be 10x the MaxDPB
 * values (in 1024-byte units) from H.264 spec Table A-1; the factor 10
 * cancels against the "* 10" in the frame-size computation below —
 * confirm against the spec.
 *
 * Returns the frame count clamped to 16, or 0 for an unknown level.
 */
static int get_max_dpb_size(int level_idc, int mb_width, int mb_height)
{
	int size, r;

	switch (level_idc) {
	case 10:
		r = 1485;
		break;
	case 11:
		r = 3375;
		break;
	case 12:
	case 13:
	case 20:
		r = 8910;
		break;
	case 21:
		r = 17820;
		break;
	case 22:
	case 30:
		r = 30375;
		break;
	case 31:
		r = 67500;
		break;
	case 32:
		r = 76800;
		break;
	case 40:
	case 41:
	case 42:
		r = 122880;
		break;
	case 50:
		r = 414000;
		break;
	case 51:
	case 52:
		r = 691200;
		break;
	default:
		return 0;
	}
	/* bytes per 4:2:0 frame (luma + chroma = 1.5 * 256 bytes/MB),
	 * pre-scaled by 10 to match the constants above
	 */
	size = (mb_width * mb_height +
		(mb_width * mb_height / 2)) * 256 * 10;
	r = (r * 1024 + size-1) / size;	/* round up */
	r = min(r, 16);
	/*pr_info("max_dpb %d size:%d\n", r, size);*/
	return r;
}
/*
 * vh264_set_params() - workqueue handler for firmware command 1
 * (sequence parameters ready).
 *
 * Parses the SPS-derived values the firmware posted in AV_SCRATCH_1..H
 * (mb geometry, seq_info, aspect ratio, timing, level, VUI video
 * signal), sizes and allocates the reference/canvas buffers, and
 * finally writes the DPB sizing back to AV_SCRATCH_0 to resume the
 * firmware.  Runs with vh264_mutex held; every early-error path must
 * unlock it.
 */
static void vh264_set_params(struct work_struct *work)
{
	int aspect_ratio_info_present_flag, aspect_ratio_idc;
	int max_dpb_size, actual_dpb_size, max_reference_size;
	int i, mb_mv_byte, ret;
	unsigned long addr;
	unsigned int post_canvas, buf_size, endian;
	unsigned int frame_mbs_only_flag;
	unsigned int chroma_format_idc, chroma444, video_signal;
	unsigned int crop_infor, crop_bottom, crop_right, level_idc;
	if (!atomic_read(&vh264_active))
		return;
	mutex_lock(&vh264_mutex);
	if (vh264_stream_switching_state == SWITCHING_STATE_ON_CMD1)
	vh264_stream_switching_state = SWITCHING_STATE_ON_CMD1_PENDING;
	post_canvas = get_post_canvas();
	clk_adj_frame_count = 0;
	/* set to max decoder clock rate at the beginning */

	if (vdec_is_support_4k())
		vdec_source_changed(VFORMAT_H264, 3840, 2160, 60);
	else
		vdec_source_changed(VFORMAT_H264, 1920, 1080, 29);

	/* pull the SPS-derived parameters the firmware left in the
	 * scratch registers
	 */
	timing_info_present_flag = 0;
	mb_width = READ_VREG(AV_SCRATCH_1);
	seq_info = READ_VREG(AV_SCRATCH_2);
	aspect_ratio_info = READ_VREG(AV_SCRATCH_3);
	num_units_in_tick = READ_VREG(AV_SCRATCH_4);
	time_scale = READ_VREG(AV_SCRATCH_5);
	level_idc = READ_VREG(AV_SCRATCH_A);
	/* remember the last non-zero level so a stream that omits it
	 * keeps the previous value
	 */
	if (level_idc > 0)
		saved_idc_level = level_idc;
	else if (saved_idc_level > 0)
		level_idc = saved_idc_level;
	video_signal = READ_VREG(AV_SCRATCH_H);
	video_signal_from_vui =
		((video_signal & 0xffff) << 8) |
		((video_signal & 0xff0000) >> 16) |
		((video_signal & 0x3f000000));
/*
 * pr_info("video_signal_type_present_flag 0x%x\n",
 * (video_signal_from_vui >> 29) & 1);
 * pr_info("video_format 0x%x\n",
 * (video_signal_from_vui >> 26) & 7);
 * pr_info("video_full_range_flag 0x%x\n",
 * (video_signal_from_vui >> 25) & 1);
 * pr_info("color_description_present_flag 0x%x\n",
 * (video_signal_from_vui >> 24) & 1);
 * pr_info("color_primaries 0x%x\n",
 * (video_signal_from_vui >> 16) & 0xff);
 * pr_info("transfer_characteristic 0x%x\n",
 * (video_signal_from_vui >> 8) & 0xff);
 * pr_info("matrix_coefficient 0x%x\n",
 * video_signal_from_vui & 0xff);
 */

	/* AV_SCRATCH_1 is packed: [7:0] mb_width, [23:8] mb_total,
	 * [30:24] max_reference_size, [31] mv-buffer-size select
	 */
	mb_total = (mb_width >> 8) & 0xffff;
	max_reference_size = (mb_width >> 24) & 0x7f;
	mb_mv_byte = (mb_width & 0x80000000) ?
		24 : 96;
	if (ucode_type == UCODE_IP_ONLY_PARAM)
		mb_mv_byte = 96;
	mb_width = mb_width & 0xff;
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXTVBB) {
		/* 8-bit field overflowed: a 4096-wide stream reads as 0 */
		if (!mb_width && mb_total)
			mb_width = 256;
	}
	if (mb_width)
		mb_height = mb_total / mb_width;
	last_duration = 0;
	/* AV_SCRATCH_2
	 * bit 15: frame_mbs_only_flag
	 * bit 13-14: chroma_format_idc
	 */
	frame_mbs_only_flag = (seq_info >> 15) & 0x01;
	chroma_format_idc = (seq_info >> 13) & 0x03;
	chroma444 = (chroma_format_idc == 3) ? 1 : 0;

	/* @AV_SCRATCH_6.31-16 = (left << 8 | right ) << 1
	 * @AV_SCRATCH_6.15-0 = (top << 8 | bottom ) <<
	 * (2 - frame_mbs_only_flag)
	 */
	crop_infor = READ_VREG(AV_SCRATCH_6);
	crop_bottom = (crop_infor & 0xff) >> (2 - frame_mbs_only_flag);
	crop_right = ((crop_infor >> 16) & 0xff) >> (2 - frame_mbs_only_flag);

	/* if width or height from outside is not equal to mb, then use mb */
	/* add: for seeking stream with other resolution */
	if ((last_mb_width && (last_mb_width != mb_width))
		|| (mb_width != ((frame_width + 15) >> 4)))
		frame_width = 0;
	if ((last_mb_height && (last_mb_height != mb_height))
		|| (mb_height != ((frame_height + 15) >> 4)))
		frame_height = 0;
	last_mb_width = mb_width;
	last_mb_height = mb_height;

	if ((frame_width == 0) || (frame_height == 0) || crop_infor) {
		/* derive the display size from macroblocks minus the
		 * SPS cropping window (crop units depend on chroma
		 * format and frame/field coding)
		 */
		frame_width = mb_width << 4;
		frame_height = mb_height << 4;
		if (frame_mbs_only_flag) {
			frame_height =
				frame_height - (2 >> chroma444) *
				min(crop_bottom,
				(unsigned int)((8 << chroma444) - 1));
			frame_width =
				frame_width - (2 >> chroma444) * min(crop_right,
				(unsigned int)((8 << chroma444) - 1));
		} else {
			frame_height =
				frame_height - (4 >> chroma444) *
				min(crop_bottom,
				(unsigned int)((8 << chroma444) - 1));
			frame_width =
				frame_width - (4 >> chroma444) * min(crop_right,
				(unsigned int)((8 << chroma444) - 1));
		}
#if 0
		pr_info
		("frame_mbs_only_flag %d, crop_bottom %d, frame_height %d, ",
		frame_mbs_only_flag, crop_bottom, frame_height);
		pr_info
		("mb_height %d,crop_right %d, frame_width %d, mb_width %d\n",
		mb_height, crop_right, frame_width, mb_width);
#endif
		if (frame_height == 1088)
			frame_height = 1080;
	}

	/* round mb geometry up to a multiple of 4 */
	mb_width = (mb_width + 3) & 0xfffffffc;
	mb_height = (mb_height + 3) & 0xfffffffc;
	mb_total = mb_width * mb_height;

	/*max_reference_size <= max_dpb_size <= actual_dpb_size*/
	is_4k = (mb_total > 8160) ? true:false;


	max_dpb_size = get_max_dpb_size(level_idc, mb_width, mb_height);
	if (max_dpb_size < max_reference_size)
		max_dpb_size = max_reference_size;
	/* on memory-constrained GXTVBB+ systems fall back to the
	 * minimum workable DPB instead of the level-derived one
	 */
	if (max_dpb_size > 15
		&& get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXTVBB
		&& (codec_mm_get_total_size() < 80 * SZ_1M)) {
		actual_dpb_size
			= max_reference_size + dpb_size_adj;
		if (actual_dpb_size > VF_BUF_NUM)
			actual_dpb_size = VF_BUF_NUM;
	} else {
		actual_dpb_size = max_dpb_size + dpb_size_adj;
		actual_dpb_size = min(actual_dpb_size, VF_BUF_NUM);
	}
	max_reference_size++;
	pr_info("actual_dpb_size %d max_dpb_size %d max_ref %d\n",
		actual_dpb_size, max_dpb_size,
		max_reference_size);
	/* co-located mv buffer, one mb_mv_byte slab per reference */
	buf_size = mb_total * mb_mv_byte * max_reference_size;

	ret = decoder_bmmu_box_alloc_buf_phy(mm_blk_handle, 1,
		buf_size, DRIVER_NAME, &addr);

	if (ret < 0) {
		fatal_error_flag =
			DECODER_FATAL_ERROR_NO_MEM;
		vh264_running = 0;
		mutex_unlock(&vh264_mutex);
		return;
	}

	WRITE_VREG(AV_SCRATCH_1, addr);
	WRITE_VREG(AV_SCRATCH_3, post_canvas);
	WRITE_VREG(AV_SCRATCH_4, addr + buf_size);

	if (!(READ_VREG(AV_SCRATCH_F) & 0x1)) {
		/* allocate and map one NV21 (Y + interleaved UV)
		 * buffer per DPB slot
		 */
		for (i = 0; i < actual_dpb_size; i++) {
#ifdef DOUBLE_WRITE
			int page_count =
				PAGE_ALIGN((mb_total << 8) + (mb_total
				<< 7) + (mb_total << 6) +
				(mb_total << 5)) / PAGE_SIZE;
#else
			int page_count =
				PAGE_ALIGN((mb_total << 8) +
				(mb_total << 7)) / PAGE_SIZE;
#endif

			ret = decoder_bmmu_box_alloc_buf_phy(mm_blk_handle,
				VF_BUFFER_IDX(i),
				page_count << PAGE_SHIFT,
				DRIVER_NAME, &buffer_spec[i].phy_addr);

			if (ret < 0) {
				buffer_spec[i].alloc_count = 0;
				fatal_error_flag =
					DECODER_FATAL_ERROR_NO_MEM;
				vh264_running = 0;
				mutex_unlock(&vh264_mutex);
				return;
			}

			addr = buffer_spec[i].phy_addr;
			buffer_spec[i].alloc_count = page_count;

			/* slots 0-21 use fixed canvas ids 128+, later
			 * slots use the 2*(i-21)+4 id scheme
			 */
			if (i <= 21) {
				buffer_spec[i].y_addr = addr;
				addr += mb_total << 8;
				buffer_spec[i].u_addr = addr;
				buffer_spec[i].v_addr = addr;
				addr += mb_total << 7;
				vfbuf_use[i] = 0;

				buffer_spec[i].y_canvas_index = 128 + i * 2;
				buffer_spec[i].u_canvas_index = 128 + i * 2 + 1;
				buffer_spec[i].v_canvas_index = 128 + i * 2 + 1;

				buffer_spec[i].y_canvas_width = mb_width << 4;
				buffer_spec[i].y_canvas_height = mb_height << 4;
				buffer_spec[i].u_canvas_width = mb_width << 4;
				buffer_spec[i].u_canvas_height = mb_height << 4;
				buffer_spec[i].v_canvas_width = mb_width << 4;
				buffer_spec[i].v_canvas_height = mb_height << 4;

				endian = (canvas_mode == CANVAS_BLKMODE_LINEAR)?7:0;
				config_cav_lut_ex(128 + i * 2,
					buffer_spec[i].y_addr,
					mb_width << 4, mb_height << 4,
					CANVAS_ADDR_NOWRAP,
					canvas_mode, endian, VDEC_1);
				config_cav_lut_ex(128 + i * 2 + 1,
					buffer_spec[i].u_addr,
					mb_width << 4, mb_height << 3,
					CANVAS_ADDR_NOWRAP,
					canvas_mode, endian, VDEC_1);
				WRITE_VREG(ANC0_CANVAS_ADDR + i,
					spec2canvas(&buffer_spec[i]));
			} else {
				buffer_spec[i].y_canvas_index =
					2 * (i - 21) + 4;
				buffer_spec[i].y_addr = addr;
				addr += mb_total << 8;
				buffer_spec[i].u_canvas_index =
					2 * (i - 21) + 5;
				buffer_spec[i].v_canvas_index =
					2 * (i - 21) + 5;
				buffer_spec[i].u_addr = addr;
				addr += mb_total << 7;
				vfbuf_use[i] = 0;

				buffer_spec[i].y_canvas_width = mb_width << 4;
				buffer_spec[i].y_canvas_height = mb_height << 4;
				buffer_spec[i].u_canvas_width = mb_width << 4;
				buffer_spec[i].u_canvas_height = mb_height << 4;
				buffer_spec[i].v_canvas_width = mb_width << 4;
				buffer_spec[i].v_canvas_height = mb_height << 4;

				spec_set_canvas(&buffer_spec[i]
					, mb_width << 4, mb_height << 4);
				WRITE_VREG(ANC0_CANVAS_ADDR + i
					, spec2canvas(&buffer_spec[i]));
			}
		}
	} else {
		fatal_error_flag =
			DECODER_FATAL_ERROR_NO_MEM;
		vh264_running = 0;
		mutex_unlock(&vh264_mutex);
		pr_err("never be here!!\n");
		return;
	}

	timing_info_present_flag = seq_info & 0x2;
	fixed_frame_rate_flag = 0;
	aspect_ratio_info_present_flag = seq_info & 0x1;
	aspect_ratio_idc = (seq_info >> 16) & 0xff;

	if (timing_info_present_flag) {
		fixed_frame_rate_flag = seq_info & 0x40;

		/* derive frame duration (in 1/96000 s units) from VUI
		 * timing when it is plausible (>= 7.5 fps) and either
		 * no outside sync or no duration yet
		 */
		if (((num_units_in_tick * 120) >= time_scale
			&& ((!sync_outside) || (!frame_dur))) &&
			num_units_in_tick
			&& time_scale) {
			if (use_idr_framerate || !frame_dur
				|| !duration_from_pts_done || vh264_running) {
				u32 frame_dur_es =
					div_u64(96000ULL * 2 *
					num_units_in_tick,
					time_scale);

				/* hack to avoid use ES frame duration
				 * when it's half of the rate from
				 * system info
				 */
				/* sometimes the encoder is given a wrong
				 * frame rate but the system side information
				 * is more reliable
				 */
				if ((frame_dur * 2) != frame_dur_es) {
					frame_dur = frame_dur_es;
					if (fr_hint_status == VDEC_NEED_HINT) {
						schedule_work(&notify_work);
						fr_hint_status = VDEC_HINTED;
					}
				}
			}
		}
	} else
		pr_info("H.264: timing_info not present\n");

	/* compute h264_ar = 256 * display_height / display_width,
	 * folding in the SPS sample aspect ratio (Table E-1 indices)
	 */
	if (aspect_ratio_info_present_flag) {
		if (aspect_ratio_idc == EXTEND_SAR) {
			h264_ar =
				div_u64(256ULL * (aspect_ratio_info >> 16) *
				frame_height,
				(aspect_ratio_info & 0xffff) *
				frame_width);
		} else {
			/* pr_info("v264dec: aspect_ratio_idc = %d\n",
			 * aspect_ratio_idc);
			 */

			switch (aspect_ratio_idc) {
			case 1:
				h264_ar = 0x100 * frame_height / frame_width;
				break;
			case 2:
				h264_ar = 0x100 * frame_height * 11 /
					(frame_width * 12);
				break;
			case 3:
				h264_ar = 0x100 * frame_height * 11 /
					(frame_width * 10);
				break;
			case 4:
				h264_ar = 0x100 * frame_height * 11 /
					(frame_width * 16);
				break;
			case 5:
				h264_ar = 0x100 * frame_height * 33 /
					(frame_width * 40);
				break;
			case 6:
				h264_ar = 0x100 * frame_height * 11 /
					(frame_width * 24);
				break;
			case 7:
				h264_ar = 0x100 * frame_height * 11 /
					(frame_width * 20);
				break;
			case 8:
				h264_ar = 0x100 * frame_height * 11 /
					(frame_width * 32);
				break;
			case 9:
				h264_ar = 0x100 * frame_height * 33 /
					(frame_width * 80);
				break;
			case 10:
				h264_ar = 0x100 * frame_height * 11 /
					(frame_width * 18);
				break;
			case 11:
				h264_ar = 0x100 * frame_height * 11 /
					(frame_width * 15);
				break;
			case 12:
				h264_ar = 0x100 * frame_height * 33 /
					(frame_width * 64);
				break;
			case 13:
				h264_ar = 0x100 * frame_height * 99 /
					(frame_width * 160);
				break;
			case 14:
				h264_ar = 0x100 * frame_height * 3 /
					(frame_width * 4);
				break;
			case 15:
				h264_ar = 0x100 * frame_height * 2 /
					(frame_width * 3);
				break;
			case 16:
				h264_ar = 0x100 * frame_height * 1 /
					(frame_width * 2);
				break;
			default:
				if (vh264_ratio >> 16) {
					h264_ar = (frame_height *
						(vh264_ratio & 0xffff) *
						0x100 +
						((vh264_ratio >> 16) *
						frame_width / 2)) /
						((vh264_ratio >> 16) *
						frame_width);
				} else {
					h264_ar = frame_height * 0x100 /
						frame_width;
				}
				break;
			}
		}
	} else {
		pr_info("v264dec: aspect_ratio not available from source\n");
		if (vh264_ratio >> 16) {
			/* high 16 bit is width, low 16 bit is height */
			h264_ar =
				((vh264_ratio & 0xffff) * frame_height * 0x100
				+ (vh264_ratio >> 16) * frame_width / 2) /
				((vh264_ratio >> 16) * frame_width);
		} else
			h264_ar = frame_height * 0x100 / frame_width;
	}

	/* hand the DPB sizing back to the firmware — this resumes
	 * decoding after command 1
	 */
	WRITE_VREG(AV_SCRATCH_0,
		(max_reference_size << 24) | (actual_dpb_size << 16) |
		(max_dpb_size << 8));
	if (vh264_stream_switching_state != SWITCHING_STATE_OFF) {
		vh264_stream_switching_state = SWITCHING_STATE_OFF;
		pr_info("Leaving switching mode.\n");
	}
	mutex_unlock(&vh264_mutex);
}

/*
 * pts_inc_by_duration() - next PTS = last_pts + frame duration, with
 * the sub-90kHz remainder carried in units of 1/96000 s (carry at 96).
 * Optionally returns the new PTS and remainder through the pointers.
 */
static unsigned int pts_inc_by_duration(
	unsigned int *new_pts, unsigned int *new_pts_rem)
{
	unsigned int r, rem;

	r = last_pts + DUR2PTS(frame_dur);
	rem = last_pts_remainder + DUR2PTS_REM(frame_dur);

	if (rem >= 96) {
		r++;
		rem -= 96;
	}

	if (new_pts)
		*new_pts = r;
	if (new_pts_rem)
		*new_pts_rem = rem;

	return r;
}
/*
 * vh264_isr_parser() - per-frame PTS bookkeeping for parser-driven
 * (sync_outside == 0) playback.
 *
 * Bootstraps the first reference PTS (recycling the frame and
 * returning false if the very first frame has no valid PTS), then
 * estimates the real frame duration from accumulated PTS deltas and
 * corrects the global frame_dur — with special-case 24<->25 fps
 * disambiguation when use_idr_framerate is set.
 *
 * Returns true when the caller should continue presenting @vf.
 */
static inline bool vh264_isr_parser(struct vframe_s *vf,
	unsigned int pts_valid, unsigned int buffer_index,
	unsigned int pts)
{
	unsigned int pts_duration = 0;

	if (h264_first_pts_ready == 0) {
		if (pts_valid == 0) {
			/* no anchor PTS yet: drop this frame back into
			 * the recycle queue
			 */
			vfbuf_use[buffer_index]++;
			vf->index = buffer_index;
			kfifo_put(&recycle_q,
				(const struct vframe_s *)vf);
			return false;
		}

		h264pts1 = pts;
		h264_pts_count = 0;
		h264_first_pts_ready = 1;
	} else {
		/* PTS went backwards past the anchor: restart the
		 * measurement window (only after enough samples)
		 */
		if (pts < h264pts1) {
			if (h264_pts_count > 24) {
				pr_info("invalid h264pts1, reset\n");
				h264pts1 = pts;
				h264_pts_count = 0;
			}
		}
		if (pts_valid && (pts > h264pts1) && (h264_pts_count > 24)
			&& (duration_from_pts_done == 0)) {
			unsigned int
			old_duration = frame_dur;
			h264pts2 = pts;

			/* average duration over the window, converted
			 * from 90 kHz PTS ticks to 1/96000 s units
			 * (x 16/15)
			 */
			pts_duration = (h264pts2 - h264pts1) * 16 /
				(h264_pts_count * 15);

			if ((pts_duration != frame_dur)
				&& (!pts_outside)) {
				if (use_idr_framerate) {
					bool pts_c_24 = close_to(pts_duration,
						RATE_24_FPS,
						RATE_CORRECTION_THRESHOLD);
					bool frm_c_25 = close_to(frame_dur,
						RATE_25_FPS,
						RATE_CORRECTION_THRESHOLD);
					bool pts_c_25 = close_to(pts_duration,
						RATE_25_FPS,
						RATE_CORRECTION_THRESHOLD);
					bool frm_c_24 = close_to(frame_dur,
						RATE_24_FPS,
						RATE_CORRECTION_THRESHOLD);
					/* stream says 24 fps but PTS says
					 * 25 (or vice versa): trust PTS
					 */
					if ((pts_c_24 && frm_c_25)
						|| (pts_c_25 && frm_c_24)) {
						pr_info
						("H.264:Correct frame dur ");
						pr_info
						(" from %d to duration based ",
						frame_dur);
						pr_info
						("on PTS %d ---\n",
						pts_duration);
						frame_dur = pts_duration;
						duration_from_pts_done = 1;
					} else if (((frame_dur < 96000 / 240)
						&& (pts_duration > 96000 / 240))
						|| (!duration_on_correcting &&
						!frm_c_25 && !frm_c_24)) {
						/* fft: if the frame rate is
						 * not regular, use the
						 * calculated rate instead
						 */
						pr_info
						("H.264:Correct frame dur ");
						pr_info
						(" from %d to duration based ",
						frame_dur);
						pr_info
						("on PTS %d ---\n",
						pts_duration);
						frame_dur = pts_duration;
						duration_on_correcting = 1;
					}
				} else {
					/* only accept the measured rate if
					 * it is close to the declared one
					 */
					if (close_to(pts_duration,
						frame_dur, 2000)) {
						frame_dur = pts_duration;
						pr_info
						("used calculate frame rate,");
						pr_info("on duration =%d\n",
							frame_dur);
					} else {
						pr_info
						("don't use calculate frame ");
						pr_info
						("rate pts_duration =%d\n",
						pts_duration);
					}
				}
			}

			if (duration_from_pts_done == 0) {
				/* two consecutive windows agree: lock the
				 * duration; otherwise restart the window
				 */
				if (close_to
					(pts_duration,
					old_duration,
					RATE_CORRECTION_THRESHOLD)) {
					pr_info
					("finished correct frame dur");
					pr_info
					(" new=%d,old_duration=%d,cnt=%d\n",
					pts_duration,
					old_duration,
					h264_pts_count);
					duration_from_pts_done = 1;
				} else { /*not the same,redo it. */
					if (!close_to(pts_duration,
						old_duration, 1000) &&
						!close_to(pts_duration,
						frame_dur, 1000) &&
						close_to(pts_duration,
						last_duration, 200)) {
						/* yangle: frame_dur must be
						 * wrong, recover it
						 */
						frame_dur = pts_duration;
					}

					pr_info
					("restart correct frame duration ");
					pr_info
					("new=%d,old_duration=%d,cnt=%d\n",
					pts_duration,
					old_duration,
					h264_pts_count);
					h264pts1 = h264pts2;
					h264_pts_count = 0;
					duration_from_pts_done = 0;
				}
			}
			last_duration = pts_duration;
		}
	}
	return true;
}

/*
 * h264_update_gvs() - refresh the global decoder status (gvs) from the
 * current frame geometry, duration, hardware error count (AV_SCRATCH_D)
 * and driver state, for the status/debug interface.
 */
static inline void h264_update_gvs(void)
{
	u32 ratio_control;
	u32 ar;

	if (gvs->frame_height != frame_height) {
		gvs->frame_width = frame_width;
		gvs->frame_height = frame_height;
	}
	if (gvs->frame_dur != frame_dur) {
		gvs->frame_dur = frame_dur;
		/* frame_dur is in 1/96000 s units */
		if (frame_dur != 0)
			gvs->frame_rate = 96000 / frame_dur;
		else
			gvs->frame_rate = -1;
	}
	gvs->error_count = READ_VREG(AV_SCRATCH_D);
	gvs->status = stat;
	if (fatal_error_reset)
		gvs->status |= fatal_error_flag;
	ar = min_t(u32,
		h264_ar,
		DISP_RATIO_ASPECT_RATIO_MAX);
	ratio_control =
		ar << DISP_RATIO_ASPECT_RATIO_BIT;
	gvs->ratio_control = ratio_control;
}

/*
 * vh264_isr() - decoder mailbox interrupt handler.  Dispatches on the
 * firmware command in AV_SCRATCH_0 (1 = sequence params ready,
 * 2 = frames decoded, ...); heavy work is deferred to workqueues.
 */
#ifdef HANDLE_H264_IRQ
static irqreturn_t vh264_isr(int irq, void *dev_id)
#else
static void vh264_isr(void)
#endif
{
	unsigned int buffer_index;
	struct vframe_s *vf;
	unsigned int cpu_cmd;
	unsigned int pts, pts_lookup_save, pts_valid_save, pts_valid = 0;
	unsigned int pts_us64_valid = 0;
	unsigned int framesize;
	u64 pts_us64;
	bool force_interlaced_frame = false;
	unsigned int sei_itu35_flags;

	static const unsigned int idr_num =
		FIX_FRAME_RATE_CHECK_IDRFRAME_NUM;
	static const unsigned int flg_1080_itl =
		DEC_CONTROL_FLAG_FORCE_2997_1080P_INTERLACE;
	static const unsigned int flg_576_itl =
		DEC_CONTROL_FLAG_FORCE_2500_576P_INTERLACE;

	/* acknowledge the mailbox interrupt first */
	WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1);

	if (0 == (stat & STAT_VDEC_RUN)) {
		pr_info("decoder is not running\n");
#ifdef HANDLE_H264_IRQ
		return IRQ_HANDLED;
#else
		return;
#endif
	}

	cpu_cmd = READ_VREG(AV_SCRATCH_0);

#ifdef DROP_B_FRAME_FOR_1080P_50_60FPS
	if ((frame_dur < 2004) &&
		(frame_width >= 1400) &&
		(frame_height >=
1000) && (last_interlaced == 0)) + SET_VREG_MASK(AV_SCRATCH_F, 0x8); +#endif + if ((decoder_force_reset == 1) + || ((error_recovery_mode != 1) + && (no_idr_error_count >= no_idr_error_max) + && (ucode_type != UCODE_IP_ONLY_PARAM))) { + vh264_running = 0; + pr_info("force reset decoder %d!!!\n", no_idr_error_count); + schedule_work(&error_wd_work); + decoder_force_reset = 0; + no_idr_error_count = 0; + } else if ((cpu_cmd & 0xff) == 1) { + if (unlikely + (vh264_running + && (kfifo_len(&newframe_q) != VF_POOL_SIZE))) { + /* a cmd 1 sent during decoding w/o getting a cmd 3. */ + /* should not happen but the original code has such + * case, do the same process + */ + if ((READ_VREG(AV_SCRATCH_1) & 0xff) + == 1) {/*invalid mb_width*/ + vh264_running = 0; + fatal_error_flag = DECODER_FATAL_ERROR_UNKNOWN; + /* this is fatal error, need restart */ + pr_info("cmd 1 fatal error happened\n"); + schedule_work(&error_wd_work); + } else { + vh264_stream_switching_state = SWITCHING_STATE_ON_CMD1; + pr_info("Enter switching mode cmd1.\n"); + schedule_work(&stream_switching_work); + } + return IRQ_HANDLED; + } + pr_info("Enter set parameter cmd1.\n"); + schedule_work(&set_parameter_work); + return IRQ_HANDLED; + } else if ((cpu_cmd & 0xff) == 2) { + int frame_mb_only, pic_struct_present, pic_struct, prog_frame, + poc_sel, idr_flag, eos, error; + int i, status, num_frame, b_offset; + int current_error_count, slice_type; + + vh264_running = 1; + vh264_no_disp_count = 0; + num_frame = (cpu_cmd >> 8) & 0xff; + frame_mb_only = seq_info & 0x8000; + pic_struct_present = seq_info & 0x10; + + current_error_count = READ_VREG(AV_SCRATCH_D); + if (vh264_error_count != current_error_count) { + /* pr_info("decoder error happened, count %d\n", + * current_error_count); + */ + vh264_error_count = current_error_count; + } + + for (i = 0; (i < num_frame) && (!vh264_eos); i++) { + status = READ_VREG(AV_SCRATCH_1 + i); + buffer_index = status & 0x1f; + error = status & 0x200; + slice_type = 
(READ_VREG(AV_SCRATCH_H) >> (i * 4)) & 0xf; + + if ((error_recovery_mode_use & 2) && error) + check_pts_discontinue = true; + if (ucode_type == UCODE_IP_ONLY_PARAM + && iponly_early_mode) + continue; + if ((p_last_vf != NULL) + && (p_last_vf->index == buffer_index)) + continue; + + if (buffer_index >= VF_BUF_NUM) + continue; + + pic_struct = (status >> 5) & 0x7; + prog_frame = status & 0x100; + poc_sel = status & 0x200; + idr_flag = status & 0x400; + frame_packing_type = (status >> 12) & 0x7; + eos = (status >> 15) & 1; + + if (eos) + vh264_eos = 1; + + b_offset = (status >> 16) & 0xffff; + + if (error) + no_idr_error_count++; + if (idr_flag || + (!error && (slice_type != SLICE_TYPE_I))) + no_idr_error_count = 0; + + if (decoder_debug_flag) { + pr_info + ("slice_type %x idr %x error %x count %d", + slice_type, idr_flag, error, + no_idr_error_count); + pr_info(" prog %x pic_struct %x offset %x\n", + prog_frame, pic_struct, b_offset); + } +#ifdef DROP_B_FRAME_FOR_1080P_50_60FPS + last_interlaced = prog_frame ? 
0 : 1; +#endif + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + + if (clk_adj_frame_count < (VDEC_CLOCK_ADJUST_FRAME + 1)) + clk_adj_frame_count++; + + set_frame_info(vf); + + switch (i) { + case 0: + b_offset |= + (READ_VREG(AV_SCRATCH_A) & 0xffff) + << 16; + break; + case 1: + b_offset |= + READ_VREG(AV_SCRATCH_A) & 0xffff0000; + break; + case 2: + b_offset |= + (READ_VREG(AV_SCRATCH_B) & 0xffff) + << 16; + break; + case 3: + b_offset |= + READ_VREG(AV_SCRATCH_B) & 0xffff0000; + break; + case 4: + b_offset |= + (READ_VREG(AV_SCRATCH_C) & 0xffff) + << 16; + break; + case 5: + b_offset |= + READ_VREG(AV_SCRATCH_C) & 0xffff0000; + break; + default: + break; + } + + if (error) + gvs->drop_frame_count++; + + /* add 64bit pts us ; */ + if (unlikely + ((b_offset == first_offset) + && (first_pts_cached))) { + pts = first_pts; + pts_us64 = first_pts64; + framesize = first_frame_size; + first_pts_cached = false; + pts_valid = 1; + pts_us64_valid = 1; +#ifdef DEBUG_PTS + pts_hit++; +#endif + } else if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, b_offset, &pts, + &framesize, 0, &pts_us64) == 0) { + pts_valid = 1; + pts_us64_valid = 1; +#ifdef DEBUG_PTS + pts_hit++; +#endif + } else { + pts_valid = 0; + pts_us64_valid = 0; + framesize = 0; +#ifdef DEBUG_PTS + pts_missed++; +#endif + } + + if (idr_flag) + s_vframe_qos.type = 4; + else if (slice_type == SLICE_TYPE_I) + s_vframe_qos.type = 1; + else if (slice_type == SLICE_TYPE_P) + s_vframe_qos.type = 2; + else if (slice_type == SLICE_TYPE_B || slice_type == 8) + s_vframe_qos.type = 3; + + s_vframe_qos.size = framesize; + + if (pts_valid) + s_vframe_qos.pts = pts; + else + s_vframe_qos.pts = last_pts + DUR2PTS(frame_dur); +#ifndef ENABLE_SEI_ITU_T35 + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + u32 reg_data; + if (i) { + reg_data = READ_VREG(AV_SCRATCH_N); + s_vframe_qos.max_mv + = (reg_data >> 16) & 0xffff; + s_vframe_qos.avg_mv + = 
(reg_data >> 8) & 0xff; + s_vframe_qos.min_mv + = reg_data & 0xff; + reg_data = READ_VREG(AV_SCRATCH_L); + s_vframe_qos.max_qp + = (reg_data >> 16) & 0xff; + s_vframe_qos.avg_qp + = (reg_data >> 8) & 0xff; + s_vframe_qos.min_qp + = reg_data & 0xff; + reg_data = READ_VREG(AV_SCRATCH_M); + s_vframe_qos.max_skip + = (reg_data >> 16) & 0xff; + s_vframe_qos.avg_skip + = (reg_data >> 8) & 0xff; + s_vframe_qos.min_skip + = reg_data & 0xff; + } else { + reg_data = READ_VREG(AV_SCRATCH_J); + s_vframe_qos.max_mv + = (reg_data >> 16) & 0xffff; + s_vframe_qos.avg_mv + = (reg_data >> 8) & 0xff; + s_vframe_qos.min_mv + = reg_data & 0xff; + reg_data = READ_VREG(AV_SCRATCH_I); + s_vframe_qos.max_qp + = (reg_data >> 16) & 0xff; + s_vframe_qos.avg_qp + = (reg_data >> 8) & 0xff; + s_vframe_qos.min_qp + = reg_data & 0xff; + reg_data = READ_VREG(AV_SCRATCH_K); + s_vframe_qos.max_skip + = (reg_data >> 16) & 0xff; + s_vframe_qos.avg_skip + = (reg_data >> 8) & 0xff; + s_vframe_qos.min_skip + = reg_data & 0xff; + } + if (decoder_debug_flag&0x2) { + pr_info("max_mv %d avg_mv %d min_mv %d slice_type %d offset %x i = %d\n", + s_vframe_qos.max_mv, + s_vframe_qos.avg_mv, + s_vframe_qos.min_mv, + slice_type, + b_offset, + i); + pr_info("max_qp %d avg_qp %d min_qp %d\n", + s_vframe_qos.max_qp, + s_vframe_qos.avg_qp, + s_vframe_qos.min_qp); + pr_info("max_skip %d avg_skip %d min_skip %d\n", + s_vframe_qos.max_skip, + s_vframe_qos.avg_skip, + s_vframe_qos.min_skip); + } + } else + search_qos_node(&s_vframe_qos, b_offset); +#endif + frame_count++; + + s_vframe_qos.num = frame_count; + //vdec_fill_frame_info(&s_vframe_qos, 1); + + /* on second IDR frame,check the diff between pts + * compute from duration and pts from lookup , + * if large than frame_dur,we think it is uncorrect. 
+ */ + pts_lookup_save = pts; + pts_valid_save = pts_valid; + if (fixed_frame_rate_flag + && (fixed_frame_rate_check_count <= + idr_num)) { + if (idr_flag && pts_valid) { + fixed_frame_rate_check_count++; + /* pr_info("diff:%d\n", + * last_pts - pts_lookup_save); + */ + if ((fixed_frame_rate_check_count == + idr_num) && + (abs(pts - (last_pts + + DUR2PTS(frame_dur))) > + DUR2PTS(frame_dur))) { + fixed_frame_rate_flag = 0; + pr_info("pts sync mode play\n"); + } + + if (fixed_frame_rate_flag + && (fixed_frame_rate_check_count + > idr_num)) { + pr_info + ("fix_frame_rate mode play\n"); + } + } + } + + if (READ_VREG(AV_SCRATCH_F) & 2) { + /* for I only mode, ignore the PTS information + * and only uses frame duration for each I + * frame decoded + */ + if (p_last_vf) + pts_valid = 0; + /* also skip frame duration calculation + * based on PTS + */ + duration_from_pts_done = 1; + /* and add a default duration for 1/30 second + * if there is no valid frame + * duration available + */ + if (frame_dur == 0) + frame_dur = 96000 / 30; + } + + if (sync_outside == 0) { + if (!vh264_isr_parser(vf, + pts_valid, buffer_index, pts)) + continue; + + h264_pts_count++; + } else { + if (!idr_flag) + pts_valid = 0; + } + + if (pts_valid && !pts_discontinue) { + pts_discontinue = + (abs(last_pts - pts) >= + tsync_vpts_discontinuity_margin()); + } + /* if use_idr_framerate or fixed frame rate, only + * use PTS for IDR frames except for pts discontinue + */ + if (timing_info_present_flag && + frame_dur && + (use_idr_framerate || + (fixed_frame_rate_flag != 0)) + && pts_valid && h264_first_valid_pts_ready + && (!pts_discontinue)) { + pts_valid = + (slice_type == SLICE_TYPE_I) ? 
1 : 0; + } + + if (!h264_first_valid_pts_ready && pts_valid) { + h264_first_valid_pts_ready = true; + last_pts = pts - DUR2PTS(frame_dur); + last_pts_remainder = 0; + } + /* calculate PTS of next frame and smooth + * PTS for fixed rate source + */ + if (pts_valid) { + if ((fixed_frame_rate_flag) && + (!pts_discontinue) && + (abs(pts_inc_by_duration(NULL, NULL) + - pts) + < DUR2PTS(frame_dur))) { + pts = pts_inc_by_duration(&pts, + &last_pts_remainder); + } else + last_pts_remainder = 0; + + } else { + if (fixed_frame_rate_flag && !pts_discontinue && + (fixed_frame_rate_check_count > idr_num) && + pts_valid_save && (sync_outside == 0) && + (abs(pts_inc_by_duration(NULL, NULL) - pts) + > DUR2PTS(frame_dur))) { + duration_from_pts_done = 0; + pr_info("recalc frame_dur\n"); + } else + pts = pts_inc_by_duration(&pts, + &last_pts_remainder); + pts_valid = 1; + } + + if ((dec_control & + flg_1080_itl) + && (frame_width == 1920) + && (frame_height >= 1080) + && (vf->duration == 3203)) + force_interlaced_frame = true; + else if ((dec_control & + flg_576_itl) + && (frame_width == 720) + && (frame_height == 576) + && (vf->duration == 3840)) + force_interlaced_frame = true; + + /* for frames with PTS, check if there is PTS + * discontinue based on previous frames + * (including error frames), + * force no VPTS discontinue reporting if we saw + *errors earlier but only once. 
+ */ + + /*count info*/ + h264_update_gvs(); + vdec_count_info(gvs, error, b_offset); + vdec_fill_vdec_frame(vdec_h264, &s_vframe_qos, gvs, vf, 0); + + if ((pts_valid) && (check_pts_discontinue) + && (!error)) { + if (pts_discontinue) { + vf->flag = 0; + check_pts_discontinue = false; + } else if ((pts - last_pts) < 90000) { + vf->flag = VFRAME_FLAG_NO_DISCONTINUE; + check_pts_discontinue = false; + } + } + + last_pts = pts; + + if (fixed_frame_rate_flag + && (fixed_frame_rate_check_count <= + idr_num) + && (sync_outside == 0) + && pts_valid_save) + pts = pts_lookup_save; + + if (pic_struct_present) { + if ((pic_struct == PIC_TOP_BOT) + || (pic_struct == PIC_BOT_TOP)) + prog_frame = 0; + } + + if ((!force_interlaced_frame) + && (prog_frame + || (pic_struct_present + && pic_struct + <= PIC_TRIPLE_FRAME))) { + if (pic_struct_present) { + if (pic_struct == PIC_TOP_BOT_TOP + || pic_struct + == PIC_BOT_TOP_BOT) { + vf->duration += + vf->duration >> 1; + } else if (pic_struct == + PIC_DOUBLE_FRAME) + vf->duration += vf->duration; + else if (pic_struct == + PIC_TRIPLE_FRAME) { + vf->duration += + vf->duration << 1; + } + } + + last_pts = + last_pts + DUR2PTS(vf->duration - + frame_dur); + + vf->index = buffer_index; + vf->type = + VIDTYPE_PROGRESSIVE | + VIDTYPE_VIU_FIELD | + VIDTYPE_VIU_NV21; + vf->duration_pulldown = 0; + vf->signal_type = video_signal_from_vui; + vf->index = buffer_index; + vf->pts = (pts_valid) ? 
pts : 0; + if (pts_us64_valid == 1) + vf->pts_us64 = pts_us64; + else + vf->pts_us64 = div64_u64(((u64)vf->pts)*100, 9); + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(&buffer_spec[buffer_index]); + vf->type_original = vf->type; + vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + VF_BUFFER_IDX(buffer_index)); + decoder_do_frame_check(NULL, vf); + if ((error_recovery_mode_use & 2) && error) { + kfifo_put(&recycle_q, + (const struct vframe_s *)vf); + } else { + p_last_vf = vf; + pts_discontinue = false; + kfifo_put(&delay_display_q, + (const struct vframe_s *)vf); + } + } else { + if (pic_struct_present + && pic_struct == PIC_TOP_BOT) + vf->type = VIDTYPE_INTERLACE_TOP; + else if (pic_struct_present + && pic_struct == PIC_BOT_TOP) + vf->type = VIDTYPE_INTERLACE_BOTTOM; + else { + vf->type = + poc_sel ? + VIDTYPE_INTERLACE_BOTTOM : + VIDTYPE_INTERLACE_TOP; + } + vf->type |= VIDTYPE_VIU_NV21; + vf->type |= VIDTYPE_INTERLACE_FIRST; + + high_bandwidth |= + ((codec_mm_get_total_size() < 80 * SZ_1M) + & ((READ_VREG(AV_SCRATCH_N) & 0xf) == 3) + & ((frame_width * frame_height) >= 1920*1080)); + if (high_bandwidth) + vf->flag |= VFRAME_FLAG_HIGH_BANDWIDTH; + + vf->duration >>= 1; + vf->duration_pulldown = 0; + vf->signal_type = video_signal_from_vui; + vf->index = buffer_index; + vf->pts = (pts_valid) ? 
pts : 0; + if (pts_us64_valid == 1) + vf->pts_us64 = pts_us64; + else + vf->pts_us64 = div64_u64(((u64)vf->pts)*100, 9); + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(&buffer_spec[buffer_index]); + vf->type_original = vf->type; + vfbuf_use[buffer_index]++; + vf->ready_jiffies64 = jiffies_64; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + VF_BUFFER_IDX(buffer_index)); + decoder_do_frame_check(NULL, vf); + if ((error_recovery_mode_use & 2) && error) { + kfifo_put(&recycle_q, + (const struct vframe_s *)vf); + continue; + } else { + pts_discontinue = false; + kfifo_put(&delay_display_q, + (const struct vframe_s *)vf); + } + + if (READ_VREG(AV_SCRATCH_F) & 2) + continue; + + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info + ("fatal error, no avail buffer slot."); + return IRQ_HANDLED; + } + + set_frame_info(vf); + + if (pic_struct_present + && pic_struct == PIC_TOP_BOT) + vf->type = VIDTYPE_INTERLACE_BOTTOM; + else if (pic_struct_present + && pic_struct == PIC_BOT_TOP) + vf->type = VIDTYPE_INTERLACE_TOP; + else { + vf->type = + poc_sel ? 
+ VIDTYPE_INTERLACE_TOP : + VIDTYPE_INTERLACE_BOTTOM; + } + + vf->type |= VIDTYPE_VIU_NV21; + vf->duration >>= 1; + vf->duration_pulldown = 0; + vf->signal_type = video_signal_from_vui; + vf->index = buffer_index; + vf->pts = 0; + vf->pts_us64 = 0; + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(&buffer_spec[buffer_index]); + vf->type_original = vf->type; + vfbuf_use[buffer_index]++; + if (high_bandwidth) + vf->flag |= VFRAME_FLAG_HIGH_BANDWIDTH; + + p_last_vf = vf; + vf->ready_jiffies64 = jiffies_64; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + VF_BUFFER_IDX(buffer_index)); + kfifo_put(&delay_display_q, + (const struct vframe_s *)vf); + } + } + + WRITE_VREG(AV_SCRATCH_0, 0); + } else if ((cpu_cmd & 0xff) == 3) { + vh264_running = 1; + vh264_stream_switching_state = SWITCHING_STATE_ON_CMD3; + + pr_info("Enter switching mode cmd3.\n"); + schedule_work(&stream_switching_work); + + } else if ((cpu_cmd & 0xff) == 4) { + vh264_running = 1; + /* reserved for slice group */ + WRITE_VREG(AV_SCRATCH_0, 0); + } else if ((cpu_cmd & 0xff) == 5) { + vh264_running = 1; + /* reserved for slice group */ + WRITE_VREG(AV_SCRATCH_0, 0); + } else if ((cpu_cmd & 0xff) == 6) { + vh264_running = 0; + fatal_error_flag = DECODER_FATAL_ERROR_UNKNOWN; + /* this is fatal error, need restart */ + pr_info("fatal error happend\n"); + amvdec_stop(); + if (!fatal_error_reset) + schedule_work(&error_wd_work); + } else if ((cpu_cmd & 0xff) == 7) { + vh264_running = 0; + frame_width = (READ_VREG(AV_SCRATCH_1) + 1) * 16; + pr_info("Over decoder supported size, width = %d\n", + frame_width); + fatal_error_flag = DECODER_FATAL_ERROR_SIZE_OVERFLOW; + } else if ((cpu_cmd & 0xff) == 8) { + vh264_running = 0; + frame_height = (READ_VREG(AV_SCRATCH_1) + 1) * 16; + pr_info("Over decoder supported size, height = %d\n", + frame_height); + fatal_error_flag = DECODER_FATAL_ERROR_SIZE_OVERFLOW; + } else if ((cpu_cmd & 0xff) == 9) { + first_offset = READ_VREG(AV_SCRATCH_1); + if 
(pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, first_offset, &first_pts, + &first_frame_size, 0, + &first_pts64) == 0) + first_pts_cached = true; + WRITE_VREG(AV_SCRATCH_0, 0); + } else if ((cpu_cmd & 0xff) == 0xa) { + int b_offset; + unsigned int frame_size; + + b_offset = READ_VREG(AV_SCRATCH_2); + buffer_index = READ_VREG(AV_SCRATCH_1); + /*pr_info("iponly output %d b_offset %x\n", + * buffer_index,b_offset); + */ + if (kfifo_get(&newframe_q, &vf) == 0) { + WRITE_VREG(AV_SCRATCH_0, 0); + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + if (pts_lookup_offset_us64 (PTS_TYPE_VIDEO, b_offset, + &pts, &frame_size, + 0, &pts_us64) != 0) + vf->pts_us64 = vf->pts = 0; + else { + vf->pts_us64 = pts_us64; + vf->pts = pts; + } + set_frame_info(vf); + vf->type = VIDTYPE_PROGRESSIVE | + VIDTYPE_VIU_FIELD | + VIDTYPE_VIU_NV21; + vf->duration_pulldown = 0; + vf->signal_type = video_signal_from_vui; + vf->index = buffer_index; + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(&buffer_spec[buffer_index]); + vf->type_original = vf->type; + vf->mem_handle = decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + VF_BUFFER_IDX(buffer_index)); + vfbuf_use[buffer_index]++; + p_last_vf = vf; + pts_discontinue = false; + iponly_early_mode = 1; + decoder_do_frame_check(NULL, vf); + kfifo_put(&delay_display_q, + (const struct vframe_s *)vf); + WRITE_VREG(AV_SCRATCH_0, 0); + } else if ((cpu_cmd & 0xff) == 0xB) { + schedule_work(&qos_work); + } + + sei_itu35_flags = READ_VREG(AV_SCRATCH_J); + if (sei_itu35_flags & (1 << 15)) { /* data ready */ +#ifdef ENABLE_SEI_ITU_T35 + schedule_work(&userdata_push_work); +#else + /* necessary if enabled itu_t35 in ucode*/ + WRITE_VREG(AV_SCRATCH_J, 0); +#endif + } + +#ifdef HANDLE_H264_IRQ + return IRQ_HANDLED; +#else + return; +#endif +} + +static void vh264_set_clk(struct work_struct *work) +{ + int fps = 96000 / frame_dur; + + if (frame_dur < 10) /*dur is too small ,think it errors fps*/ + fps = 60; + saved_resolution = 
frame_width * frame_height * fps; + vdec_source_changed(VFORMAT_H264, + frame_width, frame_height, fps); +} + +static void vh264_put_timer_func(struct timer_list *timer) +{ + unsigned int wait_buffer_status; + unsigned int wait_i_pass_frames; + unsigned int reg_val; + + enum receviver_start_e state = RECEIVER_INACTIVE; + + if (vh264_reset) { + pr_info("operation forbidden in timer !\n"); + goto exit; + } + + prepare_display_q(); + + if (vf_get_receiver(PROVIDER_NAME)) { + state = + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + if ((state == RECEIVER_STATE_NULL) + || (state == RECEIVER_STATE_NONE)) { + /* receiver has no event_cb or receiver's + * event_cb does not process this event + */ + state = RECEIVER_INACTIVE; + } + } else + state = RECEIVER_INACTIVE; +#ifndef HANDLE_H264_IRQ + vh264_isr(); +#endif + + if (vh264_stream_switching_state != SWITCHING_STATE_OFF) + wait_buffer_counter = 0; + else { + reg_val = READ_VREG(AV_SCRATCH_9); + wait_buffer_status = reg_val & (1 << 31); + wait_i_pass_frames = reg_val & 0xff; + if (wait_buffer_status) { + if (kfifo_is_empty(&display_q) && + kfifo_is_empty(&delay_display_q) && + kfifo_is_empty(&recycle_q) && + (state == RECEIVER_INACTIVE)) { + pr_info("$$$$decoder is waiting for buffer\n"); + if (++wait_buffer_counter > 4) { + amvdec_stop(); + schedule_work(&error_wd_work); + } + } else + wait_buffer_counter = 0; + } else if (wait_i_pass_frames > 1000) { + pr_info("i passed frames > 1000\n"); + amvdec_stop(); + schedule_work(&error_wd_work); + } + } + +#if 0 + if (!wait_buffer_status) { + if (vh264_no_disp_count++ > NO_DISP_WD_COUNT) { + pr_info("$$$decoder did not send frame out\n"); + amvdec_stop(); +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vh264_ppmgr_reset(); +#else + vf_light_unreg_provider(PROVIDER_NAME); + vh264_local_init(); + vf_reg_provider(vh264_vf_prov); +#endif + vh264_prot_init(); + amvdec_start(); + + vh264_no_disp_count = 0; + vh264_no_disp_wd_count++; + } + } +#endif + 
+ while (!kfifo_is_empty(&recycle_q) && + ((READ_VREG(AV_SCRATCH_7) == 0) + || (READ_VREG(AV_SCRATCH_8) == 0)) + && (vh264_stream_switching_state == SWITCHING_STATE_OFF)) { + struct vframe_s *vf; + + if (kfifo_get(&recycle_q, &vf)) { + if (vf->index < VF_BUF_NUM) { + if (--vfbuf_use[vf->index] == 0) { + if (READ_VREG(AV_SCRATCH_7) == 0) { + WRITE_VREG(AV_SCRATCH_7, + vf->index + 1); + } else { + WRITE_VREG(AV_SCRATCH_8, + vf->index + 1); + } + } + + vf->index = VF_BUF_NUM; + kfifo_put(&newframe_q, + (const struct vframe_s *)vf); + } + } + } + + if (vh264_stream_switching_state != SWITCHING_STATE_OFF) { + while (!kfifo_is_empty(&recycle_q)) { + struct vframe_s *vf; + + if (kfifo_get(&recycle_q, &vf)) { + if (vf->index < VF_BUF_NUM) { + vf->index = VF_BUF_NUM; + kfifo_put(&newframe_q, + (const struct vframe_s *)vf); + } + } + } + + WRITE_VREG(AV_SCRATCH_7, 0); + WRITE_VREG(AV_SCRATCH_8, 0); + + if (kfifo_len(&newframe_q) == VF_POOL_SIZE) + stream_switching_done(); + } + + if (ucode_type != UCODE_IP_ONLY_PARAM && + (clk_adj_frame_count > VDEC_CLOCK_ADJUST_FRAME) && + frame_dur > 0 && saved_resolution != + frame_width * frame_height * (96000 / frame_dur)) + schedule_work(&set_clk_work); + +exit: + timer->expires = jiffies + PUT_INTERVAL; + + add_timer(timer); +} + +int vh264_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + u32 ratio_control; + u32 ar; + + if (!(stat & STAT_VDEC_RUN)) + return -1; + + vstatus->frame_width = frame_width; + vstatus->frame_height = frame_height; + if (frame_dur != 0) + vstatus->frame_rate = 96000 / frame_dur; + else + vstatus->frame_rate = -1; + vstatus->error_count = READ_VREG(AV_SCRATCH_D); + vstatus->status = stat; + if (fatal_error_reset) + vstatus->status |= fatal_error_flag; + vstatus->bit_rate = gvs->bit_rate; + vstatus->frame_dur = frame_dur; + vstatus->frame_data = gvs->frame_data; + vstatus->total_data = gvs->total_data; + vstatus->frame_count = gvs->frame_count; + vstatus->error_frame_count = 
gvs->error_frame_count; + vstatus->drop_frame_count = gvs->drop_frame_count; + vstatus->total_data = gvs->total_data; + vstatus->samp_cnt = gvs->samp_cnt; + vstatus->offset = gvs->offset; + ar = min_t(u32, + h264_ar, + DISP_RATIO_ASPECT_RATIO_MAX); + ratio_control = + ar << DISP_RATIO_ASPECT_RATIO_BIT; + vstatus->ratio_control = ratio_control; + + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + +static int vh264_vdec_info_init(void) +{ + gvs = kzalloc(sizeof(struct vdec_info), GFP_KERNEL); + if (NULL == gvs) { + pr_info("the struct of vdec status malloc failed.\n"); + return -ENOMEM; + } + return 0; +} + +int vh264_set_trickmode(struct vdec_s *vdec, unsigned long trickmode) +{ + if (trickmode == TRICKMODE_I) { + WRITE_VREG(AV_SCRATCH_F, + (READ_VREG(AV_SCRATCH_F) & 0xfffffffc) | 2); + trickmode_i = 1; + } else if (trickmode == TRICKMODE_NONE) { + WRITE_VREG(AV_SCRATCH_F, READ_VREG(AV_SCRATCH_F) & 0xfffffffc); + trickmode_i = 0; + } + + return 0; +} + +int vh264_set_isreset(struct vdec_s *vdec, int isreset) +{ + is_reset = isreset; + return 0; +} + +static void vh264_prot_init(void) +{ + ulong timeout = jiffies + HZ; + + while (READ_VREG(DCAC_DMA_CTRL) & 0x8000) { + if (time_after(jiffies, timeout)) { + pr_info("%s DCAC_DMA_CTRL time out\n", __func__); + break; + } + } + + timeout = jiffies + HZ; + while (READ_VREG(LMEM_DMA_CTRL) & 0x8000) { + if (time_after(jiffies, timeout)) { + pr_info("%s LMEM_DMA_CTRL time out\n", __func__); + break; + } + } + +#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 9) | (1 << 8)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + 
READ_VREG(DOS_SW_RESET0); + +#else + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + READ_RESET_REG(RESET0_REGISTER); + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + + WRITE_RESET_REG(RESET2_REGISTER, RESET_PIC_DC | RESET_DBLK); +#endif + + WRITE_VREG(POWER_CTL_VLD, + READ_VREG(POWER_CTL_VLD) | + (0 << 10) | (1 << 9) | (1 << 6)); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + WRITE_VREG(AV_SCRATCH_0, 0); + WRITE_VREG(AV_SCRATCH_1, buf_offset); + if (!tee_enabled()) + WRITE_VREG(AV_SCRATCH_G, mc_dma_handle); + WRITE_VREG(AV_SCRATCH_7, 0); + WRITE_VREG(AV_SCRATCH_8, 0); + WRITE_VREG(AV_SCRATCH_9, 0); + WRITE_VREG(AV_SCRATCH_N, 0); + +#ifdef SUPPORT_BAD_MACRO_BLOCK_REDUNDANCY + if (bad_block_scale > 128) + bad_block_scale = 128; + WRITE_VREG(AV_SCRATCH_A, bad_block_scale); +#endif + + error_recovery_mode_use = + (error_recovery_mode != + 0) ? error_recovery_mode : error_recovery_mode_in; + WRITE_VREG(AV_SCRATCH_F, + (READ_VREG(AV_SCRATCH_F) & 0xffffffc3) | + (READ_VREG(AV_SCRATCH_F) & 0xffffff43) | + ((error_recovery_mode_use & 0x1) << 4)); + if (dec_control & DEC_CONTROL_FLAG_DISABLE_FAST_POC) + SET_VREG_MASK(AV_SCRATCH_F, 1 << 7); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); + if (ucode_type == UCODE_IP_ONLY_PARAM) + SET_VREG_MASK(AV_SCRATCH_F, 1 << 6); + else + CLEAR_VREG_MASK(AV_SCRATCH_F, 1 << 6); + + WRITE_VREG(AV_SCRATCH_I, (u32)(sei_data_buffer_phys - buf_offset)); + WRITE_VREG(AV_SCRATCH_J, 0); + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) && !is_meson_mtvd_cpu()) { + /* pr_info("vh264 meson8 prot init\n"); */ + WRITE_VREG(MDEC_PIC_DC_THRESH, 0x404038aa); + } + /* #endif */ +} + +static int vh264_local_init(void) +{ + int i, ret; + u32 size; + unsigned long 
buf_start; + vh264_ratio = vh264_amstream_dec_info.ratio; + /* vh264_ratio = 0x100; */ + + vh264_rotation = (((unsigned long) vh264_amstream_dec_info.param) + >> 16) & 0xffff; + + frame_prog = 0; + frame_width = vh264_amstream_dec_info.width; + frame_height = vh264_amstream_dec_info.height; + frame_dur = vh264_amstream_dec_info.rate; + pts_outside = ((unsigned long) vh264_amstream_dec_info.param) & 0x01; + sync_outside = ((unsigned long) vh264_amstream_dec_info.param & 0x02) + >> 1; + use_idr_framerate = ((unsigned long) vh264_amstream_dec_info.param + & 0x04) >> 2; + max_refer_buf = !(((unsigned long) vh264_amstream_dec_info.param + & 0x10) >> 4); + if (!vh264_reset) { + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + + mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BLK_BUFFERS, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + tvp_flag); + } + pr_info + ("H264 sysinfo: %dx%d duration=%d, pts_outside=%d \n", + frame_width, frame_height, frame_dur, pts_outside); + pr_debug("sync_outside=%d, use_idr_framerate=%d\n", + sync_outside, use_idr_framerate); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXTVBB) + size = V_BUF_ADDR_OFFSET_NEW; + else + size = V_BUF_ADDR_OFFSET; + + ret = decoder_bmmu_box_alloc_buf_phy(mm_blk_handle, 0, + size, DRIVER_NAME, &buf_start); + if (ret < 0) + return ret; + + buf_offset = buf_start - DEF_BUF_START_ADDR; + + if ((unsigned long) vh264_amstream_dec_info.param & 0x08) + ucode_type = UCODE_IP_ONLY_PARAM; + else + ucode_type = 0; + + if ((unsigned long) vh264_amstream_dec_info.param & 0x20) + error_recovery_mode_in = 1; + else + error_recovery_mode_in = 3; + + if (!vh264_running) { + last_mb_width = 0; + last_mb_height = 0; + } + + for (i = 0; i < VF_BUF_NUM; i++) + vfbuf_use[i] = 0; + + INIT_KFIFO(display_q); + INIT_KFIFO(delay_display_q); + INIT_KFIFO(recycle_q); + INIT_KFIFO(newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + 
const struct vframe_s *vf = &vfpool[i]; + + vfpool[i].index = VF_BUF_NUM; + vfpool[i].bufWidth = 1920; + kfifo_put(&newframe_q, vf); + } + +#ifdef DROP_B_FRAME_FOR_1080P_50_60FPS + last_interlaced = 1; +#endif + h264_first_pts_ready = 0; + h264_first_valid_pts_ready = false; + h264pts1 = 0; + h264pts2 = 0; + h264_pts_count = 0; + duration_from_pts_done = 0; + vh264_error_count = READ_VREG(AV_SCRATCH_D); + + p_last_vf = NULL; + check_pts_discontinue = false; + last_pts = 0; + wait_buffer_counter = 0; + vh264_no_disp_count = 0; + fatal_error_flag = 0; + high_bandwidth = 0; + vh264_stream_switching_state = SWITCHING_STATE_OFF; +#ifdef DEBUG_PTS + pts_missed = 0; + pts_hit = 0; +#endif + pts_discontinue = false; + no_idr_error_count = 0; + + vh264_reset_userdata_fifo(vdec_h264, 1); + h264_reset_qos_mgr(); + + if (enable_switch_fense) { + for (i = 0; i < ARRAY_SIZE(fense_buffer_spec); i++) { + struct buffer_spec_s *s = &fense_buffer_spec[i]; + s->alloc_count = 3 * SZ_1M / PAGE_SIZE; + ret = decoder_bmmu_box_alloc_buf_phy(mm_blk_handle, + FENSE_BUFFER_IDX(i), + 3 * SZ_1M, DRIVER_NAME, &s->phy_addr); + + if (ret < 0) { + fatal_error_flag = + DECODER_FATAL_ERROR_NO_MEM; + vh264_running = 0; + return ret; + } + s->y_canvas_index = 2 * i; + s->u_canvas_index = 2 * i + 1; + s->v_canvas_index = 2 * i + 1; + } + } + return 0; +} + +static s32 vh264_init(void) +{ + int ret = 0; + int trickmode_fffb = 0; + int firmwareloaded = 0; + + /* pr_info("\nvh264_init\n"); */ + timer_setup(&recycle_timer, vh264_put_timer_func, 0); + + stat |= STAT_TIMER_INIT; + + vh264_running = 0;/* init here to reset last_mb_width&last_mb_height */ + vh264_eos = 0; + duration_on_correcting = 0; + first_pts = 0; + first_pts64 = 0; + first_offset = 0; + first_pts_cached = false; + fixed_frame_rate_check_count = 0; + fr_hint_status = VDEC_NO_NEED_HINT; + saved_resolution = 0; + iponly_early_mode = 0; + saved_idc_level = 0; + + frame_count = 0; + memset(&s_vframe_qos, 0, sizeof(s_vframe_qos)); + /*init vdec 
status*/ + ret = vh264_vdec_info_init(); + if (0 != ret) + return -ret; + + ret = vh264_local_init(); + if (ret < 0) + return ret; + query_video_status(0, &trickmode_fffb); + + amvdec_enable(); + if (!firmwareloaded && tee_enabled()) { + ret = amvdec_loadmc_ex(VFORMAT_H264, NULL, NULL); + if (ret < 0) { + amvdec_disable(); + pr_err("H264: the %s fw loading failed, err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + return ret; + } + } else { + /* -- ucode loading (amrisc and swap code) */ + mc_cpu_addr = + dma_alloc_coherent(amports_get_dma_device(), MC_TOTAL_SIZE, + &mc_dma_handle, GFP_KERNEL); + if (!mc_cpu_addr) { + amvdec_disable(); + del_timer_sync(&recycle_timer); + pr_err("vh264_init: Can not allocate mc memory.\n"); + return -ENOMEM; + } + + pr_debug("264 ucode swap area: phyaddr %p, cpu vaddr %p\n", + (void *)mc_dma_handle, mc_cpu_addr); + if (debugfirmware) { + int r0, r1, r2, r3, r4, r5; + char firmwarename[32]; + + pr_debug("start load debug %d firmware ...\n", debugfirmware); + + snprintf(firmwarename, 32, "%s%d", "vh264_mc", debugfirmware); + r0 = amvdec_loadmc_ex(VFORMAT_H264, firmwarename, NULL); + +#define DEBUGGET_FW(t, name, buf, size, ret)\ + do {\ + snprintf(firmwarename, 32, "%s%d", name,\ + debugfirmware);\ + ret = get_decoder_firmware_data(t,\ + firmwarename, buf, size);\ + } while (0) + /*memcpy((u8 *) mc_cpu_addr + MC_OFFSET_HEADER, vh264_header_mc, + *MC_SWAP_SIZE); + */ + DEBUGGET_FW(VFORMAT_H264, "vh264_header_mc", + (u8 *) mc_cpu_addr + MC_OFFSET_HEADER, + MC_SWAP_SIZE, r1); + + /*memcpy((u8 *) mc_cpu_addr + MC_OFFSET_DATA, vh264_data_mc, + *MC_SWAP_SIZE); + */ + DEBUGGET_FW(VFORMAT_H264, "vh264_data_mc", + (u8 *) mc_cpu_addr + MC_OFFSET_DATA, MC_SWAP_SIZE, r2); + /*memcpy((u8 *) mc_cpu_addr + MC_OFFSET_MMCO, vh264_mmco_mc, + *MC_SWAP_SIZE); + */ + DEBUGGET_FW(VFORMAT_H264, "vh264_mmco_mc", + (u8 *) mc_cpu_addr + MC_OFFSET_MMCO, MC_SWAP_SIZE, r3); + /*memcpy((u8 *) mc_cpu_addr + MC_OFFSET_LIST, vh264_list_mc, + *MC_SWAP_SIZE); + */ 
+ DEBUGGET_FW(VFORMAT_H264, "vh264_list_mc", + (u8 *) mc_cpu_addr + MC_OFFSET_LIST, MC_SWAP_SIZE, r4); + /*memcpy((u8 *) mc_cpu_addr + MC_OFFSET_SLICE, vh264_slice_mc, + *MC_SWAP_SIZE); + */ + DEBUGGET_FW(VFORMAT_H264, "vh264_slice_mc", + (u8 *) mc_cpu_addr + MC_OFFSET_SLICE, MC_SWAP_SIZE, r5); + + if (r0 < 0 || r1 < 0 || r2 < 0 || r3 < 0 || r4 < 0 || r5 < 0) { + pr_err("264 load debugfirmware err %d,%d,%d,%d,%d,%d\n", + r0, r1, r2, r3, r4, r5); + amvdec_disable(); + if (mc_cpu_addr) { + dma_free_coherent(amports_get_dma_device(), + MC_TOTAL_SIZE, mc_cpu_addr, + mc_dma_handle); + mc_cpu_addr = NULL; + } + return -EBUSY; + } + firmwareloaded = 1; + } else { + int ret = -1; + char *buf = vmalloc(0x1000 * 16); + + if (IS_ERR_OR_NULL(buf)) + return -ENOMEM; + + if (get_firmware_data(VIDEO_DEC_H264, buf) < 0) { + pr_err("get firmware fail."); + vfree(buf); + return -1; + } + + ret = amvdec_loadmc_ex(VFORMAT_H264, NULL, buf); + memcpy((u8 *) mc_cpu_addr + MC_OFFSET_HEADER, + buf + 0x4000, MC_SWAP_SIZE); + memcpy((u8 *) mc_cpu_addr + MC_OFFSET_DATA, + buf + 0x2000, MC_SWAP_SIZE); + memcpy((u8 *) mc_cpu_addr + MC_OFFSET_MMCO, + buf + 0x6000, MC_SWAP_SIZE); + memcpy((u8 *) mc_cpu_addr + MC_OFFSET_LIST, + buf + 0x3000, MC_SWAP_SIZE); + memcpy((u8 *) mc_cpu_addr + MC_OFFSET_SLICE, + buf + 0x5000, MC_SWAP_SIZE); + + vfree(buf); + + if (ret < 0) { + amvdec_disable(); + if (mc_cpu_addr) { + dma_free_coherent(amports_get_dma_device(), + MC_TOTAL_SIZE, mc_cpu_addr, + mc_dma_handle); + mc_cpu_addr = NULL; + } + pr_err("H264: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + return -EBUSY; + } + } + } + + stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + vh264_prot_init(); + +#ifdef HANDLE_H264_IRQ + /*TODO irq */ + + if (vdec_request_irq(VDEC_IRQ_1, vh264_isr, + "vh264-irq", (void *)vh264_dec_id)) { + pr_err("vh264 irq register error.\n"); + amvdec_disable(); + return -ENOENT; + } +#endif + + stat |= STAT_ISR_REG; + +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_provider_init(&vh264_vf_prov, PROVIDER_NAME, &vh264_vf_provider_ops, + NULL); + vf_reg_provider(&vh264_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); +#else + vf_provider_init(&vh264_vf_prov, PROVIDER_NAME, &vh264_vf_provider_ops, + NULL); + vf_reg_provider(&vh264_vf_prov); +#endif + + if (frame_dur != 0) { + if (!is_reset) { + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *)((unsigned long)frame_dur)); + fr_hint_status = VDEC_HINTED; + } + } else + fr_hint_status = VDEC_NEED_HINT; + + stat |= STAT_VF_HOOK; + + recycle_timer.expires = jiffies + PUT_INTERVAL; + add_timer(&recycle_timer); + + stat |= STAT_TIMER_ARM; + + vh264_stream_switching_state = SWITCHING_STATE_OFF; + + stat |= STAT_VDEC_RUN; + wmb(); /* Ensure fetchbuf contents visible */ + + /* -- start decoder */ + amvdec_start(); + + init_userdata_fifo(); + + return 0; +} + +static int vh264_stop(int mode) +{ + + + if (stat & STAT_VDEC_RUN) { + amvdec_stop(); + stat &= ~STAT_VDEC_RUN; + } + + if (stat & STAT_ISR_REG) { + WRITE_VREG(ASSIST_MBOX1_MASK, 0); + /*TODO irq */ + + vdec_free_irq(VDEC_IRQ_1, (void *)vh264_dec_id); + + stat &= ~STAT_ISR_REG; + } + + if (stat & STAT_TIMER_ARM) { + del_timer_sync(&recycle_timer); + stat &= ~STAT_TIMER_ARM; + } + + if (stat & STAT_VF_HOOK) { + if (mode == MODE_FULL) { + if (fr_hint_status == VDEC_HINTED) + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + fr_hint_status = VDEC_NO_NEED_HINT; + } + + vf_unreg_provider(&vh264_vf_prov); + stat &= 
~STAT_VF_HOOK; + } + + if (stat & STAT_MC_LOAD) { + if (mc_cpu_addr != NULL) { + dma_free_coherent(amports_get_dma_device(), + MC_TOTAL_SIZE, mc_cpu_addr, + mc_dma_handle); + mc_cpu_addr = NULL; + } + } + if (sei_data_buffer != NULL) { + dma_free_coherent( + amports_get_dma_device(), + USER_DATA_RUND_SIZE, + sei_data_buffer, + sei_data_buffer_phys); + sei_data_buffer = NULL; + sei_data_buffer_phys = 0; + } + amvdec_disable(); + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + memset(&fense_buffer_spec, 0, sizeof(fense_buffer_spec)); + memset(&buffer_spec, 0, sizeof(buffer_spec)); + return 0; +} + +static void wait_vh264_search_done(void) +{ + u32 vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP); + int count = 0; + do { + usleep_range(100, 500); + if (vld_rp == READ_VREG(VLD_MEM_VIFIFO_RP)) + break; + if (count > 2000) { + pr_info("%s, timeout count %d vld_rp 0x%x VLD_MEM_VIFIFO_RP 0x%x\n", + __func__, count, vld_rp, READ_VREG(VLD_MEM_VIFIFO_RP)); + break; + } else + vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP); + count++; + } while (1); +} + + +static void error_do_work(struct work_struct *work) +{ + + /* + * we need to lock vh264_stop/vh264_init. + * because we will call amvdec_h264_remove on this step; + * then we may call more than once on + * free_irq/deltimer/..and some other. 
+ */ + if (atomic_read(&vh264_active)) { + amvdec_stop(); + do { + msleep(50); + } while (vh264_stream_switching_state != SWITCHING_STATE_OFF); + wait_vh264_search_done(); + vh264_reset = 1; +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vh264_ppmgr_reset(); +#else + vf_light_unreg_provider(&vh264_vf_prov); + + vh264_local_init(); + + vf_reg_provider(&vh264_vf_prov); +#endif + vh264_prot_init(); + amvdec_start(); + vh264_reset = 0; + } +} + +static void stream_switching_done(void) +{ + int state = vh264_stream_switching_state; + + WRITE_VREG(AV_SCRATCH_7, 0); + WRITE_VREG(AV_SCRATCH_8, 0); + WRITE_VREG(AV_SCRATCH_9, 0); + + if (state == SWITCHING_STATE_ON_CMD1) { + pr_info("Enter set parameter cmd1 switching_state %x.\n", + vh264_stream_switching_state); + schedule_work(&set_parameter_work); + return; + } else if (state == SWITCHING_STATE_ON_CMD1_PENDING) + return; + + vh264_stream_switching_state = SWITCHING_STATE_OFF; + + wmb(); /* Ensure fetchbuf contents visible */ + + if (state == SWITCHING_STATE_ON_CMD3) + WRITE_VREG(AV_SCRATCH_0, 0); + + pr_info("Leaving switching mode.\n"); +} + +/* construt a new frame as a copy of last frame so frame receiver can + * release all buffer resources to decoder. 
+ */ +static void stream_switching_do(struct work_struct *work) +{ + int mb_total_num, mb_width_num, mb_height_num, i = 0; + struct vframe_s *vf = NULL; + u32 y_index, u_index, src_index, des_index, y_desindex, u_desindex; + struct canvas_s csy, csu, cyd; + unsigned long flags; + bool delay = true; + + if (!atomic_read(&vh264_active)) + return; + + if (vh264_stream_switching_state == SWITCHING_STATE_OFF) + return; + + spin_lock_irqsave(&prepare_lock, flags); + + block_display_q = true; + + spin_unlock_irqrestore(&prepare_lock, flags); + + mb_total_num = mb_total; + mb_width_num = mb_width; + mb_height_num = mb_height; + + while (is_4k || kfifo_len(&delay_display_q) > 2) { + if (kfifo_get(&delay_display_q, &vf)) { + kfifo_put(&display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } else + break; + } + + if (!kfifo_get(&delay_display_q, &vf)) { + vf = p_last_vf; + delay = false; + } + + while (vf) { + int buffer_index; + + buffer_index = vf->index & 0xff; + + /* construct a clone of the frame from last frame */ + +#if 0 + + pr_info("src yaddr[0x%x] index[%d] width[%d] heigth[%d]\n", + buffer_spec[buffer_index].y_addr, + buffer_spec[buffer_index].y_canvas_index, + buffer_spec[buffer_index].y_canvas_width, + buffer_spec[buffer_index].y_canvas_height); + + pr_info("src uaddr[0x%x] index[%d] width[%d] heigth[%d]\n", + buffer_spec[buffer_index].u_addr, + buffer_spec[buffer_index].u_canvas_index, + buffer_spec[buffer_index].u_canvas_width, + buffer_spec[buffer_index].u_canvas_height); +#endif + if (EN_SWITCH_FENCE()) { + y_index = buffer_spec[buffer_index].y_canvas_index; + u_index = buffer_spec[buffer_index].u_canvas_index; + + canvas_read(y_index, &csy); + canvas_read(u_index, &csu); + + config_cav_lut_ex(fense_buffer_spec[i].y_canvas_index, + fense_buffer_spec[i].phy_addr, + mb_width_num << 4, mb_height_num << 4, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 
0, VDEC_1); + config_cav_lut_ex(fense_buffer_spec[i].u_canvas_index, + fense_buffer_spec[i].phy_addr + + (mb_total_num << 8), + mb_width_num << 4, mb_height_num << 3, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 0, VDEC_1); + + y_desindex = fense_buffer_spec[i].y_canvas_index; + u_desindex = fense_buffer_spec[i].u_canvas_index; + + canvas_read(y_desindex, &cyd); + + src_index = ((y_index & 0xff) | + ((u_index << 8) & 0x0000ff00)); + des_index = ((y_desindex & 0xff) | + ((u_desindex << 8) & 0x0000ff00)); + + ge2d_canvas_dup(&csy, &csu, &cyd, + GE2D_FORMAT_M24_NV21, + src_index, + des_index); + } + vf->mem_handle = decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + FENSE_BUFFER_IDX(i)); + fense_vf[i] = *vf; + fense_vf[i].index = -1; + + if (EN_SWITCH_FENCE()) + fense_vf[i].canvas0Addr = + spec2canvas(&fense_buffer_spec[i]); + else + fense_vf[i].flag |= VFRAME_FLAG_SWITCHING_FENSE; + + /* send clone to receiver */ + kfifo_put(&display_q, + (const struct vframe_s *)&fense_vf[i]); + ATRACE_COUNTER(MODULE_NAME, fense_vf[i].pts); + /* early recycle frames for last session */ + if (delay) + vh264_vf_put(vf, NULL); + + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + + i++; + + if (!kfifo_get(&delay_display_q, &vf)) + break; + } + + block_display_q = false; + + pr_info("Switching fense frame post\n"); +} + +static int amvdec_h264_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + + mutex_lock(&vh264_mutex); + + if (pdata == NULL) { + pr_info("\namvdec_h264 memory resource undefined.\n"); + mutex_unlock(&vh264_mutex); + return -EFAULT; + } + canvas_mode = pdata->canvas_mode; + tvp_flag = vdec_secure(pdata) ? 
CODEC_MM_FLAGS_TVP : 0; + if (pdata->sys_info) + vh264_amstream_dec_info = *pdata->sys_info; + if (sei_data_buffer == NULL) { + sei_data_buffer = + dma_alloc_coherent(amports_get_dma_device(), + USER_DATA_RUND_SIZE, + &sei_data_buffer_phys, GFP_KERNEL); + if (!sei_data_buffer) { + pr_info("%s: Can not allocate sei_data_buffer\n", + __func__); + mutex_unlock(&vh264_mutex); + return -ENOMEM; + } + /* pr_info("buffer 0x%x, phys 0x%x, remap 0x%x\n", + * sei_data_buffer, sei_data_buffer_phys, + * (u32)sei_data_buffer_remap); + */ + } + pdata->dec_status = vh264_dec_status; + pdata->set_trickmode = vh264_set_trickmode; + pdata->set_isreset = vh264_set_isreset; + + pdata->user_data_read = vh264_user_data_read; + pdata->reset_userdata_fifo = vh264_reset_userdata_fifo; + pdata->wakeup_userdata_poll = vh264_wakeup_userdata_poll; + + is_reset = 0; + clk_adj_frame_count = 0; + if (vh264_init() < 0) { + pr_info("\namvdec_h264 init failed.\n"); + kfree(gvs); + gvs = NULL; + pdata->dec_status = NULL; + mutex_unlock(&vh264_mutex); + return -ENODEV; + } + vdec_h264 = pdata; + vh264_crate_userdata_manager(sei_data_buffer, USER_DATA_SIZE); + vh264_reset_userdata_fifo(vdec_h264, 1); + +#ifdef DUMP_USER_DATA + vh264_init_userdata_dump(); + vh264_reset_user_data_buf(); +#endif + + INIT_WORK(&error_wd_work, error_do_work); + INIT_WORK(&stream_switching_work, stream_switching_do); + INIT_WORK(&set_parameter_work, vh264_set_params); + INIT_WORK(¬ify_work, vh264_notify_work); + INIT_WORK(&set_clk_work, vh264_set_clk); + INIT_WORK(&userdata_push_work, userdata_push_do_work); + INIT_WORK(&qos_work, qos_do_work); + + atomic_set(&vh264_active, 1); + + mutex_unlock(&vh264_mutex); + vdec_set_vframe_comm(pdata, DRIVER_NAME); + + return 0; +} + +static int amvdec_h264_remove(struct platform_device *pdev) +{ + atomic_set(&vh264_active, 0); + cancel_work_sync(&set_parameter_work); + cancel_work_sync(&error_wd_work); + cancel_work_sync(&stream_switching_work); + cancel_work_sync(¬ify_work); + 
cancel_work_sync(&userdata_push_work); + cancel_work_sync(&qos_work); + + + vh264_stop(MODE_FULL); + wait_vh264_search_done(); + vdec_source_changed(VFORMAT_H264, 0, 0, 0); +#ifdef DUMP_USER_DATA + vh264_dump_userdata(); +#endif + vh264_destroy_userdata_manager(); + atomic_set(&vh264_active, 0); +#ifdef DEBUG_PTS + pr_info + ("pts missed %ld, pts hit %ld, pts_outside %d, duration %d, ", + pts_missed, pts_hit, pts_outside, frame_dur); + pr_info("sync_outside %d, use_idr_framerate %d\n", + sync_outside, use_idr_framerate); +#endif + kfree(gvs); + gvs = NULL; + cancel_work_sync(&set_clk_work); + mutex_unlock(&vh264_mutex); + return 0; +} + +/****************************************/ + +static struct platform_driver amvdec_h264_driver = { + .probe = amvdec_h264_probe, + .remove = amvdec_h264_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t amvdec_h264_profile = { + .name = "h264", + .profile = "" +}; + + +static struct mconfig h264_configs[] = { + MC_PU32("stat", &stat), + MC_PU32("error_recovery_mode", &error_recovery_mode), + MC_PU32("sync_outside", &sync_outside), + MC_PU32("dec_control", &dec_control), + MC_PU32("fatal_error_reset", &fatal_error_reset), + MC_PU32("max_refer_buf", &max_refer_buf), + MC_PU32("ucode_type", &ucode_type), + MC_PU32("debugfirmware", &debugfirmware), + MC_PU32("fixed_frame_rate_flag", &fixed_frame_rate_flag), + MC_PU32("decoder_debug_flag", &decoder_debug_flag), + MC_PU32("dpb_size_adj", &dpb_size_adj), + MC_PU32("decoder_force_reset", &decoder_force_reset), + MC_PU32("no_idr_error_max", &no_idr_error_max), + MC_PU32("enable_switch_fense", &enable_switch_fense), +}; +static struct mconfig_node h264_node; + + +static int __init amvdec_h264_driver_init_module(void) +{ + pr_debug("amvdec_h264 module init\n"); + + ge2d_videoh264task_init(); + + if (platform_driver_register(&amvdec_h264_driver)) { + pr_err("failed to register 
amvdec_h264 driver\n"); + return -ENODEV; + } + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXTVBB + && (codec_mm_get_total_size() > 80 * SZ_1M) && + get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D) { + amvdec_h264_profile.profile = "4k"; + } + vcodec_profile_register(&amvdec_h264_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &h264_node, + "h264", h264_configs, CONFIG_FOR_RW); + return 0; +} + +static void __exit amvdec_h264_driver_remove_module(void) +{ + pr_debug("amvdec_h264 module remove.\n"); + + platform_driver_unregister(&amvdec_h264_driver); + + ge2d_videoh264task_release(); +} + +/****************************************/ + +module_param(stat, uint, 0664); +MODULE_PARM_DESC(stat, "\n amvdec_h264 stat\n"); +module_param(error_recovery_mode, uint, 0664); +MODULE_PARM_DESC(error_recovery_mode, "\n amvdec_h264 error_recovery_mode\n"); +module_param(sync_outside, uint, 0664); +MODULE_PARM_DESC(sync_outside, "\n amvdec_h264 sync_outside\n"); +module_param(dec_control, uint, 0664); +MODULE_PARM_DESC(dec_control, "\n amvdec_h264 decoder control\n"); +module_param(frame_count, uint, 0664); +MODULE_PARM_DESC(frame_count, + "\n amvdec_h264 decoded total count\n"); +module_param(fatal_error_reset, uint, 0664); +MODULE_PARM_DESC(fatal_error_reset, + "\n amvdec_h264 decoder reset when fatal error happens\n"); +module_param(max_refer_buf, uint, 0664); +MODULE_PARM_DESC(max_refer_buf, + "\n amvdec_h264 dec buffering or not for reference frame\n"); +module_param(ucode_type, uint, 0664); +MODULE_PARM_DESC(ucode_type, + "\n amvdec_h264 dec buffering or not for reference frame\n"); +module_param(debugfirmware, uint, 0664); +MODULE_PARM_DESC(debugfirmware, "\n amvdec_h264 debug load firmware\n"); +module_param(fixed_frame_rate_flag, uint, 0664); +MODULE_PARM_DESC(fixed_frame_rate_flag, + "\n amvdec_h264 fixed_frame_rate_flag\n"); +module_param(decoder_debug_flag, uint, 0664); +MODULE_PARM_DESC(decoder_debug_flag, + "\n amvdec_h264 decoder_debug_flag\n"); + 
+module_param(dpb_size_adj, uint, 0664); +MODULE_PARM_DESC(dpb_size_adj, + "\n amvdec_h264 dpb_size_adj\n"); + + +module_param(decoder_force_reset, uint, 0664); +MODULE_PARM_DESC(decoder_force_reset, + "\n amvdec_h264 decoder force reset\n"); +module_param(no_idr_error_max, uint, 0664); +MODULE_PARM_DESC(no_idr_error_max, + "\n print no_idr_error_max\n"); +module_param(enable_switch_fense, uint, 0664); +MODULE_PARM_DESC(enable_switch_fense, + "\n enable switch fense\n"); + +#ifdef SUPPORT_BAD_MACRO_BLOCK_REDUNDANCY +module_param(bad_block_scale, uint, 0664); +MODULE_PARM_DESC(bad_block_scale, + "\n print bad_block_scale\n"); +#endif + +module_param(enable_userdata_debug, uint, 0664); +MODULE_PARM_DESC(enable_userdata_debug, + "\n enable_userdata_debug\n"); + + +module_init(amvdec_h264_driver_init_module); +module_exit(amvdec_h264_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC H264 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Chen Zhang <chen.zhang@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/h264/vh264.h b/drivers/frame_provider/decoder/h264/vh264.h new file mode 100644 index 0000000..6c8e4ad --- /dev/null +++ b/drivers/frame_provider/decoder/h264/vh264.h
@@ -0,0 +1,27 @@ +/* + * drivers/amlogic/media/frame_provider/decoder/h264/vh264.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef VH264_H +#define VH264_H + +extern int query_video_status(int type, int *value); + +/* extern s32 vh264_init(void); */ + +extern s32 vh264_release(void); + +#endif /* VMPEG4_H */
diff --git a/drivers/frame_provider/decoder/h264/vh264_mvc.c b/drivers/frame_provider/decoder/h264/vh264_mvc.c new file mode 100644 index 0000000..4827e38 --- /dev/null +++ b/drivers/frame_provider/decoder/h264/vh264_mvc.c
@@ -0,0 +1,1918 @@ +/* + * drivers/amlogic/amports/vh264mvc.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/workqueue.h> +#include <linux/dma-mapping.h> +#include <linux/atomic.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../utils/vdec.h" +#include "../utils/amvdec.h" +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../utils/firmware.h" +#include "../utils/config_parser.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" + +#define TIME_TASK_PRINT_ENABLE 0x100 +#define PUT_PRINT_ENABLE 0x200 + +#define DRIVER_NAME "amvdec_h264mvc" +#define MODULE_NAME "amvdec_h264mvc" + +#define HANDLE_h264mvc_IRQ + +#define DEBUG_PTS +#define 
DEBUG_SKIP + +#define PUT_INTERVAL (HZ/100) + +#define STAT_TIMER_INIT 0x01 +#define STAT_MC_LOAD 0x02 +#define STAT_ISR_REG 0x04 +#define STAT_VF_HOOK 0x08 +#define STAT_TIMER_ARM 0x10 +#define STAT_VDEC_RUN 0x20 + +#define DROPPING_THREAD_HOLD 4 +#define DROPPING_FIRST_WAIT 16 +#define DISPLAY_INVALID_POS -65536 + +#define INIT_DROP_FRAME_CNT 8 + +static int vh264mvc_vf_states(struct vframe_states *states, void *); +static struct vframe_s *vh264mvc_vf_peek(void *); +static struct vframe_s *vh264mvc_vf_get(void *); +static void vh264mvc_vf_put(struct vframe_s *, void *); +static int vh264mvc_event_cb(int type, void *data, void *private_data); + +static void vh264mvc_prot_init(void); +static int vh264mvc_local_init(void); +static void vh264mvc_put_timer_func(struct timer_list *timer); + +static const char vh264mvc_dec_id[] = "vh264mvc-dev"; + +#define PROVIDER_NAME "decoder.h264mvc" + +static struct vdec_info *gvs; +static struct work_struct alloc_work; +static struct work_struct set_clk_work; + +static DEFINE_MUTEX(vh264_mvc_mutex); + +static const struct vframe_operations_s vh264mvc_vf_provider = { + .peek = vh264mvc_vf_peek, + .get = vh264mvc_vf_get, + .put = vh264mvc_vf_put, + .event_cb = vh264mvc_event_cb, + .vf_states = vh264mvc_vf_states, +}; + +static struct vframe_provider_s vh264mvc_vf_prov; + +static struct vdec_s *vdec = NULL; +static u32 frame_width, frame_height, frame_dur; +static u32 saved_resolution; +static struct timer_list recycle_timer; +static u32 stat; +static u32 pts_outside; +static u32 sync_outside; +static u32 vh264mvc_ratio; +static u32 h264mvc_ar; +static u32 no_dropping_cnt; +static s32 init_drop_cnt; +spinlock_t mvc_rp_lock; + +#ifdef DEBUG_SKIP +static unsigned long view_total, view_dropped; +#endif + +#ifdef DEBUG_PTS +static unsigned long pts_missed, pts_hit; +#endif + +static atomic_t vh264mvc_active = ATOMIC_INIT(0); +static struct work_struct error_wd_work; + +static struct dec_sysinfo vh264mvc_amstream_dec_info; +static 
dma_addr_t mc_dma_handle; +static void *mc_cpu_addr; + +static DEFINE_SPINLOCK(lock); + +static int vh264mvc_stop(void); +static s32 vh264mvc_init(void); + +/*************************** + * new + ************************** + */ + +/* bit[3:0] command : */ +/* 0 - command finished */ +/* (DATA0 - {level_idc_mmco, max_reference_frame_num, width, height} */ +/* 1 - alloc view_0 display_buffer and reference_data_area */ +/* 2 - alloc view_1 display_buffer and reference_data_area */ +#define MAILBOX_COMMAND AV_SCRATCH_0 +#define MAILBOX_DATA_0 AV_SCRATCH_1 +#define MAILBOX_DATA_1 AV_SCRATCH_2 +#define MAILBOX_DATA_2 AV_SCRATCH_3 +#define CANVAS_START AV_SCRATCH_6 +#define BUFFER_RECYCLE AV_SCRATCH_7 +#define DROP_CONTROL AV_SCRATCH_8 +#define PICTURE_COUNT AV_SCRATCH_9 +#define DECODE_STATUS AV_SCRATCH_A +#define SPS_STATUS AV_SCRATCH_B +#define PPS_STATUS AV_SCRATCH_C +#define SIM_RESERV_D AV_SCRATCH_D +#define WORKSPACE_START AV_SCRATCH_E +#define SIM_RESERV_F AV_SCRATCH_F +#define DECODE_ERROR_CNT AV_SCRATCH_G +#define CURRENT_UCODE AV_SCRATCH_H +#define CURRENT_SPS_PPS AV_SCRATCH_I/* bit[15:9]-SPS, bit[8:0]-PPS */ +#define DECODE_SKIP_PICTURE AV_SCRATCH_J +#define UCODE_START_ADDR AV_SCRATCH_K +#define SIM_RESERV_L AV_SCRATCH_L +#define REF_START_VIEW_0 AV_SCRATCH_M +#define REF_START_VIEW_1 AV_SCRATCH_N + +/******************************************** + * Mailbox command + ********************************************/ +#define CMD_FINISHED 0 +#define CMD_ALLOC_VIEW_0 1 +#define CMD_ALLOC_VIEW_1 2 +#define CMD_FRAME_DISPLAY 3 +#define CMD_FATAL_ERROR 4 + +#define CANVAS_INDEX_START 0x78 +/* /AMVDEC_H264MVC_CANVAS_INDEX */ + +#define MC_TOTAL_SIZE (28*SZ_1K) +#define MC_SWAP_SIZE (4*SZ_1K) + +unsigned int DECODE_BUFFER_START = 0x00200000; +unsigned int DECODE_BUFFER_END = 0x05000000; + +/* #define DISPLAY_BUFFER_NUM 4 */ +static unsigned int dynamic_buf_num_margin = 8; + +#define DECODE_BUFFER_NUM_MAX 16 +#define MAX_BMMU_BUFFER_NUM (DECODE_BUFFER_NUM_MAX + 
dynamic_buf_num_margin) +#define TOTAL_BMMU_BUFF_NUM (MAX_BMMU_BUFFER_NUM * 2 + 3) +#define VF_BUFFER_IDX(n) (2 + n) + +#define DECODER_WORK_SPACE_SIZE 0xa0000 + + +static unsigned int ANC_CANVAS_ADDR; +static unsigned int index; +static unsigned long ref_start_addr[2]; +static unsigned int max_dec_frame_buffering[2]; +static unsigned int total_dec_frame_buffering[2]; + +static unsigned int dpb_size, ref_size; + +static int display_buff_id; +static int display_view_id; +static int display_POC; +static int stream_offset; + +#define video_domain_addr(adr) (adr&0x7fffffff) +static unsigned long work_space_adr; + +struct buffer_spec_s { + unsigned int y_addr; + unsigned int u_addr; + unsigned int v_addr; + + int y_canvas_index; + int u_canvas_index; + int v_canvas_index; + + struct page *alloc_pages; + unsigned long phy_addr; + int alloc_count; +}; +/* +static struct buffer_spec_s buffer_spec0[MAX_BMMU_BUFFER_NUM]; +static struct buffer_spec_s buffer_spec1[MAX_BMMU_BUFFER_NUM]; +*/ +static struct buffer_spec_s *buffer_spec0; +static struct buffer_spec_s *buffer_spec1; +static void *mm_blk_handle; + +/* + * dbg_mode: + * bit 0: 1, print debug information + * bit 4: 1, recycle buffer without displaying; + * bit 5: 1, buffer single frame step , set dbg_cmd to 1 to step + * + */ +static int dbg_mode; +static int dbg_cmd; +static int view_mode = + 3; /* 0, left; 1 ,right ; 2, left<->right 3, right<->left */ +static int drop_rate = 2; +static int drop_thread_hold; +/**/ + +struct mvc_buf_s { + struct list_head list; + struct vframe_s vframe; + int display_POC; + int view0_buff_id; + int view1_buff_id; + int view0_drop; + int view1_drop; + int stream_offset; + unsigned int pts; +} /*mvc_buf_t */; + +#define spec2canvas(x) \ + (((x)->v_canvas_index << 16) | \ + ((x)->u_canvas_index << 8) | \ + ((x)->y_canvas_index << 0)) + +#define to_mvcbuf(vf) \ + container_of(vf, struct mvc_buf_s, vframe) + +static int vf_buf_init_flag; + +static void init_vf_buf(void) +{ + + 
vf_buf_init_flag = 1; +} + +static void uninit_vf_buf(void) +{ + +} + +/* #define QUEUE_SUPPORT */ + +struct mvc_info_s { + int view0_buf_id; + int view1_buf_id; + int view0_drop; + int view1_drop; + int display_pos; + int used; + int slot; + unsigned int stream_offset; +}; + +#define VF_POOL_SIZE 20 +static struct vframe_s vfpool[VF_POOL_SIZE]; +static struct mvc_info_s vfpool_idx[VF_POOL_SIZE]; +static s32 view0_vfbuf_use[DECODE_BUFFER_NUM_MAX]; +static s32 view1_vfbuf_use[DECODE_BUFFER_NUM_MAX]; + +static s32 fill_ptr, get_ptr, putting_ptr, put_ptr; +static s32 dirty_frame_num; +static s32 enable_recycle; + +static s32 init_drop_frame_id[INIT_DROP_FRAME_CNT]; +#define INCPTR(p) ptr_atomic_wrap_inc(&p) +static inline void ptr_atomic_wrap_inc(u32 *ptr) +{ + u32 i = *ptr; + + i++; + + if (i >= VF_POOL_SIZE) + i = 0; + + *ptr = i; +} + +static void set_frame_info(struct vframe_s *vf) +{ + unsigned int ar = 0; + + vf->width = frame_width; + vf->height = frame_height; + vf->duration = frame_dur; + vf->duration_pulldown = 0; + + if (vh264mvc_ratio == 0) { + /* always stretch to 16:9 */ + vf->ratio_control |= (0x90 << + DISP_RATIO_ASPECT_RATIO_BIT); + vf->sar_height = 1; + vf->sar_width = 1; + } else { + /* h264mvc_ar = ((float)frame_height/frame_width) + *customer_ratio; + */ + switch (h264mvc_ar) { + case 1: + ar = 0x3ff; + vf->sar_height = 1; + vf->sar_width = 1; + break; + case 2: + ar = 0x3ff; + vf->sar_height = 11; + vf->sar_width = 12; + break; + case 3: + ar = 0x3ff; + vf->sar_height = 11; + vf->sar_width = 10; + break; + case 4: + ar = 0x3ff; + vf->sar_height = 11; + vf->sar_width = 16; + break; + case 5: + ar = 0x3ff; + vf->sar_height = 33; + vf->sar_width = 40; + break; + case 6: + ar = 0x3ff; + vf->sar_height = 11; + vf->sar_width = 24; + break; + case 7: + ar = 0x3ff; + vf->sar_height = 11; + vf->sar_width = 20; + break; + case 8: + ar = 0x3ff; + vf->sar_height = 11; + vf->sar_width = 32; + break; + case 9: + ar = 0x3ff; + vf->sar_height = 33; + 
vf->sar_width = 80; + break; + case 10: + ar = 0x3ff; + vf->sar_height = 11; + vf->sar_width = 18; + break; + case 11: + ar = 0x3ff; + vf->sar_height = 11; + vf->sar_width = 15; + break; + case 12: + ar = 0x3ff; + vf->sar_height = 33; + vf->sar_width = 64; + break; + case 13: + ar = 0x3ff; + vf->sar_height = 99; + vf->sar_width = 160; + break; + case 14: + ar = 0x3ff; + vf->sar_height = 3; + vf->sar_width = 4; + break; + case 15: + ar = 0x3ff; + vf->sar_height = 2; + vf->sar_width = 3; + break; + case 16: + ar = 0x3ff; + vf->sar_height = 1; + vf->sar_width = 2; + break; + default: + ar = 0x3ff; + vf->sar_height = 1; + vf->sar_width = 1; + break; + } + } + ar = min_t(u32, ar, DISP_RATIO_ASPECT_RATIO_MAX); + + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); +} + +static int vh264mvc_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&lock, flags); + states->vf_pool_size = VF_POOL_SIZE; + + i = put_ptr - fill_ptr; + if (i < 0) + i += VF_POOL_SIZE; + states->buf_free_num = i; + + i = putting_ptr - put_ptr; + if (i < 0) + i += VF_POOL_SIZE; + states->buf_recycle_num = i; + + i = fill_ptr - get_ptr; + if (i < 0) + i += VF_POOL_SIZE; + states->buf_avail_num = i; + + spin_unlock_irqrestore(&lock, flags); + return 0; +} + +void send_drop_cmd(void) +{ + int ready_cnt = 0; + int temp_get_ptr = get_ptr; + int temp_fill_ptr = fill_ptr; + + while (temp_get_ptr != temp_fill_ptr) { + if ((vfpool_idx[temp_get_ptr].view0_buf_id >= 0) + && (vfpool_idx[temp_get_ptr].view1_buf_id >= 0) + && (vfpool_idx[temp_get_ptr].view0_drop == 0) + && (vfpool_idx[temp_get_ptr].view1_drop == 0)) + ready_cnt++; + INCPTR(temp_get_ptr); + } + if (dbg_mode & 0x40) { + pr_info("ready_cnt is %d ; no_dropping_cnt is %d\n", ready_cnt, + no_dropping_cnt); + } + if ((no_dropping_cnt >= DROPPING_FIRST_WAIT) + && (ready_cnt < drop_thread_hold)) + WRITE_VREG(DROP_CONTROL, (1 << 31) | (drop_rate)); + else + WRITE_VREG(DROP_CONTROL, 0); +} + +#if 
0 +int get_valid_frame(void) +{ + int ready_cnt = 0; + int temp_get_ptr = get_ptr; + int temp_fill_ptr = fill_ptr; + + while (temp_get_ptr != temp_fill_ptr) { + if ((vfpool_idx[temp_get_ptr].view0_buf_id >= 0) + && (vfpool_idx[temp_get_ptr].view1_buf_id >= 0) + && (vfpool_idx[temp_get_ptr].view0_drop == 0) + && (vfpool_idx[temp_get_ptr].view1_drop == 0)) + ready_cnt++; + INCPTR(temp_get_ptr); + } + return ready_cnt; +} +#endif +static struct vframe_s *vh264mvc_vf_peek(void *op_arg) +{ + + if (get_ptr == fill_ptr) + return NULL; + send_drop_cmd(); + return &vfpool[get_ptr]; + +} + +static struct vframe_s *vh264mvc_vf_get(void *op_arg) +{ + struct vframe_s *vf; + int view0_buf_id; + int view1_buf_id; + struct buffer_spec_s *buf_spec_0, *buf_spec_1; + + if (get_ptr == fill_ptr) + return NULL; + + view0_buf_id = vfpool_idx[get_ptr].view0_buf_id; + view1_buf_id = vfpool_idx[get_ptr].view1_buf_id; + vf = &vfpool[get_ptr]; + + if ((view0_buf_id >= 0) && (view1_buf_id >= 0)) { + if (view_mode == 0 || view_mode == 1) { + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; + + buf_spec_0 = (view_mode == 0) ? 
(&buffer_spec0[view0_buf_id]) : + (&buffer_spec1[view1_buf_id]); + vf->canvas0Addr = vf->canvas1Addr = spec2canvas(buf_spec_0); + + if (is_support_vdec_canvas()) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->canvas0_config[0].block_mode = CANVAS_BLKMODE_32X32; + vf->canvas0_config[0].phy_addr = buf_spec_0->y_addr; + vf->canvas0_config[0].width = vdec_cav_get_width(buf_spec_0->y_canvas_index); + vf->canvas0_config[0].height = vdec_cav_get_height(buf_spec_0->y_canvas_index); + vf->canvas0_config[1].block_mode = CANVAS_BLKMODE_32X32; + vf->canvas0_config[1].phy_addr = buf_spec_0->u_addr; + vf->canvas0_config[1].width = vdec_cav_get_width(buf_spec_0->u_canvas_index); + vf->canvas0_config[1].height = vdec_cav_get_height(buf_spec_0->u_canvas_index); + vf->canvas0_config[2].block_mode = CANVAS_BLKMODE_32X32; + vf->canvas0_config[2].phy_addr = buf_spec_0->v_addr; + vf->canvas0_config[2].width = vdec_cav_get_width(buf_spec_0->v_canvas_index); + vf->canvas0_config[2].height = vdec_cav_get_height(buf_spec_0->v_canvas_index); + vf->plane_num = 3; + } + } else { + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_MVC; + + vf->left_eye.start_x = 0; + vf->left_eye.start_y = 0; + vf->left_eye.width = vf->width; + vf->left_eye.height = vf->height; + vf->right_eye.start_x = 0; + vf->right_eye.start_y = 0; + vf->right_eye.width = vf->width; + vf->right_eye.height = vf->height; + //vf->trans_fmt = TVIN_TFMT_3D_TB; + + if (view_mode == 2) { + buf_spec_0 = &buffer_spec1[view1_buf_id]; + buf_spec_1 = &buffer_spec0[view0_buf_id]; + } else { + buf_spec_0 = &buffer_spec0[view0_buf_id]; + buf_spec_1 = &buffer_spec1[view1_buf_id]; + } + if (is_support_vdec_canvas()) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->canvas0_config[0].block_mode = CANVAS_BLKMODE_32X32; + vf->canvas0_config[0].phy_addr = buf_spec_0->y_addr; + vf->canvas0_config[0].width = vdec_cav_get_width(buf_spec_0->y_canvas_index); + vf->canvas0_config[0].height = vdec_cav_get_height(buf_spec_0->y_canvas_index); + 
vf->canvas0_config[1].block_mode = CANVAS_BLKMODE_32X32; + vf->canvas0_config[1].phy_addr = buf_spec_0->u_addr; + vf->canvas0_config[1].width = vdec_cav_get_width(buf_spec_0->u_canvas_index); + vf->canvas0_config[1].height = vdec_cav_get_height(buf_spec_0->u_canvas_index); + vf->canvas0_config[2].block_mode = CANVAS_BLKMODE_32X32; + vf->canvas0_config[2].phy_addr = buf_spec_0->v_addr; + vf->canvas0_config[2].width = vdec_cav_get_width(buf_spec_0->v_canvas_index); + vf->canvas0_config[2].height = vdec_cav_get_height(buf_spec_0->v_canvas_index); + + vf->canvas1_config[0].block_mode = CANVAS_BLKMODE_32X32; + vf->canvas1_config[0].phy_addr = buf_spec_1->y_addr; + vf->canvas1_config[0].width = vdec_cav_get_width(buf_spec_1->y_canvas_index); + vf->canvas1_config[0].height = vdec_cav_get_height(buf_spec_1->y_canvas_index); + vf->canvas1_config[1].block_mode = CANVAS_BLKMODE_32X32; + vf->canvas1_config[1].phy_addr = buf_spec_1->u_addr; + vf->canvas1_config[1].width = vdec_cav_get_width(buf_spec_1->u_canvas_index); + vf->canvas1_config[1].height = vdec_cav_get_height(buf_spec_1->u_canvas_index); + vf->canvas1_config[2].block_mode = CANVAS_BLKMODE_32X32; + vf->canvas1_config[2].phy_addr = buf_spec_1->v_addr; + vf->canvas1_config[2].width = vdec_cav_get_width(buf_spec_1->v_canvas_index); + vf->canvas1_config[2].height = vdec_cav_get_height(buf_spec_1->v_canvas_index); + vf->plane_num = 3; + } else { + vf->canvas0Addr = spec2canvas(buf_spec_0); + vf->canvas1Addr = spec2canvas(buf_spec_1); + } + } + } + + vf->type_original = vf->type; + if (((vfpool_idx[get_ptr].view0_drop != 0) + || (vfpool_idx[get_ptr].view1_drop != 0)) + && ((no_dropping_cnt >= DROPPING_FIRST_WAIT))) + vf->frame_dirty = 1; + else + vf->frame_dirty = 0; + + INCPTR(get_ptr); + + if (frame_width == 0) + frame_width = vh264mvc_amstream_dec_info.width; + if (frame_height == 0) + frame_height = vh264mvc_amstream_dec_info.height; + + vf->width = frame_width; + vf->height = frame_height; + + if ((no_dropping_cnt < 
DROPPING_FIRST_WAIT) && (vf->frame_dirty == 0)) + no_dropping_cnt++; + return vf; + +} + +static void vh264mvc_vf_put(struct vframe_s *vf, void *op_arg) +{ + + if (vf_buf_init_flag == 0) + return; + if (vf->frame_dirty) { + + vf->frame_dirty = 0; + dirty_frame_num++; + enable_recycle = 0; + if (dbg_mode & PUT_PRINT_ENABLE) { + pr_info("invalid: dirty_frame_num is !!! %d\n", + dirty_frame_num); + } + } else { + INCPTR(putting_ptr); + while (dirty_frame_num > 0) { + INCPTR(putting_ptr); + dirty_frame_num--; + } + enable_recycle = 1; + if (dbg_mode & PUT_PRINT_ENABLE) { + pr_info("valid: dirty_frame_num is @@@ %d\n", + dirty_frame_num); + } + /* send_drop_cmd(); */ + } + +} + +static int vh264mvc_event_cb(int type, void *data, void *private_data) +{ + if (type & VFRAME_EVENT_RECEIVER_RESET) { + unsigned long flags; + + amvdec_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_light_unreg_provider(&vh264mvc_vf_prov); +#endif + spin_lock_irqsave(&lock, flags); + vh264mvc_local_init(); + vh264mvc_prot_init(); + spin_unlock_irqrestore(&lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vh264mvc_vf_prov); +#endif + amvdec_start(); + } + return 0; +} + +/**/ +static long init_canvas(int view_index, int refbuf_size, long dpb_size, + int dpb_number, int mb_width, int mb_height, + struct buffer_spec_s *buffer_spec) +{ + + unsigned long addr; + int i, j, bmmu_index; + int mb_total, ret = -1; + /* cav_con canvas; */ + mb_total = mb_width * mb_height; + mutex_lock(&vh264_mvc_mutex); + + for (j = 0; j < (dpb_number + 1); j++) { + int page_count; + if (j == 0) { + if (!view_index) + bmmu_index = 1; + else + bmmu_index = dpb_number + 2; + + ret = decoder_bmmu_box_alloc_buf_phy(mm_blk_handle, + bmmu_index, refbuf_size, DRIVER_NAME, + &ref_start_addr[view_index]); + + if (ret < 0) { + mutex_unlock(&vh264_mvc_mutex); + return ret; + } + + continue; + } + /* canvas buf */ + WRITE_VREG(ANC_CANVAS_ADDR, + index | ((index + 1) << 8) | + ((index + 2) << 
16)); + ANC_CANVAS_ADDR++; + + i = j - 1; + if (!view_index) + bmmu_index = VF_BUFFER_IDX(i); + else + bmmu_index = VF_BUFFER_IDX(i) + dpb_number + 1; +#ifdef DOUBLE_WRITE + page_count = PAGE_ALIGN((mb_total << 8) + (mb_total << 7) + + (mb_total << 6) + (mb_total << 5)) / PAGE_SIZE; +#else + page_count = PAGE_ALIGN((mb_total << 8) + + (mb_total << 7)) / PAGE_SIZE; +#endif + + ret = decoder_bmmu_box_alloc_buf_phy(mm_blk_handle, + bmmu_index, page_count << PAGE_SHIFT, + DRIVER_NAME, &buffer_spec[i].phy_addr); + + if (ret < 0) { + buffer_spec[i].alloc_count = 0; + mutex_unlock(&vh264_mvc_mutex); + return ret; + } + + addr = buffer_spec[i].phy_addr; + buffer_spec[i].alloc_count = page_count; + buffer_spec[i].y_addr = addr; + buffer_spec[i].y_canvas_index = index; + config_cav_lut_ex(index, addr, + mb_width << 4, mb_height << 4, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); + + addr += mb_total << 8; + index++; + buffer_spec[i].u_addr = addr; + buffer_spec[i].u_canvas_index = index; + config_cav_lut_ex(index, addr, mb_width << 3, mb_height << 3, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); + + addr += mb_total << 6; + index++; + buffer_spec[i].v_addr = addr; + buffer_spec[i].v_canvas_index = index; + config_cav_lut_ex(index, addr, mb_width << 3, mb_height << 3, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); + + index++; + } + mutex_unlock(&vh264_mvc_mutex); + return 0; +} + +static int get_max_dec_frame_buf_size(int level_idc, + int max_reference_frame_num, int mb_width, + int mb_height) +{ + int pic_size = mb_width * mb_height * 384; + + int size = 0; + + switch (level_idc) { + case 9: + size = 152064; + break; + case 10: + size = 152064; + break; + case 11: + size = 345600; + break; + case 12: + size = 912384; + break; + case 13: + size = 912384; + break; + case 20: + size = 912384; + break; + case 21: + size = 1824768; + break; + case 22: + size = 3110400; + break; + case 30: + size = 3110400; + break; + case 31: + size = 6912000; + 
break; + case 32: + size = 7864320; + break; + case 40: + size = 12582912; + break; + case 41: + size = 12582912; + break; + case 42: + size = 13369344; + break; + case 50: + size = 42393600; + break; + case 51: + size = 70778880; + break; + default: + break; + } + + size /= pic_size; + size = size + 1; /* For MVC need onr more buffer */ + if (max_reference_frame_num > size) + size = max_reference_frame_num; + if (size > DECODE_BUFFER_NUM_MAX) + size = DECODE_BUFFER_NUM_MAX; + + return size; +} + +int check_in_list(int pos, int *slot) +{ + int i; + int ret = 0; + + for (i = 0; i < VF_POOL_SIZE; i++) { + if ((vfpool_idx[i].display_pos == pos) + && (vfpool_idx[i].used == 0)) { + ret = 1; + *slot = vfpool_idx[i].slot; + break; + } + } + return ret; +} + +static void do_alloc_work(struct work_struct *work) +{ + int level_idc, max_reference_frame_num, mb_width, mb_height; + int refbuf_size; + int ret = READ_VREG(MAILBOX_COMMAND); + + switch (ret & 0xff) { + case CMD_ALLOC_VIEW_0: + if (dbg_mode & 0x1) { + pr_info + ("Start H264 display buffer for view 0\n"); + } + + ret = READ_VREG(MAILBOX_DATA_0); + level_idc = (ret >> 24) & 0xff; + max_reference_frame_num = (ret >> 16) & 0xff; + mb_width = (ret >> 8) & 0xff; + mb_height = (ret >> 0) & 0xff; + max_dec_frame_buffering[0] = + get_max_dec_frame_buf_size(level_idc, + max_reference_frame_num, + mb_width, mb_height); + + total_dec_frame_buffering[0] = + max_dec_frame_buffering[0] + dynamic_buf_num_margin; + + mb_width = (mb_width + 3) & 0xfffffffc; + mb_height = (mb_height + 3) & 0xfffffffc; + + dpb_size = mb_width * mb_height * 384; + ref_size = mb_width * mb_height * 96; + + if (dbg_mode & 0x1) { + pr_info("dpb_size: 0x%x\n", dpb_size); + pr_info("ref_size: 0x%x\n", ref_size); + pr_info("total_dec_frame_buffering[0] : 0x%x\n", + total_dec_frame_buffering[0]); + pr_info("max_reference_frame_num: 0x%x\n", + max_reference_frame_num); + } + refbuf_size + = ref_size * (max_reference_frame_num + 1) * 2; + + if 
(is_support_vdec_canvas()) + index = 0; + else + index = CANVAS_INDEX_START; + ANC_CANVAS_ADDR = ANC0_CANVAS_ADDR; + + ret = + init_canvas(0, refbuf_size, dpb_size, + total_dec_frame_buffering[0], mb_width, + mb_height, buffer_spec0); + + if (ret < 0) { + pr_info(" Un-expected memory alloc problem\n"); + return; + } + + WRITE_VREG(REF_START_VIEW_0, + video_domain_addr(ref_start_addr[0])); + WRITE_VREG(MAILBOX_DATA_0, + (max_dec_frame_buffering[0] << 8) | + (total_dec_frame_buffering[0] << 0)); + WRITE_VREG(MAILBOX_DATA_1, ref_size); + WRITE_VREG(MAILBOX_COMMAND, CMD_FINISHED); + + if (dbg_mode & 0x1) { + pr_info + ("End H264 display buffer for view 0\n"); + } + if (frame_width == 0) { + if (vh264mvc_amstream_dec_info.width) + frame_width = vh264mvc_amstream_dec_info.width; + else + frame_width = mb_width << 4; + } + if (frame_height == 0) { + frame_height = mb_height << 4; + if (frame_height == 1088) + frame_height = 1080; + } + break; + case CMD_ALLOC_VIEW_1: + if (dbg_mode & 0x1) { + pr_info + ("Start H264 display buffer for view 1\n"); + } + + ret = READ_VREG(MAILBOX_DATA_0); + level_idc = (ret >> 24) & 0xff; + max_reference_frame_num = (ret >> 16) & 0xff; + mb_width = (ret >> 8) & 0xff; + mb_height = (ret >> 0) & 0xff; + max_dec_frame_buffering[1] = + get_max_dec_frame_buf_size(level_idc, + max_reference_frame_num, + mb_width, mb_height); + if (max_dec_frame_buffering[1] != max_dec_frame_buffering[0]) { + pr_info + (" Warning: view0/1 max_dec_frame_buffering "); + pr_info("different : 0x%x/0x%x, Use View0\n", + max_dec_frame_buffering[0], + max_dec_frame_buffering[1]); + max_dec_frame_buffering[1] = max_dec_frame_buffering[0]; + } + + total_dec_frame_buffering[1] = + max_dec_frame_buffering[1] + dynamic_buf_num_margin; + + mb_width = (mb_width + 3) & 0xfffffffc; + mb_height = (mb_height + 3) & 0xfffffffc; + + dpb_size = mb_width * mb_height * 384; + ref_size = mb_width * mb_height * 96; + refbuf_size = ref_size * (max_reference_frame_num + 1) * 2; + if 
(dbg_mode & 0x1) { + pr_info("dpb_size: 0x%x\n", dpb_size); + pr_info("ref_size: 0x%x\n", ref_size); + pr_info("total_dec_frame_buffering[1] : 0x%x\n", + total_dec_frame_buffering[1]); + pr_info("max_reference_frame_num: 0x%x\n", + max_reference_frame_num); + } + + if (is_support_vdec_canvas()) + index = total_dec_frame_buffering[0] * 3; + else + index = CANVAS_INDEX_START + total_dec_frame_buffering[0] * 3; + ANC_CANVAS_ADDR = + ANC0_CANVAS_ADDR + total_dec_frame_buffering[0]; + + ret = init_canvas(1, refbuf_size, dpb_size, + total_dec_frame_buffering[1], mb_width, + mb_height, buffer_spec1); + + if (ret < 0) { + pr_info(" Un-expected memory alloc problem\n"); + return; + } + + WRITE_VREG(REF_START_VIEW_1, + video_domain_addr(ref_start_addr[1])); + WRITE_VREG(MAILBOX_DATA_0, + (max_dec_frame_buffering[1] << 8) | + (total_dec_frame_buffering[1] << 0)); + WRITE_VREG(MAILBOX_DATA_1, ref_size); + WRITE_VREG(MAILBOX_COMMAND, CMD_FINISHED); + + if (dbg_mode & 0x1) { + pr_info + ("End H264 buffer allocation for view 1\n"); + } + if (frame_width == 0) { + if (vh264mvc_amstream_dec_info.width) + frame_width = vh264mvc_amstream_dec_info.width; + else + frame_width = mb_width << 4; + } + if (frame_height == 0) { + frame_height = mb_height << 4; + if (frame_height == 1088) + frame_height = 1080; + } + break; + } + +} + +static void mvc_set_rp(void) { + unsigned long flags; + + spin_lock_irqsave(&mvc_rp_lock, flags); + STBUF_WRITE(&vdec->vbuf, set_rp, + READ_VREG(VLD_MEM_VIFIFO_RP)); + spin_unlock_irqrestore(&mvc_rp_lock, flags); +} + +#ifdef HANDLE_h264mvc_IRQ +static irqreturn_t vh264mvc_isr(int irq, void *dev_id) +#else +static void vh264mvc_isr(void) +#endif +{ + int drop_status; + struct vframe_s *vf; + unsigned int pts, pts_valid = 0; + u64 pts_us64; + u32 frame_size; + int ret = READ_VREG(MAILBOX_COMMAND); + + mvc_set_rp(); + + /* pr_info("vh264mvc_isr, cmd =%x\n", ret); */ + switch (ret & 0xff) { + case CMD_ALLOC_VIEW_0: + case CMD_ALLOC_VIEW_1: + 
schedule_work(&alloc_work); + break; + case CMD_FRAME_DISPLAY: + ret = READ_VREG(MAILBOX_DATA_0); + display_buff_id = (ret >> 0) & 0x3f; + display_view_id = (ret >> 6) & 0x3; + drop_status = (ret >> 8) & 0x1; + display_POC = READ_VREG(MAILBOX_DATA_1); + stream_offset = READ_VREG(MAILBOX_DATA_2); + /* if (display_view_id == 0) */ + WRITE_VREG(MAILBOX_COMMAND, CMD_FINISHED); + +#ifdef DEBUG_SKIP + view_total++; + if (drop_status) + view_dropped++; +#endif + if (dbg_mode & 0x1) { + pr_info + (" H264 display frame ready - View : %x, Buffer : %x\n", + display_view_id, display_buff_id); + pr_info + (" H264 display frame POC -- Buffer : %x, POC : %x\n", + display_buff_id, display_POC); + pr_info("H264 display frame ready\n"); + } + if (dbg_mode & 0x10) { + if ((dbg_mode & 0x20) == 0) { + while (READ_VREG(BUFFER_RECYCLE) != 0) + ; + WRITE_VREG(BUFFER_RECYCLE, + (display_view_id << 8) | + (display_buff_id + 1)); + display_buff_id = -1; + display_view_id = -1; + display_POC = -1; + } + } else { + unsigned char in_list_flag = 0; + + int slot = 0; + + in_list_flag = check_in_list(display_POC, &slot); + + if ((dbg_mode & 0x40) && (drop_status)) { + pr_info + ("drop_status:%dview_id=%d,buff_id=%d,", + drop_status, display_view_id, display_buff_id); + pr_info + ("offset=%d, display_POC = %d,fill_ptr=0x%x\n", + stream_offset, display_POC, fill_ptr); + } + + if ((in_list_flag) && (stream_offset != 0)) { + pr_info + ("error case ,display_POC is %d, slot is %d\n", + display_POC, slot); + in_list_flag = 0; + } + if (!in_list_flag) { + if (display_view_id == 0) { + vfpool_idx[fill_ptr].view0_buf_id = + display_buff_id; + view0_vfbuf_use[display_buff_id]++; + vfpool_idx[fill_ptr].stream_offset = + stream_offset; + vfpool_idx[fill_ptr].view0_drop = + drop_status; + } + if (display_view_id == 1) { + vfpool_idx[fill_ptr].view1_buf_id = + display_buff_id; + vfpool_idx[fill_ptr].view1_drop = + drop_status; + view1_vfbuf_use[display_buff_id]++; + } + vfpool_idx[fill_ptr].slot = fill_ptr; + 
vfpool_idx[fill_ptr].display_pos = display_POC; + + } else { + if (display_view_id == 0) { + vfpool_idx[slot].view0_buf_id = + display_buff_id; + view0_vfbuf_use[display_buff_id]++; + vfpool_idx[slot].stream_offset = + stream_offset; + vfpool_idx[slot].view0_drop = + drop_status; + + } + if (display_view_id == 1) { + vfpool_idx[slot].view1_buf_id = + display_buff_id; + view1_vfbuf_use[display_buff_id]++; + vfpool_idx[slot].view1_drop = + drop_status; + } + vf = &vfpool[slot]; + + if (display_view_id == 0) { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + VF_BUFFER_IDX(display_buff_id)); + + } else if (display_view_id == 1) { + vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + VF_BUFFER_IDX(display_buff_id)); + + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + VF_BUFFER_IDX(display_buff_id) + + total_dec_frame_buffering[0] + + 1); + } + + + + if (vfpool_idx[slot].stream_offset == 0) { + pr_info + ("error case, invalid stream offset\n"); + } + if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, + vfpool_idx[slot].stream_offset, &pts, + &frame_size, + 0x10000, &pts_us64) == 0) + pts_valid = 1; + else + pts_valid = 0; + vf->pts = (pts_valid) ? pts : 0; + vf->pts_us64 = (pts_valid) ? pts_us64 : 0; + /* vf->pts = vf->pts_us64 ? 
vf->pts_us64 + * : vf->pts ; + */ + /* vf->pts = vf->pts_us64; */ + if (dbg_mode & 0x80) + pr_info("vf->pts:%d\n", vf->pts); + vfpool_idx[slot].used = 1; + INCPTR(fill_ptr); + set_frame_info(vf); + + gvs->frame_dur = frame_dur; + vdec_count_info(gvs, 0, + vfpool_idx[slot].stream_offset); + + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + + } + } + break; + case CMD_FATAL_ERROR: + pr_info("fatal error !!!\n"); + schedule_work(&error_wd_work); + break; + default: + break; + } +#ifdef HANDLE_h264mvc_IRQ + return IRQ_HANDLED; +#else + return; +#endif +} + +static void vh264_mvc_set_clk(struct work_struct *work) +{ + if (frame_dur > 0 && saved_resolution != + frame_width * frame_height * (96000 / frame_dur)) { + int fps = 96000 / frame_dur; + + saved_resolution = frame_width * frame_height * fps; + vdec_source_changed(VFORMAT_H264MVC, + frame_width, frame_height, fps * 2); + } +} + +static void vh264mvc_put_timer_func(struct timer_list *timer) +{ + int valid_frame = 0; + + mvc_set_rp(); + + if (enable_recycle == 0) { + if (dbg_mode & TIME_TASK_PRINT_ENABLE) { + /* valid_frame = get_valid_frame(); */ + pr_info("dirty_frame_num is %d , valid frame is %d\n", + dirty_frame_num, valid_frame); + + } + /* goto RESTART; */ + } + + while ((putting_ptr != put_ptr) && (READ_VREG(BUFFER_RECYCLE) == 0)) { + int view0_buf_id = vfpool_idx[put_ptr].view0_buf_id; + int view1_buf_id = vfpool_idx[put_ptr].view1_buf_id; + + if ((view0_buf_id >= 0) && + (view0_vfbuf_use[view0_buf_id] == 1)) { + if (dbg_mode & 0x100) { + pr_info + ("round 0: put_ptr is %d ;view0_buf_id is %d\n", + put_ptr, view0_buf_id); + } + WRITE_VREG(BUFFER_RECYCLE, + (0 << 8) | (view0_buf_id + 1)); + view0_vfbuf_use[view0_buf_id] = 0; + vfpool_idx[put_ptr].view0_buf_id = -1; + vfpool_idx[put_ptr].view0_drop = 0; + } else if ((view1_buf_id >= 0) + && (view1_vfbuf_use[view1_buf_id] == 1)) { + if (dbg_mode & 0x100) { + pr_info + ("round 1: put_ptr is %d ;view1_buf_id %d==\n", + put_ptr, 
view1_buf_id); + } + WRITE_VREG(BUFFER_RECYCLE, + (1 << 8) | (view1_buf_id + 1)); + view1_vfbuf_use[view1_buf_id] = 0; + vfpool_idx[put_ptr].display_pos = DISPLAY_INVALID_POS; + vfpool_idx[put_ptr].view1_buf_id = -1; + vfpool_idx[put_ptr].view1_drop = 0; + vfpool_idx[put_ptr].used = 0; + INCPTR(put_ptr); + } + } + + schedule_work(&set_clk_work); + + /* RESTART: */ + timer->expires = jiffies + PUT_INTERVAL; + + add_timer(timer); +} + +int vh264mvc_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + vstatus->frame_width = frame_width; + vstatus->frame_height = frame_height; + if (frame_dur != 0) + vstatus->frame_rate = 96000 / frame_dur; + else + vstatus->frame_rate = -1; + vstatus->error_count = READ_VREG(AV_SCRATCH_D); + vstatus->status = stat; + vstatus->bit_rate = gvs->bit_rate; + vstatus->frame_dur = frame_dur; + vstatus->frame_data = gvs->frame_data; + vstatus->total_data = gvs->total_data; + vstatus->frame_count = gvs->frame_count; + vstatus->error_frame_count = gvs->error_frame_count; + vstatus->drop_frame_count = gvs->drop_frame_count; + vstatus->total_data = gvs->total_data; + vstatus->samp_cnt = gvs->samp_cnt; + vstatus->offset = gvs->offset; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + +static int vh264mvc_vdec_info_init(void) +{ + gvs = kzalloc(sizeof(struct vdec_info), GFP_KERNEL); + if (NULL == gvs) { + pr_info("the struct of vdec status malloc failed.\n"); + return -ENOMEM; + } + return 0; +} + +int vh264mvc_set_trickmode(struct vdec_s *vdec, unsigned long trickmode) +{ + if (trickmode == TRICKMODE_I) { + WRITE_VREG(AV_SCRATCH_F, + (READ_VREG(AV_SCRATCH_F) & 0xfffffffc) | 2); + trickmode_i = 1; + } else if (trickmode == TRICKMODE_NONE) { + WRITE_VREG(AV_SCRATCH_F, READ_VREG(AV_SCRATCH_F) & 0xfffffffc); + trickmode_i = 0; + } + + return 0; +} + +static void H264_DECODE_INIT(void) +{ + int i; + + i = READ_VREG(DECODE_SKIP_PICTURE); + +#if 1 /* /MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ 
+ WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 9) | (1 << 8)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + +#else + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + READ_RESET_REG(RESET0_REGISTER); + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + WRITE_RESET_REG(RESET2_REGISTER, RESET_PIC_DC | RESET_DBLK); +#endif + + /* Wait for some time for RESET */ + READ_VREG(DECODE_SKIP_PICTURE); + READ_VREG(DECODE_SKIP_PICTURE); + + WRITE_VREG(DECODE_SKIP_PICTURE, i); + + /* fill_weight_pred */ + WRITE_VREG(MC_MPORT_CTRL, 0x0300); + for (i = 0; i < 192; i++) + WRITE_VREG(MC_MPORT_DAT, 0x100); + WRITE_VREG(MC_MPORT_CTRL, 0); + + WRITE_VREG(MB_WIDTH, 0xff); /* invalid mb_width */ + + /* set slice start to 0x000000 or 0x000001 for check more_rbsp_data */ + WRITE_VREG(SLICE_START_BYTE_01, 0x00000000); + WRITE_VREG(SLICE_START_BYTE_23, 0x01010000); + /* set to mpeg2 to enable mismatch logic */ + WRITE_VREG(MPEG1_2_REG, 1); + /* disable COEF_GT_64 , error_m4_table and voff_rw_err */ + WRITE_VREG(VLD_ERROR_MASK, 0x1011); + + /* Config MCPU Amrisc interrupt */ + WRITE_VREG(ASSIST_AMR1_INT0, 0x1); /* viu_vsync_int */ + WRITE_VREG(ASSIST_AMR1_INT1, 0x5); /* mbox_isr */ + WRITE_VREG(ASSIST_AMR1_INT2, 0x8); /* vld_isr */ + WRITE_VREG(ASSIST_AMR1_INT3, 0x15); /* vififo_empty */ + WRITE_VREG(ASSIST_AMR1_INT4, 0xd); /* rv_ai_mb_finished_int */ + WRITE_VREG(ASSIST_AMR1_INT7, 0x14); /* dcac_dma_done */ + + /* Config MCPU Amrisc interrupt */ + WRITE_VREG(ASSIST_AMR1_INT5, 0x9); /* MCPU interrupt */ + WRITE_VREG(ASSIST_AMR1_INT6, 0x17); /* CCPU interrupt */ + + WRITE_VREG(CPC_P, 0xc00); /* CCPU 
Code will start from 0xc00 */ + WRITE_VREG(CINT_VEC_BASE, (0xc20 >> 5)); +#if 0 + WRITE_VREG(POWER_CTL_VLD, + READ_VREG(POWER_CTL_VLD) | (0 << 10) | + (1 << 9) | (1 << 6)); +#else + WRITE_VREG(POWER_CTL_VLD, ((1 << 10) | /* disable cabac_step_2 */ + (1 << 9) | /* viff_drop_flag_en */ + (1 << 6) /* h264_000003_en */ + ) + ); +#endif + WRITE_VREG(M4_CONTROL_REG, (1 << 13)); /* H264_DECODE_INFO - h264_en */ + + if (is_support_vdec_canvas()) + WRITE_VREG(CANVAS_START, 0); + else + WRITE_VREG(CANVAS_START, CANVAS_INDEX_START); +#if 1 + /* Start Address of Workspace (UCODE, temp_data...) */ + WRITE_VREG(WORKSPACE_START, + video_domain_addr(work_space_adr)); +#else + /* Start Address of Workspace (UCODE, temp_data...) */ + WRITE_VREG(WORKSPACE_START, + 0x05000000); +#endif + /* Clear all sequence parameter set available */ + WRITE_VREG(SPS_STATUS, 0); + /* Clear all picture parameter set available */ + WRITE_VREG(PPS_STATUS, 0); + /* Set current microcode to NULL */ + WRITE_VREG(CURRENT_UCODE, 0xff); + /* Set current SPS/PPS to NULL */ + WRITE_VREG(CURRENT_SPS_PPS, 0xffff); + /* Set decode status to DECODE_START_HEADER */ + WRITE_VREG(DECODE_STATUS, 1); +} + +static void vh264mvc_prot_init(void) +{ + while (READ_VREG(DCAC_DMA_CTRL) & 0x8000) + ; + while (READ_VREG(LMEM_DMA_CTRL) & 0x8000) + ; /* reg address is 0x350 */ + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + H264_DECODE_INIT(); + +#if 1 /* /MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + WRITE_VREG(DOS_SW_RESET0, (1 << 11)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + +#else + WRITE_RESET_REG(RESET0_REGISTER, 0x80); /* RESET MCPU */ +#endif + + WRITE_VREG(MAILBOX_COMMAND, 0); + WRITE_VREG(BUFFER_RECYCLE, 0); + WRITE_VREG(DROP_CONTROL, 0); + 
CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); + + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + +#if 1 /* /MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + WRITE_VREG(MDEC_PIC_DC_THRESH, 0x404038aa); +#endif +} + +static int vh264mvc_local_init(void) +{ + int i, size, ret; + display_buff_id = -1; + display_view_id = -1; + display_POC = -1; + no_dropping_cnt = 0; + init_drop_cnt = INIT_DROP_FRAME_CNT; + + for (i = 0; i < INIT_DROP_FRAME_CNT; i++) + init_drop_frame_id[i] = 0; + +#ifdef DEBUG_PTS + pts_missed = 0; + pts_hit = 0; +#endif + +#ifdef DEBUG_SKIP + view_total = 0; + view_dropped = 0; +#endif + + /* vh264mvc_ratio = vh264mvc_amstream_dec_info.ratio; */ + vh264mvc_ratio = 0x100; + + /* frame_width = vh264mvc_amstream_dec_info.width; */ + /* frame_height = vh264mvc_amstream_dec_info.height; */ + frame_dur = vh264mvc_amstream_dec_info.rate; + if (frame_dur == 0) + frame_dur = 96000 / 24; + + pts_outside = ((unsigned long) vh264mvc_amstream_dec_info.param) & 0x01; + sync_outside = ((unsigned long) vh264mvc_amstream_dec_info.param & 0x02) + >> 1; + INIT_WORK(&alloc_work, do_alloc_work); + + max_dec_frame_buffering[0] = -1; + max_dec_frame_buffering[1] = -1; + fill_ptr = get_ptr = put_ptr = putting_ptr = 0; + dirty_frame_num = 0; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + view0_vfbuf_use[i] = 0; + view1_vfbuf_use[i] = 0; + } + + for (i = 0; i < VF_POOL_SIZE; i++) { + vfpool_idx[i].display_pos = -1; + vfpool_idx[i].view0_buf_id = DISPLAY_INVALID_POS; + vfpool_idx[i].view1_buf_id = -1; + vfpool_idx[i].view0_drop = 0; + vfpool_idx[i].view1_drop = 0; + vfpool_idx[i].used = 0; + } + for (i = 0; i < VF_POOL_SIZE; i++) { + memset(&vfpool[i], 0, sizeof(struct vframe_s)); + vfpool[i].index = i; + } + init_vf_buf(); + + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + + mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + TOTAL_BMMU_BUFF_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + 
CODEC_MM_FLAGS_FOR_VDECODER); + + size = DECODER_WORK_SPACE_SIZE; + ret = decoder_bmmu_box_alloc_buf_phy(mm_blk_handle, 0, + size, DRIVER_NAME, &work_space_adr); + + return ret; +} + +static s32 vh264mvc_init(void) +{ + int ret = -1; + char *buf = vmalloc(0x1000 * 16); + + if (buf == NULL) + return -ENOMEM; + + pr_info("\nvh264mvc_init\n"); + timer_setup(&recycle_timer, vh264mvc_put_timer_func, 0); + + stat |= STAT_TIMER_INIT; + + ret = vh264mvc_vdec_info_init(); + if (0 != ret) { + vfree(buf); + return -ret; + } + + ret = vh264mvc_local_init(); + if (ret < 0) { + vfree(buf); + return ret; + } + + amvdec_enable(); + + if (tee_enabled()) { + ret = amvdec_loadmc_ex(VFORMAT_H264MVC, NULL, buf); + if (ret != 0) { + amvdec_disable(); + vfree(buf); + pr_err("H264_MVC: the %s fw loading failed, err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + return -1; + } + } else { + /* -- ucode loading (amrisc and swap code) */ + mc_cpu_addr = dma_alloc_coherent(amports_get_dma_device(), + MC_TOTAL_SIZE, &mc_dma_handle, GFP_KERNEL); + if (!mc_cpu_addr) { + amvdec_disable(); + vfree(buf); + pr_err("vh264_mvc init: Can not allocate mc memory.\n"); + return -ENOMEM; + } + + WRITE_VREG(UCODE_START_ADDR, mc_dma_handle); + + if (get_firmware_data(VIDEO_DEC_H264_MVC, buf) < 0) { + pr_err("get firmware fail."); + vfree(buf); + return -1; + } + + ret = amvdec_loadmc_ex(VFORMAT_H264MVC, NULL, buf); + + /*header*/ + memcpy((u8 *) mc_cpu_addr, buf + 0x1000, 0x1000); + /*mmco*/ + memcpy((u8 *) mc_cpu_addr + 0x1000, buf + 0x2000, 0x2000); + /*slice*/ + memcpy((u8 *) mc_cpu_addr + 0x3000, buf + 0x4000, 0x3000); + + if (ret < 0) { + amvdec_disable(); + + dma_free_coherent(amports_get_dma_device(), + MC_TOTAL_SIZE, + mc_cpu_addr, mc_dma_handle); + mc_cpu_addr = NULL; + return -EBUSY; + } + } + vfree(buf); + + stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + vh264mvc_prot_init(); + +#ifdef HANDLE_h264mvc_IRQ + if (vdec_request_irq(VDEC_IRQ_1, vh264mvc_isr, + "vh264mvc-irq", (void 
*)vh264mvc_dec_id)) { + pr_info("vh264mvc irq register error.\n"); + amvdec_disable(); + return -ENOENT; + } +#endif + + stat |= STAT_ISR_REG; + + vf_provider_init(&vh264mvc_vf_prov, PROVIDER_NAME, + &vh264mvc_vf_provider, NULL); + vf_reg_provider(&vh264mvc_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); + + stat |= STAT_VF_HOOK; + + recycle_timer.expires = jiffies + PUT_INTERVAL; + add_timer(&recycle_timer); + + stat |= STAT_TIMER_ARM; + + amvdec_start(); + + stat |= STAT_VDEC_RUN; + + return 0; +} + +static int vh264mvc_stop(void) +{ + if (stat & STAT_VDEC_RUN) { + amvdec_stop(); + stat &= ~STAT_VDEC_RUN; + } + + if (stat & STAT_ISR_REG) { + WRITE_VREG(ASSIST_MBOX1_MASK, 0); +#ifdef HANDLE_h264mvc_IRQ + vdec_free_irq(VDEC_IRQ_1, (void *)vh264mvc_dec_id); +#endif + stat &= ~STAT_ISR_REG; + } + + if (stat & STAT_TIMER_ARM) { + del_timer_sync(&recycle_timer); + stat &= ~STAT_TIMER_ARM; + } + + if (stat & STAT_VF_HOOK) { + ulong flags; + + spin_lock_irqsave(&lock, flags); + spin_unlock_irqrestore(&lock, flags); + vf_unreg_provider(&vh264mvc_vf_prov); + stat &= ~STAT_VF_HOOK; + } + + if (stat & STAT_MC_LOAD) { + if (mc_cpu_addr != NULL) { + dma_free_coherent(amports_get_dma_device(), + MC_TOTAL_SIZE, mc_cpu_addr, mc_dma_handle); + mc_cpu_addr = NULL; + } + + stat &= ~STAT_MC_LOAD; + } + + amvdec_disable(); + + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + uninit_vf_buf(); + return 0; +} + +static void error_do_work(struct work_struct *work) +{ + if (atomic_read(&vh264mvc_active)) { + vh264mvc_stop(); + vh264mvc_init(); + } +} + +static int amvdec_h264mvc_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + int config_val = 0; + + pr_info("amvdec_h264mvc probe start.\n"); + mutex_lock(&vh264_mvc_mutex); + +#if 0 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + pr_info("\namvdec_h264mvc memory resource 
undefined.\n"); + return -EFAULT; + } +#endif + + if (pdata == NULL) { + mutex_unlock(&vh264_mvc_mutex); + pr_info("\namvdec_h264mvc memory resource undefined.\n"); + return -EFAULT; + } + + if (pdata->sys_info) + vh264mvc_amstream_dec_info = *pdata->sys_info; + + if (pdata->config_len) { + pr_info("pdata->config: %s\n", pdata->config); + if (get_config_int(pdata->config, "parm_v4l_buffer_margin", + &config_val) == 0) + dynamic_buf_num_margin = config_val; + } + + pdata->dec_status = vh264mvc_dec_status; + /* pdata->set_trickmode = vh264mvc_set_trickmode; */ + + buffer_spec0 = (struct buffer_spec_s *)vzalloc( + sizeof(struct buffer_spec_s) * MAX_BMMU_BUFFER_NUM * 2); + if (NULL == buffer_spec0) + return -ENOMEM; + buffer_spec1 = &buffer_spec0[MAX_BMMU_BUFFER_NUM]; + + if (vh264mvc_init() < 0) { + pr_info("\namvdec_h264mvc init failed.\n"); + kfree(gvs); + gvs = NULL; + vfree(buffer_spec0); + buffer_spec0 = NULL; + mutex_unlock(&vh264_mvc_mutex); + return -ENODEV; + } + + INIT_WORK(&error_wd_work, error_do_work); + INIT_WORK(&set_clk_work, vh264_mvc_set_clk); + spin_lock_init(&mvc_rp_lock); + + vdec = pdata; + + atomic_set(&vh264mvc_active, 1); + + mutex_unlock(&vh264_mvc_mutex); + + pr_info("amvdec_h264mvc probe end.\n"); + + return 0; +} + +static int amvdec_h264mvc_remove(struct platform_device *pdev) +{ + pr_info("amvdec_h264mvc_remove\n"); + cancel_work_sync(&alloc_work); + cancel_work_sync(&error_wd_work); + cancel_work_sync(&set_clk_work); + vh264mvc_stop(); + frame_width = 0; + frame_height = 0; + vdec_source_changed(VFORMAT_H264MVC, 0, 0, 0); + atomic_set(&vh264mvc_active, 0); + +#ifdef DEBUG_PTS + pr_info + ("pts missed %ld, pts hit %ld, pts_outside %d, ", + pts_missed, pts_hit, pts_outside); + pr_info("duration %d, sync_outside %d\n", + frame_dur, sync_outside); +#endif + +#ifdef DEBUG_SKIP + pr_info("view_total = %ld, dropped %ld\n", view_total, view_dropped); +#endif + vfree(buffer_spec0); + buffer_spec0 = NULL; + kfree(gvs); + gvs = NULL; + + return 0; 
+} + +/****************************************/ + +static struct platform_driver amvdec_h264mvc_driver = { + .probe = amvdec_h264mvc_probe, + .remove = amvdec_h264mvc_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t amvdec_hmvc_profile = { + .name = "hmvc", + .profile = "" +}; +static struct codec_profile_t amvdec_hmvc_profile_single; + +static struct mconfig h264mvc_configs[] = { + MC_PU32("stat", &stat), + MC_PU32("dbg_mode", &dbg_mode), + MC_PU32("view_mode", &view_mode), + MC_PU32("dbg_cmd", &dbg_cmd), + MC_PU32("drop_rate", &drop_rate), + MC_PU32("drop_thread_hold", &drop_thread_hold), +}; +static struct mconfig_node h264mvc_node; + +static int __init amvdec_h264mvc_driver_init_module(void) +{ + pr_debug("amvdec_h264mvc module init\n"); + + if (platform_driver_register(&amvdec_h264mvc_driver)) { + pr_err("failed to register amvdec_h264mvc driver\n"); + return -ENODEV; + } + + vcodec_profile_register(&amvdec_hmvc_profile); + amvdec_hmvc_profile_single = amvdec_hmvc_profile; + amvdec_hmvc_profile_single.name = "h264mvc"; + vcodec_profile_register(&amvdec_hmvc_profile_single); + INIT_REG_NODE_CONFIGS("media.decoder", &h264mvc_node, + "h264mvc", h264mvc_configs, CONFIG_FOR_RW); + return 0; +} + +static void __exit amvdec_h264mvc_driver_remove_module(void) +{ + pr_debug("amvdec_h264mvc module remove.\n"); + + platform_driver_unregister(&amvdec_h264mvc_driver); +} + +/****************************************/ + +module_param(stat, uint, 0664); +MODULE_PARM_DESC(stat, "\n amvdec_h264mvc stat\n"); + +module_param(dbg_mode, uint, 0664); +MODULE_PARM_DESC(dbg_mode, "\n amvdec_h264mvc dbg mode\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n amvdec_h264mvc dynamic_buf_num_margin\n"); + +module_param(view_mode, uint, 0664); +MODULE_PARM_DESC(view_mode, "\n amvdec_h264mvc view mode\n"); + 
+module_param(dbg_cmd, uint, 0664); +MODULE_PARM_DESC(dbg_cmd, "\n amvdec_h264mvc cmd mode\n"); + +module_param(drop_rate, uint, 0664); +MODULE_PARM_DESC(drop_rate, "\n amvdec_h264mvc drop rate\n"); + +module_param(drop_thread_hold, uint, 0664); +MODULE_PARM_DESC(drop_thread_hold, "\n amvdec_h264mvc drop thread hold\n"); +module_init(amvdec_h264mvc_driver_init_module); +module_exit(amvdec_h264mvc_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC h264mvc Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Chen Zhang <chen.zhang@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/h264_multi/Makefile b/drivers/frame_provider/decoder/h264_multi/Makefile new file mode 100644 index 0000000..21dfb6a --- /dev/null +++ b/drivers/frame_provider/decoder/h264_multi/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_H264_MULTI) += amvdec_mh264.o +amvdec_mh264-objs += vmh264.o h264_dpb.o
diff --git a/drivers/frame_provider/decoder/h264_multi/h264_dpb.c b/drivers/frame_provider/decoder/h264_multi/h264_dpb.c new file mode 100644 index 0000000..52d9611 --- /dev/null +++ b/drivers/frame_provider/decoder/h264_multi/h264_dpb.c
@@ -0,0 +1,6008 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../utils/vdec.h" +#include "../utils/amvdec.h" + +#include "h264_dpb.h" + +#define FRAME_NUM_MAX_SIZE 0x10000 + +#undef pr_info +#define pr_info printk +int dpb_print(int index, int debug_flag, const char *fmt, ...) +{ + if (((h264_debug_flag & debug_flag) && + ((1 << index) & h264_debug_mask)) + || (debug_flag == PRINT_FLAG_ERROR)) { + unsigned char *buf = kzalloc(512, GFP_ATOMIC); + int len = 0; + va_list args; + + if (!buf) + return 0; + + va_start(args, fmt); + len = sprintf(buf, "%d: ", index); + vsnprintf(buf + len, 512-len, fmt, args); + pr_debug("%s", buf); + va_end(args); + kfree(buf); + } + return 0; +} + +int dpb_print_cont(int index, int debug_flag, const char *fmt, ...) 
+{ + if (((h264_debug_flag & debug_flag) && + ((1 << index) & h264_debug_mask)) + || (debug_flag == PRINT_FLAG_ERROR)) { + unsigned char *buf = kzalloc(512, GFP_ATOMIC); + int len = 0; + va_list args; + + if (!buf) + return 0; + + va_start(args, fmt); + vsnprintf(buf + len, 512-len, fmt, args); + pr_info("%s", buf); + va_end(args); + kfree(buf); + } + return 0; +} + +unsigned char dpb_is_debug(int index, int debug_flag) +{ + if (((h264_debug_flag & debug_flag) && + ((1 << index) & h264_debug_mask)) + || (debug_flag == PRINT_FLAG_ERROR)) + return 1; + return 0; +} + +#define CHECK_VALID(list_size, mark) {\ + if (list_size > MAX_LIST_SIZE || list_size < 0) { \ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_ERROR, \ + "%s(%d): listXsize[%d] %d is larger than max size\r\n",\ + __func__, __LINE__, mark, list_size);\ + list_size = 0; \ + p_H264_Dpb->dpb_error_flag = __LINE__;\ + } \ + } + +static struct DecRefPicMarking_s + dummy_dec_ref_pic_marking_buffer + [DEC_REF_PIC_MARKING_BUFFER_NUM_MAX]; +static struct StorablePicture dummy_pic; +static struct FrameStore dummy_fs; +static struct StorablePicture *get_new_pic( + struct h264_dpb_stru *p_H264_Dpb, + enum PictureStructure structure, unsigned char is_output); + + +static void init_dummy_fs(void) +{ + dummy_fs.frame = &dummy_pic; + dummy_fs.top_field = &dummy_pic; + dummy_fs.bottom_field = &dummy_pic; + + dummy_pic.top_field = &dummy_pic; + dummy_pic.bottom_field = &dummy_pic; + dummy_pic.frame = &dummy_pic; + + dummy_pic.dec_ref_pic_marking_buffer = + &dummy_dec_ref_pic_marking_buffer[0]; +} + +enum { + LIST_0 = 0, + LIST_1 = 1, + BI_PRED = 2, + BI_PRED_L0 = 3, + BI_PRED_L1 = 4 +}; + +void ref_pic_list_reordering(struct h264_dpb_stru *p_H264_Dpb, + struct Slice *currSlice) +{ + /* struct VideoParameters *p_Vid = currSlice->p_Vid; + * byte dP_nr = assignSE2partition[currSlice->dp_mode][SE_HEADER]; + * DataPartition *partition = &(currSlice->partArr[dP_nr]); + * Bitstream *currStream = partition->bitstream; + */ + int 
i, j, val; + unsigned short *reorder_cmd = + &p_H264_Dpb->dpb_param.mmco.l0_reorder_cmd[0]; + /* alloc_ref_pic_list_reordering_buffer(currSlice); */ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + if (currSlice->slice_type != I_SLICE && + currSlice->slice_type != SI_SLICE) { + /* val = currSlice->ref_pic_list_reordering_flag[LIST_0] = + * read_u_1 ("SH: ref_pic_list_reordering_flag_l0", + * currStream, &p_Dec->UsedBits); + */ + if (reorder_cmd[0] != 3) { + val = currSlice-> + ref_pic_list_reordering_flag[LIST_0] = 1; + } else { + val = currSlice-> + ref_pic_list_reordering_flag[LIST_0] = 0; + } + if (val) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "%s, ref_pic_list_reordering_flag[LIST_0] is 1\n", + __func__); + + j = 0; + i = 0; + do { + val = currSlice-> + modification_of_pic_nums_idc[LIST_0][i] = + reorder_cmd[j++]; + /* read_ue_v( + * "SH: modification_of_pic_nums_idc_l0", + * currStream, &p_Dec->UsedBits); + */ + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "%d(%d):val %x\n", i, j, val); + if (j >= 66) { + currSlice-> + ref_pic_list_reordering_flag[LIST_0] = + 0; /* by rain */ + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, + "%s error\n", __func__); + break; + } + if (val == 0 || val == 1) { + currSlice-> + abs_diff_pic_num_minus1[LIST_0][i] = + reorder_cmd[j++]; + /* read_ue_v("SH: " + *"abs_diff_pic_num_minus1_l0", + *currStream, &p_Dec->UsedBits); + */ + } else { + if (val == 2) { + currSlice-> + long_term_pic_idx[LIST_0][i] = + reorder_cmd[j++]; + /* read_ue_v( + *"SH: long_term_pic_idx_l0", + *currStream, + *&p_Dec->UsedBits); + */ + } + } + i++; + /* assert (i>currSlice-> + * num_ref_idx_active[LIST_0]); + */ + if ( + +/* + * i>currSlice->num_ref_idx_active[LIST_0] || + */ + i >= REORDERING_COMMAND_MAX_SIZE) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "%s error %d %d\n", + __func__, i, + currSlice-> + num_ref_idx_active[LIST_0]); + 
currSlice-> + ref_pic_list_reordering_flag[LIST_0] = + 0; /* by rain */ + break; + } + if (j >= 66) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, "%s error\n", + __func__); + currSlice-> + ref_pic_list_reordering_flag[LIST_0] = + 0; /* by rain */ + break; + } + + } while (val != 3); + } + } + + if (currSlice->slice_type == B_SLICE) { + reorder_cmd = &p_H264_Dpb->dpb_param.mmco.l1_reorder_cmd[0]; + /* val = currSlice->ref_pic_list_reordering_flag[LIST_1] + *= read_u_1 ("SH: ref_pic_list_reordering_flag_l1", + *currStream, + *&p_Dec->UsedBits); + */ + + if (reorder_cmd[0] != 3) { + val = + currSlice->ref_pic_list_reordering_flag[LIST_1] = 1; + } else { + val = + currSlice->ref_pic_list_reordering_flag[LIST_1] = 0; + } + + if (val) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "%s, ref_pic_list_reordering_flag[LIST_1] is 1\n", + __func__); + + j = 0; + i = 0; + do { + val = currSlice-> + modification_of_pic_nums_idc[LIST_1][i] = + reorder_cmd[j++]; + /* read_ue_v( + *"SH: modification_of_pic_nums_idc_l1", + *currStream, + *&p_Dec->UsedBits); + */ + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "%d(%d):val %x\n", + i, j, val); + if (j >= 66) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, "%s error\n", + __func__); + currSlice-> + ref_pic_list_reordering_flag[LIST_1] = + 0; /* by rain */ + break; + } + if (val == 0 || val == 1) { + currSlice-> + abs_diff_pic_num_minus1[LIST_1][i] = + reorder_cmd[j++]; + /* read_ue_v( + *"SH: abs_diff_pic_num_minus1_l1", + *currStream, &p_Dec->UsedBits); + */ + } else { + if (val == 2) { + currSlice-> + long_term_pic_idx[LIST_1][i] = + reorder_cmd[j++]; + /* read_ue_v( + *"SH: long_term_pic_idx_l1", + *currStream, + *&p_Dec->UsedBits); + */ + } + } + i++; + /* assert(i>currSlice-> + * num_ref_idx_active[LIST_1]); + */ + if ( + /*i>currSlice->num_ref_idx_active[LIST_1] || */ + i >= REORDERING_COMMAND_MAX_SIZE) { + dpb_print(p_H264_Dpb->decoder_index, + 
PRINT_FLAG_DPB_DETAIL, + "%s error %d %d\n", + __func__, i, + currSlice-> + num_ref_idx_active[LIST_0]); + currSlice-> + ref_pic_list_reordering_flag[LIST_1] = + 0; /* by rain */ + break; + } + if (j >= 66) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, + "%s error\n", __func__); + break; + } + } while (val != 3); + } + } + + /* set reference index of redundant slices. */ + /* + *if (currSlice->redundant_pic_cnt && + *(currSlice->slice_type != I_SLICE)) + *{ + * currSlice->redundant_slice_ref_idx = + * currSlice->abs_diff_pic_num_minus1[LIST_0][0] + 1; + *} + */ +} + +void slice_prepare(struct h264_dpb_stru *p_H264_Dpb, + struct DecodedPictureBuffer *p_Dpb, + struct VideoParameters *p_Vid, + struct SPSParameters *sps, struct Slice *pSlice) +{ + int i, j; + /* p_Vid->active_sps = sps; */ + unsigned short *mmco_cmd = &p_H264_Dpb->dpb_param.mmco.mmco_cmd[0]; + /* for decode_poc */ + sps->pic_order_cnt_type = + p_H264_Dpb->dpb_param.l.data[PIC_ORDER_CNT_TYPE]; + sps->log2_max_pic_order_cnt_lsb_minus4 = + p_H264_Dpb->dpb_param.l.data[LOG2_MAX_PIC_ORDER_CNT_LSB] - 4; + sps->num_ref_frames_in_pic_order_cnt_cycle = + p_H264_Dpb-> + dpb_param.l.data[NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE]; + for (i = 0; i < 128; i++) + sps->offset_for_ref_frame[i] = + (short) p_H264_Dpb-> + dpb_param.mmco.offset_for_ref_frame_base[i]; + sps->offset_for_non_ref_pic = + (short) p_H264_Dpb->dpb_param.l.data[OFFSET_FOR_NON_REF_PIC]; + sps->offset_for_top_to_bottom_field = + (short) p_H264_Dpb->dpb_param.l.data + [OFFSET_FOR_TOP_TO_BOTTOM_FIELD]; + + pSlice->frame_num = p_H264_Dpb->dpb_param.dpb.frame_num; + pSlice->idr_flag = + (p_H264_Dpb->dpb_param.dpb.NAL_info_mmco & 0x1f) + == 5 ? 
1 : 0; + pSlice->nal_reference_idc = + (p_H264_Dpb->dpb_param.dpb.NAL_info_mmco >> 5) + & 0x3; + pSlice->pic_order_cnt_lsb = + p_H264_Dpb->dpb_param.dpb.pic_order_cnt_lsb; + pSlice->field_pic_flag = 0; + pSlice->bottom_field_flag = 0; + pSlice->delta_pic_order_cnt_bottom = val( + p_H264_Dpb->dpb_param.dpb.delta_pic_order_cnt_bottom); + pSlice->delta_pic_order_cnt[0] = val( + p_H264_Dpb->dpb_param.dpb.delta_pic_order_cnt_0); + pSlice->delta_pic_order_cnt[1] = val( + p_H264_Dpb->dpb_param.dpb.delta_pic_order_cnt_1); + + p_Vid->last_has_mmco_5 = 0; + /* last memory_management_control_operation is 5 */ + p_Vid->last_pic_bottom_field = 0; + p_Vid->max_frame_num = 1 << + (p_H264_Dpb->dpb_param.l.data[LOG2_MAX_FRAME_NUM]); + + /**/ + pSlice->structure = (p_H264_Dpb-> + dpb_param.l.data[NEW_PICTURE_STRUCTURE] == 3) ? + FRAME : p_H264_Dpb->dpb_param.l.data[NEW_PICTURE_STRUCTURE]; + if (pSlice->structure == FRAME) { + pSlice->field_pic_flag = 0; + pSlice->bottom_field_flag = 0; + } else { + pSlice->field_pic_flag = 1; + if (pSlice->structure == TOP_FIELD) + pSlice->bottom_field_flag = 0; + else + pSlice->bottom_field_flag = 1; + } + pSlice->pic_struct = p_H264_Dpb->dpb_param.l.data[PICTURE_STRUCT]; + + sps->num_ref_frames = p_H264_Dpb-> + dpb_param.l.data[MAX_REFERENCE_FRAME_NUM]; + sps->profile_idc = + (p_H264_Dpb->dpb_param.l.data[PROFILE_IDC_MMCO] >> 8) & 0xff; + /*sps->max_dpb_size = p_H264_Dpb->dpb_param.l.data[MAX_DPB_SIZE];*/ + if (pSlice->idr_flag) { + pSlice->long_term_reference_flag = mmco_cmd[0] & 1; + pSlice->no_output_of_prior_pics_flag = (mmco_cmd[0] >> 1) & 1; + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "IDR: long_term_reference_flag %d no_output_of_prior_pics_flag %d\r\n", + pSlice->long_term_reference_flag, + pSlice->no_output_of_prior_pics_flag); + + p_H264_Dpb->long_term_reference_flag = pSlice->long_term_reference_flag; + + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "idr set pre_frame_num(%d) to frame_num 
(%d)\n", + p_Vid->pre_frame_num, pSlice->frame_num); + + p_Vid->pre_frame_num = pSlice->frame_num; + } else if (p_H264_Dpb->mDPB.first_pic_done == 0) { + /* by rain + handle the case when first slice is I instead of IDR + */ + p_Vid->pre_frame_num = pSlice->frame_num; + } + /* pSlice->adaptive_ref_pic_buffering_flag; */ + sps->log2_max_frame_num_minus4 = + p_H264_Dpb->dpb_param.l.data[LOG2_MAX_FRAME_NUM] - 4; + sps->frame_num_gap_allowed = p_H264_Dpb->dpb_param.l.data[FRAME_NUM_GAP_ALLOWED]; + + p_Vid->non_conforming_stream = + p_H264_Dpb->dpb_param.l.data[NON_CONFORMING_STREAM]; + p_Vid->recovery_point = + p_H264_Dpb->dpb_param.l.data[RECOVERY_POINT]; + switch (p_H264_Dpb->dpb_param.l.data[SLICE_TYPE]) { + case I_Slice: + pSlice->slice_type = I_SLICE; + break; + case P_Slice: + pSlice->slice_type = P_SLICE; + break; + case B_Slice: + pSlice->slice_type = B_SLICE; + break; + default: + pSlice->slice_type = NUM_SLICE_TYPES; + break; + } + + pSlice->num_ref_idx_active[LIST_0] = + p_H264_Dpb->dpb_param.dpb.num_ref_idx_l0_active_minus1 + + 1; + /* p_H264_Dpb->dpb_param.l.data[PPS_NUM_REF_IDX_L0_ACTIVE_MINUS1]; */ + pSlice->num_ref_idx_active[LIST_1] = + p_H264_Dpb->dpb_param.dpb.num_ref_idx_l1_active_minus1 + + 1; + /* p_H264_Dpb->dpb_param.l.data[PPS_NUM_REF_IDX_L1_ACTIVE_MINUS1]; */ + + pSlice->p_Vid = p_Vid; + pSlice->p_Dpb = p_Dpb; + /* + p_H264_Dpb->colocated_buf_size = + p_H264_Dpb->dpb_param.l.data[FRAME_SIZE_IN_MB] * 96;*/ + pSlice->first_mb_in_slice = + p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE]; + pSlice->mode_8x8_flags = p_H264_Dpb->dpb_param.l.data[MODE_8X8_FLAGS]; + pSlice->picture_structure_mmco = + p_H264_Dpb->dpb_param.dpb.picture_structure_mmco; + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s slice_type is %d, num_ref_idx_active[0,1]=%d,%d nal_reference_idc %d pic struct 0x%x(mmco stru 0x%x)\n", + __func__, pSlice->slice_type, + pSlice->num_ref_idx_active[LIST_0], + pSlice->num_ref_idx_active[LIST_1], + 
pSlice->nal_reference_idc, + pSlice->structure, + pSlice->picture_structure_mmco); +#ifdef ERROR_CHECK + if (pSlice->num_ref_idx_active[LIST_0] >= MAX_LIST_SIZE) { + pSlice->num_ref_idx_active[LIST_0] = MAX_LIST_SIZE - 1; + p_H264_Dpb->dpb_error_flag = __LINE__; + } + if (pSlice->num_ref_idx_active[LIST_1] >= MAX_LIST_SIZE) { + pSlice->num_ref_idx_active[LIST_1] = MAX_LIST_SIZE - 1; + p_H264_Dpb->dpb_error_flag = __LINE__; + } +#endif + +#if 1 + /* dec_ref_pic_marking_buffer */ + pSlice->adaptive_ref_pic_buffering_flag = 0; + if (pSlice->nal_reference_idc) { + for (i = 0, j = 0; i < 44; j++) { + unsigned short val; + struct DecRefPicMarking_s *tmp_drpm = + &pSlice->dec_ref_pic_marking_buffer[j]; + memset(tmp_drpm, 0, sizeof(struct DecRefPicMarking_s)); + val = tmp_drpm-> + memory_management_control_operation = + mmco_cmd[i++]; + tmp_drpm->Next = NULL; + if (j > 0) { + pSlice-> + dec_ref_pic_marking_buffer[j - 1].Next = + tmp_drpm; + } + if (val == 0 || i >= 44) + break; + pSlice->adaptive_ref_pic_buffering_flag = 1; + if ((val == 1) || (val == 3)) { + tmp_drpm->difference_of_pic_nums_minus1 = + mmco_cmd[i++]; + } + if (val == 2) + tmp_drpm->long_term_pic_num = mmco_cmd[i++]; + if (i >= 44) + break; + if ((val == 3) || (val == 6)) + tmp_drpm->long_term_frame_idx = mmco_cmd[i++]; + if (val == 4) { + tmp_drpm->max_long_term_frame_idx_plus1 = + mmco_cmd[i++]; + } + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "dec_ref_pic_marking_buffer[%d]:operation %x diff_pic_minus1 %x long_pic_num %x long_frame_idx %x max_long_frame_idx_plus1 %x\n", + j, + tmp_drpm->memory_management_control_operation, + tmp_drpm->difference_of_pic_nums_minus1, + tmp_drpm->long_term_pic_num, + tmp_drpm->long_term_frame_idx, + tmp_drpm->max_long_term_frame_idx_plus1); + } + } + + ref_pic_list_reordering(p_H264_Dpb, pSlice); +#endif + + /*VUI*/ + p_H264_Dpb->vui_status = p_H264_Dpb->dpb_param.l.data[VUI_STATUS]; + p_H264_Dpb->aspect_ratio_idc = + 
p_H264_Dpb->dpb_param.l.data[ASPECT_RATIO_IDC]; + p_H264_Dpb->aspect_ratio_sar_width = + p_H264_Dpb->dpb_param.l.data[ASPECT_RATIO_SAR_WIDTH]; + p_H264_Dpb->aspect_ratio_sar_height = + p_H264_Dpb->dpb_param.l.data[ASPECT_RATIO_SAR_HEIGHT]; + + p_H264_Dpb->fixed_frame_rate_flag = p_H264_Dpb->dpb_param.l.data[ + FIXED_FRAME_RATE_FLAG]; + p_H264_Dpb->num_units_in_tick = + p_H264_Dpb->dpb_param.l.data[NUM_UNITS_IN_TICK]; + p_H264_Dpb->time_scale = p_H264_Dpb->dpb_param.l.data[TIME_SCALE] | + (p_H264_Dpb->dpb_param.l.data[TIME_SCALE + 1] << 16); + + p_H264_Dpb->bitstream_restriction_flag = + (p_H264_Dpb->dpb_param.l.data[SPS_FLAGS2] >> 3) & 0x1; + p_H264_Dpb->num_reorder_frames = + p_H264_Dpb->dpb_param.l.data[NUM_REORDER_FRAMES]; + p_H264_Dpb->max_dec_frame_buffering = + p_H264_Dpb->dpb_param.l.data[MAX_BUFFER_FRAME]; + + /**/ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s return\n", __func__); +} + +static void decode_poc(struct VideoParameters *p_Vid, struct Slice *pSlice) +{ + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Vid, + struct h264_dpb_stru, mVideo); + struct SPSParameters *active_sps = p_Vid->active_sps; + int i; + /* for POC mode 0: */ + unsigned int MaxPicOrderCntLsb = (1 << + (active_sps->log2_max_pic_order_cnt_lsb_minus4 + 4)); + + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DEBUG_POC, + "%s:pic_order_cnt_type %d, idr_flag %d last_has_mmco_5 %d last_pic_bottom_field %d pic_order_cnt_lsb %d PrevPicOrderCntLsb %d\r\n", + __func__, + active_sps->pic_order_cnt_type, + pSlice->idr_flag, + p_Vid->last_has_mmco_5, + p_Vid->last_pic_bottom_field, + pSlice->pic_order_cnt_lsb, + p_Vid->PrevPicOrderCntLsb + ); + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DEBUG_POC, + "%s:field_pic_flag %d, bottom_field_flag %d frame_num %d PreviousFrameNum %d PreviousFrameNumOffset %d ax_frame_num %d num_ref_frames_in_pic_order_cnt_cycle %d offset_for_non_ref_pic %d\r\n", + __func__, + pSlice->field_pic_flag, + pSlice->bottom_field_flag, + 
pSlice->frame_num, + p_Vid->PreviousFrameNum, + p_Vid->PreviousFrameNumOffset, + p_Vid->max_frame_num, + active_sps->num_ref_frames_in_pic_order_cnt_cycle, + active_sps->offset_for_non_ref_pic + ); + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DEBUG_POC, + "%s: delta_pic_order_cnt %d %d nal_reference_idc %d\r\n", + __func__, + pSlice->delta_pic_order_cnt[0], pSlice->delta_pic_order_cnt[1], + pSlice->nal_reference_idc + ); + + + switch (active_sps->pic_order_cnt_type) { + case 0: /* POC MODE 0 */ + /* 1st */ + if (pSlice->idr_flag) { + p_Vid->PrevPicOrderCntMsb = 0; + p_Vid->PrevPicOrderCntLsb = 0; + } else { + if (p_Vid->last_has_mmco_5) { + if (p_Vid->last_pic_bottom_field) { + p_Vid->PrevPicOrderCntMsb = 0; + p_Vid->PrevPicOrderCntLsb = 0; + } else { + p_Vid->PrevPicOrderCntMsb = 0; + p_Vid->PrevPicOrderCntLsb = + pSlice->toppoc; + } + } + } + /* Calculate the MSBs of current picture */ + if (pSlice->pic_order_cnt_lsb < p_Vid->PrevPicOrderCntLsb && + (p_Vid->PrevPicOrderCntLsb - pSlice->pic_order_cnt_lsb) >= + (MaxPicOrderCntLsb / 2)) + pSlice->PicOrderCntMsb = p_Vid->PrevPicOrderCntMsb + + MaxPicOrderCntLsb; + else if (pSlice->pic_order_cnt_lsb > + p_Vid->PrevPicOrderCntLsb && + (pSlice->pic_order_cnt_lsb - + p_Vid->PrevPicOrderCntLsb) > + (MaxPicOrderCntLsb / 2)) + pSlice->PicOrderCntMsb = p_Vid->PrevPicOrderCntMsb - + MaxPicOrderCntLsb; + else + pSlice->PicOrderCntMsb = p_Vid->PrevPicOrderCntMsb; + + /* 2nd */ + if (pSlice->field_pic_flag == 0) { + /* frame pix */ + pSlice->toppoc = pSlice->PicOrderCntMsb + + pSlice->pic_order_cnt_lsb; + pSlice->bottompoc = pSlice->toppoc + + pSlice->delta_pic_order_cnt_bottom; + pSlice->ThisPOC = pSlice->framepoc = + (pSlice->toppoc < pSlice->bottompoc) ? 
+ pSlice->toppoc : pSlice->bottompoc; + /* POC200301 */ + } else if (pSlice->bottom_field_flag == 0) { + /* top field */ + pSlice->ThisPOC = pSlice->toppoc = + pSlice->PicOrderCntMsb + + pSlice->pic_order_cnt_lsb; + } else { + /* bottom field */ + pSlice->ThisPOC = pSlice->bottompoc = + pSlice->PicOrderCntMsb + + pSlice->pic_order_cnt_lsb; + } + pSlice->framepoc = pSlice->ThisPOC; + + p_Vid->ThisPOC = pSlice->ThisPOC; + + /* if ( pSlice->frame_num != p_Vid->PreviousFrameNum) + * Seems redundant + */ + p_Vid->PreviousFrameNum = pSlice->frame_num; + + if (pSlice->nal_reference_idc) { + p_Vid->PrevPicOrderCntLsb = pSlice->pic_order_cnt_lsb; + p_Vid->PrevPicOrderCntMsb = pSlice->PicOrderCntMsb; + } + + break; + + case 1: /* POC MODE 1 */ + /* 1st */ + if (pSlice->idr_flag) { + p_Vid->FrameNumOffset = 0; /* first pix of IDRGOP */ + if (pSlice->frame_num) + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "frame_num not equal to zero in IDR picture %d", + -1020); + } else { + if (p_Vid->last_has_mmco_5) { + p_Vid->PreviousFrameNumOffset = 0; + p_Vid->PreviousFrameNum = 0; + } + if (pSlice->frame_num < p_Vid->PreviousFrameNum) { + /* not first pix of IDRGOP */ + p_Vid->FrameNumOffset = + p_Vid->PreviousFrameNumOffset + + p_Vid->max_frame_num; + } else { + p_Vid->FrameNumOffset = + p_Vid->PreviousFrameNumOffset; + } + } + + /* 2nd */ + if (active_sps->num_ref_frames_in_pic_order_cnt_cycle) + pSlice->AbsFrameNum = + p_Vid->FrameNumOffset + pSlice->frame_num; + else + pSlice->AbsFrameNum = 0; + if ((!pSlice->nal_reference_idc) && pSlice->AbsFrameNum > 0) + pSlice->AbsFrameNum--; + + /* 3rd */ + p_Vid->ExpectedDeltaPerPicOrderCntCycle = 0; + + if (active_sps->num_ref_frames_in_pic_order_cnt_cycle) + for (i = 0; i < (int) active_sps-> + num_ref_frames_in_pic_order_cnt_cycle; i++) { + p_Vid->ExpectedDeltaPerPicOrderCntCycle += + active_sps->offset_for_ref_frame[i]; + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DEBUG_POC, + "%s: offset_for_ref_frame %d\r\n", 
+ __func__, + active_sps-> + offset_for_ref_frame[i]); + } + + if (pSlice->AbsFrameNum) { + p_Vid->PicOrderCntCycleCnt = + (pSlice->AbsFrameNum - 1) / + active_sps-> + num_ref_frames_in_pic_order_cnt_cycle; + p_Vid->FrameNumInPicOrderCntCycle = + (pSlice->AbsFrameNum - 1) % + active_sps-> + num_ref_frames_in_pic_order_cnt_cycle; + p_Vid->ExpectedPicOrderCnt = + p_Vid->PicOrderCntCycleCnt * + p_Vid->ExpectedDeltaPerPicOrderCntCycle; + for (i = 0; i <= (int)p_Vid-> + FrameNumInPicOrderCntCycle; i++) { + p_Vid->ExpectedPicOrderCnt += + active_sps->offset_for_ref_frame[i]; + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DEBUG_POC, + "%s: offset_for_ref_frame %d\r\n", + __func__, + active_sps-> + offset_for_ref_frame[i]); + } + } else + p_Vid->ExpectedPicOrderCnt = 0; + + if (!pSlice->nal_reference_idc) + p_Vid->ExpectedPicOrderCnt += + active_sps->offset_for_non_ref_pic; + + if (pSlice->field_pic_flag == 0) { + /* frame pix */ + pSlice->toppoc = p_Vid->ExpectedPicOrderCnt + + pSlice->delta_pic_order_cnt[0]; + pSlice->bottompoc = pSlice->toppoc + + active_sps->offset_for_top_to_bottom_field + + pSlice->delta_pic_order_cnt[1]; + pSlice->ThisPOC = pSlice->framepoc = + (pSlice->toppoc < pSlice->bottompoc) ? 
+ pSlice->toppoc : pSlice->bottompoc; + /* POC200301 */ + } else if (pSlice->bottom_field_flag == 0) { + /* top field */ + pSlice->ThisPOC = pSlice->toppoc = + p_Vid->ExpectedPicOrderCnt + + pSlice->delta_pic_order_cnt[0]; + } else { + /* bottom field */ + pSlice->ThisPOC = pSlice->bottompoc = + p_Vid->ExpectedPicOrderCnt + + active_sps->offset_for_top_to_bottom_field + + pSlice->delta_pic_order_cnt[0]; + } + pSlice->framepoc = pSlice->ThisPOC; + + p_Vid->PreviousFrameNum = pSlice->frame_num; + p_Vid->PreviousFrameNumOffset = p_Vid->FrameNumOffset; + + break; + + + case 2: /* POC MODE 2 */ + if (pSlice->idr_flag) { /* IDR picture */ + p_Vid->FrameNumOffset = 0; /* first pix of IDRGOP */ + pSlice->ThisPOC = pSlice->framepoc = pSlice->toppoc = + pSlice->bottompoc = 0; + if (pSlice->frame_num) + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "frame_num not equal to zero in IDR picture %d", + -1020); + } else { + if (p_Vid->last_has_mmco_5) { + p_Vid->PreviousFrameNum = 0; + p_Vid->PreviousFrameNumOffset = 0; + } + if (pSlice->frame_num < p_Vid->PreviousFrameNum) + p_Vid->FrameNumOffset = + p_Vid->PreviousFrameNumOffset + + p_Vid->max_frame_num; + else + p_Vid->FrameNumOffset = + p_Vid->PreviousFrameNumOffset; + + pSlice->AbsFrameNum = p_Vid->FrameNumOffset + + pSlice->frame_num; + if (!pSlice->nal_reference_idc) + pSlice->ThisPOC = + (2 * pSlice->AbsFrameNum - 1); + else + pSlice->ThisPOC = (2 * pSlice->AbsFrameNum); + + if (pSlice->field_pic_flag == 0) + pSlice->toppoc = pSlice->bottompoc = + pSlice->framepoc = pSlice->ThisPOC; + else if (pSlice->bottom_field_flag == 0) + pSlice->toppoc = pSlice->framepoc = + pSlice->ThisPOC; + else + pSlice->bottompoc = pSlice->framepoc = + pSlice->ThisPOC; + } + + p_Vid->PreviousFrameNum = pSlice->frame_num; + p_Vid->PreviousFrameNumOffset = p_Vid->FrameNumOffset; + break; + + + default: + /* error must occurs */ + /* assert( 1==0 ); */ + break; + } +} + +void fill_frame_num_gap(struct VideoParameters *p_Vid, 
struct Slice *currSlice) +{ + struct h264_dpb_stru *p_H264_Dpb = + container_of(p_Vid, struct h264_dpb_stru, mVideo); + struct SPSParameters *active_sps = p_Vid->active_sps; + int CurrFrameNum; + int UnusedShortTermFrameNum; + struct StorablePicture *picture = NULL; + int tmp1 = currSlice->delta_pic_order_cnt[0]; + int tmp2 = currSlice->delta_pic_order_cnt[1]; + int ret; + + currSlice->delta_pic_order_cnt[0] = + currSlice->delta_pic_order_cnt[1] = 0; + + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "A gap in frame number is found, try to fill it.(pre_frame_num %d, max_frame_num %d\n", + p_Vid->pre_frame_num, p_Vid->max_frame_num + ); + + UnusedShortTermFrameNum = (p_Vid->pre_frame_num + 1) + % p_Vid->max_frame_num; + CurrFrameNum = currSlice->frame_num; /*p_Vid->frame_num;*/ + + while (CurrFrameNum != UnusedShortTermFrameNum) { + /*pr_err("CurrFrameNum = %d, UnusedShortTermFrameNum = %d\n", CurrFrameNum, UnusedShortTermFrameNum);*/ + /*picture = alloc_storable_picture + *(p_Vid, FRAME, p_Vid->width, + *p_Vid->height, + *p_Vid->width_cr, + *p_Vid->height_cr, 1); + */ + picture = get_new_pic(p_H264_Dpb, + p_H264_Dpb->mSlice.structure, + /*p_Vid->width, p_Vid->height, + *p_Vid->width_cr, + p_Vid->height_cr,*/ 1); + + if (picture == NULL) { + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, + "%s Error: get_new_pic return NULL\r\n", + __func__); + /*h264_debug_flag |= PRINT_FLAG_DUMP_DPB;*/ + dump_dpb(p_Dpb, 0); + return; + } + + picture->colocated_buf_index = -1; + picture->buf_spec_num = -1; + picture->buf_spec_is_alloced = 0; + + picture->coded_frame = 1; + picture->pic_num = UnusedShortTermFrameNum; + picture->frame_num = UnusedShortTermFrameNum; + picture->non_existing = 1; + picture->is_output = 1; + picture->used_for_reference = 1; + picture->adaptive_ref_pic_buffering_flag = 0; + #if (MVC_EXTENSION_ENABLE) + picture->view_id = currSlice->view_id; + #endif + + 
currSlice->frame_num = UnusedShortTermFrameNum; + if (active_sps->pic_order_cnt_type != 0) { + /*decode_poc(p_Vid, p_Vid->ppSliceList[0]);*/ + decode_poc(&p_H264_Dpb->mVideo, &p_H264_Dpb->mSlice); + } + picture->top_poc = currSlice->toppoc; + picture->bottom_poc = currSlice->bottompoc; + picture->frame_poc = currSlice->framepoc; + picture->poc = currSlice->framepoc; + + ret = store_picture_in_dpb(p_H264_Dpb, picture, 0); + if (ret == -1) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, + "%s Error: store_picture_in_dpb failed, break\n", + __func__); + release_picture(p_H264_Dpb, picture); + bufmgr_force_recover(p_H264_Dpb); + return; + } else if (ret == -2) + release_picture(p_H264_Dpb, picture); + + picture = NULL; + p_Vid->pre_frame_num = UnusedShortTermFrameNum; + UnusedShortTermFrameNum = + (UnusedShortTermFrameNum + 1) % + p_Vid->max_frame_num; + } + currSlice->delta_pic_order_cnt[0] = tmp1; + currSlice->delta_pic_order_cnt[1] = tmp2; + currSlice->frame_num = CurrFrameNum; +} + +void dpb_init_global(struct h264_dpb_stru *p_H264_Dpb, + int id, int actual_dpb_size, int max_reference_size) +{ + int i; + + init_dummy_fs(); + + memset(&p_H264_Dpb->mDPB, 0, sizeof(struct DecodedPictureBuffer)); + + memset(&p_H264_Dpb->mSlice, 0, sizeof(struct Slice)); + memset(&p_H264_Dpb->mVideo, 0, sizeof(struct VideoParameters)); + memset(&p_H264_Dpb->mSPS, 0, sizeof(struct SPSParameters)); + + for (i = 0; i < DPB_SIZE_MAX; i++) { + memset(&(p_H264_Dpb->mFrameStore[i]), 0, + sizeof(struct FrameStore)); + } + + for (i = 0; i < MAX_PIC_BUF_NUM; i++) { + memset(&(p_H264_Dpb->m_PIC[i]), 0, + sizeof(struct StorablePicture)); + p_H264_Dpb->m_PIC[i].index = i; + } + p_H264_Dpb->decoder_index = id; + + /* make sure dpb_init_global + *can be called during decoding + *(in DECODE_STATE_IDLE or DECODE_STATE_READY state) + */ + p_H264_Dpb->mDPB.size = actual_dpb_size; + p_H264_Dpb->max_reference_size = max_reference_size; + p_H264_Dpb->poc_even_odd_flag = 0; +} + +static void 
init_picture(struct h264_dpb_stru *p_H264_Dpb, + struct Slice *currSlice, + struct StorablePicture *dec_picture) +{ + /* struct VideoParameters *p_Vid = &(p_H264_Dpb->mVideo); */ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s dec_picture %p\n", __func__, dec_picture); + dec_picture->top_poc = currSlice->toppoc; + dec_picture->bottom_poc = currSlice->bottompoc; + dec_picture->frame_poc = currSlice->framepoc; + switch (currSlice->structure) { + case TOP_FIELD: { + dec_picture->poc = currSlice->toppoc; + /* p_Vid->number *= 2; */ + break; + } + case BOTTOM_FIELD: { + dec_picture->poc = currSlice->bottompoc; + /* p_Vid->number = p_Vid->number * 2 + 1; */ + break; + } + case FRAME: { + dec_picture->poc = currSlice->framepoc; + break; + } + default: + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "p_Vid->structure not initialized %d\n", 235); + } + + /* dec_picture->slice_type = p_Vid->type; */ + dec_picture->used_for_reference = (currSlice->nal_reference_idc != 0); + dec_picture->idr_flag = currSlice->idr_flag; + dec_picture->no_output_of_prior_pics_flag = + currSlice->no_output_of_prior_pics_flag; + dec_picture->long_term_reference_flag = + currSlice->long_term_reference_flag; +#if 1 + dec_picture->adaptive_ref_pic_buffering_flag = + currSlice->adaptive_ref_pic_buffering_flag; + dec_picture->dec_ref_pic_marking_buffer = + &currSlice->dec_ref_pic_marking_buffer[0]; +#endif + /* currSlice->dec_ref_pic_marking_buffer = NULL; */ + + /* dec_picture->mb_aff_frame_flag = currSlice->mb_aff_frame_flag; */ + /* dec_picture->PicWidthInMbs = p_Vid->PicWidthInMbs; */ + + /* p_Vid->get_mb_block_pos = + * dec_picture->mb_aff_frame_flag ? get_mb_block_pos_mbaff : + * get_mb_block_pos_normal; + */ + /* p_Vid->getNeighbour = + * dec_picture->mb_aff_frame_flag ? 
getAffNeighbour : + * getNonAffNeighbour; + */ + + dec_picture->pic_num = currSlice->frame_num; + dec_picture->frame_num = currSlice->frame_num; + + /* dec_picture->recovery_frame = + * (unsigned int) ((int) currSlice->frame_num == + * p_Vid->recovery_frame_num); + */ + + dec_picture->coded_frame = (currSlice->structure == FRAME); + + /* dec_picture->chroma_format_idc = active_sps->chroma_format_idc; */ + + /* dec_picture->frame_mbs_only_flag = + * active_sps->frame_mbs_only_flag; + */ + /* dec_picture->frame_cropping_flag = + * active_sps->frame_cropping_flag; + */ + + if ((currSlice->picture_structure_mmco & 0x3) == 3) { + dec_picture->mb_aff_frame_flag = 1; + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s, picture_structure_mmco is %x, set mb_aff_frame_flag to 1\n", + __func__, + currSlice->picture_structure_mmco); + } + + if (currSlice->pic_struct < PIC_INVALID) { + dec_picture->pic_struct = currSlice->pic_struct; + } else { + dec_picture->pic_struct = PIC_INVALID; + } + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s pic_struct = %d\n", __func__, dec_picture->pic_struct); +} + +void dump_pic(struct h264_dpb_stru *p_H264_Dpb) +{ + int ii; + struct StorablePicture *pic; + for (ii = 0; ii < MAX_PIC_BUF_NUM; ii++) { + pic = &(p_H264_Dpb->m_PIC[ii]); + if (pic->is_used) { + dpb_print(p_H264_Dpb->decoder_index, 0, + "pic(%d,%d) poc %d is_used %d bufspec %d colbuf %d for_ref %d long_term %d pre_out %d output %d nonexist %d data_flag 0x%x\n", + ii, pic->index, + pic->poc, + pic->is_used, + pic->buf_spec_num, + pic->colocated_buf_index, + pic->used_for_reference, + pic->is_long_term, + pic->pre_output, + pic->is_output, + pic->non_existing, + pic->data_flag); + } + } +} + +/* +static void is_pic_used_by_dpb(struct h264_dpb_stru *p_H264_Dpb, + struct StorablePicture *pic) +{ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + unsigned i; + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->top_field == pic || + 
			p_Dpb->fs[i]->bottom_field == pic ||
			p_Dpb->fs[i]->frame == pic
			)
			break;
	}
	if (i < p_Dpb->used_size)
		return 1;
	return 0;
}
*/

/*
 * get_new_pic() - take the first free slot from the fixed m_PIC pool, reset
 * its per-picture state and return it.  Returns NULL (and sets
 * buf_alloc_fail) when the pool is exhausted.
 * NOTE(review): the is_output parameter is unused in the visible body —
 * confirm whether it is consumed elsewhere before removing it.
 */
static struct StorablePicture *get_new_pic(struct h264_dpb_stru *p_H264_Dpb,
	enum PictureStructure structure, unsigned char is_output)
{
	struct StorablePicture *s = NULL;
	struct StorablePicture *pic;
	struct VideoParameters *p_Vid = &(p_H264_Dpb->mVideo);
	/* recycle un-used pic */
	int ii = 0;

	/* linear scan of the fixed-size pool for a free entry */
	for (ii = 0; ii < MAX_PIC_BUF_NUM; ii++) {
		pic = &(p_H264_Dpb->m_PIC[ii]);
		if (pic->is_used == 0) {
			pic->is_used = 1;
			s = pic;
			break;
		}
	}

	if (s) {
		/* slot is recycled, not zeroed: reset every field we rely on */
		s->buf_spec_is_alloced = 0;
		s->pic_num = 0;
		s->frame_num = 0;
		s->long_term_frame_idx = 0;
		s->long_term_pic_num = 0;
		s->used_for_reference = 0;
		s->is_long_term = 0;
		s->non_existing = 0;
		s->is_output = 0;
		s->pre_output = 0;
		s->max_slice_id = 0;
		/* clear stale error/no-display bits but keep the others */
		s->data_flag &= ~(ERROR_FLAG | NODISP_FLAG);
#if (MVC_EXTENSION_ENABLE)
		s->view_id = -1;
#endif

		s->structure = structure;

#if 0
		s->size_x = size_x;
		s->size_y = size_y;
		s->size_x_cr = size_x_cr;
		s->size_y_cr = size_y_cr;
		s->size_x_m1 = size_x - 1;
		s->size_y_m1 = size_y - 1;
		s->size_x_cr_m1 = size_x_cr - 1;
		s->size_y_cr_m1 = size_y_cr - 1;

		s->top_field = p_Vid->no_reference_picture;
		s->bottom_field = p_Vid->no_reference_picture;
		s->frame = p_Vid->no_reference_picture;
#endif
		/* s->dec_ref_pic_marking_buffer = NULL; */

		s->coded_frame = 0;
		s->mb_aff_frame_flag = 0;

		s->top_poc = s->bottom_poc = s->poc = 0;
		s->seiHasTone_mapping = 0;
		s->frame_mbs_only_flag = p_Vid->active_sps->frame_mbs_only_flag;

		/* field coding: loop kept from the reference decoder; the
		 * listX allocation it used to do is commented out
		 */
		if (!p_Vid->active_sps->frame_mbs_only_flag &&
			structure != FRAME) {
			int i, j;

			for (j = 0; j < MAX_NUM_SLICES; j++) {
				for (i = 0; i < 2; i++) {
					/* s->listX[j][i] =
					 *calloc(MAX_LIST_SIZE,
					 *sizeof (struct StorablePicture *));
					 *+1 for reordering ???
					 *if (NULL == s->listX[j][i])
					 *no_mem_exit("alloc_storable_picture:
					 *s->listX[i]");
					 */
				}
			}
		}
	} else
		p_H264_Dpb->buf_alloc_fail = 1;
	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		  "%s %p\n", __func__, s);
	return s;
}

/*
 * free_picture() - return a picture to the m_PIC pool.  Tolerates NULL and
 * out-of-range indices.
 */
static void free_picture(struct h264_dpb_stru *p_H264_Dpb,
			 struct StorablePicture *pic)
{
	if (pic == NULL || pic->index < 0 ||
		pic->index >= MAX_PIC_BUF_NUM)
		return;
	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		  "%s %p %d\n", __func__, pic, pic->index);
	/* assert(pic->index<MAX_PIC_BUF_NUM); */
	p_H264_Dpb->m_PIC[pic->index].is_used = 0;
}

/*
 * gen_field_ref_ids() - copy the per-slice reference lists (list0/list1)
 * into the picture, one entry per slice of the current picture.  Only lists
 * that were actually allocated (non-NULL) are copied.
 */
static void gen_field_ref_ids(struct VideoParameters *p_Vid,
			      struct StorablePicture *p)
{
	int i, j;
	struct h264_dpb_stru *p_H264_Dpb = container_of(p_Vid,
		struct h264_dpb_stru, mVideo);
	/* ! Generate Frame parameters from field information. */
	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		  "%s\n", __func__);

	/* copy the list; */
	for (j = 0; j < p_Vid->iSliceNumOfCurrPic; j++) {
		if (p->listX[j][LIST_0]) {
			p->listXsize[j][LIST_0] =
				p_Vid->ppSliceList[j]->listXsize[LIST_0];
			for (i = 0; i < p->listXsize[j][LIST_0]; i++)
				p->listX[j][LIST_0][i] =
					p_Vid->ppSliceList[j]->listX[LIST_0][i];
		}
		if (p->listX[j][LIST_1]) {
			p->listXsize[j][LIST_1] =
				p_Vid->ppSliceList[j]->listXsize[LIST_1];
			for (i = 0; i < p->listXsize[j][LIST_1]; i++)
				p->listX[j][LIST_1][i] =
					p_Vid->ppSliceList[j]->listX[LIST_1][i];
		}
	}
}

/*
 * init_dpb() - (re)initialize the decoded picture buffer.  Safe to call on
 * an already-initialized DPB: the dummy no_reference_picture is released
 * and init_done cleared first.  p_Dpb->size and num_ref_frames are set up
 * by the caller in vh264.c.
 */
static void init_dpb(struct h264_dpb_stru *p_H264_Dpb, int type)
{
	unsigned int i;
	struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo;
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;
	struct SPSParameters *active_sps = &p_H264_Dpb->mSPS;

	p_Vid->active_sps = active_sps;
	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		  "%s\n", __func__);

	p_Dpb->p_Vid = p_Vid;
	if (p_Dpb->init_done) {
		/* free_dpb(p_Dpb); */
		if (p_Vid->no_reference_picture) {
			/* drop the dummy picture from the previous init */
			free_picture(p_H264_Dpb, p_Vid->no_reference_picture);
			p_Vid->no_reference_picture = NULL;
		}
		p_Dpb->init_done = 0;
	}

	/* p_Dpb->size = 10; //active_sps->max_dpb_size; //16;
	 * getDpbSize(p_Vid, active_sps) +
	 * p_Vid->p_Inp->dpb_plus[type==2? 1: 0];
	 * p_Dpb->size = active_sps->max_dpb_size; //16;
	 * getDpbSize(p_Vid, active_sps) +
	 * p_Vid->p_Inp->dpb_plus[type==2? 1: 0];
	 * p_Dpb->size initialized in vh264.c
	 */
	p_Dpb->num_ref_frames = active_sps->num_ref_frames;
	/* p_Dpb->num_ref_frames initialized in vh264.c */
	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		  "%s dpb_size is %d num_ref_frames = %d (%d)\n",
		  __func__, p_Dpb->size,
		  p_Dpb->num_ref_frames,
		  active_sps->num_ref_frames);
	/* 0xffff is the "not yet parsed" sentinel for num_ref_frames */
	if (active_sps->num_ref_frames == 0xffff) {
		dpb_print(p_H264_Dpb->decoder_index, 0,
			  "!!!Warning, num_ref_frames = %d is invalid\n",
			  active_sps->num_ref_frames);
	}

#if 0
	/* ??? */
#if (MVC_EXTENSION_ENABLE)
	if ((unsigned int)active_sps->max_dec_frame_buffering <
		active_sps->num_ref_frames) {
#else
	if (p_Dpb->size < active_sps->num_ref_frames) {
#endif
		error(
			"DPB size at specified level is smaller than the specified number of reference frames. This is not allowed.\n",
			1000);
	}
#endif

	p_Dpb->used_size = 0;
	p_Dpb->last_picture = NULL;

	p_Dpb->ref_frames_in_buffer = 0;
	p_Dpb->ltref_frames_in_buffer = 0;

#if 0
	p_Dpb->fs = calloc(p_Dpb->size, sizeof(struct FrameStore *));
	if (NULL == p_Dpb->fs)
		no_mem_exit("init_dpb: p_Dpb->fs");

	p_Dpb->fs_ref = calloc(p_Dpb->size, sizeof(struct FrameStore *));
	if (NULL == p_Dpb->fs_ref)
		no_mem_exit("init_dpb: p_Dpb->fs_ref");

	p_Dpb->fs_ltref = calloc(p_Dpb->size, sizeof(struct FrameStore *));
	if (NULL == p_Dpb->fs_ltref)
		no_mem_exit("init_dpb: p_Dpb->fs_ltref");
#endif

#if (MVC_EXTENSION_ENABLE)
	p_Dpb->fs_ilref = calloc(1, sizeof(struct FrameStore *));
	if (NULL == p_Dpb->fs_ilref)
		no_mem_exit("init_dpb: p_Dpb->fs_ilref");
#endif

	/* frame stores come from the static mFrameStore array, not calloc */
	for (i = 0; i < p_Dpb->size; i++) {
		p_Dpb->fs[i] = &(p_H264_Dpb->mFrameStore[i]);
		/* alloc_frame_store(); */
		p_Dpb->fs[i]->index = i;
		p_Dpb->fs_ref[i] = NULL;
		p_Dpb->fs_ltref[i] = NULL;
		p_Dpb->fs[i]->layer_id = 0; /* MVC_INIT_VIEW_ID; */
#if (MVC_EXTENSION_ENABLE)
		p_Dpb->fs[i]->view_id = MVC_INIT_VIEW_ID;
		p_Dpb->fs[i]->inter_view_flag[0] =
			p_Dpb->fs[i]->inter_view_flag[1] = 0;
		p_Dpb->fs[i]->anchor_pic_flag[0] =
			p_Dpb->fs[i]->anchor_pic_flag[1] = 0;
#endif
	}
#if (MVC_EXTENSION_ENABLE)
	if (type == 2) {
		p_Dpb->fs_ilref[0] = alloc_frame_store();
		/* These may need some cleanups */
		p_Dpb->fs_ilref[0]->view_id = MVC_INIT_VIEW_ID;
		p_Dpb->fs_ilref[0]->inter_view_flag[0] =
			p_Dpb->fs_ilref[0]->inter_view_flag[1] = 0;
		p_Dpb->fs_ilref[0]->anchor_pic_flag[0] =
			p_Dpb->fs_ilref[0]->anchor_pic_flag[1] = 0;
		/* given that this is in a different buffer,
		 * do we even need proc_flag anymore?
		 */
	} else
		p_Dpb->fs_ilref[0] = NULL;
#endif

	/*
	 *for (i = 0; i < 6; i++)
	 *{
	 *currSlice->listX[i] =
	 * calloc(MAX_LIST_SIZE, sizeof (struct StorablePicture *));
	 * +1 for reordering
	 *if (NULL == currSlice->listX[i])
	 *no_mem_exit("init_dpb: currSlice->listX[i]");
	 *}
	 */
	/* allocate a dummy storable picture */
	if (!p_Vid->no_reference_picture) {
		p_Vid->no_reference_picture = get_new_pic(p_H264_Dpb,
			FRAME,
			/*p_Vid->width, p_Vid->height,
			 *p_Vid->width_cr, p_Vid->height_cr,
			 */
			1);
		/* NOTE(review): get_new_pic() can return NULL on pool
		 * exhaustion; these derefs assume it cannot fail here —
		 * confirm the pool is always empty at init time.
		 */
		p_Vid->no_reference_picture->top_field =
			p_Vid->no_reference_picture;
		p_Vid->no_reference_picture->bottom_field =
			p_Vid->no_reference_picture;
		p_Vid->no_reference_picture->frame =
			p_Vid->no_reference_picture;
	}
	/* INT_MIN marks "nothing output yet" for the output ordering logic */
	p_Dpb->last_output_poc = INT_MIN;

#if (MVC_EXTENSION_ENABLE)
	p_Dpb->last_output_view_id = -1;
#endif

	p_Vid->last_has_mmco_5 = 0;

	init_colocate_buf(p_H264_Dpb, p_H264_Dpb->max_reference_size);

	p_Dpb->init_done = 1;

#if 0
/* ??? */
	/* picture error concealment */
	if (p_Vid->conceal_mode != 0 && !p_Vid->last_out_fs)
		p_Vid->last_out_fs = alloc_frame_store();
#endif
}

/*
 * dpb_split_field() - derive the top/bottom field pictures of a decoded
 * frame.  For non frame_mbs_only streams two extra StorablePictures are
 * taken from the pool and wired to the frame; otherwise the field pointers
 * are cleared.  Returns early (leaving fs partially wired) if the pool
 * cannot supply both fields.
 */
static void dpb_split_field(struct h264_dpb_stru *p_H264_Dpb,
			    struct FrameStore *fs)
{
	struct StorablePicture *fs_top = NULL, *fs_btm = NULL;
	struct StorablePicture *frame = fs->frame;

	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		  "%s %p %p\n", __func__, fs, frame);

	fs->poc = frame->poc;

	if (!frame->frame_mbs_only_flag) {
		fs_top = fs->top_field = get_new_pic(p_H264_Dpb,
			TOP_FIELD,
			/* frame->size_x, frame->size_y,
			 *frame->size_x_cr, frame->size_y_cr,
			 */
			1);
		fs_btm = fs->bottom_field = get_new_pic(p_H264_Dpb,
			BOTTOM_FIELD,
			/*frame->size_x, frame->size_y,
			 *frame->size_x_cr, frame->size_y_cr,
			 */
			1);
		if (fs_top == NULL || fs_btm == NULL)
			return;
#if 1
/* rain */
		/* both fields share the frame's hardware buffer */
		fs_top->buf_spec_num = frame->buf_spec_num;
		fs_btm->buf_spec_num = frame->buf_spec_num;

		fs_top->colocated_buf_index = frame->colocated_buf_index;
		fs_btm->colocated_buf_index = frame->colocated_buf_index;

		fs_top->data_flag = frame->data_flag;
		fs_btm->data_flag = frame->data_flag;
#endif
		/* per-field POCs come from the frame's top/bottom POCs */
		fs_top->poc = frame->top_poc;
		fs_btm->poc = frame->bottom_poc;

#if (MVC_EXTENSION_ENABLE)
		fs_top->view_id = frame->view_id;
		fs_btm->view_id = frame->view_id;
#endif

		fs_top->frame_poc = frame->frame_poc;

		fs_top->bottom_poc = fs_btm->bottom_poc = frame->bottom_poc;
		fs_top->top_poc = fs_btm->top_poc = frame->top_poc;
		fs_btm->frame_poc = frame->frame_poc;

		/* both fields inherit the frame's reference status */
		fs_top->used_for_reference = fs_btm->used_for_reference
			= frame->used_for_reference;
		fs_top->is_long_term = fs_btm->is_long_term
			= frame->is_long_term;
		fs->long_term_frame_idx = fs_top->long_term_frame_idx
			= fs_btm->long_term_frame_idx
			= frame->long_term_frame_idx;

		fs_top->coded_frame = fs_btm->coded_frame = 1;
		fs_top->mb_aff_frame_flag = fs_btm->mb_aff_frame_flag
			= frame->mb_aff_frame_flag;

		/* cross-link frame <-> fields (each also points to itself
		 * for its own parity)
		 */
		frame->top_field = fs_top;
		frame->bottom_field = fs_btm;
		frame->frame = frame;
		fs_top->bottom_field = fs_btm;
		fs_top->frame = frame;
		fs_top->top_field = fs_top;
		fs_btm->top_field = fs_top;
		fs_btm->frame = frame;
		fs_btm->bottom_field = fs_btm;

#if (MVC_EXTENSION_ENABLE)
		fs_top->view_id = fs_btm->view_id = fs->view_id;
		fs_top->inter_view_flag = fs->inter_view_flag[0];
		fs_btm->inter_view_flag = fs->inter_view_flag[1];
#endif

		fs_top->chroma_format_idc = fs_btm->chroma_format_idc =
			frame->chroma_format_idc;
		fs_top->iCodingType = fs_btm->iCodingType = frame->iCodingType;
	} else {
		/* progressive stream: no field pictures exist */
		fs->top_field = NULL;
		fs->bottom_field = NULL;
		frame->top_field = NULL;
		frame->bottom_field = NULL;
		frame->frame = frame;
	}

}


/*
 * dpb_combine_field() - build (or update) the frame picture of a frame
 * store from its decoded field(s): merge POCs, reference status, cropping
 * info, PTS and decoded sizes.  Called once both fields are present
 * (is_used == 3) or when only the top field exists.
 */
static void dpb_combine_field(struct h264_dpb_stru *p_H264_Dpb,
			      struct FrameStore *fs)
{

	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		  "%s\n", __func__);

	if (!fs->frame) {
		fs->frame = get_new_pic(p_H264_Dpb,
			FRAME,
			/* fs->top_field->size_x, fs->top_field->size_y*2,
			 *fs->top_field->size_x_cr, fs->top_field->size_y_cr*2,
			 */
			1);
	}
	/* pool exhausted: leave the frame store as-is */
	if (!fs->frame)
		return;
#if 1
/* rain */
	fs->frame->buf_spec_num = fs->top_field->buf_spec_num;
	fs->frame->colocated_buf_index = fs->top_field->colocated_buf_index;
	fs->frame->data_flag = fs->top_field->data_flag;
	fs->frame->slice_type = fs->top_field->slice_type;
	if (fs->bottom_field)
		fs->frame->data_flag |= (fs->bottom_field->data_flag & 0xf0);
#endif

	if (fs->bottom_field) {
		/* frame POC is the smaller of the two field POCs */
		fs->poc = fs->frame->poc = fs->frame->frame_poc = imin(
			fs->top_field->poc, fs->bottom_field->poc);

		fs->bottom_field->frame_poc = fs->top_field->frame_poc =
			fs->frame->poc;

		fs->bottom_field->top_poc = fs->frame->top_poc =
			fs->top_field->poc;
		fs->top_field->bottom_poc = fs->frame->bottom_poc =
			fs->bottom_field->poc;

		/* the frame is a (long-term) reference only if BOTH fields are */
		fs->frame->used_for_reference =
			(fs->top_field->used_for_reference &&
			fs->bottom_field->used_for_reference);
		fs->frame->is_long_term = (fs->top_field->is_long_term &&
			fs->bottom_field->is_long_term);
	}

	if (fs->frame->is_long_term)
		fs->frame->long_term_frame_idx = fs->long_term_frame_idx;

	fs->frame->top_field = fs->top_field;
	if (fs->bottom_field)
		fs->frame->bottom_field = fs->bottom_field;
	fs->frame->frame = fs->frame;

	fs->frame->coded_frame = 0;

	/* cropping parameters are taken from the top field */
	fs->frame->chroma_format_idc = fs->top_field->chroma_format_idc;
	fs->frame->frame_cropping_flag = fs->top_field->frame_cropping_flag;
	if (fs->frame->frame_cropping_flag) {
		fs->frame->frame_crop_top_offset =
			fs->top_field->frame_crop_top_offset;
		fs->frame->frame_crop_bottom_offset =
			fs->top_field->frame_crop_bottom_offset;
		fs->frame->frame_crop_left_offset =
			fs->top_field->frame_crop_left_offset;
		fs->frame->frame_crop_right_offset =
			fs->top_field->frame_crop_right_offset;
	}
	if (fs->bottom_field) {
		fs->top_field->frame = fs->bottom_field->frame = fs->frame;
		fs->top_field->top_field = fs->top_field;
		fs->top_field->bottom_field = fs->bottom_field;
		fs->bottom_field->top_field = fs->top_field;
		fs->bottom_field->bottom_field = fs->bottom_field;
	}

	/**/
#if (MVC_EXTENSION_ENABLE)
	fs->frame->view_id = fs->view_id;
#endif
	fs->frame->iCodingType = fs->top_field->iCodingType;
	/* PTS comes from whichever field is displayed first (smaller POC) */
	if (fs->bottom_field && fs->top_field->poc < fs->bottom_field->poc) {
		fs->pts = fs->top_field->pts;
		fs->pts64 = fs->top_field->pts64;
		/*SWPL-7105 fix */
		if ((fs->frame->slice_type == B_SLICE)
			&& (!fs->bottom_field->pts) && (!fs->bottom_field->pts64)) {
			fs->pts = 0;
			fs->pts64 = 0;
		}
		fs->offset_delimiter = fs->top_field->offset_delimiter;
		fs->decoded_frame_size = fs->top_field->pic_size +
			fs->bottom_field->pic_size;
	} else if (fs->bottom_field) {
		fs->pts = fs->bottom_field->pts;
		fs->pts64 = fs->bottom_field->pts64;
		fs->offset_delimiter = fs->bottom_field->offset_delimiter;
		fs->decoded_frame_size = fs->top_field->pic_size +
			fs->bottom_field->pic_size;
	}
	/* FIELD_CODING ;*/
}

/*
 * calculate_frame_no() - PSNR frame-number bookkeeping inherited from the
 * reference decoder; the whole body is compiled out (#if 0), so this is
 * currently a no-op.
 */
static void calculate_frame_no(struct VideoParameters *p_Vid,
			       struct StorablePicture *p)
{
#if 0
/* ??? */
	InputParameters *p_Inp = p_Vid->p_Inp;
	/* calculate frame number */
	int psnrPOC = p_Vid->active_sps->mb_adaptive_frame_field_flag ?
		p->poc / (p_Inp->poc_scale) : p->poc / (p_Inp->poc_scale);

	if (psnrPOC == 0) { /* && p_Vid->psnr_number) */
		p_Vid->idr_psnr_number =
			p_Vid->g_nFrame * p_Vid->ref_poc_gap /
				(p_Inp->poc_scale);
	}
	p_Vid->psnr_number = imax(p_Vid->psnr_number,
		p_Vid->idr_psnr_number + psnrPOC);

	p_Vid->frame_no = p_Vid->idr_psnr_number + psnrPOC;
#endif
}

/*
 * insert_picture_in_dpb() - attach a decoded picture to a frame store.
 * FRAME pictures fill the store completely (and are split into fields);
 * TOP/BOTTOM fields are accumulated, and the frame is synthesized with
 * dpb_combine_field() once both halves are present (is_used == 3).
 * Reference/long-term status, PTS and QoS statistics are propagated into
 * the frame store.
 */
static void insert_picture_in_dpb(struct h264_dpb_stru *p_H264_Dpb,
				  struct FrameStore *fs,
				  struct StorablePicture *p,
				  unsigned char data_flag)
{
	struct vdec_frames_s *mvfrm = p_H264_Dpb->vdec->mvfrm;
	struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo;
	/* InputParameters *p_Inp = p_Vid->p_Inp;
	 * dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
	 * "insert (%s) pic with frame_num #%d, poc %d\n",
	 * (p->structure == FRAME)?"FRAME":
	 * (p->structure == TOP_FIELD)?"TOP_FIELD":
	 * "BOTTOM_FIELD", p->pic_num, p->poc);
	 * assert (p!=NULL);
	 * assert (fs!=NULL);
	 */
	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		  "%s %p %p\n", __func__, fs, p);
	p_H264_Dpb->dpb_frame_count++;
	fs->dpb_frame_count = p_H264_Dpb->dpb_frame_count;
#if 1
/* rain */
/* p->buf_spec_num = fs->index; */
	p->data_flag = data_flag;
	fs->data_flag |= data_flag;
	fs->buf_spec_num = p->buf_spec_num;
	fs->colocated_buf_index = p->colocated_buf_index;
#endif
	p->slice_type = p_H264_Dpb->mSlice.slice_type;
	switch (p->structure) {
	case FRAME:
		fs->frame = p;
		fs->is_used = 3; /* both field slots occupied */
		fs->slice_type = p->slice_type;
		fs->frame_size = p->frame_size;
		fs->offset_delimiter = p->offset_delimiter;
		fs->decoded_frame_size = p->pic_size;
		if (p->used_for_reference) {
			fs->is_reference = 3;
			fs->is_orig_reference = 3;
			if (p->is_long_term) {
				fs->is_long_term = 3;
				fs->long_term_frame_idx =
					p->long_term_frame_idx;
			}
		}
		fs->pts = p->pts;
		fs->pts64 = p->pts64;
		fs->layer_id = p->layer_id;
#if (MVC_EXTENSION_ENABLE)
		fs->view_id = p->view_id;
		fs->inter_view_flag[0] = fs->inter_view_flag[1] =
			p->inter_view_flag;
		fs->anchor_pic_flag[0] = fs->anchor_pic_flag[1] =
			p->anchor_pic_flag;
#endif
		/* generate field views */
		/* return; */
		dpb_split_field(p_H264_Dpb, fs);
		/* return; */
		break;
	case TOP_FIELD:
		fs->top_field = p;
		fs->is_used |= 1; /* bit 0 = top field present */
		fs->layer_id = p->layer_id;
		/* first field of the pair supplies the slice type */
		if (fs->frame_size == 0) {
			fs->slice_type = p->slice_type;
//			fs->pts = p->pts;
//			fs->pts64 = p->pts64;
		}
		fs->frame_size += p->frame_size;
#if (MVC_EXTENSION_ENABLE)
		fs->view_id = p->view_id;
		fs->inter_view_flag[0] = p->inter_view_flag;
		fs->anchor_pic_flag[0] = p->anchor_pic_flag;
#endif
		if (p->used_for_reference) {
			fs->is_reference |= 1;
			fs->is_orig_reference |= 1;
			if (p->is_long_term) {
				fs->is_long_term |= 1;
				fs->long_term_frame_idx =
					p->long_term_frame_idx;
			}
		}
		if (fs->is_used == 3) {
			/* generate frame view */
			dpb_combine_field(p_H264_Dpb, fs);
		} else {
			fs->poc = p->poc;
		}
		gen_field_ref_ids(p_Vid, p);
		break;
	case BOTTOM_FIELD:
		fs->bottom_field = p;
		fs->is_used |= 2; /* bit 1 = bottom field present */
		fs->layer_id = p->layer_id;
		if (fs->frame_size == 0) {
			fs->slice_type = p->slice_type;
//			fs->pts = p->pts;
//			fs->pts64 = p->pts64;
		}
		fs->frame_size += p->frame_size;
#if (MVC_EXTENSION_ENABLE)
		fs->view_id = p->view_id;
		fs->inter_view_flag[1] = p->inter_view_flag;
		fs->anchor_pic_flag[1] = p->anchor_pic_flag;
#endif
		if (p->used_for_reference) {
			fs->is_reference |= 2;
			fs->is_orig_reference |= 2;
			if (p->is_long_term) {
				fs->is_long_term |= 2;
				fs->long_term_frame_idx =
					p->long_term_frame_idx;
			}
		}
		if (fs->is_used == 3) {
			/* generate frame view */
			dpb_combine_field(p_H264_Dpb, fs);
		} else {
			fs->poc = p->poc;
		}
		gen_field_ref_ids(p_Vid, p);
		break;
	}
	fs->frame_num = p->pic_num;
	fs->recovery_frame = p->recovery_frame;

	fs->is_output = p->is_output;
	fs->pre_output = p->pre_output;

	/* picture qos information */
	fs->max_mv = p->max_mv;
	fs->avg_mv = p->avg_mv;
	fs->min_mv = p->min_mv;

	fs->max_qp = p->max_qp;
	fs->avg_qp = p->avg_qp;
	fs->min_qp = p->min_qp;

	fs->max_skip = p->max_skip;
	fs->avg_skip = p->avg_skip;
	fs->min_skip = p->min_skip;

	if (fs->is_used == 3) {
		calculate_frame_no(p_Vid, p);
#if 0
/* ??? */
		if (-1 != p_Vid->p_ref && !p_Inp->silent)
			find_snr(p_Vid, fs->frame, &p_Vid->p_ref);
#endif
		//fs->pts = p->pts;
		//fs->pts64 = p->pts64;
	}
	fs->timestamp = p->timestamp;
	if (mvfrm) {
		fs->frame_size2 = mvfrm->frame_size;
		fs->hw_decode_time = mvfrm->hw_decode_time;
	}
}

/*
 * reset_frame_store() - release the frame/field pictures of a frame store
 * back to the pool and clear all usage/reference/output state.  Tolerates
 * a NULL frame store.
 */
void reset_frame_store(struct h264_dpb_stru *p_H264_Dpb,
		       struct FrameStore *f)
{
	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		  "%s\n", __func__);

	if (f) {
		if (f->frame) {
			free_picture(p_H264_Dpb, f->frame);
			f->frame = NULL;
		}
		if (f->top_field) {
			free_picture(p_H264_Dpb, f->top_field);
			f->top_field = NULL;
		}
		if (f->bottom_field) {
			free_picture(p_H264_Dpb, f->bottom_field);
			f->bottom_field = NULL;
		}

		/**/
		f->is_used = 0;
		f->is_reference = 0;
		f->is_long_term = 0;
		f->is_orig_reference = 0;

		f->is_output = 0;
		f->pre_output = 0;
		f->show_frame = false;

		f->frame = NULL;
		f->top_field = NULL;
		f->bottom_field = NULL;

		/* free(f); */
	}
}

/*
 * unmark_for_reference() - clear the short-term reference status of every
 * picture held by a frame store.
 * NOTE(review): the is_used == 3 branch dereferences fs->frame without a
 * NULL check, unlike the field pointers — confirm fs->frame is guaranteed
 * non-NULL whenever is_used == 3.
 */
void unmark_for_reference(struct DecodedPictureBuffer *p_Dpb,
			  struct FrameStore *fs)
{
	struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb,
		struct h264_dpb_stru, mDPB);
	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		  "%s %p %p %p %p\n", __func__,
		  fs, fs->frame, fs->top_field, fs->bottom_field);
	/* return; */
	if (fs->is_used & 1) {
		if (fs->top_field)
			fs->top_field->used_for_reference = 0;
	}
	if (fs->is_used & 2) {
		if (fs->bottom_field)
			fs->bottom_field->used_for_reference = 0;
	}
	if (fs->is_used == 3) {
		if (fs->top_field && fs->bottom_field) {
			fs->top_field->used_for_reference = 0;
			fs->bottom_field->used_for_reference = 0;
		}
		fs->frame->used_for_reference
			= 0;
	}

	fs->is_reference = 0;

}

/*
 * unmark_for_long_term_reference() - clear both short-term and long-term
 * reference status of every picture held by a frame store.
 */
static void unmark_for_long_term_reference(struct FrameStore *fs)
{
	if (fs->is_used & 1) {
		if (fs->top_field) {
			fs->top_field->used_for_reference = 0;
			fs->top_field->is_long_term = 0;
		}
	}
	if (fs->is_used & 2) {
		if (fs->bottom_field) {
			fs->bottom_field->used_for_reference = 0;
			fs->bottom_field->is_long_term = 0;
		}
	}
	if (fs->is_used == 3) {
		if (fs->top_field && fs->bottom_field) {
			fs->top_field->used_for_reference = 0;
			fs->top_field->is_long_term = 0;
			fs->bottom_field->used_for_reference = 0;
			fs->bottom_field->is_long_term = 0;
		}
		fs->frame->used_for_reference = 0;
		fs->frame->is_long_term = 0;
	}

	fs->is_reference = 0;
	fs->is_long_term = 0;
}

/*
 * get_long_term_flag_by_buf_spec_num() - look up the is_long_term field of
 * the frame store bound to the given hardware buffer.  Returns -1 if no
 * store in the DPB uses that buffer.
 */
int get_long_term_flag_by_buf_spec_num(struct h264_dpb_stru *p_H264_Dpb,
				       int buf_spec_num)
{
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;
	unsigned int i;

	for (i = 0; i < p_Dpb->used_size; i++) {
		if (p_Dpb->fs[i]->buf_spec_num == buf_spec_num)
			return p_Dpb->fs[i]->is_long_term;
	}
	return -1;
}

/*
 * update_pic_num() - recompute pic_num / long_term_pic_num for every
 * reference picture, handling frame_num wrap-around relative to the
 * current slice (H.264 8.2.4.1).  Frame and field coding take separate
 * paths; ERROR_CHECK guards skip entries with missing pictures and record
 * the line number in dpb_error_flag.
 */
static void update_pic_num(struct h264_dpb_stru *p_H264_Dpb)
{
	unsigned int i;
	struct Slice *currSlice = &p_H264_Dpb->mSlice;
	struct VideoParameters *p_Vid = currSlice->p_Vid;
	struct DecodedPictureBuffer *p_Dpb = currSlice->p_Dpb;
	struct SPSParameters *active_sps = p_Vid->active_sps;
	int add_top = 0, add_bottom = 0;
	int max_frame_num = 1 << (active_sps->log2_max_frame_num_minus4 + 4);

	if (currSlice->structure == FRAME) {
		for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL ||
				p_Dpb->fs_ref[i]->frame == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ref[i]->is_used == 3) {
				if ((p_Dpb->fs_ref[i]->frame->
					used_for_reference) &&
					(!p_Dpb->fs_ref[i]->frame->
					is_long_term)) {
					/* frame_num larger than current means
					 * it wrapped: bias by -max_frame_num
					 */
					if (p_Dpb->fs_ref[i]->frame_num >
						currSlice->frame_num) {
						p_Dpb->fs_ref[i]->
						frame_num_wrap =
						p_Dpb->fs_ref[i]->frame_num
						- max_frame_num;
					} else {
						p_Dpb->fs_ref[i]->
						frame_num_wrap =
						p_Dpb->fs_ref[i]->frame_num;
					}
					p_Dpb->fs_ref[i]->frame->pic_num =
						p_Dpb->fs_ref[i]->
						frame_num_wrap;
				}
			}
		}
		/* update long_term_pic_num */
		for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ltref[i] == NULL ||
				p_Dpb->fs_ltref[i]->frame == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ltref[i]->is_used == 3) {
				if (p_Dpb->fs_ltref[i]->frame->is_long_term) {
					p_Dpb->fs_ltref[i]->frame->
						long_term_pic_num =
						p_Dpb->fs_ltref[i]->frame->
						long_term_frame_idx;
				}
			}
		}
	} else {
		/* field coding: same-parity fields get the +1 offset */
		if (currSlice->structure == TOP_FIELD) {
			add_top = 1;
			add_bottom = 0;
		} else {
			add_top = 0;
			add_bottom = 1;
		}

		for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ref[i]->is_reference) {
				if (p_Dpb->fs_ref[i]->frame_num > currSlice->
					frame_num) {
					p_Dpb->fs_ref[i]->frame_num_wrap =
						p_Dpb->fs_ref[i]->frame_num -
						max_frame_num;
				} else {
					p_Dpb->fs_ref[i]->frame_num_wrap =
						p_Dpb->fs_ref[i]->frame_num;
				}
				if (p_Dpb->fs_ref[i]->is_reference & 1) {
#ifdef ERROR_CHECK
					if (p_Dpb->fs_ref[i]->top_field
						== NULL) {
						p_H264_Dpb->dpb_error_flag =
							__LINE__;
						continue;
					}
#endif
					p_Dpb->fs_ref[i]->top_field->
						pic_num = (2 * p_Dpb->fs_ref[i]->
						frame_num_wrap) + add_top;
				}
				if (p_Dpb->fs_ref[i]->is_reference & 2) {
#ifdef ERROR_CHECK
					if (p_Dpb->fs_ref[i]->bottom_field
						== NULL) {
						p_H264_Dpb->dpb_error_flag =
							__LINE__;
						continue;
					}
#endif
					p_Dpb->fs_ref[i]->bottom_field->
						pic_num = (2 * p_Dpb->fs_ref[i]->
						frame_num_wrap) + add_bottom;
				}
			}
		}
		/* update long_term_pic_num */
		for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ltref[i] == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ltref[i]->is_long_term & 1) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ltref[i]->top_field == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				p_Dpb->fs_ltref[i]->top_field->
					long_term_pic_num = 2 *
					p_Dpb->fs_ltref[i]->top_field->
					long_term_frame_idx + add_top;
			}
			if (p_Dpb->fs_ltref[i]->is_long_term & 2) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ltref[i]->bottom_field == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				p_Dpb->fs_ltref[i]->bottom_field->
					long_term_pic_num = 2 *
					p_Dpb->fs_ltref[i]->bottom_field->
					long_term_frame_idx + add_bottom;
			}
		}
	}
}

/*
 * remove_frame_from_dpb() - free the pictures of the frame store at @pos,
 * clear its state, move the emptied store to the end of the fs[] array and
 * shrink used_size.
 */
static void remove_frame_from_dpb(struct h264_dpb_stru *p_H264_Dpb, int pos)
{
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;
	struct FrameStore *fs = p_Dpb->fs[pos];
	struct FrameStore *tmp;
	unsigned int i;

	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		  "%s pos %d %p\n", __func__, pos, fs);

	/* dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
	 * "remove frame with frame_num #%d\n", fs->frame_num);
	 */
	switch (fs->is_used) {
	case 3:
		free_picture(p_H264_Dpb, fs->frame);
		free_picture(p_H264_Dpb, fs->top_field);
		free_picture(p_H264_Dpb, fs->bottom_field);
		fs->frame = NULL;
		fs->top_field = NULL;
		fs->bottom_field = NULL;
		break;
	case 2:
		free_picture(p_H264_Dpb, fs->bottom_field);
		fs->bottom_field = NULL;
		break;
	case 1:
		free_picture(p_H264_Dpb, fs->top_field);
		fs->top_field = NULL;
		break;
	case 0:
		break;
	default:
		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			  "invalid frame store type %x", 500);
	}
	fs->data_flag = 0;
	fs->is_used = 0;
	fs->is_long_term = 0;
	fs->is_reference = 0;
	fs->is_orig_reference = 0;
	fs->frame_size = 0;
	/* move empty framestore to end of buffer */
	tmp = p_Dpb->fs[pos];

	/* NOTE(review): used_size is unsigned; if it were 0 the loop bound
	 * underflows — callers must only pass a valid pos in a non-empty DPB.
	 */
	for (i = pos; i < p_Dpb->used_size - 1; i++)
		p_Dpb->fs[i] =
			p_Dpb->fs[i + 1];
	p_Dpb->fs[p_Dpb->used_size - 1] = tmp;

	if (p_Dpb->used_size)
		p_Dpb->used_size--;
}

/*
 * is_used_for_reference() - return 1 if any picture held by the frame
 * store (frame, top or bottom field) is still a reference, else 0.
 */
int is_used_for_reference(struct FrameStore *fs)
{
	if (fs->is_reference)
		return 1;

	if (fs->is_used == 3) { /* frame */
		if (fs->frame->used_for_reference)
			return 1;
	}

	if (fs->is_used & 1) { /* top field */
		if (fs->top_field) {
			if (fs->top_field->used_for_reference)
				return 1;
		}
	}

	if (fs->is_used & 2) { /* bottom field */
		if (fs->bottom_field) {
			if (fs->bottom_field->used_for_reference)
				return 1;
		}
	}
	return 0;
}

/*
 * remove_unused_frame_from_dpb() - release resources of frames that are
 * done: first give back colocated buffers of all non-reference stores,
 * then remove at most ONE store that is both output and non-reference
 * (releasing its hardware buffer too).  Returns 1 if a store was removed,
 * 0 otherwise — callers loop until it returns 0.
 */
static int remove_unused_frame_from_dpb(struct h264_dpb_stru *p_H264_Dpb)
{
	unsigned int i;
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;
	/* check for frames that were already output and no longer
	 * used for reference
	 */
	for (i = 0; i < p_Dpb->used_size; i++) {
		if ((!is_used_for_reference(p_Dpb->fs[i])) &&
			(p_Dpb->fs[i]->colocated_buf_index >= 0)) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				"release_colocate_buf[%d] for fs[%d]\n",
				p_Dpb->fs[i]->colocated_buf_index, i);

			release_colocate_buf(p_H264_Dpb,
				p_Dpb->fs[i]->colocated_buf_index); /* rain */
			p_Dpb->fs[i]->colocated_buf_index = -1;
		}
	}

	for (i = 0; i < p_Dpb->used_size; i++) {
		if (p_Dpb->fs[i]->is_output &&
			(!is_used_for_reference(p_Dpb->fs[i]))) {
			release_buf_spec_num(p_H264_Dpb->vdec,
				p_Dpb->fs[i]->buf_spec_num);
			p_Dpb->fs[i]->buf_spec_num = -1;
			remove_frame_from_dpb(p_H264_Dpb, i);

			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "%s[%d]\n",
				__func__, i);

			return 1;
		}
	}
	return 0;
}

/*
 * unmark_one_error_out_frame() - drop the reference status of the first
 * already-output store flagged with an error or NULL data, so it can be
 * reclaimed.  Returns 1 if one was unmarked.
 */
static int unmark_one_error_out_frame(struct h264_dpb_stru *p_H264_Dpb)
{
	int ret = 0;
	unsigned i;
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;

	for (i = 0; i < p_Dpb->used_size; i++) {
		if (p_Dpb->fs[i]->is_output &&
			((p_Dpb->fs[i]->data_flag & ERROR_FLAG) ||
			(p_Dpb->fs[i]->data_flag & NULL_FLAG))
			) {
			unmark_for_reference(p_Dpb, p_Dpb->fs[i]);

			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * unmark_one_out_frame() - drop the reference status of already-output
 * stores.  Returns 1 if anything was unmarked.
 * NOTE(review): despite the "one" in the name there is no break, so this
 * unmarks EVERY output store (unlike unmark_one_error_out_frame above) —
 * confirm whether the missing break is intentional.
 */
static int unmark_one_out_frame(struct h264_dpb_stru *p_H264_Dpb)
{
	int ret = 0;
	unsigned i;
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;

	for (i = 0; i < p_Dpb->used_size; i++) {
		if (p_Dpb->fs[i]->is_output) {
			unmark_for_reference(p_Dpb, p_Dpb->fs[i]);

			ret = 1;
		}
	}
	return ret;
}
/*
 force_flag,
	1, remove one error buf (is_out is 1) if there is no un-used buf
	2, remove one buf (is_out is 1) if there is no un-used buf
*/
/*
 * bufmgr_h264_remove_unused_frame() - reclaim all removable frame stores.
 * If nothing could be removed, force_flag selects a fallback: 2 forcibly
 * unmarks output frames, 1 unmarks only errored output frames; both then
 * retry the removal.
 */
void bufmgr_h264_remove_unused_frame(struct h264_dpb_stru *p_H264_Dpb,
				     u8 force_flag)
{
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;
	int ret = 0;
	unsigned char removed_flag = 0;
	do {
		ret = remove_unused_frame_from_dpb(p_H264_Dpb);
		if (ret != 0)
			removed_flag = 1;
	} while (ret != 0);
	if (removed_flag) {
		dpb_print(p_H264_Dpb->decoder_index,
			PRINT_FLAG_DPB_DETAIL, "%s\r\n", __func__);
		dump_dpb(p_Dpb, 0);
	} else if (force_flag == 2) {
		if (unmark_one_out_frame(p_H264_Dpb)) {
			dpb_print(p_H264_Dpb->decoder_index,
				0, "%s, Warnning, force unmark one frame\r\n",
				__func__);
			update_ref_list(p_Dpb);
			remove_unused_frame_from_dpb(p_H264_Dpb);
			dump_dpb(p_Dpb, 0);
		}
	} else if (force_flag == 1) {
		if (unmark_one_error_out_frame(p_H264_Dpb)) {
			dpb_print(p_H264_Dpb->decoder_index,
				0, "%s, unmark error frame\r\n",
				__func__);
			update_ref_list(p_Dpb);
			remove_unused_frame_from_dpb(p_H264_Dpb);
			dump_dpb(p_Dpb, 0);
		}
	}
}

#ifdef OUTPUT_BUFFER_IN_C
/*
 * is_there_unused_frame_from_dpb() - non-destructive probe: returns 1 if
 * at least one store could be removed by remove_unused_frame_from_dpb().
 */
int is_there_unused_frame_from_dpb(struct DecodedPictureBuffer *p_Dpb)
{
	unsigned int i;

	/* check for frames that were already output and no longer
	 * used for reference
	 */
	for (i = 0; i < p_Dpb->used_size; i++) {
		if (p_Dpb->fs[i]->is_output &&
			(!is_used_for_reference(p_Dpb->fs[i]))) {
			return 1;
		}
	}
	return 0;
}
#endif

/*
 * get_smallest_poc() - find the store with the smallest POC that has not
 * been output yet.  *pos is -1 / *poc is INT_MAX when none qualifies.
 * The scan runs under the vh264 bufspec spinlock.
 */
static void get_smallest_poc(struct DecodedPictureBuffer *p_Dpb, int *poc,
int *pos) +{ + unsigned int i; + unsigned long flags; + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + struct vdec_s *vdec= (struct vdec_s *)p_H264_Dpb->vdec; + void *p = vh264_get_bufspec_lock(vdec); + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "%s\n", __func__); + if (p_Dpb->used_size < 1) { + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "Cannot determine smallest POC, DPB empty. %d\n", + 150); + } + + *pos = -1; + *poc = INT_MAX; + if (p == NULL) + return; + spin_lock_irqsave(p, flags); + for (i = 0; i < p_Dpb->used_size; i++) { +#ifdef OUTPUT_BUFFER_IN_C + /* rain */ + if ((*poc > p_Dpb->fs[i]->poc) && + (!p_Dpb->fs[i]->is_output) && + (!p_Dpb->fs[i]->pre_output)) { +#else + if ((*poc > p_Dpb->fs[i]->poc) && (!p_Dpb->fs[i]->is_output)) { +#endif + *poc = p_Dpb->fs[i]->poc; + *pos = i; + } + } + spin_unlock_irqrestore(p, flags); +} + +int output_frames(struct h264_dpb_stru *p_H264_Dpb, unsigned char flush_flag) +{ + int poc, pos; + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + int i; + int none_displayed_num = 0; + unsigned char fast_output_flag = 0; + if (!flush_flag) { + for (i = 0; i < p_Dpb->used_size; i++) { + if ((!p_Dpb->fs[i]->is_output) && + (!p_Dpb->fs[i]->pre_output) &&((p_Dpb->fs[i]->is_used == 3 + ||p_Dpb->fs[i]->data_flag & ERROR_FLAG ))) { + none_displayed_num++; + if ((p_H264_Dpb->first_insert_frame == FirstInsertFrm_IDLE || + p_H264_Dpb->first_insert_frame == FirstInsertFrm_RESET) + && (p_Dpb->fs[i]->is_used == 3) + && (p_Dpb->last_output_poc == INT_MIN)) { + if (p_H264_Dpb->first_insert_frame == FirstInsertFrm_IDLE) + fast_output_flag = 1; + p_H264_Dpb->first_insert_frame = FirstInsertFrm_OUT; + p_H264_Dpb->first_output_poc = p_Dpb->fs[i]->poc; + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s first insert frame i %d poc %d frame_num %x\n", + __func__, i, p_Dpb->fs[i]->poc, p_Dpb->fs[i]->frame_num); + } + + /*check poc even/odd*/ + if 
(p_H264_Dpb->poc_even_odd_flag == 0 && + p_H264_Dpb->decode_pic_count >= 3) + p_H264_Dpb->poc_even_odd_flag = 2; + if (p_Dpb->fs[i]->poc & 0x1) + p_H264_Dpb->poc_even_odd_flag = 1; + /**/ + + if ((p_H264_Dpb->fast_output_enable & 0x1) && + (p_Dpb->fs[i]->data_flag & IDR_FLAG)) + fast_output_flag = 1; + if ((p_H264_Dpb->fast_output_enable & 0x2) && + ((p_Dpb->fs[i]->poc - + p_Dpb->last_output_poc) + == 1)) + fast_output_flag = 1; + if ((p_H264_Dpb->fast_output_enable & 0x4) && + (p_H264_Dpb->poc_even_odd_flag == 2) && + (p_Dpb->fs[i]->is_used == 3) && + ((p_Dpb->fs[i]->poc - + p_Dpb->last_output_poc) + == 2)) + fast_output_flag = 1; + } + } + if (fast_output_flag) + ; + else if (none_displayed_num < + p_H264_Dpb->reorder_output) + return 0; + } + + get_smallest_poc(p_Dpb, &poc, &pos); + + if (pos == -1) + return 0; +#if 0 + if (is_used_for_reference(p_Dpb->fs[pos])) + return 0; +#endif + if (p_H264_Dpb->first_insert_frame == FirstInsertFrm_OUT) { + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s pos %d pos->poc %d first_output_poc %d \n", + __func__, pos, p_Dpb->fs[pos]->poc, p_H264_Dpb->first_output_poc); + + if (p_Dpb->fs[pos]->poc < p_H264_Dpb->first_output_poc) + p_Dpb->fs[pos]->data_flag |= NODISP_FLAG; + else if (p_Dpb->last_output_poc != INT_MIN) + p_H264_Dpb->first_insert_frame = FirstInsertFrm_SKIPDONE; + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s first_insert_frame %d \n", __func__, p_H264_Dpb->first_insert_frame); + } + if (prepare_display_buf(p_H264_Dpb->vdec, p_Dpb->fs[pos]) >= 0) { + if (!p_H264_Dpb->without_display_mode && + p_Dpb->fs[pos]->show_frame != false) + p_Dpb->fs[pos]->pre_output = 1; + } else { + if (h264_debug_flag & PRINT_FLAG_DPB_DETAIL) { + dpb_print(p_H264_Dpb->decoder_index, 0, + "%s[%d] poc:%d last_output_poc:%d poc_even_odd_flag:%d\n", + __func__, pos, poc, + p_Dpb->last_output_poc, + p_H264_Dpb->poc_even_odd_flag); + dump_dpb(p_Dpb, 1); + } + return 0; + } + 
dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s[%d] poc %d last_output_poc %d poc_even_odd_flag %d\n", + __func__, pos, poc, + p_Dpb->last_output_poc, + p_H264_Dpb->poc_even_odd_flag); + + p_Dpb->last_output_poc = poc; + return 1; + +} + + +void flush_dpb(struct h264_dpb_stru *p_H264_Dpb) +{ + /* struct VideoParameters *p_Vid = p_Dpb->p_Vid; */ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + unsigned int i; + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + + /* diagnostics */ + /* dpb_print(p_H264_Dpb->decoder_index, + *PRINT_FLAG_DPB_DETAIL, + *"Flush remaining frames from the dpb." + *"p_Dpb->size = %d, p_Dpb->used_size = %d\n", + *p_Dpb->size, p_Dpb->used_size); + */ + + if (!p_Dpb->init_done) + return; +/* if(p_Vid->conceal_mode == 0) */ +#if 0 +/* ??? */ + if (p_Vid->conceal_mode != 0) + conceal_non_ref_pics(p_Dpb, 0); +#endif + /* mark all frames unused */ + for (i = 0; i < p_Dpb->used_size; i++) { +#if MVC_EXTENSION_ENABLE + assert(p_Dpb->fs[i]->view_id == p_Dpb->layer_id); +#endif + unmark_for_reference(p_Dpb, p_Dpb->fs[i]); + + } + + while (remove_unused_frame_from_dpb(p_H264_Dpb)) + ; + + /* output frames in POC order */ + while (output_frames(p_H264_Dpb, 1)) + ; + + + p_Dpb->last_output_poc = INT_MIN; +} + +static int is_short_term_reference(struct DecodedPictureBuffer *p_Dpb, + struct FrameStore *fs) +{ + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + if (fs->is_used == 3) { /* frame */ + if ((fs->frame->used_for_reference) && + (!fs->frame->is_long_term)) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "[[%s 1]]", + __func__); + return 1; + } + } + + if (fs->is_used & 1) { /* top field */ + if (fs->top_field) { + if ((fs->top_field->used_for_reference) && + (!fs->top_field->is_long_term)) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "[[%s 2]]", + __func__); + return 1; + } + } + } + + if (fs->is_used & 2) { 
/* bottom field */ + if (fs->bottom_field) { + if ((fs->bottom_field->used_for_reference) && + (!fs->bottom_field->is_long_term)) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "[[%s 3]]", + __func__); + return 1; + } + } + } + return 0; +} + +static int is_long_term_reference(struct FrameStore *fs) +{ + + if (fs->is_used == 3) { /* frame */ + if ((fs->frame->used_for_reference) && + (fs->frame->is_long_term)) { + return 1; + } + } + + if (fs->is_used & 1) { /* top field */ + if (fs->top_field) { + if ((fs->top_field->used_for_reference) && + (fs->top_field->is_long_term)) { + return 1; + } + } + } + + if (fs->is_used & 2) { /* bottom field */ + if (fs->bottom_field) { + if ((fs->bottom_field->used_for_reference) && + (fs->bottom_field->is_long_term)) { + return 1; + } + } + } + return 0; +} + +void update_ref_list(struct DecodedPictureBuffer *p_Dpb) +{ + unsigned int i, j; + + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s (%d, %d)\n", __func__, p_Dpb->size, p_Dpb->used_size); + for (i = 0, j = 0; i < p_Dpb->used_size; i++) { +#if 1 + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "fs[%d]: fs %p frame %p is_reference %d %d %d\n", + i, p_Dpb->fs[i], p_Dpb->fs[i]->frame, + p_Dpb->fs[i]->frame != NULL ? + p_Dpb->fs[i]->frame->used_for_reference : 0, + p_Dpb->fs[i]->top_field != NULL ? + p_Dpb->fs[i]->top_field->used_for_reference : + 0, + p_Dpb->fs[i]->bottom_field != NULL ? 
+ p_Dpb->fs[i]->bottom_field->used_for_reference : 0); +#endif + if (is_short_term_reference(p_Dpb, p_Dpb->fs[i])) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "fs_ref[%d]=fs[%d]: fs %p\n", j, i, p_Dpb->fs[i]); + p_Dpb->fs_ref[j++] = p_Dpb->fs[i]; + } + } + + p_Dpb->ref_frames_in_buffer = j; + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s dpb size is %d, %d\n", __func__, p_Dpb->size, j); + while (j < p_Dpb->size) { + /* dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + *"fs_ref[%d]=null\n", j); + */ + p_Dpb->fs_ref[j++] = NULL; + } +#ifdef ERROR_CHECK + for (i = 0; i < DPB_SIZE_MAX; i++) { + if (p_Dpb->fs_ref[i] == NULL) + p_Dpb->fs_ref[i] = &dummy_fs; + } +#endif +} + +static void update_ltref_list(struct DecodedPictureBuffer *p_Dpb) +{ + unsigned int i, j; + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + for (i = 0, j = 0; i < p_Dpb->used_size; i++) { + if (is_long_term_reference(p_Dpb->fs[i])) + p_Dpb->fs_ltref[j++] = p_Dpb->fs[i]; + } + + p_Dpb->ltref_frames_in_buffer = j; + + while (j < p_Dpb->size) + p_Dpb->fs_ltref[j++] = NULL; +#ifdef ERROR_CHECK + for (i = 0; i < DPB_SIZE_MAX; i++) { + if (p_Dpb->fs_ltref[i] == NULL) + p_Dpb->fs_ltref[i] = &dummy_fs; + } +#endif +} + +static void idr_memory_management(struct h264_dpb_stru *p_H264_Dpb, + struct StorablePicture *p) +{ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s ref_frames_in_buffer %d ltref_frames_in_buffer %d\n", + __func__, p_Dpb->ref_frames_in_buffer, + p_Dpb->ltref_frames_in_buffer); + + + if (p->no_output_of_prior_pics_flag) { + int i; + for (i = 0; i < p_Dpb->used_size; i++) { + unmark_for_reference(p_Dpb, p_Dpb->fs[i]); + if (p_Dpb->fs[i]->is_long_term) + unmark_for_long_term_reference(p_Dpb->fs[i]); + if (!p_Dpb->fs[i]->is_output && 
!p_Dpb->fs[i]->pre_output) + set_frame_output_flag(p_H264_Dpb, i); + } +#if 0 + /*???*/ + /* free all stored pictures */ + int i; + + for (i = 0; i < p_Dpb->used_size; i++) { + /* reset all reference settings + * free_frame_store(p_Dpb->fs[i]); + * p_Dpb->fs[i] = alloc_frame_store(); + */ + reset_frame_store(p_H264_Dpb, p_Dpb->fs[i]); /* ??? */ + } + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) + p_Dpb->fs_ref[i] = NULL; + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) + p_Dpb->fs_ltref[i] = NULL; + p_Dpb->used_size = 0; +#endif + } else { + flush_dpb(p_H264_Dpb); + } + p_Dpb->last_picture = NULL; + + update_ref_list(p_Dpb); + update_ltref_list(p_Dpb); + p_Dpb->last_output_poc = INT_MIN; + + if (p->long_term_reference_flag) { + p_Dpb->max_long_term_pic_idx = 0; + p->is_long_term = 1; + p->long_term_frame_idx = 0; + } else { + p_Dpb->max_long_term_pic_idx = -1; + p->is_long_term = 0; + } + +#if (MVC_EXTENSION_ENABLE) + p_Dpb->last_output_view_id = -1; +#endif + +} + +static void sliding_window_memory_management( + struct DecodedPictureBuffer *p_Dpb, + struct StorablePicture *p) +{ + unsigned int i; + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + unsigned char slide_flag = 0; + unsigned int sliding_margin = imax( + 1, p_Dpb->num_ref_frames) - p_Dpb->ltref_frames_in_buffer; + /* assert (!p->idr_flag); */ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s ref_frames_in_buffer %d ltref_frames_in_buffer %d\n", + __func__, p_Dpb->ref_frames_in_buffer, + p_Dpb->ltref_frames_in_buffer); + /* if this is a reference pic with sliding window, + unmark first ref frame */ + if (p_Dpb->ref_frames_in_buffer == sliding_margin) + slide_flag = 1; + /*else if ((h264_error_proc_policy & 0x8) && + (p_Dpb->ref_frames_in_buffer > sliding_margin)) + slide_flag = 1;*/ + + if (slide_flag) { + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->is_reference && + (!(p_Dpb->fs[i]->is_long_term))) { + 
dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "unmark %d\n", i); + unmark_for_reference(p_Dpb, p_Dpb->fs[i]); + update_ref_list(p_Dpb); + break; + } + } + } + + p->is_long_term = 0; +} + +static void check_num_ref(struct DecodedPictureBuffer *p_Dpb) +{ + if ((int)(p_Dpb->ltref_frames_in_buffer + + p_Dpb->ref_frames_in_buffer) > + imax(1, p_Dpb->num_ref_frames)) { + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "Max. number of reference frames exceeded. Invalid stream. lt %d ref %d mum_ref %d\n", + p_Dpb->ltref_frames_in_buffer, + p_Dpb->ref_frames_in_buffer, + p_Dpb->num_ref_frames); + } +} + +void dump_dpb(struct DecodedPictureBuffer *p_Dpb, u8 force) +{ + unsigned int i; + unsigned char *buf = NULL; + unsigned int buf_size = 512, len = 0; + struct h264_dpb_stru *p_H264_Dpb = + container_of(p_Dpb, struct h264_dpb_stru, mDPB); + +#define DPB_STRCAT(args...) do { \ + len += snprintf(buf + len, \ + buf_size - len, ##args);\ + } while (0) + + if ((h264_debug_flag & PRINT_FLAG_DUMP_DPB) == 0 && + force == 0) + return; + + buf = kzalloc(buf_size, GFP_ATOMIC); + if (buf == NULL) + return; + + for (i = 0; i < p_Dpb->used_size; i++) { + len = 0; + memset(buf, 0, buf_size); + DPB_STRCAT("fn=%d is_used %d ", + p_Dpb->fs[i]->frame_num, + p_Dpb->fs[i]->is_used); + + if (p_Dpb->fs[i]->is_used & 1) { + if (p_Dpb->fs[i]->top_field) + DPB_STRCAT("T: poc=%d pic_num=%d ", + p_Dpb->fs[i]->top_field->poc, + p_Dpb->fs[i]->top_field->pic_num); + else + DPB_STRCAT("T: poc=%d ", + p_Dpb->fs[i]->frame->top_poc); + } + if (p_Dpb->fs[i]->is_used & 2) { + if (p_Dpb->fs[i]->bottom_field) + DPB_STRCAT("B: poc=%d pic_num=%d ", + p_Dpb->fs[i]->bottom_field->poc, + p_Dpb->fs[i]->bottom_field->pic_num); + else + DPB_STRCAT("B: poc=%d ", + p_Dpb->fs[i]->frame->bottom_poc); + } + if (p_Dpb->fs[i]->is_used == 3) { + if (p_Dpb->fs[i]->frame != NULL) + DPB_STRCAT("F: poc=%d pic_num=%d 
", + p_Dpb->fs[i]->frame->poc, + p_Dpb->fs[i]->frame->pic_num); + else + DPB_STRCAT("fs[%d] frame is null ", i); + } + DPB_STRCAT("G: poc=%d) ", p_Dpb->fs[i]->poc); + if (p_Dpb->fs[i]->is_reference) + DPB_STRCAT("ref (%d) ", p_Dpb->fs[i]->is_reference); + if (p_Dpb->fs[i]->is_long_term) + DPB_STRCAT("lt_ref (%d) ", p_Dpb->fs[i]->is_reference); + if (p_Dpb->fs[i]->is_output) + DPB_STRCAT("out(displayed) "); + if (p_Dpb->fs[i]->pre_output) + DPB_STRCAT("pre_output(in dispq or displaying) "); + if (p_Dpb->fs[i]->is_used == 3) { + if (p_Dpb->fs[i]->frame != NULL && p_Dpb->fs[i]->frame->non_existing) + DPB_STRCAT("non_existing "); + else + DPB_STRCAT("fs[%d] frame is null ", i); + } + DPB_STRCAT("dpb_frame_count %d ", + p_Dpb->fs[i]->dpb_frame_count); + +#if (MVC_EXTENSION_ENABLE) + if (p_Dpb->fs[i]->is_reference) + DPB_STRCAT("view_id (%d) ", p_Dpb->fs[i]->view_id); +#endif + if (p_Dpb->fs[i]->data_flag) { + DPB_STRCAT("data_flag(0x%x)", + p_Dpb->fs[i]->data_flag); + } + DPB_STRCAT(" bufspec %d\n", + p_Dpb->fs[i]->buf_spec_num); + dpb_print(p_H264_Dpb->decoder_index, 0, "%s", buf); + } + + kfree(buf); +} + +/*! + ************************************************************************ + * \brief + * adaptive memory management + * + ************************************************************************ + */ + +static int get_pic_num_x(struct StorablePicture *p, + int difference_of_pic_nums_minus1) +{ + int currPicNum; + + if (p->structure == FRAME) + currPicNum = p->frame_num; + else + currPicNum = 2 * p->frame_num + 1; + + return currPicNum - (difference_of_pic_nums_minus1 + 1); +} + +/*! 
+ ************************************************************************ + * \brief + * Adaptive Memory Management: Mark short term picture unused + ************************************************************************ + */ +static void mm_unmark_short_term_for_reference(struct DecodedPictureBuffer + *p_Dpb, struct StorablePicture *p, + int difference_of_pic_nums_minus1) +{ + struct h264_dpb_stru *p_H264_Dpb = + container_of(p_Dpb, struct h264_dpb_stru, mDPB); + int picNumX; + + unsigned int i; + + picNumX = get_pic_num_x(p, difference_of_pic_nums_minus1); + + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p->structure == FRAME) { + if ((p_Dpb->fs_ref[i]->is_reference == 3) && + (p_Dpb->fs_ref[i]->is_long_term == 0)) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->frame == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->frame->pic_num == + picNumX) { + unmark_for_reference(p_Dpb, + p_Dpb->fs_ref[i]); + return; + } + } + } else { + if ((p_Dpb->fs_ref[i]->is_reference & 1) && + (!(p_Dpb->fs_ref[i]->is_long_term & 1))) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->top_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->top_field->pic_num == + picNumX) { + p_Dpb->fs_ref[i]-> + top_field->used_for_reference = 0; + p_Dpb->fs_ref[i]->is_reference &= 2; + if ((p_Dpb->fs_ref[i]->is_used == 3) +#ifdef ERROR_CHECK + && p_Dpb->fs_ref[i]->frame +#endif + ) { + p_Dpb->fs_ref[i]->frame-> + used_for_reference = 0; + } + return; + } + } + if ((p_Dpb->fs_ref[i]->is_reference & 2) && + (!(p_Dpb->fs_ref[i]->is_long_term & 2))) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->bottom_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->bottom_field->pic_num == + picNumX) { + 
p_Dpb->fs_ref[i]->bottom_field-> + used_for_reference = 0; + p_Dpb->fs_ref[i]->is_reference &= 1; + if ((p_Dpb->fs_ref[i]->is_used == 3) +#ifdef ERROR_CHECK + && p_Dpb->fs_ref[i]->frame +#endif + ) { + p_Dpb->fs_ref[i]->frame-> + used_for_reference = 0; + } + return; + } + } + } + } +} + +/*! + ************************************************************************ + * \brief + * Adaptive Memory Management: Mark long term picture unused + ************************************************************************ + */ +static void mm_unmark_long_term_for_reference(struct DecodedPictureBuffer + *p_Dpb, struct StorablePicture *p, int long_term_pic_num) +{ + unsigned int i; + + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) { + if (p->structure == FRAME) { + if ((p_Dpb->fs_ltref[i]->is_reference == 3) && + (p_Dpb->fs_ltref[i]->is_long_term == 3)) { + if (p_Dpb->fs_ltref[i]->frame-> + long_term_pic_num == + long_term_pic_num) { + unmark_for_long_term_reference( + p_Dpb->fs_ltref[i]); + } + } + } else { + if ((p_Dpb->fs_ltref[i]->is_reference & 1) && + ((p_Dpb->fs_ltref[i]->is_long_term & 1))) { + if (p_Dpb->fs_ltref[i]->top_field-> + long_term_pic_num == + long_term_pic_num) { + p_Dpb->fs_ltref[i]->top_field-> + used_for_reference = 0; + p_Dpb->fs_ltref[i]->top_field-> + is_long_term = 0; + p_Dpb->fs_ltref[i]->is_reference &= 2; + p_Dpb->fs_ltref[i]->is_long_term &= 2; + if (p_Dpb->fs_ltref[i]->is_used == 3) { + p_Dpb->fs_ltref[i]->frame-> + used_for_reference = 0; + p_Dpb->fs_ltref[i]->frame-> + is_long_term = 0; + } + return; + } + } + if ((p_Dpb->fs_ltref[i]->is_reference & 2) && + ((p_Dpb->fs_ltref[i]->is_long_term & 2))) { + if (p_Dpb->fs_ltref[i]->bottom_field-> + long_term_pic_num == + long_term_pic_num) { + p_Dpb->fs_ltref[i]->bottom_field-> + used_for_reference = 0; + p_Dpb->fs_ltref[i]->bottom_field-> + is_long_term = 0; + p_Dpb->fs_ltref[i]->is_reference &= 1; + p_Dpb->fs_ltref[i]->is_long_term &= 1; + if (p_Dpb->fs_ltref[i]->is_used == 3) { + 
p_Dpb->fs_ltref[i]->frame-> + used_for_reference = 0; + p_Dpb->fs_ltref[i]->frame-> + is_long_term = 0; + } + return; + } + } + } + } +} + + +/*! + ************************************************************************ + * \brief + * Mark a long-term reference frame or complementary + * field pair unused for referemce + ************************************************************************ + */ +static void unmark_long_term_frame_for_reference_by_frame_idx( + struct DecodedPictureBuffer *p_Dpb, int long_term_frame_idx) +{ + unsigned int i; + + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) { + if (p_Dpb->fs_ltref[i]->long_term_frame_idx == + long_term_frame_idx) + unmark_for_long_term_reference(p_Dpb->fs_ltref[i]); + } +} + + +static void unmark1(struct DecodedPictureBuffer *p_Dpb, + unsigned int curr_frame_num, int i) +{ + if (p_Dpb->last_picture) { + /*if ((p_Dpb->last_picture != p_Dpb->fs_ltref[i]) || + p_Dpb->last_picture->frame_num != curr_frame_num) {*/ + unmark_for_long_term_reference(p_Dpb->fs_ltref[i]); + /*} else { + unmark_for_long_term_reference(p_Dpb->fs_ltref[i]); + }*/ + } +} + +static void unmark2(struct DecodedPictureBuffer *p_Dpb, + int curr_pic_num, int i) +{ + if ((p_Dpb->fs_ltref[i]->frame_num) != + (unsigned int)(curr_pic_num >> 1)) + unmark_for_long_term_reference(p_Dpb->fs_ltref[i]); +} + +static void unmark3_top(struct DecodedPictureBuffer *p_Dpb, + unsigned int curr_frame_num, int curr_pic_num, int mark_current, int i) +{ + if (p_Dpb->fs_ltref[i]->is_long_term == 3) { + unmark_for_long_term_reference(p_Dpb->fs_ltref[i]); + } else { + if (p_Dpb->fs_ltref[i]->is_long_term == 1) { + unmark_for_long_term_reference(p_Dpb->fs_ltref[i]); + } else { + if (mark_current) + unmark1(p_Dpb, curr_frame_num, i); + else + unmark2(p_Dpb, curr_pic_num, i); + } + } +} + +static void unmark3_bottom(struct DecodedPictureBuffer *p_Dpb, + unsigned int curr_frame_num, int curr_pic_num, int mark_current, int i) +{ + if (p_Dpb->fs_ltref[i]->is_long_term == 
2) { + unmark_for_long_term_reference(p_Dpb->fs_ltref[i]); + } else { + if (mark_current) + unmark1(p_Dpb, curr_frame_num, i); + else + unmark2(p_Dpb, curr_pic_num, i); + } +} + +static void unmark_long_term_field_for_reference_by_frame_idx( + struct DecodedPictureBuffer *p_Dpb, enum PictureStructure structure, + int long_term_frame_idx, int mark_current, unsigned int curr_frame_num, + int curr_pic_num) +{ + struct VideoParameters *p_Vid = p_Dpb->p_Vid; + unsigned int i; + + /* assert(structure!=FRAME); */ + if (curr_pic_num < 0) + curr_pic_num += (2 * p_Vid->max_frame_num); + + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) { + if (p_Dpb->fs_ltref[i]->long_term_frame_idx == + long_term_frame_idx) { + if (structure == TOP_FIELD) + unmark3_top(p_Dpb, curr_frame_num, + curr_pic_num, mark_current, i); + + if (structure == BOTTOM_FIELD) + unmark3_bottom(p_Dpb, curr_frame_num, + curr_pic_num, mark_current, i); + } + } +} + +/*! + ************************************************************************ + * \brief + * mark a picture as long-term reference + ************************************************************************ + */ +static void mark_pic_long_term(struct DecodedPictureBuffer *p_Dpb, + struct StorablePicture *p, + int long_term_frame_idx, int picNumX) +{ + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + unsigned int i; + int add_top, add_bottom; + + if (p->structure == FRAME) { + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->is_reference == 3) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->frame == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if ((!p_Dpb->fs_ref[i]->frame-> + is_long_term) && + (p_Dpb->fs_ref[i]->frame->pic_num == + picNumX)) { + p_Dpb->fs_ref[i]-> + long_term_frame_idx = + p_Dpb->fs_ref[i]->frame-> + 
long_term_frame_idx = + long_term_frame_idx; + p_Dpb->fs_ref[i]->frame-> + long_term_pic_num = + long_term_frame_idx; + p_Dpb->fs_ref[i]->frame-> + is_long_term = 1; + + if (p_Dpb->fs_ref[i]->top_field && + p_Dpb->fs_ref[i]->bottom_field) { + p_Dpb->fs_ref[i]->top_field-> + long_term_frame_idx = + p_Dpb->fs_ref[i]-> + bottom_field-> + long_term_frame_idx = + long_term_frame_idx; + p_Dpb->fs_ref[i]->top_field-> + long_term_pic_num = + long_term_frame_idx; + p_Dpb->fs_ref[i]-> + bottom_field-> + long_term_pic_num = + long_term_frame_idx; + + p_Dpb->fs_ref[i]->top_field-> + is_long_term = + p_Dpb->fs_ref[i]-> + bottom_field-> + is_long_term + = 1; + + } + p_Dpb->fs_ref[i]->is_long_term = 3; + return; + } + } + } + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "Warning: reference frame for long term marking not found\n"); + } else { + if (p->structure == TOP_FIELD) { + add_top = 1; + add_bottom = 0; + } else { + add_top = 0; + add_bottom = 1; + } + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->is_reference & 1) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->top_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if ((!p_Dpb->fs_ref[i]->top_field-> + is_long_term) && + (p_Dpb->fs_ref[i]->top_field->pic_num == + picNumX)) { + if ((p_Dpb->fs_ref[i]-> + is_long_term) && + (p_Dpb->fs_ref[i]-> + long_term_frame_idx != + long_term_frame_idx)) { + dpb_print(p_H264_Dpb-> + decoder_index, + PRINT_FLAG_DPB_DETAIL, + "Warning: assigning long_term_frame_idx different from other field\n"); + } + + p_Dpb->fs_ref[i]-> + long_term_frame_idx = + p_Dpb->fs_ref[i]->top_field-> + long_term_frame_idx + = long_term_frame_idx; + p_Dpb->fs_ref[i]->top_field-> + long_term_pic_num = + 2 * long_term_frame_idx + + add_top; + p_Dpb->fs_ref[i]->top_field-> + is_long_term = 1; + 
p_Dpb->fs_ref[i]->is_long_term |= 1; + if ((p_Dpb->fs_ref[i]->is_long_term + == 3) +#ifdef ERROR_CHECK + && p_Dpb->fs_ref[i]->frame +#endif + ) { + p_Dpb->fs_ref[i]->frame-> + is_long_term = 1; + p_Dpb->fs_ref[i]->frame-> + long_term_frame_idx = + p_Dpb->fs_ref[i]-> + frame-> + long_term_pic_num = + long_term_frame_idx; + } + return; + } + } + if (p_Dpb->fs_ref[i]->is_reference & 2) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->bottom_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if ((!p_Dpb->fs_ref[i]->bottom_field-> + is_long_term) && + (p_Dpb->fs_ref[i]->bottom_field->pic_num + == picNumX)) { + if ((p_Dpb->fs_ref[i]-> + is_long_term) && + (p_Dpb->fs_ref[i]-> + long_term_frame_idx != + long_term_frame_idx)) { + dpb_print(p_H264_Dpb-> + decoder_index, + PRINT_FLAG_DPB_DETAIL, + "Warning: assigning long_term_frame_idx different from other field\n"); + } + + p_Dpb->fs_ref[i]-> + long_term_frame_idx = + p_Dpb->fs_ref[i]->bottom_field + ->long_term_frame_idx + = long_term_frame_idx; + p_Dpb->fs_ref[i]->bottom_field-> + long_term_pic_num = 2 * + long_term_frame_idx + + add_bottom; + p_Dpb->fs_ref[i]->bottom_field-> + is_long_term = 1; + p_Dpb->fs_ref[i]->is_long_term |= 2; + if ((p_Dpb->fs_ref[i]-> + is_long_term == 3) +#ifdef ERROR_CHECK + && p_Dpb->fs_ref[i]->frame +#endif + ) { + p_Dpb->fs_ref[i]->frame-> + is_long_term = 1; + p_Dpb->fs_ref[i]->frame-> + long_term_frame_idx = + p_Dpb->fs_ref[i]-> + frame-> + long_term_pic_num = + long_term_frame_idx; + } + return; + } + } + } + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "Warning: reference field for long term marking not found\n"); + } +} + + +/*! 
+ ************************************************************************ + * \brief + * Assign a long term frame index to a short term picture + ************************************************************************ + */ +static void mm_assign_long_term_frame_idx(struct DecodedPictureBuffer *p_Dpb, + struct StorablePicture *p, int difference_of_pic_nums_minus1, + int long_term_frame_idx) +{ + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + int picNumX = get_pic_num_x(p, difference_of_pic_nums_minus1); + + /* remove frames/fields with same long_term_frame_idx */ + if (p->structure == FRAME) { + unmark_long_term_frame_for_reference_by_frame_idx(p_Dpb, + long_term_frame_idx); + } else { + unsigned int i; + enum PictureStructure structure = FRAME; + + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->is_reference & 1) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->top_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->top_field-> + pic_num == picNumX) { + structure = TOP_FIELD; + break; + } + } + if (p_Dpb->fs_ref[i]->is_reference & 2) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->bottom_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->bottom_field-> + pic_num == picNumX) { + structure = BOTTOM_FIELD; + break; + } + } + } + if (structure == FRAME) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "field for long term marking not found %d", + 200); + } + + unmark_long_term_field_for_reference_by_frame_idx(p_Dpb, + structure, + long_term_frame_idx, 0, 0, picNumX); + } + + mark_pic_long_term(p_Dpb, p, long_term_frame_idx, picNumX); +} + +/*! 
+ ************************************************************************ + * \brief + * Set new max long_term_frame_idx + ************************************************************************ + */ +static void mm_update_max_long_term_frame_idx(struct DecodedPictureBuffer + *p_Dpb, int max_long_term_frame_idx_plus1) +{ + unsigned int i; + + p_Dpb->max_long_term_pic_idx = max_long_term_frame_idx_plus1 - 1; + + /* check for invalid frames */ + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) { + if (p_Dpb->fs_ltref[i]->long_term_frame_idx > + p_Dpb->max_long_term_pic_idx) { + unmark_for_long_term_reference(p_Dpb->fs_ltref[i]); + } + } +} + + +/*! + ************************************************************************ + * \brief + * Mark all long term reference pictures unused for reference + ************************************************************************ + */ +static void mm_unmark_all_long_term_for_reference(struct DecodedPictureBuffer + *p_Dpb) +{ + mm_update_max_long_term_frame_idx(p_Dpb, 0); +} + +/*! + ************************************************************************ + * \brief + * Mark all short term reference pictures unused for reference + ************************************************************************ + */ +static void mm_unmark_all_short_term_for_reference(struct DecodedPictureBuffer + *p_Dpb) +{ + unsigned int i; + + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) + unmark_for_reference(p_Dpb, p_Dpb->fs_ref[i]); + update_ref_list(p_Dpb); +} + + +/*! 
 ************************************************************************
 * \brief
 *    Mark the current picture used for long term reference
 ************************************************************************
 */
static void mm_mark_current_picture_long_term(struct DecodedPictureBuffer
	*p_Dpb, struct StorablePicture *p, int long_term_frame_idx)
{
	/* remove long term pictures with same long_term_frame_idx */
	if (p->structure == FRAME) {
		unmark_long_term_frame_for_reference_by_frame_idx(p_Dpb,
			long_term_frame_idx);
	} else {
		unmark_long_term_field_for_reference_by_frame_idx(p_Dpb,
			p->structure, long_term_frame_idx,
			1, p->pic_num, 0);
	}

	p->is_long_term = 1;
	p->long_term_frame_idx = long_term_frame_idx;
}

/*!
 ************************************************************************
 * \brief
 *    Adaptive memory control decoded reference picture marking process:
 *    walks the slice's dec_ref_pic_marking_buffer and applies each
 *    memory_management_control_operation (MMCO 0..6) to the DPB.
 *    MMCO 5 additionally zeroes pic_num/frame_num, rebases the POC of
 *    the current picture and flushes the DPB.
 ************************************************************************
 */
static void adaptive_memory_management(struct h264_dpb_stru *p_H264_Dpb,
	struct StorablePicture *p)
{
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;
	struct DecRefPicMarking_s *tmp_drpm;
	struct VideoParameters *p_Vid = p_Dpb->p_Vid;

	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		"%s\n", __func__);
	p_Vid->last_has_mmco_5 = 0;

	/* assert (!p->idr_flag); */
	/* assert (p->adaptive_ref_pic_buffering_flag); */

	while (p->dec_ref_pic_marking_buffer) {
		tmp_drpm = p->dec_ref_pic_marking_buffer;
		switch (tmp_drpm->memory_management_control_operation) {
		case 0:
			/* MMCO 0: end of MMCO list - must be the last entry */
			if (tmp_drpm->Next != NULL)
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_ERROR,
					"error, memory_management_control_operation = 0 not last operation in buffer\n");
			break;
		case 1:
			/* MMCO 1: unmark a short-term picture */
			mm_unmark_short_term_for_reference(p_Dpb, p,
				tmp_drpm->difference_of_pic_nums_minus1);
			update_ref_list(p_Dpb);
			break;
		case 2:
			/* MMCO 2: unmark a long-term picture */
			mm_unmark_long_term_for_reference(p_Dpb, p,
				tmp_drpm->long_term_pic_num);
			update_ltref_list(p_Dpb);
			break;
		case 3:
			/* MMCO 3: move a short-term picture to long-term */
			mm_assign_long_term_frame_idx(p_Dpb, p,
				tmp_drpm->difference_of_pic_nums_minus1,
				tmp_drpm->long_term_frame_idx);
			update_ref_list(p_Dpb);
			update_ltref_list(p_Dpb);
			break;

		case 4:
			/* MMCO 4: set new max long_term_frame_idx */
			mm_update_max_long_term_frame_idx(p_Dpb,
				tmp_drpm->max_long_term_frame_idx_plus1);
			update_ltref_list(p_Dpb);
			break;
		case 5:
			/* MMCO 5: unmark everything; POC/frame_num reset
			 * and DPB flush happen after the loop below.
			 */
			mm_unmark_all_short_term_for_reference(p_Dpb);
			mm_unmark_all_long_term_for_reference(p_Dpb);
			p_Vid->last_has_mmco_5 = 1;
			break;
		case 6:
			/* MMCO 6: mark the current picture long-term */
			mm_mark_current_picture_long_term(p_Dpb, p,
				tmp_drpm->long_term_frame_idx);
			check_num_ref(p_Dpb);
			break;
		default:
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_ERROR,
				"error, invalid memory_management_control_operation in buffer\n");
		}
		p->dec_ref_pic_marking_buffer = tmp_drpm->Next;
		/* free (tmp_drpm); */
	}
	if (p_Vid->last_has_mmco_5) {
		p->pic_num = p->frame_num = 0;

		switch (p->structure) {
		case TOP_FIELD: {
			/* p->poc = p->top_poc = p_Vid->toppoc =0; */
			p->poc = p->top_poc = 0;
			break;
		}
		case BOTTOM_FIELD: {
			/* p->poc = p->bottom_poc = p_Vid->bottompoc = 0; */
			p->poc = p->bottom_poc = 0;
			break;
		}
		case FRAME: {
			/* rebase both field POCs so the smaller becomes 0 */
			p->top_poc -= p->poc;
			p->bottom_poc -= p->poc;

			/* p_Vid->toppoc = p->top_poc; */
			/* p_Vid->bottompoc = p->bottom_poc; */

			p->poc = imin(p->top_poc, p->bottom_poc);
			/* p_Vid->framepoc = p->poc; */
			break;
		}
		}
		/* currSlice->ThisPOC = p->poc; */
#if (MVC_EXTENSION_ENABLE)
		if (p->view_id == 0) {
			flush_dpb(p_Vid->p_Dpb_layer[0]);
			flush_dpb(p_Vid->p_Dpb_layer[1]);
		} else {
			flush_dpb(p_Dpb);
		}
#else
		flush_dpb(p_H264_Dpb);
#endif
	}
}


/*!
 ************************************************************************
 * \brief
 *    Store the decoded picture \p p into the DPB: runs IDR / adaptive /
 *    sliding-window reference marking, pairs a second field with its
 *    stored complementary field, evicts duplicate short-term frame_num
 *    entries, then inserts the picture at the end of the buffer and
 *    refreshes the reference lists.
 *
 * \return 0 on success, -1 when the DPB is already full, -2 when the
 *    picture duplicates the frame currently being decoded.
 ************************************************************************
 */
int store_picture_in_dpb(struct h264_dpb_stru *p_H264_Dpb,
	struct StorablePicture *p,
	unsigned char data_flag)
{
	/* struct VideoParameters *p_Vid = p_Dpb->p_Vid; */
	struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo;
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;
	unsigned int i, frame_outside_count = 0;
#if 0
	int poc, pos;
#endif
	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		"%s p_Vid %p\n", __func__, p_Vid);

	/* picture error concealment */

	/* diagnostics */
	/* dpb_print(p_H264_Dpb->decoder_index,
	 *	PRINT_FLAG_DPB_DETAIL,
	 * "Storing (%s) non-ref pic with frame_num #%d\n",
	 * (p->type == FRAME)?"FRAME":(p->type == TOP_FIELD)?
	 * "TOP_FIELD":"BOTTOM_FIELD", p->pic_num);
	 */
	/* if frame, check for new store, */
	/* assert (p!=NULL); */

	p_Vid->last_has_mmco_5 = 0;
	p_Vid->last_pic_bottom_field = (p->structure == BOTTOM_FIELD);
	if (p->idr_flag) {
		idr_memory_management(p_H264_Dpb, p);
		if (p_H264_Dpb->first_insert_frame == FirstInsertFrm_OUT)
			p_H264_Dpb->first_insert_frame = FirstInsertFrm_SKIPDONE;
#if 0
/* ??? */
		/* picture error concealment */
		memset(p_Vid->pocs_in_dpb, 0, sizeof(int) * 100);
#endif
	} else {
#if 1
/* ??? */
		/* adaptive memory management */
		if (p->used_for_reference &&
			(p->adaptive_ref_pic_buffering_flag))
			adaptive_memory_management(p_H264_Dpb, p);
#endif
	}

	if ((p->structure == TOP_FIELD) || (p->structure == BOTTOM_FIELD)) {
		/* check for frame store with same pic_number:
		 * a second field is paired into the frame store that
		 * already holds its complementary field, provided the
		 * reference status of both fields matches.
		 */
		if (p_Dpb->last_picture) {
			if ((int)p_Dpb->last_picture->frame_num ==
				p->pic_num) {
				if (((p->structure == TOP_FIELD) &&
					(p_Dpb->last_picture->is_used == 2)) ||
					((p->structure == BOTTOM_FIELD) &&
					(p_Dpb->last_picture->is_used == 1))) {
					if ((p->used_for_reference &&
						(p_Dpb->last_picture->
						is_orig_reference != 0)) ||
						(!p->used_for_reference &&
						(p_Dpb->last_picture->
						is_orig_reference == 0))) {
						insert_picture_in_dpb(
							p_H264_Dpb,
							p_Dpb->last_picture,
							p, data_flag);
						update_ref_list(p_Dpb);
						update_ltref_list(p_Dpb);
						dump_dpb(p_Dpb, 0);
						p_Dpb->last_picture = NULL;
						return 0;
					}
				}
			}
		}
	}
	/* this is a frame or a field which has no stored
	 * complementary field
	 */

	/* sliding window, if necessary */
	if ((!p->idr_flag) && (p->used_for_reference &&
		(!p->adaptive_ref_pic_buffering_flag))) {
		sliding_window_memory_management(p_Dpb, p);
	}

	/* picture error concealment */
	if (p_Vid->conceal_mode != 0) {
		for (i = 0; i < p_Dpb->size; i++)
			if (p_Dpb->fs[i]->is_reference)
				p_Dpb->fs[i]->concealment_reference = 1;
	}

	/* reclaim frame stores that are neither referenced nor pending
	 * output, then emit whatever can already be output
	 */
	while (remove_unused_frame_from_dpb(p_H264_Dpb))
		;

	while (output_frames(p_H264_Dpb, 0))
		;

	/* check for duplicate frame number in short term reference buffer */
	if ((p->used_for_reference) && (!p->is_long_term)) {
		for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL)
				continue;
#endif
			if (p_Dpb->fs_ref[i]->frame_num == p->frame_num) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					"duplicate frame_num in short-term reference picture buffer %d\n",
					500);
				if (p_Dpb->fs_ref[i]->dpb_frame_count == p_H264_Dpb->dpb_frame_count) {
					/* same decode pass: refuse to insert */
					dpb_print(p_H264_Dpb->decoder_index,
						0, "duplicate frame, no insert to dpb\n");
					return -2;
				} else {
					/* older duplicate: release its
					 * reference marking instead
					 * (message typo "defore" is in the
					 * original log string)
					 */
					dpb_print(p_H264_Dpb->decoder_index,
						0, "duplicate frame_num release defore ref\n");
					unmark_for_reference(p_Dpb, p_Dpb->fs_ref[i]);
					update_ref_list(p_Dpb);
				}
			}
		}
	}
	/* store at end of buffer */

	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		"%s p_Dpb->used_size %d\n", __func__, p_Dpb->used_size);
	if (p_Dpb->used_size >= p_Dpb->size) {
		dpb_print(p_H264_Dpb->decoder_index,
			PRINT_FLAG_ERROR,
			"%s Error: used_sizd %d is large than dpb size\r\n",
			__func__, p_Dpb->used_size);
		/*h264_debug_flag |= PRINT_FLAG_DUMP_DPB;*/
		dump_dpb(p_Dpb, 0);
		return -1;
	}

	insert_picture_in_dpb(p_H264_Dpb, p_Dpb->fs[p_Dpb->used_size],
		p, data_flag);

	/* picture error concealment */
	if (p->idr_flag)
		p_Vid->earlier_missing_poc = 0;

	/* remember an unpaired field so its complement can join it above */
	if (p->structure != FRAME)
		p_Dpb->last_picture = p_Dpb->fs[p_Dpb->used_size];
	else
		p_Dpb->last_picture = NULL;

	p_Dpb->used_size++;
#if 0
/* ???
 */
	if (p_Vid->conceal_mode != 0)
		p_Vid->pocs_in_dpb[p_Dpb->used_size - 1] = p->poc;
#endif
	update_ref_list(p_Dpb);
	update_ltref_list(p_Dpb);

	check_num_ref(p_Dpb);
	/* NOTE(review): frame_outside_count is tallied here but never
	 * read afterwards in this function
	 */
	for (i = 0; i < p_Dpb->used_size; i++) {
		if (p_Dpb->fs[i]->pre_output)
			frame_outside_count++;
	}

	/* i is reused as the fast-output flag for output_frames() */
	if (p_H264_Dpb->fast_output_enable == H264_OUTPUT_MODE_FAST)
		i = 1;
	else
		i = 0;

	if (i || (p_H264_Dpb->first_insert_frame < FirstInsertFrm_SKIPDONE)) {
		while (output_frames(p_H264_Dpb, i))
			;
	}

	dump_dpb(p_Dpb, 0);
	p_Dpb->first_pic_done = 1; /*by rain*/

	return 0;
}

/*!
 ************************************************************************
 * \brief
 *    Post-slice bookkeeping: after an MMCO 5 the next slice's
 *    pre_frame_num must restart from 0.
 ************************************************************************
 */
void bufmgr_post(struct h264_dpb_stru *p_H264_Dpb)
{
	/*VideoParameters *p_Vid = p_Dpb->p_Vid;*/
	struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo;

	if (p_Vid->last_has_mmco_5)
		p_Vid->pre_frame_num = 0;
}
/**********************************
 *
 * Initialize reference lists
 **********************************
 */
#define __COMPARE(context, p1, p2) comp(p1, p2)
#define __SHORTSORT(lo, hi, width, comp, context) \
	shortsort(lo, hi, width, comp)
#define CUTOFF 8 /* testing shows that this is good value */
#define STKSIZ (8*sizeof(void *) - 2)

/* ensure any macro named swap (e.g. from kernel headers) does not
 * shadow the local helper below
 */
#undef swap
/* Byte-wise exchange of two width-sized elements. */
static void swap(
	char *a,
	char *b,
	size_t width
)
{
	char tmp;

	if (a != b)
		/* Do the swap one character at a time to avoid potential
		 * alignment problems.
		 */
		while (width--) {
			tmp = *a;
			*a++ = *b;
			*b++ = tmp;
		}
}

/* O(n^2) selection sort over [lo, hi] used below the qsort CUTOFF. */
static void shortsort(
	char *lo,
	char *hi,
	size_t width,
	int (*comp)(const void *, const void *)
)
{
	char *p, *max;

	/* Note: in assertions below, i and j are alway inside original
	 * bound of array to sort.
	 */

	while (hi > lo) {
		/* A[i] <= A[j] for i <= j, j > hi */
		max = lo;
		for (p = lo + width; p <= hi; p += width) {
			/* A[i] <= A[max] for lo <= i < p */
			if (__COMPARE(context, p, max) > 0)
				max = p;
			/* A[i] <= A[max] for lo <= i <= p */
		}

		/* A[i] <= A[max] for lo <= i <= hi */

		swap(max, hi, width);

		/* A[i] <= A[hi] for i <= hi, so A[i] <= A[j] for i <= j,
		 * j >= hi
		 */

		hi -= width;

		/* A[i] <= A[j] for i <= j, j > hi, loop top condition
		 * established
		 */
	}
	/* A[i] <= A[j] for i <= j, j > lo, which implies A[i] <= A[j]
	 * for i < j, so array is sorted
	 */
}

/* File-local iterative quicksort (median-of-three, explicit stack,
 * MSVC-CRT style).  NOTE(review): this static function shadows the C
 * library qsort() for all callers in this file.
 */
static void qsort(
	void *base,
	size_t num,
	size_t width,
	int (*comp)(const void *, const void *)
)
{
	char *lo, *hi; /* ends of sub-array currently sorting */
	char *mid; /* points to middle of subarray */
	char *loguy, *higuy; /* traveling pointers for partition step */
	size_t size; /* size of the sub-array */
	char *lostk[STKSIZ], *histk[STKSIZ];
	int stkptr;

/* stack for saving sub-array to be
 * processed
 */
#if 0
	/* validation section */
	_VALIDATE_RETURN_VOID(base != NULL || num == 0, EINVAL);
	_VALIDATE_RETURN_VOID(width > 0, EINVAL);
	_VALIDATE_RETURN_VOID(comp != NULL, EINVAL);
#endif
	if (num < 2)
		return; /* nothing to do */

	stkptr = 0; /* initialize stack */

	lo = (char *)base;
	hi = (char *)base + width * (num - 1); /* initialize limits */

	/* this entry point is for pseudo-recursion calling: setting
	 * lo and hi and jumping to here is like recursion, but stkptr is
	 * preserved, locals aren't, so we preserve stuff on the stack
	 */
recurse:

	size = (hi - lo) / width + 1; /* number of el's to sort */

	/* below a certain size, it is faster to use a O(n^2) sorting method */
	if (size <= CUTOFF) {
		__SHORTSORT(lo, hi, width, comp, context);
	} else {
		/* First we pick a partitioning element. The efficiency of
		 * the algorithm demands that we find one that is approximately
		 * the median of the values, but also that we select one fast.
		 * We choose the median of the first, middle, and last
		 * elements, to avoid bad performance in the face of already
		 * sorted data, or data that is made up of multiple sorted
		 * runs appended together. Testing shows that a
		 * median-of-three algorithm provides better performance than
		 * simply picking the middle element for the latter case.
		 */

		mid = lo + (size / 2) * width; /* find middle element */

		/* Sort the first, middle, last elements into order */
		if (__COMPARE(context, lo, mid) > 0)
			swap(lo, mid, width);
		if (__COMPARE(context, lo, hi) > 0)
			swap(lo, hi, width);
		if (__COMPARE(context, mid, hi) > 0)
			swap(mid, hi, width);

		/* We now wish to partition the array into three pieces, one
		 * consisting of elements <= partition element, one of elements
		 * equal to the partition element, and one of elements > than
		 * it. This is done below; comments indicate conditions
		 * established at every step.
		 */

		loguy = lo;
		higuy = hi;

		/* Note that higuy decreases and loguy increases on every
		 * iteration, so loop must terminate.
		 */
		for (;;) {
			/* lo <= loguy < hi, lo < higuy <= hi,
			 * A[i] <= A[mid] for lo <= i <= loguy,
			 * A[i] > A[mid] for higuy <= i < hi,
			 * A[hi] >= A[mid]
			 */

			/* The doubled loop is to avoid calling comp(mid,mid),
			 * since some existing comparison funcs don't work
			 * when passed the same value for both pointers.
			 */

			if (mid > loguy) {
				do {
					loguy += width;
				} while (loguy < mid &&
					__COMPARE(context, loguy, mid) <= 0);
			}
			if (mid <= loguy) {
				do {
					loguy += width;
				} while (loguy <= hi &&
					__COMPARE(context, loguy, mid) <= 0);
			}

			/* lo < loguy <= hi+1, A[i] <= A[mid] for
			 * lo <= i < loguy,
			 * either loguy > hi or A[loguy] > A[mid]
			 */

			do {
				higuy -= width;
			} while (higuy > mid &&
				__COMPARE(context, higuy, mid) > 0);

			/* lo <= higuy < hi, A[i] > A[mid] for higuy < i < hi,
			 * either higuy == lo or A[higuy] <= A[mid]
			 */

			if (higuy < loguy)
				break;

			/* if loguy > hi or higuy == lo, then we would have
			 * exited, so A[loguy] > A[mid], A[higuy] <= A[mid],
			 * loguy <= hi, higuy > lo
			 */

			swap(loguy, higuy, width);

			/* If the partition element was moved, follow it.
			 * Only need to check for mid == higuy, since before
			 * the swap, A[loguy] > A[mid] implies loguy != mid.
			 */

			if (mid == higuy)
				mid = loguy;

			/* A[loguy] <= A[mid], A[higuy] > A[mid]; so condition
			 * at top of loop is re-established
			 */
		}

		/* A[i] <= A[mid] for lo <= i < loguy,
		 * A[i] > A[mid] for higuy < i < hi,
		 * A[hi] >= A[mid]
		 * higuy < loguy
		 * implying:
		 * higuy == loguy-1
		 * or higuy == hi - 1, loguy == hi + 1, A[hi] == A[mid]
		 */

		/* Find adjacent elements equal to the partition element. The
		 * doubled loop is to avoid calling comp(mid,mid), since some
		 * existing comparison funcs don't work when passed the same
		 * value for both pointers.
		 */

		higuy += width;
		if (mid < higuy) {
			do {
				higuy -= width;
			} while (higuy > mid &&
				__COMPARE(context, higuy, mid) == 0);
		}
		if (mid >= higuy) {
			do {
				higuy -= width;
			} while (higuy > lo &&
				__COMPARE(context, higuy, mid) == 0);
		}

		/* OK, now we have the following:
		 * higuy < loguy
		 * lo <= higuy <= hi
		 * A[i] <= A[mid] for lo <= i <= higuy
		 * A[i] == A[mid] for higuy < i < loguy
		 * A[i] > A[mid] for loguy <= i < hi
		 * A[hi] >= A[mid]
		 */

		/* We've finished the partition, now we want to sort the
		 * subarrays [lo, higuy] and [loguy, hi].
		 * We do the smaller one first to minimize stack usage.
		 * We only sort arrays of length 2 or more.
		 */

		if (higuy - lo >= hi - loguy) {
			if (lo < higuy) {
				lostk[stkptr] = lo;
				histk[stkptr] = higuy;
				++stkptr;
			} /* save big recursion for later */

			if (loguy < hi) {
				lo = loguy;
				goto recurse; /* do small recursion */
			}
		} else {
			if (loguy < hi) {
				lostk[stkptr] = loguy;
				histk[stkptr] = hi;
				++stkptr; /* save big recursion for later */
			}

			if (lo < higuy) {
				hi = higuy;
				goto recurse; /* do small recursion */
			}
		}
	}

	/* We have sorted the array, except for any pending sorts on the stack.
	 * Check if there are any, and do them.
	 */

	--stkptr;
	if (stkptr >= 0) {
		lo = lostk[stkptr];
		hi = histk[stkptr];
		goto recurse; /* pop subarray from stack */
	} else
		return; /* all subarrays done */
}

/*!
+ ************************************************************************ + * \brief + * compares two stored pictures by picture number for qsort in + * descending order + * + ************************************************************************ + */ +static inline int compare_pic_by_pic_num_desc(const void *arg1, + const void *arg2) +{ + int pic_num1 = (*(struct StorablePicture **)arg1)->pic_num; + int pic_num2 = (*(struct StorablePicture **)arg2)->pic_num; + + if (pic_num1 < pic_num2) + return 1; + if (pic_num1 > pic_num2) + return -1; + else + return 0; +} + +/*! + ************************************************************************ + * \brief + * compares two stored pictures by picture number for qsort in + * descending order + * + ************************************************************************ + */ +static inline int compare_pic_by_lt_pic_num_asc(const void *arg1, + const void *arg2) +{ + int long_term_pic_num1 = + (*(struct StorablePicture **)arg1)->long_term_pic_num; + int long_term_pic_num2 = + (*(struct StorablePicture **)arg2)->long_term_pic_num; + + if (long_term_pic_num1 < long_term_pic_num2) + return -1; + if (long_term_pic_num1 > long_term_pic_num2) + return 1; + else + return 0; +} + +/*! + ************************************************************************ + * \brief + * compares two frame stores by pic_num for qsort in descending order + * + ************************************************************************ + */ +static inline int compare_fs_by_frame_num_desc(const void *arg1, + const void *arg2) +{ + int frame_num_wrap1 = (*(struct FrameStore **)arg1)->frame_num_wrap; + int frame_num_wrap2 = (*(struct FrameStore **)arg2)->frame_num_wrap; + + if (frame_num_wrap1 < frame_num_wrap2) + return 1; + if (frame_num_wrap1 > frame_num_wrap2) + return -1; + else + return 0; +} + + +/*! 
+ ************************************************************************ + * \brief + * compares two frame stores by lt_pic_num for qsort in descending order + * + ************************************************************************ + */ +static inline int compare_fs_by_lt_pic_idx_asc(const void *arg1, + const void *arg2) +{ + int long_term_frame_idx1 = + (*(struct FrameStore **)arg1)->long_term_frame_idx; + int long_term_frame_idx2 = + (*(struct FrameStore **)arg2)->long_term_frame_idx; + + if (long_term_frame_idx1 < long_term_frame_idx2) + return -1; + else if (long_term_frame_idx1 > long_term_frame_idx2) + return 1; + else + return 0; +} + + +/*! + ************************************************************************ + * \brief + * compares two stored pictures by poc for qsort in ascending order + * + ************************************************************************ + */ +static inline int compare_pic_by_poc_asc(const void *arg1, const void *arg2) +{ + int poc1 = (*(struct StorablePicture **)arg1)->poc; + int poc2 = (*(struct StorablePicture **)arg2)->poc; + + if (poc1 < poc2) + return -1; + else if (poc1 > poc2) + return 1; + else + return 0; +} + + +/*! + ************************************************************************ + * \brief + * compares two stored pictures by poc for qsort in descending order + * + ************************************************************************ + */ +static inline int compare_pic_by_poc_desc(const void *arg1, const void *arg2) +{ + int poc1 = (*(struct StorablePicture **)arg1)->poc; + int poc2 = (*(struct StorablePicture **)arg2)->poc; + + if (poc1 < poc2) + return 1; + else if (poc1 > poc2) + return -1; + else + return 0; +} + + +/*! 
+ ************************************************************************ + * \brief + * compares two frame stores by poc for qsort in ascending order + * + ************************************************************************ + */ +static inline int compare_fs_by_poc_asc(const void *arg1, const void *arg2) +{ + int poc1 = (*(struct FrameStore **)arg1)->poc; + int poc2 = (*(struct FrameStore **)arg2)->poc; + + if (poc1 < poc2) + return -1; + else if (poc1 > poc2) + return 1; + else + return 0; +} + + +/*! + ************************************************************************ + * \brief + * compares two frame stores by poc for qsort in descending order + * + ************************************************************************ + */ +static inline int compare_fs_by_poc_desc(const void *arg1, const void *arg2) +{ + int poc1 = (*(struct FrameStore **)arg1)->poc; + int poc2 = (*(struct FrameStore **)arg2)->poc; + + if (poc1 < poc2) + return 1; + else if (poc1 > poc2) + return -1; + else + return 0; +} + +/*! + ************************************************************************ + * \brief + * returns true, if picture is short term reference picture + * + ************************************************************************ + */ +static inline int is_short_ref(struct StorablePicture *s) +{ +#ifdef ERROR_CHECK + return (s && + (s->used_for_reference) && (!(s->is_long_term))); +#else + return (s->used_for_reference) && (!(s->is_long_term)); +#endif +} + + +/*! + ************************************************************************ + * \brief + * returns true, if picture is long term reference picture + * + ************************************************************************ + */ +static inline int is_long_ref(struct StorablePicture *s) +{ +#ifdef ERROR_CHECK + return (s && + s->used_for_reference) && (s->is_long_term); +#else + return (s->used_for_reference) && (s->is_long_term); +#endif +} + +/*! 
 ************************************************************************
 * \brief
 *    Initialize reference lists for a P Slice
 *    (this header describes init_lists_p_slice() further below)
 *
 ************************************************************************
 */
/*!
 ************************************************************************
 * \brief
 *    Generates a alternating field list from a given FrameStore list:
 *    fields of the same parity as currStructure are taken first, then
 *    the opposite parity, alternating while both remain.  Appends to
 *    *list starting at *list_size and advances *list_size.  long_term
 *    selects whether long-term or short-term reference fields qualify.
 *
 ************************************************************************
 */
static void gen_pic_list_from_frame_list(enum PictureStructure currStructure,
	struct FrameStore **fs_list, int list_idx,
	struct StorablePicture **list,
	char *list_size, int long_term)
{
	int top_idx = 0;
	int bot_idx = 0;

	/* pick the qualifying predicate once up front */
	int (*is_ref)(struct StorablePicture *s) = (long_term) ? is_long_ref :
		is_short_ref;


	if (currStructure == TOP_FIELD) {
		/* same parity (top) first, then alternate with bottom */
		while ((top_idx < list_idx) || (bot_idx < list_idx)) {
			for (; top_idx < list_idx; top_idx++) {
				if (fs_list[top_idx]->is_used & 1) {
					if (is_ref(fs_list[top_idx]->
						top_field)) {
						/* short term ref pic */
						list[(short) *list_size] =
							fs_list[top_idx]->top_field;
						(*list_size)++;
						top_idx++;
						break;
					}
				}
			}
			for (; bot_idx < list_idx; bot_idx++) {
				if (fs_list[bot_idx]->is_used & 2) {
					if (is_ref(fs_list[bot_idx]->
						bottom_field)) {
						/* short term ref pic */
						list[(short) *list_size] =
							fs_list[bot_idx]->bottom_field;
						(*list_size)++;
						bot_idx++;
						break;
					}
				}
			}
		}
	}
	if (currStructure == BOTTOM_FIELD) {
		/* same parity (bottom) first, then alternate with top */
		while ((top_idx < list_idx) || (bot_idx < list_idx)) {
			for (; bot_idx < list_idx; bot_idx++) {
				if (fs_list[bot_idx]->is_used & 2) {
					if (is_ref(fs_list[bot_idx]->
						bottom_field)) {
						/* short term ref pic */
						list[(short) *list_size] =
							fs_list[bot_idx]->bottom_field;
						(*list_size)++;
						bot_idx++;
						break;
					}
				}
			}
			for (; top_idx < list_idx; top_idx++) {
				if (fs_list[top_idx]->is_used & 1) {
					if (is_ref(fs_list[top_idx]->
						top_field)) {
						/* short term ref pic */
						list[(short) *list_size] =
							fs_list[top_idx]->top_field;
						(*list_size)++;
						top_idx++;
						break;
					}
				}
			}
		}
	}
}

/*!
 ************************************************************************
 * \brief
 *    Build reference picture list 0 for a P slice: short-term refs
 *    ordered by descending PicNum, then long-term refs by ascending
 *    LongTermPicNum; field slices go through the frame-store lists and
 *    gen_pic_list_from_frame_list().  List 1 is left empty, both lists
 *    are truncated to num_ref_idx_active and padded with
 *    no_reference_picture.
 ************************************************************************
 */
static void init_lists_p_slice(struct Slice *currSlice)
{
	struct VideoParameters *p_Vid = currSlice->p_Vid;
	struct DecodedPictureBuffer *p_Dpb = currSlice->p_Dpb;
	struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb,
		struct h264_dpb_stru, mDPB);

	unsigned int i;

	int list0idx = 0;
	int listltidx = 0;

	struct FrameStore **fs_list0;
	struct FrameStore **fs_listlt;

#if (MVC_EXTENSION_ENABLE)
	currSlice->listinterviewidx0 = 0;
	currSlice->listinterviewidx1 = 0;
#endif

	if (currSlice->structure == FRAME) {
		/* collect short-term reference frames */
		for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL ||
				p_Dpb->fs_ref[i]->frame == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ref[i]->is_used == 3) {
				if ((p_Dpb->fs_ref[i]->frame->
					used_for_reference) &&
					(!p_Dpb->fs_ref[i]->frame->
					is_long_term)) {
					currSlice->listX[0][list0idx++] =
						p_Dpb->fs_ref[i]->frame;
				}
			}
		}
		/* order list 0 by PicNum */
		qsort((void *)currSlice->listX[0], list0idx,
			sizeof(struct StorablePicture *),
			compare_pic_by_pic_num_desc);
		currSlice->listXsize[0] = (char) list0idx;
		CHECK_VALID(currSlice->listXsize[0], 0);
		if (h264_debug_flag & PRINT_FLAG_DPB_DETAIL) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				"listX[0] (PicNum): ");
			for (i = 0; i < list0idx; i++) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL, "%d ",
					currSlice->listX[0][i]->pic_num);
			}
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "\n");
		}
		/* long term handling */
		for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) {
			if (p_Dpb->fs_ltref[i]->is_used == 3) {
				if (p_Dpb->fs_ltref[i]->frame->is_long_term) {
					currSlice->listX[0][list0idx++] =
						p_Dpb->fs_ltref[i]->frame;
				}
			}
		}
		/* sort only the long-term tail appended after the
		 * short-term entries
		 */
		qsort((void *)&currSlice->listX[0][
			(short) currSlice->listXsize[0]],
			list0idx -
			currSlice->listXsize[0],
			sizeof(struct StorablePicture *),
			compare_pic_by_lt_pic_num_asc);
		currSlice->listXsize[0] = (char) list0idx;
		CHECK_VALID(currSlice->listXsize[0], 0);
	} else {
#if 0
		fs_list0 = calloc(p_Dpb->size, sizeof(struct FrameStore *));
		if (fs_list0 == NULL)
			no_mem_exit("init_lists: fs_list0");
		fs_listlt = calloc(p_Dpb->size, sizeof(struct FrameStore *));
		if (fs_listlt == NULL)
			no_mem_exit("init_lists: fs_listlt");
#else
		/* preallocated scratch lists inside the DPB */
		fs_list0 = &(p_Dpb->fs_list0[0]);
		fs_listlt = &(p_Dpb->fs_listlt[0]);
#endif
		for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ref[i]->is_reference)
				fs_list0[list0idx++] = p_Dpb->fs_ref[i];
		}

		qsort((void *)fs_list0, list0idx, sizeof(struct FrameStore *),
			compare_fs_by_frame_num_desc);

		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"fs_list0 (FrameNum): ");
		for (i = 0; i < list0idx; i++) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "%d ",
				fs_list0[i]->frame_num_wrap);
		}
		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"\n");

		currSlice->listXsize[0] = 0;
		gen_pic_list_from_frame_list(currSlice->structure, fs_list0,
			list0idx, currSlice->listX[0],
			&currSlice->listXsize[0], 0);

		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"listX[0] (PicNum): ");
		for (i = 0; i < currSlice->listXsize[0]; i++) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "%d ",
				currSlice->listX[0][i]->pic_num);
		}
		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"\n");

		/* long term handling */
		for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++)
			fs_listlt[listltidx++] = p_Dpb->fs_ltref[i];

		qsort((void *)fs_listlt, listltidx, sizeof(struct FrameStore *),
			compare_fs_by_lt_pic_idx_asc);

		gen_pic_list_from_frame_list(currSlice->structure, fs_listlt,
			listltidx, currSlice->listX[0],
			&currSlice->listXsize[0], 1);

		/* free(fs_list0); */
		/* free(fs_listlt); */
	}
	currSlice->listXsize[1] = 0;


	/* set max size */
	currSlice->listXsize[0] = (char) imin(currSlice->listXsize[0],
		currSlice->num_ref_idx_active[LIST_0]);
	currSlice->listXsize[1] = (char) imin(currSlice->listXsize[1],
		currSlice->num_ref_idx_active[LIST_1]);
	CHECK_VALID(currSlice->listXsize[0], 0);
	CHECK_VALID(currSlice->listXsize[1], 1);

	/* set the unused list entries to NULL */
	for (i = currSlice->listXsize[0]; i < (MAX_LIST_SIZE); i++)
		currSlice->listX[0][i] = p_Vid->no_reference_picture;
	for (i = currSlice->listXsize[1]; i < (MAX_LIST_SIZE); i++)
		currSlice->listX[1][i] = p_Vid->no_reference_picture;

#if PRINTREFLIST
#if (MVC_EXTENSION_ENABLE)
	/* print out for h264_debug_flag purpose */
	if ((p_Vid->profile_idc == MVC_HIGH ||
		p_Vid->profile_idc == STEREO_HIGH) &&
		currSlice->current_slice_nr == 0) {
		if (currSlice->listXsize[0] > 0) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "\n");
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				" ** (CurViewID:%d %d) %s Ref Pic List 0 ****\n",
				currSlice->view_id,
				currSlice->ThisPOC,
				currSlice->structure == FRAME ? "FRM" :
				(currSlice->structure == TOP_FIELD ?
				"TOP" : "BOT"));
			for (i = 0; i < (unsigned int)(currSlice->
				listXsize[0]); i++) { /* ref list 0 */
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					" %2d -> POC: %4d PicNum: %4d ViewID: %d\n",
					i,
					currSlice->listX[0][i]->poc,
					currSlice->listX[0][i]->pic_num,
					currSlice->listX[0][i]->view_id);
			}
		}
	}
#endif
#endif
}


/*!
 ************************************************************************
 * \brief
 *    Initialize reference lists
 *
 *    Builds the MBAFF field reference lists 2..5 by interleaving the
 *    top/bottom fields of the frames already placed in lists 0 and 1:
 *    lists 2/3 start with the top field, lists 4/5 with the bottom
 *    field, so each field list is twice the frame list's length.
 ************************************************************************
 */
static void init_mbaff_lists(struct h264_dpb_stru *p_H264_Dpb,
	struct Slice *currSlice)
{
	unsigned int j;
	int i;
	struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo;
	/* clear lists 2..5 before filling them */
	for (i = 2; i < 6; i++) {
		for (j = 0; j < MAX_LIST_SIZE; j++)
			currSlice->listX[i][j] = p_Vid->no_reference_picture;
		currSlice->listXsize[i] = 0;
	}

	/* lists 2 (top first) and 4 (bottom first) from list 0 */
	for (i = 0; i < currSlice->listXsize[0]; i++) {
#ifdef ERROR_CHECK
		if (currSlice->listX[0][i] == NULL) {
			p_H264_Dpb->dpb_error_flag = __LINE__;
			pr_info(
				"error currSlice->listX[0][%d] is NULL\r\n", i);
			break;
		}
#endif
		currSlice->listX[2][2 * i] =
			currSlice->listX[0][i]->top_field;
		currSlice->listX[2][2 * i + 1] =
			currSlice->listX[0][i]->bottom_field;
		currSlice->listX[4][2 * i] =
			currSlice->listX[0][i]->bottom_field;
		currSlice->listX[4][2 * i + 1] =
			currSlice->listX[0][i]->top_field;
	}
	currSlice->listXsize[2] = currSlice->listXsize[4] =
		currSlice->listXsize[0] * 2;

	/* lists 3 (top first) and 5 (bottom first) from list 1 */
	for (i = 0; i < currSlice->listXsize[1]; i++) {
#ifdef ERROR_CHECK
		if (currSlice->listX[1][i] == NULL) {
			p_H264_Dpb->dpb_error_flag = __LINE__;
			pr_info(
				"error currSlice->listX[1][%d] is NULL\r\n", i);
			break;
		}
#endif
		currSlice->listX[3][2 * i] =
			currSlice->listX[1][i]->top_field;
		currSlice->listX[3][2 * i + 1] =
			currSlice->listX[1][i]->bottom_field;
		currSlice->listX[5][2 * i] =
			currSlice->listX[1][i]->bottom_field;
		currSlice->listX[5][2 * i + 1] =
			currSlice->listX[1][i]->top_field;
	}
	currSlice->listXsize[3] = currSlice->listXsize[5] =
		currSlice->listXsize[1] * 2;
}



/* I slices use no reference pictures: clear both list sizes. */
static void init_lists_i_slice(struct Slice *currSlice)
{

#if (MVC_EXTENSION_ENABLE)
	currSlice->listinterviewidx0 = 0;
	currSlice->listinterviewidx1 = 0;
#endif

	currSlice->listXsize[0] = 0;
	currSlice->listXsize[1] = 0;
+} + +static void init_lists_b_slice(struct Slice *currSlice) +{ + struct VideoParameters *p_Vid = currSlice->p_Vid; + struct DecodedPictureBuffer *p_Dpb = currSlice->p_Dpb; + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + + unsigned int i; + int j; + + int list0idx = 0; + int list0idx_1 = 0; + int listltidx = 0; + + struct FrameStore **fs_list0; + struct FrameStore **fs_list1; + struct FrameStore **fs_listlt; + +#if (MVC_EXTENSION_ENABLE) + currSlice->listinterviewidx0 = 0; + currSlice->listinterviewidx1 = 0; +#endif + + { + /* B-Slice */ + if (currSlice->structure == FRAME) { + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL || + p_Dpb->fs_ref[i]->frame == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if ((p_Dpb->fs_ref[i]->is_used == 3) && + ((p_Dpb->fs_ref[i]->frame-> + used_for_reference) && + (!p_Dpb->fs_ref[i]->frame-> + is_long_term)) && + (currSlice->framepoc >= + p_Dpb->fs_ref[i]->frame->poc)) { + /* !KS use >= for error + * concealment + */ + currSlice->listX[0][list0idx++] = + p_Dpb->fs_ref[i]->frame; + } + } + qsort((void *)currSlice->listX[0], list0idx, + sizeof(struct StorablePicture *), + compare_pic_by_poc_desc); + + /* get the backward reference picture + * (POC>current POC) in list0; + */ + list0idx_1 = list0idx; + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL || + p_Dpb->fs_ref[i]->frame == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if ((p_Dpb->fs_ref[i]->is_used == 3) && + ((p_Dpb->fs_ref[i]->frame-> + used_for_reference) && + (!p_Dpb->fs_ref[i]->frame-> + is_long_term)) && + (currSlice->framepoc < + p_Dpb->fs_ref[i]->frame->poc)) { + currSlice-> + listX[0][list0idx++] = + p_Dpb->fs_ref[i]->frame; + } + } + qsort((void *)&currSlice->listX[0][list0idx_1], + list0idx - list0idx_1, + sizeof(struct StorablePicture *), + 
compare_pic_by_poc_asc); + + for (j = 0; j < list0idx_1; j++) { + currSlice-> + listX[1][list0idx - list0idx_1 + j] = + currSlice->listX[0][j]; + } + for (j = list0idx_1; j < list0idx; j++) { + currSlice->listX[1][j - list0idx_1] = + currSlice->listX[0][j]; + } + + currSlice->listXsize[0] = currSlice->listXsize[1] = + (char) list0idx; + CHECK_VALID(currSlice->listXsize[0], 0); + CHECK_VALID(currSlice->listXsize[1], 1); + + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "listX[0] (PicNum): "); + for (i = 0; i < currSlice->listXsize[0]; i++) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "%d ", + currSlice->listX[0][i]->pic_num); + } + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "\n"); + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "listX[1] (PicNum): "); + for (i = 0; i < currSlice->listXsize[1]; i++) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "%d ", + currSlice->listX[1][i]->pic_num); + } + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "\n"); + /* dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, + * "currSlice->listX[0] currPoc=%d (Poc): ", + * p_Vid->framepoc); + * for (i=0; i<currSlice->listXsize[0]; i++) { + * dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, + * "%d ", currSlice->listX[0][i]->poc); + * } + * dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, "\n"); + * dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, + * "currSlice->listX[1] currPoc=%d (Poc): ", + * p_Vid->framepoc); + * for (i=0; i<currSlice->listXsize[1]; i++) { + * dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, + * "%d ", + * currSlice->listX[1][i]->poc); + * } + * dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, "\n"); + */ + + /* long term handling */ + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) { + if (p_Dpb->fs_ltref[i]->is_used == 3) { + if (p_Dpb->fs_ltref[i]->frame-> + 
is_long_term) { + currSlice-> + listX[0][list0idx] = + p_Dpb->fs_ltref[i]->frame; + currSlice-> + listX[1][list0idx++] = + p_Dpb->fs_ltref[i]->frame; + } + } + } + qsort((void *)&currSlice-> + listX[0][(short) currSlice->listXsize[0]], + list0idx - currSlice->listXsize[0], + sizeof(struct StorablePicture *), + compare_pic_by_lt_pic_num_asc); + qsort((void *)&currSlice-> + listX[1][(short) currSlice->listXsize[0]], + list0idx - currSlice->listXsize[0], + sizeof(struct StorablePicture *), + compare_pic_by_lt_pic_num_asc); + currSlice->listXsize[0] = currSlice->listXsize[1] = + (char) list0idx; + CHECK_VALID(currSlice->listXsize[0], 0); + CHECK_VALID(currSlice->listXsize[1], 1); + } else { +#if 0 + fs_list0 = calloc(p_Dpb->size, + sizeof(struct FrameStore *)); + if (fs_list0 == NULL) + no_mem_exit("init_lists: fs_list0"); + fs_list1 = calloc(p_Dpb->size, + sizeof(struct FrameStore *)); + if (fs_list1 == NULL) + no_mem_exit("init_lists: fs_list1"); + fs_listlt = calloc(p_Dpb->size, + sizeof(struct FrameStore *)); + if (fs_listlt == NULL) + no_mem_exit("init_lists: fs_listlt"); +#else + fs_list0 = &(p_Dpb->fs_list0[0]); + fs_list1 = &(p_Dpb->fs_list1[0]); + fs_listlt = &(p_Dpb->fs_listlt[0]); + +#endif + currSlice->listXsize[0] = 0; + currSlice->listXsize[1] = 1; + + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->is_used) { + if (currSlice->ThisPOC >= + p_Dpb->fs_ref[i]->poc) { + fs_list0[list0idx++] = + p_Dpb->fs_ref[i]; + } + } + } + qsort((void *)fs_list0, list0idx, + sizeof(struct FrameStore *), + compare_fs_by_poc_desc); + list0idx_1 = list0idx; + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->is_used) { + if (currSlice->ThisPOC < + p_Dpb->fs_ref[i]->poc) { + 
fs_list0[list0idx++] = + p_Dpb->fs_ref[i]; + } + } + } + qsort((void *)&fs_list0[list0idx_1], + list0idx - list0idx_1, + sizeof(struct FrameStore *), + compare_fs_by_poc_asc); + + for (j = 0; j < list0idx_1; j++) { + fs_list1[list0idx - list0idx_1 + j] = + fs_list0[j]; + } + for (j = list0idx_1; j < list0idx; j++) + fs_list1[j - list0idx_1] = fs_list0[j]; + + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "fs_list0 currPoc=%d (Poc): ", + currSlice->ThisPOC); + for (i = 0; i < list0idx; i++) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "%d ", + fs_list0[i]->poc); + } + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "\n"); + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "fs_list1 currPoc=%d (Poc): ", + currSlice->ThisPOC); + for (i = 0; i < list0idx; i++) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "%d ", + fs_list1[i]->poc); + } + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "\n"); + + currSlice->listXsize[0] = 0; + currSlice->listXsize[1] = 0; + gen_pic_list_from_frame_list(currSlice->structure, + fs_list0, list0idx, + currSlice->listX[0], + &currSlice->listXsize[0], 0); + gen_pic_list_from_frame_list(currSlice->structure, + fs_list1, list0idx, + currSlice->listX[1], + &currSlice->listXsize[1], 0); + + /* dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, + * "currSlice->listX[0] currPoc=%d (Poc): ", + * p_Vid->framepoc); + * for (i=0; i<currSlice->listXsize[0]; i++) { + * dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, "%d ", + * currSlice->listX[0][i]->poc); + * } + * dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, "\n"); + */ + /* dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, + * "currSlice->listX[1] currPoc=%d (Poc): ", + * p_Vid->framepoc); + * for (i=0; i<currSlice->listXsize[1]; i++) { + * dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, "%d ", + * 
currSlice->listX[1][i]->poc); + * } + * dpb_print(p_H264_Dpb->decoder_index, + * PRINT_FLAG_DPB_DETAIL, + * "\n"); + */ + + /* long term handling */ + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) + fs_listlt[listltidx++] = p_Dpb->fs_ltref[i]; + + qsort((void *)fs_listlt, listltidx, + sizeof(struct FrameStore *), + compare_fs_by_lt_pic_idx_asc); + + gen_pic_list_from_frame_list(currSlice->structure, + fs_listlt, listltidx, + currSlice->listX[0], + &currSlice->listXsize[0], 1); + gen_pic_list_from_frame_list(currSlice->structure, + fs_listlt, listltidx, + currSlice->listX[1], + &currSlice->listXsize[1], 1); + + /* free(fs_list0); */ + /* free(fs_list1); */ + /* free(fs_listlt); */ + } + } + + if ((currSlice->listXsize[0] == currSlice->listXsize[1]) && + (currSlice->listXsize[0] > 1)) { + /* check if lists are identical, + *if yes swap first two elements of currSlice->listX[1] + */ + int diff = 0; + + for (j = 0; j < currSlice->listXsize[0]; j++) { + if (currSlice->listX[0][j] != + currSlice->listX[1][j]) { + diff = 1; + break; + } + } + if (!diff) { + struct StorablePicture *tmp_s = + currSlice->listX[1][0]; + currSlice->listX[1][0] = currSlice->listX[1][1]; + currSlice->listX[1][1] = tmp_s; + } + } + + /* set max size */ + currSlice->listXsize[0] = (char) imin(currSlice->listXsize[0], + currSlice->num_ref_idx_active[LIST_0]); + currSlice->listXsize[1] = (char) imin(currSlice->listXsize[1], + currSlice->num_ref_idx_active[LIST_1]); + CHECK_VALID(currSlice->listXsize[0], 0); + CHECK_VALID(currSlice->listXsize[1], 1); + + /* set the unused list entries to NULL */ + for (i = currSlice->listXsize[0]; i < (MAX_LIST_SIZE); i++) + currSlice->listX[0][i] = p_Vid->no_reference_picture; + for (i = currSlice->listXsize[1]; i < (MAX_LIST_SIZE); i++) + currSlice->listX[1][i] = p_Vid->no_reference_picture; + +#if PRINTREFLIST +#if (MVC_EXTENSION_ENABLE) + /* print out for h264_debug_flag purpose */ + if ((p_Vid->profile_idc == MVC_HIGH || + p_Vid->profile_idc == 
STEREO_HIGH) && + currSlice->current_slice_nr == 0) { + if ((currSlice->listXsize[0] > 0) || + (currSlice->listXsize[1] > 0)) + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "\n"); + if (currSlice->listXsize[0] > 0) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + " ** (CurViewID:%d %d) %s Ref Pic List 0 ****\n", + currSlice->view_id, + currSlice->ThisPOC, + currSlice->structure == FRAME ? "FRM" : + (currSlice->structure == TOP_FIELD ? + "TOP" : "BOT")); + for (i = 0; i < (unsigned int)(currSlice-> + listXsize[0]); i++) { /* ref list 0 */ + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + " %2d -> POC: %4d PicNum: %4d ViewID: %d\n", + i, + currSlice->listX[0][i]->poc, + currSlice->listX[0][i]->pic_num, + currSlice->listX[0][i]->view_id); + } + } + if (currSlice->listXsize[1] > 0) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + " ** (CurViewID:%d %d) %s Ref Pic List 1 ****\n", + currSlice->view_id, + currSlice->ThisPOC, + currSlice->structure == FRAME ? "FRM" : + (currSlice->structure == TOP_FIELD ? 
"TOP" : + "BOT")); + for (i = 0; i < (unsigned int)(currSlice-> + listXsize[1]); i++) { /* ref list 1 */ + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + " %2d -> POC: %4d PicNum: %4d ViewID: %d\n", + i, + currSlice->listX[1][i]->poc, + currSlice->listX[1][i]->pic_num, + currSlice->listX[1][i]->view_id); + } + } + } +#endif +#endif +} + +static struct StorablePicture *get_short_term_pic(struct Slice *currSlice, + struct DecodedPictureBuffer *p_Dpb, int picNum) +{ + unsigned int i; + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { + if (currSlice->structure == FRAME) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->is_reference == 3) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->frame == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if ((!p_Dpb->fs_ref[i]->frame-> + is_long_term) && + (p_Dpb->fs_ref[i]->frame-> + pic_num == picNum)) + return p_Dpb->fs_ref[i]->frame; + } + } else { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->is_reference & 1) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->top_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if ((!p_Dpb->fs_ref[i]->top_field-> + is_long_term) && + (p_Dpb->fs_ref[i]->top_field-> + pic_num == picNum)) + return p_Dpb->fs_ref[i]->top_field; + } + if (p_Dpb->fs_ref[i]->is_reference & 2) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->bottom_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if ((!p_Dpb->fs_ref[i]->bottom_field-> + is_long_term) && + (p_Dpb->fs_ref[i]->bottom_field-> + pic_num == picNum)) + return p_Dpb->fs_ref[i]->bottom_field; + } + } + } + + return currSlice->p_Vid->no_reference_picture; +} + + +static void 
reorder_short_term(struct Slice *currSlice, int cur_list, + int num_ref_idx_lX_active_minus1, + int picNumLX, int *refIdxLX) +{ + struct h264_dpb_stru *p_H264_Dpb = container_of(currSlice->p_Vid, + struct h264_dpb_stru, mVideo); + + struct StorablePicture **RefPicListX = currSlice->listX[cur_list]; + int cIdx, nIdx; + + struct StorablePicture *picLX; + + picLX = get_short_term_pic(currSlice, currSlice->p_Dpb, picNumLX); + + for (cIdx = num_ref_idx_lX_active_minus1 + 1; cIdx > *refIdxLX; + cIdx--) { + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s: RefPicListX[ %d ] = RefPicListX[ %d ]\n", + __func__, cIdx, cIdx - 1); + RefPicListX[cIdx] = RefPicListX[cIdx - 1]; + } + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s: RefPicListX[ %d ] = pic %x (%d)\n", __func__, + *refIdxLX, picLX, picNumLX); + + RefPicListX[(*refIdxLX)++] = picLX; + + nIdx = *refIdxLX; + + for (cIdx = *refIdxLX; cIdx <= num_ref_idx_lX_active_minus1 + 1; + cIdx++) { + if (RefPicListX[cIdx]) + if ((RefPicListX[cIdx]->is_long_term) || + (RefPicListX[cIdx]->pic_num != picNumLX)) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "%s: RefPicListX[ %d ] = RefPicListX[ %d ]\n", + __func__, nIdx, cIdx); + RefPicListX[nIdx++] = RefPicListX[cIdx]; + } + } +} + + +static struct StorablePicture *get_long_term_pic(struct Slice *currSlice, + struct DecodedPictureBuffer *p_Dpb, int LongtermPicNum) +{ + unsigned int i; + + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) { + if (currSlice->structure == FRAME) { + if (p_Dpb->fs_ltref[i]->is_reference == 3) + if ((p_Dpb->fs_ltref[i]->frame) && + (p_Dpb->fs_ltref[i]->frame-> + is_long_term) && + (p_Dpb->fs_ltref[i]->frame-> + long_term_pic_num == + LongtermPicNum)) + return p_Dpb->fs_ltref[i]->frame; + } else { + if (p_Dpb->fs_ltref[i]->is_reference & 1) + if ((p_Dpb->fs_ltref[i]->top_field) && + (p_Dpb->fs_ltref[i]->top_field-> + is_long_term) && + (p_Dpb->fs_ltref[i]->top_field-> + long_term_pic_num == 
LongtermPicNum)) + return p_Dpb->fs_ltref[i]->top_field; + + if (p_Dpb->fs_ltref[i]->is_reference & 2) + if ((p_Dpb->fs_ltref[i]->bottom_field) && + (p_Dpb->fs_ltref[i]->bottom_field-> + is_long_term) && + (p_Dpb->fs_ltref[i]->bottom_field-> + long_term_pic_num == + LongtermPicNum)) + return p_Dpb->fs_ltref[i]-> + bottom_field; + } + } + return NULL; +} + +/*! + ************************************************************************ + * \brief + * Reordering process for long-term reference pictures + * + ************************************************************************ + */ +static void reorder_long_term(struct Slice *currSlice, + struct StorablePicture **RefPicListX, + int num_ref_idx_lX_active_minus1, + int LongTermPicNum, int *refIdxLX) +{ + int cIdx, nIdx; + + struct StorablePicture *picLX; + + picLX = get_long_term_pic(currSlice, currSlice->p_Dpb, LongTermPicNum); + + for (cIdx = num_ref_idx_lX_active_minus1 + 1; cIdx > *refIdxLX; cIdx--) + RefPicListX[cIdx] = RefPicListX[cIdx - 1]; + + RefPicListX[(*refIdxLX)++] = picLX; + + nIdx = *refIdxLX; + + for (cIdx = *refIdxLX; cIdx <= num_ref_idx_lX_active_minus1 + 1; + cIdx++) { + if (RefPicListX[cIdx]) { + if ((!RefPicListX[cIdx]->is_long_term) || + (RefPicListX[cIdx]->long_term_pic_num != + LongTermPicNum)) + RefPicListX[nIdx++] = RefPicListX[cIdx]; + } + } +} + +static void reorder_ref_pic_list(struct Slice *currSlice, int cur_list) +{ + int *modification_of_pic_nums_idc = + currSlice->modification_of_pic_nums_idc[cur_list]; + int *abs_diff_pic_num_minus1 = + currSlice->abs_diff_pic_num_minus1[cur_list]; + int *long_term_pic_idx = currSlice->long_term_pic_idx[cur_list]; + int num_ref_idx_lX_active_minus1 = + currSlice->num_ref_idx_active[cur_list] - 1; + + struct VideoParameters *p_Vid = currSlice->p_Vid; + int i; + + int maxPicNum, currPicNum, picNumLXNoWrap, picNumLXPred, picNumLX; + int refIdxLX = 0; + + if (currSlice->structure == FRAME) { + maxPicNum = p_Vid->max_frame_num; + currPicNum = 
currSlice->frame_num; + } else { + maxPicNum = 2 * p_Vid->max_frame_num; + currPicNum = 2 * currSlice->frame_num + 1; + } + + picNumLXPred = currPicNum; + + for (i = 0; i < REORDERING_COMMAND_MAX_SIZE && + modification_of_pic_nums_idc[i] != 3; i++) { + if (modification_of_pic_nums_idc[i] > 3) { + struct h264_dpb_stru *p_H264_Dpb = + container_of(p_Vid, struct h264_dpb_stru, mVideo); + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, + "error, Invalid modification_of_pic_nums_idc command\n"); + /*h264_debug_flag = 0x1f;*/ + break; + } + if (modification_of_pic_nums_idc[i] < 2) { + if (modification_of_pic_nums_idc[i] == 0) { + if (picNumLXPred - (abs_diff_pic_num_minus1[i] + + 1) < 0) + picNumLXNoWrap = picNumLXPred - + (abs_diff_pic_num_minus1[i] + 1) + + maxPicNum; + else + picNumLXNoWrap = picNumLXPred - + (abs_diff_pic_num_minus1[i] + 1); + } else { /* (modification_of_pic_nums_idc[i] == 1) */ + if (picNumLXPred + (abs_diff_pic_num_minus1[i] + + 1) >= maxPicNum) + picNumLXNoWrap = picNumLXPred + + (abs_diff_pic_num_minus1[i] + 1) - + maxPicNum; + else + picNumLXNoWrap = picNumLXPred + + (abs_diff_pic_num_minus1[i] + 1); + } + picNumLXPred = picNumLXNoWrap; + + if (picNumLXNoWrap > currPicNum) + picNumLX = picNumLXNoWrap - maxPicNum; + else + picNumLX = picNumLXNoWrap; + +#if (MVC_EXTENSION_ENABLE) + reorder_short_term(currSlice, cur_list, + num_ref_idx_lX_active_minus1, picNumLX, + &refIdxLX, -1); +#else + reorder_short_term(currSlice, cur_list, + num_ref_idx_lX_active_minus1, picNumLX, + &refIdxLX); +#endif + } else { /* (modification_of_pic_nums_idc[i] == 2) */ +#if (MVC_EXTENSION_ENABLE) + reorder_long_term(currSlice, currSlice->listX[cur_list], + num_ref_idx_lX_active_minus1, + long_term_pic_idx[i], &refIdxLX, -1); +#else + reorder_long_term(currSlice, currSlice->listX[cur_list], + num_ref_idx_lX_active_minus1, + long_term_pic_idx[i], &refIdxLX); +#endif + } + + } + /* that's a definition */ + currSlice->listXsize[cur_list] = + 
(char)(num_ref_idx_lX_active_minus1 + 1); +} + +static void reorder_lists(struct Slice *currSlice) +{ + struct VideoParameters *p_Vid = currSlice->p_Vid; + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Vid, + struct h264_dpb_stru, mVideo); + int i; + + if ((currSlice->slice_type != I_SLICE) && + (currSlice->slice_type != SI_SLICE)) { + if (currSlice->ref_pic_list_reordering_flag[LIST_0]) + reorder_ref_pic_list(currSlice, LIST_0); + if (p_Vid->no_reference_picture == + currSlice-> + listX[0][currSlice->num_ref_idx_active[LIST_0] - 1]) { + if (p_Vid->non_conforming_stream) + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "RefPicList0[ %d ] is equal to 'no reference picture'\n", + currSlice-> + num_ref_idx_active[LIST_0] - 1); + else + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "RefPicList0 [ num_ref_idx_l0_active_minus1 ] is equal to 'no reference picture', invalid bitstream %d\n", + 500); + } + /* that's a definition */ + currSlice->listXsize[0] = + (char) imin(currSlice->listXsize[0], + currSlice->num_ref_idx_active[LIST_0]); + CHECK_VALID(currSlice->listXsize[0], 0); + if (h264_debug_flag & PRINT_FLAG_DPB_DETAIL) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "listX[0] reorder (PicNum): "); + for (i = 0; i < currSlice->listXsize[0]; i++) { + dpb_print_cont(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "%d ", + currSlice->listX[0][i]->pic_num); + } + dpb_print_cont(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "\n"); + } + } + + if (currSlice->slice_type == B_SLICE) { + if (currSlice->ref_pic_list_reordering_flag[LIST_1]) + reorder_ref_pic_list(currSlice, LIST_1); + if (p_Vid->no_reference_picture == + currSlice->listX[1][currSlice-> + num_ref_idx_active[LIST_1] - 1]) { + if (p_Vid->non_conforming_stream) + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "RefPicList1[ %d ] is equal to 'no reference picture'\n", + currSlice-> + num_ref_idx_active[LIST_1] - 1); + else + 
dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "RefPicList1 [ num_ref_idx_l1_active_minus1 ] is equal to 'no reference picture', invalid bitstream %d\n", + 500); + } + /* that's a definition */ + currSlice->listXsize[1] = + (char)currSlice->num_ref_idx_active[LIST_1]; + if (h264_debug_flag & PRINT_FLAG_DPB_DETAIL) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "listX[1] reorder (PicNum): "); + for (i = 0; i < currSlice->listXsize[1]; i++) { + if (currSlice->listX[1][i]) + dpb_print_cont(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "%d ", + currSlice->listX[1][i]->pic_num); + } + dpb_print_cont(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "\n"); + } + } + + /* free_ref_pic_list_reordering_buffer(currSlice); */ + + if (currSlice->slice_type == P_SLICE) { +#if PRINTREFLIST + unsigned int i; +#if (MVC_EXTENSION_ENABLE) + /* print out for h264_debug_flag purpose */ + if ((p_Vid->profile_idc == MVC_HIGH || + p_Vid->profile_idc == STEREO_HIGH) && + currSlice->current_slice_nr == 0) { + if (currSlice->listXsize[0] > 0 + && (h264_debug_flag & PRINT_FLAG_DPB_DETAIL)) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "\n"); + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + " ** (FinalViewID:%d) %s Ref Pic List 0 ****\n", + currSlice->view_id, + currSlice->structure == FRAME ? + "FRM" : + (currSlice->structure == TOP_FIELD ? 
+ "TOP" : "BOT")); + for (i = 0; i < (unsigned int)(currSlice-> + listXsize[0]); i++) { /* ref list 0 */ + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + " %2d -> POC: %4d PicNum: %4d ViewID: %d\n", + i, + currSlice->listX[0][i]->poc, + currSlice->listX[0][i]-> + pic_num, + currSlice->listX[0][i]-> + view_id); + } + } + } +#endif +#endif + } else if (currSlice->slice_type == B_SLICE) { +#if PRINTREFLIST + unsigned int i; +#if (MVC_EXTENSION_ENABLE) + /* print out for h264_debug_flag purpose */ + if ((p_Vid->profile_idc == MVC_HIGH || + p_Vid->profile_idc == STEREO_HIGH) && + currSlice->current_slice_nr == 0) { + if ((currSlice->listXsize[0] > 0) || + (currSlice->listXsize[1] > 0)) + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "\n"); + if (currSlice->listXsize[0] > 0 + && (h264_debug_flag & PRINT_FLAG_DPB_DETAIL)) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + " ** (FinalViewID:%d) %s Ref Pic List 0 ****\n", + currSlice->view_id, + currSlice->structure == FRAME ? + "FRM" : + (currSlice->structure == TOP_FIELD ? + "TOP" : "BOT")); + for (i = 0; i < (unsigned int)(currSlice-> + listXsize[0]); i++) { /* ref list 0 */ + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + " %2d -> POC: %4d PicNum: %4d ViewID: %d\n", + i, + currSlice->listX[0][i]->poc, + currSlice->listX[0][i]-> + pic_num, + currSlice->listX[0][i]-> + view_id); + } + } + if (currSlice->listXsize[1] > 0 + && (h264_debug_flag & PRINT_FLAG_DPB_DETAIL)) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + " ** (FinalViewID:%d) %s Ref Pic List 1 ****\n", + currSlice->view_id, + currSlice->structure == FRAME ? + "FRM" : + (currSlice->structure == TOP_FIELD ? 
+ "TOP" : "BOT")); + for (i = 0; i < (unsigned int)(currSlice-> + listXsize[1]); i++) { /* ref list 1 */ + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + " %2d -> POC: %4d PicNum: %4d ViewID: %d\n", + i, + currSlice->listX[1][i]->poc, + currSlice->listX[1][i]-> + pic_num, + currSlice->listX[1][i]-> + view_id); + } + } + } +#endif + +#endif + } +} + +void init_colocate_buf(struct h264_dpb_stru *p_H264_Dpb, int count) +{ + p_H264_Dpb->colocated_buf_map = 0; + p_H264_Dpb->colocated_buf_count = count; +} + +int allocate_colocate_buf(struct h264_dpb_stru *p_H264_Dpb) +{ + int i; + + for (i = 0; i < p_H264_Dpb->colocated_buf_count; i++) { + if (((p_H264_Dpb->colocated_buf_map >> i) & 0x1) == 0) { + p_H264_Dpb->colocated_buf_map |= (1 << i); + break; + } + } + if (i == p_H264_Dpb->colocated_buf_count) { + i = -1; + p_H264_Dpb->buf_alloc_fail = 1; + } + return i; +} + +int release_colocate_buf(struct h264_dpb_stru *p_H264_Dpb, int index) +{ + if (index >= 0) { + if (index >= p_H264_Dpb->colocated_buf_count) { + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_ERROR, + "%s error, index %d is bigger than buf count %d\n", + __func__, index, + p_H264_Dpb->colocated_buf_count); + } else { + if (((p_H264_Dpb->colocated_buf_map >> + index) & 0x1) == 0x1) { + p_H264_Dpb->colocated_buf_map &= + (~(1 << index)); + } else { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, + "%s error, index %d is not allocated\n", + __func__, index); + } + } + } + return 0; +} + +void set_frame_output_flag(struct h264_dpb_stru *p_H264_Dpb, int index) +{ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + + p_H264_Dpb->mFrameStore[index].is_output = 1; + p_H264_Dpb->mFrameStore[index].pre_output = 0; + p_H264_Dpb->mFrameStore[index].show_frame = false; + dump_dpb(p_Dpb, 0); +} + +#if 0 +void init_old_slice(OldSliceParams *p_old_slice) +{ + p_old_slice->field_pic_flag = 0; + p_old_slice->pps_id = INT_MAX; + p_old_slice->frame_num = INT_MAX; + p_old_slice->nal_ref_idc = 
INT_MAX; + p_old_slice->idr_flag = 0; + + p_old_slice->pic_oder_cnt_lsb = UINT_MAX; + p_old_slice->delta_pic_oder_cnt_bottom = INT_MAX; + + p_old_slice->delta_pic_order_cnt[0] = INT_MAX; + p_old_slice->delta_pic_order_cnt[1] = INT_MAX; +} + + +void copy_slice_info(struct Slice *currSlice, OldSliceParams *p_old_slice) +{ + struct VideoParameters *p_Vid = currSlice->p_Vid; + + p_old_slice->pps_id = currSlice->pic_parameter_set_id; + p_old_slice->frame_num = currSlice->frame_num; + /* p_Vid->frame_num; */ + p_old_slice->field_pic_flag = + currSlice->field_pic_flag; + /* p_Vid->field_pic_flag; */ + + if (currSlice->field_pic_flag) + p_old_slice->bottom_field_flag = currSlice->bottom_field_flag; + + p_old_slice->nal_ref_idc = currSlice->nal_reference_idc; + p_old_slice->idr_flag = (byte) currSlice->idr_flag; + + if (currSlice->idr_flag) + p_old_slice->idr_pic_id = currSlice->idr_pic_id; + + if (p_Vid->active_sps->pic_order_cnt_type == 0) { + p_old_slice->pic_oder_cnt_lsb = + currSlice->pic_order_cnt_lsb; + p_old_slice->delta_pic_oder_cnt_bottom = + currSlice->delta_pic_order_cnt_bottom; + } + + if (p_Vid->active_sps->pic_order_cnt_type == 1) { + p_old_slice->delta_pic_order_cnt[0] = + currSlice->delta_pic_order_cnt[0]; + p_old_slice->delta_pic_order_cnt[1] = + currSlice->delta_pic_order_cnt[1]; + } +#if (MVC_EXTENSION_ENABLE) + p_old_slice->view_id = currSlice->view_id; + p_old_slice->inter_view_flag = currSlice->inter_view_flag; + p_old_slice->anchor_pic_flag = currSlice->anchor_pic_flag; +#endif + p_old_slice->layer_id = currSlice->layer_id; +} + +int is_new_picture(StorablePicture *dec_picture, struct Slice *currSlice, + OldSliceParams *p_old_slice) +{ + struct VideoParameters *p_Vid = currSlice->p_Vid; + + int result = 0; + + result |= (dec_picture == NULL); + + result |= (p_old_slice->pps_id != currSlice->pic_parameter_set_id); + + result |= (p_old_slice->frame_num != currSlice->frame_num); + + result |= (p_old_slice->field_pic_flag != currSlice->field_pic_flag); + 
+ if (currSlice->field_pic_flag && p_old_slice->field_pic_flag) { + result |= (p_old_slice->bottom_field_flag != + currSlice->bottom_field_flag); + } + + result |= (p_old_slice->nal_ref_idc != + currSlice->nal_reference_idc) && + ((p_old_slice->nal_ref_idc == 0) || + (currSlice->nal_reference_idc == 0)); + result |= (p_old_slice->idr_flag != currSlice->idr_flag); + + if (currSlice->idr_flag && p_old_slice->idr_flag) + result |= (p_old_slice->idr_pic_id != currSlice->idr_pic_id); + + if (p_Vid->active_sps->pic_order_cnt_type == 0) { + result |= (p_old_slice->pic_oder_cnt_lsb != + currSlice->pic_order_cnt_lsb); + if (p_Vid->active_pps-> + bottom_field_pic_order_in_frame_present_flag == 1 && + !currSlice->field_pic_flag) { + result |= (p_old_slice->delta_pic_oder_cnt_bottom != + currSlice->delta_pic_order_cnt_bottom); + } + } + + if (p_Vid->active_sps->pic_order_cnt_type == 1) { + if (!p_Vid->active_sps->delta_pic_order_always_zero_flag) { + result |= (p_old_slice->delta_pic_order_cnt[0] != + currSlice->delta_pic_order_cnt[0]); + if (p_Vid->active_pps-> + bottom_field_pic_order_in_frame_present_flag == 1 && + !currSlice->field_pic_flag) { + result |= (p_old_slice-> + delta_pic_order_cnt[1] != + currSlice->delta_pic_order_cnt[1]); + } + } + } + +#if (MVC_EXTENSION_ENABLE) + result |= (currSlice->view_id != p_old_slice->view_id); + result |= (currSlice->inter_view_flag != p_old_slice->inter_view_flag); + result |= (currSlice->anchor_pic_flag != p_old_slice->anchor_pic_flag); +#endif + result |= (currSlice->layer_id != p_old_slice->layer_id); + return result; +} +#else +int is_new_picture(struct StorablePicture *dec_picture, + struct h264_dpb_stru *p_H264_Dpb, + struct OldSliceParams *p_old_slice) +{ + int ret = 0; + + if (p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE] == 0) + ret = 1; + return ret; +} + +#endif + +/* +* release bufspec and pic for picture not in dpb buf +*/ +int release_picture(struct h264_dpb_stru *p_H264_Dpb, + struct StorablePicture *pic) +{ + 
struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + + if (p_Dpb->last_picture == NULL) { + if (pic->colocated_buf_index >= 0) { + release_colocate_buf(p_H264_Dpb, + pic->colocated_buf_index); + pic->colocated_buf_index = -1; + } + release_buf_spec_num(p_H264_Dpb->vdec, pic->buf_spec_num); + } else { + if (pic->buf_spec_is_alloced == 1) + release_buf_spec_num(p_H264_Dpb->vdec, + pic->buf_spec_num); + } + + free_picture(p_H264_Dpb, pic); + return 0; +} + +#ifdef ERROR_HANDLE_TEST +/* +* remove all pictures in dpb and release bufspec/pic of them +*/ +void remove_dpb_pictures(struct h264_dpb_stru *p_H264_Dpb) +{ + /* struct VideoParameters *p_Vid = p_Dpb->p_Vid; */ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + struct Slice *currSlice = &p_H264_Dpb->mSlice; + unsigned i, j; + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + + if (!p_Dpb->init_done) + return; + + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->colocated_buf_index >= 0) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "release_colocate_buf[%d] for fs[%d]\n", + p_Dpb->fs[i]->colocated_buf_index, i); + + release_colocate_buf(p_H264_Dpb, + p_Dpb->fs[i]->colocated_buf_index); /* rain */ + p_Dpb->fs[i]->colocated_buf_index = -1; + } + if (!p_Dpb->fs[i]->pre_output) { + release_buf_spec_num(p_H264_Dpb->vdec, + p_Dpb->fs[i]->buf_spec_num); + p_Dpb->fs[i]->buf_spec_num = -1; + } + remove_frame_from_dpb(p_H264_Dpb, i); + } + + for (i = 0; i < p_Dpb->used_size; i++) { + p_Dpb->fs_ref[i] = NULL; + p_Dpb->fs_ltref[i] = NULL; + p_Dpb->fs_list0[i] = NULL; + p_Dpb->fs_list1[i] = NULL; + p_Dpb->fs_listlt[i] = NULL; + } + for (i = 0; i < 2; i++) { + currSlice->listXsize[i] = 0; + for (j = 0; j < (MAX_LIST_SIZE * 2); j++) + currSlice->listX[i][j] = NULL; + } + p_Dpb->ref_frames_in_buffer = 0; + p_Dpb->ltref_frames_in_buffer = 0; + p_Dpb->last_output_poc = INT_MIN; +} +#endif + +static void check_frame_store_same_pic_num(struct 
DecodedPictureBuffer *p_Dpb, + struct StorablePicture *p, struct Slice *currSlice) +{ + if (p_Dpb->last_picture) { + if ((int)p_Dpb->last_picture->frame_num == p->pic_num) { + if (((p->structure == TOP_FIELD) && + (p_Dpb->last_picture->is_used == 2)) || + ((p->structure == BOTTOM_FIELD) && + (p_Dpb->last_picture->is_used == 1))) { + if ((p->used_for_reference && + (p_Dpb->last_picture-> + is_orig_reference != 0)) || + (!p->used_for_reference && + (p_Dpb->last_picture-> + is_orig_reference == 0))) { + p->buf_spec_num = + p_Dpb->last_picture-> + buf_spec_num; + p->buf_spec_is_alloced = 0; + p->colocated_buf_index = p_Dpb-> + last_picture-> + colocated_buf_index; + if (currSlice->structure == + TOP_FIELD) { + p->bottom_poc = + p_Dpb->last_picture-> + bottom_field->poc; + } else { + p->top_poc = + p_Dpb->last_picture-> + top_field->poc; + } + p->frame_poc = imin(p->bottom_poc, + p->top_poc); + } + } + } + } +} + +int h264_slice_header_process(struct h264_dpb_stru *p_H264_Dpb, int *frame_num_gap) +{ + + int new_pic_flag = 0; + struct Slice *currSlice = &p_H264_Dpb->mSlice; + struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo; + struct DecodedPictureBuffer *p_Dpb = + &p_H264_Dpb->mDPB; +#if 0 + new_pic_flag = is_new_picture(p_H264_Dpb->mVideo.dec_picture, + p_H264_Dpb, + &p_H264_Dpb->mVideo.old_slice); + + if (new_pic_flag) { /* new picture */ + if (p_H264_Dpb->mVideo.dec_picture) { + store_picture_in_dpb(p_H264_Dpb, + p_H264_Dpb->mVideo.dec_picture); + /* dump_dpb(&p_H264_Dpb->mDPB); */ + } + } +#else + new_pic_flag = (p_H264_Dpb->mVideo.dec_picture == NULL); +#endif + p_H264_Dpb->buf_alloc_fail = 0; + p_H264_Dpb->dpb_error_flag = 0; + slice_prepare(p_H264_Dpb, &p_H264_Dpb->mDPB, &p_H264_Dpb->mVideo, + &p_H264_Dpb->mSPS, &p_H264_Dpb->mSlice); + + if (p_Dpb->num_ref_frames != p_H264_Dpb->mSPS.num_ref_frames) { + dpb_print(p_H264_Dpb->decoder_index, 0, + "num_ref_frames change from %d to %d\r\n", + p_Dpb->num_ref_frames, p_H264_Dpb->mSPS.num_ref_frames); + 
p_Dpb->num_ref_frames = p_H264_Dpb->mSPS.num_ref_frames; + } + /* if (p_Vid->active_sps != sps) { */ + if (p_H264_Dpb->mDPB.init_done == 0) { + /*init_global_buffers(p_Vid, 0); + * ** * *if (!p_Vid->no_output_of_prior_pics_flag) + ** * *{ + ** * * flush_dpb(p_Vid->p_Dpb_layer[0]); + ** * *} + ** * *init_dpb(p_Vid, p_Vid->p_Dpb_layer[0], 0); + */ + init_dpb(p_H264_Dpb, 0); + } + + + if (new_pic_flag) { /* new picture */ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "check frame_num gap: cur frame_num %d pre_frame_num %d max_frmae_num %d\r\n", + currSlice->frame_num, + p_Vid->pre_frame_num, + p_Vid->max_frame_num); + if (p_Vid->recovery_point == 0 && + p_Vid->max_frame_num <= FRAME_NUM_MAX_SIZE && + currSlice->frame_num != p_Vid->pre_frame_num && + currSlice->frame_num != + (p_Vid->pre_frame_num + 1) % p_Vid->max_frame_num) { + struct SPSParameters *active_sps = p_Vid->active_sps; + /*if (active_sps-> + *gaps_in_frame_num_value_allowed_flag + *== 0) { + * error("An unintentional + * loss of pictures occurs! 
Exit\n", + * 100); + *} + *if (p_Vid->conceal_mode == 0) + */ + if (active_sps->frame_num_gap_allowed) + fill_frame_num_gap(p_Vid, currSlice); + *frame_num_gap = 1; + } + + if (currSlice->nal_reference_idc) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "nal_reference_idc not 0, set pre_frame_num(%d) to frame_num (%d)\n", + p_Vid->pre_frame_num, currSlice->frame_num); + p_Vid->pre_frame_num = currSlice->frame_num; + } + + decode_poc(&p_H264_Dpb->mVideo, &p_H264_Dpb->mSlice); + p_H264_Dpb->mVideo.dec_picture = get_new_pic(p_H264_Dpb, + p_H264_Dpb->mSlice.structure, + /*p_Vid->width, p_Vid->height, + * p_Vid->width_cr, + * p_Vid->height_cr, + */ + 1); + if (p_H264_Dpb->mVideo.dec_picture) { + u32 offset_lo, offset_hi; + struct DecodedPictureBuffer *p_Dpb = + &p_H264_Dpb->mDPB; + struct StorablePicture *p = + p_H264_Dpb->mVideo.dec_picture; + init_picture(p_H264_Dpb, &p_H264_Dpb->mSlice, + p_H264_Dpb->mVideo.dec_picture); +#if 1 + /* rain */ + offset_lo = + p_H264_Dpb->dpb_param.l.data[OFFSET_DELIMITER_LO]; + offset_hi = + p_H264_Dpb->dpb_param.l.data[OFFSET_DELIMITER_HI]; + p_H264_Dpb->mVideo.dec_picture->offset_delimiter = + (offset_lo | offset_hi << 16); + p_H264_Dpb->mVideo.dec_picture->buf_spec_num = -1; + p_H264_Dpb->mVideo.dec_picture-> + colocated_buf_index = -1; + update_pic_num(p_H264_Dpb); + + if ((currSlice->structure == TOP_FIELD) || + (currSlice->structure == BOTTOM_FIELD)) { + /* check for frame store with same + * pic_number + */ + check_frame_store_same_pic_num(p_Dpb, p, + currSlice); + } + + if (p_H264_Dpb->mVideo.dec_picture->buf_spec_num == + -1) { + p_H264_Dpb->mVideo.dec_picture->buf_spec_num = + get_free_buf_idx(p_H264_Dpb->vdec); + if (p_H264_Dpb->mVideo.dec_picture->buf_spec_num + < 0) { + p_H264_Dpb->buf_alloc_fail = 1; + p_H264_Dpb->mVideo.dec_picture-> + buf_spec_is_alloced = 0; + } else + p_H264_Dpb->mVideo.dec_picture-> + buf_spec_is_alloced = 1; + + if (p_H264_Dpb->mVideo.dec_picture-> + used_for_reference) { + 
p_H264_Dpb->mVideo.dec_picture-> + colocated_buf_index = + allocate_colocate_buf( + p_H264_Dpb); + } + } +#endif + if (post_picture_early(p_H264_Dpb->vdec, + p_H264_Dpb->mVideo.dec_picture->buf_spec_num)) + return -1; + } + } + + + + if (p_H264_Dpb->mSlice.slice_type == P_SLICE) + init_lists_p_slice(&p_H264_Dpb->mSlice); + else if (p_H264_Dpb->mSlice.slice_type == B_SLICE) + init_lists_b_slice(&p_H264_Dpb->mSlice); + else + init_lists_i_slice(&p_H264_Dpb->mSlice); + + reorder_lists(&p_H264_Dpb->mSlice); + + if (p_H264_Dpb->mSlice.structure == FRAME) + init_mbaff_lists(p_H264_Dpb, &p_H264_Dpb->mSlice); + + if (new_pic_flag) + return 1; + + return 0; +} + +enum PictureStructure get_cur_slice_picture_struct( + struct h264_dpb_stru *p_H264_Dpb) +{ + struct Slice *currSlice = &p_H264_Dpb->mSlice; + return currSlice->structure; +} + +static unsigned char is_pic_in_dpb(struct h264_dpb_stru *p_H264_Dpb, + struct StorablePicture *pic) +{ + unsigned char ret = 0; + int i; + struct DecodedPictureBuffer *p_Dpb = + &p_H264_Dpb->mDPB; + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->top_field == pic || + p_Dpb->fs[i]->bottom_field == pic || + p_Dpb->fs[i]->frame == pic) { + ret = 1; + break; + } + } + return ret; +} + +int dpb_check_ref_list_error( + struct h264_dpb_stru *p_H264_Dpb) +{ + int i; + /*int j;*/ + struct Slice *currSlice = &p_H264_Dpb->mSlice; + /* in first output, ignore ref check */ + if ((p_H264_Dpb->first_insert_frame == FirstInsertFrm_OUT) && + (p_H264_Dpb->mVideo.dec_picture) && + p_H264_Dpb->first_output_poc > p_H264_Dpb->mVideo.dec_picture->poc) { + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "p_H264_Dpb->first_output_poc %d, p_H264_Dpb->mVideo.dec_picture->poc %d\n", + p_H264_Dpb->first_output_poc, p_H264_Dpb->mVideo.dec_picture->poc); + return 0; + } + if ((currSlice->slice_type != I_SLICE) && + (currSlice->slice_type != SI_SLICE)) { + for (i = 0; i < currSlice->listXsize[0]; i++) { + /*for (j = i + 1; j < 
currSlice->listXsize[0]; j++) { + if(currSlice->listX[0][i]->pic_num == + currSlice->listX[0][j]->pic_num) + return 1; + }*/ + if (currSlice->listX[0][i] == NULL) + return 5; + if (!is_pic_in_dpb(p_H264_Dpb, + currSlice->listX[0][i])) + return 1; + if (currSlice->listX[0][i]->frame && + currSlice->listX[0][i]->frame->non_existing) + return 3; + } + } + + if (currSlice->slice_type == B_SLICE) { + for (i = 0; i < currSlice->listXsize[1]; i++) { + /*for (j = i + 1; j < currSlice->listXsize[1]; j++) { + if(currSlice->listX[1][i]->pic_num == + currSlice->listX[1][j]->pic_num) + return 2; + } + for (j = 0; j < currSlice->listXsize[0]; j++) { + if(currSlice->listX[1][i]->pic_num == + currSlice->listX[0][j]->pic_num) + return 3; + }*/ + if (currSlice->listX[1][i] == NULL) + return 6; + if (!is_pic_in_dpb(p_H264_Dpb, + currSlice->listX[1][i])) + return 2; + if (currSlice->listX[1][i]->frame && + currSlice->listX[1][i]->frame->non_existing) + return 4; +#if 0 + if (currSlice->listXsize[0] == 1 && + currSlice->listXsize[1] == 1 && + currSlice->listX[1][0] == + currSlice->listX[0][0]) + return 3; +#endif + } + } + return 0; +} +
diff --git a/drivers/frame_provider/decoder/h264_multi/h264_dpb.h b/drivers/frame_provider/decoder/h264_multi/h264_dpb.h new file mode 100644 index 0000000..8b3e8bb --- /dev/null +++ b/drivers/frame_provider/decoder/h264_multi/h264_dpb.h
@@ -0,0 +1,1005 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef H264_DPB_H_ +#define H264_DPB_H_ + +#define ERROR_CHECK + +#define OUTPUT_BUFFER_IN_C + +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_VDEC_STATUS 0X0001 +#define PRINT_FLAG_UCODE_EVT 0x0002 +#define PRINT_FLAG_MMU_DETAIL 0x0004 +#define PRINT_FLAG_ERRORFLAG_DBG 0x0008 +#define PRINT_FLAG_DPB_DETAIL 0x0010 +#define PRINT_FLAG_DEC_DETAIL 0x0020 +#define PRINT_FLAG_VDEC_DETAIL 0x0040 +#define PRINT_FLAG_DUMP_DPB 0x0080 +#define PRINT_FRAMEBASE_DATA 0x0100 +#define PRINT_FLAG_DEBUG_POC 0x0200 +#define RRINT_FLAG_RPM 0x0400 +#define DEBUG_DISABLE_RUNREADY_RMBUF 0x0800 +#define PRINT_FLAG_DUMP_BUFSPEC 0x1000 +#define PRINT_FLAG_FCC_STATUS 0x2000 +#define PRINT_FLAG_SEI_DETAIL 0x4000 +#define PRINT_FLAG_V4L_DETAIL 0x8000 +#define DISABLE_ERROR_HANDLE 0x10000 +#define DEBUG_DUMP_STAT 0x80000 +#define DEBUG_TIMEOUT_DEC_STAT 0x800000 + +/*setting canvas mode and endian. + if this flag is set, value of canvas mode + will according to the value of mem_map_mode. + endian will be forced set to 0 in + CANVAS_BLKMODE_LINEAR mode. + otherwise picture will display abnormal. 
+ if this flag is not set, value of canvas mode + will be determined by the user speace config. + endian will be set 7 in CANVAS_BLKMODE_LINEAR mode. +*/ +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 + +#define MVC_EXTENSION_ENABLE 0 +#define PRINTREFLIST 0 + +#define MAX_LIST_SIZE 33 + +#define H264_OUTPUT_MODE_NORMAL 0x4 +#define H264_OUTPUT_MODE_FAST 0x8 + +//#define FALSE 0 + +#define H264_SLICE_HEAD_DONE 0x01 +#define H264_PIC_DATA_DONE 0x02 +/*#define H264_SPS_DONE 0x03*/ +/*#define H264_PPS_DONE 0x04*/ +/*#define H264_SLICE_DATA_DONE 0x05*/ +/*#define H264_DATA_END 0x06*/ + +#define H264_CONFIG_REQUEST 0x11 +#define H264_DATA_REQUEST 0x12 +#define H264_WRRSP_REQUEST 0x13 +#define H264_WRRSP_DONE 0x14 + +#define H264_DECODE_BUFEMPTY 0x20 +#define H264_DECODE_TIMEOUT 0x21 +#define H264_SEARCH_BUFEMPTY 0x22 +#define H264_DECODE_OVER_SIZE 0x23 + +#define VIDEO_SIGNAL_LOW 0x26 +#define VIDEO_SIGNAL_HIGHT 0x27 + + +#define H264_FIND_NEXT_PIC_NAL 0x50 +#define H264_FIND_NEXT_DVEL_NAL 0x51 +#define H264_AUX_DATA_READY 0x52 + +#define H264_SEI_DATA_READY 0x53 +#define H264_SEI_DATA_DONE 0x54 + + /* 0x8x, search state*/ +#define H264_STATE_SEARCH_AFTER_SPS 0x80 +#define H264_STATE_SEARCH_AFTER_PPS 0x81 +#define H264_STATE_PARSE_SLICE_HEAD 0x82 +#define H264_STATE_SEARCH_HEAD 0x83 + /**/ +#define H264_ACTION_SEARCH_HEAD 0xf0 +#define H264_ACTION_DECODE_SLICE 0xf1 +#define H264_ACTION_CONFIG_DONE 0xf2 +#define H264_ACTION_DECODE_NEWPIC 0xf3 +#define H264_ACTION_DECODE_START 0xff + +#define RPM_BEGIN 0x0 +#define RPM_END 0x400 + +#define val(s) (s[0]|(s[1]<<16)) + +#define FRAME_IN_DPB 24 +#define DPB_OFFSET 0x100 +#define MMCO_OFFSET 0x200 +union param { +#if 0 +#define H_TIME_STAMP_START 0X00 +#define H_TIME_STAMP_END 0X17 +#define PTS_ZERO_0 0X18 +#define PTS_ZERO_1 0X19 +#endif +#define FIXED_FRAME_RATE_FLAG 0X21 + +#define OFFSET_DELIMITER_LO 0x2f +#define OFFSET_DELIMITER_HI 0x30 + + +#define SLICE_IPONLY_BREAK 0X5C +#define PREV_MAX_REFERENCE_FRAME_NUM 0X5D +#define 
EOS 0X5E +#define FRAME_PACKING_TYPE 0X5F +#define OLD_POC_PAR_1 0X60 +#define OLD_POC_PAR_2 0X61 +#define PREV_MBX 0X62 +#define PREV_MBY 0X63 +#define ERROR_SKIP_MB_NUM 0X64 +#define ERROR_MB_STATUS 0X65 +#define L0_PIC0_STATUS 0X66 +#define TIMEOUT_COUNTER 0X67 +#define BUFFER_SIZE 0X68 +#define BUFFER_SIZE_HI 0X69 +#define CROPPING_LEFT_RIGHT 0X6A +#define CROPPING_TOP_BOTTOM 0X6B +#if 1 + /* sps_flags2: + *bit 3, bitstream_restriction_flag + *bit 2, pic_struct_present_flag + *bit 1, vcl_hrd_parameters_present_flag + *bit 0, nal_hrd_parameters_present_flag + */ +#define SPS_FLAGS2 0x6c +#define NUM_REORDER_FRAMES 0x6d +#else +#define POC_SELECT_NEED_SWAP 0X6C +#define POC_SELECT_SWAP 0X6D +#endif +#define MAX_BUFFER_FRAME 0X6E + +#define NON_CONFORMING_STREAM 0X70 +#define RECOVERY_POINT 0X71 +#define POST_CANVAS 0X72 +#define POST_CANVAS_H 0X73 +#define SKIP_PIC_COUNT 0X74 +#define TARGET_NUM_SCALING_LIST 0X75 +#define FF_POST_ONE_FRAME 0X76 +#define PREVIOUS_BIT_CNT 0X77 +#define MB_NOT_SHIFT_COUNT 0X78 +#define PIC_STATUS 0X79 +#define FRAME_COUNTER 0X7A +#define NEW_SLICE_TYPE 0X7B +#define NEW_PICTURE_STRUCTURE 0X7C +#define NEW_FRAME_NUM 0X7D +#define NEW_IDR_PIC_ID 0X7E +#define IDR_PIC_ID 0X7F + +/* h264 LOCAL */ +#define NAL_UNIT_TYPE 0X80 +#define NAL_REF_IDC 0X81 +#define SLICE_TYPE 0X82 +#define LOG2_MAX_FRAME_NUM 0X83 +#define FRAME_MBS_ONLY_FLAG 0X84 +#define PIC_ORDER_CNT_TYPE 0X85 +#define LOG2_MAX_PIC_ORDER_CNT_LSB 0X86 +#define PIC_ORDER_PRESENT_FLAG 0X87 +#define REDUNDANT_PIC_CNT_PRESENT_FLAG 0X88 +#define PIC_INIT_QP_MINUS26 0X89 +#define DEBLOCKING_FILTER_CONTROL_PRESENT_FLAG 0X8A +#define NUM_SLICE_GROUPS_MINUS1 0X8B +#define MODE_8X8_FLAGS 0X8C +#define ENTROPY_CODING_MODE_FLAG 0X8D +#define SLICE_QUANT 0X8E +#define TOTAL_MB_HEIGHT 0X8F +#define PICTURE_STRUCTURE 0X90 +#define TOP_INTRA_TYPE 0X91 +#define RV_AI_STATUS 0X92 +#define AI_READ_START 0X93 +#define AI_WRITE_START 0X94 +#define AI_CUR_BUFFER 0X95 +#define AI_DMA_BUFFER 0X96 
+#define AI_READ_OFFSET 0X97 +#define AI_WRITE_OFFSET 0X98 +#define AI_WRITE_OFFSET_SAVE 0X99 +#define RV_AI_BUFF_START 0X9A +#define I_PIC_MB_COUNT 0X9B +#define AI_WR_DCAC_DMA_CTRL 0X9C +#define SLICE_MB_COUNT 0X9D +#define PICTYPE 0X9E +#define SLICE_GROUP_MAP_TYPE 0X9F +#define MB_TYPE 0XA0 +#define MB_AFF_ADDED_DMA 0XA1 +#define PREVIOUS_MB_TYPE 0XA2 +#define WEIGHTED_PRED_FLAG 0XA3 +#define WEIGHTED_BIPRED_IDC 0XA4 +/* bit 3:2 - PICTURE_STRUCTURE + * bit 1 - MB_ADAPTIVE_FRAME_FIELD_FLAG + * bit 0 - FRAME_MBS_ONLY_FLAG + */ +#define MBFF_INFO 0XA5 +#define TOP_INTRA_TYPE_TOP 0XA6 + +#define RV_AI_BUFF_INC 0xa7 + +#define DEFAULT_MB_INFO_LO 0xa8 + +/* 0 -- no need to read + * 1 -- need to wait Left + * 2 -- need to read Intra + * 3 -- need to read back MV + */ +#define NEED_READ_TOP_INFO 0xa9 +/* 0 -- idle + * 1 -- wait Left + * 2 -- reading top Intra + * 3 -- reading back MV + */ +#define READ_TOP_INFO_STATE 0xaa +#define DCAC_MBX 0xab +#define TOP_MB_INFO_OFFSET 0xac +#define TOP_MB_INFO_RD_IDX 0xad +#define TOP_MB_INFO_WR_IDX 0xae + +#define VLD_NO_WAIT 0 +#define VLD_WAIT_BUFFER 1 +#define VLD_WAIT_HOST 2 +#define VLD_WAIT_GAP 3 + +#define VLD_WAITING 0xaf + +#define MB_X_NUM 0xb0 +/* #define MB_WIDTH 0xb1 */ +#define MB_HEIGHT 0xb2 +#define MBX 0xb3 +#define TOTAL_MBY 0xb4 +#define INTR_MSK_SAVE 0xb5 + +/* #define has_time_stamp 0xb6 */ +#define NEED_DISABLE_PPE 0xb6 +#define IS_NEW_PICTURE 0XB7 +#define PREV_NAL_REF_IDC 0XB8 +#define PREV_NAL_UNIT_TYPE 0XB9 +#define FRAME_MB_COUNT 0XBA +#define SLICE_GROUP_UCODE 0XBB +#define SLICE_GROUP_CHANGE_RATE 0XBC +#define SLICE_GROUP_CHANGE_CYCLE_LEN 0XBD +#define DELAY_LENGTH 0XBE +#define PICTURE_STRUCT 0XBF +/* #define pre_picture_struct 0xc0 */ +#define DCAC_PREVIOUS_MB_TYPE 0xc1 + +#define TIME_STAMP 0XC2 +#define H_TIME_STAMP 0XC3 +#define VPTS_MAP_ADDR 0XC4 +#define H_VPTS_MAP_ADDR 0XC5 + +/*#define MAX_DPB_SIZE 0XC6*/ +#define PIC_INSERT_FLAG 0XC7 + +#define TIME_STAMP_START 0XC8 +#define TIME_STAMP_END 
0XDF + +#define OFFSET_FOR_NON_REF_PIC 0XE0 +#define OFFSET_FOR_TOP_TO_BOTTOM_FIELD 0XE2 +#define MAX_REFERENCE_FRAME_NUM 0XE4 +#define FRAME_NUM_GAP_ALLOWED 0XE5 +#define NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE 0XE6 +#define PROFILE_IDC_MMCO 0XE7 +#define LEVEL_IDC_MMCO 0XE8 +#define FRAME_SIZE_IN_MB 0XE9 +#define DELTA_PIC_ORDER_ALWAYS_ZERO_FLAG 0XEA +#define PPS_NUM_REF_IDX_L0_ACTIVE_MINUS1 0XEB +#define PPS_NUM_REF_IDX_L1_ACTIVE_MINUS1 0XEC +#define CURRENT_SPS_ID 0XED +#define CURRENT_PPS_ID 0XEE +/* bit 0 - sequence parameter set may change + * bit 1 - picture parameter set may change + * bit 2 - new dpb just inited + * bit 3 - IDR picture not decoded yet + * bit 5:4 - 0: mb level code loaded 1: picture + * level code loaded 2: slice level code loaded + */ +#define DECODE_STATUS 0XEF +#define FIRST_MB_IN_SLICE 0XF0 +#define PREV_MB_WIDTH 0XF1 +#define PREV_FRAME_SIZE_IN_MB 0XF2 +/*#define MAX_REFERENCE_FRAME_NUM_IN_MEM 0XF3*/ +/* bit 0 - aspect_ratio_info_present_flag + * bit 1 - timing_info_present_flag + * bit 2 - nal_hrd_parameters_present_flag + * bit 3 - vcl_hrd_parameters_present_flag + * bit 4 - pic_struct_present_flag + * bit 5 - bitstream_restriction_flag + */ +#define VUI_STATUS 0XF4 +#define ASPECT_RATIO_IDC 0XF5 +#define ASPECT_RATIO_SAR_WIDTH 0XF6 +#define ASPECT_RATIO_SAR_HEIGHT 0XF7 +#define NUM_UNITS_IN_TICK 0XF8 +#define TIME_SCALE 0XFA +#define CURRENT_PIC_INFO 0XFC +#define DPB_BUFFER_INFO 0XFD +#define REFERENCE_POOL_INFO 0XFE +#define REFERENCE_LIST_INFO 0XFF + struct{ + unsigned short data[RPM_END-RPM_BEGIN]; + } l; + struct{ + unsigned short dump[DPB_OFFSET]; + unsigned short dpb_base[FRAME_IN_DPB<<3]; + + unsigned short dpb_max_buffer_frame; + unsigned short actual_dpb_size; + + unsigned short colocated_buf_status; + + unsigned short num_forward_short_term_reference_pic; + unsigned short num_short_term_reference_pic; + unsigned short num_reference_pic; + + unsigned short current_dpb_index; + unsigned short current_decoded_frame_num; + 
unsigned short current_reference_frame_num; + + unsigned short l0_size; + unsigned short l1_size; + + /* [6:5] : nal_ref_idc */ + /* [4:0] : nal_unit_type */ + unsigned short NAL_info_mmco; + + /* [1:0] : 00 - top field, 01 - bottom field, + * 10 - frame, 11 - mbaff frame + */ + unsigned short picture_structure_mmco; + + unsigned short frame_num; + unsigned short pic_order_cnt_lsb; + + unsigned short num_ref_idx_l0_active_minus1; + unsigned short num_ref_idx_l1_active_minus1; + + unsigned short PrevPicOrderCntLsb; + unsigned short PreviousFrameNum; + + /* 32 bits variables */ + unsigned short delta_pic_order_cnt_bottom[2]; + unsigned short delta_pic_order_cnt_0[2]; + unsigned short delta_pic_order_cnt_1[2]; + + unsigned short PrevPicOrderCntMsb[2]; + unsigned short PrevFrameNumOffset[2]; + + unsigned short frame_pic_order_cnt[2]; + unsigned short top_field_pic_order_cnt[2]; + unsigned short bottom_field_pic_order_cnt[2]; + + unsigned short colocated_mv_addr_start[2]; + unsigned short colocated_mv_addr_end[2]; + unsigned short colocated_mv_wr_addr[2]; + + unsigned short frame_crop_left_offset; + unsigned short frame_crop_right_offset; + unsigned short frame_crop_top_offset; + unsigned short frame_crop_bottom_offset; + unsigned short chroma_format_idc; + } dpb; + struct { + unsigned short dump[MMCO_OFFSET]; + + /* array base address for offset_for_ref_frame */ + unsigned short offset_for_ref_frame_base[128]; + + /* 0 - Index in DPB + * 1 - Picture Flag + * [ 2] : 0 - short term reference, + * 1 - long term reference + * [ 1] : bottom field + * [ 0] : top field + * 2 - Picture Number (short term or long term) low 16 bits + * 3 - Picture Number (short term or long term) high 16 bits + */ + unsigned short reference_base[128]; + + /* command and parameter, until command is 3 */ + unsigned short l0_reorder_cmd[66]; + unsigned short l1_reorder_cmd[66]; + + /* command and parameter, until command is 0 */ + unsigned short mmco_cmd[44]; + + unsigned short l0_base[40]; + 
unsigned short l1_base[40]; + } mmco; + struct { + /* from ucode lmem, do not change this struct */ + } p; +}; + + +struct StorablePicture; +struct VideoParameters; +struct DecodedPictureBuffer; + +/* New enum for field processing */ +enum PictureStructure { + FRAME, + TOP_FIELD, + BOTTOM_FIELD +}; + +typedef enum { + PIC_SINGLE_FRAME = 0, + PIC_TOP, + PIC_BOT, + PIC_TOP_BOT, + PIC_BOT_TOP, + PIC_TOP_BOT_TOP = 5, + PIC_BOT_TOP_BOT, + PIC_DOUBLE_FRAME, + PIC_TRIPLE_FRAME, + PIC_INVALID, +} PicStruct_E; + +#define I_Slice 2 +#define P_Slice 5 +#define B_Slice 6 +#define P_Slice_0 0 +#define B_Slice_1 1 +#define I_Slice_7 7 + +enum SliceType { + P_SLICE = 0, + B_SLICE = 1, + I_SLICE = 2, + SP_SLICE = 3, + SI_SLICE = 4, + NUM_SLICE_TYPES = 5 +}; + +enum ProfileIDC { + FREXT_CAVLC444 = 44, /*!< YUV 4:4:4/14 "CAVLC 4:4:4"*/ + BASELINE = 66, /*!< YUV 4:2:0/8 "Baseline"*/ + MAIN = 77, /*!< YUV 4:2:0/8 "Main"*/ + EXTENDED = 88, /*!< YUV 4:2:0/8 "Extended"*/ + FREXT_HP = 100, /*!< YUV 4:2:0/8 "High"*/ + FREXT_Hi10P = 110, /*!< YUV 4:2:0/10 "High 10"*/ + FREXT_Hi422 = 122, /*!< YUV 4:2:2/10 "High 4:2:2"*/ + FREXT_Hi444 = 244, /*!< YUV 4:4:4/14 "High 4:4:4"*/ + MVC_HIGH = 118, /*!< YUV 4:2:0/8 "Multiview High"*/ + STEREO_HIGH = 128 /*!< YUV 4:2:0/8 "Stereo High"*/ +}; + +enum FirstInsertFrm_State { + FirstInsertFrm_IDLE = 0, + FirstInsertFrm_OUT = 1, + FirstInsertFrm_RESET = 2, + FirstInsertFrm_SKIPDONE = 3, +}; + + +struct SPSParameters { + unsigned int profile_idc; + unsigned int level_idc; + int pic_order_cnt_type; + int log2_max_pic_order_cnt_lsb_minus4; + int num_ref_frames_in_pic_order_cnt_cycle; + short offset_for_ref_frame[128]; + short offset_for_non_ref_pic; + short offset_for_top_to_bottom_field; + + /**/ + int frame_mbs_only_flag; + int num_ref_frames; + int max_dpb_size; + int log2_max_frame_num_minus4; + int frame_num_gap_allowed; +}; + +#define DEC_REF_PIC_MARKING_BUFFER_NUM_MAX 45 +struct DecRefPicMarking_s { + int memory_management_control_operation; + int 
difference_of_pic_nums_minus1; + int long_term_pic_num; + int long_term_frame_idx; + int max_long_term_frame_idx_plus1; + struct DecRefPicMarking_s *Next; +}; + +#define REORDERING_COMMAND_MAX_SIZE 33 +struct Slice { + int first_mb_in_slice; + int mode_8x8_flags; + int picture_structure_mmco; + + int frame_num; + int idr_flag; + int toppoc; + int bottompoc; + int framepoc; + int pic_order_cnt_lsb; + int PicOrderCntMsb; + unsigned char field_pic_flag; + unsigned char bottom_field_flag; + int ThisPOC; + int nal_reference_idc; + int AbsFrameNum; + int delta_pic_order_cnt_bottom; + int delta_pic_order_cnt[2]; + + /**/ + char listXsize[6]; + struct StorablePicture *listX[6][MAX_LIST_SIZE * 2]; + + /**/ + enum PictureStructure structure; + int long_term_reference_flag; + int no_output_of_prior_pics_flag; + int adaptive_ref_pic_buffering_flag; + + struct VideoParameters *p_Vid; + struct DecodedPictureBuffer *p_Dpb; + int num_ref_idx_active[2]; /* number of available list references */ + + /*modification*/ + int slice_type; /* slice type */ + int ref_pic_list_reordering_flag[2]; + int modification_of_pic_nums_idc[2][REORDERING_COMMAND_MAX_SIZE]; + int abs_diff_pic_num_minus1[2][REORDERING_COMMAND_MAX_SIZE]; + int long_term_pic_idx[2][REORDERING_COMMAND_MAX_SIZE]; + /**/ + unsigned char dec_ref_pic_marking_buffer_valid; + struct DecRefPicMarking_s + dec_ref_pic_marking_buffer[DEC_REF_PIC_MARKING_BUFFER_NUM_MAX]; + int pic_struct; +}; + +struct OldSliceParams { + unsigned int field_pic_flag; + unsigned int frame_num; + int nal_ref_idc; + unsigned int pic_oder_cnt_lsb; + int delta_pic_oder_cnt_bottom; + int delta_pic_order_cnt[2]; + unsigned char bottom_field_flag; + unsigned char idr_flag; + int idr_pic_id; + int pps_id; +#if (MVC_EXTENSION_ENABLE) + int view_id; + int inter_view_flag; + int anchor_pic_flag; +#endif + int layer_id; +}; + +struct VideoParameters { + int PrevPicOrderCntMsb; + int PrevPicOrderCntLsb; + unsigned char last_has_mmco_5; + unsigned char 
last_pic_bottom_field; + int ThisPOC; + int PreviousFrameNum; + int FrameNumOffset; + int PreviousFrameNumOffset; + int max_frame_num; + unsigned int pre_frame_num; + int ExpectedDeltaPerPicOrderCntCycle; + int PicOrderCntCycleCnt; + int FrameNumInPicOrderCntCycle; + int ExpectedPicOrderCnt; + + /**/ + struct SPSParameters *active_sps; + struct Slice **ppSliceList; + int iSliceNumOfCurrPic; + int conceal_mode; + int earlier_missing_poc; + int pocs_in_dpb[100]; + + struct OldSliceParams old_slice; + /**/ + struct StorablePicture *dec_picture; + struct StorablePicture *no_reference_picture; + + /*modification*/ + int non_conforming_stream; + int recovery_point; +}; + +static inline int imin(int a, int b) +{ + return ((a) < (b)) ? (a) : (b); +} + +static inline int imax(int a, int b) +{ + return ((a) > (b)) ? (a) : (b); +} + +#define MAX_PIC_BUF_NUM 128 +#define MAX_NUM_SLICES 50 + +struct StorablePicture { +/**/ + int width; + int height; + + int y_canvas_index; + int u_canvas_index; + int v_canvas_index; +/**/ + int index; + unsigned char is_used; + + enum PictureStructure structure; + + int poc; + int top_poc; + int bottom_poc; + int frame_poc; + unsigned int frame_num; + unsigned int recovery_frame; + + int pic_num; + int buf_spec_num; + int buf_spec_is_alloced; + int colocated_buf_index; + int long_term_pic_num; + int long_term_frame_idx; + + unsigned char is_long_term; + int used_for_reference; + int is_output; +#if 1 + /* rain */ + int pre_output; +#endif + int non_existing; + int separate_colour_plane_flag; + + short max_slice_id; + + int size_x, size_y, size_x_cr, size_y_cr; + int size_x_m1, size_y_m1, size_x_cr_m1, size_y_cr_m1; + int coded_frame; + int mb_aff_frame_flag; + unsigned int PicWidthInMbs; + unsigned int PicSizeInMbs; + int iLumaPadY, iLumaPadX; + int iChromaPadY, iChromaPadX; + + /* for mb aff, if frame for referencing the top field */ + struct StorablePicture *top_field; + /* for mb aff, if frame for referencing the bottom field */ + struct 
StorablePicture *bottom_field; + /* for mb aff, if field for referencing the combined frame */ + struct StorablePicture *frame; + + int slice_type; + int idr_flag; + int no_output_of_prior_pics_flag; + int long_term_reference_flag; + int adaptive_ref_pic_buffering_flag; + + int chroma_format_idc; + int frame_mbs_only_flag; + int frame_cropping_flag; + int frame_crop_left_offset; + int frame_crop_right_offset; + int frame_crop_top_offset; + int frame_crop_bottom_offset; + int qp; + int chroma_qp_offset[2]; + int slice_qp_delta; + /* stores the memory management control operations */ + struct DecRefPicMarking_s *dec_ref_pic_marking_buffer; + + /* picture error concealment */ + /*indicates if this is a concealed picture */ + int concealed_pic; + + /* variables for tone mapping */ + int seiHasTone_mapping; + int tone_mapping_model_id; + int tonemapped_bit_depth; + /* imgpel* tone_mapping_lut; tone mapping look up table */ + + int proc_flag; +#if (MVC_EXTENSION_ENABLE) + int view_id; + int inter_view_flag; + int anchor_pic_flag; +#endif + int iLumaStride; + int iChromaStride; + int iLumaExpandedHeight; + int iChromaExpandedHeight; + /* imgpel **cur_imgY; for more efficient get_block_luma */ + int no_ref; + int iCodingType; + + char listXsize[MAX_NUM_SLICES][2]; + struct StorablePicture **listX[MAX_NUM_SLICES][2]; + int layer_id; + u32 offset_delimiter; + u32 pts; + u64 pts64; + u64 timestamp; + unsigned char data_flag; + int pic_struct; + + /* picture qos infomation*/ + int frame_size; + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; + u32 pic_size; +}; + +struct FrameStore { + /* rain */ + int buf_spec_num; + /* rain */ + int colocated_buf_index; + + /* 0=empty; 1=top; 2=bottom; 3=both fields (or frame) */ + int is_used; + /* 0=not used for ref; 1=top used; 2=bottom used; + * 3=both fields (or frame) used + */ + int is_reference; + /* 0=not used for ref; 1=top used; 2=bottom used; + * 
3=both fields (or frame) used + */ + int is_long_term; + /* original marking by nal_ref_idc: 0=not used for ref; 1=top used; + * 2=bottom used; 3=both fields (or frame) used + */ + int is_orig_reference; + + int is_non_existent; + + unsigned int frame_num; + unsigned int recovery_frame; + + int frame_num_wrap; + int long_term_frame_idx; + int is_output; +#if 1 + /* rain */ + int pre_output; + /* index in gFrameStore */ + int index; +#define I_FLAG 0x01 +#define IDR_FLAG 0x02 +#define ERROR_FLAG 0x10 +#define NULL_FLAG 0x20 +#define NODISP_FLAG 0x80 + unsigned char data_flag; +#endif + int poc; + + /* picture error concealment */ + int concealment_reference; + + struct StorablePicture *frame; + struct StorablePicture *top_field; + struct StorablePicture *bottom_field; + +#if (MVC_EXTENSION_ENABLE) + int view_id; + int inter_view_flag[2]; + int anchor_pic_flag[2]; +#endif + int layer_id; + u32 offset_delimiter; + u32 pts; + u64 pts64; + u64 timestamp; + + + /* picture qos infomation*/ + int slice_type; + int frame_size; + + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; + int dpb_frame_count; + u32 hw_decode_time; + u32 frame_size2; // For recording the chunk->size in frame mode + bool show_frame; + struct dma_fence *fence; + u32 decoded_frame_size; +}; + +/* #define DPB_SIZE_MAX 16 */ +#define DPB_SIZE_MAX 32 +struct DecodedPictureBuffer { + struct VideoParameters *p_Vid; + /* InputParameters *p_Inp; ??? 
*/ + struct FrameStore *fs[DPB_SIZE_MAX]; + struct FrameStore *fs_ref[DPB_SIZE_MAX]; + struct FrameStore *fs_ltref[DPB_SIZE_MAX]; + /* inter-layer reference (for multi-layered codecs) */ + struct FrameStore *fs_ilref[DPB_SIZE_MAX]; + /**/ + struct FrameStore *fs_list0[DPB_SIZE_MAX]; + struct FrameStore *fs_list1[DPB_SIZE_MAX]; + struct FrameStore *fs_listlt[DPB_SIZE_MAX]; + + /**/ + unsigned int size; + unsigned int used_size; + unsigned int ref_frames_in_buffer; + unsigned int ltref_frames_in_buffer; + int last_output_poc; +#if (MVC_EXTENSION_ENABLE) + int last_output_view_id; +#endif + int max_long_term_pic_idx; + + + int init_done; + int first_pic_done; /*by rain*/ + int num_ref_frames; + + struct FrameStore *last_picture; + unsigned int used_size_il; + int layer_id; + + /* DPB related function; */ +}; + +struct h264_dpb_stru { + struct vdec_s *vdec; + int decoder_index; + + union param dpb_param; + + int decode_idx; + int buf_num; + int curr_POC; + int reorder_pic_num; + unsigned int dec_dpb_size; + u8 fast_output_enable; + /*poc_even_flag: + 0, init; 1, odd; 2, even*/ + u8 poc_even_odd_flag; + u32 decode_pic_count; + /**/ + unsigned int max_reference_size; + + unsigned int colocated_buf_map; + unsigned int colocated_buf_count; + unsigned int colocated_mv_addr_start; + unsigned int colocated_mv_addr_end; + unsigned int colocated_buf_size; + + struct DecodedPictureBuffer mDPB; + struct Slice mSlice; + struct VideoParameters mVideo; + struct SPSParameters mSPS; + + struct StorablePicture m_PIC[MAX_PIC_BUF_NUM]; + struct FrameStore mFrameStore[DPB_SIZE_MAX]; + + /*vui*/ + unsigned int vui_status; + unsigned int num_units_in_tick; + unsigned int time_scale; + unsigned int fixed_frame_rate_flag; + unsigned int aspect_ratio_idc; + unsigned int aspect_ratio_sar_width; + unsigned int aspect_ratio_sar_height; + u8 bitstream_restriction_flag; + u16 num_reorder_frames; + u16 max_dec_frame_buffering; + + unsigned int frame_crop_left_offset; + unsigned int 
frame_crop_right_offset; + unsigned int frame_crop_top_offset; + unsigned int frame_crop_bottom_offset; + unsigned int chroma_format_idc; + + unsigned int dec_dpb_status; + unsigned int last_dpb_status; + unsigned char buf_alloc_fail; + unsigned int dpb_error_flag; + unsigned int reorder_output; + unsigned int first_insert_frame; + int first_output_poc; + int dpb_frame_count; + u32 without_display_mode; + int long_term_reference_flag; +}; + + +extern unsigned int h264_debug_flag; +extern unsigned int h264_debug_mask; + +int dpb_print(int indext, int debug_flag, const char *fmt, ...); + +int dpb_print_cont(int index, int debug_flag, const char *fmt, ...); + +unsigned char dpb_is_debug(int index, int debug_flag); + +int prepare_display_buf(struct vdec_s *vdec, struct FrameStore *frame); + +int release_buf_spec_num(struct vdec_s *vdec, int buf_spec_num); + +void set_frame_output_flag(struct h264_dpb_stru *p_H264_Dpb, int index); + +int is_there_unused_frame_from_dpb(struct DecodedPictureBuffer *p_Dpb); + +int h264_slice_header_process(struct h264_dpb_stru *p_H264_Dpb, int *frame_num_gap); + +void dpb_init_global(struct h264_dpb_stru *p_H264_Dpb, + int id, int actual_dpb_size, int max_reference_size); + +void init_colocate_buf(struct h264_dpb_stru *p_H264_Dpb, int count); + +int release_colocate_buf(struct h264_dpb_stru *p_H264_Dpb, int index); + +int get_free_buf_idx(struct vdec_s *vdec); + +int store_picture_in_dpb(struct h264_dpb_stru *p_H264_Dpb, + struct StorablePicture *p, unsigned char data_flag); + +int release_picture(struct h264_dpb_stru *p_H264_Dpb, + struct StorablePicture *pic); + +void remove_dpb_pictures(struct h264_dpb_stru *p_H264_Dpb); + +void bufmgr_post(struct h264_dpb_stru *p_H264_Dpb); + +void bufmgr_force_recover(struct h264_dpb_stru *p_H264_Dpb); + +int get_long_term_flag_by_buf_spec_num(struct h264_dpb_stru *p_H264_Dpb, + int buf_spec_num); + +void bufmgr_h264_remove_unused_frame(struct h264_dpb_stru *p_H264_Dpb, + u8 force_flag); + +void 
flush_dpb(struct h264_dpb_stru *p_H264_Dpb); + +void print_pic_info(int decindex, const char *info, + struct StorablePicture *pic, + int slice_type); +void dump_dpb(struct DecodedPictureBuffer *p_Dpb, u8 force); + +void dump_pic(struct h264_dpb_stru *p_H264_Dpb); + +void * vh264_get_bufspec_lock(struct vdec_s *vdec); + +enum PictureStructure get_cur_slice_picture_struct( + struct h264_dpb_stru *p_H264_Dpb); + +int dpb_check_ref_list_error( + struct h264_dpb_stru *p_H264_Dpb); + +void unmark_for_reference(struct DecodedPictureBuffer *p_Dpb, + struct FrameStore *fs); + +void update_ref_list(struct DecodedPictureBuffer *p_Dpb); + +int post_picture_early(struct vdec_s *vdec, int index); + +int is_used_for_reference(struct FrameStore *fs); + +#endif
diff --git a/drivers/frame_provider/decoder/h264_multi/vmh264.c b/drivers/frame_provider/decoder/h264_multi/vmh264.c new file mode 100644 index 0000000..5932c91 --- /dev/null +++ b/drivers/frame_provider/decoder/h264_multi/vmh264.c
@@ -0,0 +1,11272 @@ +/* + * drivers/amlogic/amports/vh264.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/random.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/tsync.h> +#include <linux/workqueue.h> +#include <linux/dma-mapping.h> +#include <linux/atomic.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../utils/vdec_input.h" +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../utils/vdec.h" +#include "../utils/amvdec.h" +#include "../h264/vh264.h" +#include "../../../stream_input/amports/streambuf.h" +#include <linux/delay.h> +#include <linux/amlogic/media/codec_mm/configs.h> 
+#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include "../utils/firmware.h" +#include <linux/uaccess.h> +#include "../utils/config_parser.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../utils/vdec_v4l2_buffer_ops.h" +#include <linux/crc32.h> +#include <media/v4l2-mem2mem.h> +#include "../utils/vdec_feature.h" + + +#define DETECT_WRONG_MULTI_SLICE + +/* +to enable DV of frame mode +#define DOLBY_META_SUPPORT in ucode +*/ + +#undef pr_info +#define pr_info printk +#define VDEC_DW +#define DEBUG_UCODE +#define MEM_NAME "codec_m264" +#define MULTI_INSTANCE_FRAMEWORK +/* #define ONE_COLOCATE_BUF_PER_DECODE_BUF */ +#include "h264_dpb.h" +/* #define SEND_PARAM_WITH_REG */ + +#define DRIVER_NAME "ammvdec_h264" +#define DRIVER_HEADER_NAME "ammvdec_h264_header" + +#define CHECK_INTERVAL (HZ/100) + +#define SEI_DATA_SIZE (8*1024) +#define SEI_ITU_DATA_SIZE (4*1024) + +#define RATE_MEASURE_NUM 8 +#define RATE_CORRECTION_THRESHOLD 5 +#define RATE_2397_FPS 4004 /* 23.97 */ +#define RATE_25_FPS 3840 /* 25 */ +#define RATE_2997_FPS 3203 /* 29.97 */ +#define DUR2PTS(x) ((x)*90/96) +#define PTS2DUR(x) ((x)*96/90) +#define DUR2PTS_REM(x) (x*90 - DUR2PTS(x)*96) +#define FIX_FRAME_RATE_CHECK_IFRAME_NUM 2 + +#define FIX_FRAME_RATE_OFF 0 +#define FIX_FRAME_RATE_ON 1 +#define FIX_FRAME_RATE_SMOOTH_CHECKING 2 + +#define DEC_CONTROL_FLAG_FORCE_2997_1080P_INTERLACE 0x0001 +#define DEC_CONTROL_FLAG_FORCE_2500_576P_INTERLACE 0x0002 +#define DEC_CONTROL_FLAG_FORCE_RATE_2397_FPS_FIX_FRAME_RATE 0x0010 +#define DEC_CONTROL_FLAG_FORCE_RATE_2997_FPS_FIX_FRAME_RATE 0x0020 + +#define DECODE_ID(hw) (hw_to_vdec(hw)->id) + +#define RATE_MEASURE_NUM 8 +#define RATE_CORRECTION_THRESHOLD 5 +#define RATE_24_FPS 4004 /* 23.97 */ +#define RATE_25_FPS 3840 /* 25 */ +#define DUR2PTS(x) ((x)*90/96) +#define PTS2DUR(x) ((x)*96/90) +#define DUR2PTS_REM(x) (x*90 - DUR2PTS(x)*96) +#define FIX_FRAME_RATE_CHECK_IDRFRAME_NUM 2 + +#define ALIGN_WIDTH(x) 
(ALIGN((x), 64)) +#define ALIGN_HEIGHT(x) (ALIGN((x), 32)) + +#define H264_DEV_NUM 9 + +#define CONSTRAIN_MAX_BUF_NUM + +#define H264_MMU +#define VIDEO_SIGNAL_TYPE_AVAILABLE_MASK 0x20000000 +#define INVALID_IDX -1 /* Invalid buffer index.*/ + +static int mmu_enable; +/*mmu do not support mbaff*/ +static int force_enable_mmu = 0; +unsigned int h264_debug_flag; /* 0xa0000000; */ +unsigned int h264_debug_mask = 0xff; + /* + *h264_debug_cmd: + * 0x1xx, force decoder id of xx to be disconnected + */ +unsigned int h264_debug_cmd; + +static int ref_b_frame_error_max_count = 50; + +static unsigned int dec_control = + DEC_CONTROL_FLAG_FORCE_2997_1080P_INTERLACE | + DEC_CONTROL_FLAG_FORCE_2500_576P_INTERLACE; + +static unsigned int force_rate_streambase; +static unsigned int force_rate_framebase; +static unsigned int force_disp_bufspec_num; +static unsigned int fixed_frame_rate_mode; +static unsigned int error_recovery_mode_in; +static int start_decode_buf_level = 0x4000; +static int pre_decode_buf_level = 0x1000; +static int stream_mode_start_num = 4; +static int dirty_again_threshold = 100; +static unsigned int colocate_old_cal; + + +static int check_dirty_data(struct vdec_s *vdec); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +/*to make reorder size difference of bl and el not too big*/ +static unsigned int reorder_dpb_size_margin_dv = 16; +#endif +static unsigned int reorder_dpb_size_margin = 6; +static unsigned int reference_buf_margin = 4; + +#ifdef CONSTRAIN_MAX_BUF_NUM +static u32 run_ready_max_vf_only_num; +static u32 run_ready_display_q_num; + /*0: not check + 0xff: mDPB.size + */ +static u32 run_ready_max_buf_num = 0xff; +#endif + +static u32 run_ready_min_buf_num = 2; + +#define VDEC_ASSIST_CANVAS_BLK32 0x5 + + +static unsigned int max_alloc_buf_count; +static unsigned int decode_timeout_val = 100; +static unsigned int errordata_timeout_val = 50; +static unsigned int get_data_timeout_val = 2000; +#if 1 +/* H264_DATA_REQUEST does not work, disable it, 
+decode has error for data in none continuous address +*/ +static unsigned int frame_max_data_packet; +#else +static unsigned int frame_max_data_packet = 8; +#endif +static unsigned int radr; +static unsigned int rval; +static u32 endian = 0xff0; + +/* + udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode detail print + bit 3, disable ucode watchdog + bit [31:16] not 0, pos to dump lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 udebug_pause_decode_idx; + +static unsigned int disp_vframe_valve_level; + +static unsigned int max_decode_instance_num = H264_DEV_NUM; +static unsigned int decode_frame_count[H264_DEV_NUM]; +static unsigned int display_frame_count[H264_DEV_NUM]; +static unsigned int max_process_time[H264_DEV_NUM]; +static unsigned int max_get_frame_interval[H264_DEV_NUM]; +static unsigned int run_count[H264_DEV_NUM]; +static unsigned int input_empty[H264_DEV_NUM]; +static unsigned int not_run_ready[H264_DEV_NUM]; +static unsigned int ref_frame_mark_flag[H264_DEV_NUM] = +{1, 1, 1, 1, 1, 1, 1, 1, 1}; + +#define VDEC_CLOCK_ADJUST_FRAME 30 +static unsigned int clk_adj_frame_count; + +/* + *bit[3:0]: 0, run ; 1, pause; 3, step + *bit[4]: 1, schedule run + */ +static unsigned int step[H264_DEV_NUM]; + +#define AUX_BUF_ALIGN(adr) ((adr + 0xf) & (~0xf)) +static u32 prefix_aux_buf_size = (16 * 1024); +static u32 suffix_aux_buf_size; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static u32 dv_toggle_prov_name; + +static u32 dolby_meta_with_el; +#endif + +/* + bit[8] + 0: use sys_info[bit 3] + not 0:use i_only_flag[7:0] + bit[7:0]: + bit 0, 1: only display I picture; + bit 1, 1: only decode I 
picture; +*/ +static unsigned int i_only_flag; + +/* + error_proc_policy: + bit[0] send_error_frame_flag; + (valid when bit[31] is 1, otherwise use sysinfo) + bit[1] do not decode if config_decode_buf() fail + bit[2] force release buf if in deadlock + bit[3] force sliding window ref_frames_in_buffer > num_ref_frames + bit[4] check inactive of receiver + bit[5] reset buffmgr if in deadlock + bit[6] reset buffmgr if bufspec, collocate buf, pic alloc fail + bit[7] reset buffmgr if dpb error + + bit[8] check total mbx/mby of decoded frame + bit[9] check ERROR_STATUS_REG + bit[10] check reference list + bit[11] mark error if dpb error + bit[12] i_only when error happen + bit[13] 0: mark error according to last pic, 1: ignore mark error + bit[14] 0: result done when timeout from ucode. 1: reset bufmgr when timeout. + bit[15] 1: dpb_frame_count If the dpb_frame_count difference is large, it moves out of the DPB buffer. + bit[16] 1: check slice header number. + bit[17] 1: If the decoded Mb count is insufficient but greater than the threshold, it is considered the correct frame. + bit[18] 1: time out status, store pic to dpb buffer. + bit[19] 1: If a lot b frames are wrong consecutively, the DPB queue reset. + bit[20] 1: fixed some error stream will lead to the diffusion of the error, resulting playback stuck. + bit[21] 1: fixed DVB loop playback cause jetter issue. + bit[22] 1: In streaming mode, support for discarding data. + bit[23] 0: set error flag on frame number gap error and drop it, 1: ignore error. 
+*/ +static unsigned int error_proc_policy = 0x7fCfb6; /*0x1f14*/ + + +/* + error_skip_count: + bit[11:0] error skip frame count + bit[15:12] error skip i picture count +*/ +static unsigned int error_skip_count = (0x2 << 12) | 0x40; + +static unsigned int force_sliding_margin; +/* + bit[1:0]: + 0, start playing from any frame + 1, start playing from I frame + bit[15:8]: the count of skip frames after first I + 2, start playing from second I frame (decode from the first I) + bit[15:8]: the max count of skip frames after first I + 3, start playing from IDR +*/ +static unsigned int first_i_policy = 1; + +/* + fast_output_enable: + bit [0], output frame if there is IDR in list + bit [1], output frame if the current poc is 1 big than the previous poc + bit [2], if even poc only, output frame ifthe cuurent poc + is 2 big than the previous poc + bit [3], ip only +*/ +static unsigned int fast_output_enable = H264_OUTPUT_MODE_NORMAL; + +static unsigned int enable_itu_t35 = 1; + +static unsigned int frmbase_cont_bitlevel = 0x40; + +static unsigned int frmbase_cont_bitlevel2 = 0x1; + +static unsigned int check_slice_num = 30; + +static unsigned int mb_count_threshold = 5; /*percentage*/ + +#define MH264_USERDATA_ENABLE + +/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */ +/* hevc->double_write_mode: + 0, no double write + 1, 1:1 ratio + 2, (1/4):(1/4) ratio + 3, (1/4):(1/4) ratio, with both compressed frame included + 4, (1/2):(1/2) ratio + 0x10, double write only + 0x10000: vdec dw horizotal 1/2 + 0x20000: vdec dw horizotal/vertical 1/2 +*/ +static u32 double_write_mode; +static u32 without_display_mode; + +static int loop_playback_poc_threshold = 400; +static int poc_threshold = 50; + +static u32 lookup_check_conut = 30; + + +/* + *[3:0] 0: default use config from omx. + * 1: force enable fence. + * 2: disable fence. + *[7:4] 0: fence use for driver. + * 1: fence fd use for app. 
+ */ +static u32 force_config_fence; + +#define IS_VDEC_DW(hw) (hw->double_write_mode >> 16 & 0xf) + +static void vmh264_dump_state(struct vdec_s *vdec); + +#define is_in_parsing_state(status) \ + ((status == H264_ACTION_SEARCH_HEAD) || \ + ((status & 0xf0) == 0x80)) + +#define is_interlace(frame) \ + ((frame->frame &&\ + frame->top_field &&\ + frame->bottom_field &&\ + (!frame->frame->coded_frame)) || \ + (frame->frame && \ + frame->frame->coded_frame && \ + (!frame->frame->frame_mbs_only_flag) && \ + frame->frame->structure == FRAME)) + +static inline bool close_to(int a, int b, int m) +{ + return (abs(a - b) < m) ? true : false; +} + +#if 0 +#define h264_alloc_hw_stru(dev, size, opt) devm_kzalloc(dev, size, opt) +#define h264_free_hw_stru(dev, hw) devm_kfree(dev, hw) +#else +#define h264_alloc_hw_stru(dev, size, opt) vzalloc(size) +#define h264_free_hw_stru(dev, hw) vfree(hw) +#endif + +/* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +#define NV21 +/* #endif */ + +/* 12M for L41 */ +#define MAX_DPB_BUFF_SIZE (12*1024*1024) +#define DEFAULT_MEM_SIZE (32*1024*1024) +#define AVIL_DPB_BUFF_SIZE 0x01ec2000 + +#define DEF_BUF_START_ADDR 0x00000000 +#define mem_sps_base 0x01c3c00 +#define mem_pps_base 0x01cbc00 +/*#define V_BUF_ADDR_OFFSET (0x13e000)*/ +u32 V_BUF_ADDR_OFFSET = 0x200000; +#define DCAC_READ_MARGIN (64 * 1024) + + +#define EXTEND_SAR 0xff +#define BUFSPEC_POOL_SIZE 64 +#define VF_POOL_SIZE 64 +#define VF_POOL_NUM 2 +#define MAX_VF_BUF_NUM 27 +#define BMMU_MAX_BUFFERS (BUFSPEC_POOL_SIZE + 3) +#define BMMU_REF_IDX (BUFSPEC_POOL_SIZE) +#define BMMU_DPB_IDX (BUFSPEC_POOL_SIZE + 1) +#define BMMU_EXTIF_IDX (BUFSPEC_POOL_SIZE + 2) +#define EXTIF_BUF_SIZE (0x10000 * 2) + +#define HEADER_BUFFER_IDX(n) (n) +#define VF_BUFFER_IDX(n) (n) + + +#define PUT_INTERVAL (HZ/100) +#define NO_DISP_WD_COUNT (3 * HZ / PUT_INTERVAL) + +#define MMU_MAX_BUFFERS BUFSPEC_POOL_SIZE +#define SWITCHING_STATE_OFF 0 +#define SWITCHING_STATE_ON_CMD3 1 +#define SWITCHING_STATE_ON_CMD1 2 
+ + + +#define INCPTR(p) ptr_atomic_wrap_inc(&p) + +#define SLICE_TYPE_I 2 +#define SLICE_TYPE_P 5 +#define SLICE_TYPE_B 6 + +struct buffer_spec_s { + /* + used: + -1, none allocated + 0, allocated, free + 1, used by dpb + 2, in disp queue; + 3, in disp queue, isolated, + do not use for dpb when vf_put; + 4, to release + 5, in disp queue, isolated (but not to release) + do not use for dpb when vf_put; + */ + unsigned int used; + unsigned int info0; + unsigned int info1; + unsigned int info2; + unsigned int y_addr; + unsigned int u_addr; + unsigned int v_addr; + + int y_canvas_index; + int u_canvas_index; + int v_canvas_index; + +#ifdef VDEC_DW + unsigned int vdec_dw_y_addr; + unsigned int vdec_dw_u_addr; + unsigned int vdec_dw_v_addr; + + int vdec_dw_y_canvas_index; + int vdec_dw_u_canvas_index; + int vdec_dw_v_canvas_index; +#ifdef NV21 + struct canvas_config_s vdec_dw_canvas_config[2]; +#else + struct canvas_config_s vdec_dw_canvas_config[3]; +#endif +#endif + +#ifdef NV21 + struct canvas_config_s canvas_config[2]; +#else + struct canvas_config_s canvas_config[3]; +#endif + unsigned long cma_alloc_addr; + unsigned int buf_adr; +#ifdef H264_MMU + unsigned long alloc_header_addr; +#endif + char *aux_data_buf; + int aux_data_size; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + unsigned char dv_enhance_exist; +#endif + int canvas_pos; + int vf_ref; + /*unsigned int comp_body_size;*/ + unsigned int dw_y_adr; + unsigned int dw_u_v_adr; + int fs_idx; +}; + +#define AUX_DATA_SIZE(pic) (hw->buffer_spec[pic->buf_spec_num].aux_data_size) +#define AUX_DATA_BUF(pic) (hw->buffer_spec[pic->buf_spec_num].aux_data_buf) +#define DEL_EXIST(h, p) (h->buffer_spec[p->buf_spec_num].dv_enhance_exist) + + +#define vdec_dw_spec2canvas(x) \ + (((x)->vdec_dw_v_canvas_index << 16) | \ + ((x)->vdec_dw_u_canvas_index << 8) | \ + ((x)->vdec_dw_y_canvas_index << 0)) + + +#define spec2canvas(x) \ + (((x)->v_canvas_index << 16) | \ + ((x)->u_canvas_index << 8) | \ + ((x)->y_canvas_index << 
0)) + +#define FRAME_INDEX(vf_index) (vf_index & 0xff) +#define BUFSPEC_INDEX(vf_index) ((vf_index >> 8) & 0xff) +#define VF_INDEX(frm_idx, bufspec_idx) (frm_idx | (bufspec_idx << 8)) + +static struct vframe_s *vh264_vf_peek(void *); +static struct vframe_s *vh264_vf_get(void *); +static void vh264_vf_put(struct vframe_s *, void *); +static int vh264_vf_states(struct vframe_states *states, void *); +static int vh264_event_cb(int type, void *data, void *private_data); +static void vh264_work(struct work_struct *work); +static void vh264_timeout_work(struct work_struct *work); +static void vh264_notify_work(struct work_struct *work); +#ifdef MH264_USERDATA_ENABLE +static void user_data_ready_notify_work(struct work_struct *work); +static void vmh264_wakeup_userdata_poll(struct vdec_s *vdec); +#endif + +static const char vh264_dec_id[] = "vh264-dev"; + +#define PROVIDER_NAME "vdec.h264" + +static const struct vframe_operations_s vf_provider_ops = { + .peek = vh264_vf_peek, + .get = vh264_vf_get, + .put = vh264_vf_put, + .event_cb = vh264_event_cb, + .vf_states = vh264_vf_states, +}; + +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_CONFIG_PARAM 3 +#define DEC_RESULT_GET_DATA 4 +#define DEC_RESULT_GET_DATA_RETRY 5 +#define DEC_RESULT_ERROR 6 +#define DEC_RESULT_EOS 7 +#define DEC_RESULT_FORCE_EXIT 8 +#define DEC_RESULT_TIMEOUT 9 + + +/* + *static const char *dec_result_str[] = { + * "DEC_RESULT_NONE ", + * "DEC_RESULT_DONE ", + * "DEC_RESULT_AGAIN ", + * "DEC_RESULT_CONFIG_PARAM", + * "DEC_RESULT_GET_DATA ", + * "DEC_RESULT_GET_DA_RETRY", + * "DEC_RESULT_ERROR ", + *}; + */ + +#define UCODE_IP_ONLY 2 +#define UCODE_IP_ONLY_PARAM 1 + +#define MC_OFFSET_HEADER 0x0000 +#define MC_OFFSET_DATA 0x1000 +#define MC_OFFSET_MMCO 0x2000 +#define MC_OFFSET_LIST 0x3000 +#define MC_OFFSET_SLICE 0x4000 +#define MC_OFFSET_MAIN 0x5000 + +#define MC_TOTAL_SIZE ((20+16)*SZ_1K) +#define MC_SWAP_SIZE (4*SZ_1K) +#define MODE_ERROR 0 
+#define MODE_FULL 1 + +#define DFS_HIGH_THEASHOLD 3 + +#define INIT_FLAG_REG AV_SCRATCH_2 +#define HEAD_PADING_REG AV_SCRATCH_3 +#define UCODE_WATCHDOG_REG AV_SCRATCH_7 +#define LMEM_DUMP_ADR AV_SCRATCH_L +#define DEBUG_REG1 AV_SCRATCH_M +#define DEBUG_REG2 AV_SCRATCH_N +#define FRAME_COUNTER_REG AV_SCRATCH_I +#define RPM_CMD_REG AV_SCRATCH_A +#define H264_DECODE_SIZE AV_SCRATCH_E +#define H264_DECODE_MODE AV_SCRATCH_4 +#define H264_DECODE_SEQINFO AV_SCRATCH_5 +#define H264_AUX_ADR AV_SCRATCH_C +#define H264_AUX_DATA_SIZE AV_SCRATCH_H + +#define H264_DECODE_INFO M4_CONTROL_REG /* 0xc29 */ +#define DPB_STATUS_REG AV_SCRATCH_J +#define ERROR_STATUS_REG AV_SCRATCH_9 + /* + NAL_SEARCH_CTL: bit 0, enable itu_t35 + NAL_SEARCH_CTL: bit 1, enable mmu + NAL_SEARCH_CTL: bit 2, detect frame_mbs_only_flag whether switch resolution + NAL_SEARCH_CTL: bit 15,bitstream_restriction_flag + */ +#define NAL_SEARCH_CTL AV_SCRATCH_9 +#define MBY_MBX MB_MOTION_MODE /*0xc07*/ + +#define DECODE_MODE_SINGLE 0x0 +#define DECODE_MODE_MULTI_FRAMEBASE 0x1 +#define DECODE_MODE_MULTI_STREAMBASE 0x2 +#define DECODE_MODE_MULTI_DVBAL 0x3 +#define DECODE_MODE_MULTI_DVENL 0x4 +static DEFINE_MUTEX(vmh264_mutex); + +#ifdef MH264_USERDATA_ENABLE + +struct mh264_userdata_record_t { + struct userdata_meta_info_t meta_info; + u32 rec_start; + u32 rec_len; +}; + +struct mh264_ud_record_wait_node_t { + struct list_head list; + struct mh264_userdata_record_t ud_record; +}; +#define USERDATA_FIFO_NUM 256 +#define MAX_FREE_USERDATA_NODES 5 + +struct mh264_userdata_info_t { + struct mh264_userdata_record_t records[USERDATA_FIFO_NUM]; + u8 *data_buf; + u8 *data_buf_end; + u32 buf_len; + u32 read_index; + u32 write_index; + u32 last_wp; +}; + + +#endif + +struct mh264_fence_vf_t { + u32 used_size; + struct vframe_s *fence_vf[VF_POOL_SIZE]; +}; + +struct vdec_h264_hw_s { + spinlock_t lock; + spinlock_t bufspec_lock; + int id; + struct platform_device *platform_dev; + unsigned long cma_alloc_addr; + /* struct page 
*collocate_cma_alloc_pages; */ + unsigned long collocate_cma_alloc_addr; + + u32 prefix_aux_size; + u32 suffix_aux_size; + void *aux_addr; + dma_addr_t aux_phy_addr; + + /* buffer for store all sei data */ + void *sei_data_buf; + u32 sei_data_len; + + /* buffer for storing one itu35 recored */ + void *sei_itu_data_buf; + u32 sei_itu_data_len; + + /* recycle buffer for user data storing all itu35 records */ + void *sei_user_data_buffer; + u32 sei_user_data_wp; +#ifdef MH264_USERDATA_ENABLE + struct work_struct user_data_ready_work; +#endif + struct StorablePicture *last_dec_picture; + + ulong lmem_phy_addr; + dma_addr_t lmem_addr; + + void *bmmu_box; +#ifdef H264_MMU + void *mmu_box; + void *frame_mmu_map_addr; + dma_addr_t frame_mmu_map_phy_addr; + u32 hevc_cur_buf_idx; + u32 losless_comp_body_size; + u32 losless_comp_body_size_sao; + u32 losless_comp_header_size; + u32 mc_buffer_size_u_v; + u32 mc_buffer_size_u_v_h; + u32 is_idr_frame; + u32 is_new_pic; + u32 frame_done; + u32 frame_busy; + unsigned long extif_addr; + int double_write_mode; + int mmu_enable; +#endif + + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + + int cur_pool; + struct vframe_s vfpool[VF_POOL_NUM][VF_POOL_SIZE]; + struct buffer_spec_s buffer_spec[BUFSPEC_POOL_SIZE]; + struct vframe_s switching_fense_vf; + struct h264_dpb_stru dpb; + u8 init_flag; + u8 first_sc_checked; + u8 has_i_frame; + u8 config_bufmgr_done; + u32 max_reference_size; + u32 decode_pic_count; + u32 reflist_error_count; + int start_search_pos; + u32 reg_iqidct_control; + bool reg_iqidct_control_init_flag; + u32 reg_vcop_ctrl_reg; + u32 reg_rv_ai_mb_count; + u32 vld_dec_control; + struct vframe_s vframe_dummy; + + unsigned char buffer_empty_flag; + + u32 frame_width; + u32 frame_height; + u32 frame_dur; + u32 frame_prog; + u32 frame_packing_type; + + struct vframe_chunk_s *chunk; + + u32 stat; + unsigned long buf_start; + u32 buf_offset; + u32 
buf_size; + /* u32 ucode_map_start; */ + u32 pts_outside; + u32 sync_outside; + u32 vh264_ratio; + u32 vh264_rotation; + u32 use_idr_framerate; + + u32 seq_info; + u32 seq_info2; + u32 video_signal_from_vui; /*to do .. */ + u32 timing_info_present_flag; + u32 fixed_frame_rate_flag; + u32 bitstream_restriction_flag; + u32 num_reorder_frames; + u32 max_dec_frame_buffering; + u32 iframe_count; + u32 aspect_ratio_info; + u32 num_units_in_tick; + u32 time_scale; + u32 h264_ar; + bool h264_first_valid_pts_ready; + u32 h264pts1; + u32 h264pts2; + u32 pts_duration; + u32 h264_pts_count; + u32 duration_from_pts_done; + u32 pts_unstable; + u32 unstable_pts; + u32 last_checkout_pts; + u32 max_refer_buf; + + s32 vh264_stream_switching_state; + struct vframe_s *p_last_vf; + u32 last_pts; + u32 last_pts_remainder; + u32 last_duration; + u32 last_mb_width, last_mb_height; + bool check_pts_discontinue; + bool pts_discontinue; + u32 wait_buffer_counter; + u32 first_offset; + u32 first_pts; + u64 first_pts64; + bool first_pts_cached; + u64 last_pts64; +#if 0 + void *sei_data_buffer; + dma_addr_t sei_data_buffer_phys; +#endif + + uint error_recovery_mode; + uint mb_total; + uint mb_width; + uint mb_height; + + uint i_only; + int skip_frame_count; + bool no_poc_reorder_flag; + bool send_error_frame_flag; + dma_addr_t mc_dma_handle; + void *mc_cpu_addr; + int vh264_reset; + + atomic_t vh264_active; + + struct dec_sysinfo vh264_amstream_dec_info; + + int dec_result; + u32 timeout_processing; + struct work_struct work; + struct work_struct notify_work; + struct work_struct timeout_work; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + + struct timer_list check_timer; + + /**/ + unsigned int last_frame_time; + u32 vf_pre_count; + atomic_t vf_get_count; + atomic_t vf_put_count; + + /* timeout handle */ + unsigned long int start_process_time; + unsigned int last_mby_mbx; + unsigned int last_vld_level; + unsigned int decode_timeout_count; + unsigned int timeout_num; + 
unsigned int search_dataempty_num; + unsigned int decode_timeout_num; + unsigned int decode_dataempty_num; + unsigned int buffer_empty_recover_num; + + unsigned get_data_count; + unsigned get_data_start_time; + /**/ + + /*log*/ + unsigned int packet_write_success_count; + unsigned int packet_write_EAGAIN_count; + unsigned int packet_write_ENOMEM_count; + unsigned int packet_write_EFAULT_count; + unsigned int total_read_size_pre; + unsigned int total_read_size; + unsigned int frame_count_pre; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + u8 switch_dvlayer_flag; + u8 got_valid_nal; +#endif + u8 eos; + u8 data_flag; + u32 no_error_count; + u32 no_error_i_count; + /* + NODISP_FLAG + */ + u8 dec_flag; + + u32 ucode_pause_pos; + + u8 reset_bufmgr_flag; + u32 reset_bufmgr_count; + ulong timeout; + u32 timeout_flag; + u32 cfg_param1; + u32 cfg_param2; + u32 cfg_param3; + u32 cfg_param4; + int valve_count; + u8 next_again_flag; + u32 pre_parser_wr_ptr; + struct firmware_s *fw; + struct firmware_s *fw_mmu; +#ifdef MH264_USERDATA_ENABLE + /*user data*/ + struct mutex userdata_mutex; + struct mh264_userdata_info_t userdata_info; + struct mh264_userdata_record_t ud_record; + int wait_for_udr_send; +#endif + u32 no_mem_count; + u32 canvas_mode; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + wait_queue_head_t wait_q; + u32 reg_g_status; + struct mutex chunks_mutex; + int need_cache_size; + u64 sc_start_time; + u8 frmbase_cont_flag; + struct vframe_qos_s vframe_qos; + int frameinfo_enable; + bool first_head_check_flag; + unsigned int height_aspect_ratio; + unsigned int width_aspect_ratio; + unsigned int first_i_policy; + u32 reorder_dpb_size_margin; + bool wait_reset_done_flag; +#ifdef DETECT_WRONG_MULTI_SLICE + unsigned int multi_slice_pic_check_count; + /* multi_slice_pic_flag: + 0, unknown; + 1, single slice; + 2, multi slice + */ + unsigned int multi_slice_pic_flag; + unsigned int picture_slice_count; + unsigned int cur_picture_slice_count; + 
unsigned char force_slice_as_picture_flag; + unsigned int last_picture_slice_count; + unsigned int first_pre_frame_num; +#endif + u32 res_ch_flag; + u32 b_frame_error_count; + struct vdec_info gvs; + u32 kpi_first_i_comming; + u32 kpi_first_i_decoded; + int sidebind_type; + int sidebind_channel_id; + u32 low_latency_mode; + int ip_field_error_count; + int buffer_wrap[BUFSPEC_POOL_SIZE]; + int loop_flag; + int loop_last_poc; + bool enable_fence; + int fence_usage; + bool discard_dv_data; + u32 metadata_config_flag; + int vdec_pg_enable_flag; + u32 save_reg_f; + u32 start_bit_cnt; + u32 right_frame_count; + u32 wrong_frame_count; + u32 error_frame_width; + u32 error_frame_height; + ulong fb_token; + int dec_again_cnt; + struct mh264_fence_vf_t fence_vf_s; + struct mutex fence_mutex; + u32 no_decoder_buffer_flag; + u32 video_signal_type; + struct trace_decoder_name trace; + int csd_change_flag; +}; + +static u32 again_threshold; + +static void timeout_process(struct vdec_h264_hw_s *hw); +static void dump_bufspec(struct vdec_h264_hw_s *hw, + const char *caller); +static void h264_reconfig(struct vdec_h264_hw_s *hw); +static void h264_reset_bufmgr(struct vdec_s *vdec); +static void vh264_local_init(struct vdec_h264_hw_s *hw, bool is_reset); +static int vh264_hw_ctx_restore(struct vdec_h264_hw_s *hw); +static int vh264_stop(struct vdec_h264_hw_s *hw); +static s32 vh264_init(struct vdec_h264_hw_s *hw); +static void set_frame_info(struct vdec_h264_hw_s *hw, struct vframe_s *vf, + u32 index); +static void release_aux_data(struct vdec_h264_hw_s *hw, + int buf_spec_num); +#ifdef ERROR_HANDLE_TEST +static void h264_clear_dpb(struct vdec_h264_hw_s *hw); +#endif + +#define H265_PUT_SAO_4K_SET 0x03 +#define H265_ABORT_SAO_4K_SET 0x04 +#define H265_ABORT_SAO_4K_SET_DONE 0x05 + +#define SYS_COMMAND HEVC_ASSIST_SCRATCH_0 +#define H265_CHECK_AXI_INFO_BASE HEVC_ASSIST_SCRATCH_8 +#define H265_SAO_4K_SET_BASE HEVC_ASSIST_SCRATCH_9 +#define H265_SAO_4K_SET_COUNT HEVC_ASSIST_SCRATCH_A 
+#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464 + + +#define HEVC_CM_HEADER_START_ADDR 0x3628 +#define HEVC_CM_BODY_START_ADDR 0x3626 +#define HEVC_CM_BODY_LENGTH 0x3627 +#define HEVC_CM_HEADER_LENGTH 0x3629 +#define HEVC_CM_HEADER_OFFSET 0x362b +#define HEVC_SAO_CTRL9 0x362d +#define HEVCD_MPP_DECOMP_CTL3 0x34c4 +#define HEVCD_MPP_VDEC_MCR_CTL 0x34c8 +#define HEVC_DBLK_CFGB 0x350b +#define HEVC_ASSIST_MMU_MAP_ADDR 0x3009 + +#define H265_DW_NO_SCALE +#define H265_MEM_MAP_MODE 0 /*0:linear 1:32x32 2:64x32*/ +#define H265_LOSLESS_COMPRESS_MODE +#define MAX_FRAME_4K_NUM 0x1200 +#define FRAME_MMU_MAP_SIZE (MAX_FRAME_4K_NUM * 4) + +/* 0:linear 1:32x32 2:64x32 ; m8baby test1902 */ +static u32 mem_map_mode = H265_MEM_MAP_MODE; + +#define MAX_SIZE_4K (4096 * 2304) +#define MAX_SIZE_2K (1920 * 1088) + +static int is_oversize(int w, int h) +{ + int max = MAX_SIZE_4K; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) + max = MAX_SIZE_2K; + + if (w < 0 || h < 0) + return true; + + if (h != 0 && (w > max / h)) + return true; + + return false; +} + +static void vmh264_udc_fill_vpts(struct vdec_h264_hw_s *hw, + int frame_type, + u32 vpts, + u32 vpts_valid); +static int compute_losless_comp_body_size(int width, + int height, int bit_depth_10); +static int compute_losless_comp_header_size(int width, int height); + +static int hevc_alloc_mmu(struct vdec_h264_hw_s *hw, int pic_idx, + int pic_width, int pic_height, u16 bit_depth, + unsigned int *mmu_index_adr) { + int cur_buf_idx; + int bit_depth_10 = (bit_depth != 0x00); + int picture_size; + u32 cur_mmu_4k_number; + + WRITE_VREG(CURR_CANVAS_CTRL, pic_idx<<24); + cur_buf_idx = READ_VREG(CURR_CANVAS_CTRL)&0xff; + picture_size = compute_losless_comp_body_size(pic_width, + pic_height, bit_depth_10); + cur_mmu_4k_number = ((picture_size+(1<<12)-1) >> 12); + dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, + "alloc_mmu new_fb_idx %d picture_size %d cur_mmu_4k_number %d\n", + cur_buf_idx, picture_size, cur_mmu_4k_number); + + if 
(cur_mmu_4k_number > MAX_FRAME_4K_NUM) { + pr_err("hevc_alloc_mmu cur_mmu_4k_number %d unsupport\n", + cur_mmu_4k_number); + return -1; + } + + return decoder_mmu_box_alloc_idx( + hw->mmu_box, + cur_buf_idx, + cur_mmu_4k_number, + mmu_index_adr); +} + +static int compute_losless_comp_body_size(int width, + int height, int bit_depth_10) +{ + int width_x64; + int height_x32; + int bsize; + + width_x64 = width + 63; + width_x64 >>= 6; + + height_x32 = height + 31; + height_x32 >>= 5; + +#ifdef H264_MMU + bsize = (bit_depth_10 ? 4096 : 3264) * width_x64*height_x32; +#else + bsize = (bit_depth_10 ? 4096 : 3072) * width_x64*height_x32; +#endif + return bsize; +} + +static int compute_losless_comp_header_size(int width, int height) +{ + int width_x64; + int width_x128; + int height_x64; + int hsize; + + width_x64 = width + 63; + width_x64 >>= 6; + + width_x128 = width + 127; + width_x128 >>= 7; + + height_x64 = height + 63; + height_x64 >>= 6; + +#ifdef H264_MMU + hsize = 128*width_x64*height_x64; +#else + hsize = 32*width_x128*height_x64; +#endif + return hsize; +} + +static int get_dw_size(struct vdec_h264_hw_s *hw, u32 *pdw_buffer_size_u_v_h) +{ + int pic_width, pic_height; + int lcu_size = 16; + int dw_buf_size; + u32 dw_buffer_size_u_v; + u32 dw_buffer_size_u_v_h; + int dw_mode = hw->double_write_mode; + + pic_width = hw->frame_width; + pic_height = hw->frame_height; + + if (dw_mode) { + int pic_width_dw = pic_width / + get_double_write_ratio(hw->double_write_mode); + int pic_height_dw = pic_height / + get_double_write_ratio(hw->double_write_mode); + + int pic_width_lcu_dw = (pic_width_dw % lcu_size) ? + pic_width_dw / lcu_size + 1 : + pic_width_dw / lcu_size; + int pic_height_lcu_dw = (pic_height_dw % lcu_size) ? 
+ pic_height_dw / lcu_size + 1 : + pic_height_dw / lcu_size; + int lcu_total_dw = pic_width_lcu_dw * pic_height_lcu_dw; + + + dw_buffer_size_u_v = lcu_total_dw * lcu_size * lcu_size / 2; + dw_buffer_size_u_v_h = (dw_buffer_size_u_v + 0xffff) >> 16; + /*64k alignment*/ + dw_buf_size = ((dw_buffer_size_u_v_h << 16) * 3); + *pdw_buffer_size_u_v_h = dw_buffer_size_u_v_h; + } else { + *pdw_buffer_size_u_v_h = 0; + dw_buf_size = 0; + } + + return dw_buf_size; +} + + +static void hevc_mcr_config_canv2axitbl(struct vdec_h264_hw_s *hw, int restore) +{ + int i, size; + u32 canvas_addr; + unsigned long maddr; + int num_buff = hw->dpb.mDPB.size; + int dw_size = 0; + u32 dw_buffer_size_u_v_h; + u32 blkmode = hw->canvas_mode; + int dw_mode = hw->double_write_mode; + struct vdec_s *vdec = hw_to_vdec(hw); + + canvas_addr = ANC0_CANVAS_ADDR; + for (i = 0; i < num_buff; i++) + WRITE_VREG((canvas_addr + i), i | (i << 8) | (i << 16)); + + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, (0x1 << 1) | (0x1 << 2)); + size = hw->losless_comp_body_size + hw->losless_comp_header_size; + + + dw_size = get_dw_size(hw, &dw_buffer_size_u_v_h); + size += dw_size; + if (size > 0) + size += 0x10000; + + dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL, + "dw_buffer_size_u_v_h = %d, dw_size = 0x%x, size = 0x%x\n", + dw_buffer_size_u_v_h, dw_size, size); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL, + "body_size = %d, header_size = %d, body_size_sao = %d\n", + hw->losless_comp_body_size, + hw->losless_comp_header_size, + hw->losless_comp_body_size_sao); + + for (i = 0; i < num_buff; i++) { + if (!restore) { + if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, + HEADER_BUFFER_IDX(i), size, + DRIVER_HEADER_NAME, &maddr) < 0) { + dpb_print(DECODE_ID(hw), 0, + "%s malloc compress header failed %d\n", + DRIVER_HEADER_NAME, i); + return; + } + if (hw->enable_fence) { + vdec_fence_buffer_count_increase((ulong)vdec->sync); + INIT_LIST_HEAD(&vdec->sync->release_callback[HEADER_BUFFER_IDX(i)].node); + 
decoder_bmmu_box_add_callback_func(hw->bmmu_box, HEADER_BUFFER_IDX(i), (void *)&vdec->sync->release_callback[i]); + } + } else + maddr = hw->buffer_spec[i].alloc_header_addr; + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, maddr >> 5); + hw->buffer_spec[i].alloc_header_addr = maddr; + dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL, + "%s : canvas: %d axiaddr:%x size 0x%x\n", + __func__, i, (u32)maddr, size); + + if (dw_mode) { + u32 addr; + int canvas_w; + int canvas_h; + + canvas_w = hw->frame_width / + get_double_write_ratio(hw->double_write_mode); + canvas_h = hw->frame_height / + get_double_write_ratio(hw->double_write_mode); + + if (hw->canvas_mode == 0) + canvas_w = ALIGN(canvas_w, 32); + else + canvas_w = ALIGN(canvas_w, 64); + canvas_h = ALIGN(canvas_h, 32); + + hw->buffer_spec[i].dw_y_adr = + maddr + hw->losless_comp_header_size; + + hw->buffer_spec[i].dw_y_adr = + ((hw->buffer_spec[i].dw_y_adr + 0xffff) >> 16) + << 16; + hw->buffer_spec[i].dw_u_v_adr = + hw->buffer_spec[i].dw_y_adr + + (dw_buffer_size_u_v_h << 16) * 2; + + + hw->buffer_spec[i].buf_adr + = hw->buffer_spec[i].dw_y_adr; + addr = hw->buffer_spec[i].buf_adr; + + + dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL, + "dw_y_adr = 0x%x, dw_u_v_adr = 0x%x, y_addr = 0x%x, u_addr = 0x%x, v_addr = 0x%x, width = %d, height = %d\n", + hw->buffer_spec[i].dw_y_adr, + hw->buffer_spec[i].dw_u_v_adr, + hw->buffer_spec[i].y_addr, + hw->buffer_spec[i].u_addr, + hw->buffer_spec[i].v_addr, + canvas_w, + canvas_h); + + hw->buffer_spec[i].canvas_config[0].phy_addr = + hw->buffer_spec[i].dw_y_adr; + hw->buffer_spec[i].canvas_config[0].width = canvas_w; + hw->buffer_spec[i].canvas_config[0].height = canvas_h; + hw->buffer_spec[i].canvas_config[0].block_mode = + blkmode; + hw->buffer_spec[i].canvas_config[0].endian = 7; + + hw->buffer_spec[i].canvas_config[1].phy_addr = + hw->buffer_spec[i].dw_u_v_adr; + hw->buffer_spec[i].canvas_config[1].width = canvas_w; + hw->buffer_spec[i].canvas_config[1].height = canvas_h; + 
hw->buffer_spec[i].canvas_config[1].block_mode = + blkmode; + hw->buffer_spec[i].canvas_config[1].endian = 7; + } + } + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1); + + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (0<<1) | 1); + for (i = 0; i < 32; i++) + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); + return; +} +static void hevc_mcr_config_mc_ref(struct vdec_h264_hw_s *hw) +{ + u32 i; + u32 ref_canv; + struct Slice *pSlice = &(hw->dpb.mSlice); + /*REFLIST[0]*/ + for (i = 0; i < (unsigned int)(pSlice->listXsize[0]); i++) { + struct StorablePicture *ref = pSlice->listX[0][i]; + if (ref == NULL) + return; + WRITE_VREG(CURR_CANVAS_CTRL, ref->buf_spec_num<<24); + ref_canv = READ_VREG(CURR_CANVAS_CTRL)&0xffffff; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (ref->buf_spec_num & 0x3f) << 8); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, ref_canv); + } + /*REFLIST[1]*/ + for (i = 0; i < (unsigned int)(pSlice->listXsize[1]); i++) { + struct StorablePicture *ref = pSlice->listX[1][i]; + if (ref == NULL) + return; + WRITE_VREG(CURR_CANVAS_CTRL, ref->buf_spec_num<<24); + ref_canv = READ_VREG(CURR_CANVAS_CTRL)&0xffffff; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (ref->buf_spec_num & 0x3f) << 8); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, ref_canv); + } + return; +} + +static void hevc_mcr_config_mcrcc(struct vdec_h264_hw_s *hw) +{ + u32 rdata32; + u32 rdata32_2; + u32 slice_type; + struct StorablePicture *ref; + struct Slice *pSlice; + slice_type = hw->dpb.mSlice.slice_type; + pSlice = &(hw->dpb.mSlice); + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); + if (slice_type == I_SLICE) { + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x0); + return; + } + if (slice_type == B_SLICE) { + ref = pSlice->listX[0][0]; + if (ref == NULL) + return; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + ((ref->buf_spec_num & 0x3f) << 8)); + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + 
WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + ref = pSlice->listX[1][0]; + if (ref == NULL) + return; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + ((ref->buf_spec_num & 0x3f) << 8)); + rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32_2 = rdata32_2 & 0xffff; + rdata32_2 = rdata32_2 | (rdata32_2 << 16); + if (rdata32 == rdata32_2) { + ref = pSlice->listX[1][1]; + if (ref == NULL) + return; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + ((ref->buf_spec_num & 0x3f) << 8)); + rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32_2 = rdata32_2 & 0xffff; + rdata32_2 = rdata32_2 | (rdata32_2 << 16); + } + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32_2); + } else { /*P-PIC*/ + ref = pSlice->listX[0][0]; + if (ref == NULL) + return; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + ((ref->buf_spec_num & 0x3f) << 8)); + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + ref = pSlice->listX[0][1]; + if (ref == NULL) + return; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + ((ref->buf_spec_num & 0x3f) << 8)); + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32); + } + WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); + return; +} + + +static void hevc_mcr_sao_global_hw_init(struct vdec_h264_hw_s *hw, + u32 width, u32 height) { + u32 data32; + u32 lcu_x_num, lcu_y_num; + u32 lcu_total; + u32 mc_buffer_size_u_v; + u32 mc_buffer_size_u_v_h; + int dw_mode = hw->double_write_mode; + + /*lcu_x_num = (width + 15) >> 4;*/ + // width need to be round to 64 pixel -- case0260 1/10/2020 + lcu_x_num = (((width + 63) >> 6) << 2); + lcu_y_num = (height + 15) >> 4; + lcu_total = lcu_x_num * lcu_y_num; + + hw->mc_buffer_size_u_v = mc_buffer_size_u_v = lcu_total*16*16/2; + hw->mc_buffer_size_u_v_h = + mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 
0xffff)>>16; + + hw->losless_comp_body_size = 0; + + hw->losless_comp_body_size_sao = + compute_losless_comp_body_size(width, height, 0); + hw->losless_comp_header_size = + compute_losless_comp_header_size(width, height); + + WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x1); /*sw reset ipp10b_top*/ + WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x0); /*sw reset ipp10b_top*/ + + /* setup lcu_size = 16*/ + WRITE_VREG(HEVCD_IPP_TOP_LCUCONFIG, 16); /*set lcu size = 16*/ + /*pic_width/pic_height*/ + WRITE_VREG(HEVCD_IPP_TOP_FRMCONFIG, + (height & 0xffff) << 16 | (width & 0xffff)); + /* bitdepth_luma = 8*/ + /* bitdepth_chroma = 8*/ + WRITE_VREG(HEVCD_IPP_BITDEPTH_CONFIG, 0x0);/*set bit-depth 8 */ + +#ifdef H265_LOSLESS_COMPRESS_MODE + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4)); + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0); +#else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#endif + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + data32 |= (hw->canvas_mode << 4); + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); + + WRITE_VREG(HEVCD_MPP_DECOMP_CTL3, + (0x80 << 20) | (0x80 << 10) | (0xff)); + + WRITE_VREG(HEVCD_MPP_VDEC_MCR_CTL, 0x1 | (0x1 << 4)); + + /*comfig vdec:h264:mdec to use hevc mcr/mcrcc/decomp*/ + WRITE_VREG(MDEC_PIC_DC_MUX_CTRL, + READ_VREG(MDEC_PIC_DC_MUX_CTRL) | 0x1 << 31); + /* ipp_enable*/ + WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x1 << 1); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(HEVC_DBLK_CFG1, 0x2); // set ctusize==16 + WRITE_VREG(HEVC_DBLK_CFG2, ((height & 0xffff)<<16) | (width & 0xffff)); + if (dw_mode & 0x10) + WRITE_VREG(HEVC_DBLK_CFGB, 0x40405603); + else if (dw_mode) + WRITE_VREG(HEVC_DBLK_CFGB, 0x40405703); + else + WRITE_VREG(HEVC_DBLK_CFGB, 0x40405503); + } + + data32 = READ_VREG(HEVC_SAO_CTRL0); + data32 &= (~0xf); + data32 |= 0x4; + WRITE_VREG(HEVC_SAO_CTRL0, data32); + WRITE_VREG(HEVC_SAO_PIC_SIZE, (height & 0xffff) << 16 | + (width & 0xffff)); + data32 = ((lcu_x_num-1) | (lcu_y_num-1) << 16); + + 
WRITE_VREG(HEVC_SAO_PIC_SIZE_LCU, data32); + data32 = (lcu_x_num | lcu_y_num << 16); + WRITE_VREG(HEVC_SAO_TILE_SIZE_LCU, data32); + data32 = (mc_buffer_size_u_v_h << 16) << 1; + WRITE_VREG(HEVC_SAO_Y_LENGTH, data32); + data32 = (mc_buffer_size_u_v_h << 16); + WRITE_VREG(HEVC_SAO_C_LENGTH, data32); + + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + data32 &= (~0xff0); + data32 |= endian; /* Big-Endian per 64-bit */ + + if (hw->mmu_enable && (dw_mode & 0x10)) + data32 |= ((hw->canvas_mode << 12) |1); + else if (hw->mmu_enable && dw_mode) + data32 |= ((hw->canvas_mode << 12)); + else + data32 |= ((hw->canvas_mode << 12)|2); + + WRITE_VREG(HEVC_SAO_CTRL1, data32); + +#ifdef H265_DW_NO_SCALE + WRITE_VREG(HEVC_SAO_CTRL5, READ_VREG(HEVC_SAO_CTRL5) & ~(0xff << 16)); + if (hw->mmu_enable && dw_mode) { + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 &= (~(0xff << 16)); + if (dw_mode == 2 || + dw_mode == 3) + data32 |= (0xff<<16); + else if (dw_mode == 4) + data32 |= (0x33<<16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } + + +#endif + + +#ifdef H265_LOSLESS_COMPRESS_MODE + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 |= (1<<9); /*8-bit smem-mode*/ + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + WRITE_VREG(HEVC_CM_BODY_LENGTH, hw->losless_comp_body_size_sao); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, hw->losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, hw->losless_comp_header_size); +#endif + +#ifdef H265_LOSLESS_COMPRESS_MODE + WRITE_VREG(HEVC_SAO_CTRL9, READ_VREG(HEVC_SAO_CTRL9) | (0x1 << 1)); + WRITE_VREG(HEVC_SAO_CTRL5, READ_VREG(HEVC_SAO_CTRL5) | (0x1 << 10)); +#endif + + WRITE_VREG(HEVC_SAO_CTRL9, READ_VREG(HEVC_SAO_CTRL9) | 0x1 << 7); + + memset(hw->frame_mmu_map_addr, 0, FRAME_MMU_MAP_SIZE); + + WRITE_VREG(MDEC_EXTIF_CFG0, hw->extif_addr); + WRITE_VREG(MDEC_EXTIF_CFG1, 0x80000000); + return; +} + +static void hevc_sao_set_slice_type(struct vdec_h264_hw_s *hw, + u32 is_new_pic, u32 is_idr) +{ + hw->is_new_pic = is_new_pic; + hw->is_idr_frame = is_idr; + 
return; +} + +static void hevc_sao_set_pic_buffer(struct vdec_h264_hw_s *hw, + struct StorablePicture *pic) { + u32 mc_y_adr; + u32 mc_u_v_adr; + u32 dw_y_adr; + u32 dw_u_v_adr; + u32 canvas_addr; + int ret; + int dw_mode = hw->double_write_mode; + if (hw->is_new_pic != 1) + return; + + if (hw->is_idr_frame) { + /* William TBD */ + memset(hw->frame_mmu_map_addr, 0, FRAME_MMU_MAP_SIZE); + } + + WRITE_VREG(CURR_CANVAS_CTRL, pic->buf_spec_num << 24); + canvas_addr = READ_VREG(CURR_CANVAS_CTRL)&0xffffff; + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, (0x0 << 1) | + (0x0 << 2) | ((canvas_addr & 0xff) << 8)); + mc_y_adr = READ_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA) << 5; + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, (0x0 << 1) | + (0x0 << 2) | (((canvas_addr >> 8) & 0xff) << 8)); + mc_u_v_adr = READ_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA) << 5; + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1); + + + if (dw_mode) { + dw_y_adr = hw->buffer_spec[pic->buf_spec_num].dw_y_adr; + dw_u_v_adr = hw->buffer_spec[pic->buf_spec_num].dw_u_v_adr; + } else { + dw_y_adr = 0; + dw_u_v_adr = 0; + } +#ifdef H265_LOSLESS_COMPRESS_MODE + if (dw_mode) + WRITE_VREG(HEVC_SAO_Y_START_ADDR, dw_y_adr); + WRITE_VREG(HEVC_CM_BODY_START_ADDR, mc_y_adr); +#ifdef H264_MMU + WRITE_VREG(HEVC_CM_HEADER_START_ADDR, mc_y_adr); +#else + WRITE_VREG(HEVC_CM_HEADER_START_ADDR, + (mc_y_adr + hw->losless_comp_body_size)); +#endif +#else + WRITE_VREG(HEVC_SAO_Y_START_ADDR, mc_y_adr); +#endif + +#ifndef H265_LOSLESS_COMPRESS_MODE + WRITE_VREG(HEVC_SAO_C_START_ADDR, mc_u_v_adr); +#else + if (dw_mode) + WRITE_VREG(HEVC_SAO_C_START_ADDR, dw_u_v_adr); +#endif + +#ifndef LOSLESS_COMPRESS_MODE + if (dw_mode) { + WRITE_VREG(HEVC_SAO_Y_WPTR, mc_y_adr); + WRITE_VREG(HEVC_SAO_C_WPTR, mc_u_v_adr); + } +#else + WRITE_VREG(HEVC_SAO_Y_WPTR, dw_y_adr); + WRITE_VREG(HEVC_SAO_C_WPTR, dw_u_v_adr); +#endif + + ret = hevc_alloc_mmu(hw, pic->buf_spec_num, + (hw->mb_width << 4), (hw->mb_height << 4), 0x0, + hw->frame_mmu_map_addr); + if (ret != 0) { 
+ dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, "can't alloc need mmu1,idx %d ret =%d\n", + pic->buf_spec_num, + ret); + return; + } + + /*Reset SAO + Enable SAO slice_start*/ + if (hw->mmu_enable && get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) + WRITE_VREG(HEVC_DBLK_CFG0, 0x1); // reset buffer32x4 in lpf for every picture + WRITE_VREG(HEVC_SAO_INT_STATUS, + READ_VREG(HEVC_SAO_INT_STATUS) | 0x1 << 28); + WRITE_VREG(HEVC_SAO_INT_STATUS, + READ_VREG(HEVC_SAO_INT_STATUS) | 0x1 << 31); + /*pr_info("hevc_sao_set_pic_buffer:mc_y_adr: %x\n", mc_y_adr);*/ + /*Send coommand to hevc-code to supply 4k buffers to sao*/ + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(H265_SAO_4K_SET_BASE, (u32)hw->frame_mmu_map_phy_addr); + WRITE_VREG(H265_SAO_4K_SET_COUNT, MAX_FRAME_4K_NUM); + } else + WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR, (u32)hw->frame_mmu_map_phy_addr); + WRITE_VREG(SYS_COMMAND, H265_PUT_SAO_4K_SET); + hw->frame_busy = 1; + return; +} + + +static void hevc_set_unused_4k_buff_idx(struct vdec_h264_hw_s *hw, + u32 buf_spec_num) { + WRITE_VREG(CURR_CANVAS_CTRL, buf_spec_num<<24); + hw->hevc_cur_buf_idx = READ_VREG(CURR_CANVAS_CTRL)&0xff; + dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, " %s cur_buf_idx %d buf_spec_num %d\n", + __func__, hw->hevc_cur_buf_idx, buf_spec_num); + return; +} + + +static void hevc_set_frame_done(struct vdec_h264_hw_s *hw) +{ + ulong timeout = jiffies + HZ; + dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, "hevc_frame_done...set\n"); + while ((READ_VREG(HEVC_SAO_INT_STATUS) & 0x1) == 0) { + if (time_after(jiffies, timeout)) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, " %s..timeout!\n", __func__); + break; + } + } + timeout = jiffies + HZ; + while (READ_VREG(HEVC_CM_CORE_STATUS) & 0x1) { + if (time_after(jiffies, timeout)) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, " %s cm_core..timeout!\n", __func__); + break; + } + } + WRITE_VREG(HEVC_SAO_INT_STATUS, 0x1); + hw->frame_done = 1; + return; +} + 
+static void release_cur_decoding_buf(struct vdec_h264_hw_s *hw) +{ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + if (p_H264_Dpb->mVideo.dec_picture) { + release_picture(p_H264_Dpb, + p_H264_Dpb->mVideo.dec_picture); + p_H264_Dpb->mVideo.dec_picture->data_flag &= ~ERROR_FLAG; + p_H264_Dpb->mVideo.dec_picture = NULL; + if (hw->mmu_enable) + hevc_set_frame_done(hw); + } +} + +static void hevc_sao_wait_done(struct vdec_h264_hw_s *hw) +{ + ulong timeout = jiffies + HZ; + dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, "hevc_sao_wait_done...start\n"); + while ((READ_VREG(HEVC_SAO_INT_STATUS) >> 31)) { + if (time_after(jiffies, timeout)) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, + "hevc_sao_wait_done...wait timeout!\n"); + break; + } + } + timeout = jiffies + HZ; + if ((hw->frame_busy == 1) && (hw->frame_done == 1) ) { + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(SYS_COMMAND, H265_ABORT_SAO_4K_SET); + while ((READ_VREG(SYS_COMMAND) & 0xff) != + H265_ABORT_SAO_4K_SET_DONE) { + if (time_after(jiffies, timeout)) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, + "wait h265_abort_sao_4k_set_done timeout!\n"); + break; + } + } + } + amhevc_stop(); + hw->frame_busy = 0; + hw->frame_done = 0; + dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, + "sao wait done ,hevc stop!\n"); + } + return; +} +static void buf_spec_init(struct vdec_h264_hw_s *hw, bool buffer_reset_flag) +{ + int i; + unsigned long flags; + spin_lock_irqsave(&hw->bufspec_lock, flags); + + for (i = 0; i < VF_POOL_SIZE; i++) { + struct vframe_s *vf = &hw->vfpool[hw->cur_pool][i]; + u32 ref_idx = BUFSPEC_INDEX(vf->index); + if ((vf->index != -1) && + (hw->buffer_spec[ref_idx].vf_ref == 0) && + (hw->buffer_spec[ref_idx].used != -1)) { + vf->index = -1; + } + } + + hw->cur_pool++; + if (hw->cur_pool >= VF_POOL_NUM) + hw->cur_pool = 0; + + for (i = 0; i < VF_POOL_SIZE; i++) { + struct vframe_s *vf = &hw->vfpool[hw->cur_pool][i]; + u32 ref_idx = BUFSPEC_INDEX(vf->index); + 
if ((vf->index != -1) && + (hw->buffer_spec[ref_idx].vf_ref == 0) && + (hw->buffer_spec[ref_idx].used != -1)) { + vf->index = -1; + } + } + /* buffers are alloced when error reset, v4l must find buffer by buffer_wrap[] */ + if (hw->reset_bufmgr_flag && buffer_reset_flag) { + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (hw->buffer_spec[i].used == 1 || hw->buffer_spec[i].used == 2) + hw->buffer_spec[i].used = 0; + } + } else { + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + hw->buffer_spec[i].used = -1; + hw->buffer_spec[i].canvas_pos = -1; + hw->buffer_wrap[i] = -1; + } + } + + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + spin_unlock_irqrestore(&hw->bufspec_lock, flags); +} + + +/*is active in buf management */ +static unsigned char is_buf_spec_in_use(struct vdec_h264_hw_s *hw, + int buf_spec_num) +{ + unsigned char ret = 0; + if (hw->buffer_spec[buf_spec_num].used == 1 || + hw->buffer_spec[buf_spec_num].used == 2 || + hw->buffer_spec[buf_spec_num].used == 3 || + hw->buffer_spec[buf_spec_num].used == 5) + ret = 1; + return ret; +} + +static unsigned char is_buf_spec_in_disp_q(struct vdec_h264_hw_s *hw, + int buf_spec_num) +{ + unsigned char ret = 0; + if (hw->buffer_spec[buf_spec_num].used == 2 || + hw->buffer_spec[buf_spec_num].used == 3 || + hw->buffer_spec[buf_spec_num].used == 5) + ret = 1; + return ret; +} + +static int alloc_one_buf_spec(struct vdec_h264_hw_s *hw, int i) +{ + struct vdec_s *vdec = hw_to_vdec(hw); + if (hw->mmu_enable) { + if (hw->buffer_spec[i].alloc_header_addr) + return 0; + else + return -1; + } else { + + int buf_size = (hw->mb_total << 8) + (hw->mb_total << 7); + int addr; +#ifdef VDEC_DW + int orig_buf_size; + orig_buf_size = buf_size; + if (IS_VDEC_DW(hw) == 1) + buf_size += (hw->mb_total << 7) + (hw->mb_total << 6); + else if (IS_VDEC_DW(hw) == 2) + buf_size += (hw->mb_total << 6) + (hw->mb_total << 5); + else if (IS_VDEC_DW(hw) == 4) + buf_size += (hw->mb_total << 4) + (hw->mb_total << 
3); + else if (IS_VDEC_DW(hw) == 8) + buf_size += (hw->mb_total << 2) + (hw->mb_total << 1); + if (IS_VDEC_DW(hw)) { + u32 align_size; + /* add align padding size for blk64x32: (mb_w<<4)*32, (mb_h<<4)*64 */ + align_size = ((hw->mb_width << 9) + (hw->mb_height << 10)) / IS_VDEC_DW(hw); + /* double align padding size for uv*/ + align_size <<= 1; + buf_size += align_size + PAGE_SIZE; + } +#endif + if (hw->buffer_spec[i].cma_alloc_addr) + return 0; + + if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, i, + PAGE_ALIGN(buf_size), DRIVER_NAME, + &hw->buffer_spec[i].cma_alloc_addr) < 0) { + hw->buffer_spec[i].cma_alloc_addr = 0; + if (hw->no_mem_count++ > 3) { + hw->stat |= DECODER_FATAL_ERROR_NO_MEM; + hw->reset_bufmgr_flag = 1; + } + dpb_print(DECODE_ID(hw), 0, + "%s, fail to alloc buf for bufspec%d, try later\n", + __func__, i + ); + return -1; + } else { + if (hw->enable_fence) { + vdec_fence_buffer_count_increase((ulong)vdec->sync); + INIT_LIST_HEAD(&vdec->sync->release_callback[i].node); + decoder_bmmu_box_add_callback_func(hw->bmmu_box, i, (void *)&vdec->sync->release_callback[i]); + } + hw->no_mem_count = 0; + hw->stat &= ~DECODER_FATAL_ERROR_NO_MEM; + } + if (!vdec_secure(vdec)) { + /*init internal buf*/ + char *tmpbuf = (char *)codec_mm_phys_to_virt(hw->buffer_spec[i].cma_alloc_addr); + if (tmpbuf) { + memset(tmpbuf, 0, PAGE_ALIGN(buf_size)); + codec_mm_dma_flush(tmpbuf, + PAGE_ALIGN(buf_size), + DMA_TO_DEVICE); + } else { + tmpbuf = codec_mm_vmap(hw->buffer_spec[i].cma_alloc_addr, PAGE_ALIGN(buf_size)); + if (tmpbuf) { + memset(tmpbuf, 0, PAGE_ALIGN(buf_size)); + codec_mm_dma_flush(tmpbuf, + PAGE_ALIGN(buf_size), + DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(tmpbuf); + } + } + } + hw->buffer_spec[i].buf_adr = + hw->buffer_spec[i].cma_alloc_addr; + addr = hw->buffer_spec[i].buf_adr; + + + hw->buffer_spec[i].y_addr = addr; + addr += hw->mb_total << 8; + hw->buffer_spec[i].u_addr = addr; + hw->buffer_spec[i].v_addr = addr; + addr += hw->mb_total << 7; + + 
hw->buffer_spec[i].canvas_config[0].phy_addr = + hw->buffer_spec[i].y_addr; + hw->buffer_spec[i].canvas_config[0].width = + hw->mb_width << 4; + hw->buffer_spec[i].canvas_config[0].height = + hw->mb_height << 4; + hw->buffer_spec[i].canvas_config[0].block_mode = + hw->canvas_mode; + + hw->buffer_spec[i].canvas_config[1].phy_addr = + hw->buffer_spec[i].u_addr; + hw->buffer_spec[i].canvas_config[1].width = + hw->mb_width << 4; + hw->buffer_spec[i].canvas_config[1].height = + hw->mb_height << 3; + hw->buffer_spec[i].canvas_config[1].block_mode = + hw->canvas_mode; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s, alloc buf for bufspec%d\n", + __func__, i); +#ifdef VDEC_DW + if (!IS_VDEC_DW(hw)) + return 0; + else { + int w_shift = 3, h_shift = 3; + + if (IS_VDEC_DW(hw) == 1) { + w_shift = 3; + h_shift = 4; + } else if (IS_VDEC_DW(hw) == 2) { + w_shift = 3; + h_shift = 3; + } else if (IS_VDEC_DW(hw) == 4) { + w_shift = 2; + h_shift = 2; + } else if (IS_VDEC_DW(hw) == 8) { + w_shift = 1; + h_shift = 1; + } + + addr = hw->buffer_spec[i].cma_alloc_addr + PAGE_ALIGN(orig_buf_size); + hw->buffer_spec[i].vdec_dw_y_addr = addr; + addr += ALIGN_WIDTH(hw->mb_width << w_shift) * ALIGN_HEIGHT(hw->mb_height << h_shift); + hw->buffer_spec[i].vdec_dw_u_addr = addr; + hw->buffer_spec[i].vdec_dw_v_addr = addr; + addr += hw->mb_total << (w_shift + h_shift - 1); + + hw->buffer_spec[i].vdec_dw_canvas_config[0].phy_addr = + hw->buffer_spec[i].vdec_dw_y_addr; + hw->buffer_spec[i].vdec_dw_canvas_config[0].width = + ALIGN_WIDTH(hw->mb_width << w_shift); + hw->buffer_spec[i].vdec_dw_canvas_config[0].height = + ALIGN_HEIGHT(hw->mb_height << h_shift); + hw->buffer_spec[i].vdec_dw_canvas_config[0].block_mode = + hw->canvas_mode; + + hw->buffer_spec[i].vdec_dw_canvas_config[1].phy_addr = + hw->buffer_spec[i].vdec_dw_u_addr; + hw->buffer_spec[i].vdec_dw_canvas_config[1].width = + ALIGN_WIDTH(hw->mb_width << w_shift); + hw->buffer_spec[i].vdec_dw_canvas_config[1].height = + 
ALIGN_HEIGHT(hw->mb_height << (h_shift - 1)); + hw->buffer_spec[i].vdec_dw_canvas_config[1].block_mode = + hw->canvas_mode; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s, vdec_dw: alloc buf for bufspec%d blkmod %d\n", + __func__, i, hw->canvas_mode); + } +#endif + } + return 0; +} + +static int alloc_one_buf_spec_from_queue(struct vdec_h264_hw_s *hw, int idx) +{ + int ret = 0; + struct aml_vcodec_ctx *ctx = NULL; + struct buffer_spec_s *bs = &hw->buffer_spec[idx]; + struct canvas_config_s *y_canvas_cfg = NULL; + struct canvas_config_s *c_canvas_cfg = NULL; + struct vdec_v4l2_buffer *fb = NULL; + unsigned int y_addr = 0, c_addr = 0; + + if (IS_ERR_OR_NULL(hw->v4l2_ctx)) { + pr_err("the v4l context has err.\n"); + return -1; + } + + if (bs->cma_alloc_addr) + return 0; + + ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), try alloc from v4l queue buf size: %d\n", + ctx->id, __func__, + (hw->mb_total << 8) + (hw->mb_total << 7)); + + ret = ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC); + if (ret < 0) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] get fb fail.\n", ctx->id); + return ret; + } + + bs->cma_alloc_addr = (unsigned long)fb; + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), cma alloc addr: 0x%x, out %d dec %d\n", + ctx->id, __func__, bs->cma_alloc_addr, + ctx->cap_pool.out, ctx->cap_pool.dec); + + if (fb->num_planes == 1) { + y_addr = fb->m.mem[0].addr; + c_addr = fb->m.mem[0].addr + fb->m.mem[0].offset; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + } else if (fb->num_planes == 2) { + y_addr = fb->m.mem[0].addr; + c_addr = fb->m.mem[1].addr; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + fb->m.mem[1].bytes_used = fb->m.mem[1].size; + } + + fb->status = FB_ST_DECODER; + + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), y_addr: %x, size: %u\n", + ctx->id, __func__, y_addr, fb->m.mem[0].size); + dpb_print(DECODE_ID(hw), 
PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), c_addr: %x, size: %u\n", + ctx->id, __func__, c_addr, fb->m.mem[1].size); + + bs->y_addr = y_addr; + bs->u_addr = c_addr; + bs->v_addr = c_addr; + + y_canvas_cfg = &bs->canvas_config[0]; + c_canvas_cfg = &bs->canvas_config[1]; + + y_canvas_cfg->phy_addr = y_addr; + y_canvas_cfg->width = hw->mb_width << 4; + y_canvas_cfg->height = hw->mb_height << 4; + y_canvas_cfg->block_mode = hw->canvas_mode; + //fb->m.mem[0].bytes_used = y_canvas_cfg->width * y_canvas_cfg->height; + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), y_w: %d, y_h: %d\n", ctx->id, __func__, + y_canvas_cfg->width,y_canvas_cfg->height); + + c_canvas_cfg->phy_addr = c_addr; + c_canvas_cfg->width = hw->mb_width << 4; + c_canvas_cfg->height = hw->mb_height << 3; + c_canvas_cfg->block_mode = hw->canvas_mode; + //fb->m.mem[1].bytes_used = c_canvas_cfg->width * c_canvas_cfg->height; + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), c_w: %d, c_h: %d\n", ctx->id, __func__, + c_canvas_cfg->width, c_canvas_cfg->height); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), alloc buf for bufspec%d\n", ctx->id, __func__, idx); + + return ret; +} + +static void config_decode_canvas(struct vdec_h264_hw_s *hw, int i) +{ + int blkmode = hw->canvas_mode; + int endian = 0; + + if (blkmode == CANVAS_BLKMODE_LINEAR) { + if ((h264_debug_flag & IGNORE_PARAM_FROM_CONFIG) == 0) + endian = 7; + else + endian = 0; + } + + if (hw->is_used_v4l) + endian = 7; + + config_cav_lut_ex(hw->buffer_spec[i]. + y_canvas_index, + hw->buffer_spec[i].y_addr, + hw->mb_width << 4, + hw->mb_height << 4, + CANVAS_ADDR_NOWRAP, + blkmode, + endian, + VDEC_1); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32, + (1 << 11) | /* canvas_blk32_wr */ + (blkmode << 10) | /* canvas_blk32*/ + (1 << 8) | /* canvas_index_wr*/ + (hw->buffer_spec[i].y_canvas_index << 0) /* canvas index*/ + ); + } + + config_cav_lut_ex(hw->buffer_spec[i]. 
+ u_canvas_index, + hw->buffer_spec[i].u_addr, + hw->mb_width << 4, + hw->mb_height << 3, + CANVAS_ADDR_NOWRAP, + blkmode, + endian, + VDEC_1); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32, + (1 << 11) | + (blkmode << 10) | + (1 << 8) | + (hw->buffer_spec[i].u_canvas_index << 0)); + } + + WRITE_VREG(ANC0_CANVAS_ADDR + hw->buffer_spec[i].canvas_pos, + spec2canvas(&hw->buffer_spec[i])); + + +#ifdef VDEC_DW + if (!IS_VDEC_DW(hw)) + return; + else { + config_cav_lut_ex(hw->buffer_spec[i]. + vdec_dw_y_canvas_index, + hw->buffer_spec[i].vdec_dw_canvas_config[0].phy_addr, + hw->buffer_spec[i].vdec_dw_canvas_config[0].width, + hw->buffer_spec[i].vdec_dw_canvas_config[0].height, + CANVAS_ADDR_NOWRAP, + blkmode, + endian, + VDEC_1); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32, + (1 << 11) | + (blkmode << 10) | + (1 << 8) | + (hw->buffer_spec[i].vdec_dw_y_canvas_index << 0)); + } + + config_cav_lut_ex(hw->buffer_spec[i]. + vdec_dw_u_canvas_index, + hw->buffer_spec[i].vdec_dw_canvas_config[1].phy_addr, + hw->buffer_spec[i].vdec_dw_canvas_config[1].width, + hw->buffer_spec[i].vdec_dw_canvas_config[1].height, + CANVAS_ADDR_NOWRAP, + blkmode, + endian, + VDEC_1); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32, + (1 << 11) | + (blkmode << 10) | + (1 << 8) | + (hw->buffer_spec[i].vdec_dw_u_canvas_index << 0)); + } + } +#endif +} + +static void config_decode_canvas_ex(struct vdec_h264_hw_s *hw, int i) +{ + u32 blkmode = hw->canvas_mode; + int canvas_w; + int canvas_h; + + canvas_w = hw->frame_width / + get_double_write_ratio(hw->double_write_mode); + canvas_h = hw->frame_height / + get_double_write_ratio(hw->double_write_mode); + + if (hw->canvas_mode == 0) + canvas_w = ALIGN(canvas_w, 32); + else + canvas_w = ALIGN(canvas_w, 64); + canvas_h = ALIGN(canvas_h, 32); + + config_cav_lut_ex(hw->buffer_spec[i]. 
+ y_canvas_index, + hw->buffer_spec[i].dw_y_adr, + canvas_w, + canvas_h, + CANVAS_ADDR_NOWRAP, + blkmode, + 7, + VDEC_HEVC); + + config_cav_lut_ex(hw->buffer_spec[i]. + u_canvas_index, + hw->buffer_spec[i].dw_u_v_adr, + canvas_w, + canvas_h, + CANVAS_ADDR_NOWRAP, + blkmode, + 7, + VDEC_HEVC); +} + +static int v4l_get_free_buffer_spec(struct vdec_h264_hw_s *hw) +{ + int i; + + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (hw->buffer_spec[i].cma_alloc_addr == 0) + return i; + } + + return -1; +} + +static int v4l_find_buffer_spec_idx(struct vdec_h264_hw_s *hw, unsigned int v4l_indx) +{ + int i; + + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (hw->buffer_wrap[i] == v4l_indx) + return i; + } + return -1; +} + +static int v4l_get_free_buf_idx(struct vdec_s *vdec) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + struct aml_vcodec_ctx * v4l = hw->v4l2_ctx; + struct v4l_buff_pool *pool = &v4l->cap_pool; + struct buffer_spec_s *pic = NULL; + int i, rt, idx = INVALID_IDX; + ulong flags; + u32 state = 0, index; + + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0; i < pool->in; ++i) { + state = (pool->seq[i] >> 16); + index = (pool->seq[i] & 0xffff); + + switch (state) { + case V4L_CAP_BUFF_IN_DEC: + rt = v4l_find_buffer_spec_idx(hw, index); + if (rt >= 0) { + pic = &hw->buffer_spec[rt]; + if ((pic->vf_ref == 0) && + (pic->used == 0) && + pic->cma_alloc_addr) { + idx = rt; + } + } + break; + case V4L_CAP_BUFF_IN_M2M: + rt = v4l_get_free_buffer_spec(hw); + if (rt >= 0) { + pic = &hw->buffer_spec[rt]; + if (!alloc_one_buf_spec_from_queue(hw, rt)) { + struct vdec_v4l2_buffer *fb; + config_decode_canvas(hw, rt); + fb = (struct vdec_v4l2_buffer *)pic->cma_alloc_addr; + hw->buffer_wrap[rt] = fb->buf_idx; + idx = rt; + } + } + break; + default: + break; + } + + if (idx != INVALID_IDX) { + pic->used = 1; + break; + } + } + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + + if (idx < 0) { + dpb_print(DECODE_ID(hw), 0, "%s fail, state %d\n", 
__func__, state); + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + dpb_print(DECODE_ID(hw), 0, "%s, %d\n", + __func__, hw->buffer_wrap[i]); + } + vmh264_dump_state(vdec); + } else { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *)pic->cma_alloc_addr; + + fb->status = FB_ST_DECODER; + } + + return idx; +} + +int get_free_buf_idx(struct vdec_s *vdec) +{ + int i; + unsigned long addr, flags; + int index = -1; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + int buf_total = BUFSPEC_POOL_SIZE; + + if (hw->is_used_v4l) + return v4l_get_free_buf_idx(vdec); + + spin_lock_irqsave(&hw->bufspec_lock, flags); + /*hw->start_search_pos = 0;*/ + for (i = hw->start_search_pos; i < buf_total; i++) { + if (hw->mmu_enable) + addr = hw->buffer_spec[i].alloc_header_addr; + else + addr = hw->buffer_spec[i].cma_alloc_addr; + + if (hw->buffer_spec[i].vf_ref == 0 && + hw->buffer_spec[i].used == 0 && addr) { + hw->buffer_spec[i].used = 1; + hw->start_search_pos = i+1; + index = i; + hw->buffer_wrap[i] = index; + break; + } + } + if (index < 0) { + for (i = 0; i < hw->start_search_pos; i++) { + if (hw->mmu_enable) + addr = hw->buffer_spec[i].alloc_header_addr; + else + addr = hw->buffer_spec[i].cma_alloc_addr; + + if (hw->buffer_spec[i].vf_ref == 0 && + hw->buffer_spec[i].used == 0 && addr) { + hw->buffer_spec[i].used = 1; + hw->start_search_pos = i+1; + index = i; + hw->buffer_wrap[i] = index; + break; + } + } + } + + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + if (hw->start_search_pos >= buf_total) + hw->start_search_pos = 0; + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "%s, buf_spec_num %d\n", __func__, index); + + if (index < 0) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "%s fail\n", __func__); + vmh264_dump_state(vdec); + } + + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + return index; +} + +int release_buf_spec_num(struct vdec_s *vdec, int buf_spec_num) +{ + /*u32 cur_buf_idx;*/ + 
unsigned long flags; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL, + "%s buf_spec_num %d used %d\n", + __func__, buf_spec_num, + buf_spec_num > 0 ? hw->buffer_spec[buf_spec_num].used : 0); + if (buf_spec_num >= 0 && + buf_spec_num < BUFSPEC_POOL_SIZE + ) { + spin_lock_irqsave(&hw->bufspec_lock, flags); + hw->buffer_spec[buf_spec_num].used = 0; + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + if (hw->mmu_enable) { + /*WRITE_VREG(CURR_CANVAS_CTRL, buf_spec_num<<24); + cur_buf_idx = READ_VREG(CURR_CANVAS_CTRL); + cur_buf_idx = cur_buf_idx&0xff;*/ + decoder_mmu_box_free_idx(hw->mmu_box, buf_spec_num); + } + release_aux_data(hw, buf_spec_num); + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + return 0; +} + +static void config_buf_specs(struct vdec_s *vdec) +{ + int i, j; + unsigned long flags; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + int mode = IS_VDEC_DW(hw) ? 
2 : 1; + + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0, j = 0; + j < hw->dpb.mDPB.size + && i < BUFSPEC_POOL_SIZE; + i++) { + int canvas; + if (hw->buffer_spec[i].used != -1) + continue; + if (vdec->parallel_dec == 1) { + if (hw->buffer_spec[i].y_canvas_index == -1) + hw->buffer_spec[i].y_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].u_canvas_index == -1) { + hw->buffer_spec[i].u_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->buffer_spec[i].v_canvas_index = hw->buffer_spec[i].u_canvas_index; + } +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + if (hw->buffer_spec[i].vdec_dw_y_canvas_index == -1) + hw->buffer_spec[i].vdec_dw_y_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].vdec_dw_u_canvas_index == -1) { + hw->buffer_spec[i].vdec_dw_u_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->buffer_spec[i].vdec_dw_v_canvas_index = + hw->buffer_spec[i].vdec_dw_u_canvas_index; + } + } +#endif + } else { + canvas = vdec->get_canvas(j * mode, 2); + hw->buffer_spec[i].y_canvas_index = canvas_y(canvas); + hw->buffer_spec[i].u_canvas_index = canvas_u(canvas); + hw->buffer_spec[i].v_canvas_index = canvas_v(canvas); + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DPB_DETAIL, + "config canvas (%d) %x for bufspec %d\r\n", + j, canvas, i); +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + canvas = vdec->get_canvas(j * mode + 1, 2); + hw->buffer_spec[i].vdec_dw_y_canvas_index = canvas_y(canvas); + hw->buffer_spec[i].vdec_dw_u_canvas_index = canvas_u(canvas); + hw->buffer_spec[i].vdec_dw_v_canvas_index = canvas_v(canvas); + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DPB_DETAIL, + "vdec_dw: config canvas (%d) %x for bufspec %d\r\n", + j, canvas, i); + } +#endif + } + + hw->buffer_spec[i].used = 0; + hw->buffer_spec[i].canvas_pos = j; + + + j++; + } + spin_unlock_irqrestore(&hw->bufspec_lock, flags); +} + +static void config_buf_specs_ex(struct vdec_s *vdec) +{ + int i, j; 
+ unsigned long flags; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + int mode = IS_VDEC_DW(hw) ? 2 : 1; + + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0, j = 0; + j < hw->dpb.mDPB.size + && i < BUFSPEC_POOL_SIZE; + i++) { + int canvas = 0; + if (hw->buffer_spec[i].used != -1) + continue; + if (vdec->parallel_dec == 1) { + if (hw->buffer_spec[i].y_canvas_index == -1) + hw->buffer_spec[i].y_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].u_canvas_index == -1) { + hw->buffer_spec[i].u_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->buffer_spec[i].v_canvas_index = hw->buffer_spec[i].u_canvas_index; + } +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + if (hw->buffer_spec[i].vdec_dw_y_canvas_index == -1) + hw->buffer_spec[i].vdec_dw_y_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].vdec_dw_u_canvas_index == -1) { + hw->buffer_spec[i].vdec_dw_u_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->buffer_spec[i].vdec_dw_v_canvas_index = + hw->buffer_spec[i].vdec_dw_u_canvas_index; + } + } +#endif + } else { + canvas = vdec->get_canvas(j* mode, 2); + hw->buffer_spec[i].y_canvas_index = canvas_y(canvas); + hw->buffer_spec[i].u_canvas_index = canvas_u(canvas); + hw->buffer_spec[i].v_canvas_index = canvas_v(canvas); + + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DPB_DETAIL, + "config canvas (%d) %x for bufspec %d\r\n", + j, canvas, i); +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + canvas = vdec->get_canvas(j*mode + 1, 2); + hw->buffer_spec[i].vdec_dw_y_canvas_index = canvas_y(canvas); + hw->buffer_spec[i].vdec_dw_u_canvas_index = canvas_u(canvas); + hw->buffer_spec[i].vdec_dw_v_canvas_index = canvas_v(canvas); + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DPB_DETAIL, + "vdec_dw: config canvas (%d) %x for bufspec %d\r\n", + j, canvas, i); + } +#endif + } + + hw->buffer_spec[i].used = 0; + hw->buffer_spec[i].alloc_header_addr = 0; + 
hw->buffer_spec[i].canvas_pos = j; + + j++; + } + spin_unlock_irqrestore(&hw->bufspec_lock, flags); +} + + +static void dealloc_buf_specs(struct vdec_h264_hw_s *hw, + unsigned char release_all) +{ + int i; + unsigned long flags; + unsigned char dealloc_flag = 0; + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (hw->buffer_spec[i].used == 4 || + release_all) { + dealloc_flag = 1; + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DPB_DETAIL, + "%s buf_spec_num %d\n", + __func__, i + ); + spin_lock_irqsave + (&hw->bufspec_lock, flags); + hw->buffer_spec[i].used = -1; + spin_unlock_irqrestore + (&hw->bufspec_lock, flags); + release_aux_data(hw, i); + + if (!hw->mmu_enable) { + if (hw->buffer_spec[i].cma_alloc_addr) { + if (!hw->is_used_v4l) { + decoder_bmmu_box_free_idx( + hw->bmmu_box, + i); + } + spin_lock_irqsave + (&hw->bufspec_lock, flags); + hw->buffer_spec[i].cma_alloc_addr = 0; + hw->buffer_spec[i].buf_adr = 0; + spin_unlock_irqrestore + (&hw->bufspec_lock, flags); + } + } else { + if (hw->buffer_spec[i].alloc_header_addr) { + decoder_mmu_box_free_idx( + hw->mmu_box, + i); + spin_lock_irqsave + (&hw->bufspec_lock, flags); + hw->buffer_spec[i]. + alloc_header_addr = 0; + hw->buffer_spec[i].buf_adr = 0; + spin_unlock_irqrestore + (&hw->bufspec_lock, flags); + } + } + } + } + if (dealloc_flag && + dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + return; +} + +unsigned char have_free_buf_spec(struct vdec_s *vdec) +{ + int i; + unsigned long addr; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + struct aml_vcodec_ctx * ctx = hw->v4l2_ctx; + int canvas_pos_min = BUFSPEC_POOL_SIZE; + int index = -1; + int ret = 0; + int allocated_count = 0; + + if (hw->is_used_v4l) { + struct h264_dpb_stru *dpb = &hw->dpb; + + /* trigger to parse head data. 
*/ + if (!hw->v4l_params_parsed) + return 1; + + if (dpb->mDPB.used_size >= dpb->mDPB.size - 1) + return 0; + + for (i = 0; i < hw->dpb.mDPB.size; i++) { + if (hw->buffer_spec[i].used == 0 && + hw->buffer_spec[i].vf_ref == 0 && + hw->buffer_spec[i].cma_alloc_addr) { + return 1; + } + } + + if (ctx->cap_pool.dec < hw->dpb.mDPB.size) { + if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) >= + run_ready_min_buf_num) { + if (ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) + return 1; + } + } + + return 0; + } + + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (hw->mmu_enable) + addr = hw->buffer_spec[i].alloc_header_addr; + else + addr = hw->buffer_spec[i].cma_alloc_addr; + if (hw->buffer_spec[i].used == 0 && + hw->buffer_spec[i].vf_ref == 0) { + + if (addr) + return 1; + if (hw->buffer_spec[i].canvas_pos < canvas_pos_min) { + canvas_pos_min = hw->buffer_spec[i].canvas_pos; + index = i; + } + } + if (addr) + allocated_count++; + } + if (index >= 0) { + mutex_lock(&vmh264_mutex); + dealloc_buf_specs(hw, 0); + if (max_alloc_buf_count == 0 || + allocated_count < max_alloc_buf_count) { + if (alloc_one_buf_spec(hw, index) >= 0) + ret = 1; + } + mutex_unlock(&vmh264_mutex); + } + + return ret; +} + +static int get_buf_spec_by_canvas_pos(struct vdec_h264_hw_s *hw, + int canvas_pos) +{ + int i; + int j = 0; + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (hw->buffer_spec[i].canvas_pos >= 0) { + if (j == canvas_pos) + return i; + j++; + } + } + return -1; +} +static void update_vf_memhandle(struct vdec_h264_hw_s *hw, + struct vframe_s *vf, int index) +{ + if (index < 0) { + vf->mem_handle = NULL; + vf->mem_head_handle = NULL; + } else if (vf->type & VIDTYPE_SCATTER) { + vf->mem_handle = + decoder_mmu_box_get_mem_handle( + hw->mmu_box, index); + vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, HEADER_BUFFER_IDX(index)); + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, VF_BUFFER_IDX(index)); + /* vf->mem_head_handle = + 
decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, HEADER_BUFFER_IDX(index));*/ + } + return; +} +static int check_force_interlace(struct vdec_h264_hw_s *hw, + struct FrameStore *frame) +{ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int bForceInterlace = 0; + /* no di in secure mode, disable force di */ + if (vdec_secure(hw_to_vdec(hw))) + return 0; + + if (hw->i_only) + return 0; + + if ((dec_control & DEC_CONTROL_FLAG_FORCE_2997_1080P_INTERLACE) + && hw->bitstream_restriction_flag + && (hw->frame_width == 1920) + && (hw->frame_height >= 1080) /* For being compatible with a fake progressive stream which is interlaced actually*/ + && (hw->frame_dur == 3203 || (hw->frame_dur == 3840 && p_H264_Dpb->mSPS.profile_idc == 100 && + p_H264_Dpb->mSPS.level_idc == 40))) { + bForceInterlace = 1; + } else if ((dec_control & DEC_CONTROL_FLAG_FORCE_2500_576P_INTERLACE) + && (hw->frame_width == 720) + && (hw->frame_height == 576) + && (hw->frame_dur == 3840)) { + bForceInterlace = 1; + } + + return bForceInterlace; +} + +static void fill_frame_info(struct vdec_h264_hw_s *hw, struct FrameStore *frame) +{ + struct vframe_qos_s *vframe_qos = &hw->vframe_qos; + + if (frame->slice_type == I_SLICE) + vframe_qos->type = 1; + else if (frame->slice_type == P_SLICE) + vframe_qos->type = 2; + else if (frame->slice_type == B_SLICE) + vframe_qos->type = 3; + + if (input_frame_based(hw_to_vdec(hw))) + vframe_qos->size = frame->frame_size2; + else + vframe_qos->size = frame->frame_size; + vframe_qos->pts = frame->pts64; + + vframe_qos->max_mv = frame->max_mv; + vframe_qos->avg_mv = frame->avg_mv; + vframe_qos->min_mv = frame->min_mv; +/* + pr_info("mv: max:%d, avg:%d, min:%d\n", + vframe_qos->max_mv, + vframe_qos->avg_mv, + vframe_qos->min_mv); +*/ + + vframe_qos->max_qp = frame->max_qp; + vframe_qos->avg_qp = frame->avg_qp; + vframe_qos->min_qp = frame->min_qp; +/* + pr_info("qp: max:%d, avg:%d, min:%d\n", + vframe_qos->max_qp, + vframe_qos->avg_qp, + vframe_qos->min_qp); +*/ + + 
vframe_qos->max_skip = frame->max_skip; + vframe_qos->avg_skip = frame->avg_skip; + vframe_qos->min_skip = frame->min_skip; +/* + pr_info("skip: max:%d, avg:%d, min:%d\n", + vframe_qos->max_skip, + vframe_qos->avg_skip, + vframe_qos->min_skip); +*/ + vframe_qos->num++; +} + +static int is_iframe(struct FrameStore *frame) { + + if (frame->frame && frame->frame->slice_type == I_SLICE) { + return 1; + } + return 0; +} + +static int post_prepare_process(struct vdec_s *vdec, struct FrameStore *frame) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + int buffer_index = frame->buf_spec_num; + + if (buffer_index < 0 || buffer_index >= BUFSPEC_POOL_SIZE) { + dpb_print(DECODE_ID(hw), 0, + "%s, buffer_index 0x%x is beyond range\n", + __func__, buffer_index); + return -1; + } + + if (force_disp_bufspec_num & 0x100) { + /*recycle directly*/ + if (hw->buffer_spec[frame->buf_spec_num].used != 3 && + hw->buffer_spec[frame->buf_spec_num].used != 5) + set_frame_output_flag(&hw->dpb, frame->index); + + /*make pre_output not set*/ + return -1; + } + if (error_proc_policy & 0x1000) { + int error_skip_i_count = (error_skip_count >> 12) & 0xf; + int error_skip_frame_count = error_skip_count & 0xfff; + if (((hw->no_error_count < error_skip_frame_count) + && (error_skip_i_count == 0 || + hw->no_error_i_count < error_skip_i_count)) + && (!(frame->data_flag & I_FLAG))) + frame->data_flag |= ERROR_FLAG; + } + + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, + "%s, buffer_index 0x%x frame_error %x poc %d hw error %x error_proc_policy %x\n", + __func__, buffer_index, + frame->data_flag & ERROR_FLAG, + frame->poc, hw->data_flag & ERROR_FLAG, + error_proc_policy); + + if (frame->frame == NULL && + ((frame->is_used == 1 && frame->top_field) + || (frame->is_used == 2 && frame->bottom_field))) { + if (hw->i_only) { + if (frame->is_used == 1) + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s No bottom_field !! 
frame_num %d used %d\n", + __func__, frame->frame_num, frame->is_used); + if (frame->is_used == 2) + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s No top_field !! frame_num %d used %d\n", + __func__, frame->frame_num, frame->is_used); + } + else { + frame->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, + "%s Error frame_num %d used %d\n", + __func__, frame->frame_num, frame->is_used); + } + } + if (vdec_stream_based(vdec) && !(frame->data_flag & NODISP_FLAG)) { + if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) { + if ((pts_lookup_offset_us64(PTS_TYPE_VIDEO, + frame->offset_delimiter, &frame->pts, &frame->frame_size, + 0, &frame->pts64) == 0)) { + if ((lookup_check_conut && (hw->vf_pre_count > lookup_check_conut) && + (hw->wrong_frame_count > hw->right_frame_count)) && + ((frame->decoded_frame_size * 2 < frame->frame_size))) { + /*resolve many frame only one check in pts, cause playback unsmooth issue*/ + frame->pts64 = hw->last_pts64 +DUR2PTS(hw->frame_dur) ; + frame->pts = hw->last_pts + DUR2PTS(hw->frame_dur); + } + hw->right_frame_count++; + } else { + frame->pts64 = hw->last_pts64 +DUR2PTS(hw->frame_dur) ; + frame->pts = hw->last_pts + DUR2PTS(hw->frame_dur); + hw->wrong_frame_count++; + } + } + + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s error= 0x%x poc = %d offset= 0x%x pts= 0x%x last_pts =0x%x pts64 = %lld last_pts64= %lld duration = %d\n", + __func__, (frame->data_flag & ERROR_FLAG), frame->poc, + frame->offset_delimiter, frame->pts,hw->last_pts, + frame->pts64, hw->last_pts64, hw->frame_dur); + hw->last_pts64 = frame->pts64; + hw->last_pts = frame->pts; + } + + /* SWPL-18973 96000/15=6400, less than 15fps check */ + if ((!hw->duration_from_pts_done) && (hw->frame_dur > 6400ULL)) { + if ((check_force_interlace(hw, frame)) && + (frame->slice_type == I_SLICE) && + (hw->pts_outside)) { + if ((!hw->h264_pts_count) || (!hw->h264pts1)) { + hw->h264pts1 = frame->pts; + hw->h264_pts_count = 0; + } 
else if (frame->pts > hw->h264pts1) { + u32 calc_dur = + PTS2DUR(frame->pts - hw->h264pts1); + calc_dur = ((calc_dur/hw->h264_pts_count) << 1); + if (hw->frame_dur < (calc_dur + 200) && + hw->frame_dur > (calc_dur - 200)) { + hw->frame_dur >>= 1; + vdec_schedule_work(&hw->notify_work); + dpb_print(DECODE_ID(hw), 0, + "correct frame_dur %d, calc_dur %d, count %d\n", + hw->frame_dur, (calc_dur >> 1), hw->h264_pts_count); + hw->duration_from_pts_done = 1; + hw->h264_pts_count = 0; + } + } + } + hw->h264_pts_count++; + } + + if (frame->data_flag & ERROR_FLAG) { + vdec_count_info(&hw->gvs, 1, 0); + if (frame->slice_type == I_SLICE) { + hw->gvs.i_concealed_frames++; + } else if (frame->slice_type == P_SLICE) { + hw->gvs.p_concealed_frames++; + } else if (frame->slice_type == B_SLICE) { + hw->gvs.b_concealed_frames++; + } + if (!hw->send_error_frame_flag) { + hw->gvs.drop_frame_count++; + if (frame->slice_type == I_SLICE) { + hw->gvs.i_lost_frames++; + } else if (frame->slice_type == P_SLICE) { + hw->gvs.p_lost_frames++; + } else if (frame->slice_type == B_SLICE) { + hw->gvs.b_lost_frames++; + } + } + + } + + if ((!hw->enable_fence) && + ((frame->data_flag & NODISP_FLAG) || + (frame->data_flag & NULL_FLAG) || + ((!hw->send_error_frame_flag) && + (frame->data_flag & ERROR_FLAG)) || + ((hw->i_only & 0x1) && + (!(frame->data_flag & I_FLAG))))) { + frame->show_frame = false; + return 0; + } + + if (dpb_is_debug(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL)) { + dpb_print(DECODE_ID(hw), 0, + "%s, fs[%d] poc %d, buf_spec_num %d\n", + __func__, frame->index, frame->poc, + frame->buf_spec_num); + print_pic_info(DECODE_ID(hw), "predis_frm", + frame->frame, -1); + print_pic_info(DECODE_ID(hw), "predis_top", + frame->top_field, -1); + print_pic_info(DECODE_ID(hw), "predis_bot", + frame->bottom_field, -1); + } + + frame->show_frame = true; + + return 0; +} + +static int post_video_frame(struct vdec_s *vdec, struct FrameStore *frame) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s 
*)vdec->private; + struct vframe_s *vf = NULL; + int buffer_index = frame->buf_spec_num; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + ulong nv_order = VIDTYPE_VIU_NV21; + int bForceInterlace = 0; + int vf_count = 1; + int i; + + /* swap uv */ + if (hw->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + nv_order = VIDTYPE_VIU_NV12; + } + + if (!is_interlace(frame)) + vf_count = 1; + else + vf_count = 2; + + bForceInterlace = check_force_interlace(hw, frame); + if (bForceInterlace) + vf_count = 2; + + if (!hw->enable_fence) + hw->buffer_spec[buffer_index].vf_ref = 0; + fill_frame_info(hw, frame); + + if ((hw->is_used_v4l) && + (vdec->prog_only)) + vf_count = 1; + + for (i = 0; i < vf_count; i++) { + if (kfifo_get(&hw->newframe_q, &vf) == 0 || + vf == NULL) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "%s fatal error, no available buffer slot.\n", + __func__); + return -1; + } + vf->duration_pulldown = 0; + if (!(is_iframe(frame)) && hw->unstable_pts) { + vf->pts = 0; + vf->pts_us64 = 0; + vf->timestamp = 0; + vf->index = VF_INDEX(frame->index, buffer_index); + } else { + vf->pts = frame->pts; + vf->pts_us64 = frame->pts64; + vf->timestamp = frame->timestamp; + vf->index = VF_INDEX(frame->index, buffer_index); + } + + if (hw->is_used_v4l) { + vf->v4l_mem_handle + = hw->buffer_spec[buffer_index].cma_alloc_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + } + + if (hw->enable_fence) { + /* fill fence information. 
*/ + if (hw->fence_usage == FENCE_USE_FOR_DRIVER) + vf->fence = frame->fence; + } + + if (hw->mmu_enable) { + if (hw->double_write_mode & 0x10) { + /* double write only */ + vf->compBodyAddr = 0; + vf->compHeadAddr = 0; + } else { + /*head adr*/ + vf->compHeadAddr = + hw->buffer_spec[buffer_index].alloc_header_addr; + /*body adr*/ + vf->compBodyAddr = 0; + vf->canvas0Addr = vf->canvas1Addr = 0; + } + + vf->type = VIDTYPE_SCATTER; + + if (hw->double_write_mode) { + vf->type |= VIDTYPE_PROGRESSIVE + | VIDTYPE_VIU_FIELD; + vf->type |= nv_order; + if (hw->double_write_mode == 3) + vf->type |= VIDTYPE_COMPRESS; + + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + hw->buffer_spec[buffer_index]. + canvas_config[0]; + vf->canvas0_config[1] = + hw->buffer_spec[buffer_index]. + canvas_config[1]; + + vf->canvas1_config[0] = + hw->buffer_spec[buffer_index]. + canvas_config[0]; + vf->canvas1_config[1] = + hw->buffer_spec[buffer_index]. + canvas_config[1]; + + } else { + vf->type |= + VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; + vf->canvas0Addr = vf->canvas1Addr = 0; + } + + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + + vf->compWidth = hw->frame_width; + vf->compHeight = hw->frame_height; + } else { + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | + nv_order; + + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(&hw->buffer_spec[buffer_index]); +#ifdef VDEC_DW + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T7) { + if (IS_VDEC_DW(hw)) + vf->canvas0Addr = vf->canvas1Addr = + vdec_dw_spec2canvas(&hw->buffer_spec[buffer_index]); + } else { + if (IS_VDEC_DW(hw)) + vf->canvas0Addr = vf->canvas1Addr = -1; + } +#endif + + } + set_frame_info(hw, vf, buffer_index); + if (hw->discard_dv_data) { + vf->discard_dv_data = true; + } + + if (hw->mmu_enable && hw->double_write_mode) { + vf->width = hw->frame_width / + get_double_write_ratio(hw->double_write_mode); + vf->height = hw->frame_height / + 
get_double_write_ratio(hw->double_write_mode); + } + + if (frame->slice_type == I_SLICE) { + vf->frame_type |= V4L2_BUF_FLAG_KEYFRAME; + } else if (frame->slice_type == P_SLICE) { + vf->frame_type |= V4L2_BUF_FLAG_PFRAME; + } else if (frame->slice_type == B_SLICE) { + vf->frame_type |= V4L2_BUF_FLAG_BFRAME; + } + + vf->flag = 0; + if (frame->data_flag & I_FLAG) + vf->flag |= VFRAME_FLAG_SYNCFRAME; + if (frame->data_flag & ERROR_FLAG) + vf->flag |= VFRAME_FLAG_ERROR_RECOVERY; + update_vf_memhandle(hw, vf, buffer_index); + + if (!hw->enable_fence) { + hw->buffer_spec[buffer_index].used = 2; + hw->buffer_spec[buffer_index].vf_ref++; + } + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "%s %d frame = %p top_field = %p bottom_field = %p\n", __func__, __LINE__, frame->frame, + frame->top_field, frame->bottom_field); + + if (frame->frame != NULL) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "%s %d coded_frame = %d frame_mbs_only_flag = %d structure = %d\n", __func__, __LINE__, + frame->frame->coded_frame, frame->frame->frame_mbs_only_flag, frame->frame->structure); + } + + if (bForceInterlace || is_interlace(frame)) { + vf->type = + VIDTYPE_INTERLACE_FIRST | + nv_order; + + if (frame->frame != NULL && + (frame->frame->pic_struct == PIC_TOP_BOT || + frame->frame->pic_struct == PIC_BOT_TOP) && + frame->frame->coded_frame) { + if (frame->frame != NULL && frame->frame->pic_struct == PIC_TOP_BOT) { + vf->type |= (i == 0 ? + VIDTYPE_INTERLACE_TOP : + VIDTYPE_INTERLACE_BOTTOM); + } else if (frame->frame != NULL && frame->frame->pic_struct == PIC_BOT_TOP) { + vf->type |= (i == 0 ? + VIDTYPE_INTERLACE_BOTTOM : + VIDTYPE_INTERLACE_TOP); + } + } else if (frame->top_field != NULL && frame->bottom_field != NULL) {/*top first*/ + if (frame->top_field->poc <= frame->bottom_field->poc) + vf->type |= (i == 0 ? + VIDTYPE_INTERLACE_TOP : + VIDTYPE_INTERLACE_BOTTOM); + else + vf->type |= (i == 0 ? 
+ VIDTYPE_INTERLACE_BOTTOM : + VIDTYPE_INTERLACE_TOP); + } else { + vf->type |= (i == 0 ? + VIDTYPE_INTERLACE_TOP : + VIDTYPE_INTERLACE_BOTTOM); + } + vf->duration = vf->duration/2; + if (i == 1) { + vf->pts = 0; + vf->pts_us64 = 0; + } + + if (frame->frame) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "%s %d type = 0x%x pic_struct = %d pts = 0x%x pts_us64 = 0x%llx bForceInterlace = %d\n", + __func__, __LINE__, vf->type, frame->frame->pic_struct, + vf->pts, vf->pts_us64, bForceInterlace); + } + } + + if (hw->i_only) { + if (vf_count == 1 && frame->is_used == 1 && frame->top_field + && frame->bottom_field == NULL && frame->frame == NULL) { + vf->type = + VIDTYPE_INTERLACE_FIRST | + nv_order; + vf->type |= VIDTYPE_INTERLACE_TOP; + vf->duration = vf->duration/2; + } + + if (vf_count == 1 && frame->is_used == 2 && frame->bottom_field + && frame->top_field == NULL && frame->frame == NULL) { + vf->type = + VIDTYPE_INTERLACE_FIRST | + nv_order; + vf->type |= VIDTYPE_INTERLACE_BOTTOM; + vf->duration = vf->duration/2; + } + } + + /*vf->ratio_control |= (0x3FF << DISP_RATIO_ASPECT_RATIO_BIT);*/ + vf->sar_width = hw->width_aspect_ratio; + vf->sar_height = hw->height_aspect_ratio; + if (!vdec->vbuf.use_ptsserv && vdec_stream_based(vdec)) { + /* offset for tsplayer pts lookup */ + if (i == 0) { + vf->pts_us64 = + (((u64)vf->duration << 32) & + 0xffffffff00000000) | frame->offset_delimiter; + vf->pts = 0; + } else { + vf->pts_us64 = (u64)-1; + vf->pts = 0; + } + } + + hw->vf_pre_count++; + vdec_vframe_ready(hw_to_vdec(hw), vf); + if (!frame->show_frame) { + vh264_vf_put(vf, vdec); + atomic_add(1, &hw->vf_get_count); + continue; + } + + if (i == 0) { + struct vdec_s *pvdec; + struct vdec_info vs; + + pvdec = hw_to_vdec(hw); + memset(&vs, 0, sizeof(struct vdec_info)); + pvdec->dec_status(pvdec, &vs); + decoder_do_frame_check(pvdec, vf); + vdec_fill_vdec_frame(pvdec, &hw->vframe_qos, &vs, vf, frame->hw_decode_time); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + 
"[%s:%d] i_decoded_frame = %d p_decoded_frame = %d b_decoded_frame = %d\n", + __func__, __LINE__,vs.i_decoded_frames,vs.p_decoded_frames,vs.b_decoded_frames); + } + + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->trace.pts_name, vf->pts); + ATRACE_COUNTER(hw->trace.disp_q_name, kfifo_len(&hw->display_q)); + ATRACE_COUNTER(hw->trace.new_q_name, kfifo_len(&hw->newframe_q)); + hw->vf_pre_count++; + vdec->vdec_fps_detec(vdec->id); +#ifdef AUX_DATA_CRC + decoder_do_aux_data_check(vdec, hw->buffer_spec[buffer_index].aux_data_buf, + hw->buffer_spec[buffer_index].aux_data_size); +#endif + + dpb_print(DECODE_ID(hw), PRINT_FLAG_SEI_DETAIL, "aux_data_size: %d, signal_type: 0x%x\n", + hw->buffer_spec[buffer_index].aux_data_size, hw->video_signal_type); + + if (dpb_is_debug(DECODE_ID(hw), PRINT_FLAG_SEI_DETAIL)) { + int i = 0; + PR_INIT(128); + + for (i = 0; i < hw->buffer_spec[buffer_index].aux_data_size; i++) { + PR_FILL("%02x ", hw->buffer_spec[buffer_index].aux_data_buf[i]); + if (((i + 1) & 0xf) == 0) + PR_INFO(hw->id); + } + PR_INFO(hw->id); + } + + if (hw->is_used_v4l) { + update_vframe_src_fmt(vf, + hw->buffer_spec[buffer_index].aux_data_buf, + hw->buffer_spec[buffer_index].aux_data_size, + false, vdec->vf_provider_name, NULL); + } + + if (without_display_mode == 0) { + if (hw->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vh264_vf_put(vh264_vf_get(vdec), vdec); + } else { + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } else + vh264_vf_put(vh264_vf_get(vdec), vdec); + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + + return 0; +} + +int post_picture_early(struct vdec_s *vdec, int index) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + struct h264_dpb_stru *dpb_stru = &hw->dpb; + struct FrameStore fs; + u32 offset_lo, offset_hi; + + if (!hw->enable_fence) + 
return 0; + + /* create fence for each buffers. */ + if (vdec_timeline_create_fence(vdec->sync)) + return -1; + + memset(&fs, 0, sizeof(fs)); + + fs.buf_spec_num = index; + fs.fence = vdec->sync->fence; + fs.slice_type = dpb_stru->mSlice.slice_type; + fs.dpb_frame_count = dpb_stru->dpb_frame_count; + + offset_lo = dpb_stru->dpb_param.l.data[OFFSET_DELIMITER_LO]; + offset_hi = dpb_stru->dpb_param.l.data[OFFSET_DELIMITER_HI]; + fs.offset_delimiter = (offset_lo | offset_hi << 16); + + if (hw->chunk) { + fs.pts = hw->chunk->pts; + fs.pts64 = hw->chunk->pts64; + fs.timestamp = hw->chunk->timestamp; + } + fs.show_frame = true; + post_video_frame(vdec, &fs); + + display_frame_count[DECODE_ID(hw)]++; + return 0; +} + +int prepare_display_buf(struct vdec_s *vdec, struct FrameStore *frame) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *)vdec->private; + + if (hw->enable_fence) { + int i, j, used_size, ret; + int signed_count = 0; + struct vframe_s *signed_fence[VF_POOL_SIZE]; + + post_prepare_process(vdec, frame); + + if (!frame->show_frame) + pr_info("do not display.\n"); + + hw->buffer_spec[frame->buf_spec_num].used = 2; + hw->buffer_spec[frame->buf_spec_num].vf_ref = 1; + hw->buffer_spec[frame->buf_spec_num].fs_idx = frame->index; + + /* notify signal to wake up wq of fence. 
*/ + vdec_timeline_increase(vdec->sync, 1); + + mutex_lock(&hw->fence_mutex); + used_size = hw->fence_vf_s.used_size; + if (used_size) { + for (i = 0, j = 0; i < VF_POOL_SIZE && j < used_size; i++) { + if (hw->fence_vf_s.fence_vf[i] != NULL) { + ret = dma_fence_get_status(hw->fence_vf_s.fence_vf[i]->fence); + if (ret == 1) { + signed_fence[signed_count] = hw->fence_vf_s.fence_vf[i]; + hw->fence_vf_s.fence_vf[i] = NULL; + hw->fence_vf_s.used_size--; + signed_count++; + } + j++; + } + } + } + mutex_unlock(&hw->fence_mutex); + if (signed_count != 0) { + for (i = 0; i < signed_count; i++) + vh264_vf_put(signed_fence[i], vdec); + } + + return 0; + } + + if (post_prepare_process(vdec, frame)) + return -1; + + if (post_video_frame(vdec, frame)) + return -1; + + display_frame_count[DECODE_ID(hw)]++; + return 0; +} + +int notify_v4l_eos(struct vdec_s *vdec) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = &hw->vframe_dummy; + struct vdec_v4l2_buffer *fb = NULL; + int index = INVALID_IDX; + ulong expires; + + if (hw->eos) { + if (hw->is_used_v4l) { + expires = jiffies + msecs_to_jiffies(2000); + while (INVALID_IDX == (index = v4l_get_free_buf_idx(vdec))) { + if (time_after(jiffies, expires) || + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) + break; + } + + if (index == INVALID_IDX) { + ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token); + if (ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC) < 0) { + pr_err("[%d] EOS get free buff fail.\n", ctx->id); + return -1; + } + } + } + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + vf->v4l_mem_handle = (index == INVALID_IDX) ? 
(ulong)fb : + hw->buffer_spec[index].cma_alloc_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + + ATRACE_COUNTER(hw->trace.pts_name, vf->pts); + + if (hw->is_used_v4l) + fb->task->submit(fb->task, TASK_TYPE_DEC); + else + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + + pr_info("[%d] H264 EOS notify.\n", (hw->is_used_v4l)?ctx->id:vdec->id); + } + + return 0; +} + +/****************** + * Hardware config + */ +char *slice_type_name[] = { + "P_SLICE ", + "B_SLICE ", + "I_SLICE ", + "SP_SLICE", + "SI_SLICE", +}; + +char *picture_structure_name[] = { + "FRAME", + "TOP_FIELD", + "BOTTOM_FIELD" +}; + +void print_pic_info(int decindex, const char *info, + struct StorablePicture *pic, + int slice_type) +{ + if (pic) + dpb_print(decindex, PRINT_FLAG_DEC_DETAIL, + "%s: %s (original %s), %s, mb_aff_frame_flag %d poc %d, pic_num %d, buf_spec_num %d data_flag 0x%x\n", + info, + picture_structure_name[pic->structure], + pic->coded_frame ? "Frame" : "Field", + (slice_type < 0 || + slice_type >= (sizeof(slice_type_name) / sizeof(slice_type_name[0]))) ? 
"" : slice_type_name[slice_type], + pic->mb_aff_frame_flag, + pic->poc, + pic->pic_num, + pic->buf_spec_num, + pic->data_flag); +} + +static void reset_process_time(struct vdec_h264_hw_s *hw) +{ + if (hw->start_process_time) { + unsigned process_time = + 1000 * (jiffies - hw->start_process_time) / HZ; + hw->start_process_time = 0; + if (process_time > max_process_time[DECODE_ID(hw)]) + max_process_time[DECODE_ID(hw)] = process_time; + } +} + +static void start_process_time(struct vdec_h264_hw_s *hw) +{ + hw->decode_timeout_count = 10; + hw->start_process_time = jiffies; +} + +static void config_aux_buf(struct vdec_h264_hw_s *hw) +{ + WRITE_VREG(H264_AUX_ADR, hw->aux_phy_addr); + WRITE_VREG(H264_AUX_DATA_SIZE, + ((hw->prefix_aux_size >> 4) << 16) | + (hw->suffix_aux_size >> 4) + ); +} + +/* +* dv_meta_flag: 1, dolby meta only; 2, not include dolby meta +*/ +static void set_aux_data(struct vdec_h264_hw_s *hw, + struct StorablePicture *pic, unsigned char suffix_flag, + unsigned char dv_meta_flag, struct vdec_h264_hw_s *hw_b) +{ + int i; + unsigned short *aux_adr; + unsigned size_reg_val = + READ_VREG(H264_AUX_DATA_SIZE); + unsigned aux_count = 0; + int aux_size = 0; + struct vdec_h264_hw_s *hw_buf = hw_b ? 
hw_b : hw; + if (pic == NULL || pic->buf_spec_num < 0 || pic->buf_spec_num >= BUFSPEC_POOL_SIZE + || (!is_buf_spec_in_use(hw, pic->buf_spec_num))) + return; + + if (suffix_flag) { + aux_adr = (unsigned short *) + (hw_buf->aux_addr + + hw_buf->prefix_aux_size); + aux_count = + ((size_reg_val & 0xffff) << 4) + >> 1; + aux_size = + hw_buf->suffix_aux_size; + } else { + aux_adr = + (unsigned short *)hw_buf->aux_addr; + aux_count = + ((size_reg_val >> 16) << 4) + >> 1; + aux_size = + hw_buf->prefix_aux_size; + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_SEI_DETAIL)) { + dpb_print(DECODE_ID(hw), 0, + "%s:poc %d old size %d count %d,suf %d dv_flag %d\r\n", + __func__, pic->poc, AUX_DATA_SIZE(pic), + aux_count, suffix_flag, dv_meta_flag); + } + if (aux_size > 0 && aux_count > 0) { + int heads_size = 0; + int new_size; + char *new_buf; + for (i = 0; i < aux_count; i++) { + unsigned char tag = aux_adr[i] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + heads_size += 8; + else if (dv_meta_flag == 1 && tag == 0x1) + heads_size += 8; + else if (dv_meta_flag == 2 && tag != 0x1) + heads_size += 8; + } + } + new_size = AUX_DATA_SIZE(pic) + aux_count + heads_size; + new_buf = krealloc(AUX_DATA_BUF(pic), + new_size, + GFP_KERNEL); + if (new_buf) { + unsigned char valid_tag = 0; + unsigned char *h = + new_buf + + AUX_DATA_SIZE(pic); + unsigned char *p = h + 8; + int len = 0; + int padding_len = 0; + AUX_DATA_BUF(pic) = new_buf; + for (i = 0; i < aux_count; i += 4) { + int ii; + unsigned char tag = aux_adr[i + 3] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + valid_tag = 1; + else if (dv_meta_flag == 1 + && tag == 0x1) + valid_tag = 1; + else if (dv_meta_flag == 2 + && tag != 0x1) + valid_tag = 1; + else + valid_tag = 0; + if (valid_tag && len > 0) { + AUX_DATA_SIZE(pic) += + (len + 8); + h[0] = + (len >> 24) & 0xff; + h[1] = + (len >> 16) & 0xff; + h[2] = + (len >> 8) & 0xff; + h[3] = + (len >> 0) & 0xff; + h[6] = + (padding_len >> 8) + & 
0xff; + h[7] = + (padding_len) & 0xff; + h += (len + 8); + p += 8; + len = 0; + padding_len = 0; + } + if (valid_tag) { + h[4] = tag; + h[5] = 0; + h[6] = 0; + h[7] = 0; + } + } + if (valid_tag) { + for (ii = 0; ii < 4; ii++) { + unsigned short aa = + aux_adr[i + 3 + - ii]; + *p = aa & 0xff; + p++; + len++; + /*if ((aa >> 8) == 0xff) + padding_len++;*/ + } + } + } + if (len > 0) { + AUX_DATA_SIZE(pic) += (len + 8); + h[0] = (len >> 24) & 0xff; + h[1] = (len >> 16) & 0xff; + h[2] = (len >> 8) & 0xff; + h[3] = (len >> 0) & 0xff; + h[6] = (padding_len >> 8) & 0xff; + h[7] = (padding_len) & 0xff; + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_SEI_DETAIL)) { + dpb_print(DECODE_ID(hw), 0, + "aux: (size %d) suffix_flag %d\n", + AUX_DATA_SIZE(pic), suffix_flag); + for (i = 0; i < AUX_DATA_SIZE(pic); i++) { + dpb_print_cont(DECODE_ID(hw), 0, + "%02x ", AUX_DATA_BUF(pic)[i]); + if (((i + 1) & 0xf) == 0) + dpb_print_cont( + DECODE_ID(hw), + 0, "\n"); + } + dpb_print_cont(DECODE_ID(hw), + 0, "\n"); + } + + } + } + +} + +static void release_aux_data(struct vdec_h264_hw_s *hw, + int buf_spec_num) +{ + kfree(hw->buffer_spec[buf_spec_num].aux_data_buf); + hw->buffer_spec[buf_spec_num].aux_data_buf = NULL; + hw->buffer_spec[buf_spec_num].aux_data_size = 0; +} + +static void dump_aux_buf(struct vdec_h264_hw_s *hw) +{ + int i; + unsigned short *aux_adr = + (unsigned short *) + hw->aux_addr; + unsigned aux_size = + (READ_VREG(H264_AUX_DATA_SIZE) + >> 16) << 4; + + if (hw->prefix_aux_size > 0) { + dpb_print(DECODE_ID(hw), + 0, + "prefix aux: (size %d)\n", + aux_size); + for (i = 0; i < + (aux_size >> 1); i++) { + dpb_print_cont(DECODE_ID(hw), + 0, + "%04x ", + *(aux_adr + i)); + if (((i + 1) & 0xf) + == 0) + dpb_print_cont( + DECODE_ID(hw), + 0, "\n"); + } + } + if (hw->suffix_aux_size > 0) { + aux_adr = (unsigned short *) + (hw->aux_addr + + hw->prefix_aux_size); + aux_size = + (READ_VREG(H264_AUX_DATA_SIZE) & 0xffff) + << 4; + dpb_print(DECODE_ID(hw), + 0, + "suffix aux: (size 
%d)\n", + aux_size); + for (i = 0; i < + (aux_size >> 1); i++) { + dpb_print_cont(DECODE_ID(hw), + 0, + "%04x ", *(aux_adr + i)); + if (((i + 1) & 0xf) == 0) + dpb_print_cont(DECODE_ID(hw), + 0, "\n"); + } + } +} + +#ifdef VDEC_DW + +struct vdec_dw_param_set{ + char dw_x_shrink_1st; + char dw_x_shrink_2nd; + char dw_x_shrink_3rd; + char dw_y_shrink_1st; + char dw_y_shrink_2nd; + char dw_y_shrink_3rd; + char dw_merge_8to16; + char dw_merge_16to32; + char dw_dma_blk_mode; + char dw_bwsave_mode; +}; +//#define FOR_LPDDR4_EFFICIENCY + +static void h264_vdec_dw_cfg(struct vdec_h264_hw_s *hw, int canvas_pos) +{ + u32 data32 = 0, stride = 0; + struct vdec_dw_param_set *p = NULL; + struct vdec_dw_param_set dw_param_set_pool[] = { + /*x1, x2, x3, y1, y2, y3, m8t6, m16to32 */ + //{0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, /* 1/1, 1/1 */ + {1, 0, 0, 0, 0, 0, 0, 0, 0, 1}, /* 1/2, 1/1 */ + {1, 0, 0, 1, 0, 0, 0, 0, 0, 1}, /* 1/2, 1/2 */ + //{1, 0, 0, 1, 1, 0, 0, 0, 0, 1}, /* 1/4, 1/2 */ + {2, 0, 1, 1, 3, 0, 0, 1, 0, 1}, /* 1/4, 1/4 */ + //{1, 1, 1, 0, 1, 1, 1, 1, 0, 1}, /*> 1080p 1/8, 1/4 */ + {1, 1, 1, 1, 1, 1, 1, 1, 0, 1}, /*> 1080p 1/8, 1/8 */ + }; + + if (IS_VDEC_DW(hw)) + p = &dw_param_set_pool[__ffs(IS_VDEC_DW(hw))]; + else + return; + + WRITE_VREG(MDEC_DOUBLEW_CFG3, + hw->buffer_spec[canvas_pos].vdec_dw_y_addr); // luma start address + WRITE_VREG(MDEC_DOUBLEW_CFG4, + hw->buffer_spec[canvas_pos].vdec_dw_u_addr); // chroma start address + + stride = ALIGN_WIDTH((hw->mb_width << 4) / (IS_VDEC_DW(hw))); + if ((IS_VDEC_DW(hw)) == 1) //width 1/2 + stride >>= 1; + data32 = (stride << 16) | stride; + WRITE_VREG(MDEC_DOUBLEW_CFG5, data32); // chroma stride | luma stride + + data32 = 0; + p->dw_dma_blk_mode = hw->canvas_mode; + data32 |= ((p->dw_x_shrink_1st << 0 ) | // 1st down-scale horizontal, 00:no-scale 01:1/2avg 10:left 11:right + (p->dw_y_shrink_1st << 2 ) | // 1st down-scale vertical, 00:no-scale 01:1/2avg 10:up 11:down + (p->dw_x_shrink_2nd << 4 ) | // 2nd down-scale horizontal, 
00:no-scale 01:1/2avg 10:left 11:right + (p->dw_y_shrink_2nd << 6 ) | // 2nd down-scale vertical, 00:no-scale 01:1/2avg 10:up 11:down + (p->dw_x_shrink_3rd << 8 ) | // 3rd down-scale horizontal, 00:no-scale 01:1/2avg 10:left 11:right + (p->dw_y_shrink_3rd << 10) | // 3rd down-scale vertical, 00:no-scale 01:1/2avg 10:up 11:down + (p->dw_merge_8to16 << 12 ) | // 8->16 horizontal block merge for better ddr efficiency + (p->dw_merge_16to32 << 13) | // 16->32 horizontal block merge for better ddr efficiency + (p->dw_dma_blk_mode << 14) | // DMA block mode, 0:linear 1:32x32 2:64x32 +#ifdef FOR_LPDDR4_EFFICIENCY + (1 << 19) | +#endif + (p->dw_bwsave_mode << 22)); // Save line buffers to save band width + WRITE_VREG(MDEC_DOUBLEW_CFG1, data32); // add some special tests here + + data32 = 0; + data32 |= (1 << 0) | (0 << 27); + WRITE_VREG(MDEC_DOUBLEW_CFG0, data32); // Double Write Enable | source from dblk + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "vdec_double_write mode %d\n", + IS_VDEC_DW(hw)); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "param {%d, %d, %d, %d, %d, %d, %d, %d, %d}\n", + p->dw_x_shrink_1st, + p->dw_y_shrink_1st, + p->dw_x_shrink_2nd, + p->dw_y_shrink_2nd, + p->dw_x_shrink_3rd, + p->dw_y_shrink_3rd, + p->dw_merge_8to16, + p->dw_merge_16to32, + p->dw_dma_blk_mode); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "cfg0,1,3,4,5 = {%x, %x, %x, %x, %x}\n", + READ_VREG(MDEC_DOUBLEW_CFG0), + READ_VREG(MDEC_DOUBLEW_CFG1), + READ_VREG(MDEC_DOUBLEW_CFG3), + READ_VREG(MDEC_DOUBLEW_CFG4), + READ_VREG(MDEC_DOUBLEW_CFG5)); +} +#endif + +static void config_decode_mode(struct vdec_h264_hw_s *hw) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + struct vdec_s *vdec = hw_to_vdec(hw); +#endif + if (input_frame_based(hw_to_vdec(hw))) + WRITE_VREG(H264_DECODE_MODE, + DECODE_MODE_MULTI_FRAMEBASE); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (vdec->slave) + WRITE_VREG(H264_DECODE_MODE, + (hw->got_valid_nal << 8) | + 
DECODE_MODE_MULTI_DVBAL); + else if (vdec->master) + WRITE_VREG(H264_DECODE_MODE, + (hw->got_valid_nal << 8) | + DECODE_MODE_MULTI_DVENL); +#endif + else + WRITE_VREG(H264_DECODE_MODE, + DECODE_MODE_MULTI_STREAMBASE); + WRITE_VREG(H264_DECODE_SEQINFO, + hw->seq_info2); + WRITE_VREG(HEAD_PADING_REG, 0); + + if (hw->init_flag == 0) + WRITE_VREG(INIT_FLAG_REG, 0); + else + WRITE_VREG(INIT_FLAG_REG, 1); +} + +int config_decode_buf(struct vdec_h264_hw_s *hw, struct StorablePicture *pic) +{ + /* static int count = 0; */ + int ret = 0; + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + struct Slice *pSlice = &(p_H264_Dpb->mSlice); + unsigned int colocate_adr_offset; + unsigned int val; + struct StorablePicture *last_pic = hw->last_dec_picture; + +#ifdef ONE_COLOCATE_BUF_PER_DECODE_BUF + int colocate_buf_index; +#endif +#define H264_BUFFER_INFO_INDEX PMV3_X /* 0xc24 */ +#define H264_BUFFER_INFO_DATA PMV2_X /* 0xc22 */ +#define H264_CURRENT_POC_IDX_RESET LAST_SLICE_MV_ADDR /* 0xc30 */ +#define H264_CURRENT_POC LAST_MVY /* 0xc32 shared with conceal MV */ + +#define H264_CO_MB_WR_ADDR VLD_C38 /* 0xc38 */ +/* bit 31:30 -- L1[0] picture coding structure, + * 00 - top field, 01 - bottom field, + * 10 - frame, 11 - mbaff frame + * bit 29 - L1[0] top/bot for B field pciture , 0 - top, 1 - bot + * bit 28:0 h264_co_mb_mem_rd_addr[31:3] + * -- only used for B Picture Direct mode [2:0] will set to 3'b000 + */ +#define H264_CO_MB_RD_ADDR VLD_C39 /* 0xc39 */ + +/* bit 15 -- flush co_mb_data to DDR -- W-Only + * bit 14 -- h264_co_mb_mem_wr_addr write Enable -- W-Only + * bit 13 -- h264_co_mb_info_wr_ptr write Enable -- W-Only + * bit 9 -- soft_reset -- W-Only + * bit 8 -- upgent + * bit 7:2 -- h264_co_mb_mem_wr_addr + * bit 1:0 -- h264_co_mb_info_wr_ptr + */ +#define H264_CO_MB_RW_CTL VLD_C3D /* 0xc3d */ +#define DCAC_DDR_BYTE64_CTL 0x0e1d + unsigned long canvas_adr; + unsigned int ref_reg_val; + unsigned int one_ref_cfg = 0; + int h264_buffer_info_data_write_count; + int i, j; + unsigned 
int colocate_wr_adr; + unsigned int colocate_rd_adr; + unsigned char use_direct_8x8; + int canvas_pos; + canvas_pos = hw->buffer_spec[pic->buf_spec_num].canvas_pos; + WRITE_VREG(H264_CURRENT_POC_IDX_RESET, 0); + WRITE_VREG(H264_CURRENT_POC, pic->frame_poc); + WRITE_VREG(H264_CURRENT_POC, pic->top_poc); + WRITE_VREG(H264_CURRENT_POC, pic->bottom_poc); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "%s: pic_num is %d, poc is %d (%d, %d, %d), buf_spec_num %d canvas_pos %d\n", + __func__, pic->pic_num, pic->poc, pic->frame_poc, + pic->top_poc, pic->bottom_poc, pic->buf_spec_num, + canvas_pos); + print_pic_info(DECODE_ID(hw), "cur", pic, pSlice->slice_type); + +#ifdef VDEC_DW + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T7) { + if (IS_VDEC_DW(hw) && pic->mb_aff_frame_flag) + WRITE_VREG(MDEC_DOUBLEW_CFG0, + (READ_VREG(MDEC_DOUBLEW_CFG0) & (~(1 << 30)))); + } +#endif + WRITE_VREG(CURR_CANVAS_CTRL, canvas_pos << 24); + canvas_adr = READ_VREG(CURR_CANVAS_CTRL) & 0xffffff; + + if (!hw->mmu_enable) { + WRITE_VREG(REC_CANVAS_ADDR, canvas_adr); + WRITE_VREG(DBKR_CANVAS_ADDR, canvas_adr); + WRITE_VREG(DBKW_CANVAS_ADDR, canvas_adr); +#ifdef VDEC_DW + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T7) { + WRITE_VREG(MDEC_DOUBLEW_CFG1, + (hw->buffer_spec[canvas_pos].vdec_dw_y_canvas_index | + (hw->buffer_spec[canvas_pos].vdec_dw_u_canvas_index << 8))); + } else { + h264_vdec_dw_cfg(hw, canvas_pos); + } +#endif + } else + hevc_sao_set_pic_buffer(hw, pic); + + if (pic->mb_aff_frame_flag) + hw->buffer_spec[pic->buf_spec_num].info0 = 0xf4c0; + else if (pic->structure == TOP_FIELD) + hw->buffer_spec[pic->buf_spec_num].info0 = 0xf400; + else if (pic->structure == BOTTOM_FIELD) + hw->buffer_spec[pic->buf_spec_num].info0 = 0xf440; + else + hw->buffer_spec[pic->buf_spec_num].info0 = 0xf480; + + if (pic->bottom_poc < pic->top_poc) + hw->buffer_spec[pic->buf_spec_num].info0 |= 0x100; + + hw->buffer_spec[pic->buf_spec_num].info1 = pic->top_poc; + 
hw->buffer_spec[pic->buf_spec_num].info2 = pic->bottom_poc; + WRITE_VREG(H264_BUFFER_INFO_INDEX, 16); + + for (j = 0; j < hw->dpb.mDPB.size; j++) { + int long_term_flag; + i = get_buf_spec_by_canvas_pos(hw, j); + if (i < 0) + break; + long_term_flag = + get_long_term_flag_by_buf_spec_num(p_H264_Dpb, i); + if (long_term_flag > 0) { + if (long_term_flag & 0x1) + hw->buffer_spec[i].info0 |= (1 << 4); + else + hw->buffer_spec[i].info0 &= ~(1 << 4); + + if (long_term_flag & 0x2) + hw->buffer_spec[i].info0 |= (1 << 5); + else + hw->buffer_spec[i].info0 &= ~(1 << 5); + } + + if (i == pic->buf_spec_num) + WRITE_VREG(H264_BUFFER_INFO_DATA, + hw->buffer_spec[i].info0 | 0xf); + else + WRITE_VREG(H264_BUFFER_INFO_DATA, + hw->buffer_spec[i].info0); + WRITE_VREG(H264_BUFFER_INFO_DATA, hw->buffer_spec[i].info1); + WRITE_VREG(H264_BUFFER_INFO_DATA, hw->buffer_spec[i].info2); + } + + /* config reference buffer */ + if (hw->mmu_enable) { + hevc_mcr_config_mc_ref(hw); + hevc_mcr_config_mcrcc(hw); + } + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "list0 size %d\n", pSlice->listXsize[0]); + WRITE_VREG(H264_BUFFER_INFO_INDEX, 0); + ref_reg_val = 0; + j = 0; + h264_buffer_info_data_write_count = 0; + + //disable this read cache when frame width <= 64 (4MBs) + //IQIDCT_CONTROL, bit[16] dcac_dma_read_cache_disable + if (hw->frame_width <= 64) { + SET_VREG_MASK(IQIDCT_CONTROL,(1 << 16)); + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)) + // Disable DDR_BYTE64_CACHE + WRITE_VREG(DCAC_DDR_BYTE64_CTL, + (READ_VREG(DCAC_DDR_BYTE64_CTL) & (~0xf)) | 0xa); + } + else + CLEAR_VREG_MASK(IQIDCT_CONTROL,(1 << 16)); + + if (last_pic) + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, + "last_pic->data_flag %x slice_type %x last_pic->slice_type %x\n", + last_pic->data_flag, pSlice->slice_type, last_pic->slice_type); + if (!hw->i_only && !(error_proc_policy & 0x2000) && + last_pic && (last_pic->data_flag & ERROR_FLAG) + && (!(last_pic->slice_type == B_SLICE)) + && (!(pSlice->slice_type 
== I_SLICE))) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, + "no i/idr error mark\n"); + hw->data_flag |= ERROR_FLAG; + pic->data_flag |= ERROR_FLAG; + } + + for (i = 0; i < (unsigned int)(pSlice->listXsize[0]); i++) { + /*ref list 0 */ + struct StorablePicture *ref = pSlice->listX[0][i]; + unsigned int cfg; + /* bit[6:5] - frame/field info, + * 01 - top, 10 - bottom, 11 - frame + */ + #ifdef ERROR_CHECK + if (ref == NULL) { + hw->data_flag |= ERROR_FLAG; + pic->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " ref list0 NULL\n"); + return -1; + } + if ((ref->data_flag & ERROR_FLAG) && ref_frame_mark_flag[DECODE_ID(hw)]) { + hw->data_flag |= ERROR_FLAG; + pic->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " ref error mark1 \n"); + } + + if (error_proc_policy & 0x80000) { + if (ref_b_frame_error_max_count && + ref->slice_type == B_SLICE) { + if (ref->data_flag & ERROR_FLAG) + hw->b_frame_error_count++; + else + hw->b_frame_error_count = 0; + if (hw->b_frame_error_count > ref_b_frame_error_max_count) { + hw->b_frame_error_count = 0; + dpb_print(DECODE_ID(hw), 0, + "error %d B frame, reset dpb buffer\n", + ref_b_frame_error_max_count); + return -1; + } + } + } + + if (ref->data_flag & NULL_FLAG) + hw->data_flag |= NULL_FLAG; +#endif + canvas_pos = hw->buffer_spec[ref->buf_spec_num].canvas_pos; + + if (ref->structure == TOP_FIELD) + cfg = 0x1; + else if (ref->structure == BOTTOM_FIELD) + cfg = 0x2; + else /* FRAME */ + cfg = 0x3; + + one_ref_cfg = (canvas_pos & 0x1f) | (cfg << 5); + ref_reg_val <<= 8; + ref_reg_val |= one_ref_cfg; + j++; + + if (j == 4) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "H264_BUFFER_INFO_DATA: %x\n", ref_reg_val); + WRITE_VREG(H264_BUFFER_INFO_DATA, ref_reg_val); + h264_buffer_info_data_write_count++; + j = 0; + } + print_pic_info(DECODE_ID(hw), "list0", + pSlice->listX[0][i], -1); + } + if (j != 0) { + while (j != 4) { + ref_reg_val <<= 8; + ref_reg_val |= 
one_ref_cfg; + j++; + } + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "H264_BUFFER_INFO_DATA: %x\n", + ref_reg_val); + WRITE_VREG(H264_BUFFER_INFO_DATA, ref_reg_val); + h264_buffer_info_data_write_count++; + } + ref_reg_val = (one_ref_cfg << 24) | (one_ref_cfg<<16) | + (one_ref_cfg << 8) | one_ref_cfg; + for (i = h264_buffer_info_data_write_count; i < 8; i++) + WRITE_VREG(H264_BUFFER_INFO_DATA, ref_reg_val); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "list1 size %d\n", pSlice->listXsize[1]); + WRITE_VREG(H264_BUFFER_INFO_INDEX, 8); + ref_reg_val = 0; + j = 0; + + for (i = 0; i < (unsigned int)(pSlice->listXsize[1]); i++) { + /* ref list 0 */ + struct StorablePicture *ref = pSlice->listX[1][i]; + unsigned int cfg; + /* bit[6:5] - frame/field info, + * 01 - top, 10 - bottom, 11 - frame + */ + + #ifdef ERROR_CHECK + if (ref == NULL) { + hw->data_flag |= ERROR_FLAG; + pic->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " ref error list1 NULL\n"); + return -2; + } + if ((ref->data_flag & ERROR_FLAG) && (ref_frame_mark_flag[DECODE_ID(hw)])) { + pic->data_flag |= ERROR_FLAG; + hw->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " ref error mark2\n"); + } + if (ref->data_flag & NULL_FLAG) + hw->data_flag |= NULL_FLAG; +#endif + canvas_pos = hw->buffer_spec[ref->buf_spec_num].canvas_pos; + if (ref->structure == TOP_FIELD) + cfg = 0x1; + else if (ref->structure == BOTTOM_FIELD) + cfg = 0x2; + else /* FRAME */ + cfg = 0x3; + one_ref_cfg = (canvas_pos & 0x1f) | (cfg << 5); + ref_reg_val <<= 8; + ref_reg_val |= one_ref_cfg; + j++; + + if (j == 4) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "H264_BUFFER_INFO_DATA: %x\n", + ref_reg_val); + WRITE_VREG(H264_BUFFER_INFO_DATA, ref_reg_val); + j = 0; + } + print_pic_info(DECODE_ID(hw), "list1", + pSlice->listX[1][i], -1); + } + if (j != 0) { + while (j != 4) { + ref_reg_val <<= 8; + ref_reg_val |= one_ref_cfg; + j++; + } + dpb_print(DECODE_ID(hw), 
PRINT_FLAG_DEC_DETAIL, + "H264_BUFFER_INFO_DATA: %x\n", ref_reg_val); + WRITE_VREG(H264_BUFFER_INFO_DATA, ref_reg_val); + } + + /* configure co-locate buffer */ + while ((READ_VREG(H264_CO_MB_RW_CTL) >> 11) & 0x1) + ; + if ((pSlice->mode_8x8_flags & 0x4) && + (pSlice->mode_8x8_flags & 0x2)) + use_direct_8x8 = 1; + else + use_direct_8x8 = 0; + +#ifndef ONE_COLOCATE_BUF_PER_DECODE_BUF + colocate_adr_offset = + ((pic->structure == FRAME && pic->mb_aff_frame_flag == 0) + ? 1 : 2) * 96; + if (use_direct_8x8) + colocate_adr_offset >>= 2; + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "colocate buf size of each mb 0x%x first_mb_in_slice 0x%x colocate_adr_offset 0x%x\r\n", + colocate_adr_offset, pSlice->first_mb_in_slice, + colocate_adr_offset * pSlice->first_mb_in_slice); + + colocate_adr_offset *= pSlice->first_mb_in_slice; + + if ((pic->colocated_buf_index >= 0) && + (pic->colocated_buf_index < p_H264_Dpb->colocated_buf_count)) { + colocate_wr_adr = p_H264_Dpb->colocated_mv_addr_start + + ((p_H264_Dpb->colocated_buf_size * + pic->colocated_buf_index) + >> (use_direct_8x8 ? 
2 : 0)); + if ((colocate_wr_adr + p_H264_Dpb->colocated_buf_size) > + p_H264_Dpb->colocated_mv_addr_end) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "Error, colocate buf is not enough, index is %d\n", + pic->colocated_buf_index); + ret = -3; + } + val = colocate_wr_adr + colocate_adr_offset; + WRITE_VREG(H264_CO_MB_WR_ADDR, val); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "WRITE_VREG(H264_CO_MB_WR_ADDR) = %x, first_mb_in_slice %x pic_structure %x colocate_adr_offset %x mode_8x8_flags %x colocated_buf_size %x\n", + val, pSlice->first_mb_in_slice, pic->structure, + colocate_adr_offset, pSlice->mode_8x8_flags, + p_H264_Dpb->colocated_buf_size); + } else { + WRITE_VREG(H264_CO_MB_WR_ADDR, 0xffffffff); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "WRITE_VREG(H264_CO_MB_WR_ADDR) = 0xffffffff\n"); + } +#else + colocate_buf_index = hw->buffer_spec[pic->buf_spec_num].canvas_pos; + colocate_adr_offset = + ((pic->structure == FRAME && pic->mb_aff_frame_flag == 0) ? 1 : 2) * 96; + if (use_direct_8x8) + colocate_adr_offset >>= 2; + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "colocate buf size of each mb 0x%x first_mb_in_slice 0x%x colocate_adr_offset 0x%x\r\n", + colocate_adr_offset, pSlice->first_mb_in_slice, + colocate_adr_offset * pSlice->first_mb_in_slice); + + colocate_adr_offset *= pSlice->first_mb_in_slice; + + colocate_wr_adr = p_H264_Dpb->colocated_mv_addr_start + + ((p_H264_Dpb->colocated_buf_size * colocate_buf_index) >> + (use_direct_8x8 ? 
2 : 0)); + + if ((colocate_wr_adr + p_H264_Dpb->colocated_buf_size) > + p_H264_Dpb->colocated_mv_addr_end) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "Error, colocate buf is not enough, col buf index is %d\n", + colocate_buf_index); + ret = -4; + } + val = colocate_wr_adr + colocate_adr_offset; + WRITE_VREG(H264_CO_MB_WR_ADDR, val); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "WRITE_VREG(H264_CO_MB_WR_ADDR) = %x, first_mb_in_slice %x pic_structure %x colocate_adr_offset %x mode_8x8_flags %x colocated_buf_size %x\n", + val, pSlice->first_mb_in_slice, pic->structure, + colocate_adr_offset, pSlice->mode_8x8_flags, + p_H264_Dpb->colocated_buf_size); +#endif + if (pSlice->listXsize[1] > 0) { + struct StorablePicture *colocate_pic = pSlice->listX[1][0]; + /* H264_CO_MB_RD_ADDR[bit 31:30], + * original picture structure of L1[0], + * 00 - top field, 01 - bottom field, + * 10 - frame, 11 - mbaff frame + */ + int l10_structure, cur_structure; + int cur_colocate_ref_type; + /* H264_CO_MB_RD_ADDR[bit 29], top/bot for B field pciture, + * 0 - top, 1 - bot + */ + unsigned int val; + unsigned int colocate_rd_adr_offset; + unsigned int mby_mbx; + unsigned int mby, mbx; + +#ifdef ERROR_CHECK + if (colocate_pic == NULL) { + hw->data_flag |= ERROR_FLAG; + pic->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " colocate error pic NULL\n"); + return -5; + } + if (colocate_pic->data_flag & ERROR_FLAG) { + pic->data_flag |= ERROR_FLAG; + hw->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " colocare ref error mark\n"); + } + if (colocate_pic->data_flag & NULL_FLAG) + hw->data_flag |= NULL_FLAG; +#endif + + if (colocate_pic->mb_aff_frame_flag) + l10_structure = 3; + else { + if (colocate_pic->coded_frame) + l10_structure = 2; + else + l10_structure = (colocate_pic->structure == + BOTTOM_FIELD) ? 
1 : 0; + } + + //ALLEGRO_FIX, ported from single mode ucode + mby_mbx = READ_VREG(MBY_MBX); + mby = pSlice->first_mb_in_slice / hw->mb_width; + mbx = pSlice->first_mb_in_slice % hw->mb_width; + if (pic->mb_aff_frame_flag) + cur_structure = 3; + else { + if (pic->coded_frame) + cur_structure = 2; + else + cur_structure = (pic->structure == + BOTTOM_FIELD) ? 1 : 0; + } + if (cur_structure < 2) { + //current_field_structure + if (l10_structure != 2) { + colocate_rd_adr_offset = pSlice->first_mb_in_slice * 2; + } else { + // field_ref_from_frame co_mv_rd_addr : + // mby*2*mb_width + mbx + colocate_rd_adr_offset = mby * 2 * hw->mb_width + mbx; + } + + } else { + //current_frame_structure + if (l10_structure < 2) { + //calculate_co_mv_offset_frame_ref_field: + // frame_ref_from_field co_mv_rd_addr : + // (mby/2*mb_width+mbx)*2 + colocate_rd_adr_offset = ((mby / 2) * hw->mb_width + mbx) * 2; + } else if (cur_structure == 2) { + colocate_rd_adr_offset = pSlice->first_mb_in_slice; + } else { + //mbaff frame case1196 + colocate_rd_adr_offset = pSlice->first_mb_in_slice * 2; + } + + } + + colocate_rd_adr_offset *= 96; + if (use_direct_8x8) + colocate_rd_adr_offset >>= 2; + + if (colocate_old_cal) + colocate_rd_adr_offset = colocate_adr_offset; + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "first_mb_in_slice 0x%x 0x%x 0x%x (MBY_MBX reg 0x%x) use_direct_8x8 %d cur %d (mb_aff_frame_flag %d, coded_frame %d structure %d) col %d (mb_aff_frame_flag %d, coded_frame %d structure %d) offset 0x%x rdoffset 0x%x\n", + pSlice->first_mb_in_slice, mby, mbx, mby_mbx, use_direct_8x8, + cur_structure, pic->mb_aff_frame_flag, pic->coded_frame, pic->structure, + l10_structure, colocate_pic->mb_aff_frame_flag, colocate_pic->coded_frame, colocate_pic->structure, + colocate_adr_offset, + colocate_rd_adr_offset); + +#if 0 + /*case0016, p16, + *cur_colocate_ref_type should be configured base on current pic + */ + if (pic->structure == FRAME && + pic->mb_aff_frame_flag) + cur_colocate_ref_type = 
0; + else if (pic->structure == BOTTOM_FIELD) + cur_colocate_ref_type = 1; + else + cur_colocate_ref_type = 0; +#else + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + " CUR TMP DEBUG : mb_aff_frame_flag : %d, structure : %d coded_frame %d\n", + pic->mb_aff_frame_flag, + pic->structure, + pic->coded_frame); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + " COL TMP DEBUG : mb_aff_frame_flag : %d, structure : %d coded_frame %d\n", + colocate_pic->mb_aff_frame_flag, + colocate_pic->structure, + colocate_pic->coded_frame); + if (pic->structure == FRAME || pic->mb_aff_frame_flag) { + cur_colocate_ref_type = + (abs(pic->poc - colocate_pic->top_poc) + < abs(pic->poc - + colocate_pic->bottom_poc)) ? 0 : 1; + } else + cur_colocate_ref_type = + (colocate_pic->structure + == BOTTOM_FIELD) ? 1 : 0; +#endif + +#ifndef ONE_COLOCATE_BUF_PER_DECODE_BUF + if ((colocate_pic->colocated_buf_index >= 0) && + (colocate_pic->colocated_buf_index < + p_H264_Dpb->colocated_buf_count)) { + colocate_rd_adr = p_H264_Dpb->colocated_mv_addr_start + + ((p_H264_Dpb->colocated_buf_size * + colocate_pic->colocated_buf_index) + >> (use_direct_8x8 ? 
2 : 0)); + if ((colocate_rd_adr + p_H264_Dpb->colocated_buf_size) > + p_H264_Dpb->colocated_mv_addr_end) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, + "Error, colocate buf is not enough, index is %d\n", + colocate_pic->colocated_buf_index); + ret = -6; + } + /* bit 31:30 -- L1[0] picture coding structure, + * 00 - top field, 01 - bottom field, + * 10 - frame, 11 - mbaff frame + * bit 29 - L1[0] top/bot for B field pciture, + * 0 - top, 1 - bot + * bit 28:0 h264_co_mb_mem_rd_addr[31:3] + * -- only used for B Picture Direct mode + * [2:0] will set to 3'b000 + */ + /* #define H264_CO_MB_RD_ADDR VLD_C39 0xc39 */ + val = ((colocate_rd_adr+colocate_rd_adr_offset) >> 3) | + (l10_structure << 30) | + (cur_colocate_ref_type << 29); + WRITE_VREG(H264_CO_MB_RD_ADDR, val); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "co idx %d, WRITE_VREG(H264_CO_MB_RD_ADDR) = %x, addr %x L1(0) pic_structure %d mbaff %d\n", + colocate_pic->colocated_buf_index, + val, colocate_rd_adr + colocate_rd_adr_offset, + colocate_pic->structure, + colocate_pic->mb_aff_frame_flag); + } else { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "Error, reference pic has no colocated buf\n"); + ret = -7; + } +#else + colocate_buf_index = + hw->buffer_spec[colocate_pic->buf_spec_num].canvas_pos; + colocate_rd_adr = p_H264_Dpb->colocated_mv_addr_start + + ((p_H264_Dpb->colocated_buf_size * + colocate_buf_index) + >> (use_direct_8x8 ? 
2 : 0)); + if ((colocate_rd_adr + p_H264_Dpb->colocated_buf_size) > + p_H264_Dpb->colocated_mv_addr_end) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "Error, colocate buf is not enough, col buf index is %d\n", + colocate_buf_index); + ret = -8; + } + /* bit 31:30 -- L1[0] picture coding structure, + * 00 - top field, 01 - bottom field, + * 10 - frame, 11 - mbaff frame + * bit 29 - L1[0] top/bot for B field pciture, + * 0 - top, 1 - bot + * bit 28:0 h264_co_mb_mem_rd_addr[31:3] + * -- only used for B Picture Direct mode + * [2:0] will set to 3'b000 + */ + /* #define H264_CO_MB_RD_ADDR VLD_C39 0xc39 */ + val = ((colocate_rd_adr+colocate_rd_adr_offset)>>3) | + (l10_structure << 30) | (cur_colocate_ref_type << 29); + WRITE_VREG(H264_CO_MB_RD_ADDR, val); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "WRITE_VREG(H264_CO_MB_RD_ADDR) = %x, L1(0) pic_structure %d mbaff %d\n", + val, colocate_pic->structure, + colocate_pic->mb_aff_frame_flag); +#endif + } + return ret; +} + +static int vh264_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + struct vdec_s *vdec = op_arg; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + + spin_lock_irqsave(&hw->lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&hw->newframe_q); + states->buf_avail_num = kfifo_len(&hw->display_q); + + spin_unlock_irqrestore(&hw->lock, flags); + + return 0; +} + +static struct vframe_s *vh264_vf_peek(void *op_arg) +{ + struct vframe_s *vf[2] = {0, 0}; + struct vdec_s *vdec = op_arg; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + + if (!hw) + return NULL; + + if (force_disp_bufspec_num & 0x100) { + if (force_disp_bufspec_num & 0x200) + return NULL; + return &hw->vframe_dummy; + } + + if (kfifo_len(&hw->display_q) > VF_POOL_SIZE) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "kfifo len:%d invaild, peek error\n", + kfifo_len(&hw->display_q)); + return NULL; + } + + if 
(kfifo_out_peek(&hw->display_q, (void *)&vf, 2)) { + if (vf[1]) { + vf[0]->next_vf_pts_valid = true; + vf[0]->next_vf_pts = vf[1]->pts; + } else + vf[0]->next_vf_pts_valid = false; + return vf[0]; + } + + return NULL; +} + +static struct vframe_s *vh264_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_s *vdec = op_arg; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + ulong nv_order = VIDTYPE_VIU_NV21; + + if (!hw) + return NULL; + + /* swap uv */ + if (hw->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + nv_order = VIDTYPE_VIU_NV12; + } + + if (force_disp_bufspec_num & 0x100) { + int buffer_index = force_disp_bufspec_num & 0xff; + if (force_disp_bufspec_num & 0x200) + return NULL; + + vf = &hw->vframe_dummy; + vf->duration_pulldown = 0; + vf->pts = 0; + vf->pts_us64 = 0; + set_frame_info(hw, vf, buffer_index); + vf->flag = 0; + if (hw->mmu_enable) { + if (hw->double_write_mode & 0x10) { + /* double write only */ + vf->compBodyAddr = 0; + vf->compHeadAddr = 0; + } else { + /*head adr*/ + vf->compHeadAddr = + hw->buffer_spec[buffer_index].alloc_header_addr; + /*body adr*/ + vf->compBodyAddr = 0; + vf->canvas0Addr = vf->canvas1Addr = 0; + } + + vf->type = VIDTYPE_SCATTER; + + if (hw->double_write_mode) { + vf->type |= VIDTYPE_PROGRESSIVE + | VIDTYPE_VIU_FIELD; + vf->type |= nv_order; + if (hw->double_write_mode == 3) + vf->type |= VIDTYPE_COMPRESS; + + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + hw->buffer_spec[buffer_index]. + canvas_config[0]; + vf->canvas0_config[1] = + hw->buffer_spec[buffer_index]. + canvas_config[1]; + + vf->canvas1_config[0] = + hw->buffer_spec[buffer_index]. + canvas_config[0]; + vf->canvas1_config[1] = + hw->buffer_spec[buffer_index]. 
+ canvas_config[1]; + } else { + vf->type |= + VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; + vf->canvas0Addr = vf->canvas1Addr = 0; + } + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + + vf->compWidth = hw->frame_width; + vf->compHeight = hw->frame_height; + + if (hw->double_write_mode) { + vf->width = hw->frame_width / + get_double_write_ratio(hw->double_write_mode); + vf->height = hw->frame_height / + get_double_write_ratio(hw->double_write_mode); + } + } else { + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | + nv_order; + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(&hw->buffer_spec[buffer_index]); + } + + /*vf->mem_handle = decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, buffer_index);*/ + update_vf_memhandle(hw, vf, buffer_index); + force_disp_bufspec_num |= 0x200; + return vf; + } + + if (kfifo_get(&hw->display_q, &vf)) { + int time = jiffies; + unsigned int frame_interval = + 1000*(time - hw->last_frame_time)/HZ; + struct vframe_s *next_vf = NULL; + ATRACE_COUNTER(hw->trace.disp_q_name, kfifo_len(&hw->display_q)); + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_VDEC_DETAIL)) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int frame_index = FRAME_INDEX(vf->index); + if (frame_index < 0 || + frame_index >= DPB_SIZE_MAX) { + dpb_print(DECODE_ID(hw), 0, + "%s vf index 0x%x error\r\n", + __func__, vf->index); + } else { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "%s buf_spec_num %d vf %p poc %d dur %d pts %d interval %dms, ts: %lld\n", + __func__, BUFSPEC_INDEX(vf->index), vf, + p_H264_Dpb->mFrameStore[frame_index].poc, + vf->duration, vf->pts, frame_interval, vf->timestamp); + } + } + if (hw->last_frame_time > 0) { + if (frame_interval > + max_get_frame_interval[DECODE_ID(hw)]) + max_get_frame_interval[DECODE_ID(hw)] + = frame_interval; + } + hw->last_frame_time = time; + vf->index_disp = atomic_read(&hw->vf_get_count); + atomic_add(1, &hw->vf_get_count); + if (kfifo_peek(&hw->display_q, &next_vf) && next_vf) { + 
vf->next_vf_pts_valid = true; + vf->next_vf_pts = next_vf->pts; + } else + vf->next_vf_pts_valid = false; + + return vf; + } + + return NULL; +} + +static bool vf_valid_check(struct vframe_s *vf, struct vdec_h264_hw_s *hw) { + int i,j; + if (hw->is_used_v4l) + return true; + for (i = 0; i < VF_POOL_SIZE; i++) { + for (j = 0; j < VF_POOL_NUM; j ++) { + if (vf == &(hw->vfpool[j][i]) || vf == &hw->vframe_dummy) + return true; + } + } + dpb_print(DECODE_ID(hw), 0, " invalid vf been put, vf = %p\n", vf); + for (i = 0; i < VF_POOL_SIZE; i++) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "dump vf [%d]= %p\n", i, &(hw->vfpool[hw->cur_pool][i])); + } + return false; +} + +static void vh264_vf_put(struct vframe_s *vf, void *op_arg) +{ + struct vdec_s *vdec = op_arg; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + unsigned long flags; + int buf_spec_num; + int frame_index; + + if (vf == (&hw->vframe_dummy)) + return; + + if (!vf) + return; + + if (vf->index == -1) { + dpb_print(DECODE_ID(hw), 0, + "Warning: %s vf %p invalid index\r\n", + __func__, vf); + return; + } + + if (hw->enable_fence && vf->fence) { + int ret, i; + + mutex_lock(&hw->fence_mutex); + ret = dma_fence_get_status(vf->fence); + if (ret == 0) { + for (i = 0; i < VF_POOL_SIZE; i++) { + if (hw->fence_vf_s.fence_vf[i] == NULL) { + hw->fence_vf_s.fence_vf[i] = vf; + hw->fence_vf_s.used_size++; + mutex_unlock(&hw->fence_mutex); + return; + } + } + } + mutex_unlock(&hw->fence_mutex); + } + + buf_spec_num = BUFSPEC_INDEX(vf->index); + if (hw->enable_fence) + frame_index = hw->buffer_spec[buf_spec_num].fs_idx; + else + frame_index = FRAME_INDEX(vf->index); + + if (frame_index < 0 || + frame_index >= DPB_SIZE_MAX || + buf_spec_num < 0 || + buf_spec_num >= BUFSPEC_POOL_SIZE) { + dpb_print(DECODE_ID(hw), 0, + "%s vf index 0x%x error\r\n", + __func__, vf->index); + return; + } + /*get_buf_spec_idx_by_canvas_config(hw, + &vf->canvas0_config[0]);*/ + + if (hw->enable_fence && vf->fence) { + 
vdec_fence_put(vf->fence); + vf->fence = NULL; + } + + spin_lock_irqsave(&hw->bufspec_lock, flags); + if (hw->buffer_spec[buf_spec_num].used == 2) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s %p to fs[%d], poc %d buf_spec_num %d used %d vf_ref %d\n", + __func__, vf, frame_index, + p_H264_Dpb->mFrameStore[frame_index].poc, + buf_spec_num, + hw->buffer_spec[buf_spec_num].used, + hw->buffer_spec[buf_spec_num].vf_ref); + hw->buffer_spec[buf_spec_num].vf_ref--; + if (hw->buffer_spec[buf_spec_num].vf_ref <= 0) + set_frame_output_flag(&hw->dpb, frame_index); + } else { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s %p isolated vf, buf_spec_num %d used %d vf_ref %d\n", + __func__, vf, buf_spec_num, + hw->buffer_spec[buf_spec_num].used, + hw->buffer_spec[buf_spec_num].vf_ref); + hw->buffer_spec[buf_spec_num].vf_ref--; + if (hw->buffer_spec[buf_spec_num].vf_ref <= 0) { + if (hw->buffer_spec[buf_spec_num].used == 3) + hw->buffer_spec[buf_spec_num].used = 4; + else if (hw->buffer_spec[buf_spec_num].used == 5) + hw->buffer_spec[buf_spec_num].used = 0; + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + + } + + if (hw->is_used_v4l) { + struct buffer_spec_s *pic = &hw->buffer_spec[buf_spec_num]; + + if (vf->v4l_mem_handle != pic->cma_alloc_addr) + pic->cma_alloc_addr = vf->v4l_mem_handle; + } + + atomic_add(1, &hw->vf_put_count); + if (vf && (vf_valid_check(vf, hw) == true)) { + kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->trace.new_q_name, kfifo_len(&hw->newframe_q)); + } + +#define ASSIST_MBOX1_IRQ_REG VDEC_ASSIST_MBOX1_IRQ_REG + if (hw->buffer_empty_flag) + WRITE_VREG(ASSIST_MBOX1_IRQ_REG, 0x1); + spin_unlock_irqrestore(&hw->bufspec_lock, flags); +} + +void * vh264_get_bufspec_lock(struct vdec_s *vdec) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + if (hw) + return (&hw->bufspec_lock); + else + return 
NULL; +} +static int vh264_event_cb(int type, void *data, void *op_arg) +{ + unsigned long flags; + struct vdec_s *vdec = op_arg; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + + if (type & VFRAME_EVENT_RECEIVER_GET_AUX_DATA) { + struct provider_aux_req_s *req = + (struct provider_aux_req_s *)data; + int buf_spec_num; + + if (!req->vf) { + req->aux_size = atomic_read(&hw->vf_put_count); + return 0; + } + buf_spec_num = BUFSPEC_INDEX(req->vf->index); + spin_lock_irqsave(&hw->lock, flags); + req->aux_buf = NULL; + req->aux_size = 0; + if (buf_spec_num >= 0 && + buf_spec_num < BUFSPEC_POOL_SIZE && + is_buf_spec_in_disp_q(hw, buf_spec_num) + ) { + req->aux_buf = + hw->buffer_spec[buf_spec_num].aux_data_buf; + req->aux_size = + hw->buffer_spec[buf_spec_num].aux_data_size; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + req->dv_enhance_exist = + hw->buffer_spec[buf_spec_num].dv_enhance_exist; +#else + req->dv_enhance_exist = 0; +#endif + } + spin_unlock_irqrestore(&hw->lock, flags); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s(type 0x%x vf buf_spec_num 0x%x)=>size 0x%x\n", + __func__, type, buf_spec_num, req->aux_size); + } else if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(vdec); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +static void set_frame_info(struct vdec_h264_hw_s *hw, struct vframe_s *vf, + u32 index) +{ + struct canvas_config_s *p_canvas_config; + int force_rate = input_frame_based(hw_to_vdec(hw)) ? 
+ force_rate_framebase : force_rate_streambase; + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "%s (%d,%d) dur %d, vf %p, index %d\n", __func__, + hw->frame_width, hw->frame_height, hw->frame_dur, vf, index); + + /* signal_type */ + if (hw->video_signal_from_vui & VIDEO_SIGNAL_TYPE_AVAILABLE_MASK) { + vf->signal_type = hw->video_signal_from_vui; + if (hw->is_used_v4l) { + struct aml_vdec_hdr_infos hdr; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + memset(&hdr, 0, sizeof(hdr)); + hdr.signal_type = hw->video_signal_from_vui; + vdec_v4l_set_hdr_infos(ctx, &hdr); + } + } else + vf->signal_type = 0; + hw->video_signal_type = vf->signal_type; + + vf->width = hw->frame_width; + vf->height = hw->frame_height; + if (force_rate) { + if (force_rate == -1) + vf->duration = 0; + else + vf->duration = 96000/force_rate; + } else + vf->duration = hw->frame_dur; + vf->ratio_control = + (min(hw->h264_ar, (u32) DISP_RATIO_ASPECT_RATIO_MAX)) << + DISP_RATIO_ASPECT_RATIO_BIT; + vf->orientation = hw->vh264_rotation; + + vf->sidebind_type = hw->sidebind_type; + vf->sidebind_channel_id = hw->sidebind_channel_id; + + if (hw->mmu_enable) + return; + + vf->canvas0Addr = vf->canvas1Addr = -1; +#ifdef NV21 + vf->plane_num = 2; +#else + vf->plane_num = 3; +#endif + + if (IS_VDEC_DW(hw)) { + if (IS_VDEC_DW(hw) == 1) + vf->width = hw->frame_width / 2; + else + vf->width = (hw->frame_width / IS_VDEC_DW(hw)); + vf->height = (hw->frame_height / IS_VDEC_DW(hw)); + p_canvas_config = &hw->buffer_spec[index].vdec_dw_canvas_config[0]; + } else + p_canvas_config = &hw->buffer_spec[index].canvas_config[0]; + + vf->canvas0_config[0] = p_canvas_config[0]; + vf->canvas0_config[1] = p_canvas_config[1]; +#ifndef NV21 + vf->canvas0_config[2] = p_canvas_config[2]; +#endif + vf->canvas1_config[0] = p_canvas_config[0]; + vf->canvas1_config[1] = p_canvas_config[1]; +#ifndef NV21 + vf->canvas1_config[2] = p_canvas_config[2]; +#endif +} + +static void get_picture_qos_info(struct 
StorablePicture *picture) +{ + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + unsigned char a[3]; + unsigned char i, j, t; + unsigned long data; + + get_random_bytes(&data, sizeof(unsigned long)); + if (picture->slice_type == I_SLICE) + data = 0; + a[0] = data & 0xff; + a[1] = (data >> 8) & 0xff; + a[2] = (data >> 16) & 0xff; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_mv = a[2]; + picture->avg_mv = a[1]; + picture->min_mv = a[0]; + /* + pr_info("mv data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + */ + + get_random_bytes(&data, sizeof(unsigned long)); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_qp = a[2]; + picture->avg_qp = a[1]; + picture->min_qp = a[0]; + /* + pr_info("qp data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + */ + + get_random_bytes(&data, sizeof(unsigned long)); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_skip = a[2]; + picture->avg_skip = a[1]; + picture->min_skip = a[0]; + + + /* + pr_info("skip data %x a[0]= %x a[1]= %x a[2]= %x\n", + data,a[0], a[1], a[2]); + */ + } else { + uint32_t blk88_y_count; + uint32_t blk88_c_count; + uint32_t blk22_mv_count; + uint32_t rdata32; + int32_t mv_hi; + int32_t mv_lo; + uint32_t rdata32_l; + uint32_t mvx_L0_hi; + uint32_t mvy_L0_hi; + uint32_t mvx_L1_hi; + uint32_t mvy_L1_hi; + int64_t 
value; + uint64_t temp_value; +/* +#define DEBUG_QOS +*/ +#ifdef DEBUG_QOS + int pic_number = picture->poc; +#endif + + picture->max_mv = 0; + picture->avg_mv = 0; + picture->min_mv = 0; + + picture->max_skip = 0; + picture->avg_skip = 0; + picture->min_skip = 0; + + picture->max_qp = 0; + picture->avg_qp = 0; + picture->min_qp = 0; + + + + + + /* set rd_idx to 0 */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, 0); + blk88_y_count = READ_VREG(VDEC_PIC_QUALITY_DATA); + if (blk88_y_count == 0) { +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_y_sum */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_y_count, + rdata32, blk88_y_count); +#endif + picture->avg_qp = rdata32/blk88_y_count; + /* intra_y_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif + /* skipped_y_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif + picture->avg_skip = rdata32*100/blk88_y_count; + /* coeff_non_zero_y_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_y_count*1)), + '%', rdata32); +#endif + /* blk66_c_count */ + blk88_c_count = READ_VREG(VDEC_PIC_QUALITY_DATA); + if (blk88_c_count == 0) { +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_c_sum */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + 
pr_info(" [Picture %d Quality] C QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_c_count, + rdata32, blk88_c_count); +#endif + /* intra_c_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* skipped_cu_c_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* coeff_non_zero_c_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_c_count*1)), + '%', rdata32); +#endif + + /* 1'h0, qp_c_max[6:0], 1'h0, qp_c_min[6:0], + 1'h0, qp_y_max[6:0], 1'h0, qp_y_min[6:0] */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y QP min : %d\n", + pic_number, (rdata32>>0)&0xff); +#endif + picture->min_qp = (rdata32>>0)&0xff; + +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y QP max : %d\n", + pic_number, (rdata32>>8)&0xff); +#endif + picture->max_qp = (rdata32>>8)&0xff; + +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C QP min : %d\n", + pic_number, (rdata32>>16)&0xff); + pr_info(" [Picture %d Quality] C QP max : %d\n", + pic_number, (rdata32>>24)&0xff); +#endif + + /* blk22_mv_count */ + blk22_mv_count = READ_VREG(VDEC_PIC_QUALITY_DATA); + if (blk22_mv_count == 0) { +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] NO MV Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* mvy_L1_count[39:32], mvx_L1_count[39:32], + mvy_L0_count[39:32], mvx_L0_count[39:32] */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + /* should all be 0x00 or 0xff */ +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MV AVG High Bits: 0x%X\n", + pic_number, 
rdata32); +#endif + mvx_L0_hi = ((rdata32>>0)&0xff); + mvy_L0_hi = ((rdata32>>8)&0xff); + mvx_L1_hi = ((rdata32>>16)&0xff); + mvy_L1_hi = ((rdata32>>24)&0xff); + + /* mvx_L0_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvx_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvx_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + value = div_s64(value, blk22_mv_count); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L0 AVG : %d (%lld/%d)\n", + pic_number, (int)(value), + value, blk22_mv_count); +#endif + picture->avg_mv = value; + + /* mvy_L0_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvy_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvy_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVY_L0 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvx_L1_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvx_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvx_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvy_L1_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvy_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvy_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVY_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* {mvx_L0_max, mvx_L0_min} // format : {sign, abs[14:0]} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 
0x8000 - mv_hi; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L0 MAX : %d\n", + pic_number, mv_hi); +#endif + picture->max_mv = mv_hi; + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L0 MIN : %d\n", + pic_number, mv_lo); +#endif + picture->min_mv = mv_lo; + +#ifdef DEBUG_QOS + /* {mvy_L0_max, mvy_L0_min} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + pr_info(" [Picture %d Quality] MVY_L0 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + pr_info(" [Picture %d Quality] MVY_L0 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvx_L1_max, mvx_L1_min} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + pr_info(" [Picture %d Quality] MVX_L1 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + pr_info(" [Picture %d Quality] MVX_L1 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvy_L1_max, mvy_L1_min} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + pr_info(" [Picture %d Quality] MVY_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + pr_info(" [Picture %d Quality] MVY_L1 MIN : %d\n", + pic_number, mv_lo); +#endif + + rdata32 = READ_VREG(VDEC_PIC_QUALITY_CTRL); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] After Read : VDEC_PIC_QUALITY_CTRL : 0x%x\n", + pic_number, rdata32); +#endif + /* reset all counts */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8)); + } +} + +static int get_dec_dpb_size(struct vdec_h264_hw_s *hw, int mb_width, + int mb_height) +{ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int pic_size = mb_width * 
mb_height * 384; + int size = 0, size_vui; + int level_idc = p_H264_Dpb->mSPS.level_idc; + + switch (level_idc) { + case 9: + size = 152064; + break; + case 10: + size = 152064; + break; + case 11: + size = 345600; + break; + case 12: + size = 912384; + break; + case 13: + size = 912384; + break; + case 20: + size = 912384; + break; + case 21: + size = 1824768; + break; + case 22: + size = 3110400; + break; + case 30: + size = 3110400; + break; + case 31: + size = 6912000; + break; + case 32: + size = 7864320; + break; + case 40: + size = 12582912; + break; + case 41: + size = 12582912; + break; + case 42: + size = 13369344; + break; + case 50: + size = 42393600; + break; + case 51: + case 52: + default: + size = 70778880; + break; + } + + size /= pic_size; + size = imin(size, 16); + dpb_print(DECODE_ID(hw), 0, + "level_idc = %d pic_size = %d size = %d\n", + level_idc, pic_size, size); + if (p_H264_Dpb->bitstream_restriction_flag) { + if ((int)p_H264_Dpb->max_dec_frame_buffering > size) { + dpb_print(DECODE_ID(hw), 0, + "max_dec_frame_buffering larger than MaxDpbSize.\n"); + } + size_vui = imax (1, p_H264_Dpb->max_dec_frame_buffering); + if (size_vui < size) { + dpb_print(DECODE_ID(hw), 0, + "Warning: max_dec_frame_buffering(%d) is less than DPB size(%d) calculated from Profile/Level.\n", + size_vui, size); + } + size = size_vui; + } + + size += 2; /* need two more buffer */ + + return size; +} + +static int get_dec_dpb_size_active(struct vdec_h264_hw_s *hw, u32 param1) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + struct vdec_s *vdec = hw_to_vdec(hw); +#endif + int mb_width, mb_total; + int mb_height = 0; + int active_buffer_spec_num, dec_dpb_size; + u32 used_reorder_dpb_size_margin + = hw->reorder_dpb_size_margin; + + mb_width = param1 & 0xff; + mb_total = (param1 >> 8) & 0xffff; + if (!mb_width && mb_total) /*for 4k2k*/ + mb_width = 256; + if (mb_width) + mb_height = mb_total/mb_width; + if (mb_width <= 0 || mb_height <= 0 || + is_oversize(mb_width 
<< 4, mb_height << 4)) { + dpb_print(DECODE_ID(hw), 0, + "!!!wrong param1 0x%x mb_width/mb_height (0x%x/0x%x) %x\r\n", + param1, + mb_width, + mb_height); + hw->error_frame_width = mb_width << 4; + hw->error_frame_height = mb_height << 4; + return -1; + } + hw->error_frame_width = 0; + hw->error_frame_height = 0; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec->master || vdec->slave) + used_reorder_dpb_size_margin = + reorder_dpb_size_margin_dv; +#endif + + dec_dpb_size = get_dec_dpb_size(hw , mb_width, mb_height); + + active_buffer_spec_num = + dec_dpb_size + + used_reorder_dpb_size_margin; + + if (active_buffer_spec_num > MAX_VF_BUF_NUM) { + active_buffer_spec_num = MAX_VF_BUF_NUM; + dec_dpb_size = active_buffer_spec_num + - used_reorder_dpb_size_margin; + } + + hw->dpb.mDPB.size = active_buffer_spec_num; + + if (hw->no_poc_reorder_flag) + dec_dpb_size = 1; + + return dec_dpb_size; +} + +static void vh264_config_canvs_for_mmu(struct vdec_h264_hw_s *hw) +{ + int i, j; + + if (hw->double_write_mode) { + mutex_lock(&vmh264_mutex); + if (hw->decode_pic_count == 0) { + for (j = 0; j < hw->dpb.mDPB.size; j++) { + i = get_buf_spec_by_canvas_pos(hw, j); + if (i >= 0) + config_decode_canvas_ex(hw, i); + } + } + mutex_unlock(&vmh264_mutex); + } +} + +static int vh264_set_params(struct vdec_h264_hw_s *hw, + u32 param1, u32 param2, u32 param3, u32 param4, bool buffer_reset_flag) +{ + int i, j; + int mb_width, mb_total; + int max_reference_size, level_idc; + int mb_height = 0; + unsigned long flags; + /*int mb_mv_byte;*/ + struct vdec_s *vdec = hw_to_vdec(hw); + u32 seq_info2; + int ret = 0; + int active_buffer_spec_num; + unsigned int buf_size; + unsigned int frame_mbs_only_flag; + unsigned int chroma_format_idc; + unsigned int crop_bottom, crop_right; + unsigned int used_reorder_dpb_size_margin + = hw->reorder_dpb_size_margin; + u8 *colocate_vaddr = NULL; + int dec_dpb_size_change = 0; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if 
(vdec->master || vdec->slave) + used_reorder_dpb_size_margin = + reorder_dpb_size_margin_dv; +#endif + seq_info2 = param1; + hw->seq_info = param2; + + mb_width = seq_info2 & 0xff; + mb_total = (seq_info2 >> 8) & 0xffff; + if (!mb_width && mb_total) /*for 4k2k*/ + mb_width = 256; + if (mb_width) + mb_height = mb_total/mb_width; + if (mb_width <= 0 || mb_height <= 0 || + is_oversize(mb_width << 4, mb_height << 4)) { + dpb_print(DECODE_ID(hw), 0, + "!!!wrong seq_info2 0x%x mb_width/mb_height (0x%x/0x%x) %x\r\n", + seq_info2, + mb_width, + mb_height); + hw->error_frame_width = mb_width << 4; + hw->error_frame_height = mb_height << 4; + return -1; + } + hw->error_frame_width = 0; + hw->error_frame_height = 0; + + dec_dpb_size_change = hw->csd_change_flag && (hw->dpb.dec_dpb_size != get_dec_dpb_size_active(hw, param1)); + + if (((seq_info2 != 0 && + hw->seq_info2 != seq_info2) || hw->csd_change_flag) && + hw->seq_info2 != 0 + ) { + if (hw->seq_info2 != seq_info2 || dec_dpb_size_change) { /*picture size changed*/ + h264_reconfig(hw); + } else { + /*someting changes and not including dpb_size, width, height, ...*/ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + u32 reg_val = param4; + max_reference_size = (reg_val >> 8) & 0xff; + hw->dpb.reorder_output = max_reference_size; + + if (p_H264_Dpb->bitstream_restriction_flag && + p_H264_Dpb->num_reorder_frames <= p_H264_Dpb->max_dec_frame_buffering && + p_H264_Dpb->num_reorder_frames >= 0) { + hw->dpb.reorder_output = hw->num_reorder_frames + 1; + } + } + } + + if (hw->config_bufmgr_done == 0) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + u32 reg_val; + int sub_width_c = 0, sub_height_c = 0; + + hw->cfg_param1 = param1; + hw->cfg_param2 = param2; + hw->cfg_param3 = param3; + hw->cfg_param4 = param4; + + hw->seq_info2 = seq_info2; + dpb_print(DECODE_ID(hw), 0, + "AV_SCRATCH_1 = %x, AV_SCRATCH_2 %x\r\n", + seq_info2, hw->seq_info); + + dpb_init_global(&hw->dpb, + DECODE_ID(hw), 0, 0); + + p_H264_Dpb->fast_output_enable = 
fast_output_enable; + /*mb_mv_byte = (seq_info2 & 0x80000000) ? 24 : 96;*/ + if (hw->enable_fence) + p_H264_Dpb->fast_output_enable = H264_OUTPUT_MODE_FAST; +#if 1 + /*crop*/ + /* AV_SCRATCH_2 + bit 15: frame_mbs_only_flag + bit 13-14: chroma_format_idc */ + frame_mbs_only_flag = (hw->seq_info >> 15) & 0x01; + if (p_H264_Dpb->mSPS.profile_idc != 100 && + p_H264_Dpb->mSPS.profile_idc != 110 && + p_H264_Dpb->mSPS.profile_idc != 122 && + p_H264_Dpb->mSPS.profile_idc != 144) { + p_H264_Dpb->chroma_format_idc = 1; + } + chroma_format_idc = p_H264_Dpb->chroma_format_idc; + + /* @AV_SCRATCH_6.31-16 = (left << 8 | right ) << 1 + @AV_SCRATCH_6.15-0 = (top << 8 | bottom ) << + (2 - frame_mbs_only_flag) */ + + switch (chroma_format_idc) { + case 1: + sub_width_c = 2; + sub_height_c = 2; + break; + + case 2: + sub_width_c = 2; + sub_height_c = 1; + break; + + case 3: + sub_width_c = 1; + sub_height_c = 1; + break; + + default: + break; + } + + if (chroma_format_idc == 0) { + crop_right = p_H264_Dpb->frame_crop_right_offset; + crop_bottom = p_H264_Dpb->frame_crop_bottom_offset * + (2 - frame_mbs_only_flag); + } else { + crop_right = sub_width_c * p_H264_Dpb->frame_crop_right_offset; + crop_bottom = sub_height_c * p_H264_Dpb->frame_crop_bottom_offset * + (2 - frame_mbs_only_flag); + } + + p_H264_Dpb->mSPS.frame_mbs_only_flag = frame_mbs_only_flag; + hw->frame_width = mb_width << 4; + hw->frame_height = mb_height << 4; + + hw->frame_width = hw->frame_width - crop_right; + hw->frame_height = hw->frame_height - crop_bottom; + + dpb_print(DECODE_ID(hw), 0, + "chroma_format_idc = %d frame_mbs_only_flag %d, crop_bottom %d, frame_height %d,\n", + chroma_format_idc, frame_mbs_only_flag, crop_bottom, hw->frame_height); + dpb_print(DECODE_ID(hw), 0, + "mb_height %d,crop_right %d, frame_width %d, mb_width %d\n", + mb_height, crop_right, + hw->frame_width, mb_width); + + if (hw->frame_height == 1088 && (crop_right != 0 || crop_bottom != 0)) + hw->frame_height = 1080; +#endif + reg_val = 
param4; + level_idc = reg_val & 0xff; + p_H264_Dpb->mSPS.level_idc = level_idc; + max_reference_size = (reg_val >> 8) & 0xff; + hw->dpb.reorder_output = max_reference_size; + hw->dpb.dec_dpb_size = + get_dec_dpb_size(hw , mb_width, mb_height); + if (!hw->mmu_enable) { + mb_width = (mb_width+3) & 0xfffffffc; + mb_height = (mb_height+3) & 0xfffffffc; + } + mb_total = mb_width * mb_height; + hw->mb_width = mb_width; + hw->mb_height = mb_height; + hw->mb_total = mb_total; + if (hw->mmu_enable) + hevc_mcr_sao_global_hw_init(hw, + (hw->mb_width << 4), (hw->mb_height << 4)); + + dpb_print(DECODE_ID(hw), 0, + "mb height/widht/total: %x/%x/%x level_idc %x max_ref_num %x\n", + mb_height, mb_width, mb_total, + level_idc, max_reference_size); + + p_H264_Dpb->colocated_buf_size = mb_total * 96; + + dpb_print(DECODE_ID(hw), 0, + "restriction_flag=%d, max_dec_frame_buffering=%d, dec_dpb_size=%d num_reorder_frames %d used_reorder_dpb_size_margin %d\n", + hw->bitstream_restriction_flag, + hw->max_dec_frame_buffering, + hw->dpb.dec_dpb_size, + hw->num_reorder_frames, + used_reorder_dpb_size_margin); + + if (p_H264_Dpb->bitstream_restriction_flag && + p_H264_Dpb->num_reorder_frames <= p_H264_Dpb->max_dec_frame_buffering && + p_H264_Dpb->num_reorder_frames >= 0) { + hw->dpb.reorder_output = hw->num_reorder_frames + 1; + } + + active_buffer_spec_num = + hw->dpb.dec_dpb_size + + used_reorder_dpb_size_margin; + hw->max_reference_size = + max_reference_size + reference_buf_margin; + + if (active_buffer_spec_num > MAX_VF_BUF_NUM) { + active_buffer_spec_num = MAX_VF_BUF_NUM; + hw->dpb.dec_dpb_size = active_buffer_spec_num + - used_reorder_dpb_size_margin; + dpb_print(DECODE_ID(hw), 0, + "active_buffer_spec_num is larger than MAX %d, set dec_dpb_size to %d\n", + MAX_VF_BUF_NUM, hw->dpb.dec_dpb_size); + } + hw->dpb.mDPB.size = active_buffer_spec_num; + if (hw->max_reference_size > MAX_VF_BUF_NUM) + hw->max_reference_size = MAX_VF_BUF_NUM; + hw->dpb.max_reference_size = hw->max_reference_size; 
+ + if (hw->no_poc_reorder_flag) + hw->dpb.dec_dpb_size = 1; + dpb_print(DECODE_ID(hw), 0, + "%s active_buf_spec_num %d dec_dpb_size %d collocate_buf_num %d\r\n", + __func__, active_buffer_spec_num, + hw->dpb.dec_dpb_size, + hw->max_reference_size); + + if (hw->kpi_first_i_comming == 0) { + hw->kpi_first_i_comming = 1; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "[vdec_kpi][%s] First I frame comming.\n", __func__); + } + + buf_size = (hw->mb_total << 8) + (hw->mb_total << 7); + + mutex_lock(&vmh264_mutex); + if (!hw->mmu_enable) { + if (!buffer_reset_flag || hw->is_used_v4l) + config_buf_specs(vdec); + i = get_buf_spec_by_canvas_pos(hw, 0); + + if (hw->is_used_v4l) { + if (i != -1) { + pr_info("v4l: delay alloc the buffer.\n"); + } + } else { + if ((i != -1) && alloc_one_buf_spec(hw, i) >= 0) + config_decode_canvas(hw, i); + else + ret = -1; + } + } else { + if (hw->double_write_mode) { + config_buf_specs_ex(vdec); + } else { + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0, j = 0; + j < active_buffer_spec_num + && i < BUFSPEC_POOL_SIZE; + i++) { + if (hw->buffer_spec[i].used != -1) + continue; + hw->buffer_spec[i].used = 0; + hw->buffer_spec[i]. 
+ alloc_header_addr = 0; + hw->buffer_spec[i].canvas_pos = j; + j++; + } + spin_unlock_irqrestore(&hw->bufspec_lock, + flags); + } + hevc_mcr_config_canv2axitbl(hw, 0); + } + mutex_unlock(&vmh264_mutex); + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + +#ifdef ONE_COLOCATE_BUF_PER_DECODE_BUF + buf_size = PAGE_ALIGN( + p_H264_Dpb->colocated_buf_size * + active_buffer_spec_num); +#else + buf_size = PAGE_ALIGN( + p_H264_Dpb->colocated_buf_size * + hw->max_reference_size); +#endif + + if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, BMMU_REF_IDX, + buf_size, DRIVER_NAME, + &hw->collocate_cma_alloc_addr) < 0) + return -1; + if (!vdec_secure(vdec)) { + /* clear for some mosaic problem after reset bufmgr */ + colocate_vaddr = codec_mm_vmap(hw->collocate_cma_alloc_addr, buf_size); + if (colocate_vaddr != NULL) { + memset(colocate_vaddr, 0, buf_size); + codec_mm_dma_flush(colocate_vaddr, buf_size, DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(colocate_vaddr); + } + } + + hw->dpb.colocated_mv_addr_start = + hw->collocate_cma_alloc_addr; +#ifdef ONE_COLOCATE_BUF_PER_DECODE_BUF + hw->dpb.colocated_mv_addr_end = + hw->dpb.colocated_mv_addr_start + + (p_H264_Dpb->colocated_buf_size * + active_buffer_spec_num); +#else + hw->dpb.colocated_mv_addr_end = + hw->dpb.colocated_mv_addr_start + + (p_H264_Dpb->colocated_buf_size * + hw->max_reference_size); +#endif + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "callocate cma, %lx, %x\n", + hw->collocate_cma_alloc_addr, + hw->dpb.colocated_mv_addr_start); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "colocated_mv_addr_start %x colocated_mv_addr_end %x\n", + hw->dpb.colocated_mv_addr_start, + hw->dpb.colocated_mv_addr_end); + if (!hw->mmu_enable) { + mutex_lock(&vmh264_mutex); + if (ret >= 0 && hw->decode_pic_count == 0) { + int buf_cnt; + /* h264_reconfig: alloc later*/ + buf_cnt = hw->dpb.mDPB.size; + + for (j = 1; j < buf_cnt; j++) { + i = get_buf_spec_by_canvas_pos(hw, j); + + if 
(hw->is_used_v4l) {
				/* v4l2 path: buffers are allocated later by the
				 * v4l2 framework, not here. */
				pr_info("v4l: delay alloc the buffer.\n");
				break;
			} else if (alloc_one_buf_spec(hw, i) < 0)
				break;

			config_decode_canvas(hw, i);
		}
	}
	mutex_unlock(&vmh264_mutex);
	} else {
		vh264_config_canvs_for_mmu(hw);
	}

	hw->config_bufmgr_done = 1;

	/*end of config_bufmgr_done */
	}

	return ret;
}

/*
 * vui_config() - latch VUI (Video Usability Information) fields parsed by
 * the ucode into driver state.
 *
 * Copies timing info (num_units_in_tick/time_scale), bitstream restriction
 * info and aspect-ratio info from the parsed DPB parameters into @hw, and
 * derives hw->frame_dur (frame duration) and hw->h264_ar / the
 * width/height_aspect_ratio pair (display aspect ratio) from them.
 */
static void vui_config(struct vdec_h264_hw_s *hw)
{
	struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
	int aspect_ratio_info_present_flag, aspect_ratio_idc;

	/*time*/
	/* vui_status bit1 = timing_info_present_flag (per the mask below) */
	hw->num_units_in_tick = p_H264_Dpb->num_units_in_tick;
	hw->time_scale = p_H264_Dpb->time_scale;
	hw->timing_info_present_flag = p_H264_Dpb->vui_status & 0x2;

	hw->bitstream_restriction_flag =
		p_H264_Dpb->bitstream_restriction_flag;
	hw->num_reorder_frames =
		p_H264_Dpb->num_reorder_frames;
	hw->max_dec_frame_buffering =
		p_H264_Dpb->max_dec_frame_buffering;

	dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL,
		"vui_config: pdb %d, %d, %d\n",
		p_H264_Dpb->bitstream_restriction_flag,
		p_H264_Dpb->num_reorder_frames,
		p_H264_Dpb->max_dec_frame_buffering);

	hw->fixed_frame_rate_flag = 0;
	if (hw->timing_info_present_flag) {
		hw->fixed_frame_rate_flag =
			p_H264_Dpb->fixed_frame_rate_flag;

		/* Accept the ES-derived frame rate only if it is plausible
		 * (num_units_in_tick * 120 >= time_scale, i.e. not an
		 * absurdly high rate) and either we are not synced by an
		 * outside clock or no frame_dur is known yet. */
		if (((hw->num_units_in_tick * 120) >= hw->time_scale &&
			((!hw->sync_outside) ||
			(!hw->frame_dur)))
			&& hw->num_units_in_tick && hw->time_scale) {
			if (hw->use_idr_framerate ||
				hw->fixed_frame_rate_flag ||
				!hw->frame_dur ||
				!hw->duration_from_pts_done
				/*|| vh264_running*/) {
				/* Frame duration in 1/96000 s units; the
				 * factor 2 matches H.264's two-field tick
				 * convention (one frame = 2 ticks). */
				u32 frame_dur_es =
					div_u64(96000ULL * 2 * hw->num_units_in_tick,
						hw->time_scale);
				if (hw->frame_dur != frame_dur_es) {
					/* New duration from the ES: restart
					 * the PTS-based duration estimation
					 * from scratch. */
					hw->h264_first_valid_pts_ready = false;
					hw->h264pts1 = 0;
					hw->h264pts2 = 0;
					hw->h264_pts_count = 0;
					hw->duration_from_pts_done = 0;
					fixed_frame_rate_mode =
						FIX_FRAME_RATE_OFF;
					hw->pts_duration = 0;
					hw->frame_dur = frame_dur_es;
					/* 7680 (= 12.5fps in 1/96000 s) is
					 * halved to 25fps for non-baseline,
					 * non-fixed-rate streams — presumably
					 * a workaround for encoders signaling
					 * field rate; TODO confirm intent. */
					if (!hw->fixed_frame_rate_flag && (p_H264_Dpb->mSPS.profile_idc != BASELINE)) {
						if (frame_dur_es == 7680)
							hw->frame_dur = frame_dur_es / 2;
					}
					vdec_schedule_work(&hw->notify_work);
					dpb_print(DECODE_ID(hw),
						PRINT_FLAG_DEC_DETAIL,
						"frame_dur %d from timing_info\n",
						hw->frame_dur);
				}

				/*hack to avoid use ES frame duration when
				 *it's half of the rate from system info
				 * sometimes the encoder is given a wrong
				 * frame rate but the system side information
				 * is more reliable
				 *if ((frame_dur * 2) != frame_dur_es) {
				 *	frame_dur = frame_dur_es;
				 *}
				 */
			}
		}
	} else {
		dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
			"H.264: timing_info not present\n");
	}

	/*aspect ratio*/
	/* vui_status bit0 = aspect_ratio_info_present_flag */
	aspect_ratio_info_present_flag =
		p_H264_Dpb->vui_status & 0x1;
	aspect_ratio_idc = p_H264_Dpb->aspect_ratio_idc;

	if (aspect_ratio_info_present_flag) {
		if (aspect_ratio_idc == EXTEND_SAR) {
			/* Extended SAR: explicit sar_width/sar_height from
			 * the stream. h264_ar = 0x3ff appears to mean
			 * "use width/height_aspect_ratio pair" — TODO
			 * confirm against the consumer of h264_ar. */
			hw->h264_ar = 0x3ff;
			hw->height_aspect_ratio =
				p_H264_Dpb->aspect_ratio_sar_height;
			hw->width_aspect_ratio =
				p_H264_Dpb->aspect_ratio_sar_width;
		} else {
			/* pr_info("v264dec: aspect_ratio_idc = %d\n",
				aspect_ratio_idc); */

			/* Cases 1..16 are the fixed sample aspect ratios of
			 * H.264 Annex E Table E-1 (stored here as
			 * height:width). */
			switch (aspect_ratio_idc) {
			case 1:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 1;
				hw->width_aspect_ratio = 1;
				break;
			case 2:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 11;
				hw->width_aspect_ratio = 12;
				break;
			case 3:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 11;
				hw->width_aspect_ratio = 10;
				break;
			case 4:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 11;
				hw->width_aspect_ratio = 16;
				break;
			case 5:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 33;
				hw->width_aspect_ratio = 40;
				break;
			case 6:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 11;
				hw->width_aspect_ratio = 24;
				break;
			case 7:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 11;
				hw->width_aspect_ratio = 20;
				break;
			case 8:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 11;
				hw->width_aspect_ratio = 32;
				break;
			case 9:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 33;
				hw->width_aspect_ratio = 80;
				break;
			case 10:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 11;
				hw->width_aspect_ratio = 18;
				break;
			case 11:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 11;
				hw->width_aspect_ratio = 15;
				break;
			case 12:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 33;
				hw->width_aspect_ratio = 64;
				break;
			case 13:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 99;
				hw->width_aspect_ratio = 160;
				break;
			case 14:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 3;
				hw->width_aspect_ratio = 4;
				break;
			case 15:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 2;
				hw->width_aspect_ratio = 3;
				break;
			case 16:
				hw->h264_ar = 0x3ff;
				hw->height_aspect_ratio = 1;
				hw->width_aspect_ratio = 2;
				break;
			default:
				/* Unknown idc: fall back to the externally
				 * supplied ratio (vh264_ratio packs width in
				 * the high 16 bits, height in the low 16 —
				 * see the mirror branch below). */
				if (hw->vh264_ratio >> 16) {
					hw->h264_ar = (hw->frame_height *
						(hw->vh264_ratio & 0xffff) *
						0x100 +
						((hw->vh264_ratio >> 16) *
						hw->frame_width / 2)) /
						((hw->vh264_ratio >> 16) *
						hw->frame_width);
					hw->height_aspect_ratio = 1;
					hw->width_aspect_ratio = 1;
				} else {
					hw->h264_ar = 0x3ff;
					hw->height_aspect_ratio = 1;
					hw->width_aspect_ratio = 1;
				}
				break;
			}
		}
	} else {
		dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
			"v264dec: aspect_ratio not available from source\n");
		if (hw->vh264_ratio >> 16) {
			/* high 16 bit is width, low 16 bit is height */
			hw->h264_ar =
				((hw->vh264_ratio & 0xffff) *
				hw->frame_height * 0x100 +
				(hw->vh264_ratio >> 16) *
				hw->frame_width / 2) /
				((hw->vh264_ratio >> 16) *
				hw->frame_width);
			hw->height_aspect_ratio = 1;
			hw->width_aspect_ratio = 1;
		} else {
			hw->h264_ar = 0x3ff;
			hw->height_aspect_ratio = 1;
			hw->width_aspect_ratio = 1;
		}
	}

	/* With unstable PTS, optionally force 23.97/29.97fps streams into
	 * fixed-frame-rate mode via dec_control policy bits. */
	if (hw->pts_unstable && (hw->fixed_frame_rate_flag == 0)) {
		if (((hw->frame_dur == RATE_2397_FPS)
			&& (dec_control
			& DEC_CONTROL_FLAG_FORCE_RATE_2397_FPS_FIX_FRAME_RATE))
			|| ((RATE_2997_FPS ==
			hw->frame_dur) &&
			(dec_control &
			DEC_CONTROL_FLAG_FORCE_RATE_2997_FPS_FIX_FRAME_RATE))) {
			dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
				"force fix frame rate\n");
			hw->fixed_frame_rate_flag = 0x40;
		}
	}

	/*video_signal_from_vui: to do .. */
}

/*
 * bufmgr_recover() - soft buffer-manager recovery: drop unused DPB frames
 * and (policy bit 0x20, non-v4l only) request a full bufmgr reset.
 */
static void bufmgr_recover(struct vdec_h264_hw_s *hw)
{
	struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;

	bufmgr_h264_remove_unused_frame(p_H264_Dpb, 2);
	if (error_proc_policy & 0x20) {
		if (!hw->is_used_v4l)
			hw->reset_bufmgr_flag = 1;
	}
}

/*
 * bufmgr_force_recover() - unconditional recovery entry point (callable
 * from DPB code that only has the h264_dpb_stru): drop unused frames and
 * always schedule a bufmgr reset.
 */
void bufmgr_force_recover(struct h264_dpb_stru *p_H264_Dpb)
{
	struct vdec_h264_hw_s *hw =
		container_of(p_H264_Dpb, struct vdec_h264_hw_s, dpb);

	dpb_print(DECODE_ID(hw), 0, "call %s\n", __func__);

	bufmgr_h264_remove_unused_frame(p_H264_Dpb, 2);
	hw->reset_bufmgr_flag = 1;
}

#ifdef CONSTRAIN_MAX_BUF_NUM
/* Count buffer specs that are in the display queue with a live vf ref. */
static int get_vf_ref_only_buf_count(struct vdec_h264_hw_s *hw)
{
	int i;
	int count = 0;

	for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
		if (is_buf_spec_in_disp_q(hw, i) &&
			hw->buffer_spec[i].vf_ref > 0)
			count++;
	}
	return count;
}

/* Count buffer specs currently in use (decode or display). */
static int get_used_buf_count(struct vdec_h264_hw_s *hw)
{
	int i;
	int count = 0;

	for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
		if (is_buf_spec_in_use(hw, i))
			count++;
	}
	return count;
}
#endif


/*
 * is_buffer_available() - run-ready check: can the decoder accept another
 * picture right now?
 *
 * Returns false when the new-frame fifo is empty, no free buffer spec
 * exists, or the DPB is (nearly) full with nothing reclaimable. While in
 * that starved state it also applies error_proc_policy-driven recovery
 * (bits 0x4/0x8): trimming unused DPB frames or triggering bufmgr_recover()
 * when too many reference frames are pinned.
 */
static bool is_buffer_available(struct vdec_s *vdec)
{
	bool buffer_available = 1;
	struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)(vdec->private);
	struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;
	int i, frame_outside_count = 0, inner_size = 0;

	if ((kfifo_len(&hw->newframe_q) <= 0) ||
		((hw->config_bufmgr_done) && (!have_free_buf_spec(vdec))) ||
		((p_H264_Dpb->mDPB.init_done) &&
		(p_H264_Dpb->mDPB.used_size >= (p_H264_Dpb->mDPB.size - 1)) &&
		(is_there_unused_frame_from_dpb(&p_H264_Dpb->mDPB) == 0))) {
		dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL,
			"%s, empty, newq(%d), free_spec(%d), initdon(%d), used_size(%d/%d), unused_fr_dpb(%d)\n",
			__func__,
			kfifo_len(&hw->newframe_q),
			have_free_buf_spec(vdec),
			p_H264_Dpb->mDPB.init_done,
			p_H264_Dpb->mDPB.used_size, p_H264_Dpb->mDPB.size,
			is_there_unused_frame_from_dpb(&p_H264_Dpb->mDPB)
			);
		buffer_available = 0;
		/* Debug knob: report starvation without attempting any
		 * buffer reclaim. */
		if (dpb_is_debug(DECODE_ID(hw),
			DEBUG_DISABLE_RUNREADY_RMBUF))
			return buffer_available;

		if ((error_proc_policy & 0x4) &&
			(error_proc_policy & 0x8)) {
			/* Both policies: recover only when display q is
			 * drained, DPB is nearly full AND more reference
			 * frames are held than the stream needs (plus the
			 * sliding margin). */
			if ((kfifo_len(&hw->display_q) <= 0) &&
				(p_H264_Dpb->mDPB.used_size >=
				(p_H264_Dpb->mDPB.size - 1)) &&
				(p_Dpb->ref_frames_in_buffer >
				(imax(
				1, p_Dpb->num_ref_frames)
				- p_Dpb->ltref_frames_in_buffer
				+ force_sliding_margin))){
				bufmgr_recover(hw);
			} else {
				bufmgr_h264_remove_unused_frame(p_H264_Dpb, 1);
			}
		} else if ((error_proc_policy & 0x4) &&
			(kfifo_len(&hw->display_q) <= 0) &&
			((p_H264_Dpb->mDPB.used_size >=
			(p_H264_Dpb->mDPB.size - 1)) ||
			(!have_free_buf_spec(vdec)))) {
			unsigned long flags;

			spin_lock_irqsave(&hw->bufspec_lock, flags);

			/* Classify DPB frames: frames pending output
			 * (pre_output) count as "outside"; an already-output,
			 * non-reference frame can be freed immediately. */
			for (i = 0; i < p_Dpb->used_size; i++) {
				if (p_Dpb->fs[i]->pre_output)
					frame_outside_count++;
				else if (p_Dpb->fs[i]->is_output && !is_used_for_reference(p_Dpb->fs[i])) {
					spin_unlock_irqrestore(&hw->bufspec_lock, flags);
					bufmgr_h264_remove_unused_frame(p_H264_Dpb, 0);
					return 0;
				}
			}
			spin_unlock_irqrestore(&hw->bufspec_lock, flags);
			inner_size = p_Dpb->size - frame_outside_count;

			/* If the frames still held inside the decoder exceed
			 * the negotiated dec_dpb_size, the DPB is wedged —
			 * recover. */
			if (inner_size >= p_H264_Dpb->dec_dpb_size) {
				if (p_H264_Dpb->mDPB.used_size >=
					p_H264_Dpb->mDPB.size) {
					bufmgr_recover(hw);
				} else if (p_H264_Dpb->mDPB.used_size >=
					(p_H264_Dpb->mDPB.size - 1)) {
					if (inner_size > p_H264_Dpb->dec_dpb_size) {
						bufmgr_recover(hw);
					}
				}
			}
			bufmgr_h264_remove_unused_frame(p_H264_Dpb, 0);
		} else if ((error_proc_policy & 0x8) &&
			(p_Dpb->ref_frames_in_buffer >
			(imax(
			1, p_Dpb->num_ref_frames)
			- p_Dpb->ltref_frames_in_buffer
			+ force_sliding_margin)))
			bufmgr_recover(hw);
		else
			bufmgr_h264_remove_unused_frame(p_H264_Dpb, 1);

		/* A recovery that scheduled a bufmgr reset unblocks the
		 * caller. */
		if (hw->reset_bufmgr_flag == 1)
buffer_available = 1;
	}

	/* v4l2 path: availability is decided purely by free buffer specs. */
	if (hw->is_used_v4l)
		buffer_available = have_free_buf_spec(vdec);

	return buffer_available;
}

#define AUX_TAG_SEI 0x2

/* SEI payload types (H.264 Annex D payloadType values) handled below. */
#define SEI_BUFFERING_PERIOD 0
#define SEI_PicTiming 1
#define SEI_USER_DATA 4
#define SEI_RECOVERY_POINT 6

/*
 *************************************************************************
 * Function:Reads bits from the bitstream buffer
 * Input:
	byte buffer[]
		containing sei message data bits
	int totbitoffset
		bit offset from start of partition
	int bytecount
		total bytes in bitstream
	int numbits
		number of bits to read
 * Output:
	int *info
 * Return:
	-1: failed
	> 0:  the count of bit read
 * Attention:
 *************************************************************************
 */

static int get_bits(unsigned char buffer[],
	int totbitoffset,
	int *info,
	int bytecount,
	int numbits)
{
	register int inf;
	long byteoffset;
	int bitoffset;

	int bitcounter = numbits;

	byteoffset = totbitoffset / 8;
	/* MSB-first within each byte */
	bitoffset = 7 - (totbitoffset % 8);

	inf = 0;
	while (numbits) {
		inf <<= 1;
		inf |= (buffer[byteoffset] & (0x01 << bitoffset)) >> bitoffset;
		numbits--;
		bitoffset--;
		if (bitoffset < 0) {
			byteoffset++;
			bitoffset += 8;
			/* NOTE(review): the check is '>' not '>=', so the
			 * next iteration may read buffer[bytecount], one
			 * byte past a bytecount-sized buffer — confirm the
			 * callers' buffer sizing. */
			if (byteoffset > bytecount)
				return -1;
		}
	}

	*info = inf;


	return bitcounter;
}

/*
 * parse_one_sei_record() - parse a single SEI message starting at
 * @sei_data_buf (bounded by @sei_data_buf_end).
 *
 * Decodes the ff-extended payload_type/payload_size headers, then handles
 * pic_timing (extracts PICTURE_STRUCT when vui_status bit4 is set),
 * user_data (copies into hw->sei_itu_data_buf with per-8-byte byte
 * reversal and zero padding) and recovery_point (sets the RECOVERY_POINT
 * dpb_param flag). Returns the number of bytes consumed.
 */
static int parse_one_sei_record(struct vdec_h264_hw_s *hw,
	u8 *sei_data_buf,
	u8 *sei_data_buf_end)
{
	int payload_type;
	int payload_size;
	u8 *p_sei;
	int temp = 0;
	int bit_offset;
	int read_size;
	struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;

	p_sei = sei_data_buf;
	read_size = 0;
	payload_type = 0;
	/* payload_type: sum of 0xff bytes plus final byte (Annex D coding) */
	do {
		if (p_sei >= sei_data_buf_end)
			return read_size;

		payload_type += *p_sei;
		read_size++;
	} while (*p_sei++ == 255);


	payload_size = 0;
	/* payload_size: same ff-extension coding */
	do {
		if (p_sei >= sei_data_buf_end)
			return read_size;

		payload_size += *p_sei;
		read_size++;
	} while (*p_sei++ == 255);


	/* Truncated payload: consume only the header. */
	if (p_sei + payload_size > sei_data_buf_end) {
		dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL,
			"%s: payload_type = %d, payload_size = %d is over\n",
			__func__, payload_type, payload_size);
		return read_size;
	}
	bit_offset = 0;

	if (payload_size <= 0) {
		dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL,
			"%s warning: this is a null sei message for payload_type = %d\n",
			__func__, payload_type);
		return read_size;
	}
	p_H264_Dpb->vui_status = p_H264_Dpb->dpb_param.l.data[VUI_STATUS];
	switch (payload_type) {
	case SEI_BUFFERING_PERIOD:
		break;
	case SEI_PicTiming:
		/* vui_status bits 2/3: NAL/VCL HRD params present — skip the
		 * cpb_removal_delay / dpb_output_delay fields whose bit
		 * lengths were reported by the ucode in DELAY_LENGTH. */
		if (p_H264_Dpb->vui_status & 0xc) {
			int cpb_removal_delay;
			int dpb_output_delay;
			u32 delay_len;

			delay_len = p_H264_Dpb->dpb_param.l.data[DELAY_LENGTH];
			cpb_removal_delay
				= (delay_len & 0x1F) + 1;
			dpb_output_delay
				= ((delay_len >> 5) & 0x1F) + 1;

			get_bits(p_sei, bit_offset,
				&temp, payload_size,
				dpb_output_delay+cpb_removal_delay);
			bit_offset += dpb_output_delay+cpb_removal_delay;
		}
		/* vui_status bit4: pic_struct_present_flag — read the 4-bit
		 * pic_struct. */
		if (p_H264_Dpb->vui_status & 0x10) {
			get_bits(p_sei, bit_offset, &temp, payload_size, 4);
			bit_offset += 4;
			p_H264_Dpb->dpb_param.l.data[PICTURE_STRUCT] = temp;
		}
		break;
	case SEI_USER_DATA:
		if (enable_itu_t35) {
			int i;
			int j;
			int data_len;
			u8 *user_data_buf;

			user_data_buf
				= hw->sei_itu_data_buf + hw->sei_itu_data_len;
			/* user data length should be align with 8 bytes,
			if not, then padding with zero*/
			for (i = 0; i < payload_size; i += 8) {
				if (hw->sei_itu_data_len + i >= SEI_ITU_DATA_SIZE)
					break; // Avoid out-of-bound writing
				/* Byte-reverse each 8-byte group (matches the
				 * hardware/ucode endianness of the aux
				 * buffer), zero-padding past payload_size. */
				for (j = 0; j < 8; j++) {
					int index;

					index = i+7-j;
					if (index >= payload_size)
						user_data_buf[i+j] = 0;
					else
						user_data_buf[i+j]
							= p_sei[i+7-j];
				}
			}

			data_len = payload_size;
			if (payload_size % 8)
				data_len = ((payload_size + 8) >> 3) << 3;

			hw->sei_itu_data_len += data_len;
			if (hw->sei_itu_data_len >= SEI_ITU_DATA_SIZE)
				hw->sei_itu_data_len = SEI_ITU_DATA_SIZE;
			/*
			dpb_print(DECODE_ID(hw), 0,
				"%s: user data, and len = %d:\n",
				__func__, hw->sei_itu_data_len);
			*/
		}
		break;
	case SEI_RECOVERY_POINT:
		p_H264_Dpb->dpb_param.l.data[RECOVERY_POINT] = 1;
		break;
	}

	return read_size + payload_size;
}

/*
 * parse_sei_data() - walk a buffer of concatenated SEI messages, feeding
 * each record to parse_one_sei_record() and skipping a trailing 0x80
 * RBSP stop byte after each record.
 */
static void parse_sei_data(struct vdec_h264_hw_s *hw,
	u8 *sei_data_buf,
	int len)
{
	char *p_sei;
	char *p_sei_end;
	int parsed_size;
	int read_size;


	p_sei = sei_data_buf;
	p_sei_end = p_sei + len;
	parsed_size = 0;
	while (parsed_size < len) {
		read_size = parse_one_sei_record(hw, p_sei, p_sei_end);
		p_sei += read_size;
		parsed_size += read_size;
		/* NOTE(review): when parsed_size == len this dereferences
		 * one byte past the buffer — confirm the buffer has slack. */
		if (*p_sei == 0x80) {
			p_sei++;
			parsed_size++;
		}
	}
}

/*
 * check_decoded_pic_error() - after a picture decodes, compare the
 * hardware macroblock progress (MBY_MBX) and error status register against
 * the expected MB total and mark the picture's data_flag with ERROR_FLAG
 * according to the error_proc_policy bits.
 */
static void check_decoded_pic_error(struct vdec_h264_hw_s *hw)
{
	struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
	struct StorablePicture *p = p_H264_Dpb->mVideo.dec_picture;
	unsigned mby_mbx = READ_VREG(MBY_MBX);
	/* seq_info2: bits 8..23 = total MBs, bits 0..7 = MB width */
	unsigned mb_total = (hw->seq_info2 >> 8) & 0xffff;
	unsigned mb_width = hw->seq_info2 & 0xff;
	unsigned decode_mb_count;

	if (!mb_width && mb_total) /*for 4k2k*/
		mb_width = 256;
	decode_mb_count = ((mby_mbx & 0xff) * mb_width +
		(((mby_mbx >> 8) & 0xff) + 1));
	if ((mby_mbx == 0) && (p_H264_Dpb->dec_dpb_status != H264_SLICE_HEAD_DONE)) {
		dpb_print(DECODE_ID(hw), 0,
			"mby_mbx is zero\n");
		return;
	}
	/* A field picture covers only half the frame's MBs. */
	if (get_cur_slice_picture_struct(p_H264_Dpb) != FRAME)
		mb_total /= 2;

	/* policy 0x200: trust the hardware error status register */
	if ((error_proc_policy & 0x200) &&
		READ_VREG(ERROR_STATUS_REG) != 0) {
		p->data_flag |= ERROR_FLAG;
	}

	/* policy 0x100: flag under-decoded pictures; policy 0x20000
	 * tolerates a shortfall within mb_count_threshold percent. */
	if (error_proc_policy & 0x100 && !(p->data_flag & ERROR_FLAG)) {
		if (decode_mb_count < mb_total) {
			p->data_flag |= ERROR_FLAG;
			if (((error_proc_policy & 0x20000) &&
				decode_mb_count >= mb_total * (100 - mb_count_threshold) / 100)) {
				p->data_flag &= ~ERROR_FLAG;
			}
		}
	}

	/* policy 0x100000: I-then-P pattern — count fully decoded but
	 * error-flagged P pictures and clear ref error flags after 4 in a
	 * row (handled in the branch body below). */
	if ((error_proc_policy & 0x100000) &&
		hw->last_dec_picture &&
		(hw->last_dec_picture->slice_type == I_SLICE) &&
		(hw->dpb.mSlice.slice_type == P_SLICE)) {
		if ((p->data_flag & ERROR_FLAG) &&
			(decode_mb_count >= mb_total)) {
hw->ip_field_error_count++;
			/* After 4 consecutive such pictures, assume the
			 * error flags on the reference frames are stale and
			 * clear them all. */
			if (hw->ip_field_error_count == 4) {
				unsigned int i;
				struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;

				for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
					if (p_Dpb->fs_ref[i]->top_field)
						p_Dpb->fs_ref[i]->top_field->data_flag &= ~ERROR_FLAG;
					if (p_Dpb->fs_ref[i]->bottom_field)
						p_Dpb->fs_ref[i]->bottom_field->data_flag &= ~ERROR_FLAG;
					if (p_Dpb->fs_ref[i]->frame)
						p_Dpb->fs_ref[i]->frame->data_flag &= ~ERROR_FLAG;
				}
				hw->ip_field_error_count = 0;
				p->data_flag &= ~ERROR_FLAG;
				hw->data_flag &= ~ERROR_FLAG;
				dpb_print(DECODE_ID(hw), 0,
					"clear all ref frame error flag\n");
			}
		} else {
			if (hw->ip_field_error_count > 0)
				dpb_print(DECODE_ID(hw), 0,
					"clear error count %d\n", hw->ip_field_error_count);
			hw->ip_field_error_count = 0;
		}
	}

	if (p->data_flag & ERROR_FLAG) {
		dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG,
			"%s: decode error, seq_info2 0x%x, mby_mbx 0x%x, mb_total %d decoded mb_count %d ERROR_STATUS_REG 0x%x\n",
			__func__,
			hw->seq_info2,
			mby_mbx,
			mb_total,
			decode_mb_count,
			READ_VREG(ERROR_STATUS_REG)
			);

	}
}

/*
 * vh264_pic_done_proc() - post-decode handling for a completed picture.
 *
 * Stamps the picture with PTS/timestamp info (from the current chunk,
 * the PTS server, or zeroed for a DV enhance layer), runs error checking,
 * applies the loop-playback detection policy (bit 0x200000), stores the
 * picture in the DPB and updates decode counters / skip-frame state.
 * Also decides hw->frmbase_cont_flag (more pictures remain in this
 * frame-based packet). Always returns 0.
 */
static int vh264_pic_done_proc(struct vdec_s *vdec)
{
	struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)(vdec->private);
	struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
	int ret;
	int i;
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;

	if (vdec->mvfrm)
		vdec->mvfrm->hw_decode_time =
			local_clock() - vdec->mvfrm->hw_decode_start;

	if (input_frame_based(vdec) &&
		(!(hw->i_only & 0x2)) &&
		frmbase_cont_bitlevel != 0 &&
		READ_VREG(VIFF_BIT_CNT) >
		frmbase_cont_bitlevel) {
		/*handle the case: multi pictures in one packet*/
		dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
			"%s H264_PIC_DATA_DONE decode slice count %d, continue (bitcnt 0x%x)\n",
			__func__,
			hw->decode_pic_count,
			READ_VREG(VIFF_BIT_CNT));
		hw->frmbase_cont_flag = 1;
	} else
		hw->frmbase_cont_flag = 0;

	if (p_H264_Dpb->mVideo.dec_picture) {
		get_picture_qos_info(p_H264_Dpb->mVideo.dec_picture);
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
		DEL_EXIST(hw,
			p_H264_Dpb->mVideo.dec_picture) = 0;
		/* Dual-layer DV: mark the base layer's last picture as
		 * having an enhance-layer counterpart. */
		if (vdec->master) {
			struct vdec_h264_hw_s *hw_ba =
				(struct vdec_h264_hw_s *)
				vdec->master->private;
			if (hw_ba->last_dec_picture)
				DEL_EXIST(hw_ba,
					hw_ba->last_dec_picture)
					= 1;
		}
#endif
		mutex_lock(&hw->chunks_mutex);
		if (hw->chunk) {
			/* Frame-based input: PTS comes with the chunk. */
			p_H264_Dpb->mVideo.dec_picture->pts =
				hw->chunk->pts;
			p_H264_Dpb->mVideo.dec_picture->pts64 =
				hw->chunk->pts64;
			p_H264_Dpb->mVideo.dec_picture->timestamp =
				hw->chunk->timestamp;
#ifdef MH264_USERDATA_ENABLE
			vmh264_udc_fill_vpts(hw,
				p_H264_Dpb->mSlice.slice_type,
				hw->chunk->pts, 1);
#endif

#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
		} else if (vdec->master) {
			/*dv enhance layer,
			do not checkout pts*/
			struct StorablePicture *pic =
				p_H264_Dpb->mVideo.dec_picture;
			pic->pts = 0;
			pic->pts64 = 0;
#endif
		} else {
			/* Stream-based input: look the PTS up by stream
			 * offset via the PTS server. */
			struct StorablePicture *pic =
				p_H264_Dpb->mVideo.dec_picture;
			u32 offset = pic->offset_delimiter;

			pic->pic_size = (hw->start_bit_cnt - READ_VREG(VIFF_BIT_CNT)) >> 3;
			if (pts_pickout_offset_us64(PTS_TYPE_VIDEO,
				offset, &pic->pts, 0, &pic->pts64)) {
				pic->pts = 0;
				pic->pts64 = 0;
#ifdef MH264_USERDATA_ENABLE
				vmh264_udc_fill_vpts(hw,
					p_H264_Dpb->mSlice.slice_type,
					pic->pts, 0);
#endif
			} else {
#ifdef MH264_USERDATA_ENABLE
				vmh264_udc_fill_vpts(hw,
					p_H264_Dpb->mSlice.slice_type,
					pic->pts, 1);
#endif
			}

		}
		mutex_unlock(&hw->chunks_mutex);

		check_decoded_pic_error(hw);
#ifdef ERROR_HANDLE_TEST
		if ((hw->data_flag & ERROR_FLAG)
			&& (error_proc_policy & 0x80)) {
			release_cur_decoding_buf(hw);
			h264_clear_dpb(hw);
			hw->dec_flag = 0;
			hw->data_flag = 0;
			hw->skip_frame_count = 0;
			hw->has_i_frame = 0;
			hw->no_error_count = 0xfff;
			hw->no_error_i_count = 0xf;
		} else
#endif
		/* policy 0x200000: detect POC "loop playback" (POC jumping
		 * far backwards repeatedly) and force stuck future frames
		 * out for display. */
		if (error_proc_policy & 0x200000) {
			if (!hw->loop_flag) {
				for (i = 0; i < p_Dpb->used_size; i++) {
					if ((p_H264_Dpb->mVideo.dec_picture->poc + loop_playback_poc_threshold < p_Dpb->fs[i]->poc) &&
						!p_Dpb->fs[i]->is_output &&
						!p_Dpb->fs[i]->pre_output) {
						hw->loop_flag = 1;
						hw->loop_last_poc = p_H264_Dpb->mVideo.dec_picture->poc;
						break;
					}
				}
			} else {
				if ((p_H264_Dpb->mVideo.dec_picture->poc >= hw->loop_last_poc - poc_threshold) &&
					(p_H264_Dpb->mVideo.dec_picture->poc <= hw->loop_last_poc + poc_threshold)) {
					if (hw->loop_flag >= 5) {
						for (i = 0; i < p_Dpb->used_size; i++) {
							if ((hw->loop_last_poc + loop_playback_poc_threshold < p_Dpb->fs[i]->poc) &&
								!p_Dpb->fs[i]->is_output &&
								!p_Dpb->fs[i]->pre_output) {
								p_Dpb->fs[i]->is_output = 1;
							}
						}
						hw->loop_flag = 0;
					} else
						hw->loop_flag++;
				} else
					hw->loop_flag = 0;
			}
		}
		ret = store_picture_in_dpb(p_H264_Dpb,
			p_H264_Dpb->mVideo.dec_picture,
			hw->data_flag | hw->dec_flag |
			p_H264_Dpb->mVideo.dec_picture->data_flag);



		if (ret == -1) {
			/* DPB store failed hard: drop the picture and force
			 * a buffer-manager reset. */
			release_cur_decoding_buf(hw);
			bufmgr_force_recover(p_H264_Dpb);
		} else if (ret == -2) {
			release_cur_decoding_buf(hw);
		} else {
			if (hw->data_flag & ERROR_FLAG) {
				hw->no_error_count = 0;
				hw->no_error_i_count = 0;
			} else {
				hw->no_error_count++;
				if (hw->data_flag & I_FLAG)
					hw->no_error_i_count++;
			}
			if (hw->mmu_enable)
				hevc_set_unused_4k_buff_idx(hw,
					p_H264_Dpb->mVideo.dec_picture->buf_spec_num);
			bufmgr_post(p_H264_Dpb);
			hw->last_dec_picture =
				p_H264_Dpb->mVideo.dec_picture;
			p_H264_Dpb->mVideo.dec_picture = NULL;
			/* dump_dpb(&p_H264_Dpb->mDPB); */
			hw->has_i_frame = 1;
			if (hw->mmu_enable)
				hevc_set_frame_done(hw);
			hw->decode_pic_count++;
			p_H264_Dpb->decode_pic_count = hw->decode_pic_count;
			if (hw->skip_frame_count > 0) {
				/*skip n frame after first I */
				hw->skip_frame_count--;
				if (hw->skip_frame_count == 0)
					hw->dec_flag &= (~NODISP_FLAG);
			} else if (hw->skip_frame_count < -1) {
				/*skip n frame after first I until second I */
				hw->skip_frame_count++;
				if (hw->skip_frame_count == -1)
					hw->dec_flag &= (~NODISP_FLAG);
			}
		}
	}
	return 0;
}

/*
 * vh264_isr_thread_fn() - threaded IRQ handler: dispatch on the ucode's
 * DPB status (config request, slice header done, picture done, SEI/AUX
 * data ready, ...). Continues beyond this view.
 */
static irqreturn_t vh264_isr_thread_fn(struct vdec_s *vdec, int irq)
{
	int i;
	struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)(vdec->private);
	struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
	unsigned int dec_dpb_status = p_H264_Dpb->dec_dpb_status;
	u32 debug_tag;

	/* Trace markers for each event type. */
	if (dec_dpb_status == H264_SLICE_HEAD_DONE ||
		p_H264_Dpb->dec_dpb_status == H264_CONFIG_REQUEST) {
		ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_START);
	}
	else if (dec_dpb_status == H264_PIC_DATA_DONE) {
		ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_PIC_DONE_START);
	}
	else if (dec_dpb_status == H264_SEI_DATA_READY)
		ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_SEI_START);
	else if (dec_dpb_status == H264_AUX_DATA_READY)
		ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_AUX_START);

	if (dec_dpb_status == H264_CONFIG_REQUEST) {
#if 1
		/* Copy the RPM parameter block from lmem, swapping each
		 * 4-halfword group (ucode stores them reversed). */
		unsigned short *p = (unsigned short *)hw->lmem_addr;

		for (i = 0; i < (RPM_END-RPM_BEGIN); i += 4) {
			int ii;

			for (ii = 0; ii < 4; ii++) {
				p_H264_Dpb->dpb_param.l.data[i+ii] =
					p[i+3-ii];
				if (dpb_is_debug(DECODE_ID(hw),
					RRINT_FLAG_RPM)) {
					if (((i + ii) & 0xf) == 0)
						dpb_print(DECODE_ID(hw),
							0, "%04x:",
							i);
					dpb_print_cont(DECODE_ID(hw),
						0,
"%04x ",
						p[i+3-ii]);
					if (((i + ii + 1) & 0xf) == 0)
						dpb_print_cont(
							DECODE_ID(hw),
							0, "\r\n");
				}
			}
		}
		/* SPS_FLAGS2 bit3 = bitstream_restriction_flag; a change
		 * means the codec specific data changed. */
		if (p_H264_Dpb->bitstream_restriction_flag !=
			((p_H264_Dpb->dpb_param.l.data[SPS_FLAGS2] >> 3) & 0x1)) {
			dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
				"p_H264_Dpb->bitstream_restriction_flag 0x%x, new 0x%x\n",
				p_H264_Dpb->bitstream_restriction_flag, ((p_H264_Dpb->dpb_param.l.data[SPS_FLAGS2] >> 3) & 0x1));
			hw->csd_change_flag = 1;
		}
		p_H264_Dpb->bitstream_restriction_flag =
			(p_H264_Dpb->dpb_param.l.data[SPS_FLAGS2] >> 3) & 0x1;
		p_H264_Dpb->num_reorder_frames =
			p_H264_Dpb->dpb_param.l.data[NUM_REORDER_FRAMES];
		p_H264_Dpb->max_dec_frame_buffering =
			p_H264_Dpb->dpb_param.l.data[MAX_BUFFER_FRAME];

		dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL,
			"H264_CONFIG_REQUEST: pdb %d, %d, %d\n",
			p_H264_Dpb->bitstream_restriction_flag,
			p_H264_Dpb->num_reorder_frames,
			p_H264_Dpb->max_dec_frame_buffering);
		hw->bitstream_restriction_flag =
			p_H264_Dpb->bitstream_restriction_flag;
		hw->num_reorder_frames =
			p_H264_Dpb->num_reorder_frames;
		hw->max_dec_frame_buffering =
			p_H264_Dpb->max_dec_frame_buffering;

		/*crop*/
		p_H264_Dpb->chroma_format_idc = p_H264_Dpb->dpb_param.dpb.chroma_format_idc;
		p_H264_Dpb->frame_crop_left_offset = p_H264_Dpb->dpb_param.dpb.frame_crop_left_offset;
		p_H264_Dpb->frame_crop_right_offset = p_H264_Dpb->dpb_param.dpb.frame_crop_right_offset;
		p_H264_Dpb->frame_crop_top_offset = p_H264_Dpb->dpb_param.dpb.frame_crop_top_offset;
		p_H264_Dpb->frame_crop_bottom_offset = p_H264_Dpb->dpb_param.dpb.frame_crop_bottom_offset;

		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"%s chroma_format_idc %d crop offset: left %d right %d top %d bottom %d\n",
			__func__, p_H264_Dpb->chroma_format_idc,
			p_H264_Dpb->frame_crop_left_offset,
			p_H264_Dpb->frame_crop_right_offset,
			p_H264_Dpb->frame_crop_top_offset,
			p_H264_Dpb->frame_crop_bottom_offset);
#endif

		/* Ack the config request to the ucode and defer the actual
		 * buffer-manager configuration to the work queue. */
		WRITE_VREG(DPB_STATUS_REG, H264_ACTION_CONFIG_DONE);
		reset_process_time(hw);
		hw->reg_iqidct_control = READ_VREG(IQIDCT_CONTROL);
		hw->reg_iqidct_control_init_flag = 1;
		hw->dec_result = DEC_RESULT_CONFIG_PARAM;
#ifdef DETECT_WRONG_MULTI_SLICE
		/*restart check count and set 'unknown'*/
		dpb_print(DECODE_ID(hw), PRINT_FLAG_UCODE_EVT,
			"%s MULTI_SLICE_DETECT (check_count %d slice_count %d cur_slice_count %d flag %d), H264_CONFIG_REQUEST => restart check\n",
			__func__,
			hw->multi_slice_pic_check_count,
			hw->picture_slice_count,
			hw->cur_picture_slice_count,
			hw->multi_slice_pic_flag);

		hw->multi_slice_pic_check_count = 0;
		hw->multi_slice_pic_flag = 0;
		hw->picture_slice_count = 0;
#endif
		ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END);
		vdec_schedule_work(&hw->work);
	} else if (dec_dpb_status == H264_SLICE_HEAD_DONE) {
		u16 data_hight;
		u16 data_low;
		u32 video_signal;

		int slice_header_process_status = 0;
		int I_flag;
		int frame_num_gap = 0;
		union param dpb_param_bak;
		/*unsigned char is_idr;*/
		unsigned short *p = (unsigned short *)hw->lmem_addr;
		unsigned mb_width = hw->seq_info2 & 0xff;
		unsigned short first_mb_in_slice;
		unsigned int decode_mb_count, mby_mbx;
		struct StorablePicture *pic = p_H264_Dpb->mVideo.dec_picture;

		reset_process_time(hw);
		hw->frmbase_cont_flag = 0;

		/* MBAFF pictures count MB pairs, so double first_mb. */
		if ((pic != NULL) && (pic->mb_aff_frame_flag == 1))
			first_mb_in_slice = p[FIRST_MB_IN_SLICE + 3] * 2;
		else
			first_mb_in_slice = p[FIRST_MB_IN_SLICE + 3];

#ifdef DETECT_WRONG_MULTI_SLICE
		hw->cur_picture_slice_count++;

		if ((error_proc_policy & 0x10000) &&
			(hw->cur_picture_slice_count > 1) &&
			(first_mb_in_slice == 0) &&
			(hw->multi_slice_pic_flag == 0))
			hw->multi_slice_pic_check_count = 0;

		/* A second slice header while the stream is believed to be
		 * single-slice means the ucode missed a picture boundary:
		 * finish the previous picture (or drop it) here. */
		if ((error_proc_policy & 0x10000) &&
			(hw->cur_picture_slice_count > 1) &&
			(hw->multi_slice_pic_flag == 1)) {
			dpb_print(DECODE_ID(hw), 0,
				"%s MULTI_SLICE_DETECT (check_count %d slice_count %d cur_slice_count %d flag %d), WRONG_MULTI_SLICE detected, insert picture\n",
				__func__,
				hw->multi_slice_pic_check_count,
				hw->picture_slice_count,
				hw->cur_picture_slice_count,
				hw->multi_slice_pic_flag);

			mby_mbx = READ_VREG(MBY_MBX);
			decode_mb_count = ((mby_mbx & 0xff) * mb_width +
				(((mby_mbx >> 8) & 0xff) + 1));

			if (first_mb_in_slice == decode_mb_count &&
				first_mb_in_slice != 0) {
				/* Slice continues exactly where decoding
				 * stopped: it really is multi-slice. */
				dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
					"%s first_mb_in_slice = %d \n",
					__func__, first_mb_in_slice);

				hw->multi_slice_pic_flag = 0;
				hw->multi_slice_pic_check_count = 0;
			} else if (hw->cur_picture_slice_count > hw->last_picture_slice_count) {
				vh264_pic_done_proc(vdec);
				//if (p_H264_Dpb->mDPB.used_size == p_H264_Dpb->mDPB.size) {
				if (!have_free_buf_spec(vdec)) {
					dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, "dpb full, wait buffer\n");
					p_H264_Dpb->mVideo.pre_frame_num = hw->first_pre_frame_num;
					hw->last_picture_slice_count = hw->cur_picture_slice_count;
					hw->no_decoder_buffer_flag = 1;
					hw->dec_result = DEC_RESULT_AGAIN;
					vdec_schedule_work(&hw->work);
					return IRQ_HANDLED;
				}
			}
			else {
				if (p_H264_Dpb->mVideo.dec_picture) {
					if (p_H264_Dpb->mVideo.dec_picture->colocated_buf_index >= 0) {
						release_colocate_buf(p_H264_Dpb,
							p_H264_Dpb->mVideo.dec_picture->colocated_buf_index);
						p_H264_Dpb->mVideo.dec_picture->colocated_buf_index = -1;
					}
				}
				release_cur_decoding_buf(hw);
			}
		}
#endif

		/* Snapshot decode-state registers for save/restore. */
		hw->reg_iqidct_control = READ_VREG(IQIDCT_CONTROL);
		hw->reg_iqidct_control_init_flag = 1;
		hw->reg_vcop_ctrl_reg = READ_VREG(VCOP_CTRL_REG);
		hw->reg_rv_ai_mb_count = READ_VREG(RV_AI_MB_COUNT);
		hw->vld_dec_control = READ_VREG(VLD_DECODE_CONTROL);
		if (input_frame_based(vdec) &&
			frmbase_cont_bitlevel2 != 0 &&
			READ_VREG(VIFF_BIT_CNT) <
			frmbase_cont_bitlevel2 &&
			hw->get_data_count >= 0x70000000) {
			dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
				"%s H264_SLICE_HEAD_DONE with small bitcnt %d, goto empty_proc\n",
				__func__,
				READ_VREG(VIFF_BIT_CNT));

			goto empty_proc;
		}

#if 0
		if (p_H264_Dpb->mVideo.dec_picture == NULL) {
			if (!is_buffer_available(vdec)) {
				hw->buffer_empty_flag = 1;
				dpb_print(DECODE_ID(hw),
					PRINT_FLAG_UCODE_EVT,
					"%s, buffer_empty, newframe_q(%d), have_free_buf_spec(%d), init_done(%d), used_size(%d/%d), is_there_unused_frame_from_dpb(%d)\n",
					__func__,
					kfifo_len(&hw->newframe_q),
					have_free_buf_spec(vdec),
					p_H264_Dpb->mDPB.init_done,
					p_H264_Dpb->mDPB.used_size,
					p_H264_Dpb->mDPB.size,
					is_there_unused_frame_from_dpb(
						&p_H264_Dpb->mDPB));
				return IRQ_HANDLED;
			}
		}

		hw->buffer_empty_flag = 0;
#endif
#ifdef SEND_PARAM_WITH_REG
		/* Alternative parameter path: poll the RPM command register
		 * one halfword at a time. */
		for (i = 0; i < (RPM_END-RPM_BEGIN); i++) {
			unsigned int data32;

			do {
				data32 = READ_VREG(RPM_CMD_REG);
				/* printk("%x\n", data32); */
			} while ((data32&0x10000) == 0);
			p_H264_Dpb->dpb_param.l.data[i] = data32 & 0xffff;
			WRITE_VREG(RPM_CMD_REG, 0);
			/* printk("%x:%x\n", i,data32); */
		}
#else
		/* Keep the previous parameters so a missed picture boundary
		 * can be detected by comparison below. */
		dpb_param_bak = p_H264_Dpb->dpb_param;

		ATRACE_COUNTER(hw->trace.decode_header_time_name, TRACE_HEADER_RPM_START);

		for (i = 0; i < (RPM_END-RPM_BEGIN); i += 4) {
			int ii;

			for (ii = 0; ii < 4; ii++) {
				p_H264_Dpb->dpb_param.l.data[i+ii] =
					p[i+3-ii];
				if (dpb_is_debug(DECODE_ID(hw),
					RRINT_FLAG_RPM)) {
					if (((i + ii) & 0xf) == 0)
						dpb_print(DECODE_ID(hw),
							0, "%04x:",
							i);
					dpb_print_cont(DECODE_ID(hw),
						0, "%04x ",
						p[i+3-ii]);
					if (((i + ii + 1) & 0xf) == 0)
						dpb_print_cont(
							DECODE_ID(hw),
							0, "\r\n");
				}
			}
		}
		ATRACE_COUNTER(hw->trace.decode_header_time_name, TRACE_HEADER_RPM_END);
#endif
#ifdef DETECT_WRONG_MULTI_SLICE

		/* Known multi-slice stream (flag==2): a slice-type change or
		 * a backwards first_mb means a new picture started — close
		 * out the previous one. */
		if (p_H264_Dpb->mVideo.dec_picture &&
			hw->multi_slice_pic_flag == 2 &&
			(p_H264_Dpb->dpb_param.l.data[SLICE_TYPE] != dpb_param_bak.l.data[SLICE_TYPE] ||
			dpb_param_bak.l.data[FIRST_MB_IN_SLICE] > p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE])) {
			dpb_print(DECODE_ID(hw), 0,
				"decode next pic, save before, SLICE_TYPE BAK %d, SLICE_TYPE %d, FIRST_MB_IN_SLICE BAK %d, FIRST_MB_IN_SLICE %d\n",
				dpb_param_bak.l.data[SLICE_TYPE], p_H264_Dpb->dpb_param.l.data[SLICE_TYPE],
				dpb_param_bak.l.data[FIRST_MB_IN_SLICE], p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE]);
			vh264_pic_done_proc(vdec);
		}
#endif
		data_low = p_H264_Dpb->dpb_param.l.data[VIDEO_SIGNAL_LOW];
		data_hight = p_H264_Dpb->dpb_param.l.data[VIDEO_SIGNAL_HIGHT];

		/* Repack the VUI video-signal descriptor bytes into the
		 * layout the display side expects. */
		video_signal = (data_hight << 16) | data_low;
		hw->video_signal_from_vui =
			((video_signal & 0xffff) << 8) |
			((video_signal & 0xff0000) >> 16) |
			((video_signal & 0x3f000000));


		/*dpb_print(DECODE_ID(hw),
			0,
			"video_signal_from_vui:0x%x, "
			"data_low:0x%x, data_hight:0x%x\n",
			hw->video_signal_from_vui,
			data_low,
			data_hight);*/

		parse_sei_data(hw, hw->sei_data_buf, hw->sei_data_len);

		if (hw->config_bufmgr_done == 0) {
			hw->dec_result = DEC_RESULT_DONE;
			vdec_schedule_work(&hw->work);
			dpb_print(DECODE_ID(hw),
				PRINT_FLAG_UCODE_EVT,
				"config_bufmgr not done, discard frame\n");
			return IRQ_HANDLED;
		} else if ((hw->first_i_policy & 0x3) != 0) {
			/* first_i_policy: discard leading non-I frames; with
			 * mode 3, only an IDR counts as the first I. */
			unsigned char is_i_slice =
				(p_H264_Dpb->dpb_param.l.data[SLICE_TYPE]
				== I_Slice)
				? 1 : 0;
			unsigned char is_idr =
				((p_H264_Dpb->dpb_param.dpb.NAL_info_mmco & 0x1f)
				== 5);
			if ((hw->first_i_policy & 0x3) == 0x3)
				is_i_slice = is_idr;
			if (!is_i_slice) {
				if (hw->has_i_frame == 0) {
					amvdec_stop();
					vdec->mc_loaded = 0;
					hw->dec_result = DEC_RESULT_DONE;
					vdec_schedule_work(&hw->work);
					dpb_print(DECODE_ID(hw),
						PRINT_FLAG_UCODE_EVT,
						"has_i_frame is 0, discard none I(DR) frame silce_type %d is_idr %d\n", p_H264_Dpb->dpb_param.l.data[SLICE_TYPE], is_idr);
					return IRQ_HANDLED;
				}
			} else {
				if (hw->skip_frame_count < 0 || is_idr) {
					/* second I */
					hw->dec_flag &= (~NODISP_FLAG);
					hw->skip_frame_count = 0;
				}
				if (hw->has_i_frame == 0 &&
					(!is_idr)) {
					int skip_count =
						(hw->first_i_policy >> 8) & 0xff;
					/* first I (not IDR) */
					if ((hw->first_i_policy & 0x3) == 2)
						hw->skip_frame_count =
							-1 - skip_count;
					else
						hw->skip_frame_count =
							skip_count;
					if (hw->skip_frame_count != 0)
						hw->dec_flag |= NODISP_FLAG;
				}
			}
		}
		/* NOTE(review): top_field poc is printed twice here —
		 * bot poc is presumably meant for the 4th arg; log-only. */
		dpb_print(DECODE_ID(hw), PRINT_FLAG_UCODE_EVT,
			"current dpb index %d, poc %d, top/bot poc (%d,%d)\n",
			p_H264_Dpb->dpb_param.dpb.current_dpb_index,
			val(p_H264_Dpb->dpb_param.dpb.frame_pic_order_cnt),
			val(p_H264_Dpb->dpb_param.dpb.top_field_pic_order_cnt),
			val(p_H264_Dpb->dpb_param.dpb.top_field_pic_order_cnt));
		I_flag = (p_H264_Dpb->dpb_param.l.data[SLICE_TYPE] == I_Slice)
			? I_FLAG : 0;

		/* i_only mode: flush on each I frame, skip non-I frames. */
		if ((hw->i_only & 0x2) && (I_flag & I_FLAG))
			flush_dpb(p_H264_Dpb);

		if ((hw->i_only & 0x2) && (!(I_flag & I_FLAG)) &&
			(p_H264_Dpb->mSlice.structure == FRAME)) {
			hw->data_flag = NULL_FLAG;
			goto pic_done_proc;
		}

		slice_header_process_status =
			h264_slice_header_process(p_H264_Dpb, &frame_num_gap);
		if (hw->mmu_enable)
			hevc_sao_set_slice_type(hw,
				slice_header_process_status,
				hw->dpb.mSlice.idr_flag);
		vui_config(hw);

		if (p_H264_Dpb->mVideo.dec_picture) {
			int cfg_ret = 0;
			bool field_pic_flag = false;
			unsigned mby_mbx = READ_VREG(MBY_MBX);
			struct StorablePicture *p =
				p_H264_Dpb->mVideo.dec_picture;

			/* status 1 = this slice header opened a new picture */
			if (slice_header_process_status == 1) {
				if (!p_H264_Dpb->mSPS.frame_mbs_only_flag) {
					field_pic_flag =
						(p_H264_Dpb->mSlice.structure == TOP_FIELD ||
						p_H264_Dpb->mSlice.structure == BOTTOM_FIELD) ?
						true : false;
				}

				vdec_set_profile_level(vdec, p_H264_Dpb->mSPS.profile_idc,
					p_H264_Dpb->mSPS.level_idc);

				/* Fast output for small-DPB baseline, an
				 * explicit request bit, or low-latency mode. */
				if (!field_pic_flag && (((p_H264_Dpb->mSPS.profile_idc == BASELINE) &&
					(p_H264_Dpb->dec_dpb_size < 2)) ||
					(((unsigned long)(hw->vh264_amstream_dec_info.param)) & 0x8) || hw->low_latency_mode & 0x8)) {
					p_H264_Dpb->fast_output_enable =
						H264_OUTPUT_MODE_FAST;
				}
				else
					p_H264_Dpb->fast_output_enable
						= fast_output_enable;
				if (hw->enable_fence)
					p_H264_Dpb->fast_output_enable = H264_OUTPUT_MODE_FAST;

				hw->data_flag = I_flag;
				/* nal_unit_type 5 = IDR */
				if ((p_H264_Dpb->dpb_param.dpb.NAL_info_mmco & 0x1f)
					== 5)
					hw->data_flag |= IDR_FLAG;
				/* New picture starting at nonzero first_mb
				 * with no MBs decoded: a slice was lost. */
				if ((p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE]) && !mby_mbx) {
					p->data_flag |= ERROR_FLAG;
					dpb_print(DECODE_ID(hw),
						PRINT_FLAG_VDEC_STATUS,
						"one slice error in muulti-slice first_mb 0x%x mby_mbx 0x%x slice_type %d\n",
						p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE],
						READ_VREG(MBY_MBX),
						p->slice_type);
				}
				dpb_print(DECODE_ID(hw),
					PRINT_FLAG_VDEC_STATUS,
					"==================> frame count %d to skip %d\n",
					hw->decode_pic_count+1,
					hw->skip_frame_count);
			} else if (error_proc_policy & 0x100){
				/* Continuing slice: verify decode progress
				 * reached this slice's first MB. */
				unsigned decode_mb_count =
					((mby_mbx & 0xff) * hw->mb_width +
					(((mby_mbx >> 8) & 0xff) + 1));
				if (decode_mb_count <
					((p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE]) *
					(1 + p->mb_aff_frame_flag)) && decode_mb_count) {
					dpb_print(DECODE_ID(hw),
						PRINT_FLAG_VDEC_STATUS,
						"Error detect! first_mb 0x%x mby_mbx 0x%x decode_mb 0x%x\n",
						p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE],
						READ_VREG(MBY_MBX),
						decode_mb_count);
					p->data_flag |= ERROR_FLAG;
				}/* else if (!p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE] && decode_mb_count) {
					p->data_flag |= ERROR_FLAG;
					goto pic_done_proc;
				}*/
			}

			/* frame_num gap implies lost reference pictures
			 * (unless policy 0x800000 suppresses the flag). */
			if (!I_flag && frame_num_gap && !p_H264_Dpb->long_term_reference_flag) {
				if (!(error_proc_policy & 0x800000)) {
					hw->data_flag |= ERROR_FLAG;
					p_H264_Dpb->mVideo.dec_picture->data_flag |= ERROR_FLAG;
					dpb_print(DECODE_ID(hw), 0, "frame number gap error\n");
				}
			}

			/* policy 0x400: validate the reference lists; too
			 * many failures force a bufmgr reset. */
			if ((error_proc_policy & 0x400) && !hw->enable_fence) {
				int ret = dpb_check_ref_list_error(p_H264_Dpb);

				if (ret != 0) {
					hw->reflist_error_count ++;
					dpb_print(DECODE_ID(hw), 0,
						"reference list error %d frame count %d to skip %d reflist_error_count %d\n",
						ret,
						hw->decode_pic_count+1,
						hw->skip_frame_count,
						hw->reflist_error_count);

					p_H264_Dpb->mVideo.dec_picture->data_flag = NODISP_FLAG;
					if (((error_proc_policy & 0x80)
						&& ((hw->dec_flag &
						NODISP_FLAG) == 0)) ||(hw->reflist_error_count > 50)) {
						hw->reset_bufmgr_flag = 1;
						hw->reflist_error_count =0;
						amvdec_stop();
						vdec->mc_loaded = 0;
						hw->dec_result = DEC_RESULT_DONE;
						vdec_schedule_work(&hw->work);
						return IRQ_HANDLED;
					}
				} else
					hw->reflist_error_count = 0;
			}
			/* policy 0x800: DPB-level error reported by the
			 * slice-header processing. */
			if ((error_proc_policy & 0x800) && (!(hw->i_only & 0x2))
				&& p_H264_Dpb->dpb_error_flag != 0) {
				dpb_print(DECODE_ID(hw), 0,
					"dpb error %d\n",
					p_H264_Dpb->dpb_error_flag);
				hw->data_flag |= ERROR_FLAG;
				p_H264_Dpb->mVideo.dec_picture->data_flag |= ERROR_FLAG;
				if ((error_proc_policy & 0x80) &&
					((hw->dec_flag & NODISP_FLAG) == 0)) {
					hw->reset_bufmgr_flag = 1;
					amvdec_stop();
					vdec->mc_loaded = 0;
					hw->dec_result = DEC_RESULT_DONE;
					vdec_schedule_work(&hw->work);
					return IRQ_HANDLED;
				}
			}
			ATRACE_COUNTER(hw->trace.decode_header_time_name, TRACE_HEADER_REGISTER_START);
			cfg_ret = config_decode_buf(hw,
				p_H264_Dpb->mVideo.dec_picture);
			ATRACE_COUNTER(hw->trace.decode_header_time_name, TRACE_HEADER_REGISTER_END);
			if (cfg_ret < 0) {
				dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
					"config_decode_buf fail (%d)\n",
					cfg_ret);
				if (error_proc_policy & 0x2) {
					release_cur_decoding_buf(hw);
					/*hw->data_flag |= ERROR_FLAG;*/
					hw->reset_bufmgr_flag = 1;
					hw->dec_result = DEC_RESULT_DONE;
					vdec_schedule_work(&hw->work);
					return IRQ_HANDLED;
				} else
					hw->data_flag |= ERROR_FLAG;
				p_H264_Dpb->mVideo.dec_picture->data_flag |= ERROR_FLAG;
			}
		}

		ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END);

		/* Kick the ucode to decode: new picture vs. next slice. */
		if (slice_header_process_status == 1)
			WRITE_VREG(DPB_STATUS_REG, H264_ACTION_DECODE_NEWPIC);
		else
			WRITE_VREG(DPB_STATUS_REG, H264_ACTION_DECODE_SLICE);
		hw->last_mby_mbx = 0;
		hw->last_vld_level = 0;
		start_process_time(hw);
	} else if (dec_dpb_status == H264_PIC_DATA_DONE
		||((dec_dpb_status == H264_DATA_REQUEST) && input_frame_based(vdec))) {
#ifdef DETECT_WRONG_MULTI_SLICE
		dpb_print(DECODE_ID(hw), PRINT_FLAG_UCODE_EVT,
			"%s MULTI_SLICE_DETECT (check_count %d slice_count %d cur_slice_count %d flag %d), H264_PIC_DATA_DONE\n",
			__func__,
			hw->multi_slice_pic_check_count,
			hw->picture_slice_count,
			hw->cur_picture_slice_count,
			hw->multi_slice_pic_flag);

		/* Learn whether this stream is single- or multi-slice by
		 * observing a stable slice count over check_slice_num
		 * pictures; flag: 0=unknown, 1=single, 2=multi. */
		if (hw->multi_slice_pic_check_count < check_slice_num) {
			hw->multi_slice_pic_check_count++;
			if (hw->cur_picture_slice_count !=
				hw->picture_slice_count) {
				/*restart check count and set 'unknown'*/
				hw->multi_slice_pic_check_count = 0;
				hw->multi_slice_pic_flag = 0;
			}
			hw->picture_slice_count =
				hw->cur_picture_slice_count;
		} else if (hw->multi_slice_pic_check_count >= check_slice_num) {
			if (hw->picture_slice_count > 1)
				hw->multi_slice_pic_flag = 2;
			else
				hw->multi_slice_pic_flag = 1;
		}
#endif

pic_done_proc:
		reset_process_time(hw);
		/* Jumped here from an empty/timeout path: the picture is
		 * incomplete, mark it as erroneous before storing. */
		if ((dec_dpb_status == H264_SEARCH_BUFEMPTY) ||
			(dec_dpb_status == H264_DECODE_BUFEMPTY) ||
			(dec_dpb_status == H264_DECODE_TIMEOUT) ||
			((dec_dpb_status == H264_DATA_REQUEST) && input_frame_based(vdec))) {
			hw->data_flag |= ERROR_FLAG;
			if (hw->dpb.mVideo.dec_picture)
				hw->dpb.mVideo.dec_picture->data_flag |= ERROR_FLAG;
			dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL,
				"%s, mark err_frame\n", __func__);
		}
		vh264_pic_done_proc(vdec);

		if (hw->frmbase_cont_flag) {
			/*do not DEC_RESULT_GET_DATA*/
			hw->get_data_count = 0x7fffffff;
			WRITE_VREG(DPB_STATUS_REG, H264_ACTION_SEARCH_HEAD);
			decode_frame_count[DECODE_ID(hw)]++;
			if (p_H264_Dpb->mSlice.slice_type == I_SLICE) {
				hw->gvs.i_decoded_frames++;
			} else if (p_H264_Dpb->mSlice.slice_type == P_SLICE) {
				hw->gvs.p_decoded_frames++;
			} else if (p_H264_Dpb->mSlice.slice_type == B_SLICE) {
				hw->gvs.b_decoded_frames++;
			}
			start_process_time(hw);
			return IRQ_HANDLED;
		}
		amvdec_stop();
		dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
			"%s %s decode slice count %d\n",
			__func__,
			(dec_dpb_status == H264_PIC_DATA_DONE) ?
			"H264_PIC_DATA_DONE" :
			(dec_dpb_status == H264_FIND_NEXT_PIC_NAL) ?
+ "H264_FIND_NEXT_PIC_NAL" : "H264_FIND_NEXT_DVEL_NAL", + hw->decode_pic_count); + if (hw->kpi_first_i_decoded == 0) { + hw->kpi_first_i_decoded = 1; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "[vdec_kpi][%s] First I frame decoded.\n", __func__); + } + /* WRITE_VREG(DPB_STATUS_REG, H264_ACTION_SEARCH_HEAD); */ + hw->dec_result = DEC_RESULT_DONE; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec->slave && + dec_dpb_status == H264_FIND_NEXT_DVEL_NAL) { + struct vdec_h264_hw_s *hw_el = + (struct vdec_h264_hw_s *)(vdec->slave->private); + hw_el->got_valid_nal = 0; + hw->switch_dvlayer_flag = 1; + } else if (vdec->master && + dec_dpb_status == H264_FIND_NEXT_PIC_NAL) { + struct vdec_h264_hw_s *hw_bl = + (struct vdec_h264_hw_s *)(vdec->master->private); + hw_bl->got_valid_nal = 0; + hw->switch_dvlayer_flag = 1; + } else { + hw->switch_dvlayer_flag = 0; + hw->got_valid_nal = 1; + } +#endif + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + } else if ( + (dec_dpb_status == H264_FIND_NEXT_PIC_NAL) || + (dec_dpb_status == H264_FIND_NEXT_DVEL_NAL)) { + goto pic_done_proc; +#endif + } else if (dec_dpb_status == H264_AUX_DATA_READY) { + reset_process_time(hw); + if (READ_VREG(H264_AUX_DATA_SIZE) != 0) { + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_SEI_DETAIL)) + dump_aux_buf(hw); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec_frame_based(vdec)) { + if (hw->last_dec_picture) + set_aux_data(hw, + hw->last_dec_picture, 0, 0, NULL); + } else if (vdec->dolby_meta_with_el || vdec->slave) { + if (hw->last_dec_picture) + set_aux_data(hw, hw->last_dec_picture, + 0, 0, NULL); + } else { + if (vdec->master) { + struct vdec_h264_hw_s *hw_bl = + (struct vdec_h264_hw_s *) + (vdec->master->private); + if (hw_bl->last_dec_picture != NULL) { + set_aux_data(hw_bl, + hw_bl->last_dec_picture, + 0, 1, hw); + } + 
set_aux_data(hw, + hw->last_dec_picture, + 0, 2, NULL); + } + } +#else + if (hw->last_dec_picture) + set_aux_data(hw, + hw->last_dec_picture, 0, 0, NULL); +#endif + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + hw->switch_dvlayer_flag = 0; + hw->got_valid_nal = 1; +#endif + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s H264_AUX_DATA_READY\n", __func__); + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + } else if (/*(dec_dpb_status == H264_DATA_REQUEST) ||*/ + (dec_dpb_status == H264_SEARCH_BUFEMPTY) || + (dec_dpb_status == H264_DECODE_BUFEMPTY) || + (dec_dpb_status == H264_DECODE_TIMEOUT)) { +empty_proc: + reset_process_time(hw); + if ((error_proc_policy & 0x40000) && + ((dec_dpb_status == H264_DECODE_TIMEOUT) || + (!hw->frmbase_cont_flag && (dec_dpb_status == H264_SEARCH_BUFEMPTY || dec_dpb_status == H264_DECODE_BUFEMPTY) && input_frame_based(vdec)))) + goto pic_done_proc; + if (!hw->frmbase_cont_flag) + release_cur_decoding_buf(hw); + + if (input_frame_based(vdec) || + (READ_VREG(VLD_MEM_VIFIFO_LEVEL) > 0x200)) { + if (h264_debug_flag & + DISABLE_ERROR_HANDLE) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, + "%s decoding error, level 0x%x\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + goto send_again; + } + amvdec_stop(); + vdec->mc_loaded = 0; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s %s\n", __func__, + (dec_dpb_status == H264_SEARCH_BUFEMPTY) ? + "H264_SEARCH_BUFEMPTY" : + (dec_dpb_status == H264_DECODE_BUFEMPTY) ? + "H264_DECODE_BUFEMPTY" : + (dec_dpb_status == H264_DECODE_TIMEOUT) ? 
+ "H264_DECODE_TIMEOUT" : + "OTHER"); + hw->dec_result = DEC_RESULT_DONE; + + if (dec_dpb_status == H264_SEARCH_BUFEMPTY) + hw->search_dataempty_num++; + else if (dec_dpb_status == H264_DECODE_TIMEOUT) { + hw->decode_timeout_num++; + if (error_proc_policy & 0x4000) { + hw->data_flag |= ERROR_FLAG; + if ((p_H264_Dpb->last_dpb_status == H264_DECODE_TIMEOUT) || + (p_H264_Dpb->last_dpb_status == H264_PIC_DATA_DONE) || + ((p_H264_Dpb->last_dpb_status == H264_SLICE_HEAD_DONE) && + (p_H264_Dpb->mSlice.slice_type != B_SLICE))) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, "%s last dpb status 0x%x need bugmgr reset \n", + p_H264_Dpb->last_dpb_status, __func__); + hw->reset_bufmgr_flag = 1; + } + } + } else if (dec_dpb_status == H264_DECODE_BUFEMPTY) + hw->decode_dataempty_num++; + if (!hw->frmbase_cont_flag) + hw->data_flag |= ERROR_FLAG; + + vdec_schedule_work(&hw->work); + } else { + /* WRITE_VREG(DPB_STATUS_REG, H264_ACTION_INIT); */ +#ifdef DETECT_WRONG_MULTI_SLICE + if (error_proc_policy & 0x10000) { + p_H264_Dpb->mVideo.pre_frame_num = hw->first_pre_frame_num; + } + hw->last_picture_slice_count = hw->cur_picture_slice_count; +#endif + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_AGAIN\n", __func__); +send_again: + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + } + } else if (dec_dpb_status == H264_DATA_REQUEST) { + reset_process_time(hw); + if (input_frame_based(vdec)) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_VDEC_STATUS, + "%s H264_DATA_REQUEST (%d)\n", + __func__, hw->get_data_count); + hw->dec_result = DEC_RESULT_GET_DATA; + hw->reg_iqidct_control = READ_VREG(IQIDCT_CONTROL); + hw->reg_iqidct_control_init_flag = 1; + hw->get_data_start_time = jiffies; + hw->get_data_count++; + if (hw->get_data_count >= frame_max_data_packet) + goto empty_proc; + vdec_schedule_work(&hw->work); + } else + goto empty_proc; + } else if (dec_dpb_status == H264_DECODE_OVER_SIZE) { + dpb_print(DECODE_ID(hw), 0, + "vmh264 decode oversize 
!!\n"); + release_cur_decoding_buf(hw); + hw->data_flag |= ERROR_FLAG; + hw->stat |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + reset_process_time(hw); + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } else if (dec_dpb_status == H264_SEI_DATA_READY) { + int aux_data_len; + aux_data_len = + (READ_VREG(H264_AUX_DATA_SIZE) >> 16) << 4; + + if (aux_data_len > SEI_DATA_SIZE) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "sei data size more than 4K: %d, discarded it\n", + hw->sei_itu_data_len); + hw->sei_itu_data_len = 0; + } + + if (aux_data_len != 0) { + u8 *trans_data_buf; + u8 *sei_data_buf; + u8 swap_byte; + +#if 0 + dump_aux_buf(hw); +#endif + trans_data_buf = (u8 *)hw->aux_addr; + + if (trans_data_buf[7] == AUX_TAG_SEI) { + int left_len; + + sei_data_buf = (u8 *)hw->sei_data_buf + + hw->sei_data_len; + left_len = SEI_DATA_SIZE - hw->sei_data_len; + if (aux_data_len/2 <= left_len) { + for (i = 0; i < aux_data_len/2; i++) + sei_data_buf[i] + = trans_data_buf[i*2]; + + aux_data_len = aux_data_len / 2; + for (i = 0; i < aux_data_len; i = i+4) { + swap_byte = sei_data_buf[i]; + sei_data_buf[i] + = sei_data_buf[i+3]; + sei_data_buf[i+3] = swap_byte; + + swap_byte = sei_data_buf[i+1]; + sei_data_buf[i+1] + = sei_data_buf[i+2]; + sei_data_buf[i+2] = swap_byte; + } + + for (i = aux_data_len-1; i >= 0; i--) + if (sei_data_buf[i] != 0) + break; + + hw->sei_data_len += i+1; + } else + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, + "sei data size %d and more than left space: %d, discarded it\n", + hw->sei_itu_data_len, + left_len); + } + } + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + WRITE_VREG(DPB_STATUS_REG, H264_SEI_DATA_DONE); + + return IRQ_HANDLED; + } + + + /* ucode debug */ + debug_tag = READ_VREG(DEBUG_REG1); + if (debug_tag & 0x10000) { + unsigned short *p = (unsigned short *)hw->lmem_addr; + + dpb_print(DECODE_ID(hw), 0, + "LMEM<tag %x>:\n", debug_tag); + for (i = 0; i < 0x400; i += 4) { + int ii; 
+ if ((i & 0xf) == 0) + dpb_print_cont(DECODE_ID(hw), 0, + "%03x: ", i); + for (ii = 0; ii < 4; ii++) + dpb_print_cont(DECODE_ID(hw), 0, + "%04x ", p[i+3-ii]); + if (((i+ii) & 0xf) == 0) + dpb_print_cont(DECODE_ID(hw), 0, + "\n"); + } + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == + hw->decode_pic_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_VREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + } + else if (debug_tag & 0x20000) + hw->ucode_pause_pos = 0xffffffff; + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_VREG(DEBUG_REG1, 0); + } else if (debug_tag != 0) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_UCODE_EVT, + "dbg%x: %x\n", debug_tag, + READ_VREG(DEBUG_REG2)); + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == + hw->decode_pic_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_VREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + } + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_VREG(DEBUG_REG1, 0); + } + /**/ + return IRQ_HANDLED; +} + +static irqreturn_t vh264_isr(struct vdec_s *vdec, int irq) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)(vdec->private); + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + if (!hw) + return IRQ_HANDLED; + + if (hw->eos) + return IRQ_HANDLED; + + p_H264_Dpb->vdec = vdec; + p_H264_Dpb->dec_dpb_status = READ_VREG(DPB_STATUS_REG); + if (p_H264_Dpb->dec_dpb_status == H264_SLICE_HEAD_DONE || + p_H264_Dpb->dec_dpb_status == H264_CONFIG_REQUEST) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_HEAD_DONE); + } + else if (p_H264_Dpb->dec_dpb_status == H264_PIC_DATA_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_PIC_DONE); + } + else if 
(p_H264_Dpb->dec_dpb_status == H264_SEI_DATA_READY) + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_SEI_DONE); + else if (p_H264_Dpb->dec_dpb_status == H264_AUX_DATA_READY) + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_AUX_DONE); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_UCODE_EVT, + "%s DPB_STATUS_REG: 0x%x, run(%d) last_state (%x) ERROR_STATUS_REG 0x%x, sb (0x%x 0x%x 0x%x) bitcnt 0x%x mby_mbx 0x%x\n", + __func__, + p_H264_Dpb->dec_dpb_status, + run_count[DECODE_ID(hw)], + hw->dec_result, + READ_VREG(ERROR_STATUS_REG), + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VIFF_BIT_CNT), + READ_VREG(MBY_MBX)); + + if (p_H264_Dpb->dec_dpb_status == H264_WRRSP_REQUEST) { + if (hw->mmu_enable) + hevc_sao_wait_done(hw); + WRITE_VREG(DPB_STATUS_REG, H264_WRRSP_DONE); + return IRQ_HANDLED; + } + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_END); + return IRQ_WAKE_THREAD; + +} + +static void timeout_process(struct vdec_h264_hw_s *hw) +{ + struct vdec_s *vdec = hw_to_vdec(hw); + + /* + * In this very timeout point,the vh264_work arrives, + * or in some cases the system become slow, then come + * this second timeout. In both cases we return. 
+ */ + if (work_pending(&hw->work) || + work_busy(&hw->work) || + work_busy(&hw->timeout_work) || + work_pending(&hw->timeout_work)) { + pr_err("%s h264[%d] work pending, do nothing.\n",__func__, vdec->id); + return; + } + hw->timeout_num++; + amvdec_stop(); + vdec->mc_loaded = 0; + if (hw->mmu_enable) { + hevc_set_frame_done(hw); + hevc_sao_wait_done(hw); + } + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, "%s decoder timeout, DPB_STATUS_REG 0x%x\n", __func__, READ_VREG(DPB_STATUS_REG)); + release_cur_decoding_buf(hw); + hw->dec_result = DEC_RESULT_TIMEOUT; + hw->data_flag |= ERROR_FLAG; + + if (work_pending(&hw->work)) + return; + vdec_schedule_work(&hw->timeout_work); +} + +static void dump_bufspec(struct vdec_h264_hw_s *hw, + const char *caller) +{ + int i; + dpb_print(DECODE_ID(hw), 0, + "%s in %s:\n", __func__, caller); + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (hw->buffer_spec[i].used == -1) + continue; + dpb_print(DECODE_ID(hw), 0, + "bufspec (%d): used %d adr 0x%x(%lx) canvas(%d) vf_ref(%d) ", + i, hw->buffer_spec[i].used, + hw->buffer_spec[i].buf_adr, + hw->buffer_spec[i].cma_alloc_addr, + hw->buffer_spec[i].canvas_pos, + hw->buffer_spec[i].vf_ref + ); +#ifdef CONFIG_AM_VDEC_DV + dpb_print_cont(DECODE_ID(hw), 0, + "dv_el_exist %d", + hw->buffer_spec[i].dv_enhance_exist + ); +#endif + dpb_print_cont(DECODE_ID(hw), 0, "\n"); + } + +} + +static void vmh264_dump_state(struct vdec_s *vdec) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *)(vdec->private); + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + dpb_print(DECODE_ID(hw), 0, + "====== %s\n", __func__); + dpb_print(DECODE_ID(hw), 0, + "width/height (%d/%d), num_reorder_frames %d dec_dpb_size %d dpb size(bufspec count) %d max_reference_size(collocate count) %d i_only %d video_signal_type 0x%x send_err %d \n", + hw->frame_width, + hw->frame_height, + hw->num_reorder_frames, + hw->dpb.dec_dpb_size, + hw->dpb.mDPB.size, + hw->max_reference_size, + hw->i_only, + hw->video_signal_type, + 
hw->send_error_frame_flag + ); + + dpb_print(DECODE_ID(hw), 0, + "is_framebase(%d), eos %d, state 0x%x, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d bufmgr_reset_cnt %d error_frame_count = %d, drop_frame_count = %d\n", + input_frame_based(vdec), + hw->eos, + hw->stat, + hw->dec_result, + decode_frame_count[DECODE_ID(hw)], + display_frame_count[DECODE_ID(hw)], + run_count[DECODE_ID(hw)], + not_run_ready[DECODE_ID(hw)], + input_empty[DECODE_ID(hw)], + hw->reset_bufmgr_count, + hw->gvs.error_frame_count, + hw->gvs.drop_frame_count + ); + +#ifdef DETECT_WRONG_MULTI_SLICE + dpb_print(DECODE_ID(hw), 0, + "MULTI_SLICE_DETECT (check_count %d slice_count %d cur_slice_count %d flag %d)\n", + hw->multi_slice_pic_check_count, + hw->picture_slice_count, + hw->cur_picture_slice_count, + hw->multi_slice_pic_flag); +#endif + if (!hw->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + dpb_print(DECODE_ID(hw), 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + + dpb_print(DECODE_ID(hw), 0, + "%s, newq(%d/%d), dispq(%d/%d) vf prepare/get/put (%d/%d/%d), free_spec(%d), initdon(%d), used_size(%d/%d), unused_fr_dpb(%d) fast_output_enable %x \n", + __func__, + kfifo_len(&hw->newframe_q), + VF_POOL_SIZE, + kfifo_len(&hw->display_q), + VF_POOL_SIZE, + hw->vf_pre_count, + hw->vf_get_count, + hw->vf_put_count, + have_free_buf_spec(vdec), + p_H264_Dpb->mDPB.init_done, + p_H264_Dpb->mDPB.used_size, p_H264_Dpb->mDPB.size, + is_there_unused_frame_from_dpb(&p_H264_Dpb->mDPB), + p_H264_Dpb->fast_output_enable + ); + + dump_dpb(&p_H264_Dpb->mDPB, 1); + dump_pic(p_H264_Dpb); + dump_bufspec(hw, __func__); + + dpb_print(DECODE_ID(hw), 0, + "DPB_STATUS_REG=0x%x\n", + READ_VREG(DPB_STATUS_REG)); + dpb_print(DECODE_ID(hw), 0, + "MPC_E=0x%x\n", + READ_VREG(MPC_E)); + dpb_print(DECODE_ID(hw), 0, + 
"H264_DECODE_MODE=0x%x\n", + READ_VREG(H264_DECODE_MODE)); + dpb_print(DECODE_ID(hw), 0, + "MBY_MBX=0x%x\n", + READ_VREG(MBY_MBX)); + dpb_print(DECODE_ID(hw), 0, + "H264_DECODE_SIZE=0x%x\n", + READ_VREG(H264_DECODE_SIZE)); + dpb_print(DECODE_ID(hw), 0, + "VIFF_BIT_CNT=0x%x\n", + READ_VREG(VIFF_BIT_CNT)); + dpb_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_LEVEL=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + dpb_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_WP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP)); + dpb_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_RP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_RP)); + dpb_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + dpb_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (input_frame_based(vdec) && + dpb_is_debug(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA) + ) { + int jj; + if (hw->chunk && hw->chunk->block && + hw->chunk->size > 0) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, hw->chunk->size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + dpb_print(DECODE_ID(hw), 0, + "frame data size 0x%x\n", + hw->chunk->size); + for (jj = 0; jj < hw->chunk->size; jj++) { + if ((jj & 0xf) == 0) + dpb_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + dpb_print_cont(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + dpb_print_cont(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } +} + + +static void check_timer_func(struct timer_list *timer) +{ + struct vdec_h264_hw_s *hw = container_of(timer, + struct vdec_h264_hw_s, check_timer); + struct vdec_s *vdec = hw_to_vdec(hw); + int error_skip_frame_count = error_skip_count & 0xfff; + unsigned int timeout_val = decode_timeout_val; + if (timeout_val != 0 && + hw->no_error_count < 
error_skip_frame_count) + timeout_val = errordata_timeout_val; + if ((h264_debug_cmd & 0x100) != 0 && + DECODE_ID(hw) == (h264_debug_cmd & 0xff)) { + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + pr_info("vdec %d is forced to be disconnected\n", + h264_debug_cmd & 0xff); + h264_debug_cmd = 0; + return; + } + if ((h264_debug_cmd & 0x200) != 0 && + DECODE_ID(hw) == (h264_debug_cmd & 0xff)) { + pr_debug("vdec %d is forced to reset bufmgr\n", + h264_debug_cmd & 0xff); + hw->reset_bufmgr_flag = 1; + h264_debug_cmd = 0; + return; + } + + if (vdec->next_status == VDEC_STATUS_DISCONNECTED && + !hw->is_used_v4l) { + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + pr_debug("vdec requested to be disconnected\n"); + return; + } + + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + + if (((h264_debug_flag & DISABLE_ERROR_HANDLE) == 0) && + (timeout_val > 0) && + (hw->start_process_time > 0) && + ((1000 * (jiffies - hw->start_process_time) / HZ) + > timeout_val) + ) { + u32 dpb_status = READ_VREG(DPB_STATUS_REG); + u32 mby_mbx = READ_VREG(MBY_MBX); + if ((dpb_status == H264_ACTION_DECODE_NEWPIC) || + (dpb_status == H264_ACTION_DECODE_SLICE) || + (dpb_status == H264_SEI_DATA_DONE) || + (dpb_status == H264_STATE_SEARCH_HEAD) || + (dpb_status == H264_SLICE_HEAD_DONE) || + (dpb_status == H264_SEI_DATA_READY)) { + if (h264_debug_flag & DEBUG_TIMEOUT_DEC_STAT) + pr_debug("%s dpb_status = 0x%x last_mby_mbx = %u mby_mbx = %u\n", + __func__, dpb_status, hw->last_mby_mbx, mby_mbx); + + if (hw->last_mby_mbx == mby_mbx) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + { + reset_process_time(hw); + timeout_process(hw); + } + } else + start_process_time(hw); + } else if (is_in_parsing_state(dpb_status)) { + if (hw->last_vld_level == + 
READ_VREG(VLD_MEM_VIFIFO_LEVEL)) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + { + reset_process_time(hw); + timeout_process(hw); + } + } + } + hw->last_vld_level = + READ_VREG(VLD_MEM_VIFIFO_LEVEL); + hw->last_mby_mbx = mby_mbx; + } + + if ((hw->ucode_pause_pos != 0) && + (hw->ucode_pause_pos != 0xffffffff) && + udebug_pause_pos != hw->ucode_pause_pos) { + hw->ucode_pause_pos = 0; + WRITE_VREG(DEBUG_REG1, 0); + } + + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static int dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + u32 ar, ar_tmp; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + + if (!hw) + return -1; + + vstatus->frame_width = hw->frame_width; + vstatus->frame_height = hw->frame_height; + if (hw->error_frame_width && + hw->error_frame_height) { + vstatus->frame_width = hw->error_frame_width; + vstatus->frame_height = hw->error_frame_height; + } + if (hw->frame_dur != 0) { + vstatus->frame_dur = hw->frame_dur; + vstatus->frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ? 
+ 96000 / hw->frame_dur : (96000 / hw->frame_dur +1); + } + else + vstatus->frame_rate = -1; + vstatus->error_count = hw->gvs.error_frame_count; + vstatus->status = hw->stat; + if (hw->h264_ar == 0x3ff) + ar_tmp = (0x100 * + hw->frame_height * hw->height_aspect_ratio) / + (hw->frame_width * hw->width_aspect_ratio); + else + ar_tmp = hw->h264_ar; + ar = min_t(u32, + ar_tmp, + DISP_RATIO_ASPECT_RATIO_MAX); + vstatus->ratio_control = + ar << DISP_RATIO_ASPECT_RATIO_BIT; + + vstatus->error_frame_count = hw->gvs.error_frame_count; + vstatus->drop_frame_count = hw->gvs.drop_frame_count; + vstatus->frame_count = decode_frame_count[DECODE_ID(hw)]; + vstatus->i_decoded_frames = hw->gvs.i_decoded_frames; + vstatus->i_lost_frames = hw->gvs.i_lost_frames; + vstatus->i_concealed_frames = hw->gvs.i_concealed_frames; + vstatus->p_decoded_frames = hw->gvs.p_decoded_frames; + vstatus->p_lost_frames = hw->gvs.p_lost_frames; + vstatus->p_concealed_frames = hw->gvs.p_concealed_frames; + vstatus->b_decoded_frames = hw->gvs.b_decoded_frames; + vstatus->b_lost_frames = hw->gvs.b_lost_frames; + vstatus->b_concealed_frames = hw->gvs.b_concealed_frames; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s-%02d", DRIVER_NAME, hw->id); + + return 0; +} + +static int vh264_hw_ctx_restore(struct vdec_h264_hw_s *hw) +{ + int i, j; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + + hw->frmbase_cont_flag = 0; + /* if (hw->init_flag == 0) { */ + if (h264_debug_flag & 0x40000000) { + /* if (1) */ + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s, reset register\n", __func__); + + while (READ_VREG(DCAC_DMA_CTRL) & 0x8000) + ; + while (READ_VREG(LMEM_DMA_CTRL) & 0x8000) + ; /* reg address is 0x350 */ + +#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + WRITE_VREG(DOS_SW_RESET0, (1<<7) | (1<<6) | (1<<4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1<<7) | (1<<6) | 
(1<<4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1<<9) | (1<<8)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + +#else + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + READ_RESET_REG(RESET0_REGISTER); + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + + WRITE_RESET_REG(RESET2_REGISTER, RESET_PIC_DC | RESET_DBLK); +#endif + WRITE_VREG(POWER_CTL_VLD, + READ_VREG(POWER_CTL_VLD) | (0 << 10) | + (1 << 9) | (1 << 6)); + } else { + /* WRITE_VREG(POWER_CTL_VLD, + * READ_VREG(POWER_CTL_VLD) | (0 << 10) | (1 << 9) ); + */ + WRITE_VREG(POWER_CTL_VLD, + READ_VREG(POWER_CTL_VLD) | + (0 << 10) | (1 << 9) | (1 << 6)); + } + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1<<17); +#endif + + /* cbcr_merge_swap_en */ + if (hw->is_used_v4l + && (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21 + || v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + else + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 0xbf << 24); + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 0xbf << 24); + + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 31); + if (hw->mmu_enable) { + SET_VREG_MASK(MDEC_PIC_DC_MUX_CTRL, 1<<31); + /* sw reset to extif hardware */ + SET_VREG_MASK(MDEC_EXTIF_CFG1, 1<<30); + CLEAR_VREG_MASK(MDEC_EXTIF_CFG1, 1<<30); + } else { + CLEAR_VREG_MASK(MDEC_PIC_DC_MUX_CTRL, 1 << 31); + WRITE_VREG(MDEC_EXTIF_CFG1, 0); + } + + +#if 1 /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + /* pr_info("vh264 meson8 prot init\n"); */ + WRITE_VREG(MDEC_PIC_DC_THRESH, 0x404038aa); +#endif + +#ifdef VDEC_DW + if (get_cpu_major_id() < 
AM_MESON_CPU_MAJOR_ID_T7) { + if (IS_VDEC_DW(hw)) { + u32 data = ((1 << 30) |(1 << 0) |(1 << 8)); + + if (IS_VDEC_DW(hw) == 2) + data |= (1 << 9); + WRITE_VREG(MDEC_DOUBLEW_CFG0, data); /* Double Write Enable*/ + } + } +#endif + if (hw->dpb.mDPB.size > 0) { + WRITE_VREG(AV_SCRATCH_7, (hw->max_reference_size << 24) | + (hw->dpb.mDPB.size << 16) | + (hw->dpb.mDPB.size << 8)); + + for (j = 0; j < hw->dpb.mDPB.size; j++) { + i = get_buf_spec_by_canvas_pos(hw, j); + if (i < 0) + break; + + if (!hw->mmu_enable && + hw->buffer_spec[i].cma_alloc_addr) + config_decode_canvas(hw, i); + if (hw->mmu_enable && hw->double_write_mode) + config_decode_canvas_ex(hw, i); + } + } else { + WRITE_VREG(AV_SCRATCH_0, 0); + WRITE_VREG(AV_SCRATCH_9, 0); + } + + if (hw->init_flag == 0) + WRITE_VREG(DPB_STATUS_REG, 0); + else + WRITE_VREG(DPB_STATUS_REG, H264_ACTION_DECODE_START); + + WRITE_VREG(FRAME_COUNTER_REG, hw->decode_pic_count); + WRITE_VREG(AV_SCRATCH_8, hw->buf_offset); + if (!tee_enabled()) + WRITE_VREG(AV_SCRATCH_G, hw->mc_dma_handle); + + /* hw->error_recovery_mode = (error_recovery_mode != 0) ? 
+ * error_recovery_mode : error_recovery_mode_in; + */ + /* WRITE_VREG(AV_SCRATCH_F, + * (READ_VREG(AV_SCRATCH_F) & 0xffffffc3) ); + */ + WRITE_VREG(AV_SCRATCH_F, (hw->save_reg_f & 0xffffffc3) | + ((error_recovery_mode_in & 0x1) << 4)); + /*if (hw->ucode_type == UCODE_IP_ONLY_PARAM) + SET_VREG_MASK(AV_SCRATCH_F, 1 << 6); + else*/ + CLEAR_VREG_MASK(AV_SCRATCH_F, 1 << 6); + + WRITE_VREG(LMEM_DUMP_ADR, (u32)hw->lmem_phy_addr); +#if 1 /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + WRITE_VREG(MDEC_PIC_DC_THRESH, 0x404038aa); +#endif + + WRITE_VREG(DEBUG_REG1, 0); + WRITE_VREG(DEBUG_REG2, 0); + + /*Because CSD data is not found at playback start, + the IQIDCT_CONTROL register is not saved, + the initialized value 0x200 of IQIDCT_CONTROL is set*/ + if (hw->init_flag && (hw->reg_iqidct_control_init_flag == 0)) + WRITE_VREG(IQIDCT_CONTROL, 0x200); + + if (hw->reg_iqidct_control) + WRITE_VREG(IQIDCT_CONTROL, hw->reg_iqidct_control); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "IQIDCT_CONTROL = 0x%x\n", READ_VREG(IQIDCT_CONTROL)); + + if (hw->reg_vcop_ctrl_reg) + WRITE_VREG(VCOP_CTRL_REG, hw->reg_vcop_ctrl_reg); + if (hw->vld_dec_control) + WRITE_VREG(VLD_DECODE_CONTROL, hw->vld_dec_control); + return 0; +} + +static int vmh264_set_trickmode(struct vdec_s *vdec, unsigned long trickmode) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *)vdec->private; + if (i_only_flag & 0x100) + return 0; + if (trickmode == TRICKMODE_I) + hw->i_only = 0x3; + else if (trickmode == TRICKMODE_NONE) + hw->i_only = 0x0; + return 0; +} + +static unsigned char amvdec_enable_flag; +static void vh264_local_init(struct vdec_h264_hw_s *hw, bool is_reset) +{ + int i; + hw->init_flag = 0; + hw->first_sc_checked= 0; + hw->eos = 0; + hw->valve_count = 0; + hw->config_bufmgr_done = 0; + hw->start_process_time = 0; + hw->has_i_frame = 0; + hw->no_error_count = 0xfff; + hw->no_error_i_count = 0xf; + + hw->dec_flag = 0; + hw->data_flag = 0; + hw->skip_frame_count = 0; + 
hw->reg_iqidct_control = 0; + hw->reg_iqidct_control_init_flag = 0; + hw->reg_vcop_ctrl_reg = 0; + hw->reg_rv_ai_mb_count = 0; + hw->vld_dec_control = 0; + hw->decode_timeout_count = 0; + hw->no_mem_count = 0; + hw->dec_again_cnt = 0; + hw->vh264_ratio = hw->vh264_amstream_dec_info.ratio; + /* vh264_ratio = 0x100; */ + + hw->vh264_rotation = (((unsigned long) + hw->vh264_amstream_dec_info.param) >> 16) & 0xffff; + + hw->frame_prog = 0; + hw->frame_width = hw->vh264_amstream_dec_info.width; + hw->frame_height = hw->vh264_amstream_dec_info.height; + hw->frame_dur = hw->vh264_amstream_dec_info.rate; + hw->pts_outside = ((unsigned long) + hw->vh264_amstream_dec_info.param) & 0x01; + hw->sync_outside = ((unsigned long) + hw->vh264_amstream_dec_info.param & 0x02) >> 1; + hw->use_idr_framerate = ((unsigned long) + hw->vh264_amstream_dec_info.param & 0x04) >> 2; + hw->max_refer_buf = !(((unsigned long) + hw->vh264_amstream_dec_info.param & 0x10) >> 4); + if (hw->frame_dur < 96000/960) { + /*more than 960fps,it should not be a correct value, + *give default 30fps + */ + hw->frame_dur = 96000/30; + } + + hw->unstable_pts = (((unsigned long) hw->vh264_amstream_dec_info.param & 0x40) >> 6); + + hw->first_i_policy = first_i_policy; + + pr_info("H264 sysinfo: %dx%d duration=%d, pts_outside=%d\n", + hw->frame_width, hw->frame_height, hw->frame_dur, hw->pts_outside); + pr_debug("sync_outside=%d, use_idr_framerate=%d, is_used_v4l: %d\n", + hw->sync_outside, hw->use_idr_framerate, hw->is_used_v4l); + + if (i_only_flag & 0x100) + hw->i_only = i_only_flag & 0xff; + if (hw->i_only) + hw->dpb.first_insert_frame = FirstInsertFrm_SKIPDONE; + + if ((unsigned long) hw->vh264_amstream_dec_info.param + & 0x08) + hw->no_poc_reorder_flag = 1; + + error_recovery_mode_in = 1; /*ucode control?*/ + if (error_proc_policy & 0x80000000) + hw->send_error_frame_flag = error_proc_policy & 0x1; + else if ((unsigned long) hw->vh264_amstream_dec_info.param & 0x20) + hw->send_error_frame_flag = 0; /*Don't 
display mark err frames*/ + + if (!is_reset) { + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &(hw->vfpool[hw->cur_pool][i]); + hw->vfpool[hw->cur_pool][i].index = -1; /* VF_BUF_NUM; */ + hw->vfpool[hw->cur_pool][i].bufWidth = 1920; + kfifo_put(&hw->newframe_q, vf); + } + } + + hw->duration_from_pts_done = 0; + + hw->p_last_vf = NULL; + hw->vh264_stream_switching_state = SWITCHING_STATE_OFF; + hw->hevc_cur_buf_idx = 0xffff; + + init_waitqueue_head(&hw->wait_q); + + return; +} + +static s32 vh264_init(struct vdec_h264_hw_s *hw) +{ + int size = -1; + int fw_size = 0x1000 * 16; + int fw_mmu_size = 0x1000 * 16; + struct firmware_s *fw = NULL, *fw_mmu = NULL; + + /* int trickmode_fffb = 0; */ + + /* pr_info("\nvh264_init\n"); */ + /* init_timer(&hw->recycle_timer); */ + + /* timer init */ + timer_setup(&hw->check_timer, check_timer_func, 0); + hw->check_timer.expires = jiffies + CHECK_INTERVAL; + + /* add_timer(&hw->check_timer); */ + hw->stat |= STAT_TIMER_ARM; + hw->stat |= STAT_ISR_REG; + + mutex_init(&hw->chunks_mutex); + vh264_local_init(hw, false); + INIT_WORK(&hw->work, vh264_work); + INIT_WORK(&hw->notify_work, vh264_notify_work); + INIT_WORK(&hw->timeout_work, vh264_timeout_work); +#ifdef MH264_USERDATA_ENABLE + INIT_WORK(&hw->user_data_ready_work, user_data_ready_notify_work); +#endif + + /*if (!amvdec_enable_flag) { + amvdec_enable_flag = true; + amvdec_enable(); + if (hw->mmu_enable) + amhevc_enable(); + }*/ + if (hw->mmu_enable) { + + hw->frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + FRAME_MMU_MAP_SIZE, + &hw->frame_mmu_map_phy_addr, GFP_KERNEL); + if (hw->frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -ENOMEM; + } + } + + fw = vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + size = get_firmware_data(VIDEO_DEC_H264_MULTI, fw->data); + if (size < 0) { + 
pr_err("get firmware fail.\n"); + vfree(fw); + return -1; + } + + fw->len = size; + hw->fw = fw; + + if (hw->mmu_enable) { + fw_mmu = vmalloc(sizeof(struct firmware_s) + fw_mmu_size); + if (IS_ERR_OR_NULL(fw_mmu)) + return -ENOMEM; + + size = get_firmware_data(VIDEO_DEC_H264_MULTI_MMU, fw_mmu->data); + if (size < 0) { + pr_err("get mmu fw fail.\n"); + vfree(fw_mmu); + return -1; + } + + fw_mmu->len = size; + hw->fw_mmu = fw_mmu; + } + + if (!tee_enabled()) { + /* -- ucode loading (amrisc and swap code) */ + hw->mc_cpu_addr = + dma_alloc_coherent(amports_get_dma_device(), MC_TOTAL_SIZE, + &hw->mc_dma_handle, GFP_KERNEL); + if (!hw->mc_cpu_addr) { + amvdec_enable_flag = false; + amvdec_disable(); + hw->vdec_pg_enable_flag = 0; + if (hw->mmu_enable) + amhevc_disable(); + pr_info("vh264_init: Can not allocate mc memory.\n"); + return -ENOMEM; + } + + /*pr_info("264 ucode swap area: phyaddr %p, cpu vaddr %p\n", + (void *)hw->mc_dma_handle, hw->mc_cpu_addr); + */ + + /*ret = amvdec_loadmc_ex(VFORMAT_H264, NULL, buf);*/ + + /*header*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_HEADER, + fw->data + 0x4000, MC_SWAP_SIZE); + /*data*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_DATA, + fw->data + 0x2000, MC_SWAP_SIZE); + /*mmco*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_MMCO, + fw->data + 0x6000, MC_SWAP_SIZE); + /*list*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_LIST, + fw->data + 0x3000, MC_SWAP_SIZE); + /*slice*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_SLICE, + fw->data + 0x5000, MC_SWAP_SIZE); + /*main*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_MAIN, + fw->data, 0x2000); + /*data*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_MAIN + 0x2000, + fw->data + 0x2000, 0x1000); + /*slice*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_MAIN + 0x3000, + fw->data + 0x5000, 0x1000); + } + +#if 1 /* #ifdef BUFFER_MGR_IN_C */ + hw->lmem_addr = (dma_addr_t)dma_alloc_coherent(amports_get_dma_device(), + PAGE_SIZE, (dma_addr_t *)&hw->lmem_phy_addr, GFP_KERNEL); + + if 
(hw->lmem_addr == 0) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + return -1; + } + pr_debug("%s, phy_addr=%lx vaddr=%p\n", + __func__, hw->lmem_phy_addr, (void *)hw->lmem_addr); + + if (prefix_aux_buf_size > 0 || + suffix_aux_buf_size > 0) { + u32 aux_buf_size; + hw->prefix_aux_size = AUX_BUF_ALIGN(prefix_aux_buf_size); + hw->suffix_aux_size = AUX_BUF_ALIGN(suffix_aux_buf_size); + aux_buf_size = hw->prefix_aux_size + hw->suffix_aux_size; + hw->aux_addr = dma_alloc_coherent(amports_get_dma_device(), + aux_buf_size, &hw->aux_phy_addr, + GFP_KERNEL); + if (hw->aux_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + return -1; + } + + hw->sei_data_buf = kmalloc(SEI_DATA_SIZE, GFP_KERNEL); + if (hw->sei_data_buf == NULL) { + pr_err("%s: failed to alloc sei itu data buffer\n", + __func__); + return -1; + } + hw->sei_itu_data_buf = kmalloc(SEI_ITU_DATA_SIZE, GFP_KERNEL); + if (hw->sei_itu_data_buf == NULL) { + pr_err("%s: failed to alloc sei itu data buffer\n", + __func__); + dma_free_coherent(amports_get_dma_device(), + hw->prefix_aux_size + hw->suffix_aux_size, hw->aux_addr, + hw->aux_phy_addr); + hw->aux_addr = NULL; + kfree(hw->sei_data_buf); + hw->sei_data_buf = NULL; + + return -1; + } + + if (NULL == hw->sei_user_data_buffer) { + hw->sei_user_data_buffer = kmalloc(USER_DATA_SIZE, + GFP_KERNEL); + if (!hw->sei_user_data_buffer) { + pr_info("%s: Can not allocate sei_data_buffer\n", + __func__); + dma_free_coherent(amports_get_dma_device(), + hw->prefix_aux_size + hw->suffix_aux_size, hw->aux_addr, + hw->aux_phy_addr); + hw->aux_addr = NULL; + kfree(hw->sei_data_buf); + hw->sei_data_buf = NULL; + kfree(hw->sei_itu_data_buf); + hw->sei_itu_data_buf = NULL; + + return -1; + } + hw->sei_user_data_wp = 0; + } + } +/* BUFFER_MGR_IN_C */ +#endif + hw->stat |= STAT_MC_LOAD; + + /* add memory barrier */ + wmb(); + + return 0; +} + +static int vh264_stop(struct vdec_h264_hw_s *hw) +{ + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + 
hw->stat &= ~STAT_VDEC_RUN; + } +#ifdef VDEC_DW + WRITE_VREG(MDEC_DOUBLEW_CFG0, 0); + WRITE_VREG(MDEC_DOUBLEW_CFG1, 0); +#endif +#ifdef MH264_USERDATA_ENABLE + cancel_work_sync(&hw->user_data_ready_work); +#endif + cancel_work_sync(&hw->notify_work); + cancel_work_sync(&hw->timeout_work); + cancel_work_sync(&hw->work); + + if (hw->stat & STAT_MC_LOAD) { + if (hw->mc_cpu_addr != NULL) { + dma_free_coherent(amports_get_dma_device(), + MC_TOTAL_SIZE, hw->mc_cpu_addr, + hw->mc_dma_handle); + hw->mc_cpu_addr = NULL; + } + if (hw->frame_mmu_map_addr != NULL) { + dma_free_coherent(amports_get_dma_device(), + FRAME_MMU_MAP_SIZE, hw->frame_mmu_map_addr, + hw->frame_mmu_map_phy_addr); + hw->frame_mmu_map_addr = NULL; + } + + } + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + if (hw->lmem_addr) { + dma_free_coherent(amports_get_dma_device(), + PAGE_SIZE, (void *)hw->lmem_addr, + hw->lmem_phy_addr); + hw->lmem_addr = 0; + } + + if (hw->aux_addr) { + dma_free_coherent(amports_get_dma_device(), + hw->prefix_aux_size + hw->suffix_aux_size, hw->aux_addr, + hw->aux_phy_addr); + hw->aux_addr = NULL; + } + if (hw->sei_data_buf != NULL) { + kfree(hw->sei_data_buf); + hw->sei_data_buf = NULL; + } + if (hw->sei_itu_data_buf != NULL) { + kfree(hw->sei_itu_data_buf); + hw->sei_itu_data_buf = NULL; + } + if (hw->sei_user_data_buffer != NULL) { + kfree(hw->sei_user_data_buffer); + hw->sei_user_data_buffer = NULL; + } + /* amvdec_disable(); */ + + vfree(hw->fw); + hw->fw = NULL; + + if (hw->mmu_enable) { + vfree(hw->fw_mmu); + hw->fw_mmu = NULL; + } + + dpb_print(DECODE_ID(hw), 0, + "%s\n", + __func__); + return 0; +} + +static void wait_vmh264_search_done(struct vdec_h264_hw_s *hw) +{ + u32 vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP); + int count = 0; + do { + usleep_range(100, 500); + if (vld_rp == READ_VREG(VLD_MEM_VIFIFO_RP)) + break; + if (count > 2000) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, "%s timeout count %d vld_rp 0x%x 
VLD_MEM_VIFIFO_RP 0x%x\n", + __func__, count, vld_rp, READ_VREG(VLD_MEM_VIFIFO_RP)); + break; + } else + vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP); + count++; + } while (1); +} + +static void vh264_notify_work(struct work_struct *work) +{ + struct vdec_h264_hw_s *hw = container_of(work, + struct vdec_h264_hw_s, notify_work); + struct vdec_s *vdec = hw_to_vdec(hw); + + if (hw->is_used_v4l) + return; + + if (vdec->fr_hint_state == VDEC_NEED_HINT) { + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *)((unsigned long)hw->frame_dur)); + vdec->fr_hint_state = VDEC_HINTED; + } + + return; +} + +#ifdef MH264_USERDATA_ENABLE +static void vmh264_reset_udr_mgr(struct vdec_h264_hw_s *hw) +{ + hw->wait_for_udr_send = 0; + hw->sei_itu_data_len = 0; + memset(&hw->ud_record, 0, sizeof(hw->ud_record)); +} + +static void vmh264_crate_userdata_manager( + struct vdec_h264_hw_s *hw, + u8 *userdata_buf, + int buf_len) +{ + if (hw) { + + + mutex_init(&hw->userdata_mutex); + + memset(&hw->userdata_info, 0, + sizeof(struct mh264_userdata_info_t)); + hw->userdata_info.data_buf = userdata_buf; + hw->userdata_info.buf_len = buf_len; + hw->userdata_info.data_buf_end = userdata_buf + buf_len; + + vmh264_reset_udr_mgr(hw); + + } +} + +static void vmh264_destroy_userdata_manager(struct vdec_h264_hw_s *hw) +{ + if (hw) + memset(&hw->userdata_info, + 0, + sizeof(struct mh264_userdata_info_t)); +} + +/* +#define DUMP_USERDATA_RECORD +*/ +#ifdef DUMP_USERDATA_RECORD + +#define MAX_USER_DATA_SIZE 3145728 +static void *user_data_buf; +static unsigned char *pbuf_start; +static int total_len; +static int bskip; +static int n_userdata_id; + +static void print_data(unsigned char *pdata, + int len, + unsigned int poc_number, + unsigned int flag, + unsigned int duration, + unsigned int vpts, + unsigned int vpts_valid, + int rec_id) +{ + int nLeft; + + nLeft = len; +#if 0 + pr_info("%d len:%d, flag:%d, dur:%d, vpts:0x%x, valid:%d, poc:%d\n", + rec_id, len, flag, + duration, 
vpts, vpts_valid, poc_number); +#endif + pr_info("%d len = %d, flag = %d, vpts = 0x%x\n", + rec_id, len, flag, vpts); + + if (len == 96) { + int i; + nLeft = 72; + while (nLeft >= 16) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7], + pdata[8], pdata[9], pdata[10], pdata[11], + pdata[12], pdata[13], pdata[14], pdata[15]); + nLeft -= 16; + pdata += 16; + } + + + while (nLeft > 0) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + + i = 0; + nLeft = 96-72; + while (i < nLeft) { + if (pdata[0] != 0) { + pr_info("some data error\n"); + break; + } + pdata++; + i++; + } + } else { + while (nLeft >= 16) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7], + pdata[8], pdata[9], pdata[10], pdata[11], + pdata[12], pdata[13], pdata[14], pdata[15]); + nLeft -= 16; + pdata += 16; + } + + + while (nLeft > 0) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + + } +} + +static void push_to_buf(struct vdec_h264_hw_s *hw, + u8 *pdata, + int len, + struct userdata_meta_info_t *pmeta); + +static void dump_userdata_record(struct vdec_h264_hw_s *hw, + struct mh264_userdata_record_t *record) +{ + if (record && hw) { + u8 *pdata; + + pdata = hw->userdata_info.data_buf + record->rec_start; +/* + print_data(pdata, + record->rec_len, + record->meta_info.flags, + record->meta_info.duration, + record->meta_info.vpts, + record->meta_info.vpts_valid, + n_record_id); +*/ + push_to_buf(hw, pdata, record->rec_len, &record->meta_info); + n_userdata_id++; + } +} + + +static void push_to_buf(struct 
vdec_h264_hw_s *hw, + u8 *pdata, int len, + struct userdata_meta_info_t *pmeta) +{ + u32 *pLen; + int info_cnt; + u8 *pbuf_end; + + if (!user_data_buf) + return; + + if (bskip) { + pr_info("over size, skip\n"); + return; + } + info_cnt = 0; + pLen = (u32 *)pbuf_start; + + *pLen = len; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->poc_number; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->duration; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->flags; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts_valid; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + + *pLen = n_userdata_id; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + + + pbuf_end = (u8 *)hw->sei_user_data_buffer + USER_DATA_SIZE; + if (pdata + len > pbuf_end) { + int first_section_len; + + first_section_len = pbuf_end - pdata; + memcpy(pbuf_start, pdata, first_section_len); + pdata = (u8 *)hw->sei_user_data_buffer; + pbuf_start += first_section_len; + memcpy(pbuf_start, pdata, len - first_section_len); + pbuf_start += len - first_section_len; + } else { + memcpy(pbuf_start, pdata, len); + pbuf_start += len; + } + + total_len += len + info_cnt * sizeof(u32); + if (total_len >= MAX_USER_DATA_SIZE-4096) + bskip = 1; +} + +static void show_user_data_buf(void) +{ + u8 *pbuf; + int len; + unsigned int flag; + unsigned int duration; + unsigned int vpts; + unsigned int vpts_valid; + unsigned int poc_number; + int rec_id; + + pr_info("show user data buf\n"); + pbuf = user_data_buf; + + while (pbuf < pbuf_start) { + u32 *pLen; + + pLen = (u32 *)pbuf; + + len = *pLen; + pLen++; + pbuf += sizeof(u32); + + poc_number = *pLen; + pLen++; + pbuf += sizeof(u32); + + duration = *pLen; + pLen++; + pbuf += sizeof(u32); + + flag = *pLen; + pLen++; + pbuf += sizeof(u32); + + vpts = *pLen; + pLen++; + pbuf += sizeof(u32); + + 
vpts_valid = *pLen; + pLen++; + pbuf += sizeof(u32); + + rec_id = *pLen; + pLen++; + pbuf += sizeof(u32); + + print_data(pbuf, len, poc_number, flag, + duration, vpts, + vpts_valid, rec_id); + pbuf += len; + msleep(30); + } +} + +static int vmh264_init_userdata_dump(void) +{ + user_data_buf = kmalloc(MAX_USER_DATA_SIZE, GFP_KERNEL); + if (user_data_buf) + return 1; + else + return 0; +} + +static void vmh264_dump_userdata(void) +{ + if (user_data_buf) { + show_user_data_buf(); + kfree(user_data_buf); + user_data_buf = NULL; + } +} + +static void vmh264_reset_user_data_buf(void) +{ + total_len = 0; + pbuf_start = user_data_buf; + bskip = 0; + n_userdata_id = 0; +} +#endif + + +static void vmh264_udc_fill_vpts(struct vdec_h264_hw_s *hw, + int frame_type, + u32 vpts, + u32 vpts_valid) +{ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + + unsigned char *pdata; + u8 *pmax_sei_data_buffer; + u8 *sei_data_buf; + int i; + int wp; + int data_length; + struct mh264_userdata_record_t *p_userdata_rec; + + +#ifdef MH264_USERDATA_ENABLE + struct userdata_meta_info_t meta_info; + memset(&meta_info, 0, sizeof(meta_info)); +#endif + + if (hw->sei_itu_data_len <= 0) + return; + + pdata = (u8 *)hw->sei_user_data_buffer + hw->sei_user_data_wp; + pmax_sei_data_buffer = (u8 *)hw->sei_user_data_buffer + USER_DATA_SIZE; + sei_data_buf = (u8 *)hw->sei_itu_data_buf; + for (i = 0; i < hw->sei_itu_data_len; i++) { + *pdata++ = sei_data_buf[i]; + if (pdata >= pmax_sei_data_buffer) + pdata = (u8 *)hw->sei_user_data_buffer; + } + + hw->sei_user_data_wp = (hw->sei_user_data_wp + + hw->sei_itu_data_len) % USER_DATA_SIZE; + hw->sei_itu_data_len = 0; + +#ifdef MH264_USERDATA_ENABLE + meta_info.duration = hw->frame_dur; + meta_info.flags |= (VFORMAT_H264 << 3); + + meta_info.vpts = vpts; + meta_info.vpts_valid = vpts_valid; + meta_info.poc_number = + p_H264_Dpb->mVideo.dec_picture->poc; + + + wp = hw->sei_user_data_wp; + + if (hw->sei_user_data_wp > hw->userdata_info.last_wp) + data_length = wp - 
hw->userdata_info.last_wp; + else + data_length = wp + hw->userdata_info.buf_len + - hw->userdata_info.last_wp; + + if (data_length & 0x7) + data_length = (((data_length + 8) >> 3) << 3); + + p_userdata_rec = &hw->ud_record; + p_userdata_rec->meta_info = meta_info; + p_userdata_rec->rec_start = hw->userdata_info.last_wp; + p_userdata_rec->rec_len = data_length; + hw->userdata_info.last_wp = wp; + + p_userdata_rec->meta_info.flags |= + p_H264_Dpb->mVideo.dec_picture->pic_struct << 12; + + hw->wait_for_udr_send = 1; + vdec_schedule_work(&hw->user_data_ready_work); +#endif +} + + +static void user_data_ready_notify_work(struct work_struct *work) +{ + struct vdec_h264_hw_s *hw = container_of(work, + struct vdec_h264_hw_s, user_data_ready_work); + + + mutex_lock(&hw->userdata_mutex); + + hw->userdata_info.records[hw->userdata_info.write_index] + = hw->ud_record; + hw->userdata_info.write_index++; + if (hw->userdata_info.write_index >= USERDATA_FIFO_NUM) + hw->userdata_info.write_index = 0; + + mutex_unlock(&hw->userdata_mutex); + +#ifdef DUMP_USERDATA_RECORD + dump_userdata_record(hw, &hw->ud_record); +#endif + vdec_wakeup_userdata_poll(hw_to_vdec(hw)); + + hw->wait_for_udr_send = 0; +} + +static int vmh264_user_data_read(struct vdec_s *vdec, + struct userdata_param_t *puserdata_para) +{ + struct vdec_h264_hw_s *hw = NULL; + int rec_ri, rec_wi; + int rec_len; + u8 *rec_data_start; + u8 *pdest_buf; + struct mh264_userdata_record_t *p_userdata_rec; + u32 data_size; + u32 res; + int copy_ok = 1; + + hw = (struct vdec_h264_hw_s *)vdec->private; + + pdest_buf = puserdata_para->pbuf_addr; + + mutex_lock(&hw->userdata_mutex); + +/* + pr_info("ri = %d, wi = %d\n", + lg_p_mpeg12_userdata_info->read_index, + lg_p_mpeg12_userdata_info->write_index); +*/ + rec_ri = hw->userdata_info.read_index; + rec_wi = hw->userdata_info.write_index; + + if (rec_ri == rec_wi) { + mutex_unlock(&hw->userdata_mutex); + return 0; + } + + p_userdata_rec = hw->userdata_info.records + rec_ri; + + 
rec_len = p_userdata_rec->rec_len; + rec_data_start = p_userdata_rec->rec_start + hw->userdata_info.data_buf; +/* + pr_info("rec_len:%d, rec_start:%d, buf_len:%d\n", + p_userdata_rec->rec_len, + p_userdata_rec->rec_start, + puserdata_para->buf_len); +*/ + if (rec_len <= puserdata_para->buf_len) { + /* dvb user data buffer is enought to + copy the whole recored. */ + data_size = rec_len; + if (rec_data_start + data_size + > hw->userdata_info.data_buf_end) { + int first_section_len; + + first_section_len = hw->userdata_info.buf_len - + p_userdata_rec->rec_start; + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + first_section_len); + if (res) { + pr_info("p1 read not end res=%d, request=%d\n", + res, first_section_len); + copy_ok = 0; + + p_userdata_rec->rec_len -= + first_section_len - res; + p_userdata_rec->rec_start += + first_section_len - res; + puserdata_para->data_size = + first_section_len - res; + } else { + res = (u32)copy_to_user( + (void *)(pdest_buf+first_section_len), + (void *)hw->userdata_info.data_buf, + data_size - first_section_len); + if (res) { + pr_info("p2 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + p_userdata_rec->rec_len -= + data_size - res; + p_userdata_rec->rec_start = + data_size - first_section_len - res; + puserdata_para->data_size = + data_size - res; + } + } else { + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + data_size); + if (res) { + pr_info("p3 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start += data_size - res; + puserdata_para->data_size = data_size - res; + } + + if (copy_ok) { + hw->userdata_info.read_index++; + if (hw->userdata_info.read_index >= USERDATA_FIFO_NUM) + hw->userdata_info.read_index = 0; + } + } else { + /* dvb user data buffer is not enought + to copy the whole recored. 
*/ + data_size = puserdata_para->buf_len; + if (rec_data_start + data_size + > hw->userdata_info.data_buf_end) { + int first_section_len; + + first_section_len = hw->userdata_info.buf_len - + p_userdata_rec->rec_start; + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + first_section_len); + if (res) { + pr_info("p4 read not end res=%d, request=%d\n", + res, first_section_len); + copy_ok = 0; + p_userdata_rec->rec_len -= + first_section_len - res; + p_userdata_rec->rec_start += + first_section_len - res; + puserdata_para->data_size = + first_section_len - res; + } else { + /* first secton copy is ok*/ + res = (u32)copy_to_user( + (void *)(pdest_buf+first_section_len), + (void *)hw->userdata_info.data_buf, + data_size - first_section_len); + if (res) { + pr_info("p5 read not end res=%d, request=%d\n", + res, + data_size - first_section_len); + copy_ok = 0; + } + + p_userdata_rec->rec_len -= + data_size - res; + p_userdata_rec->rec_start = + data_size - first_section_len - res; + puserdata_para->data_size = + data_size - res; + } + } else { + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + data_size); + if (res) { + pr_info("p6 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start += data_size - res; + puserdata_para->data_size = data_size - res; + } + + if (copy_ok) { + hw->userdata_info.read_index++; + if (hw->userdata_info.read_index >= USERDATA_FIFO_NUM) + hw->userdata_info.read_index = 0; + } + + } + puserdata_para->meta_info = p_userdata_rec->meta_info; + + if (hw->userdata_info.read_index <= hw->userdata_info.write_index) + puserdata_para->meta_info.records_in_que = + hw->userdata_info.write_index - + hw->userdata_info.read_index; + else + puserdata_para->meta_info.records_in_que = + hw->userdata_info.write_index + + USERDATA_FIFO_NUM - + hw->userdata_info.read_index; + + puserdata_para->version = (0<<24|0<<16|0<<8|1); + + 
mutex_unlock(&hw->userdata_mutex); + + return 1; +} + +static void vmh264_reset_userdata_fifo(struct vdec_s *vdec, int bInit) +{ + struct vdec_h264_hw_s *hw = NULL; + + hw = (struct vdec_h264_hw_s *)vdec->private; + + if (hw) { + mutex_lock(&hw->userdata_mutex); + pr_info("vmh264_reset_userdata_fifo: bInit: %d, ri: %d, wi: %d\n", + bInit, + hw->userdata_info.read_index, + hw->userdata_info.write_index); + hw->userdata_info.read_index = 0; + hw->userdata_info.write_index = 0; + + if (bInit) + hw->userdata_info.last_wp = 0; + mutex_unlock(&hw->userdata_mutex); + } +} + +static void vmh264_wakeup_userdata_poll(struct vdec_s *vdec) +{ + amstream_wakeup_userdata_poll(vdec); +} + +#endif + +static int vmh264_get_ps_info(struct vdec_h264_hw_s *hw, + u32 param1, u32 param2, u32 param3, u32 param4, + struct aml_vdec_ps_infos *ps) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + struct vdec_s *vdec = hw_to_vdec(hw); +#endif + int mb_width, mb_total; + int mb_height = 0; + int active_buffer_spec_num, dec_dpb_size; + int max_reference_size ,level_idc; + u32 frame_mbs_only_flag; + u32 chroma_format_idc; + u32 crop_bottom, crop_right; + int sub_width_c = 0, sub_height_c = 0; + u32 frame_width, frame_height; + u32 used_reorder_dpb_size_margin + = hw->reorder_dpb_size_margin; + + level_idc = param4 & 0xff; + max_reference_size = (param4 >> 8) & 0xff; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec->master || vdec->slave) + used_reorder_dpb_size_margin = + reorder_dpb_size_margin_dv; +#endif + mb_width = param1 & 0xff; + mb_total = (param1 >> 8) & 0xffff; + if (!mb_width && mb_total) /*for 4k2k*/ + mb_width = 256; + if (mb_width) + mb_height = mb_total/mb_width; + if (mb_width <= 0 || mb_height <= 0 || + is_oversize(mb_width << 4, mb_height << 4)) { + dpb_print(DECODE_ID(hw), 0, + "!!!wrong param1 0x%x mb_width/mb_height (0x%x/0x%x) %x\r\n", + param1, + mb_width, + mb_height); + hw->error_frame_width = mb_width << 4; + hw->error_frame_height = mb_height 
<< 4; + return -1; + } + hw->error_frame_width = 0; + hw->error_frame_height = 0; + + dec_dpb_size = get_dec_dpb_size(hw , mb_width, mb_height); + + dpb_print(DECODE_ID(hw), 0, + "restriction_flag=%d, max_dec_frame_buffering=%d, dec_dpb_size=%d num_reorder_frames %d used_reorder_dpb_size_margin %d\n", + hw->bitstream_restriction_flag, + hw->max_dec_frame_buffering, + dec_dpb_size, + hw->num_reorder_frames, + used_reorder_dpb_size_margin); + + active_buffer_spec_num = + dec_dpb_size + + used_reorder_dpb_size_margin; + + if (active_buffer_spec_num > MAX_VF_BUF_NUM) { + active_buffer_spec_num = MAX_VF_BUF_NUM; + dec_dpb_size = active_buffer_spec_num + - used_reorder_dpb_size_margin; + } + + hw->dpb.mDPB.size = active_buffer_spec_num; + + if (hw->no_poc_reorder_flag) + dec_dpb_size = 1; + + /* + * crop + * AV_SCRATCH_2 + * bit 15: frame_mbs_only_flag + * bit 13-14: chroma_format_idc + */ + hw->seq_info = param2; + frame_mbs_only_flag = (hw->seq_info >> 15) & 0x01; + if (hw->dpb.mSPS.profile_idc != 100 && + hw->dpb.mSPS.profile_idc != 110 && + hw->dpb.mSPS.profile_idc != 122 && + hw->dpb.mSPS.profile_idc != 144) { + hw->dpb.chroma_format_idc = 1; + } + chroma_format_idc = hw->dpb.chroma_format_idc; + + /* + * AV_SCRATCH_6 bit 31-16 = (left << 8 | right ) << 1 + * AV_SCRATCH_6 bit 15-0 = (top << 8 | bottom ) << + * (2 - frame_mbs_only_flag) + */ + switch (chroma_format_idc) { + case 1: + sub_width_c = 2; + sub_height_c = 2; + break; + + case 2: + sub_width_c = 2; + sub_height_c = 1; + break; + + case 3: + sub_width_c = 1; + sub_height_c = 1; + break; + + default: + break; + } + + if (chroma_format_idc == 0) { + crop_right = hw->dpb.frame_crop_right_offset; + crop_bottom = hw->dpb.frame_crop_bottom_offset * + (2 - frame_mbs_only_flag); + } else { + crop_right = sub_width_c * hw->dpb.frame_crop_right_offset; + crop_bottom = sub_height_c * hw->dpb.frame_crop_bottom_offset * + (2 - frame_mbs_only_flag); + } + + frame_width = mb_width << 4; + frame_height = mb_height << 4; + 
+ frame_width = frame_width - crop_right; + frame_height = frame_height - crop_bottom; + + ps->profile = level_idc; + ps->ref_frames = max_reference_size; + ps->mb_width = mb_width; + ps->mb_height = mb_height; + ps->visible_width = frame_width; + ps->visible_height = frame_height; + ps->coded_width = ALIGN(mb_width << 4, 64); + ps->coded_height = ALIGN(mb_height << 4, 64); + ps->dpb_frames = dec_dpb_size + 1; /* +1 for two frames in one packet */ + ps->dpb_size = active_buffer_spec_num; + + return 0; +} + +static int v4l_res_change(struct vdec_h264_hw_s *hw, + u32 param1, u32 param2, + u32 param3, u32 param4) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int ret = 0; + int dec_dpb_size_change = hw->csd_change_flag && (hw->dpb.dec_dpb_size != get_dec_dpb_size_active(hw, param1)); + + if (ctx->param_sets_from_ucode && + hw->res_ch_flag == 0) { + if (((param1 != 0 && + hw->seq_info2 != param1) || hw->csd_change_flag) && + hw->seq_info2 != 0) { + if (hw->seq_info2 != param1 || dec_dpb_size_change) { /*picture size changed*/ + struct aml_vdec_ps_infos ps; + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "h264 res_change\n"); + if (vmh264_get_ps_info(hw, param1, + param2, param3, param4, &ps) < 0) { + dpb_print(DECODE_ID(hw), 0, + "set parameters error\n"); + } + hw->v4l_params_parsed = false; + vdec_v4l_set_ps_infos(ctx, &ps); + vdec_v4l_res_ch_event(ctx); + hw->res_ch_flag = 1; + ctx->v4l_resolution_change = 1; + amvdec_stop(); + if (hw->mmu_enable) + amhevc_stop(); + hw->eos = 1; + flush_dpb(p_H264_Dpb); + //del_timer_sync(&hw->check_timer); + notify_v4l_eos(hw_to_vdec(hw)); + ret = 1; + } + } + } + + return ret; + +} + +static int check_dirty_data(struct vdec_s *vdec) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *)(vdec->private); + u32 wp, rp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + + if (wp > rp) + level = wp - rp; + 
else + level = wp + vdec->input.size - rp ; + + if (level > (vdec->input.size / 2)) + hw->dec_again_cnt++; + + if (hw->dec_again_cnt > dirty_again_threshold) { + dpb_print(DECODE_ID(hw), 0, "h264 data skipped %x\n", level); + hw->dec_again_cnt = 0; + return 1; + } + return 0; +} + +static void vh264_work_implement(struct vdec_h264_hw_s *hw, + struct vdec_s *vdec, int from) +{ + /* finished decoding one frame or error, + * notify vdec core to switch context + */ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + + if (hw->dec_result == DEC_RESULT_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_START); + } else if (hw->dec_result == DEC_RESULT_AGAIN) + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_AGAIN); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "%s dec_result %d %x %x %x\n", + __func__, + hw->dec_result, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP)); + if (!hw->mmu_enable) { + mutex_lock(&vmh264_mutex); + dealloc_buf_specs(hw, 0); + mutex_unlock(&vmh264_mutex); + } + hw->save_reg_f = READ_VREG(AV_SCRATCH_F); + hw->dpb.last_dpb_status = hw->dpb.dec_dpb_status; + if (hw->dec_result == DEC_RESULT_CONFIG_PARAM) { + u32 param1 = READ_VREG(AV_SCRATCH_1); + u32 param2 = READ_VREG(AV_SCRATCH_2); + u32 param3 = READ_VREG(AV_SCRATCH_6); + u32 param4 = READ_VREG(AV_SCRATCH_B); + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (hw->is_used_v4l && + ctx->param_sets_from_ucode) { + if (!v4l_res_change(hw, param1, param2, param3, param4)) { + if (!hw->v4l_params_parsed) { + struct aml_vdec_ps_infos ps; + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DEC_DETAIL, + "h264 parsered csd data\n"); + if (vmh264_get_ps_info(hw, + param1, param2, + param3, param4, &ps) < 0) { + dpb_print(DECODE_ID(hw), 0, + "set parameters error\n"); + } + hw->v4l_params_parsed = true; + vdec_v4l_set_ps_infos(ctx, &ps); + + amvdec_stop(); + if (hw->mmu_enable) + amhevc_stop(); + } else { 
+ if (vh264_set_params(hw, param1, + param2, param3, param4, false) < 0) { + hw->init_flag = 0; + dpb_print(DECODE_ID(hw), 0, "set parameters error, init_flag: %u\n", + hw->init_flag); + } + + WRITE_VREG(AV_SCRATCH_0, (hw->max_reference_size<<24) | + (hw->dpb.mDPB.size<<16) | + (hw->dpb.mDPB.size<<8)); + hw->res_ch_flag = 0; + start_process_time(hw); + return; + } + } + } else { + if (vh264_set_params(hw, param1, + param2, param3, param4, false) < 0) { + hw->init_flag = 0; + dpb_print(DECODE_ID(hw), 0, "set parameters error, init_flag: %u\n", + hw->init_flag); + } + + WRITE_VREG(AV_SCRATCH_0, (hw->max_reference_size<<24) | + (hw->dpb.mDPB.size<<16) | + (hw->dpb.mDPB.size<<8)); + start_process_time(hw); + return; + } + } else + if (((hw->dec_result == DEC_RESULT_GET_DATA) || + (hw->dec_result == DEC_RESULT_GET_DATA_RETRY)) + && (hw_to_vdec(hw)->next_status != + VDEC_STATUS_DISCONNECTED)) { + if (!vdec_has_more_input(vdec)) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } + + if (hw->dec_result == DEC_RESULT_GET_DATA) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA %x %x %x\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP)); + mutex_lock(&hw->chunks_mutex); + vdec_vframe_dirty(vdec, hw->chunk); + hw->chunk = NULL; + mutex_unlock(&hw->chunks_mutex); + vdec_clean_input(vdec); + } + if ((hw->dec_result == DEC_RESULT_GET_DATA_RETRY) && + ((1000 * (jiffies - hw->get_data_start_time) / HZ) + > get_data_timeout_val)) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA_RETRY timeout\n", + __func__); + goto result_done; + } + if (is_buffer_available(vdec)) { + int r; + int decode_size; + r = vdec_prepare_input(vdec, &hw->chunk); + if (r < 0 && (hw_to_vdec(hw)->next_status != + VDEC_STATUS_DISCONNECTED)) { + hw->dec_result = DEC_RESULT_GET_DATA_RETRY; + + dpb_print(DECODE_ID(hw), + PRINT_FLAG_VDEC_DETAIL, + 
"vdec_prepare_input: Insufficient data\n"); + vdec_schedule_work(&hw->work); + return; + } + hw->dec_result = DEC_RESULT_NONE; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: chunk size 0x%x\n", + __func__, hw->chunk->size); + + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA)) { + int jj; + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap( + hw->chunk->block->start + + hw->chunk->offset, r); + else + data = ((u8 *) + hw->chunk->block->start_virt) + + hw->chunk->offset; + + for (jj = 0; jj < r; jj++) { + if ((jj & 0xf) == 0) + dpb_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + dpb_print_cont(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + dpb_print_cont(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + WRITE_VREG(POWER_CTL_VLD, + READ_VREG(POWER_CTL_VLD) | + (0 << 10) | (1 << 9) | (1 << 6)); + WRITE_VREG(H264_DECODE_INFO, (1<<13)); + decode_size = hw->chunk->size + + (hw->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + WRITE_VREG(H264_DECODE_SIZE, decode_size); + WRITE_VREG(VIFF_BIT_CNT, decode_size * 8); + vdec_enable_input(vdec); + + WRITE_VREG(DPB_STATUS_REG, H264_ACTION_SEARCH_HEAD); + start_process_time(hw); + } else{ + if (hw_to_vdec(hw)->next_status + != VDEC_STATUS_DISCONNECTED) { + hw->dec_result = DEC_RESULT_GET_DATA_RETRY; + vdec_schedule_work(&hw->work); + } + } + return; + } else if (hw->dec_result == DEC_RESULT_DONE || + hw->dec_result == DEC_RESULT_TIMEOUT) { + /* if (!hw->ctx_valid) + hw->ctx_valid = 1; */ + hw->dec_again_cnt = 0; + if ((hw->dec_result == DEC_RESULT_TIMEOUT) && + !hw->i_only && (error_proc_policy & 0x2)) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + dpb_print(DECODE_ID(hw), 0, + "%s, decode timeout flush dpb\n", + __func__); + flush_dpb(p_H264_Dpb); + } +result_done: + { + if (error_proc_policy & 0x8000) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; 
+ int i; + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + + for (i = 0; i < p_Dpb->used_size; i++) { + int i_flag = p_Dpb->fs[i]->bottom_field || p_Dpb->fs[i]->top_field; + int threshold = (i_flag || (hw->max_reference_size >= 12)) ? ((50 + p_Dpb->used_size) * 2) : 50 + p_Dpb->used_size; + if ((p_Dpb->fs[i]->dpb_frame_count + threshold + < p_H264_Dpb->dpb_frame_count) && + p_Dpb->fs[i]->is_reference && + !p_Dpb->fs[i]->is_long_term && + p_Dpb->fs[i]->is_output) { + dpb_print(DECODE_ID(hw), + 0, + "unmark reference dpb_frame_count diffrence large in dpb\n"); + unmark_for_reference(p_Dpb, p_Dpb->fs[i]); + update_ref_list(p_Dpb); + } + } + } + } + if (hw->mmu_enable + && hw->frame_busy && hw->frame_done) { + long used_4k_num; + hevc_sao_wait_done(hw); + if (hw->hevc_cur_buf_idx != 0xffff) { + used_4k_num = + (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16); + if (used_4k_num >= 0) + dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, + "release unused buf , used_4k_num %ld index %d\n", + used_4k_num, hw->hevc_cur_buf_idx); + hevc_mmu_dma_check(hw_to_vdec(hw)); + decoder_mmu_box_free_idx_tail( + hw->mmu_box, + hw->hevc_cur_buf_idx, + used_4k_num); + hw->hevc_cur_buf_idx = 0xffff; + } + } + decode_frame_count[DECODE_ID(hw)]++; + if (hw->dpb.mSlice.slice_type == I_SLICE) { + hw->gvs.i_decoded_frames++; + } else if (hw->dpb.mSlice.slice_type == P_SLICE) { + hw->gvs.p_decoded_frames++; + } else if (hw->dpb.mSlice.slice_type == B_SLICE) { + hw->gvs.b_decoded_frames++; + } + amvdec_stop(); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s dec_result %d %x %x %x\n", + __func__, + hw->dec_result, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP)); + mutex_lock(&hw->chunks_mutex); + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + hw->chunk = NULL; + mutex_unlock(&hw->chunks_mutex); + } else if (hw->dec_result == DEC_RESULT_AGAIN) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + 
if (!vdec_has_more_input(vdec) && (hw_to_vdec(hw)->next_status != + VDEC_STATUS_DISCONNECTED) && (hw->no_decoder_buffer_flag == 0)) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } + + if ((vdec_stream_based(vdec)) && + (error_proc_policy & 0x400000) && + check_dirty_data(vdec)) { + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + return; + } + hw->no_decoder_buffer_flag = 0; + hw->next_again_flag = 1; + } else if (hw->dec_result == DEC_RESULT_EOS) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: end of stream\n", + __func__); + amvdec_stop(); + if (hw->mmu_enable) + amhevc_stop(); + hw->eos = 1; + flush_dpb(p_H264_Dpb); + notify_v4l_eos(hw_to_vdec(hw)); + mutex_lock(&hw->chunks_mutex); + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + hw->chunk = NULL; + mutex_unlock(&hw->chunks_mutex); + vdec_clean_input(vdec); + } else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: force exit\n", + __func__); + amvdec_stop(); + if (hw->mmu_enable) + amhevc_stop(); + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + } + + if (p_H264_Dpb->mVideo.dec_picture) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s, release decoded picture\n", __func__); + release_cur_decoding_buf(hw); + } + + WRITE_VREG(ASSIST_MBOX1_MASK, 0); + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; +#ifdef DETECT_WRONG_MULTI_SLICE + if (hw->dec_result != DEC_RESULT_AGAIN) + hw->last_picture_slice_count = 0; +#endif + ATRACE_COUNTER(hw->trace.decode_work_time_name, TRACE_WORK_WAIT_SEARCH_DONE_START); + wait_vmh264_search_done(hw); + ATRACE_COUNTER(hw->trace.decode_work_time_name, TRACE_WORK_WAIT_SEARCH_DONE_END); + /* mark itself has all HW resource released and input released */ + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if 
/* NOTE(review): this line is a collapsed unified-diff fragment; code bytes are preserved as-is.
 * It contains: (1) the tail of vh264_work_implement() — Dolby-Vision dual-layer next-sched
 *     selection, a guard that lets a pending vh264_work win over a timeout work (from == 1),
 *     release of the VDEC_1/HEVC core masks via vdec_core_finish_run(), a wake_up of wait_q,
 *     an optional v4l frame-sync write, and finally the completion callback hw->vdec_cb;
 * (2) vh264_work(): the normal workqueue entry — recovers hw via container_of and calls
 *     vh264_work_implement(hw, vdec, 0);
 * (3) vh264_timeout_work(): the timeout workqueue entry — returns early if the normal work
 *     is still pending (work_pending), otherwise sets hw->timeout_processing = 1 and calls
 *     vh264_work_implement(hw, vdec, 1) so the implement body knows it came from a timeout;
 * (4) the head of run_ready(): declares ret, recovers hw from vdec->private, and starts
 *     computing the TVP flag from vdec_secure() (the ?: continues on the next source line). */
(hw->switch_dvlayer_flag) { + if (vdec->slave) + vdec_set_next_sched(vdec, vdec->slave); + else if (vdec->master) + vdec_set_next_sched(vdec, vdec->master); + } else if (vdec->slave || vdec->master) + vdec_set_next_sched(vdec, vdec); +#endif + + if (from == 1) { + /* This is a timeout work */ + if (work_pending(&hw->work)) { + /* + * The vh264_work arrives at the last second, + * give it a chance to handle the scenario. + */ + return; + } + } + if (hw->dec_result == DEC_RESULT_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_END); + } + + /* mark itself has all HW resource released and input released */ + if (vdec->parallel_dec == 1) { + if (hw->mmu_enable == 0) + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1); + else + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + } else + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + wake_up_interruptible(&hw->wait_q); + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode && + !hw->v4l_params_parsed) + vdec_v4l_write_frame_sync(ctx); + } + + if (hw->vdec_cb) + hw->vdec_cb(hw_to_vdec(hw), hw->vdec_cb_arg); +} + + +static void vh264_work(struct work_struct *work) +{ + struct vdec_h264_hw_s *hw = container_of(work, + struct vdec_h264_hw_s, work); + struct vdec_s *vdec = hw_to_vdec(hw); + + vh264_work_implement(hw, vdec, 0); +} + + +static void vh264_timeout_work(struct work_struct *work) +{ + struct vdec_h264_hw_s *hw = container_of(work, + struct vdec_h264_hw_s, timeout_work); + struct vdec_s *vdec = hw_to_vdec(hw); + + if (work_pending(&hw->work)) + return; + + hw->timeout_processing = 1; + vh264_work_implement(hw, vdec, 1); +} + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + bool ret = 0; + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *)vdec->private; + int tvp = vdec_secure(hw_to_vdec(hw)) ?
+ CODEC_MM_FLAGS_TVP : 0; + + if (hw->timeout_processing && + (work_pending(&hw->work) || work_busy(&hw->work) || + work_pending(&hw->timeout_work) || work_busy(&hw->timeout_work))) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "h264 work pending, not ready for run.\n"); + return 0; + } + hw->timeout_processing = 0; + if (!hw->first_sc_checked && hw->mmu_enable) { + int size = decoder_mmu_box_sc_check(hw->mmu_box, tvp); + hw->first_sc_checked =1; + dpb_print(DECODE_ID(hw), 0, + "vmh264 cached=%d need_size=%d speed= %d ms\n", + size, (hw->need_cache_size >> PAGE_SHIFT), + (int)(get_jiffies_64() - hw->sc_start_time) * 1000/HZ); + } + + if (vdec_stream_based(vdec) && (hw->init_flag == 0) + && pre_decode_buf_level != 0) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = vdec->input.size + wp - rp; + else + level = wp - rp; + + if (level < pre_decode_buf_level) + return 0; + } + +#ifndef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec->master) + return 0; +#endif + if (hw->eos) + return 0; + + if (hw->stat & DECODER_FATAL_ERROR_NO_MEM) + return 0; + + if (disp_vframe_valve_level && + kfifo_len(&hw->display_q) >= + disp_vframe_valve_level) { + hw->valve_count--; + if (hw->valve_count <= 0) + hw->valve_count = 2; + else + return 0; + } + if (hw->next_again_flag && + (!vdec_frame_based(vdec))) { + u32 parser_wr_ptr = STBUF_READ(&vdec->vbuf, get_wp); + if (parser_wr_ptr >= hw->pre_parser_wr_ptr && + (parser_wr_ptr - hw->pre_parser_wr_ptr) < + again_threshold) { + int r = vdec_sync_input(vdec); + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "%s buf lelvel:%x\n", __func__, r); + return 0; + } + } + + if (h264_debug_flag & 0x20000000) { + /* pr_info("%s, a\n", __func__); */ + ret = 1; + } else + ret = is_buffer_available(vdec); + +#ifdef CONSTRAIN_MAX_BUF_NUM + if (ret && (hw->dpb.mDPB.size > 0)) { /*make sure initilized*/ + if (run_ready_max_vf_only_num > 0 && + 
get_vf_ref_only_buf_count(hw) >= + run_ready_max_vf_only_num + ) + ret = 0; + if (run_ready_display_q_num > 0 && + kfifo_len(&hw->display_q) >= + run_ready_display_q_num) + ret = 0; + /*avoid more buffers consumed when + switching resolution*/ + if (run_ready_max_buf_num == 0xff && + get_used_buf_count(hw) > + hw->dpb.mDPB.size) + ret = 0; + else if (run_ready_max_buf_num && + get_used_buf_count(hw) >= + run_ready_max_buf_num) + ret = 0; + if (ret == 0) + bufmgr_h264_remove_unused_frame(&hw->dpb, 0); + } +#endif + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode) { + if (hw->v4l_params_parsed) { + if (ctx->cap_pool.dec < hw->dpb.mDPB.size) { + if (is_buffer_available(vdec)) + ret = 1; + else + ret = 0; + } + } else { + if (ctx->v4l_resolution_change) + ret = 0; + } + } else if (!ctx->v4l_codec_dpb_ready) { + if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < + run_ready_min_buf_num) + ret = 0; + } + } + + if (ret) + not_run_ready[DECODE_ID(hw)] = 0; + else + not_run_ready[DECODE_ID(hw)]++; + if (vdec->parallel_dec == 1) { + if (hw->mmu_enable == 0) + return ret ? (CORE_MASK_VDEC_1) : 0; + else + return ret ? (CORE_MASK_VDEC_1 | CORE_MASK_HEVC) : 0; + } else + return ret ? 
/* NOTE(review): collapsed unified-diff fragment; code bytes preserved as-is. Contains:
 * (1) the closing return of run_ready() (combined VDEC_1|HEVC core mask, or 0 when not ready);
 * (2) get_data_check_sum(): sums every byte of the current input chunk for debug logging.
 *     If the chunk's block is not CPU-mapped it vmaps the physical range and unmaps it after.
 *     The int accumulator is returned as unsigned char, i.e. truncated to the low 8 bits —
 *     presumably intentional for a short log checksum, but worth confirming;
 * (3) the head of run(): the scheduler's decode entry point — powers on VDEC (and HEVC when
 *     MMU is used), bumps run_count, resets the cores, stores the completion callback/arg,
 *     flags a bufmgr reset when display_q overflows VF_POOL_SIZE, latches the stream write
 *     pointer for the "again" heuristic, performs a bufmgr reset if requested or if buffer
 *     allocation previously failed (error_proc_policy & 0x40), services the debug reconfig
 *     command (h264_debug_cmd), and begins the Dolby-Vision dual-decode flag setup (the `if`
 *     condition continues on the next source line). */
(CORE_MASK_VDEC_1 | CORE_MASK_HEVC) : 0; +} + +static unsigned char get_data_check_sum + (struct vdec_h264_hw_s *hw, int size) +{ + int jj; + int sum = 0; + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + for (jj = 0; jj < size; jj++) + sum += data[jj]; + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static void run(struct vdec_s *vdec, unsigned long mask, + void (*callback)(struct vdec_s *, void *), void *arg) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *)vdec->private; + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int size, ret = -1; + if (!hw->vdec_pg_enable_flag) { + hw->vdec_pg_enable_flag = 1; + amvdec_enable(); + if (hw->mmu_enable) + amhevc_enable(); + } + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_RUN_START); + + run_count[DECODE_ID(hw)]++; + vdec_reset_core(vdec); + if (hw->mmu_enable) + hevc_reset_core(vdec); + hw->vdec_cb_arg = arg; + hw->vdec_cb = callback; + +#ifdef DETECT_WRONG_MULTI_SLICE + hw->cur_picture_slice_count = 0; +#endif + + if (kfifo_len(&hw->display_q) > VF_POOL_SIZE) { + hw->reset_bufmgr_flag = 1; + dpb_print(DECODE_ID(hw), 0, + "kfifo len:%d invaild, need bufmgr reset\n", + kfifo_len(&hw->display_q)); + } + + if (vdec_stream_based(vdec)) { + hw->pre_parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + hw->next_again_flag = 0; + } + + if (hw->reset_bufmgr_flag || + ((error_proc_policy & 0x40) && + p_H264_Dpb->buf_alloc_fail)) { + h264_reset_bufmgr(vdec); + //flag must clear after reset for v4l buf_spec_init use + hw->reset_bufmgr_flag = 0; + } + + if (h264_debug_cmd & 0xf000) { + if (((h264_debug_cmd >> 12) & 0xf) + == (DECODE_ID(hw) + 1)) { + h264_reconfig(hw); + h264_debug_cmd &= (~0xf000); + } + } + /* hw->chunk = vdec_prepare_input(vdec); */ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if
(vdec->slave || vdec->master) + vdec_set_flag(vdec, VDEC_FLAG_SELF_INPUT_CONTEXT); +#endif + size = vdec_prepare_input(vdec, &hw->chunk); + if ((size < 0) || + (input_frame_based(vdec) && hw->chunk == NULL)) { + input_empty[DECODE_ID(hw)]++; + hw->dec_result = DEC_RESULT_AGAIN; + + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "vdec_prepare_input: Insufficient data\n"); + + vdec_schedule_work(&hw->work); + return; + } + input_empty[DECODE_ID(hw)] = 0; + + hw->dec_result = DEC_RESULT_NONE; + hw->get_data_count = 0; + hw->csd_change_flag = 0; +#if 0 + pr_info("VLD_MEM_VIFIFO_LEVEL = 0x%x, rp = 0x%x, wp = 0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_MEM_VIFIFO_WP)); +#endif + + if (input_frame_based(vdec) && !vdec_secure(vdec)) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_VDEC_STATUS) + ) { + dpb_print(DECODE_ID(hw), 0, + "%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. 
%02x %02x %02x %02x\n", + __func__, size, get_data_check_sum(hw, size), + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA) + ) { + int jj; + + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + dpb_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + dpb_print_cont(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + dpb_print_cont(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "\n"); + } + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } else + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: %x %x %x %x %x size 0x%x\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + STBUF_READ(&vdec->vbuf, get_rp), + STBUF_READ(&vdec->vbuf, get_wp), + size); + + start_process_time(hw); + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + WRITE_VREG(AV_SCRATCH_G, hw->reg_g_status); + } else { + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_START); + ret = amvdec_vdec_loadmc_ex(VFORMAT_H264, "mh264", vdec, hw->fw->data); + if (ret < 0) { + amvdec_enable_flag = false; + amvdec_disable(); + hw->vdec_pg_enable_flag = 0; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "MH264 the %s fw loading failed, err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + vdec->mc_type = VFORMAT_H264; + hw->reg_g_status = READ_VREG(AV_SCRATCH_G); + if (hw->mmu_enable) { + ret = amhevc_loadmc_ex(VFORMAT_H264, "mh264_mmu", + hw->fw_mmu->data); + if (ret < 0) { + amvdec_enable_flag = false; + amhevc_disable(); + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "MH264_MMU the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + vdec->mc_type = ((1 << 16) | VFORMAT_H264); + } + vdec->mc_loaded = 0; + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_END); + } + vmh264_reset_udr_mgr(hw); + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_START); + if (vh264_hw_ctx_restore(hw) < 0) { + vdec_schedule_work(&hw->work); + return; + } + if (error_proc_policy & 0x10000) { + hw->first_pre_frame_num = p_H264_Dpb->mVideo.pre_frame_num; + } + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_END); + if (input_frame_based(vdec)) { + int decode_size = 0; + + decode_size = hw->chunk->size + + (hw->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + WRITE_VREG(H264_DECODE_INFO, (1<<13)); + WRITE_VREG(H264_DECODE_SIZE, decode_size); + WRITE_VREG(VIFF_BIT_CNT, decode_size * 8); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = hw->chunk->size; + } else { + if (size <= 0) + size = 0x7fffffff; /*error happen*/ + WRITE_VREG(H264_DECODE_INFO, (1<<13)); + WRITE_VREG(H264_DECODE_SIZE, size); + WRITE_VREG(VIFF_BIT_CNT, size * 8); + hw->start_bit_cnt = size * 8; + } + config_aux_buf(hw); + config_decode_mode(hw); + vdec_enable_input(vdec); + WRITE_VREG(NAL_SEARCH_CTL, 0); + hw->sei_data_len = 0; + if (enable_itu_t35) + WRITE_VREG(NAL_SEARCH_CTL, READ_VREG(NAL_SEARCH_CTL) | 0x1); + if (!hw->init_flag) { + if (hw->mmu_enable) + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | 0x2); + else + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) & (~0x2)); + } + WRITE_VREG(NAL_SEARCH_CTL, READ_VREG(NAL_SEARCH_CTL) | (1 << 2) | (hw->bitstream_restriction_flag << 15)); + + if (udebug_flag) + WRITE_VREG(AV_SCRATCH_K, udebug_flag); + hw->stat |= STAT_TIMER_ARM; + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + + if (hw->mmu_enable) + SET_VREG_MASK(VDEC_ASSIST_MMC_CTRL1, 1 << 3); + 
/* NOTE(review): collapsed unified-diff fragment; code bytes preserved as-is. Contains:
 * (1) the tail of run(): final MMC_CTRL1 bit setup, timestamping of hw_decode_start,
 *     amvdec_start() (and amhevc_start() plus SAO/canvas table init when MMU is enabled),
 *     kick of the ucode via DPB_STATUS_REG = H264_ACTION_SEARCH_HEAD, and init_flag = 1;
 * (2) clear_refer_bufs(): for v4l, invalidates every buffer_spec entry under bufspec_lock
 *     (used = -1, addresses cleared); then reinitializes display_q/newframe_q kfifos and
 *     refills newframe_q from the current vframe pool with index -1 / bufWidth 1920 defaults;
 * (3) reset(): the vdec framework reset hook — cancels the decode and notify works, stops
 *     the VDEC (and HEVC) cores and the check timer if running, clears eos / pic count,
 *     resets the process timer, rebuilds the buffer manager, and clears reference buffers;
 * (4) the head of h264_reconfig() (only `int i;` here — body continues on the next line). */
else + CLEAR_VREG_MASK(VDEC_ASSIST_MMC_CTRL1, 1 << 3); + } + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + amvdec_start(); + if (hw->mmu_enable /*&& !hw->frame_busy && !hw->frame_done*/) { + WRITE_VREG(HEVC_ASSIST_SCRATCH_0, 0x0); + amhevc_start(); + if (hw->config_bufmgr_done) { + hevc_mcr_sao_global_hw_init(hw, + (hw->mb_width << 4), (hw->mb_height << 4)); + hevc_mcr_config_canv2axitbl(hw, 1); + } + } + + /* if (hw->init_flag) { */ + WRITE_VREG(DPB_STATUS_REG, H264_ACTION_SEARCH_HEAD); + /* } */ + + hw->init_flag = 1; + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_RUN_END); +} + +static void clear_refer_bufs(struct vdec_h264_hw_s *hw) +{ + int i; + ulong flags; + + if (hw->is_used_v4l) { + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + hw->buffer_spec[i].used = -1; + hw->buffer_spec[i].cma_alloc_addr = 0; + hw->buffer_spec[i].buf_adr = 0; + } + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + } + + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &(hw->vfpool[hw->cur_pool][i]); + hw->vfpool[hw->cur_pool][i].index = -1; /* VF_BUF_NUM; */ + hw->vfpool[hw->cur_pool][i].bufWidth = 1920; + kfifo_put(&hw->newframe_q, vf); + } +} + +static void reset(struct vdec_s *vdec) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *)vdec->private; + + pr_info("vmh264 reset\n"); + + cancel_work_sync(&hw->work); + cancel_work_sync(&hw->notify_work); + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + if (hw->mmu_enable) + amhevc_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + hw->eos = 0; + hw->decode_pic_count = 0; + + reset_process_time(hw); + h264_reset_bufmgr(vdec); + clear_refer_bufs(hw); + + dpb_print(DECODE_ID(hw), 0, "%s\n", __func__); +} + +static void h264_reconfig(struct vdec_h264_hw_s *hw) +{ + int i; +
/* NOTE(review): collapsed unified-diff fragment; code bytes preserved as-is. This is the
 * body of h264_reconfig(): flushes the DPB and removes unused frames, frees the collocated
 * MV buffer (BMMU_REF_IDX), then walks buffer_spec[] under bufspec_lock freeing canvases
 * (parallel_dec mode), remapping used states (2 -> 3 "keep out of bufmgr until vf_put",
 * 0 -> 4 "free, ready to release"), and clearing canvas_pos.
 *
 * NOTE(review): brace/#endif placement looks wrong inside the loop — the `}` that closes
 * `if (IS_VDEC_DW(hw)) {` appears AFTER `#endif` (VDEC_DW). If VDEC_DW were ever undefined
 * the opening `{` disappears but its `}` remains, unbalancing the braces (the for loop would
 * close early and a stray `}` would follow). Presumably VDEC_DW is always defined for this
 * build, but the `}` should live before `#endif` — TODO confirm against upstream vmh264.c. */
unsigned long flags; + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + struct vdec_s *vdec = hw_to_vdec(hw); + dpb_print(DECODE_ID(hw), 0, + "%s\n", __func__); + /* after calling flush_dpb() and bufmgr_h264_remove_unused_frame(), + all buffers are in display queue (used == 2), + or free (used == 0) + */ + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, "pre h264_reconfig"); + + flush_dpb(p_H264_Dpb); + bufmgr_h264_remove_unused_frame(p_H264_Dpb, 0); + + if (hw->collocate_cma_alloc_addr) { + decoder_bmmu_box_free_idx( + hw->bmmu_box, + BMMU_REF_IDX); + hw->collocate_cma_alloc_addr = 0; + hw->dpb.colocated_mv_addr_start = 0; + hw->dpb.colocated_mv_addr_end = 0; + } + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (vdec->parallel_dec == 1) { + vdec->free_canvas_ex(hw->buffer_spec[i].y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].u_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].v_canvas_index, vdec->id); + hw->buffer_spec[i].y_canvas_index = -1; + hw->buffer_spec[i].u_canvas_index = -1; + hw->buffer_spec[i].v_canvas_index = -1; +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_u_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_v_canvas_index, vdec->id); + hw->buffer_spec[i].vdec_dw_y_canvas_index = -1; + hw->buffer_spec[i].vdec_dw_u_canvas_index = -1; + hw->buffer_spec[i].vdec_dw_v_canvas_index = -1; +#endif + } + } + /*make sure buffers not put back to bufmgr when + vf_put is called*/ + if (hw->buffer_spec[i].used == 2) + hw->buffer_spec[i].used = 3; + + /* ready to release "free buffers" + */ + if (hw->buffer_spec[i].used == 0) + hw->buffer_spec[i].used = 4; + + hw->buffer_spec[i].canvas_pos = -1; + + if (hw->buffer_spec[i].used == 4 && + hw->buffer_spec[i].vf_ref != 0 && + hw->buffer_spec[i].cma_alloc_addr) {
+ hw->buffer_spec[i].used = 3; + } + } + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + hw->has_i_frame = 0; + hw->config_bufmgr_done = 0; + + if (hw->is_used_v4l) { + mutex_lock(&vmh264_mutex); + dealloc_buf_specs(hw, 1); + mutex_unlock(&vmh264_mutex); + } + + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, "after h264_reconfig"); +} + +#ifdef ERROR_HANDLE_TEST +static void h264_clear_dpb(struct vdec_h264_hw_s *hw) +{ + int i; + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s\n", __func__); + remove_dpb_pictures(p_H264_Dpb); + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + /*make sure buffers not put back to bufmgr when + vf_put is called*/ + if (hw->buffer_spec[i].used == 2) + hw->buffer_spec[i].used = 5; + } + +} +#endif + +static void h264_reset_bufmgr(struct vdec_s *vdec) +{ + ulong timeout; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; +#if 0 + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int actual_dpb_size, max_reference_size; + int reorder_pic_num; + unsigned int colocated_buf_size; + unsigned int colocated_mv_addr_start; + unsigned int colocated_mv_addr_end; + dpb_print(DECODE_ID(hw), 0, + "%s\n", __func__); + + for (i = 0; i < VF_POOL_SIZE; i++) + hw->vfpool[hw->cur_pool][i].index = -1; /* VF_BUF_NUM; */ + + actual_dpb_size = p_H264_Dpb->mDPB.size; + max_reference_size = p_H264_Dpb->max_reference_size; + reorder_pic_num = p_H264_Dpb->reorder_pic_num; + + colocated_buf_size = p_H264_Dpb->colocated_buf_size; + colocated_mv_addr_start = p_H264_Dpb->colocated_mv_addr_start; + colocated_mv_addr_end = p_H264_Dpb->colocated_mv_addr_end; + + hw->cur_pool++; + if (hw->cur_pool >= VF_POOL_NUM) + hw->cur_pool = 0; + + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &(hw->vfpool[hw->cur_pool][i]); + 
hw->vfpool[hw->cur_pool][i].index = -1; /* VF_BUF_NUM; */ + hw->vfpool[hw->cur_pool][i].bufWidth = 1920; + kfifo_put(&hw->newframe_q, vf); + } + + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) + hw->buffer_spec[i].used = 0; + + dpb_init_global(&hw->dpb, + DECODE_ID(hw), 0, 0); + p_H264_Dpb->mDPB.size = actual_dpb_size; + p_H264_Dpb->max_reference_size = max_reference_size; + p_H264_Dpb->reorder_pic_num = reorder_pic_num; + + p_H264_Dpb->colocated_buf_size = colocated_buf_size; + p_H264_Dpb->colocated_mv_addr_start = colocated_mv_addr_start; + p_H264_Dpb->colocated_mv_addr_end = colocated_mv_addr_end; + + p_H264_Dpb->fast_output_enable = fast_output_enable; + hw->has_i_frame = 0; +#else + dpb_print(DECODE_ID(hw), 0, + "%s frame count %d to skip %d\n\n", + __func__, hw->decode_pic_count+1, + hw->skip_frame_count); + + flush_dpb(&hw->dpb); + + if (!hw->is_used_v4l) { + timeout = jiffies + HZ; + while (kfifo_len(&hw->display_q) > 0) { + if (time_after(jiffies, timeout)) + break; + schedule(); + } + } + + buf_spec_init(hw, true); + + vh264_local_init(hw, true); + /*hw->decode_pic_count = 0; + hw->seq_info2 = 0;*/ + + if (vh264_set_params(hw, + hw->cfg_param1, + hw->cfg_param2, + hw->cfg_param3, + hw->cfg_param4, true) < 0) + hw->stat |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + else + hw->stat &= (~DECODER_FATAL_ERROR_SIZE_OVERFLOW); + + /*drop 3 frames after reset bufmgr if bit0 is set 1 */ + if (first_i_policy & 0x01) + hw->first_i_policy = (3 << 8) | first_i_policy; + + p_H264_Dpb->first_insert_frame = FirstInsertFrm_RESET; + + if (hw->stat & DECODER_FATAL_ERROR_SIZE_OVERFLOW) + hw->init_flag = 0; + else + hw->init_flag = 1; + + hw->reset_bufmgr_count++; +#endif +} + +int ammvdec_h264_mmu_init(struct vdec_h264_hw_s *hw) +{ + int ret = -1; + int tvp_flag = vdec_secure(hw_to_vdec(hw)) ? 
/* NOTE(review): collapsed unified-diff fragment; code bytes preserved as-is. Contains:
 * (1) the tail of ammvdec_h264_mmu_init(): allocates the decoder MMU box (when mmu_enable)
 *     and the BMMU box with CMA-clear/vdecoder/TVP flags. Note `ret` was initialized to -1
 *     on the previous source line: if BOTH boxes already exist, neither branch runs and the
 *     function returns -1 even though nothing failed — presumably callers never re-enter
 *     after success, but worth confirming;
 * (2) ammvdec_h264_mmu_release(): frees and NULLs both boxes; safe to call repeatedly
 *     since each free is guarded and the pointer is cleared. Always returns 0;
 * (3) the head of ammvdec_h264_probe(): fetches the vdec_s platform data (EFAULT if absent),
 *     allocates the driver state (ENOMEM on failure), records id/platform_dev, and starts
 *     building the per-instance trace counter names (continues on the next source line). */
+ CODEC_MM_FLAGS_TVP : 0; + int buf_size = 64; + + pr_debug("ammvdec_h264_mmu_init tvp = 0x%x mmu_enable %d\n", + tvp_flag, hw->mmu_enable); + hw->need_cache_size = buf_size * SZ_1M; + hw->sc_start_time = get_jiffies_64(); + if (hw->mmu_enable && !hw->mmu_box) { + hw->mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME, + hw->id, + MMU_MAX_BUFFERS, + hw->need_cache_size, + tvp_flag); + if (!hw->mmu_box) { + pr_err("h264 4k alloc mmu box failed!!\n"); + return -1; + } + ret = 0; + } + if (!hw->bmmu_box) { + hw->bmmu_box = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + hw->id, + BMMU_MAX_BUFFERS, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + tvp_flag); + if (hw->bmmu_box) + ret = 0; + } + return ret; +} +int ammvdec_h264_mmu_release(struct vdec_h264_hw_s *hw) +{ + if (hw->mmu_box) { + decoder_mmu_box_free(hw->mmu_box); + hw->mmu_box = NULL; + } + if (hw->bmmu_box) { + decoder_bmmu_box_free(hw->bmmu_box); + hw->bmmu_box = NULL; + } + return 0; +} + +static int ammvdec_h264_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_h264_hw_s *hw = NULL; + char *tmpbuf; + int config_val; + if (pdata == NULL) { + pr_info("\nammvdec_h264 memory resource undefined.\n"); + return -EFAULT; + } + + hw = (struct vdec_h264_hw_s *)h264_alloc_hw_stru(&pdev->dev, + sizeof(struct vdec_h264_hw_s), GFP_KERNEL); + if (hw == NULL) { + pr_info("\nammvdec_h264 device data allocation failed\n"); + return -ENOMEM; + } + hw->id = pdev->id; + hw->platform_dev = pdev; + + snprintf(hw->trace.vdec_name, sizeof(hw->trace.vdec_name), + "h264-%d", hw->id); + snprintf(hw->trace.pts_name, sizeof(hw->trace.pts_name), + "%s-pts", hw->trace.vdec_name); + snprintf(hw->trace.new_q_name, sizeof(hw->trace.new_q_name), + "%s-newframe_q", hw->trace.vdec_name); + snprintf(hw->trace.disp_q_name, sizeof(hw->trace.disp_q_name), + "%s-dispframe_q", hw->trace.vdec_name); + snprintf(hw->trace.decode_time_name,
sizeof(hw->trace.decode_time_name), + "decoder_time%d", pdev->id); + snprintf(hw->trace.decode_run_time_name, sizeof(hw->trace.decode_run_time_name), + "decoder_run_time%d", pdev->id); + snprintf(hw->trace.decode_header_time_name, sizeof(hw->trace.decode_header_time_name), + "decoder_header_time%d", pdev->id); + snprintf(hw->trace.decode_work_time_name, sizeof(hw->trace.decode_work_time_name), + "decoder_work_time%d", pdev->id); + + /* the ctx from v4l2 driver. */ + hw->v4l2_ctx = pdata->private; + + platform_set_drvdata(pdev, pdata); + + hw->mmu_enable = 0; + hw->first_head_check_flag = 0; + + if (pdata->sys_info) + hw->vh264_amstream_dec_info = *pdata->sys_info; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) + force_enable_mmu = 1; + + if (force_enable_mmu && pdata->sys_info && + (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TXLX) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_GXLX) && + (pdata->sys_info->height * pdata->sys_info->width + > 1920 * 1088)) + hw->mmu_enable = 1; + + if (hw->mmu_enable && + (pdata->frame_base_video_path == FRAME_BASE_PATH_IONVIDEO)) { + hw->mmu_enable = 0; + pr_info("ionvideo needs disable mmu, path= %d \n", + pdata->frame_base_video_path); + } + + if (ammvdec_h264_mmu_init(hw)) { + h264_free_hw_stru(&pdev->dev, (void *)hw); + pr_info("\nammvdec_h264 mmu alloc failed!\n"); + return -ENOMEM; + } + + if (pdata->config_len) { + dpb_print(DECODE_ID(hw), 0, "pdata->config=%s\n", pdata->config); + /*use ptr config for doubel_write_mode, etc*/ + if (get_config_int(pdata->config, + "mh264_double_write_mode", &config_val) == 0) + hw->double_write_mode = config_val; + else + hw->double_write_mode = double_write_mode; + + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hw->is_used_v4l = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_buffer_margin", + &config_val) == 0) + hw->reorder_dpb_size_margin = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + 
&config_val) == 0) + hw->canvas_mode = config_val; + if (get_config_int(pdata->config, + "parm_v4l_low_latency_mode", + &config_val) == 0) + hw->low_latency_mode = config_val ? 0x8:0; + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hw->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hw->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, + "parm_enable_fence", + &config_val) == 0) + hw->enable_fence = config_val; + + if (get_config_int(pdata->config, + "parm_fence_usage", + &config_val) == 0) + hw->fence_usage = config_val; + + if (get_config_int(pdata->config, + "negative_dv", + &config_val) == 0) { + hw->discard_dv_data = config_val; + dpb_print(DECODE_ID(hw), 0, "discard dv data\n"); + } + + if (get_config_int(pdata->config, + "parm_v4l_metadata_config_flag", + &config_val) == 0) { + hw->metadata_config_flag = config_val; + hw->discard_dv_data = hw->metadata_config_flag & VDEC_CFG_FLAG_DV_NEGATIVE; + if (hw->discard_dv_data) + dpb_print(DECODE_ID(hw), 0, "discard dv data\n"); + } + + } else + hw->double_write_mode = double_write_mode; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) + hw->double_write_mode = 3; + + if (force_config_fence) { + hw->enable_fence = true; + hw->fence_usage = (force_config_fence >> 4) & 0xf; + if (force_config_fence & 0x2) + hw->enable_fence = false; + dpb_print(DECODE_ID(hw), 0, + "enable fence: %d, fence usage: %d\n", + hw->enable_fence, hw->fence_usage); + } + + if (!hw->is_used_v4l) { + hw->reorder_dpb_size_margin = reorder_dpb_size_margin; + hw->canvas_mode = mem_map_mode; + + if ((h264_debug_flag & IGNORE_PARAM_FROM_CONFIG) == 0) + hw->canvas_mode = pdata->canvas_mode; + } + + if (hw->mmu_enable) { + hw->canvas_mode = CANVAS_BLKMODE_LINEAR; + hw->double_write_mode &= 0xffff; + } + + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + hw->buffer_spec[i].y_canvas_index = -1; + 
hw->buffer_spec[i].u_canvas_index = -1; + hw->buffer_spec[i].v_canvas_index = -1; +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + hw->buffer_spec[i].vdec_dw_y_canvas_index = -1; + hw->buffer_spec[i].vdec_dw_u_canvas_index = -1; + hw->buffer_spec[i].vdec_dw_v_canvas_index = -1; + } +#endif + } + } + + dpb_print(DECODE_ID(hw), 0, + "%s mmu_enable %d double_write_mode 0x%x\n", + __func__, hw->mmu_enable, hw->double_write_mode); + + pdata->private = hw; + pdata->dec_status = dec_status; + pdata->set_trickmode = vmh264_set_trickmode; + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vh264_isr; + pdata->threaded_irq_handler = vh264_isr_thread_fn; + pdata->dump_state = vmh264_dump_state; + +#ifdef MH264_USERDATA_ENABLE + pdata->wakeup_userdata_poll = vmh264_wakeup_userdata_poll; + pdata->user_data_read = vmh264_user_data_read; + pdata->reset_userdata_fifo = vmh264_reset_userdata_fifo; +#else + pdata->wakeup_userdata_poll = NULL; + pdata->user_data_read = NULL; + pdata->reset_userdata_fifo = NULL; +#endif + if (pdata->use_vfm_path) { + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + hw->frameinfo_enable = 1; + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (vdec_dual(pdata)) { + if (!pdata->is_stream_mode_dv_multi) { + if (dv_toggle_prov_name) /*debug purpose*/ + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVBL_PROVIDER_NAME : + VFM_DEC_DVEL_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVEL_PROVIDER_NAME : + VFM_DEC_DVBL_PROVIDER_NAME); + } else { + if (dv_toggle_prov_name) /*debug purpose*/ + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVBL_PROVIDER_NAME2 : + VFM_DEC_DVEL_PROVIDER_NAME2); + else + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? 
VFM_DEC_DVEL_PROVIDER_NAME2 : + VFM_DEC_DVBL_PROVIDER_NAME2); + } + } +#endif + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + PROVIDER_NAME ".%02x", pdev->id & 0xff); + + if (!hw->is_used_v4l) + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vf_provider_ops, pdata); + + platform_set_drvdata(pdev, pdata); + + buf_spec_init(hw, false); + + hw->platform_dev = pdev; + +#ifdef DUMP_USERDATA_RECORD + vmh264_init_userdata_dump(); + vmh264_reset_user_data_buf(); +#endif + if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, BMMU_DPB_IDX, + V_BUF_ADDR_OFFSET, DRIVER_NAME, &hw->cma_alloc_addr) < 0) { + h264_free_hw_stru(&pdev->dev, (void *)hw); + pdata->dec_status = NULL; + return -ENOMEM; + } + + hw->buf_offset = hw->cma_alloc_addr - DEF_BUF_START_ADDR + + DCAC_READ_MARGIN; + if (hw->mmu_enable) { + u32 extif_size = EXTIF_BUF_SIZE; + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) + extif_size <<= 1; + if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, BMMU_EXTIF_IDX, + extif_size, DRIVER_NAME, &hw->extif_addr) < 0) { + h264_free_hw_stru(&pdev->dev, (void *)hw); + pdata->dec_status = NULL; + return -ENOMEM; + } + } + if (!vdec_secure(pdata)) { +#if 1 + /*init internal buf*/ + tmpbuf = (char *)codec_mm_phys_to_virt(hw->cma_alloc_addr); + if (tmpbuf) { + memset(tmpbuf, 0, V_BUF_ADDR_OFFSET); + codec_mm_dma_flush(tmpbuf, + V_BUF_ADDR_OFFSET, + DMA_TO_DEVICE); + } else { + tmpbuf = codec_mm_vmap(hw->cma_alloc_addr, + V_BUF_ADDR_OFFSET); + if (tmpbuf) { + memset(tmpbuf, 0, V_BUF_ADDR_OFFSET); + codec_mm_dma_flush(tmpbuf, + V_BUF_ADDR_OFFSET, + DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(tmpbuf); + } + } +#else + /*init sps/pps internal buf 64k*/ + tmpbuf = (char *)codec_mm_phys_to_virt(hw->cma_alloc_addr + + (mem_sps_base - DEF_BUF_START_ADDR)); + memset(tmpbuf, 0, 0x10000); + dma_sync_single_for_device(amports_get_dma_device(), + hw->cma_alloc_addr + + (mem_sps_base - DEF_BUF_START_ADDR), + 0x10000, DMA_TO_DEVICE); +#endif + } + /**/ + 
+#if 0 + if (NULL == hw->sei_data_buffer) { + hw->sei_data_buffer = + dma_alloc_coherent(amports_get_dma_device(), + USER_DATA_SIZE, + &hw->sei_data_buffer_phys, GFP_KERNEL); + if (!hw->sei_data_buffer) { + pr_info("%s: Can not allocate sei_data_buffer\n", + __func__); + ammvdec_h264_mmu_release(hw); + h264_free_hw_stru(&pdev->dev, (void *)hw); + return -ENOMEM; + } + /* pr_info("buffer 0x%x, phys 0x%x, remap 0x%x\n", + sei_data_buffer, sei_data_buffer_phys, + (u32)sei_data_buffer_remap); */ + } +#endif + dpb_print(DECODE_ID(hw), 0, "ammvdec_h264 mem-addr=%lx,buff_offset=%x,buf_start=%lx\n", + pdata->mem_start, hw->buf_offset, hw->cma_alloc_addr); + + vdec_source_changed(VFORMAT_H264, 3840, 2160, 60); + + if (hw->mmu_enable) + hevc_source_changed(VFORMAT_HEVC, 3840, 2160, 60); + + if (vh264_init(hw) < 0) { + pr_info("\nammvdec_h264 init failed.\n"); + ammvdec_h264_mmu_release(hw); + h264_free_hw_stru(&pdev->dev, (void *)hw); + pdata->dec_status = NULL; + return -ENODEV; + } +#ifdef MH264_USERDATA_ENABLE + vmh264_crate_userdata_manager(hw, + hw->sei_user_data_buffer, + USER_DATA_SIZE); +#endif + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_init(pdata); +#endif + + vdec_set_prepare_level(pdata, start_decode_buf_level); + if (pdata->parallel_dec == 1) { + if (hw->mmu_enable == 0) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + } else + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + + atomic_set(&hw->vh264_active, 1); + vdec_set_vframe_comm(pdata, DRIVER_NAME); + display_frame_count[DECODE_ID(hw)] = 0; + decode_frame_count[DECODE_ID(hw)] = 0; + hw->dpb.without_display_mode = without_display_mode; + mutex_init(&hw->fence_mutex); + if (hw->enable_fence) { + pdata->sync = vdec_sync_get(); + if (!pdata->sync) { + dpb_print(DECODE_ID(hw), 0, "alloc fence timeline error\n"); + ammvdec_h264_mmu_release(hw); + h264_free_hw_stru(&pdev->dev, (void 
*)hw); + pdata->dec_status = NULL; + return -ENODEV; + } + pdata->sync->usage = hw->fence_usage; + /* creat timeline. */ + vdec_timeline_create(pdata->sync, DRIVER_NAME); + } + + return 0; +} + +static void vdec_fence_release(struct vdec_h264_hw_s *hw, + struct vdec_sync *sync) +{ + ulong expires; + + /* clear display pool. */ + clear_refer_bufs(hw); + + /* notify signal to wake up all fences. */ + vdec_timeline_increase(sync, VF_POOL_SIZE); + + expires = jiffies + msecs_to_jiffies(2000); + while (!check_objs_all_signaled(sync)) { + if (time_after(jiffies, expires)) { + pr_err("wait fence signaled timeout.\n"); + break; + } + } + + pr_info("fence start release\n"); + + /* decreases refcnt of timeline. */ + vdec_timeline_put(sync); +} + +static int ammvdec_h264_remove(struct platform_device *pdev) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + int i; + + struct vdec_s *vdec = hw_to_vdec(hw); + + if (vdec->next_status == VDEC_STATUS_DISCONNECTED + && (vdec->status == VDEC_STATUS_ACTIVE)) { + dpb_print(DECODE_ID(hw), 0, + "%s force exit %d\n", __func__, __LINE__); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + wait_event_interruptible_timeout(hw->wait_q, + (vdec->status == VDEC_STATUS_CONNECTED), + msecs_to_jiffies(1000)); /* wait for work done */ + } + + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) + release_aux_data(hw, i); + + atomic_set(&hw->vh264_active, 0); + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + + vh264_stop(hw); +#ifdef MH264_USERDATA_ENABLE +#ifdef DUMP_USERDATA_RECORD + vmh264_dump_userdata(); +#endif + vmh264_destroy_userdata_manager(hw); +#endif + /* vdec_source_changed(VFORMAT_H264, 0, 0, 0); */ + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_exit(vdec); +#endif + + atomic_set(&hw->vh264_active, 0); + if (vdec->parallel_dec == 1) { + if (hw->mmu_enable == 0) + vdec_core_release(vdec, 
CORE_MASK_VDEC_1); + else + vdec_core_release(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC | + CORE_MASK_COMBINE); + } else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + if (vdec->parallel_dec == 1) { + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + vdec->free_canvas_ex(hw->buffer_spec[i].y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].u_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].v_canvas_index, vdec->id); + if (IS_VDEC_DW(hw)) { + vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_u_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_v_canvas_index, vdec->id); + } + } + } + + if (hw->enable_fence) + vdec_fence_release(hw, vdec->sync); + + ammvdec_h264_mmu_release(hw); + h264_free_hw_stru(&pdev->dev, (void *)hw); + clk_adj_frame_count = 0; + + return 0; +} + +/****************************************/ + +static struct platform_driver ammvdec_h264_driver = { + .probe = ammvdec_h264_probe, + .remove = ammvdec_h264_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t ammvdec_h264_profile = { + .name = "mh264", + .profile = "" +}; + +static struct mconfig hm264_configs[] = { + MC_PU32("h264_debug_flag", &h264_debug_flag), + MC_PI32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("fixed_frame_rate_mode", &fixed_frame_rate_mode), + MC_PU32("decode_timeout_val", &decode_timeout_val), + MC_PU32("reorder_dpb_size_margin", &reorder_dpb_size_margin), + MC_PU32("reference_buf_margin", &reference_buf_margin), + MC_PU32("radr", &radr), + MC_PU32("rval", &rval), + MC_PU32("h264_debug_mask", &h264_debug_mask), + MC_PU32("h264_debug_cmd", &h264_debug_cmd), + MC_PI32("force_rate_streambase", &force_rate_streambase), + 
MC_PI32("dec_control", &dec_control), + MC_PI32("force_rate_framebase", &force_rate_framebase), + MC_PI32("force_disp_bufspec_num", &force_disp_bufspec_num), + MC_PU32("prefix_aux_buf_size", &prefix_aux_buf_size), + MC_PU32("suffix_aux_buf_size", &suffix_aux_buf_size), +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + MC_PU32("reorder_dpb_size_margin_dv", &reorder_dpb_size_margin_dv), + MC_PU32("dv_toggle_prov_name", &dv_toggle_prov_name), + MC_PU32("dolby_meta_with_el", &dolby_meta_with_el), +#endif + MC_PU32("i_only_flag", &i_only_flag), + MC_PU32("force_rate_streambase", &force_rate_streambase), +}; +static struct mconfig_node hm264_node; + + +static int __init ammvdec_h264_driver_init_module(void) +{ + + pr_info("ammvdec_h264 module init\n"); + if (platform_driver_register(&ammvdec_h264_driver)) { + pr_info("failed to register ammvdec_h264 driver\n"); + return -ENODEV; + } + + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TXLX) { + ammvdec_h264_profile.profile = + "4k, dwrite, compressed, frame_dv, fence, v4l"; + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXTVBB) { + ammvdec_h264_profile.profile = "4k, frame_dv, fence, v4l"; + } + } else { + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D || is_cpu_s4_s805x2()) { + ammvdec_h264_profile.profile = + "dwrite, compressed, frame_dv, v4l"; + } else { + ammvdec_h264_profile.profile = + "dwrite, compressed, v4l"; + } + } + + vcodec_profile_register(&ammvdec_h264_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &hm264_node, + "mh264", hm264_configs, CONFIG_FOR_RW); + + vcodec_feature_register(VFORMAT_H264, 0); + return 0; +} + +static void __exit ammvdec_h264_driver_remove_module(void) +{ + pr_info("ammvdec_h264 module remove.\n"); + + platform_driver_unregister(&ammvdec_h264_driver); +} + +/****************************************/ +module_param(h264_debug_flag, uint, 0664); +MODULE_PARM_DESC(h264_debug_flag, "\n ammvdec_h264 h264_debug_flag\n"); + 
+module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n ammvdec_h264 start_decode_buf_level\n"); + +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, "\n ammvdec_h264 pre_decode_buf_level\n"); + +module_param(fixed_frame_rate_mode, uint, 0664); +MODULE_PARM_DESC(fixed_frame_rate_mode, "\namvdec_h264 fixed_frame_rate_mode\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, "\n amvdec_h264 decode_timeout_val\n"); + +module_param(errordata_timeout_val, uint, 0664); +MODULE_PARM_DESC(errordata_timeout_val, "\n amvdec_h264 errordata_timeout_val\n"); + +module_param(get_data_timeout_val, uint, 0664); +MODULE_PARM_DESC(get_data_timeout_val, "\n amvdec_h264 get_data_timeout_val\n"); + +module_param(frame_max_data_packet, uint, 0664); +MODULE_PARM_DESC(frame_max_data_packet, "\n amvdec_h264 frame_max_data_packet\n"); + +module_param(reorder_dpb_size_margin, uint, 0664); +MODULE_PARM_DESC(reorder_dpb_size_margin, "\n ammvdec_h264 reorder_dpb_size_margin\n"); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(reorder_dpb_size_margin_dv, uint, 0664); +MODULE_PARM_DESC(reorder_dpb_size_margin_dv, + "\n ammvdec_h264 reorder_dpb_size_margin_dv\n"); +#endif + +module_param(reference_buf_margin, uint, 0664); +MODULE_PARM_DESC(reference_buf_margin, "\n ammvdec_h264 reference_buf_margin\n"); + +#ifdef CONSTRAIN_MAX_BUF_NUM +module_param(run_ready_max_vf_only_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_vf_only_num, "\n run_ready_max_vf_only_num\n"); + +module_param(run_ready_display_q_num, uint, 0664); +MODULE_PARM_DESC(run_ready_display_q_num, "\n run_ready_display_q_num\n"); + +module_param(run_ready_max_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_buf_num, "\n run_ready_max_buf_num\n"); +#endif + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, 
"\nrval\n"); + +module_param(h264_debug_mask, uint, 0664); +MODULE_PARM_DESC(h264_debug_mask, "\n amvdec_h264 h264_debug_mask\n"); + +module_param(h264_debug_cmd, uint, 0664); +MODULE_PARM_DESC(h264_debug_cmd, "\n amvdec_h264 h264_debug_cmd\n"); + +module_param(force_rate_streambase, int, 0664); +MODULE_PARM_DESC(force_rate_streambase, "\n amvdec_h264 force_rate_streambase\n"); + +module_param(dec_control, int, 0664); +MODULE_PARM_DESC(dec_control, "\n amvdec_h264 dec_control\n"); + +module_param(force_rate_framebase, int, 0664); +MODULE_PARM_DESC(force_rate_framebase, "\n amvdec_h264 force_rate_framebase\n"); + +module_param(force_disp_bufspec_num, int, 0664); +MODULE_PARM_DESC(force_disp_bufspec_num, "\n amvdec_h264 force_disp_bufspec_num\n"); + +module_param(V_BUF_ADDR_OFFSET, int, 0664); +MODULE_PARM_DESC(V_BUF_ADDR_OFFSET, "\n amvdec_h264 V_BUF_ADDR_OFFSET\n"); + +module_param(prefix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(prefix_aux_buf_size, "\n prefix_aux_buf_size\n"); + +module_param(suffix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(suffix_aux_buf_size, "\n suffix_aux_buf_size\n"); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(dv_toggle_prov_name, uint, 0664); +MODULE_PARM_DESC(dv_toggle_prov_name, "\n dv_toggle_prov_name\n"); + +module_param(dolby_meta_with_el, uint, 0664); +MODULE_PARM_DESC(dolby_meta_with_el, "\n dolby_meta_with_el\n"); + +#endif + +module_param(fast_output_enable, uint, 0664); +MODULE_PARM_DESC(fast_output_enable, "\n amvdec_h264 fast_output_enable\n"); + +module_param(error_proc_policy, uint, 0664); +MODULE_PARM_DESC(error_proc_policy, "\n amvdec_h264 error_proc_policy\n"); + +module_param(error_skip_count, uint, 0664); +MODULE_PARM_DESC(error_skip_count, "\n amvdec_h264 error_skip_count\n"); + +module_param(force_sliding_margin, uint, 0664); +MODULE_PARM_DESC(force_sliding_margin, "\n amvdec_h264 force_sliding_margin\n"); + +module_param(i_only_flag, uint, 0664); +MODULE_PARM_DESC(i_only_flag, "\n 
amvdec_h264 i_only_flag\n"); + +module_param(first_i_policy, uint, 0664); +MODULE_PARM_DESC(first_i_policy, "\n amvdec_h264 first_i_policy\n"); + +module_param(frmbase_cont_bitlevel, uint, 0664); +MODULE_PARM_DESC(frmbase_cont_bitlevel, + "\n amvdec_h264 frmbase_cont_bitlevel\n"); + +module_param(frmbase_cont_bitlevel2, uint, 0664); +MODULE_PARM_DESC(frmbase_cont_bitlevel2, + "\n amvdec_h264 frmbase_cont_bitlevel\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_mh264 udebug_flag\n"); + +module_param(udebug_pause_pos, uint, 0664); +MODULE_PARM_DESC(udebug_pause_pos, "\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +module_param(max_alloc_buf_count, uint, 0664); +MODULE_PARM_DESC(max_alloc_buf_count, "\n amvdec_h264 max_alloc_buf_count\n"); + +module_param(enable_itu_t35, uint, 0664); +MODULE_PARM_DESC(enable_itu_t35, "\n amvdec_h264 enable_itu_t35\n"); + +module_param(endian, uint, 0664); +MODULE_PARM_DESC(endian, "\nrval\n"); + +module_param(mmu_enable, uint, 0664); +MODULE_PARM_DESC(mmu_enable, "\n mmu_enable\n"); + +module_param(force_enable_mmu, uint, 0664); +MODULE_PARM_DESC(force_enable_mmu, "\n force_enable_mmu\n"); + +module_param(again_threshold, uint, 0664); +MODULE_PARM_DESC(again_threshold, "\n again_threshold\n"); + +module_param(stream_mode_start_num, uint, 0664); +MODULE_PARM_DESC(stream_mode_start_num, "\n stream_mode_start_num\n"); + +module_param(colocate_old_cal, uint, 0664); +MODULE_PARM_DESC(colocate_old_cal, "\n amvdec_mh264 colocate_old_cal\n"); + +/* +module_param(trigger_task, uint, 0664); +MODULE_PARM_DESC(trigger_task, "\n amvdec_h264 trigger_task\n"); +*/ +module_param_array(decode_frame_count, uint, &max_decode_instance_num, 0664); + +module_param_array(display_frame_count, uint, 
&max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, &max_decode_instance_num, 0664); + +module_param_array(run_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(not_run_ready, uint, + &max_decode_instance_num, 0664); + +module_param_array(input_empty, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_get_frame_interval, uint, + &max_decode_instance_num, 0664); + +module_param_array(step, uint, &max_decode_instance_num, 0664); + +module_param_array(ref_frame_mark_flag, uint, &max_decode_instance_num, 0664); + +module_param(disp_vframe_valve_level, uint, 0664); +MODULE_PARM_DESC(disp_vframe_valve_level, "\n disp_vframe_valve_level\n"); + +module_param(double_write_mode, uint, 0664); +MODULE_PARM_DESC(double_write_mode, "\n double_write_mode\n"); + +module_param(mem_map_mode, uint, 0664); +MODULE_PARM_DESC(mem_map_mode, "\n mem_map_mode\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n without_display_mode\n"); + +module_param(check_slice_num, uint, 0664); +MODULE_PARM_DESC(check_slice_num, "\n check_slice_num\n"); + +module_param(mb_count_threshold, uint, 0664); +MODULE_PARM_DESC(mb_count_threshold, "\n mb_count_threshold\n"); + +module_param(loop_playback_poc_threshold, int, 0664); +MODULE_PARM_DESC(loop_playback_poc_threshold, "\n loop_playback_poc_threshold\n"); + +module_param(poc_threshold, int, 0664); +MODULE_PARM_DESC(poc_threshold, "\n poc_threshold\n"); + +module_param(force_config_fence, uint, 0664); +MODULE_PARM_DESC(force_config_fence, "\n force enable fence\n"); + +module_param(dirty_again_threshold, uint, 0664); +MODULE_PARM_DESC(dirty_again_threshold, "\n amvdec_h264 dirty_again_threshold\n"); + +module_init(ammvdec_h264_driver_init_module); +module_exit(ammvdec_h264_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC H264 Video Decoder Driver"); +MODULE_LICENSE("GPL");
diff --git a/drivers/frame_provider/decoder/h265/Makefile b/drivers/frame_provider/decoder/h265/Makefile new file mode 100644 index 0000000..86b8b88 --- /dev/null +++ b/drivers/frame_provider/decoder/h265/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_H265) += amvdec_h265.o +amvdec_h265-objs += vh265.o
diff --git a/drivers/frame_provider/decoder/h265/vh265.c b/drivers/frame_provider/decoder/h265/vh265.c new file mode 100644 index 0000000..d5945ce --- /dev/null +++ b/drivers/frame_provider/decoder/h265/vh265.c
@@ -0,0 +1,16227 @@ +/* + * drivers/amlogic/amports/vh265.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/timer.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include "../utils/config_parser.h" +#include "../utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../utils/vdec_v4l2_buffer_ops.h" +#include <media/v4l2-mem2mem.h> + +/* +to 
enable DV of frame mode +#define DOLBY_META_SUPPORT in ucode +*/ + +#define HEVC_8K_LFTOFFSET_FIX +#define SUPPORT_LONG_TERM_RPS + +//#define CO_MV_COMPRESS + +#define CONSTRAIN_MAX_BUF_NUM + +#define SWAP_HEVC_UCODE +#define DETREFILL_ENABLE + +#define AGAIN_HAS_THRESHOLD +/*#define TEST_NO_BUF*/ +#define HEVC_PIC_STRUCT_SUPPORT +#define MULTI_INSTANCE_SUPPORT +#define USE_UNINIT_SEMA + + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ +#define MPRED_8K_MV_BUF_SIZE (0x120000*4) +#define MPRED_4K_MV_BUF_SIZE (0x120000) +#define MPRED_MV_BUF_SIZE (0x3fc00) + +#define MMU_COMPRESS_HEADER_SIZE_1080P 0x10000 +#define MMU_COMPRESS_HEADER_SIZE_4K 0x48000 +#define MMU_COMPRESS_HEADER_SIZE_8K 0x120000 +#define DB_NUM 20 + +#define MAX_FRAME_4K_NUM 0x1200 +#define MAX_FRAME_8K_NUM ((MAX_FRAME_4K_NUM) * 4) + +//#define FRAME_MMU_MAP_SIZE (MAX_FRAME_4K_NUM * 4) +#define H265_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_7 + +#define HEVC_ASSIST_MMU_MAP_ADDR 0x3009 + +#define HEVC_CM_HEADER_START_ADDR 0x3628 +#define HEVC_CM_HEADER_START_ADDR2 0x364a +#define HEVC_SAO_MMU_VH1_ADDR 0x363b +#define HEVC_SAO_MMU_VH0_ADDR 0x363a +#define HEVC_SAO_MMU_VH0_ADDR2 0x364d +#define HEVC_SAO_MMU_VH1_ADDR2 0x364e + +#define HEVC_SAO_MMU_DMA_CTRL2 0x364c +#define HEVC_SAO_MMU_STATUS2 0x3650 +#define HEVC_DW_VH0_ADDDR 0x365e +#define HEVC_DW_VH1_ADDDR 0x365f + +#define HEVC_DBLK_CFGB 0x350b +#define HEVCD_MPP_DECOMP_AXIURG_CTL 0x34c7 +#define SWAP_HEVC_OFFSET (3 * 0x1000) + +#define MEM_NAME "codec_265" +/* #include <mach/am_regs.h> */ +#include <linux/amlogic/media/utils/vdec_reg.h> + +#include "../utils/vdec.h" +#include "../utils/amvdec.h" +#include <linux/amlogic/media/video_sink/video.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../utils/vdec_feature.h" + +#define SEND_LMEM_WITH_RPM +#define SUPPORT_10BIT +#define H265_10B_MMU_DW +/* #define ERROR_HANDLE_DEBUG */ + +#ifndef STAT_KTHREAD +#define STAT_KTHREAD 0x40 +#endif + 
+#ifdef MULTI_INSTANCE_SUPPORT +#define MAX_DECODE_INSTANCE_NUM 9 +#define MULTI_DRIVER_NAME "ammvdec_h265" +#endif +#define DRIVER_NAME "amvdec_h265" +#define DRIVER_HEADER_NAME "amvdec_h265_header" + +#define PUT_INTERVAL (HZ/100) +#define ERROR_SYSTEM_RESET_COUNT 200 + +#define PTS_NORMAL 0 +#define PTS_NONE_REF_USE_DURATION 1 + +#define PTS_MODE_SWITCHING_THRESHOLD 3 +#define PTS_MODE_SWITCHING_RECOVERY_THREASHOLD 3 + +#define DUR2PTS(x) ((x)*90/96) + +#define MAX_SIZE_8K (8192 * 4608) +#define MAX_SIZE_4K (4096 * 2304) +#define MAX_SIZE_2K (1920 * 1088) + +#define IS_8K_SIZE(w, h) (((w) * (h)) > MAX_SIZE_4K) +#define IS_4K_SIZE(w, h) (((w) * (h)) > (1920*1088)) + +#define SEI_UserDataITU_T_T35 4 +#define INVALID_IDX -1 /* Invalid buffer index.*/ + +static struct semaphore h265_sema; + +struct hevc_state_s; +static int hevc_print(struct hevc_state_s *hevc, + int debug_flag, const char *fmt, ...); +static int hevc_print_cont(struct hevc_state_s *hevc, + int debug_flag, const char *fmt, ...); +static int vh265_vf_states(struct vframe_states *states, void *); +static struct vframe_s *vh265_vf_peek(void *); +static struct vframe_s *vh265_vf_get(void *); +static void vh265_vf_put(struct vframe_s *, void *); +static int vh265_event_cb(int type, void *data, void *private_data); + +static int vh265_stop(struct hevc_state_s *hevc); +#ifdef MULTI_INSTANCE_SUPPORT +static int vmh265_stop(struct hevc_state_s *hevc); +static s32 vh265_init(struct vdec_s *vdec); +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask); +static void reset_process_time(struct hevc_state_s *hevc); +static void start_process_time(struct hevc_state_s *hevc); +static void restart_process_time(struct hevc_state_s *hevc); +static void timeout_process(struct hevc_state_s *hevc); +#else +static s32 vh265_init(struct hevc_state_s *hevc); +#endif +static void vh265_prot_init(struct hevc_state_s *hevc); +static int vh265_local_init(struct hevc_state_s *hevc); +static void 
vh265_check_timer_func(struct timer_list *timer); +static void config_decode_mode(struct hevc_state_s *hevc); +static int check_data_size(struct vdec_s *vdec); + + +static const char vh265_dec_id[] = "vh265-dev"; + +#define PROVIDER_NAME "decoder.h265" +#define MULTI_INSTANCE_PROVIDER_NAME "vdec.h265" + +static const struct vframe_operations_s vh265_vf_provider = { + .peek = vh265_vf_peek, + .get = vh265_vf_get, + .put = vh265_vf_put, + .event_cb = vh265_event_cb, + .vf_states = vh265_vf_states, +}; + +static struct vframe_provider_s vh265_vf_prov; + +static u32 bit_depth_luma; +static u32 bit_depth_chroma; +static u32 video_signal_type; +static int start_decode_buf_level = 0x8000; +static unsigned int decode_timeout_val = 200; + +static u32 run_ready_min_buf_num = 2; +static u32 disable_ip_mode; +static u32 print_lcu_error = 1; +/*data_resend_policy: + bit 0, stream base resend data when decoding buf empty +*/ +static u32 data_resend_policy = 1; +static int poc_num_margin = 1000; +static int poc_error_limit = 30; + +static u32 dirty_again_threshold = 100; +static u32 dirty_buffersize_threshold = 0x800000; + + +#define VIDEO_SIGNAL_TYPE_AVAILABLE_MASK 0x20000000 +/* +static const char * const video_format_names[] = { + "component", "PAL", "NTSC", "SECAM", + "MAC", "unspecified", "unspecified", "unspecified" +}; + +static const char * const color_primaries_names[] = { + "unknown", "bt709", "undef", "unknown", + "bt470m", "bt470bg", "smpte170m", "smpte240m", + "film", "bt2020" +}; + +static const char * const transfer_characteristics_names[] = { + "unknown", "bt709", "undef", "unknown", + "bt470m", "bt470bg", "smpte170m", "smpte240m", + "linear", "log100", "log316", "iec61966-2-4", + "bt1361e", "iec61966-2-1", "bt2020-10", "bt2020-12", + "smpte-st-2084", "smpte-st-428" +}; + +static const char * const matrix_coeffs_names[] = { + "GBR", "bt709", "undef", "unknown", + "fcc", "bt470bg", "smpte170m", "smpte240m", + "YCgCo", "bt2020nc", "bt2020c" +}; +*/ +#ifdef 
SUPPORT_10BIT +#define HEVC_CM_BODY_START_ADDR 0x3626 +#define HEVC_CM_BODY_LENGTH 0x3627 +#define HEVC_CM_HEADER_LENGTH 0x3629 +#define HEVC_CM_HEADER_OFFSET 0x362b +#define HEVC_SAO_CTRL9 0x362d + +#define HEVC_CM_BODY_LENGTH2 0x3663 +#define HEVC_CM_HEADER_OFFSET2 0x3664 +#define HEVC_CM_HEADER_LENGTH2 0x3665 + +#define LOSLESS_COMPRESS_MODE +/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */ +/* double_write_mode: + * 0, no double write; + * 1, 1:1 ratio; + * 2, (1/4):(1/4) ratio; + * 3, (1/4):(1/4) ratio, with both compressed frame included + * 4, (1/2):(1/2) ratio; + * 5, (1/2):(1/2) ratio, with both compressed frame included + * 8, (1/8):(1/8) ratio, from t7 + * 0x10, double write only + * 0x100, if > 1080p,use mode 4,else use mode 1; + * 0x200, if > 1080p,use mode 2,else use mode 1; + * 0x300, if > 720p, use mode 4, else use mode 1; + * 0x1000,if > 1080p,use mode 3, else if > 960*540, use mode 4, else use mode 1; + */ +static u32 double_write_mode; + +/*#define DECOMP_HEADR_SURGENT*/ + +static u32 mem_map_mode; /* 0:linear 1:32x32 2:64x32 ; m8baby test1902 */ +static u32 enable_mem_saving = 1; +static u32 workaround_enable; +static u32 force_w_h; +#endif +static u32 force_fps; +static u32 pts_unstable; +#define H265_DEBUG_BUFMGR 0x01 +#define H265_DEBUG_BUFMGR_MORE 0x02 +#define H265_DEBUG_DETAIL 0x04 +#define H265_DEBUG_REG 0x08 +#define H265_DEBUG_MAN_SEARCH_NAL 0x10 +#define H265_DEBUG_MAN_SKIP_NAL 0x20 +#define H265_DEBUG_DISPLAY_CUR_FRAME 0x40 +#define H265_DEBUG_FORCE_CLK 0x80 +#define H265_DEBUG_SEND_PARAM_WITH_REG 0x100 +#define H265_DEBUG_NO_DISPLAY 0x200 +#define H265_DEBUG_DISCARD_NAL 0x400 +#define H265_DEBUG_OUT_PTS 0x800 +#define H265_DEBUG_DUMP_PIC_LIST 0x1000 +#define H265_DEBUG_PRINT_SEI 0x2000 +#define H265_DEBUG_PIC_STRUCT 0x4000 +#define H265_DEBUG_HAS_AUX_IN_SLICE 0x8000 +#define H265_DEBUG_DIS_LOC_ERROR_PROC 0x10000 +#define H265_DEBUG_DIS_SYS_ERROR_PROC 0x20000 +#define H265_NO_CHANG_DEBUG_FLAG_IN_CODE 0x40000 
+#define H265_DEBUG_TRIG_SLICE_SEGMENT_PROC 0x80000 +#define H265_DEBUG_HW_RESET 0x100000 +#define H265_CFG_CANVAS_IN_DECODE 0x200000 +#define H265_DEBUG_DV 0x400000 +#define H265_DEBUG_NO_EOS_SEARCH_DONE 0x800000 +#define H265_DEBUG_NOT_USE_LAST_DISPBUF 0x1000000 +#define H265_DEBUG_IGNORE_CONFORMANCE_WINDOW 0x2000000 +#define H265_DEBUG_WAIT_DECODE_DONE_WHEN_STOP 0x4000000 +#ifdef MULTI_INSTANCE_SUPPORT +#define PRINT_FLAG_ERROR 0x0 +#define IGNORE_PARAM_FROM_CONFIG 0x08000000 +#define PRINT_FRAMEBASE_DATA 0x10000000 +#define PRINT_FLAG_VDEC_STATUS 0x20000000 +#define PRINT_FLAG_VDEC_DETAIL 0x40000000 +#define PRINT_FLAG_V4L_DETAIL 0x80000000 +#endif + +#define BUF_POOL_SIZE 32 +#define MAX_BUF_NUM 24 +#define MAX_REF_PIC_NUM 24 +#define MAX_REF_ACTIVE 16 + +#ifdef MV_USE_FIXED_BUF +#define BMMU_MAX_BUFFERS (BUF_POOL_SIZE + 1) +#define VF_BUFFER_IDX(n) (n) +#define BMMU_WORKSPACE_ID (BUF_POOL_SIZE) +#else +#define BMMU_MAX_BUFFERS (BUF_POOL_SIZE + 1 + MAX_REF_PIC_NUM) +#define VF_BUFFER_IDX(n) (n) +#define BMMU_WORKSPACE_ID (BUF_POOL_SIZE) +#define MV_BUFFER_IDX(n) (BUF_POOL_SIZE + 1 + n) +#endif + +#define HEVC_MV_INFO 0x310d +#define HEVC_QP_INFO 0x3137 +#define HEVC_SKIP_INFO 0x3136 + +const u32 h265_version = 201602101; +static u32 debug_mask = 0xffffffff; +static u32 log_mask; +static u32 debug; +static u32 radr; +static u32 rval; +static u32 dbg_cmd; +static u32 dump_nal; +static u32 dbg_skip_decode_index; +/* + * bit 0~3, for HEVCD_IPP_AXIIF_CONFIG endian config + * bit 8~23, for HEVC_SAO_CTRL1 endian config + */ +static u32 endian; +#define HEVC_CONFIG_BIG_ENDIAN ((0x880 << 8) | 0x8) +#define HEVC_CONFIG_LITTLE_ENDIAN ((0xff0 << 8) | 0xf) + +#ifdef ERROR_HANDLE_DEBUG +static u32 dbg_nal_skip_flag; + /* bit[0], skip vps; bit[1], skip sps; bit[2], skip pps */ +static u32 dbg_nal_skip_count; +#endif +/*for debug*/ +static u32 force_bufspec; + +/* + udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode detail print + bit [31:16] not 0, pos to dump 
lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 udebug_pause_decode_idx; + +static u32 decode_pic_begin; +static uint slice_parse_begin; +static u32 step; +static bool is_reset; + +#ifdef CONSTRAIN_MAX_BUF_NUM +static u32 run_ready_max_vf_only_num; +static u32 run_ready_display_q_num; + /*0: not check + 0xff: work_pic_num + */ +static u32 run_ready_max_buf_num = 0xff; +#endif + +static u32 dynamic_buf_num_margin = 7; +static u32 buf_alloc_width; +static u32 buf_alloc_height; + +static u32 max_buf_num = 16; +static u32 buf_alloc_size; +/*static u32 re_config_pic_flag;*/ +/* + *bit[0]: 0, + *bit[1]: 0, always release cma buffer when stop + *bit[1]: 1, never release cma buffer when stop + *bit[0]: 1, when stop, release cma buffer if blackout is 1; + *do not release cma buffer is blackout is not 1 + * + *bit[2]: 0, when start decoding, check current displayed buffer + * (only for buffer decoded by h265) if blackout is 0 + * 1, do not check current displayed buffer + * + *bit[3]: 1, if blackout is not 1, do not release current + * displayed cma buffer always. + */ +/* set to 1 for fast play; + * set to 8 for other case of "keep last frame" + */ +static u32 buffer_mode = 1; + +/* buffer_mode_dbg: debug only*/ +static u32 buffer_mode_dbg = 0xffff0000; +/**/ +/* + *bit[1:0]PB_skip_mode: 0, start decoding at begin; + *1, start decoding after first I; + *2, only decode and display none error picture; + *3, start decoding and display after IDR,etc + *bit[31:16] PB_skip_count_after_decoding (decoding but not display), + *only for mode 0 and 1. 
+ */ +static u32 nal_skip_policy = 2; + +/* + *bit 0, 1: only display I picture; + *bit 1, 1: only decode I picture; + */ +static u32 i_only_flag; +static u32 skip_nal_count = 500; +/* +bit 0, fast output first I picture +*/ +static u32 fast_output_enable = 1; + +static u32 frmbase_cont_bitlevel = 0x60; + +/* +use_cma: 1, use both reserver memory and cma for buffers +2, only use cma for buffers +*/ +static u32 use_cma = 2; + +#define AUX_BUF_ALIGN(adr) ((adr + 0xf) & (~0xf)) +/* +static u32 prefix_aux_buf_size = (16 * 1024); +static u32 suffix_aux_buf_size; +*/ +static u32 prefix_aux_buf_size = (12 * 1024); +static u32 suffix_aux_buf_size = (12 * 1024); + +static u32 max_decoding_time; +/* + *error handling + */ +/*error_handle_policy: + *bit 0: 0, auto skip error_skip_nal_count nals before error recovery; + *1, skip error_skip_nal_count nals before error recovery; + *bit 1 (valid only when bit0 == 1): + *1, wait vps/sps/pps after error recovery; + *bit 2 (valid only when bit0 == 0): + *0, auto search after error recovery (hevc_recover() called); + *1, manual search after error recovery + *(change to auto search after get IDR: WRITE_VREG(NAL_SEARCH_CTL, 0x2)) + * + *bit 4: 0, set error_mark after reset/recover + * 1, do not set error_mark after reset/recover + * + *bit 5: 0, check total lcu for every picture + * 1, do not check total lcu + * + *bit 6: 0, do not check head error + * 1, check head error + * + *bit 7: 0, allow to print over decode + * 1, NOT allow to print over decode + * + *bit 8: 0, use interlace policy + * 1, NOT use interlace policy + *bit 9: 0, discard dirty data on playback start + * 1, do not discard dirty data on playback start + *bit 10:0, when ucode always returns again, it supports discarding data + * 1, When ucode always returns again, it does not support discarding data + */ + +static u32 error_handle_policy; +static u32 error_skip_nal_count = 6; +static u32 error_handle_threshold = 30; +static u32 error_handle_nal_skip_threshold = 10; 
+static u32 error_handle_system_threshold = 30; +static u32 interlace_enable = 1; +static u32 fr_hint_status; + + /* + *parser_sei_enable: + * bit 0, sei; + * bit 1, sei_suffix (fill aux buf) + * bit 2, fill sei to aux buf (when bit 0 is 1) + * bit 8, debug flag + */ +static u32 parser_sei_enable; +static u32 parser_dolby_vision_enable = 1; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static u32 dolby_meta_with_el; +static u32 dolby_el_flush_th = 2; +#endif +/* this is only for h265 mmu enable */ + +static u32 mmu_enable = 1; +static u32 mmu_enable_force; +static u32 work_buf_size; +static unsigned int force_disp_pic_index; +static unsigned int disp_vframe_valve_level; +static int pre_decode_buf_level = 0x1000; +static unsigned int pic_list_debug; +#ifdef HEVC_8K_LFTOFFSET_FIX + /* performance_profile: bit 0, multi slice in ucode + */ +static unsigned int performance_profile = 1; +#endif +#ifdef MULTI_INSTANCE_SUPPORT +static unsigned int max_decode_instance_num + = MAX_DECODE_INSTANCE_NUM; +static unsigned int decode_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int display_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int max_process_time[MAX_DECODE_INSTANCE_NUM]; +static unsigned int max_get_frame_interval[MAX_DECODE_INSTANCE_NUM]; +static unsigned int run_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int input_empty[MAX_DECODE_INSTANCE_NUM]; +static unsigned int not_run_ready[MAX_DECODE_INSTANCE_NUM]; +static unsigned int ref_frame_mark_flag[MAX_DECODE_INSTANCE_NUM] = +{1, 1, 1, 1, 1, 1, 1, 1, 1}; + +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +static unsigned char get_idx(struct hevc_state_s *hevc); +#endif + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static u32 dv_toggle_prov_name; + +static u32 dv_debug; + +static u32 force_bypass_dvenl; +#endif +#endif + +/* + *[3:0] 0: default use config from omx. + * 1: force enable fence. + * 2: disable fence. + *[7:4] 0: fence use for driver. + * 1: fence fd use for app. 
+ */ +static u32 force_config_fence; + +/* + *The parameter sps_max_dec_pic_buffering_minus1_0+1 + *in SPS is the minimum DPB size required for stream + *(note: this parameter does not include the frame + *currently being decoded) +1 (decoding the current + *frame) +1 (decoding the current frame will only + *update refrence frame information, such as reference + *relation, when the next frame is decoded) + */ +static u32 detect_stuck_buffer_margin = 3; + + +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +#define get_dbg_flag(hevc) ((debug_mask & (1 << hevc->index)) ? debug : 0) +#define get_dbg_flag2(hevc) ((debug_mask & (1 << get_idx(hevc))) ? debug : 0) +#define is_log_enable(hevc) ((log_mask & (1 << hevc->index)) ? 1 : 0) +#else +#define get_dbg_flag(hevc) debug +#define get_dbg_flag2(hevc) debug +#define is_log_enable(hevc) (log_mask ? 1 : 0) +#define get_valid_double_write_mode(hevc) double_write_mode +#define get_buf_alloc_width(hevc) buf_alloc_width +#define get_buf_alloc_height(hevc) buf_alloc_height +#define get_dynamic_buf_num_margin(hevc) dynamic_buf_num_margin +#endif +#define get_buffer_mode(hevc) buffer_mode + + +static DEFINE_SPINLOCK(lock); +struct task_struct *h265_task = NULL; +#undef DEBUG_REG +#ifdef DEBUG_REG +void WRITE_VREG_DBG(unsigned adr, unsigned val) +{ + if (debug & H265_DEBUG_REG) + pr_info("%s(%x, %x)\n", __func__, adr, val); + WRITE_VREG(adr, val); +} + +#undef WRITE_VREG +#define WRITE_VREG WRITE_VREG_DBG +#endif +extern u32 trickmode_i; + +static DEFINE_MUTEX(vh265_mutex); + +static DEFINE_MUTEX(vh265_log_mutex); + +//static struct vdec_info *gvs; + +static u32 without_display_mode; + +static u32 mv_buf_dynamic_alloc; + +/************************************************** + * + *h265 buffer management include + * + *************************************************** + */ +enum NalUnitType { + NAL_UNIT_CODED_SLICE_TRAIL_N = 0, /* 0 */ + NAL_UNIT_CODED_SLICE_TRAIL_R, /* 1 */ + + NAL_UNIT_CODED_SLICE_TSA_N, /* 2 */ + /* Current name in the 
spec: TSA_R */ + NAL_UNIT_CODED_SLICE_TLA, /* 3 */ + + NAL_UNIT_CODED_SLICE_STSA_N, /* 4 */ + NAL_UNIT_CODED_SLICE_STSA_R, /* 5 */ + + NAL_UNIT_CODED_SLICE_RADL_N, /* 6 */ + /* Current name in the spec: RADL_R */ + NAL_UNIT_CODED_SLICE_DLP, /* 7 */ + + NAL_UNIT_CODED_SLICE_RASL_N, /* 8 */ + /* Current name in the spec: RASL_R */ + NAL_UNIT_CODED_SLICE_TFD, /* 9 */ + + NAL_UNIT_RESERVED_10, + NAL_UNIT_RESERVED_11, + NAL_UNIT_RESERVED_12, + NAL_UNIT_RESERVED_13, + NAL_UNIT_RESERVED_14, + NAL_UNIT_RESERVED_15, + + /* Current name in the spec: BLA_W_LP */ + NAL_UNIT_CODED_SLICE_BLA, /* 16 */ + /* Current name in the spec: BLA_W_DLP */ + NAL_UNIT_CODED_SLICE_BLANT, /* 17 */ + NAL_UNIT_CODED_SLICE_BLA_N_LP, /* 18 */ + /* Current name in the spec: IDR_W_DLP */ + NAL_UNIT_CODED_SLICE_IDR, /* 19 */ + NAL_UNIT_CODED_SLICE_IDR_N_LP, /* 20 */ + NAL_UNIT_CODED_SLICE_CRA, /* 21 */ + NAL_UNIT_RESERVED_22, + NAL_UNIT_RESERVED_23, + + NAL_UNIT_RESERVED_24, + NAL_UNIT_RESERVED_25, + NAL_UNIT_RESERVED_26, + NAL_UNIT_RESERVED_27, + NAL_UNIT_RESERVED_28, + NAL_UNIT_RESERVED_29, + NAL_UNIT_RESERVED_30, + NAL_UNIT_RESERVED_31, + + NAL_UNIT_VPS, /* 32 */ + NAL_UNIT_SPS, /* 33 */ + NAL_UNIT_PPS, /* 34 */ + NAL_UNIT_ACCESS_UNIT_DELIMITER, /* 35 */ + NAL_UNIT_EOS, /* 36 */ + NAL_UNIT_EOB, /* 37 */ + NAL_UNIT_FILLER_DATA, /* 38 */ + NAL_UNIT_SEI, /* 39 Prefix SEI */ + NAL_UNIT_SEI_SUFFIX, /* 40 Suffix SEI */ + NAL_UNIT_RESERVED_41, + NAL_UNIT_RESERVED_42, + NAL_UNIT_RESERVED_43, + NAL_UNIT_RESERVED_44, + NAL_UNIT_RESERVED_45, + NAL_UNIT_RESERVED_46, + NAL_UNIT_RESERVED_47, + NAL_UNIT_UNSPECIFIED_48, + NAL_UNIT_UNSPECIFIED_49, + NAL_UNIT_UNSPECIFIED_50, + NAL_UNIT_UNSPECIFIED_51, + NAL_UNIT_UNSPECIFIED_52, + NAL_UNIT_UNSPECIFIED_53, + NAL_UNIT_UNSPECIFIED_54, + NAL_UNIT_UNSPECIFIED_55, + NAL_UNIT_UNSPECIFIED_56, + NAL_UNIT_UNSPECIFIED_57, + NAL_UNIT_UNSPECIFIED_58, + NAL_UNIT_UNSPECIFIED_59, + NAL_UNIT_UNSPECIFIED_60, + NAL_UNIT_UNSPECIFIED_61, + NAL_UNIT_UNSPECIFIED_62, + 
NAL_UNIT_UNSPECIFIED_63, + NAL_UNIT_INVALID, +}; + +/* --------------------------------------------------- */ +/* Amrisc Software Interrupt */ +/* --------------------------------------------------- */ +#define AMRISC_STREAM_EMPTY_REQ 0x01 +#define AMRISC_PARSER_REQ 0x02 +#define AMRISC_MAIN_REQ 0x04 + +/* --------------------------------------------------- */ +/* HEVC_DEC_STATUS define */ +/* --------------------------------------------------- */ +#define HEVC_DEC_IDLE 0x0 +#define HEVC_NAL_UNIT_VPS 0x1 +#define HEVC_NAL_UNIT_SPS 0x2 +#define HEVC_NAL_UNIT_PPS 0x3 +#define HEVC_NAL_UNIT_CODED_SLICE_SEGMENT 0x4 +#define HEVC_CODED_SLICE_SEGMENT_DAT 0x5 +#define HEVC_SLICE_DECODING 0x6 +#define HEVC_NAL_UNIT_SEI 0x7 +#define HEVC_SLICE_SEGMENT_DONE 0x8 +#define HEVC_NAL_SEARCH_DONE 0x9 +#define HEVC_DECPIC_DATA_DONE 0xa +#define HEVC_DECPIC_DATA_ERROR 0xb +#define HEVC_SEI_DAT 0xc +#define HEVC_SEI_DAT_DONE 0xd +#define HEVC_NAL_DECODE_DONE 0xe +#define HEVC_OVER_DECODE 0xf + +#define HEVC_DATA_REQUEST 0x12 + +#define HEVC_DECODE_BUFEMPTY 0x20 +#define HEVC_DECODE_TIMEOUT 0x21 +#define HEVC_SEARCH_BUFEMPTY 0x22 +#define HEVC_DECODE_OVER_SIZE 0x23 +#define HEVC_DECODE_BUFEMPTY2 0x24 +#define HEVC_FIND_NEXT_PIC_NAL 0x50 +#define HEVC_FIND_NEXT_DVEL_NAL 0x51 + +#define HEVC_DUMP_LMEM 0x30 + +#define HEVC_4k2k_60HZ_NOT_SUPPORT 0x80 +#define HEVC_DISCARD_NAL 0xf0 +#define HEVC_ACTION_DEC_CONT 0xfd +#define HEVC_ACTION_ERROR 0xfe +#define HEVC_ACTION_DONE 0xff + +/* --------------------------------------------------- */ +/* Include "parser_cmd.h" */ +/* --------------------------------------------------- */ +#define PARSER_CMD_SKIP_CFG_0 0x0000090b + +#define PARSER_CMD_SKIP_CFG_1 0x1b14140f + +#define PARSER_CMD_SKIP_CFG_2 0x001b1910 + +#define PARSER_CMD_NUMBER 37 + +/************************************************** + * + *h265 buffer management + * + *************************************************** + */ +/* #define BUFFER_MGR_ONLY */ +/* #define 
CONFIG_HEVC_CLK_FORCED_ON */ +/* #define ENABLE_SWAP_TEST */ +#define MCRCC_ENABLE +#define INVALID_POC 0x80000000 + +#define HEVC_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0 +#define HEVC_RPM_BUFFER HEVC_ASSIST_SCRATCH_1 +#define HEVC_SHORT_TERM_RPS HEVC_ASSIST_SCRATCH_2 +#define HEVC_VPS_BUFFER HEVC_ASSIST_SCRATCH_3 +#define HEVC_SPS_BUFFER HEVC_ASSIST_SCRATCH_4 +#define HEVC_PPS_BUFFER HEVC_ASSIST_SCRATCH_5 +#define HEVC_SAO_UP HEVC_ASSIST_SCRATCH_6 +#define HEVC_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7 +#define HEVC_STREAM_SWAP_BUFFER2 HEVC_ASSIST_SCRATCH_8 +#define HEVC_sao_mem_unit HEVC_ASSIST_SCRATCH_9 +#define HEVC_SAO_ABV HEVC_ASSIST_SCRATCH_A +#define HEVC_sao_vb_size HEVC_ASSIST_SCRATCH_B +#define HEVC_SAO_VB HEVC_ASSIST_SCRATCH_C +#define HEVC_SCALELUT HEVC_ASSIST_SCRATCH_D +#define HEVC_WAIT_FLAG HEVC_ASSIST_SCRATCH_E +#define RPM_CMD_REG HEVC_ASSIST_SCRATCH_F +#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_F +#ifdef ENABLE_SWAP_TEST +#define HEVC_STREAM_SWAP_TEST HEVC_ASSIST_SCRATCH_L +#endif + +/*#define HEVC_DECODE_PIC_BEGIN_REG HEVC_ASSIST_SCRATCH_M*/ +/*#define HEVC_DECODE_PIC_NUM_REG HEVC_ASSIST_SCRATCH_N*/ +#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N + /*do not define ENABLE_SWAP_TEST*/ +#define HEVC_AUX_ADR HEVC_ASSIST_SCRATCH_L +#define HEVC_AUX_DATA_SIZE HEVC_ASSIST_SCRATCH_M + +#define DEBUG_REG1 HEVC_ASSIST_SCRATCH_G +#define DEBUG_REG2 HEVC_ASSIST_SCRATCH_H +/* + *ucode parser/search control + *bit 0: 0, header auto parse; 1, header manual parse + *bit 1: 0, auto skip for noneseamless stream; 1, no skip + *bit [3:2]: valid when bit1==0; + *0, auto skip nal before first vps/sps/pps/idr; + *1, auto skip nal before first vps/sps/pps + *2, auto skip nal before first vps/sps/pps, + * and not decode until the first I slice (with slice address of 0) + * + *3, auto skip before first I slice (nal_type >=16 && nal_type<=21) + *bit [15:4] nal skip count (valid when bit0 == 1 (manual mode) ) + *bit [16]: for NAL_UNIT_EOS when bit0 is 0: + * 0, send SEARCH_DONE 
to arm ; 1, do not send SEARCH_DONE to arm + *bit [17]: for NAL_SEI when bit0 is 0: + * 0, do not parse/fetch SEI in ucode; + * 1, parse/fetch SEI in ucode + *bit [18]: for NAL_SEI_SUFFIX when bit0 is 0: + * 0, do not fetch NAL_SEI_SUFFIX to aux buf; + * 1, fetch NAL_SEL_SUFFIX data to aux buf + *bit [19]: + * 0, parse NAL_SEI in ucode + * 1, fetch NAL_SEI to aux buf + *bit [20]: for DOLBY_VISION_META + * 0, do not fetch DOLBY_VISION_META to aux buf + * 1, fetch DOLBY_VISION_META to aux buf + */ +#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I + /*read only*/ +#define CUR_NAL_UNIT_TYPE HEVC_ASSIST_SCRATCH_J + /* + [15 : 8] rps_set_id + [7 : 0] start_decoding_flag + */ +#define HEVC_DECODE_INFO HEVC_ASSIST_SCRATCH_1 + /*set before start decoder*/ +#define HEVC_DECODE_MODE HEVC_ASSIST_SCRATCH_J +#define HEVC_DECODE_MODE2 HEVC_ASSIST_SCRATCH_H +#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K + +#define DECODE_MODE_SINGLE 0x0 +#define DECODE_MODE_MULTI_FRAMEBASE 0x1 +#define DECODE_MODE_MULTI_STREAMBASE 0x2 +#define DECODE_MODE_MULTI_DVBAL 0x3 +#define DECODE_MODE_MULTI_DVENL 0x4 + +#define MAX_INT 0x7FFFFFFF + +#define RPM_BEGIN 0x100 +#define modification_list_cur 0x148 +#define RPM_END 0x180 +#ifdef SUPPORT_LONG_TERM_RPS +/* + */ +#define RPS_END 0x8000 +#define RPS_LT_BIT 14 +#define RPS_USED_BIT 13 +#define RPS_SIGN_BIT 12 +#else +#define RPS_END 0x8000 +#define RPS_USED_BIT 14 +#define RPS_SIGN_BIT 13 +#endif +/* MISC_FLAG0 */ +#define PCM_LOOP_FILTER_DISABLED_FLAG_BIT 0 +#define PCM_ENABLE_FLAG_BIT 1 +#define LOOP_FILER_ACROSS_TILES_ENABLED_FLAG_BIT 2 +#define PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT 3 +#define DEBLOCKING_FILTER_OVERRIDE_ENABLED_FLAG_BIT 4 +#define PPS_DEBLOCKING_FILTER_DISABLED_FLAG_BIT 5 +#define DEBLOCKING_FILTER_OVERRIDE_FLAG_BIT 6 +#define SLICE_DEBLOCKING_FILTER_DISABLED_FLAG_BIT 7 +#define SLICE_SAO_LUMA_FLAG_BIT 8 +#define SLICE_SAO_CHROMA_FLAG_BIT 9 +#define SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT 10 + +union param_u { + 
struct { + unsigned short data[RPM_END - RPM_BEGIN]; + } l; + struct { + /* from ucode lmem, do not change this struct */ + unsigned short CUR_RPS[0x10]; + unsigned short num_ref_idx_l0_active; + unsigned short num_ref_idx_l1_active; + unsigned short slice_type; + unsigned short slice_temporal_mvp_enable_flag; + unsigned short dependent_slice_segment_flag; + unsigned short slice_segment_address; + unsigned short num_title_rows_minus1; + unsigned short pic_width_in_luma_samples; + unsigned short pic_height_in_luma_samples; + unsigned short log2_min_coding_block_size_minus3; + unsigned short log2_diff_max_min_coding_block_size; + unsigned short log2_max_pic_order_cnt_lsb_minus4; + unsigned short POClsb; + unsigned short collocated_from_l0_flag; + unsigned short collocated_ref_idx; + unsigned short log2_parallel_merge_level; + unsigned short five_minus_max_num_merge_cand; + unsigned short sps_num_reorder_pics_0; + unsigned short modification_flag; + unsigned short tiles_enabled_flag; + unsigned short num_tile_columns_minus1; + unsigned short num_tile_rows_minus1; + unsigned short tile_width[12]; + unsigned short tile_height[8]; + unsigned short misc_flag0; + unsigned short pps_beta_offset_div2; + unsigned short pps_tc_offset_div2; + unsigned short slice_beta_offset_div2; + unsigned short slice_tc_offset_div2; + unsigned short pps_cb_qp_offset; + unsigned short pps_cr_qp_offset; + unsigned short first_slice_segment_in_pic_flag; + unsigned short m_temporalId; + unsigned short m_nalUnitType; + + unsigned short vui_num_units_in_tick_hi; + unsigned short vui_num_units_in_tick_lo; + unsigned short vui_time_scale_hi; + unsigned short vui_time_scale_lo; + unsigned short bit_depth; + unsigned short profile_etc; + unsigned short sei_frame_field_info; + unsigned short video_signal_type; + unsigned short modification_list[0x20]; + unsigned short conformance_window_flag; + unsigned short conf_win_left_offset; + unsigned short conf_win_right_offset; + unsigned short 
conf_win_top_offset; + unsigned short conf_win_bottom_offset; + unsigned short chroma_format_idc; + unsigned short color_description; + unsigned short aspect_ratio_idc; + unsigned short sar_width; + unsigned short sar_height; + unsigned short sps_max_dec_pic_buffering_minus1_0; + } p; +}; + +#define RPM_BUF_SIZE (0x80*2) +/* non mmu mode lmem size : 0x400, mmu mode : 0x500*/ +#define LMEM_BUF_SIZE (0x500 * 2) + +struct buff_s { + u32 buf_start; + u32 buf_size; + u32 buf_end; +}; + +struct BuffInfo_s { + u32 max_width; + u32 max_height; + unsigned int start_adr; + unsigned int end_adr; + struct buff_s ipp; + struct buff_s sao_abv; + struct buff_s sao_vb; + struct buff_s short_term_rps; + struct buff_s vps; + struct buff_s sps; + struct buff_s pps; + struct buff_s sao_up; + struct buff_s swap_buf; + struct buff_s swap_buf2; + struct buff_s scalelut; + struct buff_s dblk_para; + struct buff_s dblk_data; + struct buff_s dblk_data2; + struct buff_s mmu_vbh; + struct buff_s cm_header; +#ifdef H265_10B_MMU_DW + struct buff_s mmu_vbh_dw; + struct buff_s cm_header_dw; +#endif + struct buff_s mpred_above; +#ifdef MV_USE_FIXED_BUF + struct buff_s mpred_mv; +#endif + struct buff_s rpm; + struct buff_s lmem; +}; + +//#define VBH_BUF_SIZE (2 * 16 * 2304) +//#define VBH_BUF_COUNT 4 + +/*mmu_vbh buf is used by HEVC_SAO_MMU_VH0_ADDR, HEVC_SAO_MMU_VH1_ADDR*/ +#define VBH_BUF_SIZE_1080P 0x3000 +#define VBH_BUF_SIZE_4K 0x5000 +#define VBH_BUF_SIZE_8K 0xa000 +#define VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh.buf_size / 2) + /*mmu_vbh_dw buf is used by HEVC_SAO_MMU_VH0_ADDR2,HEVC_SAO_MMU_VH1_ADDR2, + HEVC_DW_VH0_ADDDR, HEVC_DW_VH1_ADDDR*/ +#define DW_VBH_BUF_SIZE_1080P (VBH_BUF_SIZE_1080P * 2) +#define DW_VBH_BUF_SIZE_4K (VBH_BUF_SIZE_4K * 2) +#define DW_VBH_BUF_SIZE_8K (VBH_BUF_SIZE_8K * 2) +#define DW_VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh_dw.buf_size / 4) + +/* necessary 4K page size align for t7/t3 decoder and after */ +#define WORKBUF_ALIGN(addr) (ALIGN(addr, PAGE_SIZE)) + +#define 
WORK_BUF_SPEC_NUM 6 +static struct BuffInfo_s amvh265_workbuff_spec[WORK_BUF_SPEC_NUM] = { + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x30000, + }, + .sao_vb = { + .buf_size = 0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = + * 32Kbytes (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = { +#ifdef SUPPORT_10BIT + .buf_size = 0x40000, +#else + /* DBLK -> Max 256(4096/16) LCU, each para + *512bytes(total:0x20000), data 1024bytes(total:0x40000) + */ + .buf_size = 0x20000, +#endif + }, + .dblk_data = { + .buf_size = 0x40000, + }, + .dblk_data2 = { + .buf_size = 0x80000 * 2, + }, /*dblk data for adapter*/ + .mmu_vbh = { + .buf_size = 0x5000, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = {/* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (MAX_REF_PIC_NUM + 1), + }, +#endif + .mpred_above = { + .buf_size = 0x8000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = {/* 1080p, 0x40000 per buffer */ + .buf_size = 0x40000 * MAX_REF_PIC_NUM, + }, +#endif + .rpm = { + 
.buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x500 * 2, + } + }, + { + .max_width = 4096, + .max_height = 2048, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x1e00, + }, + .sao_abv = { + .buf_size = 0, //0x30000, + }, + .sao_vb = { + .buf_size = 0, //0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + * (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, each para + * 512bytes(total:0x20000), + * data 1024bytes(total:0x40000) + */ + .buf_size = 0x20000, + }, + .dblk_data = { + .buf_size = 0x80000, + }, + .dblk_data2 = { + .buf_size = 0x80000, + }, /*dblk data for adapter*/ + .mmu_vbh = { + .buf_size = 0x5000, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = {/*0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (MAX_REF_PIC_NUM + 1), + }, +#endif + .mpred_above = { + .buf_size = 0x8000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = MPRED_4K_MV_BUF_SIZE * MAX_REF_PIC_NUM, + }, +#endif + .rpm = 
{ + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x500 * 2, + } + }, + + { + .max_width = 4096*2, + .max_height = 2048*2, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000*2, + }, + .sao_abv = { + .buf_size = 0x30000*2, + }, + .sao_vb = { + .buf_size = 0x30000*2, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .sps = { + // SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .pps = { + // PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total 0x2000 bytes + .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0x2800*2, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x8000*2, + }, + .dblk_para = {.buf_size = 0x40000*2, }, // dblk parameter + .dblk_data = {.buf_size = 0x80000*2, }, // dblk data for left/top + .dblk_data2 = {.buf_size = 0x80000*2, }, // dblk data for adapter + .mmu_vbh = { + .buf_size = 0x5000*2, //2*16*2304/4, 4K + }, +#if 0 + .cm_header = { + .buf_size = MMU_COMPRESS_8K_HEADER_SIZE * + MAX_REF_PIC_NUM, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif + .mpred_above = { + .buf_size = 0x8000*2, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + .buf_size = MPRED_8K_MV_BUF_SIZE * MAX_REF_PIC_NUM, //4k2k , 0x120000 per buffer + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x500 * 2, + }, + }, + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = {/*checked*/ + /* IPP work space calculation : + * 4096 * 
(Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x1e00, + }, + .sao_abv = { + .buf_size = 0, //0x30000, + }, + .sao_vb = { + .buf_size = 0, //0x30000, + }, + .short_term_rps = {/*checked*/ + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = {/*checked*/ + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = {/*checked*/ + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = {/*checked*/ + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0, //0x2800, + }, + .swap_buf = {/*checked*/ + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = {/*checked*/ + .buf_size = 0x800, + }, + .scalelut = {/*checked*/ + /* support up to 32 SCALELUT 1024x32 = + * 32Kbytes (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = {.buf_size = 0x14500, }, // dblk parameter + .dblk_data = {.buf_size = 0x62800, }, // dblk data for left/top + .dblk_data2 = {.buf_size = 0x22800, }, // dblk data for adapter + .mmu_vbh = {/*checked*/ + .buf_size = VBH_BUF_SIZE_1080P, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = {/*checked*//* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE_1080P * + (MAX_REF_PIC_NUM + 1), + }, +#endif +#ifdef H265_10B_MMU_DW + .mmu_vbh_dw = {/*checked*/ + .buf_size = DW_VBH_BUF_SIZE_1080P, //VBH_BUF_SIZE * VBH_BUF_COUNT, //2*16*2304/4, 4K + }, +#ifdef USE_FIXED_MMU_DW_HEADER + .cm_header_dw = {/*checked*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE_1080P * DB_NUM, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif +#endif + .mpred_above = {/*checked*/ + .buf_size = 0x1e00, + }, +#ifdef MV_USE_FIXED_BUF 
+ .mpred_mv = {/*checked*//* 1080p, 0x40000 per buffer */ + .buf_size = MPRED_MV_BUF_SIZE * MAX_REF_PIC_NUM, + }, +#endif + .rpm = {/*checked*/ + .buf_size = RPM_BUF_SIZE, + }, + .lmem = {/*checked*/ + .buf_size = 0x500 * 2, + } + }, + { + .max_width = 4096, + .max_height = 2048, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0, //0x30000, + }, + .sao_vb = { + .buf_size = 0, //0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0, //0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + * (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = {.buf_size = 0x19100, }, // dblk parameter + .dblk_data = {.buf_size = 0x88800, }, // dblk data for left/top + .dblk_data2 = {.buf_size = 0x48800, }, // dblk data for adapter + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_4K, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = {/*0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE_4K * + (MAX_REF_PIC_NUM + 1), + }, +#endif +#ifdef H265_10B_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_4K, //VBH_BUF_SIZE * VBH_BUF_COUNT, //2*16*(more than 2304)/4, 4K + }, +#ifdef USE_FIXED_MMU_DW_HEADER + 
.cm_header_dw = { + .buf_size = MMU_COMPRESS_HEADER_SIZE_4K * DB_NUM, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x4000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = MPRED_4K_MV_BUF_SIZE * MAX_REF_PIC_NUM, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x500 * 2, + } + }, + + { + .max_width = 4096*2, + .max_height = 2048*2, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000*2, + }, + .sao_abv = { + .buf_size = 0, //0x30000*2, + }, + .sao_vb = { + .buf_size = 0, //0x30000*2, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .sps = { + // SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .pps = { + // PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total 0x2000 bytes + .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0, //0x2800*2, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x8000, //0x8000*2, + }, + .dblk_para = {.buf_size = 0x32100, }, // dblk parameter + .dblk_data = {.buf_size = 0x110800, }, // dblk data for left/top + .dblk_data2 = {.buf_size = 0x90800, }, // dblk data for adapter + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_8K, //2*16*2304/4, 4K + }, +#if 0 + .cm_header = { + .buf_size = MMU_COMPRESS_HEADER_SIZE_8K * + MAX_REF_PIC_NUM, // 0x44000 = 
((1088*2*1024*4)/32/4)*(32/8) + }, +#endif +#ifdef H265_10B_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_8K, //VBH_BUF_SIZE * VBH_BUF_COUNT, //2*16*2304/4, 4K + }, +#ifdef USE_FIXED_MMU_DW_HEADER + .cm_header_dw = { + .buf_size = MMU_COMPRESS_HEADER_SIZE_8K * DB_NUM, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x8000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + .buf_size = MPRED_8K_MV_BUF_SIZE * MAX_REF_PIC_NUM, //4k2k , 0x120000 per buffer + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x500 * 2, + }, + } +}; + +static void init_buff_spec(struct hevc_state_s *hevc, + struct BuffInfo_s *buf_spec) +{ + buf_spec->ipp.buf_start = + WORKBUF_ALIGN(buf_spec->start_adr); + buf_spec->sao_abv.buf_start = + WORKBUF_ALIGN(buf_spec->ipp.buf_start + buf_spec->ipp.buf_size); + buf_spec->sao_vb.buf_start = + WORKBUF_ALIGN(buf_spec->sao_abv.buf_start + buf_spec->sao_abv.buf_size); + buf_spec->short_term_rps.buf_start = + WORKBUF_ALIGN(buf_spec->sao_vb.buf_start + buf_spec->sao_vb.buf_size); + buf_spec->vps.buf_start = + WORKBUF_ALIGN(buf_spec->short_term_rps.buf_start + buf_spec->short_term_rps.buf_size); + buf_spec->sps.buf_start = + WORKBUF_ALIGN(buf_spec->vps.buf_start + buf_spec->vps.buf_size); + buf_spec->pps.buf_start = + WORKBUF_ALIGN(buf_spec->sps.buf_start + buf_spec->sps.buf_size); + buf_spec->sao_up.buf_start = + WORKBUF_ALIGN(buf_spec->pps.buf_start + buf_spec->pps.buf_size); + buf_spec->swap_buf.buf_start = + WORKBUF_ALIGN(buf_spec->sao_up.buf_start + buf_spec->sao_up.buf_size); + buf_spec->swap_buf2.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf.buf_start + buf_spec->swap_buf.buf_size); + buf_spec->scalelut.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf2.buf_start + buf_spec->swap_buf2.buf_size); + buf_spec->dblk_para.buf_start = + WORKBUF_ALIGN(buf_spec->scalelut.buf_start + buf_spec->scalelut.buf_size); + buf_spec->dblk_data.buf_start = + 
WORKBUF_ALIGN(buf_spec->dblk_para.buf_start + buf_spec->dblk_para.buf_size); + buf_spec->dblk_data2.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data.buf_start + buf_spec->dblk_data.buf_size); + buf_spec->mmu_vbh.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data2.buf_start + buf_spec->dblk_data2.buf_size); +#ifdef H265_10B_MMU_DW + buf_spec->mmu_vbh_dw.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size); + buf_spec->cm_header_dw.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh_dw.buf_start + buf_spec->mmu_vbh_dw.buf_size); + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->cm_header_dw.buf_start + buf_spec->cm_header_dw.buf_size); +#else + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size); +#endif +#ifdef MV_USE_FIXED_BUF + buf_spec->mpred_mv.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_mv.buf_start + buf_spec->mpred_mv.buf_size); +#else + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); +#endif + buf_spec->lmem.buf_start = + WORKBUF_ALIGN(buf_spec->rpm.buf_start + buf_spec->rpm.buf_size); + buf_spec->end_adr = + WORKBUF_ALIGN(buf_spec->lmem.buf_start + buf_spec->lmem.buf_size); + + if (hevc && get_dbg_flag2(hevc)) { + hevc_print(hevc, 0, + "%s workspace (%x %x) size = %x\n", __func__, + buf_spec->start_adr, buf_spec->end_adr, + buf_spec->end_adr - buf_spec->start_adr); + + hevc_print(hevc, 0, + "ipp.buf_start :%x\n", + buf_spec->ipp.buf_start); + hevc_print(hevc, 0, + "sao_abv.buf_start :%x\n", + buf_spec->sao_abv.buf_start); + hevc_print(hevc, 0, + "sao_vb.buf_start :%x\n", + buf_spec->sao_vb.buf_start); + hevc_print(hevc, 0, + "short_term_rps.buf_start :%x\n", + buf_spec->short_term_rps.buf_start); + hevc_print(hevc, 0, + "vps.buf_start :%x\n", + buf_spec->vps.buf_start); + hevc_print(hevc, 0, + 
"sps.buf_start :%x\n", + buf_spec->sps.buf_start); + hevc_print(hevc, 0, + "pps.buf_start :%x\n", + buf_spec->pps.buf_start); + hevc_print(hevc, 0, + "sao_up.buf_start :%x\n", + buf_spec->sao_up.buf_start); + hevc_print(hevc, 0, + "swap_buf.buf_start :%x\n", + buf_spec->swap_buf.buf_start); + hevc_print(hevc, 0, + "swap_buf2.buf_start :%x\n", + buf_spec->swap_buf2.buf_start); + hevc_print(hevc, 0, + "scalelut.buf_start :%x\n", + buf_spec->scalelut.buf_start); + hevc_print(hevc, 0, + "dblk_para.buf_start :%x\n", + buf_spec->dblk_para.buf_start); + hevc_print(hevc, 0, + "dblk_data.buf_start :%x\n", + buf_spec->dblk_data.buf_start); + hevc_print(hevc, 0, + "dblk_data2.buf_start :%x\n", + buf_spec->dblk_data2.buf_start); +#ifdef H265_10B_MMU_DW + hevc_print(hevc, 0, + "mmu_vbh_dw.buf_start :%x\n", + buf_spec->mmu_vbh_dw.buf_start); + hevc_print(hevc, 0, + "cm_header_dw.buf_start :%x\n", + buf_spec->cm_header_dw.buf_start); +#endif + hevc_print(hevc, 0, + "mpred_above.buf_start :%x\n", + buf_spec->mpred_above.buf_start); +#ifdef MV_USE_FIXED_BUF + hevc_print(hevc, 0, + "mpred_mv.buf_start :%x\n", + buf_spec->mpred_mv.buf_start); +#endif + if ((get_dbg_flag2(hevc) + & + H265_DEBUG_SEND_PARAM_WITH_REG) + == 0) { + hevc_print(hevc, 0, + "rpm.buf_start :%x\n", + buf_spec->rpm.buf_start); + } + } + +} + +enum SliceType { + B_SLICE, + P_SLICE, + I_SLICE +}; + +/*USE_BUF_BLOCK*/ +struct BUF_s { + ulong start_adr; + u32 size; + u32 luma_size; + ulong header_addr; + u32 header_size; + int used_flag; + ulong v4l_ref_buf_addr; + ulong chroma_addr; + u32 chroma_size; +} /*BUF_t */; + +/* level 6, 6.1 maximum slice number is 800; other is 200 */ +#define MAX_SLICE_NUM 800 +struct PIC_s { + int index; + int scatter_alloc; + int BUF_index; + int mv_buf_index; + int POC; + int decode_idx; + int slice_type; + int RefNum_L0; + int RefNum_L1; + int num_reorder_pic; + int stream_offset; + unsigned char referenced; + unsigned char output_mark; + unsigned char recon_mark; + unsigned char 
output_ready; + unsigned char error_mark; + //dis_mark = 0:discard mark,dis_mark = 1:no discard mark + unsigned char dis_mark; + /**/ int slice_idx; + int m_aiRefPOCList0[MAX_SLICE_NUM][16]; + int m_aiRefPOCList1[MAX_SLICE_NUM][16]; +#ifdef SUPPORT_LONG_TERM_RPS + unsigned char long_term_ref; + unsigned char m_aiRefLTflgList0[MAX_SLICE_NUM][16]; + unsigned char m_aiRefLTflgList1[MAX_SLICE_NUM][16]; +#endif + /*buffer */ + unsigned int header_adr; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + unsigned char dv_enhance_exist; +#endif + char *aux_data_buf; + int aux_data_size; + unsigned long cma_alloc_addr; + struct page *alloc_pages; + unsigned int mpred_mv_wr_start_addr; + int mv_size; + unsigned int mc_y_adr; + unsigned int mc_u_v_adr; +#ifdef SUPPORT_10BIT + /*unsigned int comp_body_size;*/ + unsigned int dw_y_adr; + unsigned int dw_u_v_adr; +#endif + u32 luma_size; + u32 chroma_size; + + int mc_canvas_y; + int mc_canvas_u_v; + int width; + int height; + + int y_canvas_index; + int uv_canvas_index; +#ifdef MULTI_INSTANCE_SUPPORT + struct canvas_config_s canvas_config[2]; +#endif +#ifdef SUPPORT_10BIT + int mem_saving_mode; + u32 bit_depth_luma; + u32 bit_depth_chroma; +#endif +#ifdef LOSLESS_COMPRESS_MODE + unsigned int losless_comp_body_size; +#endif +#ifdef H265_10B_MMU_DW + u32 header_dw_adr; +#endif + unsigned char pic_struct; + int vf_ref; + + u32 pts; + u64 pts64; + u64 timestamp; + + u32 aspect_ratio_idc; + u32 sar_width; + u32 sar_height; + u32 double_write_mode; + u32 video_signal_type; + unsigned short conformance_window_flag; + unsigned short conf_win_left_offset; + unsigned short conf_win_right_offset; + unsigned short conf_win_top_offset; + unsigned short conf_win_bottom_offset; + unsigned short chroma_format_idc; + + /* picture qos infomation*/ + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; + + u32 hw_decode_time; + u32 frame_size; // For frame base mode + 
bool ip_mode; + u32 hdr10p_data_size; + char *hdr10p_data_buf; + struct dma_fence *fence; + bool show_frame; +} /*PIC_t */; + +#define MAX_TILE_COL_NUM 10 +#define MAX_TILE_ROW_NUM 20 +struct tile_s { + int width; + int height; + int start_cu_x; + int start_cu_y; + + unsigned int sao_vb_start_addr; + unsigned int sao_abv_start_addr; +}; + +#define SEI_MASTER_DISPLAY_COLOR_MASK 0x00000001 +#define SEI_CONTENT_LIGHT_LEVEL_MASK 0x00000002 +#define SEI_HDR10PLUS_MASK 0x00000004 +#define SEI_HDR_CUVA_MASK 0x00000008 + + +#define VF_POOL_SIZE 32 + +#ifdef MULTI_INSTANCE_SUPPORT +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_CONFIG_PARAM 3 +#define DEC_RESULT_ERROR 4 +#define DEC_INIT_PICLIST 5 +#define DEC_UNINIT_PICLIST 6 +#define DEC_RESULT_GET_DATA 7 +#define DEC_RESULT_GET_DATA_RETRY 8 +#define DEC_RESULT_EOS 9 +#define DEC_RESULT_FORCE_EXIT 10 +#define DEC_RESULT_FREE_CANVAS 11 + + +static void vh265_work(struct work_struct *work); +static void vh265_timeout_work(struct work_struct *work); +static void vh265_notify_work(struct work_struct *work); + +#endif + +struct debug_log_s { + struct list_head list; + uint8_t data; /*will alloc more size*/ +}; + +struct mh265_fence_vf_t { + u32 used_size; + struct vframe_s *fence_vf[VF_POOL_SIZE]; +}; + +struct hevc_state_s { +#ifdef MULTI_INSTANCE_SUPPORT + struct platform_device *platform_dev; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + struct vframe_chunk_s *chunk; + int dec_result; + u32 timeout_processing; + struct work_struct work; + struct work_struct timeout_work; + struct work_struct notify_work; + struct work_struct set_clk_work; + /* timeout handle */ + unsigned long int start_process_time; + unsigned int last_lcu_idx; + unsigned int decode_timeout_count; + unsigned int timeout_num; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + unsigned char switch_dvlayer_flag; + unsigned char no_switch_dvlayer_count; + unsigned char 
bypass_dvenl_enable; + unsigned char bypass_dvenl; +#endif + unsigned char start_parser_type; + /*start_decoding_flag: + vps/pps/sps/idr info from ucode*/ + unsigned char start_decoding_flag; + unsigned char rps_set_id; + unsigned char eos; + int pic_decoded_lcu_idx; + u8 over_decode; + u8 empty_flag; +#endif + struct vframe_s vframe_dummy; + char *provider_name; + int index; + struct device *cma_dev; + unsigned char m_ins_flag; + unsigned char dolby_enhance_flag; + unsigned long buf_start; + u32 buf_size; + u32 mv_buf_size; + + struct BuffInfo_s work_space_buf_store; + struct BuffInfo_s *work_space_buf; + + u8 aux_data_dirty; + u32 prefix_aux_size; + u32 suffix_aux_size; + void *aux_addr; + void *rpm_addr; + void *lmem_addr; + dma_addr_t aux_phy_addr; + dma_addr_t rpm_phy_addr; + dma_addr_t lmem_phy_addr; + + unsigned int pic_list_init_flag; + unsigned int use_cma_flag; + + unsigned short *rpm_ptr; + unsigned short *lmem_ptr; + unsigned short *debug_ptr; + int debug_ptr_size; + int pic_w; + int pic_h; + int lcu_x_num; + int lcu_y_num; + int lcu_total; + int lcu_size; + int lcu_size_log2; + int lcu_x_num_pre; + int lcu_y_num_pre; + int first_pic_after_recover; + + int num_tile_col; + int num_tile_row; + int tile_enabled; + int tile_x; + int tile_y; + int tile_y_x; + int tile_start_lcu_x; + int tile_start_lcu_y; + int tile_width_lcu; + int tile_height_lcu; + + int slice_type; + unsigned int slice_addr; + unsigned int slice_segment_addr; + + unsigned char interlace_flag; + unsigned char curr_pic_struct; + unsigned char frame_field_info_present_flag; + + unsigned short sps_num_reorder_pics_0; + unsigned short misc_flag0; + int m_temporalId; + int m_nalUnitType; + int TMVPFlag; + int isNextSliceSegment; + int LDCFlag; + int m_pocRandomAccess; + int plevel; + int MaxNumMergeCand; + + int new_pic; + int new_tile; + int curr_POC; + int iPrevPOC; +#ifdef MULTI_INSTANCE_SUPPORT + int decoded_poc; + struct PIC_s *decoding_pic; +#endif + int iPrevTid0POC; + int list_no; + int 
RefNum_L0; + int RefNum_L1; + int ColFromL0Flag; + int LongTerm_Curr; + int LongTerm_Col; + int Col_POC; + int LongTerm_Ref; +#ifdef MULTI_INSTANCE_SUPPORT + int m_pocRandomAccess_bak; + int curr_POC_bak; + int iPrevPOC_bak; + int iPrevTid0POC_bak; + unsigned char start_parser_type_bak; + unsigned char start_decoding_flag_bak; + unsigned char rps_set_id_bak; + int pic_decoded_lcu_idx_bak; + int decode_idx_bak; +#endif + struct PIC_s *cur_pic; + struct PIC_s *col_pic; + int skip_flag; + int decode_idx; + int slice_idx; + unsigned char have_vps; + unsigned char have_sps; + unsigned char have_pps; + unsigned char have_valid_start_slice; + unsigned char wait_buf; + unsigned char error_flag; + unsigned int error_skip_nal_count; + long used_4k_num; + + unsigned char + ignore_bufmgr_error; /* bit 0, for decoding; + bit 1, for displaying + bit 1 must be set if bit 0 is 1*/ + int PB_skip_mode; + int PB_skip_count_after_decoding; +#ifdef SUPPORT_10BIT + int mem_saving_mode; +#endif +#ifdef LOSLESS_COMPRESS_MODE + unsigned int losless_comp_body_size; +#endif + int pts_mode; + int last_lookup_pts; + int last_pts; + u64 last_lookup_pts_us64; + u64 last_pts_us64; + u32 shift_byte_count_lo; + u32 shift_byte_count_hi; + int pts_mode_switching_count; + int pts_mode_recovery_count; + + int pic_num; + + /**/ + union param_u param; + + struct tile_s m_tile[MAX_TILE_ROW_NUM][MAX_TILE_COL_NUM]; + + struct timer_list timer; + struct BUF_s m_BUF[BUF_POOL_SIZE]; + struct BUF_s m_mv_BUF[MAX_REF_PIC_NUM]; + struct PIC_s *m_PIC[MAX_REF_PIC_NUM]; + + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(pending_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + + u32 stat; + u32 frame_width; + u32 frame_height; + u32 frame_dur; + u32 frame_ar; + u32 bit_depth_luma; + u32 bit_depth_chroma; + u32 video_signal_type; + u32 video_signal_type_debug; + u32 saved_resolution; + bool 
get_frame_dur; + u32 error_watchdog_count; + u32 error_skip_nal_wt_cnt; + u32 error_system_watchdog_count; + +#ifdef DEBUG_PTS + unsigned long pts_missed; + unsigned long pts_hit; +#endif + struct dec_sysinfo vh265_amstream_dec_info; + unsigned char init_flag; + unsigned char first_sc_checked; + unsigned char uninit_list; + u32 start_decoding_time; + + int show_frame_num; +#ifdef USE_UNINIT_SEMA + struct semaphore h265_uninit_done_sema; +#endif + int fatal_error; + + + u32 sei_present_flag; + void *frame_mmu_map_addr; + dma_addr_t frame_mmu_map_phy_addr; + unsigned int mmu_mc_buf_start; + unsigned int mmu_mc_buf_end; + unsigned int mmu_mc_start_4k_adr; + void *mmu_box; + void *bmmu_box; + int mmu_enable; +#ifdef H265_10B_MMU_DW + void *frame_dw_mmu_map_addr; + dma_addr_t frame_dw_mmu_map_phy_addr; + void *mmu_box_dw; + int dw_mmu_enable; +#endif + + unsigned int dec_status; + + /* data for SEI_MASTER_DISPLAY_COLOR */ + unsigned int primaries[3][2]; + unsigned int white_point[2]; + unsigned int luminance[2]; + /* data for SEI_CONTENT_LIGHT_LEVEL */ + unsigned int content_light_level[2]; + + struct PIC_s *pre_top_pic; + struct PIC_s *pre_bot_pic; + +#ifdef MULTI_INSTANCE_SUPPORT + int double_write_mode; + int dynamic_buf_num_margin; + int start_action; + int save_buffer_mode; +#endif + u32 i_only; + struct list_head log_list; + u32 ucode_pause_pos; + u32 start_shift_bytes; + + u32 vf_pre_count; + atomic_t vf_get_count; + atomic_t vf_put_count; +#ifdef SWAP_HEVC_UCODE + dma_addr_t mc_dma_handle; + void *mc_cpu_addr; + int swap_size; + ulong swap_addr; +#endif +#ifdef DETREFILL_ENABLE + dma_addr_t detbuf_adr; + u16 *detbuf_adr_virt; + u8 delrefill_check; +#endif + u8 head_error_flag; + int valve_count; + struct firmware_s *fw; + int max_pic_w; + int max_pic_h; +#ifdef AGAIN_HAS_THRESHOLD + u8 next_again_flag; + u32 pre_parser_wr_ptr; +#endif + u32 ratio_control; + u32 first_pic_flag; + u32 decode_size; + struct mutex chunks_mutex; + int need_cache_size; + u64 
sc_start_time; + u32 skip_nal_count; + bool is_swap; + bool is_4k; + + int frameinfo_enable; + struct vframe_qos_s vframe_qos; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + u32 mem_map_mode; + u32 performance_profile; + struct vdec_info *gvs; + bool ip_mode; + u32 kpi_first_i_comming; + u32 kpi_first_i_decoded; + int sidebind_type; + int sidebind_channel_id; + u32 pre_parser_video_rp; + u32 pre_parser_video_wp; + bool dv_duallayer; + u32 poc_error_count; + u32 timeout_flag; + ulong timeout; + bool discard_dv_data; + bool enable_fence; + int fence_usage; + int buffer_wrap[MAX_REF_PIC_NUM]; + int low_latency_flag; + u32 metadata_config_flag; + int last_width; + int last_height; + int used_buf_num; + u32 dirty_shift_flag; + u32 endian; + ulong fb_token; + int dec_again_cnt; + struct mh265_fence_vf_t fence_vf_s; + struct mutex fence_mutex; + dma_addr_t rdma_phy_adr; + unsigned *rdma_adr; + struct trace_decoder_name trace; +} /*hevc_stru_t */; + +#ifdef AGAIN_HAS_THRESHOLD +static u32 again_threshold; +#endif +#ifdef SEND_LMEM_WITH_RPM +#define get_lmem_params(hevc, ladr) \ + hevc->lmem_ptr[ladr - (ladr & 0x3) + 3 - (ladr & 0x3)] + + +static int get_frame_mmu_map_size(void) +{ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + return (MAX_FRAME_8K_NUM * 4); + + return (MAX_FRAME_4K_NUM * 4); +} + +static int is_oversize(int w, int h) +{ + int max = (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)? + MAX_SIZE_8K : MAX_SIZE_4K; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) + max = MAX_SIZE_2K; + + if (w < 0 || h < 0) + return true; + + if (h != 0 && (w > max / h)) + return true; + + return false; +} + +int is_oversize_ex(int w, int h) +{ + int max = (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) ? 
+ MAX_SIZE_8K : MAX_SIZE_4K;
+
+ /* zero (or, below, negative) dimensions are always rejected */
+ if (w == 0 || h == 0)
+ return true;
+ if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) {
+ if (w > 8192 || h > 4608)
+ return true;
+ } else {
+ if (w > 4096 || h > 2304)
+ return true;
+ }
+
+ if (w < 0 || h < 0)
+ return true;
+
+ /* area limit checked as w > max/h so w*h cannot overflow */
+ if (h != 0 && (w > max / h))
+ return true;
+
+ return false;
+}
+
+
+/*
+ * Sanity-check the PCM coding parameters the ucode parsed into lmem.
+ * Only active when error_handle_policy bit 6 is set; sets
+ * hevc->head_error_flag to 1 when the PCM sample bit depth exceeds the
+ * stream's declared luma/chroma bit depth (an invalid header).
+ */
+void check_head_error(struct hevc_state_s *hevc)
+{
+#define pcm_enabled_flag 0x040
+#define pcm_sample_bit_depth_luma 0x041
+#define pcm_sample_bit_depth_chroma 0x042
+ hevc->head_error_flag = 0;
+ if ((error_handle_policy & 0x40) == 0)
+ return;
+ if (get_lmem_params(hevc, pcm_enabled_flag)) {
+ uint16_t pcm_depth_luma = get_lmem_params(
+ hevc, pcm_sample_bit_depth_luma);
+ uint16_t pcm_sample_chroma = get_lmem_params(
+ hevc, pcm_sample_bit_depth_chroma);
+ if (pcm_depth_luma >
+ hevc->bit_depth_luma ||
+ pcm_sample_chroma >
+ hevc->bit_depth_chroma) {
+ hevc_print(hevc, 0,
+ "error, pcm bit depth %d, %d is greater than normal bit depth %d, %d\n",
+ pcm_depth_luma,
+ pcm_sample_chroma,
+ hevc->bit_depth_luma,
+ hevc->bit_depth_chroma);
+ hevc->head_error_flag = 1;
+ }
+ }
+}
+#endif
+
+#ifdef SUPPORT_10BIT
+/* Losless compression body buffer size 4K per 64x32 (jt) */
+static int compute_losless_comp_body_size(struct hevc_state_s *hevc,
+ int width, int height, int mem_saving_mode)
+{
+ int width_x64;
+ int height_x32;
+ int bsize;
+
+ /* round picture up to whole 64x32 compression blocks */
+ width_x64 = width + 63;
+ width_x64 >>= 6;
+
+ height_x32 = height + 31;
+ height_x32 >>= 5;
+ /* bytes per 64x32 block: 3200 (mem-saving with MMU), 3072 (mem-saving), 4096 (normal) */
+ if (mem_saving_mode == 1 && hevc->mmu_enable)
+ bsize = 3200 * width_x64 * height_x32;
+ else if (mem_saving_mode == 1)
+ bsize = 3072 * width_x64 * height_x32;
+ else
+ bsize = 4096 * width_x64 * height_x32;
+
+ return bsize;
+}
+
+/* Losless compression header buffer size 32bytes per 128x64 (jt) */
+static int compute_losless_comp_header_size(int width, int height)
+{
+ int width_x128;
+ int height_x64;
+ int hsize;
+
+ /* round picture up to whole 128x64 header blocks */
+ width_x128 = width + 127;
+ width_x128 >>= 7;
+
+ height_x64 = height + 63;
+
height_x64 >>= 6;
+
+ hsize = 32*width_x128*height_x64;
+
+ return hsize;
+}
+#endif
+
+/*
+ * Append one timestamped, printf-formatted entry to hevc->log_list
+ * (drained later by dump_log()).  Serialized by vh265_log_mutex.
+ * Always returns 0.
+ * NOTE(review): when the message is truncated, vsnprintf() returns the
+ * would-be length, so the kmalloc below can over-allocate slightly --
+ * harmless, it never under-allocates.
+ */
+static int add_log(struct hevc_state_s *hevc,
+ const char *fmt, ...)
+{
+#define HEVC_LOG_BUF 196
+ struct debug_log_s *log_item;
+ unsigned char buf[HEVC_LOG_BUF];
+ int len = 0;
+ va_list args;
+ mutex_lock(&vh265_log_mutex);
+ va_start(args, fmt);
+ len = sprintf(buf, "<%ld> <%05d> ",
+ jiffies, hevc->decode_idx);
+ len += vsnprintf(buf + len,
+ HEVC_LOG_BUF - len, fmt, args);
+ va_end(args);
+ log_item = kmalloc(
+ sizeof(struct debug_log_s) + len,
+ GFP_KERNEL);
+ if (log_item) {
+ INIT_LIST_HEAD(&log_item->list);
+ /* log_item->data is the first byte of the trailing string storage */
+ strcpy(&log_item->data, buf);
+ list_add_tail(&log_item->list,
+ &hevc->log_list);
+ }
+ mutex_unlock(&vh265_log_mutex);
+ return 0;
+}
+
+/* Print every queued log entry in order, unlinking and freeing each one. */
+static void dump_log(struct hevc_state_s *hevc)
+{
+ int i = 0;
+ struct debug_log_s *log_item, *tmp;
+ mutex_lock(&vh265_log_mutex);
+ list_for_each_entry_safe(log_item, tmp, &hevc->log_list, list) {
+ hevc_print(hevc, 0,
+ "[LOG%04d]%s\n",
+ i++,
+ &log_item->data);
+ list_del(&log_item->list);
+ kfree(log_item);
+ }
+ mutex_unlock(&vh265_log_mutex);
+}
+
+/*
+ * A picture is skipped when it carries an error mark and bit 0 of
+ * ignore_bufmgr_error ("keep decoding despite errors") is clear.
+ */
+static unsigned char is_skip_decoding(struct hevc_state_s *hevc,
+ struct PIC_s *pic)
+{
+ if (pic->error_mark
+ && ((hevc->ignore_bufmgr_error & 0x1) == 0))
+ return 1;
+ return 0;
+}
+
+/*
+ * POC of picture @idx, or INVALID_POC when idx is the 0xff sentinel,
+ * out of range, or the slot is unallocated.
+ */
+static int get_pic_poc(struct hevc_state_s *hevc,
+ unsigned int idx)
+{
+ if (idx != 0xff
+ && idx < MAX_REF_PIC_NUM
+ && hevc->m_PIC[idx])
+ return hevc->m_PIC[idx]->POC;
+ return INVALID_POC;
+}
+
+#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
+/*
+ * Use the per-instance double_write_mode in multi-instance mode unless
+ * the global module parameter has bit 31 set (force-global override).
+ */
+static int get_valid_double_write_mode(struct hevc_state_s *hevc)
+{
+ u32 dw = (hevc->m_ins_flag &&
+ ((double_write_mode & 0x80000000) == 0)) ?
+ hevc->double_write_mode : + (double_write_mode & 0x7fffffff); + if (dw & 0x20) { + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T3) + && ((dw & 0xf) == 2 || (dw & 0xf) == 3)) { + pr_info("MMU doueble write 1:4 not supported !!!\n"); + dw = 0; + } + } + return dw; +} + +static int get_dynamic_buf_num_margin(struct hevc_state_s *hevc) +{ + return (hevc->m_ins_flag && + ((dynamic_buf_num_margin & 0x80000000) == 0)) ? + hevc->dynamic_buf_num_margin : + (dynamic_buf_num_margin & 0x7fffffff); +} +#endif + +static int get_double_write_mode(struct hevc_state_s *hevc) +{ + u32 valid_dw_mode = get_valid_double_write_mode(hevc); + int w = hevc->pic_w; + int h = hevc->pic_h; + u32 dw = 0x1; /*1:1*/ + + if (hevc->is_used_v4l) { + unsigned int out; + + vdec_v4l_get_dw_mode(hevc->v4l2_ctx, &out); + dw = out; + return dw; + } + + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + case 0x1000: + if (w * h > 1920 * 1080) + dw = 3; + else if (w * h > 960 * 540) + dw = 5; + else + dw = 1; + break; + default: + dw = valid_dw_mode; + break; + } + return dw; +} + +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +static unsigned char get_idx(struct hevc_state_s *hevc) +{ + return hevc->index; +} +#endif + +#undef pr_info +#define pr_info printk +static int hevc_print(struct hevc_state_s *hevc, + int flag, const char *fmt, ...) 
+{ +#define HEVC_PRINT_BUF 512 + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + if (hevc == NULL || + (flag == 0) || + ((debug_mask & + (1 << hevc->index)) + && (debug & flag))) { +#endif + va_list args; + + va_start(args, fmt); + if (hevc) + len = sprintf(buf, "[%d]", hevc->index); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_debug("%s", buf); + va_end(args); +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + } +#endif + return 0; +} + +static int hevc_print_cont(struct hevc_state_s *hevc, + int flag, const char *fmt, ...) +{ + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + if (hevc == NULL || + (flag == 0) || + ((debug_mask & + (1 << hevc->index)) + && (debug & flag))) { +#endif + va_list args; + + va_start(args, fmt); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + } +#endif + return 0; +} + +static void put_mv_buf(struct hevc_state_s *hevc, + struct PIC_s *pic); + +static void update_vf_memhandle(struct hevc_state_s *hevc, + struct vframe_s *vf, struct PIC_s *pic); + +static void set_canvas(struct hevc_state_s *hevc, struct PIC_s *pic); + +static void release_aux_data(struct hevc_state_s *hevc, + struct PIC_s *pic); +static void release_pic_mmu_buf(struct hevc_state_s *hevc, struct PIC_s *pic); + +#ifdef MULTI_INSTANCE_SUPPORT +static void backup_decode_state(struct hevc_state_s *hevc) +{ + hevc->m_pocRandomAccess_bak = hevc->m_pocRandomAccess; + hevc->curr_POC_bak = hevc->curr_POC; + hevc->iPrevPOC_bak = hevc->iPrevPOC; + hevc->iPrevTid0POC_bak = hevc->iPrevTid0POC; + hevc->start_parser_type_bak = hevc->start_parser_type; + hevc->start_decoding_flag_bak = hevc->start_decoding_flag; + hevc->rps_set_id_bak = hevc->rps_set_id; + hevc->pic_decoded_lcu_idx_bak = hevc->pic_decoded_lcu_idx; + hevc->decode_idx_bak = hevc->decode_idx; + +} + +static void restore_decode_state(struct 
hevc_state_s *hevc) +{ + struct vdec_s *vdec = hw_to_vdec(hevc); + if (!vdec_has_more_input(vdec)) { + hevc->pic_decoded_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff; + return; + } + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: discard pic index 0x%x\n", + __func__, hevc->decoding_pic ? + hevc->decoding_pic->index : 0xff); + if (hevc->decoding_pic) { + hevc->decoding_pic->error_mark = 0; + hevc->decoding_pic->output_ready = 0; + hevc->decoding_pic->show_frame = false; + hevc->decoding_pic->output_mark = 0; + hevc->decoding_pic->referenced = 0; + hevc->decoding_pic->POC = INVALID_POC; + put_mv_buf(hevc, hevc->decoding_pic); + release_aux_data(hevc, hevc->decoding_pic); + hevc->decoding_pic = NULL; + } + /*if (vdec_stream_based(vdec) && + (hevc->decode_idx - hevc->decode_idx_bak > 1)) { + int i; + hevc_print(hevc, 0, "decode_idx %d, decode_idx_bak %d\n", + hevc->decode_idx, hevc->decode_idx_bak); + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + struct PIC_s *pic; + pic = hevc->m_PIC[i]; + if (pic == NULL || + (pic->index == -1) || + (pic->BUF_index == -1) || + (pic->POC == INVALID_POC)) + continue; + if ((pic->decode_idx >= hevc->decode_idx_bak) && + pic->decode_idx != (hevc->decode_idx - 1)) { + hevc_print(hevc, 0, "release error buffer\n"); + pic->error_mark = 0; + pic->output_ready = 0; + pic->show_frame = false; + pic->output_mark = 0; + pic->referenced = 0; + pic->POC = INVALID_POC; + put_mv_buf(hevc, pic); + release_aux_data(hevc, pic); + } + } + }*/ + hevc->decode_idx = hevc->decode_idx_bak; + hevc->m_pocRandomAccess = hevc->m_pocRandomAccess_bak; + hevc->curr_POC = hevc->curr_POC_bak; + hevc->iPrevPOC = hevc->iPrevPOC_bak; + hevc->iPrevTid0POC = hevc->iPrevTid0POC_bak; + hevc->start_parser_type = hevc->start_parser_type_bak; + hevc->start_decoding_flag = hevc->start_decoding_flag_bak; + hevc->rps_set_id = hevc->rps_set_id_bak; + hevc->pic_decoded_lcu_idx = hevc->pic_decoded_lcu_idx_bak; + + if (hevc->pic_list_init_flag == 1) + 
hevc->pic_list_init_flag = 0; + /*if (hevc->decode_idx == 0) + hevc->start_decoding_flag = 0;*/ + + hevc->slice_idx = 0; + hevc->used_4k_num = -1; +} +#endif + +static void hevc_init_stru(struct hevc_state_s *hevc, + struct BuffInfo_s *buf_spec_i) +{ + int i; + INIT_LIST_HEAD(&hevc->log_list); + hevc->work_space_buf = buf_spec_i; + hevc->prefix_aux_size = 0; + hevc->suffix_aux_size = 0; + hevc->aux_addr = NULL; + hevc->rpm_addr = NULL; + hevc->lmem_addr = NULL; + + hevc->curr_POC = INVALID_POC; + + hevc->pic_list_init_flag = 0; + hevc->use_cma_flag = 0; + hevc->decode_idx = 0; + hevc->slice_idx = 0; + hevc->new_pic = 0; + hevc->new_tile = 0; + hevc->iPrevPOC = 0; + hevc->list_no = 0; + /* int m_uiMaxCUWidth = 1<<7; */ + /* int m_uiMaxCUHeight = 1<<7; */ + hevc->m_pocRandomAccess = MAX_INT; + hevc->tile_enabled = 0; + hevc->tile_x = 0; + hevc->tile_y = 0; + hevc->iPrevTid0POC = 0; + hevc->slice_addr = 0; + hevc->slice_segment_addr = 0; + hevc->skip_flag = 0; + hevc->misc_flag0 = 0; + + hevc->cur_pic = NULL; + hevc->col_pic = NULL; + hevc->wait_buf = 0; + hevc->error_flag = 0; + hevc->head_error_flag = 0; + hevc->error_skip_nal_count = 0; + hevc->have_vps = 0; + hevc->have_sps = 0; + hevc->have_pps = 0; + hevc->have_valid_start_slice = 0; + + hevc->pts_mode = PTS_NORMAL; + hevc->last_pts = 0; + hevc->last_lookup_pts = 0; + hevc->last_pts_us64 = 0; + hevc->last_lookup_pts_us64 = 0; + hevc->pts_mode_switching_count = 0; + hevc->pts_mode_recovery_count = 0; + + hevc->PB_skip_mode = nal_skip_policy & 0x3; + hevc->PB_skip_count_after_decoding = (nal_skip_policy >> 16) & 0xffff; + if (hevc->PB_skip_mode == 0) + hevc->ignore_bufmgr_error = 0x1; + else + hevc->ignore_bufmgr_error = 0x0; + + if (hevc->is_used_v4l) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + if (hevc->m_PIC[i] != NULL) { + memset(hevc->m_PIC[i], 0 ,sizeof(struct PIC_s)); + hevc->m_PIC[i]->index = i; + } + } + } + + hevc->pic_num = 0; + hevc->lcu_x_num_pre = 0; + hevc->lcu_y_num_pre = 0; + 
hevc->first_pic_after_recover = 0;
+
+ hevc->pre_top_pic = NULL;
+ hevc->pre_bot_pic = NULL;
+
+ hevc->sei_present_flag = 0;
+ hevc->valve_count = 0;
+ hevc->first_pic_flag = 0;
+#ifdef MULTI_INSTANCE_SUPPORT
+ /* reset per-decode bookkeeping used by the multi-instance state machine */
+ hevc->decoded_poc = INVALID_POC;
+ hevc->start_process_time = 0;
+ hevc->last_lcu_idx = 0;
+ hevc->decode_timeout_count = 0;
+ hevc->timeout_num = 0;
+ hevc->eos = 0;
+ hevc->pic_decoded_lcu_idx = -1;
+ hevc->over_decode = 0;
+ hevc->used_4k_num = -1;
+ hevc->start_decoding_flag = 0;
+ hevc->rps_set_id = 0;
+ backup_decode_state(hevc);
+#endif
+#ifdef DETREFILL_ENABLE
+ hevc->detbuf_adr = 0;
+ hevc->detbuf_adr_virt = NULL;
+#endif
+}
+
+/* forward declarations */
+static int post_picture_early(struct vdec_s *vdec, int index);
+static int prepare_display_buf(struct vdec_s *vdec, struct PIC_s *pic);
+static int H265_alloc_mmu(struct hevc_state_s *hevc,
+ struct PIC_s *new_pic, unsigned short bit_depth,
+ unsigned int *mmu_index_adr);
+#ifdef H265_10B_MMU_DW
+static int H265_alloc_mmu_dw(struct hevc_state_s *hevc, struct PIC_s *new_pic,
+ unsigned short bit_depth, unsigned int *mmu_index_adr);
+#endif
+
+#ifdef DETREFILL_ENABLE
+/*
+ * DETREFILL: scratch buffer size plus SAO debug register indices used
+ * by corrRefillWithAmrisc() to patch corrupted compressed-frame data.
+ */
+#define DETREFILL_BUF_SIZE (4 * 0x4000)
+#define HEVC_SAO_DBG_MODE0 0x361e
+#define HEVC_SAO_DBG_MODE1 0x361f
+#define HEVC_SAO_CTRL10 0x362e
+#define HEVC_SAO_CTRL11 0x362f
+/*
+ * Lazily allocate the DMA-coherent detrefill scratch buffer.
+ * Returns 0 on success (or when already allocated), -1 on failure.
+ * NOTE(review): the error message says "ETREFILL_BUF" -- looks like a
+ * typo for DETREFILL_BUF (runtime string deliberately left untouched).
+ */
+static int init_detrefill_buf(struct hevc_state_s *hevc)
+{
+ if (hevc->detbuf_adr_virt)
+ return 0;
+
+ hevc->detbuf_adr_virt =
+ (void *)dma_alloc_coherent(amports_get_dma_device(),
+ DETREFILL_BUF_SIZE, &hevc->detbuf_adr,
+ GFP_KERNEL);
+
+ if (hevc->detbuf_adr_virt == NULL) {
+ pr_err("%s: failed to alloc ETREFILL_BUF\n", __func__);
+ return -1;
+ }
+ return 0;
+}
+
+/* Free the detrefill scratch buffer, if it was allocated. */
+static void uninit_detrefill_buf(struct hevc_state_s *hevc)
+{
+ if (hevc->detbuf_adr_virt) {
+ dma_free_coherent(amports_get_dma_device(),
+ DETREFILL_BUF_SIZE, hevc->detbuf_adr_virt,
+ hevc->detbuf_adr);
+
+ hevc->detbuf_adr_virt = NULL;
+ hevc->detbuf_adr = 0;
+ }
+}
+
+/*
+ * convert uncompressed frame buffer data from/to
ddr + */ +static void convUnc8x4blk(uint16_t* blk8x4Luma, + uint16_t* blk8x4Cb, uint16_t* blk8x4Cr, uint16_t* cmBodyBuf, int32_t direction) +{ + if (direction == 0) { + blk8x4Luma[3 + 0 * 8] = ((cmBodyBuf[0] >> 0)) & 0x3ff; + blk8x4Luma[3 + 1 * 8] = ((cmBodyBuf[1] << 6) + | (cmBodyBuf[0] >> 10)) & 0x3ff; + blk8x4Luma[3 + 2 * 8] = ((cmBodyBuf[1] >> 4)) & 0x3ff; + blk8x4Luma[3 + 3 * 8] = ((cmBodyBuf[2] << 2) + | (cmBodyBuf[1] >> 14)) & 0x3ff; + blk8x4Luma[7 + 0 * 8] = ((cmBodyBuf[3] << 8) + | (cmBodyBuf[2] >> 8)) & 0x3ff; + blk8x4Luma[7 + 1 * 8] = ((cmBodyBuf[3] >> 2)) & 0x3ff; + blk8x4Luma[7 + 2 * 8] = ((cmBodyBuf[4] << 4) + | (cmBodyBuf[3] >> 12)) & 0x3ff; + blk8x4Luma[7 + 3 * 8] = ((cmBodyBuf[4] >> 6)) & 0x3ff; + blk8x4Cb [0 + 0 * 4] = ((cmBodyBuf[5] >> 0)) & 0x3ff; + blk8x4Cr [0 + 0 * 4] = ((cmBodyBuf[6] << 6) + | (cmBodyBuf[5] >> 10)) & 0x3ff; + blk8x4Cb [0 + 1 * 4] = ((cmBodyBuf[6] >> 4)) & 0x3ff; + blk8x4Cr [0 + 1 * 4] = ((cmBodyBuf[7] << 2) + | (cmBodyBuf[6] >> 14)) & 0x3ff; + + blk8x4Luma[0 + 0 * 8] = ((cmBodyBuf[0 + 8] >> 0)) & 0x3ff; + blk8x4Luma[1 + 0 * 8] = ((cmBodyBuf[1 + 8] << 6) | + (cmBodyBuf[0 + 8] >> 10)) & 0x3ff; + blk8x4Luma[2 + 0 * 8] = ((cmBodyBuf[1 + 8] >> 4)) & 0x3ff; + blk8x4Luma[0 + 1 * 8] = ((cmBodyBuf[2 + 8] << 2) | + (cmBodyBuf[1 + 8] >> 14)) & 0x3ff; + blk8x4Luma[1 + 1 * 8] = ((cmBodyBuf[3 + 8] << 8) | + (cmBodyBuf[2 + 8] >> 8)) & 0x3ff; + blk8x4Luma[2 + 1 * 8] = ((cmBodyBuf[3 + 8] >> 2)) & 0x3ff; + blk8x4Luma[0 + 2 * 8] = ((cmBodyBuf[4 + 8] << 4) | + (cmBodyBuf[3 + 8] >> 12)) & 0x3ff; + blk8x4Luma[1 + 2 * 8] = ((cmBodyBuf[4 + 8] >> 6)) & 0x3ff; + blk8x4Luma[2 + 2 * 8] = ((cmBodyBuf[5 + 8] >> 0)) & 0x3ff; + blk8x4Luma[0 + 3 * 8] = ((cmBodyBuf[6 + 8] << 6) | + (cmBodyBuf[5 + 8] >> 10)) & 0x3ff; + blk8x4Luma[1 + 3 * 8] = ((cmBodyBuf[6 + 8] >> 4)) & 0x3ff; + blk8x4Luma[2 + 3 * 8] = ((cmBodyBuf[7 + 8] << 2) | + (cmBodyBuf[6 + 8] >> 14)) & 0x3ff; + + blk8x4Luma[4 + 0 * 8] = ((cmBodyBuf[0 + 16] >> 0)) & 0x3ff; + blk8x4Luma[5 + 0 * 8] = 
((cmBodyBuf[1 + 16] << 6) | + (cmBodyBuf[0 + 16] >> 10)) & 0x3ff; + blk8x4Luma[6 + 0 * 8] = ((cmBodyBuf[1 + 16] >> 4)) & 0x3ff; + blk8x4Luma[4 + 1 * 8] = ((cmBodyBuf[2 + 16] << 2) | + (cmBodyBuf[1 + 16] >> 14)) & 0x3ff; + blk8x4Luma[5 + 1 * 8] = ((cmBodyBuf[3 + 16] << 8) | + (cmBodyBuf[2 + 16] >> 8)) & 0x3ff; + blk8x4Luma[6 + 1 * 8] = ((cmBodyBuf[3 + 16] >> 2)) & 0x3ff; + blk8x4Luma[4 + 2 * 8] = ((cmBodyBuf[4 + 16] << 4) | + (cmBodyBuf[3 + 16] >> 12)) & 0x3ff; + blk8x4Luma[5 + 2 * 8] = ((cmBodyBuf[4 + 16] >> 6)) & 0x3ff; + blk8x4Luma[6 + 2 * 8] = ((cmBodyBuf[5 + 16] >> 0)) & 0x3ff; + blk8x4Luma[4 + 3 * 8] = ((cmBodyBuf[6 + 16] << 6) | + (cmBodyBuf[5 + 16] >> 10)) & 0x3ff; + blk8x4Luma[5 + 3 * 8] = ((cmBodyBuf[6 + 16] >> 4)) & 0x3ff; + blk8x4Luma[6 + 3 * 8] = ((cmBodyBuf[7 + 16] << 2) | + (cmBodyBuf[6 + 16] >> 14)) & 0x3ff; + + blk8x4Cb[1 + 0 * 4] = ((cmBodyBuf[0 + 24] >> 0)) & 0x3ff; + blk8x4Cr[1 + 0 * 4] = ((cmBodyBuf[1 + 24] << 6) | + (cmBodyBuf[0 + 24] >> 10)) & 0x3ff; + blk8x4Cb[2 + 0 * 4] = ((cmBodyBuf[1 + 24] >> 4)) & 0x3ff; + blk8x4Cr[2 + 0 * 4] = ((cmBodyBuf[2 + 24] << 2) | + (cmBodyBuf[1 + 24] >> 14)) & 0x3ff; + blk8x4Cb[3 + 0 * 4] = ((cmBodyBuf[3 + 24] << 8) | + (cmBodyBuf[2 + 24] >> 8)) & 0x3ff; + blk8x4Cr[3 + 0 * 4] = ((cmBodyBuf[3 + 24] >> 2)) & 0x3ff; + blk8x4Cb[1 + 1 * 4] = ((cmBodyBuf[4 + 24] << 4) | + (cmBodyBuf[3 + 24] >> 12)) & 0x3ff; + blk8x4Cr[1 + 1 * 4] = ((cmBodyBuf[4 + 24] >> 6)) & 0x3ff; + blk8x4Cb[2 + 1 * 4] = ((cmBodyBuf[5 + 24] >> 0)) & 0x3ff; + blk8x4Cr[2 + 1 * 4] = ((cmBodyBuf[6 + 24] << 6) | + (cmBodyBuf[5 + 24] >> 10)) & 0x3ff; + blk8x4Cb[3 + 1 * 4] = ((cmBodyBuf[6 + 24] >> 4)) & 0x3ff; + blk8x4Cr[3 + 1 * 4] = ((cmBodyBuf[7 + 24] << 2) | + (cmBodyBuf[6 + 24] >> 14)) & 0x3ff; + } else { + cmBodyBuf[0 + 8 * 0] = (blk8x4Luma[3 + 1 * 8] << 10) | + blk8x4Luma[3 + 0 * 8]; + cmBodyBuf[1 + 8 * 0] = (blk8x4Luma[3 + 3 * 8] << 14) | + (blk8x4Luma[3 + 2 * 8] << 4) | (blk8x4Luma[3 + 1 * 8] >> 6); + cmBodyBuf[2 + 8 * 0] = (blk8x4Luma[7 + 0 * 8] << 
8) | + (blk8x4Luma[3 + 3 * 8] >> 2); + cmBodyBuf[3 + 8 * 0] = (blk8x4Luma[7 + 2 * 8] << 12) | + (blk8x4Luma[7 + 1 * 8] << 2) | (blk8x4Luma[7 + 0 * 8] >>8); + cmBodyBuf[4 + 8 * 0] = (blk8x4Luma[7 + 3 * 8] << 6) | + (blk8x4Luma[7 + 2 * 8] >>4); + cmBodyBuf[5 + 8 * 0] = (blk8x4Cr[0 + 0 * 4] << 10) | + blk8x4Cb[0 + 0 * 4]; + cmBodyBuf[6 + 8 * 0] = (blk8x4Cr[0 + 1 * 4] << 14) | + (blk8x4Cb[0 + 1 * 4] << 4) | (blk8x4Cr[0 + 0 * 4] >> 6); + cmBodyBuf[7 + 8 * 0] = (0<< 8) | (blk8x4Cr[0 + 1 * 4] >> 2); + + cmBodyBuf[0 + 8 * 1] = (blk8x4Luma[1 + 0 * 8] << 10) | + blk8x4Luma[0 + 0 * 8]; + cmBodyBuf[1 + 8 * 1] = (blk8x4Luma[0 + 1 * 8] << 14) | + (blk8x4Luma[2 + 0 * 8] << 4) | (blk8x4Luma[1 + 0 * 8] >> 6); + cmBodyBuf[2 + 8 * 1] = (blk8x4Luma[1 + 1 * 8] << 8) | + (blk8x4Luma[0 + 1 * 8] >> 2); + cmBodyBuf[3 + 8 * 1] = (blk8x4Luma[0 + 2 * 8] << 12) | + (blk8x4Luma[2 + 1 * 8] << 2) | (blk8x4Luma[1 + 1 * 8] >>8); + cmBodyBuf[4 + 8 * 1] = (blk8x4Luma[1 + 2 * 8] << 6) | + (blk8x4Luma[0 + 2 * 8] >>4); + cmBodyBuf[5 + 8 * 1] = (blk8x4Luma[0 + 3 * 8] << 10) | + blk8x4Luma[2 + 2 * 8]; + cmBodyBuf[6 + 8 * 1] = (blk8x4Luma[2 + 3 * 8] << 14) | + (blk8x4Luma[1 + 3 * 8] << 4) | (blk8x4Luma[0 + 3 * 8] >> 6); + cmBodyBuf[7 + 8 * 1] = (0<< 8) | (blk8x4Luma[2 + 3 * 8] >> 2); + + cmBodyBuf[0 + 8 * 2] = (blk8x4Luma[5 + 0 * 8] << 10) | + blk8x4Luma[4 + 0 * 8]; + cmBodyBuf[1 + 8 * 2] = (blk8x4Luma[4 + 1 * 8] << 14) | + (blk8x4Luma[6 + 0 * 8] << 4) | (blk8x4Luma[5 + 0 * 8] >> 6); + cmBodyBuf[2 + 8 * 2] = (blk8x4Luma[5 + 1 * 8] << 8) | + (blk8x4Luma[4 + 1 * 8] >> 2); + cmBodyBuf[3 + 8 * 2] = (blk8x4Luma[4 + 2 * 8] << 12) | + (blk8x4Luma[6 + 1 * 8] << 2) | (blk8x4Luma[5 + 1 * 8] >>8); + cmBodyBuf[4 + 8 * 2] = (blk8x4Luma[5 + 2 * 8] << 6) | + (blk8x4Luma[4 + 2 * 8] >>4); + cmBodyBuf[5 + 8 * 2] = (blk8x4Luma[4 + 3 * 8] << 10) | + blk8x4Luma[6 + 2 * 8]; + cmBodyBuf[6 + 8 * 2] = (blk8x4Luma[6 + 3 * 8] << 14) | + (blk8x4Luma[5 + 3 * 8] << 4) | (blk8x4Luma[4 + 3 * 8] >> 6); + cmBodyBuf[7 + 8 * 2] = (0<< 8) | 
(blk8x4Luma[6 + 3 * 8] >> 2); + + cmBodyBuf[0 + 8 * 3] = (blk8x4Cr[1 + 0 * 4] << 10) | + blk8x4Cb[1 + 0 * 4]; + cmBodyBuf[1 + 8 * 3] = (blk8x4Cr[2 + 0 * 4] << 14) | + (blk8x4Cb[2 + 0 * 4] << 4) | (blk8x4Cr[1 + 0 * 4] >> 6); + cmBodyBuf[2 + 8 * 3] = (blk8x4Cb[3 + 0 * 4] << 8) | + (blk8x4Cr[2 + 0 * 4] >> 2); + cmBodyBuf[3 + 8 * 3] = (blk8x4Cb[1 + 1 * 4] << 12) | + (blk8x4Cr[3 + 0 * 4] << 2) | (blk8x4Cb[3 + 0 * 4] >>8); + cmBodyBuf[4 + 8 * 3] = (blk8x4Cr[1 + 1 * 4] << 6) | + (blk8x4Cb[1 + 1 * 4] >>4); + cmBodyBuf[5 + 8 * 3] = (blk8x4Cr[2 + 1 * 4] << 10) | + blk8x4Cb[2 + 1 * 4]; + cmBodyBuf[6 + 8 * 3] = (blk8x4Cr[3 + 1 * 4] << 14) | + (blk8x4Cb[3 + 1 * 4] << 4) | (blk8x4Cr[2 + 1 * 4] >> 6); + cmBodyBuf[7 + 8 * 3] = (0 << 8) | (blk8x4Cr[3 + 1 * 4] >> 2); + } +} + +static void corrRefillWithAmrisc ( + struct hevc_state_s *hevc, + uint32_t cmHeaderBaseAddr, + uint32_t picWidth, + uint32_t ctuPosition) +{ + int32_t i; + uint16_t ctux = (ctuPosition>>16) & 0xffff; + uint16_t ctuy = (ctuPosition>> 0) & 0xffff; + int32_t aboveCtuAvailable = (ctuy) ? 1 : 0; + + uint16_t *cmBodyBuf = NULL; + + uint32_t pic_width_x64_pre = picWidth + 0x3f; + uint32_t pic_width_x64 = pic_width_x64_pre >> 6; + uint32_t stride64x64 = pic_width_x64 * 128; + uint32_t addr_offset64x64_abv = stride64x64 * + (aboveCtuAvailable ? 
ctuy - 1 : ctuy) + 128 * ctux; + uint32_t addr_offset64x64_cur = stride64x64*ctuy + 128 * ctux; + uint32_t cmHeaderAddrAbv = cmHeaderBaseAddr + addr_offset64x64_abv; + uint32_t cmHeaderAddrCur = cmHeaderBaseAddr + addr_offset64x64_cur; + unsigned int tmpData32; + + uint16_t blkBuf0Y[32]; + uint16_t blkBuf0Cb[8]; + uint16_t blkBuf0Cr[8]; + uint16_t blkBuf1Y[32]; + uint16_t blkBuf1Cb[8]; + uint16_t blkBuf1Cr[8]; + int32_t blkBufCnt = 0; + + int32_t blkIdx; + + cmBodyBuf = vzalloc(sizeof(uint16_t) * 32 * 18); + if (!cmBodyBuf) + return; + + WRITE_VREG(HEVC_SAO_CTRL10, cmHeaderAddrAbv); + WRITE_VREG(HEVC_SAO_CTRL11, cmHeaderAddrCur); + WRITE_VREG(HEVC_SAO_DBG_MODE0, hevc->detbuf_adr); + WRITE_VREG(HEVC_SAO_DBG_MODE1, 2); + + for (i = 0; i < 32 * 18; i++) + cmBodyBuf[i] = 0; + + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s, %d\n", __func__, __LINE__); + do { + tmpData32 = READ_VREG(HEVC_SAO_DBG_MODE1); + } while (tmpData32); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s, %d\n", __func__, __LINE__); + + hevc_print(hevc, H265_DEBUG_DETAIL, + "cmBodyBuf from detbuf:\n"); + for (i = 0; i < 32 * 18; i++) { + cmBodyBuf[i] = hevc->detbuf_adr_virt[i]; + if (get_dbg_flag(hevc) & + H265_DEBUG_DETAIL) { + if ((i & 0xf) == 0) + hevc_print_cont(hevc, 0, "\n"); + hevc_print_cont(hevc, 0, "%02x ", cmBodyBuf[i]); + } + } + hevc_print_cont(hevc, H265_DEBUG_DETAIL, "\n"); + + for (i = 0; i < 32; i++) + blkBuf0Y[i] = 0; + for (i = 0; i < 8; i++) + blkBuf0Cb[i] = 0; + for (i = 0; i < 8; i++) + blkBuf0Cr[i] = 0; + for (i = 0; i < 32; i++) + blkBuf1Y[i] = 0; + for (i = 0; i < 8; i++) + blkBuf1Cb[i] = 0; + for (i = 0; i < 8; i++) + blkBuf1Cr[i] = 0; + + for (blkIdx = 0; blkIdx < 18; blkIdx++) { + int32_t inAboveCtu = (blkIdx<2) ? 1 : 0; + int32_t restoreEnable = (blkIdx>0) ? 1 : 0; + uint16_t* blkY = (blkBufCnt==0) ? blkBuf0Y : blkBuf1Y ; + uint16_t* blkCb = (blkBufCnt==0) ? blkBuf0Cb : blkBuf1Cb; + uint16_t* blkCr = (blkBufCnt==0) ? 
blkBuf0Cr : blkBuf1Cr; + uint16_t* cmBodyBufNow = cmBodyBuf + (blkIdx * 32); + + if (!aboveCtuAvailable && inAboveCtu) + continue; + + /* detRefillBuf --> 8x4block*/ + convUnc8x4blk(blkY, blkCb, blkCr, cmBodyBufNow, 0); + + if (restoreEnable) { + blkY[3 + 0 * 8] = blkY[2 + 0 * 8] + 2; + blkY[4 + 0 * 8] = blkY[1 + 0 * 8] + 3; + blkY[5 + 0 * 8] = blkY[0 + 0 * 8] + 1; + blkY[6 + 0 * 8] = blkY[0 + 0 * 8] + 2; + blkY[7 + 0 * 8] = blkY[1 + 0 * 8] + 2; + blkY[3 + 1 * 8] = blkY[2 + 1 * 8] + 1; + blkY[4 + 1 * 8] = blkY[1 + 1 * 8] + 2; + blkY[5 + 1 * 8] = blkY[0 + 1 * 8] + 2; + blkY[6 + 1 * 8] = blkY[0 + 1 * 8] + 2; + blkY[7 + 1 * 8] = blkY[1 + 1 * 8] + 3; + blkY[3 + 2 * 8] = blkY[2 + 2 * 8] + 3; + blkY[4 + 2 * 8] = blkY[1 + 2 * 8] + 1; + blkY[5 + 2 * 8] = blkY[0 + 2 * 8] + 3; + blkY[6 + 2 * 8] = blkY[0 + 2 * 8] + 3; + blkY[7 + 2 * 8] = blkY[1 + 2 * 8] + 3; + blkY[3 + 3 * 8] = blkY[2 + 3 * 8] + 0; + blkY[4 + 3 * 8] = blkY[1 + 3 * 8] + 0; + blkY[5 + 3 * 8] = blkY[0 + 3 * 8] + 1; + blkY[6 + 3 * 8] = blkY[0 + 3 * 8] + 2; + blkY[7 + 3 * 8] = blkY[1 + 3 * 8] + 1; + blkCb[1 + 0 * 4] = blkCb[0 + 0 * 4]; + blkCb[2 + 0 * 4] = blkCb[0 + 0 * 4]; + blkCb[3 + 0 * 4] = blkCb[0 + 0 * 4]; + blkCb[1 + 1 * 4] = blkCb[0 + 1 * 4]; + blkCb[2 + 1 * 4] = blkCb[0 + 1 * 4]; + blkCb[3 + 1 * 4] = blkCb[0 + 1 * 4]; + blkCr[1 + 0 * 4] = blkCr[0 + 0 * 4]; + blkCr[2 + 0 * 4] = blkCr[0 + 0 * 4]; + blkCr[3 + 0 * 4] = blkCr[0 + 0 * 4]; + blkCr[1 + 1 * 4] = blkCr[0 + 1 * 4]; + blkCr[2 + 1 * 4] = blkCr[0 + 1 * 4]; + blkCr[3 + 1 * 4] = blkCr[0 + 1 * 4]; + + /*Store data back to DDR*/ + convUnc8x4blk(blkY, blkCb, blkCr, cmBodyBufNow, 1); + } + + blkBufCnt = (blkBufCnt==1) ? 
0 : blkBufCnt + 1; + } + + hevc_print(hevc, H265_DEBUG_DETAIL, + "cmBodyBuf to detbuf:\n"); + for (i = 0; i < 32 * 18; i++) { + hevc->detbuf_adr_virt[i] = cmBodyBuf[i]; + if (get_dbg_flag(hevc) & + H265_DEBUG_DETAIL) { + if ((i & 0xf) == 0) + hevc_print_cont(hevc, 0, "\n"); + hevc_print_cont(hevc, 0, "%02x ", cmBodyBuf[i]); + } + } + hevc_print_cont(hevc, H265_DEBUG_DETAIL, "\n"); + + WRITE_VREG(HEVC_SAO_DBG_MODE1, 3); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s, %d\n", __func__, __LINE__); + do { + tmpData32 = READ_VREG(HEVC_SAO_DBG_MODE1); + } while (tmpData32); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s, %d\n", __func__, __LINE__); + vfree(cmBodyBuf); +} + +static void delrefill(struct hevc_state_s *hevc) +{ + /* + * corrRefill + */ + /*HEVC_SAO_DBG_MODE0: picGlobalVariable + [31:30]error number + [29:20]error2([9:7]tilex[6:0]ctuy) + [19:10]error1 [9:0]error0*/ + uint32_t detResult = READ_VREG(HEVC_ASSIST_SCRATCH_3); + uint32_t errorIdx; + uint32_t errorNum = (detResult>>30); + + if (detResult) { + hevc_print(hevc, H265_DEBUG_BUFMGR, + "[corrRefillWithAmrisc] detResult=%08x\n", detResult); + for (errorIdx = 0; errorIdx < errorNum; errorIdx++) { + uint32_t errorPos = errorIdx * 10; + uint32_t errorResult = (detResult >> errorPos) & 0x3ff; + uint32_t tilex = (errorResult >> 7) - 1; + uint16_t ctux = hevc->m_tile[0][tilex].start_cu_x + + hevc->m_tile[0][tilex].width - 1; + uint16_t ctuy = (uint16_t)(errorResult & 0x7f); + uint32_t ctuPosition = (ctux<< 16) + ctuy; + hevc_print(hevc, H265_DEBUG_BUFMGR, + "Idx:%d tilex:%d ctu(%d(0x%x), %d(0x%x))\n", + errorIdx,tilex,ctux,ctux, ctuy,ctuy); + corrRefillWithAmrisc( + hevc, + (uint32_t)hevc->cur_pic->header_adr, + hevc->pic_w, + ctuPosition); + } + + WRITE_VREG(HEVC_ASSIST_SCRATCH_3, 0); /*clear status*/ + WRITE_VREG(HEVC_SAO_DBG_MODE0, 0); + WRITE_VREG(HEVC_SAO_DBG_MODE1, 1); + } +} +#endif + +static void get_rpm_param(union param_u *params) +{ + int i; + unsigned int data32; + + for (i = 0; i < 128; i++) { + 
do { + data32 = READ_VREG(RPM_CMD_REG); + /* hevc_print(hevc, 0, "%x\n", data32); */ + } while ((data32 & 0x10000) == 0); + params->l.data[i] = data32 & 0xffff; + /* hevc_print(hevc, 0, "%x\n", data32); */ + WRITE_VREG(RPM_CMD_REG, 0); + } +} + +static int get_free_buf_idx(struct hevc_state_s *hevc) +{ + int index = INVALID_IDX; + struct PIC_s *pic; + int i; + + for (i = 0; i < hevc->used_buf_num; i++) { + pic = hevc->m_PIC[i]; + if ((pic == NULL) || + (pic->index == -1) || + (pic->BUF_index == -1)) + continue; + + if ((pic->output_mark == 0) && + (pic->referenced == 0) && + (pic->output_ready == 0) && + (pic->vf_ref == 0) && + (pic->cma_alloc_addr)) { + pic->output_ready = 1; + index = i; + break; + } + } + + return index; +} + +static struct PIC_s *get_pic_by_POC(struct hevc_state_s *hevc, int POC) +{ + int i; + struct PIC_s *pic; + struct PIC_s *ret_pic = NULL; + if (POC == INVALID_POC) + return NULL; + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1 || + pic->BUF_index == -1) + continue; + if (pic->POC == POC) { + if (ret_pic == NULL) + ret_pic = pic; + else { + if (pic->decode_idx > ret_pic->decode_idx) + ret_pic = pic; + } + } + } + return ret_pic; +} + +static struct PIC_s *get_ref_pic_by_POC(struct hevc_state_s *hevc, int POC) +{ + int i; + struct PIC_s *pic; + struct PIC_s *ret_pic = NULL; + + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1 || + pic->BUF_index == -1) + continue; + /*Add width and height of ref picture detection, + resolved incorrectly referenced frame.*/ + if ((pic->POC == POC) && (pic->referenced) && + (hevc->pic_w == pic->width) && + (hevc->pic_h == pic->height)) { + if (ret_pic == NULL) + ret_pic = pic; + else { + if (pic->decode_idx > ret_pic->decode_idx) + ret_pic = pic; + } + } + } + + return ret_pic; +} + +static unsigned int log2i(unsigned int val) +{ + unsigned int ret = -1; + + while (val != 0) { + val >>= 1; + ret++; + } + 
return ret; +} + +static int init_buf_spec(struct hevc_state_s *hevc); + + +static void uninit_mmu_buffers(struct hevc_state_s *hevc) +{ + if (hevc->mmu_box) { + decoder_mmu_box_free(hevc->mmu_box); + hevc->mmu_box = NULL; + } +#ifdef H265_10B_MMU_DW + if (hevc->mmu_box_dw) { + decoder_mmu_box_free(hevc->mmu_box_dw); + hevc->mmu_box_dw = NULL; + } +#endif + if (hevc->bmmu_box) { + /* release workspace */ + decoder_bmmu_box_free_idx(hevc->bmmu_box, + BMMU_WORKSPACE_ID); + decoder_bmmu_box_free(hevc->bmmu_box); + hevc->bmmu_box = NULL; + } +} + +/* return in MB */ +static int hevc_max_mmu_buf_size(int max_w, int max_h) +{ + int buf_size = 64; + + if ((max_w * max_h) > 0 && + (max_w * max_h) <= 1920*1088) { + buf_size = 24; + } + return buf_size; +} + +static int init_mmu_buffers(struct hevc_state_s *hevc, int bmmu_flag) +{ + int tvp_flag = vdec_secure(hw_to_vdec(hevc)) ? + CODEC_MM_FLAGS_TVP : 0; + int buf_size = hevc_max_mmu_buf_size(hevc->max_pic_w, + hevc->max_pic_h); + + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, "%s max_w %d max_h %d\n", + __func__, hevc->max_pic_w, hevc->max_pic_h); + } + + hevc->need_cache_size = buf_size * SZ_1M; + hevc->sc_start_time = get_jiffies_64(); + if (hevc->mmu_enable && !hevc->is_used_v4l) { + hevc->mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME, + hevc->index, + MAX_REF_PIC_NUM, + buf_size * SZ_1M, + tvp_flag + ); + if (!hevc->mmu_box) { + pr_err("h265 alloc mmu box failed!!\n"); + return -1; + } +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable) { + hevc->mmu_box_dw = decoder_mmu_box_alloc_box(DRIVER_NAME, + hevc->index, + MAX_REF_PIC_NUM, + buf_size * SZ_1M, + tvp_flag + ); + if (!hevc->mmu_box_dw) + goto dw_mmu_box_failed; + } +#endif + } + if (bmmu_flag) + return 0; + + hevc->bmmu_box = decoder_bmmu_box_alloc_box(DRIVER_NAME, + hevc->index, + BMMU_MAX_BUFFERS, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + tvp_flag); + if (!hevc->bmmu_box) + goto bmmu_box_failed; + + return 0; 
+bmmu_box_failed: +#ifdef H265_10B_MMU_DW + if (hevc->mmu_box_dw) + decoder_mmu_box_free(hevc->mmu_box_dw); + hevc->mmu_box_dw = NULL; +dw_mmu_box_failed: +#endif + if (hevc->mmu_box) { + decoder_mmu_box_free(hevc->mmu_box); + } + hevc->mmu_box = NULL; + pr_err("h265 %s failed!!\n", __func__); + return -1; +} + +struct buf_stru_s +{ + int lcu_total; + int mc_buffer_size_h; + int mc_buffer_size_u_v_h; +}; + +#ifndef MV_USE_FIXED_BUF +static void dealloc_mv_bufs(struct hevc_state_s *hevc) +{ + int i; + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + if (hevc->m_mv_BUF[i].start_adr) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, + "dealloc mv buf(%d) adr 0x%p size 0x%x used_flag %d\n", + i, hevc->m_mv_BUF[i].start_adr, + hevc->m_mv_BUF[i].size, + hevc->m_mv_BUF[i].used_flag); + decoder_bmmu_box_free_idx( + hevc->bmmu_box, + MV_BUFFER_IDX(i)); + hevc->m_mv_BUF[i].start_adr = 0; + hevc->m_mv_BUF[i].size = 0; + hevc->m_mv_BUF[i].used_flag = 0; + } + } + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + if (hevc->m_PIC[i] != NULL) + hevc->m_PIC[i]->mv_buf_index = -1; + } +} + +static int alloc_mv_buf(struct hevc_state_s *hevc, int i) +{ + int ret = 0; + /*get_cma_alloc_ref();*/ /*DEBUG_TMP*/ + if (decoder_bmmu_box_alloc_buf_phy + (hevc->bmmu_box, + MV_BUFFER_IDX(i), hevc->mv_buf_size, + DRIVER_NAME, + &hevc->m_mv_BUF[i].start_adr) < 0) { + hevc->m_mv_BUF[i].start_adr = 0; + ret = -1; + } else { + hevc->m_mv_BUF[i].size = hevc->mv_buf_size; + hevc->m_mv_BUF[i].used_flag = 0; + ret = 0; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "MV Buffer %d: start_adr %p size %x\n", + i, + (void *)hevc->m_mv_BUF[i].start_adr, + hevc->m_mv_BUF[i].size); + } + if (!vdec_secure(hw_to_vdec(hevc)) && (hevc->m_mv_BUF[i].start_adr)) { + void *mem_start_virt; + mem_start_virt = + codec_mm_phys_to_virt(hevc->m_mv_BUF[i].start_adr); + if (mem_start_virt) { + memset(mem_start_virt, 0, hevc->m_mv_BUF[i].size); + codec_mm_dma_flush(mem_start_virt, + 
hevc->m_mv_BUF[i].size, DMA_TO_DEVICE); + } else { + mem_start_virt = codec_mm_vmap( + hevc->m_mv_BUF[i].start_adr, + hevc->m_mv_BUF[i].size); + if (mem_start_virt) { + memset(mem_start_virt, 0, hevc->m_mv_BUF[i].size); + codec_mm_dma_flush(mem_start_virt, + hevc->m_mv_BUF[i].size, + DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(mem_start_virt); + } else { + /*not virt for tvp playing, + may need clear on ucode.*/ + pr_err("ref %s mem_start_virt failed\n", __func__); + } + } + } + } + /*put_cma_alloc_ref();*/ /*DEBUG_TMP*/ + return ret; +} +#endif + +static int get_mv_buf(struct hevc_state_s *hevc, struct PIC_s *pic) +{ +#ifdef MV_USE_FIXED_BUF + if (pic && pic->index >= 0) { + int mv_size; + if (IS_8K_SIZE(pic->width, pic->height)) + mv_size = MPRED_8K_MV_BUF_SIZE; + else if (IS_4K_SIZE(pic->width, pic->height)) + mv_size = MPRED_4K_MV_BUF_SIZE; /*0x120000*/ + else + mv_size = MPRED_MV_BUF_SIZE; + + pic->mpred_mv_wr_start_addr = + hevc->work_space_buf->mpred_mv.buf_start + + (pic->index * mv_size); + pic->mv_size = mv_size; + } + return 0; +#else + int i; + int ret = -1; + int new_size; + if (mv_buf_dynamic_alloc) { + int MV_MEM_UNIT = + hevc->lcu_size_log2 == 6 ? 0x200 : hevc->lcu_size_log2 == + 5 ? 
0x80 : 0x20; + int extended_pic_width = (pic->width + hevc->lcu_size -1) + & (~(hevc->lcu_size - 1)); + int extended_pic_height = (pic->height + hevc->lcu_size -1) + & (~(hevc->lcu_size - 1)); + int lcu_x_num = extended_pic_width / hevc->lcu_size; + int lcu_y_num = extended_pic_height / hevc->lcu_size; + new_size = lcu_x_num * lcu_y_num * MV_MEM_UNIT; + hevc->mv_buf_size = (new_size + 0xffff) & (~0xffff); + } else { + if (IS_8K_SIZE(pic->width, pic->height)) + new_size = MPRED_8K_MV_BUF_SIZE; + else if (IS_4K_SIZE(pic->width, pic->height)) + new_size = MPRED_4K_MV_BUF_SIZE; /*0x120000*/ + else + new_size = MPRED_MV_BUF_SIZE; + + if (new_size != hevc->mv_buf_size) { + dealloc_mv_bufs(hevc); + hevc->mv_buf_size = new_size; + } + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + if (hevc->m_mv_BUF[i].start_adr && + hevc->m_mv_BUF[i].used_flag == 0) { + hevc->m_mv_BUF[i].used_flag = 1; + ret = i; + break; + } + } + } + if (ret < 0) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + if (hevc->m_mv_BUF[i].start_adr == 0) { + if (alloc_mv_buf(hevc, i) >= 0) { + hevc->m_mv_BUF[i].used_flag = 1; + ret = i; + } + break; + } + } + } + + if (ret >= 0) { + pic->mv_buf_index = ret; + pic->mv_size = hevc->m_mv_BUF[ret].size; + pic->mpred_mv_wr_start_addr = + (hevc->m_mv_BUF[ret].start_adr + 0xffff) & + (~0xffff); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s => %d (0x%x) size 0x%x\n", + __func__, ret, + pic->mpred_mv_wr_start_addr, + pic->mv_size); + + } else { + hevc_print(hevc, 0, + "%s: Error, mv buf is not enough\n", + __func__); + } + return ret; + +#endif +} + +static void put_mv_buf(struct hevc_state_s *hevc, + struct PIC_s *pic) +{ +#ifndef MV_USE_FIXED_BUF + int i = pic->mv_buf_index; + if (i < 0 || i >= MAX_REF_PIC_NUM) { + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s: index %d beyond range\n", + __func__, i); + return; + } + if (mv_buf_dynamic_alloc) { + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s(%d)\n", + __func__, i); + + decoder_bmmu_box_free_idx( + hevc->bmmu_box, + 
MV_BUFFER_IDX(i)); + hevc->m_mv_BUF[i].start_adr = 0; + hevc->m_mv_BUF[i].size = 0; + hevc->m_mv_BUF[i].used_flag = 0; + pic->mv_buf_index = -1; + return; + } + + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s(%d): used_flag(%d)\n", + __func__, i, + hevc->m_mv_BUF[i].used_flag); + + if (hevc->m_mv_BUF[i].start_adr && + hevc->m_mv_BUF[i].used_flag) + hevc->m_mv_BUF[i].used_flag = 0; + pic->mv_buf_index = -1; +#endif +} + +static int hevc_get_header_size(int w, int h) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (IS_8K_SIZE(w, h))) + return MMU_COMPRESS_HEADER_SIZE_8K; + else if (IS_4K_SIZE(w, h)) + return MMU_COMPRESS_HEADER_SIZE_4K; + else + return MMU_COMPRESS_HEADER_SIZE_1080P; +} + +static int cal_current_buf_size(struct hevc_state_s *hevc, + struct buf_stru_s *buf_stru) +{ + int buf_size; + int pic_width = hevc->pic_w; + int pic_height = hevc->pic_h; + int lcu_size = hevc->lcu_size; + int pic_width_lcu = (pic_width % lcu_size) ? pic_width / lcu_size + + 1 : pic_width / lcu_size; + int pic_height_lcu = (pic_height % lcu_size) ? 
pic_height / lcu_size + + 1 : pic_height / lcu_size; + /*SUPPORT_10BIT*/ + int losless_comp_header_size = compute_losless_comp_header_size + (pic_width, pic_height); + /*always alloc buf for 10bit*/ + int losless_comp_body_size = compute_losless_comp_body_size + (hevc, pic_width, pic_height, 0); + int mc_buffer_size = losless_comp_header_size + + losless_comp_body_size; + int mc_buffer_size_h = (mc_buffer_size + 0xffff) >> 16; + int mc_buffer_size_u_v_h = 0; + + int dw_mode = get_double_write_mode(hevc); + + if (hevc->mmu_enable) + buf_size = hevc_get_header_size(hevc->pic_w, hevc->pic_h); + else + buf_size = 0; +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable) { + buf_size = ((buf_size + 0xffff) >> 16) << 16; + buf_size <<= 1; + } +#endif + if (dw_mode && ((dw_mode & 0x20) == 0)) { + int pic_width_dw = pic_width / + get_double_write_ratio(dw_mode); + int pic_height_dw = pic_height / + get_double_write_ratio(dw_mode); + + int pic_width_lcu_dw = (pic_width_dw % lcu_size) ? + pic_width_dw / lcu_size + 1 : + pic_width_dw / lcu_size; + int pic_height_lcu_dw = (pic_height_dw % lcu_size) ? 
+ pic_height_dw / lcu_size + 1 : + pic_height_dw / lcu_size; + int lcu_total_dw = pic_width_lcu_dw * pic_height_lcu_dw; + + int mc_buffer_size_u_v = lcu_total_dw * lcu_size * lcu_size / 2; + mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16; + /*64k alignment*/ + buf_size += ((mc_buffer_size_u_v_h << 16) * 3); + } + + if ((!hevc->mmu_enable) && + ((dw_mode & 0x10) == 0)) { + /* use compress mode without mmu, + need buf for compress decoding*/ + buf_size += (mc_buffer_size_h << 16); + } + + /*in case start adr is not 64k alignment*/ + if (buf_size > 0) + buf_size += 0x10000; + + if (buf_stru) { + buf_stru->lcu_total = pic_width_lcu * pic_height_lcu; + buf_stru->mc_buffer_size_h = mc_buffer_size_h; + buf_stru->mc_buffer_size_u_v_h = mc_buffer_size_u_v_h; + } + + hevc_print(hevc, PRINT_FLAG_V4L_DETAIL,"pic width: %d, pic height: %d, headr: %d, body: %d, size h: %d, size uvh: %d, buf size: %x\n", + pic_width, pic_height, losless_comp_header_size, + losless_comp_body_size, mc_buffer_size_h, + mc_buffer_size_u_v_h, buf_size); + + return buf_size; +} + +static struct internal_comp_buf* v4lfb_to_icomp_buf( + struct hevc_state_s *hevc, + struct vdec_v4l2_buffer *fb) +{ + struct aml_video_dec_buf *aml_fb = NULL; + struct aml_vcodec_ctx * v4l2_ctx = hevc->v4l2_ctx; + + aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer); + return &v4l2_ctx->comp_bufs[aml_fb->internal_index]; +} + +static struct internal_comp_buf* index_to_icomp_buf( + struct hevc_state_s *hevc, int index) +{ + struct aml_video_dec_buf *aml_fb = NULL; + struct aml_vcodec_ctx * v4l2_ctx = hevc->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + + fb = (struct vdec_v4l2_buffer *) + hevc->m_BUF[index].v4l_ref_buf_addr; + aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer); + return &v4l2_ctx->comp_bufs[aml_fb->internal_index]; +} + +static int v4l_alloc_buf(struct hevc_state_s *hevc, struct PIC_s *pic) +{ + int ret = -1; + int i = pic->index; + struct aml_vcodec_ctx *ctx = 
(struct aml_vcodec_ctx *)hevc->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + + ret = ctx->fb_ops.alloc(&ctx->fb_ops, hevc->fb_token, &fb, AML_FB_REQ_DEC); + if (ret < 0) { + hevc_print(hevc, 0, "[%d] H265 get buffer fail.\n", ctx->id); + return ret; + } + + fb->status = FB_ST_DECODER; + + if (hevc->mmu_enable) { + struct internal_comp_buf *ibuf = v4lfb_to_icomp_buf(hevc, fb); + hevc->m_BUF[i].header_addr = ibuf->header_addr; + } + + hevc->m_BUF[i].used_flag = 0; + hevc->m_BUF[i].v4l_ref_buf_addr = (ulong)fb; + pic->cma_alloc_addr = hevc->m_BUF[i].v4l_ref_buf_addr; + if (fb->num_planes == 1) { + hevc->m_BUF[i].start_adr = fb->m.mem[0].addr; + hevc->m_BUF[i].luma_size = fb->m.mem[0].offset; + hevc->m_BUF[i].size = fb->m.mem[0].size; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + pic->dw_y_adr = hevc->m_BUF[i].start_adr; + pic->dw_u_v_adr = pic->dw_y_adr + hevc->m_BUF[i].luma_size; + pic->luma_size = fb->m.mem[0].offset; + pic->chroma_size = fb->m.mem[0].size - fb->m.mem[0].offset; + } else if (fb->num_planes == 2) { + hevc->m_BUF[i].start_adr = fb->m.mem[0].addr; + hevc->m_BUF[i].luma_size = fb->m.mem[0].size; + hevc->m_BUF[i].chroma_addr = fb->m.mem[1].addr; + hevc->m_BUF[i].chroma_size = fb->m.mem[1].size; + hevc->m_BUF[i].size = fb->m.mem[0].size + fb->m.mem[1].size; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + fb->m.mem[1].bytes_used = fb->m.mem[1].size; + pic->dw_y_adr = hevc->m_BUF[i].start_adr; + pic->dw_u_v_adr = hevc->m_BUF[i].chroma_addr; + pic->luma_size = fb->m.mem[0].size; + pic->chroma_size = fb->m.mem[1].size; + } + + return ret; +} + +static int alloc_buf(struct hevc_state_s *hevc) +{ + int i; + int ret = -1; + int buf_size = cal_current_buf_size(hevc, NULL); + struct vdec_s *vdec = hw_to_vdec(hevc); + + if (hevc->fatal_error & DECODER_FATAL_ERROR_NO_MEM) + return ret; + + for (i = 0; i < BUF_POOL_SIZE; i++) { + if (hevc->m_BUF[i].start_adr == 0) + break; + } + if (i < BUF_POOL_SIZE) { + if (buf_size > 0) { + ret = 
decoder_bmmu_box_alloc_buf_phy + (hevc->bmmu_box, + VF_BUFFER_IDX(i), buf_size, + DRIVER_NAME, + &hevc->m_BUF[i].start_adr); + if (ret < 0) { + hevc->m_BUF[i].start_adr = 0; + if (i <= 8) { + hevc->fatal_error |= + DECODER_FATAL_ERROR_NO_MEM; + hevc_print(hevc, PRINT_FLAG_ERROR, + "%s[%d], size: %d, no mem fatal err\n", + __func__, i, buf_size); + } + } + + if (ret >= 0) { + if (hevc->enable_fence) { + vdec_fence_buffer_count_increase((ulong)vdec->sync); + INIT_LIST_HEAD(&vdec->sync->release_callback[VF_BUFFER_IDX(i)].node); + decoder_bmmu_box_add_callback_func(hevc->bmmu_box, VF_BUFFER_IDX(i), (void *)&vdec->sync->release_callback[VF_BUFFER_IDX(i)]); + } + hevc->m_BUF[i].size = buf_size; + hevc->m_BUF[i].used_flag = 0; + ret = 0; + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "Buffer %d: start_adr %p size %x\n", + i, + (void *)hevc->m_BUF[i].start_adr, + hevc->m_BUF[i].size); + } + /*flush the buffer make sure no cache dirty*/ + if (!vdec_secure(hw_to_vdec(hevc)) && (hevc->m_BUF[i].start_adr)) { + void *mem_start_virt; + mem_start_virt = + codec_mm_phys_to_virt(hevc->m_BUF[i].start_adr); + if (mem_start_virt) { + memset(mem_start_virt, 0, hevc->m_BUF[i].size); + codec_mm_dma_flush(mem_start_virt, + hevc->m_BUF[i].size, DMA_TO_DEVICE); + } else { + codec_mm_memset(hevc->m_BUF[i].start_adr, + 0, hevc->m_BUF[i].size); + } + } + } + /*put_cma_alloc_ref();*/ /*DEBUG_TMP*/ + } else + ret = 0; + } + + if (ret >= 0) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "alloc buf(%d) for %d/%d size 0x%x) => %p\n", + i, hevc->pic_w, hevc->pic_h, + buf_size, + hevc->m_BUF[i].start_adr); + } + } else { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "alloc buf(%d) for %d/%d size 0x%x) => Fail!!!\n", + i, hevc->pic_w, hevc->pic_h, + buf_size); + } + } + return ret; +} + +static void set_buf_unused(struct hevc_state_s *hevc, int i) +{ + if (i >= 0 && i < BUF_POOL_SIZE) + hevc->m_BUF[i].used_flag = 0; +} + 
+static void dealloc_unused_buf(struct hevc_state_s *hevc) +{ + int i; + for (i = 0; i < BUF_POOL_SIZE; i++) { + if (hevc->m_BUF[i].start_adr && + hevc->m_BUF[i].used_flag == 0) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "dealloc buf(%d) adr 0x%p size 0x%x\n", + i, hevc->m_BUF[i].start_adr, + hevc->m_BUF[i].size); + } + if (!hevc->is_used_v4l) + decoder_bmmu_box_free_idx( + hevc->bmmu_box, + VF_BUFFER_IDX(i)); + hevc->m_BUF[i].start_adr = 0; + hevc->m_BUF[i].header_addr = 0; + hevc->m_BUF[i].size = 0; + } + } +} + +static void dealloc_pic_buf(struct hevc_state_s *hevc, + struct PIC_s *pic) +{ + int i = pic->BUF_index; + pic->BUF_index = -1; + if (i >= 0 && + i < BUF_POOL_SIZE && + hevc->m_BUF[i].start_adr) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "dealloc buf(%d) adr 0x%p size 0x%x\n", + i, hevc->m_BUF[i].start_adr, + hevc->m_BUF[i].size); + } + + if (!hevc->is_used_v4l) + decoder_bmmu_box_free_idx( + hevc->bmmu_box, + VF_BUFFER_IDX(i)); + hevc->m_BUF[i].used_flag = 0; + hevc->m_BUF[i].start_adr = 0; + hevc->m_BUF[i].header_addr = 0; + hevc->m_BUF[i].size = 0; + } +} + +static int get_work_pic_num(struct hevc_state_s *hevc) +{ + int used_buf_num = 0; + int sps_pic_buf_diff = 0; + + if (get_dynamic_buf_num_margin(hevc) > 0) { + if ((!hevc->sps_num_reorder_pics_0) && + (hevc->param.p.sps_max_dec_pic_buffering_minus1_0)) { + /* the range of sps_num_reorder_pics_0 is in + [0, sps_max_dec_pic_buffering_minus1_0] */ + used_buf_num = get_dynamic_buf_num_margin(hevc) + + hevc->param.p.sps_max_dec_pic_buffering_minus1_0; + } else + used_buf_num = hevc->sps_num_reorder_pics_0 + + get_dynamic_buf_num_margin(hevc); + + sps_pic_buf_diff = hevc->param.p.sps_max_dec_pic_buffering_minus1_0 + - hevc->sps_num_reorder_pics_0; +#ifdef MULTI_INSTANCE_SUPPORT + /* + need one more for multi instance, as + apply_ref_pic_set() has no chanch to run to + to clear referenced flag in some case + */ + if (hevc->m_ins_flag) + 
used_buf_num++; +#endif + } else + used_buf_num = max_buf_num; + + if (hevc->save_buffer_mode) + hevc_print(hevc, 0, + "save buf _mode : dynamic_buf_num_margin %d ----> %d \n", + dynamic_buf_num_margin, hevc->dynamic_buf_num_margin); + + if (sps_pic_buf_diff >= 3) + used_buf_num += sps_pic_buf_diff; + + if (hevc->is_used_v4l) { + /* for eos add more buffer to flush.*/ + used_buf_num++; + } + + if (used_buf_num > MAX_BUF_NUM) + used_buf_num = MAX_BUF_NUM; + return used_buf_num; +} + +static int v4l_parser_work_pic_num(struct hevc_state_s *hevc) +{ + int used_buf_num = 0; + int sps_pic_buf_diff = 0; + pr_debug("margin = %d, sps_max_dec_pic_buffering_minus1_0 = %d, sps_num_reorder_pics_0 = %d\n", + get_dynamic_buf_num_margin(hevc), + hevc->param.p.sps_max_dec_pic_buffering_minus1_0, + hevc->param.p.sps_num_reorder_pics_0); + if (get_dynamic_buf_num_margin(hevc) > 0) { + if ((!hevc->param.p.sps_num_reorder_pics_0) && + (hevc->param.p.sps_max_dec_pic_buffering_minus1_0)) { + /* the range of sps_num_reorder_pics_0 is in + [0, sps_max_dec_pic_buffering_minus1_0] */ + used_buf_num = get_dynamic_buf_num_margin(hevc) + + hevc->param.p.sps_max_dec_pic_buffering_minus1_0; + } else + used_buf_num = hevc->param.p.sps_num_reorder_pics_0 + + get_dynamic_buf_num_margin(hevc); + + sps_pic_buf_diff = hevc->param.p.sps_max_dec_pic_buffering_minus1_0 + - hevc->param.p.sps_num_reorder_pics_0; +#ifdef MULTI_INSTANCE_SUPPORT + /* + need one more for multi instance, as + apply_ref_pic_set() has no chanch to run to + to clear referenced flag in some case + */ + if (hevc->m_ins_flag) + used_buf_num++; +#endif + } else + used_buf_num = max_buf_num; + + if (hevc->save_buffer_mode) + hevc_print(hevc, 0, + "save buf _mode : dynamic_buf_num_margin %d ----> %d \n", + dynamic_buf_num_margin, hevc->dynamic_buf_num_margin); + + if (sps_pic_buf_diff >= 3) + used_buf_num += sps_pic_buf_diff; + + /* for eos add more buffer to flush.*/ + used_buf_num++; + + if (used_buf_num > MAX_BUF_NUM) + used_buf_num 
= MAX_BUF_NUM; + return used_buf_num; +} + + +static int get_alloc_pic_count(struct hevc_state_s *hevc) +{ + int alloc_pic_count = 0; + int i; + struct PIC_s *pic; + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic && pic->index >= 0) + alloc_pic_count++; + } + return alloc_pic_count; +} + +static int v4l_config_pic(struct hevc_state_s *hevc, struct PIC_s *pic) +{ + int i = pic->index; + int dw_mode = get_double_write_mode(hevc); + + if (hevc->mmu_enable) + pic->header_adr = hevc->m_BUF[i].header_addr; + + pic->BUF_index = i; + pic->POC = INVALID_POC; + pic->mc_canvas_y = pic->index; + pic->mc_canvas_u_v = pic->index; + + if (dw_mode & 0x10) { + pic->mc_canvas_y = (pic->index << 1); + pic->mc_canvas_u_v = (pic->index << 1) + 1; + pic->mc_y_adr = pic->dw_y_adr; + pic->mc_u_v_adr = pic->dw_u_v_adr; + } + + return 0; +} + +static int config_pic(struct hevc_state_s *hevc, struct PIC_s *pic) +{ + int ret = -1; + int i; + /*int lcu_size_log2 = hevc->lcu_size_log2; + int MV_MEM_UNIT=lcu_size_log2== + 6 ? 0x100 : lcu_size_log2==5 ? 0x40 : 0x10;*/ + /*int MV_MEM_UNIT = lcu_size_log2 == 6 ? 0x200 : lcu_size_log2 == + 5 ? 
0x80 : 0x20; + int mpred_mv_end = hevc->work_space_buf->mpred_mv.buf_start + + hevc->work_space_buf->mpred_mv.buf_size;*/ + unsigned int y_adr = 0; + struct buf_stru_s buf_stru; + int buf_size = cal_current_buf_size(hevc, &buf_stru); + int dw_mode = get_double_write_mode(hevc); + + for (i = 0; i < BUF_POOL_SIZE; i++) { + if (hevc->m_BUF[i].start_adr != 0 && + hevc->m_BUF[i].used_flag == 0 && + buf_size <= hevc->m_BUF[i].size) { + hevc->m_BUF[i].used_flag = 1; + break; + } + } + + if (i >= BUF_POOL_SIZE) + return -1; + + if (hevc->mmu_enable) { + pic->header_adr = hevc->m_BUF[i].start_adr; + y_adr = hevc->m_BUF[i].start_adr + + hevc_get_header_size(hevc->pic_w, hevc->pic_h); + } else + y_adr = hevc->m_BUF[i].start_adr; + + y_adr = ((y_adr + 0xffff) >> 16) << 16; /*64k alignment*/ + +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable) { +#ifdef USE_FIXED_MMU_DW_HEADER + pic->header_dw_adr = hevc->work_space_buf->cm_header_dw.buf_start + + (i * hevc_get_header_size(hevc->pic_w, hevc->pic_h)); +#else + pic->header_dw_adr = y_adr; + y_adr = pic->header_dw_adr + + hevc_get_header_size(hevc->pic_w, hevc->pic_h); +#endif + hevc_print(hevc, H265_DEBUG_BUFMGR, + "MMU header_dw_adr %d: %x\n", pic->header_dw_adr); + } +#endif + + pic->POC = INVALID_POC; + /*ensure get_pic_by_POC() + not get the buffer not decoded*/ + pic->BUF_index = i; + + if ((!hevc->mmu_enable) && + ((dw_mode & 0x10) == 0)) { + pic->mc_y_adr = y_adr; + y_adr += (buf_stru.mc_buffer_size_h << 16); + } + pic->mc_canvas_y = pic->index; + pic->mc_canvas_u_v = pic->index; + if (dw_mode & 0x10) { + pic->mc_y_adr = y_adr; + pic->mc_u_v_adr = y_adr + + ((buf_stru.mc_buffer_size_u_v_h << 16) << 1); + pic->mc_canvas_y = (pic->index << 1); + pic->mc_canvas_u_v = (pic->index << 1) + 1; + + pic->dw_y_adr = pic->mc_y_adr; + pic->dw_u_v_adr = pic->mc_u_v_adr; + } else if (dw_mode && (dw_mode & 0x20) == 0) { + pic->dw_y_adr = y_adr; + pic->dw_u_v_adr = pic->dw_y_adr + + ((buf_stru.mc_buffer_size_u_v_h << 16) << 1); + } + + if 
(get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "%s index %d BUF_index %d mc_y_adr %x\n", + __func__, pic->index, + pic->BUF_index, pic->mc_y_adr); + if (hevc->mmu_enable && dw_mode) + hevc_print(hevc, 0, + "mmu double write adr %ld\n", + pic->cma_alloc_addr); + } + ret = 0; + + return ret; +} + +static void init_pic_list(struct hevc_state_s *hevc) +{ + int i; + int init_buf_num = get_work_pic_num(hevc); + int dw_mode = get_double_write_mode(hevc); + struct vdec_s *vdec = hw_to_vdec(hevc); + /*alloc decoder buf will be delay if work on v4l. */ + if (!hevc->is_used_v4l) { + for (i = 0; i < init_buf_num; i++) { + if (alloc_buf(hevc) < 0) { + if (i <= 8) { + /*if alloced (i+1)>=9 + don't send errors.*/ + hevc->fatal_error |= + DECODER_FATAL_ERROR_NO_MEM; + } + break; + } + } + } + + for (i = 0; i < init_buf_num; i++) { + struct PIC_s *pic = hevc->m_PIC[i]; + + if (!pic) { + pic = vmalloc(sizeof(struct PIC_s)); + if (pic == NULL) { + hevc_print(hevc, 0, + "%s: alloc pic %d fail!!!\n", + __func__, i); + break; + } + hevc->m_PIC[i] = pic; + } + memset(pic, 0, sizeof(struct PIC_s)); + + pic->index = i; + pic->BUF_index = -1; + pic->mv_buf_index = -1; + if (vdec->parallel_dec == 1) { + pic->y_canvas_index = -1; + pic->uv_canvas_index = -1; + } + + pic->width = hevc->pic_w; + pic->height = hevc->pic_h; + pic->double_write_mode = dw_mode; + pic->POC = INVALID_POC; + + /*config canvas will be delay if work on v4l. 
*/ + if (!hevc->is_used_v4l) { + if (config_pic(hevc, pic) < 0) { + if (get_dbg_flag(hevc)) + hevc_print(hevc, 0, + "Config_pic %d fail\n", pic->index); + pic->index = -1; + i++; + break; + } + + if (pic->double_write_mode) + set_canvas(hevc, pic); + } + } +} + +static void uninit_pic_list(struct hevc_state_s *hevc) +{ + struct vdec_s *vdec = hw_to_vdec(hevc); + int i; +#ifndef MV_USE_FIXED_BUF + dealloc_mv_bufs(hevc); +#endif + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + struct PIC_s *pic = hevc->m_PIC[i]; + + if (pic) { + if (vdec->parallel_dec == 1) { + vdec->free_canvas_ex(pic->y_canvas_index, vdec->id); + vdec->free_canvas_ex(pic->uv_canvas_index, vdec->id); + } + release_aux_data(hevc, pic); + vfree(pic); + hevc->m_PIC[i] = NULL; + } + } +} + +#ifdef LOSLESS_COMPRESS_MODE +static void init_decode_head_hw(struct hevc_state_s *hevc) +{ + + struct BuffInfo_s *buf_spec = hevc->work_space_buf; + unsigned int data32; + + int losless_comp_header_size = + compute_losless_comp_header_size(hevc->pic_w, + hevc->pic_h); + int losless_comp_body_size = compute_losless_comp_body_size(hevc, + hevc->pic_w, hevc->pic_h, hevc->mem_saving_mode); + + hevc->losless_comp_body_size = losless_comp_body_size; + + + if (hevc->mmu_enable) { + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4)); + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0); + } else { + if (hevc->mem_saving_mode == 1) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, + (1 << 3) | ((workaround_enable & 2) ? 1 : 0)); + else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, + ((workaround_enable & 2) ? 
1 : 0)); + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, (losless_comp_body_size >> 5)); + /* + *WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,(0xff<<20) | (0xff<<10) | 0xff); + * //8-bit mode + */ + } + WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size); + + if (hevc->mmu_enable) { + WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR, buf_spec->mmu_vbh.buf_start); + WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR, + buf_spec->mmu_vbh.buf_start + + VBH_BUF_SIZE(buf_spec)); + data32 = READ_VREG(HEVC_SAO_CTRL9); + data32 |= 0x1; + WRITE_VREG(HEVC_SAO_CTRL9, data32); + + /* use HEVC_CM_HEADER_START_ADDR */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 |= (1<<10); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable) { + u32 data_tmp; + data_tmp = READ_VREG(HEVC_SAO_CTRL9); + data_tmp |= (1 << 10); + WRITE_VREG(HEVC_SAO_CTRL9, data_tmp); + + WRITE_VREG(HEVC_CM_BODY_LENGTH2, + losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET2, + losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH2, + losless_comp_header_size); + + WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR2, + buf_spec->mmu_vbh_dw.buf_start); + WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR2, + buf_spec->mmu_vbh_dw.buf_start + DW_VBH_BUF_SIZE(buf_spec)); + WRITE_VREG(HEVC_DW_VH0_ADDDR, + buf_spec->mmu_vbh_dw.buf_start + (2 * DW_VBH_BUF_SIZE(buf_spec))); + WRITE_VREG(HEVC_DW_VH1_ADDDR, + buf_spec->mmu_vbh_dw.buf_start + (3 * DW_VBH_BUF_SIZE(buf_spec))); + /* use HEVC_CM_HEADER_START_ADDR */ + data32 |= (1 << 15); + } else + data32 &= ~(1 << 15); + WRITE_VREG(HEVC_SAO_CTRL5, data32); +#endif + if (!hevc->m_ins_flag) + hevc_print(hevc, 0, + "%s: (%d, %d) body_size 0x%x header_size 0x%x\n", + __func__, hevc->pic_w, hevc->pic_h, + losless_comp_body_size, losless_comp_header_size); + +} +#endif +#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464 + +static void init_pic_list_hw(struct hevc_state_s *hevc) +{ + int i; + int 
cur_pic_num = MAX_REF_PIC_NUM; + int dw_mode = get_double_write_mode(hevc); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (0x1 << 2)); + else + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x0); + + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + if (hevc->m_PIC[i] == NULL || + hevc->m_PIC[i]->index == -1) { + cur_pic_num = i; + break; + } + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) { + if (hevc->mmu_enable && ((dw_mode & 0x10) == 0)) + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + hevc->m_PIC[i]->header_adr>>5); + else + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + hevc->m_PIC[i]->mc_y_adr >> 5); + } else + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + hevc->m_PIC[i]->mc_y_adr | + (hevc->m_PIC[i]->mc_canvas_y << 8) | 0x1); + if (dw_mode & 0x10) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) { + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + hevc->m_PIC[i]->mc_u_v_adr >> 5); + } + else + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + hevc->m_PIC[i]->mc_u_v_adr | + (hevc->m_PIC[i]->mc_canvas_u_v << 8) + | 0x1); + } + } + if (cur_pic_num == 0) + return; + + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1); + + /* Zero out canvas registers in IPP -- avoid simulation X */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 1); + for (i = 0; i < 32; i++) + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); + +#ifdef LOSLESS_COMPRESS_MODE + if ((dw_mode & 0x10) == 0) + init_decode_head_hw(hevc); +#endif + +} + + +static void dump_pic_list(struct hevc_state_s *hevc) +{ + int i; + struct PIC_s *pic; + + hevc_print(hevc, 0, + "pic_list_init_flag is %d\r\n", hevc->pic_list_init_flag); + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + hevc_print_cont(hevc, 0, + "index %d buf_idx %d mv_idx %d decode_idx:%d, POC:%d, referenced:%d (LT %d), ", + pic->index, pic->BUF_index, +#ifndef MV_USE_FIXED_BUF + pic->mv_buf_index, +#else + 
-1, +#endif + pic->decode_idx, pic->POC, pic->referenced +#ifdef SUPPORT_LONG_TERM_RPS + , pic->long_term_ref +#else + , 0 +#endif + ); + hevc_print_cont(hevc, 0, + "num_reorder_pic:%d, output_mark:%d, error_mark:%d w/h %d,%d", + pic->num_reorder_pic, pic->output_mark, pic->error_mark, + pic->width, pic->height); + hevc_print_cont(hevc, 0, + "output_ready:%d, mv_wr_start %x vf_ref %d\n", + pic->output_ready, pic->mpred_mv_wr_start_addr, + pic->vf_ref); + } +} + +static void clear_referenced_flag(struct hevc_state_s *hevc) +{ + int i; + struct PIC_s *pic; + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if (pic->referenced) { + pic->referenced = 0; + put_mv_buf(hevc, pic); + } + } +} + +static void clear_poc_flag(struct hevc_state_s *hevc) +{ + int i; + struct PIC_s *pic; + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + pic->POC = INVALID_POC; + } +} + +static struct PIC_s *output_pic(struct hevc_state_s *hevc, + unsigned char flush_flag) +{ + int num_pic_not_yet_display = 0; + int i, fisrt_pic_flag = 0; + struct PIC_s *pic; + struct PIC_s *pic_display = NULL; + struct vdec_s *vdec = hw_to_vdec(hevc); + + if (hevc->i_only & 0x4) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || + (pic->index == -1) || + (pic->BUF_index == -1) || + (pic->POC == INVALID_POC)) + continue; + if (pic->output_mark) { + if (pic_display) { + if (pic->decode_idx < + pic_display->decode_idx) + pic_display = pic; + + } else + pic_display = pic; + + } + } + if (pic_display) { + pic_display->output_mark = 0; + pic_display->recon_mark = 0; + pic_display->output_ready = 1; + pic_display->referenced = 0; + put_mv_buf(hevc, pic_display); + } + } else { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || + (pic->index == -1) || + (pic->BUF_index == -1) || + (pic->POC == INVALID_POC)) + 
continue; + if (pic->output_mark) + num_pic_not_yet_display++; + if (pic->slice_type == 2 && + hevc->vf_pre_count == 0 && + fast_output_enable & 0x1) { + /*fast output for first I picture*/ + pic->num_reorder_pic = 0; + if (vdec->master || vdec->slave) + pic_display = pic; + fisrt_pic_flag = 1; + hevc_print(hevc, 0, "VH265: output first frame\n"); + } + } + + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || + (pic->index == -1) || + (pic->BUF_index == -1) || + (pic->POC == INVALID_POC)) + continue; + if (pic->output_mark) { + if (pic_display) { + if (pic->POC < pic_display->POC) + pic_display = pic; + else if ((pic->POC == pic_display->POC) + && (pic->decode_idx < + pic_display-> + decode_idx)) + pic_display + = pic; + } else + pic_display = pic; + } + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + /* dv wait cur_pic all data get, + some data may get after picture output */ + if ((vdec->master || vdec->slave) + && (pic_display == hevc->cur_pic) && + (!flush_flag) && + (hevc->bypass_dvenl && !dolby_meta_with_el) + && (!fisrt_pic_flag)) + pic_display = NULL; +#endif + if (pic_display) { + if ((num_pic_not_yet_display > + pic_display->num_reorder_pic) + || flush_flag) { + pic_display->output_mark = 0; + pic_display->recon_mark = 0; + pic_display->output_ready = 1; + } else if (num_pic_not_yet_display >= + (MAX_REF_PIC_NUM - 1)) { + pic_display->output_mark = 0; + pic_display->recon_mark = 0; + pic_display->output_ready = 1; + hevc_print(hevc, 0, + "Warning, num_reorder_pic %d is byeond buf num\n", + pic_display->num_reorder_pic); + } else + pic_display = NULL; + } + } + + if (pic_display && hevc->sps_num_reorder_pics_0 && + (hevc->vf_pre_count == 1) && (hevc->first_pic_flag == 1)) { + pic_display = NULL; + hevc->first_pic_flag = 2; + } + return pic_display; +} + +static int config_mc_buffer(struct hevc_state_s *hevc, struct PIC_s *cur_pic) +{ + int i; + struct PIC_s *pic; + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + 
hevc_print(hevc, 0, + "config_mc_buffer entered .....\n"); + if (cur_pic->slice_type != 2) { /* P and B pic */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 1); + for (i = 0; i < cur_pic->RefNum_L0; i++) { + pic = + get_ref_pic_by_POC(hevc, + cur_pic-> + m_aiRefPOCList0[cur_pic-> + slice_idx][i]); + if (pic) { + if ((pic->width != hevc->pic_w) || + (pic->height != hevc->pic_h)) { + hevc_print(hevc, 0, + "%s: Wrong reference pic (poc %d) width/height %d/%d\n", + __func__, pic->POC, + pic->width, pic->height); + cur_pic->error_mark = 1; + } + if (pic->error_mark && (ref_frame_mark_flag[hevc->index])) + cur_pic->error_mark = 1; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v << 16) + | (pic->mc_canvas_u_v + << 8) | + pic->mc_canvas_y); + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print_cont(hevc, 0, + "refid %x mc_canvas_u_v %x", + i, pic->mc_canvas_u_v); + hevc_print_cont(hevc, 0, + " mc_canvas_y %x\n", + pic->mc_canvas_y); + } + } else + cur_pic->error_mark = 1; + + if (pic == NULL || pic->error_mark) { + hevc_print(hevc, 0, + "Error %s, %dth poc (%d) %s", + __func__, i, + cur_pic->m_aiRefPOCList0[cur_pic-> + slice_idx][i], + pic ? 
"has error" : + "not in list0"); + } + } + } + if (cur_pic->slice_type == 0) { /* B pic */ + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, + "config_mc_buffer RefNum_L1\n"); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (0 << 1) | 1); + + for (i = 0; i < cur_pic->RefNum_L1; i++) { + pic = + get_ref_pic_by_POC(hevc, + cur_pic-> + m_aiRefPOCList1[cur_pic-> + slice_idx][i]); + if (pic) { + if ((pic->width != hevc->pic_w) || + (pic->height != hevc->pic_h)) { + hevc_print(hevc, 0, + "%s: Wrong reference pic (poc %d) width/height %d/%d\n", + __func__, pic->POC, + pic->width, pic->height); + cur_pic->error_mark = 1; + } + + if (pic->error_mark && (ref_frame_mark_flag[hevc->index])) + cur_pic->error_mark = 1; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v << 16) + | (pic->mc_canvas_u_v + << 8) | + pic->mc_canvas_y); + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print_cont(hevc, 0, + "refid %x mc_canvas_u_v %x", + i, pic->mc_canvas_u_v); + hevc_print_cont(hevc, 0, + " mc_canvas_y %x\n", + pic->mc_canvas_y); + } + } else + cur_pic->error_mark = 1; + + if (pic == NULL || pic->error_mark) { + hevc_print(hevc, 0, + "Error %s, %dth poc (%d) %s", + __func__, i, + cur_pic->m_aiRefPOCList1[cur_pic-> + slice_idx][i], + pic ? 
"has error" : + "not in list1"); + } + } + } + return 0; +} + +#ifdef SUPPORT_LONG_TERM_RPS +static unsigned char is_ref_long_term(struct hevc_state_s *hevc, int poc) +{ + int ii; + struct PIC_s *pic; + for (ii = 0; ii < MAX_REF_PIC_NUM; ii++) { + pic = hevc->m_PIC[ii]; + if (pic == NULL || + pic->index == -1 || + pic->BUF_index == -1 + ) + continue; + + if (pic->referenced && pic->POC == poc + && pic->long_term_ref) + return 1; + } + return 0; +} + +#endif + +static void apply_ref_pic_set(struct hevc_state_s *hevc, int cur_poc, + union param_u *params) +{ + int ii, i; + int poc_tmp; + struct PIC_s *pic; + unsigned char is_referenced; + /* hevc_print(hevc, 0, + "%s cur_poc %d\n", __func__, cur_poc); */ + if (pic_list_debug & 0x2) { + pr_err("cur poc %d\n", cur_poc); + } + for (ii = 0; ii < MAX_REF_PIC_NUM; ii++) { + pic = hevc->m_PIC[ii]; + if (pic == NULL || + pic->index == -1 || + pic->BUF_index == -1 + ) + continue; + +#ifdef SUPPORT_LONG_TERM_RPS + pic->long_term_ref = 0; +#endif + if ((pic->referenced == 0 || pic->POC == cur_poc)) + continue; + is_referenced = 0; + + for (i = 0; i < 16; i++) { + int delt; +#ifdef SUPPORT_LONG_TERM_RPS + if (params->p.CUR_RPS[i] == RPS_END) + break; +#else + if (params->p.CUR_RPS[i] & 0x8000) + break; +#endif + delt = + params->p.CUR_RPS[i] & + ((1 << (RPS_USED_BIT - 1)) - 1); + if (params->p.CUR_RPS[i] & (1 << (RPS_USED_BIT - 1))) { + poc_tmp = + cur_poc - ((1 << (RPS_USED_BIT - 1)) - + delt); + } else + poc_tmp = cur_poc + delt; + if (poc_tmp == pic->POC) { +#ifdef SUPPORT_LONG_TERM_RPS + if (params->p.CUR_RPS[i] & (1 << (RPS_LT_BIT))) + pic->long_term_ref = 1; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) + hevc_print(hevc, 0, "%d: CUR_RPS 0x%x, LT %d\n", + i, params->p.CUR_RPS[i], + pic->long_term_ref); +#endif + is_referenced = 1; + break; + } + } + if (is_referenced == 0) { + pic->referenced = 0; + put_mv_buf(hevc, pic); + /* hevc_print(hevc, 0, + "set poc %d reference to 0\n", pic->POC); */ + if (pic_list_debug & 
0x2) { + pr_err("set poc %d reference to 0\n", pic->POC); + } + } + } + +} + +static void set_ref_pic_list(struct hevc_state_s *hevc, union param_u *params) +{ + struct PIC_s *pic = hevc->cur_pic; + int i, rIdx; + int num_neg = 0; + int num_pos = 0; + int total_num; + int num_ref_idx_l0_active = + (params->p.num_ref_idx_l0_active > + MAX_REF_ACTIVE) ? MAX_REF_ACTIVE : + params->p.num_ref_idx_l0_active; + int num_ref_idx_l1_active = + (params->p.num_ref_idx_l1_active > + MAX_REF_ACTIVE) ? MAX_REF_ACTIVE : + params->p.num_ref_idx_l1_active; + + int RefPicSetStCurr0[16]; + int RefPicSetStCurr1[16]; +#ifdef SUPPORT_LONG_TERM_RPS + int num_lt = 0; + int RefPicSetLtCurr[16]; +#endif + + for (i = 0; i < 16; i++) { + RefPicSetStCurr0[i] = 0; + RefPicSetStCurr1[i] = 0; + pic->m_aiRefPOCList0[pic->slice_idx][i] = 0; + pic->m_aiRefPOCList1[pic->slice_idx][i] = 0; + } + for (i = 0; i < 16; i++) { +#ifdef SUPPORT_LONG_TERM_RPS + if (params->p.CUR_RPS[i] == RPS_END) + break; +#else + if (params->p.CUR_RPS[i] & 0x8000) + break; +#endif + if ((params->p.CUR_RPS[i] >> RPS_USED_BIT) & 1) { + int delt = + params->p.CUR_RPS[i] & + ((1 << (RPS_USED_BIT - 1)) - 1); + + if ((params->p.CUR_RPS[i] >> (RPS_USED_BIT - 1)) & 1) { +#ifdef SUPPORT_LONG_TERM_RPS + if ((params->p.CUR_RPS[i] >> RPS_LT_BIT) & 1) { + RefPicSetLtCurr[num_lt] = + pic->POC - ((1 << (RPS_USED_BIT - 1)) - + delt); + num_lt++; + continue; + } +#endif + + RefPicSetStCurr0[num_neg] = + pic->POC - ((1 << (RPS_USED_BIT - 1)) - + delt); + /* hevc_print(hevc, 0, + * "RefPicSetStCurr0 %x %x %x\n", + * RefPicSetStCurr0[num_neg], pic->POC, + * (0x800-(params[i]&0x7ff))); + */ + num_neg++; + } else { +#ifdef SUPPORT_LONG_TERM_RPS + if ((params->p.CUR_RPS[i] >> RPS_LT_BIT) & 1) { + RefPicSetLtCurr[num_lt] = pic->POC + delt; + num_lt++; + continue; + } +#endif + RefPicSetStCurr1[num_pos] = pic->POC + delt; + /* hevc_print(hevc, 0, + * "RefPicSetStCurr1 %d\n", + * RefPicSetStCurr1[num_pos]); + */ + num_pos++; + } + } + } +#ifdef 
SUPPORT_LONG_TERM_RPS + total_num = num_neg + num_pos + num_lt; +#else + total_num = num_neg + num_pos; +#endif + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "%s: curpoc %d slice_type %d, total %d ", + __func__, pic->POC, params->p.slice_type, total_num); +#ifdef SUPPORT_LONG_TERM_RPS + hevc_print_cont(hevc, 0, + "num_neg %d num_lt %d num_list0 %d num_list1 %d\n", + num_neg, num_lt, num_ref_idx_l0_active, num_ref_idx_l1_active); +#else + hevc_print_cont(hevc, 0, + "num_neg %d num_list0 %d num_list1 %d\n", + num_neg, num_ref_idx_l0_active, num_ref_idx_l1_active); +#endif + + } + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "HEVC Stream buf start "); + hevc_print_cont(hevc, 0, + "%x end %x wr %x rd %x lev %x ctl %x intctl %x\n", + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_FIFO_CTL), + READ_VREG(HEVC_PARSER_INT_CONTROL)); + } + + if (total_num > 0) { + if (params->p.modification_flag & 0x1) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, "ref0 POC (modification):"); + for (rIdx = 0; rIdx < num_ref_idx_l0_active; rIdx++) { + int cIdx = params->p.modification_list[rIdx]; + + pic->m_aiRefPOCList0[pic->slice_idx][rIdx] = +#ifdef SUPPORT_LONG_TERM_RPS + cIdx >= (num_neg + num_pos) ? + RefPicSetLtCurr[cIdx - num_neg - num_pos] : +#endif + (cIdx >= + num_neg ? RefPicSetStCurr1[cIdx - + num_neg] : + RefPicSetStCurr0[cIdx]); + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print_cont(hevc, 0, "%d ", + pic->m_aiRefPOCList0[pic-> + slice_idx] + [rIdx]); + } + } + } else { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, "ref0 POC:"); + for (rIdx = 0; rIdx < num_ref_idx_l0_active; rIdx++) { + int cIdx = rIdx % total_num; + + pic->m_aiRefPOCList0[pic->slice_idx][rIdx] = +#ifdef SUPPORT_LONG_TERM_RPS + cIdx >= (num_neg + num_pos) ? 
+ RefPicSetLtCurr[cIdx - num_neg - num_pos] : +#endif + (cIdx >= + num_neg ? RefPicSetStCurr1[cIdx - + num_neg] : + RefPicSetStCurr0[cIdx]); + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print_cont(hevc, 0, "%d ", + pic->m_aiRefPOCList0[pic-> + slice_idx] + [rIdx]); + } + } + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print_cont(hevc, 0, "\n"); + if (params->p.slice_type == B_SLICE) { + if (params->p.modification_flag & 0x2) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, + "ref1 POC (modification):"); + for (rIdx = 0; rIdx < num_ref_idx_l1_active; + rIdx++) { + int cIdx; + + if (params->p.modification_flag & 0x1) { + cIdx = + params->p. + modification_list + [num_ref_idx_l0_active + + rIdx]; + } else { + cIdx = + params->p. + modification_list[rIdx]; + } + pic->m_aiRefPOCList1[pic-> + slice_idx][rIdx] = +#ifdef SUPPORT_LONG_TERM_RPS + cIdx >= (num_neg + num_pos) ? + RefPicSetLtCurr[cIdx - num_neg - num_pos] : +#endif + (cIdx >= + num_pos ? + RefPicSetStCurr0[cIdx - num_pos] + : RefPicSetStCurr1[cIdx]); + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + hevc_print_cont(hevc, 0, "%d ", + pic-> + m_aiRefPOCList1[pic-> + slice_idx] + [rIdx]); + } + } + } else { + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, "ref1 POC:"); + for (rIdx = 0; rIdx < num_ref_idx_l1_active; + rIdx++) { + int cIdx = rIdx % total_num; + + pic->m_aiRefPOCList1[pic-> + slice_idx][rIdx] = +#ifdef SUPPORT_LONG_TERM_RPS + cIdx >= (num_neg + num_pos) ? + RefPicSetLtCurr[cIdx - num_neg - num_pos] : +#endif + (cIdx >= + num_pos ? + RefPicSetStCurr0[cIdx - + num_pos] + : RefPicSetStCurr1[cIdx]); + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + hevc_print_cont(hevc, 0, "%d ", + pic-> + m_aiRefPOCList1[pic-> + slice_idx] + [rIdx]); + } + } + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print_cont(hevc, 0, "\n"); + } + } + /*set m_PIC */ + pic->slice_type = (params->p.slice_type == I_SLICE) ? 
2 : + (params->p.slice_type == P_SLICE) ? 1 : + (params->p.slice_type == B_SLICE) ? 0 : 3; + pic->RefNum_L0 = num_ref_idx_l0_active; + pic->RefNum_L1 = num_ref_idx_l1_active; +} + +static void update_tile_info(struct hevc_state_s *hevc, int pic_width_cu, + int pic_height_cu, int sao_mem_unit, + union param_u *params) +{ + int i, j; + int start_cu_x, start_cu_y; + int sao_vb_size = (sao_mem_unit + (2 << 4)) * pic_height_cu; + int sao_abv_size = sao_mem_unit * pic_width_cu; +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + int tmpRefillLcuSize = 1 << + (params->p.log2_min_coding_block_size_minus3 + + 3 + params->p.log2_diff_max_min_coding_block_size); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%x, %x, %x, %x\n", + params->p.slice_segment_address, + params->p.bit_depth, + params->p.tiles_enabled_flag, + tmpRefillLcuSize); + if (params->p.slice_segment_address == 0 && + params->p.bit_depth != 0 && + (params->p.tiles_enabled_flag & 1) && + tmpRefillLcuSize == 64) + hevc->delrefill_check = 1; + else + hevc->delrefill_check = 0; + } +#endif + + hevc->tile_enabled = params->p.tiles_enabled_flag & 1; + if (params->p.tiles_enabled_flag & 1) { + hevc->num_tile_col = params->p.num_tile_columns_minus1 + 1; + hevc->num_tile_row = params->p.num_tile_rows_minus1 + 1; + + if (hevc->num_tile_row > MAX_TILE_ROW_NUM + || hevc->num_tile_row <= 0) { + hevc->num_tile_row = 1; + hevc_print(hevc, 0, + "%s: num_tile_rows_minus1 (%d) error!!\n", + __func__, params->p.num_tile_rows_minus1); + } + if (hevc->num_tile_col > MAX_TILE_COL_NUM + || hevc->num_tile_col <= 0) { + hevc->num_tile_col = 1; + hevc_print(hevc, 0, + "%s: num_tile_columns_minus1 (%d) error!!\n", + __func__, params->p.num_tile_columns_minus1); + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "%s pic_w_cu %d pic_h_cu %d tile_enabled ", + __func__, pic_width_cu, pic_height_cu); + hevc_print_cont(hevc, 0, + "num_tile_col %d num_tile_row %d:\n", + 
hevc->num_tile_col, hevc->num_tile_row); + } + + if (params->p.tiles_enabled_flag & 2) { /* uniform flag */ + int w = pic_width_cu / hevc->num_tile_col; + int h = pic_height_cu / hevc->num_tile_row; + + start_cu_y = 0; + for (i = 0; i < hevc->num_tile_row; i++) { + start_cu_x = 0; + for (j = 0; j < hevc->num_tile_col; j++) { + if (j == (hevc->num_tile_col - 1)) { + hevc->m_tile[i][j].width = + pic_width_cu - + start_cu_x; + } else + hevc->m_tile[i][j].width = w; + if (i == (hevc->num_tile_row - 1)) { + hevc->m_tile[i][j].height = + pic_height_cu - + start_cu_y; + } else + hevc->m_tile[i][j].height = h; + hevc->m_tile[i][j].start_cu_x + = start_cu_x; + hevc->m_tile[i][j].start_cu_y + = start_cu_y; + hevc->m_tile[i][j].sao_vb_start_addr = + hevc->work_space_buf->sao_vb. + buf_start + j * sao_vb_size; + hevc->m_tile[i][j].sao_abv_start_addr = + hevc->work_space_buf->sao_abv. + buf_start + i * sao_abv_size; + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + hevc_print_cont(hevc, 0, + "{y=%d, x=%d w %d h %d ", + i, j, hevc->m_tile[i][j].width, + hevc->m_tile[i][j].height); + hevc_print_cont(hevc, 0, + "start_x %d start_y %d ", + hevc->m_tile[i][j].start_cu_x, + hevc->m_tile[i][j].start_cu_y); + hevc_print_cont(hevc, 0, + "sao_vb_start 0x%x ", + hevc->m_tile[i][j]. + sao_vb_start_addr); + hevc_print_cont(hevc, 0, + "sao_abv_start 0x%x}\n", + hevc->m_tile[i][j]. 
+ sao_abv_start_addr); + } + start_cu_x += hevc->m_tile[i][j].width; + + } + start_cu_y += hevc->m_tile[i][0].height; + } + } else { + start_cu_y = 0; + for (i = 0; i < hevc->num_tile_row; i++) { + start_cu_x = 0; + for (j = 0; j < hevc->num_tile_col; j++) { + if (j == (hevc->num_tile_col - 1)) { + hevc->m_tile[i][j].width = + pic_width_cu - + start_cu_x; + } else { + hevc->m_tile[i][j].width = + params->p.tile_width[j]; + } + if (i == (hevc->num_tile_row - 1)) { + hevc->m_tile[i][j].height = + pic_height_cu - + start_cu_y; + } else { + hevc->m_tile[i][j].height = + params-> + p.tile_height[i]; + } + hevc->m_tile[i][j].start_cu_x + = start_cu_x; + hevc->m_tile[i][j].start_cu_y + = start_cu_y; + hevc->m_tile[i][j].sao_vb_start_addr = + hevc->work_space_buf->sao_vb. + buf_start + j * sao_vb_size; + hevc->m_tile[i][j].sao_abv_start_addr = + hevc->work_space_buf->sao_abv. + buf_start + i * sao_abv_size; + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + hevc_print_cont(hevc, 0, + "{y=%d, x=%d w %d h %d ", + i, j, hevc->m_tile[i][j].width, + hevc->m_tile[i][j].height); + hevc_print_cont(hevc, 0, + "start_x %d start_y %d ", + hevc->m_tile[i][j].start_cu_x, + hevc->m_tile[i][j].start_cu_y); + hevc_print_cont(hevc, 0, + "sao_vb_start 0x%x ", + hevc->m_tile[i][j]. + sao_vb_start_addr); + hevc_print_cont(hevc, 0, + "sao_abv_start 0x%x}\n", + hevc->m_tile[i][j]. 
+ sao_abv_start_addr); + + } + start_cu_x += hevc->m_tile[i][j].width; + } + start_cu_y += hevc->m_tile[i][0].height; + } + } + } else { + hevc->num_tile_col = 1; + hevc->num_tile_row = 1; + hevc->m_tile[0][0].width = pic_width_cu; + hevc->m_tile[0][0].height = pic_height_cu; + hevc->m_tile[0][0].start_cu_x = 0; + hevc->m_tile[0][0].start_cu_y = 0; + hevc->m_tile[0][0].sao_vb_start_addr = + hevc->work_space_buf->sao_vb.buf_start; + hevc->m_tile[0][0].sao_abv_start_addr = + hevc->work_space_buf->sao_abv.buf_start; + } +} + +static int get_tile_index(struct hevc_state_s *hevc, int cu_adr, + int pic_width_lcu) +{ + int cu_x; + int cu_y; + int tile_x = 0; + int tile_y = 0; + int i; + + if (pic_width_lcu == 0) { + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "%s Error, pic_width_lcu is 0, pic_w %d, pic_h %d\n", + __func__, hevc->pic_w, hevc->pic_h); + } + return -1; + } + cu_x = cu_adr % pic_width_lcu; + cu_y = cu_adr / pic_width_lcu; + if (hevc->tile_enabled) { + for (i = 0; i < hevc->num_tile_col; i++) { + if (cu_x >= hevc->m_tile[0][i].start_cu_x) + tile_x = i; + else + break; + } + for (i = 0; i < hevc->num_tile_row; i++) { + if (cu_y >= hevc->m_tile[i][0].start_cu_y) + tile_y = i; + else + break; + } + } + return (tile_x) | (tile_y << 8); +} + +static void print_scratch_error(int error_num) +{ +#if 0 + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + " ERROR : HEVC_ASSIST_SCRATCH_TEST Error : %d\n", + error_num); + } +#endif +} + +static void hevc_config_work_space_hw(struct hevc_state_s *hevc) +{ + struct BuffInfo_s *buf_spec = hevc->work_space_buf; + + if (get_dbg_flag(hevc)) + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s %x %x %x %x %x %x %x %x %x %x %x %x %x\n", + __func__, + buf_spec->ipp.buf_start, + buf_spec->start_adr, + buf_spec->short_term_rps.buf_start, + buf_spec->vps.buf_start, + buf_spec->sps.buf_start, + buf_spec->pps.buf_start, + buf_spec->sao_up.buf_start, + buf_spec->swap_buf.buf_start, + buf_spec->swap_buf2.buf_start, + 
buf_spec->scalelut.buf_start, + buf_spec->dblk_para.buf_start, + buf_spec->dblk_data.buf_start, + buf_spec->dblk_data2.buf_start); + WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, buf_spec->ipp.buf_start); + if ((get_dbg_flag(hevc) & H265_DEBUG_SEND_PARAM_WITH_REG) == 0) + WRITE_VREG(HEVC_RPM_BUFFER, (u32)hevc->rpm_phy_addr); + WRITE_VREG(HEVC_SHORT_TERM_RPS, buf_spec->short_term_rps.buf_start); + WRITE_VREG(HEVC_VPS_BUFFER, buf_spec->vps.buf_start); + WRITE_VREG(HEVC_SPS_BUFFER, buf_spec->sps.buf_start); + WRITE_VREG(HEVC_PPS_BUFFER, buf_spec->pps.buf_start); + WRITE_VREG(HEVC_SAO_UP, buf_spec->sao_up.buf_start); + if (hevc->mmu_enable) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR, hevc->frame_mmu_map_phy_addr); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "write HEVC_ASSIST_MMU_MAP_ADDR\n"); + } else + WRITE_VREG(H265_MMU_MAP_BUFFER, hevc->frame_mmu_map_phy_addr); + } /*else + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER, + buf_spec->swap_buf.buf_start); + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, buf_spec->swap_buf2.buf_start);*/ + WRITE_VREG(HEVC_SCALELUT, buf_spec->scalelut.buf_start); +#ifdef HEVC_8K_LFTOFFSET_FIX + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + if (buf_spec->max_width <= 4096 && buf_spec->max_height <= 2304) + WRITE_VREG(HEVC_DBLK_CFG3, 0x4010); + else + WRITE_VREG(HEVC_DBLK_CFG3, 0x8020); + //WRITE_VREG(HEVC_DBLK_CFG3, 0x808020); /*offset should x2 if 8k*/ + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "write HEVC_DBLK_CFG3 to %x\n", READ_VREG(HEVC_DBLK_CFG3)); + } +#endif +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable) { + //WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR2, FRAME_MMU_MAP_ADDR_DW); + WRITE_VREG(HEVC_SAO_MMU_DMA_CTRL2, hevc->frame_dw_mmu_map_phy_addr); + } +#endif + /* cfg_p_addr */ + WRITE_VREG(HEVC_DBLK_CFG4, buf_spec->dblk_para.buf_start); + /* cfg_d_addr */ + WRITE_VREG(HEVC_DBLK_CFG5, buf_spec->dblk_data.buf_start); + + WRITE_VREG(HEVC_DBLK_CFGE, buf_spec->dblk_data2.buf_start); + + 
WRITE_VREG(LMEM_DUMP_ADR, (u32)hevc->lmem_phy_addr); +} + +static void parser_cmd_write(void) +{ + u32 i; + const unsigned short parser_cmd[PARSER_CMD_NUMBER] = { + 0x0401, 0x8401, 0x0800, 0x0402, 0x9002, 0x1423, + 0x8CC3, 0x1423, 0x8804, 0x9825, 0x0800, 0x04FE, + 0x8406, 0x8411, 0x1800, 0x8408, 0x8409, 0x8C2A, + 0x9C2B, 0x1C00, 0x840F, 0x8407, 0x8000, 0x8408, + 0x2000, 0xA800, 0x8410, 0x04DE, 0x840C, 0x840D, + 0xAC00, 0xA000, 0x08C0, 0x08E0, 0xA40E, 0xFC00, + 0x7C00 + }; + for (i = 0; i < PARSER_CMD_NUMBER; i++) + WRITE_VREG(HEVC_PARSER_CMD_WRITE, parser_cmd[i]); +} + +static void hevc_init_decoder_hw(struct hevc_state_s *hevc, + int decode_pic_begin, int decode_pic_num) +{ + unsigned int data32; + int i; +#if 0 + if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) { + /* Set MCR fetch priorities*/ + data32 = 0x1 | (0x1 << 2) | (0x1 <<3) | + (24 << 4) | (32 << 11) | (24 << 18) | (32 << 25); + WRITE_VREG(HEVCD_MPP_DECOMP_AXIURG_CTL, data32); + } +#endif +#if 1 + /* m8baby test1902 */ + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, + "%s\n", __func__); + data32 = READ_VREG(HEVC_PARSER_VERSION); + if (data32 != 0x00010001) { + print_scratch_error(25); + return; + } + WRITE_VREG(HEVC_PARSER_VERSION, 0x5a5a55aa); + data32 = READ_VREG(HEVC_PARSER_VERSION); + if (data32 != 0x5a5a55aa) { + print_scratch_error(26); + return; + } +#if 0 + /* test Parser Reset */ + /* reset iqit to start mem init again */ + WRITE_VREG(DOS_SW_RESET3, (1 << 14) | + (1 << 3) /* reset_whole parser */ + ); + WRITE_VREG(DOS_SW_RESET3, 0); /* clear reset_whole parser */ + data32 = READ_VREG(HEVC_PARSER_VERSION); + if (data32 != 0x00010001) + hevc_print(hevc, 0, + "Test Parser Fatal Error\n"); +#endif + /* reset iqit to start mem init again */ + WRITE_VREG(DOS_SW_RESET3, (1 << 14) + ); + CLEAR_VREG_MASK(HEVC_CABAC_CONTROL, 1); + CLEAR_VREG_MASK(HEVC_PARSER_CORE_CONTROL, 1); + +#endif + if (!hevc->m_ins_flag) { + data32 = READ_VREG(HEVC_STREAM_CONTROL); + data32 = data32 | (1 << 
0); /* stream_fetch_enable */ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) + data32 |= (0xf << 25); /*arwlen_axi_max*/ + WRITE_VREG(HEVC_STREAM_CONTROL, data32); + } + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x00000100) { + print_scratch_error(29); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x00000300) { + print_scratch_error(30); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x12345678); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x9abcdef0); + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x12345678) { + print_scratch_error(31); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x9abcdef0) { + print_scratch_error(32); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x00000100); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x00000300); + + data32 = READ_VREG(HEVC_PARSER_INT_CONTROL); + data32 &= 0x03ffffff; + data32 = data32 | (3 << 29) | (2 << 26) | (1 << 24) + | /* stream_buffer_empty_int_amrisc_enable */ + (1 << 22) | /* stream_fifo_empty_int_amrisc_enable*/ + (1 << 7) | /* dec_done_int_cpu_enable */ + (1 << 4) | /* startcode_found_int_cpu_enable */ + (0 << 3) | /* startcode_found_int_amrisc_enable */ + (1 << 0) /* parser_int_enable */ + ; + WRITE_VREG(HEVC_PARSER_INT_CONTROL, data32); + + data32 = READ_VREG(HEVC_SHIFT_STATUS); + data32 = data32 | (1 << 1) | /* emulation_check_on */ + (1 << 0) /* startcode_check_on */ + ; + WRITE_VREG(HEVC_SHIFT_STATUS, data32); + + WRITE_VREG(HEVC_SHIFT_CONTROL, (3 << 6) |/* sft_valid_wr_position */ + (2 << 4) | /* emulate_code_length_sub_1 */ + (2 << 1) | /* start_code_length_sub_1 */ + (1 << 0) /* stream_shift_enable */ + ); + + WRITE_VREG(HEVC_CABAC_CONTROL, (1 << 0) /* cabac_enable */ + ); + /* hevc_parser_core_clk_en */ + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, (1 << 0) + ); + + WRITE_VREG(HEVC_DEC_STATUS_REG, 0); + + /* Initial IQIT_SCALELUT memory -- just to avoid X in simulation */ + if (is_rdma_enable()) + 
rdma_back_end_work(hevc->rdma_phy_adr, RDMA_SIZE); + else { + WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0);/*cfg_p_addr*/ + for (i = 0; i < 1024; i++) + WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, 0); + } + +#ifdef ENABLE_SWAP_TEST + WRITE_VREG(HEVC_STREAM_SWAP_TEST, 100); +#endif + + /*WRITE_VREG(HEVC_DECODE_PIC_BEGIN_REG, 0);*/ + /*WRITE_VREG(HEVC_DECODE_PIC_NUM_REG, 0xffffffff);*/ + WRITE_VREG(HEVC_DECODE_SIZE, 0); + /*WRITE_VREG(HEVC_DECODE_COUNT, 0);*/ + /* Send parser_cmd */ + WRITE_VREG(HEVC_PARSER_CMD_WRITE, (1 << 16) | (0 << 0)); + + parser_cmd_write(); + + WRITE_VREG(HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2); + + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + /* (1 << 8) | // sao_sw_pred_enable */ + (1 << 5) | /* parser_sao_if_en */ + (1 << 2) | /* parser_mpred_if_en */ + (1 << 0) /* parser_scaler_if_en */ + ); + + /* Changed to Start MPRED in microcode */ + /* + * hevc_print(hevc, 0, "[test.c] Start MPRED\n"); + * WRITE_VREG(HEVC_MPRED_INT_STATUS, + * (1<<31) + * ); + */ + + WRITE_VREG(HEVCD_IPP_TOP_CNTL, (0 << 1) | /* enable ipp */ + (1 << 0) /* software reset ipp and mpp */ + ); + WRITE_VREG(HEVCD_IPP_TOP_CNTL, (1 << 1) | /* enable ipp */ + (0 << 0) /* software reset ipp and mpp */ + ); + + if (get_double_write_mode(hevc) & 0x10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, + 0x1 << 31 /*/Enable NV21 reference read mode for MC*/ + ); + +} + +static void decoder_hw_reset(void) +{ + int i; + unsigned int data32; + + /* reset iqit to start mem init again */ + WRITE_VREG(DOS_SW_RESET3, (1 << 14) + ); + CLEAR_VREG_MASK(HEVC_CABAC_CONTROL, 1); + CLEAR_VREG_MASK(HEVC_PARSER_CORE_CONTROL, 1); + + data32 = READ_VREG(HEVC_STREAM_CONTROL); + data32 = data32 | (1 << 0) /* stream_fetch_enable */ + ; + WRITE_VREG(HEVC_STREAM_CONTROL, data32); + + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x00000100) { + print_scratch_error(29); + return; + } + data32 = 
READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x00000300) { + print_scratch_error(30); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x12345678); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x9abcdef0); + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x12345678) { + print_scratch_error(31); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x9abcdef0) { + print_scratch_error(32); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x00000100); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x00000300); + + data32 = READ_VREG(HEVC_PARSER_INT_CONTROL); + data32 &= 0x03ffffff; + data32 = data32 | (3 << 29) | (2 << 26) | (1 << 24) + | /* stream_buffer_empty_int_amrisc_enable */ + (1 << 22) | /*stream_fifo_empty_int_amrisc_enable */ + (1 << 7) | /* dec_done_int_cpu_enable */ + (1 << 4) | /* startcode_found_int_cpu_enable */ + (0 << 3) | /* startcode_found_int_amrisc_enable */ + (1 << 0) /* parser_int_enable */ + ; + WRITE_VREG(HEVC_PARSER_INT_CONTROL, data32); + + data32 = READ_VREG(HEVC_SHIFT_STATUS); + data32 = data32 | (1 << 1) | /* emulation_check_on */ + (1 << 0) /* startcode_check_on */ + ; + WRITE_VREG(HEVC_SHIFT_STATUS, data32); + + WRITE_VREG(HEVC_SHIFT_CONTROL, (3 << 6) |/* sft_valid_wr_position */ + (2 << 4) | /* emulate_code_length_sub_1 */ + (2 << 1) | /* start_code_length_sub_1 */ + (1 << 0) /* stream_shift_enable */ + ); + + WRITE_VREG(HEVC_CABAC_CONTROL, (1 << 0) /* cabac_enable */ + ); + /* hevc_parser_core_clk_en */ + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, (1 << 0) + ); + + /* Initial IQIT_SCALELUT memory -- just to avoid X in simulation */ + WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0); /* cfg_p_addr */ + for (i = 0; i < 1024; i++) + WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, 0); + + /* Send parser_cmd */ + WRITE_VREG(HEVC_PARSER_CMD_WRITE, (1 << 16) | (0 << 0)); + + parser_cmd_write(); + + WRITE_VREG(HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1); + 
WRITE_VREG(HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2); + + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + /* (1 << 8) | // sao_sw_pred_enable */ + (1 << 5) | /* parser_sao_if_en */ + (1 << 2) | /* parser_mpred_if_en */ + (1 << 0) /* parser_scaler_if_en */ + ); + + WRITE_VREG(HEVCD_IPP_TOP_CNTL, (0 << 1) | /* enable ipp */ + (1 << 0) /* software reset ipp and mpp */ + ); + WRITE_VREG(HEVCD_IPP_TOP_CNTL, (1 << 1) | /* enable ipp */ + (0 << 0) /* software reset ipp and mpp */ + ); +} + +#ifdef CONFIG_HEVC_CLK_FORCED_ON +static void config_hevc_clk_forced_on(void) +{ + unsigned int rdata32; + /* IQIT */ + rdata32 = READ_VREG(HEVC_IQIT_CLK_RST_CTRL); + WRITE_VREG(HEVC_IQIT_CLK_RST_CTRL, rdata32 | (0x1 << 2)); + + /* DBLK */ + rdata32 = READ_VREG(HEVC_DBLK_CFG0); + WRITE_VREG(HEVC_DBLK_CFG0, rdata32 | (0x1 << 2)); + + /* SAO */ + rdata32 = READ_VREG(HEVC_SAO_CTRL1); + WRITE_VREG(HEVC_SAO_CTRL1, rdata32 | (0x1 << 2)); + + /* MPRED */ + rdata32 = READ_VREG(HEVC_MPRED_CTRL1); + WRITE_VREG(HEVC_MPRED_CTRL1, rdata32 | (0x1 << 24)); + + /* PARSER */ + rdata32 = READ_VREG(HEVC_STREAM_CONTROL); + WRITE_VREG(HEVC_STREAM_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_SHIFT_CONTROL); + WRITE_VREG(HEVC_SHIFT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_CABAC_CONTROL); + WRITE_VREG(HEVC_CABAC_CONTROL, rdata32 | (0x1 << 13)); + rdata32 = READ_VREG(HEVC_PARSER_CORE_CONTROL); + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_INT_CONTROL); + WRITE_VREG(HEVC_PARSER_INT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_IF_CONTROL); + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + rdata32 | (0x3 << 5) | (0x3 << 2) | (0x3 << 0)); + + /* IPP */ + rdata32 = READ_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG); + WRITE_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG, rdata32 | 0xffffffff); + + /* MCRCC */ + rdata32 = READ_VREG(HEVCD_MCRCC_CTL1); + WRITE_VREG(HEVCD_MCRCC_CTL1, rdata32 | (0x1 << 3)); +} +#endif + +#ifdef MCRCC_ENABLE +static void 
config_mcrcc_axi_hw(struct hevc_state_s *hevc, int slice_type) +{ + unsigned int rdata32; + unsigned int rdata32_2; + int l0_cnt = 0; + int l1_cnt = 0x7fff; + + if (get_double_write_mode(hevc) & 0x10) { + l0_cnt = hevc->cur_pic->RefNum_L0; + l1_cnt = hevc->cur_pic->RefNum_L1; + } + + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); /* reset mcrcc */ + + if (slice_type == 2) { /* I-PIC */ + /* remove reset -- disables clock */ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x0); + return; + } + + if (slice_type == 0) { /* B-PIC */ + /* Programme canvas0 */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 0); + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + /* Programme canvas1 */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (1 << 1) | 0); + rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32_2 = rdata32_2 & 0xffff; + rdata32_2 = rdata32_2 | (rdata32_2 << 16); + if (rdata32 == rdata32_2 && l1_cnt > 1) { + rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32_2 = rdata32_2 & 0xffff; + rdata32_2 = rdata32_2 | (rdata32_2 << 16); + } + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32_2); + } else { /* P-PIC */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (1 << 1) | 0); + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + if (l0_cnt == 1) { + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32); + } else { + /* Programme canvas1 */ + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32); + } + } + /* enable mcrcc progressive-mode */ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); +} +#endif + +static void config_title_hw(struct hevc_state_s *hevc, int sao_vb_size, + int sao_mem_unit) +{ + 
WRITE_VREG(HEVC_sao_mem_unit, sao_mem_unit); + WRITE_VREG(HEVC_SAO_ABV, hevc->work_space_buf->sao_abv.buf_start); + WRITE_VREG(HEVC_sao_vb_size, sao_vb_size); + WRITE_VREG(HEVC_SAO_VB, hevc->work_space_buf->sao_vb.buf_start); +} + +static u32 init_aux_size; +static int aux_data_is_avaible(struct hevc_state_s *hevc) +{ + u32 reg_val; + + reg_val = READ_VREG(HEVC_AUX_DATA_SIZE); + if (reg_val != 0 && reg_val != init_aux_size) + return 1; + else + return 0; +} + +static void config_aux_buf(struct hevc_state_s *hevc) +{ + WRITE_VREG(HEVC_AUX_ADR, hevc->aux_phy_addr); + init_aux_size = ((hevc->prefix_aux_size >> 4) << 16) | + (hevc->suffix_aux_size >> 4); + WRITE_VREG(HEVC_AUX_DATA_SIZE, init_aux_size); +} + +static void config_mpred_hw(struct hevc_state_s *hevc) +{ + int i; + unsigned int data32; + struct PIC_s *cur_pic = hevc->cur_pic; + struct PIC_s *col_pic = hevc->col_pic; + int AMVP_MAX_NUM_CANDS_MEM = 3; + int AMVP_MAX_NUM_CANDS = 2; + int NUM_CHROMA_MODE = 5; + int DM_CHROMA_IDX = 36; + int above_ptr_ctrl = 0; + int buffer_linear = 1; + int cu_size_log2 = 3; + + int mpred_mv_rd_start_addr; + int mpred_curr_lcu_x; + int mpred_curr_lcu_y; + int mpred_above_buf_start; + int mpred_mv_rd_ptr; + int mpred_mv_rd_ptr_p1; + int mpred_mv_rd_end_addr; + int MV_MEM_UNIT; + int mpred_mv_wr_ptr; + int *ref_poc_L0, *ref_poc_L1; + + int above_en; + int mv_wr_en; + int mv_rd_en; + int col_isIntra; + + if (hevc->slice_type != 2) { + above_en = 1; + mv_wr_en = 1; + mv_rd_en = 1; + col_isIntra = 0; + } else { + above_en = 1; + mv_wr_en = 1; + mv_rd_en = 0; + col_isIntra = 0; + } + + mpred_mv_rd_start_addr = col_pic->mpred_mv_wr_start_addr; + data32 = READ_VREG(HEVC_MPRED_CURR_LCU); + mpred_curr_lcu_x = data32 & 0xffff; + mpred_curr_lcu_y = (data32 >> 16) & 0xffff; + + MV_MEM_UNIT = + hevc->lcu_size_log2 == 6 ? 0x200 : hevc->lcu_size_log2 == + 5 ? 
0x80 : 0x20; + mpred_mv_rd_ptr = + mpred_mv_rd_start_addr + (hevc->slice_addr * MV_MEM_UNIT); + + mpred_mv_rd_ptr_p1 = mpred_mv_rd_ptr + MV_MEM_UNIT; + mpred_mv_rd_end_addr = + mpred_mv_rd_start_addr + + col_pic->mv_size; + //((hevc->lcu_x_num * hevc->lcu_y_num) * MV_MEM_UNIT); + + mpred_above_buf_start = hevc->work_space_buf->mpred_above.buf_start; + + mpred_mv_wr_ptr = + cur_pic->mpred_mv_wr_start_addr + + (hevc->slice_addr * MV_MEM_UNIT); + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "cur pic index %d col pic index %d\n", cur_pic->index, + col_pic->index); + } + + WRITE_VREG(HEVC_MPRED_MV_WR_START_ADDR, + cur_pic->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_RD_START_ADDR, mpred_mv_rd_start_addr); + + data32 = ((hevc->lcu_x_num - hevc->tile_width_lcu) * MV_MEM_UNIT); + WRITE_VREG(HEVC_MPRED_MV_WR_ROW_JUMP, data32); + WRITE_VREG(HEVC_MPRED_MV_RD_ROW_JUMP, data32); + + data32 = READ_VREG(HEVC_MPRED_CTRL0); + data32 = ((hevc->slice_type & 3) | + (hevc->new_pic & 1) << 2 | + (hevc->new_tile & 1) << 3 | + (hevc->isNextSliceSegment & 1)<< 4 | + (hevc->TMVPFlag & 1)<< 5 | + (hevc->LDCFlag & 1) << 6 | + (hevc->ColFromL0Flag & 1)<< 7 | + (above_ptr_ctrl & 1)<< 8 | + (above_en & 1) << 9 | + (mv_wr_en & 1) << 10 | + (mv_rd_en & 1)<< 11 | + (col_isIntra & 1)<< 12 | + (buffer_linear & 1)<< 13 | + (hevc->LongTerm_Curr & 1) << 14 | + (hevc->LongTerm_Col & 1) << 15 | + (hevc->lcu_size_log2 & 0xf) << 16 | + (cu_size_log2 & 0xf) << 20 | (hevc->plevel & 0x7) << 24); + data32 &= ~(1<< 28); + WRITE_VREG(HEVC_MPRED_CTRL0, data32); + + data32 = READ_VREG(HEVC_MPRED_CTRL1); + data32 = ( +#if 0 + /* no set in m8baby test1902 */ + /* Don't override clk_forced_on , */ + (data32 & (0x1 << 24)) | +#endif + hevc->MaxNumMergeCand | + AMVP_MAX_NUM_CANDS << 4 | + AMVP_MAX_NUM_CANDS_MEM << 8 | + NUM_CHROMA_MODE << 12 | DM_CHROMA_IDX << 16); + WRITE_VREG(HEVC_MPRED_CTRL1, data32); + + data32 = (hevc->pic_w | hevc->pic_h << 16); + WRITE_VREG(HEVC_MPRED_PIC_SIZE, 
data32); + + data32 = ((hevc->lcu_x_num - 1) | (hevc->lcu_y_num - 1) << 16); + WRITE_VREG(HEVC_MPRED_PIC_SIZE_LCU, data32); + + data32 = (hevc->tile_start_lcu_x | hevc->tile_start_lcu_y << 16); + WRITE_VREG(HEVC_MPRED_TILE_START, data32); + + data32 = (hevc->tile_width_lcu | hevc->tile_height_lcu << 16); + WRITE_VREG(HEVC_MPRED_TILE_SIZE_LCU, data32); + + data32 = (hevc->RefNum_L0 | hevc->RefNum_L1 << 8 | 0 + /* col_RefNum_L0<<16| */ + /* col_RefNum_L1<<24 */ + ); + WRITE_VREG(HEVC_MPRED_REF_NUM, data32); + +#ifdef SUPPORT_LONG_TERM_RPS + data32 = 0; + for (i = 0; i < hevc->RefNum_L0; i++) { + if (is_ref_long_term(hevc, + cur_pic->m_aiRefPOCList0 + [cur_pic->slice_idx][i])) + data32 = data32 | (1 << i); + } + for (i = 0; i < hevc->RefNum_L1; i++) { + if (is_ref_long_term(hevc, + cur_pic->m_aiRefPOCList1 + [cur_pic->slice_idx][i])) + data32 = data32 | (1 << (i + 16)); + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "LongTerm_Ref 0x%x\n", data32); + } +#else + data32 = hevc->LongTerm_Ref; +#endif + WRITE_VREG(HEVC_MPRED_LT_REF, data32); + + data32 = 0; + for (i = 0; i < hevc->RefNum_L0; i++) + data32 = data32 | (1 << i); + WRITE_VREG(HEVC_MPRED_REF_EN_L0, data32); + + data32 = 0; + for (i = 0; i < hevc->RefNum_L1; i++) + data32 = data32 | (1 << i); + WRITE_VREG(HEVC_MPRED_REF_EN_L1, data32); + + WRITE_VREG(HEVC_MPRED_CUR_POC, hevc->curr_POC); + WRITE_VREG(HEVC_MPRED_COL_POC, hevc->Col_POC); + + /* below MPRED Ref_POC_xx_Lx registers must follow Ref_POC_xx_L0 -> + * Ref_POC_xx_L1 in pair write order!!! 
+ */ + ref_poc_L0 = &(cur_pic->m_aiRefPOCList0[cur_pic->slice_idx][0]); + ref_poc_L1 = &(cur_pic->m_aiRefPOCList1[cur_pic->slice_idx][0]); + + WRITE_VREG(HEVC_MPRED_L0_REF00_POC, ref_poc_L0[0]); + WRITE_VREG(HEVC_MPRED_L1_REF00_POC, ref_poc_L1[0]); + + WRITE_VREG(HEVC_MPRED_L0_REF01_POC, ref_poc_L0[1]); + WRITE_VREG(HEVC_MPRED_L1_REF01_POC, ref_poc_L1[1]); + + WRITE_VREG(HEVC_MPRED_L0_REF02_POC, ref_poc_L0[2]); + WRITE_VREG(HEVC_MPRED_L1_REF02_POC, ref_poc_L1[2]); + + WRITE_VREG(HEVC_MPRED_L0_REF03_POC, ref_poc_L0[3]); + WRITE_VREG(HEVC_MPRED_L1_REF03_POC, ref_poc_L1[3]); + + WRITE_VREG(HEVC_MPRED_L0_REF04_POC, ref_poc_L0[4]); + WRITE_VREG(HEVC_MPRED_L1_REF04_POC, ref_poc_L1[4]); + + WRITE_VREG(HEVC_MPRED_L0_REF05_POC, ref_poc_L0[5]); + WRITE_VREG(HEVC_MPRED_L1_REF05_POC, ref_poc_L1[5]); + + WRITE_VREG(HEVC_MPRED_L0_REF06_POC, ref_poc_L0[6]); + WRITE_VREG(HEVC_MPRED_L1_REF06_POC, ref_poc_L1[6]); + + WRITE_VREG(HEVC_MPRED_L0_REF07_POC, ref_poc_L0[7]); + WRITE_VREG(HEVC_MPRED_L1_REF07_POC, ref_poc_L1[7]); + + WRITE_VREG(HEVC_MPRED_L0_REF08_POC, ref_poc_L0[8]); + WRITE_VREG(HEVC_MPRED_L1_REF08_POC, ref_poc_L1[8]); + + WRITE_VREG(HEVC_MPRED_L0_REF09_POC, ref_poc_L0[9]); + WRITE_VREG(HEVC_MPRED_L1_REF09_POC, ref_poc_L1[9]); + + WRITE_VREG(HEVC_MPRED_L0_REF10_POC, ref_poc_L0[10]); + WRITE_VREG(HEVC_MPRED_L1_REF10_POC, ref_poc_L1[10]); + + WRITE_VREG(HEVC_MPRED_L0_REF11_POC, ref_poc_L0[11]); + WRITE_VREG(HEVC_MPRED_L1_REF11_POC, ref_poc_L1[11]); + + WRITE_VREG(HEVC_MPRED_L0_REF12_POC, ref_poc_L0[12]); + WRITE_VREG(HEVC_MPRED_L1_REF12_POC, ref_poc_L1[12]); + + WRITE_VREG(HEVC_MPRED_L0_REF13_POC, ref_poc_L0[13]); + WRITE_VREG(HEVC_MPRED_L1_REF13_POC, ref_poc_L1[13]); + + WRITE_VREG(HEVC_MPRED_L0_REF14_POC, ref_poc_L0[14]); + WRITE_VREG(HEVC_MPRED_L1_REF14_POC, ref_poc_L1[14]); + + WRITE_VREG(HEVC_MPRED_L0_REF15_POC, ref_poc_L0[15]); + WRITE_VREG(HEVC_MPRED_L1_REF15_POC, ref_poc_L1[15]); + + if (hevc->new_pic) { + WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, mpred_above_buf_start); 
+ WRITE_VREG(HEVC_MPRED_MV_WPTR, mpred_mv_wr_ptr); + /* WRITE_VREG(HEVC_MPRED_MV_RPTR,mpred_mv_rd_ptr); */ + WRITE_VREG(HEVC_MPRED_MV_RPTR, mpred_mv_rd_start_addr); + } else if (!hevc->isNextSliceSegment) { + /* WRITE_VREG(HEVC_MPRED_MV_RPTR,mpred_mv_rd_ptr_p1); */ + WRITE_VREG(HEVC_MPRED_MV_RPTR, mpred_mv_rd_ptr); + } + + WRITE_VREG(HEVC_MPRED_MV_RD_END_ADDR, mpred_mv_rd_end_addr); +} + +static void config_sao_hw(struct hevc_state_s *hevc, union param_u *params) +{ + unsigned int data32, data32_2; + int misc_flag0 = hevc->misc_flag0; + int slice_deblocking_filter_disabled_flag = 0; + + int mc_buffer_size_u_v = + hevc->lcu_total * hevc->lcu_size * hevc->lcu_size / 2; + int mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16; + struct PIC_s *cur_pic = hevc->cur_pic; + struct aml_vcodec_ctx * v4l2_ctx = hevc->v4l2_ctx; + int dw_mode = get_double_write_mode(hevc); + + data32 = READ_VREG(HEVC_SAO_CTRL0); + data32 &= (~0xf); + data32 |= hevc->lcu_size_log2; + WRITE_VREG(HEVC_SAO_CTRL0, data32); + + data32 = (hevc->pic_w | hevc->pic_h << 16); + WRITE_VREG(HEVC_SAO_PIC_SIZE, data32); + + data32 = ((hevc->lcu_x_num - 1) | (hevc->lcu_y_num - 1) << 16); + WRITE_VREG(HEVC_SAO_PIC_SIZE_LCU, data32); + + if (hevc->new_pic) + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0xffffffff); +#ifdef LOSLESS_COMPRESS_MODE +/*SUPPORT_10BIT*/ + if ((dw_mode & 0x10) == 0) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) + WRITE_VREG(HEVC_SAO_CTRL26, 0); + + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 &= (~(0xff << 16)); + if (((dw_mode & 0xf) == 8) || + ((dw_mode & 0xf) == 9)) { + data32 |= (0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + WRITE_VREG(HEVC_SAO_CTRL26, 0xf); + } else { + if ((dw_mode & 0xf) == 2 || + (dw_mode & 0xf) == 3) + data32 |= (0xff<<16); + else if ((dw_mode & 0xf) == 4 || + (dw_mode & 0xf) == 5) + data32 |= (0x33<<16); + + if (hevc->mem_saving_mode == 1) + data32 |= (1 << 9); + else + data32 &= ~(1 << 9); + if (workaround_enable & 1) + data32 |= (1 << 7); + 
WRITE_VREG(HEVC_SAO_CTRL5, data32); + } + } + data32 = cur_pic->mc_y_adr; + if (dw_mode && ((dw_mode & 0x20) == 0)) + WRITE_VREG(HEVC_SAO_Y_START_ADDR, cur_pic->dw_y_adr); + + if ((dw_mode & 0x10) == 0) + WRITE_VREG(HEVC_CM_BODY_START_ADDR, data32); + + if (hevc->mmu_enable) + WRITE_VREG(HEVC_CM_HEADER_START_ADDR, cur_pic->header_adr); +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable) { + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0); + WRITE_VREG(HEVC_SAO_C_START_ADDR, 0); + WRITE_VREG(HEVC_CM_HEADER_START_ADDR2, cur_pic->header_dw_adr); + } +#endif +#else + data32 = cur_pic->mc_y_adr; + WRITE_VREG(HEVC_SAO_Y_START_ADDR, data32); +#endif + data32 = (mc_buffer_size_u_v_h << 16) << 1; + WRITE_VREG(HEVC_SAO_Y_LENGTH, data32); + +#ifdef LOSLESS_COMPRESS_MODE +/*SUPPORT_10BIT*/ + if (dw_mode && ((dw_mode & 0x20) == 0)) + WRITE_VREG(HEVC_SAO_C_START_ADDR, cur_pic->dw_u_v_adr); +#else + data32 = cur_pic->mc_u_v_adr; + WRITE_VREG(HEVC_SAO_C_START_ADDR, data32); +#endif + data32 = (mc_buffer_size_u_v_h << 16); + WRITE_VREG(HEVC_SAO_C_LENGTH, data32); + + if (hevc->is_used_v4l) { + WRITE_VREG(HEVC_SAO_Y_LENGTH, cur_pic->luma_size); + WRITE_VREG(HEVC_SAO_C_LENGTH, cur_pic->chroma_size); + if (debug & PRINT_FLAG_V4L_DETAIL) { + pr_info("[%d] config pic, id: %d, Y:(%x, %d) C:(%x, %d).\n", + v4l2_ctx->id, cur_pic->index, + cur_pic->dw_y_adr, cur_pic->luma_size, + cur_pic->dw_u_v_adr, cur_pic->chroma_size); + } + } + +#ifdef LOSLESS_COMPRESS_MODE +/*SUPPORT_10BIT*/ + if (dw_mode && ((dw_mode & 0x20) == 0)) { + WRITE_VREG(HEVC_SAO_Y_WPTR, cur_pic->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_WPTR, cur_pic->dw_u_v_adr); + } +#else + /* multi tile to do... 
*/ + data32 = cur_pic->mc_y_adr; + WRITE_VREG(HEVC_SAO_Y_WPTR, data32); + + data32 = cur_pic->mc_u_v_adr; + WRITE_VREG(HEVC_SAO_C_WPTR, data32); +#endif + /* DBLK CONFIG HERE */ + if (hevc->new_pic) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + data32 = (0xff << 8) | (0x0 << 0); + else + data32 = (0x57 << 8) | /* 1st/2nd write both enable*/ + (0x0 << 0); /* h265 video format*/ + + if (hevc->pic_w >= 1280) + data32 |= (0x1 << 4); /*dblk pipeline mode=1 for performance*/ + data32 &= (~0x300); /*[8]:first write enable (compress) [9]:double write enable (uncompress)*/ + if (dw_mode == 0) + data32 |= (0x1 << 8); /*enable first write*/ + else if (dw_mode == 0x10) + data32 |= (0x1 << 9); /*double write only*/ + else + data32 |= ((0x1 << 8) |(0x1 << 9)); + + WRITE_VREG(HEVC_DBLK_CFGB, data32); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "[DBLK DEBUG] HEVC1 CFGB : 0x%x\n", data32); + } + data32 = (hevc->pic_w | hevc->pic_h << 16); + WRITE_VREG(HEVC_DBLK_CFG2, data32); + + if ((misc_flag0 >> PCM_ENABLE_FLAG_BIT) & 0x1) { + data32 = + ((misc_flag0 >> + PCM_LOOP_FILTER_DISABLED_FLAG_BIT) & + 0x1) << 3; + } else + data32 = 0; + data32 |= + (((params->p.pps_cb_qp_offset & 0x1f) << 4) | + ((params->p.pps_cr_qp_offset + & 0x1f) << + 9)); + data32 |= + (hevc->lcu_size == + 64) ? 0 : ((hevc->lcu_size == 32) ? 1 : 2); + data32 |= (hevc->pic_w <= 64) ? 
(1 << 20) : 0; + WRITE_VREG(HEVC_DBLK_CFG1, data32); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + /*if (debug & 0x80) {*/ + data32 = 1 << 28; /* Debug only: sts1 chooses dblk_main*/ + WRITE_VREG(HEVC_DBLK_STS1 + 4, data32); /* 0x3510 */ + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "[DBLK DEBUG] HEVC1 STS1 : 0x%x\n", + data32); + /*}*/ + } + } +#if 0 + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + data32 |= (hevc->mem_map_mode << + 12); + +/* [13:12] axi_aformat, + * 0-Linear, 1-32x32, 2-64x32 + */ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + data32 |= (hevc->mem_map_mode << + 4); + +/* [5:4] -- address_format + * 00:linear 01:32x32 10:64x32 + */ + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#else + /* m8baby test1902 */ + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + /* [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 */ + data32 |= (hevc->mem_map_mode << 12); + data32 &= (~0xff0); +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable == 0) + data32 |= ((hevc->endian >> 8) & 0xfff); +#else + data32 |= ((hevc->endian >> 8) & 0xfff); /* data32 |= 0x670; Big-Endian per 64-bit */ +#endif + data32 &= (~0x3); /*[1]:dw_disable [0]:cm_disable*/ + if (dw_mode == 0) + data32 |= 0x2; /*disable double write*/ + else if (dw_mode & 0x10) + data32 |= 0x1; /*disable cm*/ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + unsigned int data; + data = (0x57 << 8) | /* 1st/2nd write both enable*/ + (0x0 << 0); /* h265 video format*/ + if (hevc->pic_w >= 1280) + data |= (0x1 << 4); /*dblk pipeline mode=1 for performance*/ + data &= (~0x300); /*[8]:first write enable (compress) [9]:double write enable (uncompress)*/ + if (dw_mode == 0) + data |= (0x1 << 8); /*enable first write*/ + else if (dw_mode & 0x10) + data |= (0x1 << 9); /*double write only*/ + else + data |= ((0x1 << 8) |(0x1 << 9)); + WRITE_VREG(HEVC_DBLK_CFGB, data); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + 
"[DBLK DEBUG] HEVC1 CFGB : 0x%x\n", data); + } + + /* swap uv */ + if (hevc->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV21M)) + data32 &= ~(1 << 8); /* NV21 */ + else + data32 |= (1 << 8); /* NV12 */ + } + data32 &= (~(3 << 14)); + data32 |= (2 << 14); + /* + * [31:24] ar_fifo1_axi_thred + * [23:16] ar_fifo0_axi_thred + * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes + * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + * [11:08] axi_lendian_C + * [07:04] axi_lendian_Y + * [3] reserved + * [2] clk_forceon + * [1] dw_disable:disable double write output + * [0] cm_disable:disable compress output + */ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + if (dw_mode & 0x10) { + /* [23:22] dw_v1_ctrl + *[21:20] dw_v0_ctrl + *[19:18] dw_h1_ctrl + *[17:16] dw_h0_ctrl + */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /*set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /* [5:4] -- address_format 00:linear 01:32x32 10:64x32 */ + data32 |= (hevc->mem_map_mode << 4); + data32 &= (~0xF); + data32 |= (hevc->endian & 0xf); /* valid only when double write only */ + + /* swap uv */ + if (hevc->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV21M)) + data32 |= (1 << 12); /* NV21 */ + else + data32 &= ~(1 << 12); /* NV12 */ + } + data32 &= (~(3 << 8)); + data32 |= (2 << 8); + /* + * [3:0] little_endian + * [5:4] address_format 00:linear 01:32x32 10:64x32 + * [7:6] reserved + * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte + * [11:10] reserved + * [12] CbCr_byte_swap + * [31:13] reserved + */ + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#endif + data32 = 0; + data32_2 = READ_VREG(HEVC_SAO_CTRL0); + data32_2 &= (~0x300); + /* slice_deblocking_filter_disabled_flag = 0; + * ucode has handle it , so read it from ucode 
directly + */ + if (hevc->tile_enabled) { + data32 |= + ((misc_flag0 >> + LOOP_FILER_ACROSS_TILES_ENABLED_FLAG_BIT) & + 0x1) << 0; + data32_2 |= + ((misc_flag0 >> + LOOP_FILER_ACROSS_TILES_ENABLED_FLAG_BIT) & + 0x1) << 8; + } + slice_deblocking_filter_disabled_flag = (misc_flag0 >> + SLICE_DEBLOCKING_FILTER_DISABLED_FLAG_BIT) & + 0x1; /* ucode has handle it,so read it from ucode directly */ + if ((misc_flag0 & (1 << DEBLOCKING_FILTER_OVERRIDE_ENABLED_FLAG_BIT)) + && (misc_flag0 & (1 << DEBLOCKING_FILTER_OVERRIDE_FLAG_BIT))) { + /* slice_deblocking_filter_disabled_flag = + * (misc_flag0>>SLICE_DEBLOCKING_FILTER_DISABLED_FLAG_BIT)&0x1; + * //ucode has handle it , so read it from ucode directly + */ + data32 |= slice_deblocking_filter_disabled_flag << 2; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print_cont(hevc, 0, + "(1,%x)", data32); + if (!slice_deblocking_filter_disabled_flag) { + data32 |= (params->p.slice_beta_offset_div2 & 0xf) << 3; + data32 |= (params->p.slice_tc_offset_div2 & 0xf) << 7; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print_cont(hevc, 0, + "(2,%x)", data32); + } + } else { + data32 |= + ((misc_flag0 >> + PPS_DEBLOCKING_FILTER_DISABLED_FLAG_BIT) & + 0x1) << 2; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print_cont(hevc, 0, + "(3,%x)", data32); + if (((misc_flag0 >> PPS_DEBLOCKING_FILTER_DISABLED_FLAG_BIT) & + 0x1) == 0) { + data32 |= (params->p.pps_beta_offset_div2 & 0xf) << 3; + data32 |= (params->p.pps_tc_offset_div2 & 0xf) << 7; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print_cont(hevc, 0, + "(4,%x)", data32); + } + } + if ((misc_flag0 & (1 << PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT)) + && ((misc_flag0 & (1 << SLICE_SAO_LUMA_FLAG_BIT)) + || (misc_flag0 & (1 << SLICE_SAO_CHROMA_FLAG_BIT)) + || (!slice_deblocking_filter_disabled_flag))) { + data32 |= + ((misc_flag0 >> + SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT) + & 0x1) << 1; + data32_2 |= + ((misc_flag0 >> + 
SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT) + & 0x1) << 9; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print_cont(hevc, 0, + "(5,%x)\n", data32); + } else { + data32 |= + ((misc_flag0 >> + PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT) + & 0x1) << 1; + data32_2 |= + ((misc_flag0 >> + PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT) + & 0x1) << 9; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print_cont(hevc, 0, + "(6,%x)\n", data32); + } + WRITE_VREG(HEVC_DBLK_CFG9, data32); + WRITE_VREG(HEVC_SAO_CTRL0, data32_2); +} + +#ifdef TEST_NO_BUF +static unsigned char test_flag = 1; +#endif + +static void pic_list_process(struct hevc_state_s *hevc) +{ + int work_pic_num = get_work_pic_num(hevc); + int alloc_pic_count = 0; + int i; + struct PIC_s *pic; + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + alloc_pic_count++; + if (pic->output_mark == 0 && pic->referenced == 0 + && pic->output_ready == 0 + && (pic->width != hevc->pic_w || + pic->height != hevc->pic_h) + ) { + set_buf_unused(hevc, pic->BUF_index); + pic->BUF_index = -1; + if (alloc_pic_count > work_pic_num) { + pic->width = 0; + pic->height = 0; + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + release_pic_mmu_buf(hevc, pic); + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + pic->index = -1; + } else { + pic->width = hevc->pic_w; + pic->height = hevc->pic_h; + } + } + } + if (alloc_pic_count < work_pic_num) { + int new_count = alloc_pic_count; + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic && pic->index == -1) { + pic->index = i; + pic->BUF_index = -1; + pic->width = hevc->pic_w; + pic->height = hevc->pic_h; + new_count++; + if (new_count >= + work_pic_num) + break; + } + } + + } + dealloc_unused_buf(hevc); + if (get_alloc_pic_count(hevc) + != alloc_pic_count) { + hevc_print_cont(hevc, 0, + "%s: work_pic_num is %d, 
Change alloc_pic_count from %d to %d\n", + __func__, + work_pic_num, + alloc_pic_count, + get_alloc_pic_count(hevc)); + } +} + +static struct PIC_s *get_new_pic(struct hevc_state_s *hevc, + union param_u *rpm_param) +{ + struct vdec_s *vdec = hw_to_vdec(hevc); + struct PIC_s *new_pic = NULL; + struct PIC_s *pic; + int i; + int ret; + + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + + if (pic->output_mark == 0 && pic->referenced == 0 + && pic->output_ready == 0 + && pic->width == hevc->pic_w + && pic->height == hevc->pic_h + && pic->vf_ref == 0 + ) { + if (new_pic) { + if (new_pic->POC != INVALID_POC) { + if (pic->POC == INVALID_POC || + pic->POC < new_pic->POC) + new_pic = pic; + } + } else + new_pic = pic; + } + } + + if (new_pic == NULL) + return NULL; + + if (new_pic->BUF_index < 0) { + if (alloc_buf(hevc) < 0) + return NULL; + else { + if (config_pic(hevc, new_pic) < 0) { + dealloc_pic_buf(hevc, new_pic); + return NULL; + } + } + new_pic->width = hevc->pic_w; + new_pic->height = hevc->pic_h; + set_canvas(hevc, new_pic); + + init_pic_list_hw(hevc); + } + + if (new_pic) { + new_pic->double_write_mode = + get_double_write_mode(hevc); + if (new_pic->double_write_mode) + set_canvas(hevc, new_pic); + +#ifdef TEST_NO_BUF + if (test_flag) { + test_flag = 0; + return NULL; + } else + test_flag = 1; +#endif + if (get_mv_buf(hevc, new_pic) < 0) + return NULL; + + if (hevc->mmu_enable) { + ret = H265_alloc_mmu(hevc, new_pic, + rpm_param->p.bit_depth, + hevc->frame_mmu_map_addr); + if (ret != 0) { + put_mv_buf(hevc, new_pic); + hevc_print(hevc, 0, + "can't alloc need mmu1,idx %d ret =%d\n", + new_pic->decode_idx, + ret); + return NULL; + } + } +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable) { + ret = H265_alloc_mmu_dw(hevc, new_pic, + rpm_param->p.bit_depth, + hevc->frame_dw_mmu_map_addr); + if (ret != 0) { + put_mv_buf(hevc, new_pic); + hevc_print(hevc, 0, + "can't alloc need mmu_dw_1,idx %d ret 
=%d\n", + new_pic->decode_idx, + ret); + return NULL; + } + } +#endif + new_pic->referenced = 1; + new_pic->decode_idx = hevc->decode_idx; + new_pic->slice_idx = 0; + new_pic->referenced = 1; + new_pic->output_mark = 0; + new_pic->recon_mark = 0; + new_pic->error_mark = 0; + new_pic->dis_mark = 0; + /* new_pic->output_ready = 0; */ + new_pic->num_reorder_pic = rpm_param->p.sps_num_reorder_pics_0; + new_pic->ip_mode = (!new_pic->num_reorder_pic && + !(vdec->slave || vdec->master) && + !disable_ip_mode) ? true : false; + new_pic->losless_comp_body_size = hevc->losless_comp_body_size; + new_pic->POC = hevc->curr_POC; + new_pic->pic_struct = hevc->curr_pic_struct; + if (new_pic->aux_data_buf) + release_aux_data(hevc, new_pic); + new_pic->mem_saving_mode = + hevc->mem_saving_mode; + new_pic->bit_depth_luma = + hevc->bit_depth_luma; + new_pic->bit_depth_chroma = + hevc->bit_depth_chroma; + new_pic->video_signal_type = + hevc->video_signal_type; + + new_pic->conformance_window_flag = + hevc->param.p.conformance_window_flag; + new_pic->conf_win_left_offset = + hevc->param.p.conf_win_left_offset; + new_pic->conf_win_right_offset = + hevc->param.p.conf_win_right_offset; + new_pic->conf_win_top_offset = + hevc->param.p.conf_win_top_offset; + new_pic->conf_win_bottom_offset = + hevc->param.p.conf_win_bottom_offset; + new_pic->chroma_format_idc = + hevc->param.p.chroma_format_idc; + + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s: index %d, buf_idx %d, decode_idx %d, POC %d\n", + __func__, new_pic->index, + new_pic->BUF_index, new_pic->decode_idx, + new_pic->POC); + + } + if (pic_list_debug & 0x1) { + dump_pic_list(hevc); + pr_err("\n*******************************************\n"); + } + + return new_pic; +} + +static int get_free_fb_idx(struct hevc_state_s *hevc) +{ + int i; + + for (i = 0; i < MAX_REF_PIC_NUM; ++i) { + if (hevc->m_PIC[i] == NULL) + continue; + + if ((hevc->m_PIC[i]->referenced == 0) && + (hevc->m_PIC[i]->vf_ref == 0) && + (!hevc->m_PIC[i]->cma_alloc_addr)) + 
break; + } + + return (hevc->m_PIC[i] && + (i != MAX_REF_PIC_NUM)) ? i : -1; +} + +static struct PIC_s *v4l_get_new_pic(struct hevc_state_s *hevc, + union param_u *rpm_param) +{ + struct vdec_s *vdec = hw_to_vdec(hevc); + int ret; + struct aml_vcodec_ctx * v4l = hevc->v4l2_ctx; + struct v4l_buff_pool *pool = &v4l->cap_pool; + struct PIC_s *new_pic = NULL; + struct PIC_s *pic = NULL; + int i, j, idx; + + for (i = 0; i < pool->in; ++i) { + u32 state = (pool->seq[i] >> 16); + u32 index = (pool->seq[i] & 0xffff); + + switch (state) { + case V4L_CAP_BUFF_IN_DEC: + for (j = 0; j < MAX_REF_PIC_NUM; j++) { + pic = hevc->m_PIC[j]; + if (pic == NULL || pic->index == -1) + continue; + + if (pic->output_mark == 0 && + pic->referenced == 0 && + pic->output_ready == 0 && + pic->width == hevc->pic_w && + pic->height == hevc->pic_h && + pic->vf_ref == 0 && + pic->cma_alloc_addr) { + if (new_pic) { + if (new_pic->POC != INVALID_POC) { + if (pic->POC == INVALID_POC || + pic->POC < new_pic->POC) + new_pic = pic; + } + } else + new_pic = pic; + } + } + break; + case V4L_CAP_BUFF_IN_M2M: + idx = get_free_fb_idx(hevc); + if (idx < 0) + break; + pic = hevc->m_PIC[idx]; + pic->width = hevc->pic_w; + pic->height = hevc->pic_h; + hevc->buffer_wrap[idx] = index; + if ((pic->index != -1) && + !v4l_alloc_buf(hevc, pic)) { + v4l_config_pic(hevc, pic); + init_pic_list_hw(hevc); + new_pic = pic; + } + break; + default: + pr_err("v4l buffer state err %d.\n", state); + break; + } + + if (new_pic) + break; + } + + if (new_pic == NULL) + return NULL; + + new_pic->double_write_mode = get_double_write_mode(hevc); + if (new_pic->double_write_mode) + set_canvas(hevc, new_pic); + + if (get_mv_buf(hevc, new_pic) < 0) + return NULL; + + if (hevc->mmu_enable) { + ret = H265_alloc_mmu(hevc, new_pic, + rpm_param->p.bit_depth, + hevc->frame_mmu_map_addr); + if (ret != 0) { + put_mv_buf(hevc, new_pic); + hevc_print(hevc, 0, + "can't alloc need mmu1,idx %d ret =%d\n", + new_pic->decode_idx, ret); + return NULL; + 
} + } + + new_pic->referenced = 1; + new_pic->decode_idx = hevc->decode_idx; + new_pic->slice_idx = 0; + new_pic->referenced = 1; + new_pic->output_mark = 0; + new_pic->recon_mark = 0; + new_pic->error_mark = 0; + new_pic->dis_mark = 0; + /* new_pic->output_ready = 0; */ + new_pic->num_reorder_pic = rpm_param->p.sps_num_reorder_pics_0; + new_pic->ip_mode = hevc->low_latency_flag ? true : + (!new_pic->num_reorder_pic && + !(vdec->slave || vdec->master) && + !disable_ip_mode) ? true : false; + new_pic->losless_comp_body_size = hevc->losless_comp_body_size; + new_pic->POC = hevc->curr_POC; + new_pic->pic_struct = hevc->curr_pic_struct; + + if (new_pic->aux_data_buf) + release_aux_data(hevc, new_pic); + new_pic->mem_saving_mode = + hevc->mem_saving_mode; + new_pic->bit_depth_luma = + hevc->bit_depth_luma; + new_pic->bit_depth_chroma = + hevc->bit_depth_chroma; + new_pic->video_signal_type = + hevc->video_signal_type; + + new_pic->conformance_window_flag = + hevc->param.p.conformance_window_flag; + new_pic->conf_win_left_offset = + hevc->param.p.conf_win_left_offset; + new_pic->conf_win_right_offset = + hevc->param.p.conf_win_right_offset; + new_pic->conf_win_top_offset = + hevc->param.p.conf_win_top_offset; + new_pic->conf_win_bottom_offset = + hevc->param.p.conf_win_bottom_offset; + new_pic->chroma_format_idc = + hevc->param.p.chroma_format_idc; + + if (new_pic) { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *)new_pic->cma_alloc_addr; + + fb->status = FB_ST_DECODER; + } + + hevc_print(hevc, H265_DEBUG_BUFMGR, + "%s: index %d, buf_idx %d, decode_idx %d, POC %d\n", + __func__, new_pic->index, + new_pic->BUF_index, new_pic->decode_idx, + new_pic->POC); + + return new_pic; +} + +static int get_display_pic_num(struct hevc_state_s *hevc) +{ + int i; + struct PIC_s *pic; + int num = 0; + + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || + pic->index == -1) + continue; + + if (pic->output_ready == 1) + num++; + } + return 
num; +} + +static void flush_output(struct hevc_state_s *hevc, struct PIC_s *pic) +{ + struct PIC_s *pic_display; + + if (pic) { + /*PB skip control */ + if (pic->error_mark == 0 && hevc->PB_skip_mode == 1) { + /* start decoding after first I */ + hevc->ignore_bufmgr_error |= 0x1; + } + if (hevc->ignore_bufmgr_error & 1) { + if (hevc->PB_skip_count_after_decoding > 0) + hevc->PB_skip_count_after_decoding--; + else { + /* start displaying */ + hevc->ignore_bufmgr_error |= 0x2; + } + } + if (pic->POC != INVALID_POC && !pic->ip_mode) + pic->output_mark = 1; + pic->recon_mark = 1; + } + do { + pic_display = output_pic(hevc, 1); + + if (pic_display) { + pic_display->referenced = 0; + put_mv_buf(hevc, pic_display); + if ((pic_display->error_mark + && ((hevc->ignore_bufmgr_error & 0x2) == 0)) + || (get_dbg_flag(hevc) & + H265_DEBUG_DISPLAY_CUR_FRAME) + || (get_dbg_flag(hevc) & + H265_DEBUG_NO_DISPLAY)) { + pic_display->output_ready = 0; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] Display: POC %d, ", + pic_display->POC); + hevc_print_cont(hevc, 0, + "decoding index %d ==> ", + pic_display->decode_idx); + hevc_print_cont(hevc, 0, + "Debug mode or error, recycle it\n"); + } + /* + * Here the pic/frame error_mark is 1, + * and it won't be displayed, so increase + * the drop count + */ + hevc->gvs->drop_frame_count++; + if (pic_display->slice_type == I_SLICE) { + hevc->gvs->i_lost_frames++; + } else if (pic_display->slice_type == P_SLICE) { + hevc->gvs->p_lost_frames++; + } else if (pic_display->slice_type == B_SLICE) { + hevc->gvs->b_lost_frames++; + } + /* error frame count also need increase */ + hevc->gvs->error_frame_count++; + if (pic_display->slice_type == I_SLICE) { + hevc->gvs->i_concealed_frames++; + } else if (pic_display->slice_type == P_SLICE) { + hevc->gvs->p_concealed_frames++; + } else if (pic_display->slice_type == B_SLICE) { + hevc->gvs->b_concealed_frames++; + } + } else { + if (hevc->i_only & 0x1 + && 
pic_display->slice_type != 2) { + pic_display->output_ready = 0; + } else { + prepare_display_buf(hw_to_vdec(hevc), pic_display); + if (get_dbg_flag(hevc) + & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] flush Display: POC %d, ", + pic_display->POC); + hevc_print_cont(hevc, 0, + "decoding index %d\n", + pic_display->decode_idx); + } + } + } + } + } while (pic_display); + clear_referenced_flag(hevc); +} + +/* +* dv_meta_flag: 1, dolby meta only; 2, not include dolby meta +*/ +static void set_aux_data(struct hevc_state_s *hevc, + struct PIC_s *pic, unsigned char suffix_flag, + unsigned char dv_meta_flag) +{ + int i; + unsigned short *aux_adr; + unsigned int size_reg_val = + READ_VREG(HEVC_AUX_DATA_SIZE); + unsigned int aux_count = 0; + int aux_size = 0; + if (pic == NULL || 0 == aux_data_is_avaible(hevc)) + return; + + if (hevc->aux_data_dirty || + hevc->m_ins_flag == 0) { + + hevc->aux_data_dirty = 0; + } + + if (suffix_flag) { + aux_adr = (unsigned short *) + (hevc->aux_addr + + hevc->prefix_aux_size); + aux_count = + ((size_reg_val & 0xffff) << 4) + >> 1; + aux_size = + hevc->suffix_aux_size; + } else { + aux_adr = + (unsigned short *)hevc->aux_addr; + aux_count = + ((size_reg_val >> 16) << 4) + >> 1; + aux_size = + hevc->prefix_aux_size; + } + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) { + hevc_print(hevc, 0, + "%s:pic 0x%p old size %d count %d,suf %d dv_flag %d\r\n", + __func__, pic, pic->aux_data_size, + aux_count, suffix_flag, dv_meta_flag); + } + + if (aux_count > aux_size) { + hevc_print(hevc, 0, + "%s:aux_count(%d) is over size\n", __func__, aux_count); + aux_count = 0; + } + if (aux_size > 0 && aux_count > 0) { + int heads_size = 0; + int new_size; + char *new_buf; + + for (i = 0; i < aux_count; i++) { + unsigned char tag = aux_adr[i] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + heads_size += 8; + else if (dv_meta_flag == 1 && tag == 0x1) + heads_size += 8; + else if (dv_meta_flag == 2 && tag != 0x1) + heads_size += 8; + } + 
} + new_size = pic->aux_data_size + aux_count + heads_size; + new_buf = vzalloc(new_size); + if (new_buf) { + unsigned char valid_tag = 0; + unsigned char *h = + new_buf + + pic->aux_data_size; + unsigned char *p = h + 8; + int len = 0; + int padding_len = 0; + + if (pic->aux_data_buf) { + memcpy(new_buf, pic->aux_data_buf, pic->aux_data_size); + vfree(pic->aux_data_buf); + } + pic->aux_data_buf = new_buf; + + for (i = 0; i < aux_count; i += 4) { + int ii; + unsigned char tag = aux_adr[i + 3] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + valid_tag = 1; + else if (dv_meta_flag == 1 + && tag == 0x1) + valid_tag = 1; + else if (dv_meta_flag == 2 + && tag != 0x1) + valid_tag = 1; + else + valid_tag = 0; + if (valid_tag && len > 0) { + pic->aux_data_size += + (len + 8); + h[0] = (len >> 24) + & 0xff; + h[1] = (len >> 16) + & 0xff; + h[2] = (len >> 8) + & 0xff; + h[3] = (len >> 0) + & 0xff; + h[6] = + (padding_len >> 8) + & 0xff; + h[7] = (padding_len) + & 0xff; + h += (len + 8); + p += 8; + len = 0; + padding_len = 0; + } + if (valid_tag) { + h[4] = tag; + h[5] = 0; + h[6] = 0; + h[7] = 0; + } + } + if (valid_tag) { + for (ii = 0; ii < 4; ii++) { + unsigned short aa = + aux_adr[i + 3 + - ii]; + *p = aa & 0xff; + p++; + len++; + /*if ((aa >> 8) == 0xff) + padding_len++;*/ + } + } + } + if (len > 0) { + pic->aux_data_size += (len + 8); + h[0] = (len >> 24) & 0xff; + h[1] = (len >> 16) & 0xff; + h[2] = (len >> 8) & 0xff; + h[3] = (len >> 0) & 0xff; + h[6] = (padding_len >> 8) & 0xff; + h[7] = (padding_len) & 0xff; + } + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) { + hevc_print(hevc, 0, + "aux: (size %d) suffix_flag %d\n", + pic->aux_data_size, suffix_flag); + for (i = 0; i < pic->aux_data_size; i++) { + hevc_print_cont(hevc, 0, + "%02x ", pic->aux_data_buf[i]); + if (((i + 1) & 0xf) == 0) + hevc_print_cont(hevc, 0, "\n"); + } + hevc_print_cont(hevc, 0, "\n"); + } + + } else { + hevc_print(hevc, 0, "new buf alloc failed\n"); + if (pic->aux_data_buf) 
+ vfree(pic->aux_data_buf); + pic->aux_data_buf = NULL; + pic->aux_data_size = 0; + } + } + +} + +static void release_aux_data(struct hevc_state_s *hevc, + struct PIC_s *pic) +{ + if (pic->aux_data_buf) { + vfree(pic->aux_data_buf); + if ((run_count[hevc->index] & 63) == 0) + vm_unmap_aliases(); + } + pic->aux_data_buf = NULL; + pic->aux_data_size = 0; +} + +static int recycle_mmu_buf_tail(struct hevc_state_s *hevc, + bool check_dma) +{ + hevc_print(hevc, + H265_DEBUG_BUFMGR_MORE, + "%s pic index %d scatter_alloc %d page_start %d\n", + "decoder_mmu_box_free_idx_tail", + hevc->cur_pic->index, + hevc->cur_pic->scatter_alloc, + hevc->used_4k_num); + if (check_dma) + hevc_mmu_dma_check(hw_to_vdec(hevc)); + + if (hevc->is_used_v4l) { + int index = hevc->cur_pic->BUF_index; + struct internal_comp_buf *ibuf = + index_to_icomp_buf(hevc, index); + + decoder_mmu_box_free_idx_tail( + ibuf->mmu_box, + ibuf->index, + hevc->used_4k_num); + } else { + decoder_mmu_box_free_idx_tail( + hevc->mmu_box, + hevc->cur_pic->index, + hevc->used_4k_num); + } + hevc->cur_pic->scatter_alloc = 2; + hevc->used_4k_num = -1; + return 0; +} + +static inline void hevc_pre_pic(struct hevc_state_s *hevc, + struct PIC_s *pic) +{ + + /* prev pic */ + /*if (hevc->curr_POC != 0) {*/ + int decoded_poc = hevc->iPrevPOC; +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + decoded_poc = hevc->decoded_poc; + hevc->decoded_poc = INVALID_POC; + } +#endif + if (hevc->m_nalUnitType != NAL_UNIT_CODED_SLICE_IDR + && hevc->m_nalUnitType != + NAL_UNIT_CODED_SLICE_IDR_N_LP) { + struct PIC_s *pic_display; + + pic = get_pic_by_POC(hevc, decoded_poc); + if (pic && (pic->POC != INVALID_POC)) { + struct vdec_s *vdec = hw_to_vdec(hevc); + + /*PB skip control */ + if (pic->error_mark == 0 + && hevc->PB_skip_mode == 1) { + /* start decoding after + * first I + */ + hevc->ignore_bufmgr_error |= 0x1; + } + if (hevc->ignore_bufmgr_error & 1) { + if (hevc->PB_skip_count_after_decoding > 0) { + 
hevc->PB_skip_count_after_decoding--; + } else { + /* start displaying */ + hevc->ignore_bufmgr_error |= 0x2; + } + } + if (hevc->mmu_enable + && ((hevc->double_write_mode & 0x10) == 0)) { + if (!hevc->m_ins_flag) { + hevc->used_4k_num = + READ_VREG(HEVC_SAO_MMU_STATUS) >> 16; + + if ((!is_skip_decoding(hevc, pic)) && + (hevc->used_4k_num >= 0) && + (hevc->cur_pic->scatter_alloc + == 1)) { + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + recycle_mmu_buf_tail(hevc, true); + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + } + } + } + if (!pic->ip_mode) + pic->output_mark = 1; + pic->recon_mark = 1; + pic->dis_mark = 1; + if (vdec->mvfrm) { + pic->frame_size = vdec->mvfrm->frame_size; + pic->hw_decode_time = (u32)vdec->mvfrm->hw_decode_time; + } + } + do { + pic_display = output_pic(hevc, 0); + + if (pic_display) { + if ((pic_display->error_mark && + ((hevc->ignore_bufmgr_error & + 0x2) == 0)) + || (get_dbg_flag(hevc) & + H265_DEBUG_DISPLAY_CUR_FRAME) + || (get_dbg_flag(hevc) & + H265_DEBUG_NO_DISPLAY)) { + pic_display->output_ready = 0; + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] Display: POC %d, ", + pic_display->POC); + hevc_print_cont(hevc, 0, + "decoding index %d ==> ", + pic_display-> + decode_idx); + hevc_print_cont(hevc, 0, + "Debug or err,recycle it\n"); + } + /* + * Here the pic/frame error_mark is 1, + * and it won't be displayed, so increase + * the drop count + */ + hevc->gvs->drop_frame_count++; + if (pic_display->slice_type == I_SLICE) { + hevc->gvs->i_lost_frames++; + }else if (pic_display->slice_type == P_SLICE) { + hevc->gvs->p_lost_frames++; + } else if (pic_display->slice_type == B_SLICE) { + hevc->gvs->b_lost_frames++; + } + /* error frame count also need increase */ + hevc->gvs->error_frame_count++; + if (pic_display->slice_type == I_SLICE) { + hevc->gvs->i_concealed_frames++; + } else if (pic_display->slice_type == P_SLICE) { + 
hevc->gvs->p_concealed_frames++; + } else if (pic_display->slice_type == B_SLICE) { + hevc->gvs->b_concealed_frames++; + } + } else { + if (hevc->i_only & 0x1 + && pic_display-> + slice_type != 2) { + pic_display->output_ready = 0; + } else { + prepare_display_buf + (hw_to_vdec(hevc), + pic_display); + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] Display: POC %d, ", + pic_display->POC); + hevc_print_cont(hevc, 0, + "decoding index %d\n", + pic_display-> + decode_idx); + } + } + } + } + } while (pic_display); + } else { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] current pic is IDR, "); + hevc_print(hevc, 0, + "clear referenced flag of all buffers\n"); + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) + dump_pic_list(hevc); + if (hevc->vf_pre_count == 1 && + hevc->first_pic_flag == 1) { + hevc->first_pic_flag = 2; + pic = NULL; + } + else + pic = get_pic_by_POC(hevc, decoded_poc); + + flush_output(hevc, pic); + } + +} + +static void check_pic_decoded_error_pre(struct hevc_state_s *hevc, + int decoded_lcu) +{ + int current_lcu_idx = decoded_lcu; + if (decoded_lcu < 0) + return; + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "cur lcu idx = %d, (total %d)\n", + current_lcu_idx, hevc->lcu_total); + } + if ((error_handle_policy & 0x20) == 0 && hevc->cur_pic != NULL) { + if (hevc->first_pic_after_recover) { + if (current_lcu_idx != + ((hevc->lcu_x_num_pre*hevc->lcu_y_num_pre) - 1)) + hevc->cur_pic->error_mark = 1; + } else { + if (hevc->lcu_x_num_pre != 0 + && hevc->lcu_y_num_pre != 0 + && current_lcu_idx != 0 + && current_lcu_idx < + ((hevc->lcu_x_num_pre*hevc->lcu_y_num_pre) - 1)) + hevc->cur_pic->error_mark = 1; + } + if (hevc->cur_pic->error_mark) { + if (print_lcu_error) + hevc_print(hevc, 0, + "cur lcu idx = %d, (total %d), set error_mark\n", + current_lcu_idx, + hevc->lcu_x_num_pre*hevc->lcu_y_num_pre); + if (is_log_enable(hevc)) + add_log(hevc, + "cur lcu idx = %d, 
(total %d), set error_mark", + current_lcu_idx, + hevc->lcu_x_num_pre * + hevc->lcu_y_num_pre); + + } + + } + if (hevc->cur_pic && hevc->head_error_flag) { + hevc->cur_pic->error_mark = 1; + hevc_print(hevc, 0, + "head has error, set error_mark\n"); + } + + if ((error_handle_policy & 0x80) == 0) { + if (hevc->over_decode && hevc->cur_pic) { + hevc_print(hevc, 0, + "over decode, set error_mark\n"); + hevc->cur_pic->error_mark = 1; + } + } + + hevc->lcu_x_num_pre = hevc->lcu_x_num; + hevc->lcu_y_num_pre = hevc->lcu_y_num; +} + +static void check_pic_decoded_error(struct hevc_state_s *hevc, + int decoded_lcu) +{ + int current_lcu_idx = decoded_lcu; + if (decoded_lcu < 0) + return; + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "cur lcu idx = %d, (total %d)\n", + current_lcu_idx, hevc->lcu_total); + } + if ((error_handle_policy & 0x20) == 0 && hevc->cur_pic != NULL) { + if (hevc->lcu_x_num != 0 + && hevc->lcu_y_num != 0 + && current_lcu_idx != 0 + && current_lcu_idx < + ((hevc->lcu_x_num*hevc->lcu_y_num) - 1)) + hevc->cur_pic->error_mark = 1; + + if (hevc->cur_pic->error_mark) { + if (print_lcu_error) + hevc_print(hevc, 0, + "cur lcu idx = %d, (total %d), set error_mark\n", + current_lcu_idx, + hevc->lcu_x_num*hevc->lcu_y_num); + if (((hevc->i_only & 0x4) == 0) && hevc->cur_pic->POC && ( hevc->cur_pic->slice_type == 0) + && ((hevc->cur_pic->POC + MAX_BUF_NUM) < hevc->iPrevPOC)) { + hevc_print(hevc, 0, + "Flush.. 
num_reorder_pic %d pic->POC %d hevc->iPrevPOC %d\n", + hevc->sps_num_reorder_pics_0,hevc->cur_pic->POC ,hevc->iPrevPOC); + flush_output(hevc, get_pic_by_POC(hevc, hevc->cur_pic->POC )); + } + if (is_log_enable(hevc)) + add_log(hevc, + "cur lcu idx = %d, (total %d), set error_mark", + current_lcu_idx, + hevc->lcu_x_num * + hevc->lcu_y_num); + + } + + } + if (hevc->cur_pic && hevc->head_error_flag) { + hevc->cur_pic->error_mark = 1; + hevc_print(hevc, 0, + "head has error, set error_mark\n"); + } + + if ((error_handle_policy & 0x80) == 0) { + if (hevc->over_decode && hevc->cur_pic) { + hevc_print(hevc, 0, + "over decode, set error_mark\n"); + hevc->cur_pic->error_mark = 1; + } + } +} + +/* only when we decoded one field or one frame, +we can call this function to get qos info*/ +static void get_picture_qos_info(struct hevc_state_s *hevc) +{ + struct PIC_s *picture = hevc->cur_pic; + +/* +#define DEBUG_QOS +*/ + + if (!hevc->cur_pic) + return; + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + unsigned char a[3]; + unsigned char i, j, t; + unsigned long data; + + data = READ_VREG(HEVC_MV_INFO); + if (picture->slice_type == I_SLICE) + data = 0; + a[0] = data & 0xff; + a[1] = (data >> 8) & 0xff; + a[2] = (data >> 16) & 0xff; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_mv = a[2]; + picture->avg_mv = a[1]; + picture->min_mv = a[0]; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "mv data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); +#endif + + data = READ_VREG(HEVC_QP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_qp = a[2]; + 
picture->avg_qp = a[1]; + picture->min_qp = a[0]; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "qp data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); +#endif + + data = READ_VREG(HEVC_SKIP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_skip = a[2]; + picture->avg_skip = a[1]; + picture->min_skip = a[0]; + +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "skip data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); +#endif + } else { + uint32_t blk88_y_count; + uint32_t blk88_c_count; + uint32_t blk22_mv_count; + uint32_t rdata32; + int32_t mv_hi; + int32_t mv_lo; + uint32_t rdata32_l; + uint32_t mvx_L0_hi; + uint32_t mvy_L0_hi; + uint32_t mvx_L1_hi; + uint32_t mvy_L1_hi; + int64_t value; + uint64_t temp_value; +#ifdef DEBUG_QOS + int pic_number = picture->POC; +#endif + + picture->max_mv = 0; + picture->avg_mv = 0; + picture->min_mv = 0; + + picture->max_skip = 0; + picture->avg_skip = 0; + picture->min_skip = 0; + + picture->max_qp = 0; + picture->avg_qp = 0; + picture->min_qp = 0; + + + +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "slice_type:%d, poc:%d\n", + picture->slice_type, + picture->POC); +#endif + /* set rd_idx to 0 */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, 0); + + blk88_y_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_y_count == 0) { +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_y_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] Y QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_y_count, + rdata32, blk88_y_count); +#endif + picture->avg_qp = rdata32/blk88_y_count; + /* 
intra_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] Y intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif + /* skipped_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] Y skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif + picture->avg_skip = rdata32*100/blk88_y_count; + /* coeff_non_zero_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] Y ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_y_count*1)), + '%', rdata32); +#endif + /* blk66_c_count */ + blk88_c_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_c_count == 0) { +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_c_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] C QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_c_count, + rdata32, blk88_c_count); +#endif + /* intra_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] C intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* skipped_cu_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] C skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* coeff_non_zero_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] C ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_c_count*1)), + '%', rdata32); +#endif + + /* 1'h0, 
qp_c_max[6:0], 1'h0, qp_c_min[6:0], + 1'h0, qp_y_max[6:0], 1'h0, qp_y_min[6:0] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "[Picture %d Quality] Y QP min : %d\n", + pic_number, (rdata32>>0)&0xff); +#endif + picture->min_qp = (rdata32>>0)&0xff; + +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "[Picture %d Quality] Y QP max : %d\n", + pic_number, (rdata32>>8)&0xff); +#endif + picture->max_qp = (rdata32>>8)&0xff; + +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "[Picture %d Quality] C QP min : %d\n", + pic_number, (rdata32>>16)&0xff); + hevc_print(hevc, 0, "[Picture %d Quality] C QP max : %d\n", + pic_number, (rdata32>>24)&0xff); +#endif + + /* blk22_mv_count */ + blk22_mv_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk22_mv_count == 0) { +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] NO MV Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* mvy_L1_count[39:32], mvx_L1_count[39:32], + mvy_L0_count[39:32], mvx_L0_count[39:32] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + /* should all be 0x00 or 0xff */ +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] MV AVG High Bits: 0x%X\n", + pic_number, rdata32); +#endif + mvx_L0_hi = ((rdata32>>0)&0xff); + mvy_L0_hi = ((rdata32>>8)&0xff); + mvx_L1_hi = ((rdata32>>16)&0xff); + mvy_L1_hi = ((rdata32>>24)&0xff); + + /* mvx_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvx_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + value = div_s64(value, blk22_mv_count); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] MVX_L0 AVG : %d (%lld/%d)\n", + pic_number, (int)value, + value, blk22_mv_count); +#endif + picture->avg_mv = value; + + /* mvy_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L0_hi; + temp_value = 
(temp_value << 32) | rdata32_l; + + if (mvy_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] MVY_L0 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvx_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvx_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] MVX_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvy_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvy_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] MVY_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* {mvx_L0_max, mvx_L0_min} // format : {sign, abs[14:0]} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "[Picture %d Quality] MVX_L0 MAX : %d\n", + pic_number, mv_hi); +#endif + picture->max_mv = mv_hi; + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "[Picture %d Quality] MVX_L0 MIN : %d\n", + pic_number, mv_lo); +#endif + picture->min_mv = mv_lo; + +#ifdef DEBUG_QOS + /* {mvy_L0_max, mvy_L0_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + hevc_print(hevc, 0, "[Picture %d Quality] MVY_L0 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + 
mv_lo = 0x8000 - mv_lo; + + hevc_print(hevc, 0, "[Picture %d Quality] MVY_L0 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvx_L1_max, mvx_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + hevc_print(hevc, 0, "[Picture %d Quality] MVX_L1 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + hevc_print(hevc, 0, "[Picture %d Quality] MVX_L1 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvy_L1_max, mvy_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + hevc_print(hevc, 0, "[Picture %d Quality] MVY_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + hevc_print(hevc, 0, "[Picture %d Quality] MVY_L1 MIN : %d\n", + pic_number, mv_lo); +#endif + + rdata32 = READ_VREG(HEVC_PIC_QUALITY_CTRL); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] After Read : VDEC_PIC_QUALITY_CTRL : 0x%x\n", + pic_number, rdata32); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + } +} + +static int hevc_slice_segment_header_process(struct hevc_state_s *hevc, + union param_u *rpm_param, + int decode_pic_begin) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + struct vdec_s *vdec = hw_to_vdec(hevc); +#endif + int i; + int lcu_x_num_div; + int lcu_y_num_div; + int Col_ref; + int dbg_skip_flag = 0; + + if (hevc->wait_buf == 0) { + hevc->sps_num_reorder_pics_0 = + rpm_param->p.sps_num_reorder_pics_0; + hevc->ip_mode = (!hevc->sps_num_reorder_pics_0 && + !(vdec->slave || vdec->master) && + !disable_ip_mode) ? 
true : false; + hevc->m_temporalId = rpm_param->p.m_temporalId; + hevc->m_nalUnitType = rpm_param->p.m_nalUnitType; + hevc->interlace_flag = + (rpm_param->p.profile_etc >> 2) & 0x1; + hevc->curr_pic_struct = + (rpm_param->p.sei_frame_field_info >> 3) & 0xf; + hevc->frame_field_info_present_flag = + (rpm_param->p.sei_frame_field_info >> 8) & 0x1; + + if (hevc->frame_field_info_present_flag) { + if (hevc->curr_pic_struct == 0 + || hevc->curr_pic_struct == 7 + || hevc->curr_pic_struct == 8) + hevc->interlace_flag = 0; + } + + hevc_print(hevc, H265_DEBUG_PIC_STRUCT, + "frame_field_info_present_flag = %d curr_pic_struct = %d interlace_flag = %d\n", + hevc->frame_field_info_present_flag, + hevc->curr_pic_struct, + hevc->interlace_flag); + + /* if (interlace_enable == 0 || hevc->m_ins_flag) */ + if (interlace_enable == 0) + hevc->interlace_flag = 0; + if (interlace_enable & 0x100) + hevc->interlace_flag = interlace_enable & 0x1; + if (hevc->interlace_flag == 0) + hevc->curr_pic_struct = 0; + /* if(hevc->m_nalUnitType == NAL_UNIT_EOS){ */ + /* + *hevc->m_pocRandomAccess = MAX_INT; + * //add to fix RAP_B_Bossen_1 + */ + /* } */ + hevc->misc_flag0 = rpm_param->p.misc_flag0; + if (rpm_param->p.first_slice_segment_in_pic_flag == 0) { + hevc->slice_segment_addr = + rpm_param->p.slice_segment_address; + if (!rpm_param->p.dependent_slice_segment_flag) + hevc->slice_addr = hevc->slice_segment_addr; + } else { + hevc->slice_segment_addr = 0; + hevc->slice_addr = 0; + } + + hevc->iPrevPOC = hevc->curr_POC; + hevc->slice_type = (rpm_param->p.slice_type == I_SLICE) ? 2 : + (rpm_param->p.slice_type == P_SLICE) ? 1 : + (rpm_param->p.slice_type == B_SLICE) ? 0 : 3; + /* hevc->curr_predFlag_L0=(hevc->slice_type==2) ? 0:1; */ + /* hevc->curr_predFlag_L1=(hevc->slice_type==0) ? 1:0; */ + hevc->TMVPFlag = rpm_param->p.slice_temporal_mvp_enable_flag; + hevc->isNextSliceSegment = + rpm_param->p.dependent_slice_segment_flag ? 
1 : 0; + if (is_oversize_ex(rpm_param->p.pic_width_in_luma_samples, + rpm_param->p.pic_height_in_luma_samples)) { + hevc_print(hevc, 0, "over size : %u x %u.\n", + rpm_param->p.pic_width_in_luma_samples, rpm_param->p.pic_height_in_luma_samples); + if ((!hevc->m_ins_flag) && + ((debug & + H265_NO_CHANG_DEBUG_FLAG_IN_CODE) == 0)) + debug |= (H265_DEBUG_DIS_LOC_ERROR_PROC | + H265_DEBUG_DIS_SYS_ERROR_PROC); + return 3; + } + + if (hevc->pic_w != rpm_param->p.pic_width_in_luma_samples + || hevc->pic_h != + rpm_param->p.pic_height_in_luma_samples) { + hevc_print(hevc, 0, + "Pic Width/Height Change (%d,%d)=>(%d,%d), interlace %d\n", + hevc->pic_w, hevc->pic_h, + rpm_param->p.pic_width_in_luma_samples, + rpm_param->p.pic_height_in_luma_samples, + hevc->interlace_flag); + + hevc->pic_w = rpm_param->p.pic_width_in_luma_samples; + hevc->pic_h = rpm_param->p.pic_height_in_luma_samples; + hevc->frame_width = hevc->pic_w; + hevc->frame_height = hevc->pic_h; +#ifdef LOSLESS_COMPRESS_MODE + if (/*re_config_pic_flag == 0 &&*/ + (get_double_write_mode(hevc) & 0x10) == 0) + init_decode_head_hw(hevc); +#endif + } + + if (hevc->bit_depth_chroma > 10 || + hevc->bit_depth_luma > 10) { + hevc_print(hevc, 0, "unsupport bitdepth : %u,%u\n", + hevc->bit_depth_chroma, + hevc->bit_depth_luma); + if (!hevc->m_ins_flag) + debug |= (H265_DEBUG_DIS_LOC_ERROR_PROC | + H265_DEBUG_DIS_SYS_ERROR_PROC); + hevc->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + return 4; + } + + /* it will cause divide 0 error */ + if (hevc->pic_w == 0 || hevc->pic_h == 0) { + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "Fatal Error, pic_w = %d, pic_h = %d\n", + hevc->pic_w, hevc->pic_h); + } + return 3; + } + pic_list_process(hevc); + + hevc->lcu_size = + 1 << (rpm_param->p.log2_min_coding_block_size_minus3 + + 3 + rpm_param-> + p.log2_diff_max_min_coding_block_size); + if (hevc->lcu_size == 0) { + hevc_print(hevc, 0, + "Error, lcu_size = 0 (%d,%d)\n", + rpm_param->p. 
+ log2_min_coding_block_size_minus3, + rpm_param->p. + log2_diff_max_min_coding_block_size); + return 3; + } + hevc->lcu_size_log2 = log2i(hevc->lcu_size); + lcu_x_num_div = (hevc->pic_w / hevc->lcu_size); + lcu_y_num_div = (hevc->pic_h / hevc->lcu_size); + hevc->lcu_x_num = + ((hevc->pic_w % hevc->lcu_size) == + 0) ? lcu_x_num_div : lcu_x_num_div + 1; + hevc->lcu_y_num = + ((hevc->pic_h % hevc->lcu_size) == + 0) ? lcu_y_num_div : lcu_y_num_div + 1; + hevc->lcu_total = hevc->lcu_x_num * hevc->lcu_y_num; + + if (hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_IDR + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_IDR_N_LP) { + hevc->curr_POC = 0; + if ((hevc->m_temporalId - 1) == 0) + hevc->iPrevTid0POC = hevc->curr_POC; + } else { + int iMaxPOClsb = + 1 << (rpm_param->p. + log2_max_pic_order_cnt_lsb_minus4 + 4); + int iPrevPOClsb; + int iPrevPOCmsb; + int iPOCmsb; + int iPOClsb = rpm_param->p.POClsb; + + if (iMaxPOClsb == 0) { + hevc_print(hevc, 0, + "error iMaxPOClsb is 0\n"); + return 3; + } + + iPrevPOClsb = hevc->iPrevTid0POC % iMaxPOClsb; + iPrevPOCmsb = hevc->iPrevTid0POC - iPrevPOClsb; + + if ((iPOClsb < iPrevPOClsb) + && ((iPrevPOClsb - iPOClsb) >= + (iMaxPOClsb / 2))) + iPOCmsb = iPrevPOCmsb + iMaxPOClsb; + else if ((iPOClsb > iPrevPOClsb) + && ((iPOClsb - iPrevPOClsb) > + (iMaxPOClsb / 2))) + iPOCmsb = iPrevPOCmsb - iMaxPOClsb; + else + iPOCmsb = iPrevPOCmsb; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "iPrePOC%d iMaxPOClsb%d iPOCmsb%d iPOClsb%d\n", + hevc->iPrevTid0POC, iMaxPOClsb, iPOCmsb, + iPOClsb); + } + if (hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_BLA + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLANT + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLA_N_LP) { + /* For BLA picture types, POCmsb is set to 0. 
*/ + iPOCmsb = 0; + } + hevc->curr_POC = (iPOCmsb + iPOClsb); + if ((hevc->m_temporalId - 1) == 0) + hevc->iPrevTid0POC = hevc->curr_POC; + else { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "m_temporalID is %d\n", + hevc->m_temporalId); + } + } + } + hevc->RefNum_L0 = + (rpm_param->p.num_ref_idx_l0_active > + MAX_REF_ACTIVE) ? MAX_REF_ACTIVE : rpm_param->p. + num_ref_idx_l0_active; + hevc->RefNum_L1 = + (rpm_param->p.num_ref_idx_l1_active > + MAX_REF_ACTIVE) ? MAX_REF_ACTIVE : rpm_param->p. + num_ref_idx_l1_active; + + /* if(curr_POC==0x10) dump_lmem(); */ + + /* skip RASL pictures after CRA/BLA pictures */ + if (hevc->m_pocRandomAccess == MAX_INT) {/* first picture */ + if (hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_CRA || + hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_BLA + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLANT + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLA_N_LP) + hevc->m_pocRandomAccess = hevc->curr_POC; + else + hevc->m_pocRandomAccess = -MAX_INT; + } else if (hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_BLA + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLANT + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLA_N_LP) + hevc->m_pocRandomAccess = hevc->curr_POC; + else if ((hevc->curr_POC < hevc->m_pocRandomAccess) && + (nal_skip_policy >= 3) && + (hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_RASL_N || + hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_TFD)) { /* skip */ + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "RASL picture with POC %d < %d ", + hevc->curr_POC, hevc->m_pocRandomAccess); + hevc_print(hevc, 0, + "RandomAccess point POC), skip it\n"); + } + return 1; + } + + WRITE_VREG(HEVC_WAIT_FLAG, READ_VREG(HEVC_WAIT_FLAG) | 0x2); + hevc->skip_flag = 0; + /**/ + /* if((iPrevPOC != curr_POC)){ */ + if (rpm_param->p.slice_segment_address == 0) { + struct PIC_s *pic = NULL; + + hevc->new_pic = 1; +#ifdef MULTI_INSTANCE_SUPPORT + if (!hevc->m_ins_flag) +#endif + check_pic_decoded_error_pre(hevc, 
+ READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff); + if (vdec_stream_based(vdec) && ((READ_VREG(HEVC_PARSER_LCU_START) & 0xffffff) != 0)) { + if (hevc->cur_pic) + hevc->cur_pic->error_mark = 1; + } + /**/ if (use_cma == 0) { + if (hevc->pic_list_init_flag == 0) { + init_pic_list(hevc); + init_pic_list_hw(hevc); + init_buf_spec(hevc); + hevc->pic_list_init_flag = 3; + } + } + if (!hevc->m_ins_flag) { + if (hevc->cur_pic) + get_picture_qos_info(hevc); + } + hevc->first_pic_after_recover = 0; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) + dump_pic_list(hevc); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec->master) { + struct hevc_state_s *hevc_ba = + (struct hevc_state_s *) + vdec->master->private; + if (hevc_ba->cur_pic != NULL) { + hevc_ba->cur_pic->dv_enhance_exist = 1; + hevc_print(hevc, H265_DEBUG_DV, + "To decode el (poc %d) => set bl (poc %d) dv_enhance_exist flag\n", + hevc->curr_POC, hevc_ba->cur_pic->POC); + } + } + if (vdec->master == NULL && + vdec->slave == NULL) + set_aux_data(hevc, + hevc->cur_pic, 1, 0); /*suffix*/ + if (hevc->bypass_dvenl && !dolby_meta_with_el) + set_aux_data(hevc, + hevc->cur_pic, 0, 1); /*dv meta only*/ +#else + set_aux_data(hevc, hevc->cur_pic, 1, 0); +#endif + + /* prev pic */ + hevc_pre_pic(hevc, pic); + /* + *update referenced of old pictures + *(cur_pic->referenced is 1 and not updated) + */ + apply_ref_pic_set(hevc, hevc->curr_POC, + rpm_param); + + /*if (hevc->mmu_enable) + recycle_mmu_bufs(hevc);*/ + + + /* new pic */ + hevc->cur_pic = hevc->is_used_v4l ? 
+ v4l_get_new_pic(hevc, rpm_param) : + get_new_pic(hevc, rpm_param); + if (hevc->cur_pic == NULL) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) + dump_pic_list(hevc); + hevc->wait_buf = 1; + return -1; + } +#ifdef MULTI_INSTANCE_SUPPORT + hevc->decoding_pic = hevc->cur_pic; + if (!hevc->m_ins_flag) + hevc->over_decode = 0; +#endif +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + hevc->cur_pic->dv_enhance_exist = 0; + if (vdec->slave) + hevc_print(hevc, H265_DEBUG_DV, + "Clear bl (poc %d) dv_enhance_exist flag\n", + hevc->curr_POC); + if (vdec->master == NULL && + vdec->slave == NULL) + set_aux_data(hevc, + hevc->cur_pic, 0, 0); /*prefix*/ + + if (hevc->bypass_dvenl && !dolby_meta_with_el) + set_aux_data(hevc, + hevc->cur_pic, 0, 2); /*pre sei only*/ +#else + set_aux_data(hevc, hevc->cur_pic, 0, 0); +#endif + if (get_dbg_flag(hevc) & H265_DEBUG_DISPLAY_CUR_FRAME) { + hevc->cur_pic->output_ready = 1; + hevc->cur_pic->stream_offset = + READ_VREG(HEVC_SHIFT_BYTE_COUNT); + prepare_display_buf(vdec, hevc->cur_pic); + hevc->wait_buf = 2; + return -1; + } + } else { + if (get_dbg_flag(hevc) & H265_DEBUG_HAS_AUX_IN_SLICE) { +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec->master == NULL && + vdec->slave == NULL) { + set_aux_data(hevc, hevc->cur_pic, 1, 0); + set_aux_data(hevc, hevc->cur_pic, 0, 0); + } +#else + set_aux_data(hevc, hevc->cur_pic, 1, 0); + set_aux_data(hevc, hevc->cur_pic, 0, 0); +#endif + } + if (hevc->pic_list_init_flag != 3 + || hevc->cur_pic == NULL) { + /* make it dec from the first slice segment */ + return 3; + } + hevc->cur_pic->slice_idx++; + hevc->new_pic = 0; + } + } else { + if (hevc->wait_buf == 1) { + pic_list_process(hevc); + hevc->cur_pic = hevc->is_used_v4l ? 
+ v4l_get_new_pic(hevc, rpm_param) : + get_new_pic(hevc, rpm_param); + if (hevc->cur_pic == NULL) + return -1; + + if (!hevc->m_ins_flag) + hevc->over_decode = 0; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + hevc->cur_pic->dv_enhance_exist = 0; + if (vdec->master == NULL && + vdec->slave == NULL) + set_aux_data(hevc, hevc->cur_pic, 0, 0); +#else + set_aux_data(hevc, hevc->cur_pic, 0, 0); +#endif + hevc->wait_buf = 0; + } else if (hevc->wait_buf == + 2) { + if (get_display_pic_num(hevc) > + 1) + return -1; + hevc->wait_buf = 0; + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) + dump_pic_list(hevc); + } + + if (hevc->new_pic) { +#if 1 + /*SUPPORT_10BIT*/ + int sao_mem_unit = + (hevc->lcu_size == 16 ? 9 : + hevc->lcu_size == + 32 ? 14 : 24) << 4; +#else + int sao_mem_unit = ((hevc->lcu_size / 8) * 2 + 4) << 4; +#endif + int pic_height_cu = + (hevc->pic_h + hevc->lcu_size - 1) / hevc->lcu_size; + int pic_width_cu = + (hevc->pic_w + hevc->lcu_size - 1) / hevc->lcu_size; + int sao_vb_size = (sao_mem_unit + (2 << 4)) * pic_height_cu; + + /* int sao_abv_size = sao_mem_unit*pic_width_cu; */ + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "==>%s dec idx %d, struct %d interlace %d pic idx %d\n", + __func__, + hevc->decode_idx, + hevc->curr_pic_struct, + hevc->interlace_flag, + hevc->cur_pic->index); + } + if (dbg_skip_decode_index != 0 && + hevc->decode_idx == dbg_skip_decode_index) + dbg_skip_flag = 1; + + hevc->decode_idx++; + update_tile_info(hevc, pic_width_cu, pic_height_cu, + sao_mem_unit, rpm_param); + + config_title_hw(hevc, sao_vb_size, sao_mem_unit); + } + + if (hevc->iPrevPOC != hevc->curr_POC) { + hevc->new_tile = 1; + hevc->tile_x = 0; + hevc->tile_y = 0; + hevc->tile_y_x = 0; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "new_tile (new_pic) tile_x=%d, tile_y=%d\n", + hevc->tile_x, hevc->tile_y); + } + } else if (hevc->tile_enabled) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + 
hevc_print(hevc, 0, + "slice_segment_address is %d\n", + rpm_param->p.slice_segment_address); + } + hevc->tile_y_x = + get_tile_index(hevc, rpm_param->p.slice_segment_address, + (hevc->pic_w + + hevc->lcu_size - + 1) / hevc->lcu_size); + if ((hevc->tile_y_x != (hevc->tile_x | (hevc->tile_y << 8))) + && (hevc->tile_y_x != -1)) { + hevc->new_tile = 1; + hevc->tile_x = hevc->tile_y_x & 0xff; + hevc->tile_y = (hevc->tile_y_x >> 8) & 0xff; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "new_tile seg adr %d tile_x=%d, tile_y=%d\n", + rpm_param->p.slice_segment_address, + hevc->tile_x, hevc->tile_y); + } + } else + hevc->new_tile = 0; + } else + hevc->new_tile = 0; + + if ((hevc->tile_x > (MAX_TILE_COL_NUM - 1)) + || (hevc->tile_y > (MAX_TILE_ROW_NUM - 1))) + hevc->new_tile = 0; + + if (hevc->new_tile) { + hevc->tile_start_lcu_x = + hevc->m_tile[hevc->tile_y][hevc->tile_x].start_cu_x; + hevc->tile_start_lcu_y = + hevc->m_tile[hevc->tile_y][hevc->tile_x].start_cu_y; + hevc->tile_width_lcu = + hevc->m_tile[hevc->tile_y][hevc->tile_x].width; + hevc->tile_height_lcu = + hevc->m_tile[hevc->tile_y][hevc->tile_x].height; + } + + set_ref_pic_list(hevc, rpm_param); + + Col_ref = rpm_param->p.collocated_ref_idx; + + hevc->LDCFlag = 0; + if (rpm_param->p.slice_type != I_SLICE) { + hevc->LDCFlag = 1; + for (i = 0; (i < hevc->RefNum_L0) && hevc->LDCFlag; i++) { + if (hevc->cur_pic-> + m_aiRefPOCList0[hevc->cur_pic->slice_idx][i] > + hevc->curr_POC) + hevc->LDCFlag = 0; + } + if (rpm_param->p.slice_type == B_SLICE) { + for (i = 0; (i < hevc->RefNum_L1) + && hevc->LDCFlag; i++) { + if (hevc->cur_pic-> + m_aiRefPOCList1[hevc->cur_pic-> + slice_idx][i] > + hevc->curr_POC) + hevc->LDCFlag = 0; + } + } + } + + hevc->ColFromL0Flag = rpm_param->p.collocated_from_l0_flag; + + hevc->plevel = + rpm_param->p.log2_parallel_merge_level; + hevc->MaxNumMergeCand = 5 - rpm_param->p.five_minus_max_num_merge_cand; + + hevc->LongTerm_Curr = 0; /* to do ... 
*/ + hevc->LongTerm_Col = 0; /* to do ... */ + + hevc->list_no = 0; + if (rpm_param->p.slice_type == B_SLICE) + hevc->list_no = 1 - hevc->ColFromL0Flag; + if (hevc->list_no == 0) { + if (Col_ref < hevc->RefNum_L0) { + hevc->Col_POC = + hevc->cur_pic->m_aiRefPOCList0[hevc->cur_pic-> + slice_idx][Col_ref]; + } else + hevc->Col_POC = INVALID_POC; + } else { + if (Col_ref < hevc->RefNum_L1) { + hevc->Col_POC = + hevc->cur_pic->m_aiRefPOCList1[hevc->cur_pic-> + slice_idx][Col_ref]; + } else + hevc->Col_POC = INVALID_POC; + } + + hevc->LongTerm_Ref = 0; /* to do ... */ + + if (hevc->slice_type != 2) { + /* if(hevc->i_only==1){ */ + /* return 0xf; */ + /* } */ + + if (hevc->Col_POC != INVALID_POC) { + hevc->col_pic = get_ref_pic_by_POC(hevc, hevc->Col_POC); + if (hevc->col_pic == NULL) { + hevc->cur_pic->error_mark = 1; + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "WRONG,fail to get the pic Col_POC\n"); + } + if (is_log_enable(hevc)) + add_log(hevc, + "WRONG,fail to get the pic Col_POC"); + } else if (hevc->col_pic->error_mark || hevc->col_pic->dis_mark == 0) { + hevc->col_pic->error_mark = 1; + hevc->cur_pic->error_mark = 1; + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "WRONG, Col_POC error_mark is 1\n"); + } + if (is_log_enable(hevc)) + add_log(hevc, + "WRONG, Col_POC error_mark is 1"); + } else { + if ((hevc->col_pic->width + != hevc->pic_w) || + (hevc->col_pic->height + != hevc->pic_h)) { + hevc_print(hevc, 0, + "Wrong reference pic (poc %d) width/height %d/%d\n", + hevc->col_pic->POC, + hevc->col_pic->width, + hevc->col_pic->height); + hevc->cur_pic->error_mark = 1; + } + + } + + if (hevc->cur_pic->error_mark + && ((hevc->ignore_bufmgr_error & 0x1) == 0)) { + /*count info*/ + vdec_count_info(hevc->gvs, hevc->cur_pic->error_mark, + hevc->cur_pic->stream_offset); + if (hevc->cur_pic->slice_type == I_SLICE) { + hevc->gvs->i_decoded_frames++; + } else if (hevc->cur_pic->slice_type == P_SLICE) { + hevc->gvs->p_decoded_frames++; + } else if 
(hevc->cur_pic->slice_type == B_SLICE) { + hevc->gvs->b_decoded_frames++; + } + if (hevc->cur_pic->error_mark) { + if (hevc->cur_pic->slice_type == I_SLICE) { + hevc->gvs->i_concealed_frames++; + } else if (hevc->cur_pic->slice_type == P_SLICE) { + hevc->gvs->p_concealed_frames++; + } else if (hevc->cur_pic->slice_type == B_SLICE) { + hevc->gvs->b_concealed_frames++; + } + } + if (hevc->PB_skip_mode == 2) { + hevc->gvs->drop_frame_count++; + if (rpm_param->p.slice_type == I_SLICE) { + hevc->gvs->i_lost_frames++; + } else if (rpm_param->p.slice_type == P_SLICE) { + hevc->gvs->p_lost_frames++; + } else if (rpm_param->p.slice_type == B_SLICE) { + hevc->gvs->b_lost_frames++; + } + } + } + + if (is_skip_decoding(hevc, + hevc->cur_pic)) { + return 2; + } + } else + hevc->col_pic = hevc->cur_pic; + } /* */ + if (hevc->col_pic == NULL) + hevc->col_pic = hevc->cur_pic; +#ifdef BUFFER_MGR_ONLY + return 0xf; +#else + if ((decode_pic_begin > 0 && hevc->decode_idx <= decode_pic_begin) + || (dbg_skip_flag)) + return 0xf; +#endif + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_START); + config_mc_buffer(hevc, hevc->cur_pic); + + if (is_skip_decoding(hevc, + hevc->cur_pic)) { + if (get_dbg_flag(hevc)) + hevc_print(hevc, 0, + "Discard this picture index %d\n", + hevc->cur_pic->index); + /*count info*/ + vdec_count_info(hevc->gvs, hevc->cur_pic->error_mark, + hevc->cur_pic->stream_offset); + if (hevc->cur_pic->slice_type == I_SLICE) { + hevc->gvs->i_decoded_frames++; + } else if (hevc->cur_pic->slice_type == P_SLICE) { + hevc->gvs->p_decoded_frames++; + } else if (hevc->cur_pic->slice_type == B_SLICE) { + hevc->gvs->b_decoded_frames++; + } + if (hevc->cur_pic->error_mark) { + if (hevc->cur_pic->slice_type == I_SLICE) { + hevc->gvs->i_concealed_frames++; + } else if (hevc->cur_pic->slice_type == P_SLICE) { + hevc->gvs->p_concealed_frames++; + } else if (hevc->cur_pic->slice_type == B_SLICE) { + hevc->gvs->b_concealed_frames++; + } + } + if 
(hevc->PB_skip_mode == 2) { + hevc->gvs->drop_frame_count++; + if (rpm_param->p.slice_type == I_SLICE) { + hevc->gvs->i_lost_frames++; + } else if (rpm_param->p.slice_type == P_SLICE) { + hevc->gvs->p_lost_frames++; + } else if (rpm_param->p.slice_type == B_SLICE) { + hevc->gvs->b_lost_frames++; + } + } + return 2; + } +#ifdef MCRCC_ENABLE + config_mcrcc_axi_hw(hevc, hevc->cur_pic->slice_type); +#endif + if (!hevc->tile_width_lcu || !hevc->tile_height_lcu) + return -1; + config_mpred_hw(hevc); + + config_sao_hw(hevc, rpm_param); + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_END); + if ((hevc->slice_type != 2) && (hevc->i_only & 0x2)) + return 0xf; + + if (post_picture_early(vdec, hevc->cur_pic->index)) + return -1; + + return 0; +} + +/* return page number */ +static int hevc_mmu_page_num(struct hevc_state_s *hevc, + int w, int h, int save_mode) +{ + int picture_size; + int page_num; + int max_frame_num; + + picture_size = compute_losless_comp_body_size(hevc, w, + h, save_mode); + page_num = ((picture_size + PAGE_SIZE - 1) >> PAGE_SHIFT); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + max_frame_num = MAX_FRAME_8K_NUM; + else + max_frame_num = MAX_FRAME_4K_NUM; + + if (page_num > max_frame_num) { + hevc_print(hevc, 0, "over max !! 
0x%x width %d height %d\n", + page_num, w, h); + return -1; + } + return page_num; +} + +static int H265_alloc_mmu(struct hevc_state_s *hevc, struct PIC_s *new_pic, + unsigned short bit_depth, unsigned int *mmu_index_adr) { + int bit_depth_10 = (bit_depth != 0x00); + int cur_mmu_4k_number; + int ret; + + if (get_double_write_mode(hevc) == 0x10) + return 0; + + cur_mmu_4k_number = hevc_mmu_page_num(hevc, new_pic->width, + new_pic->height, !bit_depth_10); + if (cur_mmu_4k_number < 0) + return -1; + + if (hevc->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(hevc, new_pic->BUF_index); + + ret = decoder_mmu_box_alloc_idx( + ibuf->mmu_box, + ibuf->index, + ibuf->frame_buffer_size, + mmu_index_adr); + } else { + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + ret = decoder_mmu_box_alloc_idx( + hevc->mmu_box, + new_pic->index, + cur_mmu_4k_number, + mmu_index_adr); + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + } + + new_pic->scatter_alloc = 1; + + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s pic index %d page count(%d) ret =%d\n", + __func__, new_pic->index, + cur_mmu_4k_number, ret); + return ret; +} +#ifdef H265_10B_MMU_DW +static int H265_alloc_mmu_dw(struct hevc_state_s *hevc, struct PIC_s *new_pic, + unsigned short bit_depth, unsigned int *mmu_index_adr) { + int bit_depth_10 = (bit_depth != 0x00); + int cur_mmu_4k_number; + int ret; + + if (!hevc->mmu_box_dw) { + hevc_print(hevc, 0, + "%s, error no mmu box dw!\n", __func__); + return -1; + } + + if (get_double_write_mode(hevc) == 0x10) + return 0; + + cur_mmu_4k_number = hevc_mmu_page_num(hevc, new_pic->width, + new_pic->height, !bit_depth_10); + if (cur_mmu_4k_number < 0) + return -1; + + ret = decoder_mmu_box_alloc_idx( + hevc->mmu_box_dw, + new_pic->index, + cur_mmu_4k_number, + mmu_index_adr); + + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s pic index %d page count(%d) ret =%d\n", + __func__, 
new_pic->index, + cur_mmu_4k_number, ret); + return ret; +} +#endif + +static void release_pic_mmu_buf(struct hevc_state_s *hevc, + struct PIC_s *pic) +{ + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s pic index %d scatter_alloc %d\n", + __func__, pic->index, + pic->scatter_alloc); + + if (hevc->mmu_enable + && !(hevc->double_write_mode & 0x10) + && pic->scatter_alloc) { + if (!hevc->is_used_v4l) + decoder_mmu_box_free_idx(hevc->mmu_box, pic->index); + else { + struct internal_comp_buf *ibuf = + ibuf = index_to_icomp_buf(hevc, pic->BUF_index); + decoder_mmu_box_free_idx(ibuf->mmu_box, ibuf->index); + } + } +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable) + decoder_mmu_box_free_idx(hevc->mmu_box_dw, pic->index); +#endif + pic->scatter_alloc = 0; +} + +/* + ************************************************* + * + *h265 buffer management end + * + ************************************************** + */ +static struct hevc_state_s *gHevc; + +static void hevc_local_uninit(struct hevc_state_s *hevc) +{ + hevc->rpm_ptr = NULL; + hevc->lmem_ptr = NULL; + +#ifdef SWAP_HEVC_UCODE + if (hevc->is_swap && get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + if (hevc->mc_cpu_addr != NULL) { + dma_free_coherent(amports_get_dma_device(), + hevc->swap_size, hevc->mc_cpu_addr, + hevc->mc_dma_handle); + hevc->mc_cpu_addr = NULL; + } + + } +#endif +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) + uninit_detrefill_buf(hevc); +#endif + if (hevc->aux_addr) { + dma_free_coherent(amports_get_dma_device(), + hevc->prefix_aux_size + hevc->suffix_aux_size, hevc->aux_addr, + hevc->aux_phy_addr); + hevc->aux_addr = NULL; + } + if (hevc->rpm_addr) { + dma_free_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, hevc->rpm_addr, + hevc->rpm_phy_addr); + hevc->rpm_addr = NULL; + } + if (hevc->lmem_addr) { + dma_free_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, hevc->lmem_addr, + hevc->lmem_phy_addr); + hevc->lmem_addr = NULL; + } + + if 
(hevc->mmu_enable && hevc->frame_mmu_map_addr) { + if (hevc->frame_mmu_map_phy_addr) + dma_free_coherent(amports_get_dma_device(), + get_frame_mmu_map_size(), hevc->frame_mmu_map_addr, + hevc->frame_mmu_map_phy_addr); + + hevc->frame_mmu_map_addr = NULL; + } +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable && hevc->frame_dw_mmu_map_addr) { + if (hevc->frame_dw_mmu_map_phy_addr) + dma_free_coherent(amports_get_dma_device(), + get_frame_mmu_map_size(), hevc->frame_dw_mmu_map_addr, + hevc->frame_dw_mmu_map_phy_addr); + + hevc->frame_dw_mmu_map_addr = NULL; + } +#endif + //pr_err("[%s line %d] hevc->gvs=0x%p operation\n",__func__, __LINE__, hevc->gvs); +} + +static int hevc_local_init(struct hevc_state_s *hevc) +{ + int ret = -1; + struct BuffInfo_s *cur_buf_info = NULL; + + memset(&hevc->param, 0, sizeof(union param_u)); + + cur_buf_info = &hevc->work_space_buf_store; + + if (force_bufspec) { + memcpy(cur_buf_info, &amvh265_workbuff_spec[force_bufspec & 0xf], + sizeof(struct BuffInfo_s)); + pr_info("force buffer spec %d\n", force_bufspec & 0xf); + } else { + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + memcpy(cur_buf_info, &amvh265_workbuff_spec[2], /* 4k */ + sizeof(struct BuffInfo_s)); + else + memcpy(cur_buf_info, &amvh265_workbuff_spec[1], /* 4k */ + sizeof(struct BuffInfo_s)); + } else { + memcpy(cur_buf_info, &amvh265_workbuff_spec[0], /* 1080p */ + sizeof(struct BuffInfo_s)); + } + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) { + memcpy(cur_buf_info, &amvh265_workbuff_spec[5], /* 4k */ + sizeof(struct BuffInfo_s)); + } else { + memcpy(cur_buf_info, &amvh265_workbuff_spec[3], /* 1080p */ + sizeof(struct BuffInfo_s)); + } + } + } + + cur_buf_info->start_adr = hevc->buf_start; + init_buff_spec(hevc, cur_buf_info); + + hevc_init_stru(hevc, cur_buf_info); + + hevc->bit_depth_luma = 8; + 
hevc->bit_depth_chroma = 8; + hevc->video_signal_type = 0; + hevc->video_signal_type_debug = 0; + bit_depth_luma = hevc->bit_depth_luma; + bit_depth_chroma = hevc->bit_depth_chroma; + video_signal_type = hevc->video_signal_type; + + if ((get_dbg_flag(hevc) & H265_DEBUG_SEND_PARAM_WITH_REG) == 0) { + hevc->rpm_addr = dma_alloc_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, &hevc->rpm_phy_addr, GFP_KERNEL); + if (hevc->rpm_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + return -1; + } + hevc->rpm_ptr = hevc->rpm_addr; + } + + if (prefix_aux_buf_size > 0 || + suffix_aux_buf_size > 0) { + u32 aux_buf_size; + + hevc->prefix_aux_size = AUX_BUF_ALIGN(prefix_aux_buf_size); + hevc->suffix_aux_size = AUX_BUF_ALIGN(suffix_aux_buf_size); + aux_buf_size = hevc->prefix_aux_size + hevc->suffix_aux_size; + hevc->aux_addr =dma_alloc_coherent(amports_get_dma_device(), + aux_buf_size, &hevc->aux_phy_addr, GFP_KERNEL); + if (hevc->aux_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + return -1; + } + } + + hevc->lmem_addr = dma_alloc_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, &hevc->lmem_phy_addr, GFP_KERNEL); + if (hevc->lmem_addr == NULL) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + return -1; + } + hevc->lmem_ptr = hevc->lmem_addr; + + if (hevc->mmu_enable) { + hevc->frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + get_frame_mmu_map_size(), + &hevc->frame_mmu_map_phy_addr, GFP_KERNEL); + if (hevc->frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(hevc->frame_mmu_map_addr, 0, get_frame_mmu_map_size()); + } +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable) { + hevc->frame_dw_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + get_frame_mmu_map_size(), + &hevc->frame_dw_mmu_map_phy_addr, GFP_KERNEL); + if (hevc->frame_dw_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return 
-1; + } + memset(hevc->frame_dw_mmu_map_addr, 0, get_frame_mmu_map_size()); + } +#endif + ret = 0; + return ret; +} + +/* + ******************************************* + * Mailbox command + ******************************************* + */ +#define CMD_FINISHED 0 +#define CMD_ALLOC_VIEW 1 +#define CMD_FRAME_DISPLAY 3 +#define CMD_DEBUG 10 + + +#define DECODE_BUFFER_NUM_MAX 32 +#define DISPLAY_BUFFER_NUM 6 + +#define video_domain_addr(adr) (adr&0x7fffffff) +#define DECODER_WORK_SPACE_SIZE 0x800000 + +#define spec2canvas(x) \ + (((x)->uv_canvas_index << 16) | \ + ((x)->uv_canvas_index << 8) | \ + ((x)->y_canvas_index << 0)) + + +static void set_canvas(struct hevc_state_s *hevc, struct PIC_s *pic) +{ + struct vdec_s *vdec = hw_to_vdec(hevc); + int canvas_w = ALIGN(pic->width, 64)/4; + int canvas_h = ALIGN(pic->height, 32)/4; + int blkmode = hevc->mem_map_mode; + + /*CANVAS_BLKMODE_64X32*/ +#ifdef SUPPORT_10BIT + if (pic->double_write_mode && + ((pic->double_write_mode & 0x20) == 0)) { + canvas_w = pic->width / + get_double_write_ratio(pic->double_write_mode & 0xf); + canvas_h = pic->height / + get_double_write_ratio(pic->double_write_mode & 0xf); + + canvas_w = ALIGN(canvas_w, 64); + canvas_h = ALIGN(canvas_h, 32); + + if (vdec->parallel_dec == 1) { + if (pic->y_canvas_index == -1) + pic->y_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + if (pic->uv_canvas_index == -1) + pic->uv_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + } else { + pic->y_canvas_index = 128 + pic->index * 2; + pic->uv_canvas_index = 128 + pic->index * 2 + 1; + } + + config_cav_lut_ex(pic->y_canvas_index, + pic->dw_y_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 0 : 7, VDEC_HEVC); + config_cav_lut_ex(pic->uv_canvas_index, pic->dw_u_v_adr, + canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 
0 : 7, VDEC_HEVC); +#ifdef MULTI_INSTANCE_SUPPORT + pic->canvas_config[0].phy_addr = + pic->dw_y_adr; + pic->canvas_config[0].width = + canvas_w; + pic->canvas_config[0].height = + canvas_h; + pic->canvas_config[0].block_mode = + blkmode; + pic->canvas_config[0].endian = hevc->is_used_v4l ? 0 : 7; + + pic->canvas_config[1].phy_addr = + pic->dw_u_v_adr; + pic->canvas_config[1].width = + canvas_w; + pic->canvas_config[1].height = + canvas_h; + pic->canvas_config[1].block_mode = + blkmode; + pic->canvas_config[1].endian = hevc->is_used_v4l ? 0 : 7; + + ATRACE_COUNTER(hevc->trace.set_canvas0_addr, pic->canvas_config[0].phy_addr); + hevc_print(hevc, H265_DEBUG_PIC_STRUCT,"%s(canvas0 addr:0x%x)\n", + __func__, pic->canvas_config[0].phy_addr); +#else + ATRACE_COUNTER(hevc->trace.set_canvas0_addr, spec2canvas(pic)); + hevc_print(hevc, H265_DEBUG_PIC_STRUCT,"%s(canvas0 addr:0x%x)\n", + __func__, spec2canvas(pic)); +#endif + } else { + if (!hevc->mmu_enable) { + /* to change after 10bit VPU is ready ... */ + if (vdec->parallel_dec == 1) { + if (pic->y_canvas_index == -1) + pic->y_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + pic->uv_canvas_index = pic->y_canvas_index; + } else { + pic->y_canvas_index = 128 + pic->index; + pic->uv_canvas_index = 128 + pic->index; + } + + config_cav_lut_ex(pic->y_canvas_index, + pic->mc_y_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 0 : 7, VDEC_HEVC); + config_cav_lut_ex(pic->uv_canvas_index, pic->mc_u_v_adr, + canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 
0 : 7, VDEC_HEVC); + } + ATRACE_COUNTER(hevc->trace.set_canvas0_addr, spec2canvas(pic)); + hevc_print(hevc, H265_DEBUG_PIC_STRUCT,"%s(canvas0 addr:0x%x)\n", + __func__, spec2canvas(pic)); + } +#else + if (vdec->parallel_dec == 1) { + if (pic->y_canvas_index == -1) + pic->y_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + if (pic->uv_canvas_index == -1) + pic->uv_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + } else { + pic->y_canvas_index = 128 + pic->index * 2; + pic->uv_canvas_index = 128 + pic->index * 2 + 1; + } + + + config_cav_lut_ex(pic->y_canvas_index, pic->mc_y_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 0 : 7, VDEC_HEVC); + config_cav_lut_ex(pic->uv_canvas_index, pic->mc_u_v_adr, + canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 0 : 7, VDEC_HEVC); + + ATRACE_COUNTER(hevc->trace.set_canvas0_addr, spec2canvas(pic)); + hevc_print(hevc, H265_DEBUG_PIC_STRUCT,"%s(canvas0 addr:0x%x)\n", + __func__, spec2canvas(pic)); +#endif +} + +static int init_buf_spec(struct hevc_state_s *hevc) +{ + int pic_width = hevc->pic_w; + int pic_height = hevc->pic_h; + + /* hevc_print(hevc, 0, + *"%s1: %d %d\n", __func__, hevc->pic_w, hevc->pic_h); + */ + hevc_print(hevc, 0, + "%s2 %d %d\n", __func__, pic_width, pic_height); + /* pic_width = hevc->pic_w; */ + /* pic_height = hevc->pic_h; */ + + if (hevc->frame_width == 0 || hevc->frame_height == 0) { + hevc->frame_width = pic_width; + hevc->frame_height = pic_height; + + } + + return 0; +} + +static int parse_sei(struct hevc_state_s *hevc, + struct PIC_s *pic, char *sei_buf, uint32_t size) +{ + char *p = sei_buf; + char *p_sei; + uint16_t header; + uint16_t nal_unit_type; + uint16_t payload_type, payload_size; + int i, j; + + if (size < 2) + return 0; + header = *p++; + header <<= 8; + header += *p++; + nal_unit_type = header >> 9; + if ((nal_unit_type != NAL_UNIT_SEI) + && (nal_unit_type != NAL_UNIT_SEI_SUFFIX)) + return 0; + while (p+4 <= 
sei_buf+size) { + payload_type = *p++; + if (payload_type == 0xff) { + payload_type += *p++; + } + payload_size = *p++; + if (payload_size == 0xff) { + payload_size += *p++; + } + + if (p+payload_size <= sei_buf+size) { + switch (payload_type) { + case SEI_PicTiming: + if ((parser_sei_enable & 0x4) && + hevc->frame_field_info_present_flag) { + p_sei = p; + hevc->curr_pic_struct = (*p_sei >> 4)&0x0f; + pic->pic_struct = hevc->curr_pic_struct; + if (get_dbg_flag(hevc) & + H265_DEBUG_PIC_STRUCT) { + hevc_print(hevc, 0, + "parse result pic_struct = %d\n", + hevc->curr_pic_struct); + } + } + break; + case SEI_UserDataITU_T_T35: + p_sei = p; + if (p_sei[0] == 0xB5 + && p_sei[1] == 0x00 + && p_sei[2] == 0x3C + && p_sei[3] == 0x00 + && p_sei[4] == 0x01 + && p_sei[5] == 0x04) { + char *new_buf; + hevc->sei_present_flag |= SEI_HDR10PLUS_MASK; + new_buf = vzalloc(payload_size); + if (new_buf) { + memcpy(new_buf, p_sei, payload_size); + pic->hdr10p_data_buf = new_buf; + pic->hdr10p_data_size = payload_size; + } else { + hevc_print(hevc, 0, + "%s:hdr10p data vzalloc size(%d) fail\n", + __func__, payload_size); + pic->hdr10p_data_buf = NULL; + pic->hdr10p_data_size = 0; + } + } else if (p_sei[0] == 0x26 + && p_sei[1] == 0x00 + && p_sei[2] == 0x04 + && p_sei[3] == 0x00 + && p_sei[4] == 0x05) { + hevc->sei_present_flag |= SEI_HDR_CUVA_MASK; + + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) { + hevc_print(hevc, 0, + "hdr cuva data: (size %d)\n", + payload_size); + for (i = 0; i < payload_size; i++) { + hevc_print_cont(hevc, 0, + "%02x ", p_sei[i]); + if (((i + 1) & 0xf) == 0) + hevc_print_cont(hevc, 0, "\n"); + } + hevc_print_cont(hevc, 0, "\n"); + } + } + + break; + case SEI_MasteringDisplayColorVolume: + /*hevc_print(hevc, 0, + "sei type: primary display color volume %d, size %d\n", + payload_type, + payload_size);*/ + /* master_display_colour */ + p_sei = p; + for (i = 0; i < 3; i++) { + for (j = 0; j < 2; j++) { + hevc->primaries[i][j] + = (*p_sei<<8) + | *(p_sei+1); + p_sei 
+= 2; + } + } + for (i = 0; i < 2; i++) { + hevc->white_point[i] + = (*p_sei<<8) + | *(p_sei+1); + p_sei += 2; + } + for (i = 0; i < 2; i++) { + hevc->luminance[i] + = (*p_sei<<24) + | (*(p_sei+1)<<16) + | (*(p_sei+2)<<8) + | *(p_sei+3); + p_sei += 4; + } + hevc->sei_present_flag |= + SEI_MASTER_DISPLAY_COLOR_MASK; + /*for (i = 0; i < 3; i++) + for (j = 0; j < 2; j++) + hevc_print(hevc, 0, + "\tprimaries[%1d][%1d] = %04x\n", + i, j, + hevc->primaries[i][j]); + hevc_print(hevc, 0, + "\twhite_point = (%04x, %04x)\n", + hevc->white_point[0], + hevc->white_point[1]); + hevc_print(hevc, 0, + "\tmax,min luminance = %08x, %08x\n", + hevc->luminance[0], + hevc->luminance[1]);*/ + break; + case SEI_ContentLightLevel: + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "sei type: max content light level %d, size %d\n", + payload_type, payload_size); + /* content_light_level */ + p_sei = p; + hevc->content_light_level[0] + = (*p_sei<<8) | *(p_sei+1); + p_sei += 2; + hevc->content_light_level[1] + = (*p_sei<<8) | *(p_sei+1); + p_sei += 2; + hevc->sei_present_flag |= + SEI_CONTENT_LIGHT_LEVEL_MASK; + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "\tmax cll = %04x, max_pa_cll = %04x\n", + hevc->content_light_level[0], + hevc->content_light_level[1]); + break; + default: + break; + } + } + p += payload_size; + } + return 0; +} + +/* +static unsigned calc_ar(unsigned idc, unsigned sar_w, unsigned sar_h, + unsigned w, unsigned h) +{ + unsigned ar; + + if (idc == 255) { + ar = div_u64(256ULL * sar_h * h, + sar_w * w); + } else { + switch (idc) { + case 1: + ar = 0x100 * h / w; + break; + case 2: + ar = 0x100 * h * 11 / (w * 12); + break; + case 3: + ar = 0x100 * h * 11 / (w * 10); + break; + case 4: + ar = 0x100 * h * 11 / (w * 16); + break; + case 5: + ar = 0x100 * h * 33 / (w * 40); + break; + case 6: + ar = 0x100 * h * 11 / (w * 24); + break; + case 7: + ar = 0x100 * h * 11 / (w * 20); + break; + case 8: + ar = 0x100 * h * 11 / (w * 
32); + break; + case 9: + ar = 0x100 * h * 33 / (w * 80); + break; + case 10: + ar = 0x100 * h * 11 / (w * 18); + break; + case 11: + ar = 0x100 * h * 11 / (w * 15); + break; + case 12: + ar = 0x100 * h * 33 / (w * 64); + break; + case 13: + ar = 0x100 * h * 99 / (w * 160); + break; + case 14: + ar = 0x100 * h * 3 / (w * 4); + break; + case 15: + ar = 0x100 * h * 2 / (w * 3); + break; + case 16: + ar = 0x100 * h * 1 / (w * 2); + break; + default: + ar = h * 0x100 / w; + break; + } + } + + return ar; +} +*/ +static void set_frame_info(struct hevc_state_s *hevc, struct vframe_s *vf, + struct PIC_s *pic) +{ + unsigned int ar; + int i, j; + char *p; + unsigned size = 0; + unsigned type = 0; + struct vframe_master_display_colour_s *vf_dp + = &vf->prop.master_display_colour; + + vf->width = pic->width / + get_double_write_ratio(pic->double_write_mode); + vf->height = pic->height / + get_double_write_ratio(pic->double_write_mode); + + vf->duration = hevc->frame_dur; + vf->duration_pulldown = 0; + vf->flag = 0; + + ar = min_t(u32, hevc->frame_ar, DISP_RATIO_ASPECT_RATIO_MAX); + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + +/* + if (((pic->aspect_ratio_idc == 255) && + pic->sar_width && + pic->sar_height) || + ((pic->aspect_ratio_idc != 255) && + (pic->width))) { + ar = min_t(u32, + calc_ar(pic->aspect_ratio_idc, + pic->sar_width, + pic->sar_height, + pic->width, + pic->height), + DISP_RATIO_ASPECT_RATIO_MAX); + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + vf->ratio_control <<= hevc->interlace_flag; + } +*/ + hevc->ratio_control = vf->ratio_control; + if (pic->aux_data_buf + && pic->aux_data_size) { + /* parser sei */ + p = pic->aux_data_buf; + while (p < pic->aux_data_buf + + pic->aux_data_size - 8) { + size = *p++; + size = (size << 8) | *p++; + size = (size << 8) | *p++; + size = (size << 8) | *p++; + type = *p++; + type = (type << 8) | *p++; + type = (type << 8) | *p++; + type = (type << 8) | *p++; + if (type == 0x02000000) { + /* 
hevc_print(hevc, 0, + "sei(%d)\n", size); */ + parse_sei(hevc, pic, p, size); + } + p += size; + } + } + if (hevc->video_signal_type & VIDEO_SIGNAL_TYPE_AVAILABLE_MASK) { + vf->signal_type = pic->video_signal_type; + if (hevc->sei_present_flag & SEI_HDR10PLUS_MASK) { + u32 data; + data = vf->signal_type; + data = data & 0xFFFF00FF; + data = data | (0x30<<8); + vf->signal_type = data; + } + + if (hevc->sei_present_flag & SEI_HDR_CUVA_MASK) { + u32 data; + data = vf->signal_type; + data = data & 0x7FFFFFFF; + data = data | (1<<31); + vf->signal_type = data; + } + } + else + vf->signal_type = 0; + hevc->video_signal_type_debug = vf->signal_type; + + /* master_display_colour */ + if (hevc->sei_present_flag & SEI_MASTER_DISPLAY_COLOR_MASK) { + for (i = 0; i < 3; i++) + for (j = 0; j < 2; j++) + vf_dp->primaries[i][j] = hevc->primaries[i][j]; + for (i = 0; i < 2; i++) { + vf_dp->white_point[i] = hevc->white_point[i]; + vf_dp->luminance[i] + = hevc->luminance[i]; + } + vf_dp->present_flag = 1; + } else + vf_dp->present_flag = 0; + + /* content_light_level */ + if (hevc->sei_present_flag & SEI_CONTENT_LIGHT_LEVEL_MASK) { + vf_dp->content_light_level.max_content + = hevc->content_light_level[0]; + vf_dp->content_light_level.max_pic_average + = hevc->content_light_level[1]; + vf_dp->content_light_level.present_flag = 1; + } else + vf_dp->content_light_level.present_flag = 0; + + if (hevc->is_used_v4l && + ((hevc->video_signal_type & VIDEO_SIGNAL_TYPE_AVAILABLE_MASK) || + (hevc->sei_present_flag & SEI_HDR10PLUS_MASK) || + (vf_dp->present_flag) || + (vf_dp->content_light_level.present_flag))) { + struct aml_vdec_hdr_infos hdr; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hevc->v4l2_ctx); + + memset(&hdr, 0, sizeof(hdr)); + hdr.signal_type = vf->signal_type; + hdr.color_parms = *vf_dp; + vdec_v4l_set_hdr_infos(ctx, &hdr); + } + + if ((hevc->sei_present_flag & SEI_HDR10PLUS_MASK) && (pic->hdr10p_data_buf != NULL) + && (pic->hdr10p_data_size != 0)) { + if 
(pic->hdr10p_data_size <= 128) { + char *new_buf; + new_buf = kzalloc(pic->hdr10p_data_size, GFP_ATOMIC); + + if (new_buf) { + memcpy(new_buf, pic->hdr10p_data_buf, pic->hdr10p_data_size); + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) { + hevc_print(hevc, 0, + "hdr10p data: (size %d)\n", + pic->hdr10p_data_size); + for (i = 0; i < pic->hdr10p_data_size; i++) { + hevc_print_cont(hevc, 0, + "%02x ", pic->hdr10p_data_buf[i]); + if (((i + 1) & 0xf) == 0) + hevc_print_cont(hevc, 0, "\n"); + } + hevc_print_cont(hevc, 0, "\n"); + } + + vf->hdr10p_data_size = pic->hdr10p_data_size; + vf->hdr10p_data_buf = new_buf; + } else { + hevc_print(hevc, 0, + "%s:hdr10p data vzalloc size(%d) fail\n", + __func__, pic->hdr10p_data_size); + vf->hdr10p_data_buf = NULL; + vf->hdr10p_data_size = 0; + } + } + + vfree(pic->hdr10p_data_buf); + pic->hdr10p_data_buf = NULL; + pic->hdr10p_data_size = 0; + } + + vf->sidebind_type = hevc->sidebind_type; + vf->sidebind_channel_id = hevc->sidebind_channel_id; +} + +static int vh265_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; +#ifdef MULTI_INSTANCE_SUPPORT + struct vdec_s *vdec = op_arg; + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; +#else + struct hevc_state_s *hevc = (struct hevc_state_s *)op_arg; +#endif + + spin_lock_irqsave(&lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&hevc->newframe_q); + states->buf_avail_num = kfifo_len(&hevc->display_q); + + if (step == 2) + states->buf_avail_num = 0; + spin_unlock_irqrestore(&lock, flags); + return 0; +} + +static struct vframe_s *vh265_vf_peek(void *op_arg) +{ + struct vframe_s *vf[2] = {0, 0}; +#ifdef MULTI_INSTANCE_SUPPORT + struct vdec_s *vdec = op_arg; + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; +#else + struct hevc_state_s *hevc = (struct hevc_state_s *)op_arg; +#endif + + if (step == 2) + return NULL; + + if (force_disp_pic_index & 0x100) { + if (force_disp_pic_index & 
0x200) + return NULL; + return &hevc->vframe_dummy; + } + + if (kfifo_len(&hevc->display_q) > VF_POOL_SIZE) { + hevc_print(hevc, H265_DEBUG_BUFMGR, + "kfifo len:%d invaild, peek error\n", + kfifo_len(&hevc->display_q)); + return NULL; + } + + if (kfifo_out_peek(&hevc->display_q, (void *)&vf, 2)) { + if (vf[1]) { + vf[0]->next_vf_pts_valid = true; + vf[0]->next_vf_pts = vf[1]->pts; + } else + vf[0]->next_vf_pts_valid = false; + return vf[0]; + } + + return NULL; +} + +static struct vframe_s *vh265_vf_get(void *op_arg) +{ + struct vframe_s *vf; +#ifdef MULTI_INSTANCE_SUPPORT + struct vdec_s *vdec = op_arg; + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; +#else + struct hevc_state_s *hevc = (struct hevc_state_s *)op_arg; +#endif + + if (step == 2) + return NULL; + else if (step == 1) + step = 2; + +#if 0 + if (force_disp_pic_index & 0x100) { + int buffer_index = force_disp_pic_index & 0xff; + struct PIC_s *pic = NULL; + if (buffer_index >= 0 + && buffer_index < MAX_REF_PIC_NUM) + pic = hevc->m_PIC[buffer_index]; + if (pic == NULL) + return NULL; + if (force_disp_pic_index & 0x200) + return NULL; + + vf = &hevc->vframe_dummy; + if (get_double_write_mode(hevc)) { + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | + VIDTYPE_VIU_NV21; + if (hevc->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + pic->canvas_config[0]; + vf->canvas0_config[1] = + pic->canvas_config[1]; + + vf->canvas1_config[0] = + pic->canvas_config[0]; + vf->canvas1_config[1] = + pic->canvas_config[1]; + } else { + vf->canvas0Addr = vf->canvas1Addr + = spec2canvas(pic); + } + } else { + vf->canvas0Addr = vf->canvas1Addr = 0; + vf->type = VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + vf->compWidth = pic->width; + vf->compHeight = pic->height; + update_vf_memhandle(hevc, vf, pic); + switch (hevc->bit_depth_luma) { + case 9: + vf->bitdepth = BITDEPTH_Y9 | BITDEPTH_U9 | 
BITDEPTH_V9; + break; + case 10: + vf->bitdepth = BITDEPTH_Y10 | BITDEPTH_U10 + | BITDEPTH_V10; + break; + default: + vf->bitdepth = BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + break; + } + if ((vf->type & VIDTYPE_COMPRESS) == 0) + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + if (hevc->mem_saving_mode == 1) + vf->bitdepth |= BITDEPTH_SAVING_MODE; + vf->duration_pulldown = 0; + vf->pts = 0; + vf->pts_us64 = 0; + set_frame_info(hevc, vf); + + vf->width = pic->width / + get_double_write_ratio(pic->double_write_mode); + vf->height = pic->height / + get_double_write_ratio(pic->double_write_mode); + + force_disp_pic_index |= 0x200; + return vf; + } +#endif + + if (kfifo_get(&hevc->display_q, &vf)) { + struct vframe_s *next_vf = NULL; + + ATRACE_COUNTER(hevc->trace.vf_get_name, (long)vf); + ATRACE_COUNTER(hevc->trace.disp_q_name, kfifo_len(&hevc->display_q)); +#ifdef MULTI_INSTANCE_SUPPORT + ATRACE_COUNTER(hevc->trace.set_canvas0_addr, vf->canvas0_config[0].phy_addr); +#else + ATRACE_COUNTER(hevc->trace.get_canvas0_addr, vf->canvas0Addr); +#endif + + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) { + hevc_print(hevc, 0, + "%s(vf 0x%p type %d index 0x%x poc %d/%d) pts(%d,%d) dur %d\n", + __func__, vf, vf->type, vf->index, + get_pic_poc(hevc, vf->index & 0xff), + get_pic_poc(hevc, (vf->index >> 8) & 0xff), + vf->pts, vf->pts_us64, + vf->duration); +#ifdef MULTI_INSTANCE_SUPPORT + hevc_print(hevc, 0, "get canvas0 addr:0x%x\n", vf->canvas0_config[0].phy_addr); +#else + hevc_print(hevc, 0, "get canvas0 addr:0x%x\n", vf->canvas0Addr); +#endif + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (get_dbg_flag(hevc) & H265_DEBUG_DV) { + struct PIC_s *pic = hevc->m_PIC[vf->index & 0xff]; + hevc_print(hevc, 0, "pic 0x%p aux size %d:\n", + pic, pic->aux_data_size); + if (pic->aux_data_buf && pic->aux_data_size > 0) { + int i; + for (i = 0; i < pic->aux_data_size; i++) { + hevc_print_cont(hevc, 0, + "%02x ", pic->aux_data_buf[i]); + if (((i + 1) & 0xf) == 0) + 
hevc_print_cont(hevc, 0, "\n"); + } + hevc_print_cont(hevc, 0, "\n"); + } + } +#endif + hevc->show_frame_num++; + vf->index_disp = atomic_read(&hevc->vf_get_count); + atomic_add(1, &hevc->vf_get_count); + + if (kfifo_peek(&hevc->display_q, &next_vf) && next_vf) { + vf->next_vf_pts_valid = true; + vf->next_vf_pts = next_vf->pts; + } else + vf->next_vf_pts_valid = false; + + return vf; + } + + return NULL; +} +static bool vf_valid_check(struct vframe_s *vf, struct hevc_state_s *hevc) { + int i; + for (i = 0; i < VF_POOL_SIZE; i++) { + if (vf == &hevc->vfpool[i] || vf == &hevc->vframe_dummy) + return true; + } + hevc_print(hevc, 0," h265 invalid vf been put, vf = %p\n", vf); + for (i = 0; i < VF_POOL_SIZE; i++) { + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS,"valid vf[%d]= %p \n", i, &hevc->vfpool[i]); + } + return false; +} + +static void vh265_vf_put(struct vframe_s *vf, void *op_arg) +{ + unsigned long flags; +#ifdef MULTI_INSTANCE_SUPPORT + struct vdec_s *vdec = op_arg; + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; +#else + struct hevc_state_s *hevc = (struct hevc_state_s *)op_arg; +#endif + unsigned char index_top; + unsigned char index_bot; + + if (!vf) + return; + if (vf == (&hevc->vframe_dummy)) + return; + if (vf && (vf_valid_check(vf, hevc) == false)) + return; + + if (hevc->enable_fence && vf->fence) { + int ret, i; + + mutex_lock(&hevc->fence_mutex); + ret = dma_fence_get_status(vf->fence); + if (ret == 0) { + for (i = 0; i < VF_POOL_SIZE; i++) { + if (hevc->fence_vf_s.fence_vf[i] == NULL) { + hevc->fence_vf_s.fence_vf[i] = vf; + hevc->fence_vf_s.used_size++; + mutex_unlock(&hevc->fence_mutex); + return; + } + } + } + mutex_unlock(&hevc->fence_mutex); + } + + ATRACE_COUNTER(hevc->trace.vf_put_name, (long)vf); +#ifdef MULTI_INSTANCE_SUPPORT + ATRACE_COUNTER(hevc->trace.put_canvas0_addr, vf->canvas0_config[0].phy_addr); +#else + ATRACE_COUNTER(hevc->trace.put_canvas0_addr, vf->canvas0Addr); +#endif + index_top = vf->index & 0xff; + 
index_bot = (vf->index >> 8) & 0xff; + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "%s(vf 0x%p type %d index 0x%x put canvas0 addr:0x%x)\n", + __func__, vf, vf->type, vf->index +#ifdef MULTI_INSTANCE_SUPPORT + , vf->canvas0_config[0].phy_addr +#else + , vf->canvas0Addr +#endif + ); + atomic_add(1, &hevc->vf_put_count); + spin_lock_irqsave(&lock, flags); + kfifo_put(&hevc->newframe_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hevc->trace.new_q_name, kfifo_len(&hevc->newframe_q)); + if (hevc->enable_fence && vf->fence) { + vdec_fence_put(vf->fence); + vf->fence = NULL; + } + + if (vf->hdr10p_data_buf) { + kfree(vf->hdr10p_data_buf); + vf->hdr10p_data_buf = NULL; + vf->hdr10p_data_size = 0; + } + + + if (index_top != 0xff + && index_top < MAX_REF_PIC_NUM + && hevc->m_PIC[index_top]) { + if (hevc->m_PIC[index_top]->vf_ref > 0) { + hevc->m_PIC[index_top]->vf_ref--; + + if (hevc->m_PIC[index_top]->vf_ref == 0) { + hevc->m_PIC[index_top]->output_ready = 0; + hevc->m_PIC[index_top]->show_frame = false; + + if (hevc->wait_buf != 0) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + } + } + } + + if (index_bot != 0xff + && index_bot < MAX_REF_PIC_NUM + && hevc->m_PIC[index_bot]) { + if (hevc->m_PIC[index_bot]->vf_ref > 0) { + hevc->m_PIC[index_bot]->vf_ref--; + + if (hevc->m_PIC[index_bot]->vf_ref == 0) { + hevc->m_PIC[index_bot]->output_ready = 0; + hevc->m_PIC[index_bot]->show_frame = false; + + if (hevc->wait_buf != 0) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + } + } + } + spin_unlock_irqrestore(&lock, flags); +} + +static int vh265_event_cb(int type, void *data, void *op_arg) +{ + unsigned long flags; +#ifdef MULTI_INSTANCE_SUPPORT + struct vdec_s *vdec = op_arg; + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; +#else + struct hevc_state_s *hevc = (struct hevc_state_s *)op_arg; +#endif + if (type & VFRAME_EVENT_RECEIVER_RESET) { +#if 0 + amhevc_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + 
vf_light_unreg_provider(&vh265_vf_prov); +#endif + spin_lock_irqsave(&hevc->lock, flags); + vh265_local_init(); + vh265_prot_init(); + spin_unlock_irqrestore(&hevc->lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vh265_vf_prov); +#endif + amhevc_start(); +#endif + } else if (type & VFRAME_EVENT_RECEIVER_GET_AUX_DATA) { + struct provider_aux_req_s *req = + (struct provider_aux_req_s *)data; + unsigned char index; + + if (!req->vf) { + req->aux_size = atomic_read(&hevc->vf_put_count); + return 0; + } + spin_lock_irqsave(&lock, flags); + index = req->vf->index & 0xff; + req->aux_buf = NULL; + req->aux_size = 0; + req->format = VFORMAT_HEVC; + if (req->bot_flag) + index = (req->vf->index >> 8) & 0xff; + if (index != 0xff + && index < MAX_REF_PIC_NUM + && hevc->m_PIC[index]) { + req->aux_buf = hevc->m_PIC[index]->aux_data_buf; + req->aux_size = hevc->m_PIC[index]->aux_data_size; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (hevc->bypass_dvenl && !dolby_meta_with_el) + req->dv_enhance_exist = false; + else + req->dv_enhance_exist = + hevc->m_PIC[index]->dv_enhance_exist; + if (vdec_frame_based(vdec) && (hevc->dv_duallayer == true)) + req->dv_enhance_exist = 1; + hevc_print(hevc, H265_DEBUG_DV, + "query dv_enhance_exist for (pic 0x%p, vf 0x%p, poc %d index %d) flag => %d, aux sizd 0x%x\n", + hevc->m_PIC[index], + req->vf, + hevc->m_PIC[index]->POC, index, + req->dv_enhance_exist, req->aux_size); +#else + req->dv_enhance_exist = 0; +#endif + } + spin_unlock_irqrestore(&lock, flags); + + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "%s(type 0x%x vf index 0x%x)=>size 0x%x\n", + __func__, type, index, req->aux_size); + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (type & VFRAME_EVENT_RECEIVER_DOLBY_BYPASS_EL) { + if ((force_bypass_dvenl & 0x80000000) == 0) { + hevc_print(hevc, 0, + "%s: VFRAME_EVENT_RECEIVER_DOLBY_BYPASS_EL\n", + __func__); + hevc->bypass_dvenl_enable = 1; + } + } 
+#endif + else if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(vdec); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +#ifdef HEVC_PIC_STRUCT_SUPPORT +static int process_pending_vframe(struct hevc_state_s *hevc, + struct PIC_s *pair_pic, unsigned char pair_frame_top_flag) +{ + struct vframe_s *vf; + + if (!pair_pic) + return -1; + + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "%s: pair_pic index 0x%x %s\n", + __func__, pair_pic->index, + pair_frame_top_flag ? + "top" : "bot"); + + if (kfifo_len(&hevc->pending_q) > 1) { + unsigned long flags; + int index1; + int index2; + /* do not pending more than 1 frame */ + if (kfifo_get(&hevc->pending_q, &vf) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "%s warning(1), vf=>display_q: (index 0x%x), vf 0x%px\n", + __func__, vf->index, vf); + if ((pair_pic->double_write_mode == 3) && + (!(IS_8K_SIZE(vf->width, vf->height)))) { + vf->type |= VIDTYPE_COMPRESS; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + + hevc->vf_pre_count++; + spin_lock_irqsave(&lock, flags); + kfifo_put(&hevc->newframe_q, (const struct vframe_s *)vf); + index1 = vf->index & 0xff; + index2 = (vf->index >> 8) & 0xff; + if (index1 >= MAX_REF_PIC_NUM && + index2 >= MAX_REF_PIC_NUM) { + spin_unlock_irqrestore(&lock, flags); + return -1; + } + + if (index1 < MAX_REF_PIC_NUM) { + hevc->m_PIC[index1]->vf_ref = 0; + hevc->m_PIC[index1]->output_ready = 0; + } + if (index2 < MAX_REF_PIC_NUM) { + hevc->m_PIC[index2]->vf_ref = 0; + hevc->m_PIC[index2]->output_ready = 0; + } + + if (hevc->wait_buf != 0) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + spin_unlock_irqrestore(&lock, flags); + + ATRACE_COUNTER(hevc->trace.pts_name, vf->pts); + } + + 
if (kfifo_peek(&hevc->pending_q, &vf)) { + if (pair_pic == NULL || pair_pic->vf_ref <= 0) { + /* + *if pair_pic is recycled (pair_pic->vf_ref <= 0), + *do not use it + */ + if (kfifo_get(&hevc->pending_q, &vf) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "%s warning(2), vf=>display_q: (index 0x%x)\n", + __func__, vf->index); + if (vf) { + if ((pair_pic->double_write_mode == 3) && + (!(IS_8K_SIZE(vf->width, vf->height)))) { + vf->type |= VIDTYPE_COMPRESS; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + hevc->vf_pre_count++; + vdec_vframe_ready(hw_to_vdec(hevc), vf); + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(hevc->trace.pts_name, vf->pts); + } + } else if ((!pair_frame_top_flag) && + (((vf->index >> 8) & 0xff) == 0xff)) { + if (kfifo_get(&hevc->pending_q, &vf) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + if (vf) { + if ((pair_pic->double_write_mode == 3) && + (!(IS_8K_SIZE(vf->width, vf->height)))) { + vf->type |= VIDTYPE_COMPRESS; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + vf->index &= 0xff; + vf->index |= (pair_pic->index << 8); + pair_pic->vf_ref++; + vdec_vframe_ready(hw_to_vdec(hevc), vf); + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(hevc->trace.pts_name, vf->pts); + hevc->vf_pre_count++; + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "%s vf => display_q: (index 0x%x)\n", + __func__, vf->index); + } + } else if (pair_frame_top_flag && + ((vf->index & 0xff) == 0xff)) { + if (kfifo_get(&hevc->pending_q, &vf) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + if (vf) { + if ((pair_pic->double_write_mode == 3) && + (!(IS_8K_SIZE(vf->width, vf->height)))) { + vf->type |= VIDTYPE_COMPRESS; + if (hevc->mmu_enable) + vf->type |= 
VIDTYPE_SCATTER; + } + vf->index &= 0xff00; + vf->index |= pair_pic->index; + pair_pic->vf_ref++; + vdec_vframe_ready(hw_to_vdec(hevc), vf); + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(hevc->trace.pts_name, vf->pts); + hevc->vf_pre_count++; + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "%s vf => display_q: (index 0x%x)\n", + __func__, vf->index); + } + } + } + return 0; +} +#endif +static void update_vf_memhandle(struct hevc_state_s *hevc, + struct vframe_s *vf, struct PIC_s *pic) +{ + vf->mem_handle = NULL; + vf->mem_head_handle = NULL; + + /* keeper not needed for v4l solution */ + if (hevc->is_used_v4l) + return; + + if (vf->type & VIDTYPE_SCATTER) { +#ifdef H265_10B_MMU_DW + if (hevc->dw_mmu_enable) { + vf->mem_handle = + decoder_mmu_box_get_mem_handle( + hevc->mmu_box_dw, pic->index); + vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + hevc->bmmu_box, VF_BUFFER_IDX(pic->BUF_index)); + } else + +#endif + { + vf->mem_handle = + decoder_mmu_box_get_mem_handle( + hevc->mmu_box, pic->index); + vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + hevc->bmmu_box, VF_BUFFER_IDX(pic->BUF_index)); + } + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hevc->bmmu_box, VF_BUFFER_IDX(pic->BUF_index)); + vf->mem_head_handle = NULL; + /*vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + hevc->bmmu_box, VF_BUFFER_IDX(BUF_index));*/ + } + return; +} + +static void fill_frame_info(struct hevc_state_s *hevc, + struct PIC_s *pic, unsigned int framesize, unsigned int pts) +{ + struct vframe_qos_s *vframe_qos = &hevc->vframe_qos; + if (hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_IDR) + vframe_qos->type = 4; + else if (pic->slice_type == I_SLICE) + vframe_qos->type = 1; + else if (pic->slice_type == P_SLICE) + vframe_qos->type = 2; + else if (pic->slice_type == B_SLICE) + vframe_qos->type = 3; +/* +#define SHOW_QOS_INFO +*/ + if (input_frame_based(hw_to_vdec(hevc))) + 
vframe_qos->size = pic->frame_size; + else + vframe_qos->size = framesize; + vframe_qos->pts = pts; +#ifdef SHOW_QOS_INFO + hevc_print(hevc, 0, "slice:%d, poc:%d\n", pic->slice_type, pic->POC); +#endif + + + vframe_qos->max_mv = pic->max_mv; + vframe_qos->avg_mv = pic->avg_mv; + vframe_qos->min_mv = pic->min_mv; +#ifdef SHOW_QOS_INFO + hevc_print(hevc, 0, "mv: max:%d, avg:%d, min:%d\n", + vframe_qos->max_mv, + vframe_qos->avg_mv, + vframe_qos->min_mv); +#endif + + vframe_qos->max_qp = pic->max_qp; + vframe_qos->avg_qp = pic->avg_qp; + vframe_qos->min_qp = pic->min_qp; +#ifdef SHOW_QOS_INFO + hevc_print(hevc, 0, "qp: max:%d, avg:%d, min:%d\n", + vframe_qos->max_qp, + vframe_qos->avg_qp, + vframe_qos->min_qp); +#endif + + vframe_qos->max_skip = pic->max_skip; + vframe_qos->avg_skip = pic->avg_skip; + vframe_qos->min_skip = pic->min_skip; +#ifdef SHOW_QOS_INFO + hevc_print(hevc, 0, "skip: max:%d, avg:%d, min:%d\n", + vframe_qos->max_skip, + vframe_qos->avg_skip, + vframe_qos->min_skip); +#endif + + vframe_qos->num++; + +} + +static inline void hevc_update_gvs(struct hevc_state_s *hevc, struct PIC_s *pic) +{ + if (hevc->gvs->frame_height != pic->height) { + hevc->gvs->frame_width = pic->width; + hevc->gvs->frame_height = pic->height; + } + if (hevc->gvs->frame_dur != hevc->frame_dur) { + hevc->gvs->frame_dur = hevc->frame_dur; + if (hevc->frame_dur != 0) + hevc->gvs->frame_rate = ((96000 * 10 / hevc->frame_dur) % 10) < 5 ? 
+ 96000 / hevc->frame_dur : (96000 / hevc->frame_dur +1); + else + hevc->gvs->frame_rate = -1; + } + hevc->gvs->error_count = hevc->gvs->error_frame_count; + hevc->gvs->status = hevc->stat | hevc->fatal_error; + if (hevc->gvs->ratio_control != hevc->ratio_control) + hevc->gvs->ratio_control = hevc->ratio_control; +} + +static void put_vf_to_display_q(struct hevc_state_s *hevc, struct vframe_s *vf) +{ + hevc->vf_pre_count++; + decoder_do_frame_check(hw_to_vdec(hevc), vf); + vdec_vframe_ready(hw_to_vdec(hevc), vf); + kfifo_put(&hevc->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hevc->trace.pts_name, vf->pts); +} + +static int post_prepare_process(struct vdec_s *vdec, struct PIC_s *frame) +{ + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; + + if (force_disp_pic_index & 0x100) { + /*recycle directly*/ + frame->output_ready = 0; + frame->show_frame = false; + hevc_print(hevc, 0, "discard show frame.\n"); + return 0; + } + + frame->show_frame = true; + + return 0; +} + +static int post_video_frame(struct vdec_s *vdec, struct PIC_s *pic) +{ + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; + struct vframe_s *vf = NULL; + int stream_offset = pic->stream_offset; + unsigned short slice_type = pic->slice_type; + ulong nv_order = VIDTYPE_VIU_NV21; + u32 frame_size = 0; + struct vdec_info tmp4x; + struct aml_vcodec_ctx * v4l2_ctx = hevc->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + int index; + + /* swap uv */ + if (hevc->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + nv_order = VIDTYPE_VIU_NV12; + } + + if (kfifo_get(&hevc->newframe_q, &vf) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + + if (vf) { + /*hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: pic index 0x%x\n", + __func__, pic->index);*/ + + if (hevc->is_used_v4l) { + vf->v4l_mem_handle + = hevc->m_BUF[pic->BUF_index].v4l_ref_buf_addr; + fb = 
(struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + if (hevc->mmu_enable) { + vf->mm_box.bmmu_box = hevc->bmmu_box; + vf->mm_box.bmmu_idx = VF_BUFFER_IDX(hevc->buffer_wrap[pic->BUF_index]); + vf->mm_box.mmu_box = hevc->mmu_box; + vf->mm_box.mmu_idx = hevc->buffer_wrap[pic->BUF_index]; + } + } + + if (hevc->enable_fence) { + /* fill fence information. */ + if (hevc->fence_usage == FENCE_USE_FOR_DRIVER) + vf->fence = pic->fence; + } + +#ifdef MULTI_INSTANCE_SUPPORT + if (vdec_frame_based(vdec)) { + vf->pts = pic->pts; + vf->pts_us64 = pic->pts64; + vf->timestamp = pic->timestamp; + } + /* if (pts_lookup_offset(PTS_TYPE_VIDEO, + stream_offset, &vf->pts, 0) != 0) { */ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (vdec->master == NULL) { +#else + else { +#endif +#endif + hevc_print(hevc, H265_DEBUG_OUT_PTS, + "call pts_lookup_offset_us64(0x%x)\n", + stream_offset); + if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) { + if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, stream_offset, &vf->pts, + &frame_size, 0, + &vf->pts_us64) != 0) { +#ifdef DEBUG_PTS + hevc->pts_missed++; +#endif + vf->pts = 0; + vf->pts_us64 = 0; + } else { +#ifdef DEBUG_PTS + hevc->pts_hit++; +#endif + } + } + +#ifdef MULTI_INSTANCE_SUPPORT +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + } else { + vf->pts = 0; + vf->pts_us64 = 0; + } +#else + } +#endif +#endif + if (pts_unstable && (hevc->frame_dur > 0)) + hevc->pts_mode = PTS_NONE_REF_USE_DURATION; + + fill_frame_info(hevc, pic, frame_size, vf->pts); + + if (vf->pts != 0) + hevc->last_lookup_pts = vf->pts; + + if ((hevc->pts_mode == PTS_NONE_REF_USE_DURATION) + && (slice_type != 2)) + vf->pts = hevc->last_pts + DUR2PTS(hevc->frame_dur); + hevc->last_pts = vf->pts; + + if (vf->pts_us64 != 0) + hevc->last_lookup_pts_us64 = vf->pts_us64; + + if ((hevc->pts_mode == PTS_NONE_REF_USE_DURATION) + && (slice_type != 2)) { + vf->pts_us64 = + hevc->last_pts_us64 + + (DUR2PTS(hevc->frame_dur) * 100 / 9); + } + hevc->last_pts_us64 
= vf->pts_us64; + if ((get_dbg_flag(hevc) & H265_DEBUG_OUT_PTS) != 0) { + hevc_print(hevc, 0, + "H265 dec out pts: vf->pts=%d, vf->pts_us64 = %lld, ts: %llu\n", + vf->pts, vf->pts_us64, vf->timestamp); + } + + /* + *vf->index: + *(1) vf->type is VIDTYPE_PROGRESSIVE + * and vf->canvas0Addr != vf->canvas1Addr, + * vf->index[7:0] is the index of top pic + * vf->index[15:8] is the index of bot pic + *(2) other cases, + * only vf->index[7:0] is used + * vf->index[15:8] == 0xff + */ + vf->index = 0xff00 | pic->index; +#if 1 +/*SUPPORT_10BIT*/ + if (pic->double_write_mode & 0x10) { + /* double write only */ + vf->compBodyAddr = 0; + vf->compHeadAddr = 0; +#ifdef H265_10B_MMU_DW + vf->dwBodyAddr = 0; + vf->dwHeadAddr = 0; +#endif + } else { + if (hevc->mmu_enable) { + vf->compBodyAddr = 0; + vf->compHeadAddr = pic->header_adr; +#ifdef H265_10B_MMU_DW + vf->dwBodyAddr = 0; + vf->dwHeadAddr = 0; + if (pic->double_write_mode & 0x20) { + u32 mode = pic->double_write_mode & 0xf; + if (mode == 5 || mode == 3) + vf->dwHeadAddr = pic->header_dw_adr; + else if ((mode == 1 || mode == 2 || mode == 4) + && (debug & H265_DEBUG_OUT_PTS) == 0) { + vf->compHeadAddr = pic->header_dw_adr; + pr_debug("Use dw mmu for display\n"); + } + } +#endif + } else { + vf->compBodyAddr = pic->mc_y_adr; /*body adr*/ + vf->compHeadAddr = pic->mc_y_adr + + pic->losless_comp_body_size; + vf->mem_head_handle = NULL; + } + /*head adr*/ + vf->canvas0Addr = vf->canvas1Addr = 0; + } + if (pic->double_write_mode && + ((pic->double_write_mode & 0x20) == 0)) { + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; + vf->type |= nv_order; + + if (((pic->double_write_mode == 3) || (pic->double_write_mode == 5) || + (pic->double_write_mode == 9)) && + (!(IS_8K_SIZE(pic->width, pic->height)))) { + vf->type |= VIDTYPE_COMPRESS; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag && + (get_dbg_flag(hevc) + & H265_CFG_CANVAS_IN_DECODE) == 0) { + vf->canvas0Addr = 
vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + pic->canvas_config[0]; + vf->canvas0_config[1] = + pic->canvas_config[1]; + + vf->canvas1_config[0] = + pic->canvas_config[0]; + vf->canvas1_config[1] = + pic->canvas_config[1]; + + } else +#endif + vf->canvas0Addr = vf->canvas1Addr + = spec2canvas(pic); + } else { + vf->canvas0Addr = vf->canvas1Addr = 0; + vf->type = VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + vf->compWidth = pic->width; + vf->compHeight = pic->height; + update_vf_memhandle(hevc, vf, pic); + switch (pic->bit_depth_luma) { + case 9: + vf->bitdepth = BITDEPTH_Y9; + break; + case 10: + vf->bitdepth = BITDEPTH_Y10; + break; + default: + vf->bitdepth = BITDEPTH_Y8; + break; + } + switch (pic->bit_depth_chroma) { + case 9: + vf->bitdepth |= (BITDEPTH_U9 | BITDEPTH_V9); + break; + case 10: + vf->bitdepth |= (BITDEPTH_U10 | BITDEPTH_V10); + break; + default: + vf->bitdepth |= (BITDEPTH_U8 | BITDEPTH_V8); + break; + } + if ((vf->type & VIDTYPE_COMPRESS) == 0) + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + if (pic->mem_saving_mode == 1) + vf->bitdepth |= BITDEPTH_SAVING_MODE; +#else + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; + vf->type |= nv_order; + vf->canvas0Addr = vf->canvas1Addr = spec2canvas(pic); +#endif + set_frame_info(hevc, vf, pic); + if (hevc->discard_dv_data) { + vf->discard_dv_data = true; + } + + /* if((vf->width!=pic->width)||(vf->height!=pic->height)) */ + /* hevc_print(hevc, 0, + "aaa: %d/%d, %d/%d\n", + vf->width,vf->height, pic->width, pic->height); */ + vf->width = pic->width; + vf->height = pic->height; + + if (force_w_h != 0) { + vf->width = (force_w_h >> 16) & 0xffff; + vf->height = force_w_h & 0xffff; + } + if (force_fps & 0x100) { + u32 rate = force_fps & 0xff; + + if (rate) + vf->duration = 96000/rate; + else + vf->duration = 0; + } + if (force_fps & 0x200) { + vf->pts = 0; + vf->pts_us64 = 0; + } + if (!vdec->vbuf.use_ptsserv && 
vdec_stream_based(vdec)) { + vf->pts_us64 = stream_offset; + vf->pts = 0; + } + /* + * !!! to do ... + * need move below code to get_new_pic(), + * hevc->xxx can only be used by current decoded pic + */ + if (pic->conformance_window_flag && + (get_dbg_flag(hevc) & + H265_DEBUG_IGNORE_CONFORMANCE_WINDOW) == 0) { + unsigned int SubWidthC, SubHeightC; + + switch (pic->chroma_format_idc) { + case 1: + SubWidthC = 2; + SubHeightC = 2; + break; + case 2: + SubWidthC = 2; + SubHeightC = 1; + break; + default: + SubWidthC = 1; + SubHeightC = 1; + break; + } + vf->width -= SubWidthC * + (pic->conf_win_left_offset + + pic->conf_win_right_offset); + vf->height -= SubHeightC * + (pic->conf_win_top_offset + + pic->conf_win_bottom_offset); + + vf->compWidth -= SubWidthC * + (pic->conf_win_left_offset + + pic->conf_win_right_offset); + vf->compHeight -= SubHeightC * + (pic->conf_win_top_offset + + pic->conf_win_bottom_offset); + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, + "conformance_window %d, %d, %d, %d, %d => cropped width %d, height %d com_w %d com_h %d\n", + pic->chroma_format_idc, + pic->conf_win_left_offset, + pic->conf_win_right_offset, + pic->conf_win_top_offset, + pic->conf_win_bottom_offset, + vf->width, vf->height, vf->compWidth, vf->compHeight); + } + if (hevc->cur_pic != NULL) { + vf->sar_width = hevc->cur_pic->sar_width; + vf->sar_height = hevc->cur_pic->sar_height; + } + vf->width = vf->width / + get_double_write_ratio(pic->double_write_mode & 0xf); + vf->height = vf->height / + get_double_write_ratio(pic->double_write_mode & 0xf); + +#ifdef H265_10B_MMU_DW + if ((pic->double_write_mode & 0x20) && + ((pic->double_write_mode & 0xf) == 2 || + (pic->double_write_mode & 0xf) == 4)) { + vf->compWidth = vf->width; + vf->compHeight = vf->height; + } +#endif + if (hevc->is_used_v4l && vdec->prog_only) + pic->pic_struct = 0; + +#ifdef HEVC_PIC_STRUCT_SUPPORT + if (pic->pic_struct == 3 || pic->pic_struct == 4) { + struct vframe_s *vf2; + + if 
(get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "pic_struct = %d index 0x%x\n", + pic->pic_struct, + pic->index); + + if (kfifo_get(&hevc->newframe_q, &vf2) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + pic->vf_ref = 2; + vf->duration = vf->duration>>1; + memcpy(vf2, vf, sizeof(struct vframe_s)); + + if (pic->pic_struct == 3) { + vf->type = VIDTYPE_INTERLACE_TOP + | nv_order; + vf2->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order; + } else { + vf->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order; + vf2->type = VIDTYPE_INTERLACE_TOP + | nv_order; + } + if (pic->show_frame) { + put_vf_to_display_q(hevc, vf); + hevc->vf_pre_count++; + vdec_vframe_ready(hw_to_vdec(hevc), vf2); + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf2); + ATRACE_COUNTER(hevc->trace.pts_name, vf2->pts); + } else { + vh265_vf_put(vf, vdec); + vh265_vf_put(vf2, vdec); + atomic_add(2, &hevc->vf_get_count); + hevc->vf_pre_count += 2; + return 0; + } + } else if (pic->pic_struct == 5 + || pic->pic_struct == 6) { + struct vframe_s *vf2, *vf3; + + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "pic_struct = %d index 0x%x\n", + pic->pic_struct, + pic->index); + + if (kfifo_get(&hevc->newframe_q, &vf2) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + if (kfifo_get(&hevc->newframe_q, &vf3) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + pic->vf_ref = 3; + vf->duration = vf->duration/3; + memcpy(vf2, vf, sizeof(struct vframe_s)); + memcpy(vf3, vf, sizeof(struct vframe_s)); + + if (pic->pic_struct == 5) { + vf->type = VIDTYPE_INTERLACE_TOP + | nv_order; + vf2->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order; + vf3->type = VIDTYPE_INTERLACE_TOP + | nv_order; + } else { + vf->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order; + vf2->type = VIDTYPE_INTERLACE_TOP + | nv_order; + vf3->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order; + } 
+ if (pic->show_frame) { + put_vf_to_display_q(hevc, vf); + hevc->vf_pre_count++; + vdec_vframe_ready(hw_to_vdec(hevc), vf2); + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf2); + ATRACE_COUNTER(hevc->trace.pts_name, vf2->pts); + hevc->vf_pre_count++; + vdec_vframe_ready(hw_to_vdec(hevc), vf3); + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf3); + ATRACE_COUNTER(hevc->trace.pts_name, vf3->pts); + } else { + vh265_vf_put(vf, vdec); + vh265_vf_put(vf2, vdec); + vh265_vf_put(vf3, vdec); + atomic_add(3, &hevc->vf_get_count); + hevc->vf_pre_count += 3;; + return 0; + } + } else if (pic->pic_struct == 9 + || pic->pic_struct == 10) { + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "pic_struct = %d index 0x%x\n", + pic->pic_struct, + pic->index); + + pic->vf_ref = 1; + /* process previous pending vf*/ + process_pending_vframe(hevc, + pic, (pic->pic_struct == 9)); + vf->height <<= 1; + if (pic->show_frame) { + decoder_do_frame_check(vdec, vf); + vdec_vframe_ready(vdec, vf); + /* process current vf */ + kfifo_put(&hevc->pending_q, + (const struct vframe_s *)vf); + if (pic->pic_struct == 9) { + vf->type = VIDTYPE_INTERLACE_TOP + | nv_order | VIDTYPE_VIU_FIELD; + process_pending_vframe(hevc, + hevc->pre_bot_pic, 0); + } else { + vf->type = VIDTYPE_INTERLACE_BOTTOM | + nv_order | VIDTYPE_VIU_FIELD; + vf->index = (pic->index << 8) | 0xff; + process_pending_vframe(hevc, + hevc->pre_top_pic, 1); + } + + if (hevc->vf_pre_count == 0) + hevc->vf_pre_count++; + + /**/ + if (pic->pic_struct == 9) + hevc->pre_top_pic = pic; + else + hevc->pre_bot_pic = pic; + } else { + vh265_vf_put(vf, vdec); + atomic_add(1, &hevc->vf_get_count); + hevc->vf_pre_count++; + return 0; + } + } else if (pic->pic_struct == 11 + || pic->pic_struct == 12) { + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "pic_struct = %d index 0x%x\n", + pic->pic_struct, + pic->index); + pic->vf_ref = 1; + /* process previous pending vf*/ + 
process_pending_vframe(hevc, pic, + (pic->pic_struct == 11)); + + /* put current into pending q */ + vf->height <<= 1; + if (pic->pic_struct == 11) + vf->type = VIDTYPE_INTERLACE_TOP | + nv_order | VIDTYPE_VIU_FIELD; + else { + vf->type = VIDTYPE_INTERLACE_BOTTOM | + nv_order | VIDTYPE_VIU_FIELD; + vf->index = (pic->index << 8) | 0xff; + } + if (pic->show_frame) { + decoder_do_frame_check(vdec, vf); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hevc->pending_q, + (const struct vframe_s *)vf); + if (hevc->vf_pre_count == 0) + hevc->vf_pre_count++; + + /**/ + if (pic->pic_struct == 11) + hevc->pre_top_pic = pic; + else + hevc->pre_bot_pic = pic; + } else { + vh265_vf_put(vf, vdec); + atomic_add(1, &hevc->vf_get_count); + hevc->vf_pre_count++; + return 0; + } + } else { + pic->vf_ref = 1; + + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "pic_struct = %d index 0x%x\n", + pic->pic_struct, + pic->index); + + switch (pic->pic_struct) { + case 7: + vf->duration <<= 1; + break; + case 8: + vf->duration = vf->duration * 3; + break; + case 1: + vf->height <<= 1; + vf->type = VIDTYPE_INTERLACE_TOP | + nv_order | VIDTYPE_VIU_FIELD; + process_pending_vframe(hevc, pic, 1); + hevc->pre_top_pic = pic; + break; + case 2: + vf->height <<= 1; + vf->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order + | VIDTYPE_VIU_FIELD; + process_pending_vframe(hevc, pic, 0); + hevc->pre_bot_pic = pic; + break; + } + if (pic->show_frame) { + put_vf_to_display_q(hevc, vf); + } else { + vh265_vf_put(vf, vdec); + atomic_add(1, &hevc->vf_get_count); + hevc->vf_pre_count++; + return 0; + } + } +#else + vf->type_original = vf->type; + pic->vf_ref = 1; + put_vf_to_display_q(hevc, vf); +#endif + ATRACE_COUNTER(hevc->trace.new_q_name, kfifo_len(&hevc->newframe_q)); + ATRACE_COUNTER(hevc->trace.disp_q_name, kfifo_len(&hevc->display_q)); + /*count info*/ + vdec_count_info(hevc->gvs, 0, stream_offset); + if (pic->slice_type == I_SLICE) { + hevc->gvs->i_decoded_frames++; + vf->frame_type |= 
V4L2_BUF_FLAG_KEYFRAME; + } else if (pic->slice_type == P_SLICE) { + hevc->gvs->p_decoded_frames++; + vf->frame_type |= V4L2_BUF_FLAG_PFRAME; + } else if (pic->slice_type == B_SLICE) { + hevc->gvs->b_decoded_frames++; + vf->frame_type |= V4L2_BUF_FLAG_BFRAME; + } + hevc_update_gvs(hevc, pic); + memcpy(&tmp4x, hevc->gvs, sizeof(struct vdec_info)); + tmp4x.bit_depth_luma = pic->bit_depth_luma; + tmp4x.bit_depth_chroma = pic->bit_depth_chroma; + tmp4x.double_write_mode = pic->double_write_mode; + vdec_fill_vdec_frame(vdec, &hevc->vframe_qos, &tmp4x, vf, pic->hw_decode_time); + vdec->vdec_fps_detec(vdec->id); + hevc_print(hevc, H265_DEBUG_BUFMGR, + "%s(type %d index 0x%x poc %d/%d) pts(%d,%d) dur %d\n", + __func__, vf->type, vf->index, + get_pic_poc(hevc, vf->index & 0xff), + get_pic_poc(hevc, (vf->index >> 8) & 0xff), + vf->pts, vf->pts_us64, + vf->duration); + + if (pic->pic_struct == 10 || pic->pic_struct == 12) { + index = (vf->index >> 8) & 0xff; + } else { + index = vf->index & 0xff; + } + +#ifdef AUX_DATA_CRC + if (index <= MAX_REF_PIC_NUM) + decoder_do_aux_data_check(vdec, hevc->m_PIC[index]->aux_data_buf, + hevc->m_PIC[index]->aux_data_size); +#endif + + hevc_print(hevc, H265_DEBUG_PRINT_SEI, + "aux_data_size:%d, signal_type: %d, sei_present_flag: %d\n", + hevc->m_PIC[index]->aux_data_size, hevc->video_signal_type, hevc->sei_present_flag); + + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) { + int i = 0; + PR_INIT(128); + for (i = 0; i < hevc->m_PIC[index]->aux_data_size; i++) { + PR_FILL("%02x ", hevc->m_PIC[index]->aux_data_buf[i]); + if (((i + 1) & 0xf) == 0) + PR_INFO(hevc->index); + } + PR_INFO(hevc->index); + } + + if (hevc->is_used_v4l) + update_vframe_src_fmt(vf, + hevc->m_PIC[index]->aux_data_buf, + hevc->m_PIC[index]->aux_data_size, + hevc->dv_duallayer, hevc->provider_name, NULL); + + /*if (pic->vf_ref == hevc->vf_pre_count) {*/ + if (hevc->kpi_first_i_decoded == 0) { + hevc->kpi_first_i_decoded = 1; + pr_debug("[vdec_kpi][%s] First I frame 
decoded.\n", + __func__); + } + + if (without_display_mode == 0) { + if (hevc->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vh265_vf_put(vh265_vf_get(vdec), vdec); + } else { + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } + else + vh265_vf_put(vh265_vf_get(vdec), vdec); + } + + return 0; +} + +static int post_picture_early(struct vdec_s *vdec, int index) +{ + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; + struct PIC_s *pic = hevc->m_PIC[index]; + + if (!hevc->enable_fence) + return 0; + + /* create fence for each buffers. */ + if (vdec_timeline_create_fence(vdec->sync)) + return -1; + + pic->fence = vdec->sync->fence; + pic->stream_offset = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + + if (hevc->chunk) { + pic->pts = hevc->chunk->pts; + pic->pts64 = hevc->chunk->pts64; + pic->timestamp = hevc->chunk->timestamp; + } + pic->show_frame = true; + post_video_frame(vdec, pic); + + display_frame_count[hevc->index]++; + + return 0; +} + +static int prepare_display_buf(struct vdec_s *vdec, struct PIC_s *frame) +{ + struct hevc_state_s *hevc = + (struct hevc_state_s *)vdec->private; + + if (hevc->enable_fence) { + int i, j, used_size, ret; + int signed_count = 0; + struct vframe_s *signed_fence[VF_POOL_SIZE]; + + post_prepare_process(vdec, frame); + + if (!frame->show_frame) + pr_info("do not display.\n"); + + hevc->m_PIC[frame->index]->vf_ref = 1; + + /* notify signal to wake up wq of fence. 
*/ + vdec_timeline_increase(vdec->sync, 1); + mutex_lock(&hevc->fence_mutex); + used_size = hevc->fence_vf_s.used_size; + if (used_size) { + for (i = 0, j = 0; i < VF_POOL_SIZE && j < used_size; i++) { + if (hevc->fence_vf_s.fence_vf[i] != NULL) { + ret = dma_fence_get_status(hevc->fence_vf_s.fence_vf[i]->fence); + if (ret == 1) { + signed_fence[signed_count] = hevc->fence_vf_s.fence_vf[i]; + hevc->fence_vf_s.fence_vf[i] = NULL; + hevc->fence_vf_s.used_size--; + signed_count++; + } + j++; + } + } + } + mutex_unlock(&hevc->fence_mutex); + if (signed_count != 0) { + for (i = 0; i < signed_count; i++) + vh265_vf_put(signed_fence[i], vdec); + } + + return 0; + } + + if (post_prepare_process(vdec, frame)) + return -1; + + if (post_video_frame(vdec, frame)) + return -1; + + display_frame_count[hevc->index]++; + return 0; +} + +static int notify_v4l_eos(struct vdec_s *vdec) +{ + struct hevc_state_s *hw = (struct hevc_state_s *)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = &hw->vframe_dummy; + struct vdec_v4l2_buffer *fb = NULL; + int index = INVALID_IDX; + ulong expires; + + if (hw->eos) { + if (hw->is_used_v4l) { + expires = jiffies + msecs_to_jiffies(2000); + while (INVALID_IDX == (index = get_free_buf_idx(hw))) { + if (time_after(jiffies, expires) || + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) + break; + } + + if (index == INVALID_IDX) { + ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token); + if (ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC) < 0) { + pr_err("[%d] EOS get free buff fail.\n", ctx->id); + return -1; + } + } + } + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + vf->v4l_mem_handle = (index == INVALID_IDX) ? 
(ulong)fb : + hw->m_BUF[index].v4l_ref_buf_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + + if (hw->is_used_v4l) + fb->task->submit(fb->task, TASK_TYPE_DEC); + else + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + + pr_info("[%d] H265 EOS notify.\n", (hw->is_used_v4l)?ctx->id:vdec->id); + } + + return 0; +} + +static void process_nal_sei(struct hevc_state_s *hevc, + int payload_type, int payload_size) +{ + unsigned short data; + + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "\tsei message: payload_type = 0x%02x, payload_size = 0x%02x\n", + payload_type, payload_size); + + if (payload_type == 137) { + int i, j; + /* MASTERING_DISPLAY_COLOUR_VOLUME */ + if (payload_size >= 24) { + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "\tsei MASTERING_DISPLAY_COLOUR_VOLUME available\n"); + for (i = 0; i < 3; i++) { + for (j = 0; j < 2; j++) { + data = + (READ_HREG(HEVC_SHIFTED_DATA) >> 16); + hevc->primaries[i][j] = data; + WRITE_HREG(HEVC_SHIFT_COMMAND, + (1<<7)|16); + if (get_dbg_flag(hevc) & + H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "\t\tprimaries[%1d][%1d] = %04x\n", + i, j, hevc->primaries[i][j]); + } + } + for (i = 0; i < 2; i++) { + data = (READ_HREG(HEVC_SHIFTED_DATA) >> 16); + hevc->white_point[i] = data; + WRITE_HREG(HEVC_SHIFT_COMMAND, (1<<7)|16); + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "\t\twhite_point[%1d] = %04x\n", + i, hevc->white_point[i]); + } + for (i = 0; i < 2; i++) { + data = (READ_HREG(HEVC_SHIFTED_DATA) >> 16); + hevc->luminance[i] = data << 16; + WRITE_HREG(HEVC_SHIFT_COMMAND, + (1<<7)|16); + data = + (READ_HREG(HEVC_SHIFTED_DATA) >> 16); + hevc->luminance[i] |= data; + WRITE_HREG(HEVC_SHIFT_COMMAND, + (1<<7)|16); + if (get_dbg_flag(hevc) & + H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "\t\tluminance[%1d] = 
%08x\n", + i, hevc->luminance[i]); + } + hevc->sei_present_flag |= SEI_MASTER_DISPLAY_COLOR_MASK; + } + payload_size -= 24; + while (payload_size > 0) { + data = (READ_HREG(HEVC_SHIFTED_DATA) >> 24); + payload_size--; + WRITE_HREG(HEVC_SHIFT_COMMAND, (1<<7)|8); + hevc_print(hevc, 0, "\t\tskip byte %02x\n", data); + } + } +} + +static int hevc_recover(struct hevc_state_s *hevc) +{ + int ret = -1; + u32 rem; + u64 shift_byte_count64; + unsigned int hevc_shift_byte_count; + unsigned int hevc_stream_start_addr; + unsigned int hevc_stream_end_addr; + unsigned int hevc_stream_rd_ptr; + unsigned int hevc_stream_wr_ptr; + unsigned int hevc_stream_control; + unsigned int hevc_stream_fifo_ctl; + unsigned int hevc_stream_buf_size; + struct vdec_s *vdec = hw_to_vdec(hevc); + + mutex_lock(&vh265_mutex); +#if 0 + for (i = 0; i < (hevc->debug_ptr_size / 2); i += 4) { + int ii; + + for (ii = 0; ii < 4; ii++) + hevc_print(hevc, 0, + "%04x ", hevc->debug_ptr[i + 3 - ii]); + if (((i + ii) & 0xf) == 0) + hevc_print(hevc, 0, "\n"); + } +#endif +#define ES_VID_MAN_RD_PTR (1<<0) + if (!hevc->init_flag) { + hevc_print(hevc, 0, "h265 has stopped, recover return!\n"); + mutex_unlock(&vh265_mutex); + return ret; + } + amhevc_stop(); + msleep(20); + ret = 0; + /* reset */ + if (vdec_stream_based(vdec)) { + STBUF_WRITE(&vdec->vbuf, set_rp, + READ_VREG(HEVC_STREAM_RD_PTR)); + + if (!vdec->vbuf.no_parser) + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, + ES_VID_MAN_RD_PTR); + } + + hevc_stream_start_addr = READ_VREG(HEVC_STREAM_START_ADDR); + hevc_stream_end_addr = READ_VREG(HEVC_STREAM_END_ADDR); + hevc_stream_rd_ptr = READ_VREG(HEVC_STREAM_RD_PTR); + hevc_stream_wr_ptr = READ_VREG(HEVC_STREAM_WR_PTR); + hevc_stream_control = READ_VREG(HEVC_STREAM_CONTROL); + hevc_stream_fifo_ctl = READ_VREG(HEVC_STREAM_FIFO_CTL); + hevc_stream_buf_size = hevc_stream_end_addr - hevc_stream_start_addr; + + /* HEVC streaming buffer will reset and restart + * from current hevc_stream_rd_ptr position + */ + /* calculate 
HEVC_SHIFT_BYTE_COUNT value with the new position. */ + hevc_shift_byte_count = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + if ((hevc->shift_byte_count_lo & (1 << 31)) + && ((hevc_shift_byte_count & (1 << 31)) == 0)) + hevc->shift_byte_count_hi++; + + hevc->shift_byte_count_lo = hevc_shift_byte_count; + shift_byte_count64 = ((u64)(hevc->shift_byte_count_hi) << 32) | + hevc->shift_byte_count_lo; + div_u64_rem(shift_byte_count64, hevc_stream_buf_size, &rem); + shift_byte_count64 -= rem; + shift_byte_count64 += hevc_stream_rd_ptr - hevc_stream_start_addr; + + if (rem > (hevc_stream_rd_ptr - hevc_stream_start_addr)) + shift_byte_count64 += hevc_stream_buf_size; + + hevc->shift_byte_count_lo = (u32)shift_byte_count64; + hevc->shift_byte_count_hi = (u32)(shift_byte_count64 >> 32); + + WRITE_VREG(DOS_SW_RESET3, + /* (1<<2)| */ + (1 << 3) | (1 << 4) | (1 << 8) | + (1 << 11) | (1 << 12) | (1 << 14) + | (1 << 15) | (1 << 17) | (1 << 18) | (1 << 19)); + WRITE_VREG(DOS_SW_RESET3, 0); + + WRITE_VREG(HEVC_STREAM_START_ADDR, hevc_stream_start_addr); + WRITE_VREG(HEVC_STREAM_END_ADDR, hevc_stream_end_addr); + WRITE_VREG(HEVC_STREAM_RD_PTR, hevc_stream_rd_ptr); + WRITE_VREG(HEVC_STREAM_WR_PTR, hevc_stream_wr_ptr); + WRITE_VREG(HEVC_STREAM_CONTROL, hevc_stream_control); + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, hevc->shift_byte_count_lo); + WRITE_VREG(HEVC_STREAM_FIFO_CTL, hevc_stream_fifo_ctl); + + hevc_config_work_space_hw(hevc); + decoder_hw_reset(); + + hevc->have_vps = 0; + hevc->have_sps = 0; + hevc->have_pps = 0; + + hevc->have_valid_start_slice = 0; + + if (get_double_write_mode(hevc) & 0x10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, + 0x1 << 31 /*/Enable NV21 reference read mode for MC*/ + ); + + WRITE_VREG(HEVC_WAIT_FLAG, 1); + /* clear mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + /* enable mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 1); + /* disable PSCALE for hardware sharing */ + WRITE_VREG(HEVC_PSCALE_CTRL, 0); + + 
CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_VID_MAN_RD_PTR); + + WRITE_VREG(DEBUG_REG1, 0x0); + + if ((error_handle_policy & 1) == 0) { + if ((error_handle_policy & 4) == 0) { + /* ucode auto mode, and do not check vps/sps/pps/idr */ + WRITE_VREG(NAL_SEARCH_CTL, + 0xc); + } else { + WRITE_VREG(NAL_SEARCH_CTL, 0x1);/* manual parser NAL */ + } + } else { + WRITE_VREG(NAL_SEARCH_CTL, 0x1);/* manual parser NAL */ + } + + if (get_dbg_flag(hevc) & H265_DEBUG_NO_EOS_SEARCH_DONE) + WRITE_VREG(NAL_SEARCH_CTL, READ_VREG(NAL_SEARCH_CTL) | 0x10000); + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) + | ((parser_sei_enable & 0x7) << 17)); +/*#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION*/ + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | + ((parser_dolby_vision_enable & 0x1) << 20)); +/*#endif*/ + config_decode_mode(hevc); + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + + /* if (amhevc_loadmc(vh265_mc) < 0) { */ + /* amhevc_disable(); */ + /* return -EBUSY; */ + /* } */ +#if 0 + for (i = 0; i < (hevc->debug_ptr_size / 2); i += 4) { + int ii; + + for (ii = 0; ii < 4; ii++) { + /* hevc->debug_ptr[i+3-ii]=ttt++; */ + hevc_print(hevc, 0, + "%04x ", hevc->debug_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + hevc_print(hevc, 0, "\n"); + } +#endif + init_pic_list_hw(hevc); + + hevc_print(hevc, 0, "%s HEVC_SHIFT_BYTE_COUNT=0x%x\n", __func__, + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + +#ifdef SWAP_HEVC_UCODE + if (!tee_enabled() && hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, hevc->mc_dma_handle); + /*pr_info("write swap buffer %x\n", (u32)(hevc->mc_dma_handle));*/ + } +#endif + amhevc_start(); + + /* skip, search next start code */ + WRITE_VREG(HEVC_WAIT_FLAG, READ_VREG(HEVC_WAIT_FLAG) & (~0x2)); + hevc->skip_flag = 1; +#ifdef ERROR_HANDLE_DEBUG + if (dbg_nal_skip_count & 0x20000) { + dbg_nal_skip_count &= ~0x20000; + mutex_unlock(&vh265_mutex); + return ret; + } +#endif + 
WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); +#ifdef MULTI_INSTANCE_SUPPORT + if (!hevc->m_ins_flag) +#endif + hevc->first_pic_after_recover = 1; + mutex_unlock(&vh265_mutex); + return ret; +} + +static void dump_aux_buf(struct hevc_state_s *hevc) +{ + int i; + unsigned short *aux_adr = + (unsigned short *) + hevc->aux_addr; + unsigned int aux_size = + (READ_VREG(HEVC_AUX_DATA_SIZE) + >> 16) << 4; + + if (hevc->prefix_aux_size > 0) { + hevc_print(hevc, 0, + "prefix aux: (size %d)\n", + aux_size); + if (aux_size > hevc->prefix_aux_size) { + hevc_print(hevc, 0, + "%s:aux_size(%d) is over size\n", __func__, aux_size); + return ; + } + for (i = 0; i < + (aux_size >> 1); i++) { + hevc_print_cont(hevc, 0, + "%04x ", + *(aux_adr + i)); + if (((i + 1) & 0xf) + == 0) + hevc_print_cont(hevc, + 0, "\n"); + } + } + if (hevc->suffix_aux_size > 0) { + aux_adr = (unsigned short *) + (hevc->aux_addr + + hevc->prefix_aux_size); + aux_size = + (READ_VREG(HEVC_AUX_DATA_SIZE) & 0xffff) + << 4; + hevc_print(hevc, 0, + "suffix aux: (size %d)\n", + aux_size); + if (aux_size > hevc->suffix_aux_size) { + hevc_print(hevc, 0, + "%s:aux_size(%d) is over size\n", __func__, aux_size); + return ; + } + for (i = 0; i < + (aux_size >> 1); i++) { + hevc_print_cont(hevc, 0, + "%04x ", *(aux_adr + i)); + if (((i + 1) & 0xf) == 0) + hevc_print_cont(hevc, 0, "\n"); + } + } +} + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static void dolby_get_meta(struct hevc_state_s *hevc) +{ + struct vdec_s *vdec = hw_to_vdec(hevc); + + if (get_dbg_flag(hevc) & + H265_DEBUG_PRINT_SEI) + dump_aux_buf(hevc); + if (vdec->dolby_meta_with_el || vdec->slave) { + set_aux_data(hevc, + hevc->cur_pic, 0, 0); + } else if (vdec->master) { + struct hevc_state_s *hevc_ba = + (struct hevc_state_s *) + vdec->master->private; + /*do not use hevc_ba*/ + set_aux_data(hevc, + hevc_ba->cur_pic, + 0, 1); + set_aux_data(hevc, + 
hevc->cur_pic, 0, 2); + } else if (vdec_frame_based(vdec)) { + set_aux_data(hevc, + hevc->cur_pic, 1, 0); + } +} +#endif + +static void read_decode_info(struct hevc_state_s *hevc) +{ + uint32_t decode_info = + READ_HREG(HEVC_DECODE_INFO); + hevc->start_decoding_flag |= + (decode_info & 0xff); + hevc->rps_set_id = (decode_info >> 8) & 0xff; +} + +static int vh265_get_ps_info(struct hevc_state_s *hevc, + union param_u *rpm_param, + struct aml_vdec_ps_infos *ps) +{ + u32 width = rpm_param->p.pic_width_in_luma_samples; + u32 height = rpm_param->p.pic_height_in_luma_samples; + u32 SubWidthC, SubHeightC; + + switch (rpm_param->p.chroma_format_idc) { + case 1: + SubWidthC = 2; + SubHeightC = 2; + break; + case 2: + SubWidthC = 2; + SubHeightC = 1; + break; + default: + SubWidthC = 1; + SubHeightC = 1; + break; + } + + width -= SubWidthC * + (rpm_param->p.conf_win_left_offset + + rpm_param->p.conf_win_right_offset); + height -= SubHeightC * + (rpm_param->p.conf_win_top_offset + + rpm_param->p.conf_win_bottom_offset); + + hevc->last_width = rpm_param->p.pic_width_in_luma_samples; + hevc->last_height = rpm_param->p.pic_height_in_luma_samples; + hevc->sps_num_reorder_pics_0 = + rpm_param->p.sps_num_reorder_pics_0; + hevc->used_buf_num = v4l_parser_work_pic_num(hevc); + + ps->visible_width = width; + ps->visible_height = height; + ps->coded_width = ALIGN(width, 64); + ps->coded_height = ALIGN(height, 64); + ps->dpb_size = v4l_parser_work_pic_num(hevc); + + return 0; +} + +static void get_comp_buf_info(struct hevc_state_s *hevc, + struct vdec_comp_buf_info *info) +{ + u16 bit_depth = hevc->param.p.bit_depth; + int w = hevc->param.p.pic_width_in_luma_samples; + int h = hevc->param.p.pic_height_in_luma_samples; + + info->max_size = hevc_max_mmu_buf_size( + hevc->max_pic_w, + hevc->max_pic_h); + info->header_size = hevc_get_header_size(w,h); + info->frame_buffer_size = hevc_mmu_page_num( + hevc, w, h, bit_depth == 0); + + pr_info("hevc get comp info: %d %d %d\n", + info->max_size, 
info->header_size, + info->frame_buffer_size); +} + +static int v4l_res_change(struct hevc_state_s *hevc, union param_u *rpm_param) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hevc->v4l2_ctx); + int i, ret = 0; + + if (ctx->param_sets_from_ucode) { + struct aml_vdec_ps_infos ps; + int width = rpm_param->p.pic_width_in_luma_samples; + int height = rpm_param->p.pic_height_in_luma_samples; + if ((hevc->last_width != 0 && + hevc->last_height != 0) && + (hevc->last_width != width || + hevc->last_height != height)) { + hevc_print(hevc, 0, + "v4l_res_change Pic Width/Height Change (%d,%d)=>(%d,%d), interlace %d\n", + hevc->last_width, hevc->last_height, + width, + height, + hevc->interlace_flag); + + if (get_valid_double_write_mode(hevc) != 16) { + struct vdec_comp_buf_info info; + + get_comp_buf_info(hevc, &info); + vdec_v4l_set_comp_buf_info(ctx, &info); + } + + vh265_get_ps_info(hevc, &hevc->param, &ps); + vdec_v4l_set_ps_infos(ctx, &ps); + vdec_v4l_res_ch_event(ctx); + hevc->v4l_params_parsed = false; + ctx->v4l_resolution_change = 1; + hevc->eos = 1; + + /* + * marks frame valid on the dpb is the ouput state, + * then via flush_output all frames can be flushed out. 
+ */ + for (i = 0; i < MAX_REF_PIC_NUM; ++i) { + if ((hevc->m_PIC[i] == NULL) || + (hevc->m_PIC[i]->index == -1) || + (hevc->m_PIC[i]->BUF_index == -1)) + continue; + + if ((hevc->m_PIC[i]->POC != INVALID_POC) && + (hevc->m_PIC[i]->output_ready == 0) && + hevc->m_PIC[i]->referenced && + (hevc->m_PIC[i]->POC >= hevc->decoded_poc)) { + hevc->m_PIC[i]->output_mark = 1; + } + } + + flush_output(hevc, NULL); + //del_timer_sync(&hevc->timer); + notify_v4l_eos(hw_to_vdec(hevc)); + + ret = 1; + } + } + + return ret; +} + +static int hevc_skip_nal(struct hevc_state_s *hevc) +{ + if ((hevc->pic_h == 96) && (hevc->pic_w == 160) && + (get_double_write_mode(hevc) == 0x10)) { + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TXLX) { + if (hevc->skip_nal_count < skip_nal_count) + return 1; + } else { + if (hevc->skip_nal_count < 1) + return 1; + } + } + return 0; +} + +static void aspect_ratio_set(struct hevc_state_s *hevc) +{ + int aspect_ratio_idc = hevc->param.p.aspect_ratio_idc; + + switch (aspect_ratio_idc) { + case 1: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 1; + hevc->cur_pic->sar_width = 1; + break; + case 2: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 11; + hevc->cur_pic->sar_width = 12; + break; + case 3: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 11; + hevc->cur_pic->sar_width = 10; + break; + case 4: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 11; + hevc->cur_pic->sar_width = 16; + break; + case 5: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 33; + hevc->cur_pic->sar_width = 40; + break; + case 6: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 11; + hevc->cur_pic->sar_width = 24; + break; + case 7: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 11; + hevc->cur_pic->sar_width = 20; + break; + case 8: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 11; + hevc->cur_pic->sar_width = 32; + break; + case 9: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 33; + 
hevc->cur_pic->sar_width = 80; + break; + case 10: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 11; + hevc->cur_pic->sar_width = 18; + break; + case 11: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 11; + hevc->cur_pic->sar_width = 15; + break; + case 12: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 33; + hevc->cur_pic->sar_width = 64; + break; + case 13: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 99; + hevc->cur_pic->sar_width = 160; + break; + case 14: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 3; + hevc->cur_pic->sar_width = 4; + break; + case 15: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 2; + hevc->cur_pic->sar_width = 3; + break; + case 16: + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 1; + hevc->cur_pic->sar_width = 2; + break; + default: + + hevc->frame_ar = 0x3ff; + hevc->cur_pic->sar_height = 1; + hevc->cur_pic->sar_width = 1; + break; + } + +} +static irqreturn_t vh265_isr_thread_fn(int irq, void *data) +{ + struct hevc_state_s *hevc = (struct hevc_state_s *) data; + unsigned int dec_status = hevc->dec_status; + int i, ret; + + struct vdec_s *vdec = hw_to_vdec(hevc); + + if (dec_status == HEVC_SLICE_SEGMENT_DONE) { + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_START); + } + else if (dec_status == HEVC_DECPIC_DATA_DONE) { + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_THREAD_PIC_DONE_START); + } + + if (hevc->eos) + return IRQ_HANDLED; + if ( +#ifdef MULTI_INSTANCE_SUPPORT + (!hevc->m_ins_flag) && +#endif + hevc->error_flag == 1) { + if ((error_handle_policy & 0x10) == 0) { + if (hevc->cur_pic) { + int current_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff; + if (current_lcu_idx < + ((hevc->lcu_x_num*hevc->lcu_y_num)-1)) + hevc->cur_pic->error_mark = 1; + + } + } + if ((error_handle_policy & 1) == 0) { + hevc->error_skip_nal_count = 1; + /* manual search nal, skip error_skip_nal_count + * of nal and trigger the 
HEVC_NAL_SEARCH_DONE irq + */ + WRITE_VREG(NAL_SEARCH_CTL, + (error_skip_nal_count << 4) | 0x1); + } else { + hevc->error_skip_nal_count = error_skip_nal_count; + WRITE_VREG(NAL_SEARCH_CTL, 0x1);/* manual parser NAL */ + } + if ((get_dbg_flag(hevc) & H265_DEBUG_NO_EOS_SEARCH_DONE) +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + || vdec->master + || vdec->slave +#endif + ) { + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | 0x10000); + } + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) + | ((parser_sei_enable & 0x7) << 17)); +/*#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION*/ + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | + ((parser_dolby_vision_enable & 0x1) << 20)); +/*#endif*/ + config_decode_mode(hevc); + /* search new nal */ + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); + + /* hevc_print(hevc, 0, + *"%s: error handle\n", __func__); + */ + hevc->error_flag = 2; + return IRQ_HANDLED; + } else if ( +#ifdef MULTI_INSTANCE_SUPPORT + (!hevc->m_ins_flag) && +#endif + hevc->error_flag == 3) { + hevc_print(hevc, 0, "error_flag=3, hevc_recover\n"); + hevc_recover(hevc); + hevc->error_flag = 0; + + if ((error_handle_policy & 0x10) == 0) { + if (hevc->cur_pic) { + int current_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff; + if (current_lcu_idx < + ((hevc->lcu_x_num*hevc->lcu_y_num)-1)) + hevc->cur_pic->error_mark = 1; + + } + } + if ((error_handle_policy & 1) == 0) { + /* need skip some data when + * error_flag of 3 is triggered, + */ + /* to avoid hevc_recover() being called + * for many times at the same bitstream position + */ + hevc->error_skip_nal_count = 1; + /* manual search nal, skip error_skip_nal_count + * of nal and trigger the HEVC_NAL_SEARCH_DONE irq + */ + WRITE_VREG(NAL_SEARCH_CTL, + (error_skip_nal_count << 4) | 0x1); + } + + if ((error_handle_policy & 0x2) == 0) { + hevc->have_vps = 1; + hevc->have_sps = 1; + 
hevc->have_pps = 1; + } + return IRQ_HANDLED; + } + if (!hevc->m_ins_flag) { + i = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + if ((hevc->shift_byte_count_lo & (1 << 31)) + && ((i & (1 << 31)) == 0)) + hevc->shift_byte_count_hi++; + hevc->shift_byte_count_lo = i; + } +#ifdef MULTI_INSTANCE_SUPPORT + mutex_lock(&hevc->chunks_mutex); + if ((dec_status == HEVC_DECPIC_DATA_DONE || + dec_status == HEVC_FIND_NEXT_PIC_NAL || + dec_status == HEVC_FIND_NEXT_DVEL_NAL) + && (hevc->chunk)) { + hevc->cur_pic->pts = hevc->chunk->pts; + hevc->cur_pic->pts64 = hevc->chunk->pts64; + hevc->cur_pic->timestamp = hevc->chunk->timestamp; + } + mutex_unlock(&hevc->chunks_mutex); + + if (dec_status == HEVC_DECODE_BUFEMPTY || + dec_status == HEVC_DECODE_BUFEMPTY2) { + if (hevc->m_ins_flag) { + read_decode_info(hevc); + if (vdec_frame_based(hw_to_vdec(hevc))) { + hevc->empty_flag = 1; + /*suffix sei or dv meta*/ + set_aux_data(hevc, hevc->cur_pic, 1, 0); + goto pic_done; + } else { + if ( +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + vdec->master || + vdec->slave || +#endif + (data_resend_policy & 0x1)) { + hevc->dec_result = DEC_RESULT_AGAIN; + amhevc_stop(); + restore_decode_state(hevc); + } else + hevc->dec_result = DEC_RESULT_GET_DATA; + } + reset_process_time(hevc); + vdec_schedule_work(&hevc->work); + } + return IRQ_HANDLED; + } else if ((dec_status == HEVC_SEARCH_BUFEMPTY) || + (dec_status == HEVC_NAL_DECODE_DONE) + ) { + if (hevc->m_ins_flag) { + read_decode_info(hevc); + if (vdec_frame_based(hw_to_vdec(hevc))) { + /*hevc->dec_result = DEC_RESULT_GET_DATA;*/ + hevc->empty_flag = 1; + /*suffix sei or dv meta*/ + set_aux_data(hevc, hevc->cur_pic, 1, 0); + goto pic_done; + } else { + hevc->dec_result = DEC_RESULT_AGAIN; + amhevc_stop(); + restore_decode_state(hevc); + } + + reset_process_time(hevc); + vdec_schedule_work(&hevc->work); + } + + return IRQ_HANDLED; + } else if (dec_status == HEVC_DECPIC_DATA_DONE) { + if (hevc->m_ins_flag) { + struct PIC_s *pic; + struct PIC_s 
*pic_display; + int decoded_poc; + + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_time = + local_clock() - vdec->mvfrm->hw_decode_start; +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + if (hevc->detbuf_adr_virt && hevc->delrefill_check + && READ_VREG(HEVC_SAO_DBG_MODE0)) + hevc->delrefill_check = 2; + } +#endif + hevc->empty_flag = 0; +pic_done: + if (vdec->master == NULL && vdec->slave == NULL && + hevc->empty_flag == 0) { + hevc->over_decode = + (READ_VREG(HEVC_SHIFT_STATUS) >> 15) & 0x1; + if (hevc->over_decode) + hevc_print(hevc, 0, + "!!!Over decode %d\n", __LINE__); + } + if (input_frame_based(hw_to_vdec(hevc)) && + frmbase_cont_bitlevel != 0 && + (hevc->decode_size > READ_VREG(HEVC_SHIFT_BYTE_COUNT)) && + (hevc->decode_size - (READ_VREG(HEVC_SHIFT_BYTE_COUNT)) + > frmbase_cont_bitlevel)) { + check_pic_decoded_error(hevc, READ_VREG(HEVC_PARSER_LCU_START) & 0xffffff); + /*handle the case: multi pictures in one packet*/ + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s has more data index= %d, size=0x%x shiftcnt=0x%x)\n", + __func__, + hevc->decode_idx, hevc->decode_size, + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + start_process_time(hevc); + return IRQ_HANDLED; + } + + read_decode_info(hevc); + get_picture_qos_info(hevc); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + hevc->start_parser_type = 0; + hevc->switch_dvlayer_flag = 0; +#endif + hevc->decoded_poc = hevc->curr_POC; + hevc->decoding_pic = NULL; + hevc->dec_result = DEC_RESULT_DONE; +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) + if (hevc->delrefill_check != 2) +#endif + + amhevc_stop(); + + reset_process_time(hevc); + + if ((!input_stream_based(vdec) && + hevc->vf_pre_count == 0) || hevc->ip_mode) { + decoded_poc = hevc->curr_POC; + pic = get_pic_by_POC(hevc, decoded_poc); + if (pic && (pic->POC != INVALID_POC)) { + /*PB skip control */ + if 
(pic->error_mark == 0 + && hevc->PB_skip_mode == 1) { + /* start decoding after + * first I + */ + hevc->ignore_bufmgr_error |= 0x1; + } + if (hevc->ignore_bufmgr_error & 1) { + if (hevc->PB_skip_count_after_decoding > 0) { + hevc->PB_skip_count_after_decoding--; + } else { + /* start displaying */ + hevc->ignore_bufmgr_error |= 0x2; + } + } + if (hevc->mmu_enable + && ((hevc->double_write_mode & 0x10) == 0)) { + if (!hevc->m_ins_flag) { + hevc->used_4k_num = + READ_VREG(HEVC_SAO_MMU_STATUS) >> 16; + if ((!is_skip_decoding(hevc, pic)) && + (hevc->used_4k_num >= 0) && + (hevc->cur_pic->scatter_alloc + == 1)) + recycle_mmu_buf_tail(hevc, false); + } + } + + pic->output_mark = 1; + pic->recon_mark = 1; + if (vdec->mvfrm) { + pic->frame_size = + vdec->mvfrm->frame_size; + pic->hw_decode_time = + (u32)vdec->mvfrm->hw_decode_time; + } + } + /*Detects the first frame whether has an over decode error*/ + if (vdec->master == NULL && vdec->slave == NULL && + hevc->empty_flag == 0) { + hevc->over_decode = + (READ_VREG(HEVC_SHIFT_STATUS) >> 15) & 0x1; + if (hevc->over_decode) + hevc_print(hevc, 0, + "!!!Over decode %d\n", __LINE__); + } + check_pic_decoded_error(hevc, + READ_VREG(HEVC_PARSER_LCU_START) & 0xffffff); + if (hevc->cur_pic != NULL && + (READ_VREG(HEVC_PARSER_LCU_START) & 0xffffff) == 0 + && (hevc->lcu_x_num * hevc->lcu_y_num != 1)) + hevc->cur_pic->error_mark = 1; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +force_output: +#endif + pic_display = output_pic(hevc, 1); + if (pic_display) { + if ((pic_display->error_mark && + ((hevc->ignore_bufmgr_error & + 0x2) == 0)) + || (get_dbg_flag(hevc) & + H265_DEBUG_DISPLAY_CUR_FRAME) + || (get_dbg_flag(hevc) & + H265_DEBUG_NO_DISPLAY)) { + pic_display->output_ready = 0; + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] Display: POC %d, ", + pic_display->POC); + hevc_print_cont(hevc, 0, + "decoding index %d ==> ", + pic_display-> + decode_idx); + hevc_print_cont(hevc, 0, + "Debug or 
err,recycle it\n"); + } + } else { + if ((pic_display-> + slice_type != 2) && !pic_display->ip_mode) { + pic_display->output_ready = 0; + } else { + prepare_display_buf + (hw_to_vdec(hevc), + pic_display); + hevc->first_pic_flag = 1; + } + } + } + } + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + vdec_schedule_work(&hevc->work); + } + + return IRQ_HANDLED; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + } else if (dec_status == HEVC_FIND_NEXT_PIC_NAL || + dec_status == HEVC_FIND_NEXT_DVEL_NAL) { + if (hevc->m_ins_flag) { + unsigned char next_parser_type = + READ_HREG(CUR_NAL_UNIT_TYPE) & 0xff; + read_decode_info(hevc); + + if (vdec->slave && + dec_status == HEVC_FIND_NEXT_DVEL_NAL) { + /*cur is base, found enhance*/ + struct hevc_state_s *hevc_el = + (struct hevc_state_s *) + vdec->slave->private; + hevc->switch_dvlayer_flag = 1; + hevc->no_switch_dvlayer_count = 0; + hevc_el->start_parser_type = + next_parser_type; + hevc_print(hevc, H265_DEBUG_DV, + "switch (poc %d) to el\n", + hevc->cur_pic ? + hevc->cur_pic->POC : + INVALID_POC); + } else if (vdec->master && + dec_status == HEVC_FIND_NEXT_PIC_NAL) { + /*cur is enhance, found base*/ + struct hevc_state_s *hevc_ba = + (struct hevc_state_s *) + vdec->master->private; + hevc->switch_dvlayer_flag = 1; + hevc->no_switch_dvlayer_count = 0; + hevc_ba->start_parser_type = + next_parser_type; + hevc_print(hevc, H265_DEBUG_DV, + "switch (poc %d) to bl\n", + hevc->cur_pic ? + hevc->cur_pic->POC : + INVALID_POC); + } else { + hevc->switch_dvlayer_flag = 0; + hevc->start_parser_type = + next_parser_type; + hevc->no_switch_dvlayer_count++; + hevc_print(hevc, H265_DEBUG_DV, + "%s: no_switch_dvlayer_count = %d\n", + vdec->master ? 
"el" : "bl", + hevc->no_switch_dvlayer_count); + if (vdec->slave && + dolby_el_flush_th != 0 && + hevc->no_switch_dvlayer_count > + dolby_el_flush_th) { + struct hevc_state_s *hevc_el = + (struct hevc_state_s *) + vdec->slave->private; + struct PIC_s *el_pic; + check_pic_decoded_error(hevc_el, + hevc_el->pic_decoded_lcu_idx); + el_pic = get_pic_by_POC(hevc_el, + hevc_el->curr_POC); + hevc_el->curr_POC = INVALID_POC; + hevc_el->m_pocRandomAccess = MAX_INT; + flush_output(hevc_el, el_pic); + hevc_el->decoded_poc = INVALID_POC; /* + already call flush_output*/ + hevc_el->decoding_pic = NULL; + hevc->no_switch_dvlayer_count = 0; + if (get_dbg_flag(hevc) & H265_DEBUG_DV) + hevc_print(hevc, 0, + "no el anymore, flush_output el\n"); + } + } + hevc->decoded_poc = hevc->curr_POC; + hevc->decoding_pic = NULL; + hevc->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + reset_process_time(hevc); + if (aux_data_is_avaible(hevc)) + dolby_get_meta(hevc); + if(hevc->cur_pic && hevc->cur_pic->slice_type == 2 && + hevc->vf_pre_count == 0) { + hevc_print(hevc, 0, + "first slice_type %x no_switch_dvlayer_count %x\n", + hevc->cur_pic->slice_type, + hevc->no_switch_dvlayer_count); + goto force_output; + } + vdec_schedule_work(&hevc->work); + } + + return IRQ_HANDLED; +#endif + } + +#endif + + if (dec_status == HEVC_SEI_DAT) { + if (!hevc->m_ins_flag) { + int payload_type = + READ_HREG(CUR_NAL_UNIT_TYPE) & 0xffff; + int payload_size = + (READ_HREG(CUR_NAL_UNIT_TYPE) >> 16) & 0xffff; + process_nal_sei(hevc, + payload_type, payload_size); + } + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_SEI_DAT_DONE); + } else if (dec_status == HEVC_NAL_SEARCH_DONE) { + int naltype = READ_HREG(CUR_NAL_UNIT_TYPE); + int parse_type = HEVC_DISCARD_NAL; + + hevc->error_watchdog_count = 0; + hevc->error_skip_nal_wt_cnt = 0; +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) + reset_process_time(hevc); +#endif + if (slice_parse_begin > 0 && + get_dbg_flag(hevc) & H265_DEBUG_DISCARD_NAL) { + hevc_print(hevc, 0, + "nal 
type %d, discard %d\n", naltype, + slice_parse_begin); + if (naltype <= NAL_UNIT_CODED_SLICE_CRA) + slice_parse_begin--; + } + if (naltype == NAL_UNIT_EOS) { + struct PIC_s *pic; + bool eos_in_head = false; + + hevc_print(hevc, 0, "get NAL_UNIT_EOS, flush output\n"); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if ((vdec_dual(vdec)) && aux_data_is_avaible(hevc)) { + if (hevc->decoding_pic) + dolby_get_meta(hevc); + } +#endif + /*Detects frame whether has an over decode error*/ + if (vdec->master == NULL && vdec->slave == NULL && + hevc->empty_flag == 0 && input_stream_based(vdec)) { + hevc->over_decode = + (READ_VREG(HEVC_SHIFT_STATUS) >> 15) & 0x1; + if (hevc->over_decode) + hevc_print(hevc, 0, + "!!!Over decode %d\n", __LINE__); + } + check_pic_decoded_error(hevc, + hevc->pic_decoded_lcu_idx); + pic = get_pic_by_POC(hevc, hevc->curr_POC); + hevc->curr_POC = INVALID_POC; + /* add to fix RAP_B_Bossen_1 */ + hevc->m_pocRandomAccess = MAX_INT; + flush_output(hevc, pic); + clear_poc_flag(hevc); + if (input_frame_based(vdec)) { + u32 shiftbyte = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + if (shiftbyte < 0x8 && (hevc->decode_size - shiftbyte) > 0x100) { + hevc_print(hevc, 0," shiftbytes 0x%x decode_size 0x%x\n", shiftbyte, hevc->decode_size); + eos_in_head = true; + } + } + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_DISCARD_NAL); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); + + /* eos is in the head of the chunk and followed by sps/pps/IDR + * so need to go on decoding + */ + if (eos_in_head) + return IRQ_HANDLED; + +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + hevc->decoded_poc = INVALID_POC; /* + already call flush_output*/ + hevc->decoding_pic = NULL; + hevc->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + + vdec_schedule_work(&hevc->work); + } +#endif + return IRQ_HANDLED; + } + + if ( +#ifdef MULTI_INSTANCE_SUPPORT + (!hevc->m_ins_flag) && +#endif + hevc->error_skip_nal_count > 0) { + hevc_print(hevc, 0, + "nal 
type %d, discard %d\n", naltype, + hevc->error_skip_nal_count); + hevc->error_skip_nal_count--; + if (hevc->error_skip_nal_count == 0) { + hevc_recover(hevc); + hevc->error_flag = 0; + if ((error_handle_policy & 0x2) == 0) { + hevc->have_vps = 1; + hevc->have_sps = 1; + hevc->have_pps = 1; + } + return IRQ_HANDLED; + } + } else if (naltype == NAL_UNIT_VPS) { + parse_type = HEVC_NAL_UNIT_VPS; + hevc->have_vps = 1; +#ifdef ERROR_HANDLE_DEBUG + if (dbg_nal_skip_flag & 1) + parse_type = HEVC_DISCARD_NAL; +#endif + } else if (hevc->have_vps) { + if (naltype == NAL_UNIT_SPS) { + parse_type = HEVC_NAL_UNIT_SPS; + hevc->have_sps = 1; +#ifdef ERROR_HANDLE_DEBUG + if (dbg_nal_skip_flag & 2) + parse_type = HEVC_DISCARD_NAL; +#endif + } else if (naltype == NAL_UNIT_PPS) { + parse_type = HEVC_NAL_UNIT_PPS; + hevc->have_pps = 1; +#ifdef ERROR_HANDLE_DEBUG + if (dbg_nal_skip_flag & 4) + parse_type = HEVC_DISCARD_NAL; +#endif + } else if (hevc->have_sps && hevc->have_pps) { + int seg = HEVC_NAL_UNIT_CODED_SLICE_SEGMENT; + + if ((naltype == NAL_UNIT_CODED_SLICE_IDR) || + (naltype == + NAL_UNIT_CODED_SLICE_IDR_N_LP) + || (naltype == + NAL_UNIT_CODED_SLICE_CRA) + || (naltype == + NAL_UNIT_CODED_SLICE_BLA) + || (naltype == + NAL_UNIT_CODED_SLICE_BLANT) + || (naltype == + NAL_UNIT_CODED_SLICE_BLA_N_LP) + ) { + if (slice_parse_begin > 0) { + hevc_print(hevc, 0, + "discard %d, for debugging\n", + slice_parse_begin); + slice_parse_begin--; + } else { + parse_type = seg; + } + hevc->have_valid_start_slice = 1; + } else if (naltype <= + NAL_UNIT_CODED_SLICE_CRA + && (hevc->have_valid_start_slice + || (hevc->PB_skip_mode != 3))) { + if (slice_parse_begin > 0) { + hevc_print(hevc, 0, + "discard %d, dd\n", + slice_parse_begin); + slice_parse_begin--; + } else + parse_type = seg; + + } + } + } + if (hevc->have_vps && hevc->have_sps && hevc->have_pps + && hevc->have_valid_start_slice && + hevc->error_flag == 0) { + if ((get_dbg_flag(hevc) & + H265_DEBUG_MAN_SEARCH_NAL) == 0 + /* && 
(!hevc->m_ins_flag)*/) { + /* auot parser NAL; do not check + *vps/sps/pps/idr + */ + WRITE_VREG(NAL_SEARCH_CTL, 0x2); + } + + if ((get_dbg_flag(hevc) & + H265_DEBUG_NO_EOS_SEARCH_DONE) +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + || vdec->master + || vdec->slave +#endif + ) { + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | + 0x10000); + } + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) + | ((parser_sei_enable & 0x7) << 17)); +/*#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION*/ + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | + ((parser_dolby_vision_enable & 0x1) << 20)); +/*#endif*/ + config_decode_mode(hevc); + } + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "naltype = %d parse_type %d\n %d %d %d %d\n", + naltype, parse_type, hevc->have_vps, + hevc->have_sps, hevc->have_pps, + hevc->have_valid_start_slice); + } + + WRITE_VREG(HEVC_DEC_STATUS_REG, parse_type); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) + start_process_time(hevc); +#endif + } else if (dec_status == HEVC_SLICE_SEGMENT_DONE) { +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + reset_process_time(hevc); + read_decode_info(hevc); + + } +#endif + if (hevc->start_decoding_time > 0) { + u32 process_time = 1000* + (jiffies - hevc->start_decoding_time)/HZ; + if (process_time > max_decoding_time) + max_decoding_time = process_time; + } + + hevc->error_watchdog_count = 0; + if (hevc->pic_list_init_flag == 2) { + hevc->pic_list_init_flag = 3; + hevc_print(hevc, 0, "set pic_list_init_flag to 3\n"); + if (hevc->kpi_first_i_comming == 0) { + hevc->kpi_first_i_comming = 1; + pr_debug("[vdec_kpi][%s] First I frame coming.\n", + __func__); + } + } else if (hevc->wait_buf == 0) { + u32 vui_time_scale; + u32 vui_num_units_in_tick; + unsigned char reconfig_flag = 0; + + if (get_dbg_flag(hevc) & H265_DEBUG_SEND_PARAM_WITH_REG) + 
get_rpm_param(&hevc->param); + else { + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_START); + for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) { + int ii; + + for (ii = 0; ii < 4; ii++) { + hevc->param.l.data[i + ii] = + hevc->rpm_ptr[i + 3 + - ii]; + } + } + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_END); +#ifdef SEND_LMEM_WITH_RPM + check_head_error(hevc); +#endif + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) { + hevc_print(hevc, 0, + "rpm_param: (%d)\n", hevc->slice_idx); + hevc->slice_idx++; + for (i = 0; i < (RPM_END - RPM_BEGIN); i++) { + hevc_print_cont(hevc, 0, + "%04x ", hevc->param.l.data[i]); + if (((i + 1) & 0xf) == 0) + hevc_print_cont(hevc, 0, "\n"); + } + + hevc_print(hevc, 0, + "vui_timing_info: %x, %x, %x, %x\n", + hevc->param.p.vui_num_units_in_tick_hi, + hevc->param.p.vui_num_units_in_tick_lo, + hevc->param.p.vui_time_scale_hi, + hevc->param.p.vui_time_scale_lo); + } + + if (hevc->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hevc->v4l2_ctx); + if (!v4l_res_change(hevc, &hevc->param)) { + if (ctx->param_sets_from_ucode && !hevc->v4l_params_parsed) { + struct aml_vdec_ps_infos ps; + int width = hevc->param.p.pic_width_in_luma_samples; + int height = hevc->param.p.pic_height_in_luma_samples; + int log = hevc->param.p.log2_min_coding_block_size_minus3; + int log_s = hevc->param.p.log2_diff_max_min_coding_block_size; + + hevc->pic_w = width; + hevc->pic_h = height; + hevc->lcu_size = 1 << (log + 3 + log_s); + + pr_debug("set ucode parse\n"); + if (get_valid_double_write_mode(hevc) != 16) { + struct vdec_comp_buf_info info; + + get_comp_buf_info(hevc, &info); + vdec_v4l_set_comp_buf_info(ctx, &info); + } + + vh265_get_ps_info(hevc, &hevc->param, &ps); + /*notice the v4l2 codec.*/ + vdec_v4l_set_ps_infos(ctx, &ps); + hevc->v4l_params_parsed = true; + hevc->dec_result = DEC_RESULT_AGAIN; + amhevc_stop(); + restore_decode_state(hevc); + 
reset_process_time(hevc); + vdec_schedule_work(&hevc->work); + return IRQ_HANDLED; + } + }else { + pr_debug("resolution change\n"); + hevc->dec_result = DEC_RESULT_AGAIN; + amhevc_stop(); + restore_decode_state(hevc); + reset_process_time(hevc); + vdec_schedule_work(&hevc->work); + return IRQ_HANDLED; + } + } + + if ( +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + vdec->master == NULL && + vdec->slave == NULL && +#endif + aux_data_is_avaible(hevc) + ) { + + if (get_dbg_flag(hevc) & + H265_DEBUG_PRINT_SEI) + dump_aux_buf(hevc); + } + + vui_time_scale = + (u32)(hevc->param.p.vui_time_scale_hi << 16) | + hevc->param.p.vui_time_scale_lo; + vui_num_units_in_tick = + (u32)(hevc->param. + p.vui_num_units_in_tick_hi << 16) | + hevc->param. + p.vui_num_units_in_tick_lo; + if (hevc->bit_depth_luma != + ((hevc->param.p.bit_depth & 0xf) + 8)) { + reconfig_flag = 1; + hevc_print(hevc, 0, "Bit depth luma = %d\n", + (hevc->param.p.bit_depth & 0xf) + 8); + } + if (hevc->bit_depth_chroma != + (((hevc->param.p.bit_depth >> 4) & 0xf) + 8)) { + reconfig_flag = 1; + hevc_print(hevc, 0, "Bit depth chroma = %d\n", + ((hevc->param.p.bit_depth >> 4) & + 0xf) + 8); + } + hevc->bit_depth_luma = + (hevc->param.p.bit_depth & 0xf) + 8; + hevc->bit_depth_chroma = + ((hevc->param.p.bit_depth >> 4) & 0xf) + 8; + bit_depth_luma = hevc->bit_depth_luma; + bit_depth_chroma = hevc->bit_depth_chroma; +#ifdef SUPPORT_10BIT + if (hevc->bit_depth_luma == 8 && + hevc->bit_depth_chroma == 8 && + enable_mem_saving) + hevc->mem_saving_mode = 1; + else + hevc->mem_saving_mode = 0; +#endif + if (reconfig_flag && + (get_double_write_mode(hevc) & 0x10) == 0) + init_decode_head_hw(hevc); + + if ((vui_time_scale != 0) + && (vui_num_units_in_tick != 0)) { + hevc->frame_dur = + div_u64(96000ULL * + vui_num_units_in_tick, + vui_time_scale); + if (hevc->get_frame_dur != true) + vdec_schedule_work( + &hevc->notify_work); + + hevc->get_frame_dur = true; + //hevc->gvs->frame_dur = hevc->frame_dur; + } + + if 
(hevc->video_signal_type != + ((hevc->param.p.video_signal_type << 16) + | hevc->param.p.color_description)) { + u32 v = hevc->param.p.video_signal_type; + u32 c = hevc->param.p.color_description; +#if 0 + if (v & 0x2000) { + hevc_print(hevc, 0, + "video_signal_type present:\n"); + hevc_print(hevc, 0, " %s %s\n", + video_format_names[(v >> 10) & 7], + ((v >> 9) & 1) ? + "full_range" : "limited"); + if (v & 0x100) { + hevc_print(hevc, 0, + " color_description present:\n"); + hevc_print(hevc, 0, + " color_primarie = %s\n", + color_primaries_names + [v & 0xff]); + hevc_print(hevc, 0, + " transfer_characteristic = %s\n", + transfer_characteristics_names + [(c >> 8) & 0xff]); + hevc_print(hevc, 0, + " matrix_coefficient = %s\n", + matrix_coeffs_names[c & 0xff]); + } + } +#endif + hevc->video_signal_type = (v << 16) | c; + video_signal_type = hevc->video_signal_type; + } + + if (use_cma && + (hevc->param.p.slice_segment_address == 0) + && (hevc->pic_list_init_flag == 0)) { + int log = hevc->param.p.log2_min_coding_block_size_minus3; + int log_s = hevc->param.p.log2_diff_max_min_coding_block_size; + + hevc->pic_w = hevc->param.p.pic_width_in_luma_samples; + hevc->pic_h = hevc->param.p.pic_height_in_luma_samples; + hevc->lcu_size = 1 << (log + 3 + log_s); + hevc->lcu_size_log2 = log2i(hevc->lcu_size); + if (performance_profile &&( (!is_oversize(hevc->pic_w, hevc->pic_h)) && IS_8K_SIZE(hevc->pic_w,hevc->pic_h))) + hevc->performance_profile = 1; + else + hevc->performance_profile = 0; + hevc_print(hevc, 0, "hevc->performance_profile %d\n", hevc->performance_profile); + if (hevc->pic_w == 0 || hevc->pic_h == 0 + || hevc->lcu_size == 0 + || is_oversize(hevc->pic_w, hevc->pic_h) + || hevc_skip_nal(hevc)) { + /* skip search next start code */ + WRITE_VREG(HEVC_WAIT_FLAG, READ_VREG(HEVC_WAIT_FLAG) + & (~0x2)); + if ((hevc->pic_h == 96) && (hevc->pic_w == 160)) + hevc->skip_nal_count++; + hevc->skip_flag = 1; + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + /* Interrupt 
Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) + start_process_time(hevc); +#endif + } else { + hevc->sps_num_reorder_pics_0 = + hevc->param.p.sps_num_reorder_pics_0; + hevc->ip_mode = hevc->low_latency_flag ? true : + (!hevc->sps_num_reorder_pics_0 && + !(vdec->slave || vdec->master) && + !disable_ip_mode) ? true : false; + hevc->pic_list_init_flag = 1; + if ((!IS_4K_SIZE(hevc->pic_w, hevc->pic_h)) && + ((hevc->param.p.profile_etc & 0xc) == 0x4) + && (interlace_enable != 0)) { + hevc->double_write_mode = 1; + hevc->mmu_enable = 1; + hevc->interlace_flag = 1; + hevc->frame_ar = (hevc->pic_h * 0x100 / hevc->pic_w) * 2; + hevc_print(hevc, 0, + "interlace (%d, %d), profile_etc %x, ar 0x%x, dw %d\n", + hevc->pic_w, hevc->pic_h, hevc->param.p.profile_etc, hevc->frame_ar, + get_double_write_mode(hevc)); + /* When dw changed from 0x10 to 1, the mmu_box is NULL */ + if (!hevc->mmu_box && init_mmu_buffers(hevc, 1) != 0) { + hevc->dec_result = DEC_RESULT_FORCE_EXIT; + hevc->fatal_error |= + DECODER_FATAL_ERROR_NO_MEM; + vdec_schedule_work(&hevc->work); + hevc_print(hevc, + 0, "can not alloc mmu box, force exit\n"); + return IRQ_HANDLED; + } + if (hevc->frame_mmu_map_addr == NULL) { + hevc->frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + get_frame_mmu_map_size(), + &hevc->frame_mmu_map_phy_addr, GFP_KERNEL); + if (hevc->frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return IRQ_HANDLED; + } + memset(hevc->frame_mmu_map_addr, 0, get_frame_mmu_map_size()); + } + } +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + vdec_schedule_work(&hevc->work); + } else +#endif + up(&h265_sema); + hevc_print(hevc, 0, "set pic_list_init_flag 1\n"); + } + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + return IRQ_HANDLED; + } + +} + ret = + hevc_slice_segment_header_process(hevc, + &hevc->param, decode_pic_begin); + 
if (ret < 0) { +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + hevc->wait_buf = 0; + hevc->dec_result = DEC_RESULT_AGAIN; + amhevc_stop(); + restore_decode_state(hevc); + reset_process_time(hevc); + vdec_schedule_work(&hevc->work); + return IRQ_HANDLED; + } +#else + ; +#endif + } else if (ret == 0) { + if ((hevc->new_pic) && (hevc->cur_pic)) { + hevc->cur_pic->stream_offset = + READ_VREG(HEVC_SHIFT_BYTE_COUNT); + hevc_print(hevc, H265_DEBUG_OUT_PTS, + "read stream_offset = 0x%x\n", + hevc->cur_pic->stream_offset); + hevc->cur_pic->aspect_ratio_idc = + hevc->param.p.aspect_ratio_idc; + hevc->cur_pic->sar_width = + hevc->param.p.sar_width; + hevc->cur_pic->sar_height = + hevc->param.p.sar_height; + } + + aspect_ratio_set(hevc); + WRITE_VREG(HEVC_DEC_STATUS_REG, + HEVC_CODED_SLICE_SEGMENT_DAT); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); + + hevc->start_decoding_time = jiffies; +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) + start_process_time(hevc); +#endif +#if 1 + /*to do..., copy aux data to hevc->cur_pic*/ +#endif +#ifdef MULTI_INSTANCE_SUPPORT + } else if (hevc->m_ins_flag) { + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s, bufmgr ret %d skip, DEC_RESULT_DONE\n", + __func__, ret); + hevc->decoded_poc = INVALID_POC; + hevc->decoding_pic = NULL; + hevc->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + reset_process_time(hevc); + vdec_schedule_work(&hevc->work); +#endif + } else { + /* skip, search next start code */ + hevc->gvs->drop_frame_count++; + if (hevc->cur_pic->slice_type == I_SLICE) { + hevc->gvs->i_lost_frames++; + } else if (hevc->cur_pic->slice_type == P_SLICE) { + hevc->gvs->i_lost_frames++; + } else if (hevc->cur_pic->slice_type == B_SLICE) { + hevc->gvs->i_lost_frames++; + } + WRITE_VREG(HEVC_WAIT_FLAG, READ_VREG(HEVC_WAIT_FLAG) & (~0x2)); + hevc->skip_flag = 1; + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, 
AMRISC_MAIN_REQ); + } + + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + + } else if (dec_status == HEVC_DECODE_OVER_SIZE) { + hevc_print(hevc, 0 , "hevc decode oversize !!\n"); +#ifdef MULTI_INSTANCE_SUPPORT + if (!hevc->m_ins_flag) + debug |= (H265_DEBUG_DIS_LOC_ERROR_PROC | + H265_DEBUG_DIS_SYS_ERROR_PROC); +#endif + hevc->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + } + return IRQ_HANDLED; +} + +static void wait_hevc_search_done(struct hevc_state_s *hevc) +{ + int count = 0; + WRITE_VREG(HEVC_SHIFT_STATUS, 0); + while (READ_VREG(HEVC_STREAM_CONTROL) & 0x2) { + msleep(20); + count++; + if (count > 100) { + hevc_print(hevc, 0, "%s timeout\n", __func__); + break; + } + } +} +static irqreturn_t vh265_isr(int irq, void *data) +{ + int i, temp; + unsigned int dec_status; + struct hevc_state_s *hevc = (struct hevc_state_s *)data; + u32 debug_tag; + dec_status = READ_VREG(HEVC_DEC_STATUS_REG); + + + if (dec_status == HEVC_SLICE_SEGMENT_DONE) { + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_HEAD_DONE); + } + else if (dec_status == HEVC_DECPIC_DATA_DONE) { + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_PIC_DONE); + } + + if (hevc->init_flag == 0) + return IRQ_HANDLED; + hevc->dec_status = dec_status; + if (is_log_enable(hevc)) + add_log(hevc, + "isr: status = 0x%x dec info 0x%x lcu 0x%x shiftbyte 0x%x shiftstatus 0x%x", + dec_status, READ_HREG(HEVC_DECODE_INFO), + READ_VREG(HEVC_MPRED_CURR_LCU), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_STATUS)); + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, + "265 isr dec status = 0x%x dec info 0x%x shiftbyte 0x%x shiftstatus 0x%x\n", + dec_status, READ_HREG(HEVC_DECODE_INFO), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_STATUS)); + + debug_tag = READ_HREG(DEBUG_REG1); + if (debug_tag & 0x10000) { + hevc_print(hevc, 0, + "LMEM<tag %x>:\n", READ_HREG(DEBUG_REG1)); + + if (hevc->mmu_enable) + temp = 0x500; + else + temp = 
0x400; + for (i = 0; i < temp; i += 4) { + int ii; + if ((i & 0xf) == 0) + hevc_print_cont(hevc, 0, "%03x: ", i); + for (ii = 0; ii < 4; ii++) { + hevc_print_cont(hevc, 0, "%04x ", + hevc->lmem_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + hevc_print_cont(hevc, 0, "\n"); + } + + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == hevc->decode_idx) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_HREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hevc->ucode_pause_pos = udebug_pause_pos; + } + else if (debug_tag & 0x20000) + hevc->ucode_pause_pos = 0xffffffff; + if (hevc->ucode_pause_pos) + reset_process_time(hevc); + else + WRITE_HREG(DEBUG_REG1, 0); + } else if (debug_tag != 0) { + hevc_print(hevc, 0, + "dbg%x: %x l/w/r %x %x %x\n", READ_HREG(DEBUG_REG1), + READ_HREG(DEBUG_REG2), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR)); + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == hevc->decode_idx) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_HREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hevc->ucode_pause_pos = udebug_pause_pos; + } + if (hevc->ucode_pause_pos) + reset_process_time(hevc); + else + WRITE_HREG(DEBUG_REG1, 0); + return IRQ_HANDLED; + } + + + if (hevc->pic_list_init_flag == 1) + return IRQ_HANDLED; + + if (!hevc->m_ins_flag) { + if (dec_status == HEVC_OVER_DECODE) { + hevc->over_decode = 1; + hevc_print(hevc, 0, + "isr: over decode\n"), + WRITE_VREG(HEVC_DEC_STATUS_REG, 0); + return IRQ_HANDLED; + } + } + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_END); + return IRQ_WAKE_THREAD; + +} + +static void vh265_set_clk(struct work_struct *work) +{ + struct hevc_state_s *hevc = container_of(work, + struct hevc_state_s, set_clk_work); + + int fps = 96000 / hevc->frame_dur; + + if 
(hevc_source_changed(VFORMAT_HEVC, + hevc->frame_width, hevc->frame_height, fps) > 0) + hevc->saved_resolution = hevc->frame_width * + hevc->frame_height * fps; +} + +static void vh265_check_timer_func(struct timer_list *timer) +{ + struct hevc_state_s *hevc = container_of(timer, + struct hevc_state_s, timer); + unsigned char empty_flag; + unsigned int buf_level; + + enum receviver_start_e state = RECEIVER_INACTIVE; + + if (hevc->init_flag == 0) { + if (hevc->stat & STAT_TIMER_ARM) { + mod_timer(&hevc->timer, jiffies + PUT_INTERVAL); + } + return; + } +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag && + (get_dbg_flag(hevc) & + H265_DEBUG_WAIT_DECODE_DONE_WHEN_STOP) == 0 && + hw_to_vdec(hevc)->next_status == + VDEC_STATUS_DISCONNECTED && + !hevc->is_used_v4l) { + hevc->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hevc->work); + hevc_print(hevc, + 0, "vdec requested to be disconnected\n"); + return; + } + + if (hevc->m_ins_flag) { + if (((get_dbg_flag(hevc) & + H265_DEBUG_DIS_LOC_ERROR_PROC) == 0) && + (decode_timeout_val > 0) && + (hevc->start_process_time > 0) && + ((1000 * (jiffies - hevc->start_process_time) / HZ) + > decode_timeout_val) + ) { + u32 dec_status = READ_VREG(HEVC_DEC_STATUS_REG); + int current_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START)&0xffffff; + if (dec_status == HEVC_CODED_SLICE_SEGMENT_DAT) { + if (hevc->last_lcu_idx == current_lcu_idx) { + if (hevc->decode_timeout_count > 0) + hevc->decode_timeout_count--; + if (hevc->decode_timeout_count == 0) + timeout_process(hevc); + } else + restart_process_time(hevc); + hevc->last_lcu_idx = current_lcu_idx; + } else { + hevc->pic_decoded_lcu_idx = current_lcu_idx; + timeout_process(hevc); + } + } + } else { +#endif + if (hevc->m_ins_flag == 0 && + vf_get_receiver(hevc->provider_name)) { + state = + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + if ((state == RECEIVER_STATE_NULL) + || (state == RECEIVER_STATE_NONE)) + state = RECEIVER_INACTIVE; 
+ } else + state = RECEIVER_INACTIVE; + + empty_flag = (READ_VREG(HEVC_PARSER_INT_STATUS) >> 6) & 0x1; + /* error watchdog */ + if (hevc->m_ins_flag == 0 && + (empty_flag == 0) + && (hevc->pic_list_init_flag == 0 + || hevc->pic_list_init_flag + == 3)) { + /* decoder has input */ + if ((get_dbg_flag(hevc) & + H265_DEBUG_DIS_LOC_ERROR_PROC) == 0) { + + buf_level = READ_VREG(HEVC_STREAM_LEVEL); + /* receiver has no buffer to recycle */ + if ((state == RECEIVER_INACTIVE) && + (kfifo_is_empty(&hevc->display_q) && + buf_level > 0x200) + ) { + if (hevc->error_flag == 0) { + hevc->error_watchdog_count++; + if (hevc->error_watchdog_count == + error_handle_threshold) { + hevc_print(hevc, 0, + "H265 dec err local reset.\n"); + hevc->error_flag = 1; + hevc->error_watchdog_count = 0; + hevc->error_skip_nal_wt_cnt = 0; + hevc-> + error_system_watchdog_count++; + WRITE_VREG + (HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + } + } else if (hevc->error_flag == 2) { + int th = + error_handle_nal_skip_threshold; + hevc->error_skip_nal_wt_cnt++; + if (hevc->error_skip_nal_wt_cnt + == th) { + hevc->error_flag = 3; + hevc->error_watchdog_count = 0; + hevc-> + error_skip_nal_wt_cnt = 0; + WRITE_VREG + (HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + } + } + } + } + + if ((get_dbg_flag(hevc) + & H265_DEBUG_DIS_SYS_ERROR_PROC) == 0) + /* receiver has no buffer to recycle */ + if ((state == RECEIVER_INACTIVE) && + (kfifo_is_empty(&hevc->display_q)) + ) { /* no buffer to recycle */ + if ((get_dbg_flag(hevc) & + H265_DEBUG_DIS_LOC_ERROR_PROC) != + 0) + hevc->error_system_watchdog_count++; + if (hevc->error_system_watchdog_count == + error_handle_system_threshold) { + /* and it lasts for a while */ + hevc_print(hevc, 0, + "H265 dec fatal error watchdog.\n"); + hevc-> + error_system_watchdog_count = 0; + hevc->fatal_error |= DECODER_FATAL_ERROR_UNKNOWN; + } + } + } else { + hevc->error_watchdog_count = 0; + hevc->error_system_watchdog_count = 0; + } +#ifdef MULTI_INSTANCE_SUPPORT + } +#endif + if 
((hevc->ucode_pause_pos != 0) && + (hevc->ucode_pause_pos != 0xffffffff) && + udebug_pause_pos != hevc->ucode_pause_pos) { + hevc->ucode_pause_pos = 0; + WRITE_HREG(DEBUG_REG1, 0); + } + + if (get_dbg_flag(hevc) & H265_DEBUG_DUMP_PIC_LIST) { + dump_pic_list(hevc); + debug &= ~H265_DEBUG_DUMP_PIC_LIST; + } + if (get_dbg_flag(hevc) & H265_DEBUG_TRIG_SLICE_SEGMENT_PROC) { + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); + debug &= ~H265_DEBUG_TRIG_SLICE_SEGMENT_PROC; + } +#ifdef TEST_NO_BUF + if (hevc->wait_buf) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); +#endif + if (get_dbg_flag(hevc) & H265_DEBUG_HW_RESET) { + hevc->error_skip_nal_count = error_skip_nal_count; + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + + debug &= ~H265_DEBUG_HW_RESET; + } + +#ifdef ERROR_HANDLE_DEBUG + if ((dbg_nal_skip_count > 0) && ((dbg_nal_skip_count & 0x10000) != 0)) { + hevc->error_skip_nal_count = dbg_nal_skip_count & 0xffff; + dbg_nal_skip_count &= ~0x10000; + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + } +#endif + + if (radr != 0) { +#ifdef SUPPORT_LONG_TERM_RPS + if ((radr >> 24) != 0) { + int count = radr >> 24; + int adr = radr & 0xffffff; + int i; + for (i = 0; i < count; i++) + pr_info("READ_VREG(%x)=%x\n", adr+i, READ_VREG(adr+i)); + } else +#endif + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + if (dbg_cmd != 0) { + if (dbg_cmd == 1) { + u32 disp_laddr; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB && + get_double_write_mode(hevc) == 0) { + disp_laddr = + READ_VCBUS_REG(AFBC_BODY_BADDR) << 4; + } else { + struct canvas_s cur_canvas; + + canvas_read((READ_VCBUS_REG(VD1_IF0_CANVAS0) + & 0xff), &cur_canvas); + disp_laddr = cur_canvas.addr; + } + hevc_print(hevc, 0, + "current displayed buffer address %x\r\n", + disp_laddr); + } + dbg_cmd = 0; + } + /*don't changed at start.*/ + if (hevc->m_ins_flag == 0 && + 
hevc->get_frame_dur && hevc->show_frame_num > 60 && + hevc->frame_dur > 0 && hevc->saved_resolution != + hevc->frame_width * hevc->frame_height * + (96000 / hevc->frame_dur)) + vdec_schedule_work(&hevc->set_clk_work); + + mod_timer(timer, jiffies + PUT_INTERVAL); +} + +static int h265_task_handle(void *data) +{ + int ret = 0; + struct hevc_state_s *hevc = (struct hevc_state_s *)data; + + set_user_nice(current, -10); + while (1) { + if (use_cma == 0) { + hevc_print(hevc, 0, + "ERROR: use_cma can not be changed dynamically\n"); + } + ret = down_interruptible(&h265_sema); + if ((hevc->init_flag != 0) && (hevc->pic_list_init_flag == 1)) { + init_pic_list(hevc); + init_pic_list_hw(hevc); + init_buf_spec(hevc); + hevc->pic_list_init_flag = 2; + hevc_print(hevc, 0, "set pic_list_init_flag to 2\n"); + + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); + + } + + if (hevc->uninit_list) { + /*USE_BUF_BLOCK*/ + uninit_pic_list(hevc); + hevc_print(hevc, 0, "uninit list\n"); + hevc->uninit_list = 0; +#ifdef USE_UNINIT_SEMA + if (use_cma) { + up(&hevc->h265_uninit_done_sema); + while (!kthread_should_stop()) + msleep(1); + break; + } +#endif + } + } + + return 0; +} + +void vh265_free_cmabuf(void) +{ + struct hevc_state_s *hevc = gHevc; + + mutex_lock(&vh265_mutex); + + if (hevc->init_flag) { + mutex_unlock(&vh265_mutex); + return; + } + + mutex_unlock(&vh265_mutex); +} + +#ifdef MULTI_INSTANCE_SUPPORT +int vh265_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +#else +int vh265_dec_status(struct vdec_info *vstatus) +#endif +{ +#ifdef MULTI_INSTANCE_SUPPORT + struct hevc_state_s *hevc = + (struct hevc_state_s *)vdec->private; +#else + struct hevc_state_s *hevc = gHevc; +#endif + if (!hevc) + return -1; + + vstatus->frame_width = hevc->pic_w; + /* for hevc interlace for disp height x2 */ + vstatus->frame_height = + (hevc->pic_h << hevc->interlace_flag); + if (hevc->frame_dur != 0) + vstatus->frame_rate = ((96000 * 10 / hevc->frame_dur) % 10) < 5 ? 
+ 96000 / hevc->frame_dur : (96000 / hevc->frame_dur +1); + else + vstatus->frame_rate = -1; + vstatus->error_count = hevc->gvs->error_frame_count; + vstatus->status = hevc->stat | hevc->fatal_error; + if (!vdec_is_support_4k() && + (IS_4K_SIZE(vstatus->frame_width, vstatus->frame_height)) && + (vstatus->frame_width <= 4096 && vstatus->frame_height <= 2304)) { + vstatus->status |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + } + + vstatus->bit_rate = hevc->gvs->bit_rate; + vstatus->frame_dur = hevc->frame_dur; + if (hevc->gvs) { + vstatus->bit_rate = hevc->gvs->bit_rate; + vstatus->frame_data = hevc->gvs->frame_data; + vstatus->total_data = hevc->gvs->total_data; + vstatus->frame_count = hevc->gvs->frame_count; + vstatus->error_frame_count = hevc->gvs->error_frame_count; + vstatus->drop_frame_count = hevc->gvs->drop_frame_count; + vstatus->i_decoded_frames = hevc->gvs->i_decoded_frames; + vstatus->i_lost_frames = hevc->gvs->i_lost_frames; + vstatus->i_concealed_frames = hevc->gvs->i_concealed_frames; + vstatus->p_decoded_frames = hevc->gvs->p_decoded_frames; + vstatus->p_lost_frames = hevc->gvs->p_lost_frames; + vstatus->p_concealed_frames = hevc->gvs->p_concealed_frames; + vstatus->b_decoded_frames = hevc->gvs->b_decoded_frames; + vstatus->b_lost_frames = hevc->gvs->b_lost_frames; + vstatus->b_concealed_frames = hevc->gvs->b_concealed_frames; + vstatus->samp_cnt = hevc->gvs->samp_cnt; + vstatus->offset = hevc->gvs->offset; + } + + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + vstatus->ratio_control = hevc->ratio_control; + return 0; +} + +int vh265_set_isreset(struct vdec_s *vdec, int isreset) +{ + is_reset = isreset; + return 0; +} + +static int vh265_vdec_info_init(struct hevc_state_s *hevc) +{ + hevc->gvs = kzalloc(sizeof(struct vdec_info), GFP_KERNEL); + //pr_err("[%s line %d] hevc->gvs=0x%p operation\n",__func__, __LINE__, hevc->gvs); + if (NULL == hevc->gvs) { + pr_info("the struct of vdec status malloc failed.\n"); + return 
-ENOMEM; + } + vdec_set_vframe_comm(hw_to_vdec(hevc), DRIVER_NAME); + return 0; +} + +#if 0 +static void H265_DECODE_INIT(void) +{ + /* enable hevc clocks */ + WRITE_VREG(DOS_GCLK_EN3, 0xffffffff); + /* *************************************************************** */ + /* Power ON HEVC */ + /* *************************************************************** */ + /* Powerup HEVC */ + WRITE_VREG(P_AO_RTI_GEN_PWR_SLEEP0, + READ_VREG(P_AO_RTI_GEN_PWR_SLEEP0) & (~(0x3 << 6))); + WRITE_VREG(DOS_MEM_PD_HEVC, 0x0); + WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) | (0x3ffff << 2)); + WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) & (~(0x3ffff << 2))); + /* remove isolations */ + WRITE_VREG(AO_RTI_GEN_PWR_ISO0, + READ_VREG(AO_RTI_GEN_PWR_ISO0) & (~(0x3 << 10))); + +} +#endif + +int vh265_set_trickmode(struct vdec_s *vdec, unsigned long trickmode) +{ + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; + hevc_print(hevc, 0, "[%s %d] trickmode:%lu\n", __func__, __LINE__, trickmode); + + if (trickmode == TRICKMODE_I) { + trickmode_i = 1; + i_only_flag = 0x1; + } else if (trickmode == TRICKMODE_NONE) { + trickmode_i = 0; + i_only_flag = 0x0; + } else if (trickmode == 0x02) { + trickmode_i = 0; + i_only_flag = 0x02; + } else if (trickmode == 0x03) { + trickmode_i = 1; + i_only_flag = 0x03; + } else if (trickmode == 0x07) { + trickmode_i = 1; + i_only_flag = 0x07; + } + //hevc_print(hevc, 0, "i_only_flag: %d trickmode_i:%d\n", i_only_flag, trickmode_i); + + return 0; +} + +static void config_decode_mode(struct hevc_state_s *hevc) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + struct vdec_s *vdec = hw_to_vdec(hevc); +#endif + unsigned decode_mode; +#ifdef HEVC_8K_LFTOFFSET_FIX + if (hevc->performance_profile) + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | (1 << 21)); +#endif + if (!hevc->m_ins_flag) + decode_mode = DECODE_MODE_SINGLE; + else if (vdec_frame_based(hw_to_vdec(hevc))) + decode_mode = + DECODE_MODE_MULTI_FRAMEBASE; +#ifdef 
CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (vdec->slave) { + if (force_bypass_dvenl & 0x80000000) + hevc->bypass_dvenl = force_bypass_dvenl & 0x1; + else + hevc->bypass_dvenl = hevc->bypass_dvenl_enable; + if (dolby_meta_with_el && hevc->bypass_dvenl) { + hevc->bypass_dvenl = 0; + hevc_print(hevc, 0, + "NOT support bypass_dvenl when meta_with_el\n"); + } + if (hevc->bypass_dvenl) + decode_mode = + (hevc->start_parser_type << 8) + | DECODE_MODE_MULTI_STREAMBASE; + else + decode_mode = + (hevc->start_parser_type << 8) + | DECODE_MODE_MULTI_DVBAL; + } else if (vdec->master) + decode_mode = + (hevc->start_parser_type << 8) + | DECODE_MODE_MULTI_DVENL; +#endif + else + decode_mode = + DECODE_MODE_MULTI_STREAMBASE; + + if (hevc->m_ins_flag) + decode_mode |= + (hevc->start_decoding_flag << 16); + /* set MBX0 interrupt flag */ + decode_mode |= (0x80 << 24); + WRITE_VREG(HEVC_DECODE_MODE, decode_mode); + WRITE_VREG(HEVC_DECODE_MODE2, + hevc->rps_set_id); +} + +static void vh265_prot_init(struct hevc_state_s *hevc) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + struct vdec_s *vdec = hw_to_vdec(hevc); +#endif + /* H265_DECODE_INIT(); */ + + hevc_config_work_space_hw(hevc); + + hevc_init_decoder_hw(hevc, 0, 0xffffffff); + + WRITE_VREG(HEVC_WAIT_FLAG, 1); + + /* WRITE_VREG(P_HEVC_MPSR, 1); */ + + /* clear mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 1); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(HEVC_PSCALE_CTRL, 0); + + WRITE_VREG(DEBUG_REG1, 0x0 | (dump_nal << 8)); + + if ((get_dbg_flag(hevc) & + (H265_DEBUG_MAN_SKIP_NAL | + H265_DEBUG_MAN_SEARCH_NAL)) + /*||hevc->m_ins_flag*/ + ) { + WRITE_VREG(NAL_SEARCH_CTL, 0x1); /* manual parser NAL */ + } else { + /* check vps/sps/pps/i-slice in ucode */ + unsigned ctl_val = 0x8; + if (hevc->PB_skip_mode == 0) + ctl_val = 0x4; /* check vps/sps/pps only in ucode */ + else if (hevc->PB_skip_mode == 3) + 
ctl_val = 0x0; /* check vps/sps/pps/idr in ucode */ + /*if (((error_handle_policy & 0x200) == 0) && + input_stream_based(vdec)) + ctl_val = 0x1;*/ + WRITE_VREG(NAL_SEARCH_CTL, ctl_val); + } + if ((get_dbg_flag(hevc) & H265_DEBUG_NO_EOS_SEARCH_DONE) +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + || vdec->master + || vdec->slave +#endif + ) + WRITE_VREG(NAL_SEARCH_CTL, READ_VREG(NAL_SEARCH_CTL) | 0x10000); + + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) + | ((parser_sei_enable & 0x7) << 17)); +/*#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION*/ + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | + ((parser_dolby_vision_enable & 0x1) << 20)); +/*#endif*/ + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + + config_decode_mode(hevc); + config_aux_buf(hevc); +#ifdef SWAP_HEVC_UCODE + if (!tee_enabled() && hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, hevc->mc_dma_handle); + /*pr_info("write swap buffer %x\n", (u32)(hevc->mc_dma_handle));*/ + } +#endif +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + WRITE_VREG(HEVC_SAO_DBG_MODE0, 0); + WRITE_VREG(HEVC_SAO_DBG_MODE1, 0); + } +#endif +} + +static int vh265_local_init(struct hevc_state_s *hevc) +{ + int i; + int ret = -1; + struct vdec_s *vdec = hw_to_vdec(hevc); + +#ifdef DEBUG_PTS + hevc->pts_missed = 0; + hevc->pts_hit = 0; +#endif + hevc->saved_resolution = 0; + hevc->get_frame_dur = false; + hevc->frame_width = hevc->vh265_amstream_dec_info.width; + hevc->frame_height = hevc->vh265_amstream_dec_info.height; + hevc->dec_again_cnt = 0; + + if (is_oversize(hevc->frame_width, hevc->frame_height)) { + pr_info("over size : %u x %u.\n", + hevc->frame_width, hevc->frame_height); + hevc->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + return ret; + } + + if (hevc->max_pic_w && hevc->max_pic_h) { + hevc->is_4k = !(hevc->max_pic_w && hevc->max_pic_h) || + ((hevc->max_pic_w * 
hevc->max_pic_h) > + 1920 * 1088) ? true : false; + } else { + hevc->is_4k = !(hevc->frame_width && hevc->frame_height) || + ((hevc->frame_width * hevc->frame_height) > + 1920 * 1088) ? true : false; + } + + hevc->frame_dur = + (hevc->vh265_amstream_dec_info.rate == + 0) ? 3600 : hevc->vh265_amstream_dec_info.rate; + //hevc->gvs->frame_dur = hevc->frame_dur; + if (hevc->frame_width && hevc->frame_height) + hevc->frame_ar = hevc->frame_height * 0x100 / hevc->frame_width; + + if (i_only_flag) + hevc->i_only = i_only_flag & 0xff; + else if ((unsigned long) hevc->vh265_amstream_dec_info.param + & 0x08) + hevc->i_only = 0x7; + else + hevc->i_only = 0x0; + hevc->error_watchdog_count = 0; + hevc->sei_present_flag = 0; + if (vdec->sys_info) + pts_unstable = ((unsigned long)vdec->sys_info->param + & 0x40) >> 6; + hevc_print(hevc, 0, + "h265:pts_unstable=%d\n", pts_unstable); +/* + *TODO:FOR VERSION + */ + hevc_print(hevc, 0, + "h265: ver (%d,%d) decinfo: %dx%d rate=%d\n", h265_version, + 0, hevc->frame_width, hevc->frame_height, hevc->frame_dur); + + if (hevc->frame_dur == 0) + hevc->frame_dur = 96000 / 24; + + INIT_KFIFO(hevc->display_q); + INIT_KFIFO(hevc->newframe_q); + INIT_KFIFO(hevc->pending_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &hevc->vfpool[i]; + + hevc->vfpool[i].index = -1; + kfifo_put(&hevc->newframe_q, vf); + } + + + ret = hevc_local_init(hevc); + + return ret; +} +#ifdef MULTI_INSTANCE_SUPPORT +static s32 vh265_init(struct vdec_s *vdec) +{ + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; +#else +static s32 vh265_init(struct hevc_state_s *hevc) +{ + +#endif + int ret, size = -1; + int fw_size = 0x1000 * 16; + struct firmware_s *fw = NULL; + + timer_setup(&hevc->timer, vh265_check_timer_func, 0); + + hevc->stat |= STAT_TIMER_INIT; + + if (hevc->m_ins_flag) { +#ifdef USE_UNINIT_SEMA + sema_init(&hevc->h265_uninit_done_sema, 0); +#endif + INIT_WORK(&hevc->work, vh265_work); + INIT_WORK(&hevc->timeout_work, 
vh265_timeout_work); + } + + if (vh265_local_init(hevc) < 0) + return -EBUSY; + + mutex_init(&hevc->chunks_mutex); + INIT_WORK(&hevc->notify_work, vh265_notify_work); + INIT_WORK(&hevc->set_clk_work, vh265_set_clk); + + fw = vzalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + if (hevc->mmu_enable) { + if (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_GXM) + size = get_firmware_data(VIDEO_DEC_HEVC_MMU, fw->data); + else { + if (!hevc->is_4k) { + /* if an older version of the fw was loaded, */ + /* needs try to load noswap fw because the */ + /* old fw package dose not contain the swap fw.*/ + size = get_firmware_data( + VIDEO_DEC_HEVC_MMU_SWAP, fw->data); + if (size < 0) + size = get_firmware_data( + VIDEO_DEC_HEVC_MMU, fw->data); + else if (size) + hevc->is_swap = true; + } else + size = get_firmware_data(VIDEO_DEC_HEVC_MMU, + fw->data); + } + } else + size = get_firmware_data(VIDEO_DEC_HEVC, fw->data); + + if (size < 0) { + pr_err("get firmware fail.\n"); + vfree(fw); + return -1; + } + + fw->len = size; + +#ifdef SWAP_HEVC_UCODE + if (!tee_enabled() && hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + if (hevc->mmu_enable) { + hevc->swap_size = (4 * (4 * SZ_1K)); /*max 4 swap code, each 0x400*/ + hevc->mc_cpu_addr = + dma_alloc_coherent(amports_get_dma_device(), + hevc->swap_size, + &hevc->mc_dma_handle, GFP_KERNEL); + if (!hevc->mc_cpu_addr) { + amhevc_disable(); + pr_info("vh265 mmu swap ucode loaded fail.\n"); + return -ENOMEM; + } + + memcpy((u8 *) hevc->mc_cpu_addr, fw->data + SWAP_HEVC_OFFSET, + hevc->swap_size); + + hevc_print(hevc, 0, + "vh265 mmu ucode swap loaded %x\n", + hevc->mc_dma_handle); + } + } +#endif + +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + //hevc->timer.data = (ulong) hevc; + //hevc->timer.function = vh265_check_timer_func; + hevc->timer.expires = jiffies + PUT_INTERVAL; + + hevc->fw = fw; + hevc->init_flag = 1; + + return 0; + } +#endif + amhevc_enable(); + + if 
(hevc->mmu_enable) + if (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_GXM) + ret = amhevc_loadmc_ex(VFORMAT_HEVC, "h265_mmu", fw->data); + else { + if (!hevc->is_4k) { + /* if an older version of the fw was loaded, */ + /* needs try to load noswap fw because the */ + /* old fw package dose not contain the swap fw. */ + ret = amhevc_loadmc_ex(VFORMAT_HEVC, + "hevc_mmu_swap", fw->data); + if (ret < 0) + ret = amhevc_loadmc_ex(VFORMAT_HEVC, + "h265_mmu", fw->data); + else + hevc->is_swap = true; + } else + ret = amhevc_loadmc_ex(VFORMAT_HEVC, + "h265_mmu", fw->data); + } + else + ret = amhevc_loadmc_ex(VFORMAT_HEVC, NULL, fw->data); + + if (ret < 0) { + amhevc_disable(); + vfree(fw); + pr_err("H265: the %s fw loading failed, err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + return -EBUSY; + } + + vfree(fw); + + hevc->stat |= STAT_MC_LOAD; + +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) + init_detrefill_buf(hevc); +#endif + /* enable AMRISC side protocol */ + vh265_prot_init(hevc); + + if (vdec_request_threaded_irq(VDEC_IRQ_0, vh265_isr, + vh265_isr_thread_fn, + IRQF_ONESHOT,/*run thread on this irq disabled*/ + "vh265-irq", (void *)hevc)) { + hevc_print(hevc, 0, "vh265 irq register error.\n"); + amhevc_disable(); + return -ENOENT; + } + + hevc->stat |= STAT_ISR_REG; + hevc->provider_name = PROVIDER_NAME; + +#ifdef MULTI_INSTANCE_SUPPORT + if (!hevc->is_used_v4l) { + vf_provider_init(&vh265_vf_prov, hevc->provider_name, + &vh265_vf_provider, vdec); + vf_reg_provider(&vh265_vf_prov); + vf_notify_receiver(hevc->provider_name, VFRAME_EVENT_PROVIDER_START, + NULL); + if (hevc->frame_dur != 0) { + if (!is_reset) { + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)hevc->frame_dur)); + fr_hint_status = VDEC_HINTED; + } + } else + fr_hint_status = VDEC_NEED_HINT; + } +#else + vf_provider_init(&vh265_vf_prov, PROVIDER_NAME, &vh265_vf_provider, + hevc); + 
vf_reg_provider(&vh265_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); + if (hevc->frame_dur != 0) { + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)hevc->frame_dur)); + fr_hint_status = VDEC_HINTED; + } else + fr_hint_status = VDEC_NEED_HINT; +#endif + hevc->stat |= STAT_VF_HOOK; + + //hevc->timer.data = (ulong) hevc; + //hevc->timer.function = vh265_check_timer_func; + hevc->timer.expires = jiffies + PUT_INTERVAL; + + add_timer(&hevc->timer); + + hevc->stat |= STAT_TIMER_ARM; + + if (use_cma) { +#ifdef USE_UNINIT_SEMA + sema_init(&hevc->h265_uninit_done_sema, 0); +#endif + if (h265_task == NULL) { + sema_init(&h265_sema, 1); + h265_task = + kthread_run(h265_task_handle, hevc, + "kthread_h265"); + } + } + /* hevc->stat |= STAT_KTHREAD; */ +#if 0 + if (get_dbg_flag(hevc) & H265_DEBUG_FORCE_CLK) { + hevc_print(hevc, 0, "%s force clk\n", __func__); + WRITE_VREG(HEVC_IQIT_CLK_RST_CTRL, + READ_VREG(HEVC_IQIT_CLK_RST_CTRL) | + ((1 << 2) | (1 << 1))); + WRITE_VREG(HEVC_DBLK_CFG0, + READ_VREG(HEVC_DBLK_CFG0) | ((1 << 2) | + (1 << 1) | 0x3fff0000));/* 2,29:16 */ + WRITE_VREG(HEVC_SAO_CTRL1, READ_VREG(HEVC_SAO_CTRL1) | + (1 << 2)); /* 2 */ + WRITE_VREG(HEVC_MPRED_CTRL1, READ_VREG(HEVC_MPRED_CTRL1) | + (1 << 24)); /* 24 */ + WRITE_VREG(HEVC_STREAM_CONTROL, + READ_VREG(HEVC_STREAM_CONTROL) | + (1 << 15)); /* 15 */ + WRITE_VREG(HEVC_CABAC_CONTROL, READ_VREG(HEVC_CABAC_CONTROL) | + (1 << 13)); /* 13 */ + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, + READ_VREG(HEVC_PARSER_CORE_CONTROL) | + (1 << 15)); /* 15 */ + WRITE_VREG(HEVC_PARSER_INT_CONTROL, + READ_VREG(HEVC_PARSER_INT_CONTROL) | + (1 << 15)); /* 15 */ + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + READ_VREG(HEVC_PARSER_IF_CONTROL) | ((1 << 6) | + (1 << 3) | (1 << 1))); /* 6, 3, 1 */ + WRITE_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG, 0xffffffff); /* 31:0 */ + WRITE_VREG(HEVCD_MCRCC_CTL1, READ_VREG(HEVCD_MCRCC_CTL1) | + (1 << 3)); /* 3 */ + } +#endif +#ifdef 
SWAP_HEVC_UCODE + if (!tee_enabled() && hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, hevc->mc_dma_handle); + /*pr_info("write swap buffer %x\n", (u32)(hevc->mc_dma_handle));*/ + } +#endif + +#ifndef MULTI_INSTANCE_SUPPORT + set_vdec_func(&vh265_dec_status); +#endif + amhevc_start(); + + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, 0); + + hevc->stat |= STAT_VDEC_RUN; + hevc->init_flag = 1; + error_handle_threshold = 30; + /* pr_info("%d, vh265_init, RP=0x%x\n", + * __LINE__, READ_VREG(HEVC_STREAM_RD_PTR)); + */ + + return 0; +} + +static int check_dirty_data(struct vdec_s *vdec) +{ + struct hevc_state_s *hevc = + (struct hevc_state_s *)(vdec->private); + struct vdec_input_s *input = &vdec->input; + u32 wp, rp, level; + u32 rp_set; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = hevc->pre_parser_wr_ptr; + + if (wp > rp) + level = wp - rp; + else + level = wp + vdec->input.size - rp; + + if (level > 0x100000) { + u32 skip_size = ((level >> 1) >> 19) << 19; + if (!vdec->input.swap_valid) { + hevc_print(hevc , 0, "h265 start data discard level 0x%x, buffer level 0x%x, RP 0x%x, WP 0x%x\n", + ((level >> 1) >> 19) << 19, level, rp, wp); + if (wp >= rp) { + rp_set = rp + skip_size; + } + else if ((rp + skip_size) < (input->start + input->size)) { + rp_set = rp + skip_size; + } else { + rp_set = rp + skip_size - input->size; + } + STBUF_WRITE(&vdec->vbuf, set_rp, rp_set); + vdec->discard_start_data_flag = 1; + vdec->input.stream_cookie += skip_size; + hevc->dirty_shift_flag = 1; + } + return 1; + } + return 0; +} + +static int check_data_size(struct vdec_s *vdec) +{ + struct hevc_state_s *hw = + (struct hevc_state_s *)(vdec->private); + u32 wp, rp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + + if (wp > rp) + level = wp - rp; + else + level = wp + vdec->input.size - rp ; + + if (level > (vdec->input.size / 2)) + hw->dec_again_cnt++; + + if (hw->dec_again_cnt > 
dirty_again_threshold) { + hevc_print(hw, 0, "h265 data skipped %x\n", level); + hw->dec_again_cnt = 0; + return 1; + } + return 0; +} + +static int vh265_stop(struct hevc_state_s *hevc) +{ + if (get_dbg_flag(hevc) & + H265_DEBUG_WAIT_DECODE_DONE_WHEN_STOP) { + int wait_timeout_count = 0; + + while (READ_VREG(HEVC_DEC_STATUS_REG) == + HEVC_CODED_SLICE_SEGMENT_DAT && + wait_timeout_count < 10){ + wait_timeout_count++; + msleep(20); + } + } + if (hevc->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hevc->stat &= ~STAT_VDEC_RUN; + } + + if (hevc->stat & STAT_ISR_REG) { +#ifdef MULTI_INSTANCE_SUPPORT + if (!hevc->m_ins_flag) +#endif + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0); + vdec_free_irq(VDEC_IRQ_0, (void *)hevc); + hevc->stat &= ~STAT_ISR_REG; + } + + hevc->stat &= ~STAT_TIMER_INIT; + if (hevc->stat & STAT_TIMER_ARM) { + del_timer_sync(&hevc->timer); + hevc->stat &= ~STAT_TIMER_ARM; + } + + if (hevc->stat & STAT_VF_HOOK) { + if (fr_hint_status == VDEC_HINTED) { + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + } + fr_hint_status = VDEC_NO_NEED_HINT; + vf_unreg_provider(&vh265_vf_prov); + hevc->stat &= ~STAT_VF_HOOK; + } + + hevc_local_uninit(hevc); + + if (use_cma) { + hevc->uninit_list = 1; + up(&h265_sema); +#ifdef USE_UNINIT_SEMA + down(&hevc->h265_uninit_done_sema); + if (!IS_ERR(h265_task)) { + kthread_stop(h265_task); + h265_task = NULL; + } +#else + while (hevc->uninit_list) /* wait uninit complete */ + msleep(20); +#endif + + } + hevc->init_flag = 0; + hevc->first_sc_checked = 0; + cancel_work_sync(&hevc->notify_work); + cancel_work_sync(&hevc->set_clk_work); + uninit_mmu_buffers(hevc); + amhevc_disable(); + + //pr_err("[%s line %d] hevc->gvs=0x%p operation\n",__func__, __LINE__, hevc->gvs); + if (hevc->gvs) + kfree(hevc->gvs); + hevc->gvs = NULL; + + return 0; +} + +#ifdef MULTI_INSTANCE_SUPPORT +static void reset_process_time(struct hevc_state_s *hevc) +{ + if (hevc->start_process_time) { + unsigned int process_time = + 
1000 * (jiffies - hevc->start_process_time) / HZ; + hevc->start_process_time = 0; + if (process_time > max_process_time[hevc->index]) + max_process_time[hevc->index] = process_time; + } +} + +static void start_process_time(struct hevc_state_s *hevc) +{ + hevc->start_process_time = jiffies; + hevc->decode_timeout_count = 2; + hevc->last_lcu_idx = 0; +} + +static void restart_process_time(struct hevc_state_s *hevc) +{ + hevc->start_process_time = jiffies; + hevc->decode_timeout_count = 2; +} + +static void timeout_process(struct hevc_state_s *hevc) +{ + /* + * In this very timeout point,the vh265_work arrives, + * or in some cases the system become slow, then come + * this second timeout. In both cases we return. + */ + if (work_pending(&hevc->work) || + work_busy(&hevc->work) || + work_busy(&hevc->timeout_work) || + work_pending(&hevc->timeout_work)) { + pr_err("%s h265[%d] work pending, do nothing.\n",__func__, hevc->index); + return; + } + + hevc->timeout_num++; + amhevc_stop(); + read_decode_info(hevc); + + hevc_print(hevc, + 0, "%s decoder timeout\n", __func__); + check_pic_decoded_error(hevc, + hevc->pic_decoded_lcu_idx); + /*The current decoded frame is marked + error when the decode timeout*/ + if (hevc->cur_pic != NULL) + hevc->cur_pic->error_mark = 1; + hevc->decoded_poc = hevc->curr_POC; + hevc->decoding_pic = NULL; + hevc->dec_result = DEC_RESULT_DONE; + reset_process_time(hevc); + + if (work_pending(&hevc->work)) + return; + vdec_schedule_work(&hevc->timeout_work); +} + +#ifdef CONSTRAIN_MAX_BUF_NUM +static int get_vf_ref_only_buf_count(struct hevc_state_s *hevc) +{ + struct PIC_s *pic; + int i; + int count = 0; + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if (pic->output_mark == 0 && pic->referenced == 0 + && pic->output_ready == 1) + count++; + } + + return count; +} + +static int get_used_buf_count(struct hevc_state_s *hevc) +{ + struct PIC_s *pic; + int i; + int count = 0; + 
for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if (pic->output_mark != 0 || pic->referenced != 0 + || pic->output_ready != 0) + count++; + } + + return count; +} +#endif + +static bool is_avaliable_buffer(struct hevc_state_s *hevc) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hevc->v4l2_ctx); + struct PIC_s *pic = NULL; + int i, free_count = 0; + + if (ctx->cap_pool.dec < hevc->used_buf_num) { + free_count = v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx); + if (free_count && + !ctx->fb_ops.query(&ctx->fb_ops, &hevc->fb_token)) { + return false; + } + } + + for (i = 0; i < hevc->used_buf_num; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || + pic->index == -1 || + pic->BUF_index == -1) + continue; + + if (pic->output_mark == 0 && + pic->referenced == 0 && + pic->output_ready == 0 && + pic->cma_alloc_addr) { + free_count++; + } + } + + return free_count < run_ready_min_buf_num ? 0 : 1; +} + +static unsigned char is_new_pic_available(struct hevc_state_s *hevc) +{ + struct PIC_s *new_pic = NULL; + struct PIC_s *pic; + /* recycle un-used pic */ + int i; + int ref_pic = 0; + struct vdec_s *vdec = hw_to_vdec(hevc); + unsigned long flags; + /*return 1 if pic_list is not initialized yet*/ + if (hevc->pic_list_init_flag != 3) + return 1; + spin_lock_irqsave(&lock, flags); + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if (pic->referenced == 1) + ref_pic++; + if (pic->output_mark == 0 && pic->referenced == 0 + && pic->output_ready == 0 + && pic->vf_ref == 0 + ) { + if (new_pic) { + if (pic->POC < new_pic->POC) + new_pic = pic; + } else + new_pic = pic; + } + } + if (!hevc->is_used_v4l && new_pic == NULL) { + enum receviver_start_e state = RECEIVER_INACTIVE; + if (vf_get_receiver(vdec->vf_provider_name)) { + state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + if ((state == 
RECEIVER_STATE_NULL) + || (state == RECEIVER_STATE_NONE)) + state = RECEIVER_INACTIVE; + } + if (state == RECEIVER_INACTIVE) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + int poc = INVALID_POC; + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if ((pic->referenced == 0) && + (pic->error_mark == 1) && + (pic->output_mark == 1)) { + if (poc == INVALID_POC || (pic->POC < poc)) { + new_pic = pic; + poc = pic->POC; + } + } + } + if (new_pic) { + new_pic->referenced = 0; + new_pic->output_mark = 0; + put_mv_buf(hevc, new_pic); + hevc_print(hevc, 0, "force release error pic %d recieve_state %d \n", new_pic->POC, state); + } else { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if ((pic->referenced == 1) && (pic->error_mark == 1)) { + spin_unlock_irqrestore(&lock, flags); + flush_output(hevc, pic); + hevc_print(hevc, 0, "DPB error, neeed fornce flush recieve_state %d \n", state); + return 0; + } + } + } + } + } + if (new_pic == NULL) { + int decode_count = 0; + + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if (pic->output_ready == 0) + decode_count++; + } + if (decode_count >= + hevc->param.p.sps_max_dec_pic_buffering_minus1_0 + detect_stuck_buffer_margin) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) + dump_pic_list(hevc); + if (!(error_handle_policy & 0x400)) { + spin_unlock_irqrestore(&lock, flags); + flush_output(hevc, NULL); + hevc_print(hevc, H265_DEBUG_BUFMGR, "flush dpb, ref_error_count %d, sps_max_dec_pic_buffering_minus1_0 %d\n", + decode_count, hevc->param.p.sps_max_dec_pic_buffering_minus1_0); + return 0; + } + } + } + spin_unlock_irqrestore(&lock, flags); + return (new_pic != NULL) ? 
1 : 0; +} + +static void check_buffer_status(struct hevc_state_s *hevc) +{ + int i; + struct PIC_s *new_pic = NULL; + struct PIC_s *pic; + struct vdec_s *vdec = hw_to_vdec(hevc); + + enum receviver_start_e state = RECEIVER_INACTIVE; + + if (hevc->is_used_v4l) + return; + + if (vf_get_receiver(vdec->vf_provider_name)) { + state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + if ((state == RECEIVER_STATE_NULL) + || (state == RECEIVER_STATE_NONE)) + state = RECEIVER_INACTIVE; + } + if (hevc->timeout_flag == false) + hevc->timeout = jiffies + HZ / 2; + + if (state == RECEIVER_INACTIVE) + hevc->timeout_flag = true; + else + hevc->timeout_flag = false; + + if (state == RECEIVER_INACTIVE && hevc->timeout_flag && + time_after(jiffies, hevc->timeout)) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + int poc = INVALID_POC; + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if ((pic->referenced == 0) && + (pic->error_mark == 1) && + (pic->output_mark == 1)) { + if (poc == INVALID_POC || (pic->POC < poc)) { + new_pic = pic; + poc = pic->POC; + } + } + } + if (new_pic) { + new_pic->referenced = 0; + new_pic->output_mark = 0; + put_mv_buf(hevc, new_pic); + hevc_print(hevc, 0, "check_buffer_status force release error pic %d recieve_state %d \n", new_pic->POC, state); + } else { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if ((pic->referenced == 1) && (pic->error_mark == 1)) { + flush_output(hevc, pic); + hevc_print(hevc, 0, "check_buffer_status DPB error, neeed fornce flush recieve_state %d \n", state); + break; + } + } + } + } +} + + +static int vmh265_stop(struct hevc_state_s *hevc) +{ + if (hevc->stat & STAT_TIMER_ARM) { + del_timer_sync(&hevc->timer); + hevc->stat &= ~STAT_TIMER_ARM; + } + if (hevc->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hevc->stat &= ~STAT_VDEC_RUN; + } + if (hevc->stat & STAT_ISR_REG) { + 
vdec_free_irq(VDEC_IRQ_0, (void *)hevc); + hevc->stat &= ~STAT_ISR_REG; + } + + if (!hevc->is_used_v4l && hevc->stat & STAT_VF_HOOK) { + if (fr_hint_status == VDEC_HINTED) + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + fr_hint_status = VDEC_NO_NEED_HINT; + vf_unreg_provider(&vh265_vf_prov); + hevc->stat &= ~STAT_VF_HOOK; + } + + hevc_local_uninit(hevc); + + if (hevc->gvs) + kfree(hevc->gvs); + hevc->gvs = NULL; + + if (use_cma) { + hevc->uninit_list = 1; + reset_process_time(hevc); + hevc->dec_result = DEC_RESULT_FREE_CANVAS; + vdec_schedule_work(&hevc->work); + flush_work(&hevc->work); +#ifdef USE_UNINIT_SEMA + if (hevc->init_flag) { + down(&hevc->h265_uninit_done_sema); + } +#else + while (hevc->uninit_list) /* wait uninit complete */ + msleep(20); +#endif + } + hevc->init_flag = 0; + hevc->first_sc_checked = 0; + cancel_work_sync(&hevc->notify_work); + cancel_work_sync(&hevc->set_clk_work); + cancel_work_sync(&hevc->timeout_work); + cancel_work_sync(&hevc->work); + uninit_mmu_buffers(hevc); + + vfree(hevc->fw); + hevc->fw = NULL; + + dump_log(hevc); + return 0; +} + +static unsigned char get_data_check_sum + (struct hevc_state_s *hevc, int size) +{ + int jj; + int sum = 0; + u8 *data = NULL; + + if (!hevc->chunk->block->is_mapped) + data = codec_mm_vmap(hevc->chunk->block->start + + hevc->chunk->offset, size); + else + data = ((u8 *)hevc->chunk->block->start_virt) + + hevc->chunk->offset; + + for (jj = 0; jj < size; jj++) + sum += data[jj]; + + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. 
%02x %02x %02x %02x\n", + __func__, size, sum, + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + + if (!hevc->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static void vh265_notify_work(struct work_struct *work) +{ + struct hevc_state_s *hevc = + container_of(work, + struct hevc_state_s, + notify_work); + struct vdec_s *vdec = hw_to_vdec(hevc); + + if (hevc->is_used_v4l) + return; + +#ifdef MULTI_INSTANCE_SUPPORT + if (vdec->fr_hint_state == VDEC_NEED_HINT) { + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)hevc->frame_dur)); + vdec->fr_hint_state = VDEC_HINTED; + } else if (fr_hint_status == VDEC_NEED_HINT) { + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)hevc->frame_dur)); + fr_hint_status = VDEC_HINTED; + } +#else + if (fr_hint_status == VDEC_NEED_HINT) + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)hevc->frame_dur)); + fr_hint_status = VDEC_HINTED; + } +#endif + + return; +} + +static void vh265_work_implement(struct hevc_state_s *hevc, + struct vdec_s *vdec,int from) +{ + if (hevc->dec_result == DEC_RESULT_DONE) { + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_WORKER_START); + } else if (hevc->dec_result == DEC_RESULT_AGAIN) + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_WORKER_AGAIN); + + if (hevc->dec_result == DEC_RESULT_FREE_CANVAS) { + /*USE_BUF_BLOCK*/ + uninit_pic_list(hevc); + hevc->uninit_list = 0; +#ifdef USE_UNINIT_SEMA + up(&hevc->h265_uninit_done_sema); +#endif + return; + } + + /* finished decoding one frame or error, + * notify vdec core to switch context + */ + if (hevc->pic_list_init_flag == 1 + && (hevc->dec_result != DEC_RESULT_FORCE_EXIT)) { + hevc->pic_list_init_flag = 2; + init_pic_list(hevc); + init_pic_list_hw(hevc); + init_buf_spec(hevc); + 
hevc_print(hevc, 0, + "set pic_list_init_flag to 2\n"); + + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); + return; + } + + hevc_print(hevc, PRINT_FLAG_VDEC_DETAIL, + "%s dec_result %d %x %x %x\n", + __func__, + hevc->dec_result, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR)); + + if (((hevc->dec_result == DEC_RESULT_GET_DATA) || + (hevc->dec_result == DEC_RESULT_GET_DATA_RETRY)) + && (hw_to_vdec(hevc)->next_status != + VDEC_STATUS_DISCONNECTED)) { + if (!vdec_has_more_input(vdec)) { + hevc->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hevc->work); + return; + } + if (!input_frame_based(vdec)) { + int r = vdec_sync_input(vdec); + if (r >= 0x200) { + WRITE_VREG(HEVC_DECODE_SIZE, + READ_VREG(HEVC_DECODE_SIZE) + r); + + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA %x %x %x mpc %x size 0x%x\n", + __func__, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + READ_VREG(HEVC_MPC_E), r); + + start_process_time(hevc); + if (READ_VREG(HEVC_DEC_STATUS_REG) + == HEVC_DECODE_BUFEMPTY2) + WRITE_VREG(HEVC_DEC_STATUS_REG, + HEVC_ACTION_DONE); + else + WRITE_VREG(HEVC_DEC_STATUS_REG, + HEVC_ACTION_DEC_CONT); + } else { + hevc->dec_result = DEC_RESULT_GET_DATA_RETRY; + vdec_schedule_work(&hevc->work); + } + return; + } + + /*below for frame_base*/ + if (hevc->dec_result == DEC_RESULT_GET_DATA) { + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA %x %x %x mpc %x\n", + __func__, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + READ_VREG(HEVC_MPC_E)); + mutex_lock(&hevc->chunks_mutex); + vdec_vframe_dirty(vdec, hevc->chunk); + hevc->chunk = NULL; + mutex_unlock(&hevc->chunks_mutex); + vdec_clean_input(vdec); + } + + /*if (is_new_pic_available(hevc)) {*/ + if (run_ready(vdec, VDEC_HEVC)) { + int r; + int decode_size; + + r = vdec_prepare_input(vdec, &hevc->chunk); + if (r < 0) { + 
hevc->dec_result = DEC_RESULT_GET_DATA_RETRY; + + hevc_print(hevc, + PRINT_FLAG_VDEC_DETAIL, + "amvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&hevc->work); + return; + } + hevc->dec_result = DEC_RESULT_NONE; + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: chunk size 0x%x sum 0x%x mpc %x\n", + __func__, r, + (get_dbg_flag(hevc) & PRINT_FLAG_VDEC_STATUS) ? + get_data_check_sum(hevc, r) : 0, + READ_VREG(HEVC_MPC_E)); + + if (get_dbg_flag(hevc) & PRINT_FRAMEBASE_DATA) { + int jj; + u8 *data = NULL; + + if (!hevc->chunk->block->is_mapped) + data = codec_mm_vmap( + hevc->chunk->block->start + + hevc->chunk->offset, r); + else + data = ((u8 *) + hevc->chunk->block->start_virt) + + hevc->chunk->offset; + + for (jj = 0; jj < r; jj++) { + if ((jj & 0xf) == 0) + hevc_print(hevc, + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + hevc_print_cont(hevc, + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + hevc_print_cont(hevc, + PRINT_FRAMEBASE_DATA, + "\n"); + } + + if (!hevc->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + + decode_size = hevc->chunk->size + + (hevc->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + WRITE_VREG(HEVC_DECODE_SIZE, + READ_VREG(HEVC_DECODE_SIZE) + decode_size); + + vdec_enable_input(vdec); + + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: mpc %x\n", + __func__, READ_VREG(HEVC_MPC_E)); + + start_process_time(hevc); + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + } else{ + hevc->dec_result = DEC_RESULT_GET_DATA_RETRY; + + /*hevc_print(hevc, PRINT_FLAG_VDEC_DETAIL, + * "amvdec_vh265: Insufficient data\n"); + */ + + vdec_schedule_work(&hevc->work); + } + return; + } else if (hevc->dec_result == DEC_RESULT_DONE) { + /* if (!hevc->ctx_valid) + hevc->ctx_valid = 1; */ + int i; + hevc->dec_again_cnt = 0; + decode_frame_count[hevc->index]++; +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + if (hevc->delrefill_check == 2) { + delrefill(hevc); + amhevc_stop(); 
+ } + } +#endif + if (hevc->mmu_enable && ((hevc->double_write_mode & 0x10) == 0)) { + hevc->used_4k_num = + READ_VREG(HEVC_SAO_MMU_STATUS) >> 16; + if (hevc->used_4k_num >= 0 && + hevc->cur_pic && + hevc->cur_pic->scatter_alloc + == 1) + recycle_mmu_buf_tail(hevc, hevc->m_ins_flag); + } + hevc->pic_decoded_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff; + + if (vdec->master == NULL && vdec->slave == NULL && + hevc->empty_flag == 0) { + hevc->over_decode = + (READ_VREG(HEVC_SHIFT_STATUS) >> 15) & 0x1; + if (hevc->over_decode) + hevc_print(hevc, 0, + "!!!Over decode\n"); + } + + if (is_log_enable(hevc)) + add_log(hevc, + "%s dec_result %d lcu %d used_mmu %d shiftbyte 0x%x decbytes 0x%x", + __func__, + hevc->dec_result, + hevc->pic_decoded_lcu_idx, + hevc->used_4k_num, + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_BYTE_COUNT) - + hevc->start_shift_bytes + ); + + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s dec_result %d (%x %x %x) lcu %d used_mmu %d shiftbyte 0x%x decbytes 0x%x\n", + __func__, + hevc->dec_result, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + hevc->pic_decoded_lcu_idx, + hevc->used_4k_num, + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_BYTE_COUNT) - + hevc->start_shift_bytes + ); + + hevc->used_4k_num = -1; + + check_pic_decoded_error(hevc, + hevc->pic_decoded_lcu_idx); + if ((error_handle_policy & 0x100) == 0 && hevc->cur_pic) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + struct PIC_s *pic; + pic = hevc->m_PIC[i]; + if (!pic || pic->index == -1) + continue; + if ((hevc->cur_pic->POC + poc_num_margin < pic->POC) && (pic->referenced == 0) && + (pic->output_mark == 1) && (pic->output_ready == 0)) { + hevc->poc_error_count++; + break; + } + } + if (i == MAX_REF_PIC_NUM) + hevc->poc_error_count = 0; + if (hevc->poc_error_count >= poc_error_limit) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + struct PIC_s *pic; + pic = hevc->m_PIC[i]; + if (!pic || pic->index == -1) 
+ continue; + if ((hevc->cur_pic->POC + poc_num_margin < pic->POC) && (pic->referenced == 0) && + (pic->output_mark == 1) && (pic->output_ready == 0)) { + pic->output_mark = 0; + hevc_print(hevc, 0, "DPB poc error, remove error frame\n"); + } + } + } + } + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +#if 1 + if (vdec->slave) { + if (dv_debug & 0x1) + vdec_set_flag(vdec->slave, + VDEC_FLAG_SELF_INPUT_CONTEXT); + else + vdec_set_flag(vdec->slave, + VDEC_FLAG_OTHER_INPUT_CONTEXT); + } +#else + if (vdec->slave) { + if (no_interleaved_el_slice) + vdec_set_flag(vdec->slave, + VDEC_FLAG_INPUT_KEEP_CONTEXT); + /* this will move real HW pointer for input */ + else + vdec_set_flag(vdec->slave, 0); + /* this will not move real HW pointer + *and SL layer decoding + *will start from same stream position + *as current BL decoder + */ + } +#endif +#endif +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + hevc->shift_byte_count_lo + = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + if (vdec->slave) { + /*cur is base, found enhance*/ + struct hevc_state_s *hevc_el = + (struct hevc_state_s *) + vdec->slave->private; + if (hevc_el) + hevc_el->shift_byte_count_lo = + hevc->shift_byte_count_lo; + } else if (vdec->master) { + /*cur is enhance, found base*/ + struct hevc_state_s *hevc_ba = + (struct hevc_state_s *) + vdec->master->private; + if (hevc_ba) + hevc_ba->shift_byte_count_lo = + hevc->shift_byte_count_lo; + } +#endif + mutex_lock(&hevc->chunks_mutex); + vdec_vframe_dirty(hw_to_vdec(hevc), hevc->chunk); + hevc->chunk = NULL; + mutex_unlock(&hevc->chunks_mutex); + } else if (hevc->dec_result == DEC_RESULT_AGAIN) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + if (!vdec_has_more_input(vdec)) { + hevc->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hevc->work); + return; + } +#ifdef AGAIN_HAS_THRESHOLD + hevc->next_again_flag = 1; +#endif + if (input_stream_based(vdec)) { + if (!(error_handle_policy & 0x400) && 
check_data_size(vdec)) { + hevc->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hevc->work); + return; + } else if ((((error_handle_policy & 0x200) == 0) && + (hevc->pic_list_init_flag == 0))) { + check_dirty_data(vdec); + } + } + } else if (hevc->dec_result == DEC_RESULT_EOS) { + struct PIC_s *pic; + hevc->eos = 1; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if ((vdec_dual(vdec)) && aux_data_is_avaible(hevc)) + if (hevc->decoding_pic) + dolby_get_meta(hevc); +#endif + check_pic_decoded_error(hevc, + hevc->pic_decoded_lcu_idx); + pic = get_pic_by_POC(hevc, hevc->curr_POC); + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: end of stream, last dec poc %d => 0x%pf\n", + __func__, hevc->curr_POC, pic); + flush_output(hevc, pic); + /* dummy vf with eos flag to backend */ + notify_v4l_eos(hw_to_vdec(hevc)); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + hevc->shift_byte_count_lo + = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + if (vdec->slave) { + /*cur is base, found enhance*/ + struct hevc_state_s *hevc_el = + (struct hevc_state_s *) + vdec->slave->private; + if (hevc_el) + hevc_el->shift_byte_count_lo = + hevc->shift_byte_count_lo; + } else if (vdec->master) { + /*cur is enhance, found base*/ + struct hevc_state_s *hevc_ba = + (struct hevc_state_s *) + vdec->master->private; + if (hevc_ba) + hevc_ba->shift_byte_count_lo = + hevc->shift_byte_count_lo; + } +#endif + mutex_lock(&hevc->chunks_mutex); + vdec_vframe_dirty(hw_to_vdec(hevc), hevc->chunk); + hevc->chunk = NULL; + mutex_unlock(&hevc->chunks_mutex); + } else if (hevc->dec_result == DEC_RESULT_FORCE_EXIT) { + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: force exit\n", + __func__); + if (hevc->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hevc->stat &= ~STAT_VDEC_RUN; + } + if (hevc->stat & STAT_ISR_REG) { + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0); + vdec_free_irq(VDEC_IRQ_0, (void *)hevc); + hevc->stat &= ~STAT_ISR_REG; + } + hevc_print(hevc, 0, "%s: force exit end\n", + __func__); + } + + if 
(hevc->stat & STAT_VDEC_RUN) {
		amhevc_stop();
		hevc->stat &= ~STAT_VDEC_RUN;
	}

	if (hevc->stat & STAT_TIMER_ARM) {
		del_timer_sync(&hevc->timer);
		hevc->stat &= ~STAT_TIMER_ARM;
	}
	/* Wait for the HW stream-search engine to go idle before the
	 * cores are handed back (see wait_hevc_search_done).
	 */
	ATRACE_COUNTER(hevc->trace.decode_work_time_name, TRACE_WORK_WAIT_SEARCH_DONE_START);
	wait_hevc_search_done(hevc);
	ATRACE_COUNTER(hevc->trace.decode_work_time_name, TRACE_WORK_WAIT_SEARCH_DONE_END);

#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
	/* Dual-layer Dolby Vision: pick which instance (BL/EL) the core
	 * scheduler should run next.
	 */
	if (hevc->switch_dvlayer_flag) {
		if (vdec->slave)
			vdec_set_next_sched(vdec, vdec->slave);
		else if (vdec->master)
			vdec_set_next_sched(vdec, vdec->master);
	} else if (vdec->slave || vdec->master)
		vdec_set_next_sched(vdec, vdec);
#endif

	if (from == 1) {
		/* Timeout-work path: if the normal work item is already
		 * pending, let it handle this run instead of racing it.
		 */
		if (work_pending(&hevc->work)) {
			/*
			 * The vh265_work arrives at the last second,
			 * give it a chance to handle the scenario.
			 */
			return;
			//cancel_work_sync(&hevc->work);//reserved for future consideration
		}
	}
	if (hevc->dec_result == DEC_RESULT_DONE) {
		ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_WORKER_END);
	}

	/* mark itself has all HW resource released and input released */
	if (vdec->parallel_dec == 1)
		vdec_core_finish_run(vdec, CORE_MASK_HEVC);
	else
		vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC);

	if (hevc->is_used_v4l) {
		struct aml_vcodec_ctx *ctx =
			(struct aml_vcodec_ctx *)(hevc->v4l2_ctx);

		if (ctx->param_sets_from_ucode &&
			!hevc->v4l_params_parsed)
			vdec_v4l_write_frame_sync(ctx);
	}

	/* Notify the core scheduler that this run has fully completed. */
	if (hevc->vdec_cb)
		hevc->vdec_cb(hw_to_vdec(hevc), hevc->vdec_cb_arg);
}

/*
 * Normal bottom-half work handler: runs the shared implementation
 * with from == 0 (not a timeout).
 */
static void vh265_work(struct work_struct *work)
{
	struct hevc_state_s *hevc = container_of(work,
		struct hevc_state_s, work);
	struct vdec_s *vdec = hw_to_vdec(hevc);

	vh265_work_implement(hevc, vdec, 0);
}

/*
 * Timeout bottom-half work handler: defers to the normal work item if
 * one is pending, otherwise marks timeout_processing (checked by
 * run_ready() to hold off new runs) and runs the shared implementation
 * with from == 1.
 */
static void vh265_timeout_work(struct work_struct *work)
{
	struct hevc_state_s *hevc = container_of(work,
		struct hevc_state_s, timeout_work);
	struct vdec_s *vdec = hw_to_vdec(hevc);

	if (work_pending(&hevc->work))
		return;
	hevc->timeout_processing = 1;
	vh265_work_implement(hevc, vdec, 1);
}


/*
 * Restore HW decoder context before a run; currently only re-runs the
 * protocol register init. Always returns 0.
 */
static int vh265_hw_ctx_restore(struct hevc_state_s *hevc)
{
	/* new to do ... */
	vh265_prot_init(hevc);
	return 0;
}

/*
 * Scheduler readiness hook: returns the core mask this instance needs
 * (CORE_MASK_HEVC, optionally CORE_MASK_VDEC_1) when it is ready to be
 * scheduled, or 0 to skip this round.
 */
static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask)
{
	struct hevc_state_s *hevc =
		(struct hevc_state_s *)vdec->private;
	int tvp = vdec_secure(hw_to_vdec(hevc)) ?
		CODEC_MM_FLAGS_TVP : 0;
	bool ret = 0;
	/* debug single-step control: 0x11 arms one run, 0x12 blocks */
	if (step == 0x12)
		return 0;
	else if (step == 0x11)
		step = 0x12;

	if (hevc->fatal_error & DECODER_FATAL_ERROR_NO_MEM)
		return 0;

	if (hevc->eos)
		return 0;
	/* Do not schedule while the timeout path still has work in flight. */
	if (hevc->timeout_processing &&
	    (work_pending(&hevc->work) ||
	    work_busy(&hevc->work) ||
	    work_busy(&hevc->timeout_work) ||
	    work_pending(&hevc->timeout_work))) {
		hevc_print(hevc, PRINT_FLAG_VDEC_STATUS,
			"h265 work pending,not ready for run.\n");
		return 0;
	}
	hevc->timeout_processing = 0;
	/* One-time MMU scatter-cache pre-check on the first ready poll. */
	if (!hevc->first_sc_checked && hevc->mmu_enable) {
		int size;
		void * mmu_box;

		if (hevc->is_used_v4l) {
			struct aml_vcodec_ctx *ctx =
				(struct aml_vcodec_ctx *)(hevc->v4l2_ctx);
			mmu_box = ctx->mmu_box;
		} else
			mmu_box = hevc->mmu_box;

		size = decoder_mmu_box_sc_check(mmu_box, tvp);
		hevc->first_sc_checked =1;
		hevc_print(hevc, 0,
			"vh265 cached=%d need_size=%d speed= %d ms\n",
			size, (hevc->need_cache_size >> PAGE_SHIFT),
			(int)(get_jiffies_64() - hevc->sc_start_time) * 1000/HZ);
	}
	/* Stream mode, before init: require a minimum amount of buffered
	 * input (pre_decode_buf_level) before the first run.
	 */
	if (vdec_stream_based(vdec) && (hevc->init_flag == 0)
		&& pre_decode_buf_level != 0) {
		u32 rp, wp, level;

		rp = STBUF_READ(&vdec->vbuf, get_rp);
		wp = STBUF_READ(&vdec->vbuf, get_wp);
		if (wp < rp)
			level = vdec->input.size + wp - rp;
		else
			level = wp - rp;

		if (level < pre_decode_buf_level)
			return 0;
	}

#ifdef AGAIN_HAS_THRESHOLD
	/* After a DEC_RESULT_AGAIN, require the parser write pointer to
	 * advance by at least again_threshold before retrying.
	 */
	if (hevc->next_again_flag &&
		(!vdec_frame_based(vdec))) {
		u32 parser_wr_ptr =
			STBUF_READ(&vdec->vbuf, get_wp);
		if (parser_wr_ptr >=
hevc->pre_parser_wr_ptr &&
			(parser_wr_ptr - hevc->pre_parser_wr_ptr) <
			again_threshold) {
			int r = vdec_sync_input(vdec);
			hevc_print(hevc,
				PRINT_FLAG_VDEC_DETAIL, "%s buf lelvel:%x\n", __func__, r);
			return 0;
		}
	}
#endif

	/* Throttle scheduling when the display queue is already deep
	 * (lets the consumer drain every valve_count-th poll).
	 */
	if (disp_vframe_valve_level &&
		kfifo_len(&hevc->display_q) >=
		disp_vframe_valve_level) {
		hevc->valve_count--;
		if (hevc->valve_count <= 0)
			hevc->valve_count = 2;
		else
			return 0;
	}

	/* Core readiness: a free picture buffer must be available. */
	ret = is_new_pic_available(hevc);
	if (!ret) {
		hevc_print(hevc,
			PRINT_FLAG_VDEC_DETAIL, "%s=>%d\r\n",
			__func__, ret);
	}

#ifdef CONSTRAIN_MAX_BUF_NUM
	/* Optional debug/tuning caps on buffer usage (non-v4l path only). */
	if (hevc->pic_list_init_flag == 3 && !hevc->is_used_v4l) {
		if (run_ready_max_vf_only_num > 0 &&
			get_vf_ref_only_buf_count(hevc) >=
			run_ready_max_vf_only_num
			)
			ret = 0;
		if (run_ready_display_q_num > 0 &&
			kfifo_len(&hevc->display_q) >=
			run_ready_display_q_num)
			ret = 0;

		/*avoid more buffers consumed when
		switching resolution*/
		if (run_ready_max_buf_num == 0xff &&
			get_used_buf_count(hevc) >=
			get_work_pic_num(hevc)) {
			check_buffer_status(hevc);
			ret = 0;
		}
		else if (run_ready_max_buf_num &&
			get_used_buf_count(hevc) >=
			run_ready_max_buf_num)
			ret = 0;
	}
#endif

	/* v4l2 path: additionally gate on capture-buffer availability. */
	if (hevc->is_used_v4l) {
		struct aml_vcodec_ctx *ctx =
			(struct aml_vcodec_ctx *)(hevc->v4l2_ctx);

		if (ctx->param_sets_from_ucode) {
			if (hevc->v4l_params_parsed) {
				if (ctx->cap_pool.dec < hevc->used_buf_num) {
					if (is_avaliable_buffer(hevc))
						ret = 1;
					else
						ret = 0;
				}
			} else {
				if (ctx->v4l_resolution_change)
					ret = 0;
			}
		} else if (!ctx->v4l_codec_dpb_ready) {
			if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) <
				run_ready_min_buf_num)
				ret = 0;
		}
	}

	/* Debug counter: consecutive not-ready polls per instance. */
	if (ret)
		not_run_ready[hevc->index] = 0;
	else
		not_run_ready[hevc->index]++;
	if (vdec->parallel_dec == 1)
		return ret ? (CORE_MASK_HEVC) : 0;
	else
		return ret ? (CORE_MASK_VDEC_1 | CORE_MASK_HEVC) : 0;
}

/*
 * Scheduler run hook: prepares one unit of input (a frame chunk or a
 * stream span), loads/patches firmware as needed, restores HW context
 * and starts the HEVC core. Completion is reported asynchronously via
 * the work handlers; `callback`/`arg` are saved and invoked from there.
 */
static void run(struct vdec_s *vdec, unsigned long mask,
	void (*callback)(struct vdec_s *, void *), void *arg)
{
	struct hevc_state_s *hevc =
		(struct hevc_state_s *)vdec->private;
	int r, loadr = 0;
	unsigned char check_sum = 0;

	run_count[hevc->index]++;
	hevc->vdec_cb_arg = arg;
	hevc->vdec_cb = callback;
	hevc->aux_data_dirty = 1;

	ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_RUN_START);
	hevc_reset_core(vdec);

#ifdef AGAIN_HAS_THRESHOLD
	/* Remember the parser WP so run_ready() can measure progress
	 * if this run ends in DEC_RESULT_AGAIN.
	 */
	if (vdec_stream_based(vdec)) {
		hevc->pre_parser_wr_ptr =
			STBUF_READ(&vdec->vbuf, get_wp);
		hevc->next_again_flag = 0;
	}
#endif
	r = vdec_prepare_input(vdec, &hevc->chunk);
	if (r < 0) {
		/* No input available: reschedule via the work handler. */
		input_empty[hevc->index]++;
		hevc->dec_result = DEC_RESULT_AGAIN;
		hevc_print(hevc, PRINT_FLAG_VDEC_DETAIL,
			"ammvdec_vh265: Insufficient data\n");

		vdec_schedule_work(&hevc->work);
		return;
	}
	input_empty[hevc->index] = 0;
	hevc->dec_result = DEC_RESULT_NONE;
	/* Debug checksum of the input chunk (non-secure, frame mode only). */
	if (vdec_frame_based(vdec) &&
		((get_dbg_flag(hevc) & PRINT_FLAG_VDEC_STATUS)
		|| is_log_enable(hevc)) &&
		!vdec_secure(vdec))
		check_sum = get_data_check_sum(hevc, r);

	if (is_log_enable(hevc))
		add_log(hevc,
			"%s: size 0x%x sum 0x%x shiftbyte 0x%x",
			__func__, r,
			check_sum,
			READ_VREG(HEVC_SHIFT_BYTE_COUNT)
			);
	if ((hevc->dirty_shift_flag == 1) && !(vdec->input.swap_valid)) {
		WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, vdec->input.stream_cookie);
	}
	hevc->start_shift_bytes = READ_VREG(HEVC_SHIFT_BYTE_COUNT);

	hevc_print(hevc, PRINT_FLAG_VDEC_STATUS,
		"%s: size 0x%x sum 0x%x (%x %x %x %x %x) byte count %x\n",
		__func__, r,
		check_sum,
		READ_VREG(HEVC_STREAM_LEVEL),
		READ_VREG(HEVC_STREAM_WR_PTR),
		READ_VREG(HEVC_STREAM_RD_PTR),
		STBUF_READ(&vdec->vbuf, get_rp),
		STBUF_READ(&vdec->vbuf, get_wp),
		hevc->start_shift_bytes
		);
	/* Optional hexdump of the whole input frame (debug only). */
	if ((get_dbg_flag(hevc) & PRINT_FRAMEBASE_DATA) &&
		input_frame_based(vdec) &&
		!vdec_secure(vdec)) {
		int jj;
		u8 *data = NULL;

		if
(!hevc->chunk->block->is_mapped)
			data = codec_mm_vmap(hevc->chunk->block->start +
				hevc->chunk->offset, r);
		else
			data = ((u8 *)hevc->chunk->block->start_virt) +
				hevc->chunk->offset;

		/* 16 bytes per printed row, offset label at row start. */
		for (jj = 0; jj < r; jj++) {
			if ((jj & 0xf) == 0)
				hevc_print(hevc, PRINT_FRAMEBASE_DATA,
					"%06x:", jj);
			hevc_print_cont(hevc, PRINT_FRAMEBASE_DATA,
				"%02x ", data[jj]);
			if (((jj + 1) & 0xf) == 0)
				hevc_print_cont(hevc, PRINT_FRAMEBASE_DATA,
					"\n");
		}

		if (!hevc->chunk->block->is_mapped)
			codec_mm_unmap_phyaddr(data);
	}
	ATRACE_COUNTER(hevc->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_START);
	if (vdec->mc_loaded) {
		/* firmware has been loaded before and has not changed to
		 * another one; skip the reload and just restore the swap
		 * buffer on TEE/GXM-and-earlier parts.
		 */
		if (tee_enabled() && hevc->is_swap &&
			get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM)
			WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, hevc->swap_addr);
	} else {
		/* Select firmware: MMU vs non-MMU, and on <=GXM non-4K try
		 * the swap-capable image first.
		 */
		if (hevc->mmu_enable) {
			if (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_GXM)
				loadr = amhevc_vdec_loadmc_ex(VFORMAT_HEVC, vdec,
					"h265_mmu", hevc->fw->data);
			else {
				if (!hevc->is_4k) {
					/* if an older version of the fw was loaded, */
					/* need to try the noswap fw because the */
					/* old fw package does not contain the swap fw.*/
					loadr = amhevc_vdec_loadmc_ex(
						VFORMAT_HEVC, vdec,
						"hevc_mmu_swap",
						hevc->fw->data);
					if (loadr < 0)
						loadr = amhevc_vdec_loadmc_ex(
							VFORMAT_HEVC, vdec,
							"h265_mmu",
							hevc->fw->data);
					else
						hevc->is_swap = true;
				} else
					loadr = amhevc_vdec_loadmc_ex(
						VFORMAT_HEVC, vdec,
						"h265_mmu", hevc->fw->data);
			}
		} else
			loadr = amhevc_vdec_loadmc_ex(VFORMAT_HEVC, vdec,
				NULL, hevc->fw->data);

		if (loadr < 0) {
			/* Firmware load failed: power the core down and
			 * force this instance to exit via the work handler.
			 */
			amhevc_disable();
			hevc_print(hevc, 0, "H265: the %s fw loading failed, err: %x\n",
				tee_enabled() ? "TEE" : "local", loadr);
			hevc->dec_result = DEC_RESULT_FORCE_EXIT;
			vdec_schedule_work(&hevc->work);
			return;
		}

		if (tee_enabled() && hevc->is_swap &&
			get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM)
			hevc->swap_addr = READ_VREG(HEVC_STREAM_SWAP_BUFFER2);
#ifdef DETREFILL_ENABLE
		if (hevc->is_swap &&
			get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM)
			init_detrefill_buf(hevc);
#endif
		vdec->mc_loaded = 1;
		vdec->mc_type = VFORMAT_HEVC;
	}
	ATRACE_COUNTER(hevc->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_END);

	ATRACE_COUNTER(hevc->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_START);
	if (vh265_hw_ctx_restore(hevc) < 0) {
		vdec_schedule_work(&hevc->work);
		return;
	}
	ATRACE_COUNTER(hevc->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_END);
	vdec_enable_input(vdec);

	WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE);

	if (vdec_frame_based(vdec)) {
		/* Frame mode: decode size is the chunk plus the FIFO
		 * alignment slack before the chunk start.
		 */
		WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, 0);
		r = hevc->chunk->size +
			(hevc->chunk->offset & (VDEC_FIFO_ALIGN - 1));
		hevc->decode_size = r;
		if (vdec->mvfrm)
			vdec->mvfrm->frame_size = hevc->chunk->size;
	}
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
	else {
		/* Dual-layer stream mode: carry the byte count across
		 * BL/EL instance switches.
		 */
		if (vdec->master || vdec->slave)
			WRITE_VREG(HEVC_SHIFT_BYTE_COUNT,
				hevc->shift_byte_count_lo);
	}
#endif
	WRITE_VREG(HEVC_DECODE_SIZE, r);
	/*WRITE_VREG(HEVC_DECODE_COUNT, hevc->decode_idx);*/
	hevc->init_flag = 1;

	if (hevc->pic_list_init_flag == 3)
		init_pic_list_hw(hevc);
	if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE)
		dump_pic_list(hevc);
	backup_decode_state(hevc);

	/* Arm the watchdog timer, then start the HEVC core. */
	start_process_time(hevc);
	mod_timer(&hevc->timer, jiffies);
	hevc->stat |= STAT_TIMER_ARM;
	hevc->stat |= STAT_ISR_REG;
	if (vdec->mvfrm)
		vdec->mvfrm->hw_decode_start = local_clock();
	amhevc_start();
	hevc->stat |= STAT_VDEC_RUN;

	ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_RUN_END);
}

/*
 * Release the display canvases of every allocated picture and reset the
 * buffer wrap mapping to the identity (parallel-dec instances only).
 */
static void aml_free_canvas(struct vdec_s *vdec)
{
	int i;
	struct hevc_state_s *hevc =
		(struct
hevc_state_s *)vdec->private;

	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		struct PIC_s *pic = hevc->m_PIC[i];

		if (pic) {
			if (vdec->parallel_dec == 1) {
				vdec->free_canvas_ex(pic->y_canvas_index, vdec->id);
				vdec->free_canvas_ex(pic->uv_canvas_index, vdec->id);
			}
		}
		hevc->buffer_wrap[i] = i;
	}
}

/*
 * Scheduler reset hook: stops the core and watchdog, cancels pending
 * work, frees per-picture resources (MV buffers, canvases) and
 * re-initializes the local decoder state for a fresh start.
 */
static void reset(struct vdec_s *vdec)
{
	struct hevc_state_s *hevc =
		(struct hevc_state_s *)vdec->private;
	int i;

	cancel_work_sync(&hevc->work);
	cancel_work_sync(&hevc->notify_work);
	if (hevc->stat & STAT_VDEC_RUN) {
		amhevc_stop();
		hevc->stat &= ~STAT_VDEC_RUN;
	}

	if (hevc->stat & STAT_TIMER_ARM) {
		del_timer_sync(&hevc->timer);
		hevc->stat &= ~STAT_TIMER_ARM;
	}
	hevc->dec_result = DEC_RESULT_NONE;
	reset_process_time(hevc);
	hevc->pic_list_init_flag = 0;
	dealloc_mv_bufs(hevc);
	aml_free_canvas(vdec);
	hevc_local_uninit(hevc);
	if (vh265_local_init(hevc) < 0)
		pr_debug(" %s local init fail\n", __func__);
	for (i = 0; i < BUF_POOL_SIZE; i++) {
		hevc->m_BUF[i].start_adr = 0;
	}

	hevc_print(hevc, PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__);
}

/* Top-half IRQ callback: delegates to the shared vh265 ISR. */
static irqreturn_t vh265_irq_cb(struct vdec_s *vdec, int irq)
{
	struct hevc_state_s *hevc =
		(struct hevc_state_s *)vdec->private;

	return vh265_isr(0, hevc);
}

/* Threaded IRQ callback: delegates to the shared threaded ISR. */
static irqreturn_t vh265_threaded_irq_cb(struct vdec_s *vdec, int irq)
{
	struct hevc_state_s *hevc =
		(struct hevc_state_s *)vdec->private;

	return vh265_isr_thread_fn(0, hevc);
}
#endif

/*
 * Probe for the single-instance (amvdec_h265) platform device:
 * allocates the global decoder state, sets up MMU/workspace buffers and
 * registers the decoder with the vdec core.
 */
static int amvdec_h265_probe(struct platform_device *pdev)
{
#ifdef MULTI_INSTANCE_SUPPORT
	struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data;
#else
	struct vdec_dev_reg_s *pdata =
		(struct vdec_dev_reg_s *)pdev->dev.platform_data;
#endif
	char *tmpbuf;
	int ret;
	struct hevc_state_s *hevc;

	hevc = vmalloc(sizeof(struct hevc_state_s));
	if (hevc == NULL) {
		/* NOTE(review): hevc is NULL here; this relies on
		 * hevc_print() tolerating a NULL state pointer — confirm.
		 */
		hevc_print(hevc, 0, "%s vmalloc hevc failed\r\n", __func__);
		return -ENOMEM;
	}
	gHevc = hevc;
	if ((debug &
H265_NO_CHANG_DEBUG_FLAG_IN_CODE) == 0)
		debug &= (~(H265_DEBUG_DIS_LOC_ERROR_PROC |
			H265_DEBUG_DIS_SYS_ERROR_PROC));
	memset(hevc, 0, sizeof(struct hevc_state_s));
	if (get_dbg_flag(hevc))
		hevc_print(hevc, 0, "%s\r\n", __func__);
	mutex_lock(&vh265_mutex);

	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXTVBB) &&
		(parser_sei_enable & 0x100) == 0)
		parser_sei_enable = 7; /*old 1*/
	hevc->m_ins_flag = 0;
	hevc->init_flag = 0;
	hevc->first_sc_checked = 0;
	hevc->uninit_list = 0;
	hevc->fatal_error = 0;
	hevc->show_frame_num = 0;
	hevc->frameinfo_enable = 1;
#ifdef MULTI_INSTANCE_SUPPORT
	hevc->platform_dev = pdev;
	platform_set_drvdata(pdev, pdata);
#endif

	if (pdata == NULL) {
		hevc_print(hevc, 0,
			"\namvdec_h265 memory resource undefined.\n");
		vfree(hevc);
		mutex_unlock(&vh265_mutex);
		return -EFAULT;
	}
	/* Decide MMU use: off on pre-GXL parts or when double-write 0x10
	 * (full DW) is requested; mmu_enable_force overrides.
	 */
	if (mmu_enable_force == 0) {
		if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL
			|| double_write_mode == 0x10)
			hevc->mmu_enable = 0;
		else
			hevc->mmu_enable = 1;
	}
#ifdef H265_10B_MMU_DW
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) {
		hevc->dw_mmu_enable =
			get_double_write_mode(hevc) & 0x20 ? 1 : 0;
	} else {
		hevc->dw_mmu_enable = 0;
	}
#endif
	if (init_mmu_buffers(hevc, 0)) {
		hevc_print(hevc, 0,
			"\n 265 mmu init failed!\n");
		vfree(hevc);
		mutex_unlock(&vh265_mutex);
		return -EFAULT;
	}

	ret = decoder_bmmu_box_alloc_buf_phy(hevc->bmmu_box, BMMU_WORKSPACE_ID,
		work_buf_size, DRIVER_NAME, &hevc->buf_start);
	if (ret < 0) {
		uninit_mmu_buffers(hevc);
		vfree(hevc);
		mutex_unlock(&vh265_mutex);
		return ret;
	}
	hevc->buf_size = work_buf_size;


	/* Zero the workspace and flush it to the device; fall back to a
	 * temporary vmap when the buffer has no kernel mapping.
	 */
	if (!vdec_secure(pdata)) {
		tmpbuf = (char *)codec_mm_phys_to_virt(hevc->buf_start);
		if (tmpbuf) {
			memset(tmpbuf, 0, work_buf_size);
			dma_sync_single_for_device(amports_get_dma_device(),
				hevc->buf_start,
				work_buf_size, DMA_TO_DEVICE);
		} else {
			tmpbuf = codec_mm_vmap(hevc->buf_start,
				work_buf_size);
			if (tmpbuf) {
				memset(tmpbuf, 0, work_buf_size);
				dma_sync_single_for_device(
					amports_get_dma_device(),
					hevc->buf_start,
					work_buf_size,
					DMA_TO_DEVICE);
				codec_mm_unmap_phyaddr(tmpbuf);
			}
		}
	}

	if (get_dbg_flag(hevc)) {
		hevc_print(hevc, 0,
			"===H.265 decoder mem resource 0x%lx size 0x%x\n",
			hevc->buf_start, hevc->buf_size);
	}

	if (pdata->sys_info)
		hevc->vh265_amstream_dec_info = *pdata->sys_info;
	else {
		hevc->vh265_amstream_dec_info.width = 0;
		hevc->vh265_amstream_dec_info.height = 0;
		hevc->vh265_amstream_dec_info.rate = 30;
	}

	hevc->endian = HEVC_CONFIG_LITTLE_ENDIAN;
	if (is_support_vdec_canvas())
		hevc->endian = HEVC_CONFIG_BIG_ENDIAN;
	if (endian)
		hevc->endian = endian;

#ifndef MULTI_INSTANCE_SUPPORT
	if (pdata->flag & DEC_FLAG_HEVC_WORKAROUND) {
		workaround_enable |= 3;
		hevc_print(hevc, 0,
			"amvdec_h265 HEVC_WORKAROUND flag set.\n");
	} else
		workaround_enable &= ~3;
#endif
	hevc->cma_dev = pdata->cma_dev;
	vh265_vdec_info_init(hevc);

#ifdef MULTI_INSTANCE_SUPPORT
	pdata->private = hevc;
	pdata->dec_status = vh265_dec_status;
	pdata->set_trickmode = vh265_set_trickmode;
	pdata->set_isreset = vh265_set_isreset;
	is_reset = 0;
	if (vh265_init(pdata) < 0) {
#else
	if (vh265_init(hevc) < 0) {
#endif
		hevc_print(hevc, 0,
			"\namvdec_h265 init failed.\n");
		hevc_local_uninit(hevc);
		if (hevc->gvs)
			kfree(hevc->gvs);
		hevc->gvs = NULL;
		uninit_mmu_buffers(hevc);
		vfree(hevc);
		pdata->dec_status = NULL;
		mutex_unlock(&vh265_mutex);
		return -ENODEV;
	}
	/*set the max clk for smooth playing...*/
	hevc_source_changed(VFORMAT_HEVC,
		3840, 2160, 60);
	mutex_unlock(&vh265_mutex);

	return 0;
}

/*
 * Remove for the single-instance device: stops the decoder, restores
 * the clock request and frees the global state.
 */
static int amvdec_h265_remove(struct platform_device *pdev)
{
	struct hevc_state_s *hevc = gHevc;

	if (get_dbg_flag(hevc))
		hevc_print(hevc, 0, "%s\r\n", __func__);

	mutex_lock(&vh265_mutex);

	vh265_stop(hevc);

	hevc_source_changed(VFORMAT_HEVC, 0, 0, 0);


#ifdef DEBUG_PTS
	hevc_print(hevc, 0,
		"pts missed %ld, pts hit %ld, duration %d\n",
		hevc->pts_missed, hevc->pts_hit, hevc->frame_dur);
#endif

	vfree(hevc);
	hevc = NULL;
	gHevc = NULL;

	mutex_unlock(&vh265_mutex);

	return 0;
}
/****************************************/
#ifdef CONFIG_PM
/* System-sleep suspend: delegates to the common amhevc helper. */
static int h265_suspend(struct device *dev)
{
	amhevc_suspend(to_platform_device(dev), dev->power.power_state);
	return 0;
}

/* System-sleep resume: delegates to the common amhevc helper. */
static int h265_resume(struct device *dev)
{
	amhevc_resume(to_platform_device(dev));
	return 0;
}

static const struct dev_pm_ops h265_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(h265_suspend, h265_resume)
};
#endif

static struct platform_driver amvdec_h265_driver = {
	.probe = amvdec_h265_probe,
	.remove = amvdec_h265_remove,
	.driver = {
		.name = DRIVER_NAME,
#ifdef CONFIG_PM
		.pm = &h265_pm_ops,
#endif
	}
};

#ifdef MULTI_INSTANCE_SUPPORT
/*
 * Debug hook: dumps decoder state (counters, queues, buffer pools, key
 * HW registers, and optionally the current input frame bytes) to the
 * kernel log.
 */
static void vh265_dump_state(struct vdec_s *vdec)
{
	int i;
	struct hevc_state_s *hevc =
		(struct hevc_state_s *)vdec->private;
	hevc_print(hevc, 0,
		"====== %s\n", __func__);

	hevc_print(hevc, 0,
		"width/height (%d/%d), reorder_pic_num %d ip_mode %d buf count(bufspec size) %d, video_signal_type 0x%x, is_swap %d i_only 0x%x\n",
		hevc->frame_width,
		hevc->frame_height,
		hevc->sps_num_reorder_pics_0,
		hevc->ip_mode,
		get_work_pic_num(hevc),
		hevc->video_signal_type_debug,
		hevc->is_swap,
		hevc->i_only
		);

	hevc_print(hevc, 0,
		"is_framebase(%d), eos %d, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d\n",
		input_frame_based(vdec),
		hevc->eos,
		hevc->dec_result,
		decode_frame_count[hevc->index],
		display_frame_count[hevc->index],
		run_count[hevc->index],
		not_run_ready[hevc->index],
		input_empty[hevc->index]
		);

	if (hevc->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) {
		enum receviver_start_e state =
			vf_notify_receiver(vdec->vf_provider_name,
				VFRAME_EVENT_PROVIDER_QUREY_STATE,
				NULL);
		hevc_print(hevc, 0,
			"\nreceiver(%s) state %d\n",
			vdec->vf_provider_name,
			state);
	}

	hevc_print(hevc, 0,
		"%s, newq(%d/%d), dispq(%d/%d), vf prepare/get/put (%d/%d/%d), pic_list_init_flag(%d), is_new_pic_available(%d)\n",
		__func__,
		kfifo_len(&hevc->newframe_q),
		VF_POOL_SIZE,
		kfifo_len(&hevc->display_q),
		VF_POOL_SIZE,
		hevc->vf_pre_count,
		hevc->vf_get_count,
		hevc->vf_put_count,
		hevc->pic_list_init_flag,
		is_new_pic_available(hevc)
		);

	dump_pic_list(hevc);

	for (i = 0; i < BUF_POOL_SIZE; i++) {
		hevc_print(hevc, 0,
			"Buf(%d) start_adr 0x%x header_addr 0x%x size 0x%x used %d\n",
			i,
			hevc->m_BUF[i].start_adr,
			hevc->m_BUF[i].header_addr,
			hevc->m_BUF[i].size,
			hevc->m_BUF[i].used_flag);
	}

	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		hevc_print(hevc, 0,
			"mv_Buf(%d) start_adr 0x%x size 0x%x used %d\n",
			i,
			hevc->m_mv_BUF[i].start_adr,
			hevc->m_mv_BUF[i].size,
			hevc->m_mv_BUF[i].used_flag);
	}

	/* Raw HW register snapshot. */
	hevc_print(hevc, 0,
		"HEVC_DEC_STATUS_REG=0x%x\n",
		READ_VREG(HEVC_DEC_STATUS_REG));
	hevc_print(hevc, 0,
		"HEVC_MPC_E=0x%x\n",
		READ_VREG(HEVC_MPC_E));
	hevc_print(hevc, 0,
		"HEVC_DECODE_MODE=0x%x\n",
		READ_VREG(HEVC_DECODE_MODE));
	hevc_print(hevc, 0,
		"HEVC_DECODE_MODE2=0x%x\n",
		READ_VREG(HEVC_DECODE_MODE2));
	hevc_print(hevc, 0,
		"NAL_SEARCH_CTL=0x%x\n",
		READ_VREG(NAL_SEARCH_CTL));
	hevc_print(hevc, 0,
		"HEVC_PARSER_LCU_START=0x%x\n",
		READ_VREG(HEVC_PARSER_LCU_START));
	hevc_print(hevc, 0,
		"HEVC_DECODE_SIZE=0x%x\n",
		READ_VREG(HEVC_DECODE_SIZE));
	hevc_print(hevc, 0,
		"HEVC_SHIFT_BYTE_COUNT=0x%x\n",
		READ_VREG(HEVC_SHIFT_BYTE_COUNT));
	hevc_print(hevc, 0,
		"HEVC_STREAM_START_ADDR=0x%x\n",
		READ_VREG(HEVC_STREAM_START_ADDR));
	hevc_print(hevc, 0,
		"HEVC_STREAM_END_ADDR=0x%x\n",
		READ_VREG(HEVC_STREAM_END_ADDR));
	hevc_print(hevc, 0,
		"HEVC_STREAM_LEVEL=0x%x\n",
		READ_VREG(HEVC_STREAM_LEVEL));
	hevc_print(hevc, 0,
		"HEVC_STREAM_WR_PTR=0x%x\n",
		READ_VREG(HEVC_STREAM_WR_PTR));
	hevc_print(hevc, 0,
		"HEVC_STREAM_RD_PTR=0x%x\n",
		READ_VREG(HEVC_STREAM_RD_PTR));
	hevc_print(hevc, 0,
		"PARSER_VIDEO_RP=0x%x\n",
		STBUF_READ(&vdec->vbuf, get_rp));
	hevc_print(hevc, 0,
		"PARSER_VIDEO_WP=0x%x\n",
		STBUF_READ(&vdec->vbuf, get_wp));

	/* Optional hexdump of the current input chunk (frame mode). */
	if (input_frame_based(vdec) &&
		(get_dbg_flag(hevc) & PRINT_FRAMEBASE_DATA)
		) {
		int jj;
		if (hevc->chunk && hevc->chunk->block &&
			hevc->chunk->size > 0) {
			u8 *data = NULL;
			if (!hevc->chunk->block->is_mapped)
				data = codec_mm_vmap(hevc->chunk->block->start +
					hevc->chunk->offset, hevc->chunk->size);
			else
				data = ((u8 *)hevc->chunk->block->start_virt) +
					hevc->chunk->offset;
			hevc_print(hevc, 0,
				"frame data size 0x%x\n",
				hevc->chunk->size);
			for (jj = 0; jj < hevc->chunk->size; jj++) {
				if ((jj & 0xf) == 0)
					hevc_print(hevc,
						PRINT_FRAMEBASE_DATA,
						"%06x:", jj);
				hevc_print_cont(hevc,
					PRINT_FRAMEBASE_DATA,
					"%02x ", data[jj]);
				if (((jj + 1) & 0xf) == 0)
					hevc_print_cont(hevc,
						PRINT_FRAMEBASE_DATA,
						"\n");
			}

			if (!hevc->chunk->block->is_mapped)
				codec_mm_unmap_phyaddr(data);
		}
	}

}


/* Probe entry for the multi-instance (ammvdec_h265) device. */
static int ammvdec_h265_probe(struct platform_device *pdev)
{

	struct vdec_s *pdata = *(struct
vdec_s **)pdev->dev.platform_data; + struct hevc_state_s *hevc = NULL; + int ret; + int i; +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + int config_val; +#endif + + if (pdata == NULL) { + pr_info("\nammvdec_h265 memory resource undefined.\n"); + return -EFAULT; + } + + + hevc = vmalloc(sizeof(struct hevc_state_s)); + if (hevc == NULL) { + pr_info("\nammvdec_h265 device data allocation failed\n"); + return -ENOMEM; + } + memset(hevc, 0, sizeof(struct hevc_state_s)); + + /* the ctx from v4l2 driver. */ + hevc->v4l2_ctx = pdata->private; + + pdata->private = hevc; + pdata->dec_status = vh265_dec_status; + pdata->set_trickmode = vh265_set_trickmode; + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vh265_irq_cb; + pdata->threaded_irq_handler = vh265_threaded_irq_cb; + pdata->dump_state = vh265_dump_state; + + hevc->index = pdev->id; + hevc->m_ins_flag = 1; + + if (is_rdma_enable()) { + hevc->rdma_adr = dma_alloc_coherent(amports_get_dma_device(), RDMA_SIZE, &hevc->rdma_phy_adr, GFP_KERNEL); + for (i = 0; i < SCALELUT_DATA_WRITE_NUM; i++) { + hevc->rdma_adr[i * 4] = HEVC_IQIT_SCALELUT_WR_ADDR & 0xfff; + hevc->rdma_adr[i * 4 + 1] = i; + hevc->rdma_adr[i * 4 + 2] = HEVC_IQIT_SCALELUT_DATA & 0xfff; + hevc->rdma_adr[i * 4 + 3] = 0; + if (i == SCALELUT_DATA_WRITE_NUM - 1) { + hevc->rdma_adr[i * 4 + 2] = (HEVC_IQIT_SCALELUT_DATA & 0xfff) | 0x20000; + } + } + } + snprintf(hevc->trace.vdec_name, sizeof(hevc->trace.vdec_name), + "h265-%d", hevc->index); + snprintf(hevc->trace.pts_name, sizeof(hevc->trace.pts_name), + "%s-pts", hevc->trace.vdec_name); + snprintf(hevc->trace.vf_get_name, sizeof(hevc->trace.vf_get_name), + "%s-vf_get", hevc->trace.vdec_name); + snprintf(hevc->trace.vf_put_name, sizeof(hevc->trace.vf_put_name), + "%s-vf_put", hevc->trace.vdec_name); + snprintf(hevc->trace.set_canvas0_addr, sizeof(hevc->trace.set_canvas0_addr), + "%s-set_canvas0_addr", hevc->trace.vdec_name); + snprintf(hevc->trace.get_canvas0_addr, 
sizeof(hevc->trace.get_canvas0_addr), + "%s-get_canvas0_addr", hevc->trace.vdec_name); + snprintf(hevc->trace.put_canvas0_addr, sizeof(hevc->trace.put_canvas0_addr), + "%s-put_canvas0_addr", hevc->trace.vdec_name); + snprintf(hevc->trace.new_q_name, sizeof(hevc->trace.new_q_name), + "%s-newframe_q", hevc->trace.vdec_name); + snprintf(hevc->trace.disp_q_name, sizeof(hevc->trace.disp_q_name), + "%s-dispframe_q", hevc->trace.vdec_name); + snprintf(hevc->trace.decode_time_name, sizeof(hevc->trace.decode_time_name), + "decoder_time%d", pdev->id); + snprintf(hevc->trace.decode_run_time_name, sizeof(hevc->trace.decode_run_time_name), + "decoder_run_time%d", pdev->id); + snprintf(hevc->trace.decode_header_memory_time_name, sizeof(hevc->trace.decode_header_memory_time_name), + "decoder_header_time%d", pdev->id); + snprintf(hevc->trace.decode_work_time_name, sizeof(hevc->trace.decode_work_time_name), + "decoder_work_time%d", pdev->id); + + if (pdata->use_vfm_path) { + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + hevc->frameinfo_enable = 1; + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (vdec_dual(pdata)) { + struct hevc_state_s *hevc_pair = NULL; + + if (!pdata->is_stream_mode_dv_multi) { + if (dv_toggle_prov_name) /*debug purpose*/ + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVBL_PROVIDER_NAME : + VFM_DEC_DVEL_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVEL_PROVIDER_NAME : + VFM_DEC_DVBL_PROVIDER_NAME); + } else { + if (dv_toggle_prov_name) /*debug purpose*/ + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVBL_PROVIDER_NAME2 : + VFM_DEC_DVEL_PROVIDER_NAME2); + else + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? 
VFM_DEC_DVEL_PROVIDER_NAME2 : + VFM_DEC_DVBL_PROVIDER_NAME2); + } + + hevc->dolby_enhance_flag = pdata->master ? 1 : 0; + if (pdata->master) + hevc_pair = (struct hevc_state_s *) + pdata->master->private; + else if (pdata->slave) + hevc_pair = (struct hevc_state_s *) + pdata->slave->private; + if (hevc_pair) + hevc->shift_byte_count_lo = + hevc_pair->shift_byte_count_lo; + } +#endif + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + + hevc->provider_name = pdata->vf_provider_name; + platform_set_drvdata(pdev, pdata); + + hevc->platform_dev = pdev; + + if (((get_dbg_flag(hevc) & IGNORE_PARAM_FROM_CONFIG) == 0) && + pdata->config_len) { +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + /*use ptr config for doubel_write_mode, etc*/ + hevc_print(hevc, 0, "pdata->config=%s\n", pdata->config); + + if (get_config_int(pdata->config, "hevc_double_write_mode", + &config_val) == 0) + hevc->double_write_mode = config_val; + else + hevc->double_write_mode = double_write_mode; + + if (get_config_int(pdata->config, "save_buffer_mode", + &config_val) == 0) + hevc->save_buffer_mode = config_val; + else + hevc->save_buffer_mode = 0; + + /*use ptr config for max_pic_w, etc*/ + if (get_config_int(pdata->config, "hevc_buf_width", + &config_val) == 0) { + hevc->max_pic_w = config_val; + } + if (get_config_int(pdata->config, "hevc_buf_height", + &config_val) == 0) { + hevc->max_pic_h = config_val; + } + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hevc->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hevc->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hevc->is_used_v4l = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_buffer_margin", + &config_val) == 0) + hevc->dynamic_buf_num_margin = config_val; + + if (get_config_int(pdata->config, + 
"parm_v4l_canvas_mem_mode", + &config_val) == 0) + hevc->mem_map_mode = config_val; + + if (get_config_int(pdata->config, "negative_dv", + &config_val) == 0) { + hevc->discard_dv_data = config_val; + hevc_print(hevc, 0, "discard dv data\n"); + } + + if (get_config_int(pdata->config, "dv_duallayer", + &config_val) == 0) { + hevc->dv_duallayer = config_val; + hevc_print(hevc, 0, "dv dual layer\n"); + } + + if (get_config_int(pdata->config, + "parm_enable_fence", + &config_val) == 0) + hevc->enable_fence = config_val; + + if (get_config_int(pdata->config, + "parm_fence_usage", + &config_val) == 0) + hevc->fence_usage = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_low_latency_mode", + &config_val) == 0) + hevc->low_latency_flag = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_metadata_config_flag", + &config_val) == 0) { + hevc->metadata_config_flag = config_val; + hevc->discard_dv_data = hevc->metadata_config_flag & VDEC_CFG_FLAG_DV_NEGATIVE; + hevc->dv_duallayer = hevc->metadata_config_flag & VDEC_CFG_FLAG_DV_TWOLARYER; + if (hevc->discard_dv_data) + hevc_print(hevc, 0, "discard dv data\n"); + if (hevc->dv_duallayer) + hevc_print(hevc, 0, "dv_duallayer\n"); + } +#endif + } else { + if (pdata->sys_info) + hevc->vh265_amstream_dec_info = *pdata->sys_info; + else { + hevc->vh265_amstream_dec_info.width = 0; + hevc->vh265_amstream_dec_info.height = 0; + hevc->vh265_amstream_dec_info.rate = 30; + } + hevc->double_write_mode = double_write_mode; + } + + + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vh265_vf_provider, pdata); + + if (force_config_fence) { + hevc->enable_fence = true; + hevc->fence_usage = (force_config_fence >> 4) & 0xf; + if (force_config_fence & 0x2) + hevc->enable_fence = false; + hevc_print(hevc, 0, + "enable fence: %d, fence usage: %d\n", + hevc->enable_fence, hevc->fence_usage); + } + + if (hevc->save_buffer_mode && dynamic_buf_num_margin > 2) + hevc->dynamic_buf_num_margin = 
dynamic_buf_num_margin -2; + else + hevc->dynamic_buf_num_margin = dynamic_buf_num_margin; + + hevc->mem_map_mode = mem_map_mode; + + hevc->endian = HEVC_CONFIG_LITTLE_ENDIAN; + if (is_support_vdec_canvas()) + hevc->endian = HEVC_CONFIG_BIG_ENDIAN; + if (endian) + hevc->endian = endian; + + if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) && + (hevc->double_write_mode == 3)) + hevc->double_write_mode = 0x1000; + + /* get valid double write from node */ + if (double_write_mode) + hevc->double_write_mode = get_double_write_mode(hevc); + + if (mmu_enable_force) { + hevc->mmu_enable = 1; + } else { + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL) || + (hevc->double_write_mode & 0x10)) + hevc->mmu_enable = 0; + else + hevc->mmu_enable = 1; + } +#ifdef H265_10B_MMU_DW + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) { + hevc->dw_mmu_enable = + get_double_write_mode(hevc) & 0x20 ? 1 : 0; + } else { + hevc->dw_mmu_enable = 0; + } +#endif + if (init_mmu_buffers(hevc, 0) < 0) { + hevc_print(hevc, 0, + "\n 265 mmu init failed!\n"); + mutex_unlock(&vh265_mutex); + /* devm_kfree(&pdev->dev, (void *)hevc);*/ + if (hevc) + vfree((void *)hevc); + pdata->dec_status = NULL; + return -EFAULT; + } +#if 0 + hevc->buf_start = pdata->mem_start; + hevc->buf_size = pdata->mem_end - pdata->mem_start + 1; +#else + + ret = decoder_bmmu_box_alloc_buf_phy(hevc->bmmu_box, + BMMU_WORKSPACE_ID, work_buf_size, + DRIVER_NAME, &hevc->buf_start); + if (ret < 0) { + uninit_mmu_buffers(hevc); + /* devm_kfree(&pdev->dev, (void *)hevc); */ + if (hevc) + vfree((void *)hevc); + pdata->dec_status = NULL; + mutex_unlock(&vh265_mutex); + return ret; + } + hevc->buf_size = work_buf_size; +#endif + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXTVBB) && + (parser_sei_enable & 0x100) == 0) + parser_sei_enable = 7; + hevc->init_flag = 0; + hevc->first_sc_checked = 0; + hevc->uninit_list = 0; + hevc->fatal_error = 0; + hevc->show_frame_num = 0; + + /* + *hevc->mc_buf_spec.buf_end = 
pdata->mem_end + 1; + *for (i = 0; i < WORK_BUF_SPEC_NUM; i++) + * amvh265_workbuff_spec[i].start_adr = pdata->mem_start; + */ + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "===H.265 decoder mem resource 0x%lx size 0x%x\n", + hevc->buf_start, hevc->buf_size); + } + + hevc_print(hevc, 0, + "dynamic_buf_num_margin=%d\n", + hevc->dynamic_buf_num_margin); + hevc_print(hevc, 0, + "double_write_mode=%d\n", + hevc->double_write_mode); + + hevc->cma_dev = pdata->cma_dev; + vh265_vdec_info_init(hevc); + + if (vh265_init(pdata) < 0) { + hevc_print(hevc, 0, + "\namvdec_h265 init failed.\n"); + hevc_local_uninit(hevc); + if (hevc->gvs) + kfree(hevc->gvs); + hevc->gvs = NULL; + uninit_mmu_buffers(hevc); + /* devm_kfree(&pdev->dev, (void *)hevc); */ + if (hevc) + vfree((void *)hevc); + pdata->dec_status = NULL; + return -ENODEV; + } + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_init(pdata); +#endif + + vdec_set_prepare_level(pdata, start_decode_buf_level); + + /*set the max clk for smooth playing...*/ + hevc_source_changed(VFORMAT_HEVC, + 3840, 2160, 60); + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_HEVC); + else + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + + mutex_init(&hevc->fence_mutex); + if (hevc->enable_fence) { + pdata->sync = vdec_sync_get(); + if (!pdata->sync) { + hevc_print(hevc, 0, "alloc fence timeline error\n"); + hevc_local_uninit(hevc); + if (hevc->gvs) + kfree(hevc->gvs); + hevc->gvs = NULL; + uninit_mmu_buffers(hevc); + /* devm_kfree(&pdev->dev, (void *)hevc); */ + if (hevc) + vfree((void *)hevc); + pdata->dec_status = NULL; + return -ENODEV; + } + pdata->sync->usage = hevc->fence_usage; + /* creat timeline. */ + vdec_timeline_create(pdata->sync, DRIVER_NAME); + } + + return 0; +} + +static void vdec_fence_release(struct hevc_state_s *hw, + struct vdec_sync *sync) +{ + ulong expires; + + /* notify signal to wake up all fences. 
*/ + vdec_timeline_increase(sync, VF_POOL_SIZE); + + expires = jiffies + msecs_to_jiffies(2000); + while (!check_objs_all_signaled(sync)) { + if (time_after(jiffies, expires)) { + pr_err("wait fence signaled timeout.\n"); + break; + } + } + + /* decreases refcnt of timeline. */ + vdec_timeline_put(sync); +} + +static int ammvdec_h265_remove(struct platform_device *pdev) +{ + struct hevc_state_s *hevc = + (struct hevc_state_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec; + + if (hevc == NULL) + return 0; + vdec = hw_to_vdec(hevc); + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_exit(vdec); +#endif + + //pr_err("%s [pid=%d,tgid=%d]\n", __func__, current->pid, current->tgid); + if (get_dbg_flag(hevc)) + hevc_print(hevc, 0, "%s\r\n", __func__); + + vmh265_stop(hevc); + + /* vdec_source_changed(VFORMAT_H264, 0, 0, 0); */ + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hevc), CORE_MASK_HEVC); + else + vdec_core_release(hw_to_vdec(hevc), CORE_MASK_HEVC); + + vdec_set_status(hw_to_vdec(hevc), VDEC_STATUS_DISCONNECTED); + + if (hevc->enable_fence) + vdec_fence_release(hevc, vdec->sync); + if (is_rdma_enable()) + dma_free_coherent(amports_get_dma_device(), RDMA_SIZE, hevc->rdma_adr, hevc->rdma_phy_adr); + vfree((void *)hevc); + + return 0; +} + +static struct platform_driver ammvdec_h265_driver = { + .probe = ammvdec_h265_probe, + .remove = ammvdec_h265_remove, + .driver = { + .name = MULTI_DRIVER_NAME, +#ifdef CONFIG_PM + .pm = &h265_pm_ops, +#endif + } +}; +#endif + +static struct codec_profile_t amvdec_h265_profile = { + .name = "hevc", + .profile = "" +}; + +static struct codec_profile_t amvdec_h265_profile_single, + amvdec_h265_profile_mult; + +static struct mconfig h265_configs[] = { + MC_PU32("use_cma", &use_cma), + MC_PU32("bit_depth_luma", &bit_depth_luma), + MC_PU32("bit_depth_chroma", &bit_depth_chroma), + MC_PU32("video_signal_type", &video_signal_type), +#ifdef ERROR_HANDLE_DEBUG + MC_PU32("dbg_nal_skip_flag", 
&dbg_nal_skip_flag), + MC_PU32("dbg_nal_skip_count", &dbg_nal_skip_count), +#endif + MC_PU32("radr", &radr), + MC_PU32("rval", &rval), + MC_PU32("dbg_cmd", &dbg_cmd), + MC_PU32("dbg_skip_decode_index", &dbg_skip_decode_index), + MC_PU32("endian", &endian), + MC_PU32("step", &step), + MC_PU32("udebug_flag", &udebug_flag), + MC_PU32("decode_pic_begin", &decode_pic_begin), + MC_PU32("slice_parse_begin", &slice_parse_begin), + MC_PU32("nal_skip_policy", &nal_skip_policy), + MC_PU32("i_only_flag", &i_only_flag), + MC_PU32("error_handle_policy", &error_handle_policy), + MC_PU32("error_handle_threshold", &error_handle_threshold), + MC_PU32("error_handle_nal_skip_threshold", + &error_handle_nal_skip_threshold), + MC_PU32("error_handle_system_threshold", + &error_handle_system_threshold), + MC_PU32("error_skip_nal_count", &error_skip_nal_count), + MC_PU32("debug", &debug), + MC_PU32("debug_mask", &debug_mask), + MC_PU32("buffer_mode", &buffer_mode), + MC_PU32("double_write_mode", &double_write_mode), + MC_PU32("buf_alloc_width", &buf_alloc_width), + MC_PU32("buf_alloc_height", &buf_alloc_height), + MC_PU32("dynamic_buf_num_margin", &dynamic_buf_num_margin), + MC_PU32("max_buf_num", &max_buf_num), + MC_PU32("buf_alloc_size", &buf_alloc_size), + MC_PU32("buffer_mode_dbg", &buffer_mode_dbg), + MC_PU32("mem_map_mode", &mem_map_mode), + MC_PU32("enable_mem_saving", &enable_mem_saving), + MC_PU32("force_w_h", &force_w_h), + MC_PU32("force_fps", &force_fps), + MC_PU32("max_decoding_time", &max_decoding_time), + MC_PU32("prefix_aux_buf_size", &prefix_aux_buf_size), + MC_PU32("suffix_aux_buf_size", &suffix_aux_buf_size), + MC_PU32("interlace_enable", &interlace_enable), + MC_PU32("pts_unstable", &pts_unstable), + MC_PU32("parser_sei_enable", &parser_sei_enable), + MC_PU32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("decode_timeout_val", &decode_timeout_val), + MC_PU32("parser_dolby_vision_enable", &parser_dolby_vision_enable), +#ifdef 
CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + MC_PU32("dv_toggle_prov_name", &dv_toggle_prov_name), + MC_PU32("dv_debug", &dv_debug), +#endif +}; +static struct mconfig_node decoder_265_node; + +static int __init amvdec_h265_driver_init_module(void) +{ + struct BuffInfo_s *p_buf_info; + + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + p_buf_info = &amvh265_workbuff_spec[2]; + else + p_buf_info = &amvh265_workbuff_spec[1]; + } else + p_buf_info = &amvh265_workbuff_spec[0]; + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) + p_buf_info = &amvh265_workbuff_spec[5]; + else + p_buf_info = &amvh265_workbuff_spec[3]; + } + + init_buff_spec(NULL, p_buf_info); + work_buf_size = + (p_buf_info->end_adr - p_buf_info->start_adr + + 0xffff) & (~0xffff); + + pr_debug("amvdec_h265 module init\n"); + error_handle_policy = 0; + +#ifdef ERROR_HANDLE_DEBUG + dbg_nal_skip_flag = 0; + dbg_nal_skip_count = 0; +#endif + udebug_flag = 0; + decode_pic_begin = 0; + slice_parse_begin = 0; + step = 0; + buf_alloc_size = 0; + +#ifdef MULTI_INSTANCE_SUPPORT + if (platform_driver_register(&ammvdec_h265_driver)) + pr_err("failed to register ammvdec_h265 driver\n"); + +#endif + if (platform_driver_register(&amvdec_h265_driver)) { + pr_err("failed to register amvdec_h265 driver\n"); + return -ENODEV; + } +#if 1/*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8*/ + if (!has_hevc_vdec()) { + /* not support hevc */ + amvdec_h265_profile.name = "hevc_unsupport"; + } + if (vdec_is_support_4k()) { + if (is_meson_m8m2_cpu()) { + /* m8m2 support 4k */ + amvdec_h265_profile.profile = "4k"; + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + amvdec_h265_profile.profile = + "8k, 8bit, 10bit, dwrite, compressed, frame_dv, fence, v4l-uvm"; + }else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB) { + amvdec_h265_profile.profile = 
+ "4k, 8bit, 10bit, dwrite, compressed, frame_dv, fence, v4l-uvm"; + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_MG9TV) + amvdec_h265_profile.profile = "4k"; + } else { + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D || is_cpu_s4_s805x2()) { + amvdec_h265_profile.profile = + "8bit, 10bit, dwrite, compressed, frame_dv, v4l"; + } else { + amvdec_h265_profile.profile = + "8bit, 10bit, dwrite, compressed, v4l"; + } + } +#endif + if (codec_mm_get_total_size() < 80 * SZ_1M) { + pr_info("amvdec_h265 default mmu enabled.\n"); + mmu_enable = 1; + } + vcodec_profile_register(&amvdec_h265_profile); + amvdec_h265_profile_single = amvdec_h265_profile; + amvdec_h265_profile_single.name = "h265"; + vcodec_profile_register(&amvdec_h265_profile_single); + amvdec_h265_profile_mult = amvdec_h265_profile; + amvdec_h265_profile_mult.name = "mh265"; + vcodec_profile_register(&amvdec_h265_profile_mult); + INIT_REG_NODE_CONFIGS("media.decoder", &decoder_265_node, + "h265", h265_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_HEVC, 0); + return 0; +} + +static void __exit amvdec_h265_driver_remove_module(void) +{ + pr_debug("amvdec_h265 module remove.\n"); + +#ifdef MULTI_INSTANCE_SUPPORT + platform_driver_unregister(&ammvdec_h265_driver); +#endif + platform_driver_unregister(&amvdec_h265_driver); +} + +/****************************************/ +/* + *module_param(stat, uint, 0664); + *MODULE_PARM_DESC(stat, "\n amvdec_h265 stat\n"); + */ +module_param(use_cma, uint, 0664); +MODULE_PARM_DESC(use_cma, "\n amvdec_h265 use_cma\n"); + +module_param(bit_depth_luma, uint, 0664); +MODULE_PARM_DESC(bit_depth_luma, "\n amvdec_h265 bit_depth_luma\n"); + +module_param(bit_depth_chroma, uint, 0664); +MODULE_PARM_DESC(bit_depth_chroma, "\n amvdec_h265 bit_depth_chroma\n"); + +module_param(video_signal_type, uint, 0664); +MODULE_PARM_DESC(video_signal_type, "\n amvdec_h265 video_signal_type\n"); + +#ifdef ERROR_HANDLE_DEBUG +module_param(dbg_nal_skip_flag, uint, 0664); 
+MODULE_PARM_DESC(dbg_nal_skip_flag, "\n amvdec_h265 dbg_nal_skip_flag\n"); + +module_param(dbg_nal_skip_count, uint, 0664); +MODULE_PARM_DESC(dbg_nal_skip_count, "\n amvdec_h265 dbg_nal_skip_count\n"); +#endif + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\n radr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\n rval\n"); + +module_param(dbg_cmd, uint, 0664); +MODULE_PARM_DESC(dbg_cmd, "\n dbg_cmd\n"); + +module_param(dump_nal, uint, 0664); +MODULE_PARM_DESC(dump_nal, "\n dump_nal\n"); + +module_param(dbg_skip_decode_index, uint, 0664); +MODULE_PARM_DESC(dbg_skip_decode_index, "\n dbg_skip_decode_index\n"); + +module_param(endian, uint, 0664); +MODULE_PARM_DESC(endian, "\n rval\n"); + +module_param(step, uint, 0664); +MODULE_PARM_DESC(step, "\n amvdec_h265 step\n"); + +module_param(decode_pic_begin, uint, 0664); +MODULE_PARM_DESC(decode_pic_begin, "\n amvdec_h265 decode_pic_begin\n"); + +module_param(slice_parse_begin, uint, 0664); +MODULE_PARM_DESC(slice_parse_begin, "\n amvdec_h265 slice_parse_begin\n"); + +module_param(nal_skip_policy, uint, 0664); +MODULE_PARM_DESC(nal_skip_policy, "\n amvdec_h265 nal_skip_policy\n"); + +module_param(i_only_flag, uint, 0664); +MODULE_PARM_DESC(i_only_flag, "\n amvdec_h265 i_only_flag\n"); + +module_param(fast_output_enable, uint, 0664); +MODULE_PARM_DESC(fast_output_enable, "\n amvdec_h265 fast_output_enable\n"); + +module_param(error_handle_policy, uint, 0664); +MODULE_PARM_DESC(error_handle_policy, "\n amvdec_h265 error_handle_policy\n"); + +module_param(error_handle_threshold, uint, 0664); +MODULE_PARM_DESC(error_handle_threshold, + "\n amvdec_h265 error_handle_threshold\n"); + +module_param(error_handle_nal_skip_threshold, uint, 0664); +MODULE_PARM_DESC(error_handle_nal_skip_threshold, + "\n amvdec_h265 error_handle_nal_skip_threshold\n"); + +module_param(error_handle_system_threshold, uint, 0664); +MODULE_PARM_DESC(error_handle_system_threshold, + "\n amvdec_h265 
error_handle_system_threshold\n"); + +module_param(error_skip_nal_count, uint, 0664); +MODULE_PARM_DESC(error_skip_nal_count, + "\n amvdec_h265 error_skip_nal_count\n"); + +module_param(skip_nal_count, uint, 0664); +MODULE_PARM_DESC(skip_nal_count, "\n skip_nal_count\n"); + +module_param(debug, uint, 0664); +MODULE_PARM_DESC(debug, "\n amvdec_h265 debug\n"); + +module_param(debug_mask, uint, 0664); +MODULE_PARM_DESC(debug_mask, "\n amvdec_h265 debug mask\n"); + +module_param(log_mask, uint, 0664); +MODULE_PARM_DESC(log_mask, "\n amvdec_h265 log_mask\n"); + +module_param(buffer_mode, uint, 0664); +MODULE_PARM_DESC(buffer_mode, "\n buffer_mode\n"); + +module_param(double_write_mode, uint, 0664); +MODULE_PARM_DESC(double_write_mode, "\n double_write_mode\n"); + +module_param(buf_alloc_width, uint, 0664); +MODULE_PARM_DESC(buf_alloc_width, "\n buf_alloc_width\n"); + +module_param(buf_alloc_height, uint, 0664); +MODULE_PARM_DESC(buf_alloc_height, "\n buf_alloc_height\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +module_param(max_buf_num, uint, 0664); +MODULE_PARM_DESC(max_buf_num, "\n max_buf_num\n"); + +module_param(buf_alloc_size, uint, 0664); +MODULE_PARM_DESC(buf_alloc_size, "\n buf_alloc_size\n"); + +#ifdef CONSTRAIN_MAX_BUF_NUM +module_param(run_ready_max_vf_only_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_vf_only_num, "\n run_ready_max_vf_only_num\n"); + +module_param(run_ready_display_q_num, uint, 0664); +MODULE_PARM_DESC(run_ready_display_q_num, "\n run_ready_display_q_num\n"); + +module_param(run_ready_max_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_buf_num, "\n run_ready_max_buf_num\n"); +#endif + +#if 0 +module_param(re_config_pic_flag, uint, 0664); +MODULE_PARM_DESC(re_config_pic_flag, "\n re_config_pic_flag\n"); +#endif + +module_param(buffer_mode_dbg, uint, 0664); +MODULE_PARM_DESC(buffer_mode_dbg, "\n buffer_mode_dbg\n"); + +module_param(mem_map_mode, 
uint, 0664); +MODULE_PARM_DESC(mem_map_mode, "\n mem_map_mode\n"); + +module_param(enable_mem_saving, uint, 0664); +MODULE_PARM_DESC(enable_mem_saving, "\n enable_mem_saving\n"); + +module_param(force_w_h, uint, 0664); +MODULE_PARM_DESC(force_w_h, "\n force_w_h\n"); + +module_param(force_fps, uint, 0664); +MODULE_PARM_DESC(force_fps, "\n force_fps\n"); + +module_param(max_decoding_time, uint, 0664); +MODULE_PARM_DESC(max_decoding_time, "\n max_decoding_time\n"); + +module_param(prefix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(prefix_aux_buf_size, "\n prefix_aux_buf_size\n"); + +module_param(suffix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(suffix_aux_buf_size, "\n suffix_aux_buf_size\n"); + +module_param(interlace_enable, uint, 0664); +MODULE_PARM_DESC(interlace_enable, "\n interlace_enable\n"); +module_param(pts_unstable, uint, 0664); +MODULE_PARM_DESC(pts_unstable, "\n amvdec_h265 pts_unstable\n"); +module_param(parser_sei_enable, uint, 0664); +MODULE_PARM_DESC(parser_sei_enable, "\n parser_sei_enable\n"); + +module_param(parser_dolby_vision_enable, uint, 0664); +MODULE_PARM_DESC(parser_dolby_vision_enable, + "\n parser_dolby_vision_enable\n"); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(dolby_meta_with_el, uint, 0664); +MODULE_PARM_DESC(dolby_meta_with_el, + "\n dolby_meta_with_el\n"); + +module_param(dolby_el_flush_th, uint, 0664); +MODULE_PARM_DESC(dolby_el_flush_th, + "\n dolby_el_flush_th\n"); +#endif +module_param(mmu_enable, uint, 0664); +MODULE_PARM_DESC(mmu_enable, "\n mmu_enable\n"); + +module_param(mmu_enable_force, uint, 0664); +MODULE_PARM_DESC(mmu_enable_force, "\n mmu_enable_force\n"); + +#ifdef MULTI_INSTANCE_SUPPORT +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n h265 start_decode_buf_level\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, + "\n h265 decode_timeout_val\n"); + +module_param(print_lcu_error, uint, 0664); 
+MODULE_PARM_DESC(print_lcu_error, + "\n h265 print_lcu_error\n"); + +module_param(data_resend_policy, uint, 0664); +MODULE_PARM_DESC(data_resend_policy, + "\n h265 data_resend_policy\n"); + +module_param(poc_num_margin, int, 0664); +MODULE_PARM_DESC(poc_num_margin, + "\n h265 poc_num_margin\n"); + +module_param(poc_error_limit, int, 0664); +MODULE_PARM_DESC(poc_error_limit, + "\n h265 poc_error_limit\n"); + +module_param_array(decode_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(display_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_get_frame_interval, + uint, &max_decode_instance_num, 0664); + +module_param_array(run_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(input_empty, uint, + &max_decode_instance_num, 0664); + +module_param_array(not_run_ready, uint, + &max_decode_instance_num, 0664); + +module_param_array(ref_frame_mark_flag, uint, + &max_decode_instance_num, 0664); + +#endif +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(dv_toggle_prov_name, uint, 0664); +MODULE_PARM_DESC(dv_toggle_prov_name, "\n dv_toggle_prov_name\n"); + +module_param(dv_debug, uint, 0664); +MODULE_PARM_DESC(dv_debug, "\n dv_debug\n"); + +module_param(force_bypass_dvenl, uint, 0664); +MODULE_PARM_DESC(force_bypass_dvenl, "\n force_bypass_dvenl\n"); +#endif + +#ifdef AGAIN_HAS_THRESHOLD +module_param(again_threshold, uint, 0664); +MODULE_PARM_DESC(again_threshold, "\n again_threshold\n"); +#endif + +module_param(force_disp_pic_index, int, 0664); +MODULE_PARM_DESC(force_disp_pic_index, + "\n amvdec_h265 force_disp_pic_index\n"); + +module_param(frmbase_cont_bitlevel, uint, 0664); +MODULE_PARM_DESC(frmbase_cont_bitlevel, "\n frmbase_cont_bitlevel\n"); + +module_param(force_bufspec, uint, 0664); +MODULE_PARM_DESC(force_bufspec, "\n amvdec_h265 force_bufspec\n"); + +module_param(udebug_flag, uint, 0664); 
+MODULE_PARM_DESC(udebug_flag, "\n amvdec_h265 udebug_flag\n"); + +module_param(udebug_pause_pos, uint, 0664); +MODULE_PARM_DESC(udebug_pause_pos, "\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, "\n ammvdec_h264 pre_decode_buf_level\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +module_param(disp_vframe_valve_level, uint, 0664); +MODULE_PARM_DESC(disp_vframe_valve_level, "\n disp_vframe_valve_level\n"); + +module_param(pic_list_debug, uint, 0664); +MODULE_PARM_DESC(pic_list_debug, "\n pic_list_debug\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n amvdec_h265 without_display_mode\n"); + +#ifdef HEVC_8K_LFTOFFSET_FIX +module_param(performance_profile, uint, 0664); +MODULE_PARM_DESC(performance_profile, "\n amvdec_h265 performance_profile\n"); +#endif +module_param(disable_ip_mode, uint, 0664); +MODULE_PARM_DESC(disable_ip_mode, "\n amvdec_h265 disable ip_mode\n"); + +module_param(dirty_again_threshold, uint, 0664); +MODULE_PARM_DESC(dirty_again_threshold, "\n dirty_again_threshold\n"); + +module_param(dirty_buffersize_threshold, uint, 0664); +MODULE_PARM_DESC(dirty_buffersize_threshold, "\n dirty_buffersize_threshold\n"); + +module_param(force_config_fence, uint, 0664); +MODULE_PARM_DESC(force_config_fence, "\n force enable fence\n"); + +module_param(mv_buf_dynamic_alloc, uint, 0664); +MODULE_PARM_DESC(mv_buf_dynamic_alloc, "\n mv_buf_dynamic_alloc\n"); + +module_param(detect_stuck_buffer_margin, uint, 0664); +MODULE_PARM_DESC(detect_stuck_buffer_margin, "\n detect_stuck_buffer_margin\n"); + +module_init(amvdec_h265_driver_init_module); +module_exit(amvdec_h265_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC h265 Video Decoder Driver"); 
+MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <tim.yao@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/h265/vh265.h b/drivers/frame_provider/decoder/h265/vh265.h new file mode 100644 index 0000000..11de11a --- /dev/null +++ b/drivers/frame_provider/decoder/h265/vh265.h
@@ -0,0 +1,27 @@ +/* + * drivers/amlogic/amports/vh265.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef VH265_H +#define VH265_H + +extern u32 get_blackout_policy(void); + +extern s32 vh265_init(void); + +extern s32 vh265_release(void); + +#endif /* VH265_H */
diff --git a/drivers/frame_provider/decoder/mjpeg/Makefile b/drivers/frame_provider/decoder/mjpeg/Makefile new file mode 100644 index 0000000..ab91854 --- /dev/null +++ b/drivers/frame_provider/decoder/mjpeg/Makefile
@@ -0,0 +1,5 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_MJPEG) += amvdec_mjpeg.o +amvdec_mjpeg-objs += vmjpeg.o + +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_MJPEG_MULTI) += amvdec_mmjpeg.o +amvdec_mmjpeg-objs += vmjpeg_multi.o
diff --git a/drivers/frame_provider/decoder/mjpeg/vmjpeg.c b/drivers/frame_provider/decoder/mjpeg/vmjpeg.c new file mode 100644 index 0000000..917a2a1 --- /dev/null +++ b/drivers/frame_provider/decoder/mjpeg/vmjpeg.c
@@ -0,0 +1,959 @@ +/* + * drivers/amlogic/amports/vmjpeg.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/registers/register.h> +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/codec_mm/configs.h> + +#ifdef CONFIG_AM_VDEC_MJPEG_LOG +#define AMLOG +#define LOG_LEVEL_VAR amlog_level_vmjpeg +#define LOG_MASK_VAR amlog_mask_vmjpeg +#define LOG_LEVEL_ERROR 0 +#define LOG_LEVEL_INFO 1 +#define LOG_LEVEL_DESC "0:ERROR, 1:INFO" +#endif +#include <linux/amlogic/media/utils/amlog.h> +MODULE_AMLOG(LOG_LEVEL_ERROR, 0, LOG_LEVEL_DESC, LOG_DEFAULT_MASK_DESC); + +#include "../utils/amvdec.h" +#include "../utils/firmware.h" + +#define DRIVER_NAME "amvdec_mjpeg" +#define MODULE_NAME "amvdec_mjpeg" + +/* protocol 
register usage + * AV_SCRATCH_0 - AV_SCRATCH_1 : initial display buffer fifo + * AV_SCRATCH_2 - AV_SCRATCH_3 : decoder settings + * AV_SCRATCH_4 - AV_SCRATCH_7 : display buffer spec + * AV_SCRATCH_8 - AV_SCRATCH_9 : amrisc/host display buffer management + * AV_SCRATCH_a : time stamp + */ + +#define MREG_DECODE_PARAM AV_SCRATCH_2 /* bit 0-3: pico_addr_mode */ +/* bit 15-4: reference height */ +#define MREG_TO_AMRISC AV_SCRATCH_8 +#define MREG_FROM_AMRISC AV_SCRATCH_9 +#define MREG_FRAME_OFFSET AV_SCRATCH_A + +#define PICINFO_BUF_IDX_MASK 0x0007 +#define PICINFO_AVI1 0x0080 +#define PICINFO_INTERLACE 0x0020 +#define PICINFO_INTERLACE_AVI1_BOT 0x0010 +#define PICINFO_INTERLACE_FIRST 0x0010 + +#define VF_POOL_SIZE 16 +#define DECODE_BUFFER_NUM_MAX 4 +#define MAX_BMMU_BUFFER_NUM DECODE_BUFFER_NUM_MAX +#define PUT_INTERVAL (HZ/100) + +#if 1/*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6*/ +/* #define NV21 */ +#endif +static DEFINE_MUTEX(vmjpeg_mutex); + +static struct dec_sysinfo vmjpeg_amstream_dec_info; + +static struct vframe_s *vmjpeg_vf_peek(void *); +static struct vframe_s *vmjpeg_vf_get(void *); +static void vmjpeg_vf_put(struct vframe_s *, void *); +static int vmjpeg_vf_states(struct vframe_states *states, void *); +static int vmjpeg_event_cb(int type, void *data, void *private_data); + +static int vmjpeg_prot_init(void); +static void vmjpeg_local_init(void); + +static const char vmjpeg_dec_id[] = "vmjpeg-dev"; +static struct vdec_info *gvs; +static struct work_struct set_clk_work; + +#define PROVIDER_NAME "decoder.mjpeg" +static const struct vframe_operations_s vmjpeg_vf_provider = { + .peek = vmjpeg_vf_peek, + .get = vmjpeg_vf_get, + .put = vmjpeg_vf_put, + .event_cb = vmjpeg_event_cb, + .vf_states = vmjpeg_vf_states, +}; +static void *mm_blk_handle; +static struct vframe_provider_s vmjpeg_vf_prov; + +static DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(recycle_q, 
struct vframe_s *, VF_POOL_SIZE); + +static struct vframe_s vfpool[VF_POOL_SIZE]; +static s32 vfbuf_use[DECODE_BUFFER_NUM_MAX]; + +static u32 frame_width, frame_height, frame_dur; +static u32 saved_resolution; +static struct timer_list recycle_timer; +static u32 stat; +static u32 buf_size = 32 * 1024 * 1024; +static DEFINE_SPINLOCK(lock); +static bool is_reset; + +static inline u32 index2canvas0(u32 index) +{ + const u32 canvas_tab[4] = { +#ifdef NV21 + 0x010100, 0x030302, 0x050504, 0x070706 +#else + 0x020100, 0x050403, 0x080706, 0x0b0a09 +#endif + }; + + return canvas_tab[index]; +} + +static inline u32 index2canvas1(u32 index) +{ + const u32 canvas_tab[4] = { +#ifdef NV21 + 0x0d0d0c, 0x0f0f0e, 0x171716, 0x191918 +#else + 0x0e0d0c, 0x181716, 0x222120, 0x252423 +#endif + }; + + return canvas_tab[index]; +} + +static void set_frame_info(struct vframe_s *vf) +{ + vf->width = frame_width; + vf->height = frame_height; + vf->duration = frame_dur; + vf->ratio_control = 0; + vf->duration_pulldown = 0; + vf->flag = 0; +} + +static irqreturn_t vmjpeg_isr(int irq, void *dev_id) +{ + u32 reg, offset, pts, pts_valid = 0; + struct vframe_s *vf = NULL; + u64 pts_us64; + u32 frame_size; + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + reg = READ_VREG(MREG_FROM_AMRISC); + + if (reg & PICINFO_BUF_IDX_MASK) { + offset = READ_VREG(MREG_FRAME_OFFSET); + + if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, offset, &pts, + &frame_size, 0, &pts_us64) == 0) + pts_valid = 1; + + if ((reg & PICINFO_INTERLACE) == 0) { + u32 index = ((reg & PICINFO_BUF_IDX_MASK) - 1) & 3; + + if (index >= DECODE_BUFFER_NUM_MAX) { + pr_err("fatal error, invalid buffer index."); + return IRQ_HANDLED; + } + + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info( + "fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + + set_frame_info(vf); + vf->signal_type = 0; + vf->index = index; +#ifdef NV21 + vf->type = + VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | + VIDTYPE_VIU_NV21; +#else + vf->type = 
VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; +#endif + vf->canvas0Addr = vf->canvas1Addr = + index2canvas0(index); + vf->pts = (pts_valid) ? pts : 0; + vf->pts_us64 = (pts_valid) ? pts_us64 : 0; + vf->orientation = 0; + vf->type_original = vf->type; + vfbuf_use[index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + index); + + gvs->frame_dur = frame_dur; + vdec_count_info(gvs, 0, offset); + + kfifo_put(&display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + + } else { + u32 index = ((reg & PICINFO_BUF_IDX_MASK) - 1) & 3; + + if (index >= DECODE_BUFFER_NUM_MAX) { + pr_info("fatal error, invalid buffer index."); + return IRQ_HANDLED; + } + + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + + set_frame_info(vf); + vf->signal_type = 0; + vf->index = index; +#if 0 + if (reg & PICINFO_AVI1) { + /* AVI1 format */ + if (reg & PICINFO_INTERLACE_AVI1_BOT) { + vf->type = + VIDTYPE_INTERLACE_BOTTOM | + VIDTYPE_INTERLACE_FIRST; + } else + vf->type = VIDTYPE_INTERLACE_TOP; + } else { + if (reg & PICINFO_INTERLACE_FIRST) { + vf->type = + VIDTYPE_INTERLACE_TOP | + VIDTYPE_INTERLACE_FIRST; + } else + vf->type = VIDTYPE_INTERLACE_BOTTOM; + } + + vf->type |= VIDTYPE_VIU_FIELD; +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + vf->duration >>= 1; + vf->canvas0Addr = vf->canvas1Addr = + index2canvas0(index); + vf->orientation = 0; + if ((vf->type & VIDTYPE_INTERLACE_FIRST) && + (pts_valid)) + vf->pts = pts; + else + vf->pts = 0; + + vfbuf_use[index]++; + + kfifo_put(&display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); +#else + /* send whole frame by weaving top & bottom field */ +#ifdef NV21 + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_NV21; +#else + vf->type = VIDTYPE_PROGRESSIVE; +#endif + vf->canvas0Addr = index2canvas0(index); + 
vf->canvas1Addr = index2canvas1(index); + vf->orientation = 0; + if (pts_valid) { + vf->pts = pts; + vf->pts_us64 = pts_us64; + } else { + vf->pts = 0; + vf->pts_us64 = 0; + } + vf->type_original = vf->type; + vfbuf_use[index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + index); + + gvs->frame_dur = frame_dur; + vdec_count_info(gvs, 0, offset); + + kfifo_put(&display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); +#endif + } + + WRITE_VREG(MREG_FROM_AMRISC, 0); + } + + return IRQ_HANDLED; +} + +static struct vframe_s *vmjpeg_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + + if (kfifo_peek(&display_q, &vf)) + return vf; + + return NULL; +} + +static struct vframe_s *vmjpeg_vf_get(void *op_arg) +{ + struct vframe_s *vf; + + if (kfifo_get(&display_q, &vf)) + return vf; + + return NULL; +} + +static void vmjpeg_vf_put(struct vframe_s *vf, void *op_arg) +{ + kfifo_put(&recycle_q, (const struct vframe_s *)vf); +} + +static int vmjpeg_event_cb(int type, void *data, void *private_data) +{ + if (type & VFRAME_EVENT_RECEIVER_RESET) { + unsigned long flags; + + amvdec_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_light_unreg_provider(&vmjpeg_vf_prov); +#endif + spin_lock_irqsave(&lock, flags); + vmjpeg_local_init(); + vmjpeg_prot_init(); + spin_unlock_irqrestore(&lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vmjpeg_vf_prov); +#endif + amvdec_start(); + } + return 0; +} + +static int vmjpeg_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + + spin_lock_irqsave(&lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&newframe_q); + states->buf_avail_num = kfifo_len(&display_q); + states->buf_recycle_num = kfifo_len(&recycle_q); + + spin_unlock_irqrestore(&lock, flags); + + return 0; +} +static void mjpeg_set_clk(struct 
work_struct *work) +{ + if (frame_dur > 0 && saved_resolution != + frame_width * frame_height * (96000 / frame_dur)) { + int fps = 96000 / frame_dur; + + saved_resolution = frame_width * frame_height * fps; + vdec_source_changed(VFORMAT_MJPEG, + frame_width, frame_height, fps); + } +} + +static void vmjpeg_put_timer_func(struct timer_list *timer) +{ + while (!kfifo_is_empty(&recycle_q) && + (READ_VREG(MREG_TO_AMRISC) == 0)) { + struct vframe_s *vf; + + if (kfifo_get(&recycle_q, &vf)) { + if ((vf->index < DECODE_BUFFER_NUM_MAX) + && (--vfbuf_use[vf->index] == 0)) { + WRITE_VREG(MREG_TO_AMRISC, vf->index + 1); + vf->index = DECODE_BUFFER_NUM_MAX; + } + + kfifo_put(&newframe_q, (const struct vframe_s *)vf); + } + } + + schedule_work(&set_clk_work); + + timer->expires = jiffies + PUT_INTERVAL; + + add_timer(timer); +} + +int vmjpeg_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + if (!(stat & STAT_VDEC_RUN)) + return -1; + + vstatus->frame_width = frame_width; + vstatus->frame_height = frame_height; + if (0 != frame_dur) + vstatus->frame_rate = 96000 / frame_dur; + else + vstatus->frame_rate = 96000; + vstatus->error_count = 0; + vstatus->status = stat; + vstatus->bit_rate = gvs->bit_rate; + vstatus->frame_dur = frame_dur; + vstatus->frame_data = gvs->frame_data; + vstatus->total_data = gvs->total_data; + vstatus->frame_count = gvs->frame_count; + vstatus->error_frame_count = gvs->error_frame_count; + vstatus->drop_frame_count = gvs->drop_frame_count; + vstatus->total_data = gvs->total_data; + vstatus->samp_cnt = gvs->samp_cnt; + vstatus->offset = gvs->offset; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + +int vmjpeg_set_isreset(struct vdec_s *vdec, int isreset) +{ + is_reset = isreset; + return 0; +} + +/****************************************/ +static int vmjpeg_canvas_init(void) +{ + int i, ret; + u32 canvas_width, canvas_height; + u32 decbuf_size, decbuf_y_size, decbuf_uv_size; + unsigned long 
buf_start; + + if (buf_size <= 0x00400000) { + /* SD only */ + canvas_width = 768; + canvas_height = 576; + decbuf_y_size = 0x80000; + decbuf_uv_size = 0x20000; + decbuf_size = 0x100000; + } else { + /* HD & SD */ + canvas_width = 1920; + canvas_height = 1088; + decbuf_y_size = 0x200000; + decbuf_uv_size = 0x80000; + decbuf_size = 0x300000; + } + + for (i = 0; i < MAX_BMMU_BUFFER_NUM; i++) { + + ret = decoder_bmmu_box_alloc_buf_phy(mm_blk_handle, i, + decbuf_size, DRIVER_NAME, &buf_start); + if (ret < 0) + return ret; +#ifdef NV21 + config_cav_lut_ex(index2canvas0(i) & 0xff, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 0, VDEC_1); + config_cav_lut_ex((index2canvas0(i) >> 8) & 0xff, + buf_start + + decbuf_y_size, canvas_width, + canvas_height / 2, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 0, VDEC_1); + config_cav_lut_ex(index2canvas1(i) & 0xff, + buf_start + + decbuf_size / 2, canvas_width, + canvas_height, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 0, VDEC_1); + config_cav_lut_ex((index2canvas1(i) >> 8) & 0xff, + buf_start + + decbuf_y_size + decbuf_uv_size / 2, + canvas_width, canvas_height / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 0, VDEC_1); +#else + config_cav_lut_ex(index2canvas0(i) & 0xff, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 0, VDEC_1); + config_cav_lut_ex((index2canvas0(i) >> 8) & 0xff, + buf_start + + decbuf_y_size, canvas_width / 2, + canvas_height / 2, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 0, VDEC_1); + config_cav_lut_ex((index2canvas0(i) >> 16) & 0xff, + buf_start + + decbuf_y_size + decbuf_uv_size, + canvas_width / 2, canvas_height / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 0, VDEC_1); + config_cav_lut_ex(index2canvas1(i) & 0xff, + buf_start + + decbuf_size / 2, canvas_width, + canvas_height, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 0, VDEC_1); + config_cav_lut_ex((index2canvas1(i) >> 8) & 0xff, + buf_start + + 
decbuf_y_size + decbuf_uv_size / 2, + canvas_width / 2, canvas_height / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 0, VDEC_1); + config_cav_lut_ex((index2canvas1(i) >> 16) & 0xff, + buf_start + + decbuf_y_size + decbuf_uv_size + + decbuf_uv_size / 2, canvas_width / 2, + canvas_height / 2, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, 0, VDEC_1); +#endif + + } + return 0; +} + +static void init_scaler(void) +{ + /* 4 point triangle */ + const unsigned int filt_coef[] = { + 0x20402000, 0x20402000, 0x1f3f2101, 0x1f3f2101, + 0x1e3e2202, 0x1e3e2202, 0x1d3d2303, 0x1d3d2303, + 0x1c3c2404, 0x1c3c2404, 0x1b3b2505, 0x1b3b2505, + 0x1a3a2606, 0x1a3a2606, 0x19392707, 0x19392707, + 0x18382808, 0x18382808, 0x17372909, 0x17372909, + 0x16362a0a, 0x16362a0a, 0x15352b0b, 0x15352b0b, + 0x14342c0c, 0x14342c0c, 0x13332d0d, 0x13332d0d, + 0x12322e0e, 0x12322e0e, 0x11312f0f, 0x11312f0f, + 0x10303010 + }; + int i; + + /* pscale enable, PSCALE cbus bmem enable */ + WRITE_VREG(PSCALE_CTRL, 0xc000); + + /* write filter coefs */ + WRITE_VREG(PSCALE_BMEM_ADDR, 0); + for (i = 0; i < 33; i++) { + WRITE_VREG(PSCALE_BMEM_DAT, 0); + WRITE_VREG(PSCALE_BMEM_DAT, filt_coef[i]); + } + + /* Y horizontal initial info */ + WRITE_VREG(PSCALE_BMEM_ADDR, 37 * 2); + /* [35]: buf repeat pix0, + * [34:29] => buf receive num, + * [28:16] => buf blk x, + * [15:0] => buf phase + */ + WRITE_VREG(PSCALE_BMEM_DAT, 0x0008); + WRITE_VREG(PSCALE_BMEM_DAT, 0x60000000); + + /* C horizontal initial info */ + WRITE_VREG(PSCALE_BMEM_ADDR, 41 * 2); + WRITE_VREG(PSCALE_BMEM_DAT, 0x0008); + WRITE_VREG(PSCALE_BMEM_DAT, 0x60000000); + + /* Y vertical initial info */ + WRITE_VREG(PSCALE_BMEM_ADDR, 39 * 2); + WRITE_VREG(PSCALE_BMEM_DAT, 0x0008); + WRITE_VREG(PSCALE_BMEM_DAT, 0x60000000); + + /* C vertical initial info */ + WRITE_VREG(PSCALE_BMEM_ADDR, 43 * 2); + WRITE_VREG(PSCALE_BMEM_DAT, 0x0008); + WRITE_VREG(PSCALE_BMEM_DAT, 0x60000000); + + /* Y horizontal phase step */ + WRITE_VREG(PSCALE_BMEM_ADDR, 36 * 2 + 1); + /* 
[19:0] => Y horizontal phase step */ + WRITE_VREG(PSCALE_BMEM_DAT, 0x10000); + /* C horizontal phase step */ + WRITE_VREG(PSCALE_BMEM_ADDR, 40 * 2 + 1); + /* [19:0] => C horizontal phase step */ + WRITE_VREG(PSCALE_BMEM_DAT, 0x10000); + + /* Y vertical phase step */ + WRITE_VREG(PSCALE_BMEM_ADDR, 38 * 2 + 1); + /* [19:0] => Y vertical phase step */ + WRITE_VREG(PSCALE_BMEM_DAT, 0x10000); + /* C vertical phase step */ + WRITE_VREG(PSCALE_BMEM_ADDR, 42 * 2 + 1); + /* [19:0] => C horizontal phase step */ + WRITE_VREG(PSCALE_BMEM_DAT, 0x10000); + + /* reset pscaler */ +#if 1/*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6*/ + WRITE_VREG(DOS_SW_RESET0, (1 << 10)); + WRITE_VREG(DOS_SW_RESET0, 0); +#else + WRITE_RESET_REG(RESET2_REGISTER, RESET_PSCALE); +#endif + READ_RESET_REG(RESET2_REGISTER); + READ_RESET_REG(RESET2_REGISTER); + READ_RESET_REG(RESET2_REGISTER); + + WRITE_VREG(PSCALE_RST, 0x7); + WRITE_VREG(PSCALE_RST, 0x0); +} + +static int vmjpeg_prot_init(void) +{ + int r; +#if 1/*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6*/ + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6)); + WRITE_VREG(DOS_SW_RESET0, 0); +#else + WRITE_RESET_REG(RESET0_REGISTER, RESET_IQIDCT | RESET_MC); +#endif + + r = vmjpeg_canvas_init(); + + WRITE_VREG(AV_SCRATCH_0, 12); + WRITE_VREG(AV_SCRATCH_1, 0x031a); +#ifdef NV21 + WRITE_VREG(AV_SCRATCH_4, 0x010100); + WRITE_VREG(AV_SCRATCH_5, 0x030302); + WRITE_VREG(AV_SCRATCH_6, 0x050504); + WRITE_VREG(AV_SCRATCH_7, 0x070706); +#else + WRITE_VREG(AV_SCRATCH_4, 0x020100); + WRITE_VREG(AV_SCRATCH_5, 0x050403); + WRITE_VREG(AV_SCRATCH_6, 0x080706); + WRITE_VREG(AV_SCRATCH_7, 0x0b0a09); +#endif + init_scaler(); + + /* clear buffer IN/OUT registers */ + WRITE_VREG(MREG_TO_AMRISC, 0); + WRITE_VREG(MREG_FROM_AMRISC, 0); + + WRITE_VREG(MCPU_INTR_MSK, 0xffff); + WRITE_VREG(MREG_DECODE_PARAM, (frame_height << 4) | 0x8000); + + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + /* set 
interrupt mapping for vld */ + WRITE_VREG(ASSIST_AMR1_INT8, 8); +#if 1/*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6*/ +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#else + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif +#endif + return r; +} + +static int vmjpeg_vdec_info_init(void) +{ + gvs = kzalloc(sizeof(struct vdec_info), GFP_KERNEL); + if (NULL == gvs) { + pr_info("the struct of vdec status malloc failed.\n"); + return -ENOMEM; + } + return 0; +} + +static void vmjpeg_local_init(void) +{ + int i; + + frame_width = vmjpeg_amstream_dec_info.width; + frame_height = vmjpeg_amstream_dec_info.height; + frame_dur = vmjpeg_amstream_dec_info.rate; + saved_resolution = 0; + amlog_level(LOG_LEVEL_INFO, "mjpegdec: w(%d), h(%d), dur(%d)\n", + frame_width, frame_height, frame_dur); + + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + vfbuf_use[i] = 0; + + INIT_KFIFO(display_q); + INIT_KFIFO(recycle_q); + INIT_KFIFO(newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &vfpool[i]; + + vfpool[i].index = DECODE_BUFFER_NUM_MAX; + kfifo_put(&newframe_q, vf); + } + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + + mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER); +} + +static s32 vmjpeg_init(void) +{ + int ret = -1, size = -1; + char *buf = vmalloc(0x1000 * 16); + + if (IS_ERR_OR_NULL(buf)) + return -ENOMEM; + + timer_setup(&recycle_timer, vmjpeg_put_timer_func, 0); + + stat |= STAT_TIMER_INIT; + + amvdec_enable(); + + vmjpeg_local_init(); + + size = get_firmware_data(VIDEO_DEC_MJPEG, buf); + if (size < 0) { + amvdec_disable(); + pr_err("get firmware fail."); + vfree(buf); + return -1; + } + + ret = amvdec_loadmc_ex(VFORMAT_MJPEG, NULL, buf); + if (ret < 0) { + amvdec_disable(); + vfree(buf); + pr_err("MJPEG: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + return -EBUSY; + } + + vfree(buf); + + stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + ret = vmjpeg_prot_init(); + if (ret < 0) + return ret; + + ret = vdec_request_irq(VDEC_IRQ_1, vmjpeg_isr, + "vmjpeg-irq", (void *)vmjpeg_dec_id); + + if (ret) { + amvdec_disable(); + + amlog_level(LOG_LEVEL_ERROR, "vmjpeg irq register error.\n"); + return -ENOENT; + } + + stat |= STAT_ISR_REG; + +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_provider_init(&vmjpeg_vf_prov, PROVIDER_NAME, &vmjpeg_vf_provider, + NULL); + vf_reg_provider(&vmjpeg_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); +#else + vf_provider_init(&vmjpeg_vf_prov, PROVIDER_NAME, &vmjpeg_vf_provider, + NULL); + vf_reg_provider(&vmjpeg_vf_prov); +#endif + + if (!is_reset) + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)vmjpeg_amstream_dec_info.rate)); + + stat |= STAT_VF_HOOK; + + recycle_timer.expires = jiffies + PUT_INTERVAL; + add_timer(&recycle_timer); + + stat |= STAT_TIMER_ARM; + + amvdec_start(); + + stat |= STAT_VDEC_RUN; + + return 0; +} + +static int amvdec_mjpeg_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + + mutex_lock(&vmjpeg_mutex); + + amlog_level(LOG_LEVEL_INFO, "amvdec_mjpeg probe start.\n"); + + INIT_WORK(&set_clk_work, mjpeg_set_clk); + + if (pdata == NULL) { + amlog_level(LOG_LEVEL_ERROR, + "amvdec_mjpeg memory resource undefined.\n"); + mutex_unlock(&vmjpeg_mutex); + + return -EFAULT; + } + + if (pdata->sys_info) + vmjpeg_amstream_dec_info = *pdata->sys_info; + + pdata->dec_status = vmjpeg_dec_status; + pdata->set_isreset = vmjpeg_set_isreset; + is_reset = 0; + vmjpeg_vdec_info_init(); + + if (vmjpeg_init() < 0) { + amlog_level(LOG_LEVEL_ERROR, "amvdec_mjpeg init failed.\n"); + mutex_unlock(&vmjpeg_mutex); + kfree(gvs); + gvs = NULL; + pdata->dec_status = NULL; + return -ENODEV; + } + + 
mutex_unlock(&vmjpeg_mutex); + + amlog_level(LOG_LEVEL_INFO, "amvdec_mjpeg probe end.\n"); + + return 0; +} + +static int amvdec_mjpeg_remove(struct platform_device *pdev) +{ + mutex_lock(&vmjpeg_mutex); + + cancel_work_sync(&set_clk_work); + + if (stat & STAT_VDEC_RUN) { + amvdec_stop(); + stat &= ~STAT_VDEC_RUN; + } + + if (stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)vmjpeg_dec_id); + stat &= ~STAT_ISR_REG; + } + + if (stat & STAT_TIMER_ARM) { + del_timer_sync(&recycle_timer); + stat &= ~STAT_TIMER_ARM; + } + + if (stat & STAT_VF_HOOK) { + if (!is_reset) + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + + vf_unreg_provider(&vmjpeg_vf_prov); + stat &= ~STAT_VF_HOOK; + } + + amvdec_disable(); + + mutex_unlock(&vmjpeg_mutex); + + kfree(gvs); + gvs = NULL; + + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + + amlog_level(LOG_LEVEL_INFO, "amvdec_mjpeg remove.\n"); + + return 0; +} + +/****************************************/ + +static struct platform_driver amvdec_mjpeg_driver = { + .probe = amvdec_mjpeg_probe, + .remove = amvdec_mjpeg_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t amvdec_mjpeg_profile = { + .name = "mjpeg", + .profile = "" +}; +static struct mconfig mjpeg_configs[] = { + MC_PU32("stat", &stat), +}; +static struct mconfig_node mjpeg_node; + +static int __init amvdec_mjpeg_driver_init_module(void) +{ + amlog_level(LOG_LEVEL_INFO, "amvdec_mjpeg module init\n"); + + if (platform_driver_register(&amvdec_mjpeg_driver)) { + amlog_level(LOG_LEVEL_ERROR, + "failed to register amvdec_mjpeg driver\n"); + return -ENODEV; + } + vcodec_profile_register(&amvdec_mjpeg_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &mjpeg_node, + "mjpeg", mjpeg_configs, CONFIG_FOR_RW); + return 0; +} + +static void __exit amvdec_mjpeg_driver_remove_module(void) +{ + 
amlog_level(LOG_LEVEL_INFO, "amvdec_mjpeg module remove.\n"); + + platform_driver_unregister(&amvdec_mjpeg_driver); +} + +/****************************************/ + +module_param(stat, uint, 0664); +MODULE_PARM_DESC(stat, "\n amvdec_mjpeg stat\n"); + +module_init(amvdec_mjpeg_driver_init_module); +module_exit(amvdec_mjpeg_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC MJMPEG Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/mjpeg/vmjpeg_multi.c b/drivers/frame_provider/decoder/mjpeg/vmjpeg_multi.c new file mode 100644 index 0000000..22d7dbd --- /dev/null +++ b/drivers/frame_provider/decoder/mjpeg/vmjpeg_multi.c
@@ -0,0 +1,1892 @@ +/* + * drivers/amlogic/amports/vmjpeg.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" + +#include "../utils/vdec_input.h" +#include "../utils/vdec.h" +#include "../utils/amvdec.h" +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../utils/firmware.h" +#include "../utils/vdec_v4l2_buffer_ops.h" +#include "../utils/config_parser.h" +#include <media/v4l2-mem2mem.h> +#include "../utils/vdec_feature.h" + +#define MEM_NAME "codec_mmjpeg" + +#define DRIVER_NAME "ammvdec_mjpeg" +#define 
CHECK_INTERVAL (HZ/100) + +/* protocol register usage + * AV_SCRATCH_4 : decode buffer spec + * AV_SCRATCH_5 : decode buffer index + */ + +#define MREG_DECODE_PARAM AV_SCRATCH_2 /* bit 0-3: pico_addr_mode */ +/* bit 15-4: reference height */ +#define MREG_TO_AMRISC AV_SCRATCH_8 +#define MREG_FROM_AMRISC AV_SCRATCH_9 +#define MREG_FRAME_OFFSET AV_SCRATCH_A +#define DEC_STATUS_REG AV_SCRATCH_F +#define MREG_PIC_WIDTH AV_SCRATCH_B +#define MREG_PIC_HEIGHT AV_SCRATCH_C +#define DECODE_STOP_POS AV_SCRATCH_K + +#define PICINFO_BUF_IDX_MASK 0x0007 +#define PICINFO_AVI1 0x0080 +#define PICINFO_INTERLACE 0x0020 +#define PICINFO_INTERLACE_AVI1_BOT 0x0010 +#define PICINFO_INTERLACE_FIRST 0x0010 + +#define VF_POOL_SIZE 64 +#define DECODE_BUFFER_NUM_MAX 16 +#define DECODE_BUFFER_NUM_DEF 4 +#define MAX_BMMU_BUFFER_NUM DECODE_BUFFER_NUM_MAX + +#define DEFAULT_MEM_SIZE (32*SZ_1M) +static int debug_enable; +static u32 udebug_flag; +#define DECODE_ID(hw) (hw_to_vdec(hw)->id) + +static unsigned int radr; +static unsigned int rval; +#define VMJPEG_DEV_NUM 9 +static unsigned int max_decode_instance_num = VMJPEG_DEV_NUM; +static unsigned int max_process_time[VMJPEG_DEV_NUM]; +static unsigned int decode_timeout_val = 200; +static struct vframe_s *vmjpeg_vf_peek(void *); +static struct vframe_s *vmjpeg_vf_get(void *); +static void vmjpeg_vf_put(struct vframe_s *, void *); +static int vmjpeg_vf_states(struct vframe_states *states, void *); +static int vmjpeg_event_cb(int type, void *data, void *private_data); +static void vmjpeg_work(struct work_struct *work); +static int notify_v4l_eos(struct vdec_s *vdec); +static int pre_decode_buf_level = 0x800; +static int start_decode_buf_level = 0x2000; +static u32 without_display_mode; +static u32 dynamic_buf_num_margin; +static u32 run_ready_min_buf_num = 2; +#undef pr_info +#define pr_info printk +unsigned int mmjpeg_debug_mask = 0xff; +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_RUN_FLOW 0X0001 +#define PRINT_FLAG_TIMEINFO 0x0002 +#define 
PRINT_FLAG_UCODE_DETAIL 0x0004 +#define PRINT_FLAG_VLD_DETAIL 0x0008 +#define PRINT_FLAG_DEC_DETAIL 0x0010 +#define PRINT_FLAG_BUFFER_DETAIL 0x0020 +#define PRINT_FLAG_RESTORE 0x0040 +#define PRINT_FRAME_NUM 0x0080 +#define PRINT_FLAG_FORCE_DONE 0x0100 +#define PRINT_FRAMEBASE_DATA 0x0400 +#define PRINT_FLAG_TIMEOUT_STATUS 0x1000 +#define PRINT_FLAG_V4L_DETAIL 0x8000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 + +int mmjpeg_debug_print(int index, int debug_flag, const char *fmt, ...) +{ + if (((debug_enable & debug_flag) && + ((1 << index) & mmjpeg_debug_mask)) + || (debug_flag == PRINT_FLAG_ERROR)) { + unsigned char *buf = kzalloc(512, GFP_ATOMIC); + int len = 0; + va_list args; + + if (!buf) + return 0; + + va_start(args, fmt); + len = sprintf(buf, "%d: ", index); + vsnprintf(buf + len, 512-len, fmt, args); + pr_info("%s", buf); + va_end(args); + kfree(buf); + } + return 0; +} + +static const char vmjpeg_dec_id[] = "vmmjpeg-dev"; + +#define PROVIDER_NAME "vdec.mjpeg" +static const struct vframe_operations_s vf_provider_ops = { + .peek = vmjpeg_vf_peek, + .get = vmjpeg_vf_get, + .put = vmjpeg_vf_put, + .event_cb = vmjpeg_event_cb, + .vf_states = vmjpeg_vf_states, +}; + +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_ERROR 3 +#define DEC_RESULT_FORCE_EXIT 4 +#define DEC_RESULT_EOS 5 +#define DEC_DECODE_TIMEOUT 0x21 + + +struct buffer_spec_s { + unsigned int y_addr; + unsigned int u_addr; + unsigned int v_addr; + + int y_canvas_index; + int u_canvas_index; + int v_canvas_index; + + struct canvas_config_s canvas_config[3]; + unsigned long cma_alloc_addr; + int cma_alloc_count; + unsigned int buf_adr; + ulong v4l_ref_buf_addr; +}; + +#define spec2canvas(x) \ + (((x)->v_canvas_index << 16) | \ + ((x)->u_canvas_index << 8) | \ + ((x)->y_canvas_index << 0)) + +struct vdec_mjpeg_hw_s { + spinlock_t lock; + struct mutex vmjpeg_mutex; + + struct platform_device *platform_dev; + DECLARE_KFIFO(newframe_q, struct vframe_s 
*, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + + struct vframe_s vfpool[VF_POOL_SIZE]; + struct buffer_spec_s buffer_spec[DECODE_BUFFER_NUM_MAX]; + s32 vfbuf_use[DECODE_BUFFER_NUM_MAX]; + + u32 frame_width; + u32 frame_height; + u32 frame_dur; + u32 saved_resolution; + u8 init_flag; + u32 stat; + u32 dec_result; + unsigned long buf_start; + u32 buf_size; + void *mm_blk_handle; + struct dec_sysinfo vmjpeg_amstream_dec_info; + + struct vframe_chunk_s *chunk; + struct work_struct work; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + struct firmware_s *fw; + struct timer_list check_timer; + u32 decode_timeout_count; + u32 start_process_time; + u32 last_vld_level; + u8 eos; + u32 frame_num; + u32 put_num; + u32 run_count; + u32 not_run_ready; + u32 buffer_not_ready; + u32 input_empty; + u32 peek_num; + u32 get_num; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + int buf_num; + int dynamic_buf_num_margin; + int sidebind_type; + int sidebind_channel_id; + u32 res_ch_flag; + u32 canvas_mode; + u32 canvas_endian; + ulong fb_token; + char vdec_name[32]; + char pts_name[32]; + char new_q_name[32]; + char disp_q_name[32]; +}; + +static void reset_process_time(struct vdec_mjpeg_hw_s *hw); +static int notify_v4l_eos(struct vdec_s *vdec); + +static void set_frame_info(struct vdec_mjpeg_hw_s *hw, struct vframe_s *vf) +{ + u32 temp; + temp = READ_VREG(MREG_PIC_WIDTH); + if (temp > 1920) + vf->width = hw->frame_width = 1920; + else if (temp > 0) + vf->width = hw->frame_width = temp; + temp = READ_VREG(MREG_PIC_HEIGHT); + if (temp > 1088) + vf->height = hw->frame_height = 1088; + else if (temp > 0) + vf->height = hw->frame_height = temp; + vf->duration = hw->frame_dur; + vf->ratio_control = DISP_RATIO_ASPECT_RATIO_MAX << DISP_RATIO_ASPECT_RATIO_BIT; + vf->sar_width = 1; + vf->sar_height = 1; + vf->duration_pulldown = 0; + vf->flag = 0; + + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 3; + + 
vf->canvas0_config[0] = hw->buffer_spec[vf->index].canvas_config[0]; + vf->canvas0_config[1] = hw->buffer_spec[vf->index].canvas_config[1]; + vf->canvas0_config[2] = hw->buffer_spec[vf->index].canvas_config[2]; + + vf->canvas1_config[0] = hw->buffer_spec[vf->index].canvas_config[0]; + vf->canvas1_config[1] = hw->buffer_spec[vf->index].canvas_config[1]; + vf->canvas1_config[2] = hw->buffer_spec[vf->index].canvas_config[2]; + + vf->sidebind_type = hw->sidebind_type; + vf->sidebind_channel_id = hw->sidebind_channel_id; +} + +static irqreturn_t vmjpeg_isr(struct vdec_s *vdec, int irq) +{ + struct vdec_mjpeg_hw_s *hw = + (struct vdec_mjpeg_hw_s *)(vdec->private); + + if (!hw) + return IRQ_HANDLED; + + if (hw->eos) + return IRQ_HANDLED; + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + return IRQ_WAKE_THREAD; +} + +static int vmjpeg_get_ps_info(struct vdec_mjpeg_hw_s *hw, int width, int height, struct aml_vdec_ps_infos *ps) +{ + ps->visible_width = width; + ps->visible_height = height; + ps->coded_width = ALIGN(width, 64); + ps->coded_height = ALIGN(height, 64); + ps->dpb_size = hw->buf_num; + + return 0; +} + +static int v4l_res_change(struct vdec_mjpeg_hw_s *hw, int width, int height) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int ret = 0; + + if (ctx->param_sets_from_ucode && + hw->res_ch_flag == 0) { + struct aml_vdec_ps_infos ps; + + if ((hw->frame_width != 0 && + hw->frame_height != 0) && + (hw->frame_width != width || + hw->frame_height != height)) { + mmjpeg_debug_print(DECODE_ID(hw), 0, + "v4l_res_change Pic Width/Height Change (%d,%d)=>(%d,%d)\n", + hw->frame_width, hw->frame_height, + width, + height); + vmjpeg_get_ps_info(hw, width, height, &ps); + vdec_v4l_set_ps_infos(ctx, &ps); + vdec_v4l_res_ch_event(ctx); + hw->v4l_params_parsed = false; + hw->res_ch_flag = 1; + ctx->v4l_resolution_change = 1; + hw->eos = 1; + if (hw->is_used_v4l) + notify_v4l_eos(hw_to_vdec(hw)); + + ret = 1; + } + } + + return ret; +} + +static 
/*
 * Threaded half of the decoder interrupt.  Handles three cases:
 *  - ucode debug message pending in AV_SCRATCH_D: print and ack;
 *  - DEC_STATUS_REG == 1 (picture info ready): on the v4l path run
 *    resolution-change / first-params handling, otherwise just ack;
 *  - a decoded frame: pick the buffer index from AV_SCRATCH_5, fill a
 *    vframe (pts from the chunk or the pts server), queue it to
 *    display_q and hand it to the receiver / v4l task, then schedule
 *    the work item with DEC_RESULT_DONE.
 */
irqreturn_t vmjpeg_isr_thread_fn(struct vdec_s *vdec, int irq)
{
	struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)(vdec->private);
	struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx;
	struct vdec_v4l2_buffer *fb = NULL;
	u32 reg;
	struct vframe_s *vf = NULL;
	u32 index, offset = 0, pts;
	u64 pts_us64;
	u32 frame_size;

	/* Ucode debug channel: AV_SCRATCH_D non-zero carries a debug code. */
	if (READ_VREG(AV_SCRATCH_D) != 0 &&
		(debug_enable & PRINT_FLAG_UCODE_DETAIL)) {
		pr_info("dbg%x: %x\n", READ_VREG(AV_SCRATCH_D),
			READ_VREG(AV_SCRATCH_E));
		WRITE_VREG(AV_SCRATCH_D, 0);
		return IRQ_HANDLED;
	}

	if (READ_VREG(DEC_STATUS_REG) == 1) {
		if (hw->is_used_v4l) {
			int frame_width = READ_VREG(MREG_PIC_WIDTH);
			int frame_height = READ_VREG(MREG_PIC_HEIGHT);

			if (!v4l_res_change(hw, frame_width, frame_height)) {
				struct aml_vcodec_ctx *ctx =
					(struct aml_vcodec_ctx *)(hw->v4l2_ctx);
				if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) {
					struct aml_vdec_ps_infos ps;

					/* First picture: publish size info, retry decode later. */
					vmjpeg_get_ps_info(hw, frame_width, frame_height, &ps);
					hw->v4l_params_parsed = true;
					vdec_v4l_set_ps_infos(ctx, &ps);
					reset_process_time(hw);
					hw->dec_result = DEC_RESULT_AGAIN;
					vdec_schedule_work(&hw->work);
				} else {
					WRITE_VREG(DEC_STATUS_REG, 0);
				}
			} else {
				/* Resolution changed: re-run this chunk after reconfig. */
				reset_process_time(hw);
				hw->dec_result = DEC_RESULT_AGAIN;
				vdec_schedule_work(&hw->work);
			}
		} else
			WRITE_VREG(DEC_STATUS_REG, 0);
		return IRQ_HANDLED;
	}
	reset_process_time(hw);

	/* NOTE(review): 'reg' is read but not otherwise used — presumably the
	 * read itself acks the AMRISC mailbox; confirm against ucode docs. */
	reg = READ_VREG(MREG_FROM_AMRISC);
	index = READ_VREG(AV_SCRATCH_5) & 0xffffff;

	if (index >= hw->buf_num) {
		pr_err("fatal error, invalid buffer index.");
		return IRQ_HANDLED;
	}

	if (kfifo_get(&hw->newframe_q, &vf) == 0) {
		pr_info(
			"fatal error, no available buffer slot.");
		return IRQ_HANDLED;
	}

	if (hw->is_used_v4l) {
		vf->v4l_mem_handle
			= hw->buffer_spec[index].v4l_ref_buf_addr;
		fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
		mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
			"[%d] %s(), v4l mem handle: 0x%lx\n",
			((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id,
			__func__, vf->v4l_mem_handle);
	}

	vf->index = index;
	set_frame_info(hw, vf);

	vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD;
	/* vf->pts = (pts_valid) ? pts : 0; */
	/* vf->pts_us64 = (pts_valid) ? pts_us64 : 0; */

	if (hw->chunk) {
		/* Frame-based input: timestamps come with the chunk. */
		vf->pts = hw->chunk->pts;
		vf->pts_us64 = hw->chunk->pts64;
		vf->timestamp = hw->chunk->timestamp;
	} else {
		/* Stream-based input: look the pts up by stream offset. */
		offset = READ_VREG(MREG_FRAME_OFFSET);
		if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) {
			if (pts_lookup_offset_us64
				(PTS_TYPE_VIDEO, offset, &pts,
				&frame_size, 3000,
				&pts_us64) == 0) {
				vf->pts = pts;
				vf->pts_us64 = pts_us64;
			} else {
				vf->pts = 0;
				vf->pts_us64 = 0;
			}
		}
		if (!vdec->vbuf.use_ptsserv && vdec_stream_based(vdec)) {
			/* No pts server: carry the raw offset in pts_us64. */
			vf->pts_us64 = offset;
			vf->pts = 0;
		}
	}
	vf->orientation = 0;
	hw->vfbuf_use[index]++;

	vf->mem_handle =
		decoder_bmmu_box_get_mem_handle(
			hw->mm_blk_handle, index);
	decoder_do_frame_check(vdec, vf);
	vdec_vframe_ready(vdec, vf);
	kfifo_put(&hw->display_q, (const struct vframe_s *)vf);
	ATRACE_COUNTER(hw->pts_name, vf->pts);
	ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q));
	ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q));
	hw->frame_num++;
	mmjpeg_debug_print(DECODE_ID(hw), PRINT_FRAME_NUM,
		"%s:frame num:%d,pts=%d,pts64=%lld. dur=%d\n",
		__func__, hw->frame_num,
		vf->pts, vf->pts_us64, vf->duration);
	vdec->vdec_fps_detec(vdec->id);
	if (without_display_mode == 0) {
		if (hw->is_used_v4l) {
			if (v4l2_ctx->is_stream_off) {
				/* Stream is off: recycle the frame immediately. */
				vmjpeg_vf_put(vmjpeg_vf_get(vdec), vdec);
			} else {
				fb->task->submit(fb->task, TASK_TYPE_DEC);
			}
		} else {
			vf_notify_receiver(vdec->vf_provider_name,
				VFRAME_EVENT_PROVIDER_VFRAME_READY,
				NULL);
		}
	} else
		vmjpeg_vf_put(vmjpeg_vf_get(vdec), vdec);

	hw->dec_result = DEC_RESULT_DONE;

	vdec_schedule_work(&hw->work);

	return IRQ_HANDLED;
}

/*
 * Sanity check: vf must be non-NULL, in use (index != -1), and one of
 * this instance's pool entries.  Returns 1 when valid, 0 otherwise.
 */
static int valid_vf_check(struct vframe_s *vf, struct vdec_mjpeg_hw_s *hw)
{
	int i;

	if (!vf || (vf->index == -1))
		return 0;

	for (i = 0; i < VF_POOL_SIZE; i++) {
		if (vf == &hw->vfpool[i])
			return 1;
	}

	return 0;
}

/* vframe provider op: peek the head of display_q without dequeuing. */
static struct vframe_s *vmjpeg_vf_peek(void *op_arg)
{
	struct vframe_s *vf;
	struct vdec_s *vdec = op_arg;
	struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)vdec->private;

	if (!hw)
		return NULL;
	hw->peek_num++;

	/* A queue length above the pool size indicates corruption. */
	if (kfifo_len(&hw->display_q) > VF_POOL_SIZE) {
		mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
			"kfifo len:%d invaild, peek error\n",
			kfifo_len(&hw->display_q));
		return NULL;
	}

	if (kfifo_peek(&hw->display_q, &vf))
		return vf;

	return NULL;
}

/* vframe provider op: dequeue the next displayable frame, or NULL. */
static struct vframe_s *vmjpeg_vf_get(void *op_arg)
{
	struct vframe_s *vf;
	struct vdec_s *vdec = op_arg;
	struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)vdec->private;

	if (!hw)
		return NULL;
	hw->get_num++;
	if (kfifo_get(&hw->display_q, &vf)) {
		ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q));
		return vf;
	}
	return NULL;
}

/*
 * vframe provider op: return a displayed frame to the free pool.
 * Rejects frames that do not belong to this instance's pool.
 */
static void vmjpeg_vf_put(struct vframe_s *vf, void *op_arg)
{
	struct vdec_s *vdec = op_arg;
	struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)vdec->private;

	if (!valid_vf_check(vf, hw)) {
		mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
			"invalid vf: %lx\n", (ulong)vf);
		return ;
	}

	mmjpeg_debug_print(DECODE_ID(hw), PRINT_FRAME_NUM,
		"%s:put_num:%d\n", __func__, hw->put_num);
	/* Drop the use count and recycle the vframe into newframe_q. */
	hw->vfbuf_use[vf->index]--;
	kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf);
	ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q));
	hw->put_num++;
}

/*
 * vframe provider event callback.  Only REQ_STATE is handled: report
 * whether the stream is secure; all other state queries return ~0.
 */
static int vmjpeg_event_cb(int type, void *data, void *op_arg)
{
	struct vdec_s *vdec = op_arg;

	if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) {
		struct provider_state_req_s *req =
			(struct provider_state_req_s *)data;
		if (req->req_type == REQ_STATE_SECURE)
			req->req_result[0] = vdec_secure(vdec);
		else
			req->req_result[0] = 0xffffffff;
	}

	return 0;
}

/* vframe provider op: snapshot pool/queue occupancy under hw->lock. */
static int vmjpeg_vf_states(struct vframe_states *states, void *op_arg)
{
	unsigned long flags;
	struct vdec_s *vdec = op_arg;
	struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)vdec->private;

	spin_lock_irqsave(&hw->lock, flags);

	states->vf_pool_size = VF_POOL_SIZE;
	states->buf_free_num = kfifo_len(&hw->newframe_q);
	states->buf_avail_num = kfifo_len(&hw->display_q);
	states->buf_recycle_num = 0;

	spin_unlock_irqrestore(&hw->lock, flags);

	return 0;
}

/*
 * Status query: report picture size, frame rate derived from the
 * 96 kHz-tick duration (96000/dur; 96000 when duration is unknown),
 * and the driver state bits.
 */
static int vmjpeg_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
{
	struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)vdec->private;

	if (!hw)
		return -1;

	vstatus->frame_width = hw->frame_width;
	vstatus->frame_height = hw->frame_height;
	if (0 != hw->frame_dur)
		vstatus->frame_rate = 96000 / hw->frame_dur;
	else
		vstatus->frame_rate = 96000;
	vstatus->error_count = 0;
	vstatus->status = hw->stat;

	return 0;
}

/****************************************/
/*
 * Allocate (non-v4l path) and program the decode canvases for every
 * buffer in the pool.  Buffers are fixed 1920x1088 NV-planar layouts
 * (0x300000 bytes each); the v4l path allocates elsewhere and is
 * skipped here.
 */
static void vmjpeg_canvas_init(struct vdec_mjpeg_hw_s *hw)
{
	int i, ret;
	u32 canvas_width, canvas_height;
	u32 decbuf_size, decbuf_y_size, decbuf_uv_size;
	unsigned long buf_start, addr;
	u32 endian;
	struct vdec_s *vdec = hw_to_vdec(hw);

	endian = (vdec->canvas_mode ==
		CANVAS_BLKMODE_LINEAR) ?
7 : 0; + canvas_width = 1920; + canvas_height = 1088; + decbuf_y_size = 0x200000; + decbuf_uv_size = 0x80000; + decbuf_size = 0x300000; + + for (i = 0; i < hw->buf_num; i++) { + int canvas; + + if (hw->is_used_v4l) { + continue; + } else { + ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, i, + decbuf_size, DRIVER_NAME, &buf_start); + if (ret < 0) { + pr_err("CMA alloc failed! size 0x%d idx %d\n", + decbuf_size, i); + return; + } + } + + hw->buffer_spec[i].buf_adr = buf_start; + addr = hw->buffer_spec[i].buf_adr; + + hw->buffer_spec[i].y_addr = addr; + addr += decbuf_y_size; + hw->buffer_spec[i].u_addr = addr; + addr += decbuf_uv_size; + hw->buffer_spec[i].v_addr = addr; + + if (vdec->parallel_dec == 1) { + if (hw->buffer_spec[i].y_canvas_index == -1) + hw->buffer_spec[i].y_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].u_canvas_index == -1) + hw->buffer_spec[i].u_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].v_canvas_index == -1) + hw->buffer_spec[i].v_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + } else { + canvas = vdec->get_canvas(i, 3); + hw->buffer_spec[i].y_canvas_index = canvas_y(canvas); + hw->buffer_spec[i].u_canvas_index = canvas_u(canvas); + hw->buffer_spec[i].v_canvas_index = canvas_v(canvas); + } + + config_cav_lut_ex(hw->buffer_spec[i].y_canvas_index, + hw->buffer_spec[i].y_addr, + canvas_width, + canvas_height, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, endian, VDEC_1); + hw->buffer_spec[i].canvas_config[0].phy_addr = + hw->buffer_spec[i].y_addr; + hw->buffer_spec[i].canvas_config[0].width = + canvas_width; + hw->buffer_spec[i].canvas_config[0].height = + canvas_height; + hw->buffer_spec[i].canvas_config[0].block_mode = + CANVAS_BLKMODE_LINEAR; + hw->buffer_spec[i].canvas_config[0].endian = + endian; + + config_cav_lut_ex(hw->buffer_spec[i].u_canvas_index, + hw->buffer_spec[i].u_addr, + canvas_width / 2, + canvas_height / 2, + 
CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, endian, VDEC_1); + hw->buffer_spec[i].canvas_config[1].phy_addr = + hw->buffer_spec[i].u_addr; + hw->buffer_spec[i].canvas_config[1].width = + canvas_width / 2; + hw->buffer_spec[i].canvas_config[1].height = + canvas_height / 2; + hw->buffer_spec[i].canvas_config[1].block_mode = + CANVAS_BLKMODE_LINEAR; + hw->buffer_spec[i].canvas_config[1].endian = + endian; + + config_cav_lut_ex(hw->buffer_spec[i].v_canvas_index, + hw->buffer_spec[i].v_addr, + canvas_width / 2, + canvas_height / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR, endian, VDEC_1); + hw->buffer_spec[i].canvas_config[2].phy_addr = + hw->buffer_spec[i].v_addr; + hw->buffer_spec[i].canvas_config[2].width = + canvas_width / 2; + hw->buffer_spec[i].canvas_config[2].height = + canvas_height / 2; + hw->buffer_spec[i].canvas_config[2].block_mode = + CANVAS_BLKMODE_LINEAR; + hw->buffer_spec[i].canvas_config[2].endian = + endian; + } +} + +static void init_scaler(void) +{ + /* 4 point triangle */ + const unsigned int filt_coef[] = { + 0x20402000, 0x20402000, 0x1f3f2101, 0x1f3f2101, + 0x1e3e2202, 0x1e3e2202, 0x1d3d2303, 0x1d3d2303, + 0x1c3c2404, 0x1c3c2404, 0x1b3b2505, 0x1b3b2505, + 0x1a3a2606, 0x1a3a2606, 0x19392707, 0x19392707, + 0x18382808, 0x18382808, 0x17372909, 0x17372909, + 0x16362a0a, 0x16362a0a, 0x15352b0b, 0x15352b0b, + 0x14342c0c, 0x14342c0c, 0x13332d0d, 0x13332d0d, + 0x12322e0e, 0x12322e0e, 0x11312f0f, 0x11312f0f, + 0x10303010 + }; + int i; + + /* pscale enable, PSCALE cbus bmem enable */ + WRITE_VREG(PSCALE_CTRL, 0xc000); + + /* write filter coefs */ + WRITE_VREG(PSCALE_BMEM_ADDR, 0); + for (i = 0; i < 33; i++) { + WRITE_VREG(PSCALE_BMEM_DAT, 0); + WRITE_VREG(PSCALE_BMEM_DAT, filt_coef[i]); + } + + /* Y horizontal initial info */ + WRITE_VREG(PSCALE_BMEM_ADDR, 37 * 2); + /* [35]: buf repeat pix0, + * [34:29] => buf receive num, + * [28:16] => buf blk x, + * [15:0] => buf phase + */ + WRITE_VREG(PSCALE_BMEM_DAT, 0x0008); + WRITE_VREG(PSCALE_BMEM_DAT, 
		0x60000000);

	/* C horizontal initial info */
	WRITE_VREG(PSCALE_BMEM_ADDR, 41 * 2);
	WRITE_VREG(PSCALE_BMEM_DAT, 0x0008);
	WRITE_VREG(PSCALE_BMEM_DAT, 0x60000000);

	/* Y vertical initial info */
	WRITE_VREG(PSCALE_BMEM_ADDR, 39 * 2);
	WRITE_VREG(PSCALE_BMEM_DAT, 0x0008);
	WRITE_VREG(PSCALE_BMEM_DAT, 0x60000000);

	/* C vertical initial info */
	WRITE_VREG(PSCALE_BMEM_ADDR, 43 * 2);
	WRITE_VREG(PSCALE_BMEM_DAT, 0x0008);
	WRITE_VREG(PSCALE_BMEM_DAT, 0x60000000);

	/* Y horizontal phase step */
	WRITE_VREG(PSCALE_BMEM_ADDR, 36 * 2 + 1);
	/* [19:0] => Y horizontal phase step */
	WRITE_VREG(PSCALE_BMEM_DAT, 0x10000);
	/* C horizontal phase step */
	WRITE_VREG(PSCALE_BMEM_ADDR, 40 * 2 + 1);
	/* [19:0] => C horizontal phase step */
	WRITE_VREG(PSCALE_BMEM_DAT, 0x10000);

	/* Y vertical phase step */
	WRITE_VREG(PSCALE_BMEM_ADDR, 38 * 2 + 1);
	/* [19:0] => Y vertical phase step */
	WRITE_VREG(PSCALE_BMEM_DAT, 0x10000);
	/* C vertical phase step */
	WRITE_VREG(PSCALE_BMEM_ADDR, 42 * 2 + 1);
	/* [19:0] => C horizontal phase step */
	WRITE_VREG(PSCALE_BMEM_DAT, 0x10000);

	/* reset pscaler */
#if 1/*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6*/
	WRITE_VREG(DOS_SW_RESET0, (1 << 10));
	WRITE_VREG(DOS_SW_RESET0, 0);
#else
	WRITE_RESET_REG(RESET2_REGISTER, RESET_PSCALE);
#endif
	/* Dummy reads to let the reset settle on pre-SC2 SoCs. */
	if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SC2) {
		READ_RESET_REG(RESET2_REGISTER);
		READ_RESET_REG(RESET2_REGISTER);
		READ_RESET_REG(RESET2_REGISTER);
	}
	WRITE_VREG(PSCALE_RST, 0x7);
	WRITE_VREG(PSCALE_RST, 0x0);
}

/*
 * Debugfs-style state dump: picture geometry, counters, queue levels,
 * VLD fifo registers, receiver state and (optionally) a hexdump of the
 * current input chunk.
 */
static void vmjpeg_dump_state(struct vdec_s *vdec)
{
	struct vdec_mjpeg_hw_s *hw =
		(struct vdec_mjpeg_hw_s *)(vdec->private);
	mmjpeg_debug_print(DECODE_ID(hw), 0,
		"====== %s\n", __func__);
	mmjpeg_debug_print(DECODE_ID(hw), 0,
		"width/height (%d/%d) buf_num %d\n",
		hw->frame_width,
		hw->frame_height,
		hw->buf_num
		);
	mmjpeg_debug_print(DECODE_ID(hw), 0,
		"is_framebase(%d), eos %d, state 0x%x, dec_result 0x%x dec_frm %d
put_frm %d run %d not_run_ready %d input_empty %d\n",
		input_frame_based(vdec),
		hw->eos,
		hw->stat,
		hw->dec_result,
		hw->frame_num,
		hw->put_num,
		hw->run_count,
		hw->not_run_ready,
		hw->input_empty
		);
	/* Receiver state is only meaningful on the non-v4l (VFM) path. */
	if (!hw->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) {
		enum receviver_start_e state =
			vf_notify_receiver(vdec->vf_provider_name,
				VFRAME_EVENT_PROVIDER_QUREY_STATE,
				NULL);
		mmjpeg_debug_print(DECODE_ID(hw), 0,
			"\nreceiver(%s) state %d\n",
			vdec->vf_provider_name,
			state);
	}
	mmjpeg_debug_print(DECODE_ID(hw), 0,
		"%s, newq(%d/%d), dispq(%d/%d) vf peek/get/put (%d/%d/%d)\n",
		__func__,
		kfifo_len(&hw->newframe_q),
		VF_POOL_SIZE,
		kfifo_len(&hw->display_q),
		VF_POOL_SIZE,
		hw->peek_num,
		hw->get_num,
		hw->put_num
		);
	mmjpeg_debug_print(DECODE_ID(hw), 0,
		"VIFF_BIT_CNT=0x%x\n",
		READ_VREG(VIFF_BIT_CNT));
	mmjpeg_debug_print(DECODE_ID(hw), 0,
		"VLD_MEM_VIFIFO_LEVEL=0x%x\n",
		READ_VREG(VLD_MEM_VIFIFO_LEVEL));
	mmjpeg_debug_print(DECODE_ID(hw), 0,
		"VLD_MEM_VIFIFO_WP=0x%x\n",
		READ_VREG(VLD_MEM_VIFIFO_WP));
	mmjpeg_debug_print(DECODE_ID(hw), 0,
		"VLD_MEM_VIFIFO_RP=0x%x\n",
		READ_VREG(VLD_MEM_VIFIFO_RP));
	mmjpeg_debug_print(DECODE_ID(hw), 0,
		"PARSER_VIDEO_RP=0x%x\n",
		STBUF_READ(&vdec->vbuf, get_rp));
	mmjpeg_debug_print(DECODE_ID(hw), 0,
		"PARSER_VIDEO_WP=0x%x\n",
		STBUF_READ(&vdec->vbuf, get_wp));
	/* Optional hexdump of the current input chunk (frame-based only). */
	if (input_frame_based(vdec) &&
		debug_enable & PRINT_FRAMEBASE_DATA
		) {
		int jj;
		if (hw->chunk && hw->chunk->block &&
			hw->chunk->size > 0) {
			u8 *data = NULL;

			if (!hw->chunk->block->is_mapped)
				data = codec_mm_vmap(hw->chunk->block->start +
					hw->chunk->offset, hw->chunk->size);
			else
				data = ((u8 *)hw->chunk->block->start_virt) +
					hw->chunk->offset;

			mmjpeg_debug_print(DECODE_ID(hw), 0,
				"frame data size 0x%x\n",
				hw->chunk->size);
			for (jj = 0; jj < hw->chunk->size; jj++) {
				if ((jj & 0xf) == 0)
					mmjpeg_debug_print(DECODE_ID(hw),
						PRINT_FRAMEBASE_DATA,
						"%06x:", jj);
				mmjpeg_debug_print(DECODE_ID(hw),
					PRINT_FRAMEBASE_DATA,
					"%02x ", data[jj]);
				if (((jj + 1) & 0xf) == 0)
					mmjpeg_debug_print(DECODE_ID(hw),
						PRINT_FRAMEBASE_DATA,
						"\n");
			}

			if (!hw->chunk->block->is_mapped)
				codec_mm_unmap_phyaddr(data);
		}
	}
}
/*
 * Stop the decode-time stopwatch and record the high-water mark of
 * per-frame decode time (ms) for this decoder instance.
 */
static void reset_process_time(struct vdec_mjpeg_hw_s *hw)
{
	if (hw->start_process_time) {
		unsigned process_time =
			1000 * (jiffies - hw->start_process_time) / HZ;
		hw->start_process_time = 0;
		if (process_time > max_process_time[DECODE_ID(hw)])
			max_process_time[DECODE_ID(hw)] = process_time;
	}
}

/* Arm the decode-time stopwatch; 2 timer ticks of no progress = timeout. */
static void start_process_time(struct vdec_mjpeg_hw_s *hw)
{
	hw->decode_timeout_count = 2;
	hw->start_process_time = jiffies;
}

/* Abort the stalled hardware decode and complete the run as DONE. */
static void timeout_process(struct vdec_mjpeg_hw_s *hw)
{
	amvdec_stop();
	mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
		"%s decoder timeout\n", __func__);
	hw->dec_result = DEC_RESULT_DONE;
	reset_process_time(hw);
	vdec_schedule_work(&hw->work);
}

/*
 * Periodic watchdog: detects stalled decodes (VLD fifo level not
 * moving past decode_timeout_val), honors ucode-reported timeouts,
 * services the radr/rval debug register pokes, and forces an exit when
 * the vdec is being disconnected.
 */
static void check_timer_func(struct timer_list *timer)
{
	struct vdec_mjpeg_hw_s *hw = container_of(timer,
		struct vdec_mjpeg_hw_s, check_timer);
	struct vdec_s *vdec = hw_to_vdec(hw);
	int timeout_val = decode_timeout_val;

	mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_VLD_DETAIL,
		"%s: status:nstatus=%d:%d\n",
		__func__, vdec->status, vdec->next_status);
	mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_VLD_DETAIL,
		"%s: %d,buftl=%x:%x:%x:%x\n",
		__func__, __LINE__,
		READ_VREG(VLD_MEM_VIFIFO_BUF_CNTL),
		STBUF_READ(&vdec->vbuf, get_wp),
		READ_VREG(VLD_MEM_VIFIFO_LEVEL),
		READ_VREG(VLD_MEM_VIFIFO_WP));

	/* Debug backdoor: module params radr/rval poke or dump a register. */
	if (radr != 0) {
		if (rval != 0) {
			WRITE_VREG(radr, rval);
			pr_info("WRITE_VREG(%x,%x)\n", radr, rval);
		} else
			pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr));
		rval = 0;
		radr = 0;
	}

	if (((debug_enable & PRINT_FLAG_TIMEOUT_STATUS) == 0) &&
		(timeout_val > 0) &&
		(hw->start_process_time > 0) &&
		((1000 * (jiffies - hw->start_process_time) / HZ)
			> timeout_val)) {
		/* Only count down when the fifo level has stopped moving. */
		if (hw->last_vld_level == READ_VREG(VLD_MEM_VIFIFO_LEVEL)) {
			if (hw->decode_timeout_count > 0)
				hw->decode_timeout_count--;
			if (hw->decode_timeout_count == 0)
				timeout_process(hw);
		}
		hw->last_vld_level = READ_VREG(VLD_MEM_VIFIFO_LEVEL);
	}

	/* Ucode-reported decode timeout. */
	if (READ_VREG(DEC_STATUS_REG) == DEC_DECODE_TIMEOUT) {
		pr_info("ucode DEC_DECODE_TIMEOUT\n");
		if (hw->decode_timeout_count > 0)
			hw->decode_timeout_count--;
		if (hw->decode_timeout_count == 0)
			timeout_process(hw);
		WRITE_VREG(DEC_STATUS_REG, 0);
	}

	if (vdec->next_status == VDEC_STATUS_DISCONNECTED) {
		hw->dec_result = DEC_RESULT_FORCE_EXIT;
		vdec_schedule_work(&hw->work);
		pr_info("vdec requested to be disconnected\n");
		return;
	}
	mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL);
}

/*
 * v4l path: bind slot i to a v4l2 frame buffer.  Reuses an already
 * bound buffer, otherwise allocates one from the v4l fb ops; picks up
 * the picture size from the IF layer when not yet known.
 */
static int vmjpeg_v4l_alloc_buff_config_canvas(struct vdec_mjpeg_hw_s *hw, int i)
{
	int ret;
	u32 canvas;
	ulong decbuf_start = 0, decbuf_u_start = 0, decbuf_v_start = 0;
	int decbuf_y_size = 0, decbuf_u_size = 0, decbuf_v_size = 0;
	u32 canvas_width = 0, canvas_height = 0;
	struct vdec_s *vdec = hw_to_vdec(hw);
	struct vdec_v4l2_buffer *fb = NULL;
	struct aml_vcodec_ctx *ctx =
		(struct aml_vcodec_ctx *)(hw->v4l2_ctx);

	/* Slot already bound: just reclaim the fb for the decoder. */
	if (hw->buffer_spec[i].v4l_ref_buf_addr) {
		struct vdec_v4l2_buffer *fb =
			(struct vdec_v4l2_buffer *)
			hw->buffer_spec[i].v4l_ref_buf_addr;

		fb->status = FB_ST_DECODER;
		return 0;
	}

	ret = ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC);
	if (ret < 0) {
		mmjpeg_debug_print(DECODE_ID(hw), 0,
			"[%d] get fb fail.\n",
			((struct aml_vcodec_ctx *)
			(hw->v4l2_ctx))->id);
		return ret;
	}

	fb->status = FB_ST_DECODER;

	if (!hw->frame_width || !hw->frame_height) {
		struct vdec_pic_info pic;
		vdec_v4l_get_pic_info(ctx, &pic);
		hw->frame_width = pic.visible_width;
		hw->frame_height = pic.visible_height;
		mmjpeg_debug_print(DECODE_ID(hw), 0,
			"[%d] set %d x %d from IF layer\n", ctx->id,
			hw->frame_width, hw->frame_height);
	}

hw->buffer_spec[i].v4l_ref_buf_addr = (ulong)fb; + if (fb->num_planes == 1) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].offset; + decbuf_u_start = decbuf_start + decbuf_y_size; + decbuf_u_size = decbuf_y_size / 4; + decbuf_v_start = decbuf_u_start + decbuf_u_size; + decbuf_v_size = decbuf_u_size; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + } else if (fb->num_planes == 2) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].size; + decbuf_u_start = fb->m.mem[1].addr; + decbuf_u_size = fb->m.mem[1].size >> 1; + decbuf_v_start = decbuf_u_start + decbuf_u_size; + decbuf_v_size = decbuf_u_size; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + fb->m.mem[1].bytes_used = fb->m.mem[1].size; + } else if (fb->num_planes == 3) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].size; + decbuf_u_start = fb->m.mem[1].addr; + decbuf_u_size = fb->m.mem[1].size; + decbuf_v_start = fb->m.mem[2].addr; + decbuf_v_size = fb->m.mem[2].size; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + fb->m.mem[1].bytes_used = fb->m.mem[1].size; + fb->m.mem[2].bytes_used = fb->m.mem[2].size; + } + + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] v4l ref buf addr: 0x%x\n", ctx->id, fb); + + if (vdec->parallel_dec == 1) { + if (hw->buffer_spec[i].y_canvas_index == -1) + hw->buffer_spec[i].y_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].u_canvas_index == -1) + hw->buffer_spec[i].u_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].v_canvas_index == -1) + hw->buffer_spec[i].v_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + } else { + canvas = 
vdec->get_canvas(i, 3); + hw->buffer_spec[i].y_canvas_index = canvas_y(canvas); + hw->buffer_spec[i].u_canvas_index = canvas_u(canvas); + hw->buffer_spec[i].v_canvas_index = canvas_v(canvas); + } + + hw->buffer_spec[i].canvas_config[0].phy_addr = + decbuf_start; + hw->buffer_spec[i].canvas_config[0].width = + canvas_width; + hw->buffer_spec[i].canvas_config[0].height = + canvas_height; + hw->buffer_spec[i].canvas_config[0].block_mode = + hw->canvas_mode; + hw->buffer_spec[i].canvas_config[0].endian = + hw->canvas_endian; + + config_cav_lut(hw->buffer_spec[i].y_canvas_index, + &hw->buffer_spec[i].canvas_config[0], VDEC_1); + + hw->buffer_spec[i].canvas_config[1].phy_addr = + decbuf_u_start; + hw->buffer_spec[i].canvas_config[1].width = + canvas_width / 2; + hw->buffer_spec[i].canvas_config[1].height = + canvas_height / 2; + hw->buffer_spec[i].canvas_config[1].block_mode = + hw->canvas_mode; + hw->buffer_spec[i].canvas_config[1].endian = + hw->canvas_endian; + + config_cav_lut(hw->buffer_spec[i].u_canvas_index, + &hw->buffer_spec[i].canvas_config[1], VDEC_1); + + hw->buffer_spec[i].canvas_config[2].phy_addr = + decbuf_v_start; + hw->buffer_spec[i].canvas_config[2].width = + canvas_width / 2; + hw->buffer_spec[i].canvas_config[2].height = + canvas_height / 2; + hw->buffer_spec[i].canvas_config[2].block_mode = + hw->canvas_mode; + hw->buffer_spec[i].canvas_config[2].endian = + hw->canvas_endian; + + config_cav_lut(hw->buffer_spec[i].v_canvas_index, + &hw->buffer_spec[i].canvas_config[2], VDEC_1); + + /* mjpeg decoder canvas need to be revert to match display. */ + hw->buffer_spec[i].canvas_config[0].endian = hw->canvas_endian ? 0 : 7; + hw->buffer_spec[i].canvas_config[1].endian = hw->canvas_endian ? 0 : 7; + hw->buffer_spec[i].canvas_config[2].endian = hw->canvas_endian ? 
0 : 7; + + return 0; +} + +static int vmjpeg_get_buf_num(struct vdec_mjpeg_hw_s *hw) +{ + int buf_num = DECODE_BUFFER_NUM_DEF; + + buf_num += hw->dynamic_buf_num_margin; + + if (buf_num > DECODE_BUFFER_NUM_MAX) + buf_num = DECODE_BUFFER_NUM_MAX; + + return buf_num; +} + +static bool is_enough_free_buffer(struct vdec_mjpeg_hw_s *hw) +{ + int i; + + for (i = 0; i < hw->buf_num; i++) { + if (hw->vfbuf_use[i] == 0) + break; + } + + return i == hw->buf_num ? false : true; +} + +static int find_free_buffer(struct vdec_mjpeg_hw_s *hw) +{ + int i; + + for (i = 0; i < hw->buf_num; i++) { + if (hw->vfbuf_use[i] == 0) + break; + } + + if (i == hw->buf_num) + return -1; + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) { + /*run to parser csd data*/ + i = 0; + } else { + if (!ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) + return -1; + + if (vmjpeg_v4l_alloc_buff_config_canvas(hw, i)) + return -1; + } + } + + return i; +} + +static int vmjpeg_hw_ctx_restore(struct vdec_mjpeg_hw_s *hw) +{ + struct buffer_spec_s *buff_spec; + int index, i; + + index = find_free_buffer(hw); + if (index < 0) + return -1; + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6)); + WRITE_VREG(DOS_SW_RESET0, 0); + + if (!hw->init_flag) { + vmjpeg_canvas_init(hw); + } else { + if (!hw->is_used_v4l) { + for (i = 0; i < hw->buf_num; i++) { + buff_spec = &hw->buffer_spec[i]; + config_cav_lut(buff_spec->y_canvas_index, + &buff_spec->canvas_config[0], VDEC_1); + config_cav_lut(buff_spec->u_canvas_index, + &buff_spec->canvas_config[1], VDEC_1); + config_cav_lut(buff_spec->v_canvas_index, + &buff_spec->canvas_config[2], VDEC_1); + } + } + } + + /* find next decode buffer index */ + WRITE_VREG(AV_SCRATCH_4, spec2canvas(&hw->buffer_spec[index])); + WRITE_VREG(AV_SCRATCH_5, index | 1 << 24); + init_scaler(); + + /* clear buffer IN/OUT registers */ + WRITE_VREG(MREG_TO_AMRISC, 0); + 
	WRITE_VREG(MREG_FROM_AMRISC, 0);

	WRITE_VREG(MCPU_INTR_MSK, 0xffff);
	/* Decode params: frame height in bits [..4], 0x8000 flag —
	 * NOTE(review): bit meanings per vendor ucode, not verifiable here. */
	WRITE_VREG(MREG_DECODE_PARAM, (hw->frame_height << 4) | 0x8000);

	/* clear mailbox interrupt */
	WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1);
	/* enable mailbox interrupt */
	WRITE_VREG(ASSIST_MBOX1_MASK, 1);
	/* set interrupt mapping for vld */
	WRITE_VREG(ASSIST_AMR1_INT8, 8);
#if 1/*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6*/
	CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17);
#endif
	return 0;
}

/*
 * One-time instance init: load the multi-instance MJPEG firmware blob,
 * seed frame geometry (from stream info on the non-v4l path), reset
 * all counters, fill the free-vframe fifo, create the bmmu buffer box
 * and set up the watchdog timer and work item.
 */
static s32 vmjpeg_init(struct vdec_s *vdec)
{
	int i;
	int size = -1, fw_size = 0x1000 * 16;
	struct firmware_s *fw = NULL;
	struct vdec_mjpeg_hw_s *hw =
		(struct vdec_mjpeg_hw_s *)vdec->private;

	fw = vmalloc(sizeof(struct firmware_s) + fw_size);
	if (IS_ERR_OR_NULL(fw))
		return -ENOMEM;

	size = get_firmware_data(VIDEO_DEC_MJPEG_MULTI, fw->data);
	if (size < 0) {
		pr_err("get firmware fail.");
		vfree(fw);
		return -1;
	}

	fw->len = size;
	hw->fw = fw;

	if (hw->is_used_v4l) {
		/* v4l path learns the size from the stream/IF layer later. */
		hw->frame_width = 0;
		hw->frame_height = 0;
	} else {
		hw->frame_width = hw->vmjpeg_amstream_dec_info.width;
		hw->frame_height = hw->vmjpeg_amstream_dec_info.height;
	}
	/* Default duration 3840 (96kHz ticks = 25fps) when none given. */
	hw->frame_dur = ((hw->vmjpeg_amstream_dec_info.rate) ?
		hw->vmjpeg_amstream_dec_info.rate : 3840);
	hw->saved_resolution = 0;
	hw->eos = 0;
	hw->init_flag = 0;
	hw->frame_num = 0;
	hw->put_num = 0;
	hw->run_count = 0;
	hw->not_run_ready = 0;
	hw->input_empty = 0;
	hw->peek_num = 0;
	hw->get_num = 0;
	for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++)
		hw->vfbuf_use[i] = 0;

	INIT_KFIFO(hw->display_q);
	INIT_KFIFO(hw->newframe_q);

	/* All pool vframes start free (index -1) on the newframe fifo. */
	for (i = 0; i < VF_POOL_SIZE; i++) {
		const struct vframe_s *vf = &hw->vfpool[i];

		hw->vfpool[i].index = -1;
		kfifo_put(&hw->newframe_q, vf);
	}

	if (hw->mm_blk_handle) {
		decoder_bmmu_box_free(hw->mm_blk_handle);
		hw->mm_blk_handle = NULL;
	}

	hw->mm_blk_handle = decoder_bmmu_box_alloc_box(
		DRIVER_NAME,
		0,
		MAX_BMMU_BUFFER_NUM,
		4 + PAGE_SHIFT,
		CODEC_MM_FLAGS_CMA_CLEAR |
		CODEC_MM_FLAGS_FOR_VDECODER);

	timer_setup(&hw->check_timer, check_timer_func, 0);
	hw->check_timer.expires = jiffies + CHECK_INTERVAL;
	/*add_timer(&hw->check_timer);*/
	hw->stat |= STAT_TIMER_ARM;
	hw->stat |= STAT_ISR_REG;

	WRITE_VREG(DECODE_STOP_POS, udebug_flag);
	INIT_WORK(&hw->work, vmjpeg_work);
	pr_info("w:h=%d:%d\n", hw->frame_width, hw->frame_height);
	return 0;
}

/*
 * Scheduler hook: report which cores this instance could run on now.
 * Returns 0 (not ready) when at EOS, when the stream buffer has less
 * than the pre-decode threshold, when the v4l DPB is not ready, or
 * when no free output buffer exists.
 */
static unsigned long run_ready(struct vdec_s *vdec,
	unsigned long mask)
{
	struct vdec_mjpeg_hw_s *hw =
		(struct vdec_mjpeg_hw_s *)vdec->private;
	hw->not_run_ready++;
	if (hw->eos)
		return 0;
	if (vdec_stream_based(vdec) && (hw->init_flag == 0)
		&& pre_decode_buf_level != 0) {
		u32 rp, wp, level;

		rp = STBUF_READ(&vdec->vbuf, get_rp);
		wp = STBUF_READ(&vdec->vbuf, get_wp);
		if (wp < rp)
			level = vdec->input.size + wp - rp;
		else
			level = wp - rp;

		if (level < pre_decode_buf_level)
			return 0;
	}

	if (hw->is_used_v4l) {
		struct aml_vcodec_ctx *ctx =
			(struct aml_vcodec_ctx *)(hw->v4l2_ctx);

		if (ctx->param_sets_from_ucode) {
			if (hw->v4l_params_parsed) {
				if (!ctx->v4l_codec_dpb_ready &&
					v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) <
					run_ready_min_buf_num)
					return 0;
			} else {
				/* Params not parsed yet: hold off during a
				 * pending resolution change. */
				if (ctx->v4l_resolution_change)
					return 0;
			}
		} else if (!ctx->v4l_codec_dpb_ready) {
			if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) <
				run_ready_min_buf_num)
				return 0;
		}
	}

	if (!is_enough_free_buffer(hw)) {
		hw->buffer_not_ready++;
		return 0;
	}

	hw->not_run_ready = 0;
	hw->buffer_not_ready = 0;
	if (vdec->parallel_dec == 1)
		return CORE_MASK_VDEC_1;
	else
		return CORE_MASK_VDEC_1 | CORE_MASK_HEVC;
}

/*
 * Scheduler hook: perform one decode run.  Resets the core, checks
 * for a free output buffer and input data, loads the firmware if not
 * already resident, restores the HW context and kicks the decoder.
 * Failure paths complete the run via the work item with AGAIN /
 * FORCE_EXIT / ERROR results.
 */
static void run(struct vdec_s *vdec, unsigned long mask,
	void (*callback)(struct vdec_s *, void *), void *arg)
{
	struct vdec_mjpeg_hw_s *hw =
		(struct vdec_mjpeg_hw_s *)vdec->private;
	int i, ret;

	hw->vdec_cb_arg = arg;
	hw->vdec_cb = callback;

	hw->run_count++;
	vdec_reset_core(vdec);
	for (i = 0; i < hw->buf_num; i++) {
		if (hw->vfbuf_use[i] == 0)
			break;
	}

	/* No free output buffer: retry later. */
	if (i == hw->buf_num) {
		hw->dec_result = DEC_RESULT_AGAIN;
		vdec_schedule_work(&hw->work);
		return;
	}

	ret = vdec_prepare_input(vdec, &hw->chunk);
	if (ret <= 0) {
		hw->input_empty++;
		mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
			"%s: %d,r=%d,buftl=%x:%x:%x\n",
			__func__, __LINE__, ret,
			READ_VREG(VLD_MEM_VIFIFO_BUF_CNTL),
			STBUF_READ(&vdec->vbuf, get_rp),
			READ_VREG(VLD_MEM_VIFIFO_WP));

		hw->dec_result = DEC_RESULT_AGAIN;
		vdec_schedule_work(&hw->work);
		return;
	}
	hw->input_empty = 0;
	hw->dec_result = DEC_RESULT_NONE;
	if (vdec->mc_loaded) {
		/*firmware have load before,
		and not changes to another.
		ignore reload.
		*/
	} else {
		ret = amvdec_vdec_loadmc_ex(VFORMAT_MJPEG, "mmjpeg", vdec, hw->fw->data);
		if (ret < 0) {
			pr_err("[%d] MMJPEG: the %s fw loading failed, err: %x\n",
				vdec->id, tee_enabled() ?
				"TEE" : "local", ret);
			hw->dec_result = DEC_RESULT_FORCE_EXIT;
			vdec_schedule_work(&hw->work);
			return;
		}
		vdec->mc_loaded = 1;
		vdec->mc_type = VFORMAT_MJPEG;
	}
/*	if (amvdec_vdec_loadmc_buf_ex(vdec, hw->fw->data, hw->fw->len) < 0) {
		pr_err("%s: Error amvdec_loadmc fail\n", __func__);
		return;
	}*/

	if (vmjpeg_hw_ctx_restore(hw) < 0) {
		hw->dec_result = DEC_RESULT_ERROR;
		mmjpeg_debug_print(DECODE_ID(hw), 0,
			"amvdec_mmjpeg: error HW context restore\n");
		vdec_schedule_work(&hw->work);
		return;
	}
#if 0
	vdec_enable_input(vdec);
	mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL);
#endif
	hw->stat |= STAT_MC_LOAD;
	/* Arm the decode watchdog, then start the core and open input. */
	start_process_time(hw);
	hw->last_vld_level = 0;
	mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL);
	amvdec_start();
	vdec_enable_input(vdec);
	hw->stat |= STAT_VDEC_RUN;
	hw->init_flag = 1;

	mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
		"%s (0x%x 0x%x 0x%x) vldcrl 0x%x bitcnt 0x%x powerctl 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
		__func__,
		READ_VREG(VLD_MEM_VIFIFO_LEVEL),
		READ_VREG(VLD_MEM_VIFIFO_WP),
		READ_VREG(VLD_MEM_VIFIFO_RP),
		READ_VREG(VLD_DECODE_CONTROL),
		READ_VREG(VIFF_BIT_CNT),
		READ_VREG(POWER_CTL_VLD),
		READ_VREG(VLD_MEM_VIFIFO_START_PTR),
		READ_VREG(VLD_MEM_VIFIFO_CURR_PTR),
		READ_VREG(VLD_MEM_VIFIFO_CONTROL),
		READ_VREG(VLD_MEM_VIFIFO_BUF_CNTL),
		READ_VREG(VLD_MEM_VIFIFO_END_PTR));
}
/*
 * Busy-wait (with sleeps) until the VLD read pointer stops moving,
 * i.e. the ucode has finished scanning the stream buffer; gives up
 * after ~1000 polls with a debug message.
 */
static void wait_vmjpeg_search_done(struct vdec_mjpeg_hw_s *hw)
{
	u32 vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP);
	int count = 0;

	do {
		usleep_range(100, 500);
		if (vld_rp == READ_VREG(VLD_MEM_VIFIFO_RP))
			break;
		if (count > 1000) {
			mmjpeg_debug_print(DECODE_ID(hw), 0,
				"%s, count %d vld_rp 0x%x VLD_MEM_VIFIFO_RP 0x%x\n",
				__func__, count, vld_rp, READ_VREG(VLD_MEM_VIFIFO_RP));
			break;
		} else
			vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP);
		count++;
	} while (1);
}

/*
 * Queue an empty EOS vframe so the v4l (or VFM) sink learns the stream
 * has ended.  Only acts when hw->eos is set; returns -1 when no free
 * vframe/fb can be obtained.
 */
static int notify_v4l_eos(struct vdec_s *vdec)
{
	struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s
		*)vdec->private;
	struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
	struct vframe_s *vf = NULL;
	struct vdec_v4l2_buffer *fb = NULL;
	int index = -1;

	if (hw->eos) {
		if (kfifo_get(&hw->newframe_q, &vf) == 0 || vf == NULL) {
			mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
				"%s fatal error, no available buffer slot.\n",
				__func__);
			return -1;
		}

		if (hw->is_used_v4l) {
			index = find_free_buffer(hw);
			if (index == -1) {
				/* No bound slot: grab a standalone fb for EOS. */
				ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token);
				if (ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC) < 0) {
					pr_err("[%d] get fb fail.\n", ctx->id);
					return -1;
				}
			}
		}

		vf->type |= VIDTYPE_V4L_EOS;
		vf->timestamp = ULONG_MAX;
		vf->v4l_mem_handle = (index == -1) ? (ulong)fb :
			hw->buffer_spec[index].v4l_ref_buf_addr;
		vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L;
		fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;

		vdec_vframe_ready(vdec, vf);
		kfifo_put(&hw->display_q, (const struct vframe_s *)vf);

		if (hw->is_used_v4l)
			fb->task->submit(fb->task, TASK_TYPE_DEC);
		else
			vf_notify_receiver(vdec->vf_provider_name,
				VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL);

		pr_info("[%d] mjpeg EOS notify.\n", (hw->is_used_v4l)?ctx->id:vdec->id);
	}

	return 0;
}

/*
 * Deferred-work handler: completes a decode run based on dec_result
 * (DONE / AGAIN→EOS / FORCE_EXIT / EOS), stops the core, releases the
 * scheduler cores and invokes the run-complete callback.
 */
static void vmjpeg_work(struct work_struct *work)
{
	struct vdec_mjpeg_hw_s *hw = container_of(work,
		struct vdec_mjpeg_hw_s, work);
	struct vdec_s *vdec = hw_to_vdec(hw);

	mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_BUFFER_DETAIL,
		"%s: result=%d,len=%d:%d\n",
		__func__, hw->dec_result,
		kfifo_len(&hw->newframe_q),
		kfifo_len(&hw->display_q));
	if (hw->dec_result == DEC_RESULT_DONE) {
		vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk);
		hw->chunk = NULL;
	} else if (hw->dec_result == DEC_RESULT_AGAIN) {
		/*
		stream base: stream buf empty or timeout
		frame base: vdec_prepare_input fail
		*/
		if (!vdec_has_more_input(hw_to_vdec(hw))) {
			/* Input exhausted: convert AGAIN into an EOS pass. */
			hw->dec_result = DEC_RESULT_EOS;
			vdec_schedule_work(&hw->work);
			/*pr_info("%s: return\n",
			__func__);*/
			return;
		}
	} else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) {
		pr_info("%s: force exit\n", __func__);
		if (hw->stat & STAT_ISR_REG) {
			amvdec_stop();
			vdec_free_irq(VDEC_IRQ_1, (void *)hw);
			hw->stat &= ~STAT_ISR_REG;
		}
	} else if (hw->dec_result == DEC_RESULT_EOS) {
		pr_info("%s: end of stream\n", __func__);
		if (hw->stat & STAT_VDEC_RUN) {
			amvdec_stop();
			hw->stat &= ~STAT_VDEC_RUN;
		}
		hw->eos = 1;
		notify_v4l_eos(vdec);

		vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk);
		hw->chunk = NULL;
		vdec_clean_input(hw_to_vdec(hw));
	}
	if (hw->stat & STAT_VDEC_RUN) {
		amvdec_stop();
		hw->stat &= ~STAT_VDEC_RUN;
	}
	/*disable mbox interrupt */
	WRITE_VREG(ASSIST_MBOX1_MASK, 0);
	wait_vmjpeg_search_done(hw);

	if (hw->is_used_v4l) {
		struct aml_vcodec_ctx *ctx =
			(struct aml_vcodec_ctx *)(hw->v4l2_ctx);

		/* Keep the v4l frame interface in sync before params are known. */
		if (ctx->param_sets_from_ucode &&
			!hw->v4l_params_parsed)
			vdec_v4l_write_frame_sync(ctx);
	}

	/* mark itself has all HW resource released and input released */
	if (vdec->parallel_dec == 1)
		vdec_core_finish_run(hw_to_vdec(hw), CORE_MASK_VDEC_1);
	else {
		vdec_core_finish_run(hw_to_vdec(hw), CORE_MASK_VDEC_1
			| CORE_MASK_HEVC);
	}
	del_timer_sync(&hw->check_timer);
	hw->stat &= ~STAT_TIMER_ARM;

	if (hw->vdec_cb)
		hw->vdec_cb(hw_to_vdec(hw), hw->vdec_cb_arg);
}

/*
 * Tear-down helper: stop the core, release IRQ/timer/work, free the
 * bmmu box and the firmware copy.  Safe to call when partially set up
 * (each resource is guarded by its STAT_* bit or a NULL check).
 */
static int vmjpeg_stop(struct vdec_mjpeg_hw_s *hw)
{
	pr_info("%s ...count = %d\n", __func__, hw->frame_num);

	if (hw->stat & STAT_VDEC_RUN) {
		amvdec_stop();
		pr_info("%s amvdec_stop\n", __func__);
		hw->stat &= ~STAT_VDEC_RUN;
	}

	if (hw->stat & STAT_ISR_REG) {
		vdec_free_irq(VDEC_IRQ_1, (void *)hw);
		hw->stat &= ~STAT_ISR_REG;
	}

	if (hw->stat & STAT_TIMER_ARM) {
		del_timer_sync(&hw->check_timer);
		hw->stat &= ~STAT_TIMER_ARM;
	}
	cancel_work_sync(&hw->work);
	hw->init_flag = 0;

	if (hw->mm_blk_handle) {
		decoder_bmmu_box_free(hw->mm_blk_handle);
hw->mm_blk_handle = NULL; + } + + if (hw->fw) { + vfree(hw->fw); + hw->fw = NULL; + } + + return 0; +} + +static int ammvdec_mjpeg_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_mjpeg_hw_s *hw = NULL; + int config_val = 0; + + if (pdata == NULL) { + pr_info("ammvdec_mjpeg memory resource undefined.\n"); + return -EFAULT; + } + + hw = vzalloc(sizeof(struct vdec_mjpeg_hw_s)); + if (hw == NULL) { + pr_info("\nammvdec_mjpeg device data allocation failed\n"); + return -ENOMEM; + } + + /* the ctx from v4l2 driver. */ + hw->v4l2_ctx = pdata->private; + + pdata->private = hw; + pdata->dec_status = vmjpeg_dec_status; + + pdata->run = run; + pdata->run_ready = run_ready; + pdata->irq_handler = vmjpeg_isr; + pdata->threaded_irq_handler = vmjpeg_isr_thread_fn; + pdata->dump_state = vmjpeg_dump_state; + + snprintf(hw->vdec_name, sizeof(hw->vdec_name), + "vmjpeg-%d", pdev->id); + snprintf(hw->pts_name, sizeof(hw->pts_name), + "%s-pts", hw->vdec_name); + snprintf(hw->new_q_name, sizeof(hw->new_q_name), + "%s-newframe_q", hw->vdec_name); + snprintf(hw->disp_q_name, sizeof(hw->disp_q_name), + "%s-dispframe_q", hw->vdec_name); + + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + hw->buffer_spec[i].y_canvas_index = -1; + hw->buffer_spec[i].u_canvas_index = -1; + hw->buffer_spec[i].v_canvas_index = -1; + } + } + + if (pdata->use_vfm_path) + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + PROVIDER_NAME ".%02x", pdev->id & 0xff); + + platform_set_drvdata(pdev, pdata); + hw->platform_dev = pdev; + + if (((debug_enable & IGNORE_PARAM_FROM_CONFIG) == 0) && pdata->config_len) { + mmjpeg_debug_print(DECODE_ID(hw), 0, "pdata->config: %s\n", pdata->config); + if (get_config_int(pdata->config, "parm_v4l_buffer_margin", + &config_val) == 0) + hw->dynamic_buf_num_margin = 
config_val; + else + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + hw->canvas_mode = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_endian", + &config_val) == 0) + hw->canvas_endian = config_val; + + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hw->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hw->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hw->is_used_v4l = config_val; + } else { + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + } + + hw->buf_num = vmjpeg_get_buf_num(hw); + + if (!hw->is_used_v4l) + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vf_provider_ops, pdata); + + platform_set_drvdata(pdev, pdata); + + hw->platform_dev = pdev; + + vdec_source_changed(VFORMAT_MJPEG, + 1920, 1080, 60); + if (vmjpeg_init(pdata) < 0) { + pr_info("ammvdec_mjpeg init failed.\n"); + if (hw) { + vfree(hw); + hw = NULL; + } + pdata->dec_status = NULL; + return -ENODEV; + } + vdec_set_prepare_level(pdata, start_decode_buf_level); + + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + + return 0; +} + +static int ammvdec_mjpeg_remove(struct platform_device *pdev) +{ + struct vdec_mjpeg_hw_s *hw = + (struct vdec_mjpeg_hw_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec; + int i; + + if (!hw) + return -1; + vdec = hw_to_vdec(hw); + + vmjpeg_stop(hw); + + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1); + else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + if (vdec->parallel_dec == 
1) { + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + vdec->free_canvas_ex(hw->buffer_spec[i].y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].u_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].v_canvas_index, vdec->id); + } + } + + vfree(hw); + + pr_info("%s\n", __func__); + return 0; +} + +/****************************************/ + +static struct platform_driver ammvdec_mjpeg_driver = { + .probe = ammvdec_mjpeg_probe, + .remove = ammvdec_mjpeg_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t ammvdec_mjpeg_profile = { + .name = "mmjpeg", + .profile = "v4l" +}; + +static int __init ammvdec_mjpeg_driver_init_module(void) +{ + if (platform_driver_register(&ammvdec_mjpeg_driver)) { + pr_err("failed to register ammvdec_mjpeg driver\n"); + return -ENODEV; + } + vcodec_profile_register(&ammvdec_mjpeg_profile); + vcodec_feature_register(VFORMAT_MJPEG, 0); + return 0; +} + +static void __exit ammvdec_mjpeg_driver_remove_module(void) +{ + platform_driver_unregister(&ammvdec_mjpeg_driver); +} + +/****************************************/ +module_param(debug_enable, uint, 0664); +MODULE_PARM_DESC(debug_enable, "\n debug enable\n"); +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, + "\n ammvdec_h264 pre_decode_buf_level\n"); +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_mmpeg12 udebug_flag\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, "\n ammvdec_mjpeg decode_timeout_val\n"); + +module_param_array(max_process_time, uint, &max_decode_instance_num, 0664); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(start_decode_buf_level, uint, 0664); 
+MODULE_PARM_DESC(start_decode_buf_level, "\nstart_decode_buf_level\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\nrval\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n without_display_mode\n"); + +module_init(ammvdec_mjpeg_driver_init_module); +module_exit(ammvdec_mjpeg_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC MJMPEG Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/mpeg12/Makefile b/drivers/frame_provider/decoder/mpeg12/Makefile new file mode 100644 index 0000000..34f78c4 --- /dev/null +++ b/drivers/frame_provider/decoder/mpeg12/Makefile
@@ -0,0 +1,5 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_MPEG12) += amvdec_mpeg12.o +amvdec_mpeg12-objs += vmpeg12.o + +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_MPEG2_MULTI) += amvdec_mmpeg12.o +amvdec_mmpeg12-objs += vmpeg12_multi.o
diff --git a/drivers/frame_provider/decoder/mpeg12/vmpeg12.c b/drivers/frame_provider/decoder/mpeg12/vmpeg12.c new file mode 100644 index 0000000..a813ac7 --- /dev/null +++ b/drivers/frame_provider/decoder/mpeg12/vmpeg12.c
@@ -0,0 +1,2221 @@ +/* + * drivers/amlogic/amports/vmpeg12.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/module.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/registers/cpu_version.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "vmpeg12.h" +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include <linux/uaccess.h> +#include <linux/amlogic/media/codec_mm/configs.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> + + + +#ifdef CONFIG_AM_VDEC_MPEG12_LOG +#define AMLOG +#define LOG_LEVEL_VAR amlog_level_vmpeg +#define LOG_MASK_VAR amlog_mask_vmpeg +#define LOG_LEVEL_ERROR 0 +#define LOG_LEVEL_INFO 1 +#define LOG_LEVEL_DESC "0:ERROR, 1:INFO" +#endif +#include 
<linux/amlogic/media/utils/amlog.h> +MODULE_AMLOG(LOG_LEVEL_ERROR, 0, LOG_LEVEL_DESC, LOG_DEFAULT_MASK_DESC); + +#include "../utils/amvdec.h" +#include "../utils/vdec.h" +#include "../utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" + +#define DRIVER_NAME "amvdec_mpeg12" +#define MODULE_NAME "amvdec_mpeg12" + +/* protocol registers */ +#define MREG_SEQ_INFO AV_SCRATCH_4 +#define MREG_PIC_INFO AV_SCRATCH_5 +#define MREG_PIC_WIDTH AV_SCRATCH_6 +#define MREG_PIC_HEIGHT AV_SCRATCH_7 +#define MREG_BUFFERIN AV_SCRATCH_8 +#define MREG_BUFFEROUT AV_SCRATCH_9 + +#define MREG_CMD AV_SCRATCH_A +#define MREG_CO_MV_START AV_SCRATCH_B +#define MREG_ERROR_COUNT AV_SCRATCH_C +#define MREG_FRAME_OFFSET AV_SCRATCH_D +#define MREG_WAIT_BUFFER AV_SCRATCH_E +#define MREG_FATAL_ERROR AV_SCRATCH_F +#define MREG_FORCE_I_RDY AV_SCRATCH_G + +#define PICINFO_ERROR 0x80000000 +#define PICINFO_TYPE_MASK 0x00030000 +#define PICINFO_TYPE_I 0x00000000 +#define PICINFO_TYPE_P 0x00010000 +#define PICINFO_TYPE_B 0x00020000 + +#define PICINFO_PROG 0x8000 +#define PICINFO_RPT_FIRST 0x4000 +#define PICINFO_TOP_FIRST 0x2000 +#define PICINFO_FRAME 0x1000 + +#define SEQINFO_EXT_AVAILABLE 0x80000000 +#define SEQINFO_PROG 0x00010000 +#define CCBUF_SIZE (5*1024) + +#define VF_POOL_SIZE 32 +#define DECODE_BUFFER_NUM_MAX 8 +#define PUT_INTERVAL (HZ/100) +#define WORKSPACE_SIZE (2*SZ_64K) +#define MAX_BMMU_BUFFER_NUM (DECODE_BUFFER_NUM_MAX + 1) + + +#define INCPTR(p) ptr_atomic_wrap_inc(&p) + +#define DEC_CONTROL_FLAG_FORCE_2500_720_576_INTERLACE 0x0002 +#define DEC_CONTROL_FLAG_FORCE_3000_704_480_INTERLACE 0x0004 +#define DEC_CONTROL_FLAG_FORCE_2500_704_576_INTERLACE 0x0008 +#define DEC_CONTROL_FLAG_FORCE_2500_544_576_INTERLACE 0x0010 +#define DEC_CONTROL_FLAG_FORCE_2500_480_576_INTERLACE 0x0020 +#define DEC_CONTROL_INTERNAL_MASK 0x0fff +#define DEC_CONTROL_FLAG_FORCE_SEQ_INTERLACE 0x1000 + +#define INTERLACE_SEQ_ALWAYS + +#if 1/* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +#define 
NV21 +#endif + + +enum { + FRAME_REPEAT_TOP, + FRAME_REPEAT_BOT, + FRAME_REPEAT_NONE +}; + +static struct vframe_s *vmpeg_vf_peek(void *); +static struct vframe_s *vmpeg_vf_get(void *); +static void vmpeg_vf_put(struct vframe_s *, void *); +static int vmpeg_vf_states(struct vframe_states *states, void *); +static int vmpeg_event_cb(int type, void *data, void *private_data); + +static int vmpeg12_prot_init(void); +static void vmpeg12_local_init(void); + +static const char vmpeg12_dec_id[] = "vmpeg12-dev"; +#define PROVIDER_NAME "decoder.mpeg12" +static const struct vframe_operations_s vmpeg_vf_provider = { + .peek = vmpeg_vf_peek, + .get = vmpeg_vf_get, + .put = vmpeg_vf_put, + .event_cb = vmpeg_event_cb, + .vf_states = vmpeg_vf_states, +}; +static void *mm_blk_handle; +static struct vframe_provider_s vmpeg_vf_prov; +static int tvp_flag; + +static DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(recycle_q, struct vframe_s *, VF_POOL_SIZE); + +static const u32 frame_rate_tab[16] = { + 96000 / 30, 96000000 / 23976, 96000 / 24, 96000 / 25, + 9600000 / 2997, 96000 / 30, 96000 / 50, 9600000 / 5994, + 96000 / 60, + /* > 8 reserved, use 24 */ + 96000 / 24, 96000 / 24, 96000 / 24, 96000 / 24, + 96000 / 24, 96000 / 24, 96000 / 24 +}; + +static struct vframe_s vfpool[VF_POOL_SIZE]; +static struct vframe_s vfpool2[VF_POOL_SIZE]; +static int cur_pool_idx; +static s32 vfbuf_use[DECODE_BUFFER_NUM_MAX]; +static u32 dec_control; +static u32 frame_width, frame_height, frame_dur, frame_prog; +static u32 saved_resolution; +static struct timer_list recycle_timer; +static u32 stat; +static u32 buf_size = 32 * 1024 * 1024; +static u32 ccbuf_phyAddress; +static void *ccbuf_phyAddress_virt; +static int ccbuf_phyAddress_is_remaped_nocache; +static u32 lastpts; +static u32 fr_hint_status; +static u32 last_offset; +static u32 ratio_control; + + +static DEFINE_SPINLOCK(lock); + +static u32 
frame_rpt_state; + +static struct dec_sysinfo vmpeg12_amstream_dec_info; +static struct vdec_info *gvs; +static struct vdec_s *vdec; + +/* for error handling */ +static s32 frame_force_skip_flag; +static s32 error_frame_skip_level; +static s32 wait_buffer_counter; +static u32 first_i_frame_ready; +static u32 force_first_i_ready; + +static struct work_struct userdata_push_work; +static struct work_struct notify_work; +static struct work_struct reset_work; +static struct work_struct set_clk_work; +static bool is_reset; + +static DEFINE_MUTEX(userdata_mutex); + +static void vmpeg12_create_userdata_manager(u8 *userdata_buf, int buf_len); + +struct mpeg12_userdata_recored_t { + struct userdata_meta_info_t meta_info; + u32 rec_start; + u32 rec_len; +}; + +#define USERDATA_FIFO_NUM 256 + +struct mpeg12_userdata_info_t { + struct mpeg12_userdata_recored_t records[USERDATA_FIFO_NUM]; + u8 *data_buf; + u8 *data_buf_end; + u32 buf_len; + u32 read_index; + u32 write_index; + u32 last_wp; +}; + +static struct mpeg12_userdata_info_t *p_userdata_mgr; + + +static inline int pool_index(struct vframe_s *vf) +{ + if ((vf >= &vfpool[0]) && (vf <= &vfpool[VF_POOL_SIZE - 1])) + return 0; + else if ((vf >= &vfpool2[0]) && (vf <= &vfpool2[VF_POOL_SIZE - 1])) + return 1; + else + return -1; +} + +static inline u32 index2canvas(u32 index) +{ + const u32 canvas_tab[8] = { +#ifdef NV21 + 0x010100, 0x030302, 0x050504, 0x070706, + 0x090908, 0x0b0b0a, 0x0d0d0c, 0x0f0f0e +#else + 0x020100, 0x050403, 0x080706, 0x0b0a09, + 0x0e0d0c, 0x11100f, 0x141312, 0x171615 +#endif + }; + + return canvas_tab[index]; +} + +static void set_frame_info(struct vframe_s *vf) +{ + unsigned int ar_bits; + u32 temp; + +#ifdef CONFIG_AM_VDEC_MPEG12_LOG + bool first = (frame_width == 0) && (frame_height == 0); +#endif + temp = READ_VREG(MREG_PIC_WIDTH); + if (temp > 1920) + vf->width = frame_width = 1920; + else + vf->width = frame_width = temp; + + temp = READ_VREG(MREG_PIC_HEIGHT); + if (temp > 1088) + vf->height = 
frame_height = 1088; + else + vf->height = frame_height = temp; + + vf->flag = 0; + + if (frame_dur > 0) + vf->duration = frame_dur; + else { + int index = (READ_VREG(MREG_SEQ_INFO) >> 4) & 0xf; + vf->duration = frame_dur = frame_rate_tab[index]; + schedule_work(¬ify_work); + } + + gvs->frame_dur = vf->duration; + + ar_bits = READ_VREG(MREG_SEQ_INFO) & 0xf; + + if (ar_bits == 0x2) + vf->ratio_control = 0xc0 << DISP_RATIO_ASPECT_RATIO_BIT; + + else if (ar_bits == 0x3) + vf->ratio_control = 0x90 << DISP_RATIO_ASPECT_RATIO_BIT; + + else if (ar_bits == 0x4) + vf->ratio_control = 0x74 << DISP_RATIO_ASPECT_RATIO_BIT; + + else + vf->ratio_control = 0; + + ratio_control = vf->ratio_control; + + amlog_level_if(first, LOG_LEVEL_INFO, + "mpeg2dec: w(%d), h(%d), dur(%d), dur-ES(%d)\n", + frame_width, frame_height, frame_dur, + frame_rate_tab[(READ_VREG(MREG_SEQ_INFO) >> 4) & 0xf]); +} + +static bool error_skip(u32 info, struct vframe_s *vf) +{ + if (error_frame_skip_level) { + /* skip error frame */ + if ((info & PICINFO_ERROR) || (frame_force_skip_flag)) { + if ((info & PICINFO_ERROR) == 0) { + if ((info & PICINFO_TYPE_MASK) == + PICINFO_TYPE_I) + frame_force_skip_flag = 0; + } else { + if (error_frame_skip_level >= 2) + frame_force_skip_flag = 1; + } + if ((info & PICINFO_ERROR) || (frame_force_skip_flag)) + return true; + } + } + + return false; +} + + +static void aml_swap_data(uint8_t *user_data, int ud_size) +{ + int swap_blocks, i, j, k, m; + unsigned char c_temp; + + /* swap byte order */ + swap_blocks = ud_size / 8; + for (i = 0; i < swap_blocks; i++) { + j = i * 8; + k = j + 7; + for (m = 0; m < 4; m++) { + c_temp = user_data[j]; + user_data[j++] = user_data[k]; + user_data[k--] = c_temp; + } + } +} + +/* +#define DUMP_USER_DATA +*/ +#ifdef DUMP_USER_DATA +static int last_wp; +#define DUMP_USER_DATA_HEX + + +#ifdef DUMP_USER_DATA_HEX +static void print_data(unsigned char *pdata, + int len, + unsigned int flag, + unsigned int duration, + unsigned int vpts, + unsigned 
int vpts_valid, + int rec_id, + u32 reference) +{ + int nLeft; + + nLeft = len; + + pr_info("%d len:%d, flag:0x%x, dur:%d, vpts:0x%x, valid:%d, refer:%d\n", + rec_id, len, flag, + duration, vpts, vpts_valid, + reference); + while (nLeft >= 16) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7], + pdata[8], pdata[9], pdata[10], pdata[11], + pdata[12], pdata[13], pdata[14], pdata[15]); + nLeft -= 16; + pdata += 16; + } + + + while (nLeft > 0) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } +} +#endif + + +#define DEBUG_CC_DUMP_ASCII + +#ifdef DEBUG_CC_DUMP_ASCII +static int vbi_to_ascii(int c) +{ + if (c < 0) + return '?'; + + c &= 0x7F; + + if (c < 0x20 || c >= 0x7F) + return '.'; + + return c; +} + +static void dump_cc_ascii(const uint8_t *buf, int poc) +{ + int cc_flag; + int cc_count; + int i; + int szAscii[32]; + int index = 0; + + cc_flag = buf[1] & 0x40; + if (!cc_flag) { + pr_info("### cc_flag is invalid\n"); + return; + } + cc_count = buf[1] & 0x1f; + + for (i = 0; i < cc_count; ++i) { + unsigned int b0; + unsigned int cc_valid; + unsigned int cc_type; + unsigned char cc_data1; + unsigned char cc_data2; + + b0 = buf[3 + i * 3]; + cc_valid = b0 & 4; + cc_type = b0 & 3; + cc_data1 = buf[4 + i * 3]; + cc_data2 = buf[5 + i * 3]; + + + if (cc_type == 0) { + /* NTSC pair, Line 21 */ + szAscii[index++] = vbi_to_ascii(cc_data1); + szAscii[index++] = vbi_to_ascii(cc_data2); + if ((!cc_valid) || (i >= 3)) + break; + } + } + switch (index) { + case 8: + pr_info("push poc:%d : %c %c %c %c %c %c %c %c\n", + poc, + szAscii[0], szAscii[1], szAscii[2], szAscii[3], + szAscii[4], szAscii[5], szAscii[6], szAscii[7]); + break; + case 7: + pr_info("push poc:%d : %c %c %c %c %c %c %c\n", + poc, + szAscii[0], szAscii[1], 
szAscii[2], szAscii[3], + szAscii[4], szAscii[5], szAscii[6]); + break; + case 6: + pr_info("push poc:%d : %c %c %c %c %c %c\n", poc, + szAscii[0], szAscii[1], szAscii[2], szAscii[3], + szAscii[4], szAscii[5]); + break; + case 5: + pr_info("push poc:%d : %c %c %c %c %c\n", poc, + szAscii[0], szAscii[1], szAscii[2], szAscii[3], + szAscii[4]); + break; + case 4: + pr_info("push poc:%d : %c %c %c %c\n", poc, + szAscii[0], szAscii[1], szAscii[2], szAscii[3]); + break; + case 3: + pr_info("push poc:%d : %c %c %c\n", poc, + szAscii[0], szAscii[1], szAscii[2]); + break; + case 2: + pr_info("push poc:%d : %c %c\n", poc, + szAscii[0], szAscii[1]); + break; + case 1: + pr_info("push poc:%d : %c\n", poc, szAscii[0]); + break; + default: + pr_info("push poc:%d and no CC data: index = %d\n", + poc, index); + break; + } +} +#endif + + +static int is_atsc(u8 *pdata) +{ + if ((pdata[0] == 0x47) && + (pdata[1] == 0x41) && + (pdata[2] == 0x39) && + (pdata[3] == 0x34)) + return 1; + else + return 0; +} +/* +#define DUMP_HEAD_INFO_DATA +*/ +static void dump_data(u8 *pdata, + unsigned int user_data_length, + unsigned int flag, + unsigned int duration, + unsigned int vpts, + unsigned int vpts_valid, + int rec_id, + u32 reference) +{ + unsigned char szBuf[256]; + + + memset(szBuf, 0, 256); + memcpy(szBuf, pdata, user_data_length); + + aml_swap_data(szBuf, user_data_length); +#ifdef DUMP_USER_DATA_HEX + print_data(szBuf, + user_data_length, + flag, + duration, + vpts, + vpts_valid, + rec_id, + reference); +#endif + +#ifdef DEBUG_CC_DUMP_ASCII +#ifdef DUMP_HEAD_INFO_DATA + if (is_atsc(szBuf+8)) + dump_cc_ascii(szBuf+8+4, reference); +#else + if (is_atsc(szBuf)) + dump_cc_ascii(szBuf+4, reference); +#endif +#endif +} + + + + +#define MAX_USER_DATA_SIZE 1572864 +static void *user_data_buf; +static unsigned char *pbuf_start; +static int total_len; +static int bskip; +static int n_userdata_id; + +static void reset_user_data_buf(void) +{ + total_len = 0; + pbuf_start = user_data_buf; + bskip = 
0; + n_userdata_id = 0; +} + +static void push_to_buf(u8 *pdata, int len, struct userdata_meta_info_t *pmeta, + u32 reference) +{ + u32 *pLen; + int info_cnt; + u8 *pbuf_end; + + if (!user_data_buf) + return; + + if (bskip) { + pr_info("over size, skip\n"); + return; + } + info_cnt = 0; + pLen = (u32 *)pbuf_start; + + *pLen = len; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->duration; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->flags; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts_valid; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + + *pLen = n_userdata_id; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = reference; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + + + pbuf_end = (u8 *)ccbuf_phyAddress_virt + CCBUF_SIZE; + if (pdata + len > pbuf_end) { + int first_section_len; + + first_section_len = pbuf_end - pdata; + memcpy(pbuf_start, pdata, first_section_len); + pdata = (u8 *)ccbuf_phyAddress_virt; + pbuf_start += first_section_len; + memcpy(pbuf_start, pdata, len - first_section_len); + pbuf_start += len - first_section_len; + } else { + memcpy(pbuf_start, pdata, len); + pbuf_start += len; + } + + total_len += len + info_cnt * sizeof(u32); + if (total_len >= MAX_USER_DATA_SIZE-4096) + bskip = 1; +} + +static void dump_userdata_info(void *puser_data, + int len, + struct userdata_meta_info_t *pmeta, + u32 reference) +{ + u8 *pstart; + + pstart = (u8 *)puser_data; + +#ifdef DUMP_HEAD_INFO_DATA + push_to_buf(pstart, len, pmeta, reference); +#else + push_to_buf(pstart+8, len - 8, pmeta, reference); +#endif +} + +static void show_user_data_buf(void) +{ + u8 *pbuf; + int len; + unsigned int flag; + unsigned int duration; + unsigned int vpts; + unsigned int vpts_valid; + int rec_id; + u32 reference; + + pr_info("show user data buf\n"); + pbuf = user_data_buf; + + while 
(pbuf < pbuf_start) { + u32 *pLen; + + pLen = (u32 *)pbuf; + + len = *pLen; + pLen++; + pbuf += sizeof(u32); + + duration = *pLen; + pLen++; + pbuf += sizeof(u32); + + flag = *pLen; + pLen++; + pbuf += sizeof(u32); + + vpts = *pLen; + pLen++; + pbuf += sizeof(u32); + + vpts_valid = *pLen; + pLen++; + pbuf += sizeof(u32); + + rec_id = *pLen; + pLen++; + pbuf += sizeof(u32); + + reference = *pLen; + pLen++; + pbuf += sizeof(u32); + + + dump_data(pbuf, len, flag, duration, + vpts, vpts_valid, rec_id, reference); + pbuf += len; + msleep(30); + } +} +#endif + +static void vmpeg12_add_userdata(struct userdata_meta_info_t meta_info, + int wp, + u32 reference); +/* +#define PRINT_HEAD_INFO +*/ +static void userdata_push_do_work(struct work_struct *work) +{ + u32 reg; + u32 offset, pts; + u64 pts_us64 = 0; + u8 *pdata; + u8 head_info[8]; + struct userdata_meta_info_t meta_info; + u32 wp; + u32 index; + u32 picture_struct; + u32 reference; + u32 picture_type; + u32 temp; +#ifdef PRINT_HEAD_INFO + u8 *ptype_str; +#endif + memset(&meta_info, 0, sizeof(meta_info)); + + meta_info.duration = frame_dur; + + reg = READ_VREG(AV_SCRATCH_M); + meta_info.flags = ((reg >> 30) << 1); + meta_info.flags |= (VFORMAT_MPEG12 << 3); + /* check top_field_first flag */ + if ((reg >> 28) & 0x1) { + meta_info.flags |= (1 << 10); + meta_info.flags |= (((reg >> 29) & 0x1) << 11); + } + + offset = READ_VREG(AV_SCRATCH_N); + if (offset != last_offset) { + meta_info.flags |= 1; + last_offset = offset; + } + + if (pts_pickout_offset_us64 + (PTS_TYPE_VIDEO, offset, &pts, 0, &pts_us64) != 0) { + pr_info("pick out pts failed by offset = 0x%x\n", offset); + pts = -1; + meta_info.vpts_valid = 0; + } else + meta_info.vpts_valid = 1; + meta_info.vpts = pts; + + if (!ccbuf_phyAddress_is_remaped_nocache && + ccbuf_phyAddress && + ccbuf_phyAddress_virt) { + codec_mm_dma_flush( + ccbuf_phyAddress_virt, + CCBUF_SIZE, + DMA_FROM_DEVICE); + } + + mutex_lock(&userdata_mutex); + if (p_userdata_mgr && 
ccbuf_phyAddress_virt) { + int new_wp; + + new_wp = reg & 0xffff; + if (new_wp < p_userdata_mgr->last_wp) + pdata = (u8 *)ccbuf_phyAddress_virt; + else + pdata = (u8 *)ccbuf_phyAddress_virt + + p_userdata_mgr->last_wp; + memcpy(head_info, pdata, 8); + } else + memset(head_info, 0, 8); + mutex_unlock(&userdata_mutex); + aml_swap_data(head_info, 8); + + wp = (head_info[0] << 8 | head_info[1]); + index = (head_info[2] << 8 | head_info[3]); + + picture_struct = (head_info[6] << 8 | head_info[7]); + temp = (head_info[4] << 8 | head_info[5]); + reference = temp & 0x3FF; + picture_type = (temp >> 10) & 0x7; + +#if 0 + pr_info("index = %d, wp = %d, ref = %d, type = %d, struct = 0x%x, vpts:0x%x\n", + index, wp, reference, + picture_type, picture_struct, meta_info.vpts); +#endif + switch (picture_type) { + case 1: + /* pr_info("I type, pos:%d\n", + (meta_info.flags>>1)&0x3); */ + meta_info.flags |= (1<<7); +#ifdef PRINT_HEAD_INFO + ptype_str = " I"; +#endif + break; + case 2: + /* pr_info("P type, pos:%d\n", + (meta_info.flags>>1)&0x3); */ + meta_info.flags |= (2<<7); +#ifdef PRINT_HEAD_INFO + ptype_str = " P"; +#endif + break; + case 3: + /* pr_info("B type, pos:%d\n", + (meta_info.flags>>1)&0x3); */ + meta_info.flags |= (3<<7); +#ifdef PRINT_HEAD_INFO + ptype_str = " B"; +#endif + break; + case 4: + /* pr_info("D type, pos:%d\n", + (meta_info.flags>>1)&0x3); */ + meta_info.flags |= (4<<7); +#ifdef PRINT_HEAD_INFO + ptype_str = " D"; +#endif + break; + default: + /* pr_info("Unknown type:0x%x, pos:%d\n", + pheader->picture_coding_type, + (meta_info.flags>>1)&0x3); */ +#ifdef PRINT_HEAD_INFO + ptype_str = " U"; +#endif + break; + } +#ifdef PRINT_HEAD_INFO + pr_info("ref:%d, type:%s, ext:%d, offset:0x%x, first:%d, id:%d\n", + reference, ptype_str, + (reg >> 30), offset, + (reg >> 28)&0x3, + n_userdata_id); +#endif + vmpeg12_add_userdata(meta_info, reg & 0xffff, reference); + + WRITE_VREG(AV_SCRATCH_M, 0); +} + +static void vmpeg12_notify_work(struct work_struct *work) +{ + 
pr_info("frame duration changed %d\n", frame_dur); + if (fr_hint_status == VDEC_NEED_HINT) { + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *)((unsigned long)frame_dur)); + fr_hint_status = VDEC_HINTED; + } + return; +} +static irqreturn_t vmpeg12_isr(int irq, void *dev_id) +{ + u32 reg, info, seqinfo, offset, pts, pts_valid = 0; + struct vframe_s *vf; + u64 pts_us64 = 0; + u32 frame_size; + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + reg = READ_VREG(MREG_BUFFEROUT); + + if (reg) { + info = READ_VREG(MREG_PIC_INFO); + offset = READ_VREG(MREG_FRAME_OFFSET); + seqinfo = READ_VREG(MREG_SEQ_INFO); + + if ((first_i_frame_ready == 0) && + ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) && + ((info & PICINFO_ERROR) == 0)) + first_i_frame_ready = 1; + + if ((pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, offset, &pts, + &frame_size, 0, &pts_us64) == 0) + && (((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) + || ((info & PICINFO_TYPE_MASK) == + PICINFO_TYPE_P))) + pts_valid = 1; + + if (pts_valid && lastpts == pts) + pts_valid = 0; + if (pts_valid) + lastpts = pts; + /*if (frame_prog == 0) */ + { + frame_prog = info & PICINFO_PROG; + if ((seqinfo & SEQINFO_EXT_AVAILABLE) + && (!(seqinfo & SEQINFO_PROG))) + frame_prog = 0; + } + + if ((dec_control & + DEC_CONTROL_FLAG_FORCE_2500_720_576_INTERLACE) + && (frame_width == 720 || frame_width == 480) + && (frame_height == 576) + && (frame_dur == 3840)) + frame_prog = 0; + else if ((dec_control & + DEC_CONTROL_FLAG_FORCE_3000_704_480_INTERLACE) + && (frame_width == 704) && (frame_height == 480) + && (frame_dur == 3200)) + frame_prog = 0; + else if ((dec_control & + DEC_CONTROL_FLAG_FORCE_2500_704_576_INTERLACE) + && (frame_width == 704) && (frame_height == 576) + && (frame_dur == 3840)) + frame_prog = 0; + else if ((dec_control & + DEC_CONTROL_FLAG_FORCE_2500_544_576_INTERLACE) + && (frame_width == 544) && (frame_height == 576) + && (frame_dur == 3840)) + frame_prog = 0; + else if ((dec_control & + 
DEC_CONTROL_FLAG_FORCE_2500_480_576_INTERLACE) + && (frame_width == 480) && (frame_height == 576) + && (frame_dur == 3840)) + frame_prog = 0; + else if (dec_control & DEC_CONTROL_FLAG_FORCE_SEQ_INTERLACE) + frame_prog = 0; + if (frame_prog & PICINFO_PROG) { + u32 index = ((reg & 0xf) - 1) & 7; + + seqinfo = READ_VREG(MREG_SEQ_INFO); + + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + + set_frame_info(vf); + /*pr_info("video signal type:0x%x\n", + READ_VREG(AV_SCRATCH_H));*/ + vf->signal_type = READ_VREG(AV_SCRATCH_H); + vf->index = index; +#ifdef NV21 + vf->type = + VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | + VIDTYPE_VIU_NV21; +#else + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; +#endif + if ((seqinfo & SEQINFO_EXT_AVAILABLE) + && (seqinfo & SEQINFO_PROG)) { + if (info & PICINFO_RPT_FIRST) { + if (info & PICINFO_TOP_FIRST) { + vf->duration = + vf->duration * 3; + /* repeat three times */ + } else { + vf->duration = + vf->duration * 2; + /* repeat two times */ + } + } + vf->duration_pulldown = 0; + /* no pull down */ + + } else { + vf->duration_pulldown = + (info & PICINFO_RPT_FIRST) ? + vf->duration >> 1 : 0; + } + + /*count info*/ + vdec_count_info(gvs, info & PICINFO_ERROR, offset); + + vf->duration += vf->duration_pulldown; + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(index); + vf->orientation = 0; + vf->pts = (pts_valid) ? pts : 0; + vf->pts_us64 = (pts_valid) ? 
pts_us64 : 0; + vf->type_original = vf->type; + + vfbuf_use[index] = 1; + + if ((error_skip(info, vf)) || + ((first_i_frame_ready == 0) + && ((PICINFO_TYPE_MASK & info) != + PICINFO_TYPE_I))) { + gvs->drop_frame_count++; + kfifo_put(&recycle_q, + (const struct vframe_s *)vf); + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + index); + kfifo_put(&display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + } + + } else { + u32 index = ((reg & 0xf) - 1) & 7; + int first_field_type = (info & PICINFO_TOP_FIRST) ? + VIDTYPE_INTERLACE_TOP : + VIDTYPE_INTERLACE_BOTTOM; + +#ifdef INTERLACE_SEQ_ALWAYS + /* once an interlaced sequence exist, + *always force interlaced type + */ + /* to make DI easy. */ + dec_control |= DEC_CONTROL_FLAG_FORCE_SEQ_INTERLACE; +#endif +#if 0 + if (info & PICINFO_FRAME) { + frame_rpt_state = + (info & PICINFO_TOP_FIRST) ? + FRAME_REPEAT_TOP : FRAME_REPEAT_BOT; + } else { + if (frame_rpt_state == FRAME_REPEAT_TOP) { + first_field_type = + VIDTYPE_INTERLACE_TOP; + } else if (frame_rpt_state == + FRAME_REPEAT_BOT) { + first_field_type = + VIDTYPE_INTERLACE_BOTTOM; + } + frame_rpt_state = FRAME_REPEAT_NONE; + } +#else + frame_rpt_state = FRAME_REPEAT_NONE; +#endif + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + if (info & PICINFO_RPT_FIRST) + vfbuf_use[index] = 3; + else + vfbuf_use[index] = 2; + + set_frame_info(vf); + vf->signal_type = 0; + vf->index = index; + vf->type = + (first_field_type == VIDTYPE_INTERLACE_TOP) ? + VIDTYPE_INTERLACE_TOP : + VIDTYPE_INTERLACE_BOTTOM; +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + if (info & PICINFO_RPT_FIRST) + vf->duration /= 3; + else + vf->duration >>= 1; + vf->duration_pulldown = (info & PICINFO_RPT_FIRST) ? 
+ vf->duration >> 1 : 0; + vf->duration += vf->duration_pulldown; + vf->orientation = 0; + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(index); + vf->pts = (pts_valid) ? pts : 0; + vf->pts_us64 = (pts_valid) ? pts_us64 : 0; + vf->type_original = vf->type; + + if ((error_skip(info, vf)) || + ((first_i_frame_ready == 0) + && ((PICINFO_TYPE_MASK & info) != + PICINFO_TYPE_I))) { + gvs->drop_frame_count++; + kfifo_put(&recycle_q, + (const struct vframe_s *)vf); + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + index); + kfifo_put(&display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + } + + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + + set_frame_info(vf); + vf->signal_type = 0; + vf->index = index; + vf->type = (first_field_type == + VIDTYPE_INTERLACE_TOP) ? + VIDTYPE_INTERLACE_BOTTOM : + VIDTYPE_INTERLACE_TOP; +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + if (info & PICINFO_RPT_FIRST) + vf->duration /= 3; + else + vf->duration >>= 1; + vf->duration_pulldown = (info & PICINFO_RPT_FIRST) ? 
+ vf->duration >> 1 : 0; + vf->duration += vf->duration_pulldown; + vf->orientation = 0; + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(index); + vf->pts = 0; + vf->pts_us64 = 0; + vf->type_original = vf->type; + + /*count info*/ + vdec_count_info(gvs, info & PICINFO_ERROR, offset); + + if ((error_skip(info, vf)) || + ((first_i_frame_ready == 0) + && ((PICINFO_TYPE_MASK & info) != + PICINFO_TYPE_I))) { + gvs->drop_frame_count++; + kfifo_put(&recycle_q, + (const struct vframe_s *)vf); + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + index); + kfifo_put(&display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + } + + if (info & PICINFO_RPT_FIRST) { + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info("error, no available buffer slot."); + return IRQ_HANDLED; + } + + set_frame_info(vf); + + vf->index = index; + vf->type = (first_field_type == + VIDTYPE_INTERLACE_TOP) ? + VIDTYPE_INTERLACE_TOP : + VIDTYPE_INTERLACE_BOTTOM; +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + vf->duration /= 3; + vf->duration_pulldown = + (info & PICINFO_RPT_FIRST) ? 
+ vf->duration >> 1 : 0; + vf->duration += vf->duration_pulldown; + vf->orientation = 0; + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(index); + vf->pts = 0; + vf->pts_us64 = 0; + if ((error_skip(info, vf)) || + ((first_i_frame_ready == 0) + && ((PICINFO_TYPE_MASK & info) + != PICINFO_TYPE_I))) { + kfifo_put(&recycle_q, + (const struct vframe_s *)vf); + } else { + kfifo_put(&display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + } + } + } + WRITE_VREG(MREG_BUFFEROUT, 0); + } + + reg = READ_VREG(AV_SCRATCH_M); + if (reg & (1<<16)) + schedule_work(&userdata_push_work); + + return IRQ_HANDLED; +} + +static struct vframe_s *vmpeg_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + + if (kfifo_peek(&display_q, &vf)) + return vf; + + return NULL; +} + +static struct vframe_s *vmpeg_vf_get(void *op_arg) +{ + struct vframe_s *vf; + + if (kfifo_get(&display_q, &vf)) + return vf; + + return NULL; +} + +static void vmpeg_vf_put(struct vframe_s *vf, void *op_arg) +{ + if (pool_index(vf) == cur_pool_idx) + kfifo_put(&recycle_q, (const struct vframe_s *)vf); +} + +static int vmpeg_event_cb(int type, void *data, void *private_data) +{ + if (type & VFRAME_EVENT_RECEIVER_RESET) { + unsigned long flags; + + amvdec_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_light_unreg_provider(&vmpeg_vf_prov); +#endif + spin_lock_irqsave(&lock, flags); + vmpeg12_local_init(); + vmpeg12_prot_init(); + spin_unlock_irqrestore(&lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vmpeg_vf_prov); +#endif + amvdec_start(); + } + return 0; +} + +static int vmpeg_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + + spin_lock_irqsave(&lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&newframe_q); + states->buf_avail_num = kfifo_len(&display_q); + states->buf_recycle_num = 
kfifo_len(&recycle_q); + + spin_unlock_irqrestore(&lock, flags); + + return 0; +} + +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER +static void vmpeg12_ppmgr_reset(void) +{ + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_RESET, NULL); + + vmpeg12_local_init(); + + pr_info("vmpeg12dec: vf_ppmgr_reset\n"); +} +#endif + +static void vmpeg12_reset_userdata_fifo(struct vdec_s *vdec, int bInit); +static void vmpeg12_wakeup_userdata_poll(struct vdec_s *vdec); + +static void reset_do_work(struct work_struct *work) +{ + amvdec_stop(); + +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vmpeg12_ppmgr_reset(); +#else + vf_light_unreg_provider(&vmpeg_vf_prov); + vmpeg12_local_init(); + vf_reg_provider(&vmpeg_vf_prov); +#endif + vmpeg12_prot_init(); + vmpeg12_create_userdata_manager(ccbuf_phyAddress_virt, CCBUF_SIZE); + vmpeg12_reset_userdata_fifo(vdec, 1); +#ifdef DUMP_USER_DATA + last_wp = 0; +#endif + + amvdec_start(); +} + +static void vmpeg12_set_clk(struct work_struct *work) +{ + { + int fps = 96000 / frame_dur; + + saved_resolution = frame_width * frame_height * fps; + vdec_source_changed(VFORMAT_MPEG12, + frame_width, frame_height, fps); + } +} + + +static void vmpeg_put_timer_func(struct timer_list *timer) +{ + int fatal_reset = 0; + enum receviver_start_e state = RECEIVER_INACTIVE; + + if (vf_get_receiver(PROVIDER_NAME)) { + state = vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_QUREY_STATE, NULL); + if ((state == RECEIVER_STATE_NULL) + || (state == RECEIVER_STATE_NONE)) { + /* receiver has no event_cb or + *receiver's event_cb does not process this event + */ + state = RECEIVER_INACTIVE; + } + } else + state = RECEIVER_INACTIVE; + + if (READ_VREG(MREG_FATAL_ERROR) == 1) + fatal_reset = 1; + + if ((READ_VREG(MREG_WAIT_BUFFER) != 0) && + (kfifo_is_empty(&recycle_q)) && + (kfifo_is_empty(&display_q)) && (state == RECEIVER_INACTIVE)) { + if (++wait_buffer_counter > 4) + fatal_reset = 1; + + } else + wait_buffer_counter = 0; + + if (fatal_reset && 
(kfifo_is_empty(&display_q))) { + pr_info("$$$$decoder is waiting for buffer or fatal reset.\n"); + schedule_work(&reset_work); + } + + while (!kfifo_is_empty(&recycle_q) && (READ_VREG(MREG_BUFFERIN) == 0)) { + struct vframe_s *vf; + + if (kfifo_get(&recycle_q, &vf)) { + if ((vf->index < DECODE_BUFFER_NUM_MAX) && + (--vfbuf_use[vf->index] == 0)) { + WRITE_VREG(MREG_BUFFERIN, vf->index + 1); + vf->index = DECODE_BUFFER_NUM_MAX; + } + + if (pool_index(vf) == cur_pool_idx) { + kfifo_put(&newframe_q, + (const struct vframe_s *)vf); + } + } + } + + if (frame_dur > 0 && saved_resolution != + frame_width * frame_height * (96000 / frame_dur)) + schedule_work(&set_clk_work); + + timer->expires = jiffies + PUT_INTERVAL; + + add_timer(timer); +} + +int vmpeg12_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + if (!(stat & STAT_VDEC_RUN)) + return -1; + + vstatus->frame_width = frame_width; + vstatus->frame_height = frame_height; + if (frame_dur != 0) + vstatus->frame_rate = 96000 / frame_dur; + else + vstatus->frame_rate = -1; + vstatus->error_count = READ_VREG(AV_SCRATCH_C); + vstatus->status = stat; + vstatus->bit_rate = gvs->bit_rate; + vstatus->frame_dur = frame_dur; + vstatus->frame_data = gvs->frame_data; + vstatus->total_data = gvs->total_data; + vstatus->frame_count = gvs->frame_count; + vstatus->error_frame_count = gvs->error_frame_count; + vstatus->drop_frame_count = gvs->drop_frame_count; + vstatus->total_data = gvs->total_data; + vstatus->samp_cnt = gvs->samp_cnt; + vstatus->offset = gvs->offset; + vstatus->ratio_control = ratio_control; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + +int vmpeg12_set_isreset(struct vdec_s *vdec, int isreset) +{ + is_reset = isreset; + return 0; +} + + + + +static void vmpeg12_create_userdata_manager(u8 *userdata_buf, int buf_len) +{ + mutex_lock(&userdata_mutex); + + p_userdata_mgr = (struct mpeg12_userdata_info_t *) + vmalloc(sizeof(struct 
mpeg12_userdata_info_t)); + if (p_userdata_mgr) { + memset(p_userdata_mgr, 0, + sizeof(struct mpeg12_userdata_info_t)); + p_userdata_mgr->data_buf = userdata_buf; + p_userdata_mgr->buf_len = buf_len; + p_userdata_mgr->data_buf_end = userdata_buf + buf_len; + } + mutex_unlock(&userdata_mutex); +} + +static void vmpeg12_destroy_userdata_manager(void) +{ + mutex_lock(&userdata_mutex); + + if (p_userdata_mgr) { + vfree(p_userdata_mgr); + p_userdata_mgr = NULL; + } + mutex_unlock(&userdata_mutex); +} + +static void vmpeg12_add_userdata(struct userdata_meta_info_t meta_info, + int wp, + u32 reference) +{ + struct mpeg12_userdata_recored_t *p_userdata_rec; + int data_length; + + mutex_lock(&userdata_mutex); + + if (p_userdata_mgr) { + if (wp > p_userdata_mgr->last_wp) + data_length = wp - p_userdata_mgr->last_wp; + else { + p_userdata_mgr->last_wp = 0; + data_length = wp - p_userdata_mgr->last_wp; + } + + if (data_length & 0x7) + data_length = (((data_length + 8) >> 3) << 3); +/* +pr_info("wakeup_push: ri:%d, wi:%d, data_len:%d, last_wp:%d, wp:%d, id = %d\n", + p_userdata_mgr->read_index, + p_userdata_mgr->write_index, + data_length, + p_userdata_mgr->last_wp, + wp, + n_userdata_id); +*/ + p_userdata_rec = p_userdata_mgr->records + + p_userdata_mgr->write_index; + p_userdata_rec->meta_info = meta_info; + p_userdata_rec->rec_start = p_userdata_mgr->last_wp; + p_userdata_rec->rec_len = data_length; + p_userdata_mgr->last_wp = wp; + +#ifdef DUMP_USER_DATA + dump_userdata_info(p_userdata_mgr->data_buf + + p_userdata_rec->rec_start, + data_length, + &meta_info, + reference); + n_userdata_id++; +#endif + + p_userdata_mgr->write_index++; + if (p_userdata_mgr->write_index >= USERDATA_FIFO_NUM) + p_userdata_mgr->write_index = 0; + } + mutex_unlock(&userdata_mutex); + + vdec_wakeup_userdata_poll(vdec); +} + + +static int vmpeg12_user_data_read(struct vdec_s *vdec, + struct userdata_param_t *puserdata_para) +{ + int rec_ri, rec_wi; + int rec_len; + u8 *rec_data_start; + u8 
*pdest_buf; + struct mpeg12_userdata_recored_t *p_userdata_rec; + u32 data_size; + u32 res; + int copy_ok = 1; + + pdest_buf = puserdata_para->pbuf_addr; + + mutex_lock(&userdata_mutex); + + if (!p_userdata_mgr) { + mutex_unlock(&userdata_mutex); + return 0; + } +/* + pr_info("ri = %d, wi = %d\n", + p_userdata_mgr->read_index, + p_userdata_mgr->write_index); +*/ + rec_ri = p_userdata_mgr->read_index; + rec_wi = p_userdata_mgr->write_index; + + if (rec_ri == rec_wi) { + mutex_unlock(&userdata_mutex); + return 0; + } + + p_userdata_rec = p_userdata_mgr->records + rec_ri; + + rec_len = p_userdata_rec->rec_len; + rec_data_start = p_userdata_rec->rec_start + p_userdata_mgr->data_buf; +/* + pr_info("rec_len:%d, rec_start:%d, buf_len:%d\n", + p_userdata_rec->rec_len, + p_userdata_rec->rec_start, + puserdata_para->buf_len); +*/ + if (rec_len <= puserdata_para->buf_len) { + /* dvb user data buffer is enought to copy the whole recored. */ + data_size = rec_len; + if (rec_data_start + data_size + > p_userdata_mgr->data_buf_end) { + int first_section_len; + + first_section_len = p_userdata_mgr->buf_len + - p_userdata_rec->rec_start; + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + first_section_len); + if (res) { + pr_info("p1 read not end res=%d, request=%d\n", + res, first_section_len); + copy_ok = 0; + + p_userdata_rec->rec_len -= + first_section_len - res; + p_userdata_rec->rec_start += + first_section_len - res; + puserdata_para->data_size = + first_section_len - res; + } else { + res = (u32)copy_to_user( + (void *)(pdest_buf+first_section_len), + (void *)p_userdata_mgr->data_buf, + data_size - first_section_len); + if (res) { + pr_info("p2 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start = + data_size - first_section_len - res; + puserdata_para->data_size = data_size - res; + } + } else { + res = (u32)copy_to_user((void *)pdest_buf, + (void 
*)rec_data_start, + data_size); + if (res) { + pr_info("p3 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start += data_size - res; + puserdata_para->data_size = data_size - res; + } + + if (copy_ok) { + p_userdata_mgr->read_index++; + if (p_userdata_mgr->read_index >= USERDATA_FIFO_NUM) + p_userdata_mgr->read_index = 0; + } + } else { + /* dvb user data buffer is not enought + to copy the whole recored. */ + data_size = puserdata_para->buf_len; + if (rec_data_start + data_size + > p_userdata_mgr->data_buf_end) { + int first_section_len; + + first_section_len = p_userdata_mgr->buf_len + - p_userdata_rec->rec_start; + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + first_section_len); + if (res) { + pr_info("p4 read not end res=%d, request=%d\n", + res, first_section_len); + copy_ok = 0; + p_userdata_rec->rec_len -= + first_section_len - res; + p_userdata_rec->rec_start += + first_section_len - res; + puserdata_para->data_size = + first_section_len - res; + } else { + /* first secton copy is ok*/ + res = (u32)copy_to_user( + (void *)(pdest_buf+first_section_len), + (void *)p_userdata_mgr->data_buf, + data_size - first_section_len); + if (res) { + pr_info("p5 read not end res=%d, request=%d\n", + res, + data_size - first_section_len); + copy_ok = 0; + } + + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start = + data_size - first_section_len - res; + puserdata_para->data_size = data_size - res; + } + } else { + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + data_size); + if (res) { + pr_info("p6 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start += data_size - res; + puserdata_para->data_size = data_size - res; + } + + if (copy_ok) { + p_userdata_mgr->read_index++; + if (p_userdata_mgr->read_index + >= 
USERDATA_FIFO_NUM) + p_userdata_mgr->read_index = 0; + } + + } + + puserdata_para->meta_info = p_userdata_rec->meta_info; + + if (p_userdata_mgr->read_index <= p_userdata_mgr->write_index) + puserdata_para->meta_info.records_in_que = + p_userdata_mgr->write_index - + p_userdata_mgr->read_index; + else + puserdata_para->meta_info.records_in_que = + p_userdata_mgr->write_index + + USERDATA_FIFO_NUM - + p_userdata_mgr->read_index; + puserdata_para->version = (0<<24|0<<16|0<<8|1); + + mutex_unlock(&userdata_mutex); + + return 1; +} + +static void vmpeg12_reset_userdata_fifo(struct vdec_s *vdec, int bInit) +{ + mutex_lock(&userdata_mutex); + + if (p_userdata_mgr) { + pr_info("vmpeg12_reset_userdata_fifo: bInit: %d, ri: %d, wi: %d\n", + bInit, p_userdata_mgr->read_index, + p_userdata_mgr->write_index); + p_userdata_mgr->read_index = 0; + p_userdata_mgr->write_index = 0; + + if (bInit) + p_userdata_mgr->last_wp = 0; + } + + mutex_unlock(&userdata_mutex); +} + +static void vmpeg12_wakeup_userdata_poll(struct vdec_s *vdec) +{ + amstream_wakeup_userdata_poll(vdec); +} + +static int vmpeg12_vdec_info_init(void) +{ + gvs = kzalloc(sizeof(struct vdec_info), GFP_KERNEL); + if (NULL == gvs) { + pr_info("the struct of vdec status malloc failed.\n"); + return -ENOMEM; + } + return 0; +} + +/****************************************/ +static int vmpeg12_canvas_init(void) +{ + int i, ret; + u32 canvas_width, canvas_height; + u32 decbuf_size, decbuf_y_size, decbuf_uv_size; + static unsigned long buf_start; + + if (buf_size <= 0x00400000) { + /* SD only */ + canvas_width = 768; + canvas_height = 576; + decbuf_y_size = 0x80000; + decbuf_uv_size = 0x20000; + decbuf_size = 0x100000; + } else { + /* HD & SD */ + canvas_width = 1920; + canvas_height = 1088; + decbuf_y_size = 0x200000; + decbuf_uv_size = 0x80000; + decbuf_size = 0x300000; + } + + + for (i = 0; i < MAX_BMMU_BUFFER_NUM; i++) { + + if (i == (MAX_BMMU_BUFFER_NUM - 1)) /* workspace mem */ + decbuf_size = WORKSPACE_SIZE; + + ret = 
decoder_bmmu_box_alloc_buf_phy(mm_blk_handle, i, + decbuf_size, DRIVER_NAME, &buf_start); + if (ret < 0) + return ret; + + if (i == (MAX_BMMU_BUFFER_NUM - 1)) { + + WRITE_VREG(MREG_CO_MV_START, (buf_start + CCBUF_SIZE)); + if (!ccbuf_phyAddress) { + ccbuf_phyAddress + = (u32)buf_start; + + ccbuf_phyAddress_virt + = codec_mm_phys_to_virt( + ccbuf_phyAddress); + if ((!ccbuf_phyAddress_virt) && (!tvp_flag)) { + ccbuf_phyAddress_virt + = codec_mm_vmap( + ccbuf_phyAddress, + CCBUF_SIZE); + ccbuf_phyAddress_is_remaped_nocache = 1; + } + } + + } else { +#ifdef NV21 + config_cav_lut_ex(2 * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(2 * i + 1, + buf_start + + decbuf_y_size, canvas_width, + canvas_height / 2, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); +#else + config_cav_lut_ex(3 * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(3 * i + 1, + buf_start + + decbuf_y_size, canvas_width / 2, + canvas_height / 2, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(3 * i + 2, + buf_start + + decbuf_y_size + decbuf_uv_size, + canvas_width / 2, canvas_height / 2, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); +#endif + } + } + + return 0; +} + +static int vmpeg12_prot_init(void) +{ + int ret; + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + int save_reg = READ_VREG(POWER_CTL_VLD); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) { + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1<<7) | (1<<6) | (1<<4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1<<9) | (1<<8)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + + 
WRITE_VREG(MDEC_SW_RESET, (1 << 7)); + WRITE_VREG(MDEC_SW_RESET, 0); + } + + WRITE_VREG(POWER_CTL_VLD, save_reg); + + } else + WRITE_RESET_REG(RESET0_REGISTER, RESET_IQIDCT | RESET_MC); + + ret = vmpeg12_canvas_init(); + +#ifdef NV21 + WRITE_VREG(AV_SCRATCH_0, 0x010100); + WRITE_VREG(AV_SCRATCH_1, 0x030302); + WRITE_VREG(AV_SCRATCH_2, 0x050504); + WRITE_VREG(AV_SCRATCH_3, 0x070706); + WRITE_VREG(AV_SCRATCH_4, 0x090908); + WRITE_VREG(AV_SCRATCH_5, 0x0b0b0a); + WRITE_VREG(AV_SCRATCH_6, 0x0d0d0c); + WRITE_VREG(AV_SCRATCH_7, 0x0f0f0e); +#else + WRITE_VREG(AV_SCRATCH_0, 0x020100); + WRITE_VREG(AV_SCRATCH_1, 0x050403); + WRITE_VREG(AV_SCRATCH_2, 0x080706); + WRITE_VREG(AV_SCRATCH_3, 0x0b0a09); + WRITE_VREG(AV_SCRATCH_4, 0x0e0d0c); + WRITE_VREG(AV_SCRATCH_5, 0x11100f); + WRITE_VREG(AV_SCRATCH_6, 0x141312); + WRITE_VREG(AV_SCRATCH_7, 0x171615); +#endif + + /* set to mpeg1 default */ + WRITE_VREG(MPEG1_2_REG, 0); + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + /* for Mpeg1 default value */ + WRITE_VREG(PIC_HEAD_INFO, 0x380); + /* disable mpeg4 */ + WRITE_VREG(M4_CONTROL_REG, 0); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + /* clear buffer IN/OUT registers */ + WRITE_VREG(MREG_BUFFERIN, 0); + WRITE_VREG(MREG_BUFFEROUT, 0); + /* set reference width and height */ + if ((frame_width != 0) && (frame_height != 0)) + WRITE_VREG(MREG_CMD, (frame_width << 16) | frame_height); + else + WRITE_VREG(MREG_CMD, 0); + WRITE_VREG(MREG_FORCE_I_RDY, (force_first_i_ready & 0x01)); + /* clear error count */ + WRITE_VREG(MREG_ERROR_COUNT, 0); + WRITE_VREG(MREG_FATAL_ERROR, 0); + /* clear wait buffer status */ + WRITE_VREG(MREG_WAIT_BUFFER, 0); +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + return ret; +} + +static void vmpeg12_local_init(void) +{ + int i; + + INIT_KFIFO(display_q); + INIT_KFIFO(recycle_q); + INIT_KFIFO(newframe_q); + + cur_pool_idx ^= 1; + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s 
*vf; + + if (cur_pool_idx == 0) { + vf = &vfpool[i]; + vfpool[i].index = DECODE_BUFFER_NUM_MAX; + } else { + vf = &vfpool2[i]; + vfpool2[i].index = DECODE_BUFFER_NUM_MAX; + } + kfifo_put(&newframe_q, (const struct vframe_s *)vf); + } + + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + vfbuf_use[i] = 0; + if (mm_blk_handle) { + mutex_lock(&userdata_mutex); + if (p_userdata_mgr) { + vfree(p_userdata_mgr); + p_userdata_mgr = NULL; + } + if (ccbuf_phyAddress_is_remaped_nocache) + codec_mm_unmap_phyaddr(ccbuf_phyAddress_virt); + ccbuf_phyAddress_virt = NULL; + ccbuf_phyAddress = 0; + ccbuf_phyAddress_is_remaped_nocache = 0; + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + mutex_unlock(&userdata_mutex); + } + + mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + tvp_flag); + + + frame_width = frame_height = frame_dur = frame_prog = 0; + frame_force_skip_flag = 0; + wait_buffer_counter = 0; + first_i_frame_ready = force_first_i_ready; + saved_resolution = 0; + dec_control &= DEC_CONTROL_INTERNAL_MASK; +} + +static s32 vmpeg12_init(void) +{ + int ret = -1, size = -1; + char *buf = vmalloc(0x1000 * 16); + + if (IS_ERR_OR_NULL(buf)) + return -ENOMEM; + + timer_setup(&recycle_timer, vmpeg_put_timer_func, 0); + + stat |= STAT_TIMER_INIT; + + vmpeg12_local_init(); + + amvdec_enable(); + + size = get_firmware_data(VIDEO_DEC_MPEG12, buf); + if (size < 0) { + pr_err("get firmware fail."); + vfree(buf); + return -1; + } + + ret = amvdec_loadmc_ex(VFORMAT_MPEG12, "mpeg12", buf); + if (ret < 0) { + amvdec_disable(); + vfree(buf); + pr_err("MPEG12: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + return -EBUSY; + } + + vfree(buf); + + stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + vmpeg12_prot_init(); + + ret = vdec_request_irq(VDEC_IRQ_1, vmpeg12_isr, + "vmpeg12-irq", (void *)vmpeg12_dec_id); + + if (ret) { + amvdec_disable(); + amlog_level(LOG_LEVEL_ERROR, "vmpeg12 irq register error.\n"); + return -ENOENT; + } + + stat |= STAT_ISR_REG; +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_provider_init(&vmpeg_vf_prov, PROVIDER_NAME, &vmpeg_vf_provider, + NULL); + vf_reg_provider(&vmpeg_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); +#else + vf_provider_init(&vmpeg_vf_prov, PROVIDER_NAME, &vmpeg_vf_provider, + NULL); + vf_reg_provider(&vmpeg_vf_prov); +#endif + if (vmpeg12_amstream_dec_info.rate != 0) { + if (!is_reset) { + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long) + vmpeg12_amstream_dec_info.rate)); + fr_hint_status = VDEC_HINTED; + } + } else + fr_hint_status = VDEC_NEED_HINT; + + stat |= STAT_VF_HOOK; + + recycle_timer.expires = jiffies + PUT_INTERVAL; + add_timer(&recycle_timer); + + stat |= STAT_TIMER_ARM; + + amvdec_start(); + + stat |= STAT_VDEC_RUN; + + return 0; +} + +#ifdef DUMP_USER_DATA +static int amvdec_mpeg12_init_userdata_dump(void) +{ + user_data_buf = kmalloc(MAX_USER_DATA_SIZE, GFP_KERNEL); + if (user_data_buf) + return 1; + else + return 0; +} +#endif + +static int amvdec_mpeg12_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + + amlog_level(LOG_LEVEL_INFO, "amvdec_mpeg12 probe start.\n"); + + if (pdata == NULL) { + amlog_level(LOG_LEVEL_ERROR, + "amvdec_mpeg12 platform data undefined.\n"); + return -EFAULT; + } + + tvp_flag = vdec_secure(pdata) ? 
CODEC_MM_FLAGS_TVP : 0; + if (pdata->sys_info) + vmpeg12_amstream_dec_info = *pdata->sys_info; + + pdata->dec_status = vmpeg12_dec_status; + pdata->set_isreset = vmpeg12_set_isreset; + + pdata->user_data_read = vmpeg12_user_data_read; + pdata->reset_userdata_fifo = vmpeg12_reset_userdata_fifo; + pdata->wakeup_userdata_poll = vmpeg12_wakeup_userdata_poll; + is_reset = 0; + + vmpeg12_vdec_info_init(); + + INIT_WORK(&set_clk_work, vmpeg12_set_clk); + if (vmpeg12_init() < 0) { + amlog_level(LOG_LEVEL_ERROR, "amvdec_mpeg12 init failed.\n"); + kfree(gvs); + gvs = NULL; + pdata->dec_status = NULL; + return -ENODEV; + } + vdec = pdata; +#ifdef DUMP_USER_DATA + amvdec_mpeg12_init_userdata_dump(); +#endif + vmpeg12_create_userdata_manager(ccbuf_phyAddress_virt, CCBUF_SIZE); + + INIT_WORK(&userdata_push_work, userdata_push_do_work); + INIT_WORK(¬ify_work, vmpeg12_notify_work); + INIT_WORK(&reset_work, reset_do_work); + + + last_offset = 0xFFFFFFFF; +#ifdef DUMP_USER_DATA + last_wp = 0; + reset_user_data_buf(); +#endif + + amlog_level(LOG_LEVEL_INFO, "amvdec_mpeg12 probe end.\n"); + + return 0; +} + +static int amvdec_mpeg12_remove(struct platform_device *pdev) +{ + cancel_work_sync(&userdata_push_work); + cancel_work_sync(¬ify_work); + cancel_work_sync(&reset_work); + + if (stat & STAT_VDEC_RUN) { + amvdec_stop(); + stat &= ~STAT_VDEC_RUN; + } + + if (stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)vmpeg12_dec_id); + stat &= ~STAT_ISR_REG; + } + + if (stat & STAT_TIMER_ARM) { + del_timer_sync(&recycle_timer); + stat &= ~STAT_TIMER_ARM; + } + + cancel_work_sync(&set_clk_work); + if (stat & STAT_VF_HOOK) { + if (fr_hint_status == VDEC_HINTED) + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_END_HINT, NULL); + fr_hint_status = VDEC_NO_NEED_HINT; + + vf_unreg_provider(&vmpeg_vf_prov); + stat &= ~STAT_VF_HOOK; + } + + amvdec_disable(); + if (ccbuf_phyAddress_is_remaped_nocache) + codec_mm_unmap_phyaddr(ccbuf_phyAddress_virt); + + ccbuf_phyAddress_virt = 
NULL; + ccbuf_phyAddress = 0; + ccbuf_phyAddress_is_remaped_nocache = 0; + vmpeg12_destroy_userdata_manager(); + + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + amlog_level(LOG_LEVEL_INFO, "amvdec_mpeg12 remove.\n"); + + kfree(gvs); + gvs = NULL; + vdec = NULL; + +#ifdef DUMP_USER_DATA + if (user_data_buf) { + show_user_data_buf(); + kfree(user_data_buf); + user_data_buf = NULL; + } +#endif + + return 0; +} + +/****************************************/ + +static struct platform_driver amvdec_mpeg12_driver = { + .probe = amvdec_mpeg12_probe, + .remove = amvdec_mpeg12_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t amvdec_mpeg12_profile = { + .name = "mpeg12", + .profile = "" +}; + + +static struct mconfig mpeg12_configs[] = { + MC_PU32("stat", &stat), + MC_PU32("dec_control", &dec_control), + MC_PU32("error_frame_skip_level", &error_frame_skip_level), +}; +static struct mconfig_node mpeg12_node; + + +static int __init amvdec_mpeg12_driver_init_module(void) +{ + amlog_level(LOG_LEVEL_INFO, "amvdec_mpeg12 module init\n"); + + if (platform_driver_register(&amvdec_mpeg12_driver)) { + amlog_level(LOG_LEVEL_ERROR, + "failed to register amvdec_mpeg12 driver\n"); + return -ENODEV; + } + vcodec_profile_register(&amvdec_mpeg12_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &mpeg12_node, + "mpeg12", mpeg12_configs, CONFIG_FOR_RW); + return 0; +} + +static void __exit amvdec_mpeg12_driver_remove_module(void) +{ + amlog_level(LOG_LEVEL_INFO, "amvdec_mpeg12 module remove.\n"); + + platform_driver_unregister(&amvdec_mpeg12_driver); +} + +/****************************************/ +module_param(dec_control, uint, 0664); +MODULE_PARM_DESC(dec_control, "\n amvmpeg12 decoder control\n"); +module_param(error_frame_skip_level, uint, 0664); +MODULE_PARM_DESC(error_frame_skip_level, + "\n amvdec_mpeg12 
error_frame_skip_level\n"); +module_param(force_first_i_ready, uint, 0664); +MODULE_PARM_DESC(force_first_i_ready, "\n amvmpeg12 force_first_i_ready\n"); + +module_init(amvdec_mpeg12_driver_init_module); +module_exit(amvdec_mpeg12_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC MPEG1/2 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/mpeg12/vmpeg12.h b/drivers/frame_provider/decoder/mpeg12/vmpeg12.h new file mode 100644 index 0000000..e26a414 --- /dev/null +++ b/drivers/frame_provider/decoder/mpeg12/vmpeg12.h
@@ -0,0 +1,26 @@ +/* + * drivers/amlogic/amports/vmpeg12.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef VMPEG12_H +#define VMPEG12_H + +/* /#if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +/* TODO: move to register headers */ +#define VPP_VD1_POSTBLEND (1 << 10) +/* /#endif */ + +#endif /* VMPEG12_H */
diff --git a/drivers/frame_provider/decoder/mpeg12/vmpeg12_multi.c b/drivers/frame_provider/decoder/mpeg12/vmpeg12_multi.c new file mode 100644 index 0000000..f77bb7f --- /dev/null +++ b/drivers/frame_provider/decoder/mpeg12/vmpeg12_multi.c
@@ -0,0 +1,3941 @@ +/* + * drivers/amlogic/amports/vmpeg12.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * +*/ + +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/random.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/sched/clock.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../utils/vdec_input.h" +#include "../utils/vdec.h" +#include "../utils/amvdec.h" +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../utils/config_parser.h" +#include "../utils/firmware.h" +#include "../utils/vdec_v4l2_buffer_ops.h" +#include "../utils/config_parser.h" +#include <media/v4l2-mem2mem.h> +#include 
"../utils/vdec_feature.h" + +#define MEM_NAME "codec_mmpeg12" +#define CHECK_INTERVAL (HZ/100) + +#define DRIVER_NAME "ammvdec_mpeg12" +#define MREG_REF0 AV_SCRATCH_2 +#define MREG_REF1 AV_SCRATCH_3 +/* protocol registers */ +#define MREG_SEQ_INFO AV_SCRATCH_4 +#define MREG_PIC_INFO AV_SCRATCH_5 +#define MREG_PIC_WIDTH AV_SCRATCH_6 +#define MREG_PIC_HEIGHT AV_SCRATCH_7 +#define MREG_INPUT AV_SCRATCH_8 /*input_type*/ +#define MREG_BUFFEROUT AV_SCRATCH_9 /*FROM_AMRISC_REG*/ + +#define MREG_CMD AV_SCRATCH_A +#define MREG_CO_MV_START AV_SCRATCH_B +#define MREG_ERROR_COUNT AV_SCRATCH_C +#define MREG_FRAME_OFFSET AV_SCRATCH_D +#define MREG_WAIT_BUFFER AV_SCRATCH_E +#define MREG_FATAL_ERROR AV_SCRATCH_F + +#define MREG_CC_ADDR AV_SCRATCH_0 +#define AUX_BUF_ALIGN(adr) ((adr + 0xf) & (~0xf)) + +#define GET_SLICE_TYPE(type) ("IPB##"[((type&PICINFO_TYPE_MASK)>>16)&0x3]) +#define PICINFO_ERROR 0x80000000 +#define PICINFO_TYPE_MASK 0x00030000 +#define PICINFO_TYPE_I 0x00000000 +#define PICINFO_TYPE_P 0x00010000 +#define PICINFO_TYPE_B 0x00020000 +#define PICINFO_PROG 0x8000 +#define PICINFO_RPT_FIRST 0x4000 +#define PICINFO_TOP_FIRST 0x2000 +#define PICINFO_FRAME 0x1000 +#define TOP_FIELD 0x1000 +#define BOTTOM_FIELD 0x2000 +#define FRAME_PICTURE 0x3000 +#define FRAME_PICTURE_MASK 0x3000 + +#define SEQINFO_EXT_AVAILABLE 0x80000000 +#define SEQINFO_PROG 0x00010000 +#define CCBUF_SIZE (5*1024) + +#define VF_POOL_SIZE 64 +#define DECODE_BUFFER_NUM_MAX 16 +#define DECODE_BUFFER_NUM_DEF 8 +#define MAX_BMMU_BUFFER_NUM (DECODE_BUFFER_NUM_MAX + 1) + +#define PUT_INTERVAL (HZ/100) +#define WORKSPACE_SIZE (4*SZ_64K) /*swap&ccbuf&matirx&MV*/ +#define CTX_LMEM_SWAP_OFFSET 0 +#define CTX_CCBUF_OFFSET 0x800 +#define CTX_QUANT_MATRIX_OFFSET (CTX_CCBUF_OFFSET + 5*1024) +#define CTX_CO_MV_OFFSET (CTX_QUANT_MATRIX_OFFSET + 1*1024) +#define CTX_DECBUF_OFFSET (CTX_CO_MV_OFFSET + 0x11000) + +#define DEFAULT_MEM_SIZE (32*SZ_1M) +#define INVALID_IDX (-1) /* Invalid buffer index.*/ + +static u32 
buf_size = 32 * 1024 * 1024; +static int pre_decode_buf_level = 0x800; +static int start_decode_buf_level = 0x4000; +static u32 dec_control; +static u32 error_frame_skip_level = 1; +static u32 udebug_flag; +static unsigned int radr; +static unsigned int rval; + +static u32 without_display_mode; +static u32 dynamic_buf_num_margin = 2; + +#define VMPEG12_DEV_NUM 9 +static unsigned int max_decode_instance_num = VMPEG12_DEV_NUM; +static unsigned int max_process_time[VMPEG12_DEV_NUM]; +static unsigned int decode_timeout_val = 200; +#define INCPTR(p) ptr_atomic_wrap_inc(&p) + +#define DEC_CONTROL_FLAG_FORCE_2500_720_576_INTERLACE 0x0002 +#define DEC_CONTROL_FLAG_FORCE_3000_704_480_INTERLACE 0x0004 +#define DEC_CONTROL_FLAG_FORCE_2500_704_576_INTERLACE 0x0008 +#define DEC_CONTROL_FLAG_FORCE_2500_544_576_INTERLACE 0x0010 +#define DEC_CONTROL_FLAG_FORCE_2500_480_576_INTERLACE 0x0020 +#define DEC_CONTROL_INTERNAL_MASK 0x0fff +#define DEC_CONTROL_FLAG_FORCE_SEQ_INTERLACE 0x1000 + +#define INTERLACE_SEQ_ALWAYS + +#if 1/* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +#define NV21 +#endif + +#define AGAIN_HAS_THRESHOLD + +#ifdef AGAIN_HAS_THRESHOLD +static u32 again_threshold; +#endif + +/* +#define DUMP_USER_DATA +*/ + +enum { + FRAME_REPEAT_TOP, + FRAME_REPEAT_BOT, + FRAME_REPEAT_NONE +}; + +/*Send by AV_SCRATCH_9*/ +#define MPEG12_PIC_DONE 1 +#define MPEG12_DATA_EMPTY 2 +#define MPEG12_SEQ_END 3 +#define MPEG12_DATA_REQUEST 4 + +/*Send by AV_SCRATCH_G*/ +#define MPEG12_V4L2_INFO_NOTIFY 1 +/*Send by AV_SCRATCH_J*/ +#define MPEG12_USERDATA_DONE 0x8000 + +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_ERROR 3 +#define DEC_RESULT_FORCE_EXIT 4 +#define DEC_RESULT_EOS 5 +#define DEC_RESULT_GET_DATA 6 +#define DEC_RESULT_GET_DATA_RETRY 7 + +#define DEC_DECODE_TIMEOUT 0x21 +#define DECODE_ID(hw) (hw_to_vdec(hw)->id) +#define DECODE_STOP_POS AV_SCRATCH_K + +struct mmpeg2_userdata_record_t { + struct userdata_meta_info_t meta_info; + 
u32 rec_start; + u32 rec_len; +}; + +#define USERDATA_FIFO_NUM 256 +#define MAX_FREE_USERDATA_NODES 5 + +struct mmpeg2_userdata_info_t { + struct mmpeg2_userdata_record_t records[USERDATA_FIFO_NUM]; + u8 *data_buf; + u8 *data_buf_end; + u32 buf_len; + u32 read_index; + u32 write_index; + u32 last_wp; +}; +#define MAX_UD_RECORDS 5 + +struct pic_info_t { + u32 buffer_info; + u32 index; + u32 offset; + u32 width; + u32 height; + u32 pts; + u64 pts64; + bool pts_valid; + ulong v4l_ref_buf_addr; + u32 hw_decode_time; + u32 frame_size; // For frame base mode + u64 timestamp; +}; + +struct vdec_mpeg12_hw_s { + spinlock_t lock; + struct platform_device *platform_dev; + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + s32 vfbuf_use[DECODE_BUFFER_NUM_MAX]; + s32 ref_use[DECODE_BUFFER_NUM_MAX]; + u32 frame_width; + u32 frame_height; + u32 frame_dur; + u32 frame_prog; + u32 seqinfo; + u32 ctx_valid; + u32 dec_control; + void *mm_blk_handle; + struct vframe_chunk_s *chunk; + u32 stat; + u8 init_flag; + unsigned long buf_start; + u32 buf_size; + u32 vmpeg12_ratio; + u64 vmpeg12_ratio64; + u32 pixel_ratio; + u32 reg_pic_width; + u32 reg_pic_height; + u32 reg_mpeg1_2_reg; + u32 reg_pic_head_info; + u32 reg_f_code_reg; + u32 reg_slice_ver_pos_pic_type; + u32 reg_vcop_ctrl_reg; + u32 reg_mb_info; + u32 reg_signal_type; + u32 dec_num; + u32 disp_num; + struct timer_list check_timer; + u32 decode_timeout_count; + unsigned long int start_process_time; + u32 last_vld_level; + u32 eos; + + struct pic_info_t pics[DECODE_BUFFER_NUM_MAX]; + u32 canvas_spec[DECODE_BUFFER_NUM_MAX]; + u64 lastpts64; + u32 last_chunk_pts; + struct canvas_config_s canvas_config[DECODE_BUFFER_NUM_MAX][2]; + struct dec_sysinfo vmpeg12_amstream_dec_info; + + s32 refs[2]; + int dec_result; + u32 timeout_processing; + wait_queue_head_t wait_q; + struct work_struct work; + struct work_struct timeout_work; 
+ struct work_struct notify_work; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + dma_addr_t ccbuf_phyAddress; + void *ccbuf_phyAddress_virt; + u32 cc_buf_size; + unsigned long ccbuf_phyAddress_is_remaped_nocache; + u32 frame_rpt_state; +/* for error handling */ + s32 frame_force_skip_flag; + s32 error_frame_skip_level; + s32 wait_buffer_counter; + u32 first_i_frame_ready; + u32 run_count; + u32 not_run_ready; + u32 input_empty; + u32 put_num; + u32 peek_num; + u32 get_num; + u32 drop_frame_count; + u32 buffer_not_ready; + u32 ratio_control; + int frameinfo_enable; + struct firmware_s *fw; + u32 canvas_mode; +#ifdef AGAIN_HAS_THRESHOLD + u32 pre_parser_wr_ptr; + u8 next_again_flag; +#endif + + struct work_struct userdata_push_work; + struct mutex userdata_mutex; + struct mmpeg2_userdata_info_t userdata_info; + struct mmpeg2_userdata_record_t ud_record[MAX_UD_RECORDS]; + int cur_ud_idx; + u8 *user_data_buffer; + int wait_for_udr_send; + u32 ucode_cc_last_wp; + u32 notify_ucode_cc_last_wp; + u32 notify_data_cc_last_wp; + u32 userdata_wp_ctx; +#ifdef DUMP_USER_DATA +#define MAX_USER_DATA_SIZE 1572864 + void *user_data_dump_buf; + unsigned char *pdump_buf_cur_start; + int total_len; + int bskip; + int n_userdata_id; + u32 reference[MAX_UD_RECORDS]; +#endif + int tvp_flag; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + u32 buf_num; + u32 dynamic_buf_num_margin; + struct vdec_info gvs; + struct vframe_qos_s vframe_qos; + u32 res_ch_flag; + u32 i_only; + u32 kpi_first_i_comming; + u32 kpi_first_i_decoded; + int sidebind_type; + int sidebind_channel_id; + u32 profile_idc; + u32 level_idc; + int dec_again_cnt; + int vdec_pg_enable_flag; + ulong fb_token; + char vdec_name[32]; + char pts_name[32]; + char new_q_name[32]; + char disp_q_name[32]; + bool run_flag; +}; + +static void vmpeg12_local_init(struct vdec_mpeg12_hw_s *hw); +static int vmpeg12_hw_ctx_restore(struct vdec_mpeg12_hw_s *hw); +static void reset_process_time(struct 
vdec_mpeg12_hw_s *hw); +static int vmpeg12_canvas_init(struct vdec_mpeg12_hw_s *hw); +static void flush_output(struct vdec_mpeg12_hw_s *hw); +static struct vframe_s *vmpeg_vf_peek(void *); +static struct vframe_s *vmpeg_vf_get(void *); +static void vmpeg_vf_put(struct vframe_s *, void *); +static int vmpeg_vf_states(struct vframe_states *states, void *); +static int vmpeg_event_cb(int type, void *data, void *private_data); +static int notify_v4l_eos(struct vdec_s *vdec); +static void start_process_time_set(struct vdec_mpeg12_hw_s *hw); +static int check_dirty_data(struct vdec_s *vdec); +static int debug_enable; +/*static struct work_struct userdata_push_work;*/ +#undef pr_info +#define pr_info printk +unsigned int mpeg12_debug_mask = 0xff; +/*static int counter_max = 5;*/ +static u32 run_ready_min_buf_num = 2; +static int dirty_again_threshold = 100; +static int error_proc_policy = 0x1; + +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_RUN_FLOW 0X0001 +#define PRINT_FLAG_TIMEINFO 0x0002 +#define PRINT_FLAG_UCODE_DETAIL 0x0004 +#define PRINT_FLAG_VLD_DETAIL 0x0008 +#define PRINT_FLAG_DEC_DETAIL 0x0010 +#define PRINT_FLAG_BUFFER_DETAIL 0x0020 +#define PRINT_FLAG_RESTORE 0x0040 +#define PRINT_FRAME_NUM 0x0080 +#define PRINT_FLAG_FORCE_DONE 0x0100 +#define PRINT_FLAG_COUNTER 0X0200 +#define PRINT_FRAMEBASE_DATA 0x0400 +#define PRINT_FLAG_VDEC_STATUS 0x0800 +#define PRINT_FLAG_PARA_DATA 0x1000 +#define PRINT_FLAG_USERDATA_DETAIL 0x2000 +#define PRINT_FLAG_TIMEOUT_STATUS 0x4000 +#define PRINT_FLAG_V4L_DETAIL 0x8000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 + + + +int debug_print(int index, int debug_flag, const char *fmt, ...) 
+{ + if (((debug_enable & debug_flag) && + ((1 << index) & mpeg12_debug_mask)) + || (debug_flag == PRINT_FLAG_ERROR)) { + unsigned char *buf = kzalloc(512, GFP_ATOMIC); + int len = 0; + va_list args; + + if (!buf) + return 0; + + va_start(args, fmt); + len = sprintf(buf, "%d: ", index); + vsnprintf(buf + len, 512-len, fmt, args); + pr_info("%s", buf); + va_end(args); + kfree(buf); + } + return 0; +} + + +/*static bool is_reset;*/ +#define PROVIDER_NAME "vdec.mpeg12" +static const struct vframe_operations_s vf_provider_ops = { + .peek = vmpeg_vf_peek, + .get = vmpeg_vf_get, + .put = vmpeg_vf_put, + .event_cb = vmpeg_event_cb, + .vf_states = vmpeg_vf_states, +}; + + +static const u32 frame_rate_tab[16] = { + 96000 / 30, 96000000 / 23976, 96000 / 24, 96000 / 25, + 9600000 / 2997, 96000 / 30, 96000 / 50, 9600000 / 5994, + 96000 / 60, + /* > 8 reserved, use 24 */ + 96000 / 24, 96000 / 24, 96000 / 24, 96000 / 24, + 96000 / 24, 96000 / 24, 96000 / 24 +}; + +static int vmpeg12_v4l_alloc_buff_config_canvas(struct vdec_mpeg12_hw_s *hw, int i) +{ + int ret; + u32 canvas; + ulong decbuf_start = 0, decbuf_uv_start = 0; + int decbuf_y_size = 0, decbuf_uv_size = 0; + u32 canvas_width = 0, canvas_height = 0; + struct vdec_s *vdec = hw_to_vdec(hw); + struct vdec_v4l2_buffer *fb = NULL; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (hw->pics[i].v4l_ref_buf_addr) { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *) + hw->pics[i].v4l_ref_buf_addr; + + fb->status = FB_ST_DECODER; + return 0; + } + + ret = ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC); + if (ret < 0) { + debug_print(DECODE_ID(hw), 0, + "[%d] get fb fail %d/%d.\n", + ctx->id, i, hw->buf_num); + return ret; + } + + fb->status = FB_ST_DECODER; + + if (!hw->frame_width || !hw->frame_height) { + struct vdec_pic_info pic; + vdec_v4l_get_pic_info(ctx, &pic); + hw->frame_width = pic.visible_width; + hw->frame_height = pic.visible_height; + 
debug_print(DECODE_ID(hw), 0, + "[%d] set %d x %d from IF layer\n", ctx->id, + hw->frame_width, hw->frame_height); + } + + hw->pics[i].v4l_ref_buf_addr = (ulong)fb; + if (fb->num_planes == 1) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].offset; + decbuf_uv_start = decbuf_start + decbuf_y_size; + decbuf_uv_size = decbuf_y_size / 2; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 32); + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + } else if (fb->num_planes == 2) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].size; + decbuf_uv_start = fb->m.mem[1].addr; + decbuf_uv_size = fb->m.mem[1].size; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 32); + fb->m.mem[0].bytes_used = decbuf_y_size; + fb->m.mem[1].bytes_used = decbuf_uv_size; + } + + debug_print(DECODE_ID(hw), 0, "[%d] %s(), v4l ref buf addr: 0x%x\n", + ctx->id, __func__, fb); + + if (vdec->parallel_dec == 1) { + u32 tmp; + if (canvas_u(hw->canvas_spec[i]) == 0xff) { + tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~(0xffff << 8); + hw->canvas_spec[i] |= tmp << 8; + hw->canvas_spec[i] |= tmp << 16; + } + if (canvas_y(hw->canvas_spec[i]) == 0xff) { + tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~0xff; + hw->canvas_spec[i] |= tmp; + } + canvas = hw->canvas_spec[i]; + } else { + canvas = vdec->get_canvas(i, 2); + hw->canvas_spec[i] = canvas; + } + + hw->canvas_config[i][0].phy_addr = decbuf_start; + hw->canvas_config[i][0].width = canvas_width; + hw->canvas_config[i][0].height = canvas_height; + hw->canvas_config[i][0].block_mode = hw->canvas_mode; + hw->canvas_config[i][0].endian = + (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 
7 : 0; + config_cav_lut(canvas_y(canvas), &hw->canvas_config[i][0], VDEC_1); + + /* mpeg2 decoder canvas need to be revert to match display canvas */ + hw->canvas_config[i][0].endian = + (hw->canvas_mode != CANVAS_BLKMODE_LINEAR) ? 7 : 0; + + hw->canvas_config[i][1].phy_addr = decbuf_uv_start; + hw->canvas_config[i][1].width = canvas_width; + hw->canvas_config[i][1].height = canvas_height / 2; + hw->canvas_config[i][1].block_mode = hw->canvas_mode; + hw->canvas_config[i][1].endian = + (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 7 : 0; + config_cav_lut(canvas_u(canvas), &hw->canvas_config[i][1], VDEC_1); + + /* mpeg2 decoder canvas need to be revert to match display canvas */ + hw->canvas_config[i][1].endian = + (hw->canvas_mode != CANVAS_BLKMODE_LINEAR) ? 7 : 0; + + debug_print(DECODE_ID(hw), PRINT_FLAG_BUFFER_DETAIL, + "[%d] %s(), canvas: 0x%x mode: %d y: %x uv: %x w: %d h: %d\n", + ctx->id, __func__, canvas, hw->canvas_mode, + decbuf_start, decbuf_uv_start, + canvas_width, canvas_height); + + return 0; +} + + +static unsigned int vmpeg12_get_buf_num(struct vdec_mpeg12_hw_s *hw) +{ + unsigned int buf_num = DECODE_BUFFER_NUM_DEF; + + buf_num += hw->dynamic_buf_num_margin; + + if (buf_num > DECODE_BUFFER_NUM_MAX) + buf_num = DECODE_BUFFER_NUM_MAX; + + return buf_num; +} + +static bool is_enough_free_buffer(struct vdec_mpeg12_hw_s *hw) +{ + int i; + + for (i = 0; i < hw->buf_num; i++) { + if ((hw->vfbuf_use[i] == 0) && (hw->ref_use[i] == 0)) + break; + } + + return (i == hw->buf_num) ? 
false : true; +} + +static int find_free_buffer(struct vdec_mpeg12_hw_s *hw) +{ + int i; + + for (i = 0; i < hw->buf_num; i++) { + if ((hw->vfbuf_use[i] == 0) && + (hw->ref_use[i] == 0)) + break; + } + + if (i == hw->buf_num) + return -1; + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) { + /*run to parser csd data*/ + i = 0; + } else { + if (!ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) + return -1; + + if (vmpeg12_v4l_alloc_buff_config_canvas(hw, i)) + return -1; + } + } + return i; +} + +static u32 spec_to_index(struct vdec_mpeg12_hw_s *hw, u32 spec) +{ + u32 i; + + for (i = 0; i < hw->buf_num; i++) { + if (hw->canvas_spec[i] == spec) + return i; + } + + return hw->buf_num; +} + +/* +[SE][BUG-145343][huanghang] fixed:mpeg2 frame qos info notify */ +static void fill_frame_info(struct vdec_mpeg12_hw_s *hw, u32 slice_type, + int frame_size, u32 pts) +{ + unsigned char a[3]; + unsigned char i, j, t; + unsigned long data; + struct vframe_qos_s *vframe_qos = &hw->vframe_qos; + + vframe_qos->type = ((slice_type & PICINFO_TYPE_MASK) == + PICINFO_TYPE_I) ? 1 : + ((slice_type & + PICINFO_TYPE_MASK) == + PICINFO_TYPE_P) ? 
2 : 3; + vframe_qos->size = frame_size; + vframe_qos->pts = pts; + + get_random_bytes(&data, sizeof(unsigned long)); + if (vframe_qos->type == 1) + data = 0; + a[0] = data & 0xff; + a[1] = (data >> 8) & 0xff; + a[2] = (data >> 16) & 0xff; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + vframe_qos->max_mv = a[2]; + vframe_qos->avg_mv = a[1]; + vframe_qos->min_mv = a[0]; + + get_random_bytes(&data, sizeof(unsigned long)); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + vframe_qos->max_qp = a[2]; + vframe_qos->avg_qp = a[1]; + vframe_qos->min_qp = a[0]; + + get_random_bytes(&data, sizeof(unsigned long)); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) { + for (j = i + 1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + vframe_qos->max_skip = a[2]; + vframe_qos->avg_skip = a[1]; + vframe_qos->min_skip = a[0]; + + vframe_qos->num++; + + return; +} + +static void set_frame_info(struct vdec_mpeg12_hw_s *hw, struct vframe_s *vf) +{ + int ar = 0; + u32 buffer_index = vf->index; + unsigned int num = 0; + unsigned int den = 0; + + vf->width = hw->pics[buffer_index].width; + vf->height = hw->pics[buffer_index].height; + + if (hw->vmpeg12_ratio64 != 0) { + num = hw->vmpeg12_ratio64>>32; + den = hw->vmpeg12_ratio64 & 0xffffffff; + } else { + num = hw->vmpeg12_ratio>>16; + den = hw->vmpeg12_ratio & 0xffff; + + } + + if (hw->frame_dur > 0) + vf->duration = hw->frame_dur; + else { + vf->duration = 
hw->frame_dur = + frame_rate_tab[(READ_VREG(MREG_SEQ_INFO) >> 4) & 0xf]; + vdec_schedule_work(&hw->notify_work); + } +/* + ar_bits = READ_VREG(MREG_SEQ_INFO) & 0xf; + + if (ar_bits == 0x2) + vf->ratio_control = 0xc0 << DISP_RATIO_ASPECT_RATIO_BIT; + + else if (ar_bits == 0x3) + vf->ratio_control = 0x90 << DISP_RATIO_ASPECT_RATIO_BIT; + + else if (ar_bits == 0x4) + vf->ratio_control = 0x74 << DISP_RATIO_ASPECT_RATIO_BIT; + else + vf->ratio_control = 0; +*/ + + hw->pixel_ratio = READ_VREG(MREG_SEQ_INFO) & 0xf;; + + if (hw->vmpeg12_ratio == 0) { + /* always stretch to 16:9 */ + vf->ratio_control |= (0x90 << + DISP_RATIO_ASPECT_RATIO_BIT); + vf->sar_width = 1; + vf->sar_height = 1; + } else { + switch (hw->pixel_ratio) { + case 1: + vf->sar_width = 1; + vf->sar_height = 1; + ar = (vf->height * hw->vmpeg12_ratio) / vf->width; + break; + case 2: + vf->sar_width = 4; + vf->sar_height = 3; + ar = (vf->height * 3 * hw->vmpeg12_ratio) / (vf->width * 4); + break; + case 3: + vf->sar_width = 16; + vf->sar_height = 9; + ar = (vf->height * 9 * hw->vmpeg12_ratio) / (vf->width * 16); + break; + case 4: + vf->sar_width = 221; + vf->sar_height = 100; + ar = (vf->height * 100 * hw->vmpeg12_ratio) / (vf->width * + 221); + break; + default: + vf->sar_width = 1; + vf->sar_height = 1; + ar = (vf->height * hw->vmpeg12_ratio) / vf->width; + break; + } + } + ar = min(ar, DISP_RATIO_ASPECT_RATIO_MAX); + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + + hw->ratio_control = vf->ratio_control; + + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + + vf->canvas0_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas0_config[1] = hw->canvas_config[buffer_index][1]; + + vf->canvas1_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas1_config[1] = hw->canvas_config[buffer_index][1]; + + vf->sidebind_type = hw->sidebind_type; + vf->sidebind_channel_id = hw->sidebind_channel_id; + + debug_print(DECODE_ID(hw), PRINT_FLAG_PARA_DATA, + "mpeg2dec: w(%d), h(%d), 
dur(%d), dur-ES(%d)\n", + hw->frame_width, hw->frame_height, hw->frame_dur, + frame_rate_tab[(READ_VREG(MREG_SEQ_INFO) >> 4) & 0xf]); +} + +static bool error_skip(struct vdec_mpeg12_hw_s *hw, + u32 info, struct vframe_s *vf) +{ + if (hw->error_frame_skip_level) { + /* skip error frame */ + if ((info & PICINFO_ERROR) || (hw->frame_force_skip_flag)) { + if ((info & PICINFO_ERROR) == 0) { + if ((info & PICINFO_TYPE_MASK) == + PICINFO_TYPE_I) + hw->frame_force_skip_flag = 0; + } else { + if (hw->error_frame_skip_level >= 2) + hw->frame_force_skip_flag = 1; + } + if ((info & PICINFO_ERROR) + || (hw->frame_force_skip_flag)) + return true; + } + } + return false; +} + +static inline void vmpeg12_save_hw_context(struct vdec_mpeg12_hw_s *hw, u32 reg) +{ + if (reg == 3) { + hw->ctx_valid = 0; + //pr_info("%s, hw->userdata_wp_ctx %d\n", __func__, hw->userdata_wp_ctx); + } else { + hw->seqinfo = READ_VREG(MREG_SEQ_INFO); + hw->reg_pic_width = READ_VREG(MREG_PIC_WIDTH); + hw->reg_pic_height = READ_VREG(MREG_PIC_HEIGHT); + hw->reg_mpeg1_2_reg = READ_VREG(MPEG1_2_REG); + hw->reg_pic_head_info = READ_VREG(PIC_HEAD_INFO); + hw->reg_f_code_reg = READ_VREG(F_CODE_REG); + hw->reg_slice_ver_pos_pic_type = READ_VREG(SLICE_VER_POS_PIC_TYPE); + hw->reg_vcop_ctrl_reg = READ_VREG(VCOP_CTRL_REG); + hw->reg_mb_info = READ_VREG(MB_INFO); + hw->reg_signal_type = READ_VREG(AV_SCRATCH_H); + debug_print(DECODE_ID(hw), PRINT_FLAG_PARA_DATA, + "signal_type = %x", hw->reg_signal_type); + hw->ctx_valid = 1; + } +} + +static void vmmpeg2_reset_udr_mgr(struct vdec_mpeg12_hw_s *hw) +{ + hw->wait_for_udr_send = 0; + hw->cur_ud_idx = 0; + memset(&hw->ud_record, 0, sizeof(hw->ud_record)); +} + +static void vmmpeg2_crate_userdata_manager( + struct vdec_mpeg12_hw_s *hw, + u8 *userdata_buf, + int buf_len) +{ + if (hw) { + mutex_init(&hw->userdata_mutex); + + memset(&hw->userdata_info, 0, + sizeof(struct mmpeg2_userdata_info_t)); + hw->userdata_info.data_buf = userdata_buf; + hw->userdata_info.buf_len = 
buf_len; + hw->userdata_info.data_buf_end = userdata_buf + buf_len; + hw->userdata_wp_ctx = 0; + + vmmpeg2_reset_udr_mgr(hw); + } +} + +static void vmmpeg2_destroy_userdata_manager(struct vdec_mpeg12_hw_s *hw) +{ + if (hw) + memset(&hw->userdata_info, + 0, + sizeof(struct mmpeg2_userdata_info_t)); +} + +static void aml_swap_data(uint8_t *user_data, int ud_size) +{ + int swap_blocks, i, j, k, m; + unsigned char c_temp; + + /* swap byte order */ + swap_blocks = ud_size / 8; + for (i = 0; i < swap_blocks; i++) { + j = i * 8; + k = j + 7; + for (m = 0; m < 4; m++) { + c_temp = user_data[j]; + user_data[j++] = user_data[k]; + user_data[k--] = c_temp; + } + } +} + +#ifdef DUMP_USER_DATA +static void push_to_buf(struct vdec_mpeg12_hw_s *hw, + u8 *pdata, + int len, + struct userdata_meta_info_t *pmeta, + u32 reference) +{ + u32 *pLen; + int info_cnt; + u8 *pbuf_end; + + if (!hw->user_data_dump_buf) + return; + + if (hw->bskip) { + pr_info("over size, skip\n"); + return; + } + info_cnt = 0; + pLen = (u32 *)hw->pdump_buf_cur_start; + + *pLen = len; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->duration; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->flags; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts_valid; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + + *pLen = hw->n_userdata_id; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = reference; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + pbuf_end = hw->userdata_info.data_buf_end; + if (pdata + len > pbuf_end) { + int first_section_len; + + first_section_len = pbuf_end - pdata; + memcpy(hw->pdump_buf_cur_start, pdata, first_section_len); + pdata = (u8 *)hw->userdata_info.data_buf; + hw->pdump_buf_cur_start += first_section_len; + 
memcpy(hw->pdump_buf_cur_start, pdata, len - first_section_len); + hw->pdump_buf_cur_start += len - first_section_len; + } else { + memcpy(hw->pdump_buf_cur_start, pdata, len); + hw->pdump_buf_cur_start += len; + } + + hw->total_len += len + info_cnt * sizeof(u32); + if (hw->total_len >= MAX_USER_DATA_SIZE-4096) + hw->bskip = 1; +} + +static void dump_userdata_info(struct vdec_mpeg12_hw_s *hw, + void *puser_data, + int len, + struct userdata_meta_info_t *pmeta, + u32 reference) +{ + u8 *pstart; + + pstart = (u8 *)puser_data; + +#ifdef DUMP_HEAD_INFO_DATA + push_to_buf(hw, pstart, len, pmeta, reference); +#else + push_to_buf(hw, pstart+8, len - 8, pmeta, reference); +#endif +} + + +static void print_data(unsigned char *pdata, + int len, + unsigned int flag, + unsigned int duration, + unsigned int vpts, + unsigned int vpts_valid, + int rec_id, + u32 reference) +{ + int nLeft; + + nLeft = len; + + pr_info("%d len:%d, flag:0x%x, dur:%d, vpts:0x%x, valid:%d, refer:%d\n", + rec_id, len, flag, + duration, vpts, vpts_valid, + reference); + while (nLeft >= 16) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7], + pdata[8], pdata[9], pdata[10], pdata[11], + pdata[12], pdata[13], pdata[14], pdata[15]); + nLeft -= 16; + pdata += 16; + } + + + while (nLeft > 0) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } +} + +static void dump_data(u8 *pdata, + unsigned int user_data_length, + unsigned int flag, + unsigned int duration, + unsigned int vpts, + unsigned int vpts_valid, + int rec_id, + u32 reference) +{ + unsigned char szBuf[256]; + + + memset(szBuf, 0, 256); + memcpy(szBuf, pdata, user_data_length); + + aml_swap_data(szBuf, user_data_length); + + print_data(szBuf, + user_data_length, + flag, + duration, + vpts, + vpts_valid, 
+ rec_id, + reference); +} + + +static void show_user_data_buf(struct vdec_mpeg12_hw_s *hw) +{ + u8 *pbuf; + int len; + unsigned int flag; + unsigned int duration; + unsigned int vpts; + unsigned int vpts_valid; + int rec_id; + u32 reference; + + pr_info("show user data buf\n"); + pbuf = hw->user_data_dump_buf; + + while (pbuf < hw->pdump_buf_cur_start) { + u32 *pLen; + + pLen = (u32 *)pbuf; + + len = *pLen; + pLen++; + pbuf += sizeof(u32); + + duration = *pLen; + pLen++; + pbuf += sizeof(u32); + + flag = *pLen; + pLen++; + pbuf += sizeof(u32); + + vpts = *pLen; + pLen++; + pbuf += sizeof(u32); + + vpts_valid = *pLen; + pLen++; + pbuf += sizeof(u32); + + rec_id = *pLen; + pLen++; + pbuf += sizeof(u32); + + reference = *pLen; + pLen++; + pbuf += sizeof(u32); + + + dump_data(pbuf, len, flag, duration, + vpts, vpts_valid, rec_id, reference); + pbuf += len; + msleep(30); + } +} + +static int amvdec_mmpeg12_init_userdata_dump(struct vdec_mpeg12_hw_s *hw) +{ + hw->user_data_dump_buf = kmalloc(MAX_USER_DATA_SIZE, GFP_KERNEL); + if (hw->user_data_dump_buf) + return 1; + else + return 0; +} + +static void amvdec_mmpeg12_uninit_userdata_dump(struct vdec_mpeg12_hw_s *hw) +{ + if (hw->user_data_dump_buf) { + show_user_data_buf(hw); + kfree(hw->user_data_dump_buf); + hw->user_data_dump_buf = NULL; + } +} + +static void reset_user_data_buf(struct vdec_mpeg12_hw_s *hw) +{ + hw->total_len = 0; + hw->pdump_buf_cur_start = hw->user_data_dump_buf; + hw->bskip = 0; + hw->n_userdata_id = 0; +} +#endif + +static void user_data_ready_notify(struct vdec_mpeg12_hw_s *hw, + u32 pts, u32 pts_valid) +{ + struct mmpeg2_userdata_record_t *p_userdata_rec; + int i; + + if (hw->wait_for_udr_send) { + for (i = 0; i < hw->cur_ud_idx; i++) { + mutex_lock(&hw->userdata_mutex); + + + p_userdata_rec = hw->userdata_info.records + + hw->userdata_info.write_index; + + hw->ud_record[i].meta_info.vpts_valid = pts_valid; + hw->ud_record[i].meta_info.vpts = pts; + + *p_userdata_rec = hw->ud_record[i]; +#ifdef 
DUMP_USER_DATA
			dump_userdata_info(hw,
				hw->userdata_info.data_buf + p_userdata_rec->rec_start,
				p_userdata_rec->rec_len,
				&p_userdata_rec->meta_info,
				hw->reference[i]);
			hw->n_userdata_id++;
#endif
/*
			pr_info("notify: rec_start:%d, rec_len:%d, wi:%d, reference:%d\n",
				p_userdata_rec->rec_start,
				p_userdata_rec->rec_len,
				hw->userdata_info.write_index,
				hw->reference[i]);
*/
			/* advance the FIFO write index with wrap-around */
			hw->userdata_info.write_index++;
			if (hw->userdata_info.write_index >= USERDATA_FIFO_NUM)
				hw->userdata_info.write_index = 0;

			mutex_unlock(&hw->userdata_mutex);

			vdec_wakeup_userdata_poll(hw_to_vdec(hw));
		}
		hw->wait_for_udr_send = 0;
		hw->cur_ud_idx = 0;
	}
	/* snapshot write pointers so a later drop can roll back to here */
	hw->notify_ucode_cc_last_wp = hw->ucode_cc_last_wp;
	hw->notify_data_cc_last_wp = hw->userdata_info.last_wp;
}

/*
 * Copy the oldest userdata record out of the driver's ring buffer into the
 * caller-supplied (userspace) buffer described by @puserdata_para.
 *
 * Returns 1 when a record (or part of one) was delivered, 0 when the FIFO
 * is empty.  Four cases are handled: record fits / does not fit in the
 * destination, crossed with the record wrapping / not wrapping around the
 * end of the circular data_buf.
 *
 * copy_to_user() returns the number of bytes NOT copied; on a partial copy
 * the record's rec_start/rec_len are adjusted so the remainder can be
 * fetched by a subsequent call, and the read index is only advanced once a
 * copy completes fully (copy_ok).
 */
static int vmmpeg2_user_data_read(struct vdec_s *vdec,
	struct userdata_param_t *puserdata_para)
{
	struct vdec_mpeg12_hw_s *hw = NULL;
	int rec_ri, rec_wi;
	int rec_len;
	u8 *rec_data_start;
	u8 *pdest_buf;
	struct mmpeg2_userdata_record_t *p_userdata_rec;
	u32 data_size;
	u32 res;
	int copy_ok = 1;

	hw = (struct vdec_mpeg12_hw_s *)vdec->private;

	pdest_buf = puserdata_para->pbuf_addr;

	mutex_lock(&hw->userdata_mutex);

/*
	pr_info("ri = %d, wi = %d\n",
		hw->userdata_info.read_index,
		hw->userdata_info.write_index);
*/
	rec_ri = hw->userdata_info.read_index;
	rec_wi = hw->userdata_info.write_index;

	/* empty FIFO */
	if (rec_ri == rec_wi) {
		mutex_unlock(&hw->userdata_mutex);
		return 0;
	}

	p_userdata_rec = hw->userdata_info.records + rec_ri;

	rec_len = p_userdata_rec->rec_len;
	rec_data_start = p_userdata_rec->rec_start + hw->userdata_info.data_buf;
/*
	pr_info("ri:%d, wi:%d, rec_len:%d, rec_start:%d, buf_len:%d\n",
		rec_ri, rec_wi,
		p_userdata_rec->rec_len,
		p_userdata_rec->rec_start,
		puserdata_para->buf_len);
*/
	if (rec_len <= puserdata_para->buf_len) {
		/* dvb user data buffer is enought to
		   copy the whole recored. */
		data_size = rec_len;
		if (rec_data_start + data_size
			> hw->userdata_info.data_buf_end) {
			/* record wraps: copy tail-of-buffer, then head */
			int first_section_len;

			first_section_len = hw->userdata_info.buf_len -
				p_userdata_rec->rec_start;
			res = (u32)copy_to_user((void *)pdest_buf,
				(void *)rec_data_start,
				first_section_len);
			if (res) {
				/* partial copy of the first section: shrink
				 * the record to the undelivered remainder */
				pr_info("p1 read not end res=%d, request=%d\n",
					res, first_section_len);
				copy_ok = 0;

				p_userdata_rec->rec_len -=
					first_section_len - res;
				p_userdata_rec->rec_start +=
					first_section_len - res;
				puserdata_para->data_size =
					first_section_len - res;
			} else {
				/* first section done; copy the wrapped part
				 * from the start of data_buf */
				res = (u32)copy_to_user(
					(void *)(pdest_buf+first_section_len),
					(void *)hw->userdata_info.data_buf,
					data_size - first_section_len);
				if (res) {
					pr_info("p2 read not end res=%d, request=%d\n",
						res, data_size);
					copy_ok = 0;
				}
				p_userdata_rec->rec_len -=
					data_size - res;
				/* rec_start now measured from data_buf[0]
				 * since the record wrapped */
				p_userdata_rec->rec_start =
					data_size - first_section_len - res;
				puserdata_para->data_size =
					data_size - res;
			}
		} else {
			/* contiguous record, single copy */
			res = (u32)copy_to_user((void *)pdest_buf,
				(void *)rec_data_start,
				data_size);
			if (res) {
				pr_info("p3 read not end res=%d, request=%d\n",
					res, data_size);
				copy_ok = 0;
			}
			p_userdata_rec->rec_len -= data_size - res;
			p_userdata_rec->rec_start += data_size - res;
			puserdata_para->data_size = data_size - res;
		}

		/* whole record delivered: consume the FIFO slot */
		if (copy_ok) {
			hw->userdata_info.read_index++;
			if (hw->userdata_info.read_index >= USERDATA_FIFO_NUM)
				hw->userdata_info.read_index = 0;
		}
	} else {
		/* dvb user data buffer is not enought
		   to copy the whole recored. */
		data_size = puserdata_para->buf_len;
		if (rec_data_start + data_size
			> hw->userdata_info.data_buf_end) {
			int first_section_len;

			first_section_len = hw->userdata_info.buf_len -
				p_userdata_rec->rec_start;
			res = (u32)copy_to_user((void *)pdest_buf,
				(void *)rec_data_start,
				first_section_len);
			if (res) {
				pr_info("p4 read not end res=%d, request=%d\n",
					res, first_section_len);
				copy_ok = 0;
				p_userdata_rec->rec_len -=
					first_section_len - res;
				p_userdata_rec->rec_start +=
					first_section_len - res;
				puserdata_para->data_size =
					first_section_len - res;
			} else {
				/* first secton copy is ok*/
				res = (u32)copy_to_user(
					(void *)(pdest_buf+first_section_len),
					(void *)hw->userdata_info.data_buf,
					data_size - first_section_len);
				if (res) {
					pr_info("p5 read not end res=%d, request=%d\n",
						res,
						data_size - first_section_len);
					copy_ok = 0;
				}

				p_userdata_rec->rec_len -=
					data_size - res;
				p_userdata_rec->rec_start =
					data_size - first_section_len - res;
				puserdata_para->data_size =
					data_size - res;
			}
		} else {
			res = (u32)copy_to_user((void *)pdest_buf,
				(void *)rec_data_start,
				data_size);
			if (res) {
				pr_info("p6 read not end res=%d, request=%d\n",
					res, data_size);
				copy_ok = 0;
			}

			p_userdata_rec->rec_len -= data_size - res;
			p_userdata_rec->rec_start += data_size - res;
			puserdata_para->data_size = data_size - res;
		}

		/* NOTE(review): read_index advances after a *full* copy of
		 * the truncated chunk even though rec_len is still non-zero
		 * here; the caller is expected to re-read the shrunken
		 * record -- confirm against the dvb reader. */
		if (copy_ok) {
			hw->userdata_info.read_index++;
			if (hw->userdata_info.read_index >= USERDATA_FIFO_NUM)
				hw->userdata_info.read_index = 0;
		}

	}
	puserdata_para->meta_info = p_userdata_rec->meta_info;

	/* records still queued, accounting for index wrap-around */
	if (hw->userdata_info.read_index <= hw->userdata_info.write_index)
		puserdata_para->meta_info.records_in_que =
			hw->userdata_info.write_index -
			hw->userdata_info.read_index;
	else
		puserdata_para->meta_info.records_in_que =
			hw->userdata_info.write_index +
			USERDATA_FIFO_NUM -
			hw->userdata_info.read_index;

	/* interface version 0.0.0.1 */
	puserdata_para->version = (0<<24|0<<16|0<<8|1);

	mutex_unlock(&hw->userdata_mutex);

	return 1;
}

/*
 * Reset the userdata FIFO indices (called on stream reset / init).
 * @bInit additionally clears last_wp, i.e. forgets all buffered payload.
 */
static void vmmpeg2_reset_userdata_fifo(struct vdec_s *vdec, int bInit)
{
	struct vdec_mpeg12_hw_s *hw = NULL;

	hw = (struct vdec_mpeg12_hw_s *)vdec->private;

	if (hw) {
		mutex_lock(&hw->userdata_mutex);
		pr_info("mpeg2_reset_userdata_fifo: bInit: %d, ri: %d, wi: %d\n",
			bInit,
			hw->userdata_info.read_index,
			hw->userdata_info.write_index);
		hw->userdata_info.read_index = 0;
		hw->userdata_info.write_index = 0;

		if (bInit)
			hw->userdata_info.last_wp = 0;
		mutex_unlock(&hw->userdata_mutex);
	}
}

/* Thin adapter: wake userdata poll() waiters for this vdec instance. */
static void vmmpeg2_wakeup_userdata_poll(struct vdec_s *vdec)
{
	amstream_wakeup_userdata_poll(vdec);
}

/*
#define PRINT_HEAD_INFO
*/
/*
 * Workqueue handler: drain one CC/userdata package written by the ucode
 * into the CC buffer, parse its 8-byte header, and append the payload to
 * the driver's circular userdata buffer as a new ud_record.
 *
 * Protocol (as used below): AV_SCRATCH_J carries the ucode CC write
 * pointer in bits [14:0] plus flags in the high bits (bit 16 = request,
 * bits 28/29 = field order, bits 30+ = position); writing 0 to
 * AV_SCRATCH_J acks the package.  The parsed header supplies picture
 * type/reference used to tag meta_info.flags.
 */
static void userdata_push_do_work(struct work_struct *work)
{
	u32 reg;
	u8 *pdata;
	u8 *psrc_data;
	u8 head_info[8];
	struct userdata_meta_info_t meta_info;
	u32 wp;
	u32 index;
	u32 picture_struct;
	u32 reference;
	u32 picture_type;
	u32 temp;
	u32 data_length;
	u32 data_start;
	int i;
	u32 offset;
	u32 cur_wp;
#ifdef PRINT_HEAD_INFO
	u8 *ptype_str;
#endif
	struct mmpeg2_userdata_record_t *pcur_ud_rec;

	struct vdec_mpeg12_hw_s *hw = container_of(work,
		struct vdec_mpeg12_hw_s, userdata_push_work);

	memset(&meta_info, 0, sizeof(meta_info));

	meta_info.duration = hw->frame_dur;

	reg = READ_VREG(AV_SCRATCH_J);
	hw->userdata_wp_ctx = reg & (~(1<<16));
	meta_info.flags = ((reg >> 30) << 1);
	meta_info.flags |= (VFORMAT_MPEG12 << 3);
	/* check top_field_first flag */
	if ((reg >> 28) & 0x1) {
		meta_info.flags |= (1 << 10);
		meta_info.flags |= (((reg >> 29) & 0x1) << 11);
	}

	cur_wp = reg & 0x7fff;
	/* empty package or write pointer out of CC-buffer range: ack+drop */
	if (cur_wp == hw->ucode_cc_last_wp || (cur_wp >= AUX_BUF_ALIGN(CCBUF_SIZE))) {
		debug_print(DECODE_ID(hw), 0,
			"Null or Over size user data package: wp = %d\n", cur_wp);
		WRITE_VREG(AV_SCRATCH_J, 0);
		return;
	}

	/* record table full: ack+drop and restart the batch */
	if (hw->cur_ud_idx >= MAX_UD_RECORDS) {
		debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
			"UD Records over: %d, skip it\n", MAX_UD_RECORDS);
		WRITE_VREG(AV_SCRATCH_J, 0);
		hw->cur_ud_idx = 0;
		return;
	}

	/* ucode write pointer wrapped */
	if (cur_wp < hw->ucode_cc_last_wp)
		hw->ucode_cc_last_wp = 0;

	offset = READ_VREG(AV_SCRATCH_I);

	/* NOTE(review): ccbuf_phyAddress_virt is NULL-checked just below but
	 * is passed to the flush unconditionally here -- confirm the flush
	 * helper tolerates NULL, or that the buffer is guaranteed mapped by
	 * the time this work runs. */
	codec_mm_dma_flush(
		hw->ccbuf_phyAddress_virt,
		CCBUF_SIZE,
		DMA_FROM_DEVICE);

	mutex_lock(&hw->userdata_mutex);
	if (hw->ccbuf_phyAddress_virt) {
		pdata = (u8 *)hw->ccbuf_phyAddress_virt + hw->ucode_cc_last_wp;
		memcpy(head_info, pdata, 8);
	} else
		memset(head_info, 0, 8);
	mutex_unlock(&hw->userdata_mutex);
	aml_swap_data(head_info, 8);

	/* 8-byte big-endian header: wp, index, (ref|type), picture_struct */
	wp = (head_info[0] << 8 | head_info[1]);
	index = (head_info[2] << 8 | head_info[3]);

	picture_struct = (head_info[6] << 8 | head_info[7]);
	temp = (head_info[4] << 8 | head_info[5]);
	reference = temp & 0x3FF;
	picture_type = (temp >> 10) & 0x7;

	if (debug_enable & PRINT_FLAG_USERDATA_DETAIL)
		pr_info("index:%d, wp:%d, ref:%d, type:%d, struct:0x%x, u_last_wp:0x%x\n",
			index, wp, reference,
			picture_type, picture_struct,
			hw->ucode_cc_last_wp);

	/* encode the picture coding type into meta_info.flags bits [9:7] */
	switch (picture_type) {
	case 1:
		/* pr_info("I type, pos:%d\n",
			(meta_info.flags>>1)&0x3); */
		meta_info.flags |= (1<<7);
#ifdef PRINT_HEAD_INFO
		ptype_str = " I";
#endif
		break;
	case 2:
		/* pr_info("P type, pos:%d\n",
			(meta_info.flags>>1)&0x3); */
		meta_info.flags |= (2<<7);
#ifdef PRINT_HEAD_INFO
		ptype_str = " P";
#endif
		break;
	case 3:
		/* pr_info("B type, pos:%d\n",
			(meta_info.flags>>1)&0x3); */
		meta_info.flags |= (3<<7);
#ifdef PRINT_HEAD_INFO
		ptype_str = " B";
#endif
		break;
	case 4:
		/* pr_info("D type, pos:%d\n",
			(meta_info.flags>>1)&0x3); */
		meta_info.flags |= (4<<7);
#ifdef PRINT_HEAD_INFO
		ptype_str = " D";
#endif
		break;
	default:
		/* pr_info("Unknown type:0x%x, pos:%d\n",
			pheader->picture_coding_type,
			(meta_info.flags>>1)&0x3); */
#ifdef PRINT_HEAD_INFO
		ptype_str = " U";
#endif
		break;
	}
#ifdef PRINT_HEAD_INFO
	pr_info("ref:%d, type:%s, ext:%d, first:%d, data_length:%d\n",
		reference, ptype_str,
		(reg >> 30),
		(reg >> 28)&0x3,
		reg & 0xffff);
#endif
	data_length = cur_wp - hw->ucode_cc_last_wp;
	data_start = reg & 0xffff;
	psrc_data = (u8 *)hw->ccbuf_phyAddress_virt + hw->ucode_cc_last_wp;

	/* byte-copy into the circular userdata buffer, wrapping at the end */
	pdata = hw->userdata_info.data_buf + hw->userdata_info.last_wp;
	for (i = 0; i < data_length && hw->ccbuf_phyAddress_virt != NULL && psrc_data; i++) {
		*pdata++ = *psrc_data++;
		if (pdata >= hw->userdata_info.data_buf_end)
			pdata = hw->userdata_info.data_buf;
	}

	/* stage the record; user_data_ready_notify() publishes it later */
	pcur_ud_rec = hw->ud_record + hw->cur_ud_idx;

	pcur_ud_rec->meta_info = meta_info;
	pcur_ud_rec->rec_start = hw->userdata_info.last_wp;
	pcur_ud_rec->rec_len = data_length;

	hw->userdata_info.last_wp += data_length;
	if (hw->userdata_info.last_wp >= USER_DATA_SIZE)
		hw->userdata_info.last_wp %= USER_DATA_SIZE;

	hw->wait_for_udr_send = 1;

	hw->ucode_cc_last_wp = cur_wp;

	if (debug_enable & PRINT_FLAG_USERDATA_DETAIL)
		pr_info("cur_wp:%d, rec_start:%d, rec_len:%d\n",
			cur_wp,
			pcur_ud_rec->rec_start,
			pcur_ud_rec->rec_len);

#ifdef DUMP_USER_DATA
	hw->reference[hw->cur_ud_idx] = reference;
#endif

	hw->cur_ud_idx++;
	/* ack the package to the ucode */
	WRITE_VREG(AV_SCRATCH_J, 0);
}

/*
 * Roll back any userdata staged since the last user_data_ready_notify()
 * (e.g. when the decode of the owning picture is aborted/retried), using
 * the write-pointer snapshots taken at notify time.
 */
void userdata_pushed_drop(struct vdec_mpeg12_hw_s *hw)
{
	hw->userdata_info.last_wp = hw->notify_data_cc_last_wp;
	hw->ucode_cc_last_wp = hw->notify_ucode_cc_last_wp;
	hw->cur_ud_idx = 0;
	hw->wait_for_udr_send = 0;

}

/*
 * Refresh the decoder status snapshot (gvs) from the live hw state:
 * dimensions, duration, derived frame rate (rounded to nearest integer
 * from the 96 kHz duration), ratio control and counters.
 */
static inline void hw_update_gvs(struct vdec_mpeg12_hw_s *hw)
{
	if (hw->gvs.frame_height != hw->frame_height) {
		hw->gvs.frame_width = hw->frame_width;
		hw->gvs.frame_height = hw->frame_height;
	}
	if (hw->gvs.frame_dur != hw->frame_dur) {
		hw->gvs.frame_dur = hw->frame_dur;
		if (hw->frame_dur != 0)
			/* round 96000/frame_dur to the nearest integer fps */
			hw->gvs.frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ?
				96000 / hw->frame_dur : (96000 / hw->frame_dur +1);
		else
			hw->gvs.frame_rate = -1;
	}
	if (hw->gvs.ratio_control != hw->ratio_control)
		hw->gvs.ratio_control = hw->ratio_control;

	hw->gvs.status = hw->stat;
	hw->gvs.error_count = hw->gvs.error_frame_count;
	hw->gvs.drop_frame_count = hw->drop_frame_count;

}

/*
 * Turn a decoded picture into one or more vframes and queue them for
 * display (or recycle them when the picture must be dropped).
 *
 * A progressive picture emits one frame-typed vframe; an interlaced one
 * emits two (or three, with repeat-first-field) field-typed vframes with
 * duration split accordingly.  Drop conditions: error_skip() policy, or a
 * non-I picture before the first good I frame / while in i_only mode.
 * Returns 0 on success, -1 when no free vframe slot is available (decoder
 * is put into DEC_RESULT_ERROR and the work item is scheduled).
 */
static int prepare_display_buf(struct vdec_mpeg12_hw_s *hw,
	struct pic_info_t *pic)
{
	u32 field_num = 0, i;
	u32 first_field_type = 0, type = 0;
	struct vframe_s *vf = NULL;
	u32 index = pic->index;
	u32 info = pic->buffer_info;
	struct vdec_s *vdec = hw_to_vdec(hw);
	struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx;
	struct vdec_v4l2_buffer *fb = NULL;
	ulong nv_order = VIDTYPE_VIU_NV21;
	bool pb_skip = false;

	/* swap uv */
	if (hw->is_used_v4l) {
		if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) ||
			(v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M))
			nv_order = VIDTYPE_VIU_NV12;
	}

#ifdef NV21
	type = nv_order;
#endif
	/* i_only mode: everything but I pictures is skipped below */
	if (hw->i_only) {
		pb_skip = 1;
	}

	user_data_ready_notify(hw, pic->pts, pic->pts_valid);

	if (hw->frame_prog & PICINFO_PROG) {
		field_num = 1;
		type |= VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | nv_order;
	} else {
#ifdef INTERLACE_SEQ_ALWAYS
		/* once an interlace seq, force interlace, to make di easy. */
		hw->dec_control |= DEC_CONTROL_FLAG_FORCE_SEQ_INTERLACE;
#endif
		hw->frame_rpt_state = FRAME_REPEAT_NONE;

		first_field_type = (info & PICINFO_TOP_FIRST) ?
			VIDTYPE_INTERLACE_TOP : VIDTYPE_INTERLACE_BOTTOM;
		/* repeat-first-field yields a third field vframe */
		field_num = (info & PICINFO_RPT_FIRST) ? 3 : 2;
	}

	if ((hw->is_used_v4l) &&
		(vdec->prog_only))
		field_num = 1;

	for (i = 0; i < field_num; i++) {
		if (kfifo_get(&hw->newframe_q, &vf) == 0) {
			debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
				"fatal error, no available buffer slot.");
			hw->dec_result = DEC_RESULT_ERROR;
			vdec_schedule_work(&hw->work);
			return -1;
		}

		if (hw->is_used_v4l) {
			vf->v4l_mem_handle
				= hw->pics[index].v4l_ref_buf_addr;
			fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
			debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
				"[%d] %s(), v4l mem handle: 0x%lx\n",
				((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id,
				__func__, vf->v4l_mem_handle);
		}

		hw->vfbuf_use[index]++;
		vf->index = index;
		set_frame_info(hw, vf);
		if (field_num > 1) {
			/* split the frame duration across the fields */
			vf->duration = vf->duration / field_num;
			vf->duration_pulldown = (field_num == 3) ?
				(vf->duration >> 1):0;
			if (i > 0)
				type = nv_order;
			if (i == 1) /* second field*/
				type |= (first_field_type == VIDTYPE_INTERLACE_TOP) ?
					VIDTYPE_INTERLACE_BOTTOM : VIDTYPE_INTERLACE_TOP;
			else
				type |= (first_field_type == VIDTYPE_INTERLACE_TOP) ?
					VIDTYPE_INTERLACE_TOP : VIDTYPE_INTERLACE_BOTTOM;
		} else {
			if ((hw->seqinfo & SEQINFO_EXT_AVAILABLE) &&
				(hw->seqinfo & SEQINFO_PROG)) {
				/* progressive sequence: RFF doubles/triples
				 * the display duration */
				if (info & PICINFO_RPT_FIRST) {
					if (info & PICINFO_TOP_FIRST)
						vf->duration *= 3;
					else
						vf->duration *= 2;
				}
				vf->duration_pulldown = 0;
			} else {
				vf->duration_pulldown =
					(info & PICINFO_RPT_FIRST) ?
						vf->duration >> 1 : 0;
			}
		}
		vf->duration += vf->duration_pulldown;
		vf->type = type;
		vf->signal_type = hw->reg_signal_type;
		vf->orientation = 0;
		/* only the first emitted field carries the timestamp */
		if (i > 0) {
			vf->pts = 0;
			vf->pts_us64 = 0;
			vf->timestamp = 0;
		} else {
			vf->pts = (pic->pts_valid) ? pic->pts : 0;
			vf->pts_us64 = (pic->pts_valid) ? pic->pts64 : 0;
			vf->timestamp = pic->timestamp;
		}
		vf->type_original = vf->type;

		if ((error_skip(hw, pic->buffer_info, vf)) ||
			(((hw->first_i_frame_ready == 0) || pb_skip) &&
			((PICINFO_TYPE_MASK & pic->buffer_info) !=
				PICINFO_TYPE_I))) {
			/* drop path: count, release the buffer reference and
			 * recycle the vframe back to the free queue */
			unsigned long flags;
			hw->drop_frame_count++;
			if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) {
				hw->gvs.i_lost_frames++;
			} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P) {
				hw->gvs.p_lost_frames++;
			} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_B) {
				hw->gvs.b_lost_frames++;
			}
			/* Though we drop it, it is still an error frame, count it.
			 * Becase we've counted the error frame in vdec_count_info
			 * function, avoid count it twice.
			 */
			if (!(info & PICINFO_ERROR)) {
				hw->gvs.error_frame_count++;
				if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) {
					hw->gvs.i_concealed_frames++;
				} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P) {
					hw->gvs.p_concealed_frames++;
				} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_B) {
					hw->gvs.b_concealed_frames++;
				}
			}
			hw->vfbuf_use[index]--;
			spin_lock_irqsave(&hw->lock, flags);
			kfifo_put(&hw->newframe_q,
				(const struct vframe_s *)vf);
			spin_unlock_irqrestore(&hw->lock, flags);
		} else {
			/* display path */
			debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO,
				"%s, vf: %lx, num[%d]: %d(%c), dur: %d, type: %x, pts: %d(%lld)\n",
				__func__, (ulong)vf, i, hw->disp_num, GET_SLICE_TYPE(info),
				vf->duration, vf->type, vf->pts, vf->pts_us64);
			hw->disp_num++;
			if (i == 0) {
				decoder_do_frame_check(vdec, vf);
				hw_update_gvs(hw);
				vdec_fill_vdec_frame(vdec, &hw->vframe_qos,
					&hw->gvs, vf, pic->hw_decode_time);
			}
			vdec->vdec_fps_detec(vdec->id);
			vf->mem_handle =
				decoder_bmmu_box_get_mem_handle(
					hw->mm_blk_handle, index);
			if (!vdec->vbuf.use_ptsserv && vdec_stream_based(vdec)) {
				/* offset for tsplayer pts lookup */
				if (i == 0) {
					vf->pts_us64 =
						(((u64)vf->duration << 32) &
						0xffffffff00000000) | pic->offset;
					vf->pts = 0;
				} else {
					vf->pts_us64 = (u64)-1;
					vf->pts = 0;
				}
			}
			vdec_vframe_ready(vdec, vf);
			kfifo_put(&hw->display_q, (const struct vframe_s *)vf);
			ATRACE_COUNTER(hw->pts_name, vf->pts);
			ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q));
			ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q));
			/* if (hw->disp_num == 1) { */
			if (hw->kpi_first_i_decoded == 0) {
				hw->kpi_first_i_decoded = 1;
				debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
					"[vdec_kpi][%s] First I frame decoded.\n",
					__func__);
			}
			if (without_display_mode == 0) {
				if (hw->is_used_v4l) {
					if (v4l2_ctx->is_stream_off) {
						vmpeg_vf_put(vmpeg_vf_get(vdec), vdec);
					} else {
						fb->task->submit(fb->task, TASK_TYPE_DEC);
					}
				} else {
					vf_notify_receiver(vdec->vf_provider_name,
						VFRAME_EVENT_PROVIDER_VFRAME_READY,
						NULL);
				}
			} else
				vmpeg_vf_put(vmpeg_vf_get(vdec), vdec);


		}
	}
	return 0;
}

/*
 * Apply the dec_control override flags: well-known broadcast resolutions
 * and frame durations (e.g. 720x576@25, 704x480@30) that must be treated
 * as interlaced regardless of what the stream claims.
 */
static void force_interlace_check(struct vdec_mpeg12_hw_s *hw)
{
	if ((hw->dec_control &
		DEC_CONTROL_FLAG_FORCE_2500_720_576_INTERLACE) &&
		(hw->frame_width == 720) &&
		(hw->frame_height == 576) &&
		(hw->frame_dur == 3840)) {
		hw->frame_prog = 0;
	} else if ((hw->dec_control
		& DEC_CONTROL_FLAG_FORCE_3000_704_480_INTERLACE) &&
		(hw->frame_width == 704) &&
		(hw->frame_height == 480) &&
		(hw->frame_dur == 3200)) {
		hw->frame_prog = 0;
	} else if ((hw->dec_control
		& DEC_CONTROL_FLAG_FORCE_2500_704_576_INTERLACE) &&
		(hw->frame_width == 704) &&
		(hw->frame_height == 576) &&
		(hw->frame_dur == 3840)) {
		hw->frame_prog = 0;
	} else if ((hw->dec_control
		& DEC_CONTROL_FLAG_FORCE_2500_544_576_INTERLACE) &&
		(hw->frame_width == 544) &&
		(hw->frame_height == 576) &&
		(hw->frame_dur == 3840)) {
		hw->frame_prog = 0;
	} else if ((hw->dec_control
		& DEC_CONTROL_FLAG_FORCE_2500_480_576_INTERLACE) &&
		(hw->frame_width == 480) &&
		(hw->frame_height == 576) &&
		(hw->frame_dur == 3840)) {
		hw->frame_prog = 0;
	} else if (hw->dec_control
		&
		DEC_CONTROL_FLAG_FORCE_SEQ_INTERLACE) {
		hw->frame_prog = 0;
	}

}

/*
 * Slide the two-slot reference window after decoding picture @index.
 *
 * Returns the index of the picture that should be *displayed* now:
 * - first reference ever: the new picture itself (shown immediately);
 * - second reference: hw->buf_num (sentinel meaning "nothing to show");
 * - steady state: the reference being retired (refs[0] before the slide),
 *   whose ref_use count is dropped here.
 */
static int update_reference(struct vdec_mpeg12_hw_s *hw,
	int index)
{
	hw->ref_use[index]++;
	if (hw->refs[1] == -1) {
		hw->refs[1] = index;
		/*
		 * first pic need output to show
		 * usecnt do not decrease.
		 */
	} else if (hw->refs[0] == -1) {
		hw->refs[0] = hw->refs[1];
		hw->refs[1] = index;
		/* second pic do not output */
		index = hw->buf_num;
	} else {
		hw->ref_use[hw->refs[0]]--;	//old ref0 ununsed
		hw->refs[0] = hw->refs[1];
		hw->refs[1] = index;
		index = hw->refs[0];
	}
	return index;
}

/* True when either current reference picture is flagged as erroneous
 * (used to propagate the error flag onto dependent B pictures). */
static bool is_ref_error(struct vdec_mpeg12_hw_s *hw)
{
	if ((hw->pics[hw->refs[0]].buffer_info & PICINFO_ERROR) ||
		(hw->pics[hw->refs[1]].buffer_info & PICINFO_ERROR))
		return 1;
	return 0;
}

/*
 * Fill a v4l picture-parameter block: visible size as reported, coded
 * size aligned to the 64x32 hardware granularity, dpb_size = current
 * buffer count.  Always returns 0.
 */
static int vmpeg2_get_ps_info(struct vdec_mpeg12_hw_s *hw, int width, int height, struct aml_vdec_ps_infos *ps)
{
	ps->visible_width = width;
	ps->visible_height = height;
	ps->coded_width = ALIGN(width, 64);
	ps->coded_height = ALIGN(height, 32);
	ps->dpb_size = hw->buf_num;

	return 0;
}

/*
 * Detect a v4l resolution change.  When ucode-driven parameter sets are
 * enabled and the incoming size differs from the current one, push the
 * new ps info to v4l, raise the res-change event, flush pending output
 * (EOS-style) and return 1; otherwise return 0.
 */
static int v4l_res_change(struct vdec_mpeg12_hw_s *hw, int width, int height)
{
	struct aml_vcodec_ctx *ctx =
		(struct aml_vcodec_ctx *)(hw->v4l2_ctx);
	int ret = 0;

	if (ctx->param_sets_from_ucode &&
		hw->res_ch_flag == 0) {
		struct aml_vdec_ps_infos ps;

		if ((hw->frame_width != 0 &&
			hw->frame_height != 0) &&
			(hw->frame_width != width ||
			hw->frame_height != height)) {
			debug_print(DECODE_ID(hw), 0,
				"v4l_res_change Pic Width/Height Change (%d,%d)=>(%d,%d)\n",
				hw->frame_width, hw->frame_height,
				width,
				height);
			vmpeg2_get_ps_info(hw, width, height, &ps);
			vdec_v4l_set_ps_infos(ctx, &ps);
			vdec_v4l_res_ch_event(ctx);
			hw->v4l_params_parsed = false;
			hw->res_ch_flag = 1;
			ctx->v4l_resolution_change = 1;
			/* drain everything decoded at the old resolution */
			hw->eos = 1;
			flush_output(hw);
			if (hw->is_used_v4l)
				notify_v4l_eos(hw_to_vdec(hw));

			ret = 1;
		}
	}

	return ret;
}


static
/*
 * Threaded half of the decoder interrupt.
 *
 * Dispatch, in order: ucode debug message (AV_SCRATCH_M), sequence header
 * notification (AV_SCRATCH_G, drives v4l parameter parsing / resolution
 * change), userdata package (AV_SCRATCH_J bit 16 -> userdata_push work),
 * then the MREG_BUFFEROUT decode events: data request, data empty, or a
 * finished picture which is validated, timestamped, run through the
 * reference window and handed to prepare_display_buf().
 */
irqreturn_t vmpeg12_isr_thread_fn(struct vdec_s *vdec, int irq)
{
	u32 reg, index, info, seqinfo, offset, pts, frame_size=0, tmp;
	u64 pts_us64 = 0;
	struct pic_info_t *new_pic, *disp_pic;
	struct vdec_mpeg12_hw_s *hw =
		(struct vdec_mpeg12_hw_s *)(vdec->private);

	/* ucode debug message channel */
	if (READ_VREG(AV_SCRATCH_M) != 0 &&
		(debug_enable & PRINT_FLAG_UCODE_DETAIL)) {

		debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
			"dbg %x: %x, level %x, wp %x, rp %x, cnt %x\n",
			READ_VREG(AV_SCRATCH_M), READ_VREG(AV_SCRATCH_N),
			READ_VREG(VLD_MEM_VIFIFO_LEVEL),
			READ_VREG(VLD_MEM_VIFIFO_WP),
			READ_VREG(VLD_MEM_VIFIFO_RP),
			READ_VREG(VIFF_BIT_CNT));
		WRITE_VREG(AV_SCRATCH_M, 0);
		return IRQ_HANDLED;
	}

	/* sequence header seen by the ucode */
	reg = READ_VREG(AV_SCRATCH_G);
	if (reg == 1) {
		if (hw->kpi_first_i_comming == 0) {
			hw->kpi_first_i_comming = 1;
			debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
				"[vdec_kpi][%s] First I frame coming.\n",
				__func__);
		}
		if (hw->is_used_v4l) {
			int frame_width = READ_VREG(MREG_PIC_WIDTH);
			int frame_height = READ_VREG(MREG_PIC_HEIGHT);
			if (!v4l_res_change(hw, frame_width, frame_height)) {
				struct aml_vcodec_ctx *ctx =
					(struct aml_vcodec_ctx *)(hw->v4l2_ctx);
				if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) {
					/* first params: report to v4l and
					 * re-run this input later (AGAIN) */
					struct aml_vdec_ps_infos ps;

					vmpeg2_get_ps_info(hw, frame_width, frame_height, &ps);
					hw->v4l_params_parsed = true;
					vdec_v4l_set_ps_infos(ctx, &ps);
					userdata_pushed_drop(hw);
					reset_process_time(hw);
					hw->dec_result = DEC_RESULT_AGAIN;
					vdec_schedule_work(&hw->work);
				} else {
					/* ack: let the ucode continue */
					WRITE_VREG(AV_SCRATCH_G, 0);
				}
			} else {
				/* resolution changed: retry after re-config */
				userdata_pushed_drop(hw);
				reset_process_time(hw);
				hw->dec_result = DEC_RESULT_AGAIN;
				vdec_schedule_work(&hw->work);
			}
		} else
			WRITE_VREG(AV_SCRATCH_G, 0);
		return IRQ_HANDLED;
	}

	/* userdata package pending: parse it from process context */
	reg = READ_VREG(AV_SCRATCH_J);
	if (reg & (1<<16)) {
		vdec_schedule_work(&hw->userdata_push_work);
		return IRQ_HANDLED;
	}

	reg = READ_VREG(MREG_BUFFEROUT);
	if (reg == MPEG12_DATA_REQUEST) {
		debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
			"%s: data request, bcnt=%x\n",
			__func__, READ_VREG(VIFF_BIT_CNT));
		if (vdec_frame_based(vdec)) {
			reset_process_time(hw);
			hw->dec_result = DEC_RESULT_GET_DATA;
			vdec_schedule_work(&hw->work);
		}
	} else if (reg == MPEG12_DATA_EMPTY) {
		/*timeout when decoding next frame*/
		debug_print(DECODE_ID(hw), PRINT_FLAG_VLD_DETAIL,
			"%s: Insufficient data, lvl=%x ctrl=%x bcnt=%x\n",
			__func__,
			READ_VREG(VLD_MEM_VIFIFO_LEVEL),
			READ_VREG(VLD_MEM_VIFIFO_CONTROL),
			READ_VREG(VIFF_BIT_CNT));

		if (vdec_frame_based(vdec)) {
			userdata_pushed_drop(hw);
			hw->dec_result = DEC_RESULT_DONE;
			vdec_schedule_work(&hw->work);
		} else {
			hw->dec_result = DEC_RESULT_AGAIN;
			vdec_schedule_work(&hw->work);
			userdata_pushed_drop(hw);
			reset_process_time(hw);
		}
		return IRQ_HANDLED;
	} else { /* MPEG12_PIC_DONE, MPEG12_SEQ_END */
		reset_process_time(hw);

		info = READ_VREG(MREG_PIC_INFO);
		offset = READ_VREG(MREG_FRAME_OFFSET);
		index = spec_to_index(hw, READ_VREG(REC_CANVAS_ADDR));
		seqinfo = READ_VREG(MREG_SEQ_INFO);

		/* report profile/level on change (seqinfo bits [14:12]/[11:8]) */
		if (((seqinfo >> 8) & 0xff) &&
			((seqinfo >> 12 & 0x7) != hw->profile_idc ||
			(seqinfo >> 8 & 0xf) != hw->level_idc)) {
			hw->profile_idc = seqinfo >> 12 & 0x7;
			hw->level_idc = seqinfo >> 8 & 0xf;
			vdec_set_profile_level(vdec, hw->profile_idc, hw->level_idc);
			debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
				"profile_idc: %d level_idc: %d\n",
				hw->profile_idc, hw->level_idc);
		}

		if ((info & PICINFO_PROG) == 0 &&
			(info & FRAME_PICTURE_MASK) != FRAME_PICTURE) {
			hw->first_i_frame_ready = 1; /* for field struct case*/
		}
		if (index >= hw->buf_num) {
			debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
				"mmpeg12: invalid buf index: %d\n", index);
			hw->dec_result = DEC_RESULT_ERROR;
			vdec_schedule_work(&hw->work);
			return IRQ_HANDLED;
		}
		hw->dec_num++;
		hw->dec_result = DEC_RESULT_DONE;
		new_pic = &hw->pics[index];
		if (vdec->mvfrm) {
			new_pic->frame_size = vdec->mvfrm->frame_size;
			new_pic->hw_decode_time =
				local_clock() - vdec->mvfrm->hw_decode_start;
		}
		/* clamp reported dimensions to the 1920x1088 hw maximum */
		tmp = READ_VREG(MREG_PIC_WIDTH);
		if ((tmp > 1920) || (tmp == 0)) {
			new_pic->width = 1920;
			hw->frame_width = 1920;
		} else {
			new_pic->width = tmp;
			hw->frame_width = tmp;
		}

		tmp = READ_VREG(MREG_PIC_HEIGHT);
		if ((tmp > 1088) || (tmp == 0)) {
			new_pic->height = 1088;
			hw->frame_height = 1088;
		} else {
			new_pic->height = tmp;
			hw->frame_height = tmp;
		}

		new_pic->buffer_info = info;
		new_pic->offset = offset;
		new_pic->index = index;
		/* I/P pictures take a real pts; B pictures never do */
		if (((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) ||
			((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P)) {
			if (hw->chunk) {
				new_pic->pts_valid = hw->chunk->pts_valid;
				new_pic->pts = hw->chunk->pts;
				new_pic->pts64 = hw->chunk->pts64;
				new_pic->timestamp = hw->chunk->timestamp;
				if (hw->last_chunk_pts == hw->chunk->pts) {
					new_pic->pts_valid = 0;
					debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO,
						"pts invalid\n");
				}
			} else {
				if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) {
					if (pts_lookup_offset_us64(PTS_TYPE_VIDEO, offset,
						&pts, &frame_size, 0, &pts_us64) == 0) {
						new_pic->pts_valid = true;
						new_pic->pts = pts;
						new_pic->pts64 = pts_us64;
					} else
						new_pic->pts_valid = false;
				}
			}
		} else {
			if (hw->chunk) {
				hw->last_chunk_pts = hw->chunk->pts;
				new_pic->timestamp = hw->chunk->timestamp;
			}
			new_pic->pts_valid = false;
		}

		debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
			"mmpeg12: new_pic=%d, ind=%d, info=%x, seq=%x, offset=%d\n",
			hw->dec_num, index, info, seqinfo, offset);

		hw->frame_prog = info & PICINFO_PROG;
		if ((seqinfo & SEQINFO_EXT_AVAILABLE) &&
			((seqinfo & SEQINFO_PROG) == 0))
			hw->frame_prog = 0;
		force_interlace_check(hw);

		/* B frames inherit the error flag from bad references */
		if (is_ref_error(hw)) {
			if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_B)
				new_pic->buffer_info |= PICINFO_ERROR;
		}

		if (((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) ||
			((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P)) {
			/* reference slide may redirect display to the
			 * retiring reference, or to none (== buf_num) */
			index = update_reference(hw, index);
		} else {
			/* drop b frame before reference pic ready */
			if (hw->refs[0] == -1)
				index = hw->buf_num;
		}
		vmpeg12_save_hw_context(hw, reg);

		if (index >= hw->buf_num) {
			/* nothing to display this round (dec_num == 2 is the
			 * expected second-reference case, not an error) */
			if (hw->dec_num != 2) {
				debug_print(DECODE_ID(hw), 0,
					"mmpeg12: drop pic num %d, type %c, index %d, offset %x\n",
					hw->dec_num, GET_SLICE_TYPE(info), index, offset);
				hw->dec_result = DEC_RESULT_ERROR;
			}
			vdec_schedule_work(&hw->work);
			return IRQ_HANDLED;
		}

		disp_pic = &hw->pics[index];
		info = hw->pics[index].buffer_info;
		/* suppress duplicate pts on the display picture */
		if (disp_pic->pts_valid && hw->lastpts64 == disp_pic->pts64)
			disp_pic->pts_valid = false;
		if (disp_pic->pts_valid)
			hw->lastpts64 = disp_pic->pts64;

		if (input_frame_based(hw_to_vdec(hw)))
			frame_size = new_pic->frame_size;

		fill_frame_info(hw, info, frame_size, new_pic->pts);

		if ((hw->first_i_frame_ready == 0) &&
			((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) &&
			((info & PICINFO_ERROR) == 0)) {
			hw->first_i_frame_ready = 1;
		}

		debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
			"mmpeg12: disp_pic=%d(%c), ind=%d, offst=%x, pts=(%d,%lld,%llx)(%d)\n",
			hw->disp_num, GET_SLICE_TYPE(info), index, disp_pic->offset,
			disp_pic->pts, disp_pic->pts64,
			disp_pic->timestamp, disp_pic->pts_valid);

		prepare_display_buf(hw, disp_pic);
		vdec_schedule_work(&hw->work);
	}

	return IRQ_HANDLED;
}

/*
 * Hard-irq half: update decode/conceal statistics from MREG_PIC_INFO,
 * ack the mailbox and defer the real work to the threaded handler.
 */
static irqreturn_t vmpeg12_isr(struct vdec_s *vdec, int irq)
{
	u32 info, offset;
	struct vdec_mpeg12_hw_s *hw =
		(struct vdec_mpeg12_hw_s *)(vdec->private);
	if (hw->eos)
		return IRQ_HANDLED;
	info = READ_VREG(MREG_PIC_INFO);
	offset = READ_VREG(MREG_FRAME_OFFSET);

	vdec_count_info(&hw->gvs, info & PICINFO_ERROR, offset);
	if (info &PICINFO_ERROR) {
		if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) {
			hw->gvs.i_concealed_frames++;
		} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P) {
			hw->gvs.p_concealed_frames++;
		} else if ((info & PICINFO_TYPE_MASK) ==
			PICINFO_TYPE_B) {
			hw->gvs.b_concealed_frames++;
		}
	}
	if (offset) {
		if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) {
			hw->gvs.i_decoded_frames++;
		} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P) {
			hw->gvs.p_decoded_frames++;
		} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_B) {
			hw->gvs.b_decoded_frames++;
		}
	}

	/* ack the mailbox interrupt, then run vmpeg12_isr_thread_fn */
	WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1);

	return IRQ_WAKE_THREAD;
}

/*
 * Deferred frame-rate hint: tell the vframe receiver the stream's frame
 * duration once (non-v4l path only), transitioning fr_hint_state.
 */
static void vmpeg12_notify_work(struct work_struct *work)
{
	struct vdec_mpeg12_hw_s *hw = container_of(work,
		struct vdec_mpeg12_hw_s, notify_work);
	struct vdec_s *vdec = hw_to_vdec(hw);

	if (!hw->is_used_v4l && vdec->fr_hint_state == VDEC_NEED_HINT) {
		vf_notify_receiver(vdec->vf_provider_name,
			VFRAME_EVENT_PROVIDER_FR_HINT,
			(void *)((unsigned long)hw->frame_dur));
		vdec->fr_hint_state = VDEC_HINTED;
	}
}

/*
 * Spin (with short sleeps) until the VLD fifo read pointer stops moving,
 * i.e. the hardware has finished its current search/parse, or ~1000
 * iterations have elapsed (then log and give up).
 */
static void wait_vmmpeg12_search_done(struct vdec_mpeg12_hw_s *hw)
{
	u32 vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP);
	int count = 0;

	do {
		usleep_range(100, 500);
		/* pointer unchanged across the sleep -> hw is idle */
		if (vld_rp == READ_VREG(VLD_MEM_VIFIFO_RP))
			break;
		if (count > 1000) {
			debug_print(DECODE_ID(hw), 0,
				"%s, count %d vld_rp 0x%x VLD_MEM_VIFIFO_RP 0x%x\n",
				__func__, count, vld_rp, READ_VREG(VLD_MEM_VIFIFO_RP));
			break;
		} else
			vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP);
		count++;
	} while (1);
}

/*
 * On EOS/flush: release the reference pictures and push the held-back
 * last reference (refs[1]) out for display.
 */
static void flush_output(struct vdec_mpeg12_hw_s *hw)
{
	int index = hw->refs[1];

	/* video only one frame need not flush. */
	if (hw->dec_num < 2)
		return;

	if ((hw->refs[0] >= 0) &&
		(hw->refs[0] < hw->buf_num))
		hw->ref_use[hw->refs[0]] = 0;

	if (index >= 0 && index < hw->buf_num) {
		hw->ref_use[index] = 0;
		prepare_display_buf(hw, &hw->pics[index]);
	}
}

/*
 * Queue an empty EOS vframe toward v4l so userspace sees end-of-stream.
 * If no decoded buffer is free, a fresh fb is allocated from the v4l
 * fb_ops pool to carry the EOS marker.  Returns 0 on success, -1 when no
 * vframe slot or fb could be obtained.
 */
static int notify_v4l_eos(struct vdec_s *vdec)
{
	struct vdec_mpeg12_hw_s *hw = (struct vdec_mpeg12_hw_s *)vdec->private;
	struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
	struct vframe_s *vf = NULL;
	struct vdec_v4l2_buffer *fb = NULL;
	int index = INVALID_IDX;

	if (hw->eos) {
		if (kfifo_get(&hw->newframe_q, &vf) == 0 || vf == NULL) {
			debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
				"%s fatal error, no available buffer slot.\n",
				__func__);
			return -1;
		}
		if (hw->is_used_v4l) {
			index = find_free_buffer(hw);
			if (index == INVALID_IDX) {
				ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token);
				if (ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC) < 0) {
					pr_err("[%d] get fb fail.\n", ctx->id);
					return -1;
				}
			}
		}

		vf->type |= VIDTYPE_V4L_EOS;
		vf->timestamp = ULONG_MAX;
		vf->v4l_mem_handle = (index == INVALID_IDX) ?
			(ulong)fb :
			hw->pics[index].v4l_ref_buf_addr;
		vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L;
		fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;

		vdec_vframe_ready(vdec, vf);
		kfifo_put(&hw->display_q, (const struct vframe_s *)vf);
		ATRACE_COUNTER(hw->pts_name, vf->pts);

		if (hw->is_used_v4l)
			fb->task->submit(fb->task, TASK_TYPE_DEC);
		else
			vf_notify_receiver(vdec->vf_provider_name,
				VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL);

		pr_info("[%d] mpeg12 EOS notify.\n", (hw->is_used_v4l)?ctx->id:vdec->id);
	}

	return 0;
}

/*
 * Common completion path for a decode run, shared by the normal work and
 * the timeout work (@from == 1 for the timeout path).
 *
 * Handles each dec_result: DONE (consume chunk), AGAIN (retry or promote
 * to EOS/dirty-done), GET_DATA (pull the next frame-based chunk and
 * restart the hw without releasing the core), FORCE_EXIT, and EOS (stop,
 * flush, notify).  Afterwards the decoder is stopped, the mailbox irq is
 * masked, timers are cancelled, the hw search is drained, the vdec core
 * is released and the run-done callback fires.
 */
static void vmpeg12_work_implement(struct vdec_mpeg12_hw_s *hw,
	struct vdec_s *vdec, int from)
{
	int r;

	if (hw->dec_result != DEC_RESULT_DONE)
		debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
			"%s, result=%d, status=%d\n", __func__,
			hw->dec_result, vdec->next_status);
	if (hw->dec_result == DEC_RESULT_DONE) {
		if (vdec->input.swap_valid)
			hw->dec_again_cnt = 0;
		vdec_vframe_dirty(vdec, hw->chunk);
		hw->chunk = NULL;
	} else if (hw->dec_result == DEC_RESULT_AGAIN &&
		(vdec->next_status != VDEC_STATUS_DISCONNECTED)) {
		/*
		stream base: stream buf empty or timeout
		frame base: vdec_prepare_input fail
		*/
		if (!vdec_has_more_input(vdec)) {
			hw->dec_result = DEC_RESULT_EOS;
			vdec_schedule_work(&hw->work);
			return;
		}
		if ((vdec_stream_based(vdec)) &&
			(error_proc_policy & 0x1) &&
			check_dirty_data(vdec)) {
			hw->dec_result = DEC_RESULT_DONE;
			vdec_schedule_work(&hw->work);
			return;
		}
#ifdef AGAIN_HAS_THRESHOLD
		hw->next_again_flag = 1;
#endif
		//hw->dec_again_cnt++;
	} else if (hw->dec_result == DEC_RESULT_GET_DATA &&
		vdec->next_status != VDEC_STATUS_DISCONNECTED) {
		if (!vdec_has_more_input(vdec)) {
			hw->dec_result = DEC_RESULT_EOS;
			vdec_schedule_work(&hw->work);
			return;
		}
		debug_print(DECODE_ID(hw), PRINT_FLAG_VLD_DETAIL,
			"%s DEC_RESULT_GET_DATA %x %x %x\n",
			__func__,
			READ_VREG(VLD_MEM_VIFIFO_LEVEL),
			READ_VREG(VLD_MEM_VIFIFO_WP),
			READ_VREG(VLD_MEM_VIFIFO_RP));
		vdec_vframe_dirty(vdec, hw->chunk);
		hw->chunk = NULL;
		vdec_clean_input(vdec);

		r = vdec_prepare_input(vdec, &hw->chunk);
		if (r < 0) {
			/* no data yet: keep GET_DATA and retry via work */
			hw->input_empty++;
			reset_process_time(hw);
			hw->dec_result = DEC_RESULT_GET_DATA;
			debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
				"%s: Insufficient data, get data retry\n", __func__);
			vdec_schedule_work(&hw->work);
			return;
		}
		hw->input_empty = 0;
		if (vdec_frame_based(vdec) && (hw->chunk != NULL)) {
			/* program the bit count incl. fifo-alignment slack */
			r = hw->chunk->size +
				(hw->chunk->offset & (VDEC_FIFO_ALIGN - 1));
			WRITE_VREG(VIFF_BIT_CNT, r * 8);
			if (vdec->mvfrm)
				vdec->mvfrm->frame_size += hw->chunk->size;
		}
		debug_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
			"%s: %x %x %x size %d, bitcnt %d\n",
			__func__,
			READ_VREG(VLD_MEM_VIFIFO_LEVEL),
			READ_VREG(VLD_MEM_VIFIFO_WP),
			READ_VREG(VLD_MEM_VIFIFO_RP),
			r, READ_VREG(VIFF_BIT_CNT));
		vdec_enable_input(vdec);
		hw->dec_result = DEC_RESULT_NONE;
		hw->last_vld_level = 0;
		start_process_time_set(hw);
		hw->init_flag = 1;
		mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL);
		/* restart the ucode; core is intentionally NOT released */
		WRITE_VREG(MREG_BUFFEROUT, 0);
		return;
	} else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) {
		debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
			"%s: force exit\n", __func__);
		if (hw->stat & STAT_ISR_REG) {
			amvdec_stop();
			vdec_free_irq(VDEC_IRQ_1, (void *)hw);
			hw->stat &= ~STAT_ISR_REG;
		}
	} else if (hw->dec_result == DEC_RESULT_EOS) {
		if (hw->stat & STAT_VDEC_RUN) {
			amvdec_stop();
			hw->stat &= ~STAT_VDEC_RUN;
		}
		hw->eos = 1;
		vdec_vframe_dirty(vdec, hw->chunk);
		hw->chunk = NULL;
		vdec_clean_input(vdec);
		flush_output(hw);
		notify_v4l_eos(vdec);

		debug_print(DECODE_ID(hw), 0,
			"%s: end of stream, num %d(%d)\n",
			__func__, hw->disp_num, hw->dec_num);
	}
	if (hw->stat & STAT_VDEC_RUN) {
		amvdec_stop();
		hw->stat &= ~STAT_VDEC_RUN;
	}
	/*disable mbox interrupt */
	WRITE_VREG(ASSIST_MBOX1_MASK, 0);
	del_timer_sync(&hw->check_timer);
	hw->stat &= ~STAT_TIMER_ARM;

	wait_vmmpeg12_search_done(hw);

	if (from == 1) {
		/*This is a timeout work*/
		if (work_pending(&hw->work)) {
			pr_err("timeout work return befor finishing.");
			/*
			 * The vmpeg12_work arrives at the last second,
			 * give it a chance to handle the scenario.
			 */
			return;
		}
	}

	if (vdec->parallel_dec == 1)
		vdec_core_finish_run(vdec, CORE_MASK_VDEC_1);
	else
		vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC);

	wake_up_interruptible(&hw->wait_q);
	if (hw->is_used_v4l) {
		struct aml_vcodec_ctx *ctx =
			(struct aml_vcodec_ctx *)(hw->v4l2_ctx);

		if (ctx->param_sets_from_ucode &&
			!hw->v4l_params_parsed)
			vdec_v4l_write_frame_sync(ctx);
	}

	if (hw->vdec_cb)
		hw->vdec_cb(vdec, hw->vdec_cb_arg);
}

/* Normal decode-done work: delegates to the shared implementation. */
static void vmpeg12_work(struct work_struct *work)
{
	struct vdec_mpeg12_hw_s *hw =
		container_of(work, struct vdec_mpeg12_hw_s, work);
	struct vdec_s *vdec = hw_to_vdec(hw);

	vmpeg12_work_implement(hw, vdec, 0);
}

/* Timeout work: runs the shared path with from=1 unless the normal work
 * is already pending (the normal path then wins). */
static void vmpeg12_timeout_work(struct work_struct *work)
{
	struct vdec_mpeg12_hw_s *hw =
		container_of(work, struct vdec_mpeg12_hw_s, timeout_work);
	struct vdec_s *vdec = hw_to_vdec(hw);

	if (work_pending(&hw->work)) {
		pr_err("timeout work return befor executing.");
		return;
	}

	hw->timeout_processing = 1;
	vmpeg12_work_implement(hw, vdec, 1);
}

/*
 * vframe provider .peek: return (without dequeuing) the next display
 * frame, or NULL when the queue is empty or in an invalid oversize state.
 */
static struct vframe_s *vmpeg_vf_peek(void *op_arg)
{
	struct vframe_s *vf;
	struct vdec_s *vdec = op_arg;
	struct vdec_mpeg12_hw_s *hw =
		(struct vdec_mpeg12_hw_s *)vdec->private;
	hw->peek_num++;

	if (kfifo_len(&hw->display_q) > VF_POOL_SIZE) {
		debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
			"kfifo len:%d invaild, peek error\n",
			kfifo_len(&hw->display_q));
		return NULL;
	}

	if (kfifo_peek(&hw->display_q, &vf))
		return vf;

	return NULL;
}

static struct vframe_s *vmpeg_vf_get(void *op_arg)
{
	struct vframe_s *vf;
	struct vdec_s *vdec = op_arg;
	struct vdec_mpeg12_hw_s *hw =
		(struct vdec_mpeg12_hw_s *)vdec->private;
+ hw->get_num++; + if (kfifo_get(&hw->display_q, &vf)) { + ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q)); + return vf; + } + return NULL; +} + +static int mpeg12_valid_vf_check(struct vframe_s *vf, struct vdec_mpeg12_hw_s *hw) +{ + int i; + + if (vf == NULL) + return 0; + + for (i = 0; i < VF_POOL_SIZE; i++) { + if (vf == &hw->vfpool[i]) + return 1; + } + return 0; +} + +static void vmpeg_vf_put(struct vframe_s *vf, void *op_arg) +{ + struct vdec_s *vdec = op_arg; + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + unsigned long flags; + + if (!mpeg12_valid_vf_check(vf, hw)) { + debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "invalid vf: %lx\n", (ulong)vf); + return ; + } + spin_lock_irqsave(&hw->lock, flags); + hw->vfbuf_use[vf->index]--; + if (hw->vfbuf_use[vf->index] < 0) { + debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "warn: vf %lx, index %d putback repetitive, set use to 0\n", (ulong)vf, vf->index); + hw->vfbuf_use[vf->index] = 0; + } + hw->put_num++; + debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "%s: vf: %lx, index: %d, use: %d\n", __func__, (ulong)vf, + vf->index, hw->vfbuf_use[vf->index]); + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q)); + spin_unlock_irqrestore(&hw->lock, flags); +} + + +static int vmpeg_event_cb(int type, void *data, void *op_arg) +{ + struct vdec_s *vdec = op_arg; + + if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(vdec); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +static int vmpeg_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + struct vdec_s *vdec = op_arg; + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + + spin_lock_irqsave(&hw->lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + 
states->buf_free_num = kfifo_len(&hw->newframe_q); + states->buf_avail_num = kfifo_len(&hw->display_q); + states->buf_recycle_num = 0; + + spin_unlock_irqrestore(&hw->lock, flags); + return 0; +} + +static u32 get_ratio_control(struct vdec_mpeg12_hw_s *hw) +{ + u32 ar_bits; + + u32 ratio_control; + + ar_bits = hw->pixel_ratio; + + if (ar_bits == 0x2) + ratio_control = 0xc0 << DISP_RATIO_ASPECT_RATIO_BIT; + + else if (ar_bits == 0x3) + ratio_control = 0x90 << DISP_RATIO_ASPECT_RATIO_BIT; + + else if (ar_bits == 0x4) + ratio_control = 0x74 << DISP_RATIO_ASPECT_RATIO_BIT; + else + ratio_control = 0; + + return ratio_control; +} + +static int vmmpeg12_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + + if (!hw) + return -1; + + vstatus->frame_width = hw->frame_width; + vstatus->frame_height = hw->frame_height; + if (hw->frame_dur != 0) + vstatus->frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ? + 96000 / hw->frame_dur : (96000 / hw->frame_dur +1); + else + vstatus->frame_rate = -1; + vstatus->error_count = READ_VREG(AV_SCRATCH_C); + vstatus->status = hw->stat; + vstatus->bit_rate = hw->gvs.bit_rate; + vstatus->frame_dur = hw->frame_dur; + vstatus->frame_data = hw->gvs.frame_data; + vstatus->total_data = hw->gvs.total_data; + vstatus->frame_count = hw->gvs.frame_count; + vstatus->error_frame_count = hw->gvs.error_frame_count; + vstatus->drop_frame_count = hw->drop_frame_count; + vstatus->i_decoded_frames = hw->gvs.i_decoded_frames; + vstatus->i_lost_frames = hw->gvs.i_lost_frames; + vstatus->i_concealed_frames = hw->gvs.i_concealed_frames; + vstatus->p_decoded_frames = hw->gvs.p_decoded_frames; + vstatus->p_lost_frames = hw->gvs.p_lost_frames; + vstatus->p_concealed_frames = hw->gvs.p_concealed_frames; + vstatus->b_decoded_frames = hw->gvs.b_decoded_frames; + vstatus->b_lost_frames = hw->gvs.b_lost_frames; + vstatus->b_concealed_frames = hw->gvs.b_concealed_frames; + 
vstatus->total_data = hw->gvs.total_data; + vstatus->samp_cnt = hw->gvs.samp_cnt; + vstatus->offset = hw->gvs.offset; + vstatus->ratio_control = get_ratio_control(hw); + + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + + + +/****************************************/ +static int vmpeg12_canvas_init(struct vdec_mpeg12_hw_s *hw) +{ + int i, ret; + u32 canvas_width, canvas_height; + u32 decbuf_size, decbuf_y_size, decbuf_uv_size; + unsigned long decbuf_start; + /*u32 disp_addr = 0xffffffff;*/ + struct vdec_s *vdec = hw_to_vdec(hw); + + if (buf_size <= 0x00400000) { + /* SD only */ + canvas_width = 768; + canvas_height = 576; + decbuf_y_size = 0x80000; + decbuf_uv_size = 0x20000; + decbuf_size = 0x100000; + } else { + /* HD & SD */ + canvas_width = 1920; + canvas_height = 1088; + decbuf_y_size = 0x200000; + decbuf_uv_size = 0x80000; + decbuf_size = 0x300000; + } + + for (i = 0; i < hw->buf_num + 1; i++) { + unsigned canvas; + + if (i == hw->buf_num) /* SWAP&CCBUF&MATIRX&MV */ + decbuf_size = WORKSPACE_SIZE; + + if (hw->is_used_v4l && !(i == hw->buf_num)) { + continue; + } else { + ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, i, + decbuf_size, DRIVER_NAME, &decbuf_start); + if (ret < 0) { + pr_err("bmmu alloc failed! 
size 0x%d idx %d\n", + decbuf_size, i); + return ret; + } + } + + if (i == hw->buf_num) { + hw->cc_buf_size = AUX_BUF_ALIGN(CCBUF_SIZE); + hw->ccbuf_phyAddress_virt = dma_alloc_coherent(amports_get_dma_device(), + hw->cc_buf_size, &hw->ccbuf_phyAddress, + GFP_KERNEL); + if (hw->ccbuf_phyAddress_virt == NULL) { + pr_err("%s: failed to alloc cc buffer\n", __func__); + return -ENOMEM; + } + hw->buf_start = decbuf_start; + WRITE_VREG(MREG_CO_MV_START, hw->buf_start); + WRITE_VREG(MREG_CC_ADDR, hw->ccbuf_phyAddress); + } else { + if (vdec->parallel_dec == 1) { + unsigned tmp; + if (canvas_u(hw->canvas_spec[i]) == 0xff) { + tmp = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~(0xffff << 8); + hw->canvas_spec[i] |= tmp << 8; + hw->canvas_spec[i] |= tmp << 16; + } + if (canvas_y(hw->canvas_spec[i]) == 0xff) { + tmp = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~0xff; + hw->canvas_spec[i] |= tmp; + } + canvas = hw->canvas_spec[i]; + } else { + canvas = vdec->get_canvas(i, 2); + hw->canvas_spec[i] = canvas; + } + + hw->canvas_config[i][0].phy_addr = + decbuf_start; + hw->canvas_config[i][0].width = + canvas_width; + hw->canvas_config[i][0].height = + canvas_height; + hw->canvas_config[i][0].block_mode = + hw->canvas_mode; + hw->canvas_config[i][0].endian = + (hw->canvas_mode == CANVAS_BLKMODE_LINEAR)?7:0; + + config_cav_lut(canvas_y(canvas), + &hw->canvas_config[i][0], VDEC_1); + + hw->canvas_config[i][1].phy_addr = + decbuf_start + decbuf_y_size; + hw->canvas_config[i][1].width = canvas_width; + hw->canvas_config[i][1].height = canvas_height / 2; + hw->canvas_config[i][1].block_mode = hw->canvas_mode; + hw->canvas_config[i][1].endian = + (hw->canvas_mode == CANVAS_BLKMODE_LINEAR)?7:0; + + config_cav_lut(canvas_u(canvas), + &hw->canvas_config[i][1], VDEC_1); + } + } + return 0; +} + +static void vmpeg2_dump_state(struct vdec_s *vdec) +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)(vdec->private); + 
u32 i; + debug_print(DECODE_ID(hw), 0, + "====== %s\n", __func__); + debug_print(DECODE_ID(hw), 0, + "width/height (%d/%d),i_first %d, buf_num %d, run_flag %d\n", + hw->frame_width, + hw->frame_height, + hw->first_i_frame_ready, + hw->buf_num, + hw->run_flag + ); + debug_print(DECODE_ID(hw), 0, + "is_framebase(%d), eos %d, state 0x%x, dec_result 0x%x dec_frm %d put_frm %d run %d not_run_ready %d,input_empty %d\n", + vdec_frame_based(vdec), + hw->eos, + hw->stat, + hw->dec_result, + hw->dec_num, + hw->put_num, + hw->run_count, + hw->not_run_ready, + hw->input_empty + ); + + for (i = 0; i < hw->buf_num; i++) { + debug_print(DECODE_ID(hw), 0, + "index %d, used %d, ref %d\n", i, + hw->vfbuf_use[i], hw->ref_use[i]); + } + + if (!hw->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + debug_print(DECODE_ID(hw), 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + debug_print(DECODE_ID(hw), 0, + "%s, newq(%d/%d), dispq(%d/%d) vf pre/get/put (%d/%d/%d),drop=%d, buffer_not_ready %d\n", + __func__, + kfifo_len(&hw->newframe_q), + VF_POOL_SIZE, + kfifo_len(&hw->display_q), + VF_POOL_SIZE, + hw->disp_num, + hw->get_num, + hw->put_num, + hw->drop_frame_count, + hw->buffer_not_ready + ); + debug_print(DECODE_ID(hw), 0, + "VIFF_BIT_CNT=0x%x\n", + READ_VREG(VIFF_BIT_CNT)); + debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_LEVEL=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_WP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP)); + debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_RP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_RP)); + debug_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + debug_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (vdec_frame_based(vdec) && + debug_enable & PRINT_FRAMEBASE_DATA + ) { + int 
jj; + if (hw->chunk && hw->chunk->block && + hw->chunk->size > 0) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, hw->chunk->size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + debug_print(DECODE_ID(hw), 0, + "frame data size 0x%x\n", + hw->chunk->size); + for (jj = 0; jj < hw->chunk->size; jj++) { + if ((jj & 0xf) == 0) + debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } +} + +static void reset_process_time(struct vdec_mpeg12_hw_s *hw) +{ + if (hw->start_process_time) { + unsigned process_time = + 1000 * (jiffies - hw->start_process_time) / HZ; + hw->start_process_time = 0; + if (process_time > max_process_time[DECODE_ID(hw)]) + max_process_time[DECODE_ID(hw)] = process_time; + } +} + +static void start_process_time_set(struct vdec_mpeg12_hw_s *hw) +{ + if ((hw->refs[1] != -1) && (hw->refs[0] == -1)) + hw->decode_timeout_count = 1; + else + hw->decode_timeout_count = 10; + hw->start_process_time = jiffies; +} +static void timeout_process(struct vdec_mpeg12_hw_s *hw) +{ + struct vdec_s *vdec = hw_to_vdec(hw); + + if (work_pending(&hw->work) || + work_busy(&hw->work) || + work_busy(&hw->timeout_work) || + work_pending(&hw->timeout_work)) { + pr_err("%s mpeg12[%d] timeout_process return befor do anything.\n",__func__, vdec->id); + return; + } + reset_process_time(hw); + amvdec_stop(); + debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "%s decoder timeout, status=%d, level=%d\n", + __func__, vdec->status, READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + hw->dec_result = DEC_RESULT_DONE; + if ((hw->refs[1] != -1) && (hw->refs[0] != -1)) + hw->first_i_frame_ready = 0; + + /* + * In this very timeout point,the 
vmpeg12_work arrives, + * let it to handle the scenario. + */ + if (work_pending(&hw->work)) { + pr_err("%s mpeg12[%d] return befor schedule.", __func__, vdec->id); + return; + } + vdec_schedule_work(&hw->timeout_work); +} + +static void check_timer_func(struct timer_list *timer) +{ + struct vdec_mpeg12_hw_s *hw = container_of(timer, + struct vdec_mpeg12_hw_s, check_timer); + struct vdec_s *vdec = hw_to_vdec(hw); + unsigned int timeout_val = decode_timeout_val; + + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + + if (((debug_enable & PRINT_FLAG_TIMEOUT_STATUS) == 0) && + (timeout_val > 0) && + (hw->start_process_time > 0) && + ((1000 * (jiffies - hw->start_process_time) / HZ) + > timeout_val)) { + if (hw->last_vld_level == READ_VREG(VLD_MEM_VIFIFO_LEVEL)) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + timeout_process(hw); + } + hw->last_vld_level = READ_VREG(VLD_MEM_VIFIFO_LEVEL); + } + + if (vdec->next_status == VDEC_STATUS_DISCONNECTED) { + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + pr_info("vdec requested to be disconnected\n"); + return; + } + + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static int vmpeg12_hw_ctx_restore(struct vdec_mpeg12_hw_s *hw) +{ + u32 index = -1, i; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + + index = find_free_buffer(hw); + if (index < 0 || index >= hw->buf_num) + return -1; + if (!hw->init_flag) { + if (vmpeg12_canvas_init(hw) < 0) { + debug_print(DECODE_ID(hw), 0, "vmpeg12_canvas_init failed\n"); + return -1; + } + } else { + WRITE_VREG(MREG_CO_MV_START, hw->buf_start); + WRITE_VREG(MREG_CC_ADDR, hw->ccbuf_phyAddress); + if (!hw->is_used_v4l) { + for (i = 0; i < hw->buf_num; i++) { + config_cav_lut(canvas_y(hw->canvas_spec[i]), + &hw->canvas_config[i][0], VDEC_1); 
+ config_cav_lut(canvas_u(hw->canvas_spec[i]), + &hw->canvas_config[i][1], VDEC_1); + } + } + } + + /* prepare REF0 & REF1 + points to the past two IP buffers + prepare REC_CANVAS_ADDR and ANC2_CANVAS_ADDR + points to the output buffer*/ + WRITE_VREG(MREG_REF0, + (hw->refs[0] == -1) ? 0xffffffff : + hw->canvas_spec[hw->refs[0]]); + WRITE_VREG(MREG_REF1, + (hw->refs[1] == -1) ? 0xffffffff : + hw->canvas_spec[hw->refs[1]]); + WRITE_VREG(REC_CANVAS_ADDR, hw->canvas_spec[index]); + WRITE_VREG(ANC2_CANVAS_ADDR, hw->canvas_spec[index]); + + debug_print(DECODE_ID(hw), PRINT_FLAG_RESTORE, + "%s,ref0=0x%x, ref1=0x%x,rec=0x%x, ctx_valid=%d,index=%d\n", + __func__, + READ_VREG(MREG_REF0), + READ_VREG(MREG_REF1), + READ_VREG(REC_CANVAS_ADDR), + hw->ctx_valid, index); + + /* set to mpeg1 default */ + WRITE_VREG(MPEG1_2_REG, + (hw->ctx_valid) ? hw->reg_mpeg1_2_reg : 0); + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + /* for Mpeg1 default value */ + WRITE_VREG(PIC_HEAD_INFO, + (hw->ctx_valid) ? 
hw->reg_pic_head_info : 0x380); + /* disable mpeg4 */ + WRITE_VREG(M4_CONTROL_REG, 0); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + /* clear buffer IN/OUT registers */ + WRITE_VREG(MREG_BUFFEROUT, 0); + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + /* set reference width and height */ + if ((hw->frame_width != 0) && (hw->frame_height != 0)) + WRITE_VREG(MREG_CMD, + (hw->frame_width << 16) | hw->frame_height); + else + WRITE_VREG(MREG_CMD, 0); + + debug_print(DECODE_ID(hw), PRINT_FLAG_RESTORE, + "0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", + hw->frame_width, hw->frame_height, hw->seqinfo, + hw->reg_f_code_reg, hw->reg_slice_ver_pos_pic_type, + hw->reg_mb_info); + + WRITE_VREG(MREG_PIC_WIDTH, hw->reg_pic_width); + WRITE_VREG(MREG_PIC_HEIGHT, hw->reg_pic_height); + WRITE_VREG(MREG_SEQ_INFO, hw->seqinfo); + WRITE_VREG(F_CODE_REG, hw->reg_f_code_reg); + WRITE_VREG(SLICE_VER_POS_PIC_TYPE, + hw->reg_slice_ver_pos_pic_type); + WRITE_VREG(MB_INFO, hw->reg_mb_info); + WRITE_VREG(VCOP_CTRL_REG, hw->reg_vcop_ctrl_reg); + WRITE_VREG(AV_SCRATCH_H, hw->reg_signal_type); + + if (READ_VREG(MREG_ERROR_COUNT) != 0 || + READ_VREG(MREG_FATAL_ERROR) == 1) + debug_print(DECODE_ID(hw), PRINT_FLAG_RESTORE, + "err_cnt:%d fa_err:%d\n", + READ_VREG(MREG_ERROR_COUNT), + READ_VREG(MREG_FATAL_ERROR)); + + /* clear error count */ + WRITE_VREG(MREG_ERROR_COUNT, 0); + /*Use MREG_FATAL_ERROR bit1, the ucode determine + whether to report the interruption of width and + height information,in order to be compatible + with the old version of ucode. 
+ 1: Report the width and height information + 0: No Report + bit0: + 1: Use cma cc buffer for new driver + 0: use codec mm cc buffer for old driver + */ + WRITE_VREG(MREG_FATAL_ERROR, 3); + /* clear wait buffer status */ + WRITE_VREG(MREG_WAIT_BUFFER, 0); +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1<<17); +#endif + + /* cbcr_merge_swap_en */ + if (hw->is_used_v4l + && (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21 + || v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + else + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + + if (!hw->ctx_valid) + WRITE_VREG(AV_SCRATCH_J, hw->userdata_wp_ctx); + + if (hw->chunk) { + /*frame based input*/ + WRITE_VREG(MREG_INPUT, + (hw->chunk->offset & 7) | (1<<7) | (hw->ctx_valid<<6)); + } else { + /*stream based input*/ + WRITE_VREG(MREG_INPUT, (hw->ctx_valid<<6)); + } + return 0; +} + +static void vmpeg12_local_init(struct vdec_mpeg12_hw_s *hw) +{ + int i; + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + hw->vmpeg12_ratio = hw->vmpeg12_amstream_dec_info.ratio; + + hw->vmpeg12_ratio64 = hw->vmpeg12_amstream_dec_info.ratio64; + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf; + vf = &hw->vfpool[i]; + hw->vfpool[i].index = DECODE_BUFFER_NUM_MAX; + kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf); + } + + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + hw->vfbuf_use[i] = 0; + hw->ref_use[i] = 0; + } + + + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + + hw->mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + hw->tvp_flag); + hw->eos = 0; + hw->frame_width = hw->frame_height = 0; + hw->frame_dur = hw->frame_prog = 0; + hw->frame_force_skip_flag = 0; + hw->wait_buffer_counter = 0; + hw->first_i_frame_ready = 0; + hw->dec_control &= 
DEC_CONTROL_INTERNAL_MASK; + hw->refs[0] = -1; + hw->refs[1] = -1; + hw->disp_num = 0; + hw->dec_num = 0; + hw->put_num = 0; + hw->run_count = 0; + hw->not_run_ready = 0; + hw->input_empty = 0; + hw->peek_num = 0; + hw->get_num = 0; + hw->drop_frame_count = 0; + hw->buffer_not_ready = 0; + hw->start_process_time = 0; + hw->init_flag = 0; + hw->dec_again_cnt = 0; + hw->error_frame_skip_level = error_frame_skip_level; + + init_waitqueue_head(&hw->wait_q); + if (dec_control) + hw->dec_control = dec_control; +} + +static s32 vmpeg12_init(struct vdec_mpeg12_hw_s *hw) +{ + int size; + u32 fw_size = 16*0x1000; + struct firmware_s *fw; + + vmpeg12_local_init(hw); + + fw = vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + pr_debug("get firmware ...\n"); + size = get_firmware_data(VIDEO_DEC_MPEG12_MULTI, fw->data); + if (size < 0) { + pr_err("get firmware fail.\n"); + vfree(fw); + return -1; + } + + fw->len = size; + hw->fw = fw; + + INIT_WORK(&hw->userdata_push_work, userdata_push_do_work); + INIT_WORK(&hw->work, vmpeg12_work); + INIT_WORK(&hw->timeout_work, vmpeg12_timeout_work); + INIT_WORK(&hw->notify_work, vmpeg12_notify_work); + + if (NULL == hw->user_data_buffer) { + hw->user_data_buffer = kmalloc(USER_DATA_SIZE, + GFP_KERNEL); + if (!hw->user_data_buffer) { + pr_info("%s: Can not allocate user_data_buffer\n", + __func__); + return -1; + } + } + + vmmpeg2_crate_userdata_manager(hw, + hw->user_data_buffer, + USER_DATA_SIZE); + + //amvdec_enable(); + timer_setup(&hw->check_timer, check_timer_func, 0); + //init_timer(&hw->check_timer); + //hw->check_timer.data = (unsigned long)hw; + //hw->check_timer.function = check_timer_func; + hw->check_timer.expires = jiffies + CHECK_INTERVAL; + + hw->stat |= STAT_TIMER_ARM; + hw->stat |= STAT_ISR_REG; + + hw->buf_start = 0; + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + + return 0; +} + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct vdec_mpeg12_hw_s 
*hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + if (hw->eos) + return 0; + if (hw->timeout_processing && + (work_pending(&hw->work) || work_busy(&hw->work) || + work_pending(&hw->timeout_work) || work_busy(&hw->timeout_work))) { + debug_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "mpeg12 work pending,not ready for run.\n"); + return 0; + } + hw->timeout_processing = 0; + if (vdec_stream_based(vdec) && (hw->init_flag == 0) + && pre_decode_buf_level != 0) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = vdec->input.size + wp - rp; + else + level = wp - rp; + + if (level < pre_decode_buf_level) { + hw->not_run_ready++; + return 0; + } + } + +#ifdef AGAIN_HAS_THRESHOLD + if (hw->next_again_flag&& + (!vdec_frame_based(vdec))) { + u32 parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + if (parser_wr_ptr >= hw->pre_parser_wr_ptr && + (parser_wr_ptr - hw->pre_parser_wr_ptr) < + again_threshold) { + int r = vdec_sync_input(vdec); + debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "%s buf level%x\n", + __func__, r); + return 0; + } + } +#endif + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode) { + if (hw->v4l_params_parsed) { + if (!ctx->v4l_codec_dpb_ready && + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < + run_ready_min_buf_num) + if (!ctx->v4l_codec_dpb_ready) + return 0; + } else { + if (ctx->v4l_resolution_change) + return 0; + } + } else if (!ctx->v4l_codec_dpb_ready) { + if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < + run_ready_min_buf_num) + return 0; + } + } + + if (!is_enough_free_buffer(hw)) { + hw->not_run_ready++; + hw->buffer_not_ready++; + return 0; + } + hw->not_run_ready = 0; + hw->buffer_not_ready = 0; + if (vdec->parallel_dec == 1) + return (unsigned long)(CORE_MASK_VDEC_1); + else + return (unsigned long)(CORE_MASK_VDEC_1 | CORE_MASK_HEVC); +} + +static unsigned char 
get_data_check_sum + (struct vdec_mpeg12_hw_s *hw, int size) +{ + int jj; + int sum = 0; + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + for (jj = 0; jj < size; jj++) + sum += data[jj]; + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static int check_dirty_data(struct vdec_s *vdec) +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)(vdec->private); + u32 wp, rp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + + if (wp > rp) + level = wp - rp; + else + level = wp + vdec->input.size - rp ; + + if (level > (vdec->input.size / 2)) + hw->dec_again_cnt++; + + if (hw->dec_again_cnt > dirty_again_threshold) { + debug_print(DECODE_ID(hw), 0, "mpeg12 data skipped %x\n", level); + hw->dec_again_cnt = 0; + return 1; + } + return 0; +} + + +static void run(struct vdec_s *vdec, unsigned long mask, +void (*callback)(struct vdec_s *, void *), + void *arg) +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + int save_reg; + int size, ret; + + hw->run_flag = 1; + if (!hw->vdec_pg_enable_flag) { + hw->vdec_pg_enable_flag = 1; + amvdec_enable(); + } + save_reg = READ_VREG(POWER_CTL_VLD); + /* reset everything except DOS_TOP[1] and APB_CBUS[0]*/ + WRITE_VREG(DOS_SW_RESET0, 0xfffffff0); + WRITE_VREG(DOS_SW_RESET0, 0); + WRITE_VREG(POWER_CTL_VLD, save_reg); + hw->run_count++; + vdec_reset_core(vdec); + hw->vdec_cb_arg = arg; + hw->vdec_cb = callback; + +#ifdef AGAIN_HAS_THRESHOLD + if (vdec_stream_based(vdec)) { + hw->pre_parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + hw->next_again_flag = 0; + } +#endif + + size = vdec_prepare_input(vdec, &hw->chunk); + if (size < 0) { + hw->input_empty++; + hw->dec_result = DEC_RESULT_AGAIN; + + debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + 
"vdec_prepare_input: Insufficient data\n"); + vdec_schedule_work(&hw->work); + hw->run_flag = 0; + return; + } + + hw->input_empty = 0; + if ((vdec_frame_based(vdec)) && + (hw->chunk != NULL)) { + size = hw->chunk->size + + (hw->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + WRITE_VREG(VIFF_BIT_CNT, size * 8); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = hw->chunk->size; + } + if (vdec_frame_based(vdec) && !vdec_secure(vdec)) { + /* HW needs padding (NAL start) for frame ending */ + char* tail = (char *)hw->chunk->block->start_virt; + + tail += hw->chunk->offset + hw->chunk->size; + tail[0] = 0; + tail[1] = 0; + tail[2] = 1; + tail[3] = 0; + codec_mm_dma_flush(tail, 4, DMA_TO_DEVICE); + } + + if (vdec_frame_based(vdec) && debug_enable && !vdec_secure(vdec)) { + u8 *data = NULL; + if (hw->chunk) + debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "run: chunk offset 0x%x, size %d\n", + hw->chunk->offset, hw->chunk->size); + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + if (debug_enable & PRINT_FLAG_VDEC_STATUS + ) { + debug_print(DECODE_ID(hw), 0, + "%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. 
%02x %02x %02x %02x\n", + __func__, size, get_data_check_sum(hw, size), + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + } + if (debug_enable & PRINT_FRAMEBASE_DATA + ) { + int jj; + debug_print(DECODE_ID(hw), PRINT_FRAMEBASE_DATA, + "frame data:\n"); + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + pr_info("%06x:", jj); + pr_info("%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + pr_info("\n"); + } + pr_info("\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } else { + debug_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: %x %x %x %x %x size 0x%x, bitcnt %d\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + STBUF_READ(&vdec->vbuf, get_rp), + STBUF_READ(&vdec->vbuf, get_wp), + size, READ_VREG(VIFF_BIT_CNT)); + } + + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else { + ret = amvdec_vdec_loadmc_buf_ex(VFORMAT_MPEG12, "mmpeg12", vdec, + hw->fw->data, hw->fw->len); + if (ret < 0) { + pr_err("[%d] %s: the %s fw loading failed, err: %x\n", vdec->id, + hw->fw->name, tee_enabled() ? 
"TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + hw->run_flag = 0; + return; + } + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_MPEG12; + } + + if (vmpeg12_hw_ctx_restore(hw) < 0) { + hw->dec_result = DEC_RESULT_ERROR; + debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "ammvdec_mpeg12: error HW context restore\n"); + hw->run_flag = 0; + return; + } + /*wmb();*/ + hw->dec_result = DEC_RESULT_NONE; + hw->stat |= STAT_MC_LOAD; + vdec_enable_input(vdec); + hw->last_vld_level = 0; + start_process_time_set(hw); + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + amvdec_start(); + hw->stat |= STAT_VDEC_RUN; + hw->stat |= STAT_TIMER_ARM; + hw->init_flag = 1; + if (hw->mm_blk_handle) + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); + hw->run_flag = 0; +} + +static void reset(struct vdec_s *vdec) +{ + struct vdec_mpeg12_hw_s *hw = (struct vdec_mpeg12_hw_s *)vdec->private; + + pr_info("ammvdec_mpeg12: reset.\n"); + + vmpeg12_local_init(hw); + + if (hw->is_used_v4l) { + u32 i, buf_num = vmpeg12_get_buf_num(hw); + for (i = 0; i < buf_num; i++) { + hw->pics[i].v4l_ref_buf_addr = 0; + } + } + + hw->ctx_valid = 0; +} + +static int vmpeg12_set_trickmode(struct vdec_s *vdec, unsigned long trickmode) +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + if (!hw) + return 0; + + if (trickmode == TRICKMODE_I) { + hw->i_only = 0x3; + //trickmode_i = 1; + } else if (trickmode == TRICKMODE_NONE) { + hw->i_only = 0x0; + //trickmode_i = 0; + } + return 0; +} + +static int ammvdec_mpeg12_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_mpeg12_hw_s *hw = NULL; + int config_val = 0; + + pr_info("ammvdec_mpeg12 probe start.\n"); + + if (pdata == NULL) { + pr_info("ammvdec_mpeg12 platform data undefined.\n"); + return -EFAULT; + } + + hw = vzalloc(sizeof(struct vdec_mpeg12_hw_s)); + if (hw == NULL) { + 
pr_info("\nammvdec_mpeg12 decoder driver alloc failed\n"); + return -ENOMEM; + } + + /* the ctx from v4l2 driver. */ + hw->v4l2_ctx = pdata->private; + + pdata->private = hw; + pdata->dec_status = vmmpeg12_dec_status; + pdata->set_trickmode = vmpeg12_set_trickmode; + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vmpeg12_isr; + pdata->threaded_irq_handler = vmpeg12_isr_thread_fn; + pdata->dump_state = vmpeg2_dump_state; + + pdata->user_data_read = vmmpeg2_user_data_read; + pdata->reset_userdata_fifo = vmmpeg2_reset_userdata_fifo; + pdata->wakeup_userdata_poll = vmmpeg2_wakeup_userdata_poll; + + snprintf(hw->vdec_name, sizeof(hw->vdec_name), + "mpeg12-%d", pdev->id); + snprintf(hw->pts_name, sizeof(hw->pts_name), + "%s-pts", hw->vdec_name); + snprintf(hw->new_q_name, sizeof(hw->new_q_name), + "%s-newframe_q", hw->vdec_name); + snprintf(hw->disp_q_name, sizeof(hw->disp_q_name), + "%s-dispframe_q", hw->vdec_name); + + if (pdata->use_vfm_path) { + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + hw->frameinfo_enable = 1; + } + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + PROVIDER_NAME ".%02x", pdev->id & 0xff); + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->canvas_spec[i] = 0xffffff; + } + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vf_provider_ops, pdata); + + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->canvas_spec[i] = 0xffffff; + } + platform_set_drvdata(pdev, pdata); + + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + hw->canvas_mode = pdata->canvas_mode; + if (pdata->config_len) { + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hw->is_used_v4l = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + hw->canvas_mode = config_val; + + if 
((debug_enable & IGNORE_PARAM_FROM_CONFIG) == 0 && + get_config_int(pdata->config, + "parm_v4l_buffer_margin", + &config_val) == 0) + hw->dynamic_buf_num_margin= config_val; + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hw->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hw->sidebind_channel_id = config_val; + } + + hw->buf_num = vmpeg12_get_buf_num(hw); + hw->platform_dev = pdev; + + hw->tvp_flag = vdec_secure(pdata) ? CODEC_MM_FLAGS_TVP : 0; + if (pdata->sys_info) + hw->vmpeg12_amstream_dec_info = *pdata->sys_info; + + debug_print(DECODE_ID(hw), 0, + "%s, sysinfo: %dx%d, tvp_flag = 0x%x\n", + __func__, + hw->vmpeg12_amstream_dec_info.width, + hw->vmpeg12_amstream_dec_info.height, + hw->tvp_flag); + + if (vmpeg12_init(hw) < 0) { + pr_info("ammvdec_mpeg12 init failed.\n"); + if (hw) { + vfree(hw); + hw = NULL; + } + pdata->dec_status = NULL; + return -ENODEV; + } + vdec_set_prepare_level(pdata, start_decode_buf_level); + + vdec_set_vframe_comm(pdata, DRIVER_NAME); + + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } +#ifdef DUMP_USER_DATA + amvdec_mmpeg12_init_userdata_dump(hw); + reset_user_data_buf(hw); +#endif + + /*INIT_WORK(&userdata_push_work, userdata_push_do_work);*/ + return 0; +} + +static int ammvdec_mpeg12_remove(struct platform_device *pdev) + +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec = hw_to_vdec(hw); + int i; + + if (vdec->next_status == VDEC_STATUS_DISCONNECTED + && (vdec->status == VDEC_STATUS_ACTIVE)) { + debug_print(DECODE_ID(hw), 0, + "%s, force exit %d\n", __func__, __LINE__); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + wait_event_interruptible_timeout(hw->wait_q, + (vdec->status == 
VDEC_STATUS_CONNECTED), + msecs_to_jiffies(1000)); /* wait for work done */ + } + + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + cancel_work_sync(&hw->userdata_push_work); + cancel_work_sync(&hw->notify_work); + cancel_work_sync(&hw->work); + cancel_work_sync(&hw->timeout_work); + + if (hw->mm_blk_handle) { + void *bmmu_box_tmp = hw->mm_blk_handle; + hw->mm_blk_handle = NULL; + while (hw->run_flag) + usleep_range(1000, 2000); + decoder_bmmu_box_free(bmmu_box_tmp); + bmmu_box_tmp = NULL; + } + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1); + else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + + if (vdec->parallel_dec == 1) { + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + vdec->free_canvas_ex(canvas_y(hw->canvas_spec[i]), vdec->id); + vdec->free_canvas_ex(canvas_u(hw->canvas_spec[i]), vdec->id); + } + } + + if (hw->ccbuf_phyAddress_virt) { + dma_free_coherent(amports_get_dma_device(),hw->cc_buf_size, + hw->ccbuf_phyAddress_virt, hw->ccbuf_phyAddress); + hw->ccbuf_phyAddress_virt = NULL; + hw->ccbuf_phyAddress = 0; + } + + if (hw->user_data_buffer != NULL) { + kfree(hw->user_data_buffer); + hw->user_data_buffer = NULL; + } + vmmpeg2_destroy_userdata_manager(hw); + +#ifdef DUMP_USER_DATA + amvdec_mmpeg12_uninit_userdata_dump(hw); +#endif + + if (hw->fw) { + vfree(hw->fw); + hw->fw = NULL; + } + + vfree(hw); + + pr_info("ammvdec_mpeg12 removed.\n"); + + return 0; +} + +/****************************************/ + +static struct platform_driver ammvdec_mpeg12_driver = { + .probe = ammvdec_mpeg12_probe, + .remove = ammvdec_mpeg12_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume 
= amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t ammvdec_mpeg12_profile = { + .name = "mmpeg12", + .profile = "v4l" +}; + +static struct mconfig mmpeg12_configs[] = { + MC_PU32("radr", &radr), + MC_PU32("rval", &rval), + MC_PU32("dec_control", &dec_control), + MC_PU32("error_frame_skip_level", &error_frame_skip_level), + MC_PU32("decode_timeout_val", &decode_timeout_val), + MC_PU32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("pre_decode_buf_level", &pre_decode_buf_level), + MC_PU32("debug_enable", &debug_enable), + MC_PU32("udebug_flag", &udebug_flag), + MC_PU32("without_display_mode", &without_display_mode), + MC_PU32("dynamic_buf_num_margin", &dynamic_buf_num_margin), +#ifdef AGAIN_HAS_THRESHOLD + MC_PU32("again_threshold", &again_threshold), +#endif +}; +static struct mconfig_node mmpeg12_node; + +static int __init ammvdec_mpeg12_driver_init_module(void) +{ + pr_info("ammvdec_mpeg12 module init\n"); + + if (platform_driver_register(&ammvdec_mpeg12_driver)) { + pr_info("failed to register ammvdec_mpeg12 driver\n"); + return -ENODEV; + } + vcodec_profile_register(&ammvdec_mpeg12_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &mmpeg12_node, + "mmpeg12", mmpeg12_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_MPEG12, 0); + return 0; +} + +static void __exit ammvdec_mpeg12_driver_remove_module(void) +{ + pr_info("ammvdec_mpeg12 module exit.\n"); + platform_driver_unregister(&ammvdec_mpeg12_driver); +} + +/****************************************/ +module_param(dec_control, uint, 0664); +MODULE_PARM_DESC(dec_control, "\n ammvdec_mpeg12 decoder control\n"); +module_param(error_frame_skip_level, uint, 0664); +MODULE_PARM_DESC(error_frame_skip_level, + "\n ammvdec_mpeg12 error_frame_skip_level\n"); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\nrval\n"); + +module_param(debug_enable, uint, 0664); 
+MODULE_PARM_DESC(debug_enable, + "\n ammvdec_mpeg12 debug enable\n"); +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, + "\n ammvdec_mpeg12 pre_decode_buf_level\n"); + +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n ammvdec_mpeg12 start_decode_buf_level\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, "\n ammvdec_mpeg12 decode_timeout_val\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n ammvdec_mpeg12 dynamic_buf_num_margin\n"); + +module_param_array(max_process_time, uint, &max_decode_instance_num, 0664); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n ammvdec_mpeg12 udebug_flag\n"); + +module_param(dirty_again_threshold, int, 0664); +MODULE_PARM_DESC(dirty_again_threshold, "\n ammvdec_mpeg12 dirty_again_threshold\n"); + + +#ifdef AGAIN_HAS_THRESHOLD +module_param(again_threshold, uint, 0664); +MODULE_PARM_DESC(again_threshold, "\n again_threshold\n"); +#endif + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n ammvdec_mpeg12 without_display_mode\n"); + +module_param(error_proc_policy, uint, 0664); +MODULE_PARM_DESC(error_proc_policy, "\n ammvdec_mpeg12 error_proc_policy\n"); + +module_init(ammvdec_mpeg12_driver_init_module); +module_exit(ammvdec_mpeg12_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC MULTI MPEG1/2 Video Decoder Driver"); +MODULE_LICENSE("GPL"); + +
# Kbuild fragment for the Amlogic MPEG4 frame-provider decoders
# (drivers/frame_provider/decoder/mpeg4/Makefile).

# Single-instance MPEG4 decoder module.
obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_MPEG4) += amvdec_mpeg4.o
amvdec_mpeg4-objs += vmpeg4.o

# Multi-instance MPEG4 decoder module.
obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_MPEG4_MULTI) += amvdec_mmpeg4.o
amvdec_mmpeg4-objs += vmpeg4_multi.o
diff --git a/drivers/frame_provider/decoder/mpeg4/vmpeg4.c b/drivers/frame_provider/decoder/mpeg4/vmpeg4.c new file mode 100644 index 0000000..7d33a75 --- /dev/null +++ b/drivers/frame_provider/decoder/mpeg4/vmpeg4.c
@@ -0,0 +1,1278 @@ +/* + * drivers/amlogic/amports/vmpeg4.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "vmpeg4.h" +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/codec_mm/configs.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include "../../../common/chips/decoder_cpu_ver_info.h" + + + +/* #define CONFIG_AM_VDEC_MPEG4_LOG */ +#ifdef CONFIG_AM_VDEC_MPEG4_LOG +#define AMLOG +#define LOG_LEVEL_VAR amlog_level_vmpeg4 +#define LOG_MASK_VAR amlog_mask_vmpeg4 +#define LOG_LEVEL_ERROR 0 +#define LOG_LEVEL_INFO 1 +#define LOG_LEVEL_DESC "0:ERROR, 1:INFO" +#define LOG_MASK_PTS 0x01 +#define LOG_MASK_DESC 
"0x01:DEBUG_PTS" +#endif + +#include <linux/amlogic/media/utils/amlog.h> + +MODULE_AMLOG(LOG_LEVEL_ERROR, 0, LOG_LEVEL_DESC, LOG_DEFAULT_MASK_DESC); + +#include "../utils/amvdec.h" +#include "../utils/vdec.h" +#include "../utils/firmware.h" + +#define DRIVER_NAME "amvdec_mpeg4" +#define MODULE_NAME "amvdec_mpeg4" + +#define DEBUG_PTS + +/* /#if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +#define NV21 +/* /#endif */ + +#define I_PICTURE 0 +#define P_PICTURE 1 +#define B_PICTURE 2 + +#define ORI_BUFFER_START_ADDR 0x01000000 + +#define INTERLACE_FLAG 0x80 +#define TOP_FIELD_FIRST_FLAG 0x40 + +/* protocol registers */ +#define MP4_PIC_RATIO AV_SCRATCH_5 +#define MP4_RATE AV_SCRATCH_3 +#define MP4_ERR_COUNT AV_SCRATCH_6 +#define MP4_PIC_WH AV_SCRATCH_7 +#define MREG_BUFFERIN AV_SCRATCH_8 +#define MREG_BUFFEROUT AV_SCRATCH_9 +#define MP4_NOT_CODED_CNT AV_SCRATCH_A +#define MP4_VOP_TIME_INC AV_SCRATCH_B +#define MP4_OFFSET_REG AV_SCRATCH_C +#define MP4_SYS_RATE AV_SCRATCH_E +#define MEM_OFFSET_REG AV_SCRATCH_F + +#define PARC_FORBIDDEN 0 +#define PARC_SQUARE 1 +#define PARC_CIF 2 +#define PARC_10_11 3 +#define PARC_16_11 4 +#define PARC_40_33 5 +#define PARC_RESERVED 6 +/* values between 6 and 14 are reserved */ +#define PARC_EXTENDED 15 + +#define VF_POOL_SIZE 32 +#define DECODE_BUFFER_NUM_MAX 8 +#define PUT_INTERVAL (HZ/100) +#define WORKSPACE_SIZE (1 * SZ_1M) +#define MAX_BMMU_BUFFER_NUM (DECODE_BUFFER_NUM_MAX + 1) +#define DCAC_BUFF_START_IP 0x02b00000 + + +#define RATE_DETECT_COUNT 5 +#define DURATION_UNIT 96000 +#define PTS_UNIT 90000 + +#define DUR2PTS(x) ((x) - ((x) >> 4)) + +#define MAX_MPEG4_SUPPORT_SIZE (1920*1088) + +static struct vframe_s *vmpeg_vf_peek(void *); +static struct vframe_s *vmpeg_vf_get(void *); +static void vmpeg_vf_put(struct vframe_s *, void *); +static int vmpeg_vf_states(struct vframe_states *states, void *); +static int vmpeg_event_cb(int type, void *data, void *private_data); + +static int vmpeg4_prot_init(void); +static void 
vmpeg4_local_init(void); + +static const char vmpeg4_dec_id[] = "vmpeg4-dev"; + +#define PROVIDER_NAME "decoder.mpeg4" + +struct vdec_s *vdec = NULL; + +/* + *int query_video_status(int type, int *value); + */ +static const struct vframe_operations_s vmpeg_vf_provider = { + .peek = vmpeg_vf_peek, + .get = vmpeg_vf_get, + .put = vmpeg_vf_put, + .event_cb = vmpeg_event_cb, + .vf_states = vmpeg_vf_states, +}; +static void *mm_blk_handle; +static struct vframe_provider_s vmpeg_vf_prov; + +static DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(recycle_q, struct vframe_s *, VF_POOL_SIZE); + +static struct vframe_s vfpool[VF_POOL_SIZE]; +static s32 vfbuf_use[DECODE_BUFFER_NUM_MAX]; +static u32 frame_width, frame_height, frame_dur, frame_prog; +static u32 saved_resolution; +static struct timer_list recycle_timer; +static u32 stat; +static u32 buf_size = 32 * 1024 * 1024; +static u32 buf_offset; +static u32 vmpeg4_ratio; +static u64 vmpeg4_ratio64; +static u32 rate_detect; +static u32 vmpeg4_rotation; +static u32 fr_hint_status; + +static u32 total_frame; +static u32 last_vop_time_inc, last_duration; +static u32 last_anch_pts, vop_time_inc_since_last_anch, + frame_num_since_last_anch; +static u64 last_anch_pts_us64; +static struct vdec_info *gvs; + +#ifdef CONFIG_AM_VDEC_MPEG4_LOG +u32 pts_hit, pts_missed, pts_i_hit, pts_i_missed; +#endif + +static struct work_struct reset_work; +static struct work_struct notify_work; +static struct work_struct set_clk_work; +static bool is_reset; + +static DEFINE_SPINLOCK(lock); + +static struct dec_sysinfo vmpeg4_amstream_dec_info; + +static unsigned char aspect_ratio_table[16] = { + PARC_FORBIDDEN, + PARC_SQUARE, + PARC_CIF, + PARC_10_11, + PARC_16_11, + PARC_40_33, + PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, + PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, + PARC_RESERVED, PARC_EXTENDED +}; + +static inline 
u32 index2canvas(u32 index)
{
	/*
	 * Map a decode-buffer index to its packed canvas id.
	 * NV21: two planes packed as (uv << 16 | uv << 8 | y);
	 * planar: (v << 16 | u << 8 | y). Caller must pass index < 8.
	 */
	const u32 canvas_tab[8] = {
#ifdef NV21
		0x010100, 0x030302, 0x050504, 0x070706,
		0x090908, 0x0b0b0a, 0x0d0d0c, 0x0f0f0e
#else
		0x020100, 0x050403, 0x080706, 0x0b0a09,
		0x0e0d0c, 0x11100f, 0x141312, 0x171615
#endif
	};

	return canvas_tab[index];
}

/*
 * Compute vf->ratio_control from the stream's pixel-aspect-ratio code.
 *
 * num/den come from the container-supplied ratio (64-bit form preferred,
 * else 32-bit packed form); both default to 1 when unset. pixel_ratio is
 * either a table index (<= 0x0f, MPEG4 aspect_ratio_info) or a packed
 * extended PAR (high byte = par_width, low byte = par_height).
 */
static void set_aspect_ratio(struct vframe_s *vf, unsigned int pixel_ratio)
{
	int ar = 0;
	unsigned int num = 0;
	unsigned int den = 0;

	if (vmpeg4_ratio64 != 0) {
		num = vmpeg4_ratio64 >> 32;
		den = vmpeg4_ratio64 & 0xffffffff;
	} else {
		num = vmpeg4_ratio >> 16;
		den = vmpeg4_ratio & 0xffff;
	}
	if ((num == 0) || (den == 0)) {
		num = 1;
		den = 1;
	}

	if (vmpeg4_ratio == 0) {
		vf->ratio_control |= (0x90 << DISP_RATIO_ASPECT_RATIO_BIT);
		/* always stretch to 16:9 */
	} else if (pixel_ratio > 0x0f) {
		/* Extended PAR: ar = height*par_h*den*256 / (width*par_w*num)
		 * NOTE(review): num becomes 0 if the stream width is 0 —
		 * div_u64 by zero; presumably width is validated upstream,
		 * confirm against the ISR. */
		num = (pixel_ratio >> 8) *
			vmpeg4_amstream_dec_info.width * num;
		ar = div_u64((pixel_ratio & 0xff) *
			vmpeg4_amstream_dec_info.height * den * 0x100ULL +
			(num >> 1), num);
	} else {
		switch (aspect_ratio_table[pixel_ratio]) {
		case 0:
			/* PARC_FORBIDDEN: fall back to stream dimensions. */
			num = vmpeg4_amstream_dec_info.width * num;
			ar = (vmpeg4_amstream_dec_info.height * den * 0x100 +
				  (num >> 1)) / num;
			break;
		case 1:
			/* PARC_SQUARE */
			num = vf->width * num;
			ar = (vf->height * den * 0x100 + (num >> 1)) / num;
			break;
		case 2:
			/* PARC_CIF: 12:11 pixels */
			num = (vf->width * 12) * num;
			ar = (vf->height * den * 0x100 * 11 +
				  ((num) >> 1)) / num;
			break;
		case 3:
			/* 10:11 pixels */
			num = (vf->width * 10) * num;
			ar = (vf->height * den * 0x100 * 11 + (num >> 1)) /
				num;
			break;
		case 4:
			/* 16:11 pixels */
			num = (vf->width * 16) * num;
			ar = (vf->height * den * 0x100 * 11 + (num >> 1)) /
				num;
			break;
		case 5:
			/* 40:33 pixels */
			num = (vf->width * 40) * num;
			ar = (vf->height * den * 0x100 * 33 + (num >> 1)) /
				num;
			break;
		default:
			/* Reserved codes: treat as square pixels. */
			num = vf->width * num;
			ar = (vf->height * den * 0x100 + (num >> 1)) / num;
			break;
		}
	}

	ar = min(ar, DISP_RATIO_ASPECT_RATIO_MAX);

	vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT);
}

static irqreturn_t
vmpeg4_isr(int irq, void *dev_id) +{ + u32 reg; + struct vframe_s *vf = NULL; + u32 picture_type; + u32 buffer_index; + u32 pts, pts_valid = 0, offset = 0; + u64 pts_us64 = 0; + u32 rate, vop_time_inc, repeat_cnt, duration = 3200; + u32 frame_size; + + reg = READ_VREG(MREG_BUFFEROUT); + + if (reg) { + buffer_index = reg & 0x7; + picture_type = (reg >> 3) & 7; + rate = READ_VREG(MP4_RATE); + repeat_cnt = READ_VREG(MP4_NOT_CODED_CNT); + vop_time_inc = READ_VREG(MP4_VOP_TIME_INC); + + if (buffer_index >= DECODE_BUFFER_NUM_MAX) { + pr_err("fatal error, invalid buffer index."); + return IRQ_HANDLED; + } + + if (vmpeg4_amstream_dec_info.width == 0) { + vmpeg4_amstream_dec_info.width = + READ_VREG(MP4_PIC_WH) >> 16; + } +#if 0 + else { + pr_info("info width = %d, ucode width = %d\n", + vmpeg4_amstream_dec_info.width, + READ_VREG(MP4_PIC_WH) >> 16); + } +#endif + + if (vmpeg4_amstream_dec_info.height == 0) { + vmpeg4_amstream_dec_info.height = + READ_VREG(MP4_PIC_WH) & 0xffff; + } +#if 0 + else { + pr_info("info height = %d, ucode height = %d\n", + vmpeg4_amstream_dec_info.height, + READ_VREG(MP4_PIC_WH) & 0xffff); + } +#endif + if (vmpeg4_amstream_dec_info.rate == 0 + || vmpeg4_amstream_dec_info.rate > 96000) { + /* if ((rate >> 16) != 0) { */ + if ((rate & 0xffff) != 0 && (rate >> 16) != 0) { + vmpeg4_amstream_dec_info.rate = + (rate >> 16) * DURATION_UNIT / + (rate & 0xffff); + duration = vmpeg4_amstream_dec_info.rate; + if (fr_hint_status == VDEC_NEED_HINT) { + schedule_work(¬ify_work); + fr_hint_status = VDEC_HINTED; + } + } else if (rate_detect < RATE_DETECT_COUNT) { + if (vop_time_inc < last_vop_time_inc) { + duration = + vop_time_inc + rate - + last_vop_time_inc; + } else { + duration = + vop_time_inc - last_vop_time_inc; + } + + if (duration == last_duration) { + rate_detect++; + if (rate_detect >= RATE_DETECT_COUNT) { + vmpeg4_amstream_dec_info.rate = + duration * DURATION_UNIT / + rate; + duration = + vmpeg4_amstream_dec_info.rate; + } + } else + rate_detect = 
0; + + last_duration = duration; + } + } else { + duration = vmpeg4_amstream_dec_info.rate; +#if 0 + pr_info("info rate = %d, ucode rate = 0x%x:0x%x\n", + vmpeg4_amstream_dec_info.rate, + READ_VREG(MP4_RATE), vop_time_inc); +#endif + } + + if ((picture_type == I_PICTURE) || + (picture_type == P_PICTURE)) { + offset = READ_VREG(MP4_OFFSET_REG); + /*2500-->3000,because some mpeg4 + *video may checkout failed; + *may have av sync problem.can changed small later. + *263 may need small? + */ + if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, offset, &pts, + &frame_size, 3000, + &pts_us64) == 0) { + pts_valid = 1; + last_anch_pts = pts; + last_anch_pts_us64 = pts_us64; +#ifdef CONFIG_AM_VDEC_MPEG4_LOG + pts_hit++; +#endif + } else { +#ifdef CONFIG_AM_VDEC_MPEG4_LOG + pts_missed++; +#endif + } +#ifdef CONFIG_AM_VDEC_MPEG4_LOG + amlog_mask(LOG_MASK_PTS, + "I offset 0x%x, pts_valid %d pts=0x%x\n", + offset, pts_valid, pts); +#endif + } + + if (pts_valid) { + last_anch_pts = pts; + last_anch_pts_us64 = pts_us64; + frame_num_since_last_anch = 0; + vop_time_inc_since_last_anch = 0; + } else { + pts = last_anch_pts; + pts_us64 = last_anch_pts_us64; + + if ((rate != 0) && ((rate >> 16) == 0) + && vmpeg4_amstream_dec_info.rate == 0) { + /* variable PTS rate */ + /*bug on variable pts calc, + *do as dixed vop first if we + *have rate setting before. 
+ */ + if (vop_time_inc > last_vop_time_inc) { + vop_time_inc_since_last_anch += + vop_time_inc - last_vop_time_inc; + } else { + vop_time_inc_since_last_anch += + vop_time_inc + rate - + last_vop_time_inc; + } + + pts += vop_time_inc_since_last_anch * + PTS_UNIT / rate; + pts_us64 += div_u64((u64)(vop_time_inc_since_last_anch * + PTS_UNIT / rate) * 100, 9); + + if (vop_time_inc_since_last_anch > (1 << 14)) { + /* avoid overflow */ + last_anch_pts = pts; + last_anch_pts_us64 = pts_us64; + vop_time_inc_since_last_anch = 0; + } + } else { + /* fixed VOP rate */ + frame_num_since_last_anch++; + pts += DUR2PTS(frame_num_since_last_anch * + vmpeg4_amstream_dec_info.rate); + pts_us64 += DUR2PTS(frame_num_since_last_anch * + vmpeg4_amstream_dec_info.rate) * + 100 / 9; + + if (frame_num_since_last_anch > (1 << 15)) { + /* avoid overflow */ + last_anch_pts = pts; + last_anch_pts_us64 = pts_us64; + frame_num_since_last_anch = 0; + } + } + } + + if (reg & INTERLACE_FLAG) { /* interlace */ + if (kfifo_get(&newframe_q, &vf) == 0) { + printk + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->width = vmpeg4_amstream_dec_info.width; + vf->height = vmpeg4_amstream_dec_info.height; + vf->bufWidth = 1920; + vf->flag = 0; + vf->orientation = vmpeg4_rotation; + vf->pts = pts; + vf->pts_us64 = pts_us64; + vf->duration = duration >> 1; + vf->duration_pulldown = 0; + vf->type = (reg & TOP_FIELD_FIRST_FLAG) ? 
+ VIDTYPE_INTERLACE_TOP : + VIDTYPE_INTERLACE_BOTTOM; +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + + set_aspect_ratio(vf, READ_VREG(MP4_PIC_RATIO)); + + vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + buffer_index); + + kfifo_put(&display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + + if (kfifo_get(&newframe_q, &vf) == 0) { + printk( + "fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->width = vmpeg4_amstream_dec_info.width; + vf->height = vmpeg4_amstream_dec_info.height; + vf->bufWidth = 1920; + vf->flag = 0; + vf->orientation = vmpeg4_rotation; + + vf->pts = 0; + vf->pts_us64 = 0; + vf->duration = duration >> 1; + + vf->duration_pulldown = 0; + vf->type = (reg & TOP_FIELD_FIRST_FLAG) ? 
+ VIDTYPE_INTERLACE_BOTTOM : VIDTYPE_INTERLACE_TOP; +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + + set_aspect_ratio(vf, READ_VREG(MP4_PIC_RATIO)); + + vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + buffer_index); + + amlog_mask(LOG_MASK_PTS, + "[%s:%d] [inte] dur=0x%x rate=%d picture_type=%d\n", + __func__, __LINE__, vf->duration, + vmpeg4_amstream_dec_info.rate, picture_type); + + kfifo_put(&display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + + } else { /* progressive */ + if (kfifo_get(&newframe_q, &vf) == 0) { + printk + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->width = vmpeg4_amstream_dec_info.width; + vf->height = vmpeg4_amstream_dec_info.height; + vf->bufWidth = 1920; + vf->flag = 0; + vf->orientation = vmpeg4_rotation; + vf->pts = pts; + vf->pts_us64 = pts_us64; + vf->duration = duration; + vf->duration_pulldown = repeat_cnt * duration; +#ifdef NV21 + vf->type = + VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | + VIDTYPE_VIU_NV21; +#else + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; +#endif + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + + set_aspect_ratio(vf, READ_VREG(MP4_PIC_RATIO)); + + amlog_mask(LOG_MASK_PTS, + "[%s:%d] [prog] dur=0x%x rate=%d picture_type=%d\n", + __func__, __LINE__, vf->duration, + vmpeg4_amstream_dec_info.rate, picture_type); + + vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + buffer_index); + + kfifo_put(&display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); 
+ } + + total_frame += repeat_cnt + 1; + + WRITE_VREG(MREG_BUFFEROUT, 0); + + last_vop_time_inc = vop_time_inc; + + /*count info*/ + gvs->frame_dur = duration; + vdec_count_info(gvs, 0, offset); + } + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + return IRQ_HANDLED; +} + +static struct vframe_s *vmpeg_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + + if (kfifo_peek(&display_q, &vf)) + return vf; + + return NULL; +} + +static struct vframe_s *vmpeg_vf_get(void *op_arg) +{ + struct vframe_s *vf; + + if (kfifo_get(&display_q, &vf)) + return vf; + + return NULL; +} + +static void vmpeg_vf_put(struct vframe_s *vf, void *op_arg) +{ + kfifo_put(&recycle_q, (const struct vframe_s *)vf); +} + +static int vmpeg_event_cb(int type, void *data, void *private_data) +{ + if (type & VFRAME_EVENT_RECEIVER_RESET) { + unsigned long flags; + + amvdec_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_light_unreg_provider(&vmpeg_vf_prov); +#endif + spin_lock_irqsave(&lock, flags); + vmpeg4_local_init(); + vmpeg4_prot_init(); + spin_unlock_irqrestore(&lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vmpeg_vf_prov); +#endif + amvdec_start(); + } + + if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE && vdec) + req->req_result[0] = vdec_secure(vdec); + else + req->req_result[0] = 0xffffffff; + } + return 0; +} + +static int vmpeg_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + + spin_lock_irqsave(&lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&newframe_q); + states->buf_avail_num = kfifo_len(&display_q); + states->buf_recycle_num = kfifo_len(&recycle_q); + + spin_unlock_irqrestore(&lock, flags); + + return 0; +} + +static void vmpeg4_notify_work(struct work_struct *work) +{ + pr_info("frame duration changed %d\n", vmpeg4_amstream_dec_info.rate); + 
vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long) + vmpeg4_amstream_dec_info.rate)); + return; +} + +static void reset_do_work(struct work_struct *work) +{ + unsigned long flags; + + amvdec_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_light_unreg_provider(&vmpeg_vf_prov); +#endif + spin_lock_irqsave(&lock, flags); + vmpeg4_local_init(); + vmpeg4_prot_init(); + spin_unlock_irqrestore(&lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vmpeg_vf_prov); +#endif + amvdec_start(); +} + +static void vmpeg4_set_clk(struct work_struct *work) +{ + int fps = 96000 / frame_dur; + + saved_resolution = frame_width * frame_height * fps; + vdec_source_changed(VFORMAT_MPEG4, + frame_width, frame_height, fps); +} + +static void vmpeg_put_timer_func(struct timer_list *timer) +{ + while (!kfifo_is_empty(&recycle_q) && (READ_VREG(MREG_BUFFERIN) == 0)) { + struct vframe_s *vf; + + if (kfifo_get(&recycle_q, &vf)) { + if ((vf->index < DECODE_BUFFER_NUM_MAX) + && (--vfbuf_use[vf->index] == 0)) { + WRITE_VREG(MREG_BUFFERIN, ~(1 << vf->index)); + vf->index = DECODE_BUFFER_NUM_MAX; + } + kfifo_put(&newframe_q, (const struct vframe_s *)vf); + } + } + + if (frame_dur > 0 && saved_resolution != + frame_width * frame_height * (96000 / frame_dur)) + schedule_work(&set_clk_work); + + if (READ_VREG(AV_SCRATCH_L)) { + pr_info("mpeg4 fatal error happened,need reset !!\n"); + schedule_work(&reset_work); + } + + + timer->expires = jiffies + PUT_INTERVAL; + + add_timer(timer); +} + +int vmpeg4_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + if (!(stat & STAT_VDEC_RUN)) + return -1; + + vstatus->frame_width = vmpeg4_amstream_dec_info.width; + vstatus->frame_height = vmpeg4_amstream_dec_info.height; + if (0 != vmpeg4_amstream_dec_info.rate) + vstatus->frame_rate = + DURATION_UNIT / vmpeg4_amstream_dec_info.rate; + else + vstatus->frame_rate = -1; + vstatus->error_count = READ_VREG(MP4_ERR_COUNT); + 
vstatus->status = stat; + vstatus->bit_rate = gvs->bit_rate; + vstatus->frame_dur = frame_dur; + vstatus->frame_data = gvs->frame_data; + vstatus->total_data = gvs->total_data; + vstatus->frame_count = gvs->frame_count; + vstatus->error_frame_count = gvs->error_frame_count; + vstatus->drop_frame_count = gvs->drop_frame_count; + vstatus->total_data = gvs->total_data; + vstatus->samp_cnt = gvs->samp_cnt; + vstatus->offset = gvs->offset; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + +int vmpeg4_set_isreset(struct vdec_s *vdec, int isreset) +{ + is_reset = isreset; + return 0; +} + +static int vmpeg4_vdec_info_init(void) +{ + gvs = kzalloc(sizeof(struct vdec_info), GFP_KERNEL); + if (NULL == gvs) { + pr_info("the struct of vdec status malloc failed.\n"); + return -ENOMEM; + } + return 0; +} + +/****************************************/ +static int vmpeg4_canvas_init(void) +{ + int i, ret; + u32 canvas_width, canvas_height; + unsigned long buf_start; + u32 alloc_size, decbuf_size, decbuf_y_size, decbuf_uv_size; + + if (buf_size <= 0x00400000) { + /* SD only */ + canvas_width = 768; + canvas_height = 576; + decbuf_y_size = 0x80000; + decbuf_uv_size = 0x20000; + decbuf_size = 0x100000; + } else { + int w = vmpeg4_amstream_dec_info.width; + int h = vmpeg4_amstream_dec_info.height; + int align_w, align_h; + int max, min; + + align_w = ALIGN(w, 64); + align_h = ALIGN(h, 64); + if (align_w > align_h) { + max = align_w; + min = align_h; + } else { + max = align_h; + min = align_w; + } + /* HD & SD */ + if ((max > 1920 || min > 1088) && + ALIGN(align_w * align_h * 3/2, SZ_64K) * 9 <= + buf_size) { + canvas_width = align_w; + canvas_height = align_h; + decbuf_y_size = ALIGN(align_w * align_h, SZ_64K); + decbuf_uv_size = ALIGN(align_w * align_h/4, SZ_64K); + decbuf_size = ALIGN(align_w * align_h * 3/2, SZ_64K); + } else { /*1080p*/ + if (h > w) { + canvas_width = 1088; + canvas_height = 1920; + } else { + canvas_width = 1920; 
+ canvas_height = 1088; + } + decbuf_y_size = 0x200000; + decbuf_uv_size = 0x80000; + decbuf_size = 0x300000; + } + } + + for (i = 0; i < MAX_BMMU_BUFFER_NUM; i++) { + /* workspace mem */ + if (i == (MAX_BMMU_BUFFER_NUM - 1)) + alloc_size = WORKSPACE_SIZE; + else + alloc_size = decbuf_size; + + ret = decoder_bmmu_box_alloc_buf_phy(mm_blk_handle, i, + alloc_size, DRIVER_NAME, &buf_start); + if (ret < 0) + return ret; + if (i == (MAX_BMMU_BUFFER_NUM - 1)) { + buf_offset = buf_start - DCAC_BUFF_START_IP; + continue; + } + + +#ifdef NV21 + config_cav_lut_ex(2 * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(2 * i + 1, + buf_start + + decbuf_y_size, canvas_width, + canvas_height / 2, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); +#else + config_cav_lut_ex(3 * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(3 * i + 1, + buf_start + + decbuf_y_size, canvas_width / 2, + canvas_height / 2, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(3 * i + 2, + buf_start + + decbuf_y_size + decbuf_uv_size, + canvas_width / 2, canvas_height / 2, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); +#endif + + } + return 0; +} + +static int vmpeg4_prot_init(void) +{ + int r; +#if 1 /* /MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6)); + WRITE_VREG(DOS_SW_RESET0, 0); +#else + WRITE_RESET_REG(RESET0_REGISTER, RESET_IQIDCT | RESET_MC); +#endif + + r = vmpeg4_canvas_init(); + + /* index v << 16 | u << 8 | y */ +#ifdef NV21 + WRITE_VREG(AV_SCRATCH_0, 0x010100); + WRITE_VREG(AV_SCRATCH_1, 0x030302); + WRITE_VREG(AV_SCRATCH_2, 0x050504); + WRITE_VREG(AV_SCRATCH_3, 0x070706); + WRITE_VREG(AV_SCRATCH_G, 0x090908); + WRITE_VREG(AV_SCRATCH_H, 0x0b0b0a); + WRITE_VREG(AV_SCRATCH_I, 0x0d0d0c); + WRITE_VREG(AV_SCRATCH_J, 0x0f0f0e); +#else + 
WRITE_VREG(AV_SCRATCH_0, 0x020100); + WRITE_VREG(AV_SCRATCH_1, 0x050403); + WRITE_VREG(AV_SCRATCH_2, 0x080706); + WRITE_VREG(AV_SCRATCH_3, 0x0b0a09); + WRITE_VREG(AV_SCRATCH_G, 0x0e0d0c); + WRITE_VREG(AV_SCRATCH_H, 0x11100f); + WRITE_VREG(AV_SCRATCH_I, 0x141312); + WRITE_VREG(AV_SCRATCH_J, 0x171615); +#endif + WRITE_VREG(AV_SCRATCH_L, 0);/*clearfatal error flag*/ + + /* notify ucode the buffer offset */ + WRITE_VREG(AV_SCRATCH_F, buf_offset); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + /* clear repeat count */ + WRITE_VREG(MP4_NOT_CODED_CNT, 0); + + WRITE_VREG(MREG_BUFFERIN, 0); + WRITE_VREG(MREG_BUFFEROUT, 0); + + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + + + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + +#if 1/* /MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + pr_debug("mpeg4 meson8 prot init\n"); + WRITE_VREG(MDEC_PIC_DC_THRESH, 0x404038aa); +#endif + + WRITE_VREG(MP4_PIC_WH, (vmpeg4_amstream_dec_info. 
+ width << 16) | vmpeg4_amstream_dec_info.height); + WRITE_VREG(MP4_SYS_RATE, vmpeg4_amstream_dec_info.rate); + return r; +} + +static void vmpeg4_local_init(void) +{ + int i; + + vmpeg4_ratio = vmpeg4_amstream_dec_info.ratio; + + vmpeg4_ratio64 = vmpeg4_amstream_dec_info.ratio64; + + vmpeg4_rotation = + (((unsigned long) vmpeg4_amstream_dec_info.param) + >> 16) & 0xffff; + + frame_width = frame_height = frame_dur = frame_prog = 0; + + total_frame = 0; + saved_resolution = 0; + last_anch_pts = 0; + + last_anch_pts_us64 = 0; + + last_vop_time_inc = last_duration = 0; + + vop_time_inc_since_last_anch = 0; + + frame_num_since_last_anch = 0; + +#ifdef CONFIG_AM_VDEC_MPEG4_LOG + pts_hit = pts_missed = pts_i_hit = pts_i_missed = 0; +#endif + + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + vfbuf_use[i] = 0; + + INIT_KFIFO(display_q); + INIT_KFIFO(recycle_q); + INIT_KFIFO(newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &vfpool[i]; + + vfpool[i].index = DECODE_BUFFER_NUM_MAX; + kfifo_put(&newframe_q, (const struct vframe_s *)vf); + } + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + + mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER); +} + +static s32 vmpeg4_init(void) +{ + int trickmode_fffb = 0; + int size = -1, ret = -1; + char fw_name[32] = {0}; + char *buf = vmalloc(0x1000 * 16); + + if (IS_ERR_OR_NULL(buf)) + return -ENOMEM; + amlog_level(LOG_LEVEL_INFO, "vmpeg4_init\n"); + + if (vmpeg4_amstream_dec_info.format == + VIDEO_DEC_FORMAT_MPEG4_5) { + size = get_firmware_data(VIDEO_DEC_MPEG4_5, buf); + strncpy(fw_name, "vmpeg4_mc_5", sizeof(fw_name)); + + amlog_level(LOG_LEVEL_INFO, "load VIDEO_DEC_FORMAT_MPEG4_5\n"); + } else if (vmpeg4_amstream_dec_info.format == VIDEO_DEC_FORMAT_H263) { + size = get_firmware_data(VIDEO_DEC_H263, buf); + strncpy(fw_name, "h263_mc", 
sizeof(fw_name)); + + pr_info("load VIDEO_DEC_FORMAT_H263\n"); + } else + pr_err("unsupport mpeg4 sub format %d\n", + vmpeg4_amstream_dec_info.format); + + if (size < 0) { + pr_err("get firmware fail."); + vfree(buf); + return -1; + } + + ret = amvdec_loadmc_ex(VFORMAT_MPEG4, fw_name, buf); + if (ret < 0) { + amvdec_disable(); + vfree(buf); + pr_err("%s: the %s fw loading failed, err: %x\n", + fw_name, tee_enabled() ? "TEE" : "local", ret); + return -EBUSY; + } + + vfree(buf); + stat |= STAT_MC_LOAD; + query_video_status(0, &trickmode_fffb); + + timer_setup(&recycle_timer, vmpeg_put_timer_func, 0); + stat |= STAT_TIMER_INIT; + + if (vdec_request_irq(VDEC_IRQ_1, vmpeg4_isr, + "vmpeg4-irq", (void *)vmpeg4_dec_id)) { + amlog_level(LOG_LEVEL_ERROR, "vmpeg4 irq register error.\n"); + return -ENOENT; + } + stat |= STAT_ISR_REG; + vmpeg4_local_init(); + /* enable AMRISC side protocol */ + ret = vmpeg4_prot_init(); + if (ret < 0) { + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + return ret; + } + amvdec_enable(); + fr_hint_status = VDEC_NO_NEED_HINT; +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_provider_init(&vmpeg_vf_prov, PROVIDER_NAME, &vmpeg_vf_provider, + NULL); + vf_reg_provider(&vmpeg_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); +#else + vf_provider_init(&vmpeg_vf_prov, PROVIDER_NAME, &vmpeg_vf_provider, + NULL); + vf_reg_provider(&vmpeg_vf_prov); +#endif + if (vmpeg4_amstream_dec_info.rate != 0) { + if (!is_reset) { + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long) + vmpeg4_amstream_dec_info.rate)); + fr_hint_status = VDEC_HINTED; + } + } else + fr_hint_status = VDEC_NEED_HINT; + + stat |= STAT_VF_HOOK; + + recycle_timer.expires = jiffies + PUT_INTERVAL; + add_timer(&recycle_timer); + + stat |= STAT_TIMER_ARM; + + amvdec_start(); + + stat |= STAT_VDEC_RUN; + + return 0; +} + +static int amvdec_mpeg4_probe(struct platform_device 
*pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + + if (pdata == NULL) { + amlog_level(LOG_LEVEL_ERROR, + "amvdec_mpeg4 memory resource undefined.\n"); + return -EFAULT; + } + + if (pdata->sys_info) { + vmpeg4_amstream_dec_info = *pdata->sys_info; + if ((vmpeg4_amstream_dec_info.height != 0) && + (vmpeg4_amstream_dec_info.width > + (MAX_MPEG4_SUPPORT_SIZE/vmpeg4_amstream_dec_info.height))) { + pr_info("amvdec_mpeg4: oversize, unsupport: %d*%d\n", + vmpeg4_amstream_dec_info.width, + vmpeg4_amstream_dec_info.height); + return -EFAULT; + } + } + pdata->dec_status = vmpeg4_dec_status; + pdata->set_isreset = vmpeg4_set_isreset; + is_reset = 0; + vdec = pdata; + + INIT_WORK(&reset_work, reset_do_work); + INIT_WORK(¬ify_work, vmpeg4_notify_work); + INIT_WORK(&set_clk_work, vmpeg4_set_clk); + + vmpeg4_vdec_info_init(); + + if (vmpeg4_init() < 0) { + amlog_level(LOG_LEVEL_ERROR, "amvdec_mpeg4 init failed.\n"); + kfree(gvs); + gvs = NULL; + pdata->dec_status = NULL; + return -ENODEV; + } + + return 0; +} + +static int amvdec_mpeg4_remove(struct platform_device *pdev) +{ + if (stat & STAT_VDEC_RUN) { + amvdec_stop(); + stat &= ~STAT_VDEC_RUN; + } + + if (stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)vmpeg4_dec_id); + stat &= ~STAT_ISR_REG; + } + + if (stat & STAT_TIMER_ARM) { + del_timer_sync(&recycle_timer); + stat &= ~STAT_TIMER_ARM; + } + + if (stat & STAT_VF_HOOK) { + if (fr_hint_status == VDEC_HINTED) + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_END_HINT, NULL); + fr_hint_status = VDEC_NO_NEED_HINT; + + vf_unreg_provider(&vmpeg_vf_prov); + stat &= ~STAT_VF_HOOK; + } + + cancel_work_sync(&reset_work); + cancel_work_sync(¬ify_work); + cancel_work_sync(&set_clk_work); + + amvdec_disable(); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TM2) + vdec_reset_core(NULL); + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + + amlog_mask(LOG_MASK_PTS, + "pts hit %d, pts missed 
%d, i hit %d, missed %d\n", pts_hit, + pts_missed, pts_i_hit, pts_i_missed); + amlog_mask(LOG_MASK_PTS, "total frame %d, rate %d\n", total_frame, + vmpeg4_amstream_dec_info.rate); + kfree(gvs); + gvs = NULL; + vdec = NULL; + + return 0; +} + +/****************************************/ + +static struct platform_driver amvdec_mpeg4_driver = { + .probe = amvdec_mpeg4_probe, + .remove = amvdec_mpeg4_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t amvdec_mpeg4_profile = { + .name = "mpeg4", + .profile = "" +}; +static struct mconfig mpeg4_configs[] = { + MC_PU32("stat", &stat), +}; +static struct mconfig_node mpeg4_node; + +static int __init amvdec_mpeg4_driver_init_module(void) +{ + amlog_level(LOG_LEVEL_INFO, "amvdec_mpeg4 module init\n"); + + if (platform_driver_register(&amvdec_mpeg4_driver)) { + amlog_level(LOG_LEVEL_ERROR, + "failed to register amvdec_mpeg4 driver\n"); + return -ENODEV; + } + vcodec_profile_register(&amvdec_mpeg4_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &mpeg4_node, + "mpeg4", mpeg4_configs, CONFIG_FOR_R); + return 0; +} + +static void __exit amvdec_mpeg4_driver_remove_module(void) +{ + amlog_level(LOG_LEVEL_INFO, "amvdec_mpeg4 module remove.\n"); + + platform_driver_unregister(&amvdec_mpeg4_driver); +} + +/****************************************/ +module_init(amvdec_mpeg4_driver_init_module); +module_exit(amvdec_mpeg4_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC MPEG4 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/mpeg4/vmpeg4.h b/drivers/frame_provider/decoder/mpeg4/vmpeg4.h new file mode 100644 index 0000000..7914e6a --- /dev/null +++ b/drivers/frame_provider/decoder/mpeg4/vmpeg4.h
@@ -0,0 +1,26 @@ +/* + * drivers/amlogic/amports/vmpeg4.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef VMPEG4_H +#define VMPEG4_H + +/* /#if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +/* TODO: move to register headers */ +#define VPP_VD1_POSTBLEND (1 << 10) +/* /#endif */ + +#endif /* VMPEG4_H */
diff --git a/drivers/frame_provider/decoder/mpeg4/vmpeg4_multi.c b/drivers/frame_provider/decoder/mpeg4/vmpeg4_multi.c new file mode 100644 index 0000000..00ceb71 --- /dev/null +++ b/drivers/frame_provider/decoder/mpeg4/vmpeg4_multi.c
@@ -0,0 +1,2996 @@ +/* + * drivers/amlogic/amports/vmpeg4.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" + +#include "../utils/amvdec.h" +#include "../utils/vdec_input.h" +#include "../utils/vdec.h" +#include "../utils/firmware.h" +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../utils/firmware.h" +#include "../utils/vdec_v4l2_buffer_ops.h" +#include "../utils/config_parser.h" +#include <media/v4l2-mem2mem.h> +#include "../utils/vdec_feature.h" + +#define DRIVER_NAME 
"ammvdec_mpeg4" + +#define MEM_NAME "codec_mmpeg4" + +#define DEBUG_PTS + +#define NV21 +#define I_PICTURE 0 +#define P_PICTURE 1 +#define B_PICTURE 2 +#define GET_PIC_TYPE(type) ("IPB####"[type&0x3]) + +#define ORI_BUFFER_START_ADDR 0x01000000 +#define DEFAULT_MEM_SIZE (32*SZ_1M) + +#define INTERLACE_FLAG 0x80 +#define TOP_FIELD_FIRST_FLAG 0x40 + +/* protocol registers */ +#define MREG_REF0 AV_SCRATCH_1 +#define MREG_REF1 AV_SCRATCH_2 +#define MP4_PIC_RATIO AV_SCRATCH_5 +#define MP4_RATE AV_SCRATCH_3 +#define MP4_ERR_COUNT AV_SCRATCH_6 +#define MP4_PIC_WH AV_SCRATCH_7 +#define MREG_INPUT AV_SCRATCH_8 +#define MREG_BUFFEROUT AV_SCRATCH_9 +#define MP4_NOT_CODED_CNT AV_SCRATCH_A +#define MP4_VOP_TIME_INC AV_SCRATCH_B +#define MP4_OFFSET_REG AV_SCRATCH_C +#define MP4_VOS_INFO AV_SCRATCH_D +#define MP4_SYS_RATE AV_SCRATCH_E +#define MEM_OFFSET_REG AV_SCRATCH_F +#define MP4_PIC_INFO AV_SCRATCH_H + +#define PARC_FORBIDDEN 0 +#define PARC_SQUARE 1 +#define PARC_CIF 2 +#define PARC_10_11 3 +#define PARC_16_11 4 +#define PARC_40_33 5 +#define PARC_RESERVED 6 +/* values between 6 and 14 are reserved */ +#define PARC_EXTENDED 15 + +#define VF_POOL_SIZE 64 +#define DECODE_BUFFER_NUM_MAX 16 +#define DECODE_BUFFER_NUM_DEF 8 +#define PUT_INTERVAL (HZ/100) +#define MAX_BMMU_BUFFER_NUM (DECODE_BUFFER_NUM_MAX + 1) +#define WORKSPACE_SIZE (12*SZ_64K) +static u32 buf_size = 32 * 1024 * 1024; + +#define CTX_LMEM_SWAP_OFFSET 0 +#define CTX_QUANT_MATRIX_OFFSET 0x800 +/* dcac buffer must align at 4k boundary */ +#define CTX_DCAC_BUF_OFFSET 0x1000 +#define CTX_DECBUF_OFFSET (0x0c0000 + 0x1000) + +#define RATE_DETECT_COUNT 5 +#define DURATION_UNIT 96000 +#define PTS_UNIT 90000 +#define CHECK_INTERVAL (HZ/100) + +#define DUR2PTS(x) ((x) - ((x) >> 4)) + +/* 96000/(60fps* 2field) = 800, 96000/10fps = 9600 */ +#define MPEG4_VALID_DUR(x) ((x < 9600) && (x > 799)) + +#define MAX_MPEG4_SUPPORT_SIZE (1920*1088) + +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 
+#define DEC_RESULT_ERROR 3 +#define DEC_RESULT_FORCE_EXIT 4 +#define DEC_RESULT_EOS 5 +#define DEC_RESULT_UNFINISH 6 + +#define DEC_DECODE_TIMEOUT 0x21 +#define DECODE_ID(hw) (hw_to_vdec(hw)->id) +#define DECODE_STOP_POS AV_SCRATCH_K +static u32 udebug_flag; + +static struct vframe_s *vmpeg_vf_peek(void *); +static struct vframe_s *vmpeg_vf_get(void *); +static void vmpeg_vf_put(struct vframe_s *, void *); +static int vmpeg_vf_states(struct vframe_states *states, void *); +static int vmpeg_event_cb(int type, void *data, void *private_data); +static int notify_v4l_eos(struct vdec_s *vdec); + +static int pre_decode_buf_level = 0x800; +static int start_decode_buf_level = 0x4000; +static int debug_enable; +static unsigned int radr; +static unsigned int rval; +/* 0x40bit = 8byte */ +static unsigned int frmbase_cont_bitlevel = 0x40; +static unsigned int dynamic_buf_num_margin; + +#define VMPEG4_DEV_NUM 9 +static unsigned int max_decode_instance_num = VMPEG4_DEV_NUM; +static unsigned int max_process_time[VMPEG4_DEV_NUM]; +static unsigned int decode_timeout_val = 200; + +static u32 without_display_mode; + +#undef pr_info +#define pr_info printk +unsigned int mpeg4_debug_mask = 0xff; +static u32 run_ready_min_buf_num = 2; + + +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_RUN_FLOW 0X0001 +#define PRINT_FLAG_TIMEINFO 0x0002 +#define PRINT_FLAG_UCODE_DETAIL 0x0004 +#define PRINT_FLAG_VLD_DETAIL 0x0008 +#define PRINT_FLAG_DEC_DETAIL 0x0010 +#define PRINT_FLAG_BUFFER_DETAIL 0x0020 +#define PRINT_FLAG_RESTORE 0x0040 +#define PRINT_FRAME_NUM 0x0080 +#define PRINT_FLAG_FORCE_DONE 0x0100 +#define PRINT_FLAG_COUNTER 0X0200 +#define PRINT_FRAMEBASE_DATA 0x0400 +#define PRINT_FLAG_VDEC_STATUS 0x0800 +#define PRINT_FLAG_TIMEOUT_STATUS 0x1000 +#define PRINT_FLAG_V4L_DETAIL 0x8000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 + +int mmpeg4_debug_print(int index, int debug_flag, const char *fmt, ...) 
+{ + if (((debug_enable & debug_flag) && + ((1 << index) & mpeg4_debug_mask)) + || (debug_flag == PRINT_FLAG_ERROR)) { + unsigned char *buf = kzalloc(512, GFP_ATOMIC); + int len = 0; + va_list args; + + if (!buf) + return 0; + + va_start(args, fmt); + len = sprintf(buf, "%d: ", index); + vsnprintf(buf + len, 512-len, fmt, args); + pr_info("%s", buf); + va_end(args); + kfree(buf); + } + return 0; +} + +struct pic_info_t { + int index; + u32 pic_type; + u32 pic_info; + u32 pts; + u64 pts64; + bool pts_valid; + u32 duration; + u32 repeat_cnt; + ulong v4l_ref_buf_addr; + u32 hw_decode_time; + u32 frame_size; // For frame base mode; + u64 timestamp; + u32 offset; + u32 height; + u32 width; +}; + +struct vdec_mpeg4_hw_s { + spinlock_t lock; + struct platform_device *platform_dev; + /* struct device *cma_dev; */ + + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + + s32 vfbuf_use[DECODE_BUFFER_NUM_MAX]; + u32 frame_width; + u32 frame_height; + u32 frame_dur; + u32 frame_prog; + + u32 ctx_valid; + u32 reg_vcop_ctrl_reg; + u32 reg_pic_head_info; + u32 reg_mpeg1_2_reg; + u32 reg_slice_qp; + u32 reg_mp4_pic_wh; + u32 reg_mp4_rate; + u32 reg_mb_info; + u32 reg_dc_ac_ctrl; + u32 reg_iqidct_control; + u32 reg_resync_marker_length; + u32 reg_rv_ai_mb_count; + struct timer_list check_timer; + u32 decode_timeout_count; + u32 timeout_cnt; + unsigned long int start_process_time; + + u32 last_vld_level; + u8 init_flag; + u32 eos; + void *mm_blk_handle; + + struct vframe_chunk_s *chunk; + u32 chunk_offset; + u32 chunk_size; + u32 chunk_frame_count; + u32 stat; + unsigned long buf_start; + u32 buf_size; + /* + unsigned long cma_alloc_addr; + int cma_alloc_count; + */ + u32 vmpeg4_ratio; + u64 vmpeg4_ratio64; + u32 rate_detect; + u32 vmpeg4_rotation; + u32 total_frame; + u32 last_vop_time_inc; + u32 last_duration; + u32 last_anch_pts; + u32 vop_time_inc_since_last_anch; + u32 
frame_num_since_last_anch; + u64 last_anch_pts_us64; + + u32 last_pts; + u64 last_pts64; + u32 pts_hit; + u32 pts_missed; + u32 pts_i_hit; + u32 pts_i_missed; + struct pic_info_t pic[DECODE_BUFFER_NUM_MAX]; + u32 canvas_spec[DECODE_BUFFER_NUM_MAX]; +#ifdef NV21 + struct canvas_config_s canvas_config[DECODE_BUFFER_NUM_MAX][2]; +#else + struct canvas_config_s canvas_config[DECODE_BUFFER_NUM_MAX][3]; +#endif + struct dec_sysinfo vmpeg4_amstream_dec_info; + + s32 refs[2]; + int dec_result; + struct work_struct work; + + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + u32 frame_num; + u32 put_num; + u32 sys_mp4_rate; + u32 run_count; + u32 not_run_ready; + u32 buffer_not_ready; + u32 input_empty; + u32 peek_num; + u32 get_num; + u32 first_i_frame_ready; + u32 drop_frame_count; + u32 unstable_pts; + u32 last_dec_pts; + + struct firmware_s *fw; + u32 blkmode; + wait_queue_head_t wait_q; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + u32 buf_num; + u32 dynamic_buf_num_margin; + u32 i_only; + int sidebind_type; + int sidebind_channel_id; + u32 res_ch_flag; + u32 profile_idc; + u32 level_idc; + unsigned int i_decoded_frames; + unsigned int i_lost_frames; + unsigned int i_concealed_frames; + unsigned int p_decoded_frames; + unsigned int p_lost_frames; + unsigned int p_concealed_frames; + unsigned int b_decoded_frames; + unsigned int b_lost_frames; + unsigned int b_concealed_frames; + int vdec_pg_enable_flag; + ulong fb_token; + char vdec_name[32]; + char pts_name[32]; + char new_q_name[32]; + char disp_q_name[32]; + bool run_flag; +}; +static void vmpeg4_local_init(struct vdec_mpeg4_hw_s *hw); +static int vmpeg4_hw_ctx_restore(struct vdec_mpeg4_hw_s *hw); +static unsigned char + get_data_check_sum(struct vdec_mpeg4_hw_s *hw, int size); +static void flush_output(struct vdec_mpeg4_hw_s * hw); + +#define PROVIDER_NAME "vdec.mpeg4" + +/* + *int query_video_status(int type, int *value); + */ +static const struct vframe_operations_s 
vf_provider_ops = { + .peek = vmpeg_vf_peek, + .get = vmpeg_vf_get, + .put = vmpeg_vf_put, + .event_cb = vmpeg_event_cb, + .vf_states = vmpeg_vf_states, +}; + +static unsigned char aspect_ratio_table[16] = { + PARC_FORBIDDEN, + PARC_SQUARE, + PARC_CIF, + PARC_10_11, + PARC_16_11, + PARC_40_33, + PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, + PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, + PARC_RESERVED, PARC_EXTENDED +}; + +static void reset_process_time(struct vdec_mpeg4_hw_s *hw); + +static int vmpeg4_get_buf_num(struct vdec_mpeg4_hw_s *hw) +{ + int buf_num = DECODE_BUFFER_NUM_DEF; + + buf_num += hw->dynamic_buf_num_margin; + if (buf_num > DECODE_BUFFER_NUM_MAX) + buf_num = DECODE_BUFFER_NUM_MAX; + + return buf_num; +} + +static int vmpeg4_v4l_alloc_buff_config_canvas(struct vdec_mpeg4_hw_s *hw, int i) +{ + int ret; + u32 canvas; + ulong decbuf_start = 0, decbuf_uv_start = 0; + int decbuf_y_size = 0, decbuf_uv_size = 0; + u32 canvas_width = 0, canvas_height = 0; + struct vdec_s *vdec = hw_to_vdec(hw); + struct vdec_v4l2_buffer *fb = NULL; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (hw->pic[i].v4l_ref_buf_addr) { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *) + hw->pic[i].v4l_ref_buf_addr; + + fb->status = FB_ST_DECODER; + return 0; + } + + ret = ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC); + if (ret < 0) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "[%d] get fb fail.\n", + ((struct aml_vcodec_ctx *) + (hw->v4l2_ctx))->id); + return ret; + } + + fb->status = FB_ST_DECODER; + + if (!hw->frame_width || !hw->frame_height) { + struct vdec_pic_info pic; + vdec_v4l_get_pic_info(ctx, &pic); + hw->frame_width = pic.visible_width; + hw->frame_height = pic.visible_height; + mmpeg4_debug_print(DECODE_ID(hw), 0, + "[%d] set %d x %d from IF layer\n", ctx->id, + hw->frame_width, hw->frame_height); + } + + hw->pic[i].v4l_ref_buf_addr = (ulong)fb; + if (fb->num_planes == 1) { + 
decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].offset; + decbuf_uv_start = decbuf_start + decbuf_y_size; + decbuf_uv_size = decbuf_y_size / 2; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + } else if (fb->num_planes == 2) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].size; + decbuf_uv_start = fb->m.mem[1].addr; + decbuf_uv_size = fb->m.mem[1].size; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + fb->m.mem[0].bytes_used = decbuf_y_size; + fb->m.mem[1].bytes_used = decbuf_uv_size; + } + + mmpeg4_debug_print(DECODE_ID(hw), 0, "[%d] %s(), v4l ref buf addr: 0x%x\n", + ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id, __func__, fb); + + if (vdec->parallel_dec == 1) { + u32 tmp; + if (canvas_y(hw->canvas_spec[i]) == 0xff) { + tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~0xff; + hw->canvas_spec[i] |= tmp; + } + if (canvas_u(hw->canvas_spec[i]) == 0xff) { + tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~(0xffff << 8); + hw->canvas_spec[i] |= tmp << 8; + hw->canvas_spec[i] |= tmp << 16; + } + canvas = hw->canvas_spec[i]; + } else { + canvas = vdec->get_canvas(i, 2); + hw->canvas_spec[i] = canvas; + } + + hw->canvas_config[i][0].phy_addr = decbuf_start; + hw->canvas_config[i][0].width = canvas_width; + hw->canvas_config[i][0].height = canvas_height; + hw->canvas_config[i][0].block_mode = hw->blkmode; + if (hw->blkmode == CANVAS_BLKMODE_LINEAR) + hw->canvas_config[i][0].endian = 7; + else + hw->canvas_config[i][0].endian = 0; + config_cav_lut(canvas_y(canvas), + &hw->canvas_config[i][0], VDEC_1); + /* mpeg4 decoder canvas need to be revert to match display canvas */ + hw->canvas_config[i][0].endian = + (hw->blkmode != CANVAS_BLKMODE_LINEAR) ? 
7 : 0; + + hw->canvas_config[i][1].phy_addr = + decbuf_uv_start; + hw->canvas_config[i][1].width = canvas_width; + hw->canvas_config[i][1].height = (canvas_height >> 1); + hw->canvas_config[i][1].block_mode = hw->blkmode; + if (hw->blkmode == CANVAS_BLKMODE_LINEAR) + hw->canvas_config[i][1].endian = 7; + else + hw->canvas_config[i][1].endian = 0; + config_cav_lut(canvas_u(canvas), + &hw->canvas_config[i][1], VDEC_1); + /* mpeg4 decoder canvas need to be revert to match display canvas */ + hw->canvas_config[i][1].endian = + (hw->blkmode != CANVAS_BLKMODE_LINEAR) ? 7 : 0; + + return 0; +} + +static bool is_enough_free_buffer(struct vdec_mpeg4_hw_s *hw) +{ + int i; + + for (i = 0; i < hw->buf_num; i++) { + if (hw->vfbuf_use[i] == 0) + break; + } + + return i == hw->buf_num ? false : true; +} + +static int find_free_buffer(struct vdec_mpeg4_hw_s *hw) +{ + int i; + + for (i = 0; i < hw->buf_num; i++) { + if (hw->vfbuf_use[i] == 0) + break; + } + + if (i == hw->buf_num) + return -1; + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) { + /*run to parser csd data*/ + i = 0xffffff; + } else { + if (!ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) + return -1; + + if (vmpeg4_v4l_alloc_buff_config_canvas(hw, i)) + return -1; + } + } + + return i; +} + +static int spec_to_index(struct vdec_mpeg4_hw_s *hw, u32 spec) +{ + int i; + + for (i = 0; i < hw->buf_num; i++) { + if (hw->canvas_spec[i] == spec) + return i; + } + + return -1; +} + +static void set_frame_info(struct vdec_mpeg4_hw_s *hw, struct vframe_s *vf, + int buffer_index) +{ + int ar = 0; + unsigned int num = 0; + unsigned int den = 0; + unsigned int pixel_ratio = READ_VREG(MP4_PIC_RATIO); + + if (hw->vmpeg4_ratio64 != 0) { + num = hw->vmpeg4_ratio64>>32; + den = hw->vmpeg4_ratio64 & 0xffffffff; + } else { + num = hw->vmpeg4_ratio>>16; + den = hw->vmpeg4_ratio & 0xffff; + + } + if ((num == 0) || (den == 0)) { + 
num = 1; + den = 1; + } + + if (hw->vmpeg4_ratio == 0) { + vf->ratio_control |= (0x90 << DISP_RATIO_ASPECT_RATIO_BIT); + vf->sar_width = 1; + vf->sar_height = 1; + /* always stretch to 16:9 */ + } else if (pixel_ratio > 0x0f) { + num = (pixel_ratio >> 8) * + hw->frame_width * num; + ar = div_u64((pixel_ratio & 0xff) * + hw->frame_height * den * 0x100ULL + + (num >> 1), num); + } else { + switch (aspect_ratio_table[pixel_ratio]) { + case 0: + vf->sar_width = 1; + vf->sar_height = 1; + num = hw->frame_width * num; + ar = (hw->frame_height * den * + 0x100 + (num >> 1)) / num; + break; + case 1: + vf->sar_width = 1; + vf->sar_height = 1; + num = vf->width * num; + ar = (vf->height * den * 0x100 + (num >> 1)) / num; + break; + case 2: + vf->sar_width = 12; + vf->sar_height = 11; + num = (vf->width * 12) * num; + ar = (vf->height * den * 0x100 * 11 + + ((num) >> 1)) / num; + break; + case 3: + vf->sar_width = 10; + vf->sar_height = 11; + num = (vf->width * 10) * num; + ar = (vf->height * den * 0x100 * 11 + (num >> 1)) / + num; + break; + case 4: + vf->sar_width = 16; + vf->sar_height = 11; + num = (vf->width * 16) * num; + ar = (vf->height * den * 0x100 * 11 + (num >> 1)) / + num; + break; + case 5: + vf->sar_width = 40; + vf->sar_height = 33; + num = (vf->width * 40) * num; + ar = (vf->height * den * 0x100 * 33 + (num >> 1)) / + num; + break; + default: + vf->sar_width = 1; + vf->sar_height = 1; + num = vf->width * num; + ar = (vf->height * den * 0x100 + (num >> 1)) / num; + break; + } + } + + vf->sidebind_type = hw->sidebind_type; + vf->sidebind_channel_id = hw->sidebind_channel_id; + + ar = min(ar, DISP_RATIO_ASPECT_RATIO_MAX); + + vf->signal_type = 0; + vf->type_original = vf->type; + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + vf->canvas0Addr = vf->canvas1Addr = -1; +#ifdef NV21 + vf->plane_num = 2; +#else + vf->plane_num = 3; +#endif + vf->canvas0_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas0_config[1] = 
hw->canvas_config[buffer_index][1]; +#ifndef NV21 + vf->canvas0_config[2] = hw->canvas_config[buffer_index][2]; +#endif + vf->canvas1_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas1_config[1] = hw->canvas_config[buffer_index][1]; +#ifndef NV21 + vf->canvas1_config[2] = hw->canvas_config[buffer_index][2]; +#endif +} + +static inline void vmpeg4_save_hw_context(struct vdec_mpeg4_hw_s *hw) +{ + hw->reg_mpeg1_2_reg = READ_VREG(MPEG1_2_REG); + hw->reg_vcop_ctrl_reg = READ_VREG(VCOP_CTRL_REG); + hw->reg_pic_head_info = READ_VREG(PIC_HEAD_INFO); + hw->reg_slice_qp = READ_VREG(SLICE_QP); + hw->reg_mp4_pic_wh = READ_VREG(MP4_PIC_WH); + hw->reg_mp4_rate = READ_VREG(MP4_RATE); + hw->reg_mb_info = READ_VREG(MB_INFO); + hw->reg_dc_ac_ctrl = READ_VREG(DC_AC_CTRL); + hw->reg_iqidct_control = READ_VREG(IQIDCT_CONTROL); + hw->reg_resync_marker_length = READ_VREG(RESYNC_MARKER_LENGTH); + hw->reg_rv_ai_mb_count = READ_VREG(RV_AI_MB_COUNT); +} + +static int update_ref(struct vdec_mpeg4_hw_s *hw, int index) +{ + hw->vfbuf_use[index]++; + + if (hw->refs[1] == -1) { + hw->refs[1] = index; + index = -1; + } else if (hw->refs[0] == -1) { + hw->refs[0] = hw->refs[1]; + hw->refs[1] = index; + index = hw->refs[0]; + } else { + hw->vfbuf_use[hw->refs[0]]--; + hw->refs[0] = hw->refs[1]; + hw->refs[1] = index; + index = hw->refs[0]; + } + + return index; +} + +static int prepare_display_buf(struct vdec_mpeg4_hw_s * hw, + struct pic_info_t *pic) +{ + struct vframe_s *vf = NULL; + struct vdec_s *vdec = hw_to_vdec(hw); + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + ulong nv_order = VIDTYPE_VIU_NV21; + int index = pic->index; + bool pb_skip = false; + unsigned long flags; + + /* swap uv */ + if (hw->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + nv_order = VIDTYPE_VIU_NV12; + if (vdec->prog_only) + pic->pic_info &= ~INTERLACE_FLAG; + } + + if (hw->i_only) + pb_skip = 1; 
+ + if (pic->pic_info & INTERLACE_FLAG) { + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "fatal error, no available buffer slot."); + return -1; + } + + if (hw->is_used_v4l) { + vf->v4l_mem_handle + = hw->pic[index].v4l_ref_buf_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), v4l mem handle: 0x%lx\n", + ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id, + __func__, vf->v4l_mem_handle); + } + + vf->index = pic->index; + vf->width = pic->width; + vf->height = pic->height; + vf->bufWidth = 1920; + vf->flag = 0; + vf->orientation = hw->vmpeg4_rotation; + vf->pts = pic->pts; + vf->pts_us64 = pic->pts64; + vf->timestamp = pic->timestamp; + vf->duration = pic->duration >> 1; + vf->duration_pulldown = 0; + vf->type = (pic->pic_info & TOP_FIELD_FIRST_FLAG) ? + VIDTYPE_INTERLACE_TOP : VIDTYPE_INTERLACE_BOTTOM; +#ifdef NV21 + vf->type |= nv_order; +#endif + set_frame_info(hw, vf, pic->index); + + hw->vfbuf_use[pic->index]++; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "field0: pts %d, pts64 %lld, w %d, h %d, dur %d\n", + vf->pts, vf->pts_us64, vf->width, vf->height, vf->duration); + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = + (((u64)vf->duration << 32) & + 0xffffffff00000000) | pic->offset; + vf->pts = 0; + } + if (((hw->first_i_frame_ready == 0) || pb_skip) + && (pic->pic_type != I_PICTURE)) { + hw->drop_frame_count++; + if (pic->pic_type == I_PICTURE) { + hw->i_lost_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_lost_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_lost_frames++; + } + hw->vfbuf_use[index]--; + spin_lock_irqsave(&hw->lock, flags); + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + spin_unlock_irqrestore(&hw->lock, flags); + return 0; + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, index); + kfifo_put(&hw->display_q, 
(const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->pts); + hw->frame_num++; + if (pic->pic_type == I_PICTURE) { + hw->i_decoded_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_decoded_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_decoded_frames++; + } + if (without_display_mode == 0) { + if (hw->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } else { + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } else + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } + + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "error, no available buf.\n"); + hw->dec_result = DEC_RESULT_ERROR; + return -1; + } + + vf->index = pic->index; + vf->width = pic->width; + vf->height = pic->height; + vf->bufWidth = 1920; + vf->flag = 0; + vf->orientation = hw->vmpeg4_rotation; + vf->pts = 0; + vf->pts_us64 = 0; + vf->timestamp = 0; + vf->duration = pic->duration >> 1; + vf->duration_pulldown = 0; + vf->type = (pic->pic_info & TOP_FIELD_FIRST_FLAG) ? 
+ VIDTYPE_INTERLACE_BOTTOM : VIDTYPE_INTERLACE_TOP; +#ifdef NV21 + vf->type |= nv_order; +#endif + set_frame_info(hw, vf, pic->index); + + hw->vfbuf_use[pic->index]++; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "filed1: pts %d, pts64 %lld, w %d, h %d, dur: %d\n", + vf->pts, vf->pts_us64, vf->width, vf->height, vf->duration); + + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = (u64)-1; + vf->pts = 0; + } + if (((hw->first_i_frame_ready == 0) || pb_skip) + && (pic->pic_type != I_PICTURE)) { + hw->drop_frame_count++; + if (pic->pic_type == I_PICTURE) { + hw->i_lost_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_lost_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_lost_frames++; + } + hw->vfbuf_use[index]--; + spin_lock_irqsave(&hw->lock, flags); + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + spin_unlock_irqrestore(&hw->lock, flags); + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, index); + decoder_do_frame_check(vdec, vf); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->pts); + vdec->vdec_fps_detec(vdec->id); + hw->frame_num++; + if (pic->pic_type == I_PICTURE) { + hw->i_decoded_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_decoded_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_decoded_frames++; + } + if (without_display_mode == 0) { + if (hw->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } else { + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } else + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } + } else { + /* progressive */ + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "error, no available buf\n"); + hw->dec_result = DEC_RESULT_ERROR; + return 
-1; + } + + if (hw->is_used_v4l) { + vf->v4l_mem_handle + = hw->pic[index].v4l_ref_buf_addr; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), v4l mem handle: 0x%lx\n", + ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id, + __func__, vf->v4l_mem_handle); + } + + vf->index = index; + vf->width = pic->width; + vf->height = pic->height; + vf->bufWidth = 1920; + vf->flag = 0; + vf->orientation = hw->vmpeg4_rotation; + vf->pts = pic->pts; + vf->pts_us64 = pic->pts64; + vf->timestamp = pic->timestamp; + vf->duration = pic->duration; + vf->duration_pulldown = pic->repeat_cnt * + pic->duration; +#ifdef NV21 + vf->type = VIDTYPE_PROGRESSIVE | + VIDTYPE_VIU_FIELD | nv_order; +#else + vf->type = VIDTYPE_PROGRESSIVE | + VIDTYPE_VIU_FIELD; +#endif + set_frame_info(hw, vf, index); + + hw->vfbuf_use[index]++; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "prog: pts %d, pts64 %lld, w %d, h %d, dur %d\n", + vf->pts, vf->pts_us64, vf->width, vf->height, vf->duration); + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = + (((u64)vf->duration << 32) & + 0xffffffff00000000) | pic->offset; + vf->pts = 0; + } + if (((hw->first_i_frame_ready == 0) || pb_skip) + && (pic->pic_type != I_PICTURE)) { + hw->drop_frame_count++; + if (pic->pic_type == I_PICTURE) { + hw->i_lost_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_lost_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_lost_frames++; + } + hw->vfbuf_use[index]--; + spin_lock_irqsave(&hw->lock, flags); + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + spin_unlock_irqrestore(&hw->lock, flags); + } else { + struct vdec_info vinfo; + + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, index); + decoder_do_frame_check(vdec, vf); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->pts); + ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q)); + 
ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q)); + vdec->vdec_fps_detec(vdec->id); + hw->frame_num++; + if (pic->pic_type == I_PICTURE) { + hw->i_decoded_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_decoded_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_decoded_frames++; + } + vdec->dec_status(vdec, &vinfo); + vdec_fill_vdec_frame(vdec, NULL, + &vinfo, vf, pic->hw_decode_time); + if (without_display_mode == 0) { + if (hw->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } else { + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } else + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } + + } + return 0; +} + +static void vmpeg4_prepare_input(struct vdec_mpeg4_hw_s *hw) +{ + struct vdec_s *vdec = hw_to_vdec(hw); + struct vdec_input_s *input = &vdec->input; + struct vframe_block_list_s *block = NULL; + struct vframe_chunk_s *chunk = hw->chunk; + int dummy; + + if (chunk == NULL) + return; + + /* full reset to HW input */ + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0); + + /* reset VLD fifo for all vdec */ + WRITE_VREG(DOS_SW_RESET0, (1<<5) | (1<<4) | (1<<3)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(POWER_CTL_VLD, 1 << 4); + + /* + *setup HW decoder input buffer (VLD context) + * based on input->type and input->target + */ + if (input_frame_based(input)) { + block = chunk->block; + + WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, block->start); + WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, block->start + + block->size - 8); + WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR, + round_down(block->start + hw->chunk_offset, + VDEC_FIFO_ALIGN)); + + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1); + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0); + + /* set to manual mode */ + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2); + WRITE_VREG(VLD_MEM_VIFIFO_RP, + round_down(block->start + hw->chunk_offset, + VDEC_FIFO_ALIGN)); + dummy = hw->chunk_offset + 
hw->chunk_size + + VLD_PADDING_SIZE; + if (dummy >= block->size) + dummy -= block->size; + WRITE_VREG(VLD_MEM_VIFIFO_WP, + round_down(block->start + dummy, + VDEC_FIFO_ALIGN)); + + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3); + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2); + + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, + (0x11 << 16) | (1<<10) | (7<<3)); + + } +} + +static int vmpeg4_get_ps_info(struct vdec_mpeg4_hw_s *hw, int width, int height, struct aml_vdec_ps_infos *ps) +{ + ps->visible_width = width; + ps->visible_height = height; + ps->coded_width = ALIGN(width, 64); + ps->coded_height = ALIGN(height, 64); + ps->dpb_size = hw->buf_num; + + return 0; +} + +static int v4l_res_change(struct vdec_mpeg4_hw_s *hw, int width, int height) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int ret = 0; + + if (ctx->param_sets_from_ucode && + hw->res_ch_flag == 0) { + struct aml_vdec_ps_infos ps; + + if ((hw->frame_width != 0 && + hw->frame_height != 0) && + (hw->frame_width != width || + hw->frame_height != height)) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "v4l_res_change Pic Width/Height Change (%d,%d)=>(%d,%d)\n", + hw->frame_width, hw->frame_height, + width, + height); + vmpeg4_get_ps_info(hw, width, height, &ps); + vdec_v4l_set_ps_infos(ctx, &ps); + vdec_v4l_res_ch_event(ctx); + hw->v4l_params_parsed = false; + hw->res_ch_flag = 1; + ctx->v4l_resolution_change = 1; + hw->eos = 1; + flush_output(hw); + if (hw->is_used_v4l) + notify_v4l_eos(hw_to_vdec(hw)); + + ret = 1; + } + } + + return ret; +} + + +static irqreturn_t vmpeg4_isr_thread_fn(struct vdec_s *vdec, int irq) +{ + u32 reg; + u32 picture_type; + int index; + u32 pts, offset = 0; + u64 pts_us64 = 0; + u32 frame_size, dec_w, dec_h; + u32 time_increment_resolution, fixed_vop_rate, vop_time_inc, vos_info; + u32 repeat_cnt, duration = 3200; + struct pic_info_t *dec_pic, *disp_pic; + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)(vdec->private); + if (hw->eos) + return IRQ_HANDLED; + + if 
(READ_VREG(MP4_PIC_INFO) == 1) { + if (hw->is_used_v4l) { + int frame_width = READ_VREG(MP4_PIC_WH)>> 16; + int frame_height = READ_VREG(MP4_PIC_WH) & 0xffff; + if (!v4l_res_change(hw, frame_width, frame_height)) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) { + struct aml_vdec_ps_infos ps; + + vmpeg4_get_ps_info(hw, frame_width, frame_height, &ps); + hw->v4l_params_parsed = true; + vdec_v4l_set_ps_infos(ctx, &ps); + reset_process_time(hw); + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + } else { + WRITE_VREG(MP4_PIC_INFO, 0); + } + } else { + reset_process_time(hw); + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + } + } else + WRITE_VREG(MP4_PIC_INFO, 0); + return IRQ_HANDLED; + } + + if ((hw->is_used_v4l) && !hw->v4l_params_parsed) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "The head was not found, can not to decode\n"); + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + if (READ_VREG(AV_SCRATCH_M) != 0 && + (debug_enable & PRINT_FLAG_UCODE_DETAIL)) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_UCODE_DETAIL, + "dbg %x: %x, level %x, wp %x, rp %x, cnt %x\n", + READ_VREG(AV_SCRATCH_M), READ_VREG(AV_SCRATCH_N), + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VIFF_BIT_CNT)); + WRITE_VREG(AV_SCRATCH_M, 0); + return IRQ_HANDLED; + } + reg = READ_VREG(MREG_BUFFEROUT); + + time_increment_resolution = READ_VREG(MP4_RATE); + fixed_vop_rate = time_increment_resolution >> 16; + time_increment_resolution &= 0xffff; + if (time_increment_resolution > 0 && + fixed_vop_rate == 0) + hw->sys_mp4_rate = time_increment_resolution; + + if (hw->vmpeg4_amstream_dec_info.rate == 0) { + if ((fixed_vop_rate != 0) && + (time_increment_resolution != 0)) { + 
hw->vmpeg4_amstream_dec_info.rate = fixed_vop_rate * + DURATION_UNIT / time_increment_resolution; + } else if (time_increment_resolution == 0 + && hw->sys_mp4_rate > 0) + time_increment_resolution = hw->sys_mp4_rate; + } + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "time_inc_res = %d, fixed_vop_rate = %d, rate = %d\n", + time_increment_resolution, fixed_vop_rate, + hw->vmpeg4_amstream_dec_info.rate); + + if (reg == 2) { + /* timeout when decoding next frame */ + + /* for frame based case, insufficient result may happen + * at the beginning when only VOL head is available save + * HW context also, such as for the QTable from VCOP register + */ + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FLAG_VLD_DETAIL, + "%s, level = %x, vfifo_ctrl = %x, bitcnt = %d\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_CONTROL), + READ_VREG(VIFF_BIT_CNT)); + + if (vdec_frame_based(vdec)) { + vmpeg4_save_hw_context(hw); + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + } else { + reset_process_time(hw); + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + } + return IRQ_HANDLED; + } else { + reset_process_time(hw); + picture_type = (reg >> 3) & 7; + repeat_cnt = READ_VREG(MP4_NOT_CODED_CNT); + vop_time_inc = READ_VREG(MP4_VOP_TIME_INC); + vos_info = READ_VREG(MP4_VOS_INFO); + if ((vos_info & 0xff) && + (((vos_info >> 4) & 0xf) != hw->profile_idc || + (vos_info & 0xf) != hw->level_idc)) { + hw->profile_idc = vos_info >> 4 & 0xf; + hw->level_idc = vos_info & 0xf; + vdec_set_profile_level(vdec, hw->profile_idc, hw->level_idc); + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "profile_idc: %d level_idc: %d\n", + hw->profile_idc, hw->level_idc); + } + + index = spec_to_index(hw, READ_VREG(REC_CANVAS_ADDR)); + if (index < 0) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "invalid buffer index %d. 
rec = %x\n", + index, READ_VREG(REC_CANVAS_ADDR)); + hw->dec_result = DEC_RESULT_ERROR; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + hw->dec_result = DEC_RESULT_DONE; + dec_pic = &hw->pic[index]; + if (vdec->mvfrm) { + dec_pic->frame_size = vdec->mvfrm->frame_size; + dec_pic->hw_decode_time = + local_clock() - vdec->mvfrm->hw_decode_start; + } + dec_pic->pts_valid = false; + dec_pic->pts = 0; + dec_pic->pts64 = 0; + dec_pic->timestamp = 0; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_BUFFER_DETAIL, + "new pic: index=%d, used=%d, repeat=%d, time_inc=%d\n", + index, hw->vfbuf_use[index], repeat_cnt, vop_time_inc); + + dec_w = READ_VREG(MP4_PIC_WH)>> 16; + dec_h = READ_VREG(MP4_PIC_WH) & 0xffff; + if (dec_w != 0) { + hw->frame_width = dec_w; + dec_pic->width = dec_w; + } + if (dec_h != 0) { + hw->frame_height = dec_h; + dec_pic->height = dec_h; + } + hw->res_ch_flag = 0; + + if (hw->vmpeg4_amstream_dec_info.rate == 0) { + if (vop_time_inc < hw->last_vop_time_inc) { + duration = vop_time_inc + + time_increment_resolution - + hw->last_vop_time_inc; + } else { + duration = vop_time_inc - + hw->last_vop_time_inc; + } + + if (duration == hw->last_duration) { + hw->rate_detect++; + if ((hw->rate_detect >= RATE_DETECT_COUNT) && + (time_increment_resolution != 0)) { + hw->vmpeg4_amstream_dec_info.rate = + duration * DURATION_UNIT / + time_increment_resolution; + duration = + hw->vmpeg4_amstream_dec_info.rate; + } + } else { + hw->rate_detect = 0; + hw->last_duration = duration; + } + if (MPEG4_VALID_DUR(duration)) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "warn: duration %x, set 0\n", duration); + duration = 0; + } + } else { + duration = hw->vmpeg4_amstream_dec_info.rate; +#if 0 + pr_info("info rate = %d, ucode rate = 0x%x:0x%x\n", + hw->vmpeg4_amstream_dec_info.rate, + READ_VREG(MP4_RATE), vop_time_inc); +#endif + } + + /* frame mode with unstable pts */ + if (hw->unstable_pts && hw->chunk) { + dec_pic->pts_valid = hw->chunk->pts_valid; 
+ dec_pic->pts = hw->chunk->pts; + dec_pic->pts64 = hw->chunk->pts64; + dec_pic->timestamp = hw->chunk->timestamp; + if ((B_PICTURE == picture_type) || + (hw->last_dec_pts == dec_pic->pts)) + dec_pic->pts_valid = 0; + + hw->last_dec_pts = dec_pic->pts; + } else if ((I_PICTURE == picture_type) || + (P_PICTURE == picture_type)) { + offset = READ_VREG(MP4_OFFSET_REG); + if (hw->chunk) { + dec_pic->pts_valid = hw->chunk->pts_valid; + dec_pic->pts = hw->chunk->pts; + dec_pic->pts64 = hw->chunk->pts64; + dec_pic->timestamp = hw->chunk->timestamp; + } else { + dec_pic->offset = offset; + if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) { + if (pts_lookup_offset_us64(PTS_TYPE_VIDEO, offset, + &pts, &frame_size, 3000, &pts_us64) == 0) { + dec_pic->pts_valid = true; + dec_pic->pts = pts; + dec_pic->pts64 = pts_us64; + hw->pts_hit++; + } else { + dec_pic->pts_valid = false; + hw->pts_missed++; + } + } + } + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "%c, offset=0x%x, pts=0x%x(%d), index=%d, used=%d\n", + GET_PIC_TYPE(picture_type), offset, dec_pic->pts, + dec_pic->pts_valid, index, hw->vfbuf_use[index]); + } + + dec_pic->index = index; + dec_pic->pic_info = reg; + dec_pic->pic_type = picture_type; + dec_pic->duration = duration; + hw->vfbuf_use[index] = 0; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "mmpeg4: pic_num: %d, index %d, type %c, pts %x\n", + hw->frame_num, index, + GET_PIC_TYPE(picture_type), + dec_pic->pts); + + /* buffer management */ + if ((picture_type == I_PICTURE) || + (picture_type == P_PICTURE)) { + index = update_ref(hw, index); + } else { + /* drop B frame or disp immediately. 
+ * depend on if there are two ref frames + */ + if (hw->refs[1] == -1) + index = -1; + } + vmpeg4_save_hw_context(hw); + if (index < 0) { + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + disp_pic = &hw->pic[index]; + if ((hw->first_i_frame_ready == 0) && + (I_PICTURE == disp_pic->pic_type)) + hw->first_i_frame_ready = 1; + + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "disp: index=%d, pts=%x(%d), used=%d, picout=%c(dec=%c)\n", + index, disp_pic->pts, disp_pic->pts_valid, + hw->vfbuf_use[index], + GET_PIC_TYPE(disp_pic->pic_type), + GET_PIC_TYPE(picture_type)); + + if (disp_pic->pts_valid) { + hw->last_anch_pts = disp_pic->pts; + hw->last_anch_pts_us64 = disp_pic->pts64; + hw->frame_num_since_last_anch = 0; + hw->vop_time_inc_since_last_anch = 0; + } else if (vdec_stream_based(vdec)) { + disp_pic->pts = hw->last_anch_pts; + disp_pic->pts64 = hw->last_anch_pts_us64; + + if ((time_increment_resolution != 0) && + (fixed_vop_rate == 0) && + (hw->vmpeg4_amstream_dec_info.rate == 0)) { + /* variable PTS rate */ + /*bug on variable pts calc, + *do as dixed vop first if we + *have rate setting before. 
+ */ + if (vop_time_inc > hw->last_vop_time_inc) { + duration = vop_time_inc - + hw->last_vop_time_inc; + } else { + duration = vop_time_inc + + time_increment_resolution - + hw->last_vop_time_inc; + } + + hw->vop_time_inc_since_last_anch += duration; + + disp_pic->pts += hw->vop_time_inc_since_last_anch * + PTS_UNIT / time_increment_resolution; + disp_pic->pts64 += (hw->vop_time_inc_since_last_anch * + PTS_UNIT / time_increment_resolution) * + 100 / 9; + + if (hw->vop_time_inc_since_last_anch > + (1 << 14)) { + /* avoid overflow */ + hw->last_anch_pts = disp_pic->pts; + hw->last_anch_pts_us64 = disp_pic->pts64; + hw->vop_time_inc_since_last_anch = 0; + } + } else { + /* fixed VOP rate */ + hw->frame_num_since_last_anch++; + disp_pic->pts += DUR2PTS(hw->frame_num_since_last_anch * + hw->vmpeg4_amstream_dec_info.rate); + disp_pic->pts64 += DUR2PTS( + hw->frame_num_since_last_anch * + hw->vmpeg4_amstream_dec_info.rate) * 100 / 9; + + if (hw->frame_num_since_last_anch > (1 << 15)) { + /* avoid overflow */ + hw->last_anch_pts = disp_pic->pts; + hw->last_anch_pts_us64 = disp_pic->pts64; + hw->frame_num_since_last_anch = 0; + } + } + } else if (hw->unstable_pts && hw->chunk && + MPEG4_VALID_DUR(duration)) { + /* invalid pts calc */ + hw->frame_num_since_last_anch = hw->chunk_frame_count; + disp_pic->pts = hw->last_anch_pts + + DUR2PTS(hw->frame_num_since_last_anch * + duration); + disp_pic->pts64 = hw->last_anch_pts_us64 + + DUR2PTS(hw->frame_num_since_last_anch * + duration) * 100 / 9; + + if (hw->frame_num_since_last_anch > (1 << 15)) { + /* avoid overflow */ + hw->last_anch_pts = disp_pic->pts; + hw->last_anch_pts_us64 = disp_pic->pts64; + hw->frame_num_since_last_anch = 0; + } else + disp_pic->pts_valid = 1; + } + + if (vdec_frame_based(vdec) && + (hw->unstable_pts) && + MPEG4_VALID_DUR(duration)) { + + u32 threshold = DUR2PTS(duration) >> 3; + + if (disp_pic->pts <= (hw->last_pts + threshold)) { + disp_pic->pts = hw->last_pts + DUR2PTS(duration); + disp_pic->pts64 = 
hw->last_pts64 + + (DUR2PTS(duration)*100/9); + } + if (!disp_pic->pts_valid) { + disp_pic->pts = 0; + disp_pic->pts64 = 0; + disp_pic->timestamp = 0; + } + } + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "disp: pic_type %c, pts %d(%lld), diff %d, cnt %d, disp_pic->timestamp %llu\n", + GET_PIC_TYPE(disp_pic->pic_type), + disp_pic->pts, + disp_pic->pts64, + disp_pic->pts - hw->last_pts, + hw->chunk_frame_count, + disp_pic->timestamp); + hw->last_pts = disp_pic->pts; + hw->last_pts64 = disp_pic->pts64; + hw->frame_dur = duration; + disp_pic->duration = duration; + disp_pic->repeat_cnt = repeat_cnt; + + prepare_display_buf(hw, disp_pic); + + hw->total_frame += repeat_cnt + 1; + hw->last_vop_time_inc = vop_time_inc; + + if (vdec_frame_based(vdec) && + (frmbase_cont_bitlevel != 0) && + (hw->first_i_frame_ready)) { + u32 consume_byte, res_byte, bitcnt; + + bitcnt = READ_VREG(VIFF_BIT_CNT); + res_byte = bitcnt >> 3; + + if (hw->chunk_size > res_byte) { + if (bitcnt > frmbase_cont_bitlevel) { + consume_byte = hw->chunk_size - res_byte; + + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "%s, size %d, consume %d, res %d\n", __func__, + hw->chunk_size, consume_byte, res_byte); + + if (consume_byte > VDEC_FIFO_ALIGN) { + consume_byte -= VDEC_FIFO_ALIGN; + res_byte += VDEC_FIFO_ALIGN; + } + hw->chunk_offset += consume_byte; + hw->chunk_size = res_byte; + hw->dec_result = DEC_RESULT_UNFINISH; + hw->chunk_frame_count++; + hw->unstable_pts = 1; + } else { + hw->chunk_size = 0; + hw->chunk_offset = 0; + } + } else { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "error: bitbyte %d hw->chunk_size %d\n", res_byte, hw->chunk_size); + hw->chunk_size = 0; + hw->chunk_offset = 0; + } + } + vdec_schedule_work(&hw->work); + } + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FRAME_NUM, + "%s: frame num:%d\n", __func__, hw->frame_num); + + return IRQ_HANDLED; +} + +static irqreturn_t vmpeg4_isr(struct vdec_s *vdec, int irq) +{ + struct vdec_mpeg4_hw_s *hw = + (struct 
vdec_mpeg4_hw_s *)(vdec->private); + + if (hw->eos) + return IRQ_HANDLED; + + return IRQ_WAKE_THREAD; +} + +static void flush_output(struct vdec_mpeg4_hw_s * hw) +{ + struct pic_info_t *pic; + + if (hw->vfbuf_use[hw->refs[1]] > 0) { + pic = &hw->pic[hw->refs[1]]; + prepare_display_buf(hw, pic); + } +} + +static int notify_v4l_eos(struct vdec_s *vdec) +{ + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = NULL; + struct vdec_v4l2_buffer *fb = NULL; + int index = -1; + + if (hw->eos) { + if (kfifo_get(&hw->newframe_q, &vf) == 0 || vf == NULL) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "%s fatal error, no available buffer slot.\n", + __func__); + return -1; + } + + if (hw->is_used_v4l) { + index = find_free_buffer(hw); + if ((index == -1) || (index == 0xffffff)) { + ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token); + if (ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC) < 0) { + pr_err("[%d] get fb fail.\n", ctx->id); + return -1; + } + } + } + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->v4l_mem_handle = (index == -1) ? 
(ulong)fb : + hw->pic[index].v4l_ref_buf_addr;; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + + if (hw->is_used_v4l) + fb->task->submit(fb->task, TASK_TYPE_DEC); + else + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + ATRACE_COUNTER(hw->pts_name, vf->pts); + + pr_info("[%d] mpeg4 EOS notify.\n", (hw->is_used_v4l)?ctx->id:vdec->id); + } + + return 0; +} + +static void vmpeg4_work(struct work_struct *work) +{ + struct vdec_mpeg4_hw_s *hw = + container_of(work, struct vdec_mpeg4_hw_s, work); + struct vdec_s *vdec = hw_to_vdec(hw); + + /* finished decoding one frame or error, + * notify vdec core to switch context + */ + if (hw->dec_result != DEC_RESULT_DONE) + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "vmpeg4_work: result=%d,status=%d\n", + hw->dec_result, hw_to_vdec(hw)->next_status); + + if (hw->dec_result == DEC_RESULT_UNFINISH) { + if (!hw->ctx_valid) + hw->ctx_valid = 1; + + } else if ((hw->dec_result == DEC_RESULT_DONE) || + ((!hw->is_used_v4l) && (input_frame_based(&vdec->input)) && hw->chunk)) { + if (!hw->ctx_valid) + hw->ctx_valid = 1; + + vdec_vframe_dirty(vdec, hw->chunk); + hw->chunk = NULL; + } else if (hw->dec_result == DEC_RESULT_AGAIN + && (vdec->next_status != VDEC_STATUS_DISCONNECTED)) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + if (!vdec_has_more_input(vdec)) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } + } else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "%s: force exit\n", __func__); + if (hw->stat & STAT_ISR_REG) { + amvdec_stop(); + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + } else if (hw->dec_result == DEC_RESULT_EOS) { + hw->eos = 1; + if (hw->stat & STAT_VDEC_RUN) { + 
amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + vdec_vframe_dirty(vdec, hw->chunk); + hw->chunk = NULL; + vdec_clean_input(vdec); + flush_output(hw); + + notify_v4l_eos(vdec); + + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s: eos flushed, frame_num %d\n", + __func__, hw->frame_num); + } + + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + /*disable mbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 0); + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode && + !hw->v4l_params_parsed) + vdec_v4l_write_frame_sync(ctx); + } + + /* mark itself has all HW resource released and input released */ + if (vdec->parallel_dec == 1) + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1); + else + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + wake_up_interruptible(&hw->wait_q); + if (hw->vdec_cb) + hw->vdec_cb(vdec, hw->vdec_cb_arg); +} + +static struct vframe_s *vmpeg_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_s *vdec = op_arg; + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + + if (!hw) + return NULL; + hw->peek_num++; + + if (kfifo_len(&hw->display_q) > VF_POOL_SIZE) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "kfifo len:%d invaild, peek error\n", + kfifo_len(&hw->display_q)); + return NULL; + } + + if (kfifo_peek(&hw->display_q, &vf)) + return vf; + + return NULL; +} + +static struct vframe_s *vmpeg_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_s *vdec = op_arg; + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + hw->get_num++; + if (kfifo_get(&hw->display_q, &vf)) { + ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q)); + return vf; + } + return NULL; +} + +static void vmpeg_vf_put(struct vframe_s *vf, void *op_arg) +{ + struct vdec_s *vdec = op_arg; + struct vdec_mpeg4_hw_s 
*hw = (struct vdec_mpeg4_hw_s *)vdec->private; + unsigned long flags; + + if (!vf) + return; + + hw->vfbuf_use[vf->index]--; + hw->put_num++; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FRAME_NUM, + "%s: put num:%d\n",__func__, hw->put_num); + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_BUFFER_DETAIL, + "index=%d, used=%d\n", vf->index, hw->vfbuf_use[vf->index]); + spin_lock_irqsave(&hw->lock, flags); + kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf); + spin_unlock_irqrestore(&hw->lock, flags); + ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q)); +} + +static int vmpeg_event_cb(int type, void *data, void *op_arg) +{ + struct vdec_s *vdec = op_arg; + + if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(vdec); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +static int vmpeg_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + struct vdec_s *vdec = op_arg; + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + + spin_lock_irqsave(&hw->lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&hw->newframe_q); + states->buf_avail_num = kfifo_len(&hw->display_q); + states->buf_recycle_num = 0; + + spin_unlock_irqrestore(&hw->lock, flags); + + return 0; +} + + +static int dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + struct vdec_mpeg4_hw_s *hw = + (struct vdec_mpeg4_hw_s *)vdec->private; + + if (!hw) + return -1; + + vstatus->frame_width = hw->frame_width; + vstatus->frame_height = hw->frame_height; + if (0 != hw->vmpeg4_amstream_dec_info.rate) + vstatus->frame_rate = ((DURATION_UNIT * 10 / hw->vmpeg4_amstream_dec_info.rate) % 10) < 5 ? 
+ DURATION_UNIT / hw->vmpeg4_amstream_dec_info.rate : (DURATION_UNIT / hw->vmpeg4_amstream_dec_info.rate +1); + else + vstatus->frame_rate = -1; + vstatus->error_count = READ_VREG(MP4_ERR_COUNT); + vstatus->status = hw->stat; + vstatus->frame_dur = hw->frame_dur; + vstatus->error_frame_count = READ_VREG(MP4_ERR_COUNT); + vstatus->drop_frame_count = hw->drop_frame_count; + vstatus->frame_count =hw->frame_num; + vstatus->i_decoded_frames = hw->i_decoded_frames; + vstatus->i_lost_frames = hw->i_lost_frames; + vstatus->i_concealed_frames = hw->i_concealed_frames; + vstatus->p_decoded_frames = hw->p_decoded_frames; + vstatus->p_lost_frames = hw->p_lost_frames; + vstatus->p_concealed_frames = hw->p_concealed_frames; + vstatus->b_decoded_frames = hw->b_decoded_frames; + vstatus->b_lost_frames = hw->b_lost_frames; + vstatus->b_concealed_frames = hw->b_concealed_frames; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + +/****************************************/ +static int vmpeg4_canvas_init(struct vdec_mpeg4_hw_s *hw) +{ + int i, ret; + u32 canvas_width, canvas_height; + u32 decbuf_size, decbuf_y_size; + struct vdec_s *vdec = hw_to_vdec(hw); + unsigned long decbuf_start; + + if (buf_size <= 0x00400000) { + /* SD only */ + canvas_width = 768; + canvas_height = 576; + decbuf_y_size = 0x80000; + decbuf_size = 0x100000; + } else { + int w = 1920; + int h = 1088; + int align_w, align_h; + int max, min; + + align_w = ALIGN(w, 64); + align_h = ALIGN(h, 64); + if (align_w > align_h) { + max = align_w; + min = align_h; + } else { + max = align_h; + min = align_w; + } + /* HD & SD */ + if ((max > 1920 || min > 1088) && + ALIGN(align_w * align_h * 3/2, SZ_64K) * 9 <= + buf_size) { + canvas_width = align_w; + canvas_height = align_h; + decbuf_y_size = + ALIGN(align_w * align_h, SZ_64K); + decbuf_size = + ALIGN(align_w * align_h * 3/2, SZ_64K); + } else { /*1080p*/ + canvas_width = 1920; + canvas_height = 1088; + if 
(hw->vmpeg4_amstream_dec_info.width < hw->vmpeg4_amstream_dec_info.height ) { + canvas_width = 1088; + canvas_height = 1920; + } + decbuf_y_size = 0x200000; + decbuf_size = 0x300000; + } + } + + for (i = 0; i < hw->buf_num + 1; i++) { + + unsigned canvas; + + if (i == hw->buf_num) + decbuf_size = WORKSPACE_SIZE; + + if (hw->is_used_v4l && !(i == hw->buf_num)) { + continue; + } else { + ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, i, + decbuf_size, DRIVER_NAME, &decbuf_start); + if (ret < 0) { + pr_err("bmmu alloc failed! size %d idx %d\n", + decbuf_size, i); + return ret; + } + } + + if (!vdec_secure(vdec)) { + /*init internal buf*/ + char *tmpbuf = (char *)codec_mm_phys_to_virt(decbuf_start); + if (tmpbuf) { + memset(tmpbuf, 0, decbuf_size); + codec_mm_dma_flush(tmpbuf, + decbuf_size, DMA_TO_DEVICE); + } else { + tmpbuf = codec_mm_vmap(decbuf_start, decbuf_size); + if (tmpbuf) { + memset(tmpbuf, 0, decbuf_size); + codec_mm_dma_flush(tmpbuf, + decbuf_size, DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(tmpbuf); + } + } + } + + if (i == hw->buf_num) { + hw->buf_start = decbuf_start; + } else { + if (vdec->parallel_dec == 1) { + unsigned tmp; + if (canvas_y(hw->canvas_spec[i]) == 0xff) { + tmp = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~0xff; + hw->canvas_spec[i] |= tmp; + } + if (canvas_u(hw->canvas_spec[i]) == 0xff) { + tmp = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~(0xffff << 8); + hw->canvas_spec[i] |= tmp << 8; + hw->canvas_spec[i] |= tmp << 16; + } + canvas = hw->canvas_spec[i]; + } else { + canvas = vdec->get_canvas(i, 2); + hw->canvas_spec[i] = canvas; + } + + hw->canvas_config[i][0].phy_addr = decbuf_start; + hw->canvas_config[i][0].width = canvas_width; + hw->canvas_config[i][0].height = canvas_height; + hw->canvas_config[i][0].block_mode = hw->blkmode; + if (hw->blkmode == CANVAS_BLKMODE_LINEAR) + hw->canvas_config[i][0].endian = 7; + else + hw->canvas_config[i][0].endian = 0; + 
config_cav_lut(canvas_y(canvas), + &hw->canvas_config[i][0], VDEC_1); + + hw->canvas_config[i][1].phy_addr = + decbuf_start + decbuf_y_size; + hw->canvas_config[i][1].width = canvas_width; + hw->canvas_config[i][1].height = (canvas_height >> 1); + hw->canvas_config[i][1].block_mode = hw->blkmode; + if (hw->blkmode == CANVAS_BLKMODE_LINEAR) + hw->canvas_config[i][1].endian = 7; + else + hw->canvas_config[i][1].endian = 0; + config_cav_lut(canvas_u(canvas), + &hw->canvas_config[i][1], VDEC_1); + } + } + + return 0; +} + +static void vmpeg4_dump_state(struct vdec_s *vdec) +{ + struct vdec_mpeg4_hw_s *hw = + (struct vdec_mpeg4_hw_s *)(vdec->private); + u32 i; + mmpeg4_debug_print(DECODE_ID(hw), 0, + "====== %s\n", __func__); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "width/height (%d/%d), i_fram:%d, buffer_not_ready %d, buf_num %d, run_flag %d\n", + hw->frame_width, + hw->frame_height, + hw->first_i_frame_ready, + hw->buffer_not_ready, + hw->buf_num, + hw->run_flag + ); + for (i = 0; i < hw->buf_num; i++) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "index %d, used %d\n", i, hw->vfbuf_use[i]); + } + + mmpeg4_debug_print(DECODE_ID(hw), 0, + "is_framebase(%d), eos %d, state 0x%x, dec_result 0x%x dec_frm %d\n", + vdec_frame_based(vdec), + hw->eos, + hw->stat, + hw->dec_result, + hw->frame_num + ); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "is_framebase(%d), put_frm %d run %d not_run_ready %d input_empty %d,drop %d\n", + vdec_frame_based(vdec), + hw->put_num, + hw->run_count, + hw->not_run_ready, + hw->input_empty, + hw->drop_frame_count + ); + + if (!hw->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s, newq(%d/%d), dispq(%d/%d) vf peek/get/put (%d/%d/%d)\n", + __func__, + 
kfifo_len(&hw->newframe_q), VF_POOL_SIZE, + kfifo_len(&hw->display_q), VF_POOL_SIZE, + hw->peek_num, hw->get_num, hw->put_num + ); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "VIFF_BIT_CNT=0x%x\n", + READ_VREG(VIFF_BIT_CNT)); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_LEVEL=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_WP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP)); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_RP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_RP)); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (vdec_frame_based(vdec) && + debug_enable & PRINT_FRAMEBASE_DATA) { + int jj; + if (hw->chunk && hw->chunk->block && + hw->chunk->size > 0) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, hw->chunk->size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + mmpeg4_debug_print(DECODE_ID(hw), 0, + "frame data size 0x%x\n", + hw->chunk->size); + for (jj = 0; jj < hw->chunk->size; jj++) { + if ((jj & 0xf) == 0) + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } +} + +static void reset_process_time(struct vdec_mpeg4_hw_s *hw) +{ + if (hw->start_process_time) { + unsigned process_time = + 1000 * (jiffies - hw->start_process_time) / HZ; + hw->start_process_time = 0; + if (process_time > max_process_time[DECODE_ID(hw)]) + max_process_time[DECODE_ID(hw)] = process_time; + } +} +static void start_process_time(struct vdec_mpeg4_hw_s *hw) +{ + 
hw->decode_timeout_count = 2; + hw->start_process_time = jiffies; +} + +static void timeout_process(struct vdec_mpeg4_hw_s *hw) +{ + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s decoder timeout %d\n", __func__, hw->timeout_cnt); + if (vdec_frame_based((hw_to_vdec(hw)))) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s frame_num %d, chunk size 0x%x, chksum 0x%x\n", + __func__, + hw->frame_num, + hw->chunk->size, + get_data_check_sum(hw, hw->chunk->size)); + } + hw->timeout_cnt++; + /* timeout: data droped, frame_num inaccurate*/ + hw->frame_num++; + reset_process_time(hw); + hw->first_i_frame_ready = 0; + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); +} + + +static void check_timer_func(struct timer_list *timer) +{ + struct vdec_mpeg4_hw_s *hw = container_of(timer, + struct vdec_mpeg4_hw_s, check_timer); + struct vdec_s *vdec = hw_to_vdec(hw); + unsigned int timeout_val = decode_timeout_val; + + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + + if (((debug_enable & PRINT_FLAG_TIMEOUT_STATUS) == 0) && + (timeout_val > 0) && + (hw->start_process_time > 0) && + ((1000 * (jiffies - hw->start_process_time) / HZ) + > timeout_val)) { + if (hw->last_vld_level == READ_VREG(VLD_MEM_VIFIFO_LEVEL)) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + timeout_process(hw); + } + hw->last_vld_level = READ_VREG(VLD_MEM_VIFIFO_LEVEL); + } + + if (vdec->next_status == VDEC_STATUS_DISCONNECTED) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "vdec requested to be disconnected\n"); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static int 
vmpeg4_hw_ctx_restore(struct vdec_mpeg4_hw_s *hw) +{ + int index, i; + void *workspace_buf = NULL; + + index = find_free_buffer(hw); + if (index < 0) + return -1; + + if (!hw->init_flag) { + if (vmpeg4_canvas_init(hw) < 0) + return -1; + } else { + if (!hw->is_used_v4l) { + for (i = 0; i < hw->buf_num; i++) { + config_cav_lut(canvas_y(hw->canvas_spec[i]), + &hw->canvas_config[i][0], VDEC_1); + config_cav_lut(canvas_u(hw->canvas_spec[i]), + &hw->canvas_config[i][1], VDEC_1); + } + } + } + /* prepare REF0 & REF1 + * points to the past two IP buffers + * prepare REC_CANVAS_ADDR and ANC2_CANVAS_ADDR + * points to the output buffer + */ + if (hw->refs[0] == -1) { + WRITE_VREG(MREG_REF0, (hw->refs[1] == -1) ? 0xffffffff : + hw->canvas_spec[hw->refs[1]]); + } else { + WRITE_VREG(MREG_REF0, hw->canvas_spec[hw->refs[0]]); + } + WRITE_VREG(MREG_REF1, (hw->refs[1] == -1) ? 0xffffffff : + hw->canvas_spec[hw->refs[1]]); + if ((hw->is_used_v4l) && (index == 0xffffff)) { + WRITE_VREG(REC_CANVAS_ADDR, 0xffffff); + WRITE_VREG(ANC2_CANVAS_ADDR, 0xffffff); + } else { + WRITE_VREG(REC_CANVAS_ADDR, hw->canvas_spec[index]); + WRITE_VREG(ANC2_CANVAS_ADDR, hw->canvas_spec[index]); + } + + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RESTORE, + "restore ref0=0x%x, ref1=0x%x, rec=0x%x, ctx_valid=%d,index=%d\n", + READ_VREG(MREG_REF0), + READ_VREG(MREG_REF1), + READ_VREG(REC_CANVAS_ADDR), + hw->ctx_valid, index); + + /* notify ucode the buffer start address */ + workspace_buf = codec_mm_vmap(hw->buf_start, WORKSPACE_SIZE); + if (workspace_buf) { + /* clear to fix decoder timeout at first time */ + if (!hw->init_flag) + memset(workspace_buf, 0, WORKSPACE_SIZE); + codec_mm_dma_flush(workspace_buf, + WORKSPACE_SIZE, DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(workspace_buf); + } + WRITE_VREG(MEM_OFFSET_REG, hw->buf_start); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + WRITE_VREG(MREG_BUFFEROUT, 0x10000); + + /* clear mailbox interrupt */ + 
WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + + /* clear repeat count */ + WRITE_VREG(MP4_NOT_CODED_CNT, 0); + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + +#if 1/* /MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + WRITE_VREG(MDEC_PIC_DC_THRESH, 0x404038aa); +#endif + + WRITE_VREG(MP4_PIC_WH, (hw->ctx_valid) ? + hw->reg_mp4_pic_wh : + ((hw->frame_width << 16) | hw->frame_height)); + WRITE_VREG(MP4_SYS_RATE, hw->vmpeg4_amstream_dec_info.rate); + + if (hw->ctx_valid) { + WRITE_VREG(DC_AC_CTRL, hw->reg_dc_ac_ctrl); + WRITE_VREG(IQIDCT_CONTROL, hw->reg_iqidct_control); + WRITE_VREG(RESYNC_MARKER_LENGTH, hw->reg_resync_marker_length); + WRITE_VREG(RV_AI_MB_COUNT, hw->reg_rv_ai_mb_count); + } + WRITE_VREG(MPEG1_2_REG, (hw->ctx_valid) ? hw->reg_mpeg1_2_reg : 1); + WRITE_VREG(VCOP_CTRL_REG, hw->reg_vcop_ctrl_reg); + WRITE_VREG(PIC_HEAD_INFO, hw->reg_pic_head_info); + WRITE_VREG(SLICE_QP, hw->reg_slice_qp); + WRITE_VREG(MB_INFO, hw->reg_mb_info); + + if (vdec_frame_based(hw_to_vdec(hw)) && hw->chunk) { + /* frame based input */ + WRITE_VREG(MREG_INPUT, (hw->chunk->offset & 7) | (1<<7) | + (hw->ctx_valid<<6)); + } else { + /* stream based input */ + WRITE_VREG(MREG_INPUT, (hw->ctx_valid<<6)); + } + + return 0; +} + +static void vmpeg4_local_init(struct vdec_mpeg4_hw_s *hw) +{ + int i; + + hw->vmpeg4_ratio = hw->vmpeg4_amstream_dec_info.ratio; + + hw->vmpeg4_ratio64 = hw->vmpeg4_amstream_dec_info.ratio64; + + hw->vmpeg4_rotation = + (((unsigned long)hw->vmpeg4_amstream_dec_info.param) >> 16) & 0xffff; + hw->sys_mp4_rate = hw->vmpeg4_amstream_dec_info.rate; + if (hw->is_used_v4l) { + hw->frame_width = 0; + hw->frame_height = 0; + } else { + hw->frame_width = hw->vmpeg4_amstream_dec_info.width; + hw->frame_height = hw->vmpeg4_amstream_dec_info.height; + } + + hw->frame_dur = 0; + hw->frame_prog = 0; + hw->unstable_pts = + (((unsigned long) hw->vmpeg4_amstream_dec_info.param & 0x40) >> 6); + 
mmpeg4_debug_print(DECODE_ID(hw), 0, + "param = 0x%x unstable_pts = %d\n", + hw->vmpeg4_amstream_dec_info.param, + hw->unstable_pts); + hw->last_dec_pts = -1; + + hw->total_frame = 0; + + hw->last_anch_pts = 0; + + hw->last_anch_pts_us64 = 0; + + hw->last_vop_time_inc = hw->last_duration = 0; + + hw->vop_time_inc_since_last_anch = 0; + hw->last_pts = 0; + hw->last_pts64 = 0; + hw->frame_num_since_last_anch = 0; + hw->frame_num = 0; + hw->put_num = 0; + hw->run_count = 0; + hw->not_run_ready = 0; + hw->input_empty = 0; + hw->peek_num = 0; + hw->get_num = 0; + + hw->pts_hit = hw->pts_missed = hw->pts_i_hit = hw->pts_i_missed = 0; + hw->refs[0] = -1; + hw->refs[1] = -1; + hw->first_i_frame_ready = 0; + hw->drop_frame_count = 0; + hw->buffer_not_ready = 0; + hw->init_flag = 0; + hw->dec_result = DEC_RESULT_NONE; + hw->timeout_cnt = 0; + + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->vfbuf_use[i] = 0; + + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &hw->vfpool[i]; + + hw->vfpool[i].index = DECODE_BUFFER_NUM_MAX; + kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf); + } + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + hw->mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER); + INIT_WORK(&hw->work, vmpeg4_work); + + init_waitqueue_head(&hw->wait_q); +} + +static s32 vmmpeg4_init(struct vdec_mpeg4_hw_s *hw) +{ + int trickmode_fffb = 0; + int size = -1, fw_size = 0x1000 * 16; + struct firmware_s *fw = NULL; + + fw = vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + if (hw->vmpeg4_amstream_dec_info.format == + VIDEO_DEC_FORMAT_MPEG4_5) { + size = get_firmware_data(VIDEO_DEC_MPEG4_5_MULTI, fw->data); + strncpy(fw->name, "mmpeg4_mc_5", sizeof(fw->name)); + } else if 
(hw->vmpeg4_amstream_dec_info.format == + VIDEO_DEC_FORMAT_H263) { + size = get_firmware_data(VIDEO_DEC_H263_MULTI, fw->data); + strncpy(fw->name, "mh263_mc", sizeof(fw->name)); + } else + pr_err("unsupport mpeg4 sub format %d\n", + hw->vmpeg4_amstream_dec_info.format); + pr_info("mmpeg4 get fw %s, size %x\n", fw->name, size); + if (size < 0) { + pr_err("get firmware failed."); + vfree(fw); + return -1; + } + + fw->len = size; + hw->fw = fw; + + query_video_status(0, &trickmode_fffb); + + pr_info("%s\n", __func__); + + //amvdec_enable(); + + timer_setup(&hw->check_timer, check_timer_func, 0); + //init_timer(&hw->check_timer); + //hw->check_timer.data = (unsigned long)hw; + //hw->check_timer.function = check_timer_func; + hw->check_timer.expires = jiffies + CHECK_INTERVAL; + hw->stat |= STAT_TIMER_ARM; + hw->eos = 0; + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + + vmpeg4_local_init(hw); + wmb(); + + return 0; +} + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + if (hw->eos) + return 0; + if (vdec_stream_based(vdec) && (hw->init_flag == 0) + && pre_decode_buf_level != 0) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = vdec->input.size + wp - rp; + else + level = wp - rp; + if (level < pre_decode_buf_level) { + hw->not_run_ready++; + return 0; + } + } + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode) { + if (hw->v4l_params_parsed) { + if (!ctx->v4l_codec_dpb_ready && + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < + run_ready_min_buf_num) + return 0; + } else { + if (ctx->v4l_resolution_change) + return 0; + } + } else if (!ctx->v4l_codec_dpb_ready) { + if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < + run_ready_min_buf_num) + return 0; + } + } + + if (!is_enough_free_buffer(hw)) { + 
hw->buffer_not_ready++; + return 0; + } + + hw->not_run_ready = 0; + hw->buffer_not_ready = 0; + if (vdec->parallel_dec == 1) + return (unsigned long)(CORE_MASK_VDEC_1); + else + return (unsigned long)(CORE_MASK_VDEC_1 | CORE_MASK_HEVC); +} + +static unsigned char get_data_check_sum + (struct vdec_mpeg4_hw_s *hw, int size) +{ + int jj; + int sum = 0; + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + for (jj = 0; jj < size; jj++) + sum += data[jj]; + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static void run(struct vdec_s *vdec, unsigned long mask, + void (*callback)(struct vdec_s *, void *), void *arg) +{ + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + int size = 0, ret = 0; + + hw->run_flag = 1; + if (!hw->vdec_pg_enable_flag) { + hw->vdec_pg_enable_flag = 1; + amvdec_enable(); + } + hw->run_count++; + hw->vdec_cb_arg = arg; + hw->vdec_cb = callback; + vdec_reset_core(vdec); + + if ((vdec_frame_based(vdec)) && + (hw->dec_result == DEC_RESULT_UNFINISH)) { + vmpeg4_prepare_input(hw); + size = hw->chunk_size; + } else { + size = vdec_prepare_input(vdec, &hw->chunk); + if (size < 4) { /*less than start code size 00 00 01 xx*/ + hw->input_empty++; + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + hw->run_flag = 0; + return; + } + if ((vdec_frame_based(vdec)) && + (hw->chunk != NULL)) { + hw->chunk_offset = hw->chunk->offset; + hw->chunk_size = hw->chunk->size; + hw->chunk_frame_count = 0; + } + } + if (vdec_frame_based(vdec) && !vdec_secure(vdec)) { + /* HW needs padding (NAL start) for frame ending */ + char* tail = (char *)hw->chunk->block->start_virt; + + tail += hw->chunk->offset + hw->chunk->size; + tail[0] = 0; + tail[1] = 0; + tail[2] = 1; + tail[3] = 0xb6; + codec_mm_dma_flush(tail, 4, DMA_TO_DEVICE); + } + 
if (vdec_frame_based(vdec) && + (debug_enable & 0xc00)) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk_offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk_offset; + + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. %02x %02x %02x %02x\n", + __func__, size, get_data_check_sum(hw, size), + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + + if (debug_enable & PRINT_FRAMEBASE_DATA) { + int jj; + + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "\n"); + } + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "%s, size=%d, %x %x %x %x %x\n", + __func__, size, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + STBUF_READ(&vdec->vbuf, get_rp), + STBUF_READ(&vdec->vbuf, get_wp)); + + hw->dec_result = DEC_RESULT_NONE; + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else { + ret = amvdec_vdec_loadmc_buf_ex(VFORMAT_MPEG4,hw->fw->name, vdec, + hw->fw->data, hw->fw->len); + if (ret < 0) { + pr_err("[%d] %s: the %s fw loading failed, err: %x\n", vdec->id, + hw->fw->name, tee_enabled() ? 
"TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + hw->run_flag = 0; + return; + } + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_MPEG4; + } + if (vmpeg4_hw_ctx_restore(hw) < 0) { + hw->dec_result = DEC_RESULT_ERROR; + mmpeg4_debug_print(DECODE_ID(hw), 0, + "amvdec_mpeg4: error HW context restore\n"); + hw->run_flag = 0; + return; + } + if (vdec_frame_based(vdec)) { + size = hw->chunk_size + + (hw->chunk_offset & (VDEC_FIFO_ALIGN - 1)); + WRITE_VREG(VIFF_BIT_CNT, size * 8); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = hw->chunk->size; + } + hw->input_empty = 0; + hw->last_vld_level = 0; + start_process_time(hw); + vdec_enable_input(vdec); + /* wmb before ISR is handled */ + wmb(); + + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + amvdec_start(); + hw->stat |= STAT_VDEC_RUN; + hw->stat |= STAT_TIMER_ARM; + hw->init_flag = 1; + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); + hw->run_flag = 0; +} + +static int vmpeg4_stop(struct vdec_mpeg4_hw_s *hw) +{ + cancel_work_sync(&hw->work); + + if (hw->mm_blk_handle) { + void *bmmu_box_tmp = hw->mm_blk_handle; + hw->mm_blk_handle = NULL; + while (hw->run_flag) + usleep_range(1000, 2000); + decoder_bmmu_box_free(bmmu_box_tmp); + bmmu_box_tmp = NULL; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + + if (hw->fw) { + vfree(hw->fw); + hw->fw = NULL; + } + return 0; +} +static void reset(struct vdec_s *vdec) +{ + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + + pr_info("amvdec_mmpeg4: reset.\n"); + + vmpeg4_local_init(hw); + + if (hw->is_used_v4l) { + u32 i, buf_num = vmpeg4_get_buf_num(hw); + for (i = 0; i < buf_num; i++) { + hw->pic[i].v4l_ref_buf_addr = 0; + } + } + hw->ctx_valid = 0; +} + +static int vmpeg4_set_trickmode(struct vdec_s *vdec, unsigned long trickmode) +{ + struct vdec_mpeg4_hw_s *hw = + (struct vdec_mpeg4_hw_s *)vdec->private; + if (!hw) + 
return 0; + + if (trickmode == TRICKMODE_I) { + hw->i_only = 0x3; + trickmode_i = 1; + } else if (trickmode == TRICKMODE_NONE) { + hw->i_only = 0x0; + trickmode_i = 0; + } + return 0; +} + +static int ammvdec_mpeg4_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_mpeg4_hw_s *hw = NULL; + int config_val = 0; + + if (pdata == NULL) { + pr_err("%s memory resource undefined.\n", __func__); + return -EFAULT; + } + + hw = vmalloc(sizeof(struct vdec_mpeg4_hw_s)); + if (hw == NULL) { + pr_err("\namvdec_mpeg4 decoder driver alloc failed\n"); + return -ENOMEM; + } + memset(hw, 0, sizeof(struct vdec_mpeg4_hw_s)); + + /* the ctx from v4l2 driver. */ + hw->v4l2_ctx = pdata->private; + + pdata->private = hw; + pdata->dec_status = dec_status; + /* pdata->set_trickmode = set_trickmode; */ + pdata->set_trickmode = vmpeg4_set_trickmode; + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vmpeg4_isr; + pdata->threaded_irq_handler = vmpeg4_isr_thread_fn; + pdata->dump_state = vmpeg4_dump_state; + + snprintf(hw->vdec_name, sizeof(hw->vdec_name), + "mpeg4-%d", pdev->id); + snprintf(hw->pts_name, sizeof(hw->pts_name), + "%s-pts", hw->vdec_name); + snprintf(hw->new_q_name, sizeof(hw->new_q_name), + "%s-newframe_q", hw->vdec_name); + snprintf(hw->disp_q_name, sizeof(hw->disp_q_name), + "%s-dispframe_q", hw->vdec_name); + + if (pdata->use_vfm_path) + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + PROVIDER_NAME ".%02x", pdev->id & 0xff); + + platform_set_drvdata(pdev, pdata); + hw->platform_dev = pdev; + + if (((debug_enable & IGNORE_PARAM_FROM_CONFIG) == 0) && pdata->config_len) { + mmpeg4_debug_print(DECODE_ID(hw), 0, "pdata->config: %s\n", pdata->config); + if (get_config_int(pdata->config, "parm_v4l_buffer_margin", + &config_val) == 0) + hw->dynamic_buf_num_margin 
= config_val; + else + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + } else { + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + } + hw->buf_num = vmpeg4_get_buf_num(hw); + + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->canvas_spec[i] = 0xffffff; + } + + hw->blkmode = pdata->canvas_mode; + + if (pdata->sys_info) { + hw->vmpeg4_amstream_dec_info = *pdata->sys_info; + if ((hw->vmpeg4_amstream_dec_info.height != 0) && + (hw->vmpeg4_amstream_dec_info.width > + (MAX_MPEG4_SUPPORT_SIZE/hw->vmpeg4_amstream_dec_info.height))) { + pr_info("ammvdec_mpeg4: oversize, unsupport: %d*%d\n", + hw->vmpeg4_amstream_dec_info.width, + hw->vmpeg4_amstream_dec_info.height); + pdata->dec_status = NULL; + vfree((void *)hw); + hw = NULL; + return -EFAULT; + } + mmpeg4_debug_print(DECODE_ID(hw), 0, + "sysinfo: %d x %d, rate: %d\n", + hw->vmpeg4_amstream_dec_info.width, + hw->vmpeg4_amstream_dec_info.height, + hw->vmpeg4_amstream_dec_info.rate); + } + if (((debug_enable & IGNORE_PARAM_FROM_CONFIG) == 0) && pdata->config_len) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "pdata->config: %s\n", pdata->config); + if (get_config_int(pdata->config, "parm_v4l_buffer_margin", + &config_val) == 0) + hw->dynamic_buf_num_margin = config_val; + else + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hw->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hw->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hw->is_used_v4l = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + hw->blkmode = config_val; + } else + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + + if (!hw->is_used_v4l) + vf_provider_init(&pdata->vframe_provider, + pdata->vf_provider_name, 
&vf_provider_ops, pdata); + + hw->buf_num = vmpeg4_get_buf_num(hw); + + if (vmmpeg4_init(hw) < 0) { + pr_err("%s init failed.\n", __func__); + + if (hw) { + vfree((void *)hw); + hw = NULL; + } + pdata->dec_status = NULL; + return -ENODEV; + } + vdec_set_prepare_level(pdata, start_decode_buf_level); + + vdec_set_vframe_comm(pdata, DRIVER_NAME); + + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s end.\n", __func__); + return 0; +} + +static int ammvdec_mpeg4_remove(struct platform_device *pdev) +{ + struct vdec_mpeg4_hw_s *hw = + (struct vdec_mpeg4_hw_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec = hw_to_vdec(hw); + int i; + + if (vdec->next_status == VDEC_STATUS_DISCONNECTED + && (vdec->status == VDEC_STATUS_ACTIVE)) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s force exit %d\n", __func__, __LINE__); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + wait_event_interruptible_timeout(hw->wait_q, + (vdec->status == VDEC_STATUS_CONNECTED), + msecs_to_jiffies(1000)); /* wait for work done */ + } + + vmpeg4_stop(hw); + + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1); + else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + + if (vdec->parallel_dec == 1) { + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + vdec->free_canvas_ex(canvas_y(hw->canvas_spec[i]), vdec->id); + vdec->free_canvas_ex(canvas_u(hw->canvas_spec[i]), vdec->id); + } + } + + mmpeg4_debug_print(DECODE_ID(hw), 0, "%s\n", __func__); + vfree((void *)hw); + hw = NULL; + + return 0; +} + +/****************************************/ + +static struct platform_driver ammvdec_mpeg4_driver = { + .probe = ammvdec_mpeg4_probe, + .remove = ammvdec_mpeg4_remove, 
+#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t amvdec_mpeg4_profile = { + .name = "mmpeg4", + .profile = "v4l, no_single" +}; + +static int __init ammvdec_mpeg4_driver_init_module(void) +{ + pr_info("%s \n", __func__); + + if (platform_driver_register(&ammvdec_mpeg4_driver)) { + pr_err("failed to register ammvdec_mpeg4 driver\n"); + return -ENODEV; + } + vcodec_profile_register(&amvdec_mpeg4_profile); + vcodec_feature_register(VFORMAT_MPEG4, 0); + return 0; +} + +static void __exit ammvdec_mpeg4_driver_remove_module(void) +{ + pr_info("ammvdec_mpeg4 module remove.\n"); + + platform_driver_unregister(&ammvdec_mpeg4_driver); +} + +/****************************************/ +module_param(debug_enable, uint, 0664); +MODULE_PARM_DESC(debug_enable, + "\n ammvdec_mpeg4 debug enable\n"); + +module_param(frmbase_cont_bitlevel, uint, 0664); +MODULE_PARM_DESC(frmbase_cont_bitlevel, "\nfrmbase_cont_bitlevel\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\nrval\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, "\n ammvdec_mpeg4 decode_timeout_val\n"); + +module_param_array(max_process_time, uint, &max_decode_instance_num, 0664); + +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, + "\n ammvdec_mpeg4 pre_decode_buf_level\n"); + +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n ammvdec_mpeg4 start_decode_buf_level\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n ammvdec_mpeg4 udebug_flag\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n 
ammvdec_mpeg4 without_display_mode\n"); + +module_init(ammvdec_mpeg4_driver_init_module); +module_exit(ammvdec_mpeg4_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC MPEG4 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>"); +
diff --git a/drivers/frame_provider/decoder/utils/Makefile b/drivers/frame_provider/decoder/utils/Makefile new file mode 100644 index 0000000..6f8540b --- /dev/null +++ b/drivers/frame_provider/decoder/utils/Makefile
@@ -0,0 +1,13 @@ +obj-m += decoder_common.o +decoder_common-objs += utils.o vdec.o vdec_input.o amvdec.o +decoder_common-objs += decoder_mmu_box.o decoder_bmmu_box.o +decoder_common-objs += config_parser.o vdec_profile.o +decoder_common-objs += amstream_profile.o +decoder_common-objs += frame_check.o amlogic_fbc_hook.o +decoder_common-objs += vdec_v4l2_buffer_ops.o +decoder_common-objs += vdec_sync.o +decoder_common-objs += vdec_power_ctrl.o +decoder_common-objs += vdec_canvas_utils.o +decoder_common-objs += vdec_feature.o +decoder_common-objs += vdec_ge2d_utils.o +
diff --git a/drivers/frame_provider/decoder/utils/amlogic_fbc_hook.c b/drivers/frame_provider/decoder/utils/amlogic_fbc_hook.c new file mode 100644 index 0000000..b6179fb --- /dev/null +++ b/drivers/frame_provider/decoder/utils/amlogic_fbc_hook.c
@@ -0,0 +1,104 @@ +/* + * drivers/amlogic/media/frame_provider/decoder/utils/amlogic_fbc_hook.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> + +#include "amlogic_fbc_hook.h" +static AMLOGIC_FBC_vframe_decoder_fun_t g_decoder_fun; +static AMLOGIC_FBC_vframe_encoder_fun_t g_encoder_fun; + + +int AMLOGIC_FBC_vframe_decoder( + void *dstyuv[4], + struct vframe_s *vf, + int out_format, + int flags) + +{ + if (g_decoder_fun) { + return g_decoder_fun(dstyuv, + vf, + out_format, + flags); + } + printk("no AMLOGIC_FBC_vframe_decoder ERRR!!\n"); + return -1; +} +EXPORT_SYMBOL(AMLOGIC_FBC_vframe_decoder); + +int AMLOGIC_FBC_vframe_encoder( + void *srcyuv[4], + void *dst_header, + void *dst_body, + int in_format, + int flags) + +{ + if (g_encoder_fun) { + return g_encoder_fun( + srcyuv, + dst_header, + dst_body, + in_format, + flags); + } + printk("no AMLOGIC_FBC_vframe_encoder ERRR!!\n"); + return -1; +} +EXPORT_SYMBOL(AMLOGIC_FBC_vframe_encoder); + +int register_amlogic_afbc_dec_fun(AMLOGIC_FBC_vframe_decoder_fun_t fn) +{ + if (g_decoder_fun) { + pr_err("error!!,AMLOGIC_FBC dec have register\n"); + return -1; + } + printk("register_amlogic_afbc_dec_fun\n"); + g_decoder_fun = fn; + return 0; +} +EXPORT_SYMBOL(register_amlogic_afbc_dec_fun); + +int register_amlogic_afbc_enc_fun(AMLOGIC_FBC_vframe_encoder_fun_t fn) +{ + if (g_encoder_fun) { + pr_err("error!!,AMLOGIC_FBC 
enc have register\n"); + return -1; + } + g_encoder_fun = fn; + return 0; +} +EXPORT_SYMBOL(register_amlogic_afbc_enc_fun); + +int unregister_amlogic_afbc_dec_fun(void) +{ + g_decoder_fun = NULL; + pr_err("unregister_amlogic_afbc_dec_fun\n"); + return 0; +} +EXPORT_SYMBOL(unregister_amlogic_afbc_dec_fun); + +int unregister_amlogic_afbc_enc_fun(void) +{ + g_encoder_fun = NULL; + pr_err("unregister_amlogic_afbc_dec_fun\n"); + return 0; +} +EXPORT_SYMBOL(unregister_amlogic_afbc_enc_fun); + +
diff --git a/drivers/frame_provider/decoder/utils/amlogic_fbc_hook.h b/drivers/frame_provider/decoder/utils/amlogic_fbc_hook.h new file mode 100644 index 0000000..7eec4b7 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/amlogic_fbc_hook.h
@@ -0,0 +1,55 @@ +/* + * drivers/amlogic/media/frame_provider/decoder/utils/amlogic_fbc_hook.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef AMLGIC_FBC_HEADER___ +#define AMLGIC_FBC_HEADER___ +#include <linux/amlogic/media/vfm/vframe.h> +/* +unsigned short *planes[4], + unsigned char *buf, + unsigned *v2_head_buf // v2_head_buf_size=(((frame_info->mbw + 1)>>1)*2) * (((frame_info->mbh + 15)>>4)*16) +); +*/ + +int AMLOGIC_FBC_vframe_decoder( + void *dstyuv[4], + struct vframe_s *vf, + int out_format, + int flags); +int AMLOGIC_FBC_vframe_encoder( + void *srcyuv[4], + void *dst_header, + void *dst_body, + int in_format, + int flags); + +typedef int (*AMLOGIC_FBC_vframe_decoder_fun_t)( + void **, + struct vframe_s *, + int, + int); +typedef int (*AMLOGIC_FBC_vframe_encoder_fun_t)( + void **, + void *, + void *, + int, + int); +int register_amlogic_afbc_dec_fun(AMLOGIC_FBC_vframe_decoder_fun_t fn); +int register_amlogic_afbc_enc_fun(AMLOGIC_FBC_vframe_encoder_fun_t fn); +int unregister_amlogic_afbc_dec_fun(void); +int unregister_amlogic_afbc_enc_fun(void); +#endif \ No newline at end of file
diff --git a/drivers/frame_provider/decoder/utils/amstream_profile.c b/drivers/frame_provider/decoder/utils/amstream_profile.c new file mode 100644 index 0000000..0e2b9c8 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/amstream_profile.c
@@ -0,0 +1,68 @@ +/* + * drivers/amlogic/media/stream_input/amports/amstream_profile.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/amlogic/media/utils/amstream.h> + +static const struct codec_profile_t *vcodec_profile[SUPPORT_VDEC_NUM] = { 0 }; + +static int vcodec_profile_idx; + +ssize_t vcodec_profile_read(char *buf) +{ + char *pbuf = buf; + int i = 0; + + for (i = 0; i < vcodec_profile_idx; i++) { + pbuf += snprintf(pbuf, PAGE_SIZE - (pbuf - buf), "%s:%s;\n", vcodec_profile[i]->name, + vcodec_profile[i]->profile); + } + + return pbuf - buf; +} +EXPORT_SYMBOL(vcodec_profile_read); + +int vcodec_profile_register(const struct codec_profile_t *vdec_profile) +{ + if (vcodec_profile_idx < SUPPORT_VDEC_NUM) { + vcodec_profile[vcodec_profile_idx] = vdec_profile; + vcodec_profile_idx++; + pr_debug("regist %s codec profile\n", vdec_profile->name); + + } + + return 0; +} +EXPORT_SYMBOL(vcodec_profile_register); + +bool is_support_profile(char *name) +{ + int ret = 0; + int i, size = ARRAY_SIZE(vcodec_profile); + + for (i = 0; i < size; i++) { + if (!vcodec_profile[i]) + break; + if (!strcmp(name, vcodec_profile[i]->name)) + return true; + } + return ret; +} +EXPORT_SYMBOL(is_support_profile);
diff --git a/drivers/frame_provider/decoder/utils/amvdec.c b/drivers/frame_provider/decoder/utils/amvdec.c new file mode 100644 index 0000000..d525fe8 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/amvdec.c
@@ -0,0 +1,1196 @@ +/* + * drivers/amlogic/media/frame_provider/decoder/utils/amvdec.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/vmalloc.h> +#include "vdec.h" + +#ifdef CONFIG_PM +#include <linux/pm.h> +#endif + +#ifdef CONFIG_WAKELOCK +#include <linux/wakelock.h> +#endif +#include "../../../stream_input/amports/amports_priv.h" + +/* #include <mach/am_regs.h> */ +/* #include <mach/power_gate.h> */ +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "amvdec.h" +#include <linux/amlogic/media/utils/amports_config.h> +#include "firmware.h" +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include "../../../common/chips/decoder_cpu_ver_info.h" + +#define MC_SIZE (4096 * 16) + +#ifdef CONFIG_WAKELOCK +static struct wake_lock amvdec_lock; +struct timer_list amvdevtimer; +#define WAKE_CHECK_INTERVAL (100*HZ/100) +#endif +#define AMVDEC_USE_STATIC_MEMORY +static void *mc_addr; +static dma_addr_t mc_addr_map; + +#ifdef CONFIG_WAKELOCK +static int video_running; +static int video_stated_changed = 1; +#endif + +static void amvdec_pg_enable(bool enable) +{ + ulong timeout; + + if (enable) { + 
AMVDEC_CLK_GATE_ON(MDEC_CLK_PIC_DC); + AMVDEC_CLK_GATE_ON(MDEC_CLK_DBLK); + AMVDEC_CLK_GATE_ON(MC_CLK); + AMVDEC_CLK_GATE_ON(IQIDCT_CLK); + /* AMVDEC_CLK_GATE_ON(VLD_CLK); */ + AMVDEC_CLK_GATE_ON(AMRISC); + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) + WRITE_VREG(GCLK_EN, 0x3ff); + /* #endif */ + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 31); + } else { + + AMVDEC_CLK_GATE_OFF(AMRISC); + timeout = jiffies + HZ / 100; + + while (READ_VREG(MDEC_PIC_DC_STATUS) != 0) { + if (time_after(jiffies, timeout)) { + WRITE_VREG_BITS(MDEC_PIC_DC_CTRL, 1, 0, 1); + WRITE_VREG_BITS(MDEC_PIC_DC_CTRL, 0, 0, 1); + READ_VREG(MDEC_PIC_DC_STATUS); + READ_VREG(MDEC_PIC_DC_STATUS); + READ_VREG(MDEC_PIC_DC_STATUS); + break; + } + } + + AMVDEC_CLK_GATE_OFF(MDEC_CLK_PIC_DC); + timeout = jiffies + HZ / 100; + + while (READ_VREG(DBLK_STATUS) & 1) { + if (time_after(jiffies, timeout)) { + WRITE_VREG(DBLK_CTRL, 3); + WRITE_VREG(DBLK_CTRL, 0); + READ_VREG(DBLK_STATUS); + READ_VREG(DBLK_STATUS); + READ_VREG(DBLK_STATUS); + break; + } + } + AMVDEC_CLK_GATE_OFF(MDEC_CLK_DBLK); + timeout = jiffies + HZ / 100; + + while (READ_VREG(MC_STATUS0) & 1) { + if (time_after(jiffies, timeout)) { + SET_VREG_MASK(MC_CTRL1, 0x9); + CLEAR_VREG_MASK(MC_CTRL1, 0x9); + READ_VREG(MC_STATUS0); + READ_VREG(MC_STATUS0); + READ_VREG(MC_STATUS0); + break; + } + } + AMVDEC_CLK_GATE_OFF(MC_CLK); + timeout = jiffies + HZ / 100; + while (READ_VREG(DCAC_DMA_CTRL) & 0x8000) { + if (time_after(jiffies, timeout)) + break; + } + AMVDEC_CLK_GATE_OFF(IQIDCT_CLK); + /* AMVDEC_CLK_GATE_OFF(VLD_CLK); */ + } +} + +static void amvdec2_pg_enable(bool enable) +{ + if (has_vdec2()) { + ulong timeout; + + if (!vdec_on(VDEC_2)) + return; + if (enable) { + /* WRITE_VREG(VDEC2_GCLK_EN, 0x3ff); */ + } else { + timeout = jiffies + HZ / 10; + + while (READ_VREG(VDEC2_MDEC_PIC_DC_STATUS) != 0) { + if (time_after(jiffies, timeout)) { + WRITE_VREG_BITS(VDEC2_MDEC_PIC_DC_CTRL, + 1, 0, 1); + 
WRITE_VREG_BITS(VDEC2_MDEC_PIC_DC_CTRL, + 0, 0, 1); + READ_VREG(VDEC2_MDEC_PIC_DC_STATUS); + READ_VREG(VDEC2_MDEC_PIC_DC_STATUS); + READ_VREG(VDEC2_MDEC_PIC_DC_STATUS); + break; + } + } + + timeout = jiffies + HZ / 10; + + while (READ_VREG(VDEC2_DBLK_STATUS) & 1) { + if (time_after(jiffies, timeout)) { + WRITE_VREG(VDEC2_DBLK_CTRL, 3); + WRITE_VREG(VDEC2_DBLK_CTRL, 0); + READ_VREG(VDEC2_DBLK_STATUS); + READ_VREG(VDEC2_DBLK_STATUS); + READ_VREG(VDEC2_DBLK_STATUS); + break; + } + } + + timeout = jiffies + HZ / 10; + + while (READ_VREG(VDEC2_DCAC_DMA_CTRL) & 0x8000) { + if (time_after(jiffies, timeout)) + break; + } + } + } +} + +static void amhevc_pg_enable(bool enable) +{ + if (has_hevc_vdec()) { + ulong timeout; + + if (!vdec_on(VDEC_HEVC)) + return; + if (enable) { + /* WRITE_VREG(VDEC2_GCLK_EN, 0x3ff); */ + } else { + timeout = jiffies + HZ / 10; + + while (READ_VREG(HEVC_MDEC_PIC_DC_STATUS) != 0) { + if (time_after(jiffies, timeout)) { + WRITE_VREG_BITS(HEVC_MDEC_PIC_DC_CTRL, + 1, 0, 1); + WRITE_VREG_BITS(HEVC_MDEC_PIC_DC_CTRL, + 0, 0, 1); + READ_VREG(HEVC_MDEC_PIC_DC_STATUS); + READ_VREG(HEVC_MDEC_PIC_DC_STATUS); + READ_VREG(HEVC_MDEC_PIC_DC_STATUS); + break; + } + } + + timeout = jiffies + HZ / 10; + + while (READ_VREG(HEVC_DBLK_STATUS) & 1) { + if (time_after(jiffies, timeout)) { + WRITE_VREG(HEVC_DBLK_CTRL, 3); + WRITE_VREG(HEVC_DBLK_CTRL, 0); + READ_VREG(HEVC_DBLK_STATUS); + READ_VREG(HEVC_DBLK_STATUS); + READ_VREG(HEVC_DBLK_STATUS); + break; + } + } + + timeout = jiffies + HZ / 10; + + while (READ_VREG(HEVC_DCAC_DMA_CTRL) & 0x8000) { + if (time_after(jiffies, timeout)) + break; + } + } + } +} + +#ifdef CONFIG_WAKELOCK +int amvdec_wake_lock(void) +{ + wake_lock(&amvdec_lock); + return 0; +} + +int amvdec_wake_unlock(void) +{ + wake_unlock(&amvdec_lock); + return 0; +} +#else +#define amvdec_wake_lock() +#define amvdec_wake_unlock() +#endif + +static s32 am_vdec_loadmc_ex(struct vdec_s *vdec, + const char *name, char *def, s32(*load)(const u32 *)) +{ + int 
err; + + if (!vdec->mc_loaded) { + if (!def) { + err = get_decoder_firmware_data(vdec->format, + name, (u8 *)(vdec->mc), + (4096 * 4 * 4)); + if (err <= 0) + return -1; + } else + memcpy((char *)vdec->mc, def, sizeof(vdec->mc)); + + vdec->mc_loaded = true; + } + + err = (*load)(vdec->mc); + if (err < 0) { + pr_err("loading firmware %s to vdec ram failed!\n", name); + return err; + } + + return err; +} + +static s32 am_vdec_loadmc_buf_ex(struct vdec_s *vdec, + char *buf, int size, s32(*load)(const u32 *)) +{ + int err; + + if (!vdec->mc_loaded) { + memcpy((u8 *)(vdec->mc), buf, size); + vdec->mc_loaded = true; + } + + err = (*load)(vdec->mc); + if (err < 0) { + pr_err("loading firmware to vdec ram failed!\n"); + return err; + } + + return err; +} + +static s32 am_loadmc_ex(enum vformat_e type, + const char *name, char *def, s32(*load)(const u32 *)) +{ + char *mc_addr = vmalloc(4096 * 16); + char *pmc_addr = def; + int err; + + if (!def && mc_addr) { + int loaded; + + loaded = get_decoder_firmware_data(type, + name, mc_addr, (4096 * 16)); + if (loaded > 0) + pmc_addr = mc_addr; + } + if (!pmc_addr) { + vfree(mc_addr); + return -1; + } + err = (*load)((u32 *) pmc_addr); + if (err < 0) { + pr_err("loading firmware %s to vdec ram failed!\n", name); + vfree(mc_addr); + return err; + } + vfree(mc_addr); + + return err; +} + +static s32 amvdec_loadmc(const u32 *p) +{ + ulong timeout; + s32 ret = 0; + +#ifdef AMVDEC_USE_STATIC_MEMORY + if (mc_addr == NULL) { +#else + { +#endif + mc_addr = kmalloc(MC_SIZE, GFP_KERNEL); + } + + if (!mc_addr) + return -ENOMEM; + + memcpy(mc_addr, p, MC_SIZE); + + mc_addr_map = dma_map_single(get_vdec_device(), + mc_addr, MC_SIZE, DMA_TO_DEVICE); + + WRITE_VREG(MPSR, 0); + WRITE_VREG(CPSR, 0); + + /* Read CBUS register for timing */ + timeout = READ_VREG(MPSR); + timeout = READ_VREG(MPSR); + + timeout = jiffies + HZ; + + WRITE_VREG(IMEM_DMA_ADR, mc_addr_map); + WRITE_VREG(IMEM_DMA_COUNT, 0x1000); + WRITE_VREG(IMEM_DMA_CTRL, (0x8000 | (7 << 
16))); + + while (READ_VREG(IMEM_DMA_CTRL) & 0x8000) { + if (time_before(jiffies, timeout)) + schedule(); + else { + pr_err("vdec load mc error\n"); + ret = -EBUSY; + break; + } + } + + dma_unmap_single(get_vdec_device(), + mc_addr_map, MC_SIZE, DMA_TO_DEVICE); + +#ifndef AMVDEC_USE_STATIC_MEMORY + kfree(mc_addr); + mc_addr = NULL; +#endif + + return ret; +} + +s32 optee_load_fw(enum vformat_e type, const char *fw_name) +{ + s32 ret = -1; + unsigned int format = FIRMWARE_MAX; + unsigned int vdec = OPTEE_VDEC_LEGENCY; + char *name = __getname(); + bool is_swap = false; + + sprintf(name, "%s", fw_name ? fw_name : "null"); + + switch ((u32)type) { + case VFORMAT_VC1: + format = VIDEO_DEC_VC1; + break; + + case VFORMAT_AVS: + if (!strcmp(name, "avs_no_cabac")) + format = VIDEO_DEC_AVS_NOCABAC; + else if (!strcmp(name, "avs_multi")) + format = VIDEO_DEC_AVS_MULTI; + else + format = VIDEO_DEC_AVS; + break; + + case VFORMAT_MPEG12: + if (!strcmp(name, "mpeg12")) + format = VIDEO_DEC_MPEG12; + else if (!strcmp(name, "mmpeg12")) + format = VIDEO_DEC_MPEG12_MULTI; + break; + + case VFORMAT_MJPEG: + if (!strcmp(name, "mmjpeg")) + format = VIDEO_DEC_MJPEG_MULTI; + else + format = VIDEO_DEC_MJPEG; + break; + + case VFORMAT_VP9: + if (!strcmp(name, "vp9_mc")) + format = VIDEO_DEC_VP9; + else + format = VIDEO_DEC_VP9_MMU; + break; + + case VFORMAT_AVS2: + format = VIDEO_DEC_AVS2_MMU; + vdec = OPTEE_VDEC_HEVC; + break; + + case VFORMAT_AV1: + format = VIDEO_DEC_AV1_MMU; + vdec = OPTEE_VDEC_HEVC; + break; + + case VFORMAT_HEVC: + if (!strcmp(name, "h265_mmu")) + format = VIDEO_DEC_HEVC_MMU; + else if (!strcmp(name, "hevc_mmu_swap")) { + format = VIDEO_DEC_HEVC_MMU_SWAP; + vdec = OPTEE_VDEC_HEVC; + is_swap = true; + } else + format = VIDEO_DEC_HEVC; + break; + + case VFORMAT_REAL: + if (!strcmp(name, "vreal_mc_8")) + format = VIDEO_DEC_REAL_V8; + else if (!strcmp(name, "vreal_mc_9")) + format = VIDEO_DEC_REAL_V9; + break; + + case VFORMAT_MPEG4: + if (!strcmp(name, "mmpeg4_mc_5")) + 
format = VIDEO_DEC_MPEG4_5_MULTI; + else if ((!strcmp(name, "mh263_mc"))) + format = VIDEO_DEC_H263_MULTI; + else if (!strcmp(name, "vmpeg4_mc_5")) + format = VIDEO_DEC_MPEG4_5; + else if (!strcmp(name, "h263_mc")) + format = VIDEO_DEC_H263; + /*not support now*/ + else if (!strcmp(name, "vmpeg4_mc_311")) + format = VIDEO_DEC_MPEG4_3; + else if (!strcmp(name, "vmpeg4_mc_4")) + format = VIDEO_DEC_MPEG4_4; + break; + + case VFORMAT_H264_4K2K: + if (!strcmp(name, "single_core")) + format = VIDEO_DEC_H264_4k2K_SINGLE; + else + format = VIDEO_DEC_H264_4k2K; + break; + + case VFORMAT_H264MVC: + format = VIDEO_DEC_H264_MVC; + break; + + case VFORMAT_H264: + if (!strcmp(name, "mh264")) + format = VIDEO_DEC_H264_MULTI; + else if (!strcmp(name, "mh264_mmu")) { + format = VIDEO_DEC_H264_MULTI_MMU; + vdec = OPTEE_VDEC_HEVC; + } else + format = VIDEO_DEC_H264; + break; + case VFORMAT_JPEG_ENC: + format = VIDEO_ENC_JPEG; + vdec = OPTEE_VDEC_HCDEC; + break; + case VFORMAT_H264_ENC: + format = VIDEO_ENC_H264; + vdec = OPTEE_VDEC_HCDEC; + break; + default: + pr_info("Unknow vdec format: %u\n", (u32)type); + break; + } + + if (format < FIRMWARE_MAX) { + if (is_swap) + ret = tee_load_video_fw_swap(format, vdec, is_swap); + else + ret = tee_load_video_fw(format, vdec); + } + + __putname(name); + + return ret; +} +EXPORT_SYMBOL(optee_load_fw); + +s32 amvdec_loadmc_ex(enum vformat_e type, const char *name, char *def) +{ + if (tee_enabled()) + return optee_load_fw(type, name); + else + return am_loadmc_ex(type, name, def, &amvdec_loadmc); +} +EXPORT_SYMBOL(amvdec_loadmc_ex); + +s32 amvdec_vdec_loadmc_ex(enum vformat_e type, const char *name, + struct vdec_s *vdec, char *def) +{ + if (tee_enabled()) + return optee_load_fw(type, name); + else + return am_vdec_loadmc_ex(vdec, name, def, &amvdec_loadmc); +} +EXPORT_SYMBOL(amvdec_vdec_loadmc_ex); + +s32 amvdec_vdec_loadmc_buf_ex(enum vformat_e type, const char *name, + struct vdec_s *vdec, char *buf, int size) +{ + if (tee_enabled()) + return 
optee_load_fw(type, name); + else + return am_vdec_loadmc_buf_ex(vdec, buf, size, &amvdec_loadmc); +} +EXPORT_SYMBOL(amvdec_vdec_loadmc_buf_ex); + +static s32 amvdec2_loadmc(const u32 *p) +{ + if (has_vdec2()) { + ulong timeout; + s32 ret = 0; + +#ifdef AMVDEC_USE_STATIC_MEMORY + if (mc_addr == NULL) { +#else + { +#endif + mc_addr = kmalloc(MC_SIZE, GFP_KERNEL); + } + + if (!mc_addr) + return -ENOMEM; + + memcpy(mc_addr, p, MC_SIZE); + + mc_addr_map = dma_map_single(get_vdec_device(), + mc_addr, MC_SIZE, DMA_TO_DEVICE); + + WRITE_VREG(VDEC2_MPSR, 0); + WRITE_VREG(VDEC2_CPSR, 0); + + /* Read CBUS register for timing */ + timeout = READ_VREG(VDEC2_MPSR); + timeout = READ_VREG(VDEC2_MPSR); + + timeout = jiffies + HZ; + + WRITE_VREG(VDEC2_IMEM_DMA_ADR, mc_addr_map); + WRITE_VREG(VDEC2_IMEM_DMA_COUNT, 0x1000); + WRITE_VREG(VDEC2_IMEM_DMA_CTRL, (0x8000 | (7 << 16))); + + while (READ_VREG(VDEC2_IMEM_DMA_CTRL) & 0x8000) { + if (time_before(jiffies, timeout)) + schedule(); + else { + pr_err("vdec2 load mc error\n"); + ret = -EBUSY; + break; + } + } + + dma_unmap_single(get_vdec_device(), + mc_addr_map, MC_SIZE, DMA_TO_DEVICE); + +#ifndef AMVDEC_USE_STATIC_MEMORY + kfree(mc_addr); + mc_addr = NULL; +#endif + + return ret; + } else + return 0; +} + +s32 amvdec2_loadmc_ex(enum vformat_e type, const char *name, char *def) +{ + if (has_vdec2()) + return am_loadmc_ex(type, name, def, &amvdec2_loadmc); + else + return 0; +} +EXPORT_SYMBOL(amvdec2_loadmc_ex); + +s32 amhcodec_loadmc(const u32 *p) +{ +#ifdef AMVDEC_USE_STATIC_MEMORY + if (mc_addr == NULL) { +#else + { +#endif + mc_addr = kmalloc(MC_SIZE, GFP_KERNEL); + } + + if (!mc_addr) + return -ENOMEM; + + memcpy(mc_addr, p, MC_SIZE); + + mc_addr_map = dma_map_single(get_vdec_device(), + mc_addr, MC_SIZE, DMA_TO_DEVICE); + + WRITE_VREG(HCODEC_IMEM_DMA_ADR, mc_addr_map); + WRITE_VREG(HCODEC_IMEM_DMA_COUNT, 0x100); + WRITE_VREG(HCODEC_IMEM_DMA_CTRL, (0x8000 | (7 << 16))); + + while (READ_VREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) + 
udelay(1000); + + dma_unmap_single(get_vdec_device(), + mc_addr_map, MC_SIZE, DMA_TO_DEVICE); + +#ifndef AMVDEC_USE_STATIC_MEMORY + kfree(mc_addr); +#endif + + return 0; +} +EXPORT_SYMBOL(amhcodec_loadmc); + +s32 amhcodec_loadmc_ex(enum vformat_e type, const char *name, char *def) +{ + return am_loadmc_ex(type, name, def, &amhcodec_loadmc); +} +EXPORT_SYMBOL(amhcodec_loadmc_ex); + +static s32 amhevc_loadmc(const u32 *p) +{ + ulong timeout; + s32 ret = 0; + + if (has_hevc_vdec()) { +#ifdef AMVDEC_USE_STATIC_MEMORY + if (mc_addr == NULL) { +#else + { +#endif + mc_addr = kmalloc(MC_SIZE, GFP_KERNEL); + } + + if (!mc_addr) + return -ENOMEM; + + memcpy(mc_addr, p, MC_SIZE); + + mc_addr_map = + dma_map_single(get_vdec_device(), + mc_addr, MC_SIZE, DMA_TO_DEVICE); + + WRITE_VREG(HEVC_MPSR, 0); + WRITE_VREG(HEVC_CPSR, 0); + + /* Read CBUS register for timing */ + timeout = READ_VREG(HEVC_MPSR); + timeout = READ_VREG(HEVC_MPSR); + + timeout = jiffies + HZ; + + WRITE_VREG(HEVC_IMEM_DMA_ADR, mc_addr_map); + WRITE_VREG(HEVC_IMEM_DMA_COUNT, 0x1000); + if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3)) + WRITE_VREG(HEVC_IMEM_DMA_CTRL, (0x8000 | (0xf << 16))); + else + WRITE_VREG(HEVC_IMEM_DMA_CTRL, (0x8000 | (0x7 << 16))); + + while (READ_VREG(HEVC_IMEM_DMA_CTRL) & 0x8000) { + if (time_before(jiffies, timeout)) + schedule(); + else { + pr_err("hevc load mc error\n"); + ret = -EBUSY; + break; + } + } + + dma_unmap_single(get_vdec_device(), + mc_addr_map, MC_SIZE, DMA_TO_DEVICE); + +#ifndef AMVDEC_USE_STATIC_MEMORY + kfree(mc_addr); + mc_addr = NULL; +#endif + } + + return ret; +} + +s32 amhevc_loadmc_ex(enum vformat_e type, const char *name, char *def) +{ + if (has_hevc_vdec()) + if (tee_enabled()) + return optee_load_fw(type, name); + else + return am_loadmc_ex(type, name, def, &amhevc_loadmc); + else + return -1; +} +EXPORT_SYMBOL(amhevc_loadmc_ex); + +s32 amhevc_vdec_loadmc_ex(enum vformat_e type, struct vdec_s *vdec, + 
const char *name, char *def) +{ + if (has_hevc_vdec()) + if (tee_enabled()) + return optee_load_fw(type, name); + else + return am_vdec_loadmc_ex(vdec, name, def, &amhevc_loadmc); + else + return -1; +} +EXPORT_SYMBOL(amhevc_vdec_loadmc_ex); + +void amvdec_start(void) +{ +#ifdef CONFIG_WAKELOCK + amvdec_wake_lock(); +#endif + + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 12) | (1 << 11)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + } else { + /* #else */ + /* additional cbus dummy register reading for timing control */ + READ_RESET_REG(RESET0_REGISTER); + READ_RESET_REG(RESET0_REGISTER); + READ_RESET_REG(RESET0_REGISTER); + READ_RESET_REG(RESET0_REGISTER); + + WRITE_RESET_REG(RESET0_REGISTER, RESET_VCPU | RESET_CCPU); + + READ_RESET_REG(RESET0_REGISTER); + READ_RESET_REG(RESET0_REGISTER); + READ_RESET_REG(RESET0_REGISTER); + } + /* #endif */ + + WRITE_VREG(MPSR, 0x0001); +} +EXPORT_SYMBOL(amvdec_start); + +void amvdec2_start(void) +{ + if (has_vdec2()) { +#ifdef CONFIG_WAKELOCK + amvdec_wake_lock(); +#endif + + READ_VREG(DOS_SW_RESET2); + READ_VREG(DOS_SW_RESET2); + READ_VREG(DOS_SW_RESET2); + + WRITE_VREG(DOS_SW_RESET2, (1 << 12) | (1 << 11)); + WRITE_VREG(DOS_SW_RESET2, 0); + + READ_VREG(DOS_SW_RESET2); + READ_VREG(DOS_SW_RESET2); + READ_VREG(DOS_SW_RESET2); + + WRITE_VREG(VDEC2_MPSR, 0x0001); + } +} +EXPORT_SYMBOL(amvdec2_start); + +void amhcodec_start(void) +{ + WRITE_VREG(HCODEC_MPSR, 0x0001); +} +EXPORT_SYMBOL(amhcodec_start); + +void amhevc_start(void) +{ + + if (has_hevc_vdec()) { +#ifdef CONFIG_WAKELOCK + amvdec_wake_lock(); +#endif + + READ_VREG(DOS_SW_RESET3); + READ_VREG(DOS_SW_RESET3); + READ_VREG(DOS_SW_RESET3); + + WRITE_VREG(DOS_SW_RESET3, (1 << 12) | (1 << 11)); + WRITE_VREG(DOS_SW_RESET3, 0); + 
+ READ_VREG(DOS_SW_RESET3); + READ_VREG(DOS_SW_RESET3); + READ_VREG(DOS_SW_RESET3); + + WRITE_VREG(HEVC_MPSR, 0x0001); + } +} +EXPORT_SYMBOL(amhevc_start); + +void amvdec_stop(void) +{ + ulong timeout = jiffies + HZ/10; + + WRITE_VREG(MPSR, 0); + WRITE_VREG(CPSR, 0); + + while (READ_VREG(IMEM_DMA_CTRL) & 0x8000) { + if (time_after(jiffies, timeout)) + break; + } + + timeout = jiffies + HZ/10; + while (READ_VREG(LMEM_DMA_CTRL) & 0x8000) { + if (time_after(jiffies, timeout)) + break; + } + + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 12) | (1 << 11)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + } else { + /* #else */ + WRITE_RESET_REG(RESET0_REGISTER, RESET_VCPU | RESET_CCPU); + + /* additional cbus dummy register reading for timing control */ + READ_RESET_REG(RESET0_REGISTER); + READ_RESET_REG(RESET0_REGISTER); + READ_RESET_REG(RESET0_REGISTER); + READ_RESET_REG(RESET0_REGISTER); + } + /* #endif */ + +#ifdef CONFIG_WAKELOCK + amvdec_wake_unlock(); +#endif +} +EXPORT_SYMBOL(amvdec_stop); + +void amvdec2_stop(void) +{ + if (has_vdec2()) { + ulong timeout = jiffies + HZ/10; + + WRITE_VREG(VDEC2_MPSR, 0); + WRITE_VREG(VDEC2_CPSR, 0); + + while (READ_VREG(VDEC2_IMEM_DMA_CTRL) & 0x8000) { + if (time_after(jiffies, timeout)) + break; + } + + READ_VREG(DOS_SW_RESET2); + READ_VREG(DOS_SW_RESET2); + READ_VREG(DOS_SW_RESET2); + +#ifdef CONFIG_WAKELOCK + amvdec_wake_unlock(); +#endif + } +} +EXPORT_SYMBOL(amvdec2_stop); + +void amhcodec_stop(void) +{ + WRITE_VREG(HCODEC_MPSR, 0); +} +EXPORT_SYMBOL(amhcodec_stop); + +void amhevc_stop(void) +{ + if (has_hevc_vdec()) { + ulong timeout = jiffies + HZ/10; + + WRITE_VREG(HEVC_MPSR, 0); + WRITE_VREG(HEVC_CPSR, 0); + + while (READ_VREG(HEVC_IMEM_DMA_CTRL) & 0x8000) { + if 
(time_after(jiffies, timeout)) + break; + } + + timeout = jiffies + HZ/10; + while (READ_VREG(HEVC_LMEM_DMA_CTRL) & 0x8000) { + if (time_after(jiffies, timeout)) + break; + } + + READ_VREG(DOS_SW_RESET3); + READ_VREG(DOS_SW_RESET3); + READ_VREG(DOS_SW_RESET3); + +#ifdef CONFIG_WAKELOCK + amvdec_wake_unlock(); +#endif + } +} +EXPORT_SYMBOL(amhevc_stop); + +void amvdec_enable(void) +{ + amvdec_pg_enable(true); +} +EXPORT_SYMBOL(amvdec_enable); + +void amvdec_disable(void) +{ + amvdec_pg_enable(false); +} +EXPORT_SYMBOL(amvdec_disable); + +void amvdec2_enable(void) +{ + if (has_vdec2()) + amvdec2_pg_enable(true); +} +EXPORT_SYMBOL(amvdec2_enable); + +void amvdec2_disable(void) +{ + if (has_vdec2()) + amvdec2_pg_enable(false); +} +EXPORT_SYMBOL(amvdec2_disable); + +void amhevc_enable(void) +{ + if (has_hevc_vdec()) + amhevc_pg_enable(true); +} +EXPORT_SYMBOL(amhevc_enable); + +void amhevc_disable(void) +{ + if (has_hevc_vdec()) + amhevc_pg_enable(false); +} +EXPORT_SYMBOL(amhevc_disable); + +#ifdef CONFIG_PM +int amvdec_suspend(struct platform_device *dev, pm_message_t event) +{ + struct vdec_s *vdec = *(struct vdec_s **)dev->dev.platform_data;; + + if (vdec) { + wait_event_interruptible_timeout(vdec->idle_wait, + (vdec->status != VDEC_STATUS_ACTIVE), + msecs_to_jiffies(100)); + } + + amvdec_pg_enable(false); + + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */ + if (has_vdec2()) + amvdec2_pg_enable(false); + /* #endif */ + + if (has_hevc_vdec()) + amhevc_pg_enable(false); + /*vdec_set_suspend_clk(1, 0);*//*DEBUG_TMP*/ + return 0; +} +EXPORT_SYMBOL(amvdec_suspend); + +int amvdec_resume(struct platform_device *dev) +{ + amvdec_pg_enable(true); + + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */ + if (has_vdec2()) + amvdec2_pg_enable(true); + /* #endif */ + + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + if (has_hevc_vdec()) + amhevc_pg_enable(true); + /* #endif */ + /*vdec_set_suspend_clk(0, 0);*//*DEBUG_TMP*/ + return 0; +} +EXPORT_SYMBOL(amvdec_resume); 
+ +int amhevc_suspend(struct platform_device *dev, pm_message_t event) +{ + struct vdec_s *vdec = *(struct vdec_s **)dev->dev.platform_data;; + + if (vdec) { + wait_event_interruptible_timeout(vdec->idle_wait, + (vdec->status != VDEC_STATUS_ACTIVE), + msecs_to_jiffies(100)); + } + + if (has_hevc_vdec()) { + amhevc_pg_enable(false); + /*vdec_set_suspend_clk(1, 1);*//*DEBUG_TMP*/ + } + return 0; +} +EXPORT_SYMBOL(amhevc_suspend); + +int amhevc_resume(struct platform_device *dev) +{ + if (has_hevc_vdec()) { + amhevc_pg_enable(true); + /*vdec_set_suspend_clk(0, 1);*//*DEBUG_TMP*/ + } + return 0; +} +EXPORT_SYMBOL(amhevc_resume); + + +#endif + +#ifdef CONFIG_WAKELOCK + +static int vdec_is_paused(void) +{ + static unsigned long old_wp = -1, old_rp = -1, old_level = -1; + unsigned long wp, rp, level; + static int paused_time; + + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + if (has_hevc_vdec()) { + if ((vdec_on(VDEC_HEVC)) + && (READ_VREG(HEVC_STREAM_CONTROL) & 1)) { + wp = READ_VREG(HEVC_STREAM_WR_PTR); + rp = READ_VREG(HEVC_STREAM_RD_PTR); + level = READ_VREG(HEVC_STREAM_LEVEL); + } else { + wp = READ_VREG(VLD_MEM_VIFIFO_WP); + rp = READ_VREG(VLD_MEM_VIFIFO_RP); + level = READ_VREG(VLD_MEM_VIFIFO_LEVEL); + } + } else + /* #endif */ + { + wp = READ_VREG(VLD_MEM_VIFIFO_WP); + rp = READ_VREG(VLD_MEM_VIFIFO_RP); + level = READ_VREG(VLD_MEM_VIFIFO_LEVEL); + } + /*have data,but output buffer is full */ + if ((rp == old_rp && level > 1024) || + (rp == old_rp && wp == old_wp && level == old_level)) { + /*no write && not read */ + paused_time++; + } else { + paused_time = 0; + } + old_wp = wp; old_rp = rp; old_level = level; + if (paused_time > 10) + return 1; + return 0; +} + +int amvdev_pause(void) +{ + video_running = 0; + video_stated_changed = 1; + return 0; +} +EXPORT_SYMBOL(amvdev_pause); + +int amvdev_resume(void) +{ + video_running = 1; + video_stated_changed = 1; + return 0; +} +EXPORT_SYMBOL(amvdev_resume); + +static void vdec_paused_check_timer(unsigned long 
arg) +{ + if (video_stated_changed) { + if (!video_running) { + if (vdec_is_paused()) { + pr_info("vdec paused and release wakelock now\n"); + amvdec_wake_unlock(); + video_stated_changed = 0; + } + } else { + amvdec_wake_lock(); + video_stated_changed = 0; + } + } + mod_timer(&amvdevtimer, jiffies + WAKE_CHECK_INTERVAL); +} +#else +int amvdev_pause(void) +{ + return 0; +} + +int amvdev_resume(void) +{ + return 0; +} +#endif + +int amvdec_init(void) +{ +#ifdef CONFIG_WAKELOCK + /* + *wake_lock_init(&amvdec_lock, WAKE_LOCK_IDLE, "amvdec_lock"); + *tmp mark for compile, no "WAKE_LOCK_IDLE" definition in kernel 3.8 + */ + wake_lock_init(&amvdec_lock, /*WAKE_LOCK_IDLE */ WAKE_LOCK_SUSPEND, + "amvdec_lock"); + + init_timer(&amvdevtimer); + + amvdevtimer.data = (ulong) &amvdevtimer; + amvdevtimer.function = vdec_paused_check_timer; +#endif + return 0; +} +EXPORT_SYMBOL(amvdec_init); + +void amvdec_exit(void) +{ +#ifdef CONFIG_WAKELOCK + del_timer_sync(&amvdevtimer); +#endif +} +EXPORT_SYMBOL(amvdec_exit); + +#if 0 +int __init amvdec_init(void) +{ +#ifdef CONFIG_WAKELOCK + /* + *wake_lock_init(&amvdec_lock, WAKE_LOCK_IDLE, "amvdec_lock"); + *tmp mark for compile, no "WAKE_LOCK_IDLE" definition in kernel 3.8 + */ + wake_lock_init(&amvdec_lock, /*WAKE_LOCK_IDLE */ WAKE_LOCK_SUSPEND, + "amvdec_lock"); + + init_timer(&amvdevtimer); + + amvdevtimer.data = (ulong) &amvdevtimer; + amvdevtimer.function = vdec_paused_check_timer; +#endif + return 0; +} + +static void __exit amvdec_exit(void) +{ +#ifdef CONFIG_WAKELOCK + del_timer_sync(&amvdevtimer); +#endif +} + +module_init(amvdec_init); +module_exit(amvdec_exit); +#endif + +MODULE_DESCRIPTION("Amlogic Video Decoder Utility Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/utils/amvdec.h b/drivers/frame_provider/decoder/utils/amvdec.h new file mode 100644 index 0000000..a3deecd --- /dev/null +++ b/drivers/frame_provider/decoder/utils/amvdec.h
/*
 * drivers/amlogic/media/frame_provider/decoder/utils/amvdec.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

/*
 * Public interface of amvdec.c: start/stop, clock enable/disable,
 * microcode loading and PM hooks for the VDEC / VDEC2 / HEVC / HCODEC
 * decoder cores.
 */
#ifndef AMVDEC_H
#define AMVDEC_H
#include <linux/amlogic/media/utils/amports_config.h>
#include <linux/amlogic/media/utils/vformat.h>
#include "vdec.h"

/* Microcode images must be 8-byte aligned. */
#define UCODE_ALIGN		8
#define UCODE_ALIGN_MASK	7UL

/* Registration info handed to a decoder at open time: its reserved
 * memory window, CMA device and stream system info.
 */
struct amvdec_dec_reg_s {
	unsigned long mem_start;
	unsigned long mem_end;
	struct device *cma_dev;
	struct dec_sysinfo *dec_sysinfo;
};				/*amvdec_dec_reg_t */

struct vdec_s;

/* VDEC core control. */
extern void amvdec_start(void);
extern void amvdec_stop(void);
extern void amvdec_enable(void);
extern void amvdec_disable(void);
s32 amvdec_loadmc_ex(enum vformat_e type, const char *name, char *def);
s32 amvdec_vdec_loadmc_ex(enum vformat_e type, const char *name,
	struct vdec_s *vdec, char *def);

/* VDEC2 core control (no-ops on SoCs without a VDEC2). */
extern void amvdec2_start(void);
extern void amvdec2_stop(void);
extern void amvdec2_enable(void);
extern void amvdec2_disable(void);
s32 amvdec2_loadmc_ex(enum vformat_e type, const char *name, char *def);

/* HEVC core control (no-ops on SoCs without an HEVC core). */
extern void amhevc_start(void);
extern void amhevc_stop(void);
extern void amhevc_enable(void);
extern void amhevc_disable(void);
s32 amhevc_loadmc_ex(enum vformat_e type, const char *name, char *def);
s32 amhevc_vdec_loadmc_ex(enum vformat_e type, struct vdec_s *vdec,
	const char *name, char *def);
s32 amvdec_vdec_loadmc_buf_ex(enum vformat_e type, const char *name,
	struct vdec_s *vdec, char *buf, int size);

/* HCODEC (encoder) core control. */
extern void amhcodec_start(void);
extern void amhcodec_stop(void);
s32 amhcodec_loadmc(const u32 *p);
s32 amhcodec_loadmc_ex(enum vformat_e type, const char *name, char *def);

/* Wakelock-driven pause/resume notifications. */
extern int amvdev_pause(void);
extern int amvdev_resume(void);

#ifdef CONFIG_PM
/* NOTE(review): the resume declarations name the parameter "dec" while
 * the definitions use "dev" — harmless, but worth unifying.
 */
extern int amvdec_suspend(struct platform_device *dev, pm_message_t event);
extern int amvdec_resume(struct platform_device *dec);
extern int amhevc_suspend(struct platform_device *dev, pm_message_t event);
extern int amhevc_resume(struct platform_device *dec);

#endif

int amvdec_init(void);
void amvdec_exit(void);

/* Clock gating is handled elsewhere on modern SoCs, so the gate macros
 * compile away.
 */
#if 1				/* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */
#define AMVDEC_CLK_GATE_ON(a)
#define AMVDEC_CLK_GATE_OFF(a)
#else
#define AMVDEC_CLK_GATE_ON(a) CLK_GATE_ON(a)
#define AMVDEC_CLK_GATE_OFF(a) CLK_GATE_OFF(a)
#endif

/* TODO: move to register headers */
#define RESET_VCPU	(1<<7)
#define RESET_CCPU	(1<<8)

#endif /* AMVDEC_H */
diff --git a/drivers/frame_provider/decoder/utils/config_parser.c b/drivers/frame_provider/decoder/utils/config_parser.c new file mode 100644 index 0000000..9a65620 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/config_parser.c
@@ -0,0 +1,64 @@ +/* + * drivers/amlogic/amports/config_parser.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> + +#include "config_parser.h" +/* + *sample config: + *configs: width:1920;height:1080; + *need:width + *ok: return 0; + **val = value; + */ +int get_config_int(const char *configs, const char *need, int *val) +{ + const char *str; + int ret; + int lval = 0; + *val = 0; + + if (!configs || !need) + return -1; + str = strstr(configs, need); + if (str != NULL) { + if (str > configs && str[-1] != ';') { + /* + * if not the first config val. + * make sure before is ';' + * to recognize: + * ;crop_width:100 + * ;width:100 + */ + return -2; + } + str += strlen(need); + if (str[0] != ':' || str[1] == '\0') + return -3; + ret = sscanf(str, ":%d", &lval); + if (ret == 1) { + *val = lval; + return 0; + } + } + + return -4; +} +EXPORT_SYMBOL(get_config_int); +
diff --git a/drivers/frame_provider/decoder/utils/config_parser.h b/drivers/frame_provider/decoder/utils/config_parser.h new file mode 100644 index 0000000..9746caf --- /dev/null +++ b/drivers/frame_provider/decoder/utils/config_parser.h
/*
 * drivers/amlogic/amports/config_parser.c
 *
 * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#ifndef CONFIG_PARSER_HHH_
#define CONFIG_PARSER_HHH_
/*
 * Look up key "need" in a ';'-separated "key:value" config string
 * (e.g. "width:1920;height:1080;") and store its integer value in
 * *val.  Returns 0 on success, a negative value on failure.
 */
int get_config_int(const char *configs, const char *need, int *val);

#endif/*CONFIG_PARSER_HHH_*/
diff --git a/drivers/frame_provider/decoder/utils/decoder_bmmu_box.c b/drivers/frame_provider/decoder/utils/decoder_bmmu_box.c new file mode 100644 index 0000000..33a9e4b --- /dev/null +++ b/drivers/frame_provider/decoder/utils/decoder_bmmu_box.c
/*
 * drivers/amlogic/amports/decoder/decoder_bmmu_box.c
 *
 * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#define DEBUG
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kfifo.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/amlogic/media/codec_mm/codec_mm_scatter.h>
#include <linux/platform_device.h>

#include <linux/amlogic/media/video_sink/video_keeper.h>
#include "decoder_bmmu_box.h"
#include <linux/amlogic/media/codec_mm/codec_mm.h>
#include <linux/amlogic/media/codec_mm/codec_mm_keeper.h>

/*
 * A "bmmu box" is a fixed-size table of codec_mm buffer slots owned by
 * one decoder channel.  Slots are allocated/freed by index; the box is
 * tracked on a global list for debugfs-style dumping via a sysfs class.
 */
struct decoder_bmmu_box {
	int max_mm_num;		/* number of slots in mm_list[] */
	const char *name;	/* owner name passed to codec_mm */
	int channel_id;
	struct mutex mutex;	/* protects mm_list[], counters */
	struct list_head list;	/* linkage on global box list */
	int total_size;		/* sum of buffer_size of live slots */
	int box_ref_cnt;	/* count of currently allocated slots */
	int change_size_on_need_smaller;
	int align2n; /*can overwite on idx alloc */
	int mem_flags; /*can overwite on idx alloc */
	struct codec_mm_s *mm_list[1];	/* flexible tail, max_mm_num entries */
};

/* Global registry of all live boxes. */
struct decoder_bmmu_box_mgr {
	int num;
	struct mutex mutex;	/* protects box_list */
	struct list_head box_list;
};
static struct decoder_bmmu_box_mgr global_blk_mgr;
static struct decoder_bmmu_box_mgr *get_decoder_bmmu_box_mgr(void)
{
	return &global_blk_mgr;
}

/* Link a new box onto the global list (takes the manager lock). */
static int decoder_bmmu_box_mgr_add_box(struct decoder_bmmu_box *box)
{
	struct decoder_bmmu_box_mgr *mgr = get_decoder_bmmu_box_mgr();

	mutex_lock(&mgr->mutex);
	list_add_tail(&box->list, &mgr->box_list);
	mutex_unlock(&mgr->mutex);
	return 0;
}

/* Unlink a box from the global list (takes the manager lock). */
static int decoder_bmmu_box_mgr_del_box(struct decoder_bmmu_box *box)
{
	struct decoder_bmmu_box_mgr *mgr = get_decoder_bmmu_box_mgr();

	mutex_lock(&mgr->mutex);
	list_del(&box->list);
	mutex_unlock(&mgr->mutex);
	return 0;
}

/*
 * Check whether "box" is a currently registered (not yet freed) box
 * by scanning the global list.  Returns true when found.
 */
bool decoder_bmmu_box_valide_check(void *box)
{
	struct decoder_bmmu_box_mgr *mgr = get_decoder_bmmu_box_mgr();
	struct decoder_bmmu_box *bmmu_box = NULL;
	bool is_valide = false;

	mutex_lock(&mgr->mutex);
	list_for_each_entry(bmmu_box, &mgr->box_list, list) {
		if (bmmu_box && bmmu_box == box) {
			is_valide = true;
			break;
		}
	}
	mutex_unlock(&mgr->mutex);

	return is_valide;
}
EXPORT_SYMBOL(decoder_bmmu_box_valide_check);

/*
 * Allocate and register a box with max_num buffer slots.
 * "aligned" is the default alignment (power of two, as 2^aligned) and
 * "mem_flags" the default codec_mm flags for per-index allocations;
 * a CODEC_MM_FLAGS_TVP bit in mem_flags is preserved.
 * Returns an opaque handle, or NULL on allocation failure.
 */
void *decoder_bmmu_box_alloc_box(const char *name,
	int channel_id, int max_num,
	int aligned, int mem_flags)
{
	struct decoder_bmmu_box *box;
	int size;
	int tvp_flags;
	tvp_flags = (mem_flags & CODEC_MM_FLAGS_TVP) ?
		CODEC_MM_FLAGS_TVP : 0;

	pr_debug("decoder_bmmu_box_alloc_box, tvp_flags = %x\n", tvp_flags);

	/* struct already embeds one slot; extra slots extend the tail */
	size = sizeof(struct decoder_bmmu_box) + sizeof(struct codec_mm_s *) *
		max_num;
	box = kmalloc(size, GFP_KERNEL);
	if (!box) {
		pr_err("can't alloc decoder buffers box!!!\n");
		return NULL;
	}
	memset(box, 0, size);
	box->max_mm_num = max_num;
	box->name = name;
	box->channel_id = channel_id;
	box->align2n = aligned;
	box->mem_flags = mem_flags | tvp_flags;
	mutex_init(&box->mutex);
	INIT_LIST_HEAD(&box->list);
	decoder_bmmu_box_mgr_add_box(box);
	return (void *)box;
}
EXPORT_SYMBOL(decoder_bmmu_box_alloc_box);

/*
 * Ensure slot "idx" holds a buffer of at least "size" bytes.
 * An existing buffer is reused when it is big enough, not oversized
 * (when change_size_on_need_smaller is set), properly aligned, and not
 * "keeped" by the video keeper; otherwise it is released and a new one
 * allocated.  aligned_2n/mem_flags of -1 fall back to the box defaults.
 * Returns 0 on success, -1 on bad arguments, -ENOMEM on failure.
 *
 * NOTE(review): when an existing buffer is released and replaced here,
 * box_ref_cnt is incremented again without a matching decrement for
 * the released one — looks like the counter can drift upward; confirm
 * intended semantics of box_ref_cnt.
 */
int decoder_bmmu_box_alloc_idx(void *handle, int idx, int size, int aligned_2n,
	int mem_flags)
/*align& flags if -1 use box default.*/
{
	struct decoder_bmmu_box *box = handle;
	struct codec_mm_s *mm;
	int align = aligned_2n;
	int memflags = mem_flags;

	if (!box || idx < 0 || idx >= box->max_mm_num) {
		pr_err("can't alloc bmmu box(%p),idx:%d\n",
			box, idx);
		return -1;
	}
	if (align == -1)
		align = box->align2n;
	if (memflags == -1)
		memflags = box->mem_flags;

	mutex_lock(&box->mutex);
	mm = box->mm_list[idx];
	if (mm) {
		int invalid = 0;
		int keeped = 0;

		keeped = is_codec_mm_keeped(mm);
		if (!keeped) {
			if (mm->page_count * PAGE_SIZE < size) {
				/*size is small. */
				invalid = 1;
			} else if (box->change_size_on_need_smaller &&
				(mm->buffer_size > (size << 1))) {
				/*size is too large.
				 */
				invalid = 2;
			} else if (mm->phy_addr & ((1 << align) - 1)) {
				/*addr is not align */
				invalid = 4;
			}
			if (invalid) {
				box->total_size -= mm->buffer_size;
				codec_mm_release(mm, box->name);
				box->mm_list[idx] = NULL;
				mm = NULL;
			}
		} else {
			/* kept buffers are always replaced */
			box->total_size -= mm->buffer_size;
			codec_mm_release(mm, box->name);
			box->mm_list[idx] = NULL;
			mm = NULL;
		}
	}
	if (!mm) {
		mm = codec_mm_alloc(box->name, size, align, memflags);
		if (mm) {
			box->mm_list[idx] = mm;
			box->total_size += mm->buffer_size;
			mm->ins_id = box->channel_id;
			mm->ins_buffer_id = idx;
			box->box_ref_cnt++;
		}
	}
	mutex_unlock(&box->mutex);
	return mm ? 0 : -ENOMEM;
}

/* Release the buffer in slot "idx" (no-op if the slot is empty). */
int decoder_bmmu_box_free_idx(void *handle, int idx)
{
	struct decoder_bmmu_box *box = handle;
	struct codec_mm_s *mm;

	if (!box || idx < 0 || idx >= box->max_mm_num) {
		pr_err("can't free idx of box(%p),idx:%d in (%d-%d)\n",
			box, idx, 0,
			box ? (box->max_mm_num - 1) : 0);
		return -1;
	}
	mutex_lock(&box->mutex);
	mm = box->mm_list[idx];
	if (mm) {
		box->total_size -= mm->buffer_size;
		codec_mm_release(mm, box->name);
		box->mm_list[idx] = NULL;
		mm = NULL;
		box->box_ref_cnt--;
	}
	mutex_unlock(&box->mutex);
	return 0;
}
EXPORT_SYMBOL(decoder_bmmu_box_free_idx);

/*
 * Release every buffer in the box, unregister it and free the box
 * itself.  The handle must not be used afterwards.
 */
int decoder_bmmu_box_free(void *handle)
{
	struct decoder_bmmu_box *box = handle;
	struct codec_mm_s *mm;
	int i;

	if (!box) {
		pr_err("can't free box of NULL box!\n");
		return -1;
	}
	mutex_lock(&box->mutex);
	for (i = 0; i < box->max_mm_num; i++) {
		mm = box->mm_list[i];
		if (mm) {
			codec_mm_release(mm, box->name);
			box->mm_list[i] = NULL;
		}
	}
	mutex_unlock(&box->mutex);
	decoder_bmmu_box_mgr_del_box(box);
	kfree(box);
	return 0;
}
EXPORT_SYMBOL(decoder_bmmu_box_free);

/*
 * Free the box only when it holds no references and no buffers.
 * NOTE(review): box_ref_cnt is read before taking box->mutex — a
 * concurrent alloc could race with this check; confirm callers
 * serialize against allocation.
 */
void decoder_bmmu_try_to_release_box(void *handle)
{
	struct decoder_bmmu_box *box = handle;
	bool is_keep = false;
	int i;

	if (!box || box->box_ref_cnt)
		return;

	mutex_lock(&box->mutex);
	for (i = 0; i <
		box->max_mm_num; i++) {
		if (box->mm_list[i]) {
			is_keep = true;
			break;
		}
	}
	mutex_unlock(&box->mutex);

	if (!is_keep) {
		decoder_bmmu_box_mgr_del_box(box);
		kfree(box);
	}
}
EXPORT_SYMBOL(decoder_bmmu_try_to_release_box);

/* Return the raw codec_mm handle of slot "idx", or NULL. */
void *decoder_bmmu_box_get_mem_handle(void *box_handle, int idx)
{
	struct decoder_bmmu_box *box = box_handle;

	if (!box || idx < 0 || idx >= box->max_mm_num)
		return NULL;
	return box->mm_list[idx];
}
EXPORT_SYMBOL(decoder_bmmu_box_get_mem_handle);

/* Return the allocated size of slot "idx" in bytes, 0 if empty/invalid. */
int decoder_bmmu_box_get_mem_size(void *box_handle, int idx)
{
	struct decoder_bmmu_box *box = box_handle;
	int size = 0;

	if (!box || idx < 0 || idx >= box->max_mm_num)
		return 0;
	mutex_lock(&box->mutex);
	if (box->mm_list[idx] != NULL)
		size = box->mm_list[idx]->buffer_size;
	mutex_unlock(&box->mutex);
	return size;
}


/* Return the physical address of slot "idx", 0 if empty/invalid. */
unsigned long decoder_bmmu_box_get_phy_addr(void *box_handle, int idx)
{
	struct decoder_bmmu_box *box = box_handle;
	struct codec_mm_s *mm;

	if (!box || idx < 0 || idx >= box->max_mm_num)
		return 0;
	mm = box->mm_list[idx];
	if (!mm)
		return 0;
	return mm->phy_addr;
}
EXPORT_SYMBOL(decoder_bmmu_box_get_phy_addr);

/* Return the kernel virtual address of slot "idx", NULL if empty/invalid. */
void *decoder_bmmu_box_get_virt_addr(void *box_handle, int idx)
{
	struct decoder_bmmu_box *box = box_handle;
	struct codec_mm_s *mm;

	if (!box || idx < 0 || idx >= box->max_mm_num)
		return NULL;
	mm = box->mm_list[idx];
	if (!mm)
		return 0;
	return codec_mm_phys_to_virt(mm->phy_addr);
}

/*
 * Check (and optionally wait until) codec_mm has "size" bytes free.
 * flags: &BMMU_ALLOC_FLAGS_WAIT to wait; &BMMU_ALLOC_FLAGS_CAN_CLEAR_KEEPER
 * to force-release kept video frames first when space is short.
 * Returns non-zero when enough memory is (or became) available.
 */
int decoder_bmmu_box_check_and_wait_size(int size, int flags, int mem_flags)
{
	if ((flags & BMMU_ALLOC_FLAGS_CAN_CLEAR_KEEPER) &&
		codec_mm_get_free_size() < size) {
		pr_err("CMA force free keep,for size = %d\n", size);
		/*need free others?
		 */
		try_free_keep_video(1);
	}

	return codec_mm_enough_for_size(size,
		flags & BMMU_ALLOC_FLAGS_WAIT, mem_flags);
}

/*
 * Like decoder_bmmu_box_alloc_idx(), but first checks/waits for free
 * codec_mm space per "wait_flags" and retries after freeing kept
 * video frames on failure.  Returns 0 on success or a negative errno.
 */
int decoder_bmmu_box_alloc_idx_wait(
	void *handle, int idx,
	int size, int aligned_2n,
	int mem_flags,
	int wait_flags)
{
	int have_space;
	int ret = -1;
	int keeped = 0;

	if (decoder_bmmu_box_get_mem_size(handle, idx) >= size) {
		struct decoder_bmmu_box *box = handle;
		struct codec_mm_s *mm;
		mutex_lock(&box->mutex);
		mm = box->mm_list[idx];
		keeped = is_codec_mm_keeped(mm);
		mutex_unlock(&box->mutex);

		if (!keeped)
			return 0;/*have alloced memery before.*/
	}
	have_space = decoder_bmmu_box_check_and_wait_size(
		size,
		wait_flags,
		mem_flags);
	if (have_space) {
		ret = decoder_bmmu_box_alloc_idx(handle,
			idx, size, aligned_2n, mem_flags);
		if (ret == -ENOMEM) {
			pr_info("bmmu alloc idx fail, try free keep video.\n");
			try_free_keep_video(1);
		}
	} else {
		try_free_keep_video(1);
		ret = -ENOMEM;
	}
	return ret;
}
EXPORT_SYMBOL(decoder_bmmu_box_alloc_idx_wait);

/*
 * Convenience wrapper: allocate slot "idx" with the box's default
 * flags and return its physical address in *buf_phy_addr.
 * "driver_name" is only used for log messages.
 * Returns 0 on success, -EINVAL/-ENOMEM on failure.
 */
int decoder_bmmu_box_alloc_buf_phy(
	void *handle, int idx,
	int size, unsigned char *driver_name,
	unsigned long *buf_phy_addr)
{
	struct decoder_bmmu_box *bmmu_box = (struct decoder_bmmu_box *)handle;

	if (bmmu_box == NULL)
		return -EINVAL;

	if (!decoder_bmmu_box_check_and_wait_size(
		size,
		1, bmmu_box->mem_flags)) {
		pr_info("%s not enough buf for buf_idx = %d\n",
			driver_name, idx);
		return -ENOMEM;
	}
	if (!decoder_bmmu_box_alloc_idx_wait(
		handle,
		idx,
		size,
		-1,
		bmmu_box->mem_flags,
		BMMU_ALLOC_FLAGS_WAITCLEAR)) {
		*buf_phy_addr =
			decoder_bmmu_box_get_phy_addr(
				handle,
				idx);
		/*
		 *pr_info("%s malloc buf_idx = %d addr = %ld size = %d\n",
		 *	driver_name, idx, *buf_phy_addr, size);
		 */
	} else {
		pr_info("%s malloc failed %d\n", driver_name, idx);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(decoder_bmmu_box_alloc_buf_phy);

/*
 * Attach a codec_mm release callback to slot "idx".
 * "cb" must point to a struct codec_mm_cb_s.
 * NOTE(review): mm may be NULL for an empty slot — confirm
 * codec_mm_add_release_callback() tolerates a NULL mm.
 */
int decoder_bmmu_box_add_callback_func(
	void *handle, int
	idx,
	void *cb)
{
	struct decoder_bmmu_box *box = handle;
	struct codec_mm_s *mm;

	if (!box || idx < 0 || idx >= box->max_mm_num)
		return 0;

	mutex_lock(&box->mutex);
	mm = box->mm_list[idx];
	codec_mm_add_release_callback(mm, (struct codec_mm_cb_s *)cb);
	mutex_unlock(&box->mutex);

	return 0;
}
EXPORT_SYMBOL(decoder_bmmu_box_add_callback_func);

/*
 * Dump one box's live slots.  With buf==NULL, print to the kernel log
 * line by line (using the local 512-byte scratch buffer); otherwise
 * append to buf up to "size" bytes.  Returns the number of bytes
 * produced.
 */
static int decoder_bmmu_box_dump(struct decoder_bmmu_box *box, void *buf,
	int size)
{
	char *pbuf = buf;
	char sbuf[512];
	int tsize = 0;
	int s;
	int i;

	if (!buf) {
		pbuf = sbuf;
		size = 512;
	}
#define BUFPRINT(args...) \
	do {\
		s = snprintf(pbuf, size - tsize, args);\
		tsize += s;\
		pbuf += s; \
	} while (0)

	for (i = 0; i < box->max_mm_num; i++) {
		struct codec_mm_s *mm = box->mm_list[i];
		/* keep a safety margin so one entry never overruns buf */
		if (buf && (size - tsize) < 256) {
			BUFPRINT("\n\t**NOT END**\n");
			break;
		}
		if (mm) {
			BUFPRINT("code mem[%d]:%p, addr=%p, size=%d,from=%d\n",
				i,
				(void *)mm,
				(void *)mm->phy_addr,
				mm->buffer_size,
				mm->from_flags);
			if (!buf) {
				pr_info("%s", sbuf);
				pbuf = sbuf;
			}
		}
	}
#undef BUFPRINT

	return tsize;
}

/*
 * Dump every registered box (header line per box, then its slots).
 * buf==NULL prints to the kernel log; otherwise fills buf.
 * Returns the number of bytes produced.
 */
static int decoder_bmmu_box_dump_all(void *buf, int size)
{
	struct decoder_bmmu_box_mgr *mgr = get_decoder_bmmu_box_mgr();
	char *pbuf = buf;
	char sbuf[512];
	int tsize = 0;
	int s;
	int i;
	struct list_head *head, *list;

	if (!buf) {
		pbuf = sbuf;
		size = 512;
	}
#define BUFPRINT(args...) \
	do {\
		s = snprintf(pbuf, size - tsize, args);\
		tsize += s;\
		pbuf += s; \
	} while (0)

	mutex_lock(&mgr->mutex);
	head = &mgr->box_list;
	list = head->next;
	i = 0;
	while (list != head) {
		struct decoder_bmmu_box *box;

		box = list_entry(list, struct decoder_bmmu_box, list);
		BUFPRINT("box[%d]: %s, %splayer_id:%d, max_num:%d, size:%d\n",
			i, box->name,
			(box->mem_flags & CODEC_MM_FLAGS_TVP) ?
			"TVP mode " : "",
			box->channel_id,
			box->max_mm_num,
			box->total_size);
		if (buf) {
			s = decoder_bmmu_box_dump(box, pbuf, size - tsize);
			if (s > 0) {
				tsize += s;
				pbuf += s;
			}
		} else {
			pr_info("%s", sbuf);
			pbuf = sbuf;
			tsize += decoder_bmmu_box_dump(box, NULL, 0);
		}
		list = list->next;
		i++;
	}
	mutex_unlock(&mgr->mutex);

#undef BUFPRINT
	if (!buf)
		pr_info("%s", sbuf);
	return tsize;
}

/* sysfs: cat /sys/class/decoder_bmmu_box/box_dump */
static ssize_t box_dump_show(struct class *class, struct class_attribute *attr,
	char *buf)
{
	ssize_t ret = 0;

	ret = decoder_bmmu_box_dump_all(buf, PAGE_SIZE);
	return ret;
}

/* sysfs: cat /sys/class/decoder_bmmu_box/debug — usage help text. */
static ssize_t debug_show(struct class *class,
	struct class_attribute *attr,
	char *buf)
{
	ssize_t size = 0;
	size += sprintf(buf, "box debug help:\n");
	size += sprintf(buf + size, "echo n > debug\n");
	size += sprintf(buf + size, "n==0: clear all debugs)\n");
	size += sprintf(buf + size,
		"n=1: dump all box\n");

	return size;
}

/* sysfs: echo n > /sys/class/decoder_bmmu_box/debug — run debug cmd n. */
static ssize_t debug_store(struct class *class,
	struct class_attribute *attr,
	const char *buf, size_t size)
{
	unsigned val;
	ssize_t ret;
	val = -1;
	ret = sscanf(buf, "%d", &val);
	if (ret != 1)
		return -EINVAL;
	switch (val) {
	case 1:
		decoder_bmmu_box_dump_all(NULL , 0);
		break;
	default:
		pr_err("unknow cmd! %d\n", val);
	}
	return size;

}

static CLASS_ATTR_RO(box_dump);
static CLASS_ATTR_RW(debug);

static struct attribute *decoder_bmmu_box_class_attrs[] = {
	&class_attr_box_dump.attr,
	&class_attr_debug.attr,
	NULL
};

ATTRIBUTE_GROUPS(decoder_bmmu_box_class);

static struct class decoder_bmmu_box_class = {
	.name = "decoder_bmmu_box",
	.class_groups = decoder_bmmu_box_class_groups,
};

/* Module init: reset the global registry and register the sysfs class. */
int decoder_bmmu_box_init(void)
{
	int r;

	memset(&global_blk_mgr, 0, sizeof(global_blk_mgr));
	INIT_LIST_HEAD(&global_blk_mgr.box_list);
	mutex_init(&global_blk_mgr.mutex);
	r = class_register(&decoder_bmmu_box_class);
	return r;
}
EXPORT_SYMBOL(decoder_bmmu_box_init);

/* Module exit: unregister the sysfs class. */
void decoder_bmmu_box_exit(void)
{
	class_unregister(&decoder_bmmu_box_class);
	pr_info("dec bmmu box exit.\n");
}

/* Dead code kept from the built-in (non-module) variant. */
#if 0
static int __init decoder_bmmu_box_init(void)
{
	int r;

	memset(&global_blk_mgr, 0, sizeof(global_blk_mgr));
	INIT_LIST_HEAD(&global_blk_mgr.box_list);
	mutex_init(&global_blk_mgr.mutex);
	r = class_register(&decoder_bmmu_box_class);
	return r;
}

module_init(decoder_bmmu_box_init);
#endif
diff --git a/drivers/frame_provider/decoder/utils/decoder_bmmu_box.h b/drivers/frame_provider/decoder/utils/decoder_bmmu_box.h new file mode 100644 index 0000000..93974ec --- /dev/null +++ b/drivers/frame_provider/decoder/utils/decoder_bmmu_box.h
/*
 * drivers/amlogic/amports/decoder/decoder_bmmu_box.h
 *
 * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#ifndef DECODER_BLOCK_BUFFER_BOX
#define DECODER_BLOCK_BUFFER_BOX

/*
 * A "bmmu box" is an indexed table of codec_mm block buffers owned by
 * one decoder channel.  All functions take the opaque handle returned
 * by decoder_bmmu_box_alloc_box().
 */

/* Create a box with max_num slots; "aligned" is the default 2^n
 * alignment and "mem_flags" the default codec_mm flags.
 * Returns an opaque handle, or NULL on failure.
 */
void *decoder_bmmu_box_alloc_box(const char *name,
	int channel_id,
	int max_num,
	int aligned,
	int mem_flags);

/* Ensure slot idx holds a buffer of at least "size" bytes;
 * aligned_2n/mem_flags of -1 use the box defaults.
 */
int decoder_bmmu_box_alloc_idx(
	void *handle, int idx, int size,
	int aligned_2n, int mem_flags);

int decoder_bmmu_box_free_idx(void *handle, int idx);
int decoder_bmmu_box_free(void *handle);
void *decoder_bmmu_box_get_mem_handle(
	void *box_handle, int idx);

unsigned long decoder_bmmu_box_get_phy_addr(
	void *box_handle, int idx);

void *decoder_bmmu_box_get_virt_addr(
	void *box_handle, int idx);

/*flags: &0x1 for wait,*/
int decoder_bmmu_box_check_and_wait_size(
	int size, int flags, int mem_flags);

/* Allocate slot idx with box defaults and return its physical address
 * through *buf_phy_addr; driver_name is used for log messages only.
 */
int decoder_bmmu_box_alloc_buf_phy(
	void *handle, int idx,
	int size, unsigned char *driver_name,
	unsigned long *buf_phy_addr);

/* cb must point to a struct codec_mm_cb_s release callback. */
int decoder_bmmu_box_add_callback_func(
	void *handle, int idx,
	void *cb);

/* Wait/force-free behavior flags for the *_wait allocators. */
#define BMMU_ALLOC_FLAGS_WAIT (1 << 0)
#define BMMU_ALLOC_FLAGS_CAN_CLEAR_KEEPER (1 << 1)
#define BMMU_ALLOC_FLAGS_WAITCLEAR \
	(BMMU_ALLOC_FLAGS_WAIT |\
	BMMU_ALLOC_FLAGS_CAN_CLEAR_KEEPER)

int decoder_bmmu_box_alloc_idx_wait(
	void *handle, int idx,
	int size, int aligned_2n,
	int mem_flags,
	int wait_flags);

/* Returns true while "box" is still registered (not freed). */
bool decoder_bmmu_box_valide_check(void *box);
/* Free the box if it no longer holds any buffer. */
void decoder_bmmu_try_to_release_box(void *handle);

int decoder_bmmu_box_init(void);
void decoder_bmmu_box_exit(void);

#endif
diff --git a/drivers/frame_provider/decoder/utils/decoder_mmu_box.c b/drivers/frame_provider/decoder/utils/decoder_mmu_box.c new file mode 100644 index 0000000..57180dd --- /dev/null +++ b/drivers/frame_provider/decoder/utils/decoder_mmu_box.c
/*
 * drivers/amlogic/media/frame_provider/decoder/utils/decoder_mmu_box.c
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kfifo.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/amlogic/media/codec_mm/codec_mm_scatter.h>
#include <linux/platform_device.h>
/*
 * An "mmu box" is an indexed table of codec_mm scatter allocations
 * (page lists for the decoder's internal MMU) owned by one decoder
 * channel.  Boxes are tracked on a global list for sysfs dumping.
 */
struct decoder_mmu_box {
	int max_sc_num;		/* number of slots in sc_list[] */
	const char *name;	/* owner name for codec_mm */
	int channel_id;
	int tvp_mode;		/* mem_flags passed at box creation */
	struct mutex mutex;	/* protects sc_list[] */
	struct list_head list;	/* linkage on global box list */
	struct codec_mm_scatter *sc_list[1];	/* flexible tail */
};
#define MAX_KEEP_FRAME 4
#define START_KEEP_ID 0x9
#define MAX_KEEP_ID (INT_MAX - 1)
/* Global registry of boxes plus slots for "kept" scatters. */
struct decoder_mmu_box_mgr {
	int num;
	struct mutex mutex;	/* protects box_list */
	struct codec_mm_scatter *keep_sc[MAX_KEEP_FRAME];
	int keep_id[MAX_KEEP_FRAME];
	int next_id;/*id for keep & free.*/
	struct list_head box_list;
};
static struct decoder_mmu_box_mgr global_mgr;
static struct decoder_mmu_box_mgr *get_decoder_mmu_box_mgr(void)
{
	return &global_mgr;
}

/* Link a new box onto the global list (takes the manager lock). */
static int decoder_mmu_box_mgr_add_box(struct decoder_mmu_box *box)
{
	struct decoder_mmu_box_mgr *mgr = get_decoder_mmu_box_mgr();

	mutex_lock(&mgr->mutex);
	list_add_tail(&box->list, &mgr->box_list);
	mutex_unlock(&mgr->mutex);
	return 0;
}

/* Unlink a box from the global list (takes the manager lock). */
static int decoder_mmu_box_mgr_del_box(struct decoder_mmu_box *box)
{
	struct decoder_mmu_box_mgr *mgr = get_decoder_mmu_box_mgr();

	mutex_lock(&mgr->mutex);
	list_del(&box->list);
	mutex_unlock(&mgr->mutex);
	return 0;
}

/*
 * Query available scatter memory for the given TVP mode.
 * Returns 0 when the handle is NULL, otherwise whatever
 * codec_mm_scatter_size() reports.
 */
int decoder_mmu_box_sc_check(void *handle, int is_tvp)
{
	struct decoder_mmu_box *box = handle;
	if (!box) {
		pr_err("mmu box NULL !!!\n");
		return 0;
	}
	return codec_mm_scatter_size(is_tvp);
}
EXPORT_SYMBOL(decoder_mmu_box_sc_check);


/*
 * Allocate and register an mmu box with max_num scatter slots and
 * enable the scatter manager's delayed-free cache (2000 ms,
 * min_size_M megabytes).  Returns an opaque handle or NULL.
 */
void *decoder_mmu_box_alloc_box(const char *name,
	int channel_id,
	int max_num,
	int min_size_M,
	int mem_flags)
/*min_size_M:wait alloc this size*/
{
	struct decoder_mmu_box *box;
	int size;

	pr_debug("decoder_mmu_box_alloc_box, mem_flags = 0x%x\n", mem_flags);

	/* struct already embeds one slot; extra slots extend the tail */
	size = sizeof(struct decoder_mmu_box) +
		sizeof(struct codec_mm_scatter *) *
		max_num;
	box = kmalloc(size, GFP_KERNEL);
	if (!box) {
		pr_err("can't alloc decoder buffers box!!!\n");
		return NULL;
	}
	memset(box, 0, size);
	box->max_sc_num = max_num;
	box->name = name;
	box->channel_id = channel_id;
	box->tvp_mode = mem_flags;

	mutex_init(&box->mutex);
	INIT_LIST_HEAD(&box->list);
	decoder_mmu_box_mgr_add_box(box);
	codec_mm_scatter_mgt_delay_free_switch(1, 2000,
		min_size_M, box->tvp_mode);
	return (void *)box;
}
EXPORT_SYMBOL(decoder_mmu_box_alloc_box);

/*
 * Ensure slot "idx" holds a scatter of num_pages pages and write each
 * page's index into mmu_index_adr[].  An existing scatter is resized
 * when its capacity allows, otherwise dropped and re-allocated (with
 * 64 pages of headroom).  Returns 0 on success, -1 on failure.
 *
 * NOTE(review): the return value of
 * codec_mm_scatter_alloc_want_pages() is stored in "ret" but never
 * checked — a resize failure would go unnoticed; confirm intended.
 */
int decoder_mmu_box_alloc_idx(
	void *handle, int idx, int num_pages,
	unsigned int *mmu_index_adr)
{
	struct decoder_mmu_box *box = handle;
	struct codec_mm_scatter *sc;
	int ret;
	int i;

	if (!box || idx < 0 || idx >= box->max_sc_num) {
		pr_err("can't alloc mmu box(%p),idx:%d\n",
			box, idx);
		return -1;
	}
	mutex_lock(&box->mutex);
	sc = box->sc_list[idx];
	if (sc) {
		if (sc->page_max_cnt >= num_pages)
			ret = codec_mm_scatter_alloc_want_pages(sc,
				num_pages);
		else {
			codec_mm_scatter_dec_owner_user(sc, 0);
			box->sc_list[idx] = NULL;
			sc = NULL;
		}

	}
	if (!sc) {
		sc = codec_mm_scatter_alloc(num_pages + 64, num_pages,
			box->tvp_mode);
		if (!sc) {
			mutex_unlock(&box->mutex);
			pr_err("alloc mmu failed, need pages=%d\n",
				num_pages);
			return -1;
		}
		box->sc_list[idx] = sc;
	}

	for (i = 0; i < num_pages; i++)
		mmu_index_adr[i] = PAGE_INDEX(sc->pages_list[i]);

	mutex_unlock(&box->mutex);

	return 0;
}
EXPORT_SYMBOL(decoder_mmu_box_alloc_idx);

/*
 * Release the tail pages of slot "idx" from start_release_index
 * onward (keeps the scatter itself).  Returns 0, or -1 on bad args.
 */
int decoder_mmu_box_free_idx_tail(
	void *handle, int idx,
	int start_release_index)
{
	struct decoder_mmu_box *box = handle;
	struct codec_mm_scatter *sc;

	if (!box || idx < 0 || idx >= box->max_sc_num) {
		pr_err("can't free tail mmu box(%p),idx:%d in (%d-%d)\n",
			box, idx, 0,
			box ? (box->max_sc_num - 1) : 0);
		return -1;
	}
	mutex_lock(&box->mutex);
	sc = box->sc_list[idx];
	if (sc && start_release_index < sc->page_cnt)
		codec_mm_scatter_free_tail_pages_fast(sc,
			start_release_index);
	mutex_unlock(&box->mutex);
	return 0;
}
EXPORT_SYMBOL(decoder_mmu_box_free_idx_tail);

/* Drop the scatter in slot "idx" (only when it still holds pages). */
int decoder_mmu_box_free_idx(void *handle, int idx)
{
	struct decoder_mmu_box *box = handle;
	struct codec_mm_scatter *sc;

	if (!box || idx < 0 || idx >= box->max_sc_num) {
		pr_err("can't free idx of box(%p),idx:%d in (%d-%d)\n",
			box, idx, 0,
			box ?
			(box->max_sc_num - 1) : 0);
		return -1;
	}

	mutex_lock(&box->mutex);
	sc = box->sc_list[idx];
	if (sc && sc->page_cnt > 0) {
		codec_mm_scatter_dec_owner_user(sc, 0);
		box->sc_list[idx] = NULL;
	}

	mutex_unlock(&box->mutex);

	return 0;
}
EXPORT_SYMBOL(decoder_mmu_box_free_idx);

/*
 * Drop every scatter in the box, disable the delayed-free cache,
 * unregister and free the box.  The handle must not be used afterwards.
 */
int decoder_mmu_box_free(void *handle)
{
	struct decoder_mmu_box *box = handle;
	struct codec_mm_scatter *sc;
	int i;

	if (!box) {
		pr_err("can't free box of NULL box!\n");
		return -1;
	}

	mutex_lock(&box->mutex);
	for (i = 0; i < box->max_sc_num; i++) {
		sc = box->sc_list[i];
		if (sc) {
			codec_mm_scatter_dec_owner_user(sc, 0);
			box->sc_list[i] = NULL;

		}
	}

	codec_mm_scatter_mgt_delay_free_switch(0, 0, 0, box->tvp_mode);

	mutex_unlock(&box->mutex);

	decoder_mmu_box_mgr_del_box(box);
	kfree(box);

	return 0;
}
EXPORT_SYMBOL(decoder_mmu_box_free);

/* Return the raw scatter handle of slot "idx", or NULL. */
void *decoder_mmu_box_get_mem_handle(void *box_handle, int idx)
{
	struct decoder_mmu_box *box = box_handle;

	if (!box || idx < 0 || idx >= box->max_sc_num)
		return NULL;
	return box->sc_list[idx];
}
EXPORT_SYMBOL(decoder_mmu_box_get_mem_handle);

/*
 * Dump one box's live scatter slots.  buf==NULL prints to the kernel
 * log via a local 512-byte scratch buffer; otherwise appends to buf.
 * Returns the number of bytes produced.
 */
static int decoder_mmu_box_dump(struct decoder_mmu_box *box,
	void *buf, int size)
{
	char *pbuf = buf;
	char sbuf[512];
	int tsize = 0;
	int s;
	int i;

	if (!buf) {
		pbuf = sbuf;
		size = 512;
	}
	#define BUFPRINT(args...) \
	do {\
		s = snprintf(pbuf, size - tsize, args);\
		tsize += s;\
		pbuf += s; \
	} while (0)

	for (i = 0; i < box->max_sc_num; i++) {
		struct codec_mm_scatter *sc = box->sc_list[i];

		if (sc) {
			BUFPRINT("sc mem[%d]:%p, size=%d\n",
				i, sc,
				sc->page_cnt << PAGE_SHIFT);
		}
	}
#undef BUFPRINT
	if (!buf)
		pr_info("%s", sbuf);

	return tsize;
}

/*
 * Dump every registered mmu box.  buf==NULL prints to the kernel log;
 * otherwise fills buf.  Returns the number of bytes produced.
 *
 * NOTE(review): "s += decoder_mmu_box_dump(...)" accumulates on top of
 * the last BUFPRINT's snprintf result instead of assigning — tsize can
 * be double-counted (the sibling bmmu dump_all uses "s ="); confirm.
 */
static int decoder_mmu_box_dump_all(void *buf, int size)
{
	struct decoder_mmu_box_mgr *mgr = get_decoder_mmu_box_mgr();
	char *pbuf = buf;
	char sbuf[512];
	int tsize = 0;
	int s;
	int i;
	struct list_head *head, *list;

	if (!pbuf) {
		pbuf = sbuf;
		size = 512;
	}

	#define BUFPRINT(args...) \
	do {\
		s = snprintf(pbuf, size - tsize, args);\
		tsize += s;\
		pbuf += s; \
	} while (0)

	mutex_lock(&mgr->mutex);
	head = &mgr->box_list;
	list = head->next;
	i = 0;
	while (list != head) {
		struct decoder_mmu_box *box;
		box = list_entry(list, struct decoder_mmu_box,
			list);
		BUFPRINT("box[%d]: %s, %splayer_id:%d, max_num:%d\n",
			i,
			box->name,
			box->tvp_mode ?
			"TVP mode " : "",
			box->channel_id,
			box->max_sc_num);
		if (buf) {
			s += decoder_mmu_box_dump(box, pbuf, size - tsize);
			if (s > 0) {
				tsize += s;
				pbuf += s;
			}
		} else {
			pr_info("%s", sbuf);
			pbuf = sbuf;
			tsize += decoder_mmu_box_dump(box, NULL, 0);
		}
		list = list->next;
		i++;
	}
	mutex_unlock(&mgr->mutex);


#undef BUFPRINT
	if (!buf)
		pr_info("%s", sbuf);
	return tsize;
}



/* sysfs: cat /sys/class/decoder_mmu_box/box_dump */
static ssize_t
box_dump_show(struct class *class,
	struct class_attribute *attr, char *buf)
{
	ssize_t ret = 0;

	ret = decoder_mmu_box_dump_all(buf, PAGE_SIZE);
	return ret;
}

static CLASS_ATTR_RO(box_dump);

static struct attribute *decoder_mmu_box_class_attrs[] = {
	&class_attr_box_dump.attr,
	NULL
};

ATTRIBUTE_GROUPS(decoder_mmu_box_class);

static struct class decoder_mmu_box_class = {
	.name = "decoder_mmu_box",
	.class_groups = decoder_mmu_box_class_groups,
};

/* Module init: reset the global registry and register the sysfs class. */
int decoder_mmu_box_init(void)
{
	int r;

	memset(&global_mgr, 0, sizeof(global_mgr));
	INIT_LIST_HEAD(&global_mgr.box_list);
	mutex_init(&global_mgr.mutex);
	global_mgr.next_id = START_KEEP_ID;
	r = class_register(&decoder_mmu_box_class);
	return r;
}
EXPORT_SYMBOL(decoder_mmu_box_init);

/* Module exit: unregister the sysfs class. */
void decoder_mmu_box_exit(void)
{
	class_unregister(&decoder_mmu_box_class);
	pr_info("dec mmu box exit.\n");
}

/* Dead code kept from the built-in (non-module) variant. */
#if 0
static int __init decoder_mmu_box_init(void)
{
	int r;

	memset(&global_mgr, 0, sizeof(global_mgr));
	INIT_LIST_HEAD(&global_mgr.box_list);
	mutex_init(&global_mgr.mutex);
	global_mgr.next_id = START_KEEP_ID;
	r = class_register(&decoder_mmu_box_class);
	return r;
}

module_init(decoder_mmu_box_init);
#endif
diff --git a/drivers/frame_provider/decoder/utils/decoder_mmu_box.h b/drivers/frame_provider/decoder/utils/decoder_mmu_box.h new file mode 100644 index 0000000..13b3dd9 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/decoder_mmu_box.h
/*
 * drivers/amlogic/media/frame_provider/decoder/utils/decoder_mmu_box.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#ifndef DECODER_BUFFER_BOX
#define DECODER_BUFFER_BOX

/*
 * An "mmu box" is an indexed table of codec_mm scatter (page-list)
 * allocations for a decoder's internal MMU.  All functions take the
 * opaque handle returned by decoder_mmu_box_alloc_box().
 */

/* Create a box with max_num scatter slots; min_size_M configures the
 * scatter manager's delayed-free cache.  Returns a handle or NULL.
 */
void *decoder_mmu_box_alloc_box(const char *name,
	int channel_id,
	int max_num,
	int min_size_M,
	int mem_flags);

/* Query free scatter memory for the given TVP mode. */
int decoder_mmu_box_sc_check(void *handle, int is_tvp);

/* Ensure slot idx holds num_pages pages; page indexes are written to
 * mmu_index_adr[].
 */
int decoder_mmu_box_alloc_idx(
	void *handle, int idx, int num_pages,
	unsigned int *mmu_index_adr);

/* Release slot idx's pages from start_release_index onward. */
int decoder_mmu_box_free_idx_tail(void *handle, int idx,
	int start_release_index);

int decoder_mmu_box_free_idx(void *handle, int idx);

int decoder_mmu_box_free(void *handle);

/* Keep-frame management.  NOTE(review): no definitions for these three
 * are visible in decoder_mmu_box.c in this change — confirm they are
 * implemented elsewhere or are stale declarations.
 */
int decoder_mmu_box_move_keep_idx(void *box_handle,
	int keep_idx);
int decoder_mmu_box_free_keep(int keep_id);

int decoder_mmu_box_free_all_keep(void);

void *decoder_mmu_box_get_mem_handle(void *box_handle, int idx);

int decoder_mmu_box_init(void);

void decoder_mmu_box_exit(void);

#endif
diff --git a/drivers/frame_provider/decoder/utils/firmware.h b/drivers/frame_provider/decoder/utils/firmware.h new file mode 100644 index 0000000..cd48036 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/firmware.h
@@ -0,0 +1,56 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef __VIDEO_FIRMWARE_HEADER_ +#define __VIDEO_FIRMWARE_HEADER_ + +#include "../../../common/firmware/firmware_type.h" +#include <linux/amlogic/media/utils/vformat.h> +#if defined(CONFIG_AMLOGIC_TEE) || defined(CONFIG_AMLOGIC_TEE_MODULE) +#include <linux/amlogic/tee.h> +#endif + +#define FW_LOAD_FORCE (0x1) +#define FW_LOAD_TRY (0X2) + +struct firmware_s { + char name[32]; + unsigned int len; + char data[0]; +}; + +extern int get_decoder_firmware_data(enum vformat_e type, + const char *file_name, char *buf, int size); +extern int get_data_from_name(const char *name, char *buf); +extern int get_firmware_data(unsigned int foramt, char *buf); +extern int video_fw_reload(int mode); + +#if !defined(CONFIG_AMLOGIC_TEE) && !defined(CONFIG_AMLOGIC_TEE_MODULE) +static inline bool tee_enabled(void) { return false; } +static inline int tee_load_video_fw_swap(u32 index, u32 vdec, bool is_swap) +{ + return -1; +} +static inline int tee_load_video_fw(u32 index, u32 vdec) +{ + return -1; +} +#endif + +#endif
diff --git a/drivers/frame_provider/decoder/utils/frame_check.c b/drivers/frame_provider/decoder/utils/frame_check.c new file mode 100644 index 0000000..0600f8e --- /dev/null +++ b/drivers/frame_provider/decoder/utils/frame_check.c
@@ -0,0 +1,1780 @@ +/* + * drivers/amlogic/media/frame_provider/decoder/utils/frame_check.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/kfifo.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/vmalloc.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <asm-generic/checksum.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include <linux/crc32.h> +#include <linux/fs.h> +#include "vdec.h" +#include "frame_check.h" +#include "amlogic_fbc_hook.h" +#include <linux/highmem.h> +#include <linux/page-flags.h> +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include <asm/cacheflush.h> + +#define FC_ERROR 0x0 + +#define FC_YUV_DEBUG 0x01 +#define FC_CRC_DEBUG 0x02 +#define FC_TST_DEBUG 0x80 +#define FC_ERR_CRC_BLOCK_MODE 0x10 +#define FC_CHECK_CRC_LOOP_MODE 0x20 +#define AD_CHECK_CRC_LOOP_MODE 0x40 + +#define YUV_MASK 0x01 +#define CRC_MASK 0x02 +#define AUX_MASK 0x04 + + +#define MAX_YUV_SIZE (4096 * 2304) +#define YUV_DEF_SIZE (MAX_YUV_SIZE * 3 / 2) +#define YUV_DEF_NUM 1 + +#define MAX_SIZE_AFBC_PLANES (4096 * 2048) 
+ +#define VMAP_STRIDE_SIZE (1024*1024) + +static unsigned int fc_debug; +static unsigned int size_yuv_buf = (YUV_DEF_SIZE * YUV_DEF_NUM); + +#define dbg_print(mask, ...) do { \ + if ((fc_debug & mask) || \ + (mask == FC_ERROR)) \ + printk("[FRAME_CHECK] "__VA_ARGS__);\ + } while(0) + + +#define CRC_PATH "/data/tmp/" +#define YUV_PATH "/data/tmp/" +static char comp_crc[128] = "name"; +static char aux_comp_crc[128] = "aux"; + +static struct vdec_s *single_mode_vdec = NULL; + +static unsigned int yuv_enable, check_enable; +static unsigned int aux_enable; +static unsigned int yuv_start[MAX_INSTANCE_MUN]; +static unsigned int yuv_num[MAX_INSTANCE_MUN]; + +#define CHECKSUM_PATH "/data/local/tmp/" +static char checksum_info[128] = "checksum info"; +static char checksum_filename[128] = "checksum"; +static unsigned int checksum_enable; +static unsigned int checksum_start_count; + +static const char * const format_name[] = { + "MPEG12", + "MPEG4", + "H264", + "MJPEG", + "REAL", + "JPEG", + "VC1", + "AVS", + "YUV", + "H264MVC", + "H264_4K2K", + "HEVC", + "H264_ENC", + "JPEG_ENC", + "VP9", + "AVS2", + "AV1", +}; + +static const char *get_format_name(int format) +{ + if (format < 17 && format >= 0) + return format_name[format]; + else + return "Unknow"; +} + + +static inline void set_enable(struct pic_check_mgr_t *p, int mask) +{ + p->enable |= mask; +} + +static inline void set_disable(struct pic_check_mgr_t *p, int mask) +{ + p->enable &= (~mask); +} + +static inline void aux_set_enable(struct aux_data_check_mgr_t *p, int mask) +{ + p->enable |= mask; +} + +static inline void aux_set_disable(struct aux_data_check_mgr_t *p, int mask) +{ + p->enable &= (~mask); +} + + +static inline void check_schedule(struct pic_check_mgr_t *mgr) +{ + if (atomic_read(&mgr->work_inited)) + vdec_schedule_work(&mgr->frame_check_work); +} + +static inline void aux_data_check_schedule(struct aux_data_check_mgr_t *mgr) +{ + if (atomic_read(&mgr->work_inited)) + 
vdec_schedule_work(&mgr->aux_data_check_work); +} + +static bool is_oversize(int w, int h) +{ + if (w <= 0 || h <= 0) + return true; + + if (h != 0 && (w > (MAX_YUV_SIZE / h))) + return true; + + return false; +} + +unsigned long vdec_cav_get_addr(int index); +unsigned int vdec_cav_get_width(int index); +unsigned int vdec_cav_get_height(int index); +#define canvas_0(v) ((v) & 0xff) +#define canvas_1(v) (((v) >> 8) & 0xff) +#define canvas_2(v) (((v) >> 16) & 0xff) +#define canvas_3(v) (((v) >> 24) & 0xff) + +#define canvasY(v) canvas_0(v) +#define canvasU(v) canvas_1(v) +#define canvasV(v) canvas_2(v) +#define canvasUV(v) canvas_1(v) + +static int get_frame_size(struct pic_check_mgr_t *pic, + struct vframe_s *vf) +{ + if (is_oversize(vf->width, vf->height)) { + dbg_print(FC_ERROR, "vf size err: w=%d, h=%d\n", + vf->width, vf->height); + return -1; + } + pic->height = vf->height; + pic->width = vf->width; + pic->size_y = vf->width * vf->height; + pic->size_uv = pic->size_y >> (1 + pic->mjpeg_flag); + pic->size_pic = pic->size_y + (pic->size_y >> 1); + + if ((!(vf->type & VIDTYPE_VIU_NV21)) && (!pic->mjpeg_flag)) + return 0; + + if ((vf->canvas0Addr == vf->canvas1Addr) && + (vf->canvas0Addr != 0) && + (vf->canvas0Addr != -1)) { + pic->canvas_w = vdec_cav_get_width(canvasY(vf->canvas0Addr)); + //canvas_get_width(canvasY(vf->canvas0Addr)); + pic->canvas_h = vdec_cav_get_height(canvasY(vf->canvas0Addr)); + //canvas_get_height(canvasY(vf->canvas0Addr)); + } else { + pic->canvas_w = vf->canvas0_config[0].width; + pic->canvas_h = vf->canvas0_config[0].height; + } + + if ((pic->canvas_h < 1) || (pic->canvas_w < 1)) { + dbg_print(FC_ERROR, "(canvas,pic) w(%d,%d), h(%d,%d)\n", + pic->canvas_w, vf->width, pic->canvas_h, vf->height); + return -1; + } +/* + int blkmod; + blkmod = canvas_get_blkmode(canvasY(vf->canvas0Addr)); + if (blkmod != CANVAS_BLKMODE_LINEAR) { + dbg_print(0, "WARN: canvas blkmod %x\n", blkmod); + } +*/ + return 0; +} + +static int canvas_get_virt_addr(struct 
pic_check_mgr_t *pic, + struct vframe_s *vf) +{ + unsigned long phy_y_addr, phy_uv_addr; + void *vaddr_y, *vaddr_uv; + + if ((vf->canvas0Addr == vf->canvas1Addr) && + (vf->canvas0Addr != 0) && + (vf->canvas0Addr != -1)) { + phy_y_addr = vdec_cav_get_addr(canvasY(vf->canvas0Addr)); //canvas_get_addr(canvasY(vf->canvas0Addr)); + phy_uv_addr = vdec_cav_get_addr(canvasUV(vf->canvas0Addr)); //canvas_get_addr(canvasUV(vf->canvas0Addr)); + } else { + phy_y_addr = vf->canvas0_config[0].phy_addr; + phy_uv_addr = vf->canvas0_config[1].phy_addr; + } + vaddr_y = codec_mm_phys_to_virt(phy_y_addr); + vaddr_uv = codec_mm_phys_to_virt(phy_uv_addr); + + if (((!vaddr_y) || (!vaddr_uv)) && ((!phy_y_addr) || (!phy_uv_addr))) { + dbg_print(FC_ERROR, "%s, y_addr %p(0x%lx), uv_addr %p(0x%lx)\n", + __func__, vaddr_y, phy_y_addr, vaddr_uv, phy_uv_addr); + return -1; + } + pic->y_vaddr = vaddr_y; + pic->uv_vaddr = vaddr_uv; + pic->y_phyaddr = phy_y_addr; + pic->uv_phyaddr = phy_uv_addr; + + if (pic->mjpeg_flag) { + if ((vf->canvas0Addr == vf->canvas1Addr) && + (vf->canvas0Addr != 0) && + (vf->canvas0Addr != -1)) { + pic->extra_v_phyaddr = canvas_get_addr(canvasV(vf->canvas0Addr)); + } else { + pic->extra_v_phyaddr = vf->canvas0_config[2].phy_addr; + } + pic->extra_v_vaddr = codec_mm_phys_to_virt(phy_uv_addr); + + if (!pic->extra_v_vaddr && !pic->extra_v_phyaddr) + return -1; + } + + return 0; +} + +static int str_strip(char *str) +{ + char *s = str; + int i = 0; + + while (s[i]) { + if (s[i] == '\n') + s[i] = 0; + else if (s[i] == ' ') + s[i] = '_'; + i++; + } + + return i; +} + +static char *fget_crc_str(char *buf, + unsigned int size, struct pic_check_t *fc) +{ + unsigned int c = 0, sz, ret, index, crc1, crc2; + mm_segment_t old_fs; + char *cs; + + if (!fc->compare_fp) + return NULL; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + + do { + cs = buf; + sz = size; + while (--sz && (c = vfs_read(fc->compare_fp, + cs, 1, &fc->compare_pos) != 0)) { + if (*cs++ == '\n') + break; + } + *cs = '\0'; 
+ if ((c == 0) && (cs == buf)) { + set_fs(old_fs); + return NULL; + } + ret = sscanf(buf, "%08u: %8x %8x", &index, &crc1, &crc2); + dbg_print(FC_CRC_DEBUG, "%s, index = %d, cmp = %d\n", + __func__, index, fc->cmp_crc_cnt); + }while(ret != 3 || index != fc->cmp_crc_cnt); + + set_fs(old_fs); + fc->cmp_crc_cnt++; + + return buf; +} + +static char *fget_aux_data_crc_str(char *buf, + unsigned int size, struct aux_data_check_t *fc) +{ + unsigned int c = 0, sz, ret, index, crc; + mm_segment_t old_fs; + char *cs; + + if (!fc->compare_fp) + return NULL; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + + do { + cs = buf; + sz = size; + while (--sz && (c = vfs_read(fc->compare_fp, + cs, 1, &fc->compare_pos) != 0)) { + if (*cs++ == '\n') + break; + } + *cs = '\0'; + if ((c == 0) && (cs == buf)) { + set_fs(old_fs); + return NULL; + } + ret = sscanf(buf, "%08u: %8x", &index, &crc); + dbg_print(FC_CRC_DEBUG, "%s, index = %d, cmp = %d\n", + __func__, index, fc->cmp_crc_cnt); + }while(ret != 2 || index != fc->cmp_crc_cnt); + + set_fs(old_fs); + fc->cmp_crc_cnt++; + + return buf; +} + + +static struct file* file_open(int mode, const char *str, ...) 
+{ + char file[256] = {0}; + struct file* fp = NULL; + va_list args; + + va_start(args, str); + vsnprintf(file, sizeof(file), str, args); + + fp = filp_open(file, mode, (mode&O_CREAT)?0666:0); + if (IS_ERR(fp)) { + fp = NULL; + dbg_print(FC_ERROR, "open %s failed\n", file); + va_end(args); + return fp; + } + dbg_print(FC_ERROR, "open %s success\n", file); + va_end(args); + + return fp; +} + +static int write_yuv_work(struct pic_check_mgr_t *mgr) +{ + mm_segment_t old_fs; + unsigned int i, wr_size, pic_num; + struct pic_dump_t *dump = &mgr->pic_dump; + + if (dump->dump_cnt > 0) { + if (!dump->yuv_fp) { + dump->yuv_fp = file_open(O_CREAT | O_WRONLY | O_TRUNC, + "%s%s-%d-%d.yuv", YUV_PATH, comp_crc, mgr->id, mgr->file_cnt); + dump->yuv_pos = 0; + } + + if ((mgr->enable & YUV_MASK) && + (dump->yuv_fp != NULL) && + (dump->dump_cnt >= dump->num)) { + + i = 0; + pic_num = dump->dump_cnt; + old_fs = get_fs(); + set_fs(KERNEL_DS); + while (pic_num > 0) { + wr_size = vfs_write(dump->yuv_fp, + (dump->buf_addr + i * mgr->size_pic), + mgr->size_pic, &dump->yuv_pos); + if (mgr->size_pic != wr_size) { + dbg_print(FC_ERROR, "buf failed to write yuv file\n"); + break; + } + pic_num--; + i++; + } + set_fs(old_fs); + vfs_fsync(dump->yuv_fp, 0); + + filp_close(dump->yuv_fp, current->files); + dump->yuv_pos = 0; + dump->yuv_fp = NULL; + set_disable(mgr, YUV_MASK); + dbg_print(FC_YUV_DEBUG, + "closed yuv file, dump yuv exit\n"); + dump->num = 0; + dump->dump_cnt = 0; + if (dump->buf_addr != NULL) + vfree(dump->buf_addr); + dump->buf_addr = NULL; + dump->buf_size = 0; + } + } + + return 0; +} + +static int write_crc_work(struct pic_check_mgr_t *mgr) +{ + unsigned int wr_size; + char *crc_buf, *crc_tmp = NULL; + mm_segment_t old_fs; + struct pic_check_t *check = &mgr->pic_check; + + crc_tmp = (char *)vzalloc(64 * 30); + if (!crc_tmp) + return -1; + + if (mgr->enable & CRC_MASK) { + wr_size = 0; + while (kfifo_get(&check->wr_chk_q, &crc_buf) != 0) { + wr_size += sprintf(&crc_tmp[wr_size], 
"%s", crc_buf); + if (check->compare_fp != NULL) { + if (!fget_crc_str(crc_buf, SIZE_CRC, check)) { + dbg_print(0, "%s, can't get more compare crc\n", __func__); + filp_close(check->compare_fp, current->files); + check->compare_fp = NULL; + } + } + kfifo_put(&check->new_chk_q, crc_buf); + } + if (check->check_fp && (wr_size != 0)) { + old_fs = get_fs(); + set_fs(KERNEL_DS); + if (wr_size != vfs_write(check->check_fp, + crc_tmp, wr_size, &check->check_pos)) { + dbg_print(FC_ERROR, "failed to check_dump_filp\n"); + } + set_fs(old_fs); + } + } + + vfree(crc_tmp); + return 0; +} + +static int write_aux_data_crc_work(struct aux_data_check_mgr_t *mgr) +{ + unsigned int wr_size; + char *crc_buf, crc_tmp[64*30]; + mm_segment_t old_fs; + struct aux_data_check_t *check = &mgr->aux_data_check; + + if (mgr->enable & AUX_MASK) { + wr_size = 0; + while (kfifo_get(&check->wr_chk_q, &crc_buf) != 0) { + wr_size += sprintf(&crc_tmp[wr_size], "%s", crc_buf); + if (check->compare_fp != NULL) { + if (!fget_aux_data_crc_str(crc_buf, SIZE_CRC, check)) { + dbg_print(0, "%s, can't get more compare crc\n", __func__); + filp_close(check->compare_fp, current->files); + check->compare_fp = NULL; + } + } + kfifo_put(&check->new_chk_q, crc_buf); + } + if (check->check_fp && (wr_size != 0)) { + old_fs = get_fs(); + set_fs(KERNEL_DS); + if (wr_size != vfs_write(check->check_fp, + crc_tmp, wr_size, &check->check_pos)) { + dbg_print(FC_ERROR, "failed to check_dump_filp\n"); + } + set_fs(old_fs); + } + } + return 0; +} + +static void do_check_work(struct work_struct *work) +{ + struct pic_check_mgr_t *mgr = container_of(work, + struct pic_check_mgr_t, frame_check_work); + + write_yuv_work(mgr); + + write_crc_work(mgr); +} + +static void do_aux_data_check_work(struct work_struct *work) +{ + struct aux_data_check_mgr_t *mgr = container_of(work, + struct aux_data_check_mgr_t, aux_data_check_work); + + write_aux_data_crc_work(mgr); +} + + +static int memcpy_phy_to_virt(char *to_virt, + ulong phy_from, 
unsigned int size) +{ + void *vaddr = NULL; + unsigned int tmp_size = 0; + + if (single_mode_vdec != NULL) { + unsigned int offset = phy_from & (~PAGE_MASK); + while (size > 0) { + /* flush dcache in isr. */ + flush_dcache_page(phys_to_page(phy_from)); + + if (offset + size >= PAGE_SIZE) { + vaddr = kmap_atomic(phys_to_page(phy_from)); + tmp_size = (PAGE_SIZE - offset); + phy_from += tmp_size; //for next loop; + size -= tmp_size; + vaddr += offset; + } else { + vaddr = kmap_atomic(phys_to_page(phy_from)); + vaddr += offset; + tmp_size = size; + size = 0; + } + if (vaddr == NULL) { + dbg_print(FC_CRC_DEBUG, "%s: kmap_atomic failed phy: 0x%x\n", + __func__, (unsigned int)phy_from); + return -1; + } + + memcpy(to_virt, vaddr, tmp_size); + to_virt += tmp_size; + + kunmap_atomic(vaddr - offset); + offset = 0; + } + } else { + while (size > 0) { + if (size >= VMAP_STRIDE_SIZE) { + vaddr = codec_mm_vmap(phy_from, VMAP_STRIDE_SIZE); + tmp_size = VMAP_STRIDE_SIZE; + phy_from += VMAP_STRIDE_SIZE; + size -= VMAP_STRIDE_SIZE; + } else { + vaddr = codec_mm_vmap(phy_from, size); + tmp_size = size; + size = 0; + } + if (vaddr == NULL) { + dbg_print(FC_YUV_DEBUG, "%s: codec_mm_vmap failed phy: 0x%x\n", + __func__, (unsigned int)phy_from); + return -1; + } + codec_mm_dma_flush(vaddr, + tmp_size, DMA_FROM_DEVICE); + memcpy(to_virt, vaddr, tmp_size); + to_virt += tmp_size; + + codec_mm_unmap_phyaddr(vaddr); + } + } + return 0; +} + + +static int do_yuv_unit_cp(void **addr, ulong phy, void *virt, + int h, int w, int stride) +{ + int ret = 0, i; + void *tmp = *addr; + + if ((phy != 0) && (virt == NULL)) { + for (i = 0; i < h; i++) { + ret |= memcpy_phy_to_virt(tmp, phy, w); + phy += stride; + tmp += w; + } + } else { + for (i = 0; i < h; i++) { + memcpy(tmp, virt, w); + virt += stride; + tmp += w; + } + } + *addr = tmp; + + return ret; +} + +static int do_yuv_dump(struct pic_check_mgr_t *mgr, struct vframe_s *vf) +{ + int ret = 0; + void *tmp_addr; + struct pic_dump_t *dump = 
&mgr->pic_dump; + + if (dump->start > 0) { + dump->start--; + return 0; + } + + if (dump->dump_cnt >= dump->num) { + mgr->enable &= (~YUV_MASK); + dump->num = 0; + dump->dump_cnt = 0; + return 0; + } + + if (single_mode_vdec != NULL) { + if (mgr->size_pic > + (dump->buf_size - dump->dump_cnt * mgr->size_pic)) { + if (dump->buf_size) { + dbg_print(FC_ERROR, + "not enough buf for single mode, force dump less\n"); + dump->num = dump->dump_cnt; + check_schedule(mgr); + } else + set_disable(mgr, YUV_MASK); + return -1; + } + tmp_addr = dump->buf_addr + + mgr->size_pic * dump->dump_cnt; + } else { + if (mgr->size_pic > dump->buf_size) { + dbg_print(FC_ERROR, + "not enough size, pic/buf size: 0x%x/0x%x\n", + mgr->size_pic, dump->buf_size); + return -1; + } + tmp_addr = dump->buf_addr; + } + + if (vf->width == mgr->canvas_w) { + if ((mgr->uv_vaddr == NULL) || (mgr->y_vaddr == NULL)) { + ret |= memcpy_phy_to_virt(tmp_addr, mgr->y_phyaddr, mgr->size_y); + ret |= memcpy_phy_to_virt(tmp_addr + mgr->size_y, + mgr->uv_phyaddr, mgr->size_uv); + if (mgr->mjpeg_flag) /*mjpeg yuv420 u v is separate */ + ret |= memcpy_phy_to_virt(tmp_addr + mgr->size_y + mgr->size_uv, + mgr->extra_v_phyaddr, mgr->size_uv); + } else { + memcpy(tmp_addr, mgr->y_vaddr, mgr->size_y); + memcpy(tmp_addr + mgr->size_y, mgr->uv_vaddr, mgr->size_uv); + if (mgr->mjpeg_flag) /*mjpeg u v is separate */ + memcpy(tmp_addr + mgr->size_y + mgr->size_uv, + mgr->extra_v_vaddr, mgr->size_uv); + } + } else { + u32 uv_stride, uv_cpsize; + ret |= do_yuv_unit_cp(&tmp_addr, mgr->y_phyaddr, mgr->y_vaddr, + vf->height, vf->width, mgr->canvas_w); + + uv_stride = (mgr->mjpeg_flag) ? (mgr->canvas_w >> 1) : mgr->canvas_w; + uv_cpsize = (mgr->mjpeg_flag) ? 
(vf->width >> 1) : vf->width; + ret |= do_yuv_unit_cp(&tmp_addr, mgr->uv_phyaddr, mgr->uv_vaddr, + vf->height >> 1, uv_cpsize, uv_stride); + + if (mgr->mjpeg_flag) { + ret |= do_yuv_unit_cp(&tmp_addr, mgr->extra_v_phyaddr, mgr->extra_v_vaddr, + vf->height >> 1, uv_cpsize, uv_stride); + } + } + + dump->dump_cnt++; + dbg_print(0, "----->dump %dst, size %x (%d x %d), dec total %d\n", + dump->dump_cnt, mgr->size_pic, vf->width, vf->height, mgr->frame_cnt); + + if (single_mode_vdec != NULL) { + /* single mode need schedule work to write*/ + if (dump->dump_cnt >= dump->num) + check_schedule(mgr); + } else { + int wr_size; + mm_segment_t old_fs; + + /* dump for dec pic not in isr */ + if (dump->yuv_fp == NULL) { + dump->yuv_fp = file_open(O_CREAT | O_WRONLY | O_TRUNC, + "%s%s-%d-%d.yuv", YUV_PATH, comp_crc, mgr->id, mgr->file_cnt); + if (dump->yuv_fp == NULL) + return -1; + mgr->file_cnt++; + } + old_fs = get_fs(); + set_fs(KERNEL_DS); + wr_size = vfs_write(dump->yuv_fp, dump->buf_addr, + mgr->size_pic, &dump->yuv_pos); + if (mgr->size_pic != wr_size) { + dbg_print(FC_ERROR, "buf failed to write yuv file\n"); + } + set_fs(old_fs); + vfs_fsync(dump->yuv_fp, 0); + } + + return 0; +} + +static int crc_store(struct pic_check_mgr_t *mgr, struct vframe_s *vf, + int crc_y, int crc_uv) +{ + int ret = 0; + char *crc_addr = NULL; + int comp_frame = 0, comp_crc_y, comp_crc_uv; + struct pic_check_t *check = &mgr->pic_check; + + mgr->yuvsum += crc_uv; + mgr->yuvsum += crc_y; + + if (kfifo_get(&check->new_chk_q, &crc_addr) == 0) { + dbg_print(0, "%08d: %08x %08x\n", + mgr->frame_cnt, crc_y, crc_uv); + if (check->check_fp) { + dbg_print(0, "crc32 dropped\n"); + } else { + dbg_print(0, "no opened file to write crc32\n"); + } + return -1; + } + if (check->cmp_crc_cnt > mgr->frame_cnt) { + sscanf(crc_addr, "%08u: %8x %8x", + &comp_frame, &comp_crc_y, &comp_crc_uv); + + dbg_print(0, "%08d: %08x %08x <--> %08d: %08x %08x\n", + mgr->frame_cnt, crc_y, crc_uv, + comp_frame, comp_crc_y, 
comp_crc_uv); + + if (comp_frame == mgr->frame_cnt) { + if ((comp_crc_y != crc_y) || (crc_uv != comp_crc_uv)) { + mgr->pic_dump.start = 0; + if (fc_debug || mgr->pic_dump.num < 3) + mgr->pic_dump.num++; + dbg_print(0, "\n\nError: %08d: %08x %08x != %08x %08x\n\n", + mgr->frame_cnt, crc_y, crc_uv, comp_crc_y, comp_crc_uv); + if (!(vf->type & VIDTYPE_SCATTER)) + do_yuv_dump(mgr, vf); + if (fc_debug & FC_ERR_CRC_BLOCK_MODE) + mgr->err_crc_block = 1; + mgr->usr_cmp_result = -1; + } + } else { + mgr->usr_cmp_result = -1; + dbg_print(0, "frame num error: frame_cnt(%d) frame_comp(%d)\n", + mgr->frame_cnt, comp_frame); + } + } else { + dbg_print(0, "%08d: %08x %08x\n", mgr->frame_cnt, crc_y, crc_uv); + } + + if ((check->check_fp) && (crc_addr != NULL)) { + ret = snprintf(crc_addr, SIZE_CRC, + "%08d: %08x %08x\n", mgr->frame_cnt, crc_y, crc_uv); + + kfifo_put(&check->wr_chk_q, crc_addr); + if ((mgr->frame_cnt & 0xf) == 0) + check_schedule(mgr); + } + return ret; +} + +static int aux_data_crc_store(struct aux_data_check_mgr_t *mgr,int crc) +{ + int ret = 0; + char *crc_addr = NULL; + int comp_frame = 0, comp_crc; + struct aux_data_check_t *check = &mgr->aux_data_check; + + if (kfifo_get(&check->new_chk_q, &crc_addr) == 0) { + dbg_print(0, "%08d: %08x\n", + mgr->frame_cnt, crc); + if (check->check_fp) { + dbg_print(0, "crc32 dropped\n"); + } else { + dbg_print(0, "no opened file to write crc32\n"); + } + return -1; + } + if (check->cmp_crc_cnt > mgr->frame_cnt) { + sscanf(crc_addr, "%08u: %8x", + &comp_frame, &comp_crc); + + dbg_print(0, "%08d: %08x <--> %08d: %08x\n", + mgr->frame_cnt, crc, + comp_frame, comp_crc); + if (comp_frame == mgr->frame_cnt) { + if (comp_crc != crc) { + dbg_print(0, "\n\nError: %08d: %08x != %08x \n\n", + mgr->frame_cnt, crc, comp_crc); + } + } else { + dbg_print(0, "frame num error: frame_cnt(%d) frame_comp(%d)\n", + mgr->frame_cnt, comp_frame); + } + } else { + dbg_print(0, "%08d: %08x\n", mgr->frame_cnt, crc); + } + + if ((check->check_fp) && 
(crc_addr != NULL)) { + ret = snprintf(crc_addr, SIZE_CRC, + "%08d: %08x\n", mgr->frame_cnt, crc); + + kfifo_put(&check->wr_chk_q, crc_addr); + if ((mgr->frame_cnt & 0xf) == 0) + aux_data_check_schedule(mgr); + } + return ret; +} + + + +static int crc32_vmap_le(unsigned int *crc32, + ulong phyaddr, unsigned int size) +{ + void *vaddr = NULL; + unsigned int crc = *crc32; + unsigned int tmp_size = 0; + + /*single mode cannot use codec_mm_vmap*/ + if (single_mode_vdec != NULL) { + unsigned int offset = phyaddr & (~PAGE_MASK); + while (size > 0) { + /*flush dcache in isr.*/ + flush_dcache_page(phys_to_page(phyaddr)); + + if (offset + size >= PAGE_SIZE) { + vaddr = kmap_atomic(phys_to_page(phyaddr)); + tmp_size = (PAGE_SIZE - offset); + phyaddr += tmp_size; + size -= tmp_size; + vaddr += offset; + } else { + vaddr = kmap_atomic(phys_to_page(phyaddr)); + tmp_size = size; + vaddr += offset; + size = 0; + } + if (vaddr == NULL) { + dbg_print(FC_CRC_DEBUG, "%s: kmap_atomic failed phy: 0x%x\n", + __func__, (unsigned int)phyaddr); + return -1; + } + + crc = crc32_le(crc, vaddr, tmp_size); + + kunmap_atomic(vaddr - offset); + offset = 0; + } + } else { + while (size > 0) { + if (size >= VMAP_STRIDE_SIZE) { + vaddr = codec_mm_vmap(phyaddr, VMAP_STRIDE_SIZE); + tmp_size = VMAP_STRIDE_SIZE; + phyaddr += VMAP_STRIDE_SIZE; + size -= VMAP_STRIDE_SIZE; + } else { + vaddr = codec_mm_vmap(phyaddr, size); + tmp_size = size; + size = 0; + } + if (vaddr == NULL) { + dbg_print(FC_CRC_DEBUG, "%s: codec_mm_vmap failed phy: 0x%x\n", + __func__, (unsigned int)phyaddr); + return -1; + } + codec_mm_dma_flush(vaddr, + tmp_size, DMA_FROM_DEVICE); + + crc = crc32_le(crc, vaddr, tmp_size); + + codec_mm_unmap_phyaddr(vaddr); + } + } + *crc32 = crc; + + return 0; +} + +static int do_check_nv21(struct pic_check_mgr_t *mgr, struct vframe_s *vf) +{ + int i; + unsigned int crc_y = 0, crc_uv = 0; + void *p_yaddr, *p_uvaddr; + ulong y_phyaddr, uv_phyaddr; + int ret = 0; + + p_yaddr = mgr->y_vaddr; + 
p_uvaddr = mgr->uv_vaddr; + y_phyaddr = mgr->y_phyaddr; + uv_phyaddr = mgr->uv_phyaddr; + if ((p_yaddr == NULL) || (p_uvaddr == NULL)) + { + if (vf->width == mgr->canvas_w) { + ret = crc32_vmap_le(&crc_y, y_phyaddr, mgr->size_y); + ret |= crc32_vmap_le(&crc_uv, uv_phyaddr, mgr->size_uv); + } else { + for (i = 0; i < vf->height; i++) { + ret |= crc32_vmap_le(&crc_y, y_phyaddr, vf->width); + y_phyaddr += mgr->canvas_w; + } + for (i = 0; i < vf->height/2; i++) { + ret |= crc32_vmap_le(&crc_uv, uv_phyaddr, vf->width); + uv_phyaddr += mgr->canvas_w; + } + } + if (ret < 0) { + dbg_print(0, "calc crc failed, may codec_mm_vmap failed\n"); + return ret; + } + } else { + if (mgr->frame_cnt == 0) { + unsigned int *p = mgr->y_vaddr; + dbg_print(0, "YUV0000: %08x-%08x-%08x-%08x\n", + p[0], p[1], p[2], p[3]); + } + if (vf->width == mgr->canvas_w) { + crc_y = crc32_le(crc_y, p_yaddr, mgr->size_y); + crc_uv = crc32_le(crc_uv, p_uvaddr, mgr->size_uv); + } else { + for (i = 0; i < vf->height; i++) { + crc_y = crc32_le(crc_y, p_yaddr, vf->width); + p_yaddr += mgr->canvas_w; + } + for (i = 0; i < vf->height/2; i++) { + crc_uv = crc32_le(crc_uv, p_uvaddr, vf->width); + p_uvaddr += mgr->canvas_w; + } + } + } + + crc_store(mgr, vf, crc_y, crc_uv); + + return 0; +} + +static int do_check_yuv16(struct pic_check_mgr_t *mgr, + struct vframe_s *vf, char *ybuf, char *uvbuf, + char *ubuf, char *vbuf) +{ + unsigned int crc1, crc2, crc3, crc4; + int w, h; + + w = vf->width; + h = vf->height; + crc1 = 0; + crc2 = 0; + crc3 = 0; + crc4 = 0; + + crc1 = crc32_le(0, ybuf, w * h *2); + crc2 = crc32_le(0, ubuf, w * h/2); + crc3 = crc32_le(0, vbuf, w * h/2); + crc4 = crc32_le(0, uvbuf, w * h*2/2); + /* + printk("%08d: %08x %08x %08x %08x\n", + mgr->frame_cnt, crc1, crc4, crc2, crc3); + */ + mgr->size_y = w * h * 2; + mgr->size_uv = w * h; + mgr->size_pic = mgr->size_y + mgr->size_uv; + mgr->y_vaddr = ybuf; + mgr->uv_vaddr = uvbuf; + mgr->canvas_w = w; + mgr->canvas_h = h; + crc_store(mgr, vf, crc1, 
crc4); + + return 0; +} + +static int do_check_aux_data_crc(struct aux_data_check_mgr_t *mgr, + char *aux_buf, int size) +{ + unsigned int crc = 0; + + crc = crc32_le(0, aux_buf, size); + + //pr_info("%s:crc = %08x\n",crc); + aux_data_crc_store(mgr,crc); + + return 0; +} + + +static int fbc_check_prepare(struct pic_check_t *check, + int resize, int y_size) +{ + int i = 0; + + if (y_size > MAX_SIZE_AFBC_PLANES) + return -1; + + if (((!check->fbc_planes[0]) || + (!check->fbc_planes[1]) || + (!check->fbc_planes[2]) || + (!check->fbc_planes[3])) && + (!resize)) + return -1; + + if (resize) { + dbg_print(0, "size changed to 0x%x(y_size)\n", y_size); + for (i = 0; i < ARRAY_SIZE(check->fbc_planes); i++) { + if (check->fbc_planes[i]) { + vfree(check->fbc_planes[i]); + check->fbc_planes[i] = NULL; + } + } + } + for (i = 0; i < ARRAY_SIZE(check->fbc_planes); i++) { + if (!check->fbc_planes[i]) + check->fbc_planes[i] = + vmalloc(y_size * sizeof(short)); + } + if ((!check->fbc_planes[0]) || + (!check->fbc_planes[1]) || + (!check->fbc_planes[2]) || + (!check->fbc_planes[3])) { + dbg_print(0, "vmalloc staicplanes failed %lx %lx %lx %lx\n", + (ulong)check->fbc_planes[0], + (ulong)check->fbc_planes[1], + (ulong)check->fbc_planes[2], + (ulong)check->fbc_planes[3]); + for (i = 0; i < ARRAY_SIZE(check->fbc_planes); i++) { + if (check->fbc_planes[i]) { + vfree(check->fbc_planes[i]); + check->fbc_planes[i] = NULL; + } + } + return -1; + } else + dbg_print(FC_CRC_DEBUG, "vmalloc staicplanes sucessed\n"); + + return 0; +} + +int load_user_cmp_crc(struct pic_check_mgr_t *mgr) +{ + int i; + struct pic_check_t *chk; + void *qaddr; + + if (mgr == NULL || + (mgr->cmp_pool == NULL)|| + (mgr->usr_cmp_num == 0)) + return 0; + + chk = &mgr->pic_check; + + if (chk->cmp_crc_cnt > 0) { + pr_info("cmp crc32 data is ready\n"); + return -1; + } + + if (chk->check_addr == NULL) { + pr_info("no cmp crc buf\n"); /* vmalloc again or return */ + return -1; + } + + if (mgr->usr_cmp_num >= 
		USER_CMP_POOL_MAX_SIZE)
		mgr->usr_cmp_num = USER_CMP_POOL_MAX_SIZE - 1;

	/* Tail of a function whose head is above this chunk: formats each
	 * user-supplied CRC pool entry into the check queue buffer, then
	 * releases the pool. */
	for (i = 0; i < mgr->usr_cmp_num; i++) {
		qaddr = chk->check_addr + i * SIZE_CRC;
		dbg_print(FC_CRC_DEBUG, "%s, %8d: %08x %08x\n", __func__,
			mgr->cmp_pool[i].pic_num,
			mgr->cmp_pool[i].y_crc,
			mgr->cmp_pool[i].uv_crc);
		sprintf(qaddr, "%8d: %08x %08x\n",
			mgr->cmp_pool[i].pic_num,
			mgr->cmp_pool[i].y_crc,
			mgr->cmp_pool[i].uv_crc);

		kfifo_put(&chk->new_chk_q, qaddr);
		chk->cmp_crc_cnt++;
	}

	mgr->usr_cmp_result = 0;

	vfree(mgr->cmp_pool);
	mgr->cmp_pool = NULL;

	return 0;
}


/*
 * decoder_do_frame_check() - per-frame CRC check and/or YUV dump entry point.
 * @vdec: decoder instance; NULL means use the single-mode decoder saved in
 *        single_mode_vdec (if any).
 * @vf:   the decoded frame to check.
 *
 * Dispatches on the frame type: linear NV21/NV12 (and MJPEG) frames are
 * cache-flushed and checked/dumped directly; VIDTYPE_SCATTER (FBC compressed)
 * frames are first decompressed via AMLOGIC_FBC_vframe_decoder() into the
 * fbc_planes buffers. Also counts down the user-supplied compare pool.
 *
 * Returns 0 when checking is skipped/ok, negative on setup failure
 * (-1 frame size, -2 canvas vaddr, -3 fbc prepare).
 */
int decoder_do_frame_check(struct vdec_s *vdec, struct vframe_s *vf)
{
	int resize = 0;
	void *planes[4];
	struct pic_check_t *check = NULL;
	struct pic_check_mgr_t *mgr = NULL;
	int ret = 0;

	if (vdec == NULL) {
		if (single_mode_vdec == NULL)
			return 0;
		mgr = &single_mode_vdec->vfc;
	} else {
		mgr = &vdec->vfc;
		single_mode_vdec = NULL;
	}

	/* multi-instance path must not run in interrupt context */
	if (!single_mode_vdec &&
		unlikely(in_interrupt()))
		return 0;

	if ((mgr == NULL) || (vf == NULL) ||
		(mgr->enable == 0))
		return 0;

	mgr->mjpeg_flag = ((vdec) &&
		(vdec->format == VFORMAT_MJPEG)) ? 1 : 0;

	if (get_frame_size(mgr, vf) < 0)
		return -1;

	if (mgr->last_size_pic != mgr->size_pic) {
		resize = 1;
		dbg_print(0, "size changed, %x-->%x [%d x %d]\n",
			mgr->last_size_pic, mgr->size_pic,
			vf->width, vf->height);
		/* for slt, if no compare crc file, use the
		 * cmp crc from amstream ioctl write */
		load_user_cmp_crc(mgr);
	} else
		resize = 0;
	mgr->last_size_pic = mgr->size_pic;

	if ((vf->type & VIDTYPE_VIU_NV21) || (mgr->mjpeg_flag) ||
		(vf->type & VIDTYPE_VIU_NV12)) {
		int flush_size;

		if (canvas_get_virt_addr(mgr, vf) < 0)
			return -2;

		/* flush CPU caches before reading DMA-written pixel data;
		 * MJPEG stores U and V in separate quarter-size planes,
		 * NV12/21 in one interleaved half-size plane */
		flush_size = mgr->mjpeg_flag ?
			((mgr->canvas_w * mgr->canvas_h) >> 2) :
			((mgr->canvas_w * mgr->canvas_h) >> 1);
		if (mgr->y_vaddr)
			codec_mm_dma_flush(mgr->y_vaddr,
				mgr->canvas_w * mgr->canvas_h, DMA_FROM_DEVICE);
		if (mgr->uv_vaddr)
			codec_mm_dma_flush(mgr->uv_vaddr,
				flush_size, DMA_FROM_DEVICE);
		if ((mgr->mjpeg_flag) && (mgr->extra_v_vaddr))
			codec_mm_dma_flush(mgr->extra_v_vaddr,
				flush_size, DMA_FROM_DEVICE);

		if (mgr->enable & CRC_MASK)
			ret = do_check_nv21(mgr, vf);

		if (mgr->enable & YUV_MASK)
			do_yuv_dump(mgr, vf);

	} else if (vf->type & VIDTYPE_SCATTER) {
		check = &mgr->pic_check;

		/* scatter (FBC) frames cannot be dumped from a canvas;
		 * drop any previously allocated dump buffer */
		if (mgr->pic_dump.buf_addr != NULL) {
			dbg_print(0, "scatter free yuv buf\n");
			vfree(mgr->pic_dump.buf_addr);
			mgr->pic_dump.buf_addr = NULL;
		}
		if (fbc_check_prepare(check,
				resize, mgr->size_y) < 0)
			return -3;
		planes[0] = check->fbc_planes[0];
		planes[1] = check->fbc_planes[1];
		planes[2] = check->fbc_planes[2];
		planes[3] = check->fbc_planes[3];
		ret = AMLOGIC_FBC_vframe_decoder(planes, vf, 0, 0);
		if (ret < 0) {
			dbg_print(0, "amlogic_fbc_lib.ko error %d\n", ret);
		} else {
			do_check_yuv16(mgr, vf,
				(void *)planes[0], (void *)planes[3],//uv
				(void *)planes[1], (void *)planes[2]);
		}
	}
	mgr->frame_cnt++;

	if (mgr->usr_cmp_num > 0) {
		mgr->usr_cmp_num -= 1;
	}

	return ret;
}
EXPORT_SYMBOL(decoder_do_frame_check);

/*
 * decoder_do_aux_data_check() - CRC-check a frame's aux (metadata) buffer.
 * Returns 0 when disabled or vdec is NULL; otherwise the result of
 * do_check_aux_data_crc().
 */
int decoder_do_aux_data_check(struct vdec_s *vdec, char *aux_buffer, int size)
{
	struct aux_data_check_mgr_t *mgr = NULL;
	int ret = 0;

	if (vdec == NULL) {
		return 0;
	} else {
		mgr = &vdec->adc;
	}

	if ((mgr == NULL) || (mgr->enable == 0))
		return 0;

	if (mgr->enable & AUX_MASK)
		ret = do_check_aux_data_crc(mgr,aux_buffer,size);

	mgr->frame_cnt++;

	return ret;
}
EXPORT_SYMBOL(decoder_do_aux_data_check);

/*
 * dump_buf_alloc() - lazily vmalloc the YUV staging buffer (size taken from
 * the size_yuv_buf module parameter). No-op if already allocated.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int dump_buf_alloc(struct pic_dump_t *dump)
{
	if ((dump->buf_addr != NULL) &&
		(dump->buf_size != 0))
		return 0;

	dump->buf_addr =
		(char *)vmalloc(size_yuv_buf);
	if (!dump->buf_addr) {
		dump->buf_size = 0;
		dbg_print(0, "vmalloc yuv buf failed\n");
		return -ENOMEM;
	}
	dump->buf_size = size_yuv_buf;

	dbg_print(0, "%s: buf for yuv is alloced\n", __func__);

	return 0;
}

/*
 * dump_yuv_trig() - arm a YUV dump of @num frames starting at frame @start
 * for decoder @id. Fails with -EBUSY if a dump is already in progress
 * (dump->num non-zero).
 */
int dump_yuv_trig(struct pic_check_mgr_t *mgr,
	int id, int start, int num)
{
	struct pic_dump_t *dump = &mgr->pic_dump;

	if (!dump->num) {
		mgr->id = id;
		dump->start = start;
		dump->num = num;
		dump->end = start + num;
		dump->dump_cnt = 0;
		dump->yuv_fp = NULL;
		if (!atomic_read(&mgr->work_inited)) {
			INIT_WORK(&mgr->frame_check_work, do_check_work);
			atomic_set(&mgr->work_inited, 1);
		}
		dump_buf_alloc(dump);
		str_strip(comp_crc);
		set_enable(mgr, YUV_MASK);
	} else {
		dbg_print(FC_ERROR, "yuv dump now, trig later\n");
		return -EBUSY;
	}
	dbg_print(0, "dump yuv trigger, from %d to %d frame\n",
		dump->start, dump->end);
	return 0;
}

/*
 * frame_check_init() - reset per-instance CRC state, open the compare file
 * (CRC_PATH + comp_crc) and the output .crc log, preload the compare queue
 * from the file, then enable CRC checking. Always returns 0 — a missing
 * compare file or failed vmalloc only disables the corresponding feature.
 */
int frame_check_init(struct pic_check_mgr_t *mgr, int id)
{
	int i;
	struct pic_dump_t *dump = &mgr->pic_dump;
	struct pic_check_t *check = &mgr->pic_check;

	mgr->frame_cnt = 0;
	mgr->size_pic = 0;
	mgr->last_size_pic = 0;
	mgr->id = id;
	mgr->yuvsum = 0;
	mgr->height = 0;
	mgr->width = 0;

	dump->num = 0;
	dump->dump_cnt = 0;
	dump->yuv_fp = NULL;
	check->check_pos = 0;
	check->compare_pos = 0;

	if (!atomic_read(&mgr->work_inited)) {
		INIT_WORK(&mgr->frame_check_work, do_check_work);
		atomic_set(&mgr->work_inited, 1);
	}
	/* for dump error yuv prepare. */
	dump_buf_alloc(dump);

	/* try to open compare crc32 file */
	str_strip(comp_crc);
	check->compare_fp = file_open(O_RDONLY,
		"%s%s", CRC_PATH, comp_crc);

	/* create crc32 log file */
	check->check_fp = file_open(O_CREAT| O_WRONLY | O_TRUNC,
		"%s%s-%d-%d.crc", CRC_PATH, comp_crc, id, mgr->file_cnt);

	INIT_KFIFO(check->new_chk_q);
	INIT_KFIFO(check->wr_chk_q);
	check->check_addr = vmalloc(SIZE_CRC * SIZE_CHECK_Q);
	if (check->check_addr == NULL) {
		dbg_print(FC_ERROR, "vmalloc qbuf fail\n");
	} else {
		void *qaddr = NULL, *rdret = NULL;
		check->cmp_crc_cnt = 0;
		/* pre-read one compare-CRC line per queue slot; on read
		 * failure the compare file is closed (once) but the slot
		 * is still queued as scratch space */
		for (i = 0; i < SIZE_CHECK_Q; i++) {
			qaddr = check->check_addr + i * SIZE_CRC;
			rdret = fget_crc_str(qaddr,
				SIZE_CRC, check);
			if (rdret == NULL) {
				if (i < 3)
					dbg_print(0, "can't get compare crc string\n");
				if (check->compare_fp) {
					filp_close(check->compare_fp, current->files);
					check->compare_fp = NULL;
				}
			}

			kfifo_put(&check->new_chk_q, qaddr);
		}
	}
	set_enable(mgr, CRC_MASK);
	dbg_print(0, "%s end\n", __func__);

	return 0;
}


/*
 * aux_data_check_init() - same scheme as frame_check_init() but for the aux
 * (metadata) CRC path: opens CRC_PATH + aux_comp_crc for compare, creates the
 * aux .crc log, preloads the queue and enables AUX_MASK. Always returns 0.
 */
int aux_data_check_init(struct aux_data_check_mgr_t *mgr, int id)
{
	int i;
	struct aux_data_check_t *check = &mgr->aux_data_check;

	mgr->frame_cnt = 0;
	mgr->id = id;

	check->check_pos = 0;
	check->compare_pos = 0;

	if (!atomic_read(&mgr->work_inited)) {
		INIT_WORK(&mgr->aux_data_check_work, do_aux_data_check_work);
		atomic_set(&mgr->work_inited, 1);
	}

	/* try to open compare meta crc32 file */
	str_strip(aux_comp_crc);
	check->compare_fp = file_open(O_RDONLY,
		"%s%s", CRC_PATH, aux_comp_crc);

	/* create meta crc log file */
	check->check_fp = file_open(O_CREAT| O_WRONLY | O_TRUNC,
		"%s%s-%d-%d.crc", CRC_PATH, aux_comp_crc, id, mgr->file_cnt);

	INIT_KFIFO(check->new_chk_q);
	INIT_KFIFO(check->wr_chk_q);
	check->check_addr = vmalloc(SIZE_CRC * SIZE_CHECK_Q);
	if (check->check_addr == NULL) {
		dbg_print(FC_ERROR, "vmalloc qbuf fail\n");
	} else {
		void *qaddr = NULL, *rdret = NULL;
		check->cmp_crc_cnt = 0;
		for (i = 0; i < SIZE_CHECK_Q; i++) {
			qaddr = check->check_addr + i * SIZE_CRC;
			rdret = fget_aux_data_crc_str(qaddr,
				SIZE_CRC, check);
			if (rdret == NULL) {
				if (i < 3)
					dbg_print(0, "can't get compare crc string\n");
				if (check->compare_fp) {
					filp_close(check->compare_fp, current->files);
					check->compare_fp = NULL;
				}
			}

			kfifo_put(&check->new_chk_q, qaddr);
		}
	}
	aux_set_enable(mgr, AUX_MASK);
	dbg_print(0, "%s end\n", __func__);

	return 0;
}


/*
 * frame_check_exit() - flush pending work, write out remaining YUV/CRC data,
 * free all buffers and close all files for one check instance. No-op when the
 * instance was never enabled.
 */
void frame_check_exit(struct pic_check_mgr_t *mgr)
{
	int i;
	struct pic_dump_t *dump = &mgr->pic_dump;
	struct pic_check_t *check = &mgr->pic_check;

	if (mgr->enable != 0) {
		if (dump->dump_cnt != 0) {
			dbg_print(0, "%s, cnt = %d, num = %d\n",
				__func__, dump->dump_cnt, dump->num);
			set_enable(mgr, YUV_MASK);
		}
		if (atomic_read(&mgr->work_inited)) {
			cancel_work_sync(&mgr->frame_check_work);
			atomic_set(&mgr->work_inited, 0);
		}
		/* NOTE(review): only write_yuv_work() is guarded by the
		 * single_mode_vdec test; write_crc_work() always runs.
		 * Verify this brace-less if matches the intent. */
		if (single_mode_vdec != NULL)
			write_yuv_work(mgr);
		write_crc_work(mgr);

		for (i = 0; i < ARRAY_SIZE(check->fbc_planes); i++) {
			if (check->fbc_planes[i]) {
				vfree(check->fbc_planes[i]);
				check->fbc_planes[i] = NULL;
			}
		}
		if (check->check_addr) {
			vfree(check->check_addr);
			check->check_addr = NULL;
		}

		if (mgr->cmp_pool) {
			vfree(mgr->cmp_pool);
			mgr->cmp_pool = NULL;
		}

		if (check->check_fp) {
			filp_close(check->check_fp, current->files);
			check->check_fp = NULL;
		}
		if (check->compare_fp) {
			filp_close(check->compare_fp, current->files);
			check->compare_fp = NULL;
		}
		if (dump->yuv_fp) {
			filp_close(dump->yuv_fp, current->files);
			dump->yuv_fp = NULL;
		}
		if (dump->buf_addr) {
			vfree(dump->buf_addr);
			dump->buf_addr = NULL;
		}
		mgr->file_cnt++;
		set_disable(mgr, YUV_MASK | CRC_MASK);
		dbg_print(0, "%s end\n", __func__);
	}
}

/*
 * aux_data_check_exit() - counterpart of aux_data_check_init(): cancel the
 * work item, flush the aux CRC log, free the queue buffer and close files.
 */
void aux_data_check_exit(struct aux_data_check_mgr_t *mgr)
{
	//struct pic_dump_t *dump = &mgr->pic_dump;
	struct aux_data_check_t *check = &mgr->aux_data_check;

	if (mgr->enable != 0) {
		if (atomic_read(&mgr->work_inited)) {
			cancel_work_sync(&mgr->aux_data_check_work);
			atomic_set(&mgr->work_inited, 0);
		}

		write_aux_data_crc_work(mgr);

		if (check->check_addr) {
			vfree(check->check_addr);
			check->check_addr = NULL;
		}

		if (check->check_fp) {
			filp_close(check->check_fp, current->files);
			check->check_fp = NULL;
		}
		if (check->compare_fp) {
			filp_close(check->compare_fp, current->files);
			check->compare_fp = NULL;
		}

		mgr->file_cnt++;
		aux_set_disable(mgr, AUX_MASK);
		dbg_print(0, "%s end\n", __func__);
	}
}



/*
 * vdec_frame_check_init() - per-vdec hookup called at decoder start. Arms CRC
 * checking and/or YUV dumping for this instance if its bit is set in the
 * check_enable / yuv_enable module-parameter masks (bits are consumed unless
 * loop mode is set in fc_debug).
 */
int vdec_frame_check_init(struct vdec_s *vdec)
{
	int ret = 0, id = 0;

	if (vdec == NULL)
		return 0;

	if ((vdec->is_reset) &&
		(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_GXL))
		return 0;

	vdec->vfc.err_crc_block = 0;
	single_mode_vdec = (vdec_single(vdec))? vdec : NULL;

	if (!check_enable && !yuv_enable)
		return 0;

	/* force linear canvas so CPU-side CRC/dump can read the pixels */
	vdec->canvas_mode = CANVAS_BLKMODE_LINEAR;
	id = vdec->id;

	if (check_enable & (0x01 << id)) {
		frame_check_init(&vdec->vfc, id);
		/*repeat check one video crc32, not clear enable*/
		if ((fc_debug & FC_CHECK_CRC_LOOP_MODE) == 0)
			check_enable &= ~(0x01 << id);
	}

	if (yuv_enable & (0x01 << id)) {
		ret = dump_yuv_trig(&vdec->vfc,
			id, yuv_start[id], yuv_num[id]);
		if (ret < 0)
			pr_info("dump yuv init failed\n");
		else {
			pr_info("dump yuv init ok, total %d\n",
				yuv_num[id]);
			vdec->canvas_mode = CANVAS_BLKMODE_LINEAR;
		}
		yuv_num[id] = 0;
		yuv_start[id] = 0;
		yuv_enable &= ~(0x01 << id);
	}

	return ret;
}

/*
 * print_decoder_info() - log a one-line decode summary (format, size, frame
 * count, yuv checksum), mirror it into the checksum_info module parameter,
 * and, when checksum_enable is set, append a numbered line to
 * CHECKSUM_PATH/<checksum_filename>.txt. Returns -1 on unknown format or
 * file-open failure, else 0.
 */
int print_decoder_info(struct vdec_s *vdec)
{
	if (vdec->vfc.enable & CRC_MASK) {
		const char *format_name;

		format_name = get_format_name(vdec->format);
		if (format_name == NULL)
			return -1;

		dbg_print(0, "Decoder-Summary:Type:%10s,framesize:%04dx%04d;out-nums:%08d,yuvsum:%08x\n",
			format_name, vdec->vfc.width, vdec->vfc.height,
			vdec->vfc.frame_cnt, vdec->vfc.yuvsum);
		sprintf(checksum_info, "Type:%10s,framesize:%04dx%04d,out-nums:%08d,yuvsum:%08x",
			format_name, vdec->vfc.width, vdec->vfc.height,
			vdec->vfc.frame_cnt, vdec->vfc.yuvsum);
		if (checksum_enable) {
			struct file *checksum_fp;
			static loff_t checksum_pos;
			mm_segment_t old_fs;
			/* NOTE(review): fixed 128-byte buffer written with
			 * sprintf below — verify the format cannot exceed it */
			char checksum_buf[128]="\n";
			static int num;
			static char file_name[128];

			/* restart numbering when the target file changes */
			if (strcmp(checksum_filename,file_name) != 0) {
				num = 0;
				checksum_pos = 0;
				strcpy(file_name,checksum_filename);
			}
			if (checksum_start_count == 1) {
				num = 0;
				checksum_start_count = 0;
			}

			str_strip(checksum_filename);
			checksum_fp = file_open(O_CREAT| O_WRONLY | O_APPEND,
				"%s%s.txt", CHECKSUM_PATH,checksum_filename);
			if (checksum_fp == NULL) {
				return -1;
			}
			sprintf(checksum_buf, "%08d (Type:%10s, framesize:%04dx%04d, out-nums:%08d, yuvsum:%08x)\n",
				num,format_name, vdec->vfc.width, vdec->vfc.height,
				vdec->vfc.frame_cnt, vdec->vfc.yuvsum);

			old_fs = get_fs();
			set_fs(KERNEL_DS);

			vfs_write(checksum_fp, checksum_buf,
				strlen(checksum_buf), &checksum_pos);

			set_fs(old_fs);

			filp_close(checksum_fp, current->files);
			checksum_fp = NULL;
			num++;
		}
	}

	return 0;
}

/*
 * vdec_aux_data_check_init() - arm aux-data CRC checking for this instance
 * when its bit is set in the aux_enable module parameter (bit consumed
 * unless AD_CHECK_CRC_LOOP_MODE is set in fc_debug).
 */
int vdec_aux_data_check_init(struct vdec_s *vdec)
{
	int ret = 0, id = 0;

	if (vdec == NULL)
		return 0;

	if ((vdec->is_reset) &&
		(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_GXL))
		return 0;

	if (!aux_enable)
		return 0;

	id = vdec->id;

	if (aux_enable & (0x01 << id)) {
		aux_data_check_init(&vdec->adc, id);
		/*repeat check one video meta crc32, not clear enable*/
		if ((fc_debug & AD_CHECK_CRC_LOOP_MODE) == 0)
			aux_enable &= ~(0x01 << id);
	}

	return ret;
}
EXPORT_SYMBOL(vdec_aux_data_check_init);


/* vdec_aux_data_check_exit() - tear down aux-data CRC state for @vdec. */
void vdec_aux_data_check_exit(struct vdec_s *vdec)
{
	if (vdec == NULL)
		return;
	aux_data_check_exit(&vdec->adc);
}
EXPORT_SYMBOL(vdec_aux_data_check_exit);


/*
 * vdec_frame_check_exit() - print the decode summary, tear down the frame
 * check state, and clear the single-mode decoder reference.
 */
void vdec_frame_check_exit(struct vdec_s *vdec)
{
	if (vdec == NULL)
		return;
	print_decoder_info(vdec);
	frame_check_exit(&vdec->vfc);

	single_mode_vdec = NULL;
}

/*
 * dump_yuv_store() - sysfs write handler: "echo <id> <start> <num>" arms a
 * YUV dump. If the decoder is not connected yet the request is parked in the
 * yuv_start/yuv_num/yuv_enable arrays and applied at init time.
 */
ssize_t dump_yuv_store(struct class *class,
	struct class_attribute *attr,
	const char *buf, size_t size)
{
	struct vdec_s *vdec = NULL;
	unsigned int id = 0, num = 0, start = 0;
	int ret = -1;

	ret = sscanf(buf, "%d %d %d", &id, &start, &num);
	if (ret < 0) {
		pr_info("%s, parse failed\n", buf);
		return size;
	}
	if ((num == 0) || (num > YUV_MAX_DUMP_NUM)) {
		pr_info("requred yuv num %d, max %d\n",
			num, YUV_MAX_DUMP_NUM);
		return size;
	}
	vdec = vdec_get_vdec_by_id(id);
	if (vdec == NULL) {
		yuv_start[id] = start;
		yuv_num[id] = num;
		yuv_enable |= (1 << id);
		pr_info("no connected vdec.%d now, set dump ok\n", id);
		return size;
	}

	ret = dump_yuv_trig(&vdec->vfc, id, start, num);
	if (ret < 0)
		pr_info("trigger dump yuv failed\n");
	else
		pr_info("trigger dump yuv init ok, total %d frames\n", num);

	return size;
}

/* dump_yuv_show() - sysfs read handler listing parked dump requests. */
ssize_t dump_yuv_show(struct class *class,
	struct class_attribute *attr, char *buf)
{
	int i;
	char *pbuf = buf;

	for (i = 0; i < MAX_INSTANCE_MUN; i++) {
		/* NOTE(review): pr_info() logs to dmesg and does NOT write
		 * into buf, yet its return value advances pbuf — the sysfs
		 * output likely contains uninitialized bytes; this probably
		 * should be sprintf(pbuf, ...). Verify before changing. */
		pbuf += pr_info("vdec.%d, start: %d, total: %d frames\n",
			i, yuv_start[i], yuv_num[i]);
	}
	pbuf += sprintf(pbuf,
		"\nUsage: echo [id] [start] [num] > dump_yuv\n\n");
	return pbuf - buf;
}


/*
 * frame_check_store() - sysfs write handler: "echo <id> <1|0>" sets/clears
 * the per-instance bit in the check_enable mask.
 */
ssize_t frame_check_store(struct class *class,
	struct class_attribute *attr,
	const char *buf, size_t size)
{
	int ret = -1;
	int on_off, id;

	ret = sscanf(buf, "%d %d", &id, &on_off);
	if (ret < 0) {
		pr_info("%s, parse failed\n", buf);
		return size;
	}
	if (id >= MAX_INSTANCE_MUN) {
		pr_info("%d out of max vdec id\n", id);
		return size;
	}
	if (on_off)
		check_enable |= (1 << id);
	else
		check_enable &= ~(1 << id);

	return size;
}

/*
 * frame_check_show() - sysfs read handler listing per-instance CRC enable
 * state; in error-CRC block mode, reading also releases the blocked decoder.
 */
ssize_t frame_check_show(struct class *class,
	struct class_attribute *attr, char *buf)
{
	int i;
	char *pbuf = buf;

	for (i = 0; i < MAX_INSTANCE_MUN; i++) {
		pbuf += sprintf(pbuf,
			"vdec.%d\tcrc: %s\n", i,
			(check_enable & (0x01 << i))?"enabled":"--");
	}
	pbuf += sprintf(pbuf,
		"\nUsage:\techo [id] [1:on/0:off] > frame_check\n\n");

	if (fc_debug & FC_ERR_CRC_BLOCK_MODE) {
		/* cat frame_check to next frame when block */
		struct vdec_s *vdec = NULL;
		vdec = vdec_get_vdec_by_id(__ffs(check_enable));
		if (vdec)
			vdec->vfc.err_crc_block = 0;
	}

	return pbuf - buf;
}


/* Module parameters: compare-file names, debug masks and checksum controls
 * consumed by the functions above. */
module_param_string(comp_crc, comp_crc, 128, 0664);
MODULE_PARM_DESC(comp_crc, "\n crc_filename\n");

module_param_string(aux_comp_crc, aux_comp_crc, 128, 0664);
MODULE_PARM_DESC(aux_comp_crc, "\n aux crc_filename\n");


module_param(fc_debug, uint, 0664);
MODULE_PARM_DESC(fc_debug, "\n frame check debug\n");

module_param(aux_enable, uint, 0664);
MODULE_PARM_DESC(aux_enable, "\n aux data check debug\n");

module_param(size_yuv_buf, uint, 0664);
MODULE_PARM_DESC(size_yuv_buf, "\n size_yuv_buf\n");

module_param_string(checksum_info, checksum_info, 128, 0664);
MODULE_PARM_DESC(checksum_info, "\n checksum_info\n");

module_param_string(checksum_filename, checksum_filename, 128, 0664);
MODULE_PARM_DESC(checksum_filename, "\n checksum_filename\n");

module_param(checksum_start_count, uint, 0664);
MODULE_PARM_DESC(checksum_start_count, "\n checksum_start_count\n");

module_param(checksum_enable, uint, 0664);
MODULE_PARM_DESC(checksum_enable, "\n checksum_enable\n");
diff --git a/drivers/frame_provider/decoder/utils/frame_check.h b/drivers/frame_provider/decoder/utils/frame_check.h new file mode 100644 index 0000000..e983377 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/frame_check.h
/*
 * drivers/amlogic/media/frame_provider/decoder/utils/frame_check.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
*/

#ifndef __FRAME_CHECK_H__
#define __FRAME_CHECK_H__


#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/amlogic/media/vfm/vframe.h>
#include <linux/kfifo.h>

#define FRAME_CHECK
#define AUX_DATA_CRC


/* upper bound accepted by the dump_yuv sysfs trigger */
#define YUV_MAX_DUMP_NUM 60

/* bytes per CRC text record and number of queue slots */
#define SIZE_CRC 64
#define SIZE_CHECK_Q 128

#define USER_CMP_POOL_MAX_SIZE (SIZE_CHECK_Q)

/* State for dumping raw decoded YUV frames to a file. */
struct pic_dump_t{
	struct file *yuv_fp;       /* output file handle */
	loff_t yuv_pos;            /* current write offset */
	unsigned int start;        /* first frame index to dump */
	unsigned int num;          /* frames requested */
	unsigned int end;          /* start + num */
	unsigned int dump_cnt;     /* frames dumped so far */

	unsigned int buf_size;     /* size of staging buffer below */
	char *buf_addr;            /* vmalloc'd staging buffer */
};

/* Per-instance frame CRC check state: the new_chk_q/wr_chk_q kfifos pass
 * SIZE_CRC-byte text records between producer and writer work. */
struct pic_check_t{
	struct file *check_fp;     /* generated .crc log */
	loff_t check_pos;

	struct file *compare_fp;   /* reference CRC file (may be NULL) */
	loff_t compare_pos;
	unsigned int cmp_crc_cnt;  /* compare records loaded */
	void *fbc_planes[4];       /* decompress target for FBC frames */
	void *check_addr;          /* backing store for queue records */
	DECLARE_KFIFO(new_chk_q, char *, SIZE_CHECK_Q);
	DECLARE_KFIFO(wr_chk_q, char *, SIZE_CHECK_Q);
};

/* Same record-queue scheme for aux (metadata) CRC checking. */
struct aux_data_check_t{
	struct file *check_fp;
	loff_t check_pos;

	struct file *compare_fp;
	loff_t compare_pos;
	unsigned int cmp_crc_cnt;
	void *check_addr;

	DECLARE_KFIFO(new_chk_q, char *, SIZE_CHECK_Q);
	DECLARE_KFIFO(wr_chk_q, char *, SIZE_CHECK_Q);
};


/* Top-level per-decoder frame check manager (embedded in struct vdec_s). */
struct pic_check_mgr_t{
	int id;                    /* vdec instance id */
	int enable;                /* CRC_MASK / YUV_MASK bits */
	unsigned int frame_cnt;
	/* pic info */
	unsigned int canvas_w;
	unsigned int canvas_h;
	unsigned int size_y; //real size
	unsigned int size_uv;
	unsigned int size_pic;
	unsigned int last_size_pic; /* for detecting resolution change */
	void *y_vaddr;
	void *uv_vaddr;
	ulong y_phyaddr;
	ulong uv_phyaddr;
	int err_crc_block;

	int file_cnt;              /* suffix for successive .crc logs */
	atomic_t work_inited;      /* frame_check_work initialized once */
	struct work_struct frame_check_work;

	struct pic_check_t pic_check;
	struct pic_dump_t pic_dump;

	struct usr_crc_info_t *cmp_pool; /* CRCs injected via amstream ioctl */
	int usr_cmp_num;
	int usr_cmp_result;
	/* for mjpeg u different addr with v */
	bool mjpeg_flag;
	void *extra_v_vaddr;
	ulong extra_v_phyaddr;
	int yuvsum;
	u32 width;
	u32 height;
};

/* Top-level per-decoder aux-data check manager (embedded in struct vdec_s). */
struct aux_data_check_mgr_t{
	int id;
	int enable;                /* AUX_MASK bit */
	unsigned int frame_cnt;
	/* pic info */
	int aux_size;
	char *aux_addr;

	int file_cnt;
	atomic_t work_inited;
	struct work_struct aux_data_check_work;

	struct aux_data_check_t aux_data_check;
};


int dump_yuv_trig(struct pic_check_mgr_t *mgr,
	int id, int start, int num);

int decoder_do_frame_check(struct vdec_s *vdec, struct vframe_s *vf);

int decoder_do_aux_data_check(struct vdec_s *vdec, char *aux_buffer, int size);

int frame_check_init(struct pic_check_mgr_t *mgr, int id);

void frame_check_exit(struct pic_check_mgr_t *mgr);

ssize_t frame_check_show(struct class *class,
	struct class_attribute *attr, char *buf);

ssize_t frame_check_store(struct class *class,
	struct class_attribute *attr,
	const char *buf, size_t size);

ssize_t dump_yuv_show(struct class *class,
	struct class_attribute *attr, char *buf);

ssize_t dump_yuv_store(struct class *class,
	struct class_attribute *attr,
	const char *buf, size_t size);

void vdec_frame_check_exit(struct vdec_s *vdec);
int vdec_frame_check_init(struct vdec_s *vdec);

void vdec_aux_data_check_exit(struct vdec_s *vdec);
int vdec_aux_data_check_init(struct vdec_s *vdec);


#endif /* __FRAME_CHECK_H__ */
diff --git a/drivers/frame_provider/decoder/utils/utils.c b/drivers/frame_provider/decoder/utils/utils.c new file mode 100644 index 0000000..0372a18 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/utils.c
/*
 * drivers/amlogic/media/frame_provider/decoder/utils/utils.c
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/semaphore.h>
#include <linux/sched/rt.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "vdec.h"
#include "vdec_input.h"
#include <linux/amlogic/media/utils/vdec_reg.h>
#include "amvdec.h"
#include "decoder_mmu_box.h"
#include "decoder_bmmu_box.h"
#include "vdec_profile.h"

/*
 * decoder_common_init() - module entry: bring up the vdec core, amvdec,
 * the mmu/bmmu box allocators and the profiling debugfs, in that order.
 *
 * NOTE(review): the return values of the sub-init calls are ignored, so
 * this always reports success — verify whether failures should propagate.
 */
static int __init decoder_common_init(void)
{
	/*vdec init.*/
	vdec_module_init();

	/*amvdec init.*/
	amvdec_init();

	/*mmu box init.*/
	decoder_mmu_box_init();/*exit?*/
	decoder_bmmu_box_init();

	vdec_profile_init_debugfs();

	return 0;
}

/*
 * decoder_common_exit() - module exit: tear down in the same order the
 * components were initialized.
 */
static void __exit decoder_common_exit(void)
{
	/*vdec exit.*/
	vdec_module_exit();

	/*amvdec exit.*/
	amvdec_exit();

	decoder_mmu_box_exit();
	decoder_bmmu_box_exit();

	vdec_profile_exit_debugfs();
}

module_init(decoder_common_init);
module_exit(decoder_common_exit);

MODULE_DESCRIPTION("AMLOGIC decoder_common driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/frame_provider/decoder/utils/vdec.c b/drivers/frame_provider/decoder/utils/vdec.c new file mode 100644 index 0000000..98f0e3c --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec.c
@@ -0,0 +1,6342 @@ +/* + * drivers/amlogic/media/frame_provider/decoder/utils/vdec.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/platform_device.h> +#include <linux/uaccess.h> +#include <linux/semaphore.h> +#include <linux/sched/rt.h> +#include <linux/interrupt.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/iomap.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/video_sink/ionvideo_ext.h> +#ifdef CONFIG_AMLOGIC_V4L_VIDEO3 +#include <linux/amlogic/media/video_sink/v4lvideo_ext.h> +#endif +#include <linux/amlogic/media/vfm/vfm_ext.h> +#include <linux/sched/clock.h> +#include <uapi/linux/sched/types.h> +#include <linux/signal.h> +/*for VDEC_DEBUG_SUPPORT*/ +#include <linux/time.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../../stream_input/amports/streambuf.h" +#include "vdec.h" +#include "vdec_trace.h" +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +#include "vdec_profile.h" +#endif +#include <linux/sched/clock.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/libfdt_env.h> +#include 
<linux/of_reserved_mem.h> +#include <linux/dma-contiguous.h> +#include <linux/cma.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include "../../../stream_input/amports/amports_priv.h" + +#include <linux/amlogic/media/utils/amports_config.h> +#include "../utils/amvdec.h" +#include "vdec_input.h" + +#include "../../../common/media_clock/clk/clk.h" +#include <linux/reset.h> +#include <linux/amlogic/media/registers/cpu_version.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/video_sink/video_keeper.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "frame_check.h" +#include <linux/amlogic/tee.h> +#include "vdec_canvas_utils.h" +#include "../../../amvdec_ports/aml_vcodec_drv.h" + + +#ifdef CONFIG_AMLOGIC_POWER +#include <linux/amlogic/power_ctrl.h> +#endif + +#ifdef CONFIG_AMLOGIC_IONVIDEO +#include <linux/amlogic/media/video_sink/ionvideo_ext.h> +#endif +//#include <dt-bindings/power/sc2-pd.h> +//#include <linux/amlogic/pwr_ctrl.h> +#include <linux/of_device.h> +#include "vdec_power_ctrl.h" + +static DEFINE_MUTEX(vdec_mutex); + +#define MC_SIZE (4096 * 4) +#define CMA_ALLOC_SIZE SZ_64M +#define MEM_NAME "vdec_prealloc" +static int inited_vcodec_num; +#define jiffies_ms div64_u64(get_jiffies_64() * 1000, HZ) +static int poweron_clock_level; +static int debug_vdetect = 0; +static int keep_vdec_mem; +static unsigned int debug_trace_num = 16 * 20; +static int step_mode; +static unsigned int clk_config; +/* +0x1 : enable rdma +0x2 : check rdma result +*/ +static int rdma_mode = 0x1; + +/* + * 0x1 : sched_priority to MAX_RT_PRIO -1. + * 0x2 : always reload firmware. + * 0x4 : vdec canvas debug enable + * 0x100: enable vdec fence. 
+ */ +#define VDEC_DBG_SCHED_PRIO (0x1) +#define VDEC_DBG_ALWAYS_LOAD_FW (0x2) +#define VDEC_DBG_CANVAS_STATUS (0x4) +#define VDEC_DBG_ENABLE_FENCE (0x100) + + +#define HEVC_RDMA_F_CTRL 0x30f0 +#define HEVC_RDMA_F_START_ADDR 0x30f1 +#define HEVC_RDMA_F_END_ADDR 0x30f2 +#define HEVC_RDMA_F_STATUS0 0x30f3 + +#define HEVC_RDMA_B_CTRL 0x30f8 +#define HEVC_RDMA_B_START_ADDR 0x30f9 +#define HEVC_RDMA_B_END_ADDR 0x30fa +#define HEVC_RDMA_B_STATUS0 0x30fb + + +static u32 debug = VDEC_DBG_ALWAYS_LOAD_FW; + +u32 vdec_get_debug(void) +{ + return debug; +} +EXPORT_SYMBOL(vdec_get_debug); + + +int hevc_max_reset_count; +EXPORT_SYMBOL(hevc_max_reset_count); + +int no_powerdown; +EXPORT_SYMBOL(no_powerdown); +static int parallel_decode = 1; +static int fps_detection; +static int fps_clear; +static bool prog_only; + +static int force_nosecure_even_drm; +static int disable_switch_single_to_mult; + +static DEFINE_SPINLOCK(vdec_spin_lock); + +#define HEVC_TEST_LIMIT 100 +#define GXBB_REV_A_MINOR 0xA + +#define PRINT_FRAME_INFO 1 +#define DISABLE_FRAME_INFO 2 + +#define RESET7_REGISTER_LEVEL 0x1127 +#define P_RESETCTRL_RESET5_LEVEL 0x15 + +#define str(a) #a +#define xstr(a) str(a) + +static int frameinfo_flag = 0; +static int v4lvideo_add_di = 1; +static int v4lvideo_add_ppmgr = 0; +static int max_di_instance = 2; +static int max_supported_di_instance = 4; + +//static int path_debug = 0; + +static int enable_mvdec_info = 1; + +int decode_underflow = 0; +u32 debug_meta; + +static int enable_stream_mode_multi_dec; + +st_userdata userdata; + +typedef void (*vdec_frame_rate_event_func)(int); + +#if 1 +extern void vframe_rate_uevent(int duration); +vdec_frame_rate_event_func frame_rate_notify = vframe_rate_uevent; +#else +vdec_frame_rate_event_func frame_rate_notify = NULL; +#endif + +void vdec_frame_rate_uevent(int dur) +{ + if (frame_rate_notify == NULL) + return; + + if (unlikely(in_interrupt())) + return; + pr_info("vdec_frame_rate_uevent %d\n", dur); + frame_rate_notify(dur); +} 
+EXPORT_SYMBOL(vdec_frame_rate_uevent); + + +void register_frame_rate_uevent_func(vdec_frame_rate_event_func func) +{ + frame_rate_notify = func; +} +EXPORT_SYMBOL(register_frame_rate_uevent_func); + +struct am_reg { + char *name; + int offset; +}; + +struct vdec_isr_context_s { + int index; + int irq; + irq_handler_t dev_isr; + irq_handler_t dev_threaded_isr; + void *dev_id; + struct vdec_s *vdec; +}; + +struct decode_fps_s { + u32 frame_count; + u64 start_timestamp; + u64 last_timestamp; + u32 fps; +}; + +struct vdec_core_s { + struct list_head connected_vdec_list; + spinlock_t lock; + spinlock_t canvas_lock; + spinlock_t fps_lock; + spinlock_t input_lock; + struct ida ida; + atomic_t vdec_nr; + struct vdec_s *vfm_vdec; + struct vdec_s *active_vdec; + struct vdec_s *active_hevc; + struct vdec_s *hint_fr_vdec; + struct platform_device *vdec_core_platform_device; + struct device *cma_dev; + struct semaphore sem; + struct task_struct *thread; + struct workqueue_struct *vdec_core_wq; + + unsigned long sched_mask; + struct vdec_isr_context_s isr_context[VDEC_IRQ_MAX]; + int power_ref_count[VDEC_MAX]; + struct vdec_s *last_vdec; + int parallel_dec; + unsigned long power_ref_mask; + int vdec_combine_flag; + struct decode_fps_s decode_fps[MAX_INSTANCE_MUN]; + unsigned long buff_flag; + unsigned long stream_buff_flag; + struct power_manager_s *pm; + u32 vdec_resouce_status; + struct post_task_mgr_s post; +}; + +static struct vdec_core_s *vdec_core; + +static const char * const vdec_status_string[] = { + "VDEC_STATUS_UNINITIALIZED", + "VDEC_STATUS_DISCONNECTED", + "VDEC_STATUS_CONNECTED", + "VDEC_STATUS_ACTIVE" +}; +/* +bit [29] enable steam mode dv multi; +bit [28] enable print +bit [23:16] etc +bit [15:12] + none 0 and not 0x1: force single + none 0 and 0x1: force multi +bit [8] + 1: force dual +bit [3] + 1: use mavs for single mode +bit [2] + 1: force vfm path for frame mode +bit [1] + 1: force esparser auto mode +bit [0] + 1: disable audo manual mode ?? 
+*/ + +static int debugflags; + +static char vfm_path[VDEC_MAP_NAME_SIZE] = {"disable"}; +static const char vfm_path_node[][VDEC_MAP_NAME_SIZE] = +{ + "video_render.0", + "video_render.1", + "amvideo", + "videopip", + "deinterlace", + "dimulti.1", + "amlvideo", + "aml_video.1", + "amlvideo2.0", + "amlvideo2.1", + "ppmgr", + "ionvideo", + "ionvideo.1", + "ionvideo.2", + "ionvideo.3", + "ionvideo.4", + "ionvideo.5", + "ionvideo.6", + "ionvideo.7", + "ionvideo.8", + "videosync.0", + "v4lvideo.0", + "v4lvideo.1", + "v4lvideo.2", + "v4lvideo.3", + "v4lvideo.4", + "v4lvideo.5", + "v4lvideo.6", + "v4lvideo.7", + "v4lvideo.8", + "fake-amvideo", + "disable", + "reserved", +}; + +int vdec_get_debug_flags(void) +{ + return debugflags; +} +EXPORT_SYMBOL(vdec_get_debug_flags); + +void VDEC_PRINT_FUN_LINENO(const char *fun, int line) +{ + if (debugflags & 0x10000000) + pr_info("%s, %d\n", fun, line); +} +EXPORT_SYMBOL(VDEC_PRINT_FUN_LINENO); + +unsigned char is_mult_inc(unsigned int type) +{ + unsigned char ret = 0; + if (vdec_get_debug_flags() & 0xf000) + ret = (vdec_get_debug_flags() & 0x1000) + ? 
1 : 0; + else if (type & PORT_TYPE_DECODER_SCHED) + ret = 1; + return ret; +} +EXPORT_SYMBOL(is_mult_inc); + +bool is_support_no_parser(void) +{ + if ((enable_stream_mode_multi_dec) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_SC2) || + (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7)) + return true; + return false; +} +EXPORT_SYMBOL(is_support_no_parser); + +static const bool cores_with_input[VDEC_MAX] = { + true, /* VDEC_1 */ + false, /* VDEC_HCODEC */ + false, /* VDEC_2 */ + true, /* VDEC_HEVC / VDEC_HEVC_FRONT */ + false, /* VDEC_HEVC_BACK */ +}; + +static const int cores_int[VDEC_MAX] = { + VDEC_IRQ_1, + VDEC_IRQ_2, + VDEC_IRQ_0, + VDEC_IRQ_0, + VDEC_IRQ_HEVC_BACK +}; + +unsigned long vdec_canvas_lock(void) +{ + unsigned long flags; + spin_lock_irqsave(&vdec_core->canvas_lock, flags); + + return flags; +} + +void vdec_canvas_unlock(unsigned long flags) +{ + spin_unlock_irqrestore(&vdec_core->canvas_lock, flags); +} + +unsigned long vdec_fps_lock(struct vdec_core_s *core) +{ + unsigned long flags; + spin_lock_irqsave(&core->fps_lock, flags); + + return flags; +} + +void vdec_fps_unlock(struct vdec_core_s *core, unsigned long flags) +{ + spin_unlock_irqrestore(&core->fps_lock, flags); +} + +unsigned long vdec_core_lock(struct vdec_core_s *core) +{ + unsigned long flags; + + spin_lock_irqsave(&core->lock, flags); + + return flags; +} + +void vdec_core_unlock(struct vdec_core_s *core, unsigned long flags) +{ + spin_unlock_irqrestore(&core->lock, flags); +} + +unsigned long vdec_inputbuff_lock(struct vdec_core_s *core) +{ + unsigned long flags; + + spin_lock_irqsave(&core->input_lock, flags); + + return flags; +} + +void vdec_inputbuff_unlock(struct vdec_core_s *core, unsigned long flags) +{ + spin_unlock_irqrestore(&core->input_lock, flags); +} + + +static bool vdec_is_input_frame_empty(struct vdec_s *vdec) { + struct vdec_core_s *core = vdec_core; + bool ret; + unsigned long flags; + + flags = vdec_inputbuff_lock(core); + ret = !(vdec->core_mask & 
core->buff_flag); + vdec_inputbuff_unlock(core, flags); + + return ret; +} + +static void vdec_up(struct vdec_s *vdec) +{ + struct vdec_core_s *core = vdec_core; + + if (debug & 8) + pr_info("vdec_up, id:%d\n", vdec->id); + up(&core->sem); +} + +static u64 vdec_get_us_time_system(void) +{ + return div64_u64(local_clock(), 1000); +} + +static void vdec_fps_clear(int id) +{ + if (id >= MAX_INSTANCE_MUN) + return; + + vdec_core->decode_fps[id].frame_count = 0; + vdec_core->decode_fps[id].start_timestamp = 0; + vdec_core->decode_fps[id].last_timestamp = 0; + vdec_core->decode_fps[id].fps = 0; +} + +static void vdec_fps_clearall(void) +{ + int i; + + for (i = 0; i < MAX_INSTANCE_MUN; i++) { + vdec_core->decode_fps[i].frame_count = 0; + vdec_core->decode_fps[i].start_timestamp = 0; + vdec_core->decode_fps[i].last_timestamp = 0; + vdec_core->decode_fps[i].fps = 0; + } +} + +static void vdec_fps_detec(int id) +{ + unsigned long flags; + + if (fps_detection == 0) + return; + + if (id >= MAX_INSTANCE_MUN) + return; + + flags = vdec_fps_lock(vdec_core); + + if (fps_clear == 1) { + vdec_fps_clearall(); + fps_clear = 0; + } + + vdec_core->decode_fps[id].frame_count++; + if (vdec_core->decode_fps[id].frame_count == 1) { + vdec_core->decode_fps[id].start_timestamp = + vdec_get_us_time_system(); + vdec_core->decode_fps[id].last_timestamp = + vdec_core->decode_fps[id].start_timestamp; + } else { + vdec_core->decode_fps[id].last_timestamp = + vdec_get_us_time_system(); + vdec_core->decode_fps[id].fps = + (u32)div_u64(((u64)(vdec_core->decode_fps[id].frame_count) * + 10000000000), + (vdec_core->decode_fps[id].last_timestamp - + vdec_core->decode_fps[id].start_timestamp)); + } + vdec_fps_unlock(vdec_core, flags); +} + +static void vdec_dmc_pipeline_reset(void) +{ + /* + * bit15: vdec_piple + * bit14: hevc_dmc_piple + * bit13: hevcf_dmc_pipl + * bit12: wave420_dmc_pipl + * bit11: hcodec_dmc_pipl + */ + + WRITE_RESET_REG(RESET7_REGISTER, + (1 << 15) | (1 << 14) | (1 << 13) | + (1 << 12) 
		| (1 << 11));
}

/*
 * Stop the decoder micro-code processor (MPSR/CPSR cleared) and wait
 * for in-flight IMEM/LMEM DMA to drain, bounded by jiffies timeouts
 * (1s per wait on VLD; 1s then 100ms on HEVC).
 */
static void vdec_stop_armrisc(int hw)
{
	ulong timeout = jiffies + HZ;

	if (hw == VDEC_INPUT_TARGET_VLD) {
		WRITE_VREG(MPSR, 0);
		WRITE_VREG(CPSR, 0);

		while (READ_VREG(IMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}

		timeout = jiffies + HZ;
		while (READ_VREG(LMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}
	} else if (hw == VDEC_INPUT_TARGET_HEVC) {
		WRITE_VREG(HEVC_MPSR, 0);
		WRITE_VREG(HEVC_CPSR, 0);

		while (READ_VREG(HEVC_IMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}

		timeout = jiffies + HZ/10;
		while (READ_VREG(HEVC_LMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}
	}
}

#define VDEC_ASSIST_DBUS_DISABLE 0x0046
#define HEVC_ASSIST_AXI_STATUS2_LO 0x307f

/*
 * Enable/disable the VDEC assist data bus.  On disable, poll until
 * the register latches 0xffff, then burn ~200 loop iterations as
 * settle time.
 */
static void vdec_dbus_ctrl(bool enable)
{
	if (enable) {
		WRITE_VREG(VDEC_ASSIST_DBUS_DISABLE, 0);
	} else {
		u32 nop_cnt = 200;
		WRITE_VREG(VDEC_ASSIST_DBUS_DISABLE, 0xffff);
		while (READ_VREG(VDEC_ASSIST_DBUS_DISABLE) != 0xffff);
		while (nop_cnt--);
	}
}

/*
 * Enable/disable the HEVC front/back AXI arbiters.  On disable the
 * front arbiter (bit6) is stopped first, then the back one (bit14),
 * each polled until the matching status reports read/write idle
 * (bits 15/11).
 */
static void hevc_arb_ctrl(bool enable)
{
	u32 axi_ctrl, axi_status, nop_cnt = 200;

	if (enable) {
		axi_ctrl = READ_VREG(HEVC_ASSIST_AXI_CTRL);
		axi_ctrl &= (~((1 << 6) | (1 << 14)));
		WRITE_VREG(HEVC_ASSIST_AXI_CTRL, axi_ctrl); //enable front/back arbitor
	} else {
		axi_ctrl = READ_VREG(HEVC_ASSIST_AXI_CTRL);
		axi_ctrl |= (1 << 6);
		WRITE_VREG(HEVC_ASSIST_AXI_CTRL, axi_ctrl); // disable front arbitor

		do {
			axi_status = READ_VREG(HEVC_ASSIST_AXI_STATUS);
			if (axi_status & ((1 << 15) | (1 << 11))) //read/write disable
				break;
		} while (1);

		axi_ctrl |= (1 << 14);
		WRITE_VREG(HEVC_ASSIST_AXI_CTRL, axi_ctrl); // disable back arbitor

		do {
			axi_status = READ_VREG(HEVC_ASSIST_AXI_STATUS2_LO);
			if (axi_status & ((1 << 15) | (1 << 11))) //read/write disable
				break;
		} while (1);

		while (nop_cnt--);
	}
}

/* Open (dmc_on) or close the DMC request port used by @target. */
static void dec_dmc_port_ctrl(bool dmc_on, u32 target)
{
	unsigned long flags;
	unsigned int sts_reg_addr = DMC_CHAN_STS;
	unsigned int mask = 0;
	unsigned int cpu_type = get_cpu_major_id();

	/* pick the DMC_REQ_CTRL bit(s) owned by this decode target */
	if (target == VDEC_INPUT_TARGET_VLD) {
		mask = (1 << 13); /*bit13: DOS VDEC interface*/
		if (cpu_type >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask = (1 << 21);
	} else if (target == VDEC_INPUT_TARGET_HEVC) {
		mask = (1 << 4); /*hevc*/
		if (cpu_type >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask |= (1 << 8); /*hevcb */
	}

	if (!mask) {
		pr_info("debug dmc ctrl return\n");
		return;
	}

	if (dmc_on) {
		/* dmc async on request */
		spin_lock_irqsave(&vdec_spin_lock, flags);

		codec_dmcbus_write(DMC_REQ_CTRL,
			codec_dmcbus_read(DMC_REQ_CTRL) | mask);

		spin_unlock_irqrestore(&vdec_spin_lock, flags);
	} else {
		/* dmc async off request */
		spin_lock_irqsave(&vdec_spin_lock, flags);

		codec_dmcbus_write(DMC_REQ_CTRL,
			codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);

		spin_unlock_irqrestore(&vdec_spin_lock, flags);

		/* the channel-status register address differs per SoC */
		switch (cpu_type) {
		case AM_MESON_CPU_MAJOR_ID_S4:
		case AM_MESON_CPU_MAJOR_ID_S4D:
		case AM_MESON_CPU_MAJOR_ID_T5W:
			sts_reg_addr = S4_DMC_CHAN_STS;
			break;
		case AM_MESON_CPU_MAJOR_ID_T5:
		case AM_MESON_CPU_MAJOR_ID_T5D:
			sts_reg_addr = T5_DMC_CHAN_STS;
			break;
		case AM_MESON_CPU_MAJOR_ID_SC2:
			sts_reg_addr = TM2_REVB_DMC_CHAN_STS;
			break;
		case AM_MESON_CPU_MAJOR_ID_TM2:
			if (is_cpu_meson_revb())
				sts_reg_addr = TM2_REVB_DMC_CHAN_STS;
			else
				sts_reg_addr = DMC_CHAN_STS;
			break;
		default:
			sts_reg_addr = DMC_CHAN_STS;
			break;
		}
		/* busy-wait (unbounded) until the DMC reports the port idle */
		while (!(codec_dmcbus_read(sts_reg_addr)
			& mask))
			;
	}
}

/*
 * Fence off decoder DMC/bus traffic before power-down: stop the risc
 * core first, then close the bus/arbiter (T7/T3) or DMC port and wait
 * for pending transfers to end.
 */
static void vdec_disable_DMC(struct vdec_s *vdec)
{
	/*close first, then wait pending end, timing suggestion from vlsi*/
	struct vdec_input_s *input = &vdec->input;

	/* need to stop armrisc.
	 */
	if (!IS_ERR_OR_NULL(vdec->dev))
		vdec_stop_armrisc(input->target);

	if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) ||
		(get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3)) {
		/* T7/T3 gate the internal bus/arbiter instead of the DMC port;
		 * skip when the core is already powered off. */
		if (input->target == VDEC_INPUT_TARGET_VLD) {
			if (!vdec_on(VDEC_1))
				return;
			vdec_dbus_ctrl(0);
		} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
			if (!vdec_on(VDEC_HEVC))
				return;
			hevc_arb_ctrl(0);
		}
	} else
		dec_dmc_port_ctrl(0, input->target);

	pr_debug("%s input->target= 0x%x\n", __func__, input->target);
}

/* Re-enable decoder DMC/bus traffic after power-up (inverse of _disable). */
static void vdec_enable_DMC(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

	if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) ||
		(get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3)) {
		if (input->target == VDEC_INPUT_TARGET_VLD)
			vdec_dbus_ctrl(1);
		else if (input->target == VDEC_INPUT_TARGET_HEVC)
			hevc_arb_ctrl(1);
		return;
	}

	/* must reset the dmc pipeline if it's g12b.
	 * NOTE(review): this compares get_cpu_type() where the rest of
	 * this file uses get_cpu_major_id() - confirm it is intentional.
	 */
	if (get_cpu_type() == AM_MESON_CPU_MAJOR_ID_G12B)
		vdec_dmc_pipeline_reset();

	dec_dmc_port_ctrl(1, input->target);

	pr_debug("%s input->target= 0x%x\n", __func__, input->target);
}



/* Map a vformat to the hardware core mask that decodes it (-1 if unknown). */
static int vdec_get_hw_type(int value)
{
	int type;
	switch (value) {
	case VFORMAT_HEVC:
	case VFORMAT_VP9:
	case VFORMAT_AVS2:
	case VFORMAT_AV1:
		type = CORE_MASK_HEVC;
		break;

	case VFORMAT_MPEG12:
	case VFORMAT_MPEG4:
	case VFORMAT_H264:
	case VFORMAT_MJPEG:
	case VFORMAT_REAL:
	case VFORMAT_JPEG:
	case VFORMAT_VC1:
	case VFORMAT_AVS:
	case VFORMAT_YUV:
	case VFORMAT_H264MVC:
	case VFORMAT_H264_4K2K:
	case VFORMAT_H264_ENC:
	case VFORMAT_JPEG_ENC:
		type = CORE_MASK_VDEC_1;
		break;

	default:
		type = -1;
	}

	return type;
}


/* Remember the most recent vdec instance that used each hardware core. */
static void vdec_save_active_hw(struct vdec_s *vdec)
{
	int type;

	type = vdec_get_hw_type(vdec->port->vformat);

	if (type == CORE_MASK_HEVC) {
		vdec_core->active_hevc = vdec;
	} else if (type == CORE_MASK_VDEC_1) {
		vdec_core->active_vdec = vdec;
	} else {
		pr_info("save_active_fw wrong\n");
	}
}

/*
 * Recompute core->buff_flag / core->stream_buff_flag from every
 * connected instance, under the input-buffer lock:
 *  - frame based: core is runnable when frames (or EOS) are queued;
 *  - stream based: the core's stream flag is always set.
 * In no-parser mode a dual-layer slave mirrors its master's wp.
 */
static void vdec_update_buff_status(void)
{
	struct vdec_core_s *core = vdec_core;
	unsigned long flags;
	struct vdec_s *vdec;

	flags = vdec_inputbuff_lock(core);
	core->buff_flag = 0;
	core->stream_buff_flag = 0;
	list_for_each_entry(vdec, &core->connected_vdec_list, list) {
		struct vdec_input_s *input = &vdec->input;
		if (input_frame_based(input)) {
			if (input->have_frame_num || input->eos)
				core->buff_flag |= vdec->core_mask;
		} else if (input_stream_based(input)) {
			core->stream_buff_flag |= vdec->core_mask;
		}
		/* slave el pre_decode_level wp update */
		if ((is_support_no_parser()) && (vdec->slave)) {
			STBUF_WRITE(&vdec->slave->vbuf, set_wp,
				STBUF_READ(&vdec->vbuf, get_wp));
		}
	}
	vdec_inputbuff_unlock(core, flags);
}

#if 0
void vdec_update_streambuff_status(void)
{
	struct vdec_core_s *core = vdec_core;
	struct vdec_s *vdec;

	/* check streaming prepare level threshold if not EOS */
	list_for_each_entry(vdec, &core->connected_vdec_list, list) {
		struct vdec_input_s *input = &vdec->input;
		if (input && input_stream_based(input) && !input->eos &&
			(vdec->need_more_data & VDEC_NEED_MORE_DATA)) {
			u32 rp, wp, level;

			rp = STBUF_READ(&vdec->vbuf, get_rp);
			wp = STBUF_READ(&vdec->vbuf, get_wp);
			if (wp < rp)
				level = input->size + wp - rp;
			else
				level = wp - rp;
			if ((level < input->prepare_level) &&
				(pts_get_rec_num(PTS_TYPE_VIDEO,
					vdec->input.total_rd_count) < 2)) {
				break;
			} else if (level > input->prepare_level) {
				vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
				if (debug & 8)
					pr_info("vdec_flush_streambuff_status up\n");
				vdec_up(vdec);
			}
			break;
		}
	}
}
EXPORT_SYMBOL(vdec_update_streambuff_status);
#endif

/*
 * Query decoder status through the driver's dec_status callback;
 * only meaningful while the instance is connected or active,
 * otherwise returns 0 without touching @vstatus.
 */
int vdec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
{
	if (vdec && vdec->dec_status &&
		((vdec->status == VDEC_STATUS_CONNECTED ||
		vdec->status == VDEC_STATUS_ACTIVE)))
		return vdec->dec_status(vdec, vstatus);

	return 0;
}
EXPORT_SYMBOL(vdec_status);

/*
 * Forward trick-mode to the driver callback (and, on success, to the
 * dual-layer slave).  Returns the driver result, or -1 when the
 * driver has no set_trickmode handler.
 */
int vdec_set_trickmode(struct vdec_s *vdec, unsigned long trickmode)
{
	int r;

	if (vdec->set_trickmode) {
		r = vdec->set_trickmode(vdec, trickmode);

		if ((r == 0) && (vdec->slave) && (vdec->slave->set_trickmode))
			r = vdec->slave->set_trickmode(vdec->slave,
				trickmode);
		return r;
	}

	return -1;
}
EXPORT_SYMBOL(vdec_set_trickmode);

/* Record the reset flag and notify the driver when it implements it. */
int vdec_set_isreset(struct vdec_s *vdec, int isreset)
{
	vdec->is_reset = isreset;
	pr_info("is_reset=%d\n", isreset);
	if (vdec->set_isreset)
		return vdec->set_isreset(vdec, isreset);
	return 0;
}
EXPORT_SYMBOL(vdec_set_isreset);

/* Remember whether Dolby Vision metadata travels with the EL stream. */
int vdec_set_dv_metawithel(struct vdec_s *vdec, int isdvmetawithel)
{
	vdec->dolby_meta_with_el = isdvmetawithel;
	pr_info("isdvmetawithel=%d\n", isdvmetawithel);
	return 0;
}
EXPORT_SYMBOL(vdec_set_dv_metawithel);

/* Set the global no_powerdown flag. */
void vdec_set_no_powerdown(int flag)
{
	no_powerdown = flag;
	pr_info("no_powerdown=%d\n", no_powerdown);
	return;
}
EXPORT_SYMBOL(vdec_set_no_powerdown);

/*
 * Update per-stream statistics: bump error/frame counters and derive
 * a bitrate estimate over a 96000*2 sample window of frame_dur
 * (2 seconds; frame_dur presumably in 1/96000 s units - TODO confirm).
 */
void vdec_count_info(struct vdec_info *vs, unsigned int err,
	unsigned int offset)
{
	if (err)
		vs->error_frame_count++;
	if (offset) {
		if (0 == vs->frame_count) {
			vs->offset = 0;
			vs->samp_cnt = 0;
		}
		vs->frame_data = offset > vs->total_data ?
			offset - vs->total_data : vs->total_data - offset;
		vs->total_data = offset;
		if (vs->samp_cnt < 96000 * 2) { /* 2s */
			if (0 == vs->samp_cnt)
				vs->offset = offset;
			vs->samp_cnt += vs->frame_dur;
		} else {
			/* bytes consumed over the 2s window, halved */
			vs->bit_rate = (offset - vs->offset) / 2;
			/*pr_info("bitrate : %u\n",vs->bit_rate);*/
			vs->samp_cnt = 0;
		}
		vs->frame_count++;
	}
	/*pr_info("size : %u, offset : %u, dur : %u, cnt : %u\n",
		vs->offset,offset,vs->frame_dur,vs->samp_cnt);*/
	return;
}
EXPORT_SYMBOL(vdec_count_info);

/* 4K decode is supported except on GXL-805X, S805X2 and T5D parts. */
int vdec_is_support_4k(void)
{
	return ((!is_meson_gxl_package_805X()) &&
		(!is_cpu_s4_s805x2()) &&
		(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D));
}
EXPORT_SYMBOL(vdec_is_support_4k);

/*
 * clk_config:
 *0:default
 *1:no gp0_pll;
 *2:always used gp0_pll;
 *>=10:fixed n M clk;
 *== 100 , 100M clks;
 */
unsigned int get_vdec_clk_config_settings(void)
{
	return clk_config;
}
void update_vdec_clk_config_settings(unsigned int config)
{
	clk_config = config;
}
EXPORT_SYMBOL(update_vdec_clk_config_settings);

/* Device used for codec CMA allocations. */
struct device *get_codec_cma_device(void)
{
	return vdec_core->cma_dev;
}

/* Number of currently created vdec instances. */
int vdec_get_core_nr(void)
{
	return (int)atomic_read(&vdec_core->vdec_nr);
}
EXPORT_SYMBOL(vdec_get_core_nr);

#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
/*
 * (single-instance, multi-instance) driver-name pairs, indexed by
 * format * 2 (+1 selects the multi-instance variant) - see
 * get_dev_name().  Encoders have identical names in both slots.
 */
static const char * const vdec_device_name[] = {
	"amvdec_mpeg12", "ammvdec_mpeg12",
	"amvdec_mpeg4", "ammvdec_mpeg4",
	"amvdec_h264", "ammvdec_h264",
	"amvdec_mjpeg", "ammvdec_mjpeg",
	"amvdec_real", "ammvdec_real",
	"amjpegdec", "ammjpegdec",
	"amvdec_vc1", "ammvdec_vc1",
	"amvdec_avs", "ammvdec_avs",
	"amvdec_yuv", "ammvdec_yuv",
	"amvdec_h264mvc", "ammvdec_h264mvc",
	"amvdec_h264_4k2k", "ammvdec_h264_4k2k",
	"amvdec_h265", "ammvdec_h265",
	"amvenc_avc", "amvenc_avc",
	"jpegenc", "jpegenc",
	"amvdec_vp9", "ammvdec_vp9",
	"amvdec_avs2", "ammvdec_avs2",
	"amvdec_av1", "ammvdec_av1",
};


#else

/* Single-instance driver names, indexed directly by format. */
static const char * const vdec_device_name[] = {
	"amvdec_mpeg12",
"amvdec_mpeg4", + "amvdec_h264", + "amvdec_mjpeg", + "amvdec_real", + "amjpegdec", + "amvdec_vc1", + "amvdec_avs", + "amvdec_yuv", + "amvdec_h264mvc", + "amvdec_h264_4k2k", + "amvdec_h265", + "amvenc_avc", + "jpegenc", + "amvdec_vp9", + "amvdec_avs2", + "amvdec_av1" +}; + +#endif + +/* + * Only support time sliced decoding for frame based input, + * so legacy decoder can exist with time sliced decoder. + */ +static const char *get_dev_name(bool use_legacy_vdec, int format) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + if (use_legacy_vdec && (debugflags & 0x8) == 0) + return vdec_device_name[format * 2]; + else + return vdec_device_name[format * 2 + 1]; +#else + return vdec_device_name[format]; +#endif +} + +#ifdef VDEC_DEBUG_SUPPORT +static u64 get_current_clk(void) +{ + /*struct timespec xtime = current_kernel_time(); + u64 usec = xtime.tv_sec * 1000000; + usec += xtime.tv_nsec / 1000; + */ + u64 usec = sched_clock(); + return usec; +} + +static void inc_profi_count(unsigned long mask, u32 *count) +{ + enum vdec_type_e type; + + for (type = VDEC_1; type < VDEC_MAX; type++) { + if (mask & (1 << type)) + count[type]++; + } +} + +static void update_profi_clk_run(struct vdec_s *vdec, + unsigned long mask, u64 clk) +{ + enum vdec_type_e type; + + for (type = VDEC_1; type < VDEC_MAX; type++) { + if (mask & (1 << type)) { + vdec->start_run_clk[type] = clk; + if (vdec->profile_start_clk[type] == 0) + vdec->profile_start_clk[type] = clk; + vdec->total_clk[type] = clk + - vdec->profile_start_clk[type]; + /*pr_info("set start_run_clk %ld\n", + vdec->start_run_clk);*/ + + } + } +} + +static void update_profi_clk_stop(struct vdec_s *vdec, + unsigned long mask, u64 clk) +{ + enum vdec_type_e type; + + for (type = VDEC_1; type < VDEC_MAX; type++) { + if (mask & (1 << type)) { + if (vdec->start_run_clk[type] == 0) + pr_info("error, start_run_clk[%d] not set\n", type); + + /*pr_info("update run_clk type %d, %ld, %ld, %ld\n", + type, + clk, + vdec->start_run_clk[type], + 
				vdec->run_clk[type]);*/
			vdec->run_clk[type] +=
				(clk - vdec->start_run_clk[type]);
		}
	}
}

#endif

/*
 * Copy decoder sysinfo from user space; then, when allowed, switch a
 * single-mode instance over to the multi-instance stream parser:
 * derive an "m<suffix>" profile name from the device name and probe
 * it with is_support_profile().  H263 and AV1 are excluded from the
 * switch.  Returns 0, -EFAULT on bad user pointer, -1 on a device
 * name without '_'.
 */
int vdec_set_decinfo(struct vdec_s *vdec, struct dec_sysinfo *p)
{
	if (copy_from_user((void *)&vdec->sys_info_store, (void *)p,
		sizeof(struct dec_sysinfo)))
		return -EFAULT;

	/* force switch to mult instance if supports this profile. */
	if ((vdec->type == VDEC_TYPE_SINGLE) &&
		!disable_switch_single_to_mult) {
		const char *str = NULL;
		char fmt[16] = {0};

		str = strchr(get_dev_name(false, vdec->format), '_');
		if (!str)
			return -1;

		/* e.g. "ammvdec_h264" -> "mh264" */
		sprintf(fmt, "m%s", ++str);
		if (is_support_profile(fmt) &&
			vdec->sys_info->format != VIDEO_DEC_FORMAT_H263 &&
			vdec->format != VFORMAT_AV1)
			vdec->type = VDEC_TYPE_STREAM_PARSER;
	}

	return 0;
}
EXPORT_SYMBOL(vdec_set_decinfo);

/* construct vdec structure */
struct vdec_s *vdec_create(struct stream_port_s *port,
	struct vdec_s *master)
{
	struct vdec_s *vdec;
	int type = VDEC_TYPE_SINGLE;
	int id;
	if (is_mult_inc(port->type))
		type = (port->type & PORT_TYPE_FRAME) ?
			VDEC_TYPE_FRAME_BLOCK :
			VDEC_TYPE_STREAM_PARSER;

	id = ida_simple_get(&vdec_core->ida,
		0, MAX_INSTANCE_MUN, GFP_KERNEL);
	if (id < 0) {
		pr_info("vdec_create request id failed!ret =%d\n", id);
		return NULL;
	}
	vdec = vzalloc(sizeof(struct vdec_s));

	/* TBD
	 * NOTE(review): if vzalloc() fails, the ida id allocated above is
	 * never released on this path - confirm and free it.
	 */
	if (vdec) {
		vdec->magic = 0x43454456;	/* "VDEC" in LE memory order */
		vdec->id = -1;
		vdec->type = type;
		vdec->port = port;
		vdec->sys_info = &vdec->sys_info_store;

		INIT_LIST_HEAD(&vdec->list);

		init_waitqueue_head(&vdec->idle_wait);

		atomic_inc(&vdec_core->vdec_nr);
#ifdef CONFIG_AMLOGIC_V4L_VIDEO3
		v4lvideo_dec_count_increase();
#endif
		vdec->id = id;
		vdec->video_id = 0xffffffff;
		vdec_input_init(&vdec->input, vdec);
		vdec->input.vdec_is_input_frame_empty = vdec_is_input_frame_empty;
		vdec->input.vdec_up = vdec_up;
		/* wire up a dual-layer (DV) master/slave pair */
		if (master) {
			vdec->master = master;
			master->slave = vdec;
			master->sched = 1;
		}
		if (enable_mvdec_info) {
			vdec->mvfrm = (struct vdec_frames_s *)
				vzalloc(sizeof(struct vdec_frames_s));
			if (!vdec->mvfrm)
				pr_err("vzalloc: vdec_frames_s failed\n");
		}
	}

	pr_debug("vdec_create instance %p, total %d, PM: %s\n", vdec,
		atomic_read(&vdec_core->vdec_nr),
		get_pm_name(vdec_core->pm->pm_type));

	//trace_vdec_create(vdec); /*DEBUG_TMP*/

	return vdec;
}
EXPORT_SYMBOL(vdec_create);

/* Set the video format on the instance and its dual-layer slave. */
int vdec_set_format(struct vdec_s *vdec, int format)
{
	vdec->format = format;
	vdec->port_flag |= PORT_FLAG_VFORMAT;

	if (vdec->slave) {
		vdec->slave->format = format;
		vdec->slave->port_flag |= PORT_FLAG_VFORMAT;
	}

	//trace_vdec_set_format(vdec, format);/*DEBUG_TMP*/

	return 0;
}
EXPORT_SYMBOL(vdec_set_format);

/* Latch a pts (90 kHz assumed) and its us equivalent (pts * 100 / 9). */
int vdec_set_pts(struct vdec_s *vdec, u32 pts)
{
	vdec->pts = pts;
	vdec->pts64 = div64_u64((u64)pts * 100, 9);
	vdec->pts_valid = true;
	//trace_vdec_set_pts(vdec, (u64)pts);/*DEBUG_TMP*/
	return 0;
}
EXPORT_SYMBOL(vdec_set_pts);

/* Latch an externally supplied 64-bit timestamp. */
void vdec_set_timestamp(struct vdec_s *vdec, u64 timestamp)
{
	vdec->timestamp = timestamp;
	vdec->timestamp_valid = true;
}
EXPORT_SYMBOL(vdec_set_timestamp);

/*
 * Copy HDR10+ metadata into the vdec.  The blob at @meta_ptr starts
 * with a 4-byte little-endian payload size followed by the payload;
 * only sizes in (0, META_DATA_SIZE] are accepted, others are dropped.
 * NOTE(review): tmp_buf is plain char; bytes >= 0x80 sign-extend in
 * the size sum - confirm producers keep the size field small.
 */
void vdec_set_metadata(struct vdec_s *vdec, ulong meta_ptr)
{
	char *tmp_buf = NULL;
	u32 size = 0;

	if (!meta_ptr)
		return;

	tmp_buf = vmalloc(META_DATA_SIZE + 4);
	if (!tmp_buf) {
		pr_err("%s:vmalloc 256+4 fail\n", __func__);
		return;
	}
	memcpy(tmp_buf, (void *)meta_ptr, META_DATA_SIZE + 4);

	size = tmp_buf[0] + (tmp_buf[1] << 8)
		+ (tmp_buf[2] << 16) + (tmp_buf[3] << 24);

	if ((size > 0) && (size <= META_DATA_SIZE)) {
		memcpy(vdec->hdr10p_data_buf, tmp_buf + 4, size);
		vdec->hdr10p_data_size = size;
		vdec->hdr10p_data_valid = true;
	}

	vfree(tmp_buf);
}
EXPORT_SYMBOL(vdec_set_metadata);

/* Latch a 64-bit pts and derive the 90 kHz value (pts64 * 9 / 100). */
int vdec_set_pts64(struct vdec_s *vdec, u64 pts64)
{
	vdec->pts64 = pts64;
	vdec->pts = (u32)div64_u64(pts64 * 9, 100);
	vdec->pts_valid = true;

	//trace_vdec_set_pts64(vdec, pts64);/*DEBUG_TMP*/
	return 0;
}
EXPORT_SYMBOL(vdec_set_pts64);

/* Current life-cycle state (VDEC_STATUS_*). */
int vdec_get_status(struct vdec_s *vdec)
{
	return vdec->status;
}
EXPORT_SYMBOL(vdec_get_status);

/* Number of input frames currently queued on the instance. */
int vdec_get_frame_num(struct vdec_s *vdec)
{
	return vdec->input.have_frame_num;
}
EXPORT_SYMBOL(vdec_get_frame_num);

void vdec_set_status(struct vdec_s *vdec, int status)
{
	//trace_vdec_set_status(vdec, status);/*DEBUG_TMP*/
	vdec->status = status;
}
EXPORT_SYMBOL(vdec_set_status);

void vdec_set_next_status(struct vdec_s *vdec, int status)
{
	//trace_vdec_set_next_status(vdec, status);/*DEBUG_TMP*/
	vdec->next_status = status;
}
EXPORT_SYMBOL(vdec_set_next_status);

int vdec_set_video_path(struct vdec_s *vdec, int video_path)
{
	vdec->frame_base_video_path = video_path;
	return 0;
}
EXPORT_SYMBOL(vdec_set_video_path);

int vdec_set_receive_id(struct vdec_s *vdec, int receive_id)
{
	vdec->vf_receiver_inst = receive_id;
	return 0;
}
EXPORT_SYMBOL(vdec_set_receive_id);

/* add frame data to input chain */
int vdec_write_vframe(struct vdec_s *vdec, const char *buf, size_t count)
{
	return vdec_input_add_frame(&vdec->input, buf, count);
}
EXPORT_SYMBOL(vdec_write_vframe);

/* Queue one frame already resident in DMA memory (no copy); @free is
 * invoked with @priv when the chunk is released. */
int vdec_write_vframe_with_dma(struct vdec_s *vdec,
	ulong addr, size_t count, u32 handle, chunk_free free, void* priv)
{
	return vdec_input_add_frame_with_dma(&vdec->input,
		addr, count, handle, free, priv);
}
EXPORT_SYMBOL(vdec_write_vframe_with_dma);

/* add a work queue thread for vdec*/
void vdec_schedule_work(struct work_struct *work)
{
	/* fall back to the system workqueue before the core wq exists */
	if (vdec_core->vdec_core_wq)
		queue_work(vdec_core->vdec_core_wq, work);
	else
		schedule_work(work);
}
EXPORT_SYMBOL(vdec_schedule_work);

/* Return the other half of a dual-layer master/slave pair, or NULL. */
static struct vdec_s *vdec_get_associate(struct vdec_s *vdec)
{
	if (vdec->master)
		return vdec->master;
	else if (vdec->slave)
		return vdec->slave;
	return NULL;
}

/*
 * Propagate the hardware read pointer back into the stream buffer.
 * In dual (master/slave) mode the rp may only advance up to the
 * associate layer's saved position, so the slower layer bounds the
 * shared buffer's release point.
 */
static void vdec_sync_input_read(struct vdec_s *vdec)
{
	if (!vdec_stream_based(vdec))
		return;

	if (vdec_dual(vdec)) {
		u32 me, other;
		if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
			/* compare wrap counts first, then raw pointers */
			me = READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
			other =
				vdec_get_associate(vdec)->input.stream_cookie;
			if (me > other)
				return;
			else if (me == other) {
				me = READ_VREG(VLD_MEM_VIFIFO_RP);
				other =
					vdec_get_associate(vdec)->input.swap_rp;
				if (me > other) {
					/* clamp to the associate's rp */
					STBUF_WRITE(&vdec->vbuf, set_rp,
						vdec_get_associate(vdec)->input.swap_rp);
					return;
				}
			}

			STBUF_WRITE(&vdec->vbuf, set_rp,
				READ_VREG(VLD_MEM_VIFIFO_RP));
		} else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
			me = READ_VREG(HEVC_SHIFT_BYTE_COUNT);
			/* NOTE(review): me is u32, so this wrap compensation
			 * ("me += 1ULL << 32") truncates to a no-op - confirm
			 * whether me/other should be u64 here. */
			if (((me & 0x80000000) == 0) &&
				(vdec->input.streaming_rp & 0x80000000))
				me += 1ULL << 32;
			other = vdec_get_associate(vdec)->input.streaming_rp;
			if (me > other) {
				STBUF_WRITE(&vdec->vbuf, set_rp,
					vdec_get_associate(vdec)->input.swap_rp);
				return;
			}

			STBUF_WRITE(&vdec->vbuf, set_rp,
				READ_VREG(HEVC_STREAM_RD_PTR));
		}
	} else if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
		STBUF_WRITE(&vdec->vbuf, set_rp,
			READ_VREG(VLD_MEM_VIFIFO_RP));
	} else if
		(vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
		STBUF_WRITE(&vdec->vbuf, set_rp,
			READ_VREG(HEVC_STREAM_RD_PTR));
	}
}

/*
 * Push the stream-buffer write pointer into the hardware FIFO.  In
 * no-parser mode a slave instead mirrors its master's wp into its own
 * stream buffer.
 */
static void vdec_sync_input_write(struct vdec_s *vdec)
{
	if (!vdec_stream_based(vdec))
		return;

	if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
		if (is_support_no_parser()) {
			if (!vdec->master) {
				WRITE_VREG(VLD_MEM_VIFIFO_WP,
					STBUF_READ(&vdec->vbuf, get_wp));
			} else {
				STBUF_WRITE(&vdec->vbuf, set_wp,
					STBUF_READ(&vdec->master->vbuf, get_wp));
			}
		} else {
			WRITE_VREG(VLD_MEM_VIFIFO_WP,
				STBUF_READ(&vdec->vbuf, get_wp));
		}
	} else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
		if (is_support_no_parser()) {
			if (!vdec->master) {
				WRITE_VREG(HEVC_STREAM_WR_PTR,
					STBUF_READ(&vdec->vbuf, get_wp));
			} else {
				STBUF_WRITE(&vdec->vbuf, set_wp,
					STBUF_READ(&vdec->master->vbuf, get_wp));
			}
		} else {
			WRITE_VREG(HEVC_STREAM_WR_PTR,
				STBUF_READ(&vdec->vbuf, get_wp));
		}
	}
}

/*
 * Advance the VLD read pointer by @skip_size bytes, handling wrap and
 * the swap-context save/restore dance.  Refuses (with an error log)
 * when the buffered level is not strictly larger than the skip.
 */
void vdec_stream_skip_data(struct vdec_s *vdec, int skip_size)
{
	u32 rp_set;
	struct vdec_input_s *input = &vdec->input;
	u32 rp = 0, wp = 0, level;

	rp = STBUF_READ(&vdec->vbuf, get_rp);
	wp = STBUF_READ(&vdec->vbuf, get_wp);

	if (wp > rp)
		level = wp - rp;
	else
		level = wp + vdec->input.size - rp ;

	if (level <= skip_size) {
		pr_err("skip size is error, buffer level = 0x%x, skip size = 0x%x\n", level, skip_size);
		return;
	}

	if (wp >= rp) {
		rp_set = rp + skip_size;
	}
	else if ((rp + skip_size) < (input->start + input->size)) {
		rp_set = rp + skip_size;
	} else {
		/* skip wraps the ring: bump the wrap-count cookie */
		rp_set = rp + skip_size - input->size;
		input->stream_cookie++;
	}

	if (vdec->format == VFORMAT_H264)
		SET_VREG_MASK(POWER_CTL_VLD,
			(1 << 9));

	WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

	/* restore read side */
	WRITE_VREG(VLD_MEM_SWAP_ADDR,
		input->swap_page_phys);
	WRITE_VREG(VLD_MEM_SWAP_CTL, 1);

	while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
		;
	WRITE_VREG(VLD_MEM_SWAP_CTL, 0);

	/* apply the new rp, then save the context back (swap ctl 3) */
	WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
		rp_set);
	WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
	WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
	STBUF_WRITE(&vdec->vbuf, set_rp,
		rp_set);
	WRITE_VREG(VLD_MEM_SWAP_ADDR,
		input->swap_page_phys);
	WRITE_VREG(VLD_MEM_SWAP_CTL, 3);
	while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
		;
	WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
}
EXPORT_SYMBOL(vdec_stream_skip_data);



/*
 *get next frame from input chain
 */
/*
 *THE VLD_FIFO is 512 bytes and Video buffer level
 * empty interrupt is set to 0x80 bytes threshold
 */
#define VLD_PADDING_SIZE 1024
#define HEVC_PADDING_SIZE (1024*16)

/*
 * Program the HW input (VLD or HEVC stream) for the next decode run.
 * Frame based: points the FIFO at the next chunk, pads the wp by
 * VLD/HEVC_PADDING_SIZE, stores the chunk in *p and returns its size
 * (-1 when no chunk is queued).  Stream based: restores (or first-time
 * initializes) the swap context, syncs rp/wp with the front end,
 * sets *p = NULL and returns the byte level including the HW fifo.
 */
int vdec_prepare_input(struct vdec_s *vdec, struct vframe_chunk_s **p)
{
	struct vdec_input_s *input = &vdec->input;
	struct vframe_chunk_s *chunk = NULL;
	struct vframe_block_list_s *block = NULL;
	int dummy;

	/* full reset to HW input */
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

		/* reset VLD fifo for all vdec */
		WRITE_VREG(DOS_SW_RESET0, (1<<5) | (1<<4) | (1<<3));
		WRITE_VREG(DOS_SW_RESET0, 0);
		/* dummy read flushes the reset on pre-SC2 parts */
		if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SC2)
			dummy = READ_RESET_REG(RESET0_REGISTER);
		WRITE_VREG(POWER_CTL_VLD, 1 << 4);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
#if 0
		/*move to driver*/
		if (input_frame_based(input))
			WRITE_VREG(HEVC_STREAM_CONTROL, 0);

		/*
		 * 2: assist
		 * 3: parser
		 * 4: parser_state
		 * 8: dblk
		 * 11:mcpu
		 * 12:ccpu
		 * 13:ddr
		 * 14:iqit
		 * 15:ipp
		 * 17:qdct
		 * 18:mpred
		 * 19:sao
		 * 24:hevc_afifo
		 */
		WRITE_VREG(DOS_SW_RESET3,
			(1<<3)|(1<<4)|(1<<8)|(1<<11)|(1<<12)|(1<<14)|(1<<15)|
			(1<<17)|(1<<18)|(1<<19));
		WRITE_VREG(DOS_SW_RESET3, 0);
#endif
	}

	/*
	 *setup HW decoder input buffer (VLD context)
	 * based on input->type and input->target
	 */
	if (input_frame_based(input)) {
		chunk = vdec_input_next_chunk(&vdec->input);

		if (chunk == NULL) {
			*p = NULL;
			return -1;
		}

		block = chunk->block;

		if (input->target == VDEC_INPUT_TARGET_VLD) {
			WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, block->start);
			WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, block->start +
				block->size - 8);
			WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
				round_down(block->start + chunk->offset,
					VDEC_FIFO_ALIGN));

			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

			/* set to manual mode */
			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
			WRITE_VREG(VLD_MEM_VIFIFO_RP,
				round_down(block->start + chunk->offset,
					VDEC_FIFO_ALIGN));
			dummy = chunk->offset + chunk->size +
				VLD_PADDING_SIZE;
			if (dummy >= block->size)
				dummy -= block->size;
			WRITE_VREG(VLD_MEM_VIFIFO_WP,
				round_down(block->start + dummy,
					VDEC_FIFO_ALIGN));

			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3);
			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);

			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
				(0x11 << 16) | (1<<10) | (7<<3));

		} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
			WRITE_VREG(HEVC_STREAM_START_ADDR, block->start);
			WRITE_VREG(HEVC_STREAM_END_ADDR, block->start +
				block->size);
			WRITE_VREG(HEVC_STREAM_RD_PTR, block->start +
				chunk->offset);
			dummy = chunk->offset + chunk->size +
				HEVC_PADDING_SIZE;
			if (dummy >= block->size)
				dummy -= block->size;
			WRITE_VREG(HEVC_STREAM_WR_PTR,
				round_down(block->start + dummy,
					VDEC_FIFO_ALIGN));

			/* set endian */
			SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
		}

		*p = chunk;
		return chunk->size;

	} else {
		/* stream based */
		u32 rp = 0, wp = 0, fifo_len = 0, first_set_rp = 0;
		int size;
		bool swap_valid = input->swap_valid;
		unsigned long swap_page_phys = input->swap_page_phys;

		if (vdec_dual(vdec) &&
			((vdec->flag & VDEC_FLAG_SELF_INPUT_CONTEXT) == 0)) {
			/* keep using previous input context */
			struct vdec_s *master = (vdec->slave) ?
				vdec : vdec->master;
			if (master->input.last_swap_slave) {
				swap_valid = master->slave->input.swap_valid;
				swap_page_phys =
					master->slave->input.swap_page_phys;
			} else {
				swap_valid = master->input.swap_valid;
				swap_page_phys = master->input.swap_page_phys;
			}
		}

		if (swap_valid) {
			if (input->target == VDEC_INPUT_TARGET_VLD) {
				if (vdec->format == VFORMAT_H264)
					SET_VREG_MASK(POWER_CTL_VLD,
						(1 << 9));

				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

				/* restore read side */
				WRITE_VREG(VLD_MEM_SWAP_ADDR,
					swap_page_phys);
				WRITE_VREG(VLD_MEM_SWAP_CTL, 1);

				while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
					;
				WRITE_VREG(VLD_MEM_SWAP_CTL, 0);

				/* restore wrap count */
				WRITE_VREG(VLD_MEM_VIFIFO_WRAP_COUNT,
					input->stream_cookie);

				rp = READ_VREG(VLD_MEM_VIFIFO_RP);
				fifo_len = READ_VREG(VLD_MEM_VIFIFO_LEVEL);

				/* enable */
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
					(0x11 << 16) | (1<<10));

				if (vdec->vbuf.no_parser)
					SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL,
						7 << 3);

				/* sync with front end */
				vdec_sync_input_read(vdec);
				vdec_sync_input_write(vdec);

				wp = READ_VREG(VLD_MEM_VIFIFO_WP);
			} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
				SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);

				/* restore read side */
				WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
					swap_page_phys);
				WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);

				while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
					& (1<<7))
					;
				WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);

				/* restore stream offset */
				WRITE_VREG(HEVC_SHIFT_BYTE_COUNT,
					input->stream_cookie);

				rp = READ_VREG(HEVC_STREAM_RD_PTR);
				fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
					>> 16) & 0x7f;


				/* enable */

				/* sync with front end */
				vdec_sync_input_read(vdec);
				vdec_sync_input_write(vdec);

				wp = READ_VREG(HEVC_STREAM_WR_PTR);

				if (vdec->vbuf.no_parser)
					SET_VREG_MASK(HEVC_STREAM_CONTROL,
						7 << 4);
				/*pr_info("vdec: restore context\r\n");*/
			}

		} else {
			/* first run: pick the initial rp */
			if (vdec->vbuf.ext_buf_addr)
				first_set_rp =
					STBUF_READ(&vdec->vbuf, get_rp);
			else {
				if (vdec->discard_start_data_flag)
					first_set_rp = STBUF_READ(&vdec->vbuf, get_rp);
				else
					first_set_rp = input->start;
			}
			if (input->target == VDEC_INPUT_TARGET_VLD) {
				WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
					input->start);
				WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
					input->start + input->size - 8);
				WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
					first_set_rp);

				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

				/* set to manual mode */
				WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
				WRITE_VREG(VLD_MEM_VIFIFO_RP, first_set_rp);
				WRITE_VREG(VLD_MEM_VIFIFO_WP,
					STBUF_READ(&vdec->vbuf, get_wp));
				rp = READ_VREG(VLD_MEM_VIFIFO_RP);

				/* enable */
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
					(0x11 << 16) | (1<<10));
				if (vdec->vbuf.no_parser)
					SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL,
						7 << 3);

				wp = READ_VREG(VLD_MEM_VIFIFO_WP);

			} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
				WRITE_VREG(HEVC_STREAM_START_ADDR,
					input->start);
				WRITE_VREG(HEVC_STREAM_END_ADDR,
					input->start + input->size);
				WRITE_VREG(HEVC_STREAM_RD_PTR,
					first_set_rp);
				WRITE_VREG(HEVC_STREAM_WR_PTR,
					STBUF_READ(&vdec->vbuf, get_wp));
				rp = READ_VREG(HEVC_STREAM_RD_PTR);
				wp = READ_VREG(HEVC_STREAM_WR_PTR);
				fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
					>> 16) & 0x7f;
				if (vdec->vbuf.no_parser)
					SET_VREG_MASK(HEVC_STREAM_CONTROL,
						7 << 4);
				/* enable */
			}
		}
		*p = NULL;
		if (wp >= rp)
			size = wp - rp + fifo_len;
		else
			size = wp + input->size - rp + fifo_len;
		if (size < 0) {
			pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
				__func__, input->size, wp, rp, fifo_len, size);
			size = 0;
		}
		return size;
	}
}
EXPORT_SYMBOL(vdec_prepare_input);

/*
 * Re-point the HEVC stream after @consume_byte of the current chunk
 * were consumed: rewind the new read position down to 64-byte
 * alignment and report how many leading bytes are invalid.
 */
u32 vdec_offset_prepare_input(struct vdec_s *vdec, u32 consume_byte, u32 data_offset, u32 data_size)
{
	struct vdec_input_s *input = &vdec->input;
	u32 res_byte, header_offset, header_data_size, data_invalid;

	res_byte =
		data_size - consume_byte;
	header_offset = data_offset;
	header_data_size = data_size;
	data_offset += consume_byte;
	data_size = res_byte;

	if (input->target == VDEC_INPUT_TARGET_VLD) {
		//to do
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		/* align the new read position down to 64 bytes; the
		 * alignment slack becomes extra (invalid) leading data */
		data_invalid = data_offset - round_down(data_offset, 0x40);
		data_offset -= data_invalid;
		data_size += data_invalid;

		/* never rewind before the original chunk start */
		if (data_offset < header_offset) {
			data_invalid = consume_byte;
			data_offset = header_offset;
			data_size = header_data_size;
		}

		if (input_frame_based(input)) {
			struct vframe_chunk_s *chunk = vdec_input_next_chunk(&vdec->input);
			struct vframe_block_list_s *block = NULL;
			int dummy;

			block = chunk->block;
			WRITE_VREG(HEVC_STREAM_START_ADDR, block->start);
			WRITE_VREG(HEVC_STREAM_END_ADDR, block->start +
				block->size);
			WRITE_VREG(HEVC_STREAM_RD_PTR, block->start +
				data_offset);
			dummy = data_offset + data_size +
				HEVC_PADDING_SIZE;
			if (dummy >= block->size)
				dummy -= block->size;
			WRITE_VREG(HEVC_STREAM_WR_PTR,
				round_down(block->start + dummy,
					VDEC_FIFO_ALIGN));

			/* set endian */
			SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
		}
	}
	/* NOTE(review): data_invalid is only assigned on the HEVC branch;
	 * for VLD targets it is returned uninitialized - confirm callers
	 * never use it for VLD. */
	return data_invalid;
}
EXPORT_SYMBOL(vdec_offset_prepare_input);

/* Re-arm the input FIFO/stream control bits for an ACTIVE instance. */
void vdec_enable_input(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

	if (vdec->status != VDEC_STATUS_ACTIVE)
		return;

	if (input->target == VDEC_INPUT_TARGET_VLD)
		SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL, (1<<2) | (1<<1));
	else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
		if (vdec_stream_based(vdec)) {
			if (vdec->vbuf.no_parser)
				/*set endian for non-parser mode.
				 */
				SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
			else
				CLEAR_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
		} else
			SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);

		SET_VREG_MASK(HEVC_STREAM_FIFO_CTL, (1<<29));
	}
}
EXPORT_SYMBOL(vdec_enable_input);

/* Assign stream buffer [start, start+size) to the instance and its slave. */
int vdec_set_input_buffer(struct vdec_s *vdec, u32 start, u32 size)
{
	int r = vdec_input_set_buffer(&vdec->input, start, size);

	if (r)
		return r;

	if (vdec->slave)
		r = vdec_input_set_buffer(&vdec->slave->input, start, size);

	return r;
}
EXPORT_SYMBOL(vdec_set_input_buffer);

/*
 * vdec_eos returns the possibility that there are
 * more input can be used by decoder through vdec_prepare_input
 * Note: this function should be called prior to vdec_vframe_dirty
 * by decoder driver to determine if EOS happens for stream based
 * decoding when there is no sufficient data for a frame
 */
bool vdec_has_more_input(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

	if (!input->eos)
		return true;

	if (input_frame_based(input))
		return vdec_input_next_input_chunk(input) != NULL;
	else {
		if (input->target == VDEC_INPUT_TARGET_VLD)
			return READ_VREG(VLD_MEM_VIFIFO_WP) !=
				STBUF_READ(&vdec->vbuf, get_wp);
		else {
			/* HEVC wp advances in 4-byte steps; mask bits 0-1 */
			return (READ_VREG(HEVC_STREAM_WR_PTR) & ~0x3) !=
				(STBUF_READ(&vdec->vbuf, get_wp) & ~0x3);
		}
	}
}
EXPORT_SYMBOL(vdec_has_more_input);

/* Set the stream-level threshold below which decode is deferred. */
void vdec_set_prepare_level(struct vdec_s *vdec, int level)
{
	vdec->input.prepare_level = level;
}
EXPORT_SYMBOL(vdec_set_prepare_level);

void vdec_set_flag(struct vdec_s *vdec, u32 flag)
{
	vdec->flag = flag;
}
EXPORT_SYMBOL(vdec_set_flag);

/* Mark EOS on the instance (and its slave) and kick the scheduler. */
void vdec_set_eos(struct vdec_s *vdec, bool eos)
{
	struct vdec_core_s *core = vdec_core;

	vdec->input.eos = eos;

	if (vdec->slave)
		vdec->slave->input.eos = eos;
	up(&core->sem);
}
EXPORT_SYMBOL(vdec_set_eos);

#ifdef VDEC_DEBUG_SUPPORT
/* Debug aid: enable step mode on all cores (bits 0-8). */
void vdec_set_step_mode(void)
{
	step_mode = 0x1ff;
}
EXPORT_SYMBOL(vdec_set_step_mode);
#endif

/* Hand the scheduler token from @vdec to @next_vdec. */
void
vdec_set_next_sched(struct vdec_s *vdec, struct vdec_s *next_vdec) +{ + if (vdec && next_vdec) { + vdec->sched = 0; + next_vdec->sched = 1; + } +} +EXPORT_SYMBOL(vdec_set_next_sched); + +/* + * Swap Context: S0 S1 S2 S3 S4 + * Sample sequence: M S M M S + * Master Context: S0 S0 S2 S3 S3 + * Slave context: NA S1 S1 S2 S4 + * ^ + * ^ + * ^ + * the tricky part + * If there are back to back decoding of master or slave + * then the context of the counter part should be updated + * with current decoder. In this example, S1 should be + * updated to S2. + * This is done by swap the swap_page and related info + * between two layers. + */ +static void vdec_borrow_input_context(struct vdec_s *vdec) +{ + struct page *swap_page; + unsigned long swap_page_phys; + struct vdec_input_s *me; + struct vdec_input_s *other; + + if (!vdec_dual(vdec)) + return; + + me = &vdec->input; + other = &vdec_get_associate(vdec)->input; + + /* swap the swap_context, borrow counter part's + * swap context storage and update all related info. 
+ * After vdec_vframe_dirty, vdec_save_input_context + * will be called to update current vdec's + * swap context + */ + swap_page = other->swap_page; + other->swap_page = me->swap_page; + me->swap_page = swap_page; + + swap_page_phys = other->swap_page_phys; + other->swap_page_phys = me->swap_page_phys; + me->swap_page_phys = swap_page_phys; + + other->swap_rp = me->swap_rp; + other->streaming_rp = me->streaming_rp; + other->stream_cookie = me->stream_cookie; + other->swap_valid = me->swap_valid; +} + +void vdec_vframe_dirty(struct vdec_s *vdec, struct vframe_chunk_s *chunk) +{ + if (chunk) + chunk->flag |= VFRAME_CHUNK_FLAG_CONSUMED; + + if (vdec_stream_based(vdec)) { + vdec->input.swap_needed = true; + + if (vdec_dual(vdec)) { + vdec_get_associate(vdec)->input.dirty_count = 0; + vdec->input.dirty_count++; + if (vdec->input.dirty_count > 1) { + vdec->input.dirty_count = 1; + vdec_borrow_input_context(vdec); + } + } + + /* for stream based mode, we update read and write pointer + * also in case decoder wants to keep working on decoding + * for more frames while input front end has more data + */ + vdec_sync_input_read(vdec); + vdec_sync_input_write(vdec); + + vdec->need_more_data |= VDEC_NEED_MORE_DATA_DIRTY; + vdec->need_more_data &= ~VDEC_NEED_MORE_DATA; + } +} +EXPORT_SYMBOL(vdec_vframe_dirty); + +bool vdec_need_more_data(struct vdec_s *vdec) +{ + if (vdec_stream_based(vdec)) + return vdec->need_more_data & VDEC_NEED_MORE_DATA; + + return false; +} +EXPORT_SYMBOL(vdec_need_more_data); + + +static void hevc_wait_ddr(void) +{ + if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3)) { + hevc_arb_ctrl(0); + } else { + dec_dmc_port_ctrl(0, VDEC_INPUT_TARGET_HEVC); + } +} + +void vdec_save_input_context(struct vdec_s *vdec) +{ + struct vdec_input_s *input = &vdec->input; + +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + vdec_profile(vdec, VDEC_PROFILE_EVENT_SAVE_INPUT); +#endif + + if (input->target == 
VDEC_INPUT_TARGET_VLD) + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1<<15); + + if (input_stream_based(input) && (input->swap_needed)) { + if (input->target == VDEC_INPUT_TARGET_VLD) { + WRITE_VREG(VLD_MEM_SWAP_ADDR, + input->swap_page_phys); + WRITE_VREG(VLD_MEM_SWAP_CTL, 3); + while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7)) + ; + WRITE_VREG(VLD_MEM_SWAP_CTL, 0); + vdec->input.stream_cookie = + READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT); + vdec->input.swap_rp = + READ_VREG(VLD_MEM_VIFIFO_RP); + vdec->input.total_rd_count = + (u64)vdec->input.stream_cookie * + vdec->input.size + vdec->input.swap_rp - + READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL); + } else if (input->target == VDEC_INPUT_TARGET_HEVC) { + WRITE_VREG(HEVC_STREAM_SWAP_ADDR, + input->swap_page_phys); + WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 3); + + while (READ_VREG(HEVC_STREAM_SWAP_CTRL) & (1<<7)) + ; + WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0); + + vdec->input.stream_cookie = + READ_VREG(HEVC_SHIFT_BYTE_COUNT); + vdec->input.swap_rp = + READ_VREG(HEVC_STREAM_RD_PTR); + if (((vdec->input.stream_cookie & 0x80000000) == 0) && + (vdec->input.streaming_rp & 0x80000000)) + vdec->input.streaming_rp += 1ULL << 32; + vdec->input.streaming_rp &= 0xffffffffULL << 32; + vdec->input.streaming_rp |= vdec->input.stream_cookie; + vdec->input.total_rd_count = vdec->input.streaming_rp; + hevc_wait_ddr(); + } + + input->swap_valid = true; + input->swap_needed = false; + /*pr_info("vdec: save context\r\n");*/ + + vdec_sync_input_read(vdec); + + if (vdec_dual(vdec)) { + struct vdec_s *master = (vdec->slave) ? 
+ vdec : vdec->master; + master->input.last_swap_slave = (master->slave == vdec); + /* pr_info("master->input.last_swap_slave = %d\n", + master->input.last_swap_slave); */ + } + } +} +EXPORT_SYMBOL(vdec_save_input_context); + +void vdec_clean_input(struct vdec_s *vdec) +{ + struct vdec_input_s *input = &vdec->input; + + while (!list_empty(&input->vframe_chunk_list)) { + struct vframe_chunk_s *chunk = + vdec_input_next_chunk(input); + if (chunk && (chunk->flag & VFRAME_CHUNK_FLAG_CONSUMED)) + vdec_input_release_chunk(input, chunk); + else + break; + } + vdec_save_input_context(vdec); +} +EXPORT_SYMBOL(vdec_clean_input); + + +static int vdec_input_read_restore(struct vdec_s *vdec) +{ + struct vdec_input_s *input = &vdec->input; + + if (!vdec_stream_based(vdec)) + return 0; + + if (!input->swap_valid) { + if (input->target == VDEC_INPUT_TARGET_VLD) { + WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, + input->start); + WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, + input->start + input->size - 8); + WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR, + input->start); + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1); + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0); + + /* set to manual mode */ + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2); + WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start); + } else if (input->target == VDEC_INPUT_TARGET_HEVC) { + WRITE_VREG(HEVC_STREAM_START_ADDR, + input->start); + WRITE_VREG(HEVC_STREAM_END_ADDR, + input->start + input->size); + WRITE_VREG(HEVC_STREAM_RD_PTR, + input->start); + } + return 0; + } + if (input->target == VDEC_INPUT_TARGET_VLD) { + /* restore read side */ + WRITE_VREG(VLD_MEM_SWAP_ADDR, + input->swap_page_phys); + + /*swap active*/ + WRITE_VREG(VLD_MEM_SWAP_CTL, 1); + + /*wait swap busy*/ + while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7)) + ; + + WRITE_VREG(VLD_MEM_SWAP_CTL, 0); + } else if (input->target == VDEC_INPUT_TARGET_HEVC) { + /* restore read side */ + WRITE_VREG(HEVC_STREAM_SWAP_ADDR, + input->swap_page_phys); + WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1); + + while 
(READ_VREG(HEVC_STREAM_SWAP_CTRL) + & (1<<7)) + ; + WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0); + } + + return 0; +} + + +int vdec_sync_input(struct vdec_s *vdec) +{ + struct vdec_input_s *input = &vdec->input; + u32 rp = 0, wp = 0, fifo_len = 0; + int size; + + vdec_input_read_restore(vdec); + vdec_sync_input_read(vdec); + vdec_sync_input_write(vdec); + if (input->target == VDEC_INPUT_TARGET_VLD) { + rp = READ_VREG(VLD_MEM_VIFIFO_RP); + wp = READ_VREG(VLD_MEM_VIFIFO_WP); + + } else if (input->target == VDEC_INPUT_TARGET_HEVC) { + rp = READ_VREG(HEVC_STREAM_RD_PTR); + wp = READ_VREG(HEVC_STREAM_WR_PTR); + fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL) + >> 16) & 0x7f; + } + if (wp >= rp) + size = wp - rp + fifo_len; + else + size = wp + input->size - rp + fifo_len; + if (size < 0) { + pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n", + __func__, input->size, wp, rp, fifo_len, size); + size = 0; + } + return size; + +} +EXPORT_SYMBOL(vdec_sync_input); + +const char *vdec_status_str(struct vdec_s *vdec) +{ + if (vdec->status < 0) + return "INVALID"; + return vdec->status < ARRAY_SIZE(vdec_status_string) ? 
+ vdec_status_string[vdec->status] : "INVALID"; +} + +const char *vdec_type_str(struct vdec_s *vdec) +{ + switch (vdec->type) { + case VDEC_TYPE_SINGLE: + return "VDEC_TYPE_SINGLE"; + case VDEC_TYPE_STREAM_PARSER: + return "VDEC_TYPE_STREAM_PARSER"; + case VDEC_TYPE_FRAME_BLOCK: + return "VDEC_TYPE_FRAME_BLOCK"; + case VDEC_TYPE_FRAME_CIRCULAR: + return "VDEC_TYPE_FRAME_CIRCULAR"; + default: + return "VDEC_TYPE_INVALID"; + } +} + +const char *vdec_device_name_str(struct vdec_s *vdec) +{ + return vdec_device_name[vdec->format * 2 + 1]; +} +EXPORT_SYMBOL(vdec_device_name_str); + +void walk_vdec_core_list(char *s) +{ + struct vdec_s *vdec; + struct vdec_core_s *core = vdec_core; + unsigned long flags; + + pr_info("%s --->\n", s); + + flags = vdec_core_lock(vdec_core); + + if (list_empty(&core->connected_vdec_list)) { + pr_info("connected vdec list empty\n"); + } else { + list_for_each_entry(vdec, &core->connected_vdec_list, list) { + pr_info("\tvdec (%p), status = %s\n", vdec, + vdec_status_str(vdec)); + } + } + + vdec_core_unlock(vdec_core, flags); +} +EXPORT_SYMBOL(walk_vdec_core_list); + +/* insert vdec to vdec_core for scheduling, + * for dual running decoders, connect/disconnect always runs in pairs + */ +int vdec_connect(struct vdec_s *vdec) +{ + unsigned long flags; + + //trace_vdec_connect(vdec);/*DEBUG_TMP*/ + + if (vdec->status != VDEC_STATUS_DISCONNECTED) + return 0; + + vdec_set_status(vdec, VDEC_STATUS_CONNECTED); + vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED); + + init_completion(&vdec->inactive_done); + + if (vdec->slave) { + vdec_set_status(vdec->slave, VDEC_STATUS_CONNECTED); + vdec_set_next_status(vdec->slave, VDEC_STATUS_CONNECTED); + + init_completion(&vdec->slave->inactive_done); + } + + flags = vdec_core_lock(vdec_core); + + list_add_tail(&vdec->list, &vdec_core->connected_vdec_list); + + if (vdec->slave) { + list_add_tail(&vdec->slave->list, + &vdec_core->connected_vdec_list); + } + + vdec_core_unlock(vdec_core, flags); + + 
up(&vdec_core->sem); + + return 0; +} +EXPORT_SYMBOL(vdec_connect); + +/* remove vdec from vdec_core scheduling */ +int vdec_disconnect(struct vdec_s *vdec) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + vdec_profile(vdec, VDEC_PROFILE_EVENT_DISCONNECT); +#endif + //trace_vdec_disconnect(vdec);/*DEBUG_TMP*/ + + if ((vdec->status != VDEC_STATUS_CONNECTED) && + (vdec->status != VDEC_STATUS_ACTIVE)) { + return 0; + } + mutex_lock(&vdec_mutex); + /* + *when a vdec is under the management of scheduler + * the status change will only be from vdec_core_thread + */ + vdec_set_next_status(vdec, VDEC_STATUS_DISCONNECTED); + + if (vdec->slave) + vdec_set_next_status(vdec->slave, VDEC_STATUS_DISCONNECTED); + else if (vdec->master) + vdec_set_next_status(vdec->master, VDEC_STATUS_DISCONNECTED); + mutex_unlock(&vdec_mutex); + up(&vdec_core->sem); + + if(!wait_for_completion_timeout(&vdec->inactive_done, + msecs_to_jiffies(2000))) + goto discon_timeout; + + if (vdec->slave) { + if(!wait_for_completion_timeout(&vdec->slave->inactive_done, + msecs_to_jiffies(2000))) + goto discon_timeout; + } else if (vdec->master) { + if(!wait_for_completion_timeout(&vdec->master->inactive_done, + msecs_to_jiffies(2000))) + goto discon_timeout; + } + + return 0; +discon_timeout: + pr_err("%s timeout!!! 
status: 0x%x force it to 2\n", __func__, vdec->status); + if (vdec->status == VDEC_STATUS_ACTIVE) { + if (vdec->input.target == VDEC_INPUT_TARGET_VLD) { + amvdec_stop(); + WRITE_VREG(ASSIST_MBOX1_MASK, 0); + vdec_free_irq(VDEC_IRQ_1, NULL); + } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) { + amhevc_stop(); + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0); + vdec_free_irq(VDEC_IRQ_0, NULL); + } + } + + return 0; +} +EXPORT_SYMBOL(vdec_disconnect); + +/* release vdec structure */ +int vdec_destroy(struct vdec_s *vdec) +{ + //trace_vdec_destroy(vdec);/*DEBUG_TMP*/ + + vdec_input_release(&vdec->input); + +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + vdec_profile_flush(vdec); +#endif + ida_simple_remove(&vdec_core->ida, vdec->id); + if (vdec->mvfrm) + vfree(vdec->mvfrm); + vfree(vdec); + +#ifdef CONFIG_AMLOGIC_V4L_VIDEO3 + v4lvideo_dec_count_decrease(); +#endif + atomic_dec(&vdec_core->vdec_nr); + + return 0; +} +EXPORT_SYMBOL(vdec_destroy); + +static bool is_tunnel_pipeline(u32 pl) +{ + return ((pl & BIT(FRAME_BASE_PATH_DTV_TUNNEL_MODE)) || + (pl & BIT(FRAME_BASE_PATH_AMLVIDEO_AMVIDEO))) ? + true : false; +} + +static bool is_res_locked(u32 pre, u32 cur) +{ + return is_tunnel_pipeline(pre) ? + (is_tunnel_pipeline(cur) ? true : false) : false; +} + +int vdec_resource_checking(struct vdec_s *vdec) +{ + /* + * If it is the single instance that the pipeline of DTV used, + * then have to check that the resources which is belong tunnel + * pipeline these are being released. 
+ */ + ulong expires = jiffies + msecs_to_jiffies(2000); + + while (is_res_locked(vdec_core->vdec_resouce_status, + BIT(vdec->frame_base_video_path))) { + if (time_after(jiffies, expires)) { + pr_err("wait vdec resource timeout.\n"); + return -EBUSY; + } + schedule(); + } + + return 0; +} +EXPORT_SYMBOL(vdec_resource_checking); + +/* + *register vdec_device + * create output, vfm or create ionvideo output + */ +s32 vdec_init(struct vdec_s *vdec, int is_4k, bool is_v4l) +{ + int r = 0; + struct vdec_s *p = vdec; + const char *pdev_name; + char dev_name[32] = {0}; + int id = PLATFORM_DEVID_AUTO;/*if have used my self*/ + int max_di_count = max_di_instance; + char postprocess_name[64] = {0}; + if (vdec_stream_based(vdec)) + max_di_count = max_supported_di_instance; + vdec->is_v4l = is_v4l ? 1 : 0; + if (is_res_locked(vdec_core->vdec_resouce_status, + BIT(vdec->frame_base_video_path))) + return -EBUSY; + + //pr_err("%s [pid=%d,tgid=%d]\n", __func__, current->pid, current->tgid); + pdev_name = get_dev_name(vdec_single(vdec), vdec->format); + if (pdev_name == NULL) + return -ENODEV; + + snprintf(dev_name, sizeof(dev_name), + "%s%s", pdev_name, is_v4l ? "_v4l": ""); + + pr_info("vdec_init, dev_name:%s, vdec_type=%s, format: %d\n", + dev_name, vdec_type_str(vdec), vdec->format); + + snprintf(vdec->name, sizeof(vdec->name), + "vdec-%d", vdec->id); + snprintf(vdec->dec_spend_time, sizeof(vdec->dec_spend_time), + "%s-dec_spend_time", vdec->name); + snprintf(vdec->dec_spend_time_ave, sizeof(vdec->dec_spend_time_ave), + "%s-dec_spend_time_ave", vdec->name); + + /* + *todo: VFM patch control should be configurable, + * for now all stream based input uses default VFM path. 
+ */ + if (!is_support_no_parser()) { + if (vdec_stream_based(vdec) && !vdec_dual(vdec)) { + if (vdec_core->vfm_vdec == NULL) { + pr_debug("vdec_init set vfm decoder %p\n", vdec); + vdec_core->vfm_vdec = vdec; + } else { + pr_info("vdec_init vfm path busy.\n"); + return -EBUSY; + } + } + } + + mutex_lock(&vdec_mutex); + inited_vcodec_num++; + mutex_unlock(&vdec_mutex); + + vdec_input_set_type(&vdec->input, vdec->type, + (vdec->format == VFORMAT_HEVC || + vdec->format == VFORMAT_AVS2 || + vdec->format == VFORMAT_VP9 || + vdec->format == VFORMAT_AV1 + ) ? + VDEC_INPUT_TARGET_HEVC : + VDEC_INPUT_TARGET_VLD); + if (vdec_single(vdec) || (vdec_get_debug_flags() & 0x2)) + vdec_enable_DMC(vdec); + p->cma_dev = vdec_core->cma_dev; + + vdec_canvas_port_register(vdec); + + p->vdec_fps_detec = vdec_fps_detec; + /* todo */ + if (!vdec_dual(vdec)) { + p->use_vfm_path = + is_support_no_parser() ? + vdec_single(vdec) : + vdec_stream_based(vdec); + } + + if (debugflags & (1 << 29)) + p->is_stream_mode_dv_multi = true; + else + p->is_stream_mode_dv_multi = false; + + if (debugflags & 0x4) + p->use_vfm_path = 1; + /* vdec_dev_reg.flag = 0; */ + if (vdec->id >= 0) + id = vdec->id; + p->parallel_dec = parallel_decode; + p->prog_only = prog_only; + + vdec_core->parallel_dec = parallel_decode; + vdec->canvas_mode = CANVAS_BLKMODE_32X32; +#ifdef FRAME_CHECK + vdec_frame_check_init(vdec); +#endif + /* stream buffer init. 
*/ + if (vdec->vbuf.ops && !vdec->master) { + r = vdec->vbuf.ops->init(&vdec->vbuf, vdec); + if (r) { + pr_err("%s stream buffer init err (%d)\n", dev_name, r); + + mutex_lock(&vdec_mutex); + inited_vcodec_num--; + mutex_unlock(&vdec_mutex); + + goto error; + } + + if (vdec->slave) { + memcpy(&vdec->slave->vbuf, &vdec->vbuf, + sizeof(vdec->vbuf)); + } + } + + p->dev = platform_device_register_data( + &vdec_core->vdec_core_platform_device->dev, + dev_name, + id, + &p, sizeof(struct vdec_s *)); + + if (IS_ERR(p->dev)) { + r = PTR_ERR(p->dev); + pr_err("vdec: Decoder device %s register failed (%d)\n", + dev_name, r); + + mutex_lock(&vdec_mutex); + inited_vcodec_num--; + mutex_unlock(&vdec_mutex); + + goto error; + } else if (!p->dev->dev.driver) { + pr_info("vdec: Decoder device %s driver probe failed.\n", + dev_name); + r = -ENODEV; + + goto error; + } + + if ((p->type == VDEC_TYPE_FRAME_BLOCK) && (p->run == NULL)) { + r = -ENODEV; + pr_err("vdec: Decoder device not handled (%s)\n", dev_name); + + mutex_lock(&vdec_mutex); + inited_vcodec_num--; + mutex_unlock(&vdec_mutex); + + goto error; + } + + if (p->use_vfm_path) { + vdec->vf_receiver_inst = -1; + vdec->vfm_map_id[0] = 0; + } else if (!vdec_dual(vdec) && !vdec->disable_vfm) { + /* create IONVIDEO instance and connect decoder's + * vf_provider interface to it + */ + if (!is_support_no_parser()) { + if (p->type != VDEC_TYPE_FRAME_BLOCK) { + r = -ENODEV; + pr_err("vdec: Incorrect decoder type\n"); + + mutex_lock(&vdec_mutex); + inited_vcodec_num--; + mutex_unlock(&vdec_mutex); + + goto error; + } + } + + if (strncmp("disable", vfm_path, strlen("disable"))) { + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s", vdec->vf_provider_name, vfm_path); + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == FRAME_BASE_PATH_IONVIDEO) { +#if 1 + r = ionvideo_assign_map(&vdec->vf_receiver_name, + &vdec->vf_receiver_inst); +#else + /* + * temporarily just 
use decoder instance ID as iondriver ID + * to solve OMX iondriver instance number check time sequence + * only the limitation is we can NOT mix different video + * decoders since same ID will be used for different decoder + * formats. + */ + vdec->vf_receiver_inst = p->dev->id; + r = ionvideo_assign_map(&vdec->vf_receiver_name, + &vdec->vf_receiver_inst); +#endif + if (r < 0) { + pr_err("IonVideo frame receiver allocation failed.\n"); + + mutex_lock(&vdec_mutex); + inited_vcodec_num--; + mutex_unlock(&vdec_mutex); + + goto error; + } + + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s", vdec->vf_provider_name, + vdec->vf_receiver_name); + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == + FRAME_BASE_PATH_AMLVIDEO_AMVIDEO) { + if (vdec_secure(vdec)) { + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s", vdec->vf_provider_name, + "amlvideo amvideo"); + } else { + if (debug_vdetect) + snprintf(vdec->vfm_map_chain, + VDEC_MAP_NAME_SIZE, + "%s vdetect.0 %s", + vdec->vf_provider_name, + "amlvideo ppmgr deinterlace amvideo"); + else + snprintf(vdec->vfm_map_chain, + VDEC_MAP_NAME_SIZE, "%s %s", + vdec->vf_provider_name, + "amlvideo ppmgr deinterlace amvideo"); + } + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == + FRAME_BASE_PATH_AMLVIDEO1_AMVIDEO2) { + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s", vdec->vf_provider_name, + "aml_video.1 videosync.0 videopip"); + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_OSD) { + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s", vdec->vf_provider_name, + vdec->vf_receiver_name); + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == FRAME_BASE_PATH_TUNNEL_MODE) { + snprintf(vdec->vfm_map_chain, 
VDEC_MAP_NAME_SIZE, + "%s %s", vdec->vf_provider_name, + "amvideo"); + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == FRAME_BASE_PATH_PIP_TUNNEL_MODE) { + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s", vdec->vf_provider_name, + "videosync.0 videopip"); + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_VIDEO) { + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s %s", vdec->vf_provider_name, + vdec->vf_receiver_name, "amvideo"); + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == + FRAME_BASE_PATH_DI_V4LVIDEO) { +#ifdef CONFIG_AMLOGIC_V4L_VIDEO3 + r = v4lvideo_assign_map(&vdec->vf_receiver_name, + &vdec->vf_receiver_inst); +#else + r = -1; +#endif + if (r < 0) { + pr_err("V4lVideo frame receiver allocation failed.\n"); + mutex_lock(&vdec_mutex); + inited_vcodec_num--; + mutex_unlock(&vdec_mutex); + goto error; + } + if (v4lvideo_add_ppmgr) + snprintf(postprocess_name, sizeof(postprocess_name), + "%s ", "ppmgr"); + if (debug_vdetect && (vdec->vf_receiver_inst == 0)) + snprintf(postprocess_name + strlen(postprocess_name), sizeof(postprocess_name), + "%s ", "vdetect.0"); + /* 8K remove di */ + if ((vdec->sys_info->width * vdec->sys_info->height > (4096 * 2304)) + || (!v4lvideo_add_di)) + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s%s", vdec->vf_provider_name, + postprocess_name, + vdec->vf_receiver_name); + else { + if ((vdec->vf_receiver_inst == 0) + && (max_di_count > 0)) + if (max_di_count == 1) + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s%s %s", vdec->vf_provider_name, + postprocess_name, + "deinterlace", + vdec->vf_receiver_name); + else + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s%s %s", vdec->vf_provider_name, + postprocess_name, + "dimulti.1", + vdec->vf_receiver_name); + 
else if ((vdec->vf_receiver_inst < + max_di_count) && + (vdec->vf_receiver_inst == 1)) + snprintf(vdec->vfm_map_chain, + VDEC_MAP_NAME_SIZE, + "%s %s %s", + vdec->vf_provider_name, + "deinterlace", + vdec->vf_receiver_name); + else if (vdec->vf_receiver_inst < + max_di_count) + snprintf(vdec->vfm_map_chain, + VDEC_MAP_NAME_SIZE, + "%s %s%d %s", + vdec->vf_provider_name, + "dimulti.", + vdec->vf_receiver_inst, + vdec->vf_receiver_name); + else + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s", vdec->vf_provider_name, + vdec->vf_receiver_name); + } + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == + FRAME_BASE_PATH_V4LVIDEO) { +#ifdef CONFIG_AMLOGIC_V4L_VIDEO3 + r = v4lvideo_assign_map(&vdec->vf_receiver_name, + &vdec->vf_receiver_inst); +#else + r = -1; +#endif + if (r < 0) { + pr_err("V4lVideo frame receiver allocation failed.\n"); + mutex_lock(&vdec_mutex); + inited_vcodec_num--; + mutex_unlock(&vdec_mutex); + goto error; + } + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s", vdec->vf_provider_name, + vdec->vf_receiver_name); + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == + FRAME_BASE_PATH_DTV_TUNNEL_MODE) { + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s deinterlace %s", vdec->vf_provider_name, + "amvideo"); + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == + FRAME_BASE_PATH_AMLVIDEO_FENCE) { + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s", vdec->vf_provider_name, + "amlvideo amvideo"); + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } else if (p->frame_base_video_path == + FRAME_BASE_PATH_V4LVIDEO_FENCE) { +#ifdef CONFIG_AMLOGIC_V4L_VIDEO3 + r = v4lvideo_assign_map(&vdec->vf_receiver_name, + &vdec->vf_receiver_inst); +#else + r = -1; +#endif + if (r < 0) { + pr_err("V4lVideo frame 
receiver allocation failed.\n"); + mutex_lock(&vdec_mutex); + inited_vcodec_num--; + mutex_unlock(&vdec_mutex); + goto error; + } + + snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE, + "%s %s", vdec->vf_provider_name, + vdec->vf_receiver_name); + snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE, + "vdec-map-%d", vdec->id); + } + + if (vfm_map_add(vdec->vfm_map_id, + vdec->vfm_map_chain) < 0) { + r = -ENOMEM; + pr_err("Decoder pipeline map creation failed %s.\n", + vdec->vfm_map_id); + vdec->vfm_map_id[0] = 0; + + mutex_lock(&vdec_mutex); + inited_vcodec_num--; + mutex_unlock(&vdec_mutex); + + goto error; + } + + pr_debug("vfm map %s created\n", vdec->vfm_map_id); + + /* + *assume IONVIDEO driver already have a few vframe_receiver + * registered. + * 1. Call iondriver function to allocate a IONVIDEO path and + * provide receiver's name and receiver op. + * 2. Get decoder driver's provider name from driver instance + * 3. vfm_map_add(name, "<decoder provider name> + * <iondriver receiver name>"), e.g. + * vfm_map_add("vdec_ion_map_0", "mpeg4_0 iondriver_1"); + * 4. 
vf_reg_provider and vf_reg_receiver + * Note: the decoder provider's op uses vdec as op_arg + * the iondriver receiver's op uses iondev device as + * op_arg + */ + + } + + if (!vdec_single(vdec) && !vdec->disable_vfm) { + vf_reg_provider(&p->vframe_provider); + + vf_notify_receiver(p->vf_provider_name, + VFRAME_EVENT_PROVIDER_START, + vdec); + + if (vdec_core->hint_fr_vdec == NULL) + vdec_core->hint_fr_vdec = vdec; + + if (vdec_core->hint_fr_vdec == vdec) { + if (p->sys_info->rate != 0) { + if (!vdec->is_reset) { + vf_notify_receiver(p->vf_provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long) + p->sys_info->rate)); + vdec->fr_hint_state = VDEC_HINTED; + } + } else { + vdec->fr_hint_state = VDEC_NEED_HINT; + } + } + } + + p->dolby_meta_with_el = 0; + pr_debug("vdec_init, vf_provider_name = %s, b %d\n", + p->vf_provider_name, is_cpu_tm2_revb()); + + mutex_lock(&vdec_mutex); + vdec_core->vdec_resouce_status |= BIT(p->frame_base_video_path); + mutex_unlock(&vdec_mutex); + + vdec_input_prepare_bufs(/*prepared buffer for fast playing.*/ + &vdec->input, + vdec->sys_info->width, + vdec->sys_info->height); + /* vdec is now ready to be active */ + vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED); + return 0; + +error: + return r; +} +EXPORT_SYMBOL(vdec_init); + +/* + *Remove the vdec after timeout happens both in vdec_disconnect + *and platform_device_unregister. Then after, we can release the vdec. 
+ */ +static void vdec_connect_list_force_clear(struct vdec_core_s *core, struct vdec_s *v_ref) +{ + struct vdec_s *vdec, *tmp; + unsigned long flags; + + flags = vdec_core_lock(core); + + list_for_each_entry_safe(vdec, tmp, + &core->connected_vdec_list, list) { + if ((vdec->status == VDEC_STATUS_DISCONNECTED) && + (vdec == v_ref)) { + pr_err("%s, vdec = %p, active vdec = %p\n", + __func__, vdec, core->active_vdec); + if (v_ref->active_mask) + core->sched_mask &= ~v_ref->active_mask; + if (core->active_vdec == v_ref) + core->active_vdec = NULL; + if (core->active_hevc == v_ref) + core->active_hevc = NULL; + if (core->last_vdec == v_ref) + core->last_vdec = NULL; + list_del(&vdec->list); + } + } + + vdec_core_unlock(core, flags); +} + +st_userdata *get_vdec_userdata_ctx() +{ + return &userdata; +} +EXPORT_SYMBOL(get_vdec_userdata_ctx); + +static void vdec_userdata_ctx_release(struct vdec_s *vdec) +{ + int i; + st_userdata *userdata = get_vdec_userdata_ctx(); + + mutex_lock(&userdata->mutex); + + for (i = 0; i < MAX_USERDATA_CHANNEL_NUM; i++) { + if (userdata->used[i] == 1 && vdec->video_id != 0xffffffff) { + if (vdec_get_debug_flags() & 0x10000000) + pr_info("ctx_release i: %d userdata.id %d\n", + i, userdata->id[i]); + userdata->ready_flag[i] = 0; + userdata->id[i] = -1; + userdata->used[i] = 0; + userdata->set_id_flag = 0; + } + } + + mutex_unlock(&userdata->mutex); + + return; +} + +/* vdec_create/init/release/destroy are applied to both dual running decoders + */ +void vdec_release(struct vdec_s *vdec) +{ + u32 wcount = 0; + + //trace_vdec_release(vdec);/*DEBUG_TMP*/ +#ifdef VDEC_DEBUG_SUPPORT + if (step_mode) { + pr_info("VDEC_DEBUG: in step_mode, wait release\n"); + while (step_mode) + udelay(10); + pr_info("VDEC_DEBUG: step_mode is clear\n"); + } +#endif + /* When release, userspace systemctl need this duration 0 event */ + vdec_frame_rate_uevent(0); + vdec_disconnect(vdec); + + if (!vdec->disable_vfm && vdec->vframe_provider.name) { + if (!vdec_single(vdec)) 
{ + if (vdec_core->hint_fr_vdec == vdec + && vdec->fr_hint_state == VDEC_HINTED) + vf_notify_receiver( + vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + vdec->fr_hint_state = VDEC_NO_NEED_HINT; + } + vf_unreg_provider(&vdec->vframe_provider); + } + + if (vdec_core->vfm_vdec == vdec) + vdec_core->vfm_vdec = NULL; + + if (vdec_core->hint_fr_vdec == vdec) + vdec_core->hint_fr_vdec = NULL; + + if (vdec->vf_receiver_inst >= 0) { + if (vdec->vfm_map_id[0]) { + vfm_map_remove(vdec->vfm_map_id); + vdec->vfm_map_id[0] = 0; + } + } + + while (vdec->irq_cnt > vdec->irq_thread_cnt) { + if ((wcount & 0x1f) == 0) + pr_debug("%s vdec[%lx]: %lld > %lld, loop %u times\n",__func__, (unsigned long)vdec, + vdec->irq_cnt,vdec->irq_thread_cnt, wcount); + /* + * Wait at most 2000 ms. + * In suspend scenario, the system may disable thread_fn, + * thus can NOT always waiting the thread_fn happen + */ + if (++wcount > 1000) + break; + usleep_range(1000, 2000); + } + +#ifdef FRAME_CHECK + vdec_frame_check_exit(vdec); +#endif + vdec_fps_clear(vdec->id); + if (atomic_read(&vdec_core->vdec_nr) == 1) + vdec_disable_DMC(vdec); + platform_device_unregister(vdec->dev); + /*Check if the vdec still in connected list, if yes, delete it*/ + vdec_connect_list_force_clear(vdec_core, vdec); + + if (vdec->vbuf.ops && !vdec->master) + vdec->vbuf.ops->release(&vdec->vbuf); + + vdec_userdata_ctx_release(vdec); + + pr_debug("vdec_release instance %p, total %d\n", vdec, + atomic_read(&vdec_core->vdec_nr)); + + mutex_lock(&vdec_mutex); + vdec_core->vdec_resouce_status &= ~BIT(vdec->frame_base_video_path); + mutex_unlock(&vdec_mutex); + vdec_destroy(vdec); + + mutex_lock(&vdec_mutex); + inited_vcodec_num--; + mutex_unlock(&vdec_mutex); + +} +EXPORT_SYMBOL(vdec_release); + +/* For dual running decoders, vdec_reset is only called with master vdec. 
+ */ +int vdec_reset(struct vdec_s *vdec) +{ + //trace_vdec_reset(vdec); /*DEBUG_TMP*/ + + vdec_disconnect(vdec); + + if (!vdec->disable_vfm) { + if (vdec->vframe_provider.name) + vf_unreg_provider(&vdec->vframe_provider); + + if ((vdec->slave) && (vdec->slave->vframe_provider.name)) + vf_unreg_provider(&vdec->slave->vframe_provider); + } + + if (vdec->reset) { + vdec->reset(vdec); + if (vdec->slave) + vdec->slave->reset(vdec->slave); + } + vdec->mc_loaded = 0;/*clear for reload firmware*/ + vdec_input_release(&vdec->input); + + vdec_input_init(&vdec->input, vdec); + + vdec_input_prepare_bufs(&vdec->input, vdec->sys_info->width, + vdec->sys_info->height); + + if (!vdec->disable_vfm) { + vf_reg_provider(&vdec->vframe_provider); + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_START, vdec); + + if (vdec->slave) { + vf_reg_provider(&vdec->slave->vframe_provider); + vf_notify_receiver(vdec->slave->vf_provider_name, + VFRAME_EVENT_PROVIDER_START, vdec->slave); + vdec->slave->mc_loaded = 0;/*clear for reload firmware*/ + } + } + + vdec_connect(vdec); + + return 0; +} +EXPORT_SYMBOL(vdec_reset); + +int vdec_v4l2_reset(struct vdec_s *vdec, int flag) +{ + //trace_vdec_reset(vdec); /*DEBUG_TMP*/ + pr_debug("vdec_v4l2_reset %d\n", flag); + vdec_disconnect(vdec); + if (flag != 2) { + if (!vdec->disable_vfm) { + if (vdec->vframe_provider.name) + vf_unreg_provider(&vdec->vframe_provider); + + if ((vdec->slave) && (vdec->slave->vframe_provider.name)) + vf_unreg_provider(&vdec->slave->vframe_provider); + } + + if (vdec->reset) { + vdec->reset(vdec); + if (vdec->slave) + vdec->slave->reset(vdec->slave); + } + vdec->mc_loaded = 0;/*clear for reload firmware*/ + + vdec_input_release(&vdec->input); + + vdec_input_init(&vdec->input, vdec); + + vdec_input_prepare_bufs(&vdec->input, vdec->sys_info->width, + vdec->sys_info->height); + + if (!vdec->disable_vfm) { + vf_reg_provider(&vdec->vframe_provider); + vf_notify_receiver(vdec->vf_provider_name, + 
VFRAME_EVENT_PROVIDER_START, vdec); + + if (vdec->slave) { + vf_reg_provider(&vdec->slave->vframe_provider); + vf_notify_receiver(vdec->slave->vf_provider_name, + VFRAME_EVENT_PROVIDER_START, vdec->slave); + vdec->slave->mc_loaded = 0;/*clear for reload firmware*/ + } + } + } else { + if (vdec->reset) { + vdec->reset(vdec); + if (vdec->slave) + vdec->slave->reset(vdec->slave); + } + } + + vdec_connect(vdec); + + return 0; +} +EXPORT_SYMBOL(vdec_v4l2_reset); + +void vdec_free_cmabuf(void) +{ + mutex_lock(&vdec_mutex); + + /*if (inited_vcodec_num > 0) { + mutex_unlock(&vdec_mutex); + return; + }*/ + mutex_unlock(&vdec_mutex); +} + +void vdec_core_request(struct vdec_s *vdec, unsigned long mask) +{ + vdec->core_mask |= mask; + + if (vdec->slave) + vdec->slave->core_mask |= mask; + if (vdec_core->parallel_dec == 1) { + if (mask & CORE_MASK_COMBINE) + vdec_core->vdec_combine_flag++; + } + +} +EXPORT_SYMBOL(vdec_core_request); + +int vdec_core_release(struct vdec_s *vdec, unsigned long mask) +{ + vdec->core_mask &= ~mask; + + if (vdec->slave) + vdec->slave->core_mask &= ~mask; + if (vdec_core->parallel_dec == 1) { + if (mask & CORE_MASK_COMBINE) + vdec_core->vdec_combine_flag--; + } + return 0; +} +EXPORT_SYMBOL(vdec_core_release); + +bool vdec_core_with_input(unsigned long mask) +{ + enum vdec_type_e type; + + for (type = VDEC_1; type < VDEC_MAX; type++) { + if ((mask & (1 << type)) && cores_with_input[type]) + return true; + } + + return false; +} + +void vdec_core_finish_run(struct vdec_s *vdec, unsigned long mask) +{ + unsigned long i; + unsigned long t = mask; + mutex_lock(&vdec_mutex); + while (t) { + i = __ffs(t); + clear_bit(i, &vdec->active_mask); + t &= ~(1 << i); + } + + if (vdec->active_mask == 0) { + vdec_set_status(vdec, VDEC_STATUS_CONNECTED); + wake_up_interruptible(&vdec->idle_wait); + } + + mutex_unlock(&vdec_mutex); +} +EXPORT_SYMBOL(vdec_core_finish_run); +/* + * find what core resources are available for vdec + */ +static unsigned long 
vdec_schedule_mask(struct vdec_s *vdec, + unsigned long active_mask) +{ + unsigned long mask = vdec->core_mask & + ~CORE_MASK_COMBINE; + + if (vdec->core_mask & CORE_MASK_COMBINE) { + /* combined cores must be granted together */ + if ((mask & ~active_mask) == mask) + return mask; + else + return 0; + } else + return mask & ~vdec->sched_mask & ~active_mask; +} + +/* + *Decoder callback + * Each decoder instance uses this callback to notify status change, e.g. when + * decoder finished using HW resource. + * a sample callback from decoder's driver is following: + * + * if (hw->vdec_cb) { + * vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED); + * hw->vdec_cb(vdec, hw->vdec_cb_arg); + * } + */ +static void vdec_callback(struct vdec_s *vdec, void *data) +{ + struct vdec_core_s *core = (struct vdec_core_s *)data; + +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + vdec_profile(vdec, VDEC_PROFILE_EVENT_CB); +#endif + + up(&core->sem); +} + +static irqreturn_t vdec_isr(int irq, void *dev_id) +{ + struct vdec_isr_context_s *c = + (struct vdec_isr_context_s *)dev_id; + struct vdec_s *vdec = vdec_core->last_vdec; + irqreturn_t ret = IRQ_HANDLED; + + if (vdec_core->parallel_dec == 1) { + if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq) + vdec = vdec_core->active_hevc; + else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq) + vdec = vdec_core->active_vdec; + else + vdec = NULL; + } + + if (c->dev_isr) { + ret = c->dev_isr(irq, c->dev_id); + goto isr_done; + } + + if ((c != &vdec_core->isr_context[VDEC_IRQ_0]) && + (c != &vdec_core->isr_context[VDEC_IRQ_1]) && + (c != &vdec_core->isr_context[VDEC_IRQ_HEVC_BACK])) { +#if 0 + pr_warn("vdec interrupt w/o a valid receiver\n"); +#endif + goto isr_done; + } + + if (!vdec) { +#if 0 + pr_warn("vdec interrupt w/o an active instance running. 
core = %p\n", + core); +#endif + goto isr_done; + } + + if (!vdec->irq_handler) { +#if 0 + pr_warn("vdec instance has no irq handle.\n"); +#endif + goto isr_done; + } + + ret = vdec->irq_handler(vdec, c->index); +isr_done: + if (vdec && ret == IRQ_WAKE_THREAD) + vdec->irq_cnt++; + + return ret; +} + +static irqreturn_t vdec_thread_isr(int irq, void *dev_id) +{ + struct vdec_isr_context_s *c = + (struct vdec_isr_context_s *)dev_id; + struct vdec_s *vdec = vdec_core->last_vdec; + irqreturn_t ret = IRQ_HANDLED; + + if (vdec_core->parallel_dec == 1) { + if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq) + vdec = vdec_core->active_hevc; + else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq) + vdec = vdec_core->active_vdec; + else + vdec = NULL; + } + + if (c->dev_threaded_isr) { + ret = c->dev_threaded_isr(irq, c->dev_id); + goto thread_isr_done; + } + if (!vdec) + goto thread_isr_done; + + if (!vdec->threaded_irq_handler) + goto thread_isr_done; + ret = vdec->threaded_irq_handler(vdec, c->index); +thread_isr_done: + if (vdec) + vdec->irq_thread_cnt++; + return ret; +} + +int vdec_check_rec_num_enough(struct vdec_s *vdec) { + + if (vdec->vbuf.use_ptsserv) { + return (pts_get_rec_num(PTS_TYPE_VIDEO, + vdec->input.total_rd_count) >= 2); + } else { + u64 total_rd_count = vdec->input.total_rd_count; + + if (vdec->input.target == VDEC_INPUT_TARGET_VLD) { + //total_rd_count -= vdec->input.start; + /*just like use ptsserv, alway return true*/ + return 1; + } + if ((total_rd_count >= vdec->vbuf.last_offset[0]) && + (total_rd_count - vdec->vbuf.last_offset[0] < 0x80000000)) + return 0; + else if ((total_rd_count >= vdec->vbuf.last_offset[1]) && + (total_rd_count - vdec->vbuf.last_offset[1] < 0x80000000)) + return 0; + + return 1; + } +} + +unsigned long vdec_ready_to_run(struct vdec_s *vdec, unsigned long mask) +{ + unsigned long ready_mask; + struct vdec_input_s *input = &vdec->input; + + /* Wait the matching irq_thread finished */ + if (vdec->irq_cnt > vdec->irq_thread_cnt) 
+ return false; + + if ((vdec->status != VDEC_STATUS_CONNECTED) && + (vdec->status != VDEC_STATUS_ACTIVE)) + return false; + + if (!vdec->run_ready) + return false; + + /* when crc32 error, block at error frame */ + if (vdec->vfc.err_crc_block) + return false; + + if ((vdec->slave || vdec->master) && + (vdec->sched == 0)) + return false; +#ifdef VDEC_DEBUG_SUPPORT + inc_profi_count(mask, vdec->check_count); +#endif + if (vdec_core_with_input(mask)) { + /* check frame based input underrun */ + if (input && !input->eos && input_frame_based(input) + && (!vdec_input_next_chunk(input))) { +#ifdef VDEC_DEBUG_SUPPORT + inc_profi_count(mask, vdec->input_underrun_count); +#endif + return false; + } + /* check streaming prepare level threshold if not EOS */ + if (input && input_stream_based(input) && !input->eos) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = input->size + wp - rp; + else + level = wp - rp; + + if ((level < input->prepare_level) && + !vdec_check_rec_num_enough(vdec)) { + vdec->need_more_data |= VDEC_NEED_MORE_DATA; +#ifdef VDEC_DEBUG_SUPPORT + inc_profi_count(mask, vdec->input_underrun_count); + if (step_mode & 0x200) { + if ((step_mode & 0xff) == vdec->id) { + step_mode |= 0xff; + return mask; + } + } +#endif + return false; + } else if (level > input->prepare_level) + vdec->need_more_data &= ~VDEC_NEED_MORE_DATA; + } + } + + if (step_mode) { + if ((step_mode & 0xff) != vdec->id) + return 0; + step_mode |= 0xff; /*VDEC_DEBUG_SUPPORT*/ + } + + /*step_mode &= ~0xff; not work for id of 0, removed*/ + +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + vdec_profile(vdec, VDEC_PROFILE_EVENT_CHK_RUN_READY); +#endif + + ready_mask = vdec->run_ready(vdec, mask) & mask; +#ifdef VDEC_DEBUG_SUPPORT + if (ready_mask != mask) + inc_profi_count(ready_mask ^ mask, vdec->not_run_ready_count); +#endif +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + if (ready_mask) + vdec_profile(vdec, 
VDEC_PROFILE_EVENT_RUN_READY); +#endif + + return ready_mask; +} + +/* bridge on/off vdec's interrupt processing to vdec core */ +static void vdec_route_interrupt(struct vdec_s *vdec, unsigned long mask, + bool enable) +{ + enum vdec_type_e type; + + for (type = VDEC_1; type < VDEC_MAX; type++) { + if (mask & (1 << type)) { + struct vdec_isr_context_s *c = + &vdec_core->isr_context[cores_int[type]]; + if (enable) + c->vdec = vdec; + else if (c->vdec == vdec) + c->vdec = NULL; + } + } +} + +/* + * Set up secure protection for each decoder instance running. + * Note: The operation from REE side only resets memory access + * to a default policy and even a non_secure type will still be + * changed to secure type automatically when secure source is + * detected inside TEE. + * Perform need_more_data checking and set flag is decoder + * is not consuming data. + */ +#define DMC_DEV_TYPE_NON_SECURE 0 +#define DMC_DEV_TYPE_SECURE 1 + +void vdec_prepare_run(struct vdec_s *vdec, unsigned long mask) +{ + struct vdec_input_s *input = &vdec->input; + int secure = (vdec_secure(vdec)) ? DMC_DEV_TYPE_SECURE : + DMC_DEV_TYPE_NON_SECURE; + + vdec_route_interrupt(vdec, mask, true); + + if (!vdec_core_with_input(mask)) + return; + + if (vdec_stream_based(vdec) && !vdec_secure(vdec)) + { + tee_config_device_state(DMC_DEV_ID_PARSER, 0); + } + if (input->target == VDEC_INPUT_TARGET_VLD) + tee_config_device_state(DMC_DEV_ID_VDEC, secure); + else if (input->target == VDEC_INPUT_TARGET_HEVC) + tee_config_device_state(DMC_DEV_ID_HEVC, secure); + + if (vdec_stream_based(vdec) && + ((vdec->need_more_data & VDEC_NEED_MORE_DATA_RUN) && + (vdec->need_more_data & VDEC_NEED_MORE_DATA_DIRTY) == 0)) { + vdec->need_more_data |= VDEC_NEED_MORE_DATA; + } + + vdec->need_more_data |= VDEC_NEED_MORE_DATA_RUN; + vdec->need_more_data &= ~VDEC_NEED_MORE_DATA_DIRTY; +} + +/* struct vdec_core_shread manages all decoder instance in active list. 
When + * a vdec is added into the active list, it can onlt be in two status: + * VDEC_STATUS_CONNECTED(the decoder does not own HW resource and ready to run) + * VDEC_STATUS_ACTIVE(the decoder owns HW resources and is running). + * Removing a decoder from active list is only performed within core thread. + * Adding a decoder into active list is performed from user thread. + */ +static int vdec_core_thread(void *data) +{ + struct vdec_core_s *core = (struct vdec_core_s *)data; + struct sched_param param = {.sched_priority = MAX_RT_PRIO/2}; + unsigned long flags; + int i; + + sched_setscheduler(current, SCHED_FIFO, ¶m); + + allow_signal(SIGTERM); + + while (down_interruptible(&core->sem) == 0) { + struct vdec_s *vdec, *tmp, *worker; + unsigned long sched_mask = 0; + LIST_HEAD(disconnecting_list); + + if (kthread_should_stop()) + break; + mutex_lock(&vdec_mutex); + + if (core->parallel_dec == 1) { + for (i = VDEC_1; i < VDEC_MAX; i++) { + core->power_ref_mask = + core->power_ref_count[i] > 0 ? 
+ (core->power_ref_mask | (1 << i)) : + (core->power_ref_mask & ~(1 << i)); + } + } + /* clean up previous active vdec's input */ + list_for_each_entry(vdec, &core->connected_vdec_list, list) { + unsigned long mask = vdec->sched_mask & + (vdec->active_mask ^ vdec->sched_mask); + + vdec_route_interrupt(vdec, mask, false); + +#ifdef VDEC_DEBUG_SUPPORT + update_profi_clk_stop(vdec, mask, get_current_clk()); +#endif + /* + * If decoder released some core resources (mask), then + * check if these core resources are associated + * with any input side and do input clean up accordingly + */ + if (vdec_core_with_input(mask)) { + struct vdec_input_s *input = &vdec->input; + while (!list_empty( + &input->vframe_chunk_list)) { + struct vframe_chunk_s *chunk = + vdec_input_next_chunk(input); + if (chunk && (chunk->flag & + VFRAME_CHUNK_FLAG_CONSUMED)) + vdec_input_release_chunk(input, + chunk); + else + break; + } + + vdec_save_input_context(vdec); + } + + vdec->sched_mask &= ~mask; + core->sched_mask &= ~mask; + } + vdec_update_buff_status(); + /* + *todo: + * this is the case when the decoder is in active mode and + * the system side wants to stop it. Currently we rely on + * the decoder instance to go back to VDEC_STATUS_CONNECTED + * from VDEC_STATUS_ACTIVE by its own. However, if for some + * reason the decoder can not exist by itself (dead decoding + * or whatever), then we may have to add another vdec API + * to kill the vdec and release its HW resource and make it + * become inactive again. 
+ * if ((core->active_vdec) && + * (core->active_vdec->status == VDEC_STATUS_DISCONNECTED)) { + * } + */ + + /* check disconnected decoders */ + flags = vdec_core_lock(vdec_core); + list_for_each_entry_safe(vdec, tmp, + &core->connected_vdec_list, list) { + if ((vdec->status == VDEC_STATUS_CONNECTED) && + (vdec->next_status == VDEC_STATUS_DISCONNECTED)) { + if (core->parallel_dec == 1) { + if (vdec_core->active_hevc == vdec) + vdec_core->active_hevc = NULL; + if (vdec_core->active_vdec == vdec) + vdec_core->active_vdec = NULL; + } + if (core->last_vdec == vdec) + core->last_vdec = NULL; + list_move(&vdec->list, &disconnecting_list); + } + } + vdec_core_unlock(vdec_core, flags); + mutex_unlock(&vdec_mutex); + /* elect next vdec to be scheduled */ + vdec = core->last_vdec; + if (vdec) { + vdec = list_entry(vdec->list.next, struct vdec_s, list); + list_for_each_entry_from(vdec, + &core->connected_vdec_list, list) { + sched_mask = vdec_schedule_mask(vdec, + core->sched_mask); + if (!sched_mask) + continue; + sched_mask = vdec_ready_to_run(vdec, + sched_mask); + if (sched_mask) + break; + } + + if (&vdec->list == &core->connected_vdec_list) + vdec = NULL; + } + + if (!vdec) { + /* search from beginning */ + list_for_each_entry(vdec, + &core->connected_vdec_list, list) { + sched_mask = vdec_schedule_mask(vdec, + core->sched_mask); + if (vdec == core->last_vdec) { + if (!sched_mask) { + vdec = NULL; + break; + } + + sched_mask = vdec_ready_to_run(vdec, + sched_mask); + + if (!sched_mask) { + vdec = NULL; + break; + } + break; + } + + if (!sched_mask) + continue; + + sched_mask = vdec_ready_to_run(vdec, + sched_mask); + if (sched_mask) + break; + } + + if (&vdec->list == &core->connected_vdec_list) + vdec = NULL; + } + + worker = vdec; + + if (vdec) { + unsigned long mask = sched_mask; + unsigned long i; + + /* setting active_mask should be atomic. + * it can be modified by decoder driver callbacks. 
+ */ + while (sched_mask) { + i = __ffs(sched_mask); + set_bit(i, &vdec->active_mask); + sched_mask &= ~(1 << i); + } + + /* vdec's sched_mask is only set from core thread */ + vdec->sched_mask |= mask; + if (core->last_vdec) { + if ((core->last_vdec != vdec) && + (core->last_vdec->mc_type != vdec->mc_type)) + vdec->mc_loaded = 0;/*clear for reload firmware*/ + } else + vdec->mc_loaded = 0; + core->last_vdec = vdec; + if (debug & 2) + vdec->mc_loaded = 0;/*alway reload firmware*/ + vdec_set_status(vdec, VDEC_STATUS_ACTIVE); + + core->sched_mask |= mask; + if (core->parallel_dec == 1) + vdec_save_active_hw(vdec); +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN); +#endif + vdec_prepare_run(vdec, mask); +#ifdef VDEC_DEBUG_SUPPORT + inc_profi_count(mask, vdec->run_count); + update_profi_clk_run(vdec, mask, get_current_clk()); +#endif + vdec->run(vdec, mask, vdec_callback, core); + + + /* we have some cores scheduled, keep working until + * all vdecs are checked with no cores to schedule + */ + if (core->parallel_dec == 1) { + if (vdec_core->vdec_combine_flag == 0) + up(&core->sem); + } else + up(&core->sem); + } + + /* remove disconnected decoder from active list */ + list_for_each_entry_safe(vdec, tmp, &disconnecting_list, list) { + list_del(&vdec->list); + vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED); + /*core->last_vdec = NULL;*/ + complete(&vdec->inactive_done); + } + + /* if there is no new work scheduled and nothing + * is running, sleep 20ms + */ + if (core->parallel_dec == 1) { + if (vdec_core->vdec_combine_flag == 0) { + if ((!worker) && + ((core->sched_mask != core->power_ref_mask)) && + (atomic_read(&vdec_core->vdec_nr) > 0) && + ((core->buff_flag | core->stream_buff_flag) & + (core->sched_mask ^ core->power_ref_mask))) { + usleep_range(1000, 2000); + up(&core->sem); + } + } else { + if ((!worker) && (!core->sched_mask) && + (atomic_read(&vdec_core->vdec_nr) > 0) && + (core->buff_flag | core->stream_buff_flag)) { + 
usleep_range(1000, 2000); + up(&core->sem); + } + } + } else if ((!worker) && (!core->sched_mask) && (atomic_read(&vdec_core->vdec_nr) > 0)) { + usleep_range(1000, 2000); + up(&core->sem); + } + + } + + return 0; +} + +#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ +void vdec_power_reset(void) +{ + /* enable vdec1 isolation */ + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc0); + /* power off vdec1 memories */ + WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL); + /* vdec1 power off */ + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc); + + if (has_vdec2()) { + /* enable vdec2 isolation */ + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x300); + /* power off vdec2 memories */ + WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL); + /* vdec2 power off */ + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0x30); + } + + if (has_hdec()) { + /* enable hcodec isolation */ + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x30); + /* power off hcodec memories */ + WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL); + /* hcodec power off */ + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 3); + } + + if (has_hevc_vdec()) { + /* enable hevc isolation */ + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc00); + /* power off hevc memories */ + WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL); + /* hevc power off */ + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc0); + } +} +EXPORT_SYMBOL(vdec_power_reset); + + +void vdec_poweron(enum vdec_type_e core) +{ + if (core >= VDEC_MAX) + return; + + mutex_lock(&vdec_mutex); + + vdec_core->power_ref_count[core]++; + if (vdec_core->power_ref_count[core] > 1) { + mutex_unlock(&vdec_mutex); + return; + } + + if (vdec_on(core)) { + mutex_unlock(&vdec_mutex); + return; + } + + vdec_core->pm->power_on(vdec_core->cma_dev, core); + + mutex_unlock(&vdec_mutex); +} 
+EXPORT_SYMBOL(vdec_poweron); + +void vdec_poweroff(enum vdec_type_e core) +{ + if (core >= VDEC_MAX) + return; + + mutex_lock(&vdec_mutex); + + vdec_core->power_ref_count[core]--; + if (vdec_core->power_ref_count[core] > 0) { + mutex_unlock(&vdec_mutex); + return; + } + + vdec_core->pm->power_off(vdec_core->cma_dev, core); + + mutex_unlock(&vdec_mutex); +} +EXPORT_SYMBOL(vdec_poweroff); + +bool vdec_on(enum vdec_type_e core) +{ + return vdec_core->pm->power_state(vdec_core->cma_dev, core); +} +EXPORT_SYMBOL(vdec_on); + +#elif 0 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */ +void vdec_poweron(enum vdec_type_e core) +{ + ulong flags; + + spin_lock_irqsave(&lock, flags); + + if (core == VDEC_1) { + /* vdec1 soft reset */ + WRITE_VREG(DOS_SW_RESET0, 0xfffffffc); + WRITE_VREG(DOS_SW_RESET0, 0); + /* enable vdec1 clock */ + vdec_clock_enable(); + /* reset DOS top registers */ + WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0); + } else if (core == VDEC_2) { + /* vdec2 soft reset */ + WRITE_VREG(DOS_SW_RESET2, 0xffffffff); + WRITE_VREG(DOS_SW_RESET2, 0); + /* enable vdec2 clock */ + vdec2_clock_enable(); + /* reset DOS top registers */ + WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0); + } else if (core == VDEC_HCODEC) { + /* hcodec soft reset */ + WRITE_VREG(DOS_SW_RESET1, 0xffffffff); + WRITE_VREG(DOS_SW_RESET1, 0); + /* enable hcodec clock */ + hcodec_clock_enable(); + } + + spin_unlock_irqrestore(&lock, flags); +} + +void vdec_poweroff(enum vdec_type_e core) +{ + ulong flags; + + spin_lock_irqsave(&lock, flags); + + if (core == VDEC_1) { + /* disable vdec1 clock */ + vdec_clock_off(); + } else if (core == VDEC_2) { + /* disable vdec2 clock */ + vdec2_clock_off(); + } else if (core == VDEC_HCODEC) { + /* disable hcodec clock */ + hcodec_clock_off(); + } + + spin_unlock_irqrestore(&lock, flags); +} + +bool vdec_on(enum vdec_type_e core) +{ + bool ret = false; + + if (core == VDEC_1) { + if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100) + ret = true; + } else if (core == VDEC_2) { + if 
(READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100) + ret = true; + } else if (core == VDEC_HCODEC) { + if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000) + ret = true; + } + + return ret; +} +#endif + +int vdec_source_changed(int format, int width, int height, int fps) +{ + /* todo: add level routines for clock adjustment per chips */ + int ret = -1; + static int on_setting; + + if (on_setting > 0) + return ret;/*on changing clk,ignore this change*/ + + if (vdec_source_get(VDEC_1) == width * height * fps) + return ret; + + + on_setting = 1; + ret = vdec_source_changed_for_clk_set(format, width, height, fps); + pr_debug("vdec1 video changed to %d x %d %d fps clk->%dMHZ\n", + width, height, fps, vdec_clk_get(VDEC_1)); + on_setting = 0; + return ret; + +} +EXPORT_SYMBOL(vdec_source_changed); + +void vdec_reset_core(struct vdec_s *vdec) +{ + if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3)) { + /* t7 no dmc req for vdec only */ + vdec_dbus_ctrl(0); + } else { + dec_dmc_port_ctrl(0, VDEC_INPUT_TARGET_VLD); + } + /* + * 2: assist + * 3: vld_reset + * 4: vld_part_reset + * 5: vfifo reset + * 6: iqidct + * 7: mc + * 8: dblk + * 9: pic_dc + * 10: psc + * 11: mcpu + * 12: ccpu + * 13: ddr + * 14: afifo + */ + WRITE_VREG(DOS_SW_RESET0, (1<<3)|(1<<4)|(1<<5)|(1<<7)|(1<<8)|(1<<9)); + WRITE_VREG(DOS_SW_RESET0, 0); + + if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3)) + vdec_dbus_ctrl(1); + else + dec_dmc_port_ctrl(1, VDEC_INPUT_TARGET_VLD); +} +EXPORT_SYMBOL(vdec_reset_core); + +void hevc_mmu_dma_check(struct vdec_s *vdec) +{ + ulong timeout; + u32 data; + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) + return; + timeout = jiffies + HZ/100; + while (1) { + data = READ_VREG(HEVC_CM_CORE_STATUS); + if ((data & 0x1) == 0) + break; + if (time_after(jiffies, timeout)) { + if (debug & 0x10) + pr_info(" %s sao mmu dma idle\n", __func__); + break; + } + } + /*disable sao mmu 
dma */ + CLEAR_VREG_MASK(HEVC_SAO_MMU_DMA_CTRL, 1 << 0); + timeout = jiffies + HZ/100; + while (1) { + data = READ_VREG(HEVC_SAO_MMU_DMA_STATUS); + if ((data & 0x1)) + break; + if (time_after(jiffies, timeout)) { + if (debug & 0x10) + pr_err("%s sao mmu dma timeout, num_buf_used = 0x%x\n", + __func__, (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16)); + break; + } + } +} +EXPORT_SYMBOL(hevc_mmu_dma_check); + +void hevc_reset_core(struct vdec_s *vdec) +{ + int cpu_type = get_cpu_major_id(); + + if ((cpu_type == AM_MESON_CPU_MAJOR_ID_T7) || + (cpu_type == AM_MESON_CPU_MAJOR_ID_T3)) { + /* t7 no dmc req for hevc only */ + hevc_arb_ctrl(0); + } else { + WRITE_VREG(HEVC_STREAM_CONTROL, 0); + + dec_dmc_port_ctrl(0, VDEC_INPUT_TARGET_HEVC); + } + + if (vdec == NULL || input_frame_based(vdec)) + WRITE_VREG(HEVC_STREAM_CONTROL, 0); + + WRITE_VREG(HEVC_SAO_MMU_RESET_CTRL, + READ_VREG(HEVC_SAO_MMU_RESET_CTRL) | 1); + + /* + * 2: assist + * 3: parser + * 4: parser_state + * 8: dblk + * 10:wrrsp lmem + * 11:mcpu + * 12:ccpu + * 13:ddr + * 14:iqit + * 15:ipp + * 17:qdct + * 18:mpred + * 19:sao + * 24:hevc_afifo + * 26:rst_mmu_n + */ + WRITE_VREG(DOS_SW_RESET3, + (1<<3)|(1<<4)|(1<<8)|(1<<10)|(1<<11)| + (1<<12)|(1<<13)|(1<<14)|(1<<15)| + (1<<17)|(1<<18)|(1<<19)|(1<<24)|(1<<26)); + + WRITE_VREG(DOS_SW_RESET3, 0); + while (READ_VREG(HEVC_WRRSP_LMEM) & 0xfff) + ; + WRITE_VREG(HEVC_SAO_MMU_RESET_CTRL, + READ_VREG(HEVC_SAO_MMU_RESET_CTRL) & (~1)); + + if (cpu_type == AM_MESON_CPU_MAJOR_ID_TL1 && + is_meson_rev_b()) + cpu_type = AM_MESON_CPU_MAJOR_ID_G12B; + switch (cpu_type) { + case AM_MESON_CPU_MAJOR_ID_G12B: + WRITE_RESET_REG((RESET7_REGISTER_LEVEL), + READ_RESET_REG(RESET7_REGISTER_LEVEL) & (~((1<<13)|(1<<14)))); + WRITE_RESET_REG((RESET7_REGISTER_LEVEL), + READ_RESET_REG((RESET7_REGISTER_LEVEL)) | ((1<<13)|(1<<14))); + break; + case AM_MESON_CPU_MAJOR_ID_G12A: + case AM_MESON_CPU_MAJOR_ID_SM1: + case AM_MESON_CPU_MAJOR_ID_TL1: + case AM_MESON_CPU_MAJOR_ID_TM2: + case 
AM_MESON_CPU_MAJOR_ID_T5: + case AM_MESON_CPU_MAJOR_ID_T5D: + case AM_MESON_CPU_MAJOR_ID_T5W: + WRITE_RESET_REG((RESET7_REGISTER_LEVEL), + READ_RESET_REG(RESET7_REGISTER_LEVEL) & (~((1<<13)))); + WRITE_RESET_REG((RESET7_REGISTER_LEVEL), + READ_RESET_REG((RESET7_REGISTER_LEVEL)) | ((1<<13))); + break; + case AM_MESON_CPU_MAJOR_ID_SC2: + case AM_MESON_CPU_MAJOR_ID_S4: + case AM_MESON_CPU_MAJOR_ID_S4D: + WRITE_RESET_REG(P_RESETCTRL_RESET5_LEVEL, + READ_RESET_REG(P_RESETCTRL_RESET5_LEVEL) & (~((1<<1)|(1<<12)|(1<<13)))); + WRITE_RESET_REG(P_RESETCTRL_RESET5_LEVEL, + READ_RESET_REG(P_RESETCTRL_RESET5_LEVEL) | ((1<<1)|(1<<12)|(1<<13))); + break; + default: + break; + } + + if ((cpu_type == AM_MESON_CPU_MAJOR_ID_T7) || + (cpu_type == AM_MESON_CPU_MAJOR_ID_T3)) + hevc_arb_ctrl(1); + else + dec_dmc_port_ctrl(1, VDEC_INPUT_TARGET_HEVC); +} +EXPORT_SYMBOL(hevc_reset_core); + +int vdec2_source_changed(int format, int width, int height, int fps) +{ + int ret = -1; + static int on_setting; + + if (has_vdec2()) { + /* todo: add level routines for clock adjustment per chips */ + if (on_setting != 0) + return ret;/*on changing clk,ignore this change*/ + + if (vdec_source_get(VDEC_2) == width * height * fps) + return ret; + + on_setting = 1; + ret = vdec_source_changed_for_clk_set(format, + width, height, fps); + pr_debug("vdec2 video changed to %d x %d %d fps clk->%dMHZ\n", + width, height, fps, vdec_clk_get(VDEC_2)); + on_setting = 0; + return ret; + } + return 0; +} +EXPORT_SYMBOL(vdec2_source_changed); + +int hevc_source_changed(int format, int width, int height, int fps) +{ + /* todo: add level routines for clock adjustment per chips */ + int ret = -1; + static int on_setting; + + if (on_setting != 0) + return ret;/*on changing clk,ignore this change*/ + + if (vdec_source_get(VDEC_HEVC) == width * height * fps) + return ret; + + on_setting = 1; + ret = vdec_source_changed_for_clk_set(format, width, height, fps); + pr_debug("hevc video changed to %d x %d %d fps clk->%dMHZ\n", + 
width, height, fps, vdec_clk_get(VDEC_HEVC)); + on_setting = 0; + + return ret; +} +EXPORT_SYMBOL(hevc_source_changed); + +static struct am_reg am_risc[] = { + {"MSP", 0x300}, + {"MPSR", 0x301}, + {"MCPU_INT_BASE", 0x302}, + {"MCPU_INTR_GRP", 0x303}, + {"MCPU_INTR_MSK", 0x304}, + {"MCPU_INTR_REQ", 0x305}, + {"MPC-P", 0x306}, + {"MPC-D", 0x307}, + {"MPC_E", 0x308}, + {"MPC_W", 0x309}, + {"CSP", 0x320}, + {"CPSR", 0x321}, + {"CCPU_INT_BASE", 0x322}, + {"CCPU_INTR_GRP", 0x323}, + {"CCPU_INTR_MSK", 0x324}, + {"CCPU_INTR_REQ", 0x325}, + {"CPC-P", 0x326}, + {"CPC-D", 0x327}, + {"CPC_E", 0x328}, + {"CPC_W", 0x329}, + {"AV_SCRATCH_0", 0x09c0}, + {"AV_SCRATCH_1", 0x09c1}, + {"AV_SCRATCH_2", 0x09c2}, + {"AV_SCRATCH_3", 0x09c3}, + {"AV_SCRATCH_4", 0x09c4}, + {"AV_SCRATCH_5", 0x09c5}, + {"AV_SCRATCH_6", 0x09c6}, + {"AV_SCRATCH_7", 0x09c7}, + {"AV_SCRATCH_8", 0x09c8}, + {"AV_SCRATCH_9", 0x09c9}, + {"AV_SCRATCH_A", 0x09ca}, + {"AV_SCRATCH_B", 0x09cb}, + {"AV_SCRATCH_C", 0x09cc}, + {"AV_SCRATCH_D", 0x09cd}, + {"AV_SCRATCH_E", 0x09ce}, + {"AV_SCRATCH_F", 0x09cf}, + {"AV_SCRATCH_G", 0x09d0}, + {"AV_SCRATCH_H", 0x09d1}, + {"AV_SCRATCH_I", 0x09d2}, + {"AV_SCRATCH_J", 0x09d3}, + {"AV_SCRATCH_K", 0x09d4}, + {"AV_SCRATCH_L", 0x09d5}, + {"AV_SCRATCH_M", 0x09d6}, + {"AV_SCRATCH_N", 0x09d7}, +}; + +static ssize_t amrisc_regs_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + char *pbuf = buf; + struct am_reg *regs = am_risc; + int rsize = sizeof(am_risc) / sizeof(struct am_reg); + int i; + unsigned int val; + ssize_t ret; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) { + mutex_lock(&vdec_mutex); + if (!vdec_on(VDEC_1)) { + mutex_unlock(&vdec_mutex); + pbuf += sprintf(pbuf, "amrisc is power off\n"); + ret = pbuf - buf; + return ret; + } + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + /*TODO:M6 define */ + /* + * switch_mod_gate_by_type(MOD_VDEC, 1); + */ + amports_switch_gate("vdec", 1); + } + pbuf += sprintf(pbuf, "amrisc registers 
show:\n"); + for (i = 0; i < rsize; i++) { + val = READ_VREG(regs[i].offset); + pbuf += sprintf(pbuf, "%s(%#x)\t:%#x(%d)\n", + regs[i].name, regs[i].offset, val, val); + } + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) + mutex_unlock(&vdec_mutex); + else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + /*TODO:M6 define */ + /* + * switch_mod_gate_by_type(MOD_VDEC, 0); + */ + amports_switch_gate("vdec", 0); + } + ret = pbuf - buf; + return ret; +} + +static ssize_t dump_trace_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int i; + char *pbuf = buf; + ssize_t ret; + u16 *trace_buf = kmalloc(debug_trace_num * 2, GFP_KERNEL); + + if (!trace_buf) { + pbuf += sprintf(pbuf, "No Memory bug\n"); + ret = pbuf - buf; + return ret; + } + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) { + mutex_lock(&vdec_mutex); + if (!vdec_on(VDEC_1)) { + mutex_unlock(&vdec_mutex); + kfree(trace_buf); + pbuf += sprintf(pbuf, "amrisc is power off\n"); + ret = pbuf - buf; + return ret; + } + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + /*TODO:M6 define */ + /* + * switch_mod_gate_by_type(MOD_VDEC, 1); + */ + amports_switch_gate("vdec", 1); + } + pr_info("dump trace steps:%d start\n", debug_trace_num); + i = 0; + while (i <= debug_trace_num - 16) { + trace_buf[i] = READ_VREG(MPC_E); + trace_buf[i + 1] = READ_VREG(MPC_E); + trace_buf[i + 2] = READ_VREG(MPC_E); + trace_buf[i + 3] = READ_VREG(MPC_E); + trace_buf[i + 4] = READ_VREG(MPC_E); + trace_buf[i + 5] = READ_VREG(MPC_E); + trace_buf[i + 6] = READ_VREG(MPC_E); + trace_buf[i + 7] = READ_VREG(MPC_E); + trace_buf[i + 8] = READ_VREG(MPC_E); + trace_buf[i + 9] = READ_VREG(MPC_E); + trace_buf[i + 10] = READ_VREG(MPC_E); + trace_buf[i + 11] = READ_VREG(MPC_E); + trace_buf[i + 12] = READ_VREG(MPC_E); + trace_buf[i + 13] = READ_VREG(MPC_E); + trace_buf[i + 14] = READ_VREG(MPC_E); + trace_buf[i + 15] = READ_VREG(MPC_E); + i += 16; + }; + pr_info("dump trace steps:%d finished\n", 
debug_trace_num); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) + mutex_unlock(&vdec_mutex); + else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + /*TODO:M6 define */ + /* + * switch_mod_gate_by_type(MOD_VDEC, 0); + */ + amports_switch_gate("vdec", 0); + } + for (i = 0; i < debug_trace_num; i++) { + if (i % 4 == 0) { + if (i % 16 == 0) + pbuf += sprintf(pbuf, "\n"); + else if (i % 8 == 0) + pbuf += sprintf(pbuf, " "); + else /* 4 */ + pbuf += sprintf(pbuf, " "); + } + pbuf += sprintf(pbuf, "%04x:", trace_buf[i]); + } + while (i < debug_trace_num) + ; + kfree(trace_buf); + pbuf += sprintf(pbuf, "\n"); + ret = pbuf - buf; + return ret; +} + +static ssize_t clock_level_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + char *pbuf = buf; + size_t ret; + + pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_1)); + + if (has_vdec2()) + pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_2)); + + if (has_hevc_vdec()) + pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_HEVC)); + + ret = pbuf - buf; + return ret; +} + +static ssize_t enable_mvdec_info_show(struct class *cla, + struct class_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", enable_mvdec_info); +} + +static ssize_t enable_mvdec_info_store(struct class *cla, + struct class_attribute *attr, + const char *buf, size_t count) +{ + int r; + int val; + + r = kstrtoint(buf, 0, &val); + if (r < 0) + return -EINVAL; + enable_mvdec_info = val; + + return count; +} +static ssize_t poweron_clock_level_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int val; + ssize_t ret; + + /*ret = sscanf(buf, "%d", &val);*/ + ret = kstrtoint(buf, 0, &val); + + if (ret != 0) + return -EINVAL; + poweron_clock_level = val; + return size; +} + +static ssize_t poweron_clock_level_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", poweron_clock_level); +} + +/* + *if keep_vdec_mem == 1 + 
*always don't release + *vdec 64 memory for fast play. + */ +static ssize_t keep_vdec_mem_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int val; + ssize_t ret; + + /*ret = sscanf(buf, "%d", &val);*/ + ret = kstrtoint(buf, 0, &val); + if (ret != 0) + return -EINVAL; + keep_vdec_mem = val; + return size; +} + +static ssize_t keep_vdec_mem_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", keep_vdec_mem); +} + +#ifdef VDEC_DEBUG_SUPPORT +static ssize_t debug_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + struct vdec_s *vdec; + struct vdec_core_s *core = vdec_core; + unsigned long flags; + + unsigned id; + unsigned val; + ssize_t ret; + char cbuf[32]; + + cbuf[0] = 0; + ret = sscanf(buf, "%s %x %x", cbuf, &id, &val); + /*pr_info( + "%s(%s)=>ret %ld: %s, %x, %x\n", + __func__, buf, ret, cbuf, id, val);*/ + if (strcmp(cbuf, "schedule") == 0) { + pr_info("VDEC_DEBUG: force schedule\n"); + up(&core->sem); + } else if (strcmp(cbuf, "power_off") == 0) { + pr_info("VDEC_DEBUG: power off core %d\n", id); + vdec_poweroff(id); + } else if (strcmp(cbuf, "power_on") == 0) { + pr_info("VDEC_DEBUG: power_on core %d\n", id); + vdec_poweron(id); + } else if (strcmp(cbuf, "wr") == 0) { + pr_info("VDEC_DEBUG: WRITE_VREG(0x%x, 0x%x)\n", + id, val); + WRITE_VREG(id, val); + } else if (strcmp(cbuf, "rd") == 0) { + pr_info("VDEC_DEBUG: READ_VREG(0x%x) = 0x%x\n", + id, READ_VREG(id)); + } else if (strcmp(cbuf, "read_hevc_clk_reg") == 0) { + pr_info( + "VDEC_DEBUG: HHI_VDEC4_CLK_CNTL = 0x%x, HHI_VDEC2_CLK_CNTL = 0x%x\n", + READ_HHI_REG(HHI_VDEC4_CLK_CNTL), + READ_HHI_REG(HHI_VDEC2_CLK_CNTL)); + } else if (strcmp(cbuf, "no_interlace") == 0) { + prog_only ^= 1; + pr_info("set prog only %d, %s output\n", + prog_only, prog_only?"force one filed only":"interlace"); + } + + flags = vdec_core_lock(vdec_core); + + list_for_each_entry(vdec, + 
&core->connected_vdec_list, list) { + pr_info("vdec: status %d, id %d\n", vdec->status, vdec->id); + if (((vdec->status == VDEC_STATUS_CONNECTED + || vdec->status == VDEC_STATUS_ACTIVE)) && + (vdec->id == id)) { + /*to add*/ + break; + } + } + vdec_core_unlock(vdec_core, flags); + return size; +} + +static ssize_t debug_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + char *pbuf = buf; + struct vdec_s *vdec; + struct vdec_core_s *core = vdec_core; + unsigned long flags = vdec_core_lock(vdec_core); + u64 tmp; + + pbuf += sprintf(pbuf, + "============== help:\n"); + pbuf += sprintf(pbuf, + "'echo xxx > debug' usuage:\n"); + pbuf += sprintf(pbuf, + "schedule - trigger schedule thread to run\n"); + pbuf += sprintf(pbuf, + "power_off core_num - call vdec_poweroff(core_num)\n"); + pbuf += sprintf(pbuf, + "power_on core_num - call vdec_poweron(core_num)\n"); + pbuf += sprintf(pbuf, + "wr adr val - call WRITE_VREG(adr, val)\n"); + pbuf += sprintf(pbuf, + "rd adr - call READ_VREG(adr)\n"); + pbuf += sprintf(pbuf, + "read_hevc_clk_reg - read HHI register for hevc clk\n"); + pbuf += sprintf(pbuf, + "no_interlace - force v4l no_interlace output. 
%d\n", prog_only); + pbuf += sprintf(pbuf, + "===================\n"); + + pbuf += sprintf(pbuf, + "name(core)\tschedule_count\trun_count\tinput_underrun\tdecbuf_not_ready\trun_time\n"); + list_for_each_entry(vdec, + &core->connected_vdec_list, list) { + enum vdec_type_e type; + if ((vdec->status == VDEC_STATUS_CONNECTED + || vdec->status == VDEC_STATUS_ACTIVE)) { + for (type = VDEC_1; type < VDEC_MAX; type++) { + if (vdec->core_mask & (1 << type)) { + pbuf += sprintf(pbuf, "%s(%d):", + vdec->vf_provider_name, type); + pbuf += sprintf(pbuf, "\t%d", + vdec->check_count[type]); + pbuf += sprintf(pbuf, "\t%d", + vdec->run_count[type]); + pbuf += sprintf(pbuf, "\t%d", + vdec->input_underrun_count[type]); + pbuf += sprintf(pbuf, "\t%d", + vdec->not_run_ready_count[type]); + tmp = vdec->run_clk[type] * 100; + do_div(tmp, vdec->total_clk[type]); + pbuf += sprintf(pbuf, + "\t%d%%\n", + vdec->total_clk[type] == 0 ? 0 : + (u32)tmp); + } + } + } + } + + vdec_core_unlock(vdec_core, flags); + return pbuf - buf; + +} +#endif +int show_stream_buffer_status(char *buf, + int (*callback) (struct stream_buf_s *, char *)) +{ + char *pbuf = buf; + struct vdec_s *vdec; + struct vdec_core_s *core = vdec_core; + u64 flags = vdec_core_lock(vdec_core); + + list_for_each_entry(vdec, + &core->connected_vdec_list, list) { + if ((vdec->status == VDEC_STATUS_CONNECTED + || vdec->status == VDEC_STATUS_ACTIVE)) { + if (vdec_frame_based(vdec)) + continue; + pbuf += callback(&vdec->vbuf, pbuf); + } + } + vdec_core_unlock(vdec_core, flags); + + return pbuf - buf; +} +EXPORT_SYMBOL(show_stream_buffer_status); + +static ssize_t vdec_vfm_path_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t count) +{ + char *buf_dup, *ps, *token; + char str[VDEC_MAP_NAME_SIZE] = "\0"; + bool found = false; + int i; + + if (strlen(buf) >= VDEC_MAP_NAME_SIZE) { + pr_info("parameter is overflow\n"); + return -1; + } + + buf_dup = kstrdup(buf, GFP_KERNEL); + ps = buf_dup; + while (1) { + 
token = strsep(&ps, "\n "); + if (token == NULL) + break; + if (*token == '\0') + continue; + + for (i = 0; strcmp("reserved", vfm_path_node[i]) != 0; i++) { + if (!strncmp (vfm_path_node[i], token, strlen(vfm_path_node[i]))) { + break; + } + } + + if (strcmp("reserved", vfm_path_node[i]) == 0 || + strncmp("help", buf, strlen("help")) == 0) { + if (strncmp("help", buf, strlen("help")) != 0) { + pr_info("warnning! Input parameter is invalid. set failed!\n"); + } + pr_info("\nusage for example: \n"); + pr_info("echo help > /sys/class/vdec/vfm_path \n"); + pr_info("echo disable > /sys/class/vdec/vfm_path \n"); + pr_info("echo amlvideo ppmgr amvideo > /sys/class/vdec/vfm_path \n"); + found = false; + + break; + } else { + strcat(str, vfm_path_node[i]); + strcat(str, " "); + found = true; + } + } + + if (found == true) { + memset(vfm_path, 0, sizeof(vfm_path)); + strncpy(vfm_path, str, strlen(str)); + vfm_path[VDEC_MAP_NAME_SIZE - 1] = '\0'; + pr_info("cfg path success: decoder %s\n", vfm_path); + } + kfree(buf_dup); + + return count; +} + +static ssize_t vdec_vfm_path_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int len = 0; + int i; + len += sprintf(buf + len, "cfg vfm path: decoder %s\n", vfm_path); + len += sprintf(buf + len, "\nvfm path node list: \n"); + for (i = 0; strcmp("reserved", vfm_path_node[i]) != 0; i++) { + len += sprintf(buf + len, "\t%s \n", vfm_path_node[i]); + } + + return len; +} + +/*irq num as same as .dts*/ +/* + * interrupts = <0 3 1 + * 0 23 1 + * 0 32 1 + * 0 43 1 + * 0 44 1 + * 0 45 1>; + * interrupt-names = "vsync", + * "demux", + * "parser", + * "mailbox_0", + * "mailbox_1", + * "mailbox_2"; + */ +s32 vdec_request_threaded_irq(enum vdec_irq_num num, + irq_handler_t handler, + irq_handler_t thread_fn, + unsigned long irqflags, + const char *devname, void *dev) +{ + s32 res_irq; + s32 ret = 0; + + if (num >= VDEC_IRQ_MAX) { + pr_err("[%s] request irq error, irq num too big!", __func__); + return -EINVAL; + } + + 
	if (vdec_core->isr_context[num].irq < 0) {
		/* first request for this slot: resolve the platform irq line */
		res_irq = platform_get_irq(
			vdec_core->vdec_core_platform_device, num);
		if (res_irq < 0) {
			pr_err("[%s] get irq error!", __func__);
			return -EINVAL;
		}

		/* record the handlers before enabling: the shared trampoline
		 * (vdec_isr/vdec_thread_isr) dispatches through this context
		 */
		vdec_core->isr_context[num].irq = res_irq;
		vdec_core->isr_context[num].dev_isr = handler;
		vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
		vdec_core->isr_context[num].dev_id = dev;
		ret = request_threaded_irq(res_irq,
			vdec_isr,
			vdec_thread_isr,
			(thread_fn) ? IRQF_ONESHOT : irqflags,
			devname,
			&vdec_core->isr_context[num]);

		if (ret) {
			/* roll back the context so a later request retries
			 * platform_get_irq()/request_threaded_irq() cleanly
			 */
			vdec_core->isr_context[num].irq = -1;
			vdec_core->isr_context[num].dev_isr = NULL;
			vdec_core->isr_context[num].dev_threaded_isr = NULL;
			vdec_core->isr_context[num].dev_id = NULL;

			pr_err("vdec irq register error for %s.\n", devname);
			return -EIO;
		}
	} else {
		/* irq already installed: just swap in the new client handlers */
		vdec_core->isr_context[num].dev_isr = handler;
		vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
		vdec_core->isr_context[num].dev_id = dev;
	}

	return ret;
}
EXPORT_SYMBOL(vdec_request_threaded_irq);

/* Convenience wrapper: request a vdec irq with no threaded handler. */
s32 vdec_request_irq(enum vdec_irq_num num, irq_handler_t handler,
		const char *devname, void *dev)
{
	pr_debug("vdec_request_irq %p, %s\n", handler, devname);

	return vdec_request_threaded_irq(num,
		handler,
		NULL,/*no thread_fn*/
		IRQF_SHARED,
		devname,
		dev);
}
EXPORT_SYMBOL(vdec_request_irq);

/*
 * Detach the client handlers for a vdec irq. The irq line itself stays
 * requested (the trampoline just finds NULL handlers afterwards).
 */
void vdec_free_irq(enum vdec_irq_num num, void *dev)
{
	if (num >= VDEC_IRQ_MAX) {
		pr_err("[%s] request irq error, irq num too big!", __func__);
		return;
	}
	/*
	 *assume amrisc is stopped already and there is no mailbox interrupt
	 * when we reset pointers here.
+ */ + vdec_core->isr_context[num].dev_isr = NULL; + vdec_core->isr_context[num].dev_threaded_isr = NULL; + vdec_core->isr_context[num].dev_id = NULL; + synchronize_irq(vdec_core->isr_context[num].irq); +} +EXPORT_SYMBOL(vdec_free_irq); + +void vdec_sync_irq(enum vdec_irq_num num) +{ +#if 0 + if (!vdec) + return; + if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) + synchronize_irq(vdec_core->isr_context[VDEC_IRQ_0].irq); + else (vdec->input.target == VDEC_INPUT_TARGET_VLD) + synchronize_irq(vdec_core->isr_context[VDEC_IRQ_1].irq); +#endif + synchronize_irq(vdec_core->isr_context[num].irq); +} +EXPORT_SYMBOL(vdec_sync_irq); + +struct vdec_s *vdec_get_default_vdec_for_userdata(void) +{ + struct vdec_s *vdec; + struct vdec_s *ret_vdec; + struct vdec_core_s *core = vdec_core; + unsigned long flags; + int id; + + flags = vdec_core_lock(vdec_core); + + id = 0x10000000; + ret_vdec = NULL; + if (!list_empty(&core->connected_vdec_list)) { + list_for_each_entry(vdec, &core->connected_vdec_list, list) { + if (vdec->id < id) { + id = vdec->id; + ret_vdec = vdec; + } + } + } + + vdec_core_unlock(vdec_core, flags); + + return ret_vdec; +} +EXPORT_SYMBOL(vdec_get_default_vdec_for_userdata); + +struct vdec_s *vdec_get_vdec_by_video_id(int video_id) +{ + struct vdec_s *vdec; + struct vdec_s *ret_vdec; + struct vdec_core_s *core = vdec_core; + unsigned long flags; + + flags = vdec_core_lock(vdec_core); + + ret_vdec = NULL; + if (!list_empty(&core->connected_vdec_list)) { + list_for_each_entry(vdec, &core->connected_vdec_list, list) { + if (vdec->video_id == video_id) { + ret_vdec = vdec; + break; + } + } + } + + vdec_core_unlock(vdec_core, flags); + + return ret_vdec; +} +EXPORT_SYMBOL(vdec_get_vdec_by_video_id); + +struct vdec_s *vdec_get_vdec_by_id(int vdec_id) +{ + struct vdec_s *vdec; + struct vdec_s *ret_vdec; + struct vdec_core_s *core = vdec_core; + unsigned long flags; + + flags = vdec_core_lock(vdec_core); + + ret_vdec = NULL; + if (!list_empty(&core->connected_vdec_list)) 
{ + list_for_each_entry(vdec, &core->connected_vdec_list, list) { + if (vdec->id == vdec_id) { + ret_vdec = vdec; + break; + } + } + } + + vdec_core_unlock(vdec_core, flags); + + return ret_vdec; +} +EXPORT_SYMBOL(vdec_get_vdec_by_id); + + +int vdec_read_user_data(struct vdec_s *vdec, + struct userdata_param_t *p_userdata_param) +{ + int ret = 0; + + if (!vdec) + vdec = vdec_get_default_vdec_for_userdata(); + + if (vdec) { + if (vdec->user_data_read) + ret = vdec->user_data_read(vdec, p_userdata_param); + } + return ret; +} +EXPORT_SYMBOL(vdec_read_user_data); + +int vdec_wakeup_userdata_poll(struct vdec_s *vdec) +{ + if (vdec) { + if (vdec->wakeup_userdata_poll) + vdec->wakeup_userdata_poll(vdec); + } + + return 0; +} +EXPORT_SYMBOL(vdec_wakeup_userdata_poll); + +void vdec_reset_userdata_fifo(struct vdec_s *vdec, int bInit) +{ + if (!vdec) + vdec = vdec_get_default_vdec_for_userdata(); + + if (vdec) { + if (vdec->reset_userdata_fifo) + vdec->reset_userdata_fifo(vdec, bInit); + } +} +EXPORT_SYMBOL(vdec_reset_userdata_fifo); + +void vdec_set_profile_level(struct vdec_s *vdec, u32 profile_idc, u32 level_idc) +{ + if (vdec) { + vdec->profile_idc = profile_idc; + vdec->level_idc = level_idc; + } +} +EXPORT_SYMBOL(vdec_set_profile_level); + +static int dump_mode; +static ssize_t dump_risc_mem_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size)/*set*/ +{ + unsigned int val; + ssize_t ret; + char dump_mode_str[4] = "PRL"; + + /*ret = sscanf(buf, "%d", &val);*/ + ret = kstrtoint(buf, 0, &val); + + if (ret != 0) + return -EINVAL; + dump_mode = val & 0x3; + pr_info("set dump mode to %d,%c_mem\n", + dump_mode, dump_mode_str[dump_mode]); + return size; +} +static u32 read_amrisc_reg(int reg) +{ + WRITE_VREG(0x31b, reg); + return READ_VREG(0x31c); +} + +static void dump_pmem(void) +{ + int i; + + WRITE_VREG(0x301, 0x8000); + WRITE_VREG(0x31d, 0); + pr_info("start dump amrisc pmem of risc\n"); + for (i = 0; i < 0xfff; i++) { + /*same as .o 
format*/ + pr_info("%08x // 0x%04x:\n", read_amrisc_reg(i), i); + } +} + +static void dump_lmem(void) +{ + int i; + + WRITE_VREG(0x301, 0x8000); + WRITE_VREG(0x31d, 2); + pr_info("start dump amrisc lmem\n"); + for (i = 0; i < 0x3ff; i++) { + /*same as */ + pr_info("[%04x] = 0x%08x:\n", i, read_amrisc_reg(i)); + } +} + +static ssize_t dump_risc_mem_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + char *pbuf = buf; + int ret; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) { + mutex_lock(&vdec_mutex); + if (!vdec_on(VDEC_1)) { + mutex_unlock(&vdec_mutex); + pbuf += sprintf(pbuf, "amrisc is power off\n"); + ret = pbuf - buf; + return ret; + } + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + /*TODO:M6 define */ + /* + * switch_mod_gate_by_type(MOD_VDEC, 1); + */ + amports_switch_gate("vdec", 1); + } + /*start do**/ + switch (dump_mode) { + case 0: + dump_pmem(); + break; + case 2: + dump_lmem(); + break; + default: + break; + } + + /*done*/ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) + mutex_unlock(&vdec_mutex); + else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + /*TODO:M6 define */ + /* + * switch_mod_gate_by_type(MOD_VDEC, 0); + */ + amports_switch_gate("vdec", 0); + } + return sprintf(buf, "done\n"); +} + +static ssize_t core_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + struct vdec_core_s *core = vdec_core; + char *pbuf = buf; + unsigned long flags = vdec_core_lock(vdec_core); + + if (list_empty(&core->connected_vdec_list)) + pbuf += sprintf(pbuf, "connected vdec list empty\n"); + else { + struct vdec_s *vdec; + + pbuf += sprintf(pbuf, + " Core: last_sched %p, sched_mask %lx\n", + core->last_vdec, + core->sched_mask); + + list_for_each_entry(vdec, &core->connected_vdec_list, list) { + pbuf += sprintf(pbuf, + "\tvdec.%d (%p (%s%s)), status = %s,\ttype = %s, \tactive_mask = %lx\n", + vdec->id, + vdec, + vdec_device_name[vdec->format * 2], + (vdec->is_v4l == 1) ? 
"_v4l" : "", + vdec_status_str(vdec), + vdec_type_str(vdec), + vdec->active_mask); + } + } + + vdec_core_unlock(vdec_core, flags); + return pbuf - buf; +} + +static ssize_t vdec_status_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + char *pbuf = buf; + struct vdec_s *vdec; + struct vdec_info vs; + unsigned char vdec_num = 0; + struct vdec_core_s *core = vdec_core; + unsigned long flags = vdec_core_lock(vdec_core); + + if (list_empty(&core->connected_vdec_list)) { + pbuf += sprintf(pbuf, "No vdec.\n"); + goto out; + } + + list_for_each_entry(vdec, &core->connected_vdec_list, list) { + if ((vdec->status == VDEC_STATUS_CONNECTED + || vdec->status == VDEC_STATUS_ACTIVE)) { + memset(&vs, 0, sizeof(vs)); + if (vdec_status(vdec, &vs)) { + pbuf += sprintf(pbuf, "err.\n"); + goto out; + } + pbuf += sprintf(pbuf, + "vdec channel %u statistics:\n", + vdec_num); + pbuf += sprintf(pbuf, + "%13s : %s\n", "device name", + vs.vdec_name); + pbuf += sprintf(pbuf, + "%13s : %u\n", "frame width", + vs.frame_width); + pbuf += sprintf(pbuf, + "%13s : %u\n", "frame height", + vs.frame_height); + pbuf += sprintf(pbuf, + "%13s : %u %s\n", "frame rate", + vs.frame_rate, "fps"); + pbuf += sprintf(pbuf, + "%13s : %u %s\n", "bit rate", + vs.bit_rate / 1024 * 8, "kbps"); + pbuf += sprintf(pbuf, + "%13s : %u\n", "status", + vs.status); + pbuf += sprintf(pbuf, + "%13s : %u\n", "frame dur", + vs.frame_dur); + pbuf += sprintf(pbuf, + "%13s : %u %s\n", "frame data", + vs.frame_data / 1024, "KB"); + pbuf += sprintf(pbuf, + "%13s : %u\n", "frame count", + vs.frame_count); + pbuf += sprintf(pbuf, + "%13s : %u\n", "drop count", + vs.drop_frame_count); + pbuf += sprintf(pbuf, + "%13s : %u\n", "fra err count", + vs.error_frame_count); + pbuf += sprintf(pbuf, + "%13s : %u\n", "hw err count", + vs.error_count); + pbuf += sprintf(pbuf, + "%13s : %llu %s\n", "total data", + vs.total_data / 1024, "KB"); + pbuf += sprintf(pbuf, + "%13s : %x\n\n", "ratio_control", + vs.ratio_control); + 
+ vdec_num++; + } + } +out: + vdec_core_unlock(vdec_core, flags); + return pbuf - buf; +} + +static ssize_t dump_vdec_blocks_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + struct vdec_core_s *core = vdec_core; + char *pbuf = buf; + unsigned long flags = vdec_core_lock(vdec_core); + + if (list_empty(&core->connected_vdec_list)) + pbuf += sprintf(pbuf, "connected vdec list empty\n"); + else { + struct vdec_s *vdec; + list_for_each_entry(vdec, &core->connected_vdec_list, list) { + pbuf += vdec_input_dump_blocks(&vdec->input, + pbuf, PAGE_SIZE - (pbuf - buf)); + } + } + vdec_core_unlock(vdec_core, flags); + + return pbuf - buf; +} +static ssize_t dump_vdec_chunks_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + struct vdec_core_s *core = vdec_core; + char *pbuf = buf; + unsigned long flags = vdec_core_lock(vdec_core); + + if (list_empty(&core->connected_vdec_list)) + pbuf += sprintf(pbuf, "connected vdec list empty\n"); + else { + struct vdec_s *vdec; + list_for_each_entry(vdec, &core->connected_vdec_list, list) { + pbuf += vdec_input_dump_chunks(vdec->id, &vdec->input, + pbuf, PAGE_SIZE - (pbuf - buf)); + } + } + vdec_core_unlock(vdec_core, flags); + + return pbuf - buf; +} + +static ssize_t dump_decoder_state_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + char *pbuf = buf; + struct vdec_s *vdec; + struct vdec_core_s *core = vdec_core; + unsigned long flags = vdec_core_lock(vdec_core); + + if (list_empty(&core->connected_vdec_list)) { + pbuf += sprintf(pbuf, "No vdec.\n"); + } else { + list_for_each_entry(vdec, + &core->connected_vdec_list, list) { + if ((vdec->status == VDEC_STATUS_CONNECTED + || vdec->status == VDEC_STATUS_ACTIVE) + && vdec->dump_state) + vdec->dump_state(vdec); + } + } + vdec_core_unlock(vdec_core, flags); + + return pbuf - buf; +} + +static ssize_t dump_fps_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + char *pbuf = buf; + struct vdec_core_s 
*core = vdec_core; + int i; + + unsigned long flags = vdec_fps_lock(vdec_core); + for (i = 0; i < MAX_INSTANCE_MUN; i++) + pbuf += sprintf(pbuf, "%d ", core->decode_fps[i].fps); + + pbuf += sprintf(pbuf, "\n"); + vdec_fps_unlock(vdec_core, flags); + + return pbuf - buf; +} + +static ssize_t version_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + char *pbuf = buf; +#ifdef DECODER_VERSION + pbuf += sprintf(pbuf, "DECODER VERSION: V" xstr(DECODER_VERSION) "\n"); +#else +#ifdef RELEASED_VERSION + pbuf += sprintf(pbuf, "Due to project compilation environment problems,\ +the current decoder version could not be detected,\ +Please Use The DECODER BASE Version for traceability\n"); + pbuf += sprintf(pbuf, "DECODER BASE Version: " xstr(RELEASED_VERSION) "\n"); +#endif +#endif + +#ifdef UCODE_VERSION + pbuf += sprintf(pbuf, "UCODE VERSION: V" xstr(UCODE_VERSION) "\n"); +#endif + + return pbuf - buf; +} + +static char * parser_h264_profile(char *pbuf, struct vdec_s *vdec) +{ + switch (vdec->profile_idc) { + case 66: + pbuf += sprintf(pbuf, "%d: Baseline Profile(%u)\n", + vdec->id, vdec->profile_idc); + break; + case 77: + pbuf += sprintf(pbuf, "%d: Main Profile(%u)\n", + vdec->id, vdec->profile_idc); + break; + case 88: + pbuf += sprintf(pbuf, "%d: Extended Profile(%u)\n", + vdec->id, vdec->profile_idc); + break; + case 100: + pbuf += sprintf(pbuf, "%d: High Profile(%u)\n", + vdec->id, vdec->profile_idc); + break; + case 110: + pbuf += sprintf(pbuf, "%d: High 10 Profile(%u)\n", + vdec->id, vdec->profile_idc); + break; + default: + pbuf += sprintf(pbuf, "%d: Not Support Profile(%u)\n", + vdec->id, vdec->profile_idc); + break; + } + + return pbuf; +} + +static char * parser_mpeg2_profile(char *pbuf, struct vdec_s *vdec) +{ + switch (vdec->profile_idc) { + case 5: + pbuf += sprintf(pbuf, "%d: Simple Profile(%u)\n", + vdec->id, vdec->profile_idc); + break; + case 4: + pbuf += sprintf(pbuf, "%d: Main Profile(%u)\n", + vdec->id, vdec->profile_idc); + 
		break;
	case 3:
		pbuf += sprintf(pbuf, "%d: SNR Scalable Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 2:
		pbuf += sprintf(pbuf, "%d: Airspace Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 1:
		pbuf += sprintf(pbuf, "%d: High Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	default:
		pbuf += sprintf(pbuf, "%d: Not Support Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	}
	return pbuf;
}

/*
 * Append a human-readable MPEG-4 profile description for this vdec to
 * pbuf; returns the advanced buffer pointer.
 */
static char * parser_mpeg4_profile(char *pbuf, struct vdec_s *vdec)
{
	switch (vdec->profile_idc) {
	case 0:
		pbuf += sprintf(pbuf, "%d: Simple Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 1:
		pbuf += sprintf(pbuf, "%d: Simple Scalable Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 2:
		pbuf += sprintf(pbuf, "%d: Core Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 3:
		pbuf += sprintf(pbuf, "%d: Main Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 4:
		pbuf += sprintf(pbuf, "%d: N-bit Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 5:
		pbuf += sprintf(pbuf, "%d: Scalable Texture Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 6:
		/* NOTE(review): dead branch — profile_idc == 6 inside this case,
		 * so the ==1/==2 test can never hold and the else always runs;
		 * the condition presumably meant to test a different field
		 * (e.g. level) — confirm against the MPEG-4 profile tables.
		 */
		if (vdec->profile_idc == 1 || vdec->profile_idc == 2)
			pbuf += sprintf(pbuf, "%d: Simple Face Animation Profile(%u)\n",
				vdec->id, vdec->profile_idc);
		else
			pbuf += sprintf(pbuf, "%d: Simple FBA Profile(%u)\n",
				vdec->id, vdec->profile_idc);
		break;
	case 7:
		pbuf += sprintf(pbuf, "%d: Basic Animated Texture Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 8:
		pbuf += sprintf(pbuf, "%d: Hybrid Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 9:
		pbuf += sprintf(pbuf, "%d: Advanced Real Time Simple Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 10:
		pbuf += sprintf(pbuf, "%d: Core Scalable Profile(%u)\n",
			vdec->id, vdec->profile_idc);
		break;
	case 11:
		pbuf += sprintf(pbuf, "%d: Advanced Coding Efficiency Profile(%u)\n",
vdec->id, vdec->profile_idc); + break; + case 12: + pbuf += sprintf(pbuf, "%d: Advanced Core Profile(%u)\n", + vdec->id, vdec->profile_idc); + break; + case 13: + pbuf += sprintf(pbuf, "%d: Advanced Scalable Texture Profile(%u)\n", + vdec->id, vdec->profile_idc); + break; + case 14: + case 15: + pbuf += sprintf(pbuf, "%d: Advanced Simple Profile(%u)\n", + vdec->id, vdec->profile_idc); + break; + default: + pbuf += sprintf(pbuf, "%d: Not Support Profile(%u)\n", + vdec->id, vdec->profile_idc); + break; + } + + return pbuf; +} + +static ssize_t profile_idc_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + struct vdec_core_s *core = vdec_core; + char *pbuf = buf; + unsigned long flags = vdec_core_lock(vdec_core); + + if (list_empty(&core->connected_vdec_list)) + pbuf += sprintf(pbuf, "connected vdec list empty\n"); + else { + struct vdec_s *vdec; + list_for_each_entry(vdec, &core->connected_vdec_list, list) { + if (vdec->format == 0) { + pbuf = parser_mpeg2_profile(pbuf, vdec); + } else if (vdec->format == 1) { + pbuf = parser_mpeg4_profile(pbuf, vdec); + } else if (vdec->format == 2) { + pbuf = parser_h264_profile(pbuf, vdec); + } else { + pbuf += sprintf(pbuf, + "%d: Not Support\n", vdec->id); + } + } + } + + vdec_core_unlock(vdec_core, flags); + return pbuf - buf; +} + +static char * parser_h264_level(char *pbuf, struct vdec_s *vdec) +{ + + pbuf += sprintf(pbuf, "%d: Level %d.%d(%u)\n", + vdec->id, vdec->level_idc/10, vdec->level_idc%10, vdec->level_idc); + + return pbuf; +} + +static char * parser_mpeg2_level(char *pbuf, struct vdec_s *vdec) +{ + switch (vdec->level_idc) { + case 10: + pbuf += sprintf(pbuf, "%d: Low Level(%u)\n", + vdec->id, vdec->level_idc); + break; + case 8: + pbuf += sprintf(pbuf, "%d: Main Level(%u)\n", + vdec->id, vdec->level_idc); + break; + case 6: + pbuf += sprintf(pbuf, "%d: High 1440 Level(%u)\n", + vdec->id, vdec->level_idc); + break; + case 4: + pbuf += sprintf(pbuf, "%d: High Level(%u)\n", + vdec->id, 
vdec->level_idc); + break; + default: + pbuf += sprintf(pbuf, "%d: Not Support Level(%u)\n", + vdec->id, vdec->level_idc); + break; + } + + return pbuf; +} + +static char * parser_mpeg4_level(char *pbuf, struct vdec_s *vdec) +{ + switch (vdec->level_idc) { + case 1: + pbuf += sprintf(pbuf, "%d: Level 1(%u)\n", + vdec->id, vdec->level_idc); + break; + case 2: + pbuf += sprintf(pbuf, "%d: Level 2(%u)\n", + vdec->id, vdec->level_idc); + break; + case 3: + pbuf += sprintf(pbuf, "%d: Level 3(%u)\n", + vdec->id, vdec->level_idc); + break; + case 4: + pbuf += sprintf(pbuf, "%d: Level 4(%u)\n", + vdec->id, vdec->level_idc); + break; + case 5: + pbuf += sprintf(pbuf, "%d: Level 5(%u)\n", + vdec->id, vdec->level_idc); + break; + default: + pbuf += sprintf(pbuf, "%d: Not Support Level(%u)\n", + vdec->id, vdec->level_idc); + break; + } + + return pbuf; +} + +static ssize_t level_idc_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + struct vdec_core_s *core = vdec_core; + char *pbuf = buf; + unsigned long flags = vdec_core_lock(vdec_core); + + if (list_empty(&core->connected_vdec_list)) + pbuf += sprintf(pbuf, "connected vdec list empty\n"); + else { + struct vdec_s *vdec; + list_for_each_entry(vdec, &core->connected_vdec_list, list) { + if (vdec->format == 0) { + pbuf = parser_mpeg2_level(pbuf, vdec); + } else if (vdec->format == 1) { + pbuf = parser_mpeg4_level(pbuf, vdec); + } else if (vdec->format == 2) { + pbuf = parser_h264_level(pbuf, vdec); + } else { + pbuf += sprintf(pbuf, + "%d: Not Support\n", vdec->id); + } + } + } + + vdec_core_unlock(vdec_core, flags); + return pbuf - buf; +} + +static CLASS_ATTR_RO(amrisc_regs); +static CLASS_ATTR_RO(dump_trace); +static CLASS_ATTR_RO(clock_level); +static CLASS_ATTR_RW(poweron_clock_level); +static CLASS_ATTR_RW(dump_risc_mem); +static CLASS_ATTR_RW(keep_vdec_mem); +static CLASS_ATTR_RW(enable_mvdec_info); +static CLASS_ATTR_RO(core); +static CLASS_ATTR_RO(vdec_status); +static 
CLASS_ATTR_RO(dump_vdec_blocks); +static CLASS_ATTR_RO(dump_vdec_chunks); +static CLASS_ATTR_RO(dump_decoder_state); +#ifdef VDEC_DEBUG_SUPPORT +static CLASS_ATTR_RW(debug); +#endif +static CLASS_ATTR_RW(vdec_vfm_path); +#ifdef FRAME_CHECK +static CLASS_ATTR_RW(dump_yuv); +static CLASS_ATTR_RW(frame_check); +#endif +static CLASS_ATTR_RO(dump_fps); +static CLASS_ATTR_RO(profile_idc); +static CLASS_ATTR_RO(level_idc); +static CLASS_ATTR_RO(version); + +static struct attribute *vdec_class_attrs[] = { + &class_attr_amrisc_regs.attr, + &class_attr_dump_trace.attr, + &class_attr_clock_level.attr, + &class_attr_poweron_clock_level.attr, + &class_attr_dump_risc_mem.attr, + &class_attr_keep_vdec_mem.attr, + &class_attr_enable_mvdec_info.attr, + &class_attr_core.attr, + &class_attr_vdec_status.attr, + &class_attr_dump_vdec_blocks.attr, + &class_attr_dump_vdec_chunks.attr, + &class_attr_dump_decoder_state.attr, +#ifdef VDEC_DEBUG_SUPPORT + &class_attr_debug.attr, +#endif + &class_attr_vdec_vfm_path.attr, +#ifdef FRAME_CHECK + &class_attr_dump_yuv.attr, + &class_attr_frame_check.attr, +#endif + &class_attr_dump_fps.attr, + &class_attr_profile_idc.attr, + &class_attr_level_idc.attr, + &class_attr_version.attr, + NULL +}; + +ATTRIBUTE_GROUPS(vdec_class); + +static struct class vdec_class = { + .name = "vdec", + .class_groups = vdec_class_groups, +}; + +struct device *get_vdec_device(void) +{ + return &vdec_core->vdec_core_platform_device->dev; +} +EXPORT_SYMBOL(get_vdec_device); + +static int vdec_post_task_recycle(void *args) +{ + struct post_task_mgr_s *post = + (struct post_task_mgr_s *)args; + + while (post->running && + down_interruptible(&post->sem) == 0) { + if (kthread_should_stop()) + break; + mutex_lock(&post->mutex); + if (!list_empty(&post->task_recycle)) { + struct vdec_post_task_parms_s *parms, *tmp; + list_for_each_entry_safe(parms, tmp, &post->task_recycle, recycle) { + if (parms->scheduled) { + list_del(&parms->recycle); + kthread_stop(parms->task); + 
kfree(parms); + parms = NULL; + } + } + } + mutex_unlock(&post->mutex); + } + + return 0; +} + +static void vdec_post_task_exit(void) +{ + struct post_task_mgr_s *post = &vdec_core->post; + + post->running = false; + up(&post->sem); + + kthread_stop(post->task); +} + +static int vdec_post_task_init(void) +{ + struct post_task_mgr_s *post = &vdec_core->post; + + sema_init(&post->sem, 0); + INIT_LIST_HEAD(&post->task_recycle); + mutex_init(&post->mutex); + post->running = true; + + post->task = kthread_run(vdec_post_task_recycle, + post, "task-post-daemon-thread"); + if (IS_ERR(post->task)) { + pr_err("%s, creat task post daemon thread faild %ld\n", + __func__, PTR_ERR(post->task)); + return PTR_ERR(post->task); + } + + return 0; +} + +static int vdec_post_handler(void *args) +{ + struct vdec_post_task_parms_s *parms = + (struct vdec_post_task_parms_s *) args; + struct post_task_mgr_s *post = &vdec_core->post; + + complete(&parms->park); + + /* process client task. */ + parms->func(parms->private); + parms->scheduled = 1; + up(&post->sem); + + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + usleep_range(1000, 2000); + } + + return 0; +} + +int vdec_post_task(post_task_handler func, void *args) +{ + struct vdec_post_task_parms_s *parms; + struct post_task_mgr_s *post = &vdec_core->post; + + parms = kzalloc(sizeof(*parms), GFP_KERNEL); + if (parms == NULL) + return -ENOMEM; + + parms->func = func; + parms->private = args; + init_completion(&parms->park); + parms->scheduled = 0; + parms->task = kthread_run(vdec_post_handler, + parms, "task-post-thread"); + if (IS_ERR(parms->task)) { + pr_err("%s, creat task post thread faild %ld\n", + __func__, PTR_ERR(parms->task)); + kfree(parms); + return PTR_ERR(parms->task); + } + if (!__kthread_should_park(parms->task)) + wait_for_completion(&parms->park); + + mutex_lock(&post->mutex); + /* add to list for resource recycle in post daemon kthread */ + list_add_tail(&parms->recycle, &post->task_recycle); 
+ mutex_unlock(&post->mutex); + + return 0; +} +EXPORT_SYMBOL(vdec_post_task); + + + +static int vdec_probe(struct platform_device *pdev) +{ + s32 i, r; + + vdec_core = (struct vdec_core_s *)devm_kzalloc(&pdev->dev, + sizeof(struct vdec_core_s), GFP_KERNEL); + if (vdec_core == NULL) { + pr_err("vdec core allocation failed.\n"); + return -ENOMEM; + } + + atomic_set(&vdec_core->vdec_nr, 0); + sema_init(&vdec_core->sem, 1); + + r = class_register(&vdec_class); + if (r) { + pr_info("vdec class create fail.\n"); + return r; + } + + vdec_core->vdec_core_platform_device = pdev; + + platform_set_drvdata(pdev, vdec_core); + + for (i = 0; i < VDEC_IRQ_MAX; i++) { + vdec_core->isr_context[i].index = i; + vdec_core->isr_context[i].irq = -1; + } + + r = vdec_request_threaded_irq(VDEC_IRQ_0, NULL, NULL, + IRQF_ONESHOT, "vdec-0", NULL); + if (r < 0) { + pr_err("vdec interrupt request failed\n"); + return r; + } + + r = vdec_request_threaded_irq(VDEC_IRQ_1, NULL, NULL, + IRQF_ONESHOT, "vdec-1", NULL); + if (r < 0) { + pr_err("vdec interrupt request failed\n"); + return r; + } +#if 0 + if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) { + r = vdec_request_threaded_irq(VDEC_IRQ_HEVC_BACK, NULL, NULL, + IRQF_ONESHOT, "vdec-hevc_back", NULL); + if (r < 0) { + pr_err("vdec interrupt request failed\n"); + return r; + } + } +#endif + r = of_reserved_mem_device_init(&pdev->dev); + if (r == 0) + pr_info("vdec_probe done\n"); + + vdec_core->cma_dev = &pdev->dev; + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_M8) { + /* default to 250MHz */ + vdec_clock_hi_enable(); + } + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) { + /* set vdec dmc request to urgent */ + WRITE_DMCREG(DMC_AM5_CHAN_CTRL, 0x3f203cf); + } + INIT_LIST_HEAD(&vdec_core->connected_vdec_list); + spin_lock_init(&vdec_core->lock); + spin_lock_init(&vdec_core->canvas_lock); + spin_lock_init(&vdec_core->fps_lock); + spin_lock_init(&vdec_core->input_lock); + ida_init(&vdec_core->ida); + vdec_core->thread = 
kthread_run(vdec_core_thread, vdec_core, + "vdec-core"); + + vdec_core->vdec_core_wq = alloc_ordered_workqueue("%s",__WQ_LEGACY | + WQ_MEM_RECLAIM |WQ_HIGHPRI/*high priority*/, "vdec-work"); + /*work queue priority lower than vdec-core.*/ + + vdec_post_task_init(); + + /* power manager init. */ + vdec_core->pm = (struct power_manager_s *) + of_device_get_match_data(&pdev->dev); + if (vdec_core->pm->init) { + r = vdec_core->pm->init(&pdev->dev); + if (r) { + pr_err("vdec power manager init failed\n"); + return r; + } + pr_err("vdec power init success!\n"); + } + + return 0; +} + +static int vdec_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < VDEC_IRQ_MAX; i++) { + if (vdec_core->isr_context[i].irq >= 0) { + free_irq(vdec_core->isr_context[i].irq, + &vdec_core->isr_context[i]); + vdec_core->isr_context[i].irq = -1; + vdec_core->isr_context[i].dev_isr = NULL; + vdec_core->isr_context[i].dev_threaded_isr = NULL; + vdec_core->isr_context[i].dev_id = NULL; + } + } + + vdec_post_task_exit(); + + kthread_stop(vdec_core->thread); + + destroy_workqueue(vdec_core->vdec_core_wq); + + if (vdec_core->pm->release) + vdec_core->pm->release(&pdev->dev); + + class_unregister(&vdec_class); + + return 0; +} + +static struct mconfig vdec_configs[] = { + MC_PU32("debug_trace_num", &debug_trace_num), + MC_PI32("hevc_max_reset_count", &hevc_max_reset_count), + MC_PU32("clk_config", &clk_config), + MC_PI32("step_mode", &step_mode), + MC_PI32("poweron_clock_level", &poweron_clock_level), +}; +static struct mconfig_node vdec_node; + +extern const struct of_device_id amlogic_vdec_matches[]; + +static struct platform_driver vdec_driver = { + .probe = vdec_probe, + .remove = vdec_remove, + .driver = { + .name = "vdec", + .of_match_table = amlogic_vdec_matches, + } +}; + +static struct codec_profile_t amvdec_common_profile = { + .name = "vdec_common", + .profile = "vdec" +}; + +static struct codec_profile_t amvdec_input_profile = { + .name = "vdec_input", + .profile = 
"drm_framemode" +}; + +int vdec_module_init(void) +{ + if (platform_driver_register(&vdec_driver)) { + pr_info("failed to register vdec module\n"); + return -ENODEV; + } + INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node, + "vdec", vdec_configs, CONFIG_FOR_RW); + vcodec_profile_register(&amvdec_common_profile); + + vcodec_profile_register(&amvdec_input_profile); + return 0; +} +EXPORT_SYMBOL(vdec_module_init); + +void vdec_module_exit(void) +{ + platform_driver_unregister(&vdec_driver); +} +EXPORT_SYMBOL(vdec_module_exit); + +#if 0 +static int __init vdec_module_init(void) +{ + if (platform_driver_register(&vdec_driver)) { + pr_info("failed to register vdec module\n"); + return -ENODEV; + } + INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node, + "vdec", vdec_configs, CONFIG_FOR_RW); + return 0; +} + +static void __exit vdec_module_exit(void) +{ + platform_driver_unregister(&vdec_driver); +} +#endif + +static int vdec_mem_device_init(struct reserved_mem *rmem, struct device *dev) +{ + vdec_core->cma_dev = dev; + + return 0; +} + +static const struct reserved_mem_ops rmem_vdec_ops = { + .device_init = vdec_mem_device_init, +}; + +static int __init vdec_mem_setup(struct reserved_mem *rmem) +{ + rmem->ops = &rmem_vdec_ops; + pr_info("vdec: reserved mem setup\n"); + + return 0; +} + + +void vdec_set_vframe_comm(struct vdec_s *vdec, char *n) +{ + struct vdec_frames_s *mvfrm = vdec->mvfrm; + + if (!mvfrm) + return; + + mvfrm->comm.vdec_id = vdec->id; + + snprintf(mvfrm->comm.vdec_name, sizeof(mvfrm->comm.vdec_name)-1, + "%s", n); + mvfrm->comm.vdec_type = vdec->type; +} +EXPORT_SYMBOL(vdec_set_vframe_comm); + +u32 diff_pts(u32 a, u32 b) +{ + if (!a || !b) + return 0; + else + return abs(a - b); +} + +/* + * We only use the first 5 frames to calc duration. + * The fifo[0]~fifo[4] means the frame 0 to frame 4. + * we start to calculate the duration from frame 1. + * And the caller guarantees that slot > 0. 
 */
/*
 * cal_dur_from_pts() - estimate the frame duration from early PTS deltas
 * and push it to userspace via vdec_frame_rate_uevent().
 *
 * Heuristic state machine driven by the first MINIMUM_FRAMES writes of
 * the frame fifo; "ready" holds the current duration estimate (in 1/96000s
 * units like frame_dur) and "must_send" counts send attempts (capped at 2).
 *
 * NOTE(review): must_send/ready are function-static, so the state is
 * shared across all vdec instances — concurrent decoders would interleave;
 * presumably only one non-v4l stream runs at a time — confirm.
 * NOTE(review): abs(cur - old) below operates on u32 values, which wraps
 * when cur < old (kernel abs() does not negate unsigned) — the
 * "diff > DURATION_THRESHOD" test then fires spuriously; verify intent.
 */
static void cal_dur_from_pts(struct vdec_s *vdec, u32 slot)
{
#define DURATION_THRESHOD 10
	static u32 must_send = 0, ready = 0;
	u32 old = 0, cur, diff;
	struct vframe_counter_s *fifo = vdec->mvfrm->fifo_buf;

	/* First frame of a new stream: reset the estimator. */
	if (vdec->mvfrm->wr == 1) {
		ready = 0;
		must_send = 0;
	}

	/* Already reported twice — nothing more to do. */
	if (must_send == 2)
		return ;

	if (ready)
		++must_send;

	/* Non-H264/HEVC formats (or missing PTS): trust the in-driver
	 * frame_dur instead of computing from timestamps. */
	if ((vdec->format != VFORMAT_H264 && vdec->format != VFORMAT_HEVC) ||
		!fifo[slot].pts) {
		if (fifo[slot].frame_dur != ready) {
			if (must_send)
				ready = (ready + fifo[slot].frame_dur) / 2;
			else
				ready = fifo[slot].frame_dur;
			pr_debug("%s inner driver dur%u \n",__func__, ready);
		}
		goto end_handle;
	}

	/* Delta of the two most recent PTS pairs (caller ensures slot > 0). */
	if (slot == 1) {
		cur = diff_pts(fifo[1].pts, fifo[0].pts);
	} else {
		old = diff_pts(fifo[slot - 1].pts, fifo[slot - 2].pts);
		cur = diff_pts(fifo[slot].pts, fifo[slot - 1].pts);
	}

	diff = abs(cur - old);
	if (diff > DURATION_THRESHOD) {
		u32 dur, cur2;

		/* Convert PTS delta (90kHz ticks) into frame_dur units:
		 * dur = delta * 16 / 15  (96000/90000). */
		cur2 = (cur << 4) / 15;
		diff = abs(cur2 - fifo[slot].frame_dur);
		if (fifo[slot].frame_dur == 3600)	/* 3600 = default/unknown */
			dur = cur2;
		else if (diff < DURATION_THRESHOD || diff > fifo[slot].frame_dur)
			dur = fifo[slot].frame_dur;
		else
			dur = cur2;

		if (ready == dur)
			goto end_handle;

		if (must_send)
			ready = (ready + dur) / 2;
		else
			ready = dur;
		pr_debug("%s vstatus %u dur%u -> %u, revised %u\n",__func__,fifo[slot].frame_dur, cur,cur2, dur);
		if (diff > 10 && slot >= 2)
			pr_debug("wr=%u,slot=%u pts %u, %u, %u\n",vdec->mvfrm->wr,slot,
				fifo[slot].pts, fifo[slot-1].pts,fifo[slot-2].pts);
	}

end_handle:
	if (must_send) {
		++must_send;
		vdec_frame_rate_uevent(ready);
	}
}

/*
 * vdec_fill_vdec_frame() - append one decoded-frame record (QoS, vinfo,
 * vframe metadata, HW decode time) to the per-vdec ring buffer, and for
 * the first few frames of a non-v4l stream feed the duration estimator.
 */
void vdec_fill_vdec_frame(struct vdec_s *vdec, struct vframe_qos_s *vframe_qos,
	struct vdec_info *vinfo,struct vframe_s *vf,
	u32 hw_dec_time)
{
#define MINIMUM_FRAMES 5
	u32 i;
	struct vframe_counter_s *fifo_buf;
	struct vdec_frames_s *mvfrm = vdec->mvfrm;

	if (!mvfrm)
		return;
	fifo_buf = mvfrm->fifo_buf;

	/* assume fps==60,mv->wr max value
	can support system running 828 days,
	this is enough for us */
	i = mvfrm->wr & (NUM_FRAME_VDEC-1); //find the slot num in fifo_buf
	mvfrm->fifo_buf[i].decode_time_cost = hw_dec_time;
	if (vframe_qos)
		memcpy(&fifo_buf[i].qos, vframe_qos, sizeof(struct vframe_qos_s));
	if (vinfo) {
		/* Bulk-copy the contiguous field range frame_width..reserved[0]
		 * of vdec_info into the matching fields of the fifo slot. */
		memcpy(&fifo_buf[i].frame_width, &vinfo->frame_width,
			((char*)&vinfo->reserved[0] - (char*)&vinfo->frame_width));
		/*copy for ipb report*/
		memcpy(&fifo_buf[i].i_decoded_frames, &vinfo->i_decoded_frames,
			((char*)&vinfo->endipb_line[0] - (char*)&vinfo->i_decoded_frames));
		fifo_buf[i].av_resynch_counter = timestamp_avsync_counter_get();
	}
	if (vf) {
		fifo_buf[i].vf_type = vf->type;
		fifo_buf[i].signal_type = vf->signal_type;
		fifo_buf[i].pts = vf->pts;
		fifo_buf[i].pts_us64 = vf->pts_us64;

		/* Calculate the duration from pts */
		if (!vdec->is_v4l && (mvfrm->wr < MINIMUM_FRAMES && mvfrm->wr > 0))
			cal_dur_from_pts(vdec, i);
	}
	mvfrm->wr++;
}
EXPORT_SYMBOL(vdec_fill_vdec_frame);

/*
 * vdec_vframe_ready() - set or clear the secure-video flag on a frame
 * according to whether this vdec session carries DRM-protected content.
 */
void vdec_vframe_ready(struct vdec_s *vdec, struct vframe_s *vf) {
	if (vdec_secure(vdec)) {
		vf->flag |= VFRAME_FLAG_VIDEO_SECURE;
	} else {
		vf->flag &= ~VFRAME_FLAG_VIDEO_SECURE;
	}
}
EXPORT_SYMBOL(vdec_vframe_ready);

/*
 * set_meta_data_to_vf() - append one typed metadata record (header +
 * payload) to the frame's meta_data_buf.
 * @type: UVM_META_DATA_VF_BASE_INFOS or UVM_META_DATA_HDR10P_DATA.
 * @v4l2_ctx: owning aml_vcodec_ctx; supplies the meta buffer pool.
 *
 * A buffer is lazily taken from the context's round-robin pool the first
 * time a frame needs metadata.  Records that would overflow
 * META_DATA_SIZE are silently dropped.
 */
void set_meta_data_to_vf(struct vframe_s *vf, u32 type, void *v4l2_ctx)
{
	struct aml_vcodec_ctx *ctx =
		(struct aml_vcodec_ctx *)(v4l2_ctx);
	struct aml_meta_head_s head;
	struct aml_vf_base_info_s vfb_infos;

	if ((ctx == NULL) || (vf == NULL))
		return ;

	if (vf->meta_data_buf == NULL) {
		vf->meta_data_buf = ctx->meta_infos.meta_bufs[ctx->meta_infos.index].buf;
		ctx->meta_infos.index = (ctx->meta_infos.index + 1) % V4L_CAP_BUFF_MAX;
	}

	switch (type) {
	case UVM_META_DATA_VF_BASE_INFOS:
		if ((vf->meta_data_size + sizeof(struct aml_vf_base_info_s) + AML_META_HEAD_SIZE) <= META_DATA_SIZE) {
			head.magic = META_DATA_MAGIC;
			head.type = UVM_META_DATA_VF_BASE_INFOS;
			head.data_size = sizeof(struct aml_vf_base_info_s);

			/* Header first, then the payload, advancing meta_data_size. */
			memcpy(vf->meta_data_buf + vf->meta_data_size,
				&head, AML_META_HEAD_SIZE);
			vf->meta_data_size += AML_META_HEAD_SIZE;

			vfb_infos.width = vf->width;
			vfb_infos.height = vf->height;
			vfb_infos.duration = vf->duration;
			vfb_infos.frame_type = vf->frame_type;
			vfb_infos.type = vf->type;

			memcpy(vf->meta_data_buf + vf->meta_data_size,
				&vfb_infos, sizeof(struct aml_vf_base_info_s));
			vf->meta_data_size += sizeof(struct aml_vf_base_info_s);

			if (debug_meta) {
				pr_debug("vf->meta_data_size = %d\n", vf->meta_data_size);
				pr_debug("vf:width:%d height:%d duration:%d frame_type:%d type:%d\n",
					vfb_infos.width, vfb_infos.height, vfb_infos.duration,
					vfb_infos.frame_type, vfb_infos.type);
			}
		}
		break;
	case UVM_META_DATA_HDR10P_DATA:
		/* Same header+payload layout, payload is the raw HDR10+ blob. */
		if ((vf->meta_data_size + vf->hdr10p_data_size + AML_META_HEAD_SIZE) <= META_DATA_SIZE) {
			head.magic = META_DATA_MAGIC;
			head.type = UVM_META_DATA_HDR10P_DATA;
			head.data_size = vf->hdr10p_data_size;

			memcpy(vf->meta_data_buf + vf->meta_data_size,
				&head, AML_META_HEAD_SIZE);
			vf->meta_data_size += AML_META_HEAD_SIZE;

			memcpy(vf->meta_data_buf + vf->meta_data_size,
				vf->hdr10p_data_buf, vf->hdr10p_data_size);
			vf->meta_data_size += vf->hdr10p_data_size;

			if (debug_meta) {
				pr_debug("vf->meta_data_size = %d\n", vf->meta_data_size);
				pr_debug("vf->hdr10p_data_size = %d\n", vf->hdr10p_data_size);
			}
		}
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(set_meta_data_to_vf);

/* In this function,if we use copy_to_user, we may encounter sleep,
which may block the vdec_fill_vdec_frame,this is not acceptable
So, we should use a tmp buffer(passed by caller) to get the content */
/*
 * vdec_get_frame_vdec() - drain up to QOS_FRAME_NUM frame-counter records
 * from the per-vdec ring buffer into @tmpbuf (caller-provided, at least
 * QOS_FRAME_NUM entries).
 *
 * Return: number of records copied (0 if vdec/mvfrm is NULL or empty).
 */
u32 vdec_get_frame_vdec(struct vdec_s *vdec, struct vframe_counter_s *tmpbuf)
{
	u32 toread = 0;
	u32 slot_rd;
	struct vframe_counter_s *fifo_buf = NULL;
	struct vdec_frames_s *mvfrm = NULL;

	/*
	switch (version) {
	case version_1:
		f1();
	case version_2:
		f2();
	default:
		break;
	}
	*/

	if (!vdec)
		return 0;
	mvfrm = vdec->mvfrm;
	if (!mvfrm)
		return 0;

	fifo_buf = &mvfrm->fifo_buf[0];

	/* wr/rd are free-running counters; their difference is the backlog. */
	toread = mvfrm->wr - mvfrm->rd;
	if (toread) {
		if (toread >= NUM_FRAME_VDEC - QOS_FRAME_NUM) {
			/* round the fifo_buf length happens, give QOS_FRAME_NUM for buffer */
			mvfrm->rd = mvfrm->wr - (NUM_FRAME_VDEC - QOS_FRAME_NUM);
		}

		if (toread >= QOS_FRAME_NUM) {
			toread = QOS_FRAME_NUM; //by default, we use this num
		}

		slot_rd = mvfrm->rd &( NUM_FRAME_VDEC-1); //In this case it equals to x%y
		if (slot_rd + toread <= NUM_FRAME_VDEC) {
			memcpy(tmpbuf, &fifo_buf[slot_rd], toread*sizeof(struct vframe_counter_s));
		} else {
			/* Read wraps the ring: copy the tail, then the head. */
			u32 exeed;
			exeed = slot_rd + toread - NUM_FRAME_VDEC;
			memcpy(tmpbuf, &fifo_buf[slot_rd], (NUM_FRAME_VDEC - slot_rd)*sizeof(struct vframe_counter_s));
			memcpy(&tmpbuf[NUM_FRAME_VDEC-slot_rd], &fifo_buf[0], exeed*sizeof(struct vframe_counter_s));
		}

		mvfrm->rd += toread;
	}
	return toread;
}
EXPORT_SYMBOL(vdec_get_frame_vdec);

/*
 * get_double_write_ratio() - downscale ratio implied by a double-write
 * mode: modes 2/3 -> 1/4, 4/5 -> 1/2, 8/9 -> 1/8, anything else -> 1:1.
 */
int get_double_write_ratio(int dw_mode)
{
	int ratio = 1;

	if ((dw_mode == 2) ||
		(dw_mode == 3))
		ratio = 4;
	else if ((dw_mode == 4) ||
		(dw_mode == 5))
		ratio = 2;
	else if ((dw_mode == 8) ||
		(dw_mode == 9))
		ratio = 8;
	return ratio;
}
EXPORT_SYMBOL(get_double_write_ratio);

/* Update the VLD fifo write pointer; only valid for single-mode vdec. */
void vdec_set_vld_wp(struct vdec_s *vdec, u32 wp)
{
	if (vdec_single(vdec)) {
		WRITE_VREG(VLD_MEM_VIFIFO_WP, wp);
	}
}
EXPORT_SYMBOL(vdec_set_vld_wp);

/*
 * vdec_config_vld_reg() - reset the VLD block and program its input fifo
 * window [addr, addr+size) in manual mode; single-mode vdec only.
 */
void vdec_config_vld_reg(struct vdec_s *vdec, u32 addr, u32 size)
{
	if (vdec_single(vdec)) {
		WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
		/* reset VLD before
setting all pointers */ + WRITE_VREG(VLD_MEM_VIFIFO_WRAP_COUNT, 0); + /*TODO: only > m6*/ + WRITE_VREG(DOS_SW_RESET0, (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + + WRITE_VREG(POWER_CTL_VLD, 1 << 4); + + WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, + addr); + WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, + addr + size - 8); + WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR, + addr); + + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1); + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0); + + /* set to manual mode */ + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2); + WRITE_VREG(VLD_MEM_VIFIFO_WP, addr); + + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3); + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2); + + /* enable */ + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, + (0x11 << 16) | (1<<10) | (1 << 1) | (1 << 2)); + SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL, + 7 << 3); + } +} +EXPORT_SYMBOL(vdec_config_vld_reg); + +static void check_rdma_result(int num) +{ + int i, wr,rd,data; + int flag = 1; + for (i = 0; i < num; i++) { + wr = READ_VREG(HEVC_IQIT_SCALELUT_WR_ADDR); + rd = READ_VREG(HEVC_IQIT_SCALELUT_RD_ADDR); + data = READ_VREG(HEVC_IQIT_SCALELUT_DATA); + if (wr != (num & 0x3ff)) { + pr_info("--->HEVC_IQIT_SCALELUT_WR_ADDR = 0x%x\n", wr); + flag = 0; + break; + } + if (rd != i) { + pr_info("--->HEVC_IQIT_SCALELUT_RD_ADDR = 0x%x\n", rd); + flag = 0; + break; + } + + if (data != 0) { + pr_info("--->HEVC_IQIT_SCALELUT_DATA = 0x%x\n", data); + flag = 0; + break; + } + } + if (flag == 0) + pr_info("-->%d--rdma flail\n", i); + else + pr_info("rdma ok\n"); + return; + +} + +int is_rdma_enable(void) +{ + if (rdma_mode && (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3)) + return 1; + else + return 0; +} +EXPORT_SYMBOL(is_rdma_enable); + +void rdma_front_end_wrok(dma_addr_t ddr_phy_addr, u32 size) +{ + ulong expires; + + WRITE_VREG(HEVC_RDMA_F_CTRL, + (0X0 << 7) | //axi id + (0X7 << 3) | //axi length + (0X0 << 2) | //rdma force cg en + (0X1 << 1)); //rdma path en + WRITE_VREG(HEVC_RDMA_F_START_ADDR, ddr_phy_addr); //rdma start address + 
	/* NOTE(review): end address is hard-coded to a 0x100-byte span and the
	 * size parameter is ignored here (rdma_back_end_work uses size-1) —
	 * confirm whether front-end transfers are always 256 bytes. */
	WRITE_VREG(HEVC_RDMA_F_END_ADDR, ddr_phy_addr + 0xff); //rdma end address
	WRITE_VREG(HEVC_RDMA_F_STATUS0, 0X1); //trigger rdma start to work

	/* Poll the busy bit with a 2 second timeout. */
	expires = jiffies + msecs_to_jiffies(2000);
	while (1) {
		if ((READ_VREG(HEVC_RDMA_F_STATUS0) & 0x1) == 0) {
			//pr_info("rdma front_end done\n");
			break;
		}
		if (time_after(jiffies, expires)) {
			pr_info("wait rdma done timeout\n");
			break;
		}
	}

	if (rdma_mode & 0x2)
		check_rdma_result(SCALELUT_DATA_WRITE_NUM);

	return;
}
EXPORT_SYMBOL(rdma_front_end_wrok);

/*
 * rdma_back_end_work() - kick the HEVC back-end RDMA engine for the
 * buffer [back_ddr_phy_addr, back_ddr_phy_addr+size) and busy-wait
 * (2 s timeout) for completion; optionally verify via check_rdma_result.
 */
void rdma_back_end_work(dma_addr_t back_ddr_phy_addr, u32 size)
{
	ulong expires;

	WRITE_VREG(HEVC_RDMA_B_CTRL,
		(0X0 << 7) | //axi id
		(0X7 << 3) | //axi length
		(0X0 << 2) | //rdma force cg en
		(0X1 << 1)); //rdma path en
	WRITE_VREG(HEVC_RDMA_B_START_ADDR, back_ddr_phy_addr); //rdma start address
	WRITE_VREG(HEVC_RDMA_B_END_ADDR, back_ddr_phy_addr + size -1); //rdma end address
	WRITE_VREG(HEVC_RDMA_B_STATUS0, 0X1); //trigger rdma start to work

	expires = jiffies + msecs_to_jiffies(2000);
	while (1) {
		if ((READ_VREG(HEVC_RDMA_B_STATUS0) & 0x1) == 0) {
			//pr_info("rdma back_end done\n");
			break;
		}
		if (time_after(jiffies, expires)) {
			pr_info("wait rdma done timeout\n");
			break;
		}
	}
	if (rdma_mode & 0x2)
		check_rdma_result(SCALELUT_DATA_WRITE_NUM);

	return;
}
EXPORT_SYMBOL(rdma_back_end_work);

/* Bind the DT "vdec-memory" reserved region to vdec_mem_setup(). */
RESERVEDMEM_OF_DECLARE(vdec, "amlogic, vdec-memory", vdec_mem_setup);
/*
uint force_hevc_clock_cntl;
EXPORT_SYMBOL(force_hevc_clock_cntl);

module_param(force_hevc_clock_cntl, uint, 0664);
*/
/* Runtime tunables exposed under /sys/module/.../parameters. */
module_param(debug, uint, 0664);
module_param(debug_trace_num, uint, 0664);
module_param(hevc_max_reset_count, int, 0664);
module_param(clk_config, uint, 0664);
module_param(step_mode, int, 0664);
module_param(debugflags, int, 0664);
module_param(parallel_decode, int, 0664);
module_param(fps_detection, int, 0664);
module_param(fps_clear, int, 0664);
module_param(force_nosecure_even_drm, int, 0664);
module_param(disable_switch_single_to_mult, int, 0664);

module_param(debug_meta, uint, 0664);

module_param(frameinfo_flag, int, 0664);
MODULE_PARM_DESC(frameinfo_flag,
	"\n frameinfo_flag\n");
module_param(v4lvideo_add_di, int, 0664);
MODULE_PARM_DESC(v4lvideo_add_di,
	"\n v4lvideo_add_di\n");

module_param(v4lvideo_add_ppmgr, int, 0664);
MODULE_PARM_DESC(v4lvideo_add_ppmgr,
	"\n v4lvideo_add_ppmgr\n");

module_param(max_di_instance, int, 0664);
MODULE_PARM_DESC(max_di_instance,
	"\n max_di_instance\n");

module_param(max_supported_di_instance, int, 0664);
MODULE_PARM_DESC(max_supported_di_instance,
	"\n max_supported_di_instance\n");
module_param(debug_vdetect, int, 0664);
MODULE_PARM_DESC(debug_vdetect, "\n debug_vdetect\n");

module_param(enable_stream_mode_multi_dec, int, 0664);
MODULE_PARM_DESC(enable_stream_mode_multi_dec,
	"\n enable multi-decoding on stream mode. \n");

module_param(rdma_mode, int, 0664);
MODULE_PARM_DESC(rdma_mode, "\n rdma_enable\n");

/*
*module_init(vdec_module_init);
*module_exit(vdec_module_exit);
*/
/* Instantiate the tracepoints declared in vdec_trace.h exactly once. */
#define CREATE_TRACE_POINTS
#include "vdec_trace.h"
MODULE_DESCRIPTION("AMLOGIC vdec driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/utils/vdec.h b/drivers/frame_provider/decoder/utils/vdec.h new file mode 100644 index 0000000..a50bdb5 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec.h
/*
 * drivers/amlogic/media/frame_provider/decoder/utils/vdec.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#ifndef VDEC_H
#define VDEC_H
#include <linux/amlogic/media/utils/amports_config.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/irqreturn.h>
#include <linux/videodev2.h>
#include <linux/amlogic/media/utils/amstream.h>
#include <linux/amlogic/media/vfm/vframe.h>
#include <linux/amlogic/media/vfm/vframe_provider.h>
#include <linux/amlogic/media/vfm/vframe_receiver.h>
#define KERNEL_ATRACE_TAG KERNEL_ATRACE_TAG_VDEC
#include <trace/events/meson_atrace.h>
/*#define CONFIG_AM_VDEC_DV*/
#include "../../../stream_input/amports/streambuf.h"
#include "../../../stream_input/amports/stream_buffer_base.h"

#include "vdec_input.h"
#include "frame_check.h"
#include "vdec_sync.h"
#include "vdec_canvas_utils.h"

/* Character-device registration for the vdec core. */
s32 vdec_dev_register(void);
s32 vdec_dev_unregister(void);

/* Clock reconfiguration on stream parameter change, per core. */
int vdec_source_changed(int format, int width, int height, int fps);
int vdec2_source_changed(int format, int width, int height, int fps);
int hevc_source_changed(int format, int width, int height, int fps);
struct device *get_vdec_device(void);
int vdec_module_init(void);
void vdec_module_exit(void);

/* Maximum number of simultaneous decoder instances. */
#define MAX_INSTANCE_MUN  9

#define VDEC_DEBUG_SUPPORT

#define DEC_FLAG_HEVC_WORKAROUND 0x01

#define VDEC_FIFO_ALIGN 8

/* Hardware decoder cores; also the bit positions of the CORE_MASK_* values. */
enum vdec_type_e {
	VDEC_1 = 0,
	VDEC_HCODEC,
	VDEC_2,
	VDEC_HEVC,
	VDEC_HEVCB,
	VDEC_WAVE,
	VDEC_MAX
};

/* Pre-formatted per-instance trace event names (filled once per vdec). */
struct trace_decoder_name {
	char set_canvas0_addr[32];
	char get_canvas0_addr[32];
	char put_canvas0_addr[32];
	char vf_put_name[32];
	char vf_get_name[32];
	char vdec_name[32];
	char pts_name[32];
	char new_q_name[32];
	char disp_q_name[32];
	char decode_time_name[32];
	char decode_run_time_name[32];
	char decode_header_time_name[32];
	char decode_work_time_name[32];
	char decode_header_memory_time_name[32];
};


/* Milestones emitted on the decoder trace channel. */
enum e_trace_decoder_status {
	DECODER_RUN_START = 1,
	DECODER_RUN_END = 2,
	DECODER_ISR_HEAD_DONE = 3,
	DECODER_ISR_PIC_DONE = 4,
	DECODER_ISR_END = 5,
	DECODER_ISR_THREAD_HEAD_START = 6,
	DECODER_ISR_THREAD_PIC_DONE_START = 7,
	DECODER_ISR_THREAD_EDN = 8,
	DECODER_WORKER_START = 9,
	DECODER_WORKER_END = 10,
	DECODER_WORKER_AGAIN = 11,
	DECODER_ISR_SEI_DONE = 12,
	DECODER_ISR_THREAD_SEI_START = 13,
	DECODER_ISR_AUX_DONE = 14,
	DECODER_ISR_THREAD_AUX_START = 15,
	DECODER_ISR_THREAD_HEAD_END = 16,
};

enum e_trace_run_status {
	TRACE_RUN_LOADING_FW_START = 1,
	TRACE_RUN_LOADING_FW_END = 2,
	TRACE_RUN_LOADING_RESTORE_START = 3,
	TRACE_RUN_LOADING_RESTORE_END = 4,
};

enum e_trace_header_status {
	TRACE_HEADER_MEMORY_START = 1,
	TRACE_HEADER_MEMORY_END = 2,
	TRACE_HEADER_REGISTER_START = 3,
	TRACE_HEADER_REGISTER_END = 4,
	TRACE_HEADER_RPM_START = 5,
	TRACE_HEADER_RPM_END = 6,
};

enum e_trace_work_status {
	TRACE_WORK_WAIT_SEARCH_DONE_START = 1,
	TRACE_WORK_WAIT_SEARCH_DONE_END = 2,
};


/* Bits of the userspace-provided configuration flags. */
#define VDEC_CFG_FLAG_DV_TWOLARYER	(1 << 0)
#define VDEC_CFG_FLAG_DV_NEGATIVE	(1 << 1)
#define VDEC_CFG_FLAG_DIS_ERR_POLICY	(1 << 11)

#define VDEC_CFG_FLAG_PROG_ONLY	(1 << 16)

/* Metadata record types carried in vframe meta_data_buf. */
#define UVM_META_DATA_VF_BASE_INFOS	(1 << 0)
#define UVM_META_DATA_HDR10P_DATA	(1 << 1)

/* Scheduler core masks, derived from enum vdec_type_e. */
#define CORE_MASK_VDEC_1 (1 << VDEC_1)
#define CORE_MASK_HCODEC (1 << VDEC_HCODEC)
#define CORE_MASK_VDEC_2 (1 << VDEC_2)
#define CORE_MASK_HEVC (1 << VDEC_HEVC)
#define CORE_MASK_HEVC_FRONT (1 << VDEC_HEVC)
#define CORE_MASK_HEVC_BACK (1 << VDEC_HEVCB)
#define CORE_MASK_COMBINE (1UL << 31)

/* Capacity of a vframe's metadata buffer, in bytes. */
#define META_DATA_SIZE (256)

#define SEI_TYPE	(1)
#define DV_TYPE	(2)

/* Per-core power control. */
extern void vdec2_power_mode(int level);
extern void vdec_poweron(enum vdec_type_e core);
extern void vdec_poweroff(enum vdec_type_e core);
extern bool vdec_on(enum vdec_type_e core);
extern void vdec_power_reset(void);

/*irq num as same as .dts*/

/*
 *	interrupts = <0 3 1
 *		0 23 1
 *		0 32 1
 *		0 43 1
 *		0 44 1
 *		0 45 1>;
 *	interrupt-names = "vsync",
 *		"demux",
 *		"parser",
 *		"mailbox_0",
 *		"mailbox_1",
 *		"mailbox_2";
 */
enum vdec_irq_num {
	VSYNC_IRQ = 0,
	DEMUX_IRQ,
	PARSER_IRQ,
	VDEC_IRQ_0,
	VDEC_IRQ_1,
	VDEC_IRQ_2,
	VDEC_IRQ_HEVC_BACK,
	VDEC_IRQ_MAX,
};

/* Whether/when the frame-rate hint has been sent to the video layer. */
enum vdec_fr_hint_state {
	VDEC_NO_NEED_HINT = 0,
	VDEC_NEED_HINT,
	VDEC_HINTED,
};
extern s32 vdec_request_threaded_irq(enum vdec_irq_num num,
	irq_handler_t handler,
	irq_handler_t thread_fn,
	unsigned long irqflags,
	const char *devname, void *dev);
extern s32 vdec_request_irq(enum vdec_irq_num num, irq_handler_t handler,
	const char *devname, void *dev);
extern void vdec_free_irq(enum vdec_irq_num num, void *dev);

extern void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
unsigned int get_vdec_clk_config_settings(void);
void update_vdec_clk_config_settings(unsigned int config);
//unsigned int get_mmu_mode(void);//DEBUG_TMP
//extern void vdec_fill_frame_info(struct vframe_qos_s *vframe_qos, int debug);
extern void vdec_fill_vdec_frame(struct vdec_s *vdec,
	struct vframe_qos_s *vframe_qos,
	struct vdec_info *vinfo,
	struct vframe_s *vf, u32 hw_dec_time);

extern void vdec_vframe_ready(struct vdec_s *vdec, struct vframe_s *vf);
extern void vdec_set_vframe_comm(struct vdec_s *vdec, char *n);

struct vdec_s;
enum vformat_t;

/* stream based with single instance decoder
driver */
#define VDEC_TYPE_SINGLE 0

/* stream based with multi-instance decoder with HW resouce sharing */
#define VDEC_TYPE_STREAM_PARSER 1

/* frame based with multi-instance decoder, input block list based */
#define VDEC_TYPE_FRAME_BLOCK 2

/* frame based with multi-instance decoder, single circular input block */
#define VDEC_TYPE_FRAME_CIRCULAR 3

/* decoder status: uninitialized */
#define VDEC_STATUS_UNINITIALIZED 0

/* decoder status: before the decoder can start consuming data */
#define VDEC_STATUS_DISCONNECTED 1

/* decoder status: decoder should become disconnected once it's not active */
#define VDEC_STATUS_CONNECTED 2

/* decoder status: decoder owns HW resource and is running */
#define VDEC_STATUS_ACTIVE 3

#define VDEC_PROVIDER_NAME_SIZE 16
#define VDEC_RECEIVER_NAME_SIZE 16
#define VDEC_MAP_NAME_SIZE 90

#define VDEC_FLAG_OTHER_INPUT_CONTEXT 0x0
#define VDEC_FLAG_SELF_INPUT_CONTEXT 0x01

#define VDEC_NEED_MORE_DATA_RUN   0x01
#define VDEC_NEED_MORE_DATA_DIRTY 0x02
#define VDEC_NEED_MORE_DATA       0x04

#define SCALELUT_DATA_WRITE_NUM 1024
#define RDMA_SIZE (1024 * 4 * 4)

/*
 * Per-instance decoder state shared between the common core scheduler
 * and the per-format decoder drivers.  Callback members are filled in by
 * the format driver; everything else is managed by vdec.c.
 */
struct vdec_s {
	u32 magic;
	struct list_head list;
	unsigned long core_mask;	/* cores this vdec needs */
	unsigned long active_mask;	/* cores currently running it */
	unsigned long sched_mask;	/* cores it is scheduled on */
	int id;

	struct vdec_s *master;	/* dual-layer (DV) pairing */
	struct vdec_s *slave;
	struct stream_port_s *port;
	struct stream_buf_s vbuf;
	int status;
	int next_status;
	int type;		/* one of VDEC_TYPE_* */
	int port_flag;
	int format;
	u32 pts;
	u64 pts64;
	bool pts_valid;
	u64 timestamp;
	bool timestamp_valid;
	int flag;
	int sched;
	int need_more_data;
	u32 canvas_mode; //canvas block mode

	struct completion inactive_done;

	/* config (temp) */
	unsigned long mem_start;
	unsigned long mem_end;

	void *mm_blk_handle;

	struct device *cma_dev;
	struct platform_device *dev;
	struct dec_sysinfo sys_info_store;
	struct dec_sysinfo *sys_info;

	/* input */
	struct vdec_input_s input;

	/*frame check*/
	struct pic_check_mgr_t
	vfc;

	/* mc cache */
	u32 mc[4096 * 4];
	bool mc_loaded;
	u32 mc_type;
	/* frame provider/receiver interface */
	char vf_provider_name[VDEC_PROVIDER_NAME_SIZE];
	struct vframe_provider_s vframe_provider;
	char *vf_receiver_name;
	char vfm_map_id[VDEC_MAP_NAME_SIZE];
	char vfm_map_chain[VDEC_MAP_NAME_SIZE];
	int vf_receiver_inst;
	enum FRAME_BASE_VIDEO_PATH frame_base_video_path;
	enum vdec_fr_hint_state fr_hint_state;
	bool use_vfm_path;
	char config[PAGE_SIZE];
	int config_len;
	bool is_reset;
	bool dolby_meta_with_el;

	/* canvas */
	int (*get_canvas)(unsigned int index, unsigned int base);
	int (*get_canvas_ex)(int type, int id);
	void (*free_canvas_ex)(int index, int id);

	int (*dec_status)(struct vdec_s *vdec, struct vdec_info *vstatus);
	int (*set_trickmode)(struct vdec_s *vdec, unsigned long trickmode);
	int (*set_isreset)(struct vdec_s *vdec, int isreset);
	void (*vdec_fps_detec)(int id);

	/* scheduler hooks implemented by the format driver */
	unsigned long (*run_ready)(struct vdec_s *vdec, unsigned long mask);
	void (*run)(struct vdec_s *vdec, unsigned long mask,
			void (*callback)(struct vdec_s *, void *), void *);
	void (*reset)(struct vdec_s *vdec);
	void (*dump_state)(struct vdec_s *vdec);
	irqreturn_t (*irq_handler)(struct vdec_s *vdec, int irq);
	irqreturn_t (*threaded_irq_handler)(struct vdec_s *vdec, int irq);

	int (*user_data_read)(struct vdec_s *vdec,
			struct userdata_param_t *puserdata_para);
	void (*reset_userdata_fifo)(struct vdec_s *vdec, int bInit);
	void (*wakeup_userdata_poll)(struct vdec_s *vdec);
	/* private */
	void *private;       /* decoder per instance specific data */
#ifdef VDEC_DEBUG_SUPPORT
	u64 profile_start_clk[VDEC_MAX];
	u64 total_clk[VDEC_MAX];
	u32 check_count[VDEC_MAX];
	u32 not_run_ready_count[VDEC_MAX];
	u32 input_underrun_count[VDEC_MAX];
	u32 run_count[VDEC_MAX];
	u64 run_clk[VDEC_MAX];
	u64 start_run_clk[VDEC_MAX];
#endif
	u64 irq_thread_cnt;
	u64 irq_cnt;
	int parallel_dec;
	struct vdec_frames_s *mvfrm;
	struct
	vdec_sync *sync;

	/*aux data check*/
	struct aux_data_check_mgr_t adc;

	u32 hdr10p_data_size;
	char hdr10p_data_buf[PAGE_SIZE];
	bool hdr10p_data_valid;
	u32 profile_idc;
	u32 level_idc;
	bool prog_only;
	bool disable_vfm;
	char name[32];
	char dec_spend_time[32];
	char dec_spend_time_ave[32];
	u32 discard_start_data_flag;
	u32 video_id;
	int is_v4l;
	bool is_stream_mode_dv_multi;
	wait_queue_head_t idle_wait;
};

/* Pack four bytes into a big-endian u32 tag. */
#define CODEC_MODE(a, b, c, d)\
	(((u8)(a) << 24) | ((u8)(b) << 16) | ((u8)(c) << 8) | (u8)(d))

#define META_DATA_MAGIC	CODEC_MODE('M', 'E', 'T', 'A')
#define AML_META_HEAD_NUM	(8)
#define AML_META_HEAD_SIZE	(AML_META_HEAD_NUM * sizeof(u32))


/* Fixed-size header preceding every metadata record in meta_data_buf. */
struct aml_meta_head_s {
	u32 magic;
	u32 type;
	u32 data_size;
	u32 data[5];
};

/* Payload of a UVM_META_DATA_VF_BASE_INFOS record. */
struct aml_vf_base_info_s {
	u32 width;
	u32 height;
	u32 duration;
	u32 frame_type;
	u32 type;
	u32 data[12];
};

struct aml_meta_info_s {
	union {
		struct aml_meta_head_s head;
		u32 buf[AML_META_HEAD_NUM];
	};
	u8 data[0];
};

/* Deferred-task helper: see vdec_post_task() in vdec.c. */
typedef int (*post_task_handler)(void *args);

struct post_task_mgr_s {
	struct list_head task_recycle;
	struct task_struct *task;
	struct semaphore sem;
	struct mutex mutex;
	bool running;
	void *private;
};

struct vdec_post_task_parms_s {
	struct list_head recycle;
	struct task_struct *task;
	struct completion park;
	post_task_handler func;
	void *private;
	int scheduled;
};

#define MAX_USERDATA_CHANNEL_NUM 9

/* Shared userdata (CC/AFD) channel bookkeeping across instances. */
typedef struct {
	struct mutex mutex;
	wait_queue_head_t userdata_wait;
	u32 video_id;
	u32 set_id_flag;
	u32 ready_flag[MAX_USERDATA_CHANNEL_NUM];
	int used[MAX_USERDATA_CHANNEL_NUM];
	u32 id[MAX_USERDATA_CHANNEL_NUM];
} st_userdata;

/* common decoder vframe provider name to use default vfm path */
#define VFM_DEC_PROVIDER_NAME       "decoder"
#define VFM_DEC_DVBL_PROVIDER_NAME  "dvbldec"
#define VFM_DEC_DVEL_PROVIDER_NAME  "dveldec"

#define VFM_DEC_DVBL_PROVIDER_NAME2  "dvbldec2"
#define VFM_DEC_DVEL_PROVIDER_NAME2  "dveldec2"

#define hw_to_vdec(hw) ((struct vdec_s *) \
	(platform_get_drvdata(hw->platform_dev)))

/* Extract per-plane canvas indices from a packed canvas word. */
#define canvas_y(canvas) ((canvas) & 0xff)
#define canvas_u(canvas) (((canvas) >> 8) & 0xff)
#define canvas_v(canvas) (((canvas) >> 16) & 0xff)
#define canvas_y2(canvas) (((canvas) >> 16) & 0xff)
#define canvas_u2(canvas) (((canvas) >> 24) & 0xff)

/* Classify a vdec by its input type / security. */
#define vdec_frame_based(vdec) \
	(((vdec)->type == VDEC_TYPE_FRAME_BLOCK) || \
	((vdec)->type == VDEC_TYPE_FRAME_CIRCULAR))
#define vdec_stream_based(vdec) \
	(((vdec)->type == VDEC_TYPE_STREAM_PARSER) || \
	((vdec)->type == VDEC_TYPE_SINGLE))
#define vdec_single(vdec) \
	((vdec)->type == VDEC_TYPE_SINGLE)
#define vdec_dual(vdec) \
	(((vdec)->port->type & PORT_TYPE_DUALDEC) ||\
	(vdec_get_debug_flags() & 0x100))
#define vdec_secure(vdec) \
	(((vdec)->port_flag & PORT_FLAG_DRM))

/* Bounded snprintf-accumulate helpers for multi-part pr_info lines. */
#define PR_INIT(s) \
	int __len = 0, __size = s; \
	u8 __buf[s] = {0}

#define PR_FILL(args...) \
	do { \
		if ((__size - __len) <= 0) break; \
		__len += snprintf(__buf + __len, \
			__size - __len, ##args); \
	} while (0)

#define PR_INFO(id) \
	do { \
		if (__len == 0) break; \
		pr_info("[%d] %s\n", id, __buf); \
		__len = 0; \
	} while (0)

/* construct vdec strcture */
extern struct vdec_s *vdec_create(struct stream_port_s *port,
	struct vdec_s *master);

/* set video format */
extern int vdec_set_format(struct vdec_s *vdec, int format);

/* set PTS */
extern int vdec_set_pts(struct vdec_s *vdec, u32 pts);

extern int vdec_set_pts64(struct vdec_s *vdec, u64 pts64);

/* set vfm map when use frame base decoder */
extern int vdec_set_video_path(struct vdec_s *vdec, int video_path);

/* set receive id when receive is ionvideo or amlvideo */
extern int vdec_set_receive_id(struct vdec_s *vdec, int receive_id);

/* add frame data to input chain */
extern int vdec_write_vframe(struct vdec_s *vdec, const char *buf,
	size_t count);

extern int vdec_write_vframe_with_dma(struct vdec_s *vdec,
	ulong addr, size_t count, u32 handle, chunk_free free, void* priv);

/* mark the vframe_chunk as consumed */
extern void vdec_vframe_dirty(struct vdec_s *vdec,
	struct vframe_chunk_s *chunk);

/* prepare decoder input */
extern int vdec_prepare_input(struct vdec_s *vdec, struct vframe_chunk_s **p);

extern u32 vdec_offset_prepare_input(struct vdec_s *vdec, u32 consume_byte,
	u32 data_offset, u32 data_size);

/* clean decoder input */
extern void vdec_clean_input(struct vdec_s *vdec);

/* sync decoder input */
extern int vdec_sync_input(struct vdec_s *vdec);

/* enable decoder input */
extern void vdec_enable_input(struct vdec_s *vdec);

/* set decoder input prepare level */
extern void vdec_set_prepare_level(struct vdec_s *vdec, int level);

/* set vdec input */
extern int vdec_set_input_buffer(struct vdec_s *vdec, u32 start, u32 size);

/* check if decoder can get more input */
extern bool vdec_has_more_input(struct vdec_s *vdec);

/* allocate input chain
 * register vdec_device
 * create output, vfm or create ionvideo output
 * insert vdec to vdec_manager for scheduling
 */
extern int vdec_connect(struct vdec_s *vdec);

/* remove vdec from vdec_manager scheduling
 * release input chain
 * disconnect video output from ionvideo
 */
extern int vdec_disconnect(struct vdec_s *vdec);

/* release vdec structure */
extern int vdec_destroy(struct vdec_s *vdec);

/* reset vdec */
extern int vdec_reset(struct vdec_s *vdec);

extern int vdec_v4l2_reset(struct vdec_s *vdec, int flag);

extern void vdec_set_status(struct vdec_s *vdec, int status);

extern void vdec_set_next_status(struct vdec_s *vdec, int status);

extern int vdec_set_decinfo(struct vdec_s *vdec, struct dec_sysinfo *p);

extern int vdec_init(struct vdec_s *vdec, int is_4k, bool is_v4l);

extern void vdec_release(struct vdec_s *vdec);

extern int vdec_status(struct vdec_s *vdec, struct vdec_info *vstatus);

extern int vdec_set_trickmode(struct vdec_s
*vdec, unsigned long trickmode);

extern int vdec_set_isreset(struct vdec_s *vdec, int isreset);

extern int vdec_set_dv_metawithel(struct vdec_s *vdec, int isdvmetawithel);

extern void vdec_set_no_powerdown(int flag);

extern int vdec_is_support_4k(void);

extern void vdec_set_flag(struct vdec_s *vdec, u32 flag);

extern void vdec_set_eos(struct vdec_s *vdec, bool eos);

extern void vdec_set_next_sched(struct vdec_s *vdec, struct vdec_s *next_vdec);

/* human-readable state helpers for debugfs/logging */
extern const char *vdec_status_str(struct vdec_s *vdec);

extern const char *vdec_type_str(struct vdec_s *vdec);

extern const char *vdec_device_name_str(struct vdec_s *vdec);

extern void vdec_schedule_work(struct work_struct *work);

extern void vdec_count_info(struct vdec_info *vs, unsigned int err,
	unsigned int offset);

extern bool vdec_need_more_data(struct vdec_s *vdec);

extern void vdec_reset_core(struct vdec_s *vdec);

extern void hevc_reset_core(struct vdec_s *vdec);

extern void vdec_set_suspend_clk(int mode, int hevc);

/* scheduler entry points used by format drivers */
extern unsigned long vdec_ready_to_run(struct vdec_s *vdec, unsigned long mask);

extern void vdec_prepare_run(struct vdec_s *vdec, unsigned long mask);

extern void vdec_core_request(struct vdec_s *vdec, unsigned long mask);

extern int vdec_core_release(struct vdec_s *vdec, unsigned long mask);

extern bool vdec_core_with_input(unsigned long mask);

extern void vdec_core_finish_run(struct vdec_s *vdec, unsigned long mask);

#ifdef VDEC_DEBUG_SUPPORT
extern void vdec_set_step_mode(void);
#endif

extern void hevc_mmu_dma_check(struct vdec_s *vdec);

int vdec_read_user_data(struct vdec_s *vdec,
	struct userdata_param_t *p_userdata_param);

int vdec_wakeup_userdata_poll(struct vdec_s *vdec);

void vdec_reset_userdata_fifo(struct vdec_s *vdec, int bInit);

struct vdec_s *vdec_get_vdec_by_video_id(int video_id);
struct vdec_s *vdec_get_vdec_by_id(int vdec_id);


#ifdef VDEC_DEBUG_SUPPORT
extern void vdec_set_step_mode(void);
#endif

int vdec_get_debug_flags(void);

void VDEC_PRINT_FUN_LINENO(const char *fun, int line);

unsigned char is_mult_inc(unsigned int);

int vdec_get_status(struct vdec_s *vdec);

void vdec_set_timestamp(struct vdec_s *vdec, u64 timestamp);

void vdec_set_metadata(struct vdec_s *vdec, ulong meta_ptr);

extern u32 vdec_get_frame_vdec(struct vdec_s *vdec, struct vframe_counter_s *tmpbuf);

int vdec_get_frame_num(struct vdec_s *vdec);

int show_stream_buffer_status(char *buf,
	int (*callback) (struct stream_buf_s *, char *));

extern int get_double_write_ratio(int dw_mode);

bool is_support_no_parser(void);

int vdec_resource_checking(struct vdec_s *vdec);

void set_meta_data_to_vf(struct vframe_s *vf, u32 type, void *v4l2_ctx);

void vdec_set_profile_level(struct vdec_s *vdec, u32 profile_idc, u32 level_idc);

extern void vdec_stream_skip_data(struct vdec_s *vdec, int skip_size);

void vdec_set_vld_wp(struct vdec_s *vdec, u32 wp);

void vdec_config_vld_reg(struct vdec_s *vdec, u32 addr, u32 size);

extern u32 timestamp_avsync_counter_get(void);

void vdec_canvas_unlock(unsigned long flags);

unsigned long vdec_canvas_lock(void);

int vdec_get_core_nr(void);


int vdec_post_task(post_task_handler func, void *args);

void rdma_front_end_wrok(dma_addr_t ddr_phy_addr, u32 size);

void rdma_back_end_work(dma_addr_t back_ddr_phy_addr, u32 size);

int is_rdma_enable(void);

st_userdata *get_vdec_userdata_ctx(void);

void vdec_frame_rate_uevent(int dur);

void vdec_sync_irq(enum vdec_irq_num num);


#endif /* VDEC_H */
diff --git a/drivers/frame_provider/decoder/utils/vdec_canvas_utils.c b/drivers/frame_provider/decoder/utils/vdec_canvas_utils.c new file mode 100644 index 0000000..46fc003 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_canvas_utils.c
@@ -0,0 +1,415 @@ +/* + * drivers/amlogic/media/frame_provider/decoder/utils/vdec_canvas_utils.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include <linux/types.h> +#include "vdec_canvas_utils.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "vdec.h" + +static struct canvas_status_s canvas_stat[CANVAS_MAX_SIZE]; +static struct canvas_status_s mdec_cav_stat[MDEC_CAV_LUT_MAX]; +static struct canvas_config_s *mdec_cav_pool = NULL; + +extern u32 vdec_get_debug(void); + + +bool is_support_vdec_canvas(void) +{ + /* vdec canvas note: + * 1. canvas params config to display, do not use + * vf->canvasxAddr, should use vf->canvasxconfig[]. + * 2. the endian can not config with canvas. and hevc + * core should not config canvas. config endian in + * probe function like h265/vp9/av1/avs2. 
+ */ + if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5W)) + return true; + return false; +} +EXPORT_SYMBOL(is_support_vdec_canvas); + +static int get_canvas(unsigned int index, unsigned int base) +{ + int start; + int canvas_index = index * base; + int ret = 0; + + if ((base > 4) || (base == 0)) + return -1; + + if ((AMVDEC_CANVAS_START_INDEX + canvas_index + base - 1) + <= AMVDEC_CANVAS_MAX1) { + start = AMVDEC_CANVAS_START_INDEX + base * index; + } else { + canvas_index -= (AMVDEC_CANVAS_MAX1 - + AMVDEC_CANVAS_START_INDEX + 1) / base * base; + if (canvas_index <= AMVDEC_CANVAS_MAX2) + start = canvas_index / base; + else + return -1; + } + + if (base == 1) { + ret = start; + } else if (base == 2) { + ret = ((start + 1) << 16) | ((start + 1) << 8) | start; + } else if (base == 3) { + ret = ((start + 2) << 16) | ((start + 1) << 8) | start; + } else if (base == 4) { + ret = (((start + 3) << 24) | (start + 2) << 16) | + ((start + 1) << 8) | start; + } + + return ret; +} + +static int get_canvas_ex(int type, int id) +{ + int i; + unsigned long flags; + + flags = vdec_canvas_lock(); + + for (i = 0; i < CANVAS_MAX_SIZE; i++) { + /*0x10-0x15 has been used by rdma*/ + if ((i >= 0x10) && (i <= 0x15)) + continue; + if ((canvas_stat[i].type == type) && + (canvas_stat[i].id & (1 << id)) == 0) { + canvas_stat[i].canvas_used_flag++; + canvas_stat[i].id |= (1 << id); + if (vdec_get_debug() & 4) + pr_debug("get used canvas %d\n", i); + vdec_canvas_unlock(flags); + if (i < AMVDEC_CANVAS_MAX2 + 1) + return i; + else + return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1); + } + } + + for (i = 0; i < CANVAS_MAX_SIZE; i++) { + /*0x10-0x15 has been used by rdma*/ + if ((i >= 0x10) && (i <= 0x15)) + continue; + if (canvas_stat[i].type == 0) { + canvas_stat[i].type = type; + canvas_stat[i].canvas_used_flag = 1; + canvas_stat[i].id = (1 << id); + if (vdec_get_debug() & 
4) { + pr_debug("get canvas %d\n", i); + pr_debug("canvas_used_flag %d\n", + canvas_stat[i].canvas_used_flag); + pr_debug("canvas_stat[i].id %d\n", + canvas_stat[i].id); + } + vdec_canvas_unlock(flags); + if (i < AMVDEC_CANVAS_MAX2 + 1) + return i; + else + return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1); + } + } + vdec_canvas_unlock(flags); + + pr_info("cannot get canvas\n"); + + return -1; +} + +static void free_canvas_ex(int index, int id) +{ + unsigned long flags; + int offset; + + flags = vdec_canvas_lock(); + if (index >= 0 && + index < AMVDEC_CANVAS_MAX2 + 1) + offset = index; + else if ((index >= AMVDEC_CANVAS_START_INDEX) && + (index <= AMVDEC_CANVAS_MAX1)) + offset = index + AMVDEC_CANVAS_MAX2 + 1 - AMVDEC_CANVAS_START_INDEX; + else { + vdec_canvas_unlock(flags); + return; + } + + if ((canvas_stat[offset].canvas_used_flag > 0) && + (canvas_stat[offset].id & (1 << id))) { + canvas_stat[offset].canvas_used_flag--; + canvas_stat[offset].id &= ~(1 << id); + if (canvas_stat[offset].canvas_used_flag == 0) { + canvas_stat[offset].type = 0; + canvas_stat[offset].id = 0; + } + if (vdec_get_debug() & 4) { + pr_debug("free index %d used_flag %d, type = %d, id = %d\n", + offset, + canvas_stat[offset].canvas_used_flag, + canvas_stat[offset].type, + canvas_stat[offset].id); + } + } + vdec_canvas_unlock(flags); + + return; +} + + +static int get_internal_cav_lut(unsigned int index, unsigned int base) +{ + int start; + int canvas_index = index * base; + int ret = 0; + + if ((base > 4) || (base == 0)) + return -1; + + if (canvas_index + base - 1 < MDEC_CAV_LUT_MAX) + start = canvas_index; + else + return -1; + + if (base == 1) { + ret = start; + } else if (base == 2) { + ret = ((start + 1) << 16) | ((start + 1) << 8) | start; + } else if (base == 3) { + ret = ((start + 2) << 16) | ((start + 1) << 8) | start; + } else if (base == 4) { + ret = (((start + 3) << 24) | (start + 2) << 16) | + ((start + 1) << 8) | start; + } + + return ret; +} + +static int 
get_internal_cav_lut_ex(int type, int id) +{ + int i; + unsigned long flags; + + flags = vdec_canvas_lock(); + + for (i = 0; i < MDEC_CAV_LUT_MAX; i++) { + if ((mdec_cav_stat[i].type == type) && + (mdec_cav_stat[i].id & (1 << id)) == 0) { + mdec_cav_stat[i].canvas_used_flag++; + mdec_cav_stat[i].id |= (1 << id); + if (vdec_get_debug() & 4) + pr_debug("get used cav lut %d\n", i); + vdec_canvas_unlock(flags); + return i; + } + } + + for (i = 0; i < MDEC_CAV_LUT_MAX; i++) { + if (mdec_cav_stat[i].type == 0) { + mdec_cav_stat[i].type = type; + mdec_cav_stat[i].canvas_used_flag = 1; + mdec_cav_stat[i].id = (1 << id); + if (vdec_get_debug() & 4) + pr_debug("get cav lut %d\n", i); + vdec_canvas_unlock(flags); + return i; + } + } + vdec_canvas_unlock(flags); + + pr_info("cannot get cav lut\n"); + + return -1; +} + +static void free_internal_cav_lut(int index, int id) +{ + unsigned long flags; + int offset; + + flags = vdec_canvas_lock(); + if (index > 0 && index < MDEC_CAV_LUT_MAX) + offset = index; + else { + vdec_canvas_unlock(flags); + return; + } + if ((mdec_cav_stat[offset].canvas_used_flag > 0) && + (mdec_cav_stat[offset].id & (1 << id))) { + mdec_cav_stat[offset].canvas_used_flag--; + mdec_cav_stat[offset].id &= ~(1 << id); + if (mdec_cav_stat[offset].canvas_used_flag == 0) { + mdec_cav_stat[offset].type = 0; + mdec_cav_stat[offset].id = 0; + } + if (vdec_get_debug() & 4) { + pr_debug("free index %d used_flag %d, type = %d, id = %d\n", + offset, + mdec_cav_stat[offset].canvas_used_flag, + mdec_cav_stat[offset].type, + mdec_cav_stat[offset].id); + } + } + vdec_canvas_unlock(flags); + + return; +} + +unsigned long vdec_cav_get_addr(int index) +{ + if (index < 0 || index >= MDEC_CAV_LUT_MAX) { + pr_err("%s, error index %d\n", __func__, index); + return -1; + } + + return mdec_cav_pool[index & MDEC_CAV_INDEX_MASK].phy_addr; +} +EXPORT_SYMBOL(vdec_cav_get_addr); + +unsigned int vdec_cav_get_width(int index) +{ + if (index < 0 || index >= MDEC_CAV_LUT_MAX) { + pr_err("%s, 
error index %d\n", __func__, index); + return -1; + } + + return mdec_cav_pool[index & MDEC_CAV_INDEX_MASK].width; +} +EXPORT_SYMBOL(vdec_cav_get_width); + +unsigned int vdec_cav_get_height(int index) +{ + if (index < 0 || index >= MDEC_CAV_LUT_MAX) { + pr_err("%s, error index %d\n", __func__, index); + return -1; + } + return mdec_cav_pool[index & MDEC_CAV_INDEX_MASK].height; +} +EXPORT_SYMBOL(vdec_cav_get_height); + +void cav_lut_info_store(u32 index, ulong addr, u32 width, + u32 height, u32 wrap, u32 blkmode, u32 endian) +{ + struct canvas_config_s *pool = NULL; + + if (index < 0 || index >= MDEC_CAV_LUT_MAX) { + pr_err("%s, error index %d\n", __func__, index); + return; + } + if (mdec_cav_pool == NULL) + mdec_cav_pool = vzalloc(sizeof(struct canvas_config_s) + * (MDEC_CAV_LUT_MAX + 1)); + + if (mdec_cav_pool == NULL) { + pr_err("%s failed, mdec_cav_pool null\n", __func__); + return; + } + pool = &mdec_cav_pool[index]; + pool->width = width; + pool->height = height; + pool->block_mode = blkmode; + pool->endian = endian; + pool->phy_addr = addr; +} + +void config_cav_lut_ex(u32 index, ulong addr, u32 width, + u32 height, u32 wrap, u32 blkmode, + u32 endian, enum vdec_type_e core) +{ + unsigned long datah_temp, datal_temp; + + if (!is_support_vdec_canvas()) { + canvas_config_ex(index, addr, width, height, wrap, blkmode, endian); + if (vdec_get_debug() & 0x40000000) { + pr_info("%s %2d) addr: %lx, width: %d, height: %d, blkm: %d, endian: %d\n", + __func__, index, addr, width, height, blkmode, endian); + } + } else { + /* + datal_temp = (cav_lut.start_addr & 0x1fffffff) | + ((cav_lut.cav_width & 0x7 ) << 29 ); + datah_temp = ((cav_lut.cav_width >> 3) & 0x1ff) | + (( cav_lut.cav_hight & 0x1fff) <<9 ) | + ((cav_lut.x_wrap_en & 1) << 22 ) | + (( cav_lut.y_wrap_en & 1) << 23) | + (( cav_lut.blk_mode & 0x3) << 24); + */ + u32 addr_bits_l = ((((addr + 7) >> 3) & CANVAS_ADDR_LMASK) << CAV_WADDR_LBIT); + u32 width_l = ((((width + 7) >> 3) & CANVAS_WIDTH_LMASK) << 
CAV_WIDTH_LBIT); + u32 width_h = ((((width + 7) >> 3) >> CANVAS_WIDTH_LWID) << CAV_WIDTH_HBIT); + u32 height_h = (height & CANVAS_HEIGHT_MASK) << CAV_HEIGHT_HBIT; + u32 blkmod_h = (blkmode & CANVAS_BLKMODE_MASK) << CAV_BLKMODE_HBIT; + u32 switch_bits_ctl = (endian & 0xf) << CAV_ENDIAN_HBIT; + u32 wrap_h = (0 << 23); + datal_temp = addr_bits_l | width_l; + datah_temp = width_h | height_h | wrap_h | blkmod_h | switch_bits_ctl; + + if (core == VDEC_1) { + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T3) && (endian == 7)) + WRITE_VREG(MDEC_CAV_CFG0, 0x1ff << 17); + else + WRITE_VREG(MDEC_CAV_CFG0, 0); //[0]canv_mode, by default is non-canv-mode + WRITE_VREG(MDEC_CAV_LUT_DATAL, datal_temp); + WRITE_VREG(MDEC_CAV_LUT_DATAH, datah_temp); + WRITE_VREG(MDEC_CAV_LUT_ADDR, index); + } + + cav_lut_info_store(index, addr, width, height, wrap, blkmode, endian); + + if (vdec_get_debug() & 0x40000000) { + pr_info("%s %2d) addr: %lx, width: %d, height: %d, blkm: %d, endian: %d\n", + __func__, index, addr, width, height, blkmode, endian); + pr_info("data(h,l): 0x%8lx, 0x%8lx\n", datah_temp, datal_temp); + } + } +} +EXPORT_SYMBOL(config_cav_lut_ex); + +void config_cav_lut(int index, struct canvas_config_s *cfg, + enum vdec_type_e core) +{ + config_cav_lut_ex(index, + cfg->phy_addr, + cfg->width, + cfg->height, + CANVAS_ADDR_NOWRAP, + cfg->block_mode, + cfg->endian, + core); +} +EXPORT_SYMBOL(config_cav_lut); + + +void vdec_canvas_port_register(struct vdec_s *vdec) +{ + if (is_support_vdec_canvas()) { + vdec->get_canvas = get_internal_cav_lut; + vdec->get_canvas_ex = get_internal_cav_lut_ex; + vdec->free_canvas_ex = free_internal_cav_lut; + if (mdec_cav_pool == NULL) { + mdec_cav_pool = vzalloc(sizeof(struct canvas_config_s) + * (MDEC_CAV_LUT_MAX + 1)); + } + } else { + vdec->get_canvas = get_canvas; + vdec->get_canvas_ex = get_canvas_ex; + vdec->free_canvas_ex = free_canvas_ex; + } +} +EXPORT_SYMBOL(vdec_canvas_port_register);
diff --git a/drivers/frame_provider/decoder/utils/vdec_canvas_utils.h b/drivers/frame_provider/decoder/utils/vdec_canvas_utils.h new file mode 100644 index 0000000..add83cf --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_canvas_utils.h
/*
 * drivers/amlogic/media/frame_provider/decoder/utils/vdec_canvas_utils.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#ifndef __CANVAS_UTILS_H__
#define __CANVAS_UTILS_H__

#include "vdec.h"

/* external canvas */
/* Allocation state of one canvas / CAV LUT table slot. */
struct canvas_status_s {
	int type;		/* owner type; 0 marks the slot free */
	int canvas_used_flag;	/* reference count of current users */
	int id;			/* bitmask of caller ids sharing the slot */
};

/* total slots tracked by the legacy allocator: the
 * AMVDEC_CANVAS_START_INDEX..AMVDEC_CANVAS_MAX1 range plus 0..AMVDEC_CANVAS_MAX2
 */
#define CANVAS_MAX_SIZE (AMVDEC_CANVAS_MAX1 - AMVDEC_CANVAS_START_INDEX + 1 + AMVDEC_CANVAS_MAX2 + 1)


/* internal canvas */
/* field masks/widths used when packing a CAV LUT entry */
#define CANVAS_ADDR_LMASK 0x1fffffff
#define CANVAS_WIDTH_LMASK 0x7
#define CANVAS_WIDTH_LWID 3
#define CANVAS_HEIGHT_MASK 0x1fff
#define CANVAS_BLKMODE_MASK 3

/* bit positions inside MDEC_CAV_LUT_DATAL / MDEC_CAV_LUT_DATAH; the
 * "HBIT" values are given relative to bit 32 of the 64-bit entry
 */
#define CAV_WADDR_LBIT 0
#define CAV_WIDTH_LBIT 29
#define CAV_WIDTH_HBIT 0
#define CAV_HEIGHT_HBIT (41 - 32)
#define CAV_WRAPX_HBIT (54 - 32)
#define CAV_WRAPY_HBIT (55 - 32)
#define CAV_BLKMODE_HBIT (56 - 32)
#define CAV_ENDIAN_HBIT (58 - 32)

/* number of internal CAV LUT entries and the matching index mask */
#define MDEC_CAV_LUT_MAX 128
#define MDEC_CAV_INDEX_MASK 0x7f

enum vdec_type_e;

/* Program one LUT/canvas entry from a canvas_config_s (no wrap). */
void config_cav_lut(int index, struct canvas_config_s *cfg, enum vdec_type_e core);

/* Program one LUT/canvas entry from explicit fields. */
void config_cav_lut_ex(u32 index, ulong addr, u32 width,
	u32 height, u32 wrap, u32 blkmode,
	u32 endian, enum vdec_type_e core);

/* Read back fields previously stored for LUT entry @index. */
unsigned int vdec_cav_get_width(int index);

unsigned int vdec_cav_get_height(int index);

unsigned long vdec_cav_get_addr(int index);

/* True when the SoC uses the decoder-internal CAV LUT. */
bool is_support_vdec_canvas(void);

/* Install the canvas allocator callbacks matching this SoC into @vdec. */
void vdec_canvas_port_register(struct vdec_s *vdec);

#endif
diff --git a/drivers/frame_provider/decoder/utils/vdec_feature.c b/drivers/frame_provider/decoder/utils/vdec_feature.c new file mode 100644 index 0000000..ea3a75d --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_feature.c
@@ -0,0 +1,522 @@ +/* + * drivers/amlogic/media/stream_input/amports/amstream_profile.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "vdec.h" + + + +//static const struct codec_profile_t *vcodec_feature[SUPPORT_VDEC_NUM] = { 0 }; + +struct vcodec_feature { + int format; + int is_v4l; +}; + +static struct vcodec_feature feature[SUPPORT_VDEC_NUM]; + +static int vcodec_feature_idx; +static ulong last_time; +u8 buf[4096]; + + +static const char * const format_name[] = { + "ammvdec_mpeg12", + "ammvdec_mpeg4", + "ammvdec_h264", + "ammvdec_mjpeg", + "ammvdec_real", + "ammjpegdec", + "ammvdec_vc1", + "ammvdec_avs", + "ammvdec_yuv", + "ammvdec_h264mvc", + "ammvdec_h264_4k2k", + "ammvdec_h265", + "amvenc_avc", + "jpegenc", + "ammvdec_vp9", + "ammvdec_avs2", + "ammvdec_av1", +}; + +static int vcodec_feature_CC(u8 *buf, int size, int vformat, int is_v4l) +{ + u8 *pbuf = buf; + + if (!is_v4l) { + switch (vformat) { + case VFORMAT_MPEG12: + case VFORMAT_H264: + case VFORMAT_AVS: + pbuf += snprintf(pbuf, size, " \"CC subtitle\" : \"true\"\n"); + break; + default: + break; + } + } + + return pbuf - buf; +} + +static int vcodec_feature_report_information(u8 *buf, int size, int vformat, 
int is_v4l) +{ + u8 *pbuf = buf; + + if (!is_v4l) { + pbuf += snprintf(pbuf, size, " \"Decoder information report\" : \"true\"\n"); + } + + return pbuf - buf; +} + +static int vcodec_feature_i_only_mode(u8 *buf, int size, int vformat) +{ + u8 *pbuf = buf; + + switch (vformat) { + case VFORMAT_MPEG12: + case VFORMAT_H264: + case VFORMAT_MPEG4: + case VFORMAT_HEVC: + case VFORMAT_AVS2: + pbuf += snprintf(pbuf, size, " \"I only mode\" : \"true\"\n"); + break; + default: + break; + } + + return pbuf - buf; +} + +static int vcodec_feature_dolbyVison(u8 *buf, int size, int vformat) +{ + u8 *pbuf = buf; + + switch (vformat) { + case VFORMAT_H264: + case VFORMAT_HEVC: + case VFORMAT_AV1: + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_S4)) + pbuf += snprintf(pbuf, size, " \"DolbyVision\" : \"true\"\n"); + break; + default: + break; + } + + return pbuf - buf; +} + +static int vcodec_feature_HDR(u8 *buf, int size, int vformat) +{ + u8 *pbuf = buf; + + switch (vformat) { + case VFORMAT_H264: + case VFORMAT_HEVC: + case VFORMAT_AV1: + case VFORMAT_AVS2: + case VFORMAT_VP9: + pbuf += snprintf(pbuf, size, " \"HDR\" : \"true\"\n"); + break; + default: + break; + } + + return pbuf - buf; +} + + +static int vcodec_feature_doublewrite(u8 *buf, int size, int vformat) +{ + u8 *pbuf = buf; + int tsize = 0; + int s; + + switch (vformat) { + case VFORMAT_HEVC: + case VFORMAT_VP9: + case VFORMAT_AVS2: + case VFORMAT_AV1: + s = snprintf(pbuf, size - tsize, " \"DoubleWrite\" "); + tsize += s; + pbuf += s; + if ((get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T7) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T3)) { + s = snprintf(pbuf, size - tsize, "[ \"0\", \"1\", \"2\", \"3\", \"4\", \"0x10\", \"0x10000\", \"0x20000\"]\n"); + tsize += s; + pbuf += s; + } + else { + s = snprintf(pbuf, size - tsize, "[ \"0\", \"1\", \"2\", 
\"3\", \"4\", \"8\", \"0x10\", \"0x10000\", \"0x20000\"]\n"); + tsize += s; + pbuf += s; + } + break; + default: + break; + } + + return pbuf - buf; +} + +static int vcodec_feature_vdec_fence(u8 *buf, int size, int vformat) +{ + u8 *pbuf = buf; + + switch (vformat) { + case VFORMAT_H264: + case VFORMAT_HEVC: + case VFORMAT_VP9: + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) + pbuf += snprintf(pbuf, size, " \"GameMode\" : \"true\"\n"); + break; + default: + break; + } + + return pbuf - buf; +} + +static int vcodec_feature_bitdepth(u8 *buf, int size, int vformat) +{ + u8 *pbuf = buf; + + switch (vformat) { + case VFORMAT_HEVC: + case VFORMAT_VP9: + case VFORMAT_AVS2: + case VFORMAT_AV1: + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB) + pbuf += snprintf(pbuf, size, " \"BitDepth\" : \"10\"\n"); + else + pbuf += snprintf(pbuf, size, " \"BitDepth\" : \"8\"\n"); + break; + default: + break; + } + + return pbuf - buf; +} + + +static int vcodec_feature_MaxResolution(u8 *buf, int size, int vformat) +{ + u8 *pbuf = buf; + + switch (vformat) { + case VFORMAT_HEVC: + case VFORMAT_VP9: + case VFORMAT_AVS2: + case VFORMAT_AV1: + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D)) + pbuf += snprintf(pbuf, size, " \"MaximumResolution\" : \"8k\"\n"); + else if (vdec_is_support_4k() && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D)) + pbuf += snprintf(pbuf, size, " \"MaximumResolution\" : \"4k60\"\n"); + else + pbuf += snprintf(pbuf, size, " \"MaximumResolution\" : \"1080p60\"\n"); + break; + case VFORMAT_H264: + if (vdec_is_support_4k()) + pbuf += snprintf(pbuf, size, " \"MaximumResolution\" : \"4k30\"\n"); + else + pbuf += snprintf(pbuf, size, " \"MaximumResolution\" : \"1080p60\"\n"); + break; + case VFORMAT_MPEG12: + case VFORMAT_MPEG4: + case VFORMAT_MJPEG: + case VFORMAT_VC1: + case VFORMAT_AVS: + pbuf += snprintf(pbuf, size, " \"MaximumResolution\" : \"1080p60\"\n"); + break; + default: + break; + } 
+ + return pbuf - buf; +} + +static int vcodec_feature_clock(u8 *buf, int size, int vformat) +{ + u8 *pbuf = buf; + + switch (vformat) { + case VFORMAT_HEVC: + case VFORMAT_VP9: + case VFORMAT_AVS2: + case VFORMAT_AV1: + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12B) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_GXLX2) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D)) + pbuf += snprintf(pbuf, size, " \"ClockFrequency\" : \"800MHZ\"\n"); + else + pbuf += snprintf(pbuf, size, " \"ClockFrequency\" : \"667MHZ\"\n"); + break; + case VFORMAT_H264: + case VFORMAT_MPEG12: + case VFORMAT_MPEG4: + case VFORMAT_MJPEG: + case VFORMAT_VC1: + case VFORMAT_AVS: + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_GXLX2) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D)) + pbuf += snprintf(pbuf, size, " \"ClockFrequency\" : \"800MHZ\"\n"); + else + pbuf += snprintf(pbuf, size, " \"ClockFrequency\" : \"667MHZ\"\n"); + break; + default: + break; + } + + return pbuf - buf; +} + +static int vcodec_feature_support_format(int vformat) +{ + + switch (vformat) { + case VFORMAT_VP9: + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5)) + return 1; + else + return 0; + case VFORMAT_AVS2: + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D)) + return 1; + else + return 0; + case VFORMAT_AV1: + if (((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_TM2) && + is_cpu_tm2_revb()) || + ((get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5))) + return 1; + else + return 0; + case VFORMAT_AVS: + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM)) + return 1; + else + return 0; + case VFORMAT_MJPEG: + 
case VFORMAT_VC1: + case VFORMAT_MPEG4: + case VFORMAT_MPEG12: + case VFORMAT_H264: + case VFORMAT_HEVC: + return 1; + default: + break; + } + + return 0; +} + +static int vcodec_feature_FCC(u8 *buf, int size, int vformat, int is_v4l) +{ + u8 *pbuf = buf; + + if (!is_v4l) { + switch (vformat) { + case VFORMAT_HEVC: + case VFORMAT_H264: + case VFORMAT_MPEG12: + pbuf += snprintf(pbuf, size, " \"Decoder FCC support\" : \"true\"\n"); + break; + default: + break; + } + } + + return pbuf - buf; +} + +static int vcodec_feature_RDMA(u8 *buf, int size, int vformat) +{ + u8 *pbuf = buf; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) + pbuf += snprintf(pbuf, size, " \"Decoder RDMA support\" : \"true\"\n"); + + return pbuf - buf; +} + +static int vcodec_feature_v4ldec_nr(u8 *buf, int size, int vformat, int is_v4l) +{ + u8 *pbuf = buf; + + if (is_v4l) { + pbuf += snprintf(pbuf, size, " \"V4ldec nr\" : \"true\"\n"); + } + + return pbuf - buf; +} + +static int vcodec_feature_ge2d_wrapper(u8 *buf, int size, int vformat, int is_v4l) +{ + u8 *pbuf = buf; + + if (is_v4l && (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7)) { + pbuf += snprintf(pbuf, size, " \"Ge2d wrapper\" : \"true\"\n"); + } + + return pbuf - buf; +} + + + +int vcodec_feature_get_feature(u8 *buf, int vformat, int is_v4l) +{ + u8 *pbuf = buf; + int size = PAGE_SIZE; + int tsize = 0; + int s; + + s = snprintf(pbuf, size - tsize, " \"%s%s\": ", format_name[vformat], is_v4l ? 
"_v4l" : ""); + tsize += s; + pbuf += s; + + s = snprintf(pbuf, size - tsize, "{\n"); + tsize += s; + pbuf += s; + + s = vcodec_feature_CC(pbuf, size - tsize, vformat, is_v4l); + tsize += s; + pbuf += s; + + s = vcodec_feature_report_information(pbuf, size - tsize, vformat, is_v4l); + tsize += s; + pbuf += s; + + s = vcodec_feature_vdec_fence(pbuf, size - tsize, vformat); + tsize += s; + pbuf += s; + + s = vcodec_feature_i_only_mode(pbuf, size - tsize, vformat); + tsize += s; + pbuf += s; + + s = vcodec_feature_dolbyVison(pbuf, size - tsize, vformat); + tsize += s; + pbuf += s; + + s = vcodec_feature_HDR(pbuf, size - tsize, vformat); + tsize += s; + pbuf += s; + + s = vcodec_feature_doublewrite(pbuf, size - tsize, vformat); + tsize += s; + pbuf += s; + + s = vcodec_feature_bitdepth(pbuf, size - tsize, vformat); + tsize += s; + pbuf += s; + + s = vcodec_feature_MaxResolution(pbuf, size - tsize, vformat); + tsize += s; + pbuf += s; + + s = vcodec_feature_clock(pbuf, size - tsize, vformat); + tsize += s; + pbuf += s; + + s = vcodec_feature_FCC(pbuf, size - tsize, vformat, is_v4l); + tsize += s; + pbuf += s; + + s = vcodec_feature_RDMA(pbuf, size - tsize, vformat); + tsize += s; + pbuf += s; + + s = vcodec_feature_v4ldec_nr(pbuf, size - tsize, vformat, is_v4l); + tsize += s; + pbuf += s; + + s = vcodec_feature_ge2d_wrapper(pbuf, size - tsize, vformat, is_v4l); + tsize += s; + pbuf += s; + + s = snprintf(pbuf, size - tsize, " \"UcodeVersionRequest\" : \"0.3.10\"\n"); + tsize += s; + pbuf += s; + + s = snprintf(pbuf, size - tsize, " }\n"); + tsize += s; + pbuf += s; + + return pbuf - buf; +} + + +ssize_t vcodec_feature_read(char *buf) +{ + static int read_count; + char *pbuf = buf; + + if (jiffies - last_time > 5 * HZ) { + read_count = 0; + /*timeout :not continue dump,dump from first. 
*/ + } + + if (vcodec_feature_idx > 0) { + if (read_count == 0) + pbuf += snprintf(pbuf, PAGE_SIZE - (pbuf - buf), "{\n"); + pbuf += vcodec_feature_get_feature(pbuf, feature[read_count].format, feature[read_count].is_v4l); + read_count++; + if (read_count >= vcodec_feature_idx) { + read_count = 0; + pbuf += snprintf(pbuf, PAGE_SIZE - (pbuf - buf), "}"); + } + } + last_time = jiffies; + return pbuf - buf; +} +EXPORT_SYMBOL(vcodec_feature_read); + + +int vcodec_feature_register(int vformat, int is_v4l) +{ + if ((vcodec_feature_idx < SUPPORT_VDEC_NUM) && vcodec_feature_support_format(vformat)) { + feature[vcodec_feature_idx].format = vformat; + feature[vcodec_feature_idx].is_v4l = is_v4l; + vcodec_feature_idx++; + //pr_debug("regist %s codec profile\n", vdec_profile->name); + } + + return 0; +} +EXPORT_SYMBOL(vcodec_feature_register); + + +
diff --git a/drivers/frame_provider/decoder/utils/vdec_feature.h b/drivers/frame_provider/decoder/utils/vdec_feature.h new file mode 100644 index 0000000..a2918fd --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_feature.h
/*
 * drivers/amlogic/media/frame_provider/decoder/utils/vdec_feature.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
*/

#ifndef VDEC_FEATURE_H
#define VDEC_FEATURE_H

/* Record that a decoder for @vformat (vformat_e value) exists; @is_v4l
 * is nonzero when it was registered from the v4l2 path. Returns 0.
 */
int vcodec_feature_register(int vformat, int is_v4l);

/* Write the next registered decoder's capability entry into @buf
 * (one entry per call); returns the number of bytes written.
 */
ssize_t vcodec_feature_read(char *buf);



#endif /* VDEC_FEATURE_H */
diff --git a/drivers/frame_provider/decoder/utils/vdec_ge2d_utils.c b/drivers/frame_provider/decoder/utils/vdec_ge2d_utils.c new file mode 100644 index 0000000..08cd0d7 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_ge2d_utils.c
@@ -0,0 +1,487 @@ +/* + * Copyright (C) 2017 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Description: + */ +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/amlogic/media/ge2d/ge2d.h> +#include <linux/amlogic/media/canvas/canvas_mgr.h> +#include "vdec.h" +#include "vdec_ge2d_utils.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" + +static u32 vdec_ge2d_debug = 0; + +#ifndef CONFIG_AMLOGIC_MEDIA_GE2D +inline struct ge2d_context_s *create_ge2d_work_queue(void) { return NULL; } +inline int destroy_ge2d_work_queue(struct ge2d_context_s *ge2d_work_queue) { return -1; } +#endif + +enum videocom_source_type { + DECODER_8BIT_NORMAL = 0, + DECODER_8BIT_BOTTOM, + DECODER_8BIT_TOP, + DECODER_10BIT_NORMAL, + DECODER_10BIT_BOTTOM, + DECODER_10BIT_TOP +}; + +static void vdec_canvas_cache_free(struct vdec_canvas_cache *canche) +{ + int i = -1; + + for (i = 0; i < ARRAY_SIZE(canche->res); i++) { + if (canche->res[i].cid > 0) { + if (vdec_ge2d_debug) + pr_info("canvas-free, name:%s, canvas id:%d\n", + canche->res[i].name, + canche->res[i].cid); + + canvas_pool_map_free_canvas(canche->res[i].cid); + + canche->res[i].cid = 0; + } + } +} + +static void vdec_canvas_cache_put(struct vdec_ge2d *ge2d) +{ + struct 
vdec_canvas_cache *canche = &ge2d->canche; + + mutex_lock(&canche->lock); + + pr_info("canvas-put, ref:%d\n", canche->ref); + + canche->ref--; + + if (canche->ref == 0) { + vdec_canvas_cache_free(canche); + } + + mutex_unlock(&canche->lock); +} + +static int vdec_canvas_cache_get(struct vdec_ge2d *ge2d, char *usr) +{ + struct vdec_canvas_cache *canche = &ge2d->canche; + int i; + + mutex_lock(&canche->lock); + + canche->ref++; + + for (i = 0; i < ARRAY_SIZE(canche->res); i++) { + if (canche->res[i].cid <= 0) { + snprintf(canche->res[i].name, 32, "%s-%d", usr, i); + canche->res[i].cid = + canvas_pool_map_alloc_canvas(canche->res[i].name); + } + + if (vdec_ge2d_debug) + pr_info("canvas-alloc, name:%s, canvas id:%d\n", + canche->res[i].name, + canche->res[i].cid); + + if (canche->res[i].cid <= 0) { + pr_err("canvas-fail, name:%s, canvas id:%d.\n", + canche->res[i].name, + canche->res[i].cid); + + mutex_unlock(&canche->lock); + goto err; + } + } + + if (vdec_ge2d_debug) + pr_info("canvas-get, ref:%d\n", canche->ref); + + mutex_unlock(&canche->lock); + return 0; +err: + vdec_canvas_cache_put(ge2d); + return -1; +} + +static int vdec_canvas_cache_init(struct vdec_ge2d *ge2d) +{ + ge2d->canche.ref = 0; + mutex_init(&ge2d->canche.lock); + + pr_info("canvas-init, ref:%d\n", ge2d->canche.ref); + + return 0; +} + +static int get_source_type(struct vframe_s *vf) +{ + enum videocom_source_type ret; + int interlace_mode; + + interlace_mode = vf->type & VIDTYPE_TYPEMASK; + + if ((vf->bitdepth & BITDEPTH_Y10) && + (!(vf->type & VIDTYPE_COMPRESS)) && + (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL)) { + if (interlace_mode == VIDTYPE_INTERLACE_TOP) + ret = DECODER_10BIT_TOP; + else if (interlace_mode == VIDTYPE_INTERLACE_BOTTOM) + ret = DECODER_10BIT_BOTTOM; + else + ret = DECODER_10BIT_NORMAL; + } else { + if (interlace_mode == VIDTYPE_INTERLACE_TOP) + ret = DECODER_8BIT_TOP; + else if (interlace_mode == VIDTYPE_INTERLACE_BOTTOM) + ret = DECODER_8BIT_BOTTOM; + else + ret = 
DECODER_8BIT_NORMAL; + } + + return ret; +} + +static int get_input_format(struct vframe_s *vf) +{ + int format = GE2D_FORMAT_M24_YUV420; + enum videocom_source_type soure_type; + + soure_type = get_source_type(vf); + + switch (soure_type) { + case DECODER_8BIT_NORMAL: + if (vf->type & VIDTYPE_VIU_422) + format = GE2D_FORMAT_S16_YUV422; + else if (vf->type & VIDTYPE_VIU_NV21) + format = GE2D_FORMAT_M24_NV21; + else if (vf->type & VIDTYPE_VIU_NV12) + format = GE2D_FORMAT_M24_NV12; + else if (vf->type & VIDTYPE_VIU_444) + format = GE2D_FORMAT_S24_YUV444; + else + format = GE2D_FORMAT_M24_YUV420; + break; + case DECODER_8BIT_BOTTOM: + if (vf->type & VIDTYPE_VIU_422) + format = GE2D_FORMAT_S16_YUV422 + | (GE2D_FORMAT_S16_YUV422B & (3 << 3)); + else if (vf->type & VIDTYPE_VIU_NV21) + format = GE2D_FORMAT_M24_NV21 + | (GE2D_FORMAT_M24_NV21B & (3 << 3)); + else if (vf->type & VIDTYPE_VIU_NV12) + format = GE2D_FORMAT_M24_NV12 + | (GE2D_FORMAT_M24_NV12B & (3 << 3)); + else if (vf->type & VIDTYPE_VIU_444) + format = GE2D_FORMAT_S24_YUV444 + | (GE2D_FORMAT_S24_YUV444B & (3 << 3)); + else + format = GE2D_FORMAT_M24_YUV420 + | (GE2D_FMT_M24_YUV420B & (3 << 3)); + break; + case DECODER_8BIT_TOP: + if (vf->type & VIDTYPE_VIU_422) + format = GE2D_FORMAT_S16_YUV422 + | (GE2D_FORMAT_S16_YUV422T & (3 << 3)); + else if (vf->type & VIDTYPE_VIU_NV21) + format = GE2D_FORMAT_M24_NV21 + | (GE2D_FORMAT_M24_NV21T & (3 << 3)); + else if (vf->type & VIDTYPE_VIU_NV12) + format = GE2D_FORMAT_M24_NV12 + | (GE2D_FORMAT_M24_NV12T & (3 << 3)); + else if (vf->type & VIDTYPE_VIU_444) + format = GE2D_FORMAT_S24_YUV444 + | (GE2D_FORMAT_S24_YUV444T & (3 << 3)); + else + format = GE2D_FORMAT_M24_YUV420 + | (GE2D_FMT_M24_YUV420T & (3 << 3)); + break; + case DECODER_10BIT_NORMAL: + if (vf->type & VIDTYPE_VIU_422) { + if (vf->bitdepth & FULL_PACK_422_MODE) + format = GE2D_FORMAT_S16_10BIT_YUV422; + else + format = GE2D_FORMAT_S16_12BIT_YUV422; + } + break; + case DECODER_10BIT_BOTTOM: + if (vf->type & 
VIDTYPE_VIU_422) { + if (vf->bitdepth & FULL_PACK_422_MODE) + format = GE2D_FORMAT_S16_10BIT_YUV422 + | (GE2D_FORMAT_S16_10BIT_YUV422B + & (3 << 3)); + else + format = GE2D_FORMAT_S16_12BIT_YUV422 + | (GE2D_FORMAT_S16_12BIT_YUV422B + & (3 << 3)); + } + break; + case DECODER_10BIT_TOP: + if (vf->type & VIDTYPE_VIU_422) { + if (vf->bitdepth & FULL_PACK_422_MODE) + format = GE2D_FORMAT_S16_10BIT_YUV422 + | (GE2D_FORMAT_S16_10BIT_YUV422T + & (3 << 3)); + else + format = GE2D_FORMAT_S16_12BIT_YUV422 + | (GE2D_FORMAT_S16_12BIT_YUV422T + & (3 << 3)); + } + break; + default: + format = GE2D_FORMAT_M24_YUV420; + } + return format; +} + +int vdec_ge2d_init(struct vdec_ge2d** ge2d_handle, int mode) +{ + int ret; + struct vdec_ge2d *ge2d; + + if (!ge2d_handle) + return -EINVAL; + + ge2d = kzalloc(sizeof(*ge2d), GFP_KERNEL); + if (!ge2d) + return -ENOMEM; + + vdec_canvas_cache_init(ge2d); + + ge2d->work_mode = mode; + if (!ge2d->work_mode) { + ge2d->work_mode = GE2D_MODE_CONVERT_LE; + } + pr_info("vdec_ge2d_init work_mode:%d\n", ge2d->work_mode); + + ge2d->ge2d_context = create_ge2d_work_queue(); + if (!ge2d->ge2d_context) { + pr_err("ge2d_create_instance fail\n"); + ret = -EINVAL; + goto error; + } + + if (vdec_canvas_cache_get(ge2d, "vdec-ge2d-dec") < 0) { + pr_err("canvas pool alloc fail. 
src(%d, %d, %d) dst(%d, %d, %d).\n", + ge2d->canche.res[0].cid, + ge2d->canche.res[1].cid, + ge2d->canche.res[2].cid, + ge2d->canche.res[3].cid, + ge2d->canche.res[4].cid, + ge2d->canche.res[5].cid); + ret = -ENOMEM; + goto error1; + } + + *ge2d_handle = ge2d; + + return 0; +error1: + destroy_ge2d_work_queue(ge2d->ge2d_context); +error: + kfree(ge2d); + + return ret; +} +EXPORT_SYMBOL(vdec_ge2d_init); + +int vdec_ge2d_copy_data(struct vdec_ge2d *ge2d, struct vdec_ge2d_info *ge2d_info) +{ + struct config_para_ex_s ge2d_config; + u32 src_fmt = 0, dst_fmt = 0; + struct canvas_s cd; + + if (!ge2d) + return -1; + + if (vdec_ge2d_debug) + pr_info("vdec_ge2d_copy_data start\n"); + + memset(&ge2d_config, 0, sizeof(ge2d_config)); + + src_fmt = get_input_format(ge2d_info->dst_vf); + if (ge2d_info->src_canvas0_config[0].endian == 7) + src_fmt |= GE2D_BIG_ENDIAN; + else + src_fmt |= GE2D_LITTLE_ENDIAN; + + /* negotiate format of destination */ + dst_fmt = get_input_format(ge2d_info->dst_vf); + if (ge2d->work_mode & GE2D_MODE_CONVERT_NV12) + dst_fmt |= GE2D_FORMAT_M24_NV12; + else if (ge2d->work_mode & GE2D_MODE_CONVERT_NV21) + dst_fmt |= GE2D_FORMAT_M24_NV21; + + if (ge2d->work_mode & GE2D_MODE_CONVERT_LE) + dst_fmt |= GE2D_LITTLE_ENDIAN; + else + dst_fmt |= GE2D_BIG_ENDIAN; + + if ((dst_fmt & GE2D_COLOR_MAP_MASK) == GE2D_COLOR_MAP_NV12) { + ge2d_info->dst_vf->type |= VIDTYPE_VIU_NV12; + ge2d_info->dst_vf->type &= ~VIDTYPE_VIU_NV21; + } else if ((dst_fmt & GE2D_COLOR_MAP_MASK) == GE2D_COLOR_MAP_NV21) { + ge2d_info->dst_vf->type |= VIDTYPE_VIU_NV21; + ge2d_info->dst_vf->type &= ~VIDTYPE_VIU_NV12; + } + if ((dst_fmt & GE2D_ENDIAN_MASK) == GE2D_LITTLE_ENDIAN) { + ge2d_info->dst_vf->canvas0_config[0].endian = 0; + ge2d_info->dst_vf->canvas0_config[1].endian = 0; + ge2d_info->dst_vf->canvas0_config[2].endian = 0; + } else if ((dst_fmt & GE2D_ENDIAN_MASK) == GE2D_BIG_ENDIAN){ + ge2d_info->dst_vf->canvas0_config[0].endian = 7; + ge2d_info->dst_vf->canvas0_config[1].endian = 7; + 
ge2d_info->dst_vf->canvas0_config[2].endian = 7; + } + + ge2d_info->dst_vf->mem_sec = ge2d_info->dst_vf->flag & VFRAME_FLAG_VIDEO_SECURE ? 1 : 0; + + mutex_lock(&ge2d->canche.lock); + + /* src canvas configure. */ + if ((ge2d_info->dst_vf->canvas0Addr == 0) || + (ge2d_info->dst_vf->canvas0Addr == (u32)-1)) { + canvas_config_config(ge2d->canche.res[0].cid, &ge2d_info->src_canvas0_config[0]); + canvas_config_config(ge2d->canche.res[1].cid, &ge2d_info->src_canvas0_config[1]); + canvas_config_config(ge2d->canche.res[2].cid, &ge2d_info->src_canvas0_config[2]); + ge2d_config.src_para.canvas_index = + ge2d->canche.res[0].cid | + ge2d->canche.res[1].cid << 8 | + ge2d->canche.res[2].cid << 16; + + ge2d_config.src_planes[0].addr = + ge2d_info->src_canvas0_config[0].phy_addr; + ge2d_config.src_planes[0].w = + ge2d_info->src_canvas0_config[0].width; + ge2d_config.src_planes[0].h = + ge2d_info->src_canvas0_config[0].height; + ge2d_config.src_planes[1].addr = + ge2d_info->src_canvas0_config[1].phy_addr; + ge2d_config.src_planes[1].w = + ge2d_info->src_canvas0_config[1].width; + ge2d_config.src_planes[1].h = + ge2d_info->src_canvas0_config[1].height; + ge2d_config.src_planes[2].addr = + ge2d_info->src_canvas0_config[2].phy_addr; + ge2d_config.src_planes[2].w = + ge2d_info->src_canvas0_config[2].width; + ge2d_config.src_planes[2].h = + ge2d_info->src_canvas0_config[2].height; + } else { + ge2d_config.src_para.canvas_index = ge2d_info->src_canvas0Addr; + } + ge2d_config.src_para.mem_type = CANVAS_TYPE_INVALID; + ge2d_config.src_para.format = src_fmt; + ge2d_config.src_para.fill_color_en = 0; + ge2d_config.src_para.fill_mode = 0; + ge2d_config.src_para.x_rev = 0; + ge2d_config.src_para.y_rev = 0; + ge2d_config.src_para.color = 0xffffffff; + ge2d_config.src_para.top = 0; + ge2d_config.src_para.left = 0; + ge2d_config.src_para.width = ge2d_info->dst_vf->width; + if (ge2d_info->dst_vf->type & VIDTYPE_INTERLACE) + ge2d_config.src_para.height = ge2d_info->dst_vf->height >> 1; + else + 
ge2d_config.src_para.height = ge2d_info->dst_vf->height; + + /* dst canvas configure. */ + canvas_config_config(ge2d->canche.res[3].cid, &ge2d_info->dst_vf->canvas0_config[0]); + if ((ge2d_config.src_para.format & 0xfffff) == GE2D_FORMAT_M24_YUV420) { + ge2d_info->dst_vf->canvas0_config[1].width <<= 1; + } + canvas_config_config(ge2d->canche.res[4].cid, &ge2d_info->dst_vf->canvas0_config[1]); + canvas_config_config(ge2d->canche.res[5].cid, &ge2d_info->dst_vf->canvas0_config[2]); + ge2d_config.dst_para.canvas_index = + ge2d->canche.res[3].cid | + ge2d->canche.res[4].cid << 8; + canvas_read(ge2d->canche.res[3].cid, &cd); + ge2d_config.dst_planes[0].addr = cd.addr; + ge2d_config.dst_planes[0].w = cd.width; + ge2d_config.dst_planes[0].h = cd.height; + canvas_read(ge2d->canche.res[4].cid, &cd); + ge2d_config.dst_planes[1].addr = cd.addr; + ge2d_config.dst_planes[1].w = cd.width; + ge2d_config.dst_planes[1].h = cd.height; + + ge2d_config.dst_para.format = dst_fmt; + ge2d_config.dst_para.width = ge2d_info->dst_vf->width; + ge2d_config.dst_para.height = ge2d_info->dst_vf->height; + ge2d_config.dst_para.mem_type = CANVAS_TYPE_INVALID; + ge2d_config.dst_para.fill_color_en = 0; + ge2d_config.dst_para.fill_mode = 0; + ge2d_config.dst_para.x_rev = 0; + ge2d_config.dst_para.y_rev = 0; + ge2d_config.dst_para.color = 0; + ge2d_config.dst_para.top = 0; + ge2d_config.dst_para.left = 0; + + /* other ge2d parameters configure. */ + ge2d_config.src_key.key_enable = 0; + ge2d_config.src_key.key_mask = 0; + ge2d_config.src_key.key_mode = 0; + ge2d_config.alu_const_color = 0; + ge2d_config.bitmask_en = 0; + ge2d_config.src1_gb_alpha = 0; + ge2d_config.dst_xy_swap = 0; + ge2d_config.src2_para.mem_type = CANVAS_TYPE_INVALID; + ge2d_config.mem_sec = ge2d_info->dst_vf->flag & VFRAME_FLAG_VIDEO_SECURE ? 
1 : 0; + + if (ge2d_context_config_ex(ge2d->ge2d_context, &ge2d_config) < 0) { + pr_err("vdec_ge2d_context_config_ex error.\n"); + mutex_unlock(&ge2d->canche.lock); + return -1; + } + + if (!(ge2d_info->dst_vf->type & VIDTYPE_V4L_EOS)) { + if (ge2d_info->dst_vf->type & VIDTYPE_INTERLACE) { + stretchblt_noalpha(ge2d->ge2d_context, + 0, 0, ge2d_info->dst_vf->width, ge2d_info->dst_vf->height / 2, + 0, 0, ge2d_info->dst_vf->width, ge2d_info->dst_vf->height); + } else { + stretchblt_noalpha(ge2d->ge2d_context, + 0, 0, ge2d_info->dst_vf->width, ge2d_info->dst_vf->height, + 0, 0, ge2d_info->dst_vf->width, ge2d_info->dst_vf->height); + } + } + mutex_unlock(&ge2d->canche.lock); + if (vdec_ge2d_debug) + pr_info("vdec_ge2d_copy_data done\n"); + + return 0; +} +EXPORT_SYMBOL(vdec_ge2d_copy_data); + +int vdec_ge2d_destroy(struct vdec_ge2d *ge2d) +{ + if (!ge2d) + return -1; + + pr_info("vdec ge2d destroy begin\n"); + + destroy_ge2d_work_queue(ge2d->ge2d_context); + vdec_canvas_cache_put(ge2d); + kfree(ge2d); + + pr_info("vdec ge2d destroy done\n"); + + return 0; +} +EXPORT_SYMBOL(vdec_ge2d_destroy); + +module_param(vdec_ge2d_debug, int, 0664); +MODULE_PARM_DESC(vdec_ge2d_debug, "\n vdec_ge2d_debug\n");
diff --git a/drivers/frame_provider/decoder/utils/vdec_ge2d_utils.h b/drivers/frame_provider/decoder/utils/vdec_ge2d_utils.h new file mode 100644 index 0000000..31018f7 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_ge2d_utils.h
@@ -0,0 +1,69 @@ +/* + * Copyright (C) 2017 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Description: + */ +#ifndef __GE2D_UTILS_H__ +#define __GE2D_UTILS_H__ + +//#include "../../../amvdec_ports/aml_vcodec_drv.h" +#include <linux/amlogic/media/vfm/vframe.h> + + +/* define ge2d work mode. 
*/ +#define GE2D_MODE_CONVERT_NV12 (1 << 0) +#define GE2D_MODE_CONVERT_NV21 (1 << 1) +#define GE2D_MODE_CONVERT_LE (1 << 2) +#define GE2D_MODE_CONVERT_BE (1 << 3) +#define GE2D_MODE_SEPARATE_FIELD (1 << 4) +#define GE2D_MODE_422_TO_420 (1 << 5) + +struct vdec_canvas_res { + int cid; + u8 name[32]; +}; + +struct vdec_canvas_cache { + int ref; + struct vdec_canvas_res res[6]; + struct mutex lock; +}; + +struct vdec_ge2d { + u32 work_mode; /* enum ge2d_work_mode */ + struct ge2d_context_s *ge2d_context; /* handle of GE2D */ + struct aml_vcodec_ctx *ctx; + struct vdec_canvas_cache canche; + void *hw; +}; + +struct vdec_ge2d_info { + struct vframe_s *dst_vf; + u32 src_canvas0Addr; + u32 src_canvas1Addr;; + struct canvas_config_s src_canvas0_config[3]; + struct canvas_config_s src_canvas1_config[3]; +}; + +int vdec_ge2d_init(struct vdec_ge2d** ge2d_handle, int mode); + +int vdec_ge2d_copy_data(struct vdec_ge2d *ge2d, struct vdec_ge2d_info *ge2d_info); + +int vdec_ge2d_destroy(struct vdec_ge2d *ge2d); + +#endif +
diff --git a/drivers/frame_provider/decoder/utils/vdec_input.c b/drivers/frame_provider/decoder/utils/vdec_input.c new file mode 100644 index 0000000..6fec0ad --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_input.c
@@ -0,0 +1,1218 @@ +/* + * drivers/amlogic/media/frame_provider/decoder/utils/vdec_input.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/uaccess.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "vdec.h" +#include "vdec_input.h" +#include <asm/cacheflush.h> +#include <linux/crc32.h> + +#define VFRAME_BLOCK_SIZE (512 * SZ_1K)/*512 for 1080p default init.*/ +#define VFRAME_BLOCK_SIZE_4K (2 * SZ_1M) /*2M for 4K default.*/ +#define VFRAME_BLOCK_SIZE_MAX (4 * SZ_1M) + +#define VFRAME_BLOCK_PAGEALIGN 4 +#define VFRAME_BLOCK_MIN_LEVEL (2 * SZ_1M) +#define VFRAME_BLOCK_MAX_LEVEL (8 * SZ_1M) +#define VFRAME_BLOCK_MAX_TOTAL_SIZE (16 * SZ_1M) + +/* +2s for OMX +*/ +#define MAX_FRAME_DURATION_S 2 + + +#define VFRAME_BLOCK_HOLE (SZ_64K) + +#define MIN_FRAME_PADDING_SIZE ((u32)(L1_CACHE_BYTES)) + +#define EXTRA_PADDING_SIZE (16 * SZ_1K) /*HEVC_PADDING_SIZE*/ + +#define MEM_NAME "VFRAME_INPUT" + +//static int vdec_input_get_duration_u64(struct vdec_input_s *input); +static struct vframe_block_list_s * + vdec_input_alloc_new_block(struct vdec_input_s *input, + ulong phy_addr, + int size, + chunk_free free, + void* priv); + +static int aml_copy_from_user(void *to, const void *from, ulong n) +{ + int ret =0; + + if (likely(access_ok(from, n))) + ret = copy_from_user(to, from, n); + 
else + memcpy(to, from, n); + + return ret; +} + +static int copy_from_user_to_phyaddr(void *virts, const char __user *buf, + u32 size, ulong phys, u32 pading, bool is_mapped) +{ + u32 i, span = SZ_1M; + u32 count = size / PAGE_ALIGN(span); + u32 remain = size % PAGE_ALIGN(span); + ulong addr = phys; + u8 *p = virts; + + if (is_mapped) { + if (aml_copy_from_user(p, buf, size)) + return -EFAULT; + + if (pading) + memset(p + size, 0, pading); + + codec_mm_dma_flush(p, size + pading, DMA_TO_DEVICE); + + return 0; + } + + for (i = 0; i < count; i++) { + addr = phys + i * span; + p = codec_mm_vmap(addr, span); + if (!p) + return -1; + + if (aml_copy_from_user(p, buf + i * span, span)) { + codec_mm_unmap_phyaddr(p); + return -EFAULT; + } + + codec_mm_dma_flush(p, span, DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(p); + } + + if (!remain) + return 0; + + span = size - remain; + addr = phys + span; + p = codec_mm_vmap(addr, remain + pading); + if (!p) + return -1; + + if (aml_copy_from_user(p, buf + span, remain)) { + codec_mm_unmap_phyaddr(p); + return -EFAULT; + } + + if (pading) + memset(p + remain, 0, pading); + + codec_mm_dma_flush(p, remain + pading, DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(p); + + return 0; +} + +static int vframe_chunk_fill(struct vdec_input_s *input, + struct vframe_chunk_s *chunk, const char *buf, + size_t count, struct vframe_block_list_s *block) +{ + u8 *p = (u8 *)block->start_virt + block->wp; + if (block->type == VDEC_TYPE_FRAME_BLOCK) { + copy_from_user_to_phyaddr(p, buf, count, + block->start + block->wp, + chunk->pading_size, + block->is_mapped); + } else if (block->type == VDEC_TYPE_FRAME_CIRCULAR) { + size_t len = min((size_t)(block->size - block->wp), count); + u32 wp; + + copy_from_user_to_phyaddr(p, buf, len, + block->start + block->wp, 0, + block->is_mapped); + p += len; + + if (count > len) { + copy_from_user_to_phyaddr(p, buf + len, + count - len, + block->start, 0, + block->is_mapped); + + p += count - len; + } + + wp = block->wp + 
count; + if (wp >= block->size) + wp -= block->size; + + len = min(block->size - wp, chunk->pading_size); + + if (!block->is_mapped) { + p = codec_mm_vmap(block->start + wp, len); + memset(p, 0, len); + codec_mm_dma_flush(p, len, DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(p); + } else { + memset(p, 0, len); + codec_mm_dma_flush(p, len, DMA_TO_DEVICE); + } + + if (chunk->pading_size > len) { + p = (u8 *)block->start_virt; + + if (!block->is_mapped) { + p = codec_mm_vmap(block->start, + chunk->pading_size - len); + memset(p, 0, chunk->pading_size - len); + codec_mm_dma_flush(p, + chunk->pading_size - len, + DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(p); + } else { + memset(p, 0, chunk->pading_size - len); + codec_mm_dma_flush(p, + chunk->pading_size - len, + DMA_TO_DEVICE); + } + } + } + + return 0; +} + +static inline u32 vframe_block_space(struct vframe_block_list_s *block) +{ + if (block->type == VDEC_TYPE_FRAME_BLOCK) { + return block->size - block->wp; + } else { + return (block->rp >= block->wp) ? 
+ (block->rp - block->wp) : + (block->rp - block->wp + block->size); + } +} + +static void vframe_block_add_chunk(struct vframe_block_list_s *block, + struct vframe_chunk_s *chunk) +{ + block->wp += chunk->size + chunk->pading_size; + if (block->wp >= block->size) + block->wp -= block->size; + block->data_size += chunk->size; + block->chunk_count++; + chunk->block = block; + block->input->wr_block = block; + chunk->sequence = block->input->sequence; + block->input->sequence++; +} + +static bool is_coherent_buff = 1; + +static void vframe_block_free_block(struct vframe_block_list_s *block) +{ + if (is_coherent_buff) { + if (block->mem_handle) { + codec_mm_dma_free_coherent(block->mem_handle); + } + } else { + if (block->addr) { + codec_mm_free_for_dma(MEM_NAME, block->addr); + } + } + /* + *pr_err("free block %d, size=%d\n", block->id, block->size); + */ + kfree(block); +} + +static int vframe_block_init_alloc_storage(struct vdec_input_s *input, + struct vframe_block_list_s *block, + ulong phy_addr, + int size, + chunk_free free, + void *priv) +{ + int alloc_size = input->default_block_size; + block->magic = 0x4b434c42; + block->input = input; + block->type = input->type; + + /* + * todo: for different type use different size + */ + if (phy_addr) { + block->is_out_buf = 1; + block->start_virt = NULL; + block->start = phy_addr; + block->size = size; + block->free = free; + block->priv = priv; + } else { + alloc_size = PAGE_ALIGN(alloc_size); + if (is_coherent_buff) { + block->start_virt = codec_mm_dma_alloc_coherent(&block->mem_handle, &block->addr, alloc_size, MEM_NAME); + } else { + block->addr = codec_mm_alloc_for_dma_ex( + MEM_NAME, + alloc_size/PAGE_SIZE, + VFRAME_BLOCK_PAGEALIGN, + CODEC_MM_FLAGS_DMA_CPU | CODEC_MM_FLAGS_FOR_VDECODER, + input->id, + block->id); + } + + if (!block->addr) { + pr_err("Input block allocation failed\n"); + return -ENOMEM; + } + + if (!is_coherent_buff) + block->start_virt = (void *)codec_mm_phys_to_virt(block->addr); + if 
(block->start_virt) + block->is_mapped = true; + block->start = block->addr; + block->size = alloc_size; + block->is_out_buf = 0; + block->free = NULL; + } + + return 0; +} + +void vdec_input_init(struct vdec_input_s *input, struct vdec_s *vdec) +{ + INIT_LIST_HEAD(&input->vframe_block_list); + INIT_LIST_HEAD(&input->vframe_block_free_list); + INIT_LIST_HEAD(&input->vframe_chunk_list); + spin_lock_init(&input->lock); + input->id = vdec->id; + input->block_nums = 0; + input->vdec = vdec; + input->block_id_seq = 0; + input->size = 0; + input->default_block_size = VFRAME_BLOCK_SIZE; + snprintf(input->vdec_input_name, sizeof(input->vdec_input_name), + "vdec-input-%d", vdec->id); +} +int vdec_input_prepare_bufs(struct vdec_input_s *input, + int frame_width, int frame_height) +{ + struct vframe_block_list_s *block; + int i; + unsigned long flags; + + if (vdec_secure(input->vdec)) + return 0; + if (input->size > 0) + return 0; + if (frame_width * frame_height >= 1920 * 1088) { + /*have add data before. ignore prepare buffers.*/ + input->default_block_size = VFRAME_BLOCK_SIZE_4K; + } + /*prepared 3 buffers for smooth start.*/ + for (i = 0; i < 3; i++) { + block = vdec_input_alloc_new_block(input, 0, 0, NULL, NULL); + if (!block) + break; + flags = vdec_input_lock(input); + list_move_tail(&block->list, + &input->vframe_block_free_list); + input->wr_block = NULL; + vdec_input_unlock(input, flags); + } + return 0; +} + +static int vdec_input_dump_block_locked( + struct vframe_block_list_s *block, + char *buf, int size) +{ + char *pbuf = buf; + char sbuf[512]; + int tsize = 0; + int s; + + if (!pbuf) { + pbuf = sbuf; + size = 512; + } + #define BUFPRINT(args...) 
\ + do {\ + s = snprintf(pbuf, size - tsize, args);\ + tsize += s;\ + pbuf += s; \ + } while (0) + + BUFPRINT("\tblock:[%d:%p]-addr=%p,vstart=%p,type=%d\n", + block->id, + block, + (void *)block->addr, + (void *)block->start_virt, + block->type); + BUFPRINT("\t-blocksize=%d,data=%d,wp=%d,rp=%d,chunk_count=%d\n", + block->size, + block->data_size, + block->wp, + block->rp, + block->chunk_count); + /* + BUFPRINT("\tlist=%p,next=%p,prev=%p\n", + &block->list, + block->list.next, + block->list.prev); + */ + #undef BUFPRINT + if (!buf) + pr_info("%s", sbuf); + return tsize; +} + +int vdec_input_dump_blocks(struct vdec_input_s *input, + char *bufs, int size) +{ + struct list_head *p, *tmp; + unsigned long flags; + char *lbuf = bufs; + char sbuf[256]; + int s = 0; + + if (size <= 0) + return 0; + if (!bufs) + lbuf = sbuf; + s += snprintf(lbuf + s, size - s, + "blocks:vdec-%d id:%d,bufsize=%d,dsize=%d,frames:%d,dur:%dms\n", + input->id, + input->block_nums, + input->size, + input->data_size, + input->have_frame_num, + vdec_input_get_duration_u64(input)/1000); + if (bufs) + lbuf += s; + else { + pr_info("%s", sbuf); + lbuf = NULL; + } + + flags = vdec_input_lock(input); + /* dump input blocks */ + list_for_each_safe(p, tmp, &input->vframe_block_list) { + struct vframe_block_list_s *block = list_entry( + p, struct vframe_block_list_s, list); + if (bufs != NULL) { + lbuf = bufs + s; + if (size - s < 128) + break; + } + s += vdec_input_dump_block_locked(block, lbuf, size - s); + } + list_for_each_safe(p, tmp, &input->vframe_block_free_list) { + struct vframe_block_list_s *block = list_entry( + p, struct vframe_block_list_s, list); + if (bufs != NULL) { + lbuf = bufs + s; + if (size - s < 128) + break; + } + s += vdec_input_dump_block_locked(block, lbuf, size - s); + } + vdec_input_unlock(input, flags); + + return s; +} + +static int vdec_input_dump_chunk_locked( + int id, + struct vframe_chunk_s *chunk, + char *buf, int size) +{ + char *pbuf = buf; + char sbuf[512]; + int 
tsize = 0; + int s; + + if (!pbuf) { + pbuf = sbuf; + size = 512; + } + #define BUFPRINT(args...) \ + do {\ + s = snprintf(pbuf, size - tsize, args);\ + tsize += s;\ + pbuf += s; \ + } while (0) + + BUFPRINT( + "\t[%d][%lld:%p]-off=%d,size:%d,p:%d,\tpts64=%lld,addr=%p\n", + id, + chunk->sequence, + chunk->block, + chunk->offset, + chunk->size, + chunk->pading_size, + chunk->pts64, + (void *)(chunk->block->addr + chunk->offset)); + /* + BUFPRINT("\tlist=%p,next=%p,prev=%p\n", + &chunk->list, + chunk->list.next, + chunk->list.prev); + */ + #undef BUFPRINT + if (!buf) + pr_info("%s", sbuf); + return tsize; +} + +int vdec_input_dump_chunks(int id, struct vdec_input_s *input, + char *bufs, int size) +{ + + struct list_head *p, *tmp; + unsigned long flags; + char *lbuf = bufs; + char sbuf[256]; + int s = 0; + int i = 0; + + if (size <= 0) + return 0; + + if (!bufs) + lbuf = sbuf; + s = snprintf(lbuf + s, size - s, + "[%d]blocks:vdec-%d id:%d,bufsize=%d,dsize=%d,frames:%d,maxframe:%d\n", + id, + input->id, + input->block_nums, + input->size, + input->data_size, + input->have_frame_num, + input->frame_max_size); + if (bufs) + lbuf += s; + if (!bufs) { + pr_info("%s", sbuf); + lbuf = NULL; + } + flags = vdec_input_lock(input); + /*dump chunks list infos.*/ + list_for_each_safe(p, tmp, &input->vframe_chunk_list) { + struct vframe_chunk_s *chunk = list_entry( + p, struct vframe_chunk_s, list); + if (bufs != NULL) + lbuf = bufs + s; + s += vdec_input_dump_chunk_locked(id, chunk, lbuf, size - s); + i++; + if (i >= 10) + break; + } + vdec_input_unlock(input, flags); + + return s; +} + + + +int vdec_input_set_buffer(struct vdec_input_s *input, u32 start, u32 size) +{ + if (input_frame_based(input)) + return -EINVAL; + + input->start = start; + input->size = size; + input->swap_rp = start; + + if (vdec_secure(input->vdec)) + input->swap_page_phys = codec_mm_alloc_for_dma("SWAP", + 1, 0, CODEC_MM_FLAGS_TVP); + else { + input->swap_page = 
codec_mm_dma_alloc_coherent(&input->mem_handle, + (ulong *)&input->swap_page_phys, + PAGE_SIZE, MEM_NAME); + if (input->swap_page == NULL) + return -ENOMEM; + } + + if (input->swap_page_phys == 0) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL(vdec_input_set_buffer); + +void vdec_input_set_type(struct vdec_input_s *input, int type, int target) +{ + input->type = type; + input->target = target; + if (type == VDEC_TYPE_FRAME_CIRCULAR) { + /*alway used max block.*/ + input->default_block_size = VFRAME_BLOCK_SIZE_MAX; + } +} +EXPORT_SYMBOL(vdec_input_set_type); + +int vdec_input_get_status(struct vdec_input_s *input, + struct vdec_input_status_s *status) +{ + unsigned long flags; + + if (input->vdec == NULL) + return -EINVAL; + + flags = vdec_input_lock(input); + + if (list_empty(&input->vframe_block_list)) { + status->size = VFRAME_BLOCK_SIZE; + status->data_len = 0; + status->free_len = VFRAME_BLOCK_SIZE; + status->read_pointer = 0; + } else { + int r = VFRAME_BLOCK_MAX_LEVEL - vdec_input_level(input) + - VFRAME_BLOCK_HOLE; + status->size = input->size; + status->data_len = vdec_input_level(input); + status->free_len = (r > 0) ? 
r : 0; + status->read_pointer = input->total_rd_count; + } + + vdec_input_unlock(input, flags); + + return 0; +} +EXPORT_SYMBOL(vdec_input_get_status); + +static void vdec_input_add_block(struct vdec_input_s *input, + struct vframe_block_list_s *block) +{ + unsigned long flags; + + flags = vdec_input_lock(input); + block->wp = 0; + block->id = input->block_id_seq++; + list_add_tail(&block->list, &input->vframe_block_list); + input->size += block->size; + input->block_nums++; + input->wr_block = block; + vdec_input_unlock(input, flags); +} + +static inline void vdec_input_del_block_locked(struct vdec_input_s *input, + struct vframe_block_list_s *block) +{ + list_del(&block->list); + input->size -= block->size; + input->block_nums--; +} + +int vdec_input_level(struct vdec_input_s *input) +{ + return input->total_wr_count - input->total_rd_count; +} +EXPORT_SYMBOL(vdec_input_level); + +static struct vframe_block_list_s * + vdec_input_alloc_new_block(struct vdec_input_s *input, + ulong phy_addr, + int size, + chunk_free free, + void* priv) +{ + struct vframe_block_list_s *block; + block = kzalloc(sizeof(struct vframe_block_list_s), + GFP_KERNEL); + if (block == NULL) { + input->no_mem_err_cnt++; + pr_err("vframe_block structure allocation failed\n"); + return NULL; + } + + if (vframe_block_init_alloc_storage(input, + block, phy_addr, size, free, priv) != 0) { + kfree(block); + pr_err("vframe_block storage allocation failed\n"); + return NULL; + } + + INIT_LIST_HEAD(&block->list); + + vdec_input_add_block(input, block); + + /* + *pr_info("vdec-%d:new block id=%d, total_blocks:%d, size=%d\n", + * input->id, + * block->id, + * input->block_nums, + * block->size); + */ + if (0 && input->size > VFRAME_BLOCK_MAX_LEVEL * 2) { + /* + used + */ + pr_info( + "input[%d] reach max: size:%d, blocks:%d", + input->id, + input->size, + input->block_nums); + pr_info("level:%d, wr:%lld,rd:%lld\n", + vdec_input_level(input), + input->total_wr_count, + input->total_rd_count); + 
		/* (dead branch continued from previous span) */
		vdec_input_dump_blocks(input, NULL, 0);
	}
	return block;
}
/*
 * vdec_input_get_duration_u64() - estimate buffered duration from the
 * last queued vs. last consumed PTS, padding with last_duration for
 * chunks that carried no PTS.
 *
 * NOTE(review): the u64 PTS difference is truncated to int, and the
 * units are implied by the "> 1000 * 1000000" (treated as >1000 s)
 * sanity clamp — presumably microsecond-scale PTS; confirm against
 * callers before relying on the absolute value.
 */
int vdec_input_get_duration_u64(struct vdec_input_s *input)
{
	int duration = (input->last_inpts_u64 - input->last_comsumed_pts_u64);
	if (input->last_in_nopts_cnt > 0 &&
		input->last_comsumed_pts_u64 > 0 &&
		input->last_duration > 0) {
		/* Account for queued-but-unstamped chunks at one
		 * last_duration each. */
		duration += (input->last_in_nopts_cnt -
			input->last_comsumed_no_pts_cnt) *
			input->last_duration;
	}
	if (duration > 1000 * 1000000)/*> 1000S,I think jumped.*/
		duration = 0;
	if (duration <= 0 && input->last_duration > 0) {
		/*..*/
		/* PTS math unusable: fall back to frames * last duration. */
		duration = input->last_duration * input->have_frame_num;
	}
	if (duration < 0)
		duration = 0;
	return duration;
}
EXPORT_SYMBOL(vdec_input_get_duration_u64);

/*
 ret >= 13: have enough buffer, blocked add more buffers
*/
/*
 * Scores how "full" the input is.  The caller compares against 13,
 * which is only reachable when the frame count (+8) combines with the
 * level/duration bits, or when total size exceeds the hard cap (+100).
 */
static int vdec_input_have_blocks_enough(struct vdec_input_s *input)
{
	int ret = 0;
	if (vdec_input_level(input) > VFRAME_BLOCK_MIN_LEVEL)
		ret += 1;
	if (vdec_input_level(input) >= VFRAME_BLOCK_MAX_LEVEL)
		ret += 2;
	if (vdec_input_get_duration_u64(input) > MAX_FRAME_DURATION_S)
		ret += 4;
	if (input->have_frame_num > 30)
		ret += 8;
	else
		ret -= 8;/*not enough frames.*/
	if (input->size >= VFRAME_BLOCK_MAX_TOTAL_SIZE)
		ret += 100;/*always bloced add more buffers.*/

	return ret;
}
/*
 * vdec_input_get_free_block() - obtain a write block with at least
 * @size bytes of capacity, reusing the free list when possible and
 * allocating (with geometric default-size growth) otherwise.
 * Returns 0 with *block_ret set, or -EAGAIN (full / repeated alloc
 * failure / out of memory).
 */
static int vdec_input_get_free_block(
	struct vdec_input_s *input,
	int size,/*frame size + pading*/
	struct vframe_block_list_s **block_ret)
{
	struct vframe_block_list_s *to_freeblock = NULL;
	struct vframe_block_list_s *block = NULL;
	unsigned long flags;
	flags = vdec_input_lock(input);
	/*get from free list.*/
	if (!list_empty(&input->vframe_block_free_list)) {
		block = list_entry(input->vframe_block_free_list.next,
			struct vframe_block_list_s, list);
		if (block->size < (size)) {
			/* Too small to reuse: unlink now, free after
			 * dropping the spinlock. */
			vdec_input_del_block_locked(input, block);
			to_freeblock = block;
			block = NULL;
		} else {
			list_move_tail(&block->list,
				&input->vframe_block_list);
			input->wr_block = block;/*swith to new block*/
+ } + } + vdec_input_unlock(input, flags); + if (to_freeblock) { + /*free the small block.*/ + vframe_block_free_block(to_freeblock); + } + if (block) { + *block_ret = block; + return 0; + } + + if (vdec_input_have_blocks_enough(input) > 13) { + /*buf fulled */ + return -EAGAIN; + } + if (input->no_mem_err_cnt > 3) { + /*alloced failed more times. + */ + return -EAGAIN; + } + if (input->default_block_size <= + size * 2) { + int def_size = input->default_block_size; + do { + def_size *= 2; + } while ((def_size <= 2 * size) && + (def_size <= VFRAME_BLOCK_SIZE_MAX)); + if (def_size < size) + def_size = ALIGN(size + 64, (1 << 17)); + /*128k aligned,same as codec_mm*/ + input->default_block_size = def_size; + } + block = vdec_input_alloc_new_block(input, 0, 0, NULL, NULL); + if (!block) { + input->no_mem_err_cnt++; + return -EAGAIN; + } + input->no_mem_err_cnt = 0; + *block_ret = block; + return 0; +} + +int vdec_input_add_chunk(struct vdec_input_s *input, const char *buf, + size_t count, u32 handle, chunk_free free, void* priv) +{ + unsigned long flags; + struct vframe_chunk_s *chunk; + struct vdec_s *vdec = input->vdec; + struct vframe_block_list_s *block; + + int need_pading_size = MIN_FRAME_PADDING_SIZE; + + if (vdec_secure(vdec)) { + block = vdec_input_alloc_new_block(input, (ulong)buf, + PAGE_ALIGN(count + HEVC_PADDING_SIZE + 1), + free, priv); /*Add padding large than HEVC_PADDING_SIZE */ + if (!block) + return -ENOMEM; + block->handle = handle; + } else { +#if 0 + if (add_count == 0) { + add_count++; + memcpy(sps, buf, 30); + return 30; + } else if (add_count == 1) { + add_count++; + memcpy(pps, buf, 8); + return 8; + } + add_count++; +#endif + +#if 0 + pr_info("vdec_input_add_frame add %p, count=%d\n", buf, (int)count); + + if (count >= 8) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + buf[0], buf[1], buf[2], buf[3], + buf[4], buf[5], buf[6], buf[7]); + } + if (count >= 16) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + buf[8], buf[9], 
				buf[10], buf[11],
				buf[12], buf[13], buf[14], buf[15]);
		}
		if (count >= 24) {
			pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n",
				buf[16], buf[17], buf[18], buf[19],
				buf[20], buf[21], buf[22], buf[23]);
		}
		if (count >= 32) {
			pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n",
				buf[24], buf[25], buf[26], buf[27],
				buf[28], buf[29], buf[30], buf[31]);
		}
#endif
		/* Chunk-queueing only applies to frame-based inputs. */
		if (input_stream_based(input))
			return -EINVAL;

		/*
		 * Padding: small frames are padded up to a page boundary,
		 * larger ones to 64-byte alignment on top of the minimum
		 * padding.
		 */
		if (count < PAGE_SIZE) {
			need_pading_size = PAGE_ALIGN(count + need_pading_size) -
				count;
		} else {
			/*to 64 bytes aligned;*/
			if (count & 0x3f)
				need_pading_size += 64 - (count & 0x3f);
		}
		block = input->wr_block;
		if (block &&
			(vframe_block_space(block) > (count + need_pading_size))) {
			/*this block have enough buffers.
			do nothings.
			*/
		} else if (block && (block->type == VDEC_TYPE_FRAME_CIRCULAR)) {
			/*in circular module.
			only one block,.*/
			return -EAGAIN;
		} else if (block != NULL) {
			/*have block but not enough space.
			recycle the no enough blocks.*/
			flags = vdec_input_lock(input);
			if (input->wr_block == block &&
				block->chunk_count == 0) {
				block->rp = 0;
				block->wp = 0;
				/*block no data move to freelist*/
				list_move_tail(&block->list,
					&input->vframe_block_free_list);
				input->wr_block = NULL;
			}
			vdec_input_unlock(input, flags);
			block = NULL;
		}
		if (!block) {/*try new block.*/
			int ret = vdec_input_get_free_block(input,
				count + need_pading_size + EXTRA_PADDING_SIZE,
				&block);
			if (ret < 0)/*no enough block now.*/
				return ret;
		}
	}

	chunk = kzalloc(sizeof(struct vframe_chunk_s), GFP_KERNEL);

	if (!chunk) {
		pr_err("vframe_chunk structure allocation failed\n");
		return -ENOMEM;
	}

	/* Snapshot any pending HDR10+ metadata into a private copy owned
	 * by this chunk (released with the chunk). */
	if ((vdec->hdr10p_data_valid == true) &&
		(vdec->hdr10p_data_size != 0)) {
		char *new_buf;
		new_buf = vzalloc(vdec->hdr10p_data_size);
		if (new_buf) {
			memcpy(new_buf, vdec->hdr10p_data_buf, vdec->hdr10p_data_size);
			chunk->hdr10p_data_buf = new_buf;
			chunk->hdr10p_data_size =
				vdec->hdr10p_data_size;
		} else {
			/* Metadata copy failed: queue the frame anyway,
			 * just without HDR10+ data. */
			pr_err("%s:hdr10p data vzalloc size(%d) failed\n",
				__func__, vdec->hdr10p_data_size);
			chunk->hdr10p_data_buf = NULL;
			chunk->hdr10p_data_size = 0;
		}
	} else {
		chunk->hdr10p_data_buf = NULL;
		chunk->hdr10p_data_size = 0;
	}
	vdec->hdr10p_data_valid = false;

	chunk->magic = 0x4b554843; /* ASCII "CHUK" (little-endian) */
	if (vdec->pts_valid) {
		chunk->pts = vdec->pts;
		chunk->pts64 = vdec->pts64;
	}

	if (vdec->timestamp_valid)
		chunk->timestamp = vdec->timestamp;

	/* Track the smallest observed inter-frame PTS delta as the
	 * running frame duration estimate. */
	if (vdec->pts_valid &&
		input->last_inpts_u64 > 0 &&
		input->last_in_nopts_cnt == 0) {
		int d = (int)(chunk->pts64 - input->last_inpts_u64);
		if (d > 0 && (d < input->last_duration))
			input->last_duration = d;
		/* alwasy: used the smallest duration;
		if 60fps->30 fps.
		maybe have warning value.
		*/
	}
	chunk->pts_valid = vdec->pts_valid;
	vdec->pts_valid = false;
	INIT_LIST_HEAD(&chunk->list);

	if (vdec_secure(vdec)) {
		/* Secure: data already sits in the caller's buffer. */
		chunk->offset = 0;
		chunk->size = count;
		chunk->pading_size = PAGE_ALIGN(chunk->size + need_pading_size) -
			chunk->size;
	} else {
		/* Non-secure: copy the payload into the write block. */
		chunk->offset = block->wp;
		chunk->size = count;
		chunk->pading_size = need_pading_size;
		if (vframe_chunk_fill(input, chunk, buf, count, block)) {
			pr_err("vframe_chunk_fill failed\n");
			kfree(chunk);
			return -EFAULT;
		}

	}


	/* Publish the chunk atomically with the accounting update. */
	flags = vdec_input_lock(input);

	vframe_block_add_chunk(block, chunk);

	list_add_tail(&chunk->list, &input->vframe_chunk_list);
	input->data_size += chunk->size;
	input->have_frame_num++;

	/* First frame queued: wake the decoder. */
	if (input->have_frame_num == 1)
		input->vdec_up(vdec);
	ATRACE_COUNTER(input->vdec_input_name, input->have_frame_num);
	if (chunk->pts_valid) {
		input->last_inpts_u64 = chunk->pts64;
		input->last_in_nopts_cnt = 0;
	} else {
		/*nopts*/
		input->last_in_nopts_cnt++;
	}
	if (chunk->size > input->frame_max_size)
		input->frame_max_size = chunk->size;
	input->total_wr_count += count;
	vdec_input_unlock(input, flags);
#if 0
	if (add_count == 2)
		input->total_wr_count +=
		38;
#endif

	return count;
}

/*
 * vdec_input_add_frame() - user-facing enqueue entry point.
 * @buf:   user-space buffer; in the secure path it holds an array of
 *         struct drm_info records rather than raw frame data.
 * @count: size of @buf in bytes.
 *
 * Non-secure: forwards directly to vdec_input_add_chunk().
 * Secure: unpacks each drm_info (must carry TYPE_DRMINFO_V2) and
 * queues the physical buffer it describes.
 * Returns bytes consumed or a negative errno.
 */
int vdec_input_add_frame(struct vdec_input_s *input, const char *buf,
			size_t count)
{
	int ret = 0;
	struct drm_info drm;
	struct vdec_s *vdec = input->vdec;
	unsigned long phy_buf;

	if (vdec_secure(vdec)) {
		while (count > 0) {
			if (count < sizeof(struct drm_info))
				return -EIO;
			if (copy_from_user((void*)&drm, buf + ret, sizeof(struct drm_info)))
				return -EAGAIN;
			if (!(drm.drm_flag & TYPE_DRMINFO_V2))
				return -EIO; /*must drm info v2 version*/
			phy_buf = (unsigned long) drm.drm_phy;
			/*
			 * NOTE(review): the return value of
			 * vdec_input_add_chunk() is ignored here, so a
			 * failed enqueue is silently counted as consumed —
			 * confirm whether that is intentional.
			 */
			vdec_input_add_chunk(input, (char *)phy_buf,
				(size_t)drm.drm_pktsize, drm.handle, NULL, NULL);
			count -= sizeof(struct drm_info);
			ret += sizeof(struct drm_info);

			/* the drm frame data might include head infos and raw */
			/* data thus the next drm unit still need a valid pts.*/
			if (count >= sizeof(struct drm_info))
				vdec->pts_valid = true;
		}
	} else {
		ret = vdec_input_add_chunk(input, buf, count, 0, NULL, NULL);
	}

	return ret;
}
EXPORT_SYMBOL(vdec_input_add_frame);

/*
 * vdec_input_add_frame_with_dma() - enqueue a DMA buffer by physical
 * address.  Only valid for secure vdecs; returns -1 otherwise
 * (continues on the next span).
 */
int vdec_input_add_frame_with_dma(struct vdec_input_s *input, ulong addr,
	size_t count, u32 handle, chunk_free free, void* priv)
{
	struct vdec_s *vdec = input->vdec;

	return vdec_secure(vdec) ?
		vdec_input_add_chunk(input,
			(char *)addr, count, handle, free, priv) : -1;
}
EXPORT_SYMBOL(vdec_input_add_frame_with_dma);

/*
 * vdec_input_next_chunk() - peek the oldest queued chunk (FIFO head)
 * without removing it.  Returns NULL when the queue is empty.
 *
 * NOTE(review): the pointer is returned after the spinlock is dropped;
 * validity relies on the caller being the only consumer — confirm the
 * driver's threading model.
 */
struct vframe_chunk_s *vdec_input_next_chunk(struct vdec_input_s *input)
{
	struct vframe_chunk_s *chunk = NULL;
	unsigned long flags;
	flags = vdec_input_lock(input);
	if (!list_empty(&input->vframe_chunk_list)) {
		chunk = list_first_entry(&input->vframe_chunk_list,
			struct vframe_chunk_s, list);
	}
	vdec_input_unlock(input, flags);
	return chunk;
}
EXPORT_SYMBOL(vdec_input_next_chunk);

/*
 * vdec_input_next_input_chunk() - peek the oldest chunk that has not
 * yet been marked VFRAME_CHUNK_FLAG_CONSUMED, or NULL if none.
 */
struct vframe_chunk_s *vdec_input_next_input_chunk(
			struct vdec_input_s *input)
{
	struct vframe_chunk_s *chunk = NULL;
	struct list_head *p;
	unsigned long flags;
	flags = vdec_input_lock(input);

	list_for_each(p, &input->vframe_chunk_list) {
		struct vframe_chunk_s *c = list_entry(
			p, struct vframe_chunk_s, list);
		if ((c->flag & VFRAME_CHUNK_FLAG_CONSUMED) == 0) {
			chunk = c;
			break;
		}
	}
	vdec_input_unlock(input, flags);
	return chunk;
}
EXPORT_SYMBOL(vdec_input_next_input_chunk);

/*
 * vdec_input_release_chunk() - consume a chunk: unlink it, update
 * read-side accounting, and recycle or free its block when it becomes
 * empty.  Tolerates a racing release of the same chunk (double-release
 * is detected by scanning the list first).
 */
void vdec_input_release_chunk(struct vdec_input_s *input,
			struct vframe_chunk_s *chunk)
{
	struct vframe_chunk_s *p;
	u32 chunk_valid = 0;
	unsigned long flags;
	struct vframe_block_list_s *block = chunk->block;
	struct vframe_block_list_s *tofreeblock = NULL;
	flags = vdec_input_lock(input);

	/* Verify the chunk is still queued before touching it. */
	list_for_each_entry(p, &input->vframe_chunk_list, list) {
		if (p == chunk) {
			chunk_valid = 1;
			break;
		}
	}
	/* 2 threads go here, the other done the deletion,so return*/
	if (chunk_valid == 0) {
		vdec_input_unlock(input, flags);
		pr_err("%s chunk is deleted,so return.\n", __func__);
		return;
	}

	list_del(&chunk->list);
	input->have_frame_num--;
	ATRACE_COUNTER(input->vdec_input_name, input->have_frame_num);
	if (chunk->pts_valid) {
		input->last_comsumed_no_pts_cnt = 0;
		input->last_comsumed_pts_u64 = chunk->pts64;
	} else
		input->last_comsumed_no_pts_cnt++;
	/* Advance the block read pointer, wrapping at block->size. */
	block->rp += chunk->size;
	if (block->rp >=
		block->size)
		block->rp -= block->size;
	block->data_size -= chunk->size;
	block->chunk_count--;
	input->data_size -= chunk->size;
	input->total_rd_count += chunk->size;
	if (block->is_out_buf) {
		/* Externally owned buffer: hand it back through its
		 * free callback (if any) instead of codec_mm. */
		list_move_tail(&block->list,
			&input->vframe_block_free_list);
		if (block->free) {
			vdec_input_del_block_locked(input, block);
			block->free(block->priv, block->handle);
			kfree(block);
		}
	} else if (block->chunk_count == 0 &&
		input->wr_block != block ) {/*don't free used block*/
		if (block->size < input->default_block_size) {
			/* Undersized for current traffic: free it (outside
			 * the lock, below) rather than recycle. */
			vdec_input_del_block_locked(input, block);
			tofreeblock = block;
		} else {
			block->rp = 0;
			block->wp = 0;
			list_move_tail(&block->list,
				&input->vframe_block_free_list);
		}
	}

	vdec_input_unlock(input, flags);
	if (tofreeblock)
		vframe_block_free_block(tofreeblock);
	kfree(chunk);
}
EXPORT_SYMBOL(vdec_input_release_chunk);

/*
 * vdec_input_lock() - take the input spinlock, IRQ-save.
 * Returns the saved flags word to pass back to vdec_input_unlock().
 */
unsigned long vdec_input_lock(struct vdec_input_s *input)
{
	unsigned long flags;

	spin_lock_irqsave(&input->lock, flags);

	return flags;
}
EXPORT_SYMBOL(vdec_input_lock);

/* Release the input spinlock taken by vdec_input_lock(). */
void vdec_input_unlock(struct vdec_input_s *input, unsigned long flags)
{
	spin_unlock_irqrestore(&input->lock, flags);
}
EXPORT_SYMBOL(vdec_input_unlock);

/*
 * vdec_input_release() - tear down all input state: every queued
 * chunk, every block, then the swap page.
 *
 * NOTE(review): blocks are unlinked here via
 * vdec_input_del_block_locked() without holding the spinlock —
 * presumably teardown runs single-threaded; confirm no concurrent
 * producers/consumers remain at this point.
 */
void vdec_input_release(struct vdec_input_s *input)
{
	struct list_head *p, *tmp;

	/* release chunk data */
	list_for_each_safe(p, tmp, &input->vframe_chunk_list) {
		struct vframe_chunk_s *chunk = list_entry(
			p, struct vframe_chunk_s, list);
		vdec_input_release_chunk(input, chunk);
	}
	list_for_each_safe(p, tmp, &input->vframe_block_list) {
		/*should never here.*/
		list_move_tail(p, &input->vframe_block_free_list);
	}
	/* release input blocks */
	list_for_each_safe(p, tmp, &input->vframe_block_free_list) {
		struct vframe_block_list_s *block = list_entry(
			p, struct vframe_block_list_s, list);
		vdec_input_del_block_locked(input, block);
		vframe_block_free_block(block);
	}

	/* release swap pages */
	if (vdec_secure(input->vdec))
{ + if (input->swap_page_phys) + codec_mm_free_for_dma("SWAP", input->swap_page_phys); + } else { + if (input->swap_page) + codec_mm_dma_free_coherent(input->mem_handle); + } + input->swap_page = NULL; + input->swap_page_phys = 0; + input->swap_valid = false; +} +EXPORT_SYMBOL(vdec_input_release); + +u32 vdec_input_get_freed_handle(struct vdec_s *vdec) +{ + struct vframe_block_list_s *block; + struct vdec_input_s *input = &vdec->input; + unsigned long flags; + u32 handle = 0; + + if (!vdec) + return 0; + + if (!vdec_secure(vdec)) + return 0; + + flags = vdec_input_lock(input); + do { + block = list_first_entry_or_null(&input->vframe_block_free_list, + struct vframe_block_list_s, list); + if (!block) { + break; + } + + handle = block->handle; + vdec_input_del_block_locked(input, block); + if (block->free) + block->free(block->priv, handle); + kfree(block); + + } while(!handle); + + vdec_input_unlock(input, flags); + return handle; +} +EXPORT_SYMBOL(vdec_input_get_freed_handle);
diff --git a/drivers/frame_provider/decoder/utils/vdec_input.h b/drivers/frame_provider/decoder/utils/vdec_input.h new file mode 100644 index 0000000..9783db7 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_input.h
/*
 * drivers/amlogic/media/frame_provider/decoder/utils/vdec_input.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#ifndef VDEC_INPUT_H
#define VDEC_INPUT_H

struct vdec_s;
struct vdec_input_s;

/* Callback used to hand an externally owned block buffer back to its
 * producer; @handle identifies the buffer, @priv is the producer's cookie. */
typedef void (*chunk_free)(void *priv, u32 handle);

/* One contiguous storage block holding queued frame data. */
struct vframe_block_list_s {
	u32 magic;
	int id;                 /* sequence id assigned at add time */
	struct list_head list;  /* on vframe_block_list or _free_list */
	ulong start;
	void *start_virt;
	ulong addr;
	bool is_mapped;
	int type;
	u32 size;               /* total capacity in bytes */
	u32 wp;                 /* write offset within the block */
	u32 rp;                 /* read offset within the block */
	int data_size;          /* bytes currently stored */
	int chunk_count;        /* chunks still referencing this block */
	int is_out_buf;         /* externally owned buffer (secure path) */
	u32 handle;
	ulong mem_handle;
	/* free callback */
	chunk_free free;
	void* priv;

	struct vdec_input_s *input;
};

#define VFRAME_CHUNK_FLAG_CONSUMED 0x0001

/* One queued frame (or DRM record) inside a block. */
struct vframe_chunk_s {
	u32 magic;
	struct list_head list;
	int flag;               /* VFRAME_CHUNK_FLAG_* */
	u32 offset;             /* byte offset of payload within block */
	u32 size;               /* payload size */
	u32 pts;
	u32 pading_size;        /* tail padding after the payload */
	u64 pts64;
	bool pts_valid;
	u64 timestamp;
	bool timestamp_valid;
	u64 sequence;
	struct vframe_block_list_s *block;
	u32 hdr10p_data_size;   /* chunk-owned HDR10+ metadata copy */
	char *hdr10p_data_buf;
};

#define VDEC_INPUT_TARGET_VLD 0
#define VDEC_INPUT_TARGET_HEVC 1
#define VLD_PADDING_SIZE 1024
#define HEVC_PADDING_SIZE (1024*16)

/* Per-decoder input queue: blocks + chunks + accounting. */
struct vdec_input_s {
	struct list_head vframe_block_list;      /* active blocks */
	struct list_head vframe_chunk_list;      /* queued chunks, FIFO */
	struct list_head vframe_block_free_list; /* recyclable blocks */
	struct vframe_block_list_s *wr_block;    /* current write block */
	int have_free_blocks;
	int no_mem_err_cnt;/*when alloc no mem cnt++*/
	int block_nums;
	int block_id_seq;
	int id;
	spinlock_t lock;        /* guards lists and counters */
	int type;               /* VDEC_TYPE_* */
	int target;             /* VDEC_INPUT_TARGET_* */
	struct vdec_s *vdec;
	bool swap_valid;
	bool swap_needed;
	bool eos;
	ulong mem_handle;
	void *swap_page;        /* one-page context-swap area */
	dma_addr_t swap_page_phys;
	u64 total_wr_count;     /* lifetime bytes queued */
	u64 total_rd_count;     /* lifetime bytes consumed */
	u64 streaming_rp;
	u32 swap_rp;
	bool last_swap_slave;
	int dirty_count;
	u64 sequence;
	unsigned start;
	unsigned size;          /* sum of all block sizes */
	int default_block_size;
	int data_size;          /* bytes currently queued */
	int frame_max_size;     /* largest chunk seen so far */
	int prepare_level;
/*for check frame delay.*/
	u64 last_inpts_u64;
	u64 last_comsumed_pts_u64;
	int last_in_nopts_cnt;
	int last_comsumed_no_pts_cnt;
	int last_duration;
/*for check frame delay.*/
	int have_frame_num;
	int stream_cookie; /* wrap count for vld_mem and
			      HEVC_SHIFT_BYTE_COUNT for hevc */
	char vdec_input_name[32];
	bool (*vdec_is_input_frame_empty)(struct vdec_s *);
	void (*vdec_up)(struct vdec_s *);
};

/* Snapshot returned by vdec_input_get_status(). */
struct vdec_input_status_s {
	int size;
	int data_len;
	int free_len;
	int read_pointer;
};

#define input_frame_based(input) \
	(((input)->type == VDEC_TYPE_FRAME_BLOCK) || \
	((input)->type == VDEC_TYPE_FRAME_CIRCULAR))
#define input_stream_based(input) \
	(((input)->type == VDEC_TYPE_STREAM_PARSER) || \
	((input)->type == VDEC_TYPE_SINGLE))

/* Initialize vdec_input structure */
extern void vdec_input_init(struct vdec_input_s *input, struct vdec_s *vdec);
extern int vdec_input_prepare_bufs(struct vdec_input_s *input,
	int frame_width, int frame_height);

/* Get available input data size */
extern int vdec_input_level(struct vdec_input_s *input);

/* Set input type and target */
extern void vdec_input_set_type(struct vdec_input_s *input, int type,
	int target);

/* Set stream buffer information for stream based input */
extern int vdec_input_set_buffer(struct vdec_input_s *input, u32 start,
	u32 size);

/* Add enqueue video data into decoder's input */
extern int vdec_input_add_frame(struct vdec_input_s *input, const char *buf,
	size_t count);

extern int vdec_input_add_frame_with_dma(struct vdec_input_s *input, ulong addr,
	size_t count, u32 handle, chunk_free free, void* priv);

/* Peek next frame data from decoder's input */
extern struct vframe_chunk_s *vdec_input_next_chunk(
	struct vdec_input_s *input);

/* Peek next frame data from decoder's input, not marked as consumed */
extern struct vframe_chunk_s *vdec_input_next_input_chunk(
	struct vdec_input_s *input);

/* Consume next frame data from decoder's input */
extern void vdec_input_release_chunk(struct vdec_input_s *input,
	struct vframe_chunk_s *chunk);

/* Get decoder input buffer status */
extern int vdec_input_get_status(struct vdec_input_s *input,
	struct vdec_input_status_s *status);

extern unsigned long vdec_input_lock(struct vdec_input_s *input);

extern void vdec_input_unlock(struct vdec_input_s *input, unsigned long lock);

/* release all resource for decoder's input */
extern void vdec_input_release(struct vdec_input_s *input);
/* return block handle and free block */
extern u32 vdec_input_get_freed_handle(struct vdec_s *vdec);
int vdec_input_dump_chunks(int id, struct vdec_input_s *input,
	char *bufs, int size);
int vdec_input_dump_blocks(struct vdec_input_s *input,
	char *bufs, int size);

int vdec_input_get_duration_u64(struct vdec_input_s *input);

#endif /* VDEC_INPUT_H */
diff --git a/drivers/frame_provider/decoder/utils/vdec_power_ctrl.c b/drivers/frame_provider/decoder/utils/vdec_power_ctrl.c new file mode 100644 index 0000000..e43c45a --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_power_ctrl.c
/*
 * drivers/amlogic/media/frame_provider/decoder/utils/vdec_power_ctrl.c
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

/* Enable pr_debug() output in this file. */
#define DEBUG
#include "vdec_power_ctrl.h"
#include <linux/amlogic/media/utils/vdec_reg.h>
#include <linux/amlogic/power_ctrl.h>
//#include <dt-bindings/power/sc2-pd.h>
//#include <linux/amlogic/pwr_ctrl.h>
#include <linux/amlogic/power_domain.h>
#include <dt-bindings/power/sc2-pd.h>
#include <linux/amlogic/media/codec_mm/codec_mm.h>
#include "../../../common/media_clock/switch/amports_gate.h"
#include "../../../common/chips/decoder_cpu_ver_info.h"
#include "../../../common/media_clock/clk/clk.h"

/* Max retries of the GXBB rev-A HEVC power-on workaround loop. */
#define HEVC_TEST_LIMIT (100)
#define GXBB_REV_A_MINOR (0xa)

/* Power-domain ids for the DOS (decoder) subsystem. */
#define PDID_DSP 0
#define PDID_DOS_HCODEC 1
#define PDID_DOS_HEVC 2
#define PDID_DOS_VDEC 3
#define PDID_DOS_WAVE 4

extern int no_powerdown;
extern int hevc_max_reset_count;

/* Maps a PM_* mechanism id to a printable name. */
struct pm_name_s {
	int type;
	const char *name;
};

static const struct pm_name_s pm_name[] = {
	{PM_POWER_CTRL_RW_REG, "legacy"},
	{PM_POWER_CTRL_API, "power-ctrl-api"},
	{PM_POWER_DOMAIN, "power-domain"},
	{PM_POWER_DOMAIN_SEC_API, "pd-sec-api"},
	{PM_POWER_DOMAIN_NONSEC_API, "pd-non-sec-api"},
};

/*
 * get_pm_name() - human-readable name of a power-management mechanism.
 * Returns "unknown" when @type matches no table entry.
 */
const char *get_pm_name(int type)
{
	const char *name = "unknown";
	int i, size = ARRAY_SIZE(pm_name);

	for (i = 0; i < size; i++) {
		if (type == pm_name[i].type)
			name = pm_name[i].name;
	}

	return name;
}
EXPORT_SYMBOL(get_pm_name);

/* Device-tree power-domain names, indexed by decoder core id. */
static struct pm_pd_s pm_domain_data[] = {
	{ .name = "pwrc-vdec", },
	{ .name = "pwrc-hcodec",},
	{ .name = "pwrc-vdec-2", },
	{ .name = "pwrc-hevc", },
	{ .name = "pwrc-hevc-b", },
	{ .name = "pwrc-wave", },
};

/*
 * pm_vdec_power_switch() - raise/drop the runtime-PM count of the
 * power domain attached to core @id, powering it on or off.
 */
static void pm_vdec_power_switch(struct pm_pd_s *pd, int id, bool on)
{
	struct device *dev = pd[id].dev;

	if (on)
		pm_runtime_get_sync(dev);
	else
		pm_runtime_put_sync(dev);

	pr_debug("the %-15s power %s\n",
		pd[id].name, on ? "on" : "off");
}

/*
 * pm_vdec_power_domain_init() - attach each named power domain from DT
 * and link it to @dev so runtime PM propagates.
 *
 * A failed attach is logged and skipped (the loop continues); a failed
 * device_link_add() aborts with -ENODEV.
 */
static int pm_vdec_power_domain_init(struct device *dev)
{
	int i, err;
	const struct power_manager_s *pm = of_device_get_match_data(dev);
	struct pm_pd_s *pd = pm->pd_data;

	for (i = 0; i < ARRAY_SIZE(pm_domain_data); i++) {
		pd[i].dev = dev_pm_domain_attach_by_name(dev, pd[i].name);
		if (IS_ERR_OR_NULL(pd[i].dev)) {
			/*
			 * NOTE(review): PTR_ERR(NULL) is 0, so the logged
			 * errno is 0 when attach returned NULL — confirm
			 * whether a distinct message is wanted.
			 */
			err = PTR_ERR(pd[i].dev);
			dev_err(dev, "Get %s failed, pm-domain: %d\n",
				pd[i].name, err);
			continue;
		}

		pd[i].link = device_link_add(dev, pd[i].dev,
					     DL_FLAG_PM_RUNTIME |
					     DL_FLAG_STATELESS);
		if (IS_ERR_OR_NULL(pd[i].link)) {
			dev_err(dev, "Adding %s device link failed!\n",
				pd[i].name);
			return -ENODEV;
		}

		pr_debug("power domain: name: %s, dev: %px, link: %px\n",
			pd[i].name, pd[i].dev, pd[i].link);
	}

	return 0;
}

/*
 * pm_vdec_power_domain_relese() - undo pm_vdec_power_domain_init():
 * drop the device links and detach the domains.  (Name typo "relese"
 * kept — it is part of the file's interface.)
 */
static void pm_vdec_power_domain_relese(struct device *dev)
{
	int i;
	const struct power_manager_s *pm = of_device_get_match_data(dev);
	struct pm_pd_s *pd = pm->pd_data;

	for (i = 0; i < ARRAY_SIZE(pm_domain_data); i++) {
		if (!IS_ERR_OR_NULL(pd[i].link))
			device_link_del(pd[i].link);

		if (!IS_ERR_OR_NULL(pd[i].dev))
			dev_pm_domain_detach(pd[i].dev, true);
	}
}

/*
 * pm_vdec_clock_on() - ungate and enable the clock tree for core @id
 * (continues on the next span).  SC2+ parts (except T5/T5D) use the
 * hevcf mux; pre-combined-clock parts also enable the back-end clock.
 */
static void pm_vdec_clock_on(int id)
{
	if (id == VDEC_1) {
		amports_switch_gate("clk_vdec_mux", 1);
		vdec_clock_hi_enable();
	} else if (id == VDEC_HCODEC) {
		hcodec_clock_enable();
	} else if (id == VDEC_HEVC) {
		/* enable hevc clock */
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SC2 &&
			(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5) &&
			(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D))
			amports_switch_gate("clk_hevcf_mux", 1);
		else
			amports_switch_gate("clk_hevc_mux", 1);
		if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) &&
			!is_hevc_front_back_clk_combined())
			amports_switch_gate("clk_hevcb_mux", 1);

		hevc_clock_hi_enable();

		if (!is_hevc_front_back_clk_combined())
			hevc_back_clock_hi_enable();
	}
}

/*
 * pm_vdec_clock_off() - gate the clocks of core @id.
 * Mirror of pm_vdec_clock_on(); the HEVC branch also stops the
 * back-end clock on G12A+ when front/back clocks are separate.
 */
static void pm_vdec_clock_off(int id)
{
	if (id == VDEC_1) {
		vdec_clock_off();
	} else if (id == VDEC_HCODEC) {
		hcodec_clock_off();
	} else if (id == VDEC_HEVC) {
		/* disable hevc clock */
		hevc_clock_off();
		if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) &&
			!is_hevc_front_back_clk_combined())
			hevc_back_clock_off();
	}
}

/* Power-domain flavour of power-on: clocks first, then the domain. */
static void pm_vdec_power_domain_power_on(struct device *dev, int id)
{
	const struct power_manager_s *pm = of_device_get_match_data(dev);

	pm_vdec_clock_on(id);
	pm_vdec_power_switch(pm->pd_data, id, true);
}

/* Power-domain flavour of power-off: gate clocks, drop the domain. */
static void pm_vdec_power_domain_power_off(struct device *dev, int id)
{
	const struct power_manager_s *pm = of_device_get_match_data(dev);

	pm_vdec_clock_off(id);
	pm_vdec_power_switch(pm->pd_data, id, false);
}

/* True when core @id's power domain is runtime-active. */
static bool pm_vdec_power_domain_power_state(struct device *dev, int id)
{
	const struct power_manager_s *pm = of_device_get_match_data(dev);

	return pm_runtime_active(pm->pd_data[id].dev);
}

/*
 * test_hevc() - probe whether the HEVC IPP block came up correctly
 * (GXBB rev-A workaround): program a minimal decode setup pointing all
 * canvases at @decomp_addr, issue a software-IMP prediction command,
 * wait @us_delay microseconds, and read back the IPP debug status.
 * Returns true when the status reads 1 (block functional).
 */
static bool test_hevc(u32 decomp_addr, u32 us_delay)
{
	int i;

	/* SW_RESET IPP */
	WRITE_VREG(HEVCD_IPP_TOP_CNTL, 1);
	WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0);

	/* initialize all canvas table */
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0);
	for (i = 0; i < 32; i++)
		WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
			0x1 | (i << 8) | decomp_addr);
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
	WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (0<<1) | 1);
	for (i = 0; i < 32; i++)
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR,
0); + + /* Initialize mcrcc */ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); + WRITE_VREG(HEVCD_MCRCC_CTL2, 0x0); + WRITE_VREG(HEVCD_MCRCC_CTL3, 0x0); + WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); + + /* Decomp initialize */ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x0); + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0); + + /* Frame level initialization */ + WRITE_VREG(HEVCD_IPP_TOP_FRMCONFIG, 0x100 | (0x100 << 16)); + WRITE_VREG(HEVCD_IPP_TOP_TILECONFIG3, 0x0); + WRITE_VREG(HEVCD_IPP_TOP_LCUCONFIG, 0x1 << 5); + WRITE_VREG(HEVCD_IPP_BITDEPTH_CONFIG, 0x2 | (0x2 << 2)); + + WRITE_VREG(HEVCD_IPP_CONFIG, 0x0); + WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, 0x0); + + /* Enable SWIMP mode */ + WRITE_VREG(HEVCD_IPP_SWMPREDIF_CONFIG, 0x1); + + /* Enable frame */ + WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x2); + WRITE_VREG(HEVCD_IPP_TOP_FRMCTL, 0x1); + + /* Send SW-command CTB info */ + WRITE_VREG(HEVCD_IPP_SWMPREDIF_CTBINFO, 0x1 << 31); + + /* Send PU_command */ + WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO0, (0x4 << 9) | (0x4 << 16)); + WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO1, 0x1 << 3); + WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO2, 0x0); + WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO3, 0x0); + + udelay(us_delay); + + WRITE_VREG(HEVCD_IPP_DBG_SEL, 0x2 << 4); + + return (READ_VREG(HEVCD_IPP_DBG_DATA) & 3) == 1; +} + +static bool hevc_workaround_needed(void) +{ + return (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) && + (get_meson_cpu_version(MESON_CPU_VERSION_LVL_MINOR) + == GXBB_REV_A_MINOR); +} + +static void pm_vdec_legacy_power_off(struct device *dev, int id); + +static void pm_vdec_legacy_power_on(struct device *dev, int id) +{ + void *decomp_addr = NULL; + ulong decomp_dma_addr; + ulong mem_handle; + u32 decomp_addr_aligned = 0; + int hevc_loop = 0; + int sleep_val, iso_val; + bool is_power_ctrl_ver2 = false; + + is_power_ctrl_ver2 = + ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) ? 
true : false; + + if (hevc_workaround_needed() && + (id == VDEC_HEVC)) { + decomp_addr = codec_mm_dma_alloc_coherent(&mem_handle, + &decomp_dma_addr, SZ_64K + SZ_4K, "vdec_prealloc"); + if (decomp_addr) { + decomp_addr_aligned = ALIGN(decomp_dma_addr, SZ_64K); + memset((u8 *)decomp_addr + + (decomp_addr_aligned - decomp_dma_addr), + 0xff, SZ_4K); + } else + pr_err("vdec: alloc HEVC gxbb decomp buffer failed.\n"); + } + + if (id == VDEC_1) { + sleep_val = is_power_ctrl_ver2 ? 0x2 : 0xc; + iso_val = is_power_ctrl_ver2 ? 0x2 : 0xc0; + + /* vdec1 power on */ +#ifdef CONFIG_AMLOGIC_POWER + if (is_support_power_ctrl()) { + if (power_ctrl_sleep_mask(true, sleep_val, 0)) { + pr_err("vdec-1 power on ctrl sleep fail.\n"); + return; + } + } else { + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val); + } +#else + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val); +#endif + /* wait 10uS */ + udelay(10); + /* vdec1 soft reset */ + WRITE_VREG(DOS_SW_RESET0, 0xfffffffc); + WRITE_VREG(DOS_SW_RESET0, 0); + /* enable vdec1 clock */ + /* + *add power on vdec clock level setting,only for m8 chip, + * m8baby and m8m2 can dynamic adjust vdec clock, + * power on with default clock level + */ + amports_switch_gate("clk_vdec_mux", 1); + vdec_clock_hi_enable(); + /* power up vdec memories */ + WRITE_VREG(DOS_MEM_PD_VDEC, 0); + + /* remove vdec1 isolation */ +#ifdef CONFIG_AMLOGIC_POWER + if (is_support_power_ctrl()) { + if (power_ctrl_iso_mask(true, iso_val, 0)) { + pr_err("vdec-1 power on ctrl iso fail.\n"); + return; + } + } else { + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val); + } +#else + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val); +#endif + /* reset DOS top registers */ + WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0); + } else if (id == VDEC_2) { + if (has_vdec2()) { + /* vdec2 power on */ + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + 
				/* (continuation of pm_vdec_legacy_power_on(), whose head
				 * is above this chunk: clearing SLEEP0 bits 0x30 powers
				 * the VDEC2 island back up)
				 */
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
				~0x30);
			/* wait 10uS */
			udelay(10);
			/* vdec2 soft reset */
			WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
			WRITE_VREG(DOS_SW_RESET2, 0);
			/* enable vdec1 clock */
			vdec2_clock_hi_enable();
			/* power up vdec memories */
			WRITE_VREG(DOS_MEM_PD_VDEC2, 0);
			/* remove vdec2 isolation */
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
				~0x300);
			/* reset DOS top registers */
			WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
		}
	} else if (id == VDEC_HCODEC) {
		if (has_hdec()) {
			/* ver2 parts expose one bit per core; older parts two */
			sleep_val = is_power_ctrl_ver2 ? 0x1 : 0x3;
			iso_val = is_power_ctrl_ver2 ? 0x1 : 0x30;

			/* hcodec power on */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_sleep_mask(true, sleep_val, 0)) {
					pr_err("hcodec power on ctrl sleep fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
#endif
			/* wait 10uS */
			udelay(10);
			/* hcodec soft reset */
			WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
			WRITE_VREG(DOS_SW_RESET1, 0);
			/* enable hcodec clock */
			hcodec_clock_enable();
			/* power up hcodec memories */
			WRITE_VREG(DOS_MEM_PD_HCODEC, 0);
			/* remove hcodec isolation */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_iso_mask(true, iso_val, 0)) {
					pr_err("hcodec power on ctrl iso fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
#endif
		}
	} else if (id == VDEC_HEVC) {
		if (has_hevc_vdec()) {
			bool hevc_fixed = false;

			sleep_val = is_power_ctrl_ver2 ? 0x4 : 0xc0;
			iso_val = is_power_ctrl_ver2 ?
				0x4 : 0xc00;

			/* Some chips need the power sequence repeated until a
			 * decode self-test passes (HW erratum workaround).
			 */
			while (!hevc_fixed) {
				/* hevc power on */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_sleep_mask(true, sleep_val, 0)) {
						pr_err("hevc power on ctrl sleep fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
						READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
#endif
				/* wait 10uS */
				udelay(10);
				/* hevc soft reset */
				WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
				WRITE_VREG(DOS_SW_RESET3, 0);
				/* enable hevc clock */
				amports_switch_gate("clk_hevc_mux", 1);
				if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
					amports_switch_gate("clk_hevcb_mux", 1);
				hevc_clock_hi_enable();
				hevc_back_clock_hi_enable();
				/* power up hevc memories */
				WRITE_VREG(DOS_MEM_PD_HEVC, 0);
				/* remove hevc isolation */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_iso_mask(true, iso_val, 0)) {
						pr_err("hevc power on ctrl iso fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
						READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
#endif
				if (!hevc_workaround_needed())
					break;

				if (decomp_addr)
					hevc_fixed = test_hevc(
						decomp_addr_aligned, 20);

				if (!hevc_fixed) {
					hevc_loop++;
					if (hevc_loop >= HEVC_TEST_LIMIT) {
						pr_warn("hevc power sequence over limit\n");
						pr_warn("=====================================================\n");
						pr_warn(" This chip is identified to have HW failure.\n");
						pr_warn(" Please contact sqa-platform to replace the platform.\n");
						pr_warn("=====================================================\n");

						panic("Force panic for chip detection !!!\n");

						break;
					}

					pm_vdec_legacy_power_off(NULL, VDEC_HEVC);

					mdelay(10);
				}
			}

			if (hevc_loop > hevc_max_reset_count)
				hevc_max_reset_count = hevc_loop;

			WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
			udelay(10);
			WRITE_VREG(DOS_SW_RESET3, 0);
		}
	}

	if (decomp_addr)
		codec_mm_dma_free_coherent(mem_handle);
}

/*
 * pm_vdec_legacy_power_off() - power down one decoder core via direct
 * AO-bus register writes (legacy, pre-power-domain platforms).
 * @dev: unused; kept to match the power_manager_s callback signature.
 * @id:  core selector (VDEC_1 / VDEC_2 / VDEC_HCODEC / VDEC_HEVC).
 *
 * Mirror of pm_vdec_legacy_power_on(), in reverse order: raise the
 * isolation bits first, power down the core memories, gate the clocks,
 * then set the sleep bits.  On CONFIG_AMLOGIC_POWER kernels the
 * power_ctrl_* API is preferred when available; otherwise the AO
 * registers are written directly.
 */
static void pm_vdec_legacy_power_off(struct device *dev, int id)
{
	int sleep_val, iso_val;
	bool is_power_ctrl_ver2 = false;

	/* SM1 and newer (except TL1) use the narrower "ver2" bit layout */
	is_power_ctrl_ver2 =
		((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) ? true : false;

	if (id == VDEC_1) {
		sleep_val = is_power_ctrl_ver2 ? 0x2 : 0xc;
		iso_val = is_power_ctrl_ver2 ? 0x2 : 0xc0;

		/* enable vdec1 isolation */
#ifdef CONFIG_AMLOGIC_POWER
		if (is_support_power_ctrl()) {
			if (power_ctrl_iso_mask(false, iso_val, 0)) {
				pr_err("vdec-1 power off ctrl iso fail.\n");
				return;
			}
		} else {
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
		}
#else
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
		/* power off vdec1 memories */
		WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
		/* disable vdec1 clock */
		vdec_clock_off();
		/* vdec1 power off */
#ifdef CONFIG_AMLOGIC_POWER
		if (is_support_power_ctrl()) {
			if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
				pr_err("vdec-1 power off ctrl sleep fail.\n");
				return;
			}
		} else {
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
		}
#else
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
	} else if (id == VDEC_2) {
		if (has_vdec2()) {
			/* enable vdec2 isolation */
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
				0x300);
			/* power off vdec2 memories */
			WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
			/* disable vdec2 clock */
			vdec2_clock_off();
			/* vdec2 power off */
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
				0x30);
		}
	} else if (id == VDEC_HCODEC) {
		if (has_hdec()) {
			sleep_val = is_power_ctrl_ver2 ?
				0x1 : 0x3;
			iso_val = is_power_ctrl_ver2 ? 0x1 : 0x30;

			/* enable hcodec isolation */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_iso_mask(false, iso_val, 0)) {
					pr_err("hcodec power off ctrl iso fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
			/* power off hcodec memories */
			WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
			/* disable hcodec clock */
			hcodec_clock_off();
			/* hcodec power off */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
					pr_err("hcodec power off ctrl sleep fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
		}
	} else if (id == VDEC_HEVC) {
		if (has_hevc_vdec()) {
			sleep_val = is_power_ctrl_ver2 ? 0x4 : 0xc0;
			iso_val = is_power_ctrl_ver2 ?
				0x4 : 0xc00;

			/* no_powerdown is a module-level override: when set,
			 * skip the power-down and only reset the HEVC core.
			 */
			if (no_powerdown == 0) {
				/* enable hevc isolation */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_iso_mask(false, iso_val, 0)) {
						pr_err("hevc power off ctrl iso fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
						READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
				/* power off hevc memories */
				WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);

				/* disable hevc clock */
				hevc_clock_off();
				if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
					hevc_back_clock_off();

				/* hevc power off */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
						pr_err("hevc power off ctrl sleep fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
						READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
			} else {
				pr_info("!!!!!!!!not power down\n");
				hevc_reset_core(NULL);
				no_powerdown = 0;
			}
		}
	}
}

/*
 * pm_vdec_legacy_power_state() - report whether a decoder core is powered.
 * @dev: unused; kept to match the power_manager_s callback signature.
 * @id:  core selector.
 *
 * A core is considered "on" when its sleep bits in AO_RTI_GEN_PWR_SLEEP0
 * are clear AND its clock-enable bit in the corresponding HHI clock
 * control register is set.  The bit masks mirror the ver2/legacy split
 * used by power_on/power_off above.
 */
static bool pm_vdec_legacy_power_state(struct device *dev, int id)
{
	bool ret = false;

	if (id == VDEC_1) {
		if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
			(((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
			(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
			? 0x2 : 0xc)) == 0) &&
			(READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100))
			ret = true;
	} else if (id == VDEC_2) {
		if (has_vdec2()) {
			if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x30) == 0) &&
				(READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100))
				ret = true;
		}
	} else if (id == VDEC_HCODEC) {
		if (has_hdec()) {
			if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
				(((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
				(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
				? 0x1 : 0x3)) == 0) &&
				(READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000))
				ret = true;
		}
	} else if (id == VDEC_HEVC) {
		if (has_hevc_vdec()) {
			if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
				(((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
				(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
				? 0x4 : 0xc0)) == 0) &&
				(READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x1000000))
				ret = true;
		}
	}

	return ret;
}

/*
 * Power control through the secure PSCI SMC power-domain API
 * (SC2-class platforms).  Clocks are managed on the kernel side;
 * the actual power switch is done in secure firmware.
 */
static void pm_vdec_pd_sec_api_power_on(struct device *dev, int id)
{
	int pd_id = (id == VDEC_1) ? PDID_DOS_VDEC :
		(id == VDEC_HEVC) ? PDID_DOS_HEVC :
		PDID_DOS_HCODEC;

	pm_vdec_clock_on(id);
	pwr_ctrl_psci_smc(pd_id, PWR_ON);
}

static void pm_vdec_pd_sec_api_power_off(struct device *dev, int id)
{
	int pd_id = (id == VDEC_1) ? PDID_DOS_VDEC :
		(id == VDEC_HEVC) ? PDID_DOS_HEVC :
		PDID_DOS_HCODEC;

	pm_vdec_clock_off(id);
	pwr_ctrl_psci_smc(pd_id, PWR_OFF);
}

/* pwr_ctrl_status_psci_smc() returns 0 when powered; invert for bool. */
static bool pm_vdec_pd_sec_api_power_state(struct device *dev, int id)
{
	int pd_id = (id == VDEC_1) ? PDID_DOS_VDEC :
		(id == VDEC_HEVC) ? PDID_DOS_HEVC :
		PDID_DOS_HCODEC;

	return !pwr_ctrl_status_psci_smc(pd_id);
}

/*
 * Non-secure power-domain API variants (SM1/TM2 class).  The bodies are
 * compiled out (#if 0) in this tree; the callbacks are currently no-ops
 * except power_state, which falls back to the legacy register check.
 */
static void pm_vdec_pd_nosec_api_power_on(struct device *dev, int id)
{
#if 0
	int pd_id = (id == VDEC_1) ? PM_DOS_VDEC :
		(id == VDEC_HEVC) ? PM_DOS_HEVC :
		PM_DOS_HCODEC;

	pm_vdec_clock_on(id);
	power_domain_switch(pd_id, PWR_ON);
#endif
}

static void pm_vdec_pd_nosec_api_power_off(struct device *dev, int id)
{
#if 0
	int pd_id = (id == VDEC_1) ? PM_DOS_VDEC :
		(id == VDEC_HEVC) ?
		PM_DOS_HEVC :
		PM_DOS_HCODEC;

	pm_vdec_clock_off(id);
	power_domain_switch(pd_id, PWR_OFF);
#endif
}

static bool pm_vdec_pd_nosec_api_power_state(struct device *dev, int id)
{
	return pm_vdec_legacy_power_state(dev, id);
}

/* One ops table per PM scheme; selected by DT compatible string below. */
static const struct power_manager_s pm_rw_reg_data = {
	.pm_type = PM_POWER_CTRL_RW_REG,
	.power_on = pm_vdec_legacy_power_on,
	.power_off = pm_vdec_legacy_power_off,
	.power_state = pm_vdec_legacy_power_state,
};

static const struct power_manager_s pm_ctrl_api_data = {
	.pm_type = PM_POWER_CTRL_API,
	.power_on = pm_vdec_legacy_power_on,
	.power_off = pm_vdec_legacy_power_off,
	.power_state = pm_vdec_legacy_power_state,
};

static const struct power_manager_s pm_pd_data = {
	.pm_type = PM_POWER_DOMAIN,
	.pd_data = pm_domain_data,
	.init = pm_vdec_power_domain_init,
	.release = pm_vdec_power_domain_relese,
	.power_on = pm_vdec_power_domain_power_on,
	.power_off = pm_vdec_power_domain_power_off,
	.power_state = pm_vdec_power_domain_power_state,
};

static const struct power_manager_s pm_pd_sec_api_data = {
	.pm_type = PM_POWER_DOMAIN_SEC_API,
	.power_on = pm_vdec_pd_sec_api_power_on,
	.power_off = pm_vdec_pd_sec_api_power_off,
	.power_state = pm_vdec_pd_sec_api_power_state,
};

static const struct power_manager_s pm_pd_nosec_api_data = {
	.pm_type = PM_POWER_DOMAIN_NONSEC_API,
	.power_on = pm_vdec_pd_nosec_api_power_on,
	.power_off = pm_vdec_pd_nosec_api_power_off,
	.power_state = pm_vdec_pd_nosec_api_power_state,
};

/* Device-tree match table: the compatible string picks the PM scheme. */
const struct of_device_id amlogic_vdec_matches[] = {
	{ .compatible = "amlogic, vdec", .data = &pm_rw_reg_data },
	{ .compatible = "amlogic, vdec-pm-api", .data = &pm_ctrl_api_data },
	{ .compatible = "amlogic, vdec-pm-pd", .data = &pm_pd_data },
	{ .compatible = "amlogic, vdec-pm-pd-sec-api", .data = &pm_pd_sec_api_data },
	{ .compatible = "amlogic, vdec-pm-pd-nsec-api", .data = &pm_pd_nosec_api_data },
	{},
};
EXPORT_SYMBOL(amlogic_vdec_matches);
diff --git a/drivers/frame_provider/decoder/utils/vdec_power_ctrl.h b/drivers/frame_provider/decoder/utils/vdec_power_ctrl.h new file mode 100644 index 0000000..e7ab77e --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_power_ctrl.h
@@ -0,0 +1,107 @@ +/* + * drivers/amlogic/media/frame_provider/decoder/utils/vdec_power_ctrl.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/of_device.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/version.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include "vdec.h" + +/* Directly controlled by reading and writing registers. */ +#define PM_POWER_CTRL_RW_REG (0) + +/* Use power_ctrl_xxx family of interface controls. */ +#define PM_POWER_CTRL_API (1) + +/* + * Power Domain interface control, currently supported + * in versions 4.19 and above. + */ +#define PM_POWER_DOMAIN (2) + +/* + * Controlled by the secure API provided by power domain, + * version 4.9 supports currently supported platforms (SC2). + */ +#define PM_POWER_DOMAIN_SEC_API (3) + +/* + * Use non-secure API control through power domain, version 4.9 support, + * currently supported platforms (SM1, TM2, TM2-revB). 
+ */ +#define PM_POWER_DOMAIN_NONSEC_API (4) + +enum pm_pd_e { + PD_VDEC, + PD_HCODEC, + PD_VDEC2, + PD_HEVC, + PD_HEVCB, + PD_WAVE, + PD_MAX +}; + +struct pm_pd_s { + u8 *name; + struct device *dev; + struct device_link *link; +}; + +struct power_manager_s { + int pm_type; + struct pm_pd_s *pd_data; + int (*init) (struct device *dev); + void (*release) (struct device *dev); + void (*power_on) (struct device *dev, int id); + void (*power_off) (struct device *dev, int id); + bool (*power_state) (struct device *dev, int id); +}; + +const char *get_pm_name(int type); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) +#define DL_FLAG_STATELESS BIT(0) +#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1) +#define DL_FLAG_PM_RUNTIME BIT(2) +#define DL_FLAG_RPM_ACTIVE BIT(3) +#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4) + +struct device_link { + u32 flags; + /* ... */ +}; + +static inline struct device *dev_pm_domain_attach_by_name(struct device *dev, + const char *name) + { return NULL; } +static inline struct device_link *device_link_add(struct device *consumer, + struct device *supplier, u32 flags) + { return NULL; } +static inline void device_link_del(struct device_link *link) { return; } +static inline void device_link_remove(void *consumer, struct device *supplier) { return; } +#endif +
diff --git a/drivers/frame_provider/decoder/utils/vdec_profile.c b/drivers/frame_provider/decoder/utils/vdec_profile.c new file mode 100644 index 0000000..dfffeb1 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_profile.c
@@ -0,0 +1,474 @@ +/* + * drivers/amlogic/amports/vdec_profile.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * +*/ + +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/debugfs.h> +#include <linux/moduleparam.h> +#include <linux/sched/clock.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include <trace/events/meson_atrace.h> +#include "vdec_profile.h" +#include "vdec.h" + +#define ISA_TIMERE 0x2662 +#define ISA_TIMERE_HI 0x2663 + +#define PROFILE_REC_SIZE 40 + +static DEFINE_MUTEX(vdec_profile_mutex); +static int rec_wp; +static bool rec_wrapped; +static uint dec_time_stat_flag; +static uint dec_time_stat_reset; + + +struct dentry *root, *event; + +#define MAX_INSTANCE_MUN 9 + +struct vdec_profile_time_stat_s { + int time_6ms_less_cnt; + int time_6_9ms_cnt; + int time_9_12ms_cnt; + int time_12_15ms_cnt; + int time_15_18ms_cnt; + int time_18_21ms_cnt; + int time_21ms_up_cnt; + u64 time_max_us; + u64 time_total_us; +}; + +struct vdec_profile_statistics_s { + bool status; + u64 run_lasttimestamp; + int run_cnt; + u64 cb_lasttimestamp; + int cb_cnt; + u64 decode_first_us; + struct vdec_profile_time_stat_s run2cb_time_stat; + struct vdec_profile_time_stat_s decode_time_stat; +}; + +static struct vdec_profile_statistics_s statistics_s[MAX_INSTANCE_MUN]; + + +struct vdec_profile_rec_s { + struct vdec_s *vdec; + u64 timestamp; + int event; + int para1; + int para2; +}; + +static struct 
vdec_profile_rec_s recs[PROFILE_REC_SIZE]; +static const char *event_name[VDEC_PROFILE_MAX_EVENT] = { + "run", + "cb", + "save_input", + "check run ready", + "run ready", + "disconnect", + "dec_work", + "info" +}; + +#if 0 /* get time from hardware. */ +static u64 get_us_time_hw(void) +{ + u32 lo, hi1, hi2; + int offset = 0; + + /* txlx, g12a isa register base is 0x3c00 */ + if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_TXLX) + offset = 0x1600; + + do { + hi1 = READ_MPEG_REG(ISA_TIMERE_HI + offset); + lo = READ_MPEG_REG(ISA_TIMERE + offset); + hi2 = READ_MPEG_REG(ISA_TIMERE_HI + offset); + } while (hi1 != hi2); + + return (((u64)hi1) << 32) | lo; +} +#endif + +static u64 get_us_time_system(void) +{ + return div64_u64(local_clock(), 1000); +} + +static void vdec_profile_update_alloc_time( + struct vdec_profile_time_stat_s *time_stat, u64 startus, u64 endus) +{ + u64 spend_time_us = endus - startus; + + if (spend_time_us > 0 && spend_time_us < 100000000) { + if (spend_time_us < 6000) + time_stat->time_6ms_less_cnt++; + else if (spend_time_us < 9000) + time_stat->time_6_9ms_cnt++; + else if (spend_time_us < 12000) + time_stat->time_9_12ms_cnt++; + else if (spend_time_us < 15000) + time_stat->time_12_15ms_cnt++; + else if (spend_time_us < 18000) + time_stat->time_15_18ms_cnt++; + else if (spend_time_us < 21000) + time_stat->time_18_21ms_cnt++; + else + time_stat->time_21ms_up_cnt++; + } + + if (spend_time_us > time_stat->time_max_us) + time_stat->time_max_us = spend_time_us; + + time_stat->time_total_us += spend_time_us; +} + + +static void vdec_profile_statistics(struct vdec_s *vdec, int event) +{ + struct vdec_profile_statistics_s *time_stat = NULL; + u64 timestamp; + int i; + + if (vdec->id >= MAX_INSTANCE_MUN) + return; + + if (event != VDEC_PROFILE_EVENT_RUN && + event != VDEC_PROFILE_EVENT_CB) + return; + + mutex_lock(&vdec_profile_mutex); + + if (dec_time_stat_reset == 1) { + if (event != VDEC_PROFILE_EVENT_RUN) { + mutex_unlock(&vdec_profile_mutex); + return; + 
} + for (i = 0; i < MAX_INSTANCE_MUN; i++) + memset(&statistics_s[i], 0, + sizeof(struct vdec_profile_statistics_s)); + dec_time_stat_reset = 0; + } + + time_stat = &statistics_s[vdec->id]; + timestamp = get_us_time_system(); + + if (time_stat->status == false) { + time_stat->decode_first_us = timestamp; + time_stat->status = true; + } + + if (event == VDEC_PROFILE_EVENT_RUN) { + time_stat->run_lasttimestamp = timestamp; + time_stat->run_cnt++; + } else if (event == VDEC_PROFILE_EVENT_CB) { + /*run2cb statistics*/ + vdec_profile_update_alloc_time(&time_stat->run2cb_time_stat, time_stat->run_lasttimestamp, timestamp); + + /*decode statistics*/ + if (time_stat->cb_cnt == 0) + vdec_profile_update_alloc_time(&time_stat->decode_time_stat, time_stat->decode_first_us, timestamp); + else + vdec_profile_update_alloc_time(&time_stat->decode_time_stat, time_stat->cb_lasttimestamp, timestamp); + + time_stat->cb_lasttimestamp = timestamp; + time_stat->cb_cnt++; + ATRACE_COUNTER(vdec->dec_spend_time, timestamp - time_stat->run_lasttimestamp); + ATRACE_COUNTER(vdec->dec_spend_time_ave, div_u64(time_stat->run2cb_time_stat.time_total_us, time_stat->cb_cnt)); + } + + mutex_unlock(&vdec_profile_mutex); +} + + +void vdec_profile_more(struct vdec_s *vdec, int event, int para1, int para2) +{ + mutex_lock(&vdec_profile_mutex); + + recs[rec_wp].vdec = vdec; + recs[rec_wp].timestamp = get_us_time_system(); + recs[rec_wp].event = event; + recs[rec_wp].para1 = para1; + recs[rec_wp].para2 = para2; + + rec_wp++; + if (rec_wp == PROFILE_REC_SIZE) { + rec_wrapped = true; + rec_wp = 0; + } + + mutex_unlock(&vdec_profile_mutex); +} +EXPORT_SYMBOL(vdec_profile_more); + +void vdec_profile(struct vdec_s *vdec, int event) +{ + ATRACE_COUNTER(vdec->vfm_map_id, event); + vdec_profile_more(vdec, event, 0 , 0); + if (dec_time_stat_flag == 1) + vdec_profile_statistics(vdec, event); +} +EXPORT_SYMBOL(vdec_profile); + +void vdec_profile_flush(struct vdec_s *vdec) +{ + int i; + + if (vdec->id >= 
MAX_INSTANCE_MUN) + return; + + mutex_lock(&vdec_profile_mutex); + + for (i = 0; i < PROFILE_REC_SIZE; i++) { + if (recs[i].vdec == vdec) + recs[i].vdec = NULL; + } + + memset(&statistics_s[vdec->id], 0, sizeof(struct vdec_profile_statistics_s)); + + mutex_unlock(&vdec_profile_mutex); +} + +static const char *event_str(int event) +{ + if (event < VDEC_PROFILE_MAX_EVENT) + return event_name[event]; + + return "INVALID"; +} + +static int vdec_profile_dbg_show(struct seq_file *m, void *v) +{ + int i, end; + u64 base_timestamp; + + mutex_lock(&vdec_profile_mutex); + + if (rec_wrapped) { + i = rec_wp; + end = rec_wp; + } else { + i = 0; + end = rec_wp; + } + + base_timestamp = recs[i].timestamp; + while (1) { + if ((!rec_wrapped) && (i == end)) + break; + + if (recs[i].vdec) { + seq_printf(m, "[%s:%d] \t%016llu us : %s (%d,%d)\n", + vdec_device_name_str(recs[i].vdec), + recs[i].vdec->id, + recs[i].timestamp - base_timestamp, + event_str(recs[i].event), + recs[i].para1, + recs[i].para2 + ); + } else { + seq_printf(m, "[%s:%d] \t%016llu us : %s (%d,%d)\n", + "N/A", + 0, + recs[i].timestamp - base_timestamp, + event_str(recs[i].event), + recs[i].para1, + recs[i].para2 + ); + } + if (++i == PROFILE_REC_SIZE) + i = 0; + + if (rec_wrapped && (i == end)) + break; + } + + mutex_unlock(&vdec_profile_mutex); + + return 0; +} + +static int time_stat_profile_dbg_show(struct seq_file *m, void *v) +{ + int i; + + mutex_lock(&vdec_profile_mutex); + + for (i = 0; i < MAX_INSTANCE_MUN; i++) + { + if (statistics_s[i].status == false) + continue; + + seq_printf(m, "[%d]run_cnt:%d, cb_cnt:%d\n\ + \t\t\ttime_total_us:%llu\n\ + \t\t\trun2cb time:\n\ + \t\t\ttime_max_us:%llu\n\ + \t\t\t[%d]run2cb ave_us:%llu\n\ + \t\t\ttime_6ms_less_cnt:%d\n\ + \t\t\ttime_6_9ms_cnt:%d\n\ + \t\t\ttime_9_12ms_cnt:%d\n\ + \t\t\ttime_12_15ms_cnt:%d\n\ + \t\t\ttime_15_18ms_cnt:%d\n\ + \t\t\ttime_18_21ms_cnt:%d\n\ + \t\t\ttime_21ms_up_cnt:%d\n\ + \t\t\tdecode time:\n\ + \t\t\ttime_total_us:%llu\n\ + 
\t\t\ttime_max_us:%llu\n\ + \t\t\t[%d]cb2cb ave_us:%llu\n\ + \t\t\ttime_6ms_less_cnt:%d\n\ + \t\t\ttime_6_9ms_cnt:%d\n\ + \t\t\ttime_9_12ms_cnt:%d\n\ + \t\t\ttime_12_15ms_cnt:%d\n\ + \t\t\ttime_15_18ms_cnt:%d\n\ + \t\t\ttime_18_21ms_cnt:%d\n\ + \t\t\ttime_21ms_up_cnt:%d\n", + i, + statistics_s[i].run_cnt, + statistics_s[i].cb_cnt, + statistics_s[i].run2cb_time_stat.time_total_us, + statistics_s[i].run2cb_time_stat.time_max_us, + i, + div_u64(statistics_s[i].run2cb_time_stat.time_total_us , statistics_s[i].cb_cnt), + statistics_s[i].run2cb_time_stat.time_6ms_less_cnt, + statistics_s[i].run2cb_time_stat.time_6_9ms_cnt, + statistics_s[i].run2cb_time_stat.time_9_12ms_cnt, + statistics_s[i].run2cb_time_stat.time_12_15ms_cnt, + statistics_s[i].run2cb_time_stat.time_15_18ms_cnt, + statistics_s[i].run2cb_time_stat.time_18_21ms_cnt, + statistics_s[i].run2cb_time_stat.time_21ms_up_cnt, + statistics_s[i].decode_time_stat.time_total_us, + statistics_s[i].decode_time_stat.time_max_us, + i, + div_u64(statistics_s[i].decode_time_stat.time_total_us , statistics_s[i].cb_cnt), + statistics_s[i].decode_time_stat.time_6ms_less_cnt, + statistics_s[i].decode_time_stat.time_6_9ms_cnt, + statistics_s[i].decode_time_stat.time_9_12ms_cnt, + statistics_s[i].decode_time_stat.time_12_15ms_cnt, + statistics_s[i].decode_time_stat.time_15_18ms_cnt, + statistics_s[i].decode_time_stat.time_18_21ms_cnt, + statistics_s[i].decode_time_stat.time_21ms_up_cnt); + } + + mutex_unlock(&vdec_profile_mutex); + + return 0; +} + + +static int vdec_profile_dbg_open(struct inode *inode, struct file *file) +{ + return single_open(file, vdec_profile_dbg_show, NULL); +} + +static int time_stat_profile_dbg_open(struct inode *inode, struct file *file) +{ + return single_open(file, time_stat_profile_dbg_show, NULL); +} + + +static const struct file_operations event_dbg_fops = { + .open = vdec_profile_dbg_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct 
file_operations time_stat_dbg_fops = { + .open = time_stat_profile_dbg_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + +#if 0 /*DEBUG_TMP*/ +static int __init vdec_profile_init_debugfs(void) +{ + struct dentry *root, *event; + + root = debugfs_create_dir("vdec_profile", NULL); + if (IS_ERR(root) || !root) + goto err; + + event = debugfs_create_file("event", 0400, root, NULL, + &event_dbg_fops); + if (!event) + goto err_1; + + mutex_init(&vdec_profile_mutex); + + return 0; + +err_1: + debugfs_remove(root); +err: + pr_err("Can not create debugfs for vdec_profile\n"); + return 0; +} + +#endif + +int vdec_profile_init_debugfs(void) +{ + struct dentry *root, *event, *time_stat; + + root = debugfs_create_dir("vdec_profile", NULL); + if (IS_ERR(root) || !root) + goto err; + + event = debugfs_create_file("event", 0400, root, NULL, + &event_dbg_fops); + if (!event) + goto err_1; + + time_stat = debugfs_create_file("time_stat", 0400, root, NULL, + &time_stat_dbg_fops); + if (!time_stat) + goto err_2; + + mutex_init(&vdec_profile_mutex); + + return 0; + +err_2: + debugfs_remove(event); +err_1: + debugfs_remove(root); +err: + pr_err("Can not create debugfs for vdec_profile\n"); + return 0; +} +EXPORT_SYMBOL(vdec_profile_init_debugfs); + +void vdec_profile_exit_debugfs(void) +{ + debugfs_remove(event); + debugfs_remove(root); +} +EXPORT_SYMBOL(vdec_profile_exit_debugfs); + +module_param(dec_time_stat_flag, uint, 0664); + +module_param(dec_time_stat_reset, uint, 0664); + + +/*module_init(vdec_profile_init_debugfs);*/ +
diff --git a/drivers/frame_provider/decoder/utils/vdec_profile.h b/drivers/frame_provider/decoder/utils/vdec_profile.h new file mode 100644 index 0000000..34f3bee --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_profile.h
/*
 * drivers/amlogic/amports/vdec_profile.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
*/

#ifndef VDEC_PROFILE_H
#define VDEC_PROFILE_H

struct vdec_s;

/*
 * Event ids recorded by vdec_profile()/vdec_profile_more().  The ids
 * index the event_name[] table in vdec_profile.c, so the two must stay
 * in sync and VDEC_PROFILE_MAX_EVENT must remain last.
 */
#define VDEC_PROFILE_EVENT_RUN 0
#define VDEC_PROFILE_EVENT_CB  1
#define VDEC_PROFILE_EVENT_SAVE_INPUT 2
#define VDEC_PROFILE_EVENT_CHK_RUN_READY 3
#define VDEC_PROFILE_EVENT_RUN_READY 4
#define VDEC_PROFILE_EVENT_DISCONNECT 5
#define VDEC_PROFILE_EVENT_DEC_WORK 6
#define VDEC_PROFILE_EVENT_INFO 7
#define VDEC_PROFILE_MAX_EVENT 8

/* Record an event (optionally with two free-form parameters). */
extern void vdec_profile(struct vdec_s *vdec, int event);
extern void vdec_profile_more(struct vdec_s *vdec, int event, int para1, int para2);
/* Drop all records referring to @vdec (call before freeing it). */
extern void vdec_profile_flush(struct vdec_s *vdec);

/* Create/remove the "vdec_profile" debugfs directory and its files. */
int vdec_profile_init_debugfs(void);
void vdec_profile_exit_debugfs(void);

#endif /* VDEC_PROFILE_H */
diff --git a/drivers/frame_provider/decoder/utils/vdec_sync.c b/drivers/frame_provider/decoder/utils/vdec_sync.c new file mode 100644 index 0000000..b4192f6 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_sync.c
/*
* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Description:
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sync_file.h>
#include "vdec_sync.h"

/* Bit in vdec_get_debug() that turns on verbose fence logging. */
#define VDEC_DBG_ENABLE_FENCE	(0x100)
/* Number of preallocated vdec_sync slots handed out by vdec_sync_get(). */
#define VDEC_SYNC_COUNT 32

/* Global pool of vdec_sync slots, protected by vdec_sync_lock. */
struct vdec_sync_core_s {
	struct vdec_sync s_vdec_sync[VDEC_SYNC_COUNT];
	spinlock_t vdec_sync_lock;
};

static struct vdec_sync_core_s vdec_sync_core;

extern u32 vdec_get_debug(void);

static const struct dma_fence_ops timeline_fence_ops;

/* Downcast a dma_fence to our sync_pt; NULL if it is not one of ours. */
static inline struct sync_pt *fence_to_sync_pt(struct dma_fence *fence)
{
	if (fence->ops != &timeline_fence_ops)
		return NULL;
	return container_of(fence, struct sync_pt, fence);
}

/**
 * sync_timeline_create() - creates a sync object
 * @name:	sync_timeline name
 *
 * Creates a new sync_timeline. Returns the sync_timeline object or NULL in
 * case of error.
 */
static struct sync_timeline *sync_timeline_create(const char *name)
{
	struct sync_timeline *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	kref_init(&obj->kref);
	obj->context = dma_fence_context_alloc(1);
	obj->timestamp = local_clock();
	strlcpy(obj->name, name, sizeof(obj->name));
	INIT_LIST_HEAD(&obj->active_list_head);
	INIT_LIST_HEAD(&obj->pt_list);
	spin_lock_init(&obj->lock);

	return obj;
}

/* kref release: frees the timeline and returns its vdec_sync slot
 * to the pool (use_flag back to 0).
 */
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	struct vdec_sync *sync = obj->parent_sync;

	pr_info("[VDEC-FENCE] free timeline: %lx\n", (ulong) obj);
	kfree(obj);
	atomic_set(&sync->use_flag, 0);
	sync->timeline = NULL;
}

static void sync_timeline_get(struct sync_timeline *obj)
{
	kref_get(&obj->kref);
}

static void sync_timeline_put(struct sync_timeline *obj)
{
	kref_put(&obj->kref, sync_timeline_free);
}

static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
{
	struct sync_timeline *parent = fence_parent(fence);

	return parent->name;
}

static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
{
	struct sync_timeline *parent = fence_parent(fence);

	return parent->name;
}

/* dma_fence_ops.release: unlink the sync_pt from the timeline lists,
 * drop the timeline reference taken in sync_pt_create(), free fence.
 */
static void timeline_fence_release(struct dma_fence *fence)
{
	struct sync_pt *pt = fence_to_sync_pt(fence);
	struct sync_timeline *parent = fence_parent(fence);
	unsigned long flags;

	/*pr_info("[VDEC-FENCE] release fence: %lx\n", (ulong) fence);*/

	spin_lock_irqsave(fence->lock, flags);
	list_del(&pt->link);
	if (!list_empty(&pt->active_list))
		list_del(&pt->active_list);
	spin_unlock_irqrestore(fence->lock, flags);
	sync_timeline_put(parent);
	dma_fence_free(fence);
}

/* A point is signaled once the timeline value has caught up with its
 * seqno AND the timeline's timestamp is not older than the point's.
 */
static bool timeline_fence_signaled(struct dma_fence *fence)
{
	struct sync_timeline *parent = fence_parent(fence);
	struct sync_pt *pt = get_sync_pt(fence);

	if (__dma_fence_is_later(fence->seqno, parent->value, fence->ops))
		return false;

	if (pt->timestamp > parent->timestamp)
		return false;

	return true;
}

static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, fence);
	struct sync_timeline *parent = fence_parent(fence);

	if (timeline_fence_signaled(fence))
		return false;

	list_add_tail(&pt->active_list, &parent->active_list_head);
	return true;
}

#if 0
static void timeline_fence_disable_signaling(struct dma_fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, fence);

	list_del_init(&pt->active_list);
}
#endif

static void timeline_fence_value_str(struct dma_fence *fence,
				    char *str, int size)
{
	snprintf(str, size, "%llu", fence->seqno);
}

static void timeline_fence_timeline_value_str(struct dma_fence *fence,
					     char *str, int size)
{
	struct sync_timeline *parent = fence_parent(fence);

	snprintf(str, size, "%d", parent->value);
}

static const struct dma_fence_ops timeline_fence_ops = {
	.get_driver_name = timeline_fence_get_driver_name,
	.get_timeline_name = timeline_fence_get_timeline_name,
	.enable_signaling = timeline_fence_enable_signaling,
	//.disable_signaling = timeline_fence_disable_signaling,
	.signaled = timeline_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = timeline_fence_release,
	.fence_value_str = timeline_fence_value_str,
	.timeline_value_str = timeline_fence_timeline_value_str,
};

/**
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj:	sync_timeline to signal
 * @inc:	num to increment on timeline->value
 *
 * A sync implementation should call this any time one of it's fences
 * has signaled or has an error condition.
 */
static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
	struct sync_pt *pt, *next;
	unsigned long flags;

	spin_lock_irqsave(&obj->lock, flags);
	obj->value += inc;
	list_for_each_entry_safe(pt, next, &obj->active_list_head,
				 active_list) {
		if (dma_fence_is_signaled_locked(&pt->fence))
			list_del_init(&pt->active_list);
	}
	spin_unlock_irqrestore(&obj->lock, flags);
}

/**
 * sync_pt_create() - creates a sync pt
 * @parent:	fence's parent sync_timeline
 * @inc:	value of the fence
 *
 * Creates a new sync_pt as a child of @parent.  @size bytes will be
 * allocated allowing for implementation specific data to be kept after
 * the generic sync_timeline struct. Returns the sync_pt object or
 * NULL in case of error.
 */
static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
				     unsigned int value)
{
	struct sync_pt *pt;
	unsigned long flags;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return NULL;
	spin_lock_irqsave(&obj->lock, flags);
	sync_timeline_get(obj);
	dma_fence_init(&pt->fence, &timeline_fence_ops, &obj->lock,
		       obj->context, value);
	list_add_tail(&pt->link, &obj->pt_list);
	INIT_LIST_HEAD(&pt->active_list);
	spin_unlock_irqrestore(&obj->lock, flags);

	return pt;
}

/* Free a point that was never handed out (error-path counterpart of
 * sync_pt_create(); releases the timeline ref taken there).
 */
static void sync_pt_free(struct sync_timeline *obj,
			 struct sync_pt *pt)
{
	unsigned long flags;

	spin_lock_irqsave(&obj->lock, flags);
	list_del(&pt->link);
	sync_timeline_put(obj);
	spin_unlock_irqrestore(&obj->lock, flags);
	kfree(pt);
	pt = NULL;
}

/* Allocate a sync_pt on @sync's timeline at @value; for APP usage also
 * wrap it in a sync_file and install it into *fd.
 * NOTE(review): in the "*fd < 0" branch the "return -EBADF;" makes the
 * following "goto err" unreachable, so the freshly created pt leaks;
 * additionally the err path calls put_unused_fd() with the invalid fd.
 * Left as-is here (documentation-only change) — worth a follow-up fix.
 */
static int timeline_create_fence(struct vdec_sync *sync, int usage,
			       struct dma_fence **fence, int *fd, u32 value)
{
	int ret;
	struct sync_pt *pt;
	struct sync_file *sync_file;
	struct sync_timeline *obj = sync->timeline;

	if (obj == NULL)
		return -EPERM;

	pt = sync_pt_create(obj, value);
	if (!pt) {
		return -ENOMEM;
	}

	if (usage == FENCE_USE_FOR_APP) {
		*fd = get_unused_fd_flags(O_CLOEXEC);
		if (*fd < 0) {
			return -EBADF;
			goto err;
		}

		sync_file = sync_file_create(&pt->fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto err;
		}

		fd_install(*fd, sync_file->file);

		/* decreases refcnt. */
		dma_fence_put(&pt->fence);
	}

	*fence = &pt->fence;

	pt->timestamp = local_clock();

	if (vdec_get_debug() & VDEC_DBG_ENABLE_FENCE)
		pr_info("[VDEC-FENCE]: create fence: %lx, fd: %d, ref: %d, usage: %d\n",
			(ulong) &pt->fence, *fd,
			atomic_read(&pt->fence.refcount.refcount.refs), usage);
	return 0;
err:
	put_unused_fd(*fd);
	if (pt)
		sync_pt_free(obj, pt);

	return ret;
}

/* Look up the dma_fence behind a sync_file fd (takes a reference). */
struct dma_fence *vdec_fence_get(int fd)
{
	return sync_file_get_fence(fd);
}
EXPORT_SYMBOL(vdec_fence_get);

/* Drop a fence reference, logging its lifetime when debug is enabled. */
void vdec_fence_put(struct dma_fence *fence)
{
	if (vdec_get_debug() & VDEC_DBG_ENABLE_FENCE)
		pr_info("[VDEC-FENCE]: the fence (%px) cost time: %lld ns\n",
			fence, local_clock() - get_sync_pt(fence)->timestamp);
	dma_fence_put(fence);
}
EXPORT_SYMBOL(vdec_fence_put);

/* Blocking wait with timeout; returns dma_fence_wait_timeout() result. */
int vdec_fence_wait(struct dma_fence *fence, long timeout)
{
	if (vdec_get_debug() & VDEC_DBG_ENABLE_FENCE)
		pr_info("[VDEC-FENCE]: wait fence %lx.\n", (ulong) fence);

	return dma_fence_wait_timeout(fence, false, timeout);
}
EXPORT_SYMBOL(vdec_fence_wait);

/* Claim a free vdec_sync slot from the global pool (NULL-equivalent 0
 * when exhausted).  Initializes all 64 release callbacks of the slot.
 */
struct vdec_sync *vdec_sync_get(void)
{
	int i;
	struct vdec_sync_core_s *core = &vdec_sync_core;
	ulong flags;

	spin_lock_irqsave(&core->vdec_sync_lock, flags);

	for (i = 0; i < VDEC_SYNC_COUNT; i++) {
		if (atomic_read(&core->s_vdec_sync[i].use_flag) == 0) {
			int j;
			atomic_set(&core->s_vdec_sync[i].use_flag, 1);
			for (j = 0; j < 64; j++) {
				core->s_vdec_sync[i].release_callback[j].func = vdec_fence_buffer_count_decrease;
				core->s_vdec_sync[i].release_callback[j].private_data = (void *)&core->s_vdec_sync[i];
			}
			spin_unlock_irqrestore(&core->vdec_sync_lock, flags);
			return &core->s_vdec_sync[i];
		}
	}
	spin_unlock_irqrestore(&core->vdec_sync_lock, flags);
	return 0;
}
EXPORT_SYMBOL(vdec_sync_get);

/* Create @sync's timeline object, named after @name.
 * NOTE(review): obj is dereferenced (obj->parent_sync) before the NULL
 * check implied by the "create timeline faild" branch — an allocation
 * failure would oops before reaching the error message.
 */
void vdec_timeline_create(struct vdec_sync *sync, u8 *name)
{
	struct sync_timeline *obj;
	snprintf(sync->name, sizeof(sync->name), "%s", name);

	obj = sync_timeline_create(sync->name);
	obj->parent_sync = sync;
	sync->timeline = (void *)obj;

	if (sync->timeline)
		pr_info("[VDEC-FENCE]: create timeline %lx, name: %s\n",
			(ulong) sync->timeline, sync->name);
	else
		pr_err("[VDEC-FENCE]: create timeline faild.\n");
}
EXPORT_SYMBOL(vdec_timeline_create);

/* Create the next fence on @sync's timeline; the seqno is one past the
 * max of the timeline value and the newest outstanding point, so fences
 * are always created in monotonically increasing order.
 */
int vdec_timeline_create_fence(struct vdec_sync *sync)
{
	struct sync_timeline *obj = sync->timeline;
	struct sync_pt *pt = NULL;
	ulong flags;
	u32 value = 0;

	if (obj == NULL)
		return -EPERM;

	spin_lock_irqsave(&obj->lock, flags);

	value = obj->value + 1;

	if (!list_empty(&obj->pt_list)) {
		pt = list_last_entry(&obj->pt_list, struct sync_pt, link);
		if (value <= pt->fence.seqno) {
			value = pt->fence.seqno + 1;
		}
	}
	spin_unlock_irqrestore(&obj->lock, flags);

	return timeline_create_fence(sync,
				     sync->usage,
				     &sync->fence,
				     &sync->fd,
				     value);
}
EXPORT_SYMBOL(vdec_timeline_create_fence);

/* Advance the timeline by @value, signaling any now-complete fences. */
void vdec_timeline_increase(struct vdec_sync *sync, u32 value)
{
	struct sync_timeline *obj = sync->timeline;

	if (obj == NULL)
		return;

	obj->timestamp = local_clock();

	if (vdec_get_debug() & VDEC_DBG_ENABLE_FENCE)
		pr_info("[VDEC-FENCE]: update timeline %d.\n",
			obj->value + value);

	sync_timeline_signal(obj, value);
}
EXPORT_SYMBOL(vdec_timeline_increase);

void vdec_timeline_get(struct vdec_sync *sync)
{
	struct sync_timeline *obj = sync->timeline;

	sync_timeline_get(obj);
}
EXPORT_SYMBOL(vdec_timeline_get);


void vdec_timeline_put(struct vdec_sync *sync)
{
	struct sync_timeline *obj = sync->timeline;

	sync_timeline_put(obj);
}
EXPORT_SYMBOL(vdec_timeline_put);

/* Record an error status on the fence (dma_fence error propagation). */
void vdec_fence_status_set(struct dma_fence *fence, int status)
{
	fence->error = status;
}
EXPORT_SYMBOL(vdec_fence_status_set);

/* (truncated at chunk boundary — remainder of this definition is not
 * visible in this view)
 */
int vdec_fence_status_get(struct
dma_fence *fence) +{ + return dma_fence_get_status(fence); +} +EXPORT_SYMBOL(vdec_fence_status_get); + +bool check_objs_all_signaled(struct vdec_sync *sync) +{ + struct sync_timeline *obj = sync->timeline; + bool ret = false; + ulong flags; + + spin_lock_irqsave(&obj->lock, flags); + ret = list_empty(&obj->active_list_head); + spin_unlock_irqrestore(&obj->lock, flags); + + return ret; +} +EXPORT_SYMBOL(check_objs_all_signaled); + +int vdec_clean_all_fence(struct vdec_sync *sync) +{ + /*struct sync_timeline *obj = sync->timeline; + struct sync_pt *pt, *next; + + spin_lock_irq(&obj->lock); + + list_for_each_entry_safe(pt, next, &obj->pt_list, link) { + dma_fence_set_error(&pt->fence, -ENOENT); + dma_fence_signal_locked(&pt->fence); + } + + spin_unlock_irq(&obj->lock);*/ + + struct sync_pt *pt, *next; + struct sync_timeline *obj = sync->timeline; + + list_for_each_entry_safe(pt, next, &obj->pt_list, link) { + pr_err("vdec_clean_all_fence %px\n" , obj->parent_sync); + vdec_fence_put(&pt->fence); + } + + sync_timeline_put(obj); + + return 0; +} +EXPORT_SYMBOL(vdec_clean_all_fence); + +void vdec_fence_buffer_count_increase(ulong fence) +{ + struct vdec_sync *sync = (struct vdec_sync *)fence; + struct sync_timeline *obj = sync->timeline; + + spin_lock_irq(&obj->lock); + + if (atomic_read(&sync->buffer_count) == 0) { + sync_timeline_get(obj); + } + + atomic_inc(&sync->buffer_count); + + spin_unlock_irq(&obj->lock); +} +EXPORT_SYMBOL(vdec_fence_buffer_count_increase); + +void vdec_fence_buffer_count_decrease(struct codec_mm_s *mm, struct codec_mm_cb_s *cb) +{ + struct vdec_sync *sync = (struct vdec_sync *)cb->private_data; + struct sync_pt *pt, *next; + struct sync_timeline *obj = sync->timeline; + + atomic_dec(&sync->buffer_count); + + if (atomic_read(&sync->buffer_count) == 0) { + sync_timeline_put(obj); + list_for_each_entry_safe(pt, next, &obj->pt_list, link) { + vdec_fence_put(&pt->fence); + } + return; + } + return ; +} 
+EXPORT_SYMBOL(vdec_fence_buffer_count_decrease); + +void vdec_sync_core_init(void) +{ + spin_lock_init(&vdec_sync_core.vdec_sync_lock); +} +EXPORT_SYMBOL(vdec_sync_core_init); + +
diff --git a/drivers/frame_provider/decoder/utils/vdec_sync.h b/drivers/frame_provider/decoder/utils/vdec_sync.h new file mode 100644 index 0000000..6637b42 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_sync.h
@@ -0,0 +1,107 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/list.h> +#include <linux/rbtree.h> +#include <linux/spinlock.h> +#include <linux/dma-fence.h> +#include <linux/sync_file.h> +#include <uapi/linux/sync_file.h> +#include <linux/device.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> + +#define FENCE_USE_FOR_DRIVER (0) +#define FENCE_USE_FOR_APP (1) + +struct sync_timeline { + struct kref kref; + char name[32]; + + /* protected by lock */ + u64 context; + u32 value; + + struct list_head active_list_head; + struct list_head pt_list; + spinlock_t lock; + + u64 timestamp; + struct vdec_sync *parent_sync; +}; + +struct sync_pt { + struct dma_fence fence; + struct list_head link; + struct list_head active_list; + u64 timestamp; +}; + +struct vdec_sync { + u8 name[32]; + void *timeline; + int usage; + int fd; + struct dma_fence *fence; + atomic_t buffer_count; + atomic_t use_flag; + struct codec_mm_cb_s release_callback[64]; +}; + +static inline struct sync_timeline *fence_parent(struct dma_fence *fence) +{ + return container_of(fence->lock, struct sync_timeline, lock); +} + +static inline struct sync_pt *get_sync_pt(struct dma_fence *fence) +{ + return container_of(fence, struct sync_pt, fence); 
+} + +struct dma_fence *vdec_fence_get(int fd); + +void vdec_fence_put(struct dma_fence *fence); + +int vdec_fence_wait(struct dma_fence *fence, long timeout); + +void vdec_timeline_create(struct vdec_sync *sync, u8 *name); + +int vdec_timeline_create_fence(struct vdec_sync *sync); + +void vdec_timeline_increase(struct vdec_sync *sync, u32 value); + +void vdec_timeline_get(struct vdec_sync *sync); + +void vdec_timeline_put(struct vdec_sync *sync); + +int vdec_fence_status_get(struct dma_fence *fence); + +void vdec_fence_status_set(struct dma_fence *fence, int status); + +bool check_objs_all_signaled(struct vdec_sync *sync); + +int vdec_clean_all_fence(struct vdec_sync *sync); + +void vdec_fence_buffer_count_increase(ulong fence); + +void vdec_fence_buffer_count_decrease(struct codec_mm_s *mm, struct codec_mm_cb_s *cb); + +struct vdec_sync *vdec_sync_get(void); + +void vdec_sync_core_init(void); +
diff --git a/drivers/frame_provider/decoder/utils/vdec_trace.h b/drivers/frame_provider/decoder/utils/vdec_trace.h new file mode 100644 index 0000000..e09518e --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_trace.h
@@ -0,0 +1,149 @@ +/* + * drivers/amlogic/amports/vdec_trace.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * +*/ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM vdec + +#if !defined(_VDEC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _VDEC_TRACE_H + +#include <linux/tracepoint.h> + +struct vdec_s; + +/* single lifecycle events */ +DECLARE_EVENT_CLASS(vdec_event_class, + TP_PROTO(struct vdec_s *vdec), + TP_ARGS(vdec), + TP_STRUCT__entry( + __field(struct vdec_s *, vdec) + ), + TP_fast_assign( + __entry->vdec = vdec; + ), + TP_printk("[%p]", __entry->vdec) +); + +#define DEFINE_VDEC_EVENT(name) \ +DEFINE_EVENT(vdec_event_class, name, \ + TP_PROTO(struct vdec_s *vdec), \ + TP_ARGS(vdec)) + +DEFINE_VDEC_EVENT(vdec_create); +DEFINE_VDEC_EVENT(vdec_connect); +DEFINE_VDEC_EVENT(vdec_disconnect); +DEFINE_VDEC_EVENT(vdec_destroy); +DEFINE_VDEC_EVENT(vdec_reset); +DEFINE_VDEC_EVENT(vdec_release); + +/* set format event */ +#define format_name(format) \ + __print_symbolic(format, \ + {0, "MPEG"}, \ + {1, "MPEG4"}, \ + {2, "H264"}, \ + {3, "MJPEG"}, \ + {4, "REAL"}, \ + {5, "JPEG"}, \ + {6, "VC1"}, \ + {7, "AVS"}, \ + {8, "YUV"}, \ + {9, "H264MVC"}, \ + {10, "H264_4K2K"}, \ + {11, "H265"}, \ + {12, "ENC_AVC"}, \ + {13, "ENC_JPEG"}, \ + {14, "VP9"}) + +TRACE_EVENT(vdec_set_format, + TP_PROTO(struct vdec_s *vdec, int format), + TP_ARGS(vdec, format), + TP_STRUCT__entry( + __field(struct vdec_s *, vdec) + __field(int, format) + ), + TP_fast_assign( + 
__entry->vdec = vdec; + __entry->format = format; + ), + TP_printk("[%p]:%s", __entry->vdec, + format_name(__entry->format)) +); + +/* status events */ +#define status_name(status) \ + __print_symbolic(status, \ + {0, "UNINITIALIZED"}, \ + {1, "DISCONNECTED"}, \ + {2, "CONNECTED"}, \ + {3, "ACTIVE"}) + +DECLARE_EVENT_CLASS(vdec_status_class, + TP_PROTO(struct vdec_s *vdec, int state), + TP_ARGS(vdec, state), + TP_STRUCT__entry( + __field(struct vdec_s *, vdec) + __field(int, state) + ), + TP_fast_assign( + __entry->vdec = vdec; + __entry->state = state; + ), + TP_printk("[%p]:%s", __entry->vdec, status_name(__entry->state)) +); + +#define DEFINE_STATUS_EVENT(name) \ +DEFINE_EVENT(vdec_status_class, name, \ + TP_PROTO(struct vdec_s *vdec, int status), \ + TP_ARGS(vdec, status)) + +DEFINE_STATUS_EVENT(vdec_set_status); +DEFINE_STATUS_EVENT(vdec_set_next_status); + +/* set pts events */ +DECLARE_EVENT_CLASS(vdec_pts_class, + TP_PROTO(struct vdec_s *vdec, u64 pts), + TP_ARGS(vdec, pts), + TP_STRUCT__entry( + __field(struct vdec_s *, vdec) + __field(u64, pts) + ), + TP_fast_assign( + __entry->vdec = vdec; + __entry->pts = pts; + ), + TP_printk("[%p]%llu", __entry->vdec, __entry->pts) +); + +#define DEFINE_PTS_EVENT(name) \ +DEFINE_EVENT(vdec_pts_class, name, \ + TP_PROTO(struct vdec_s *vdec, u64 pts), \ + TP_ARGS(vdec, pts)) + +DEFINE_PTS_EVENT(vdec_set_pts); +DEFINE_PTS_EVENT(vdec_set_pts64); + +#endif /* _VDEC_TRACE_H */ + +/* +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE vdec_trace +#include <trace/define_trace.h> +*/ +/**/ //DEBUG_TMP
diff --git a/drivers/frame_provider/decoder/utils/vdec_v4l2_buffer_ops.c b/drivers/frame_provider/decoder/utils/vdec_v4l2_buffer_ops.c new file mode 100644 index 0000000..34bc21a --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_v4l2_buffer_ops.c
@@ -0,0 +1,175 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include "vdec_v4l2_buffer_ops.h" +#include <media/v4l2-mem2mem.h> +#include <linux/printk.h> + +int vdec_v4l_get_pic_info(struct aml_vcodec_ctx *ctx, + struct vdec_pic_info *pic) +{ + int ret = 0; + + if (ctx->drv_handle == 0) + return -EIO; + + ret = ctx->dec_if->get_param(ctx->drv_handle, + GET_PARAM_PIC_INFO, pic); + + return ret; +} +EXPORT_SYMBOL(vdec_v4l_get_pic_info); + +int vdec_v4l_set_cfg_infos(struct aml_vcodec_ctx *ctx, + struct aml_vdec_cfg_infos *cfg) +{ + int ret = 0; + + if (ctx->drv_handle == 0) + return -EIO; + + ret = ctx->dec_if->set_param(ctx->drv_handle, + SET_PARAM_CFG_INFO, cfg); + + return ret; +} +EXPORT_SYMBOL(vdec_v4l_set_cfg_infos); + +int vdec_v4l_set_ps_infos(struct aml_vcodec_ctx *ctx, + struct aml_vdec_ps_infos *ps) +{ + int ret = 0; + + if (ctx->drv_handle == 0) + return -EIO; + + ret = ctx->dec_if->set_param(ctx->drv_handle, + SET_PARAM_PS_INFO, ps); + + return ret; +} +EXPORT_SYMBOL(vdec_v4l_set_ps_infos); + +int vdec_v4l_set_comp_buf_info(struct aml_vcodec_ctx *ctx, + struct vdec_comp_buf_info *info) +{ + int ret = 0; + + if (ctx->drv_handle == 0) + return -EIO; + + ret = ctx->dec_if->set_param(ctx->drv_handle, + 
SET_PARAM_COMP_BUF_INFO, info); + + return ret; + +} +EXPORT_SYMBOL(vdec_v4l_set_comp_buf_info); + +int vdec_v4l_set_hdr_infos(struct aml_vcodec_ctx *ctx, + struct aml_vdec_hdr_infos *hdr) +{ + int ret = 0; + + if (ctx->drv_handle == 0) + return -EIO; + + ret = ctx->dec_if->set_param(ctx->drv_handle, + SET_PARAM_HDR_INFO, hdr); + + return ret; +} +EXPORT_SYMBOL(vdec_v4l_set_hdr_infos); + +void aml_vdec_pic_info_update(struct aml_vcodec_ctx *ctx) +{ + if (ctx != NULL) + ctx->vdec_pic_info_update(ctx); +} + +int vdec_v4l_post_evet(struct aml_vcodec_ctx *ctx, u32 event) +{ + int ret = 0; + + if (ctx->drv_handle == 0) + return -EIO; + if (event == 1) + ctx->reset_flag = 2; + ret = ctx->dec_if->set_param(ctx->drv_handle, + SET_PARAM_POST_EVENT, &event); + + return ret; +} +EXPORT_SYMBOL(vdec_v4l_post_evet); + +int vdec_v4l_res_ch_event(struct aml_vcodec_ctx *ctx) +{ + int ret = 0; + struct aml_vcodec_dev *dev = ctx->dev; + + if (ctx->drv_handle == 0) + return -EIO; + + aml_vdec_pic_info_update(ctx); + + mutex_lock(&ctx->state_lock); + + ctx->state = AML_STATE_FLUSHING;/*prepare flushing*/ + + pr_info("[%d]: vcodec state (AML_STATE_FLUSHING-RESCHG)\n", ctx->id); + + mutex_unlock(&ctx->state_lock); + + while (ctx->m2m_ctx->job_flags & TRANS_RUNNING) { + v4l2_m2m_job_pause(dev->m2m_dev_dec, ctx->m2m_ctx); + } + + return ret; +} +EXPORT_SYMBOL(vdec_v4l_res_ch_event); + + +int vdec_v4l_write_frame_sync(struct aml_vcodec_ctx *ctx) +{ + int ret = 0; + + if (ctx->drv_handle == 0) + return -EIO; + + ret = ctx->dec_if->set_param(ctx->drv_handle, + SET_PARAM_WRITE_FRAME_SYNC, NULL); + + return ret; +} +EXPORT_SYMBOL(vdec_v4l_write_frame_sync); + +int vdec_v4l_get_dw_mode(struct aml_vcodec_ctx *ctx, + unsigned int *dw_mode) +{ + int ret = -1; + + if (ctx->drv_handle == 0) + return -EIO; + + ret = ctx->dec_if->get_param(ctx->drv_handle, + GET_PARAM_DW_MODE, dw_mode); + + return ret; +} +EXPORT_SYMBOL(vdec_v4l_get_dw_mode);
diff --git a/drivers/frame_provider/decoder/utils/vdec_v4l2_buffer_ops.h b/drivers/frame_provider/decoder/utils/vdec_v4l2_buffer_ops.h new file mode 100644 index 0000000..98513d9 --- /dev/null +++ b/drivers/frame_provider/decoder/utils/vdec_v4l2_buffer_ops.h
@@ -0,0 +1,60 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef _AML_VDEC_V4L2_BUFFER_H_ +#define _AML_VDEC_V4L2_BUFFER_H_ + +#include "../../../amvdec_ports/vdec_drv_base.h" +#include "../../../amvdec_ports/aml_vcodec_adapt.h" + +int vdec_v4l_get_pic_info( + struct aml_vcodec_ctx *ctx, + struct vdec_pic_info *pic); + +int vdec_v4l_set_cfg_infos( + struct aml_vcodec_ctx *ctx, + struct aml_vdec_cfg_infos *cfg); + +int vdec_v4l_set_ps_infos( + struct aml_vcodec_ctx *ctx, + struct aml_vdec_ps_infos *ps); + +int vdec_v4l_set_comp_buf_info( + struct aml_vcodec_ctx *ctx, + struct vdec_comp_buf_info *info); + +int vdec_v4l_set_hdr_infos( + struct aml_vcodec_ctx *ctx, + struct aml_vdec_hdr_infos *hdr); + +int vdec_v4l_write_frame_sync( + struct aml_vcodec_ctx *ctx); + +int vdec_v4l_post_evet( + struct aml_vcodec_ctx *ctx, + u32 event); + +int vdec_v4l_res_ch_event( + struct aml_vcodec_ctx *ctx); + +int vdec_v4l_get_dw_mode( + struct aml_vcodec_ctx *ctx, + unsigned int *dw_mode); + +#endif
diff --git a/drivers/frame_provider/decoder/vav1/Makefile b/drivers/frame_provider/decoder/vav1/Makefile new file mode 100644 index 0000000..64a4973 --- /dev/null +++ b/drivers/frame_provider/decoder/vav1/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_AV1) += amvdec_av1.o +amvdec_av1-objs += vav1.o av1_bufmgr.o
diff --git a/drivers/frame_provider/decoder/vav1/aom_av1_define.h b/drivers/frame_provider/decoder/vav1/aom_av1_define.h new file mode 100644 index 0000000..69e63f1 --- /dev/null +++ b/drivers/frame_provider/decoder/vav1/aom_av1_define.h
@@ -0,0 +1,190 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +enum NalUnitType +{ + NAL_UNIT_CODED_SLICE_TRAIL_N = 0, // 0 + NAL_UNIT_CODED_SLICE_TRAIL_R, // 1 + + NAL_UNIT_CODED_SLICE_TSA_N, // 2 + NAL_UNIT_CODED_SLICE_TLA, // 3 // Current name in the spec: TSA_R + + NAL_UNIT_CODED_SLICE_STSA_N, // 4 + NAL_UNIT_CODED_SLICE_STSA_R, // 5 + + NAL_UNIT_CODED_SLICE_RADL_N, // 6 + NAL_UNIT_CODED_SLICE_DLP, // 7 // Current name in the spec: RADL_R + + NAL_UNIT_CODED_SLICE_RASL_N, // 8 + NAL_UNIT_CODED_SLICE_TFD, // 9 // Current name in the spec: RASL_R + + NAL_UNIT_RESERVED_10, + NAL_UNIT_RESERVED_11, + NAL_UNIT_RESERVED_12, + NAL_UNIT_RESERVED_13, + NAL_UNIT_RESERVED_14, + NAL_UNIT_RESERVED_15, + + NAL_UNIT_CODED_SLICE_BLA, // 16 // Current name in the spec: BLA_W_LP + NAL_UNIT_CODED_SLICE_BLANT, // 17 // Current name in the spec: BLA_W_DLP + NAL_UNIT_CODED_SLICE_BLA_N_LP, // 18 + NAL_UNIT_CODED_SLICE_IDR, // 19 // Current name in the spec: IDR_W_DLP + NAL_UNIT_CODED_SLICE_IDR_N_LP, // 20 + NAL_UNIT_CODED_SLICE_CRA, // 21 + NAL_UNIT_RESERVED_22, + NAL_UNIT_RESERVED_23, + + NAL_UNIT_RESERVED_24, + NAL_UNIT_RESERVED_25, + NAL_UNIT_RESERVED_26, + NAL_UNIT_RESERVED_27, + NAL_UNIT_RESERVED_28, + NAL_UNIT_RESERVED_29, + 
NAL_UNIT_RESERVED_30, + NAL_UNIT_RESERVED_31, + + NAL_UNIT_VPS, // 32 + NAL_UNIT_SPS, // 33 + NAL_UNIT_PPS, // 34 + NAL_UNIT_ACCESS_UNIT_DELIMITER, // 35 + NAL_UNIT_EOS, // 36 + NAL_UNIT_EOB, // 37 + NAL_UNIT_FILLER_DATA, // 38 + NAL_UNIT_SEI, // 39 Prefix SEI + NAL_UNIT_SEI_SUFFIX, // 40 Suffix SEI + NAL_UNIT_RESERVED_41, + NAL_UNIT_RESERVED_42, + NAL_UNIT_RESERVED_43, + NAL_UNIT_RESERVED_44, + NAL_UNIT_RESERVED_45, + NAL_UNIT_RESERVED_46, + NAL_UNIT_RESERVED_47, + NAL_UNIT_UNSPECIFIED_48, + NAL_UNIT_UNSPECIFIED_49, + NAL_UNIT_UNSPECIFIED_50, + NAL_UNIT_UNSPECIFIED_51, + NAL_UNIT_UNSPECIFIED_52, + NAL_UNIT_UNSPECIFIED_53, + NAL_UNIT_UNSPECIFIED_54, + NAL_UNIT_UNSPECIFIED_55, + NAL_UNIT_UNSPECIFIED_56, + NAL_UNIT_UNSPECIFIED_57, + NAL_UNIT_UNSPECIFIED_58, + NAL_UNIT_UNSPECIFIED_59, + NAL_UNIT_UNSPECIFIED_60, + NAL_UNIT_UNSPECIFIED_61, + NAL_UNIT_UNSPECIFIED_62, + NAL_UNIT_UNSPECIFIED_63, + NAL_UNIT_INVALID, +}; + +int forbidden_zero_bit; +int m_nalUnitType; +int m_reservedZero6Bits; +int m_temporalId; + +//--------------------------------------------------- +// Amrisc Software Interrupt +//--------------------------------------------------- +#define AMRISC_STREAM_EMPTY_REQ 0x01 +#define AMRISC_PARSER_REQ 0x02 +#define AMRISC_MAIN_REQ 0x04 + +//--------------------------------------------------- +// AOM_AV1_DEC_STATUS (HEVC_DEC_STATUS) define +//--------------------------------------------------- + /*command*/ +#define AOM_AV1_DEC_IDLE 0 +#define AOM_AV1_DEC_FRAME_HEADER 1 +#define AOM_AV1_DEC_TILE_END 2 +#define AOM_AV1_DEC_TG_END 3 +#define AOM_AV1_DEC_LCU_END 4 +#define AOM_AV1_DECODE_SLICE 5 +#define AOM_AV1_SEARCH_HEAD 6 +#define AOM_AV1_DUMP_LMEM 7 +#define AOM_AV1_FGS_PARAM_CONT 8 +#define AOM_AV1_FGS_PARAM_CONT 8 +#define AOM_AV1_PIC_END_CONT 9 + /*status*/ +#define AOM_AV1_DEC_PIC_END 0xe0 + /*AOM_AV1_FGS_PARA: + Bit[11] - 0 Read, 1 - Write + Bit[10:8] - film_grain_params_ref_idx, For Write request + */ +#define AOM_AV1_FGS_PARAM 0xe1 +#define 
AOM_AV1_DEC_PIC_END_PRE 0xe2 +#define AOM_AV1_HEAD_PARSER_DONE 0xf0 +#define AOM_AV1_HEAD_SEARCH_DONE 0xf1 +#define AOM_AV1_SEQ_HEAD_PARSER_DONE 0xf2 +#define AOM_AV1_FRAME_HEAD_PARSER_DONE 0xf3 +#define AOM_AV1_FRAME_PARSER_DONE 0xf4 +#define AOM_AV1_REDUNDANT_FRAME_HEAD_PARSER_DONE 0xf5 +#define HEVC_ACTION_DONE 0xff + + +//--------------------------------------------------- +// Include "parser_cmd.h" +//--------------------------------------------------- +#define PARSER_CMD_SKIP_CFG_0 0x0000090b + +#define PARSER_CMD_SKIP_CFG_1 0x1b14140f + +#define PARSER_CMD_SKIP_CFG_2 0x001b1910 + +#define PARSER_CMD_NUMBER 37 + +unsigned short parser_cmd[PARSER_CMD_NUMBER] = { +0x0401, +0x8401, +0x0800, +0x0402, +0x9002, +0x1423, +0x8CC3, +0x1423, +0x8804, +0x9825, +0x0800, +0x04FE, +0x8406, +0x8411, +0x1800, +0x8408, +0x8409, +0x8C2A, +0x9C2B, +0x1C00, +0x840F, +0x8407, +0x8000, +0x8408, +0x2000, +0xA800, +0x8410, +0x04DE, +0x840C, +0x840D, +0xAC00, +0xA000, +0x08C0, +0x08E0, +0xA40E, +0xFC00, +0x7C00 +};
diff --git a/drivers/frame_provider/decoder/vav1/av1_bufmgr.c b/drivers/frame_provider/decoder/vav1/av1_bufmgr.c new file mode 100644 index 0000000..1e45320 --- /dev/null +++ b/drivers/frame_provider/decoder/vav1/av1_bufmgr.c
@@ -0,0 +1,3416 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#else +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/amlogic/media/canvas/canvas.h> + +#undef pr_info +#define pr_info printk + +#define __COMPARE(context, p1, p2) comp(p1, p2) +#define __SHORTSORT(lo, hi, width, comp, context) \ + shortsort(lo, hi, width, comp) +#define CUTOFF 8 /* testing shows that this is good value */ +#define STKSIZ (8*sizeof(void *) - 2) + +#undef swap +static void swap(char *a, char *b, size_t width) +{ + char tmp; + + if (a != b) + /* Do the swap one character at a time to avoid potential + * alignment problems. + */ + while (width--) { + tmp = *a; + *a++ = *b; + *b++ = tmp; + } +} + +static void shortsort(char *lo, char *hi, size_t width, + int (*comp)(const void *, const void *)) +{ + char *p, *max; + + /* Note: in assertions below, i and j are alway inside original + * bound of array to sort. 
+ */ + while (hi > lo) { + /* A[i] <= A[j] for i <= j, j > hi */ + max = lo; + for (p = lo + width; p <= hi; p += width) { + /* A[i] <= A[max] for lo <= i < p */ + if (__COMPARE(context, p, max) > 0) + max = p; + /* A[i] <= A[max] for lo <= i <= p */ + } + /* A[i] <= A[max] for lo <= i <= hi */ + swap(max, hi, width); + + /* A[i] <= A[hi] for i <= hi, so A[i] <= A[j] for i <= j, + * j >= hi + */ + hi -= width; + + /* A[i] <= A[j] for i <= j, j > hi, loop top condition + * established + */ + } +} + +static void qsort(void *base, size_t num, size_t width, + int (*comp)(const void *, const void *)) +{ + char *lo, *hi; /* ends of sub-array currently sorting */ + char *mid; /* points to middle of subarray */ + char *loguy, *higuy; /* traveling pointers for partition step */ + size_t size; /* size of the sub-array */ + char *lostk[STKSIZ], *histk[STKSIZ]; + int stkptr; + +/* stack for saving sub-array to be + * processed + */ +#if 0 + /* validation section */ + _VALIDATE_RETURN_VOID(base != NULL || num == 0, EINVAL); + _VALIDATE_RETURN_VOID(width > 0, EINVAL); + _VALIDATE_RETURN_VOID(comp != NULL, EINVAL); +#endif + if (num < 2) + return; /* nothing to do */ + + stkptr = 0; /* initialize stack */ + lo = (char *)base; + hi = (char *)base + width * (num - 1); /* initialize limits */ + + /* this entry point is for pseudo-recursion calling: setting + * lo and hi and jumping to here is like recursion, but stkptr is + * preserved, locals aren't, so we preserve stuff on the stack + */ +recurse: + + size = (hi - lo) / width + 1; /* number of el's to sort */ + + /* below a certain size, it is faster to use a O(n^2) sorting method */ + if (size <= CUTOFF) { + __SHORTSORT(lo, hi, width, comp, context); + } else { + /* First we pick a partitioning element. The efficiency of + * the algorithm demands that we find one that is approximately + * the median of the values, but also that we select one fast. 
+ * We choose the median of the first, middle, and last + * elements, to avoid bad performance in the face of already + * sorted data, or data that is made up of multiple sorted + * runs appended together. Testing shows that a + * median-of-three algorithm provides better performance than + * simply picking the middle element for the latter case. + */ + + mid = lo + (size / 2) * width; /* find middle element */ + + /* Sort the first, middle, last elements into order */ + if (__COMPARE(context, lo, mid) > 0) + swap(lo, mid, width); + if (__COMPARE(context, lo, hi) > 0) + swap(lo, hi, width); + if (__COMPARE(context, mid, hi) > 0) + swap(mid, hi, width); + + /* We now wish to partition the array into three pieces, one + * consisting of elements <= partition element, one of elements + * equal to the partition element, and one of elements > than + * it. This is done below; comments indicate conditions + * established at every step. + */ + + loguy = lo; + higuy = hi; + + /* Note that higuy decreases and loguy increases on every + * iteration, so loop must terminate. + */ + for (;;) { + /* lo <= loguy < hi, lo < higuy <= hi, + * A[i] <= A[mid] for lo <= i <= loguy, + * A[i] > A[mid] for higuy <= i < hi, + * A[hi] >= A[mid] + */ + + /* The doubled loop is to avoid calling comp(mid,mid), + * since some existing comparison funcs don't work + * when passed the same value for both pointers. 
+ */ + + if (mid > loguy) { + do { + loguy += width; + } while (loguy < mid && + __COMPARE(context, loguy, mid) <= 0); + } + if (mid <= loguy) { + do { + loguy += width; + } while (loguy <= hi && + __COMPARE(context, loguy, mid) <= 0); + } + + /* lo < loguy <= hi+1, A[i] <= A[mid] for + * lo <= i < loguy, + * either loguy > hi or A[loguy] > A[mid] + */ + + do { + higuy -= width; + } while (higuy > mid && + __COMPARE(context, higuy, mid) > 0); + + /* lo <= higuy < hi, A[i] > A[mid] for higuy < i < hi, + * either higuy == lo or A[higuy] <= A[mid] + */ + + if (higuy < loguy) + break; + + /* if loguy > hi or higuy == lo, then we would have + * exited, so A[loguy] > A[mid], A[higuy] <= A[mid], + * loguy <= hi, higuy > lo + */ + + swap(loguy, higuy, width); + + /* If the partition element was moved, follow it. + * Only need to check for mid == higuy, since before + * the swap, A[loguy] > A[mid] implies loguy != mid. + */ + + if (mid == higuy) + mid = loguy; + + /* A[loguy] <= A[mid], A[higuy] > A[mid]; so condition + * at top of loop is re-established + */ + } + + /* A[i] <= A[mid] for lo <= i < loguy, + * A[i] > A[mid] for higuy < i < hi, + * A[hi] >= A[mid] + * higuy < loguy + * implying: + * higuy == loguy-1 + * or higuy == hi - 1, loguy == hi + 1, A[hi] == A[mid] + */ + + /* Find adjacent elements equal to the partition element. The + * doubled loop is to avoid calling comp(mid,mid), since some + * existing comparison funcs don't work when passed the same + * value for both pointers. 
+ */ + + higuy += width; + if (mid < higuy) { + do { + higuy -= width; + } while (higuy > mid && + __COMPARE(context, higuy, mid) == 0); + } + if (mid >= higuy) { + do { + higuy -= width; + } while (higuy > lo && + __COMPARE(context, higuy, mid) == 0); + } + + /* OK, now we have the following: + * higuy < loguy + * lo <= higuy <= hi + * A[i] <= A[mid] for lo <= i <= higuy + * A[i] == A[mid] for higuy < i < loguy + * A[i] > A[mid] for loguy <= i < hi + * A[hi] >= A[mid] + */ + + /* We've finished the partition, now we want to sort the + * subarrays [lo, higuy] and [loguy, hi]. + * We do the smaller one first to minimize stack usage. + * We only sort arrays of length 2 or more. + */ + + if (higuy - lo >= hi - loguy) { + if (lo < higuy) { + lostk[stkptr] = lo; + histk[stkptr] = higuy; + ++stkptr; + } /* save big recursion for later */ + + if (loguy < hi) { + lo = loguy; + goto recurse; /* do small recursion */ + } + } else { + if (loguy < hi) { + lostk[stkptr] = loguy; + histk[stkptr] = hi; + ++stkptr; /* save big recursion for later */ + } + + if (lo < higuy) { + hi = higuy; + goto recurse; /* do small recursion */ + } + } + } + + /* We have sorted the array, except for any pending sorts on the stack. + * Check if there are any, and do them. 
+ */ + + --stkptr; + if (stkptr >= 0) { + lo = lostk[stkptr]; + hi = histk[stkptr]; + goto recurse; /* pop subarray from stack */ + } else + return; /* all subarrays done */ +} + +#endif + +#include "av1_global.h" +int aom_realloc_frame_buffer(AV1_COMMON *cm, PIC_BUFFER_CONFIG *pic, + int width, int height, unsigned int order_hint); +void dump_params(AV1Decoder *pbi, union param_u *params); + +#define assert(a) +#define IMPLIES(a) + +int new_compressed_data_count = 0; + +static int valid_ref_frame_size(int ref_width, int ref_height, + int this_width, int this_height) { + return 2 * this_width >= ref_width && 2 * this_height >= ref_height && + this_width <= 16 * ref_width && this_height <= 16 * ref_height; +} + +#ifdef SUPPORT_SCALE_FACTOR +// Note: Expect val to be in q4 precision +static inline int scaled_x(int val, const struct scale_factors *sf) { + const int off = + (sf->x_scale_fp - (1 << REF_SCALE_SHIFT)) * (1 << (SUBPEL_BITS - 1)); + const int64_t tval = (int64_t)val * sf->x_scale_fp + off; + return (int)ROUND_POWER_OF_TWO_SIGNED_64(tval, + REF_SCALE_SHIFT - SCALE_EXTRA_BITS); +} + +// Note: Expect val to be in q4 precision +static inline int scaled_y(int val, const struct scale_factors *sf) { + const int off = + (sf->y_scale_fp - (1 << REF_SCALE_SHIFT)) * (1 << (SUBPEL_BITS - 1)); + const int64_t tval = (int64_t)val * sf->y_scale_fp + off; + return (int)ROUND_POWER_OF_TWO_SIGNED_64(tval, + REF_SCALE_SHIFT - SCALE_EXTRA_BITS); +} + +// Note: Expect val to be in q4 precision +static int unscaled_value(int val, const struct scale_factors *sf) { + (void)sf; + return val << SCALE_EXTRA_BITS; +} + +static int get_fixed_point_scale_factor(int other_size, int this_size) { + // Calculate scaling factor once for each reference frame + // and use fixed point scaling factors in decoding and encoding routines. + // Hardware implementations can calculate scale factor in device driver + // and use multiplication and shifting on hardware instead of division. 
+ return ((other_size << REF_SCALE_SHIFT) + this_size / 2) / this_size; +} + +// Given the fixed point scale, calculate coarse point scale. +static int fixed_point_scale_to_coarse_point_scale(int scale_fp) { + return ROUND_POWER_OF_TWO(scale_fp, REF_SCALE_SHIFT - SCALE_SUBPEL_BITS); +} + + +void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w, + int other_h, int this_w, int this_h) { + if (!valid_ref_frame_size(other_w, other_h, this_w, this_h)) { + sf->x_scale_fp = REF_INVALID_SCALE; + sf->y_scale_fp = REF_INVALID_SCALE; + return; + } + + sf->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w); + sf->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h); + + sf->x_step_q4 = fixed_point_scale_to_coarse_point_scale(sf->x_scale_fp); + sf->y_step_q4 = fixed_point_scale_to_coarse_point_scale(sf->y_scale_fp); + + if (av1_is_scaled(sf)) { + sf->scale_value_x = scaled_x; + sf->scale_value_y = scaled_y; + } else { + sf->scale_value_x = unscaled_value; + sf->scale_value_y = unscaled_value; + } +#ifdef ORI_CODE + // AV1 convolve functions + // Special case convolve functions should produce the same result as + // av1_convolve_2d. + // subpel_x_qn == 0 && subpel_y_qn == 0 + sf->convolve[0][0][0] = av1_convolve_2d_copy_sr; + // subpel_x_qn == 0 + sf->convolve[0][1][0] = av1_convolve_y_sr; + // subpel_y_qn == 0 + sf->convolve[1][0][0] = av1_convolve_x_sr; + // subpel_x_qn != 0 && subpel_y_qn != 0 + sf->convolve[1][1][0] = av1_convolve_2d_sr; + // subpel_x_qn == 0 && subpel_y_qn == 0 + sf->convolve[0][0][1] = av1_dist_wtd_convolve_2d_copy; + // subpel_x_qn == 0 + sf->convolve[0][1][1] = av1_dist_wtd_convolve_y; + // subpel_y_qn == 0 + sf->convolve[1][0][1] = av1_dist_wtd_convolve_x; + // subpel_x_qn != 0 && subpel_y_qn != 0 + sf->convolve[1][1][1] = av1_dist_wtd_convolve_2d; + // AV1 High BD convolve functions + // Special case convolve functions should produce the same result as + // av1_highbd_convolve_2d. 
+ // subpel_x_qn == 0 && subpel_y_qn == 0 + sf->highbd_convolve[0][0][0] = av1_highbd_convolve_2d_copy_sr; + // subpel_x_qn == 0 + sf->highbd_convolve[0][1][0] = av1_highbd_convolve_y_sr; + // subpel_y_qn == 0 + sf->highbd_convolve[1][0][0] = av1_highbd_convolve_x_sr; + // subpel_x_qn != 0 && subpel_y_qn != 0 + sf->highbd_convolve[1][1][0] = av1_highbd_convolve_2d_sr; + // subpel_x_qn == 0 && subpel_y_qn == 0 + sf->highbd_convolve[0][0][1] = av1_highbd_dist_wtd_convolve_2d_copy; + // subpel_x_qn == 0 + sf->highbd_convolve[0][1][1] = av1_highbd_dist_wtd_convolve_y; + // subpel_y_qn == 0 + sf->highbd_convolve[1][0][1] = av1_highbd_dist_wtd_convolve_x; + // subpel_x_qn != 0 && subpel_y_qn != 0 + sf->highbd_convolve[1][1][1] = av1_highbd_dist_wtd_convolve_2d; +#endif +} +#endif + +static RefCntBuffer *assign_cur_frame_new_fb(AV1_COMMON *const cm) { + // Release the previously-used frame-buffer + int new_fb_idx; + if (cm->cur_frame != NULL) { + --cm->cur_frame->ref_count; + cm->cur_frame = NULL; + } + + // Assign a new framebuffer + new_fb_idx = get_free_frame_buffer(cm); + if (new_fb_idx == INVALID_IDX) return NULL; + + cm->cur_frame = &cm->buffer_pool->frame_bufs[new_fb_idx]; + cm->cur_frame->buf.buf_8bit_valid = 0; +#ifdef AML + cm->cur_frame->buf.index = new_fb_idx; +#endif +#ifdef ORI_CODE + av1_zero(cm->cur_frame->interp_filter_selected); +#endif + return cm->cur_frame; +} + +// Modify 'lhs_ptr' to reference the buffer at 'rhs_ptr', and update the ref +// counts accordingly. +static void assign_frame_buffer_p(RefCntBuffer **lhs_ptr, + RefCntBuffer *rhs_ptr) { + RefCntBuffer *const old_ptr = *lhs_ptr; + if (old_ptr != NULL) { + assert(old_ptr->ref_count > 0); + // One less reference to the buffer at 'old_ptr', so decrease ref count. + --old_ptr->ref_count; + } + + *lhs_ptr = rhs_ptr; + // One more reference to the buffer at 'rhs_ptr', so increase ref count. 
+ ++rhs_ptr->ref_count; +} + +AV1Decoder *av1_decoder_create(BufferPool *const pool, AV1_COMMON *cm) { + int i; + +#ifndef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + AV1Decoder *pbi = (AV1Decoder *)malloc(sizeof(*pbi)); +#else + AV1Decoder *pbi = (AV1Decoder *)vmalloc(sizeof(AV1Decoder)); +#endif + if (!pbi) return NULL; + memset(pbi, 0, sizeof(*pbi)); + + // The jmp_buf is valid only for the duration of the function that calls + // setjmp(). Therefore, this function must reset the 'setjmp' field to 0 + // before it returns. + + pbi->common = cm; + cm->error.setjmp = 1; + +#ifdef ORI_CODE + memset(cm->fc, 0, sizeof(*cm->fc)); + memset(cm->default_frame_context, 0, sizeof(*cm->default_frame_context)); +#endif + pbi->need_resync = 1; + + // Initialize the references to not point to any frame buffers. + for (i = 0; i < REF_FRAMES; i++) { + cm->ref_frame_map[i] = NULL; + cm->next_ref_frame_map[i] = NULL; +#ifdef AML + cm->next_used_ref_frame_map[i] = NULL; +#endif + } + + cm->current_frame.frame_number = 0; + pbi->decoding_first_frame = 1; + pbi->common->buffer_pool = pool; + + cm->seq_params.bit_depth = AOM_BITS_8; + +#ifdef ORI_CODE + cm->alloc_mi = dec_alloc_mi; + cm->free_mi = dec_free_mi; + cm->setup_mi = dec_setup_mi; + + av1_loop_filter_init(cm); + + av1_qm_init(cm); + av1_loop_restoration_precal(); +#if CONFIG_ACCOUNTING + pbi->acct_enabled = 1; + aom_accounting_init(&pbi->accounting); +#endif +#endif + cm->error.setjmp = 0; + +#ifdef ORI_CODE + aom_get_worker_interface()->init(&pbi->lf_worker); + pbi->lf_worker.thread_name = "aom lf worker"; +#endif + + return pbi; +} + +static void reset_frame_buffers(AV1Decoder *const pbi); + +void av1_bufmgr_ctx_reset(AV1Decoder *pbi, BufferPool *const pool, AV1_COMMON *cm) +{ + u32 save_w, save_h; + + if (!pbi || !pool || !cm) + return; + + reset_frame_buffers(pbi); + memset(pbi, 0, sizeof(*pbi)); + /*save w,h for resolution change after seek */ + save_w = cm->width; + save_h = cm->height; + memset(cm, 0, sizeof(*cm)); + + 
cm->current_frame.frame_number = 0; + cm->seq_params.bit_depth = AOM_BITS_8; + cm->error.setjmp = 0; + cm->width = save_w; + cm->height = save_h; + + pbi->bufmgr_proc_count = 0; + pbi->need_resync = 1; + pbi->decoding_first_frame = 1; + pbi->num_output_frames = 0; + pbi->common = cm; + pbi->common->buffer_pool = pool; +} + +int release_fb_cb(void *cb_priv, aom_codec_frame_buffer_t *fb) { +#if 0 + InternalFrameBuffer *const int_fb = (InternalFrameBuffer *)fb->priv; + (void)cb_priv; + if (int_fb) int_fb->in_use = 0; +#endif + return 0; +} + +static void decrease_ref_count(AV1Decoder *pbi, RefCntBuffer *const buf, + BufferPool *const pool) { + if (buf != NULL) { + --buf->ref_count; + // Reference counts should never become negative. If this assertion fails, + // there is a bug in our reference count management. + assert(buf->ref_count >= 0); + // A worker may only get a free framebuffer index when calling get_free_fb. + // But the raw frame buffer is not set up until we finish decoding header. + // So if any error happens during decoding header, frame_bufs[idx] will not + // have a valid raw frame buffer. 
+ if (buf->ref_count == 0 +#ifdef ORI_CODE + && buf->raw_frame_buffer.data +#endif + ) { +#ifdef AML + av1_release_buf(pbi, buf); +#endif + release_fb_cb(pool->cb_priv, &buf->raw_frame_buffer); + buf->raw_frame_buffer.data = NULL; + buf->raw_frame_buffer.size = 0; + buf->raw_frame_buffer.priv = NULL; + } + } +} + +void clear_frame_buf_ref_count(AV1Decoder *pbi) +{ + int i; + + for (i = 0; i < pbi->num_output_frames; i++) { + decrease_ref_count(pbi, pbi->output_frames[i], + pbi->common->buffer_pool); + } + pbi->num_output_frames = 0; +} + +static void swap_frame_buffers(AV1Decoder *pbi, int frame_decoded) { + int ref_index = 0, mask; + AV1_COMMON *const cm = pbi->common; + BufferPool *const pool = cm->buffer_pool; + unsigned long flags; + + if (frame_decoded) { + int check_on_show_existing_frame; + lock_buffer_pool(pool, flags); + + // In ext-tile decoding, the camera frame header is only decoded once. So, + // we don't release the references here. + if (!pbi->camera_frame_header_ready) { + // If we are not holding reference buffers in cm->next_ref_frame_map, + // assert that the following two for loops are no-ops. + assert(IMPLIES(!pbi->hold_ref_buf, + cm->current_frame.refresh_frame_flags == 0)); + assert(IMPLIES(!pbi->hold_ref_buf, + cm->show_existing_frame && !pbi->reset_decoder_state)); + + // The following two for loops need to release the reference stored in + // cm->ref_frame_map[ref_index] before transferring the reference stored + // in cm->next_ref_frame_map[ref_index] to cm->ref_frame_map[ref_index]. 
+ for (mask = cm->current_frame.refresh_frame_flags; mask; mask >>= 1) { + decrease_ref_count(pbi, cm->ref_frame_map[ref_index], pool); + cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index]; + cm->next_ref_frame_map[ref_index] = NULL; + ++ref_index; + } + + check_on_show_existing_frame = + !cm->show_existing_frame || pbi->reset_decoder_state; + for (; ref_index < REF_FRAMES && check_on_show_existing_frame; + ++ref_index) { + decrease_ref_count(pbi, cm->ref_frame_map[ref_index], pool); + cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index]; + cm->next_ref_frame_map[ref_index] = NULL; + } + } + + if (cm->show_existing_frame || cm->show_frame) { + if (pbi->output_all_layers) { + // Append this frame to the output queue + if (pbi->num_output_frames >= MAX_NUM_SPATIAL_LAYERS) { + // We can't store the new frame anywhere, so drop it and return an + // error + cm->cur_frame->buf.corrupted = 1; + decrease_ref_count(pbi, cm->cur_frame, pool); + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + } else { + pbi->output_frames[pbi->num_output_frames] = cm->cur_frame; + pbi->num_output_frames++; + } + } else { + // Replace any existing output frame + assert(pbi->num_output_frames == 0 || pbi->num_output_frames == 1); + if (pbi->num_output_frames > 0) { + decrease_ref_count(pbi, pbi->output_frames[0], pool); + } + if (cm->cur_frame) { + pbi->output_frames[0] = cm->cur_frame; + pbi->num_output_frames = 1; + } + } + } else { + decrease_ref_count(pbi, cm->cur_frame, pool); + } + + unlock_buffer_pool(pool, flags); + } else { + // The code here assumes we are not holding reference buffers in + // cm->next_ref_frame_map. If this assertion fails, we are leaking the + // frame buffer references in cm->next_ref_frame_map. 
+ assert(IMPLIES(!pbi->camera_frame_header_ready, !pbi->hold_ref_buf)); + // Nothing was decoded, so just drop this frame buffer + lock_buffer_pool(pool, flags); + decrease_ref_count(pbi, cm->cur_frame, pool); + unlock_buffer_pool(pool, flags); + } + cm->cur_frame = NULL; + + if (!pbi->camera_frame_header_ready) { + pbi->hold_ref_buf = 0; + + // Invalidate these references until the next frame starts. + for (ref_index = 0; ref_index < INTER_REFS_PER_FRAME; ref_index++) { + cm->remapped_ref_idx[ref_index] = INVALID_IDX; + } + } +} + +void aom_internal_error(struct aom_internal_error_info *info, + aom_codec_err_t error, const char *fmt, ...) { + va_list ap; + + info->error_code = error; + info->has_detail = 0; + + if (fmt) { + size_t sz = sizeof(info->detail); + + info->has_detail = 1; + va_start(ap, fmt); + vsnprintf(info->detail, sz - 1, fmt, ap); + va_end(ap); + info->detail[sz - 1] = '\0'; + } +#ifdef ORI_CODE + if (info->setjmp) longjmp(info->jmp, info->error_code); +#endif +} + +#ifdef ORI_CODE +void av1_zero_unused_internal_frame_buffers(InternalFrameBufferList *list) { + int i; + + assert(list != NULL); + + for (i = 0; i < list->num_internal_frame_buffers; ++i) { + if (list->int_fb[i].data && !list->int_fb[i].in_use) + memset(list->int_fb[i].data, 0, list->int_fb[i].size); + } +} +#endif + +// Release the references to the frame buffers in cm->ref_frame_map and reset +// all elements of cm->ref_frame_map to NULL. +static void reset_ref_frame_map(AV1Decoder *const pbi) { + AV1_COMMON *const cm = pbi->common; + BufferPool *const pool = cm->buffer_pool; + int i; + + for (i = 0; i < REF_FRAMES; i++) { + decrease_ref_count(pbi, cm->ref_frame_map[i], pool); + cm->ref_frame_map[i] = NULL; +#ifdef AML + cm->next_used_ref_frame_map[i] = NULL; +#endif + } +} + +// Generate next_ref_frame_map. 
+static void generate_next_ref_frame_map(AV1Decoder *const pbi) { + AV1_COMMON *const cm = pbi->common; + BufferPool *const pool = cm->buffer_pool; + unsigned long flags; + int ref_index = 0; + int mask; + + lock_buffer_pool(pool, flags); + // cm->next_ref_frame_map holds references to frame buffers. After storing a + // frame buffer index in cm->next_ref_frame_map, we need to increase the + // frame buffer's ref_count. + for (mask = cm->current_frame.refresh_frame_flags; mask; mask >>= 1) { + if (mask & 1) { + cm->next_ref_frame_map[ref_index] = cm->cur_frame; + } else { + cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index]; + } + if (cm->next_ref_frame_map[ref_index] != NULL) + ++cm->next_ref_frame_map[ref_index]->ref_count; + ++ref_index; + } + + for (; ref_index < REF_FRAMES; ++ref_index) { + cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index]; + if (cm->next_ref_frame_map[ref_index] != NULL) + ++cm->next_ref_frame_map[ref_index]->ref_count; + } + unlock_buffer_pool(pool, flags); + pbi->hold_ref_buf = 1; +} + +// If the refresh_frame_flags bitmask is set, update reference frame id values +// and mark frames as valid for reference. 
+static void update_ref_frame_id(AV1_COMMON *const cm, int frame_id) { + int i; + int refresh_frame_flags = cm->current_frame.refresh_frame_flags; + assert(cm->seq_params.frame_id_numbers_present_flag); + for (i = 0; i < REF_FRAMES; i++) { + if ((refresh_frame_flags >> i) & 1) { + cm->ref_frame_id[i] = frame_id; + cm->valid_for_referencing[i] = 1; + } + } +} + +static void show_existing_frame_reset(AV1Decoder *const pbi, + int existing_frame_idx) { + AV1_COMMON *const cm = pbi->common; + int i; + assert(cm->show_existing_frame); + + cm->current_frame.frame_type = KEY_FRAME; + + cm->current_frame.refresh_frame_flags = (1 << REF_FRAMES) - 1; + + for (i = 0; i < INTER_REFS_PER_FRAME; ++i) { + cm->remapped_ref_idx[i] = INVALID_IDX; + } + + if (pbi->need_resync) { + reset_ref_frame_map(pbi); + pbi->need_resync = 0; + } + + // Note that the displayed frame must be valid for referencing in order to + // have been selected. + if (cm->seq_params.frame_id_numbers_present_flag) { + cm->current_frame_id = cm->ref_frame_id[existing_frame_idx]; + update_ref_frame_id(cm, cm->current_frame_id); + } + + cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_DISABLED; + + generate_next_ref_frame_map(pbi); + +#ifdef ORI_CODE + // Reload the adapted CDFs from when we originally coded this keyframe + *cm->fc = cm->next_ref_frame_map[existing_frame_idx]->frame_context; +#endif +} + +static void reset_frame_buffers(AV1Decoder *const pbi) { + AV1_COMMON *const cm = pbi->common; + RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; + int i; + unsigned long flags; + + // We have not stored any references to frame buffers in + // cm->next_ref_frame_map, so we can directly reset it to all NULL. + for (i = 0; i < REF_FRAMES; ++i) { + cm->next_ref_frame_map[i] = NULL; + } + + lock_buffer_pool(cm->buffer_pool, flags); + reset_ref_frame_map(pbi); + assert(cm->cur_frame->ref_count == 1); + for (i = 0; i < FRAME_BUFFERS; ++i) { + // Reset all unreferenced frame buffers. 
We can also reset cm->cur_frame + // because we are the sole owner of cm->cur_frame. + if (frame_bufs[i].ref_count > 0 && &frame_bufs[i] != cm->cur_frame) { + continue; + } + frame_bufs[i].order_hint = 0; + av1_zero(frame_bufs[i].ref_order_hints); + } +#ifdef ORI_CODE + av1_zero_unused_internal_frame_buffers(&cm->buffer_pool->int_frame_buffers); +#endif + unlock_buffer_pool(cm->buffer_pool, flags); +} + +static int frame_is_intra_only(const AV1_COMMON *const cm) { + return cm->current_frame.frame_type == KEY_FRAME || + cm->current_frame.frame_type == INTRA_ONLY_FRAME; +} + +static int frame_is_sframe(const AV1_COMMON *cm) { + return cm->current_frame.frame_type == S_FRAME; +} + +// These functions take a reference frame label between LAST_FRAME and +// EXTREF_FRAME inclusive. Note that this is different to the indexing +// previously used by the frame_refs[] array. +static int get_ref_frame_map_idx(const AV1_COMMON *const cm, + const MV_REFERENCE_FRAME ref_frame) { + return (ref_frame >= LAST_FRAME && ref_frame <= EXTREF_FRAME) + ? cm->remapped_ref_idx[ref_frame - LAST_FRAME] + : INVALID_IDX; +} + +static RefCntBuffer *get_ref_frame_buf( + const AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) { + const int map_idx = get_ref_frame_map_idx(cm, ref_frame); + return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : NULL; +} +#ifdef SUPPORT_SCALE_FACTOR +static struct scale_factors *get_ref_scale_factors( + AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) { + const int map_idx = get_ref_frame_map_idx(cm, ref_frame); + return (map_idx != INVALID_IDX) ? &cm->ref_scale_factors[map_idx] : NULL; +} +#endif +static RefCntBuffer *get_primary_ref_frame_buf( + const AV1_COMMON *const cm) { + int map_idx; + if (cm->primary_ref_frame == PRIMARY_REF_NONE) return NULL; + map_idx = get_ref_frame_map_idx(cm, cm->primary_ref_frame + 1); + return (map_idx != INVALID_IDX) ? 
cm->ref_frame_map[map_idx] : NULL; +} + +static int get_relative_dist(const OrderHintInfo *oh, int a, int b) { + int bits; + int m; + int diff; + if (!oh->enable_order_hint) return 0; + + bits = oh->order_hint_bits_minus_1 + 1; + + assert(bits >= 1); + assert(a >= 0 && a < (1 << bits)); + assert(b >= 0 && b < (1 << bits)); + + diff = a - b; + m = 1 << (bits - 1); + diff = (diff & (m - 1)) - (diff & m); + return diff; +} + + +void av1_read_frame_size(union param_u *params, int num_bits_width, + int num_bits_height, int *width, int *height, int* dec_width) { + *width = params->p.frame_width; + *height = params->p.frame_height;//aom_rb_read_literal(rb, num_bits_height) + 1; +#ifdef AML + *dec_width = params->p.dec_frame_width; +#endif +} + +static REFERENCE_MODE read_frame_reference_mode( + const AV1_COMMON *cm, union param_u *params) { + if (frame_is_intra_only(cm)) { + return SINGLE_REFERENCE; + } else { + return params->p.reference_mode ? REFERENCE_MODE_SELECT : SINGLE_REFERENCE; + } +} + +static inline int calc_mi_size(int len) { + // len is in mi units. Align to a multiple of SBs. + return ALIGN_POWER_OF_TWO(len, MAX_MIB_SIZE_LOG2); +} + +void av1_set_mb_mi(AV1_COMMON *cm, int width, int height) { + // Ensure that the decoded width and height are both multiples of + // 8 luma pixels (note: this may only be a multiple of 4 chroma pixels if + // subsampling is used). + // This simplifies the implementation of various experiments, + // eg. cdef, which operates on units of 8x8 luma pixels. 
+ const int aligned_width = ALIGN_POWER_OF_TWO(width, 3); + const int aligned_height = ALIGN_POWER_OF_TWO(height, 3); + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, " [PICTURE] av1_set_mb_mi (%d X %d)\n", width, height); + + cm->mi_cols = aligned_width >> MI_SIZE_LOG2; + cm->mi_rows = aligned_height >> MI_SIZE_LOG2; + cm->mi_stride = calc_mi_size(cm->mi_cols); + + cm->mb_cols = (cm->mi_cols + 2) >> 2; + cm->mb_rows = (cm->mi_rows + 2) >> 2; + cm->MBs = cm->mb_rows * cm->mb_cols; + +#if CONFIG_LPF_MASK + alloc_loop_filter_mask(cm); +#endif +} + +int av1_alloc_context_buffers(AV1_COMMON *cm, int width, int height) { +#ifdef ORI_CODE + int new_mi_size; +#endif + av1_set_mb_mi(cm, width, height); +#ifdef ORI_CODE + new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows); + if (cm->mi_alloc_size < new_mi_size) { + cm->free_mi(cm); + if (cm->alloc_mi(cm, new_mi_size)) goto fail; + } +#endif + return 0; + +#ifdef ORI_CODE +fail: +#endif + // clear the mi_* values to force a realloc on resync + av1_set_mb_mi(cm, 0, 0); +#ifdef ORI_CODE + av1_free_context_buffers(cm); +#endif + return 1; +} + +#ifndef USE_SCALED_WIDTH_FROM_UCODE +static void calculate_scaled_size_helper(int *dim, int denom) { + if (denom != SCALE_NUMERATOR) { + // We need to ensure the constraint in "Appendix A" of the spec: + // * FrameWidth is greater than or equal to 16 + // * FrameHeight is greater than or equal to 16 + // For this, we clamp the downscaled dimension to at least 16. One + // exception: if original dimension itself was < 16, then we keep the + // downscaled dimension to be same as the original, to ensure that resizing + // is valid. 
+ const int min_dim = AOMMIN(16, *dim); + // Use this version if we need *dim to be even + // *width = (*width * SCALE_NUMERATOR + denom) / (2 * denom); + // *width <<= 1; + *dim = (*dim * SCALE_NUMERATOR + denom / 2) / (denom); + *dim = AOMMAX(*dim, min_dim); + } +} +#ifdef ORI_CODE +void av1_calculate_scaled_size(int *width, int *height, int resize_denom) { + calculate_scaled_size_helper(width, resize_denom); + calculate_scaled_size_helper(height, resize_denom); +} +#endif +void av1_calculate_scaled_superres_size(int *width, int *height, + int superres_denom) { + (void)height; + calculate_scaled_size_helper(width, superres_denom); +} +#endif + +static void setup_superres(AV1_COMMON *const cm, union param_u *params, + int *width, int *height) { +#ifdef USE_SCALED_WIDTH_FROM_UCODE + cm->superres_upscaled_width = params->p.frame_width_scaled; + cm->superres_upscaled_height = params->p.frame_height; + + + *width = params->p.dec_frame_width; + *height = params->p.frame_height; + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, " [PICTURE] set decoding size to (%d X %d) scaled size to (%d X %d)\n", + *width, *height, + cm->superres_upscaled_width, + cm->superres_upscaled_height); +#else + cm->superres_upscaled_width = *width; + cm->superres_upscaled_height = *height; + + const SequenceHeader *const seq_params = &cm->seq_params; + if (!seq_params->enable_superres) return; + + //if (aom_rb_read_bit(-1, defmark, rb)) { + if (params->p.superres_scale_denominator != SCALE_NUMERATOR) { +#ifdef ORI_CODE + cm->superres_scale_denominator = + (uint8_t)aom_rb_read_literal(-1, defmark, rb, SUPERRES_SCALE_BITS); + cm->superres_scale_denominator += SUPERRES_SCALE_DENOMINATOR_MIN; +#else + cm->superres_scale_denominator = params->p.superres_scale_denominator; +#endif + // Don't edit cm->width or cm->height directly, or the buffers won't get + // resized correctly + av1_calculate_scaled_superres_size(width, height, + cm->superres_scale_denominator); + } else { + // 1:1 scaling - ie. 
no scaling, scale not provided + cm->superres_scale_denominator = SCALE_NUMERATOR; + } +/*!USE_SCALED_WIDTH_FROM_UCODE*/ +#endif +} + +static void resize_context_buffers(AV1_COMMON *cm, int width, int height) { +#if CONFIG_SIZE_LIMIT + if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Dimensions of %dx%d beyond allowed size of %dx%d.", + width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT); +#endif + if (cm->width != width || cm->height != height) { + const int new_mi_rows = + ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2; + const int new_mi_cols = + ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2; + + // Allocations in av1_alloc_context_buffers() depend on individual + // dimensions as well as the overall size. + if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) { + if (av1_alloc_context_buffers(cm, width, height)) { + // The cm->mi_* values have been cleared and any existing context + // buffers have been freed. Clear cm->width and cm->height to be + // consistent and to force a realloc next time. 
+ cm->width = 0; + cm->height = 0; + aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, + "Failed to allocate context buffers"); + } + } else { + av1_set_mb_mi(cm, width, height); + } +#ifdef ORI_CODE + av1_init_context_buffers(cm); +#endif + cm->width = width; + cm->height = height; + } + +#ifdef ORI_CODE + ensure_mv_buffer(cm->cur_frame, cm); +#endif + cm->cur_frame->width = cm->width; + cm->cur_frame->height = cm->height; +} + +static void setup_buffer_pool(AV1_COMMON *cm) { + BufferPool *const pool = cm->buffer_pool; + const SequenceHeader *const seq_params = &cm->seq_params; + unsigned long flags; + + lock_buffer_pool(pool, flags); + if (aom_realloc_frame_buffer(cm, &cm->cur_frame->buf, + cm->width, cm->height, cm->cur_frame->order_hint)) { + unlock_buffer_pool(pool, flags); + aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, + "Failed to allocate frame buffer"); + } + unlock_buffer_pool(pool, flags); + + cm->cur_frame->buf.bit_depth = (unsigned int)seq_params->bit_depth; + cm->cur_frame->buf.color_primaries = seq_params->color_primaries; + cm->cur_frame->buf.transfer_characteristics = + seq_params->transfer_characteristics; + cm->cur_frame->buf.matrix_coefficients = seq_params->matrix_coefficients; + cm->cur_frame->buf.monochrome = seq_params->monochrome; + cm->cur_frame->buf.chroma_sample_position = + seq_params->chroma_sample_position; + cm->cur_frame->buf.color_range = seq_params->color_range; + cm->cur_frame->buf.render_width = cm->render_width; + cm->cur_frame->buf.render_height = cm->render_height; +} + +static void setup_frame_size(AV1_COMMON *cm, int frame_size_override_flag, union param_u *params) { + const SequenceHeader *const seq_params = &cm->seq_params; + int width, height, dec_width; + + if (frame_size_override_flag) { + int num_bits_width = seq_params->num_bits_width; + int num_bits_height = seq_params->num_bits_height; + av1_read_frame_size(params, num_bits_width, num_bits_height, &width, &height, &dec_width); +#ifdef AML + cm->dec_width = 
dec_width; +#endif + if (width > seq_params->max_frame_width || + height > seq_params->max_frame_height) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Frame dimensions are larger than the maximum values"); + } + } else { + width = seq_params->max_frame_width; + height = seq_params->max_frame_height; +#ifdef AML + cm->dec_width = dec_width = params->p.dec_frame_width; +#endif + } + setup_superres(cm, params, &width, &height); + resize_context_buffers(cm, width, height); +#ifdef ORI_CODE + setup_render_size(cm, params); +#endif + setup_buffer_pool(cm); +} + +static int valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth, + int ref_xss, int ref_yss, + aom_bit_depth_t this_bit_depth, + int this_xss, int this_yss) { + return ref_bit_depth == this_bit_depth && ref_xss == this_xss && + ref_yss == this_yss; +} + +static void setup_frame_size_with_refs(AV1_COMMON *cm, union param_u *params) { + int width, height, dec_width; + int found = 0; + int has_valid_ref_frame = 0; + int i; + SequenceHeader *seq_params; + for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { + /*if (aom_rb_read_bit(rb)) {*/ + if (params->p.valid_ref_frame_bits & (1<<i)) { + const RefCntBuffer *const ref_buf = get_ref_frame_buf(cm, i); + // This will never be NULL in a normal stream, as streams are required to + // have a shown keyframe before any inter frames, which would refresh all + // the reference buffers. However, it might be null if we're starting in + // the middle of a stream, and static analysis will error if we don't do + // a null check here. 
+ if (ref_buf == NULL) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Invalid condition: invalid reference buffer"); + } else { + const PIC_BUFFER_CONFIG *const buf = &ref_buf->buf; + width = buf->y_crop_width; + height = buf->y_crop_height; + cm->render_width = buf->render_width; + cm->render_height = buf->render_height; + setup_superres(cm, params, &width, &height); + resize_context_buffers(cm, width, height); + found = 1; + break; + } + } + } + + seq_params = &cm->seq_params; + if (!found) { + int num_bits_width = seq_params->num_bits_width; + int num_bits_height = seq_params->num_bits_height; + + av1_read_frame_size(params, num_bits_width, num_bits_height, &width, &height, &dec_width); +#ifdef AML + cm->dec_width = dec_width; +#endif + setup_superres(cm, params, &width, &height); + resize_context_buffers(cm, width, height); +#ifdef ORI_CODE + setup_render_size(cm, rb); +#endif + } + + if (width <= 0 || height <= 0) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Invalid frame size"); + + // Check to make sure at least one of frames that this frame references + // has valid dimensions. 
+ for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { + const RefCntBuffer *const ref_frame = get_ref_frame_buf(cm, i); + if (ref_frame != NULL) { + has_valid_ref_frame |= + valid_ref_frame_size(ref_frame->buf.y_crop_width, + ref_frame->buf.y_crop_height, width, height); + } + } + if (!has_valid_ref_frame) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Referenced frame has invalid size"); + for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { + const RefCntBuffer *const ref_frame = get_ref_frame_buf(cm, i); + if (ref_frame != NULL) { + if (!valid_ref_frame_img_fmt( + ref_frame->buf.bit_depth, ref_frame->buf.subsampling_x, + ref_frame->buf.subsampling_y, seq_params->bit_depth, + seq_params->subsampling_x, seq_params->subsampling_y)) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Referenced frame has incompatible color format"); + } + } + setup_buffer_pool(cm); +} + +typedef struct { + int map_idx; // frame map index + RefCntBuffer *buf; // frame buffer + int sort_idx; // index based on the offset to be used for sorting +} REF_FRAME_INFO; + +// Compares the sort_idx fields. If they are equal, then compares the map_idx +// fields to break the tie. This ensures a stable sort. 
+static int compare_ref_frame_info(const void *arg_a, const void *arg_b) { + const REF_FRAME_INFO *info_a = (REF_FRAME_INFO *)arg_a; + const REF_FRAME_INFO *info_b = (REF_FRAME_INFO *)arg_b; + + const int sort_idx_diff = info_a->sort_idx - info_b->sort_idx; + if (sort_idx_diff != 0) return sort_idx_diff; + return info_a->map_idx - info_b->map_idx; +} + + +/* +for av1_setup_motion_field() +*/ +static int motion_field_projection(AV1_COMMON *cm, + MV_REFERENCE_FRAME start_frame, int dir) { +#ifdef ORI_CODE + TPL_MV_REF *tpl_mvs_base = cm->tpl_mvs; + int ref_offset[REF_FRAMES] = { 0 }; +#endif + MV_REFERENCE_FRAME rf; + const RefCntBuffer *const start_frame_buf = + get_ref_frame_buf(cm, start_frame); + int start_frame_order_hint; + unsigned int const *ref_order_hints; + int cur_order_hint; + int start_to_current_frame_offset; + +#ifdef AML + int i; + //av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "$$$$$$$$$$$%s:cm->mv_ref_id_index = %d, start_frame=%d\n", __func__, cm->mv_ref_id_index, start_frame); + cm->mv_ref_id[cm->mv_ref_id_index] = start_frame; + for (i = 0; i < REF_FRAMES; i++) { + cm->mv_ref_offset[cm->mv_ref_id_index][i]=0; + } + cm->mv_cal_tpl_mvs[cm->mv_ref_id_index]=0; + cm->mv_ref_id_index++; +#endif + if (start_frame_buf == NULL) return 0; + + if (start_frame_buf->frame_type == KEY_FRAME || + start_frame_buf->frame_type == INTRA_ONLY_FRAME) + return 0; + + if (start_frame_buf->mi_rows != cm->mi_rows || + start_frame_buf->mi_cols != cm->mi_cols) + return 0; + + start_frame_order_hint = start_frame_buf->order_hint; + ref_order_hints = + &start_frame_buf->ref_order_hints[0]; + cur_order_hint = cm->cur_frame->order_hint; + start_to_current_frame_offset = get_relative_dist( + &cm->seq_params.order_hint_info, start_frame_order_hint, cur_order_hint); + + for (rf = LAST_FRAME; rf <= INTER_REFS_PER_FRAME; ++rf) { + cm->mv_ref_offset[cm->mv_ref_id_index-1][rf] = get_relative_dist(&cm->seq_params.order_hint_info, + start_frame_order_hint, + ref_order_hints[rf - LAST_FRAME]); 
+ } +#ifdef AML + cm->mv_cal_tpl_mvs[cm->mv_ref_id_index-1]=1; +#endif + if (dir == 2) start_to_current_frame_offset = -start_to_current_frame_offset; +#ifdef ORI_CODE + MV_REF *mv_ref_base = start_frame_buf->mvs; + const int mvs_rows = (cm->mi_rows + 1) >> 1; + const int mvs_cols = (cm->mi_cols + 1) >> 1; + + for (int blk_row = 0; blk_row < mvs_rows; ++blk_row) { + for (int blk_col = 0; blk_col < mvs_cols; ++blk_col) { + MV_REF *mv_ref = &mv_ref_base[blk_row * mvs_cols + blk_col]; + MV fwd_mv = mv_ref->mv.as_mv; + + if (mv_ref->ref_frame > INTRA_FRAME) { + int_mv this_mv; + int mi_r, mi_c; + const int ref_frame_offset = ref_offset[mv_ref->ref_frame]; + + int pos_valid = + abs(ref_frame_offset) <= MAX_FRAME_DISTANCE && + ref_frame_offset > 0 && + abs(start_to_current_frame_offset) <= MAX_FRAME_DISTANCE; + + if (pos_valid) { + get_mv_projection(&this_mv.as_mv, fwd_mv, + start_to_current_frame_offset, ref_frame_offset); + pos_valid = get_block_position(cm, &mi_r, &mi_c, blk_row, blk_col, + this_mv.as_mv, dir >> 1); + } + + if (pos_valid) { + const int mi_offset = mi_r * (cm->mi_stride >> 1) + mi_c; + + tpl_mvs_base[mi_offset].mfmv0.as_mv.row = fwd_mv.row; + tpl_mvs_base[mi_offset].mfmv0.as_mv.col = fwd_mv.col; + tpl_mvs_base[mi_offset].ref_frame_offset = ref_frame_offset; + } + } + } + } +#endif + return 1; +} + +#ifdef AML +static int setup_motion_field_debug_count = 0; +#endif +void av1_setup_motion_field(AV1_COMMON *cm) { + const OrderHintInfo *const order_hint_info = &cm->seq_params.order_hint_info; + int ref_frame; + int size; + int cur_order_hint; + const RefCntBuffer *ref_buf[INTER_REFS_PER_FRAME]; + int ref_order_hint[INTER_REFS_PER_FRAME]; + int ref_stamp; + memset(cm->ref_frame_side, 0, sizeof(cm->ref_frame_side)); + if (!order_hint_info->enable_order_hint) return; +#ifdef ORI_CODE + TPL_MV_REF *tpl_mvs_base = cm->tpl_mvs; +#endif + size = ((cm->mi_rows + MAX_MIB_SIZE) >> 1) * (cm->mi_stride >> 1); +#ifdef ORI_CODE + for (int idx = 0; idx < size; ++idx) { + 
tpl_mvs_base[idx].mfmv0.as_int = INVALID_MV; + tpl_mvs_base[idx].ref_frame_offset = 0; + } +#endif + cur_order_hint = cm->cur_frame->order_hint; + + for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) { + const int ref_idx = ref_frame - LAST_FRAME; + const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref_frame); + int order_hint = 0; + + if (buf != NULL) order_hint = buf->order_hint; + + ref_buf[ref_idx] = buf; + ref_order_hint[ref_idx] = order_hint; + + if (get_relative_dist(order_hint_info, order_hint, cur_order_hint) > 0) + cm->ref_frame_side[ref_frame] = 1; + else if (order_hint == cur_order_hint) + cm->ref_frame_side[ref_frame] = -1; + } + ref_stamp = MFMV_STACK_SIZE - 1; +#ifdef AML + cm->mv_ref_id_index = 0; + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s(%d) mi_cols %d mi_rows %d\n", + __func__, setup_motion_field_debug_count++, + cm->mi_cols, + cm->mi_rows + ); +#endif + if (ref_buf[LAST_FRAME - LAST_FRAME] != NULL) { + const int alt_of_lst_order_hint = + ref_buf[LAST_FRAME - LAST_FRAME] + ->ref_order_hints[ALTREF_FRAME - LAST_FRAME]; + + const int is_lst_overlay = + (alt_of_lst_order_hint == ref_order_hint[GOLDEN_FRAME - LAST_FRAME]); + if (!is_lst_overlay) motion_field_projection(cm, LAST_FRAME, 2); + --ref_stamp; + } + + if (get_relative_dist(order_hint_info, + ref_order_hint[BWDREF_FRAME - LAST_FRAME], + cur_order_hint) > 0) { + if (motion_field_projection(cm, BWDREF_FRAME, 0)) --ref_stamp; + } + + if (get_relative_dist(order_hint_info, + ref_order_hint[ALTREF2_FRAME - LAST_FRAME], + cur_order_hint) > 0) { + if (motion_field_projection(cm, ALTREF2_FRAME, 0)) --ref_stamp; + } + + if (get_relative_dist(order_hint_info, + ref_order_hint[ALTREF_FRAME - LAST_FRAME], + cur_order_hint) > 0 && + ref_stamp >= 0) + if (motion_field_projection(cm, ALTREF_FRAME, 0)) --ref_stamp; + + if (ref_stamp >= 0) motion_field_projection(cm, LAST2_FRAME, 2); +} + + +static void set_ref_frame_info(int *remapped_ref_idx, int frame_idx, + REF_FRAME_INFO *ref_info) 
{ + assert(frame_idx >= 0 && frame_idx < INTER_REFS_PER_FRAME); + + remapped_ref_idx[frame_idx] = ref_info->map_idx; + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "+++++++++++++%s:remapped_ref_idx[%d]=0x%x\n", __func__, frame_idx, ref_info->map_idx); +} + + +void av1_set_frame_refs(AV1_COMMON *const cm, int *remapped_ref_idx, + int lst_map_idx, int gld_map_idx) { + int lst_frame_sort_idx = -1; + int gld_frame_sort_idx = -1; + int i; + //assert(cm->seq_params.order_hint_info.enable_order_hint); + //assert(cm->seq_params.order_hint_info.order_hint_bits_minus_1 >= 0); + const int cur_order_hint = (int)cm->current_frame.order_hint; + const int cur_frame_sort_idx = + 1 << cm->seq_params.order_hint_info.order_hint_bits_minus_1; + + REF_FRAME_INFO ref_frame_info[REF_FRAMES]; + int ref_flag_list[INTER_REFS_PER_FRAME] = { 0, 0, 0, 0, 0, 0, 0 }; + int bwd_start_idx; + int bwd_end_idx; + int fwd_start_idx, fwd_end_idx; + int ref_idx; + static const MV_REFERENCE_FRAME ref_frame_list[INTER_REFS_PER_FRAME - 2] = { + LAST2_FRAME, LAST3_FRAME, BWDREF_FRAME, ALTREF2_FRAME, ALTREF_FRAME + }; + + for (i = 0; i < REF_FRAMES; ++i) { + const int map_idx = i; + RefCntBuffer *buf; + int offset; + + ref_frame_info[i].map_idx = map_idx; + ref_frame_info[i].sort_idx = -1; + + buf = cm->ref_frame_map[map_idx]; + ref_frame_info[i].buf = buf; + + if (buf == NULL) continue; + // If this assertion fails, there is a reference leak. + assert(buf->ref_count > 0); + + offset = (int)buf->order_hint; + ref_frame_info[i].sort_idx = + (offset == -1) ? -1 + : cur_frame_sort_idx + + get_relative_dist(&cm->seq_params.order_hint_info, + offset, cur_order_hint); + assert(ref_frame_info[i].sort_idx >= -1); + + if (map_idx == lst_map_idx) lst_frame_sort_idx = ref_frame_info[i].sort_idx; + if (map_idx == gld_map_idx) gld_frame_sort_idx = ref_frame_info[i].sort_idx; + } + + // Confirm both LAST_FRAME and GOLDEN_FRAME are valid forward reference + // frames. 
+ if (lst_frame_sort_idx == -1 || lst_frame_sort_idx >= cur_frame_sort_idx) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Inter frame requests a look-ahead frame as LAST"); + } + if (gld_frame_sort_idx == -1 || gld_frame_sort_idx >= cur_frame_sort_idx) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Inter frame requests a look-ahead frame as GOLDEN"); + } + + // Sort ref frames based on their frame_offset values. + qsort(ref_frame_info, REF_FRAMES, sizeof(REF_FRAME_INFO), + compare_ref_frame_info); + + // Identify forward and backward reference frames. + // Forward reference: offset < order_hint + // Backward reference: offset >= order_hint + fwd_start_idx = 0; + fwd_end_idx = REF_FRAMES - 1; + + for (i = 0; i < REF_FRAMES; i++) { + if (ref_frame_info[i].sort_idx == -1) { + fwd_start_idx++; + continue; + } + + if (ref_frame_info[i].sort_idx >= cur_frame_sort_idx) { + fwd_end_idx = i - 1; + break; + } + } + + bwd_start_idx = fwd_end_idx + 1; + bwd_end_idx = REF_FRAMES - 1; + + // === Backward Reference Frames === + + // == ALTREF_FRAME == + if (bwd_start_idx <= bwd_end_idx) { + set_ref_frame_info(remapped_ref_idx, ALTREF_FRAME - LAST_FRAME, + &ref_frame_info[bwd_end_idx]); + ref_flag_list[ALTREF_FRAME - LAST_FRAME] = 1; + bwd_end_idx--; + } + + // == BWDREF_FRAME == + if (bwd_start_idx <= bwd_end_idx) { + set_ref_frame_info(remapped_ref_idx, BWDREF_FRAME - LAST_FRAME, + &ref_frame_info[bwd_start_idx]); + ref_flag_list[BWDREF_FRAME - LAST_FRAME] = 1; + bwd_start_idx++; + } + + // == ALTREF2_FRAME == + if (bwd_start_idx <= bwd_end_idx) { + set_ref_frame_info(remapped_ref_idx, ALTREF2_FRAME - LAST_FRAME, + &ref_frame_info[bwd_start_idx]); + ref_flag_list[ALTREF2_FRAME - LAST_FRAME] = 1; + } + + // === Forward Reference Frames === + + for (i = fwd_start_idx; i <= fwd_end_idx; ++i) { + // == LAST_FRAME == + if (ref_frame_info[i].map_idx == lst_map_idx) { + set_ref_frame_info(remapped_ref_idx, LAST_FRAME - LAST_FRAME, + &ref_frame_info[i]); 
+ ref_flag_list[LAST_FRAME - LAST_FRAME] = 1; + } + + // == GOLDEN_FRAME == + if (ref_frame_info[i].map_idx == gld_map_idx) { + set_ref_frame_info(remapped_ref_idx, GOLDEN_FRAME - LAST_FRAME, + &ref_frame_info[i]); + ref_flag_list[GOLDEN_FRAME - LAST_FRAME] = 1; + } + } + + assert(ref_flag_list[LAST_FRAME - LAST_FRAME] == 1 && + ref_flag_list[GOLDEN_FRAME - LAST_FRAME] == 1); + + // == LAST2_FRAME == + // == LAST3_FRAME == + // == BWDREF_FRAME == + // == ALTREF2_FRAME == + // == ALTREF_FRAME == + + // Set up the reference frames in the anti-chronological order. + for (ref_idx = 0; ref_idx < (INTER_REFS_PER_FRAME - 2); ref_idx++) { + const MV_REFERENCE_FRAME ref_frame = ref_frame_list[ref_idx]; + + if (ref_flag_list[ref_frame - LAST_FRAME] == 1) continue; + + while (fwd_start_idx <= fwd_end_idx && + (ref_frame_info[fwd_end_idx].map_idx == lst_map_idx || + ref_frame_info[fwd_end_idx].map_idx == gld_map_idx)) { + fwd_end_idx--; + } + if (fwd_start_idx > fwd_end_idx) break; + + set_ref_frame_info(remapped_ref_idx, ref_frame - LAST_FRAME, + &ref_frame_info[fwd_end_idx]); + ref_flag_list[ref_frame - LAST_FRAME] = 1; + + fwd_end_idx--; + } + + // Assign all the remaining frame(s), if any, to the earliest reference frame. 
+ for (; ref_idx < (INTER_REFS_PER_FRAME - 2); ref_idx++) { + const MV_REFERENCE_FRAME ref_frame = ref_frame_list[ref_idx]; + if (ref_flag_list[ref_frame - LAST_FRAME] == 1) continue; + set_ref_frame_info(remapped_ref_idx, ref_frame - LAST_FRAME, + &ref_frame_info[fwd_start_idx]); + ref_flag_list[ref_frame - LAST_FRAME] = 1; + } + + for (i = 0; i < INTER_REFS_PER_FRAME; i++) { + assert(ref_flag_list[i] == 1); + } +} + +void av1_setup_frame_buf_refs(AV1_COMMON *cm) { + MV_REFERENCE_FRAME ref_frame; + cm->cur_frame->order_hint = cm->current_frame.order_hint; + + for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { + const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref_frame); + if (buf != NULL) + cm->cur_frame->ref_order_hints[ref_frame - LAST_FRAME] = buf->order_hint; + } +} + +void av1_setup_frame_sign_bias(AV1_COMMON *cm) { + MV_REFERENCE_FRAME ref_frame; + for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { + const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref_frame); + if (cm->seq_params.order_hint_info.enable_order_hint && buf != NULL) { + const int ref_order_hint = buf->order_hint; + cm->ref_frame_sign_bias[ref_frame] = + (get_relative_dist(&cm->seq_params.order_hint_info, ref_order_hint, + (int)cm->current_frame.order_hint) <= 0) + ? 
0 + : 1; + } else { + cm->ref_frame_sign_bias[ref_frame] = 0; + } + } +} + + +void av1_setup_skip_mode_allowed(AV1_COMMON *cm) +{ + const OrderHintInfo *const order_hint_info = &cm->seq_params.order_hint_info; + SkipModeInfo *const skip_mode_info = &cm->current_frame.skip_mode_info; + int i; + int cur_order_hint; + int ref_order_hints[2] = { -1, INT_MAX }; + int ref_idx[2] = { INVALID_IDX, INVALID_IDX }; + + skip_mode_info->skip_mode_allowed = 0; + skip_mode_info->ref_frame_idx_0 = INVALID_IDX; + skip_mode_info->ref_frame_idx_1 = INVALID_IDX; + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "av1_setup_skip_mode_allowed %d %d %d\n", order_hint_info->enable_order_hint, + frame_is_intra_only(cm), + cm->current_frame.reference_mode); + if (!order_hint_info->enable_order_hint || frame_is_intra_only(cm) || + cm->current_frame.reference_mode == SINGLE_REFERENCE) + return; + + cur_order_hint = cm->current_frame.order_hint; + + // Identify the nearest forward and backward references. + for (i = 0; i < INTER_REFS_PER_FRAME; ++i) { + const RefCntBuffer *const buf = get_ref_frame_buf(cm, LAST_FRAME + i); + int ref_order_hint; + if (buf == NULL) continue; + + ref_order_hint = buf->order_hint; + if (get_relative_dist(order_hint_info, ref_order_hint, cur_order_hint) < 0) { + // Forward reference + if (ref_order_hints[0] == -1 || + get_relative_dist(order_hint_info, ref_order_hint, + ref_order_hints[0]) > 0) { + ref_order_hints[0] = ref_order_hint; + ref_idx[0] = i; + } + } else if (get_relative_dist(order_hint_info, ref_order_hint, + cur_order_hint) > 0) { + // Backward reference + if (ref_order_hints[1] == INT_MAX || + get_relative_dist(order_hint_info, ref_order_hint, + ref_order_hints[1]) < 0) { + ref_order_hints[1] = ref_order_hint; + ref_idx[1] = i; + } + } + } + + if (ref_idx[0] != INVALID_IDX && ref_idx[1] != INVALID_IDX) { + // == Bi-directional prediction == + skip_mode_info->skip_mode_allowed = 1; + skip_mode_info->ref_frame_idx_0 = AOMMIN(ref_idx[0], ref_idx[1]); + 
skip_mode_info->ref_frame_idx_1 = AOMMAX(ref_idx[0], ref_idx[1]); + } else if (ref_idx[0] != INVALID_IDX && ref_idx[1] == INVALID_IDX) { + // == Forward prediction only == + // Identify the second nearest forward reference. + ref_order_hints[1] = -1; + for (i = 0; i < INTER_REFS_PER_FRAME; ++i) { + const RefCntBuffer *const buf = get_ref_frame_buf(cm, LAST_FRAME + i); + int ref_order_hint; + if (buf == NULL) continue; + + ref_order_hint = buf->order_hint; + if ((ref_order_hints[0] != -1 && + get_relative_dist(order_hint_info, ref_order_hint, ref_order_hints[0]) < 0) && + (ref_order_hints[1] == -1 || + get_relative_dist(order_hint_info, ref_order_hint, ref_order_hints[1]) > 0)) { + // Second closest forward reference + ref_order_hints[1] = ref_order_hint; + ref_idx[1] = i; + } + } + if (ref_order_hints[1] != -1) { + skip_mode_info->skip_mode_allowed = 1; + skip_mode_info->ref_frame_idx_0 = AOMMIN(ref_idx[0], ref_idx[1]); + skip_mode_info->ref_frame_idx_1 = AOMMAX(ref_idx[0], ref_idx[1]); + } + } + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, + "skip_mode_info: skip_mode_allowed 0x%x 0x%x 0x%x\n", + cm->current_frame.skip_mode_info.skip_mode_allowed, + cm->current_frame.skip_mode_info.ref_frame_idx_0, + cm->current_frame.skip_mode_info.ref_frame_idx_1); +} + +static inline int frame_might_allow_ref_frame_mvs(const AV1_COMMON *cm) { + return !cm->error_resilient_mode && + cm->seq_params.order_hint_info.enable_ref_frame_mvs && + cm->seq_params.order_hint_info.enable_order_hint && + !frame_is_intra_only(cm); +} + +#ifdef ORI_CODE +/* +* segmentation +*/ +static const int seg_feature_data_signed[SEG_LVL_MAX] = { + 1, 1, 1, 1, 1, 0, 0, 0 +}; + +static const int seg_feature_data_max[SEG_LVL_MAX] = { MAXQ, + MAX_LOOP_FILTER, + MAX_LOOP_FILTER, + MAX_LOOP_FILTER, + MAX_LOOP_FILTER, + 7, + 0, + 0 }; + + +static inline void segfeatures_copy(struct segmentation *dst, + const struct segmentation *src) { + int i, j; + for (i = 0; i < MAX_SEGMENTS; i++) { + dst->feature_mask[i] = 
src->feature_mask[i]; + for (j = 0; j < SEG_LVL_MAX; j++) { + dst->feature_data[i][j] = src->feature_data[i][j]; + } + } + dst->segid_preskip = src->segid_preskip; + dst->last_active_segid = src->last_active_segid; +} + +static void av1_clearall_segfeatures(struct segmentation *seg) { + av1_zero(seg->feature_data); + av1_zero(seg->feature_mask); +} + +static void av1_enable_segfeature(struct segmentation *seg, int segment_id, + int feature_id) { + seg->feature_mask[segment_id] |= 1 << feature_id; +} + +void av1_calculate_segdata(struct segmentation *seg) { + seg->segid_preskip = 0; + seg->last_active_segid = 0; + for (int i = 0; i < MAX_SEGMENTS; i++) { + for (int j = 0; j < SEG_LVL_MAX; j++) { + if (seg->feature_mask[i] & (1 << j)) { + seg->segid_preskip |= (j >= SEG_LVL_REF_FRAME); + seg->last_active_segid = i; + } + } + } +} + +static int av1_seg_feature_data_max(int feature_id) { + return seg_feature_data_max[feature_id]; +} + +static int av1_is_segfeature_signed(int feature_id) { + return seg_feature_data_signed[feature_id]; +} + +static void av1_set_segdata(struct segmentation *seg, int segment_id, + int feature_id, int seg_data) { + if (seg_data < 0) { + assert(seg_feature_data_signed[feature_id]); + assert(-seg_data <= seg_feature_data_max[feature_id]); + } else { + assert(seg_data <= seg_feature_data_max[feature_id]); + } + + seg->feature_data[segment_id][feature_id] = seg_data; +} + +static inline int clamp(int value, int low, int high) { + return value < low ? low : (value > high ? 
high : value); +} + +static void setup_segmentation(AV1_COMMON *const cm, + union param_u *params) { + struct segmentation *const seg = &cm->seg; + + seg->update_map = 0; + seg->update_data = 0; + seg->temporal_update = 0; + + seg->enabled = params->p.seg_enabled; //aom_rb_read_bit(-1, defmark, rb); + if (!seg->enabled) { + if (cm->cur_frame->seg_map) + memset(cm->cur_frame->seg_map, 0, (cm->mi_rows * cm->mi_cols)); + + memset(seg, 0, sizeof(*seg)); + segfeatures_copy(&cm->cur_frame->seg, seg); + return; + } + if (cm->seg.enabled && cm->prev_frame && + (cm->mi_rows == cm->prev_frame->mi_rows) && + (cm->mi_cols == cm->prev_frame->mi_cols)) { + cm->last_frame_seg_map = cm->prev_frame->seg_map; + } else { + cm->last_frame_seg_map = NULL; + } + // Read update flags + if (cm->primary_ref_frame == PRIMARY_REF_NONE) { + // These frames can't use previous frames, so must signal map + features + seg->update_map = 1; + seg->temporal_update = 0; + seg->update_data = 1; + } else { + seg->update_map = params->p.seg_update_map; // aom_rb_read_bit(-1, defmark, rb); + if (seg->update_map) { + seg->temporal_update = params->p.seg_temporal_update; //aom_rb_read_bit(-1, defmark, rb); + } else { + seg->temporal_update = 0; + } + seg->update_data = params->p.seg_update_data; //aom_rb_read_bit(-1, defmark, rb); + } + + // Segmentation data update + if (seg->update_data) { + av1_clearall_segfeatures(seg); + + for (int i = 0; i < MAX_SEGMENTS; i++) { + for (int j = 0; j < SEG_LVL_MAX; j++) { + int data = 0; + const int feature_enabled = params->p.seg_feature_enabled ;//aom_rb_read_bit(-1, defmark, rb); + if (feature_enabled) { + av1_enable_segfeature(seg, i, j); + + const int data_max = av1_seg_feature_data_max(j); + const int data_min = -data_max; + /* + const int ubits = get_unsigned_bits(data_max); + + if (av1_is_segfeature_signed(j)) { + data = aom_rb_read_inv_signed_literal(-1, defmark, rb, ubits); + } else { + data = aom_rb_read_literal(-1, defmark, rb, ubits); + }*/ + data = 
params->p.seg_data; + data = clamp(data, data_min, data_max); + } + av1_set_segdata(seg, i, j, data); + } + } + av1_calculate_segdata(seg); + } else if (cm->prev_frame) { + segfeatures_copy(seg, &cm->prev_frame->seg); + } + segfeatures_copy(&cm->cur_frame->seg, seg); +} +#endif + +/**/ + + +int av1_decode_frame_headers_and_setup(AV1Decoder *pbi, int trailing_bits_present, union param_u *params) +{ + AV1_COMMON *const cm = pbi->common; + /* + read_uncompressed_header() + */ + const SequenceHeader *const seq_params = &cm->seq_params; + CurrentFrame *const current_frame = &cm->current_frame; + //MACROBLOCKD *const xd = &pbi->mb; + BufferPool *const pool = cm->buffer_pool; + RefCntBuffer *const frame_bufs = pool->frame_bufs; + int i; + int frame_size_override_flag; + unsigned long flags; + + if (!pbi->sequence_header_ready) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "No sequence header"); + } + cm->last_frame_type = current_frame->frame_type; + + if (seq_params->reduced_still_picture_hdr) { + cm->show_existing_frame = 0; + cm->show_frame = 1; + current_frame->frame_type = KEY_FRAME; + if (pbi->sequence_header_changed) { + // This is the start of a new coded video sequence. + pbi->sequence_header_changed = 0; + pbi->decoding_first_frame = 1; + reset_frame_buffers(pbi); + } + cm->error_resilient_mode = 1; + } else { + cm->show_existing_frame = params->p.show_existing_frame; + pbi->reset_decoder_state = 0; + if (cm->show_existing_frame) { + int existing_frame_idx; + RefCntBuffer *frame_to_show; + if (pbi->sequence_header_changed) { + aom_internal_error( + &cm->error, AOM_CODEC_CORRUPT_FRAME, + "New sequence header starts with a show_existing_frame."); + } + // Show an existing frame directly. 
+ existing_frame_idx = params->p.existing_frame_idx; //aom_rb_read_literal(rb, 3); + frame_to_show = cm->ref_frame_map[existing_frame_idx]; + if (frame_to_show == NULL) { + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "Buffer does not contain a decoded frame"); + return 0; + } + if (seq_params->decoder_model_info_present_flag && + cm->timing_info.equal_picture_interval == 0) { + cm->frame_presentation_time = params->p.frame_presentation_time; + //read_temporal_point_info(cm); + } + if (seq_params->frame_id_numbers_present_flag) { + //int frame_id_length = seq_params->frame_id_length; + int display_frame_id = params->p.display_frame_id; //aom_rb_read_literal(rb, frame_id_length); + /* Compare display_frame_id with ref_frame_id and check valid for + * referencing */ + if (display_frame_id != cm->ref_frame_id[existing_frame_idx] || + cm->valid_for_referencing[existing_frame_idx] == 0) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Reference buffer frame ID mismatch"); + } + lock_buffer_pool(pool, flags); + assert(frame_to_show->ref_count > 0); + // cm->cur_frame should be the buffer referenced by the return value + // of the get_free_fb() call in av1_receive_compressed_data(), and + // generate_next_ref_frame_map() has not been called, so ref_count + // should still be 1. + assert(cm->cur_frame->ref_count == 1); + // assign_frame_buffer_p() decrements ref_count directly rather than + // call decrease_ref_count(). If cm->cur_frame->raw_frame_buffer has + // already been allocated, it will not be released by + // assign_frame_buffer_p()! 
+ assert(!cm->cur_frame->raw_frame_buffer.data); + assign_frame_buffer_p(&cm->cur_frame, frame_to_show); + pbi->reset_decoder_state = frame_to_show->frame_type == KEY_FRAME; + unlock_buffer_pool(pool, flags); + +#ifdef ORI_CODE + cm->lf.filter_level[0] = 0; + cm->lf.filter_level[1] = 0; +#endif + cm->show_frame = 1; + + // Section 6.8.2: It is a requirement of bitstream conformance that when + // show_existing_frame is used to show a previous frame, that the value + // of showable_frame for the previous frame was equal to 1. + if (!frame_to_show->showable_frame) { + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "Buffer does not contain a showable frame"); + } + // Section 6.8.2: It is a requirement of bitstream conformance that when + // show_existing_frame is used to show a previous frame with + // RefFrameType[ frame_to_show_map_idx ] equal to KEY_FRAME, that the + // frame is output via the show_existing_frame mechanism at most once. + if (pbi->reset_decoder_state) frame_to_show->showable_frame = 0; + +#ifdef ORI_CODE + cm->film_grain_params = frame_to_show->film_grain_params; +#endif + if (pbi->reset_decoder_state) { + show_existing_frame_reset(pbi, existing_frame_idx); + } else { + current_frame->refresh_frame_flags = 0; + } + + return 0; + } + + current_frame->frame_type = (FRAME_TYPE)params->p.frame_type; //aom_rb_read_literal(rb, 2); + if (pbi->sequence_header_changed) { + if (current_frame->frame_type == KEY_FRAME) { + // This is the start of a new coded video sequence. 
+ pbi->sequence_header_changed = 0; + pbi->decoding_first_frame = 1; + reset_frame_buffers(pbi); + } else { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Sequence header has changed without a keyframe."); + } + } + cm->show_frame = params->p.show_frame; //aom_rb_read_bit(rb); + if (seq_params->still_picture && + (current_frame->frame_type != KEY_FRAME || !cm->show_frame)) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Still pictures must be coded as shown keyframes"); + } + cm->showable_frame = current_frame->frame_type != KEY_FRAME; + if (cm->show_frame) { + if (seq_params->decoder_model_info_present_flag && + cm->timing_info.equal_picture_interval == 0) + cm->frame_presentation_time = params->p.frame_presentation_time; + //read_temporal_point_info(cm); + } else { + // See if this frame can be used as show_existing_frame in future + cm->showable_frame = params->p.showable_frame;//aom_rb_read_bit(rb); + } + cm->cur_frame->show_frame = cm->show_frame; + cm->cur_frame->showable_frame = cm->showable_frame; + cm->error_resilient_mode = + frame_is_sframe(cm) || + (current_frame->frame_type == KEY_FRAME && cm->show_frame) + ? 
1 + : params->p.error_resilient_mode; //aom_rb_read_bit(rb); + } + +#ifdef ORI_CODE + cm->disable_cdf_update = aom_rb_read_bit(rb); + if (seq_params->force_screen_content_tools == 2) { + cm->allow_screen_content_tools = aom_rb_read_bit(rb); + } else { + cm->allow_screen_content_tools = seq_params->force_screen_content_tools; + } + + if (cm->allow_screen_content_tools) { + if (seq_params->force_integer_mv == 2) { + cm->cur_frame_force_integer_mv = aom_rb_read_bit(rb); + } else { + cm->cur_frame_force_integer_mv = seq_params->force_integer_mv; + } + } else { + cm->cur_frame_force_integer_mv = 0; + } +#endif + + frame_size_override_flag = 0; + cm->allow_intrabc = 0; + cm->primary_ref_frame = PRIMARY_REF_NONE; + + if (!seq_params->reduced_still_picture_hdr) { + if (seq_params->frame_id_numbers_present_flag) { + int frame_id_length = seq_params->frame_id_length; + int diff_len = seq_params->delta_frame_id_length; + int prev_frame_id = 0; + int have_prev_frame_id = + !pbi->decoding_first_frame && + !(current_frame->frame_type == KEY_FRAME && cm->show_frame); + if (have_prev_frame_id) { + prev_frame_id = cm->current_frame_id; + } + cm->current_frame_id = params->p.current_frame_id; //aom_rb_read_literal(rb, frame_id_length); + + if (have_prev_frame_id) { + int diff_frame_id; + if (cm->current_frame_id > prev_frame_id) { + diff_frame_id = cm->current_frame_id - prev_frame_id; + } else { + diff_frame_id = + (1 << frame_id_length) + cm->current_frame_id - prev_frame_id; + } + /* Check current_frame_id for conformance */ + if (prev_frame_id == cm->current_frame_id || + diff_frame_id >= (1 << (frame_id_length - 1))) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Invalid value of current_frame_id"); + } + } + /* Check if some frames need to be marked as not valid for referencing */ + for (i = 0; i < REF_FRAMES; i++) { + if (current_frame->frame_type == KEY_FRAME && cm->show_frame) { + cm->valid_for_referencing[i] = 0; + } else if (cm->current_frame_id - (1 << 
diff_len) > 0) { + if (cm->ref_frame_id[i] > cm->current_frame_id || + cm->ref_frame_id[i] < cm->current_frame_id - (1 << diff_len)) + cm->valid_for_referencing[i] = 0; + } else { + if (cm->ref_frame_id[i] > cm->current_frame_id && + cm->ref_frame_id[i] < (1 << frame_id_length) + + cm->current_frame_id - (1 << diff_len)) + cm->valid_for_referencing[i] = 0; + } + } + } + + frame_size_override_flag = frame_is_sframe(cm) ? 1 : params->p.frame_size_override_flag; //aom_rb_read_bit(rb); + + current_frame->order_hint = params->p.order_hint; /*aom_rb_read_literal( + rb, seq_params->order_hint_info.order_hint_bits_minus_1 + 1);*/ + current_frame->frame_number = current_frame->order_hint; + + if (!cm->error_resilient_mode && !frame_is_intra_only(cm)) { + cm->primary_ref_frame = params->p.primary_ref_frame;//aom_rb_read_literal(rb, PRIMARY_REF_BITS); + } + } + + if (seq_params->decoder_model_info_present_flag) { + cm->buffer_removal_time_present = params->p.buffer_removal_time_present; //aom_rb_read_bit(rb); + if (cm->buffer_removal_time_present) { + int op_num; + for (op_num = 0; + op_num < seq_params->operating_points_cnt_minus_1 + 1; op_num++) { + if (cm->op_params[op_num].decoder_model_param_present_flag) { + if ((((seq_params->operating_point_idc[op_num] >> + cm->temporal_layer_id) & + 0x1) && + ((seq_params->operating_point_idc[op_num] >> + (cm->spatial_layer_id + 8)) & + 0x1)) || + seq_params->operating_point_idc[op_num] == 0) { + cm->op_frame_timing[op_num].buffer_removal_time = + params->p.op_frame_timing[op_num]; + /*aom_rb_read_unsigned_literal( + rb, cm->buffer_model.buffer_removal_time_length);*/ + } else { + cm->op_frame_timing[op_num].buffer_removal_time = 0; + } + } else { + cm->op_frame_timing[op_num].buffer_removal_time = 0; + } + } + } + } + if (current_frame->frame_type == KEY_FRAME) { + if (!cm->show_frame) { // unshown keyframe (forward keyframe) + current_frame->refresh_frame_flags = params->p.refresh_frame_flags; //aom_rb_read_literal(rb, REF_FRAMES); 
+ } else { // shown keyframe + current_frame->refresh_frame_flags = (1 << REF_FRAMES) - 1; + } + + for (i = 0; i < INTER_REFS_PER_FRAME; ++i) { + cm->remapped_ref_idx[i] = INVALID_IDX; + } + if (pbi->need_resync) { + reset_ref_frame_map(pbi); + pbi->need_resync = 0; + } + } else { + if (current_frame->frame_type == INTRA_ONLY_FRAME) { + current_frame->refresh_frame_flags = params->p.refresh_frame_flags; //aom_rb_read_literal(rb, REF_FRAMES); + if (current_frame->refresh_frame_flags == 0xFF) { + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "Intra only frames cannot have refresh flags 0xFF"); + } + if (pbi->need_resync) { + reset_ref_frame_map(pbi); + pbi->need_resync = 0; + } + } else if (pbi->need_resync != 1) { /* Skip if need resync */ + current_frame->refresh_frame_flags = + frame_is_sframe(cm) ? 0xFF : params->p.refresh_frame_flags; //aom_rb_read_literal(rb, REF_FRAMES); + } + } + + if (!frame_is_intra_only(cm) || current_frame->refresh_frame_flags != 0xFF) { + // Read all ref frame order hints if error_resilient_mode == 1 + if (cm->error_resilient_mode && + seq_params->order_hint_info.enable_order_hint) { + int ref_idx; + for (ref_idx = 0; ref_idx < REF_FRAMES; ref_idx++) { + // Read order hint from bit stream + unsigned int order_hint = params->p.ref_order_hint[ref_idx];/*aom_rb_read_literal( + rb, seq_params->order_hint_info.order_hint_bits_minus_1 + 1);*/ + // Get buffer + RefCntBuffer *buf = cm->ref_frame_map[ref_idx]; + int buf_idx; + if (buf == NULL || order_hint != buf->order_hint) { + if (buf != NULL) { + lock_buffer_pool(pool, flags); + decrease_ref_count(pbi, buf, pool); + unlock_buffer_pool(pool, flags); + } + // If no corresponding buffer exists, allocate a new buffer with all + // pixels set to neutral grey. 
+ buf_idx = get_free_frame_buffer(cm); + if (buf_idx == INVALID_IDX) { + aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, + "Unable to find free frame buffer"); + } + buf = &frame_bufs[buf_idx]; + lock_buffer_pool(pool, flags); + if (aom_realloc_frame_buffer(cm, &buf->buf, seq_params->max_frame_width, + seq_params->max_frame_height, buf->order_hint)) { + decrease_ref_count(pbi, buf, pool); + unlock_buffer_pool(pool, flags); + aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, + "Failed to allocate frame buffer"); + } + unlock_buffer_pool(pool, flags); +#ifdef ORI_CODE + set_planes_to_neutral_grey(seq_params, &buf->buf, 0); +#endif + cm->ref_frame_map[ref_idx] = buf; + buf->order_hint = order_hint; + } + } + } + } + + if (current_frame->frame_type == KEY_FRAME) { + setup_frame_size(cm, frame_size_override_flag, params); +#ifdef ORI_CODE + if (cm->allow_screen_content_tools && !av1_superres_scaled(cm)) + cm->allow_intrabc = aom_rb_read_bit(rb); +#endif + cm->allow_ref_frame_mvs = 0; + cm->prev_frame = NULL; + } else { + cm->allow_ref_frame_mvs = 0; + + if (current_frame->frame_type == INTRA_ONLY_FRAME) { +#ifdef ORI_CODE + cm->cur_frame->film_grain_params_present = + seq_params->film_grain_params_present; +#endif + setup_frame_size(cm, frame_size_override_flag, params); +#ifdef ORI_CODE + if (cm->allow_screen_content_tools && !av1_superres_scaled(cm)) + cm->allow_intrabc = aom_rb_read_bit(rb); +#endif + } else if (pbi->need_resync != 1) { /* Skip if need resync */ + int frame_refs_short_signaling = 0; + // Frame refs short signaling is off when error resilient mode is on. 
+ if (seq_params->order_hint_info.enable_order_hint) + frame_refs_short_signaling = params->p.frame_refs_short_signaling;//aom_rb_read_bit(rb); + + if (frame_refs_short_signaling) { + // == LAST_FRAME == + const int lst_ref = params->p.lst_ref; //aom_rb_read_literal(rb, REF_FRAMES_LOG2); + const RefCntBuffer *const lst_buf = cm->ref_frame_map[lst_ref]; + + // == GOLDEN_FRAME == + const int gld_ref = params->p.gld_ref; //aom_rb_read_literal(rb, REF_FRAMES_LOG2); + const RefCntBuffer *const gld_buf = cm->ref_frame_map[gld_ref]; + + // Most of the time, streams start with a keyframe. In that case, + // ref_frame_map will have been filled in at that point and will not + // contain any NULLs. However, streams are explicitly allowed to start + // with an intra-only frame, so long as they don't then signal a + // reference to a slot that hasn't been set yet. That's what we are + // checking here. + if (lst_buf == NULL) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Inter frame requests nonexistent reference"); + if (gld_buf == NULL) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Inter frame requests nonexistent reference"); + + av1_set_frame_refs(cm, cm->remapped_ref_idx, lst_ref, gld_ref); + } + + for (i = 0; i < INTER_REFS_PER_FRAME; ++i) { + int ref = 0; + if (!frame_refs_short_signaling) { + ref = params->p.remapped_ref_idx[i];//aom_rb_read_literal(rb, REF_FRAMES_LOG2); + + // Most of the time, streams start with a keyframe. In that case, + // ref_frame_map will have been filled in at that point and will not + // contain any NULLs. However, streams are explicitly allowed to start + // with an intra-only frame, so long as they don't then signal a + // reference to a slot that hasn't been set yet. That's what we are + // checking here. 
+ if (cm->ref_frame_map[ref] == NULL) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Inter frame requests nonexistent reference"); + cm->remapped_ref_idx[i] = ref; + } else { + ref = cm->remapped_ref_idx[i]; + } + + cm->ref_frame_sign_bias[LAST_FRAME + i] = 0; + + if (seq_params->frame_id_numbers_present_flag) { + int frame_id_length = seq_params->frame_id_length; + //int diff_len = seq_params->delta_frame_id_length; + int delta_frame_id_minus_1 = params->p.delta_frame_id_minus_1[i];//aom_rb_read_literal(rb, diff_len); + int ref_frame_id = + ((cm->current_frame_id - (delta_frame_id_minus_1 + 1) + + (1 << frame_id_length)) % + (1 << frame_id_length)); + // Compare values derived from delta_frame_id_minus_1 and + // refresh_frame_flags. Also, check valid for referencing + if (ref_frame_id != cm->ref_frame_id[ref] || + cm->valid_for_referencing[ref] == 0) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Reference buffer frame ID mismatch"); + } + } + + if (!cm->error_resilient_mode && frame_size_override_flag) { + setup_frame_size_with_refs(cm, params); + } else { + setup_frame_size(cm, frame_size_override_flag, params); + } +#ifdef ORI_CODE + if (cm->cur_frame_force_integer_mv) { + cm->allow_high_precision_mv = 0; + } else { + cm->allow_high_precision_mv = aom_rb_read_bit(rb); + } + cm->interp_filter = read_frame_interp_filter(rb); + cm->switchable_motion_mode = aom_rb_read_bit(rb); +#endif + } + + cm->prev_frame = get_primary_ref_frame_buf(cm); + if (cm->primary_ref_frame != PRIMARY_REF_NONE && + get_primary_ref_frame_buf(cm) == NULL) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Reference frame containing this frame's initial " + "frame context is unavailable."); + } +#if 0 + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%d,%d,%d,%d\n",cm->error_resilient_mode, + cm->seq_params.order_hint_info.enable_ref_frame_mvs, + cm->seq_params.order_hint_info.enable_order_hint,frame_is_intra_only(cm)); + + 
printf("frame_might_allow_ref_frame_mvs()=>%d, current_frame->frame_type=%d, pbi->need_resync=%d, params->p.allow_ref_frame_mvs=%d\n", + frame_might_allow_ref_frame_mvs(cm), current_frame->frame_type, pbi->need_resync, + params->p.allow_ref_frame_mvs); +#endif + if (!(current_frame->frame_type == INTRA_ONLY_FRAME) && + pbi->need_resync != 1) { + if (frame_might_allow_ref_frame_mvs(cm)) + cm->allow_ref_frame_mvs = params->p.allow_ref_frame_mvs; //aom_rb_read_bit(-1, "<allow_ref_frame_mvs>", rb); + else + cm->allow_ref_frame_mvs = 0; + +#ifdef SUPPORT_SCALE_FACTOR + for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { + const RefCntBuffer *const ref_buf = get_ref_frame_buf(cm, i); + struct scale_factors *const ref_scale_factors = + get_ref_scale_factors(cm, i); + if (ref_buf != NULL) { +#ifdef AML + av1_setup_scale_factors_for_frame( + ref_scale_factors, ref_buf->buf.y_crop_width, + ref_buf->buf.y_crop_height, cm->dec_width, cm->height); +#else + av1_setup_scale_factors_for_frame( + ref_scale_factors, ref_buf->buf.y_crop_width, + ref_buf->buf.y_crop_height, cm->width, cm->height); +#endif + } + if (ref_scale_factors) { + if ((!av1_is_valid_scale(ref_scale_factors))) + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "Reference frame has invalid dimensions"); + } + } +#endif + } + } + + av1_setup_frame_buf_refs(cm); + + av1_setup_frame_sign_bias(cm); + + cm->cur_frame->frame_type = current_frame->frame_type; + + if (seq_params->frame_id_numbers_present_flag) { + update_ref_frame_id(cm, cm->current_frame_id); + } +#ifdef ORI_CODE + const int might_bwd_adapt = + !(seq_params->reduced_still_picture_hdr) && !(cm->disable_cdf_update); + if (might_bwd_adapt) { + cm->refresh_frame_context = aom_rb_read_bit(rb) + ? 
REFRESH_FRAME_CONTEXT_DISABLED + : REFRESH_FRAME_CONTEXT_BACKWARD; + } else { + cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_DISABLED; + } +#endif + + cm->cur_frame->buf.bit_depth = seq_params->bit_depth; + cm->cur_frame->buf.color_primaries = seq_params->color_primaries; + cm->cur_frame->buf.transfer_characteristics = + seq_params->transfer_characteristics; + cm->cur_frame->buf.matrix_coefficients = seq_params->matrix_coefficients; + cm->cur_frame->buf.monochrome = seq_params->monochrome; + cm->cur_frame->buf.chroma_sample_position = + seq_params->chroma_sample_position; + cm->cur_frame->buf.color_range = seq_params->color_range; + cm->cur_frame->buf.render_width = cm->render_width; + cm->cur_frame->buf.render_height = cm->render_height; + + if (pbi->need_resync) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Keyframe / intra-only frame required to reset decoder" + " state"); + } + + generate_next_ref_frame_map(pbi); + +#ifdef ORI_CODE + if (cm->allow_intrabc) { + // Set parameters corresponding to no filtering. 
+ struct loopfilter *lf = &cm->lf; + lf->filter_level[0] = 0; + lf->filter_level[1] = 0; + cm->cdef_info.cdef_bits = 0; + cm->cdef_info.cdef_strengths[0] = 0; + cm->cdef_info.nb_cdef_strengths = 1; + cm->cdef_info.cdef_uv_strengths[0] = 0; + cm->rst_info[0].frame_restoration_type = RESTORE_NONE; + cm->rst_info[1].frame_restoration_type = RESTORE_NONE; + cm->rst_info[2].frame_restoration_type = RESTORE_NONE; + } + + read_tile_info(pbi, rb); + if (!av1_is_min_tile_width_satisfied(cm)) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Minimum tile width requirement not satisfied"); + } + + setup_quantization(cm, rb); + xd->bd = (int)seq_params->bit_depth; + + if (cm->num_allocated_above_context_planes < av1_num_planes(cm) || + cm->num_allocated_above_context_mi_col < cm->mi_cols || + cm->num_allocated_above_contexts < cm->tile_rows) { + av1_free_above_context_buffers(cm, cm->num_allocated_above_contexts); + if (av1_alloc_above_context_buffers(cm, cm->tile_rows)) + aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, + "Failed to allocate context buffers"); + } + + if (cm->primary_ref_frame == PRIMARY_REF_NONE) { + av1_setup_past_independence(cm); + } + + setup_segmentation(cm, params); + + cm->delta_q_info.delta_q_res = 1; + cm->delta_q_info.delta_lf_res = 1; + cm->delta_q_info.delta_lf_present_flag = 0; + cm->delta_q_info.delta_lf_multi = 0; + cm->delta_q_info.delta_q_present_flag = + cm->base_qindex > 0 ? 
aom_rb_read_bit(-1, defmark, rb) : 0; + if (cm->delta_q_info.delta_q_present_flag) { + xd->current_qindex = cm->base_qindex; + cm->delta_q_info.delta_q_res = 1 << aom_rb_read_literal(-1, defmark, rb, 2); + if (!cm->allow_intrabc) + cm->delta_q_info.delta_lf_present_flag = aom_rb_read_bit(-1, defmark, rb); + if (cm->delta_q_info.delta_lf_present_flag) { + cm->delta_q_info.delta_lf_res = 1 << aom_rb_read_literal(-1, defmark, rb, 2); + cm->delta_q_info.delta_lf_multi = aom_rb_read_bit(-1, defmark, rb); + av1_reset_loop_filter_delta(xd, av1_num_planes(cm)); + } + } + + xd->cur_frame_force_integer_mv = cm->cur_frame_force_integer_mv; + + for (int i = 0; i < MAX_SEGMENTS; ++i) { + const int qindex = av1_get_qindex(&cm->seg, i, cm->base_qindex); + xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 && + cm->u_dc_delta_q == 0 && cm->u_ac_delta_q == 0 && + cm->v_dc_delta_q == 0 && cm->v_ac_delta_q == 0; + xd->qindex[i] = qindex; + } + cm->coded_lossless = is_coded_lossless(cm, xd); + cm->all_lossless = cm->coded_lossless && !av1_superres_scaled(cm); + setup_segmentation_dequant(cm, xd); + if (cm->coded_lossless) { + cm->lf.filter_level[0] = 0; + cm->lf.filter_level[1] = 0; + } + if (cm->coded_lossless || !seq_params->enable_cdef) { + cm->cdef_info.cdef_bits = 0; + cm->cdef_info.cdef_strengths[0] = 0; + cm->cdef_info.cdef_uv_strengths[0] = 0; + } + if (cm->all_lossless || !seq_params->enable_restoration) { + cm->rst_info[0].frame_restoration_type = RESTORE_NONE; + cm->rst_info[1].frame_restoration_type = RESTORE_NONE; + cm->rst_info[2].frame_restoration_type = RESTORE_NONE; + } + setup_loopfilter(cm, rb); + + if (!cm->coded_lossless && seq_params->enable_cdef) { + setup_cdef(cm, rb); + } + if (!cm->all_lossless && seq_params->enable_restoration) { + decode_restoration_mode(cm, rb); + } + + cm->tx_mode = read_tx_mode(cm, rb); +#endif + + current_frame->reference_mode = read_frame_reference_mode(cm, params); + +#ifdef ORI_CODE + if (current_frame->reference_mode != 
SINGLE_REFERENCE) + setup_compound_reference_mode(cm); + + +#endif + + av1_setup_skip_mode_allowed(cm); + + /* + the point that ucode send send_bufmgr_info + and wait bufmgr code to return is_skip_mode_allowed + */ + + /* + read_uncompressed_header() end + */ + + av1_setup_motion_field(cm); +#ifdef AML + cm->cur_frame->mi_cols = cm->mi_cols; + cm->cur_frame->mi_rows = cm->mi_rows; + cm->cur_frame->dec_width = cm->dec_width; + + /* + superres_post_decode(AV1Decoder *pbi) => + av1_superres_upscale(cm, pool); => + aom_realloc_frame_buffer( + frame_to_show, cm->superres_upscaled_width, + cm->superres_upscaled_height, seq_params->subsampling_x, + seq_params->subsampling_y, seq_params->use_highbitdepth, + AOM_BORDER_IN_PIXELS, cm->byte_alignment, fb, cb, cb_priv) + */ + aom_realloc_frame_buffer(cm, &cm->cur_frame->buf, + cm->superres_upscaled_width, cm->superres_upscaled_height, + cm->cur_frame->order_hint); +#endif + return 0; +} + +static int are_seq_headers_consistent(const SequenceHeader *seq_params_old, + const SequenceHeader *seq_params_new) { + return !memcmp(seq_params_old, seq_params_new, sizeof(SequenceHeader)); +} + +aom_codec_err_t aom_get_num_layers_from_operating_point_idc( + int operating_point_idc, unsigned int *number_spatial_layers, + unsigned int *number_temporal_layers) { + // derive number of spatial/temporal layers from operating_point_idc + + if (!number_spatial_layers || !number_temporal_layers) + return AOM_CODEC_INVALID_PARAM; + + if (operating_point_idc == 0) { + *number_temporal_layers = 1; + *number_spatial_layers = 1; + } else { + int j; + *number_spatial_layers = 0; + *number_temporal_layers = 0; + for (j = 0; j < MAX_NUM_SPATIAL_LAYERS; j++) { + *number_spatial_layers += + (operating_point_idc >> (j + MAX_NUM_TEMPORAL_LAYERS)) & 0x1; + } + for (j = 0; j < MAX_NUM_TEMPORAL_LAYERS; j++) { + *number_temporal_layers += (operating_point_idc >> j) & 0x1; + } + } + + return AOM_CODEC_OK; +} + +void av1_read_sequence_header(AV1_COMMON *cm, union 
param_u *params, + SequenceHeader *seq_params) { +#ifdef ORI_CODE + const int num_bits_width = aom_rb_read_literal(-1, "<num_bits_width>", rb, 4) + 1; + const int num_bits_height = aom_rb_read_literal(-1, "<num_bits_height>", rb, 4) + 1; + const int max_frame_width = aom_rb_read_literal(-1, "<max_frame_width>", rb, num_bits_width) + 1; + const int max_frame_height = aom_rb_read_literal(-1, "<max_frame_height>", rb, num_bits_height) + 1; + + seq_params->num_bits_width = num_bits_width; + seq_params->num_bits_height = num_bits_height; +#endif + seq_params->max_frame_width = params->p.max_frame_width; //max_frame_width; + seq_params->max_frame_height = params->p.max_frame_height; //max_frame_height; + + if (seq_params->reduced_still_picture_hdr) { + seq_params->frame_id_numbers_present_flag = 0; + } else { + seq_params->frame_id_numbers_present_flag = params->p.frame_id_numbers_present_flag; //aom_rb_read_bit(-1, "<frame_id_numbers_present_flag>", rb); + } + if (seq_params->frame_id_numbers_present_flag) { + // We must always have delta_frame_id_length < frame_id_length, + // in order for a frame to be referenced with a unique delta. + // Avoid wasting bits by using a coding that enforces this restriction. 
+#ifdef ORI_CODE + seq_params->delta_frame_id_length = aom_rb_read_literal(-1, "<delta_frame_id_length>", rb, 4) + 2; + seq_params->frame_id_length = params->p.frame_id_length + aom_rb_read_literal(-1, "<frame_id_length>", rb, 3) + seq_params->delta_frame_id_length + 1; +#else + seq_params->delta_frame_id_length = params->p.delta_frame_id_length; + seq_params->frame_id_length = params->p.frame_id_length + seq_params->delta_frame_id_length + 1; +#endif + if (seq_params->frame_id_length > 16) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Invalid frame_id_length"); + } +#ifdef ORI_CODE + setup_sb_size(seq_params, rb); + seq_params->enable_filter_intra = aom_rb_read_bit(-1, "<enable_filter_intra>", rb); + seq_params->enable_intra_edge_filter = aom_rb_read_bit(-1, "<enable_intra_edge_filter>", rb); +#endif + + if (seq_params->reduced_still_picture_hdr) { + seq_params->enable_interintra_compound = 0; + seq_params->enable_masked_compound = 0; + seq_params->enable_warped_motion = 0; + seq_params->enable_dual_filter = 0; + seq_params->order_hint_info.enable_order_hint = 0; + seq_params->order_hint_info.enable_dist_wtd_comp = 0; + seq_params->order_hint_info.enable_ref_frame_mvs = 0; + seq_params->force_screen_content_tools = 2; // SELECT_SCREEN_CONTENT_TOOLS + seq_params->force_integer_mv = 2; // SELECT_INTEGER_MV + seq_params->order_hint_info.order_hint_bits_minus_1 = -1; + } else { +#ifdef ORI_CODE + seq_params->enable_interintra_compound = aom_rb_read_bit(-1, "<enable_interintra_compound>", rb); + seq_params->enable_masked_compound = aom_rb_read_bit(-1, "<enable_masked_compound>", rb); + seq_params->enable_warped_motion = aom_rb_read_bit(-1, "<enable_warped_motion>", rb); + seq_params->enable_dual_filter = aom_rb_read_bit(-1, "<enable_dual_filter>", rb); +#endif + seq_params->order_hint_info.enable_order_hint = params->p.enable_order_hint; //aom_rb_read_bit(-1, "<order_hint_info.enable_order_hint>", rb); + seq_params->order_hint_info.enable_dist_wtd_comp = 
+ seq_params->order_hint_info.enable_order_hint ? params->p.enable_dist_wtd_comp : 0; //aom_rb_read_bit(-1, "<order_hint_info.enable_dist_wtd_comp>", rb) : 0; + seq_params->order_hint_info.enable_ref_frame_mvs = + seq_params->order_hint_info.enable_order_hint ? params->p.enable_ref_frame_mvs : 0; //aom_rb_read_bit(-1, "<order_hint_info.enable_ref_frame_mvs>", rb) : 0; + +#ifdef ORI_CODE + if (aom_rb_read_bit(-1, defmark, rb)) { + seq_params->force_screen_content_tools = + 2; // SELECT_SCREEN_CONTENT_TOOLS + } else { + seq_params->force_screen_content_tools = aom_rb_read_bit(-1, defmark, rb); + } + + if (seq_params->force_screen_content_tools > 0) { + if (aom_rb_read_bit(-1, defmark, rb)) { + seq_params->force_integer_mv = 2; // SELECT_INTEGER_MV + } else { + seq_params->force_integer_mv = aom_rb_read_bit(-1, defmark, rb); + } + } else { + seq_params->force_integer_mv = 2; // SELECT_INTEGER_MV + } +#endif + seq_params->order_hint_info.order_hint_bits_minus_1 = + seq_params->order_hint_info.enable_order_hint + ? params->p.order_hint_bits_minus_1 /*aom_rb_read_literal(-1, "<order_hint_info.order_hint_bits_minus_1>", rb, 3)*/ + : -1; + } + seq_params->enable_superres = params->p.enable_superres; //aom_rb_read_bit(-1, defmark, rb); + +#ifdef ORI_CODE + seq_params->enable_cdef = aom_rb_read_bit(-1, defmark, rb); + seq_params->enable_restoration = aom_rb_read_bit(-1, defmark, rb); +#endif +} + +#ifdef ORI_CODE +void av1_read_op_parameters_info(AV1_COMMON *const cm, + struct aom_read_bit_buffer *rb, int op_num) { + // The cm->op_params array has MAX_NUM_OPERATING_POINTS + 1 elements. 
+ if (op_num > MAX_NUM_OPERATING_POINTS) { + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "AV1 does not support %d decoder model operating points", + op_num + 1); + } + + cm->op_params[op_num].decoder_buffer_delay = aom_rb_read_unsigned_literal(-1, defmark, + rb, cm->buffer_model.encoder_decoder_buffer_delay_length); + + cm->op_params[op_num].encoder_buffer_delay = aom_rb_read_unsigned_literal(-1, defmark, + rb, cm->buffer_model.encoder_decoder_buffer_delay_length); + + cm->op_params[op_num].low_delay_mode_flag = aom_rb_read_bit(-1, defmark, rb); +} +#endif + +static int is_valid_seq_level_idx(AV1_LEVEL seq_level_idx) { + return seq_level_idx < SEQ_LEVELS || seq_level_idx == SEQ_LEVEL_MAX; +} + +static uint32_t read_sequence_header_obu(AV1Decoder *pbi, + union param_u *params) { + AV1_COMMON *const cm = pbi->common; + int i; + int operating_point; + // Verify rb has been configured to report errors. + //assert(rb->error_handler); + + // Use a local variable to store the information as we decode. At the end, + // if no errors have occurred, cm->seq_params is updated. 
+ SequenceHeader sh = cm->seq_params; + SequenceHeader *const seq_params = &sh; + + seq_params->profile = params->p.profile; //av1_read_profile(rb); + if (seq_params->profile > CONFIG_MAX_DECODE_PROFILE) { + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + return 0; + } + + // Still picture or not + seq_params->still_picture = params->p.still_picture; //aom_rb_read_bit(-1, "<still_picture>", rb); + seq_params->reduced_still_picture_hdr = params->p.reduced_still_picture_hdr; //aom_rb_read_bit(-1, "<reduced_still_picture_hdr>", rb); + // Video must have reduced_still_picture_hdr = 0 + if (!seq_params->still_picture && seq_params->reduced_still_picture_hdr) { + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + return 0; + } + + if (seq_params->reduced_still_picture_hdr) { + cm->timing_info_present = 0; + seq_params->decoder_model_info_present_flag = 0; + seq_params->display_model_info_present_flag = 0; + seq_params->operating_points_cnt_minus_1 = 0; + seq_params->operating_point_idc[0] = 0; + //if (!read_bitstream_level(0, "<seq_level_idx>", &seq_params->seq_level_idx[0], rb)) { + if (!is_valid_seq_level_idx(params->p.seq_level_idx[0])) { + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + return 0; + } + seq_params->tier[0] = 0; + cm->op_params[0].decoder_model_param_present_flag = 0; + cm->op_params[0].display_model_param_present_flag = 0; + } else { + cm->timing_info_present = params->p.timing_info_present; //aom_rb_read_bit(-1, "<timing_info_present>", rb); // timing_info_present_flag + if (cm->timing_info_present) { +#ifdef ORI_CODE + av1_read_timing_info_header(cm, rb); +#endif + seq_params->decoder_model_info_present_flag = params->p.decoder_model_info_present_flag; //aom_rb_read_bit(-1, "<decoder_model_info_present_flag>", rb); +#ifdef ORI_CODE + if (seq_params->decoder_model_info_present_flag) + av1_read_decoder_model_info(cm, rb); +#endif + } else { + seq_params->decoder_model_info_present_flag = 0; + } +#ifdef ORI_CODE + 
seq_params->display_model_info_present_flag = aom_rb_read_bit(-1, "<display_model_info_present_flag>", rb); +#endif + seq_params->operating_points_cnt_minus_1 = params->p.operating_points_cnt_minus_1; + //aom_rb_read_literal(-1, "<operating_points_cnt_minus_1>", rb, OP_POINTS_CNT_MINUS_1_BITS); + for (i = 0; i < seq_params->operating_points_cnt_minus_1 + 1; i++) { + seq_params->operating_point_idc[i] = params->p.operating_point_idc[i]; + //aom_rb_read_literal(i, "<operating_point_idc>", rb, OP_POINTS_IDC_BITS); + //if (!read_bitstream_level(i, "<seq_level_idx>", &seq_params->seq_level_idx[i], rb)) { + if (!is_valid_seq_level_idx(params->p.seq_level_idx[i])) { + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + return 0; + } + // This is the seq_level_idx[i] > 7 check in the spec. seq_level_idx 7 + // is equivalent to level 3.3. +#ifdef ORI_CODE + if (seq_params->seq_level_idx[i] >= SEQ_LEVEL_4_0) + seq_params->tier[i] = aom_rb_read_bit(i, "<tier>", rb); + else + seq_params->tier[i] = 0; +#endif + if (seq_params->decoder_model_info_present_flag) { + cm->op_params[i].decoder_model_param_present_flag = params->p.decoder_model_param_present_flag[i]; //aom_rb_read_bit(-1, defmark, rb); +#ifdef ORI_CODE + if (cm->op_params[i].decoder_model_param_present_flag) + av1_read_op_parameters_info(cm, rb, i); +#endif + } else { + cm->op_params[i].decoder_model_param_present_flag = 0; + } +#ifdef ORI_CODE + if (cm->timing_info_present && + (cm->timing_info.equal_picture_interval || + cm->op_params[i].decoder_model_param_present_flag)) { + cm->op_params[i].bitrate = av1_max_level_bitrate( + seq_params->profile, seq_params->seq_level_idx[i], + seq_params->tier[i]); + // Level with seq_level_idx = 31 returns a high "dummy" bitrate to pass + // the check + if (cm->op_params[i].bitrate == 0) + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "AV1 does not support this combination of " + "profile, level, and tier."); + // Buffer size in bits/s is bitrate in bits/s * 1 s + 
cm->op_params[i].buffer_size = cm->op_params[i].bitrate; + } +#endif + if (cm->timing_info_present && cm->timing_info.equal_picture_interval && + !cm->op_params[i].decoder_model_param_present_flag) { + // When the decoder_model_parameters are not sent for this op, set + // the default ones that can be used with the resource availability mode + cm->op_params[i].decoder_buffer_delay = 70000; + cm->op_params[i].encoder_buffer_delay = 20000; + cm->op_params[i].low_delay_mode_flag = 0; + } + +#ifdef ORI_CODE + if (seq_params->display_model_info_present_flag) { + cm->op_params[i].display_model_param_present_flag = aom_rb_read_bit(-1, defmark, rb); + if (cm->op_params[i].display_model_param_present_flag) { + cm->op_params[i].initial_display_delay = + aom_rb_read_literal(-1, defmark, rb, 4) + 1; + if (cm->op_params[i].initial_display_delay > 10) + aom_internal_error( + &cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "AV1 does not support more than 10 decoded frames delay"); + } else { + cm->op_params[i].initial_display_delay = 10; + } + } else { + cm->op_params[i].display_model_param_present_flag = 0; + cm->op_params[i].initial_display_delay = 10; + } +#endif + } + } + // This decoder supports all levels. 
Choose operating point provided by + // external means + operating_point = pbi->operating_point; + if (operating_point < 0 || + operating_point > seq_params->operating_points_cnt_minus_1) + operating_point = 0; + pbi->current_operating_point = + seq_params->operating_point_idc[operating_point]; + if (aom_get_num_layers_from_operating_point_idc( + pbi->current_operating_point, &cm->number_spatial_layers, + &cm->number_temporal_layers) != AOM_CODEC_OK) { + cm->error.error_code = AOM_CODEC_ERROR; + return 0; + } + + av1_read_sequence_header(cm, params, seq_params); +#ifdef ORI_CODE + av1_read_color_config(rb, pbi->allow_lowbitdepth, seq_params, &cm->error); + if (!(seq_params->subsampling_x == 0 && seq_params->subsampling_y == 0) && + !(seq_params->subsampling_x == 1 && seq_params->subsampling_y == 1) && + !(seq_params->subsampling_x == 1 && seq_params->subsampling_y == 0)) { + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "Only 4:4:4, 4:2:2 and 4:2:0 are currently supported, " + "%d %d subsampling is not supported.\n", + seq_params->subsampling_x, seq_params->subsampling_y); + } + seq_params->film_grain_params_present = aom_rb_read_bit(-1, "<film_grain_params_present>", rb); + + if (av1_check_trailing_bits(pbi, rb) != 0) { + // cm->error.error_code is already set. + return 0; + } +#endif + + // If a sequence header has been decoded before, we check if the new + // one is consistent with the old one. 
+ if (pbi->sequence_header_ready) { + if (!are_seq_headers_consistent(&cm->seq_params, seq_params)) + pbi->sequence_header_changed = 1; + } + + cm->seq_params = *seq_params; + pbi->sequence_header_ready = 1; + return 0; + +} + +int aom_decode_frame_from_obus(AV1Decoder *pbi, union param_u *params, int obu_type) +{ + AV1_COMMON *const cm = pbi->common; + ObuHeader obu_header; + int frame_decoding_finished = 0; + uint32_t frame_header_size = 0; + + //struct aom_read_bit_buffer rb; + size_t payload_size = 0; + size_t decoded_payload_size = 0; + size_t obu_payload_offset = 0; + //size_t bytes_read = 0; + + memset(&obu_header, 0, sizeof(obu_header)); +#ifdef ORI_CODE + pbi->seen_frame_header = 0; +#else + /* set in the test.c*/ +#endif + + obu_header.type = obu_type; + pbi->cur_obu_type = obu_header.type; + if (av1_is_debug(AOM_DEBUG_PRINT_LIST_INFO)) + dump_params(pbi, params); + switch (obu_header.type) { + case OBU_SEQUENCE_HEADER: + decoded_payload_size = read_sequence_header_obu(pbi, params); + if (cm->error.error_code != AOM_CODEC_OK) return -1; + break; + + case OBU_FRAME_HEADER: + case OBU_REDUNDANT_FRAME_HEADER: + case OBU_FRAME: + if (obu_header.type == OBU_REDUNDANT_FRAME_HEADER) { + if (!pbi->seen_frame_header) { + cm->error.error_code = AOM_CODEC_CORRUPT_FRAME; + return -1; + } + } else { + // OBU_FRAME_HEADER or OBU_FRAME. + if (pbi->seen_frame_header) { + cm->error.error_code = AOM_CODEC_CORRUPT_FRAME; + return -1; + } + } + // Only decode first frame header received + if (!pbi->seen_frame_header || + (cm->large_scale_tile && !pbi->camera_frame_header_ready)) { + frame_header_size = av1_decode_frame_headers_and_setup( + pbi, /*&rb, data, p_data_end,*/obu_header.type != OBU_FRAME, params); + pbi->seen_frame_header = 1; + if (!pbi->ext_tile_debug && cm->large_scale_tile) + pbi->camera_frame_header_ready = 1; + } else { + // TODO(wtc): Verify that the frame_header_obu is identical to the + // original frame_header_obu. 
For now just skip frame_header_size + // bytes in the bit buffer. + if (frame_header_size > payload_size) { + cm->error.error_code = AOM_CODEC_CORRUPT_FRAME; + return -1; + } + assert(rb.bit_offset == 0); +#ifdef ORI_CODE + rb.bit_offset = 8 * frame_header_size; +#endif + } + + decoded_payload_size = frame_header_size; + pbi->frame_header_size = frame_header_size; + + if (cm->show_existing_frame) { + if (obu_header.type == OBU_FRAME) { + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + return -1; + } + frame_decoding_finished = 1; + pbi->seen_frame_header = 0; + break; + } + + // In large scale tile coding, decode the common camera frame header + // before any tile list OBU. + if (!pbi->ext_tile_debug && pbi->camera_frame_header_ready) { + frame_decoding_finished = 1; + // Skip the rest of the frame data. + decoded_payload_size = payload_size; + // Update data_end. +#ifdef ORI_CODE + *p_data_end = data_end; +#endif + break; + } +#if 0 //def AML + frame_decoding_finished = 1; +#endif + if (obu_header.type != OBU_FRAME) break; + obu_payload_offset = frame_header_size; + // Byte align the reader before reading the tile group. + // byte_alignment() has set cm->error.error_code if it returns -1. +#ifdef ORI_CODE + if (byte_alignment(cm, &rb)) return -1; + AOM_FALLTHROUGH_INTENDED; // fall through to read tile group. 
+#endif + default: + break; + } + return frame_decoding_finished; +} + +int get_buffer_index(AV1Decoder *pbi, RefCntBuffer *buffer) +{ + AV1_COMMON *const cm = pbi->common; + int i = -1; + + if (buffer) { + for (i = 0; i < FRAME_BUFFERS; i++) { + RefCntBuffer *buf = + &cm->buffer_pool->frame_bufs[i]; + if (buf == buffer) { + break; + } + } + } + return i; +} + +void dump_buffer(RefCntBuffer *buf) +{ + int i; + pr_info("ref_count %d, vf_ref %d, order_hint %d, w/h(%d,%d) showable_frame %d frame_type %d canvas(%d,%d) w/h(%d,%d) mi_c/r(%d,%d) header 0x%x ref_deltas(", + buf->ref_count, buf->buf.vf_ref, buf->order_hint, buf->width, buf->height, buf->showable_frame, buf->frame_type, + buf->buf.mc_canvas_y, buf->buf.mc_canvas_u_v, + buf->buf.y_crop_width, buf->buf.y_crop_height, + buf->mi_cols, buf->mi_rows, + buf->buf.header_adr); + for (i = 0; i < REF_FRAMES; i++) + pr_info("%d,", buf->ref_deltas[i]); + pr_info("), ref_order_hints("); + + for (i = 0; i < INTER_REFS_PER_FRAME; i++) + pr_info("%d ", buf->ref_order_hints[i]); + pr_info(")"); +} + +void dump_ref_buffer_info(AV1Decoder *pbi, int i) +{ + AV1_COMMON *const cm = pbi->common; + pr_info("remapped_ref_idx %d, ref_frame_sign_bias %d, ref_frame_id %d, valid_for_referencing %d ref_frame_side %d ref_frame_map idx %d, next_ref_frame_map idx %d", + cm->remapped_ref_idx[i], + cm->ref_frame_sign_bias[i], + cm->ref_frame_id[i], + cm->valid_for_referencing[i], + cm->ref_frame_side[i], + get_buffer_index(pbi, cm->ref_frame_map[i]), + get_buffer_index(pbi, cm->next_ref_frame_map[i])); +} + +void dump_mv_refs(AV1Decoder *pbi) +{ + int i, j; + AV1_COMMON *const cm = pbi->common; + for (i = 0; i < cm->mv_ref_id_index; i++) { + pr_info("%d: ref_id %d cal_tpl_mvs %d mv_ref_offset: ", + i, cm->mv_ref_id[i], cm->mv_cal_tpl_mvs[i]); + for (j = 0; j < REF_FRAMES; j++) + pr_info("%d ", cm->mv_ref_offset[i][j]); + pr_info("\n"); + } +} + +void dump_ref_spec_bufs(AV1Decoder *pbi) +{ + int i; + AV1_COMMON *const cm = pbi->common; + for (i 
= 0; i < INTER_REFS_PER_FRAME; ++i) { + PIC_BUFFER_CONFIG *pic_config = av1_get_ref_frame_spec_buf(cm, LAST_FRAME + i); + if (pic_config == NULL) continue; + pr_info("%d: index %d order_hint %d header 0x%x dw_header 0x%x canvas(%d,%d) mv_wr_start 0x%x lcu_total %d\n", + i, pic_config->index, + pic_config->order_hint, + pic_config->header_adr, +#ifdef AOM_AV1_MMU_DW + pic_config->header_dw_adr, +#else + 0, +#endif + pic_config->mc_canvas_y, + pic_config->mc_canvas_u_v, + pic_config->mpred_mv_wr_start_addr, + pic_config->lcu_total + ); + } +} + +#ifdef SUPPORT_SCALE_FACTOR +void dump_scale_factors(AV1Decoder *pbi) +{ + int i; + AV1_COMMON *const cm = pbi->common; + for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { + struct scale_factors *const sf = + get_ref_scale_factors(cm, i); + if (sf) + pr_info("%d: is_scaled %d x_scale_fp %d, y_scale_fp %d\n", + i, av1_is_scaled(sf), + sf->x_scale_fp, sf->y_scale_fp); + else + pr_info("%d: sf null\n", i); + } +} + +#endif + +void dump_buffer_status(AV1Decoder *pbi) +{ + int i; + AV1_COMMON *const cm = pbi->common; + BufferPool *const pool = cm->buffer_pool; + unsigned long flags; + + lock_buffer_pool(pool, flags); + + pr_info("%s: pbi %p cm %p cur_frame %p\n", __func__, pbi, cm, cm->cur_frame); + + pr_info("Buffer Pool:\n"); + for (i = 0; i < FRAME_BUFFERS; i++) { + RefCntBuffer *buf = + &cm->buffer_pool->frame_bufs[i]; + pr_info("%d: ", i); + if (buf) + dump_buffer(buf); + pr_info("\n"); + } + + if (cm->prev_frame) { + pr_info("prev_frame (%d): ", + get_buffer_index(pbi, cm->prev_frame)); + dump_buffer(cm->prev_frame); + pr_info("\n"); + } + if (cm->cur_frame) { + pr_info("cur_frame (%d): ", + get_buffer_index(pbi, cm->cur_frame)); + dump_buffer(cm->cur_frame); + pr_info("\n"); + } + pr_info("REF_FRAMES Info(ref buf is ref_frame_map[remapped_ref_idx[i-1]], i=1~7):\n"); + for (i = 0; i < REF_FRAMES; i++) { + pr_info("%d: ", i); + dump_ref_buffer_info(pbi, i); + pr_info("\n"); + } + pr_info("Ref Spec Buffers:\n"); + 
dump_ref_spec_bufs(pbi); + + pr_info("MV refs:\n"); + dump_mv_refs(pbi); + +#ifdef SUPPORT_SCALE_FACTOR + pr_info("Scale factors:\n"); + dump_scale_factors(pbi); +#endif + unlock_buffer_pool(pool, flags); +} + + +struct param_dump_item_s { + unsigned int size; + char* name; + unsigned int adr_off; +} param_dump_items[] = { + {1, "profile", (unsigned long)&(((union param_u *)0)->p.profile )}, + {1, "still_picture", (unsigned long)&(((union param_u *)0)->p.still_picture )}, + {1, "reduced_still_picture_hdr", (unsigned long)&(((union param_u *)0)->p.reduced_still_picture_hdr )}, + {1, "decoder_model_info_present_flag", (unsigned long)&(((union param_u *)0)->p.decoder_model_info_present_flag)}, + {1, "max_frame_width", (unsigned long)&(((union param_u *)0)->p.max_frame_width )}, + {1, "max_frame_height", (unsigned long)&(((union param_u *)0)->p.max_frame_height )}, + {1, "frame_id_numbers_present_flag", (unsigned long)&(((union param_u *)0)->p.frame_id_numbers_present_flag )}, + {1, "delta_frame_id_length", (unsigned long)&(((union param_u *)0)->p.delta_frame_id_length )}, + {1, "frame_id_length", (unsigned long)&(((union param_u *)0)->p.frame_id_length )}, + {1, "order_hint_bits_minus_1", (unsigned long)&(((union param_u *)0)->p.order_hint_bits_minus_1 )}, + {1, "enable_order_hint", (unsigned long)&(((union param_u *)0)->p.enable_order_hint )}, + {1, "enable_dist_wtd_comp", (unsigned long)&(((union param_u *)0)->p.enable_dist_wtd_comp )}, + {1, "enable_ref_frame_mvs", (unsigned long)&(((union param_u *)0)->p.enable_ref_frame_mvs )}, + {1, "enable_superres", (unsigned long)&(((union param_u *)0)->p.enable_superres )}, + {1, "superres_scale_denominator", (unsigned long)&(((union param_u *)0)->p.superres_scale_denominator )}, + {1, "show_existing_frame", (unsigned long)&(((union param_u *)0)->p.show_existing_frame )}, + {1, "frame_type", (unsigned long)&(((union param_u *)0)->p.frame_type )}, + {1, "show_frame", (unsigned long)&(((union param_u *)0)->p.show_frame )}, + 
{1, "e.r.r.o.r_resilient_mode", (unsigned long)&(((union param_u *)0)->p.error_resilient_mode )}, + {1, "refresh_frame_flags", (unsigned long)&(((union param_u *)0)->p.refresh_frame_flags )}, + {1, "showable_frame", (unsigned long)&(((union param_u *)0)->p.showable_frame )}, + {1, "current_frame_id", (unsigned long)&(((union param_u *)0)->p.current_frame_id )}, + {1, "frame_size_override_flag", (unsigned long)&(((union param_u *)0)->p.frame_size_override_flag )}, + {1, "order_hint", (unsigned long)&(((union param_u *)0)->p.order_hint )}, + {1, "primary_ref_frame", (unsigned long)&(((union param_u *)0)->p.primary_ref_frame )}, + {1, "frame_refs_short_signaling", (unsigned long)&(((union param_u *)0)->p.frame_refs_short_signaling )}, + {1, "frame_width", (unsigned long)&(((union param_u *)0)->p.frame_width )}, + {1, "dec_frame_width", (unsigned long)&(((union param_u *)0)->p.dec_frame_width )}, + {1, "frame_width_scaled", (unsigned long)&(((union param_u *)0)->p.frame_width_scaled )}, + {1, "frame_height", (unsigned long)&(((union param_u *)0)->p.frame_height )}, + {1, "reference_mode", (unsigned long)&(((union param_u *)0)->p.reference_mode )}, + {1, "update_parameters", (unsigned long)&(((union param_u *)0)->p.update_parameters )}, + {1, "film_grain_params_ref_idx", (unsigned long)&(((union param_u *)0)->p.film_grain_params_ref_idx )}, + {1, "allow_ref_frame_mvs", (unsigned long)&(((union param_u *)0)->p.allow_ref_frame_mvs )}, + {1, "lst_ref", (unsigned long)&(((union param_u *)0)->p.lst_ref )}, + {1, "gld_ref", (unsigned long)&(((union param_u *)0)->p.gld_ref )}, + {INTER_REFS_PER_FRAME, "remapped_ref_idx", (unsigned long)&(((union param_u *)0)->p.remapped_ref_idx[0] )}, + {INTER_REFS_PER_FRAME, "delta_frame_id_minus_1", (unsigned long)&(((union param_u *)0)->p.delta_frame_id_minus_1[0] )}, + {REF_FRAMES, "ref_order_hint", (unsigned long)&(((union param_u *)0)->p.ref_order_hint[0] )}, +}; + +void dump_params(AV1Decoder *pbi, union param_u *params) +{ + int i, j; 
+ unsigned char *start_adr = (unsigned char*)params; + + pr_info("============ params:\n"); + for (i = 0; i < sizeof(param_dump_items) / sizeof(param_dump_items[0]); i++) { + for (j = 0; j < param_dump_items[i].size; j++) { + if (param_dump_items[i].size > 1) + pr_info("%s(%d): 0x%x\n", + param_dump_items[i].name, j, + *((unsigned short*)(start_adr + param_dump_items[i].adr_off + j * 2))); + else + pr_info("%s: 0x%x\n", param_dump_items[i].name, + *((unsigned short*)(start_adr + param_dump_items[i].adr_off + j * 2))); + } + } +} + +/*static void raw_write_image(AV1Decoder *pbi, PIC_BUFFER_CONFIG *sd) +{ + printf("$$$$$$$ output image\n"); +}*/ + +/* + return 0, need decoding data + 1, decoding done + -1, decoding error + +*/ +int av1_bufmgr_process(AV1Decoder *pbi, union param_u *params, + unsigned char new_compressed_data, int obu_type) +{ + AV1_COMMON *const cm = pbi->common; + int j; + // Release any pending output frames from the previous decoder_decode call. + // We need to do this even if the decoder is being flushed or the input + // arguments are invalid. 
+ BufferPool *const pool = cm->buffer_pool; + int frame_decoded; + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s: pbi %p cm %p cur_frame %p\n", __func__, pbi, cm, cm->cur_frame); + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s: new_compressed_data= %d\n", __func__, new_compressed_data); + for (j = 0; j < pbi->num_output_frames; j++) { + decrease_ref_count(pbi, pbi->output_frames[j], pool); + } + pbi->num_output_frames = 0; + // + if (new_compressed_data) { + if (assign_cur_frame_new_fb(cm) == NULL) { + cm->error.error_code = AOM_CODEC_MEM_ERROR; + return -1; + } + pbi->seen_frame_header = 0; + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "New_compressed_data (%d)\n", new_compressed_data_count++); + + } + + frame_decoded = + aom_decode_frame_from_obus(pbi, params, obu_type); + + if (pbi->cur_obu_type == OBU_FRAME_HEADER || + pbi->cur_obu_type == OBU_REDUNDANT_FRAME_HEADER || + pbi->cur_obu_type == OBU_FRAME) { + if (av1_is_debug(AOM_DEBUG_PRINT_LIST_INFO)) { + pr_info("after bufmgr (frame_decoded %d seen_frame_header %d): ", + frame_decoded, pbi->seen_frame_header); + dump_buffer_status(pbi); + } + } + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s: pbi %p cm %p cur_frame %p\n", __func__, pbi, cm, cm->cur_frame); + + return frame_decoded; + +} + +int av1_get_raw_frame(AV1Decoder *pbi, size_t index, PIC_BUFFER_CONFIG **sd) { + if (index >= pbi->num_output_frames) return -1; + *sd = &pbi->output_frames[index]->buf; + //*grain_params = &pbi->output_frames[index]->film_grain_params; + //aom_clear_system_state(); + return 0; +} + +int av1_bufmgr_postproc(AV1Decoder *pbi, unsigned char frame_decoded) +{ + PIC_BUFFER_CONFIG *sd = NULL; + int index; +#if 0 + if (frame_decoded) { + printf("before swap_frame_buffers: "); + dump_buffer_status(pbi); + } +#endif + swap_frame_buffers(pbi, frame_decoded); + if (frame_decoded) { + if (av1_is_debug(AOM_DEBUG_PRINT_LIST_INFO)) { + pr_info("after swap_frame_buffers: "); + dump_buffer_status(pbi); + } + } + if (frame_decoded) { + pbi->decoding_first_frame = 0; + 
} + + + for (index = 0;;index++) { + if (av1_get_raw_frame(pbi, index, &sd) < 0) + break; + if (sd) + av1_raw_write_image(pbi, sd); + } + return 0; +} + +int aom_realloc_frame_buffer(AV1_COMMON *cm, PIC_BUFFER_CONFIG *pic, + int width, int height, unsigned int order_hint) +{ + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s, index 0x%x, width 0x%x, height 0x%x order_hint 0x%x\n", + __func__, pic->index, width, height, order_hint); + pic->y_crop_width = width; + pic->y_crop_height = height; + pic->order_hint = order_hint; + return 0; +} + + +unsigned char av1_frame_is_inter(const AV1_COMMON *const cm) { + unsigned char is_inter = cm->cur_frame && (cm->cur_frame->frame_type != KEY_FRAME) + && (cm->current_frame.frame_type != INTRA_ONLY_FRAME); + return is_inter; +} + +PIC_BUFFER_CONFIG *av1_get_ref_frame_spec_buf( + const AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) { + RefCntBuffer *buf = get_ref_frame_buf(cm, ref_frame); + if (buf) { + buf->buf.order_hint = buf->order_hint; + return &(buf->buf); + } + return NULL; +} + +struct scale_factors *av1_get_ref_scale_factors( + AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) +{ + return get_ref_scale_factors(cm, ref_frame); +} + +void av1_set_next_ref_frame_map(AV1Decoder *pbi) { + int ref_index = 0; + int mask; + AV1_COMMON *const cm = pbi->common; + int check_on_show_existing_frame; + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s, %d, mask 0x%x, show_existing_frame %d, reset_decoder_state %d\n", + __func__, pbi->camera_frame_header_ready, + cm->current_frame.refresh_frame_flags, + cm->show_existing_frame, + pbi->reset_decoder_state + ); + if (!pbi->camera_frame_header_ready) { + for (mask = cm->current_frame.refresh_frame_flags; mask; mask >>= 1) { + cm->next_used_ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index]; + ++ref_index; + } + + check_on_show_existing_frame = + !cm->show_existing_frame || pbi->reset_decoder_state; + for (; ref_index < REF_FRAMES && check_on_show_existing_frame; + ++ref_index) 
{ + cm->next_used_ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index]; + } + } +} + +unsigned int av1_get_next_used_ref_info( + const AV1_COMMON *const cm, int i) { + /* + i = 0~1 orde_hint map + i = 2~10 size map[i-2] + */ + unsigned int info = 0; + int j; + if (i < 2) { + /*next_used_ref_frame_map has 8 items*/ + for (j = 0; j < 4; j++) { + RefCntBuffer *buf = + cm->next_used_ref_frame_map[(i * 4) + j]; + if (buf) + info |= ((buf->buf.order_hint & 0xff) + << (j * 8)); + } + } else if (i < 10) { + RefCntBuffer *buf = + cm->next_used_ref_frame_map[i-2]; + if (buf) + info = (buf->buf.y_crop_width << 16) | (buf->buf.y_crop_height & 0xffff); + } else { + for (j = 0; j < 4; j++) { + RefCntBuffer *buf = + cm->next_used_ref_frame_map[((i - 10) * 4) + j]; + if (buf) + info |= ((buf->buf.index & 0xff) + << (j * 8)); + } + } + return info; +} + +RefCntBuffer *av1_get_primary_ref_frame_buf( + const AV1_COMMON *const cm) +{ + return get_primary_ref_frame_buf(cm); +}
diff --git a/drivers/frame_provider/decoder/vav1/av1_global.h b/drivers/frame_provider/decoder/vav1/av1_global.h new file mode 100644 index 0000000..e12b224 --- /dev/null +++ b/drivers/frame_provider/decoder/vav1/av1_global.h
@@ -0,0 +1,2336 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef AV1_GLOBAL_H_ +#define AV1_GLOBAL_H_ +#define AOM_AV1_MMU_DW +#ifndef HAVE_NEON +#define HAVE_NEON 0 +#endif +#ifndef CONFIG_ACCOUNTING +#define CONFIG_ACCOUNTING 0 +#endif +#ifndef CONFIG_INSPECTION +#define CONFIG_INSPECTION 0 +#endif +#ifndef CONFIG_LPF_MASK +#define CONFIG_LPF_MASK 0 +#endif +#ifndef CONFIG_SIZE_LIMIT +#define CONFIG_SIZE_LIMIT 0 +#endif + +#define SUPPORT_SCALE_FACTOR +#define USE_SCALED_WIDTH_FROM_UCODE +#define AML +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +#define AML_DEVICE +#endif +#ifdef BUFMGR_FOR_SIM +#define printf io_printf +#endif + +#ifndef INT_MAX +#define INT_MAX 0x7FFFFFFF +#endif +#define AOMMIN(x, y) (((x) < (y)) ? (x) : (y)) +#define AOMMAX(x, y) (((x) > (y)) ? 
(x) : (y)) + + +#ifdef AML +#define AOM_AV1_MMU +#define FILM_GRAIN_REG_SIZE 39 +typedef struct buff_s +{ + uint32_t buf_start; + uint32_t buf_size; + uint32_t buf_end; +} buff_t; + +typedef struct BuffInfo_s +{ + uint32_t max_width; + uint32_t max_height; + uint32_t start_adr; + uint32_t end_adr; + buff_t ipp; + buff_t sao_abv; + buff_t sao_vb; + buff_t short_term_rps; + buff_t vps; + buff_t seg_map; + buff_t daala_top; + buff_t sao_up; + buff_t swap_buf; + buff_t cdf_buf; + buff_t gmc_buf; + buff_t scalelut; + buff_t dblk_para; + buff_t dblk_data; + buff_t cdef_data; + buff_t ups_data; +#ifdef AOM_AV1_MMU + buff_t mmu_vbh; + buff_t cm_header; +#endif +#ifdef AOM_AV1_MMU_DW + buff_t mmu_vbh_dw; + buff_t cm_header_dw; +#endif + buff_t fgs_table; + buff_t mpred_above; + buff_t mpred_mv; + buff_t rpm; + buff_t lmem; +} BuffInfo_t; +#endif + +#if 0 +#define va_start(v,l) __builtin_va_start(v,l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v,l) __builtin_va_arg(v,l) +#endif + +/* +mem.h +*/ +#if (defined(__GNUC__) && __GNUC__) || defined(__SUNPRO_C) +#define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n))) +#elif defined(_MSC_VER) +#define DECLARE_ALIGNED(n, typ, val) __declspec(align(n)) typ val +#else +#warning No alignment directives known for this compiler. +#define DECLARE_ALIGNED(n, typ, val) typ val +#endif + +/* Indicates that the usage of the specified variable has been audited to assure + * that it's safe to use uninitialized. Silences 'may be used uninitialized' + * warnings on gcc. 
+ */ +#if defined(__GNUC__) && __GNUC__ +#define UNINITIALIZED_IS_SAFE(x) x = x +#else +#define UNINITIALIZED_IS_SAFE(x) x +#endif + +#if HAVE_NEON && defined(_MSC_VER) +#define __builtin_prefetch(x) +#endif + +/* Shift down with rounding for use when n >= 0, value >= 0 */ +#define ROUND_POWER_OF_TWO(value, n) (((value) + (((1 << (n)) >> 1))) >> (n)) + +/* Shift down with rounding for signed integers, for use when n >= 0 */ +#define ROUND_POWER_OF_TWO_SIGNED(value, n) \ + (((value) < 0) ? -ROUND_POWER_OF_TWO(-(value), (n)) \ + : ROUND_POWER_OF_TWO((value), (n))) + +/* Shift down with rounding for use when n >= 0, value >= 0 for (64 bit) */ +#define ROUND_POWER_OF_TWO_64(value, n) \ + (((value) + ((((int64_t)1 << (n)) >> 1))) >> (n)) +/* Shift down with rounding for signed integers, for use when n >= 0 (64 bit) */ +#define ROUND_POWER_OF_TWO_SIGNED_64(value, n) \ + (((value) < 0) ? -ROUND_POWER_OF_TWO_64(-(value), (n)) \ + : ROUND_POWER_OF_TWO_64((value), (n))) + +/* shift right or left depending on sign of n */ +#define RIGHT_SIGNED_SHIFT(value, n) \ + ((n) < 0 ? 
((value) << (-(n))) : ((value) >> (n))) + +#define ALIGN_POWER_OF_TWO(value, n) \ + (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1)) + +#define DIVIDE_AND_ROUND(x, y) (((x) + ((y) >> 1)) / (y)) + +#define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1)) +#define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1)) + +#ifdef AML +#define TYPEDEF typedef +#define UENUM1BYTE(enumvar) enumvar +#define SENUM1BYTE(enumvar) enumvar +#define UENUM2BYTE(enumvar) enumvar +#define SENUM2BYTE(enumvar) enumvar +#define UENUM4BYTE(enumvar) enumvar +#define SENUM4BYTE(enumvar) enumvar + +#else +#define TYPEDEF +/*!\brief force enum to be unsigned 1 byte*/ +#define UENUM1BYTE(enumvar) \ + ; \ + typedef uint8_t enumvar + +/*!\brief force enum to be signed 1 byte*/ +#define SENUM1BYTE(enumvar) \ + ; \ + typedef int8_t enumvar + +/*!\brief force enum to be unsigned 2 byte*/ +#define UENUM2BYTE(enumvar) \ + ; \ + typedef uint16_t enumvar + +/*!\brief force enum to be signed 2 byte*/ +#define SENUM2BYTE(enumvar) \ + ; \ + typedef int16_t enumvar + +/*!\brief force enum to be unsigned 4 byte*/ +#define UENUM4BYTE(enumvar) \ + ; \ + typedef uint32_t enumvar + +/*!\brief force enum to be unsigned 4 byte*/ +#define SENUM4BYTE(enumvar) \ + ; \ + typedef int32_t enumvar +#endif + + +/* +#include "enums.h" +*/ +#undef MAX_SB_SIZE + +// Max superblock size +#define MAX_SB_SIZE_LOG2 7 +#define MAX_SB_SIZE (1 << MAX_SB_SIZE_LOG2) +#define MAX_SB_SQUARE (MAX_SB_SIZE * MAX_SB_SIZE) + +// Min superblock size +#define MIN_SB_SIZE_LOG2 6 + +// Pixels per Mode Info (MI) unit +#define MI_SIZE_LOG2 2 +#define MI_SIZE (1 << MI_SIZE_LOG2) + +// MI-units per max superblock (MI Block - MIB) +#define MAX_MIB_SIZE_LOG2 (MAX_SB_SIZE_LOG2 - MI_SIZE_LOG2) +#define MAX_MIB_SIZE (1 << MAX_MIB_SIZE_LOG2) + +// MI-units per min superblock +#define MIN_MIB_SIZE_LOG2 (MIN_SB_SIZE_LOG2 - MI_SIZE_LOG2) + +// Mask to extract MI offset within max MIB +#define MAX_MIB_MASK (MAX_MIB_SIZE - 1) + +// 
Maximum number of tile rows and tile columns +#define MAX_TILE_ROWS 64 +#define MAX_TILE_COLS 64 + +#define MAX_VARTX_DEPTH 2 + +#define MI_SIZE_64X64 (64 >> MI_SIZE_LOG2) +#define MI_SIZE_128X128 (128 >> MI_SIZE_LOG2) + +#define MAX_PALETTE_SQUARE (64 * 64) +// Maximum number of colors in a palette. +#define PALETTE_MAX_SIZE 8 +// Minimum number of colors in a palette. +#define PALETTE_MIN_SIZE 2 + +#define FRAME_OFFSET_BITS 5 +#define MAX_FRAME_DISTANCE ((1 << FRAME_OFFSET_BITS) - 1) + +// 4 frame filter levels: y plane vertical, y plane horizontal, +// u plane, and v plane +#define FRAME_LF_COUNT 4 +#define DEFAULT_DELTA_LF_MULTI 0 +#define MAX_MODE_LF_DELTAS 2 + +#define DIST_PRECISION_BITS 4 +#define DIST_PRECISION (1 << DIST_PRECISION_BITS) // 16 + +#define PROFILE_BITS 3 +// The following three profiles are currently defined. +// Profile 0. 8-bit and 10-bit 4:2:0 and 4:0:0 only. +// Profile 1. 8-bit and 10-bit 4:4:4 +// Profile 2. 8-bit and 10-bit 4:2:2 +// 12-bit 4:0:0, 4:2:2 and 4:4:4 +// Since we have three bits for the profiles, it can be extended later. +TYPEDEF enum { + PROFILE_0, + PROFILE_1, + PROFILE_2, + MAX_PROFILES, +} SENUM1BYTE(BITSTREAM_PROFILE); + +#define OP_POINTS_CNT_MINUS_1_BITS 5 +#define OP_POINTS_IDC_BITS 12 + +// Note: Some enums use the attribute 'packed' to use smallest possible integer +// type, so that we can save memory when they are used in structs/arrays. 
+ +typedef enum ATTRIBUTE_PACKED { + BLOCK_4X4, + BLOCK_4X8, + BLOCK_8X4, + BLOCK_8X8, + BLOCK_8X16, + BLOCK_16X8, + BLOCK_16X16, + BLOCK_16X32, + BLOCK_32X16, + BLOCK_32X32, + BLOCK_32X64, + BLOCK_64X32, + BLOCK_64X64, + BLOCK_64X128, + BLOCK_128X64, + BLOCK_128X128, + BLOCK_4X16, + BLOCK_16X4, + BLOCK_8X32, + BLOCK_32X8, + BLOCK_16X64, + BLOCK_64X16, + BLOCK_SIZES_ALL, + BLOCK_SIZES = BLOCK_4X16, + BLOCK_INVALID = 255, + BLOCK_LARGEST = (BLOCK_SIZES - 1) +} BLOCK_SIZE2; + +// 4X4, 8X8, 16X16, 32X32, 64X64, 128X128 +#define SQR_BLOCK_SIZES 6 + +TYPEDEF enum { + PARTITION_NONE, + PARTITION_HORZ, + PARTITION_VERT, + PARTITION_SPLIT, + PARTITION_HORZ_A, // HORZ split and the top partition is split again + PARTITION_HORZ_B, // HORZ split and the bottom partition is split again + PARTITION_VERT_A, // VERT split and the left partition is split again + PARTITION_VERT_B, // VERT split and the right partition is split again + PARTITION_HORZ_4, // 4:1 horizontal partition + PARTITION_VERT_4, // 4:1 vertical partition + EXT_PARTITION_TYPES, + PARTITION_TYPES = PARTITION_SPLIT + 1, + PARTITION_INVALID = 255 +} UENUM1BYTE(PARTITION_TYPE); + +typedef char PARTITION_CONTEXT; +#define PARTITION_PLOFFSET 4 // number of probability models per block size +#define PARTITION_BLOCK_SIZES 5 +#define PARTITION_CONTEXTS (PARTITION_BLOCK_SIZES * PARTITION_PLOFFSET) + +// block transform size +TYPEDEF enum { + TX_4X4, // 4x4 transform + TX_8X8, // 8x8 transform + TX_16X16, // 16x16 transform + TX_32X32, // 32x32 transform + TX_64X64, // 64x64 transform + TX_4X8, // 4x8 transform + TX_8X4, // 8x4 transform + TX_8X16, // 8x16 transform + TX_16X8, // 16x8 transform + TX_16X32, // 16x32 transform + TX_32X16, // 32x16 transform + TX_32X64, // 32x64 transform + TX_64X32, // 64x32 transform + TX_4X16, // 4x16 transform + TX_16X4, // 16x4 transform + TX_8X32, // 8x32 transform + TX_32X8, // 32x8 transform + TX_16X64, // 16x64 transform + TX_64X16, // 64x16 transform + TX_SIZES_ALL, // Includes 
rectangular transforms + TX_SIZES = TX_4X8, // Does NOT include rectangular transforms + TX_SIZES_LARGEST = TX_64X64, + TX_INVALID = 255 // Invalid transform size +} UENUM1BYTE(TX_SIZE); + +#define TX_SIZE_LUMA_MIN (TX_4X4) +/* We don't need to code a transform size unless the allowed size is at least + one more than the minimum. */ +#define TX_SIZE_CTX_MIN (TX_SIZE_LUMA_MIN + 1) + +// Maximum tx_size categories +#define MAX_TX_CATS (TX_SIZES - TX_SIZE_CTX_MIN) +#define MAX_TX_DEPTH 2 + +#define MAX_TX_SIZE_LOG2 (6) +#define MAX_TX_SIZE (1 << MAX_TX_SIZE_LOG2) +#define MIN_TX_SIZE_LOG2 2 +#define MIN_TX_SIZE (1 << MIN_TX_SIZE_LOG2) +#define MAX_TX_SQUARE (MAX_TX_SIZE * MAX_TX_SIZE) + +// Pad 4 extra columns to remove horizontal availability check. +#define TX_PAD_HOR_LOG2 2 +#define TX_PAD_HOR 4 +// Pad 6 extra rows (2 on top and 4 on bottom) to remove vertical availability +// check. +#define TX_PAD_TOP 0 +#define TX_PAD_BOTTOM 4 +#define TX_PAD_VER (TX_PAD_TOP + TX_PAD_BOTTOM) +// Pad 16 extra bytes to avoid reading overflow in SIMD optimization. 
+#define TX_PAD_END 16 +#define TX_PAD_2D ((32 + TX_PAD_HOR) * (32 + TX_PAD_VER) + TX_PAD_END) + +// Number of maxium size transform blocks in the maximum size superblock +#define MAX_TX_BLOCKS_IN_MAX_SB_LOG2 ((MAX_SB_SIZE_LOG2 - MAX_TX_SIZE_LOG2) * 2) +#define MAX_TX_BLOCKS_IN_MAX_SB (1 << MAX_TX_BLOCKS_IN_MAX_SB_LOG2) + +// frame transform mode +TYPEDEF enum { + ONLY_4X4, // use only 4x4 transform + TX_MODE_LARGEST, // transform size is the largest possible for pu size + TX_MODE_SELECT, // transform specified for each block + TX_MODES, +} UENUM1BYTE(TX_MODE); + +// 1D tx types +TYPEDEF enum { + DCT_1D, + ADST_1D, + FLIPADST_1D, + IDTX_1D, + TX_TYPES_1D, +} UENUM1BYTE(TX_TYPE_1D); + +TYPEDEF enum { + DCT_DCT, // DCT in both horizontal and vertical + ADST_DCT, // ADST in vertical, DCT in horizontal + DCT_ADST, // DCT in vertical, ADST in horizontal + ADST_ADST, // ADST in both directions + FLIPADST_DCT, // FLIPADST in vertical, DCT in horizontal + DCT_FLIPADST, // DCT in vertical, FLIPADST in horizontal + FLIPADST_FLIPADST, // FLIPADST in both directions + ADST_FLIPADST, // ADST in vertical, FLIPADST in horizontal + FLIPADST_ADST, // FLIPADST in vertical, ADST in horizontal + IDTX, // Identity in both directions + V_DCT, // DCT in vertical, identity in horizontal + H_DCT, // Identity in vertical, DCT in horizontal + V_ADST, // ADST in vertical, identity in horizontal + H_ADST, // Identity in vertical, ADST in horizontal + V_FLIPADST, // FLIPADST in vertical, identity in horizontal + H_FLIPADST, // Identity in vertical, FLIPADST in horizontal + TX_TYPES, +} UENUM1BYTE(TX_TYPE); + +TYPEDEF enum { + REG_REG, + REG_SMOOTH, + REG_SHARP, + SMOOTH_REG, + SMOOTH_SMOOTH, + SMOOTH_SHARP, + SHARP_REG, + SHARP_SMOOTH, + SHARP_SHARP, +} UENUM1BYTE(DUAL_FILTER_TYPE); + +TYPEDEF enum { + // DCT only + EXT_TX_SET_DCTONLY, + // DCT + Identity only + EXT_TX_SET_DCT_IDTX, + // Discrete Trig transforms w/o flip (4) + Identity (1) + EXT_TX_SET_DTT4_IDTX, + // Discrete Trig transforms 
w/o flip (4) + Identity (1) + 1D Hor/vert DCT (2) + EXT_TX_SET_DTT4_IDTX_1DDCT, + // Discrete Trig transforms w/ flip (9) + Identity (1) + 1D Hor/Ver DCT (2) + EXT_TX_SET_DTT9_IDTX_1DDCT, + // Discrete Trig transforms w/ flip (9) + Identity (1) + 1D Hor/Ver (6) + EXT_TX_SET_ALL16, + EXT_TX_SET_TYPES +} UENUM1BYTE(TxSetType); + +#define IS_2D_TRANSFORM(tx_type) (tx_type < IDTX) + +#define EXT_TX_SIZES 4 // number of sizes that use extended transforms +#define EXT_TX_SETS_INTER 4 // Sets of transform selections for INTER +#define EXT_TX_SETS_INTRA 3 // Sets of transform selections for INTRA + +TYPEDEF enum { + AOM_LAST_FLAG = 1 << 0, + AOM_LAST2_FLAG = 1 << 1, + AOM_LAST3_FLAG = 1 << 2, + AOM_GOLD_FLAG = 1 << 3, + AOM_BWD_FLAG = 1 << 4, + AOM_ALT2_FLAG = 1 << 5, + AOM_ALT_FLAG = 1 << 6, + AOM_REFFRAME_ALL = (1 << 7) - 1 +} UENUM1BYTE(AOM_REFFRAME); + +TYPEDEF enum { + UNIDIR_COMP_REFERENCE, + BIDIR_COMP_REFERENCE, + COMP_REFERENCE_TYPES, +} UENUM1BYTE(COMP_REFERENCE_TYPE); + +/*enum { PLANE_TYPE_Y, PLANE_TYPE_UV, PLANE_TYPES } UENUM1BYTE(PLANE_TYPE);*/ + +#define CFL_ALPHABET_SIZE_LOG2 4 +#define CFL_ALPHABET_SIZE (1 << CFL_ALPHABET_SIZE_LOG2) +#define CFL_MAGS_SIZE ((2 << CFL_ALPHABET_SIZE_LOG2) + 1) +#define CFL_IDX_U(idx) (idx >> CFL_ALPHABET_SIZE_LOG2) +#define CFL_IDX_V(idx) (idx & (CFL_ALPHABET_SIZE - 1)) + +/*enum { CFL_PRED_U, CFL_PRED_V, CFL_PRED_PLANES } UENUM1BYTE(CFL_PRED_TYPE);*/ + +TYPEDEF enum { + CFL_SIGN_ZERO, + CFL_SIGN_NEG, + CFL_SIGN_POS, + CFL_SIGNS +} UENUM1BYTE(CFL_SIGN_TYPE); + +TYPEDEF enum { + CFL_DISALLOWED, + CFL_ALLOWED, + CFL_ALLOWED_TYPES +} UENUM1BYTE(CFL_ALLOWED_TYPE); + +// CFL_SIGN_ZERO,CFL_SIGN_ZERO is invalid +#define CFL_JOINT_SIGNS (CFL_SIGNS * CFL_SIGNS - 1) +// CFL_SIGN_U is equivalent to (js + 1) / 3 for js in 0 to 8 +#define CFL_SIGN_U(js) (((js + 1) * 11) >> 5) +// CFL_SIGN_V is equivalent to (js + 1) % 3 for js in 0 to 8 +#define CFL_SIGN_V(js) ((js + 1) - CFL_SIGNS * CFL_SIGN_U(js)) + +// There is no context when the 
alpha for a given plane is zero. +// So there are 2 fewer contexts than joint signs. +#define CFL_ALPHA_CONTEXTS (CFL_JOINT_SIGNS + 1 - CFL_SIGNS) +#define CFL_CONTEXT_U(js) (js + 1 - CFL_SIGNS) +// Also, the contexts are symmetric under swapping the planes. +#define CFL_CONTEXT_V(js) \ + (CFL_SIGN_V(js) * CFL_SIGNS + CFL_SIGN_U(js) - CFL_SIGNS) + +TYPEDEF enum { + PALETTE_MAP, + COLOR_MAP_TYPES, +} UENUM1BYTE(COLOR_MAP_TYPE); + +TYPEDEF enum { + TWO_COLORS, + THREE_COLORS, + FOUR_COLORS, + FIVE_COLORS, + SIX_COLORS, + SEVEN_COLORS, + EIGHT_COLORS, + PALETTE_SIZES +} UENUM1BYTE(PALETTE_SIZE); + +TYPEDEF enum { + PALETTE_COLOR_ONE, + PALETTE_COLOR_TWO, + PALETTE_COLOR_THREE, + PALETTE_COLOR_FOUR, + PALETTE_COLOR_FIVE, + PALETTE_COLOR_SIX, + PALETTE_COLOR_SEVEN, + PALETTE_COLOR_EIGHT, + PALETTE_COLORS +} UENUM1BYTE(PALETTE_COLOR); + +// Note: All directional predictors must be between V_PRED and D67_PRED (both +// inclusive). +TYPEDEF enum { + DC_PRED, // Average of above and left pixels + V_PRED, // Vertical + H_PRED, // Horizontal + D45_PRED, // Directional 45 degree + D135_PRED, // Directional 135 degree + D113_PRED, // Directional 113 degree + D157_PRED, // Directional 157 degree + D203_PRED, // Directional 203 degree + D67_PRED, // Directional 67 degree + SMOOTH_PRED, // Combination of horizontal and vertical interpolation + SMOOTH_V_PRED, // Vertical interpolation + SMOOTH_H_PRED, // Horizontal interpolation + PAETH_PRED, // Predict from the direction of smallest gradient + NEARESTMV, + NEARMV, + GLOBALMV, + NEWMV, + // Compound ref compound modes + NEAREST_NEARESTMV, + NEAR_NEARMV, + NEAREST_NEWMV, + NEW_NEARESTMV, + NEAR_NEWMV, + NEW_NEARMV, + GLOBAL_GLOBALMV, + NEW_NEWMV, + MB_MODE_COUNT, + INTRA_MODE_START = DC_PRED, + INTRA_MODE_END = NEARESTMV, + INTRA_MODE_NUM = INTRA_MODE_END - INTRA_MODE_START, + SINGLE_INTER_MODE_START = NEARESTMV, + SINGLE_INTER_MODE_END = NEAREST_NEARESTMV, + SINGLE_INTER_MODE_NUM = SINGLE_INTER_MODE_END - SINGLE_INTER_MODE_START, + 
COMP_INTER_MODE_START = NEAREST_NEARESTMV, + COMP_INTER_MODE_END = MB_MODE_COUNT, + COMP_INTER_MODE_NUM = COMP_INTER_MODE_END - COMP_INTER_MODE_START, + INTER_MODE_START = NEARESTMV, + INTER_MODE_END = MB_MODE_COUNT, + INTRA_MODES = PAETH_PRED + 1, // PAETH_PRED has to be the last intra mode. + INTRA_INVALID = MB_MODE_COUNT // For uv_mode in inter blocks +} UENUM1BYTE(PREDICTION_MODE); + +// TODO(ltrudeau) Do we really want to pack this? +// TODO(ltrudeau) Do we match with PREDICTION_MODE? +TYPEDEF enum { + UV_DC_PRED, // Average of above and left pixels + UV_V_PRED, // Vertical + UV_H_PRED, // Horizontal + UV_D45_PRED, // Directional 45 degree + UV_D135_PRED, // Directional 135 degree + UV_D113_PRED, // Directional 113 degree + UV_D157_PRED, // Directional 157 degree + UV_D203_PRED, // Directional 203 degree + UV_D67_PRED, // Directional 67 degree + UV_SMOOTH_PRED, // Combination of horizontal and vertical interpolation + UV_SMOOTH_V_PRED, // Vertical interpolation + UV_SMOOTH_H_PRED, // Horizontal interpolation + UV_PAETH_PRED, // Predict from the direction of smallest gradient + UV_CFL_PRED, // Chroma-from-Luma + UV_INTRA_MODES, + UV_MODE_INVALID, // For uv_mode in inter blocks +} UENUM1BYTE(UV_PREDICTION_MODE); + +TYPEDEF enum { + SIMPLE_TRANSLATION, + OBMC_CAUSAL, // 2-sided OBMC + WARPED_CAUSAL, // 2-sided WARPED + MOTION_MODES +} UENUM1BYTE(MOTION_MODE); + +TYPEDEF enum { + II_DC_PRED, + II_V_PRED, + II_H_PRED, + II_SMOOTH_PRED, + INTERINTRA_MODES +} UENUM1BYTE(INTERINTRA_MODE); + +TYPEDEF enum { + COMPOUND_AVERAGE, + COMPOUND_DISTWTD, + COMPOUND_WEDGE, + COMPOUND_DIFFWTD, + COMPOUND_TYPES, + MASKED_COMPOUND_TYPES = 2, +} UENUM1BYTE(COMPOUND_TYPE); + +TYPEDEF enum { + FILTER_DC_PRED, + FILTER_V_PRED, + FILTER_H_PRED, + FILTER_D157_PRED, + FILTER_PAETH_PRED, + FILTER_INTRA_MODES, +} UENUM1BYTE(FILTER_INTRA_MODE); + +TYPEDEF enum { + SEQ_LEVEL_2_0, + SEQ_LEVEL_2_1, + SEQ_LEVEL_2_2, + SEQ_LEVEL_2_3, + SEQ_LEVEL_3_0, + SEQ_LEVEL_3_1, + SEQ_LEVEL_3_2, + 
SEQ_LEVEL_3_3, + SEQ_LEVEL_4_0, + SEQ_LEVEL_4_1, + SEQ_LEVEL_4_2, + SEQ_LEVEL_4_3, + SEQ_LEVEL_5_0, + SEQ_LEVEL_5_1, + SEQ_LEVEL_5_2, + SEQ_LEVEL_5_3, + SEQ_LEVEL_6_0, + SEQ_LEVEL_6_1, + SEQ_LEVEL_6_2, + SEQ_LEVEL_6_3, + SEQ_LEVEL_7_0, + SEQ_LEVEL_7_1, + SEQ_LEVEL_7_2, + SEQ_LEVEL_7_3, + SEQ_LEVELS, + SEQ_LEVEL_MAX = 31 +} UENUM1BYTE(AV1_LEVEL); + +#define LEVEL_BITS 5 + +#define DIRECTIONAL_MODES 8 +#define MAX_ANGLE_DELTA 3 +#define ANGLE_STEP 3 + +#define INTER_MODES (1 + NEWMV - NEARESTMV) + +#define INTER_COMPOUND_MODES (1 + NEW_NEWMV - NEAREST_NEARESTMV) + +#define SKIP_CONTEXTS 3 +#define SKIP_MODE_CONTEXTS 3 + +#define COMP_INDEX_CONTEXTS 6 +#define COMP_GROUP_IDX_CONTEXTS 6 + +#define NMV_CONTEXTS 3 + +#define NEWMV_MODE_CONTEXTS 6 +#define GLOBALMV_MODE_CONTEXTS 2 +#define REFMV_MODE_CONTEXTS 6 +#define DRL_MODE_CONTEXTS 3 + +#define GLOBALMV_OFFSET 3 +#define REFMV_OFFSET 4 + +#define NEWMV_CTX_MASK ((1 << GLOBALMV_OFFSET) - 1) +#define GLOBALMV_CTX_MASK ((1 << (REFMV_OFFSET - GLOBALMV_OFFSET)) - 1) +#define REFMV_CTX_MASK ((1 << (8 - REFMV_OFFSET)) - 1) + +#define COMP_NEWMV_CTXS 5 +#define INTER_MODE_CONTEXTS 8 + +#define DELTA_Q_SMALL 3 +#define DELTA_Q_PROBS (DELTA_Q_SMALL) +#define DEFAULT_DELTA_Q_RES_PERCEPTUAL 4 +#define DEFAULT_DELTA_Q_RES_OBJECTIVE 4 + +#define DELTA_LF_SMALL 3 +#define DELTA_LF_PROBS (DELTA_LF_SMALL) +#define DEFAULT_DELTA_LF_RES 2 + +/* Segment Feature Masks */ +#define MAX_MV_REF_CANDIDATES 2 + +#define MAX_REF_MV_STACK_SIZE 8 +#define REF_CAT_LEVEL 640 + +#define INTRA_INTER_CONTEXTS 4 +#define COMP_INTER_CONTEXTS 5 +#define REF_CONTEXTS 3 + +#define COMP_REF_TYPE_CONTEXTS 5 +#define UNI_COMP_REF_CONTEXTS 3 + +#define TXFM_PARTITION_CONTEXTS ((TX_SIZES - TX_8X8) * 6 - 3) +#ifdef ORI_CODE +typedef uint8_t TXFM_CONTEXT; +#endif +// An enum for single reference types (and some derived values). 
+enum { + NONE_FRAME = -1, + INTRA_FRAME, + LAST_FRAME, + LAST2_FRAME, + LAST3_FRAME, + GOLDEN_FRAME, + BWDREF_FRAME, + ALTREF2_FRAME, + ALTREF_FRAME, + REF_FRAMES, + + // Extra/scratch reference frame. It may be: + // - used to update the ALTREF2_FRAME ref (see lshift_bwd_ref_frames()), or + // - updated from ALTREF2_FRAME ref (see rshift_bwd_ref_frames()). + EXTREF_FRAME = REF_FRAMES, + + // Number of inter (non-intra) reference types. + INTER_REFS_PER_FRAME = ALTREF_FRAME - LAST_FRAME + 1, + + // Number of forward (aka past) reference types. + FWD_REFS = GOLDEN_FRAME - LAST_FRAME + 1, + + // Number of backward (aka future) reference types. + BWD_REFS = ALTREF_FRAME - BWDREF_FRAME + 1, + + SINGLE_REFS = FWD_REFS + BWD_REFS, +}; + +#define REF_FRAMES_LOG2 3 + +// REF_FRAMES for the cm->ref_frame_map array, 1 scratch frame for the new +// frame in cm->cur_frame, INTER_REFS_PER_FRAME for scaled references on the +// encoder in the cpi->scaled_ref_buf array. +#define FRAME_BUFFERS (REF_FRAMES + 1 + INTER_REFS_PER_FRAME) + +#define FWD_RF_OFFSET(ref) (ref - LAST_FRAME) +#define BWD_RF_OFFSET(ref) (ref - BWDREF_FRAME) + +TYPEDEF enum { + LAST_LAST2_FRAMES, // { LAST_FRAME, LAST2_FRAME } + LAST_LAST3_FRAMES, // { LAST_FRAME, LAST3_FRAME } + LAST_GOLDEN_FRAMES, // { LAST_FRAME, GOLDEN_FRAME } + BWDREF_ALTREF_FRAMES, // { BWDREF_FRAME, ALTREF_FRAME } + LAST2_LAST3_FRAMES, // { LAST2_FRAME, LAST3_FRAME } + LAST2_GOLDEN_FRAMES, // { LAST2_FRAME, GOLDEN_FRAME } + LAST3_GOLDEN_FRAMES, // { LAST3_FRAME, GOLDEN_FRAME } + BWDREF_ALTREF2_FRAMES, // { BWDREF_FRAME, ALTREF2_FRAME } + ALTREF2_ALTREF_FRAMES, // { ALTREF2_FRAME, ALTREF_FRAME } + TOTAL_UNIDIR_COMP_REFS, + // NOTE: UNIDIR_COMP_REFS is the number of uni-directional reference pairs + // that are explicitly signaled. 
+ UNIDIR_COMP_REFS = BWDREF_ALTREF_FRAMES + 1, +} UENUM1BYTE(UNIDIR_COMP_REF); + +#define TOTAL_COMP_REFS (FWD_REFS * BWD_REFS + TOTAL_UNIDIR_COMP_REFS) + +#define COMP_REFS (FWD_REFS * BWD_REFS + UNIDIR_COMP_REFS) + +// NOTE: A limited number of unidirectional reference pairs can be signalled for +// compound prediction. The use of skip mode, on the other hand, makes it +// possible to have a reference pair not listed for explicit signaling. +#define MODE_CTX_REF_FRAMES (REF_FRAMES + TOTAL_COMP_REFS) + +// Note: It includes single and compound references. So, it can take values from +// NONE_FRAME to (MODE_CTX_REF_FRAMES - 1). Hence, it is not defined as an enum. +typedef int8_t MV_REFERENCE_FRAME; + +TYPEDEF enum { + RESTORE_NONE, + RESTORE_WIENER, + RESTORE_SGRPROJ, + RESTORE_SWITCHABLE, + RESTORE_SWITCHABLE_TYPES = RESTORE_SWITCHABLE, + RESTORE_TYPES = 4, +} UENUM1BYTE(RestorationType); + +// Picture prediction structures (0-12 are predefined) in scalability metadata. +TYPEDEF enum { + SCALABILITY_L1T2 = 0, + SCALABILITY_L1T3 = 1, + SCALABILITY_L2T1 = 2, + SCALABILITY_L2T2 = 3, + SCALABILITY_L2T3 = 4, + SCALABILITY_S2T1 = 5, + SCALABILITY_S2T2 = 6, + SCALABILITY_S2T3 = 7, + SCALABILITY_L2T1h = 8, + SCALABILITY_L2T2h = 9, + SCALABILITY_L2T3h = 10, + SCALABILITY_S2T1h = 11, + SCALABILITY_S2T2h = 12, + SCALABILITY_S2T3h = 13, + SCALABILITY_SS = 14 +} UENUM1BYTE(SCALABILITY_STRUCTURES); + +#define SUPERRES_SCALE_BITS 3 +#define SUPERRES_SCALE_DENOMINATOR_MIN (SCALE_NUMERATOR + 1) + +// In large_scale_tile coding, external references are used. +#define MAX_EXTERNAL_REFERENCES 128 +#define MAX_TILES 512 + + +#define CONFIG_MULTITHREAD 0 +#define CONFIG_ENTROPY_STATS 0 + +#define CONFIG_MAX_DECODE_PROFILE 2 + +/* +from: +seg_common.h +*/ +#ifdef ORI_CODE + +#define MAX_SEGMENTS 8 +#define SEG_TREE_PROBS (MAX_SEGMENTS - 1) + +#define SEG_TEMPORAL_PRED_CTXS 3 +#define SPATIAL_PREDICTION_PROBS 3 + +enum { + SEG_LVL_ALT_Q, // Use alternate Quantizer .... 
+ SEG_LVL_ALT_LF_Y_V, // Use alternate loop filter value on y plane vertical + SEG_LVL_ALT_LF_Y_H, // Use alternate loop filter value on y plane horizontal + SEG_LVL_ALT_LF_U, // Use alternate loop filter value on u plane + SEG_LVL_ALT_LF_V, // Use alternate loop filter value on v plane + SEG_LVL_REF_FRAME, // Optional Segment reference frame + SEG_LVL_SKIP, // Optional Segment (0,0) + skip mode + SEG_LVL_GLOBALMV, + SEG_LVL_MAX +} UENUM1BYTE(SEG_LVL_FEATURES); + +struct segmentation { + uint8_t enabled; + uint8_t update_map; + uint8_t update_data; + uint8_t temporal_update; + + int16_t feature_data[MAX_SEGMENTS][SEG_LVL_MAX]; + unsigned int feature_mask[MAX_SEGMENTS]; + int last_active_segid; // The highest numbered segment id that has some + // enabled feature. + uint8_t segid_preskip; // Whether the segment id will be read before the + // skip syntax element. + // 1: the segment id will be read first. + // 0: the skip syntax element will be read first. +}; + +/* +from av1_loopfilter.h +*/ +#define MAX_LOOP_FILTER 63 + + +/* from +quant_common.h: +*/ +#define MAXQ 255 + +#endif + +/* +from: +aom/av1/common/common.h +*/ +#define av1_zero(dest) memset(&(dest), 0, sizeof(dest)) +#define av1_zero_array(dest, n) memset(dest, 0, n * sizeof(*(dest))) +/* +from: +aom/av1/common/alloccommon.h +*/ +#define INVALID_IDX -1 // Invalid buffer index. 
+ +/* +from: +aom/av1/common/timing.h +*/ +typedef struct aom_timing { + uint32_t num_units_in_display_tick; + uint32_t time_scale; + int equal_picture_interval; + uint32_t num_ticks_per_picture; +} aom_timing_info_t; + +typedef struct aom_dec_model_info { + uint32_t num_units_in_decoding_tick; + int encoder_decoder_buffer_delay_length; + int buffer_removal_time_length; + int frame_presentation_time_length; +} aom_dec_model_info_t; + +typedef struct aom_dec_model_op_parameters { + int decoder_model_param_present_flag; + int64_t bitrate; + int64_t buffer_size; + uint32_t decoder_buffer_delay; + uint32_t encoder_buffer_delay; + int low_delay_mode_flag; + int display_model_param_present_flag; + int initial_display_delay; +} aom_dec_model_op_parameters_t; + +typedef struct aom_op_timing_info_t { + uint32_t buffer_removal_time; +} aom_op_timing_info_t; +/* +from: +aom/aom_codec.h +*/ +/*!\brief OBU types. */ +typedef enum { + OBU_SEQUENCE_HEADER = 1, + OBU_TEMPORAL_DELIMITER = 2, + OBU_FRAME_HEADER = 3, + OBU_TILE_GROUP = 4, + OBU_METADATA = 5, + OBU_FRAME = 6, + OBU_REDUNDANT_FRAME_HEADER = 7, + OBU_TILE_LIST = 8, + OBU_PADDING = 15, +} OBU_TYPE; + +typedef enum aom_bit_depth { + AOM_BITS_8 = 8, /**< 8 bits */ + AOM_BITS_10 = 10, /**< 10 bits */ + AOM_BITS_12 = 12, /**< 12 bits */ +} aom_bit_depth_t; + +/*!\brief Algorithm return codes */ +typedef enum { + /*!\brief Operation completed without error */ + AOM_CODEC_OK, + + /*!\brief Unspecified error */ + AOM_CODEC_ERROR, + + /*!\brief Memory operation failed */ + AOM_CODEC_MEM_ERROR, + + /*!\brief ABI version mismatch */ + AOM_CODEC_ABI_MISMATCH, + + /*!\brief Algorithm does not have required capability */ + AOM_CODEC_INCAPABLE, + + /*!\brief The given bitstream is not supported. + * + * The bitstream was unable to be parsed at the highest level. The decoder + * is unable to proceed. This error \ref SHOULD be treated as fatal to the + * stream. 
*/ + AOM_CODEC_UNSUP_BITSTREAM, + + /*!\brief Encoded bitstream uses an unsupported feature + * + * The decoder does not implement a feature required by the encoder. This + * return code should only be used for features that prevent future + * pictures from being properly decoded. This error \ref MAY be treated as + * fatal to the stream or \ref MAY be treated as fatal to the current GOP. + */ + AOM_CODEC_UNSUP_FEATURE, + + /*!\brief The coded data for this stream is corrupt or incomplete + * + * There was a problem decoding the current frame. This return code + * should only be used for failures that prevent future pictures from + * being properly decoded. This error \ref MAY be treated as fatal to the + * stream or \ref MAY be treated as fatal to the current GOP. If decoding + * is continued for the current GOP, artifacts may be present. + */ + AOM_CODEC_CORRUPT_FRAME, + + /*!\brief An application-supplied parameter is not valid. + * + */ + AOM_CODEC_INVALID_PARAM, + + /*!\brief An iterator reached the end of list. + * + */ + AOM_CODEC_LIST_END + +} aom_codec_err_t; + +typedef struct cfg_options { + /*!\brief Reflects if ext_partition should be enabled + * + * If this value is non-zero it enabled the feature + */ + unsigned int ext_partition; +} cfg_options_t; + +/* +from: +aom/av1/common/obu_util.h +*/ +typedef struct { + size_t size; // Size (1 or 2 bytes) of the OBU header (including the + // optional OBU extension header) in the bitstream. + OBU_TYPE type; + int has_size_field; + int has_extension; + // The following fields come from the OBU extension header and therefore are + // only used if has_extension is true. + int temporal_layer_id; + int spatial_layer_id; +} ObuHeader; + + +/* +from: +aom/internal/aom_codec_internal.h +*/ + +struct aom_internal_error_info { + aom_codec_err_t error_code; + int has_detail; + char detail[80]; + int setjmp; // Boolean: whether 'jmp' is valid. 
+#ifdef ORI_CODE + jmp_buf jmp; +#endif +}; + +/* +from: +aom/aom_frame_buffer.h +*/ +typedef struct aom_codec_frame_buffer { + uint8_t *data; /**< Pointer to the data buffer */ + size_t size; /**< Size of data in bytes */ + void *priv; /**< Frame's private data */ +} aom_codec_frame_buffer_t; + +/* +from: +aom/aom_image.h +*/ +#define AOM_IMAGE_ABI_VERSION (5) /**<\hideinitializer*/ + +#define AOM_IMG_FMT_PLANAR 0x100 /**< Image is a planar format. */ +#define AOM_IMG_FMT_UV_FLIP 0x200 /**< V plane precedes U in memory. */ +/** 0x400 used to signal alpha channel, skipping for backwards compatibility. */ +#define AOM_IMG_FMT_HIGHBITDEPTH 0x800 /**< Image uses 16bit framebuffer. */ + +/*!\brief List of supported image formats */ +typedef enum aom_img_fmt { + AOM_IMG_FMT_NONE, + AOM_IMG_FMT_YV12 = + AOM_IMG_FMT_PLANAR | AOM_IMG_FMT_UV_FLIP | 1, /**< planar YVU */ + AOM_IMG_FMT_I420 = AOM_IMG_FMT_PLANAR | 2, + AOM_IMG_FMT_AOMYV12 = AOM_IMG_FMT_PLANAR | AOM_IMG_FMT_UV_FLIP | + 3, /** < planar 4:2:0 format with aom color space */ + AOM_IMG_FMT_AOMI420 = AOM_IMG_FMT_PLANAR | 4, + AOM_IMG_FMT_I422 = AOM_IMG_FMT_PLANAR | 5, + AOM_IMG_FMT_I444 = AOM_IMG_FMT_PLANAR | 6, + AOM_IMG_FMT_I42016 = AOM_IMG_FMT_I420 | AOM_IMG_FMT_HIGHBITDEPTH, + AOM_IMG_FMT_YV1216 = AOM_IMG_FMT_YV12 | AOM_IMG_FMT_HIGHBITDEPTH, + AOM_IMG_FMT_I42216 = AOM_IMG_FMT_I422 | AOM_IMG_FMT_HIGHBITDEPTH, + AOM_IMG_FMT_I44416 = AOM_IMG_FMT_I444 | AOM_IMG_FMT_HIGHBITDEPTH, +} aom_img_fmt_t; /**< alias for enum aom_img_fmt */ + +/*!\brief List of supported color primaries */ +typedef enum aom_color_primaries { + AOM_CICP_CP_RESERVED_0 = 0, /**< For future use */ + AOM_CICP_CP_BT_709 = 1, /**< BT.709 */ + AOM_CICP_CP_UNSPECIFIED = 2, /**< Unspecified */ + AOM_CICP_CP_RESERVED_3 = 3, /**< For future use */ + AOM_CICP_CP_BT_470_M = 4, /**< BT.470 System M (historical) */ + AOM_CICP_CP_BT_470_B_G = 5, /**< BT.470 System B, G (historical) */ + AOM_CICP_CP_BT_601 = 6, /**< BT.601 */ + AOM_CICP_CP_SMPTE_240 = 7, /**< 
SMPTE 240 */ + AOM_CICP_CP_GENERIC_FILM = + 8, /**< Generic film (color filters using illuminant C) */ + AOM_CICP_CP_BT_2020 = 9, /**< BT.2020, BT.2100 */ + AOM_CICP_CP_XYZ = 10, /**< SMPTE 428 (CIE 1921 XYZ) */ + AOM_CICP_CP_SMPTE_431 = 11, /**< SMPTE RP 431-2 */ + AOM_CICP_CP_SMPTE_432 = 12, /**< SMPTE EG 432-1 */ + AOM_CICP_CP_RESERVED_13 = 13, /**< For future use (values 13 - 21) */ + AOM_CICP_CP_EBU_3213 = 22, /**< EBU Tech. 3213-E */ + AOM_CICP_CP_RESERVED_23 = 23 /**< For future use (values 23 - 255) */ +} aom_color_primaries_t; /**< alias for enum aom_color_primaries */ + +/*!\brief List of supported transfer functions */ +typedef enum aom_transfer_characteristics { + AOM_CICP_TC_RESERVED_0 = 0, /**< For future use */ + AOM_CICP_TC_BT_709 = 1, /**< BT.709 */ + AOM_CICP_TC_UNSPECIFIED = 2, /**< Unspecified */ + AOM_CICP_TC_RESERVED_3 = 3, /**< For future use */ + AOM_CICP_TC_BT_470_M = 4, /**< BT.470 System M (historical) */ + AOM_CICP_TC_BT_470_B_G = 5, /**< BT.470 System B, G (historical) */ + AOM_CICP_TC_BT_601 = 6, /**< BT.601 */ + AOM_CICP_TC_SMPTE_240 = 7, /**< SMPTE 240 M */ + AOM_CICP_TC_LINEAR = 8, /**< Linear */ + AOM_CICP_TC_LOG_100 = 9, /**< Logarithmic (100 : 1 range) */ + AOM_CICP_TC_LOG_100_SQRT10 = + 10, /**< Logarithmic (100 * Sqrt(10) : 1 range) */ + AOM_CICP_TC_IEC_61966 = 11, /**< IEC 61966-2-4 */ + AOM_CICP_TC_BT_1361 = 12, /**< BT.1361 */ + AOM_CICP_TC_SRGB = 13, /**< sRGB or sYCC*/ + AOM_CICP_TC_BT_2020_10_BIT = 14, /**< BT.2020 10-bit systems */ + AOM_CICP_TC_BT_2020_12_BIT = 15, /**< BT.2020 12-bit systems */ + AOM_CICP_TC_SMPTE_2084 = 16, /**< SMPTE ST 2084, ITU BT.2100 PQ */ + AOM_CICP_TC_SMPTE_428 = 17, /**< SMPTE ST 428 */ + AOM_CICP_TC_HLG = 18, /**< BT.2100 HLG, ARIB STD-B67 */ + AOM_CICP_TC_RESERVED_19 = 19 /**< For future use (values 19-255) */ +} aom_transfer_characteristics_t; /**< alias for enum aom_transfer_function */ + +/*!\brief List of supported matrix coefficients */ +typedef enum aom_matrix_coefficients { + 
AOM_CICP_MC_IDENTITY = 0, /**< Identity matrix */ + AOM_CICP_MC_BT_709 = 1, /**< BT.709 */ + AOM_CICP_MC_UNSPECIFIED = 2, /**< Unspecified */ + AOM_CICP_MC_RESERVED_3 = 3, /**< For future use */ + AOM_CICP_MC_FCC = 4, /**< US FCC 73.628 */ + AOM_CICP_MC_BT_470_B_G = 5, /**< BT.470 System B, G (historical) */ + AOM_CICP_MC_BT_601 = 6, /**< BT.601 */ + AOM_CICP_MC_SMPTE_240 = 7, /**< SMPTE 240 M */ + AOM_CICP_MC_SMPTE_YCGCO = 8, /**< YCgCo */ + AOM_CICP_MC_BT_2020_NCL = + 9, /**< BT.2020 non-constant luminance, BT.2100 YCbCr */ + AOM_CICP_MC_BT_2020_CL = 10, /**< BT.2020 constant luminance */ + AOM_CICP_MC_SMPTE_2085 = 11, /**< SMPTE ST 2085 YDzDx */ + AOM_CICP_MC_CHROMAT_NCL = + 12, /**< Chromaticity-derived non-constant luminance */ + AOM_CICP_MC_CHROMAT_CL = 13, /**< Chromaticity-derived constant luminance */ + AOM_CICP_MC_ICTCP = 14, /**< BT.2100 ICtCp */ + AOM_CICP_MC_RESERVED_15 = 15 /**< For future use (values 15-255) */ +} aom_matrix_coefficients_t; + +/*!\brief List of supported color range */ +typedef enum aom_color_range { + AOM_CR_STUDIO_RANGE = 0, /**< Y [16..235], UV [16..240] */ + AOM_CR_FULL_RANGE = 1 /**< YUV/RGB [0..255] */ +} aom_color_range_t; /**< alias for enum aom_color_range */ + +/*!\brief List of chroma sample positions */ +typedef enum aom_chroma_sample_position { + AOM_CSP_UNKNOWN = 0, /**< Unknown */ + AOM_CSP_VERTICAL = 1, /**< Horizontally co-located with luma(0, 0)*/ + /**< sample, between two vertical samples */ + AOM_CSP_COLOCATED = 2, /**< Co-located with luma(0, 0) sample */ + AOM_CSP_RESERVED = 3 /**< Reserved value */ +} aom_chroma_sample_position_t; /**< alias for enum aom_transfer_function */ + +/* +from: +aom/aom_scale/yv12config.h +*/ +typedef struct PIC_BUFFER_CONFIG_s { + union { + struct { + int y_width; + int uv_width; + }; + int widths[2]; + }; + union { + struct { + int y_height; + int uv_height; + }; + int heights[2]; + }; + union { + struct { + int y_crop_width; + int uv_crop_width; + }; + int crop_widths[2]; + }; + 
union { + struct { + int y_crop_height; + int uv_crop_height; + }; + int crop_heights[2]; + }; + union { + struct { + int y_stride; + int uv_stride; + }; + int strides[2]; + }; + union { + struct { + uint8_t *y_buffer; + uint8_t *u_buffer; + uint8_t *v_buffer; + }; + uint8_t *buffers[3]; + }; + + // Indicate whether y_buffer, u_buffer, and v_buffer points to the internally + // allocated memory or external buffers. + int use_external_reference_buffers; + // This is needed to store y_buffer, u_buffer, and v_buffer when set reference + // uses an external refernece, and restore those buffer pointers after the + // external reference frame is no longer used. + uint8_t *store_buf_adr[3]; + + // If the frame is stored in a 16-bit buffer, this stores an 8-bit version + // for use in global motion detection. It is allocated on-demand. + uint8_t *y_buffer_8bit; + int buf_8bit_valid; + + uint8_t *buffer_alloc; + size_t buffer_alloc_sz; + int border; + size_t frame_size; + int subsampling_x; + int subsampling_y; + unsigned int bit_depth; + aom_color_primaries_t color_primaries; + aom_transfer_characteristics_t transfer_characteristics; + aom_matrix_coefficients_t matrix_coefficients; + uint8_t monochrome; + aom_chroma_sample_position_t chroma_sample_position; + aom_color_range_t color_range; + int render_width; + int render_height; + + int corrupted; + int flags; + +#ifdef AML + int32_t index; + int32_t decode_idx; + int32_t slice_type; + int32_t RefNum_L0; + int32_t RefNum_L1; + int32_t num_reorder_pic; + int32_t stream_offset; + uint8_t referenced; + uint8_t output_mark; + uint8_t recon_mark; + uint8_t output_ready; + uint8_t error_mark; + /**/ + int32_t slice_idx; + /*buffer*/ + uint32_t fgs_table_adr; +#ifdef AOM_AV1_MMU + uint32_t header_adr; +#endif +#ifdef AOM_AV1_MMU_DW + uint32_t header_dw_adr; +#endif + uint32_t mpred_mv_wr_start_addr; + uint32_t mc_y_adr; + uint32_t mc_u_v_adr; + int32_t mc_canvas_y; + int32_t mc_canvas_u_v; + + int32_t lcu_total; + /**/ + 
unsigned int order_hint; +#endif +#ifdef AML_DEVICE + int mv_buf_index; + unsigned long cma_alloc_addr; + int BUF_index; + int buf_size; + int comp_body_size; + unsigned int dw_y_adr; + unsigned int dw_u_v_adr; + int double_write_mode; + int y_canvas_index; + int uv_canvas_index; + int vf_ref; + struct canvas_config_s canvas_config[2]; + char *aux_data_buf; + int aux_data_size; + u32 pts; + u64 pts64; + /* picture qos infomation*/ + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; +#endif + u64 timestamp; + u32 hw_decode_time; + u32 frame_size2; // For frame base mode +} PIC_BUFFER_CONFIG; + +/* +from: +common/blockd.h +*/ +TYPEDEF enum { + KEY_FRAME = 0, + INTER_FRAME = 1, + INTRA_ONLY_FRAME = 2, // replaces intra-only + S_FRAME = 3, + FRAME_TYPES, +} UENUM1BYTE(FRAME_TYPE); + +/*from: +mv.h +*/ +#ifdef ORI_CODE +typedef struct mv32 { + int32_t row; + int32_t col; +} MV32; +#endif +/*from: + aom_filter.h +*/ +#define SUBPEL_BITS 4 +#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1) +#define SUBPEL_SHIFTS (1 << SUBPEL_BITS) +#define SUBPEL_TAPS 8 + +#define SCALE_SUBPEL_BITS 10 +#define SCALE_SUBPEL_SHIFTS (1 << SCALE_SUBPEL_BITS) +#define SCALE_SUBPEL_MASK (SCALE_SUBPEL_SHIFTS - 1) +#define SCALE_EXTRA_BITS (SCALE_SUBPEL_BITS - SUBPEL_BITS) +#define SCALE_EXTRA_OFF ((1 << SCALE_EXTRA_BITS) / 2) + +#define RS_SUBPEL_BITS 6 +#define RS_SUBPEL_MASK ((1 << RS_SUBPEL_BITS) - 1) +#define RS_SCALE_SUBPEL_BITS 14 +#define RS_SCALE_SUBPEL_MASK ((1 << RS_SCALE_SUBPEL_BITS) - 1) +#define RS_SCALE_EXTRA_BITS (RS_SCALE_SUBPEL_BITS - RS_SUBPEL_BITS) +#define RS_SCALE_EXTRA_OFF (1 << (RS_SCALE_EXTRA_BITS - 1)) + +/*from: +scale.h +*/ +#define SCALE_NUMERATOR 8 + +#define REF_SCALE_SHIFT 14 +#define REF_NO_SCALE (1 << REF_SCALE_SHIFT) +#define REF_INVALID_SCALE -1 + +struct scale_factors { + int x_scale_fp; // horizontal fixed point scale factor + int y_scale_fp; // vertical fixed point scale factor + int 
x_step_q4; + int y_step_q4; + + int (*scale_value_x)(int val, const struct scale_factors *sf); + int (*scale_value_y)(int val, const struct scale_factors *sf); +#ifdef ORI_CODE + // convolve_fn_ptr[subpel_x != 0][subpel_y != 0][is_compound] + aom_convolve_fn_t convolve[2][2][2]; + aom_highbd_convolve_fn_t highbd_convolve[2][2][2]; +#endif +}; + +#ifdef ORI_CODE +MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf); +#endif +void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w, + int other_h, int this_w, int this_h); + +static inline int av1_is_valid_scale(const struct scale_factors *sf) { +#ifdef ORI_CODE + assert(sf != NULL); +#endif + return sf->x_scale_fp != REF_INVALID_SCALE && + sf->y_scale_fp != REF_INVALID_SCALE; +} + +static inline int av1_is_scaled(const struct scale_factors *sf) { +#ifdef ORI_CODE + assert(sf != NULL); +#endif + return av1_is_valid_scale(sf) && + (sf->x_scale_fp != REF_NO_SCALE || sf->y_scale_fp != REF_NO_SCALE); +} + + +/* +from: +common/onyxc_int.h +*/ + +#define CDEF_MAX_STRENGTHS 16 + +/* Constant values while waiting for the sequence header */ +#define FRAME_ID_LENGTH 15 +#define DELTA_FRAME_ID_LENGTH 14 + +#define FRAME_CONTEXTS (FRAME_BUFFERS + 1) +// Extra frame context which is always kept at default values +#define FRAME_CONTEXT_DEFAULTS (FRAME_CONTEXTS - 1) +#define PRIMARY_REF_BITS 3 +#define PRIMARY_REF_NONE 7 + +#define NUM_PING_PONG_BUFFERS 2 + +#define MAX_NUM_TEMPORAL_LAYERS 8 +#define MAX_NUM_SPATIAL_LAYERS 4 +/* clang-format off */ +// clang-format seems to think this is a pointer dereference and not a +// multiplication. +#define MAX_NUM_OPERATING_POINTS \ + MAX_NUM_TEMPORAL_LAYERS * MAX_NUM_SPATIAL_LAYERS +/* clang-format on*/ + +// TODO(jingning): Turning this on to set up transform coefficient +// processing timer. 
+#define TXCOEFF_TIMER 0 +#define TXCOEFF_COST_TIMER 0 + +TYPEDEF enum { + SINGLE_REFERENCE = 0, + COMPOUND_REFERENCE = 1, + REFERENCE_MODE_SELECT = 2, + REFERENCE_MODES = 3, +} UENUM1BYTE(REFERENCE_MODE); + +TYPEDEF enum { + /** + * Frame context updates are disabled + */ + REFRESH_FRAME_CONTEXT_DISABLED, + /** + * Update frame context to values resulting from backward probability + * updates based on entropy/counts in the decoded frame + */ + REFRESH_FRAME_CONTEXT_BACKWARD, +} UENUM1BYTE(REFRESH_FRAME_CONTEXT_MODE); + +#define MFMV_STACK_SIZE 3 + +#ifdef AML +#define MV_REF_SIZE 8 +#endif + +#ifdef ORI_CODE +typedef struct { + int_mv mfmv0; + uint8_t ref_frame_offset; +} TPL_MV_REF; +typedef struct { + int_mv mv; + MV_REFERENCE_FRAME ref_frame; +} MV_REF; +#endif + +typedef struct RefCntBuffer_s { + // For a RefCntBuffer, the following are reference-holding variables: + // - cm->ref_frame_map[] + // - cm->cur_frame + // - cm->scaled_ref_buf[] (encoder only) + // - cm->next_ref_frame_map[] (decoder only) + // - pbi->output_frame_index[] (decoder only) + // With that definition, 'ref_count' is the number of reference-holding + // variables that are currently referencing this buffer. + // For example: + // - suppose this buffer is at index 'k' in the buffer pool, and + // - Total 'n' of the variables / array elements above have value 'k' (that + // is, they are pointing to buffer at index 'k'). + // Then, pool->frame_bufs[k].ref_count = n. 
+ int ref_count; + + unsigned int order_hint; + unsigned int ref_order_hints[INTER_REFS_PER_FRAME]; + + int intra_only; + int segmentation_enabled; + unsigned int segment_feature[8]; +#ifdef AML + int segmentation_update_map; + int prev_segmentation_enabled; + int seg_mi_rows; + int seg_mi_cols; + + unsigned int seg_lf_info_y[8]; + unsigned int seg_lf_info_c[8]; + int8_t ref_deltas[REF_FRAMES]; + int8_t mode_deltas[MAX_MODE_LF_DELTAS]; +#endif + //MV_REF *mvs; + uint8_t *seg_map; +#ifdef ORI_CODE + struct segmentation seg; +#endif + + int mi_rows; + int mi_cols; + // Width and height give the size of the buffer (before any upscaling, unlike + // the sizes that can be derived from the buf structure) + int width; + int height; +#ifdef ORI_CODE + WarpedMotionParams global_motion[REF_FRAMES]; +#endif + int showable_frame; // frame can be used as show existing frame in future + uint8_t film_grain_params_present; +#ifdef ORI_CODE + aom_film_grain_t film_grain_params; +#endif +#ifdef AML + int dec_width; + uint8_t film_grain_reg_valid; + uint32_t film_grain_reg[FILM_GRAIN_REG_SIZE]; +#endif + aom_codec_frame_buffer_t raw_frame_buffer; + PIC_BUFFER_CONFIG buf; +#ifdef ORI_CODE + hash_table hash_table; +#endif + FRAME_TYPE frame_type; + + // This is only used in the encoder but needs to be indexed per ref frame + // so it's extremely convenient to keep it here. +#ifdef ORI_CODE + int interp_filter_selected[SWITCHABLE]; +#endif + // Inter frame reference frame delta for loop filter +#ifndef AML + int8_t ref_deltas[REF_FRAMES]; +#endif +#ifdef ORI_CODE + // 0 = ZERO_MV, MV + int8_t mode_deltas[MAX_MODE_LF_DELTAS]; + + FRAME_CONTEXT frame_context; +#endif + int show_frame; +} RefCntBuffer; + +typedef struct BufferPool_s { +// Protect BufferPool from being accessed by several FrameWorkers at +// the same time during frame parallel decode. +// TODO(hkuang): Try to use atomic variable instead of locking the whole pool. +// TODO(wtc): Remove this. 
See +// https://chromium-review.googlesource.com/c/webm/libvpx/+/560630. +#if CONFIG_MULTITHREAD + pthread_mutex_t pool_mutex; +#endif + + // Private data associated with the frame buffer callbacks. + void *cb_priv; +#ifdef ORI_CODE + aom_get_frame_buffer_cb_fn_t get_fb_cb; + aom_release_frame_buffer_cb_fn_t release_fb_cb; +#endif + RefCntBuffer frame_bufs[FRAME_BUFFERS]; + +#ifdef ORI_CODE + // Frame buffers allocated internally by the codec. + InternalFrameBufferList int_frame_buffers; +#endif +#ifdef AML_DEVICE + spinlock_t lock; +#endif +} BufferPool; + +typedef struct { + int cdef_pri_damping; + int cdef_sec_damping; + int nb_cdef_strengths; + int cdef_strengths[CDEF_MAX_STRENGTHS]; + int cdef_uv_strengths[CDEF_MAX_STRENGTHS]; + int cdef_bits; +} CdefInfo; + +typedef struct { + int delta_q_present_flag; + // Resolution of delta quant + int delta_q_res; + int delta_lf_present_flag; + // Resolution of delta lf level + int delta_lf_res; + // This is a flag for number of deltas of loop filter level + // 0: use 1 delta, for y_vertical, y_horizontal, u, and v + // 1: use separate deltas for each filter level + int delta_lf_multi; +} DeltaQInfo; + +typedef struct { + int enable_order_hint; // 0 - disable order hint, and related tools + int order_hint_bits_minus_1; // dist_wtd_comp, ref_frame_mvs, + // frame_sign_bias + // if 0, enable_dist_wtd_comp and + // enable_ref_frame_mvs must be set as 0. + int enable_dist_wtd_comp; // 0 - disable dist-wtd compound modes + // 1 - enable it + int enable_ref_frame_mvs; // 0 - disable ref frame mvs + // 1 - enable it +} OrderHintInfo; + +// Sequence header structure. +// Note: All syntax elements of sequence_header_obu that need to be +// bit-identical across multiple sequence headers must be part of this struct, +// so that consistency is checked by are_seq_headers_consistent() function. 
+typedef struct SequenceHeader { + int num_bits_width; + int num_bits_height; + int max_frame_width; + int max_frame_height; + uint8_t frame_id_numbers_present_flag; + int frame_id_length; + int delta_frame_id_length; + BLOCK_SIZE2 sb_size; // Size of the superblock used for this frame + int mib_size; // Size of the superblock in units of MI blocks + int mib_size_log2; // Log 2 of above. + + OrderHintInfo order_hint_info; + + uint8_t force_screen_content_tools; // 0 - force off + // 1 - force on + // 2 - adaptive + uint8_t still_picture; // Video is a single frame still picture + uint8_t reduced_still_picture_hdr; // Use reduced header for still picture + uint8_t force_integer_mv; // 0 - Don't force. MV can use subpel + // 1 - force to integer + // 2 - adaptive + uint8_t enable_filter_intra; // enables/disables filterintra + uint8_t enable_intra_edge_filter; // enables/disables edge upsampling + uint8_t enable_interintra_compound; // enables/disables interintra_compound + uint8_t enable_masked_compound; // enables/disables masked compound + uint8_t enable_dual_filter; // 0 - disable dual interpolation filter + // 1 - enable vert/horz filter selection + uint8_t enable_warped_motion; // 0 - disable warp for the sequence + // 1 - enable warp for the sequence + uint8_t enable_superres; // 0 - Disable superres for the sequence + // and no frame level superres flag + // 1 - Enable superres for the sequence + // enable per-frame superres flag + uint8_t enable_cdef; // To turn on/off CDEF + uint8_t enable_restoration; // To turn on/off loop restoration + BITSTREAM_PROFILE profile; + + // Operating point info. + int operating_points_cnt_minus_1; + int operating_point_idc[MAX_NUM_OPERATING_POINTS]; + uint8_t display_model_info_present_flag; + uint8_t decoder_model_info_present_flag; + AV1_LEVEL seq_level_idx[MAX_NUM_OPERATING_POINTS]; + uint8_t tier[MAX_NUM_OPERATING_POINTS]; // seq_tier in the spec. One bit: 0 + // or 1. + + // Color config. 
+ aom_bit_depth_t bit_depth; // AOM_BITS_8 in profile 0 or 1, + // AOM_BITS_10 or AOM_BITS_12 in profile 2 or 3. + uint8_t use_highbitdepth; // If true, we need to use 16bit frame buffers. + uint8_t monochrome; // Monochorme video + aom_color_primaries_t color_primaries; + aom_transfer_characteristics_t transfer_characteristics; + aom_matrix_coefficients_t matrix_coefficients; + int color_range; + int subsampling_x; // Chroma subsampling for x + int subsampling_y; // Chroma subsampling for y + aom_chroma_sample_position_t chroma_sample_position; + uint8_t separate_uv_delta_q; + uint8_t film_gry_dequant_QTXain_params_present; +} SequenceHeader; + +typedef struct { + int skip_mode_allowed; + int skip_mode_flag; + int ref_frame_idx_0; + int ref_frame_idx_1; +} SkipModeInfo; + +typedef struct { + FRAME_TYPE frame_type; + REFERENCE_MODE reference_mode; + + unsigned int order_hint; + unsigned int frame_number; + SkipModeInfo skip_mode_info; + int refresh_frame_flags; // Which ref frames are overwritten by this frame + int frame_refs_short_signaling; +} CurrentFrame; + +typedef struct AV1_Common_s { + CurrentFrame current_frame; + struct aom_internal_error_info error; + int width; + int height; + int render_width; + int render_height; + int timing_info_present; + aom_timing_info_t timing_info; + int buffer_removal_time_present; + aom_dec_model_info_t buffer_model; + aom_dec_model_op_parameters_t op_params[MAX_NUM_OPERATING_POINTS + 1]; + aom_op_timing_info_t op_frame_timing[MAX_NUM_OPERATING_POINTS + 1]; + uint32_t frame_presentation_time; + + int context_update_tile_id; +#ifdef SUPPORT_SCALE_FACTOR + // Scale of the current frame with respect to itself. + struct scale_factors sf_identity; +#endif + RefCntBuffer *prev_frame; + + // TODO(hkuang): Combine this with cur_buf in macroblockd. 
+ RefCntBuffer *cur_frame; + + // For encoder, we have a two-level mapping from reference frame type to the + // corresponding buffer in the buffer pool: + // * 'remapped_ref_idx[i - 1]' maps reference type 'i' (range: LAST_FRAME ... + // EXTREF_FRAME) to a remapped index 'j' (in range: 0 ... REF_FRAMES - 1) + // * Later, 'cm->ref_frame_map[j]' maps the remapped index 'j' to a pointer to + // the reference counted buffer structure RefCntBuffer, taken from the buffer + // pool cm->buffer_pool->frame_bufs. + // + // LAST_FRAME, ..., EXTREF_FRAME + // | | + // v v + // remapped_ref_idx[LAST_FRAME - 1], ..., remapped_ref_idx[EXTREF_FRAME - 1] + // | | + // v v + // ref_frame_map[], ..., ref_frame_map[] + // + // Note: INTRA_FRAME always refers to the current frame, so there's no need to + // have a remapped index for the same. + int remapped_ref_idx[REF_FRAMES]; + +#ifdef SUPPORT_SCALE_FACTOR + struct scale_factors ref_scale_factors[REF_FRAMES]; +#endif + // For decoder, ref_frame_map[i] maps reference type 'i' to a pointer to + // the buffer in the buffer pool 'cm->buffer_pool.frame_bufs'. + // For encoder, ref_frame_map[j] (where j = remapped_ref_idx[i]) maps + // remapped reference index 'j' (that is, original reference type 'i') to + // a pointer to the buffer in the buffer pool 'cm->buffer_pool.frame_bufs'. + RefCntBuffer *ref_frame_map[REF_FRAMES]; + + // Prepare ref_frame_map for the next frame. + // Only used in frame parallel decode. 
+ RefCntBuffer *next_ref_frame_map[REF_FRAMES]; +#ifdef AML + RefCntBuffer *next_used_ref_frame_map[REF_FRAMES]; +#endif + FRAME_TYPE last_frame_type; /* last frame's frame type for motion search.*/ + + int show_frame; + int showable_frame; // frame can be used as show existing frame in future + int show_existing_frame; + + uint8_t disable_cdf_update; + int allow_high_precision_mv; + uint8_t cur_frame_force_integer_mv; // 0 the default in AOM, 1 only integer + + uint8_t allow_screen_content_tools; + int allow_intrabc; + int allow_warped_motion; + + // MBs, mb_rows/cols is in 16-pixel units; mi_rows/cols is in + // MB_MODE_INFO (8-pixel) units. + int MBs; + int mb_rows, mi_rows; + int mb_cols, mi_cols; + int mi_stride; + + /* profile settings */ + TX_MODE tx_mode; + +#if CONFIG_ENTROPY_STATS + int coef_cdf_category; +#endif + + int base_qindex; + int y_dc_delta_q; + int u_dc_delta_q; + int v_dc_delta_q; + int u_ac_delta_q; + int v_ac_delta_q; + +#ifdef ORI_CODE + // The dequantizers below are true dequantizers used only in the + // dequantization process. They have the same coefficient + // shift/scale as TX. + int16_t y_dequant_QTX[MAX_SEGMENTS][2]; + int16_t u_dequant_QTX[MAX_SEGMENTS][2]; + int16_t v_dequant_QTX[MAX_SEGMENTS][2]; + + // Global quant matrix tables + const qm_val_t *giqmatrix[NUM_QM_LEVELS][3][TX_SIZES_ALL]; + const qm_val_t *gqmatrix[NUM_QM_LEVELS][3][TX_SIZES_ALL]; + + // Local quant matrix tables for each frame + const qm_val_t *y_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL]; + const qm_val_t *u_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL]; + const qm_val_t *v_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL]; +#endif + // Encoder + int using_qmatrix; + int qm_y; + int qm_u; + int qm_v; + int min_qmlevel; + int max_qmlevel; + int use_quant_b_adapt; + + /* We allocate a MB_MODE_INFO struct for each macroblock, together with + an extra row on top and column on the left to simplify prediction. 
*/ + int mi_alloc_size; + +#ifdef ORI_CODE + MB_MODE_INFO *mip; /* Base of allocated array */ + MB_MODE_INFO *mi; /* Corresponds to upper left visible macroblock */ + + // TODO(agrange): Move prev_mi into encoder structure. + // prev_mip and prev_mi will only be allocated in encoder. + MB_MODE_INFO *prev_mip; /* MB_MODE_INFO array 'mip' from last decoded frame */ + MB_MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */ + + // Separate mi functions between encoder and decoder. + int (*alloc_mi)(struct AV1Common *cm, int mi_size); + void (*free_mi)(struct AV1Common *cm); + void (*setup_mi)(struct AV1Common *cm); + + // Grid of pointers to 8x8 MB_MODE_INFO structs. Any 8x8 not in the visible + // area will be NULL. + MB_MODE_INFO **mi_grid_base; + MB_MODE_INFO **mi_grid_visible; + MB_MODE_INFO **prev_mi_grid_base; + MB_MODE_INFO **prev_mi_grid_visible; +#endif + // Whether to use previous frames' motion vectors for prediction. + int allow_ref_frame_mvs; + + uint8_t *last_frame_seg_map; + +#ifdef ORI_CODE + InterpFilter interp_filter; +#endif + int switchable_motion_mode; +#ifdef ORI_CODE + loop_filter_info_n lf_info; +#endif + // The denominator of the superres scale; the numerator is fixed. + uint8_t superres_scale_denominator; + int superres_upscaled_width; + int superres_upscaled_height; + +#ifdef ORI_CODE + RestorationInfo rst_info[MAX_MB_PLANE]; +#endif + // Pointer to a scratch buffer used by self-guided restoration + int32_t *rst_tmpbuf; +#ifdef ORI_CODE + RestorationLineBuffers *rlbs; +#endif + // Output of loop restoration + PIC_BUFFER_CONFIG rst_frame; + + // Flag signaling how frame contexts should be updated at the end of + // a frame decode + REFRESH_FRAME_CONTEXT_MODE refresh_frame_context; + + int ref_frame_sign_bias[REF_FRAMES]; /* Two state 0, 1 */ + +#ifdef ORI_CODE + struct loopfilter lf; + struct segmentation seg; +#endif + + int coded_lossless; // frame is fully lossless at the coded resolution. 
+ int all_lossless; // frame is fully lossless at the upscaled resolution. + + int reduced_tx_set_used; + +#ifdef ORI_CODE + // Context probabilities for reference frame prediction + MV_REFERENCE_FRAME comp_fwd_ref[FWD_REFS]; + MV_REFERENCE_FRAME comp_bwd_ref[BWD_REFS]; + + FRAME_CONTEXT *fc; /* this frame entropy */ + FRAME_CONTEXT *default_frame_context; +#endif + int primary_ref_frame; + + int error_resilient_mode; + + int tile_cols, tile_rows; + + int max_tile_width_sb; + int min_log2_tile_cols; + int max_log2_tile_cols; + int max_log2_tile_rows; + int min_log2_tile_rows; + int min_log2_tiles; + int max_tile_height_sb; + int uniform_tile_spacing_flag; + int log2_tile_cols; // only valid for uniform tiles + int log2_tile_rows; // only valid for uniform tiles + int tile_col_start_sb[MAX_TILE_COLS + 1]; // valid for 0 <= i <= tile_cols + int tile_row_start_sb[MAX_TILE_ROWS + 1]; // valid for 0 <= i <= tile_rows + int tile_width, tile_height; // In MI units + int min_inner_tile_width; // min width of non-rightmost tile + + unsigned int large_scale_tile; + unsigned int single_tile_decoding; + + int byte_alignment; + int skip_loop_filter; + int skip_film_grain; + + // External BufferPool passed from outside. + BufferPool *buffer_pool; + +#ifdef ORI_CODE + PARTITION_CONTEXT **above_seg_context; + ENTROPY_CONTEXT **above_context[MAX_MB_PLANE]; + TXFM_CONTEXT **above_txfm_context; + WarpedMotionParams global_motion[REF_FRAMES]; + aom_film_grain_t film_grain_params; + + CdefInfo cdef_info; + DeltaQInfo delta_q_info; // Delta Q and Delta LF parameters +#endif + int num_tg; + SequenceHeader seq_params; + int current_frame_id; + int ref_frame_id[REF_FRAMES]; + int valid_for_referencing[REF_FRAMES]; +#ifdef ORI_CODE + TPL_MV_REF *tpl_mvs; +#endif + int tpl_mvs_mem_size; + // TODO(jingning): This can be combined with sign_bias later. 
+ int8_t ref_frame_side[REF_FRAMES]; + + int is_annexb; + + int temporal_layer_id; + int spatial_layer_id; + unsigned int number_temporal_layers; + unsigned int number_spatial_layers; + int num_allocated_above_context_mi_col; + int num_allocated_above_contexts; + int num_allocated_above_context_planes; + +#if TXCOEFF_TIMER + int64_t cum_txcoeff_timer; + int64_t txcoeff_timer; + int txb_count; +#endif + +#if TXCOEFF_COST_TIMER + int64_t cum_txcoeff_cost_timer; + int64_t txcoeff_cost_timer; + int64_t txcoeff_cost_count; +#endif + const cfg_options_t *options; + int is_decoding; +#ifdef AML + int mv_ref_offset[MV_REF_SIZE][REF_FRAMES]; + int mv_ref_id[MV_REF_SIZE]; + unsigned char mv_cal_tpl_mvs[MV_REF_SIZE]; + int mv_ref_id_index; + int prev_fb_idx; + int new_fb_idx; + int32_t dec_width; +#endif +#ifdef AML_DEVICE + int cur_fb_idx_mmu; +#ifdef AOM_AV1_MMU_DW + int cur_fb_idx_mmu_dw; +#endif + int current_video_frame; + int use_prev_frame_mvs; + int frame_type; + int intra_only; + struct RefCntBuffer_s frame_refs[INTER_REFS_PER_FRAME]; + +#endif +} AV1_COMMON; + + +/* +from: + decoder/decoder.h +*/ + +typedef struct EXTERNAL_REFERENCES { + PIC_BUFFER_CONFIG refs[MAX_EXTERNAL_REFERENCES]; + int num; +} EXTERNAL_REFERENCES; + +typedef struct AV1Decoder { + //DECLARE_ALIGNED(32, MACROBLOCKD, mb); + + //DECLARE_ALIGNED(32, AV1_COMMON, common); + AV1_COMMON *common; + +#ifdef ORI_CODE + AVxWorker lf_worker; + AV1LfSync lf_row_sync; + AV1LrSync lr_row_sync; + AV1LrStruct lr_ctxt; + AVxWorker *tile_workers; + int num_workers; + DecWorkerData *thread_data; + ThreadData td; + TileDataDec *tile_data; + int allocated_tiles; + TileBufferDec tile_buffers[MAX_TILE_ROWS][MAX_TILE_COLS]; + AV1DecTileMT tile_mt_info; +#endif + + // Each time the decoder is called, we expect to receive a full temporal unit. + // This can contain up to one shown frame per spatial layer in the current + // operating point (note that some layers may be entirely omitted). 
+ // If the 'output_all_layers' option is true, we save all of these shown + // frames so that they can be returned to the application. If the + // 'output_all_layers' option is false, then we only output one image per + // temporal unit. + // + // Note: The saved buffers are released at the start of the next time the + // application calls aom_codec_decode(). + int output_all_layers; + RefCntBuffer *output_frames[MAX_NUM_SPATIAL_LAYERS]; + size_t num_output_frames; // How many frames are queued up so far? + + // In order to properly support random-access decoding, we need + // to behave slightly differently for the very first frame we decode. + // So we track whether this is the first frame or not. + int decoding_first_frame; + + int allow_lowbitdepth; + int max_threads; + int inv_tile_order; + int need_resync; // wait for key/intra-only frame. + int hold_ref_buf; // Boolean: whether we are holding reference buffers in + // common.next_ref_frame_map. + int reset_decoder_state; + + int tile_size_bytes; + int tile_col_size_bytes; + int dec_tile_row, dec_tile_col; // always -1 for non-VR tile encoding +#if CONFIG_ACCOUNTING + int acct_enabled; + Accounting accounting; +#endif + int tg_size; // Number of tiles in the current tilegroup + int tg_start; // First tile in the current tilegroup + int tg_size_bit_offset; + int sequence_header_ready; + int sequence_header_changed; +#if CONFIG_INSPECTION + aom_inspect_cb inspect_cb; + void *inspect_ctx; +#endif + int operating_point; + int current_operating_point; + int seen_frame_header; + + // State if the camera frame header is already decoded while + // large_scale_tile = 1. 
+ int camera_frame_header_ready; + size_t frame_header_size; +#ifdef ORI_CODE + DataBuffer obu_size_hdr; +#endif + int output_frame_width_in_tiles_minus_1; + int output_frame_height_in_tiles_minus_1; + int tile_count_minus_1; + uint32_t coded_tile_data_size; + unsigned int ext_tile_debug; // for ext-tile software debug & testing + unsigned int row_mt; + EXTERNAL_REFERENCES ext_refs; + PIC_BUFFER_CONFIG tile_list_outbuf; + +#ifdef ORI_CODE + CB_BUFFER *cb_buffer_base; +#endif + int cb_buffer_alloc_size; + + int allocated_row_mt_sync_rows; + +#if CONFIG_MULTITHREAD + pthread_mutex_t *row_mt_mutex_; + pthread_cond_t *row_mt_cond_; +#endif + +#ifdef ORI_CODE + AV1DecRowMTInfo frame_row_mt_info; +#endif + +#ifdef AML + unsigned char pred_inter_read_enable; + int cur_obu_type; + int decode_idx; + int bufmgr_proc_count; + int obu_frame_frame_head_come_after_tile; + uint32_t frame_width; + uint32_t frame_height; + BuffInfo_t* work_space_buf; + buff_t* mc_buf; + //unsigned short *rpm_ptr; + void *private_data; + u32 pre_stream_offset; +#endif +} AV1Decoder; + +#define RPM_BEGIN 0x200 +#define RPM_END 0x280 + +typedef union param_u { + struct { + unsigned short data[RPM_END - RPM_BEGIN]; + } l; + struct { + /*sequence head*/ + unsigned short profile; + unsigned short still_picture; + unsigned short reduced_still_picture_hdr; + unsigned short decoder_model_info_present_flag; + unsigned short max_frame_width; + unsigned short max_frame_height; + unsigned short frame_id_numbers_present_flag; + unsigned short delta_frame_id_length; + unsigned short frame_id_length; + unsigned short order_hint_bits_minus_1; + unsigned short enable_order_hint; + unsigned short enable_dist_wtd_comp; + unsigned short enable_ref_frame_mvs; + + /*frame head*/ + unsigned short show_existing_frame; + unsigned short frame_type; + unsigned short show_frame; + unsigned short error_resilient_mode; + unsigned short refresh_frame_flags; + unsigned short showable_frame; + unsigned short current_frame_id; + 
unsigned short frame_size_override_flag; + unsigned short order_hint; + unsigned short primary_ref_frame; + unsigned short frame_refs_short_signaling; + unsigned short frame_width; + unsigned short dec_frame_width; + unsigned short frame_width_scaled; + unsigned short frame_height; + unsigned short reference_mode; + unsigned short allow_ref_frame_mvs; + unsigned short superres_scale_denominator; + unsigned short lst_ref; + unsigned short gld_ref; + unsigned short existing_frame_idx; + + unsigned short remapped_ref_idx[INTER_REFS_PER_FRAME]; + unsigned short delta_frame_id_minus_1[INTER_REFS_PER_FRAME]; + unsigned short ref_order_hint[REF_FRAMES]; + /*other not in reference*/ + unsigned short bit_depth; + unsigned short seq_flags; + unsigned short update_parameters; + unsigned short film_grain_params_ref_idx; + + /*loop_filter & segmentation*/ + unsigned short loop_filter_sharpness_level; + unsigned short loop_filter_mode_ref_delta_enabled; + unsigned short loop_filter_ref_deltas_0; + unsigned short loop_filter_ref_deltas_1; + unsigned short loop_filter_ref_deltas_2; + unsigned short loop_filter_ref_deltas_3; + unsigned short loop_filter_ref_deltas_4; + unsigned short loop_filter_ref_deltas_5; + unsigned short loop_filter_ref_deltas_6; + unsigned short loop_filter_ref_deltas_7; + unsigned short loop_filter_mode_deltas_0; + unsigned short loop_filter_mode_deltas_1; + unsigned short loop_filter_level_0; + unsigned short loop_filter_level_1; + unsigned short loop_filter_level_u; + unsigned short loop_filter_level_v; + + unsigned short segmentation_enabled; + /* + SEG_LVL_ALT_LF_Y_V feature_enable: seg_lf_info_y[bit7] + SEG_LVL_ALT_LF_Y_V data: seg_lf_info_y[bit0~6] + SEG_LVL_ALT_LF_Y_H feature enable: seg_lf_info_y[bit15] + SEG_LVL_ALT_LF_Y_H data: seg_lf_info_y[bit8~14] + */ + unsigned short seg_lf_info_y[8]; + /* + SEG_LVL_ALT_LF_U feature_enable: seg_lf_info_y[bit7] + SEG_LVL_ALT_LF_U data: seg_lf_info_y[bit0~6] + SEG_LVL_ALT_LF_V feature enable: 
seg_lf_info_y[bit15] + SEG_LVL_ALT_LF_V data: seg_lf_info_y[bit8~14] + */ + unsigned short seg_lf_info_c[8]; + unsigned short video_signal_type; + unsigned short color_description; + unsigned short mmu_used_num; + unsigned short dw_mmu_used_num; + unsigned short seq_flags_2; + /*ucode end*/ + /*other*/ + unsigned short enable_superres; + + /*seqence not use*/ + unsigned short operating_points_cnt_minus_1; + unsigned short operating_point_idc[MAX_NUM_OPERATING_POINTS]; + unsigned short seq_level_idx[MAX_NUM_OPERATING_POINTS]; + unsigned short decoder_model_param_present_flag[MAX_NUM_OPERATING_POINTS]; + unsigned short timing_info_present; + /*frame head not use*/ + unsigned short display_frame_id; + unsigned short frame_presentation_time; + unsigned short buffer_removal_time_present; + unsigned short op_frame_timing[MAX_NUM_OPERATING_POINTS + 1]; + unsigned short valid_ref_frame_bits; + + } p; +}param_t; + +PIC_BUFFER_CONFIG *av1_get_ref_frame_spec_buf( + const AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame); + +int av1_bufmgr_process(AV1Decoder *pbi, union param_u *params, + unsigned char new_compressed_data, int obu_type); + +struct scale_factors *av1_get_ref_scale_factors( + AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame); + +void av1_set_next_ref_frame_map(AV1Decoder *pbi); + +unsigned int av1_get_next_used_ref_info( + const AV1_COMMON *const cm, int i); + +void av1_release_buf(AV1Decoder *pbi, RefCntBuffer *const buf); + +int av1_bufmgr_postproc(AV1Decoder *pbi, unsigned char frame_decoded); + +AV1Decoder *av1_decoder_create(BufferPool *const pool, AV1_COMMON *cm); + +unsigned char av1_frame_is_inter(const AV1_COMMON *const cm); + +RefCntBuffer *av1_get_primary_ref_frame_buf( + const AV1_COMMON *const cm); + +void av1_raw_write_image(AV1Decoder *pbi, PIC_BUFFER_CONFIG *sd); + +int get_free_frame_buffer(struct AV1_Common_s *cm); + +void av1_bufmgr_ctx_reset(AV1Decoder *pbi, BufferPool *const pool, AV1_COMMON *cm); + +#if 1 +#define 
lock_buffer_pool(pool, flags) \ + spin_lock_irqsave(&pool->lock, flags) + +#define unlock_buffer_pool(pool, flags) \ + spin_unlock_irqrestore(&pool->lock, flags) +#else +#define lock_buffer_pool(pool, flags) flags=1; + +#define unlock_buffer_pool(pool, flags) flags=0; + +#endif + +#define AV1_DEBUG_BUFMGR 0x01 +#define AV1_DEBUG_BUFMGR_MORE 0x02 +#define AV1_DEBUG_BUFMGR_DETAIL 0x04 +#define AV1_DEBUG_OUT_PTS 0x10 +#define AOM_DEBUG_HW_MORE 0x20 +#define AOM_DEBUG_VFRAME 0x40 +#define AOM_DEBUG_PRINT_LIST_INFO 0x80 +#define AOM_AV1_DEBUG_SEND_PARAM_WITH_REG 0x100 +#define AV1_DEBUG_IGNORE_VF_REF 0x200 +#define AV1_DEBUG_DBG_LF_PRINT 0x400 +#define AV1_DEBUG_REG 0x800 +#define AOM_DEBUG_BUFMGR_ONLY 0x1000 +#define AOM_DEBUG_AUX_DATA 0x2000 +#define AV1_DEBUG_QOS_INFO 0x4000 +#define AOM_DEBUG_DW_DISP_MAIN 0x8000 +#define AV1_DEBUG_DIS_LOC_ERROR_PROC 0x10000 +#define AOM_DEBUG_DIS_RECYCLE_MMU_TAIL 0x20000 +#define AV1_DEBUG_DUMP_PIC_LIST 0x40000 +#define AV1_DEBUG_TRIG_SLICE_SEGMENT_PROC 0x80000 +#define AOM_DEBUG_USE_FIXED_MV_BUF_SIZE 0x100000 +#define AV1_DEBUG_LOAD_UCODE_FROM_FILE 0x200000 +#define AV1_DEBUG_FORCE_SEND_AGAIN 0x400000 +#define AV1_DEBUG_DUMP_DATA 0x800000 +#define AV1_DEBUG_CACHE 0x1000000 +#define AV1_DEBUG_CACHE_HIT_RATE 0x2000000 +#define AV1_DEBUG_SEI_DETAIL 0x4000000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 +#if 1 +/*def MULTI_INSTANCE_SUPPORT*/ +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_V4L_DETAIL 0x10000000 +#define PRINT_FLAG_VDEC_STATUS 0x20000000 +#define PRINT_FLAG_VDEC_DETAIL 0x40000000 +#define PRINT_FLAG_VDEC_DATA 0x80000000 +#endif + +int av1_print2(int flag, const char *fmt, ...); + +unsigned char av1_is_debug(int flag); + +#endif +
diff --git a/drivers/frame_provider/decoder/vav1/vav1.c b/drivers/frame_provider/decoder/vav1/vav1.c new file mode 100644 index 0000000..c67757a --- /dev/null +++ b/drivers/frame_provider/decoder/vav1/vav1.c
@@ -0,0 +1,11656 @@ + /* + * drivers/amlogic/amports/vav1.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/sched/clock.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/slab.h> +#include <linux/amlogic/tee.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include <linux/crc32.h> + +#define MEM_NAME "codec_av1" +/* #include <mach/am_regs.h> */ +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../utils/vdec.h" +#include "../utils/amvdec.h" +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +#include "../utils/vdec_profile.h" +#endif + +#include 
<linux/amlogic/media/video_sink/video.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../utils/config_parser.h" +#include "../utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../../../amvdec_ports/vdec_drv_base.h" + +//#define DEBUG_UCODE_LOG +#define DEBUG_CMD +#define DEBUG_CRC_ERROR + +#define SUPPORT_V4L2 +//#define DEBUG_USE_VP9_DEVICE_NAME +//#define BUFMGR_ONLY_OLD_CHIP + +#ifdef SUPPORT_V4L2 +#include "../utils/vdec_v4l2_buffer_ops.h" +#include <media/v4l2-mem2mem.h> +#endif +#include "../../../amvdec_ports/utils/common.h" +#include "../utils/vdec_feature.h" + +#define AML +#include "aom_av1_define.h" +#include "av1_global.h" + +#define DUMP_FILMGRAIN +#define MIX_STREAM_SUPPORT +//#define MV_USE_FIXED_BUF +//#define USE_SPEC_BUF_FOR_MMU_HEAD + +#define AOM_AV1_DBLK_INIT +#define AOM_AV1_UPSCALE_INIT + +#define USE_DEC_PIC_END + +#define SANITY_CHECK +#define CO_MV_COMPRESS + +#include "vav1.h" + +#define FGS_TABLE_SIZE (512 * 128 / 8) + +#define AV1_GMC_PARAM_BUFF_ADDR 0x316d +#define HEVCD_MPP_DECOMP_AXIURG_CTL 0x34c7 +#define HEVC_FGS_IDX 0x3660 +#define HEVC_FGS_DATA 0x3661 +#define HEVC_FGS_CTRL 0x3662 +#define AV1_SKIP_MODE_INFO 0x316c +#define AV1_QUANT_WR 0x3146 +#define AV1_SEG_W_ADDR 0x3165 +#define AV1_SEG_R_ADDR 0x3166 +#define AV1_REF_SEG_INFO 0x3171 +#define HEVC_ASSIST_PIC_SIZE_FB_READ 0x300d +#define PARSER_REF_SCALE_ENBL 0x316b +#define HEVC_MPRED_MV_RPTR_1 0x3263 +#define HEVC_MPRED_MV_RPTR_2 0x3264 +#define HEVC_SAO_CTRL9 0x362d +#define HEVC_FGS_TABLE_START 0x3666 +#define HEVC_FGS_TABLE_LENGTH 0x3667 +#define HEVC_DBLK_CDEF0 0x3515 +#define HEVC_DBLK_CDEF1 0x3516 +#define HEVC_DBLK_UPS1 0x351c +#define HEVC_DBLK_UPS2 0x351d +#define HEVC_DBLK_UPS3 0x351e +#define HEVC_DBLK_UPS4 0x351f +#define HEVC_DBLK_UPS5 0x3520 +#define AV1_UPSCALE_X0_QN 0x316e +#define AV1_UPSCALE_STEP_QN 0x316f +#define HEVC_DBLK_DBLK0 0x3523 +#define HEVC_DBLK_DBLK1 0x3524 +#define HEVC_DBLK_DBLK2 0x3525 + 
+#define HW_MASK_FRONT 0x1 +#define HW_MASK_BACK 0x2 + +#define AV1D_MPP_REFINFO_TBL_ACCCONFIG 0x3442 +#define AV1D_MPP_REFINFO_DATA 0x3443 +#define AV1D_MPP_REF_SCALE_ENBL 0x3441 +#define HEVC_MPRED_CTRL4 0x324c +#define HEVC_CM_HEADER_START_ADDR 0x3628 +#define HEVC_DBLK_CFGB 0x350b +#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464 +#define HEVC_SAO_MMU_VH1_ADDR 0x363b +#define HEVC_SAO_MMU_VH0_ADDR 0x363a + +#define HEVC_MV_INFO 0x310d +#define HEVC_QP_INFO 0x3137 +#define HEVC_SKIP_INFO 0x3136 + +#define HEVC_CM_BODY_LENGTH2 0x3663 +#define HEVC_CM_HEADER_OFFSET2 0x3664 +#define HEVC_CM_HEADER_LENGTH2 0x3665 + +#define HEVC_CM_HEADER_START_ADDR2 0x364a +#define HEVC_SAO_MMU_DMA_CTRL2 0x364c +#define HEVC_SAO_MMU_VH0_ADDR2 0x364d +#define HEVC_SAO_MMU_VH1_ADDR2 0x364e +#define HEVC_SAO_MMU_STATUS2 0x3650 +#define HEVC_DW_VH0_ADDDR 0x365e +#define HEVC_DW_VH1_ADDDR 0x365f + +#ifdef BUFMGR_ONLY_OLD_CHIP +#undef AV1_SKIP_MODE_INFO +#define AV1_SKIP_MODE_INFO HEVC_ASSIST_SCRATCH_B +#endif + + +#define AOM_AV1_DEC_IDLE 0 +#define AOM_AV1_DEC_FRAME_HEADER 1 +#define AOM_AV1_DEC_TILE_END 2 +#define AOM_AV1_DEC_TG_END 3 +#define AOM_AV1_DEC_LCU_END 4 +#define AOM_AV1_DECODE_SLICE 5 +#define AOM_AV1_SEARCH_HEAD 6 +#define AOM_AV1_DUMP_LMEM 7 +#define AOM_AV1_FGS_PARAM_CONT 8 +#define AOM_AV1_DISCARD_NAL 0x10 +#define AOM_AV1_RESULT_NEED_MORE_BUFFER 0x11 + +/*status*/ +#define AOM_AV1_DEC_PIC_END 0xe0 +/*AOM_AV1_FGS_PARA: +Bit[11] - 0 Read, 1 - Write +Bit[10:8] - film_grain_params_ref_idx, For Write request +*/ +#define AOM_AV1_FGS_PARAM 0xe1 +#define AOM_AV1_DEC_PIC_END_PRE 0xe2 +#define AOM_AV1_HEAD_PARSER_DONE 0xf0 +#define AOM_AV1_HEAD_SEARCH_DONE 0xf1 +#define AOM_AV1_SEQ_HEAD_PARSER_DONE 0xf2 +#define AOM_AV1_FRAME_HEAD_PARSER_DONE 0xf3 +#define AOM_AV1_FRAME_PARSER_DONE 0xf4 +#define AOM_AV1_REDUNDANT_FRAME_HEAD_PARSER_DONE 0xf5 +#define HEVC_ACTION_DONE 0xff + +#define AOM_DECODE_BUFEMPTY 0x20 +#define AOM_DECODE_TIMEOUT 0x21 +#define AOM_SEARCH_BUFEMPTY 0x22 +#define 
AOM_DECODE_OVER_SIZE 0x23 +#define AOM_EOS 0x24 +#define AOM_NAL_DECODE_DONE 0x25 + +#define VF_POOL_SIZE 32 + +#undef pr_info +#define pr_info printk + +#define DECODE_MODE_SINGLE ((0x80 << 24) | 0) +#define DECODE_MODE_MULTI_STREAMBASE ((0x80 << 24) | 1) +#define DECODE_MODE_MULTI_FRAMEBASE ((0x80 << 24) | 2) +#define DECODE_MODE_SINGLE_LOW_LATENCY ((0x80 << 24) | 3) +#define DECODE_MODE_MULTI_FRAMEBASE_NOHEAD ((0x80 << 24) | 4) + +#define AV1_TRIGGER_FRAME_DONE 0x100 +#define AV1_TRIGGER_FRAME_ENABLE 0x200 + +#define MV_MEM_UNIT 0x240 +/*--------------------------------------------------- + * Include "parser_cmd.h" + *--------------------------------------------------- + */ +#define PARSER_CMD_SKIP_CFG_0 0x0000090b + +#define PARSER_CMD_SKIP_CFG_1 0x1b14140f + +#define PARSER_CMD_SKIP_CFG_2 0x001b1910 + +#define PARSER_CMD_NUMBER 37 + +/*#define HEVC_PIC_STRUCT_SUPPORT*/ +/* to remove, fix build error */ + +/*#define CODEC_MM_FLAGS_FOR_VDECODER 0*/ + +#define MULTI_INSTANCE_SUPPORT +#define SUPPORT_10BIT +/* #define ERROR_HANDLE_DEBUG */ + +#ifndef STAT_KTHREAD +#define STAT_KTHREAD 0x40 +#endif + +#ifdef MULTI_INSTANCE_SUPPORT +#define MAX_DECODE_INSTANCE_NUM 9 + +#ifdef DEBUG_USE_VP9_DEVICE_NAME +#define MULTI_DRIVER_NAME "ammvdec_vp9" +#else +#define MULTI_DRIVER_NAME "ammvdec_av1" +#endif + +#define AUX_BUF_ALIGN(adr) ((adr + 0xf) & (~0xf)) +#ifdef DEBUG_UCODE_LOG +static u32 prefix_aux_buf_size; +static u32 suffix_aux_buf_size; +#else +static u32 prefix_aux_buf_size = (16 * 1024); +static u32 suffix_aux_buf_size; +#endif +#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD) +//#define UCODE_LOG_BUF_SIZE (16 * 1024) +#define UCODE_LOG_BUF_SIZE (1024 * 1024) +#endif +static unsigned int max_decode_instance_num + = MAX_DECODE_INSTANCE_NUM; +static unsigned int decode_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int display_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int max_process_time[MAX_DECODE_INSTANCE_NUM]; +static unsigned int 
run_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int input_empty[MAX_DECODE_INSTANCE_NUM]; +static unsigned int not_run_ready[MAX_DECODE_INSTANCE_NUM]; +#ifdef AOM_AV1_MMU_DW +static unsigned int dw_mmu_enable[MAX_DECODE_INSTANCE_NUM]; +#endif + +static u32 decode_timeout_val = 600; +static int start_decode_buf_level = 0x8000; +//static u32 work_buf_size; +static u32 force_pts_unstable; +static u32 mv_buf_margin = REF_FRAMES; +static u32 mv_buf_dynamic_alloc; +static u32 force_max_one_mv_buffer_size; + +/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */ +/* double_write_mode: + * 0, no double write; + * 1, 1:1 ratio; + * 2, (1/4):(1/4) ratio; + * 3, (1/4):(1/4) ratio, with both compressed frame included + * 4, (1/2):(1/2) ratio; + * 5, (1/2):(1/2) ratio, with both compressed frame included + * 8, (1/8):(1/8) ratio; + * 0x10, double write only + * 0x20, mmu double write + * 0x100, if > 1080p,use mode 4,else use mode 1; + * 0x200, if > 1080p,use mode 2,else use mode 1; + * 0x300, if > 720p, use mode 4, else use mode 1; + */ +static u32 double_write_mode; + +#ifdef DEBUG_USE_VP9_DEVICE_NAME +#define DRIVER_NAME "amvdec_vp9" +#define DRIVER_HEADER_NAME "amvdec_vp9_header" +#else +#define DRIVER_NAME "amvdec_av1" +#define DRIVER_HEADER_NAME "amvdec_av1_header" +#endif + +#define PUT_INTERVAL (HZ/100) +#define ERROR_SYSTEM_RESET_COUNT 200 + +#define PTS_NORMAL 0 +#define PTS_NONE_REF_USE_DURATION 1 + +#define PTS_MODE_SWITCHING_THRESHOLD 3 +#define PTS_MODE_SWITCHING_RECOVERY_THREASHOLD 3 + +#define DUR2PTS(x) ((x)*90/96) +#define PTS2DUR(x) ((x)*96/90) +#define PTS2DUR_u64(x) (div_u64((x)*96, 90)) + +struct AV1HW_s; +static int vav1_vf_states(struct vframe_states *states, void *); +static struct vframe_s *vav1_vf_peek(void *); +static struct vframe_s *vav1_vf_get(void *); +static void vav1_vf_put(struct vframe_s *, void *); +static int vav1_event_cb(int type, void *data, void *private_data); + +static int vav1_stop(struct AV1HW_s *hw); +#ifdef 
MULTI_INSTANCE_SUPPORT +static s32 vav1_init(struct vdec_s *vdec); +#else +static s32 vav1_init(struct AV1HW_s *hw); +#endif +static void vav1_prot_init(struct AV1HW_s *hw, u32 mask); +static int vav1_local_init(struct AV1HW_s *hw); +static void vav1_put_timer_func(struct timer_list *timer); +static void dump_data(struct AV1HW_s *hw, int size); +static unsigned int get_data_check_sum + (struct AV1HW_s *hw, int size); +static void dump_pic_list(struct AV1HW_s *hw); +static int vav1_mmu_map_alloc(struct AV1HW_s *hw); +static void vav1_mmu_map_free(struct AV1HW_s *hw); +static int av1_alloc_mmu( + struct AV1HW_s *hw, + int cur_buf_idx, + int pic_width, + int pic_height, + unsigned short bit_depth, + unsigned int *mmu_index_adr); + +#ifdef DEBUG_USE_VP9_DEVICE_NAME +static const char vav1_dec_id[] = "vvp9-dev"; + +#define PROVIDER_NAME "decoder.vp9" +#define MULTI_INSTANCE_PROVIDER_NAME "vdec.vp9" +#else +static const char vav1_dec_id[] = "vav1-dev"; + +#define PROVIDER_NAME "decoder.av1" +#define MULTI_INSTANCE_PROVIDER_NAME "vdec.av1" +#endif +#define DV_PROVIDER_NAME "dvbldec" + +static const struct vframe_operations_s vav1_vf_provider = { + .peek = vav1_vf_peek, + .get = vav1_vf_get, + .put = vav1_vf_put, + .event_cb = vav1_event_cb, + .vf_states = vav1_vf_states, +}; + +static struct vframe_provider_s vav1_vf_prov; + +static u32 bit_depth_luma; +static u32 bit_depth_chroma; +static u32 frame_width; +static u32 frame_height; +static u32 video_signal_type; +static u32 on_no_keyframe_skiped; +static u32 without_display_mode; +static u32 v4l_bitstream_id_enable = 1; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static u32 force_dv_enable; +#endif + +#define PROB_SIZE (496 * 2 * 4) +#define PROB_BUF_SIZE (0x5000) +#define COUNT_BUF_SIZE (0x300 * 4 * 4) +/*compute_losless_comp_body_size(4096, 2304, 1) = 18874368(0x1200000)*/ +#define MAX_FRAME_4K_NUM 0x1200 +#define MAX_FRAME_8K_NUM 0x4800 + +#define HEVC_ASSIST_MMU_MAP_ADDR 0x3009 + + +/*USE_BUF_BLOCK*/ 
+struct BUF_s { + int index; + unsigned int alloc_flag; + /*buffer */ + unsigned int cma_page_count; + unsigned long alloc_addr; + unsigned long start_adr; + unsigned int size; + + unsigned int free_start_adr; + ulong v4l_ref_buf_addr; + ulong header_addr; + u32 header_size; + u32 luma_size; + ulong chroma_addr; + u32 chroma_size; +} /*BUF_t */; + +struct MVBUF_s { + unsigned long start_adr; + unsigned int size; + int used_flag; +} /*MVBUF_t */; + +/*#define TEST_WR_PTR_INC*/ +/*#define WR_PTR_INC_NUM 128*/ +#define WR_PTR_INC_NUM 1 + +//#define SIMULATION +#define DOS_PROJECT +#undef MEMORY_MAP_IN_REAL_CHIP + +/*#undef DOS_PROJECT*/ +/*#define MEMORY_MAP_IN_REAL_CHIP*/ + +/*#define CONFIG_HEVC_CLK_FORCED_ON*/ +/*#define ENABLE_SWAP_TEST*/ +#ifndef BUFMGR_ONLY_OLD_CHIP +#define MCRCC_ENABLE +#endif + +#ifdef AV1_10B_NV21 +#else +#define LOSLESS_COMPRESS_MODE +#endif + +static u32 get_picture_qos; + +static u32 debug; + +static bool is_reset; +/*for debug*/ +static u32 force_bufspec; +/* + udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode detail print + bit [31:16] not 0, pos to dump lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 udebug_pause_decode_idx; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static u32 dv_toggle_prov_name; +#endif + +static u32 run_ready_min_buf_num = 2; +#ifdef DEBUG_CRC_ERROR +/* + bit[4] fill zero in header before starting + bit[5] dump mmu header always + + bit[6] dump mv buffer always + + bit[8] delay after decoding + bit[31~16] delayed mseconds +*/ +static u32 crc_debug_flag; +#endif +#ifdef DEBUG_CMD +static u32 header_dump_size = 0x10000; +static u32 
debug_cmd_wait_count; +static u32 debug_cmd_wait_type; +#endif +# +#define DEBUG_REG +#ifdef DEBUG_REG +void AV1_WRITE_VREG_DBG2(unsigned int adr, unsigned int val, int line) +{ + if (debug & AV1_DEBUG_REG) + pr_info("%d:%s(%x, %x)\n", line, __func__, adr, val); + if (adr != 0) + WRITE_VREG(adr, val); +} + +#undef WRITE_VREG +#define WRITE_VREG(a,v) AV1_WRITE_VREG_DBG2(a,v,__LINE__) +#endif + +#define FRAME_CNT_WINDOW_SIZE 59 +#define RATE_CORRECTION_THRESHOLD 5 +/************************************************** + +AV1 buffer management start + +***************************************************/ +#define MMU_COMPRESS_HEADER_SIZE_1080P 0x10000 +#define MMU_COMPRESS_HEADER_SIZE_4K 0x48000 +#define MMU_COMPRESS_HEADER_SIZE_8K 0x120000 + +//#define MMU_COMPRESS_HEADER_SIZE 0x48000 +//#define MAX_ONE_MV_BUFFER_SIZE 0x260000 +//#define MAX_ONE_MV_BUFFER_SIZE 0x130000 + +#define MAX_ONE_MV_BUFFER_SIZE_1080P 0x20400 +#define MAX_ONE_MV_BUFFER_SIZE_4K 0x91400 +#define MAX_ONE_MV_BUFFER_SIZE_8K 0x244800 +/*to support tm2revb and sc2*/ +#define MAX_ONE_MV_BUFFER_SIZE_1080P_TM2REVB 0x26400 +#define MAX_ONE_MV_BUFFER_SIZE_4K_TM2REVB 0xac400 +#define MAX_ONE_MV_BUFFER_SIZE_8K_TM2REVB 0x2b0800 + +static int vav1_mmu_compress_header_size(struct AV1HW_s *hw); + +//#define MMU_COMPRESS_HEADER_SIZE 0x48000 +//#define MMU_COMPRESS_HEADER_SIZE_DW MMU_COMPRESS_HEADER_SIZE +//#define MMU_COMPRESS_8K_HEADER_SIZE (0x48000*4) +#define MAX_SIZE_8K (8192 * 4608) +#define MAX_SIZE_4K (4096 * 2304) +#define MAX_SIZE_2K (1920 * 1088) +#define IS_8K_SIZE(w, h) (((w) * (h)) > MAX_SIZE_4K) +#define IS_4K_SIZE(w, h) (((w) * (h)) > (1920*1088)) + +#define INVALID_IDX -1 /* Invalid buffer index.*/ + + +/*4 scratch frames for the new frames to support a maximum of 4 cores decoding + *in parallel, 3 for scaled references on the encoder. + *TODO(hkuang): Add ondemand frame buffers instead of hardcoding the number + * // of framebuffers. 
+ *TODO(jkoleszar): These 3 extra references could probably come from the + *normal reference pool. + */ +//#define FRAME_BUFFERS (REF_FRAMES + 16) +//#define REF_FRAMES_4K (6) +#define REF_FRAMES_4K REF_FRAMES + +#ifdef USE_SPEC_BUF_FOR_MMU_HEAD +#define HEADER_FRAME_BUFFERS (0) +#elif (defined AOM_AV1_MMU_DW) +#define HEADER_FRAME_BUFFERS (2 * FRAME_BUFFERS) +#else +#define HEADER_FRAME_BUFFERS (FRAME_BUFFERS) +#endif +#define MAX_BUF_NUM (FRAME_BUFFERS) +#define MV_BUFFER_NUM FRAME_BUFFERS + +//#define FRAME_CONTEXTS_LOG2 2 +//#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2) +/*buffer + header buffer + workspace*/ +#ifdef MV_USE_FIXED_BUF +#define MAX_BMMU_BUFFER_NUM (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + 1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n) +#define WORK_SPACE_BUF_ID (FRAME_BUFFERS + HEADER_FRAME_BUFFERS) +#else +#define MAX_BMMU_BUFFER_NUM \ + (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + MV_BUFFER_NUM + 1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n) +#define MV_BUFFER_IDX(n) (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + n) +#define WORK_SPACE_BUF_ID \ + (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + MV_BUFFER_NUM) +#endif +#ifdef AOM_AV1_MMU_DW +#define DW_HEADER_BUFFER_IDX(n) (HEADER_BUFFER_IDX(HEADER_FRAME_BUFFERS/2) + n) +#endif + + +static void set_canvas(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic_config); + +static void fill_frame_info(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *frame, + unsigned int framesize, + unsigned int pts); + + +static int compute_losless_comp_body_size(int width, int height, + uint8_t is_bit_depth_10); + +void clear_frame_buf_ref_count(AV1Decoder *pbi); + +#ifdef MULTI_INSTANCE_SUPPORT +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_CONFIG_PARAM 3 +#define DEC_RESULT_ERROR 4 +#define DEC_INIT_PICLIST 5 +#define DEC_UNINIT_PICLIST 6 +#define DEC_RESULT_GET_DATA 7 +#define 
DEC_RESULT_GET_DATA_RETRY 8 +#define DEC_RESULT_EOS 9 +#define DEC_RESULT_FORCE_EXIT 10 + +#define DEC_S1_RESULT_NONE 0 +#define DEC_S1_RESULT_DONE 1 +#define DEC_S1_RESULT_FORCE_EXIT 2 +#define DEC_S1_RESULT_TEST_TRIGGER_DONE 0xf0 + +#ifdef FB_DECODING_TEST_SCHEDULE +#define TEST_SET_NONE 0 +#define TEST_SET_PIC_DONE 1 +#define TEST_SET_S2_DONE 2 +#endif + +static void av1_work(struct work_struct *work); +#endif + +#ifdef DUMP_FILMGRAIN +u32 fg_dump_index = 0xff; +#endif + +#ifdef AOM_AV1_DBLK_INIT +struct loop_filter_info_n_s; +struct loopfilter; +struct segmentation_lf; +#endif +struct AV1HW_s { + AV1Decoder *pbi; + union param_u aom_param; + unsigned char frame_decoded; + unsigned char one_compressed_data_done; + unsigned char new_compressed_data; +#if 1 +/*def CHECK_OBU_REDUNDANT_FRAME_HEADER*/ + int obu_frame_frame_head_come_after_tile; +#endif + unsigned char index; + + struct device *cma_dev; + struct platform_device *platform_dev; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + struct vframe_chunk_s *chunk; + int dec_result; + struct work_struct work; + struct work_struct set_clk_work; + u32 start_shift_bytes; + u32 data_size; + + struct BuffInfo_s work_space_buf_store; + unsigned long buf_start; + u32 buf_size; + u32 cma_alloc_count; + unsigned long cma_alloc_addr; + uint8_t eos; + unsigned long int start_process_time; + unsigned last_lcu_idx; + int decode_timeout_count; + unsigned timeout_num; + int save_buffer_mode; + + int double_write_mode; + + long used_4k_num; + + unsigned char m_ins_flag; + char *provider_name; + union param_u param; + int frame_count; + int pic_count; + u32 stat; + struct timer_list timer; + u32 frame_dur; + u32 frame_ar; + int fatal_error; + uint8_t init_flag; + uint8_t config_next_ref_info_flag; + uint8_t first_sc_checked; + uint8_t process_busy; +#define PROC_STATE_INIT 0 +#define PROC_STATE_DECODESLICE 1 +#define PROC_STATE_SENDAGAIN 2 + uint8_t process_state; + u32 ucode_pause_pos; + + int show_frame_num; + 
struct buff_s mc_buf_spec; + struct dec_sysinfo vav1_amstream_dec_info; + void *rpm_addr; + void *lmem_addr; + dma_addr_t rpm_phy_addr; + dma_addr_t lmem_phy_addr; + unsigned short *lmem_ptr; + unsigned short *debug_ptr; +#ifdef DUMP_FILMGRAIN + dma_addr_t fg_phy_addr; + unsigned char *fg_ptr; + void *fg_addr; +#endif + u32 fgs_valid; + + u8 aux_data_dirty; + u32 prefix_aux_size; + u32 suffix_aux_size; + void *aux_addr; + dma_addr_t aux_phy_addr; + char *dv_data_buf; + int dv_data_size; +#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD) + void *ucode_log_addr; + dma_addr_t ucode_log_phy_addr; +#endif + + void *prob_buffer_addr; + void *count_buffer_addr; + dma_addr_t prob_buffer_phy_addr; + dma_addr_t count_buffer_phy_addr; + + void *frame_mmu_map_addr; + dma_addr_t frame_mmu_map_phy_addr; +#ifdef AOM_AV1_MMU_DW + void *dw_frame_mmu_map_addr; + dma_addr_t dw_frame_mmu_map_phy_addr; +#endif + unsigned int use_cma_flag; + + struct BUF_s m_BUF[MAX_BUF_NUM]; + struct MVBUF_s m_mv_BUF[MV_BUFFER_NUM]; + u32 used_buf_num; + u32 mv_buf_margin; + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(pending_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + u32 vf_pre_count; + u32 vf_get_count; + u32 vf_put_count; + int buf_num; + int pic_num; + int lcu_size_log2; + unsigned int losless_comp_body_size; + + u32 video_signal_type; + + u32 pts_unstable; + u32 last_chunk_pts; + u32 pts_diff_count; + u64 pts_diff_sum; + + bool get_frame_dur; + + u32 saved_resolution; + /**/ + struct AV1_Common_s common; + struct RefCntBuffer_s *cur_buf; + int refresh_frame_flags; + uint8_t need_resync; + uint8_t hold_ref_buf; + uint8_t ready_for_new_data; + struct BufferPool_s av1_buffer_pool; + + struct BuffInfo_s *work_space_buf; + + struct buff_s *mc_buf; + + unsigned int frame_width; + unsigned int frame_height; + + unsigned short *rpm_ptr; + int init_pic_w; + int init_pic_h; + 
int lcu_total; + + int current_lcu_size; + + int slice_type; + + int skip_flag; + int decode_idx; + int result_done_count; + uint8_t has_keyframe; + uint8_t has_sequence; + uint8_t wait_buf; + uint8_t error_flag; + + /* bit 0, for decoding; bit 1, for displaying */ + uint8_t ignore_bufmgr_error; + int PB_skip_mode; + int PB_skip_count_after_decoding; + /*hw*/ + + /**/ + struct vdec_info *gvs; + + u32 pre_stream_offset; + + unsigned int dec_status; + u32 last_put_idx; + int new_frame_displayed; + void *mmu_box; + void *bmmu_box; + int mmu_enable; +#ifdef AOM_AV1_MMU_DW + void *mmu_box_dw; + int dw_mmu_enable; +#endif + struct vframe_master_display_colour_s vf_dp; + struct firmware_s *fw; + int max_pic_w; + int max_pic_h; + int buffer_spec_index; + int32_t max_one_mv_buffer_size; + + int need_cache_size; + u64 sc_start_time; + bool postproc_done; + int low_latency_flag; + bool no_head; + bool pic_list_init_done; + bool pic_list_init_done2; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + int frameinfo_enable; + struct vframe_qos_s vframe_qos; + +#ifdef AOM_AV1_DBLK_INIT + /* + * malloc may not work in real chip, please allocate memory for the following structures + */ + struct loop_filter_info_n_s *lfi; + struct loopfilter *lf; + struct segmentation_lf *seg_4lf; +#endif + u32 mem_map_mode; + u32 dynamic_buf_num_margin; + struct vframe_s vframe_dummy; + u32 res_ch_flag; + int buffer_wrap[FRAME_BUFFERS]; + int sidebind_type; + int sidebind_channel_id; + u32 cur_obu_type; + u32 multi_frame_cnt; + u32 endian; + u32 run_ready_min_buf_num; + int one_package_frame_cnt; + ulong fb_token; + dma_addr_t rdma_phy_adr; + unsigned *rdma_adr; + struct trace_decoder_name trace; +}; + +static void av1_dump_state(struct vdec_s *vdec); + +int av1_print(struct AV1HW_s *hw, + int flag, const char *fmt, ...) 
+{ +#define HEVC_PRINT_BUF 512 + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + + if (hw == NULL || + (flag == 0) || + (debug & flag)) { + va_list args; + + va_start(args, fmt); + if (hw) + len = sprintf(buf, "[%d]", hw->index); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; +} + +unsigned char av1_is_debug(int flag) +{ + if ((flag == 0) || (debug & flag)) + return 1; + + return 0; +} + +int av1_print2(int flag, const char *fmt, ...) +{ + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + + if ((flag == 0) || + (debug & flag)) { + va_list args; + + va_start(args, fmt); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; + +} + +static int is_oversize(int w, int h) +{ + int max = (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)? + MAX_SIZE_8K : MAX_SIZE_4K; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) + max = MAX_SIZE_2K; + + if (w <= 0 || h <= 0) + return true; + + if (h != 0 && (w > max / h)) + return true; + + return false; +} + +static int v4l_alloc_and_config_pic(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic); + +static inline bool close_to(int a, int b, int m) +{ + return (abs(a - b) < m) ? true : false; +} + +#ifdef MULTI_INSTANCE_SUPPORT +static int av1_print_cont(struct AV1HW_s *hw, + int flag, const char *fmt, ...) 
+{ + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + + if (hw == NULL || + (flag == 0) || + (debug & flag)) { + va_list args; + + va_start(args, fmt); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; +} + +static void trigger_schedule(struct AV1HW_s *hw) +{ + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode && + !hw->v4l_params_parsed) + vdec_v4l_write_frame_sync(ctx); + } + + if (hw->vdec_cb) + hw->vdec_cb(hw_to_vdec(hw), hw->vdec_cb_arg); +} + +static void reset_process_time(struct AV1HW_s *hw) +{ + if (hw->start_process_time) { + unsigned process_time = + 1000 * (jiffies - hw->start_process_time) / HZ; + hw->start_process_time = 0; + if (process_time > max_process_time[hw->index]) + max_process_time[hw->index] = process_time; + } +} + +static void start_process_time(struct AV1HW_s *hw) +{ + hw->start_process_time = jiffies; + hw->decode_timeout_count = 0; + hw->last_lcu_idx = 0; +} + +static void timeout_process(struct AV1HW_s *hw) +{ + reset_process_time(hw); + if (hw->process_busy) { + av1_print(hw, + 0, "%s decoder timeout but process_busy\n", __func__); + if (debug) + av1_print(hw, 0, "debug disable timeout notify\n"); + return; + } + hw->timeout_num++; + amhevc_stop(); + av1_print(hw, + 0, "%s decoder timeout\n", __func__); + + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); +} + +static u32 get_valid_double_write_mode(struct AV1HW_s *hw) +{ + u32 dw = ((double_write_mode & 0x80000000) == 0) ? 
+ hw->double_write_mode : + (double_write_mode & 0x7fffffff); + if (dw & 0x20) { + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T3) + && ((dw & 0xf) == 2 || (dw & 0xf) == 3)) { + pr_info("MMU doueble write 1:4 not supported !!!\n"); + dw = 0; + } + } + return dw; +} + +static int get_double_write_mode(struct AV1HW_s *hw) +{ + u32 valid_dw_mode = get_valid_double_write_mode(hw); + u32 dw; + int w, h; + struct AV1_Common_s *cm = &hw->common; + struct PIC_BUFFER_CONFIG_s *cur_pic_config; + + if (!cm->cur_frame) + return 1;/*no valid frame,*/ + + if (hw->is_used_v4l) { + unsigned int out; + + vdec_v4l_get_dw_mode(hw->v4l2_ctx, &out); + dw = out; + return dw; + } + + cur_pic_config = &cm->cur_frame->buf; + w = cur_pic_config->y_crop_width; + h = cur_pic_config->y_crop_height; + + dw = 0x1; /*1:1*/ + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + default: + dw = valid_dw_mode; + break; + } + return dw; +} + +/* for double write buf alloc */ +static int get_double_write_mode_init(struct AV1HW_s *hw) +{ + u32 valid_dw_mode = get_valid_double_write_mode(hw); + u32 dw; + int w = hw->init_pic_w; + int h = hw->init_pic_h; + + dw = 0x1; /*1:1*/ + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + default: + dw = valid_dw_mode; + break; + } + return dw; +} +#endif + +/* return page number */ +static int av1_mmu_page_num(struct AV1HW_s *hw, + int w, int h, int save_mode) +{ + int picture_size; + int cur_mmu_4k_number, max_frame_num; + + picture_size = compute_losless_comp_body_size(w, h, save_mode); + cur_mmu_4k_number = ((picture_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT); + + if (get_cpu_major_id() >= 
AM_MESON_CPU_MAJOR_ID_SM1) + max_frame_num = MAX_FRAME_8K_NUM; + else + max_frame_num = MAX_FRAME_4K_NUM; + + if (cur_mmu_4k_number > max_frame_num) { + pr_err("over max !! cur_mmu_4k_number 0x%x width %d height %d\n", + cur_mmu_4k_number, w, h); + return -1; + } + + return cur_mmu_4k_number; +} + +static struct internal_comp_buf* v4lfb_to_icomp_buf( + struct AV1HW_s *hw, + struct vdec_v4l2_buffer *fb) +{ + struct aml_video_dec_buf *aml_fb = NULL; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + + aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer); + + return &v4l2_ctx->comp_bufs[aml_fb->internal_index]; +} + +static struct internal_comp_buf* index_to_icomp_buf( + struct AV1HW_s *hw, int index) +{ + struct aml_video_dec_buf *aml_fb = NULL; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + + fb = (struct vdec_v4l2_buffer *) + hw->m_BUF[index].v4l_ref_buf_addr; + aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer); + + return &v4l2_ctx->comp_bufs[aml_fb->internal_index]; +} + +//#define MAX_4K_NUM 0x1200 +int av1_alloc_mmu( + struct AV1HW_s *hw, + int cur_buf_idx, + int pic_width, + int pic_height, + unsigned short bit_depth, + unsigned int *mmu_index_adr) +{ + int ret = 0; + int bit_depth_10 = (bit_depth == AOM_BITS_10); + int cur_mmu_4k_number; + + if (hw->double_write_mode & 0x10) + return 0; + + if (bit_depth >= AOM_BITS_12) { + hw->fatal_error = DECODER_FATAL_ERROR_SIZE_OVERFLOW; + pr_err("fatal_error, un support bit depth 12!\n\n"); + return -1; + } + + cur_mmu_4k_number = av1_mmu_page_num(hw, + pic_width, + pic_height, + bit_depth_10); + if (cur_mmu_4k_number < 0) + return -1; + + if (hw->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(hw, cur_buf_idx); + + ret = decoder_mmu_box_alloc_idx( + ibuf->mmu_box, + ibuf->index, + ibuf->frame_buffer_size, + mmu_index_adr); + } else { + ret = decoder_mmu_box_alloc_idx( + hw->mmu_box, + cur_buf_idx, + cur_mmu_4k_number, + 
mmu_index_adr); + } + + return ret; +} + +#ifdef AOM_AV1_MMU_DW +static int compute_losless_comp_body_size_dw(int width, int height, + uint8_t is_bit_depth_10); + +int av1_alloc_mmu_dw( + struct AV1HW_s *hw, + int cur_buf_idx, + int pic_width, + int pic_height, + unsigned short bit_depth, + unsigned int *mmu_index_adr) +{ + int ret = 0; + int bit_depth_10 = (bit_depth == AOM_BITS_10); + int picture_size; + int cur_mmu_4k_number, max_frame_num; + if (!hw->mmu_box_dw) { + pr_err("error no mmu box!\n"); + return -1; + } + if (hw->double_write_mode & 0x10) + return 0; + if (bit_depth >= AOM_BITS_12) { + hw->fatal_error = DECODER_FATAL_ERROR_SIZE_OVERFLOW; + pr_err("fatal_error, un support bit depth 12!\n\n"); + return -1; + } + picture_size = compute_losless_comp_body_size_dw(pic_width, pic_height, + bit_depth_10); + cur_mmu_4k_number = ((picture_size + (1 << 12) - 1) >> 12); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + max_frame_num = MAX_FRAME_8K_NUM; + else + max_frame_num = MAX_FRAME_4K_NUM; + + if (cur_mmu_4k_number > max_frame_num) { + pr_err("over max !! 
cur_mmu_4k_number 0x%x width %d height %d\n", + cur_mmu_4k_number, pic_width, pic_height); + return -1; + } + ret = decoder_mmu_box_alloc_idx( + hw->mmu_box_dw, + cur_buf_idx, + cur_mmu_4k_number, + mmu_index_adr); + return ret; +} +#endif + +#ifndef MV_USE_FIXED_BUF +static void dealloc_mv_bufs(struct AV1HW_s *hw) +{ + int i; + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (hw->m_mv_BUF[i].start_adr) { + if (debug) + pr_info( + "dealloc mv buf(%d) adr %ld size 0x%x used_flag %d\n", + i, hw->m_mv_BUF[i].start_adr, + hw->m_mv_BUF[i].size, + hw->m_mv_BUF[i].used_flag); + decoder_bmmu_box_free_idx( + hw->bmmu_box, + MV_BUFFER_IDX(i)); + hw->m_mv_BUF[i].start_adr = 0; + hw->m_mv_BUF[i].size = 0; + hw->m_mv_BUF[i].used_flag = 0; + } + } +} + +static int alloc_mv_buf(struct AV1HW_s *hw, + int i, int size) +{ + int ret = 0; + + if (hw->m_mv_BUF[i].start_adr && + size > hw->m_mv_BUF[i].size) { + dealloc_mv_bufs(hw); + } else if (hw->m_mv_BUF[i].start_adr) + return 0; + + if (decoder_bmmu_box_alloc_buf_phy + (hw->bmmu_box, + MV_BUFFER_IDX(i), size, + DRIVER_NAME, + &hw->m_mv_BUF[i].start_adr) < 0) { + hw->m_mv_BUF[i].start_adr = 0; + ret = -1; + } else { + hw->m_mv_BUF[i].size = size; + hw->m_mv_BUF[i].used_flag = 0; + ret = 0; + if (debug) { + pr_info( + "MV Buffer %d: start_adr %p size %x\n", + i, + (void *)hw->m_mv_BUF[i].start_adr, + hw->m_mv_BUF[i].size); + } + } + return ret; +} + +static int cal_mv_buf_size(struct AV1HW_s *hw, int pic_width, int pic_height) +{ + unsigned lcu_size = hw->current_lcu_size; + int extended_pic_width = (pic_width + lcu_size -1) + & (~(lcu_size - 1)); + int extended_pic_height = (pic_height + lcu_size -1) + & (~(lcu_size - 1)); + + int lcu_x_num = extended_pic_width / lcu_size; + int lcu_y_num = extended_pic_height / lcu_size; + int size_a, size_b, size; + + if (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_SC2) + /*tm2revb and sc2*/ + size_a = lcu_x_num * lcu_y_num * 16 * + ((lcu_size == 64) ? 
16 : 64); + else + size_a = lcu_x_num * lcu_y_num * 16 * + ((lcu_size == 64) ? 19 : 76); + + size_b = lcu_x_num * ((lcu_y_num >> 3) + + (lcu_y_num & 0x7)) * 16; + size = ((size_a + size_b) + 0xffff) & (~0xffff); + + if (debug & AOM_DEBUG_USE_FIXED_MV_BUF_SIZE) + size = hw->max_one_mv_buffer_size; + if (force_max_one_mv_buffer_size) + size = force_max_one_mv_buffer_size; + return size; +} + +static int init_mv_buf_list(struct AV1HW_s *hw) +{ + int i; + int ret = 0; + int count = MV_BUFFER_NUM; + int pic_width = hw->init_pic_w; + int pic_height = hw->init_pic_h; + int size = cal_mv_buf_size(hw, pic_width, pic_height); + + if (mv_buf_dynamic_alloc) + return 0; +#if 0 + if (mv_buf_margin > 0) + count = REF_FRAMES + mv_buf_margin; + if (hw->init_pic_w > 2048 && hw->init_pic_h > 1088) + count = REF_FRAMES_4K + mv_buf_margin; +#else + if (debug) + pr_info("%s, calculated mv size 0x%x\n", + __func__, size); + + if ((hw->is_used_v4l) && !IS_8K_SIZE(pic_width, pic_height)) { + size = 0x100000; + } + + if (hw->init_pic_w > 4096 && hw->init_pic_h > 2048) + count = REF_FRAMES_4K + hw->mv_buf_margin; + else if (hw->init_pic_w > 2048 && hw->init_pic_h > 1088) + count = REF_FRAMES_4K + hw->mv_buf_margin; + else + count = REF_FRAMES + hw->mv_buf_margin; + +#endif + if (debug) { + pr_info("%s w:%d, h:%d, count: %d, size 0x%x\n", + __func__, hw->init_pic_w, hw->init_pic_h, + count, size); + } + + for (i = 0; + i < count && i < MV_BUFFER_NUM; i++) { + if (alloc_mv_buf(hw, i, size) < 0) { + ret = -1; + break; + } + } + return ret; +} + +static int get_mv_buf(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + int i; + int ret = -1; + if (mv_buf_dynamic_alloc) { + int size = cal_mv_buf_size(hw, + pic_config->y_crop_width, pic_config->y_crop_height); + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (hw->m_mv_BUF[i].start_adr == 0) { + ret = i; + break; + } + } + if (i == MV_BUFFER_NUM) { + pr_info( + "%s: Error, mv buf MV_BUFFER_NUM is not enough\n", + __func__); + return ret; 
+ } + + if (alloc_mv_buf(hw, ret, size) >= 0) { + pic_config->mv_buf_index = ret; + pic_config->mpred_mv_wr_start_addr = + (hw->m_mv_BUF[ret].start_adr + 0xffff) & + (~0xffff); + } else { + pr_info( + "%s: Error, mv buf alloc fail\n", + __func__); + } + return ret; + } + + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (hw->m_mv_BUF[i].start_adr && + hw->m_mv_BUF[i].used_flag == 0) { + hw->m_mv_BUF[i].used_flag = 1; + ret = i; + break; + } + } + + if (ret >= 0) { + pic_config->mv_buf_index = ret; + pic_config->mpred_mv_wr_start_addr = + (hw->m_mv_BUF[ret].start_adr + 0xffff) & + (~0xffff); + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info( + "%s => %d (%d) size 0x%x\n", + __func__, ret, + pic_config->mpred_mv_wr_start_addr, + hw->m_mv_BUF[ret].size); + } else { + pr_info( + "%s: Error, mv buf is not enough\n", + __func__); + } + return ret; +} +static void put_mv_buf(struct AV1HW_s *hw, + int *mv_buf_index) +{ + int i = *mv_buf_index; + if (i >= MV_BUFFER_NUM) { + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info( + "%s: index %d beyond range\n", + __func__, i); + return; + } + if (mv_buf_dynamic_alloc) { + if (hw->m_mv_BUF[i].start_adr) { + if (debug) + pr_info( + "dealloc mv buf(%d) adr %ld size 0x%x used_flag %d\n", + i, hw->m_mv_BUF[i].start_adr, + hw->m_mv_BUF[i].size, + hw->m_mv_BUF[i].used_flag); + decoder_bmmu_box_free_idx( + hw->bmmu_box, + MV_BUFFER_IDX(i)); + hw->m_mv_BUF[i].start_adr = 0; + hw->m_mv_BUF[i].size = 0; + hw->m_mv_BUF[i].used_flag = 0; + } + *mv_buf_index = -1; + return; + } + + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info( + "%s(%d): used_flag(%d)\n", + __func__, i, + hw->m_mv_BUF[i].used_flag); + + *mv_buf_index = -1; + if (hw->m_mv_BUF[i].start_adr && + hw->m_mv_BUF[i].used_flag) + hw->m_mv_BUF[i].used_flag = 0; +} +static void put_un_used_mv_bufs(struct AV1HW_s *hw) +{ + struct AV1_Common_s *const cm = &hw->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + int i; + for (i = 0; i < hw->used_buf_num; ++i) { + if 
((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.index != -1) && + (frame_bufs[i].buf.mv_buf_index >= 0) + ) + put_mv_buf(hw, &frame_bufs[i].buf.mv_buf_index); + } +} +#endif + +static void init_pic_list_hw(struct AV1HW_s *pbi); + +static void update_hide_frame_timestamp(struct AV1HW_s *hw) +{ + RefCntBuffer *const frame_bufs = hw->common.buffer_pool->frame_bufs; + int i; + + for (i = 0; i < hw->used_buf_num; ++i) { + if ((!frame_bufs[i].show_frame) && + (frame_bufs[i].showable_frame) && + (!frame_bufs[i].buf.vf_ref) && + (frame_bufs[i].buf.BUF_index != -1)) { + frame_bufs[i].buf.timestamp = hw->chunk->timestamp; + frame_bufs[i].buf.pts = hw->chunk->pts; + frame_bufs[i].buf.pts64 = hw->chunk->pts64; + av1_print(hw, AV1_DEBUG_OUT_PTS, + "%s, update %d hide frame ts: %lld, pts %d, pts64 %lld\n", + __func__, i, frame_bufs[i].buf.timestamp, hw->chunk->pts, hw->chunk->pts64); + } + } +} + +static int get_free_fb_idx(AV1_COMMON *cm) +{ + int i; + RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; + + for (i = 0; i < FRAME_BUFFERS; ++i) { + if (frame_bufs[i].ref_count == 0 + && frame_bufs[i].buf.vf_ref == 0) + break; + } + + return (i != FRAME_BUFFERS) ? 
i : -1; +} + +static int v4l_get_free_fb(struct AV1HW_s *hw) +{ + struct AV1_Common_s *const cm = &hw->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + struct aml_vcodec_ctx * v4l = hw->v4l2_ctx; + struct v4l_buff_pool *pool = &v4l->cap_pool; + struct PIC_BUFFER_CONFIG_s *pic = NULL; + struct PIC_BUFFER_CONFIG_s *free_pic = NULL; + ulong flags; + int idx, i; + + lock_buffer_pool(cm->buffer_pool, flags); + + for (i = 0; i < pool->in; ++i) { + u32 state = (pool->seq[i] >> 16); + u32 index = (pool->seq[i] & 0xffff); + + switch (state) { + case V4L_CAP_BUFF_IN_DEC: + pic = &frame_bufs[i].buf; + if ((frame_bufs[i].ref_count == 0) && + (pic->vf_ref == 0) && + (pic->index != -1) && + pic->cma_alloc_addr) { + free_pic = pic; + } + break; + case V4L_CAP_BUFF_IN_M2M: + idx = get_free_fb_idx(cm); + if (idx < 0) + break; + + pic = &frame_bufs[idx].buf; + pic->y_crop_width = hw->frame_width; + pic->y_crop_height = hw->frame_height; + hw->buffer_wrap[idx] = index; + if (!v4l_alloc_and_config_pic(hw, pic)) { + set_canvas(hw, pic); + init_pic_list_hw(hw); + free_pic = pic; + } + break; + default: + pr_err("v4l buffer state err %d.\n", state); + break; + } + + if (free_pic) { + if (frame_bufs[i].buf.use_external_reference_buffers) { + // If this frame buffer's y_buffer, u_buffer, and v_buffer point to the + // external reference buffers. Restore the buffer pointers to point to the + // internally allocated memory. 
+ PIC_BUFFER_CONFIG *ybf = &frame_bufs[i].buf; + + ybf->y_buffer = ybf->store_buf_adr[0]; + ybf->u_buffer = ybf->store_buf_adr[1]; + ybf->v_buffer = ybf->store_buf_adr[2]; + ybf->use_external_reference_buffers = 0; + } + + frame_bufs[i].ref_count = 1; + break; + } + } + + if (free_pic && hw->chunk) { + free_pic->timestamp = hw->chunk->timestamp; + update_hide_frame_timestamp(hw); + } + + unlock_buffer_pool(cm->buffer_pool, flags); + + if (free_pic) { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *) + hw->m_BUF[free_pic->index].v4l_ref_buf_addr; + + fb->status = FB_ST_DECODER; + } + + if (debug & AV1_DEBUG_OUT_PTS) { + if (free_pic) { + pr_debug("%s, idx: %d, ts: %lld\n", + __func__, free_pic->index, free_pic->timestamp); + } else { + pr_debug("%s, av1 get free pic null\n", __func__); + } + } + + return free_pic ? free_pic->index : INVALID_IDX; +} + +static int get_free_fb(AV1_COMMON *cm) { + RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; + unsigned long flags; + int i; + + lock_buffer_pool(cm->buffer_pool, flags); + for (i = 0; i < FRAME_BUFFERS; ++i) { + if (frame_bufs[i].ref_count == 0 +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + && frame_bufs[i].buf.vf_ref == 0 +#endif + ) + break; + } + + if (i != FRAME_BUFFERS) { + if (frame_bufs[i].buf.use_external_reference_buffers) { + // If this frame buffer's y_buffer, u_buffer, and v_buffer point to the + // external reference buffers. Restore the buffer pointers to point to the + // internally allocated memory. + PIC_BUFFER_CONFIG *ybf = &frame_bufs[i].buf; + ybf->y_buffer = ybf->store_buf_adr[0]; + ybf->u_buffer = ybf->store_buf_adr[1]; + ybf->v_buffer = ybf->store_buf_adr[2]; + ybf->use_external_reference_buffers = 0; + } + + frame_bufs[i].ref_count = 1; + } else { + // We should never run out of free buffers. If this assertion fails, there + // is a reference leak. + //assert(0 && "Ran out of free frame buffers. 
Likely a reference leak."); + // Reset i to be INVALID_IDX to indicate no free buffer found. + i = INVALID_IDX; + } + + if (i != INVALID_IDX) { + struct PIC_BUFFER_CONFIG_s *pic = &frame_bufs[i].buf; + struct AV1HW_s *hw = container_of(cm, struct AV1HW_s, common); + + if (pic && hw->chunk) { + pic->timestamp = hw->chunk->timestamp; + pic->pts = hw->chunk->pts; + pic->pts64 = hw->chunk->pts64; + update_hide_frame_timestamp(hw); + } + + if (pic) + av1_print(hw, AV1_DEBUG_OUT_PTS, + "%s, idx: %d, ts: %lld, pts %d, pts64 %lld\n", + __func__, i, pic->timestamp, pic->pts, pic->pts64); + } + + unlock_buffer_pool(cm->buffer_pool, flags); + + return i; +} + +int get_free_frame_buffer(struct AV1_Common_s *cm) +{ + struct AV1HW_s *hw = container_of(cm, struct AV1HW_s, common); + + return hw->is_used_v4l ? v4l_get_free_fb(hw) : get_free_fb(cm); +} + +static int get_free_buf_count(struct AV1HW_s *hw) +{ + struct AV1_Common_s *const cm = &hw->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int i, free_buf_count = 0; + + if (hw->is_used_v4l) { + for (i = 0; i < hw->used_buf_num; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + frame_bufs[i].buf.cma_alloc_addr) { + free_buf_count++; + } + } + + if (ctx->cap_pool.dec < hw->used_buf_num) { + free_buf_count += + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx); + } + /* trigger to parse head data. 
*/ + if (!hw->v4l_params_parsed) { + free_buf_count = hw->run_ready_min_buf_num; + } + if ((debug & AV1_DEBUG_BUFMGR_MORE) && + (free_buf_count <= 0)) { + pr_info("%s, free count %d, m2m_ready %d\n", + __func__, + free_buf_count, + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)); + } + } else { + for (i = 0; i < hw->used_buf_num; ++i) + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.index != -1)) { + free_buf_count++; + } + } + + return free_buf_count; +} + +int aom_bufmgr_init(struct AV1HW_s *hw, struct BuffInfo_s *buf_spec_i, + struct buff_s *mc_buf_i) { + struct AV1_Common_s *cm = &hw->common; + if (debug) + pr_info("%s %d %p\n", __func__, __LINE__, hw->pbi); + hw->frame_count = 0; + hw->pic_count = 0; + hw->pre_stream_offset = 0; + spin_lock_init(&cm->buffer_pool->lock); + cm->prev_fb_idx = INVALID_IDX; + cm->new_fb_idx = INVALID_IDX; + hw->used_4k_num = -1; + cm->cur_fb_idx_mmu = INVALID_IDX; + pr_debug + ("After aom_bufmgr_init, prev_fb_idx : %d, new_fb_idx : %d\r\n", + cm->prev_fb_idx, cm->new_fb_idx); + hw->need_resync = 1; + + cm->current_video_frame = 0; + hw->ready_for_new_data = 1; + + /* private init */ + hw->work_space_buf = buf_spec_i; + if (!hw->mmu_enable) + hw->mc_buf = mc_buf_i; + + hw->rpm_addr = NULL; + hw->lmem_addr = NULL; +#ifdef DUMP_FILMGRAIN + hw->fg_addr = NULL; +#endif + hw->use_cma_flag = 0; + hw->decode_idx = 0; + hw->result_done_count = 0; + /*int m_uiMaxCUWidth = 1<<7;*/ + /*int m_uiMaxCUHeight = 1<<7;*/ + hw->has_keyframe = 0; + hw->has_sequence = 0; + hw->skip_flag = 0; + hw->wait_buf = 0; + hw->error_flag = 0; + + hw->buf_num = 0; + hw->pic_num = 0; + + hw->last_chunk_pts = 0; + hw->pts_diff_count = 0; + hw->pts_diff_sum = 0; + + return 0; +} + +/* +struct AV1HW_s av1_decoder; +union param_u av1_param; +*/ +/************************************************** + * + *AV1 buffer management end + * + *************************************************** + */ + + +#define 
HEVC_CM_BODY_START_ADDR 0x3626 +#define HEVC_CM_BODY_LENGTH 0x3627 +#define HEVC_CM_HEADER_LENGTH 0x3629 +#define HEVC_CM_HEADER_OFFSET 0x362b + +#define LOSLESS_COMPRESS_MODE + +/*#define DECOMP_HEADR_SURGENT*/ +#ifdef AV1_10B_NV21 +static u32 mem_map_mode = 2 /* 0:linear 1:32x32 2:64x32*/ +#else +static u32 mem_map_mode; /* 0:linear 1:32x32 2:64x32 ; m8baby test1902 */ +#endif +static u32 enable_mem_saving = 1; +static u32 force_w_h; + +static u32 force_fps; + + +const u32 av1_version = 201602101; +static u32 debug; +static u32 radr; +static u32 rval; +static u32 pop_shorts; +static u32 dbg_cmd; +static u32 dbg_skip_decode_index; +/* + * bit 0~3, for HEVCD_IPP_AXIIF_CONFIG endian config + * bit 8~23, for HEVC_SAO_CTRL1 endian config + */ +static u32 endian; +#define HEVC_CONFIG_BIG_ENDIAN ((0x880 << 8) | 0x8) +#define HEVC_CONFIG_LITTLE_ENDIAN ((0xff0 << 8) | 0xf) + +static u32 multi_frames_in_one_pack = 1; +#ifdef ERROR_HANDLE_DEBUG +static u32 dbg_nal_skip_flag; + /* bit[0], skip vps; bit[1], skip sps; bit[2], skip pps */ +static u32 dbg_nal_skip_count; +#endif +/*for debug*/ +static u32 decode_pic_begin; +static uint slice_parse_begin; +static u32 step; +#ifdef MIX_STREAM_SUPPORT +static u32 buf_alloc_width = 4096; +static u32 buf_alloc_height = 2304; +static u32 av1_max_pic_w = 4096; +static u32 av1_max_pic_h = 2304; + +static u32 dynamic_buf_num_margin = 3; +#else +static u32 buf_alloc_width; +static u32 buf_alloc_height; +static u32 dynamic_buf_num_margin = 7; +#endif +static u32 buf_alloc_depth = 10; +static u32 buf_alloc_size; +/* + *bit[0]: 0, + * bit[1]: 0, always release cma buffer when stop + * bit[1]: 1, never release cma buffer when stop + *bit[0]: 1, when stop, release cma buffer if blackout is 1; + *do not release cma buffer is blackout is not 1 + * + *bit[2]: 0, when start decoding, check current displayed buffer + * (only for buffer decoded by AV1) if blackout is 0 + * 1, do not check current displayed buffer + * + *bit[3]: 1, if blackout is not 
1, do not release current + * displayed cma buffer always. + */ +/* set to 1 for fast play; + * set to 8 for other case of "keep last frame" + */ +static u32 buffer_mode = 1; +/* buffer_mode_dbg: debug only*/ +static u32 buffer_mode_dbg = 0xffff0000; +/**/ + +/* + *bit 0, 1: only display I picture; + *bit 1, 1: only decode I picture; + */ +static u32 i_only_flag; + +static u32 low_latency_flag; + +static u32 no_head; + +static u32 max_decoding_time; +/* + *error handling + */ +/*error_handle_policy: + *bit 0: 0, auto skip error_skip_nal_count nals before error recovery; + *1, skip error_skip_nal_count nals before error recovery; + *bit 1 (valid only when bit0 == 1): + *1, wait vps/sps/pps after error recovery; + *bit 2 (valid only when bit0 == 0): + *0, auto search after error recovery (av1_recover() called); + *1, manual search after error recovery + *(change to auto search after get IDR: WRITE_VREG(NAL_SEARCH_CTL, 0x2)) + * + *bit 4: 0, set error_mark after reset/recover + * 1, do not set error_mark after reset/recover + *bit 5: 0, check total lcu for every picture + * 1, do not check total lcu + * + */ + +static u32 error_handle_policy; +/*static u32 parser_sei_enable = 1;*/ +#define MAX_BUF_NUM_NORMAL 16 +/*less bufs num 12 caused frame drop, nts failed*/ +#define MAX_BUF_NUM_LESS 14 +static u32 max_buf_num = MAX_BUF_NUM_NORMAL; +#define MAX_BUF_NUM_SAVE_BUF 8 + +static DEFINE_MUTEX(vav1_mutex); +#ifndef MULTI_INSTANCE_SUPPORT +static struct device *cma_dev; +#endif +#define HEVC_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0 +#define HEVC_FG_STATUS HEVC_ASSIST_SCRATCH_B +#define HEVC_RPM_BUFFER HEVC_ASSIST_SCRATCH_1 +#define AOM_AV1_ADAPT_PROB_REG HEVC_ASSIST_SCRATCH_3 +#define AOM_AV1_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_4 // changed to use HEVC_ASSIST_MMU_MAP_ADDR +#define AOM_AV1_DAALA_TOP_BUFFER HEVC_ASSIST_SCRATCH_5 +//#define HEVC_SAO_UP HEVC_ASSIST_SCRATCH_6 +//#define HEVC_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7 +#define AOM_AV1_CDF_BUFFER_W HEVC_ASSIST_SCRATCH_8 
+#define AOM_AV1_CDF_BUFFER_R HEVC_ASSIST_SCRATCH_9 +#define AOM_AV1_COUNT_SWAP_BUFFER HEVC_ASSIST_SCRATCH_A +#define AOM_AV1_SEG_MAP_BUFFER_W AV1_SEG_W_ADDR // HEVC_ASSIST_SCRATCH_B +#define AOM_AV1_SEG_MAP_BUFFER_R AV1_SEG_R_ADDR // HEVC_ASSIST_SCRATCH_C +//#define HEVC_sao_vb_size HEVC_ASSIST_SCRATCH_B +//#define HEVC_SAO_VB HEVC_ASSIST_SCRATCH_C +//#define HEVC_SCALELUT HEVC_ASSIST_SCRATCH_D +#define HEVC_WAIT_FLAG HEVC_ASSIST_SCRATCH_E +#define RPM_CMD_REG HEVC_ASSIST_SCRATCH_F +//#define HEVC_STREAM_SWAP_TEST HEVC_ASSIST_SCRATCH_L + +#ifdef MULTI_INSTANCE_SUPPORT +#define HEVC_DECODE_COUNT HEVC_ASSIST_SCRATCH_M +#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N +#else +#define HEVC_DECODE_PIC_BEGIN_REG HEVC_ASSIST_SCRATCH_M +#define HEVC_DECODE_PIC_NUM_REG HEVC_ASSIST_SCRATCH_N +#endif +#define AOM_AV1_SEGMENT_FEATURE AV1_QUANT_WR + +#define DEBUG_REG1 HEVC_ASSIST_SCRATCH_G +#define DEBUG_REG2 HEVC_ASSIST_SCRATCH_H + +#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_I +#define CUR_NAL_UNIT_TYPE HEVC_ASSIST_SCRATCH_J +#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K + +#define PIC_END_LCU_COUNT HEVC_ASSIST_SCRATCH_2 + +#define HEVC_AUX_ADR HEVC_ASSIST_SCRATCH_L +#define HEVC_AUX_DATA_SIZE HEVC_ASSIST_SCRATCH_7 +#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD) +#define HEVC_DBG_LOG_ADR HEVC_ASSIST_SCRATCH_C +#ifdef DEBUG_CMD +#define HEVC_D_ADR HEVC_ASSIST_SCRATCH_4 +#endif +#endif +/* + *ucode parser/search control + *bit 0: 0, header auto parse; 1, header manual parse + *bit 1: 0, auto skip for noneseamless stream; 1, no skip + *bit [3:2]: valid when bit1==0; + *0, auto skip nal before first vps/sps/pps/idr; + *1, auto skip nal before first vps/sps/pps + *2, auto skip nal before first vps/sps/pps, + * and not decode until the first I slice (with slice address of 0) + * + *3, auto skip before first I slice (nal_type >=16 && nal_type<=21) + *bit [15:4] nal skip count (valid when bit0 == 1 (manual mode) ) + *bit [16]: for NAL_UNIT_EOS when bit0 is 0: + * 0, send 
SEARCH_DONE to arm ; 1, do not send SEARCH_DONE to arm + *bit [17]: for NAL_SEI when bit0 is 0: + * 0, do not parse SEI in ucode; 1, parse SEI in ucode + *bit [31:20]: used by ucode for debug purpose + */ +#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I + /*[31:24] chip feature + 31: 0, use MBOX1; 1, use MBOX0 + [24:16] debug + 0x1, bufmgr only + */ +#define DECODE_MODE HEVC_ASSIST_SCRATCH_J +#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K + +#define RPM_BUF_SIZE ((RPM_END - RPM_BEGIN) * 2) +#define LMEM_BUF_SIZE (0x600 * 2) + +/* +#ifdef MAP_8K +static u32 seg_map_size = 0xd8000; +#else +static u32 seg_map_size = 0x36000; +#endif +*/ +//static u32 seg_map_size = 0x36000; + +//#define VBH_BUF_COUNT 4 +//#define VBH_BUF_SIZE_1080P ((((2 * 16 * 1088) + 0xffff) & (~0xffff)) * VBH_BUF_COUNT) +//#define VBH_BUF_SIZE_4K ((((2 * 16 * 2304) + 0xffff) & (~0xffff))) * VBH_BUF_COUNT) +//#define VBH_BUF_SIZE_8K ((((2 * 16 * 4608) + 0xffff) & (~0xffff))) * VBH_BUF_COUNT) + + /*mmu_vbh buf is used by HEVC_SAO_MMU_VH0_ADDR, HEVC_SAO_MMU_VH1_ADDR*/ +#define VBH_BUF_SIZE_1080P 0x3000 +#define VBH_BUF_SIZE_4K 0x5000 +#define VBH_BUF_SIZE_8K 0xa000 +#define VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh.buf_size / 2) + /*mmu_vbh_dw buf is used by HEVC_SAO_MMU_VH0_ADDR2,HEVC_SAO_MMU_VH1_ADDR2, + HEVC_DW_VH0_ADDDR, HEVC_DW_VH1_ADDDR*/ +#define DW_VBH_BUF_SIZE_1080P (VBH_BUF_SIZE_1080P * 2) +#define DW_VBH_BUF_SIZE_4K (VBH_BUF_SIZE_4K * 2) +#define DW_VBH_BUF_SIZE_8K (VBH_BUF_SIZE_8K * 2) +#define DW_VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh_dw.buf_size / 4) + +/* necessary 4K page size align for t7/t3 decoder and after. 
fix case1440 dec timeout */ +#define WORKBUF_ALIGN(addr) (ALIGN(addr, PAGE_SIZE)) + +#define WORK_BUF_SPEC_NUM 3 + +static struct BuffInfo_s aom_workbuff_spec[WORK_BUF_SPEC_NUM] = { + { //8M bytes + .max_width = 1920, + .max_height = 1088, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x1E00, + }, + .sao_abv = { + .buf_size = 0x0, //0x30000, + }, + .sao_vb = { + .buf_size = 0x0, //0x30000, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .seg_map = { + // SEGMENT MAP AREA - 1920x1088/4/4 * 3 bits = 0xBF40 Bytes * 16 = 0xBF400 + .buf_size = 0xBF400, + }, + .daala_top = { + // DAALA TOP STORE AREA - 224 Bytes (use 256 Bytes for LPDDR4) per 128. Total 4096/128*256 = 0x2000 + .buf_size = 0xf00, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0x0, //0x2800, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .cdf_buf = { + // for context store/load 1024x256 x16 = 512K bytes 16*0x8000 + .buf_size = 0x80000, + }, + .gmc_buf = { + // for gmc_parameter store/load 128 x 16 = 2K bytes 0x800 + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x0, //0x8000, + }, + .dblk_para = { + // DBLK -> Max 256(4096/16) LCU, each para 1024bytes(total:0x40000), data 1024bytes(total:0x40000) + .buf_size = 0xd00, /*0xc40*/ + }, + .dblk_data = { + .buf_size = 0x49000, + }, + .cdef_data = { + .buf_size = 0x22400, + }, + .ups_data = { + .buf_size = 0x36000, + }, + .fgs_table = { + .buf_size = FGS_TABLE_SIZE * FRAME_BUFFERS, // 512x128bits + }, +#ifdef AOM_AV1_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_1080P, //2*16*(more than 2304)/4, 4K + }, + 
.cm_header = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_1080P * FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + #else + .buf_size = 0, + #endif + }, +#endif +#ifdef AOM_AV1_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_1080P, //2*16*(more than 2304)/4, 4K + }, + .cm_header_dw = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_1080P*FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + #else + .buf_size = 0, + #endif + }, +#endif + .mpred_above = { + .buf_size = 0x2800, /*round from 0x2760*/ /* 2 * size of hw*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + .buf_size = MAX_ONE_MV_BUFFER_SIZE_1080P_TM2REVB * FRAME_BUFFERS,/*round from 203A0*/ //1080p, 0x40000 per buffer + }, +#endif + .rpm = { + .buf_size = 0x80*2, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { +#ifdef VPU_FILMGRAIN_DUMP + .max_width = 640, + .max_height = 480, +#else + .max_width = 4096, + .max_height = 2304, +#endif + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x0, //0x30000, + }, + .sao_vb = { + .buf_size = 0x0, //0x30000, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .seg_map = { + // SEGMENT MAP AREA - 4096x2304/4/4 * 3 bits = 0x36000 Bytes * 16 = 0x360000 + .buf_size = 0x360000, + }, + .daala_top = { + // DAALA TOP STORE AREA - 224 Bytes (use 256 Bytes for LPDDR4) per 128. 
Total 4096/128*256 = 0x2000 + .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0x0, //0x2800, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .cdf_buf = { + // for context store/load 1024x256 x16 = 512K bytes 16*0x8000 + .buf_size = 0x80000, + }, + .gmc_buf = { + // for gmc_parameter store/load 128 x 16 = 2K bytes 0x800 + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x0, //0x8000, + }, + .dblk_para = { + // DBLK -> Max 256(4096/16) LCU, each para 1024bytes(total:0x40000), data 1024bytes(total:0x40000) + .buf_size = 0x1a00, /*0x1980*/ + }, + .dblk_data = { + .buf_size = 0x52800, + }, + .cdef_data = { + .buf_size = 0x24a00, + }, + .ups_data = { + .buf_size = 0x6f000, + }, + .fgs_table = { + .buf_size = FGS_TABLE_SIZE * FRAME_BUFFERS, // 512x128bits + }, +#ifdef AOM_AV1_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_4K, //2*16*(more than 2304)/4, 4K + }, + .cm_header = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_4K*FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + #else + .buf_size = 0, + #endif + }, +#endif +#ifdef AOM_AV1_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_4K, //2*16*(more than 2304)/4, 4K + }, + .cm_header_dw = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_4K*FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + #else + .buf_size = 0, + #endif + }, +#endif + .mpred_above = { + .buf_size = 0x5400, /* 2 * size of hw*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = MAX_ONE_MV_BUFFER_SIZE_4K_TM2REVB * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = 0x80*2, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + + }, + { + .max_width = 8192, + 
.max_height = 4608, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x0, //0x30000, + }, + .sao_vb = { + .buf_size = 0x0, //0x30000, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .seg_map = { + // SEGMENT MAP AREA - 4096x2304/4/4 * 3 bits = 0x36000 Bytes * 16 = 0x360000 + .buf_size = 0xd80000, + }, + .daala_top = { + // DAALA TOP STORE AREA - 224 Bytes (use 256 Bytes for LPDDR4) per 128. Total 4096/128*256 = 0x2000 + .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0x0, //0x2800, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .cdf_buf = { + // for context store/load 1024x256 x16 = 512K bytes 16*0x8000 + .buf_size = 0x80000, + }, + .gmc_buf = { + // for gmc_parameter store/load 128 x 16 = 2K bytes 0x800 + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x0, //0x8000, + }, + .dblk_para = { + // DBLK -> Max 256(4096/16) LCU, each para 1024bytes(total:0x40000), data 1024bytes(total:0x40000) + .buf_size = 0x3300, /*0x32a0*/ + }, + .dblk_data = { + .buf_size = 0xa4800, + }, + .cdef_data = { + .buf_size = 0x29200, + }, + .ups_data = { + .buf_size = 0xdb000, + }, + .fgs_table = { + .buf_size = FGS_TABLE_SIZE * FRAME_BUFFERS, // 512x128bits + }, +#ifdef AOM_AV1_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_8K, //2*16*(more than 2304)/4, 4K + }, + .cm_header = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_8K*FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) +#else + .buf_size = 0, +#endif + }, +#endif +#ifdef AOM_AV1_MMU_DW + 
.mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_8K, //2*16*(more than 2304)/4, 4K + }, + .cm_header_dw = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_8K*FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + #else + .buf_size = 0, + #endif + }, +#endif + .mpred_above = { + .buf_size = 0xA800, /* 2 * size of hw*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = MAX_ONE_MV_BUFFER_SIZE_8K_TM2REVB * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = 0x80*2, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + + } +}; + + +/* +* AUX DATA Process +*/ +static u32 init_aux_size; +static int aux_data_is_avaible(struct AV1HW_s *hw) +{ + u32 reg_val; + + reg_val = READ_VREG(HEVC_AUX_DATA_SIZE); + if (reg_val != 0 && reg_val != init_aux_size) + return 1; + else + return 0; +} + +static void config_aux_buf(struct AV1HW_s *hw) +{ + WRITE_VREG(HEVC_AUX_ADR, hw->aux_phy_addr); + init_aux_size = ((hw->prefix_aux_size >> 4) << 16) | + (hw->suffix_aux_size >> 4); + WRITE_VREG(HEVC_AUX_DATA_SIZE, init_aux_size); +} + +/* +* dv_meta_flag: 1, dolby meta (T35) only; 2, not include dolby meta (T35) +*/ +static void set_aux_data(struct AV1HW_s *hw, + char **aux_data_buf, int *aux_data_size, + unsigned char suffix_flag, + unsigned char dv_meta_flag) +{ + int i; + unsigned short *aux_adr; + unsigned int size_reg_val = + READ_VREG(HEVC_AUX_DATA_SIZE); + unsigned int aux_count = 0; + int aux_size = 0; + if (0 == aux_data_is_avaible(hw)) + return; + + if (hw->aux_data_dirty || + hw->m_ins_flag == 0) { + + hw->aux_data_dirty = 0; + } + + if (suffix_flag) { + aux_adr = (unsigned short *) + (hw->aux_addr + + hw->prefix_aux_size); + aux_count = + ((size_reg_val & 0xffff) << 4) + >> 1; + aux_size = + hw->suffix_aux_size; + } else { + aux_adr = + (unsigned short *)hw->aux_addr; + aux_count = + ((size_reg_val >> 16) << 4) + >> 1; + aux_size = + 
hw->prefix_aux_size; + } + if (debug & AV1_DEBUG_BUFMGR_MORE) { + av1_print(hw, 0, + "%s:old size %d count %d,suf %d dv_flag %d\r\n", + __func__, *aux_data_size, + aux_count, suffix_flag, dv_meta_flag); + } + if (aux_size > 0 && aux_count > 0) { + int heads_size = 0; + int new_size; + char *new_buf; + + for (i = 0; i < aux_count; i++) { + unsigned char tag = aux_adr[i] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + heads_size += 8; + else if (dv_meta_flag == 1 && tag == 0x14) + heads_size += 8; + else if (dv_meta_flag == 2 && tag != 0x14) + heads_size += 8; + } + } + new_size = *aux_data_size + aux_count + heads_size; + new_buf = vmalloc(new_size); + if (new_buf) { + unsigned char valid_tag = 0; + unsigned char *h = + new_buf + + *aux_data_size; + unsigned char *p = h + 8; + int len = 0; + int padding_len = 0; + if (*aux_data_buf) { + memcpy(new_buf, *aux_data_buf, *aux_data_size); + vfree(*aux_data_buf); + } + *aux_data_buf = new_buf; + for (i = 0; i < aux_count; i += 4) { + int ii; + unsigned char tag = aux_adr[i + 3] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + valid_tag = 1; + else if (dv_meta_flag == 1 + && tag == 0x14) + valid_tag = 1; + else if (dv_meta_flag == 2 + && tag != 0x14) + valid_tag = 1; + else + valid_tag = 0; + if (valid_tag && len > 0) { + *aux_data_size += + (len + 8); + h[0] = (len >> 24) + & 0xff; + h[1] = (len >> 16) + & 0xff; + h[2] = (len >> 8) + & 0xff; + h[3] = (len >> 0) + & 0xff; + h[6] = + (padding_len >> 8) + & 0xff; + h[7] = (padding_len) + & 0xff; + h += (len + 8); + p += 8; + len = 0; + padding_len = 0; + } + if (valid_tag) { + h[4] = tag; + h[5] = 0; + h[6] = 0; + h[7] = 0; + } + } + if (valid_tag) { + for (ii = 0; ii < 4; ii++) { + unsigned short aa = + aux_adr[i + 3 + - ii]; + *p = aa & 0xff; + p++; + len++; + if ((aa >> 8) == 0xff) + padding_len++; + } + } + } + if (len > 0) { + *aux_data_size += (len + 8); + h[0] = (len >> 24) & 0xff; + h[1] = (len >> 16) & 0xff; + h[2] = (len >> 8) & 
0xff; + h[3] = (len >> 0) & 0xff; + h[6] = (padding_len >> 8) & 0xff; + h[7] = (padding_len) & 0xff; + } + if (debug & AV1_DEBUG_BUFMGR_MORE) { + av1_print(hw, 0, + "aux: (size %d) suffix_flag %d\n", + *aux_data_size, suffix_flag); + for (i = 0; i < *aux_data_size; i++) { + av1_print_cont(hw, 0, + "%02x ", (*aux_data_buf)[i]); + if (((i + 1) & 0xf) == 0) + av1_print_cont(hw, 0, "\n"); + } + av1_print_cont(hw, 0, "\n"); + } + + } else { + av1_print(hw, 0, "new buf alloc failed\n"); + if (*aux_data_buf) + vfree(*aux_data_buf); + *aux_data_buf = NULL; + *aux_data_size = 0; + } + } + +} + +static void set_dv_data(struct AV1HW_s *hw) +{ + set_aux_data(hw, &hw->dv_data_buf, + &hw->dv_data_size, 0, 1); + +} + +static void set_pic_aux_data(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic, unsigned char suffix_flag, + unsigned char dv_meta_flag) +{ + if (pic == NULL) + return; + set_aux_data(hw, &pic->aux_data_buf, + &pic->aux_data_size, suffix_flag, dv_meta_flag); +} + +static void copy_dv_data(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic) +{ + char *new_buf; + int new_size; + new_size = pic->aux_data_size + hw->dv_data_size; + new_buf = vmalloc(new_size); + if (new_buf) { + if (debug & AV1_DEBUG_BUFMGR_MORE) { + av1_print(hw, 0, + "%s: (size %d) pic index %d\n", + __func__, + hw->dv_data_size, pic->index); + } + if (pic->aux_data_buf) { + memcpy(new_buf, pic->aux_data_buf, pic->aux_data_size); + vfree(pic->aux_data_buf); + } + memcpy(new_buf + pic->aux_data_size, hw->dv_data_buf, hw->dv_data_size); + pic->aux_data_size += hw->dv_data_size; + pic->aux_data_buf = new_buf; + vfree(hw->dv_data_buf); + hw->dv_data_buf = NULL; + hw->dv_data_size = 0; + } + +} + +static void release_aux_data(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic) +{ + if (pic->aux_data_buf) + vfree(pic->aux_data_buf); + pic->aux_data_buf = NULL; + pic->aux_data_size = 0; +} + +static void dump_aux_buf(struct AV1HW_s *hw) +{ + int i; + unsigned short *aux_adr = + (unsigned short *) + 
hw->aux_addr; + unsigned int aux_size = + (READ_VREG(HEVC_AUX_DATA_SIZE) + >> 16) << 4; + + if (hw->prefix_aux_size > 0) { + av1_print(hw, 0, + "prefix aux: (size %d)\n", + aux_size); + for (i = 0; i < + (aux_size >> 1); i++) { + av1_print_cont(hw, 0, + "%04x ", + *(aux_adr + i)); + if (((i + 1) & 0xf) + == 0) + av1_print_cont(hw, + 0, "\n"); + } + } + if (hw->suffix_aux_size > 0) { + aux_adr = (unsigned short *) + (hw->aux_addr + + hw->prefix_aux_size); + aux_size = + (READ_VREG(HEVC_AUX_DATA_SIZE) & 0xffff) + << 4; + av1_print(hw, 0, + "suffix aux: (size %d)\n", + aux_size); + for (i = 0; i < + (aux_size >> 1); i++) { + av1_print_cont(hw, 0, + "%04x ", *(aux_adr + i)); + if (((i + 1) & 0xf) == 0) + av1_print_cont(hw, 0, "\n"); + } + } +} + +/* +* +*/ + +/*Losless compression body buffer size 4K per 64x32 (jt)*/ +static int compute_losless_comp_body_size(int width, int height, + uint8_t is_bit_depth_10) +{ + int width_x64; + int height_x32; + int bsize; + + width_x64 = width + 63; + width_x64 >>= 6; + height_x32 = height + 31; + height_x32 >>= 5; + bsize = (is_bit_depth_10?4096:3200)*width_x64*height_x32; + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info("%s(%d,%d,%d)=>%d\n", + __func__, width, height, + is_bit_depth_10, bsize); + + return bsize; +} + +/* Losless compression header buffer size 32bytes per 128x64 (jt)*/ +static int compute_losless_comp_header_size(int width, int height) +{ + int width_x128; + int height_x64; + int hsize; + + width_x128 = width + 127; + width_x128 >>= 7; + height_x64 = height + 63; + height_x64 >>= 6; + + hsize = 32 * width_x128 * height_x64; + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info("%s(%d,%d)=>%d\n", + __func__, width, height, + hsize); + + return hsize; +} + +#ifdef AOM_AV1_MMU_DW +static int compute_losless_comp_body_size_dw(int width, int height, + uint8_t is_bit_depth_10) +{ + + return compute_losless_comp_body_size(width, height, is_bit_depth_10); +} + +/* Losless compression header buffer size 32bytes per 128x64 (jt)*/ 
+static int compute_losless_comp_header_size_dw(int width, int height) +{ + return compute_losless_comp_header_size(width, height); +} +#endif + +static void init_buff_spec(struct AV1HW_s *hw, + struct BuffInfo_s *buf_spec) +{ + void *mem_start_virt; + + buf_spec->ipp.buf_start = + WORKBUF_ALIGN(buf_spec->start_adr); + buf_spec->sao_abv.buf_start = + WORKBUF_ALIGN(buf_spec->ipp.buf_start + buf_spec->ipp.buf_size); + buf_spec->sao_vb.buf_start = + WORKBUF_ALIGN(buf_spec->sao_abv.buf_start + buf_spec->sao_abv.buf_size); + buf_spec->short_term_rps.buf_start = + WORKBUF_ALIGN(buf_spec->sao_vb.buf_start + buf_spec->sao_vb.buf_size); + buf_spec->vps.buf_start = + WORKBUF_ALIGN(buf_spec->short_term_rps.buf_start + buf_spec->short_term_rps.buf_size); + buf_spec->seg_map.buf_start = + WORKBUF_ALIGN(buf_spec->vps.buf_start + buf_spec->vps.buf_size); + buf_spec->daala_top.buf_start = + WORKBUF_ALIGN(buf_spec->seg_map.buf_start + buf_spec->seg_map.buf_size); + buf_spec->sao_up.buf_start = + WORKBUF_ALIGN(buf_spec->daala_top.buf_start + buf_spec->daala_top.buf_size); + buf_spec->swap_buf.buf_start = + WORKBUF_ALIGN(buf_spec->sao_up.buf_start + buf_spec->sao_up.buf_size); + buf_spec->cdf_buf.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf.buf_start + buf_spec->swap_buf.buf_size); + buf_spec->gmc_buf.buf_start = + WORKBUF_ALIGN(buf_spec->cdf_buf.buf_start + buf_spec->cdf_buf.buf_size); + buf_spec->scalelut.buf_start = + WORKBUF_ALIGN(buf_spec->gmc_buf.buf_start + buf_spec->gmc_buf.buf_size); + buf_spec->dblk_para.buf_start = + WORKBUF_ALIGN(buf_spec->scalelut.buf_start + buf_spec->scalelut.buf_size); + buf_spec->dblk_data.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_para.buf_start + buf_spec->dblk_para.buf_size); + buf_spec->cdef_data.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data.buf_start + buf_spec->dblk_data.buf_size); + buf_spec->ups_data.buf_start = + WORKBUF_ALIGN(buf_spec->cdef_data.buf_start + buf_spec->cdef_data.buf_size); + buf_spec->fgs_table.buf_start = + 
WORKBUF_ALIGN(buf_spec->ups_data.buf_start + buf_spec->ups_data.buf_size); +#ifdef AOM_AV1_MMU + buf_spec->mmu_vbh.buf_start = + WORKBUF_ALIGN(buf_spec->fgs_table.buf_start + buf_spec->fgs_table.buf_size); + buf_spec->cm_header.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size); +#ifdef AOM_AV1_MMU_DW + buf_spec->mmu_vbh_dw.buf_start = + WORKBUF_ALIGN(buf_spec->cm_header.buf_start + buf_spec->cm_header.buf_size); + buf_spec->cm_header_dw.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh_dw.buf_start + buf_spec->mmu_vbh_dw.buf_size); + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->cm_header_dw.buf_start + buf_spec->cm_header_dw.buf_size); +#else + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->cm_header.buf_start + buf_spec->cm_header.buf_size); +#endif +#else + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->fgs_table.buf_start + buf_spec->fgs_table.buf_size); +#endif +#ifdef MV_USE_FIXED_BUF + buf_spec->mpred_mv.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_mv.buf_start + buf_spec->mpred_mv.buf_size); +#else + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); +#endif + buf_spec->lmem.buf_start = + WORKBUF_ALIGN(buf_spec->rpm.buf_start + buf_spec->rpm.buf_size); + buf_spec->end_adr = + WORKBUF_ALIGN(buf_spec->lmem.buf_start + buf_spec->lmem.buf_size); + + if (!hw) + return; + + if (!vdec_secure(hw_to_vdec(hw))) { + mem_start_virt = + codec_mm_phys_to_virt(buf_spec->dblk_para.buf_start); + if (mem_start_virt) { + memset(mem_start_virt, 0, + buf_spec->dblk_para.buf_size); + codec_mm_dma_flush(mem_start_virt, + buf_spec->dblk_para.buf_size, + DMA_TO_DEVICE); + } else { + mem_start_virt = codec_mm_vmap( + buf_spec->dblk_para.buf_start, + buf_spec->dblk_para.buf_size); + if (mem_start_virt) { + memset(mem_start_virt, 0, + 
buf_spec->dblk_para.buf_size); + codec_mm_dma_flush(mem_start_virt, + buf_spec->dblk_para.buf_size, + DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(mem_start_virt); + } else { + /*not virt for tvp playing, + may need clear on ucode.*/ + pr_err("mem_start_virt failed\n"); + } + } + } + + if (debug) { + pr_info("%s workspace (%x %x) size = %x\n", __func__, + buf_spec->start_adr, buf_spec->end_adr, + buf_spec->end_adr - buf_spec->start_adr); + } + + if (debug) { + pr_info("ipp.buf_start :%x\n", + buf_spec->ipp.buf_start); + pr_info("sao_abv.buf_start :%x\n", + buf_spec->sao_abv.buf_start); + pr_info("sao_vb.buf_start :%x\n", + buf_spec->sao_vb.buf_start); + pr_info("short_term_rps.buf_start :%x\n", + buf_spec->short_term_rps.buf_start); + pr_info("vps.buf_start :%x\n", + buf_spec->vps.buf_start); + pr_info("seg_map.buf_start :%x\n", + buf_spec->seg_map.buf_start); + pr_info("daala_top.buf_start :%x\n", + buf_spec->daala_top.buf_start); + pr_info("swap_buf.buf_start :%x\n", + buf_spec->swap_buf.buf_start); + pr_info("cdf_buf.buf_start :%x\n", + buf_spec->cdf_buf.buf_start); + pr_info("gmc_buf.buf_start :%x\n", + buf_spec->gmc_buf.buf_start); + pr_info("scalelut.buf_start :%x\n", + buf_spec->scalelut.buf_start); + pr_info("dblk_para.buf_start :%x\n", + buf_spec->dblk_para.buf_start); + pr_info("dblk_data.buf_start :%x\n", + buf_spec->dblk_data.buf_start); + pr_info("cdef_data.buf_start :%x\n", + buf_spec->cdef_data.buf_start); + pr_info("ups_data.buf_start :%x\n", + buf_spec->ups_data.buf_start); + +#ifdef AOM_AV1_MMU + pr_info("mmu_vbh.buf_start :%x\n", + buf_spec->mmu_vbh.buf_start); +#endif + pr_info("mpred_above.buf_start :%x\n", + buf_spec->mpred_above.buf_start); +#ifdef MV_USE_FIXED_BUF + pr_info("mpred_mv.buf_start :%x\n", + buf_spec->mpred_mv.buf_start); +#endif + if ((debug & AOM_AV1_DEBUG_SEND_PARAM_WITH_REG) == 0) { + pr_info("rpm.buf_start :%x\n", + buf_spec->rpm.buf_start); + } + } +} + + + +static void uninit_mmu_buffers(struct AV1HW_s *hw) +{ +#ifndef 
MV_USE_FIXED_BUF + dealloc_mv_bufs(hw); +#endif + if (hw->mmu_box) + decoder_mmu_box_free(hw->mmu_box); + hw->mmu_box = NULL; + +#ifdef AOM_AV1_MMU_DW + if (hw->mmu_box_dw) + decoder_mmu_box_free(hw->mmu_box_dw); + hw->mmu_box_dw = NULL; +#endif + if (hw->bmmu_box) + decoder_bmmu_box_free(hw->bmmu_box); + hw->bmmu_box = NULL; +} + +static int calc_luc_quantity(int lcu_size, u32 w, u32 h) +{ + int pic_width_64 = (w + 63) & (~0x3f); + int pic_height_32 = (h + 31) & (~0x1f); + int pic_width_lcu = (pic_width_64 % lcu_size) ? + pic_width_64 / lcu_size + 1 : pic_width_64 / lcu_size; + int pic_height_lcu = (pic_height_32 % lcu_size) ? + pic_height_32 / lcu_size + 1 : pic_height_32 / lcu_size; + + return pic_width_lcu * pic_height_lcu; +} + +/* return in MB */ +static int av1_max_mmu_buf_size(int max_w, int max_h) +{ + int buf_size = 48; + + if ((max_w * max_h > 1280*736) && + (max_w * max_h <= 1920*1088)) { + buf_size = 12; + } else if ((max_w * max_h > 0) && + (max_w * max_h <= 1280*736)) { + buf_size = 4; + } + + return buf_size; +} + +static int av1_get_header_size(int w, int h) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + IS_8K_SIZE(w, h)) + return MMU_COMPRESS_HEADER_SIZE_8K; + if (IS_4K_SIZE(w, h)) + return MMU_COMPRESS_HEADER_SIZE_4K; + + return MMU_COMPRESS_HEADER_SIZE_1080P; +} + +static int v4l_alloc_and_config_pic(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic) +{ + int ret = -1; + int i = pic->index; + int dw_mode = get_double_write_mode_init(hw); + int lcu_total = calc_luc_quantity(hw->current_lcu_size, + hw->frame_width, hw->frame_height); +#ifdef MV_USE_FIXED_BUF + u32 mpred_mv_end = hw->work_space_buf->mpred_mv.buf_start + + hw->work_space_buf->mpred_mv.buf_size; +//#ifdef USE_DYNAMIC_MV_BUFFER +// int32_t MV_MEM_UNIT = (lcu_size == 128) ? 
(19*4*16) : (19*16); +// int32_t mv_buffer_size = (lcu_total*MV_MEM_UNIT); +//#else + int32_t mv_buffer_size = hw->max_one_mv_buffer_size; +//#endif +#endif + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)hw->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + + if (i < 0) + return ret; + + ret = ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC); + if (ret < 0) { + av1_print(hw, 0, "[%d] AV1 get buffer fail.\n", ctx->id); + return ret; + } + + fb->status = FB_ST_DECODER; + + if (hw->mmu_enable) { + struct internal_comp_buf *ibuf = v4lfb_to_icomp_buf(hw, fb); + + hw->m_BUF[i].header_addr = ibuf->header_addr; + if (debug & AV1_DEBUG_BUFMGR_MORE) { + pr_info("MMU header_adr %d: %ld\n", + i, hw->m_BUF[i].header_addr); + } + } + +#ifdef MV_USE_FIXED_BUF + if ((hw->work_space_buf->mpred_mv.buf_start + + ((i + 1) * mv_buffer_size)) + <= mpred_mv_end) { +#endif + hw->m_BUF[i].v4l_ref_buf_addr = (ulong)fb; + pic->cma_alloc_addr = fb->m.mem[0].addr; + if (fb->num_planes == 1) { + hw->m_BUF[i].start_adr = fb->m.mem[0].addr; + hw->m_BUF[i].luma_size = fb->m.mem[0].offset; + hw->m_BUF[i].size = fb->m.mem[0].size; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + pic->dw_y_adr = hw->m_BUF[i].start_adr; + pic->dw_u_v_adr = pic->dw_y_adr + hw->m_BUF[i].luma_size; + } else if (fb->num_planes == 2) { + hw->m_BUF[i].start_adr = fb->m.mem[0].addr; + hw->m_BUF[i].size = fb->m.mem[0].size; + hw->m_BUF[i].chroma_addr = fb->m.mem[1].addr; + hw->m_BUF[i].chroma_size = fb->m.mem[1].size; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + fb->m.mem[1].bytes_used = fb->m.mem[1].size; + pic->dw_y_adr = hw->m_BUF[i].start_adr; + pic->dw_u_v_adr = hw->m_BUF[i].chroma_addr; + } + + /* config frame buffer */ + if (hw->mmu_enable) + pic->header_adr = hw->m_BUF[i].header_addr; + + pic->BUF_index = i; + pic->lcu_total = lcu_total; + pic->mc_canvas_y = pic->index; + pic->mc_canvas_u_v = pic->index; + + if (dw_mode & 0x10) { + pic->mc_canvas_y = (pic->index << 1); + 
pic->mc_canvas_u_v = (pic->index << 1) + 1; + } + +#ifdef MV_USE_FIXED_BUF + pic->mpred_mv_wr_start_addr = + hw->work_space_buf->mpred_mv.buf_start + + (pic->index * mv_buffer_size); +#endif + +#ifdef DUMP_FILMGRAIN + if (pic->index == fg_dump_index) { + pic->fgs_table_adr = hw->fg_phy_addr; + pr_info("set buffer %d film grain table 0x%x\n", + pic->index, pic->fgs_table_adr); + } else +#endif + pic->fgs_table_adr = + hw->work_space_buf->fgs_table.buf_start + + (pic->index * FGS_TABLE_SIZE); + + if (debug) { + + pr_info("%s index %d BUF_index %d ", + __func__, pic->index, + pic->BUF_index); + pr_info("comp_body_size %x comp_buf_size %x ", + pic->comp_body_size, + pic->buf_size); + pr_info("mpred_mv_wr_start_adr %d\n", + pic->mpred_mv_wr_start_addr); + pr_info("dw_y_adr %d, pic_config->dw_u_v_adr =%d\n", + pic->dw_y_adr, + pic->dw_u_v_adr); + } +#ifdef MV_USE_FIXED_BUF + } +#endif + return ret; +} + +static int config_pic(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + int ret = -1; + int i; + int pic_width = hw->init_pic_w; + int pic_height = hw->init_pic_h; + //int lcu_size = ((params->p.seq_flags >> 6) & 0x1) ? 128 : 64; + int lcu_size = hw->current_lcu_size; + + int pic_width_64 = (pic_width + 63) & (~0x3f); + int pic_height_32 = (pic_height + 31) & (~0x1f); + int pic_width_lcu = (pic_width_64 % lcu_size) ? + pic_width_64 / lcu_size + 1 + : pic_width_64 / lcu_size; + int pic_height_lcu = (pic_height_32 % lcu_size) ? + pic_height_32 / lcu_size + 1 + : pic_height_32 / lcu_size; + int lcu_total = pic_width_lcu * pic_height_lcu; +#ifdef MV_USE_FIXED_BUF + u32 mpred_mv_end = hw->work_space_buf->mpred_mv.buf_start + + hw->work_space_buf->mpred_mv.buf_size; +//#ifdef USE_DYNAMIC_MV_BUFFER +// int32_t MV_MEM_UNIT = (lcu_size == 128) ? 
(19*4*16) : (19*16); +// int32_t mv_buffer_size = (lcu_total*MV_MEM_UNIT); +//#else + int32_t mv_buffer_size = hw->max_one_mv_buffer_size; +//#endif + +#endif + + u32 y_adr = 0; + int buf_size = 0; + + int losless_comp_header_size = + compute_losless_comp_header_size(pic_width, + pic_height); + int losless_comp_body_size = compute_losless_comp_body_size(pic_width, + pic_height, buf_alloc_depth == 10); + int mc_buffer_size = losless_comp_header_size + losless_comp_body_size; + int mc_buffer_size_h = (mc_buffer_size + 0xffff) >> 16; + int mc_buffer_size_u_v = 0; + int mc_buffer_size_u_v_h = 0; + int dw_mode = get_double_write_mode_init(hw); + + hw->lcu_total = lcu_total; + + if (dw_mode && (dw_mode & 0x20) == 0) { + int pic_width_dw = pic_width / + get_double_write_ratio(dw_mode & 0xf); + int pic_height_dw = pic_height / + get_double_write_ratio(dw_mode & 0xf); + + int pic_width_64_dw = (pic_width_dw + 63) & (~0x3f); + int pic_height_32_dw = (pic_height_dw + 31) & (~0x1f); + int pic_width_lcu_dw = (pic_width_64_dw % lcu_size) ? + pic_width_64_dw / lcu_size + 1 + : pic_width_64_dw / lcu_size; + int pic_height_lcu_dw = (pic_height_32_dw % lcu_size) ? 
+ pic_height_32_dw / lcu_size + 1 + : pic_height_32_dw / lcu_size; + int lcu_total_dw = pic_width_lcu_dw * pic_height_lcu_dw; + mc_buffer_size_u_v = lcu_total_dw * lcu_size * lcu_size / 2; + mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16; + /*64k alignment*/ + buf_size = ((mc_buffer_size_u_v_h << 16) * 3); + buf_size = ((buf_size + 0xffff) >> 16) << 16; + } + + if (mc_buffer_size & 0xffff) /*64k alignment*/ + mc_buffer_size_h += 1; + if ((!hw->mmu_enable) && ((dw_mode & 0x10) == 0)) + buf_size += (mc_buffer_size_h << 16); + +#ifdef USE_SPEC_BUF_FOR_MMU_HEAD + if (hw->mmu_enable) { + pic_config->header_adr = + hw->work_space_buf->cm_header.buf_start + + (pic_config->index * vav1_mmu_compress_header_size(hw)); + +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + pic_config->header_dw_adr = + hw->work_space_buf->cm_header_dw.buf_start + + (pic_config->index * vav1_mmu_compress_header_size(hw)); + + } +#endif + } + +#else +/*!USE_SPEC_BUF_FOR_MMU_HEAD*/ + if (hw->mmu_enable) { + pic_config->header_adr = decoder_bmmu_box_get_phy_addr( + hw->bmmu_box, HEADER_BUFFER_IDX(pic_config->index)); + +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + pic_config->header_dw_adr = decoder_bmmu_box_get_phy_addr( + hw->bmmu_box, DW_HEADER_BUFFER_IDX(pic_config->index)); + + } + if (debug & AV1_DEBUG_BUFMGR_MORE) { + pr_info("MMU dw header_adr (%d, %d) %d: %d\n", + hw->dw_mmu_enable, + DW_HEADER_BUFFER_IDX(pic_config->index), + pic_config->index, + pic_config->header_dw_adr); + } +#endif + + if (debug & AV1_DEBUG_BUFMGR_MORE) { + pr_info("MMU header_adr %d: %d\n", + pic_config->index, pic_config->header_adr); + } + } +#endif + + i = pic_config->index; +#ifdef MV_USE_FIXED_BUF + if ((hw->work_space_buf->mpred_mv.buf_start + + ((i + 1) * mv_buffer_size)) + <= mpred_mv_end + ) { +#endif + if (buf_size > 0) { + ret = decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, + VF_BUFFER_IDX(i), + buf_size, DRIVER_NAME, + &pic_config->cma_alloc_addr); + if (ret < 0) { + pr_info( + 
"decoder_bmmu_box_alloc_buf_phy idx %d size %d fail\n", + VF_BUFFER_IDX(i), + buf_size + ); + return ret; + } + + if (pic_config->cma_alloc_addr) + y_adr = pic_config->cma_alloc_addr; + else { + pr_info( + "decoder_bmmu_box_alloc_buf_phy idx %d size %d return null\n", + VF_BUFFER_IDX(i), + buf_size + ); + return -1; + } + } + { + /*ensure get_pic_by_POC() + not get the buffer not decoded*/ + pic_config->BUF_index = i; + pic_config->lcu_total = lcu_total; + + pic_config->comp_body_size = losless_comp_body_size; + pic_config->buf_size = buf_size; + + pic_config->mc_canvas_y = pic_config->index; + pic_config->mc_canvas_u_v = pic_config->index; + if (dw_mode & 0x10) { + pic_config->dw_y_adr = y_adr; + pic_config->dw_u_v_adr = y_adr + + ((mc_buffer_size_u_v_h << 16) << 1); + + pic_config->mc_canvas_y = + (pic_config->index << 1); + pic_config->mc_canvas_u_v = + (pic_config->index << 1) + 1; + } else if (dw_mode && (dw_mode & 0x20) == 0) { + pic_config->dw_y_adr = y_adr; + pic_config->dw_u_v_adr = pic_config->dw_y_adr + + ((mc_buffer_size_u_v_h << 16) << 1); + } +#ifdef MV_USE_FIXED_BUF + pic_config->mpred_mv_wr_start_addr = + hw->work_space_buf->mpred_mv.buf_start + + (pic_config->index * mv_buffer_size); +#endif +#ifdef DUMP_FILMGRAIN + if (pic_config->index == fg_dump_index) { + pic_config->fgs_table_adr = hw->fg_phy_addr; + pr_info("set buffer %d film grain table 0x%x\n", + pic_config->index, pic_config->fgs_table_adr); + } else +#endif + pic_config->fgs_table_adr = + hw->work_space_buf->fgs_table.buf_start + + (pic_config->index * FGS_TABLE_SIZE); + + if (debug) { + pr_info + ("%s index %d BUF_index %d ", + __func__, pic_config->index, + pic_config->BUF_index); + pr_info + ("comp_body_size %x comp_buf_size %x ", + pic_config->comp_body_size, + pic_config->buf_size); + pr_info + ("mpred_mv_wr_start_adr %d\n", + pic_config->mpred_mv_wr_start_addr); + pr_info("dw_y_adr %d, pic_config->dw_u_v_adr =%d\n", + pic_config->dw_y_adr, + pic_config->dw_u_v_adr); + } + ret = 0; 
+ } +#ifdef MV_USE_FIXED_BUF + } +#endif + return ret; +} + +#ifndef USE_SPEC_BUF_FOR_MMU_HEAD +static int vav1_mmu_compress_header_size(struct AV1HW_s *hw) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h)) + return (MMU_COMPRESS_HEADER_SIZE_8K); + + if (IS_4K_SIZE(hw->max_pic_w, hw->max_pic_h)) + return MMU_COMPRESS_HEADER_SIZE_4K; + + return (MMU_COMPRESS_HEADER_SIZE_1080P); +} +#endif +/*#define FRAME_MMU_MAP_SIZE (MAX_FRAME_4K_NUM * 4)*/ +static int vav1_frame_mmu_map_size(struct AV1HW_s *hw) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h)) + return (MAX_FRAME_8K_NUM * 4); + + return (MAX_FRAME_4K_NUM * 4); +} + +#ifdef AOM_AV1_MMU_DW +static int vaom_dw_frame_mmu_map_size(struct AV1HW_s *hw) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h)) + return (MAX_FRAME_8K_NUM * 4); + + return (MAX_FRAME_4K_NUM * 4); +} +#endif + +static void init_pic_list(struct AV1HW_s *hw) +{ + int i; + struct AV1_Common_s *cm = &hw->common; + struct PIC_BUFFER_CONFIG_s *pic_config; + struct vdec_s *vdec = hw_to_vdec(hw); + +#ifndef USE_SPEC_BUF_FOR_MMU_HEAD + u32 header_size; + if (hw->mmu_enable && ((hw->double_write_mode & 0x10) == 0)) { + header_size = vav1_mmu_compress_header_size(hw); + /*alloc AV1 compress header first*/ + for (i = 0; i < hw->used_buf_num; i++) { + unsigned long buf_addr; + if (decoder_bmmu_box_alloc_buf_phy + (hw->bmmu_box, + HEADER_BUFFER_IDX(i), header_size, + DRIVER_HEADER_NAME, + &buf_addr) < 0) { + av1_print(hw, 0, "%s malloc compress header failed %d\n", + DRIVER_HEADER_NAME, i); + hw->fatal_error |= DECODER_FATAL_ERROR_NO_MEM; + return; + } +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + if (decoder_bmmu_box_alloc_buf_phy + (hw->bmmu_box, + DW_HEADER_BUFFER_IDX(i), header_size, + DRIVER_HEADER_NAME, + &buf_addr) < 0) { + av1_print(hw, 0, "%s malloc compress dw header failed %d\n", + 
DRIVER_HEADER_NAME, i); + hw->fatal_error |= DECODER_FATAL_ERROR_NO_MEM; + return; + } + } +#endif + } + } +#endif + for (i = 0; i < hw->used_buf_num; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + pic_config->index = i; + pic_config->BUF_index = -1; + pic_config->mv_buf_index = -1; + if (vdec->parallel_dec == 1) { + pic_config->y_canvas_index = -1; + pic_config->uv_canvas_index = -1; + } + pic_config->y_crop_width = hw->init_pic_w; + pic_config->y_crop_height = hw->init_pic_h; + pic_config->double_write_mode = get_double_write_mode(hw); + hw->buffer_wrap[i] = i; + + if (!hw->is_used_v4l) { + if (config_pic(hw, pic_config) < 0) { + if (debug) + av1_print(hw, 0, "Config_pic %d fail\n", + pic_config->index); + pic_config->index = -1; + break; + } + + if (pic_config->double_write_mode && + (pic_config->double_write_mode & 0x20) == 0) { + set_canvas(hw, pic_config); + } + } + } + for (; i < hw->used_buf_num; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + pic_config->index = -1; + pic_config->BUF_index = -1; + pic_config->mv_buf_index = -1; + hw->buffer_wrap[i] = i; + if (vdec->parallel_dec == 1) { + pic_config->y_canvas_index = -1; + pic_config->uv_canvas_index = -1; + } + } + av1_print(hw, AV1_DEBUG_BUFMGR, "%s ok, used_buf_num = %d\n", + __func__, hw->used_buf_num); + +} + +static void init_pic_list_hw(struct AV1HW_s *hw) +{ + int i; + struct AV1_Common_s *cm = &hw->common; + struct PIC_BUFFER_CONFIG_s *pic_config; + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x0);*/ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (0x1 << 2)); + + + for (i = 0; i < hw->used_buf_num; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + if (pic_config->index < 0) + break; + + if (hw->mmu_enable && ((pic_config->double_write_mode & 0x10) == 0)) { + + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->header_adr >> 5); + } else { + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + * pic_config->mc_y_adr + * | (pic_config->mc_canvas_y << 
8) | 0x1); + */ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->dw_y_adr >> 5); + } +#ifndef LOSLESS_COMPRESS_MODE + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + * pic_config->mc_u_v_adr + * | (pic_config->mc_canvas_u_v << 8)| 0x1); + */ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->dw_u_v_adr >> 5); +#else + if (pic_config->double_write_mode & 0x10) { + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->dw_u_v_adr >> 5); + } +#endif + } + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1); + +#ifdef CHANGE_REMOVED + /*Zero out canvas registers in IPP -- avoid simulation X*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 1); +#else + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (1 << 8) | (0 << 1) | 1); +#endif + for (i = 0; i < 32; i++) + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); +} + + +static void dump_pic_list(struct AV1HW_s *hw) +{ + struct AV1_Common_s *const cm = &hw->common; + struct PIC_BUFFER_CONFIG_s *pic_config; + int i; + for (i = 0; i < FRAME_BUFFERS; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + av1_print(hw, 0, + "Buf(%d) index %d mv_buf_index %d ref_count %d vf_ref %d dec_idx %d slice_type %d w/h %d/%d adr%ld\n", + i, + pic_config->index, +#ifndef MV_USE_FIXED_BUF + pic_config->mv_buf_index, +#else + -1, +#endif + cm->buffer_pool-> + frame_bufs[i].ref_count, + pic_config->vf_ref, + pic_config->decode_idx, + pic_config->slice_type, + pic_config->y_crop_width, + pic_config->y_crop_height, + pic_config->cma_alloc_addr + ); + } + return; +} + +void av1_release_buf(AV1Decoder *pbi, RefCntBuffer *const buf) +{ + +#if 0 + //def CHANGE_DONE + struct AV1HW_s *hw = (struct AV1HW_s *)(pbi->private_data); + if (!hw->mmu_enable) + return; + //release_buffer_4k(&av1_mmumgr_m, buf->buf.index); + decoder_mmu_box_free_idx(hw->mmu_box, buf->buf.index); +#ifdef AOM_AV1_MMU_DW + //release_buffer_4k(&av1_mmumgr_dw, buf->buf.index); + decoder_mmu_box_free_idx(hw->mmu_box_dw, buf->buf.index); +#endif + 
+#endif +} + +void av1_release_bufs(struct AV1HW_s *hw) +{ + AV1_COMMON *cm = &hw->common; + RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; + int i; + + for (i = 0; i < FRAME_BUFFERS; ++i) { + if (frame_bufs[i].buf.vf_ref == 0 && + frame_bufs[i].ref_count == 0 && + frame_bufs[i].buf.index >= 0) { + if (frame_bufs[i].buf.aux_data_buf) + release_aux_data(hw, &frame_bufs[i].buf); + } + } +} + +#ifdef DEBUG_CMD +static void d_fill_zero(struct AV1HW_s *hw, unsigned int phyadr, int size) +{ + WRITE_VREG(HEVC_DBG_LOG_ADR, phyadr); + WRITE_VREG(DEBUG_REG1, + 0x20000000 | size); + debug_cmd_wait_count = 0; + debug_cmd_wait_type = 1; + while ((READ_VREG(DEBUG_REG1) & 0x1) == 0 + && debug_cmd_wait_count < 0x7fffffff) { + debug_cmd_wait_count++; + } + + WRITE_VREG(DEBUG_REG1, 0); + debug_cmd_wait_type = 0; +} + +static void d_dump(struct AV1HW_s *hw, unsigned int phyadr, int size, + struct file *fp, loff_t *wr_off) +{ + + int jj; + unsigned char *data = (unsigned char *) + (hw->ucode_log_addr); + WRITE_VREG(HEVC_DBG_LOG_ADR, hw->ucode_log_phy_addr); + + WRITE_VREG(HEVC_D_ADR, phyadr); + WRITE_VREG(DEBUG_REG1, + 0x10000000 | size); + + debug_cmd_wait_count = 0; + debug_cmd_wait_type = 3; + while ((READ_VREG(DEBUG_REG1) & 0x1) == 0 + && debug_cmd_wait_count < 0x7fffffff) { + debug_cmd_wait_count++; + } + + if (fp) { + vfs_write(fp, data, + size, wr_off); + + } else { + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + av1_print(hw, 0, + "%06x:", jj); + av1_print_cont(hw, 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + av1_print_cont(hw, 0, + "\n"); + } + av1_print(hw, 0, "\n"); + } + + WRITE_VREG(DEBUG_REG1, 0); + debug_cmd_wait_type = 0; + +} + +static void mv_buffer_fill_zero(struct AV1HW_s *hw, struct PIC_BUFFER_CONFIG_s *pic_config) +{ + pr_info("fill dummy data pic index %d colocate addreses %x size %x\n", + pic_config->index, pic_config->mpred_mv_wr_start_addr, + hw->m_mv_BUF[pic_config->mv_buf_index].size); + d_fill_zero(hw, 
pic_config->mpred_mv_wr_start_addr, + hw->m_mv_BUF[pic_config->mv_buf_index].size); +} + +static void dump_mv_buffer(struct AV1HW_s *hw, struct PIC_BUFFER_CONFIG_s *pic_config) +{ + + unsigned int adr, size; + unsigned int adr_end = pic_config->mpred_mv_wr_start_addr + + hw->m_mv_BUF[pic_config->mv_buf_index].size; + mm_segment_t old_fs; + loff_t off = 0; + int mode = O_CREAT | O_WRONLY | O_TRUNC; + char file[64]; + struct file *fp; + sprintf(&file[0], "/data/tmp/colocate%d", hw->frame_count-1); + fp = filp_open(file, mode, 0666); + old_fs = get_fs(); + set_fs(KERNEL_DS); + for (adr = pic_config->mpred_mv_wr_start_addr; + adr < adr_end; + adr += UCODE_LOG_BUF_SIZE) { + size = UCODE_LOG_BUF_SIZE; + if (size > (adr_end - adr)) + size = adr_end - adr; + pr_info("dump pic index %d colocate addreses %x size %x\n", + pic_config->index, adr, size); + d_dump(hw, adr, size, fp, &off); + } + set_fs(old_fs); + vfs_fsync(fp, 0); + + filp_close(fp, current->files); +} + +#endif + +static int config_pic_size(struct AV1HW_s *hw, unsigned short bit_depth) +{ + uint32_t data32; + struct AV1_Common_s *cm = &hw->common; + struct PIC_BUFFER_CONFIG_s *cur_pic_config = &cm->cur_frame->buf; + int losless_comp_header_size, losless_comp_body_size; +#ifdef AOM_AV1_MMU_DW + int losless_comp_header_size_dw, losless_comp_body_size_dw; +#endif + av1_print(hw, AOM_DEBUG_HW_MORE, + " #### config_pic_size ####, bit_depth = %d\n", bit_depth); + + frame_width = cur_pic_config->y_crop_width; + frame_height = cur_pic_config->y_crop_height; + cur_pic_config->bit_depth = bit_depth; + cur_pic_config->double_write_mode = get_double_write_mode(hw); + + /* use fixed maximum size // 128x128/4/4*3-bits = 384 Bytes + seg_map_size = + ((frame_width + 127) >> 7) * ((frame_height + 127) >> 7) * 384 ; + */ + if (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T3) { + WRITE_VREG(HEVC_PARSER_PICTURE_SIZE, + (frame_height << 16) | frame_width); + } +#ifdef DUAL_DECODE +#else + WRITE_VREG(HEVC_ASSIST_PIC_SIZE_FB_READ, + 
(frame_height << 16) | frame_width); +#endif +#ifdef AOM_AV1_MMU + + //alloc_mmu(&av1_mmumgr_m, cm->cur_frame->buf.index, frame_width, frame_height, bit_depth); +#endif +#ifdef AOM_AV1_MMU_DW + + //alloc_mmu(&av1_mmumgr_dw, cm->cur_frame->buf.index, frame_width, frame_height, bit_depth); + losless_comp_header_size_dw = + compute_losless_comp_header_size_dw(frame_width, frame_height); + losless_comp_body_size_dw = + compute_losless_comp_body_size_dw(frame_width, frame_height, + (bit_depth == AOM_BITS_10)); +#endif + + losless_comp_header_size = + compute_losless_comp_header_size + (frame_width, frame_height); + losless_comp_body_size = + compute_losless_comp_body_size(frame_width, + frame_height, (bit_depth == AOM_BITS_10)); + + cur_pic_config->comp_body_size = losless_comp_body_size; + + av1_print(hw, AOM_DEBUG_HW_MORE, + "%s: width %d height %d depth %d head_size 0x%x body_size 0x%x\r\n", + __func__, frame_width, frame_height, bit_depth, + losless_comp_header_size, losless_comp_body_size); +#ifdef LOSLESS_COMPRESS_MODE + data32 = READ_VREG(HEVC_SAO_CTRL5); + if (bit_depth == AOM_BITS_10) + data32 &= ~(1<<9); + else + data32 |= (1<<9); + + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + if (hw->mmu_enable) { + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1,(0x1<< 4)); // bit[4] : paged_mem_mode + } else { + if (bit_depth == AOM_BITS_10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0<<3)); // bit[3] smem mdoe + else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (1<<3)); // bit[3] smem mdoe + } + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, + (losless_comp_body_size >> 5)); + /* + WRITE_VREG(HEVCD_MPP_DECOMP_CTL3, + (0xff<<20) | (0xff<<10) | 0xff); //8-bit mode + */ + WRITE_VREG(HEVC_CM_BODY_LENGTH, + losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, + losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, + losless_comp_header_size); + + if (get_double_write_mode(hw) & 0x10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); + +#else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1,0x1 << 31); +#endif +#ifdef 
AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + WRITE_VREG(HEVC_CM_BODY_LENGTH2, losless_comp_body_size_dw); + WRITE_VREG(HEVC_CM_HEADER_OFFSET2, losless_comp_body_size_dw); + WRITE_VREG(HEVC_CM_HEADER_LENGTH2, losless_comp_header_size_dw); + } +#endif + return 0; + +} + +static int config_mc_buffer(struct AV1HW_s *hw, unsigned short bit_depth, unsigned char inter_flag) +{ + int32_t i; + AV1_COMMON *cm = &hw->common; + PIC_BUFFER_CONFIG* cur_pic_config = &cm->cur_frame->buf; + uint8_t scale_enable = 0; + + av1_print(hw, AOM_DEBUG_HW_MORE, + " #### config_mc_buffer %s ####\n", + inter_flag ? "inter" : "intra"); + +#ifdef DEBUG_PRINT + if (debug&AOM_AV1_DEBUG_BUFMGR) + av1_print(hw, AOM_DEBUG_HW_MORE, + "config_mc_buffer entered .....\n"); +#endif + + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0<<1) | 1); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (cur_pic_config->order_hint<<24) | + (cur_pic_config->mc_canvas_u_v<<16) | + (cur_pic_config->mc_canvas_u_v<<8)| + cur_pic_config->mc_canvas_y); + for (i = LAST_FRAME; i <= ALTREF_FRAME; i++) { + PIC_BUFFER_CONFIG *pic_config; //cm->frame_refs[i].buf; + if (inter_flag) + pic_config = av1_get_ref_frame_spec_buf(cm, i); + else + pic_config = cur_pic_config; + if (pic_config) { + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic_config->order_hint<<24) | + (pic_config->mc_canvas_u_v<<16) | + (pic_config->mc_canvas_u_v<<8) | + pic_config->mc_canvas_y); + if (inter_flag) + av1_print(hw, AOM_DEBUG_HW_MORE, + "refid 0x%x mc_canvas_u_v 0x%x mc_canvas_y 0x%x order_hint 0x%x\n", + i, pic_config->mc_canvas_u_v, + pic_config->mc_canvas_y, pic_config->order_hint); + } else { + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); + } + } + + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (0 << 1) | 1); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (cur_pic_config->order_hint << 24) | + (cur_pic_config->mc_canvas_u_v << 16) | + (cur_pic_config->mc_canvas_u_v << 8) | + cur_pic_config->mc_canvas_y); + for (i = 
LAST_FRAME; i <= ALTREF_FRAME; i++) { + PIC_BUFFER_CONFIG *pic_config; + if (inter_flag) + pic_config = av1_get_ref_frame_spec_buf(cm, i); + else + pic_config = cur_pic_config; + + if (pic_config) { + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic_config->order_hint << 24)| + (pic_config->mc_canvas_u_v << 16) | + (pic_config->mc_canvas_u_v << 8) | + pic_config->mc_canvas_y); + } else { + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); + } + } + + WRITE_VREG(AV1D_MPP_REFINFO_TBL_ACCCONFIG, + (0x1 << 2) | (0x0 <<3)); // auto_inc start index:0 field:0 + for (i = 0; i <= ALTREF_FRAME; i++) { + int32_t ref_pic_body_size; + struct scale_factors * sf = NULL; + PIC_BUFFER_CONFIG *pic_config; + + if (inter_flag && i >= LAST_FRAME) + pic_config = av1_get_ref_frame_spec_buf(cm, i); + else + pic_config = cur_pic_config; + + if (pic_config) { + ref_pic_body_size = + compute_losless_comp_body_size(pic_config->y_crop_width, + pic_config->y_crop_height, (bit_depth == AOM_BITS_10)); + + WRITE_VREG(AV1D_MPP_REFINFO_DATA, pic_config->y_crop_width); + WRITE_VREG(AV1D_MPP_REFINFO_DATA, pic_config->y_crop_height); + if (inter_flag && i >= LAST_FRAME) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "refid %d: ref width/height(%d,%d), cur width/height(%d,%d) ref_pic_body_size 0x%x\n", + i, pic_config->y_crop_width, pic_config->y_crop_height, + cur_pic_config->y_crop_width, cur_pic_config->y_crop_height, + ref_pic_body_size); + } + } else { + ref_pic_body_size = 0; + WRITE_VREG(AV1D_MPP_REFINFO_DATA, 0); + WRITE_VREG(AV1D_MPP_REFINFO_DATA, 0); + } + + if (inter_flag && i >= LAST_FRAME) + sf = av1_get_ref_scale_factors(cm, i); + + if ((sf != NULL) && av1_is_scaled(sf)) { + scale_enable |= (1 << i); + } + + if (sf) { + WRITE_VREG(AV1D_MPP_REFINFO_DATA, sf->x_scale_fp); + WRITE_VREG(AV1D_MPP_REFINFO_DATA, sf->y_scale_fp); + + av1_print(hw, AOM_DEBUG_HW_MORE, + "x_scale_fp %d, y_scale_fp %d\n", + sf->x_scale_fp, sf->y_scale_fp); + } else { + WRITE_VREG(AV1D_MPP_REFINFO_DATA, REF_NO_SCALE); //1<<14 + 
WRITE_VREG(AV1D_MPP_REFINFO_DATA, REF_NO_SCALE); + } + if (hw->mmu_enable) + WRITE_VREG(AV1D_MPP_REFINFO_DATA, 0); + else + WRITE_VREG(AV1D_MPP_REFINFO_DATA, + ref_pic_body_size >> 5); + } + WRITE_VREG(AV1D_MPP_REF_SCALE_ENBL, scale_enable); + WRITE_VREG(PARSER_REF_SCALE_ENBL, scale_enable); + av1_print(hw, AOM_DEBUG_HW_MORE, + "WRITE_VREG(PARSER_REF_SCALE_ENBL, 0x%x)\n", + scale_enable); + return 0; +} + +static void clear_mpred_hw(struct AV1HW_s *hw) +{ + unsigned int data32; + + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 &= (~(1 << 6)); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); +} + +static void config_mpred_hw(struct AV1HW_s *hw, unsigned char inter_flag) +{ + AV1_COMMON *cm = &hw->common; + PIC_BUFFER_CONFIG *cur_pic_config = &cm->cur_frame->buf; + //PIC_BUFFER_CONFIG *last_frame_pic_config = NULL; + int i, j, pos, reg_i; + int mv_cal_tpl_count = 0; + unsigned int mv_ref_id[MFMV_STACK_SIZE] = {0, 0, 0}; + unsigned ref_offset_reg[] = { + HEVC_MPRED_L0_REF06_POC, + HEVC_MPRED_L0_REF07_POC, + HEVC_MPRED_L0_REF08_POC, + HEVC_MPRED_L0_REF09_POC, + HEVC_MPRED_L0_REF10_POC, + HEVC_MPRED_L0_REF11_POC, + }; + unsigned ref_buf_reg[] = { + HEVC_MPRED_L0_REF03_POC, + HEVC_MPRED_L0_REF04_POC, + HEVC_MPRED_L0_REF05_POC + }; + unsigned ref_offset_val[6] = + {0, 0, 0, 0, 0, 0}; + unsigned ref_buf_val[3] = {0, 0, 0}; + + uint32_t data32; + int32_t mpred_curr_lcu_x; + int32_t mpred_curr_lcu_y; + //int32_t mpred_mv_rd_end_addr; + + av1_print(hw, AOM_DEBUG_HW_MORE, + " #### config_mpred_hw ####\n"); + + /*if (cm->prev_frame) + last_frame_pic_config = &cm->prev_frame->buf; + mpred_mv_rd_end_addr = last_frame_pic_config->mpred_mv_wr_start_addr + + (last_frame_pic_config->lcu_total * MV_MEM_UNIT); + */ + + data32 = READ_VREG(HEVC_MPRED_CURR_LCU); + mpred_curr_lcu_x =data32 & 0xffff; + mpred_curr_lcu_y =(data32>>16) & 0xffff; + + av1_print(hw, AOM_DEBUG_HW_MORE, + "cur pic index %d\n", cur_pic_config->index); + /*printk("cur pic index %d col pic index %d\n", + 
cur_pic_config->index, last_frame_pic_config->index);*/ + + //WRITE_VREG(HEVC_MPRED_CTRL3,0x24122412); +#ifdef CO_MV_COMPRESS + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) { + WRITE_VREG(HEVC_MPRED_CTRL3,0x10151015); // 'd10, 'd21 for AV1 + } else { + WRITE_VREG(HEVC_MPRED_CTRL3,0x13151315); // 'd19, 'd21 for AV1 + } +#else + WRITE_VREG(HEVC_MPRED_CTRL3,0x13151315); // 'd19, 'd21 for AV1 +#endif + WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, + hw->pbi->work_space_buf->mpred_above.buf_start); + +#if 0 + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 &= (~(1<<6)); + data32 |= (cm->use_prev_frame_mvs << 6); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); +#endif + if (inter_flag) { + /* config sign_bias */ + //data32 = (cm->cur_frame_force_integer_mv & 0x1) << 9; + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 &= (~(0xff << 12)); + //for (i = LAST_FRAME; i <= ALTREF_FRAME; i++) { + /* HEVC_MPRED_CTRL4[bit 12] is for cm->ref_frame_sign_bias[0] + instead of cm->ref_frame_sign_bias[LAST_FRAME] */ + for (i = 0; i <= ALTREF_FRAME; i++) { + data32 |= ((cm->ref_frame_sign_bias[i] & 0x1) << (12 + i)); + } + WRITE_VREG(HEVC_MPRED_CTRL4, data32); + av1_print(hw, AOM_DEBUG_HW_MORE, + "WRITE_VREG(HEVC_MPRED_CTRL4, 0x%x)\n", data32); + } +#if 1 + data32 = ((cm->seq_params.order_hint_info.enable_order_hint << 27) | + (cm->seq_params.order_hint_info.order_hint_bits_minus_1 << 24) | + (cm->cur_frame->order_hint << 16 )); +#ifdef CO_MV_COMPRESS + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) { + data32 |= (0x10 << 8) | (0x10 << 0); + } else { + data32 |= (0x13 << 8) | (0x13 << 0); + } +#else + data32 |= (0x13 << 8) | (0x13 << 0); +#endif + +#else + data32 = READ_VREG(HEVC_MPRED_L0_REF00_POC); + data32 &= (~(0xff << 16)); + data32 |= (cm->cur_frame->order_hint & 0xff); + data32 &= (~(1 << 27)); + data32 |= (cm->seq_params.order_hint_info.enable_order_hint << 27); +#endif + WRITE_VREG(HEVC_MPRED_L0_REF00_POC, data32); + av1_print(hw, AOM_DEBUG_HW_MORE, + 
"WRITE_VREG(HEVC_MPRED_L0_REF00_POC, 0x%x)\n", data32); + + if (inter_flag) { + /* config ref_buf id and order hint */ + data32 = 0; + pos = 25; + reg_i = 0; + for (i = ALTREF_FRAME; i >= LAST_FRAME; i--) { + PIC_BUFFER_CONFIG *pic_config = + av1_get_ref_frame_spec_buf(cm, i); + if (pic_config) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "pic_config for %d th ref: index %d, reg[%d] pos %d\n", + i, pic_config->index, reg_i, pos); + data32 |= ((pic_config->index < 0)? 0 : pic_config->index) << pos; + } else + av1_print(hw, AOM_DEBUG_HW_MORE, + "pic_config is null for %d th ref\n", i); + if (pos == 0) { + //WRITE_VREG(ref_buf_reg[reg_i], data32); + ref_buf_val[reg_i] = data32; + av1_print(hw, AOM_DEBUG_HW_MORE, + "ref_buf_reg[%d], WRITE_VREG(0x%x, 0x%x)\n", + reg_i, ref_buf_reg[reg_i], data32); + reg_i++; + data32 = 0; + pos = 24; //for P_HEVC_MPRED_L0_REF04_POC + } else { + if (pos == 24) + pos -= 8; //for P_HEVC_MPRED_L0_REF04_POC + else + pos -= 5; //for P_HEVC_MPRED_L0_REF03_POC + } + } + for (i = ALTREF_FRAME; i >= LAST_FRAME; i--) { + PIC_BUFFER_CONFIG *pic_config = + av1_get_ref_frame_spec_buf(cm, i); + if (pic_config) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "pic_config for %d th ref: order_hint %d, reg[%d] pos %d\n", + i, pic_config->order_hint, reg_i, pos); + data32 |= ((pic_config->index < 0)? 
0 : pic_config->order_hint) << pos; + } else + av1_print(hw, AOM_DEBUG_HW_MORE, + "pic_config is null for %d th ref\n", i); + if (pos == 0) { + //WRITE_VREG(ref_buf_reg[reg_i], data32); + ref_buf_val[reg_i] = data32; + av1_print(hw, AOM_DEBUG_HW_MORE, + "ref_buf_reg[%d], WRITE_VREG(0x%x, 0x%x)\n", + reg_i, ref_buf_reg[reg_i], data32); + reg_i++; + data32 = 0; + pos = 24; + } else + pos -= 8; + } + if (pos != 24) { + //WRITE_VREG(ref_buf_reg[reg_i], data32); + ref_buf_val[reg_i] = data32; + av1_print(hw, AOM_DEBUG_HW_MORE, + "ref_buf_reg[%d], WRITE_VREG(0x%x, 0x%x)\n", + reg_i, ref_buf_reg[reg_i], data32); + } + /* config ref_offset */ + data32 = 0; + pos = 24; + mv_cal_tpl_count = 0; + reg_i = 0; + for (i = 0; i < cm->mv_ref_id_index; i++) { + if (cm->mv_cal_tpl_mvs[i]) { + mv_ref_id[mv_cal_tpl_count] = cm->mv_ref_id[i]; + mv_cal_tpl_count++; + for (j = LAST_FRAME; j <= ALTREF_FRAME; j++) { + /*offset can be negative*/ + unsigned char offval = + cm->mv_ref_offset[i][j] & 0xff; + data32 |= (offval << pos); + if (pos == 0) { + //WRITE_VREG(ref_offset_reg[reg_i], data32); + ref_offset_val[reg_i] = data32; + av1_print(hw, AOM_DEBUG_HW_MORE, + "ref_offset_reg[%d], WRITE_VREG(0x%x, 0x%x)\n", + reg_i, ref_offset_reg[reg_i], data32); + reg_i++; + data32 = 0; + pos = 24; + } else + pos -= 8; + } + } + } + if (pos != 24) { + //WRITE_VREG(ref_offset_reg[reg_i], data32); + ref_offset_val[reg_i] = data32; + av1_print(hw, AOM_DEBUG_HW_MORE, + "ref_offset_reg[%d], WRITE_VREG(0x%x, 0x%x)\n", + reg_i, ref_offset_reg[reg_i], data32); + } + + data32 = ref_offset_val[5] | //READ_VREG(HEVC_MPRED_L0_REF11_POC) | + mv_cal_tpl_count | (mv_ref_id[0] << 2) | + (mv_ref_id[1] << 5) | (mv_ref_id[2] << 8); + ref_offset_val[5] = data32; + //WRITE_VREG(HEVC_MPRED_L0_REF11_POC, data32); + av1_print(hw, AOM_DEBUG_HW_MORE, + "WRITE_VREG(HEVC_MPRED_L0_REF11_POC 0x%x, 0x%x)\n", + HEVC_MPRED_L0_REF11_POC, data32); + } + for (i = 0; i < 3; i++) + WRITE_VREG(ref_buf_reg[i], ref_buf_val[i]); + for (i = 0; 
i < 6; i++) + WRITE_VREG(ref_offset_reg[i], ref_offset_val[i]); + + WRITE_VREG(HEVC_MPRED_MV_WR_START_ADDR, + cur_pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_WPTR, + cur_pic_config->mpred_mv_wr_start_addr); + + if (inter_flag) { + for (i = 0; i < mv_cal_tpl_count; i++) { + PIC_BUFFER_CONFIG *pic_config = + av1_get_ref_frame_spec_buf(cm, mv_ref_id[i]); + if (pic_config == NULL) + continue; + if (i == 0) { + WRITE_VREG(HEVC_MPRED_MV_RD_START_ADDR, + pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_RPTR, + pic_config->mpred_mv_wr_start_addr); + } else if (i == 1) { + WRITE_VREG(HEVC_MPRED_L0_REF01_POC, + pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_RPTR_1, + pic_config->mpred_mv_wr_start_addr); + } else if (i == 2) { + WRITE_VREG(HEVC_MPRED_L0_REF02_POC, + pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_RPTR_2, + pic_config->mpred_mv_wr_start_addr); + } else { + av1_print(hw, AOM_DEBUG_HW_MORE, + "%s: mv_ref_id error\n", __func__); + } + } + } + data32 = READ_VREG(HEVC_MPRED_CTRL0); + data32 &= ~((1 << 10) | (1 << 11)); + data32 |= (1 << 10); /*write enable*/ + av1_print(hw, AOM_DEBUG_HW_MORE, + "current_frame.frame_type=%d, cur_frame->frame_type=%d, allow_ref_frame_mvs=%d\n", + cm->current_frame.frame_type, cm->cur_frame->frame_type, + cm->allow_ref_frame_mvs); + + if (av1_frame_is_inter(&hw->common)) { + if (cm->allow_ref_frame_mvs) { + data32 |= (1 << 11); /*read enable*/ + } + } + av1_print(hw, AOM_DEBUG_HW_MORE, + "WRITE_VREG(HEVC_MPRED_CTRL0 0x%x, 0x%x)\n", + HEVC_MPRED_CTRL0, data32); + WRITE_VREG(HEVC_MPRED_CTRL0, data32); + /* + printk("config_mpred: (%x) wr_start_addr %x from indx %d; + (%x) rd_start_addr %x from index %d\n", + cur_pic_config, cur_pic_config->mpred_mv_wr_start_addr, cur_pic_config->index, + last_frame_pic_config, last_frame_pic_config->mpred_mv_wr_start_addr, last_frame_pic_config->index); + data32 = ((pbi->lcu_x_num - pbi->tile_width_lcu)*MV_MEM_UNIT); + 
WRITE_VREG(HEVC_MPRED_MV_WR_ROW_JUMP,data32); + WRITE_VREG(HEVC_MPRED_MV_RD_ROW_JUMP,data32); + + WRITE_VREG(HEVC_MPRED_MV_RD_END_ADDR, mpred_mv_rd_end_addr); + */ +} + +static void config_sao_hw(struct AV1HW_s *hw, union param_u *params) +{ + /* + !!!!!!!!!!!!!!!!!!!!!!!!!TODO .... !!!!!!!!!!! + mem_map_mode, endian, get_double_write_mode + */ + AV1_COMMON *cm = &hw->common; + PIC_BUFFER_CONFIG* pic_config = &cm->cur_frame->buf; + uint32_t data32; + int32_t lcu_size = + ((params->p.seq_flags >> 6) & 0x1) ? 128 : 64; + int32_t mc_buffer_size_u_v = + pic_config->lcu_total*lcu_size*lcu_size/2; + int32_t mc_buffer_size_u_v_h = + (mc_buffer_size_u_v + 0xffff)>>16; //64k alignment + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] #### config_sao_hw ####, lcu_size %d\n", lcu_size); + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] lcu_total : %d\n", pic_config->lcu_total); + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] mc_y_adr : 0x%x\n", pic_config->mc_y_adr); + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] mc_u_v_adr : 0x%x\n", pic_config->mc_u_v_adr); + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] header_adr : 0x%x\n", pic_config->header_adr); +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] header_dw_adr : 0x%x\n", pic_config->header_dw_adr); +#endif + data32 = READ_VREG(HEVC_SAO_CTRL9) | (1 << 1); + WRITE_VREG(HEVC_SAO_CTRL9, data32); + + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 |= (0x1 << 14); /* av1 mode */ + data32 |= (0xff << 16); /* dw {v1,v0,h1,h0} ctrl_y_cbus */ + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + WRITE_VREG(HEVC_SAO_CTRL0, + lcu_size == 128 ? 
0x7 : 0x6); /*lcu_size_log2*/ +#ifdef LOSLESS_COMPRESS_MODE + WRITE_VREG(HEVC_CM_BODY_START_ADDR, pic_config->mc_y_adr); +#ifdef AOM_AV1_MMU + WRITE_VREG(HEVC_CM_HEADER_START_ADDR, pic_config->header_adr); +#endif +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) + WRITE_VREG(HEVC_CM_HEADER_START_ADDR2, pic_config->header_dw_adr); +#endif +#else +/*!LOSLESS_COMPRESS_MODE*/ + WRITE_VREG(HEVC_SAO_Y_START_ADDR, pic_config->mc_y_adr); +#endif + + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] sao_body_addr:%x\n", pic_config->mc_y_adr); + //printk("[config_sao_hw] sao_header_addr:%x\n", pic_config->mc_y_adr + losless_comp_body_size ); + +#ifdef VPU_FILMGRAIN_DUMP + // Let Microcode to increase + // WRITE_VREG(HEVC_FGS_TABLE_START, pic_config->fgs_table_adr); +#else + WRITE_VREG(HEVC_FGS_TABLE_START, pic_config->fgs_table_adr); +#endif + WRITE_VREG(HEVC_FGS_TABLE_LENGTH, FGS_TABLE_SIZE * 8); + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] fgs_table adr:0x%x , length 0x%x bits\n", + pic_config->fgs_table_adr, FGS_TABLE_SIZE * 8); + + data32 = (mc_buffer_size_u_v_h<<16)<<1; + //printk("data32 = %x, mc_buffer_size_u_v_h = %x, lcu_total = %x\n", data32, mc_buffer_size_u_v_h, pic_config->lcu_total); + WRITE_VREG(HEVC_SAO_Y_LENGTH ,data32); + +#ifndef LOSLESS_COMPRESS_MODE + WRITE_VREG(HEVC_SAO_C_START_ADDR, pic_config->mc_u_v_adr); +#else +#endif + + data32 = (mc_buffer_size_u_v_h<<16); + WRITE_VREG(HEVC_SAO_C_LENGTH ,data32); + +#ifndef LOSLESS_COMPRESS_MODE + /* multi tile to do... 
*/ + WRITE_VREG(HEVC_SAO_Y_WPTR, pic_config->mc_y_adr); + + WRITE_VREG(HEVC_SAO_C_WPTR, pic_config->mc_u_v_adr); +#else + if (get_double_write_mode(hw) && + (get_double_write_mode(hw) & 0x20) == 0) { + WRITE_VREG(HEVC_SAO_Y_START_ADDR, pic_config->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_START_ADDR, pic_config->dw_u_v_adr); + WRITE_VREG(HEVC_SAO_Y_WPTR, pic_config->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_WPTR, pic_config->dw_u_v_adr); + } else { + //WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0xffffffff); + //WRITE_VREG(HEVC_SAO_C_START_ADDR, 0xffffffff); + } +#endif + + +#ifndef AOM_AV1_NV21 +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + } +#endif +#endif + +#ifdef AOM_AV1_NV21 +#ifdef DOS_PROJECT + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + data32 |= (hw->mem_map_mode << 12); // [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + data32 &= (~0x3); + data32 |= 0x1; // [1]:dw_disable [0]:cm_disable + WRITE_VREG(HEVC_SAO_CTRL1, data32); + + data32 = READ_VREG(HEVC_SAO_CTRL5); // [23:22] dw_v1_ctrl [21:20] dw_v0_ctrl [19:18] dw_h1_ctrl [17:16] dw_h0_ctrl + data32 &= ~(0xff << 16); // set them all 0 for AOM_AV1_NV21 (no down-scale) + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + data32 |= (hw->mem_map_mode << 4); // [5:4] -- address_format 00:linear 01:32x32 10:64x32 + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#else +// m8baby test1902 + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + data32 |= (hw->mem_map_mode << 12); // [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + data32 &= (~0xff0); + //data32 |= 0x670; // Big-Endian per 64-bit + data32 |= 0x880; // Big-Endian per 64-bit + data32 &= (~0x3); + data32 |= 0x1; // [1]:dw_disable [0]:cm_disable + WRITE_VREG(HEVC_SAO_CTRL1, data32); + + data32 = READ_VREG(HEVC_SAO_CTRL5); // [23:22] dw_v1_ctrl [21:20] dw_v0_ctrl [19:18] dw_h1_ctrl [17:16] dw_h0_ctrl + data32 &= ~(0xff << 16); // set them all 0 for AOM_AV1_NV21 (no down-scale) + 
WRITE_VREG(HEVC_SAO_CTRL5, data32); + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + data32 |= (hw->mem_map_mode << 4); // [5:4] -- address_format 00:linear 01:32x32 10:64x32 + data32 &= (~0xF); + data32 |= 0x8; // Big-Endian per 64-bit + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#endif +#else +/*CHANGE_DONE nnn*/ + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + data32 |= (hw->mem_map_mode << 12); /* [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 */ + data32 &= (~0xff0); + /* data32 |= 0x670; // Big-Endian per 64-bit */ +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable == 0) + data32 |= ((hw->endian >> 8) & 0xfff); /* Big-Endian per 64-bit */ +#else + data32 |= ((hw->endian >> 8) & 0xfff); /* Big-Endian per 64-bit */ +#endif + data32 &= (~0x3); /*[1]:dw_disable [0]:cm_disable*/ + if (get_double_write_mode(hw) == 0) + data32 |= 0x2; /*disable double write*/ + else if (get_double_write_mode(hw) & 0x10) + data32 |= 0x1; /*disable cm*/ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { /* >= G12A dw write control */ + unsigned int data; + data = READ_VREG(HEVC_DBLK_CFGB); + data &= (~0x300); /*[8]:first write enable (compress) [9]:double write enable (uncompress)*/ + if (get_double_write_mode(hw) == 0) + data |= (0x1 << 8); /*enable first write*/ + else if (get_double_write_mode(hw) & 0x10) + data |= (0x1 << 9); /*double write only*/ + else + data |= ((0x1 << 8) |(0x1 << 9)); + WRITE_VREG(HEVC_DBLK_CFGB, data); + } + + /* swap uv */ + if (hw->is_used_v4l) { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + data32 &= ~(1 << 8); /* NV21 */ + else + data32 |= (1 << 8); /* NV12 */ + } + data32 &= (~(3 << 14)); + data32 |= (2 << 14); + /* + * [31:24] ar_fifo1_axi_thred + * [23:16] ar_fifo0_axi_thred + * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes + * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + * [11:08] 
axi_lendian_C + * [07:04] axi_lendian_Y + * [3] reserved + * [2] clk_forceon + * [1] dw_disable:disable double write output + * [0] cm_disable:disable compress output + */ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + + if (get_double_write_mode(hw) & 0x10) { + /* [23:22] dw_v1_ctrl + *[21:20] dw_v0_ctrl + *[19:18] dw_h1_ctrl + *[17:16] dw_h0_ctrl + */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /*set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } else { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) + WRITE_VREG(HEVC_SAO_CTRL26, 0); + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 &= (~(0xff << 16)); + if ((get_double_write_mode(hw) & 0xf) == 8) { + WRITE_VREG(HEVC_SAO_CTRL26, 0xf); + data32 |= (0xff << 16); + } else if ((get_double_write_mode(hw) & 0xf) == 2 || + (get_double_write_mode(hw) & 0xf) == 3) + data32 |= (0xff<<16); + else if ((get_double_write_mode(hw) & 0xf) == 4 || + (get_double_write_mode(hw) & 0xf) == 5) + data32 |= (0x33<<16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /* [5:4] -- address_format 00:linear 01:32x32 10:64x32 */ + data32 |= (hw->mem_map_mode << 4); + data32 &= (~0xf); + data32 |= (hw->endian & 0xf); /* valid only when double write only */ + + /* swap uv */ + if (hw->is_used_v4l) { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + data32 |= (1 << 12); /* NV21 */ + else + data32 &= ~(1 << 12); /* NV12 */ + } + data32 &= (~(3 << 8)); + data32 |= (2 << 8); + /* + * [3:0] little_endian + * [5:4] address_format 00:linear 01:32x32 10:64x32 + * [7:6] reserved + * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte + * [11:10] reserved + * [12] CbCr_byte_swap + * [31:13] reserved + */ + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); + +#endif + +} + + +#ifdef AOM_AV1_DBLK_INIT +/* + * Defines, declarations, 
 sub-functions for av1 de-block loop filter Thr/Lvl table update
 * - struct segmentation_lf is for loop filter only (removed something)
 * - function "av1_loop_filter_init" and "av1_loop_filter_frame_init" will be instantiated in C_Entry
 * - av1_loop_filter_init run once before decoding start
 * - av1_loop_filter_frame_init run before every frame decoding start
 * - set video format to AOM_AV1 is in av1_loop_filter_init
 */
#define MAX_LOOP_FILTER 63
#define MAX_MODE_LF_DELTAS 2
#define MAX_SEGMENTS 8
#define MAX_MB_PLANE 3

/* Segment-level feature indices (loop-filter-relevant subset of the AV1
 * spec's segmentation features). Order must not change: values are used
 * as indices elsewhere in the driver. */
typedef enum {
	SEG_LVL_ALT_Q,      // Use alternate Quantizer ....
	SEG_LVL_ALT_LF_Y_V, // Use alternate loop filter value on y plane vertical
	SEG_LVL_ALT_LF_Y_H, // Use alternate loop filter value on y plane horizontal
	SEG_LVL_ALT_LF_U,   // Use alternate loop filter value on u plane
	SEG_LVL_ALT_LF_V,   // Use alternate loop filter value on v plane
	SEG_LVL_REF_FRAME,  // Optional Segment reference frame
	SEG_LVL_SKIP,       // Optional Segment (0,0) + skip mode
	SEG_LVL_GLOBALMV,
	SEG_LVL_MAX
} SEG_LVL_FEATURES;

/* Lookup: [plane y/u/v][direction vert/horz] -> loop-filter feature id.
 * Chroma planes use the same feature for both directions. */
static const SEG_LVL_FEATURES seg_lvl_lf_lut[MAX_MB_PLANE][2] = {
	{ SEG_LVL_ALT_LF_Y_V, SEG_LVL_ALT_LF_Y_H },
	{ SEG_LVL_ALT_LF_U, SEG_LVL_ALT_LF_U },
	{ SEG_LVL_ALT_LF_V, SEG_LVL_ALT_LF_V }
};

struct segmentation_lf { // for loopfilter only
	uint8_t enabled;     // nonzero when segmentation is enabled for the frame
	/*
	SEG_LVL_ALT_LF_Y_V feature_enable: seg_lf_info_y[bit7]
	SEG_LVL_ALT_LF_Y_V data: seg_lf_info_y[bit0~6]
	SEG_LVL_ALT_LF_Y_H feature enable: seg_lf_info_y[bit15]
	SEG_LVL_ALT_LF_Y_H data: seg_lf_info_y[bit8~14]
	*/
	uint16_t seg_lf_info_y[8];
	/*
	SEG_LVL_ALT_LF_U feature_enable: seg_lf_info_c[bit7]
	SEG_LVL_ALT_LF_U data: seg_lf_info_c[bit0~6]
	SEG_LVL_ALT_LF_V feature enable: seg_lf_info_c[bit15]
	SEG_LVL_ALT_LF_V data: seg_lf_info_c[bit8~14]
	*/
	uint16_t seg_lf_info_c[8];
};

/* Thresholds derived from one filter level + frame sharpness
 * (written packed into HEVC_DBLK_CFG9 by the init functions below). */
typedef struct {
	uint8_t mblim;   // macroblock edge limit: 2*(lvl+2) + lim
	uint8_t lim;     // interior ("block inside") limit
	uint8_t hev_thr; // high-edge-variance threshold (only printed in debug here)
} loop_filter_thresh;

typedef struct loop_filter_info_n_s {
	loop_filter_thresh lfthr[MAX_LOOP_FILTER + 1];
	/* Final per-[plane][segment][direction][ref frame][mode] filter levels. */
	uint8_t lvl[MAX_MB_PLANE][MAX_SEGMENTS][2][REF_FRAMES][MAX_MODE_LF_DELTAS];
} loop_filter_info_n;

/* Per-frame loop-filter state mirrored from the bitstream header. */
struct loopfilter {
	int32_t filter_level[2];   // luma vertical/horizontal base levels
	int32_t filter_level_u;
	int32_t filter_level_v;

	int32_t sharpness_level;

	uint8_t mode_ref_delta_enabled;
	uint8_t mode_ref_delta_update;

	// 0 = Intra, Last, Last2+Last3,
	// GF, BRF, ARF2, ARF
	int8_t ref_deltas[REF_FRAMES];

	// 0 = ZERO_MV, MV
	int8_t mode_deltas[MAX_MODE_LF_DELTAS];

	int32_t combine_vert_horz_lf;

	int32_t lf_pic_cnt; // decoded-picture counter, incremented per frame init

//#if LOOP_FILTER_BITMASK
	//LoopFilterMask *lfm;
	//size_t lfm_num;
	//int lfm_stride;
	//LpfSuperblockInfo neighbor_sb_lpf_info;
//#endif // LOOP_FILTER_BITMASK
};
#ifdef DBG_LPF_DBLK_LVL
/* Clamp value into [low, high]. */
static int32_t myclamp(int32_t value, int32_t low, int32_t high) {
	return value < low ? low : (value > high ? high : value);
}
#endif
/*static int8_t extend_sign_7bits(uint8_t value) {
	return (((value>>6) & 0x1)<<7) | (value&0x7f);
}*/

// convert data to int8_t variable
// value : signed data (with any bitwidth<8) which is assigned to uint8_t variable as an input
// bw : bitwidth of signed data, (from 1 to 7)
// Sign-extends the bw-bit two's-complement value held in the low bits of
// `value` into a full int8_t; out-of-range bw is passed through unchanged.
static int8_t conv2int8 (uint8_t value, uint8_t bw) {
	if (bw<1 || bw>7) return (int8_t)value;
	else {
		const uint8_t data_bits = value & ((1<<bw)-1);
		const uint8_t sign_bit = (value>>(bw-1)) & 0x1;
		// Replicate the sign bit into all 8 positions (0x00 or 0xff).
		const uint8_t sign_bit_ext = sign_bit | sign_bit<<1 | sign_bit<<2 | sign_bit<<3 | sign_bit<<4 | sign_bit<<5 | sign_bit<<6 | sign_bit<<7;
		return (int8_t)((sign_bit_ext<<bw) | data_bits);
	}
}

/* Rebuild the lfthr limit table from the frame sharpness level.
 * Mirrors libaom's update_sharpness(); hev_thr is not filled in here. */
static void av1_update_sharpness(loop_filter_info_n *lfi, int32_t sharpness_lvl) {
	int32_t lvl;

	// For each possible value for the loop filter fill out limits
	for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) {
		// Set loop filter parameters that control sharpness.
		// lim shrinks as sharpness grows, then is capped at (9 - sharpness)
		// and floored at 1 — matches libaom update_sharpness().
		int32_t block_inside_limit =
			lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4));

		if (sharpness_lvl > 0) {
			if (block_inside_limit > (9 - sharpness_lvl))
				block_inside_limit = (9 - sharpness_lvl);
		}

		if (block_inside_limit < 1)
			block_inside_limit = 1;

		lfi->lfthr[lvl].lim = (uint8_t)block_inside_limit;
		lfi->lfthr[lvl].mblim = (uint8_t)(2 * (lvl + 2) + block_inside_limit);
	}
}

// instantiate this function once when decode is started
// Loads the sharpness-derived threshold table into HEVC_DBLK_CFG9 and
// selects the AOM_AV1 video format in HEVC_DBLK_CFGB.
void av1_loop_filter_init(loop_filter_info_n *lfi, struct loopfilter *lf) {
	int32_t i;
	uint32_t data32;

	// init limits for given sharpness
	av1_update_sharpness(lfi, lf->sharpness_level);

	// Write to register: two consecutive levels' {lim,mblim} are packed
	// per 32-bit write (entries i*2 and i*2+1), 32 writes for 64 levels.
	for (i = 0; i < 32; i++) {
		uint32_t thr;
		thr = ((lfi->lfthr[i*2+1].lim & 0x3f)<<8) |
			(lfi->lfthr[i*2+1].mblim & 0xff);
		thr = (thr<<16) | ((lfi->lfthr[i*2].lim & 0x3f)<<8) |
			(lfi->lfthr[i*2].mblim & 0xff);
		WRITE_VREG(HEVC_DBLK_CFG9, thr);
	}
	// video format is AOM_AV1
	data32 = (0x57 << 8) | // 1st/2nd write both enable
		(0x4 << 0); // aom_av1 video format
	WRITE_VREG(HEVC_DBLK_CFGB, data32);
	av1_print2(AOM_DEBUG_HW_MORE,
		"[DBLK DEBUG] CFGB : 0x%x\n", data32);
}

// perform this function per frame
// Refreshes the threshold table, (in DBG_LPF_DBLK_LVL builds) expands the
// per-segment/ref/mode level table into HEVC_DBLK_CFGA, and programs the
// per-frame DBLK/CDEF registers from `lf` and `seg`.
void av1_loop_filter_frame_init(AV1Decoder* pbi, struct segmentation_lf *seg,
		loop_filter_info_n *lfi,
		struct loopfilter *lf,
		int32_t pic_width) {
	BuffInfo_t* buf_spec = pbi->work_space_buf;
	int32_t i;
#ifdef DBG_LPF_DBLK_LVL
	int32_t dir;
	int32_t filt_lvl[MAX_MB_PLANE], filt_lvl_r[MAX_MB_PLANE];
	int32_t plane;
	int32_t seg_id;
#endif
	// n_shift is the multiplier for lf_deltas
	// the multiplier is 1 for when filter_lvl is between 0 and 31;
	// 2 when filter_lvl is between 32 and 63

	// update limits if sharpness has changed
	av1_update_sharpness(lfi, lf->sharpness_level);

	// Write to register (same CFG9 packing as av1_loop_filter_init)
	for (i = 0; i < 32; i++) {
		uint32_t thr;
		thr = ((lfi->lfthr[i*2+1].lim & 0x3f)<<8)
			| (lfi->lfthr[i*2+1].mblim & 0xff);
		thr = (thr<<16) | ((lfi->lfthr[i*2].lim & 0x3f)<<8)
			| (lfi->lfthr[i*2].mblim & 0xff);
		WRITE_VREG(HEVC_DBLK_CFG9, thr);
	}
#ifdef DBG_LPF_DBLK_LVL
	// [0]=luma vertical, [1]=u, [2]=v; *_r is the "other direction" set
	// (luma horizontal); chroma uses the same level for both directions.
	filt_lvl[0] = lf->filter_level[0];
	filt_lvl[1] = lf->filter_level_u;
	filt_lvl[2] = lf->filter_level_v;

	filt_lvl_r[0] = lf->filter_level[1];
	filt_lvl_r[1] = lf->filter_level_u;
	filt_lvl_r[2] = lf->filter_level_v;

#ifdef DBG_LPF_PRINT
	printk("LF_PRINT: pic_cnt(%d) base_filter_level(%d,%d,%d,%d)\n",
		lf->lf_pic_cnt, lf->filter_level[0],
		lf->filter_level[1], lf->filter_level_u, lf->filter_level_v);
#endif

	for (plane = 0; plane < 3; plane++) {
		if (plane == 0 && !filt_lvl[0] && !filt_lvl_r[0])
			break; // both luma levels zero -> whole filter off
		else if (plane == 1 && !filt_lvl[1])
			continue;
		else if (plane == 2 && !filt_lvl[2])
			continue;

		for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) { // MAX_SEGMENTS==8
			for (dir = 0; dir < 2; ++dir) {
				int32_t lvl_seg = (dir == 0) ? filt_lvl[plane] : filt_lvl_r[plane];
				//assert(plane >= 0 && plane <= 2);
				// Unpack the per-segment enable(bit7)+delta(bits0..6) fields
				// packed by the parser (see struct segmentation_lf).
				const uint8_t seg_lf_info_y0 = seg->seg_lf_info_y[seg_id] & 0xff;
				const uint8_t seg_lf_info_y1 = (seg->seg_lf_info_y[seg_id]>>8) & 0xff;
				const uint8_t seg_lf_info_u = seg->seg_lf_info_c[seg_id] & 0xff;
				const uint8_t seg_lf_info_v = (seg->seg_lf_info_c[seg_id]>>8) & 0xff;
				const uint8_t seg_lf_info = (plane==2) ? seg_lf_info_v : (plane==1) ?
					seg_lf_info_u : ((dir==0) ? seg_lf_info_y0 : seg_lf_info_y1);
				const int8_t seg_lf_active = ((seg->enabled) && ((seg_lf_info>>7) & 0x1));
				const int8_t seg_lf_data = conv2int8(seg_lf_info,7);
#ifdef DBG_LPF_PRINT
				const int8_t seg_lf_data_clip = (seg_lf_data>63) ? 63 :
					(seg_lf_data<-63) ? -63 : seg_lf_data;
#endif
				if (seg_lf_active) {
					lvl_seg = myclamp(lvl_seg + (int32_t)seg_lf_data, 0, MAX_LOOP_FILTER);
				}

#ifdef DBG_LPF_PRINT
				printk("LF_PRINT:plane(%d) seg_id(%d) dir(%d) seg_lf_info(%d,0x%x),lvl_seg(0x%x)\n",
					plane,seg_id,dir,seg_lf_active,seg_lf_data_clip,lvl_seg);
#endif

				if (!lf->mode_ref_delta_enabled) {
					// we could get rid of this if we assume that deltas are set to
					// zero when not in use; encoder always uses deltas
					memset(lfi->lvl[plane][seg_id][dir], lvl_seg,
						sizeof(lfi->lvl[plane][seg_id][dir]));
				} else {
					int32_t ref, mode;
					// scale doubles for levels >= 32 (the "n_shift" note above)
					const int32_t scale = 1 << (lvl_seg >> 5);
					const int32_t intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale;
					lfi->lvl[plane][seg_id][dir][INTRA_FRAME][0] =
						myclamp(intra_lvl, 0, MAX_LOOP_FILTER);
#ifdef DBG_LPF_PRINT
					printk("LF_PRINT:ref_deltas[INTRA_FRAME](%d)\n",lf->ref_deltas[INTRA_FRAME]);
#endif
					for (ref = LAST_FRAME; ref < REF_FRAMES; ++ref) { // LAST_FRAME==1 REF_FRAMES==8
						for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) { // MAX_MODE_LF_DELTAS==2
							const int32_t inter_lvl =
								lvl_seg + lf->ref_deltas[ref] * scale +
								lf->mode_deltas[mode] * scale;
							lfi->lvl[plane][seg_id][dir][ref][mode] =
								myclamp(inter_lvl, 0, MAX_LOOP_FILTER);
#ifdef DBG_LPF_PRINT
							printk("LF_PRINT:ref_deltas(%d) mode_deltas(%d)\n",
								lf->ref_deltas[ref], lf->mode_deltas[mode]);
#endif
						}
					}
				}
			}
		}
	}

#ifdef DBG_LPF_PRINT
	for (i = 0; i <= MAX_LOOP_FILTER; i++) {
		printk("LF_PRINT:(%2d) thr=%d,blim=%3d,lim=%2d\n",
			i, lfi->lfthr[i].hev_thr,
			lfi->lfthr[i].mblim, lfi->lfthr[i].lim);
	}
	for (plane = 0; plane < 3; plane++) {
		for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) { // MAX_SEGMENTS==8
			for (dir = 0; dir < 2; ++dir) {
				int32_t mode;
				for (mode = 0; mode < 2; ++mode) {
					printk("assign {lvl[%d][%d][%d][0][%d],lvl[%d][%d][%d][1][%d],lvl[%d][%d][%d][2][%d],lvl[%d][%d][%d][3][%d],lvl[%d][%d][%d][4][%d],lvl[%d][%d][%d][5][%d],lvl[%d][%d][%d][6][%d],lvl[%d][%d][%d][7][%d]}={6'd%2d,6'd%2d,6'd%2d,6'd%2d,6'd%2d,6'd%2d,6'd%2d,6'd%2d};\n",
						plane, seg_id, dir, mode,
						plane, seg_id, dir, mode,
						plane, seg_id, dir, mode,
						plane, seg_id, dir, mode,
						plane, seg_id, dir, mode,
						plane, seg_id, dir, mode,
						plane, seg_id, dir, mode,
						plane, seg_id, dir, mode,
						lfi->lvl[plane][seg_id][dir][0][mode],
						lfi->lvl[plane][seg_id][dir][1][mode],
						lfi->lvl[plane][seg_id][dir][2][mode],
						lfi->lvl[plane][seg_id][dir][3][mode],
						lfi->lvl[plane][seg_id][dir][4][mode],
						lfi->lvl[plane][seg_id][dir][5][mode],
						lfi->lvl[plane][seg_id][dir][6][mode],
						lfi->lvl[plane][seg_id][dir][7][mode]);
				}
			}
		}
	}
#endif
	// Write to register: 192 packed words cover
	// plane(2 bits) x seg(3 bits) x ref(3 bits), four 6-bit levels each.
	for (i = 0; i < 192; i++) {
		uint32_t level;
		level = ((lfi->lvl[i>>6&3][i>>3&7][1][i&7][1] & 0x3f)<<24) |
			((lfi->lvl[i>>6&3][i>>3&7][1][i&7][0] & 0x3f)<<16) |
			((lfi->lvl[i>>6&3][i>>3&7][0][i&7][1] & 0x3f)<<8) |
			(lfi->lvl[i>>6&3][i>>3&7][0][i&7][0] & 0x3f);
		if (!lf->filter_level[0] && !lf->filter_level[1])
			level = 0;
		WRITE_VREG(HEVC_DBLK_CFGA, level);
	}
	// NOTE(review): the HEVC_DBLK_CFGA level-table writes above are compiled
	// only under DBG_LPF_DBLK_LVL — confirm non-debug builds intentionally
	// rely on firmware/another path to program the level table.
#endif // DBG_LPF_DBLK_LVL

#ifdef DBG_LPF_DBLK_FORCED_OFF
	if (lf->lf_pic_cnt == 2) {
		printk("LF_PRINT: pic_cnt(%d) dblk forced off !!!\n", lf->lf_pic_cnt);
		WRITE_VREG(HEVC_DBLK_DBLK0, 0);
	} else
		WRITE_VREG(HEVC_DBLK_DBLK0,
			lf->filter_level[0] | lf->filter_level[1] << 6 |
			lf->filter_level_u << 12 | lf->filter_level_v << 18);
#else
	// DBLK0: four 6-bit base levels packed [y_v | y_h<<6 | u<<12 | v<<18]
	WRITE_VREG(HEVC_DBLK_DBLK0,
		lf->filter_level[0] | lf->filter_level[1]<<6 |
		lf->filter_level_u<<12 | lf->filter_level_v<<18);
#endif
	// DBLK1 is written 10 times in sequence: 2 mode deltas then 8 ref deltas
	for (i =0; i < 10; i++)
		WRITE_VREG(HEVC_DBLK_DBLK1,
			((i<2) ? lf->mode_deltas[i&1] : lf->ref_deltas[(i-2)&7]));
	// DBLK2: per-segment packed luma(low 16) / chroma(high 16) lf info
	for (i = 0; i < 8; i++)
		WRITE_VREG(HEVC_DBLK_DBLK2,
			(uint32_t)(seg->seg_lf_info_y[i]) | (uint32_t)(seg->seg_lf_info_c[i]<<16));

	// Set P_HEVC_DBLK_CFGB again
	{
		uint32_t lpf_data32 = READ_VREG(HEVC_DBLK_CFGB);
		if (lf->mode_ref_delta_enabled)
			lpf_data32 |= (0x1<<28); // mode_ref_delta_enabled
		else
			lpf_data32 &= ~(0x1<<28);
		if (seg->enabled)
			lpf_data32 |= (0x1<<29); // seg enable
		else
			lpf_data32 &= ~(0x1<<29);
		if (pic_width >= 1280)
			lpf_data32 |= (0x1 << 4); // dblk pipeline mode=1 for performance
		else
			lpf_data32 &= ~(0x3 << 4);
		WRITE_VREG(HEVC_DBLK_CFGB, lpf_data32);
	}
	// Set CDEF
	WRITE_VREG(HEVC_DBLK_CDEF0, buf_spec->cdef_data.buf_start);
	{
		uint32_t cdef_data32 = (READ_VREG(HEVC_DBLK_CDEF1) & 0xffffff00);
		cdef_data32 |= 17; // TODO ERROR :: cdef temp dma address left offset
#ifdef DBG_LPF_CDEF_NO_PIPELINE
		cdef_data32 |= (1<<17); // cdef test no pipeline for very small picture
#endif
		WRITE_VREG(HEVC_DBLK_CDEF1, cdef_data32);
	}
	// Picture count
	lf->lf_pic_cnt++;
}
#endif // #ifdef AOM_AV1_DBLK_INIT

#ifdef AOM_AV1_UPSCALE_INIT
/*
 * these functions here for upscaling updated in every picture
 */
#define RS_SUBPEL_BITS 6
#define RS_SUBPEL_MASK ((1 << RS_SUBPEL_BITS) - 1)
#define RS_SCALE_SUBPEL_BITS 14
#define RS_SCALE_SUBPEL_MASK ((1 << RS_SCALE_SUBPEL_BITS) - 1)
#define RS_SCALE_EXTRA_BITS (RS_SCALE_SUBPEL_BITS - RS_SUBPEL_BITS)
#define RS_SCALE_EXTRA_OFF (1 << (RS_SCALE_EXTRA_BITS - 1))

/* Q14 horizontal step for superres upscaling (rounded in/out ratio),
 * mirroring libaom av1_get_upscale_convolve_step(). */
static int32_t av1_get_upscale_convolve_step(int32_t in_length, int32_t out_length) {
	return ((in_length << RS_SCALE_SUBPEL_BITS) + out_length / 2) / out_length;
}

/* Initial Q14 sub-pel position for the upscale convolution,
 * mirroring libaom get_upscale_convolve_x0(). */
static int32_t get_upscale_convolve_x0(int32_t in_length, int32_t out_length,
		int32_t x_step_qn) {
	const int32_t err = out_length * x_step_qn - (in_length << RS_SCALE_SUBPEL_BITS);
	const int32_t x0 =
		(-((out_length - in_length) << (RS_SCALE_SUBPEL_BITS - 1)) +
		out_length / 2) /
		out_length +
		RS_SCALE_EXTRA_OFF - err / 2;
	// keep only the fractional Q14 part
	return (int32_t)((uint32_t)x0 & RS_SCALE_SUBPEL_MASK);
}

/* Per-frame superres upscale setup: computes luma/chroma Q14 step and
 * start offsets from dec_width -> superres_upscaled_width and programs
 * the HEVC_DBLK_UPS* / AV1_UPSCALE_* registers.
 * NOTE(review): `params` is currently unused in the visible body — confirm. */
void av1_upscale_frame_init(AV1Decoder* pbi, AV1_COMMON *cm, param_t* params)
{
	BuffInfo_t* buf_spec = pbi->work_space_buf;
	//uint32_t data32;
	const int32_t width = cm->dec_width;
	const int32_t superres_upscaled_width = cm->superres_upscaled_width;
	const int32_t x_step_qn_luma = av1_get_upscale_convolve_step(width, superres_upscaled_width);
	const int32_t x0_qn_luma = get_upscale_convolve_x0(width, superres_upscaled_width, x_step_qn_luma);
	// chroma is 4:2:0 subsampled: half-width with round-up
	const int32_t x_step_qn_chroma = av1_get_upscale_convolve_step((width+1)>>1, (superres_upscaled_width+1)>>1);
	const int32_t x0_qn_chroma = get_upscale_convolve_x0((width+1)>>1, (superres_upscaled_width+1)>>1, x_step_qn_chroma);
	av1_print2(AOM_DEBUG_HW_MORE,
		"UPS_PRINT: width(%d -> %d)\n",
		width, superres_upscaled_width);
	av1_print2(AOM_DEBUG_HW_MORE,
		"UPS_PRINT: xstep(%d,%d)(0x%X, 0x%X) x0qn(%d,%d)(0x%X, 0x%X)\n",
		x_step_qn_luma,x_step_qn_chroma,
		x_step_qn_luma,x_step_qn_chroma,
		x0_qn_luma,x0_qn_chroma,
		x0_qn_luma,x0_qn_chroma);
	WRITE_VREG(HEVC_DBLK_UPS1, buf_spec->ups_data.buf_start);
	WRITE_VREG(HEVC_DBLK_UPS2, x0_qn_luma); // x0_qn y
	WRITE_VREG(HEVC_DBLK_UPS3, x0_qn_chroma); // x0_qn c
	WRITE_VREG(HEVC_DBLK_UPS4, x_step_qn_luma); // x_step y
	WRITE_VREG(HEVC_DBLK_UPS5, x_step_qn_chroma); // x_step c
	WRITE_VREG(AV1_UPSCALE_X0_QN, (x0_qn_chroma<<16)|x0_qn_luma);
	WRITE_VREG(AV1_UPSCALE_STEP_QN, (x_step_qn_chroma<<16)|x_step_qn_luma);

/*
 * TileR calculation here if cm needs an exactly accurate value
 */
//#define AV1_UPSCALE_TILER_CALCULATION
#ifdef AV1_UPSCALE_TILER_CALCULATION
	uint32_t upscl_enabled = 1; // 1 just for example, actually this is use_superres flag
	uint32_t tiler_x = 192; // 192 just for example, actually this is tile end
	uint32_t ux;
	uint32_t ux_tiler,ux_tiler_rnd32;
	uint32_t xqn_y;
	uint32_t xqn_c;
	uint32_t tiler_x_y = tiler_x - 8 - 3; // dblk/cdef left-shift-8 plus upscaling extra-3
	uint32_t tiler_x_c = (tiler_x/2) - 4 - 3; // dblk/cdef left-shift-4 plus upscaling extra-3

	xqn_y = x0_qn_luma;
	xqn_c = x0_qn_chroma;
	ux_tiler = 0;
	ux_tiler_rnd32 = 0;
	// scan 8-pel columns for the last position fully left of the tile edge
	for (ux=0; ux<16384; ux+=8) {
		uint32_t x1qn_y = xqn_y + x_step_qn_luma *( 7+3); // extra-3 is for lrf
		uint32_t x1qn_c = xqn_c + x_step_qn_chroma*( 3+3); // extra-3 is for lrf
		uint32_t x1qn_y_nxt = xqn_y + x_step_qn_luma *(8+7+3); // extra-3 is for lrf
		uint32_t x1qn_c_nxt = xqn_c + x_step_qn_chroma*(4+3+3); // extra-3 is for lrf

		uint32_t x1_y = upscl_enabled ? (x1qn_y>>14) : ux +7+3;
		uint32_t x1_c = upscl_enabled ? (x1qn_c>>14) : (ux/2)+3+3;
		uint32_t x1_y_nxt = upscl_enabled ? (x1qn_y_nxt>>14) : ux +8+7+3;
		uint32_t x1_c_nxt = upscl_enabled ? (x1qn_c_nxt>>14) : (ux/2)+4+3+3;

		if ((x1_y<tiler_x_y && x1_c<tiler_x_c) &&
			(x1_y_nxt>=tiler_x_y || x1_c_nxt>=tiler_x_c)) {
			ux_tiler = ux;
			ux_tiler_rnd32 = (ux_tiler/32 + (ux_tiler%32 ? 1 : 0)) * 32;
			break;
		}

		xqn_y += x_step_qn_luma*8;
		xqn_c += x_step_qn_chroma*4;
	}

	av1_print(hw, AOM_DEBUG_HW_MORE,
		"UPS_PRINT: xqn_y(0x%x), xqn_c(0x%x), x1qn_y(0x%x), x1qn_c(0x%x)\n",
		xqn_y, xqn_c, x1qn_y, x1qn_c);
	av1_print(hw, AOM_DEBUG_HW_MORE,
		"UPS_PRINT: ux_tiler(%d)(0x%x), ux_tiler_rnd32(%d)(0x%x)\n",
		ux_tiler, ux_tiler, ux_tiler_rnd32, ux_tiler_rnd32);
#endif

	// TEMP write lrf register here
	//WRITE_VREG(HEVC_DBLK_LRF0, 1<<0 | 1<<2); // LRF UNIT SIZE
	//WRITE_VREG(HEVC_DBLK_LRF1, 3<<0 | 1<<8 | 1<<16 | 1<<24); // LRF UNIT NUMBER

	// TEMP Global Enables write here
	/*
	const uint32_t dblk_enable = (!cm->allow_intrabc && !cm->single_tile_decoding && (cm->lf.filter_level[0] || cm->lf.filter_level[1]));
	const uint32_t cdef_enable = (!cm->allow_intrabc && !cm->single_tile_decoding && !cm->skip_loop_filter && !cm->coded_lossless && (cm->cdef_bits || cm->cdef_strengths[0] || cm->cdef_uv_strengths[0]));
	printk("LPF_ENABLES : dblk(%d) cdef(%d)\n", dblk_enable, cdef_enable);
	data32 = READ_VREG(HEVC_DBLK_CFGB );
	data32 &= ~(0xf<<20);
	data32 |= (dblk_enable<<20);
	data32 |= (cdef_enable<<23);
	WRITE_VREG(HEVC_DBLK_CFGB, data32);
	*/
}

#endif // #ifdef AOM_AV1_UPSCALE_INIT

/* Free the loop-filter helper structs allocated by init_dblk_struc().
 * Safe to call with any subset already NULL. */
static void release_dblk_struct(struct AV1HW_s *hw)
{
#ifdef AOM_AV1_DBLK_INIT
	if (hw->lfi)
		vfree(hw->lfi);
	if (hw->lf)
		vfree(hw->lf);
	if (hw->seg_4lf)
		vfree(hw->seg_4lf);
	hw->lfi = NULL;
	hw->lf = NULL;
	hw->seg_4lf = NULL;
#endif
}

/* Allocate hw->lfi / hw->lf / hw->seg_4lf and set loop-filter defaults.
 * Returns 0 on success, -1 on allocation failure (all freed). */
static int init_dblk_struc(struct AV1HW_s *hw)
{
#ifdef AOM_AV1_DBLK_INIT
	hw->lfi = vmalloc(sizeof(loop_filter_info_n));
	hw->lf = vmalloc(sizeof(struct loopfilter));
	hw->seg_4lf = vmalloc(sizeof(struct segmentation_lf));

	if (hw->lfi == NULL || hw->lf == NULL || hw->seg_4lf == NULL) {
		printk("[test.c] aom_loop_filter init malloc error!!!\n");
		release_dblk_struct(hw);
		return -1;
	}

	hw->lf->mode_ref_delta_enabled = 1; // set default here
	hw->lf->mode_ref_delta_update = 1; // set default here
	hw->lf->sharpness_level = 0; // init to 0
	hw->lf->lf_pic_cnt = 0; // init to 0
#endif
	return 0;
}

/* Per-frame deblock/CDEF/upscale/CDF hardware configuration: pulls frame
 * header fields from hw->aom_param into lf/seg_4lf (inheriting from
 * prev_frame where the stream says so), then programs the registers. */
static void config_dblk_hw(struct AV1HW_s *hw)
{
	AV1Decoder *pbi = hw->pbi;
	AV1_COMMON *cm = &hw->common;
	loop_filter_info_n *lfi = hw->lfi;
	struct loopfilter *lf = hw->lf;
	struct segmentation_lf *seg_4lf = hw->seg_4lf;
	BuffInfo_t* buf_spec = pbi->work_space_buf;
	PIC_BUFFER_CONFIG* cur_pic_config = &cm->cur_frame->buf;
	PIC_BUFFER_CONFIG* prev_pic_config = &cm->prev_frame->buf;
	int i;

#ifdef AOM_AV1_DBLK_INIT
#ifdef DUAL_DECODE
#else
	av1_print(hw, AOM_DEBUG_HW_MORE,
		"[test.c ref_delta] cur_frame : %p prev_frame : %p - %p \n",
		cm->cur_frame, cm->prev_frame,
		av1_get_primary_ref_frame_buf(cm));
	// get lf parameters from parser
	// (parser packs enable in bit0, update flag in bit1)
	lf->mode_ref_delta_enabled =
		(hw->aom_param.p.loop_filter_mode_ref_delta_enabled & 1);
	lf->mode_ref_delta_update =
		((hw->aom_param.p.loop_filter_mode_ref_delta_enabled >> 1) & 1);
	lf->sharpness_level =
		hw->aom_param.p.loop_filter_sharpness_level;
	// Each parser halfword packs two 8-bit signed deltas; bit7/bit15 of the
	// raw field flags "delta present in this frame" (else inherit from prev).
	if (((hw->aom_param.p.loop_filter_mode_ref_delta_enabled)&3) == 3) { // enabled but and update
		// NOTE(review): `cm->prev_frame <= 0` compares a pointer against 0 —
		// presumably means "no previous frame"; confirm intended over == NULL.
		if (cm->prev_frame <= 0) {
			// already initialized in Microcode
			lf->ref_deltas[0] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_0),7);
			lf->ref_deltas[1] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_0>>8),7);
			lf->ref_deltas[2] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_1),7);
			lf->ref_deltas[3] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_1>>8),7);
			lf->ref_deltas[4] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_2),7);
			lf->ref_deltas[5] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_2>>8),7);
			lf->ref_deltas[6] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_3),7);
			lf->ref_deltas[7] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_3>>8),7);
			lf->mode_deltas[0] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_mode_deltas_0),7);
			lf->mode_deltas[1] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_mode_deltas_0>>8),7);
		} else {
			// per-delta: take new value when its present-bit is set,
			// otherwise inherit the previous frame's delta
			lf->ref_deltas[0] = (hw->aom_param.p.loop_filter_ref_deltas_0 & 0x80) ?
				conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_0),7) :
				cm->prev_frame->ref_deltas[0];
			lf->ref_deltas[1] = (hw->aom_param.p.loop_filter_ref_deltas_0 & 0x8000) ?
				conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_0>>8),7) :
				cm->prev_frame->ref_deltas[1];
			lf->ref_deltas[2] = (hw->aom_param.p.loop_filter_ref_deltas_1 & 0x80) ?
				conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_1),7) :
				cm->prev_frame->ref_deltas[2];
			lf->ref_deltas[3] = (hw->aom_param.p.loop_filter_ref_deltas_1 & 0x8000) ?
				conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_1>>8),7) :
				cm->prev_frame->ref_deltas[3];
			lf->ref_deltas[4] = (hw->aom_param.p.loop_filter_ref_deltas_2 & 0x80) ?
				conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_2),7) :
				cm->prev_frame->ref_deltas[4];
			lf->ref_deltas[5] = (hw->aom_param.p.loop_filter_ref_deltas_2 & 0x8000) ?
				conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_2>>8),7) :
				cm->prev_frame->ref_deltas[5];
			lf->ref_deltas[6] = (hw->aom_param.p.loop_filter_ref_deltas_3 & 0x80) ?
				conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_3),7) :
				cm->prev_frame->ref_deltas[6];
			lf->ref_deltas[7] = (hw->aom_param.p.loop_filter_ref_deltas_3 & 0x8000) ?
				conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_3>>8),7) :
				cm->prev_frame->ref_deltas[7];
			lf->mode_deltas[0] = (hw->aom_param.p.loop_filter_mode_deltas_0 & 0x80) ?
				conv2int8((uint8_t)(hw->aom_param.p.loop_filter_mode_deltas_0),7) :
				cm->prev_frame->mode_deltas[0];
			lf->mode_deltas[1] = (hw->aom_param.p.loop_filter_mode_deltas_0 & 0x8000) ?
				conv2int8((uint8_t)(hw->aom_param.p.loop_filter_mode_deltas_0>>8),7) :
				cm->prev_frame->mode_deltas[1];
		}
	} //else if (hw->aom_param.p.loop_filter_mode_ref_delta_enabled == 1) { // enabled but no update
	else { // match c code -- not enabled, still need to copy prev to used for next
		// NOTE(review): bitwise `|` (not `||`) between the two conditions —
		// behavior-equivalent here but confirm it is intentional.
		if ((cm->prev_frame <= 0) | (hw->aom_param.p.loop_filter_mode_ref_delta_enabled & 4)) {
			av1_print(hw, AOM_DEBUG_HW_MORE,
				"[test.c] mode_ref_delta set to default\n");
			// spec default deltas: {1,0,0,0,-1,0,-1,-1} / {0,0}
			lf->ref_deltas[0] = conv2int8((uint8_t)1,7);
			lf->ref_deltas[1] = conv2int8((uint8_t)0,7);
			lf->ref_deltas[2] = conv2int8((uint8_t)0,7);
			lf->ref_deltas[3] = conv2int8((uint8_t)0,7);
			lf->ref_deltas[4] = conv2int8((uint8_t)0xff,7);
			lf->ref_deltas[5] = conv2int8((uint8_t)0,7);
			lf->ref_deltas[6] = conv2int8((uint8_t)0xff,7);
			lf->ref_deltas[7] = conv2int8((uint8_t)0xff,7);
			lf->mode_deltas[0] = conv2int8((uint8_t)0,7);
			lf->mode_deltas[1] = conv2int8((uint8_t)0,7);
		} else {
			av1_print(hw, AOM_DEBUG_HW_MORE,
				"[test.c] mode_ref_delta copy from prev_frame\n");
			lf->ref_deltas[0] = cm->prev_frame->ref_deltas[0];
			lf->ref_deltas[1] = cm->prev_frame->ref_deltas[1];
			lf->ref_deltas[2] = cm->prev_frame->ref_deltas[2];
			lf->ref_deltas[3] = cm->prev_frame->ref_deltas[3];
			lf->ref_deltas[4] = cm->prev_frame->ref_deltas[4];
			lf->ref_deltas[5] = cm->prev_frame->ref_deltas[5];
			lf->ref_deltas[6] = cm->prev_frame->ref_deltas[6];
			lf->ref_deltas[7] = cm->prev_frame->ref_deltas[7];
			lf->mode_deltas[0] = cm->prev_frame->mode_deltas[0];
			lf->mode_deltas[1] = cm->prev_frame->mode_deltas[1];
		}
	}
	lf->filter_level[0] = hw->aom_param.p.loop_filter_level_0;
	lf->filter_level[1] = hw->aom_param.p.loop_filter_level_1;
	lf->filter_level_u = hw->aom_param.p.loop_filter_level_u;
	lf->filter_level_v = hw->aom_param.p.loop_filter_level_v;

	// save resolved deltas on the current frame so a later frame can inherit
	cm->cur_frame->ref_deltas[0] = lf->ref_deltas[0];
	cm->cur_frame->ref_deltas[1] = lf->ref_deltas[1];
	cm->cur_frame->ref_deltas[2] = lf->ref_deltas[2];
	cm->cur_frame->ref_deltas[3] = lf->ref_deltas[3];
	cm->cur_frame->ref_deltas[4] = lf->ref_deltas[4];
	cm->cur_frame->ref_deltas[5] = lf->ref_deltas[5];
	cm->cur_frame->ref_deltas[6] = lf->ref_deltas[6];
	cm->cur_frame->ref_deltas[7] = lf->ref_deltas[7];
	cm->cur_frame->mode_deltas[0] = lf->mode_deltas[0];
	cm->cur_frame->mode_deltas[1] = lf->mode_deltas[1];

	// get seg_4lf parameters from parser
	// (parser packs: bit0 enabled, bit1 update_data, bit2 intra_only,
	//  bit3 update_map — per the masks used below)
	seg_4lf->enabled = hw->aom_param.p.segmentation_enabled & 1;
	cm->cur_frame->segmentation_enabled = hw->aom_param.p.segmentation_enabled & 1;
	cm->cur_frame->intra_only = (hw->aom_param.p.segmentation_enabled >> 2) & 1;
	cm->cur_frame->segmentation_update_map = (hw->aom_param.p.segmentation_enabled >> 3) & 1;

	if (hw->aom_param.p.segmentation_enabled & 1) { // segmentation_enabled
		if (hw->aom_param.p.segmentation_enabled & 2) { // segmentation_update_data
			for (i = 0; i < MAX_SEGMENTS; i++) {
				seg_4lf->seg_lf_info_y[i] = hw->aom_param.p.seg_lf_info_y[i];
				seg_4lf->seg_lf_info_c[i] = hw->aom_param.p.seg_lf_info_c[i];
				#ifdef DBG_LPF_PRINT
				printk(" read seg_lf_info [%d] : 0x%x, 0x%x\n",
					i, seg_4lf->seg_lf_info_y[i], seg_4lf->seg_lf_info_c[i]);
				#endif
			}
		} // segmentation_update_data
		else { // no segmentation_update_data
			if (cm->prev_frame <= 0) {
				for (i=0;i<MAX_SEGMENTS;i++) {
					seg_4lf->seg_lf_info_y[i] = 0;
					seg_4lf->seg_lf_info_c[i] = 0;
				}
			} else {
				for (i = 0; i < MAX_SEGMENTS; i++) {
					seg_4lf->seg_lf_info_y[i] = cm->prev_frame->seg_lf_info_y[i];
					seg_4lf->seg_lf_info_c[i] = cm->prev_frame->seg_lf_info_c[i];
					#ifdef DBG_LPF_PRINT
					printk(" Refrence seg_lf_info [%d] : 0x%x, 0x%x\n",
						i, seg_4lf->seg_lf_info_y[i], seg_4lf->seg_lf_info_c[i]);
					#endif
				}
			}
		} // no segmentation_update_data
	} // segmentation_enabled
	else {
		for (i=0;i<MAX_SEGMENTS;i++) {
			seg_4lf->seg_lf_info_y[i] = 0;
			seg_4lf->seg_lf_info_c[i] = 0;
		}
	} // NOT segmentation_enabled
	for (i=0;i<MAX_SEGMENTS;i++) {
		cm->cur_frame->seg_lf_info_y[i] = seg_4lf->seg_lf_info_y[i];
		cm->cur_frame->seg_lf_info_c[i] = seg_4lf->seg_lf_info_c[i];
#ifdef DBG_LPF_PRINT
		printk(" SAVE seg_lf_info [%d] : 0x%x, 0x%x\n",
			i, cm->cur_frame->seg_lf_info_y[i],
			cm->cur_frame->seg_lf_info_c[i]);
#endif
	}

	/*
	 * Update loop filter Thr/Lvl table for every frame
	 */
	av1_print(hw, AOM_DEBUG_HW_MORE,
		"[test.c] av1_loop_filter_frame_init (run before every frame decoding start)\n");
	av1_loop_filter_frame_init(pbi, seg_4lf, lfi, lf, cm->dec_width);
#endif // not DUAL_DECODE
#endif

#ifdef AOM_AV1_UPSCALE_INIT
	/*
	 * init for upscaling
	 */
	av1_print(hw, AOM_DEBUG_HW_MORE,
		"[test.c] av1_upscale_frame_init (run before every frame decoding start)\n");
	// NOTE(review): second argument is `pbi->common` — the callee expects
	// AV1_COMMON *; confirm whether `&pbi->common` was intended (the `common`
	// member type is not visible from here).
	av1_upscale_frame_init(pbi,
		pbi->common, &hw->aom_param);
#endif // #ifdef AOM_AV1_UPSCALE_INIT

	//BuffInfo_t* buf_spec = pbi->work_space_buf;
	av1_print(hw, AOM_DEBUG_HW_MORE,
		"[test.c] cur_frame : %p prev_frame : %p - %p \n",
		cm->cur_frame, cm->prev_frame, av1_get_primary_ref_frame_buf(cm));
	// CDF / segment-map write buffers: base address when no current frame,
	// otherwise per-picture slot (0x8000 bytes per CDF slot, seg_map split 16 ways)
	if (cm->cur_frame <= 0) {
		WRITE_VREG(AOM_AV1_CDF_BUFFER_W, buf_spec->cdf_buf.buf_start);
		WRITE_VREG(AOM_AV1_SEG_MAP_BUFFER_W, buf_spec->seg_map.buf_start);
	}
	else {
		av1_print(hw, AOM_DEBUG_HW_MORE,
			"[test.c] Config WRITE CDF_BUF/SEG_MAP_BUF : %d\n",
			cur_pic_config->index);
		WRITE_VREG(AOM_AV1_CDF_BUFFER_W,
			buf_spec->cdf_buf.buf_start + (0x8000*cur_pic_config->index));
		WRITE_VREG(AOM_AV1_SEG_MAP_BUFFER_W,
			buf_spec->seg_map.buf_start + ((buf_spec->seg_map.buf_size / 16) * cur_pic_config->index));
	}
	cm->cur_frame->seg_mi_rows = cm->cur_frame->mi_rows;
	cm->cur_frame->seg_mi_cols = cm->cur_frame->mi_cols;
	// CDF / segment-map read buffers follow the previous frame's slot
	if (cm->prev_frame <= 0) {
		WRITE_VREG(AOM_AV1_CDF_BUFFER_R, buf_spec->cdf_buf.buf_start);
		WRITE_VREG(AOM_AV1_SEG_MAP_BUFFER_R, buf_spec->seg_map.buf_start);
	} else {
		av1_print(hw, AOM_DEBUG_HW_MORE,
			"[test.c] Config READ CDF_BUF/SEG_MAP_BUF : %d\n",
			prev_pic_config->index);
		WRITE_VREG(AOM_AV1_CDF_BUFFER_R,
			buf_spec->cdf_buf.buf_start + (0x8000*prev_pic_config->index));
		WRITE_VREG(AOM_AV1_SEG_MAP_BUFFER_R,
			buf_spec->seg_map.buf_start + ((buf_spec->seg_map.buf_size / 16) * prev_pic_config->index));

		// segmentation_enabled but no segmentation_update_data
		if ((hw->aom_param.p.segmentation_enabled & 3) == 1) {
			av1_print(hw, AOM_DEBUG_HW_MORE,
				"[test.c] segfeatures_copy from prev_frame\n");
			for (i = 0; i < 8; i++) {
				WRITE_VREG(AOM_AV1_SEGMENT_FEATURE,
					cm->prev_frame->segment_feature[i]);
			}
		}
		// segmentation_enabled but no segmentation_update_map
		if ((hw->aom_param.p.segmentation_enabled & 9) == 1) {
			av1_print(hw, AOM_DEBUG_HW_MORE,
				"[test.c] seg_map_size copy from prev_frame\n");
			cm->cur_frame->seg_mi_rows = cm->prev_frame->seg_mi_rows;
			cm->cur_frame->seg_mi_cols = cm->prev_frame->seg_mi_cols;
		}
	}
#ifdef PRINT_HEVC_DATA_PATH_MONITOR
	// Debug-only path-utilization dump. NOTE(review): uses `float` and %.2f
	// in printk — not supported in kernel context; presumably this block is
	// only ever built for a simulation/test environment.
	{
		uint32_t total_clk_count;
		uint32_t path_transfer_count;
		uint32_t path_wait_count;
		float path_wait_ratio;
		if (pbi->decode_idx > 1) {
			WRITE_VREG(HEVC_PATH_MONITOR_CTRL, 0); // Disable monitor and set rd_idx to 0
			total_clk_count = READ_VREG(HEVC_PATH_MONITOR_DATA);

			WRITE_VREG(HEVC_PATH_MONITOR_CTRL, (1<<4)); // Disable monitor and set rd_idx to 0

			// parser --> iqit
			path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			if (path_transfer_count == 0)
				path_wait_ratio = 0.0;
			else
				path_wait_ratio =
					(float)path_wait_count/(float)path_transfer_count;
			printk("[P%d HEVC PATH] Parser/IQIT/IPP/DBLK/OW/DDR/CMD WAITING \% : %.2f",
				pbi->decode_idx - 2,
				path_wait_ratio);

			// iqit --> ipp
			path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			if (path_transfer_count == 0)
				path_wait_ratio = 0.0;
			else
				path_wait_ratio = (float)path_wait_count/(float)path_transfer_count;
			printk(" %.2f", path_wait_ratio);

			// dblk <-- ipp
			path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			if (path_transfer_count == 0)
				path_wait_ratio = 0.0;
			else
				path_wait_ratio = (float)path_wait_count/(float)path_transfer_count;
			printk(" %.2f", path_wait_ratio);

			// dblk --> ow
			path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			if (path_transfer_count == 0)
				path_wait_ratio = 0.0;
			else path_wait_ratio =
				(float)path_wait_count/(float)path_transfer_count;
			printk(" %.2f", path_wait_ratio);

			// <--> DDR
			path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			if (path_transfer_count == 0)
				path_wait_ratio = 0.0;
			else path_wait_ratio =
				(float)path_wait_count/(float)path_transfer_count;
			printk(" %.2f", path_wait_ratio);

			// CMD
			path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
			if (path_transfer_count == 0)
				path_wait_ratio = 0.0;
			else
				path_wait_ratio =
(float)path_wait_count/(float)path_transfer_count; + printk(" %.2f\n", path_wait_ratio); + } + } + +#endif + +} + +static void aom_config_work_space_hw(struct AV1HW_s *hw, u32 mask) +{ + struct BuffInfo_s *buf_spec = hw->work_space_buf; + unsigned int data32; + av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__, __LINE__); + if (debug && hw->init_flag == 0) + av1_print(hw, AOM_DEBUG_HW_MORE, "%s %x %x %x %x %x %x %x %x\n", + __func__, + buf_spec->ipp.buf_start, + buf_spec->start_adr, + buf_spec->short_term_rps.buf_start, + buf_spec->sao_up.buf_start, + buf_spec->swap_buf.buf_start, + buf_spec->scalelut.buf_start, + buf_spec->dblk_para.buf_start, + buf_spec->dblk_data.buf_start); + if (mask & HW_MASK_FRONT) { + av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__, __LINE__); + if ((debug & AOM_AV1_DEBUG_SEND_PARAM_WITH_REG) == 0) + WRITE_VREG(HEVC_RPM_BUFFER, (u32)hw->rpm_phy_addr); + + /*WRITE_VREG(HEVC_STREAM_SWAP_BUFFER, + buf_spec->swap_buf.buf_start);*/ + WRITE_VREG(LMEM_DUMP_ADR, (u32)hw->lmem_phy_addr); + + } + + av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__, __LINE__); + + WRITE_VREG(AOM_AV1_DAALA_TOP_BUFFER, + buf_spec->daala_top.buf_start); + WRITE_VREG(AV1_GMC_PARAM_BUFF_ADDR, + buf_spec->gmc_buf.buf_start); + + WRITE_VREG(HEVC_DBLK_CFG4, + buf_spec->dblk_para.buf_start); // cfg_addr_cif + WRITE_VREG(HEVC_DBLK_CFG5, + buf_spec->dblk_data.buf_start); // cfg_addr_xio + + if (mask & HW_MASK_BACK) { +#ifdef LOSLESS_COMPRESS_MODE + int losless_comp_header_size = + compute_losless_comp_header_size(hw->init_pic_w, + hw->init_pic_h); + int losless_comp_body_size = + compute_losless_comp_body_size(hw->init_pic_w, + hw->init_pic_h, buf_alloc_depth == 10); +#endif +#ifdef AOM_AV1_MMU_DW + int losless_comp_header_size_dw = + compute_losless_comp_header_size_dw(hw->init_pic_w, + hw->init_pic_h); + int losless_comp_body_size_dw = + compute_losless_comp_body_size_dw(hw->init_pic_w, + hw->init_pic_h, buf_alloc_depth == 10); +#endif + 
WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, + buf_spec->ipp.buf_start); + //WRITE_VREG(HEVC_SAO_UP, buf_spec->sao_up.buf_start); + //WRITE_VREG(HEVC_SCALELUT, buf_spec->scalelut.buf_start); +#ifdef CHANGE_REMOVED + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + /* cfg_addr_adp*/ + WRITE_VREG(HEVC_DBLK_CFGE, buf_spec->dblk_para.buf_start); + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info("Write HEVC_DBLK_CFGE\n"); + } +#endif + /* cfg_p_addr */ + WRITE_VREG(HEVC_DBLK_CFG4, buf_spec->dblk_para.buf_start); + /* cfg_d_addr */ + WRITE_VREG(HEVC_DBLK_CFG5, buf_spec->dblk_data.buf_start); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + if (buf_spec->max_width <= 4096 && buf_spec->max_height <= 2304) + WRITE_VREG(HEVC_DBLK_CFG3, 0x404010); //default value + else + WRITE_VREG(HEVC_DBLK_CFG3, 0x808020); // make left storage 2 x 4k] + av1_print(hw, AV1_DEBUG_BUFMGR_MORE, + "HEVC_DBLK_CFG3 = %x\n", READ_VREG(HEVC_DBLK_CFG3)); + } + +#ifdef LOSLESS_COMPRESS_MODE + if (hw->mmu_enable) { + /*bit[4] : paged_mem_mode*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4)); +#ifdef CHANGE_REMOVED + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SM1) +#endif + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0); + } else { + /*if (cur_pic_config->bit_depth == AOM_BITS_10) + * WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0<<3)); + */ + /*bit[3] smem mdoe*/ + /*else WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (1<<3));*/ + /*bit[3] smem mdoe*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, + (losless_comp_body_size >> 5)); + } + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, + (losless_comp_body_size >> 5));*/ + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL3, + (0xff<<20) | (0xff<<10) | 0xff);*/ + /*8-bit mode */ + WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size); + if (get_double_write_mode(hw) & 0x10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#endif + + 
if (hw->mmu_enable) { + WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR, buf_spec->mmu_vbh.buf_start); + WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR, buf_spec->mmu_vbh.buf_start + + VBH_BUF_SIZE(buf_spec)); + /*data32 = READ_VREG(HEVC_SAO_CTRL9);*/ + /*data32 |= 0x1;*/ + /*WRITE_VREG(HEVC_SAO_CTRL9, data32);*/ + + /* use HEVC_CM_HEADER_START_ADDR */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 |= (1<<10); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } +#ifdef AOM_AV1_MMU_DW + data32 = READ_VREG(HEVC_SAO_CTRL5); + if (hw->dw_mmu_enable) { + u32 data_tmp; + data_tmp = READ_VREG(HEVC_SAO_CTRL9); + data_tmp |= (1<<10); + WRITE_VREG(HEVC_SAO_CTRL9, data_tmp); + + WRITE_VREG(HEVC_CM_BODY_LENGTH2,losless_comp_body_size_dw); + WRITE_VREG(HEVC_CM_HEADER_OFFSET2,losless_comp_body_size_dw); + WRITE_VREG(HEVC_CM_HEADER_LENGTH2,losless_comp_header_size_dw); + + WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR2, buf_spec->mmu_vbh_dw.buf_start); + WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR2, buf_spec->mmu_vbh_dw.buf_start + + DW_VBH_BUF_SIZE(buf_spec)); + + WRITE_VREG(HEVC_DW_VH0_ADDDR, buf_spec->mmu_vbh_dw.buf_start + + (2 * DW_VBH_BUF_SIZE(buf_spec))); + WRITE_VREG(HEVC_DW_VH1_ADDDR, buf_spec->mmu_vbh_dw.buf_start + + (3 * DW_VBH_BUF_SIZE(buf_spec))); + + /* use HEVC_CM_HEADER_START_ADDR */ + data32 |= (1<<15); + } else + data32 &= ~(1<<15); + WRITE_VREG(HEVC_SAO_CTRL5, data32); +#endif + + WRITE_VREG(LMEM_DUMP_ADR, (u32)hw->lmem_phy_addr); +#ifdef CHANGE_REMOVED + + WRITE_VREG(AV1_SEG_MAP_BUFFER, buf_spec->seg_map.buf_start); + + /**/ + WRITE_VREG(AV1_PROB_SWAP_BUFFER, hw->prob_buffer_phy_addr); + WRITE_VREG(AV1_COUNT_SWAP_BUFFER, hw->count_buffer_phy_addr); + if (hw->mmu_enable) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) + WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR, hw->frame_mmu_map_phy_addr); + else + WRITE_VREG(AV1_MMU_MAP_BUFFER, hw->frame_mmu_map_phy_addr); + } +#else + if (hw->mmu_enable) + WRITE_VREG(HEVC_SAO_MMU_DMA_CTRL, hw->frame_mmu_map_phy_addr); +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + 
WRITE_VREG(HEVC_SAO_MMU_DMA_CTRL2, hw->dw_frame_mmu_map_phy_addr); + //default of 0xffffffff will disable dw + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0); + WRITE_VREG(HEVC_SAO_C_START_ADDR, 0); + } +#endif +#endif +#ifdef CO_MV_COMPRESS + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) { + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 |= (1 << 1); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); + } +#endif + } + + config_aux_buf(hw); +} + +#ifdef MCRCC_ENABLE +static u32 mcrcc_cache_alg_flag = 1; +static void mcrcc_perfcount_reset(struct AV1HW_s *hw); +static void decomp_perfcount_reset(struct AV1HW_s *hw); +#endif + +static void aom_init_decoder_hw(struct AV1HW_s *hw, u32 mask) +{ + unsigned int data32; + int i; + const unsigned short parser_cmd[PARSER_CMD_NUMBER] = { + 0x0401, 0x8401, 0x0800, 0x0402, 0x9002, 0x1423, + 0x8CC3, 0x1423, 0x8804, 0x9825, 0x0800, 0x04FE, + 0x8406, 0x8411, 0x1800, 0x8408, 0x8409, 0x8C2A, + 0x9C2B, 0x1C00, 0x840F, 0x8407, 0x8000, 0x8408, + 0x2000, 0xA800, 0x8410, 0x04DE, 0x840C, 0x840D, + 0xAC00, 0xA000, 0x08C0, 0x08E0, 0xA40E, 0xFC00, + 0x7C00 + }; +#if 0 + if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) { + /* Set MCR fetch priorities*/ + data32 = 0x1 | (0x1 << 2) | (0x1 <<3) | + (24 << 4) | (32 << 11) | (24 << 18) | (32 << 25); + WRITE_VREG(HEVCD_MPP_DECOMP_AXIURG_CTL, data32); + } +#endif + /*if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info("%s\n", __func__);*/ + if (mask & HW_MASK_FRONT) { + data32 = READ_VREG(HEVC_PARSER_INT_CONTROL); +#ifdef CHANGE_REMOVED +#if 1 + /* set bit 31~29 to 3 if HEVC_STREAM_FIFO_CTL[29] is 1 */ + data32 &= ~(7 << 29); + data32 |= (3 << 29); +#endif + data32 = data32 | + (1 << 24) |/*stream_buffer_empty_int_amrisc_enable*/ + (1 << 22) |/*stream_fifo_empty_int_amrisc_enable*/ + (1 << 7) |/*dec_done_int_cpu_enable*/ + (1 << 4) |/*startcode_found_int_cpu_enable*/ + (0 << 3) |/*startcode_found_int_amrisc_enable*/ + (1 << 0) /*parser_int_enable*/ + ; +#else + data32 = data32 & 0x03ffffff; + data32 = data32 | + (3 << 
29) | // stream_buffer_empty_int_ctl ( 0x200 interrupt) + (3 << 26) | // stream_fifo_empty_int_ctl ( 4 interrupt) + (1 << 24) | // stream_buffer_empty_int_amrisc_enable + (1 << 22) | // stream_fifo_empty_int_amrisc_enable +#ifdef AOM_AV1_HED_FB +#ifdef DUAL_DECODE + // For HALT CCPU test. Use Pull inside CCPU to generate interrupt + // (1 << 9) | // fed_fb_slice_done_int_amrisc_enable +#else + (1 << 10) | // fed_fb_slice_done_int_cpu_enable +#endif +#endif + (1 << 7) | // dec_done_int_cpu_enable + (1 << 4) | // startcode_found_int_cpu_enable + (0 << 3) | // startcode_found_int_amrisc_enable + (1 << 0) // parser_int_enable + ; +#endif + WRITE_VREG(HEVC_PARSER_INT_CONTROL, data32); + + data32 = READ_VREG(HEVC_SHIFT_STATUS); + data32 = data32 | + (0 << 1) |/*emulation_check_off AV1 + do not have emulation*/ + (1 << 0)/*startcode_check_on*/ + ; + WRITE_VREG(HEVC_SHIFT_STATUS, data32); + WRITE_VREG(HEVC_SHIFT_CONTROL, + (0 << 14) | /*disable_start_code_protect*/ + (1 << 10) | /*length_zero_startcode_en for AV1*/ + (1 << 9) | /*length_valid_startcode_en for AV1*/ + (3 << 6) | /*sft_valid_wr_position*/ + (2 << 4) | /*emulate_code_length_sub_1*/ + (3 << 1) | /*start_code_length_sub_1 + AV1 use 0x00000001 as startcode (4 Bytes)*/ + (1 << 0) /*stream_shift_enable*/ + ); + + WRITE_VREG(HEVC_CABAC_CONTROL, + (1 << 0)/*cabac_enable*/ + ); + + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, + (1 << 0)/* hevc_parser_core_clk_en*/ + ); + + WRITE_VREG(HEVC_DEC_STATUS_REG, 0); + } + + if (mask & HW_MASK_BACK) { + /*Initial IQIT_SCALELUT memory + -- just to avoid X in simulation*/ + if (is_rdma_enable()) + rdma_back_end_work(hw->rdma_phy_adr, RDMA_SIZE); + else { + WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0);/*cfg_p_addr*/ + for (i = 0; i < 1024; i++) + WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, 0); + } + } + + if (mask & HW_MASK_FRONT) { + u32 decode_mode; +/* +#ifdef ENABLE_SWAP_TEST + WRITE_VREG(HEVC_STREAM_SWAP_TEST, 100); +#else + WRITE_VREG(HEVC_STREAM_SWAP_TEST, 0); +#endif +*/ +#ifdef 
MULTI_INSTANCE_SUPPORT + if (!hw->m_ins_flag) { + if (hw->low_latency_flag) + decode_mode = DECODE_MODE_SINGLE_LOW_LATENCY; + else + decode_mode = DECODE_MODE_SINGLE; + } else if (vdec_frame_based(hw_to_vdec(hw))) + decode_mode = hw->no_head ? + DECODE_MODE_MULTI_FRAMEBASE_NOHEAD : + DECODE_MODE_MULTI_FRAMEBASE; + else + decode_mode = DECODE_MODE_MULTI_STREAMBASE; + if (debug & AOM_DEBUG_BUFMGR_ONLY) + decode_mode |= (1 << 16); + WRITE_VREG(DECODE_MODE, decode_mode); + WRITE_VREG(HEVC_DECODE_SIZE, 0); + WRITE_VREG(HEVC_DECODE_COUNT, 0); +#else + WRITE_VREG(DECODE_MODE, DECODE_MODE_SINGLE); + WRITE_VREG(HEVC_DECODE_PIC_BEGIN_REG, 0); + WRITE_VREG(HEVC_DECODE_PIC_NUM_REG, 0x7fffffff); /*to remove*/ +#endif + /*Send parser_cmd*/ + WRITE_VREG(HEVC_PARSER_CMD_WRITE, (1 << 16) | (0 << 0)); + for (i = 0; i < PARSER_CMD_NUMBER; i++) + WRITE_VREG(HEVC_PARSER_CMD_WRITE, parser_cmd[i]); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2); + + + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + /* (1 << 8) |*/ /*sao_sw_pred_enable*/ + (1 << 5) | /*parser_sao_if_en*/ + (1 << 2) | /*parser_mpred_if_en*/ + (1 << 0) /*parser_scaler_if_en*/ + ); + } + + if (mask & HW_MASK_BACK) { + /*Changed to Start MPRED in microcode*/ + /* + pr_info("[test.c] Start MPRED\n"); + WRITE_VREG(HEVC_MPRED_INT_STATUS, + (1<<31) + ); + */ + WRITE_VREG(HEVCD_IPP_TOP_CNTL, + (0 << 1) | /*enable ipp*/ + (1 << 0) /*software reset ipp and mpp*/ + ); +#ifdef CHANGE_REMOVED + WRITE_VREG(HEVCD_IPP_TOP_CNTL, + (1 << 1) | /*enable ipp*/ + (0 << 0) /*software reset ipp and mpp*/ + ); +#else + WRITE_VREG(HEVCD_IPP_TOP_CNTL, + (3 << 4) | // av1 + (1 << 1) | /*enable ipp*/ + (0 << 0) /*software reset ipp and mpp*/ + ); +#endif + if (get_double_write_mode(hw) & 0x10) { + /*Enable NV21 reference read mode for MC*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); + } +#ifdef MCRCC_ENABLE + /*Initialize 
mcrcc and decomp perf counters*/ + if (mcrcc_cache_alg_flag && + hw->init_flag == 0) { + mcrcc_perfcount_reset(hw); + decomp_perfcount_reset(hw); + } +#endif + } +#ifdef CHANGE_REMOVED +#else +// Set MCR fetch priorities + data32 = 0x1 | (0x1 << 2) | (0x1 <<3) | + (24 << 4) | (32 << 11) | (24 << 18) | (32 << 25); + WRITE_VREG(HEVCD_MPP_DECOMP_AXIURG_CTL, data32); +#endif + return; +} + + +#ifdef CONFIG_HEVC_CLK_FORCED_ON +static void config_av1_clk_forced_on(void) +{ + unsigned int rdata32; + /*IQIT*/ + rdata32 = READ_VREG(HEVC_IQIT_CLK_RST_CTRL); + WRITE_VREG(HEVC_IQIT_CLK_RST_CTRL, rdata32 | (0x1 << 2)); + + /* DBLK*/ + rdata32 = READ_VREG(HEVC_DBLK_CFG0); + WRITE_VREG(HEVC_DBLK_CFG0, rdata32 | (0x1 << 2)); + + /* SAO*/ + rdata32 = READ_VREG(HEVC_SAO_CTRL1); + WRITE_VREG(HEVC_SAO_CTRL1, rdata32 | (0x1 << 2)); + + /*MPRED*/ + rdata32 = READ_VREG(HEVC_MPRED_CTRL1); + WRITE_VREG(HEVC_MPRED_CTRL1, rdata32 | (0x1 << 24)); + + /* PARSER*/ + rdata32 = READ_VREG(HEVC_STREAM_CONTROL); + WRITE_VREG(HEVC_STREAM_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_SHIFT_CONTROL); + WRITE_VREG(HEVC_SHIFT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_CABAC_CONTROL); + WRITE_VREG(HEVC_CABAC_CONTROL, rdata32 | (0x1 << 13)); + rdata32 = READ_VREG(HEVC_PARSER_CORE_CONTROL); + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_INT_CONTROL); + WRITE_VREG(HEVC_PARSER_INT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_IF_CONTROL); + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + rdata32 | (0x1 << 6) | (0x1 << 3) | (0x1 << 1)); + + /*IPP*/ + rdata32 = READ_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG); + WRITE_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG, rdata32 | 0xffffffff); + + /* MCRCC*/ + rdata32 = READ_VREG(HEVCD_MCRCC_CTL1); + WRITE_VREG(HEVCD_MCRCC_CTL1, rdata32 | (0x1 << 3)); +} +#endif + + +static int vav1_mmu_map_alloc(struct AV1HW_s *hw) +{ + if (hw->mmu_enable) { + u32 mmu_map_size = vav1_frame_mmu_map_size(hw); + 
hw->frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + mmu_map_size, + &hw->frame_mmu_map_phy_addr, GFP_KERNEL); + if (hw->frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(hw->frame_mmu_map_addr, 0, mmu_map_size); + } +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + u32 mmu_map_size = vaom_dw_frame_mmu_map_size(hw); + hw->dw_frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + mmu_map_size, + &hw->dw_frame_mmu_map_phy_addr, GFP_KERNEL); + if (hw->dw_frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(hw->dw_frame_mmu_map_addr, 0, mmu_map_size); + } +#endif + return 0; +} + + +static void vav1_mmu_map_free(struct AV1HW_s *hw) +{ + if (hw->mmu_enable) { + u32 mmu_map_size = vav1_frame_mmu_map_size(hw); + if (hw->frame_mmu_map_addr) { + if (hw->frame_mmu_map_phy_addr) + dma_free_coherent(amports_get_dma_device(), + mmu_map_size, + hw->frame_mmu_map_addr, + hw->frame_mmu_map_phy_addr); + hw->frame_mmu_map_addr = NULL; + } + } +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + u32 mmu_map_size = vaom_dw_frame_mmu_map_size(hw); + if (hw->dw_frame_mmu_map_addr) { + if (hw->dw_frame_mmu_map_phy_addr) + dma_free_coherent(amports_get_dma_device(), + mmu_map_size, + hw->dw_frame_mmu_map_addr, + hw->dw_frame_mmu_map_phy_addr); + hw->dw_frame_mmu_map_addr = NULL; + } + } +#endif +} + + +static void av1_local_uninit(struct AV1HW_s *hw) +{ + hw->rpm_ptr = NULL; + hw->lmem_ptr = NULL; +#ifdef DUMP_FILMGRAIN + hw->fg_ptr = NULL; + if (hw->fg_addr) { + if (hw->fg_phy_addr) + dma_free_coherent(amports_get_dma_device(), + FGS_TABLE_SIZE, hw->fg_addr, + hw->fg_phy_addr); + hw->fg_addr = NULL; + } +#endif + if (hw->rpm_addr) { + dma_free_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, + hw->rpm_addr, + hw->rpm_phy_addr); + hw->rpm_addr = NULL; + } + if (hw->aux_addr) { + dma_free_coherent(amports_get_dma_device(), + 
hw->prefix_aux_size + hw->suffix_aux_size, hw->aux_addr, + hw->aux_phy_addr); + hw->aux_addr = NULL; + } +#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD) + if (hw->ucode_log_addr) { + dma_free_coherent(amports_get_dma_device(), + UCODE_LOG_BUF_SIZE, hw->ucode_log_addr, + hw->ucode_log_phy_addr); + hw->ucode_log_addr = NULL; + } +#endif + if (hw->lmem_addr) { + if (hw->lmem_phy_addr) + dma_free_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, hw->lmem_addr, + hw->lmem_phy_addr); + hw->lmem_addr = NULL; + } + if (hw->prob_buffer_addr) { + if (hw->prob_buffer_phy_addr) + dma_free_coherent(amports_get_dma_device(), + PROB_BUF_SIZE, hw->prob_buffer_addr, + hw->prob_buffer_phy_addr); + + hw->prob_buffer_addr = NULL; + } + if (hw->count_buffer_addr) { + if (hw->count_buffer_phy_addr) + dma_free_coherent(amports_get_dma_device(), + COUNT_BUF_SIZE, hw->count_buffer_addr, + hw->count_buffer_phy_addr); + + hw->count_buffer_addr = NULL; + } + + vav1_mmu_map_free(hw); + + if (hw->gvs) + vfree(hw->gvs); + hw->gvs = NULL; +} + +static int av1_local_init(struct AV1HW_s *hw) +{ + int ret = -1; + /*int losless_comp_header_size, losless_comp_body_size;*/ + + struct BuffInfo_s *cur_buf_info = NULL; + + memset(&hw->param, 0, sizeof(union param_u)); +#ifdef MULTI_INSTANCE_SUPPORT + cur_buf_info = &hw->work_space_buf_store; + hw->pbi->work_space_buf = cur_buf_info; +#if 0 + if (vdec_is_support_4k()) { + memcpy(cur_buf_info, &aom_workbuff_spec[1], /* 8k */ + sizeof(struct BuffInfo_s)); + } else + memcpy(cur_buf_info, &aom_workbuff_spec[0],/* 1080p */ + sizeof(struct BuffInfo_s)); +#endif + memcpy(cur_buf_info, &aom_workbuff_spec[hw->buffer_spec_index], + sizeof(struct BuffInfo_s)); + + cur_buf_info->start_adr = hw->buf_start; + if (!hw->mmu_enable) + hw->mc_buf_spec.buf_end = hw->buf_start + hw->buf_size; + +#else +/*! 
MULTI_INSTANCE_SUPPORT*/ +#if 0 + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + cur_buf_info = &aom_workbuff_spec[1];/* 8k work space */ + else + cur_buf_info = &aom_workbuff_spec[1];/* 4k2k work space */ + } else + cur_buf_info = &aom_workbuff_spec[0];/* 1080p work space */ +#endif + memcpy(cur_buf_info, &aom_workbuff_spec[hw->buffer_spec_index], + sizeof(struct BuffInfo_s)); +#endif + + init_buff_spec(hw, cur_buf_info); + aom_bufmgr_init(hw, cur_buf_info, NULL); + + if (!vdec_is_support_4k() + && (buf_alloc_width > 1920 && buf_alloc_height > 1088)) { + buf_alloc_width = 1920; + buf_alloc_height = 1088; + if (hw->max_pic_w > 1920 && hw->max_pic_h > 1088) { + hw->max_pic_w = 1920; + hw->max_pic_h = 1088; + } + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + buf_alloc_width = 8192; + buf_alloc_height = 4608; + } + + hw->init_pic_w = hw->max_pic_w ? hw->max_pic_w : + (hw->vav1_amstream_dec_info.width ? hw->vav1_amstream_dec_info.width : + (buf_alloc_width ? buf_alloc_width : hw->work_space_buf->max_width)); + hw->init_pic_h = hw->max_pic_h ? hw->max_pic_h : + (hw->vav1_amstream_dec_info.height ? hw->vav1_amstream_dec_info.height : + (buf_alloc_height ? 
buf_alloc_height : hw->work_space_buf->max_height)); + + hw->pbi->frame_width = hw->init_pic_w; + hw->pbi->frame_height = hw->init_pic_h; + + /* video is not support unaligned with 64 in tl1 + ** vdec canvas mode will be linear when dump yuv is set + */ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (hw->double_write_mode != 0) && + (((hw->max_pic_w % 64) != 0) || + (hw->vav1_amstream_dec_info.width % 64) != 0)) { + if (hw_to_vdec(hw)->canvas_mode != + CANVAS_BLKMODE_LINEAR) + hw->mem_map_mode = 2; + else { + hw->mem_map_mode = 0; + av1_print(hw, AOM_DEBUG_HW_MORE, "vdec blkmod linear, force mem_map_mode 0\n"); + } + } + +#if 0 +//ndef MV_USE_FIXED_BUF + if (init_mv_buf_list(hw) < 0) { + pr_err("%s: init_mv_buf_list fail\n", __func__); + return -1; + } +#endif + + hw->mv_buf_margin = mv_buf_margin; + if (IS_4K_SIZE(hw->init_pic_w, hw->init_pic_h)) { + hw->used_buf_num = MAX_BUF_NUM_LESS + dynamic_buf_num_margin; + if (hw->used_buf_num > REF_FRAMES_4K) + hw->mv_buf_margin = hw->used_buf_num - REF_FRAMES_4K + 1; + } + else + hw->used_buf_num = max_buf_num + dynamic_buf_num_margin; + + if (hw->is_used_v4l) + hw->used_buf_num = 9 + hw->dynamic_buf_num_margin; + + if (hw->used_buf_num > MAX_BUF_NUM) + hw->used_buf_num = MAX_BUF_NUM; + if (hw->used_buf_num > FRAME_BUFFERS) + hw->used_buf_num = FRAME_BUFFERS; + + hw->pts_unstable = ((unsigned long)(hw->vav1_amstream_dec_info.param) + & 0x40) >> 6; + + if ((debug & AOM_AV1_DEBUG_SEND_PARAM_WITH_REG) == 0) { + hw->rpm_addr = dma_alloc_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, + &hw->rpm_phy_addr, GFP_KERNEL); + if (hw->rpm_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + return -1; + } + hw->rpm_ptr = hw->rpm_addr; + } + + if (prefix_aux_buf_size > 0 || + suffix_aux_buf_size > 0) { + u32 aux_buf_size; + + hw->prefix_aux_size = AUX_BUF_ALIGN(prefix_aux_buf_size); + hw->suffix_aux_size = AUX_BUF_ALIGN(suffix_aux_buf_size); + aux_buf_size = hw->prefix_aux_size + 
hw->suffix_aux_size; + hw->aux_addr = dma_alloc_coherent(amports_get_dma_device(), + aux_buf_size, &hw->aux_phy_addr, GFP_KERNEL); + if (hw->aux_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + goto dma_alloc_fail; + } + } +#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD) + //if (udebug_flag & 0x8) { + hw->ucode_log_addr = dma_alloc_coherent(amports_get_dma_device(), + UCODE_LOG_BUF_SIZE, &hw->ucode_log_phy_addr, GFP_KERNEL); + if (hw->ucode_log_addr == NULL) { + hw->ucode_log_phy_addr = 0; + } + pr_info("%s: alloc ucode log buffer %p\n", + __func__, hw->ucode_log_addr); + //} +#endif +#ifdef DUMP_FILMGRAIN + hw->fg_addr = dma_alloc_coherent(amports_get_dma_device(), + FGS_TABLE_SIZE, + &hw->fg_phy_addr, GFP_KERNEL); + if (hw->fg_addr == NULL) { + pr_err("%s: failed to alloc fg buffer\n", __func__); + } + hw->fg_ptr = hw->fg_addr; +#endif + hw->lmem_addr = dma_alloc_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, + &hw->lmem_phy_addr, GFP_KERNEL); + if (hw->lmem_addr == NULL) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + goto dma_alloc_fail; + } + hw->lmem_ptr = hw->lmem_addr; + + hw->prob_buffer_addr = dma_alloc_coherent(amports_get_dma_device(), + PROB_BUF_SIZE, + &hw->prob_buffer_phy_addr, GFP_KERNEL); + if (hw->prob_buffer_addr == NULL) { + pr_err("%s: failed to alloc prob_buffer\n", __func__); + goto dma_alloc_fail; + } + memset(hw->prob_buffer_addr, 0, PROB_BUF_SIZE); + hw->count_buffer_addr = dma_alloc_coherent(amports_get_dma_device(), + COUNT_BUF_SIZE, + &hw->count_buffer_phy_addr, GFP_KERNEL); + if (hw->count_buffer_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + goto dma_alloc_fail; + } + memset(hw->count_buffer_addr, 0, COUNT_BUF_SIZE); + + vdec_set_vframe_comm(hw_to_vdec(hw), DRIVER_NAME); + ret = vav1_mmu_map_alloc(hw); + if (ret < 0) + goto dma_alloc_fail; + + return ret; + +dma_alloc_fail: + av1_local_uninit(hw); + return -1; +} + + +#define spec2canvas(x) \ + 
(((x)->uv_canvas_index << 16) | \ + ((x)->uv_canvas_index << 8) | \ + ((x)->y_canvas_index << 0)) + + +static void set_canvas(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + struct vdec_s *vdec = hw_to_vdec(hw); + int canvas_w = ALIGN(pic_config->y_crop_width, 64)/4; + int canvas_h = ALIGN(pic_config->y_crop_height, 32)/4; + int blkmode = hw->mem_map_mode; + /*CANVAS_BLKMODE_64X32*/ + if (pic_config->double_write_mode) { + canvas_w = pic_config->y_crop_width / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + canvas_h = pic_config->y_crop_height / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + + /* sao ctrl1 config aligned with 64, so aligned with 64 same */ + canvas_w = ALIGN(canvas_w, 64); + canvas_h = ALIGN(canvas_h, 32); + + if (vdec->parallel_dec == 1) { + if (pic_config->y_canvas_index == -1) + pic_config->y_canvas_index = + vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + if (pic_config->uv_canvas_index == -1) + pic_config->uv_canvas_index = + vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + } else { + pic_config->y_canvas_index = 128 + pic_config->index * 2; + pic_config->uv_canvas_index = 128 + pic_config->index * 2 + 1; + } + + config_cav_lut_ex(pic_config->y_canvas_index, + pic_config->dw_y_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hw->is_used_v4l ? 0 : 7, VDEC_HEVC); + config_cav_lut_ex(pic_config->uv_canvas_index, + pic_config->dw_u_v_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hw->is_used_v4l ? 0 : 7, VDEC_HEVC); + +#ifdef MULTI_INSTANCE_SUPPORT + pic_config->canvas_config[0].phy_addr = + pic_config->dw_y_adr; + pic_config->canvas_config[0].width = + canvas_w; + pic_config->canvas_config[0].height = + canvas_h; + pic_config->canvas_config[0].block_mode = + blkmode; + pic_config->canvas_config[0].endian = hw->is_used_v4l ? 
0 : 7; + + pic_config->canvas_config[1].phy_addr = + pic_config->dw_u_v_adr; + pic_config->canvas_config[1].width = + canvas_w; + pic_config->canvas_config[1].height = + canvas_h; + pic_config->canvas_config[1].block_mode = + blkmode; + pic_config->canvas_config[1].endian = hw->is_used_v4l ? 0 : 7; +#endif + } +} + +static void set_frame_info(struct AV1HW_s *hw, struct vframe_s *vf) +{ + unsigned int ar = DISP_RATIO_ASPECT_RATIO_MAX; + vf->duration = hw->frame_dur; + vf->duration_pulldown = 0; + vf->flag = 0; + vf->prop.master_display_colour = hw->vf_dp; + vf->signal_type = hw->video_signal_type; + if (vf->compWidth && vf->compHeight) + hw->frame_ar = vf->compHeight * 0x100 / vf->compWidth; + ar = min_t(u32, ar, DISP_RATIO_ASPECT_RATIO_MAX); + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + vf->sar_width = 1; + vf->sar_height = 1; + + if (hw->is_used_v4l && hw->vf_dp.present_flag) { + struct aml_vdec_hdr_infos hdr; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + memset(&hdr, 0, sizeof(hdr)); + hdr.signal_type = vf->signal_type; + hdr.color_parms = hw->vf_dp; + vdec_v4l_set_hdr_infos(ctx, &hdr); + } + + vf->sidebind_type = hw->sidebind_type; + vf->sidebind_channel_id = hw->sidebind_channel_id; +} + +static int vav1_vf_states(struct vframe_states *states, void *op_arg) +{ + struct AV1HW_s *hw = (struct AV1HW_s *)op_arg; + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&hw->newframe_q); + states->buf_avail_num = kfifo_len(&hw->display_q); + + if (step == 2) + states->buf_avail_num = 0; + return 0; +} + +static struct vframe_s *vav1_vf_peek(void *op_arg) +{ + struct vframe_s *vf[2] = {0, 0}; + struct AV1HW_s *hw = (struct AV1HW_s *)op_arg; + + if (step == 2) + return NULL; + + if (kfifo_len(&hw->display_q) > VF_POOL_SIZE) { + av1_print(hw, AV1_DEBUG_BUFMGR, + "kfifo len:%d invaild, peek error\n", + kfifo_len(&hw->display_q)); + return NULL; + } + + if (kfifo_out_peek(&hw->display_q, (void *)&vf, 2)) { 
+ if (vf[1]) { + vf[0]->next_vf_pts_valid = true; + vf[0]->next_vf_pts = vf[1]->pts; + } else + vf[0]->next_vf_pts_valid = false; + return vf[0]; + } + + return NULL; +} + +static struct vframe_s *vav1_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct AV1HW_s *hw = (struct AV1HW_s *)op_arg; + + if (step == 2) + return NULL; + else if (step == 1) + step = 2; + + if (kfifo_get(&hw->display_q, &vf)) { + struct vframe_s *next_vf = NULL; + uint8_t index = vf->index & 0xff; + ATRACE_COUNTER(hw->trace.disp_q_name, kfifo_len(&hw->display_q)); + if (index < hw->used_buf_num || + (vf->type & VIDTYPE_V4L_EOS)) { + hw->vf_get_count++; + vf->index_disp = hw->vf_get_count; + if (debug & AOM_DEBUG_VFRAME) { + struct BufferPool_s *pool = hw->common.buffer_pool; + struct PIC_BUFFER_CONFIG_s *pic = + &pool->frame_bufs[index].buf; + unsigned long flags; + lock_buffer_pool(hw->common.buffer_pool, flags); + av1_print(hw, AOM_DEBUG_VFRAME, "%s index 0x%x type 0x%x w/h %d/%d, aux size %d, pts %d, %lld, ts: %llu\n", + __func__, vf->index, vf->type, + vf->width, vf->height, + pic->aux_data_size, + vf->pts, + vf->pts_us64, + vf->timestamp); + unlock_buffer_pool(hw->common.buffer_pool, flags); + } + + if (kfifo_peek(&hw->display_q, &next_vf) && next_vf) { + vf->next_vf_pts_valid = true; + vf->next_vf_pts = next_vf->pts; + } else + vf->next_vf_pts_valid = false; +#ifdef DUMP_FILMGRAIN + if (index == fg_dump_index) { + unsigned long flags; + int ii; + lock_buffer_pool(hw->common.buffer_pool, flags); + pr_info("FGS_TABLE for buffer %d:\n", index); + for (ii = 0; ii < FGS_TABLE_SIZE; ii++) { + pr_info("%02x ", hw->fg_ptr[ii]); + if (((ii+ 1) & 0xf) == 0) + pr_info("\n"); + } + unlock_buffer_pool(hw->common.buffer_pool, flags); + } +#endif + + return vf; + } + } + return NULL; +} + +static void vav1_vf_put(struct vframe_s *vf, void *op_arg) +{ + struct AV1HW_s *hw = (struct AV1HW_s *)op_arg; + uint8_t index = vf->index & 0xff; + unsigned long flags; + + if ((vf == NULL) || (hw == NULL)) + 
return; + + kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->trace.new_q_name, kfifo_len(&hw->newframe_q)); + hw->vf_put_count++; + if (debug & AOM_DEBUG_VFRAME) { + lock_buffer_pool(hw->common.buffer_pool, flags); + av1_print(hw, AOM_DEBUG_VFRAME, "%s index 0x%x type 0x%x w/h %d/%d, pts %d, %lld, ts: %llu\n", + __func__, vf->index, vf->type, + vf->width, vf->height, + vf->pts, + vf->pts_us64, + vf->timestamp); + unlock_buffer_pool(hw->common.buffer_pool, flags); + } + + if (index < hw->used_buf_num) { + struct AV1_Common_s *cm = &hw->common; + struct BufferPool_s *pool = cm->buffer_pool; + + lock_buffer_pool(hw->common.buffer_pool, flags); + if ((debug & AV1_DEBUG_IGNORE_VF_REF) == 0) { + if (pool->frame_bufs[index].buf.vf_ref > 0) + pool->frame_bufs[index].buf.vf_ref--; + } + if (hw->wait_buf) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + hw->last_put_idx = index; + hw->new_frame_displayed++; + unlock_buffer_pool(hw->common.buffer_pool, flags); + } + +} + +static int vav1_event_cb(int type, void *data, void *op_arg) +{ + unsigned long flags; + struct AV1HW_s *hw = (struct AV1HW_s *)op_arg; + struct AV1_Common_s *cm = &hw->common; + struct BufferPool_s *pool = cm->buffer_pool; + + if (type & VFRAME_EVENT_RECEIVER_RESET) { +#if 0 + unsigned long flags; + + amhevc_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_light_unreg_provider(&vav1_vf_prov); +#endif + spin_lock_irqsave(&hw->lock, flags); + vav1_local_init(); + vav1_prot_init(); + spin_unlock_irqrestore(&hw->lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vav1_vf_prov); +#endif + amhevc_start(); +#endif + } else if (type & VFRAME_EVENT_RECEIVER_GET_AUX_DATA) { + struct provider_aux_req_s *req = + (struct provider_aux_req_s *)data; + unsigned char index; + + lock_buffer_pool(hw->common.buffer_pool, flags); + index = req->vf->index & 0xff; + req->aux_buf = NULL; + req->aux_size = 0; + if (req->bot_flag) + index = (req->vf->index >> 8) & 
0xff; + if (index != 0xff + && index < hw->used_buf_num) { + struct PIC_BUFFER_CONFIG_s *pic_config = + &pool->frame_bufs[index].buf; + req->aux_buf = pic_config->aux_data_buf; + req->aux_size = pic_config->aux_data_size; +#if 0 +//def CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (hw->bypass_dvenl && !dolby_meta_with_el) + req->dv_enhance_exist = false; + else + req->dv_enhance_exist = + pic_config->dv_enhance_exist; + av1_print(hw, AOM_DEBUG_VFRAME, + "query dv_enhance_exist for pic (vf 0x%p, poc %d index %d) flag => %d, aux sizd 0x%x\n", + req->vf, + pic_config->POC, index, + req->dv_enhance_exist, req->aux_size); +#else + req->dv_enhance_exist = 0; +#endif + } + unlock_buffer_pool(hw->common.buffer_pool, flags); + + if (debug & AOM_DEBUG_AUX_DATA) + av1_print(hw, 0, + "%s(type 0x%x vf index 0x%x)=>size 0x%x\n", + __func__, type, index, req->aux_size); + } else if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(hw_to_vdec(hw)); + else + req->req_result[0] = 0xffffffff; + } + return 0; +} + +void av1_inc_vf_ref(struct AV1HW_s *hw, int index) +{ + struct AV1_Common_s *cm = &hw->common; + + if ((debug & AV1_DEBUG_IGNORE_VF_REF) == 0) { + cm->buffer_pool->frame_bufs[index].buf.vf_ref++; + + av1_print(hw, AV1_DEBUG_BUFMGR_MORE, "%s index = %d new vf_ref = %d\r\n", + __func__, index, + cm->buffer_pool->frame_bufs[index].buf.vf_ref); + } +} +#if 0 +static int frame_duration_adapt(struct AV1HW_s *hw, struct vframe_s *vf, u32 valid) +{ + u32 old_duration, pts_duration = 0; + u32 pts = vf->pts; + + if (hw->get_frame_dur == true) + return true; + + hw->frame_cnt_window++; + if (!(hw->av1_first_pts_ready == 1)) { + if (valid) { + hw->pts1 = pts; + hw->frame_cnt_window = 0; + hw->duration_from_pts_done = 0; + hw->av1_first_pts_ready = 1; + } else { + return false; + } + } else { + if (pts < hw->pts1) { + if 
(hw->frame_cnt_window > FRAME_CNT_WINDOW_SIZE) { + hw->pts1 = pts; + hw->frame_cnt_window = 0; + } + } + + if (valid && (hw->frame_cnt_window > FRAME_CNT_WINDOW_SIZE) && + (pts > hw->pts1) && (hw->duration_from_pts_done == 0)) { + old_duration = hw->frame_dur; + hw->pts2 = pts; + pts_duration = (((hw->pts2 - hw->pts1) * 16) / + (hw->frame_cnt_window * 15)); + + if (close_to(pts_duration, old_duration, 2000)) { + hw->frame_dur = pts_duration; + av1_print(hw, AV1_DEBUG_OUT_PTS, + "use calc duration %d\n", pts_duration); + } + + if (hw->duration_from_pts_done == 0) { + if (close_to(pts_duration, old_duration, RATE_CORRECTION_THRESHOLD)) { + hw->duration_from_pts_done = 1; + } else { + if (!close_to(pts_duration, + old_duration, 1000) && + !close_to(pts_duration, + hw->frame_dur, 1000) && + close_to(pts_duration, + hw->last_duration, 200)) { + /* frame_dur must + * wrong,recover it. + */ + hw->frame_dur = pts_duration; + } + hw->pts1 = hw->pts2; + hw->frame_cnt_window = 0; + hw->duration_from_pts_done = 0; + } + } + hw->last_duration = pts_duration; + } + } + return true; +} +#endif + +static void update_vf_memhandle(struct AV1HW_s *hw, + struct vframe_s *vf, struct PIC_BUFFER_CONFIG_s *pic) +{ + /* keeper not needed for v4l solution */ + if (hw->is_used_v4l) + return; + + if (pic->index < 0) { + vf->mem_handle = NULL; + vf->mem_head_handle = NULL; + vf->mem_dw_handle = NULL; + } else if (vf->type & VIDTYPE_SCATTER) { +#ifdef AOM_AV1_MMU_DW + if (pic->double_write_mode & 0x20 && + (debug & AOM_DEBUG_DW_DISP_MAIN) == 0) { + vf->mem_handle = + decoder_mmu_box_get_mem_handle( + hw->mmu_box_dw, pic->index); + vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, + DW_HEADER_BUFFER_IDX(pic->BUF_index)); + vf->mem_dw_handle = NULL; + } else +#endif + { + vf->mem_handle = + decoder_mmu_box_get_mem_handle( + hw->mmu_box, pic->index); + vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, + HEADER_BUFFER_IDX(pic->BUF_index)); + if 
(hw->double_write_mode == 3) + vf->mem_dw_handle = + decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, + VF_BUFFER_IDX(pic->BUF_index)); + else + vf->mem_dw_handle = NULL; + } +#ifdef USE_SPEC_BUF_FOR_MMU_HEAD + vf->mem_head_handle = NULL; +#endif + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, VF_BUFFER_IDX(pic->BUF_index)); + vf->mem_head_handle = NULL; + vf->mem_dw_handle = NULL; + /*vf->mem_head_handle = + *decoder_bmmu_box_get_mem_handle( + *hw->bmmu_box, VF_BUFFER_IDX(BUF_index)); + */ + } +} + +static inline void av1_update_gvs(struct AV1HW_s *hw, struct vframe_s *vf, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + if (hw->gvs->frame_height != pic_config->y_crop_height) { + hw->gvs->frame_width = pic_config->y_crop_width; + hw->gvs->frame_height = pic_config->y_crop_height; + } + if (hw->gvs->frame_dur != hw->frame_dur) { + hw->gvs->frame_dur = hw->frame_dur; + if (hw->frame_dur != 0) + hw->gvs->frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ? 
+ 96000 / hw->frame_dur : (96000 / hw->frame_dur +1); + else + hw->gvs->frame_rate = -1; + } + if (vf && hw->gvs->ratio_control != vf->ratio_control) + hw->gvs->ratio_control = vf->ratio_control; + + hw->gvs->status = hw->stat | hw->fatal_error; + hw->gvs->error_count = hw->gvs->error_frame_count; + +} + +static int prepare_display_buf(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + struct vframe_s *vf = NULL; + struct vdec_info tmp4x; + int stream_offset = pic_config->stream_offset; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + ulong nv_order = VIDTYPE_VIU_NV21; + u32 pts_valid = 0, pts_us64_valid = 0; + u32 frame_size; + + av1_print(hw, AOM_DEBUG_VFRAME, "%s index = %d\r\n", __func__, pic_config->index); + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + av1_print(hw, 0, "fatal error, no available buffer slot."); + return -1; + } + + /* swap uv */ + if (hw->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + nv_order = VIDTYPE_VIU_NV12; + } + + if (pic_config->double_write_mode && + (pic_config->double_write_mode & 0x20) == 0) + set_canvas(hw, pic_config); + + display_frame_count[hw->index]++; + if (vf) { + if (hw->is_used_v4l) { + vf->v4l_mem_handle + = hw->m_BUF[pic_config->BUF_index].v4l_ref_buf_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + if (hw->mmu_enable) { + vf->mm_box.bmmu_box = hw->bmmu_box; + vf->mm_box.bmmu_idx = HEADER_BUFFER_IDX(hw->buffer_wrap[pic_config->BUF_index]); + vf->mm_box.mmu_box = hw->mmu_box; + vf->mm_box.mmu_idx = hw->buffer_wrap[pic_config->BUF_index]; + } + } + +#ifdef MULTI_INSTANCE_SUPPORT + if (vdec_frame_based(hw_to_vdec(hw))) { + vf->pts = pic_config->pts; + vf->pts_us64 = pic_config->pts64; + + if (hw->is_used_v4l && v4l_bitstream_id_enable) + vf->timestamp = pic_config->timestamp; + else + vf->timestamp = pic_config->pts64; + + if (vf->pts != 0 || vf->pts_us64 != 0) { + pts_valid = 1; 
+ pts_us64_valid = 1; + } else { + pts_valid = 0; + pts_us64_valid = 0; + } + } else +#endif + if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, stream_offset, &vf->pts, + &frame_size, 0, + &vf->pts_us64) != 0) { +#ifdef DEBUG_PTS + hw->pts_missed++; +#endif + vf->pts = 0; + vf->pts_us64 = 0; + pts_valid = 0; + pts_us64_valid = 0; + } else { +#ifdef DEBUG_PTS + hw->pts_hit++; +#endif + pts_valid = 1; + pts_us64_valid = 1; + } + av1_print(hw, AV1_DEBUG_OUT_PTS, + "av1 output slice type %d, dur %d, pts %d, pts64 %lld, ts: %llu\n", + pic_config->slice_type, hw->frame_dur, vf->pts, vf->pts_us64, vf->timestamp); + + fill_frame_info(hw, pic_config, frame_size, vf->pts); + + vf->index = 0xff00 | pic_config->index; + if (pic_config->double_write_mode & 0x10) { + /* double write only */ + vf->compBodyAddr = 0; + vf->compHeadAddr = 0; +#ifdef AOM_AV1_MMU_DW + vf->dwBodyAddr = 0; + vf->dwHeadAddr = 0; +#endif + } else { + if (hw->mmu_enable) { + vf->compBodyAddr = 0; + vf->compHeadAddr = pic_config->header_adr; + vf->fgs_table_adr = pic_config->fgs_table_adr; + vf->fgs_valid = hw->fgs_valid; +#ifdef AOM_AV1_MMU_DW + vf->dwBodyAddr = 0; + vf->dwHeadAddr = 0; + if (pic_config->double_write_mode & 0x20) { + u32 mode = pic_config->double_write_mode & 0xf; + if (mode == 5 || mode == 3) + vf->dwHeadAddr = pic_config->header_dw_adr; + else if ((mode == 1 || mode == 2 || mode == 4) + && (debug & AOM_DEBUG_DW_DISP_MAIN) == 0) { + vf->compHeadAddr = pic_config->header_dw_adr; + vf->fgs_valid = 0; + av1_print(hw, 0, + "Use dw mmu for display\n"); + } + } +#endif + } else { + /*vf->compBodyAddr = pic_config->mc_y_adr; + *vf->compHeadAddr = pic_config->mc_y_adr + + *pic_config->comp_body_size; */ + /*head adr*/ + } + vf->canvas0Addr = vf->canvas1Addr = 0; + } + if (pic_config->double_write_mode && + (pic_config->double_write_mode & 0x20) == 0) { + vf->type = VIDTYPE_PROGRESSIVE | + VIDTYPE_VIU_FIELD; + vf->type |= nv_order; + if ((pic_config->double_write_mode == 3 || + 
pic_config->double_write_mode == 5) && + (!IS_8K_SIZE(pic_config->y_crop_width, + pic_config->y_crop_height))) { + vf->type |= VIDTYPE_COMPRESS; + if (hw->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } +#ifdef MULTI_INSTANCE_SUPPORT + if (hw->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + pic_config->canvas_config[0]; + vf->canvas0_config[1] = + pic_config->canvas_config[1]; + vf->canvas1_config[0] = + pic_config->canvas_config[0]; + vf->canvas1_config[1] = + pic_config->canvas_config[1]; + + } else +#endif + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(pic_config); + } else { + vf->canvas0Addr = vf->canvas1Addr = 0; + vf->type = VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; + if (hw->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + + switch (pic_config->bit_depth) { + case AOM_BITS_8: + vf->bitdepth = BITDEPTH_Y8 | + BITDEPTH_U8 | BITDEPTH_V8; + break; + case AOM_BITS_10: + case AOM_BITS_12: + vf->bitdepth = BITDEPTH_Y10 | + BITDEPTH_U10 | BITDEPTH_V10; + break; + default: + vf->bitdepth = BITDEPTH_Y10 | + BITDEPTH_U10 | BITDEPTH_V10; + break; + } + if ((vf->type & VIDTYPE_COMPRESS) == 0) + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + if (pic_config->bit_depth == AOM_BITS_8) + vf->bitdepth |= BITDEPTH_SAVING_MODE; + + /* if ((vf->width!=pic_config->width)| + * (vf->height!=pic_config->height)) + */ + /* pr_info("aaa: %d/%d, %d/%d\n", + vf->width,vf->height, pic_config->width, + pic_config->height); */ + vf->width = pic_config->y_crop_width / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + vf->height = pic_config->y_crop_height / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + if (force_w_h != 0) { + vf->width = (force_w_h >> 16) & 0xffff; + vf->height = force_w_h & 0xffff; + } + if ((pic_config->double_write_mode & 0x20) && + ((pic_config->double_write_mode & 0xf) == 2 || + (pic_config->double_write_mode & 0xf) == 4)) { + vf->compWidth = 
pic_config->y_crop_width / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + vf->compHeight = pic_config->y_crop_height / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + } else { + vf->compWidth = pic_config->y_crop_width; + vf->compHeight = pic_config->y_crop_height; + } + set_frame_info(hw, vf); + if (force_fps & 0x100) { + u32 rate = force_fps & 0xff; + + if (rate) + vf->duration = 96000/rate; + else + vf->duration = 0; + } + update_vf_memhandle(hw, vf, pic_config); + + av1_inc_vf_ref(hw, pic_config->index); + decoder_do_frame_check(hw_to_vdec(hw), vf); + vdec_vframe_ready(hw_to_vdec(hw), vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->trace.pts_name, vf->pts); + ATRACE_COUNTER(hw->trace.new_q_name, kfifo_len(&hw->newframe_q)); + ATRACE_COUNTER(hw->trace.disp_q_name, kfifo_len(&hw->display_q)); + + hw->vf_pre_count++; + /*count info*/ + hw->gvs->frame_dur = hw->frame_dur; + vdec_count_info(hw->gvs, 0, stream_offset); + + hw_to_vdec(hw)->vdec_fps_detec(hw_to_vdec(hw)->id); + +#ifdef AUX_DATA_CRC + decoder_do_aux_data_check(hw_to_vdec(hw), pic_config->aux_data_buf, + pic_config->aux_data_size); +#endif + + av1_print(hw, AV1_DEBUG_SEI_DETAIL, "%s aux_data_size = %d\n", + __func__, pic_config->aux_data_size); + + if (debug & AV1_DEBUG_SEI_DETAIL) { + int i = 0; + PR_INIT(128); + for (i = 0; i < pic_config->aux_data_size; i++) { + PR_FILL("%02x ", pic_config->aux_data_buf[i]); + if (((i + 1) & 0xf) == 0) + PR_INFO(hw->index); + } + PR_INFO(hw->index); + } + + if (hw->is_used_v4l) { + update_vframe_src_fmt(vf, + pic_config->aux_data_buf, + pic_config->aux_data_size, + false, hw->provider_name, NULL); + } + + av1_update_gvs(hw, vf, pic_config); + memcpy(&tmp4x, hw->gvs, sizeof(struct vdec_info)); + tmp4x.bit_depth_luma = bit_depth_luma; + tmp4x.bit_depth_chroma = bit_depth_chroma; + tmp4x.double_write_mode = pic_config->double_write_mode; + vdec_fill_vdec_frame(hw_to_vdec(hw), &hw->vframe_qos, 
&tmp4x, vf, pic_config->hw_decode_time); + if (without_display_mode == 0) { + if (hw->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vav1_vf_put(vav1_vf_get(hw), hw); + } else { + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(hw->provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } else + vav1_vf_put(vav1_vf_get(hw), hw); + } + + return 0; +} + +void av1_raw_write_image(AV1Decoder *pbi, PIC_BUFFER_CONFIG *sd) +{ + sd->stream_offset = pbi->pre_stream_offset; + prepare_display_buf((struct AV1HW_s *)(pbi->private_data), sd); + pbi->pre_stream_offset = READ_VREG(HEVC_SHIFT_BYTE_COUNT); +} + +static int notify_v4l_eos(struct vdec_s *vdec) +{ + struct AV1HW_s *hw = (struct AV1HW_s *)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = &hw->vframe_dummy; + struct vdec_v4l2_buffer *fb = NULL; + int index = INVALID_IDX; + ulong expires; + + if (hw->eos) { + if (hw->is_used_v4l) { + expires = jiffies + msecs_to_jiffies(2000); + while (INVALID_IDX == (index = v4l_get_free_fb(hw))) { + if (time_after(jiffies, expires) || + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) + break; + } + + if (index == INVALID_IDX) { + ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token); + if (ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC) < 0) { + pr_err("[%d] EOS get free buff fail.\n", ctx->id); + return -1; + } + } + } + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + vf->v4l_mem_handle = (index == INVALID_IDX) ? 
(ulong)fb :
+			hw->m_BUF[index].v4l_ref_buf_addr;
+		fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
+
+		vdec_vframe_ready(vdec, vf);
+		kfifo_put(&hw->display_q, (const struct vframe_s *)vf);
+
+		/* v4l2 path submits the fb task directly; the legacy path
+		 * notifies the registered vframe receiver instead. */
+		if (hw->is_used_v4l)
+			fb->task->submit(fb->task, TASK_TYPE_DEC);
+		else
+			vf_notify_receiver(vdec->vf_provider_name,
+					VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL);
+
+		av1_print(hw, 0, "[%d] AV1 EOS notify.\n", (hw->is_used_v4l)?ctx->id:vdec->id);
+	}
+
+	return 0;
+}
+
+/*
+ * Copy the 128-word parameter block out of the decoder's RPM mailbox
+ * into *params. Each word is handshaken: bit 16 of RPM_CMD_REG flags
+ * "data valid", the low 16 bits carry the payload, and writing 0 acks
+ * the word so the firmware can post the next one.
+ * NOTE(review): the inner do/while busy-waits with no timeout -
+ * presumably the firmware always responds once the ISR fires; confirm
+ * against the ucode before relying on this in an error path.
+ */
+static void get_rpm_param(union param_u *params)
+{
+	int i;
+	unsigned int data32;
+
+	if (debug & AV1_DEBUG_BUFMGR)
+		pr_info("enter %s\r\n", __func__);
+	for (i = 0; i < 128; i++) {
+		do {
+			data32 = READ_VREG(RPM_CMD_REG);
+			/*pr_info("%x\n", data32);*/
+		} while ((data32 & 0x10000) == 0);	/* wait for valid bit */
+		params->l.data[i] = data32&0xffff;
+		/*pr_info("%x\n", data32);*/
+		WRITE_VREG(RPM_CMD_REG, 0);	/* ack current word */
+	}
+	if (debug & AV1_DEBUG_BUFMGR)
+		pr_info("leave %s\r\n", __func__);
+}
+
+#ifdef CHANGE_REMOVED
+/*
+ * Release the unused tail of the current picture's compressed-frame MMU
+ * page allocation. The hardware reports the number of 4k pages actually
+ * consumed in HEVC_SAO_MMU_STATUS bits 31:16; everything beyond that is
+ * returned to the mmu box. Clears cur_fb_idx_mmu/used_4k_num afterwards
+ * so a second call is a no-op for this frame.
+ */
+static int recycle_mmu_buf_tail(struct AV1HW_s *hw,
+		bool check_dma)
+{
+	struct AV1_Common_s *const cm = &hw->common;
+
+	hw->used_4k_num =
+		READ_VREG(HEVC_SAO_MMU_STATUS) >> 16;
+
+	av1_print(hw, 0, "pic index %d page_start %d\n",
+		cm->cur_fb_idx_mmu, hw->used_4k_num);
+
+	if (check_dma)
+		hevc_mmu_dma_check(hw_to_vdec(hw));
+
+	if (hw->is_used_v4l) {
+		/* v4l buffers carry their own mmu box/index mapping */
+		int index = cm->cur_fb_idx_mmu;
+		struct internal_comp_buf *ibuf =
+			index_to_icomp_buf(hw, index);
+
+		decoder_mmu_box_free_idx_tail(
+				ibuf->mmu_box,
+				ibuf->index,
+				hw->used_4k_num);
+	} else {
+		decoder_mmu_box_free_idx_tail(
+				hw->mmu_box,
+				cm->cur_fb_idx_mmu,
+				hw->used_4k_num);
+	}
+
+	cm->cur_fb_idx_mmu = INVALID_IDX;
+	hw->used_4k_num = -1;
+
+	return 0;
+}
+#endif
+
+#ifdef CHANGE_REMOVED
+/* Wrapper: recycle the MMU tail unless in pure double-write mode (no
+ * compressed reference, so no MMU pages to trim). */
+static void av1_recycle_mmu_buf_tail(struct AV1HW_s *hw)
+{
+	struct AV1_Common_s *const cm = &hw->common;
+	if (hw->double_write_mode & 0x10)
+		return;
+
+	if (cm->cur_fb_idx_mmu != INVALID_IDX) {
+		recycle_mmu_buf_tail(hw,
+			((hw->used_4k_num == -1) &&
+
hw->m_ins_flag) ? 1 : 0);
+	}
+}
+#endif
+
+/*
+ * Unconditionally free the whole MMU page allocation of the current
+ * frame (used on the decode-again path where the frame will be redone).
+ * Skipped for v4l (pages owned by the v4l buffer) and for pure
+ * double-write mode (no MMU pages allocated).
+ */
+static void av1_recycle_mmu_buf(struct AV1HW_s *hw)
+{
+	struct AV1_Common_s *const cm = &hw->common;
+
+	if (hw->is_used_v4l)
+		return;
+
+	if (hw->double_write_mode & 0x10)
+		return;
+	if (cm->cur_fb_idx_mmu != INVALID_IDX) {
+		decoder_mmu_box_free_idx(hw->mmu_box,
+			cm->cur_fb_idx_mmu);
+
+		cm->cur_fb_idx_mmu = INVALID_IDX;
+		hw->used_4k_num = -1;
+	}
+}
+
+/*
+ * Abort the current decode pass and schedule a retry: stop the HEVC
+ * core, mark DEC_RESULT_AGAIN, and if we were mid-slice give back the
+ * frame's MMU pages so the retried pass can re-allocate them. The
+ * actual retry happens from the scheduled work item.
+ */
+static void dec_again_process(struct AV1HW_s *hw)
+{
+	amhevc_stop();
+	hw->dec_result = DEC_RESULT_AGAIN;
+	if (hw->process_state ==
+		PROC_STATE_DECODESLICE) {
+		hw->process_state =
+			PROC_STATE_SENDAGAIN;
+		if (hw->mmu_enable)
+			av1_recycle_mmu_buf(hw);
+	}
+	reset_process_time(hw);
+	vdec_schedule_work(&hw->work);
+}
+
+/*
+ * Snapshot the film-grain synthesis registers of the frame just decoded
+ * into cur_frame, so a later frame that inherits grain parameters from
+ * this one (load_grain_params) can replay them via
+ * config_film_grain_reg(). Reading HEVC_FGS_DATA auto-increments after
+ * HEVC_FGS_IDX is reset to 0.
+ */
+static void read_film_grain_reg(struct AV1HW_s *hw)
+{
+	AV1_COMMON *cm = &hw->common;
+	int i;
+	if (cm->cur_frame == NULL) {
+		av1_print(hw, AOM_DEBUG_HW_MORE, "%s, cur_frame not exist!!!\n", __func__);
+		return;
+	} else
+		av1_print(hw, AOM_DEBUG_HW_MORE, "%s\n", __func__);
+	WRITE_VREG(HEVC_FGS_IDX, 0);
+	for (i = 0; i < FILM_GRAIN_REG_SIZE; i++) {
+		cm->cur_frame->film_grain_reg[i] = READ_VREG(HEVC_FGS_DATA);
+	}
+	cm->cur_frame->film_grain_reg_valid = 1;
+}
+
+/*
+ * Replay a reference frame's saved film-grain registers into the
+ * hardware (AV1 film_grain_params_ref_idx inheritance). The ref idx is
+ * validated against remapped_ref_idx[] first; the register block is
+ * also copied into cur_frame so the chain can continue.
+ */
+static void config_film_grain_reg(struct AV1HW_s *hw, int film_grain_params_ref_idx)
+{
+
+	AV1_COMMON *cm = &hw->common;
+	int i;
+	unsigned char found = 0;
+	RefCntBuffer *buf;
+	av1_print(hw, AOM_DEBUG_HW_MORE,
+		" ## %s frome reference idx %d\n",
+		__func__, film_grain_params_ref_idx);
+	for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
+		if (film_grain_params_ref_idx == cm->remapped_ref_idx[i]) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		av1_print(hw, AOM_DEBUG_HW_MORE,
+			"%s Error, Invalid film grain reference idx %d\n",
+			__func__, film_grain_params_ref_idx);
+		return;
+	}
+	buf = cm->ref_frame_map[film_grain_params_ref_idx];
+
+	if (buf->film_grain_reg_valid == 0) {
+		av1_print(hw, AOM_DEBUG_HW_MORE,
+			"%s Error, film grain register data invalid for reference idx %d\n",
+
__func__, film_grain_params_ref_idx);
+		return;
+	}
+
+	if (cm->cur_frame == NULL) {
+		av1_print(hw, AOM_DEBUG_HW_MORE,
+			"%s, cur_frame not exist!!!\n", __func__);
+	}
+	WRITE_VREG(HEVC_FGS_IDX, 0);
+	for (i = 0; i < FILM_GRAIN_REG_SIZE; i++) {
+		WRITE_VREG(HEVC_FGS_DATA, buf->film_grain_reg[i]);
+		if (cm->cur_frame)
+			cm->cur_frame->film_grain_reg[i] = buf->film_grain_reg[i];
+	}
+	if (cm->cur_frame)
+		cm->cur_frame->film_grain_reg_valid = 1;
+	WRITE_VREG(HEVC_FGS_CTRL, READ_VREG(HEVC_FGS_CTRL) | 1); // set fil_grain_start
+}
+
+/*
+ * Push the reference-frame map for the NEXT frame into parser memory
+ * (12 words). The parser memory write address differs on SC2+ SoCs
+ * (0x11a0 vs 0x1000).
+ */
+void config_next_ref_info_hw(struct AV1HW_s *hw)
+{
+	int j;
+	AV1_COMMON *const cm = &hw->common;
+
+	av1_set_next_ref_frame_map(hw->pbi);
+	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SC2)
+		WRITE_VREG(HEVC_PARSER_MEM_WR_ADDR, 0x11a0);
+	else
+		WRITE_VREG(HEVC_PARSER_MEM_WR_ADDR, 0x1000);
+
+	for (j = 0; j < 12; j++) {
+		unsigned int info =
+			av1_get_next_used_ref_info(cm, j);
+
+		WRITE_VREG(HEVC_PARSER_MEM_RW_DATA, info);
+		av1_print(hw, AOM_DEBUG_HW_MORE,
+			"config next ref info %d 0x%x\n", j, info);
+	}
+}
+
+
+
+#ifdef PRINT_HEVC_DATA_PATH_MONITOR
+/*
+ * Debug-only dump of the HEVC pipeline path-monitor counters (wait/
+ * transfer ratios for parser->iqit, iqit->ipp, ipp->dblk, dblk->ow,
+ * DDR and CMD paths). Dead code unless PRINT_HEVC_DATA_PATH_MONITOR
+ * is defined.
+ * NOTE(review): this function cannot compile as-is if enabled:
+ *  - it dereferences 'pbi', which is neither a parameter nor a local
+ *    (the function receives 'hw'; should presumably be hw->pbi);
+ *  - it uses 'float' arithmetic and "%.2f", which kernel code/printk
+ *    does not support;
+ *  - "\%" is an invalid escape in the format string ("%%" intended).
+ * Kept byte-identical here; fix before enabling the macro.
+ */
+void datapath_monitor(struct AV1HW_s *hw)
+{
+	uint32_t total_clk_count;
+	uint32_t path_transfer_count;
+	uint32_t path_wait_count;
+	float path_wait_ratio;
+	if (pbi->decode_idx > 1) {
+		WRITE_VREG(HEVC_PATH_MONITOR_CTRL, 0); // Disabble monitor and set rd_idx to 0
+		total_clk_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+
+		WRITE_VREG(HEVC_PATH_MONITOR_CTRL, (1<<4)); // Disabble monitor and set rd_idx to 0
+
+// parser --> iqit
+		path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		if (path_transfer_count == 0) path_wait_ratio = 0.0;
+		else path_wait_ratio = (float)path_wait_count/(float)path_transfer_count;
+		printk("[P%d HEVC PATH] Parser/IQIT/IPP/DBLK/OW/DDR/CMD WAITING \% : %.2f",
+				pbi->decode_idx - 2, path_wait_ratio);
+
+// iqit --> ipp
+		path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		if (path_transfer_count == 0) path_wait_ratio = 0.0;
+		else path_wait_ratio = (float)path_wait_count/(float)path_transfer_count;
+		printk(" %.2f", path_wait_ratio);
+
+// dblk <-- ipp
+		path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		if (path_transfer_count == 0) path_wait_ratio = 0.0;
+		else path_wait_ratio = (float)path_wait_count/(float)path_transfer_count;
+		printk(" %.2f", path_wait_ratio);
+
+// dblk --> ow
+		path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		if (path_transfer_count == 0) path_wait_ratio = 0.0;
+		else path_wait_ratio = (float)path_wait_count/(float)path_transfer_count;
+		printk(" %.2f", path_wait_ratio);
+
+// <--> DDR
+		path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		if (path_transfer_count == 0) path_wait_ratio = 0.0;
+		else path_wait_ratio = (float)path_wait_count/(float)path_transfer_count;
+		printk(" %.2f", path_wait_ratio);
+
+// CMD
+		path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA);
+		if (path_transfer_count == 0) path_wait_ratio = 0.0;
+		else path_wait_ratio = (float)path_wait_count/(float)path_transfer_count;
+		printk(" %.2f\n", path_wait_ratio);
+	}
+}
+
+#endif
+
+#ifdef MCRCC_ENABLE
+
+/* Cumulative MCRCC rates, published by mcrcc_get_hitrate() and consumed
+ * by decomp_get_hitrate() for the combined effective-hit-rate figure. */
+static int mcrcc_hit_rate;
+static int mcrcc_bypass_rate;
+
+/* Thin register-access shims keeping the original cache_util.c naming */
+#define C_Reg_Wr WRITE_VREG
+static void C_Reg_Rd(unsigned int adr, unsigned int *val)
+{
+	*val = READ_VREG(adr);
+}
+
+/* Pulse the MCRCC perf-counter reset bit (write 1 then 0). */
+static void mcrcc_perfcount_reset(struct AV1HW_s *hw)
+{
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE,
+		"[cache_util.c] Entered mcrcc_perfcount_reset...\n");
+	C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x1);
+	C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x0);
+	return;
+}
+
+/* Previous counter snapshots for per-interval (delta) statistics */
+static unsigned raw_mcr_cnt_total_prev;
+static unsigned hit_mcr_0_cnt_total_prev;
+static unsigned
hit_mcr_1_cnt_total_prev;
+static unsigned byp_mcr_cnt_nchcanv_total_prev;
+static unsigned byp_mcr_cnt_nchoutwin_total_prev;
+
+/*
+ * Read the MCRCC (motion-comp reference cache) performance counters and
+ * print both per-interval (delta since previous call) and cumulative
+ * hit/bypass rates. reset_pre != 0 restarts the interval bookkeeping.
+ * Counter selection: writing (sel<<1) to HEVCD_MCRCC_PERFMON_CTL makes
+ * HEVCD_MCRCC_PERFMON_DATA return that counter.
+ * Side effect: publishes mcrcc_hit_rate/mcrcc_bypass_rate for
+ * decomp_get_hitrate().
+ * FIX(review): the cumulative mcrcc_hit_rate/mcrcc_bypass_rate updates
+ * previously ran unconditionally after the raw_mcr_cnt != 0 check and
+ * divided by zero when the counters read back 0 (freshly reset); they
+ * are now computed only under the guard and zeroed otherwise.
+ */
+static void mcrcc_get_hitrate(struct AV1HW_s *hw, unsigned reset_pre)
+{
+	unsigned delta_hit_mcr_0_cnt;
+	unsigned delta_hit_mcr_1_cnt;
+	unsigned delta_raw_mcr_cnt;
+	unsigned delta_mcr_cnt_nchcanv;
+	unsigned delta_mcr_cnt_nchoutwin;
+
+	unsigned tmp;
+	unsigned raw_mcr_cnt;
+	unsigned hit_mcr_cnt;
+	unsigned byp_mcr_cnt_nchoutwin;
+	unsigned byp_mcr_cnt_nchcanv;
+	int hitrate;
+
+	if (reset_pre) {
+		/* restart interval statistics (e.g. new stream/instance) */
+		raw_mcr_cnt_total_prev = 0;
+		hit_mcr_0_cnt_total_prev = 0;
+		hit_mcr_1_cnt_total_prev = 0;
+		byp_mcr_cnt_nchcanv_total_prev = 0;
+		byp_mcr_cnt_nchoutwin_total_prev = 0;
+	}
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] Entered mcrcc_get_hitrate...\n");
+	C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x0<<1));
+	C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &raw_mcr_cnt);
+	C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x1<<1));
+	C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &hit_mcr_cnt);
+	C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x2<<1));
+	C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &byp_mcr_cnt_nchoutwin);
+	C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x3<<1));
+	C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &byp_mcr_cnt_nchcanv);
+
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "raw_mcr_cnt_total: %d\n",raw_mcr_cnt);
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "hit_mcr_cnt_total: %d\n",hit_mcr_cnt);
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "byp_mcr_cnt_nchoutwin_total: %d\n",byp_mcr_cnt_nchoutwin);
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "byp_mcr_cnt_nchcanv_total: %d\n",byp_mcr_cnt_nchcanv);
+
+	/* per-interval deltas since the previous invocation */
+	delta_raw_mcr_cnt = raw_mcr_cnt - raw_mcr_cnt_total_prev;
+	delta_mcr_cnt_nchcanv = byp_mcr_cnt_nchcanv - byp_mcr_cnt_nchcanv_total_prev;
+	delta_mcr_cnt_nchoutwin = byp_mcr_cnt_nchoutwin - byp_mcr_cnt_nchoutwin_total_prev;
+	raw_mcr_cnt_total_prev = raw_mcr_cnt;
+	byp_mcr_cnt_nchcanv_total_prev = byp_mcr_cnt_nchcanv;
+	byp_mcr_cnt_nchoutwin_total_prev = byp_mcr_cnt_nchoutwin;
+
+	C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x4<<1));
+	C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &tmp);
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "miss_mcr_0_cnt_total: %d\n",tmp);
+	C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x5<<1));
+	C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &tmp);
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "miss_mcr_1_cnt_total: %d\n",tmp);
+	C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x6<<1));
+	C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &tmp);
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "hit_mcr_0_cnt_total: %d\n",tmp);
+	delta_hit_mcr_0_cnt = tmp - hit_mcr_0_cnt_total_prev;
+	hit_mcr_0_cnt_total_prev = tmp;
+	C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x7<<1));
+	C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &tmp);
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "hit_mcr_1_cnt_total: %d\n",tmp);
+	delta_hit_mcr_1_cnt = tmp - hit_mcr_1_cnt_total_prev;
+	hit_mcr_1_cnt_total_prev = tmp;
+
+	if ( delta_raw_mcr_cnt != 0 ) {
+		hitrate = 100 * delta_hit_mcr_0_cnt/ delta_raw_mcr_cnt;
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "CANV0_HIT_RATE : %d\n", hitrate);
+		hitrate = 100 * delta_hit_mcr_1_cnt/delta_raw_mcr_cnt;
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "CANV1_HIT_RATE : %d\n", hitrate);
+		hitrate = 100 * delta_mcr_cnt_nchcanv/delta_raw_mcr_cnt;
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "NONCACH_CANV_BYP_RATE : %d\n", hitrate);
+		hitrate = 100 * delta_mcr_cnt_nchoutwin/delta_raw_mcr_cnt;
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "CACHE_OUTWIN_BYP_RATE : %d\n", hitrate);
+	}
+
+	if (raw_mcr_cnt != 0)
+	{
+		hitrate = 100*hit_mcr_cnt/raw_mcr_cnt;
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "MCRCC_HIT_RATE : %d\n", hitrate);
+		hitrate = 100*(byp_mcr_cnt_nchoutwin + byp_mcr_cnt_nchcanv)/raw_mcr_cnt;
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "MCRCC_BYP_RATE : %d\n", hitrate);
+		/* publish cumulative rates for decomp_get_hitrate() */
+		mcrcc_hit_rate = 100*hit_mcr_cnt/raw_mcr_cnt;
+		mcrcc_bypass_rate = 100*(byp_mcr_cnt_nchoutwin + byp_mcr_cnt_nchcanv)/raw_mcr_cnt;
+	} else {
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "MCRCC_HIT_RATE : na\n");
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "MCRCC_BYP_RATE : na\n");
+		/* no samples yet: keep published rates at a defined value
+		 * instead of dividing by zero (previous behavior) */
+		mcrcc_hit_rate = 0;
+		mcrcc_bypass_rate = 0;
+	}
+
+	return;
+}
+
+/* Pulse the decompression-cache perf-counter reset bit. */
+static void decomp_perfcount_reset(struct AV1HW_s *hw)
+{
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] Entered decomp_perfcount_reset...\n");
+	C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x1);
+	C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x0);
+	return;
+}
+
+/*
+ * Read and print the header-cache (hcache) and data-cache (dcache) hit
+ * rates of the frame decompression unit, plus the combined
+ * MCRCC+dcache effective hit rate.
+ */
+static void decomp_get_hitrate(struct AV1HW_s *hw)
+{
+	unsigned raw_mcr_cnt;
+	unsigned hit_mcr_cnt;
+	int hitrate;
+
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] Entered decomp_get_hitrate...\n");
+	C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x0<<1));
+	C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &raw_mcr_cnt);
+	C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x1<<1));
+	C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &hit_mcr_cnt);
+
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "hcache_raw_cnt_total: %d\n",raw_mcr_cnt);
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "hcache_hit_cnt_total: %d\n",hit_mcr_cnt);
+
+	if ( raw_mcr_cnt != 0 ) {
+		hitrate = 100*hit_mcr_cnt/raw_mcr_cnt;
+		/* FIX(review): was "%.2f\%" with an int argument - %f is not
+		 * supported by printk-style formatting and \% is an invalid
+		 * escape; print the integer percentage instead. */
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_HCACHE_HIT_RATE : %d%%\n", hitrate);
+	} else {
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_HCACHE_HIT_RATE : na\n");
+	}
+	C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x2<<1));
+	C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &raw_mcr_cnt);
+	C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x3<<1));
+	C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &hit_mcr_cnt);
+
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "dcache_raw_cnt_total: %d\n",raw_mcr_cnt);
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "dcache_hit_cnt_total: %d\n",hit_mcr_cnt);
+
+	if ( raw_mcr_cnt != 0 ) {
+		hitrate = 100*hit_mcr_cnt/raw_mcr_cnt;
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_DCACHE_HIT_RATE : %d\n", hitrate);
+
+		//hitrate = ((float)hit_mcr_cnt/(float)raw_mcr_cnt);
+		//hitrate = (mcrcc_hit_rate + (mcrcc_bypass_rate *
hitrate))*100;
+		hitrate = mcrcc_hit_rate + (mcrcc_bypass_rate * hit_mcr_cnt/raw_mcr_cnt);
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "MCRCC_DECOMP_DCACHE_EFFECTIVE_HIT_RATE : %d\n", hitrate);
+
+	} else {
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_DCACHE_HIT_RATE : na\n");
+	}
+
+	return;
+}
+
+/*
+ * Print the compressed-reference compression ratio: (fast + slow
+ * compressed block count) relative to the raw uncompressed block count,
+ * read from decompression perf counters 4..6.
+ */
+static void decomp_get_comprate(struct AV1HW_s *hw)
+{
+	unsigned raw_ucomp_cnt;
+	unsigned fast_comp_cnt;
+	unsigned slow_comp_cnt;
+	int comprate;
+
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] Entered decomp_get_comprate...\n");
+	C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x4<<1));
+	C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &fast_comp_cnt);
+	C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x5<<1));
+	C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &slow_comp_cnt);
+	C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x6<<1));
+	C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &raw_ucomp_cnt);
+
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "decomp_fast_comp_total: %d\n",fast_comp_cnt);
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "decomp_slow_comp_total: %d\n",slow_comp_cnt);
+	av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "decomp_raw_uncomp_total: %d\n",raw_ucomp_cnt);
+
+	if ( raw_ucomp_cnt != 0 )
+	{
+		comprate = 100*(fast_comp_cnt + slow_comp_cnt)/raw_ucomp_cnt;
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_COMP_RATIO : %d\n", comprate);
+	} else
+	{
+		av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_COMP_RATIO : na\n");
+	}
+	return;
+}
+
+/* Dump all cache statistics when the debug flag is set. */
+static void dump_hit_rate(struct AV1HW_s *hw)
+{
+	if (debug & AV1_DEBUG_CACHE_HIT_RATE) {
+		mcrcc_get_hitrate(hw, hw->m_ins_flag);
+		decomp_get_hitrate(hw);
+		decomp_get_comprate(hw);
+	}
+}
+
+/*
+ * Absolute order_hint distance between a reference frame and the
+ * current frame, honoring AV1's modular order_hint arithmetic: the
+ * raw difference is re-signed within (ohint_bits_min1 + 1) bits via
+ * (diff & (m - 1)) - (diff & m), matching the spec's relative-distance
+ * computation, then the magnitude is returned.
+ */
+static uint32_t mcrcc_get_abs_frame_distance(struct AV1HW_s *hw, uint32_t refid, uint32_t ref_ohint, uint32_t curr_ohint, uint32_t ohint_bits_min1)
+{
+	int32_t diff_ohint0;
+	int32_t diff_ohint1;
+	uint32_t abs_dist;
+	uint32_t m;
+	uint32_t m_min1;
+
+	diff_ohint0 = ref_ohint - curr_ohint;
+
+	m = (1 << ohint_bits_min1);
+
m_min1 = m -1; + + diff_ohint1 = (diff_ohint0 & m_min1 ) - (diff_ohint0 & m); + + abs_dist = (diff_ohint1 < 0) ? -diff_ohint1 : diff_ohint1; + + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, + "[cache_util.c] refid:%0x ref_orderhint:%0x curr_orderhint:%0x orderhint_bits_min1:%0x abd_dist:%0x\n", + refid, ref_ohint, curr_ohint, ohint_bits_min1,abs_dist); + + return abs_dist; +} + +static void config_mcrcc_axi_hw_nearest_ref(struct AV1HW_s *hw) +{ + uint32_t i; + uint32_t rdata32; + uint32_t dist_array[8]; + uint32_t refcanvas_array[2]; + uint32_t orderhint_bits; + unsigned char is_inter; + AV1_COMMON *cm = &hw->common; + PIC_BUFFER_CONFIG *curr_pic_config; + int32_t curr_orderhint; + int cindex0 = LAST_FRAME; + uint32_t last_ref_orderhint_dist = 1023; // large distance + uint32_t curr_ref_orderhint_dist = 1023; // large distance + int cindex1; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, + "[test.c] #### config_mcrcc_axi_hw ####\n"); + + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); // reset mcrcc + + is_inter = av1_frame_is_inter(&hw->common); //((pbi->common.frame_type != KEY_FRAME) && (!pbi->common.intra_only)) ? 
1 : 0; + if ( !is_inter ) { // I-PIC + //WRITE_VREG(HEVCD_MCRCC_CTL1, 0x1); // remove reset -- disables clock + WRITE_VREG(HEVCD_MCRCC_CTL2, 0xffffffff); // Replace with current-frame canvas + WRITE_VREG(HEVCD_MCRCC_CTL3, 0xffffffff); // + WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); // enable mcrcc progressive-mode + return; + } + +#if 0 + //printk("before call mcrcc_get_hitrate\r\n"); + mcrcc_get_hitrate(hw); + decomp_get_hitrate(hw); + decomp_get_comprate(hw); +#endif + + // Find absolute orderhint delta + curr_pic_config = &cm->cur_frame->buf; + curr_orderhint = curr_pic_config->order_hint; + orderhint_bits = cm->seq_params.order_hint_info.order_hint_bits_minus_1; + for (i = LAST_FRAME; i <= ALTREF_FRAME; i++) { + int32_t ref_orderhint = 0; + PIC_BUFFER_CONFIG *pic_config; + //int32_t tmp; + pic_config = av1_get_ref_frame_spec_buf(cm,i); + if (pic_config) + ref_orderhint = pic_config->order_hint; + //tmp = curr_orderhint - ref_orderhint; + //dist_array[i] = (tmp < 0) ? -tmp : tmp; + dist_array[i] = mcrcc_get_abs_frame_distance(hw, i,ref_orderhint, curr_orderhint, orderhint_bits); + } + // Get smallest orderhint distance refid + for (i = LAST_FRAME; i <= ALTREF_FRAME; i++) { + PIC_BUFFER_CONFIG *pic_config; + pic_config = av1_get_ref_frame_spec_buf(cm, i); + curr_ref_orderhint_dist = dist_array[i]; + if ( curr_ref_orderhint_dist < last_ref_orderhint_dist) { + cindex0 = i; + last_ref_orderhint_dist = curr_ref_orderhint_dist; + } + } + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (cindex0 << 8) | (1<<1) | 0); + refcanvas_array[0] = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR) & 0xffff; + + last_ref_orderhint_dist = 1023; // large distance + curr_ref_orderhint_dist = 1023; // large distance + // Get 2nd smallest orderhint distance refid + cindex1 = LAST_FRAME; + for (i = LAST_FRAME; i <= ALTREF_FRAME; i++) { + PIC_BUFFER_CONFIG *pic_config; + pic_config = av1_get_ref_frame_spec_buf(cm, i); + curr_ref_orderhint_dist = dist_array[i]; + 
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (i << 8) | (1<<1) | 0); + refcanvas_array[1] = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR) & 0xffff; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] curr_ref_orderhint_dist:%x last_ref_orderhint_dist:%x refcanvas_array[0]:%x refcanvas_array[1]:%x\n", + curr_ref_orderhint_dist, last_ref_orderhint_dist, refcanvas_array[0],refcanvas_array[1]); + if ((curr_ref_orderhint_dist < last_ref_orderhint_dist) && (refcanvas_array[0] != refcanvas_array[1])) { + cindex1 = i; + last_ref_orderhint_dist = curr_ref_orderhint_dist; + } + } + + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (cindex0 << 8) | (1<<1) | 0); + refcanvas_array[0] = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (cindex1 << 8) | (1<<1) | 0); + refcanvas_array[1] = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] refcanvas_array[0](index %d):%x refcanvas_array[1](index %d):%x\n", + cindex0, refcanvas_array[0], cindex1, refcanvas_array[1]); + + // lowest delta_picnum + rdata32 = refcanvas_array[0]; + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | ( rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + // 2nd-lowest delta_picnum + rdata32 = refcanvas_array[1]; + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | ( rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32); + + WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); // enable mcrcc progressive-mode + return; +} + + +#endif + +#if 0 +void release_unused_4k_ext(MMU_BUFF_MGR *mmumgr, int32_t cur_buf_idx, long used_4k_num) +{ + int32_t release_4k_position; + int32_t i; + + if(mmumgr->mmu_pic_count < 0) return; + + mmumgr->mmu_offset_seed = used_4k_num & 0xff; + if(used_4k_num > mmumgr->mmu_buf[cur_buf_idx].mmu_4k_number) { + printk("[MMU MEM ERROR] : Use more 4K Page than allocated %d > ([%d] = %d)!!\r\n", used_4k_num, cur_buf_idx, mmumgr->mmu_buf[cur_buf_idx].mmu_4k_number); + } + else printk("[MMU MEM 
RELEASE] : P%d(buffer %d) used %d of %d 4k buffer (%d%c)\r\n", mmumgr->mmu_pic_count, cur_buf_idx, used_4k_num, mmumgr->mmu_buf[cur_buf_idx].mmu_4k_number, used_4k_num*100/mmumgr->mmu_buf[cur_buf_idx].mmu_4k_number, '%'); + + for(i = used_4k_num; i<mmumgr->mmu_buf[cur_buf_idx].mmu_4k_number; i++){ + release_4k_position = mmumgr->mmu_buf[cur_buf_idx].mmu_4k_index[i] - mmumgr->MC_BUFFER_START_4K_ADR; + printk("[MMU MEM RELEASE DEBUG] release_4k_position[%d] : 0x%x\n", i, release_4k_position+mmumgr->MC_BUFFER_START_4K_ADR); + mmumgr->mmu_4k_status[release_4k_position] = 0; + } + mmumgr->cur_mem_usage -= (mmumgr->mmu_buf[cur_buf_idx].mmu_4k_number - used_4k_num)*4096; + mmumgr->mmu_buf[cur_buf_idx].mmu_4k_number = used_4k_num; +} +#endif + +int av1_continue_decoding(struct AV1HW_s *hw, int obu_type) +{ + int ret = 0; +#ifdef SANITY_CHECK + param_t* params = &hw->aom_param; +#endif +#if 1 + //def CHANGE_DONE + AV1Decoder *pbi = hw->pbi; + AV1_COMMON *const cm = pbi->common; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int i; + + av1_print(hw, AOM_DEBUG_HW_MORE, + "%s: pbi %p cm %p cur_frame %p %d has_seq %d has_keyframe %d\n", + __func__, pbi, cm, cm->cur_frame, + pbi->bufmgr_proc_count, + hw->has_sequence, + hw->has_keyframe); + + if (hw->has_sequence == 0) { + av1_print(hw, 0, + "no sequence head, skip\n"); + if (!hw->m_ins_flag) + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + return -2; + } else if (hw->has_keyframe == 0 && + hw->aom_param.p.frame_type != KEY_FRAME){ + av1_print(hw, 0, + "no key frame, skip\n"); + on_no_keyframe_skiped++; + if (!hw->m_ins_flag) + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + return -2; + } + hw->has_keyframe = 1; + on_no_keyframe_skiped = 0; + + if (hw->is_used_v4l && ctx->param_sets_from_ucode) + hw->res_ch_flag = 0; + + //pre_decode_idx = pbi->decode_idx; + if (pbi->bufmgr_proc_count == 0 || + hw->one_compressed_data_done) { + hw->new_compressed_data = 1; + 
hw->one_compressed_data_done = 0; + } else { + hw->new_compressed_data = 0; + } +#ifdef SEND_MMU_USED_NUM + if (cm->prev_fb_idx != INVALID_IDX) { + long used_4k_num = aom_param.p.mmu_used_num; + if (used_4k_num != 0) { + printk("mmu free tail, index %d used_num 0x%x\n", + cm->prev_fb_idx, used_4k_num); + release_unused_4k_ext(&av1_mmumgr_m, cm->prev_fb_idx, used_4k_num); + } + +#ifdef AOM_AV1_MMU_DW + used_4k_num = aom_param.p.dw_mmu_used_num; + if (used_4k_num != 0) { + release_unused_4k_ext(&av1_mmumgr_dw, cm->prev_fb_idx, used_4k_num); + printk("dw mmu free tail, index %d used_num 0x%x\n", + cm->prev_fb_idx, used_4k_num); + } +#endif + } + cm->prev_fb_idx = INVALID_IDX; + +#endif +#ifdef SANITY_CHECK + ret = 0; + av1_print(hw, AOM_DEBUG_HW_MORE, + "Check Picture size, max (%d, %d), width/height (%d, %d), dec_width %d\n", + params->p.max_frame_width, + params->p.max_frame_height, + params->p.frame_width_scaled, + params->p.frame_height, + params->p.dec_frame_width + ); + + if (/*params->p.max_frame_width > MAX_PIC_WIDTH || + params->p.max_frame_height > MAX_PIC_HEIGHT ||*/ + (params->p.frame_width_scaled * params->p.frame_height) > MAX_SIZE_8K || + (params->p.dec_frame_width * params->p.frame_height) > MAX_SIZE_8K || + params->p.frame_width_scaled <= 0 || + params->p.dec_frame_width <= 0 || + params->p.frame_height <= 0) { + av1_print(hw, 0, "!!Picture size error, max (%d, %d), width/height (%d, %d), dec_width %d\n", + params->p.max_frame_width, + params->p.max_frame_height, + params->p.frame_width_scaled, + params->p.frame_height, + params->p.dec_frame_width + ); + ret = -1; + } +#endif + if (ret >= 0) { + ret = av1_bufmgr_process(pbi, &hw->aom_param, + hw->new_compressed_data, obu_type); + if (ret < 0) + return -1; + av1_print(hw, AOM_DEBUG_HW_MORE, + "%s: pbi %p cm %p cur_frame %p\n", + __func__, pbi, cm, cm->cur_frame); + + av1_print(hw, AOM_DEBUG_HW_MORE, + "1+++++++++++++++++++++++++++++++++++%d %p\n", + ret, cm->cur_frame); + if (hw->new_compressed_data) + 
WRITE_VREG(PIC_END_LCU_COUNT, 0); + } + if (ret > 0) { + /* the case when cm->show_existing_frame is 1 */ + /*case 3016*/ + av1_print(hw, AOM_DEBUG_HW_MORE, + "Decoding done (index=%d, show_existing_frame = %d)\n", + cm->cur_frame? cm->cur_frame->buf.index:-1, + cm->show_existing_frame + ); + + if (cm->cur_frame) { + PIC_BUFFER_CONFIG* cur_pic_config = &cm->cur_frame->buf; + if (debug & + AV1_DEBUG_BUFMGR_MORE) + dump_aux_buf(hw); + set_pic_aux_data(hw, + cur_pic_config, 0, 0); + } + config_next_ref_info_hw(hw); + + av1_print(hw, AOM_DEBUG_HW_MORE, + "aom_bufmgr_process=> %d,decode done, AOM_AV1_SEARCH_HEAD\r\n", ret); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + pbi->decode_idx++; + pbi->bufmgr_proc_count++; + hw->frame_decoded = 1; + return 0; + } else if (ret < 0) { + hw->frame_decoded = 1; + av1_print(hw, AOM_DEBUG_HW_MORE, + "aom_bufmgr_process=> %d, bufmgr e.r.r.o.r. %d, AOM_AV1_SEARCH_HEAD\r\n", + ret, cm->error.error_code); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + return 0; + } + else if (ret == 0) { + PIC_BUFFER_CONFIG* cur_pic_config = &cm->cur_frame->buf; + PIC_BUFFER_CONFIG* prev_pic_config = &cm->prev_frame->buf; + //struct segmentation_lf *seg_4lf = &hw->seg_4lf_store; + if (debug & + AV1_DEBUG_BUFMGR_MORE) + dump_aux_buf(hw); + set_dv_data(hw); + if (cm->show_frame && + hw->dv_data_buf != NULL) + copy_dv_data(hw, cur_pic_config); + /* to do:.. 
+ set_pic_aux_data(hw, + cur_pic_config, 0, 2);*/ + hw->frame_decoded = 0; + pbi->bufmgr_proc_count++; + if (hw->new_compressed_data == 0) { + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_DECODE_SLICE); + return 0; + } + av1_print(hw, AOM_DEBUG_HW_MORE, + " [PICTURE %d] cm->cur_frame->mi_size : (%d X %d) y_crop_size :(%d X %d)\n", + hw->frame_count, + cm->cur_frame->mi_cols, + cm->cur_frame->mi_rows, + cur_pic_config->y_crop_width, + cur_pic_config->y_crop_height); + if (cm->prev_frame > 0) { + av1_print(hw, AOM_DEBUG_HW_MORE, + " [SEGMENT] cm->prev_frame->segmentation_enabled : %d\n", + cm->prev_frame->segmentation_enabled); + av1_print(hw, AOM_DEBUG_HW_MORE, + " [SEGMENT] cm->prev_frame->mi_size : (%d X %d)\n", + cm->prev_frame->mi_cols, cm->prev_frame->mi_rows); + } + cm->cur_frame->prev_segmentation_enabled = (cm->prev_frame > 0) ? + (cm->prev_frame->segmentation_enabled & (cm->prev_frame->segmentation_update_map + | cm->prev_frame->prev_segmentation_enabled) & + (cm->cur_frame->mi_rows == cm->prev_frame->mi_rows) & + (cm->cur_frame->mi_cols == cm->prev_frame->mi_cols)) : 0; + WRITE_VREG(AV1_SKIP_MODE_INFO, + (cm->cur_frame->prev_segmentation_enabled << 31) | + (((cm->prev_frame > 0) ? cm->prev_frame->intra_only : 0) << 30) | + (((cm->prev_frame > 0) ? prev_pic_config->index : 0x1f) << 24) | + (((cm->cur_frame > 0) ? 
cur_pic_config->index : 0x1f) << 16) | + (cm->current_frame.skip_mode_info.ref_frame_idx_0 & 0xf) | + ((cm->current_frame.skip_mode_info.ref_frame_idx_1 & 0xf) << 4) | + (cm->current_frame.skip_mode_info.skip_mode_allowed << 8)); + cur_pic_config->decode_idx = pbi->decode_idx; + + av1_print(hw, AOM_DEBUG_HW_MORE, + "Decode Frame Data %d frame_type %d (%d) bufmgr_proc_count %d\n", + pbi->decode_idx, + cm->cur_frame->frame_type, + cm->current_frame.frame_type, + pbi->bufmgr_proc_count); + pbi->decode_idx++; + hw->frame_count++; +#ifdef SEND_MMU_USED_NUM + cm->prev_fb_idx = cm->cur_fb_idx_mmu; + cm->cur_fb_idx_mmu = cm->cur_frame->buf.index; +#endif + cur_pic_config->slice_type = cm->cur_frame->frame_type; + if (hw->chunk) { + av1_print(hw, AV1_DEBUG_OUT_PTS, + "%s, config pic pts %d, pts64 %lld, ts: %lld\n", + __func__, hw->chunk->pts, hw->chunk->pts64, hw->chunk->timestamp); + cur_pic_config->pts = hw->chunk->pts; + cur_pic_config->pts64 = hw->chunk->pts64; + cur_pic_config->timestamp = hw->chunk->timestamp; + + if (hw->is_used_v4l && !v4l_bitstream_id_enable) { + cur_pic_config->pts64 = hw->chunk->timestamp; + hw->chunk->timestamp = 0; + } + } + ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_START); +#ifdef DUAL_DECODE +#else + config_pic_size(hw, hw->aom_param.p.bit_depth); +#endif + if (get_mv_buf(hw, &cm->cur_frame->buf) < 0) { + av1_print(hw, 0, + "%s: Error get_mv_buf fail\n", + __func__); + ret = -1; + } + + if (ret >= 0 && hw->mmu_enable && ((hw->double_write_mode & 0x10) == 0)) { + ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + ret = av1_alloc_mmu(hw, + cm->cur_frame->buf.index, + cur_pic_config->y_crop_width, + cur_pic_config->y_crop_height, + hw->aom_param.p.bit_depth, + hw->frame_mmu_map_addr); + ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + if (ret >= 0) + cm->cur_fb_idx_mmu = cm->cur_frame->buf.index; + else + pr_err("can't alloc need mmu1,idx 
%d ret =%d\n", + cm->cur_frame->buf.index, ret); +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + ret = av1_alloc_mmu_dw(hw, + cm->cur_frame->buf.index, + cur_pic_config->y_crop_width, + cur_pic_config->y_crop_height, + hw->aom_param.p.bit_depth, + hw->dw_frame_mmu_map_addr); + if (ret >= 0) + cm->cur_fb_idx_mmu_dw = cm->cur_frame->buf.index; + else + pr_err("can't alloc need dw mmu1,idx %d ret =%d\n", + cm->cur_frame->buf.index, ret); + } +#endif +#ifdef DEBUG_CRC_ERROR + if (crc_debug_flag & 0x40) + mv_buffer_fill_zero(hw, &cm->cur_frame->buf); +#endif + } else { + ret = 0; + } + if (av1_frame_is_inter(&hw->common)) { + //if ((pbi->common.frame_type != KEY_FRAME) && (!pbi->common.intra_only)) { +#ifdef DUAL_DECODE +#else + config_mc_buffer(hw, hw->aom_param.p.bit_depth, 1); +#endif + config_mpred_hw(hw, 1); + } + else { + config_mc_buffer(hw, hw->aom_param.p.bit_depth, 0); + clear_mpred_hw(hw); + config_mpred_hw(hw, 0); + } +#ifdef DUAL_DECODE +#else +#ifdef MCRCC_ENABLE + config_mcrcc_axi_hw_nearest_ref(hw); +#endif + config_sao_hw(hw, &hw->aom_param); +#endif + + config_dblk_hw(hw); + + /* store segment_feature before shared sub-module run to fix mosaic on t5d */ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SC2) + WRITE_VREG(HEVC_PARSER_MEM_WR_ADDR, 0x11b0 + (cur_pic_config->index)); + else + WRITE_VREG(HEVC_PARSER_MEM_WR_ADDR, 0x1010 + (cur_pic_config->index)); + if (hw->aom_param.p.segmentation_enabled & 1) // segmentation_enabled + WRITE_VREG(HEVC_PARSER_MEM_RW_DATA, READ_VREG(AV1_REF_SEG_INFO)); + else + WRITE_VREG(HEVC_PARSER_MEM_RW_DATA, 0); + + av1_print(hw, AOM_DEBUG_HW_MORE, "HEVC_DEC_STATUS_REG <= AOM_AV1_DECODE_SLICE\n"); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_DECODE_SLICE); + + // Save segment_feature while hardware decoding + if (hw->seg_4lf->enabled) { + for (i = 0; i < 8; i++) { + cm->cur_frame->segment_feature[i] = READ_VREG(AOM_AV1_SEGMENT_FEATURE); + } + } else { + for (i = 0; i < 8; i++) { + cm->cur_frame->segment_feature[i] = 
(0x80000000 | (i << 22)); + } + } + } else { + av1_print(hw, AOM_DEBUG_HW_MORE, "Sequence head, Search next start code\n"); + cm->prev_fb_idx = INVALID_IDX; + //skip, search next start code + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_DECODE_SLICE); + } + ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_END); + return ret; + +#else + + bit_depth_luma = av1_param.p.bit_depth; + bit_depth_chroma = av1_param.p.bit_depth; + + if (hw->process_state != PROC_STATE_SENDAGAIN) { + ret = av1_bufmgr_process(hw, &av1_param); + if (!hw->m_ins_flag) + hw->result_done_count++; + } else { + union param_u *params = &av1_param; + if (hw->mmu_enable && ((hw->double_write_mode & 0x10) == 0)) { + ret = av1_alloc_mmu(hw, + cm->new_fb_idx, + params->p.width, + params->p.height, + params->p.bit_depth, + hw->frame_mmu_map_addr); + if (ret >= 0) + cm->cur_fb_idx_mmu = cm->new_fb_idx; + else + pr_err("can't alloc need mmu1,idx %d ret =%d\n", + cm->new_fb_idx, ret); + } else { + ret = 0; + } + WRITE_VREG(HEVC_PARSER_PICTURE_SIZE, + (params->p.height << 16) | params->p.width); + } + if (ret < 0) { + pr_info("av1_bufmgr_process=> %d, AV1_10B_DISCARD_NAL\r\n", ret); + WRITE_VREG(HEVC_DEC_STATUS_REG, AV1_10B_DISCARD_NAL); + cm->show_frame = 0; + if (hw->mmu_enable) + av1_recycle_mmu_buf(hw); + + if (hw->m_ins_flag) { + hw->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + vdec_schedule_work(&hw->work); + } + return ret; + } else if (ret == 0) { + struct PIC_BUFFER_CONFIG_s *cur_pic_config + = &cm->cur_frame->buf; + cur_pic_config->decode_idx = hw->frame_count; + + if (hw->process_state != PROC_STATE_SENDAGAIN) { + if (!hw->m_ins_flag) { + hw->frame_count++; + decode_frame_count[hw->index] + = hw->frame_count; + } + if (hw->chunk) { + cur_pic_config->pts = hw->chunk->pts; + cur_pic_config->pts64 = hw->chunk->pts64; + } + } + /*pr_info("Decode Frame Data %d\n", hw->frame_count);*/ + config_pic_size(hw, av1_param.p.bit_depth); + + if ((hw->common.frame_type != KEY_FRAME) + 
&& (!hw->common.intra_only)) { + config_mc_buffer(hw, av1_param.p.bit_depth); + config_mpred_hw(hw); + } else { + clear_mpred_hw(hw); + } +#ifdef MCRCC_ENABLE + if (mcrcc_cache_alg_flag) + config_mcrcc_axi_hw_new(hw); + else + config_mcrcc_axi_hw(hw); +#endif + config_sao_hw(hw, &av1_param); + /*pr_info("HEVC_DEC_STATUS_REG <= AV1_10B_DECODE_SLICE\n");*/ + WRITE_VREG(HEVC_DEC_STATUS_REG, AV1_10B_DECODE_SLICE); + } else { + pr_info("Skip search next start code\n"); + cm->prev_fb_idx = INVALID_IDX; + /*skip, search next start code*/ + WRITE_VREG(HEVC_DEC_STATUS_REG, AV1_10B_DECODE_SLICE); + } + hw->process_state = PROC_STATE_DECODESLICE; + if (hw->mmu_enable && ((hw->double_write_mode & 0x10) == 0)) { + if (hw->last_put_idx < hw->used_buf_num) { + struct RefCntBuffer_s *frame_bufs = + cm->buffer_pool->frame_bufs; + int i = hw->last_put_idx; + /*free not used buffers.*/ + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.index != -1)) { + if (pbi->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(pbi, i); + + decoder_mmu_box_free_idx(ibuf->mmu_box, i); + } else { + decoder_mmu_box_free_idx(pbi->mmu_box, i); + } + } + hw->last_put_idx = -1; + } + } + return ret; +#endif +} + +static void fill_frame_info(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *frame, + unsigned int framesize, + unsigned int pts) +{ + struct vframe_qos_s *vframe_qos = &hw->vframe_qos; + + if (frame->slice_type == KEY_FRAME) + vframe_qos->type = 1; + else if (frame->slice_type == INTER_FRAME) + vframe_qos->type = 2; +/* +#define SHOW_QOS_INFO +*/ + vframe_qos->size = framesize; + vframe_qos->pts = pts; +#ifdef SHOW_QOS_INFO + av1_print(hw, 0, "slice:%d\n", frame->slice_type); +#endif + vframe_qos->max_mv = frame->max_mv; + vframe_qos->avg_mv = frame->avg_mv; + vframe_qos->min_mv = frame->min_mv; +#ifdef SHOW_QOS_INFO + av1_print(hw, 0, "mv: max:%d, avg:%d, min:%d\n", + vframe_qos->max_mv, + vframe_qos->avg_mv, + 
vframe_qos->min_mv); +#endif + vframe_qos->max_qp = frame->max_qp; + vframe_qos->avg_qp = frame->avg_qp; + vframe_qos->min_qp = frame->min_qp; +#ifdef SHOW_QOS_INFO + av1_print(hw, 0, "qp: max:%d, avg:%d, min:%d\n", + vframe_qos->max_qp, + vframe_qos->avg_qp, + vframe_qos->min_qp); +#endif + vframe_qos->max_skip = frame->max_skip; + vframe_qos->avg_skip = frame->avg_skip; + vframe_qos->min_skip = frame->min_skip; +#ifdef SHOW_QOS_INFO + av1_print(hw, 0, "skip: max:%d, avg:%d, min:%d\n", + vframe_qos->max_skip, + vframe_qos->avg_skip, + vframe_qos->min_skip); +#endif + vframe_qos->num++; + /* + if (hw->frameinfo_enable) + vdec_fill_frame_info(vframe_qos, 1); + */ +} + +/* only when we decoded one field or one frame, +we can call this function to get qos info*/ +static void get_picture_qos_info(struct AV1HW_s *hw) +{ + struct PIC_BUFFER_CONFIG_s *frame = &hw->cur_buf->buf; + + if (!frame) + return; + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + unsigned char a[3]; + unsigned char i, j, t; + unsigned long data; + + data = READ_VREG(HEVC_MV_INFO); + if (frame->slice_type == KEY_FRAME) + data = 0; + a[0] = data & 0xff; + a[1] = (data >> 8) & 0xff; + a[2] = (data >> 16) & 0xff; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_mv = a[2]; + frame->avg_mv = a[1]; + frame->min_mv = a[0]; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "mv data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + + data = READ_VREG(HEVC_QP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_qp = a[2]; + frame->avg_qp = a[1]; + frame->min_qp = a[0]; + + 
av1_print(hw, AV1_DEBUG_QOS_INFO, + "qp data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + + data = READ_VREG(HEVC_SKIP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_skip = a[2]; + frame->avg_skip = a[1]; + frame->min_skip = a[0]; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "skip data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + } else { + uint32_t blk88_y_count; + uint32_t blk88_c_count; + uint32_t blk22_mv_count; + uint32_t rdata32; + int32_t mv_hi; + int32_t mv_lo; + uint32_t rdata32_l; + uint32_t mvx_L0_hi; + uint32_t mvy_L0_hi; + uint32_t mvx_L1_hi; + uint32_t mvy_L1_hi; + int64_t value; + uint64_t temp_value; + int pic_number = frame->decode_idx; + + frame->max_mv = 0; + frame->avg_mv = 0; + frame->min_mv = 0; + + frame->max_skip = 0; + frame->avg_skip = 0; + frame->min_skip = 0; + + frame->max_qp = 0; + frame->avg_qp = 0; + frame->min_qp = 0; + + av1_print(hw, AV1_DEBUG_QOS_INFO, "slice_type:%d, poc:%d\n", + frame->slice_type, + pic_number); + + /* set rd_idx to 0 */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, 0); + + blk88_y_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_y_count == 0) { + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] NO Data yet.\n", + pic_number); + + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_y_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_y_count, + rdata32, blk88_y_count); + + frame->avg_qp = rdata32/blk88_y_count; + /* intra_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y intra rate : %d%c 
(%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); + + /* skipped_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); + + frame->avg_skip = rdata32*100/blk88_y_count; + /* coeff_non_zero_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_y_count*1)), + '%', rdata32); + + /* blk66_c_count */ + blk88_c_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_c_count == 0) { + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] NO Data yet.\n", + pic_number); + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_c_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_c_count, + rdata32, blk88_c_count); + + /* intra_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); + + /* skipped_cu_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); + + /* coeff_non_zero_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_c_count*1)), + '%', rdata32); + + /* 1'h0, qp_c_max[6:0], 1'h0, qp_c_min[6:0], + 1'h0, qp_y_max[6:0], 1'h0, qp_y_min[6:0] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP min : %d\n", + 
pic_number, (rdata32>>0)&0xff); + + frame->min_qp = (rdata32>>0)&0xff; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP max : %d\n", + pic_number, (rdata32>>8)&0xff); + + frame->max_qp = (rdata32>>8)&0xff; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP min : %d\n", + pic_number, (rdata32>>16)&0xff); + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP max : %d\n", + pic_number, (rdata32>>24)&0xff); + + /* blk22_mv_count */ + blk22_mv_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk22_mv_count == 0) { + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] NO MV Data yet.\n", + pic_number); + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* mvy_L1_count[39:32], mvx_L1_count[39:32], + mvy_L0_count[39:32], mvx_L0_count[39:32] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + /* should all be 0x00 or 0xff */ + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MV AVG High Bits: 0x%X\n", + pic_number, rdata32); + + mvx_L0_hi = ((rdata32>>0)&0xff); + mvy_L0_hi = ((rdata32>>8)&0xff); + mvx_L1_hi = ((rdata32>>16)&0xff); + mvy_L1_hi = ((rdata32>>24)&0xff); + + /* mvx_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvx_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + value = div_s64(value, blk22_mv_count); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 AVG : %d (%lld/%d)\n", + pic_number, (int)value, + value, blk22_mv_count); + + frame->avg_mv = value; + + /* mvy_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvy_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 AVG : %d (%lld/%d)\n", + pic_number, 
rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* mvx_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvx_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* mvy_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvy_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* {mvx_L0_max, mvx_L0_min} // format : {sign, abs[14:0]} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 MAX : %d\n", + pic_number, mv_hi); + + frame->max_mv = mv_hi; + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 MIN : %d\n", + pic_number, mv_lo); + + frame->min_mv = mv_lo; + + /* {mvy_L0_max, mvy_L0_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 MIN : %d\n", + pic_number, mv_lo); + + /* {mvx_L1_max, mvx_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - 
mv_hi; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L1 MIN : %d\n", + pic_number, mv_lo); + + /* {mvy_L1_max, mvy_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L1 MIN : %d\n", + pic_number, mv_lo); + + rdata32 = READ_VREG(HEVC_PIC_QUALITY_CTRL); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] After Read : VDEC_PIC_QUALITY_CTRL : 0x%x\n", + pic_number, rdata32); + + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + } +} + +static int load_param(struct AV1HW_s *hw, union param_u *params, uint32_t dec_status) +{ + int i; + unsigned long flags; + int head_type = 0; + if (dec_status == AOM_AV1_SEQ_HEAD_PARSER_DONE) + head_type = OBU_SEQUENCE_HEADER; + else if (dec_status == AOM_AV1_FRAME_HEAD_PARSER_DONE) + head_type = OBU_FRAME_HEADER; + else if (dec_status == AOM_AV1_FRAME_PARSER_DONE) + head_type = OBU_FRAME; + else if (dec_status == AOM_AV1_REDUNDANT_FRAME_HEAD_PARSER_DONE) + head_type = OBU_REDUNDANT_FRAME_HEADER; + else { + //printf("Error, dec_status of 0x%x, not supported!!!\n", dec_status); + return -1; + } + av1_print2(AOM_DEBUG_HW_MORE, "load_param: ret 0x%x\n", head_type); + ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_START); + if (debug&AOM_AV1_DEBUG_SEND_PARAM_WITH_REG) { + get_rpm_param(params); + } + else { + for (i = 0; i < (RPM_END-RPM_BEGIN); i += 4) { + int32_t ii; + for (ii = 0; ii < 4; ii++) { + params->l.data[i+ii]=hw->rpm_ptr[i+3-ii]; + } + } + } 
+ ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_END); + + params->p.enable_ref_frame_mvs = (params->p.seq_flags >> 7) & 0x1; + params->p.enable_superres = (params->p.seq_flags >> 15) & 0x1; + + if (debug & AV1_DEBUG_BUFMGR_MORE) { + lock_buffer_pool(hw->common.buffer_pool, flags); + pr_info("aom_param: (%d)\n", hw->pbi->decode_idx); + //pbi->slice_idx++; + for ( i = 0; i < (RPM_END-RPM_BEGIN); i++) { + pr_info("%04x ", params->l.data[i]); + if (((i + 1) & 0xf) == 0) + pr_info("\n"); + } + unlock_buffer_pool(hw->common.buffer_pool, flags); + } + return head_type; +} + +static int av1_postproc(struct AV1HW_s *hw) +{ + if (hw->postproc_done) + return 0; + hw->postproc_done = 1; + return av1_bufmgr_postproc(hw->pbi, hw->frame_decoded); +} + +static void vav1_get_comp_buf_info(struct AV1HW_s *hw, + struct vdec_comp_buf_info *info) +{ + u16 bit_depth = hw->param.p.bit_depth; + + info->max_size = av1_max_mmu_buf_size( + hw->max_pic_w, + hw->max_pic_h); + info->header_size = av1_get_header_size( + hw->frame_width, + hw->frame_height); + info->frame_buffer_size = av1_mmu_page_num( + hw, hw->frame_width, + hw->frame_height, + bit_depth == 0); +} + +static int vav1_get_ps_info(struct AV1HW_s *hw, struct aml_vdec_ps_infos *ps) +{ + ps->visible_width = hw->frame_width; + ps->visible_height = hw->frame_height; + ps->coded_width = ALIGN(hw->frame_width, 64); + ps->coded_height = ALIGN(hw->frame_height, 64); + ps->dpb_size = hw->used_buf_num; + + return 0; +} + + +static int v4l_res_change(struct AV1HW_s *hw) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct AV1_Common_s *const cm = &hw->common; + int ret = 0; + + if (ctx->param_sets_from_ucode && + hw->res_ch_flag == 0) { + struct aml_vdec_ps_infos ps; + struct vdec_comp_buf_info comp; + + if ((cm->width != 0 && + cm->height != 0) && + (hw->frame_width != cm->width || + hw->frame_height != cm->height)) { + + av1_print(hw, 0, + "%s (%d,%d)=>(%d,%d)\r\n", __func__, 
cm->width, + cm->height, hw->frame_width, hw->frame_height); + + if (get_valid_double_write_mode(hw) != 16) { + vav1_get_comp_buf_info(hw, &comp); + vdec_v4l_set_comp_buf_info(ctx, &comp); + } + + vav1_get_ps_info(hw, &ps); + vdec_v4l_set_ps_infos(ctx, &ps); + vdec_v4l_res_ch_event(ctx); + hw->v4l_params_parsed = false; + hw->res_ch_flag = 1; + ctx->v4l_resolution_change = 1; + hw->eos = 1; + //del_timer_sync(&pbi->timer); + notify_v4l_eos(hw_to_vdec(hw)); + ret = 1; + } + } + + return ret; +} + +static irqreturn_t vav1_isr_thread_fn(int irq, void *data) +{ + struct AV1HW_s *hw = (struct AV1HW_s *)data; + unsigned int dec_status = hw->dec_status; + int obu_type; + int ret = 0; + + if (dec_status == AOM_AV1_FRAME_HEAD_PARSER_DONE || + dec_status == AOM_AV1_SEQ_HEAD_PARSER_DONE || + dec_status == AOM_AV1_FRAME_PARSER_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_START); + } + else if (dec_status == AOM_AV1_DEC_PIC_END || + dec_status == AOM_NAL_DECODE_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_PIC_DONE_START); + } + + if (hw->eos) + return IRQ_HANDLED; + hw->wait_buf = 0; + if ((dec_status == AOM_NAL_DECODE_DONE) || + (dec_status == AOM_SEARCH_BUFEMPTY) || + (dec_status == AOM_DECODE_BUFEMPTY) + ) { + if (hw->m_ins_flag) { + reset_process_time(hw); + if (!vdec_frame_based(hw_to_vdec(hw))) + dec_again_process(hw); + else { + hw->dec_result = DEC_RESULT_DONE; + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + vdec_schedule_work(&hw->work); + } + } + hw->process_busy = 0; + return IRQ_HANDLED; + } else if (dec_status == AOM_AV1_DEC_PIC_END) { + struct AV1_Common_s *const cm = &hw->common; + struct PIC_BUFFER_CONFIG_s *frame = &cm->cur_frame->buf; + struct vdec_s *vdec = hw_to_vdec(hw); +#if 1 + u32 fg_reg0, fg_reg1, num_y_points, num_cb_points, num_cr_points; + WRITE_VREG(HEVC_FGS_IDX, 0); + fg_reg0 = READ_VREG(HEVC_FGS_DATA); + fg_reg1 = READ_VREG(HEVC_FGS_DATA); + num_y_points = fg_reg1 & 
0xf; + num_cr_points = (fg_reg1 >> 8) & 0xf; + num_cb_points = (fg_reg1 >> 4) & 0xf; + if ((num_y_points > 0) || + ((num_cb_points > 0) | ((fg_reg0 >> 17) & 0x1)) || + ((num_cr_points > 0) | ((fg_reg0 >> 17) & 0x1))) + hw->fgs_valid = 1; + else + hw->fgs_valid = 0; + av1_print(hw, AOM_DEBUG_HW_MORE, + "fg_data0 0x%x fg_data1 0x%x fg_valid %d\n", + fg_reg0, fg_reg1, hw->fgs_valid); +#else + if (READ_VREG(HEVC_FGS_CTRL) & + ((1 << 4) | (1 << 5) | (1 << 6))) + hw->fgs_valid = 1; + else + hw->fgs_valid = 0; +#endif + decode_frame_count[hw->index] = hw->frame_count; + if (hw->m_ins_flag) { +#ifdef USE_DEC_PIC_END + if (READ_VREG(PIC_END_LCU_COUNT) != 0) { + hw->frame_decoded = 1; + if (cm->cur_frame && vdec->mvfrm && frame) { + frame->hw_decode_time = + local_clock() - vdec->mvfrm->hw_decode_start; + frame->frame_size2 = vdec->mvfrm->frame_size; + } + hw->gvs->frame_count = hw->frame_count; + /* + In c module, multi obus are put in one packet, which is decoded + with av1_receive_compressed_data(). + For STREAM_MODE or SINGLE_MODE, there is no packet boundary, + we assume each packet must and only include one picture of data (LCUs) + or cm->show_existing_frame is 1 + */ + av1_print(hw, AOM_DEBUG_HW_MORE, + "Decoding done (index %d), fgs_valid %d data_size 0x%x shiftbyte 0x%x\n", + cm->cur_frame? 
cm->cur_frame->buf.index:-1, + hw->fgs_valid, + hw->data_size, + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + hw->config_next_ref_info_flag = 1; /*to do: low_latency_flag case*/ + //config_next_ref_info_hw(hw); + } +#endif + + if (get_picture_qos) + get_picture_qos_info(hw); + + reset_process_time(hw); + + if (hw->m_ins_flag && hw->mmu_enable && + (debug & AOM_DEBUG_DIS_RECYCLE_MMU_TAIL) == 0) { + long used_4k_num = + (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16); + if (cm->cur_frame != NULL) { + hevc_mmu_dma_check(hw_to_vdec(hw)); + + av1_print(hw, AOM_DEBUG_HW_MORE, "mmu free tail, index %d used_num 0x%x\n", + cm->cur_frame->buf.index, used_4k_num); + if (hw->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(hw, cm->cur_fb_idx_mmu); + + decoder_mmu_box_free_idx_tail( + ibuf->mmu_box, + ibuf->index, + used_4k_num); + } else { + decoder_mmu_box_free_idx_tail(hw->mmu_box, + cm->cur_frame->buf.index, used_4k_num); + } +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + used_4k_num = + (READ_VREG(HEVC_SAO_MMU_STATUS2) >> 16); + decoder_mmu_box_free_idx_tail(hw->mmu_box_dw, + cm->cur_frame->buf.index, used_4k_num); + av1_print(hw, AOM_DEBUG_HW_MORE, "dw mmu free tail, index %d used_num 0x%x\n", + cm->cur_frame->buf.index, used_4k_num); + } +#endif + } + + + } + /* + if (debug & + AV1_DEBUG_BUFMGR_MORE) + dump_aux_buf(hw); + set_aux_data(hw, + &cm->cur_frame->buf, 0, 0); + */ + if (/*hw->vf_pre_count == 0 ||*/ hw->low_latency_flag) + av1_postproc(hw); + + if (multi_frames_in_one_pack && + hw->frame_decoded && + READ_VREG(HEVC_SHIFT_BYTE_COUNT) < hw->data_size) { +#ifdef DEBUG_CRC_ERROR + if ((crc_debug_flag & 0x40) && cm->cur_frame) + dump_mv_buffer(hw, &cm->cur_frame->buf); +#endif + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + av1_print(hw, AOM_DEBUG_HW_MORE, + "PIC_END, fgs_valid %d search head ...\n", + hw->fgs_valid); + if (hw->config_next_ref_info_flag) + config_next_ref_info_hw(hw); + ATRACE_COUNTER(hw->trace.decode_time_name, 
DECODER_ISR_THREAD_EDN); + } else { +#ifdef DEBUG_CRC_ERROR + if ((crc_debug_flag & 0x40) && cm->cur_frame) + dump_mv_buffer(hw, &cm->cur_frame->buf); +#endif + hw->dec_result = DEC_RESULT_DONE; + amhevc_stop(); +#ifdef MCRCC_ENABLE + if (mcrcc_cache_alg_flag) + dump_hit_rate(hw); +#endif + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + vdec_schedule_work(&hw->work); + } + } else { + av1_print(hw, AOM_DEBUG_HW_MORE, + "PIC_END, fgs_valid %d search head ...\n", + hw->fgs_valid); +#ifdef USE_DEC_PIC_END + if (READ_VREG(PIC_END_LCU_COUNT) != 0) { + hw->frame_decoded = 1; + /* + In c module, multi obus are put in one packet, which is decoded + with av1_receive_compressed_data(). + For STREAM_MODE or SINGLE_MODE, there is no packet boundary, + we assume each packet must and only include one picture of data (LCUs) + or cm->show_existing_frame is 1 + */ + if (cm->cur_frame) + av1_print(hw, AOM_DEBUG_HW_MORE, "Decoding done (index %d)\n", + cm->cur_frame? cm->cur_frame->buf.index:-1); + config_next_ref_info_hw(hw); + } +#endif + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + /* + if (debug & + AV1_DEBUG_BUFMGR_MORE) + dump_aux_buf(hw); + set_aux_data(hw, + &cm->cur_frame->buf, 0, 0); + */ + if (hw->low_latency_flag) { + av1_postproc(hw); + vdec_profile(hw_to_vdec(hw), VDEC_PROFILE_EVENT_CB); + if (debug & PRINT_FLAG_VDEC_DETAIL) + pr_info("%s AV1 frame done \n", __func__); + } + } + + hw->process_busy = 0; + return IRQ_HANDLED; + } + + if (dec_status == AOM_EOS) { + if (hw->m_ins_flag) + reset_process_time(hw); + + av1_print(hw, AOM_DEBUG_HW_MORE, "AV1_EOS, flush buffer\r\n"); + + av1_postproc(hw); + + av1_print(hw, AOM_DEBUG_HW_MORE, "send AV1_10B_DISCARD_NAL\r\n"); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_DISCARD_NAL); + hw->process_busy = 0; + if (hw->m_ins_flag) { + hw->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + vdec_schedule_work(&hw->work); + } + return IRQ_HANDLED; + } else if (dec_status == AOM_DECODE_OVER_SIZE) { + 
av1_print(hw, AOM_DEBUG_HW_MORE, "av1 decode oversize !!\n"); + /*debug |= (AV1_DEBUG_DIS_LOC_ERROR_PROC | + AV1_DEBUG_DIS_SYS_ERROR_PROC);*/ + hw->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + hw->process_busy = 0; + if (hw->m_ins_flag) + reset_process_time(hw); + return IRQ_HANDLED; + } + + obu_type = load_param(hw, &hw->aom_param, dec_status); + if (obu_type < 0) { + hw->process_busy = 0; + return IRQ_HANDLED; + } + + if (obu_type == OBU_SEQUENCE_HEADER) { + int next_lcu_size; + hw->has_sequence = 1; + av1_bufmgr_process(hw->pbi, &hw->aom_param, 0, obu_type); + + if ((hw->max_pic_w < hw->aom_param.p.max_frame_width) || + (hw->max_pic_h < hw->aom_param.p.max_frame_height)) { + av1_print(hw, 0, "%s, max size change (%d, %d) -> (%d, %d)\n", + __func__, hw->max_pic_w, hw->max_pic_h, + hw->aom_param.p.max_frame_width, hw->aom_param.p.max_frame_height); + vav1_mmu_map_free(hw); + hw->max_pic_w = hw->aom_param.p.max_frame_width; + hw->max_pic_h = hw->aom_param.p.max_frame_height; + hw->init_pic_w = hw->max_pic_w; + hw->init_pic_h = hw->max_pic_h; + hw->pbi->frame_width = hw->init_pic_w; + hw->pbi->frame_height = hw->init_pic_h; + if (IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h)) { + hw->double_write_mode = 4; + hw->used_buf_num = MAX_BUF_NUM_LESS; + if (hw->used_buf_num > REF_FRAMES_4K) + hw->mv_buf_margin = hw->used_buf_num - REF_FRAMES_4K + 1; + if (((hw->max_pic_w % 64) != 0) && + (hw_to_vdec(hw)->canvas_mode != CANVAS_BLKMODE_LINEAR)) + hw->mem_map_mode = 2; + av1_print(hw, 0, + "force 8k double write 4, mem_map_mode %d\n", hw->mem_map_mode); + } + vav1_mmu_map_alloc(hw); + if (hw->mmu_enable) + WRITE_VREG(HEVC_SAO_MMU_DMA_CTRL, hw->frame_mmu_map_phy_addr); +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + WRITE_VREG(HEVC_SAO_MMU_DMA_CTRL2, hw->dw_frame_mmu_map_phy_addr); + //default of 0xffffffff will disable dw + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0); + WRITE_VREG(HEVC_SAO_C_START_ADDR, 0); + } +#endif + + /*v4l2 alloc new mv when max size changed */ + if 
(hw->is_used_v4l) { + /* now less than 8k use fix mv buf size */ + if (IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h) && hw->pic_list_init_done) { + if (init_mv_buf_list(hw) < 0) + pr_err("%s: !!!!Error, reinit_mv_buf_list fail\n", __func__); + } + } + } + bit_depth_luma = hw->aom_param.p.bit_depth; + bit_depth_chroma = hw->aom_param.p.bit_depth; + next_lcu_size = ((hw->aom_param.p.seq_flags >> 6) & 0x1) ? 128 : 64; + hw->video_signal_type = (hw->aom_param.p.video_signal_type << 16 + | hw->aom_param.p.color_description); + + if (next_lcu_size != hw->current_lcu_size) { + av1_print(hw, AOM_DEBUG_HW_MORE, + " ## lcu_size changed from %d to %d\n", + hw->current_lcu_size, next_lcu_size); + hw->current_lcu_size = next_lcu_size; + } + + if (!hw->pic_list_init_done) { +#if 0 + if (hw->m_ins_flag) { + /* picture list init.*/ + hw->dec_result = DEC_INIT_PICLIST; + vdec_schedule_work(&hw->work); + } else +#endif + { + init_pic_list(hw); + init_pic_list_hw(hw); +#ifndef MV_USE_FIXED_BUF + if (init_mv_buf_list(hw) < 0) { + pr_err("%s: !!!!Error, init_mv_buf_list fail\n", __func__); + } +#endif + } + hw->pic_list_init_done = true; + } + av1_print(hw, AOM_DEBUG_HW_MORE, + "AOM_AV1_SEQ_HEAD_PARSER_DONE, search head ...\n"); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + hw->process_busy = 0; + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + return IRQ_HANDLED; + } +#ifndef USE_DEC_PIC_END + //if (pbi->wait_buf) { + if (pbi->bufmgr_proc_count > 0) { + if (READ_VREG(PIC_END_LCU_COUNT) != 0) { + hw->frame_decoded = 1; + /* + In c module, multi obus are put in one packet, which is decoded + with av1_receive_compressed_data(). + For STREAM_MODE or SINGLE_MODE, there is no packet boundary, + we assume each packet must and only include one picture of data (LCUs) + or cm->show_existing_frame is 1 + */ + if (cm->cur_frame) + av1_print(hw, AOM_DEBUG_HW_MORE, "Decoding done (index %d)\n", + cm->cur_frame? 
cm->cur_frame->buf.index:-1); + } + } +#endif +#if 1 +/*def CHECK_OBU_REDUNDANT_FRAME_HEADER*/ + if (debug & AOM_DEBUG_BUFMGR_ONLY) { + if (READ_VREG(PIC_END_LCU_COUNT) != 0) + hw->obu_frame_frame_head_come_after_tile = 0; + + if (obu_type == OBU_FRAME_HEADER || + obu_type == OBU_FRAME) { + hw->obu_frame_frame_head_come_after_tile = 1; + } else if (obu_type == OBU_REDUNDANT_FRAME_HEADER && + hw->obu_frame_frame_head_come_after_tile == 0) { + if (hw->frame_decoded == 1) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "Warning, OBU_REDUNDANT_FRAME_HEADER come without OBU_FRAME or OBU_FRAME_HEAD\n"); + hw->frame_decoded = 0; + } + } + } +#endif + if (hw->frame_decoded) + hw->one_compressed_data_done = 1; + + if (hw->m_ins_flag) + reset_process_time(hw); + + if (hw->process_state != PROC_STATE_SENDAGAIN + ) { + if (hw->one_compressed_data_done) { + av1_postproc(hw); + av1_release_bufs(hw); +#ifndef MV_USE_FIXED_BUF + put_un_used_mv_bufs(hw); +#endif + } + } + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + hw->frame_width = hw->common.seq_params.max_frame_width; + hw->frame_height = hw->common.seq_params.max_frame_height; + + if (!v4l_res_change(hw)) { + if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) { + struct aml_vdec_ps_infos ps; + struct vdec_comp_buf_info comp; + + pr_info("set ucode parse\n"); + if (get_valid_double_write_mode(hw) != 16) { + vav1_get_comp_buf_info(hw, &comp); + vdec_v4l_set_comp_buf_info(ctx, &comp); + } + + vav1_get_ps_info(hw, &ps); + /*notice the v4l2 codec.*/ + vdec_v4l_set_ps_infos(ctx, &ps); + hw->v4l_params_parsed = true; + hw->postproc_done = 0; + hw->process_busy = 0; + dec_again_process(hw); + return IRQ_HANDLED; + } + } else { + hw->postproc_done = 0; + hw->process_busy = 0; + dec_again_process(hw); + return IRQ_HANDLED; + } + } + + if (hw->one_package_frame_cnt) { + if (get_free_buf_count(hw) <= 0) { + hw->dec_result = AOM_AV1_RESULT_NEED_MORE_BUFFER; + hw->cur_obu_type = 
obu_type; + hw->process_busy = 0; + vdec_schedule_work(&hw->work); + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + return IRQ_HANDLED; + } + } + hw->one_package_frame_cnt++; + + ret = av1_continue_decoding(hw, obu_type); + hw->postproc_done = 0; + hw->process_busy = 0; + + if (hw->m_ins_flag) { + if (ret >= 0) + start_process_time(hw); + else { + hw->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + vdec_schedule_work(&hw->work); + } + } + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + return IRQ_HANDLED; +} + +static irqreturn_t vav1_isr(int irq, void *data) +{ + int i; + unsigned int dec_status; + struct AV1HW_s *hw = (struct AV1HW_s *)data; + //struct AV1_Common_s *const cm = &hw->common; + uint debug_tag; + + WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + + dec_status = READ_VREG(HEVC_DEC_STATUS_REG) & 0xff; + + if (dec_status == AOM_AV1_FRAME_HEAD_PARSER_DONE || + dec_status == AOM_AV1_SEQ_HEAD_PARSER_DONE || + dec_status == AOM_AV1_FRAME_PARSER_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_HEAD_DONE); + } + else if (dec_status == AOM_AV1_DEC_PIC_END || + dec_status == AOM_NAL_DECODE_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_PIC_DONE); + } + if (!hw) + return IRQ_HANDLED; + if (hw->init_flag == 0) + return IRQ_HANDLED; + if (hw->process_busy)/*on process.*/ + return IRQ_HANDLED; + hw->dec_status = dec_status; + hw->process_busy = 1; + if (debug & AV1_DEBUG_BUFMGR) + av1_print(hw, AV1_DEBUG_BUFMGR, + "av1 isr (%d) dec status = 0x%x (0x%x), lcu 0x%x shiftbyte 0x%x shifted_data 0x%x (%x %x lev %x, wr %x, rd %x) log %x\n", + irq, + dec_status, READ_VREG(HEVC_DEC_STATUS_REG), + READ_VREG(HEVC_PARSER_LCU_START), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFTED_DATA), + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), +#ifdef DEBUG_UCODE_LOG + 
READ_VREG(HEVC_DBG_LOG_ADR) +#else + 0 +#endif + ); +#ifdef DEBUG_UCODE_LOG + if ((udebug_flag & 0x8) && + (hw->ucode_log_addr != 0) && + (READ_VREG(HEVC_DEC_STATUS_REG) & 0x100)) { + unsigned long flags; + unsigned short *log_adr = + (unsigned short *)hw->ucode_log_addr; + lock_buffer_pool(hw->pbi->common.buffer_pool, flags); + while (*(log_adr + 3)) { + pr_info("dbg%04x %04x %04x %04x\n", + *(log_adr + 3), *(log_adr + 2), *(log_adr + 1), *(log_adr + 0) + ); + log_adr += 4; + } + unlock_buffer_pool(hw->pbi->common.buffer_pool, flags); + } +#endif + debug_tag = READ_HREG(DEBUG_REG1); + if (debug_tag & 0x10000) { + pr_info("LMEM<tag %x>:\n", READ_HREG(DEBUG_REG1)); + for (i = 0; i < 0x400; i += 4) { + int ii; + if ((i & 0xf) == 0) + pr_info("%03x: ", i); + for (ii = 0; ii < 4; ii++) { + pr_info("%04x ", + hw->lmem_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + pr_info("\n"); + } + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == hw->result_done_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_HREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + } + else if (debug_tag & 0x20000) + hw->ucode_pause_pos = 0xffffffff; + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_HREG(DEBUG_REG1, 0); + } else if (debug_tag != 0) { + pr_info( + "dbg%x: %x lcu %x\n", READ_HREG(DEBUG_REG1), + READ_HREG(DEBUG_REG2), + READ_VREG(HEVC_PARSER_LCU_START)); + + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == hw->result_done_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_HREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + } + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_HREG(DEBUG_REG1, 0); + hw->process_busy = 0; + return IRQ_HANDLED; + } + + //if (READ_VREG(HEVC_FG_STATUS) == 
AOM_AV1_FGS_PARAM) { + if (hw->dec_status == AOM_AV1_FGS_PARAM) { + uint32_t status_val = READ_VREG(HEVC_FG_STATUS); + WRITE_VREG(HEVC_FG_STATUS, AOM_AV1_FGS_PARAM_CONT); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_FGS_PARAM_CONT); + // Bit[11] - 0 Read, 1 - Write + // Bit[10:8] - film_grain_params_ref_idx // For Write request + if ((status_val >> 11) & 0x1) { + uint32_t film_grain_params_ref_idx = (status_val >> 8) & 0x7; + config_film_grain_reg(hw, film_grain_params_ref_idx); + } + else + read_film_grain_reg(hw); + hw->process_busy = 0; + return IRQ_HANDLED; + } + + if (!hw->m_ins_flag) { + av1_print(hw, AV1_DEBUG_BUFMGR, + "error flag = %d\n", hw->error_flag); + if (hw->error_flag == 1) { + hw->error_flag = 2; + hw->process_busy = 0; + return IRQ_HANDLED; + } else if (hw->error_flag == 3) { + hw->process_busy = 0; + return IRQ_HANDLED; + } + if (get_free_buf_count(hw) <= 0) { + /* + if (hw->wait_buf == 0) + pr_info("set wait_buf to 1\r\n"); + */ + hw->wait_buf = 1; + hw->process_busy = 0; + av1_print(hw, AV1_DEBUG_BUFMGR, + "free buf not enough = %d\n", + get_free_buf_count(hw)); + return IRQ_HANDLED; + } + } + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_END); + return IRQ_WAKE_THREAD; +} + +static void av1_set_clk(struct work_struct *work) +{ + struct AV1HW_s *hw = container_of(work, + struct AV1HW_s, set_clk_work); + int fps = 96000 / hw->frame_dur; + + if (hevc_source_changed(VFORMAT_AV1, + frame_width, frame_height, fps) > 0) + hw->saved_resolution = frame_width * + frame_height * fps; +} + +static void vav1_put_timer_func(struct timer_list *timer) +{ + struct AV1HW_s *hw = container_of(timer, + struct AV1HW_s, timer); + uint8_t empty_flag; + unsigned int buf_level; + + enum receviver_start_e state = RECEIVER_INACTIVE; + + if (hw->m_ins_flag) { + if (hw_to_vdec(hw)->next_status + == VDEC_STATUS_DISCONNECTED) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + if (!hw->is_used_v4l || ctx->is_stream_off) { + hw->dec_result = 
DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + pr_debug("vdec requested to be disconnected\n"); + return; + } + } + } + if (hw->init_flag == 0) { + if (hw->stat & STAT_TIMER_ARM) { + timer->expires = jiffies + PUT_INTERVAL; + add_timer(&hw->timer); + } + return; + } + if (hw->m_ins_flag == 0) { + if (vf_get_receiver(hw->provider_name)) { + state = + vf_notify_receiver(hw->provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + if ((state == RECEIVER_STATE_NULL) + || (state == RECEIVER_STATE_NONE)) + state = RECEIVER_INACTIVE; + } else + state = RECEIVER_INACTIVE; + + empty_flag = (READ_VREG(HEVC_PARSER_INT_STATUS) >> 6) & 0x1; + /* error watchdog */ + if (empty_flag == 0) { + /* decoder has input */ + if ((debug & AV1_DEBUG_DIS_LOC_ERROR_PROC) == 0) { + + buf_level = READ_VREG(HEVC_STREAM_LEVEL); + /* receiver has no buffer to recycle */ + if ((state == RECEIVER_INACTIVE) && + (kfifo_is_empty(&hw->display_q) && + buf_level > 0x200) + ) { + WRITE_VREG + (HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + } + } + + } + } +#ifdef MULTI_INSTANCE_SUPPORT + else { + if ( + (decode_timeout_val > 0) && + (hw->start_process_time > 0) && + ((1000 * (jiffies - hw->start_process_time) / HZ) + > decode_timeout_val) + ) { + int current_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff; + if (hw->last_lcu_idx == current_lcu_idx) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) { + if (input_frame_based( + hw_to_vdec(hw)) || + (READ_VREG(HEVC_STREAM_LEVEL) > 0x200)) + timeout_process(hw); + else { + av1_print(hw, 0, + "timeout & empty, again\n"); + dec_again_process(hw); + } + } + } else { + start_process_time(hw); + hw->last_lcu_idx = current_lcu_idx; + } + } + } +#endif + + if ((hw->ucode_pause_pos != 0) && + (hw->ucode_pause_pos != 0xffffffff) && + udebug_pause_pos != hw->ucode_pause_pos) { + hw->ucode_pause_pos = 0; + WRITE_HREG(DEBUG_REG1, 0); + } +#ifdef MULTI_INSTANCE_SUPPORT + if (debug & 
AV1_DEBUG_DUMP_DATA) { + debug &= ~AV1_DEBUG_DUMP_DATA; + av1_print(hw, 0, + "%s: chunk size 0x%x off 0x%x sum 0x%x\n", + __func__, + hw->chunk->size, + hw->chunk->offset, + get_data_check_sum(hw, hw->chunk->size) + ); + dump_data(hw, hw->chunk->size); + } +#endif + if (debug & AV1_DEBUG_DUMP_PIC_LIST) { + /*dump_pic_list(hw);*/ + av1_dump_state(hw_to_vdec(hw)); + debug &= ~AV1_DEBUG_DUMP_PIC_LIST; + } + if (debug & AV1_DEBUG_TRIG_SLICE_SEGMENT_PROC) { + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); + debug &= ~AV1_DEBUG_TRIG_SLICE_SEGMENT_PROC; + } + /*if (debug & AV1_DEBUG_HW_RESET) { + }*/ + + if (radr != 0) { + if ((radr >> 24) != 0) { + int count = radr >> 24; + int adr = radr & 0xffffff; + int i; + for (i = 0; i < count; i++) + pr_info("READ_VREG(%x)=%x\n", adr+i, READ_VREG(adr+i)); + } else if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + if (pop_shorts != 0) { + int i; + u32 sum = 0; + + pr_info("pop stream 0x%x shorts\r\n", pop_shorts); + for (i = 0; i < pop_shorts; i++) { + u32 data = + (READ_HREG(HEVC_SHIFTED_DATA) >> 16); + WRITE_HREG(HEVC_SHIFT_COMMAND, + (1<<7)|16); + if ((i & 0xf) == 0) + pr_info("%04x:", i); + pr_info("%04x ", data); + if (((i + 1) & 0xf) == 0) + pr_info("\r\n"); + sum += data; + } + pr_info("\r\nsum = %x\r\n", sum); + pop_shorts = 0; + } + if (dbg_cmd != 0) { + if (dbg_cmd == 1) { + u32 disp_laddr; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB && + get_double_write_mode(hw) == 0) { + disp_laddr = + READ_VCBUS_REG(AFBC_BODY_BADDR) << 4; + } else { + struct canvas_s cur_canvas; + + canvas_read((READ_VCBUS_REG(VD1_IF0_CANVAS0) + & 0xff), &cur_canvas); + disp_laddr = cur_canvas.addr; + } + pr_info("current displayed buffer address %x\r\n", + disp_laddr); + } + dbg_cmd = 0; + } + /*don't changed at start.*/ + if (hw->get_frame_dur && hw->show_frame_num > 60 && + hw->frame_dur > 0 && 
hw->saved_resolution != + frame_width * frame_height * + (96000 / hw->frame_dur)) + vdec_schedule_work(&hw->set_clk_work); + + timer->expires = jiffies + PUT_INTERVAL; + add_timer(timer); +} + + +int vav1_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + struct AV1HW_s *av1 = + (struct AV1HW_s *)vdec->private; + + if (!av1) + return -1; + + vstatus->frame_width = frame_width; + vstatus->frame_height = frame_height; + if (av1->frame_dur != 0) + vstatus->frame_rate = 96000 / av1->frame_dur; + else + vstatus->frame_rate = -1; + vstatus->error_count = 0; + vstatus->status = av1->stat | av1->fatal_error; + vstatus->frame_dur = av1->frame_dur; +//#ifndef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + vstatus->bit_rate = av1->gvs->bit_rate; + vstatus->frame_data = av1->gvs->frame_data; + vstatus->total_data = av1->gvs->total_data; + vstatus->frame_count = av1->gvs->frame_count; + vstatus->error_frame_count = av1->gvs->error_frame_count; + vstatus->drop_frame_count = av1->gvs->drop_frame_count; + vstatus->samp_cnt = av1->gvs->samp_cnt; + vstatus->offset = av1->gvs->offset; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); +//#endif + return 0; +} + +int vav1_set_isreset(struct vdec_s *vdec, int isreset) +{ + is_reset = isreset; + return 0; +} + +#if 0 +static void AV1_DECODE_INIT(void) +{ + /* enable av1 clocks */ + WRITE_VREG(DOS_GCLK_EN3, 0xffffffff); + /* *************************************************************** */ + /* Power ON HEVC */ + /* *************************************************************** */ + /* Powerup HEVC */ + WRITE_VREG(AO_RTI_GEN_PWR_SLEEP0, + READ_VREG(AO_RTI_GEN_PWR_SLEEP0) & (~(0x3 << 6))); + WRITE_VREG(DOS_MEM_PD_HEVC, 0x0); + WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) | (0x3ffff << 2)); + WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) & (~(0x3ffff << 2))); + /* remove isolations */ + WRITE_VREG(AO_RTI_GEN_PWR_ISO0, + READ_VREG(AO_RTI_GEN_PWR_ISO0) & (~(0x3 << 10))); + +} +#endif + +static void 
vav1_prot_init(struct AV1HW_s *hw, u32 mask) +{ + unsigned int data32; + /* AV1_DECODE_INIT(); */ + av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__, __LINE__); + + aom_config_work_space_hw(hw, mask); + if (mask & HW_MASK_BACK) { + //to do: .. for single instance, called after init_pic_list() + if (hw->m_ins_flag) + init_pic_list_hw(hw); + } + + aom_init_decoder_hw(hw, mask); + +#ifdef AOM_AV1_DBLK_INIT + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] av1_loop_filter_init (run once before decoding start)\n"); + av1_loop_filter_init(hw->lfi, hw->lf); +#endif + if ((mask & HW_MASK_FRONT) == 0) + return; +#if 1 + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info("%s\n", __func__); + data32 = READ_VREG(HEVC_STREAM_CONTROL); + data32 = data32 | + (1 << 0)/*stream_fetch_enable*/ + ; + WRITE_VREG(HEVC_STREAM_CONTROL, data32); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + if (debug & AV1_DEBUG_BUFMGR) + pr_info("[test.c] Config STREAM_FIFO_CTL\n"); + data32 = READ_VREG(HEVC_STREAM_FIFO_CTL); + data32 = data32 | + (1 << 29) // stream_fifo_hole + ; + WRITE_VREG(HEVC_STREAM_FIFO_CTL, data32); + } +#if 0 + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x00000100) { + pr_info("av1 prot init error %d\n", __LINE__); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x00000300) { + pr_info("av1 prot init error %d\n", __LINE__); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x12345678); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x9abcdef0); + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x12345678) { + pr_info("av1 prot init error %d\n", __LINE__); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x9abcdef0) { + pr_info("av1 prot init error %d\n", __LINE__); + return; + } +#endif + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x000000001); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x00000300); +#endif + + + + WRITE_VREG(HEVC_WAIT_FLAG, 1); + + /* WRITE_VREG(HEVC_MPSR, 1); */ + + /* clear mailbox interrupt */ + 
WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 1); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(HEVC_PSCALE_CTRL, 0); + + WRITE_VREG(DEBUG_REG1, 0x0); + /*check vps/sps/pps/i-slice in ucode*/ + WRITE_VREG(NAL_SEARCH_CTL, 0x8); + + WRITE_VREG(DECODE_STOP_POS, udebug_flag); +#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD) + WRITE_VREG(HEVC_DBG_LOG_ADR, hw->ucode_log_phy_addr); +#endif +} + +static int vav1_local_init(struct AV1HW_s *hw) +{ + int i; + int ret; + int width, height; + + hw->gvs = vzalloc(sizeof(struct vdec_info)); + if (NULL == hw->gvs) { + pr_info("the struct of vdec status malloc failed.\n"); + return -1; + } +#ifdef DEBUG_PTS + hw->pts_missed = 0; + hw->pts_hit = 0; +#endif + hw->new_frame_displayed = 0; + hw->last_put_idx = -1; + hw->saved_resolution = 0; + hw->get_frame_dur = false; + on_no_keyframe_skiped = 0; + width = hw->vav1_amstream_dec_info.width; + height = hw->vav1_amstream_dec_info.height; + hw->frame_dur = + (hw->vav1_amstream_dec_info.rate == + 0) ? 
3200 : hw->vav1_amstream_dec_info.rate; + if (width && height) + hw->frame_ar = height * 0x100 / width; +/* + *TODO:FOR VERSION + */ + pr_info("av1: ver (%d,%d) decinfo: %dx%d rate=%d\n", av1_version, + 0, width, height, hw->frame_dur); + + if (hw->frame_dur == 0) + hw->frame_dur = 96000 / 24; + + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < FRAME_BUFFERS; i++) { + hw->buffer_wrap[i] = i; + } + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &hw->vfpool[i]; + + hw->vfpool[i].index = -1; + kfifo_put(&hw->newframe_q, vf); + } + + ret = av1_local_init(hw); + + if (force_pts_unstable) { + if (!hw->pts_unstable) { + hw->pts_unstable = + (hw->vav1_amstream_dec_info.rate == 0)?1:0; + pr_info("set pts unstable\n"); + } + } + + return ret; +} + + +#ifdef MULTI_INSTANCE_SUPPORT +static s32 vav1_init(struct vdec_s *vdec) +{ + struct AV1HW_s *hw = (struct AV1HW_s *)vdec->private; +#else +static s32 vav1_init(struct AV1HW_s *hw) +{ +#endif + int ret; + int fw_size = 0x1000 * 16; + struct firmware_s *fw = NULL; + + hw->stat |= STAT_TIMER_INIT; + + if (vav1_local_init(hw) < 0) + return -EBUSY; + + fw = vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__, __LINE__); +#ifdef DEBUG_USE_VP9_DEVICE_NAME + if (get_firmware_data(VIDEO_DEC_VP9_MMU, fw->data) < 0) { +#else + if (get_firmware_data(VIDEO_DEC_AV1_MMU, fw->data) < 0) { +#endif + pr_err("get firmware fail.\n"); + printk("%s %d\n", __func__, __LINE__); + vfree(fw); + return -1; + } + av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__, __LINE__); + fw->len = fw_size; + + INIT_WORK(&hw->set_clk_work, av1_set_clk); + timer_setup(&hw->timer, vav1_put_timer_func, 0); + +#ifdef MULTI_INSTANCE_SUPPORT + if (hw->m_ins_flag) { + hw->timer.expires = jiffies + PUT_INTERVAL; + + /*add_timer(&hw->timer); + + hw->stat |= STAT_TIMER_ARM; + hw->stat |= STAT_ISR_REG;*/ + + INIT_WORK(&hw->work, 
av1_work); + hw->fw = fw; + + return 0; /*multi instance return */ + } +#endif + amhevc_enable(); + + ret = amhevc_loadmc_ex(VFORMAT_AV1, NULL, fw->data); + if (ret < 0) { + amhevc_disable(); + vfree(fw); + pr_err("AV1: the %s fw loading failed, err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + return -EBUSY; + } + + vfree(fw); + + hw->stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + vav1_prot_init(hw, HW_MASK_FRONT | HW_MASK_BACK); + + if (vdec_request_threaded_irq(VDEC_IRQ_0, + vav1_isr, + vav1_isr_thread_fn, + IRQF_ONESHOT,/*run thread on this irq disabled*/ + "vav1-irq", (void *)hw)) { + pr_info("vav1 irq register error.\n"); + amhevc_disable(); + return -ENOENT; + } + + hw->stat |= STAT_ISR_REG; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (force_dv_enable) + hw->provider_name = DV_PROVIDER_NAME; + else +#endif + hw->provider_name = PROVIDER_NAME; +#ifdef MULTI_INSTANCE_SUPPORT + if (!hw->is_used_v4l) { + vf_provider_init(&vav1_vf_prov, hw->provider_name, + &vav1_vf_provider, hw); + vf_reg_provider(&vav1_vf_prov); + vf_notify_receiver(hw->provider_name, VFRAME_EVENT_PROVIDER_START, NULL); + if (hw->frame_dur != 0) { + if (!is_reset) + vf_notify_receiver(hw->provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)hw->frame_dur)); + } + } +#else + vf_provider_init(&vav1_vf_prov, hw->provider_name, &vav1_vf_provider, + hw); + vf_reg_provider(&vav1_vf_prov); + vf_notify_receiver(hw->provider_name, VFRAME_EVENT_PROVIDER_START, NULL); + if (!is_reset) + vf_notify_receiver(hw->provider_name, VFRAME_EVENT_PROVIDER_FR_HINT, + (void *)((unsigned long)hw->frame_dur)); +#endif + hw->stat |= STAT_VF_HOOK; + + hw->timer.expires = jiffies + PUT_INTERVAL; + add_timer(&hw->timer); + + hw->stat |= STAT_VDEC_RUN; + hw->stat |= STAT_TIMER_ARM; + + amhevc_start(); + + hw->init_flag = 1; + hw->process_busy = 0; + pr_info("%d, vav1_init, RP=0x%x\n", + __LINE__, READ_VREG(HEVC_STREAM_RD_PTR)); + return 0; +} + +static int 
vmav1_stop(struct AV1HW_s *hw) +{ + hw->init_flag = 0; + + if (hw->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_0, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->timer); + hw->stat &= ~STAT_TIMER_ARM; + } + + if (!hw->is_used_v4l && (hw->stat & STAT_VF_HOOK)) { + if (!is_reset) + vf_notify_receiver(hw->provider_name, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + + vf_unreg_provider(&vav1_vf_prov); + hw->stat &= ~STAT_VF_HOOK; + } + av1_local_uninit(hw); + reset_process_time(hw); + cancel_work_sync(&hw->work); + cancel_work_sync(&hw->set_clk_work); + uninit_mmu_buffers(hw); + if (hw->fw) + vfree(hw->fw); + hw->fw = NULL; + return 0; +} + +static int vav1_stop(struct AV1HW_s *hw) +{ + + hw->init_flag = 0; + hw->first_sc_checked = 0; + if (hw->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_ISR_REG) { +#ifdef MULTI_INSTANCE_SUPPORT + if (!hw->m_ins_flag) +#endif + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0); + vdec_free_irq(VDEC_IRQ_0, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->timer); + hw->stat &= ~STAT_TIMER_ARM; + } + + if (!hw->is_used_v4l && (hw->stat & STAT_VF_HOOK)) { + if (!is_reset) + vf_notify_receiver(hw->provider_name, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + + vf_unreg_provider(&vav1_vf_prov); + hw->stat &= ~STAT_VF_HOOK; + } + av1_local_uninit(hw); + + cancel_work_sync(&hw->set_clk_work); +#ifdef MULTI_INSTANCE_SUPPORT + if (hw->m_ins_flag) { + cancel_work_sync(&hw->work); + } else + amhevc_disable(); +#else + amhevc_disable(); +#endif + uninit_mmu_buffers(hw); + + vfree(hw->fw); + hw->fw = NULL; + return 0; +} +static int amvdec_av1_mmu_init(struct AV1HW_s *hw) +{ + int tvp_flag = vdec_secure(hw_to_vdec(hw)) ? 
+ CODEC_MM_FLAGS_TVP : 0; + int buf_size = 48; + + if ((hw->max_pic_w * hw->max_pic_h > 1280*736) && + (hw->max_pic_w * hw->max_pic_h <= 1920*1088)) { + buf_size = 12; + } else if ((hw->max_pic_w * hw->max_pic_h > 0) && + (hw->max_pic_w * hw->max_pic_h <= 1280*736)) { + buf_size = 4; + } + hw->need_cache_size = buf_size * SZ_1M; + hw->sc_start_time = get_jiffies_64(); + if (hw->mmu_enable && !hw->is_used_v4l) { + int count = FRAME_BUFFERS; + hw->mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME, + hw->index /* * 2*/, count, + hw->need_cache_size, + tvp_flag + ); + if (!hw->mmu_box) { + pr_err("av1 alloc mmu box failed!!\n"); + return -1; + } +#ifdef AOM_AV1_MMU_DW + if (hw->dw_mmu_enable) { + hw->mmu_box_dw = decoder_mmu_box_alloc_box(DRIVER_NAME, + hw->index /** 2 + 1*/, count, + hw->need_cache_size, + tvp_flag + ); + if (!hw->mmu_box_dw) { + pr_err("av1 alloc dw mmu box failed!!\n"); + return -1; + } + } +#endif + + } + hw->bmmu_box = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + hw->index, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + tvp_flag); + av1_print(hw, AV1_DEBUG_BUFMGR, + "%s, MAX_BMMU_BUFFER_NUM = %d\n", + __func__, + MAX_BMMU_BUFFER_NUM); + if (!hw->bmmu_box) { + pr_err("av1 alloc bmmu box failed!!\n"); + return -1; + } + return 0; +} + +static struct AV1HW_s *gHevc; + + +static int amvdec_av1_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + + struct AV1HW_s *hw; + AV1Decoder *pbi; + int ret; + u32 work_buf_size; + struct BuffInfo_s *p_buf_info; +#ifndef MULTI_INSTANCE_SUPPORT + int i; +#endif + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_TM2) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) || + ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_TM2) && !is_meson_rev_b())) { + pr_err("av1 unsupported on cpu %d, is_tm2_revb %d\n", + get_cpu_major_id(), is_cpu_tm2_revb()); + return -EINVAL; + } + + mutex_lock(&vav1_mutex); + hw = 
vzalloc(sizeof(struct AV1HW_s)); + if (hw == NULL) { + av1_print(hw, 0, "\namvdec_av1 device data allocation failed\n"); + mutex_unlock(&vav1_mutex); + return -ENOMEM; + } + gHevc = hw; + /* + memcpy(&BUF[0], &hw->m_BUF[0], sizeof(struct BUF_s) * MAX_BUF_NUM); + memset(hw, 0, sizeof(struct AV1HW_s)); + memcpy(&hw->m_BUF[0], &BUF[0], sizeof(struct BUF_s) * MAX_BUF_NUM); + */ + if (init_dblk_struc(hw) < 0) { + av1_print(hw, 0, "\nammvdec_av1 device data allocation failed\n"); + vfree(hw); + return -ENOMEM; + } + + pbi = av1_decoder_create(&hw->av1_buffer_pool, &hw->common); //&aom_decoder; + hw->pbi = pbi; + if (hw->pbi == NULL) { + pr_info("\nammvdec_av1 device data allocation failed\n"); + release_dblk_struct(hw); + vfree(hw); + return -ENOMEM; + } + //hw->common.buffer_pool = &hw->av1_buffer_pool; //???? + hw->pbi->private_data = hw; + + hw->init_flag = 0; + hw->first_sc_checked= 0; + +#ifdef MULTI_INSTANCE_SUPPORT + hw->eos = 0; + hw->start_process_time = 0; + hw->timeout_num = 0; +#endif + hw->fatal_error = 0; + hw->show_frame_num = 0; + if (pdata == NULL) { + av1_print(hw, 0, "\namvdec_av1 memory resource undefined.\n"); + vfree(hw); + mutex_unlock(&vav1_mutex); + return -EFAULT; + } + + if (pdata->sys_info) { + hw->vav1_amstream_dec_info = *pdata->sys_info; + av1_max_pic_w = (hw->vav1_amstream_dec_info.width) ? + (hw->vav1_amstream_dec_info.width) : 8192; + + av1_max_pic_h = (hw->vav1_amstream_dec_info.height) ? 
+ (hw->vav1_amstream_dec_info.height) : 4608; + } else { + hw->vav1_amstream_dec_info.width = 0; + hw->vav1_amstream_dec_info.height = 0; + hw->vav1_amstream_dec_info.rate = 30; + av1_max_pic_w = 8192; + av1_max_pic_h = 4608; + } + hw->max_pic_w = av1_max_pic_w; + hw->max_pic_h = av1_max_pic_h; + + hw->m_ins_flag = 0; + + if (force_bufspec) { + hw->buffer_spec_index = force_bufspec & 0xf; + pr_info("force buffer spec %d\n", force_bufspec & 0xf); + } else if (vdec_is_support_4k()) { + if (IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h)) + hw->buffer_spec_index = 2; + else if (IS_4K_SIZE(hw->max_pic_w, hw->max_pic_h)) + hw->buffer_spec_index = 1; + else + hw->buffer_spec_index = 0; + } else + hw->buffer_spec_index = 0; + + if (hw->buffer_spec_index == 0) + hw->max_one_mv_buffer_size = + (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_SC2) ? + MAX_ONE_MV_BUFFER_SIZE_1080P : MAX_ONE_MV_BUFFER_SIZE_1080P_TM2REVB; + else if (hw->buffer_spec_index == 1) + hw->max_one_mv_buffer_size = + (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_SC2) ? + MAX_ONE_MV_BUFFER_SIZE_4K : MAX_ONE_MV_BUFFER_SIZE_4K_TM2REVB; + else + hw->max_one_mv_buffer_size = + (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_SC2) ? + MAX_ONE_MV_BUFFER_SIZE_8K : MAX_ONE_MV_BUFFER_SIZE_8K_TM2REVB; + + p_buf_info = &aom_workbuff_spec[hw->buffer_spec_index]; + work_buf_size = (p_buf_info->end_adr - p_buf_info->start_adr + + 0xffff) & (~0xffff); + av1_print(hw, 0, + "vdec_is_support_4k() %d max_pic_w %d max_pic_h %d buffer_spec_index %d work_buf_size 0x%x\n", + vdec_is_support_4k(), hw->max_pic_w, hw->max_pic_h, + hw->buffer_spec_index, work_buf_size); + +#ifdef MULTI_INSTANCE_SUPPORT + hw->platform_dev = pdev; + platform_set_drvdata(pdev, pdata); +#endif + hw->double_write_mode = double_write_mode; + hw->mmu_enable = 1; +#ifdef AOM_AV1_MMU_DW + hw->dw_mmu_enable = + get_double_write_mode_init(hw) & 0x20 ? 
1 : 0; +#endif + if (amvdec_av1_mmu_init(hw) < 0) { + vfree(hw); + mutex_unlock(&vav1_mutex); + pr_err("av1 alloc bmmu box failed!!\n"); + return -1; + } + + ret = decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, WORK_SPACE_BUF_ID, + work_buf_size, DRIVER_NAME, &pdata->mem_start); + if (ret < 0) { + uninit_mmu_buffers(hw); + vfree(hw); + mutex_unlock(&vav1_mutex); + return ret; + } + hw->buf_size = work_buf_size; + +#ifdef MULTI_INSTANCE_SUPPORT + hw->buf_start = pdata->mem_start; +#else + if (!hw->mmu_enable) + hw->mc_buf_spec.buf_end = pdata->mem_start + hw->buf_size; + + for (i = 0; i < WORK_BUF_SPEC_NUM; i++) + aom_workbuff_spec[i].start_adr = pdata->mem_start; +#endif + + if (debug) { + av1_print(hw, AOM_DEBUG_HW_MORE, "===AV1 decoder mem resource 0x%lx size 0x%x\n", + pdata->mem_start, hw->buf_size); + } + + hw->no_head = no_head; +#ifdef MULTI_INSTANCE_SUPPORT + hw->cma_dev = pdata->cma_dev; +#else + cma_dev = pdata->cma_dev; +#endif + + hw->endian = HEVC_CONFIG_LITTLE_ENDIAN; + if (is_support_vdec_canvas()) + hw->endian = HEVC_CONFIG_BIG_ENDIAN; + if (endian) + hw->endian = endian; + +#ifdef MULTI_INSTANCE_SUPPORT + pdata->private = hw; + pdata->dec_status = vav1_dec_status; + pdata->set_isreset = vav1_set_isreset; + is_reset = 0; + if (vav1_init(pdata) < 0) { +#else + if (vav1_init(hw) < 0) { +#endif + av1_print(hw, 0, "\namvdec_av1 init failed.\n"); + av1_local_uninit(hw); + uninit_mmu_buffers(hw); + vfree(hw); + pdata->dec_status = NULL; + mutex_unlock(&vav1_mutex); + return -ENODEV; + } + /*set the max clk for smooth playing...*/ + hevc_source_changed(VFORMAT_AV1, 4096, 2048, 60); + mutex_unlock(&vav1_mutex); + + return 0; +} + +static int amvdec_av1_remove(struct platform_device *pdev) +{ + struct AV1HW_s *hw = gHevc; + struct vdec_s *vdec = hw_to_vdec(hw); + int i; + + if (debug) + av1_print(hw, AOM_DEBUG_HW_MORE, "amvdec_av1_remove\n"); + + mutex_lock(&vav1_mutex); + + vav1_stop(hw); + + hevc_source_changed(VFORMAT_AV1, 0, 0, 0); + + if 
(vdec->parallel_dec == 1) { + for (i = 0; i < FRAME_BUFFERS; i++) { + vdec->free_canvas_ex(hw->common.buffer_pool-> + frame_bufs[i].buf.y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->common.buffer_pool-> + frame_bufs[i].buf.uv_canvas_index, vdec->id); + } + } + +#ifdef DEBUG_PTS + pr_info("pts missed %ld, pts hit %ld, duration %d\n", + hw->pts_missed, hw->pts_hit, hw->frame_dur); +#endif + vfree(hw->pbi); + release_dblk_struct(hw); + vfree(hw); + mutex_unlock(&vav1_mutex); + + return 0; +} + +/****************************************/ +#ifdef CONFIG_PM +static int av1_suspend(struct device *dev) +{ + amhevc_suspend(to_platform_device(dev), dev->power.power_state); + return 0; +} + +static int av1_resume(struct device *dev) +{ + amhevc_resume(to_platform_device(dev)); + return 0; +} + +static const struct dev_pm_ops av1_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(av1_suspend, av1_resume) +}; +#endif + +static struct platform_driver amvdec_av1_driver = { + .probe = amvdec_av1_probe, + .remove = amvdec_av1_remove, + .driver = { + .name = DRIVER_NAME, +#ifdef CONFIG_PM + .pm = &av1_pm_ops, +#endif + } +}; + +static struct codec_profile_t amvdec_av1_profile = { +#ifdef DEBUG_USE_VP9_DEVICE_NAME + .name = "vp9", +#else + .name = "av1", +#endif + .profile = "" +}; + +static struct codec_profile_t amvdec_av1_profile_mult; + +static unsigned int get_data_check_sum + (struct AV1HW_s *hw, int size) +{ + int sum = 0; + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + sum = crc32_le(0, data, size); + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static void dump_data(struct AV1HW_s *hw, int size) +{ + int jj; + u8 *data = NULL; + int padding_size = hw->chunk->offset & + (VDEC_FIFO_ALIGN - 1); + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start 
+ + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + av1_print(hw, 0, "padding: "); + for (jj = padding_size; jj > 0; jj--) + av1_print_cont(hw, + 0, + "%02x ", *(data - jj)); + av1_print_cont(hw, 0, "data adr %p\n", + data); + + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + av1_print(hw, + 0, + "%06x:", jj); + av1_print_cont(hw, + 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + av1_print(hw, + 0, + "\n"); + } + av1_print(hw, + 0, + "\n"); + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); +} + +static void av1_work(struct work_struct *work) +{ + struct AV1HW_s *hw = container_of(work, + struct AV1HW_s, work); + struct vdec_s *vdec = hw_to_vdec(hw); + /* finished decoding one frame or error, + * notify vdec core to switch context + */ + if (hw->dec_result != AOM_AV1_RESULT_NEED_MORE_BUFFER) + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_START); + + if (hw->dec_result == DEC_RESULT_AGAIN) + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_AGAIN); + + av1_print(hw, PRINT_FLAG_VDEC_DETAIL, + "%s dec_result %d %x %x %x\n", + __func__, + hw->dec_result, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR)); + + if (hw->dec_result == AOM_AV1_RESULT_NEED_MORE_BUFFER) { + reset_process_time(hw); + if (get_free_buf_count(hw) <= 0) { + hw->dec_result = AOM_AV1_RESULT_NEED_MORE_BUFFER; + vdec_schedule_work(&hw->work); + } else { + av1_release_bufs(hw); + av1_continue_decoding(hw, hw->cur_obu_type); + hw->postproc_done = 0; + start_process_time(hw); + } + return; + } + + if (((hw->dec_result == DEC_RESULT_GET_DATA) || + (hw->dec_result == DEC_RESULT_GET_DATA_RETRY)) + && (hw_to_vdec(hw)->next_status != + VDEC_STATUS_DISCONNECTED)) { + if (!vdec_has_more_input(vdec)) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } + + if (hw->dec_result == DEC_RESULT_GET_DATA) { + av1_print(hw, 
PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA %x %x %x\n", + __func__, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR)); + vdec_vframe_dirty(vdec, hw->chunk); + vdec_clean_input(vdec); + } + + if (get_free_buf_count(hw) >= + hw->run_ready_min_buf_num) { + int r; + int decode_size; + r = vdec_prepare_input(vdec, &hw->chunk); + if (r < 0) { + hw->dec_result = DEC_RESULT_GET_DATA_RETRY; + + av1_print(hw, + PRINT_FLAG_VDEC_DETAIL, + "amvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&hw->work); + return; + } + hw->dec_result = DEC_RESULT_NONE; + av1_print(hw, PRINT_FLAG_VDEC_STATUS, + "%s: chunk size 0x%x sum 0x%x\n", + __func__, r, + (debug & PRINT_FLAG_VDEC_STATUS) ? + get_data_check_sum(hw, r) : 0 + ); + + if (debug & PRINT_FLAG_VDEC_DATA) + dump_data(hw, hw->chunk->size); + + decode_size = hw->chunk->size + + (hw->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + + WRITE_VREG(HEVC_DECODE_SIZE, + READ_VREG(HEVC_DECODE_SIZE) + decode_size); + + vdec_enable_input(vdec); + + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + + start_process_time(hw); + + } else { + hw->dec_result = DEC_RESULT_GET_DATA_RETRY; + + av1_print(hw, PRINT_FLAG_VDEC_DETAIL, + "amvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&hw->work); + } + return; + } else if (hw->dec_result == DEC_RESULT_DONE) { + /* if (!hw->ctx_valid) + hw->ctx_valid = 1; */ + hw->result_done_count++; + hw->process_state = PROC_STATE_INIT; + + av1_print(hw, PRINT_FLAG_VDEC_STATUS, + "%s (===> %d) dec_result %d (%d) %x %x %x shiftbytes 0x%x decbytes 0x%x\n", + __func__, + hw->frame_count, + hw->dec_result, + hw->result_done_count, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_BYTE_COUNT) - + hw->start_shift_bytes + ); + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + } else if (hw->dec_result == DEC_RESULT_AGAIN) { + /* + stream base: stream buf 
empty or timeout + frame base: vdec_prepare_input fail + */ + if (!vdec_has_more_input(vdec)) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } + } else if (hw->dec_result == DEC_RESULT_EOS) { + av1_print(hw, PRINT_FLAG_VDEC_STATUS, + "%s: end of stream\n", + __func__); + hw->eos = 1; + av1_postproc(hw); + + notify_v4l_eos(hw_to_vdec(hw)); + + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + } else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) { + av1_print(hw, PRINT_FLAG_VDEC_STATUS, + "%s: force exit\n", + __func__); + if (hw->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_ISR_REG) { +#ifdef MULTI_INSTANCE_SUPPORT + if (!hw->m_ins_flag) +#endif + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0); + vdec_free_irq(VDEC_IRQ_0, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + } + if (hw->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->timer); + hw->stat &= ~STAT_TIMER_ARM; + } + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_END); + /* mark itself has all HW resource released and input released */ + if (vdec->parallel_dec == 1) + vdec_core_finish_run(vdec, CORE_MASK_HEVC); + else + vdec_core_finish_run(hw_to_vdec(hw), CORE_MASK_VDEC_1 + | CORE_MASK_HEVC); + trigger_schedule(hw); +} + +static int av1_hw_ctx_restore(struct AV1HW_s *hw) +{ + vav1_prot_init(hw, HW_MASK_FRONT | HW_MASK_BACK); + return 0; +} + +static bool is_avaliable_buffer(struct AV1HW_s *hw) +{ + AV1_COMMON *cm = &hw->common; + RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int i, free_count = 0; + + if (ctx->cap_pool.dec < hw->used_buf_num) { + free_count = v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx); + if (free_count && + !ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) { + return false; + } + } + + for (i = 0; i < hw->used_buf_num; ++i) { + if 
((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.index >= 0) && + frame_bufs[i].buf.cma_alloc_addr) { + free_count++; + } + } + + return free_count < hw->run_ready_min_buf_num ? 0 : 1; +} + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + int tvp = vdec_secure(hw_to_vdec(hw)) ? + CODEC_MM_FLAGS_TVP : 0; + unsigned long ret = 0; + + if (!hw->pic_list_init_done2 || hw->eos) + return ret; + + if (!hw->first_sc_checked && hw->mmu_enable) { + int size; + void * mmu_box; + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + mmu_box = ctx->mmu_box; + } else + mmu_box = hw->mmu_box; + + size = decoder_mmu_box_sc_check(mmu_box, tvp); + hw->first_sc_checked = 1; + av1_print(hw, 0, "av1 cached=%d need_size=%d speed= %d ms\n", + size, (hw->need_cache_size >> PAGE_SHIFT), + (int)(get_jiffies_64() - hw->sc_start_time) * 1000/HZ); +#ifdef AOM_AV1_MMU_DW + /*!!!!!! To do ... 
*/ + if (hw->dw_mmu_enable) { + + } +#endif + } + + if (get_free_buf_count(hw) >= + hw->run_ready_min_buf_num) { + if (vdec->parallel_dec == 1) + ret = CORE_MASK_HEVC; + else + ret = CORE_MASK_VDEC_1 | CORE_MASK_HEVC; + } + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode) { + if (hw->v4l_params_parsed) { + if (ctx->cap_pool.dec < hw->used_buf_num) { + if (is_avaliable_buffer(hw)) + ret = CORE_MASK_HEVC; + else + ret = 0; + } + } else { + if (ctx->v4l_resolution_change) + ret = 0; + } + } else if (ctx->cap_pool.in < ctx->dpb_size) { + if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < + hw->run_ready_min_buf_num) + ret = 0; + } + } + + if (ret) + not_run_ready[hw->index] = 0; + else + not_run_ready[hw->index]++; + + /*av1_print(hw, + PRINT_FLAG_VDEC_DETAIL, "%s mask %lx=>%lx\r\n", + __func__, mask, ret);*/ + return ret; +} + +static void av1_frame_mode_cal_dur(struct AV1HW_s *hw) +{ + if (hw->chunk == NULL) + return; + av1_print(hw, AV1_DEBUG_OUT_PTS, + "run_front: pts %d, pts64 %lld, ts: %llu\n", + hw->chunk->pts, hw->chunk->pts64, hw->chunk->timestamp); + + if (hw->pts_diff_count > FRAME_BUFFERS) + return ; + + if ((hw->chunk->pts > hw->last_chunk_pts) && (hw->last_chunk_pts > 0)) { + hw->pts_diff_count++; + hw->pts_diff_sum = hw->pts_diff_sum + (hw->chunk->pts - hw->last_chunk_pts); + if (hw->pts_diff_count > FRAME_BUFFERS) { + u32 calc_dur = (u32)div_u64(div_u64(hw->pts_diff_sum, hw->pts_diff_count)*96, 90); + if ((calc_dur <= 16000) && (calc_dur >= 800)) { + av1_print(hw, 0, "change to calc dur %d, old dur %d\n", calc_dur, hw->frame_dur); + hw->frame_dur = calc_dur; + } + } + } + + hw->last_chunk_pts = hw->chunk->pts; +} + +static void run_front(struct vdec_s *vdec) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + int ret, size; + + run_count[hw->index]++; + /* hw->chunk = vdec_prepare_input(vdec); */ + hevc_reset_core(vdec); + + size = vdec_prepare_input(vdec, 
&hw->chunk); + if (size < 0) { + input_empty[hw->index]++; + + hw->dec_result = DEC_RESULT_AGAIN; + + av1_print(hw, PRINT_FLAG_VDEC_DETAIL, + "ammvdec_av1: Insufficient data\n"); + + vdec_schedule_work(&hw->work); + return; + } + input_empty[hw->index] = 0; + hw->dec_result = DEC_RESULT_NONE; + hw->start_shift_bytes = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + + av1_frame_mode_cal_dur(hw); + + if (debug & PRINT_FLAG_VDEC_STATUS) { + if (vdec_frame_based(vdec) && hw->chunk && !vdec_secure(vdec)) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + //print_hex_debug(data, size, size > 64 ? 64 : size); + av1_print(hw, 0, + "%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. %02x %02x %02x %02x\n", + __func__, size, get_data_check_sum(hw, size), + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + av1_print(hw, 0, + "%s frm cnt (%d): chunk (0x%x 0x%x) (%x %x %x %x %x) bytes 0x%x\n", + __func__, hw->frame_count, hw->chunk->size, hw->chunk->offset, + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + hw->start_shift_bytes); + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } else { + av1_print(hw, 0, + "%s (%d): size 0x%x (0x%x 0x%x) (%x %x %x %x %x) bytes 0x%x\n", + __func__, + hw->frame_count, size, + hw->chunk ? hw->chunk->size : 0, + hw->chunk ? 
hw->chunk->offset : 0, + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + hw->start_shift_bytes); + } + } + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_START); + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else { +#ifdef DEBUG_USE_VP9_DEVICE_NAME + ret = amhevc_loadmc_ex(VFORMAT_VP9, NULL, hw->fw->data); +#else + ret = amhevc_loadmc_ex(VFORMAT_AV1, NULL, hw->fw->data); +#endif + if (ret < 0) { + amhevc_disable(); + av1_print(hw, PRINT_FLAG_ERROR, + "AV1: the %s fw loading failed, err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + vdec->mc_loaded = 1; +#ifdef DEBUG_USE_VP9_DEVICE_NAME + vdec->mc_type = VFORMAT_VP9; +#else + vdec->mc_type = VFORMAT_AV1; +#endif + } + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_END); + + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_START); + if (av1_hw_ctx_restore(hw) < 0) { + vdec_schedule_work(&hw->work); + return; + } + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_END); + vdec_enable_input(vdec); + + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + + if (vdec_frame_based(vdec)) { + if (debug & PRINT_FLAG_VDEC_DATA) + dump_data(hw, hw->chunk->size); + + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, 0); + size = hw->chunk->size + + (hw->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = hw->chunk->size; + } + hw->data_size = size; + WRITE_VREG(HEVC_DECODE_SIZE, size); + WRITE_VREG(HEVC_DECODE_COUNT, hw->result_done_count); + WRITE_VREG(LMEM_DUMP_ADR, (u32)hw->lmem_phy_addr); + if (hw->config_next_ref_info_flag) + config_next_ref_info_hw(hw); + hw->config_next_ref_info_flag = 0; + hw->init_flag = 1; + + av1_print(hw, 
PRINT_FLAG_VDEC_DETAIL, + "%s: start hw (%x %x %x) HEVC_DECODE_SIZE 0x%x\n", + __func__, + READ_VREG(HEVC_DEC_STATUS_REG), + READ_VREG(HEVC_MPC_E), + READ_VREG(HEVC_MPSR), + READ_VREG(HEVC_DECODE_SIZE)); + + start_process_time(hw); + mod_timer(&hw->timer, jiffies); + hw->stat |= STAT_TIMER_ARM; + hw->stat |= STAT_ISR_REG; + amhevc_start(); + hw->stat |= STAT_VDEC_RUN; +} + +static void run(struct vdec_s *vdec, unsigned long mask, + void (*callback)(struct vdec_s *, void *), void *arg) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_RUN_START); + av1_print(hw, + PRINT_FLAG_VDEC_DETAIL, "%s mask %lx\r\n", + __func__, mask); + + run_count[hw->index]++; + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + hw->vdec_cb_arg = arg; + hw->vdec_cb = callback; + hw->one_package_frame_cnt = 0; + run_front(vdec); + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_RUN_END); +} + +static void av1_decode_ctx_reset(struct AV1HW_s *hw) +{ + struct AV1_Common_s *const cm = &hw->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + int i; + + for (i = 0; i < FRAME_BUFFERS; ++i) { + frame_bufs[i].ref_count = 0; + frame_bufs[i].buf.vf_ref = 0; + frame_bufs[i].buf.decode_idx = 0; + frame_bufs[i].buf.cma_alloc_addr = 0; + frame_bufs[i].buf.index = i; + frame_bufs[i].buf.BUF_index = -1; + frame_bufs[i].buf.mv_buf_index = -1; + } + + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (hw->m_mv_BUF[i].start_adr) { + hw->m_mv_BUF[i].used_flag = 0; + } + } + + hw->one_compressed_data_done = 0; + hw->config_next_ref_info_flag = 0; + hw->init_flag = 0; + hw->first_sc_checked = 0; + hw->fatal_error = 0; + hw->show_frame_num = 0; + hw->postproc_done = 0; + hw->process_busy = 0; + hw->process_state = 0; + hw->frame_decoded = 0; + hw->eos = 0; +} + +static void reset(struct vdec_s *vdec) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + + cancel_work_sync(&hw->work); + 
cancel_work_sync(&hw->set_clk_work); + + if (hw->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->timer); + hw->stat &= ~STAT_TIMER_ARM; + } + + reset_process_time(hw); + + av1_bufmgr_ctx_reset(hw->pbi, &hw->av1_buffer_pool, &hw->common); + hw->pbi->private_data = hw; + + av1_local_uninit(hw); + if (vav1_local_init(hw) < 0) + av1_print(hw, 0, "%s local_init failed \r\n", __func__); + + av1_decode_ctx_reset(hw); + + av1_print(hw, PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__); +} + +static irqreturn_t av1_irq_cb(struct vdec_s *vdec, int irq) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + return vav1_isr(0, hw); +} + +static irqreturn_t av1_threaded_irq_cb(struct vdec_s *vdec, int irq) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + return vav1_isr_thread_fn(0, hw); +} + +static void av1_dump_state(struct vdec_s *vdec) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + struct AV1_Common_s *const cm = &hw->common; + int i; + av1_print(hw, 0, "====== %s\n", __func__); + + av1_print(hw, 0, + "width/height (%d/%d), used_buf_num %d\n", + cm->width, + cm->height, + hw->used_buf_num + ); + + av1_print(hw, 0, + "is_framebase(%d), eos %d, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d low_latency %d no_head %d \n", + input_frame_based(vdec), + hw->eos, + hw->dec_result, + decode_frame_count[hw->index], + display_frame_count[hw->index], + run_count[hw->index], + not_run_ready[hw->index], + input_empty[hw->index], + hw->low_latency_flag, + hw->no_head + ); + + if (!hw->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + av1_print(hw, 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + + av1_print(hw, 0, + "%s, newq(%d/%d), dispq(%d/%d), vf prepare/get/put (%d/%d/%d), 
free_buf_count %d (min %d for run_ready)\n", + __func__, + kfifo_len(&hw->newframe_q), + VF_POOL_SIZE, + kfifo_len(&hw->display_q), + VF_POOL_SIZE, + hw->vf_pre_count, + hw->vf_get_count, + hw->vf_put_count, + get_free_buf_count(hw), + hw->run_ready_min_buf_num + ); + + dump_pic_list(hw); + + for (i = 0; i < MAX_BUF_NUM; i++) { + av1_print(hw, 0, + "mv_Buf(%d) start_adr 0x%x size 0x%x used %d\n", + i, + hw->m_mv_BUF[i].start_adr, + hw->m_mv_BUF[i].size, + hw->m_mv_BUF[i].used_flag); + } + + av1_print(hw, 0, + "HEVC_DEC_STATUS_REG=0x%x\n", + READ_VREG(HEVC_DEC_STATUS_REG)); + av1_print(hw, 0, + "HEVC_MPC_E=0x%x\n", + READ_VREG(HEVC_MPC_E)); + av1_print(hw, 0, + "DECODE_MODE=0x%x\n", + READ_VREG(DECODE_MODE)); + av1_print(hw, 0, + "NAL_SEARCH_CTL=0x%x\n", + READ_VREG(NAL_SEARCH_CTL)); + av1_print(hw, 0, + "HEVC_PARSER_LCU_START=0x%x\n", + READ_VREG(HEVC_PARSER_LCU_START)); + av1_print(hw, 0, + "HEVC_DECODE_SIZE=0x%x\n", + READ_VREG(HEVC_DECODE_SIZE)); + av1_print(hw, 0, + "HEVC_SHIFT_BYTE_COUNT=0x%x\n", + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + av1_print(hw, 0, + "HEVC_STREAM_START_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_START_ADDR)); + av1_print(hw, 0, + "HEVC_STREAM_END_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_END_ADDR)); + av1_print(hw, 0, + "HEVC_STREAM_LEVEL=0x%x\n", + READ_VREG(HEVC_STREAM_LEVEL)); + av1_print(hw, 0, + "HEVC_STREAM_WR_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_WR_PTR)); + av1_print(hw, 0, + "HEVC_STREAM_RD_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_RD_PTR)); + av1_print(hw, 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + av1_print(hw, 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (input_frame_based(vdec) && + (debug & PRINT_FLAG_VDEC_DATA) + ) { + int jj; + if (hw->chunk && hw->chunk->block && + hw->chunk->size > 0) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap( + hw->chunk->block->start + + hw->chunk->offset, + hw->chunk->size); + else + data = ((u8 
*)hw->chunk->block->start_virt) + + hw->chunk->offset; + av1_print(hw, 0, + "frame data size 0x%x\n", + hw->chunk->size); + for (jj = 0; jj < hw->chunk->size; jj++) { + if ((jj & 0xf) == 0) + av1_print(hw, 0, + "%06x:", jj); + av1_print_cont(hw, 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + av1_print_cont(hw, 0, + "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } + +} + +static int ammvdec_av1_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + int ret; + int config_val; + int i; + struct vframe_content_light_level_s content_light_level; + struct vframe_master_display_colour_s vf_dp; + u32 work_buf_size; + struct BuffInfo_s *p_buf_info; + struct AV1HW_s *hw = NULL; + + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_TM2) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) || + ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_TM2) && !is_meson_rev_b())) { + pr_err("av1 unsupported on cpu %d, is_tm2_revb %d\n", + get_cpu_major_id(), is_cpu_tm2_revb()); + return -EINVAL; + } + + if (pdata == NULL) { + av1_print(hw, 0, "\nammvdec_av1 memory resource undefined.\n"); + return -EFAULT; + } + memset(&vf_dp, 0, sizeof(struct vframe_master_display_colour_s)); + + hw = vzalloc(sizeof(struct AV1HW_s)); + if (hw == NULL) { + av1_print(hw, 0, "\nammvdec_av1 device data allocation failed\n"); + return -ENOMEM; + } + + if (init_dblk_struc(hw) < 0) { + av1_print(hw, 0, "\nammvdec_av1 device data allocation failed\n"); + vfree(hw); + return -ENOMEM; + } + + hw->pbi = av1_decoder_create(&hw->av1_buffer_pool, &hw->common); //&aom_decoder; + if (hw->pbi == NULL) { + av1_print(hw, 0, "\nammvdec_av1 device data allocation failed\n"); + release_dblk_struct(hw); + vfree(hw); + return -ENOMEM; + } + + hw->pbi->private_data = hw; + /* the ctx from v4l2 driver. 
*/ + hw->v4l2_ctx = pdata->private; + + pdata->private = hw; + pdata->dec_status = vav1_dec_status; + /* pdata->set_trickmode = set_trickmode; */ + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = av1_irq_cb; + pdata->threaded_irq_handler = av1_threaded_irq_cb; + pdata->dump_state = av1_dump_state; + + hw->index = pdev->id; + if (is_rdma_enable()) { + hw->rdma_adr = dma_alloc_coherent(amports_get_dma_device(), RDMA_SIZE, &hw->rdma_phy_adr, GFP_KERNEL); + for (i = 0; i < SCALELUT_DATA_WRITE_NUM; i++) { + hw->rdma_adr[i * 4] = HEVC_IQIT_SCALELUT_WR_ADDR & 0xfff; + hw->rdma_adr[i * 4 + 1] = i; + hw->rdma_adr[i * 4 + 2] = HEVC_IQIT_SCALELUT_DATA & 0xfff; + hw->rdma_adr[i * 4 + 3] = 0; + if (i == SCALELUT_DATA_WRITE_NUM - 1) { + hw->rdma_adr[i * 4 + 2] = (HEVC_IQIT_SCALELUT_DATA & 0xfff) | 0x20000; + } + } + } + snprintf(hw->trace.vdec_name, sizeof(hw->trace.vdec_name), + "av1-%d", hw->index); + snprintf(hw->trace.pts_name, sizeof(hw->trace.pts_name), + "%s-pts", hw->trace.vdec_name); + snprintf(hw->trace.new_q_name, sizeof(hw->trace.new_q_name), + "%s-newframe_q", hw->trace.vdec_name); + snprintf(hw->trace.disp_q_name, sizeof(hw->trace.disp_q_name), + "%s-dispframe_q", hw->trace.vdec_name); + snprintf(hw->trace.decode_time_name, sizeof(hw->trace.decode_time_name), + "decoder_time%d", pdev->id); + snprintf(hw->trace.decode_run_time_name, sizeof(hw->trace.decode_run_time_name), + "decoder_run_time%d", pdev->id); + snprintf(hw->trace.decode_header_memory_time_name, sizeof(hw->trace.decode_header_memory_time_name), + "decoder_header_time%d", pdev->id); + snprintf(hw->trace.decode_work_time_name, sizeof(hw->trace.decode_work_time_name), + "decoder_work_time%d", pdev->id); + if (pdata->use_vfm_path) + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (vdec_dual(pdata)) { + struct AV1HW_s *hevc_pair = NULL; + + if 
(dv_toggle_prov_name) /*debug purpose*/ + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVBL_PROVIDER_NAME : + VFM_DEC_DVEL_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVEL_PROVIDER_NAME : + VFM_DEC_DVBL_PROVIDER_NAME); + if (pdata->master) + hevc_pair = (struct AV1HW_s *)pdata->master->private; + else if (pdata->slave) + hevc_pair = (struct AV1HW_s *)pdata->slave->private; + } +#endif + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + + hw->provider_name = pdata->vf_provider_name; + platform_set_drvdata(pdev, pdata); + + hw->platform_dev = pdev; + hw->video_signal_type = 0; + hw->m_ins_flag = 1; + + if (pdata->sys_info) { + hw->vav1_amstream_dec_info = *pdata->sys_info; + if ((unsigned long) hw->vav1_amstream_dec_info.param + & 0x08) { + hw->low_latency_flag = 1; + } else + hw->low_latency_flag = 0; + } else { + hw->vav1_amstream_dec_info.width = 0; + hw->vav1_amstream_dec_info.height = 0; + hw->vav1_amstream_dec_info.rate = 30; + } + + if ((debug & IGNORE_PARAM_FROM_CONFIG) == 0 && + pdata->config_len) { +#ifdef MULTI_INSTANCE_SUPPORT + int av1_buf_width = 0; + int av1_buf_height = 0; + /*use ptr config for doubel_write_mode, etc*/ + av1_print(hw, 0, "pdata->config=%s\n", pdata->config); + if (get_config_int(pdata->config, "av1_double_write_mode", + &config_val) == 0) + hw->double_write_mode = config_val; + else + hw->double_write_mode = double_write_mode; + + if (get_config_int(pdata->config, "save_buffer_mode", + &config_val) == 0) + hw->save_buffer_mode = config_val; + else + hw->save_buffer_mode = 0; + if (get_config_int(pdata->config, "av1_buf_width", + &config_val) == 0) { + av1_buf_width = config_val; + } + if (get_config_int(pdata->config, "av1_buf_height", + &config_val) == 0) { + av1_buf_height = config_val; + } + + if (get_config_int(pdata->config, "no_head", + 
&config_val) == 0) + hw->no_head = config_val; + else + hw->no_head = no_head; + + /*use ptr config for max_pic_w, etc*/ + if (get_config_int(pdata->config, "av1_max_pic_w", + &config_val) == 0) { + hw->max_pic_w = config_val; + } + if (get_config_int(pdata->config, "av1_max_pic_h", + &config_val) == 0) { + hw->max_pic_h = config_val; + } + if ((hw->max_pic_w * hw->max_pic_h) + < (av1_buf_width * av1_buf_height)) { + hw->max_pic_w = av1_buf_width; + hw->max_pic_h = av1_buf_height; + av1_print(hw, 0, "use buf resolution\n"); + } + + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hw->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hw->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hw->is_used_v4l = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_buffer_margin", + &config_val) == 0) + hw->dynamic_buf_num_margin = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + hw->mem_map_mode = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_low_latency_mode", + &config_val) == 0) + hw->low_latency_flag = config_val; + +#endif + if (get_config_int(pdata->config, "HDRStaticInfo", + &vf_dp.present_flag) == 0 + && vf_dp.present_flag == 1) { + get_config_int(pdata->config, "mG.x", + &vf_dp.primaries[0][0]); + get_config_int(pdata->config, "mG.y", + &vf_dp.primaries[0][1]); + get_config_int(pdata->config, "mB.x", + &vf_dp.primaries[1][0]); + get_config_int(pdata->config, "mB.y", + &vf_dp.primaries[1][1]); + get_config_int(pdata->config, "mR.x", + &vf_dp.primaries[2][0]); + get_config_int(pdata->config, "mR.y", + &vf_dp.primaries[2][1]); + get_config_int(pdata->config, "mW.x", + &vf_dp.white_point[0]); + get_config_int(pdata->config, "mW.y", + &vf_dp.white_point[1]); + get_config_int(pdata->config, "mMaxDL", + &vf_dp.luminance[0]); + 
get_config_int(pdata->config, "mMinDL", + &vf_dp.luminance[1]); + vf_dp.content_light_level.present_flag = 1; + get_config_int(pdata->config, "mMaxCLL", + &content_light_level.max_content); + get_config_int(pdata->config, "mMaxFALL", + &content_light_level.max_pic_average); + vf_dp.content_light_level = content_light_level; + hw->video_signal_type = (1 << 29) + | (5 << 26) /* unspecified */ + | (0 << 25) /* limit */ + | (1 << 24) /* color available */ + | (9 << 16) /* 2020 */ + | (16 << 8) /* 2084 */ + | (9 << 0); /* 2020 */ + } + hw->vf_dp = vf_dp; + } else { + u32 force_w, force_h; + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) { + force_w = 1920; + force_h = 1088; + } else { + force_w = 8192; + force_h = 4608; + } + if (hw->vav1_amstream_dec_info.width) + hw->max_pic_w = hw->vav1_amstream_dec_info.width; + else + hw->max_pic_w = force_w; + + if (hw->vav1_amstream_dec_info.height) + hw->max_pic_h = hw->vav1_amstream_dec_info.height; + else + hw->max_pic_h = force_h; + hw->double_write_mode = double_write_mode; + } + + hw->endian = HEVC_CONFIG_LITTLE_ENDIAN; + if (!hw->is_used_v4l) { + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vav1_vf_provider, hw); + } + + hw->mem_map_mode = mem_map_mode; + if (is_support_vdec_canvas()) + hw->endian = HEVC_CONFIG_BIG_ENDIAN; + if (endian) + hw->endian = endian; + + if (is_oversize(hw->max_pic_w, hw->max_pic_h)) { + pr_err("over size: %dx%d, probe failed\n", + hw->max_pic_w, hw->max_pic_h); + return -1; + } + if (force_bufspec) { + hw->buffer_spec_index = force_bufspec & 0xf; + pr_info("force buffer spec %d\n", force_bufspec & 0xf); + } else if (vdec_is_support_4k()) { + if (IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h)) + hw->buffer_spec_index = 2; + else if (IS_4K_SIZE(hw->max_pic_w, hw->max_pic_h)) + hw->buffer_spec_index = 1; + else + hw->buffer_spec_index = 0; + } else + hw->buffer_spec_index = 0; + + if (hw->buffer_spec_index == 0) + hw->max_one_mv_buffer_size = + (get_cpu_major_id() > 
AM_MESON_CPU_MAJOR_ID_SC2) ? + MAX_ONE_MV_BUFFER_SIZE_1080P : MAX_ONE_MV_BUFFER_SIZE_1080P_TM2REVB; + else if (hw->buffer_spec_index == 1) + hw->max_one_mv_buffer_size = + (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_SC2) ? + MAX_ONE_MV_BUFFER_SIZE_4K : MAX_ONE_MV_BUFFER_SIZE_4K_TM2REVB; + else + hw->max_one_mv_buffer_size = + (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_SC2) ? + MAX_ONE_MV_BUFFER_SIZE_8K : MAX_ONE_MV_BUFFER_SIZE_8K_TM2REVB; + + p_buf_info = &aom_workbuff_spec[hw->buffer_spec_index]; + work_buf_size = (p_buf_info->end_adr - p_buf_info->start_adr + + 0xffff) & (~0xffff); + + av1_print(hw, 0, + "vdec_is_support_4k() %d max_pic_w %d max_pic_h %d buffer_spec_index %d work_buf_size 0x%x\n", + vdec_is_support_4k(), hw->max_pic_w, hw->max_pic_h, + hw->buffer_spec_index, work_buf_size); + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL || + hw->double_write_mode == 0x10) + hw->mmu_enable = 0; + else + hw->mmu_enable = 1; + + video_signal_type = hw->video_signal_type; + + if (pdata->sys_info) { + hw->vav1_amstream_dec_info = *pdata->sys_info; + if ((unsigned long) hw->vav1_amstream_dec_info.param + & 0x08) { + hw->low_latency_flag = 1; + } else + hw->low_latency_flag = 0; + } else { + hw->vav1_amstream_dec_info.width = 0; + hw->vav1_amstream_dec_info.height = 0; + hw->vav1_amstream_dec_info.rate = 30; + } + +#ifdef AOM_AV1_MMU_DW + hw->dw_mmu_enable = + get_double_write_mode_init(hw) & 0x20 ? 
1 : 0; + +#endif + av1_print(hw, 0, + "no_head %d low_latency %d, signal_type 0x%x\n", + hw->no_head, hw->low_latency_flag, hw->video_signal_type); +#if 0 + hw->buf_start = pdata->mem_start; + hw->buf_size = pdata->mem_end - pdata->mem_start + 1; +#else + if (amvdec_av1_mmu_init(hw) < 0) { + pr_err("av1 alloc bmmu box failed!!\n"); + /* devm_kfree(&pdev->dev, (void *)hw); */ + vfree((void *)hw); + pdata->dec_status = NULL; + return -1; + } + + hw->cma_alloc_count = PAGE_ALIGN(work_buf_size) / PAGE_SIZE; + ret = decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, WORK_SPACE_BUF_ID, + hw->cma_alloc_count * PAGE_SIZE, DRIVER_NAME, + &hw->cma_alloc_addr); + if (ret < 0) { + uninit_mmu_buffers(hw); + /* devm_kfree(&pdev->dev, (void *)hw); */ + vfree((void *)hw); + pdata->dec_status = NULL; + return ret; + } + hw->buf_start = hw->cma_alloc_addr; + hw->buf_size = work_buf_size; +#endif + + hw->init_flag = 0; + hw->first_sc_checked = 0; + hw->fatal_error = 0; + hw->show_frame_num = 0; + hw->run_ready_min_buf_num = run_ready_min_buf_num; + + if (debug) { + av1_print(hw, AOM_DEBUG_HW_MORE, "===AV1 decoder mem resource 0x%lx size 0x%x\n", + hw->buf_start, + hw->buf_size); + } + + hw->cma_dev = pdata->cma_dev; + if (vav1_init(pdata) < 0) { + av1_print(hw, 0, "\namvdec_av1 init failed.\n"); + av1_local_uninit(hw); + uninit_mmu_buffers(hw); + /* devm_kfree(&pdev->dev, (void *)hw); */ + vfree((void *)hw); + pdata->dec_status = NULL; + return -ENODEV; + } + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_init(pdata); +#endif + + vdec_set_prepare_level(pdata, start_decode_buf_level); + hevc_source_changed(VFORMAT_AV1, 4096, 2048, 60); + + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_HEVC); + else + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + + hw->pic_list_init_done2 = true; + return 0; +} + +static int ammvdec_av1_remove(struct platform_device *pdev) +{ + struct AV1HW_s *hw = (struct AV1HW_s *) + (((struct vdec_s 
*)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec = hw_to_vdec(hw); + int i; + if (debug) + av1_print(hw, AOM_DEBUG_HW_MORE, "amvdec_av1_remove\n"); + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_exit(vdec); +#endif + + vmav1_stop(hw); + + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hw), CORE_MASK_HEVC); + else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + + if (vdec->parallel_dec == 1) { + for (i = 0; i < FRAME_BUFFERS; i++) { + vdec->free_canvas_ex + (hw->common.buffer_pool->frame_bufs[i].buf.y_canvas_index, + vdec->id); + vdec->free_canvas_ex + (hw->common.buffer_pool->frame_bufs[i].buf.uv_canvas_index, + vdec->id); + } + } + +#ifdef DEBUG_PTS + pr_info("pts missed %ld, pts hit %ld, duration %d\n", + hw->pts_missed, hw->pts_hit, hw->frame_dur); +#endif + /* devm_kfree(&pdev->dev, (void *)hw); */ + if (is_rdma_enable()) + dma_free_coherent(amports_get_dma_device(), RDMA_SIZE, hw->rdma_adr, hw->rdma_phy_adr); + vfree(hw->pbi); + release_dblk_struct(hw); + vfree((void *)hw); + return 0; +} + +static struct platform_driver ammvdec_av1_driver = { + .probe = ammvdec_av1_probe, + .remove = ammvdec_av1_remove, + .driver = { + .name = MULTI_DRIVER_NAME, +#ifdef CONFIG_PM + .pm = &av1_pm_ops, +#endif + } +}; +#endif +static struct mconfig av1_configs[] = { + MC_PU32("bit_depth_luma", &bit_depth_luma), + MC_PU32("bit_depth_chroma", &bit_depth_chroma), + MC_PU32("frame_width", &frame_width), + MC_PU32("frame_height", &frame_height), + MC_PU32("debug", &debug), + MC_PU32("radr", &radr), + MC_PU32("rval", &rval), + MC_PU32("pop_shorts", &pop_shorts), + MC_PU32("dbg_cmd", &dbg_cmd), + MC_PU32("dbg_skip_decode_index", &dbg_skip_decode_index), + MC_PU32("endian", &endian), + MC_PU32("step", &step), + MC_PU32("udebug_flag", &udebug_flag), + MC_PU32("decode_pic_begin", &decode_pic_begin), + MC_PU32("slice_parse_begin", &slice_parse_begin), + 
MC_PU32("i_only_flag", &i_only_flag), + MC_PU32("error_handle_policy", &error_handle_policy), + MC_PU32("buf_alloc_width", &buf_alloc_width), + MC_PU32("buf_alloc_height", &buf_alloc_height), + MC_PU32("buf_alloc_depth", &buf_alloc_depth), + MC_PU32("buf_alloc_size", &buf_alloc_size), + MC_PU32("buffer_mode", &buffer_mode), + MC_PU32("buffer_mode_dbg", &buffer_mode_dbg), + MC_PU32("max_buf_num", &max_buf_num), + MC_PU32("dynamic_buf_num_margin", &dynamic_buf_num_margin), + MC_PU32("mem_map_mode", &mem_map_mode), + MC_PU32("double_write_mode", &double_write_mode), + MC_PU32("enable_mem_saving", &enable_mem_saving), + MC_PU32("force_w_h", &force_w_h), + MC_PU32("force_fps", &force_fps), + MC_PU32("max_decoding_time", &max_decoding_time), + MC_PU32("on_no_keyframe_skiped", &on_no_keyframe_skiped), + MC_PU32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("decode_timeout_val", &decode_timeout_val), + MC_PU32("av1_max_pic_w", &av1_max_pic_w), + MC_PU32("av1_max_pic_h", &av1_max_pic_h), +}; +static struct mconfig_node av1_node; + +static int __init amvdec_av1_driver_init_module(void) +{ + //struct BuffInfo_s *p_buf_info; + int i; +#ifdef BUFMGR_ONLY_OLD_CHIP + debug |= AOM_DEBUG_BUFMGR_ONLY; +#endif + /* + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + p_buf_info = &aom_workbuff_spec[1]; + else + p_buf_info = &aom_workbuff_spec[1]; + } else + p_buf_info = &aom_workbuff_spec[0]; + + init_buff_spec(NULL, p_buf_info); + work_buf_size = + (p_buf_info->end_adr - p_buf_info->start_adr + + 0xffff) & (~0xffff); + */ + for (i = 0; i < WORK_BUF_SPEC_NUM; i++) + init_buff_spec(NULL, &aom_workbuff_spec[i]); + + pr_debug("amvdec_av1 module init\n"); + + error_handle_policy = 0; + +#ifdef ERROR_HANDLE_DEBUG + dbg_nal_skip_flag = 0; + dbg_nal_skip_count = 0; +#endif + udebug_flag = 0; + decode_pic_begin = 0; + slice_parse_begin = 0; + step = 0; + buf_alloc_size = 0; +#ifdef MULTI_INSTANCE_SUPPORT + if 
(platform_driver_register(&ammvdec_av1_driver)) + pr_err("failed to register ammvdec_av1 driver\n"); + +#endif + if (platform_driver_register(&amvdec_av1_driver)) { + pr_err("failed to register amvdec_av1 driver\n"); + return -ENODEV; + } + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) { + amvdec_av1_profile.profile = + "10bit, dwrite, compressed, no_head, v4l-uvm"; + } else if (((get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2) || is_cpu_tm2_revb()) + && (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5)) { + amvdec_av1_profile.profile = + "8k, 10bit, dwrite, compressed, no_head, frame_dv, v4l-uvm"; + } else { + amvdec_av1_profile.name = "av1_unsupport"; + } + + vcodec_profile_register(&amvdec_av1_profile); + amvdec_av1_profile_mult = amvdec_av1_profile; +#ifdef DEBUG_USE_VP9_DEVICE_NAME + + amvdec_av1_profile_mult.name = "mvp9"; + vcodec_profile_register(&amvdec_av1_profile_mult); + INIT_REG_NODE_CONFIGS("media.decoder", &av1_node, + "vp9", av1_configs, CONFIG_FOR_RW); + +#else + amvdec_av1_profile_mult.name = "mav1"; + vcodec_profile_register(&amvdec_av1_profile_mult); + INIT_REG_NODE_CONFIGS("media.decoder", &av1_node, + "av1", av1_configs, CONFIG_FOR_RW); +#endif + vcodec_feature_register(VFORMAT_AV1, 0); + + return 0; +} + +static void __exit amvdec_av1_driver_remove_module(void) +{ + pr_debug("amvdec_av1 module remove.\n"); +#ifdef MULTI_INSTANCE_SUPPORT + platform_driver_unregister(&ammvdec_av1_driver); +#endif + platform_driver_unregister(&amvdec_av1_driver); +} + +/****************************************/ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(force_dv_enable, uint, 0664); +MODULE_PARM_DESC(force_dv_enable, "\n amvdec_av1 force_dv_enable\n"); +#endif + +module_param(bit_depth_luma, uint, 0664); +MODULE_PARM_DESC(bit_depth_luma, "\n amvdec_av1 bit_depth_luma\n"); + +module_param(bit_depth_chroma, uint, 0664); +MODULE_PARM_DESC(bit_depth_chroma, "\n amvdec_av1 bit_depth_chroma\n"); + +module_param(frame_width, uint, 0664); 
+MODULE_PARM_DESC(frame_width, "\n amvdec_av1 frame_width\n"); + +module_param(frame_height, uint, 0664); +MODULE_PARM_DESC(frame_height, "\n amvdec_av1 frame_height\n"); + +module_param(multi_frames_in_one_pack, uint, 0664); +MODULE_PARM_DESC(multi_frames_in_one_pack, "\n multi_frames_in_one_pack\n"); + +module_param(debug, uint, 0664); +MODULE_PARM_DESC(debug, "\n amvdec_av1 debug\n"); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\n radr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\n rval\n"); + +module_param(pop_shorts, uint, 0664); +MODULE_PARM_DESC(pop_shorts, "\n rval\n"); + +module_param(dbg_cmd, uint, 0664); +MODULE_PARM_DESC(dbg_cmd, "\n dbg_cmd\n"); + +module_param(dbg_skip_decode_index, uint, 0664); +MODULE_PARM_DESC(dbg_skip_decode_index, "\n dbg_skip_decode_index\n"); + +module_param(endian, uint, 0664); +MODULE_PARM_DESC(endian, "\n rval\n"); + +module_param(step, uint, 0664); +MODULE_PARM_DESC(step, "\n amvdec_av1 step\n"); + +module_param(decode_pic_begin, uint, 0664); +MODULE_PARM_DESC(decode_pic_begin, "\n amvdec_av1 decode_pic_begin\n"); + +module_param(slice_parse_begin, uint, 0664); +MODULE_PARM_DESC(slice_parse_begin, "\n amvdec_av1 slice_parse_begin\n"); + +module_param(i_only_flag, uint, 0664); +MODULE_PARM_DESC(i_only_flag, "\n amvdec_av1 i_only_flag\n"); + +module_param(low_latency_flag, uint, 0664); +MODULE_PARM_DESC(low_latency_flag, "\n amvdec_av1 low_latency_flag\n"); + +module_param(no_head, uint, 0664); +MODULE_PARM_DESC(no_head, "\n amvdec_av1 no_head\n"); + +module_param(error_handle_policy, uint, 0664); +MODULE_PARM_DESC(error_handle_policy, "\n amvdec_av1 error_handle_policy\n"); + +module_param(buf_alloc_width, uint, 0664); +MODULE_PARM_DESC(buf_alloc_width, "\n buf_alloc_width\n"); + +module_param(buf_alloc_height, uint, 0664); +MODULE_PARM_DESC(buf_alloc_height, "\n buf_alloc_height\n"); + +module_param(buf_alloc_depth, uint, 0664); +MODULE_PARM_DESC(buf_alloc_depth, "\n buf_alloc_depth\n"); 
+ +module_param(buf_alloc_size, uint, 0664); +MODULE_PARM_DESC(buf_alloc_size, "\n buf_alloc_size\n"); + +module_param(buffer_mode, uint, 0664); +MODULE_PARM_DESC(buffer_mode, "\n buffer_mode\n"); + +module_param(buffer_mode_dbg, uint, 0664); +MODULE_PARM_DESC(buffer_mode_dbg, "\n buffer_mode_dbg\n"); +/*USE_BUF_BLOCK*/ +module_param(max_buf_num, uint, 0664); +MODULE_PARM_DESC(max_buf_num, "\n max_buf_num\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +module_param(mv_buf_margin, uint, 0664); +MODULE_PARM_DESC(mv_buf_margin, "\n mv_buf_margin\n"); + +module_param(mv_buf_dynamic_alloc, uint, 0664); +MODULE_PARM_DESC(mv_buf_dynamic_alloc, "\n mv_buf_dynamic_alloc\n"); + +module_param(force_max_one_mv_buffer_size, uint, 0664); +MODULE_PARM_DESC(force_max_one_mv_buffer_size, "\n force_max_one_mv_buffer_size\n"); + +module_param(run_ready_min_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_min_buf_num, "\n run_ready_min_buf_num\n"); + +/**/ + +module_param(mem_map_mode, uint, 0664); +MODULE_PARM_DESC(mem_map_mode, "\n mem_map_mode\n"); + +#ifdef SUPPORT_10BIT +module_param(double_write_mode, uint, 0664); +MODULE_PARM_DESC(double_write_mode, "\n double_write_mode\n"); + +module_param(enable_mem_saving, uint, 0664); +MODULE_PARM_DESC(enable_mem_saving, "\n enable_mem_saving\n"); + +module_param(force_w_h, uint, 0664); +MODULE_PARM_DESC(force_w_h, "\n force_w_h\n"); +#endif + +module_param(force_fps, uint, 0664); +MODULE_PARM_DESC(force_fps, "\n force_fps\n"); + +module_param(max_decoding_time, uint, 0664); +MODULE_PARM_DESC(max_decoding_time, "\n max_decoding_time\n"); + +module_param(on_no_keyframe_skiped, uint, 0664); +MODULE_PARM_DESC(on_no_keyframe_skiped, "\n on_no_keyframe_skiped\n"); + +#ifdef MCRCC_ENABLE +module_param(mcrcc_cache_alg_flag, uint, 0664); +MODULE_PARM_DESC(mcrcc_cache_alg_flag, "\n mcrcc_cache_alg_flag\n"); +#endif + +#ifdef MULTI_INSTANCE_SUPPORT 
+module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n av1 start_decode_buf_level\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, + "\n av1 decode_timeout_val\n"); + +module_param(av1_max_pic_w, uint, 0664); +MODULE_PARM_DESC(av1_max_pic_w, "\n av1_max_pic_w\n"); + +module_param(av1_max_pic_h, uint, 0664); +MODULE_PARM_DESC(av1_max_pic_h, "\n av1_max_pic_h\n"); + +module_param_array(decode_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(display_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, + &max_decode_instance_num, 0664); + +module_param_array(run_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(input_empty, uint, + &max_decode_instance_num, 0664); + +module_param_array(not_run_ready, uint, + &max_decode_instance_num, 0664); + +#ifdef AOM_AV1_MMU_DW +module_param_array(dw_mmu_enable, uint, + &max_decode_instance_num, 0664); +#endif + +module_param(prefix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(prefix_aux_buf_size, "\n prefix_aux_buf_size\n"); + +module_param(suffix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(suffix_aux_buf_size, "\n suffix_aux_buf_size\n"); + +#endif + +#ifdef DUMP_FILMGRAIN +module_param(fg_dump_index, uint, 0664); +MODULE_PARM_DESC(fg_dump_index, "\n fg_dump_index\n"); +#endif + +module_param(get_picture_qos, uint, 0664); +MODULE_PARM_DESC(get_picture_qos, "\n amvdec_av1 get_picture_qos\n"); + +module_param(force_bufspec, uint, 0664); +MODULE_PARM_DESC(force_bufspec, "\n amvdec_h265 force_bufspec\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_h265 udebug_flag\n"); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(dv_toggle_prov_name, uint, 0664); +MODULE_PARM_DESC(dv_toggle_prov_name, "\n dv_toggle_prov_name\n"); +#endif + +module_param(udebug_pause_pos, uint, 0664); 
+MODULE_PARM_DESC(udebug_pause_pos, "\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +#ifdef DEBUG_CRC_ERROR +module_param(crc_debug_flag, uint, 0664); +MODULE_PARM_DESC(crc_debug_flag, "\n crc_debug_flag\n"); +#endif + +#ifdef DEBUG_CMD +module_param(debug_cmd_wait_type, uint, 0664); +MODULE_PARM_DESC(debug_cmd_wait_type, "\n debug_cmd_wait_type\n"); + +module_param(debug_cmd_wait_count, uint, 0664); +MODULE_PARM_DESC(debug_cmd_wait_count, "\n debug_cmd_wait_count\n"); + +module_param(header_dump_size, uint, 0664); +MODULE_PARM_DESC(header_dump_size, "\n header_dump_size\n"); +#endif + +module_param(force_pts_unstable, uint, 0664); +MODULE_PARM_DESC(force_pts_unstable, "\n force_pts_unstable\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n without_display_mode\n"); + +module_param(v4l_bitstream_id_enable, uint, 0664); +MODULE_PARM_DESC(v4l_bitstream_id_enable, "\n v4l_bitstream_id_enable\n"); + +module_init(amvdec_av1_driver_init_module); +module_exit(amvdec_av1_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC av1 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +
diff --git a/drivers/frame_provider/decoder/vav1/vav1.h b/drivers/frame_provider/decoder/vav1/vav1.h new file mode 100644 index 0000000..0f2b765 --- /dev/null +++ b/drivers/frame_provider/decoder/vav1/vav1.h
@@ -0,0 +1,22 @@ +/* + * drivers/amlogic/amports/vav1.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef VAV1_H +#define VAV1_H +void adapt_coef_probs(int pic_count, int prev_kf, int cur_kf, int pre_fc, +unsigned int *prev_prob, unsigned int *cur_prob, unsigned int *count); +#endif
diff --git a/drivers/frame_provider/decoder/vc1/Makefile b/drivers/frame_provider/decoder/vc1/Makefile new file mode 100644 index 0000000..b43a600 --- /dev/null +++ b/drivers/frame_provider/decoder/vc1/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_VC1) += amvdec_vc1.o +amvdec_vc1-objs += vvc1.o
diff --git a/drivers/frame_provider/decoder/vc1/vvc1.c b/drivers/frame_provider/decoder/vc1/vvc1.c new file mode 100644 index 0000000..1f0dfa9 --- /dev/null +++ b/drivers/frame_provider/decoder/vc1/vvc1.c
@@ -0,0 +1,1440 @@ +/* + * drivers/amlogic/amports/vvc1.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/canvas/canvas_mgr.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../utils/amvdec.h" +#include "../utils/vdec.h" +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../utils/firmware.h" +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/delay.h> +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../utils/vdec_feature.h" + +#define DRIVER_NAME "amvdec_vc1" +#define MODULE_NAME "amvdec_vc1" + +#define DEBUG_PTS +#if 1 /* //MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ 
+#define NV21 +#endif + +#define VC1_MAX_SUPPORT_SIZE (1920*1088) + +#define I_PICTURE 0 +#define P_PICTURE 1 +#define B_PICTURE 2 + +#define ORI_BUFFER_START_ADDR 0x01000000 + +#define INTERLACE_FLAG 0x80 +#define BOTTOM_FIELD_FIRST_FLAG 0x40 + +/* protocol registers */ +#define VC1_PIC_RATIO AV_SCRATCH_0 +#define VC1_ERROR_COUNT AV_SCRATCH_6 +#define VC1_SOS_COUNT AV_SCRATCH_7 +#define VC1_BUFFERIN AV_SCRATCH_8 +#define VC1_BUFFEROUT AV_SCRATCH_9 +#define VC1_REPEAT_COUNT AV_SCRATCH_A +#define VC1_TIME_STAMP AV_SCRATCH_B +#define VC1_OFFSET_REG AV_SCRATCH_C +#define MEM_OFFSET_REG AV_SCRATCH_F + +#define VF_POOL_SIZE 16 +#define DECODE_BUFFER_NUM_MAX 4 +#define WORKSPACE_SIZE (2 * SZ_1M) +#define MAX_BMMU_BUFFER_NUM (DECODE_BUFFER_NUM_MAX + 1) +#define VF_BUFFER_IDX(n) (1 + n) +#define DCAC_BUFF_START_ADDR 0x01f00000 + + +#define PUT_INTERVAL (HZ/100) + +#if 1 /* /MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +/* TODO: move to register headers */ +#define VPP_VD1_POSTBLEND (1 << 10) +#define MEM_FIFO_CNT_BIT 16 +#define MEM_LEVEL_CNT_BIT 18 +#endif +static struct vdec_info *gvs; +static struct vdec_s *vdec = NULL; + +static struct vframe_s *vvc1_vf_peek(void *); +static struct vframe_s *vvc1_vf_get(void *); +static void vvc1_vf_put(struct vframe_s *, void *); +static int vvc1_vf_states(struct vframe_states *states, void *); +static int vvc1_event_cb(int type, void *data, void *private_data); + +static int vvc1_prot_init(void); +static void vvc1_local_init(bool is_reset); + +static const char vvc1_dec_id[] = "vvc1-dev"; + +#define PROVIDER_NAME "decoder.vc1" +static const struct vframe_operations_s vvc1_vf_provider = { + .peek = vvc1_vf_peek, + .get = vvc1_vf_get, + .put = vvc1_vf_put, + .event_cb = vvc1_event_cb, + .vf_states = vvc1_vf_states, +}; +static void *mm_blk_handle; +static struct vframe_provider_s vvc1_vf_prov; + +static DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); +static 
DECLARE_KFIFO(recycle_q, struct vframe_s *, VF_POOL_SIZE); + +static struct vframe_s vfpool[VF_POOL_SIZE]; +static struct vframe_s vfpool2[VF_POOL_SIZE]; +static int cur_pool_idx; + +static s32 vfbuf_use[DECODE_BUFFER_NUM_MAX]; +static struct timer_list recycle_timer; +static u32 stat; +static u32 buf_size = 32 * 1024 * 1024; +static u32 buf_offset; +static u32 avi_flag; +static u32 unstable_pts_debug; +static u32 unstable_pts; +static u32 vvc1_ratio; +static u32 vvc1_format; + +static u32 intra_output; +static u32 frame_width, frame_height, frame_dur; +static u32 saved_resolution; +static u32 pts_by_offset = 1; +static u32 total_frame; +static u32 next_pts; +static u64 next_pts_us64; +static bool is_reset; +static struct work_struct set_clk_work; +static struct work_struct error_wd_work; +static struct canvas_config_s vc1_canvas_config[DECODE_BUFFER_NUM_MAX][3]; +spinlock_t vc1_rp_lock; + + +#ifdef DEBUG_PTS +static u32 pts_hit, pts_missed, pts_i_hit, pts_i_missed; +#endif +static DEFINE_SPINLOCK(lock); + +static struct dec_sysinfo vvc1_amstream_dec_info; + +struct frm_s { + int state; + u32 start_pts; + int num; + u32 end_pts; + u32 rate; + u32 trymax; +}; + +static struct frm_s frm; + +enum { + RATE_MEASURE_START_PTS = 0, + RATE_MEASURE_END_PTS, + RATE_MEASURE_DONE +}; +#define RATE_MEASURE_NUM 8 +#define RATE_CORRECTION_THRESHOLD 5 +#define RATE_24_FPS 3755 /* 23.97 */ +#define RATE_30_FPS 3003 /* 29.97 */ +#define DUR2PTS(x) ((x)*90/96) +#define PTS2DUR(x) ((x)*96/90) + +static inline int pool_index(struct vframe_s *vf) +{ + if ((vf >= &vfpool[0]) && (vf <= &vfpool[VF_POOL_SIZE - 1])) + return 0; + else if ((vf >= &vfpool2[0]) && (vf <= &vfpool2[VF_POOL_SIZE - 1])) + return 1; + else + return -1; +} + +static inline bool close_to(int a, int b, int m) +{ + return abs(a - b) < m; +} + +static inline u32 index2canvas(u32 index) +{ + const u32 canvas_tab[DECODE_BUFFER_NUM_MAX] = { +#if 1 /* ALWASY.MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + 0x010100, 0x030302, 
0x050504, 0x070706/*, + 0x090908, 0x0b0b0a, 0x0d0d0c, 0x0f0f0e*/ +#else + 0x020100, 0x050403, 0x080706, 0x0b0a09 +#endif + }; + + return canvas_tab[index]; +} + +static void set_aspect_ratio(struct vframe_s *vf, unsigned int pixel_ratio) +{ + int ar = 0; + + if (vvc1_ratio == 0) { + /* always stretch to 16:9 */ + vf->ratio_control |= (0x90 << DISP_RATIO_ASPECT_RATIO_BIT); + } else if (pixel_ratio > 0x0f) { + ar = (vvc1_amstream_dec_info.height * (pixel_ratio & 0xff) * + vvc1_ratio) / (vvc1_amstream_dec_info.width * + (pixel_ratio >> 8)); + } else { + switch (pixel_ratio) { + case 0: + ar = (vvc1_amstream_dec_info.height * vvc1_ratio) / + vvc1_amstream_dec_info.width; + break; + case 1: + vf->sar_width = 1; + vf->sar_height = 1; + ar = (vf->height * vvc1_ratio) / vf->width; + break; + case 2: + vf->sar_width = 12; + vf->sar_height = 11; + ar = (vf->height * 11 * vvc1_ratio) / (vf->width * 12); + break; + case 3: + vf->sar_width = 10; + vf->sar_height = 11; + ar = (vf->height * 11 * vvc1_ratio) / (vf->width * 10); + break; + case 4: + vf->sar_width = 16; + vf->sar_height = 11; + ar = (vf->height * 11 * vvc1_ratio) / (vf->width * 16); + break; + case 5: + vf->sar_width = 40; + vf->sar_height = 33; + ar = (vf->height * 33 * vvc1_ratio) / (vf->width * 40); + break; + case 6: + vf->sar_width = 24; + vf->sar_height = 11; + ar = (vf->height * 11 * vvc1_ratio) / (vf->width * 24); + break; + case 7: + vf->sar_width = 20; + vf->sar_height = 11; + ar = (vf->height * 11 * vvc1_ratio) / (vf->width * 20); + break; + case 8: + vf->sar_width = 32; + vf->sar_height = 11; + ar = (vf->height * 11 * vvc1_ratio) / (vf->width * 32); + break; + case 9: + vf->sar_width = 80; + vf->sar_height = 33; + ar = (vf->height * 33 * vvc1_ratio) / (vf->width * 80); + break; + case 10: + vf->sar_width = 18; + vf->sar_height = 11; + ar = (vf->height * 11 * vvc1_ratio) / (vf->width * 18); + break; + case 11: + vf->sar_width = 15; + vf->sar_height = 11; + ar = (vf->height * 11 * vvc1_ratio) / (vf->width 
* 15); + break; + case 12: + vf->sar_width = 64; + vf->sar_height = 33; + ar = (vf->height * 33 * vvc1_ratio) / (vf->width * 64); + break; + case 13: + vf->sar_width = 160; + vf->sar_height = 99; + ar = (vf->height * 99 * vvc1_ratio) / + (vf->width * 160); + break; + default: + vf->sar_width = 1; + vf->sar_height = 1; + ar = (vf->height * vvc1_ratio) / vf->width; + break; + } + } + + ar = min(ar, DISP_RATIO_ASPECT_RATIO_MAX); + + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + /*vf->ratio_control |= DISP_RATIO_FORCECONFIG | DISP_RATIO_KEEPRATIO;*/ +} + +static void vc1_set_rp(void) { + unsigned long flags; + + spin_lock_irqsave(&vc1_rp_lock, flags); + STBUF_WRITE(&vdec->vbuf, set_rp, + READ_VREG(VLD_MEM_VIFIFO_RP)); + spin_unlock_irqrestore(&vc1_rp_lock, flags); +} + +static irqreturn_t vvc1_isr(int irq, void *dev_id) +{ + u32 reg; + struct vframe_s *vf = NULL; + u32 repeat_count; + u32 picture_type; + u32 buffer_index; + unsigned int pts, pts_valid = 0, offset = 0; + u32 v_width, v_height; + u64 pts_us64 = 0; + u32 frame_size; + + reg = READ_VREG(VC1_BUFFEROUT); + + if (reg) { + v_width = READ_VREG(AV_SCRATCH_J); + v_height = READ_VREG(AV_SCRATCH_K); + + vc1_set_rp(); + + if (v_width && v_width <= 4096 + && (v_width != vvc1_amstream_dec_info.width)) { + pr_info("frame width changed %d to %d\n", + vvc1_amstream_dec_info.width, v_width); + vvc1_amstream_dec_info.width = v_width; + frame_width = v_width; + } + if (v_height && v_height <= 4096 + && (v_height != vvc1_amstream_dec_info.height)) { + pr_info("frame height changed %d to %d\n", + vvc1_amstream_dec_info.height, v_height); + vvc1_amstream_dec_info.height = v_height; + frame_height = v_height; + } + + if (pts_by_offset) { + offset = READ_VREG(VC1_OFFSET_REG); + if (pts_lookup_offset_us64( + PTS_TYPE_VIDEO, + offset, &pts, &frame_size, + 0, &pts_us64) == 0) { + pts_valid = 1; +#ifdef DEBUG_PTS + pts_hit++; +#endif + } else { +#ifdef DEBUG_PTS + pts_missed++; +#endif + } + } + + repeat_count = 
READ_VREG(VC1_REPEAT_COUNT); + buffer_index = ((reg & 0x7) - 1) & 3; + picture_type = (reg >> 3) & 7; + + if (buffer_index >= DECODE_BUFFER_NUM_MAX) { + pr_info("fatal error, invalid buffer index."); + return IRQ_HANDLED; + } + + if ((intra_output == 0) && (picture_type != 0)) { + WRITE_VREG(VC1_BUFFERIN, ~(1 << buffer_index)); + WRITE_VREG(VC1_BUFFEROUT, 0); + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + return IRQ_HANDLED; + } + + intra_output = 1; + +#ifdef DEBUG_PTS + if (picture_type == I_PICTURE) { + /* pr_info("I offset 0x%x, + *pts_valid %d\n", offset, pts_valid); + */ + if (!pts_valid) + pts_i_missed++; + else + pts_i_hit++; + } +#endif + + if ((pts_valid) && (frm.state != RATE_MEASURE_DONE)) { + if (frm.state == RATE_MEASURE_START_PTS) { + frm.start_pts = pts; + frm.state = RATE_MEASURE_END_PTS; + frm.trymax = RATE_MEASURE_NUM; + } else if (frm.state == RATE_MEASURE_END_PTS) { + if (frm.num >= frm.trymax) { + frm.end_pts = pts; + frm.rate = (frm.end_pts - + frm.start_pts) / frm.num; + pr_info("frate before=%d,%d,num=%d\n", + frm.rate, + DUR2PTS(vvc1_amstream_dec_info.rate), + frm.num); + /* check if measured rate is same as + * settings from upper layer + * and correct it if necessary + */ + if ((close_to(frm.rate, RATE_30_FPS, + RATE_CORRECTION_THRESHOLD) && + close_to( + DUR2PTS( + vvc1_amstream_dec_info.rate), + RATE_24_FPS, + RATE_CORRECTION_THRESHOLD)) + || + (close_to( + frm.rate, RATE_24_FPS, + RATE_CORRECTION_THRESHOLD) + && + close_to(DUR2PTS( + vvc1_amstream_dec_info.rate), + RATE_30_FPS, + RATE_CORRECTION_THRESHOLD))) { + pr_info( + "vvc1: frate from %d to %d\n", + vvc1_amstream_dec_info.rate, + PTS2DUR(frm.rate)); + + vvc1_amstream_dec_info.rate = + PTS2DUR(frm.rate); + frm.state = RATE_MEASURE_DONE; + } else if (close_to(frm.rate, + DUR2PTS( + vvc1_amstream_dec_info.rate), + RATE_CORRECTION_THRESHOLD)) + frm.state = RATE_MEASURE_DONE; + else { + +/* maybe still have problem, + * try next double frames.... 
+ */ + frm.state = RATE_MEASURE_DONE; + frm.start_pts = pts; + frm.state = + RATE_MEASURE_END_PTS; + /*60 fps*60 S */ + frm.num = 0; + } + } + } + } + + if (frm.state != RATE_MEASURE_DONE) + frm.num += (repeat_count > 1) ? repeat_count : 1; + if (vvc1_amstream_dec_info.rate == 0) + vvc1_amstream_dec_info.rate = PTS2DUR(frm.rate); + + if (reg & INTERLACE_FLAG) { /* interlace */ + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->width = vvc1_amstream_dec_info.width; + vf->height = vvc1_amstream_dec_info.height; + vf->bufWidth = 1920; + vf->flag = 0; + + if (pts_valid) { + vf->pts = pts; + vf->pts_us64 = pts_us64; + if ((repeat_count > 1) && avi_flag) { + vf->duration = + vvc1_amstream_dec_info.rate * + repeat_count >> 1; + next_pts = pts + + (vvc1_amstream_dec_info.rate * + repeat_count >> 1) * 15 / 16; + next_pts_us64 = pts_us64 + + ((vvc1_amstream_dec_info.rate * + repeat_count >> 1) * 15 / 16) * + 100 / 9; + } else { + vf->duration = + vvc1_amstream_dec_info.rate >> 1; + next_pts = 0; + next_pts_us64 = 0; + if (picture_type != I_PICTURE && + unstable_pts) { + vf->pts = 0; + vf->pts_us64 = 0; + } + } + } else { + vf->pts = next_pts; + vf->pts_us64 = next_pts_us64; + if ((repeat_count > 1) && avi_flag) { + vf->duration = + vvc1_amstream_dec_info.rate * + repeat_count >> 1; + if (next_pts != 0) { + next_pts += ((vf->duration) - + ((vf->duration) >> 4)); + } + if (next_pts_us64 != 0) { + next_pts_us64 += + div_u64((u64)((vf->duration) - + ((vf->duration) >> 4)) * + 100, 9); + } + } else { + vf->duration = + vvc1_amstream_dec_info.rate >> 1; + next_pts = 0; + next_pts_us64 = 0; + if (picture_type != I_PICTURE && + unstable_pts) { + vf->pts = 0; + vf->pts_us64 = 0; + } + } + } + + vf->duration_pulldown = 0; + vf->type = (reg & BOTTOM_FIELD_FIRST_FLAG) ? 
+ VIDTYPE_INTERLACE_BOTTOM : VIDTYPE_INTERLACE_TOP; +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->orientation = 0; + vf->type_original = vf->type; + set_aspect_ratio(vf, READ_VREG(VC1_PIC_RATIO)); + + vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + buffer_index); + if (is_support_vdec_canvas()) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->canvas0_config[0] = vc1_canvas_config[buffer_index][0]; + vf->canvas0_config[1] = vc1_canvas_config[buffer_index][1]; +#ifdef NV21 + vf->plane_num = 2; +#else + vf->canvas0_config[2] = vc1_canvas_config[buffer_index][2]; + vf->plane_num = 3; +#endif + } + kfifo_put(&display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + + vf_notify_receiver( + PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->width = vvc1_amstream_dec_info.width; + vf->height = vvc1_amstream_dec_info.height; + vf->bufWidth = 1920; + vf->flag = 0; + + vf->pts = next_pts; + vf->pts_us64 = next_pts_us64; + if ((repeat_count > 1) && avi_flag) { + vf->duration = + vvc1_amstream_dec_info.rate * + repeat_count >> 1; + if (next_pts != 0) { + next_pts += + ((vf->duration) - + ((vf->duration) >> 4)); + } + if (next_pts_us64 != 0) { + next_pts_us64 += div_u64((u64)((vf->duration) - + ((vf->duration) >> 4)) * 100, 9); + } + } else { + vf->duration = + vvc1_amstream_dec_info.rate >> 1; + next_pts = 0; + next_pts_us64 = 0; + if (picture_type != I_PICTURE && + unstable_pts) { + vf->pts = 0; + vf->pts_us64 = 0; + } + } + + vf->duration_pulldown = 0; + vf->type = (reg & BOTTOM_FIELD_FIRST_FLAG) ? 
+ VIDTYPE_INTERLACE_TOP : VIDTYPE_INTERLACE_BOTTOM; +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->orientation = 0; + vf->type_original = vf->type; + set_aspect_ratio(vf, READ_VREG(VC1_PIC_RATIO)); + + vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + buffer_index); + + if (is_support_vdec_canvas()) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->canvas0_config[0] = vc1_canvas_config[buffer_index][0]; + vf->canvas0_config[1] = vc1_canvas_config[buffer_index][1]; +#ifdef NV21 + vf->plane_num = 2; +#else + vf->canvas0_config[2] = vc1_canvas_config[buffer_index][2]; + vf->plane_num = 3; +#endif + } + kfifo_put(&display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + + vf_notify_receiver( + PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + } else { /* progressive */ + if (kfifo_get(&newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->width = vvc1_amstream_dec_info.width; + vf->height = vvc1_amstream_dec_info.height; + vf->bufWidth = 1920; + vf->flag = 0; + + if (pts_valid) { + vf->pts = pts; + vf->pts_us64 = pts_us64; + if ((repeat_count > 1) && avi_flag) { + vf->duration = + vvc1_amstream_dec_info.rate * + repeat_count; + next_pts = + pts + + (vvc1_amstream_dec_info.rate * + repeat_count) * 15 / 16; + next_pts_us64 = pts_us64 + + ((vvc1_amstream_dec_info.rate * + repeat_count) * 15 / 16) * + 100 / 9; + } else { + vf->duration = + vvc1_amstream_dec_info.rate; + next_pts = 0; + next_pts_us64 = 0; + if (picture_type != I_PICTURE && + unstable_pts) { + vf->pts = 0; + vf->pts_us64 = 0; + } + } + } else { + vf->pts = next_pts; + vf->pts_us64 = next_pts_us64; + if ((repeat_count > 1) && avi_flag) { + vf->duration = + vvc1_amstream_dec_info.rate * + repeat_count; + if (next_pts != 0) { + next_pts 
+= ((vf->duration) - + ((vf->duration) >> 4)); + } + if (next_pts_us64 != 0) { + next_pts_us64 += + div_u64((u64)((vf->duration) - + ((vf->duration) >> 4)) * + 100, 9); + } + } else { + vf->duration = + vvc1_amstream_dec_info.rate; + next_pts = 0; + next_pts_us64 = 0; + if (picture_type != I_PICTURE && + unstable_pts) { + vf->pts = 0; + vf->pts_us64 = 0; + } + } + } + + vf->duration_pulldown = 0; +#ifdef NV21 + vf->type = + VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | + VIDTYPE_VIU_NV21; +#else + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; +#endif + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->orientation = 0; + vf->type_original = vf->type; + set_aspect_ratio(vf, READ_VREG(VC1_PIC_RATIO)); + + vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + mm_blk_handle, + buffer_index); + if (is_support_vdec_canvas()) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->canvas0_config[0] = vc1_canvas_config[buffer_index][0]; + vf->canvas0_config[1] = vc1_canvas_config[buffer_index][1]; +#ifdef NV21 + vf->plane_num = 2; +#else + vf->canvas0_config[2] = vc1_canvas_config[buffer_index][2]; + vf->plane_num = 3; +#endif + } + kfifo_put(&display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(MODULE_NAME, vf->pts); + + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + } + frame_dur = vvc1_amstream_dec_info.rate; + total_frame++; + + /*count info*/ + gvs->frame_dur = frame_dur; + vdec_count_info(gvs, 0, offset); + + /* pr_info("PicType = %d, PTS = 0x%x, repeat + *count %d\n", picture_type, vf->pts, repeat_count); + */ + WRITE_VREG(VC1_BUFFEROUT, 0); + } + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + return IRQ_HANDLED; +} + +static struct vframe_s *vvc1_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + + if (kfifo_peek(&display_q, &vf)) + return vf; + + return NULL; +} + +static struct vframe_s *vvc1_vf_get(void *op_arg) +{ + struct vframe_s *vf; + + if (kfifo_get(&display_q, &vf)) + return 
vf; + + return NULL; +} + +static void vvc1_vf_put(struct vframe_s *vf, void *op_arg) +{ + if (pool_index(vf) == cur_pool_idx) + kfifo_put(&recycle_q, (const struct vframe_s *)vf); +} + +static int vvc1_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + + spin_lock_irqsave(&lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&newframe_q); + states->buf_avail_num = kfifo_len(&display_q); + states->buf_recycle_num = kfifo_len(&recycle_q); + + spin_unlock_irqrestore(&lock, flags); + + return 0; +} + +static int vvc1_event_cb(int type, void *data, void *private_data) +{ + if (type & VFRAME_EVENT_RECEIVER_RESET) { + unsigned long flags; + + amvdec_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_light_unreg_provider(&vvc1_vf_prov); +#endif + spin_lock_irqsave(&lock, flags); + vvc1_local_init(true); + vvc1_prot_init(); + spin_unlock_irqrestore(&lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vvc1_vf_prov); +#endif + amvdec_start(); + } + + if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE && vdec) + req->req_result[0] = vdec_secure(vdec); + else + req->req_result[0] = 0xffffffff; + } + return 0; +} + +int vvc1_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + if (!(stat & STAT_VDEC_RUN)) + return -1; + + vstatus->frame_width = vvc1_amstream_dec_info.width; + vstatus->frame_height = vvc1_amstream_dec_info.height; + if (vvc1_amstream_dec_info.rate != 0) + vstatus->frame_rate = 96000 / vvc1_amstream_dec_info.rate; + else + vstatus->frame_rate = -1; + vstatus->error_count = READ_VREG(AV_SCRATCH_C); + vstatus->status = stat; + vstatus->bit_rate = gvs->bit_rate; + vstatus->frame_dur = vvc1_amstream_dec_info.rate; + vstatus->frame_data = gvs->frame_data; + vstatus->total_data = gvs->total_data; + vstatus->frame_count = gvs->frame_count; + 
vstatus->error_frame_count = gvs->error_frame_count; + vstatus->drop_frame_count = gvs->drop_frame_count; + vstatus->total_data = gvs->total_data; + vstatus->samp_cnt = gvs->samp_cnt; + vstatus->offset = gvs->offset; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + +int vvc1_set_isreset(struct vdec_s *vdec, int isreset) +{ + is_reset = isreset; + return 0; +} + +static int vvc1_vdec_info_init(void) +{ + gvs = kzalloc(sizeof(struct vdec_info), GFP_KERNEL); + if (NULL == gvs) { + pr_info("the struct of vdec status malloc failed.\n"); + return -ENOMEM; + } + return 0; +} + +/****************************************/ +static int vvc1_canvas_init(void) +{ + int i, ret; + u32 canvas_width, canvas_height; + u32 alloc_size, decbuf_size, decbuf_y_size, decbuf_uv_size; + unsigned long buf_start; + + if (buf_size <= 0x00400000) { + /* SD only */ + canvas_width = 768; + canvas_height = 576; + decbuf_y_size = 0x80000; + decbuf_uv_size = 0x20000; + decbuf_size = 0x100000; + } else { + /* HD & SD */ + canvas_width = 1920; + canvas_height = 1088; + decbuf_y_size = 0x200000; + decbuf_uv_size = 0x80000; + decbuf_size = 0x300000; + } + + for (i = 0; i < MAX_BMMU_BUFFER_NUM; i++) { + /* workspace mem */ + if (i == (MAX_BMMU_BUFFER_NUM - 1)) + alloc_size = WORKSPACE_SIZE; + else + alloc_size = decbuf_size; + + ret = decoder_bmmu_box_alloc_buf_phy(mm_blk_handle, i, + alloc_size, DRIVER_NAME, &buf_start); + if (ret < 0) + return ret; + if (i == (MAX_BMMU_BUFFER_NUM - 1)) { + buf_offset = buf_start - DCAC_BUFF_START_ADDR; + continue; + } + +#ifdef NV21 + config_cav_lut_ex(2 * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); + vc1_canvas_config[i][0].endian = 0; + vc1_canvas_config[i][0].width = canvas_width; + vc1_canvas_config[i][0].height = canvas_height; + vc1_canvas_config[i][0].block_mode = CANVAS_BLKMODE_32X32; + vc1_canvas_config[i][0].phy_addr = buf_start; + + 
config_cav_lut_ex(2 * i + 1, + buf_start + + decbuf_y_size, canvas_width, + canvas_height / 2, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); + vc1_canvas_config[i][1].endian = 0; + vc1_canvas_config[i][1].width = canvas_width; + vc1_canvas_config[i][1].height = canvas_height >> 1; + vc1_canvas_config[i][1].block_mode = CANVAS_BLKMODE_32X32; + vc1_canvas_config[i][1].phy_addr = buf_start + decbuf_y_size; +#else + config_cav_lut_ex(3 * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); + vc1_canvas_config[i][0].endian = 0; + vc1_canvas_config[i][0].width = canvas_width; + vc1_canvas_config[i][0].height = canvas_height; + vc1_canvas_config[i][0].block_mode = CANVAS_BLKMODE_32X32; + vc1_canvas_config[i][0].phy_addr = buf_start; + config_cav_lut_ex(3 * i + 1, + buf_start + + decbuf_y_size, canvas_width / 2, + canvas_height / 2, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); + vc1_canvas_config[i][1].endian = 0; + vc1_canvas_config[i][1].width = canvas_width >> 1; + vc1_canvas_config[i][1].height = canvas_height >> 1; + vc1_canvas_config[i][1].block_mode = CANVAS_BLKMODE_32X32; + vc1_canvas_config[i][1].phy_addr = buf_start + decbuf_y_size; + config_cav_lut_ex(3 * i + 2, + buf_start + + decbuf_y_size + decbuf_uv_size, + canvas_width / 2, canvas_height / 2, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_32X32, 0, VDEC_1); + vc1_canvas_config[i][2].endian = 0; + vc1_canvas_config[i][2].width = canvas_width >> 1; + vc1_canvas_config[i][2].height = canvas_height >> 1; + vc1_canvas_config[i][2].block_mode = CANVAS_BLKMODE_32X32; + vc1_canvas_config[i][2].phy_addr = buf_start + + decbuf_y_size + decbuf_uv_size; +#endif + + } + return 0; +} + +static int vvc1_prot_init(void) +{ + int r; +#if 1 /* /MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 
<< 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 9) | (1 << 8)); + WRITE_VREG(DOS_SW_RESET0, 0); + +#else + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + READ_RESET_REG(RESET0_REGISTER); + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + + WRITE_RESET_REG(RESET2_REGISTER, RESET_PIC_DC | RESET_DBLK); +#endif + + WRITE_VREG(POWER_CTL_VLD, 0x10); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 2, MEM_FIFO_CNT_BIT, 2); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 8, MEM_LEVEL_CNT_BIT, 6); + + r = vvc1_canvas_init(); + + /* index v << 16 | u << 8 | y */ +#ifdef NV21 + WRITE_VREG(AV_SCRATCH_0, 0x010100); + WRITE_VREG(AV_SCRATCH_1, 0x030302); + WRITE_VREG(AV_SCRATCH_2, 0x050504); + WRITE_VREG(AV_SCRATCH_3, 0x070706); +/* WRITE_VREG(AV_SCRATCH_G, 0x090908); + WRITE_VREG(AV_SCRATCH_H, 0x0b0b0a); + WRITE_VREG(AV_SCRATCH_I, 0x0d0d0c); + WRITE_VREG(AV_SCRATCH_J, 0x0f0f0e);*/ +#else + WRITE_VREG(AV_SCRATCH_0, 0x020100); + WRITE_VREG(AV_SCRATCH_1, 0x050403); + WRITE_VREG(AV_SCRATCH_2, 0x080706); + WRITE_VREG(AV_SCRATCH_3, 0x0b0a09); + WRITE_VREG(AV_SCRATCH_G, 0x090908); + WRITE_VREG(AV_SCRATCH_H, 0x0b0b0a); + WRITE_VREG(AV_SCRATCH_I, 0x0d0d0c); + WRITE_VREG(AV_SCRATCH_J, 0x0f0f0e); +#endif + + /* notify ucode the buffer offset */ + WRITE_VREG(AV_SCRATCH_F, buf_offset); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + WRITE_VREG(VC1_SOS_COUNT, 0); + WRITE_VREG(VC1_BUFFERIN, 0); + WRITE_VREG(VC1_BUFFEROUT, 0); + + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + + return r; +} + +static void vvc1_local_init(bool is_reset) +{ + int i; + + /* vvc1_ratio = 0x100; */ + vvc1_ratio = vvc1_amstream_dec_info.ratio; + + avi_flag = (unsigned long) vvc1_amstream_dec_info.param 
& 0x01; + + unstable_pts = (((unsigned long) vvc1_amstream_dec_info.param & 0x40) >> 6); + if (unstable_pts_debug == 1) { + unstable_pts = 1; + pr_info("vc1 init , unstable_pts_debug = %u\n",unstable_pts_debug); + } + total_frame = 0; + + next_pts = 0; + + next_pts_us64 = 0; + saved_resolution = 0; + frame_width = frame_height = frame_dur = 0; +#ifdef DEBUG_PTS + pts_hit = pts_missed = pts_i_hit = pts_i_missed = 0; +#endif + + memset(&frm, 0, sizeof(frm)); + + if (!is_reset) { + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + vfbuf_use[i] = 0; + + INIT_KFIFO(display_q); + INIT_KFIFO(recycle_q); + INIT_KFIFO(newframe_q); + cur_pool_idx ^= 1; + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf; + + if (cur_pool_idx == 0) { + vf = &vfpool[i]; + vfpool[i].index = DECODE_BUFFER_NUM_MAX; + } else { + vf = &vfpool2[i]; + vfpool2[i].index = DECODE_BUFFER_NUM_MAX; + } + kfifo_put(&newframe_q, (const struct vframe_s *)vf); + } + } + + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + + mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER); +} + +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER +static void vvc1_ppmgr_reset(void) +{ + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_RESET, NULL); + + vvc1_local_init(true); + + /* vf_notify_receiver(PROVIDER_NAME, + * VFRAME_EVENT_PROVIDER_START,NULL); + */ + + pr_info("vvc1dec: vf_ppmgr_reset\n"); +} +#endif + +static void vvc1_set_clk(struct work_struct *work) +{ + int fps = 96000 / frame_dur; + + saved_resolution = frame_width * frame_height * fps; + vdec_source_changed(VFORMAT_VC1, + frame_width, frame_height, fps); + +} + +static void error_do_work(struct work_struct *work) +{ + amvdec_stop(); + msleep(20); +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vvc1_ppmgr_reset(); +#else + vf_light_unreg_provider(&vvc1_vf_prov); + vvc1_local_init(true); + 
vf_reg_provider(&vvc1_vf_prov); +#endif + vvc1_prot_init(); + amvdec_start(); +} + +static void vvc1_put_timer_func(struct timer_list *timer) +{ + if (READ_VREG(VC1_SOS_COUNT) > 10) + schedule_work(&error_wd_work); + + vc1_set_rp(); + + while (!kfifo_is_empty(&recycle_q) && (READ_VREG(VC1_BUFFERIN) == 0)) { + struct vframe_s *vf; + + if (kfifo_get(&recycle_q, &vf)) { + if ((vf->index < DECODE_BUFFER_NUM_MAX) && + (--vfbuf_use[vf->index] == 0)) { + WRITE_VREG(VC1_BUFFERIN, ~(1 << vf->index)); + vf->index = DECODE_BUFFER_NUM_MAX; + } + if (pool_index(vf) == cur_pool_idx) + kfifo_put(&newframe_q, (const struct vframe_s *)vf); + } + } + + if (frame_dur > 0 && saved_resolution != + frame_width * frame_height * (96000 / frame_dur)) + schedule_work(&set_clk_work); + timer->expires = jiffies + PUT_INTERVAL; + + add_timer(timer); +} + +static s32 vvc1_init(void) +{ + int ret = -1; + char *buf = vmalloc(0x1000 * 16); + int fw_type = VIDEO_DEC_VC1; + + if (IS_ERR_OR_NULL(buf)) + return -ENOMEM; + + pr_info("vvc1_init, format %d\n", vvc1_amstream_dec_info.format); + timer_setup(&recycle_timer, vvc1_put_timer_func, 0); + + stat |= STAT_TIMER_INIT; + + intra_output = 0; + amvdec_enable(); + + vvc1_local_init(false); + + if (vvc1_amstream_dec_info.format == VIDEO_DEC_FORMAT_WMV3) { + pr_info("WMV3 dec format\n"); + vvc1_format = VIDEO_DEC_FORMAT_WMV3; + WRITE_VREG(AV_SCRATCH_4, 0); + } else if (vvc1_amstream_dec_info.format == VIDEO_DEC_FORMAT_WVC1) { + pr_info("WVC1 dec format\n"); + vvc1_format = VIDEO_DEC_FORMAT_WVC1; + WRITE_VREG(AV_SCRATCH_4, 1); + } else + pr_info("not supported VC1 format\n"); + + if (get_firmware_data(fw_type, buf) < 0) { + amvdec_disable(); + pr_err("get firmware fail."); + vfree(buf); + return -1; + } + + ret = amvdec_loadmc_ex(VFORMAT_VC1, NULL, buf); + if (ret < 0) { + amvdec_disable(); + vfree(buf); + pr_err("VC1: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + return -EBUSY; + } + + vfree(buf); + + stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + ret = vvc1_prot_init(); + if (ret < 0) + return ret; + + if (vdec_request_irq(VDEC_IRQ_1, vvc1_isr, + "vvc1-irq", (void *)vvc1_dec_id)) { + amvdec_disable(); + + pr_info("vvc1 irq register error.\n"); + return -ENOENT; + } + + stat |= STAT_ISR_REG; +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_provider_init(&vvc1_vf_prov, + PROVIDER_NAME, &vvc1_vf_provider, NULL); + vf_reg_provider(&vvc1_vf_prov); + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_START, NULL); +#else + vf_provider_init(&vvc1_vf_prov, + PROVIDER_NAME, &vvc1_vf_provider, NULL); + vf_reg_provider(&vvc1_vf_prov); +#endif + + if (!is_reset) + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)vvc1_amstream_dec_info.rate)); + + stat |= STAT_VF_HOOK; + + recycle_timer.expires = jiffies + PUT_INTERVAL; + add_timer(&recycle_timer); + + stat |= STAT_TIMER_ARM; + + amvdec_start(); + + stat |= STAT_VDEC_RUN; + + return 0; +} + +static int amvdec_vc1_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + + if (pdata == NULL) { + pr_info("amvdec_vc1 memory resource undefined.\n"); + return -EFAULT; + } + + if (pdata->sys_info) { + vvc1_amstream_dec_info = *pdata->sys_info; + + if ((vvc1_amstream_dec_info.height != 0) && + (vvc1_amstream_dec_info.width > + (VC1_MAX_SUPPORT_SIZE/vvc1_amstream_dec_info.height))) { + pr_info("amvdec_vc1: over size, unsupport: %d * %d\n", + vvc1_amstream_dec_info.width, + vvc1_amstream_dec_info.height); + return -EFAULT; + } + } + pdata->dec_status = vvc1_dec_status; + pdata->set_isreset = vvc1_set_isreset; + is_reset = 0; + vdec = pdata; + + vvc1_vdec_info_init(); + + INIT_WORK(&error_wd_work, error_do_work); + INIT_WORK(&set_clk_work, vvc1_set_clk); + spin_lock_init(&vc1_rp_lock); + if (vvc1_init() < 0) { + pr_info("amvdec_vc1 init failed.\n"); + 
kfree(gvs); + gvs = NULL; + pdata->dec_status = NULL; + return -ENODEV; + } + + return 0; +} + +static int amvdec_vc1_remove(struct platform_device *pdev) +{ + cancel_work_sync(&error_wd_work); + if (stat & STAT_VDEC_RUN) { + amvdec_stop(); + stat &= ~STAT_VDEC_RUN; + } + + if (stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)vvc1_dec_id); + stat &= ~STAT_ISR_REG; + } + + if (stat & STAT_TIMER_ARM) { + del_timer_sync(&recycle_timer); + stat &= ~STAT_TIMER_ARM; + } + + cancel_work_sync(&set_clk_work); + if (stat & STAT_VF_HOOK) { + if (!is_reset) + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + + vf_unreg_provider(&vvc1_vf_prov); + stat &= ~STAT_VF_HOOK; + } + + amvdec_disable(); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TM2) + vdec_reset_core(NULL); + + if (mm_blk_handle) { + decoder_bmmu_box_free(mm_blk_handle); + mm_blk_handle = NULL; + } + +#ifdef DEBUG_PTS + pr_debug("pts hit %d, pts missed %d, i hit %d, missed %d\n", pts_hit, + pts_missed, pts_i_hit, pts_i_missed); + pr_debug("total frame %d, avi_flag %d, rate %d\n", + total_frame, avi_flag, + vvc1_amstream_dec_info.rate); +#endif + kfree(gvs); + gvs = NULL; + vdec = NULL; + + return 0; +} + +/****************************************/ + +static struct platform_driver amvdec_vc1_driver = { + .probe = amvdec_vc1_probe, + .remove = amvdec_vc1_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +#if defined(CONFIG_ARCH_MESON) /*meson1 only support progressive */ +static struct codec_profile_t amvdec_vc1_profile = { + .name = "vc1", + .profile = "progressive, wmv3" +}; +#else +static struct codec_profile_t amvdec_vc1_profile = { + .name = "vc1", + .profile = "progressive, interlace, wmv3" +}; +#endif + +static int __init amvdec_vc1_driver_init_module(void) +{ + pr_debug("amvdec_vc1 module init\n"); + + if (platform_driver_register(&amvdec_vc1_driver)) { + pr_err("failed to 
register amvdec_vc1 driver\n"); + return -ENODEV; + } + vcodec_profile_register(&amvdec_vc1_profile); + vcodec_feature_register(VFORMAT_VC1, 0); + return 0; +} + +static void __exit amvdec_vc1_driver_remove_module(void) +{ + pr_debug("amvdec_vc1 module remove.\n"); + + platform_driver_unregister(&amvdec_vc1_driver); +} +module_param(unstable_pts_debug, uint, 0664); +MODULE_PARM_DESC(unstable_pts_debug, "\n amvdec_vc1 unstable_pts\n"); + +/****************************************/ +module_init(amvdec_vc1_driver_init_module); +module_exit(amvdec_vc1_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC VC1 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Qi Wang <qi.wang@amlogic.com>");
diff --git a/drivers/frame_provider/decoder/vp9/Makefile b/drivers/frame_provider/decoder/vp9/Makefile new file mode 100644 index 0000000..51edefe --- /dev/null +++ b/drivers/frame_provider/decoder/vp9/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_VP9) += amvdec_vp9.o +amvdec_vp9-objs += vvp9.o
diff --git a/drivers/frame_provider/decoder/vp9/vvp9.c b/drivers/frame_provider/decoder/vp9/vvp9.c new file mode 100644 index 0000000..e6a47fd --- /dev/null +++ b/drivers/frame_provider/decoder/vp9/vvp9.c
@@ -0,0 +1,12840 @@ + /* + * drivers/amlogic/amports/vvp9.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/slab.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../utils/decoder_mmu_box.h" +#include "../utils/decoder_bmmu_box.h" + +#define MEM_NAME "codec_vp9" +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../utils/vdec.h" +#include "../utils/amvdec.h" +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +#include "../utils/vdec_profile.h" +#endif + +#include <linux/amlogic/media/video_sink/video.h> 
+#include <linux/amlogic/media/codec_mm/configs.h> +#include "../utils/config_parser.h" +#include "../utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../utils/vdec_v4l2_buffer_ops.h" +#include <media/v4l2-mem2mem.h> +#include "../utils/vdec_feature.h" + +#define MIX_STREAM_SUPPORT + +#include "vvp9.h" + +#define VP9_10B_MMU_DW +/*#define SUPPORT_FB_DECODING*/ +/*#define FB_DECODING_TEST_SCHEDULE*/ + +#define CO_MV_COMPRESS +#define HW_MASK_FRONT 0x1 +#define HW_MASK_BACK 0x2 + +#define VP9D_MPP_REFINFO_TBL_ACCCONFIG 0x3442 +#define VP9D_MPP_REFINFO_DATA 0x3443 +#define VP9D_MPP_REF_SCALE_ENBL 0x3441 +#define HEVC_MPRED_CTRL4 0x324c +#define HEVC_CM_HEADER_START_ADDR 0x3628 +#define HEVC_DBLK_CFGB 0x350b +#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464 +#define HEVC_SAO_MMU_VH1_ADDR 0x363b +#define HEVC_SAO_MMU_VH0_ADDR 0x363a +#define HEVC_SAO_MMU_VH0_ADDR2 0x364d +#define HEVC_SAO_MMU_VH1_ADDR2 0x364e +#define HEVC_MV_INFO 0x310d +#define HEVC_QP_INFO 0x3137 +#define HEVC_SKIP_INFO 0x3136 + +#define HEVC_SAO_CTRL9 0x362d +#define HEVC_CM_HEADER_START_ADDR2 0x364a +#define HEVC_SAO_MMU_DMA_CTRL2 0x364c +#define HEVC_SAO_MMU_VH0_ADDR2 0x364d +#define HEVC_SAO_MMU_VH1_ADDR2 0x364e +#define HEVC_SAO_MMU_STATUS2 0x3650 +#define HEVC_DW_VH0_ADDDR 0x365e +#define HEVC_DW_VH1_ADDDR 0x365f + +#define VP9_10B_DEC_IDLE 0 +#define VP9_10B_DEC_FRAME_HEADER 1 +#define VP9_10B_DEC_SLICE_SEGMENT 2 +#define VP9_10B_DECODE_SLICE 5 +#define VP9_10B_DISCARD_NAL 6 +#define VP9_DUMP_LMEM 7 +#define HEVC_DECPIC_DATA_DONE 0xa +#define HEVC_DECPIC_DATA_ERROR 0xb +#define HEVC_NAL_DECODE_DONE 0xe +#define HEVC_DECODE_BUFEMPTY 0x20 +#define HEVC_DECODE_TIMEOUT 0x21 +#define HEVC_SEARCH_BUFEMPTY 0x22 +#define HEVC_DECODE_OVER_SIZE 0x23 +#define HEVC_S2_DECODING_DONE 0x50 +#define VP9_HEAD_PARSER_DONE 0xf0 +#define VP9_HEAD_SEARCH_DONE 0xf1 +#define VP9_EOS 0xf2 +#define HEVC_ACTION_DONE 0xff + +#define VF_POOL_SIZE 32 + +#undef pr_info +#define pr_info printk 
+ +#define DECODE_MODE_SINGLE ((0x80 << 24) | 0) +#define DECODE_MODE_MULTI_STREAMBASE ((0x80 << 24) | 1) +#define DECODE_MODE_MULTI_FRAMEBASE ((0x80 << 24) | 2) +#define DECODE_MODE_SINGLE_LOW_LATENCY ((0x80 << 24) | 3) +#define DECODE_MODE_MULTI_FRAMEBASE_NOHEAD ((0x80 << 24) | 4) + +#define VP9_TRIGGER_FRAME_DONE 0x100 +#define VP9_TRIGGER_FRAME_ENABLE 0x200 + +#define MV_MEM_UNIT 0x240 +/*--------------------------------------------------- + * Include "parser_cmd.h" + *--------------------------------------------------- + */ +#define PARSER_CMD_SKIP_CFG_0 0x0000090b + +#define PARSER_CMD_SKIP_CFG_1 0x1b14140f + +#define PARSER_CMD_SKIP_CFG_2 0x001b1910 + +#define PARSER_CMD_NUMBER 37 + +/*#define HEVC_PIC_STRUCT_SUPPORT*/ +/* to remove, fix build error */ + +/*#define CODEC_MM_FLAGS_FOR_VDECODER 0*/ + +#define MULTI_INSTANCE_SUPPORT +#define SUPPORT_10BIT +/* #define ERROR_HANDLE_DEBUG */ + +#ifndef STAT_KTHREAD +#define STAT_KTHREAD 0x40 +#endif + +#ifdef MULTI_INSTANCE_SUPPORT +#define MAX_DECODE_INSTANCE_NUM 9 +#define MULTI_DRIVER_NAME "ammvdec_vp9" + +static unsigned int max_decode_instance_num + = MAX_DECODE_INSTANCE_NUM; +static unsigned int decode_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int display_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int max_process_time[MAX_DECODE_INSTANCE_NUM]; +static unsigned int run_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int input_empty[MAX_DECODE_INSTANCE_NUM]; +static unsigned int not_run_ready[MAX_DECODE_INSTANCE_NUM]; + +static u32 decode_timeout_val = 200; +static int start_decode_buf_level = 0x8000; +static u32 work_buf_size; + +static u32 force_pts_unstable; + +static u32 mv_buf_margin; + +static u32 mv_buf_dynamic_alloc; + +/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */ +/* double_write_mode: + * 0, no double write; + * 1, 1:1 ratio; + * 2, (1/4):(1/4) ratio; + * 3, (1/4):(1/4) ratio, with both compressed frame included + * 4, (1/2):(1/2) ratio; + * 8, 
(1/8):(1/8) ratio; + * 0x10, double write only + * 0x100, if > 1080p,use mode 4,else use mode 1; + * 0x200, if > 1080p,use mode 2,else use mode 1; + * 0x300, if > 720p, use mode 4, else use mode 1; + */ +static u32 double_write_mode; + +#define DRIVER_NAME "amvdec_vp9" +#define DRIVER_HEADER_NAME "amvdec_vp9_header" + + +#define PUT_INTERVAL (HZ/100) +#define ERROR_SYSTEM_RESET_COUNT 200 + +#define PTS_NORMAL 0 +#define PTS_NONE_REF_USE_DURATION 1 + +#define PTS_MODE_SWITCHING_THRESHOLD 3 +#define PTS_MODE_SWITCHING_RECOVERY_THREASHOLD 3 + +#define DUR2PTS(x) ((x)*90/96) + +struct VP9Decoder_s; +static int vvp9_vf_states(struct vframe_states *states, void *); +static struct vframe_s *vvp9_vf_peek(void *); +static struct vframe_s *vvp9_vf_get(void *); +static void vvp9_vf_put(struct vframe_s *, void *); +static int vvp9_event_cb(int type, void *data, void *private_data); + +static int vvp9_stop(struct VP9Decoder_s *pbi); +#ifdef MULTI_INSTANCE_SUPPORT +static s32 vvp9_init(struct vdec_s *vdec); +#else +static s32 vvp9_init(struct VP9Decoder_s *pbi); +#endif +static void vvp9_prot_init(struct VP9Decoder_s *pbi, u32 mask); +static int vvp9_local_init(struct VP9Decoder_s *pbi); +static void vvp9_put_timer_func(struct timer_list *timer); +static void dump_data(struct VP9Decoder_s *pbi, int size); +static unsigned char get_data_check_sum + (struct VP9Decoder_s *pbi, int size); +static void dump_pic_list(struct VP9Decoder_s *pbi); +static int vp9_alloc_mmu( + struct VP9Decoder_s *pbi, + int cur_buf_idx, + int pic_width, + int pic_height, + unsigned short bit_depth, + unsigned int *mmu_index_adr); +#ifdef VP9_10B_MMU_DW +int vp9_alloc_mmu_dw( + struct VP9Decoder_s *pbi, + int cur_buf_idx, + int pic_width, + int pic_height, + unsigned short bit_depth, + unsigned int *mmu_index_adr); +#endif + +static const char vvp9_dec_id[] = "vvp9-dev"; + +#define PROVIDER_NAME "decoder.vp9" +#define MULTI_INSTANCE_PROVIDER_NAME "vdec.vp9" + +static const struct vframe_operations_s 
vvp9_vf_provider = { + .peek = vvp9_vf_peek, + .get = vvp9_vf_get, + .put = vvp9_vf_put, + .event_cb = vvp9_event_cb, + .vf_states = vvp9_vf_states, +}; + +static struct vframe_provider_s vvp9_vf_prov; + +static u32 bit_depth_luma; +static u32 bit_depth_chroma; +static u32 frame_width; +static u32 frame_height; +static u32 video_signal_type; + +static u32 on_no_keyframe_skiped; + +#define PROB_SIZE (496 * 2 * 4) +#define PROB_BUF_SIZE (0x5000) +#define COUNT_BUF_SIZE (0x300 * 4 * 4) +/*compute_losless_comp_body_size(4096, 2304, 1) = 18874368(0x1200000)*/ +#define MAX_FRAME_4K_NUM 0x1200 +#define MAX_FRAME_8K_NUM 0x4800 + +#define HEVC_ASSIST_MMU_MAP_ADDR 0x3009 +// bit[31:20] -- fb_read_lcu_y +// READ only// bit[19:8] -- fb_read_lcu_x +// READ only// bit[7] -- fb_read_lcu_latch +// bit[6:5] -- reserved +// bit[4] -- fb_disable_wr_iqit_buf +// bit[3] -- fb_read_avs2_enable +// bit[2] -- fb_read_vp9_enable +// bit[1] -- fb_avs2_enable +// bit[0] -- fb_vp9_enable +#define HEVC_ASSIST_HED_FB_CTL 0x300c +// [31:16] height// [15:0] width +#define HEVC_ASSIST_PIC_SIZE_FB_READ 0x300d +#define HEVC_ASSIST_MMU_MAP_ADDR2 0x300e + + +#ifdef SUPPORT_FB_DECODING +/* register define */ +#define HEVC_ASSIST_HED_FB_W_CTL 0x3006 +#define HEVC_ASSIST_HED_FB_R_CTL 0x3007 +#define HEVC_ASSIST_HED_FB_ADDR 0x3008 +#define HEVC_ASSIST_FB_MMU_MAP_ADDR 0x300a +#define HEVC_ASSIST_FBD_MMU_MAP_ADDR 0x300b + + +#define MAX_STAGE_PAGE_NUM 0x1200 +#define STAGE_MMU_MAP_SIZE (MAX_STAGE_PAGE_NUM * 4) +#endif + +static inline int div_r32(int64_t m, int n) +{ +/* + *return (int)(m/n) + */ +#ifndef CONFIG_ARM64 + int64_t qu = 0; + qu = div_s64(m, n); + return (int)qu; +#else + return (int)(m/n); +#endif +} + +/*USE_BUF_BLOCK*/ +struct BUF_s { + int index; + unsigned int alloc_flag; + /*buffer */ + unsigned int cma_page_count; + unsigned long alloc_addr; + unsigned long start_adr; + unsigned int size; + + unsigned int free_start_adr; + ulong v4l_ref_buf_addr; + ulong header_addr; + u32 header_size; + 
u32 luma_size; + ulong chroma_addr; + u32 chroma_size; +} /*BUF_t */; + +struct MVBUF_s { + unsigned long start_adr; + unsigned int size; + int used_flag; +} /*MVBUF_t */; + + /* #undef BUFMGR_ONLY to enable hardware configuration */ + +/*#define TEST_WR_PTR_INC*/ +/*#define WR_PTR_INC_NUM 128*/ +#define WR_PTR_INC_NUM 1 + +#define SIMULATION +#define DOS_PROJECT +#undef MEMORY_MAP_IN_REAL_CHIP + +/*#undef DOS_PROJECT*/ +/*#define MEMORY_MAP_IN_REAL_CHIP*/ + +/*#define BUFFER_MGR_ONLY*/ +/*#define CONFIG_HEVC_CLK_FORCED_ON*/ +/*#define ENABLE_SWAP_TEST*/ +#define MCRCC_ENABLE + +#define VP9_LPF_LVL_UPDATE +/*#define DBG_LF_PRINT*/ + +#ifdef VP9_10B_NV21 +#else +#define LOSLESS_COMPRESS_MODE +#endif + +#define DOUBLE_WRITE_YSTART_TEMP 0x02000000 +#define DOUBLE_WRITE_CSTART_TEMP 0x02900000 + +#define VP9_DEBUG_BUFMGR 0x01 +#define VP9_DEBUG_BUFMGR_MORE 0x02 +#define VP9_DEBUG_BUFMGR_DETAIL 0x04 +#define VP9_DEBUG_OUT_PTS 0x10 +#define VP9_DEBUG_SEND_PARAM_WITH_REG 0x100 +#define VP9_DEBUG_MERGE 0x200 +#define VP9_DEBUG_DBG_LF_PRINT 0x400 +#define VP9_DEBUG_REG 0x800 +#define VP9_DEBUG_2_STAGE 0x1000 +#define VP9_DEBUG_2_STAGE_MORE 0x2000 +#define VP9_DEBUG_QOS_INFO 0x4000 +#define VP9_DEBUG_DIS_LOC_ERROR_PROC 0x10000 +#define VP9_DEBUG_DIS_SYS_ERROR_PROC 0x20000 +#define VP9_DEBUG_DUMP_PIC_LIST 0x40000 +#define VP9_DEBUG_TRIG_SLICE_SEGMENT_PROC 0x80000 +#define VP9_DEBUG_NO_TRIGGER_FRAME 0x100000 +#define VP9_DEBUG_LOAD_UCODE_FROM_FILE 0x200000 +#define VP9_DEBUG_FORCE_SEND_AGAIN 0x400000 +#define VP9_DEBUG_DUMP_DATA 0x800000 +#define VP9_DEBUG_CACHE 0x1000000 +#define VP9_DEBUG_CACHE_HIT_RATE 0x2000000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 +#ifdef MULTI_INSTANCE_SUPPORT +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_V4L_DETAIL 0x10000000 +#define PRINT_FLAG_VDEC_STATUS 0x20000000 +#define PRINT_FLAG_VDEC_DETAIL 0x40000000 +#define PRINT_FLAG_VDEC_DATA 0x80000000 +#endif +static u32 force_bufspec; +static u32 debug; +static bool is_reset; +/*for debug*/ +/* 
+ udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode detail print + bit [31:16] not 0, pos to dump lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 udebug_pause_decode_idx; + +static u32 without_display_mode; + +static u32 v4l_bitstream_id_enable = 1; + +/* + *[3:0] 0: default use config from omx. + * 1: force enable fence. + * 2: disable fence. + *[7:4] 0: fence use for driver. + * 1: fence fd use for app. + */ +static u32 force_config_fence; + +#define DEBUG_REG +#ifdef DEBUG_REG +void WRITE_VREG_DBG2(unsigned int adr, unsigned int val) +{ + if (debug & VP9_DEBUG_REG) + pr_info("%s(%x, %x)\n", __func__, adr, val); + if (adr != 0) + WRITE_VREG(adr, val); +} + +#undef WRITE_VREG +#define WRITE_VREG WRITE_VREG_DBG2 +#endif + +#define FRAME_CNT_WINDOW_SIZE 59 +#define RATE_CORRECTION_THRESHOLD 5 +/************************************************** + +VP9 buffer management start + +***************************************************/ +#define MMU_COMPRESS_HEADER_SIZE_1080P 0x10000 +#define MMU_COMPRESS_HEADER_SIZE_4K 0x48000 +#define MMU_COMPRESS_HEADER_SIZE_8K 0x120000 + + +//#define MMU_COMPRESS_HEADER_SIZE 0x48000 +//#define MMU_COMPRESS_HEADER_SIZE_DW 0x48000 +//#define MMU_COMPRESS_8K_HEADER_SIZE (MMU_COMPRESS_HEADER_SIZE * 4) + +#define MMU_COMPRESS_HEADER_SIZE 0x48000 +#define MMU_COMPRESS_HEADER_SIZE_DW 0x48000 +#define MMU_COMPRESS_8K_HEADER_SIZE (MMU_COMPRESS_HEADER_SIZE * 4) + +#define MAX_SIZE_8K (8192 * 4608) +#define MAX_SIZE_4K (4096 * 2304) +#define MAX_SIZE_2K (1920 * 1088) +#define IS_8K_SIZE(w, h) (((w) * (h)) > MAX_SIZE_4K) +#define IS_4K_SIZE(w, h) (((w) * (h)) > (1920*1088)) + +#define 
INVALID_IDX -1 /* Invalid buffer index.*/ + +#define RPM_BEGIN 0x200 +#define RPM_END 0x280 + +union param_u { + struct { + unsigned short data[RPM_END - RPM_BEGIN]; + } l; + struct { + /* from ucode lmem, do not change this struct */ + unsigned short profile; + unsigned short show_existing_frame; + unsigned short frame_to_show_idx; + unsigned short frame_type; /*1 bit*/ + unsigned short show_frame; /*1 bit*/ + unsigned short error_resilient_mode; /*1 bit*/ + unsigned short intra_only; /*1 bit*/ + unsigned short display_size_present; /*1 bit*/ + unsigned short reset_frame_context; + unsigned short refresh_frame_flags; + unsigned short width; + unsigned short height; + unsigned short display_width; + unsigned short display_height; + /* + *bit[11:8] - ref_frame_info_0 (ref(3-bits), ref_frame_sign_bias(1-bit)) + *bit[7:4] - ref_frame_info_1 (ref(3-bits), ref_frame_sign_bias(1-bit)) + *bit[3:0] - ref_frame_info_2 (ref(3-bits), ref_frame_sign_bias(1-bit)) + */ + unsigned short ref_info; + /* + *bit[2]: same_frame_size0 + *bit[1]: same_frame_size1 + *bit[0]: same_frame_size2 + */ + unsigned short same_frame_size; + + unsigned short mode_ref_delta_enabled; + unsigned short ref_deltas[4]; + unsigned short mode_deltas[2]; + unsigned short filter_level; + unsigned short sharpness_level; + unsigned short bit_depth; + unsigned short seg_quant_info[8]; + unsigned short seg_enabled; + unsigned short seg_abs_delta; + /* bit 15: feature enabled; bit 8, sign; bit[5:0], data */ + unsigned short seg_lf_info[8]; + } p; +}; + + +struct vpx_codec_frame_buffer_s { + uint8_t *data; /**< Pointer to the data buffer */ + size_t size; /**< Size of data in bytes */ + void *priv; /**< Frame's private data */ +}; + +enum vpx_color_space_t { + VPX_CS_UNKNOWN = 0, /**< Unknown */ + VPX_CS_BT_601 = 1, /**< BT.601 */ + VPX_CS_BT_709 = 2, /**< BT.709 */ + VPX_CS_SMPTE_170 = 3, /**< SMPTE.170 */ + VPX_CS_SMPTE_240 = 4, /**< SMPTE.240 */ + VPX_CS_BT_2020 = 5, /**< BT.2020 */ + VPX_CS_RESERVED = 6, /**< 
Reserved */ + VPX_CS_SRGB = 7 /**< sRGB */ +}; /**< alias for enum vpx_color_space */ + +enum vpx_bit_depth_t { + VPX_BITS_8 = 8, /**< 8 bits */ + VPX_BITS_10 = 10, /**< 10 bits */ + VPX_BITS_12 = 12, /**< 12 bits */ +}; + +#define MAX_SLICE_NUM 1024 +struct PIC_BUFFER_CONFIG_s { + int index; + int BUF_index; + int mv_buf_index; + int comp_body_size; + int buf_size; + int vf_ref; + int y_canvas_index; + int uv_canvas_index; +#ifdef MULTI_INSTANCE_SUPPORT + struct canvas_config_s canvas_config[2]; +#endif + int decode_idx; + int slice_type; + int stream_offset; + u32 pts; + u64 pts64; + u64 timestamp; + uint8_t error_mark; + /**/ + int slice_idx; + /*buffer*/ + unsigned long header_adr; +#ifdef VP9_10B_MMU_DW + unsigned long header_dw_adr; +#endif + unsigned long mpred_mv_wr_start_addr; + int mv_size; + /*unsigned long mc_y_adr; + *unsigned long mc_u_v_adr; + */ + unsigned int dw_y_adr; + unsigned int dw_u_v_adr; + u32 luma_size; + u32 chroma_size; + int mc_canvas_y; + int mc_canvas_u_v; + + int lcu_total; + /**/ + int y_width; + int y_height; + int y_crop_width; + int y_crop_height; + int y_stride; + + int uv_width; + int uv_height; + int uv_crop_width; + int uv_crop_height; + int uv_stride; + + int alpha_width; + int alpha_height; + int alpha_stride; + + uint8_t *y_buffer; + uint8_t *u_buffer; + uint8_t *v_buffer; + uint8_t *alpha_buffer; + + uint8_t *buffer_alloc; + int buffer_alloc_sz; + int border; + int frame_size; + int subsampling_x; + int subsampling_y; + unsigned int bit_depth; + enum vpx_color_space_t color_space; + + int corrupted; + int flags; + unsigned long cma_alloc_addr; + + int double_write_mode; + + /* picture qos infomation*/ + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; + + u32 hw_decode_time; + u32 frame_size2; // For frame base mode + + /* vdec sync. 
*/ + struct dma_fence *fence; + + /* hdr10 plus data */ + u32 hdr10p_data_size; + char *hdr10p_data_buf; +} PIC_BUFFER_CONFIG; + +enum BITSTREAM_PROFILE { + PROFILE_0, + PROFILE_1, + PROFILE_2, + PROFILE_3, + MAX_PROFILES +}; + +enum FRAME_TYPE { + KEY_FRAME = 0, + INTER_FRAME = 1, + FRAME_TYPES, +}; + +enum REFERENCE_MODE { + SINGLE_REFERENCE = 0, + COMPOUND_REFERENCE = 1, + REFERENCE_MODE_SELECT = 2, + REFERENCE_MODES = 3, +}; + +#define NONE -1 +#define INTRA_FRAME 0 +#define LAST_FRAME 1 +#define GOLDEN_FRAME 2 +#define ALTREF_FRAME 3 +#define MAX_REF_FRAMES 4 + +#define REFS_PER_FRAME 3 + +#define REF_FRAMES_LOG2 3 +#define REF_FRAMES (1 << REF_FRAMES_LOG2) +#define REF_FRAMES_4K (6) + +/*4 scratch frames for the new frames to support a maximum of 4 cores decoding + *in parallel, 3 for scaled references on the encoder. + *TODO(hkuang): Add ondemand frame buffers instead of hardcoding the number + * // of framebuffers. + *TODO(jkoleszar): These 3 extra references could probably come from the + *normal reference pool. 
+ */ +#define FRAME_BUFFERS (REF_FRAMES + 16) +#define HEADER_FRAME_BUFFERS (FRAME_BUFFERS) +#define MAX_BUF_NUM (FRAME_BUFFERS) +#define MV_BUFFER_NUM FRAME_BUFFERS +#ifdef SUPPORT_FB_DECODING +#define STAGE_MAX_BUFFERS 16 +#else +#define STAGE_MAX_BUFFERS 0 +#endif + +#define FRAME_CONTEXTS_LOG2 2 +#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2) +/*buffer + header buffer + workspace*/ +#ifdef MV_USE_FIXED_BUF +#define MAX_BMMU_BUFFER_NUM (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + 1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n) +#define WORK_SPACE_BUF_ID (FRAME_BUFFERS + HEADER_FRAME_BUFFERS) +#else +#define MAX_BMMU_BUFFER_NUM \ + (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + MV_BUFFER_NUM + 1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n) +#define MV_BUFFER_IDX(n) (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + n) +#define WORK_SPACE_BUF_ID \ + (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + MV_BUFFER_NUM) +#endif + +struct RefCntBuffer_s { + int ref_count; + /*MV_REF *mvs;*/ + int mi_rows; + int mi_cols; + struct vpx_codec_frame_buffer_s raw_frame_buffer; + struct PIC_BUFFER_CONFIG_s buf; + +/*The Following variables will only be used in frame parallel decode. + * + *frame_worker_owner indicates which FrameWorker owns this buffer. NULL means + *that no FrameWorker owns, or is decoding, this buffer. + *VP9Worker *frame_worker_owner; + * + *row and col indicate which position frame has been decoded to in real + *pixel unit. They are reset to -1 when decoding begins and set to INT_MAX + *when the frame is fully decoded. 
+ */ + int row; + int col; + + int show_frame; +} RefCntBuffer; + +struct RefBuffer_s { +/*TODO(dkovalev): idx is not really required and should be removed, now it + *is used in vp9_onyxd_if.c + */ + int idx; + struct PIC_BUFFER_CONFIG_s *buf; + /*struct scale_factors sf;*/ +} RefBuffer; + +struct InternalFrameBuffer_s { + uint8_t *data; + size_t size; + int in_use; +} InternalFrameBuffer; + +struct InternalFrameBufferList_s { + int num_internal_frame_buffers; + struct InternalFrameBuffer_s *int_fb; +} InternalFrameBufferList; + +struct BufferPool_s { +/*Protect BufferPool from being accessed by several FrameWorkers at + *the same time during frame parallel decode. + *TODO(hkuang): Try to use atomic variable instead of locking the whole pool. + * + *Private data associated with the frame buffer callbacks. + *void *cb_priv; + * + *vpx_get_frame_buffer_cb_fn_t get_fb_cb; + *vpx_release_frame_buffer_cb_fn_t release_fb_cb; + */ + + struct RefCntBuffer_s frame_bufs[FRAME_BUFFERS]; + +/*Frame buffers allocated internally by the codec.*/ + struct InternalFrameBufferList_s int_frame_buffers; + unsigned long flags; + spinlock_t lock; + +} BufferPool; + +#define lock_buffer_pool(pool, flags) \ + spin_lock_irqsave(&pool->lock, flags) + +#define unlock_buffer_pool(pool, flags) \ + spin_unlock_irqrestore(&pool->lock, flags) + +struct VP9_Common_s { + enum vpx_color_space_t color_space; + int width; + int height; + int display_width; + int display_height; + int last_width; + int last_height; + + int subsampling_x; + int subsampling_y; + + int use_highbitdepth;/*Marks if we need to use 16bit frame buffers.*/ + + struct PIC_BUFFER_CONFIG_s *frame_to_show; + struct RefCntBuffer_s *prev_frame; + + /*TODO(hkuang): Combine this with cur_buf in macroblockd.*/ + struct RefCntBuffer_s *cur_frame; + + int ref_frame_map[REF_FRAMES]; /* maps fb_idx to reference slot */ + + /*Prepare ref_frame_map for the next frame. + *Only used in frame parallel decode. 
+ */ + int next_ref_frame_map[REF_FRAMES]; + + /* TODO(jkoleszar): could expand active_ref_idx to 4, + *with 0 as intra, and roll new_fb_idx into it. + */ + + /*Each frame can reference REFS_PER_FRAME buffers*/ + struct RefBuffer_s frame_refs[REFS_PER_FRAME]; + + int prev_fb_idx; + int new_fb_idx; + int cur_fb_idx_mmu; + /*last frame's frame type for motion search*/ + enum FRAME_TYPE last_frame_type; + enum FRAME_TYPE frame_type; + + int show_frame; + int last_show_frame; + int show_existing_frame; + + /*Flag signaling that the frame is encoded using only INTRA modes.*/ + uint8_t intra_only; + uint8_t last_intra_only; + + int allow_high_precision_mv; + + /*Flag signaling that the frame context should be reset to default + *values. 0 or 1 implies don't reset, 2 reset just the context + *specified in the frame header, 3 reset all contexts. + */ + int reset_frame_context; + + /*MBs, mb_rows/cols is in 16-pixel units; mi_rows/cols is in + * MODE_INFO (8-pixel) units. + */ + int MBs; + int mb_rows, mi_rows; + int mb_cols, mi_cols; + int mi_stride; + + /*Whether to use previous frame's motion vectors for prediction.*/ + int use_prev_frame_mvs; + + int refresh_frame_context; /* Two state 0 = NO, 1 = YES */ + + int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */ + + /*struct loopfilter lf;*/ + /*struct segmentation seg;*/ + + /*TODO(hkuang):Remove this as it is the same as frame_parallel_decode*/ + /* in pbi.*/ + int frame_parallel_decode; /* frame-based threading.*/ + + /*Context probabilities for reference frame prediction*/ + /*MV_REFERENCE_FRAME comp_fixed_ref;*/ + /*MV_REFERENCE_FRAME comp_var_ref[2];*/ + enum REFERENCE_MODE reference_mode; + + /*FRAME_CONTEXT *fc; */ /* this frame entropy */ + /*FRAME_CONTEXT *frame_contexts; */ /*FRAME_CONTEXTS*/ + /*unsigned int frame_context_idx; *//* Context to use/update */ + /*FRAME_COUNTS counts;*/ + + unsigned int current_video_frame; + enum BITSTREAM_PROFILE profile; + + enum vpx_bit_depth_t bit_depth; + + int 
error_resilient_mode; + int frame_parallel_decoding_mode; + + int byte_alignment; + int skip_loop_filter; + + /*External BufferPool passed from outside.*/ + struct BufferPool_s *buffer_pool; + + int above_context_alloc_cols; +}; + +static void set_canvas(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config); +static int prepare_display_buf(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config); + +static void fill_frame_info(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *frame, + unsigned int framesize, + unsigned int pts); + +static struct PIC_BUFFER_CONFIG_s *get_frame_new_buffer(struct VP9_Common_s *cm) +{ + return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf; +} + +static void ref_cnt_fb(struct RefCntBuffer_s *bufs, int *idx, int new_idx) +{ + const int ref_index = *idx; + + if (ref_index >= 0 && bufs[ref_index].ref_count > 0) { + bufs[ref_index].ref_count--; + /*pr_info("[MMU DEBUG 2] dec ref_count[%d] : %d\r\n", + * ref_index, bufs[ref_index].ref_count); + */ + } + + *idx = new_idx; + + bufs[new_idx].ref_count++; + /*pr_info("[MMU DEBUG 3] inc ref_count[%d] : %d\r\n", + * new_idx, bufs[new_idx].ref_count); + */ +} + +int vp9_release_frame_buffer(struct vpx_codec_frame_buffer_s *fb) +{ + struct InternalFrameBuffer_s *const int_fb = + (struct InternalFrameBuffer_s *)fb->priv; + if (int_fb) + int_fb->in_use = 0; + return 0; +} + +static int compute_losless_comp_body_size(int width, int height, + uint8_t is_bit_depth_10); + +static void setup_display_size(struct VP9_Common_s *cm, union param_u *params, + int print_header_info) +{ + cm->display_width = cm->width; + cm->display_height = cm->height; + if (params->p.display_size_present) { + if (print_header_info) + pr_info(" * 1-bit display_size_present read : 1\n"); + cm->display_width = params->p.display_width; + cm->display_height = params->p.display_height; + /*vp9_read_frame_size(rb, &cm->display_width, + * &cm->display_height); + */ + } else { + if (print_header_info) + 
pr_info(" * 1-bit display_size_present read : 0\n"); + } +} + + +uint8_t print_header_info = 0; + +struct buff_s { + u32 buf_start; + u32 buf_size; + u32 buf_end; +} buff_t; + +struct BuffInfo_s { + u32 max_width; + u32 max_height; + u32 start_adr; + u32 end_adr; + struct buff_s ipp; + struct buff_s sao_abv; + struct buff_s sao_vb; + struct buff_s short_term_rps; + struct buff_s vps; + struct buff_s sps; + struct buff_s pps; + struct buff_s sao_up; + struct buff_s swap_buf; + struct buff_s swap_buf2; + struct buff_s scalelut; + struct buff_s dblk_para; + struct buff_s dblk_data; + struct buff_s seg_map; + struct buff_s mmu_vbh; + struct buff_s cm_header; +#ifdef VP9_10B_MMU_DW + struct buff_s mmu_vbh_dw; + struct buff_s cm_header_dw; +#endif + struct buff_s mpred_above; +#ifdef MV_USE_FIXED_BUF + struct buff_s mpred_mv; +#endif + struct buff_s rpm; + struct buff_s lmem; +} BuffInfo_t; +#ifdef MULTI_INSTANCE_SUPPORT +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_CONFIG_PARAM 3 +#define DEC_RESULT_ERROR 4 +#define DEC_INIT_PICLIST 5 +#define DEC_UNINIT_PICLIST 6 +#define DEC_RESULT_GET_DATA 7 +#define DEC_RESULT_GET_DATA_RETRY 8 +#define DEC_RESULT_EOS 9 +#define DEC_RESULT_FORCE_EXIT 10 +#define DEC_RESULT_NEED_MORE_BUFFER 11 +#define DEC_V4L2_CONTINUE_DECODING 18 + +#define DEC_S1_RESULT_NONE 0 +#define DEC_S1_RESULT_DONE 1 +#define DEC_S1_RESULT_FORCE_EXIT 2 +#define DEC_S1_RESULT_TEST_TRIGGER_DONE 0xf0 + +#ifdef FB_DECODING_TEST_SCHEDULE +#define TEST_SET_NONE 0 +#define TEST_SET_PIC_DONE 1 +#define TEST_SET_S2_DONE 2 +#endif + +static void vp9_work(struct work_struct *work); +#endif +struct loop_filter_info_n; +struct loopfilter; +struct segmentation; + +#ifdef SUPPORT_FB_DECODING +static void mpred_process(struct VP9Decoder_s *pbi); +static void vp9_s1_work(struct work_struct *work); + +struct stage_buf_s { + int index; + unsigned short rpm[RPM_END - RPM_BEGIN]; +}; + +static unsigned int 
not_run2_ready[MAX_DECODE_INSTANCE_NUM]; + +static unsigned int run2_count[MAX_DECODE_INSTANCE_NUM]; + +#ifdef FB_DECODING_TEST_SCHEDULE +u32 stage_buf_num; /* = 16;*/ +#else +u32 stage_buf_num; +#endif +#endif + +struct vp9_fence_vf_t { + u32 used_size; + struct vframe_s *fence_vf[VF_POOL_SIZE]; +}; + +struct VP9Decoder_s { +#ifdef MULTI_INSTANCE_SUPPORT + unsigned char index; + + struct device *cma_dev; + struct platform_device *platform_dev; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + struct vframe_chunk_s *chunk; + int dec_result; + struct work_struct work; + struct work_struct recycle_mmu_work; + struct work_struct set_clk_work; + u32 start_shift_bytes; + + struct BuffInfo_s work_space_buf_store; + unsigned long buf_start; + u32 buf_size; + u32 cma_alloc_count; + unsigned long cma_alloc_addr; + uint8_t eos; + unsigned long int start_process_time; + unsigned last_lcu_idx; + int decode_timeout_count; + unsigned timeout_num; + int save_buffer_mode; + + int double_write_mode; +#endif + long used_4k_num; + + unsigned char m_ins_flag; + char *provider_name; + union param_u param; + int frame_count; + int pic_count; + u32 stat; + struct timer_list timer; + u32 frame_dur; + u32 frame_ar; + int fatal_error; + uint8_t init_flag; + uint8_t first_sc_checked; + uint8_t process_busy; +#define PROC_STATE_INIT 0 +#define PROC_STATE_DECODESLICE 1 +#define PROC_STATE_SENDAGAIN 2 + uint8_t process_state; + u32 ucode_pause_pos; + + int show_frame_num; + struct buff_s mc_buf_spec; + struct dec_sysinfo vvp9_amstream_dec_info; + void *rpm_addr; + void *lmem_addr; + dma_addr_t rpm_phy_addr; + dma_addr_t lmem_phy_addr; + unsigned short *lmem_ptr; + unsigned short *debug_ptr; + + void *prob_buffer_addr; + void *count_buffer_addr; + dma_addr_t prob_buffer_phy_addr; + dma_addr_t count_buffer_phy_addr; + + void *frame_mmu_map_addr; + dma_addr_t frame_mmu_map_phy_addr; +#ifdef VP9_10B_MMU_DW + void *frame_mmu_dw_map_addr; + dma_addr_t frame_mmu_dw_map_phy_addr; 
+#endif + unsigned int use_cma_flag; + + struct BUF_s m_BUF[MAX_BUF_NUM]; + struct MVBUF_s m_mv_BUF[MV_BUFFER_NUM]; + u32 used_buf_num; + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(pending_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + u32 vf_pre_count; + u32 vf_get_count; + u32 vf_put_count; + int buf_num; + int pic_num; + int lcu_size_log2; + unsigned int losless_comp_body_size; + u32 video_signal_type; + + int pts_mode; + int last_lookup_pts; + int last_pts; + u64 last_lookup_pts_us64; + u64 last_pts_us64; + u64 shift_byte_count; + + u32 pts_unstable; + u32 frame_cnt_window; + u32 pts1, pts2; + u32 last_duration; + u32 duration_from_pts_done; + bool vp9_first_pts_ready; + + u32 shift_byte_count_lo; + u32 shift_byte_count_hi; + int pts_mode_switching_count; + int pts_mode_recovery_count; + + bool get_frame_dur; + u32 saved_resolution; + + /**/ + struct VP9_Common_s common; + struct RefCntBuffer_s *cur_buf; + int refresh_frame_flags; + uint8_t need_resync; + uint8_t hold_ref_buf; + uint8_t ready_for_new_data; + struct BufferPool_s vp9_buffer_pool; + + struct BuffInfo_s *work_space_buf; + + struct buff_s *mc_buf; + + unsigned int frame_width; + unsigned int frame_height; + + unsigned short *rpm_ptr; + int init_pic_w; + int init_pic_h; + int lcu_total; + int lcu_size; + + int slice_type; + + int skip_flag; + int decode_idx; + int slice_idx; + uint8_t has_keyframe; + uint8_t wait_buf; + uint8_t error_flag; + + /* bit 0, for decoding; bit 1, for displaying */ + uint8_t ignore_bufmgr_error; + int PB_skip_mode; + int PB_skip_count_after_decoding; + /*hw*/ + + /*lf*/ + int default_filt_lvl; + struct loop_filter_info_n *lfi; + struct loopfilter *lf; + struct segmentation *seg_4lf; + /**/ + struct vdec_info *gvs; + + u32 pre_stream_offset; + + unsigned int dec_status; + u32 last_put_idx; + int new_frame_displayed; + void *mmu_box; + void *bmmu_box; 
+ int mmu_enable; +#ifdef VP9_10B_MMU_DW + void *mmu_box_dw; + int dw_mmu_enable; +#endif + struct vframe_master_display_colour_s vf_dp; + struct firmware_s *fw; + int max_pic_w; + int max_pic_h; +#ifdef SUPPORT_FB_DECODING + int dec_s1_result; + int s1_test_cmd; + struct work_struct s1_work; + int used_stage_buf_num; + int s1_pos; + int s2_pos; + void *stage_mmu_map_addr; + dma_addr_t stage_mmu_map_phy_addr; + struct stage_buf_s *s1_buf; + struct stage_buf_s *s2_buf; + struct stage_buf_s *stage_bufs + [STAGE_MAX_BUFFERS]; + unsigned char run2_busy; + + int s1_mv_buf_index; + int s1_mv_buf_index_pre; + int s1_mv_buf_index_pre_pre; + unsigned long s1_mpred_mv_wr_start_addr; + unsigned long s1_mpred_mv_wr_start_addr_pre; + unsigned short s1_intra_only; + unsigned short s1_frame_type; + unsigned short s1_width; + unsigned short s1_height; + unsigned short s1_last_show_frame; + union param_u s1_param; + u8 back_not_run_ready; +#endif + int need_cache_size; + u64 sc_start_time; + bool postproc_done; + int low_latency_flag; + bool no_head; + bool pic_list_init_done; + bool pic_list_init_done2; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + int frameinfo_enable; + struct vframe_qos_s vframe_qos; + u32 mem_map_mode; + u32 dynamic_buf_num_margin; + struct vframe_s vframe_dummy; + u32 res_ch_flag; + /*struct VP9Decoder_s vp9_decoder;*/ + union param_u vp9_param; + int sidebind_type; + int sidebind_channel_id; + bool enable_fence; + int fence_usage; + u32 frame_mode_pts_save[FRAME_BUFFERS]; + u64 frame_mode_pts64_save[FRAME_BUFFERS]; + int run_ready_min_buf_num; + int one_package_frame_cnt; + int buffer_wrap[FRAME_BUFFERS]; + int last_width; + int last_height; + u32 error_frame_width; + u32 error_frame_height; + u32 endian; + ulong fb_token; + struct vp9_fence_vf_t fence_vf_s; + struct mutex fence_mutex; + dma_addr_t rdma_phy_adr; + unsigned *rdma_adr; + struct trace_decoder_name trace; +}; + +static int vp9_print(struct VP9Decoder_s *pbi, + int flag, const 
char *fmt, ...) +{ +#define HEVC_PRINT_BUF 512 + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + + if (pbi == NULL || + (flag == 0) || + (debug & flag)) { + va_list args; + + va_start(args, fmt); + if (pbi) + len = sprintf(buf, "[%d]", pbi->index); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_debug("%s", buf); + va_end(args); + } + return 0; +} + +static int is_oversize(int w, int h) +{ + int max = (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)? + MAX_SIZE_8K : MAX_SIZE_4K; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) + max = MAX_SIZE_2K; + + if (w <= 0 || h <= 0) + return true; + + if (h != 0 && (w > max / h)) + return true; + + return false; +} + +static int vvp9_mmu_compress_header_size(int w, int h) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + IS_8K_SIZE(w, h)) + return (MMU_COMPRESS_HEADER_SIZE_8K); + if (IS_4K_SIZE(w, h)) + return (MMU_COMPRESS_HEADER_SIZE_4K); + return (MMU_COMPRESS_HEADER_SIZE_1080P); +} + +/*#define FRAME_MMU_MAP_SIZE (MAX_FRAME_4K_NUM * 4)*/ +static int vvp9_frame_mmu_map_size(struct VP9Decoder_s *pbi) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + IS_8K_SIZE(pbi->max_pic_w, pbi->max_pic_h)) + return (MAX_FRAME_8K_NUM << 2); + + return (MAX_FRAME_4K_NUM << 2); +} + +static int v4l_alloc_and_config_pic(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic); + +static void resize_context_buffers(struct VP9Decoder_s *pbi, + struct VP9_Common_s *cm, int width, int height) +{ + if (cm->width != width || cm->height != height) { + /* to do ..*/ + if (pbi != NULL) { + pbi->vp9_first_pts_ready = 0; + pbi->duration_from_pts_done = 0; + } + pr_info("%s (%d,%d)=>(%d,%d)\r\n", __func__, + cm->width, cm->height, + width, height); + cm->width = width; + cm->height = height; + } + /* + *if (cm->cur_frame->mvs == NULL || + * cm->mi_rows > cm->cur_frame->mi_rows || + * cm->mi_cols > cm->cur_frame->mi_cols) { + * resize_mv_buffer(cm); + *} + */ +} + +static int 
valid_ref_frame_size(int ref_width, int ref_height, + int this_width, int this_height) { + return 2 * this_width >= ref_width && + 2 * this_height >= ref_height && + this_width <= 16 * ref_width && + this_height <= 16 * ref_height; +} + +/* + *static int valid_ref_frame_img_fmt(enum vpx_bit_depth_t ref_bit_depth, + * int ref_xss, int ref_yss, + * enum vpx_bit_depth_t this_bit_depth, + * int this_xss, int this_yss) { + * return ref_bit_depth == this_bit_depth && ref_xss == this_xss && + * ref_yss == this_yss; + *} + */ + + +static int setup_frame_size( + struct VP9Decoder_s *pbi, + struct VP9_Common_s *cm, union param_u *params, + unsigned int *mmu_index_adr, + unsigned int *mmu_dw_index_adr, + int print_header_info) { + int width, height; + struct BufferPool_s * const pool = cm->buffer_pool; + struct PIC_BUFFER_CONFIG_s *ybf; + int ret = 0; + + width = params->p.width; + height = params->p.height; + if (is_oversize(width, height)) { + pbi->error_frame_width = width; + pbi->error_frame_height = height; + vp9_print(pbi, 0, "%s, Error: Invalid frame size\n", __func__); + return -1; + } + pbi->error_frame_width = 0; + pbi->error_frame_height = 0; + + /*vp9_read_frame_size(rb, &width, &height);*/ + if (print_header_info) + pr_info(" * 16-bits w read : %d (width : %d)\n", width, height); + if (print_header_info) + pr_info + (" * 16-bits h read : %d (height : %d)\n", width, height); + + WRITE_VREG(HEVC_PARSER_PICTURE_SIZE, (height << 16) | width); +#ifdef VP9_10B_HED_FB + WRITE_VREG(HEVC_ASSIST_PIC_SIZE_FB_READ, (height << 16) | width); +#endif + if (pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) { + ret = vp9_alloc_mmu(pbi, + cm->new_fb_idx, + params->p.width, + params->p.height, + params->p.bit_depth, + mmu_index_adr); + if (ret != 0) { + pr_err("can't alloc need mmu1,idx %d ret =%d\n", + cm->new_fb_idx, + ret); + return ret; + } + cm->cur_fb_idx_mmu = cm->new_fb_idx; + } +#ifdef VP9_10B_MMU_DW + if (pbi->dw_mmu_enable && (mmu_dw_index_adr != NULL)) { + ret 
= vp9_alloc_mmu_dw(pbi, cm->new_fb_idx, + params->p.width, params->p.height, + params->p.bit_depth, mmu_dw_index_adr); + if (ret != 0) { + pr_err("can't alloc need mmu1 dw,idx %d ret =%d\n", + cm->new_fb_idx, + ret); + return ret; + } + } +#endif + resize_context_buffers(pbi, cm, width, height); + setup_display_size(cm, params, print_header_info); +#if 0 + lock_buffer_pool(pool); + if (vp9_realloc_frame_buffer( + get_frame_new_buffer(cm), cm->width, cm->height, + cm->subsampling_x, cm->subsampling_y, +#if CONFIG_VP9_HIGHBITDEPTH + cm->use_highbitdepth, +#endif + VP9_DEC_BORDER_IN_PIXELS, + cm->byte_alignment, + &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, + pool->get_fb_cb, pool->cb_priv)) { + unlock_buffer_pool(pool); + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate frame buffer"); + } + unlock_buffer_pool(pool); +#else + /* porting */ + ybf = get_frame_new_buffer(cm); + if (!ybf) + return -1; + + ybf->y_crop_width = width; + ybf->y_crop_height = height; + ybf->bit_depth = params->p.bit_depth; +#endif + pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x; + pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y; + pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = + (unsigned int)cm->bit_depth; + pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space; + return ret; +} + +static int setup_frame_size_with_refs( + struct VP9Decoder_s *pbi, + struct VP9_Common_s *cm, + union param_u *params, + unsigned int *mmu_index_adr, + unsigned int *mmu_dw_index_adr, + int print_header_info) { + + int width, height; + int found = 0, i; + int has_valid_ref_frame = 0; + struct PIC_BUFFER_CONFIG_s *ybf; + struct BufferPool_s * const pool = cm->buffer_pool; + int ret = 0; + + for (i = 0; i < REFS_PER_FRAME; ++i) { + if ((params->p.same_frame_size >> + (REFS_PER_FRAME - i - 1)) & 0x1) { + struct PIC_BUFFER_CONFIG_s *const buf = + cm->frame_refs[i].buf; + /*if (print_header_info) + * pr_info + * ("1-bit 
same_frame_size[%d] read : 1\n", i); + */ + width = buf->y_crop_width; + height = buf->y_crop_height; + /*if (print_header_info) + * pr_info + * (" - same_frame_size width : %d\n", width); + */ + /*if (print_header_info) + * pr_info + * (" - same_frame_size height : %d\n", height); + */ + found = 1; + break; + } else { + /*if (print_header_info) + * pr_info + * ("1-bit same_frame_size[%d] read : 0\n", i); + */ + } + } + + if (!found) { + /*vp9_read_frame_size(rb, &width, &height);*/ + width = params->p.width; + height = params->p.height; + /*if (print_header_info) + * pr_info + * (" * 16-bits w read : %d (width : %d)\n", + * width, height); + *if (print_header_info) + * pr_info + * (" * 16-bits h read : %d (height : %d)\n", + * width, height); + */ + } + + if (is_oversize(width, height)) { + pbi->error_frame_width = width; + pbi->error_frame_height = height; + vp9_print(pbi, 0, "%s, Error: Invalid frame size\n", __func__); + return -1; + } + pbi->error_frame_width = 0; + pbi->error_frame_height = 0; + + params->p.width = width; + params->p.height = height; + + WRITE_VREG(HEVC_PARSER_PICTURE_SIZE, (height << 16) | width); + if (pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) { + /*if(cm->prev_fb_idx >= 0) release_unused_4k(cm->prev_fb_idx); + *cm->prev_fb_idx = cm->new_fb_idx; + */ + /* pr_info + * ("[DEBUG DEBUG]Before alloc_mmu, + * prev_fb_idx : %d, new_fb_idx : %d\r\n", + * cm->prev_fb_idx, cm->new_fb_idx); + */ + ret = vp9_alloc_mmu(pbi, cm->new_fb_idx, + params->p.width, params->p.height, + params->p.bit_depth, mmu_index_adr); + if (ret != 0) { + pr_err("can't alloc need mmu,idx %d\r\n", + cm->new_fb_idx); + return ret; + } + cm->cur_fb_idx_mmu = cm->new_fb_idx; + } +#ifdef VP9_10B_MMU_DW + if (pbi->dw_mmu_enable && (mmu_dw_index_adr != NULL)) { + ret = vp9_alloc_mmu_dw(pbi, cm->new_fb_idx, + params->p.width, params->p.height, + params->p.bit_depth, mmu_dw_index_adr); + if (ret != 0) { + pr_err("can't alloc need mmu dw,idx %d\r\n", + 
cm->new_fb_idx);
			return ret;
		}
	}
#endif
	/*Check to make sure at least one of frames that this frame references
	 *has valid dimensions.
	 */
	for (i = 0; i < REFS_PER_FRAME; ++i) {
		struct RefBuffer_s * const ref_frame = &cm->frame_refs[i];

		has_valid_ref_frame |=
			valid_ref_frame_size(ref_frame->buf->y_crop_width,
				ref_frame->buf->y_crop_height,
				width, height);
	}
	if (!has_valid_ref_frame) {
		pr_err("Error: Referenced frame has invalid size\r\n");
		return -1;
	}
#if 0
	/* Disabled color-format validation.
	 * NOTE(review): as written, the unconditional "return -1;" below
	 * is NOT inside the if() (missing braces) — would need fixing if
	 * this block is ever re-enabled.
	 */
	for (i = 0; i < REFS_PER_FRAME; ++i) {
		struct RefBuffer_s * const ref_frame =
			&cm->frame_refs[i];
		if (!valid_ref_frame_img_fmt(
			ref_frame->buf->bit_depth,
			ref_frame->buf->subsampling_x,
			ref_frame->buf->subsampling_y,
			cm->bit_depth,
			cm->subsampling_x,
			cm->subsampling_y))
			pr_err
			("Referenced frame incompatible color fmt\r\n");
			return -1;
	}
#endif
	resize_context_buffers(pbi, cm, width, height);
	setup_display_size(cm, params, print_header_info);

#if 0
	/* Disabled libvpx-style reallocation, kept for reference. */
	lock_buffer_pool(pool);
	if (vp9_realloc_frame_buffer(
		get_frame_new_buffer(cm), cm->width, cm->height,
		cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
		cm->use_highbitdepth,
#endif
		VP9_DEC_BORDER_IN_PIXELS,
		cm->byte_alignment,
		&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer,
		pool->get_fb_cb,
		pool->cb_priv)) {
		unlock_buffer_pool(pool);
		vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
			"Failed to allocate frame buffer");
	}
	unlock_buffer_pool(pool);
#else
	/* porting */
	ybf = get_frame_new_buffer(cm);
	if (!ybf)
		return -1;

	ybf->y_crop_width = width;
	ybf->y_crop_height = height;
	ybf->bit_depth = params->p.bit_depth;
#endif
	/* Propagate format info onto the new frame buffer. */
	pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
	pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
	pool->frame_bufs[cm->new_fb_idx].buf.bit_depth =
		(unsigned int)cm->bit_depth;
	pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
	return ret;
}

/* True when a and b differ by strictly less than margin m. */
static inline bool close_to(int a, int b, int m)
{
	return (abs(a - b) < m) ? true : false;
}

#ifdef MULTI_INSTANCE_SUPPORT
/*
 * printf-style debug trace, emitted via pr_debug without the usual
 * per-instance prefix (continuation print). Printed when pbi is NULL,
 * when flag is 0 (unconditional), or when the global "debug" mask has
 * any bit of @flag set. Always returns 0.
 * NOTE(review): "len" is always 0 here — apparently vestigial from a
 * variant that prepends a prefix; confirm before removing.
 */
static int vp9_print_cont(struct VP9Decoder_s *pbi,
	int flag, const char *fmt, ...)
{
	unsigned char buf[HEVC_PRINT_BUF];
	int len = 0;

	if (pbi == NULL ||
		(flag == 0) ||
		(debug & flag)) {
		va_list args;

		va_start(args, fmt);
		vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args);
		pr_debug("%s", buf);
		va_end(args);
	}
	return 0;
}

/*
 * Kick the vdec scheduler. For v4l instances that take parameters from
 * ucode, also request a frame sync write while the header has not yet
 * been parsed.
 */
static void trigger_schedule(struct VP9Decoder_s *pbi)
{
	if (pbi->is_used_v4l) {
		struct aml_vcodec_ctx *ctx =
			(struct aml_vcodec_ctx *)(pbi->v4l2_ctx);

		if (ctx->param_sets_from_ucode &&
			!pbi->v4l_params_parsed)
			vdec_v4l_write_frame_sync(ctx);
	}

	if (pbi->vdec_cb)
		pbi->vdec_cb(hw_to_vdec(pbi), pbi->vdec_cb_arg);
}

/*
 * Close the current decode-time measurement: convert elapsed jiffies
 * to milliseconds and keep the per-instance maximum for statistics.
 */
static void reset_process_time(struct VP9Decoder_s *pbi)
{
	if (pbi->start_process_time) {
		unsigned process_time =
			1000 * (jiffies - pbi->start_process_time) / HZ;
		pbi->start_process_time = 0;
		if (process_time > max_process_time[pbi->index])
			max_process_time[pbi->index] = process_time;
	}
}

/* Open a decode-time measurement and reset watchdog progress state. */
static void start_process_time(struct VP9Decoder_s *pbi)
{
	pbi->start_process_time = jiffies;
	pbi->decode_timeout_count = 0;
	pbi->last_lcu_idx = 0;
}

/*
 * Decode watchdog expiry: stop the HEVC core, mark the current decode
 * as done and schedule the work handler to finish/recover.
 */
static void timeout_process(struct VP9Decoder_s *pbi)
{
	pbi->timeout_num++;
	amhevc_stop();
	vp9_print(pbi,
		0, "%s decoder timeout\n", __func__);

	pbi->dec_result = DEC_RESULT_DONE;
	reset_process_time(pbi);
	vdec_schedule_work(&pbi->work);
}

/*
 * Effective double-write mode: the module parameter overrides the
 * per-instance setting when its bit 31 is set. Modes with bit 5 set
 * (0x2x) are rejected on SoCs older than T3 when combined with 1:4
 * scaling submodes.
 */
static u32 get_valid_double_write_mode(struct VP9Decoder_s *pbi)
{
	u32 dw = ((double_write_mode & 0x80000000) == 0) ?
		pbi->double_write_mode :
		(double_write_mode & 0x7fffffff);
	if (dw & 0x20) {
		if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T3)
			&& ((dw & 0xf) == 2 || (dw & 0xf) == 3)) {
			/* NOTE(review): "doueble" typo in the log text. */
			pr_info("MMU doueble write 1:4 not supported !!!\n");
			dw = 0;
		}
	}
	return dw;
}

/*
 * Runtime double-write mode. v4l instances delegate to the v4l core.
 * Modes >= 0x100 select the ratio adaptively from the CURRENT picture
 * size (falls back to 1:1 when no frame is active); plain modes are
 * returned as-is.
 */
static int get_double_write_mode(struct VP9Decoder_s *pbi)
{
	u32 valid_dw_mode = get_valid_double_write_mode(pbi);
	u32 dw;
	int w, h;
	struct VP9_Common_s *cm = &pbi->common;
	struct PIC_BUFFER_CONFIG_s *cur_pic_config;

	if (pbi->is_used_v4l) {
		unsigned int out;

		vdec_v4l_get_dw_mode(pbi->v4l2_ctx, &out);
		dw = out;
		return dw;
	}

	/* mask for supporting double write value bigger than 0x100 */
	if (valid_dw_mode & 0xffffff00) {
		if (!cm->cur_frame)
			return 1;/*no valid frame,*/
		cur_pic_config = &cm->cur_frame->buf;
		w = cur_pic_config->y_crop_width;
		h = cur_pic_config->y_crop_height;

		dw = 0x1; /*1:1*/
		switch (valid_dw_mode) {
		case 0x100:
			if (w > 1920 && h > 1088)
				dw = 0x4; /*1:2*/
			break;
		case 0x200:
			if (w > 1920 && h > 1088)
				dw = 0x2; /*1:4*/
			break;
		case 0x300:
			if (w > 1280 && h > 720)
				dw = 0x4; /*1:2*/
			break;
		default:
			break;
		}
		return dw;
	}

	return valid_dw_mode;
}

/* for double write buf alloc */
/*
 * Same adaptive mapping as get_double_write_mode(), but evaluated on
 * the INITIAL picture size — used when sizing double-write buffers up
 * front. Keep the two switch tables in sync.
 */
static int get_double_write_mode_init(struct VP9Decoder_s *pbi)
{
	u32 valid_dw_mode = get_valid_double_write_mode(pbi);
	u32 dw;
	int w = pbi->init_pic_w;
	int h = pbi->init_pic_h;

	dw = 0x1; /*1:1*/
	switch (valid_dw_mode) {
	case 0x100:
		if (w > 1920 && h > 1088)
			dw = 0x4; /*1:2*/
		break;
	case 0x200:
		if (w > 1920 && h > 1088)
			dw = 0x2; /*1:4*/
		break;
	case 0x300:
		if (w > 1280 && h > 720)
			dw = 0x4; /*1:2*/
		break;
	default:
		dw = valid_dw_mode;
		break;
	}
	return dw;
}
#endif

//#define MAX_4K_NUM 0x1200

/* return page number */
/*
 * Number of 4K MMU pages needed for the compressed body of a w x h
 * picture (save_mode selects the 10-bit sizing). Returns -1 when the
 * requirement exceeds the frame MMU map capacity.
 */
static int vp9_mmu_page_num(struct VP9Decoder_s *pbi,
	int w, int h, int save_mode)
{
	int picture_size;
	int cur_mmu_4k_number, max_frame_num;

	picture_size = compute_losless_comp_body_size(w, h, save_mode);
	/* Round the byte size up to whole 4K pages. */
	cur_mmu_4k_number = ((picture_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT);

	/* Map capacity in entries: 4 bytes per entry. */
	max_frame_num = (vvp9_frame_mmu_map_size(pbi) >> 2);

	if (cur_mmu_4k_number > max_frame_num) {
		pr_err("over max !! cur_mmu_4k_number 0x%x width %d height %d\n",
			cur_mmu_4k_number, w, h);
		return -1;
	}

	return cur_mmu_4k_number;
}

/*
 * Map a v4l2 frame buffer to its internal compressed-buffer bookkeeping
 * entry (via the wrapping aml_video_dec_buf).
 */
static struct internal_comp_buf* v4lfb_to_icomp_buf(
	struct VP9Decoder_s *pbi,
	struct vdec_v4l2_buffer *fb)
{
	struct aml_video_dec_buf *aml_fb = NULL;
	struct aml_vcodec_ctx * v4l2_ctx = pbi->v4l2_ctx;

	aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer);
	return &v4l2_ctx->comp_bufs[aml_fb->internal_index];
}

/*
 * Same as v4lfb_to_icomp_buf(), but starting from the decoder's buffer
 * index (m_BUF[index] holds the v4l frame buffer pointer).
 */
static struct internal_comp_buf* index_to_icomp_buf(
	struct VP9Decoder_s *pbi, int index)
{
	struct aml_video_dec_buf *aml_fb = NULL;
	struct aml_vcodec_ctx * v4l2_ctx = pbi->v4l2_ctx;
	struct vdec_v4l2_buffer *fb = NULL;

	fb = (struct vdec_v4l2_buffer *)
		pbi->m_BUF[index].v4l_ref_buf_addr;
	aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer);
	return &v4l2_ctx->comp_bufs[aml_fb->internal_index];
}

/*
 * Allocate compressed-frame MMU pages for buffer cur_buf_idx and fill
 * mmu_index_adr with the page indices. No-op (returns 0) in pure
 * double-write mode 0x10; rejects >= 12-bit streams as a fatal error.
 * Returns the mmu box result, or -1 on failure.
 */
int vp9_alloc_mmu(
	struct VP9Decoder_s *pbi,
	int cur_buf_idx,
	int pic_width,
	int pic_height,
	unsigned short bit_depth,
	unsigned int *mmu_index_adr)
{
	int ret;
	int bit_depth_10 = (bit_depth == VPX_BITS_10);
	int cur_mmu_4k_number;

	if (get_double_write_mode(pbi) == 0x10)
		return 0;

	if (bit_depth >= VPX_BITS_12) {
		pbi->fatal_error = DECODER_FATAL_ERROR_SIZE_OVERFLOW;
		pr_err("fatal_error, un support bit depth 12!\n\n");
		return -1;
	}

	cur_mmu_4k_number = vp9_mmu_page_num(pbi,
			pic_width,
			pic_height,
			bit_depth_10);
	if (cur_mmu_4k_number < 0)
		return -1;

	if (pbi->is_used_v4l) {
		struct internal_comp_buf *ibuf =
			index_to_icomp_buf(pbi, cur_buf_idx);

		/* NOTE(review): the v4l path sizes the allocation by
		 * ibuf->frame_buffer_size rather than the computed
		 * cur_mmu_4k_number — presumably the comp buffer is
		 * pre-sized; verify against the v4l allocator.
		 */
		ret = decoder_mmu_box_alloc_idx(
			ibuf->mmu_box,
			ibuf->index,
			ibuf->frame_buffer_size,
			mmu_index_adr);
	} else {
		ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START);
		ret = decoder_mmu_box_alloc_idx(
			pbi->mmu_box,
			cur_buf_idx,
			cur_mmu_4k_number,
			mmu_index_adr);
		ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END);
	}

	return ret;
}

#ifdef VP9_10B_MMU_DW
/*
 * Double-write variant of vp9_alloc_mmu(): allocates pages from the
 * dedicated mmu_box_dw pool. Not supported for v4l instances
 * (returns -1); same 12-bit rejection and DW 0x10 early-out as the
 * primary path.
 */
int vp9_alloc_mmu_dw(
	struct VP9Decoder_s *pbi,
	int cur_buf_idx,
	int pic_width,
	int pic_height,
	unsigned short bit_depth,
	unsigned int *mmu_index_adr)
{
	int ret;
	int bit_depth_10 = (bit_depth == VPX_BITS_10);
	int cur_mmu_4k_number;

	if (pbi->is_used_v4l)
		return -1;

	if (get_double_write_mode(pbi) == 0x10)
		return 0;

	if (bit_depth >= VPX_BITS_12) {
		pbi->fatal_error = DECODER_FATAL_ERROR_SIZE_OVERFLOW;
		pr_err("fatal_error, un support bit depth 12!\n\n");
		return -1;
	}

	cur_mmu_4k_number = vp9_mmu_page_num(pbi,
			pic_width,
			pic_height,
			bit_depth_10);
	if (cur_mmu_4k_number < 0)
		return -1;

	ret = decoder_mmu_box_alloc_idx(
		pbi->mmu_box_dw,
		cur_buf_idx,
		cur_mmu_4k_number,
		mmu_index_adr);

	return ret;
}

#endif

#ifndef MV_USE_FIXED_BUF
/* Free every allocated motion-vector buffer back to the bmmu box. */
static void dealloc_mv_bufs(struct VP9Decoder_s *pbi)
{
	int i;
	for (i = 0; i < MV_BUFFER_NUM; i++) {
		if (pbi->m_mv_BUF[i].start_adr) {
			if (debug)
				pr_info(
				"dealloc mv buf(%d) adr %ld size 0x%x used_flag %d\n",
				i, pbi->m_mv_BUF[i].start_adr,
				pbi->m_mv_BUF[i].size,
				pbi->m_mv_BUF[i].used_flag);
			decoder_bmmu_box_free_idx(
				pbi->bmmu_box,
				MV_BUFFER_IDX(i));
			pbi->m_mv_BUF[i].start_adr = 0;
			pbi->m_mv_BUF[i].size = 0;
			pbi->m_mv_BUF[i].used_flag = 0;
		}
	}
}

/*
 * Ensure MV buffer slot i has a physical allocation of at least "size"
 * bytes. If the existing buffer is too small, ALL MV buffers are
 * released first (they are then re-grown on demand); an adequate
 * existing buffer is kept as-is. Returns 0 on success, -1 on
 * allocation failure.
 */
static int alloc_mv_buf(struct VP9Decoder_s *pbi,
	int i, int size)
{
	int ret = 0;

	if (pbi->m_mv_BUF[i].start_adr &&
		size > pbi->m_mv_BUF[i].size) {
		dealloc_mv_bufs(pbi);
	} else if (pbi->m_mv_BUF[i].start_adr)
		return 0;

	if (decoder_bmmu_box_alloc_buf_phy
		(pbi->bmmu_box,
		MV_BUFFER_IDX(i), size,
		DRIVER_NAME,
		&pbi->m_mv_BUF[i].start_adr) < 0) {
		pbi->m_mv_BUF[i].start_adr = 0;
		ret = -1;
	} else {
		pbi->m_mv_BUF[i].size = size;
		pbi->m_mv_BUF[i].used_flag = 0;
		ret = 0;
		if (debug) {
			pr_info(
			"MV Buffer %d: start_adr %px size %x\n",
			i,
			(void *)pbi->m_mv_BUF[i].start_adr,
			pbi->m_mv_BUF[i].size);
		}
	}
	return ret;
}

/*
 * Byte size of one motion-vector buffer for a pic_width x pic_height
 * picture: width is rounded to 64, height to 32, converted to 64x64
 * LCU counts, then sized as 36*16 bytes per LCU plus a per-column
 * side-band term, rounded up to 64KB. pbi is unused here.
 */
static int cal_mv_buf_size(struct VP9Decoder_s *pbi, int pic_width, int pic_height)
{
	int lcu_size = 64; /*fixed 64*/
	int pic_width_64 = (pic_width + 63) & (~0x3f);
	int pic_height_32 = (pic_height + 31) & (~0x1f);
	/* pic_width_64 is already a multiple of 64, so the "+1" branch
	 * is effectively dead for width; height (32-aligned) can still
	 * need the round-up.
	 */
	int pic_width_lcu = (pic_width_64 % lcu_size) ?
		pic_width_64 / lcu_size + 1
		: pic_width_64 / lcu_size;
	int pic_height_lcu = (pic_height_32 % lcu_size) ?
		pic_height_32 / lcu_size + 1
		: pic_height_32 / lcu_size;
	int lcu_total = pic_width_lcu * pic_height_lcu;
	int size_a = lcu_total * 36 * 16;
	int size_b = pic_width_lcu * 16 *
		((pic_height_lcu >> 3) + (pic_height_lcu & 0x7));
	int size = (size_a + size_b + 0xffff) &
		(~0xffff);

	return size;
}


/*
 * Pre-allocate the static pool of MV buffers sized for the initial
 * picture. Skipped entirely in dynamic-alloc mode. The pool count is
 * REF_FRAMES(_4K for >2048x1088) + mv_buf_margin when a margin is
 * configured, capped at MV_BUFFER_NUM. Returns 0 or -1.
 */
static int init_mv_buf_list(struct VP9Decoder_s *pbi)
{
	int i;
	int ret = 0;
	int count = MV_BUFFER_NUM;
	int pic_width = pbi->init_pic_w;
	int pic_height = pbi->init_pic_h;
	int size = cal_mv_buf_size(pbi, pic_width, pic_height);

	if (mv_buf_dynamic_alloc)
		return 0;

	if (mv_buf_margin > 0)
		count = REF_FRAMES + mv_buf_margin;

	if (pbi->init_pic_w > 2048 && pbi->init_pic_h > 1088)
		count = REF_FRAMES_4K + mv_buf_margin;

	if (debug) {
		pr_info("%s w:%d, h:%d, count: %d\n",
			__func__, pbi->init_pic_w, pbi->init_pic_h, count);
	}

	for (i = 0;
		i < count && i < MV_BUFFER_NUM; i++) {
		if (alloc_mv_buf(pbi, i, size) < 0) {
			ret = -1;
			break;
		}
	}
	return ret;
}

/*
 * Bind an MV buffer to pic_config. Dynamic mode allocates a fresh
 * buffer sized for the current stream params; static mode claims a
 * free pre-allocated one. Returns the buffer index or -1.
 */
static int get_mv_buf(struct VP9Decoder_s *pbi,
	struct PIC_BUFFER_CONFIG_s *pic_config)
{
	int i;
	int ret = -1;
	if (mv_buf_dynamic_alloc) {
		union param_u *params = &pbi->vp9_param;
		int size = cal_mv_buf_size(pbi,
			params->p.width, params->p.height);
		/* Find an unallocated slot. */
		for (i = 0; i < MV_BUFFER_NUM;
			i++) {
			if (pbi->m_mv_BUF[i].start_adr == 0) {
				ret = i;
				break;
			}
		}
		if (i == MV_BUFFER_NUM) {
			pr_info(
			"%s: Error, mv buf MV_BUFFER_NUM is not enough\n",
			__func__);
			return ret;
		}

		if (alloc_mv_buf(pbi, ret, size) >= 0) {
			pic_config->mv_buf_index = ret;
			/* Hardware start address is 64KB aligned. */
			pic_config->mpred_mv_wr_start_addr =
				(pbi->m_mv_BUF[ret].start_adr + 0xffff) &
				(~0xffff);
			pic_config->mv_size = size;

			if (debug & VP9_DEBUG_BUFMGR_MORE)
				pr_info(
				"%s alloc => %d (%ld) size 0x%x\n",
				__func__, ret,
				pic_config->mpred_mv_wr_start_addr,
				pic_config->mv_size);
		} else {
			pr_info(
			"%s: Error, mv buf alloc fail\n",
			__func__);
		}
		return ret;
	}

	/* Static pool: claim the first allocated-but-unused buffer. */
	for (i = 0; i < MV_BUFFER_NUM; i++) {
		if (pbi->m_mv_BUF[i].start_adr &&
			pbi->m_mv_BUF[i].used_flag == 0) {
			pbi->m_mv_BUF[i].used_flag = 1;
			ret = i;
			break;
		}
	}

	if (ret >= 0) {
		pic_config->mv_buf_index = ret;
		pic_config->mpred_mv_wr_start_addr =
			(pbi->m_mv_BUF[ret].start_adr + 0xffff) &
			(~0xffff);
		pic_config->mv_size = pbi->m_mv_BUF[ret].size;
		if (debug & VP9_DEBUG_BUFMGR_MORE)
			pr_info(
			"%s => %d (%lx) size 0x%x\n",
			__func__, ret,
			pic_config->mpred_mv_wr_start_addr,
			pic_config->mv_size);
	} else {
		pr_info(
		"%s: Error, mv buf is not enough\n",
		__func__);
	}
	return ret;
}

/*
 * Return the MV buffer referenced by *mv_buf_index and reset the index
 * to -1. Dynamic mode frees the physical allocation; static mode just
 * clears used_flag.
 * NOTE(review): only an upper-bound check is done — a negative index
 * (e.g. already-put -1) is not rejected here; callers appear to guard
 * with mv_buf_index >= 0, confirm before relying on it.
 */
static void put_mv_buf(struct VP9Decoder_s *pbi,
	int *mv_buf_index)
{
	int i = *mv_buf_index;
	if (i >= MV_BUFFER_NUM) {
		if (debug & VP9_DEBUG_BUFMGR_MORE)
			pr_info(
			"%s: index %d beyond range\n",
			__func__, i);
		return;
	}

	if (mv_buf_dynamic_alloc) {
		if (pbi->m_mv_BUF[i].start_adr) {
			if (debug)
				pr_info(
				"dealloc mv buf(%d) adr %ld size 0x%x used_flag %d\n",
				i, pbi->m_mv_BUF[i].start_adr,
				pbi->m_mv_BUF[i].size,
				pbi->m_mv_BUF[i].used_flag);
			decoder_bmmu_box_free_idx(
				pbi->bmmu_box,
				MV_BUFFER_IDX(i));
			pbi->m_mv_BUF[i].start_adr = 0;
			pbi->m_mv_BUF[i].size = 0;
			pbi->m_mv_BUF[i].used_flag = 0;
		}
		*mv_buf_index = -1;
		return;
	}
	if (debug &
		VP9_DEBUG_BUFMGR_MORE)
		pr_info(
		"%s(%d): used_flag(%d)\n",
		__func__, i,
		pbi->m_mv_BUF[i].used_flag);

	*mv_buf_index = -1;
	if (pbi->m_mv_BUF[i].start_adr &&
		pbi->m_mv_BUF[i].used_flag)
		pbi->m_mv_BUF[i].used_flag = 0;
}

/*
 * Release MV buffers of every frame buffer that is no longer needed:
 * anything with a valid index and an attached MV buffer, except the
 * previous frame (still referenced for motion prediction).
 */
static void put_un_used_mv_bufs(struct VP9Decoder_s *pbi)
{
	struct VP9_Common_s *const cm = &pbi->common;
	struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
	int i;
	for (i = 0; i < pbi->used_buf_num; ++i) {
#if 0
		if ((frame_bufs[i].ref_count == 0) &&
			(frame_bufs[i].buf.index != -1) &&
			(frame_bufs[i].buf.mv_buf_index >= 0)
			)
#else
		if ((&frame_bufs[i] != cm->prev_frame) &&
			(frame_bufs[i].buf.index != -1) &&
			(frame_bufs[i].buf.mv_buf_index >= 0)
			)
#endif

			put_mv_buf(pbi, &frame_bufs[i].buf.mv_buf_index);
	}
}

#ifdef SUPPORT_FB_DECODING
/* True when at least one allocated MV buffer is currently unused. */
static bool mv_buf_available(struct VP9Decoder_s *pbi)
{
	int i;
	bool ret = 0;
	for (i = 0; i < MV_BUFFER_NUM; i++) {
		if (pbi->m_mv_BUF[i].start_adr &&
			pbi->m_mv_BUF[i].used_flag == 0) {
			ret = 1;
			break;
		}
	}
	return ret;
}
#endif
#endif

#ifdef SUPPORT_FB_DECODING
/*
 * Allocate the 2-stage (front/back) decoding ring of stage buffers and
 * reset ring positions. used_stage_buf_num records how many vmallocs
 * actually succeeded; a value of 0 disables 2-stage decoding.
 */
static void init_stage_buf(struct VP9Decoder_s *pbi)
{
	uint i;
	for (i = 0; i < STAGE_MAX_BUFFERS
		&& i < stage_buf_num; i++) {
		pbi->stage_bufs[i] =
			vmalloc(sizeof(struct stage_buf_s));
		if (pbi->stage_bufs[i] == NULL) {
			vp9_print(pbi,
				0, "%s vmalloc fail\n", __func__);
			break;
		}
		pbi->stage_bufs[i]->index = i;
	}
	pbi->used_stage_buf_num = i;
	pbi->s1_pos = 0;
	pbi->s2_pos = 0;
	pbi->s1_buf = NULL;
	pbi->s2_buf = NULL;
	pbi->s1_mv_buf_index = FRAME_BUFFERS;
	pbi->s1_mv_buf_index_pre = FRAME_BUFFERS;
	pbi->s1_mv_buf_index_pre_pre = FRAME_BUFFERS;

	if (pbi->used_stage_buf_num > 0)
		vp9_print(pbi,
			0, "%s 2 stage decoding buf %d\n",
			__func__,
			pbi->used_stage_buf_num);
}

/* Free the stage-buffer ring and reset all ring state. */
static void uninit_stage_buf(struct VP9Decoder_s *pbi)
{
	int i;
	for (i = 0; i < pbi->used_stage_buf_num; i++) {
		if (pbi->stage_bufs[i])
			vfree(pbi->stage_bufs[i]);
		pbi->stage_bufs[i] = NULL;
	}
	pbi->used_stage_buf_num = 0;
	pbi->s1_pos = 0;
	pbi->s2_pos = 0;
	pbi->s1_buf = NULL;
	pbi->s2_buf = NULL;
}

/*
 * Claim the stage buffer at s1_pos for the front (stage-1) pass and
 * allocate its MMU pages. Fails (-1, s1_buf = NULL) when the ring is
 * full (advancing would collide with s2_pos) or page allocation fails.
 */
static int get_s1_buf(
	struct VP9Decoder_s *pbi)
{
	struct stage_buf_s *buf = NULL;
	int ret = -1;
	int buf_page_num = MAX_STAGE_PAGE_NUM;
	int next_s1_pos = pbi->s1_pos + 1;

	if (next_s1_pos >= pbi->used_stage_buf_num)
		next_s1_pos = 0;
	if (next_s1_pos == pbi->s2_pos) {
		pbi->s1_buf = NULL;
		return ret;
	}

	buf = pbi->stage_bufs[pbi->s1_pos];
	/* NOTE(review): allocation uses buf->index here, while
	 * inc_s1_pos()/inc_s2_pos() free FRAME_BUFFERS + buf->index —
	 * verify the intended mmu box index space.
	 */
	ret = decoder_mmu_box_alloc_idx(
		pbi->mmu_box,
		buf->index,
		buf_page_num,
		pbi->stage_mmu_map_addr);
	if (ret < 0) {
		vp9_print(pbi, 0,
			"%s decoder_mmu_box_alloc fail for index %d (s1_pos %d s2_pos %d)\n",
			__func__, buf->index,
			pbi->s1_pos, pbi->s2_pos);
		buf = NULL;
	} else {
		vp9_print(pbi, VP9_DEBUG_2_STAGE,
			"%s decoder_mmu_box_alloc %d page for index %d (s1_pos %d s2_pos %d)\n",
			__func__, buf_page_num, buf->index,
			pbi->s1_pos, pbi->s2_pos);
	}
	pbi->s1_buf = buf;
	return ret;
}

/*
 * Finish the stage-1 pass on the current buffer: trim the MMU pages
 * down to the count the hardware actually wrote (from the HED fill
 * register, or a fixed half in test-schedule builds) and advance the
 * ring's s1 position.
 */
static void inc_s1_pos(struct VP9Decoder_s *pbi)
{
	struct stage_buf_s *buf =
		pbi->stage_bufs[pbi->s1_pos];

	int used_page_num =
#ifdef FB_DECODING_TEST_SCHEDULE
		MAX_STAGE_PAGE_NUM/2;
#else
		(READ_VREG(HEVC_ASSIST_HED_FB_W_CTL) >> 16);
#endif
	decoder_mmu_box_free_idx_tail(pbi->mmu_box,
		FRAME_BUFFERS + buf->index, used_page_num);

	pbi->s1_pos++;
	if (pbi->s1_pos >= pbi->used_stage_buf_num)
		pbi->s1_pos = 0;

	vp9_print(pbi, VP9_DEBUG_2_STAGE,
		"%s (used_page_num %d) for index %d (s1_pos %d s2_pos %d)\n",
		__func__, used_page_num, buf->index,
		pbi->s1_pos, pbi->s2_pos);
}

/* A stage-2 buffer is ready whenever the ring is non-empty. */
#define s2_buf_available(pbi) (pbi->s1_pos != pbi->s2_pos)

/*
 * Take the oldest filled stage buffer (at s2_pos) for the back
 * (stage-2) pass. Returns 0 and sets s2_buf, or -1 if none is ready.
 */
static int get_s2_buf(
	struct VP9Decoder_s *pbi)
{
	int ret = -1;
	struct stage_buf_s *buf = NULL;
	if (s2_buf_available(pbi)) {
		buf = pbi->stage_bufs[pbi->s2_pos];
		vp9_print(pbi, VP9_DEBUG_2_STAGE,
			"%s for index %d (s1_pos %d s2_pos %d)\n",
			__func__,
			buf->index,
			pbi->s1_pos, pbi->s2_pos);
		pbi->s2_buf = buf;
		ret = 0;
	}
	return ret;
}

/*
 * Finish the stage-2 pass: release the buffer's MMU pages entirely and
 * advance the ring's s2 position.
 */
static void inc_s2_pos(struct VP9Decoder_s *pbi)
{
	struct stage_buf_s *buf =
		pbi->stage_bufs[pbi->s2_pos];
	decoder_mmu_box_free_idx(pbi->mmu_box,
		FRAME_BUFFERS + buf->index);
	pbi->s2_pos++;
	if (pbi->s2_pos >= pbi->used_stage_buf_num)
		pbi->s2_pos = 0;
	vp9_print(pbi, VP9_DEBUG_2_STAGE,
		"%s for index %d (s1_pos %d s2_pos %d)\n",
		__func__, buf->index,
		pbi->s1_pos, pbi->s2_pos);
}

/*
 * Free slots remaining in the stage ring (one slot is always kept as
 * a separator between producer and consumer positions).
 */
static int get_free_stage_buf_num(struct VP9Decoder_s *pbi)
{
	int num;
	if (pbi->s1_pos >= pbi->s2_pos)
		num = pbi->used_stage_buf_num -
			(pbi->s1_pos - pbi->s2_pos) - 1;
	else
		num = (pbi->s2_pos - pbi->s1_pos) - 1;
	return num;
}

#ifndef FB_DECODING_TEST_SCHEDULE
static DEFINE_SPINLOCK(fb_core_spin_lock);

/* Placeholder: always reports stage-2 (BACK) decoding as finished. */
static u8 is_s2_decoding_finished(struct VP9Decoder_s *pbi)
{
	/* to do: VLSI review
		completion of last LCU decoding in BACK
	 */
	return 1;
}

/* Placeholder: stage-1 (FRONT) kick-off is not implemented yet. */
static void start_s1_decoding(struct VP9Decoder_s *pbi)
{
	/* to do: VLSI review
		after parser, how to start LCU decoding in BACK
	 */
}

/*
 * Reset the FRONT and/or BACK portions of the HEVC core (per mask),
 * with the DMC request channel disconnected around the reset.
 */
static void fb_reset_core(struct vdec_s *vdec, u32 mask)
{
	/* to do: VLSI review
	   1. how to disconnect DMC for FRONT and BACK
	   2. reset bit 13, 24, FRONT or BACK ??
	 */

	unsigned long flags;
	u32 reset_bits = 0;
	if (mask & HW_MASK_FRONT)
		WRITE_VREG(HEVC_STREAM_CONTROL, 0);
	/* Disconnect the decoder's DMC request channel (bit 4)... */
	spin_lock_irqsave(&fb_core_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 4)));
	spin_unlock_irqrestore(&fb_core_spin_lock, flags);

	/* ...and wait for it to go idle.
	 * NOTE(review): unbounded busy-wait with no timeout.
	 */
	while (!(codec_dmcbus_read(DMC_CHAN_STS)
		& (1 << 4)))
		;

	if ((mask & HW_MASK_FRONT) &&
		input_frame_based(vdec))
		WRITE_VREG(HEVC_STREAM_CONTROL, 0);

	/*
	 * 2: assist
	 * 3: parser
	 * 4: parser_state
	 * 8: dblk
	 * 11:mcpu
	 * 12:ccpu
	 * 13:ddr
	 * 14:iqit
	 * 15:ipp
	 * 17:qdct
	 * 18:mpred
	 * 19:sao
	 * 24:hevc_afifo
	 */
	if (mask & HW_MASK_FRONT) {
		reset_bits =
			(1<<3)|(1<<4)|(1<<11)|
			(1<<12)|(1<<18);
	}
	if (mask & HW_MASK_BACK) {
		reset_bits =
			(1<<8)|(1<<13)|(1<<14)|(1<<15)|
			(1<<17)|(1<<19)|(1<<24);
	}
	/* Pulse the selected reset bits. */
	WRITE_VREG(DOS_SW_RESET3, reset_bits);
#if 0
	(1<<3)|(1<<4)|(1<<8)|(1<<11)|
	(1<<12)|(1<<13)|(1<<14)|(1<<15)|
	(1<<17)|(1<<18)|(1<<19)|(1<<24);
#endif
	WRITE_VREG(DOS_SW_RESET3, 0);


	/* Re-enable the DMC request channel. */
	spin_lock_irqsave(&fb_core_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 4));
	spin_unlock_irqrestore(&fb_core_spin_lock, flags);

}
#endif

#endif

static void init_pic_list_hw(struct VP9Decoder_s *pbi);

/*
 * Pick a free frame buffer (ref_count 0, not held by VF layer, valid
 * index, not the frame currently being decoded), mark it referenced
 * and return its index, or INVALID_IDX when none is free. Runs under
 * the buffer-pool lock.
 */
static int get_free_fb(struct VP9Decoder_s *pbi)
{
	struct VP9_Common_s *const cm = &pbi->common;
	struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
	int i;
	unsigned long flags;

	lock_buffer_pool(cm->buffer_pool, flags);
	if (debug & VP9_DEBUG_BUFMGR_MORE) {
		for (i = 0; i < pbi->used_buf_num; ++i) {
			pr_info("%s:%d, ref_count %d vf_ref %d index %d\r\n",
			__func__, i, frame_bufs[i].ref_count,
			frame_bufs[i].buf.vf_ref,
			frame_bufs[i].buf.index);
		}
	}
	for (i = 0; i < pbi->used_buf_num; ++i) {
		if ((frame_bufs[i].ref_count == 0) &&
			(frame_bufs[i].buf.vf_ref == 0) &&
			(frame_bufs[i].buf.index != -1) &&
			(cm->cur_frame !=
			&frame_bufs[i])
			)
			break;
	}
	if (i != pbi->used_buf_num) {
		frame_bufs[i].ref_count = 1;
		/*pr_info("[MMU DEBUG 1] set ref_count[%d] : %d\r\n",
			i, frame_bufs[i].ref_count);*/
	} else {
		/* Reset i to be INVALID_IDX to indicate
			no free buffer found*/
		i = INVALID_IDX;
	}

	unlock_buffer_pool(cm->buffer_pool, flags);
	return i;
}

/*
 * Stamp every not-yet-shown, unreferenced frame with the current
 * chunk's timestamp so hidden (no-show) frames carry a usable PTS.
 */
static void update_hide_frame_timestamp(struct VP9Decoder_s *pbi)
{
	struct RefCntBuffer_s *const frame_bufs =
		pbi->common.buffer_pool->frame_bufs;
	int i;

	for (i = 0; i < pbi->used_buf_num; ++i) {
		if ((!frame_bufs[i].show_frame) &&
			(!frame_bufs[i].buf.vf_ref) &&
			(frame_bufs[i].buf.BUF_index != -1)) {
			frame_bufs[i].buf.timestamp = pbi->chunk->timestamp;
			vp9_print(pbi, VP9_DEBUG_OUT_PTS,
				"%s, update %d hide frame ts: %lld\n",
				__func__, i, frame_bufs[i].buf.timestamp);
		}
	}
}

/*
 * Index of the first completely unreferenced frame buffer.
 * NOTE(review): returns pbi->used_buf_num when nothing is free — the
 * caller (v4l_get_free_fb) indexes frame_bufs with the result without
 * checking; verify that a free slot is guaranteed on that path.
 */
static int get_free_fb_idx(struct VP9Decoder_s *pbi)
{
	int i;
	struct VP9_Common_s *const cm = &pbi->common;
	struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;

	for (i = 0; i < pbi->used_buf_num; ++i) {
		if ((frame_bufs[i].ref_count == 0) &&
			(frame_bufs[i].buf.vf_ref == 0))
			break;
	}

	return i;
}

/*
 * v4l variant of get_free_fb(): walk the capture-buffer pool and
 * either reuse a buffer already owned by the decoder or configure a
 * fresh one handed over from the m2m queue. Marks the chosen buffer
 * referenced, timestamps it, flags its v4l fb as FB_ST_DECODER, and
 * returns its index or INVALID_IDX.
 */
static int v4l_get_free_fb(struct VP9Decoder_s *pbi)
{
	struct VP9_Common_s *const cm = &pbi->common;
	struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
	struct aml_vcodec_ctx * v4l = pbi->v4l2_ctx;
	struct v4l_buff_pool *pool = &v4l->cap_pool;
	struct PIC_BUFFER_CONFIG_s *pic = NULL;
	struct PIC_BUFFER_CONFIG_s *free_pic = NULL;
	ulong flags;
	int idx, i;

	lock_buffer_pool(cm->buffer_pool, flags);

	for (i = 0; i < pool->in; ++i) {
		/* seq[] packs (state << 16 | v4l buffer index). */
		u32 state = (pool->seq[i] >> 16);
		u32 index = (pool->seq[i] & 0xffff);

		switch (state) {
		case V4L_CAP_BUFF_IN_DEC:
			pic = &frame_bufs[i].buf;
			if ((frame_bufs[i].ref_count == 0) &&
				(pic->vf_ref == 0) &&
				(pic->index != -1) &&
				pic->cma_alloc_addr) {
				free_pic = pic;
			}
			break;
		case V4L_CAP_BUFF_IN_M2M:
			/* New buffer from the m2m queue: bind it to a
			 * free internal slot and set it up for decode.
			 */
			idx = get_free_fb_idx(pbi);
			pic = &frame_bufs[idx].buf;
			pic->y_crop_width = pbi->frame_width;
			pic->y_crop_height = pbi->frame_height;
			pbi->buffer_wrap[idx] = index;
			if (!v4l_alloc_and_config_pic(pbi, pic)) {
				set_canvas(pbi, pic);
				init_pic_list_hw(pbi);
				free_pic = pic;
			}
			break;
		default:
			break;
		}

		if (free_pic) {
			frame_bufs[i].ref_count = 1;
			break;
		}
	}

	if (free_pic && pbi->chunk) {
		free_pic->timestamp = pbi->chunk->timestamp;
		update_hide_frame_timestamp(pbi);
	}

	unlock_buffer_pool(cm->buffer_pool, flags);

	if (free_pic) {
		struct vdec_v4l2_buffer *fb =
			(struct vdec_v4l2_buffer *)
			pbi->m_BUF[i].v4l_ref_buf_addr;

		fb->status = FB_ST_DECODER;
	}

	if (debug & VP9_DEBUG_OUT_PTS) {
		if (free_pic) {
			pr_debug("%s, idx: %d, ts: %lld\n",
				__func__, free_pic->index, free_pic->timestamp);
		} else {
			pr_debug("%s, vp9 get free pic null\n", __func__);
		}
	}

	return free_pic ? free_pic->index : INVALID_IDX;
}

/*
 * Count frame buffers currently available for decoding. v4l instances
 * also count ready m2m destination buffers, and pretend the minimum is
 * available before header parsing so the first run is scheduled.
 */
static int get_free_buf_count(struct VP9Decoder_s *pbi)
{
	struct VP9_Common_s *const cm = &pbi->common;
	struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
	struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(pbi->v4l2_ctx);
	int i, free_buf_count = 0;

	if (pbi->is_used_v4l) {
		for (i = 0; i < pbi->used_buf_num; ++i) {
			if ((frame_bufs[i].ref_count == 0) &&
				(frame_bufs[i].buf.vf_ref == 0) &&
				frame_bufs[i].buf.cma_alloc_addr) {
				free_buf_count++;
			}
		}

		if (ctx->cap_pool.dec < pbi->used_buf_num) {
			free_buf_count +=
				v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx);
		}

		/* trigger to parse head data.
		 */
		if (!pbi->v4l_params_parsed) {
			free_buf_count = pbi->run_ready_min_buf_num;
		}
	} else {
		for (i = 0; i < pbi->used_buf_num; ++i) {
			if ((frame_bufs[i].ref_count == 0) &&
				(frame_bufs[i].buf.vf_ref == 0) &&
				(frame_bufs[i].buf.index != -1))
				free_buf_count++;
		}
	}

	return free_buf_count;
}

/*
 * Drop one reference from frame buffer idx (no-op for negative idx).
 * When the count reaches zero and a raw frame buffer was actually set
 * up (priv non-NULL), release it.
 */
static void decrease_ref_count(int idx, struct RefCntBuffer_s *const frame_bufs,
	struct BufferPool_s *const pool)
{
	if (idx >= 0) {
		--frame_bufs[idx].ref_count;
		/*pr_info("[MMU DEBUG 7] dec ref_count[%d] : %d\r\n", idx,
		 * frame_bufs[idx].ref_count);
		 */
		/*A worker may only get a free framebuffer index when
		 *calling get_free_fb. But the private buffer is not set up
		 *until finish decoding header. So any error happens during
		 *decoding header, the frame_bufs will not have valid priv
		 *buffer.
		 */

		if (frame_bufs[idx].ref_count == 0 &&
			frame_bufs[idx].raw_frame_buffer.priv)
			vp9_release_frame_buffer
			(&frame_bufs[idx].raw_frame_buffer);
	}
}

/*
 * Build cm->next_ref_frame_map from refresh_frame_flags: refreshed
 * slots point at the new frame, others keep their current mapping.
 * Reference counts are raised for both the new frame (per refreshed
 * slot) and every currently-mapped frame, so nothing is recycled
 * while the map transition is in flight. Runs under the pool lock.
 */
static void generate_next_ref_frames(struct VP9Decoder_s *pbi)
{
	struct VP9_Common_s *const cm = &pbi->common;
	struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs;
	struct BufferPool_s *const pool = cm->buffer_pool;
	int mask, ref_index = 0;
	unsigned long flags;

	/* Generate next_ref_frame_map.*/
	lock_buffer_pool(pool, flags);
	for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
		if (mask & 1) {
			cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
			++frame_bufs[cm->new_fb_idx].ref_count;
			/*pr_info("[MMU DEBUG 4] inc ref_count[%d] : %d\r\n",
			 *cm->new_fb_idx, frame_bufs[cm->new_fb_idx].ref_count);
			 */
		} else
			cm->next_ref_frame_map[ref_index] =
				cm->ref_frame_map[ref_index];
		/* Current thread holds the reference frame.*/
		if (cm->ref_frame_map[ref_index] >= 0) {
			++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
			/*pr_info
			 *("[MMU DEBUG 5] inc ref_count[%d] : %d\r\n",
			 *cm->ref_frame_map[ref_index],
			 *frame_bufs[cm->ref_frame_map[ref_index]].ref_count);
			 */
		}
		++ref_index;
	}

	/* Slots beyond the highest refresh bit: copy mapping and hold. */
	for (; ref_index < REF_FRAMES; ++ref_index) {
		cm->next_ref_frame_map[ref_index] =
			cm->ref_frame_map[ref_index];
		/* Current thread holds the reference frame.*/
		if (cm->ref_frame_map[ref_index] >= 0) {
			++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
			/*pr_info("[MMU DEBUG 6] inc ref_count[%d] : %d\r\n",
			 *cm->ref_frame_map[ref_index],
			 *frame_bufs[cm->ref_frame_map[ref_index]].ref_count);
			 */
		}
	}
	unlock_buffer_pool(pool, flags);
	return;
}

/*
 * Commit next_ref_frame_map into ref_frame_map, releasing the
 * temporary holds taken by generate_next_ref_frames(). Refreshed
 * slots drop an extra reference on the displaced frame (the map's own
 * reference). show_existing_frame skips the tail loop so the existing
 * mappings keep their counts. Runs under the pool lock.
 */
static void refresh_ref_frames(struct VP9Decoder_s *pbi)

{
	struct VP9_Common_s *const cm = &pbi->common;
	struct BufferPool_s *pool = cm->buffer_pool;
	struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs;
	int mask, ref_index = 0;
	unsigned long flags;

	lock_buffer_pool(pool, flags);
	for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
		const int old_idx = cm->ref_frame_map[ref_index];
		/*Current thread releases the holding of reference frame.*/
		decrease_ref_count(old_idx, frame_bufs, pool);

		/*Release the reference frame in reference map.*/
		if ((mask & 1) && old_idx >= 0)
			decrease_ref_count(old_idx, frame_bufs, pool);
		cm->ref_frame_map[ref_index] =
			cm->next_ref_frame_map[ref_index];
		++ref_index;
	}

	/*Current thread releases the holding of reference frame.*/
	for (; ref_index < REF_FRAMES && !cm->show_existing_frame;
		++ref_index) {
		const int old_idx = cm->ref_frame_map[ref_index];

		decrease_ref_count(old_idx, frame_bufs, pool);
		cm->ref_frame_map[ref_index] =
			cm->next_ref_frame_map[ref_index];
	}
	unlock_buffer_pool(pool, flags);
	return;
}

int vp9_bufmgr_process(struct VP9Decoder_s *pbi, union param_u *params)
{
	struct VP9_Common_s *const cm = &pbi->common;
	struct BufferPool_s *pool = cm->buffer_pool;
	struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs;
	struct PIC_BUFFER_CONFIG_s *pic = NULL;
	int i;
	int ret;

pbi->ready_for_new_data = 0; + + if ((pbi->has_keyframe == 0) && + (params->p.frame_type != KEY_FRAME) && + (!params->p.intra_only)){ + on_no_keyframe_skiped++; + pr_info("vp9_bufmgr_process no key frame return\n"); + return -2; + } + pbi->has_keyframe = 1; + on_no_keyframe_skiped = 0; +#if 0 + if (pbi->mmu_enable) { + if (!pbi->m_ins_flag) + pbi->used_4k_num = (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16); + if (cm->prev_fb_idx >= 0) { + decoder_mmu_box_free_idx_tail(pbi->mmu_box, + cm->prev_fb_idx, pbi->used_4k_num); + } + } +#endif + if (cm->new_fb_idx >= 0 + && frame_bufs[cm->new_fb_idx].ref_count == 0){ + vp9_release_frame_buffer + (&frame_bufs[cm->new_fb_idx].raw_frame_buffer); + } + /*pr_info("Before get_free_fb, prev_fb_idx : %d, new_fb_idx : %d\r\n", + cm->prev_fb_idx, cm->new_fb_idx);*/ +#ifndef MV_USE_FIXED_BUF + put_un_used_mv_bufs(pbi); + if (debug & VP9_DEBUG_BUFMGR_DETAIL) + dump_pic_list(pbi); +#endif + cm->new_fb_idx = pbi->is_used_v4l ? + v4l_get_free_fb(pbi) : + get_free_fb(pbi); + if (cm->new_fb_idx == INVALID_IDX) { + pr_info("get_free_fb error\r\n"); + return -1; + } + +#ifndef MV_USE_FIXED_BUF +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num == 0) { +#endif + if (get_mv_buf(pbi, + &pool->frame_bufs[cm->new_fb_idx]. 
+ buf) < 0) { + pr_info("get_mv_buf fail\r\n"); + return -1; + } + if (debug & VP9_DEBUG_BUFMGR_DETAIL) + dump_pic_list(pbi); +#ifdef SUPPORT_FB_DECODING + } +#endif +#endif + cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx]; + /*if (debug & VP9_DEBUG_BUFMGR) + pr_info("[VP9 DEBUG]%s(get_free_fb): %d\r\n", __func__, + cm->new_fb_idx);*/ + + pbi->cur_buf = &frame_bufs[cm->new_fb_idx]; + if (pbi->mmu_enable) { + /* moved to after picture size ready + *alloc_mmu(cm, params->p.width, params->p.height, + *params->p.bit_depth, pbi->frame_mmu_map_addr); + */ + cm->prev_fb_idx = cm->new_fb_idx; + } + /*read_uncompressed_header()*/ + cm->last_frame_type = cm->frame_type; + cm->last_intra_only = cm->intra_only; + cm->profile = params->p.profile; + if (cm->profile >= MAX_PROFILES) { + pr_err("Error: Unsupported profile %d\r\n", cm->profile); + return -1; + } + cm->show_existing_frame = params->p.show_existing_frame; + if (cm->show_existing_frame) { + /* Show an existing frame directly.*/ + int frame_to_show_idx = params->p.frame_to_show_idx; + int frame_to_show; + unsigned long flags; + if (frame_to_show_idx >= REF_FRAMES) { + pr_info("frame_to_show_idx %d exceed max index\r\n", + frame_to_show_idx); + return -1; + } + + frame_to_show = cm->ref_frame_map[frame_to_show_idx]; + /*pr_info("frame_to_show %d\r\n", frame_to_show);*/ + lock_buffer_pool(pool, flags); + if (frame_to_show < 0 || + frame_bufs[frame_to_show].ref_count < 1) { + unlock_buffer_pool(pool, flags); + pr_err + ("Error:Buffer %d does not contain a decoded frame", + frame_to_show); + return -1; + } + + ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show); + unlock_buffer_pool(pool, flags); + pbi->refresh_frame_flags = 0; + /*cm->lf.filter_level = 0;*/ + cm->show_frame = 1; + cm->cur_frame->show_frame = 1; + + /* + *if (pbi->frame_parallel_decode) { + * for (i = 0; i < REF_FRAMES; ++i) + * cm->next_ref_frame_map[i] = + * cm->ref_frame_map[i]; + *} + */ + /* do not decode, search next start code */ + return 1; + 
} + cm->frame_type = params->p.frame_type; + cm->show_frame = params->p.show_frame; + cm->bit_depth = params->p.bit_depth; + cm->error_resilient_mode = params->p.error_resilient_mode; + cm->cur_frame->show_frame = cm->show_frame; + + if (cm->frame_type == KEY_FRAME) { + pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1; + + for (i = 0; i < REFS_PER_FRAME; ++i) { + cm->frame_refs[i].idx = INVALID_IDX; + cm->frame_refs[i].buf = NULL; + } +#ifdef VP9_10B_MMU_DW + ret = setup_frame_size(pbi, + cm, params, + pbi->frame_mmu_map_addr, + pbi->frame_mmu_dw_map_addr, + print_header_info); +#else + ret = setup_frame_size(pbi, + cm, params, + pbi->frame_mmu_map_addr, + NULL, + print_header_info); +#endif + if (ret) + return -1; + if (pbi->need_resync) { + memset(&cm->ref_frame_map, -1, + sizeof(cm->ref_frame_map)); + pbi->need_resync = 0; + } + } else { + cm->intra_only = cm->show_frame ? 0 : params->p.intra_only; + /*if (print_header_info) { + * if (cm->show_frame) + * pr_info + * ("intra_only set to 0 because of show_frame\n"); + * else + * pr_info + * ("1-bit intra_only read: %d\n", cm->intra_only); + *} + */ + + + cm->reset_frame_context = cm->error_resilient_mode ? + 0 : params->p.reset_frame_context; + if (print_header_info) { + if (cm->error_resilient_mode) + pr_info + ("reset to 0 error_resilient_mode\n"); + else + pr_info + (" * 2-bits reset_frame_context read : %d\n", + cm->reset_frame_context); + } + + if (cm->intra_only) { + if (cm->profile > PROFILE_0) { + /*read_bitdepth_colorspace_sampling(cm, + * rb, print_header_info); + */ + } else { + /*NOTE: The intra-only frame header + *does not include the specification + *of either the color format or + *color sub-sampling + *in profile 0. VP9 specifies that the default + *color format should be YUV 4:2:0 in this + *case (normative). 
+ */ + cm->color_space = VPX_CS_BT_601; + cm->subsampling_y = cm->subsampling_x = 1; + cm->bit_depth = VPX_BITS_8; + cm->use_highbitdepth = 0; + } + + pbi->refresh_frame_flags = + params->p.refresh_frame_flags; + /*if (print_header_info) + * pr_info("*%d-bits refresh_frame read:0x%x\n", + * REF_FRAMES, pbi->refresh_frame_flags); + */ +#ifdef VP9_10B_MMU_DW + ret = setup_frame_size(pbi, + cm, + params, + pbi->frame_mmu_map_addr, + pbi->frame_mmu_dw_map_addr, + print_header_info); +#else + ret = setup_frame_size(pbi, + cm, + params, + pbi->frame_mmu_map_addr, + NULL, + print_header_info); +#endif + if (ret) + return -1; + if (pbi->need_resync) { + memset(&cm->ref_frame_map, -1, + sizeof(cm->ref_frame_map)); + pbi->need_resync = 0; + } + } else if (pbi->need_resync != 1) { /* Skip if need resync */ + pbi->refresh_frame_flags = + params->p.refresh_frame_flags; + if (print_header_info) + pr_info + ("*%d-bits refresh_frame read:0x%x\n", + REF_FRAMES, pbi->refresh_frame_flags); + for (i = 0; i < REFS_PER_FRAME; ++i) { + const int ref = + (params->p.ref_info >> + (((REFS_PER_FRAME-i-1)*4)+1)) + & 0x7; + const int idx = + cm->ref_frame_map[ref]; + struct RefBuffer_s * const ref_frame = + &cm->frame_refs[i]; + if (print_header_info) + pr_info("*%d-bits ref[%d]read:%d\n", + REF_FRAMES_LOG2, i, ref); + ref_frame->idx = idx; + ref_frame->buf = &frame_bufs[idx].buf; + cm->ref_frame_sign_bias[LAST_FRAME + i] + = (params->p.ref_info >> + ((REFS_PER_FRAME-i-1)*4)) & 0x1; + if (print_header_info) + pr_info("1bit ref_frame_sign_bias"); + /*pr_info + *("%dread: %d\n", + *LAST_FRAME+i, + *cm->ref_frame_sign_bias + *[LAST_FRAME + i]); + */ + /*pr_info + *("[VP9 DEBUG]%s(get ref):%d\r\n", + *__func__, ref_frame->idx); + */ + + } +#ifdef VP9_10B_MMU_DW + ret = setup_frame_size_with_refs( + pbi, + cm, + params, + pbi->frame_mmu_map_addr, + pbi->frame_mmu_dw_map_addr, + print_header_info); +#else + ret = setup_frame_size_with_refs( + pbi, + cm, + params, + pbi->frame_mmu_map_addr, + NULL, + 
print_header_info); +#endif + if (ret) + return -1; + for (i = 0; i < REFS_PER_FRAME; ++i) { + /*struct RefBuffer_s *const ref_buf = + *&cm->frame_refs[i]; + */ + /* to do: + *vp9_setup_scale_factors_for_frame + */ + } + } + } + + pic = get_frame_new_buffer(cm); + if (!pic) + return -1; + + pic->bit_depth = cm->bit_depth; + pic->color_space = cm->color_space; + pic->slice_type = cm->frame_type; + + if (pbi->need_resync) { + pr_err + ("Error: Keyframe/intra-only frame required to reset\r\n"); + return -1; + } + generate_next_ref_frames(pbi); + pbi->hold_ref_buf = 1; + +#if 0 + if (frame_is_intra_only(cm) || cm->error_resilient_mode) + vp9_setup_past_independence(cm); + setup_loopfilter(&cm->lf, rb, print_header_info); + setup_quantization(cm, &pbi->mb, rb, print_header_info); + setup_segmentation(&cm->seg, rb, print_header_info); + setup_segmentation_dequant(cm, print_header_info); + + setup_tile_info(cm, rb, print_header_info); + sz = vp9_rb_read_literal(rb, 16); + if (print_header_info) + pr_info(" * 16-bits size read : %d (0x%x)\n", sz, sz); + + if (sz == 0) + vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, + "Invalid header size"); +#endif + /*end read_uncompressed_header()*/ + cm->use_prev_frame_mvs = !cm->error_resilient_mode && + cm->width == cm->last_width && + cm->height == cm->last_height && + !cm->last_intra_only && + cm->last_show_frame && + (cm->last_frame_type != KEY_FRAME); + + /*pr_info + *("set use_prev_frame_mvs to %d (last_width %d last_height %d", + *cm->use_prev_frame_mvs, cm->last_width, cm->last_height); + *pr_info + *(" last_intra_only %d last_show_frame %d last_frame_type %d)\n", + *cm->last_intra_only, cm->last_show_frame, cm->last_frame_type); + */ + + if (pbi->enable_fence && cm->show_frame) { + struct PIC_BUFFER_CONFIG_s *pic = &cm->cur_frame->buf; + struct vdec_s *vdec = hw_to_vdec(pbi); + + /* create fence for each buffers. 
*/ + ret = vdec_timeline_create_fence(vdec->sync); + if (ret < 0) + return ret; + + pic->fence = vdec->sync->fence; + pic->bit_depth = cm->bit_depth; + pic->slice_type = cm->frame_type; + pic->stream_offset = pbi->pre_stream_offset; + + if (pbi->chunk) { + pic->pts = pbi->chunk->pts; + pic->pts64 = pbi->chunk->pts64; + pic->timestamp = pbi->chunk->timestamp; + } + + /* post video vframe. */ + prepare_display_buf(pbi, pic); + } + + return 0; +} + + +void swap_frame_buffers(struct VP9Decoder_s *pbi) +{ + int ref_index = 0; + struct VP9_Common_s *const cm = &pbi->common; + struct BufferPool_s *const pool = cm->buffer_pool; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + unsigned long flags; + refresh_ref_frames(pbi); + pbi->hold_ref_buf = 0; + cm->frame_to_show = get_frame_new_buffer(cm); + + if (cm->frame_to_show) { + /*if (!pbi->frame_parallel_decode || !cm->show_frame) {*/ + lock_buffer_pool(pool, flags); + --frame_bufs[cm->new_fb_idx].ref_count; + /*pr_info("[MMU DEBUG 8] dec ref_count[%d] : %d\r\n", cm->new_fb_idx, + * frame_bufs[cm->new_fb_idx].ref_count); + */ + unlock_buffer_pool(pool, flags); + /*}*/ + } + + /*Invalidate these references until the next frame starts.*/ + for (ref_index = 0; ref_index < 3; ref_index++) + cm->frame_refs[ref_index].idx = -1; +} + +#if 0 +static void check_resync(vpx_codec_alg_priv_t *const ctx, + const struct VP9Decoder_s *const pbi) +{ + /* Clear resync flag if worker got a key frame or intra only frame.*/ + if (ctx->need_resync == 1 && pbi->need_resync == 0 && + (pbi->common.intra_only || pbi->common.frame_type == KEY_FRAME)) + ctx->need_resync = 0; +} +#endif + +int vp9_get_raw_frame(struct VP9Decoder_s *pbi, struct PIC_BUFFER_CONFIG_s *sd) +{ + struct VP9_Common_s *const cm = &pbi->common; + int ret = -1; + + if (pbi->ready_for_new_data == 1) + return ret; + + pbi->ready_for_new_data = 1; + + /* no raw frame to show!!! 
*/ + if (!cm->show_frame) + return ret; + + /* may not be get buff in v4l2 */ + if (!cm->frame_to_show) + return ret; + + pbi->ready_for_new_data = 1; + + *sd = *cm->frame_to_show; + ret = 0; + + return ret; +} + +int vp9_bufmgr_init(struct VP9Decoder_s *pbi, struct BuffInfo_s *buf_spec_i, + struct buff_s *mc_buf_i) { + struct VP9_Common_s *cm = &pbi->common; + + /*memset(pbi, 0, sizeof(struct VP9Decoder_s));*/ + pbi->frame_count = 0; + pbi->pic_count = 0; + pbi->pre_stream_offset = 0; + cm->buffer_pool = &pbi->vp9_buffer_pool; + spin_lock_init(&cm->buffer_pool->lock); + cm->prev_fb_idx = INVALID_IDX; + cm->new_fb_idx = INVALID_IDX; + pbi->used_4k_num = -1; + cm->cur_fb_idx_mmu = INVALID_IDX; + pr_debug + ("After vp9_bufmgr_init, prev_fb_idx : %d, new_fb_idx : %d\r\n", + cm->prev_fb_idx, cm->new_fb_idx); + pbi->need_resync = 1; + /* Initialize the references to not point to any frame buffers.*/ + memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); + memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map)); + cm->current_video_frame = 0; + pbi->ready_for_new_data = 1; + + /* private init */ + pbi->work_space_buf = buf_spec_i; + if (!pbi->mmu_enable) + pbi->mc_buf = mc_buf_i; + + pbi->rpm_addr = NULL; + pbi->lmem_addr = NULL; + + pbi->use_cma_flag = 0; + pbi->decode_idx = 0; + pbi->slice_idx = 0; + /*int m_uiMaxCUWidth = 1<<7;*/ + /*int m_uiMaxCUHeight = 1<<7;*/ + pbi->has_keyframe = 0; + pbi->skip_flag = 0; + pbi->wait_buf = 0; + pbi->error_flag = 0; + + pbi->pts_mode = PTS_NORMAL; + pbi->last_pts = 0; + pbi->last_lookup_pts = 0; + pbi->last_pts_us64 = 0; + pbi->last_lookup_pts_us64 = 0; + pbi->shift_byte_count = 0; + pbi->shift_byte_count_lo = 0; + pbi->shift_byte_count_hi = 0; + pbi->pts_mode_switching_count = 0; + pbi->pts_mode_recovery_count = 0; + + pbi->buf_num = 0; + pbi->pic_num = 0; + + return 0; +} + +int vp9_bufmgr_postproc(struct VP9Decoder_s *pbi) +{ + struct vdec_s *vdec = hw_to_vdec(pbi); + struct VP9_Common_s *cm = &pbi->common; + 
struct PIC_BUFFER_CONFIG_s sd; + + if (pbi->postproc_done) + return 0; + pbi->postproc_done = 1; + swap_frame_buffers(pbi); + if (!cm->show_existing_frame) { + cm->last_show_frame = cm->show_frame; + cm->prev_frame = cm->cur_frame; +#if 0 + if (cm->seg.enabled && !pbi->frame_parallel_decode) + vp9_swap_current_and_last_seg_map(cm); +#endif + } + cm->last_width = cm->width; + cm->last_height = cm->height; + pbi->last_width = cm->width; + pbi->last_height = cm->height; + + if (cm->show_frame) + cm->current_video_frame++; + + if (vp9_get_raw_frame(pbi, &sd) == 0) { + /*pr_info("Display frame index %d\r\n", sd.index);*/ + sd.stream_offset = pbi->pre_stream_offset; + + if (pbi->enable_fence) { + int i, j, used_size, ret; + int signed_count = 0; + struct vframe_s *signed_fence[VF_POOL_SIZE]; + /* notify signal to wake up wq of fence. */ + vdec_timeline_increase(vdec->sync, 1); + mutex_lock(&pbi->fence_mutex); + used_size = pbi->fence_vf_s.used_size; + if (used_size) { + for (i = 0, j = 0; i < VF_POOL_SIZE && j < used_size; i++) { + if (pbi->fence_vf_s.fence_vf[i] != NULL) { + ret = dma_fence_get_status(pbi->fence_vf_s.fence_vf[i]->fence); + if (ret == 1) { + signed_fence[signed_count] = pbi->fence_vf_s.fence_vf[i]; + pbi->fence_vf_s.fence_vf[i] = NULL; + pbi->fence_vf_s.used_size--; + signed_count++; + } + j++; + } + } + } + mutex_unlock(&pbi->fence_mutex); + if (signed_count != 0) { + for (i = 0; i < signed_count; i++) + vvp9_vf_put(signed_fence[i], vdec); + } + } else { + prepare_display_buf(pbi, &sd); + } + + pbi->pre_stream_offset = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + } + +/* else + * pr_info + * ("Not display this frame,ready_for_new_data%d show_frame%d\r\n", + * pbi->ready_for_new_data, cm->show_frame); + */ + return 0; +} + +/************************************************** + * + *VP9 buffer management end + * + *************************************************** + */ + + +#define HEVC_CM_BODY_START_ADDR 0x3626 +#define HEVC_CM_BODY_LENGTH 0x3627 +#define 
HEVC_CM_HEADER_LENGTH 0x3629 +#define HEVC_CM_HEADER_OFFSET 0x362b + +#define HEVC_CM_BODY_LENGTH2 0x3663 +#define HEVC_CM_HEADER_OFFSET2 0x3664 +#define HEVC_CM_HEADER_LENGTH2 0x3665 +#define LOSLESS_COMPRESS_MODE + +/*#define DECOMP_HEADR_SURGENT*/ +#ifdef VP9_10B_NV21 +static u32 mem_map_mode = 2 /* 0:linear 1:32x32 2:64x32*/ +#else +static u32 mem_map_mode; /* 0:linear 1:32x32 2:64x32 ; m8baby test1902 */ +#endif +static u32 enable_mem_saving = 1; +static u32 force_w_h; + +static u32 force_fps; + + +const u32 vp9_version = 201602101; +static u32 debug; +static u32 radr; +static u32 rval; +static u32 pop_shorts; +static u32 dbg_cmd; +static u32 dbg_skip_decode_index; + +/* + * bit 0~3, for HEVCD_IPP_AXIIF_CONFIG endian config + * bit 8~23, for HEVC_SAO_CTRL1 endian config + */ +static u32 endian; +#define HEVC_CONFIG_BIG_ENDIAN ((0x880 << 8) | 0x8) +#define HEVC_CONFIG_LITTLE_ENDIAN ((0xff0 << 8) | 0xf) + +#ifdef ERROR_HANDLE_DEBUG +static u32 dbg_nal_skip_flag; + /* bit[0], skip vps; bit[1], skip sps; bit[2], skip pps */ +static u32 dbg_nal_skip_count; +#endif +/*for debug*/ +static u32 decode_pic_begin; +static uint slice_parse_begin; +static u32 step; +static u32 vp9_max_pic_w; +static u32 vp9_max_pic_h; +static u32 dynamic_buf_num_margin; + +static u32 buf_alloc_depth = 10; +static u32 buf_alloc_size; +/* + *bit[0]: 0, + * bit[1]: 0, always release cma buffer when stop + * bit[1]: 1, never release cma buffer when stop + *bit[0]: 1, when stop, release cma buffer if blackout is 1; + *do not release cma buffer is blackout is not 1 + * + *bit[2]: 0, when start decoding, check current displayed buffer + * (only for buffer decoded by vp9) if blackout is 0 + * 1, do not check current displayed buffer + * + *bit[3]: 1, if blackout is not 1, do not release current + * displayed cma buffer always. 
+ */ +/* set to 1 for fast play; + * set to 8 for other case of "keep last frame" + */ +static u32 buffer_mode = 1; +/* buffer_mode_dbg: debug only*/ +static u32 buffer_mode_dbg = 0xffff0000; +/**/ + +/* + *bit 0, 1: only display I picture; + *bit 1, 1: only decode I picture; + */ +static u32 i_only_flag; + +static u32 low_latency_flag; + +static u32 no_head; + +static u32 max_decoding_time; +/* + *error handling + */ +/*error_handle_policy: + *bit 0: 0, auto skip error_skip_nal_count nals before error recovery; + *1, skip error_skip_nal_count nals before error recovery; + *bit 1 (valid only when bit0 == 1): + *1, wait vps/sps/pps after error recovery; + *bit 2 (valid only when bit0 == 0): + *0, auto search after error recovery (vp9_recover() called); + *1, manual search after error recovery + *(change to auto search after get IDR: WRITE_VREG(NAL_SEARCH_CTL, 0x2)) + * + *bit 4: 0, set error_mark after reset/recover + * 1, do not set error_mark after reset/recover + *bit 5: 0, check total lcu for every picture + * 1, do not check total lcu + * + */ + +static u32 error_handle_policy; +/*static u32 parser_sei_enable = 1;*/ +#define MAX_BUF_NUM_NORMAL 12 +#define MAX_BUF_NUM_LESS 10 +static u32 max_buf_num = MAX_BUF_NUM_NORMAL; +#define MAX_BUF_NUM_SAVE_BUF 8 + +static u32 run_ready_min_buf_num = 2; + + +static DEFINE_MUTEX(vvp9_mutex); +#ifndef MULTI_INSTANCE_SUPPORT +static struct device *cma_dev; +#endif + +#define HEVC_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0 +#define HEVC_RPM_BUFFER HEVC_ASSIST_SCRATCH_1 +#define HEVC_SHORT_TERM_RPS HEVC_ASSIST_SCRATCH_2 +#define VP9_ADAPT_PROB_REG HEVC_ASSIST_SCRATCH_3 +#define VP9_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_4 +#define HEVC_PPS_BUFFER HEVC_ASSIST_SCRATCH_5 +//#define HEVC_SAO_UP HEVC_ASSIST_SCRATCH_6 +#define HEVC_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7 +#define HEVC_STREAM_SWAP_BUFFER2 HEVC_ASSIST_SCRATCH_8 +#define VP9_PROB_SWAP_BUFFER HEVC_ASSIST_SCRATCH_9 +#define VP9_COUNT_SWAP_BUFFER HEVC_ASSIST_SCRATCH_A +#define 
VP9_SEG_MAP_BUFFER HEVC_ASSIST_SCRATCH_B +//#define HEVC_SCALELUT HEVC_ASSIST_SCRATCH_D +#define HEVC_WAIT_FLAG HEVC_ASSIST_SCRATCH_E +#define RPM_CMD_REG HEVC_ASSIST_SCRATCH_F +#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_F +#define HEVC_STREAM_SWAP_TEST HEVC_ASSIST_SCRATCH_L +#ifdef MULTI_INSTANCE_SUPPORT +#define HEVC_DECODE_COUNT HEVC_ASSIST_SCRATCH_M +#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N +#else +#define HEVC_DECODE_PIC_BEGIN_REG HEVC_ASSIST_SCRATCH_M +#define HEVC_DECODE_PIC_NUM_REG HEVC_ASSIST_SCRATCH_N +#endif +#define DEBUG_REG1 HEVC_ASSIST_SCRATCH_G +#define DEBUG_REG2 HEVC_ASSIST_SCRATCH_H + + +/* + *ucode parser/search control + *bit 0: 0, header auto parse; 1, header manual parse + *bit 1: 0, auto skip for noneseamless stream; 1, no skip + *bit [3:2]: valid when bit1==0; + *0, auto skip nal before first vps/sps/pps/idr; + *1, auto skip nal before first vps/sps/pps + *2, auto skip nal before first vps/sps/pps, + * and not decode until the first I slice (with slice address of 0) + * + *3, auto skip before first I slice (nal_type >=16 && nal_type<=21) + *bit [15:4] nal skip count (valid when bit0 == 1 (manual mode) ) + *bit [16]: for NAL_UNIT_EOS when bit0 is 0: + * 0, send SEARCH_DONE to arm ; 1, do not send SEARCH_DONE to arm + *bit [17]: for NAL_SEI when bit0 is 0: + * 0, do not parse SEI in ucode; 1, parse SEI in ucode + *bit [31:20]: used by ucode for debug purpose + */ +#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I + /*[31:24] chip feature + 31: 0, use MBOX1; 1, use MBOX0 + */ +#define DECODE_MODE HEVC_ASSIST_SCRATCH_J +#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K + +#ifdef MULTI_INSTANCE_SUPPORT +#define RPM_BUF_SIZE (0x400 * 2) +#else +#define RPM_BUF_SIZE (0x80*2) +#endif +#define LMEM_BUF_SIZE (0x400 * 2) + +//#define VBH_BUF_SIZE (2 * 16 * 2304) +//#define VBH_BUF_COUNT 4 + + /*mmu_vbh buf is used by HEVC_SAO_MMU_VH0_ADDR, HEVC_SAO_MMU_VH1_ADDR*/ +#define VBH_BUF_SIZE_1080P 0x3000 +#define VBH_BUF_SIZE_4K 0x5000 +#define VBH_BUF_SIZE_8K 
0xa000 +#define VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh.buf_size / 2) + /*mmu_vbh_dw buf is used by HEVC_SAO_MMU_VH0_ADDR2,HEVC_SAO_MMU_VH1_ADDR2, + HEVC_DW_VH0_ADDDR, HEVC_DW_VH1_ADDDR*/ +#define DW_VBH_BUF_SIZE_1080P (VBH_BUF_SIZE_1080P * 2) +#define DW_VBH_BUF_SIZE_4K (VBH_BUF_SIZE_4K * 2) +#define DW_VBH_BUF_SIZE_8K (VBH_BUF_SIZE_8K * 2) +#define DW_VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh_dw.buf_size / 4) + +/* necessary 4K page size align for t7/t3 decoder and after */ +#define WORKBUF_ALIGN(addr) (ALIGN(addr, PAGE_SIZE)) + +#define WORK_BUF_SPEC_NUM 6 +static struct BuffInfo_s amvvp9_workbuff_spec[WORK_BUF_SPEC_NUM] = { + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x30000, + }, + .sao_vb = { + .buf_size = 0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = + * 32Kbytes (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + *each para 1024bytes(total:0x40000), + *data 1024bytes(total:0x40000) + */ + .buf_size = 0x80000, + }, + .dblk_data = { + .buf_size 
= 0x80000, + }, + .seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0xd800, + }, + .mmu_vbh = { + .buf_size = 0x5000, /*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif + .mpred_above = { + .buf_size = 0x10000, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = {/* 1080p, 0x40000 per buffer */ + .buf_size = 0x40000 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096, + .max_height = 2304, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x30000, + }, + .sao_vb = { + .buf_size = 0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + * (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + *each para 1024bytes(total:0x40000), + *data 1024bytes(total:0x40000) + */ + .buf_size = 0x80000, + }, + .dblk_data = { + .buf_size = 0x80000, + }, + 
.seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0xd800, + }, + .mmu_vbh = { + .buf_size = 0x5000,/*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif + .mpred_above = { + .buf_size = 0x10000, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + * //4k2k , 0x100000 per buffer + */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = 0x120000 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096*2, + .max_height = 2304*2, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000*2, + }, + .sao_abv = { + .buf_size = 0x30000*2, + }, + .sao_vb = { + .buf_size = 0x30000*2, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .sps = { + // SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .pps = { + // PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total 0x2000 bytes + .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0x2800*2, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x8000*2, + }, + .dblk_para = { + // DBLK -> Max 256(4096/16) LCU, each para 1024bytes(total:0x40000), data 1024bytes(total:0x40000) + .buf_size = 0x80000*2, + }, + .dblk_data = { + .buf_size = 0x80000*2, + }, + 
.seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0xd800*4, + }, + .mmu_vbh = { + .buf_size = 0x5000*2, //2*16*(more than 2304)/4, 4K + }, +#if 0 + .cm_header = { + //.buf_size = MMU_COMPRESS_HEADER_SIZE*8, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + .buf_size = MMU_COMPRESS_HEADER_SIZE*16, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif + .mpred_above = { + .buf_size = 0x10000*2, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = 0x120000 * FRAME_BUFFERS * 4, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x1e00, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = + * 32Kbytes (0x8000) + */ + .buf_size = 0, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + *each para 1024bytes(total:0x40000), + *data 1024bytes(total:0x40000) + */ + .buf_size 
= 0x49000, + }, + .dblk_data = { + .buf_size = 0x49000, + }, + .seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0x3000, //0x2fd0, + }, + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_1080P, /*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif +#ifdef VP9_10B_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_1080P, //VBH_BUF_SIZE * VBH_BUF_COUNT, //2*16*(more than 2304)/4, 4K + }, +#if 0 + .cm_header_dw = { + .buf_size = MMU_COMPRESS_HEADER_SIZE_DW * 16, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x2200, //0x21c0, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = {/* 1080p, 0x40000 per buffer */ + .buf_size = 0x48200 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096, + .max_height = 2304, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { 
+ .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + * (0x8000) + */ + .buf_size = 0, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + *each para 1024bytes(total:0x40000), + *data 1024bytes(total:0x40000) + */ + .buf_size = 0x52800, + }, + .dblk_data = { + .buf_size = 0x52800, + }, + .seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0xd800, + }, + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_4K,/*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif +#ifdef VP9_10B_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_4K, //VBH_BUF_SIZE * VBH_BUF_COUNT, //2*16*(more than 2304)/4, 4K + }, +#if 0 + .cm_header_dw = { + .buf_size = MMU_COMPRESS_HEADER_SIZE_DW * 16, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x4800, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + * //4k2k , 0x100000 per buffer + */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = 0x145400 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096*2, + .max_height = 2304*2, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000*2, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .sps = { + // SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .pps = { + // PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total 0x2000 bytes 
+ .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0, + }, + .dblk_para = { + // DBLK -> Max 256(4096/16) LCU, each para 1024bytes(total:0x40000), data 1024bytes(total:0x40000) + .buf_size = 0xa4800, + }, + .dblk_data = { + .buf_size = 0xa4800, + }, + .seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0x36000, + }, + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_8K, //2*16*(more than 2304)/4, 4K + }, +#if 0 + .cm_header = { + //.buf_size = MMU_COMPRESS_HEADER_SIZE*8, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + .buf_size = MMU_COMPRESS_HEADER_SIZE*16, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif +#ifdef VP9_10B_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_8K, //VBH_BUF_SIZE * VBH_BUF_COUNT, //2*16*(more than 2304)/4, 4K + }, +#if 0 + .cm_header_dw = { + .buf_size = MMU_COMPRESS_HEADER_SIZE_DW * 16, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x9000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = 0x514800 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + } +}; + + +/*Losless compression body buffer size 4K per 64x32 (jt)*/ +static int compute_losless_comp_body_size(int width, int height, + uint8_t is_bit_depth_10) +{ + int width_x64; + int height_x32; + int bsize; + + width_x64 = width + 63; + width_x64 >>= 6; + height_x32 = height + 31; + height_x32 >>= 5; + bsize = (is_bit_depth_10?4096:3200)*width_x64*height_x32; + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info("%s(%d,%d,%d)=>%d\n", + __func__, width, 
height, + is_bit_depth_10, bsize); + + return bsize; +} + +/* Losless compression header buffer size 32bytes per 128x64 (jt)*/ +static int compute_losless_comp_header_size(int width, int height) +{ + int width_x128; + int height_x64; + int hsize; + + width_x128 = width + 127; + width_x128 >>= 7; + height_x64 = height + 63; + height_x64 >>= 6; + + hsize = 32 * width_x128 * height_x64; + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info("%s(%d,%d)=>%d\n", + __func__, width, height, + hsize); + + return hsize; +} + +static void init_buff_spec(struct VP9Decoder_s *pbi, + struct BuffInfo_s *buf_spec) +{ + void *mem_start_virt; + + buf_spec->ipp.buf_start = + WORKBUF_ALIGN(buf_spec->start_adr); + buf_spec->sao_abv.buf_start = + WORKBUF_ALIGN(buf_spec->ipp.buf_start + buf_spec->ipp.buf_size); + buf_spec->sao_vb.buf_start = + WORKBUF_ALIGN(buf_spec->sao_abv.buf_start + buf_spec->sao_abv.buf_size); + buf_spec->short_term_rps.buf_start = + WORKBUF_ALIGN(buf_spec->sao_vb.buf_start + buf_spec->sao_vb.buf_size); + buf_spec->vps.buf_start = + WORKBUF_ALIGN(buf_spec->short_term_rps.buf_start + buf_spec->short_term_rps.buf_size); + buf_spec->sps.buf_start = + WORKBUF_ALIGN(buf_spec->vps.buf_start + buf_spec->vps.buf_size); + buf_spec->pps.buf_start = + WORKBUF_ALIGN(buf_spec->sps.buf_start + buf_spec->sps.buf_size); + buf_spec->sao_up.buf_start = + WORKBUF_ALIGN(buf_spec->pps.buf_start + buf_spec->pps.buf_size); + buf_spec->swap_buf.buf_start = + WORKBUF_ALIGN(buf_spec->sao_up.buf_start + buf_spec->sao_up.buf_size); + buf_spec->swap_buf2.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf.buf_start + buf_spec->swap_buf.buf_size); + buf_spec->scalelut.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf2.buf_start + buf_spec->swap_buf2.buf_size); + buf_spec->dblk_para.buf_start = + WORKBUF_ALIGN(buf_spec->scalelut.buf_start + buf_spec->scalelut.buf_size); + buf_spec->dblk_data.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_para.buf_start + buf_spec->dblk_para.buf_size); + buf_spec->seg_map.buf_start = 
+ WORKBUF_ALIGN(buf_spec->dblk_data.buf_start + buf_spec->dblk_data.buf_size); + if (pbi == NULL || pbi->mmu_enable) { + buf_spec->mmu_vbh.buf_start = + WORKBUF_ALIGN(buf_spec->seg_map.buf_start + buf_spec->seg_map.buf_size); +#ifdef VP9_10B_MMU_DW + if (pbi == NULL || pbi->dw_mmu_enable) { + buf_spec->mmu_vbh_dw.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size); + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh_dw.buf_start + buf_spec->mmu_vbh_dw.buf_size); + } else { + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size); + } +#else + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size); +#endif + } else { + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->seg_map.buf_start + buf_spec->seg_map.buf_size); + } +#ifdef MV_USE_FIXED_BUF + buf_spec->mpred_mv.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); + + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_mv.buf_start + buf_spec->mpred_mv.buf_size); +#else + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); +#endif + buf_spec->lmem.buf_start = + WORKBUF_ALIGN(buf_spec->rpm.buf_start + buf_spec->rpm.buf_size); + buf_spec->end_adr = + WORKBUF_ALIGN(buf_spec->lmem.buf_start + buf_spec->lmem.buf_size); + + if (!pbi) + return; + + if (!vdec_secure(hw_to_vdec(pbi))) { + mem_start_virt = + codec_mm_phys_to_virt(buf_spec->dblk_para.buf_start); + if (mem_start_virt) { + memset(mem_start_virt, 0, + buf_spec->dblk_para.buf_size); + codec_mm_dma_flush(mem_start_virt, + buf_spec->dblk_para.buf_size, + DMA_TO_DEVICE); + } else { + mem_start_virt = codec_mm_vmap( + buf_spec->dblk_para.buf_start, + buf_spec->dblk_para.buf_size); + if (mem_start_virt) { + memset(mem_start_virt, 0, + buf_spec->dblk_para.buf_size); + 
codec_mm_dma_flush(mem_start_virt, + buf_spec->dblk_para.buf_size, + DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(mem_start_virt); + } else { + /*not virt for tvp playing, + may need clear on ucode.*/ + pr_err("mem_start_virt failed\n"); + } + } + } + + if (debug) { + pr_info("%s workspace (%x %x) size = %x\n", __func__, + buf_spec->start_adr, buf_spec->end_adr, + buf_spec->end_adr - buf_spec->start_adr); + } + + if (debug) { + pr_info("ipp.buf_start :%x\n", + buf_spec->ipp.buf_start); + pr_info("sao_abv.buf_start :%x\n", + buf_spec->sao_abv.buf_start); + pr_info("sao_vb.buf_start :%x\n", + buf_spec->sao_vb.buf_start); + pr_info("short_term_rps.buf_start :%x\n", + buf_spec->short_term_rps.buf_start); + pr_info("vps.buf_start :%x\n", + buf_spec->vps.buf_start); + pr_info("sps.buf_start :%x\n", + buf_spec->sps.buf_start); + pr_info("pps.buf_start :%x\n", + buf_spec->pps.buf_start); + pr_info("sao_up.buf_start :%x\n", + buf_spec->sao_up.buf_start); + pr_info("swap_buf.buf_start :%x\n", + buf_spec->swap_buf.buf_start); + pr_info("swap_buf2.buf_start :%x\n", + buf_spec->swap_buf2.buf_start); + pr_info("scalelut.buf_start :%x\n", + buf_spec->scalelut.buf_start); + pr_info("dblk_para.buf_start :%x\n", + buf_spec->dblk_para.buf_start); + pr_info("dblk_data.buf_start :%x\n", + buf_spec->dblk_data.buf_start); + pr_info("seg_map.buf_start :%x\n", + buf_spec->seg_map.buf_start); + if (pbi->mmu_enable) { + pr_info("mmu_vbh.buf_start :%x\n", + buf_spec->mmu_vbh.buf_start); +#ifdef VP9_10B_MMU_DW + if (pbi->dw_mmu_enable) { + pr_info("mmu_vbh_dw.buf_start :%x", + buf_spec->mmu_vbh_dw.buf_start); +#if 0 + pr_info("cm_header_dw.buf_start :%x", + buf_spec->mmu_vbh_dw.buf_start); +#endif + } +#endif + } + pr_info("mpred_above.buf_start :%x\n", + buf_spec->mpred_above.buf_start); +#ifdef MV_USE_FIXED_BUF + pr_info("mpred_mv.buf_start :%x\n", + buf_spec->mpred_mv.buf_start); +#endif + if ((debug & VP9_DEBUG_SEND_PARAM_WITH_REG) == 0) { + pr_info("rpm.buf_start :%x\n", + 
buf_spec->rpm.buf_start); + } + } +} + +/* cache_util.c */ +#define THODIYIL_MCRCC_CANVAS_ALGX 4 + +static u32 mcrcc_cache_alg_flag = THODIYIL_MCRCC_CANVAS_ALGX; + +static void mcrcc_perfcount_reset(void) +{ + if (debug & VP9_DEBUG_CACHE) + pr_info("[cache_util.c] Entered mcrcc_perfcount_reset...\n"); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x1); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x0); + return; +} + +static unsigned raw_mcr_cnt_total_prev; +static unsigned hit_mcr_0_cnt_total_prev; +static unsigned hit_mcr_1_cnt_total_prev; +static unsigned byp_mcr_cnt_nchcanv_total_prev; +static unsigned byp_mcr_cnt_nchoutwin_total_prev; + +static void mcrcc_get_hitrate(unsigned reset_pre) +{ + unsigned delta_hit_mcr_0_cnt; + unsigned delta_hit_mcr_1_cnt; + unsigned delta_raw_mcr_cnt; + unsigned delta_mcr_cnt_nchcanv; + unsigned delta_mcr_cnt_nchoutwin; + + unsigned tmp; + unsigned raw_mcr_cnt; + unsigned hit_mcr_cnt; + unsigned byp_mcr_cnt_nchoutwin; + unsigned byp_mcr_cnt_nchcanv; + int hitrate; + if (reset_pre) { + raw_mcr_cnt_total_prev = 0; + hit_mcr_0_cnt_total_prev = 0; + hit_mcr_1_cnt_total_prev = 0; + byp_mcr_cnt_nchcanv_total_prev = 0; + byp_mcr_cnt_nchoutwin_total_prev = 0; + } + if (debug & VP9_DEBUG_CACHE) + pr_info("[cache_util.c] Entered mcrcc_get_hitrate...\n"); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x0<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x1<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x2<<1)); + byp_mcr_cnt_nchoutwin = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x3<<1)); + byp_mcr_cnt_nchcanv = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + + if (debug & VP9_DEBUG_CACHE) + pr_info("raw_mcr_cnt_total: %d\n", + raw_mcr_cnt); + if (debug & VP9_DEBUG_CACHE) + pr_info("hit_mcr_cnt_total: %d\n", + hit_mcr_cnt); + if (debug & VP9_DEBUG_CACHE) + 
pr_info("byp_mcr_cnt_nchoutwin_total: %d\n", + byp_mcr_cnt_nchoutwin); + if (debug & VP9_DEBUG_CACHE) + pr_info("byp_mcr_cnt_nchcanv_total: %d\n", + byp_mcr_cnt_nchcanv); + + delta_raw_mcr_cnt = raw_mcr_cnt - + raw_mcr_cnt_total_prev; + delta_mcr_cnt_nchcanv = byp_mcr_cnt_nchcanv - + byp_mcr_cnt_nchcanv_total_prev; + delta_mcr_cnt_nchoutwin = byp_mcr_cnt_nchoutwin - + byp_mcr_cnt_nchoutwin_total_prev; + raw_mcr_cnt_total_prev = raw_mcr_cnt; + byp_mcr_cnt_nchcanv_total_prev = byp_mcr_cnt_nchcanv; + byp_mcr_cnt_nchoutwin_total_prev = byp_mcr_cnt_nchoutwin; + + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x4<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & VP9_DEBUG_CACHE) + pr_info("miss_mcr_0_cnt_total: %d\n", tmp); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x5<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & VP9_DEBUG_CACHE) + pr_info("miss_mcr_1_cnt_total: %d\n", tmp); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x6<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & VP9_DEBUG_CACHE) + pr_info("hit_mcr_0_cnt_total: %d\n", tmp); + delta_hit_mcr_0_cnt = tmp - hit_mcr_0_cnt_total_prev; + hit_mcr_0_cnt_total_prev = tmp; + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x7<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & VP9_DEBUG_CACHE) + pr_info("hit_mcr_1_cnt_total: %d\n", tmp); + delta_hit_mcr_1_cnt = tmp - hit_mcr_1_cnt_total_prev; + hit_mcr_1_cnt_total_prev = tmp; + + if (delta_raw_mcr_cnt != 0) { + hitrate = 100 * delta_hit_mcr_0_cnt + / delta_raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("CANV0_HIT_RATE : %d\n", hitrate); + hitrate = 100 * delta_hit_mcr_1_cnt + / delta_raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("CANV1_HIT_RATE : %d\n", hitrate); + hitrate = 100 * delta_mcr_cnt_nchcanv + / delta_raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("NONCACH_CANV_BYP_RATE : %d\n", hitrate); + hitrate = 100 * delta_mcr_cnt_nchoutwin + / delta_raw_mcr_cnt; 
+ if (debug & VP9_DEBUG_CACHE) + pr_info("CACHE_OUTWIN_BYP_RATE : %d\n", hitrate); + } + + + if (raw_mcr_cnt != 0) { + hitrate = 100 * hit_mcr_cnt / raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("MCRCC_HIT_RATE : %d\n", hitrate); + hitrate = 100 * (byp_mcr_cnt_nchoutwin + byp_mcr_cnt_nchcanv) + / raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("MCRCC_BYP_RATE : %d\n", hitrate); + } else { + if (debug & VP9_DEBUG_CACHE) + pr_info("MCRCC_HIT_RATE : na\n"); + if (debug & VP9_DEBUG_CACHE) + pr_info("MCRCC_BYP_RATE : na\n"); + } + return; +} + + +static void decomp_perfcount_reset(void) +{ + if (debug & VP9_DEBUG_CACHE) + pr_info("[cache_util.c] Entered decomp_perfcount_reset...\n"); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x1); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x0); + return; +} + +static void decomp_get_hitrate(void) +{ + unsigned raw_mcr_cnt; + unsigned hit_mcr_cnt; + int hitrate; + if (debug & VP9_DEBUG_CACHE) + pr_info("[cache_util.c] Entered decomp_get_hitrate...\n"); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x0<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x1<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + + if (debug & VP9_DEBUG_CACHE) + pr_info("hcache_raw_cnt_total: %d\n", raw_mcr_cnt); + if (debug & VP9_DEBUG_CACHE) + pr_info("hcache_hit_cnt_total: %d\n", hit_mcr_cnt); + + if (raw_mcr_cnt != 0) { + hitrate = hit_mcr_cnt * 100 / raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_HCACHE_HIT_RATE : %d\n", hitrate); + } else { + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_HCACHE_HIT_RATE : na\n"); + } + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x2<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x3<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + + if (debug & VP9_DEBUG_CACHE) + 
pr_info("dcache_raw_cnt_total: %d\n", raw_mcr_cnt); + if (debug & VP9_DEBUG_CACHE) + pr_info("dcache_hit_cnt_total: %d\n", hit_mcr_cnt); + + if (raw_mcr_cnt != 0) { + hitrate = hit_mcr_cnt * 100 / raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_DCACHE_HIT_RATE : %d\n", hitrate); + } else { + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_DCACHE_HIT_RATE : na\n"); + } + return; +} + +static void decomp_get_comprate(void) +{ + unsigned raw_ucomp_cnt; + unsigned fast_comp_cnt; + unsigned slow_comp_cnt; + int comprate; + + if (debug & VP9_DEBUG_CACHE) + pr_info("[cache_util.c] Entered decomp_get_comprate...\n"); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x4<<1)); + fast_comp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x5<<1)); + slow_comp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x6<<1)); + raw_ucomp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + + if (debug & VP9_DEBUG_CACHE) + pr_info("decomp_fast_comp_total: %d\n", fast_comp_cnt); + if (debug & VP9_DEBUG_CACHE) + pr_info("decomp_slow_comp_total: %d\n", slow_comp_cnt); + if (debug & VP9_DEBUG_CACHE) + pr_info("decomp_raw_uncomp_total: %d\n", raw_ucomp_cnt); + + if (raw_ucomp_cnt != 0) { + comprate = (fast_comp_cnt + slow_comp_cnt) + * 100 / raw_ucomp_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_COMP_RATIO : %d\n", comprate); + } else { + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_COMP_RATIO : na\n"); + } + return; +} +/* cache_util.c end */ + +/*==================================================== + *======================================================================== + *vp9_prob define + *======================================================================== + */ +#define VP9_PARTITION_START 0 +#define VP9_PARTITION_SIZE_STEP (3 * 4) +#define VP9_PARTITION_ONE_SIZE (4 * VP9_PARTITION_SIZE_STEP) +#define VP9_PARTITION_KEY_START 0 +#define 
VP9_PARTITION_P_START VP9_PARTITION_ONE_SIZE +#define VP9_PARTITION_SIZE (2 * VP9_PARTITION_ONE_SIZE) +#define VP9_SKIP_START (VP9_PARTITION_START + VP9_PARTITION_SIZE) +#define VP9_SKIP_SIZE 4 /* only use 3*/ +#define VP9_TX_MODE_START (VP9_SKIP_START+VP9_SKIP_SIZE) +#define VP9_TX_MODE_8_0_OFFSET 0 +#define VP9_TX_MODE_8_1_OFFSET 1 +#define VP9_TX_MODE_16_0_OFFSET 2 +#define VP9_TX_MODE_16_1_OFFSET 4 +#define VP9_TX_MODE_32_0_OFFSET 6 +#define VP9_TX_MODE_32_1_OFFSET 9 +#define VP9_TX_MODE_SIZE 12 +#define VP9_COEF_START (VP9_TX_MODE_START+VP9_TX_MODE_SIZE) +#define VP9_COEF_BAND_0_OFFSET 0 +#define VP9_COEF_BAND_1_OFFSET (VP9_COEF_BAND_0_OFFSET + 3 * 3 + 1) +#define VP9_COEF_BAND_2_OFFSET (VP9_COEF_BAND_1_OFFSET + 6 * 3) +#define VP9_COEF_BAND_3_OFFSET (VP9_COEF_BAND_2_OFFSET + 6 * 3) +#define VP9_COEF_BAND_4_OFFSET (VP9_COEF_BAND_3_OFFSET + 6 * 3) +#define VP9_COEF_BAND_5_OFFSET (VP9_COEF_BAND_4_OFFSET + 6 * 3) +#define VP9_COEF_SIZE_ONE_SET 100 /* ((3 +5*6)*3 + 1 padding)*/ +#define VP9_COEF_4X4_START (VP9_COEF_START + 0 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_8X8_START (VP9_COEF_START + 4 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_16X16_START (VP9_COEF_START + 8 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_32X32_START (VP9_COEF_START + 12 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_SIZE_PLANE (2 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_SIZE (4 * 2 * 2 * VP9_COEF_SIZE_ONE_SET) +#define VP9_INTER_MODE_START (VP9_COEF_START+VP9_COEF_SIZE) +#define VP9_INTER_MODE_SIZE 24 /* only use 21 ( #*7)*/ +#define VP9_INTERP_START (VP9_INTER_MODE_START+VP9_INTER_MODE_SIZE) +#define VP9_INTERP_SIZE 8 +#define VP9_INTRA_INTER_START (VP9_INTERP_START+VP9_INTERP_SIZE) +#define VP9_INTRA_INTER_SIZE 4 +#define VP9_INTERP_INTRA_INTER_START VP9_INTERP_START +#define VP9_INTERP_INTRA_INTER_SIZE (VP9_INTERP_SIZE + VP9_INTRA_INTER_SIZE) +#define VP9_COMP_INTER_START \ + (VP9_INTERP_INTRA_INTER_START+VP9_INTERP_INTRA_INTER_SIZE) +#define VP9_COMP_INTER_SIZE 5 +#define 
VP9_COMP_REF_START (VP9_COMP_INTER_START+VP9_COMP_INTER_SIZE) +#define VP9_COMP_REF_SIZE 5 +#define VP9_SINGLE_REF_START (VP9_COMP_REF_START+VP9_COMP_REF_SIZE) +#define VP9_SINGLE_REF_SIZE 10 +#define VP9_REF_MODE_START VP9_COMP_INTER_START +#define VP9_REF_MODE_SIZE \ + (VP9_COMP_INTER_SIZE+VP9_COMP_REF_SIZE+VP9_SINGLE_REF_SIZE) +#define VP9_IF_Y_MODE_START (VP9_REF_MODE_START+VP9_REF_MODE_SIZE) +#define VP9_IF_Y_MODE_SIZE 36 +#define VP9_IF_UV_MODE_START (VP9_IF_Y_MODE_START+VP9_IF_Y_MODE_SIZE) +#define VP9_IF_UV_MODE_SIZE 92 /* only use 90*/ +#define VP9_MV_JOINTS_START (VP9_IF_UV_MODE_START+VP9_IF_UV_MODE_SIZE) +#define VP9_MV_JOINTS_SIZE 3 +#define VP9_MV_SIGN_0_START (VP9_MV_JOINTS_START+VP9_MV_JOINTS_SIZE) +#define VP9_MV_SIGN_0_SIZE 1 +#define VP9_MV_CLASSES_0_START (VP9_MV_SIGN_0_START+VP9_MV_SIGN_0_SIZE) +#define VP9_MV_CLASSES_0_SIZE 10 +#define VP9_MV_CLASS0_0_START (VP9_MV_CLASSES_0_START+VP9_MV_CLASSES_0_SIZE) +#define VP9_MV_CLASS0_0_SIZE 1 +#define VP9_MV_BITS_0_START (VP9_MV_CLASS0_0_START+VP9_MV_CLASS0_0_SIZE) +#define VP9_MV_BITS_0_SIZE 10 +#define VP9_MV_SIGN_1_START (VP9_MV_BITS_0_START+VP9_MV_BITS_0_SIZE) +#define VP9_MV_SIGN_1_SIZE 1 +#define VP9_MV_CLASSES_1_START \ + (VP9_MV_SIGN_1_START+VP9_MV_SIGN_1_SIZE) +#define VP9_MV_CLASSES_1_SIZE 10 +#define VP9_MV_CLASS0_1_START \ + (VP9_MV_CLASSES_1_START+VP9_MV_CLASSES_1_SIZE) +#define VP9_MV_CLASS0_1_SIZE 1 +#define VP9_MV_BITS_1_START \ + (VP9_MV_CLASS0_1_START+VP9_MV_CLASS0_1_SIZE) +#define VP9_MV_BITS_1_SIZE 10 +#define VP9_MV_CLASS0_FP_0_START \ + (VP9_MV_BITS_1_START+VP9_MV_BITS_1_SIZE) +#define VP9_MV_CLASS0_FP_0_SIZE 9 +#define VP9_MV_CLASS0_FP_1_START \ + (VP9_MV_CLASS0_FP_0_START+VP9_MV_CLASS0_FP_0_SIZE) +#define VP9_MV_CLASS0_FP_1_SIZE 9 +#define VP9_MV_CLASS0_HP_0_START \ + (VP9_MV_CLASS0_FP_1_START+VP9_MV_CLASS0_FP_1_SIZE) +#define VP9_MV_CLASS0_HP_0_SIZE 2 +#define VP9_MV_CLASS0_HP_1_START \ + (VP9_MV_CLASS0_HP_0_START+VP9_MV_CLASS0_HP_0_SIZE) +#define VP9_MV_CLASS0_HP_1_SIZE 2 
+#define VP9_MV_START VP9_MV_JOINTS_START +#define VP9_MV_SIZE 72 /*only use 69*/ + +#define VP9_TOTAL_SIZE (VP9_MV_START + VP9_MV_SIZE) + + +/*======================================================================== + * vp9_count_mem define + *======================================================================== + */ +#define VP9_COEF_COUNT_START 0 +#define VP9_COEF_COUNT_BAND_0_OFFSET 0 +#define VP9_COEF_COUNT_BAND_1_OFFSET \ + (VP9_COEF_COUNT_BAND_0_OFFSET + 3*5) +#define VP9_COEF_COUNT_BAND_2_OFFSET \ + (VP9_COEF_COUNT_BAND_1_OFFSET + 6*5) +#define VP9_COEF_COUNT_BAND_3_OFFSET \ + (VP9_COEF_COUNT_BAND_2_OFFSET + 6*5) +#define VP9_COEF_COUNT_BAND_4_OFFSET \ + (VP9_COEF_COUNT_BAND_3_OFFSET + 6*5) +#define VP9_COEF_COUNT_BAND_5_OFFSET \ + (VP9_COEF_COUNT_BAND_4_OFFSET + 6*5) +#define VP9_COEF_COUNT_SIZE_ONE_SET 165 /* ((3 +5*6)*5 */ +#define VP9_COEF_COUNT_4X4_START \ + (VP9_COEF_COUNT_START + 0*VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_8X8_START \ + (VP9_COEF_COUNT_START + 4*VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_16X16_START \ + (VP9_COEF_COUNT_START + 8*VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_32X32_START \ + (VP9_COEF_COUNT_START + 12*VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_SIZE_PLANE (2 * VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_SIZE (4 * 2 * 2 * VP9_COEF_COUNT_SIZE_ONE_SET) + +#define VP9_INTRA_INTER_COUNT_START \ + (VP9_COEF_COUNT_START+VP9_COEF_COUNT_SIZE) +#define VP9_INTRA_INTER_COUNT_SIZE (4*2) +#define VP9_COMP_INTER_COUNT_START \ + (VP9_INTRA_INTER_COUNT_START+VP9_INTRA_INTER_COUNT_SIZE) +#define VP9_COMP_INTER_COUNT_SIZE (5*2) +#define VP9_COMP_REF_COUNT_START \ + (VP9_COMP_INTER_COUNT_START+VP9_COMP_INTER_COUNT_SIZE) +#define VP9_COMP_REF_COUNT_SIZE (5*2) +#define VP9_SINGLE_REF_COUNT_START \ + (VP9_COMP_REF_COUNT_START+VP9_COMP_REF_COUNT_SIZE) +#define VP9_SINGLE_REF_COUNT_SIZE (10*2) +#define VP9_TX_MODE_COUNT_START \ + (VP9_SINGLE_REF_COUNT_START+VP9_SINGLE_REF_COUNT_SIZE) +#define 
VP9_TX_MODE_COUNT_SIZE (12*2) +#define VP9_SKIP_COUNT_START \ + (VP9_TX_MODE_COUNT_START+VP9_TX_MODE_COUNT_SIZE) +#define VP9_SKIP_COUNT_SIZE (3*2) +#define VP9_MV_SIGN_0_COUNT_START \ + (VP9_SKIP_COUNT_START+VP9_SKIP_COUNT_SIZE) +#define VP9_MV_SIGN_0_COUNT_SIZE (1*2) +#define VP9_MV_SIGN_1_COUNT_START \ + (VP9_MV_SIGN_0_COUNT_START+VP9_MV_SIGN_0_COUNT_SIZE) +#define VP9_MV_SIGN_1_COUNT_SIZE (1*2) +#define VP9_MV_BITS_0_COUNT_START \ + (VP9_MV_SIGN_1_COUNT_START+VP9_MV_SIGN_1_COUNT_SIZE) +#define VP9_MV_BITS_0_COUNT_SIZE (10*2) +#define VP9_MV_BITS_1_COUNT_START \ + (VP9_MV_BITS_0_COUNT_START+VP9_MV_BITS_0_COUNT_SIZE) +#define VP9_MV_BITS_1_COUNT_SIZE (10*2) +#define VP9_MV_CLASS0_HP_0_COUNT_START \ + (VP9_MV_BITS_1_COUNT_START+VP9_MV_BITS_1_COUNT_SIZE) +#define VP9_MV_CLASS0_HP_0_COUNT_SIZE (2*2) +#define VP9_MV_CLASS0_HP_1_COUNT_START \ + (VP9_MV_CLASS0_HP_0_COUNT_START+VP9_MV_CLASS0_HP_0_COUNT_SIZE) +#define VP9_MV_CLASS0_HP_1_COUNT_SIZE (2*2) +/* Start merge_tree*/ +#define VP9_INTER_MODE_COUNT_START \ + (VP9_MV_CLASS0_HP_1_COUNT_START+VP9_MV_CLASS0_HP_1_COUNT_SIZE) +#define VP9_INTER_MODE_COUNT_SIZE (7*4) +#define VP9_IF_Y_MODE_COUNT_START \ + (VP9_INTER_MODE_COUNT_START+VP9_INTER_MODE_COUNT_SIZE) +#define VP9_IF_Y_MODE_COUNT_SIZE (10*4) +#define VP9_IF_UV_MODE_COUNT_START \ + (VP9_IF_Y_MODE_COUNT_START+VP9_IF_Y_MODE_COUNT_SIZE) +#define VP9_IF_UV_MODE_COUNT_SIZE (10*10) +#define VP9_PARTITION_P_COUNT_START \ + (VP9_IF_UV_MODE_COUNT_START+VP9_IF_UV_MODE_COUNT_SIZE) +#define VP9_PARTITION_P_COUNT_SIZE (4*4*4) +#define VP9_INTERP_COUNT_START \ + (VP9_PARTITION_P_COUNT_START+VP9_PARTITION_P_COUNT_SIZE) +#define VP9_INTERP_COUNT_SIZE (4*3) +#define VP9_MV_JOINTS_COUNT_START \ + (VP9_INTERP_COUNT_START+VP9_INTERP_COUNT_SIZE) +#define VP9_MV_JOINTS_COUNT_SIZE (1 * 4) +#define VP9_MV_CLASSES_0_COUNT_START \ + (VP9_MV_JOINTS_COUNT_START+VP9_MV_JOINTS_COUNT_SIZE) +#define VP9_MV_CLASSES_0_COUNT_SIZE (1*11) +#define VP9_MV_CLASS0_0_COUNT_START \ + 
(VP9_MV_CLASSES_0_COUNT_START+VP9_MV_CLASSES_0_COUNT_SIZE) +#define VP9_MV_CLASS0_0_COUNT_SIZE (1*2) +#define VP9_MV_CLASSES_1_COUNT_START \ + (VP9_MV_CLASS0_0_COUNT_START+VP9_MV_CLASS0_0_COUNT_SIZE) +#define VP9_MV_CLASSES_1_COUNT_SIZE (1*11) +#define VP9_MV_CLASS0_1_COUNT_START \ + (VP9_MV_CLASSES_1_COUNT_START+VP9_MV_CLASSES_1_COUNT_SIZE) +#define VP9_MV_CLASS0_1_COUNT_SIZE (1*2) +#define VP9_MV_CLASS0_FP_0_COUNT_START \ + (VP9_MV_CLASS0_1_COUNT_START+VP9_MV_CLASS0_1_COUNT_SIZE) +#define VP9_MV_CLASS0_FP_0_COUNT_SIZE (3*4) +#define VP9_MV_CLASS0_FP_1_COUNT_START \ + (VP9_MV_CLASS0_FP_0_COUNT_START+VP9_MV_CLASS0_FP_0_COUNT_SIZE) +#define VP9_MV_CLASS0_FP_1_COUNT_SIZE (3*4) + + +#define DC_PRED 0 /* Average of above and left pixels*/ +#define V_PRED 1 /* Vertical*/ +#define H_PRED 2 /* Horizontal*/ +#define D45_PRED 3 /*Directional 45 deg = round(arctan(1/1) * 180/pi)*/ +#define D135_PRED 4 /* Directional 135 deg = 180 - 45*/ +#define D117_PRED 5 /* Directional 117 deg = 180 - 63*/ +#define D153_PRED 6 /* Directional 153 deg = 180 - 27*/ +#define D207_PRED 7 /* Directional 207 deg = 180 + 27*/ +#define D63_PRED 8 /*Directional 63 deg = round(arctan(2/1) * 180/pi)*/ +#define TM_PRED 9 /*True-motion*/ + +int clip_prob(int p) +{ + return (p > 255) ? 255 : (p < 1) ? 
1 : p; +} + +#define ROUND_POWER_OF_TWO(value, n) \ + (((value) + (1 << ((n) - 1))) >> (n)) + +#define MODE_MV_COUNT_SAT 20 +static const int count_to_update_factor[MODE_MV_COUNT_SAT + 1] = { + 0, 6, 12, 19, 25, 32, 38, 44, 51, 57, 64, + 70, 76, 83, 89, 96, 102, 108, 115, 121, 128 +}; + +void vp9_tree_merge_probs(unsigned int *prev_prob, unsigned int *cur_prob, + int coef_node_start, int tree_left, int tree_right, int tree_i, + int node) { + + int prob_32, prob_res, prob_shift; + int pre_prob, new_prob; + int den, m_count, get_prob, factor; + + prob_32 = prev_prob[coef_node_start / 4 * 2]; + prob_res = coef_node_start & 3; + prob_shift = prob_res * 8; + pre_prob = (prob_32 >> prob_shift) & 0xff; + + den = tree_left + tree_right; + + if (den == 0) + new_prob = pre_prob; + else { + m_count = (den < MODE_MV_COUNT_SAT) ? + den : MODE_MV_COUNT_SAT; + get_prob = clip_prob( + div_r32(((int64_t)tree_left * 256 + (den >> 1)), + den)); + /*weighted_prob*/ + factor = count_to_update_factor[m_count]; + new_prob = ROUND_POWER_OF_TWO(pre_prob * (256 - factor) + + get_prob * factor, 8); + } + cur_prob[coef_node_start / 4 * 2] = (cur_prob[coef_node_start / 4 * 2] + & (~(0xff << prob_shift))) | (new_prob << prob_shift); + + /*pr_info(" - [%d][%d] 0x%02X --> 0x%02X (0x%X 0x%X) (%X)\n", + *tree_i, node, pre_prob, new_prob, tree_left, tree_right, + *cur_prob[coef_node_start/4*2]); + */ +} + + +/*void adapt_coef_probs(void)*/ +void adapt_coef_probs(int pic_count, int prev_kf, int cur_kf, int pre_fc, + unsigned int *prev_prob, unsigned int *cur_prob, unsigned int *count) +{ + /* 80 * 64bits = 0xF00 ( use 0x1000 4K bytes) + *unsigned int prev_prob[496*2]; + *unsigned int cur_prob[496*2]; + *0x300 * 128bits = 0x3000 (32K Bytes) + *unsigned int count[0x300*4]; + */ + + int tx_size, coef_tx_size_start, coef_count_tx_size_start; + int plane, coef_plane_start, coef_count_plane_start; + int type, coef_type_start, coef_count_type_start; + int band, coef_band_start, coef_count_band_start; + int 
cxt_num; + int cxt, coef_cxt_start, coef_count_cxt_start; + int node, coef_node_start, coef_count_node_start; + + int tree_i, tree_left, tree_right; + int mvd_i; + + int count_sat = 24; + /*int update_factor = 112;*/ /*If COEF_MAX_UPDATE_FACTOR_AFTER_KEY, + *use 128 + */ + /* If COEF_MAX_UPDATE_FACTOR_AFTER_KEY, use 128*/ + /*int update_factor = (pic_count == 1) ? 128 : 112;*/ + int update_factor = cur_kf ? 112 : + prev_kf ? 128 : 112; + + int prob_32; + int prob_res; + int prob_shift; + int pre_prob; + + int num, den; + int get_prob; + int m_count; + int factor; + + int new_prob; + + if (debug & VP9_DEBUG_MERGE) + pr_info + ("\n ##adapt_coef_probs (pre_fc : %d ,prev_kf : %d,cur_kf : %d)##\n\n", + pre_fc, prev_kf, cur_kf); + + /*adapt_coef_probs*/ + for (tx_size = 0; tx_size < 4; tx_size++) { + coef_tx_size_start = VP9_COEF_START + + tx_size * 4 * VP9_COEF_SIZE_ONE_SET; + coef_count_tx_size_start = VP9_COEF_COUNT_START + + tx_size * 4 * VP9_COEF_COUNT_SIZE_ONE_SET; + coef_plane_start = coef_tx_size_start; + coef_count_plane_start = coef_count_tx_size_start; + for (plane = 0; plane < 2; plane++) { + coef_type_start = coef_plane_start; + coef_count_type_start = coef_count_plane_start; + for (type = 0; type < 2; type++) { + coef_band_start = coef_type_start; + coef_count_band_start = coef_count_type_start; + for (band = 0; band < 6; band++) { + if (band == 0) + cxt_num = 3; + else + cxt_num = 6; + coef_cxt_start = coef_band_start; + coef_count_cxt_start = + coef_count_band_start; + for (cxt = 0; cxt < cxt_num; cxt++) { + const int n0 = + count[coef_count_cxt_start]; + const int n1 = + count[coef_count_cxt_start + 1]; + const int n2 = + count[coef_count_cxt_start + 2]; + const int neob = + count[coef_count_cxt_start + 3]; + const int nneob = + count[coef_count_cxt_start + 4]; + const unsigned int + branch_ct[3][2] = { + { neob, nneob }, + { n0, n1 + n2 }, + { n1, n2 } + }; + coef_node_start = + coef_cxt_start; + for + (node = 0; node < 3; node++) { + prob_32 = + 
prev_prob[ + coef_node_start + / 4 * 2]; + prob_res = + coef_node_start & 3; + prob_shift = + prob_res * 8; + pre_prob = + (prob_32 >> prob_shift) + & 0xff; + + /*get_binary_prob*/ + num = + branch_ct[node][0]; + den = + branch_ct[node][0] + + branch_ct[node][1]; + m_count = (den < + count_sat) + ? den : count_sat; + + get_prob = + (den == 0) ? 128u : + clip_prob( + div_r32(((int64_t) + num * 256 + + (den >> 1)), + den)); + + factor = + update_factor * m_count + / count_sat; + new_prob = + ROUND_POWER_OF_TWO + (pre_prob * + (256 - factor) + + get_prob * factor, 8); + + cur_prob[coef_node_start + / 4 * 2] = + (cur_prob + [coef_node_start + / 4 * 2] & (~(0xff << + prob_shift))) | + (new_prob << + prob_shift); + + coef_node_start += 1; + } + + coef_cxt_start = + coef_cxt_start + 3; + coef_count_cxt_start = + coef_count_cxt_start + + 5; + } + if (band == 0) { + coef_band_start += 10; + coef_count_band_start += 15; + } else { + coef_band_start += 18; + coef_count_band_start += 30; + } + } + coef_type_start += VP9_COEF_SIZE_ONE_SET; + coef_count_type_start += + VP9_COEF_COUNT_SIZE_ONE_SET; + } + coef_plane_start += 2 * VP9_COEF_SIZE_ONE_SET; + coef_count_plane_start += + 2 * VP9_COEF_COUNT_SIZE_ONE_SET; + } + } + + if (cur_kf == 0) { + /*mode_mv_merge_probs - merge_intra_inter_prob*/ + for (coef_count_node_start = VP9_INTRA_INTER_COUNT_START; + coef_count_node_start < (VP9_MV_CLASS0_HP_1_COUNT_START + + VP9_MV_CLASS0_HP_1_COUNT_SIZE); coef_count_node_start += 2) { + + if (coef_count_node_start == + VP9_INTRA_INTER_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_intra_inter_prob\n"); + coef_node_start = VP9_INTRA_INTER_START; + } else if (coef_count_node_start == + VP9_COMP_INTER_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_comp_inter_prob\n"); + coef_node_start = VP9_COMP_INTER_START; + } + /* + *else if (coef_count_node_start == + * VP9_COMP_REF_COUNT_START) { + * pr_info(" # merge_comp_inter_prob\n"); + * coef_node_start = 
VP9_COMP_REF_START; + *} + *else if (coef_count_node_start == + * VP9_SINGLE_REF_COUNT_START) { + * pr_info(" # merge_comp_inter_prob\n"); + * coef_node_start = VP9_SINGLE_REF_START; + *} + */ + else if (coef_count_node_start == + VP9_TX_MODE_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_tx_mode_probs\n"); + coef_node_start = VP9_TX_MODE_START; + } else if (coef_count_node_start == + VP9_SKIP_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_skip_probs\n"); + coef_node_start = VP9_SKIP_START; + } else if (coef_count_node_start == + VP9_MV_SIGN_0_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_sign_0\n"); + coef_node_start = VP9_MV_SIGN_0_START; + } else if (coef_count_node_start == + VP9_MV_SIGN_1_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_sign_1\n"); + coef_node_start = VP9_MV_SIGN_1_START; + } else if (coef_count_node_start == + VP9_MV_BITS_0_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_bits_0\n"); + coef_node_start = VP9_MV_BITS_0_START; + } else if (coef_count_node_start == + VP9_MV_BITS_1_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_bits_1\n"); + coef_node_start = VP9_MV_BITS_1_START; + } else if (coef_count_node_start == + VP9_MV_CLASS0_HP_0_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_class0_hp\n"); + coef_node_start = VP9_MV_CLASS0_HP_0_START; + } + + + den = count[coef_count_node_start] + + count[coef_count_node_start + 1]; + + prob_32 = prev_prob[coef_node_start / 4 * 2]; + prob_res = coef_node_start & 3; + prob_shift = prob_res * 8; + pre_prob = (prob_32 >> prob_shift) & 0xff; + + if (den == 0) + new_prob = pre_prob; + else { + m_count = (den < MODE_MV_COUNT_SAT) ? 
+ den : MODE_MV_COUNT_SAT; + get_prob = + clip_prob( + div_r32(((int64_t)count[coef_count_node_start] + * 256 + (den >> 1)), + den)); + /*weighted_prob*/ + factor = count_to_update_factor[m_count]; + new_prob = + ROUND_POWER_OF_TWO(pre_prob * (256 - factor) + + get_prob * factor, 8); + } + cur_prob[coef_node_start / 4 * 2] = + (cur_prob[coef_node_start / 4 * 2] & + (~(0xff << prob_shift))) + | (new_prob << prob_shift); + + coef_node_start = coef_node_start + 1; + } + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_inter_mode_tree\n"); + coef_node_start = VP9_INTER_MODE_START; + coef_count_node_start = VP9_INTER_MODE_COUNT_START; + for (tree_i = 0; tree_i < 7; tree_i++) { + for (node = 0; node < 3; node++) { + switch (node) { + case 2: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 3]; + break; + case 1: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 3]; + break; + default: + tree_left = + count[coef_count_node_start + 2]; + tree_right = + count[coef_count_node_start + 0] + + count[coef_count_node_start + 1] + + count[coef_count_node_start + 3]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 4; + } + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_intra_mode_tree\n"); + coef_node_start = VP9_IF_Y_MODE_START; + coef_count_node_start = VP9_IF_Y_MODE_COUNT_START; + for (tree_i = 0; tree_i < 14; tree_i++) { + for (node = 0; node < 9; node++) { + switch (node) { + case 8: + tree_left = + count[coef_count_node_start+D153_PRED]; + tree_right = + count[coef_count_node_start+D207_PRED]; + break; + case 7: + tree_left = + count[coef_count_node_start+D63_PRED]; + tree_right = + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED]; + 
break; + case 6: + tree_left = + count[coef_count_node_start + D45_PRED]; + tree_right = + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED] + + count[coef_count_node_start+D63_PRED]; + break; + case 5: + tree_left = + count[coef_count_node_start+D135_PRED]; + tree_right = + count[coef_count_node_start+D117_PRED]; + break; + case 4: + tree_left = + count[coef_count_node_start+H_PRED]; + tree_right = + count[coef_count_node_start+D117_PRED] + + count[coef_count_node_start+D135_PRED]; + break; + case 3: + tree_left = + count[coef_count_node_start+H_PRED] + + count[coef_count_node_start+D117_PRED] + + count[coef_count_node_start+D135_PRED]; + tree_right = + count[coef_count_node_start+D45_PRED] + + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED] + + count[coef_count_node_start+D63_PRED]; + break; + case 2: + tree_left = + count[coef_count_node_start+V_PRED]; + tree_right = + count[coef_count_node_start+H_PRED] + + count[coef_count_node_start+D117_PRED] + + count[coef_count_node_start+D135_PRED] + + count[coef_count_node_start+D45_PRED] + + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED] + + count[coef_count_node_start+D63_PRED]; + break; + case 1: + tree_left = + count[coef_count_node_start+TM_PRED]; + tree_right = + count[coef_count_node_start+V_PRED] + + count[coef_count_node_start+H_PRED] + + count[coef_count_node_start+D117_PRED] + + count[coef_count_node_start+D135_PRED] + + count[coef_count_node_start+D45_PRED] + + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED] + + count[coef_count_node_start+D63_PRED]; + break; + default: + tree_left = + count[coef_count_node_start+DC_PRED]; + tree_right = + count[coef_count_node_start+TM_PRED] + + count[coef_count_node_start+V_PRED] + + count[coef_count_node_start+H_PRED] + + count[coef_count_node_start+D117_PRED] + + count[coef_count_node_start+D135_PRED] + + 
count[coef_count_node_start+D45_PRED] + + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED] + + count[coef_count_node_start+D63_PRED]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 10; + } + + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_partition_tree\n"); + coef_node_start = VP9_PARTITION_P_START; + coef_count_node_start = VP9_PARTITION_P_COUNT_START; + for (tree_i = 0; tree_i < 16; tree_i++) { + for (node = 0; node < 3; node++) { + switch (node) { + case 2: + tree_left = + count[coef_count_node_start + 2]; + tree_right = + count[coef_count_node_start + 3]; + break; + case 1: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + default: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 4; + } + + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_switchable_interp_tree\n"); + coef_node_start = VP9_INTERP_START; + coef_count_node_start = VP9_INTERP_COUNT_START; + for (tree_i = 0; tree_i < 4; tree_i++) { + for (node = 0; node < 2; node++) { + switch (node) { + case 1: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 2]; + break; + default: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 2]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, 
tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 3; + } + + if (debug & VP9_DEBUG_MERGE) + pr_info("# merge_vp9_mv_joint_tree\n"); + coef_node_start = VP9_MV_JOINTS_START; + coef_count_node_start = VP9_MV_JOINTS_COUNT_START; + for (tree_i = 0; tree_i < 1; tree_i++) { + for (node = 0; node < 3; node++) { + switch (node) { + case 2: + tree_left = + count[coef_count_node_start + 2]; + tree_right = + count[coef_count_node_start + 3]; + break; + case 1: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + default: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 4; + } + + for (mvd_i = 0; mvd_i < 2; mvd_i++) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_mv_class_tree [%d] -\n", mvd_i); + coef_node_start = + mvd_i ? VP9_MV_CLASSES_1_START : VP9_MV_CLASSES_0_START; + coef_count_node_start = + mvd_i ? 
VP9_MV_CLASSES_1_COUNT_START + : VP9_MV_CLASSES_0_COUNT_START; + tree_i = 0; + for (node = 0; node < 10; node++) { + switch (node) { + case 9: + tree_left = + count[coef_count_node_start + 9]; + tree_right = + count[coef_count_node_start + 10]; + break; + case 8: + tree_left = + count[coef_count_node_start + 7]; + tree_right = + count[coef_count_node_start + 8]; + break; + case 7: + tree_left = + count[coef_count_node_start + 7] + + count[coef_count_node_start + 8]; + tree_right = + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + case 6: + tree_left = + count[coef_count_node_start + 6]; + tree_right = + count[coef_count_node_start + 7] + + count[coef_count_node_start + 8] + + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + case 5: + tree_left = + count[coef_count_node_start + 4]; + tree_right = + count[coef_count_node_start + 5]; + break; + case 4: + tree_left = + count[coef_count_node_start + 4] + + count[coef_count_node_start + 5]; + tree_right = + count[coef_count_node_start + 6] + + count[coef_count_node_start + 7] + + count[coef_count_node_start + 8] + + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + case 3: + tree_left = + count[coef_count_node_start + 2]; + tree_right = + count[coef_count_node_start + 3]; + break; + case 2: + tree_left = + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + tree_right = + count[coef_count_node_start + 4] + + count[coef_count_node_start + 5] + + count[coef_count_node_start + 6] + + count[coef_count_node_start + 7] + + count[coef_count_node_start + 8] + + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + case 1: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3] + + count[coef_count_node_start + 4] + + count[coef_count_node_start + 5] + + count[coef_count_node_start + 6] + + 
count[coef_count_node_start + 7] + + count[coef_count_node_start + 8] + + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + default: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3] + + count[coef_count_node_start + 4] + + count[coef_count_node_start + 5] + + count[coef_count_node_start + 6] + + count[coef_count_node_start + 7] + + count[coef_count_node_start + 8] + + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_mv_class0_tree [%d] -\n", mvd_i); + coef_node_start = + mvd_i ? VP9_MV_CLASS0_1_START : VP9_MV_CLASS0_0_START; + coef_count_node_start = + mvd_i ? VP9_MV_CLASS0_1_COUNT_START : + VP9_MV_CLASS0_0_COUNT_START; + tree_i = 0; + node = 0; + tree_left = count[coef_count_node_start + 0]; + tree_right = count[coef_count_node_start + 1]; + + vp9_tree_merge_probs(prev_prob, cur_prob, coef_node_start, + tree_left, tree_right, tree_i, node); + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_mv_fp_tree_class0_fp [%d] -\n", + mvd_i); + coef_node_start = + mvd_i ? VP9_MV_CLASS0_FP_1_START : + VP9_MV_CLASS0_FP_0_START; + coef_count_node_start = + mvd_i ? 
VP9_MV_CLASS0_FP_1_COUNT_START : + VP9_MV_CLASS0_FP_0_COUNT_START; + for (tree_i = 0; tree_i < 3; tree_i++) { + for (node = 0; node < 3; node++) { + switch (node) { + case 2: + tree_left = + count[coef_count_node_start + 2]; + tree_right = + count[coef_count_node_start + 3]; + break; + case 1: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + default: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 4; + } + + } /* for mvd_i (mvd_y or mvd_x)*/ +} + +} + +static void uninit_mmu_buffers(struct VP9Decoder_s *pbi) +{ +#ifndef MV_USE_FIXED_BUF + dealloc_mv_bufs(pbi); +#endif + if (pbi->mmu_box) { + decoder_mmu_box_free(pbi->mmu_box); + pbi->mmu_box = NULL; + } +#ifdef VP9_10B_MMU_DW + if (pbi->mmu_box_dw) { + decoder_mmu_box_free(pbi->mmu_box_dw); + pbi->mmu_box_dw = NULL; + } +#endif + if (pbi->bmmu_box) { + decoder_bmmu_box_free(pbi->bmmu_box); + pbi->bmmu_box = NULL; + } +} + +static int calc_luc_quantity(u32 w, u32 h) +{ + int lcu_size = 64; /*fixed 64*/ + int pic_width_64 = (w + 63) & (~0x3f); + int pic_height_32 = (h + 31) & (~0x1f); + int pic_width_lcu = (pic_width_64 % lcu_size) ? + pic_width_64 / lcu_size + 1 : pic_width_64 / lcu_size; + int pic_height_lcu = (pic_height_32 % lcu_size) ? 
+ pic_height_32 / lcu_size + 1 : pic_height_32 / lcu_size; + + return pic_width_lcu * pic_height_lcu; +} + +/* return in MB */ +static int vp9_max_mmu_buf_size(int max_w, int max_h) +{ + int buf_size = 48; + + if ((max_w * max_h > 1280*736) && + (max_w * max_h <= 1920*1088)) { + buf_size = 12; + } else if ((max_w * max_h > 0) && + (max_w * max_h <= 1280*736)) { + buf_size = 4; + } + + return buf_size; +} + +static int v4l_alloc_and_config_pic(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic) +{ + int ret = -1; + int i = pic->index; + int dw_mode = get_double_write_mode_init(pbi); + int lcu_total = calc_luc_quantity(pbi->frame_width, pbi->frame_height); +#ifdef MV_USE_FIXED_BUF + u32 mpred_mv_end = pbi->work_space_buf->mpred_mv.buf_start + + pbi->work_space_buf->mpred_mv.buf_size; + int mv_size = cal_mv_buf_size(pbi, pbi->frame_width, pbi->frame_height); +#endif + struct aml_vcodec_ctx * ctx = (struct aml_vcodec_ctx *)pbi->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + + if (i < 0) + return ret; + + ret = ctx->fb_ops.alloc(&ctx->fb_ops, pbi->fb_token, &fb, AML_FB_REQ_DEC); + if (ret < 0) { + vp9_print(pbi, 0, "[%d] VP9 get buffer fail.\n", + ((struct aml_vcodec_ctx *) (pbi->v4l2_ctx))->id); + return ret; + } + + fb->status = FB_ST_DECODER; + + if (pbi->mmu_enable) { + struct internal_comp_buf *ibuf = v4lfb_to_icomp_buf(pbi, fb); + + pbi->m_BUF[i].header_addr = ibuf->header_addr; + if (debug & VP9_DEBUG_BUFMGR_MORE) { + pr_info("MMU header_adr %d: %ld\n", + i, pbi->m_BUF[i].header_addr); + } + } + +#ifdef MV_USE_FIXED_BUF + if ((pbi->work_space_buf->mpred_mv.buf_start + + ((i + 1) * mv_size)) + <= mpred_mv_end) { +#endif + pbi->m_BUF[i].v4l_ref_buf_addr = (ulong)fb; + pic->cma_alloc_addr = fb->m.mem[0].addr; + if (fb->num_planes == 1) { + pbi->m_BUF[i].start_adr = fb->m.mem[0].addr; + pbi->m_BUF[i].luma_size = fb->m.mem[0].offset; + pbi->m_BUF[i].size = fb->m.mem[0].size; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + pic->dw_y_adr = 
pbi->m_BUF[i].start_adr; + pic->dw_u_v_adr = pic->dw_y_adr + pbi->m_BUF[i].luma_size; + pic->luma_size = fb->m.mem[0].offset; + pic->chroma_size = fb->m.mem[0].size - fb->m.mem[0].offset; + } else if (fb->num_planes == 2) { + pbi->m_BUF[i].start_adr = fb->m.mem[0].addr; + pbi->m_BUF[i].size = fb->m.mem[0].size; + pbi->m_BUF[i].chroma_addr = fb->m.mem[1].addr; + pbi->m_BUF[i].chroma_size = fb->m.mem[1].size; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + fb->m.mem[1].bytes_used = fb->m.mem[1].size; + pic->dw_y_adr = pbi->m_BUF[i].start_adr; + pic->dw_u_v_adr = pbi->m_BUF[i].chroma_addr; + pic->luma_size = fb->m.mem[0].size; + pic->chroma_size = fb->m.mem[1].size; + } + + /* config frame buffer */ + if (pbi->mmu_enable) + pic->header_adr = pbi->m_BUF[i].header_addr; + + pic->BUF_index = i; + pic->lcu_total = lcu_total; + pic->mc_canvas_y = pic->index; + pic->mc_canvas_u_v = pic->index; + + if (dw_mode & 0x10) { + pic->mc_canvas_y = (pic->index << 1); + pic->mc_canvas_u_v = (pic->index << 1) + 1; + } + +#ifdef MV_USE_FIXED_BUF + pic->mpred_mv_wr_start_addr = + pbi->work_space_buf->mpred_mv.buf_start + + (pic->index * mv_size); + pic->mv_size = mv_size; +#endif + if (debug) { + pr_info("%s index %d BUF_index %d ", + __func__, pic->index, + pic->BUF_index); + pr_info("comp_body_size %x comp_buf_size %x ", + pic->comp_body_size, + pic->buf_size); + pr_info("mpred_mv_wr_start_adr %ld\n", + pic->mpred_mv_wr_start_addr); + pr_info("dw_y_adr %d, pic_config->dw_u_v_adr =%d\n", + pic->dw_y_adr, + pic->dw_u_v_adr); + } +#ifdef MV_USE_FIXED_BUF + } +#endif + return ret; +} + +static int config_pic(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + int ret = -1; + int i; + int pic_width = pbi->init_pic_w; + int pic_height = pbi->init_pic_h; + int lcu_size = 64; /*fixed 64*/ + int pic_width_64 = (pic_width + 63) & (~0x3f); + int pic_height_32 = (pic_height + 31) & (~0x1f); + int pic_width_lcu = (pic_width_64 % lcu_size) ? 
+ pic_width_64 / lcu_size + 1 + : pic_width_64 / lcu_size; + int pic_height_lcu = (pic_height_32 % lcu_size) ? + pic_height_32 / lcu_size + 1 + : pic_height_32 / lcu_size; + int lcu_total = pic_width_lcu * pic_height_lcu; +#ifdef MV_USE_FIXED_BUF + u32 mpred_mv_end = pbi->work_space_buf->mpred_mv.buf_start + + pbi->work_space_buf->mpred_mv.buf_size; + int mv_size = cal_mv_buf_size(pbi, pbi->init_pic_w, pbi->init_pic_h); +#endif + u32 y_adr = 0; + int buf_size = 0; + + int losless_comp_header_size = + compute_losless_comp_header_size(pic_width, + pic_height); + int losless_comp_body_size = compute_losless_comp_body_size(pic_width, + pic_height, buf_alloc_depth == 10); + int mc_buffer_size = losless_comp_header_size + losless_comp_body_size; + int mc_buffer_size_h = (mc_buffer_size + 0xffff) >> 16; + int mc_buffer_size_u_v = 0; + int mc_buffer_size_u_v_h = 0; + int dw_mode = get_double_write_mode_init(pbi); + struct vdec_s *vdec = hw_to_vdec(pbi); + + pbi->lcu_total = lcu_total; + + if (dw_mode && (dw_mode & 0x20) == 0) { + int pic_width_dw = pic_width / + get_double_write_ratio(dw_mode & 0xf); + int pic_height_dw = pic_height / + get_double_write_ratio(dw_mode & 0xf); + + int pic_width_64_dw = (pic_width_dw + 63) & (~0x3f); + int pic_height_32_dw = (pic_height_dw + 31) & (~0x1f); + int pic_width_lcu_dw = (pic_width_64_dw % lcu_size) ? + pic_width_64_dw / lcu_size + 1 + : pic_width_64_dw / lcu_size; + int pic_height_lcu_dw = (pic_height_32_dw % lcu_size) ? 
+ pic_height_32_dw / lcu_size + 1 + : pic_height_32_dw / lcu_size; + int lcu_total_dw = pic_width_lcu_dw * pic_height_lcu_dw; + mc_buffer_size_u_v = lcu_total_dw * lcu_size * lcu_size / 2; + mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16; + /*64k alignment*/ + buf_size = ((mc_buffer_size_u_v_h << 16) * 3); + buf_size = ((buf_size + 0xffff) >> 16) << 16; + } + + if (mc_buffer_size & 0xffff) /*64k alignment*/ + mc_buffer_size_h += 1; + if ((!pbi->mmu_enable) && ((dw_mode & 0x10) == 0)) + buf_size += (mc_buffer_size_h << 16); + + if (pbi->mmu_enable) { + pic_config->header_adr = decoder_bmmu_box_get_phy_addr( + pbi->bmmu_box, HEADER_BUFFER_IDX(pic_config->index)); + + if (debug & VP9_DEBUG_BUFMGR_MORE) { + pr_info("MMU header_adr %d: %ld\n", + pic_config->index, pic_config->header_adr); + } +#ifdef VP9_10B_MMU_DW + if (pbi->dw_mmu_enable) { + pic_config->header_dw_adr = pic_config->header_adr + + vvp9_mmu_compress_header_size(pbi->init_pic_w, pbi->init_pic_h); + + if (debug & VP9_DEBUG_BUFMGR_MORE) { + pr_info("MMU header_dw_adr %d: %ld size 0x%x\n", + pic_config->index, pic_config->header_dw_adr, + vvp9_mmu_compress_header_size(pbi->init_pic_w, pbi->init_pic_h)); + } + } +#endif + } + i = pic_config->index; +#ifdef MV_USE_FIXED_BUF + if ((pbi->work_space_buf->mpred_mv.buf_start + + ((i + 1) * mv_size)) + <= mpred_mv_end + ) { +#endif + if (buf_size > 0) { + ret = decoder_bmmu_box_alloc_buf_phy(pbi->bmmu_box, + VF_BUFFER_IDX(i), + buf_size, DRIVER_NAME, + &pic_config->cma_alloc_addr); + if (ret < 0) { + pr_info( + "decoder_bmmu_box_alloc_buf_phy idx %d size %d fail\n", + VF_BUFFER_IDX(i), + buf_size + ); + return ret; + } + + if (pbi->enable_fence) { + //mm->fence_ref_release = vdec_fence_buffer_count_decrease; + vdec_fence_buffer_count_increase((ulong)vdec->sync); + INIT_LIST_HEAD(&vdec->sync->release_callback[VF_BUFFER_IDX(i)].node); + decoder_bmmu_box_add_callback_func(pbi->bmmu_box, VF_BUFFER_IDX(i), (void 
*)&vdec->sync->release_callback[VF_BUFFER_IDX(i)]); + } + + if (pic_config->cma_alloc_addr) + y_adr = pic_config->cma_alloc_addr; + else { + pr_info( + "decoder_bmmu_box_alloc_buf_phy idx %d size %d return null\n", + VF_BUFFER_IDX(i), + buf_size + ); + return -1; + } + } + { + /*ensure get_pic_by_POC() + not get the buffer not decoded*/ + pic_config->BUF_index = i; + pic_config->lcu_total = lcu_total; + + pic_config->comp_body_size = losless_comp_body_size; + pic_config->buf_size = buf_size; + + pic_config->mc_canvas_y = pic_config->index; + pic_config->mc_canvas_u_v = pic_config->index; + if (dw_mode & 0x10) { + pic_config->dw_y_adr = y_adr; + pic_config->dw_u_v_adr = y_adr + + ((mc_buffer_size_u_v_h << 16) << 1); + + pic_config->mc_canvas_y = + (pic_config->index << 1); + pic_config->mc_canvas_u_v = + (pic_config->index << 1) + 1; + } else if (dw_mode && ((dw_mode & 0x20) == 0)) { + pic_config->dw_y_adr = y_adr; + pic_config->dw_u_v_adr = pic_config->dw_y_adr + + ((mc_buffer_size_u_v_h << 16) << 1); + } +#ifdef MV_USE_FIXED_BUF + pic_config->mpred_mv_wr_start_addr = + pbi->work_space_buf->mpred_mv.buf_start + + (pic_config->index * mv_size); + pic_config->mv_size = mv_size; +#endif + if (debug) { + pr_info + ("%s index %d BUF_index %d ", + __func__, pic_config->index, + pic_config->BUF_index); + pr_info + ("comp_body_size %x comp_buf_size %x ", + pic_config->comp_body_size, + pic_config->buf_size); + pr_info + ("mpred_mv_wr_start_adr %ld\n", + pic_config->mpred_mv_wr_start_addr); + pr_info("dw_y_adr %d, pic_config->dw_u_v_adr =%d\n", + pic_config->dw_y_adr, + pic_config->dw_u_v_adr); + } + ret = 0; + } +#ifdef MV_USE_FIXED_BUF + } +#endif + return ret; +} + + +static void init_pic_list(struct VP9Decoder_s *pbi) +{ + int i; + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *pic_config; + u32 header_size; + struct vdec_s *vdec = hw_to_vdec(pbi); + + if (!pbi->is_used_v4l && pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) { + 
header_size = vvp9_mmu_compress_header_size( + pbi->max_pic_w, pbi->max_pic_h); +#ifdef VP9_10B_MMU_DW + if (pbi->dw_mmu_enable) + header_size <<= 1; +#endif + /*alloc VP9 compress header first*/ + for (i = 0; i < pbi->used_buf_num; i++) { + unsigned long buf_addr; + if (decoder_bmmu_box_alloc_buf_phy + (pbi->bmmu_box, + HEADER_BUFFER_IDX(i), header_size, + DRIVER_HEADER_NAME, + &buf_addr) < 0) { + pr_info("%s malloc compress header failed %d\n", + DRIVER_HEADER_NAME, i); + pbi->fatal_error |= DECODER_FATAL_ERROR_NO_MEM; + return; + } + if (pbi->enable_fence) { + vdec_fence_buffer_count_increase((ulong)vdec->sync); + INIT_LIST_HEAD(&vdec->sync->release_callback[HEADER_BUFFER_IDX(i)].node); + decoder_bmmu_box_add_callback_func(pbi->bmmu_box, HEADER_BUFFER_IDX(i), (void *)&vdec->sync->release_callback[HEADER_BUFFER_IDX(i)]); + //mm->fence_ref_release = vdec_fence_buffer_count_decrease; + } + } + } + for (i = 0; i < pbi->used_buf_num; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + pic_config->index = i; + pic_config->BUF_index = -1; + pic_config->mv_buf_index = -1; + if (vdec->parallel_dec == 1) { + pic_config->y_canvas_index = -1; + pic_config->uv_canvas_index = -1; + } + pic_config->y_crop_width = pbi->init_pic_w; + pic_config->y_crop_height = pbi->init_pic_h; + pic_config->double_write_mode = get_double_write_mode(pbi); + + if (!pbi->is_used_v4l) { + if (config_pic(pbi, pic_config) < 0) { + if (debug) + pr_info("Config_pic %d fail\n", + pic_config->index); + pic_config->index = -1; + break; + } + + if (pic_config->double_write_mode && + (pic_config->double_write_mode & 0x20) == 0) { + set_canvas(pbi, pic_config); + } + } + } + for (; i < pbi->used_buf_num; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + pic_config->index = -1; + pic_config->BUF_index = -1; + pic_config->mv_buf_index = -1; + if (vdec->parallel_dec == 1) { + pic_config->y_canvas_index = -1; + pic_config->uv_canvas_index = -1; + } + } + pr_info("%s ok, used_buf_num = %d\n", 
+ __func__, pbi->used_buf_num); +} + +static void init_pic_list_hw(struct VP9Decoder_s *pbi) +{ + int i; + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *pic_config; + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x0);*/ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (0x1 << 2)); + + for (i = 0; i < pbi->used_buf_num; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + if (pic_config->index < 0) + break; + + if (pbi->mmu_enable && ((pic_config->double_write_mode & 0x10) == 0)) { + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->header_adr >> 5); + } else { + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + * pic_config->mc_y_adr + * | (pic_config->mc_canvas_y << 8) | 0x1); + */ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->dw_y_adr >> 5); + } +#ifndef LOSLESS_COMPRESS_MODE + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + * pic_config->mc_u_v_adr + * | (pic_config->mc_canvas_u_v << 8)| 0x1); + */ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->header_adr >> 5); +#else + if (pic_config->double_write_mode & 0x10) { + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->dw_u_v_adr >> 5); + } +#endif + } + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1); + + /*Zero out canvas registers in IPP -- avoid simulation X*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 1); + for (i = 0; i < 32; i++) + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); +} + +static void dump_pic_list(struct VP9Decoder_s *pbi) +{ + struct VP9_Common_s *const cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *pic_config; + int i; + for (i = 0; i < FRAME_BUFFERS; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + vp9_print(pbi, 0, + "Buf(%d) index %d mv_buf_index %d ref_count %d vf_ref %d dec_idx %d slice_type %d w/h %d/%d adr: %x\n", + i, + pic_config->index, +#ifndef MV_USE_FIXED_BUF + pic_config->mv_buf_index, +#else + -1, +#endif + cm->buffer_pool-> + frame_bufs[i].ref_count, + pic_config->vf_ref, 
+ pic_config->decode_idx, + pic_config->slice_type, + pic_config->y_crop_width, + pic_config->y_crop_height, + pic_config->cma_alloc_addr + ); + } + return; +} + +static int config_pic_size(struct VP9Decoder_s *pbi, unsigned short bit_depth) +{ +#ifdef LOSLESS_COMPRESS_MODE + unsigned int data32; +#endif + int losless_comp_header_size, losless_comp_body_size; + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *cur_pic_config = &cm->cur_frame->buf; + + frame_width = cur_pic_config->y_crop_width; + frame_height = cur_pic_config->y_crop_height; + cur_pic_config->bit_depth = bit_depth; + cur_pic_config->double_write_mode = get_double_write_mode(pbi); + losless_comp_header_size = + compute_losless_comp_header_size(cur_pic_config->y_crop_width, + cur_pic_config->y_crop_height); + losless_comp_body_size = + compute_losless_comp_body_size(cur_pic_config->y_crop_width, + cur_pic_config->y_crop_height, (bit_depth == VPX_BITS_10)); + cur_pic_config->comp_body_size = losless_comp_body_size; +#ifdef LOSLESS_COMPRESS_MODE + data32 = READ_VREG(HEVC_SAO_CTRL5); + if (bit_depth == VPX_BITS_10) + data32 &= ~(1 << 9); + else + data32 |= (1 << 9); + + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + if (pbi->mmu_enable) { + /*bit[4] : paged_mem_mode*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4)); + } else { + /*bit[3] smem mdoe*/ + if (bit_depth == VPX_BITS_10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0 << 3)); + else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (1 << 3)); + } + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SM1) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, (losless_comp_body_size >> 5)); + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,(0xff<<20) | (0xff<<10) | 0xff);*/ + WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size); + if (get_double_write_mode(pbi) & 0x10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 
31); +#endif +#ifdef VP9_10B_MMU_DW + if (pbi->dw_mmu_enable) { + /*losless_comp_header_size_dw same as losless_comp_header_size*/ + WRITE_VREG(HEVC_CM_BODY_LENGTH2, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET2, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH2, losless_comp_header_size); + } +#endif + return 0; +} + +static int config_mc_buffer(struct VP9Decoder_s *pbi, unsigned short bit_depth) +{ + int i; + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *cur_pic_config = &cm->cur_frame->buf; + uint8_t scale_enable = 0; + + if (debug&VP9_DEBUG_BUFMGR_MORE) + pr_info("config_mc_buffer entered .....\n"); + + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 1); + for (i = 0; i < REFS_PER_FRAME; ++i) { + struct PIC_BUFFER_CONFIG_s *pic_config = cm->frame_refs[i].buf; + if (!pic_config) + continue; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic_config->mc_canvas_u_v << 16) + | (pic_config->mc_canvas_u_v << 8) + | pic_config->mc_canvas_y); + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info("refid %x mc_canvas_u_v %x mc_canvas_y %x\n", + i, pic_config->mc_canvas_u_v, + pic_config->mc_canvas_y); + } + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (0 << 1) | 1); + for (i = 0; i < REFS_PER_FRAME; ++i) { + struct PIC_BUFFER_CONFIG_s *pic_config = cm->frame_refs[i].buf; + if (!pic_config) + continue; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic_config->mc_canvas_u_v << 16) + | (pic_config->mc_canvas_u_v << 8) + | pic_config->mc_canvas_y); + } + + /*auto_inc start index:0 field:0*/ + WRITE_VREG(VP9D_MPP_REFINFO_TBL_ACCCONFIG, 0x1 << 2); + /*index 0:last 1:golden 2:altref*/ + for (i = 0; i < REFS_PER_FRAME; i++) { + int ref_pic_body_size; + struct PIC_BUFFER_CONFIG_s *pic_config = cm->frame_refs[i].buf; + if (!pic_config) + continue; + WRITE_VREG(VP9D_MPP_REFINFO_DATA, pic_config->y_crop_width); + WRITE_VREG(VP9D_MPP_REFINFO_DATA, pic_config->y_crop_height); + + if 
(pic_config->y_crop_width != cur_pic_config->y_crop_width || + pic_config->y_crop_height != cur_pic_config->y_crop_height) { + scale_enable |= (1 << i); + } + ref_pic_body_size = + compute_losless_comp_body_size(pic_config->y_crop_width, + pic_config->y_crop_height, (bit_depth == VPX_BITS_10)); + WRITE_VREG(VP9D_MPP_REFINFO_DATA, + (pic_config->y_crop_width << 14) + / cur_pic_config->y_crop_width); + WRITE_VREG(VP9D_MPP_REFINFO_DATA, + (pic_config->y_crop_height << 14) + / cur_pic_config->y_crop_height); + if (pbi->mmu_enable) + WRITE_VREG(VP9D_MPP_REFINFO_DATA, 0); + else + WRITE_VREG(VP9D_MPP_REFINFO_DATA, ref_pic_body_size >> 5); + } + WRITE_VREG(VP9D_MPP_REF_SCALE_ENBL, scale_enable); + return 0; +} + +static void clear_mpred_hw(struct VP9Decoder_s *pbi) +{ + unsigned int data32; + + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 &= (~(1 << 6)); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); +} + +static void config_mpred_hw(struct VP9Decoder_s *pbi) +{ + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *cur_pic_config = &cm->cur_frame->buf; + struct PIC_BUFFER_CONFIG_s *last_frame_pic_config = + &cm->prev_frame->buf; + + unsigned int data32; + int mpred_curr_lcu_x; + int mpred_curr_lcu_y; + int mpred_mv_rd_end_addr; + + + mpred_mv_rd_end_addr = last_frame_pic_config->mpred_mv_wr_start_addr + + last_frame_pic_config->mv_size; + //+ (last_frame_pic_config->lcu_total * MV_MEM_UNIT); + + data32 = READ_VREG(HEVC_MPRED_CURR_LCU); + mpred_curr_lcu_x = data32 & 0xffff; + mpred_curr_lcu_y = (data32 >> 16) & 0xffff; + + if (debug & VP9_DEBUG_BUFMGR) + pr_info("cur pic_config index %d col pic_config index %d\n", + cur_pic_config->index, last_frame_pic_config->index); + WRITE_VREG(HEVC_MPRED_CTRL3, 0x24122412); + WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, + pbi->work_space_buf->mpred_above.buf_start); + + data32 = READ_VREG(HEVC_MPRED_CTRL4); + + data32 &= (~(1 << 6)); + data32 |= (cm->use_prev_frame_mvs << 6); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); + + 
WRITE_VREG(HEVC_MPRED_MV_WR_START_ADDR, + cur_pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_WPTR, cur_pic_config->mpred_mv_wr_start_addr); + + WRITE_VREG(HEVC_MPRED_MV_RD_START_ADDR, + last_frame_pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_RPTR, + last_frame_pic_config->mpred_mv_wr_start_addr); + /*data32 = ((pbi->lcu_x_num - pbi->tile_width_lcu)*MV_MEM_UNIT);*/ + /*WRITE_VREG(HEVC_MPRED_MV_WR_ROW_JUMP,data32);*/ + /*WRITE_VREG(HEVC_MPRED_MV_RD_ROW_JUMP,data32);*/ + WRITE_VREG(HEVC_MPRED_MV_RD_END_ADDR, mpred_mv_rd_end_addr); + +} + +static void config_sao_hw(struct VP9Decoder_s *pbi, union param_u *params) +{ + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *pic_config = &cm->cur_frame->buf; + + unsigned int data32; + int lcu_size = 64; + int mc_buffer_size_u_v = + pic_config->lcu_total * lcu_size*lcu_size/2; + int mc_buffer_size_u_v_h = + (mc_buffer_size_u_v + 0xffff) >> 16;/*64k alignment*/ + struct aml_vcodec_ctx * v4l2_ctx = pbi->v4l2_ctx; + + if (get_double_write_mode(pbi) && + (get_double_write_mode(pbi) & 0x20) == 0) { + WRITE_VREG(HEVC_SAO_Y_START_ADDR, pic_config->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_START_ADDR, pic_config->dw_u_v_adr); + WRITE_VREG(HEVC_SAO_Y_WPTR, pic_config->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_WPTR, pic_config->dw_u_v_adr); + } else { + if (!pbi->dw_mmu_enable) { + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0xffffffff); + WRITE_VREG(HEVC_SAO_C_START_ADDR, 0xffffffff); + } + } + if (pbi->mmu_enable) + WRITE_VREG(HEVC_CM_HEADER_START_ADDR, pic_config->header_adr); + + if (pbi->is_used_v4l) { + WRITE_VREG(HEVC_SAO_Y_LENGTH, pic_config->luma_size); + WRITE_VREG(HEVC_SAO_C_LENGTH, pic_config->chroma_size); + if (debug & PRINT_FLAG_V4L_DETAIL) { + pr_info("[%d] config pic, id: %d, Y:(%x, %d) C:(%x, %d).\n", + v4l2_ctx->id, pic_config->index, + pic_config->dw_y_adr, pic_config->luma_size, + pic_config->dw_u_v_adr, pic_config->chroma_size); + } + } else { +#ifdef VP9_10B_MMU_DW + if 
(pbi->dw_mmu_enable) { + WRITE_VREG(HEVC_CM_HEADER_START_ADDR2, pic_config->header_dw_adr); + } +#endif + data32 = (mc_buffer_size_u_v_h << 16) << 1; + /*pr_info("data32=%x,mc_buffer_size_u_v_h=%x,lcu_total=%x\n", + data32, mc_buffer_size_u_v_h, pic_config->lcu_total);*/ + + WRITE_VREG(HEVC_SAO_Y_LENGTH, data32); + + data32 = (mc_buffer_size_u_v_h << 16); + WRITE_VREG(HEVC_SAO_C_LENGTH, data32); + } + +#ifdef VP9_10B_NV21 +#ifdef DOS_PROJECT + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + /*[13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32*/ + data32 |= (pbi->mem_map_mode << 12); + data32 &= (~0x3); + data32 |= 0x1; /* [1]:dw_disable [0]:cm_disable*/ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + /*[23:22] dw_v1_ctrl [21:20] dw_v0_ctrl [19:18] dw_h1_ctrl + * [17:16] dw_h0_ctrl + */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /*set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /*[5:4] address_format 00:linear 01:32x32 10:64x32*/ + data32 |= (pbi->mem_map_mode << 4); + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#else + /*m8baby test1902*/ + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + /*[13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32*/ + data32 |= (pbi->mem_map_mode << 12); + data32 &= (~0xff0); + /*data32 |= 0x670;*/ /*Big-Endian per 64-bit*/ + data32 |= 0x880; /*.Big-Endian per 64-bit */ + data32 &= (~0x3); + data32 |= 0x1; /*[1]:dw_disable [0]:cm_disable*/ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + /* [23:22] dw_v1_ctrl [21:20] dw_v0_ctrl + *[19:18] dw_h1_ctrl [17:16] dw_h0_ctrl + */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /* set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /*[5:4] address_format 00:linear 01:32x32 10:64x32*/ + data32 |= (pbi->mem_map_mode << 4); + data32 &= (~0xF); + 
data32 |= 0x8; /*Big-Endian per 64-bit*/ + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#endif +#else + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~(3 << 14)); + data32 |= (2 << 14); /* line align with 64*/ + + data32 &= (~0x3000); + /* [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 */ + data32 |= (pbi->mem_map_mode << 12); + + data32 &= (~0xff0); +#ifdef VP9_10B_MMU_DW + if (pbi->dw_mmu_enable == 0) + data32 |= ((pbi->endian >> 8) & 0xfff); /* Big-Endian per 64-bit */ +#else + data32 |= ((pbi->endian >> 8) & 0xfff); +#endif + data32 &= (~0x3); /*[1]:dw_disable [0]:cm_disable*/ + if (get_double_write_mode(pbi) == 0) + data32 |= 0x2; /*disable double write*/ + else if (get_double_write_mode(pbi) & 0x10) + data32 |= 0x1; /*disable cm*/ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { /* >= G12A dw write control */ + unsigned int data; + data = READ_VREG(HEVC_DBLK_CFGB); + data &= (~0x300); /*[8]:first write enable (compress) [9]:double write enable (uncompress)*/ + if (get_double_write_mode(pbi) == 0) + data |= (0x1 << 8); /*enable first write*/ + else if (get_double_write_mode(pbi) & 0x10) + data |= (0x1 << 9); /*double write only*/ + else + data |= ((0x1 << 8) |(0x1 << 9)); + WRITE_VREG(HEVC_DBLK_CFGB, data); + } + + /* swap uv */ + if (pbi->is_used_v4l) { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + data32 &= ~(1 << 8); /* NV21 */ + else + data32 |= (1 << 8); /* NV12 */ + } + + /* + * [31:24] ar_fifo1_axi_thred + * [23:16] ar_fifo0_axi_thred + * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes + * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + * [11:08] axi_lendian_C + * [07:04] axi_lendian_Y + * [3] reserved + * [2] clk_forceon + * [1] dw_disable:disable double write output + * [0] cm_disable:disable compress output + */ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + + if (get_double_write_mode(pbi) & 0x10) { + /* [23:22] dw_v1_ctrl + 
 *[21:20] dw_v0_ctrl
 *[19:18] dw_h1_ctrl
 *[17:16] dw_h0_ctrl
 */
	data32 = READ_VREG(HEVC_SAO_CTRL5);
	/*set them all 0 for H265_NV21 (no down-scale)*/
	data32 &= ~(0xff << 16);
	WRITE_VREG(HEVC_SAO_CTRL5, data32);
	} else {
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7)
			WRITE_VREG(HEVC_SAO_CTRL26, 0);

		/* Select the down-scale ratio bits in SAO_CTRL5 from the
		 * low nibble of the double-write mode (values 2/3/4/8/9
		 * are the scaled DW modes; others leave the field 0).
		 * NOTE(review): bit meanings taken from the block comment
		 * above -- confirm against the SoC datasheet.
		 */
		data32 = READ_VREG(HEVC_SAO_CTRL5);
		data32 &= (~(0xff << 16));
		if ((get_double_write_mode(pbi) & 0xf) == 8 ||
			(get_double_write_mode(pbi) & 0xf) == 9) {
			data32 |= (0xff << 16);
			WRITE_VREG(HEVC_SAO_CTRL26, 0xf);
		} else if ((get_double_write_mode(pbi) & 0xf) == 2 ||
			(get_double_write_mode(pbi) & 0xf) == 3)
			data32 |= (0xff << 16);
		else if ((get_double_write_mode(pbi) & 0xf) == 4)
			data32 |= (0x33 << 16);
		WRITE_VREG(HEVC_SAO_CTRL5, data32);
	}

	data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG);
	data32 &= (~0x30);
	/* [5:4] -- address_format 00:linear 01:32x32 10:64x32 */
	data32 |= (pbi->mem_map_mode << 4);
	data32 &= (~0xf);
	data32 |= (pbi->endian & 0xf); /* valid only when double write only */

	/* swap uv */
	if (pbi->is_used_v4l) {
		if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) ||
			(v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M))
			data32 |= (1 << 12); /* NV21 */
		else
			data32 &= ~(1 << 12); /* NV12 */
	}
	data32 &= (~(3 << 8));
	data32 |= (2 << 8); /* line align with 64 for dw only */
	/*
	 * [3:0]   little_endian
	 * [5:4]   address_format 00:linear 01:32x32 10:64x32
	 * [7:6]   reserved
	 * [9:8]   Linear_LineAlignment 00:16byte 01:32byte 10:64byte
	 * [11:10] reserved
	 * [12]    CbCr_byte_swap
	 * [31:13] reserved
	 */
	WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32);
#endif
}

/*
 * vp9_config_work_space_hw() - program the workspace buffer base
 * addresses (from pbi->work_space_buf) into the decoder hardware.
 *
 * @pbi:  decoder instance
 * @mask: HW_MASK_FRONT configures the stream/parser front-end
 *        registers; HW_MASK_BACK configures the pixel back-end
 *        (deblock, compressed-frame, MMU, mpred) registers.
 *        Returns early after the front half when HW_MASK_BACK
 *        is not requested.
 */
static void vp9_config_work_space_hw(struct VP9Decoder_s *pbi, u32 mask)
{
	struct BuffInfo_s *buf_spec = pbi->work_space_buf;
	unsigned int data32, data_tmp;
	int losless_comp_header_size, losless_comp_body_size;

	/* dump the workspace layout once, on first init, when debugging */
	if (debug && pbi->init_flag == 0)
		pr_info("%s w %d h %d %x %x %x %x %x %x %x %x %x %x %x %x\n",
			__func__,
			buf_spec->max_width,
			buf_spec->max_height,
			buf_spec->ipp.buf_start,
			buf_spec->start_adr,
			buf_spec->short_term_rps.buf_start,
			buf_spec->vps.buf_start,
			buf_spec->sps.buf_start,
			buf_spec->pps.buf_start,
			buf_spec->sao_up.buf_start,
			buf_spec->swap_buf.buf_start,
			buf_spec->swap_buf2.buf_start,
			buf_spec->scalelut.buf_start,
			buf_spec->dblk_para.buf_start,
			buf_spec->dblk_data.buf_start);

	if (mask & HW_MASK_FRONT) {
		if ((debug & VP9_DEBUG_SEND_PARAM_WITH_REG) == 0)
			WRITE_VREG(HEVC_RPM_BUFFER, (u32)pbi->rpm_phy_addr);

		WRITE_VREG(HEVC_SHORT_TERM_RPS,
			buf_spec->short_term_rps.buf_start);
		/*WRITE_VREG(HEVC_VPS_BUFFER, buf_spec->vps.buf_start);*/
		/*WRITE_VREG(HEVC_SPS_BUFFER, buf_spec->sps.buf_start);*/
		WRITE_VREG(HEVC_PPS_BUFFER, buf_spec->pps.buf_start);
		WRITE_VREG(HEVC_STREAM_SWAP_BUFFER,
			buf_spec->swap_buf.buf_start);
		WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2,
			buf_spec->swap_buf2.buf_start);
		WRITE_VREG(LMEM_DUMP_ADR, (u32)pbi->lmem_phy_addr);

	}

	if ((mask & HW_MASK_BACK) == 0)
		return;

#ifdef LOSLESS_COMPRESS_MODE
	/* sizes of the compressed-frame (AFBC-style) header/body for the
	 * configured max picture size; depth depends on buf_alloc_depth
	 */
	losless_comp_header_size =
		compute_losless_comp_header_size(pbi->init_pic_w,
		pbi->init_pic_h);
	losless_comp_body_size =
		compute_losless_comp_body_size(pbi->init_pic_w,
		pbi->init_pic_h, buf_alloc_depth == 10);
#endif
	WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE,
		buf_spec->ipp.buf_start);
	//WRITE_VREG(HEVC_SAO_UP, buf_spec->sao_up.buf_start);
	//WRITE_VREG(HEVC_SCALELUT, buf_spec->scalelut.buf_start);
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) {
		/* cfg_addr_adp*/
		WRITE_VREG(HEVC_DBLK_CFGE, buf_spec->dblk_para.buf_start);
		if (debug & VP9_DEBUG_BUFMGR_MORE)
			pr_info("Write HEVC_DBLK_CFGE\n");
	}
	/* cfg_p_addr */
	WRITE_VREG(HEVC_DBLK_CFG4, buf_spec->dblk_para.buf_start);

	/* cfg_d_addr */
	WRITE_VREG(HEVC_DBLK_CFG5, buf_spec->dblk_data.buf_start);

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) {
		/*
		 * data32 = (READ_VREG(P_HEVC_DBLK_CFG3)>>8) & 0xff; // xio left offset, default is 0x40
		 * data32 = data32 * 2;
		 * data32 = (READ_VREG(P_HEVC_DBLK_CFG3)>>16) & 0xff; // adp left offset, default is 0x040
		 * data32 = data32 * 2;
		 */
		/* widen deblock left-column storage for >4K streams */
		if (buf_spec->max_width <= 4096 && buf_spec->max_height <= 2304)
			WRITE_VREG(HEVC_DBLK_CFG3, 0x404010); //default value
		else
			WRITE_VREG(HEVC_DBLK_CFG3, 0x808020); // make left storage 2 x 4k
		vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE,
			"HEVC_DBLK_CFG3 = %x\n", READ_VREG(HEVC_DBLK_CFG3));
	}
#ifdef LOSLESS_COMPRESS_MODE
	if (pbi->mmu_enable) {
		/*bit[4] : paged_mem_mode*/
		WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4));
		if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SM1)
			WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0);
	} else {
		/*if(cur_pic_config->bit_depth == VPX_BITS_10)
		 *	WRITE_VREG(P_HEVCD_MPP_DECOMP_CTL1, (0<<3));
		 */
		/*bit[3] smem mode*/
		/*else WRITE_VREG(P_HEVCD_MPP_DECOMP_CTL1, (1<<3));*/
		/*bit[3] smem mode*/
		WRITE_VREG(HEVCD_MPP_DECOMP_CTL2,
			(losless_comp_body_size >> 5));
	}
	/*WRITE_VREG(HEVCD_MPP_DECOMP_CTL2,
		(losless_comp_body_size >> 5));*/
	/*WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,
		(0xff<<20) | (0xff<<10) | 0xff);*/
	/*8-bit mode */
	WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size);
	WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size);
	WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size);

	/* double-write-only mode (0x10): bypass the compressed path */
	if (get_double_write_mode(pbi) & 0x10)
		WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31);
#else
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31);
#endif

	if (pbi->mmu_enable) {
		/* vertical boundary-handling buffers for the frame MMU */
		WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR, buf_spec->mmu_vbh.buf_start);
		WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR, buf_spec->mmu_vbh.buf_start +
			VBH_BUF_SIZE(buf_spec));
		/*data32 = READ_VREG(P_HEVC_SAO_CTRL9);*/
		/*data32 |= 0x1;*/
		/*WRITE_VREG(P_HEVC_SAO_CTRL9, data32);*/

		/* use HEVC_CM_HEADER_START_ADDR */
		data32 = READ_VREG(HEVC_SAO_CTRL5);
		data32 |= (1<<10);
		WRITE_VREG(HEVC_SAO_CTRL5, data32);
	}
#ifdef VP9_10B_MMU_DW
	/* same programming again for the separate double-write MMU */
	if (pbi->dw_mmu_enable) {
		data_tmp = READ_VREG(HEVC_SAO_CTRL9);
		data_tmp |= (1<<10);
		WRITE_VREG(HEVC_SAO_CTRL9, data_tmp);

		WRITE_VREG(HEVC_CM_BODY_LENGTH2, losless_comp_body_size);
		WRITE_VREG(HEVC_CM_HEADER_OFFSET2, losless_comp_body_size);
		WRITE_VREG(HEVC_CM_HEADER_LENGTH2, losless_comp_header_size);

		WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR2, buf_spec->mmu_vbh_dw.buf_start);
		WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR2, buf_spec->mmu_vbh_dw.buf_start +
			DW_VBH_BUF_SIZE(buf_spec));

		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) {
			if (get_double_write_mode(pbi) & 0x20) {
				WRITE_VREG(HEVC_DW_VH0_ADDDR,
					buf_spec->mmu_vbh_dw.buf_start + DW_VBH_BUF_SIZE(buf_spec) * 2);
				WRITE_VREG(HEVC_DW_VH1_ADDDR,
					buf_spec->mmu_vbh_dw.buf_start + DW_VBH_BUF_SIZE(buf_spec) * 3);
			}
		}
		data32 = READ_VREG(HEVC_SAO_CTRL5);
		/* use HEVC_CM_HEADER_START_ADDR */
		data32 |= (1<<15);
		WRITE_VREG(HEVC_SAO_CTRL5, data32);
	}
#endif

	/* config mpred axi burst threshold */
	WRITE_VREG(HEVC_MPRED_CTRL3, 0x24122412);

#ifdef CO_MV_COMPRESS
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) {
		data32 = READ_VREG(HEVC_MPRED_CTRL4);
		data32 |= (1 << 1);
		WRITE_VREG(HEVC_MPRED_CTRL4, data32);
	}
#endif

	WRITE_VREG(VP9_SEG_MAP_BUFFER, buf_spec->seg_map.buf_start);

	WRITE_VREG(LMEM_DUMP_ADR, (u32)pbi->lmem_phy_addr);

	WRITE_VREG(VP9_PROB_SWAP_BUFFER, pbi->prob_buffer_phy_addr);
	WRITE_VREG(VP9_COUNT_SWAP_BUFFER, pbi->count_buffer_phy_addr);
	if (pbi->mmu_enable) {
		/* MMU map register moved on G12A and later parts */
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR, pbi->frame_mmu_map_phy_addr);
		else
			WRITE_VREG(VP9_MMU_MAP_BUFFER, pbi->frame_mmu_map_phy_addr);
	}
#ifdef VP9_10B_MMU_DW
	if (pbi->dw_mmu_enable) {
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) {
			WRITE_VREG(HEVC_SAO_MMU_DMA_CTRL2, pbi->frame_mmu_dw_map_phy_addr);
			//WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR2, pbi->frame_mmu_dw_map_phy_addr);
		} else {
			WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR2, pbi->frame_mmu_dw_map_phy_addr);
		}
		WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0);
		WRITE_VREG(HEVC_SAO_C_START_ADDR, 0);
#ifdef VP9_10B_HED_FB
		WRITE_VREG(HEVC_ASSIST_FB_MMU_MAP_ADDR2, FB_FRAME_MMU_MAP_ADDR);
	#ifdef VP9_10B_HED_SAME_FB
		WRITE_VREG(HEVC_ASSIST_FBD_MMU_MAP_ADDR2, FB_FRAME_MMU_MAP_ADDR);
	#endif
#endif
	}
#endif
}


#ifdef VP9_LPF_LVL_UPDATE
/*
 * Defines, declarations, sub-functions for vp9 de-block loop
	filter Thr/Lvl table update
 * - struct segmentation is for loop filter only (removed something)
 * - function "vp9_loop_filter_init" and "vp9_loop_filter_frame_init" will
	be instantiated in C_Entry
 * - vp9_loop_filter_init run once before decoding start
 * - vp9_loop_filter_frame_init run before every frame decoding start
 * - set video format to VP9 is in vp9_loop_filter_init
 */
#define MAX_LOOP_FILTER 63
#define MAX_REF_LF_DELTAS 4
#define MAX_MODE_LF_DELTAS 2
/*#define INTRA_FRAME 0*/
/*#define LAST_FRAME 1*/
/*#define MAX_REF_FRAMES 4*/
#define SEGMENT_DELTADATA 0
#define SEGMENT_ABSDATA 1
#define MAX_SEGMENTS 8
/*.#define SEG_TREE_PROBS (MAX_SEGMENTS-1)*/
/*no use for loop filter, if this struct for common use, pls add it back*/
/*#define PREDICTION_PROBS 3*/
/* no use for loop filter, if this struct for common use, pls add it back*/

/* per-segment feature ids, mirrors libvpx SEG_LVL_FEATURES */
enum SEG_LVL_FEATURES {
	SEG_LVL_ALT_Q = 0, /*Use alternate Quantizer ....*/
	SEG_LVL_ALT_LF = 1, /*Use alternate loop filter value...*/
	SEG_LVL_REF_FRAME = 2, /*Optional Segment reference frame*/
	SEG_LVL_SKIP = 3, /*Optional Segment (0,0) + skip mode*/
	SEG_LVL_MAX = 4 /*Number of features supported*/
};

/* loop-filter-only subset of the libvpx segmentation state */
struct segmentation {
	uint8_t enabled;
	uint8_t update_map;
	uint8_t update_data;
	uint8_t abs_delta;
	uint8_t temporal_update;

	/*no use for loop filter, if this struct
	 *for common use, pls add it back
	 */
	/*vp9_prob tree_probs[SEG_TREE_PROBS]; */
	/* no use for loop filter, if this struct
	 * for common use,
pls add it back + */ + /*vp9_prob pred_probs[PREDICTION_PROBS];*/ + + int16_t feature_data[MAX_SEGMENTS][SEG_LVL_MAX]; + unsigned int feature_mask[MAX_SEGMENTS]; +}; + +struct loop_filter_thresh { + uint8_t mblim; + uint8_t lim; + uint8_t hev_thr; +}; + +struct loop_filter_info_n { + struct loop_filter_thresh lfthr[MAX_LOOP_FILTER + 1]; + uint8_t lvl[MAX_SEGMENTS][MAX_REF_FRAMES][MAX_MODE_LF_DELTAS]; +}; + +struct loopfilter { + int filter_level; + + int sharpness_level; + int last_sharpness_level; + + uint8_t mode_ref_delta_enabled; + uint8_t mode_ref_delta_update; + + /*0 = Intra, Last, GF, ARF*/ + signed char ref_deltas[MAX_REF_LF_DELTAS]; + signed char last_ref_deltas[MAX_REF_LF_DELTAS]; + + /*0 = ZERO_MV, MV*/ + signed char mode_deltas[MAX_MODE_LF_DELTAS]; + signed char last_mode_deltas[MAX_MODE_LF_DELTAS]; +}; + +static int vp9_clamp(int value, int low, int high) +{ + return value < low ? low : (value > high ? high : value); +} + +int segfeature_active(struct segmentation *seg, + int segment_id, + enum SEG_LVL_FEATURES feature_id) { + return seg->enabled && + (seg->feature_mask[segment_id] & (1 << feature_id)); +} + +int get_segdata(struct segmentation *seg, int segment_id, + enum SEG_LVL_FEATURES feature_id) { + return seg->feature_data[segment_id][feature_id]; +} + +static void vp9_update_sharpness(struct loop_filter_info_n *lfi, + int sharpness_lvl) +{ + int lvl; + /*For each possible value for the loop filter fill out limits*/ + for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) { + /*Set loop filter parameters that control sharpness.*/ + int block_inside_limit = lvl >> ((sharpness_lvl > 0) + + (sharpness_lvl > 4)); + + if (sharpness_lvl > 0) { + if (block_inside_limit > (9 - sharpness_lvl)) + block_inside_limit = (9 - sharpness_lvl); + } + + if (block_inside_limit < 1) + block_inside_limit = 1; + + lfi->lfthr[lvl].lim = (uint8_t)block_inside_limit; + lfi->lfthr[lvl].mblim = (uint8_t)(2 * (lvl + 2) + + block_inside_limit); + } +} + +/*instantiate this function 
 once when decode is started*/
/*
 * vp9_loop_filter_init() - one-time loop filter setup: zero the SW
 * state, build the threshold table, push it to HEVC_DBLK_CFG9
 * (two levels per 32-bit write) and select the VP9 video format in
 * HEVC_DBLK_CFGB (layout differs per CPU generation).
 */
void vp9_loop_filter_init(struct VP9Decoder_s *pbi)
{
	struct loop_filter_info_n *lfi = pbi->lfi;
	struct loopfilter *lf = pbi->lf;
	struct segmentation *seg_4lf = pbi->seg_4lf;
	int i;
	unsigned int data32;

	memset(lfi, 0, sizeof(struct loop_filter_info_n));
	memset(lf, 0, sizeof(struct loopfilter));
	memset(seg_4lf, 0, sizeof(struct segmentation));
	lf->sharpness_level = 0; /*init to 0 */
	/*init limits for given sharpness*/
	vp9_update_sharpness(lfi, lf->sharpness_level);
	lf->last_sharpness_level = lf->sharpness_level;
	/*init hev threshold const vectors (actually no use)
	 *for (i = 0; i <= MAX_LOOP_FILTER; i++)
	 *	lfi->lfthr[i].hev_thr = (uint8_t)(i >> 4);
	 */

	/*Write to register: pack lim/mblim of levels 2i and 2i+1*/
	for (i = 0; i < 32; i++) {
		unsigned int thr;

		thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f)<<8) |
			(lfi->lfthr[i * 2 + 1].mblim & 0xff);
		thr = (thr<<16) | ((lfi->lfthr[i*2].lim & 0x3f)<<8) |
			(lfi->lfthr[i * 2].mblim & 0xff);
		WRITE_VREG(HEVC_DBLK_CFG9, thr);
	}

	/*video format is VP9*/
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) {
		data32 = (0x3 << 14) | // (dw fifo thres r and b)
			(0x3 << 12) | // (dw fifo thres r or b)
			(0x3 << 10) | // (dw fifo thres not r/b)
			(0x3 << 8) | // 1st/2nd write both enable
			(0x1 << 0); // vp9 video format
		/* compressed-only output: drop the 2nd (double) write */
		if (get_double_write_mode(pbi) == 0x10)
			data32 &= (~0x100);
	} else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) {
		data32 = (0x57 << 8) |  /*1st/2nd write both enable*/
			(0x1 << 0); /*vp9 video format*/
		if (get_double_write_mode(pbi) == 0x10)
			data32 &= (~0x100);
	} else
		data32 = 0x40400001;

	WRITE_VREG(HEVC_DBLK_CFGB, data32);
	if (debug & VP9_DEBUG_BUFMGR_MORE)
		pr_info("[DBLK DEBUG] CFGB : 0x%x\n", data32);
}

/* perform this function per frame*/
/*
 * vp9_loop_filter_frame_init() - recompute the per-segment /
 * per-reference / per-mode filter levels for the coming frame and
 * write them to HEVC_DBLK_CFGA; refreshes HEVC_DBLK_CFG9 first if
 * the sharpness level changed. Mirrors libvpx
 * vp9_loop_filter_frame_init().
 */
void vp9_loop_filter_frame_init(struct segmentation *seg,
	struct loop_filter_info_n *lfi, struct loopfilter *lf,
	int default_filt_lvl) {
	int i;
	int seg_id;
	/*n_shift is the multiplier
	 *for lf_deltas
	 *the multiplier is 1 for when filter_lvl is between 0 and 31;
	 *2 when filter_lvl is between 32 and 63
	 */
	const int scale = 1 << (default_filt_lvl >> 5);

	/*update limits if sharpness has changed*/
	if (lf->last_sharpness_level != lf->sharpness_level) {
		vp9_update_sharpness(lfi, lf->sharpness_level);
		lf->last_sharpness_level = lf->sharpness_level;

		/*Write to register*/
		for (i = 0; i < 32; i++) {
			unsigned int thr;

			thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f) << 8)
				| (lfi->lfthr[i * 2 + 1].mblim & 0xff);
			thr = (thr << 16) | ((lfi->lfthr[i * 2].lim & 0x3f) << 8)
				| (lfi->lfthr[i * 2].mblim & 0xff);
			WRITE_VREG(HEVC_DBLK_CFG9, thr);
		}
	}

	for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) {/*MAX_SEGMENTS = 8*/
		int lvl_seg = default_filt_lvl;

		/* apply the per-segment alternate filter level, either
		 * absolute or as a delta on the frame default
		 */
		if (segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) {
			const int data = get_segdata(seg, seg_id,
				SEG_LVL_ALT_LF);
			lvl_seg = vp9_clamp(seg->abs_delta == SEGMENT_ABSDATA ?
				data : default_filt_lvl + data,
				0, MAX_LOOP_FILTER);
#ifdef DBG_LF_PRINT
			pr_info("segfeature_active!!!seg_id=%d,lvl_seg=%d\n", seg_id, lvl_seg);
#endif
		}

		if (!lf->mode_ref_delta_enabled) {
			/*we could get rid of this if we assume that deltas are set to
			 *zero when not in use; encoder always uses deltas
			 */
			memset(lfi->lvl[seg_id], lvl_seg, sizeof(lfi->lvl[seg_id]));
		} else {
			int ref, mode;
			const int intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME]
				* scale;
#ifdef DBG_LF_PRINT
			pr_info("LF_PRINT:vp9_loop_filter_frame_init,seg_id=%d\n", seg_id);
			pr_info("ref_deltas[INTRA_FRAME]=%d\n", lf->ref_deltas[INTRA_FRAME]);
#endif
			lfi->lvl[seg_id][INTRA_FRAME][0] =
				vp9_clamp(intra_lvl, 0, MAX_LOOP_FILTER);

			for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) {
				/* LAST_FRAME = 1, MAX_REF_FRAMES = 4*/
				for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) {
					/*MAX_MODE_LF_DELTAS = 2*/
					const int inter_lvl =
						lvl_seg + lf->ref_deltas[ref] * scale
						+ lf->mode_deltas[mode] *
 scale;
#ifdef DBG_LF_PRINT
#endif
					lfi->lvl[seg_id][ref][mode] =
						vp9_clamp(inter_lvl, 0,
							MAX_LOOP_FILTER);
				}
			}
		}
	}

#ifdef DBG_LF_PRINT
	/*print out thr/lvl table per frame*/
	for (i = 0; i <= MAX_LOOP_FILTER; i++) {
		pr_info("LF_PRINT:(%d)thr=%d,blim=%d,lim=%d\n",
			i, lfi->lfthr[i].hev_thr, lfi->lfthr[i].mblim,
			lfi->lfthr[i].lim);
	}
	for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) {
		pr_info("LF_PRINT:lvl(seg_id=%d)(mode=0,%d,%d,%d,%d)\n",
			seg_id, lfi->lvl[seg_id][0][0],
			lfi->lvl[seg_id][1][0], lfi->lvl[seg_id][2][0],
			lfi->lvl[seg_id][3][0]);
		pr_info("i(mode=1,%d,%d,%d,%d)\n", lfi->lvl[seg_id][0][1],
			lfi->lvl[seg_id][1][1], lfi->lvl[seg_id][2][1],
			lfi->lvl[seg_id][3][1]);
	}
#endif

	/*Write to register: 16 words, each packs the 4 reference levels
	 *for one (seg_id, mode) pair; all-zero when filtering is off
	 */
	for (i = 0; i < 16; i++) {
		unsigned int level;

		level = ((lfi->lvl[i >> 1][3][i & 1] & 0x3f) << 24) |
			((lfi->lvl[i >> 1][2][i & 1] & 0x3f) << 16) |
			((lfi->lvl[i >> 1][1][i & 1] & 0x3f) << 8) |
			(lfi->lvl[i >> 1][0][i & 1] & 0x3f);
		if (!default_filt_lvl)
			level = 0;
		WRITE_VREG(HEVC_DBLK_CFGA, level);
	}
}
/* VP9_LPF_LVL_UPDATE */
#endif

/*
 * vp9_init_decoder_hw() - bring up the decoder hardware: parser
 * interrupt/shift/CABAC configuration (HW_MASK_FRONT), scale-LUT
 * clearing and IPP/MPP reset (HW_MASK_BACK), decode mode selection
 * and the microcode parser command table download.
 * @mask: HW_MASK_FRONT and/or HW_MASK_BACK, as in
 *        vp9_config_work_space_hw().
 */
static void vp9_init_decoder_hw(struct VP9Decoder_s *pbi, u32 mask)
{
	unsigned int data32;
	int i;
	/* opaque microcode command stream, downloaded verbatim below */
	const unsigned short parser_cmd[PARSER_CMD_NUMBER] = {
		0x0401, 0x8401, 0x0800, 0x0402, 0x9002, 0x1423,
		0x8CC3, 0x1423, 0x8804, 0x9825, 0x0800, 0x04FE,
		0x8406, 0x8411, 0x1800, 0x8408, 0x8409, 0x8C2A,
		0x9C2B, 0x1C00, 0x840F, 0x8407, 0x8000, 0x8408,
		0x2000, 0xA800, 0x8410, 0x04DE, 0x840C, 0x840D,
		0xAC00, 0xA000, 0x08C0, 0x08E0, 0xA40E, 0xFC00,
		0x7C00
	};
#if 0
	if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) {
		/* Set MCR fetch priorities*/
		data32 = 0x1 | (0x1 << 2) | (0x1 <<3) |
			(24 << 4) | (32 << 11) | (24 << 18) | (32 << 25);
		WRITE_VREG(HEVCD_MPP_DECOMP_AXIURG_CTL, data32);
	}
#endif
	/*if (debug & VP9_DEBUG_BUFMGR_MORE)
		pr_info("%s\n", __func__);*/
	if (mask & HW_MASK_FRONT) {
		data32 = READ_VREG(HEVC_PARSER_INT_CONTROL);
#if 1
		/* set bit 31~29 to 3 if HEVC_STREAM_FIFO_CTL[29] is 1 */
		data32 &= ~(7 << 29);
		data32 |= (3 << 29);
#endif
		data32 = data32 |
			(1 << 24) |/*stream_buffer_empty_int_amrisc_enable*/
			(1 << 22) |/*stream_fifo_empty_int_amrisc_enable*/
			(1 << 7) |/*dec_done_int_cpu_enable*/
			(1 << 4) |/*startcode_found_int_cpu_enable*/
			(0 << 3) |/*startcode_found_int_amrisc_enable*/
			(1 << 0) /*parser_int_enable*/
			;
#ifdef SUPPORT_FB_DECODING
#ifndef FB_DECODING_TEST_SCHEDULE
		/*fed_fb_slice_done_int_cpu_enable*/
		if (pbi->used_stage_buf_num > 0)
			data32 |= (1 << 10);
#endif
#endif
		WRITE_VREG(HEVC_PARSER_INT_CONTROL, data32);

		data32 = READ_VREG(HEVC_SHIFT_STATUS);
		data32 = data32 |
			(0 << 1) |/*emulation_check_off VP9
				do not have emulation*/
			(1 << 0)/*startcode_check_on*/
			;
		WRITE_VREG(HEVC_SHIFT_STATUS, data32);
		WRITE_VREG(HEVC_SHIFT_CONTROL,
			(0 << 14) | /*disable_start_code_protect*/
			(1 << 10) | /*length_zero_startcode_en for VP9*/
			(1 << 9) | /*length_valid_startcode_en for VP9*/
			(3 << 6) | /*sft_valid_wr_position*/
			(2 << 4) | /*emulate_code_length_sub_1*/
			(3 << 1) | /*start_code_length_sub_1
				VP9 use 0x00000001 as startcode (4 Bytes)*/
			(1 << 0) /*stream_shift_enable*/
			);

		WRITE_VREG(HEVC_CABAC_CONTROL,
			(1 << 0)/*cabac_enable*/
			);

		WRITE_VREG(HEVC_PARSER_CORE_CONTROL,
			(1 << 0)/* hevc_parser_core_clk_en*/
			);


		WRITE_VREG(HEVC_DEC_STATUS_REG, 0);

	}

	if (mask & HW_MASK_BACK) {
		/*Initial IQIT_SCALELUT memory
		-- just to avoid X in simulation*/
		if (is_rdma_enable())
			rdma_back_end_work(pbi->rdma_phy_adr, RDMA_SIZE);
		else {
			WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0);/*cfg_p_addr*/
			for (i = 0; i < 1024; i++)
				WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, 0);
		}
	}

	if (mask & HW_MASK_FRONT) {
		u32 decode_mode;
#ifdef ENABLE_SWAP_TEST
		WRITE_VREG(HEVC_STREAM_SWAP_TEST, 100);
#else
		WRITE_VREG(HEVC_STREAM_SWAP_TEST, 0);
#endif
#ifdef MULTI_INSTANCE_SUPPORT
		/* pick the decode mode from instance/frame-based flags */
		if (!pbi->m_ins_flag) {
			if
			(pbi->low_latency_flag)
				decode_mode = DECODE_MODE_SINGLE_LOW_LATENCY;
			else
				decode_mode = DECODE_MODE_SINGLE;
		} else if (vdec_frame_based(hw_to_vdec(pbi)))
			decode_mode = pbi->no_head ?
				DECODE_MODE_MULTI_FRAMEBASE_NOHEAD :
				DECODE_MODE_MULTI_FRAMEBASE;
		else
			decode_mode = DECODE_MODE_MULTI_STREAMBASE;
#ifdef SUPPORT_FB_DECODING
#ifndef FB_DECODING_TEST_SCHEDULE
		if (pbi->used_stage_buf_num > 0)
			decode_mode |= (0x01 << 24);
#endif
#endif
		WRITE_VREG(DECODE_MODE, decode_mode);
		WRITE_VREG(HEVC_DECODE_SIZE, 0);
		WRITE_VREG(HEVC_DECODE_COUNT, 0);
#else
		WRITE_VREG(DECODE_MODE, DECODE_MODE_SINGLE);
		WRITE_VREG(HEVC_DECODE_PIC_BEGIN_REG, 0);
		WRITE_VREG(HEVC_DECODE_PIC_NUM_REG, 0x7fffffff); /*to remove*/
#endif
		/*Send parser_cmd*/
		WRITE_VREG(HEVC_PARSER_CMD_WRITE, (1 << 16) | (0 << 0));
		for (i = 0; i < PARSER_CMD_NUMBER; i++)
			WRITE_VREG(HEVC_PARSER_CMD_WRITE, parser_cmd[i]);
		WRITE_VREG(HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0);
		WRITE_VREG(HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1);
		WRITE_VREG(HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2);


		WRITE_VREG(HEVC_PARSER_IF_CONTROL,
			/*  (1 << 8) |*/ /*sao_sw_pred_enable*/
			(1 << 5) | /*parser_sao_if_en*/
			(1 << 2) | /*parser_mpred_if_en*/
			(1 << 0) /*parser_scaler_if_en*/
			);
	}

	if (mask & HW_MASK_BACK) {
		/*Changed to Start MPRED in microcode*/
		/*
		pr_info("[test.c] Start MPRED\n");
		WRITE_VREG(HEVC_MPRED_INT_STATUS,
			(1<<31)
			);
		*/
		/* pulse the ipp/mpp software reset, then enable ipp */
		WRITE_VREG(HEVCD_IPP_TOP_CNTL,
			(0 << 1) | /*enable ipp*/
			(1 << 0) /*software reset ipp and mpp*/
			);
		WRITE_VREG(HEVCD_IPP_TOP_CNTL,
			(1 << 1) | /*enable ipp*/
			(0 << 0) /*software reset ipp and mpp*/
			);
		if (get_double_write_mode(pbi) & 0x10) {
			/*Enable NV21 reference read mode for MC*/
			WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31);
		}

		/*Initialize mcrcc and decomp perf counters*/
		if (mcrcc_cache_alg_flag &&
			pbi->init_flag == 0) {
			mcrcc_perfcount_reset();
			decomp_perfcount_reset();
		}
	}
return; +} + + +#ifdef CONFIG_HEVC_CLK_FORCED_ON +static void config_vp9_clk_forced_on(void) +{ + unsigned int rdata32; + /*IQIT*/ + rdata32 = READ_VREG(HEVC_IQIT_CLK_RST_CTRL); + WRITE_VREG(HEVC_IQIT_CLK_RST_CTRL, rdata32 | (0x1 << 2)); + + /* DBLK*/ + rdata32 = READ_VREG(HEVC_DBLK_CFG0); + WRITE_VREG(HEVC_DBLK_CFG0, rdata32 | (0x1 << 2)); + + /* SAO*/ + rdata32 = READ_VREG(HEVC_SAO_CTRL1); + WRITE_VREG(HEVC_SAO_CTRL1, rdata32 | (0x1 << 2)); + + /*MPRED*/ + rdata32 = READ_VREG(HEVC_MPRED_CTRL1); + WRITE_VREG(HEVC_MPRED_CTRL1, rdata32 | (0x1 << 24)); + + /* PARSER*/ + rdata32 = READ_VREG(HEVC_STREAM_CONTROL); + WRITE_VREG(HEVC_STREAM_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_SHIFT_CONTROL); + WRITE_VREG(HEVC_SHIFT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_CABAC_CONTROL); + WRITE_VREG(HEVC_CABAC_CONTROL, rdata32 | (0x1 << 13)); + rdata32 = READ_VREG(HEVC_PARSER_CORE_CONTROL); + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_INT_CONTROL); + WRITE_VREG(HEVC_PARSER_INT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_IF_CONTROL); + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + rdata32 | (0x1 << 6) | (0x1 << 3) | (0x1 << 1)); + + /*IPP*/ + rdata32 = READ_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG); + WRITE_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG, rdata32 | 0xffffffff); + + /* MCRCC*/ + rdata32 = READ_VREG(HEVCD_MCRCC_CTL1); + WRITE_VREG(HEVCD_MCRCC_CTL1, rdata32 | (0x1 << 3)); +} +#endif + + +#ifdef MCRCC_ENABLE +static void dump_hit_rate(struct VP9Decoder_s *pbi) +{ + if (debug & VP9_DEBUG_CACHE_HIT_RATE) { + mcrcc_get_hitrate(pbi->m_ins_flag); + decomp_get_hitrate(); + decomp_get_comprate(); + } +} + +static void config_mcrcc_axi_hw(struct VP9Decoder_s *pbi) +{ + unsigned int rdata32; + unsigned short is_inter; + /*pr_info("Entered config_mcrcc_axi_hw...\n");*/ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2);/* reset mcrcc*/ + is_inter = ((pbi->common.frame_type != KEY_FRAME) && + 
 (!pbi->common.intra_only)) ? 1 : 0;
	if (!is_inter) { /* I-PIC*/
		/*remove reset -- disables clock*/
		WRITE_VREG(HEVCD_MCRCC_CTL1, 0x0);
		return;
	}

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) {
		mcrcc_get_hitrate(pbi->m_ins_flag);
		decomp_get_hitrate();
		decomp_get_comprate();
	}

	/* consecutive reads of CANVAS_DATA_ADDR return successive
	 * canvas entries after the ACCCONFIG write (auto-increment)
	 */
	WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
		(0 << 8) | (1 << 1) | 0);
	rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
	rdata32 = rdata32 & 0xffff;
	rdata32 = rdata32 | (rdata32 << 16);
	WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32);
	/*Programme canvas1 */
	rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
	rdata32 = rdata32 & 0xffff;
	rdata32 = rdata32 | (rdata32 << 16);
	WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32);
	/*enable mcrcc progressive-mode*/
	WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0);
}

/*
 * config_mcrcc_axi_hw_new() - per-frame MC reference cache setup.
 * Ranks the LAST/GOLDEN/ALTREF references by decode-index distance
 * from the current picture (dropping duplicate canvases and
 * out-of-ref entries), then caches the two closest references in
 * MCRCC CTL2/CTL3. Falls back to the legacy LAST/GOLDEN choice when
 * mcrcc_cache_alg_flag is not THODIYIL_MCRCC_CANVAS_ALGX.
 */
static void config_mcrcc_axi_hw_new(struct VP9Decoder_s *pbi)
{
	u32 curr_picnum = -1;
	u32 lastref_picnum = -1;
	u32 goldenref_picnum = -1;
	u32 altref_picnum = -1;

	u32 lastref_delta_picnum;
	u32 goldenref_delta_picnum;
	u32 altref_delta_picnum;

	u32 rdata32;

	u32 lastcanvas;
	u32 goldencanvas;
	u32 altrefcanvas;

	u16 is_inter;
	u16 lastref_inref;
	u16 goldenref_inref;
	u16 altref_inref;

	u32 refcanvas_array[3], utmp;
	int deltapicnum_array[3], tmp;

	struct VP9_Common_s *cm = &pbi->common;
	struct PIC_BUFFER_CONFIG_s *cur_pic_config
		= &cm->cur_frame->buf;
	curr_picnum = cur_pic_config->decode_idx;
	if (cm->frame_refs[0].buf)
		lastref_picnum = cm->frame_refs[0].buf->decode_idx;
	if (cm->frame_refs[1].buf)
		goldenref_picnum = cm->frame_refs[1].buf->decode_idx;
	if (cm->frame_refs[2].buf)
		altref_picnum = cm->frame_refs[2].buf->decode_idx;

	/* absolute decode-index distance of each reference */
	lastref_delta_picnum = (lastref_picnum >= curr_picnum) ?
		(lastref_picnum - curr_picnum) : (curr_picnum - lastref_picnum);
	goldenref_delta_picnum = (goldenref_picnum >= curr_picnum) ?
		(goldenref_picnum - curr_picnum) :
		(curr_picnum - goldenref_picnum);
	altref_delta_picnum =
		(altref_picnum >= curr_picnum) ?
		(altref_picnum - curr_picnum) : (curr_picnum - altref_picnum);

	lastref_inref = (cm->frame_refs[0].idx != INVALID_IDX) ? 1 : 0;
	goldenref_inref = (cm->frame_refs[1].idx != INVALID_IDX) ? 1 : 0;
	altref_inref = (cm->frame_refs[2].idx != INVALID_IDX) ? 1 : 0;

	if (debug & VP9_DEBUG_CACHE)
		pr_info("%s--0--lastref_inref:%d goldenref_inref:%d altref_inref:%d\n",
			__func__, lastref_inref, goldenref_inref, altref_inref);

	WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); /* reset mcrcc */

	is_inter = ((pbi->common.frame_type != KEY_FRAME)
		&& (!pbi->common.intra_only)) ? 1 : 0;

	if (!is_inter) { /* I-PIC */
		/* remove reset -- disables clock */
		WRITE_VREG(HEVCD_MCRCC_CTL1, 0x0);
		return;
	}

	if (!pbi->m_ins_flag)
		dump_hit_rate(pbi);

	WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (1<<1) | 0);
	lastcanvas = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
	goldencanvas = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
	altrefcanvas = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);

	if (debug & VP9_DEBUG_CACHE)
		pr_info("[test.c] lastref_canv:%x goldenref_canv:%x altref_canv:%x\n",
			lastcanvas, goldencanvas, altrefcanvas);

	/* drop references that share a canvas with a higher-priority
	 * one -- caching the same canvas twice is pointless
	 */
	altref_inref = ((altref_inref == 1) &&
		(altrefcanvas != (goldenref_inref
			? goldencanvas : 0xffffffff)) &&
		(altrefcanvas != (lastref_inref ?
			lastcanvas : 0xffffffff))) ? 1 : 0;
	goldenref_inref = ((goldenref_inref == 1) &&
		(goldencanvas != (lastref_inref ?
			lastcanvas : 0xffffffff))) ? 1 : 0;
	if (debug & VP9_DEBUG_CACHE)
		pr_info("[test.c]--1--lastref_inref:%d goldenref_inref:%d altref_inref:%d\n",
			lastref_inref, goldenref_inref, altref_inref);

	/* push unusable references to the end of the sort */
	altref_delta_picnum = altref_inref ? altref_delta_picnum : 0x7fffffff;
	goldenref_delta_picnum = goldenref_inref ?
		goldenref_delta_picnum : 0x7fffffff;
	lastref_delta_picnum = lastref_inref ?
		lastref_delta_picnum : 0x7fffffff;
	if (debug & VP9_DEBUG_CACHE)
		pr_info("[test.c]--1--lastref_delta_picnum:%d goldenref_delta_picnum:%d altref_delta_picnum:%d\n",
			lastref_delta_picnum, goldenref_delta_picnum,
			altref_delta_picnum);
	/*ARRAY SORT HERE DELTA/CANVAS ARRAY SORT -- use DELTA*/

	refcanvas_array[0] = lastcanvas;
	refcanvas_array[1] = goldencanvas;
	refcanvas_array[2] = altrefcanvas;

	deltapicnum_array[0] = lastref_delta_picnum;
	deltapicnum_array[1] = goldenref_delta_picnum;
	deltapicnum_array[2] = altref_delta_picnum;

	/* 3-element sorting network, ascending by delta */
	/* sort0 : 2-to-1 */
	if (deltapicnum_array[2] < deltapicnum_array[1]) {
		utmp = refcanvas_array[2];
		refcanvas_array[2] = refcanvas_array[1];
		refcanvas_array[1] = utmp;
		tmp = deltapicnum_array[2];
		deltapicnum_array[2] = deltapicnum_array[1];
		deltapicnum_array[1] = tmp;
	}
	/* sort1 : 1-to-0 */
	if (deltapicnum_array[1] < deltapicnum_array[0]) {
		utmp = refcanvas_array[1];
		refcanvas_array[1] = refcanvas_array[0];
		refcanvas_array[0] = utmp;
		tmp = deltapicnum_array[1];
		deltapicnum_array[1] = deltapicnum_array[0];
		deltapicnum_array[0] = tmp;
	}
	/* sort2 : 2-to-1 */
	if (deltapicnum_array[2] < deltapicnum_array[1]) {
		utmp = refcanvas_array[2]; refcanvas_array[2] =
			refcanvas_array[1]; refcanvas_array[1] = utmp;
		tmp = deltapicnum_array[2]; deltapicnum_array[2] =
			deltapicnum_array[1]; deltapicnum_array[1] = tmp;
	}
	if (mcrcc_cache_alg_flag ==
		THODIYIL_MCRCC_CANVAS_ALGX) { /*09/15/2017*/
		/* lowest delta_picnum */
		rdata32 = refcanvas_array[0];
		rdata32 = rdata32 & 0xffff;
		rdata32 = rdata32 | (rdata32 << 16);
		WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32);

		/* 2nd-lowest delta_picnum */
		rdata32 = refcanvas_array[1];
		rdata32 = rdata32 & 0xffff;
		rdata32 = rdata32 | (rdata32 << 16);
		WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32);
	} else {
		/* previous version -- LAST/GOLDEN ALWAYS -- before 09/13/2017*/
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			(0 << 8) | (1<<1) | 0);
 rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
		rdata32 = rdata32 & 0xffff;
		rdata32 = rdata32 | (rdata32 << 16);
		WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32);

		/* Programme canvas1 */
		rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
		rdata32 = rdata32 & 0xffff;
		rdata32 = rdata32 | (rdata32 << 16);
		WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32);
	}

	WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); /* enable mcrcc progressive-mode */
	return;
}

#endif


/* release the loop filter state allocated by alloc_lf_buf();
 * safe to call on a partially-allocated or already-freed set
 */
static void free_lf_buf(struct VP9Decoder_s *pbi)
{
	if (pbi->lfi)
		vfree(pbi->lfi);
	if (pbi->lf)
		vfree(pbi->lf);
	if (pbi->seg_4lf)
		vfree(pbi->seg_4lf);
	pbi->lfi = NULL;
	pbi->lf = NULL;
	pbi->seg_4lf = NULL;
}

/* allocate the loop filter state (lfi/lf/seg_4lf); on any failure
 * everything is freed again and -1 is returned, 0 on success
 */
static int alloc_lf_buf(struct VP9Decoder_s *pbi)
{
	pbi->lfi = vmalloc(sizeof(struct loop_filter_info_n));
	pbi->lf = vmalloc(sizeof(struct loopfilter));
	pbi->seg_4lf = vmalloc(sizeof(struct segmentation));
	if (pbi->lfi == NULL || pbi->lf == NULL || pbi->seg_4lf == NULL) {
		free_lf_buf(pbi);
		pr_err("[test.c] vp9_loop_filter init malloc error!!!\n");
		return -1;
	}
	return 0;
}

/*
 * vp9_local_uninit() - free every per-instance buffer allocated by
 * vp9_local_init(): RPM/LMEM DMA buffers, probability/count buffers
 * (TEE-managed on secure G12A+ instances), the frame MMU maps
 * (and the double-write MMU map), stage buffers and the loop filter
 * state. Each pointer is NULLed after release so the call is
 * idempotent.
 */
static void vp9_local_uninit(struct VP9Decoder_s *pbi)
{
	pbi->rpm_ptr = NULL;
	pbi->lmem_ptr = NULL;
	if (pbi->rpm_addr) {
		dma_free_coherent(amports_get_dma_device(),
			RPM_BUF_SIZE,
			pbi->rpm_addr,
			pbi->rpm_phy_addr);
		pbi->rpm_addr = NULL;
	}
	if (pbi->lmem_addr) {
		if (pbi->lmem_phy_addr)
			dma_free_coherent(amports_get_dma_device(),
				LMEM_BUF_SIZE, pbi->lmem_addr,
				pbi->lmem_phy_addr);
		pbi->lmem_addr = NULL;
	}
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) &&
		(vdec_secure(hw_to_vdec(pbi)))) {
		/* secure path: prob/count memory belongs to the TEE */
		tee_vp9_prob_free((u32)pbi->prob_buffer_phy_addr);
		pbi->prob_buffer_phy_addr = 0;
		pbi->count_buffer_phy_addr = 0;
		pbi->prob_buffer_addr = NULL;
		pbi->count_buffer_addr = NULL;
	} else {
		if (pbi->prob_buffer_addr) {
			if (pbi->prob_buffer_phy_addr)
				dma_free_coherent(amports_get_dma_device(),
					PROB_BUF_SIZE, pbi->prob_buffer_addr,
					pbi->prob_buffer_phy_addr);

			pbi->prob_buffer_addr = NULL;
		}
		if (pbi->count_buffer_addr) {
			if (pbi->count_buffer_phy_addr)
				dma_free_coherent(amports_get_dma_device(),
					COUNT_BUF_SIZE, pbi->count_buffer_addr,
					pbi->count_buffer_phy_addr);

			pbi->count_buffer_addr = NULL;
		}
	}
	if (pbi->mmu_enable) {
		u32 mmu_map_size = vvp9_frame_mmu_map_size(pbi);
		if (pbi->frame_mmu_map_addr) {
			if (pbi->frame_mmu_map_phy_addr)
				dma_free_coherent(amports_get_dma_device(),
					mmu_map_size,
					pbi->frame_mmu_map_addr,
					pbi->frame_mmu_map_phy_addr);
			pbi->frame_mmu_map_addr = NULL;
		}
	}

#ifdef VP9_10B_MMU_DW
	if (pbi->dw_mmu_enable) {
		u32 dw_mmu_map_size = vvp9_frame_mmu_map_size(pbi);
		if (pbi->frame_mmu_dw_map_addr) {
			if (pbi->frame_mmu_dw_map_phy_addr)
				dma_free_coherent(amports_get_dma_device(),
					dw_mmu_map_size,
					pbi->frame_mmu_dw_map_addr,
					pbi->frame_mmu_dw_map_phy_addr);
			pbi->frame_mmu_dw_map_addr = NULL;
		}
	}
#endif

#ifdef SUPPORT_FB_DECODING
	if (pbi->stage_mmu_map_addr) {
		if (pbi->stage_mmu_map_phy_addr)
			dma_free_coherent(amports_get_dma_device(),
				STAGE_MMU_MAP_SIZE * STAGE_MAX_BUFFERS,
				pbi->stage_mmu_map_addr,
				pbi->stage_mmu_map_phy_addr);
		pbi->stage_mmu_map_addr = NULL;
	}

	uninit_stage_buf(pbi);
#endif

#ifdef VP9_LPF_LVL_UPDATE
	free_lf_buf(pbi);
#endif
	if (pbi->gvs)
		vfree(pbi->gvs);
	pbi->gvs = NULL;
}

static int vp9_local_init(struct VP9Decoder_s *pbi)
{
	int ret = -1;
	/*int losless_comp_header_size, losless_comp_body_size;*/

	struct BuffInfo_s *cur_buf_info = NULL;

	memset(&pbi->param, 0, sizeof(union param_u));
	memset(&pbi->common, 0, sizeof(struct VP9_Common_s));
#ifdef MULTI_INSTANCE_SUPPORT
	cur_buf_info = &pbi->work_space_buf_store;
	/* workspace spec: forced via module param, otherwise chosen
	 * from CPU generation and 4k capability
	 */
	if (force_bufspec) {
		memcpy(cur_buf_info, &amvvp9_workbuff_spec[force_bufspec & 0xf],
			sizeof(struct BuffInfo_s));
		pr_info("force buffer spec %d\n", force_bufspec & 0xf);
	} else {
		if (get_cpu_major_id() <=
AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + memcpy(cur_buf_info, &amvvp9_workbuff_spec[2], /* 8k */ + sizeof(struct BuffInfo_s)); + } else + memcpy(cur_buf_info, &amvvp9_workbuff_spec[1], /* 4k */ + sizeof(struct BuffInfo_s)); + } else + memcpy(cur_buf_info, &amvvp9_workbuff_spec[0],/* 1080p */ + sizeof(struct BuffInfo_s)); + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) { + memcpy(cur_buf_info, &amvvp9_workbuff_spec[5], /* 8k */ + sizeof(struct BuffInfo_s)); + } else + memcpy(cur_buf_info, &amvvp9_workbuff_spec[3],/* 1080p */ + sizeof(struct BuffInfo_s)); + } + } + + cur_buf_info->start_adr = pbi->buf_start; + if (!pbi->mmu_enable) + pbi->mc_buf_spec.buf_end = pbi->buf_start + pbi->buf_size; +#endif + + init_buff_spec(pbi, cur_buf_info); + vp9_bufmgr_init(pbi, cur_buf_info, NULL); + + /* vp9_max_pic_w/h for debug */ + pbi->init_pic_w = (vp9_max_pic_w) ? vp9_max_pic_w: + ((pbi->max_pic_w) ? pbi->max_pic_w : pbi->work_space_buf->max_width); + pbi->init_pic_h = (vp9_max_pic_h) ? vp9_max_pic_h: + ((pbi->max_pic_h) ? 
pbi->max_pic_h : pbi->work_space_buf->max_height); + + /* video is not support unaligned with 64 in tl1 + ** vdec canvas mode will be linear when dump yuv is set + */ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) && + (pbi->double_write_mode != 0) && + (((pbi->max_pic_w % 64) != 0) || + (pbi->vvp9_amstream_dec_info.width % 64) != 0)) { + if (hw_to_vdec(pbi)->canvas_mode != + CANVAS_BLKMODE_LINEAR) + pbi->mem_map_mode = 2; + else { + pbi->mem_map_mode = 0; + pr_info("vdec blkmod linear, force mem_map_mode 0\n"); + } + } + +#ifndef MV_USE_FIXED_BUF + if (!pbi->is_used_v4l) { + if (init_mv_buf_list(pbi) < 0) { + pr_err("%s: init_mv_buf_list fail\n", __func__); + return -1; + } + } +#endif + if (pbi->save_buffer_mode) + pbi->used_buf_num = MAX_BUF_NUM_SAVE_BUF; + else { + if (pbi->is_used_v4l) + pbi->used_buf_num = 5 + pbi->dynamic_buf_num_margin; + else + pbi->used_buf_num = max_buf_num; + } + + if (pbi->used_buf_num > MAX_BUF_NUM) + pbi->used_buf_num = MAX_BUF_NUM; + if (pbi->used_buf_num > FRAME_BUFFERS) + pbi->used_buf_num = FRAME_BUFFERS; + + pbi->pts_unstable = ((unsigned long)(pbi->vvp9_amstream_dec_info.param) + & 0x40) >> 6; + + if ((debug & VP9_DEBUG_SEND_PARAM_WITH_REG) == 0) { + pbi->rpm_addr = dma_alloc_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, + &pbi->rpm_phy_addr, GFP_KERNEL); + if (pbi->rpm_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + return -1; + } + + pbi->rpm_ptr = pbi->rpm_addr; + } + + pbi->lmem_addr = dma_alloc_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, + &pbi->lmem_phy_addr, GFP_KERNEL); + if (pbi->lmem_addr == NULL) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + return -1; + } + pbi->lmem_ptr = pbi->lmem_addr; + + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) && + (vdec_secure(hw_to_vdec(pbi)))) { + u32 prob_addr, id; + id = tee_vp9_prob_malloc(&prob_addr); + if (prob_addr <= 0) + pr_err("%s, tee[%d] malloc prob buf failed\n", __func__, id); + else { + 
pbi->prob_buffer_phy_addr = prob_addr; + pbi->count_buffer_phy_addr = pbi->prob_buffer_phy_addr + PROB_BUF_SIZE; + } + pbi->prob_buffer_addr = NULL; + pbi->count_buffer_addr = NULL; + } else { + pbi->prob_buffer_addr = dma_alloc_coherent(amports_get_dma_device(), + PROB_BUF_SIZE, + &pbi->prob_buffer_phy_addr, GFP_KERNEL); + if (pbi->prob_buffer_addr == NULL) { + pr_err("%s: failed to alloc prob_buffer\n", __func__); + return -1; + } + memset(pbi->prob_buffer_addr, 0, PROB_BUF_SIZE); + pbi->count_buffer_addr = dma_alloc_coherent(amports_get_dma_device(), + COUNT_BUF_SIZE, + &pbi->count_buffer_phy_addr, GFP_KERNEL); + if (pbi->count_buffer_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(pbi->count_buffer_addr, 0, COUNT_BUF_SIZE); + } + + if (pbi->mmu_enable) { + u32 mmu_map_size = vvp9_frame_mmu_map_size(pbi); + pbi->frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + mmu_map_size, + &pbi->frame_mmu_map_phy_addr, GFP_KERNEL); + if (pbi->frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(pbi->frame_mmu_map_addr, 0, COUNT_BUF_SIZE); + } +#ifdef VP9_10B_MMU_DW + if (pbi->dw_mmu_enable) { + u32 dw_mmu_map_size = vvp9_frame_mmu_map_size(pbi); + pbi->frame_mmu_dw_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + dw_mmu_map_size, + &pbi->frame_mmu_dw_map_phy_addr, GFP_KERNEL); + if (pbi->frame_mmu_dw_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer mmu dw\n", __func__); + return -1; + } + memset(pbi->frame_mmu_dw_map_addr, 0, COUNT_BUF_SIZE); + } +#endif +#ifdef SUPPORT_FB_DECODING + if (pbi->m_ins_flag && stage_buf_num > 0) { + pbi->stage_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + STAGE_MMU_MAP_SIZE * STAGE_MAX_BUFFERS, + &pbi->stage_mmu_map_phy_addr, GFP_KERNEL); + if (pbi->stage_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + 
memset(pbi->stage_mmu_map_addr, + 0, STAGE_MMU_MAP_SIZE * STAGE_MAX_BUFFERS); + + init_stage_buf(pbi); + } +#endif + + ret = 0; + return ret; +} + +/******************************************** + * Mailbox command + ********************************************/ +#define CMD_FINISHED 0 +#define CMD_ALLOC_VIEW 1 +#define CMD_FRAME_DISPLAY 3 +#define CMD_DEBUG 10 + + +#define DECODE_BUFFER_NUM_MAX 32 +#define DISPLAY_BUFFER_NUM 6 + +#define video_domain_addr(adr) (adr&0x7fffffff) +#define DECODER_WORK_SPACE_SIZE 0x800000 + +#define spec2canvas(x) \ + (((x)->uv_canvas_index << 16) | \ + ((x)->uv_canvas_index << 8) | \ + ((x)->y_canvas_index << 0)) + + +static void set_canvas(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + struct vdec_s *vdec = hw_to_vdec(pbi); + int canvas_w = ALIGN(pic_config->y_crop_width, 64)/4; + int canvas_h = ALIGN(pic_config->y_crop_height, 32)/4; + int blkmode = pbi->mem_map_mode; + /*CANVAS_BLKMODE_64X32*/ + if (pic_config->double_write_mode) { + canvas_w = pic_config->y_crop_width / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + canvas_h = pic_config->y_crop_height / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + + /* sao ctrl1 reg alignline with 64, align with 64 */ + canvas_w = ALIGN(canvas_w, 64); + canvas_h = ALIGN(canvas_h, 32); + + if (vdec->parallel_dec == 1) { + if (pic_config->y_canvas_index == -1) + pic_config->y_canvas_index = + vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + if (pic_config->uv_canvas_index == -1) + pic_config->uv_canvas_index = + vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + } else { + pic_config->y_canvas_index = 128 + pic_config->index * 2; + pic_config->uv_canvas_index = 128 + pic_config->index * 2 + 1; + } + + config_cav_lut_ex(pic_config->y_canvas_index, + pic_config->dw_y_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, pbi->is_used_v4l ? 
0 : 7, VDEC_HEVC); + config_cav_lut_ex(pic_config->uv_canvas_index, + pic_config->dw_u_v_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, pbi->is_used_v4l ? 0 : 7, VDEC_HEVC); + +#ifdef MULTI_INSTANCE_SUPPORT + pic_config->canvas_config[0].phy_addr = + pic_config->dw_y_adr; + pic_config->canvas_config[0].width = + canvas_w; + pic_config->canvas_config[0].height = + canvas_h; + pic_config->canvas_config[0].block_mode = + blkmode; + pic_config->canvas_config[0].endian = pbi->is_used_v4l ? 0 : 7; + + pic_config->canvas_config[1].phy_addr = + pic_config->dw_u_v_adr; + pic_config->canvas_config[1].width = + canvas_w; + pic_config->canvas_config[1].height = + canvas_h; + pic_config->canvas_config[1].block_mode = + blkmode; + pic_config->canvas_config[1].endian = pbi->is_used_v4l ? 0 : 7; +#endif + } +} + + +static void set_frame_info(struct VP9Decoder_s *pbi, struct vframe_s *vf) +{ + unsigned int ar = DISP_RATIO_ASPECT_RATIO_MAX; + vf->duration = pbi->frame_dur; + vf->duration_pulldown = 0; + vf->flag = 0; + vf->prop.master_display_colour = pbi->vf_dp; + vf->signal_type = pbi->video_signal_type; + if (vf->compWidth && vf->compHeight) + pbi->frame_ar = vf->compHeight * 0x100 / vf->compWidth; + ar = min_t(u32, ar, DISP_RATIO_ASPECT_RATIO_MAX); + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + vf->sar_width = 1; + vf->sar_height = 1; + + if (pbi->is_used_v4l && pbi->vf_dp.present_flag) { + struct aml_vdec_hdr_infos hdr; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + + memset(&hdr, 0, sizeof(hdr)); + hdr.signal_type = vf->signal_type; + hdr.color_parms = pbi->vf_dp; + + hdr.color_parms.luminance[0] = hdr.color_parms.luminance[0] / 10000; + + vdec_v4l_set_hdr_infos(ctx, &hdr); + } + + if ((pbi->chunk != NULL) && (pbi->chunk->hdr10p_data_buf != NULL) + && (pbi->chunk->hdr10p_data_size != 0)) { + if (pbi->chunk->hdr10p_data_size <= 128) { + char *new_buf; + int i = 0; + new_buf = kzalloc(pbi->chunk->hdr10p_data_size, GFP_ATOMIC); 
+ + if (new_buf) { + memcpy(new_buf, pbi->chunk->hdr10p_data_buf, pbi->chunk->hdr10p_data_size); + if (debug & VP9_DEBUG_BUFMGR_MORE) { + vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE, + "hdr10p data: (size %d)\n", + pbi->chunk->hdr10p_data_size); + for (i = 0; i < pbi->chunk->hdr10p_data_size; i++) { + vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE, + "%02x ", pbi->chunk->hdr10p_data_buf[i]); + if (((i + 1) & 0xf) == 0) + vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE, "\n"); + } + vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE, "\n"); + } + + vf->hdr10p_data_size = pbi->chunk->hdr10p_data_size; + vf->hdr10p_data_buf = new_buf; + } else { + vp9_print(pbi, 0, "%s:hdr10p data vzalloc size(%d) fail\n", + __func__, pbi->chunk->hdr10p_data_size); + vf->hdr10p_data_size = pbi->chunk->hdr10p_data_size; + vf->hdr10p_data_buf = new_buf; + } + } + + vfree(pbi->chunk->hdr10p_data_buf); + pbi->chunk->hdr10p_data_buf = NULL; + pbi->chunk->hdr10p_data_size = 0; + } + + vf->sidebind_type = pbi->sidebind_type; + vf->sidebind_channel_id = pbi->sidebind_channel_id; +} + +static int vvp9_vf_states(struct vframe_states *states, void *op_arg) +{ + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)op_arg; + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&pbi->newframe_q); + states->buf_avail_num = kfifo_len(&pbi->display_q); + + if (step == 2) + states->buf_avail_num = 0; + return 0; +} + +static struct vframe_s *vvp9_vf_peek(void *op_arg) +{ + struct vframe_s *vf[2] = {0, 0}; + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)op_arg; + + if (step == 2) + return NULL; + + if (kfifo_len(&pbi->display_q) > VF_POOL_SIZE) { + vp9_print(pbi, VP9_DEBUG_BUFMGR, + "kfifo len:%d invaild, peek error\n", + kfifo_len(&pbi->display_q)); + return NULL; + } + + if (kfifo_out_peek(&pbi->display_q, (void *)&vf, 2)) { + if (vf[1]) { + vf[0]->next_vf_pts_valid = true; + vf[0]->next_vf_pts = vf[1]->pts; + } else + vf[0]->next_vf_pts_valid = false; + return vf[0]; + } + + return NULL; +} + +static struct vframe_s 
*vvp9_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)op_arg; + + if (step == 2) + return NULL; + else if (step == 1) + step = 2; + + if (kfifo_get(&pbi->display_q, &vf)) { + struct vframe_s *next_vf = NULL; + uint8_t index = vf->index & 0xff; + ATRACE_COUNTER(pbi->trace.disp_q_name, kfifo_len(&pbi->display_q)); + if (index < pbi->used_buf_num || + (vf->type & VIDTYPE_V4L_EOS)) { + vf->index_disp = pbi->vf_get_count; + pbi->vf_get_count++; + if (debug & VP9_DEBUG_BUFMGR) + pr_info("%s idx: %d, type 0x%x w/h %d/%d, pts %d, %lld, ts: %lld\n", + __func__, index, vf->type, + vf->width, vf->height, + vf->pts, + vf->pts_us64, + vf->timestamp); + + if (kfifo_peek(&pbi->display_q, &next_vf) && next_vf) { + vf->next_vf_pts_valid = true; + vf->next_vf_pts = next_vf->pts; + } else + vf->next_vf_pts_valid = false; + + return vf; + } + } + + return NULL; +} + +static void vvp9_vf_put(struct vframe_s *vf, void *op_arg) +{ + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)op_arg; + uint8_t index; + + if (vf == (&pbi->vframe_dummy)) + return; + + if (!vf) + return; + + if (pbi->enable_fence && vf->fence) { + int ret, i; + + mutex_lock(&pbi->fence_mutex); + ret = dma_fence_get_status(vf->fence); + if (ret == 0) { + for (i = 0; i < VF_POOL_SIZE; i++) { + if (pbi->fence_vf_s.fence_vf[i] == NULL) { + pbi->fence_vf_s.fence_vf[i] = vf; + pbi->fence_vf_s.used_size++; + mutex_unlock(&pbi->fence_mutex); + return; + } + } + } + mutex_unlock(&pbi->fence_mutex); + } + + index = vf->index & 0xff; + + if (pbi->enable_fence && vf->fence) { + vdec_fence_put(vf->fence); + vf->fence = NULL; + } + + if (vf->hdr10p_data_buf) { + kfree(vf->hdr10p_data_buf); + vf->hdr10p_data_buf = NULL; + vf->hdr10p_data_size = 0; + } + + kfifo_put(&pbi->newframe_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(pbi->trace.new_q_name, kfifo_len(&pbi->newframe_q)); + pbi->vf_put_count++; + + if (debug & VP9_DEBUG_BUFMGR) + pr_info("%s idx: %d, type 0x%x w/h %d/%d, 
pts %d, %lld, ts: %lld\n", + __func__, index, vf->type, + vf->width, vf->height, + vf->pts, + vf->pts_us64, + vf->timestamp); + + if (index < pbi->used_buf_num) { + struct VP9_Common_s *cm = &pbi->common; + struct BufferPool_s *pool = cm->buffer_pool; + unsigned long flags; + + lock_buffer_pool(pool, flags); + if (pool->frame_bufs[index].buf.vf_ref > 0) + pool->frame_bufs[index].buf.vf_ref--; + + if (pbi->wait_buf) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + pbi->last_put_idx = index; + pbi->new_frame_displayed++; + unlock_buffer_pool(pool, flags); +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0 && + pbi->back_not_run_ready) + trigger_schedule(pbi); +#endif + } + +} + +static int vvp9_event_cb(int type, void *data, void *private_data) +{ + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)private_data; + + if (type & VFRAME_EVENT_RECEIVER_RESET) { +#if 0 + unsigned long flags; + + amhevc_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_light_unreg_provider(&vvp9_vf_prov); +#endif + spin_lock_irqsave(&pbi->lock, flags); + vvp9_local_init(); + vvp9_prot_init(); + spin_unlock_irqrestore(&pbi->lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vvp9_vf_prov); +#endif + amhevc_start(); +#endif + } else if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(hw_to_vdec(pbi)); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +void inc_vf_ref(struct VP9Decoder_s *pbi, int index) +{ + struct VP9_Common_s *cm = &pbi->common; + + cm->buffer_pool->frame_bufs[index].buf.vf_ref++; + + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info("%s index = %d new vf_ref = %d\r\n", + __func__, index, + cm->buffer_pool->frame_bufs[index].buf.vf_ref); +} + +static int frame_duration_adapt(struct VP9Decoder_s *pbi, struct vframe_s *vf, u32 valid) +{ + u32 old_duration, pts_duration = 0; + 
u32 pts = vf->pts; + + if (pbi->get_frame_dur == true) + return true; + + pbi->frame_cnt_window++; + if (!(pbi->vp9_first_pts_ready == 1)) { + if (valid) { + pbi->pts1 = pts; + pbi->frame_cnt_window = 0; + pbi->duration_from_pts_done = 0; + pbi->vp9_first_pts_ready = 1; + } else { + return false; + } + } else { + if (pts < pbi->pts1) { + if (pbi->frame_cnt_window > FRAME_CNT_WINDOW_SIZE) { + pbi->pts1 = pts; + pbi->frame_cnt_window = 0; + } + } + + if (valid && (pbi->frame_cnt_window > FRAME_CNT_WINDOW_SIZE) && + (pts > pbi->pts1) && (pbi->duration_from_pts_done == 0)) { + old_duration = pbi->frame_dur; + pbi->pts2 = pts; + pts_duration = (((pbi->pts2 - pbi->pts1) * 16) / + (pbi->frame_cnt_window * 15)); + + if (close_to(pts_duration, old_duration, 2000)) { + pbi->frame_dur = pts_duration; + if ((debug & VP9_DEBUG_OUT_PTS) != 0) + pr_info("use calc duration %d\n", pts_duration); + } + + if (pbi->duration_from_pts_done == 0) { + if (close_to(pts_duration, old_duration, RATE_CORRECTION_THRESHOLD)) { + pbi->duration_from_pts_done = 1; + } else { + if (!close_to(pts_duration, + old_duration, 1000) && + !close_to(pts_duration, + pbi->frame_dur, 1000) && + close_to(pts_duration, + pbi->last_duration, 200)) { + /* frame_dur must + * wrong,recover it. 
+ */ + pbi->frame_dur = pts_duration; + } + pbi->pts1 = pbi->pts2; + pbi->frame_cnt_window = 0; + pbi->duration_from_pts_done = 0; + } + } + pbi->last_duration = pts_duration; + } + } + return true; +} + +static void update_vf_memhandle(struct VP9Decoder_s *pbi, + struct vframe_s *vf, struct PIC_BUFFER_CONFIG_s *pic) +{ + vf->mem_handle = NULL; + vf->mem_head_handle = NULL; + vf->mem_dw_handle = NULL; + + /* keeper not needed for v4l solution */ + if (pbi->is_used_v4l) + return; + + if (vf->type & VIDTYPE_SCATTER) { + +#ifdef VP9_10B_MMU_DW + if (pic->double_write_mode & 0x20) { + vf->mem_handle = + decoder_mmu_box_get_mem_handle( + pbi->mmu_box_dw, pic->index); + vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + pbi->bmmu_box, + HEADER_BUFFER_IDX(pic->BUF_index)); + vf->mem_dw_handle = NULL; + } else +#endif + { + vf->mem_handle = + decoder_mmu_box_get_mem_handle( + pbi->mmu_box, pic->index); + vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + pbi->bmmu_box, + HEADER_BUFFER_IDX(pic->BUF_index)); + if (pbi->double_write_mode == 3) + vf->mem_dw_handle = + decoder_bmmu_box_get_mem_handle( + pbi->bmmu_box, + VF_BUFFER_IDX(pic->BUF_index)); + else + vf->mem_dw_handle = NULL; + } + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + pbi->bmmu_box, VF_BUFFER_IDX(pic->BUF_index)); + vf->mem_head_handle = NULL; + vf->mem_dw_handle = NULL; + /*vf->mem_head_handle = + *decoder_bmmu_box_get_mem_handle( + *hevc->bmmu_box, VF_BUFFER_IDX(BUF_index)); + */ + } +} + +static inline void pbi_update_gvs(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + if (pbi->gvs->frame_height != pic_config->y_crop_height) { + pbi->gvs->frame_width = pic_config->y_crop_width; + pbi->gvs->frame_height = pic_config->y_crop_height; + } + if (pbi->gvs->frame_dur != pbi->frame_dur) { + pbi->gvs->frame_dur = pbi->frame_dur; + if (pbi->frame_dur != 0) + pbi->gvs->frame_rate = ((96000 * 10 / pbi->frame_dur) % 10) < 5 ? 
+ 96000 / pbi->frame_dur : (96000 / pbi->frame_dur +1); + else + pbi->gvs->frame_rate = -1; + } + pbi->gvs->status = pbi->stat | pbi->fatal_error; +} + +static int prepare_display_buf(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + struct vframe_s *vf = NULL; + struct vdec_s *pvdec = hw_to_vdec(pbi); + int stream_offset = pic_config->stream_offset; + unsigned short slice_type = pic_config->slice_type; + struct aml_vcodec_ctx * v4l2_ctx = pbi->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + ulong nv_order = VIDTYPE_VIU_NV21; + u32 pts_valid = 0, pts_us64_valid = 0; + u32 pts_save; + u64 pts_us64_save; + u32 frame_size = 0; + int i = 0; + + + if (debug & VP9_DEBUG_BUFMGR) + pr_info("%s index = %d\r\n", __func__, pic_config->index); + if (kfifo_get(&pbi->newframe_q, &vf) == 0) { + pr_info("fatal error, no available buffer slot."); + return -1; + } + + /* swap uv */ + if (pbi->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + nv_order = VIDTYPE_VIU_NV12; + } + + if (pic_config->double_write_mode && + (pic_config->double_write_mode & 0x20) == 0) + set_canvas(pbi, pic_config); + + display_frame_count[pbi->index]++; + if (vf) { + if (!force_pts_unstable) { + if ((pic_config->pts == 0) || ((pic_config->pts <= pbi->last_pts) && + (pic_config->pts64 <= pbi->last_pts_us64))) { + for (i = (FRAME_BUFFERS - 1); i > 0; i--) { + if ((pbi->last_pts == pbi->frame_mode_pts_save[i]) || + (pbi->last_pts_us64 == pbi->frame_mode_pts64_save[i])) { + pic_config->pts = pbi->frame_mode_pts_save[i - 1]; + pic_config->pts64 = pbi->frame_mode_pts64_save[i - 1]; + break; + } + } + if ((i == 0) || (pic_config->pts <= pbi->last_pts)) { + vp9_print(pbi, VP9_DEBUG_OUT_PTS, + "no found pts %d, set 0. 
%d, %d\n", + i, pic_config->pts, pbi->last_pts); + pic_config->pts = 0; + pic_config->pts64 = 0; + } + } + } + + if (pbi->is_used_v4l) { + vf->v4l_mem_handle + = pbi->m_BUF[pic_config->BUF_index].v4l_ref_buf_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + if (pbi->mmu_enable) { + vf->mm_box.bmmu_box = pbi->bmmu_box; + vf->mm_box.bmmu_idx = HEADER_BUFFER_IDX(pbi->buffer_wrap[pic_config->BUF_index]); + vf->mm_box.mmu_box = pbi->mmu_box; + vf->mm_box.mmu_idx = pbi->buffer_wrap[pic_config->index]; + } + } + + if (pbi->enable_fence) { + /* fill fence information. */ + if (pbi->fence_usage == FENCE_USE_FOR_DRIVER) + vf->fence = pic_config->fence; + } + +#ifdef MULTI_INSTANCE_SUPPORT + if (vdec_frame_based(pvdec)) { + vf->pts = pic_config->pts; + vf->pts_us64 = pic_config->pts64; + + if (pbi->is_used_v4l && v4l_bitstream_id_enable) + vf->timestamp = pic_config->timestamp; + else + vf->timestamp = pic_config->pts64; + + if (vf->pts != 0 || vf->pts_us64 != 0) { + pts_valid = 1; + pts_us64_valid = 1; + } else { + pts_valid = 0; + pts_us64_valid = 0; + } + } else +#endif + /* if (pts_lookup_offset(PTS_TYPE_VIDEO, + * stream_offset, &vf->pts, 0) != 0) { + */ + if ((pvdec->vbuf.no_parser == 0) || (pvdec->vbuf.use_ptsserv)) { + if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, stream_offset, &vf->pts, + &frame_size, 0, + &vf->pts_us64) != 0) { +#ifdef DEBUG_PTS + pbi->pts_missed++; +#endif + vf->pts = 0; + vf->pts_us64 = 0; + pts_valid = 0; + pts_us64_valid = 0; + } else { +#ifdef DEBUG_PTS + pbi->pts_hit++; +#endif + pts_valid = 1; + pts_us64_valid = 1; + } + } + + fill_frame_info(pbi, pic_config, frame_size, vf->pts); + + pts_save = vf->pts; + pts_us64_save = vf->pts_us64; + if (pbi->is_used_v4l || pbi->pts_unstable) { + frame_duration_adapt(pbi, vf, pts_valid); + if (pbi->duration_from_pts_done) { + pbi->pts_mode = PTS_NONE_REF_USE_DURATION; + } else { + if (pts_valid || pts_us64_valid) + pbi->pts_mode = PTS_NORMAL; + } + } + + if ((pbi->pts_mode == PTS_NORMAL) && 
(vf->pts != 0) + && pbi->get_frame_dur) { + int pts_diff = (int)vf->pts - pbi->last_lookup_pts; + + if (pts_diff < 0) { + pbi->pts_mode_switching_count++; + pbi->pts_mode_recovery_count = 0; + + if (pbi->pts_mode_switching_count >= + PTS_MODE_SWITCHING_THRESHOLD) { + pbi->pts_mode = + PTS_NONE_REF_USE_DURATION; + pr_info + ("HEVC: switch to n_d mode.\n"); + } + + } else { + int p = PTS_MODE_SWITCHING_RECOVERY_THREASHOLD; + + pbi->pts_mode_recovery_count++; + if (pbi->pts_mode_recovery_count > p) { + pbi->pts_mode_switching_count = 0; + pbi->pts_mode_recovery_count = 0; + } + } + } + + if (vf->pts != 0) + pbi->last_lookup_pts = vf->pts; + + if ((pbi->pts_mode == PTS_NONE_REF_USE_DURATION) + && (slice_type != KEY_FRAME)) + vf->pts = pbi->last_pts + DUR2PTS(pbi->frame_dur); + pbi->last_pts = vf->pts; + + if (vf->pts_us64 != 0) + pbi->last_lookup_pts_us64 = vf->pts_us64; + + if ((pbi->pts_mode == PTS_NONE_REF_USE_DURATION) + && (slice_type != KEY_FRAME)) { + vf->pts_us64 = + pbi->last_pts_us64 + + (DUR2PTS(pbi->frame_dur) * 100 / 9); + } + pbi->last_pts_us64 = vf->pts_us64; + + if (pbi->pts_mode == PTS_NONE_REF_USE_DURATION) { + vf->disp_pts = vf->pts; + vf->disp_pts_us64 = vf->pts_us64; + vf->pts = pts_save; + vf->pts_us64 = pts_us64_save; + } else { + vf->disp_pts = 0; + vf->disp_pts_us64 = 0; + } + + vf->index = 0xff00 | pic_config->index; + + if (pic_config->double_write_mode & 0x10) { + /* double write only */ + vf->compBodyAddr = 0; + vf->compHeadAddr = 0; +#ifdef VP9_10B_MMU_DW + vf->dwBodyAddr = 0; + vf->dwHeadAddr = 0; +#endif + } else { + if (pbi->mmu_enable) { + vf->compBodyAddr = 0; + vf->compHeadAddr = pic_config->header_adr; +#ifdef VP9_10B_MMU_DW + vf->dwBodyAddr = 0; + vf->dwHeadAddr = 0; + if (pic_config->double_write_mode & 0x20) { + u32 mode = pic_config->double_write_mode & 0xf; + if (mode == 5 || mode == 3) + vf->dwHeadAddr = pic_config->header_dw_adr; + else if ((mode == 1 || mode == 2 || mode == 4) + && (debug & VP9_DEBUG_OUT_PTS) == 0) { + 
vf->compHeadAddr = pic_config->header_dw_adr; + pr_debug("Use dw mmu for display\n"); + } + } +#endif + } else { + /*vf->compBodyAddr = pic_config->mc_y_adr; + *vf->compHeadAddr = pic_config->mc_y_adr + + *pic_config->comp_body_size; */ + /*head adr*/ + } + vf->canvas0Addr = vf->canvas1Addr = 0; + } + if (pic_config->double_write_mode && + (pic_config->double_write_mode & 0x20) == 0) { + vf->type = VIDTYPE_PROGRESSIVE | + VIDTYPE_VIU_FIELD; + vf->type |= nv_order; + if ((pic_config->double_write_mode == 3 || + pic_config->double_write_mode == 5) && + (!IS_8K_SIZE(pic_config->y_crop_width, + pic_config->y_crop_height))) { + vf->type |= VIDTYPE_COMPRESS; + if (pbi->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + pic_config->canvas_config[0]; + vf->canvas0_config[1] = + pic_config->canvas_config[1]; + vf->canvas1_config[0] = + pic_config->canvas_config[0]; + vf->canvas1_config[1] = + pic_config->canvas_config[1]; + + } else +#endif + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(pic_config); + } else { + vf->canvas0Addr = vf->canvas1Addr = 0; + vf->type = VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; + if (pbi->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + + switch (pic_config->bit_depth) { + case VPX_BITS_8: + vf->bitdepth = BITDEPTH_Y8 | + BITDEPTH_U8 | BITDEPTH_V8; + break; + case VPX_BITS_10: + case VPX_BITS_12: + vf->bitdepth = BITDEPTH_Y10 | + BITDEPTH_U10 | BITDEPTH_V10; + break; + default: + vf->bitdepth = BITDEPTH_Y10 | + BITDEPTH_U10 | BITDEPTH_V10; + break; + } + if ((vf->type & VIDTYPE_COMPRESS) == 0) + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + if (pic_config->bit_depth == VPX_BITS_8) + vf->bitdepth |= BITDEPTH_SAVING_MODE; + + vf->width = pic_config->y_crop_width / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + vf->height = pic_config->y_crop_height / + 
get_double_write_ratio( + pic_config->double_write_mode & 0xf); + if (force_w_h != 0) { + vf->width = (force_w_h >> 16) & 0xffff; + vf->height = force_w_h & 0xffff; + } + if ((pic_config->double_write_mode & 0x20) && + ((pic_config->double_write_mode & 0xf) == 2 || + (pic_config->double_write_mode & 0xf) == 4)) { + vf->compWidth = pic_config->y_crop_width / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + vf->compHeight = pic_config->y_crop_height / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + } else { + vf->compWidth = pic_config->y_crop_width; + vf->compHeight = pic_config->y_crop_height; + } + set_frame_info(pbi, vf); + if (force_fps & 0x100) { + u32 rate = force_fps & 0xff; + + if (rate) + vf->duration = 96000/rate; + else + vf->duration = 0; + } + update_vf_memhandle(pbi, vf, pic_config); + if (vdec_stream_based(pvdec) && (!pvdec->vbuf.use_ptsserv)) { + vf->pts_us64 = stream_offset; + vf->pts = 0; + } + if ((debug & VP9_DEBUG_OUT_PTS) != 0) { + pr_info + ("VP9 dec out pts: pts_mode=%d,dur=%d,pts(%d,%lld,%lld)(%d,%lld)\n", + pbi->pts_mode, pbi->frame_dur, vf->pts, + vf->pts_us64, vf->timestamp, pts_save, + pts_us64_save); + } + if (!(pic_config->y_crop_width == 196 + && pic_config->y_crop_height == 196 + && (debug & VP9_DEBUG_NO_TRIGGER_FRAME) == 0 + && (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_TXLX))) { + struct vdec_info tmp4x; + + inc_vf_ref(pbi, pic_config->index); + decoder_do_frame_check(pvdec, vf); + vdec_vframe_ready(pvdec, vf); + kfifo_put(&pbi->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(pbi->trace.pts_name, vf->pts); + ATRACE_COUNTER(pbi->trace.new_q_name, kfifo_len(&pbi->newframe_q)); + ATRACE_COUNTER(pbi->trace.disp_q_name, kfifo_len(&pbi->display_q)); + pbi->vf_pre_count++; + pbi_update_gvs(pbi, pic_config); + /*count info*/ + vdec_count_info(pbi->gvs, 0, stream_offset); + if (stream_offset) { + if (slice_type == KEY_FRAME) { + pbi->gvs->i_decoded_frames++; + } else if (slice_type == 
INTER_FRAME) { + pbi->gvs->p_decoded_frames++; + } else if (slice_type == FRAME_TYPES) { + pbi->gvs->b_decoded_frames++; + } + } + memcpy(&tmp4x, pbi->gvs, sizeof(struct vdec_info)); + tmp4x.bit_depth_luma = pbi->vp9_param.p.bit_depth; + tmp4x.bit_depth_chroma = pbi->vp9_param.p.bit_depth; + tmp4x.double_write_mode = pic_config->double_write_mode; + vdec_fill_vdec_frame(pvdec, &pbi->vframe_qos, &tmp4x, + vf, pic_config->hw_decode_time); + pvdec->vdec_fps_detec(pvdec->id); + if (without_display_mode == 0) { + if (pbi->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vvp9_vf_put(vvp9_vf_get(pbi), pbi); + } else { + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(pbi->provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } else + vvp9_vf_put(vvp9_vf_get(pbi), pbi); + } else { + pbi->stat |= VP9_TRIGGER_FRAME_DONE; + hevc_source_changed(VFORMAT_VP9, 196, 196, 30); + pr_debug("[%s %d] drop trigger frame width %d height %d state 0x%x\n", + __func__, __LINE__, vf->width, + vf->height, pbi->stat); + } + } + + return 0; +} + +static int notify_v4l_eos(struct vdec_s *vdec) +{ + struct VP9Decoder_s *hw = (struct VP9Decoder_s *)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = &hw->vframe_dummy; + struct vdec_v4l2_buffer *fb = NULL; + int index = INVALID_IDX; + ulong expires; + + if (hw->eos) { + if (hw->is_used_v4l) { + expires = jiffies + msecs_to_jiffies(2000); + while (INVALID_IDX == (index = v4l_get_free_fb(hw))) { + if (time_after(jiffies, expires) || + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) + break; + } + + if (index == INVALID_IDX) { + ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token); + if (ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC) < 0) { + pr_err("[%d] EOS get free buff fail.\n", ctx->id); + return -1; + } + } + } + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + vf->v4l_mem_handle = 
(index == INVALID_IDX) ? (ulong)fb : + hw->m_BUF[index].v4l_ref_buf_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + + if (hw->is_used_v4l) + fb->task->submit(fb->task, TASK_TYPE_DEC); + else + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + + pr_info("[%d] VP9 EOS notify.\n", (hw->is_used_v4l)?ctx->id:vdec->id); + } + + return 0; +} + +static void get_rpm_param(union param_u *params) +{ + int i; + unsigned int data32; + + if (debug & VP9_DEBUG_BUFMGR) + pr_info("enter %s\r\n", __func__); + for (i = 0; i < 128; i++) { + do { + data32 = READ_VREG(RPM_CMD_REG); + /*pr_info("%x\n", data32);*/ + } while ((data32 & 0x10000) == 0); + params->l.data[i] = data32&0xffff; + /*pr_info("%x\n", data32);*/ + WRITE_VREG(RPM_CMD_REG, 0); + } + if (debug & VP9_DEBUG_BUFMGR) + pr_info("leave %s\r\n", __func__); +} +static void debug_buffer_mgr_more(struct VP9Decoder_s *pbi) +{ + int i; + + if (!(debug & VP9_DEBUG_BUFMGR_MORE)) + return; + pr_info("vp9_param: (%d)\n", pbi->slice_idx); + for (i = 0; i < (RPM_END-RPM_BEGIN); i++) { + pr_info("%04x ", pbi->vp9_param.l.data[i]); + if (((i + 1) & 0xf) == 0) + pr_info("\n"); + } + pr_info("=============param==========\r\n"); + pr_info("profile %x\r\n", pbi->vp9_param.p.profile); + pr_info("show_existing_frame %x\r\n", + pbi->vp9_param.p.show_existing_frame); + pr_info("frame_to_show_idx %x\r\n", + pbi->vp9_param.p.frame_to_show_idx); + pr_info("frame_type %x\r\n", pbi->vp9_param.p.frame_type); + pr_info("show_frame %x\r\n", pbi->vp9_param.p.show_frame); + pr_info("e.r.r.o.r_resilient_mode %x\r\n", + pbi->vp9_param.p.error_resilient_mode); + pr_info("intra_only %x\r\n", pbi->vp9_param.p.intra_only); + pr_info("display_size_present %x\r\n", + pbi->vp9_param.p.display_size_present); + pr_info("reset_frame_context %x\r\n", + pbi->vp9_param.p.reset_frame_context); + pr_info("refresh_frame_flags 
%x\r\n", + pbi->vp9_param.p.refresh_frame_flags); + pr_info("bit_depth %x\r\n", pbi->vp9_param.p.bit_depth); + pr_info("width %x\r\n", pbi->vp9_param.p.width); + pr_info("height %x\r\n", pbi->vp9_param.p.height); + pr_info("display_width %x\r\n", pbi->vp9_param.p.display_width); + pr_info("display_height %x\r\n", pbi->vp9_param.p.display_height); + pr_info("ref_info %x\r\n", pbi->vp9_param.p.ref_info); + pr_info("same_frame_size %x\r\n", pbi->vp9_param.p.same_frame_size); + if (!(debug & VP9_DEBUG_DBG_LF_PRINT)) + return; + pr_info("mode_ref_delta_enabled: 0x%x\r\n", + pbi->vp9_param.p.mode_ref_delta_enabled); + pr_info("sharpness_level: 0x%x\r\n", + pbi->vp9_param.p.sharpness_level); + pr_info("ref_deltas: 0x%x, 0x%x, 0x%x, 0x%x\r\n", + pbi->vp9_param.p.ref_deltas[0], pbi->vp9_param.p.ref_deltas[1], + pbi->vp9_param.p.ref_deltas[2], pbi->vp9_param.p.ref_deltas[3]); + pr_info("mode_deltas: 0x%x, 0x%x\r\n", pbi->vp9_param.p.mode_deltas[0], + pbi->vp9_param.p.mode_deltas[1]); + pr_info("filter_level: 0x%x\r\n", pbi->vp9_param.p.filter_level); + pr_info("seg_enabled: 0x%x\r\n", pbi->vp9_param.p.seg_enabled); + pr_info("seg_abs_delta: 0x%x\r\n", pbi->vp9_param.p.seg_abs_delta); + pr_info("seg_lf_feature_enabled: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\r\n", + (pbi->vp9_param.p.seg_lf_info[0]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[1]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[2]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[3]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[4]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[5]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[6]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[7]>>15 & 1)); + pr_info("seg_lf_feature_data: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\r\n", + (pbi->vp9_param.p.seg_lf_info[0] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[1] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[2] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[3] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[4] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[5] & 0x13f), + 
(pbi->vp9_param.p.seg_lf_info[6] & 0x13f),
		(pbi->vp9_param.p.seg_lf_info[7] & 0x13f));

}

/*
 * recycle_mmu_buf_tail() - give back the unused tail 4K pages of the
 * current frame's compressed-frame MMU mapping.
 *
 * @pbi:       decoder instance
 * @check_dma: when true, run hevc_mmu_dma_check() before freeing
 *             (caller passes this in the in-flight/multi-instance case).
 *
 * If the used-page count was not latched yet (-1), it is read from the
 * high half of HEVC_SAO_MMU_STATUS.  The v4l path frees through the
 * per-buffer internal_comp_buf box, the legacy path through the shared
 * pbi->mmu_box.  Always resets cur_fb_idx_mmu/used_4k_num afterwards.
 *
 * Returns 0.
 */
static int recycle_mmu_buf_tail(struct VP9Decoder_s *pbi,
	bool check_dma)
{
	struct VP9_Common_s *const cm = &pbi->common;

	if (pbi->used_4k_num == -1) {
		pbi->used_4k_num =
			READ_VREG(HEVC_SAO_MMU_STATUS) >> 16;
	}
	vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE,
		"pic index %d page_start %d\n",
		cm->cur_fb_idx_mmu, pbi->used_4k_num);

	if (check_dma)
		hevc_mmu_dma_check(hw_to_vdec(pbi));

	if (pbi->is_used_v4l) {
		int index = cm->cur_fb_idx_mmu;
		struct internal_comp_buf *ibuf =
			index_to_icomp_buf(pbi, index);

		decoder_mmu_box_free_idx_tail(
			ibuf->mmu_box,
			ibuf->index,
			pbi->used_4k_num);
	} else {
		decoder_mmu_box_free_idx_tail(
			pbi->mmu_box,
			cm->cur_fb_idx_mmu,
			pbi->used_4k_num);
	}

	/* invalidate the cached index/count so the next frame re-latches */
	cm->cur_fb_idx_mmu = INVALID_IDX;
	pbi->used_4k_num = -1;

	return 0;
}

/*
 * vp9_recycle_mmu_buf_tail() - guarded wrapper around
 * recycle_mmu_buf_tail(); no-op when double_write_mode bit 4 (0x10) is
 * set or no frame currently owns an MMU mapping.  The DMA check is only
 * requested when the used-page count is still unlatched and we are in
 * multi-instance mode.
 */
static void vp9_recycle_mmu_buf_tail(struct VP9Decoder_s *pbi)
{
	struct VP9_Common_s *const cm = &pbi->common;
	if (pbi->double_write_mode & 0x10)
		return;
	if (cm->cur_fb_idx_mmu != INVALID_IDX) {
		recycle_mmu_buf_tail(pbi,
			((pbi->used_4k_num == -1) &&
			pbi->m_ins_flag) ?
1 : 0);
	}
}

#ifdef MULTI_INSTANCE_SUPPORT
/*
 * vp9_recycle_mmu_buf() - free the whole MMU page mapping of the current
 * frame (not just the tail).  Skipped entirely on the v4l path and when
 * double_write_mode bit 4 (0x10) is set.
 */
static void vp9_recycle_mmu_buf(struct VP9Decoder_s *pbi)
{
	struct VP9_Common_s *const cm = &pbi->common;

	if (pbi->is_used_v4l)
		return;

	if (pbi->double_write_mode & 0x10)
		return;
	if (cm->cur_fb_idx_mmu != INVALID_IDX) {
		decoder_mmu_box_free_idx(pbi->mmu_box,
			cm->cur_fb_idx_mmu);

		cm->cur_fb_idx_mmu = INVALID_IDX;
		pbi->used_4k_num = -1;
	}
}

/* Workqueue shim: run vp9_recycle_mmu_buf() from process context. */
void vp9_recycle_mmu_work(struct work_struct *work)
{
	struct VP9Decoder_s *pbi = container_of(work,
		struct VP9Decoder_s, recycle_mmu_work);

	if (pbi)
		vp9_recycle_mmu_buf(pbi);
}
#endif


/*
 * dec_again_process() - handle a "data not complete, try again" outcome:
 * stop the HEVC core, flag DEC_RESULT_AGAIN and reschedule the work item.
 * If we were interrupted mid-slice, switch to PROC_STATE_SENDAGAIN so the
 * header phase is replayed, and defer MMU recycling to the work queue.
 */
static void dec_again_process(struct VP9Decoder_s *pbi)
{
	amhevc_stop();
	pbi->dec_result = DEC_RESULT_AGAIN;
	if (pbi->process_state ==
		PROC_STATE_DECODESLICE) {
		pbi->process_state =
			PROC_STATE_SENDAGAIN;
		if (pbi->mmu_enable) {
			/*
			 * Because vp9_recycle_mmu_buf may sleep, we can't
			 * call it directly here (interrupt/atomic context);
			 * use recycle_mmu_work as a substitute.
			 */
			vdec_schedule_work(&pbi->recycle_mmu_work);
		}
	}
	reset_process_time(pbi);
	vdec_schedule_work(&pbi->work);
}

/*
 * continue_decoding() - drive one frame's decode after the header
 * parameters are available in pbi->vp9_param: run the buffer manager
 * (or re-send path), program picture/MC/SAO/loop-filter hardware and
 * kick the slice decode.  Returns the buffer-manager result (<0 on
 * failure, in which case the NAL is discarded).
 */
int continue_decoding(struct VP9Decoder_s *pbi)
{
	int ret;
	int i;
	struct VP9_Common_s *const cm = &pbi->common;
	struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(pbi->v4l2_ctx);
	debug_buffer_mgr_more(pbi);

	if (pbi->is_used_v4l && ctx->param_sets_from_ucode)
		pbi->res_ch_flag = 0;
	bit_depth_luma = pbi->vp9_param.p.bit_depth;
	bit_depth_chroma = pbi->vp9_param.p.bit_depth;

	/* >=10bit cannot be decoded with pure double-write mode 0x10 */
	if ((pbi->vp9_param.p.bit_depth >= VPX_BITS_10) &&
		(get_double_write_mode(pbi) == 0x10)) {
		pbi->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW;
		pr_err("fatal err, bit_depth %d, unsupport dw 0x10\n",
			pbi->vp9_param.p.bit_depth);
		return -1;
	}

	if (pbi->process_state != PROC_STATE_SENDAGAIN) {
		ret = vp9_bufmgr_process(pbi, &pbi->vp9_param);
		if (!pbi->m_ins_flag)
			pbi->slice_idx++;
	} else {
		/* re-send path: reuse the already-parsed parameters */
		union param_u *params = &pbi->vp9_param;
		if
(pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) { + ret = vp9_alloc_mmu(pbi, + cm->new_fb_idx, + params->p.width, + params->p.height, + params->p.bit_depth, + pbi->frame_mmu_map_addr); + if (ret >= 0) + cm->cur_fb_idx_mmu = cm->new_fb_idx; + else + pr_err("can't alloc need mmu1,idx %d ret =%d\n", + cm->new_fb_idx, + ret); + +#ifdef VP9_10B_MMU_DW + if (pbi->dw_mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) { + ret = vp9_alloc_mmu_dw(pbi, + cm->new_fb_idx, + params->p.width, + params->p.height, + params->p.bit_depth, + pbi->frame_mmu_dw_map_addr); + if (ret < 0) + pr_err("can't alloc need dw mmu1,idx %d ret =%d\n", + cm->new_fb_idx, + ret); + } +#endif + + } else { + ret = 0; + } + WRITE_VREG(HEVC_PARSER_PICTURE_SIZE, + (params->p.height << 16) | params->p.width); + } + if (ret < 0) { + pr_info("vp9_bufmgr_process=> %d, VP9_10B_DISCARD_NAL\r\n", + ret); + WRITE_VREG(HEVC_DEC_STATUS_REG, VP9_10B_DISCARD_NAL); + cm->show_frame = 0; + if (pbi->mmu_enable) { + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + vp9_recycle_mmu_buf(pbi); + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + } +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) { + pbi->dec_result = DEC_RESULT_DONE; +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num == 0) +#endif + amhevc_stop(); + vdec_schedule_work(&pbi->work); + } +#endif + return ret; + } else if (ret == 0) { + struct PIC_BUFFER_CONFIG_s *cur_pic_config + = &cm->cur_frame->buf; + cur_pic_config->decode_idx = pbi->frame_count; + + if (pbi->process_state != PROC_STATE_SENDAGAIN) { + if (!pbi->m_ins_flag) { + pbi->frame_count++; + decode_frame_count[pbi->index] + = pbi->frame_count; + } +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->chunk) { + cur_pic_config->pts = pbi->chunk->pts; + cur_pic_config->pts64 = pbi->chunk->pts64; + + if (pbi->is_used_v4l && !v4l_bitstream_id_enable) + cur_pic_config->pts64 = pbi->chunk->timestamp; + } +#endif + } + 
/*pr_info("Decode Frame Data %d\n", pbi->frame_count);*/ + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_START); + config_pic_size(pbi, pbi->vp9_param.p.bit_depth); + + if ((pbi->common.frame_type != KEY_FRAME) + && (!pbi->common.intra_only)) { + config_mc_buffer(pbi, pbi->vp9_param.p.bit_depth); +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num == 0) +#endif + config_mpred_hw(pbi); + } else { +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num == 0) +#endif + clear_mpred_hw(pbi); + } + +#ifdef MCRCC_ENABLE + if (mcrcc_cache_alg_flag) + config_mcrcc_axi_hw_new(pbi); + else + config_mcrcc_axi_hw(pbi); +#endif + config_sao_hw(pbi, &pbi->vp9_param); + +#ifdef VP9_LPF_LVL_UPDATE + /* + * Get loop filter related picture level parameters from Parser + */ + pbi->lf->mode_ref_delta_enabled = pbi->vp9_param.p.mode_ref_delta_enabled; + pbi->lf->sharpness_level = pbi->vp9_param.p.sharpness_level; + for (i = 0; i < 4; i++) + pbi->lf->ref_deltas[i] = pbi->vp9_param.p.ref_deltas[i]; + for (i = 0; i < 2; i++) + pbi->lf->mode_deltas[i] = pbi->vp9_param.p.mode_deltas[i]; + pbi->default_filt_lvl = pbi->vp9_param.p.filter_level; + pbi->seg_4lf->enabled = pbi->vp9_param.p.seg_enabled; + pbi->seg_4lf->abs_delta = pbi->vp9_param.p.seg_abs_delta; + for (i = 0; i < MAX_SEGMENTS; i++) + pbi->seg_4lf->feature_mask[i] = (pbi->vp9_param.p.seg_lf_info[i] & + 0x8000) ? (1 << SEG_LVL_ALT_LF) : 0; + for (i = 0; i < MAX_SEGMENTS; i++) + pbi->seg_4lf->feature_data[i][SEG_LVL_ALT_LF] + = (pbi->vp9_param.p.seg_lf_info[i] + & 0x100) ? 
-(pbi->vp9_param.p.seg_lf_info[i] + & 0x3f) : (pbi->vp9_param.p.seg_lf_info[i] & 0x3f); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + /*Set pipeline mode*/ + uint32_t lpf_data32 = READ_VREG(HEVC_DBLK_CFGB); + /*dblk pipeline mode=1 for performance*/ + if (pbi->vp9_param.p.width >= 1280) + lpf_data32 |= (0x1 << 4); + else + lpf_data32 &= ~(0x3 << 4); + WRITE_VREG(HEVC_DBLK_CFGB, lpf_data32); + } + /* + * Update loop filter Thr/Lvl table for every frame + */ + /*pr_info + ("vp9_loop_filter (run before every frame decoding start)\n");*/ + vp9_loop_filter_frame_init(pbi->seg_4lf, + pbi->lfi, pbi->lf, pbi->default_filt_lvl); +#endif + /*pr_info("HEVC_DEC_STATUS_REG <= VP9_10B_DECODE_SLICE\n");*/ + WRITE_VREG(HEVC_DEC_STATUS_REG, VP9_10B_DECODE_SLICE); + } else { + pr_info("Skip search next start code\n"); + cm->prev_fb_idx = INVALID_IDX; + /*skip, search next start code*/ + WRITE_VREG(HEVC_DEC_STATUS_REG, VP9_10B_DECODE_SLICE); + } + pbi->process_state = PROC_STATE_DECODESLICE; + if (pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) { + if (pbi->last_put_idx < pbi->used_buf_num) { + struct RefCntBuffer_s *frame_bufs = + cm->buffer_pool->frame_bufs; + int i = pbi->last_put_idx; + /*free not used buffers.*/ + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.index != -1)) { + if (pbi->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(pbi, i); + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + decoder_mmu_box_free_idx(ibuf->mmu_box, ibuf->index); + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + } else { + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + decoder_mmu_box_free_idx(pbi->mmu_box, i); + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + +#ifdef VP9_10B_MMU_DW + if (pbi->dw_mmu_enable) + 
decoder_mmu_box_free_idx(pbi->mmu_box_dw, i); +#endif + + } + } + pbi->last_put_idx = -1; + } + } + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_END); + return ret; +} + +static void fill_frame_info(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *frame, + unsigned int framesize, + unsigned int pts) +{ + struct vframe_qos_s *vframe_qos = &pbi->vframe_qos; + + if (frame->slice_type == KEY_FRAME) + vframe_qos->type = 1; + else if (frame->slice_type == INTER_FRAME) + vframe_qos->type = 2; +/* +#define SHOW_QOS_INFO +*/ + if (input_frame_based(hw_to_vdec(pbi))) + vframe_qos->size = frame->frame_size2; + else + vframe_qos->size = framesize; + vframe_qos->pts = pts; +#ifdef SHOW_QOS_INFO + vp9_print(pbi, 0, "slice:%d\n", frame->slice_type); +#endif + vframe_qos->max_mv = frame->max_mv; + vframe_qos->avg_mv = frame->avg_mv; + vframe_qos->min_mv = frame->min_mv; +#ifdef SHOW_QOS_INFO + vp9_print(pbi, 0, "mv: max:%d, avg:%d, min:%d\n", + vframe_qos->max_mv, + vframe_qos->avg_mv, + vframe_qos->min_mv); +#endif + vframe_qos->max_qp = frame->max_qp; + vframe_qos->avg_qp = frame->avg_qp; + vframe_qos->min_qp = frame->min_qp; +#ifdef SHOW_QOS_INFO + vp9_print(pbi, 0, "qp: max:%d, avg:%d, min:%d\n", + vframe_qos->max_qp, + vframe_qos->avg_qp, + vframe_qos->min_qp); +#endif + vframe_qos->max_skip = frame->max_skip; + vframe_qos->avg_skip = frame->avg_skip; + vframe_qos->min_skip = frame->min_skip; +#ifdef SHOW_QOS_INFO + vp9_print(pbi, 0, "skip: max:%d, avg:%d, min:%d\n", + vframe_qos->max_skip, + vframe_qos->avg_skip, + vframe_qos->min_skip); +#endif + vframe_qos->num++; +} + +/* only when we decoded one field or one frame, +we can call this function to get qos info*/ +static void get_picture_qos_info(struct VP9Decoder_s *pbi) +{ + struct PIC_BUFFER_CONFIG_s *frame = &pbi->cur_buf->buf; + struct vdec_s *vdec = hw_to_vdec(pbi); + + if (!frame) + return; + if (vdec->mvfrm) { + frame->frame_size2 = vdec->mvfrm->frame_size; + 
frame->hw_decode_time = + local_clock() - vdec->mvfrm->hw_decode_start; + } + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + unsigned char a[3]; + unsigned char i, j, t; + unsigned long data; + + data = READ_VREG(HEVC_MV_INFO); + if (frame->slice_type == KEY_FRAME) + data = 0; + a[0] = data & 0xff; + a[1] = (data >> 8) & 0xff; + a[2] = (data >> 16) & 0xff; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_mv = a[2]; + frame->avg_mv = a[1]; + frame->min_mv = a[0]; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "mv data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + + data = READ_VREG(HEVC_QP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_qp = a[2]; + frame->avg_qp = a[1]; + frame->min_qp = a[0]; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "qp data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + + data = READ_VREG(HEVC_SKIP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_skip = a[2]; + frame->avg_skip = a[1]; + frame->min_skip = a[0]; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "skip data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + } else { + uint32_t blk88_y_count; + uint32_t blk88_c_count; + uint32_t blk22_mv_count; + uint32_t rdata32; + int32_t mv_hi; + int32_t mv_lo; + uint32_t rdata32_l; + uint32_t mvx_L0_hi; + uint32_t 
mvy_L0_hi; + uint32_t mvx_L1_hi; + uint32_t mvy_L1_hi; + int64_t value; + uint64_t temp_value; + int pic_number = frame->decode_idx; + + frame->max_mv = 0; + frame->avg_mv = 0; + frame->min_mv = 0; + + frame->max_skip = 0; + frame->avg_skip = 0; + frame->min_skip = 0; + + frame->max_qp = 0; + frame->avg_qp = 0; + frame->min_qp = 0; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, "slice_type:%d, poc:%d\n", + frame->slice_type, + pic_number); + + /* set rd_idx to 0 */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, 0); + + blk88_y_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_y_count == 0) { + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] NO Data yet.\n", + pic_number); + + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_y_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_y_count, + rdata32, blk88_y_count); + + frame->avg_qp = rdata32/blk88_y_count; + /* intra_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); + + /* skipped_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); + + frame->avg_skip = rdata32*100/blk88_y_count; + /* coeff_non_zero_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_y_count*1)), + '%', rdata32); + + /* blk66_c_count */ + blk88_c_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_c_count == 0) { + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] NO Data yet.\n", + pic_number); + /* reset all counts */ + 
WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_c_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_c_count, + rdata32, blk88_c_count); + + /* intra_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); + + /* skipped_cu_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); + + /* coeff_non_zero_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_c_count*1)), + '%', rdata32); + + /* 1'h0, qp_c_max[6:0], 1'h0, qp_c_min[6:0], + 1'h0, qp_y_max[6:0], 1'h0, qp_y_min[6:0] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP min : %d\n", + pic_number, (rdata32>>0)&0xff); + + frame->min_qp = (rdata32>>0)&0xff; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP max : %d\n", + pic_number, (rdata32>>8)&0xff); + + frame->max_qp = (rdata32>>8)&0xff; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP min : %d\n", + pic_number, (rdata32>>16)&0xff); + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP max : %d\n", + pic_number, (rdata32>>24)&0xff); + + /* blk22_mv_count */ + blk22_mv_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk22_mv_count == 0) { + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] NO MV Data yet.\n", + pic_number); + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* mvy_L1_count[39:32], mvx_L1_count[39:32], + 
mvy_L0_count[39:32], mvx_L0_count[39:32] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + /* should all be 0x00 or 0xff */ + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MV AVG High Bits: 0x%X\n", + pic_number, rdata32); + + mvx_L0_hi = ((rdata32>>0)&0xff); + mvy_L0_hi = ((rdata32>>8)&0xff); + mvx_L1_hi = ((rdata32>>16)&0xff); + mvy_L1_hi = ((rdata32>>24)&0xff); + + /* mvx_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvx_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + value = div_s64(value, blk22_mv_count); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 AVG : %d (%lld/%d)\n", + pic_number, (int)value, + value, blk22_mv_count); + + frame->avg_mv = value; + + /* mvy_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvy_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* mvx_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvx_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* mvy_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvy_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L1 AVG : %d (%lld/%d)\n", + 
pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* {mvx_L0_max, mvx_L0_min} // format : {sign, abs[14:0]} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 MAX : %d\n", + pic_number, mv_hi); + + frame->max_mv = mv_hi; + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 MIN : %d\n", + pic_number, mv_lo); + + frame->min_mv = mv_lo; + + /* {mvy_L0_max, mvy_L0_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 MIN : %d\n", + pic_number, mv_lo); + + /* {mvx_L1_max, mvx_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L1 MIN : %d\n", + pic_number, mv_lo); + + /* {mvy_L1_max, mvy_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L1 MIN : %d\n", + pic_number, mv_lo); + + rdata32 = 
READ_VREG(HEVC_PIC_QUALITY_CTRL);

	vp9_print(pbi, VP9_DEBUG_QOS_INFO,
		"[Picture %d Quality] After Read : VDEC_PIC_QUALITY_CTRL : 0x%x\n",
		pic_number, rdata32);

	/* reset all counts */
	WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8));
	}
}

/*
 * vvp9_get_comp_buf_info() - report compressed-buffer sizing to the v4l2
 * layer: max total size, compression header size and per-frame page count.
 *
 * NOTE(review): this reads pbi->param.p.bit_depth while every other
 * function in this file reads pbi->vp9_param.p.* — confirm pbi->param is
 * the intended field and stays in sync with vp9_param.
 * NOTE(review): the last argument (bit_depth == 0) is presumably an
 * "8-bit only" flag for vp9_mmu_page_num() — verify against its
 * definition.
 */
static void vvp9_get_comp_buf_info(struct VP9Decoder_s *pbi,
	struct vdec_comp_buf_info *info)
{
	u16 bit_depth = pbi->param.p.bit_depth;

	info->max_size = vp9_max_mmu_buf_size(
		pbi->max_pic_w,
		pbi->max_pic_h);
	info->header_size = vvp9_mmu_compress_header_size(
		pbi->frame_width,
		pbi->frame_height);
	info->frame_buffer_size = vp9_mmu_page_num(
		pbi, pbi->frame_width,
		pbi->frame_height,
		bit_depth == 0);
}

/*
 * vvp9_get_ps_info() - fill the v4l2 picture-set info: visible size from
 * the parsed frame size, coded size aligned up to 64, DPB depth from the
 * configured buffer count.  Always returns 0.
 */
static int vvp9_get_ps_info(struct VP9Decoder_s *pbi, struct aml_vdec_ps_infos *ps)
{
	ps->visible_width = pbi->frame_width;
	ps->visible_height = pbi->frame_height;
	ps->coded_width = ALIGN(pbi->frame_width, 64);
	ps->coded_height = ALIGN(pbi->frame_height, 64);
	ps->dpb_size = pbi->used_buf_num;

	return 0;
}


/*
 * v4l_res_change() - detect a mid-stream resolution change (ucode-parsed
 * parameter mode only).  When the new frame size differs from the last
 * one, push fresh comp-buffer/ps info to the v4l2 core, raise the
 * resolution-change event, rebuild the MV buffer list and flush the
 * decoder via the EOS path.  Returns 1 if a change was handled, else 0.
 */
static int v4l_res_change(struct VP9Decoder_s *pbi)
{
	struct aml_vcodec_ctx *ctx =
		(struct aml_vcodec_ctx *)(pbi->v4l2_ctx);
	int ret = 0;

	if (ctx->param_sets_from_ucode &&
		pbi->res_ch_flag == 0) {
		struct aml_vdec_ps_infos ps;
		struct vdec_comp_buf_info comp;

		if ((pbi->last_width != 0 &&
			pbi->last_height != 0) &&
			(pbi->frame_width != pbi->last_width ||
			pbi->frame_height != pbi->last_height)) {

			vp9_print(pbi, 0, "%s (%d,%d)=>(%d,%d)\r\n", __func__, pbi->last_width,
				pbi->last_height, pbi->frame_width, pbi->frame_height);

			/* comp-buffer info only matters when MMU is in play
			 * (double-write mode 16 is the no-MMU case)
			 */
			if (get_valid_double_write_mode(pbi) != 16) {
				vvp9_get_comp_buf_info(pbi, &comp);
				vdec_v4l_set_comp_buf_info(ctx, &comp);
			}

			vvp9_get_ps_info(pbi, &ps);
			vdec_v4l_set_ps_infos(ctx, &ps);
			vdec_v4l_res_ch_event(ctx);

			pbi->init_pic_w = pbi->frame_width;
			pbi->init_pic_h = pbi->frame_height;
			init_mv_buf_list(pbi);

			/* force re-parse of parameter sets at the new size */
			pbi->v4l_params_parsed = false;
			pbi->res_ch_flag = 1;
			ctx->v4l_resolution_change = 1;
pbi->eos = 1; + vp9_bufmgr_postproc(pbi); + //del_timer_sync(&pbi->timer); + notify_v4l_eos(hw_to_vdec(pbi)); + ret = 1; + } + } + + return ret; +} + + +static irqreturn_t vvp9_isr_thread_fn(int irq, void *data) +{ + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)data; + unsigned int dec_status = pbi->dec_status; + int i; + + /*if (pbi->wait_buf) + * pr_info("set wait_buf to 0\r\n"); + */ + + if (dec_status == VP9_HEAD_PARSER_DONE) { + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_START); + } + else if (dec_status == HEVC_DECPIC_DATA_DONE) { + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_THREAD_PIC_DONE_START); + } + + if (pbi->eos) + return IRQ_HANDLED; + pbi->wait_buf = 0; +#ifdef MULTI_INSTANCE_SUPPORT +#ifdef SUPPORT_FB_DECODING +#ifdef FB_DECODING_TEST_SCHEDULE + if (pbi->s1_test_cmd == TEST_SET_PIC_DONE) + dec_status = HEVC_DECPIC_DATA_DONE; + else if (pbi->s1_test_cmd == TEST_SET_S2_DONE + && dec_status == HEVC_DECPIC_DATA_DONE) + dec_status = HEVC_S2_DECODING_DONE; + pbi->s1_test_cmd = TEST_SET_NONE; +#else + /*if (irq != VDEC_IRQ_0) + dec_status = HEVC_S2_DECODING_DONE;*/ +#endif + if (dec_status == HEVC_S2_DECODING_DONE) { + pbi->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&pbi->work); +#ifdef FB_DECODING_TEST_SCHEDULE + amhevc_stop(); + pbi->dec_s1_result = DEC_S1_RESULT_DONE; + vdec_schedule_work(&pbi->s1_work); +#endif + } else +#endif + if ((dec_status == HEVC_NAL_DECODE_DONE) || + (dec_status == HEVC_SEARCH_BUFEMPTY) || + (dec_status == HEVC_DECODE_BUFEMPTY) + ) { + if (pbi->m_ins_flag) { + reset_process_time(pbi); + if (!vdec_frame_based(hw_to_vdec(pbi))) + dec_again_process(pbi); + else { + if (pbi->common.show_existing_frame) { + pbi->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + vdec_schedule_work(&pbi->work); + } + else { + pbi->dec_result = DEC_RESULT_GET_DATA; + vdec_schedule_work(&pbi->work); + } + } + } + pbi->process_busy = 0; + return IRQ_HANDLED; + } else if (dec_status == 
HEVC_DECPIC_DATA_DONE) { + if (pbi->m_ins_flag) { + get_picture_qos_info(pbi); +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0) { + reset_process_time(pbi); + inc_s1_pos(pbi); + trigger_schedule(pbi); +#ifdef FB_DECODING_TEST_SCHEDULE + pbi->s1_test_cmd = TEST_SET_S2_DONE; +#else + amhevc_stop(); + pbi->dec_s1_result = DEC_S1_RESULT_DONE; + vdec_schedule_work(&pbi->s1_work); +#endif + } else +#endif + { + reset_process_time(pbi); + if (pbi->vf_pre_count == 0 || pbi->low_latency_flag) + vp9_bufmgr_postproc(pbi); + + pbi->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + if (mcrcc_cache_alg_flag) + dump_hit_rate(pbi); + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + vdec_schedule_work(&pbi->work); + } + } else { + if (pbi->low_latency_flag) { + vp9_bufmgr_postproc(pbi); + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + vdec_profile(hw_to_vdec(pbi), VDEC_PROFILE_EVENT_CB); + if (debug & PRINT_FLAG_VDEC_DETAIL) + pr_info("%s VP9 frame done \n", __func__); +#endif + } + } + + pbi->process_busy = 0; + return IRQ_HANDLED; + } +#endif + + if (dec_status == VP9_EOS) { +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) + reset_process_time(pbi); +#endif + + pr_info("VP9_EOS, flush buffer\r\n"); + + vp9_bufmgr_postproc(pbi); + + pr_info("send VP9_10B_DISCARD_NAL\r\n"); + WRITE_VREG(HEVC_DEC_STATUS_REG, VP9_10B_DISCARD_NAL); + pbi->process_busy = 0; +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) { + pbi->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + vdec_schedule_work(&pbi->work); + } +#endif + return IRQ_HANDLED; + } else if (dec_status == HEVC_DECODE_OVER_SIZE) { + pr_info("vp9 decode oversize !!\n"); + debug |= (VP9_DEBUG_DIS_LOC_ERROR_PROC | + VP9_DEBUG_DIS_SYS_ERROR_PROC); + pbi->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) + reset_process_time(pbi); +#endif + return IRQ_HANDLED; + } + + if (dec_status != 
VP9_HEAD_PARSER_DONE) { + pbi->process_busy = 0; + return IRQ_HANDLED; + } + + if (pbi->m_ins_flag && + !get_free_buf_count(pbi)) { + pbi->run_ready_min_buf_num = pbi->one_package_frame_cnt + 1; + pr_err("need buffer, one package frame count = %d\n", pbi->one_package_frame_cnt + 1); + pbi->dec_result = DEC_RESULT_NEED_MORE_BUFFER; + vdec_schedule_work(&pbi->work); + return IRQ_HANDLED; + } + + pbi->one_package_frame_cnt++; + +#ifdef MULTI_INSTANCE_SUPPORT +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + if (pbi->m_ins_flag ==0 && pbi->low_latency_flag) { + vdec_profile(hw_to_vdec(pbi), VDEC_PROFILE_EVENT_RUN); + if (debug & PRINT_FLAG_VDEC_DETAIL) + pr_info("%s VP9 frame header found \n", __func__); + } +#endif + if (pbi->m_ins_flag) + reset_process_time(pbi); +#endif + if (pbi->process_state != PROC_STATE_SENDAGAIN +#ifdef SUPPORT_FB_DECODING + && pbi->used_stage_buf_num == 0 +#endif + ) { + if (pbi->mmu_enable) { + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + vp9_recycle_mmu_buf_tail(pbi); + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + } + + if (pbi->frame_count > 0) + vp9_bufmgr_postproc(pbi); + } + + if (debug & VP9_DEBUG_SEND_PARAM_WITH_REG) { + get_rpm_param(&pbi->vp9_param); + } else { +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0) { + reset_process_time(pbi); + get_s1_buf(pbi); + + if (get_mv_buf(pbi, + &pbi->s1_mv_buf_index, + &pbi->s1_mpred_mv_wr_start_addr + ) < 0) { + vp9_print(pbi, 0, + "%s: Error get_mv_buf fail\n", + __func__); + } + + if (pbi->s1_buf == NULL) { + vp9_print(pbi, 0, + "%s: Error get_s1_buf fail\n", + __func__); + pbi->process_busy = 0; + return IRQ_HANDLED; + } + + for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) { + int ii; + for (ii = 0; ii < 4; ii++) { + pbi->s1_buf->rpm[i + 3 - ii] = + pbi->rpm_ptr[i + 3 - ii]; + pbi->s1_param.l.data[i + ii] = + pbi->rpm_ptr[i + 3 - ii]; + } + } + + mpred_process(pbi); +#ifdef FB_DECODING_TEST_SCHEDULE + 
pbi->dec_s1_result = + DEC_S1_RESULT_TEST_TRIGGER_DONE; + vdec_schedule_work(&pbi->s1_work); +#else + WRITE_VREG(HEVC_ASSIST_FB_MMU_MAP_ADDR, + pbi->stage_mmu_map_phy_addr + + pbi->s1_buf->index * STAGE_MMU_MAP_SIZE); + + start_s1_decoding(pbi); +#endif + start_process_time(pbi); + pbi->process_busy = 0; + return IRQ_HANDLED; + } else +#endif + { + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_START); + for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) { + int ii; + for (ii = 0; ii < 4; ii++) + pbi->vp9_param.l.data[i + ii] = + pbi->rpm_ptr[i + 3 - ii]; + } + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_END); + } + } + + if (pbi->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + + pbi->frame_width = pbi->vp9_param.p.width; + pbi->frame_height = pbi->vp9_param.p.height; + + if (!pbi->has_keyframe && + ((pbi->frame_width == 0) || + (pbi->frame_height == 0))) { + continue_decoding(pbi); + pbi->postproc_done = 0; + pbi->process_busy = 0; + return IRQ_HANDLED; + } + + if (!v4l_res_change(pbi)) { + if (ctx->param_sets_from_ucode && !pbi->v4l_params_parsed) { + struct aml_vdec_ps_infos ps; + struct vdec_comp_buf_info comp; + + pr_debug("set ucode parse\n"); + if (get_valid_double_write_mode(pbi) != 16) { + vvp9_get_comp_buf_info(pbi, &comp); + vdec_v4l_set_comp_buf_info(ctx, &comp); + } + + vvp9_get_ps_info(pbi, &ps); + /*notice the v4l2 codec.*/ + vdec_v4l_set_ps_infos(ctx, &ps); + + pbi->init_pic_w = pbi->frame_width; + pbi->init_pic_h = pbi->frame_height; + init_mv_buf_list(pbi); + + pbi->v4l_params_parsed = true; + pbi->postproc_done = 0; + pbi->process_busy = 0; + dec_again_process(pbi); + return IRQ_HANDLED; + } + } else { + pbi->postproc_done = 0; + pbi->process_busy = 0; + dec_again_process(pbi); + return IRQ_HANDLED; + } + } + + continue_decoding(pbi); + pbi->postproc_done = 0; + pbi->process_busy = 0; + +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) + 
start_process_time(pbi); +#endif + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + return IRQ_HANDLED; +} + +static irqreturn_t vvp9_isr(int irq, void *data) +{ + int i; + unsigned int dec_status; + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)data; + unsigned int adapt_prob_status; + uint debug_tag; + + WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + + dec_status = READ_VREG(HEVC_DEC_STATUS_REG); + if (dec_status == VP9_HEAD_PARSER_DONE) { + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_HEAD_DONE); + } + else if (dec_status == HEVC_DECPIC_DATA_DONE) { + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_PIC_DONE); + } + + adapt_prob_status = READ_VREG(VP9_ADAPT_PROB_REG); + if (!pbi) + return IRQ_HANDLED; + if (pbi->init_flag == 0) + return IRQ_HANDLED; + if (pbi->process_busy)/*on process.*/ + return IRQ_HANDLED; + pbi->dec_status = dec_status; + pbi->process_busy = 1; + if (debug & VP9_DEBUG_BUFMGR) + pr_info("vp9 isr (%d) dec status = 0x%x, lcu 0x%x shiftbyte 0x%x (%x %x lev %x, wr %x, rd %x)\n", + irq, + dec_status, READ_VREG(HEVC_PARSER_LCU_START), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR) + ); +#ifdef SUPPORT_FB_DECODING + /*if (irq != VDEC_IRQ_0) + return IRQ_WAKE_THREAD;*/ +#endif + + debug_tag = READ_HREG(DEBUG_REG1); + if (debug_tag & 0x10000) { + pr_info("LMEM<tag %x>:\n", READ_HREG(DEBUG_REG1)); + for (i = 0; i < 0x400; i += 4) { + int ii; + if ((i & 0xf) == 0) + pr_info("%03x: ", i); + for (ii = 0; ii < 4; ii++) { + pr_info("%04x ", + pbi->lmem_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + pr_info("\n"); + } + + if ((udebug_pause_pos == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == pbi->slice_idx) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_HREG(DEBUG_REG2))) + pbi->ucode_pause_pos = 
udebug_pause_pos; + else if (debug_tag & 0x20000) + pbi->ucode_pause_pos = 0xffffffff; + if (pbi->ucode_pause_pos) + reset_process_time(pbi); + else + WRITE_HREG(DEBUG_REG1, 0); + } else if (debug_tag != 0) { + pr_info( + "dbg%x: %x lcu %x\n", READ_HREG(DEBUG_REG1), + READ_HREG(DEBUG_REG2), + READ_VREG(HEVC_PARSER_LCU_START)); + if ((udebug_pause_pos == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == pbi->slice_idx) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_HREG(DEBUG_REG2))) + pbi->ucode_pause_pos = udebug_pause_pos; + if (pbi->ucode_pause_pos) + reset_process_time(pbi); + else + WRITE_HREG(DEBUG_REG1, 0); + pbi->process_busy = 0; + return IRQ_HANDLED; + } + +#ifdef MULTI_INSTANCE_SUPPORT + if (!pbi->m_ins_flag) { +#endif + if (pbi->error_flag == 1) { + pbi->error_flag = 2; + pbi->process_busy = 0; + return IRQ_HANDLED; + } else if (pbi->error_flag == 3) { + pbi->process_busy = 0; + return IRQ_HANDLED; + } + + if (get_free_buf_count(pbi) <= 0) { + /* + if (pbi->wait_buf == 0) + pr_info("set wait_buf to 1\r\n"); + */ + pbi->wait_buf = 1; + pbi->process_busy = 0; + return IRQ_HANDLED; + } +#ifdef MULTI_INSTANCE_SUPPORT + } +#endif + if ((adapt_prob_status & 0xff) == 0xfd) { + struct VP9_Common_s *const cm = &pbi->common; + int pre_fc = 0; + + if (pbi->m_ins_flag) + reset_process_time(pbi); + + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) && + (vdec_secure(hw_to_vdec(pbi)))) { + pre_fc = ((cm->frame_type == KEY_FRAME) || (cm->intra_only)) ? 
0 : 1; + tee_vp9_prob_process(pre_fc, cm->last_frame_type, + adapt_prob_status, (unsigned int)pbi->prob_buffer_phy_addr); + } else { + uint8_t *prev_prob_b, *cur_prob_b, *count_b; + + /*VP9_REQ_ADAPT_PROB*/ + pre_fc = ((cm->frame_type == KEY_FRAME) || (cm->intra_only)); + prev_prob_b = ((uint8_t *)pbi->prob_buffer_addr) + + ((adapt_prob_status >> 8) * 0x1000); + cur_prob_b = ((uint8_t *)pbi->prob_buffer_addr) + 0x4000; + count_b = (uint8_t *)pbi->count_buffer_addr; + + adapt_coef_probs(pbi->pic_count, + (cm->last_frame_type == KEY_FRAME), + pre_fc, (adapt_prob_status >> 8), + (unsigned int *)prev_prob_b, + (unsigned int *)cur_prob_b, (unsigned int *)count_b); + + memcpy(prev_prob_b, cur_prob_b, PROB_SIZE); + } + + WRITE_VREG(VP9_ADAPT_PROB_REG, 0); + pbi->pic_count += 1; +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) + start_process_time(pbi); +#endif + } + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_END); + return IRQ_WAKE_THREAD; +} + +static void vp9_set_clk(struct work_struct *work) +{ + struct VP9Decoder_s *pbi = container_of(work, + struct VP9Decoder_s, set_clk_work); + int fps = 96000 / pbi->frame_dur; + + if (hevc_source_changed(VFORMAT_VP9, + frame_width, frame_height, fps) > 0) + pbi->saved_resolution = frame_width * + frame_height * fps; +} + +static void vvp9_put_timer_func(struct timer_list *timer) +{ + struct VP9Decoder_s *pbi = container_of(timer, + struct VP9Decoder_s, timer); + enum receviver_start_e state = RECEIVER_INACTIVE; + uint8_t empty_flag; + unsigned int buf_level; + + if (pbi->m_ins_flag) { + if (hw_to_vdec(pbi)->next_status + == VDEC_STATUS_DISCONNECTED && + !pbi->is_used_v4l) { +#ifdef SUPPORT_FB_DECODING + if (pbi->run2_busy) + return; + + pbi->dec_s1_result = DEC_S1_RESULT_FORCE_EXIT; + vdec_schedule_work(&pbi->s1_work); +#endif + pbi->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&pbi->work); + pr_debug( + "vdec requested to be disconnected\n"); + return; + } + } + if (pbi->init_flag == 0) { + if 
(pbi->stat & STAT_TIMER_ARM) { + timer->expires = jiffies + PUT_INTERVAL; + add_timer(&pbi->timer); + } + return; + } + if (pbi->m_ins_flag == 0) { + if (vf_get_receiver(pbi->provider_name)) { + state = + vf_notify_receiver(pbi->provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + if ((state == RECEIVER_STATE_NULL) + || (state == RECEIVER_STATE_NONE)) + state = RECEIVER_INACTIVE; + } else + state = RECEIVER_INACTIVE; + + empty_flag = (READ_VREG(HEVC_PARSER_INT_STATUS) >> 6) & 0x1; + /* error watchdog */ + if (empty_flag == 0) { + /* decoder has input */ + if ((debug & VP9_DEBUG_DIS_LOC_ERROR_PROC) == 0) { + + buf_level = READ_VREG(HEVC_STREAM_LEVEL); + /* receiver has no buffer to recycle */ + if ((state == RECEIVER_INACTIVE) && + (kfifo_is_empty(&pbi->display_q) && + buf_level > 0x200) + ) { + WRITE_VREG + (HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + } + } + + if ((debug & VP9_DEBUG_DIS_SYS_ERROR_PROC) == 0) { + /* receiver has no buffer to recycle */ + /*if ((state == RECEIVER_INACTIVE) && + * (kfifo_is_empty(&pbi->display_q))) { + *pr_info("vp9 something error,need reset\n"); + *} + */ + } + } + } +#ifdef MULTI_INSTANCE_SUPPORT + else { + if ( + (decode_timeout_val > 0) && + (pbi->start_process_time > 0) && + ((1000 * (jiffies - pbi->start_process_time) / HZ) + > decode_timeout_val) + ) { + int current_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff; + if (pbi->last_lcu_idx == current_lcu_idx) { + if (pbi->decode_timeout_count > 0) + pbi->decode_timeout_count--; + if (pbi->decode_timeout_count == 0) { + if (input_frame_based( + hw_to_vdec(pbi)) || + (READ_VREG(HEVC_STREAM_LEVEL) > 0x200)) + timeout_process(pbi); + else { + vp9_print(pbi, 0, + "timeout & empty, again\n"); + dec_again_process(pbi); + } + } + } else { + start_process_time(pbi); + pbi->last_lcu_idx = current_lcu_idx; + } + } + } +#endif + + if ((pbi->ucode_pause_pos != 0) && + (pbi->ucode_pause_pos != 0xffffffff) && + udebug_pause_pos != pbi->ucode_pause_pos) { + pbi->ucode_pause_pos = 
0; + WRITE_HREG(DEBUG_REG1, 0); + } +#ifdef MULTI_INSTANCE_SUPPORT + if (debug & VP9_DEBUG_FORCE_SEND_AGAIN) { + pr_info( + "Force Send Again\r\n"); + debug &= ~VP9_DEBUG_FORCE_SEND_AGAIN; + reset_process_time(pbi); + pbi->dec_result = DEC_RESULT_AGAIN; + if (pbi->process_state == + PROC_STATE_DECODESLICE) { + if (pbi->mmu_enable) + vp9_recycle_mmu_buf(pbi); + pbi->process_state = + PROC_STATE_SENDAGAIN; + } + amhevc_stop(); + + vdec_schedule_work(&pbi->work); + } + + if (debug & VP9_DEBUG_DUMP_DATA) { + debug &= ~VP9_DEBUG_DUMP_DATA; + vp9_print(pbi, 0, + "%s: chunk size 0x%x off 0x%x sum 0x%x\n", + __func__, + pbi->chunk->size, + pbi->chunk->offset, + get_data_check_sum(pbi, pbi->chunk->size) + ); + dump_data(pbi, pbi->chunk->size); + } +#endif + if (debug & VP9_DEBUG_DUMP_PIC_LIST) { + dump_pic_list(pbi); + debug &= ~VP9_DEBUG_DUMP_PIC_LIST; + } + if (debug & VP9_DEBUG_TRIG_SLICE_SEGMENT_PROC) { + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); + debug &= ~VP9_DEBUG_TRIG_SLICE_SEGMENT_PROC; + } + /*if (debug & VP9_DEBUG_HW_RESET) { + }*/ + + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + if (pop_shorts != 0) { + int i; + u32 sum = 0; + + pr_info("pop stream 0x%x shorts\r\n", pop_shorts); + for (i = 0; i < pop_shorts; i++) { + u32 data = + (READ_HREG(HEVC_SHIFTED_DATA) >> 16); + WRITE_HREG(HEVC_SHIFT_COMMAND, + (1<<7)|16); + if ((i & 0xf) == 0) + pr_info("%04x:", i); + pr_info("%04x ", data); + if (((i + 1) & 0xf) == 0) + pr_info("\r\n"); + sum += data; + } + pr_info("\r\nsum = %x\r\n", sum); + pop_shorts = 0; + } + if (dbg_cmd != 0) { + if (dbg_cmd == 1) { + u32 disp_laddr; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB && + get_double_write_mode(pbi) == 0) { + disp_laddr = + READ_VCBUS_REG(AFBC_BODY_BADDR) << 4; + } else { + struct canvas_s cur_canvas; + + canvas_read((READ_VCBUS_REG(VD1_IF0_CANVAS0) + 
& 0xff), &cur_canvas); + disp_laddr = cur_canvas.addr; + } + pr_info("current displayed buffer address %x\r\n", + disp_laddr); + } + dbg_cmd = 0; + } + /*don't changed at start.*/ + if (pbi->get_frame_dur && pbi->show_frame_num > 60 && + pbi->frame_dur > 0 && pbi->saved_resolution != + frame_width * frame_height * + (96000 / pbi->frame_dur)) + vdec_schedule_work(&pbi->set_clk_work); + + timer->expires = jiffies + PUT_INTERVAL; + add_timer(timer); +} + + +int vvp9_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + struct VP9Decoder_s *vp9 = + (struct VP9Decoder_s *)vdec->private; + + if (!vp9) + return -1; + + vstatus->frame_width = frame_width; + vstatus->frame_height = frame_height; + if (vp9->error_frame_width && + vp9->error_frame_height) { + vstatus->frame_width = vp9->error_frame_width; + vstatus->frame_height = vp9->error_frame_height; + } + + if (vp9->frame_dur != 0) + vstatus->frame_rate = ((96000 * 10 / vp9->frame_dur) % 10) < 5 ? + 96000 / vp9->frame_dur : (96000 / vp9->frame_dur +1); + else + vstatus->frame_rate = -1; + vstatus->error_count = 0; + vstatus->status = vp9->stat | vp9->fatal_error; + vstatus->frame_dur = vp9->frame_dur; + vstatus->bit_rate = vp9->gvs->bit_rate; + vstatus->frame_data = vp9->gvs->frame_data; + vstatus->total_data = vp9->gvs->total_data; + vstatus->frame_count = vp9->gvs->frame_count; + vstatus->error_frame_count = vp9->gvs->error_frame_count; + vstatus->drop_frame_count = vp9->gvs->drop_frame_count; + vstatus->i_decoded_frames = vp9->gvs->i_decoded_frames; + vstatus->i_lost_frames = vp9->gvs->i_lost_frames; + vstatus->i_concealed_frames = vp9->gvs->i_concealed_frames; + vstatus->p_decoded_frames = vp9->gvs->p_decoded_frames; + vstatus->p_lost_frames = vp9->gvs->p_lost_frames; + vstatus->p_concealed_frames = vp9->gvs->p_concealed_frames; + vstatus->b_decoded_frames = vp9->gvs->b_decoded_frames; + vstatus->b_lost_frames = vp9->gvs->b_lost_frames; + vstatus->b_concealed_frames = vp9->gvs->b_concealed_frames; + 
vstatus->total_data = vp9->gvs->total_data; + vstatus->samp_cnt = vp9->gvs->samp_cnt; + vstatus->offset = vp9->gvs->offset; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + return 0; +} + +int vvp9_set_isreset(struct vdec_s *vdec, int isreset) +{ + is_reset = isreset; + return 0; +} + +#if 0 +static void VP9_DECODE_INIT(void) +{ + /* enable vp9 clocks */ + WRITE_VREG(DOS_GCLK_EN3, 0xffffffff); + /* *************************************************************** */ + /* Power ON HEVC */ + /* *************************************************************** */ + /* Powerup HEVC */ + WRITE_VREG(AO_RTI_GEN_PWR_SLEEP0, + READ_VREG(AO_RTI_GEN_PWR_SLEEP0) & (~(0x3 << 6))); + WRITE_VREG(DOS_MEM_PD_HEVC, 0x0); + WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) | (0x3ffff << 2)); + WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) & (~(0x3ffff << 2))); + /* remove isolations */ + WRITE_VREG(AO_RTI_GEN_PWR_ISO0, + READ_VREG(AO_RTI_GEN_PWR_ISO0) & (~(0x3 << 10))); + +} +#endif + +static void vvp9_prot_init(struct VP9Decoder_s *pbi, u32 mask) +{ + unsigned int data32; + /* VP9_DECODE_INIT(); */ + vp9_config_work_space_hw(pbi, mask); + if (mask & HW_MASK_BACK) + init_pic_list_hw(pbi); + + vp9_init_decoder_hw(pbi, mask); + +#ifdef VP9_LPF_LVL_UPDATE + if (mask & HW_MASK_BACK) + vp9_loop_filter_init(pbi); +#endif + + if ((mask & HW_MASK_FRONT) == 0) + return; +#if 1 + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info("%s\n", __func__); + data32 = READ_VREG(HEVC_STREAM_CONTROL); + data32 = data32 | + (1 << 0)/*stream_fetch_enable*/ + ; + WRITE_VREG(HEVC_STREAM_CONTROL, data32); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + if (debug & VP9_DEBUG_BUFMGR) + pr_info("[test.c] Config STREAM_FIFO_CTL\n"); + data32 = READ_VREG(HEVC_STREAM_FIFO_CTL); + data32 = data32 | + (1 << 29) // stream_fifo_hole + ; + WRITE_VREG(HEVC_STREAM_FIFO_CTL, data32); + } +#if 0 + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x00000100) { + 
pr_info("vp9 prot init error %d\n", __LINE__); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x00000300) { + pr_info("vp9 prot init error %d\n", __LINE__); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x12345678); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x9abcdef0); + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x12345678) { + pr_info("vp9 prot init error %d\n", __LINE__); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x9abcdef0) { + pr_info("vp9 prot init error %d\n", __LINE__); + return; + } +#endif + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x000000001); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x00000300); +#endif + + + + WRITE_VREG(HEVC_WAIT_FLAG, 1); + + /* WRITE_VREG(HEVC_MPSR, 1); */ + + /* clear mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 1); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(HEVC_PSCALE_CTRL, 0); + + WRITE_VREG(DEBUG_REG1, 0x0); + /*check vps/sps/pps/i-slice in ucode*/ + WRITE_VREG(NAL_SEARCH_CTL, 0x8); + + WRITE_VREG(DECODE_STOP_POS, udebug_flag); +#ifdef SUPPORT_FB_DECODING +#ifndef FB_DECODING_TEST_SCHEDULE + if (pbi->used_stage_buf_num > 0) { + if (mask & HW_MASK_FRONT) { + data32 = READ_VREG( + HEVC_ASSIST_HED_FB_W_CTL); + data32 = data32 | + (1 << 0) /*hed_fb_wr_en*/ + ; + WRITE_VREG(HEVC_ASSIST_HED_FB_W_CTL, + data32); + } + if (mask & HW_MASK_BACK) { + data32 = READ_VREG( + HEVC_ASSIST_HED_FB_R_CTL); + while (data32 & (1 << 7)) { + /*wait finish*/ + data32 = READ_VREG( + HEVC_ASSIST_HED_FB_R_CTL); + } + data32 &= (~(0x1 << 0)); + /*hed_fb_rd_addr_auto_rd*/ + data32 &= (~(0x1 << 1)); + /*rd_id = 0, hed_rd_map_auto_halt_num, + after wr 2 ready, then start reading*/ + data32 |= (0x2 << 16); + WRITE_VREG(HEVC_ASSIST_HED_FB_R_CTL, + data32); + + data32 |= (0x1 << 11); /*hed_rd_map_auto_halt_en*/ + data32 |= (0x1 << 1); /*hed_fb_rd_addr_auto_rd*/ + data32 |= (0x1 << 0); 
/*
 * vvp9_local_init() - software-side state initialization.
 *
 * Allocates the loop-filter buffers and the gvs statistics block,
 * resets the per-instance counters, derives frame_dur/frame_ar from
 * the amstream sys_info, primes the display/newframe kfifos with the
 * static vframe pool, and finally calls vp9_local_init().
 *
 * Returns the vp9_local_init() result, or -1 on allocation failure.
 */
static int vvp9_local_init(struct VP9Decoder_s *pbi)
{
	int i;
	int ret;
	int width, height;
	if (alloc_lf_buf(pbi) < 0)
		return -1;

	pbi->gvs = vzalloc(sizeof(struct vdec_info));
	if (NULL == pbi->gvs) {
		pr_info("the struct of vdec status malloc failed.\n");
		return -1;
	}
	vdec_set_vframe_comm(hw_to_vdec(pbi), DRIVER_NAME);
#ifdef DEBUG_PTS
	pbi->pts_missed = 0;
	pbi->pts_hit = 0;
#endif
	/* Reset per-run bookkeeping. */
	pbi->new_frame_displayed = 0;
	pbi->last_put_idx = -1;
	pbi->saved_resolution = 0;
	pbi->get_frame_dur = false;
	on_no_keyframe_skiped = 0;
	pbi->duration_from_pts_done = 0;
	pbi->vp9_first_pts_ready = 0;
	pbi->frame_cnt_window = 0;
	width = pbi->vvp9_amstream_dec_info.width;
	height = pbi->vvp9_amstream_dec_info.height;
	/* rate == 0 means unknown; default frame_dur 3200 (30 fps in 1/96000 s). */
	pbi->frame_dur =
		(pbi->vvp9_amstream_dec_info.rate ==
		0) ? 3200 : pbi->vvp9_amstream_dec_info.rate;
	if (width && height)
		pbi->frame_ar = height * 0x100 / width;
/*
 *TODO:FOR VERSION
 */
	pr_info("vp9: ver (%d,%d) decinfo: %dx%d rate=%d\n", vp9_version,
		0, width, height, pbi->frame_dur);

	if (pbi->frame_dur == 0)
		pbi->frame_dur = 96000 / 24;

	INIT_KFIFO(pbi->display_q);
	INIT_KFIFO(pbi->newframe_q);

	/* Every pool entry starts unused (index -1) and queued as "new". */
	for (i = 0; i < VF_POOL_SIZE; i++) {
		const struct vframe_s *vf = &pbi->vfpool[i];

		pbi->vfpool[i].index = -1;
		kfifo_put(&pbi->newframe_q, vf);
	}

	ret = vp9_local_init(pbi);

	/* Unknown input rate also marks the PTS stream as unstable. */
	if (!pbi->pts_unstable) {
		pbi->pts_unstable =
			(pbi->vvp9_amstream_dec_info.rate == 0)?1:0;
		pr_info("set pts unstable\n");
	}

	return ret;
}
= vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + if (get_firmware_data(VIDEO_DEC_VP9_MMU, fw->data) < 0) { + pr_err("get firmware fail.\n"); + vfree(fw); + return -1; + } + + fw->len = fw_size; + + INIT_WORK(&pbi->set_clk_work, vp9_set_clk); + timer_setup(&pbi->timer, vvp9_put_timer_func, 0); + +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) { + pbi->timer.expires = jiffies + PUT_INTERVAL; + + /*add_timer(&pbi->timer); + + pbi->stat |= STAT_TIMER_ARM; + pbi->stat |= STAT_ISR_REG;*/ + + INIT_WORK(&pbi->work, vp9_work); + INIT_WORK(&pbi->recycle_mmu_work, vp9_recycle_mmu_work); +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0) + INIT_WORK(&pbi->s1_work, vp9_s1_work); +#endif + pbi->fw = fw; + + /* picture list init.*/ + pbi->dec_result = DEC_INIT_PICLIST; + vdec_schedule_work(&pbi->work); + + return 0; + } +#endif + amhevc_enable(); + + init_pic_list(pbi); + + ret = amhevc_loadmc_ex(VFORMAT_VP9, NULL, fw->data); + if (ret < 0) { + amhevc_disable(); + vfree(fw); + pr_err("VP9: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
/*
 * vmvp9_stop() - teardown for the multi-instance path.
 *
 * Order matters: stop the HEVC core first, then free the IRQ, then
 * kill the timer, then unhook the vframe provider, and only after all
 * producers are quiesced cancel the workqueues and release memory.
 */
static int vmvp9_stop(struct VP9Decoder_s *pbi)
{
	pbi->init_flag = 0;

	if (pbi->stat & STAT_VDEC_RUN) {
		amhevc_stop();
		pbi->stat &= ~STAT_VDEC_RUN;
	}
	if (pbi->stat & STAT_ISR_REG) {
		vdec_free_irq(VDEC_IRQ_0, (void *)pbi);
		pbi->stat &= ~STAT_ISR_REG;
	}
	if (pbi->stat & STAT_TIMER_ARM) {
		del_timer_sync(&pbi->timer);
		pbi->stat &= ~STAT_TIMER_ARM;
	}

	/* v4l mode does not register the legacy vframe provider. */
	if (!pbi->is_used_v4l && (pbi->stat & STAT_VF_HOOK)) {
		if (!is_reset)
			vf_notify_receiver(pbi->provider_name,
				VFRAME_EVENT_PROVIDER_FR_END_HINT,
				NULL);

		vf_unreg_provider(&vvp9_vf_prov);
		pbi->stat &= ~STAT_VF_HOOK;
	}
	vp9_local_uninit(pbi);
	reset_process_time(pbi);
	/* Flush every deferred work item before freeing buffers. */
	cancel_work_sync(&pbi->work);
	cancel_work_sync(&pbi->recycle_mmu_work);
#ifdef SUPPORT_FB_DECODING
	if (pbi->used_stage_buf_num > 0)
		cancel_work_sync(&pbi->s1_work);
#endif
	cancel_work_sync(&pbi->set_clk_work);
	uninit_mmu_buffers(pbi);
	if (pbi->fw)
		vfree(pbi->fw);
	pbi->fw = NULL;
	return 0;
}

/*
 * vvp9_stop() - teardown for the single-instance (platform driver)
 * path.  Same ordering as vmvp9_stop(); additionally masks the
 * mailbox interrupt before freeing the IRQ in non-multi-instance
 * mode, and disables the HEVC core when no instance work remains.
 */
static int vvp9_stop(struct VP9Decoder_s *pbi)
{

	pbi->init_flag = 0;
	pbi->first_sc_checked = 0;
	if (pbi->stat & STAT_VDEC_RUN) {
		amhevc_stop();
		pbi->stat &= ~STAT_VDEC_RUN;
	}

	if (pbi->stat & STAT_ISR_REG) {
#ifdef MULTI_INSTANCE_SUPPORT
		if (!pbi->m_ins_flag)
#endif
			/* Mask further mailbox interrupts before releasing the IRQ. */
			WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0);
		vdec_free_irq(VDEC_IRQ_0, (void *)pbi);
		pbi->stat &= ~STAT_ISR_REG;
	}

	if (pbi->stat & STAT_TIMER_ARM) {
		del_timer_sync(&pbi->timer);
		pbi->stat &= ~STAT_TIMER_ARM;
	}

	if (!pbi->is_used_v4l && (pbi->stat & STAT_VF_HOOK)) {
		if (!is_reset)
			vf_notify_receiver(pbi->provider_name,
				VFRAME_EVENT_PROVIDER_FR_END_HINT,
				NULL);

		vf_unreg_provider(&vvp9_vf_prov);
		pbi->stat &= ~STAT_VF_HOOK;
	}
	vp9_local_uninit(pbi);

	cancel_work_sync(&pbi->set_clk_work);
#ifdef MULTI_INSTANCE_SUPPORT
	if (pbi->m_ins_flag) {
#ifdef SUPPORT_FB_DECODING
		if (pbi->used_stage_buf_num > 0)
			cancel_work_sync(&pbi->s1_work);
#endif
		cancel_work_sync(&pbi->work);
		cancel_work_sync(&pbi->recycle_mmu_work);
	} else
		amhevc_disable();
#else
	amhevc_disable();
#endif
	uninit_mmu_buffers(pbi);

	vfree(pbi->fw);
	pbi->fw = NULL;
	return 0;
}
+ CODEC_MM_FLAGS_TVP : 0; + int buf_size = vp9_max_mmu_buf_size(pbi->max_pic_w, pbi->max_pic_h); + + pbi->need_cache_size = buf_size * SZ_1M; + pbi->sc_start_time = get_jiffies_64(); + if (pbi->mmu_enable && !pbi->is_used_v4l) { + pbi->mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME, + pbi->index, FRAME_BUFFERS, + pbi->need_cache_size, + tvp_flag + ); + if (!pbi->mmu_box) { + pr_err("vp9 alloc mmu box failed!!\n"); + return -1; + } + } +#ifdef VP9_10B_MMU_DW + if (pbi->dw_mmu_enable && !pbi->is_used_v4l) { + pbi->mmu_box_dw = decoder_mmu_box_alloc_box(DRIVER_NAME, + pbi->index, FRAME_BUFFERS, + pbi->need_cache_size, + tvp_flag + ); + if (!pbi->mmu_box_dw) { + pr_err("vp9 alloc mmu dw box failed!!\n"); + } + } +#endif + pbi->bmmu_box = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + pbi->index, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + tvp_flag); + if (!pbi->bmmu_box) { + pr_err("vp9 alloc bmmu box failed!!\n"); + return -1; + } + return 0; +} + +static struct VP9Decoder_s *gHevc; + +static int amvdec_vp9_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct BUF_s BUF[MAX_BUF_NUM]; + struct VP9Decoder_s *pbi; + int ret; +#ifndef MULTI_INSTANCE_SUPPORT + int i; +#endif + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL || + get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_TXL || + get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) { + pr_info("vp9 unsupported on cpu 0x%x\n", get_cpu_major_id()); + return -EINVAL; + } + pr_debug("%s\n", __func__); + + mutex_lock(&vvp9_mutex); + pbi = vzalloc(sizeof(struct VP9Decoder_s)); + if (pbi == NULL) { + pr_info("\namvdec_vp9 device data allocation failed\n"); + mutex_unlock(&vvp9_mutex); + return -ENOMEM; + } + + gHevc = pbi; + memcpy(&BUF[0], &pbi->m_BUF[0], sizeof(struct BUF_s) * MAX_BUF_NUM); + memset(pbi, 0, sizeof(struct VP9Decoder_s)); + memcpy(&pbi->m_BUF[0], &BUF[0], sizeof(struct BUF_s) * MAX_BUF_NUM); + 
+ pbi->init_flag = 0; + pbi->first_sc_checked= 0; + + if (!vdec_is_support_4k()) { + pbi->max_pic_w = 1920; + pbi->max_pic_h = 1088; + } else if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SM1) { + pbi->max_pic_w = 4096; + pbi->max_pic_h = 2304; + } else { + pbi->max_pic_w = 8192; + pbi->max_pic_h = 4608; + } + if (pdata->sys_info) { + pbi->vvp9_amstream_dec_info = *pdata->sys_info; + if ((pbi->vvp9_amstream_dec_info.width != 0) && + (pbi->vvp9_amstream_dec_info.height != 0)) { + pbi->max_pic_w = pbi->vvp9_amstream_dec_info.width; + pbi->max_pic_h = pbi->vvp9_amstream_dec_info.height; + } + } else { + pbi->vvp9_amstream_dec_info.width = 0; + pbi->vvp9_amstream_dec_info.height = 0; + pbi->vvp9_amstream_dec_info.rate = 30; + } + +#ifdef MULTI_INSTANCE_SUPPORT + pbi->eos = 0; + pbi->start_process_time = 0; + pbi->timeout_num = 0; +#endif + pbi->fatal_error = 0; + pbi->show_frame_num = 0; + if (pdata == NULL) { + pr_info("\namvdec_vp9 memory resource undefined.\n"); + vfree(pbi); + mutex_unlock(&vvp9_mutex); + return -EFAULT; + } + pbi->m_ins_flag = 0; +#ifdef MULTI_INSTANCE_SUPPORT + pbi->platform_dev = pdev; + platform_set_drvdata(pdev, pdata); +#endif + pbi->double_write_mode = double_write_mode; + pbi->mmu_enable = 1; +#ifdef VP9_10B_MMU_DW + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) { + pbi->dw_mmu_enable = + (get_double_write_mode(pbi) & 0x20) ? 
1 : 0; + } else { + pbi->dw_mmu_enable = 0; + } +#endif + if (amvdec_vp9_mmu_init(pbi) < 0) { + vfree(pbi); + mutex_unlock(&vvp9_mutex); + pr_err("vp9 alloc bmmu box failed!!\n"); + return -1; + } + + ret = decoder_bmmu_box_alloc_buf_phy(pbi->bmmu_box, WORK_SPACE_BUF_ID, + work_buf_size, DRIVER_NAME, &pdata->mem_start); + if (ret < 0) { + uninit_mmu_buffers(pbi); + vfree(pbi); + mutex_unlock(&vvp9_mutex); + return ret; + } + pbi->buf_size = work_buf_size; + +#ifdef MULTI_INSTANCE_SUPPORT + pbi->buf_start = pdata->mem_start; +#else + if (!pbi->mmu_enable) + pbi->mc_buf_spec.buf_end = pdata->mem_start + pbi->buf_size; + + for (i = 0; i < WORK_BUF_SPEC_NUM; i++) + amvvp9_workbuff_spec[i].start_adr = pdata->mem_start; +#endif + + + if (debug) { + pr_info("===VP9 decoder mem resource 0x%lx size 0x%x\n", + pdata->mem_start, pbi->buf_size); + } + pbi->no_head = no_head; +#ifdef MULTI_INSTANCE_SUPPORT + pbi->cma_dev = pdata->cma_dev; +#else + cma_dev = pdata->cma_dev; +#endif + /* config endian */ + pbi->endian = HEVC_CONFIG_LITTLE_ENDIAN; + if (is_support_vdec_canvas()) + pbi->endian = HEVC_CONFIG_BIG_ENDIAN; + if (endian) + pbi->endian = endian; + +#ifdef MULTI_INSTANCE_SUPPORT + pdata->private = pbi; + pdata->dec_status = vvp9_dec_status; + pdata->set_isreset = vvp9_set_isreset; + is_reset = 0; + if (vvp9_init(pdata) < 0) { +#else + if (vvp9_init(pbi) < 0) { +#endif + pr_info("\namvdec_vp9 init failed.\n"); + vp9_local_uninit(pbi); + uninit_mmu_buffers(pbi); + vfree(pbi); + pdata->dec_status = NULL; + mutex_unlock(&vvp9_mutex); + return -ENODEV; + } + /*set the max clk for smooth playing...*/ + hevc_source_changed(VFORMAT_VP9, + 4096, 2048, 60); + mutex_unlock(&vvp9_mutex); + + return 0; +} + +static void vdec_fence_release(struct VP9Decoder_s *pbi, + struct vdec_sync *sync) +{ + ulong expires; + + /* notify signal to wake up all fences. 
/*
 * amvdec_vp9_remove() - platform-driver remove; undoes
 * amvdec_vp9_probe() using the global gHevc context.
 *
 * Stops the decoder, releases clocks via hevc_source_changed(0,0,0),
 * frees per-frame canvases in parallel-dec mode, signals any pending
 * fences, and frees the context under vvp9_mutex.
 */
static int amvdec_vp9_remove(struct platform_device *pdev)
{
	struct VP9Decoder_s *pbi = gHevc;
	struct vdec_s *vdec = hw_to_vdec(pbi);
	int i;

	if (debug)
		pr_info("amvdec_vp9_remove\n");

	mutex_lock(&vvp9_mutex);

	vvp9_stop(pbi);

	/* Report "no source" so the clock framework can relax. */
	hevc_source_changed(VFORMAT_VP9, 0, 0, 0);

	if (vdec->parallel_dec == 1) {
		/* Return both Y and UV canvases of every frame buffer. */
		for (i = 0; i < FRAME_BUFFERS; i++) {
			vdec->free_canvas_ex(pbi->common.buffer_pool->
				frame_bufs[i].buf.y_canvas_index, vdec->id);
			vdec->free_canvas_ex(pbi->common.buffer_pool->
				frame_bufs[i].buf.uv_canvas_index, vdec->id);
		}
	}

#ifdef DEBUG_PTS
	pr_info("pts missed %ld, pts hit %ld, duration %d\n",
		pbi->pts_missed, pbi->pts_hit, pbi->frame_dur);
#endif
	mem_map_mode = 0;

	/* Wake all outstanding fences before the context goes away. */
	if (pbi->enable_fence)
		vdec_fence_release(pbi, vdec->sync);

	vfree(pbi);
	mutex_unlock(&vvp9_mutex);

	return 0;
}

/****************************************/

/* Platform driver glue for the single-instance decoder. */
static struct platform_driver amvdec_vp9_driver = {
	.probe = amvdec_vp9_probe,
	.remove = amvdec_vp9_remove,
#ifdef CONFIG_PM
	.suspend = amhevc_suspend,
	.resume = amhevc_resume,
#endif
	.driver = {
		.name = DRIVER_NAME,
	}
};

/* Codec profile advertised for the legacy (single-instance) driver. */
static struct codec_profile_t amvdec_vp9_profile = {
	.name = "vp9",
	.profile = ""
};

/* Filled in elsewhere for the multi-instance driver registration. */
static struct codec_profile_t amvdec_vp9_profile_mult;
/*
 * dump_data() - debug hex dump of the current input chunk.
 *
 * Maps the chunk (codec_mm_vmap when the block is not already
 * mapped), prints the alignment padding bytes that precede the
 * chunk, then the chunk itself in 16-byte rows, and unmaps.
 */
static void dump_data(struct VP9Decoder_s *pbi, int size)
{
	int jj;
	u8 *data = NULL;
	/* Bytes inserted before the chunk to reach VDEC_FIFO_ALIGN. */
	int padding_size = pbi->chunk->offset &
		(VDEC_FIFO_ALIGN - 1);

	if (!pbi->chunk->block->is_mapped)
		data = codec_mm_vmap(pbi->chunk->block->start +
			pbi->chunk->offset, size);
	else
		data = ((u8 *)pbi->chunk->block->start_virt) +
			pbi->chunk->offset;

	vp9_print(pbi, 0, "padding: ");
	for (jj = padding_size; jj > 0; jj--)
		vp9_print_cont(pbi,
			0,
			"%02x ", *(data - jj));
	vp9_print_cont(pbi, 0, "data adr %p\n",
		data);

	for (jj = 0; jj < size; jj++) {
		if ((jj & 0xf) == 0)
			vp9_print(pbi,
				0,
				"%06x:", jj);
		vp9_print_cont(pbi,
			0,
			"%02x ", data[jj]);
		if (((jj + 1) & 0xf) == 0)
			vp9_print(pbi,
				0,
				"\n");
	}
	vp9_print(pbi,
		0,
		"\n");

	if (!pbi->chunk->block->is_mapped)
		codec_mm_unmap_phyaddr(data);
}

/*
 * vp9_work() - main deferred-work state machine.
 *
 * Dispatches on pbi->dec_result:
 *   DEC_INIT_PICLIST            - one-shot picture-list init.
 *   DEC_RESULT_NEED_MORE_BUFFER - wait for a free buffer, then resume
 *                                 decoding (re-reads RPM params).
 *   DEC_RESULT_GET_DATA[_RETRY] - pull the next input chunk and kick
 *                                 the hardware, or reschedule.
 *   DEC_RESULT_DONE             - account the finished frame.
 *   DEC_RESULT_AGAIN            - retry, or promote to EOS when input
 *                                 is exhausted.
 *   DEC_RESULT_EOS              - flush buffer manager, notify v4l.
 *   DEC_RESULT_FORCE_EXIT       - stop HW and free the IRQ.
 * The common tail stops the vdec core, kills the timer, releases the
 * scheduler cores via vdec_core_finish_run(), and re-triggers
 * scheduling.
 */
static void vp9_work(struct work_struct *work)
{
	struct VP9Decoder_s *pbi = container_of(work,
		struct VP9Decoder_s, work);
	struct vdec_s *vdec = hw_to_vdec(pbi);
	/* finished decoding one frame or error,
	 * notify vdec core to switch context
	 */
	ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_WORKER_START);
	if (pbi->dec_result == DEC_RESULT_AGAIN)
		ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_WORKER_AGAIN);
	vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL,
		"%s dec_result %d %x %x %x\n",
		__func__,
		pbi->dec_result,
		READ_VREG(HEVC_STREAM_LEVEL),
		READ_VREG(HEVC_STREAM_WR_PTR),
		READ_VREG(HEVC_STREAM_RD_PTR));

	if (pbi->dec_result == DEC_INIT_PICLIST) {
		init_pic_list(pbi);
		pbi->pic_list_init_done = true;
		return;
	}

	if (pbi->dec_result == DEC_RESULT_NEED_MORE_BUFFER) {
		reset_process_time(pbi);
		if (!get_free_buf_count(pbi)) {
			/* Still nothing free - poll again via the workqueue. */
			pbi->dec_result = DEC_RESULT_NEED_MORE_BUFFER;
			vdec_schedule_work(&pbi->work);
		} else {
			int i;

			if (pbi->mmu_enable)
				vp9_recycle_mmu_buf_tail(pbi);

			if (pbi->frame_count > 0)
				vp9_bufmgr_postproc(pbi);

			/* Re-read header params from RPM (words byte-swapped). */
			for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) {
				int ii;
				for (ii = 0; ii < 4; ii++)
					pbi->vp9_param.l.data[i + ii] =
						pbi->rpm_ptr[i + 3 - ii];
			}
			continue_decoding(pbi);
			pbi->postproc_done = 0;
			pbi->process_busy = 0;

			start_process_time(pbi);
		}
		return;
	}

	if (((pbi->dec_result == DEC_RESULT_GET_DATA) ||
		(pbi->dec_result == DEC_RESULT_GET_DATA_RETRY))
		&& (hw_to_vdec(pbi)->next_status !=
		VDEC_STATUS_DISCONNECTED)) {
		if (!vdec_has_more_input(vdec)) {
			pbi->dec_result = DEC_RESULT_EOS;
			vdec_schedule_work(&pbi->work);
			return;
		}

		if (pbi->dec_result == DEC_RESULT_GET_DATA) {
			vp9_print(pbi, PRINT_FLAG_VDEC_STATUS,
				"%s DEC_RESULT_GET_DATA %x %x %x\n",
				__func__,
				READ_VREG(HEVC_STREAM_LEVEL),
				READ_VREG(HEVC_STREAM_WR_PTR),
				READ_VREG(HEVC_STREAM_RD_PTR));
			vdec_vframe_dirty(vdec, pbi->chunk);
			vdec_clean_input(vdec);
		}

		if (get_free_buf_count(pbi) >=
			pbi->run_ready_min_buf_num) {
			int r;
			int decode_size;
			r = vdec_prepare_input(vdec, &pbi->chunk);
			if (r < 0) {
				pbi->dec_result = DEC_RESULT_GET_DATA_RETRY;

				vp9_print(pbi,
					PRINT_FLAG_VDEC_DETAIL,
					"amvdec_vh265: Insufficient data\n");

				vdec_schedule_work(&pbi->work);
				return;
			}
			pbi->dec_result = DEC_RESULT_NONE;
			vp9_print(pbi, PRINT_FLAG_VDEC_STATUS,
				"%s: chunk size 0x%x sum 0x%x\n",
				__func__, r,
				(debug & PRINT_FLAG_VDEC_STATUS) ?
				get_data_check_sum(pbi, r) : 0
				);

			if (debug & PRINT_FLAG_VDEC_DATA)
				dump_data(pbi, pbi->chunk->size);

			/* Include alignment padding in the size fed to the HW. */
			decode_size = pbi->chunk->size +
				(pbi->chunk->offset & (VDEC_FIFO_ALIGN - 1));

			WRITE_VREG(HEVC_DECODE_SIZE,
				READ_VREG(HEVC_DECODE_SIZE) + decode_size);

			vdec_enable_input(vdec);

			WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE);

			start_process_time(pbi);

		} else{
			pbi->dec_result = DEC_RESULT_GET_DATA_RETRY;

			vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL,
				"amvdec_vh265: Insufficient data\n");

			vdec_schedule_work(&pbi->work);
		}
		return;
	} else if (pbi->dec_result == DEC_RESULT_DONE) {
#ifdef SUPPORT_FB_DECODING
		if (pbi->used_stage_buf_num > 0) {
#ifndef FB_DECODING_TEST_SCHEDULE
			if (!is_s2_decoding_finished(pbi)) {
				vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL,
					"s2 decoding not done, check again later\n");
				vdec_schedule_work(&pbi->work);
			}
#endif
			inc_s2_pos(pbi);
			if (mcrcc_cache_alg_flag)
				dump_hit_rate(pbi);
		}
#endif
		/* if (!pbi->ctx_valid)
			pbi->ctx_valid = 1; */
		pbi->slice_idx++;
		pbi->frame_count++;
		pbi->process_state = PROC_STATE_INIT;
		decode_frame_count[pbi->index] = pbi->frame_count;

		if (pbi->mmu_enable)
			pbi->used_4k_num =
				(READ_VREG(HEVC_SAO_MMU_STATUS) >> 16);
		vp9_print(pbi, PRINT_FLAG_VDEC_STATUS,
			"%s (===> %d) dec_result %d %x %x %x shiftbytes 0x%x decbytes 0x%x\n",
			__func__,
			pbi->frame_count,
			pbi->dec_result,
			READ_VREG(HEVC_STREAM_LEVEL),
			READ_VREG(HEVC_STREAM_WR_PTR),
			READ_VREG(HEVC_STREAM_RD_PTR),
			READ_VREG(HEVC_SHIFT_BYTE_COUNT),
			READ_VREG(HEVC_SHIFT_BYTE_COUNT) -
			pbi->start_shift_bytes
			);
		vdec_vframe_dirty(hw_to_vdec(pbi), pbi->chunk);
	} else if (pbi->dec_result == DEC_RESULT_AGAIN) {
		/*
			stream base: stream buf empty or timeout
			frame base: vdec_prepare_input fail
		*/
		if (!vdec_has_more_input(vdec)) {
			pbi->dec_result = DEC_RESULT_EOS;
			vdec_schedule_work(&pbi->work);
			return;
		}
	} else if (pbi->dec_result == DEC_RESULT_EOS) {
		vp9_print(pbi, PRINT_FLAG_VDEC_STATUS,
			"%s: end of stream\n",
			__func__);
		pbi->eos = 1;
		vp9_bufmgr_postproc(pbi);

		notify_v4l_eos(hw_to_vdec(pbi));

		vdec_vframe_dirty(hw_to_vdec(pbi), pbi->chunk);
	} else if (pbi->dec_result == DEC_RESULT_FORCE_EXIT) {
		vp9_print(pbi, PRINT_FLAG_VDEC_STATUS,
			"%s: force exit\n",
			__func__);
		if (pbi->stat & STAT_VDEC_RUN) {
			amhevc_stop();
			pbi->stat &= ~STAT_VDEC_RUN;
		}

		if (pbi->stat & STAT_ISR_REG) {
#ifdef MULTI_INSTANCE_SUPPORT
			if (!pbi->m_ins_flag)
#endif
				WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0);
			vdec_free_irq(VDEC_IRQ_0, (void *)pbi);
			pbi->stat &= ~STAT_ISR_REG;
		}
	}
	if (pbi->stat & STAT_VDEC_RUN) {
		amhevc_stop();
		pbi->stat &= ~STAT_VDEC_RUN;
	}

	if (pbi->stat & STAT_TIMER_ARM) {
		del_timer_sync(&pbi->timer);
		pbi->stat &= ~STAT_TIMER_ARM;
	}
	/* mark itself has all HW resource released and input released */
	ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_WORKER_END);
#ifdef SUPPORT_FB_DECODING
	if (pbi->used_stage_buf_num > 0)
		vdec_core_finish_run(hw_to_vdec(pbi), CORE_MASK_HEVC_BACK);
	else
		vdec_core_finish_run(hw_to_vdec(pbi), CORE_MASK_VDEC_1
			| CORE_MASK_HEVC
			| CORE_MASK_HEVC_FRONT
			| CORE_MASK_HEVC_BACK
			);
#else
	if (vdec->parallel_dec == 1)
		vdec_core_finish_run(vdec, CORE_MASK_HEVC);
	else
		vdec_core_finish_run(hw_to_vdec(pbi), CORE_MASK_VDEC_1
			| CORE_MASK_HEVC);
#endif
	trigger_schedule(pbi);
}
*/ +#if (!defined SUPPORT_FB_DECODING) + vvp9_prot_init(pbi, HW_MASK_FRONT | HW_MASK_BACK); +#elif (defined FB_DECODING_TEST_SCHEDULE) + vvp9_prot_init(pbi, HW_MASK_FRONT | HW_MASK_BACK); +#else + if (pbi->used_stage_buf_num > 0) + vvp9_prot_init(pbi, HW_MASK_FRONT); + else + vvp9_prot_init(pbi, HW_MASK_FRONT | HW_MASK_BACK); +#endif + return 0; +} + +static bool is_avaliable_buffer(struct VP9Decoder_s *pbi) +{ + struct VP9_Common_s *const cm = &pbi->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + int i, free_count = 0; + + if (ctx->cap_pool.dec < pbi->used_buf_num) { + free_count = v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx); + if (free_count && + !ctx->fb_ops.query(&ctx->fb_ops, &pbi->fb_token)) { + return false; + } + } + + for (i = 0; i < pbi->used_buf_num; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + frame_bufs[i].buf.cma_alloc_addr) { + free_count++; + } + } + + return free_count < pbi->run_ready_min_buf_num ? 0 : 1; +} + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + int tvp = vdec_secure(hw_to_vdec(pbi)) ? 
+ CODEC_MM_FLAGS_TVP : 0; + unsigned long ret = 0; + + if (!(pbi->pic_list_init_done && pbi->pic_list_init_done2) || pbi->eos) + return ret; + if (!pbi->first_sc_checked && pbi->mmu_enable) { + int size; + void * mmu_box; + + if (pbi->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + mmu_box = ctx->mmu_box; + } else + mmu_box = pbi->mmu_box; + + size = decoder_mmu_box_sc_check(mmu_box, tvp); + pbi->first_sc_checked = 1; + vp9_print(pbi, 0, "vp9 cached=%d need_size=%d speed= %d ms\n", + size, (pbi->need_cache_size >> PAGE_SHIFT), + (int)(get_jiffies_64() - pbi->sc_start_time) * 1000/HZ); + } + +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0) { + if (mask & CORE_MASK_HEVC_FRONT) { + if (get_free_stage_buf_num(pbi) > 0 + && mv_buf_available(pbi)) + ret |= CORE_MASK_HEVC_FRONT; + } + if (mask & CORE_MASK_HEVC_BACK) { + if (s2_buf_available(pbi) && + (get_free_buf_count(pbi) >= + pbi->run_ready_min_buf_num)) { + ret |= CORE_MASK_HEVC_BACK; + pbi->back_not_run_ready = 0; + } else + pbi->back_not_run_ready = 1; +#if 0 + if (get_free_buf_count(pbi) < + run_ready_min_buf_num) + dump_pic_list(pbi); +#endif + } + } else if (get_free_buf_count(pbi) >= + pbi->run_ready_min_buf_num) + ret = CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_HEVC_FRONT + | CORE_MASK_HEVC_BACK; + + if (ret & CORE_MASK_HEVC_FRONT) + not_run_ready[pbi->index] = 0; + else + not_run_ready[pbi->index]++; + + if (ret & CORE_MASK_HEVC_BACK) + not_run2_ready[pbi->index] = 0; + else + not_run2_ready[pbi->index]++; + + vp9_print(pbi, + PRINT_FLAG_VDEC_DETAIL, "%s mask %lx=>%lx (%d %d %d %d)\r\n", + __func__, mask, ret, + get_free_stage_buf_num(pbi), + mv_buf_available(pbi), + s2_buf_available(pbi), + get_free_buf_count(pbi) + ); + + return ret; + +#else + if (get_free_buf_count(pbi) >= + pbi->run_ready_min_buf_num) { + if (vdec->parallel_dec == 1) + ret = CORE_MASK_HEVC; + else + ret = CORE_MASK_VDEC_1 | CORE_MASK_HEVC; + } + + if (pbi->is_used_v4l) { + 
struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + + if (ctx->param_sets_from_ucode) { + if (pbi->v4l_params_parsed) { + if (ctx->cap_pool.dec < pbi->used_buf_num) { + if (is_avaliable_buffer(pbi)) + ret = CORE_MASK_HEVC; + else + ret = 0; + } + } else { + if (ctx->v4l_resolution_change) + ret = 0; + } + } else if (ctx->cap_pool.in < ctx->dpb_size) { + if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < + pbi->run_ready_min_buf_num) + ret = 0; + } + } + + if (ret) + not_run_ready[pbi->index] = 0; + else + not_run_ready[pbi->index]++; + + vp9_print(pbi, + PRINT_FLAG_VDEC_DETAIL, "%s mask %lx=>%lx\r\n", + __func__, mask, ret); + return ret; +#endif +} + +static void vp9_frame_mode_pts_save(struct VP9Decoder_s *pbi) +{ + int i = 0; + + if (pbi->chunk == NULL) + return; + vp9_print(pbi, VP9_DEBUG_OUT_PTS, + "run front: pts %d, pts64 %lld, ts: %lld\n", + pbi->chunk->pts, pbi->chunk->pts64, pbi->chunk->timestamp); + for (i = (FRAME_BUFFERS - 1); i > 0; i--) { + pbi->frame_mode_pts_save[i] = pbi->frame_mode_pts_save[i - 1]; + pbi->frame_mode_pts64_save[i] = pbi->frame_mode_pts64_save[i - 1]; + } + pbi->frame_mode_pts_save[0] = pbi->chunk->pts; + pbi->frame_mode_pts64_save[0] = pbi->chunk->pts64; + + if (pbi->is_used_v4l && !v4l_bitstream_id_enable) + pbi->frame_mode_pts64_save[0] = pbi->chunk->timestamp; +} + +static void run_front(struct vdec_s *vdec) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + int ret, size; + + run_count[pbi->index]++; + /* pbi->chunk = vdec_prepare_input(vdec); */ +#if (!defined SUPPORT_FB_DECODING) + hevc_reset_core(vdec); +#elif (defined FB_DECODING_TEST_SCHEDULE) + hevc_reset_core(vdec); +#else + if (pbi->used_stage_buf_num > 0) + fb_reset_core(vdec, HW_MASK_FRONT); + else + hevc_reset_core(vdec); +#endif + + size = vdec_prepare_input(vdec, &pbi->chunk); + if (size < 0) { + input_empty[pbi->index]++; + + pbi->dec_result = DEC_RESULT_AGAIN; + + vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL, + "ammvdec_vh265: 
Insufficient data\n"); + + vdec_schedule_work(&pbi->work); + return; + } + + input_empty[pbi->index] = 0; + pbi->dec_result = DEC_RESULT_NONE; + pbi->start_shift_bytes = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + + vp9_frame_mode_pts_save(pbi); + + if (debug & PRINT_FLAG_VDEC_STATUS) { + int ii; + vp9_print(pbi, 0, + "%s (%d): size 0x%x (0x%x 0x%x) sum 0x%x (%x %x %x %x %x) bytes 0x%x", + __func__, + pbi->frame_count, size, + pbi->chunk ? pbi->chunk->size : 0, + pbi->chunk ? pbi->chunk->offset : 0, + pbi->chunk ? ((vdec_frame_based(vdec) && + (debug & PRINT_FLAG_VDEC_STATUS)) ? + get_data_check_sum(pbi, size) : 0) : 0, + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + pbi->start_shift_bytes); + + if (!vdec_secure(hw_to_vdec(pbi)) && + vdec_frame_based(vdec) && pbi->chunk) { + u8 *data = NULL; + + if (!pbi->chunk->block->is_mapped) + data = codec_mm_vmap(pbi->chunk->block->start + + pbi->chunk->offset, 8); + else + data = ((u8 *)pbi->chunk->block->start_virt) + + pbi->chunk->offset; + + vp9_print_cont(pbi, 0, "data adr %p:", + data); + for (ii = 0; ii < 8; ii++) + vp9_print_cont(pbi, 0, "%02x ", + data[ii]); + + if (!pbi->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + vp9_print_cont(pbi, 0, "\r\n"); + } + ATRACE_COUNTER(pbi->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_START); + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else { + ret = amhevc_loadmc_ex(VFORMAT_VP9, NULL, pbi->fw->data); + if (ret < 0) { + amhevc_disable(); + vp9_print(pbi, PRINT_FLAG_ERROR, + "VP9: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + pbi->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&pbi->work); + return; + } + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_VP9; + } + ATRACE_COUNTER(pbi->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_END); + + ATRACE_COUNTER(pbi->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_START); + if (vp9_hw_ctx_restore(pbi) < 0) { + vdec_schedule_work(&pbi->work); + return; + } + ATRACE_COUNTER(pbi->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_END); + vdec_enable_input(vdec); + + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + + if (vdec_frame_based(vdec)) { + if (debug & PRINT_FLAG_VDEC_DATA) + dump_data(pbi, pbi->chunk->size); + + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, 0); + size = pbi->chunk->size + + (pbi->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = pbi->chunk->size; + } + WRITE_VREG(HEVC_DECODE_SIZE, size); + WRITE_VREG(HEVC_DECODE_COUNT, pbi->slice_idx); + pbi->init_flag = 1; + + vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL, + "%s: start hevc (%x %x %x)\n", + __func__, + READ_VREG(HEVC_DEC_STATUS_REG), + READ_VREG(HEVC_MPC_E), + READ_VREG(HEVC_MPSR)); + + start_process_time(pbi); + mod_timer(&pbi->timer, jiffies); + pbi->stat |= STAT_TIMER_ARM; + pbi->stat |= STAT_ISR_REG; + amhevc_start(); + pbi->stat |= STAT_VDEC_RUN; +} + +#ifdef SUPPORT_FB_DECODING +static void mpred_process(struct VP9Decoder_s *pbi) +{ + union param_u *params = &pbi->s1_param; + unsigned char use_prev_frame_mvs = + !params->p.error_resilient_mode && + params->p.width == pbi->s1_width && + params->p.height == pbi->s1_height && + !pbi->s1_intra_only && + pbi->s1_last_show_frame && + (pbi->s1_frame_type != KEY_FRAME); + pbi->s1_width = params->p.width; + pbi->s1_height = params->p.height; + pbi->s1_frame_type = params->p.frame_type; + pbi->s1_intra_only = + (params->p.show_frame || + params->p.show_existing_frame) + ? 
0 : params->p.intra_only; + if ((pbi->s1_frame_type != KEY_FRAME) + && (!pbi->s1_intra_only)) { + unsigned int data32; + int mpred_mv_rd_end_addr; + + mpred_mv_rd_end_addr = + pbi->s1_mpred_mv_wr_start_addr_pre + + (pbi->lcu_total * MV_MEM_UNIT); + + WRITE_VREG(HEVC_MPRED_CTRL3, 0x24122412); + WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, + pbi->work_space_buf-> + mpred_above.buf_start); + + data32 = READ_VREG(HEVC_MPRED_CTRL4); + + data32 &= (~(1 << 6)); + data32 |= (use_prev_frame_mvs << 6); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); + + WRITE_VREG(HEVC_MPRED_MV_WR_START_ADDR, + pbi->s1_mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_WPTR, + pbi->s1_mpred_mv_wr_start_addr); + + WRITE_VREG(HEVC_MPRED_MV_RD_START_ADDR, + pbi->s1_mpred_mv_wr_start_addr_pre); + WRITE_VREG(HEVC_MPRED_MV_RPTR, + pbi->s1_mpred_mv_wr_start_addr_pre); + + WRITE_VREG(HEVC_MPRED_MV_RD_END_ADDR, + mpred_mv_rd_end_addr); + + } else + clear_mpred_hw(pbi); + + if (!params->p.show_existing_frame) { + pbi->s1_mpred_mv_wr_start_addr_pre = + pbi->s1_mpred_mv_wr_start_addr; + pbi->s1_last_show_frame = + params->p.show_frame; + if (pbi->s1_mv_buf_index_pre_pre != MV_BUFFER_NUM) + put_mv_buf(pbi, &pbi->s1_mv_buf_index_pre_pre); + pbi->s1_mv_buf_index_pre_pre = + pbi->s1_mv_buf_index_pre; + pbi->s1_mv_buf_index_pre = pbi->s1_mv_buf_index; + } else + put_mv_buf(pbi, &pbi->s1_mv_buf_index); +} + +static void vp9_s1_work(struct work_struct *s1_work) +{ + struct VP9Decoder_s *pbi = container_of(s1_work, + struct VP9Decoder_s, s1_work); + vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL, + "%s dec_s1_result %d\n", + __func__, + pbi->dec_s1_result); + +#ifdef FB_DECODING_TEST_SCHEDULE + if (pbi->dec_s1_result == + DEC_S1_RESULT_TEST_TRIGGER_DONE) { + pbi->s1_test_cmd = TEST_SET_PIC_DONE; + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); + } +#endif + if (pbi->dec_s1_result == DEC_S1_RESULT_DONE || + pbi->dec_s1_result == DEC_S1_RESULT_FORCE_EXIT) { + + vdec_core_finish_run(hw_to_vdec(pbi), + CORE_MASK_HEVC_FRONT); + + 
trigger_schedule(pbi); + /*pbi->dec_s1_result = DEC_S1_RESULT_NONE;*/ + } + +} + +static void run_back(struct vdec_s *vdec) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + int i; + run2_count[pbi->index]++; + if (debug & PRINT_FLAG_VDEC_STATUS) { + vp9_print(pbi, 0, + "%s", __func__); + } + pbi->run2_busy = 1; +#ifndef FB_DECODING_TEST_SCHEDULE + fb_reset_core(vdec, HW_MASK_BACK); + + vvp9_prot_init(pbi, HW_MASK_BACK); +#endif + vp9_recycle_mmu_buf_tail(pbi); + + if (pbi->frame_count > 0) + vp9_bufmgr_postproc(pbi); + + if (get_s2_buf(pbi) >= 0) { + for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) { + int ii; + for (ii = 0; ii < 4; ii++) + pbi->vp9_param.l.data[i + ii] = + pbi->s2_buf->rpm[i + 3 - ii]; + } +#ifndef FB_DECODING_TEST_SCHEDULE + WRITE_VREG(HEVC_ASSIST_FBD_MMU_MAP_ADDR, + pbi->stage_mmu_map_phy_addr + + pbi->s2_buf->index * STAGE_MMU_MAP_SIZE); +#endif + continue_decoding(pbi); + } + pbi->run2_busy = 0; +} +#endif + +static void run(struct vdec_s *vdec, unsigned long mask, + void (*callback)(struct vdec_s *, void *), void *arg) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_RUN_START); + vp9_print(pbi, + PRINT_FLAG_VDEC_DETAIL, "%s mask %lx\r\n", + __func__, mask); + + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + run_count[pbi->index]++; + pbi->vdec_cb_arg = arg; + pbi->vdec_cb = callback; + pbi->one_package_frame_cnt = 0; +#ifdef SUPPORT_FB_DECODING + if ((mask & CORE_MASK_HEVC) || + (mask & CORE_MASK_HEVC_FRONT)) + run_front(vdec); + + if ((pbi->used_stage_buf_num > 0) + && (mask & CORE_MASK_HEVC_BACK)) + run_back(vdec); +#else + run_front(vdec); +#endif + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_RUN_END); + +} + +static void vp9_decoder_ctx_reset(struct VP9Decoder_s *pbi) +{ + struct vdec_s *vdec = hw_to_vdec(pbi); + struct VP9_Common_s *const cm = &pbi->common; + struct RefCntBuffer_s *const frame_bufs = 
cm->buffer_pool->frame_bufs; + struct BufferPool_s *buffer_pool = cm->buffer_pool; + int i; + + cm->buffer_pool = buffer_pool; + + for (i = 0; i < FRAME_BUFFERS; ++i) { + frame_bufs[i].buf.index = i; + frame_bufs[i].ref_count = 0; + frame_bufs[i].buf.vf_ref = 0; + frame_bufs[i].buf.decode_idx = 0; + frame_bufs[i].buf.cma_alloc_addr = 0; + frame_bufs[i].buf.BUF_index = -1; + frame_bufs[i].buf.slice_type = 0; + } + + for (i = 0; i < MV_BUFFER_NUM; ++i) { + pbi->m_mv_BUF[i].used_flag = 0; + } + + for (i = 0; i < FRAME_BUFFERS; i++) { + pbi->buffer_wrap[i] = i; + } + + if (vdec->parallel_dec == 1) { + for (i = 0; i < FRAME_BUFFERS; i++) { + vdec->free_canvas_ex + (pbi->common.buffer_pool->frame_bufs[i].buf.y_canvas_index, + vdec->id); + vdec->free_canvas_ex + (pbi->common.buffer_pool->frame_bufs[i].buf.uv_canvas_index, + vdec->id); + } + } + + pbi->init_flag = 0; + pbi->first_sc_checked = 0; + pbi->fatal_error = 0; + pbi->show_frame_num = 0; + pbi->eos = 0; +} + +static void reset(struct vdec_s *vdec) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + + cancel_work_sync(&pbi->set_clk_work); + cancel_work_sync(&pbi->work); + if (pbi->stat & STAT_VDEC_RUN) { + amhevc_stop(); + pbi->stat &= ~STAT_VDEC_RUN; + } + + if (pbi->stat & STAT_TIMER_ARM) { + del_timer_sync(&pbi->timer); + pbi->stat &= ~STAT_TIMER_ARM; + } + + reset_process_time(pbi); + + vp9_local_uninit(pbi); + if (vvp9_local_init(pbi) < 0) + vp9_print(pbi, 0, "%s local_init failed \r\n", __func__); + + vp9_decoder_ctx_reset(pbi); + + vp9_print(pbi, 0, "%s\r\n", __func__); +} + +static irqreturn_t vp9_irq_cb(struct vdec_s *vdec, int irq) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + return vvp9_isr(0, pbi); +} + +static irqreturn_t vp9_threaded_irq_cb(struct vdec_s *vdec, int irq) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + return vvp9_isr_thread_fn(0, pbi); +} + +static void vp9_dump_state(struct vdec_s *vdec) +{ + struct 
VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + struct VP9_Common_s *const cm = &pbi->common; + int i; + vp9_print(pbi, 0, "====== %s\n", __func__); + + vp9_print(pbi, 0, + "width/height (%d/%d), used_buf_num %d video_signal_type 0x%x\n", + cm->width, + cm->height, + pbi->used_buf_num, + pbi->video_signal_type + ); + + vp9_print(pbi, 0, + "is_framebase(%d), eos %d, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d low_latency %d no_head %d \n", + input_frame_based(vdec), + pbi->eos, + pbi->dec_result, + decode_frame_count[pbi->index], + display_frame_count[pbi->index], + run_count[pbi->index], + not_run_ready[pbi->index], + input_empty[pbi->index], + pbi->low_latency_flag, + pbi->no_head + ); + + if (!pbi->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + vp9_print(pbi, 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + + vp9_print(pbi, 0, + "%s, newq(%d/%d), dispq(%d/%d), vf prepare/get/put (%d/%d/%d), free_buf_count %d (min %d for run_ready)\n", + __func__, + kfifo_len(&pbi->newframe_q), + VF_POOL_SIZE, + kfifo_len(&pbi->display_q), + VF_POOL_SIZE, + pbi->vf_pre_count, + pbi->vf_get_count, + pbi->vf_put_count, + get_free_buf_count(pbi), + pbi->run_ready_min_buf_num + ); + + dump_pic_list(pbi); + + for (i = 0; i < MAX_BUF_NUM; i++) { + vp9_print(pbi, 0, + "mv_Buf(%d) start_adr 0x%x size 0x%x used %d\n", + i, + pbi->m_mv_BUF[i].start_adr, + pbi->m_mv_BUF[i].size, + pbi->m_mv_BUF[i].used_flag); + } + + vp9_print(pbi, 0, + "HEVC_DEC_STATUS_REG=0x%x\n", + READ_VREG(HEVC_DEC_STATUS_REG)); + vp9_print(pbi, 0, + "HEVC_MPC_E=0x%x\n", + READ_VREG(HEVC_MPC_E)); + vp9_print(pbi, 0, + "DECODE_MODE=0x%x\n", + READ_VREG(DECODE_MODE)); + vp9_print(pbi, 0, + "NAL_SEARCH_CTL=0x%x\n", + READ_VREG(NAL_SEARCH_CTL)); + vp9_print(pbi, 0, + "HEVC_PARSER_LCU_START=0x%x\n", + 
READ_VREG(HEVC_PARSER_LCU_START)); + vp9_print(pbi, 0, + "HEVC_DECODE_SIZE=0x%x\n", + READ_VREG(HEVC_DECODE_SIZE)); + vp9_print(pbi, 0, + "HEVC_SHIFT_BYTE_COUNT=0x%x\n", + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + vp9_print(pbi, 0, + "HEVC_STREAM_START_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_START_ADDR)); + vp9_print(pbi, 0, + "HEVC_STREAM_END_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_END_ADDR)); + vp9_print(pbi, 0, + "HEVC_STREAM_LEVEL=0x%x\n", + READ_VREG(HEVC_STREAM_LEVEL)); + vp9_print(pbi, 0, + "HEVC_STREAM_WR_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_WR_PTR)); + vp9_print(pbi, 0, + "HEVC_STREAM_RD_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_RD_PTR)); + vp9_print(pbi, 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + vp9_print(pbi, 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (input_frame_based(vdec) && + (debug & PRINT_FLAG_VDEC_DATA) + ) { + int jj; + if (pbi->chunk && pbi->chunk->block && + pbi->chunk->size > 0) { + u8 *data = NULL; + + if (!pbi->chunk->block->is_mapped) + data = codec_mm_vmap( + pbi->chunk->block->start + + pbi->chunk->offset, + pbi->chunk->size); + else + data = ((u8 *)pbi->chunk->block->start_virt) + + pbi->chunk->offset; + vp9_print(pbi, 0, + "frame data size 0x%x\n", + pbi->chunk->size); + for (jj = 0; jj < pbi->chunk->size; jj++) { + if ((jj & 0xf) == 0) + vp9_print(pbi, 0, + "%06x:", jj); + vp9_print_cont(pbi, 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + vp9_print_cont(pbi, 0, + "\n"); + } + + if (!pbi->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } + +} + +static int ammvdec_vp9_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + int ret; + int config_val; + int transfer_val; + struct vframe_content_light_level_s content_light_level; + struct vframe_master_display_colour_s vf_dp; + struct VP9Decoder_s *pbi = NULL; + int i; + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL || + get_cpu_major_id() == 
AM_MESON_CPU_MAJOR_ID_TXL || + get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) { + pr_info("vp9 unsupported on cpu 0x%x\n", get_cpu_major_id()); + return -EINVAL; + } + pr_debug("%s\n", __func__); + + if (pdata == NULL) { + pr_info("\nammvdec_vp9 memory resource undefined.\n"); + return -EFAULT; + } + /*pbi = (struct VP9Decoder_s *)devm_kzalloc(&pdev->dev, + sizeof(struct VP9Decoder_s), GFP_KERNEL);*/ + memset(&vf_dp, 0, sizeof(struct vframe_master_display_colour_s)); + pbi = vmalloc(sizeof(struct VP9Decoder_s)); + if (pbi == NULL) { + pr_info("\nammvdec_vp9 device data allocation failed\n"); + return -ENOMEM; + } + memset(pbi, 0, sizeof(struct VP9Decoder_s)); + + /* the ctx from v4l2 driver. */ + pbi->v4l2_ctx = pdata->private; + + pdata->private = pbi; + pdata->dec_status = vvp9_dec_status; + /* pdata->set_trickmode = set_trickmode; */ + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vp9_irq_cb; + pdata->threaded_irq_handler = vp9_threaded_irq_cb; + pdata->dump_state = vp9_dump_state; + + pbi->index = pdev->id; + + if (is_rdma_enable()) { + pbi->rdma_adr = dma_alloc_coherent(amports_get_dma_device(), RDMA_SIZE, &pbi->rdma_phy_adr, GFP_KERNEL); + for (i = 0; i < SCALELUT_DATA_WRITE_NUM; i++) { + pbi->rdma_adr[i * 4] = HEVC_IQIT_SCALELUT_WR_ADDR & 0xfff; + pbi->rdma_adr[i * 4 + 1] = i; + pbi->rdma_adr[i * 4 + 2] = HEVC_IQIT_SCALELUT_DATA & 0xfff; + pbi->rdma_adr[i * 4 + 3] = 0; + if (i == SCALELUT_DATA_WRITE_NUM - 1) { + pbi->rdma_adr[i * 4 + 2] = (HEVC_IQIT_SCALELUT_DATA & 0xfff) | 0x20000; + } + } + } + + snprintf(pbi->trace.vdec_name, sizeof(pbi->trace.vdec_name), + "vp9-%d", pbi->index); + snprintf(pbi->trace.pts_name, sizeof(pbi->trace.pts_name), + "%s-pts", pbi->trace.vdec_name); + snprintf(pbi->trace.new_q_name, sizeof(pbi->trace.new_q_name), + "%s-newframe_q", pbi->trace.vdec_name); + snprintf(pbi->trace.disp_q_name, sizeof(pbi->trace.disp_q_name), + "%s-dispframe_q", pbi->trace.vdec_name); + 
snprintf(pbi->trace.decode_time_name, sizeof(pbi->trace.decode_time_name), + "decoder_time%d", pdev->id); + snprintf(pbi->trace.decode_run_time_name, sizeof(pbi->trace.decode_run_time_name), + "decoder_run_time%d", pdev->id); + snprintf(pbi->trace.decode_header_memory_time_name, sizeof(pbi->trace.decode_header_memory_time_name), + "decoder_header_time%d", pdev->id); + + if (pdata->use_vfm_path) + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + + pbi->provider_name = pdata->vf_provider_name; + platform_set_drvdata(pdev, pdata); + + pbi->platform_dev = pdev; + pbi->video_signal_type = 0; + pbi->m_ins_flag = 1; + if (!vdec_is_support_4k()) { + pbi->max_pic_w = 1920; + pbi->max_pic_h = 1088; + } else if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SM1) { + pbi->max_pic_w = 4096; + pbi->max_pic_h = 2304; + } else { + pbi->max_pic_w = 8192; + pbi->max_pic_h = 4608; + } + + if ((debug & IGNORE_PARAM_FROM_CONFIG) == 0 && + pdata->config_len) { +#ifdef MULTI_INSTANCE_SUPPORT + int vp9_buf_width = 0; + int vp9_buf_height = 0; + /*use ptr config for doubel_write_mode, etc*/ + vp9_print(pbi, 0, "pdata->config=%s\n", pdata->config); + if (get_config_int(pdata->config, "vp9_double_write_mode", + &config_val) == 0) + pbi->double_write_mode = config_val; + else + pbi->double_write_mode = double_write_mode; + + if (get_config_int(pdata->config, "save_buffer_mode", + &config_val) == 0) + pbi->save_buffer_mode = config_val; + else + pbi->save_buffer_mode = 0; + if (get_config_int(pdata->config, "vp9_buf_width", + &config_val) == 0) { + vp9_buf_width = config_val; + } + if (get_config_int(pdata->config, "vp9_buf_height", + &config_val) == 0) { + vp9_buf_height = config_val; + } + + if (get_config_int(pdata->config, "no_head", + &config_val) == 0) + pbi->no_head = config_val; + else + pbi->no_head = no_head; + + /*use ptr 
config for max_pic_w, etc*/ + if (get_config_int(pdata->config, "vp9_max_pic_w", + &config_val) == 0) { + pbi->max_pic_w = config_val; + } + if (get_config_int(pdata->config, "vp9_max_pic_h", + &config_val) == 0) { + pbi->max_pic_h = config_val; + } + + if ((pbi->max_pic_w * pbi->max_pic_h) + < (vp9_buf_width * vp9_buf_height)) { + pbi->max_pic_w = vp9_buf_width; + pbi->max_pic_h = vp9_buf_height; + vp9_print(pbi, 0, "use buf resolution\n"); + } + + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + pbi->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + pbi->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + pbi->is_used_v4l = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_buffer_margin", + &config_val) == 0) + pbi->dynamic_buf_num_margin = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + pbi->mem_map_mode = config_val; + + if (get_config_int(pdata->config, + "parm_enable_fence", + &config_val) == 0) + pbi->enable_fence = config_val; + + if (get_config_int(pdata->config, + "parm_fence_usage", + &config_val) == 0) + pbi->fence_usage = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_low_latency_mode", + &config_val) == 0) + pbi->low_latency_flag = config_val; +#endif + if (get_config_int(pdata->config, "HDRStaticInfo", + &vf_dp.present_flag) == 0 + && vf_dp.present_flag == 1) { + get_config_int(pdata->config, "signal_type", + &pbi->video_signal_type); + get_config_int(pdata->config, "mG.x", + &vf_dp.primaries[0][0]); + get_config_int(pdata->config, "mG.y", + &vf_dp.primaries[0][1]); + get_config_int(pdata->config, "mB.x", + &vf_dp.primaries[1][0]); + get_config_int(pdata->config, "mB.y", + &vf_dp.primaries[1][1]); + get_config_int(pdata->config, "mR.x", + &vf_dp.primaries[2][0]); + get_config_int(pdata->config, "mR.y", + 
&vf_dp.primaries[2][1]); + get_config_int(pdata->config, "mW.x", + &vf_dp.white_point[0]); + get_config_int(pdata->config, "mW.y", + &vf_dp.white_point[1]); + get_config_int(pdata->config, "mMaxDL", + &vf_dp.luminance[0]); + get_config_int(pdata->config, "mMinDL", + &vf_dp.luminance[1]); + vf_dp.content_light_level.present_flag = 1; + get_config_int(pdata->config, "mMaxCLL", + &content_light_level.max_content); + get_config_int(pdata->config, "mMaxFALL", + &content_light_level.max_pic_average); + + get_config_int(pdata->config, "mTransfer", + &transfer_val); + + if (transfer_val == 0) + transfer_val = 16; + + vp9_print(pbi, 0, "transfer_val=%d\n",transfer_val); + + vf_dp.content_light_level = content_light_level; + if (!pbi->video_signal_type) { + pbi->video_signal_type = (1 << 29) + | (5 << 26) /* unspecified */ + | (0 << 25) /* limit */ + | (1 << 24) /* color available */ + | (9 << 16) /* 2020 */ + | (transfer_val << 8) /* 2084 */ + | (9 << 0); /* 2020 */ + } + } + pbi->vf_dp = vf_dp; + } else { + if (pdata->sys_info) { + pbi->vvp9_amstream_dec_info = *pdata->sys_info; + if ((pbi->vvp9_amstream_dec_info.width != 0) && + (pbi->vvp9_amstream_dec_info.height != 0)) { + pbi->max_pic_w = pbi->vvp9_amstream_dec_info.width; + pbi->max_pic_h = pbi->vvp9_amstream_dec_info.height; + } + } + /*pbi->vvp9_amstream_dec_info.width = 0; + pbi->vvp9_amstream_dec_info.height = 0; + pbi->vvp9_amstream_dec_info.rate = 30;*/ + pbi->double_write_mode = double_write_mode; + } + + if (!pbi->is_used_v4l) { + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vvp9_vf_provider, pbi); + } + + if (no_head & 0x10) { + pbi->no_head = (no_head & 0xf); + } + + pbi->endian = HEVC_CONFIG_LITTLE_ENDIAN; + if (!pbi->is_used_v4l) { + pbi->mem_map_mode = mem_map_mode; + if (is_support_vdec_canvas()) + pbi->endian = HEVC_CONFIG_BIG_ENDIAN; + } + if (endian) + pbi->endian = endian; + + if (pbi->is_used_v4l) + pbi->run_ready_min_buf_num = run_ready_min_buf_num - 1 ; + else + 
pbi->run_ready_min_buf_num = run_ready_min_buf_num; + + if (is_oversize(pbi->max_pic_w, pbi->max_pic_h)) { + pr_err("over size: %dx%d, probe failed\n", + pbi->max_pic_w, pbi->max_pic_h); + return -1; + } + + if (force_config_fence) { + pbi->enable_fence = true; + pbi->fence_usage = + (force_config_fence >> 4) & 0xf; + if (force_config_fence & 0x2) + pbi->enable_fence = false; + vp9_print(pbi, 0, "enable fence: %d, fence usage: %d\n", + pbi->enable_fence, pbi->fence_usage); + } + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL || + pbi->double_write_mode == 0x10) + pbi->mmu_enable = 0; + else + pbi->mmu_enable = 1; + +#ifdef VP9_10B_MMU_DW + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) { + pbi->dw_mmu_enable = + (get_double_write_mode(pbi) & 0x20) ? 1 : 0; + } else { + pbi->dw_mmu_enable = 0; + } +#endif + video_signal_type = pbi->video_signal_type; + + if (pdata->sys_info) { + pbi->vvp9_amstream_dec_info = *pdata->sys_info; + } else { + pbi->vvp9_amstream_dec_info.width = 0; + pbi->vvp9_amstream_dec_info.height = 0; + pbi->vvp9_amstream_dec_info.rate = 30; + } + + pbi->low_latency_flag = 1; + vp9_print(pbi, 0, + "no_head %d low_latency %d, signal_type 0x%x\n", + pbi->no_head, pbi->low_latency_flag, pbi->video_signal_type); +#if 0 + pbi->buf_start = pdata->mem_start; + pbi->buf_size = pdata->mem_end - pdata->mem_start + 1; +#else + if (amvdec_vp9_mmu_init(pbi) < 0) { + pr_err("vp9 alloc bmmu box failed!!\n"); + /* devm_kfree(&pdev->dev, (void *)pbi); */ + vfree((void *)pbi); + pdata->dec_status = NULL; + return -1; + } + + pbi->cma_alloc_count = PAGE_ALIGN(work_buf_size) / PAGE_SIZE; + ret = decoder_bmmu_box_alloc_buf_phy(pbi->bmmu_box, WORK_SPACE_BUF_ID, + pbi->cma_alloc_count * PAGE_SIZE, DRIVER_NAME, + &pbi->cma_alloc_addr); + if (ret < 0) { + uninit_mmu_buffers(pbi); + /* devm_kfree(&pdev->dev, (void *)pbi); */ + vfree((void *)pbi); + pdata->dec_status = NULL; + return ret; + } + pbi->buf_start = pbi->cma_alloc_addr; + pbi->buf_size = work_buf_size; 
+#endif + + pbi->init_flag = 0; + pbi->first_sc_checked = 0; + pbi->fatal_error = 0; + pbi->show_frame_num = 0; + + if (debug) { + pr_info("===VP9 decoder mem resource 0x%lx size 0x%x\n", + pbi->buf_start, + pbi->buf_size); + } + + pbi->cma_dev = pdata->cma_dev; + + mutex_init(&pbi->fence_mutex); + + if (pbi->enable_fence) { + pdata->sync = vdec_sync_get(); + if (!pdata->sync) { + vp9_print(pbi, 0, "alloc fence timeline error\n"); + vp9_local_uninit(pbi); + uninit_mmu_buffers(pbi); + /* devm_kfree(&pdev->dev, (void *)pbi); */ + vfree((void *)pbi); + pdata->dec_status = NULL; + return -1; + } + pdata->sync->usage = pbi->fence_usage; + vdec_timeline_create(pdata->sync, DRIVER_NAME); + } + + if (vvp9_init(pdata) < 0) { + pr_info("\namvdec_vp9 init failed.\n"); + vdec_timeline_put(pdata->sync); + vp9_local_uninit(pbi); + uninit_mmu_buffers(pbi); + /* devm_kfree(&pdev->dev, (void *)pbi); */ + vfree((void *)pbi); + pdata->dec_status = NULL; + return -ENODEV; + } + vdec_set_prepare_level(pdata, start_decode_buf_level); + hevc_source_changed(VFORMAT_VP9, + 4096, 2048, 60); +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0) + vdec_core_request(pdata, + CORE_MASK_HEVC_FRONT | CORE_MASK_HEVC_BACK); + else + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_HEVC_FRONT | CORE_MASK_HEVC_BACK + | CORE_MASK_COMBINE); +#else + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_HEVC); + else + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); +#endif + pbi->pic_list_init_done2 = true; + + return 0; +} + +static int ammvdec_vp9_remove(struct platform_device *pdev) +{ + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec = hw_to_vdec(pbi); + int i; + if (debug) + pr_info("amvdec_vp9_remove\n"); + + vmvp9_stop(pbi); + +#ifdef SUPPORT_FB_DECODING + vdec_core_release(hw_to_vdec(pbi), CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | 
CORE_MASK_HEVC_FRONT | CORE_MASK_HEVC_BACK + ); +#else + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(pbi), CORE_MASK_HEVC); + else + vdec_core_release(hw_to_vdec(pbi), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); +#endif + vdec_set_status(hw_to_vdec(pbi), VDEC_STATUS_DISCONNECTED); + + if (vdec->parallel_dec == 1) { + for (i = 0; i < FRAME_BUFFERS; i++) { + vdec->free_canvas_ex + (pbi->common.buffer_pool->frame_bufs[i].buf.y_canvas_index, + vdec->id); + vdec->free_canvas_ex + (pbi->common.buffer_pool->frame_bufs[i].buf.uv_canvas_index, + vdec->id); + } + } + + if (pbi->enable_fence) + vdec_fence_release(pbi, vdec->sync); + +#ifdef DEBUG_PTS + pr_info("pts missed %ld, pts hit %ld, duration %d\n", + pbi->pts_missed, pbi->pts_hit, pbi->frame_dur); +#endif + mem_map_mode = 0; + + /* devm_kfree(&pdev->dev, (void *)pbi); */ + if (is_rdma_enable()) + dma_free_coherent(amports_get_dma_device(), RDMA_SIZE, pbi->rdma_adr, pbi->rdma_phy_adr); + vfree((void *)pbi); + return 0; +} + +static struct platform_driver ammvdec_vp9_driver = { + .probe = ammvdec_vp9_probe, + .remove = ammvdec_vp9_remove, +#ifdef CONFIG_PM + .suspend = amhevc_suspend, + .resume = amhevc_resume, +#endif + .driver = { + .name = MULTI_DRIVER_NAME, + } +}; +#endif +static struct mconfig vp9_configs[] = { + MC_PU32("bit_depth_luma", &bit_depth_luma), + MC_PU32("bit_depth_chroma", &bit_depth_chroma), + MC_PU32("frame_width", &frame_width), + MC_PU32("frame_height", &frame_height), + MC_PU32("debug", &debug), + MC_PU32("radr", &radr), + MC_PU32("rval", &rval), + MC_PU32("pop_shorts", &pop_shorts), + MC_PU32("dbg_cmd", &dbg_cmd), + MC_PU32("dbg_skip_decode_index", &dbg_skip_decode_index), + MC_PU32("endian", &endian), + MC_PU32("step", &step), + MC_PU32("udebug_flag", &udebug_flag), + MC_PU32("decode_pic_begin", &decode_pic_begin), + MC_PU32("slice_parse_begin", &slice_parse_begin), + MC_PU32("i_only_flag", &i_only_flag), + MC_PU32("error_handle_policy", &error_handle_policy), + 
MC_PU32("buf_alloc_depth", &buf_alloc_depth), + MC_PU32("buf_alloc_size", &buf_alloc_size), + MC_PU32("buffer_mode", &buffer_mode), + MC_PU32("buffer_mode_dbg", &buffer_mode_dbg), + MC_PU32("max_buf_num", &max_buf_num), + MC_PU32("dynamic_buf_num_margin", &dynamic_buf_num_margin), + MC_PU32("mem_map_mode", &mem_map_mode), + MC_PU32("double_write_mode", &double_write_mode), + MC_PU32("enable_mem_saving", &enable_mem_saving), + MC_PU32("force_w_h", &force_w_h), + MC_PU32("force_fps", &force_fps), + MC_PU32("max_decoding_time", &max_decoding_time), + MC_PU32("on_no_keyframe_skiped", &on_no_keyframe_skiped), + MC_PU32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("decode_timeout_val", &decode_timeout_val), + MC_PU32("vp9_max_pic_w", &vp9_max_pic_w), + MC_PU32("vp9_max_pic_h", &vp9_max_pic_h), +}; +static struct mconfig_node vp9_node; + +static int __init amvdec_vp9_driver_init_module(void) +{ + + struct BuffInfo_s *p_buf_info; + + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + p_buf_info = &amvvp9_workbuff_spec[2]; + else + p_buf_info = &amvvp9_workbuff_spec[1]; + } else + p_buf_info = &amvvp9_workbuff_spec[0]; + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) + p_buf_info = &amvvp9_workbuff_spec[5]; + else + p_buf_info = &amvvp9_workbuff_spec[4]; + } + + init_buff_spec(NULL, p_buf_info); + work_buf_size = + (p_buf_info->end_adr - p_buf_info->start_adr + + 0xffff) & (~0xffff); + + pr_debug("amvdec_vp9 module init\n"); + + error_handle_policy = 0; + +#ifdef ERROR_HANDLE_DEBUG + dbg_nal_skip_flag = 0; + dbg_nal_skip_count = 0; +#endif + udebug_flag = 0; + decode_pic_begin = 0; + slice_parse_begin = 0; + step = 0; + buf_alloc_size = 0; +#ifdef MULTI_INSTANCE_SUPPORT + if (platform_driver_register(&ammvdec_vp9_driver)) + pr_err("failed to register ammvdec_vp9 driver\n"); + +#endif + 
if (platform_driver_register(&amvdec_vp9_driver)) { + pr_err("failed to register amvdec_vp9 driver\n"); + return -ENODEV; + } + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL || + get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_TXL || + get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) { + amvdec_vp9_profile.name = "vp9_unsupport"; + } else if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D)) { + amvdec_vp9_profile.profile = + "8k, 10bit, dwrite, compressed, fence, v4l-uvm"; + } else { + if (vdec_is_support_4k()) + amvdec_vp9_profile.profile = + "4k, 10bit, dwrite, compressed, fence, v4l-uvm"; + else + amvdec_vp9_profile.profile = + "10bit, dwrite, compressed, fence, v4l-uvm"; + } + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) + max_buf_num = MAX_BUF_NUM_LESS; + + vcodec_profile_register(&amvdec_vp9_profile); + amvdec_vp9_profile_mult = amvdec_vp9_profile; + amvdec_vp9_profile_mult.name = "mvp9"; + vcodec_profile_register(&amvdec_vp9_profile_mult); + INIT_REG_NODE_CONFIGS("media.decoder", &vp9_node, + "vp9", vp9_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_VP9, 0); + + return 0; +} + +static void __exit amvdec_vp9_driver_remove_module(void) +{ + pr_debug("amvdec_vp9 module remove.\n"); +#ifdef MULTI_INSTANCE_SUPPORT + platform_driver_unregister(&ammvdec_vp9_driver); +#endif + platform_driver_unregister(&amvdec_vp9_driver); +} + +/****************************************/ + +module_param(bit_depth_luma, uint, 0664); +MODULE_PARM_DESC(bit_depth_luma, "\n amvdec_vp9 bit_depth_luma\n"); + +module_param(bit_depth_chroma, uint, 0664); +MODULE_PARM_DESC(bit_depth_chroma, "\n amvdec_vp9 bit_depth_chroma\n"); + +module_param(frame_width, uint, 0664); +MODULE_PARM_DESC(frame_width, "\n amvdec_vp9 frame_width\n"); + +module_param(frame_height, uint, 0664); +MODULE_PARM_DESC(frame_height, "\n amvdec_vp9 frame_height\n"); + +module_param(debug, uint, 0664); +MODULE_PARM_DESC(debug, "\n amvdec_vp9 
debug\n"); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\n radr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\n rval\n"); + +module_param(pop_shorts, uint, 0664); +MODULE_PARM_DESC(pop_shorts, "\n rval\n"); + +module_param(dbg_cmd, uint, 0664); +MODULE_PARM_DESC(dbg_cmd, "\n dbg_cmd\n"); + +module_param(dbg_skip_decode_index, uint, 0664); +MODULE_PARM_DESC(dbg_skip_decode_index, "\n dbg_skip_decode_index\n"); + +module_param(endian, uint, 0664); +MODULE_PARM_DESC(endian, "\n rval\n"); + +module_param(step, uint, 0664); +MODULE_PARM_DESC(step, "\n amvdec_vp9 step\n"); + +module_param(decode_pic_begin, uint, 0664); +MODULE_PARM_DESC(decode_pic_begin, "\n amvdec_vp9 decode_pic_begin\n"); + +module_param(slice_parse_begin, uint, 0664); +MODULE_PARM_DESC(slice_parse_begin, "\n amvdec_vp9 slice_parse_begin\n"); + +module_param(i_only_flag, uint, 0664); +MODULE_PARM_DESC(i_only_flag, "\n amvdec_vp9 i_only_flag\n"); + +module_param(low_latency_flag, uint, 0664); +MODULE_PARM_DESC(low_latency_flag, "\n amvdec_vp9 low_latency_flag\n"); + +module_param(no_head, uint, 0664); +MODULE_PARM_DESC(no_head, "\n amvdec_vp9 no_head\n"); + +module_param(error_handle_policy, uint, 0664); +MODULE_PARM_DESC(error_handle_policy, "\n amvdec_vp9 error_handle_policy\n"); + +module_param(buf_alloc_depth, uint, 0664); +MODULE_PARM_DESC(buf_alloc_depth, "\n buf_alloc_depth\n"); + +module_param(buf_alloc_size, uint, 0664); +MODULE_PARM_DESC(buf_alloc_size, "\n buf_alloc_size\n"); + +module_param(buffer_mode, uint, 0664); +MODULE_PARM_DESC(buffer_mode, "\n buffer_mode\n"); + +module_param(buffer_mode_dbg, uint, 0664); +MODULE_PARM_DESC(buffer_mode_dbg, "\n buffer_mode_dbg\n"); +/*USE_BUF_BLOCK*/ +module_param(max_buf_num, uint, 0664); +MODULE_PARM_DESC(max_buf_num, "\n max_buf_num\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +module_param(mv_buf_margin, uint, 0664); 
+MODULE_PARM_DESC(mv_buf_margin, "\n mv_buf_margin\n"); + +module_param(mv_buf_dynamic_alloc, uint, 0664); +MODULE_PARM_DESC(mv_buf_dynamic_alloc, "\n mv_buf_dynamic_alloc\n"); + +module_param(run_ready_min_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_min_buf_num, "\n run_ready_min_buf_num\n"); + +/**/ + +module_param(mem_map_mode, uint, 0664); +MODULE_PARM_DESC(mem_map_mode, "\n mem_map_mode\n"); + +#ifdef SUPPORT_10BIT +module_param(double_write_mode, uint, 0664); +MODULE_PARM_DESC(double_write_mode, "\n double_write_mode\n"); + +module_param(enable_mem_saving, uint, 0664); +MODULE_PARM_DESC(enable_mem_saving, "\n enable_mem_saving\n"); + +module_param(force_w_h, uint, 0664); +MODULE_PARM_DESC(force_w_h, "\n force_w_h\n"); +#endif + +module_param(force_fps, uint, 0664); +MODULE_PARM_DESC(force_fps, "\n force_fps\n"); + +module_param(max_decoding_time, uint, 0664); +MODULE_PARM_DESC(max_decoding_time, "\n max_decoding_time\n"); + +module_param(on_no_keyframe_skiped, uint, 0664); +MODULE_PARM_DESC(on_no_keyframe_skiped, "\n on_no_keyframe_skiped\n"); + +module_param(mcrcc_cache_alg_flag, uint, 0664); +MODULE_PARM_DESC(mcrcc_cache_alg_flag, "\n mcrcc_cache_alg_flag\n"); + +#ifdef MULTI_INSTANCE_SUPPORT +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n vp9 start_decode_buf_level\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, + "\n vp9 decode_timeout_val\n"); + +module_param(vp9_max_pic_w, uint, 0664); +MODULE_PARM_DESC(vp9_max_pic_w, "\n vp9_max_pic_w\n"); + +module_param(vp9_max_pic_h, uint, 0664); +MODULE_PARM_DESC(vp9_max_pic_h, "\n vp9_max_pic_h\n"); + +module_param_array(decode_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(display_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, + &max_decode_instance_num, 0664); + +module_param_array(run_count, uint, + &max_decode_instance_num, 0664); 
+ +module_param_array(input_empty, uint, + &max_decode_instance_num, 0664); + +module_param_array(not_run_ready, uint, + &max_decode_instance_num, 0664); +#endif + +#ifdef SUPPORT_FB_DECODING +module_param_array(not_run2_ready, uint, + &max_decode_instance_num, 0664); + +module_param_array(run2_count, uint, + &max_decode_instance_num, 0664); + +module_param(stage_buf_num, uint, 0664); +MODULE_PARM_DESC(stage_buf_num, "\n amvdec_h265 stage_buf_num\n"); +#endif +module_param(force_bufspec, uint, 0664); +MODULE_PARM_DESC(force_bufspec, "\n amvdec_h265 force_bufspec\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_h265 udebug_flag\n"); + +module_param(udebug_pause_pos, uint, 0664); +MODULE_PARM_DESC(udebug_pause_pos, "\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n without_display_mode\n"); + +module_param(force_config_fence, uint, 0664); +MODULE_PARM_DESC(force_config_fence, "\n force enable fence\n"); + +module_param(force_pts_unstable, uint, 0664); +MODULE_PARM_DESC(force_pts_unstable, "\n force_pts_unstable\n"); + +module_param(v4l_bitstream_id_enable, uint, 0664); +MODULE_PARM_DESC(v4l_bitstream_id_enable, "\n v4l_bitstream_id_enable\n"); + +module_init(amvdec_vp9_driver_init_module); +module_exit(amvdec_vp9_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC vp9 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +
diff --git a/drivers/frame_provider/decoder/vp9/vvp9.h b/drivers/frame_provider/decoder/vp9/vvp9.h new file mode 100644 index 0000000..1db9d09 --- /dev/null +++ b/drivers/frame_provider/decoder/vp9/vvp9.h
@@ -0,0 +1,23 @@ +/* + * drivers/amlogic/amports/vvp9.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef VVP9_H +#define VVP9_H + +void adapt_coef_probs(int pic_count, int prev_kf, int cur_kf, int pre_fc, +unsigned int *prev_prob, unsigned int *cur_prob, unsigned int *count); +#endif
diff --git a/drivers/frame_provider/decoder_v4l/Makefile b/drivers/frame_provider/decoder_v4l/Makefile new file mode 100644 index 0000000..43e94df --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/Makefile
@@ -0,0 +1,9 @@ +obj-y += mpeg12/ +obj-y += mpeg4/ +obj-y += h264_multi/ +obj-y += h265/ +obj-y += vp9/ +obj-y += mjpeg/ +obj-y += avs2/ +obj-y += avs_multi/ +obj-y += vav1/
diff --git a/drivers/frame_provider/decoder_v4l/avs2/Makefile b/drivers/frame_provider/decoder_v4l/avs2/Makefile new file mode 100644 index 0000000..9995065 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/avs2/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_AVS2) += amvdec_avs2_v4l.o +amvdec_avs2_v4l-objs += vavs2.o avs2_bufmgr.o
diff --git a/drivers/frame_provider/decoder_v4l/avs2/avs2_bufmgr.c b/drivers/frame_provider/decoder_v4l/avs2/avs2_bufmgr.c new file mode 100644 index 0000000..91c3132 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/avs2/avs2_bufmgr.c
@@ -0,0 +1,2205 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/slab.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../../decoder/utils/decoder_mmu_box.h" +#include "../../decoder/utils/decoder_bmmu_box.h" +#include "avs2_global.h" + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include 
"../../decoder/utils/vdec.h" +#include "../../decoder/utils/amvdec.h" + +#undef pr_info +#define pr_info printk + +#define assert(chk_cond) {\ + if (!(chk_cond))\ + pr_info("error line %d\n", __LINE__);\ + while (!(chk_cond))\ + ;\ +} + +int16_t get_param(uint16_t value, int8_t *print_info) +{ + if (is_avs2_print_param()) + pr_info("%s = %x\n", print_info, value); + return (int16_t)value; +} + +void readAlfCoeff(struct avs2_decoder *avs2_dec, struct ALFParam_s *Alfp) +{ + int32_t pos; + union param_u *rpm_param = &avs2_dec->param; + + int32_t f = 0, symbol, pre_symbole; + const int32_t numCoeff = (int32_t)ALF_MAX_NUM_COEF; + + switch (Alfp->componentID) { + case ALF_Cb: + case ALF_Cr: { + for (pos = 0; pos < numCoeff; pos++) { + if (Alfp->componentID == ALF_Cb) + Alfp->coeffmulti[0][pos] = + get_param( + rpm_param->alf.alf_cb_coeffmulti[pos], + "Chroma ALF coefficients"); + else + Alfp->coeffmulti[0][pos] = + get_param( + rpm_param->alf.alf_cr_coeffmulti[pos], + "Chroma ALF coefficients"); +#if Check_Bitstream + if (pos <= 7) + assert(Alfp->coeffmulti[0][pos] >= -64 + && Alfp->coeffmulti[0][pos] <= 63); + if (pos == 8) + assert(Alfp->coeffmulti[0][pos] >= -1088 + && Alfp->coeffmulti[0][pos] <= 1071); +#endif + } + } + break; + case ALF_Y: { + int32_t region_distance_idx = 0; + Alfp->filters_per_group = + get_param(rpm_param->alf.alf_filters_num_m_1, + "ALF_filter_number_minus_1"); +#if Check_Bitstream + assert(Alfp->filters_per_group >= 0 + && Alfp->filters_per_group <= 15); +#endif + Alfp->filters_per_group = Alfp->filters_per_group + 1; + + memset(Alfp->filterPattern, 0, NO_VAR_BINS * sizeof(int32_t)); + pre_symbole = 0; + symbol = 0; + for (f = 0; f < Alfp->filters_per_group; f++) { + if (f > 0) { + if (Alfp->filters_per_group != 16) { + symbol = + get_param(rpm_param->alf.region_distance + [region_distance_idx++], + "Region distance"); + } else { + symbol = 1; + } + Alfp->filterPattern[symbol + pre_symbole] = 1; + pre_symbole = symbol + pre_symbole; + } + + for 
(pos = 0; pos < numCoeff; pos++) { + Alfp->coeffmulti[f][pos] = + get_param( + rpm_param->alf.alf_y_coeffmulti[f][pos], + "Luma ALF coefficients"); +#if Check_Bitstream + if (pos <= 7) + assert( + Alfp->coeffmulti[f][pos] + >= -64 && + Alfp->coeffmulti[f][pos] + <= 63); + if (pos == 8) + assert( + Alfp->coeffmulti[f][pos] + >= -1088 && + Alfp->coeffmulti[f][pos] + <= 1071); +#endif + + } + } + +#if Check_Bitstream + assert(pre_symbole >= 0 && pre_symbole <= 15); + +#endif + } + break; + default: { + pr_info("Not a legal component ID\n"); + assert(0); + return; /* exit(-1);*/ + } + } +} + +void Read_ALF_param(struct avs2_decoder *avs2_dec) +{ + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + union param_u *rpm_param = &avs2_dec->param; + int32_t compIdx; + if (input->alf_enable) { + img->pic_alf_on[0] = + get_param( + rpm_param->alf.picture_alf_enable_Y, + "alf_pic_flag_Y"); + img->pic_alf_on[1] = + get_param( + rpm_param->alf.picture_alf_enable_Cb, + "alf_pic_flag_Cb"); + img->pic_alf_on[2] = + get_param( + rpm_param->alf.picture_alf_enable_Cr, + "alf_pic_flag_Cr"); + + avs2_dec->m_alfPictureParam[ALF_Y].alf_flag + = img->pic_alf_on[ALF_Y]; + avs2_dec->m_alfPictureParam[ALF_Cb].alf_flag + = img->pic_alf_on[ALF_Cb]; + avs2_dec->m_alfPictureParam[ALF_Cr].alf_flag + = img->pic_alf_on[ALF_Cr]; + if (img->pic_alf_on[0] + || img->pic_alf_on[1] + || img->pic_alf_on[2]) { + for (compIdx = 0; + compIdx < NUM_ALF_COMPONENT; + compIdx++) { + if (img->pic_alf_on[compIdx]) { + readAlfCoeff( + avs2_dec, + &avs2_dec->m_alfPictureParam[compIdx]); + } + } + } + } + +} + +void Get_SequenceHeader(struct avs2_decoder *avs2_dec) +{ + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + union param_u *rpm_param = &avs2_dec->param; + /*int32_t i, j;*/ + + /*fpr_info(stdout, "Sequence Header\n");*/ + 
/*memcpy(currStream->streamBuffer, buf, length);*/ + /*currStream->code_len = currStream->bitstream_length = length;*/ + /*currStream->read_len = currStream->frame_bitoffset = (startcodepos + + 1) * 8;*/ + + input->profile_id = + get_param(rpm_param->p.profile_id, "profile_id"); + input->level_id = + get_param(rpm_param->p.level_id, "level_id"); + hd->progressive_sequence = + get_param( + rpm_param->p.progressive_sequence, + "progressive_sequence"); +#if INTERLACE_CODING + hd->is_field_sequence = + get_param( + rpm_param->p.is_field_sequence, + "field_coded_sequence"); +#endif +#if HALF_PIXEL_COMPENSATION || HALF_PIXEL_CHROMA + img->is_field_sequence = hd->is_field_sequence; +#endif + hd->horizontal_size = + get_param(rpm_param->p.horizontal_size, "horizontal_size"); + hd->vertical_size = + get_param(rpm_param->p.vertical_size, "vertical_size"); + input->chroma_format = + get_param(rpm_param->p.chroma_format, "chroma_format"); + input->output_bit_depth = 8; + input->sample_bit_depth = 8; + hd->sample_precision = 1; + if (input->profile_id == BASELINE10_PROFILE) { /* 10bit profile (0x52)*/ + input->output_bit_depth = + get_param(rpm_param->p.sample_precision, + "sample_precision"); + input->output_bit_depth = + 6 + (input->output_bit_depth) * 2; + input->sample_bit_depth = + get_param(rpm_param->p.encoding_precision, + "encoding_precision"); + input->sample_bit_depth = + 6 + (input->sample_bit_depth) * 2; + } else { /* other profile*/ + hd->sample_precision = + get_param(rpm_param->p.sample_precision, + "sample_precision"); + } + hd->aspect_ratio_information = + get_param(rpm_param->p.aspect_ratio_information, + "aspect_ratio_information"); + hd->frame_rate_code = + get_param(rpm_param->p.frame_rate_code, "frame_rate_code"); + + hd->bit_rate_lower = + get_param(rpm_param->p.bit_rate_lower, "bit_rate_lower"); + /*hd->marker_bit = get_param(rpm_param->p.marker_bit, + * "marker bit");*/ + /*CHECKMARKERBIT*/ + hd->bit_rate_upper = + 
get_param(rpm_param->p.bit_rate_upper, "bit_rate_upper"); + hd->low_delay = + get_param(rpm_param->p.low_delay, "low_delay"); + /*hd->marker_bit = + get_param(rpm_param->p.marker_bit2, + "marker bit");*/ + /*CHECKMARKERBIT*/ +#if M3480_TEMPORAL_SCALABLE + hd->temporal_id_exist_flag = + get_param(rpm_param->p.temporal_id_exist_flag, + "temporal_id exist flag"); /*get + Extention Flag*/ +#endif + /*u_v(18, "bbv buffer size");*/ + input->g_uiMaxSizeInBit = + get_param(rpm_param->p.g_uiMaxSizeInBit, + "Largest Coding Block Size"); + + + /*hd->background_picture_enable = 0x01 ^ + (get_param(rpm_param->p.avs2_seq_flags, + "background_picture_disable") + >> BACKGROUND_PICTURE_DISABLE_BIT) & 0x1;*/ + /*rain???*/ + hd->background_picture_enable = 0x01 ^ + ((get_param(rpm_param->p.avs2_seq_flags, + "background_picture_disable") + >> BACKGROUND_PICTURE_DISABLE_BIT) & 0x1); + + + hd->b_dmh_enabled = 1; + + hd->b_mhpskip_enabled = + get_param(rpm_param->p.avs2_seq_flags >> B_MHPSKIP_ENABLED_BIT, + "mhpskip enabled") & 0x1; + hd->dhp_enabled = + get_param(rpm_param->p.avs2_seq_flags >> DHP_ENABLED_BIT, + "dhp enabled") & 0x1; + hd->wsm_enabled = + get_param(rpm_param->p.avs2_seq_flags >> WSM_ENABLED_BIT, + "wsm enabled") & 0x1; + + img->inter_amp_enable = + get_param(rpm_param->p.avs2_seq_flags >> INTER_AMP_ENABLE_BIT, + "Asymmetric Motion Partitions") & 0x1; + input->useNSQT = + get_param(rpm_param->p.avs2_seq_flags >> USENSQT_BIT, + "useNSQT") & 0x1; + input->useSDIP = + get_param(rpm_param->p.avs2_seq_flags >> USESDIP_BIT, + "useNSIP") & 0x1; + + hd->b_secT_enabled = + get_param(rpm_param->p.avs2_seq_flags >> B_SECT_ENABLED_BIT, + "secT enabled") & 0x1; + + input->sao_enable = + get_param(rpm_param->p.avs2_seq_flags >> SAO_ENABLE_BIT, + "SAO Enable Flag") & 0x1; + input->alf_enable = + get_param(rpm_param->p.avs2_seq_flags >> ALF_ENABLE_BIT, + "ALF Enable Flag") & 0x1; + hd->b_pmvr_enabled = + get_param(rpm_param->p.avs2_seq_flags >> B_PMVR_ENABLED_BIT, + "pmvr enabled") & 
0x1; + + + hd->gop_size = get_param(rpm_param->p.num_of_RPS, + "num_of_RPS"); +#if Check_Bitstream + /*assert(hd->gop_size<=32);*/ +#endif + + if (hd->low_delay == 0) { + hd->picture_reorder_delay = + get_param(rpm_param->p.picture_reorder_delay, + "picture_reorder_delay"); + } + + input->crossSliceLoopFilter = + get_param(rpm_param->p.avs2_seq_flags + >> CROSSSLICELOOPFILTER_BIT, + "Cross Loop Filter Flag") & 0x1; + +#if BCBR + if ((input->profile_id == SCENE_PROFILE || + input->profile_id == SCENE10_PROFILE) && + hd->background_picture_enable) { + hd->bcbr_enable = u_v(1, + "block_composed_background_picture_enable"); + u_v(1, "reserved bits"); + } else { + hd->bcbr_enable = 0; + u_v(2, "reserved bits"); + } +#else + /*u_v(2, "reserved bits");*/ +#endif + + img->width = hd->horizontal_size; + img->height = hd->vertical_size; + img->width_cr = (img->width >> 1); + + if (input->chroma_format == 1) { + img->height_cr + = (img->height >> 1); + } + + img->PicWidthInMbs = img->width / MIN_CU_SIZE; + img->PicHeightInMbs = img->height / MIN_CU_SIZE; + img->PicSizeInMbs = img->PicWidthInMbs * img->PicHeightInMbs; + img->buf_cycle = input->buf_cycle + 1; + img->max_mb_nr = (img->width * img->height) + / (MIN_CU_SIZE * MIN_CU_SIZE); + +#ifdef AML +avs2_dec->lcu_size = + get_param(rpm_param->p.lcu_size, "lcu_size"); +avs2_dec->lcu_size = 1<<(avs2_dec->lcu_size); +#endif +hc->seq_header++; +} + + +void Get_I_Picture_Header(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + union param_u *rpm_param = &avs2_dec->param; + +#if RD1501_FIX_BG /*//Longfei.Wang@mediatek.com*/ + hd->background_picture_flag = 0; + hd->background_picture_output_flag = 0; + img->typeb = 0; +#endif + + hd->time_code_flag = + get_param(rpm_param->p.time_code_flag, + "time_code_flag"); + + if (hd->time_code_flag) { + hd->time_code = + get_param(rpm_param->p.time_code, + "time_code"); + } + if (hd->background_picture_enable) { 
+ hd->background_picture_flag = + get_param(rpm_param->p.background_picture_flag, + "background_picture_flag"); + + if (hd->background_picture_flag) { + img->typeb = + BACKGROUND_IMG; + } else { + img->typeb = 0; + } + + if (img->typeb == BACKGROUND_IMG) { + hd->background_picture_output_flag = + get_param( + rpm_param->p.background_picture_output_flag, + "background_picture_output_flag"); + } + } + + + { + img->coding_order = + get_param(rpm_param->p.coding_order, + "coding_order"); + + + +#if M3480_TEMPORAL_SCALABLE + if (hd->temporal_id_exist_flag == 1) { + hd->cur_layer = + get_param(rpm_param->p.cur_layer, + "temporal_id"); + } +#endif +#if RD1501_FIX_BG /*Longfei.Wang@mediatek.com*/ + if (hd->low_delay == 0 + && !(hd->background_picture_flag && + !hd->background_picture_output_flag)) { /*cdp*/ +#else + if (hd->low_delay == 0 && + !(hd->background_picture_enable && + !hd->background_picture_output_flag)) { /*cdp*/ +#endif + hd->displaydelay = + get_param(rpm_param->p.displaydelay, + "picture_output_delay"); + } + + } + { + int32_t RPS_idx;/* = (img->coding_order-1) % gop_size;*/ + int32_t predict; + int32_t j; + predict = + get_param(rpm_param->p.predict, + "use RCS in SPS"); + /*if (predict) {*/ + RPS_idx = + get_param(rpm_param->p.RPS_idx, + "predict for RCS"); + /* hd->curr_RPS = hd->decod_RPS[RPS_idx];*/ + /*} else {*/ + /*gop size16*/ + hd->curr_RPS.referd_by_others = + get_param(rpm_param->p.referd_by_others_cur, + "refered by others"); + hd->curr_RPS.num_of_ref = + get_param(rpm_param->p.num_of_ref_cur, + "num of reference picture"); + for (j = 0; j < hd->curr_RPS.num_of_ref; j++) { + hd->curr_RPS.ref_pic[j] = + get_param(rpm_param->p.ref_pic_cur[j], + "delta COI of ref pic"); + } + hd->curr_RPS.num_to_remove = + get_param(rpm_param->p.num_to_remove_cur, + "num of removed picture"); +#ifdef SANITY_CHECK + if (hd->curr_RPS.num_to_remove > MAXREF) { + hd->curr_RPS.num_to_remove = MAXREF; + pr_info("Warning, %s: num_to_remove %d beyond range, force to 
MAXREF\n", + __func__, hd->curr_RPS.num_to_remove); + } +#endif + + for (j = 0; j < hd->curr_RPS.num_to_remove; j++) { + hd->curr_RPS.remove_pic[j] = + get_param( + rpm_param->p.remove_pic_cur[j], + "delta COI of removed pic"); + } + /*u_v(1, "marker bit");*/ + + /*}*/ + } + /*xyji 12.23*/ + if (hd->low_delay) { + /*ue_v( + "bbv check times");*/ + } + + hd->progressive_frame = + get_param(rpm_param->p.progressive_frame, + "progressive_frame"); + + if (!hd->progressive_frame) { + img->picture_structure = + get_param(rpm_param->p.picture_structure, + "picture_structure"); + } else { + img->picture_structure + = 1; + } + + hd->top_field_first = + get_param(rpm_param->p.top_field_first, + "top_field_first"); + hd->repeat_first_field = + get_param(rpm_param->p.repeat_first_field, + "repeat_first_field"); +#if INTERLACE_CODING + if (hd->is_field_sequence) { + hd->is_top_field = + get_param(rpm_param->p.is_top_field, + "is_top_field"); +#if HALF_PIXEL_COMPENSATION || HALF_PIXEL_CHROMA + img->is_top_field = hd->is_top_field; +#endif + } +#endif + + + img->qp = hd->picture_qp; + + img->type = I_IMG; + +} + +/* + * Function:pb picture header + * Input: + * Output: + * Return: + * Attention: + */ + +void Get_PB_Picture_Header(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + union param_u *rpm_param = &avs2_dec->param; + + + /*u_v(32, "bbv delay");*/ + + hd->picture_coding_type = + get_param(rpm_param->p.picture_coding_type, + "picture_coding_type"); + + if (hd->background_picture_enable && + (hd->picture_coding_type == 1 || + hd->picture_coding_type == 3)) { + if (hd->picture_coding_type == 1) { + hd->background_pred_flag = + get_param( + rpm_param->p.background_pred_flag, + "background_pred_flag"); + } else { + hd->background_pred_flag = 0; + } + if (hd->background_pred_flag == 0) { + + hd->background_reference_enable = + get_param( + rpm_param-> + p.background_reference_enable, + 
"background_reference_enable"); + + } else { +#if RD170_FIX_BG + hd->background_reference_enable = 1; +#else + hd->background_reference_enable = 0; +#endif + } + + } else { + hd->background_pred_flag = 0; + hd->background_reference_enable = 0; + } + + + + if (hd->picture_coding_type == 1) { + img->type = + P_IMG; + } else if (hd->picture_coding_type == 3) { + img->type = + F_IMG; + } else { + img->type = + B_IMG; + } + + + if (hd->picture_coding_type == 1 && + hd->background_pred_flag) { + img->typeb = BP_IMG; + } else { + img->typeb = 0; + } + + + { + img->coding_order = + get_param( + rpm_param->p.coding_order, + "coding_order"); + + +#if M3480_TEMPORAL_SCALABLE + if (hd->temporal_id_exist_flag == 1) { + hd->cur_layer = + get_param(rpm_param->p.cur_layer, + "temporal_id"); + } +#endif + + if (hd->low_delay == 0) { + hd->displaydelay = + get_param(rpm_param->p.displaydelay, + "displaydelay"); + } + } + { + int32_t RPS_idx;/* = (img->coding_order-1) % gop_size;*/ + int32_t predict; + predict = + get_param(rpm_param->p.predict, + "use RPS in SPS"); + if (predict) { + RPS_idx = + get_param(rpm_param->p.RPS_idx, + "predict for RPS"); + hd->curr_RPS = hd->decod_RPS[RPS_idx]; + } /*else*/ + { + /*gop size16*/ + int32_t j; + hd->curr_RPS.referd_by_others = + get_param( + rpm_param->p.referd_by_others_cur, + "refered by others"); + hd->curr_RPS.num_of_ref = + get_param( + rpm_param->p.num_of_ref_cur, + "num of reference picture"); + for (j = 0; j < hd->curr_RPS.num_of_ref; j++) { + hd->curr_RPS.ref_pic[j] = + get_param( + rpm_param->p.ref_pic_cur[j], + "delta COI of ref pic"); + } + hd->curr_RPS.num_to_remove = + get_param( + rpm_param->p.num_to_remove_cur, + "num of removed picture"); +#ifdef SANITY_CHECK + if (hd->curr_RPS.num_to_remove > MAXREF) { + hd->curr_RPS.num_to_remove = MAXREF; + pr_info("Warning, %s: num_to_remove %d beyond range, force to MAXREF\n", + __func__, hd->curr_RPS.num_to_remove); + } +#endif + for (j = 0; + j < hd->curr_RPS.num_to_remove; j++) { + 
hd->curr_RPS.remove_pic[j] = + get_param( + rpm_param->p.remove_pic_cur[j], + "delta COI of removed pic"); + } + /*u_v(1, "marker bit");*/ + + } + } + /*xyji 12.23*/ + if (hd->low_delay) { + /*ue_v( + "bbv check times");*/ + } + + hd->progressive_frame = + get_param(rpm_param->p.progressive_frame, + "progressive_frame"); + + if (!hd->progressive_frame) { + img->picture_structure = + get_param(rpm_param->p.picture_structure, + "picture_structure"); + } else { + img->picture_structure = 1; + } + + hd->top_field_first = + get_param(rpm_param->p.top_field_first, + "top_field_first"); + hd->repeat_first_field = + get_param(rpm_param->p.repeat_first_field, + "repeat_first_field"); +#if INTERLACE_CODING + if (hd->is_field_sequence) { + hd->is_top_field = + get_param(rpm_param->p.is_top_field, + "is_top_field"); +#if HALF_PIXEL_COMPENSATION || HALF_PIXEL_CHROMA + img->is_top_field = hd->is_top_field; +#endif + /*u_v(1, "reserved bit for interlace coding");*/ + } +#endif + +#if Check_Bitstream + /*assert(hd->picture_qp>=0&&hd->picture_qp<=(63 + 8 * + (input->sample_bit_depth - 8)));*/ +#endif + + img->random_access_decodable_flag = + get_param(rpm_param->p.random_access_decodable_flag, + "random_access_decodable_flag"); + + img->qp = hd->picture_qp; +} + + + + +void calc_picture_distance(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + /* + union param_u *rpm_param = &avs2_dec->param; + + for POC mode 0: + uint32_t MaxPicDistanceLsb = (1 << 8); + */ + if (img->coding_order < img->PrevPicDistanceLsb) + + { + int32_t i, j; + + hc->total_frames++; + for (i = 0; i < avs2_dec->ref_maxbuffer; i++) { + if ( + avs2_dec->fref[i]->imgtr_fwRefDistance + >= 0) { + avs2_dec->fref[i]-> + imgtr_fwRefDistance -= 256; + avs2_dec->fref[i]-> + imgcoi_ref -= 256; + } +#if RD170_FIX_BG + for (j = 0; j < MAXREF; j++) { +#else + for (j = 0; j < 4; j++) { +#endif + 
avs2_dec->fref[i]->ref_poc[j] -= 256; + } + } + for (i = 0; i < avs2_dec->outprint.buffer_num; i++) { + avs2_dec->outprint.stdoutdata[i].framenum -= 256; + avs2_dec->outprint.stdoutdata[i].tr -= 256; + } + + hd->last_output -= 256; + hd->curr_IDRtr -= 256; + hd->curr_IDRcoi -= 256; + hd->next_IDRtr -= 256; + hd->next_IDRcoi -= 256; + } + if (hd->low_delay == 0) { + img->tr = img->coding_order + + hd->displaydelay - hd->picture_reorder_delay; + } else { + img->tr = + img->coding_order; + } + +#if REMOVE_UNUSED + img->pic_distance = img->tr; +#else + img->pic_distance = img->tr % 256; +#endif + hc->picture_distance = img->pic_distance; + +} + +int32_t avs2_init_global_buffers(struct avs2_decoder *avs2_dec) +{ + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + + int32_t refnum; + + int32_t memory_size = 0; + /* +int32_t img_height = (hd->vertical_size + img->auto_crop_bottom); + */ + img->buf_cycle = input->buf_cycle + 1; + + img->buf_cycle *= 2; + + hc->background_ref = hc->backgroundReferenceFrame; + + for (refnum = 0; refnum < REF_MAXBUFFER; refnum++) { + avs2_dec->fref[refnum] = &avs2_dec->frm_pool[refnum]; + + /*//avs2_dec->fref[i] memory allocation*/ + if (is_avs2_print_bufmgr_detail()) + pr_info("[t] avs2_dec->fref[%d]@0x%p\n", + refnum, avs2_dec->fref[refnum]); + avs2_dec->fref[refnum]->imgcoi_ref = -257; + avs2_dec->fref[refnum]->is_output = -1; + avs2_dec->fref[refnum]->refered_by_others = -1; + avs2_dec->fref[refnum]-> + imgtr_fwRefDistance = -256; + init_frame_t(avs2_dec->fref[refnum]); +#ifdef AML + avs2_dec->fref[refnum]->index = refnum; +#endif + } +#ifdef AML + avs2_dec->f_bg = NULL; + + avs2_dec->m_bg = &avs2_dec->frm_pool[REF_MAXBUFFER]; + /*///avs2_dec->fref[i] memory allocation*/ + if (is_avs2_print_bufmgr_detail()) + pr_info("[t] avs2_dec->m_bg@0x%p\n", + avs2_dec->m_bg); + avs2_dec->m_bg->imgcoi_ref = -257; + avs2_dec->m_bg->is_output = -1; + 
avs2_dec->m_bg->refered_by_others = -1; + avs2_dec->m_bg->imgtr_fwRefDistance = -256; + init_frame_t(avs2_dec->m_bg); + avs2_dec->m_bg->index = refnum; +#endif + +#if BCBR + /*init BCBR related*/ + img->iNumCUsInFrame = + ((img->width + MAX_CU_SIZE - 1) / MAX_CU_SIZE) + * ((img->height + MAX_CU_SIZE - 1) + / MAX_CU_SIZE); + /*img->BLCUidx = (int32_t*) calloc( + img->iNumCUsInFrame, sizeof(int32_t));*/ + /*memset( img->BLCUidx, 0, img->iNumCUsInFrame);*/ +#endif + return memory_size; +} + +#ifdef AML +static void free_unused_buffers(struct avs2_decoder *avs2_dec) +{ + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + + int32_t refnum; + + img->buf_cycle = input->buf_cycle + 1; + + img->buf_cycle *= 2; + + hc->background_ref = hc->backgroundReferenceFrame; + + for (refnum = 0; refnum < REF_MAXBUFFER; refnum++) { +#ifndef NO_DISPLAY + if (avs2_dec->fref[refnum]->vf_ref > 0 || + avs2_dec->fref[refnum]->to_prepare_disp) + continue; +#endif + if (is_avs2_print_bufmgr_detail()) + pr_info("%s[t] avs2_dec->fref[%d]@0x%p\n", + __func__, refnum, avs2_dec->fref[refnum]); + avs2_dec->fref[refnum]->imgcoi_ref = -257; + avs2_dec->fref[refnum]->is_output = -1; + avs2_dec->fref[refnum]->refered_by_others = -1; + avs2_dec->fref[refnum]-> + imgtr_fwRefDistance = -256; + memset(avs2_dec->fref[refnum]->ref_poc, 0, + sizeof(avs2_dec->fref[refnum]->ref_poc)); + } + avs2_dec->f_bg = NULL; + + if (is_avs2_print_bufmgr_detail()) + pr_info("%s[t] avs2_dec->m_bg@0x%p\n", + __func__, avs2_dec->m_bg); + avs2_dec->m_bg->imgcoi_ref = -257; + avs2_dec->m_bg->is_output = -1; + avs2_dec->m_bg->refered_by_others = -1; + avs2_dec->m_bg->imgtr_fwRefDistance = -256; + memset(avs2_dec->m_bg->ref_poc, 0, + sizeof(avs2_dec->m_bg->ref_poc)); + +#if BCBR + /*init BCBR related*/ + img->iNumCUsInFrame = + ((img->width + MAX_CU_SIZE - 1) / MAX_CU_SIZE) + * ((img->height + MAX_CU_SIZE - 1) + / MAX_CU_SIZE); + 
/*img->BLCUidx = (int32_t*) calloc( + img->iNumCUsInFrame, sizeof(int32_t));*/ + /*memset( img->BLCUidx, 0, img->iNumCUsInFrame);*/ +#endif +} +#endif + +void init_frame_t(struct avs2_frame_s *currfref) +{ + memset(currfref, 0, sizeof(struct avs2_frame_s)); + currfref->imgcoi_ref = -257; + currfref->is_output = -1; + currfref->refered_by_others = -1; + currfref->imgtr_fwRefDistance = -256; + memset(currfref->ref_poc, 0, sizeof(currfref->ref_poc)); +} + +void get_reference_list_info(struct avs2_decoder *avs2_dec, int8_t *str) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + + int8_t str_tmp[16]; + int32_t i; + /* int32_t poc = hc->f_rec->imgtr_fwRefDistance; + fred.chiu@mediatek.com*/ + + if (img->num_of_references > 0) { + strcpy(str, "["); + for (i = 0; i < img->num_of_references; i++) { +#if RD1510_FIX_BG + if (img->type == B_IMG) { + sprintf(str_tmp, "%4d ", + hc->f_rec-> + ref_poc[ + img->num_of_references - 1 - i]); + } else { + sprintf(str_tmp, "%4d ", + hc->f_rec->ref_poc[i]); + } +#else + sprintf(str_tmp, "%4d ", + avs2_dec->fref[i]->imgtr_fwRefDistance); +#endif + + str_tmp[5] = '\0'; + strcat(str, str_tmp); + } + strcat(str, "]"); + } else { + str[0] = '\0'; + } +} + +void prepare_RefInfo(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + + int32_t i, j; + int32_t ii; + struct avs2_frame_s *tmp_fref; + + /*update IDR frame*/ + if (img->tr > hd->next_IDRtr && hd->curr_IDRtr != hd->next_IDRtr) { + hd->curr_IDRtr = hd->next_IDRtr; + hd->curr_IDRcoi = hd->next_IDRcoi; + } + /* re-order the ref buffer according to RPS*/ + img->num_of_references = hd->curr_RPS.num_of_ref; + +#if 1 + /*rain*/ + if (is_avs2_print_bufmgr_detail()) { + pr_info("%s: coding_order is %d, curr_IDRcoi is %d\n", + __func__, img->coding_order, hd->curr_IDRcoi); + for (ii = 0; ii < MAXREF; ii++) { + 
pr_info("ref_pic(%d)=%d\n", + ii, hd->curr_RPS.ref_pic[ii]); + } + for (ii = 0; ii < avs2_dec->ref_maxbuffer; ii++) { + pr_info( + "fref[%d]: index %d imgcoi_ref %d imgtr_fwRefDistance %d\n", + ii, avs2_dec->fref[ii]->index, + avs2_dec->fref[ii]->imgcoi_ref, + avs2_dec->fref[ii]->imgtr_fwRefDistance); + } + } +#endif + + for (i = 0; i < hd->curr_RPS.num_of_ref; i++) { + /*int32_t accumulate = 0;*/ + /* copy tmp_fref from avs2_dec->fref[i] */ + tmp_fref = avs2_dec->fref[i]; + +#if REMOVE_UNUSED + for (j = i; j < avs2_dec->ref_maxbuffer; j++) { + /*/////////////to be modified IDR*/ + if (avs2_dec->fref[j]->imgcoi_ref == + img->coding_order - + hd->curr_RPS.ref_pic[i]) { + break; + } + } +#else + + for (j = i; j < avs2_dec->ref_maxbuffer; j++) { + /*/////////////to be modified IDR*/ + int32_t k , tmp_tr; + for (k = 0; k < avs2_dec->ref_maxbuffer; k++) { + if (((int32_t)img->coding_order - + (int32_t)hd->curr_RPS.ref_pic[i]) == + avs2_dec->fref[k]->imgcoi_ref && + avs2_dec->fref[k]->imgcoi_ref >= -256) { + break; + } + } + if (k == avs2_dec->ref_maxbuffer) { + tmp_tr = + -1-1; + } else { + tmp_tr = + avs2_dec->fref[k]->imgtr_fwRefDistance; + } + if (tmp_tr < hd->curr_IDRtr) { + hd->curr_RPS.ref_pic[i] = + img->coding_order - hd->curr_IDRcoi; + + for (k = 0; k < i; k++) { + if (hd->curr_RPS.ref_pic[k] == + hd->curr_RPS.ref_pic[i]) { + accumulate++; + break; + } + } + } + if (avs2_dec->fref[j]->imgcoi_ref == + img->coding_order - hd->curr_RPS.ref_pic[i]) { + break; + } + } + if (j == avs2_dec->ref_maxbuffer || accumulate) + img->num_of_references--; +#endif + if (j != avs2_dec->ref_maxbuffer) { + /* copy avs2_dec->fref[i] from avs2_dec->fref[j] */ + avs2_dec->fref[i] = avs2_dec->fref[j]; + /* copy avs2_dec->fref[j] from ferf[tmp] */ + avs2_dec->fref[j] = tmp_fref; + if (is_avs2_print_bufmgr_detail()) { + pr_info("%s, switch %d %d: ", __func__, i, j); + for (ii = 0; ii < hd->curr_RPS.num_of_ref + || ii <= j; ii++) + pr_info("%d ", + avs2_dec->fref[ii]->index); + 
pr_info("\n"); + } + } + } + if (img->type == B_IMG && + (avs2_dec->fref[0]->imgtr_fwRefDistance <= img->tr + || avs2_dec->fref[1]->imgtr_fwRefDistance >= img->tr)) { + + pr_info("wrong reference configuration for B frame\n"); + pr_info( + "fref0 imgtr_fwRefDistance %d, fref1 imgtr_fwRefDistance %d, img->tr %d\n", + avs2_dec->fref[0]->imgtr_fwRefDistance, + avs2_dec->fref[1]->imgtr_fwRefDistance, + img->tr); + hc->f_rec->error_mark = 1; + avs2_dec->bufmgr_error_flag = 1; + return; /* exit(-1);*/ + /*******************************************/ + } + +#if !FIX_PROFILE_LEVEL_DPB_RPS_1 + /* delete the frame that will never be used*/ + for (i = 0; i < hd->curr_RPS.num_to_remove; i++) { + for (j = 0; j < avs2_dec->ref_maxbuffer; j++) { + if (avs2_dec->fref[j]->imgcoi_ref >= -256 + && avs2_dec->fref[j]->imgcoi_ref + == img->coding_order - + hd->curr_RPS.remove_pic[i]) { + break; + } + } + if (j < avs2_dec->ref_maxbuffer && + j >= img->num_of_references) { + avs2_dec->fref[j]->imgcoi_ref = -257; +#if M3480_TEMPORAL_SCALABLE + avs2_dec->fref[j]->temporal_id = -1; +#endif + if (avs2_dec->fref[j]->is_output == -1) { + avs2_dec->fref[j]-> + imgtr_fwRefDistance = -256; + } + } + } +#endif + + /* add inter-view reference picture*/ + + /* add current frame to ref buffer*/ + for (i = 0; i < avs2_dec->ref_maxbuffer; i++) { + if ((avs2_dec->fref[i]->imgcoi_ref < -256 + || abs(avs2_dec->fref[i]-> + imgtr_fwRefDistance - img->tr) >= 128) + && avs2_dec->fref[i]->is_output == -1 + && avs2_dec->fref[i]->bg_flag == 0 +#ifndef NO_DISPLAY + && avs2_dec->fref[i]->vf_ref == 0 + && avs2_dec->fref[i]->to_prepare_disp == 0 +#endif + ) { + break; + } + } + if (i == avs2_dec->ref_maxbuffer) { + pr_info( + "%s, warning, no enough buf\n", + __func__); + i--; + } + + hc->f_rec = avs2_dec->fref[i]; + hc->currentFrame = hc->f_rec->ref; + hc->f_rec->imgtr_fwRefDistance = img->tr; + hc->f_rec->imgcoi_ref = img->coding_order; +#if M3480_TEMPORAL_SCALABLE + hc->f_rec->temporal_id = hd->cur_layer; +#endif + 
hc->f_rec->is_output = 1; +#ifdef AML + hc->f_rec->error_mark = 0; + hc->f_rec->decoded_lcu = 0; + hc->f_rec->slice_type = img->type; +#endif + hc->f_rec->refered_by_others = hd->curr_RPS.referd_by_others; + if (is_avs2_print_bufmgr_detail()) + pr_info( + "%s, set f_rec (cur_pic) <= fref[%d] img->tr %d coding_order %d img_type %d\n", + __func__, i, img->tr, img->coding_order, + img->type); + + if (img->type != B_IMG) { + for (j = 0; + j < img->num_of_references; j++) { + hc->f_rec->ref_poc[j] = + avs2_dec->fref[j]->imgtr_fwRefDistance; + } + } else { + hc->f_rec->ref_poc[0] = + avs2_dec->fref[1]->imgtr_fwRefDistance; + hc->f_rec->ref_poc[1] = + avs2_dec->fref[0]->imgtr_fwRefDistance; + } + +#if M3480_TEMPORAL_SCALABLE + + for (j = img->num_of_references; + j < 4; j++) { + /**/ + hc->f_rec->ref_poc[j] = 0; + } + + if (img->type == INTRA_IMG) { + int32_t l; + for (l = 0; l < 4; l++) { + hc->f_rec->ref_poc[l] + = img->tr; + } + } + +#endif + +/*////////////////////////////////////////////////////////////////////////*/ + /* updata ref pointer*/ + + if (img->type != I_IMG) { + + img->imgtr_next_P = img->type == B_IMG ? 
+ avs2_dec->fref[0]->imgtr_fwRefDistance : img->tr; + if (img->type == B_IMG) { + hd->trtmp = avs2_dec->fref[0]->imgtr_fwRefDistance; + avs2_dec->fref[0]->imgtr_fwRefDistance = + avs2_dec->fref[1]->imgtr_fwRefDistance; + } + } +#if 1 + /*rain*/ + if (is_avs2_print_bufmgr_detail()) { + for (ii = 0; ii < avs2_dec->ref_maxbuffer; ii++) { + pr_info( + "fref[%d]: index %d imgcoi_ref %d imgtr_fwRefDistance %d refered %d, is_out %d, bg %d, vf_ref %d ref_pos(%d,%d,%d,%d,%d,%d,%d)\n", + ii, avs2_dec->fref[ii]->index, + avs2_dec->fref[ii]->imgcoi_ref, + avs2_dec->fref[ii]->imgtr_fwRefDistance, + avs2_dec->fref[ii]->refered_by_others, + avs2_dec->fref[ii]->is_output, + avs2_dec->fref[ii]->bg_flag, + avs2_dec->fref[ii]->vf_ref, + avs2_dec->fref[ii]->ref_poc[0], + avs2_dec->fref[ii]->ref_poc[1], + avs2_dec->fref[ii]->ref_poc[2], + avs2_dec->fref[ii]->ref_poc[3], + avs2_dec->fref[ii]->ref_poc[4], + avs2_dec->fref[ii]->ref_poc[5], + avs2_dec->fref[ii]->ref_poc[6] + ); + } + } +#endif +} + +int32_t init_frame(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + + +#if RD1510_FIX_BG + if (img->type == I_IMG && + img->typeb == BACKGROUND_IMG) { /*G/GB frame*/ + img->num_of_references = 0; + } else if (img->type == P_IMG && img->typeb == BP_IMG) { + /* only one reference frame(G\GB) for S frame*/ + img->num_of_references = 1; + } +#endif + + if (img->typeb == BACKGROUND_IMG && + hd->background_picture_output_flag == 0) { + hc->currentFrame = hc->background_ref; +#ifdef AML + hc->cur_pic = avs2_dec->m_bg; +#endif + } else { + prepare_RefInfo(avs2_dec); +#ifdef AML + hc->cur_pic = hc->f_rec; +#endif + } + + +#ifdef FIX_CHROMA_FIELD_MV_BK_DIST + if (img->typeb == BACKGROUND_IMG + && img->is_field_sequence) { + avs2_dec->bk_img_is_top_field + = img->is_top_field; + } +#endif + return 0; +} + +void delete_trbuffer(struct outdata_s *data, int32_t pos) +{ + int32_t 
i; + for (i = pos; + i < data->buffer_num - 1; i++) { + data->stdoutdata[i] = + data->stdoutdata[i + 1]; + } + data->buffer_num--; +} + +#if RD170_FIX_BG +void flushDPB(struct avs2_decoder *avs2_dec) +{ + struct Video_Dec_data_s *hd = &avs2_dec->hd; + int j, tmp_min, i, pos = -1; + int search_times = avs2_dec->outprint.buffer_num; + + tmp_min = 1 << 20; + i = 0, j = 0; + pos = -1; + + for (j = 0; j < search_times; j++) { + pos = -1; + tmp_min = (1 << 20); + /*search for min poi picture to display*/ + for (i = 0; i < avs2_dec->outprint.buffer_num; i++) { + if (avs2_dec->outprint.stdoutdata[i].tr < tmp_min) { + pos = i; + tmp_min = avs2_dec->outprint.stdoutdata[i].tr; + } + } + + if (pos != -1) { + hd->last_output = avs2_dec->outprint.stdoutdata[pos].tr; + report_frame(avs2_dec, &avs2_dec->outprint, pos); + if (avs2_dec->outprint.stdoutdata[pos].typeb + == BACKGROUND_IMG && + avs2_dec->outprint.stdoutdata[pos]. + background_picture_output_flag + == 0) { + /*write_GB_frame(hd->p_out_background);*/ + } else { + write_frame(avs2_dec, + avs2_dec->outprint.stdoutdata[pos].tr); + } + + delete_trbuffer(&avs2_dec->outprint, pos); + } + } + + /*clear dpb info*/ + for (j = 0; j < REF_MAXBUFFER; j++) { + avs2_dec->fref[j]->imgtr_fwRefDistance = -256; + avs2_dec->fref[j]->imgcoi_ref = -257; + avs2_dec->fref[j]->temporal_id = -1; + avs2_dec->fref[j]->refered_by_others = 0; + } +} +#endif + + + +#if M3480_TEMPORAL_SCALABLE +void cleanRefMVBufRef(int pos) +{ +#if 0 + int k, x, y; + /*re-init mvbuf*/ + for (k = 0; k < 2; k++) { + for (y = 0; y < img->height / MIN_BLOCK_SIZE; y++) { + for (x = 0; x < img->width / MIN_BLOCK_SIZE; x++) + fref[pos]->mvbuf[y][x][k] = 0; + + } + } + /*re-init refbuf*/ + for (y = 0; y < img->height / MIN_BLOCK_SIZE; y++) { + for (x = 0; x < img->width / MIN_BLOCK_SIZE ; x++) + fref[pos]->refbuf[y][x] = -1; + + } +#endif +} +#endif + +static int frame_postprocessing(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct 
Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + + int32_t pointer_tmp = avs2_dec->outprint.buffer_num; + int32_t i; + struct STDOUT_DATA_s *p_outdata; +#if RD160_FIX_BG + int32_t j, tmp_min, output_cur_dec_pic, pos = -1; + int32_t search_times = avs2_dec->outprint.buffer_num; +#endif + /*pic dist by Grandview Semi. @ [06-07-20 15:25]*/ + img->PrevPicDistanceLsb = (img->coding_order % 256); + + pointer_tmp = avs2_dec->outprint.buffer_num; + p_outdata = &avs2_dec->outprint.stdoutdata[pointer_tmp]; + + p_outdata->type = img->type; + p_outdata->typeb = img->typeb; + p_outdata->framenum = img->tr; + p_outdata->tr = img->tr; +#if 0 /*def ORI*/ + p_outdata->qp = img->qp; +#else + p_outdata->qp = 0; +#endif + /*p_outdata->snr_y = snr->snr_y;*/ + /*p_outdata->snr_u = snr->snr_u;*/ + /*p_outdata->snr_v = snr->snr_v;*/ + p_outdata->tmp_time = hd->tmp_time; + p_outdata->picture_structure = img->picture_structure; + /*p_outdata->curr_frame_bits = + StatBitsPtr->curr_frame_bits;*/ + /*p_outdata->emulate_bits = StatBitsPtr->emulate_bits;*/ +#if RD1501_FIX_BG + p_outdata->background_picture_output_flag + = hd->background_picture_output_flag; + /*Longfei.Wang@mediatek.com*/ +#endif + +#if RD160_FIX_BG + p_outdata->picture_reorder_delay = hd->picture_reorder_delay; +#endif + avs2_dec->outprint.buffer_num++; + +#if RD170_FIX_BG + search_times = avs2_dec->outprint.buffer_num; +#endif + /* record the reference list*/ + strcpy(p_outdata->str_reference_list, hc->str_list_reference); + +#if !REF_OUTPUT + #error "!!!REF_OUTPUT should be 1" + for (i = 0; i < avs2_dec->outprint.buffer_num; i++) { + min_tr(avs2_dec->outprint, &pos); + if (avs2_dec->outprint.stdoutdata[pos].tr < img->tr + || avs2_dec->outprint.stdoutdata[pos].tr + == (hd->last_output + 1)) { + hd->last_output = avs2_dec->outprint.stdoutdata[pos].tr; + report_frame(avs2_dec, &avs2_dec->outprint, pos); +#if 0 /*def ORI*/ + write_frame(hd->p_out, + avs2_dec->outprint.stdoutdata[pos].tr); 
+#endif + delete_trbuffer(&avs2_dec->outprint, pos); + i--; + } else { + break; + } + } +#else +#if RD160_FIX_BG /*Longfei.Wang@mediatek.com*/ + tmp_min = 1 << 20; + i = 0, j = 0; + output_cur_dec_pic = 0; + pos = -1; + for (j = 0; j < search_times; j++) { + pos = -1; + tmp_min = (1 << 20); + /*search for min poi picture to display*/ + for (i = 0; i < avs2_dec->outprint.buffer_num; i++) { + if ((avs2_dec->outprint.stdoutdata[i].tr < tmp_min) && + ((avs2_dec->outprint.stdoutdata[i].tr + + avs2_dec->outprint.stdoutdata[i]. + picture_reorder_delay) + <= (int32_t)img->coding_order)) { + pos = i; + tmp_min = avs2_dec->outprint.stdoutdata[i].tr; + } + } + + if ((0 == hd->displaydelay) && (0 == output_cur_dec_pic)) { + if (img->tr <= tmp_min) {/*fred.chiu@mediatek.com*/ + /*output current decode picture + right now*/ + pos = avs2_dec->outprint.buffer_num - 1; + output_cur_dec_pic = 1; + } + } + if (pos != -1) { + hd->last_output = avs2_dec->outprint.stdoutdata[pos].tr; + report_frame(avs2_dec, &avs2_dec->outprint, pos); +#if 1 /*def ORI*/ + if (avs2_dec->outprint.stdoutdata[pos].typeb + == BACKGROUND_IMG && + avs2_dec->outprint.stdoutdata[pos]. 
+ background_picture_output_flag == 0) { + /**/ + /**/ + } else { + write_frame(avs2_dec, + avs2_dec->outprint.stdoutdata[pos].tr); + } +#endif + delete_trbuffer(&avs2_dec->outprint, pos); + } + + } + +#else + #error "!!!RD160_FIX_BG should be defined" + if (img->coding_order + + (uint32_t)hc->total_frames * 256 >= + (uint32_t)hd->picture_reorder_delay) { + int32_t tmp_min, pos = -1; + tmp_min = 1 << 20; + + for (i = 0; i < + avs2_dec->outprint.buffer_num; i++) { + if (avs2_dec->outprint.stdoutdata[i].tr + < tmp_min && + avs2_dec->outprint.stdoutdata[i].tr + >= hd->last_output) { + /*GB has the same "tr" with "last_output"*/ + pos = i; + tmp_min = + avs2_dec->outprint.stdoutdata[i].tr; + } + } + + if (pos != -1) { + hd->last_output = avs2_dec->outprint.stdoutdata[pos].tr; + report_frame(avs2_dec, &avs2_dec->outprint, pos); +#if RD1501_FIX_BG + if (avs2_dec->outprint.stdoutdata[pos].typeb + == BACKGROUND_IMG && avs2_dec-> + outprint.stdoutdata[pos]. + background_picture_output_flag == 0) { +#else + if (avs2_dec->outprint.stdoutdata[pos].typeb + == BACKGROUND_IMG && + hd->background_picture_output_flag + == 0) { +#endif + write_GB_frame( + hd->p_out_background); + } else { + write_frame(avs2_dec, + avs2_dec->outprint.stdoutdata[pos].tr); + } + delete_trbuffer(&avs2_dec->outprint, pos); + + } + + } +#endif +#endif + return pos; + + } + +void write_frame(struct avs2_decoder *avs2_dec, int32_t pos) +{ + int32_t j; + + if (is_avs2_print_bufmgr_detail()) + pr_info("%s(pos = %d)\n", __func__, pos); + + for (j = 0; j < avs2_dec->ref_maxbuffer; j++) { + if (avs2_dec->fref[j]->imgtr_fwRefDistance == pos) { + avs2_dec->fref[j]->imgtr_fwRefDistance_bak = pos; + avs2_dec->fref[j]->is_output = -1; + avs2_dec->fref[j]->to_prepare_disp = + avs2_dec->to_prepare_disp_count++; + if (avs2_dec->fref[j]->refered_by_others == 0 + || avs2_dec->fref[j]->imgcoi_ref + == -257) { + avs2_dec->fref[j]->imgtr_fwRefDistance + = -256; + avs2_dec->fref[j]->imgcoi_ref = -257; +#if 
M3480_TEMPORAL_SCALABLE + avs2_dec->fref[j]->temporal_id = -1; +#endif + if (is_avs2_print_bufmgr_detail()) + pr_info("%s, fref index %d\n", + __func__, j); + } + break; + } + } +} + +/*rain???, outdata *data*/ +void report_frame(struct avs2_decoder *avs2_dec, + struct outdata_s *data, int32_t pos) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + + int8_t *Frmfld; + int8_t Frm[] = "FRM"; + int8_t Fld[] = "FLD"; + struct STDOUT_DATA_s *p_stdoutdata + = &data->stdoutdata[pos]; + const int8_t *typ; + +#if 0 + if (input->MD5Enable & 0x02) { + sprintf(MD5str, "%08X%08X%08X%08X\0", + p_stdoutdata->DecMD5Value[0], + p_stdoutdata->DecMD5Value[1], + p_stdoutdata->DecMD5Value[2], + p_stdoutdata->DecMD5Value[3]); + } else { + memset(MD5val, 0, 16); + memset(MD5str, 0, 33); + } +#endif + + if (p_stdoutdata-> + picture_structure) { + Frmfld = Frm; + } else { + Frmfld = Fld; + } +#if INTERLACE_CODING + if (img->is_field_sequence) { /*rcs??*/ + Frmfld = Fld; + } +#endif + if ((p_stdoutdata->tr + hc->total_frames * 256) + == hd->end_SeqTr) { /* I picture*/ + /*if ( img->new_sequence_flag == 1 )*/ + { + img->sequence_end_flag = 0; + /*fprintf(stdout, "Sequence + End\n\n");*/ + } + } + if ((p_stdoutdata->tr + hc->total_frames * 256) + == hd->next_IDRtr) { +#if !RD170_FIX_BG + if (hd->vec_flag) /**/ +#endif + { + hd->vec_flag = 0; + /*fprintf(stdout, "Video Edit + Code\n");*/ + } + } + + if (p_stdoutdata->typeb == BACKGROUND_IMG) { + typ = (hd->background_picture_output_flag != 0) ? "G" : "GB"; + } else { +#if REMOVE_UNUSED + typ = (p_stdoutdata->type == INTRA_IMG) + ? "I" : (p_stdoutdata->type == INTER_IMG) ? + ((p_stdoutdata->typeb == BP_IMG) ? "S" : "P") + : (p_stdoutdata->type == F_IMG ? "F" : "B"); +#else + typ = (p_stdoutdata->type == INTRA_IMG) ? "I" : + (p_stdoutdata->type == INTER_IMG) ? + ((p_stdoutdata->type == BP_IMG) ? "S" : "P") + : (p_stdoutdata->type == F_IMG ? 
"F" : "B"); +#endif + } + +#if 0 + /*rain???*/ + pr_info("%3d(%s) %3d %5d %7.4f %7.4f %7.4f %5d\t\t%s %8d %6d\t%s", + p_stdoutdata->framenum + hc->total_frames * 256, + typ, p_stdoutdata->tr + hc->total_frames * 256, + p_stdoutdata->qp, p_stdoutdata->snr_y, + p_stdoutdata->snr_u, p_stdoutdata->snr_v, + p_stdoutdata->tmp_time, Frmfld, + p_stdoutdata->curr_frame_bits, + p_stdoutdata->emulate_bits, + ""); +#endif + if (is_avs2_print_bufmgr_detail()) + pr_info(" %s\n", p_stdoutdata->str_reference_list); + + /*fflush(stdout);*/ + hd->FrameNum++; +} + +void avs2_prepare_header(struct avs2_decoder *avs2_dec, int32_t start_code) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + + switch (start_code) { + case SEQUENCE_HEADER_CODE: + img->new_sequence_flag = 1; + if (is_avs2_print_bufmgr_detail()) + pr_info("SEQUENCE\n"); +#ifdef TO_CHECK +#if SEQ_CHANGE_CHECKER + if (seq_checker_buf == NULL) { + seq_checker_buf = malloc(length); + seq_checker_length = length; + memcpy(seq_checker_buf, Buf, length); + } else { + if ((seq_checker_length != length) || + (memcmp(seq_checker_buf, Buf, length) != 0)) { + free(seq_checker_buf); + /*fprintf(stdout, + "Non-conformance + stream: sequence + header cannot change + !!\n");*/ +#if RD170_FIX_BG + seq_checker_buf = NULL; + seq_checker_length = 0; + seq_checker_buf = malloc(length); + seq_checker_length = length; + memcpy(seq_checker_buf, Buf, length); +#endif + } + + + } +#endif +#if RD170_FIX_BG + if (input->alf_enable + && alfParAllcoated == 1) { + ReleaseAlfGlobalBuffer(); + alfParAllcoated = 0; + } +#endif +/*TO_CHECK*/ +#endif +#if FIX_FLUSH_DPB_BY_LF + if (hd->vec_flag) { + int32_t k; + if (is_avs2_print_bufmgr_detail()) + pr_info("vec_flag is 1, flushDPB and reinit bugmgr\n"); + + flushDPB(avs2_dec); + for (k = 0; k < avs2_dec->ref_maxbuffer; k++) + cleanRefMVBufRef(k); + + hd->vec_flag = 0; +#ifdef AML + free_unused_buffers(avs2_dec); +#else + free_global_buffers(avs2_dec); 
+#endif + img->number = 0; + img->PrevPicDistanceLsb = 0; + avs2_dec->init_hw_flag = 0; + } +#endif + +#if FIX_SEQ_END_FLUSH_DPB_BY_LF + if (img->new_sequence_flag + && img->sequence_end_flag) { + int32_t k; + if (is_avs2_print_bufmgr_detail()) + pr_info( + "new_sequence_flag after sequence_end_flag, flushDPB and reinit bugmgr\n"); + flushDPB(avs2_dec); + for (k = 0; k < avs2_dec->ref_maxbuffer; k++) + cleanRefMVBufRef(k); + +#ifdef AML + free_unused_buffers(avs2_dec); +#else + free_global_buffers(avs2_dec); +#endif + img->number = 0; + img->PrevPicDistanceLsb = 0; + avs2_dec->init_hw_flag = 0; + } +#endif + img->seq_header_indicate = 1; + break; + case I_PICTURE_START_CODE: + if (is_avs2_print_bufmgr_detail()) + pr_info("PIC-I\n"); + Get_SequenceHeader(avs2_dec); + Get_I_Picture_Header(avs2_dec); + calc_picture_distance(avs2_dec); + Read_ALF_param(avs2_dec); + if (!img->seq_header_indicate) { + img->B_discard_flag = 1; + /*fprintf(stdout, " I + %3d\t\tDIDSCARD!!\n", + img->tr);*/ + break; + } + break; + case PB_PICTURE_START_CODE: + if (is_avs2_print_bufmgr_detail()) + pr_info("PIC-PB\n"); + Get_SequenceHeader(avs2_dec); + Get_PB_Picture_Header(avs2_dec); + calc_picture_distance(avs2_dec); + Read_ALF_param(avs2_dec); + /* xiaozhen zheng, 20071009*/ + if (!img->seq_header_indicate) { + img->B_discard_flag = 1; + + if (img->type == P_IMG) { + /*fprintf(stdout, " P + %3d\t\tDIDSCARD!!\n", + img->tr);*/ + } + if (img->type == F_IMG) { + /*fprintf(stdout, " F + %3d\t\tDIDSCARD!!\n", + img->tr);*/ + } else { + /*fprintf(stdout, " B + %3d\t\tDIDSCARD!!\n", + img->tr);*/ + } + + break; + } + + if (img->seq_header_indicate == 1 + && img->type != B_IMG) { + img->B_discard_flag = 0; + } + if (img->type == B_IMG && img->B_discard_flag == 1 + && !img->random_access_decodable_flag) { + /*fprintf(stdout, " B + %3d\t\tDIDSCARD!!\n", + img->tr);*/ + break; + } + + break; + case SEQUENCE_END_CODE: + if (is_avs2_print_bufmgr_detail()) + pr_info("SEQUENCE_END_CODE\n"); +#ifdef 
TO_CHECK +#if SEQ_CHANGE_CHECKER + if (seq_checker_buf != NULL) { + free(seq_checker_buf); + seq_checker_buf = NULL; + seq_checker_length = 0; + } +#endif +#endif +img->new_sequence_flag = 1; +img->sequence_end_flag = 1; +break; + case VIDEO_EDIT_CODE: + if (is_avs2_print_bufmgr_detail()) + pr_info("VIDEO_EDIT_CODE\n"); + /*video_edit_code_data(Buf, startcodepos, length);*/ + hd->vec_flag = 1; +#ifdef TO_CHECK +#if SEQ_CHANGE_CHECKER + if (seq_checker_buf != NULL) { + free(seq_checker_buf); + seq_checker_buf = NULL; + seq_checker_length = 0; + } +#endif +#endif + +break; + } +} + +#ifdef AML +static uint32_t log2i(uint32_t val) +{ + uint32_t ret = -1; + while (val != 0) { + val >>= 1; + ret++; + } + return ret; +} +#endif + +int32_t avs2_process_header(struct avs2_decoder *avs2_dec) +{ + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + int32_t lcu_x_num_div; + int32_t lcu_y_num_div; + + int32_t N8_SizeScale; + /*pr_info("%s\n", __func__);*/ + { + N8_SizeScale = 1; + + if (hd->horizontal_size % + (MIN_CU_SIZE * N8_SizeScale) != 0) { + img->auto_crop_right = + (MIN_CU_SIZE * N8_SizeScale) - + (hd->horizontal_size % + (MIN_CU_SIZE * N8_SizeScale)); + } else + img->auto_crop_right = 0; + +#if !INTERLACE_CODING + if (hd->progressive_sequence) /**/ +#endif + { + if (hd->vertical_size % + (MIN_CU_SIZE * N8_SizeScale) != 0) { + img->auto_crop_bottom = + (MIN_CU_SIZE * N8_SizeScale) - + (hd->vertical_size % + (MIN_CU_SIZE * N8_SizeScale)); + } else + img->auto_crop_bottom = 0; + } + + /* Reinit parameters (NOTE: need to do + before init_frame //*/ + img->width = + (hd->horizontal_size + img->auto_crop_right); + img->height = + (hd->vertical_size + img->auto_crop_bottom); + img->width_cr = (img->width >> 1); + + if (input->chroma_format == 1) + img->height_cr = (img->height >> 1); + + img->PicWidthInMbs = img->width / MIN_CU_SIZE; + img->PicHeightInMbs = img->height / MIN_CU_SIZE; + 
img->PicSizeInMbs = img->PicWidthInMbs * img->PicHeightInMbs; + img->max_mb_nr = (img->width * img->height) / + (MIN_CU_SIZE * MIN_CU_SIZE); + } + + if (img->new_sequence_flag && img->sequence_end_flag) { +#if 0/*RD170_FIX_BG //*/ + int32_t k; + flushDPB(); + for (k = 0; k < avs2_dec->ref_maxbuffer; k++) + cleanRefMVBufRef(k); + + free_global_buffers(); + img->number = 0; +#endif + hd->end_SeqTr = img->tr; + img->sequence_end_flag = 0; + } + if (img->new_sequence_flag) { + hd->next_IDRtr = img->tr; + hd->next_IDRcoi = img->coding_order; + img->new_sequence_flag = 0; + } +#if 0/*RD170_FIX_BG*/ + if (hd->vec_flag) { + int32_t k; + flushDPB(); + for (k = 0; k < avs2_dec->ref_maxbuffer; k++) + cleanRefMVBufRef(k); + + hd->vec_flag = 0; + free_global_buffers(); + img->number = 0; + } +#endif +/* allocate memory for frame buffers*/ +#if 0 +/* called in vavs2.c*/ + if (img->number == 0) + avs2_init_global_buffers(avs2_dec); +#endif + img->current_mb_nr = 0; + + init_frame(avs2_dec); + + img->types = img->type; /* jlzheng 7.15*/ + + if (img->type != B_IMG) { + hd->pre_img_type = img->type; + hd->pre_img_types = img->types; + } + +#ifdef AML + avs2_dec->lcu_size_log2 = log2i(avs2_dec->lcu_size); + lcu_x_num_div = (img->width/avs2_dec->lcu_size); + lcu_y_num_div = (img->height/avs2_dec->lcu_size); + avs2_dec->lcu_x_num = ((img->width % avs2_dec->lcu_size) == 0) ? + lcu_x_num_div : lcu_x_num_div+1; + avs2_dec->lcu_y_num = ((img->height % avs2_dec->lcu_size) == 0) ? 
+ lcu_y_num_div : lcu_y_num_div+1; + avs2_dec->lcu_total = avs2_dec->lcu_x_num*avs2_dec->lcu_y_num; +#endif + return SOP; +} + +int avs2_post_process(struct avs2_decoder *avs2_dec) +{ + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + int32_t i; + int ret; + if (img->typeb == BACKGROUND_IMG && hd->background_picture_enable) { +#ifdef AML + for (i = 0; i < avs2_dec->ref_maxbuffer; i++) { + if (avs2_dec->fref[i]->bg_flag != 0) { + avs2_dec->fref[i]->bg_flag = 0; + if (is_avs2_print_bufmgr_detail()) + pr_info( + "clear old BACKGROUND_IMG for index %d\r\n", + avs2_dec->fref[i]->index); + } + } + if (is_avs2_print_bufmgr_detail()) + pr_info( + "post_process: set BACKGROUND_IMG flag for %d\r\n", + hc->cur_pic->index); + avs2_dec->f_bg = hc->cur_pic; + hc->cur_pic->bg_flag = 1; +#endif + } + +#if BCBR + if (hd->background_picture_enable + && hd->bcbr_enable && img->number > 0) + updateBgReference(); +#endif + + if (img->typeb == BACKGROUND_IMG && + hd->background_picture_output_flag == 0) + hd->background_number++; + + if (img->type == B_IMG) { + avs2_dec->fref[0]->imgtr_fwRefDistance + = hd->trtmp; + } + + /* record the reference list information*/ + get_reference_list_info(avs2_dec, avs2_dec->hc.str_list_reference); + + /*pr_info("%s\n", __func__);*/ + ret = frame_postprocessing(avs2_dec); + +#if FIX_PROFILE_LEVEL_DPB_RPS_1 + /* delete the frame that will never be used*/ + { + int32_t i, j; + if (is_avs2_print_bufmgr_detail()) { + pr_info( + "%s, coding_order %d to remove %d buf: ", + __func__, + img->coding_order, + hd->curr_RPS.num_to_remove); + for (i = 0; i < hd->curr_RPS.num_to_remove; i++) + pr_info("%d ", hd->curr_RPS.remove_pic[i]); + pr_info("\n"); + } + for (i = 0; i < hd->curr_RPS.num_to_remove; i++) { + for (j = 0; j < avs2_dec->ref_maxbuffer; j++) { + + if (avs2_dec->fref[j]->imgcoi_ref >= -256 + && avs2_dec->fref[j]->imgcoi_ref == + img->coding_order - + 
hd->curr_RPS.remove_pic[i]) + break; + } + if (j < avs2_dec->ref_maxbuffer) { /**/ +#if FIX_RPS_PICTURE_REMOVE +/* Label new frames as "un-referenced" */ + avs2_dec->fref[j]->refered_by_others = 0; + + /* remove frames which have been outputted */ + if (avs2_dec->fref[j]->is_output == -1) { + avs2_dec->fref[j]-> + imgtr_fwRefDistance = -256; + avs2_dec->fref[j]->imgcoi_ref = -257; + avs2_dec->fref[j]->temporal_id = -1; + + } +#else + avs2_dec->fref[j]->imgcoi_ref = -257; +#if M3480_TEMPORAL_SCALABLE + avs2_dec->fref[j]->temporal_id = -1; +#endif + if (avs2_dec->fref[j]->is_output == -1) { + avs2_dec->fref[j]->imgtr_fwRefDistance + = -256; + } +#endif + } + } + } +#endif + + + /*! TO 19.11.2001 Known Problem: for init_frame + * we have to know the picture type of the + * actual frame*/ + /*! in case the first slice of the P-Frame + * following the I-Frame was lost we decode this + * P-Frame but! do not write it because it + * was + * assumed to be an I-Frame in init_frame.So we + * force the decoder to*/ + /*! guess the right picture type. This is a hack + * a should be removed by the time there is a + * clean*/ + /*! solution where we do not have to know the + * picture type for the function init_frame.*/ + /*! 
End TO 19.11.2001//Lou*/ + + { + if (img->type == I_IMG || + img->type == P_IMG || + img->type == F_IMG) + img->number++; + else { + hc->Bframe_ctr++; /* B + pictures*/ + } + } + return ret; +} + +void init_avs2_decoder(struct avs2_decoder *avs2_dec) +{ + int32_t i, j, k; + + struct inp_par *input = &avs2_dec->input; + struct ImageParameters_s *img = &avs2_dec->img; + struct Video_Com_data_s *hc = &avs2_dec->hc; + struct Video_Dec_data_s *hd = &avs2_dec->hd; + if (is_avs2_print_bufmgr_detail()) + pr_info("[t] struct avs2_dec @0x%p\n", avs2_dec); + memset(avs2_dec, 0, sizeof(struct avs2_decoder)); +#ifdef AML + avs2_dec->to_prepare_disp_count = 1; +#endif + /* + * ALFParam init + */ + for (i = 0; i < 3; i++) { + avs2_dec->m_alfPictureParam[i].alf_flag = 0; /*1*/ + avs2_dec->m_alfPictureParam[i].num_coeff = 9; /*1*/ + avs2_dec->m_alfPictureParam[i].filters_per_group = 3; /*1*/ + avs2_dec->m_alfPictureParam[i].componentID = i; /*1*/ + for (j = 0; j < 16; j++) { + avs2_dec->m_alfPictureParam[i].filterPattern[j] = 0; + /*16*/ + } + for (j = 0; j < 16; j++) { + for (k = 0; k < 9; k++) { + avs2_dec-> + m_alfPictureParam[i].coeffmulti[j][k] = 0; + /*16*9*/ + } + } + } + + img->seq_header_indicate = 0; + img->B_discard_flag = 0; + + hd->eos = 0; + + if (input->ref_pic_order) { /*ref order*/ + hd->dec_ref_num = 0; + } + + /* + memset(g_log2size, -1, MAX_CU_SIZE + 1); + c = 2; + for (k = 4; k <= MAX_CU_SIZE; k *= 2) { + g_log2size[k] = c; + c++; + } + */ + + avs2_dec->outprint.buffer_num = 0; + + hd->last_output = -1; + hd->end_SeqTr = -1; + hd->curr_IDRtr = 0; + hd->curr_IDRcoi = 0; + hd->next_IDRtr = 0; + hd->next_IDRcoi = 0; + /* Allocate Slice data struct*/ + img->number = 0; + img->type = I_IMG; + + img->imgtr_next_P = 0; + + img->imgcoi_next_ref = 0; + + + img->num_of_references = 0; + hc->seq_header = 0; + + img->new_sequence_flag = 1; + + hd->vec_flag = 0; + + hd->FrameNum = 0; + + /* B pictures*/ + hc->Bframe_ctr = 0; + hc->total_frames = 0; + + /* time for total 
decoding session*/ + hc->tot_time = 0; + +} +
diff --git a/drivers/frame_provider/decoder_v4l/avs2/avs2_global.h b/drivers/frame_provider/decoder_v4l/avs2/avs2_global.h new file mode 100644 index 0000000..d9f85bd --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/avs2/avs2_global.h
@@ -0,0 +1,1687 @@ +/* The copyright in this software is being made available under the BSD + * License, included below. This software may be subject to other third party + * and contributor rights, including patent rights, and no such rights are + * granted under this license. + * + * Copyright (c) 2002-2016, Audio Video coding Standard Workgroup of China + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Audio Video coding Standard Workgroup of China + * nor the names of its contributors maybe + * used to endorse or promote products + * derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + + + + +/* + * File name: global.h + * Function: global definitions for for AVS decoder. + * + */ + +#ifndef _GLOBAL_H_ +#define _GLOBAL_H_ + +/* #include <stdio.h> //!< for FILE */ +/* #include <stdlib.h> */ + +#define AML +#define SANITY_CHECK +#undef NO_DISPLAY + +/* #include "define.h" */ +#define RD "19.2" +#define VERSION "19.2" + +#define RESERVED_PROFILE_ID 0x24 +#define BASELINE_PICTURE_PROFILE 18 +#define BASELINE_PROFILE 32 /* 0x20 */ +#define BASELINE10_PROFILE 34 /* 0x22 */ + + +#define SCENE_PROFILE 48 /* 0x21 */ +#define SCENE10_PROFILE 50 /* 0x23 */ + +#define TRACE 0 /* !< 0:Trace off 1:Trace on */ + + +/* Type definitions and file operation for Windows/Linux + * All file operations for windows are replaced with native (FILE *) operations + * Falei LUO (falei.luo@vipl.ict.ac.cn) + * */ + +#define _FILE_OFFSET_BITS 64 /* for 64 bit fseeko */ +#define fseek fseeko + +#define int16 int16_t +#define int64 int64_t + +/* ////////////////// bug fix ///////////////////////////// */ +#define ALFSliceFix 1 +#define WRITENBIT_FIX 1 +#define FIX_PROFILE_LEVEL_DPB_RPS_1 1 +#define FIX_PROFILE_LEVEL_DPB_RPS_2 1 +#define FIX_RPS_PICTURE_REMOVE 1 /* flluo@pku.edu.cn */ +#define Mv_Clip 1 /* yuquanhe@hisilicon.com */ +#define REMOVE_UNUSED 1 /* yuquanhe@hisilicon.com */ +#define SAO_Height_Fix 1 /* yuquanhe@hisilicon.com */ +#define B_BACKGROUND_Fix 1 /* yuquanhe@hisilicon.com */ +#define Check_Bitstream 1 /* yuquanhe@hisilicon.com */ +#define Wq_param_Clip 1 /* yuquanhe@hisilicon.com */ + /* luofalei flluo@pku.edu.cn , wlq15@mails.tsinghua.edu.cn , + Longfei.Wang@mediatek.com */ +#define RD1501_FIX_BG 1 + /* yuquanhe@hisilicon.com ; he-yuan.lin@mstarsemi.com */ +#define Mv_Rang 1 + /* Longfei.Wang@mediatek.com ;fred.chiu@mediatek.com + jie1222.chen@samsung.com */ +#define RD160_FIX_BG 1 + /* Y_K_Tu@novatek.com.tw, he-yuan.lin@mstarsemi.com, + victor.huang@montage-tech.com M4041 */ +#define RD1601_FIX_BG 1 +#define SEQ_CHANGE_CHECKER 1 /* 
he-yuan.lin@mstarsemi.com */ +#define M4140_END_OF_SLICE_CHECKER 1 /* he-yuan.lin@mstarsemi.com */ + /* wlq15@mails.tsinghua.edu.cn */ +#define Mv_check_bug 1 +#define SAO_ASSERTION_FIX 1 /* fred.chiu@mediatek.com */ +#define FIELD_HORI_MV_NO_SCALE_FIX 1 /* fred.chiu@mediatek.com */ +#define RD170_FIX_BG 1 +#define FIX_CHROMA_FIELD_MV_BK_DIST 1 +#define FIX_LUMA_FIELD_MV_BK_DIST 1 +#define FIX_CHROMA_FIELD_MV_CLIP 1 +#if 1 +#define FIX_FLUSH_DPB_BY_LF 1 /* fred.chiu@mediatek.com */ +#define FIX_SEQ_END_FLUSH_DPB_BY_LF 1 /* fred.chiu@mediatek.com */ +#else +#define FIX_FLUSH_DPB_BY_LF 0 /* fred.chiu@mediatek.com */ +#define FIX_SEQ_END_FLUSH_DPB_BY_LF 0 /* fred.chiu@mediatek.com */ +#endif +#define RD191_FIX_BUG 1 /* yuquanhe@hsilicon.com */ +#define SYM_MV_SCALE_FIX 1/* peisong.chen@broadcom.com */ +#define BUG_10BIT_REFINEQP 0 /* wangzhenyu */ + + + +#if RD191_FIX_BUG +#endif + +/************************ + * AVS2 macros start + **************************/ + +#define INTERLACE_CODING 1 +#if INTERLACE_CODING /* M3531: MV scaling compensation */ +/* Luma component */ +#define HALF_PIXEL_COMPENSATION 1 /* common functions definition */ +#define HALF_PIXEL_COMPENSATION_PMV 1 /* spacial MV prediction */ +#define HALF_PIXEL_COMPENSATION_DIRECT 1 /* B direct mode */ + /* MV derivation method 1, weighted P_skip mode */ +#define HALF_PIXEL_COMPENSATION_M1 1 + /* M1 related with mv-scaling function */ +#define HALF_PIXEL_COMPENSATION_M1_FUCTION 1 +#define HALF_PIXEL_COMPENSATION_MVD 1 /* MV scaling from FW->BW */ +/* Chroma components */ + /* chroma MV is scaled with luma MV for 4:2:0 format */ +#define HALF_PIXEL_CHROMA 1 + /* half pixel compensation for p skip/direct */ +#define HALF_PIXEL_PSKIP 1 +#define INTERLACE_CODING_FIX 1 /* HLS fix */ +#define OUTPUT_INTERLACE_MERGED_PIC 1 + +#endif +/* + ******************************* +AVS2 10bit/12bit profile + ******************************** + */ + +#define DBFIX_10bit 1 + +#define BUG_10bit 1 + +/* + 
*************************************** +AVS2 HIGH LEVEL SYNTAX + *************************************** + */ +#define AVS2_HDR_HLS 1 + /* AVS2 HDR technology //yuquanhe@hisilicon.com */ +#define AVS2_HDR_Tec 1 +#if AVS2_HDR_Tec +#define HDR_CHROMA_DELTA_QP 1 /* M3905 */ +#define HDR_ADPTIVE_UV_DELTA 1 +#endif +/* + ************************************* +AVS2 S2 + ************************************* + */ +#define AVS2_S2_FASTMODEDECISION 1 +#define RD1510_FIX_BG 1 /* 20160714, flluo@pku.edu.cn */ + + +/* ////////////////// prediction techniques ///////////////////////////// */ +#define LAM_2Level_TU 0.8 + + +#define DIRECTION 4 +#define DS_FORWARD 4 +#define DS_BACKWARD 2 +#define DS_SYM 3 +#define DS_BID 1 + +#define MH_PSKIP_NUM 4 +#define NUM_OFFSET 0 +#define BID_P_FST 1 +#define BID_P_SND 2 +#define FW_P_FST 3 +#define FW_P_SND 4 +#define WPM_NUM 3 + /* M3330 changes it to 2, the original value is 3 */ +#define MAX_MVP_CAND_NUM 2 + +#define DMH_MODE_NUM 5 /* Number of DMH mode */ +#define TH_ME 0 /* Threshold of ME */ + +#define MV_SCALE 1 + +/* ///// reference picture management // */ +#define FIX_MAX_REF 1 /* Falei LUO, flluo@pku.edu.cn */ +#if FIX_MAX_REF + /* maximum number of reference frame for each frame */ +#define MAXREF 7 +#define MAXGOP 32 +#endif + +/* #define REF_MAXBUFFER 7 */ +/* more bufferes for displaying and background */ +/* #define REF_MAXBUFFER 15 */ +#if 1 +#define REF_MAXBUFFER 23 +#define REF_BUFFER 16 +#else +#if RD170_FIX_BG +#define REF_MAXBUFFER 16 +#else +#define REF_MAXBUFFER 7 +#endif +#endif + +#ifdef TO_PORTING + /* block-composed background reference, fangdong@mail.ustc.edu.cn */ +#define BCBR 1 +#else +#define BCBR 0 +#endif +/* one more buffer for background when background_picture_output_flag is 0*/ +#define AVS2_MAX_BUFFER_NUM (REF_MAXBUFFER + 1) + +/* /////////////////Adaptive Loop Filter////////////////////////// */ +#define NUM_ALF_COEFF_CTX 1 +#define NUM_ALF_LCU_CTX 4 + +#define LAMBDA_SCALE_LUMA (1.0) +#define 
LAMBDA_SCALE_CHROMA (1.0) + + + +/* ////////////////// entropy coding ///////////////////////////// */ + /* M3090: Make sure rs1 will not overflow for 8-bit unsign char */ +#define NUN_VALUE_BOUND 254 +#define Encoder_BYPASS_Final 1 /* M3484 */ +#define Decoder_Bypass_Annex 0 /* M3484 */ +#define Decoder_Final_Annex 0 /* M3540 */ + + +/* ////////////////// coefficient coding ///// */ + /* M3035 size of an coefficient group, 4x4 */ +#define CG_SIZE 16 + +#define SWAP(x, y) {\ + (y) = (y) ^ (x);\ + (x) = (y) ^ (x);\ + (y) = (x) ^ (y);\ +} + +/* ////////////////// encoder optimization /////// */ +#define TH 2 + +#define M3624MDLOG /* reserved */ + +#define TDRDO 1 /* M3528 */ +/* #define FIX_TDRDO_BG 1 // flluo@pku.edu.cn, 20160318// */ +#define RATECONTROL 1 /* M3580 M3627 M3689 */ +#define AQPO 1 /* M3623 */ +#define AQPOM3694 0 +#define AQPOM4063 1 +#define AQPOM3762 1 +#define BGQPO 1 /* M4061 */ +#if BGQPO +#define LONGREFERENCE 32 +#endif + +/* #define REPORT */ +/* ////////////////// Quantization /////////////////////////////////////// */ + /* Adaptive frequency weighting quantization */ +#define FREQUENCY_WEIGHTING_QUANTIZATION 1 +#if FREQUENCY_WEIGHTING_QUANTIZATION +#define CHROMA_DELTA_QP 1 +#define AWQ_WEIGHTING 1 +#define AWQ_LARGE_BLOCK_ENABLE 1 +#define COUNT_BIT_OVERHEAD 0 +#define AWQ_LARGE_BLOCK_EXT_MAPPING 1 +#endif + +#define QuantClip 1 +#define QuantMatrixClipFix 1 /* 20160418, fllu@pku.edu.cn */ + +#define WQ_MATRIX_FCD 1 +#if !WQ_MATRIX_FCD +#define WQ_FLATBASE_INBIT 7 +#else +#define WQ_FLATBASE_INBIT 6 +#endif + + +#define REFINED_QP 1 + + +/* ////////////////// delta QP ///// */ + /* M3122: the minimum dQP unit is Macro block */ +#define MB_DQP 1 + /* M3122: 1 represents left prediction + and 0 represents previous prediction */ +#define LEFT_PREDICTION 1 + + +/* //////////////////////SAO///////// */ +#define NUM_BO_OFFSET 32 +#define MAX_NUM_SAO_CLASSES 32 +#define NUM_SAO_BO_CLASSES_LOG2 5 +#define NUM_SAO_BO_CLASSES_IN_BIT 5 +#define 
MAX_DOUBLE (1.7e + 308) +#define NUM_SAO_EO_TYPES_LOG2 2 +#define NUM_SAO_BO_CLASSES (1<<NUM_SAO_BO_CLASSES_LOG2) +#define SAO_RATE_THR 0.75 +#define SAO_RATE_CHROMA_THR 1 +#define SAO_SHIFT_PIX_NUM 4 + +#define SAO_PARA_CROSS_SLICE 1 +#define SAO_MULSLICE_FTR_FIX 1 + +/* /////////////////// Transform ///////////////////// */ +#define SEC_TR_SIZE 4 + /* apply secT to greater than or equal to 8x8 block, */ +#define SEC_TR_MIN_BITSIZE 3 + +#define BUGFIXED_COMBINED_ST_BD 1 + +/* /////////////////// Scalable ///////////////////// */ +#define M3480_TEMPORAL_SCALABLE 1 +#define TEMPORAL_MAXLEVEL 8 +#define TEMPORAL_MAXLEVEL_BIT 3 + + + + +/* + ************************************* + * AVS2 macros end + * + ************************************* + */ + +#define CHROMA 1 +#define LUMA_8x8 2 +#define NUM_BLOCK_TYPES 8 + +#if (!defined clamp) + /* !< clamp a to the range of [b;c] */ +#define clamp(a, b, c) ((a) < (b) ? (b) : ((a) > (c) ? (c) : (a))) +#endif + + /* POC200301 moved from defines.h */ +#define LOG2_MAX_FRAME_NUM_MINUS4 4 + /* !< bytes for one frame */ +#define MAX_CODED_FRAME_SIZE 15000000 + +/* ----------------------- */ +/* FLAGS and DEFINES for new chroma intra prediction, Dzung Hoang */ +/* Threshold values to zero out quantized transform coefficients. */ +/* Recommend that _CHROMA_COEFF_COST_ be low to improve chroma quality */ +#define _LUMA_COEFF_COST_ 4 /* !< threshold for luma coeffs */ + /* !< Number of pixels padded around the reference frame (>=4) */ +#define IMG_PAD_SIZE 64 + +#define OUTSTRING_SIZE 255 + + /* !< abs macro, faster than procedure */ +#define absm(A) ((A) < (0) ? (-(A)) : (A)) + /* !< used for start value for some variables */ +#define MAX_VALUE 999999 + +#define Clip1(a) ((a) > 255 ? 255:((a) < 0 ? 0 : (a))) +#define Clip3(min, max, val) (((val) < (min)) ?\ + (min) : (((val) > (max)) ? 
(max) : (val))) + +/* --------------------------------------------- */ + +/* block size of block transformed by AVS */ +#define PSKIPDIRECT 0 +#define P2NX2N 1 +#define P2NXN 2 +#define PNX2N 3 +#define PHOR_UP 4 +#define PHOR_DOWN 5 +#define PVER_LEFT 6 +#define PVER_RIGHT 7 +#define PNXN 8 +#define I8MB 9 +#define I16MB 10 +#define IBLOCK 11 +#define InNxNMB 12 +#define INxnNMB 13 +#define MAXMODE 14 /* add yuqh 20130824 */ +#define LAMBDA_ACCURACY_BITS 16 +#define LAMBDA_FACTOR(lambda) ((int)((double)(1 << LAMBDA_ACCURACY_BITS)\ + * lambda + 0.5)) +#define WEIGHTED_COST(factor, bits) (((factor) * (bits))\ + >> LAMBDA_ACCURACY_BITS) +#define MV_COST(f, s, cx, cy, px, py) (WEIGHTED_COST(f, mvbits[((cx) << (s))\ + - px] + mvbits[((cy) << (s)) - py])) +#define REF_COST(f, ref) (WEIGHTED_COST(f, refbits[(ref)])) + +#define BWD_IDX(ref) (((ref) < 2) ? 1 - (ref) : (ref)) +#define REF_COST_FWD(f, ref) (WEIGHTED_COST(f,\ + ((img->num_ref_pic_active_fwd_minus1 == 0) ?\ + 0 : refbits[(ref)]))) +#define REF_COST_BWD(f, ef) (WEIGHTED_COST(f,\ + ((img->num_ref_pic_active_bwd_minus1 == 0) ?\ + 0 : BWD_IDX(refbits[ref])))) + +#define IS_INTRA(MB) ((MB)->cuType == I8MB ||\ + (MB)->cuType == I16MB ||\ + (MB)->cuType == InNxNMB || (MB)->cuType == INxnNMB) +#define IS_INTER(MB) ((MB)->cuType != I8MB &&\ + (MB)->cuType != I16MB && (MB)->cuType != InNxNMB\ + && (MB)->cuType != INxnNMB) +#define IS_INTERMV(MB) ((MB)->cuType != I8MB &&\ + (MB)->cuType != I16MB && (MB)->cuType != InNxNMB &&\ + (MB)->cuType != INxnNMB && (MB)->cuType != 0) + + +#define IS_DIRECT(MB) ((MB)->cuType == PSKIPDIRECT && (img->type == B_IMG)) +#define IS_P_SKIP(MB) ((MB)->cuType == PSKIPDIRECT &&\ + (((img->type == F_IMG)) || ((img->type == P_IMG)))) +#define IS_P8x8(MB) ((MB)->cuType == PNXN) + +/* Quantization parameter range */ +#define MIN_QP 0 +#define MAX_QP 63 +#define SHIFT_QP 11 + +/* Picture types */ +#define INTRA_IMG 0 /* !< I frame */ +#define INTER_IMG 1 /* !< P frame */ +#define B_IMG 2 /* !< B 
frame */ +#define I_IMG 0 /* !< I frame */ +#define P_IMG 1 /* !< P frame */ +#define F_IMG 4 /* !< F frame */ + +#define BACKGROUND_IMG 3 + +#define BP_IMG 5 + + +/* Direct Mode types */ +#define MIN_CU_SIZE 8 +#define MIN_BLOCK_SIZE 4 +#define MIN_CU_SIZE_IN_BIT 3 +#define MIN_BLOCK_SIZE_IN_BIT 2 +#define BLOCK_MULTIPLE (MIN_CU_SIZE/(MIN_BLOCK_SIZE)) +#define MAX_CU_SIZE 64 +#define MAX_CU_SIZE_IN_BIT 6 +#define B4X4_IN_BIT 2 +#define B8X8_IN_BIT 3 +#define B16X16_IN_BIT 4 +#define B32X32_IN_BIT 5 +#define B64X64_IN_BIT 6 + /* !< # luma intra prediction modes */ +#define NUM_INTRA_PMODE 33 + /* number of luma modes for full RD search */ +#define NUM_MODE_FULL_RD 9 + /* !< #chroma intra prediction modes */ +#define NUM_INTRA_PMODE_CHROMA 5 + +/* luma intra prediction modes */ + +#define DC_PRED 0 +#define PLANE_PRED 1 +#define BI_PRED 2 +#define VERT_PRED 12 +#define HOR_PRED 24 + + +/* chroma intra prediction modes */ +#define DM_PRED_C 0 +#define DC_PRED_C 1 +#define HOR_PRED_C 2 +#define VERT_PRED_C 3 +#define BI_PRED_C 4 + +#define EOS 1 /* !< End Of Sequence */ + /* !< Start Of Picture */ +#define SOP 2 + +#define DECODING_OK 0 +#define SEARCH_SYNC 1 +#define DECODE_MB 1 + +#ifndef max + /* !< Macro returning max value */ +#define max(a, b) ((a) > (b) ? (a) : (b)) + /* !< Macro returning min value */ +#define min(a, b) ((a) < (b) ? 
(a) : (b)) +#endif + + +#define XY_MIN_PMV 1 +#if XY_MIN_PMV +#define MVPRED_xy_MIN 0 +#else +#define MVPRED_MEDIAN 0 +#endif +#define MVPRED_L 1 +#define MVPRED_U 2 +#define MVPRED_UR 3 + +#define DUAL 4 +#define FORWARD 0 +#define BACKWARD 1 +#define SYM 2 +#define BID 3 +#define INTRA -1 + +#define BUF_CYCLE 5 + +#define ROI_M3264 1 /* ROI Information Encoding */ + +#define PicExtensionData 1 + + +#define REF_OUTPUT 1 /* M3337 */ + + +/* MV scaling 14 bit */ +#define MULTI 16384 +#define HALF_MULTI 8192 +#define OFFSET 14 +/* end of MV scaling */ + /* store the middle pixel's mv in a motion information unit */ +#define MV_DECIMATION_FACTOR 4 + +/* BUGFIX_AVAILABILITY_INTRA */ +#define NEIGHBOR_INTRA_LEFT 0 +#define NEIGHBOR_INTRA_UP 1 +#define NEIGHBOR_INTRA_UP_RIGHT 2 +#define NEIGHBOR_INTRA_UP_LEFT 3 +#define NEIGHBOR_INTRA_LEFT_DOWN 4 +/* end of BUGFIX_AVAILABILITY_INTRA */ + +/* end #include "define.h" */ + +/*#include "commonStructures.h"*/ + +/*typedef uint16_t byte;*/ /* !< byte type definition */ +#define byte uint16_t +#define pel_t byte + +enum BitCountType_e { + BITS_HEADER, + BITS_TOTAL_MB, + BITS_MB_MODE, + BITS_INTER_MB, + BITS_CBP_MB, + BITS_CBP01_MB, + BITS_COEFF_Y_MB, + BITS_COEFF_UV_MB, + BITS_DELTA_QUANT_MB, + BITS_SAO_MB, + MAX_BITCOUNTER_MB +}; + + +enum SAOEOClasses { +/* EO Groups, the assignments depended on +how you implement the edgeType calculation */ + SAO_CLASS_EO_FULL_VALLEY = 0, + SAO_CLASS_EO_HALF_VALLEY = 1, + SAO_CLASS_EO_PLAIN = 2, + SAO_CLASS_EO_HALF_PEAK = 3, + SAO_CLASS_EO_FULL_PEAK = 4, + SAO_CLASS_BO = 5, + NUM_SAO_EO_CLASSES = SAO_CLASS_BO, + NUM_SAO_OFFSET +}; + +struct SAOstatdata { + int32_t diff[MAX_NUM_SAO_CLASSES]; + int32_t count[MAX_NUM_SAO_CLASSES]; +}; + +struct CopyRight_s { + int32_t extension_id; + int32_t copyright_flag; + int32_t copyright_id; + int32_t original_or_copy; + int32_t reserved; + int32_t copyright_number; +}; + +struct CameraParamters_s { + int32_t reserved; + int32_t camera_id; + int32_t 
height_of_image_device; + int32_t focal_length; + int32_t f_number; + int32_t vertical_angle_of_view; + int32_t camera_position_x; + int32_t camera_position_y; + int32_t camera_position_z; + int32_t camera_direction_x; + int32_t camera_direction_y; + int32_t camera_direction_z; + int32_t image_plane_vertical_x; + int32_t image_plane_vertical_y; + int32_t image_plane_vertical_z; +}; + +/* ! SNRParameters */ +struct SNRParameters_s { + double snr_y; /* !< current Y SNR */ + double snr_u; /* !< current U SNR */ + double snr_v; /* !< current V SNR */ + double snr_y1; /* !< SNR Y(dB) first frame */ + double snr_u1; /* !< SNR U(dB) first frame */ + double snr_v1; /* !< SNR V(dB) first frame */ + double snr_ya; /* !< Average SNR Y(dB) remaining frames */ + double snr_ua; /* !< Average SNR U(dB) remaining frames */ + double snr_va; /* !< Average SNR V(dB) remaining frames */ +#if INTERLACE_CODING + double i_snr_ya; /* !< current Y SNR */ + double i_snr_ua; /* !< current U SNR */ + double i_snr_va; /* !< current V SNR */ +#endif +}; + +/* signal to noise ratio parameters */ + +/* ! 
codingUnit */ +struct codingUnit { + uint32_t ui_MbBitSize; + int32_t uiBitSize; /* size of MB */ + /* !< number of current syntax element */ + int32_t currSEnr; + int32_t slice_nr; + int32_t delta_quant; /* !< for rate control */ + int32_t delta_qp; + int32_t qp; + int32_t bitcounter[MAX_BITCOUNTER_MB]; + struct codingUnit + *mb_available[3][3]; /*!< pointer to neighboring MBs + in a 3x3 window of current MB, which is located at [1][1] \n + NULL pointer identifies neighboring MBs which are unavailable */ + /* some storage of codingUnit syntax elements for global access */ + int32_t cuType; + int32_t weighted_skipmode; + + int32_t md_directskip_mode; + + int32_t trans_size; + int + /* !< indices correspond to [forw,backw][block_y][block_x][x,y, dmh] */ + mvd[2][BLOCK_MULTIPLE][BLOCK_MULTIPLE][3]; + + int32_t intra_pred_modes[BLOCK_MULTIPLE * BLOCK_MULTIPLE]; + int32_t real_intra_pred_modes[BLOCK_MULTIPLE * BLOCK_MULTIPLE]; + int32_t l_ipred_mode; + int32_t cbp, cbp_blk; + uint32_t cbp_bits; + + int32_t b8mode[4]; + int32_t b8pdir[4]; + /* !< chroma intra prediction mode */ + int32_t c_ipred_mode; + + /* !< pointer to neighboring MB (AEC) */ + struct codingUnit *mb_available_up; + /* !< pointer to neighboring MB (AEC) */ + struct codingUnit *mb_available_left; + int32_t mbAddrA, mbAddrB, mbAddrC, mbAddrD; + /* !<added by mz, 2008.04 */ + int32_t slice_set_index; + /* added by mz, 2008.04 */ + int32_t slice_header_flag; + int32_t sliceqp; /* added by mz, 2008.04 */ +#if MB_DQP + int32_t previouse_qp; + int32_t left_cu_qp; +#endif + int32_t block_available_up; + int32_t block_available_left; + +}; + + +/* image parameters */ +struct syntaxelement; +struct slice; +struct alfdatapart; +struct SAOBlkParam_s { + int32_t modeIdc; /* NEW, MERGE, OFF */ + /* NEW: EO_0, EO_90, EO_135, EO_45, BO. 
MERGE: left, above */ + int32_t typeIdc; + int32_t startBand; /* BO: starting band index */ + int32_t startBand2; + int32_t deltaband; + int32_t offset[MAX_NUM_SAO_CLASSES]; +}; +struct ALFParam_s { + int32_t alf_flag; + int32_t num_coeff; + int32_t filters_per_group; + int32_t componentID; + int32_t filterPattern[16]; /* *filterPattern; */ + int32_t coeffmulti[16][9]; /* **coeffmulti; */ +}; + +enum ALFComponentID { + ALF_Y = 0, + ALF_Cb, + ALF_Cr, + NUM_ALF_COMPONENT +}; +struct ALF_APS_s { + int32_t usedflag; + int32_t cur_number; + int32_t max_number; + struct ALFParam_s alf_par[NUM_ALF_COMPONENT]; +}; + + +/* ------------------------------------------------------ + * frame data + */ +struct avs2_frame_s { + int32_t imgcoi_ref; + byte * *referenceFrame[3]; + int32_t **refbuf; + int32_t ***mvbuf; +#if 0 + double saorate[NUM_SAO_COMPONENTS]; +#endif + byte ***ref; + + int32_t imgtr_fwRefDistance; + int32_t refered_by_others; + int32_t is_output; + int32_t to_prepare_disp; +#if M3480_TEMPORAL_SCALABLE + /* temporal level setted in configure file */ + int32_t temporal_id; +#endif + byte **oneForthRefY; +#if FIX_MAX_REF + int32_t ref_poc[MAXREF]; +#else + int32_t ref_poc[4]; +#endif +#ifdef AML + int32_t index; + int32_t mmu_alloc_flag; + int32_t lcu_size_log2; + /*uint32_t header_adr;*/ + uint32_t mc_y_adr; + uint32_t mc_u_v_adr; + uint32_t mc_canvas_y; + uint32_t mc_canvas_u_v; + uint32_t mpred_mv_wr_start_addr; + uint8_t bg_flag; + /**/ + unsigned long header_adr; + int buf_size; + int lcu_total; + int comp_body_size; + uint32_t dw_y_adr; + uint32_t dw_u_v_adr; + int y_canvas_index; + int uv_canvas_index; + struct canvas_config_s canvas_config[2]; + int double_write_mode; + int bit_depth; + unsigned long cma_alloc_addr; + int BUF_index; + int pic_w; + int pic_h; + int stream_offset; + u32 pts; + u64 pts64; + /**/ + int vf_ref; + int decode_idx; + int slice_type; + int32_t imgtr_fwRefDistance_bak; + int32_t error_mark; + int32_t decoded_lcu; +#endif +#ifndef 
MV_USE_FIXED_BUF + int mv_buf_index; +#endif + + /* picture qos infomation*/ + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; + + u32 hw_decode_time; + u32 frame_size; // For frame base mode + + char *cuva_data_buf; + int cuva_data_size; +}; + + +struct ImageParameters_s { + struct codingUnit *mb_data; + int32_t number; /* <! frame number */ + int32_t numIPFrames; + + int32_t type; + int32_t typeb; + int32_t typeb_before; + + int32_t qp; /* <! quant for the current frame */ + int32_t current_mb_nr; /* bitstream order */ + int32_t current_slice_nr; + int32_t tr; /* <! temporal reference, 8 bit, */ + + int32_t width; /* !< Number of pels */ + int32_t width_cr; /* !< Number of pels chroma */ + int32_t height; /* !< Number of lines */ + int32_t height_cr; /* !< Number of lines chroma */ + int32_t PicWidthInMbs; + int32_t PicSizeInMbs; + int32_t block8_x, block8_y; + int32_t subblock_x; + int32_t subblock_y; + + int32_t num_of_references; + /* <! Bug Fix: correct picture size for outputted reconstructed pictures */ + int32_t auto_crop_right; + int32_t auto_crop_bottom; + int32_t buf_cycle; + int32_t picture_structure; + /* <! 
pointer to current Slice data struct */ + struct slice *currentSlice; + + int32_t **predBlock; /* !< current best prediction mode */ + int32_t **predBlockTmp; + /* !< the diff pixel values between orginal image and prediction */ + int32_t **resiY; + /* !< Array containing square values,used for snr computation */ + int32_t *quad; + + /* //location of current MB////// */ + int32_t mb_y; /* !< current MB vertical */ + int32_t mb_x; /* !< current MB horizontal */ + int32_t pix_y; /* !< current pixel vertical */ + int32_t pix_x; /* !< current pixel horizontal */ + int32_t pix_c_y; /* !< current pixel chroma vertical */ + int32_t pix_c_x; /* !< current pixel chroma horizontal */ + + int32_t imgtr_next_P; + + int32_t imgcoi_next_ref; + + /* !< GH ipredmode[90][74];prediction mode for inter frames */ + /* fix from ver 4.1 */ + int32_t **ipredmode; + int32_t **rec_ipredmode; + + + /* //////////////decoder////////////////////////// */ + int32_t max_mb_nr; + int32_t **intra_block; + + int32_t block_y; + int32_t block_x; + /* <! final 4x4 block. Extended to 16x16 for AVS */ + int32_t resiUV[2][MAX_CU_SIZE][MAX_CU_SIZE]; + + int32_t **fw_refFrArr; /* <! [72][88]; */ + int32_t **bw_refFrArr; /* <! [72][88]; */ + + int32_t random_access_decodable_flag; + + int32_t seq_header_indicate; + int32_t B_discard_flag; + + /* B pictures */ + uint32_t pic_distance; + + uint32_t coding_order; + + uint32_t PrevPicDistanceLsb; + int32_t CurrPicDistanceMsb; + + int32_t PicHeightInMbs; + + int32_t types; + + int32_t new_sequence_flag; + int32_t sequence_end_flag; /* <! rm52k_r2 */ + + int32_t current_slice_set_index; /* <! added by mz, 2008.04 */ + int32_t current_slice_header_flag; /* <! added by mz, 2008.04 */ + int32_t slice_set_qp[64]; /* <! 
added by mz, 2008.04 */ + + + int32_t inter_amp_enable; + + /* ////////////////////////encoder////////////////////////// */ + + /* int32_t nb_references; //!< replaced by "num_of_references" */ + + int32_t framerate; + + int32_t ***predBlockY; /* !< all 9 prediction modes */ + /* !< new chroma 8x8 intra prediction modes */ + int32_t ****predBlockUV; + + int32_t **Coeff_all;/* qyu 0821 */ + + struct syntaxelement *MB_SyntaxElements; /* !< by oliver 0612 */ + + /* B pictures */ + + int32_t b_frame_to_code; + int32_t num_ref_pic_active_fwd_minus1; + int32_t num_ref_pic_active_bwd_minus1; + int32_t mv_range_flag; + + uint32_t frame_num; /* frame_num for this frame */ + int32_t slice_offset; + /* the following are sent in the slice header */ + int32_t NoResidueDirect; + int32_t coded_mb_nr; + int32_t progressive_frame; + int32_t tc_reserve_bit; + /* the last MB no in current slice. Yulj 2004.07.15 */ + int32_t mb_no_currSliceLastMB; + int32_t Seqheader_flag; /* Added by cjw, 20070327 */ + int32_t EncodeEnd_flag; /* Carmen, 2007/12/19 */ + + uint16_t bbv_delay; + + int32_t tmp_fwBSkipMv[DIRECTION + 1][2]; + int32_t tmp_bwBSkipMv[DIRECTION + 1][2]; + + int32_t tmp_pref_fst[MH_PSKIP_NUM + NUM_OFFSET + 1]; + int32_t tmp_pref_snd[MH_PSKIP_NUM + NUM_OFFSET + 1]; + int32_t tmp_fstPSkipMv[MH_PSKIP_NUM + NUM_OFFSET + 1][3]; + int32_t tmp_sndPSkipMv[MH_PSKIP_NUM + NUM_OFFSET + 1][3]; +#if BCBR +byte *org_ref_y; +byte *org_ref_u; +byte *org_ref_v; +int32_t *BLCUidx; +int32_t *DQPList; +int32_t iNumCUsInFrame; + +byte *org_ref2_y; +byte *org_ref2_u; +byte *org_ref2_v; +int32_t ref2Num; +#endif +/* //////////////SAO parameter////////////////// */ +double *cur_saorate; +#if 0 +int32_t slice_sao_on[NUM_SAO_COMPONENTS]; +#endif +int32_t pic_alf_on[NUM_ALF_COMPONENT]; +struct alfdatapart *dp_ALF; + +#if INTERLACE_CODING +int32_t is_field_sequence; +int32_t is_top_field; +#endif + + +}; + + + +/* ! 
struct for context management */ +struct BiContextType_s { + uint8_t MPS; /* 1 bit */ + uint32_t LG_PMPS; /* 10 bits */ + uint8_t cycno; /* 2 bits */ +}; + +/*********************************************************************** + * D a t a t y p e s f o r A E C + ************************************************************************/ + + + +struct pix_pos { + int32_t available; /* ABCD */ + int32_t mb_addr; /* MB position */ + int32_t x; + int32_t y; + int32_t pos_x; /* 4x4 x-pos */ + int32_t pos_y; +}; + + + +struct STDOUT_DATA_s { + int32_t type; + int32_t typeb; + + int32_t framenum; + int32_t tr; + int32_t qp; + double snr_y; + double snr_u; + double snr_v; + int32_t tmp_time; + int32_t picture_structure; + int32_t curr_frame_bits; + int32_t emulate_bits; + + uint32_t DecMD5Value[4]; +#if RD1501_FIX_BG +int32_t background_picture_output_flag;/* Longfei.Wang@mediatek.com */ +#endif +#if RD160_FIX_BG +int32_t picture_reorder_delay; +#endif +int8_t str_reference_list[128]; /* reference list information */ +}; + +/********************************************************************** + * C O N T E X T S F O R T M L S Y N T A X E L E M E N T S + ********************************************************************** + */ +#define NUM_CuType_CTX (11 + 10) +#define NUM_B8_TYPE_CTX 9 +#define NUM_MVD_CTX 15 +#define NUM_PMV_IDX_CTX 10 +#define NUM_REF_NO_CTX 6 +#define NUM_DELTA_QP_CTX 4 +#define NUM_INTER_DIR_CTX 18 +#define NUM_INTER_DIR_DHP_CTX 3 +#define NUM_B8_TYPE_DHP_CTX 1 +#define NUM_AMP_CTX 2 +#define NUM_C_INTRA_MODE_CTX 4 +#define NUM_CBP_CTX 4 +#define NUM_BCBP_CTX 4 +#define NUM_MAP_CTX 17 +#define NUM_LAST_CTX 17 + +#define NUM_INTRA_MODE_CTX 7 + +#define NUM_ABS_CTX 5 +#define NUM_TU_CTX 3 +#define NUM_SPLIT_CTX 8 /* CU depth */ +#if BCBR +#define NUM_BGBLCOK_CTX 1 +#endif + +#define NUM_BRP_CTX 8 + + +#define NUM_LAST_CG_CTX_LUMA 12 +#define NUM_LAST_CG_CTX_CHROMA 6 +#define NUM_SIGCG_CTX_LUMA 2 +#define NUM_SIGCG_CTX_CHROMA 1 +#define 
NUM_LAST_POS_CTX_LUMA 56 +#define NUM_LAST_POS_CTX_CHROMA 16 +#define NUM_LAST_CG_CTX (NUM_LAST_CG_CTX_LUMA + NUM_LAST_CG_CTX_CHROMA) +#define NUM_SIGCG_CTX (NUM_SIGCG_CTX_LUMA + NUM_SIGCG_CTX_CHROMA) +#define NUM_LAST_POS_CTX (NUM_LAST_POS_CTX_LUMA + NUM_LAST_POS_CTX_CHROMA) +#define NUM_SAO_MERGE_FLAG_CTX 3 +#define NUM_SAO_MODE_CTX 1 +#define NUM_SAO_OFFSET_CTX 2 +#define NUM_INTER_DIR_MIN_CTX 2 + +/*end #include "commonStructures.h"*/ + +/*#include "commonVariables.h"*/ + +/* +extern struct CameraParamters_s *camera; +extern struct SNRParameters_s *snr; +extern struct ImageParameters_s *img; + */ + +/* avs2_frame_t *fref[REF_MAXBUFFER]; */ + + +#define ET_SIZE 300 /* !< size of error text buffer */ + + +/* ------------------------------------------------------ + * common data + */ +struct Video_Com_data_s { + int32_t Bframe_ctr; + + /* FILE *p_log; //!< SNR file */ + /* FILE *p_trace; //!< Trace file */ + + int32_t tot_time; + + /* Tsinghua for picture_distance 200701 */ + int32_t picture_distance; + + /* M3178 PKU Reference Manage */ + int32_t coding_order; + /* !< current encoding/decoding frame pointer */ + struct avs2_frame_s *f_rec; + int32_t seq_header; + /* !< Array for reference frames of each block */ + int32_t **refFrArr; + int32_t **p_snd_refFrArr; + + byte ***currentFrame; /* [yuv][height][width] */ +#ifdef AML + struct avs2_frame_s *cur_pic; /*either f_rec or m_bg*/ +#endif + byte **backgroundReferenceFrame[3]; + byte ***background_ref; + + + int32_t total_frames; + + /* mv_range, 20071009 */ + int32_t Min_V_MV; + int32_t Max_V_MV; + int32_t Min_H_MV; + int32_t Max_H_MV; + /* !< buffer for error message for exit with error(void) */ + int8_t errortext[ET_SIZE]; + int8_t str_list_reference[128]; + + +}; +/* extern Video_Com_data *hc; */ + + +/*end #include "commonVariables.h"*/ +/* #define USE_PARAM_TXT */ +/* +#if FIX_CHROMA_FIELD_MV_BK_DIST +int8_t bk_img_is_top_field; +#endif +*/ +/* void write_GB_frame(FILE *p_dec); */ + +#if !FIX_MAX_REF 
+#define MAXREF 4 +#define MAXGOP 32 +#endif + +struct StatBits { + int32_t curr_frame_bits; + int32_t prev_frame_bits; + int32_t emulate_bits; + int32_t prev_emulate_bits; + int32_t last_unit_bits; + int32_t bitrate; + int32_t total_bitrate[1000]; + int32_t coded_pic_num; + int32_t time_s; +}; + +struct reference_management { + int32_t poc; + int32_t qp_offset; + int32_t num_of_ref; + int32_t referd_by_others; + int32_t ref_pic[MAXREF]; + int32_t predict; + int32_t deltaRPS; + int32_t num_to_remove; + int32_t remove_pic[MAXREF]; +}; + + +/* ------------------------------------------------------ + * dec data + */ +struct Video_Dec_data_s { + byte **background_frame[3]; + int32_t background_reference_enable; + + int32_t background_picture_flag; + int32_t background_picture_output_flag; + int32_t background_picture_enable; + + int32_t background_number; + +#if BCBR + int32_t bcbr_enable; +#endif + + int32_t demulate_enable; + int32_t currentbitoffset; + + int32_t aspect_ratio_information; + int32_t frame_rate_code; + int32_t bit_rate_lower; + int32_t bit_rate_upper; + int32_t marker_bit; + + int32_t video_format; + int32_t color_description; + int32_t color_primaries; + int32_t transfer_characteristics; + int32_t matrix_coefficients; + + int32_t progressive_sequence; +#if INTERLACE_CODING +int32_t is_field_sequence; +#endif +int32_t low_delay; +int32_t horizontal_size; +int32_t vertical_size; +int32_t sample_precision; +int32_t video_range; + +int32_t display_horizontal_size; +int32_t display_vertical_size; +int32_t TD_mode; +int32_t view_packing_mode; +int32_t view_reverse; + +int32_t b_pmvr_enabled; +int32_t dhp_enabled; +int32_t b_dmh_enabled; +int32_t b_mhpskip_enabled; +int32_t wsm_enabled; +int32_t b_secT_enabled; + +int32_t tmp_time; +int32_t FrameNum; +int32_t eos; +int32_t pre_img_type; +int32_t pre_img_types; +/* int32_t pre_str_vec; */ +int32_t pre_img_tr; +int32_t pre_img_qp; +int32_t pre_tmp_time; +int32_t RefPicExist; /* 20071224 */ +int32_t 
BgRefPicExist; +int32_t dec_ref_num; /* ref order */ + +/* video edit code */ /* M1956 by Grandview 2006.12.12 */ +int32_t vec_flag; + +/* Copyright_extension(void) header */ +int32_t copyright_flag; +int32_t copyright_identifier; +int32_t original_or_copy; +int64_t copyright_number_1; +int64_t copyright_number_2; +int64_t copyright_number_3; +/* Camera_parameters_extension */ +int32_t camera_id; +int32_t height_of_image_device; +int32_t focal_length; +int32_t f_number; +int32_t vertical_angle_of_view; +int32_t camera_position_x_upper; +int32_t camera_position_x_lower; +int32_t camera_position_y_upper; +int32_t camera_position_y_lower; +int32_t camera_position_z_upper; +int32_t camera_position_z_lower; +int32_t camera_direction_x; +int32_t camera_direction_y; +int32_t camera_direction_z; +int32_t image_plane_vertical_x; +int32_t image_plane_vertical_y; +int32_t image_plane_vertical_z; + +#if AVS2_HDR_HLS +/* mastering_display_and_content_metadata_extension(void) header */ +int32_t display_primaries_x0; +int32_t display_primaries_y0; +int32_t display_primaries_x1; +int32_t display_primaries_y1; +int32_t display_primaries_x2; +int32_t display_primaries_y2; +int32_t white_point_x; +int32_t white_point_y; +int32_t max_display_mastering_luminance; +int32_t min_display_mastering_luminance; +int32_t maximum_content_light_level; +int32_t maximum_frame_average_light_level; +#endif + +/* I_pictures_header(void) */ +int32_t top_field_first; +int32_t repeat_first_field; +int32_t progressive_frame; +#if INTERLACE_CODING +int32_t is_top_field; +#endif +/* int32_t fixed_picture_qp; //qyu 0927 */ +int32_t picture_qp; +int32_t fixed_picture_qp; +int32_t time_code_flag; +int32_t time_code; +int32_t loop_filter_disable; +int32_t loop_filter_parameter_flag; +/* int32_t alpha_offset; */ +/* int32_t beta_offset; */ + +/* Pb_picture_header(void) */ +int32_t picture_coding_type; + +/*picture_display_extension(void)*/ +int32_t frame_centre_horizontal_offset[4]; +int32_t 
frame_centre_vertical_offset[4]; + +/* slice_header(void) */ +int32_t img_width; +int32_t slice_vertical_position; +int32_t slice_vertical_position_extension; +int32_t fixed_slice_qp; +int32_t slice_qp; +int32_t slice_horizontal_positon; /* added by mz, 2008.04 */ +int32_t slice_horizontal_positon_extension; + +int32_t StartCodePosition; +int32_t background_pred_flag; + + +/* Reference Manage */ +int32_t displaydelay; +int32_t picture_reorder_delay; +#if M3480_TEMPORAL_SCALABLE +int32_t temporal_id_exist_flag; +#endif + +int32_t gop_size; +struct reference_management decod_RPS[MAXGOP]; +struct reference_management curr_RPS; +int32_t last_output; +int32_t trtmp; +#if M3480_TEMPORAL_SCALABLE +int32_t cur_layer; +#endif + +/* Adaptive frequency weighting quantization */ +#if FREQUENCY_WEIGHTING_QUANTIZATION +int32_t weight_quant_enable_flag; +int32_t load_seq_weight_quant_data_flag; + +int32_t pic_weight_quant_enable_flag; +int32_t pic_weight_quant_data_index; +int32_t weighting_quant_param; +int32_t weighting_quant_model; +int16_t quant_param_undetail[6]; /* M2148 2007-09 */ +int16_t quant_param_detail[6]; /* M2148 2007-09 */ +int32_t WeightQuantEnable; /* M2148 2007-09 */ +int32_t mb_adapt_wq_disable; /* M2331 2008-04 */ +int32_t mb_wq_mode; /* M2331 2008-04 */ +#if CHROMA_DELTA_QP +int32_t chroma_quant_param_disable; +int32_t chroma_quant_param_delta_u; +int32_t chroma_quant_param_delta_v; +#endif + +int32_t b_pre_dec_intra_img; +int32_t pre_dec_img_type; +int32_t CurrentSceneModel; +#endif + +int32_t curr_IDRcoi; +int32_t curr_IDRtr; +int32_t next_IDRtr; +int32_t next_IDRcoi; +int32_t end_SeqTr; + +#if MB_DQP +int32_t lastQP; +/* FILE * testQP; */ +#endif + +}; +/* extern Video_Dec_data *hd; */ + +struct DecodingEnvironment_s { + uint32_t Dbuffer; + int32_t Dbits_to_go; + uint8_t *Dcodestrm; + int32_t *Dcodestrm_len; +}; + +/* added at rm52k version */ + +struct inp_par; + + + +/* ! 
Slice */ +struct slice { + int32_t picture_id; + int32_t qp; + int32_t picture_type; /* !< picture type */ + int32_t start_mb_nr; + /* !< number of different partitions */ + int32_t max_part_nr; + + /* added by lzhang */ + /* !< pointer to struct of context models for use in AEC */ + struct SyntaxInfoContexts_s *syn_ctx; +}; + +struct alfdatapart { + struct Bitstream_s *bitstream; + struct DecodingEnvironment_s de_AEC; + struct SyntaxInfoContexts_s *syn_ctx; +}; +/* static int32_t alfParAllcoated = 0; */ + +/* input parameters from configuration file */ +struct inp_par { + int32_t buf_cycle; /* <! Frame buffer size */ + int32_t ref_pic_order; /* <! ref order */ + int32_t output_dec_pic; /* <! output_dec_pic */ + int32_t profile_id; + int32_t level_id; + int32_t chroma_format; + int32_t g_uiMaxSizeInBit; + int32_t alpha_c_offset; + int32_t beta_offset; + int32_t useNSQT; +#if MB_DQP + int32_t useDQP; +#endif + int32_t useSDIP; + int32_t sao_enable; +#if M3480_TEMPORAL_SCALABLE + int32_t temporal_id_exist_flag; +#endif + int32_t alf_enable; + + int32_t crossSliceLoopFilter; + + int32_t sample_bit_depth; /* sample bit depth */ + /* decoded file bit depth (assuming output_bit_depth is + less or equal to sample_bit_depth) */ + int32_t output_bit_depth; + + + int32_t MD5Enable; + +#if OUTPUT_INTERLACE_MERGED_PIC + int32_t output_interlace_merged_picture; +#endif + +}; + +/* extern struct inp_par *input; */ + +struct outdata_s { +#if RD170_FIX_BG + struct STDOUT_DATA_s stdoutdata[REF_MAXBUFFER]; +#else + struct STDOUT_DATA_s stdoutdata[8]; +#endif + int32_t buffer_num; +}; +/* outdata outprint; */ + +#define PAYLOAD_TYPE_IDERP 8 + +struct Bitstream_s *AllocBitstream(void); +void FreeBitstream(void); +#if TRACE +void tracebits2(const int8_t *trace_str, int32_t len, int32_t info); +#endif + +/* int32_t direct_mv[45][80][4][4][3]; // only to verify result */ + +#define I_PICTURE_START_CODE 0xB3 +#define PB_PICTURE_START_CODE 0xB6 +#define SLICE_START_CODE_MIN 0x00 +#define 
SLICE_START_CODE_MAX 0x8F +#define USER_DATA_START_CODE 0xB2 +#define SEQUENCE_HEADER_CODE 0xB0 +#define EXTENSION_START_CODE 0xB5 +#define SEQUENCE_END_CODE 0xB1 +#define VIDEO_EDIT_CODE 0xB7 + + +#define SEQUENCE_DISPLAY_EXTENSION_ID 2 +#define COPYRIGHT_EXTENSION_ID 4 +#define CAMERAPARAMETERS_EXTENSION_ID 11 +#define PICTURE_DISPLAY_EXTENSION_ID 7 +#if M3480_TEMPORAL_SCALABLE +#define TEMPORAL_SCALABLE_EXTENSION_ID 3 +#endif + +#if ROI_M3264 +#if RD1501_FIX_BG +#define LOCATION_DATA_EXTENSION_ID 12 +#else +#define LOCATION_DATA_EXTENSION_ID 15 +#endif +#endif + +#if AVS2_HDR_HLS +#define MASTERING_DISPLAY_AND_CONTENT_METADATA_EXTENSION 10 +#endif + +void malloc_slice(void); +void free_slice(void); + + +void read_ipred_modes(void); + +int32_t AEC_startcode_follows(int32_t eos_bit); + +/* extern uint32_t max_value_s; */ + +/*ComAdaptiveLoopFilter.h*/ +#define ALF_MAX_NUM_COEF 9 +#define NO_VAR_BINS 16 + + +#define RPM_BEGIN 0x100 +#define ALF_BEGIN 0x180 +#define RPM_END 0x280 + +union param_u { + struct { + uint16_t data[RPM_END - RPM_BEGIN]; + } l; + struct { + /*sequence*/ + uint16_t profile_id; + uint16_t level_id; + uint16_t progressive_sequence; + uint16_t is_field_sequence; + uint16_t horizontal_size; + uint16_t vertical_size; + uint16_t chroma_format; + uint16_t sample_precision; + uint16_t encoding_precision; + uint16_t aspect_ratio_information; + uint16_t frame_rate_code; + uint16_t bit_rate_lower; + uint16_t bit_rate_upper; + uint16_t low_delay; + uint16_t temporal_id_exist_flag; + uint16_t g_uiMaxSizeInBit; + +#define BACKGROUND_PICTURE_DISABLE_BIT 11 +#define B_MHPSKIP_ENABLED_BIT 10 +#define DHP_ENABLED_BIT 9 +#define WSM_ENABLED_BIT 8 +#define INTER_AMP_ENABLE_BIT 7 +#define USENSQT_BIT 6 +#define USESDIP_BIT 5 +#define B_SECT_ENABLED_BIT 4 +#define SAO_ENABLE_BIT 3 +#define ALF_ENABLE_BIT 2 +#define B_PMVR_ENABLED_BIT 1 +#define CROSSSLICELOOPFILTER_BIT 0 + uint16_t avs2_seq_flags; + + uint16_t num_of_RPS; + uint16_t picture_reorder_delay; + 
/*PIC*/ + uint16_t time_code_flag; + uint16_t time_code; + uint16_t background_picture_flag; + uint16_t background_picture_output_flag; + uint16_t coding_order; + uint16_t cur_layer; + uint16_t displaydelay; /*???*/ + uint16_t predict; /*???*/ + uint16_t RPS_idx; /*???*/ + uint16_t referd_by_others_cur; + uint16_t num_of_ref_cur; + uint16_t ref_pic_cur[8]; + uint16_t num_to_remove_cur; + uint16_t remove_pic_cur[8]; + uint16_t progressive_frame; + uint16_t picture_structure; + uint16_t top_field_first; + uint16_t repeat_first_field; + uint16_t is_top_field; + + uint16_t picture_coding_type; + uint16_t background_pred_flag; + uint16_t background_reference_enable; + uint16_t random_access_decodable_flag; + uint16_t lcu_size; + uint16_t alpha_c_offset; + uint16_t beta_offset; + uint16_t chroma_quant_param_delta_cb; + uint16_t chroma_quant_param_delta_cr; + uint16_t loop_filter_disable; + + uint16_t video_signal_type; + uint16_t color_description; + uint16_t display_primaries_x[3]; + uint16_t display_primaries_y[3]; + uint16_t white_point_x; + uint16_t white_point_y; + uint16_t max_display_mastering_luminance; + uint16_t min_display_mastering_luminance; + uint16_t max_content_light_level; + uint16_t max_picture_average_light_level; + } p; + struct { + uint16_t padding[ALF_BEGIN - RPM_BEGIN]; + uint16_t picture_alf_enable_Y; + uint16_t picture_alf_enable_Cb; + uint16_t picture_alf_enable_Cr; + uint16_t alf_filters_num_m_1; + uint16_t region_distance[16]; + uint16_t alf_cb_coeffmulti[9]; + uint16_t alf_cr_coeffmulti[9]; + uint16_t alf_y_coeffmulti[16][9]; + } alf; +}; + + +struct avs2_decoder { + uint8_t init_hw_flag; + struct inp_par input; + struct ImageParameters_s img; + struct Video_Com_data_s hc; + struct Video_Dec_data_s hd; + union param_u param; + struct avs2_frame_s frm_pool[AVS2_MAX_BUFFER_NUM]; + struct avs2_frame_s *fref[REF_MAXBUFFER]; +#ifdef AML + /*used for background + when background_picture_output_flag is 0*/ + struct avs2_frame_s *m_bg; + /*current 
background picture, ether m_bg or fref[..]*/ + struct avs2_frame_s *f_bg; +#endif + struct outdata_s outprint; + uint32_t cm_header_start; + struct ALFParam_s m_alfPictureParam[NUM_ALF_COMPONENT]; +#ifdef FIX_CHROMA_FIELD_MV_BK_DIST + int8_t bk_img_is_top_field; +#endif +#ifdef AML + int32_t lcu_size; + int32_t lcu_size_log2; + int32_t lcu_x_num; + int32_t lcu_y_num; + int32_t lcu_total; + int32_t ref_maxbuffer; + int32_t to_prepare_disp_count; + int8_t bufmgr_error_flag; +#endif +}; + + +extern void write_frame(struct avs2_decoder *avs2_dec, int32_t pos); +extern void init_frame_t(struct avs2_frame_s *currfref); +extern void report_frame(struct avs2_decoder *avs2_dec, + struct outdata_s *data, int32_t pos); + +extern int avs2_post_process(struct avs2_decoder *avs2_dec); +extern void avs2_prepare_header(struct avs2_decoder *avs2_dec, + int32_t start_code); +extern int32_t avs2_process_header(struct avs2_decoder *avs2_dec); + +extern void init_avs2_decoder(struct avs2_decoder *avs2_dec); + +extern int32_t avs2_init_global_buffers(struct avs2_decoder *avs2_dec); + +extern bool is_avs2_print_param(void); +extern bool is_avs2_print_bufmgr_detail(void); +#endif +
diff --git a/drivers/frame_provider/decoder_v4l/avs2/vavs2.c b/drivers/frame_provider/decoder_v4l/avs2/vavs2.c new file mode 100644 index 0000000..ce5ffde --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/avs2/vavs2.c
@@ -0,0 +1,8375 @@ + /* + * drivers/amlogic/amports/avs2.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * +*/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/frame_sync/tsync.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/slab.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../../decoder/utils/decoder_mmu_box.h" +#include "../../decoder/utils/decoder_bmmu_box.h" +#include "avs2_global.h" + +#define MEM_NAME "codec_avs2" +/* #include <mach/am_regs.h> */ +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../decoder/utils/vdec.h" +#include "../../decoder/utils/amvdec.h" 
+ +#include <linux/amlogic/media/video_sink/video.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../../decoder/utils/config_parser.h" +#include "../../decoder/utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../../decoder/utils/vdec_feature.h" + + +#define I_ONLY_SUPPORT +#define MIX_STREAM_SUPPORT +#define G12A_BRINGUP_DEBUG +#define CONSTRAIN_MAX_BUF_NUM + +#define CO_MV_COMPRESS + +#include "vavs2.h" +#define HEVC_SHIFT_LENGTH_PROTECT 0x313a +#define HEVC_MPRED_CTRL4 0x324c +#define HEVC_MPRED_CTRL9 0x325b +#define HEVC_DBLK_CFGD 0x350d +#define HEVC_CM_HEADER_START_ADDR 0x3628 +#define HEVC_DBLK_CFGB 0x350b +#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464 +#define HEVC_SAO_MMU_VH1_ADDR 0x363b +#define HEVC_SAO_MMU_VH0_ADDR 0x363a + + +/* + * AVS2_DEC_STATUS define +*/ +/*internal*/ +#define AVS2_DEC_IDLE 0 +#define AVS2_SEQUENCE 1 +#define AVS2_I_PICTURE 2 +#define AVS2_PB_PICTURE 3 +#define AVS2_DISCARD_STARTCODE 4 +#define AVS2_DISCARD_NAL 4 + +#define AVS2_SLICE_DECODING 6 + +#define SWAP_IN_CMD 0x10 +#define SWAP_OUT_CMD 0x11 +#define SWAP_OUTIN_CMD 0x12 +#define SWAP_DONE 0x13 +#define SWAP_POST_INIT 0x14 + +/*head*/ +#define AVS2_HEAD_SEQ_READY 0x21 +#define AVS2_HEAD_PIC_I_READY 0x22 +#define AVS2_HEAD_PIC_PB_READY 0x23 +#define AVS2_HEAD_SEQ_END_READY 0x24 +#define AVS2_STARTCODE_SEARCH_DONE 0x25 + +/*pic done*/ +#define HEVC_DECPIC_DATA_DONE 0x30 +#define HEVC_DECPIC_DATA_ERROR 0x31 +#define HEVC_NAL_DECODE_DONE 0x32 +#define AVS2_DECODE_BUFEMPTY 0x33 +#define AVS2_DECODE_TIMEOUT 0x34 +#define AVS2_DECODE_OVER_SIZE 0x35 +#define AVS2_EOS 0x36 + +/*cmd*/ +#define AVS2_10B_DISCARD_NAL 0xf0 +#define AVS2_SEARCH_NEW_PIC 0xf1 +#define AVS2_ACTION_ERROR 0xfe +#define HEVC_ACTION_ERROR 0xfe +#define AVS2_ACTION_DONE 0xff +/*AVS2_DEC_STATUS end*/ + + +#define VF_POOL_SIZE 32 + +#undef pr_info +#define pr_info printk + +#define DECODE_MODE_SINGLE (0 | (0x80 << 24)) +#define DECODE_MODE_MULTI_STREAMBASE (1 | (0x80 
<< 24)) +#define DECODE_MODE_MULTI_FRAMEBASE (2 | (0x80 << 24)) + + +#define VP9_TRIGGER_FRAME_DONE 0x100 +#define VP9_TRIGGER_FRAME_ENABLE 0x200 + +/*#define MV_MEM_UNIT 0x240*/ +#define MV_MEM_UNIT 0x200 +/*--------------------------------------------------- + Include "parser_cmd.h" +---------------------------------------------------*/ +#define PARSER_CMD_SKIP_CFG_0 0x0000090b + +#define PARSER_CMD_SKIP_CFG_1 0x1b14140f + +#define PARSER_CMD_SKIP_CFG_2 0x001b1910 + + +#define PARSER_CMD_NUMBER 37 + +static unsigned short parser_cmd[PARSER_CMD_NUMBER] = { +0x0401, +0x8401, +0x0800, +0x0402, +0x9002, +0x1423, +0x8CC3, +0x1423, +0x8804, +0x9825, +0x0800, +0x04FE, +0x8406, +0x8411, +0x1800, +0x8408, +0x8409, +0x8C2A, +0x9C2B, +0x1C00, +0x840F, +0x8407, +0x8000, +0x8408, +0x2000, +0xA800, +0x8410, +0x04DE, +0x840C, +0x840D, +0xAC00, +0xA000, +0x08C0, +0x08E0, +0xA40E, +0xFC00, +0x7C00 +}; + +static int32_t g_WqMDefault4x4[16] = { + 64, 64, 64, 68, + 64, 64, 68, 72, + 64, 68, 76, 80, + 72, 76, 84, 96 +}; + + +static int32_t g_WqMDefault8x8[64] = { + 64, 64, 64, 64, 68, 68, 72, 76, + 64, 64, 64, 68, 72, 76, 84, 92, + 64, 64, 68, 72, 76, 80, 88, 100, + 64, 68, 72, 80, 84, 92, 100, 112, + 68, 72, 80, 84, 92, 104, 112, 128, + 76, 80, 84, 92, 104, 116, 132, 152, + 96, 100, 104, 116, 124, 140, 164, 188, + 104, 108, 116, 128, 152, 172, 192, 216 +}; +/*#define HEVC_PIC_STRUCT_SUPPORT*/ +/* to remove, fix build error */ + +/*#define CODEC_MM_FLAGS_FOR_VDECODER 0*/ + +#define MULTI_INSTANCE_SUPPORT +/* #define ERROR_HANDLE_DEBUG */ + +#ifndef STAT_KTHREAD +#define STAT_KTHREAD 0x40 +#endif + +#ifdef MULTI_INSTANCE_SUPPORT +#define MAX_DECODE_INSTANCE_NUM 12 +#define MULTI_DRIVER_NAME "ammvdec_avs2_v4l" + +#define lock_buffer(dec, flags) \ + spin_lock_irqsave(&dec->buffer_lock, flags) + +#define unlock_buffer(dec, flags) \ + spin_unlock_irqrestore(&dec->buffer_lock, flags) + +static u32 debug_mask = 0xffffffff; +#define get_dbg_flag(dec) ((debug_mask & (1 << dec->index)) ? 
debug : 0) + +static unsigned int max_decode_instance_num + = MAX_DECODE_INSTANCE_NUM; +static unsigned int decode_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int display_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int max_process_time[MAX_DECODE_INSTANCE_NUM]; +static unsigned int run_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int input_empty[MAX_DECODE_INSTANCE_NUM]; +static unsigned int not_run_ready[MAX_DECODE_INSTANCE_NUM]; + +#ifdef G12A_BRINGUP_DEBUG +static u32 decode_timeout_val = 200; +#else +static u32 decode_timeout_val = 200; +#endif +static int start_decode_buf_level = 0x8000; +#ifdef AVS2_10B_MMU +static u32 work_buf_size; /* = 24 * 1024 * 1024*/; +#else +static u32 work_buf_size = 32 * 1024 * 1024; +#endif + +static u32 mv_buf_margin; +static int pre_decode_buf_level = 0x1000; +static u32 again_threshold; + + +/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */ +/* double_write_mode: + * 0, no double write; + * 1, 1:1 ratio; + * 2, (1/4):(1/4) ratio; + * 3, (1/4):(1/4) ratio, with both compressed frame included + * 4, (1/2):(1/2) ratio; + * 8, (1/8):(1/8) ratio; + * 0x10, double write only + * 0x100, if > 1080p,use mode 4,else use mode 1; + * 0x200, if > 1080p,use mode 2,else use mode 1; + * 0x300, if > 720p, use mode 4, else use mode 1; + */ +static u32 double_write_mode; +static u32 without_display_mode; + +static u32 mv_buf_dynamic_alloc; +#define DRIVER_NAME "amvdec_avs2_v4l" +#define DRIVER_HEADER_NAME "amvdec_avs2_header" + + +#define PUT_INTERVAL (HZ/100) +#define ERROR_SYSTEM_RESET_COUNT 200 + +#define PTS_NORMAL 0 +#define PTS_NONE_REF_USE_DURATION 1 + +#define PTS_MODE_SWITCHING_THRESHOLD 3 +#define PTS_MODE_SWITCHING_RECOVERY_THREASHOLD 3 + +#define DUR2PTS(x) ((x)*90/96) + +struct AVS2Decoder_s; +static int vavs2_vf_states(struct vframe_states *states, void *); +static struct vframe_s *vavs2_vf_peek(void *); +static struct vframe_s *vavs2_vf_get(void *); +static void vavs2_vf_put(struct 
vframe_s *, void *); +static int vavs2_event_cb(int type, void *data, void *private_data); +static void set_vframe(struct AVS2Decoder_s *dec, + struct vframe_s *vf, struct avs2_frame_s *pic, u8 dummy); +static s32 vavs2_init(struct vdec_s *vdec); +static void vavs2_prot_init(struct AVS2Decoder_s *dec); +static int vavs2_local_init(struct AVS2Decoder_s *dec); +static void vavs2_put_timer_func(struct timer_list *timer); +static void dump_data(struct AVS2Decoder_s *dec, int size); +static unsigned char get_data_check_sum + (struct AVS2Decoder_s *dec, int size); +static void dump_pic_list(struct AVS2Decoder_s *dec); + +static const char vavs2_dec_id[] = "vavs2-dev"; + +#define PROVIDER_NAME "decoder.avs2" +#define MULTI_INSTANCE_PROVIDER_NAME "vdec.avs2" + +static const struct vframe_operations_s vavs2_vf_provider = { + .peek = vavs2_vf_peek, + .get = vavs2_vf_get, + .put = vavs2_vf_put, + .event_cb = vavs2_event_cb, + .vf_states = vavs2_vf_states, +}; + +static struct vframe_provider_s vavs2_vf_prov; + +static u32 bit_depth_luma; +static u32 bit_depth_chroma; +static u32 frame_width; +static u32 frame_height; +static u32 video_signal_type; +static u32 pts_unstable; +static u32 on_no_keyframe_skiped; + +static u32 force_video_signal_type; +static u32 enable_force_video_signal_type; +#define VIDEO_SIGNAL_TYPE_AVAILABLE_MASK 0x20000000 +#define HDR_CUVA_MASK 0x40000000 + + +static const char * const video_format_names[] = { + "component", "PAL", "NTSC", "SECAM", + "MAC", "unspecified", "Reserved", "Reserved" +}; + +static inline int div_r32(int64_t m, int n) +{ +/* +return (int)(m/n) +*/ +#ifndef CONFIG_ARM64 + int64_t qu = 0; + qu = div_s64(m, n); + return (int)qu; +#else + return (int)(m/n); +#endif +} + +enum vpx_bit_depth_t { + AVS2_BITS_8 = 8, /**< 8 bits */ + AVS2_BITS_10 = 10, /**< 10 bits */ + AVS2_BITS_12 = 12, /**< 12 bits */ +}; + +/*USE_BUF_BLOCK*/ +struct BUF_s { + int index; + unsigned int alloc_flag; + /*buffer */ + unsigned int cma_page_count; + unsigned 
long alloc_addr; + unsigned long start_adr; + unsigned int size; + + unsigned int free_start_adr; +} /*BUF_t */; + +struct MVBUF_s { + unsigned long start_adr; + unsigned int size; + int used_flag; +} /*MVBUF_t */; + + /* #undef BUFMGR_ONLY to enable hardware configuration */ + +/*#define TEST_WR_PTR_INC*/ +#define WR_PTR_INC_NUM 128 + +#define SIMULATION +#define DOS_PROJECT +#undef MEMORY_MAP_IN_REAL_CHIP + +/*#undef DOS_PROJECT*/ +/*#define MEMORY_MAP_IN_REAL_CHIP*/ + +/*#define BUFFER_MGR_ONLY*/ +/*#define CONFIG_HEVC_CLK_FORCED_ON*/ +/*#define ENABLE_SWAP_TEST*/ + +#ifdef AVS2_10B_NV21 +#define MEM_MAP_MODE 2 /* 0:linear 1:32x32 2:64x32*/ +#else +#define MEM_MAP_MODE 0 /* 0:linear 1:32x32 2:64x32*/ +#endif + +#ifdef AVS2_10B_NV21 +#else +#define LOSLESS_COMPRESS_MODE +#endif + +#define DOUBLE_WRITE_YSTART_TEMP 0x02000000 +#define DOUBLE_WRITE_CSTART_TEMP 0x02900000 + +#define AVS2_DBG_BUFMGR 0x01 +#define AVS2_DBG_BUFMGR_MORE 0x02 +#define AVS2_DBG_BUFMGR_DETAIL 0x04 +#define AVS2_DBG_IRQ_EVENT 0x08 +#define AVS2_DBG_OUT_PTS 0x10 +#define AVS2_DBG_PRINT_SOURCE_LINE 0x20 +#define AVS2_DBG_PRINT_PARAM 0x40 +#define AVS2_DBG_PRINT_PIC_LIST 0x80 +#define AVS2_DBG_SEND_PARAM_WITH_REG 0x100 +#define AVS2_DBG_MERGE 0x200 +#define AVS2_DBG_DBG_LF_PRINT 0x400 +#define AVS2_DBG_REG 0x800 +#define AVS2_DBG_PIC_LEAK 0x1000 +#define AVS2_DBG_PIC_LEAK_WAIT 0x2000 +#define AVS2_DBG_HDR_INFO 0x4000 +#define AVS2_DBG_HDR_DATA 0x8000 +#define AVS2_DBG_DIS_LOC_ERROR_PROC 0x10000 +#define AVS2_DBG_DIS_SYS_ERROR_PROC 0x20000 +#define AVS2_DBG_DUMP_PIC_LIST 0x40000 +#define AVS2_DBG_TRIG_SLICE_SEGMENT_PROC 0x80000 +#define AVS2_DBG_FORCE_UNCOMPRESS 0x100000 +#define AVS2_DBG_LOAD_UCODE_FROM_FILE 0x200000 +#define AVS2_DBG_FORCE_SEND_AGAIN 0x400000 +#define AVS2_DBG_DUMP_DATA 0x800000 +#define AVS2_DBG_DUMP_LMEM_BUF 0x1000000 +#define AVS2_DBG_DUMP_RPM_BUF 0x2000000 +#define AVS2_DBG_CACHE 0x4000000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 +/*MULTI_INSTANCE_SUPPORT*/ +#define 
PRINT_FLAG_ERROR 0 +#define PRINT_FLAG_VDEC_STATUS 0x20000000 +#define PRINT_FLAG_VDEC_DETAIL 0x40000000 +#define PRINT_FLAG_VDEC_DATA 0x80000000 + +#define PRINT_LINE() \ + do { \ + if (debug & AVS2_DBG_PRINT_SOURCE_LINE)\ + pr_info("%s line %d\n", __func__, __LINE__);\ + } while (0) + +static u32 debug; + +static u32 debug_again; + +bool is_avs2_print_param(void) +{ + bool ret = false; + if (debug & AVS2_DBG_PRINT_PARAM) + ret = true; + return ret; +} + +bool is_avs2_print_bufmgr_detail(void) +{ + bool ret = false; + if (debug & AVS2_DBG_BUFMGR_DETAIL) + ret = true; + return ret; +} +static bool is_reset; +/*for debug*/ +static u32 force_bufspec; +/* + udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode detail print + bit [31:16] not 0, pos to dump lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 udebug_pause_decode_idx; + +static u32 force_disp_pic_index; + +#define AUX_BUF_ALIGN(adr) ((adr + 0xf) & (~0xf)) +static u32 cuva_buf_size = 512; + +#define DEBUG_REG +#ifdef DEBUG_REG +static void WRITE_VREG_DBG2(unsigned adr, unsigned val) +{ + if (debug & AVS2_DBG_REG) + pr_info("%s(%x, %x)\n", __func__, adr, val); + if (adr != 0) + WRITE_VREG(adr, val); +} + +#undef WRITE_VREG +#define WRITE_VREG WRITE_VREG_DBG2 +#endif + + +//#ifdef AVS2_10B_MMU +//#define MMU_COMPRESS_HEADER_SIZE 0x48000 +//#define MMU_COMPRESS_8K_HEADER_SIZE 0x48000*4 +//#endif +#define MMU_COMPRESS_HEADER_SIZE_1080P 0x10000 +#define MMU_COMPRESS_HEADER_SIZE_4K 0x48000 +#define MMU_COMPRESS_HEADER_SIZE_8K 0x120000 + +#define INVALID_IDX -1 /* Invalid buffer index.*/ + + +#define FRAME_BUFFERS (AVS2_MAX_BUFFER_NUM) +#define 
HEADER_FRAME_BUFFERS (FRAME_BUFFERS) +#define MAX_BUF_NUM (FRAME_BUFFERS) + +#define FRAME_CONTEXTS_LOG2 2 +#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2) +/*buffer + header buffer + workspace*/ +#undef MV_USE_FIXED_BUF +#ifdef MV_USE_FIXED_BUF +#define MAX_BMMU_BUFFER_NUM ((FRAME_BUFFERS + HEADER_FRAME_BUFFERS + 1)+1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n+1) +#define WORK_SPACE_BUF_ID (FRAME_BUFFERS + HEADER_FRAME_BUFFERS+1) +#else +#define MAX_BMMU_BUFFER_NUM (((FRAME_BUFFERS*2)+HEADER_FRAME_BUFFERS+1)+1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n+1) +#define MV_BUFFER_IDX(n) ((FRAME_BUFFERS * 2) + n+1) +#define WORK_SPACE_BUF_ID ((FRAME_BUFFERS * 2) + HEADER_FRAME_BUFFERS+1) +#endif + +#define CO_MV_BUF_SIZE_1080P 0x3fc00 +#define CO_MV_BUF_SIZE_4K 0x120000 +#define CO_MV_BUF_SIZE_8K 0x480000 +/* +static void set_canvas(struct AVS2Decoder_s *dec, + struct avs2_frame_s *pic); +int avs2_prepare_display_buf(struct AVS2Decoder_s *dec, + int pos); +*/ + + +struct buff_s { + u32 buf_start; + u32 buf_size; + u32 buf_end; +}; + +struct BuffInfo_s { + u32 max_width; + u32 max_height; + u32 start_adr; + u32 end_adr; + struct buff_s ipp; + struct buff_s sao_abv; + struct buff_s sao_vb; + struct buff_s short_term_rps; + struct buff_s rcs; + struct buff_s sps; + struct buff_s pps; + struct buff_s sao_up; + struct buff_s swap_buf; + struct buff_s swap_buf2; + struct buff_s scalelut; + struct buff_s dblk_para; + struct buff_s dblk_data; + struct buff_s dblk_data2; +#ifdef AVS2_10B_MMU + struct buff_s mmu_vbh; + struct buff_s cm_header; +#endif + struct buff_s mpred_above; +#ifdef MV_USE_FIXED_BUF + struct buff_s mpred_mv; +#endif + struct buff_s rpm; + struct buff_s lmem; +}; + +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_CONFIG_PARAM 3 +#define DEC_RESULT_ERROR 4 +#define DEC_INIT_PICLIST 5 +#define DEC_UNINIT_PICLIST 6 +#define 
DEC_RESULT_GET_DATA 7 +#define DEC_RESULT_GET_DATA_RETRY 8 +#define DEC_RESULT_EOS 9 +#define DEC_RESULT_FORCE_EXIT 10 + +static void avs2_work(struct work_struct *work); +struct loop_filter_info_n; +struct loopfilter; +struct segmentation; + +struct AVS2Decoder_s { + int pic_list_init_flag; + unsigned char index; + spinlock_t buffer_lock; + struct device *cma_dev; + struct platform_device *platform_dev; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + struct vframe_chunk_s *chunk; + int dec_result; + struct work_struct work; + u32 start_shift_bytes; + + struct BuffInfo_s work_space_buf_store; + unsigned long buf_start; + u32 buf_size; + u32 cma_alloc_count; + unsigned long cma_alloc_addr; + uint8_t eos; + unsigned long int start_process_time; + unsigned last_lcu_idx; + int decode_timeout_count; + unsigned timeout_num; + + int double_write_mode; + + unsigned char m_ins_flag; + char *provider_name; + int frame_count; + u32 stat; + struct timer_list timer; + u32 frame_dur; + u32 frame_ar; + int fatal_error; + uint8_t init_flag; + uint8_t first_sc_checked; + uint8_t process_busy; +#define PROC_STATE_INIT 0 +#define PROC_STATE_HEAD_DONE 1 +#define PROC_STATE_DECODING 2 +#define PROC_STATE_HEAD_AGAIN 3 +#define PROC_STATE_DECODE_AGAIN 4 +#define PROC_STATE_TEST1 5 + uint8_t process_state; + u32 ucode_pause_pos; + + int show_frame_num; +#ifndef AVS2_10B_MMU + struct buff_s mc_buf_spec; +#endif + struct dec_sysinfo vavs2_amstream_dec_info; + void *rpm_addr; + void *lmem_addr; + dma_addr_t rpm_phy_addr; + dma_addr_t lmem_phy_addr; + unsigned short *lmem_ptr; + unsigned short *debug_ptr; + + u32 cuva_size; + void *cuva_addr; + dma_addr_t cuva_phy_addr; + +#if 1 + /*AVS2_10B_MMU*/ + void *frame_mmu_map_addr; + dma_addr_t frame_mmu_map_phy_addr; +#endif + unsigned int use_cma_flag; + + struct BUF_s m_BUF[MAX_BUF_NUM]; + struct MVBUF_s m_mv_BUF[MAX_BUF_NUM]; + u32 used_buf_num; + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + 
DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(pending_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + atomic_t vf_pre_count; + atomic_t vf_get_count; + atomic_t vf_put_count; + int buf_num; + unsigned int losless_comp_body_size; + + u32 video_signal_type; + u32 video_ori_signal_type; + + int pts_mode; + int last_lookup_pts; + int last_pts; + u64 last_lookup_pts_us64; + u64 last_pts_us64; + u64 shift_byte_count; + u32 shift_byte_count_lo; + u32 shift_byte_count_hi; + int pts_mode_switching_count; + int pts_mode_recovery_count; + + bool get_frame_dur; + u32 saved_resolution; + + /**/ + int refresh_frame_flags; + uint8_t hold_ref_buf; + struct BuffInfo_s *work_space_buf; +#ifndef AVS2_10B_MMU + struct buff_s *mc_buf; +#endif + unsigned int frame_width; + unsigned int frame_height; + + unsigned short *rpm_ptr; + int init_pic_w; + int init_pic_h; + + int slice_type; + + int decode_idx; + int slice_idx; + uint8_t wait_buf; + uint8_t error_flag; + unsigned int bufmgr_error_count; + + /* bit 0, for decoding; bit 1, for displaying */ + uint8_t ignore_bufmgr_error; + uint8_t skip_PB_before_I; + int PB_skip_mode; + int PB_skip_count_after_decoding; + /*hw*/ + + /**/ + struct vdec_info *gvs; + + + unsigned int dec_status; + u32 last_put_idx; + int new_frame_displayed; + void *mmu_box; + void *bmmu_box; + struct vframe_master_display_colour_s vf_dp; + struct firmware_s *fw; +#ifdef AVS2_10B_MMU + int cur_fb_idx_mmu; + long used_4k_num; +#endif + struct avs2_decoder avs2_dec; +#define ALF_NUM_BIT_SHIFT 6 +#define NO_VAR_BINS 16 + int32_t m_filterCoeffSym[16][9]; + int32_t m_varIndTab[NO_VAR_BINS]; + + struct vframe_s vframe_dummy; + /* start_decoding_flag, + bit 0, SEQ ready + bit 1, I ready + */ + unsigned char start_decoding_flag; + uint32_t mpred_abv_start_addr; + uint32_t mpred_abv_start_addr_bak; + u8 next_again_flag; + u32 pre_parser_wr_ptr; + int need_cache_size; + u64 sc_start_time; +#ifdef I_ONLY_SUPPORT + u32 
i_only; +#endif + int frameinfo_enable; + struct vframe_qos_s vframe_qos; + u32 dynamic_buf_margin; + int sidebind_type; + int sidebind_channel_id; + u32 endian; + char vdec_name[32]; + char pts_name[32]; + char new_q_name[32]; + char disp_q_name[32]; + dma_addr_t rdma_phy_adr; + unsigned *rdma_adr; + int hdr_flag; +}; + +static int compute_losless_comp_body_size( + struct AVS2Decoder_s *dec, int width, int height, + uint8_t is_bit_depth_10); + +static int avs2_print(struct AVS2Decoder_s *dec, + int flag, const char *fmt, ...) +{ +#define HEVC_PRINT_BUF 256 + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + if (dec == NULL || + (flag == 0) || + (debug & flag)) { + va_list args; + va_start(args, fmt); + if (dec) + len = sprintf(buf, "[%d]", dec->index); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; +} + +static int avs2_print_cont(struct AVS2Decoder_s *dec, + int flag, const char *fmt, ...) +{ + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + if (dec == NULL || + (flag == 0) || + (debug & flag)) { + va_list args; + va_start(args, fmt); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; +} + +#define PROB_SIZE (496 * 2 * 4) +#define PROB_BUF_SIZE (0x5000) +#define COUNT_BUF_SIZE (0x300 * 4 * 4) +/*compute_losless_comp_body_size(4096, 2304, 1) = 18874368(0x1200000)*/ +#define MAX_FRAME_4K_NUM 0x1200 +#define MAX_FRAME_8K_NUM 0x4800 +#define MAX_SIZE_4K (4096 * 2304) +#define IS_8K_SIZE(w, h) (((w) * (h)) > MAX_SIZE_4K) +#define IS_4K_SIZE(w, h) (((w) * (h)) > (1920*1088)) + +static int get_frame_mmu_map_size(struct AVS2Decoder_s *dec) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (IS_8K_SIZE(dec->init_pic_w, dec->init_pic_h))) + return (MAX_FRAME_8K_NUM * 4); + return (MAX_FRAME_4K_NUM * 4); +} + +static int get_compress_header_size(struct AVS2Decoder_s *dec) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && 
+ (IS_8K_SIZE(dec->init_pic_w, dec->init_pic_h))) + return MMU_COMPRESS_HEADER_SIZE_8K; + else if (IS_4K_SIZE(dec->init_pic_w, dec->init_pic_h)) + return MMU_COMPRESS_HEADER_SIZE_4K; + return MMU_COMPRESS_HEADER_SIZE_1080P; +} + +static void reset_process_time(struct AVS2Decoder_s *dec) +{ + if (dec->start_process_time) { + unsigned process_time = + 1000 * (jiffies - dec->start_process_time) / HZ; + dec->start_process_time = 0; + if (process_time > max_process_time[dec->index]) + max_process_time[dec->index] = process_time; + } +} + +static void start_process_time(struct AVS2Decoder_s *dec) +{ + dec->start_process_time = jiffies; + dec->decode_timeout_count = 0; + dec->last_lcu_idx = 0; +} + +static void update_decoded_pic(struct AVS2Decoder_s *dec); + +static void timeout_process(struct AVS2Decoder_s *dec) +{ + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *cur_pic = avs2_dec->hc.cur_pic; + dec->timeout_num++; + amhevc_stop(); + avs2_print(dec, + 0, "%s decoder timeout\n", __func__); + if (cur_pic) + cur_pic->error_mark = 1; + dec->dec_result = DEC_RESULT_DONE; + update_decoded_pic(dec); + reset_process_time(dec); + vdec_schedule_work(&dec->work); +} + +static u32 get_valid_double_write_mode(struct AVS2Decoder_s *dec) +{ + return (dec->m_ins_flag && + ((double_write_mode & 0x80000000) == 0)) ? 
+ dec->double_write_mode : + (double_write_mode & 0x7fffffff); +} + +static int get_double_write_mode(struct AVS2Decoder_s *dec) +{ + u32 valid_dw_mode = get_valid_double_write_mode(dec); + int w = dec->avs2_dec.img.width; + int h = dec->avs2_dec.img.height; + u32 dw = 0x1; /*1:1*/ + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + default: + dw = valid_dw_mode; + break; + } + return dw; +} + +/* for double write buf alloc */ +static int get_double_write_mode_init(struct AVS2Decoder_s *dec) +{ + u32 valid_dw_mode = get_valid_double_write_mode(dec); + u32 dw; + int w = dec->init_pic_w; + int h = dec->init_pic_h; + + dw = 0x1; /*1:1*/ + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + default: + dw = valid_dw_mode; + break; + } + return dw; +} + +//#define MAX_4K_NUM 0x1200 +#ifdef AVS2_10B_MMU +int avs2_alloc_mmu( + struct AVS2Decoder_s *dec, + int cur_buf_idx, + int pic_width, + int pic_height, + unsigned short bit_depth, + unsigned int *mmu_index_adr) +{ + int bit_depth_10 = (bit_depth == AVS2_BITS_10); + int picture_size; + int cur_mmu_4k_number, max_frame_num; +#ifdef DYNAMIC_ALLOC_HEAD + unsigned long buf_addr; + struct avs2_frame_s *pic = dec->avs2_dec.hc.cur_pic; + if (pic->header_adr == 0) { + if (decoder_bmmu_box_alloc_buf_phy + (dec->bmmu_box, + HEADER_BUFFER_IDX(cur_buf_idx), + get_compress_header_size(dec), + DRIVER_HEADER_NAME, + &buf_addr) < 0){ + avs2_print(dec, 0, + "%s malloc compress header failed %d\n", + DRIVER_HEADER_NAME, cur_buf_idx); + dec->fatal_error |= DECODER_FATAL_ERROR_NO_MEM; + return -1; + } else + pic->header_adr = buf_addr; + } +#endif + + picture_size = 
compute_losless_comp_body_size( + dec, pic_width, pic_height, + bit_depth_10); + cur_mmu_4k_number = ((picture_size + (1 << 12) - 1) >> 12); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + max_frame_num = MAX_FRAME_8K_NUM; + else + max_frame_num = MAX_FRAME_4K_NUM; + if (cur_mmu_4k_number > max_frame_num) { + pr_err("over max !! cur_mmu_4k_number 0x%x width %d height %d\n", + cur_mmu_4k_number, pic_width, pic_height); + return -1; + } + return decoder_mmu_box_alloc_idx( + dec->mmu_box, + cur_buf_idx, + cur_mmu_4k_number, + mmu_index_adr); +} +#endif + +#if 0 +/*ndef MV_USE_FIXED_BUF*/ +static void dealloc_mv_bufs(struct AVS2Decoder_s *dec) +{ + int i; + for (i = 0; i < FRAME_BUFFERS; i++) { + if (dec->m_mv_BUF[i].start_adr) { + if (debug) + pr_info( + "dealloc mv buf(%d) adr %ld size 0x%x used_flag %d\n", + i, dec->m_mv_BUF[i].start_adr, + dec->m_mv_BUF[i].size, + dec->m_mv_BUF[i].used_flag); + decoder_bmmu_box_free_idx( + dec->bmmu_box, + MV_BUFFER_IDX(i)); + dec->m_mv_BUF[i].start_adr = 0; + dec->m_mv_BUF[i].size = 0; + dec->m_mv_BUF[i].used_flag = 0; + } + } +} + +static int alloc_mv_buf(struct AVS2Decoder_s *dec, + int i, int size) +{ + int ret = 0; + if (decoder_bmmu_box_alloc_buf_phy + (dec->bmmu_box, + MV_BUFFER_IDX(i), size, + DRIVER_NAME, + &dec->m_mv_BUF[i].start_adr) < 0) { + dec->m_mv_BUF[i].start_adr = 0; + ret = -1; + } else { + dec->m_mv_BUF[i].size = size; + dec->m_mv_BUF[i].used_flag = 0; + ret = 0; + if (debug) { + pr_info( + "MV Buffer %d: start_adr %p size %x\n", + i, + (void *)dec->m_mv_BUF[i].start_adr, + dec->m_mv_BUF[i].size); + } + } + return ret; +} + +static int init_mv_buf_list(struct AVS2Decoder_s *dec) +{ + int i; + int ret = 0; + int count = FRAME_BUFFERS; + int pic_width = dec->init_pic_w; + int pic_height = dec->init_pic_h; + int lcu_size = 64; /*fixed 64*/ + int pic_width_64 = (pic_width + 63) & (~0x3f); + int pic_height_32 = (pic_height + 31) & (~0x1f); + int pic_width_lcu = (pic_width_64 % lcu_size) ? 
+ pic_width_64 / lcu_size + 1 + : pic_width_64 / lcu_size; + int pic_height_lcu = (pic_height_32 % lcu_size) ? + pic_height_32 / lcu_size + 1 + : pic_height_32 / lcu_size; + int lcu_total = pic_width_lcu * pic_height_lcu; + int size = ((lcu_total * MV_MEM_UNIT) + 0xffff) & + (~0xffff); + if (mv_buf_margin > 0) + count = dec->avs2_dec.ref_maxbuffer + mv_buf_margin; + for (i = 0; i < count; i++) { + if (alloc_mv_buf(dec, i, size) < 0) { + ret = -1; + break; + } + } + return ret; +} +#if 0 + +static int get_mv_buf(struct AVS2Decoder_s *dec, + struct avs2_frame_s *pic) +{ + int i; + int ret = -1; + for (i = 0; i < FRAME_BUFFERS; i++) { + if (dec->m_mv_BUF[i].start_adr && + dec->m_mv_BUF[i].used_flag == 0) { + dec->m_mv_BUF[i].used_flag = 1; + ret = i; + break; + } + } + + if (ret >= 0) { + pic->mv_buf_index = ret; + pic->mpred_mv_wr_start_addr = + (dec->m_mv_BUF[ret].start_adr + 0xffff) & + (~0xffff); + if (debug & AVS2_DBG_BUFMGR_MORE) + pr_info( + "%s => %d (%d) size 0x%x\n", + __func__, ret, + pic->mpred_mv_wr_start_addr, + dec->m_mv_BUF[ret].size); + } else { + pr_info( + "%s: Error, mv buf is not enough\n", + __func__); + } + return ret; +} + +static void put_mv_buf(struct AVS2Decoder_s *dec, + struct avs2_frame_s *pic) +{ + int i = pic->mv_buf_index; + if (i >= FRAME_BUFFERS) { + if (debug & AVS2_DBG_BUFMGR_MORE) + pr_info( + "%s: index %d beyond range\n", + __func__, i); + return; + } + if (debug & AVS2_DBG_BUFMGR_MORE) + pr_info( + "%s(%d): used_flag(%d)\n", + __func__, i, + dec->m_mv_BUF[i].used_flag); + + pic->mv_buf_index = -1; + if (dec->m_mv_BUF[i].start_adr && + dec->m_mv_BUF[i].used_flag) + dec->m_mv_BUF[i].used_flag = 0; +} + +static void put_un_used_mv_bufs(struct AVS2Decoder_s *dec) +{ + struct VP9_Common_s *const cm = &dec->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + int i; + for (i = 0; i < dec->used_buf_num; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.index != -1) && + 
(frame_bufs[i].buf.mv_buf_index >= 0) + ) + put_mv_buf(dec, &frame_bufs[i].buf); + } +} +#endif + +#endif + +static int get_free_buf_count(struct AVS2Decoder_s *dec) +{ + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + int i; + int count = 0; + for (i = 0; i < avs2_dec->ref_maxbuffer; i++) { + if ((avs2_dec->fref[i]->imgcoi_ref < -256 +#if 0 + || abs(avs2_dec->fref[i]-> + imgtr_fwRefDistance - img->tr) >= 128 +#endif + ) && avs2_dec->fref[i]->is_output == -1 + && avs2_dec->fref[i]->bg_flag == 0 +#ifndef NO_DISPLAY + && avs2_dec->fref[i]->vf_ref == 0 + && avs2_dec->fref[i]->to_prepare_disp == 0 +#endif + ) { + count++; + } + } + + return count; +} + +#ifdef CONSTRAIN_MAX_BUF_NUM +static int get_vf_ref_only_buf_count(struct AVS2Decoder_s *dec) +{ + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + int i; + int count = 0; + for (i = 0; i < avs2_dec->ref_maxbuffer; i++) { + if ((avs2_dec->fref[i]->imgcoi_ref < -256 +#if 0 + || abs(avs2_dec->fref[i]-> + imgtr_fwRefDistance - img->tr) >= 128 +#endif + ) && avs2_dec->fref[i]->is_output == -1 + && avs2_dec->fref[i]->bg_flag == 0 +#ifndef NO_DISPLAY + && avs2_dec->fref[i]->vf_ref > 0 + && avs2_dec->fref[i]->to_prepare_disp == 0 +#endif + ) { + count++; + } + } + + return count; +} + +static int get_used_buf_count(struct AVS2Decoder_s *dec) +{ + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + int i; + int count = 0; + for (i = 0; i < avs2_dec->ref_maxbuffer; i++) { + if ((avs2_dec->fref[i]->imgcoi_ref >= -256 +#if 0 + || abs(avs2_dec->fref[i]-> + imgtr_fwRefDistance - img->tr) >= 128 +#endif + ) || avs2_dec->fref[i]->is_output != -1 + || avs2_dec->fref[i]->bg_flag != 0 +#ifndef NO_DISPLAY + || avs2_dec->fref[i]->vf_ref != 0 + || avs2_dec->fref[i]->to_prepare_disp != 0 +#endif + ) { + count++; + } + } + + return count; +} +#endif + +int avs2_bufmgr_init(struct AVS2Decoder_s *dec, struct BuffInfo_s *buf_spec_i, + struct buff_s *mc_buf_i) { + + dec->frame_count = 0; +#ifdef AVS2_10B_MMU + dec->used_4k_num = -1; + 
dec->cur_fb_idx_mmu = INVALID_IDX; +#endif + + + /* private init */ + dec->work_space_buf = buf_spec_i; +#ifndef AVS2_10B_MMU + dec->mc_buf = mc_buf_i; +#endif + dec->rpm_addr = NULL; + dec->lmem_addr = NULL; + + dec->use_cma_flag = 0; + dec->decode_idx = 0; + dec->slice_idx = 0; + /*int m_uiMaxCUWidth = 1<<7;*/ + /*int m_uiMaxCUHeight = 1<<7;*/ + dec->wait_buf = 0; + dec->error_flag = 0; + dec->skip_PB_before_I = 0; + + dec->pts_mode = PTS_NORMAL; + dec->last_pts = 0; + dec->last_lookup_pts = 0; + dec->last_pts_us64 = 0; + dec->last_lookup_pts_us64 = 0; + dec->shift_byte_count = 0; + dec->shift_byte_count_lo = 0; + dec->shift_byte_count_hi = 0; + dec->pts_mode_switching_count = 0; + dec->pts_mode_recovery_count = 0; + + dec->buf_num = 0; + + dec->bufmgr_error_count = 0; + return 0; +} + + + +#define HEVC_CM_BODY_START_ADDR 0x3626 +#define HEVC_CM_BODY_LENGTH 0x3627 +#define HEVC_CM_HEADER_LENGTH 0x3629 +#define HEVC_CM_HEADER_OFFSET 0x362b + +#define LOSLESS_COMPRESS_MODE + +/*#define DECOMP_HEADR_SURGENT*/ + +static u32 mem_map_mode; /* 0:linear 1:32x32 2:64x32 ; m8baby test1902 */ +static u32 enable_mem_saving = 1; +static u32 force_w_h; + +static u32 force_fps; + + +const u32 avs2_version = 201602101; +static u32 debug; +static u32 radr; +static u32 rval; +static u32 pop_shorts; +static u32 dbg_cmd; +static u32 dbg_skip_decode_index; +/* + * bit 0~3, for HEVCD_IPP_AXIIF_CONFIG endian config + * bit 8~23, for HEVC_SAO_CTRL1 endian config + */ +static u32 endian; +#define HEVC_CONFIG_BIG_ENDIAN ((0x880 << 8) | 0x8) +#define HEVC_CONFIG_LITTLE_ENDIAN ((0xff0 << 8) | 0xf) + +#ifdef ERROR_HANDLE_DEBUG +static u32 dbg_nal_skip_flag; + /* bit[0], skip vps; bit[1], skip sps; bit[2], skip pps */ +static u32 dbg_nal_skip_count; +#endif +/*for debug*/ +static u32 decode_pic_begin; +static uint slice_parse_begin; +static u32 step; +#ifdef MIX_STREAM_SUPPORT +static u32 buf_alloc_width = 4096; +static u32 buf_alloc_height = 2304; + +static u32 dynamic_buf_num_margin; +#else 
+static u32 buf_alloc_width; +static u32 buf_alloc_height; +static u32 dynamic_buf_num_margin = 7; +#endif +#ifdef CONSTRAIN_MAX_BUF_NUM +static u32 run_ready_max_vf_only_num; +static u32 run_ready_display_q_num; + /*0: not check + 0xff: avs2_dec.ref_maxbuffer + */ +static u32 run_ready_max_buf_num = 0xff; +#endif +static u32 buf_alloc_depth = 10; +static u32 buf_alloc_size; +/* +bit[0]: 0, + bit[1]: 0, always release cma buffer when stop + bit[1]: 1, never release cma buffer when stop +bit[0]: 1, when stop, release cma buffer if blackout is 1; +do not release cma buffer is blackout is not 1 + +bit[2]: 0, when start decoding, check current displayed buffer + (only for buffer decoded by vp9) if blackout is 0 + 1, do not check current displayed buffer + +bit[3]: 1, if blackout is not 1, do not release current + displayed cma buffer always. +*/ +/* set to 1 for fast play; + set to 8 for other case of "keep last frame" +*/ +static u32 buffer_mode = 1; +/* buffer_mode_dbg: debug only*/ +static u32 buffer_mode_dbg = 0xffff0000; +/**/ + +/* +bit 0, 1: only display I picture; +bit 1, 1: only decode I picture; +*/ +static u32 i_only_flag; + + +static u32 max_decoding_time; +/* +error handling +*/ +/*error_handle_policy: +bit 0: search seq again if buffer mgr error occur + (buffer mgr error count need big than + re_search_seq_threshold) +bit 1: 1, display from I picture; + 0, display from any correct pic +*/ + +static u32 error_handle_policy = 1; +/* +re_search_seq_threshold: + bit 7~0: buffer mgr error research seq count + bit 15~8: frame count threshold +*/ +static u32 re_search_seq_threshold = 0x800; /*0x8;*/ +/*static u32 parser_sei_enable = 1;*/ + +static u32 max_buf_num = (REF_BUFFER + 1); + +static u32 run_ready_min_buf_num = 2; + +static DEFINE_MUTEX(vavs2_mutex); + +#define HEVC_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0 +#define HEVC_RPM_BUFFER HEVC_ASSIST_SCRATCH_1 +#define AVS2_ALF_SWAP_BUFFER HEVC_ASSIST_SCRATCH_2 +#define HEVC_RCS_BUFFER HEVC_ASSIST_SCRATCH_3 
+#define HEVC_SPS_BUFFER HEVC_ASSIST_SCRATCH_4 +#define HEVC_PPS_BUFFER HEVC_ASSIST_SCRATCH_5 +//#define HEVC_SAO_UP HEVC_ASSIST_SCRATCH_6 +#ifdef AVS2_10B_MMU +#define AVS2_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_7 +#else +#define HEVC_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7 +#endif +#define HEVC_STREAM_SWAP_BUFFER2 HEVC_ASSIST_SCRATCH_8 +/* +#define VP9_PROB_SWAP_BUFFER HEVC_ASSIST_SCRATCH_9 +#define VP9_COUNT_SWAP_BUFFER HEVC_ASSIST_SCRATCH_A +#define VP9_SEG_MAP_BUFFER HEVC_ASSIST_SCRATCH_B +*/ +//#define HEVC_SCALELUT HEVC_ASSIST_SCRATCH_D +#define AVS2_CUVA_ADR HEVC_ASSIST_SCRATCH_A +#define AVS2_CUVA_DATA_SIZE HEVC_ASSIST_SCRATCH_B + +#define HEVC_WAIT_FLAG HEVC_ASSIST_SCRATCH_E +#define RPM_CMD_REG HEVC_ASSIST_SCRATCH_F +#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_9 +#define HEVC_STREAM_SWAP_TEST HEVC_ASSIST_SCRATCH_L +/*!!!*/ +#define HEVC_DECODE_COUNT HEVC_ASSIST_SCRATCH_M +#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N +#define DEBUG_REG1 HEVC_ASSIST_SCRATCH_G +#define DEBUG_REG2 HEVC_ASSIST_SCRATCH_H + + +/* +ucode parser/search control +bit 0: 0, header auto parse; 1, header manual parse +bit 1: 0, auto skip for noneseamless stream; 1, no skip +bit [3:2]: valid when bit1==0; +0, auto skip nal before first vps/sps/pps/idr; +1, auto skip nal before first vps/sps/pps +2, auto skip nal before first vps/sps/pps, + and not decode until the first I slice (with slice address of 0) + +3, auto skip before first I slice (nal_type >=16 && nal_type<=21) +bit [15:4] nal skip count (valid when bit0 == 1 (manual mode) ) +bit [16]: for NAL_UNIT_EOS when bit0 is 0: + 0, send SEARCH_DONE to arm ; 1, do not send SEARCH_DONE to arm +bit [17]: for NAL_SEI when bit0 is 0: + 0, do not parse SEI in ucode; 1, parse SEI in ucode +bit [31:20]: used by ucode for debug purpose +*/ +#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I + /*DECODE_MODE: set before start decoder + bit 7~0: decode mode + bit 23~16: start_decoding_flag + bit [0] - SEQ_ready + bit [2:1] - I Picture Count + bit 31~24: 
chip feature + */ +#define DECODE_MODE HEVC_ASSIST_SCRATCH_J +#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K + /*read only*/ +#define CUR_NAL_UNIT_TYPE HEVC_ASSIST_SCRATCH_J + +#define RPM_BUF_SIZE (0x600 * 2) +#define LMEM_BUF_SIZE (0x600 * 2) + + /*mmu_vbh buf is used by HEVC_SAO_MMU_VH0_ADDR, HEVC_SAO_MMU_VH1_ADDR*/ +#define VBH_BUF_SIZE_1080P 0x3000 +#define VBH_BUF_SIZE_4K 0x5000 +#define VBH_BUF_SIZE_8K 0xa000 +#define VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh.buf_size / 2) + /*mmu_vbh_dw buf is used by HEVC_SAO_MMU_VH0_ADDR2,HEVC_SAO_MMU_VH1_ADDR2, + HEVC_DW_VH0_ADDDR, HEVC_DW_VH1_ADDDR*/ +#define DW_VBH_BUF_SIZE_1080P (VBH_BUF_SIZE_1080P * 2) +#define DW_VBH_BUF_SIZE_4K (VBH_BUF_SIZE_4K * 2) +#define DW_VBH_BUF_SIZE_8K (VBH_BUF_SIZE_8K * 2) +#define DW_VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh_dw.buf_size / 4) + +/* necessary 4K page size align for t7/t3 decoder and after */ +#define WORKBUF_ALIGN(addr) (ALIGN(addr, PAGE_SIZE)) + +#define WORK_BUF_SPEC_NUM 6 +static struct BuffInfo_s amvavs2_workbuff_spec[WORK_BUF_SPEC_NUM] = { + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = { + /* IPP work space calculation : + 4096 * (Y+CbCr+Flags) = 12k, round to 16k */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x30000, + }, + .sao_vb = { + .buf_size = 0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800) */ + .buf_size = 0x800, + }, + .rcs = { + /* RCS STORE AREA - Max 32 RCS, each has 32 bytes, + total 0x0400 bytes */ + .buf_size = 0x400, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes*/ + .buf_size = 0x800, + }, + .pps = { + /*PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + total 0x2000 bytes*/ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + each has 16 bytes total 0x2800 bytes */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + (only 144 
cycles valid) */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = + 32Kbytes (0x8000) */ + .buf_size = 0x8000, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x40000, + }, + .dblk_data = { + .buf_size = 0x40000, + }, + .dblk_data2 = { + .buf_size = 0x40000, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = 0x5000, /*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x8000, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = {/* 1080p, 0x40000 per buffer */ + .buf_size = 0x40000 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096, + .max_height = 2304, + .ipp = { + /* IPP work space calculation : + 4096 * (Y+CbCr+Flags) = 12k, round to 16k */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x30000, + }, + .sao_vb = { + .buf_size = 0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800) */ + .buf_size = 0x800, + }, + .rcs = { + /* RCS STORE AREA - Max 16 RCS, each has 32 bytes, + total 0x0400 bytes */ + .buf_size = 0x400, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + total 0x2000 bytes */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + each has 16 bytes total 0x2800 bytes */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + (only 144 cycles valid) */ + .buf_size = 0x800, + }, + .swap_buf2 = { + 
.buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + (0x8000) */ + .buf_size = 0x8000, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x80000, + }, + .dblk_data = { + /*DBLK -> Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x80000, + }, + .dblk_data2 = { + .buf_size = 0x80000, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = 0x5000,/*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x10000, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = 0x120000 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096 * 2, + .max_height = 2304 * 2, + .ipp = { + /*IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, + round to 16k*/ + .buf_size = 0x4000 * 2, + }, + .sao_abv = { + .buf_size = 0x30000 * 2, + }, + .sao_vb = { + .buf_size = 0x30000 * 2, + }, + .short_term_rps = { + /*SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800)*/ + .buf_size = 0x800, + }, + .rcs = { + /*RCS STORE AREA - Max 16 RCS, each has 32 bytes, + total 0x0400 bytes*/ + .buf_size = 0x400, + }, + .sps = { + /*SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes*/ + .buf_size = 0x800, + }, + .pps = { + /*PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total + 0x2000 bytes*/ + .buf_size = 0x2000, + }, + .sao_up = { + /*SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes i + total 0x2800 bytes*/ + .buf_size = 0x2800 * 2, + }, 
+ .swap_buf = { + /*256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid)*/ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /*support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000)*/ + .buf_size = 0x8000 * 2, + }, + .dblk_para = { + .buf_size = 0x40000 * 2, + }, + .dblk_data = { + .buf_size = 0x80000 * 2, + }, + .dblk_data2 = { + .buf_size = 0x80000 * 2, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = 0x5000 * 2, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = { + /*0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_8K_HEADER_SIZE * 17, + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x8000 * 2, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /*4k2k , 0x100000 per buffer*/ + .buf_size = 0x120000 * FRAME_BUFFERS * 4, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = { + /* IPP work space calculation : + 4096 * (Y+CbCr+Flags) = 12k, round to 16k */ + .buf_size = 0x1e00, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800) */ + .buf_size = 0x800, + }, + .rcs = { + /* RCS STORE AREA - Max 32 RCS, each has 32 bytes, + total 0x0400 bytes */ + .buf_size = 0x400, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes*/ + .buf_size = 0x800, + }, + .pps = { + /*PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + total 0x2000 bytes*/ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + each has 16 bytes total 0x2800 bytes */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + (only 144 cycles valid) */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = + 32Kbytes (0x8000) 
*/ + .buf_size = 0, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x3d00, //0x3c80, + }, + .dblk_data = { + .buf_size = 0x62800, + }, + .dblk_data2 = { + .buf_size = 0x62800, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_1080P, /*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x1e00, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = {/* 1080p, 0x40000 per buffer */ + .buf_size = CO_MV_BUF_SIZE_1080P * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096, + .max_height = 2304, + .ipp = { + /* IPP work space calculation : + 4096 * (Y+CbCr+Flags) = 12k, round to 16k */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800) */ + .buf_size = 0x800, + }, + .rcs = { + /* RCS STORE AREA - Max 16 RCS, each has 32 bytes, + total 0x0400 bytes */ + .buf_size = 0x400, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + total 0x2000 bytes */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + each has 16 bytes total 0x2800 bytes */ + .buf_size = 0, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + (only 144 cycles valid) */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + (0x8000) */ + .buf_size = 0, + }, + .dblk_para = { + /* DBLK -> 
Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x8100, //0x8080, + }, + .dblk_data = { + /*DBLK -> Max 256(4096/16) LCU, + each para 1024bytes(total:0x40000), + data 1024bytes(total:0x40000)*/ + .buf_size = 0x88800, + }, + .dblk_data2 = { + .buf_size = 0x88800, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_4K,/*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x4000, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = CO_MV_BUF_SIZE_4K * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096 * 2, + .max_height = 2304 * 2, + .ipp = { + /*IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, + round to 16k*/ + .buf_size = 0x4000 * 2, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + /*SHORT_TERM_RPS - Max 64 set, 16 entry every set, + total 64x16x2 = 2048 bytes (0x800)*/ + .buf_size = 0x800, + }, + .rcs = { + /*RCS STORE AREA - Max 16 RCS, each has 32 bytes, + total 0x0400 bytes*/ + .buf_size = 0x400, + }, + .sps = { + /*SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + total 0x0800 bytes*/ + .buf_size = 0x800, + }, + .pps = { + /*PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total + 0x2000 bytes*/ + .buf_size = 0x2000, + }, + .sao_up = { + /*SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes i + total 0x2800 bytes*/ + .buf_size = 0, + }, + .swap_buf = { + /*256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid)*/ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + 
/*support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000)*/ + .buf_size = 0, + }, + .dblk_para = { + .buf_size = 0x10100, //0x10080, + }, + .dblk_data = { + .buf_size = 0x110800, + }, + .dblk_data2 = { + .buf_size = 0x110800, + }, +#ifdef AVS2_10B_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_8K, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = { + /*0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_8K_HEADER_SIZE * 17, + }, +#endif +#endif + .mpred_above = { + .buf_size = 0x8000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /*4k2k , 0x100000 per buffer*/ + .buf_size = CO_MV_BUF_SIZE_8K * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + } +}; + +#define IS_8K_SIZE(w, h) (((w) * (h)) > MAX_SIZE_4K) +#define IS_4K_SIZE(w, h) (((w) * (h)) > (1920*1088)) +#ifndef MV_USE_FIXED_BUF +static uint32_t get_mv_buf_size(struct AVS2Decoder_s *dec, int width, int height) { + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + uint32_t size; + if (mv_buf_dynamic_alloc == 1) { + int mv_mem_unit = + avs2_dec->lcu_size_log2 == 6 ? 0x200 : avs2_dec->lcu_size_log2 == + 5 ? 
0x80 : 0x20; + int extended_pic_width = (width + avs2_dec->lcu_size -1) + & (~(avs2_dec->lcu_size - 1)); + int extended_pic_height = (height + avs2_dec->lcu_size -1) + & (~(avs2_dec->lcu_size - 1)); + int lcu_x_num = extended_pic_width / avs2_dec->lcu_size; + int lcu_y_num = extended_pic_height / avs2_dec->lcu_size; + int new_size = lcu_x_num * lcu_y_num * mv_mem_unit; + size = (new_size + 0xffff) & (~0xffff); + + } else { + if (IS_8K_SIZE(width, height)) + size = CO_MV_BUF_SIZE_8K; + else if (IS_4K_SIZE(width, height)) + size = CO_MV_BUF_SIZE_4K; + else + size = CO_MV_BUF_SIZE_1080P; + } + return size; +} +#endif + +/*Losless compression body buffer size 4K per 64x32 (jt)*/ +static int compute_losless_comp_body_size(struct AVS2Decoder_s *dec, + int width, int height, + uint8_t is_bit_depth_10) +{ + int width_x64; + int height_x32; + int bsize; + width_x64 = width + 63; + width_x64 >>= 6; + height_x32 = height + 31; + height_x32 >>= 5; +#ifdef AVS2_10B_MMU + bsize = (is_bit_depth_10 ? 4096 : 3200) + * width_x64 * height_x32; +#else + bsize = (is_bit_depth_10 ? 
4096 : 3072) + * width_x64 * height_x32; +#endif + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s(%d,%d,%d)=>%d\n", + __func__, width, height, + is_bit_depth_10, bsize); + + return bsize; +} + +/* Losless compression header buffer size 32bytes per 128x64 (jt)*/ +static int compute_losless_comp_header_size(struct AVS2Decoder_s *dec, + int width, int height) +{ + int width_x128; + int height_x64; + int hsize; + width_x128 = width + 127; + width_x128 >>= 7; + height_x64 = height + 63; + height_x64 >>= 6; + + hsize = 32 * width_x128 * height_x64; + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s(%d,%d)=>%d\n", + __func__, width, height, + hsize); + + return hsize; +} + +static void init_buff_spec(struct AVS2Decoder_s *dec, + struct BuffInfo_s *buf_spec) +{ + void *mem_start_virt; + + buf_spec->ipp.buf_start = + WORKBUF_ALIGN(buf_spec->start_adr); + buf_spec->sao_abv.buf_start = + WORKBUF_ALIGN(buf_spec->ipp.buf_start + buf_spec->ipp.buf_size); + buf_spec->sao_vb.buf_start = + WORKBUF_ALIGN(buf_spec->sao_abv.buf_start + buf_spec->sao_abv.buf_size); + buf_spec->short_term_rps.buf_start = + WORKBUF_ALIGN(buf_spec->sao_vb.buf_start + buf_spec->sao_vb.buf_size); + buf_spec->rcs.buf_start = + WORKBUF_ALIGN(buf_spec->short_term_rps.buf_start + buf_spec->short_term_rps.buf_size); + buf_spec->sps.buf_start = + WORKBUF_ALIGN(buf_spec->rcs.buf_start + buf_spec->rcs.buf_size); + buf_spec->pps.buf_start = + WORKBUF_ALIGN(buf_spec->sps.buf_start + buf_spec->sps.buf_size); + buf_spec->sao_up.buf_start = + WORKBUF_ALIGN(buf_spec->pps.buf_start + buf_spec->pps.buf_size); + buf_spec->swap_buf.buf_start = + WORKBUF_ALIGN(buf_spec->sao_up.buf_start + buf_spec->sao_up.buf_size); + buf_spec->swap_buf2.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf.buf_start + buf_spec->swap_buf.buf_size); + buf_spec->scalelut.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf2.buf_start + buf_spec->swap_buf2.buf_size); + buf_spec->dblk_para.buf_start = + WORKBUF_ALIGN(buf_spec->scalelut.buf_start + 
buf_spec->scalelut.buf_size); + buf_spec->dblk_data.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_para.buf_start + buf_spec->dblk_para.buf_size); + buf_spec->dblk_data2.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data.buf_start + buf_spec->dblk_data.buf_size); +#ifdef AVS2_10B_MMU + buf_spec->mmu_vbh.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data2.buf_start + buf_spec->dblk_data2.buf_size); + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size); +#else + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data2.buf_start + buf_spec->dblk_data2.buf_size); +#endif +#ifdef MV_USE_FIXED_BUF + buf_spec->mpred_mv.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_mv.buf_start + buf_spec->mpred_mv.buf_size); +#else + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); +#endif + buf_spec->lmem.buf_start = + WORKBUF_ALIGN(buf_spec->rpm.buf_start + buf_spec->rpm.buf_size); + buf_spec->end_adr = + WORKBUF_ALIGN(buf_spec->lmem.buf_start + buf_spec->lmem.buf_size); + + if (dec) { + mem_start_virt = + codec_mm_phys_to_virt(buf_spec->dblk_para.buf_start); + if (mem_start_virt) { + memset(mem_start_virt, 0, buf_spec->dblk_para.buf_size); + codec_mm_dma_flush(mem_start_virt, + buf_spec->dblk_para.buf_size, + DMA_TO_DEVICE); + } else { + /*not virt for tvp playing, + may need clear on ucode.*/ + pr_err("mem_start_virt failed\n"); + } + if (debug) { + pr_info("%s workspace (%x %x) size = %x\n", __func__, + buf_spec->start_adr, buf_spec->end_adr, + buf_spec->end_adr - buf_spec->start_adr); + } + if (debug) { + pr_info("ipp.buf_start :%x\n", + buf_spec->ipp.buf_start); + pr_info("sao_abv.buf_start :%x\n", + buf_spec->sao_abv.buf_start); + pr_info("sao_vb.buf_start :%x\n", + buf_spec->sao_vb.buf_start); + pr_info("short_term_rps.buf_start :%x\n", + 
buf_spec->short_term_rps.buf_start); + pr_info("rcs.buf_start :%x\n", + buf_spec->rcs.buf_start); + pr_info("sps.buf_start :%x\n", + buf_spec->sps.buf_start); + pr_info("pps.buf_start :%x\n", + buf_spec->pps.buf_start); + pr_info("sao_up.buf_start :%x\n", + buf_spec->sao_up.buf_start); + pr_info("swap_buf.buf_start :%x\n", + buf_spec->swap_buf.buf_start); + pr_info("swap_buf2.buf_start :%x\n", + buf_spec->swap_buf2.buf_start); + pr_info("scalelut.buf_start :%x\n", + buf_spec->scalelut.buf_start); + pr_info("dblk_para.buf_start :%x\n", + buf_spec->dblk_para.buf_start); + pr_info("dblk_data.buf_start :%x\n", + buf_spec->dblk_data.buf_start); + pr_info("dblk_data2.buf_start :%x\n", + buf_spec->dblk_data2.buf_start); + #ifdef AVS2_10B_MMU + pr_info("mmu_vbh.buf_start :%x\n", + buf_spec->mmu_vbh.buf_start); + #endif + pr_info("mpred_above.buf_start :%x\n", + buf_spec->mpred_above.buf_start); +#ifdef MV_USE_FIXED_BUF + pr_info("mpred_mv.buf_start :%x\n", + buf_spec->mpred_mv.buf_start); +#endif + if ((debug & AVS2_DBG_SEND_PARAM_WITH_REG) == 0) { + pr_info("rpm.buf_start :%x\n", + buf_spec->rpm.buf_start); + } + } + } + +} + +static void uninit_mmu_buffers(struct AVS2Decoder_s *dec) +{ +#if 0 +/*ndef MV_USE_FIXED_BUF*/ + dealloc_mv_bufs(dec); +#endif + decoder_mmu_box_free(dec->mmu_box); + dec->mmu_box = NULL; + + if (dec->bmmu_box) + decoder_bmmu_box_free(dec->bmmu_box); + dec->bmmu_box = NULL; +} + +#ifndef AVS2_10B_MMU +static void init_buf_list(struct AVS2Decoder_s *dec) +{ + int i; + int buf_size; + int mc_buffer_end = dec->mc_buf->buf_start + dec->mc_buf->buf_size; + dec->used_buf_num = max_buf_num; + + if (dec->used_buf_num > MAX_BUF_NUM) + dec->used_buf_num = MAX_BUF_NUM; + if (buf_alloc_size > 0) { + buf_size = buf_alloc_size; + avs2_print(dec, AVS2_DBG_BUFMGR, + "[Buffer Management] init_buf_list:\n"); + } else { + int pic_width = dec->init_pic_w; + int pic_height = dec->init_pic_h; + + /*SUPPORT_10BIT*/ + int losless_comp_header_size = 
compute_losless_comp_header_size + (dec, pic_width, pic_height); + int losless_comp_body_size = compute_losless_comp_body_size + (dec, pic_width, pic_height, buf_alloc_depth == 10); + int mc_buffer_size = losless_comp_header_size + + losless_comp_body_size; + int mc_buffer_size_h = (mc_buffer_size + 0xffff)>>16; + + int dw_mode = get_double_write_mode_init(dec); + + if (dw_mode) { + int pic_width_dw = pic_width / + get_double_write_ratio(dw_mode); + int pic_height_dw = pic_height / + get_double_write_ratio(dw_mode); + int lcu_size = 64; /*fixed 64*/ + int pic_width_64 = (pic_width_dw + 63) & (~0x3f); + int pic_height_32 = (pic_height_dw + 31) & (~0x1f); + int pic_width_lcu = + (pic_width_64 % lcu_size) ? pic_width_64 / lcu_size + + 1 : pic_width_64 / lcu_size; + int pic_height_lcu = + (pic_height_32 % lcu_size) ? pic_height_32 / lcu_size + + 1 : pic_height_32 / lcu_size; + int lcu_total = pic_width_lcu * pic_height_lcu; + int mc_buffer_size_u_v = lcu_total * lcu_size * lcu_size / 2; + int mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16; + /*64k alignment*/ + buf_size = ((mc_buffer_size_u_v_h << 16) * 3); + } else + buf_size = 0; + + if (mc_buffer_size & 0xffff) { /*64k alignment*/ + mc_buffer_size_h += 1; + } + if ((dw_mode & 0x10) == 0) + buf_size += (mc_buffer_size_h << 16); + avs2_print(dec, AVS2_DBG_BUFMGR, + "init_buf_list num %d (width %d height %d):\n", + dec->used_buf_num, pic_width, pic_height); + } + + for (i = 0; i < dec->used_buf_num; i++) { + if (((i + 1) * buf_size) > dec->mc_buf->buf_size) + dec->use_cma_flag = 1; +#ifndef AVS2_10B_MMU + dec->m_BUF[i].alloc_flag = 0; + dec->m_BUF[i].index = i; + + dec->use_cma_flag = 1; + if (dec->use_cma_flag) { + dec->m_BUF[i].cma_page_count = + PAGE_ALIGN(buf_size) / PAGE_SIZE; + if (decoder_bmmu_box_alloc_buf_phy(dec->bmmu_box, + VF_BUFFER_IDX(i), buf_size, DRIVER_NAME, + &dec->m_BUF[i].alloc_addr) < 0) { + dec->m_BUF[i].cma_page_count = 0; + if (i <= 5) { + dec->fatal_error |= + 
DECODER_FATAL_ERROR_NO_MEM; + } + break; + } + dec->m_BUF[i].start_adr = dec->m_BUF[i].alloc_addr; + } else { + dec->m_BUF[i].cma_page_count = 0; + dec->m_BUF[i].alloc_addr = 0; + dec->m_BUF[i].start_adr = + dec->mc_buf->buf_start + i * buf_size; + } + dec->m_BUF[i].size = buf_size; + dec->m_BUF[i].free_start_adr = dec->m_BUF[i].start_adr; + + if (((dec->m_BUF[i].start_adr + buf_size) > mc_buffer_end) + && (dec->m_BUF[i].alloc_addr == 0)) { + if (debug) { + avs2_print(dec, 0, + "Max mc buffer or mpred_mv buffer is used\n"); + } + break; + } + + avs2_print(dec, AVS2_DBG_BUFMGR, + "Buffer %d: start_adr %p size %x\n", i, + (void *)dec->m_BUF[i].start_adr, + dec->m_BUF[i].size); +#endif + } + dec->buf_num = i; +} +#endif + +static int config_pic(struct AVS2Decoder_s *dec, + struct avs2_frame_s *pic, int32_t lcu_size_log2) +{ + int ret = -1; + int i; + int pic_width = dec->init_pic_w; + int pic_height = dec->init_pic_h; + /*struct avs2_decoder *avs2_dec = &dec->avs2_dec; + int32_t lcu_size_log2 = avs2_dec->lcu_size_log2;*/ + int32_t lcu_size = 1 << lcu_size_log2; + int pic_width_64 = (pic_width + 63) & (~0x3f); + int pic_height_32 = (pic_height + 31) & (~0x1f); + int pic_width_lcu = (pic_width_64 % lcu_size) ? + pic_width_64 / lcu_size + 1 + : pic_width_64 / lcu_size; + int pic_height_lcu = (pic_height_32 % lcu_size) ? + pic_height_32 / lcu_size + 1 + : pic_height_32 / lcu_size; + int lcu_total = pic_width_lcu * pic_height_lcu; +#if 0 + int32_t MV_MEM_UNIT = + (lcu_size_log2 == 6) ? 0x200 : + ((lcu_size_log2 == 5) ? 
0x80 : 0x20); +#endif +#ifdef MV_USE_FIXED_BUF + u32 mpred_mv_end = dec->work_space_buf->mpred_mv.buf_start + + dec->work_space_buf->mpred_mv.buf_size; +#endif + u32 y_adr = 0; + int buf_size = 0; + + int losless_comp_header_size = + compute_losless_comp_header_size( + dec, pic_width, pic_height); + int losless_comp_body_size = compute_losless_comp_body_size( + dec, pic_width, + pic_height, buf_alloc_depth == 10); + int mc_buffer_size = losless_comp_header_size + losless_comp_body_size; + int mc_buffer_size_h = (mc_buffer_size + 0xffff) >> 16; + int mc_buffer_size_u_v = 0; + int mc_buffer_size_u_v_h = 0; + int dw_mode = get_double_write_mode_init(dec); + + if (dw_mode) { + int pic_width_dw = pic_width / + get_double_write_ratio(dw_mode); + int pic_height_dw = pic_height / + get_double_write_ratio(dw_mode); + int pic_width_64_dw = (pic_width_dw + 63) & (~0x3f); + int pic_height_32_dw = (pic_height_dw + 31) & (~0x1f); + int pic_width_lcu_dw = (pic_width_64_dw % lcu_size) ? + pic_width_64_dw / lcu_size + 1 + : pic_width_64_dw / lcu_size; + int pic_height_lcu_dw = (pic_height_32_dw % lcu_size) ? 
+ pic_height_32_dw / lcu_size + 1 + : pic_height_32_dw / lcu_size; + int lcu_total_dw = pic_width_lcu_dw * pic_height_lcu_dw; + + mc_buffer_size_u_v = lcu_total_dw * lcu_size * lcu_size / 2; + mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16; + /*64k alignment*/ + buf_size = ((mc_buffer_size_u_v_h << 16) * 3); + buf_size = ((buf_size + 0xffff) >> 16) << 16; + } + if (mc_buffer_size & 0xffff) /*64k alignment*/ + mc_buffer_size_h += 1; +#ifndef AVS2_10B_MMU + if ((dw_mode & 0x10) == 0) + buf_size += (mc_buffer_size_h << 16); +#endif + +#ifdef AVS2_10B_MMU +#ifndef DYNAMIC_ALLOC_HEAD + pic->header_adr = decoder_bmmu_box_get_phy_addr( + dec->bmmu_box, HEADER_BUFFER_IDX(pic->index)); + + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "buf_size %d, MMU header_adr %d: %ld\n", + buf_size, pic->index, pic->header_adr); +#endif +#endif + + i = pic->index; +#ifdef MV_USE_FIXED_BUF +#ifdef G12A_BRINGUP_DEBUG + if (1) { +#else + if ((dec->work_space_buf->mpred_mv.buf_start + + (((i + 1) * lcu_total) * MV_MEM_UNIT)) + <= mpred_mv_end + ) { +#endif +#endif +#ifndef AVS2_10B_MMU + if (debug) { + pr_err("start %x .size=%d\n", + dec->mc_buf_spec.buf_start + i * buf_size, + buf_size); + } +#endif +#ifndef AVS2_10B_MMU + for (i = 0; i < dec->buf_num; i++) { + y_adr = ((dec->m_BUF[i].free_start_adr + + 0xffff) >> 16) << 16; + /*64k alignment*/ + if ((y_adr+buf_size) <= (dec->m_BUF[i].start_adr+ + dec->m_BUF[i].size)) { + dec->m_BUF[i].free_start_adr = + y_adr + buf_size; + break; + } + } + if (i < dec->buf_num) +#else + /*if ((dec->mc_buf->buf_start + (i + 1) * buf_size) < + dec->mc_buf->buf_end) + y_adr = dec->mc_buf->buf_start + i * buf_size; + else {*/ + if (buf_size > 0 && pic->cma_alloc_addr == 0) { + ret = decoder_bmmu_box_alloc_buf_phy(dec->bmmu_box, + VF_BUFFER_IDX(i), + buf_size, DRIVER_NAME, + &pic->cma_alloc_addr); + if (ret < 0) { + avs2_print(dec, 0, + "decoder_bmmu_box_alloc_buf_phy idx %d size %d fail\n", + VF_BUFFER_IDX(i), + buf_size + ); + return ret; + } + + if 
(pic->cma_alloc_addr) + y_adr = pic->cma_alloc_addr; + else { + avs2_print(dec, 0, + "decoder_bmmu_box_alloc_buf_phy idx %d size %d return null\n", + VF_BUFFER_IDX(i), + buf_size + ); + return -1; + } + } +#endif + { + /*ensure get_pic_by_POC() + not get the buffer not decoded*/ + pic->BUF_index = i; + pic->lcu_total = lcu_total; + + pic->comp_body_size = losless_comp_body_size; + pic->buf_size = buf_size; +#ifndef AVS2_10B_MMU + pic->mc_y_adr = y_adr; +#endif + pic->mc_canvas_y = pic->index; + pic->mc_canvas_u_v = pic->index; +#ifndef AVS2_10B_MMU + if (dw_mode & 0x10) { + pic->mc_u_v_adr = y_adr + + ((mc_buffer_size_u_v_h << 16) << 1); + + pic->mc_canvas_y = + (pic->index << 1); + pic->mc_canvas_u_v = + (pic->index << 1) + 1; + + pic->dw_y_adr = y_adr; + pic->dw_u_v_adr = pic->mc_u_v_adr; + } else +#endif + if (dw_mode) { + pic->dw_y_adr = y_adr +#ifndef AVS2_10B_MMU + + (mc_buffer_size_h << 16) +#endif + ; + pic->dw_u_v_adr = pic->dw_y_adr + + ((mc_buffer_size_u_v_h << 16) << 1); +#ifdef AVS2_10B_MMU + pic->mc_y_adr = pic->dw_y_adr; + pic->mc_u_v_adr = pic->dw_u_v_adr; +#endif + } +#ifdef MV_USE_FIXED_BUF +#ifdef G12A_BRINGUP_DEBUG + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + pic->mpred_mv_wr_start_addr = + dec->work_space_buf->mpred_mv.buf_start + + (pic->index * 0x120000 * 4); + } else { + pic->mpred_mv_wr_start_addr = + dec->work_space_buf->mpred_mv.buf_start + + (pic->index * 0x120000); + } +#else + pic->mpred_mv_wr_start_addr = + dec->work_space_buf->mpred_mv.buf_start + + ((pic->index * lcu_total) + * MV_MEM_UNIT); +#endif +#endif + if (debug) { + avs2_print(dec, AVS2_DBG_BUFMGR, + "%s index %d BUF_index %d mc_y_adr %x ", + __func__, pic->index, + pic->BUF_index, + pic->mc_y_adr); + avs2_print_cont(dec, AVS2_DBG_BUFMGR, + "comp_body_size %x comp_buf_size %x ", + pic->comp_body_size, + pic->buf_size); + avs2_print_cont(dec, AVS2_DBG_BUFMGR, + "mpred_mv_wr_start_adr %d\n", + pic->mpred_mv_wr_start_addr); + avs2_print_cont(dec, AVS2_DBG_BUFMGR, 
+ "dw_y_adr %d, pic->dw_u_v_adr =%d\n", + pic->dw_y_adr, + pic->dw_u_v_adr); + } + ret = 0; + } +#ifdef MV_USE_FIXED_BUF + } else { + avs2_print(dec, 0, + "mv buffer alloc fail %x > %x\n", + dec->work_space_buf->mpred_mv.buf_start + + (((i + 1) * lcu_total) * MV_MEM_UNIT), + mpred_mv_end); + } +#endif + return ret; +} + +static void init_pic_list(struct AVS2Decoder_s *dec, + int32_t lcu_size_log2) +{ + int i; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *pic; +#ifdef AVS2_10B_MMU + unsigned long buf_addr1; + /*alloc AVS2 compress header first*/ + if (decoder_bmmu_box_alloc_buf_phy + (dec->bmmu_box, + HEADER_BUFFER_IDX(-1), get_compress_header_size(dec), + DRIVER_HEADER_NAME, + &buf_addr1) < 0){ + avs2_print(dec, 0, + "%s malloc compress header failed %d\n", + DRIVER_HEADER_NAME, -1); + dec->fatal_error |= DECODER_FATAL_ERROR_NO_MEM; + return; + } +#ifndef DYNAMIC_ALLOC_HEAD + for (i = 0; i < dec->used_buf_num; i++) { + unsigned long buf_addr; + if (decoder_bmmu_box_alloc_buf_phy + (dec->bmmu_box, + HEADER_BUFFER_IDX(i), get_compress_header_size(dec), + DRIVER_HEADER_NAME, + &buf_addr) < 0){ + avs2_print(dec, 0, + "%s malloc compress header failed %d\n", + DRIVER_HEADER_NAME, i); + dec->fatal_error |= DECODER_FATAL_ERROR_NO_MEM; + return; + } + } +#endif +#endif + dec->frame_height = avs2_dec->img.height; + dec->frame_width = avs2_dec->img.width; + + for (i = 0; i < dec->used_buf_num; i++) { + if (i == (dec->used_buf_num - 1)) + pic = avs2_dec->m_bg; + else + pic = avs2_dec->fref[i]; + pic->index = i; + pic->BUF_index = -1; + pic->mv_buf_index = -1; + if (config_pic(dec, pic, lcu_size_log2) < 0) { + if (debug) + avs2_print(dec, 0, + "Config_pic %d fail\n", + pic->index); + pic->index = -1; + break; + } + pic->pic_w = avs2_dec->img.width; + pic->pic_h = avs2_dec->img.height; + } + for (; i < dec->used_buf_num; i++) { + if (i == (dec->used_buf_num - 1)) + pic = avs2_dec->m_bg; + else + pic = avs2_dec->fref[i]; + pic->index = -1; + 
pic->BUF_index = -1; + pic->mv_buf_index = -1; + } + avs2_print(dec, AVS2_DBG_BUFMGR, + "%s ok, used_buf_num = %d\n", + __func__, dec->used_buf_num); + dec->pic_list_init_flag = 1; +} + + +static void init_pic_list_hw(struct AVS2Decoder_s *dec) +{ + int i; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *pic; + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x0);*/ +#if 0 + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (0x1 << 2)); + +#ifdef DUAL_CORE_64 + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (0x1 << 2)); +#endif +#endif + for (i = 0; i < dec->used_buf_num; i++) { + if (i == (dec->used_buf_num - 1)) + pic = avs2_dec->m_bg; + else + pic = avs2_dec->fref[i]; + if (pic->index < 0) + break; +#ifdef AVS2_10B_MMU + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + pic->header_adr + | (pic->mc_canvas_y << 8)|0x1);*/ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (pic->index << 8)); + +#ifdef DUAL_CORE_64 + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXLX2) + WRITE_VREG(HEVC2_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (pic->index << 8)); + else + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (pic->index << 8)); +#endif + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, pic->header_adr >> 5); +#else + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + pic->mc_y_adr + | (pic->mc_canvas_y << 8) | 0x1);*/ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, pic->mc_y_adr >> 5); +#endif +#ifndef LOSLESS_COMPRESS_MODE + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + pic->mc_u_v_adr + | (pic->mc_canvas_u_v << 8)| 0x1);*/ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, pic->mc_u_v_adr >> 5); +#endif +#ifdef DUAL_CORE_64 +#ifdef AVS2_10B_MMU + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_DATA, + pic->header_adr >> 5); +#else + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_DATA, + pic->mc_y_adr >> 5); +#endif +#ifndef LOSLESS_COMPRESS_MODE + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_DATA, + pic->mc_u_v_adr >> 5); +#endif 
+/*DUAL_CORE_64*/ +#endif + } + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1); +#ifdef DUAL_CORE_64 + WRITE_VREG(HEVC2_HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + 0x1); +#endif + /*Zero out canvas registers in IPP -- avoid simulation X*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 1); + for (i = 0; i < 32; i++) { + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); +#ifdef DUAL_CORE_64 + WRITE_VREG(HEVC2_HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); +#endif + } +} + + +static void dump_pic_list(struct AVS2Decoder_s *dec) +{ + int ii; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + for (ii = 0; ii < avs2_dec->ref_maxbuffer; ii++) { + avs2_print(dec, 0, + "fref[%d]: index %d decode_id %d mvbuf %d imgcoi_ref %d imgtr_fwRefDistance %d refered %d, pre %d is_out %d, bg %d, vf_ref %d error %d lcu %d ref_pos(%d,%d,%d,%d,%d,%d,%d)\n", + ii, avs2_dec->fref[ii]->index, + avs2_dec->fref[ii]->decode_idx, + avs2_dec->fref[ii]->mv_buf_index, + avs2_dec->fref[ii]->imgcoi_ref, + avs2_dec->fref[ii]->imgtr_fwRefDistance, + avs2_dec->fref[ii]->refered_by_others, + avs2_dec->fref[ii]->to_prepare_disp, + avs2_dec->fref[ii]->is_output, + avs2_dec->fref[ii]->bg_flag, + avs2_dec->fref[ii]->vf_ref, + avs2_dec->fref[ii]->error_mark, + avs2_dec->fref[ii]->decoded_lcu, + avs2_dec->fref[ii]->ref_poc[0], + avs2_dec->fref[ii]->ref_poc[1], + avs2_dec->fref[ii]->ref_poc[2], + avs2_dec->fref[ii]->ref_poc[3], + avs2_dec->fref[ii]->ref_poc[4], + avs2_dec->fref[ii]->ref_poc[5], + avs2_dec->fref[ii]->ref_poc[6] + ); + } + return; +} + +static int config_mc_buffer(struct AVS2Decoder_s *dec) +{ + int32_t i; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *pic; + struct avs2_frame_s *cur_pic = avs2_dec->hc.cur_pic; + + /*if (avs2_dec->img.type == I_IMG) + return 0; + */ + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "Entered config_mc_buffer....\n"); + if (avs2_dec->f_bg != NULL) { + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "config_mc_buffer for background (canvas_y %d, 
canvas_u_v %d)\n", + avs2_dec->f_bg->mc_canvas_y, avs2_dec->f_bg->mc_canvas_u_v); + /*WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (7 << 8) | (0<<1) | 1); L0:BG */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (15 << 8) | (0<<1) | 1); /* L0:BG*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (avs2_dec->f_bg->mc_canvas_u_v << 16) | + (avs2_dec->f_bg->mc_canvas_u_v << 8) | + avs2_dec->f_bg->mc_canvas_y); + /*WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (23 << 8) | (0<<1) | 1); L1:BG*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (31 << 8) | (0<<1) | 1); /* L1:BG*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (avs2_dec->f_bg->mc_canvas_u_v << 16) | + (avs2_dec->f_bg->mc_canvas_u_v << 8) | + avs2_dec->f_bg->mc_canvas_y); + } + + if (avs2_dec->img.type == I_IMG) + return 0; + + if (avs2_dec->img.type == P_IMG) { + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "config_mc_buffer for P_IMG, img type %d\n", + avs2_dec->img.type); + /*refer to prepare_RefInfo()*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0<<1) | 1); + for (i = 0; i < avs2_dec->img.num_of_references; i++) { + pic = avs2_dec->fref[i]; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v << 16) | + (pic->mc_canvas_u_v << 8) | + pic->mc_canvas_y); + + if (pic->error_mark) + cur_pic->error_mark = 1; + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "refid %x mc_canvas_u_v %x mc_canvas_y %x error_mark %x\n", + i, pic->mc_canvas_u_v, pic->mc_canvas_y, + pic->error_mark); + } + } else if (avs2_dec->img.type == F_IMG) { + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "config_mc_buffer for F_IMG, img type %d\n", + avs2_dec->img.type); + /*refer to prepare_RefInfo()*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0<<1) | 1); + for (i = 0; i < avs2_dec->img.num_of_references; i++) { + pic = avs2_dec->fref[i]; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v << 16) | + (pic->mc_canvas_u_v << 8) | + pic->mc_canvas_y); + + if 
(pic->error_mark) + cur_pic->error_mark = 1; + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "refid %x mc_canvas_u_v %x mc_canvas_y %x error_mark %x\n", + i, pic->mc_canvas_u_v, pic->mc_canvas_y, + pic->error_mark); + } + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (0<<1) | 1); + for (i = 0; i < avs2_dec->img.num_of_references; i++) { + pic = avs2_dec->fref[i]; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v << 16) | + (pic->mc_canvas_u_v << 8) | + pic->mc_canvas_y); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "refid %x mc_canvas_u_v %x mc_canvas_y %x\n", + i, pic->mc_canvas_u_v, pic->mc_canvas_y); + } + } else { + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "config_mc_buffer for B_IMG\n"); + /*refer to prepare_RefInfo()*/ + pic = avs2_dec->fref[1]; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0<<1) | 1); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v << 16) | + (pic->mc_canvas_u_v << 8) | + pic->mc_canvas_y); + + if (pic->error_mark) + cur_pic->error_mark = 1; + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "refid %x mc_canvas_u_v %x mc_canvas_y %x error_mark %x\n", + 1, pic->mc_canvas_u_v, pic->mc_canvas_y, + pic->error_mark); + + pic = avs2_dec->fref[0]; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (0<<1) | 1); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic->mc_canvas_u_v<<16) | + (pic->mc_canvas_u_v<<8) | + pic->mc_canvas_y); + + if (pic->error_mark) + cur_pic->error_mark = 1; + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "refid %x mc_canvas_u_v %x mc_canvas_y %x error_mark %x\n", + 0, pic->mc_canvas_u_v, pic->mc_canvas_y, + pic->error_mark); + } + return 0; +} +#if 0 +static void mcrcc_get_hitrate(void) +{ + u32 tmp; + u32 raw_mcr_cnt; + u32 hit_mcr_cnt; + u32 byp_mcr_cnt_nchoutwin; + u32 byp_mcr_cnt_nchcanv; + int hitrate; + + if (debug & AVS2_DBG_CACHE) + pr_info("[cache_util.c] Entered mcrcc_get_hitrate...\n"); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned 
int)(0x0<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x1<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x2<<1)); + byp_mcr_cnt_nchoutwin = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x3<<1)); + byp_mcr_cnt_nchcanv = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + + if (debug & AVS2_DBG_CACHE) { + pr_info("raw_mcr_cnt_total: %d\n",raw_mcr_cnt); + pr_info("hit_mcr_cnt_total: %d\n",hit_mcr_cnt); + pr_info("byp_mcr_cnt_nchoutwin_total: %d\n",byp_mcr_cnt_nchoutwin); + pr_info("byp_mcr_cnt_nchcanv_total: %d\n",byp_mcr_cnt_nchcanv); + } + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x4<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & AVS2_DBG_CACHE) + pr_info("miss_mcr_0_cnt_total: %d\n", tmp); + + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x5<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & AVS2_DBG_CACHE) + pr_info("miss_mcr_1_cnt_total: %d\n", tmp); + + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x6<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & AVS2_DBG_CACHE) + pr_info("hit_mcr_0_cnt_total: %d\n",tmp); + + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x7<<1)); + tmp= READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & AVS2_DBG_CACHE) + pr_info("hit_mcr_1_cnt_total: %d\n",tmp); + + if (raw_mcr_cnt != 0) { + hitrate = (hit_mcr_cnt / raw_mcr_cnt) * 100; + if (debug & AVS2_DBG_CACHE) + pr_info("MCRCC_HIT_RATE : %d\n", hitrate); + hitrate = ((byp_mcr_cnt_nchoutwin + byp_mcr_cnt_nchcanv) + /raw_mcr_cnt) * 100; + if (debug & AVS2_DBG_CACHE) + pr_info("MCRCC_BYP_RATE : %d\n", hitrate); + } else if (debug & AVS2_DBG_CACHE) { + pr_info("MCRCC_HIT_RATE : na\n"); + pr_info("MCRCC_BYP_RATE : na\n"); + } + return; +} + + +static void decomp_get_hitrate(void) +{ + u32 raw_mcr_cnt; + u32 hit_mcr_cnt; + int hitrate; + + if (debug & 
AVS2_DBG_CACHE) + pr_info("[cache_util.c] Entered decomp_get_hitrate...\n"); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x0<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x1<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + + if (debug & AVS2_DBG_CACHE) { + pr_info("hcache_raw_cnt_total: %d\n",raw_mcr_cnt); + pr_info("hcache_hit_cnt_total: %d\n",hit_mcr_cnt); + } + if (raw_mcr_cnt != 0) { + hitrate = (hit_mcr_cnt / raw_mcr_cnt) * 100; + if (debug & AVS2_DBG_CACHE) + pr_info("DECOMP_HCACHE_HIT_RATE : %d\n", hitrate); + } else { + if (debug & AVS2_DBG_CACHE) + pr_info("DECOMP_HCACHE_HIT_RATE : na\n"); + } + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x2<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x3<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + + if (debug & AVS2_DBG_CACHE) { + pr_info("dcache_raw_cnt_total: %d\n", raw_mcr_cnt); + pr_info("dcache_hit_cnt_total: %d\n", hit_mcr_cnt); + } + if (raw_mcr_cnt != 0) { + hitrate = (hit_mcr_cnt / raw_mcr_cnt) * 100; + if (debug & AVS2_DBG_CACHE) + pr_info("DECOMP_DCACHE_HIT_RATE : %d\n", hitrate); + } else if (debug & AVS2_DBG_CACHE) { + pr_info("DECOMP_DCACHE_HIT_RATE : na\n"); + } +return; +} + +static void decomp_get_comprate(void) +{ + u32 raw_ucomp_cnt; + u32 fast_comp_cnt; + u32 slow_comp_cnt; + int comprate; + + if (debug & AVS2_DBG_CACHE) + pr_info("[cache_util.c] Entered decomp_get_comprate...\n"); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x4<<1)); + fast_comp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x5<<1)); + slow_comp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x6<<1)); + raw_ucomp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + if (debug & 
/*
 * config_mcrcc_axi_hw() - program the motion-compensation reference cache
 * (MCRCC) for the picture about to be decoded.
 *
 * @dec: decoder instance.
 *
 * Sequence: pulse the MCRCC reset (CTL1 = 0x2), then either leave it
 * disabled (I pictures have no reference reads) or load the canvas ids of
 * the reference pictures into CTL2/CTL3 and enable progressive mode
 * (CTL1 = 0xff0).
 */
static void config_mcrcc_axi_hw(struct AVS2Decoder_s *dec)
{
	uint32_t rdata32;
	uint32_t rdata32_2;
	struct avs2_decoder *avs2_dec = &dec->avs2_dec;

	WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); /* reset mcrcc */

	if (avs2_dec->img.type == I_IMG) { /* I picture: no references */
		/* leave reset asserted -- this gates the cache clock off */
		WRITE_VREG(HEVCD_MCRCC_CTL1, 0x0);
		return;
	}
/*
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) {
		mcrcc_get_hitrate();
		decomp_get_hitrate();
		decomp_get_comprate();
	}
*/
	if ((avs2_dec->img.type == B_IMG) ||
		(avs2_dec->img.type == F_IMG)) { /* B or F picture */
		/*
		 * Programme canvas0: select list0/ref0 via the canvas
		 * access-config register, read back the 16-bit canvas id and
		 * replicate it into both halves of CTL2.
		 */
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			(0 << 8) | (0 << 1) | 0);
		rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
		rdata32 = rdata32 & 0xffff;
		rdata32 = rdata32 | (rdata32 << 16);
		WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32);

		/* Programme canvas1: list1/ref0 -> CTL3 */
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			(16 << 8) | (1 << 1) | 0);
		rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
		rdata32_2 = rdata32_2 & 0xffff;
		rdata32_2 = rdata32_2 | (rdata32_2 << 16);
		if (rdata32 == rdata32_2) {
			/*
			 * Same canvas on both lists: the access-config
			 * register auto-increments, so read the next entry
			 * to cache a second, distinct reference.
			 */
			rdata32_2 =
				READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
			rdata32_2 = rdata32_2 & 0xffff;
			rdata32_2 = rdata32_2 | (rdata32_2 << 16);
		}
		WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32_2);
	} else { /* P picture: two consecutive list0 canvases */
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			(0 << 8) | (1 << 1) | 0);
		rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
		rdata32 = rdata32 & 0xffff;
		rdata32 = rdata32 | (rdata32 << 16);
		WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32);

		/* Programme canvas1 (auto-incremented read) */
		rdata32 =
			READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
		rdata32 = rdata32 & 0xffff;
		rdata32 = rdata32 | (rdata32 << 16);
		WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32);
	}
	/* enable mcrcc progressive-mode */
	WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0);
	return;
}

/*
 * config_mpred_hw() - per-picture motion-prediction (MPRED) configuration.
 *
 * @dec: decoder instance.
 *
 * Points the co-located MV read window at fref[0]'s MV buffer, programs
 * picture-level flags into HEVC_MPRED_CTRL9, and loads the POC/distance
 * registers for all seven reference slots.
 *
 * NOTE(review): the L0/L1 POC registers below must be written strictly in
 * L0 -> L1 pairs (hardware requirement stated in the original comment);
 * do not reorder these writes.
 */
static void config_mpred_hw(struct AVS2Decoder_s *dec)
{
	uint32_t data32;
	struct avs2_decoder *avs2_dec = &dec->avs2_dec;
	struct avs2_frame_s *cur_pic = avs2_dec->hc.cur_pic;
	struct avs2_frame_s *col_pic = avs2_dec->fref[0];
	int32_t mpred_mv_rd_start_addr;
	int32_t mpred_curr_lcu_x;
	int32_t mpred_curr_lcu_y;
	int32_t mpred_mv_rd_end_addr;
	int32_t above_en;
	int32_t mv_wr_en;
	int32_t mv_rd_en;
	int32_t col_isIntra;
	int mv_mem_unit;
	/*
	 * NOTE(review): above_en/mv_wr_en/mv_rd_en/col_isIntra and
	 * mpred_curr_lcu_x/y are computed but not consumed in this
	 * function as visible here -- presumably leftovers from a fuller
	 * register-programming path; confirm before removing.
	 */
	if (avs2_dec->img.type != I_IMG) {
		above_en = 1;
		mv_wr_en = 1;
		mv_rd_en = 1;
		col_isIntra = 0;
	} else {
		above_en = 1;
		mv_wr_en = 1;
		mv_rd_en = 0;
		col_isIntra = 0;
	}

	mpred_mv_rd_start_addr =
		col_pic->mpred_mv_wr_start_addr;
	data32 = READ_VREG(HEVC_MPRED_CURR_LCU);
	mpred_curr_lcu_x = data32 & 0xffff;
	mpred_curr_lcu_y = (data32 >> 16) & 0xffff;

	/* bytes of MV storage per LCU: 64x64 -> 0x200, 32x32 -> 0x80, else 0x20 */
	mv_mem_unit = avs2_dec->lcu_size_log2 == 6 ?
		0x200 : (avs2_dec->lcu_size_log2 == 5 ?
		0x80 : 0x20);

	mpred_mv_rd_end_addr =
		mpred_mv_rd_start_addr +
		((avs2_dec->lcu_x_num *
		avs2_dec->lcu_y_num) * mv_mem_unit);

	avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
		"cur pic index %d col pic index %d\n",
		cur_pic->index, col_pic->index);

	WRITE_VREG(HEVC_MPRED_MV_WR_START_ADDR,
		cur_pic->mpred_mv_wr_start_addr);
	WRITE_VREG(HEVC_MPRED_MV_RD_START_ADDR,
		col_pic->mpred_mv_wr_start_addr);
	avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
		"[MPRED CO_MV] write 0x%x read 0x%x\n",
		cur_pic->mpred_mv_wr_start_addr,
		col_pic->mpred_mv_wr_start_addr);

	/* picture-level flags packed per the CTRL9 register layout */
	data32 =
		((avs2_dec->bk_img_is_top_field) << 13) |
		((avs2_dec->hd.background_picture_enable & 1) << 12) |
		((avs2_dec->hd.curr_RPS.num_of_ref & 7) << 8) |
		((avs2_dec->hd.b_pmvr_enabled & 1) << 6) |
		((avs2_dec->img.is_top_field & 1) << 5) |
		((avs2_dec->img.is_field_sequence & 1) << 4) |
		((avs2_dec->img.typeb & 7) << 1) |
		(avs2_dec->hd.background_reference_enable & 0x1);
	avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
		"HEVC_MPRED_CTRL9 <= 0x%x(num of ref %d)\n",
		data32, avs2_dec->hd.curr_RPS.num_of_ref);
	WRITE_VREG(HEVC_MPRED_CTRL9, data32);

	avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
		"%s: dis %d %d %d %d %d %d %d fref0_ref_poc %d %d %d %d %d %d %d\n",
		__func__,
		avs2_dec->fref[0]->imgtr_fwRefDistance,
		avs2_dec->fref[1]->imgtr_fwRefDistance,
		avs2_dec->fref[2]->imgtr_fwRefDistance,
		avs2_dec->fref[3]->imgtr_fwRefDistance,
		avs2_dec->fref[4]->imgtr_fwRefDistance,
		avs2_dec->fref[5]->imgtr_fwRefDistance,
		avs2_dec->fref[6]->imgtr_fwRefDistance,
		avs2_dec->fref[0]->ref_poc[0],
		avs2_dec->fref[0]->ref_poc[1],
		avs2_dec->fref[0]->ref_poc[2],
		avs2_dec->fref[0]->ref_poc[3],
		avs2_dec->fref[0]->ref_poc[4],
		avs2_dec->fref[0]->ref_poc[5],
		avs2_dec->fref[0]->ref_poc[6]
		);
	avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
		"pic_distance %d, imgtr_next_P %d\n",
		avs2_dec->img.pic_distance, avs2_dec->img.imgtr_next_P);

	WRITE_VREG(HEVC_MPRED_CUR_POC, avs2_dec->img.pic_distance);
	WRITE_VREG(HEVC_MPRED_COL_POC, avs2_dec->img.imgtr_next_P);

	/*
	 * The MPRED Ref_POC_xx_Lx registers below must follow the
	 * Ref_POC_xx_L0 -> Ref_POC_xx_L1 pair write order!
	 * L0 carries fref[i]'s distance; L1 carries fref[0]->ref_poc[i].
	 */
	WRITE_VREG(HEVC_MPRED_L0_REF00_POC,
		avs2_dec->fref[0]->imgtr_fwRefDistance);
	WRITE_VREG(HEVC_MPRED_L1_REF00_POC,
		avs2_dec->fref[0]->ref_poc[0]);

	WRITE_VREG(HEVC_MPRED_L0_REF01_POC,
		avs2_dec->fref[1]->imgtr_fwRefDistance);
	WRITE_VREG(HEVC_MPRED_L1_REF01_POC,
		avs2_dec->fref[0]->ref_poc[1]);

	WRITE_VREG(HEVC_MPRED_L0_REF02_POC,
		avs2_dec->fref[2]->imgtr_fwRefDistance);
	WRITE_VREG(HEVC_MPRED_L1_REF02_POC,
		avs2_dec->fref[0]->ref_poc[2]);

	WRITE_VREG(HEVC_MPRED_L0_REF03_POC,
		avs2_dec->fref[3]->imgtr_fwRefDistance);
	WRITE_VREG(HEVC_MPRED_L1_REF03_POC,
		avs2_dec->fref[0]->ref_poc[3]);

	WRITE_VREG(HEVC_MPRED_L0_REF04_POC,
		avs2_dec->fref[4]->imgtr_fwRefDistance);
	WRITE_VREG(HEVC_MPRED_L1_REF04_POC,
		avs2_dec->fref[0]->ref_poc[4]);

	WRITE_VREG(HEVC_MPRED_L0_REF05_POC,
		avs2_dec->fref[5]->imgtr_fwRefDistance);
	WRITE_VREG(HEVC_MPRED_L1_REF05_POC,
		avs2_dec->fref[0]->ref_poc[5]);

	WRITE_VREG(HEVC_MPRED_L0_REF06_POC,
		avs2_dec->fref[6]->imgtr_fwRefDistance);
	WRITE_VREG(HEVC_MPRED_L1_REF06_POC,
		avs2_dec->fref[0]->ref_poc[6]);

	WRITE_VREG(HEVC_MPRED_MV_RD_END_ADDR,
		mpred_mv_rd_end_addr);
}
WRITE_VREG(HEVC_MPRED_COL_POC, avs2_dec->img.imgtr_next_P); + + /*below MPRED Ref_POC_xx_Lx registers + must follow Ref_POC_xx_L0 -> + Ref_POC_xx_L1 in pair write order!!!*/ + WRITE_VREG(HEVC_MPRED_L0_REF00_POC, + avs2_dec->fref[0]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF00_POC, + avs2_dec->fref[0]->ref_poc[0]); + + WRITE_VREG(HEVC_MPRED_L0_REF01_POC, + avs2_dec->fref[1]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF01_POC, + avs2_dec->fref[0]->ref_poc[1]); + + WRITE_VREG(HEVC_MPRED_L0_REF02_POC, + avs2_dec->fref[2]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF02_POC, + avs2_dec->fref[0]->ref_poc[2]); + + WRITE_VREG(HEVC_MPRED_L0_REF03_POC, + avs2_dec->fref[3]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF03_POC, + avs2_dec->fref[0]->ref_poc[3]); + + WRITE_VREG(HEVC_MPRED_L0_REF04_POC, + avs2_dec->fref[4]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF04_POC, + avs2_dec->fref[0]->ref_poc[4]); + + WRITE_VREG(HEVC_MPRED_L0_REF05_POC, + avs2_dec->fref[5]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF05_POC, + avs2_dec->fref[0]->ref_poc[5]); + + WRITE_VREG(HEVC_MPRED_L0_REF06_POC, + avs2_dec->fref[6]->imgtr_fwRefDistance); + WRITE_VREG(HEVC_MPRED_L1_REF06_POC, + avs2_dec->fref[0]->ref_poc[6]); + + + WRITE_VREG(HEVC_MPRED_MV_RD_END_ADDR, + mpred_mv_rd_end_addr); +} + +static void config_dblk_hw(struct AVS2Decoder_s *dec) +{ + /* + * Picture level de-block parameter configuration here + */ + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + union param_u *rpm_param = &avs2_dec->param; + uint32_t data32; + + data32 = READ_VREG(HEVC_DBLK_CFG1); + data32 = (((data32 >> 20) & 0xfff) << 20) | + (((avs2_dec->input.sample_bit_depth == 10) + ? 0xa : 0x0) << 16) | /*[16 +: 4]: {luma_bd[1:0], + chroma_bd[1:0]}*/ + (((data32 >> 2) & 0x3fff) << 2) | + (((rpm_param->p.lcu_size == 6) + ? 0 : (rpm_param->p.lcu_size == 5) + ? 
1 : 2) << 0);/*[ 0 +: 2]: lcu_size*/ + WRITE_VREG(HEVC_DBLK_CFG1, data32); + + data32 = (avs2_dec->img.height << 16) | + avs2_dec->img.width; + WRITE_VREG(HEVC_DBLK_CFG2, data32); + /* + [27 +: 1]: cross_slice_loopfilter_enable_flag + [26 +: 1]: loop_filter_disable + [25 +: 1]: useNSQT + [22 +: 3]: imgtype + [17 +: 5]: alpha_c_offset (-8~8) + [12 +: 5]: beta_offset (-8~8) + [ 6 +: 6]: chroma_quant_param_delta_u (-16~16) + [ 0 +: 6]: chroma_quant_param_delta_v (-16~16) + */ + data32 = ((avs2_dec->input.crossSliceLoopFilter + & 0x1) << 27) | + ((rpm_param->p.loop_filter_disable & 0x1) << 26) | + ((avs2_dec->input.useNSQT & 0x1) << 25) | + ((avs2_dec->img.type & 0x7) << 22) | + ((rpm_param->p.alpha_c_offset & 0x1f) << 17) | + ((rpm_param->p.beta_offset & 0x1f) << 12) | + ((rpm_param->p.chroma_quant_param_delta_cb & 0x3f) << 6) | + ((rpm_param->p.chroma_quant_param_delta_cr & 0x3f) << 0); + + WRITE_VREG(HEVC_DBLK_CFG9, data32); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[c] cfgDBLK: crossslice(%d),lfdisable(%d),bitDepth(%d),lcuSize(%d),NSQT(%d)\n", + avs2_dec->input.crossSliceLoopFilter, + rpm_param->p.loop_filter_disable, + avs2_dec->input.sample_bit_depth, + avs2_dec->lcu_size, + avs2_dec->input.useNSQT); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[c] cfgDBLK: alphaCOffset(%d),betaOffset(%d),quantDeltaCb(%d),quantDeltaCr(%d)\n", + rpm_param->p.alpha_c_offset, + rpm_param->p.beta_offset, + rpm_param->p.chroma_quant_param_delta_cb, + rpm_param->p.chroma_quant_param_delta_cr); + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[c] cfgDBLK: .done.\n"); +} + +static void config_sao_hw(struct AVS2Decoder_s *dec) +{ + uint32_t data32; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *cur_pic = avs2_dec->hc.cur_pic; + + int lcu_size = 64; + int mc_buffer_size_u_v = + cur_pic->lcu_total * lcu_size*lcu_size/2; + int mc_buffer_size_u_v_h = + (mc_buffer_size_u_v + 0xffff) >> 16;/*64k alignment*/ + + data32 = READ_VREG(HEVC_SAO_CTRL0); + data32 &= (~0xf); + 
data32 |= avs2_dec->lcu_size_log2; + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "%s, lcu_size_log2 = %d, config HEVC_SAO_CTRL0 0x%x\n", + __func__, + avs2_dec->lcu_size_log2, + data32); + + WRITE_VREG(HEVC_SAO_CTRL0, data32); + +#ifndef AVS2_10B_MMU + if ((get_double_write_mode(dec) & 0x10) == 0) + WRITE_VREG(HEVC_CM_BODY_START_ADDR, cur_pic->mc_y_adr); +#endif + if (get_double_write_mode(dec)) { + WRITE_VREG(HEVC_SAO_Y_START_ADDR, cur_pic->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_START_ADDR, cur_pic->dw_u_v_adr); + WRITE_VREG(HEVC_SAO_Y_WPTR, cur_pic->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_WPTR, cur_pic->dw_u_v_adr); + } else { + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0xffffffff); + WRITE_VREG(HEVC_SAO_C_START_ADDR, 0xffffffff); + } +#ifdef AVS2_10B_MMU + WRITE_VREG(HEVC_CM_HEADER_START_ADDR, cur_pic->header_adr); +#endif + data32 = (mc_buffer_size_u_v_h << 16) << 1; + /*pr_info("data32=%x,mc_buffer_size_u_v_h=%x,lcu_total=%x\n", + data32, mc_buffer_size_u_v_h, cur_pic->lcu_total);*/ + WRITE_VREG(HEVC_SAO_Y_LENGTH, data32); + + data32 = (mc_buffer_size_u_v_h << 16); + WRITE_VREG(HEVC_SAO_C_LENGTH, data32); + +#ifdef AVS2_10B_NV21 +#ifdef DOS_PROJECT + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + /*[13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32*/ + data32 |= (MEM_MAP_MODE << 12); + data32 &= (~0x3); + data32 |= 0x1; /* [1]:dw_disable [0]:cm_disable*/ + + /* + * [31:24] ar_fifo1_axi_thred + * [23:16] ar_fifo0_axi_thred + * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes + * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + * [11:08] axi_lendian_C + * [07:04] axi_lendian_Y + * [3] reserved + * [2] clk_forceon + * [1] dw_disable:disable double write output + * [0] cm_disable:disable compress output + */ + data32 &= (~(3 << 14)); + data32 |= (2 << 14); + + WRITE_VREG(HEVC_SAO_CTRL1, data32); + /*[23:22] dw_v1_ctrl [21:20] dw_v0_ctrl [19:18] dw_h1_ctrl + [17:16] dw_h0_ctrl*/ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /*set them all 0 for H265_NV21 (no down-scale)*/ 
+ data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + ata32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /*[5:4] address_format 00:linear 01:32x32 10:64x32*/ + data32 |= (MEM_MAP_MODE << 4); + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#else + /*m8baby test1902*/ + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + /*[13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32*/ + data32 |= (MEM_MAP_MODE << 12); + data32 &= (~0xff0); + /*data32 |= 0x670;*/ /*Big-Endian per 64-bit*/ + data32 |= 0x880; /*.Big-Endian per 64-bit */ + data32 &= (~0x3); + data32 |= 0x1; /*[1]:dw_disable [0]:cm_disable*/ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + /* [23:22] dw_v1_ctrl [21:20] dw_v0_ctrl + [19:18] dw_h1_ctrl [17:16] dw_h0_ctrl*/ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /* set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + /* + * [3:0] little_endian + * [5:4] address_format 00:linear 01:32x32 10:64x32 + * [7:6] reserved + * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte + * [11:10] reserved + * [12] CbCr_byte_swap + * [31:13] reserved + */ + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /*[5:4] address_format 00:linear 01:32x32 10:64x32*/ + data32 |= (MEM_MAP_MODE << 4); + data32 &= (~0xF); + data32 |= 0x8; /*Big-Endian per 64-bit*/ + + data32 &= (~(3 << 8)); + data32 |= (2 << 8); + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#endif +#else + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~(3 << 14)); + data32 |= (2 << 14); /* line align with 64*/ + data32 &= (~0x3000); + data32 |= (MEM_MAP_MODE << 12); /* [13:12] axi_aformat, 0-Linear, + 1-32x32, 2-64x32 */ + data32 &= (~0xff0); + data32 |= ((dec->endian >> 8) & 0xfff); /* data32 |= 0x670; Big-Endian per 64-bit */ + data32 &= (~0x3); /*[1]:dw_disable [0]:cm_disable*/ +#if 0 + if (get_cpu_major_id() < MESON_CPU_MAJOR_ID_G12A) { + if (get_double_write_mode(dec) == 0) + data32 |= 0x2; /*disable double 
write*/ +#ifndef AVS2_10B_MMU + else + if (get_double_write_mode(dec) & 0x10) + data32 |= 0x1; /*disable cm*/ +#endif + } +#endif + if (get_double_write_mode(dec) == 0) + data32 |= 0x2; /*disable double write*/ + else if (get_double_write_mode(dec) & 0x10) + data32 |= 0x1; /*disable cm*/ + + /* + * [31:24] ar_fifo1_axi_thred + * [23:16] ar_fifo0_axi_thred + * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes + * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + * [11:08] axi_lendian_C + * [07:04] axi_lendian_Y + * [3] reserved + * [2] clk_forceon + * [1] dw_disable:disable double write output + * [0] cm_disable:disable compress output + */ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + + if (get_double_write_mode(dec) & 0x10) { + /* [23:22] dw_v1_ctrl + [21:20] dw_v0_ctrl + [19:18] dw_h1_ctrl + [17:16] dw_h0_ctrl + */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /*set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } else { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) + WRITE_VREG(HEVC_SAO_CTRL26, 0); + + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 &= (~(0xff << 16)); + if (get_double_write_mode(dec) == 8 || + get_double_write_mode(dec) == 9) { + data32 |= (0xff<<16); + WRITE_VREG(HEVC_SAO_CTRL26, 0xf); + } else if (get_double_write_mode(dec) == 2 || + get_double_write_mode(dec) == 3) + data32 |= (0xff<<16); + else if (get_double_write_mode(dec) == 4) + data32 |= (0x33<<16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } + + /* + * [3:0] little_endian + * [5:4] address_format 00:linear 01:32x32 10:64x32 + * [7:6] reserved + * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte + * [11:10] reserved + * [12] CbCr_byte_swap + * [31:13] reserved + */ + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /* [5:4] -- address_format 00:linear 01:32x32 10:64x32 */ + data32 |= (mem_map_mode << 4); + data32 &= (~0xF); + data32 |= (dec->endian & 0xf); /* valid only when double write only */ + 
/*data32 |= 0x8;*/ /* Big-Endian per 64-bit */ + data32 &= (~(3 << 8)); + data32 |= (2 << 8); /* line align with 64 for dw only */ + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#endif +} + +static void reconstructCoefficients(struct AVS2Decoder_s *dec, + struct ALFParam_s *alfParam) +{ + int32_t g, sum, i, coeffPred; + for (g = 0; g < alfParam->filters_per_group; g++) { + sum = 0; + for (i = 0; i < alfParam->num_coeff - 1; i++) { + sum += (2 * alfParam->coeffmulti[g][i]); + dec->m_filterCoeffSym[g][i] = + alfParam->coeffmulti[g][i]; + /*pr_info("[t] dec->m_filterCoeffSym[%d][%d]=0x%x\n", + g, i, dec->m_filterCoeffSym[g][i]);*/ + } + coeffPred = (1 << ALF_NUM_BIT_SHIFT) - sum; + dec->m_filterCoeffSym[g][alfParam->num_coeff - 1] + = coeffPred + + alfParam->coeffmulti[g][alfParam->num_coeff - 1]; + /*pr_info("[t] dec->m_filterCoeffSym[%d][%d]=0x%x\n", + g, (alfParam->num_coeff - 1), + dec->m_filterCoeffSym[g][alfParam->num_coeff - 1]);*/ + } +} + +static void reconstructCoefInfo(struct AVS2Decoder_s *dec, + int32_t compIdx, struct ALFParam_s *alfParam) +{ + int32_t i; + if (compIdx == ALF_Y) { + if (alfParam->filters_per_group > 1) { + for (i = 1; i < NO_VAR_BINS; ++i) { + if (alfParam->filterPattern[i]) + dec->m_varIndTab[i] = + dec->m_varIndTab[i - 1] + 1; + else + dec->m_varIndTab[i] = + dec->m_varIndTab[i - 1]; + } + } + } + reconstructCoefficients(dec, alfParam); +} + +static void config_alf_hw(struct AVS2Decoder_s *dec) +{ + /* + * Picture level ALF parameter configuration here + */ + uint32_t data32; + int32_t i, j; + int32_t m_filters_per_group; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct ALFParam_s *m_alfPictureParam_y = + &avs2_dec->m_alfPictureParam[0]; + struct ALFParam_s *m_alfPictureParam_cb = + &avs2_dec->m_alfPictureParam[1]; + struct ALFParam_s *m_alfPictureParam_cr = + &avs2_dec->m_alfPictureParam[2]; + + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "[t]alfy,cidx(%d),flag(%d),filters_per_group(%d),filterPattern[0]=0x%x,[15]=0x%x\n", + 
/*
 * config_alf_hw() - picture-level ALF (adaptive loop filter) parameter
 * configuration.
 *
 * @dec: decoder instance.
 *
 * All filter data is streamed into the single HEVC_DBLK_CFGD register in
 * a fixed order the hardware expects: Cr coefficients (2 words), Cb
 * coefficients (2 words), the luma variance-index table (2 words), then
 * two words per luma filter group with bit 31 of the second word marking
 * the last group. Do not reorder these writes.
 */
static void config_alf_hw(struct AVS2Decoder_s *dec)
{
	uint32_t data32;
	int32_t i, j;
	int32_t m_filters_per_group;
	struct avs2_decoder *avs2_dec = &dec->avs2_dec;
	/* m_alfPictureParam[] is indexed Y=0, Cb=1, Cr=2 */
	struct ALFParam_s *m_alfPictureParam_y =
		&avs2_dec->m_alfPictureParam[0];
	struct ALFParam_s *m_alfPictureParam_cb =
		&avs2_dec->m_alfPictureParam[1];
	struct ALFParam_s *m_alfPictureParam_cr =
		&avs2_dec->m_alfPictureParam[2];

	avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
		"[t]alfy,cidx(%d),flag(%d),filters_per_group(%d),filterPattern[0]=0x%x,[15]=0x%x\n",
		m_alfPictureParam_y->componentID,
		m_alfPictureParam_y->alf_flag,
		m_alfPictureParam_y->filters_per_group,
		m_alfPictureParam_y->filterPattern[0],
		m_alfPictureParam_y->filterPattern[15]);
	avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
		"[t]alfy,num_coeff(%d),coeffmulti[0][0]=0x%x,[0][1]=0x%x,[1][0]=0x%x,[1][1]=0x%x\n",
		m_alfPictureParam_y->num_coeff,
		m_alfPictureParam_y->coeffmulti[0][0],
		m_alfPictureParam_y->coeffmulti[0][1],
		m_alfPictureParam_y->coeffmulti[1][0],
		m_alfPictureParam_y->coeffmulti[1][1]);

	/*
	 * Cr: clear the (shared) variance-index table and the coefficient
	 * scratch, reconstruct, and stream group 0's nine 7-bit taps as
	 * two CFGD words (tap 4 is split across the word boundary).
	 */
	for (i = 0; i < 16; i++)
		dec->m_varIndTab[i] = 0;
	for (j = 0; j < 16; j++)
		for (i = 0; i < 9; i++)
			dec->m_filterCoeffSym[j][i] = 0;
	reconstructCoefInfo(dec, 2, m_alfPictureParam_cr);
	data32 =
		((dec->m_filterCoeffSym[0][4] & 0xf) << 28) |
		((dec->m_filterCoeffSym[0][3] & 0x7f) << 21) |
		((dec->m_filterCoeffSym[0][2] & 0x7f) << 14) |
		((dec->m_filterCoeffSym[0][1] & 0x7f) << 7) |
		((dec->m_filterCoeffSym[0][0] & 0x7f) << 0);
	WRITE_VREG(HEVC_DBLK_CFGD, data32);
	data32 =
		((dec->m_filterCoeffSym[0][8] & 0x7f) << 24) |
		((dec->m_filterCoeffSym[0][7] & 0x7f) << 17) |
		((dec->m_filterCoeffSym[0][6] & 0x7f) << 10) |
		((dec->m_filterCoeffSym[0][5] & 0x7f) << 3) |
		(((dec->m_filterCoeffSym[0][4] >> 4) & 0x7) << 0);
	WRITE_VREG(HEVC_DBLK_CFGD, data32);
	avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
		"[c] pic_alf_on_cr(%d), alf_cr_coef(%d %d %d %d %d %d %d %d %d)\n",
		m_alfPictureParam_cr->alf_flag,
		dec->m_filterCoeffSym[0][0],
		dec->m_filterCoeffSym[0][1],
		dec->m_filterCoeffSym[0][2],
		dec->m_filterCoeffSym[0][3],
		dec->m_filterCoeffSym[0][4],
		dec->m_filterCoeffSym[0][5],
		dec->m_filterCoeffSym[0][6],
		dec->m_filterCoeffSym[0][7],
		dec->m_filterCoeffSym[0][8]);

	/* Cb: same two-word encoding as Cr */
	for (j = 0; j < 16; j++)
		for (i = 0; i < 9; i++)
			dec->m_filterCoeffSym[j][i] = 0;
	reconstructCoefInfo(dec, 1, m_alfPictureParam_cb);
	data32 =
		((dec->m_filterCoeffSym[0][4] & 0xf) << 28) |
		((dec->m_filterCoeffSym[0][3] & 0x7f) << 21) |
		((dec->m_filterCoeffSym[0][2] & 0x7f) << 14) |
		((dec->m_filterCoeffSym[0][1] & 0x7f) << 7) |
		((dec->m_filterCoeffSym[0][0] & 0x7f) << 0);
	WRITE_VREG(HEVC_DBLK_CFGD, data32);
	data32 =
		((dec->m_filterCoeffSym[0][8] & 0x7f) << 24) |
		((dec->m_filterCoeffSym[0][7] & 0x7f) << 17) |
		((dec->m_filterCoeffSym[0][6] & 0x7f) << 10) |
		((dec->m_filterCoeffSym[0][5] & 0x7f) << 3) |
		(((dec->m_filterCoeffSym[0][4] >> 4) & 0x7) << 0);
	WRITE_VREG(HEVC_DBLK_CFGD, data32);
	avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
		"[c] pic_alf_on_cb(%d), alf_cb_coef(%d %d %d %d %d %d %d %d %d)\n",
		m_alfPictureParam_cb->alf_flag,
		dec->m_filterCoeffSym[0][0],
		dec->m_filterCoeffSym[0][1],
		dec->m_filterCoeffSym[0][2],
		dec->m_filterCoeffSym[0][3],
		dec->m_filterCoeffSym[0][4],
		dec->m_filterCoeffSym[0][5],
		dec->m_filterCoeffSym[0][6],
		dec->m_filterCoeffSym[0][7],
		dec->m_filterCoeffSym[0][8]);

	/* Y: variance-index table (two words of 4-bit entries) ... */
	for (j = 0; j < 16; j++)
		for (i = 0; i < 9; i++)
			dec->m_filterCoeffSym[j][i] = 0;
	reconstructCoefInfo(dec, 0, m_alfPictureParam_y);
	data32 =
		((dec->m_varIndTab[7] & 0xf) << 28) |
		((dec->m_varIndTab[6] & 0xf) << 24) |
		((dec->m_varIndTab[5] & 0xf) << 20) |
		((dec->m_varIndTab[4] & 0xf) << 16) |
		((dec->m_varIndTab[3] & 0xf) << 12) |
		((dec->m_varIndTab[2] & 0xf) << 8) |
		((dec->m_varIndTab[1] & 0xf) << 4) |
		((dec->m_varIndTab[0] & 0xf) << 0);
	WRITE_VREG(HEVC_DBLK_CFGD, data32);
	data32 = ((dec->m_varIndTab[15] & 0xf) << 28) |
		((dec->m_varIndTab[14] & 0xf) << 24) |
		((dec->m_varIndTab[13] & 0xf) << 20) |
		((dec->m_varIndTab[12] & 0xf) << 16) |
		((dec->m_varIndTab[11] & 0xf) << 12) |
		((dec->m_varIndTab[10] & 0xf) << 8) |
		((dec->m_varIndTab[9] & 0xf) << 4) |
		((dec->m_varIndTab[8] & 0xf) << 0);
	WRITE_VREG(HEVC_DBLK_CFGD, data32);
	avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
		"[c] pic_alf_on_y(%d), alf_y_tab(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)\n",
		m_alfPictureParam_y->alf_flag,
		dec->m_varIndTab[0],
		dec->m_varIndTab[1],
		dec->m_varIndTab[2],
		dec->m_varIndTab[3],
		dec->m_varIndTab[4],
		dec->m_varIndTab[5],
		dec->m_varIndTab[6],
		dec->m_varIndTab[7],
		dec->m_varIndTab[8],
		dec->m_varIndTab[9],
		dec->m_varIndTab[10],
		dec->m_varIndTab[11],
		dec->m_varIndTab[12],
		dec->m_varIndTab[13],
		dec->m_varIndTab[14],
		dec->m_varIndTab[15]);

	/* ... then two words per luma filter group; at least one group is
	 * always sent even when ALF is off for luma */
	m_filters_per_group =
		(m_alfPictureParam_y->alf_flag == 0) ?
		1 : m_alfPictureParam_y->filters_per_group;
	for (i = 0; i < m_filters_per_group; i++) {
		data32 =
			((dec->m_filterCoeffSym[i][4] & 0xf) << 28) |
			((dec->m_filterCoeffSym[i][3] & 0x7f) << 21) |
			((dec->m_filterCoeffSym[i][2] & 0x7f) << 14) |
			((dec->m_filterCoeffSym[i][1] & 0x7f) << 7) |
			((dec->m_filterCoeffSym[i][0] & 0x7f) << 0);
		WRITE_VREG(HEVC_DBLK_CFGD, data32);
		data32 =
			/* [31] last-group indication */
			((i == m_filters_per_group-1) << 31) |
			((dec->m_filterCoeffSym[i][8] & 0x7f) << 24) |
			((dec->m_filterCoeffSym[i][7] & 0x7f) << 17) |
			((dec->m_filterCoeffSym[i][6] & 0x7f) << 10) |
			((dec->m_filterCoeffSym[i][5] & 0x7f) << 3) |
			(((dec->m_filterCoeffSym[i][4] >> 4) & 0x7) << 0);
		WRITE_VREG(HEVC_DBLK_CFGD, data32);
		avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
			"[c] alf_y_coef[%d](%d %d %d %d %d %d %d %d %d)\n",
			i, dec->m_filterCoeffSym[i][0],
			dec->m_filterCoeffSym[i][1],
			dec->m_filterCoeffSym[i][2],
			dec->m_filterCoeffSym[i][3],
			dec->m_filterCoeffSym[i][4],
			dec->m_filterCoeffSym[i][5],
			dec->m_filterCoeffSym[i][6],
			dec->m_filterCoeffSym[i][7],
			dec->m_filterCoeffSym[i][8]);
	}
	avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL,
		"[c] cfgALF .done.\n");
}
compute_losless_comp_body_size( + dec, cur_pic->pic_w, + cur_pic->pic_h, (bit_depth == AVS2_BITS_10)); + cur_pic->comp_body_size = losless_comp_body_size; + +#ifdef LOSLESS_COMPRESS_MODE + data32 = READ_VREG(HEVC_SAO_CTRL5); + if (bit_depth == AVS2_BITS_10) + data32 &= ~(1 << 9); + else + data32 |= (1 << 9); + + WRITE_VREG(HEVC_SAO_CTRL5, data32); + +#ifdef AVS2_10B_MMU + /*bit[4] : paged_mem_mode*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4)); +#else + /*bit[3] smem mdoe*/ + if (bit_depth == AVS2_BITS_10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0 << 3)); + else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (1 << 3)); +#endif + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, (losless_comp_body_size >> 5)); + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,(0xff<<20) | (0xff<<10) | 0xff);*/ + WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size); +#else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#endif +} + +static u32 init_cuva_size; +static int cuva_data_is_avaible(struct AVS2Decoder_s *dec) +{ + u32 reg_val; + + reg_val = READ_VREG(AVS2_CUVA_DATA_SIZE); + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s:reg_val: %u \n", + __func__, reg_val); + if (reg_val != 0 && reg_val != init_cuva_size) + return 1; + else + return 0; +} + +static void config_cuva_buf(struct AVS2Decoder_s *dec) +{ + WRITE_VREG(AVS2_CUVA_ADR, dec->cuva_phy_addr); + init_cuva_size = (dec->cuva_size >> 4) << 16; + WRITE_VREG(AVS2_CUVA_DATA_SIZE, init_cuva_size); +} + +static void set_cuva_data(struct AVS2Decoder_s *dec) +{ + int i; + unsigned short *cuva_adr; + unsigned int size_reg_val = + READ_VREG(AVS2_CUVA_DATA_SIZE); + unsigned int cuva_count = 0; + int cuva_size = 0; + struct avs2_frame_s *pic = dec->avs2_dec.hc.cur_pic; + if (pic == NULL || 0 == cuva_data_is_avaible(dec)) { + avs2_print(dec, AVS2_DBG_HDR_INFO, + "%s:pic 0x%p or data not avaible\n", + __func__, pic); + return; + } + + cuva_adr = 
(unsigned short *)dec->cuva_addr; + cuva_count = ((size_reg_val >> 16) << 4) >> 1; + cuva_size = dec->cuva_size; + dec->hdr_flag |= HDR_CUVA_MASK; + + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s:pic 0x%p cuva_count(%d) cuva_size(%d) hdr_flag 0x%x\n", + __func__, pic, cuva_count, cuva_size, dec->hdr_flag); + if (cuva_size > 0 && cuva_count > 0) { + int new_size; + char *new_buf; + + new_size = cuva_size; + new_buf = vzalloc(new_size); + if (new_buf) { + unsigned char *p = new_buf; + int len = 0; + pic->cuva_data_buf = new_buf; + + for (i = 0; i < cuva_count; i += 4) { + int j; + + for (j = 0; j < 4; j++) { + unsigned short aa = cuva_adr[i + 3 - j]; + *p = aa & 0xff; + p++; + len++; + } + } + if (len > 0) { + pic->cuva_data_size = len; + } + + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "cuva: (size %d)\n", + pic->cuva_data_size); + if (get_dbg_flag(dec) & AVS2_DBG_HDR_DATA) { + for (i = 0; i < pic->cuva_data_size; i++) { + pr_info("%02x ", pic->cuva_data_buf[i]); + if (((i + 1) & 0xf) == 0) + pr_info("\n"); + } + pr_info("\n"); + } + + } else { + avs2_print(dec, 0, "new buf alloc failed\n"); + if (pic->cuva_data_buf) + vfree(pic->cuva_data_buf); + pic->cuva_data_buf = NULL; + pic->cuva_data_size = 0; + } + } +} + +static void release_cuva_data(struct avs2_frame_s *pic) +{ + if (pic == NULL) + return; + if (pic->cuva_data_buf) { + vfree(pic->cuva_data_buf); + } + pic->cuva_data_buf = NULL; + pic->cuva_data_size = 0; +} + +static void avs2_config_work_space_hw(struct AVS2Decoder_s *dec) +{ + struct BuffInfo_s *buf_spec = dec->work_space_buf; +#ifdef LOSLESS_COMPRESS_MODE + int losless_comp_header_size = + compute_losless_comp_header_size( + dec, dec->init_pic_w, + dec->init_pic_h); + int losless_comp_body_size = + compute_losless_comp_body_size(dec, + dec->init_pic_w, + dec->init_pic_h, buf_alloc_depth == 10); +#endif +#ifdef AVS2_10B_MMU + unsigned int data32; +#endif + if (debug && dec->init_flag == 0) + avs2_print(dec, 0, + "%s %x %x %x %x %x %x %x %x %x %x %x %x 
/*
 * avs2_config_work_space_hw() - one-time (per init) programming of all
 * auxiliary work-space buffer addresses into the decoder.
 *
 * @dec: decoder instance; dec->work_space_buf describes the buffers.
 *
 * NOTE(review): data32 is declared only under AVS2_10B_MMU but is also
 * used in the LOSLESS_COMPRESS_MODE and CO_MV_COMPRESS sections below --
 * building with LOSLESS_COMPRESS_MODE and without AVS2_10B_MMU would not
 * compile; presumably those configs are never combined, confirm.
 */
static void avs2_config_work_space_hw(struct AVS2Decoder_s *dec)
{
	struct BuffInfo_s *buf_spec = dec->work_space_buf;
#ifdef LOSLESS_COMPRESS_MODE
	int losless_comp_header_size =
		compute_losless_comp_header_size(
			dec, dec->init_pic_w,
			dec->init_pic_h);
	int losless_comp_body_size =
		compute_losless_comp_body_size(dec,
			dec->init_pic_w,
			dec->init_pic_h, buf_alloc_depth == 10);
#endif
#ifdef AVS2_10B_MMU
	unsigned int data32;
#endif
	if (debug && dec->init_flag == 0)
		avs2_print(dec, 0,
			"%s %x %x %x %x %x %x %x %x %x %x %x %x %x\n",
			__func__,
			buf_spec->ipp.buf_start,
			buf_spec->start_adr,
			buf_spec->short_term_rps.buf_start,
			buf_spec->rcs.buf_start,
			buf_spec->sps.buf_start,
			buf_spec->pps.buf_start,
			buf_spec->sao_up.buf_start,
			buf_spec->swap_buf.buf_start,
			buf_spec->swap_buf2.buf_start,
			buf_spec->scalelut.buf_start,
			buf_spec->dblk_para.buf_start,
			buf_spec->dblk_data.buf_start,
			buf_spec->dblk_data2.buf_start);
	WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, buf_spec->ipp.buf_start);
	if ((debug & AVS2_DBG_SEND_PARAM_WITH_REG) == 0)
		WRITE_VREG(HEVC_RPM_BUFFER, (u32)dec->rpm_phy_addr);
	/* NOTE(review): the short_term_rps buffer is repurposed here as the
	 * ALF swap area -- intentional reuse, see register name */
	WRITE_VREG(AVS2_ALF_SWAP_BUFFER, buf_spec->short_term_rps.buf_start);
	WRITE_VREG(HEVC_RCS_BUFFER, buf_spec->rcs.buf_start);
	WRITE_VREG(HEVC_SPS_BUFFER, buf_spec->sps.buf_start);
	WRITE_VREG(HEVC_PPS_BUFFER, buf_spec->pps.buf_start);
	//WRITE_VREG(HEVC_SAO_UP, buf_spec->sao_up.buf_start);
#ifdef AVS2_10B_MMU
	WRITE_VREG(AVS2_MMU_MAP_BUFFER, dec->frame_mmu_map_phy_addr);
#else
	WRITE_VREG(HEVC_STREAM_SWAP_BUFFER, buf_spec->swap_buf.buf_start);
#endif
	WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, buf_spec->swap_buf2.buf_start);
	//WRITE_VREG(HEVC_SCALELUT, buf_spec->scalelut.buf_start);

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) {
		if (buf_spec->max_width <= 4096 && buf_spec->max_height <= 2304)
			WRITE_VREG(HEVC_DBLK_CFG3, 0x404010); /* default value */
		else
			WRITE_VREG(HEVC_DBLK_CFG3, 0x808020); /* make left storage 2 x 4k */
		avs2_print(dec, AVS2_DBG_BUFMGR,
			"HEVC_DBLK_CFG3 = %x\n", READ_VREG(HEVC_DBLK_CFG3));
	}

	/* cfg_p_addr */
	WRITE_VREG(HEVC_DBLK_CFG4, buf_spec->dblk_para.buf_start);
	/* cfg_d_addr */
	WRITE_VREG(HEVC_DBLK_CFG5, buf_spec->dblk_data.buf_start);

	WRITE_VREG(HEVC_DBLK_CFGE, buf_spec->dblk_data2.buf_start);

#ifdef LOSLESS_COMPRESS_MODE
	data32 = READ_VREG(HEVC_SAO_CTRL5);
#if 1
	data32 &= ~(1<<9);
#else
	if (params->p.bit_depth != 0x00)
		data32 &= ~(1<<9);
	else
		data32 |= (1<<9);
#endif
	WRITE_VREG(HEVC_SAO_CTRL5, data32);
#ifdef AVS2_10B_MMU
	/* bit[4]: paged_mem_mode */
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4));
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0);
#else
	/* bit[3]: smem mode */
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0<<3));

	WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, (losless_comp_body_size >> 5));
#endif
	/*WRITE_VREG(HEVCD_MPP_DECOMP_CTL2,(losless_comp_body_size >> 5));*/
	/*WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,(0xff<<20) | (0xff<<10) | 0xff);*/
	/* 8-bit mode */
	WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size);
	WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size);
	WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size);
#else
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31);
#endif

#ifdef AVS2_10B_MMU
	WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR, buf_spec->mmu_vbh.buf_start);
	WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR, buf_spec->mmu_vbh.buf_start +
		VBH_BUF_SIZE(buf_spec));
	/*data32 = READ_VREG(HEVC_SAO_CTRL9);*/
	/*data32 |= 0x1;*/
	/*WRITE_VREG(HEVC_SAO_CTRL9, data32);*/

	/* use HEVC_CM_HEADER_START_ADDR */
	data32 = READ_VREG(HEVC_SAO_CTRL5);
	data32 |= (1<<10);
#if 1
	if (debug & AVS2_DBG_FORCE_UNCOMPRESS)
		data32 |= 0x80;
#endif
	WRITE_VREG(HEVC_SAO_CTRL5, data32);

#endif

	WRITE_VREG(LMEM_DUMP_ADR, (u32)dec->lmem_phy_addr);
#if 1
/*MULTI_INSTANCE_SUPPORT*/
	/* new added in simulation??? */
	WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, buf_spec->mpred_above.buf_start);
#endif
#ifdef CO_MV_COMPRESS
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) {
		data32 = READ_VREG(HEVC_MPRED_CTRL4);
		data32 |= (1 << 1);
		WRITE_VREG(HEVC_MPRED_CTRL4, data32);
	}
#endif
}

/*
 * decomp_perfcount_reset() - pulse the decompressor performance-counter
 * reset bit (write 1 then 0) when cache debugging is enabled.
 */
static void decomp_perfcount_reset(void)
{
	if (debug & AVS2_DBG_CACHE)
		pr_info("[cache_util.c] Entered decomp_perfcount_reset...\n");
	WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x1);
	WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x0);
	return;
}

/*
 * mcrcc_perfcount_reset() - pulse the MCRCC performance-counter reset
 * bit (write 1 then 0).
 */
static void mcrcc_perfcount_reset(void)
{
	if (debug & AVS2_DBG_CACHE)
		pr_info("[cache_util.c] Entered mcrcc_perfcount_reset...\n");
	WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x1);
	WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x0);
	return;
}
/*
 * avs2_init_decoder_hw() - bitstream-level decoder core initialisation.
 *
 * @dec: decoder instance.
 *
 * Programs the parser interrupt/shift/CABAC blocks, clears the IQIT
 * scale LUT, selects the decode mode (single vs multi, frame vs stream
 * based) with bufmgr-error recovery, sends the parser command table,
 * loads the AVS2 default weighting-quant matrices, resets IPP/MPP, and
 * configures the de-block output path. Register write order follows the
 * hardware bring-up sequence -- do not reorder.
 */
static void avs2_init_decoder_hw(struct AVS2Decoder_s *dec)
{
	unsigned int data32;
	unsigned int decode_mode;
	int i;

	/*if (debug & AVS2_DBG_BUFMGR_MORE)
		pr_info("%s\n", __func__);*/
	data32 = READ_VREG(HEVC_PARSER_INT_CONTROL);
#if 1
	/* set bit 31~29 to 3 if HEVC_STREAM_FIFO_CTL[29] is 1 */
	data32 &= ~(7 << 29);
	data32 |= (3 << 29);
#endif
	data32 = data32 |
		(1 << 24) |/*stream_buffer_empty_int_amrisc_enable*/
		(1 << 22) |/*stream_fifo_empty_int_amrisc_enable*/
		(1 << 7) |/*dec_done_int_cpu_enable*/
		(1 << 4) |/*startcode_found_int_cpu_enable*/
		(0 << 3) |/*startcode_found_int_amrisc_enable*/
		(1 << 0) /*parser_int_enable*/
		;
	WRITE_VREG(HEVC_PARSER_INT_CONTROL, data32);

	data32 = READ_VREG(HEVC_SHIFT_STATUS);
	data32 = data32 |
		(0 << 1) |/*emulation_check_off -- AVS2 startcode handling
			    does not use byte-emulation removal here*/
		(1 << 0)/*startcode_check_on*/
		;
	WRITE_VREG(HEVC_SHIFT_STATUS, data32);
	WRITE_VREG(HEVC_SHIFT_CONTROL,
		(6 << 20) | /* emu_push_bits (6-bits for AVS2)*/
		(0 << 19) | /* emu_3_enable, maybe turned on in microcode*/
		(0 << 18) | /* emu_2_enable, maybe turned on in microcode*/
		(0 << 17) | /* emu_1_enable, maybe turned on in microcode*/
		(0 << 16) | /* emu_0_enable, maybe turned on in microcode*/
		(0 << 14) | /*disable_start_code_protect*/
		(3 << 6) | /* sft_valid_wr_position*/
		(2 << 4) | /* emulate_code_length_sub_1*/
		(2 << 1) | /* start_code_length_sub_1*/
		(1 << 0) /* stream_shift_enable*/
		);

	WRITE_VREG(HEVC_SHIFT_LENGTH_PROTECT,
		(0 << 30) | /*data_protect_fill_00_enable*/
		(1 << 29) /*data_protect_fill_ff_enable*/
		);
	WRITE_VREG(HEVC_CABAC_CONTROL,
		(1 << 0)/*cabac_enable*/
		);

	WRITE_VREG(HEVC_PARSER_CORE_CONTROL,
		(1 << 0)/* hevc_parser_core_clk_en*/
		);

	WRITE_VREG(HEVC_DEC_STATUS_REG, 0);

	/* Initial IQIT_SCALELUT memory -- just to avoid X in simulation */
	if (is_rdma_enable())
		rdma_back_end_work(dec->rdma_phy_adr, RDMA_SIZE);
	else {
		WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0);/*cfg_p_addr*/
		for (i = 0; i < 1024; i++)
			WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, 0);
	}

#ifdef ENABLE_SWAP_TEST
	WRITE_VREG(HEVC_STREAM_SWAP_TEST, 100);
#else
	WRITE_VREG(HEVC_STREAM_SWAP_TEST, 0);
#endif
	if (!dec->m_ins_flag)
		decode_mode = DECODE_MODE_SINGLE;
	else if (vdec_frame_based(hw_to_vdec(dec)))
		decode_mode = DECODE_MODE_MULTI_FRAMEBASE;
	else
		decode_mode = DECODE_MODE_MULTI_STREAMBASE;
	/*
	 * Bufmgr-error recovery: after re_search_seq_threshold[7:0]
	 * consecutive errors (and at least [15:8] decoded frames),
	 * restart sequence-header search and skip P/B until the next I.
	 */
	if (dec->avs2_dec.bufmgr_error_flag &&
		(error_handle_policy & 0x1)) {
		dec->bufmgr_error_count++;
		dec->avs2_dec.bufmgr_error_flag = 0;
		if (dec->bufmgr_error_count >
			(re_search_seq_threshold & 0xff)
			&& dec->frame_count >
			((re_search_seq_threshold >> 8) & 0xff)) {
			struct avs2_decoder *avs2_dec = &dec->avs2_dec;
			dec->start_decoding_flag = 0;
			avs2_dec->hd.vec_flag = 1;
			dec->skip_PB_before_I = 1;
			avs2_print(dec, 0,
				"!!Bufmgr error, search seq again (0x%x %d %d)\n",
				error_handle_policy,
				dec->frame_count,
				dec->bufmgr_error_count);
			dec->bufmgr_error_count = 0;
		}
	}
	decode_mode |= (dec->start_decoding_flag << 16);

	WRITE_VREG(DECODE_MODE, decode_mode);
	WRITE_VREG(HEVC_DECODE_SIZE, 0);
	WRITE_VREG(HEVC_DECODE_COUNT, 0);

	/* Send parser_cmd table to the hardware */
	WRITE_VREG(HEVC_PARSER_CMD_WRITE, (1 << 16) | (0 << 0));
	for (i = 0; i < PARSER_CMD_NUMBER; i++)
		WRITE_VREG(HEVC_PARSER_CMD_WRITE, parser_cmd[i]);
	WRITE_VREG(HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0);
	WRITE_VREG(HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1);
	WRITE_VREG(HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2);

	WRITE_VREG(HEVC_PARSER_IF_CONTROL,
		(1 << 9) | /* parser_alf_if_en*/
		/* (1 << 8) |*/ /*sao_sw_pred_enable*/
		(1 << 5) | /*parser_sao_if_en*/
		(1 << 2) | /*parser_mpred_if_en*/
		(1 << 0) /*parser_scaler_if_en*/
		);

#ifdef MULTI_INSTANCE_SUPPORT
	WRITE_VREG(HEVC_MPRED_INT_STATUS, (1<<31));

	WRITE_VREG(HEVC_PARSER_RESULT_3, 0xffffffff);

	/* repeated reads/writes around the mpred reset follow the
	 * hardware's settle requirement */
	for (i = 0; i < 8; i++)
		data32 = READ_VREG(HEVC_MPRED_ABV_START_ADDR);

	WRITE_VREG(DOS_SW_RESET3, (1<<18)); /* reset mpred */
	WRITE_VREG(DOS_SW_RESET3, 0);
	WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, data32);
	WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, data32);
	WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, data32);
#endif
	/*End of Multi-instance*/
	/*Changed to Start MPRED in microcode*/
	/*
	pr_info("[test.c] Start MPRED\n");
	WRITE_VREG(HEVC_MPRED_INT_STATUS,
		(1<<31)
		);
	*/

	/* AVS2 default seq_wq_matrix config */

	avs2_print(dec, AVS2_DBG_BUFMGR_MORE,
		"Config AVS2 default seq_wq_matrix ...\n");
	/* 4x4: default seq_wq_matrix_4x4 begins at LUT address 64 */
	WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 64);
	for (i = 0; i < 16; i++)
		WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, g_WqMDefault4x4[i]);

	/* 8x8: default seq_wq_matrix_8x8 begins at LUT address 0 */
	WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0);
	for (i = 0; i < 64; i++)
		WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, g_WqMDefault8x8[i]);

	/* pulse the IPP/MPP software reset, then enable IPP */
	WRITE_VREG(HEVCD_IPP_TOP_CNTL,
		(0 << 1) | /*enable ipp*/
		(1 << 0) /*software reset ipp and mpp*/
		);
	WRITE_VREG(HEVCD_IPP_TOP_CNTL,
		(1 << 1) | /*enable ipp*/
		(0 << 0) /*software reset ipp and mpp*/
		);
#if 0
/*AVS2_10B_NV21*/
	/*Enable NV21 reference read mode for MC*/
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31);
#endif
	/* Init dblk: [3:0] cfg_video_type -> 2 == AVS2 */
	data32 = READ_VREG(HEVC_DBLK_CFGB);
	data32 |= (2 << 0);

	data32 &= (~0x300); /*[8]: first write enable (compress)
			      [9]: double write enable (uncompress)*/
	if (get_double_write_mode(dec) == 0)
		data32 |= (0x1 << 8); /*enable first write*/
	else if (get_double_write_mode(dec) == 0x10)
		data32 |= (0x1 << 9); /*double write only*/
	else
		data32 |= ((0x1 << 8) | (0x1 << 9));
	WRITE_VREG(HEVC_DBLK_CFGB, data32);

	WRITE_VREG(HEVC_DBLK_CFG0, (1 << 0)); /* [0] rst_sync*/
	avs2_print(dec, AVS2_DBG_BUFMGR_MORE,
		"Bitstream level Init for DBLK .Done.\n");

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) {
		mcrcc_perfcount_reset();
		decomp_perfcount_reset();
	}

	return;
}
AVS2_DBG_BUFMGR_MORE, + "Bitstream level Init for DBLK .Done.\n"); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + mcrcc_perfcount_reset(); + decomp_perfcount_reset(); + } + + return; +} + + +#ifdef CONFIG_HEVC_CLK_FORCED_ON +static void config_avs2_clk_forced_on(void) +{ + unsigned int rdata32; + /*IQIT*/ + rdata32 = READ_VREG(HEVC_IQIT_CLK_RST_CTRL); + WRITE_VREG(HEVC_IQIT_CLK_RST_CTRL, rdata32 | (0x1 << 2)); + + /* DBLK*/ + rdata32 = READ_VREG(HEVC_DBLK_CFG0); + WRITE_VREG(HEVC_DBLK_CFG0, rdata32 | (0x1 << 2)); + + /* SAO*/ + rdata32 = READ_VREG(HEVC_SAO_CTRL1); + WRITE_VREG(HEVC_SAO_CTRL1, rdata32 | (0x1 << 2)); + + /*MPRED*/ + rdata32 = READ_VREG(HEVC_MPRED_CTRL1); + WRITE_VREG(HEVC_MPRED_CTRL1, rdata32 | (0x1 << 24)); + + /* PARSER*/ + rdata32 = READ_VREG(HEVC_STREAM_CONTROL); + WRITE_VREG(HEVC_STREAM_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_SHIFT_CONTROL); + WRITE_VREG(HEVC_SHIFT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_CABAC_CONTROL); + WRITE_VREG(HEVC_CABAC_CONTROL, rdata32 | (0x1 << 13)); + rdata32 = READ_VREG(HEVC_PARSER_CORE_CONTROL); + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_INT_CONTROL); + WRITE_VREG(HEVC_PARSER_INT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_IF_CONTROL); + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + rdata32 | (0x1 << 6) | (0x1 << 3) | (0x1 << 1)); + + /*IPP*/ + rdata32 = READ_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG); + WRITE_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG, rdata32 | 0xffffffff); + + /* MCRCC*/ + rdata32 = READ_VREG(HEVCD_MCRCC_CTL1); + WRITE_VREG(HEVCD_MCRCC_CTL1, rdata32 | (0x1 << 3)); +} +#endif + +static void avs2_local_uninit(struct AVS2Decoder_s *dec) +{ + dec->rpm_ptr = NULL; + dec->lmem_ptr = NULL; + if (dec->rpm_addr) { + dma_free_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, dec->rpm_addr, + dec->rpm_phy_addr); + dec->rpm_addr = NULL; + } + + if (dec->cuva_addr) { + 
dma_free_coherent(amports_get_dma_device(), + dec->cuva_size, dec->cuva_addr, + dec->cuva_phy_addr); + dec->cuva_addr = NULL; + } + + if (dec->lmem_addr) { + if (dec->lmem_phy_addr) + dma_free_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, dec->lmem_addr, + dec->lmem_phy_addr); + dec->lmem_addr = NULL; + } + +#ifdef AVS2_10B_MMU + if (dec->frame_mmu_map_addr) { + if (dec->frame_mmu_map_phy_addr) + dma_free_coherent(amports_get_dma_device(), + get_frame_mmu_map_size(dec), dec->frame_mmu_map_addr, + dec->frame_mmu_map_phy_addr); + dec->frame_mmu_map_addr = NULL; + } +#endif + if (dec->gvs) + vfree(dec->gvs); + dec->gvs = NULL; +} + +static int avs2_local_init(struct AVS2Decoder_s *dec) +{ + int ret = -1; + /*int losless_comp_header_size, losless_comp_body_size;*/ + + struct BuffInfo_s *cur_buf_info = NULL; + + cur_buf_info = &dec->work_space_buf_store; + if (force_bufspec) { + memcpy(cur_buf_info, &amvavs2_workbuff_spec[force_bufspec & 0xf], + sizeof(struct BuffInfo_s)); + pr_info("force buffer spec %d\n", force_bufspec & 0xf); + } else { + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + memcpy(cur_buf_info, &amvavs2_workbuff_spec[2], /* 8k */ + sizeof(struct BuffInfo_s)); + else + memcpy(cur_buf_info, &amvavs2_workbuff_spec[1], /* 4k */ + sizeof(struct BuffInfo_s)); + } else + memcpy(cur_buf_info, &amvavs2_workbuff_spec[0],/* 1080p */ + sizeof(struct BuffInfo_s)); + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) { + memcpy(cur_buf_info, &amvavs2_workbuff_spec[5], /* 8k */ + sizeof(struct BuffInfo_s)); + } else + memcpy(cur_buf_info, &amvavs2_workbuff_spec[3],/* 1080p */ + sizeof(struct BuffInfo_s)); + } + } + + cur_buf_info->start_adr = dec->buf_start; +#ifndef AVS2_10B_MMU + dec->mc_buf_spec.buf_end = dec->buf_start + dec->buf_size; +#endif + + init_buff_spec(dec, cur_buf_info); + + 
init_avs2_decoder(&dec->avs2_dec); + +#ifdef AVS2_10B_MMU + avs2_bufmgr_init(dec, cur_buf_info, NULL); +#else + dec->mc_buf_spec.buf_start = (cur_buf_info->end_adr + 0xffff) + & (~0xffff); + dec->mc_buf_spec.buf_size = (dec->mc_buf_spec.buf_end + - dec->mc_buf_spec.buf_start); + if (debug) { + pr_err("dec->mc_buf_spec.buf_start %x-%x\n", + dec->mc_buf_spec.buf_start, + dec->mc_buf_spec.buf_start + + dec->mc_buf_spec.buf_size); + } + avs2_bufmgr_init(dec, cur_buf_info, &dec->mc_buf_spec); +#endif + + if (!vdec_is_support_4k() + && (buf_alloc_width > 1920 && buf_alloc_height > 1088)) { + buf_alloc_width = 1920; + buf_alloc_height = 1088; + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + buf_alloc_width = 8192; + buf_alloc_height = 4608; + } + dec->init_pic_w = buf_alloc_width ? buf_alloc_width : + (dec->vavs2_amstream_dec_info.width ? + dec->vavs2_amstream_dec_info.width : + dec->work_space_buf->max_width); + dec->init_pic_h = buf_alloc_height ? buf_alloc_height : + (dec->vavs2_amstream_dec_info.height ? 
+ dec->vavs2_amstream_dec_info.height : + dec->work_space_buf->max_height); +#if 0 +/*ndef MV_USE_FIXED_BUF*/ + if (init_mv_buf_list(dec) < 0) { + pr_err("%s: init_mv_buf_list fail\n", __func__); + return -1; + } +#endif + +#ifndef AVS2_10B_MMU + init_buf_list(dec); +#else + dec->used_buf_num = max_buf_num + dec->dynamic_buf_margin; + if (dec->used_buf_num > MAX_BUF_NUM) + dec->used_buf_num = MAX_BUF_NUM; + if (dec->used_buf_num > FRAME_BUFFERS) + dec->used_buf_num = FRAME_BUFFERS; +#endif + dec->avs2_dec.ref_maxbuffer = dec->used_buf_num - 1; + /*init_pic_list(dec);*/ + + pts_unstable = ((unsigned long)(dec->vavs2_amstream_dec_info.param) + & 0x40) >> 6; + + if ((debug & AVS2_DBG_SEND_PARAM_WITH_REG) == 0) { + dec->rpm_addr = dma_alloc_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, + &dec->rpm_phy_addr, GFP_KERNEL); + if (dec->rpm_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + return -1; + } + avs2_print(dec, AVS2_DBG_BUFMGR, + "rpm_phy_addr %x\n", (u32) dec->rpm_phy_addr); + dec->rpm_ptr = dec->rpm_addr; + } + + if (cuva_buf_size > 0) { + dec->cuva_size = AUX_BUF_ALIGN(cuva_buf_size); + + dec->cuva_addr = dma_alloc_coherent(amports_get_dma_device(), + dec->cuva_size, &dec->cuva_phy_addr, GFP_KERNEL); + avs2_print(dec, AVS2_DBG_BUFMGR, + "%s, cuva_size = %d cuva_phy_addr %x dec->cuva_addr = %px\n", + __func__, dec->cuva_size, (u32)dec->cuva_phy_addr, dec->cuva_addr); + if (dec->cuva_addr == NULL) { + pr_err("%s: failed to alloc cuva buffer\n", __func__); + return -1; + } + } + + dec->lmem_addr = dma_alloc_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, + &dec->lmem_phy_addr, GFP_KERNEL); + if (dec->lmem_addr == NULL) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + return -1; + } else + avs2_print(dec, AVS2_DBG_BUFMGR, + "%s, lmem_phy_addr %x\n", + __func__, (u32)dec->lmem_phy_addr); +/* + dec->lmem_phy_addr = dma_map_single(amports_get_dma_device(), + dec->lmem_addr, LMEM_BUF_SIZE, DMA_BIDIRECTIONAL); + if 
(dma_mapping_error(amports_get_dma_device(), + dec->lmem_phy_addr)) { + pr_err("%s: failed to map lmem buffer\n", __func__); + kfree(dec->lmem_addr); + dec->lmem_addr = NULL; + return -1; + } +*/ + dec->lmem_ptr = dec->lmem_addr; + + +#ifdef AVS2_10B_MMU + dec->frame_mmu_map_addr = dma_alloc_coherent(amports_get_dma_device(), + get_frame_mmu_map_size(dec), + &dec->frame_mmu_map_phy_addr, GFP_KERNEL); + if (dec->frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(dec->frame_mmu_map_addr, 0, get_frame_mmu_map_size(dec)); +/* dec->frame_mmu_map_phy_addr = dma_map_single(amports_get_dma_device(), + dec->frame_mmu_map_addr, FRAME_MMU_MAP_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(amports_get_dma_device(), + dec->frame_mmu_map_phy_addr)) { + pr_err("%s: failed to map count_buffer\n", __func__); + kfree(dec->frame_mmu_map_addr); + dec->frame_mmu_map_addr = NULL; + return -1; + }*/ +#endif + + ret = 0; + return ret; +} + +/******************************************** + * Mailbox command + ********************************************/ +#define CMD_FINISHED 0 +#define CMD_ALLOC_VIEW 1 +#define CMD_FRAME_DISPLAY 3 +#define CMD_DEBUG 10 + + +#define DECODE_BUFFER_NUM_MAX 32 +#define DISPLAY_BUFFER_NUM 6 + +#define video_domain_addr(adr) (adr&0x7fffffff) +#define DECODER_WORK_SPACE_SIZE 0x800000 + +#define spec2canvas(x) \ + (((x)->uv_canvas_index << 16) | \ + ((x)->uv_canvas_index << 8) | \ + ((x)->y_canvas_index << 0)) + + +static void set_canvas(struct AVS2Decoder_s *dec, + struct avs2_frame_s *pic) +{ + int canvas_w = ALIGN(pic->pic_w, 64)/4; + int canvas_h = ALIGN(pic->pic_h, 32)/4; + int blkmode = mem_map_mode; + struct vdec_s *vdec = hw_to_vdec(dec); + /*CANVAS_BLKMODE_64X32*/ + if (pic->double_write_mode) { + canvas_w = pic->pic_w / + get_double_write_ratio(pic->double_write_mode); + canvas_h = pic->pic_h / + get_double_write_ratio(pic->double_write_mode); + /*sao_crtl1 aligned with 64*/ + canvas_w = 
ALIGN(canvas_w, 64); + canvas_h = ALIGN(canvas_h, 32); + + if (vdec->parallel_dec == 1) { + if (pic->y_canvas_index == -1) + pic->y_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + if (pic->uv_canvas_index == -1) + pic->uv_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + } else { + pic->y_canvas_index = 128 + pic->index * 2; + pic->uv_canvas_index = 128 + pic->index * 2 + 1; + } + + config_cav_lut_ex(pic->y_canvas_index, + pic->dw_y_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, 0x7, VDEC_HEVC); + config_cav_lut_ex(pic->uv_canvas_index, + pic->dw_u_v_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, 0x7, VDEC_HEVC); +#ifdef MULTI_INSTANCE_SUPPORT + pic->canvas_config[0].phy_addr = + pic->dw_y_adr; + pic->canvas_config[0].width = + canvas_w; + pic->canvas_config[0].height = + canvas_h; + pic->canvas_config[0].block_mode = + blkmode; + pic->canvas_config[0].endian = 7; + + pic->canvas_config[1].phy_addr = + pic->dw_u_v_adr; + pic->canvas_config[1].width = + canvas_w; + pic->canvas_config[1].height = + canvas_h; + pic->canvas_config[1].block_mode = + blkmode; + pic->canvas_config[1].endian = 7; +#endif + } else { + #ifndef AVS2_10B_MMU + if (vdec->parallel_dec == 1) { + if (pic->y_canvas_index == -1) + pic->y_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + if (pic->uv_canvas_index == -1) + pic->uv_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + } else { + pic->y_canvas_index = 128 + pic->index; + pic->uv_canvas_index = 128 + pic->index; + } + + config_cav_lut_ex(pic->y_canvas_index, + pic->mc_y_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, 0x7, VDEC_HEVC); + config_cav_lut_ex(pic->uv_canvas_index, + pic->mc_u_v_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, 0x7, VDEC_HEVC); + #endif + } +} + +static void set_frame_info(struct AVS2Decoder_s *dec, struct vframe_s *vf) +{ + unsigned int ar; + + vf->duration = dec->frame_dur; + vf->duration_pulldown = 0; + vf->flag = 0; + 
vf->prop.master_display_colour = dec->vf_dp; + if (dec->hdr_flag & HDR_CUVA_MASK) + dec->video_signal_type |= 1 << 31; + vf->signal_type = dec->video_signal_type; + + avs2_print(dec, AVS2_DBG_HDR_INFO, + "signal_typesignal_type 0x%x \n", + vf->signal_type); + + ar = min_t(u32, dec->frame_ar, DISP_RATIO_ASPECT_RATIO_MAX); + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + + vf->sidebind_type = dec->sidebind_type; + vf->sidebind_channel_id = dec->sidebind_channel_id; + + return; +} + +static int vavs2_vf_states(struct vframe_states *states, void *op_arg) +{ + struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)op_arg; + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&dec->newframe_q); + states->buf_avail_num = kfifo_len(&dec->display_q); + + if (step == 2) + states->buf_avail_num = 0; + return 0; +} + +static struct vframe_s *vavs2_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)op_arg; + if (step == 2) + return NULL; + + if (force_disp_pic_index & 0x100) { + if (force_disp_pic_index & 0x200) + return NULL; + return &dec->vframe_dummy; + } + + if (kfifo_peek(&dec->display_q, &vf)) + return vf; + + return NULL; +} + +static struct avs2_frame_s *get_pic_by_index( + struct AVS2Decoder_s *dec, int index) +{ + int i; + struct avs2_frame_s *pic = NULL; + if (index == (dec->used_buf_num - 1)) + pic = dec->avs2_dec.m_bg; + else if (index >= 0 && index < dec->used_buf_num) { + for (i = 0; i < dec->used_buf_num; i++) { + if (dec->avs2_dec.fref[i]->index == index) + pic = dec->avs2_dec.fref[i]; + } + } + return pic; +} + +static struct vframe_s *vavs2_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)op_arg; + if (step == 2) + return NULL; + else if (step == 1) + step = 2; + + if (force_disp_pic_index & 0x100) { + int idx = force_disp_pic_index & 0xff; + struct avs2_frame_s *pic = NULL; + if (idx >= 0 + && idx < 
dec->avs2_dec.ref_maxbuffer) + pic = get_pic_by_index(dec, idx); + if (pic == NULL) + return NULL; + if (force_disp_pic_index & 0x200) + return NULL; + + vf = &dec->vframe_dummy; + + set_vframe(dec, vf, pic, 1); + + force_disp_pic_index |= 0x200; + return vf; + } + + if (kfifo_get(&dec->display_q, &vf)) { + uint8_t index = vf->index & 0xff; + ATRACE_COUNTER(dec->disp_q_name, kfifo_len(&dec->display_q)); + if (index < dec->used_buf_num) { + struct avs2_frame_s *pic = get_pic_by_index(dec, index); + if (pic == NULL && + (debug & AVS2_DBG_PIC_LEAK)) { + int i; + avs2_print(dec, 0, + "%s error index 0x%x pic not exist\n", + __func__, index); + dump_pic_list(dec); + for (i = 0; i < 10; i++) { + pic = get_pic_by_index(dec, index); + pr_info("pic = %p\n", pic); + } + + if (debug & AVS2_DBG_PIC_LEAK) + debug |= AVS2_DBG_PIC_LEAK_WAIT; + return NULL; + } + vf->index_disp = atomic_read(&dec->vf_get_count); + atomic_add(1, &dec->vf_get_count); + if (pic) + avs2_print(dec, AVS2_DBG_BUFMGR, + "%s index 0x%x pos %d getcount %d type 0x%x w/h %d/%d, pts %d, %lld\n", + __func__, index, + pic->imgtr_fwRefDistance_bak, + dec->vf_get_count, + vf->type, + vf->width, vf->height, + vf->pts, + vf->pts_us64); + return vf; + } + } + return NULL; +} + +static void vavs2_vf_put(struct vframe_s *vf, void *op_arg) +{ + struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)op_arg; + uint8_t index; + + if (vf == (&dec->vframe_dummy)) + return; + + if (!vf) + return; + + index = vf->index & 0xff; + + kfifo_put(&dec->newframe_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(dec->new_q_name, kfifo_len(&dec->newframe_q)); + atomic_add(1, &dec->vf_put_count); + avs2_print(dec, AVS2_DBG_BUFMGR, + "%s index putcount 0x%x %d\n", + __func__, vf->index, + dec->vf_put_count); + + if (index < dec->used_buf_num) { + unsigned long flags; + struct avs2_frame_s *pic; + + lock_buffer(dec, flags); + pic = get_pic_by_index(dec, index); + if (pic && pic->vf_ref > 0) + pic->vf_ref--; + else { + if (pic) + 
avs2_print(dec, 0, + "%s, error pic (index %d) vf_ref is %d\n", + __func__, index, pic->vf_ref); + else + avs2_print(dec, 0, + "%s, error pic (index %d) is NULL\n", + __func__, index); + } + if (dec->wait_buf) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + dec->last_put_idx = index; + dec->new_frame_displayed++; + unlock_buffer(dec, flags); + } + +} + +static int vavs2_event_cb(int type, void *data, void *private_data) +{ + struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)private_data; + + if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(hw_to_vdec(dec)); + else + req->req_result[0] = 0xffffffff; + } else if (type & VFRAME_EVENT_RECEIVER_GET_AUX_DATA) { + struct provider_aux_req_s *req = + (struct provider_aux_req_s *)data; + unsigned char index; + unsigned long flags; + struct avs2_frame_s *pic; + + if (!req->vf) { + req->aux_size = atomic_read(&dec->vf_put_count); + return 0; + } + lock_buffer(dec, flags); + index = req->vf->index & 0xff; + req->aux_buf = NULL; + req->aux_size = 0; + req->format = VFORMAT_AVS2; + if (index < dec->used_buf_num) { + pic = get_pic_by_index(dec, index); + req->aux_buf = pic->cuva_data_buf; + req->aux_size = pic->cuva_data_size; + } + unlock_buffer(dec, flags); + + avs2_print(dec, PRINT_FLAG_VDEC_STATUS, + "%s pic 0x%p index %d =>size %d\n", + __func__, pic, index, req->aux_size); + } + + return 0; +} + +static struct avs2_frame_s *get_disp_pic(struct AVS2Decoder_s *dec) +{ + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *pic = NULL; + int32_t j; + int32_t pre_disp_count_min = 0x7fffffff; + for (j = 0; j < avs2_dec->ref_maxbuffer; j++) { + if (avs2_dec->fref[j]->to_prepare_disp && + avs2_dec->fref[j]->to_prepare_disp < + pre_disp_count_min) { + pre_disp_count_min = + avs2_dec->fref[j]->to_prepare_disp; + pic = avs2_dec->fref[j]; + } + } + if (pic) + 
pic->to_prepare_disp = 0; + + return pic; + +} + + + +static void fill_frame_info(struct AVS2Decoder_s *dec, + struct avs2_frame_s *pic, unsigned int framesize, unsigned int pts) +{ + struct vframe_qos_s *vframe_qos = &dec->vframe_qos; + + if (pic->slice_type == I_IMG) + vframe_qos->type = 1; + else if (pic->slice_type == P_IMG) + vframe_qos->type = 2; + else if (pic->slice_type == B_IMG) + vframe_qos->type = 3; +/* +#define SHOW_QOS_INFO +*/ + if (input_frame_based(hw_to_vdec(dec))) + vframe_qos->size = pic->frame_size; + else + vframe_qos->size = framesize; + vframe_qos->pts = pts; +#ifdef SHOW_QOS_INFO + avs2_print(dec, 0, "slice:%d\n", pic->slice_type); +#endif + + + vframe_qos->max_mv = pic->max_mv; + vframe_qos->avg_mv = pic->avg_mv; + vframe_qos->min_mv = pic->min_mv; +#ifdef SHOW_QOS_INFO + avs2_print(dec, 0, "mv: max:%d, avg:%d, min:%d\n", + vframe_qos->max_mv, + vframe_qos->avg_mv, + vframe_qos->min_mv); +#endif + + vframe_qos->max_qp = pic->max_qp; + vframe_qos->avg_qp = pic->avg_qp; + vframe_qos->min_qp = pic->min_qp; +#ifdef SHOW_QOS_INFO + avs2_print(dec, 0, "qp: max:%d, avg:%d, min:%d\n", + vframe_qos->max_qp, + vframe_qos->avg_qp, + vframe_qos->min_qp); +#endif + + vframe_qos->max_skip = pic->max_skip; + vframe_qos->avg_skip = pic->avg_skip; + vframe_qos->min_skip = pic->min_skip; +#ifdef SHOW_QOS_INFO + avs2_print(dec, 0, "skip: max:%d, avg:%d, min:%d\n", + vframe_qos->max_skip, + vframe_qos->avg_skip, + vframe_qos->min_skip); +#endif + + vframe_qos->num++; + +} + +static void set_vframe(struct AVS2Decoder_s *dec, + struct vframe_s *vf, struct avs2_frame_s *pic, u8 dummy) +{ + unsigned long flags; + int stream_offset; + unsigned int frame_size = 0; + int pts_discontinue; + struct vdec_s *vdec = hw_to_vdec(dec); + stream_offset = pic->stream_offset; + avs2_print(dec, AVS2_DBG_BUFMGR, + "%s index = %d pos = %d\r\n", + __func__, pic->index, + pic->imgtr_fwRefDistance); + + if (pic->double_write_mode) + set_canvas(dec, pic); + + 
display_frame_count[dec->index]++; + + if (!dummy) { +#ifdef MULTI_INSTANCE_SUPPORT + if (vdec_frame_based(vdec)) { + vf->pts = pic->pts; + vf->pts_us64 = pic->pts64; + } else { +#endif + if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) { + /* if (pts_lookup_offset(PTS_TYPE_VIDEO, + stream_offset, &vf->pts, 0) != 0) { */ + if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, stream_offset, + &vf->pts, &frame_size, 0, + &vf->pts_us64) != 0) { +#ifdef DEBUG_PTS + dec->pts_missed++; +#endif + vf->pts = 0; + vf->pts_us64 = 0; + } + } + } +#ifdef DEBUG_PTS + else + dec->pts_hit++; +#endif + if (pts_unstable) + dec->pts_mode = PTS_NONE_REF_USE_DURATION; + + fill_frame_info(dec, pic, frame_size, vf->pts); + + if ((dec->pts_mode == PTS_NORMAL) && (vf->pts != 0) + && dec->get_frame_dur) { + int pts_diff = (int)vf->pts - dec->last_lookup_pts; + + if (pts_diff < 0) { + dec->pts_mode_switching_count++; + dec->pts_mode_recovery_count = 0; + + if (dec->pts_mode_switching_count >= + PTS_MODE_SWITCHING_THRESHOLD) { + dec->pts_mode = + PTS_NONE_REF_USE_DURATION; + pr_info + ("HEVC: switch to n_d mode.\n"); + } + + } else { + int p = PTS_MODE_SWITCHING_RECOVERY_THREASHOLD; + dec->pts_mode_recovery_count++; + if (dec->pts_mode_recovery_count > p) { + dec->pts_mode_switching_count = 0; + dec->pts_mode_recovery_count = 0; + } + } + } + + pts_discontinue = + (abs(dec->last_pts - vf->pts) >= + tsync_vpts_discontinuity_margin()); + + if (vf->pts != 0) + dec->last_lookup_pts = vf->pts; +#if 1 + if ((dec->pts_mode == PTS_NONE_REF_USE_DURATION) + && ((pic->slice_type != I_IMG) || (!pts_discontinue && + !first_pts_checkin_complete(PTS_TYPE_AUDIO)))) + vf->pts = dec->last_pts + DUR2PTS(dec->frame_dur); +#endif + dec->last_pts = vf->pts; + + if (vf->pts_us64 != 0) + dec->last_lookup_pts_us64 = vf->pts_us64; + +#if 1 + if ((dec->pts_mode == PTS_NONE_REF_USE_DURATION) + && ((pic->slice_type != I_IMG) || (!pts_discontinue && + !first_pts_checkin_complete(PTS_TYPE_AUDIO)))) { + vf->pts_us64 = 
+ dec->last_pts_us64 + + (DUR2PTS(dec->frame_dur) * 100 / 9); + } +#endif + dec->last_pts_us64 = vf->pts_us64; + avs2_print(dec, AVS2_DBG_OUT_PTS, + "avs2 dec out pts: vf->pts=%d, vf->pts_us64 = %lld\n", + vf->pts, vf->pts_us64); + } + + vf->index = 0xff00 | pic->index; + + if (pic->double_write_mode & 0x10) { + /* double write only */ + vf->compBodyAddr = 0; + vf->compHeadAddr = 0; + } else { +#ifdef AVS2_10B_MMU + vf->compBodyAddr = 0; + vf->compHeadAddr = pic->header_adr; +#else + vf->compBodyAddr = pic->mc_y_adr; /*body adr*/ + vf->compHeadAddr = pic->mc_y_adr + + pic->comp_body_size; + /*head adr*/ +#endif + } + if (pic->double_write_mode) { + vf->type = VIDTYPE_PROGRESSIVE | + VIDTYPE_VIU_FIELD; + vf->type |= VIDTYPE_VIU_NV21; + if (pic->double_write_mode == 3) { + vf->type |= VIDTYPE_COMPRESS; +#ifdef AVS2_10B_MMU + vf->type |= VIDTYPE_SCATTER; +#endif + } +#ifdef MULTI_INSTANCE_SUPPORT + if (dec->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + pic->canvas_config[0]; + vf->canvas0_config[1] = + pic->canvas_config[1]; + + vf->canvas1_config[0] = + pic->canvas_config[0]; + vf->canvas1_config[1] = + pic->canvas_config[1]; + + } else +#endif + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(pic); + } else { + vf->canvas0Addr = vf->canvas1Addr = 0; + vf->type = VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; +#ifdef AVS2_10B_MMU + vf->type |= VIDTYPE_SCATTER; +#endif + } + + switch (pic->bit_depth) { + case AVS2_BITS_8: + vf->bitdepth = BITDEPTH_Y8 | + BITDEPTH_U8 | BITDEPTH_V8; + break; + case AVS2_BITS_10: + case AVS2_BITS_12: + vf->bitdepth = BITDEPTH_Y10 | + BITDEPTH_U10 | BITDEPTH_V10; + break; + default: + vf->bitdepth = BITDEPTH_Y10 | + BITDEPTH_U10 | BITDEPTH_V10; + break; + } + if ((vf->type & VIDTYPE_COMPRESS) == 0) + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + if (pic->bit_depth == AVS2_BITS_8) + vf->bitdepth |= BITDEPTH_SAVING_MODE; + + set_frame_info(dec, vf); + /* 
if((vf->width!=pic->width)| + (vf->height!=pic->height)) */ + /* pr_info("aaa: %d/%d, %d/%d\n", + vf->width,vf->height, pic->width, + pic->height); */ + vf->width = pic->pic_w / + get_double_write_ratio(pic->double_write_mode); + vf->height = pic->pic_h / + get_double_write_ratio(pic->double_write_mode); + if (force_w_h != 0) { + vf->width = (force_w_h >> 16) & 0xffff; + vf->height = force_w_h & 0xffff; + } + vf->compWidth = pic->pic_w; + vf->compHeight = pic->pic_h; + if (force_fps & 0x100) { + u32 rate = force_fps & 0xff; + if (rate) + vf->duration = 96000/rate; + else + vf->duration = 0; + } +#ifdef AVS2_10B_MMU + if (vf->type & VIDTYPE_SCATTER) { + vf->mem_handle = decoder_mmu_box_get_mem_handle( + dec->mmu_box, + pic->index); + vf->mem_head_handle = decoder_bmmu_box_get_mem_handle( + dec->bmmu_box, + HEADER_BUFFER_IDX(pic->index)); + } else { + vf->mem_handle = decoder_bmmu_box_get_mem_handle( + dec->bmmu_box, + VF_BUFFER_IDX(pic->index)); + vf->mem_head_handle = decoder_bmmu_box_get_mem_handle( + dec->bmmu_box, + HEADER_BUFFER_IDX(pic->index)); + } +#else + vf->mem_handle = decoder_bmmu_box_get_mem_handle( + dec->bmmu_box, + VF_BUFFER_IDX(pic->index)); +#endif + if (!vdec->vbuf.use_ptsserv && vdec_stream_based(vdec)) { + vf->pts_us64 = stream_offset; + vf->pts = 0; + } + if (!dummy) { + lock_buffer(dec, flags); + pic->vf_ref = 1; + unlock_buffer(dec, flags); + } + atomic_add(1, &dec->vf_pre_count); +} + +static inline void dec_update_gvs(struct AVS2Decoder_s *dec) +{ + if (dec->gvs->frame_height != dec->frame_height) { + dec->gvs->frame_width = dec->frame_width; + dec->gvs->frame_height = dec->frame_height; + } + if (dec->gvs->frame_dur != dec->frame_dur) { + dec->gvs->frame_dur = dec->frame_dur; + if (dec->frame_dur != 0) + dec->gvs->frame_rate = ((96000 * 10 / dec->frame_dur) % 10) < 5 ? 
+ 96000 / dec->frame_dur : (96000 / dec->frame_dur +1); + else + dec->gvs->frame_rate = -1; + } + dec->gvs->status = dec->stat | dec->fatal_error; +} + +static int avs2_prepare_display_buf(struct AVS2Decoder_s *dec) +{ +#ifndef NO_DISPLAY + struct vframe_s *vf = NULL; + /*unsigned short slice_type;*/ + struct avs2_frame_s *pic; + struct vdec_s *pvdec = hw_to_vdec(dec); + while (1) { + pic = get_disp_pic(dec); + if (pic == NULL) + break; + + if (force_disp_pic_index & 0x100) { + /*recycle directly*/ + continue; + } + + if (pic->error_mark) { + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "!!!error pic, skip\n", + 0); + continue; + } + + if (dec->start_decoding_flag != 0) { + if (dec->skip_PB_before_I && + pic->slice_type != I_IMG) { + avs2_print(dec, AVS2_DBG_BUFMGR_DETAIL, + "!!!slice type %d (not I) skip\n", + 0, pic->slice_type); + continue; + } + dec->skip_PB_before_I = 0; + } + + if (kfifo_get(&dec->newframe_q, &vf) == 0) { + pr_info("fatal error, no available buffer slot."); + return -1; + } + + if (vf) { + struct vdec_info tmp4x; + int stream_offset = pic->stream_offset; + set_vframe(dec, vf, pic, 0); + decoder_do_frame_check(pvdec, vf); + vdec_vframe_ready(pvdec, vf); + kfifo_put(&dec->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(dec->pts_name, vf->timestamp); + ATRACE_COUNTER(dec->new_q_name, kfifo_len(&dec->newframe_q)); + ATRACE_COUNTER(dec->disp_q_name, kfifo_len(&dec->display_q)); + + dec_update_gvs(dec); + /*count info*/ + vdec_count_info(dec->gvs, 0, stream_offset); + if (stream_offset) { + if (pic->slice_type == I_IMG) { + dec->gvs->i_decoded_frames++; + } else if (pic->slice_type == P_IMG) { + dec->gvs->p_decoded_frames++; + } else if (pic->slice_type == B_IMG) { + dec->gvs->b_decoded_frames++; + } + } + memcpy(&tmp4x, dec->gvs, sizeof(struct vdec_info)); + tmp4x.bit_depth_luma = bit_depth_luma; + tmp4x.bit_depth_chroma = bit_depth_chroma; + tmp4x.double_write_mode = pic->double_write_mode; + vdec_fill_vdec_frame(pvdec, &dec->vframe_qos, 
&tmp4x, vf, pic->hw_decode_time); + pvdec->vdec_fps_detec(pvdec->id); + if (without_display_mode == 0) { + vf_notify_receiver(dec->provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } else + vavs2_vf_put(vavs2_vf_get(dec), dec); + } + } +/*!NO_DISPLAY*/ +#endif + return 0; +} + +static void get_rpm_param(union param_u *params) +{ + int i; + unsigned int data32; + if (debug & AVS2_DBG_BUFMGR) + pr_info("enter %s\r\n", __func__); + for (i = 0; i < (RPM_END - RPM_BEGIN); i++) { + do { + data32 = READ_VREG(RPM_CMD_REG); + /*pr_info("%x\n", data32);*/ + } while ((data32 & 0x10000) == 0); + params->l.data[i] = data32&0xffff; + /*pr_info("%x\n", data32);*/ + WRITE_VREG(RPM_CMD_REG, 0); + } + if (debug & AVS2_DBG_BUFMGR) + pr_info("leave %s\r\n", __func__); +} +static void debug_buffer_mgr_more(struct AVS2Decoder_s *dec) +{ + int i; + if (!(debug & AVS2_DBG_BUFMGR_MORE)) + return; + pr_info("avs2_param: (%d)\n", dec->avs2_dec.img.number); + for (i = 0; i < (RPM_END-RPM_BEGIN); i++) { + pr_info("%04x ", dec->avs2_dec.param.l.data[i]); + if (((i + 1) & 0xf) == 0) + pr_info("\n"); + } +} + +#ifdef AVS2_10B_MMU +static void avs2_recycle_mmu_buf_tail(struct AVS2Decoder_s *dec) +{ + if (dec->cur_fb_idx_mmu != INVALID_IDX) { + if (dec->used_4k_num == -1) { + dec->used_4k_num = + (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16); + if (dec->m_ins_flag) + hevc_mmu_dma_check(hw_to_vdec(dec)); + decoder_mmu_box_free_idx_tail(dec->mmu_box, + dec->cur_fb_idx_mmu, dec->used_4k_num); + } + dec->cur_fb_idx_mmu = INVALID_IDX; + dec->used_4k_num = -1; + } +} + +static void avs2_recycle_mmu_buf(struct AVS2Decoder_s *dec) +{ + if (dec->cur_fb_idx_mmu != INVALID_IDX) { + decoder_mmu_box_free_idx(dec->mmu_box, + dec->cur_fb_idx_mmu); + + dec->cur_fb_idx_mmu = INVALID_IDX; + dec->used_4k_num = -1; + } +} +#endif + +static void dec_again_process(struct AVS2Decoder_s *dec) +{ + amhevc_stop(); + dec->dec_result = DEC_RESULT_AGAIN; + if (dec->process_state == + PROC_STATE_DECODING) { + 
dec->process_state = + PROC_STATE_DECODE_AGAIN; + } else if (dec->process_state == + PROC_STATE_HEAD_DONE) { + dec->process_state = + PROC_STATE_HEAD_AGAIN; + } + dec->next_again_flag = 1; + reset_process_time(dec); + vdec_schedule_work(&dec->work); +} + +static uint32_t log2i(uint32_t val) +{ + uint32_t ret = -1; + while (val != 0) { + val >>= 1; + ret++; + } + return ret; +} + +static void check_pic_error(struct AVS2Decoder_s *dec, + struct avs2_frame_s *pic) +{ + if (pic->decoded_lcu == 0) { + pic->decoded_lcu = + (READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff) + 1; + } + if (pic->decoded_lcu != dec->avs2_dec.lcu_total) { + avs2_print(dec, AVS2_DBG_BUFMGR, + "%s error pic(index %d imgtr_fwRefDistance %d) decoded lcu %d (total %d)\n", + __func__, pic->index, pic->imgtr_fwRefDistance, + pic->decoded_lcu, dec->avs2_dec.lcu_total); + pic->error_mark = 1; + } else { + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s pic(index %d imgtr_fwRefDistance %d) decoded lcu %d (total %d)\n", + __func__, pic->index, pic->imgtr_fwRefDistance, + pic->decoded_lcu, dec->avs2_dec.lcu_total); + + } +} +static void update_decoded_pic(struct AVS2Decoder_s *dec) +{ + struct avs2_frame_s *pic = dec->avs2_dec.hc.cur_pic; + if (pic) { + dec->avs2_dec.hc.cur_pic->decoded_lcu = + (READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff) + 1; + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s pic(index %d imgtr_fwRefDistance %d) decoded lcu %d (total %d)\n", + __func__, pic->index, pic->imgtr_fwRefDistance, + pic->decoded_lcu, dec->avs2_dec.lcu_total); + } +} +/* +[SE] [BUG][BUG-171463][chuanqi.wang]: get frame rate by video sequeue*/ +static int get_frame_rate(union param_u *params, struct AVS2Decoder_s *dec) +{ + int tmp = 0; + + switch (params->p.frame_rate_code) { + case 1: + case 2: + tmp = 24; + break; + case 3: + tmp = 25; + break; + case 4: + case 5: + tmp = 30; + break; + case 6: + tmp = 50; + break; + case 7: + case 8: + tmp = 60; + break; + case 9: + tmp = 100; + break; + case 10: + tmp = 120; + 
		break;
	default:
		/* unknown/reserved codes fall back to 25 fps */
		tmp = 25;
		break;
	}

	if (!params->p.progressive_sequence)
		tmp = tmp / 2;
	dec->frame_dur = div_u64(96000ULL, tmp);
	dec->get_frame_dur = true;
	/*avs2_print(dec, 0, "avs2 frame_dur:%d,progressive:%d\n", dec->frame_dur, params->p.progressive_sequence);*/
	return 0;
}


#define HEVC_MV_INFO 0x310d
#define HEVC_QP_INFO 0x3137
#define HEVC_SKIP_INFO 0x3136

/* only when we decoded one field or one frame,
we can call this function to get qos info*/
static void get_picture_qos_info(struct AVS2Decoder_s *dec)
{
	struct avs2_frame_s *picture = dec->avs2_dec.hc.cur_pic;
	struct vdec_s *vdec = hw_to_vdec(dec);
	if (!picture) {
		/* NOTE(review): format has "%s" but no __func__ argument is
		 * passed — reads garbage from the va_list; verify. */
		avs2_print(dec, AVS2_DBG_BUFMGR_MORE,
			"%s decode picture is none exist\n");

		return;
	}
	if (vdec->mvfrm) {
		picture->frame_size = vdec->mvfrm->frame_size;
		picture->hw_decode_time =
			local_clock() - vdec->mvfrm->hw_decode_start;
	}

/*
#define DEBUG_QOS
*/

	/* pre-G12A parts: QoS comes from three packed 8-bit stat registers;
	 * each is unpacked into a[3] and sorted so a[0]<=a[1]<=a[2]
	 * (min/avg/max). */
	if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) {
		unsigned char a[3];
		unsigned char i, j, t;
		unsigned long data;

		data = READ_VREG(HEVC_MV_INFO);
		if (picture->slice_type == I_IMG)
			data = 0;
		a[0] = data & 0xff;
		a[1] = (data >> 8) & 0xff;
		a[2] = (data >> 16) & 0xff;

		for (i = 0; i < 3; i++)
			for (j = i+1; j < 3; j++) {
				if (a[j] < a[i]) {
					t = a[j];
					a[j] = a[i];
					a[i] = t;
				} else if (a[j] == a[i]) {
					a[i]++;
					t = a[j];
					a[j] = a[i];
					a[i] = t;
				}
			}
		picture->max_mv = a[2];
		picture->avg_mv = a[1];
		picture->min_mv = a[0];
#ifdef DEBUG_QOS
		avs2_print(dec, 0, "mv data %x a[0]= %x a[1]= %x a[2]= %x\n",
			data, a[0], a[1], a[2]);
#endif

		data = READ_VREG(HEVC_QP_INFO);
		a[0] = data & 0x1f;
		a[1] = (data >> 8) & 0x3f;
		a[2] = (data >> 16) & 0x7f;

		for (i = 0; i < 3; i++)
			for (j = i+1; j < 3; j++) {
				if (a[j] < a[i]) {
					t = a[j];
					a[j] = a[i];
					a[i] = t;
				} else if (a[j] == a[i]) {
					a[i]++;
					t = a[j];
					a[j] = a[i];
					a[i] = t;
				}
			}
		picture->max_qp = a[2];
		picture->avg_qp = a[1];
		picture->min_qp = a[0];
#ifdef DEBUG_QOS
		avs2_print(dec, 0, "qp data %x a[0]= %x a[1]= %x a[2]= %x\n",
			data, a[0], a[1], a[2]);
#endif

		data = READ_VREG(HEVC_SKIP_INFO);
		a[0] = data & 0x1f;
		a[1] = (data >> 8) & 0x3f;
		a[2] = (data >> 16) & 0x7f;

		/* same 3-element sort as above: a[0]<=a[1]<=a[2] */
		for (i = 0; i < 3; i++)
			for (j = i+1; j < 3; j++) {
				if (a[j] < a[i]) {
					t = a[j];
					a[j] = a[i];
					a[i] = t;
				} else if (a[j] == a[i]) {
					a[i]++;
					t = a[j];
					a[j] = a[i];
					a[i] = t;
				}
			}
		picture->max_skip = a[2];
		picture->avg_skip = a[1];
		picture->min_skip = a[0];

#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"skip data %x a[0]= %x a[1]= %x a[2]= %x\n",
			data, a[0], a[1], a[2]);
#endif
	} else {
		/* G12A and later: QoS stats are streamed out of the
		 * HEVC_PIC_QUALITY_DATA FIFO register in a fixed read order;
		 * every READ_VREG below consumes the next field. */
		uint32_t blk88_y_count;
		uint32_t blk88_c_count;
		uint32_t blk22_mv_count;
		uint32_t rdata32;
		int32_t mv_hi;
		int32_t mv_lo;
		uint32_t rdata32_l;
		uint32_t mvx_L0_hi;
		uint32_t mvy_L0_hi;
		uint32_t mvx_L1_hi;
		uint32_t mvy_L1_hi;
		int64_t value;
		uint64_t temp_value;
#ifdef DEBUG_QOS
		int pic_number = 0;
#endif

		picture->max_mv = 0;
		picture->avg_mv = 0;
		picture->min_mv = 0;

		picture->max_skip = 0;
		picture->avg_skip = 0;
		picture->min_skip = 0;

		picture->max_qp = 0;
		picture->avg_qp = 0;
		picture->min_qp = 0;



#ifdef DEBUG_QOS
		avs2_print(dec, 0, "slice_type:%d, poc:%d\n",
			picture->slice_type,
			pic_number);
#endif
		/* set rd_idx to 0 */
		WRITE_VREG(HEVC_PIC_QUALITY_CTRL, 0);

		blk88_y_count = READ_VREG(HEVC_PIC_QUALITY_DATA);
		if (blk88_y_count == 0) {
#ifdef DEBUG_QOS
			avs2_print(dec, 0,
				"[Picture %d Quality] NO Data yet.\n",
				pic_number);
#endif
			/* reset all counts */
			WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8));
			return;
		}
		/* qp_y_sum */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] Y QP AVG : %d (%d/%d)\n",
			pic_number, rdata32/blk88_y_count,
			rdata32, blk88_y_count);
#endif
		picture->avg_qp = rdata32/blk88_y_count;
		/* intra_y_count */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] Y intra rate : %d%c (%d)\n",
			pic_number, rdata32*100/blk88_y_count,
			'%', rdata32);
#endif
		/* skipped_y_count */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] Y skipped rate : %d%c (%d)\n",
			pic_number, rdata32*100/blk88_y_count,
			'%', rdata32);
#endif
		/* skip percentage of 8x8 luma blocks */
		picture->avg_skip = rdata32*100/blk88_y_count;
		/* coeff_non_zero_y_count */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] Y ZERO_Coeff rate : %d%c (%d)\n",
			pic_number, (100 - rdata32*100/(blk88_y_count*1)),
			'%', rdata32);
#endif
		/* blk66_c_count */
		blk88_c_count = READ_VREG(HEVC_PIC_QUALITY_DATA);
		if (blk88_c_count == 0) {
#ifdef DEBUG_QOS
			avs2_print(dec, 0,
				"[Picture %d Quality] NO Data yet.\n",
				pic_number);
#endif
			/* reset all counts */
			WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8));
			return;
		}
		/* qp_c_sum */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] C QP AVG : %d (%d/%d)\n",
			pic_number, rdata32/blk88_c_count,
			rdata32, blk88_c_count);
#endif
		/* intra_c_count */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] C intra rate : %d%c (%d)\n",
			pic_number, rdata32*100/blk88_c_count,
			'%', rdata32);
#endif
		/* skipped_cu_c_count */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] C skipped rate : %d%c (%d)\n",
			pic_number, rdata32*100/blk88_c_count,
			'%', rdata32);
#endif
		/* coeff_non_zero_c_count */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] C ZERO_Coeff rate : %d%c (%d)\n",
			pic_number, (100 - rdata32*100/(blk88_c_count*1)),
			'%', rdata32);
#endif

		/* 1'h0, qp_c_max[6:0], 1'h0,
		qp_c_min[6:0],
		1'h0, qp_y_max[6:0], 1'h0, qp_y_min[6:0] */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
#ifdef DEBUG_QOS
		avs2_print(dec, 0, "[Picture %d Quality] Y QP min : %d\n",
			pic_number, (rdata32>>0)&0xff);
#endif
		picture->min_qp = (rdata32>>0)&0xff;

#ifdef DEBUG_QOS
		avs2_print(dec, 0, "[Picture %d Quality] Y QP max : %d\n",
			pic_number, (rdata32>>8)&0xff);
#endif
		picture->max_qp = (rdata32>>8)&0xff;

#ifdef DEBUG_QOS
		avs2_print(dec, 0, "[Picture %d Quality] C QP min : %d\n",
			pic_number, (rdata32>>16)&0xff);
		avs2_print(dec, 0, "[Picture %d Quality] C QP max : %d\n",
			pic_number, (rdata32>>24)&0xff);
#endif

		/* blk22_mv_count */
		blk22_mv_count = READ_VREG(HEVC_PIC_QUALITY_DATA);
		if (blk22_mv_count == 0) {
#ifdef DEBUG_QOS
			avs2_print(dec, 0,
				"[Picture %d Quality] NO MV Data yet.\n",
				pic_number);
#endif
			/* reset all counts */
			WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8));
			return;
		}
		/* mvy_L1_count[39:32], mvx_L1_count[39:32],
		mvy_L0_count[39:32], mvx_L0_count[39:32] */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
		/* should all be 0x00 or 0xff */
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] MV AVG High Bits: 0x%X\n",
			pic_number, rdata32);
#endif
		mvx_L0_hi = ((rdata32>>0)&0xff);
		mvy_L0_hi = ((rdata32>>8)&0xff);
		mvx_L1_hi = ((rdata32>>16)&0xff);
		mvy_L1_hi = ((rdata32>>24)&0xff);

		/* mvx_L0_count[31:0] */
		rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA);
		/* rebuild the 40-bit counter: hi byte in bits 39:32 */
		temp_value = mvx_L0_hi;
		temp_value = (temp_value << 32) | rdata32_l;

		/* NOTE(review): sign-extension mask 0xFFFFFFF000000000 also
		 * touches bits 39:36 of the 40-bit value; a clean extension
		 * of bit 39 would be 0xFFFFFF0000000000 — confirm against
		 * the hardware spec (same pattern repeated below). */
		if (mvx_L0_hi & 0x80)
			value = 0xFFFFFFF000000000 | temp_value;
		else
			value = temp_value;
		value = div_s64(value, blk22_mv_count);
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] MVX_L0 AVG : %d (%lld/%d)\n",
			pic_number, (int)value,
			value, blk22_mv_count);
#endif
		picture->avg_mv = value;

		/* mvy_L0_count[31:0] */
		rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA);
		temp_value = mvy_L0_hi;
		temp_value = (temp_value << 32) |
		rdata32_l;

		/* NOTE(review): same suspicious 0xFFFFFFF000000000 sign
		 * extension mask as for mvx_L0 — see note above; confirm. */
		if (mvy_L0_hi & 0x80)
			value = 0xFFFFFFF000000000 | temp_value;
		else
			value = temp_value;
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] MVY_L0 AVG : %d (%lld/%d)\n",
			pic_number, rdata32_l/blk22_mv_count,
			value, blk22_mv_count);
#endif

		/* mvx_L1_count[31:0] */
		rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA);
		temp_value = mvx_L1_hi;
		temp_value = (temp_value << 32) | rdata32_l;
		if (mvx_L1_hi & 0x80)
			value = 0xFFFFFFF000000000 | temp_value;
		else
			value = temp_value;
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] MVX_L1 AVG : %d (%lld/%d)\n",
			pic_number, rdata32_l/blk22_mv_count,
			value, blk22_mv_count);
#endif

		/* mvy_L1_count[31:0] */
		rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA);
		temp_value = mvy_L1_hi;
		temp_value = (temp_value << 32) | rdata32_l;
		if (mvy_L1_hi & 0x80)
			value = 0xFFFFFFF000000000 | temp_value;
		else
			value = temp_value;
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] MVY_L1 AVG : %d (%lld/%d)\n",
			pic_number, rdata32_l/blk22_mv_count,
			value, blk22_mv_count);
#endif

		/* {mvx_L0_max, mvx_L0_min} // format : {sign, abs[14:0]} */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
		mv_hi = (rdata32>>16)&0xffff;
		if (mv_hi & 0x8000)
			mv_hi = 0x8000 - mv_hi;
#ifdef DEBUG_QOS
		avs2_print(dec, 0, "[Picture %d Quality] MVX_L0 MAX : %d\n",
			pic_number, mv_hi);
#endif
		picture->max_mv = mv_hi;

		mv_lo = (rdata32>>0)&0xffff;
		if (mv_lo & 0x8000)
			mv_lo = 0x8000 - mv_lo;
#ifdef DEBUG_QOS
		avs2_print(dec, 0, "[Picture %d Quality] MVX_L0 MIN : %d\n",
			pic_number, mv_lo);
#endif
		picture->min_mv = mv_lo;

#ifdef DEBUG_QOS
		/* {mvy_L0_max, mvy_L0_min} */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
		mv_hi = (rdata32>>16)&0xffff;
		if (mv_hi & 0x8000)
			mv_hi = 0x8000 - mv_hi;
		avs2_print(dec, 0, "[Picture %d Quality] MVY_L0 MAX : %d\n",
			pic_number, mv_hi);


		mv_lo = (rdata32>>0)&0xffff;
		if (mv_lo & 0x8000)
			mv_lo = 0x8000 - mv_lo;

		avs2_print(dec, 0, "[Picture %d Quality] MVY_L0 MIN : %d\n",
			pic_number, mv_lo);


		/* {mvx_L1_max, mvx_L1_min} */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
		mv_hi = (rdata32>>16)&0xffff;
		if (mv_hi & 0x8000)
			mv_hi = 0x8000 - mv_hi;

		avs2_print(dec, 0, "[Picture %d Quality] MVX_L1 MAX : %d\n",
			pic_number, mv_hi);


		mv_lo = (rdata32>>0)&0xffff;
		if (mv_lo & 0x8000)
			mv_lo = 0x8000 - mv_lo;

		avs2_print(dec, 0, "[Picture %d Quality] MVX_L1 MIN : %d\n",
			pic_number, mv_lo);


		/* {mvy_L1_max, mvy_L1_min} */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA);
		mv_hi = (rdata32>>16)&0xffff;
		if (mv_hi & 0x8000)
			mv_hi = 0x8000 - mv_hi;

		avs2_print(dec, 0, "[Picture %d Quality] MVY_L1 MAX : %d\n",
			pic_number, mv_hi);

		mv_lo = (rdata32>>0)&0xffff;
		if (mv_lo & 0x8000)
			mv_lo = 0x8000 - mv_lo;

		avs2_print(dec, 0, "[Picture %d Quality] MVY_L1 MIN : %d\n",
			pic_number, mv_lo);
#endif

		rdata32 = READ_VREG(HEVC_PIC_QUALITY_CTRL);
#ifdef DEBUG_QOS
		avs2_print(dec, 0,
			"[Picture %d Quality] After Read : VDEC_PIC_QUALITY_CTRL : 0x%x\n",
			pic_number, rdata32);
#endif
		/* reset all counts */
		WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8));
	}
}

/*
 * Threaded half of the decoder interrupt: interprets dec->dec_status
 * (latched by vavs2_isr) and drives the decode state machine — buffer-empty
 * retry, picture-done completion, header/slice processing and the
 * AGAIN-resume paths.
 */
static irqreturn_t vavs2_isr_thread_fn(int irq, void *data)
{
	struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)data;
	unsigned int dec_status = dec->dec_status;
	int i, ret;
	int32_t start_code = 0;

	/*if (dec->wait_buf)
	pr_info("set wait_buf to 0\r\n");
	*/

	avs2_print(dec, AVS2_DBG_BUFMGR_MORE,
		"%s decode_status 0x%x process_state %d lcu 0x%x\n",
		__func__, dec_status, dec->process_state,
		READ_VREG(HEVC_PARSER_LCU_START));

#ifndef G12A_BRINGUP_DEBUG
	if (dec->eos) {
		PRINT_LINE();
		goto irq_handled_exit;
	}
#endif
	dec->wait_buf = 0;
	if (dec_status == AVS2_DECODE_BUFEMPTY) {
		PRINT_LINE();
		if (dec->m_ins_flag) {
			reset_process_time(dec);
			if (!vdec_frame_based(hw_to_vdec(dec)))
				dec_again_process(dec);
			else {
				dec->dec_result = DEC_RESULT_DONE;

				reset_process_time(dec);
				amhevc_stop();
				vdec_schedule_work(&dec->work);
			}
		}
		goto irq_handled_exit;
	} else if (dec_status == HEVC_DECPIC_DATA_DONE) {
		PRINT_LINE();
		/* picture fully decoded: latch stats and finish the work item */
		dec->start_decoding_flag |= 0x3;
		if (dec->m_ins_flag) {
			set_cuva_data(dec);
			update_decoded_pic(dec);
			get_picture_qos_info(dec);
			reset_process_time(dec);
			dec->dec_result = DEC_RESULT_DONE;
			amhevc_stop();
#if 0 /*def AVS2_10B_MMU*/
			if (dec->m_ins_flag) {
				/*avs2_recycle_mmu_buf_tail(dec);*/
				dec->used_4k_num =
					(READ_VREG(HEVC_SAO_MMU_STATUS) >> 16);
			}
#endif

#if 0
			/*keep hardware state*/
			WRITE_VREG(HEVC_MPRED_INT_STATUS, (1<<31));
			WRITE_VREG(HEVC_PARSER_RESULT_3, 0xffffffff);
			dec->mpred_abv_start_addr =
				READ_VREG(HEVC_MPRED_ABV_START_ADDR);
			/**/
#endif
			vdec_schedule_work(&dec->work);
		}
		goto irq_handled_exit;
	}
	PRINT_LINE();
#if 0
	if (dec_status == AVS2_EOS) {
		if (dec->m_ins_flag)
			reset_process_time(dec);

		avs2_print(dec, AVS2_DBG_BUFMGR,
			"AVS2_EOS, flush buffer\r\n");

		avs2_post_process(&dec->avs2_dec);
		avs2_prepare_display_buf(dec);

		avs2_print(dec, AVS2_DBG_BUFMGR,
			"send AVS2_10B_DISCARD_NAL\r\n");
		WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_10B_DISCARD_NAL);
		if (dec->m_ins_flag) {
			update_decoded_pic(dec);
			dec->dec_result = DEC_RESULT_DONE;
			amhevc_stop();
			vdec_schedule_work(&dec->work);
		}
		goto irq_handled_exit;
	} else
#endif
	if (dec_status == AVS2_DECODE_OVER_SIZE) {
		avs2_print(dec, 0,
			"avs2 decode oversize !!\n");
		debug |= (AVS2_DBG_DIS_LOC_ERROR_PROC |
			AVS2_DBG_DIS_SYS_ERROR_PROC);
		dec->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW;
		if (dec->m_ins_flag)
			reset_process_time(dec);
		goto irq_handled_exit;
	}
	PRINT_LINE();

	if (dec->m_ins_flag)
		reset_process_time(dec);

	/* map the head-ready status to the start code we are about to handle */
	if (dec_status == AVS2_HEAD_SEQ_READY)
		start_code = SEQUENCE_HEADER_CODE;
	else if (dec_status == AVS2_HEAD_PIC_I_READY)
		start_code = I_PICTURE_START_CODE;
	else if (dec_status == AVS2_HEAD_PIC_PB_READY)
		start_code = PB_PICTURE_START_CODE;
	else if (dec_status == AVS2_STARTCODE_SEARCH_DONE)
		/*SEQUENCE_END_CODE, VIDEO_EDIT_CODE*/
		start_code = READ_VREG(CUR_NAL_UNIT_TYPE);

	/* resume points for the two *_AGAIN states set by dec_again_process() */
	if (dec->process_state ==
		PROC_STATE_HEAD_AGAIN
		) {
		if ((start_code == I_PICTURE_START_CODE)
		|| (start_code == PB_PICTURE_START_CODE)) {
			avs2_print(dec, 0,
				"PROC_STATE_HEAD_AGAIN error, start_code 0x%x!!!\r\n",
				start_code);
			goto irq_handled_exit;
		} else {
			avs2_print(dec, AVS2_DBG_BUFMGR,
				"PROC_STATE_HEAD_AGAIN, start_code 0x%x\r\n",
				start_code);
			dec->process_state = PROC_STATE_HEAD_DONE;
			WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_ACTION_DONE);
			goto irq_handled_exit;
		}
	} else if (dec->process_state ==
		PROC_STATE_DECODE_AGAIN) {
		if ((start_code == I_PICTURE_START_CODE)
		|| (start_code == PB_PICTURE_START_CODE)) {
			avs2_print(dec, AVS2_DBG_BUFMGR,
				"PROC_STATE_DECODE_AGAIN=> decode_slice, start_code 0x%x\r\n",
				start_code);
			goto decode_slice;
		} else {
			avs2_print(dec, 0,
				"PROC_STATE_DECODE_AGAIN, start_code 0x%x!!!\r\n",
				start_code);
			WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_ACTION_DONE);
			goto irq_handled_exit;
		}
	}

	if ((start_code == I_PICTURE_START_CODE)
	|| (start_code == PB_PICTURE_START_CODE)
	|| (start_code == SEQUENCE_END_CODE)
	|| (start_code == VIDEO_EDIT_CODE)) {
		PRINT_LINE();

		/* a new picture/sequence boundary: finish and release the
		 * previous picture before handling the new start code */
		if (dec->avs2_dec.hc.cur_pic != NULL) {
			int32_t ii;
#ifdef AVS2_10B_MMU
			avs2_recycle_mmu_buf_tail(dec);
#endif
			check_pic_error(dec, dec->avs2_dec.hc.cur_pic);
			avs2_post_process(&dec->avs2_dec);

			if (debug & AVS2_DBG_PRINT_PIC_LIST)
				dump_pic_list(dec);

			avs2_prepare_display_buf(dec);
			dec->avs2_dec.hc.cur_pic = NULL;
			for (ii = 0; ii < dec->avs2_dec.ref_maxbuffer;
				ii++) {
				struct avs2_frame_s *pic =
					dec->avs2_dec.fref[ii];
				if (pic->bg_flag == 0 &&
					pic->is_output == -1 &&
					pic->mmu_alloc_flag &&
					pic->vf_ref == 0) {
					if (pic->refered_by_others == 0) {
#ifdef AVS2_10B_MMU
						pic->mmu_alloc_flag = 0;
						/*release_buffer_4k(
						dec->avs2_dec.fref[ii]->index);*/
						decoder_mmu_box_free_idx(dec->mmu_box,
							pic->index);
#ifdef DYNAMIC_ALLOC_HEAD
						decoder_bmmu_box_free_idx(
							dec->bmmu_box,
							HEADER_BUFFER_IDX(pic->index));
						pic->header_adr = 0;
#endif
#endif
#ifndef MV_USE_FIXED_BUF
						decoder_bmmu_box_free_idx(
							dec->bmmu_box,
							MV_BUFFER_IDX(pic->index));
						pic->mpred_mv_wr_start_addr = 0;
#endif
					}
					/*
					decoder_bmmu_box_free_idx(
						dec->bmmu_box,
						VF_BUFFER_IDX(pic->index));
					dec->cma_alloc_addr = 0;*/
				}
			}
		}
	}

	if ((dec_status == AVS2_HEAD_PIC_I_READY)
	|| (dec_status == AVS2_HEAD_PIC_PB_READY)) {
		PRINT_LINE();

		/* fetch the header parameters either directly from the RPM
		 * mailbox registers or from the DMA'd rpm buffer (the latter
		 * is stored 16-bit big-endian within each 64-bit group,
		 * hence the i + 3 - ii swizzle) */
		if (debug & AVS2_DBG_SEND_PARAM_WITH_REG) {
			get_rpm_param(
				&dec->avs2_dec.param);
		} else {

			for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) {
				int ii;
				for (ii = 0; ii < 4; ii++)
					dec->avs2_dec.param.l.data[i + ii] =
						dec->rpm_ptr[i + 3 - ii];
			}
		}
#ifdef SANITY_CHECK
		if (dec->avs2_dec.param.p.num_of_ref_cur >
			dec->avs2_dec.ref_maxbuffer) {
			pr_info("Warning: Wrong num_of_ref_cur %d, force to %d\n",
				dec->avs2_dec.param.p.num_of_ref_cur,
				dec->avs2_dec.ref_maxbuffer);
			dec->avs2_dec.param.p.num_of_ref_cur =
				dec->avs2_dec.ref_maxbuffer;
		}
#endif
		PRINT_LINE();

		debug_buffer_mgr_more(dec);
		get_frame_rate(&dec->avs2_dec.param, dec);

#if 1 // The video_signal_type is type of uint16_t and result false, so comment it out.
		/* bit30 of video_signal_type flags inline HDR mastering
		 * metadata; copy it into vf_dp and clear the flag */
		if (dec->avs2_dec.param.p.video_signal_type
				& (1<<30)) {
			union param_u *pPara;

			avs2_print(dec, 0,
				"avs2 HDR meta data present\n");
			pPara = &dec->avs2_dec.param;

			/*clean this flag*/
			pPara->p.video_signal_type
				&= ~(1<<30);

			dec->vf_dp.present_flag = 1;

			dec->vf_dp.white_point[0]
				= pPara->p.white_point_x;
			avs2_print(dec, AVS2_DBG_HDR_INFO,
				"white_point[0]:0x%x\n",
				dec->vf_dp.white_point[0]);

			dec->vf_dp.white_point[1]
				= pPara->p.white_point_y;
			avs2_print(dec, AVS2_DBG_HDR_INFO,
				"white_point[1]:0x%x\n",
				dec->vf_dp.white_point[1]);

			for (i = 0; i < 3; i++) {
				dec->vf_dp.primaries[i][0]
					= pPara->p.display_primaries_x[i];
				avs2_print(dec, AVS2_DBG_HDR_INFO,
					"primaries[%d][0]:0x%x\n",
					i,
					dec->vf_dp.primaries[i][0]);
			}

			for (i = 0; i < 3; i++) {
				dec->vf_dp.primaries[i][1]
					= pPara->p.display_primaries_y[i];
				avs2_print(dec, AVS2_DBG_HDR_INFO,
					"primaries[%d][1]:0x%x\n",
					i,
					dec->vf_dp.primaries[i][1]);
			}

			dec->vf_dp.luminance[0]
				= pPara->p.max_display_mastering_luminance;
			avs2_print(dec, AVS2_DBG_HDR_INFO,
				"luminance[0]:0x%x\n",
				dec->vf_dp.luminance[0]);

			dec->vf_dp.luminance[1]
				= pPara->p.min_display_mastering_luminance;
			avs2_print(dec, AVS2_DBG_HDR_INFO,
				"luminance[1]:0x%x\n",
				dec->vf_dp.luminance[1]);


			dec->vf_dp.content_light_level.present_flag
				= 1;
			dec->vf_dp.content_light_level.max_content
				= pPara->p.max_content_light_level;
			avs2_print(dec, AVS2_DBG_HDR_INFO,
				"max_content:0x%x\n",
				dec->vf_dp.content_light_level.max_content);

			dec->vf_dp.content_light_level.max_pic_average
				= pPara->p.max_picture_average_light_level;

			avs2_print(dec, AVS2_DBG_HDR_INFO,
				"max_pic_average:0x%x\n",
				dec->vf_dp.content_light_level.max_pic_average);
		}
#endif


		if (dec->video_ori_signal_type !=
			((dec->avs2_dec.param.p.video_signal_type << 16)
			| dec->avs2_dec.param.p.color_description)) {
			u32 v = dec->avs2_dec.param.p.video_signal_type;
			u32 c =
				dec->avs2_dec.param.p.color_description;
			u32 convert_c = c;

			if (v & 0x2000) {
				avs2_print(dec, AVS2_DBG_HDR_INFO,
					"video_signal_type present:\n");
				avs2_print(dec, AVS2_DBG_HDR_INFO,
					" %s %s\n",
					video_format_names[(v >> 10) & 7],
					((v >> 9) & 1) ?
						"full_range" : "limited");
				if (v & 0x100) {
					u32 transfer;
					u32 maxtrix;

					avs2_print(dec, AVS2_DBG_HDR_INFO,
						"color_description present:\n");
					avs2_print(dec, AVS2_DBG_HDR_INFO,
						"color_primarie = %d\n",
						v & 0xff);
					avs2_print(dec, AVS2_DBG_HDR_INFO,
						"transfer_characteristic = %d\n",
						(c >> 8) & 0xff);
					avs2_print(dec, AVS2_DBG_HDR_INFO,
						" matrix_coefficient = %d\n",
						c & 0xff);

					/* remap AVS2 transfer/matrix codes to
					 * the values the display path expects */
					transfer = (c >> 8) & 0xFF;
					if (transfer >= 15)
						avs2_print(dec, AVS2_DBG_HDR_INFO,
							"unsupport transfer_characteristic\n");
					else if (transfer == 14)
						transfer = 18; /* HLG */
					else if (transfer == 13)
						transfer = 32;
					else if (transfer == 12)
						transfer = 16;
					else if (transfer == 11)
						transfer = 15;

					maxtrix = c & 0xFF;
					if (maxtrix >= 10)
						avs2_print(dec, AVS2_DBG_HDR_INFO,
							"unsupport matrix_coefficient\n");
					else if (maxtrix == 9)
						maxtrix = 10;
					else if (maxtrix == 8)
						maxtrix = 9;

					convert_c = (transfer << 8) | (maxtrix);

					avs2_print(dec, AVS2_DBG_HDR_INFO,
						" convered c:0x%x\n",
						convert_c);
				}
			}

			if (enable_force_video_signal_type)
				dec->video_signal_type
					= force_video_signal_type;
			else {
				dec->video_signal_type
					= (v << 16) | convert_c;

				dec->video_ori_signal_type
					= (v << 16) | c;
			}

			video_signal_type = dec->video_signal_type;
		}
	}
#if 0
	if ((debug_again & 0x4) &&
		dec->process_state ==
		PROC_STATE_INIT) {
		if (start_code == PB_PICTURE_START_CODE) {
			dec->process_state = PROC_STATE_TEST1;
			dec_again_process(dec);
			goto irq_handled_exit;
		}
	}
#endif
	PRINT_LINE();
	avs2_prepare_header(&dec->avs2_dec, start_code);

	if (start_code == SEQUENCE_HEADER_CODE ||
		start_code == VIDEO_EDIT_CODE ||
		start_code == SEQUENCE_END_CODE)
	{
		if (dec->m_ins_flag &&
			vdec_frame_based(hw_to_vdec(dec)))
			dec->start_decoding_flag |= 0x1;
		dec->process_state = PROC_STATE_HEAD_DONE;
		WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_ACTION_DONE);
	} else if (start_code == I_PICTURE_START_CODE ||
		start_code == PB_PICTURE_START_CODE) {
		ret = 0;
		/* lazy one-time init of the picture buffer pool, sized from
		 * the sequence's LCU size */
		if (dec->pic_list_init_flag == 0) {
			int32_t lcu_size_log2 =
				log2i(dec->avs2_dec.param.p.lcu_size);

			avs2_init_global_buffers(&dec->avs2_dec);
			/*avs2_dec->m_bg->index is
			set to dec->used_buf_num - 1*/
			init_pic_list(dec, lcu_size_log2);
			init_pic_list_hw(dec);
		}
		ret = avs2_process_header(&dec->avs2_dec);
		if (!dec->m_ins_flag)
			dec->slice_idx++;

		if (dec->m_ins_flag && ret
			&& dec->avs2_dec.hc.cur_pic->cuva_data_buf != NULL)
			release_cuva_data(dec->avs2_dec.hc.cur_pic);

		PRINT_LINE();
#ifdef I_ONLY_SUPPORT
		if ((start_code == PB_PICTURE_START_CODE) &&
			(dec->i_only & 0x2))
			ret = -2;
#endif
#ifdef AVS2_10B_MMU
		if (ret >= 0) {
			ret = avs2_alloc_mmu(dec,
				dec->avs2_dec.hc.cur_pic->index,
				dec->avs2_dec.img.width,
				dec->avs2_dec.img.height,
				dec->avs2_dec.input.sample_bit_depth,
				dec->frame_mmu_map_addr);
			if (ret >= 0) {
				dec->cur_fb_idx_mmu =
					dec->avs2_dec.hc.cur_pic->index;
				dec->avs2_dec.hc.cur_pic->mmu_alloc_flag = 1;
			} else
				pr_err("can't alloc need mmu1,idx %d ret =%d\n",
					dec->avs2_dec.hc.cur_pic->index,
					ret);
		}
#endif

#ifndef MV_USE_FIXED_BUF
		/* allocate the per-picture motion-vector buffer on demand */
		if (ret >= 0 &&
			dec->avs2_dec.hc.cur_pic->
			mpred_mv_wr_start_addr == 0) {
			unsigned long buf_addr;
			unsigned mv_buf_size = get_mv_buf_size(
				dec,
				dec->avs2_dec.hc.cur_pic->pic_w,
				dec->avs2_dec.hc.cur_pic->pic_h);
			int i = dec->avs2_dec.hc.cur_pic->index;
			/*if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)
				mv_buf_size = 0x120000 * 4;*/
			if (decoder_bmmu_box_alloc_buf_phy
				(dec->bmmu_box,
				MV_BUFFER_IDX(i),
				mv_buf_size,
				DRIVER_NAME,
				&buf_addr) < 0)
				ret = -1;
			else
				dec->avs2_dec.hc.cur_pic->
					mpred_mv_wr_start_addr
					= buf_addr;
		}
#endif
		if (ret < 0) {
			/* header/allocation failed: tell the firmware to
			 * discard this NAL and complete the work item */
			avs2_print(dec, AVS2_DBG_BUFMGR,
				"avs2_bufmgr_process=> %d, AVS2_10B_DISCARD_NAL\r\n",
				ret);
			WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_10B_DISCARD_NAL);
	#ifdef AVS2_10B_MMU
			avs2_recycle_mmu_buf(dec);
	#endif
			if (dec->m_ins_flag) {
				dec->dec_result = DEC_RESULT_DONE;
				amhevc_stop();
				vdec_schedule_work(&dec->work);
			}

			goto irq_handled_exit;
		} else {
			PRINT_LINE();
			dec->avs2_dec.hc.cur_pic->stream_offset =
				READ_VREG(HEVC_SHIFT_BYTE_COUNT);
			/*
			struct PIC_BUFFER_CONFIG_s *cur_pic
				= &cm->cur_frame->buf;
			cur_pic->decode_idx = dec->frame_count;
			*/
			if (!dec->m_ins_flag) {
				dec->frame_count++;
				decode_frame_count[dec->index]
					= dec->frame_count;
			}
			/*MULTI_INSTANCE_SUPPORT*/
			if (dec->chunk) {
				dec->avs2_dec.hc.cur_pic->pts =
					dec->chunk->pts;
				dec->avs2_dec.hc.cur_pic->pts64 =
					dec->chunk->pts64;
			}
			/**/
			dec->avs2_dec.hc.cur_pic->bit_depth
				= dec->avs2_dec.input.sample_bit_depth;
			dec->avs2_dec.hc.cur_pic->double_write_mode
				= get_double_write_mode(dec);
decode_slice:
			PRINT_LINE();

			/* program all hardware units for this slice */
			config_mc_buffer(dec);
			config_mcrcc_axi_hw(dec);
			config_mpred_hw(dec);
			config_dblk_hw(dec);
			config_sao_hw(dec);
			config_alf_hw(dec);
			config_other_hw(dec);

			avs2_print(dec, AVS2_DBG_BUFMGR_MORE,
				"=>fref0 imgtr_fwRefDistance %d, fref1 imgtr_fwRefDistance %d, dis2/dis3/dis4 %d %d %d img->tr %d\n",
				dec->avs2_dec.fref[0]->imgtr_fwRefDistance,
				dec->avs2_dec.fref[1]->imgtr_fwRefDistance,
				dec->avs2_dec.fref[2]->imgtr_fwRefDistance,
				dec->avs2_dec.fref[3]->imgtr_fwRefDistance,
				dec->avs2_dec.fref[4]->imgtr_fwRefDistance,
				dec->avs2_dec.img.tr);

			if ((debug_again & 0x2) &&
				dec->process_state ==
				PROC_STATE_INIT) {
				dec->process_state = PROC_STATE_DECODING;
				dec_again_process(dec);
				goto irq_handled_exit;
			}

			dec->process_state = PROC_STATE_DECODING;

			WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_ACTION_DONE);

		}

		if (dec->m_ins_flag)
			start_process_time(dec);
	}
irq_handled_exit:
	PRINT_LINE();
	dec->process_busy = 0;
	return IRQ_HANDLED;
}

/*
 * Hard-IRQ half of the decoder interrupt: acks the mailbox, latches
 * HEVC_DEC_STATUS_REG into dec->dec_status, services the ucode debug/pause
 * hooks, and returns IRQ_WAKE_THREAD to run vavs2_isr_thread_fn (or
 * IRQ_HANDLED when the event is fully handled here).
 */
static irqreturn_t vavs2_isr(int irq, void *data)
{
	int i;
	unsigned int dec_status;
	struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)data;
	uint debug_tag;

	WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1);

	dec_status = READ_VREG(HEVC_DEC_STATUS_REG);

	if (!dec)
		return IRQ_HANDLED;
	if (dec->init_flag == 0)
		return IRQ_HANDLED;
	if (dec->process_busy)/*on process.*/
		return IRQ_HANDLED;
	dec->dec_status = dec_status;
	/* claim the event; cleared by the thread fn at irq_handled_exit */
	dec->process_busy = 1;
	if (debug & AVS2_DBG_IRQ_EVENT)
		avs2_print(dec, 0,
			"avs2 isr dec status = 0x%x, lcu 0x%x shiftbyte 0x%x (%x %x lev %x, wr %x, rd %x)\n",
			dec_status, READ_VREG(HEVC_PARSER_LCU_START),
			READ_VREG(HEVC_SHIFT_BYTE_COUNT),
			READ_VREG(HEVC_STREAM_START_ADDR),
			READ_VREG(HEVC_STREAM_END_ADDR),
			READ_VREG(HEVC_STREAM_LEVEL),
			READ_VREG(HEVC_STREAM_WR_PTR),
			READ_VREG(HEVC_STREAM_RD_PTR)
			);

	debug_tag = READ_HREG(DEBUG_REG1);
	if (debug_tag & 0x10000) {
		/* ucode requested an LMEM dump (bit16 of DEBUG_REG1) */
		dma_sync_single_for_cpu(
			amports_get_dma_device(),
			dec->lmem_phy_addr,
			LMEM_BUF_SIZE,
			DMA_FROM_DEVICE);

		pr_info("LMEM<tag %x>:\n", READ_HREG(DEBUG_REG1));
		for (i = 0; i < 0x400; i += 4) {
			int ii;
			if ((i & 0xf) == 0)
				pr_info("%03x: ", i);
			for (ii = 0; ii < 4; ii++) {
				pr_info("%04x ",
					dec->lmem_ptr[i + 3 - ii]);
			}
			if (((i + ii) & 0xf) == 0)
				pr_info("\n");
		}

		if (((udebug_pause_pos & 0xffff)
			== (debug_tag & 0xffff)) &&
			(udebug_pause_decode_idx == 0 ||
			udebug_pause_decode_idx == dec->decode_idx) &&
			(udebug_pause_val == 0 ||
			udebug_pause_val == READ_HREG(DEBUG_REG2))) {
			udebug_pause_pos &= 0xffff;
			dec->ucode_pause_pos = udebug_pause_pos;
		} else if (debug_tag & 0x20000)
			dec->ucode_pause_pos = 0xffffffff;
		if (dec->ucode_pause_pos)
			reset_process_time(dec);
		else
			WRITE_HREG(DEBUG_REG1, 0);
	} else if (debug_tag != 0) {
		pr_info(
			"dbg%x: %x lcu %x\n", READ_HREG(DEBUG_REG1),
			READ_HREG(DEBUG_REG2),
			READ_VREG(HEVC_PARSER_LCU_START));
		if (((udebug_pause_pos & 0xffff)
			== (debug_tag & 0xffff)) &&
			(udebug_pause_decode_idx == 0 ||
			udebug_pause_decode_idx == dec->decode_idx) &&
			(udebug_pause_val == 0 ||
			udebug_pause_val == READ_HREG(DEBUG_REG2))) {
			udebug_pause_pos &= 0xffff;
			dec->ucode_pause_pos = udebug_pause_pos;
		}
		if (dec->ucode_pause_pos)
			reset_process_time(dec);
		else
			WRITE_HREG(DEBUG_REG1, 0);
		dec->process_busy = 0;
		return IRQ_HANDLED;
	}

	/* single-instance mode only: error throttling and wait-for-buffer */
	if (!dec->m_ins_flag) {
		if (dec->error_flag == 1) {
			dec->error_flag = 2;
			dec->process_busy = 0;
			return IRQ_HANDLED;
		} else if (dec->error_flag == 3) {
			dec->process_busy = 0;
			return IRQ_HANDLED;
		}

		if ((dec->pic_list_init_flag) &&
			get_free_buf_count(dec) <= 0) {
			/*
			if (dec->wait_buf == 0)
				pr_info("set wait_buf to 1\r\n");
			*/
			dec->wait_buf = 1;
			dec->process_busy = 0;
			if (debug & AVS2_DBG_IRQ_EVENT)
				avs2_print(dec, 0, "wait_buf\n");
			return IRQ_HANDLED;
		} else if (force_disp_pic_index) {
			dec->process_busy = 0;
			return IRQ_HANDLED;
		}
	}
	return IRQ_WAKE_THREAD;
}

/*
 * Periodic watchdog timer (re-armed every PUT_INTERVAL): detects decoder
 * stalls and timeouts, kicks error recovery, services the various one-shot
 * debug-dump flags, and re-registers the clock on resolution/fps change.
 */
static void vavs2_put_timer_func(struct timer_list *timer)
{
	struct AVS2Decoder_s *dec = container_of(timer,
		struct AVS2Decoder_s, timer);
	uint8_t empty_flag;
	unsigned int buf_level;

	enum receviver_start_e state = RECEIVER_INACTIVE;
	if (dec->m_ins_flag) {
		if (hw_to_vdec(dec)->next_status
			== VDEC_STATUS_DISCONNECTED) {
			dec->dec_result = DEC_RESULT_FORCE_EXIT;
			vdec_schedule_work(&dec->work);
			avs2_print(dec, AVS2_DBG_BUFMGR,
				"vdec requested to be disconnected\n");
			return;
		}
	}
	if (dec->init_flag == 0) {
		if (dec->stat & STAT_TIMER_ARM) {
			timer->expires = jiffies + PUT_INTERVAL;
			add_timer(&dec->timer);
		}
		return;
	}
	if (dec->m_ins_flag == 0) {
		if (vf_get_receiver(dec->provider_name)) {
			state =
				vf_notify_receiver(dec->provider_name,
					VFRAME_EVENT_PROVIDER_QUREY_STATE,
					NULL);
			if ((state == RECEIVER_STATE_NULL)
				|| (state ==
				RECEIVER_STATE_NONE))
				state = RECEIVER_INACTIVE;
		} else
			state = RECEIVER_INACTIVE;

		empty_flag = (READ_VREG(HEVC_PARSER_INT_STATUS) >> 6) & 0x1;
		/* error watchdog */
		if (empty_flag == 0) {
			/* decoder has input */
			if ((debug & AVS2_DBG_DIS_LOC_ERROR_PROC) == 0) {

				buf_level = READ_VREG(HEVC_STREAM_LEVEL);
				/* receiver has no buffer to recycle */
				if ((state == RECEIVER_INACTIVE) &&
					(kfifo_is_empty(&dec->display_q) &&
					buf_level > 0x200)
					) {
					/* re-trigger the decoder interrupt */
					WRITE_VREG
					(HEVC_ASSIST_MBOX0_IRQ_REG,
						0x1);
				}
			}

			if ((debug & AVS2_DBG_DIS_SYS_ERROR_PROC) == 0) {
				/* receiver has no buffer to recycle */
				/*if ((state == RECEIVER_INACTIVE) &&
					(kfifo_is_empty(&dec->display_q))) {
					pr_info("avs2 something error,need reset\n");
				}*/
			}
		}
	} else {
		/* multi-instance: LCU-progress based decode timeout */
		if (
			(decode_timeout_val > 0) &&
			(dec->start_process_time > 0) &&
			((1000 * (jiffies - dec->start_process_time) / HZ)
				> decode_timeout_val)
		) {
			int current_lcu_idx =
				READ_VREG(HEVC_PARSER_LCU_START)
				& 0xffffff;
			if (dec->last_lcu_idx == current_lcu_idx) {
				if (dec->decode_timeout_count > 0)
					dec->decode_timeout_count--;
				if (dec->decode_timeout_count == 0) {
					if (input_frame_based(
						hw_to_vdec(dec)) ||
						(READ_VREG(HEVC_STREAM_LEVEL) > 0x200))
						timeout_process(dec);
					else {
						avs2_print(dec, 0,
							"timeout & empty, again\n");
						dec_again_process(dec);
					}
				}
			} else {
				start_process_time(dec);
				dec->last_lcu_idx = current_lcu_idx;
			}
		}
	}

	if ((dec->ucode_pause_pos != 0) &&
		(dec->ucode_pause_pos != 0xffffffff) &&
		udebug_pause_pos != dec->ucode_pause_pos) {
		dec->ucode_pause_pos = 0;
		WRITE_HREG(DEBUG_REG1, 0);
	}
	if (debug & AVS2_DBG_DUMP_DATA) {
		debug &= ~AVS2_DBG_DUMP_DATA;
		avs2_print(dec, 0,
			"%s: chunk size 0x%x off 0x%x sum 0x%x\n",
			__func__,
			dec->chunk->size,
			dec->chunk->offset,
			get_data_check_sum(dec, dec->chunk->size)
			);
		dump_data(dec, dec->chunk->size);
	}
	if (debug & AVS2_DBG_DUMP_PIC_LIST) {
		dump_pic_list(dec);
		debug &=
			~AVS2_DBG_DUMP_PIC_LIST;
	}
	if (debug & AVS2_DBG_TRIG_SLICE_SEGMENT_PROC) {
		WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1);
		debug &= ~AVS2_DBG_TRIG_SLICE_SEGMENT_PROC;
	}
	/* one-shot RPM buffer dump (flag self-clears) */
	if (debug & AVS2_DBG_DUMP_RPM_BUF) {
		int i;

		pr_info("RPM:\n");
		for (i = 0; i < RPM_BUF_SIZE; i += 4) {
			int ii;
			if ((i & 0xf) == 0)
				pr_info("%03x: ", i);
			for (ii = 0; ii < 4; ii++) {
				pr_info("%04x ",
					dec->lmem_ptr[i + 3 - ii]);
			}
			if (((i + ii) & 0xf) == 0)
				pr_info("\n");
		}
		debug &= ~AVS2_DBG_DUMP_RPM_BUF;
	}
	if (debug & AVS2_DBG_DUMP_LMEM_BUF) {
		int i;

		pr_info("LMEM:\n");
		for (i = 0; i < LMEM_BUF_SIZE; i += 4) {
			int ii;
			if ((i & 0xf) == 0)
				pr_info("%03x: ", i);
			for (ii = 0; ii < 4; ii++) {
				pr_info("%04x ",
					dec->lmem_ptr[i + 3 - ii]);
			}
			if (((i + ii) & 0xf) == 0)
				pr_info("\n");
		}
		debug &= ~AVS2_DBG_DUMP_LMEM_BUF;
	}
	/*if (debug & AVS2_DBG_HW_RESET) {
	}*/

	/* module-param driven register poke/peek (radr/rval) */
	if (radr != 0) {
		if (rval != 0) {
			WRITE_VREG(radr, rval);
			pr_info("WRITE_VREG(%x,%x)\n", radr, rval);
		} else
			pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr));
		rval = 0;
		radr = 0;
	}
	if (pop_shorts != 0) {
		int i;
		u32 sum = 0;
		pr_info("pop stream 0x%x shorts\r\n", pop_shorts);
		for (i = 0; i < pop_shorts; i++) {
			u32 data =
				(READ_HREG(HEVC_SHIFTED_DATA) >> 16);
			WRITE_HREG(HEVC_SHIFT_COMMAND,
				(1<<7)|16);
			if ((i & 0xf) == 0)
				pr_info("%04x:", i);
			pr_info("%04x ", data);
			if (((i + 1) & 0xf) == 0)
				pr_info("\r\n");
			sum += data;
		}
		pr_info("\r\nsum = %x\r\n", sum);
		pop_shorts = 0;
	}
	if (dbg_cmd != 0) {
		if (dbg_cmd == 1) {
			u32 disp_laddr;
			if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB &&
				get_double_write_mode(dec) == 0) {
				disp_laddr =
					READ_VCBUS_REG(AFBC_BODY_BADDR) << 4;
			} else {
				struct canvas_s cur_canvas;
				canvas_read((READ_VCBUS_REG(VD1_IF0_CANVAS0)
					& 0xff), &cur_canvas);
				disp_laddr = cur_canvas.addr;
			}
			pr_info("current displayed buffer address %x\r\n",
				disp_laddr);
		}
		dbg_cmd = 0;
	}
/*don't changed at start.*/ + if (dec->get_frame_dur && dec->show_frame_num > 60 && + dec->frame_dur > 0 && dec->saved_resolution != + frame_width * frame_height * + (96000 / dec->frame_dur)) { + int fps = 96000 / dec->frame_dur; + if (hevc_source_changed(VFORMAT_AVS2, + frame_width, frame_height, fps) > 0) + dec->saved_resolution = frame_width * + frame_height * fps; + } + + timer->expires = jiffies + PUT_INTERVAL; + add_timer(timer); +} + + +int vavs2_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + + if (!dec) + return -1; + + vstatus->frame_width = dec->frame_width; + vstatus->frame_height = dec->frame_height; + + if (dec->frame_dur != 0) + vstatus->frame_rate = ((96000 * 10 / dec->frame_dur) % 10) < 5 ? + 96000 / dec->frame_dur : (96000 / dec->frame_dur +1); + else + vstatus->frame_rate = -1; + vstatus->error_count = 0; + vstatus->status = dec->stat | dec->fatal_error; + vstatus->frame_dur = dec->frame_dur; + vstatus->bit_rate = dec->gvs->bit_rate; + vstatus->frame_data = dec->gvs->frame_data; + vstatus->total_data = dec->gvs->total_data; + vstatus->frame_count = dec->gvs->frame_count; + vstatus->error_frame_count = dec->gvs->error_frame_count; + vstatus->drop_frame_count = dec->gvs->drop_frame_count; + vstatus->i_decoded_frames = dec->gvs->i_decoded_frames; + vstatus->i_lost_frames = dec->gvs->i_lost_frames; + vstatus->i_concealed_frames = dec->gvs->i_concealed_frames; + vstatus->p_decoded_frames = dec->gvs->p_decoded_frames; + vstatus->p_lost_frames = dec->gvs->p_lost_frames; + vstatus->p_concealed_frames = dec->gvs->p_concealed_frames; + vstatus->b_decoded_frames = dec->gvs->b_decoded_frames; + vstatus->b_lost_frames = dec->gvs->b_lost_frames; + vstatus->b_concealed_frames = dec->gvs->b_concealed_frames; + vstatus->total_data = dec->gvs->total_data; + vstatus->samp_cnt = dec->gvs->samp_cnt; + vstatus->offset = dec->gvs->offset; + snprintf(vstatus->vdec_name, 
sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + return 0; +} + +int vavs2_set_isreset(struct vdec_s *vdec, int isreset) +{ + is_reset = isreset; + return 0; +} + +static void vavs2_prot_init(struct AVS2Decoder_s *dec) +{ + unsigned int data32; + + avs2_config_work_space_hw(dec); + if (dec->pic_list_init_flag) + init_pic_list_hw(dec); + + avs2_init_decoder_hw(dec); + +#if 1 + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%s\n", __func__); + data32 = READ_VREG(HEVC_STREAM_CONTROL); + data32 = data32 | + (1 << 0)/*stream_fetch_enable*/ + ; + WRITE_VREG(HEVC_STREAM_CONTROL, data32); +#if 0 + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x00000100) { + pr_info("avs2 prot init error %d\n", __LINE__); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x00000300) { + pr_info("avs2 prot init error %d\n", __LINE__); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x12345678); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x9abcdef0); + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x12345678) { + pr_info("avs2 prot init error %d\n", __LINE__); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x9abcdef0) { + pr_info("avs2 prot init error %d\n", __LINE__); + return; + } +#endif + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x00000100); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x00000000); +#endif + + + + WRITE_VREG(HEVC_WAIT_FLAG, 1); + + /* WRITE_VREG(HEVC_MPSR, 1); */ + + /* clear mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 1); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(HEVC_PSCALE_CTRL, 0); + + WRITE_VREG(DEBUG_REG1, 0x0); + /*check vps/sps/pps/i-slice in ucode*/ + WRITE_VREG(NAL_SEARCH_CTL, 0x8); + + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + + config_cuva_buf(dec); +} + +#ifdef I_ONLY_SUPPORT +static int vavs2_set_trickmode(struct vdec_s *vdec, unsigned long trickmode) +{ + struct AVS2Decoder_s *dec = + (struct 
AVS2Decoder_s *)vdec->private; + if (i_only_flag & 0x100) + return 0; + if (trickmode == TRICKMODE_I || trickmode == TRICKMODE_I_HEVC) + dec->i_only = 0x3; + else if (trickmode == TRICKMODE_NONE) + dec->i_only = 0x0; + return 0; +} +#endif + +static int vavs2_local_init(struct AVS2Decoder_s *dec) +{ + int i; + int ret; + int width, height; + + dec->gvs = vzalloc(sizeof(struct vdec_info)); + if (NULL == dec->gvs) { + avs2_print(dec, 0, + "the struct of vdec status malloc failed.\n"); + return -1; + } +#ifdef DEBUG_PTS + dec->pts_missed = 0; + dec->pts_hit = 0; +#endif + dec->new_frame_displayed = 0; + dec->last_put_idx = -1; + dec->saved_resolution = 0; + dec->get_frame_dur = false; + on_no_keyframe_skiped = 0; + width = dec->vavs2_amstream_dec_info.width; + height = dec->vavs2_amstream_dec_info.height; + dec->frame_dur = + (dec->vavs2_amstream_dec_info.rate == + 0) ? 3600 : dec->vavs2_amstream_dec_info.rate; + if (width && height) + dec->frame_ar = height * 0x100 / width; +/* +TODO:FOR VERSION +*/ + avs2_print(dec, AVS2_DBG_BUFMGR, + "avs2: ver (%d,%d) decinfo: %dx%d rate=%d\n", avs2_version, + 0, width, height, dec->frame_dur); + + if (dec->frame_dur == 0) + dec->frame_dur = 96000 / 24; +#ifdef I_ONLY_SUPPORT + if (i_only_flag & 0x100) + dec->i_only = i_only_flag & 0xff; + else if ((unsigned long) dec->vavs2_amstream_dec_info.param + & 0x08) + dec->i_only = 0x7; + else + dec->i_only = 0x0; +#endif + INIT_KFIFO(dec->display_q); + INIT_KFIFO(dec->newframe_q); + + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &dec->vfpool[i]; + dec->vfpool[i].index = -1; + kfifo_put(&dec->newframe_q, vf); + } + + + ret = avs2_local_init(dec); + + return ret; +} + + +static s32 vavs2_init(struct vdec_s *vdec) +{ + int ret = -1, size = -1; + int fw_size = 0x1000 * 16; + struct firmware_s *fw = NULL; + struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *)vdec->private; + + timer_setup(&dec->timer, vavs2_put_timer_func, 0); + + dec->stat |= STAT_TIMER_INIT; + if 
(vavs2_local_init(dec) < 0) + return -EBUSY; + + vdec_set_vframe_comm(vdec, DRIVER_NAME); + + fw = vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + size = get_firmware_data(VIDEO_DEC_AVS2_MMU, fw->data); + if (size < 0) { + pr_err("get firmware fail.\n"); + vfree(fw); + return -1; + } + + fw->len = fw_size; + + if (dec->m_ins_flag) { + dec->timer.expires = jiffies + PUT_INTERVAL; + + /*add_timer(&dec->timer); + + dec->stat |= STAT_TIMER_ARM; + dec->stat |= STAT_ISR_REG;*/ + + INIT_WORK(&dec->work, avs2_work); + dec->fw = fw; + + return 0; + } + + amhevc_enable(); + + ret = amhevc_loadmc_ex(VFORMAT_AVS2, NULL, fw->data); + if (ret < 0) { + amhevc_disable(); + vfree(fw); + pr_err("AVS2: the %s fw loading failed, err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + return -EBUSY; + } + + vfree(fw); + + dec->stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + vavs2_prot_init(dec); + + if (vdec_request_threaded_irq(VDEC_IRQ_0, + vavs2_isr, + vavs2_isr_thread_fn, + IRQF_ONESHOT,/*run thread on this irq disabled*/ + "vavs2-irq", (void *)dec)) { + pr_info("vavs2 irq register error.\n"); + amhevc_disable(); + return -ENOENT; + } + + dec->stat |= STAT_ISR_REG; + + dec->provider_name = PROVIDER_NAME; + vf_provider_init(&vavs2_vf_prov, PROVIDER_NAME, + &vavs2_vf_provider, dec); + vf_reg_provider(&vavs2_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); + if (dec->frame_dur != 0) { + if (!is_reset) + vf_notify_receiver(dec->provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)dec->frame_dur)); + } + dec->stat |= STAT_VF_HOOK; + + dec->timer.expires = jiffies + PUT_INTERVAL; + add_timer(&dec->timer); + + dec->stat |= STAT_TIMER_ARM; + + /* dec->stat |= STAT_KTHREAD; */ + dec->process_busy = 0; + avs2_print(dec, AVS2_DBG_BUFMGR_MORE, + "%d, vavs2_init, RP=0x%x\n", + __LINE__, READ_VREG(HEVC_STREAM_RD_PTR)); + return 0; +} + +static int vmavs2_stop(struct AVS2Decoder_s 
*dec) +{ + dec->init_flag = 0; + dec->first_sc_checked = 0; + if (dec->stat & STAT_TIMER_ARM) { + del_timer_sync(&dec->timer); + dec->stat &= ~STAT_TIMER_ARM; + } + + if (dec->stat & STAT_VF_HOOK) { + if (!is_reset) + vf_notify_receiver(dec->provider_name, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + + vf_unreg_provider(&vavs2_vf_prov); + dec->stat &= ~STAT_VF_HOOK; + } + avs2_local_uninit(dec); + reset_process_time(dec); + cancel_work_sync(&dec->work); + uninit_mmu_buffers(dec); + if (dec->fw) { + vfree(dec->fw); + dec->fw = NULL; + } + + return 0; +} + +static int amvdec_avs2_mmu_init(struct AVS2Decoder_s *dec) +{ + int tvp_flag = vdec_secure(hw_to_vdec(dec)) ? + CODEC_MM_FLAGS_TVP : 0; + int buf_size = 48; + +#ifdef AVS2_10B_MMU + dec->need_cache_size = buf_size * SZ_1M; + dec->sc_start_time = get_jiffies_64(); + dec->mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME, + dec->index, FRAME_BUFFERS, + dec->need_cache_size, + tvp_flag + ); + if (!dec->mmu_box) { + pr_err("avs2 alloc mmu box failed!!\n"); + return -1; + } +#endif + dec->bmmu_box = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + dec->index, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + tvp_flag); + if (!dec->bmmu_box) { + pr_err("avs2 alloc bmmu box failed!!\n"); + return -1; + } + return 0; +} + +/****************************************/ +static struct codec_profile_t amvdec_avs2_profile = { + .name = "AVS2-V4L", + .profile = "" +}; + +static unsigned char get_data_check_sum + (struct AVS2Decoder_s *dec, int size) +{ + int jj; + int sum = 0; + u8 *data = NULL; + + if (!dec->chunk->block->is_mapped) + data = codec_mm_vmap(dec->chunk->block->start + + dec->chunk->offset, size); + else + data = ((u8 *)dec->chunk->block->start_virt) + + dec->chunk->offset; + + for (jj = 0; jj < size; jj++) + sum += data[jj]; + + if (!dec->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static void dump_data(struct AVS2Decoder_s *dec, 
int size) +{ + int jj; + u8 *data = NULL; + int padding_size = dec->chunk->offset & + (VDEC_FIFO_ALIGN - 1); + + if (!dec->chunk->block->is_mapped) + data = codec_mm_vmap(dec->chunk->block->start + + dec->chunk->offset, size); + else + data = ((u8 *)dec->chunk->block->start_virt) + + dec->chunk->offset; + + avs2_print(dec, 0, "padding: "); + for (jj = padding_size; jj > 0; jj--) + avs2_print_cont(dec, + 0, + "%02x ", *(data - jj)); + avs2_print_cont(dec, 0, "data adr %p\n", + data); + + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + avs2_print(dec, + 0, + "%06x:", jj); + avs2_print_cont(dec, + 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + avs2_print(dec, + 0, + "\n"); + } + avs2_print(dec, + 0, + "\n"); + + if (!dec->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); +} + +static void avs2_work(struct work_struct *work) +{ + struct AVS2Decoder_s *dec = container_of(work, + struct AVS2Decoder_s, work); + struct vdec_s *vdec = hw_to_vdec(dec); + /* finished decoding one frame or error, + * notify vdec core to switch context + */ + avs2_print(dec, PRINT_FLAG_VDEC_DETAIL, + "%s dec_result %d %x %x %x\n", + __func__, + dec->dec_result, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR)); + + if (((dec->dec_result == DEC_RESULT_GET_DATA) || + (dec->dec_result == DEC_RESULT_GET_DATA_RETRY)) + && (hw_to_vdec(dec)->next_status != + VDEC_STATUS_DISCONNECTED)) { + if (!vdec_has_more_input(vdec)) { + dec->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&dec->work); + return; + } + + if (dec->dec_result == DEC_RESULT_GET_DATA) { + avs2_print(dec, PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA %x %x %x\n", + __func__, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR)); + vdec_vframe_dirty(vdec, dec->chunk); + vdec_clean_input(vdec); + } + + if (get_free_buf_count(dec) >= + run_ready_min_buf_num) { + int r; + int decode_size; + r = vdec_prepare_input(vdec, 
&dec->chunk); + if (r < 0) { + dec->dec_result = DEC_RESULT_GET_DATA_RETRY; + + avs2_print(dec, + PRINT_FLAG_VDEC_DETAIL, + "amvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&dec->work); + return; + } + dec->dec_result = DEC_RESULT_NONE; + avs2_print(dec, PRINT_FLAG_VDEC_STATUS, + "%s: chunk size 0x%x sum 0x%x\n", + __func__, r, + (debug & PRINT_FLAG_VDEC_STATUS) ? + get_data_check_sum(dec, r) : 0 + ); + if (debug & PRINT_FLAG_VDEC_DATA) + dump_data(dec, dec->chunk->size); + + decode_size = dec->chunk->size + + (dec->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + + WRITE_VREG(HEVC_DECODE_SIZE, + READ_VREG(HEVC_DECODE_SIZE) + decode_size); + + vdec_enable_input(vdec); + + WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_ACTION_DONE); + + start_process_time(dec); + + } else{ + dec->dec_result = DEC_RESULT_GET_DATA_RETRY; + + avs2_print(dec, PRINT_FLAG_VDEC_DETAIL, + "amvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&dec->work); + } + return; + } else if (dec->dec_result == DEC_RESULT_DONE) { + /* if (!dec->ctx_valid) + dec->ctx_valid = 1; */ + dec->slice_idx++; + dec->frame_count++; + dec->process_state = PROC_STATE_INIT; + decode_frame_count[dec->index] = dec->frame_count; + +#ifdef AVS2_10B_MMU + dec->used_4k_num = + (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16); +#endif + avs2_print(dec, PRINT_FLAG_VDEC_STATUS, + "%s (===> %d) dec_result %d %x %x %x shiftbytes 0x%x decbytes 0x%x\n", + __func__, + dec->frame_count, + dec->dec_result, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_BYTE_COUNT) - + dec->start_shift_bytes + ); + vdec_vframe_dirty(hw_to_vdec(dec), dec->chunk); + } else if (dec->dec_result == DEC_RESULT_AGAIN) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + if (!vdec_has_more_input(vdec)) { + dec->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&dec->work); + return; + } + } else if (dec->dec_result 
== DEC_RESULT_EOS) { + avs2_print(dec, 0, + "%s: end of stream\n", + __func__); + dec->eos = 1; + if ( dec->avs2_dec.hc.cur_pic != NULL) { + check_pic_error(dec, dec->avs2_dec.hc.cur_pic); + avs2_post_process(&dec->avs2_dec); + avs2_prepare_display_buf(dec); + } + vdec_vframe_dirty(hw_to_vdec(dec), dec->chunk); + } else if (dec->dec_result == DEC_RESULT_FORCE_EXIT) { + avs2_print(dec, PRINT_FLAG_VDEC_STATUS, + "%s: force exit\n", + __func__); + if (dec->stat & STAT_VDEC_RUN) { + amhevc_stop(); + dec->stat &= ~STAT_VDEC_RUN; + } + + if (dec->stat & STAT_ISR_REG) { + if (!dec->m_ins_flag) + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0); + vdec_free_irq(VDEC_IRQ_0, (void *)dec); + dec->stat &= ~STAT_ISR_REG; + } + } + + if (dec->stat & STAT_TIMER_ARM) { + del_timer_sync(&dec->timer); + dec->stat &= ~STAT_TIMER_ARM; + } + /* mark itself has all HW resource released and input released */ + if (vdec->parallel_dec ==1) + vdec_core_finish_run(vdec, CORE_MASK_HEVC); + else + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + if (dec->vdec_cb) + dec->vdec_cb(hw_to_vdec(dec), dec->vdec_cb_arg); +} + +static int avs2_hw_ctx_restore(struct AVS2Decoder_s *dec) +{ + /* new to do ... */ + vavs2_prot_init(dec); + return 0; +} + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + int tvp = vdec_secure(hw_to_vdec(dec)) ? 
+ CODEC_MM_FLAGS_TVP : 0; + unsigned long ret = 0; + avs2_print(dec, + PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__); + if (debug & AVS2_DBG_PIC_LEAK_WAIT) + return ret; + + if (dec->eos) + return ret; + if (!dec->first_sc_checked) { + int size = decoder_mmu_box_sc_check(dec->mmu_box, tvp); + dec->first_sc_checked = 1; + avs2_print(dec, 0, "vavs2 cached=%d need_size=%d speed= %d ms\n", + size, (dec->need_cache_size >> PAGE_SHIFT), + (int)(get_jiffies_64() - dec->sc_start_time) * 1000/HZ); + } + + if (dec->next_again_flag && + (!vdec_frame_based(vdec))) { + u32 parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + if (parser_wr_ptr >= dec->pre_parser_wr_ptr && + (parser_wr_ptr - dec->pre_parser_wr_ptr) < + again_threshold) { + int r = vdec_sync_input(vdec); + avs2_print(dec, + PRINT_FLAG_VDEC_DETAIL, "%s buf lelvel:%x\n", __func__, r); + return 0; + } + } +/* + if (vdec_stream_based(vdec) && (dec->pic_list_init_flag == 0) + && pre_decode_buf_level != 0) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = vdec->input.size + wp - rp; + else + level = wp - rp; + + if (level < pre_decode_buf_level) + return 0; + } +*/ + + if ((dec->pic_list_init_flag == 0) || + get_free_buf_count(dec) >= + run_ready_min_buf_num) + ret = 1; +#ifdef CONSTRAIN_MAX_BUF_NUM + if (dec->pic_list_init_flag) { + if (run_ready_max_vf_only_num > 0 && + get_vf_ref_only_buf_count(dec) >= + run_ready_max_vf_only_num + ) + ret = 0; + if (run_ready_display_q_num > 0 && + kfifo_len(&dec->display_q) >= + run_ready_display_q_num) + ret = 0; + + if (run_ready_max_buf_num == 0xff && + get_used_buf_count(dec) >= + dec->avs2_dec.ref_maxbuffer) + ret = 0; + else if (run_ready_max_buf_num && + get_used_buf_count(dec) >= + run_ready_max_buf_num) + ret = 0; + } +#endif + if (ret) + not_run_ready[dec->index] = 0; + else + not_run_ready[dec->index]++; + + if (vdec->parallel_dec == 1) + return ret ? CORE_MASK_HEVC : 0; + else + return ret ? 
(CORE_MASK_VDEC_1 | CORE_MASK_HEVC) : 0; +} + +static void run(struct vdec_s *vdec, unsigned long mask, + void (*callback)(struct vdec_s *, void *), void *arg) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + int r; + + run_count[dec->index]++; + dec->vdec_cb_arg = arg; + dec->vdec_cb = callback; + /* dec->chunk = vdec_prepare_input(vdec); */ + hevc_reset_core(vdec); + + if (vdec_stream_based(vdec)) { + dec->pre_parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + dec->next_again_flag = 0; + } + + r = vdec_prepare_input(vdec, &dec->chunk); + if (r < 0) { + input_empty[dec->index]++; + + dec->dec_result = DEC_RESULT_AGAIN; + + avs2_print(dec, PRINT_FLAG_VDEC_DETAIL, + "ammvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&dec->work); + return; + } + input_empty[dec->index] = 0; + dec->dec_result = DEC_RESULT_NONE; + dec->start_shift_bytes = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + + if (debug & PRINT_FLAG_VDEC_STATUS) { + int ii; + avs2_print(dec, 0, + "%s (%d): size 0x%x (0x%x 0x%x) sum 0x%x (%x %x %x %x %x) bytes 0x%x", + __func__, + dec->frame_count, r, + dec->chunk ? dec->chunk->size : 0, + dec->chunk ? dec->chunk->offset : 0, + dec->chunk ? ((vdec_frame_based(vdec) && + (debug & PRINT_FLAG_VDEC_STATUS)) ? 
+ get_data_check_sum(dec, r) : 0) : 0, + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + dec->start_shift_bytes); + if (vdec_frame_based(vdec) && dec->chunk) { + u8 *data = NULL; + if (!dec->chunk->block->is_mapped) + data = codec_mm_vmap(dec->chunk->block->start + + dec->chunk->offset, 8); + else + data = ((u8 *)dec->chunk->block->start_virt) + + dec->chunk->offset; + + avs2_print_cont(dec, 0, "data adr %p:", + data); + for (ii = 0; ii < 8; ii++) + avs2_print_cont(dec, 0, "%02x ", + data[ii]); + if (!dec->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + avs2_print_cont(dec, 0, "\r\n"); + } + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else if (amhevc_loadmc_ex(VFORMAT_AVS2, NULL, dec->fw->data) < 0) { + vdec->mc_loaded = 0; + amhevc_disable(); + avs2_print(dec, 0, + "%s: Error amvdec_loadmc fail\n", __func__); + dec->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&dec->work); + return; + } else { + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_AVS2; + } + + + if (avs2_hw_ctx_restore(dec) < 0) { + vdec_schedule_work(&dec->work); + return; + } + + vdec_enable_input(vdec); + + WRITE_VREG(HEVC_DEC_STATUS_REG, AVS2_SEARCH_NEW_PIC); + + if (vdec_frame_based(vdec) && dec->chunk) { + if (debug & PRINT_FLAG_VDEC_DATA) + dump_data(dec, dec->chunk->size); + + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, 0); + r = dec->chunk->size + + (dec->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = dec->chunk->size; + } + + WRITE_VREG(HEVC_DECODE_SIZE, r); + WRITE_VREG(HEVC_DECODE_COUNT, dec->slice_idx); + dec->init_flag = 1; + + avs2_print(dec, PRINT_FLAG_VDEC_DETAIL, + "%s: start hevc (%x %x %x)\n", + __func__, + READ_VREG(HEVC_DEC_STATUS_REG), + READ_VREG(HEVC_MPC_E), + READ_VREG(HEVC_MPSR)); + + start_process_time(dec); + mod_timer(&dec->timer, 
jiffies); + dec->stat |= STAT_TIMER_ARM; + dec->stat |= STAT_ISR_REG; + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + amhevc_start(); + dec->stat |= STAT_VDEC_RUN; +} + +static void reset(struct vdec_s *vdec) +{ + + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + + atomic_set(&dec->vf_pre_count, 0); + atomic_set(&dec->vf_get_count, 0); + atomic_set(&dec->vf_put_count, 0); + + avs2_print(dec, + PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__); + +} + +static irqreturn_t avs2_irq_cb(struct vdec_s *vdec, int irq) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + return vavs2_isr(0, dec); +} + +static irqreturn_t avs2_threaded_irq_cb(struct vdec_s *vdec, int irq) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + return vavs2_isr_thread_fn(0, dec); +} + +static void avs2_dump_state(struct vdec_s *vdec) +{ + struct AVS2Decoder_s *dec = + (struct AVS2Decoder_s *)vdec->private; + int i; + avs2_print(dec, 0, "====== %s\n", __func__); + + avs2_print(dec, 0, + "width/height (%d/%d), used_buf_num %d\n", + dec->avs2_dec.img.width, + dec->avs2_dec.img.height, + dec->used_buf_num + ); + + avs2_print(dec, 0, + "is_framebase(%d), eos %d, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d\n", + input_frame_based(vdec), + dec->eos, + dec->dec_result, + decode_frame_count[dec->index], + display_frame_count[dec->index], + run_count[dec->index], + not_run_ready[dec->index], + input_empty[dec->index] + ); + + if (vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + avs2_print(dec, 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + + avs2_print(dec, 0, + "%s, newq(%d/%d), dispq(%d/%d), vf prepare/get/put (%d/%d/%d), free_buf_count %d (min %d for run_ready)\n", + __func__, + kfifo_len(&dec->newframe_q), + VF_POOL_SIZE, + 
kfifo_len(&dec->display_q), + VF_POOL_SIZE, + dec->vf_pre_count, + dec->vf_get_count, + dec->vf_put_count, + get_free_buf_count(dec), + run_ready_min_buf_num + ); + + dump_pic_list(dec); + + for (i = 0; i < MAX_BUF_NUM; i++) { + avs2_print(dec, 0, + "mv_Buf(%d) start_adr 0x%x size 0x%x used %d\n", + i, + dec->m_mv_BUF[i].start_adr, + dec->m_mv_BUF[i].size, + dec->m_mv_BUF[i].used_flag); + } + + avs2_print(dec, 0, + "HEVC_DEC_STATUS_REG=0x%x\n", + READ_VREG(HEVC_DEC_STATUS_REG)); + avs2_print(dec, 0, + "HEVC_MPC_E=0x%x\n", + READ_VREG(HEVC_MPC_E)); + avs2_print(dec, 0, + "DECODE_MODE=0x%x\n", + READ_VREG(DECODE_MODE)); + avs2_print(dec, 0, + "NAL_SEARCH_CTL=0x%x\n", + READ_VREG(NAL_SEARCH_CTL)); + avs2_print(dec, 0, + "HEVC_PARSER_LCU_START=0x%x\n", + READ_VREG(HEVC_PARSER_LCU_START)); + avs2_print(dec, 0, + "HEVC_DECODE_SIZE=0x%x\n", + READ_VREG(HEVC_DECODE_SIZE)); + avs2_print(dec, 0, + "HEVC_SHIFT_BYTE_COUNT=0x%x\n", + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + avs2_print(dec, 0, + "HEVC_STREAM_START_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_START_ADDR)); + avs2_print(dec, 0, + "HEVC_STREAM_END_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_END_ADDR)); + avs2_print(dec, 0, + "HEVC_STREAM_LEVEL=0x%x\n", + READ_VREG(HEVC_STREAM_LEVEL)); + avs2_print(dec, 0, + "HEVC_STREAM_WR_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_WR_PTR)); + avs2_print(dec, 0, + "HEVC_STREAM_RD_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_RD_PTR)); + avs2_print(dec, 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + avs2_print(dec, 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (input_frame_based(vdec) && + (debug & PRINT_FLAG_VDEC_DATA) + ) { + int jj; + if (dec->chunk && dec->chunk->block && + dec->chunk->size > 0) { + u8 *data = NULL; + if (!dec->chunk->block->is_mapped) + data = codec_mm_vmap(dec->chunk->block->start + + dec->chunk->offset, dec->chunk->size); + else + data = ((u8 *)dec->chunk->block->start_virt) + + dec->chunk->offset; + avs2_print(dec, 0, + "frame data size 
0x%x\n", + dec->chunk->size); + for (jj = 0; jj < dec->chunk->size; jj++) { + if ((jj & 0xf) == 0) + avs2_print(dec, 0, + "%06x:", jj); + avs2_print_cont(dec, 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + avs2_print_cont(dec, 0, + "\n"); + } + + if (!dec->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } + +} + +static int ammvdec_avs2_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + int ret; + int config_val; + int i; + struct vframe_content_light_level_s content_light_level; + struct vframe_master_display_colour_s vf_dp; + /*struct BUF_s BUF[MAX_BUF_NUM];*/ + struct AVS2Decoder_s *dec = NULL; + + pr_info("%s\n", __func__); + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) { + pr_info("%s, chip id %d is not support avs2\n", + __func__, get_cpu_major_id()); + return -1; + } + if (pdata == NULL) { + pr_info("\nammvdec_avs2 memory resource undefined.\n"); + return -EFAULT; + } + /*dec = (struct AVS2Decoder_s *)devm_kzalloc(&pdev->dev, + sizeof(struct AVS2Decoder_s), GFP_KERNEL);*/ + memset(&vf_dp, 0, sizeof(struct vframe_master_display_colour_s)); + dec = vmalloc(sizeof(struct AVS2Decoder_s)); + memset(dec, 0, sizeof(struct AVS2Decoder_s)); + if (dec == NULL) { + pr_info("\nammvdec_avs2 device data allocation failed\n"); + return -ENOMEM; + } + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < AVS2_MAX_BUFFER_NUM; i++) { + dec->avs2_dec.frm_pool[i].y_canvas_index = -1; + dec->avs2_dec.frm_pool[i].uv_canvas_index = -1; + } + } + pdata->private = dec; + pdata->dec_status = vavs2_dec_status; +#ifdef I_ONLY_SUPPORT + pdata->set_trickmode = vavs2_set_trickmode; +#endif + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = avs2_irq_cb; + pdata->threaded_irq_handler = avs2_threaded_irq_cb; + pdata->dump_state = avs2_dump_state; + + /* + * memcpy(&BUF[0], &dec->m_BUF[0], sizeof(struct BUF_s) * MAX_BUF_NUM); + * memset(dec, 0, 
sizeof(struct AVS2Decoder_s)); + * memcpy(&dec->m_BUF[0], &BUF[0], sizeof(struct BUF_s) * MAX_BUF_NUM); + */ + + dec->index = pdev->id; + dec->m_ins_flag = 1; + + if (is_rdma_enable()) { + dec->rdma_adr = dma_alloc_coherent(amports_get_dma_device(), RDMA_SIZE , &dec->rdma_phy_adr, GFP_KERNEL); + for (i = 0; i < SCALELUT_DATA_WRITE_NUM; i++) { + dec->rdma_adr[i * 4] = HEVC_IQIT_SCALELUT_WR_ADDR & 0xfff; + dec->rdma_adr[i * 4 + 1] = i; + dec->rdma_adr[i * 4 + 2] = HEVC_IQIT_SCALELUT_DATA & 0xfff; + dec->rdma_adr[i * 4 + 3] = 0; + if (i == SCALELUT_DATA_WRITE_NUM - 1) { + dec->rdma_adr[i * 4 + 2] = (HEVC_IQIT_SCALELUT_DATA & 0xfff) | 0x20000; + } + } + } + + snprintf(dec->vdec_name, sizeof(dec->vdec_name), + "avs2-%d", dec->index); + snprintf(dec->pts_name, sizeof(dec->pts_name), + "%s-timestamp", dec->vdec_name); + snprintf(dec->new_q_name, sizeof(dec->new_q_name), + "%s-newframe_q", dec->vdec_name); + snprintf(dec->disp_q_name, sizeof(dec->disp_q_name), + "%s-dispframe_q", dec->vdec_name); + + if (pdata->use_vfm_path) { + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + dec->frameinfo_enable = 1; + } else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vavs2_vf_provider, dec); + + dec->provider_name = pdata->vf_provider_name; + platform_set_drvdata(pdev, pdata); + + dec->platform_dev = pdev; + dec->video_signal_type = 0; + dec->video_ori_signal_type = 0; + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_TXLX) + dec->stat |= VP9_TRIGGER_FRAME_ENABLE; +#if 1 + if ((debug & IGNORE_PARAM_FROM_CONFIG) == 0 && + pdata->config_len) { + /*use ptr config for doubel_write_mode, etc*/ + avs2_print(dec, 0, "pdata->config=%s\n", pdata->config); + if (get_config_int(pdata->config, "avs2_double_write_mode", + &config_val) == 0) + dec->double_write_mode = config_val; + else + dec->double_write_mode = 
double_write_mode; + + if (get_config_int(pdata->config, "parm_v4l_buffer_margin", + &config_val) == 0) + dec->dynamic_buf_margin = config_val; + else + dec->dynamic_buf_margin = 0; + + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + dec->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + dec->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, "HDRStaticInfo", + &vf_dp.present_flag) == 0 + && vf_dp.present_flag == 1) { + get_config_int(pdata->config, "mG.x", + &vf_dp.primaries[0][0]); + get_config_int(pdata->config, "mG.y", + &vf_dp.primaries[0][1]); + get_config_int(pdata->config, "mB.x", + &vf_dp.primaries[1][0]); + get_config_int(pdata->config, "mB.y", + &vf_dp.primaries[1][1]); + get_config_int(pdata->config, "mR.x", + &vf_dp.primaries[2][0]); + get_config_int(pdata->config, "mR.y", + &vf_dp.primaries[2][1]); + get_config_int(pdata->config, "mW.x", + &vf_dp.white_point[0]); + get_config_int(pdata->config, "mW.y", + &vf_dp.white_point[1]); + get_config_int(pdata->config, "mMaxDL", + &vf_dp.luminance[0]); + get_config_int(pdata->config, "mMinDL", + &vf_dp.luminance[1]); + vf_dp.content_light_level.present_flag = 1; + get_config_int(pdata->config, "mMaxCLL", + &content_light_level.max_content); + get_config_int(pdata->config, "mMaxFALL", + &content_light_level.max_pic_average); + vf_dp.content_light_level = content_light_level; + dec->video_signal_type = (1 << 29) + | (5 << 26) /* unspecified */ + | (0 << 25) /* limit */ + | (1 << 24) /* color available */ + | (9 << 16) /* 2020 */ + | (16 << 8) /* 2084 */ + | (9 << 0); /* 2020 */ + } + dec->vf_dp = vf_dp; + } else +#endif + { + /*dec->vavs2_amstream_dec_info.width = 0; + dec->vavs2_amstream_dec_info.height = 0; + dec->vavs2_amstream_dec_info.rate = 30;*/ + dec->double_write_mode = double_write_mode; + dec->dynamic_buf_margin = dynamic_buf_num_margin; + } + video_signal_type = dec->video_signal_type; + +#if 0 
+ dec->buf_start = pdata->mem_start; + dec->buf_size = pdata->mem_end - pdata->mem_start + 1; +#else + if (amvdec_avs2_mmu_init(dec) < 0) { + pr_err("avs2 alloc bmmu box failed!!\n"); + /* devm_kfree(&pdev->dev, (void *)dec); */ + vfree((void *)dec); + return -1; + } + dec->cma_alloc_count = PAGE_ALIGN(work_buf_size) / PAGE_SIZE; + ret = decoder_bmmu_box_alloc_buf_phy(dec->bmmu_box, WORK_SPACE_BUF_ID, + dec->cma_alloc_count * PAGE_SIZE, DRIVER_NAME, + &dec->cma_alloc_addr); + if (ret < 0) { + uninit_mmu_buffers(dec); + /* devm_kfree(&pdev->dev, (void *)dec); */ + vfree((void *)dec); + return ret; + } + dec->buf_start = dec->cma_alloc_addr; + dec->buf_size = work_buf_size; +#endif + dec->init_flag = 0; + dec->first_sc_checked = 0; + dec->fatal_error = 0; + dec->show_frame_num = 0; + + if (debug) { + pr_info("===AVS2 decoder mem resource 0x%lx size 0x%x\n", + dec->buf_start, + dec->buf_size); + } + + if (pdata->sys_info) { + dec->vavs2_amstream_dec_info = *pdata->sys_info; + dec->frame_width = dec->vavs2_amstream_dec_info.width; + dec->frame_height = dec->vavs2_amstream_dec_info.height; + } else { + dec->vavs2_amstream_dec_info.width = 0; + dec->vavs2_amstream_dec_info.height = 0; + dec->vavs2_amstream_dec_info.rate = 30; + } + + dec->endian = HEVC_CONFIG_LITTLE_ENDIAN; + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) + dec->endian = HEVC_CONFIG_BIG_ENDIAN; + if (endian) + dec->endian = endian; + + dec->cma_dev = pdata->cma_dev; + if (vavs2_init(pdata) < 0) { + pr_info("\namvdec_avs2 init failed.\n"); + avs2_local_uninit(dec); + uninit_mmu_buffers(dec); + /* devm_kfree(&pdev->dev, (void *)dec); */ + vfree((void *)dec); + pdata->dec_status = NULL; + return -ENODEV; + } + vdec_set_prepare_level(pdata, start_decode_buf_level); + hevc_source_changed(VFORMAT_AVS2, + 4096, 2048, 60); + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_HEVC); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + + return 0; 
+} + +static int ammvdec_avs2_remove(struct platform_device *pdev) +{ + struct AVS2Decoder_s *dec = (struct AVS2Decoder_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct avs2_decoder *avs2_dec = &dec->avs2_dec; + struct avs2_frame_s *pic; + int i; + + if (debug) + pr_info("amvdec_avs2_remove\n"); + + vmavs2_stop(dec); + + if (pdata->parallel_dec == 1) + vdec_core_release(hw_to_vdec(dec), CORE_MASK_HEVC); + else + vdec_core_release(hw_to_vdec(dec), CORE_MASK_HEVC); + + vdec_set_status(hw_to_vdec(dec), VDEC_STATUS_DISCONNECTED); + if (pdata->parallel_dec == 1) { + for (i = 0; i < AVS2_MAX_BUFFER_NUM; i++) { + pdata->free_canvas_ex(dec->avs2_dec.frm_pool[i].y_canvas_index, pdata->id); + pdata->free_canvas_ex(dec->avs2_dec.frm_pool[i].uv_canvas_index, pdata->id); + } + } + + for (i = 0; i < dec->used_buf_num; i++) { + if (i == (dec->used_buf_num - 1)) + pic = avs2_dec->m_bg; + else + pic = avs2_dec->fref[i]; + release_cuva_data(pic); + } + + +#ifdef DEBUG_PTS + pr_info("pts missed %ld, pts hit %ld, duration %d\n", + dec->pts_missed, dec->pts_hit, dec->frame_dur); +#endif + if (is_rdma_enable()) + dma_free_coherent(amports_get_dma_device(), RDMA_SIZE, dec->rdma_adr, dec->rdma_phy_adr); + /* devm_kfree(&pdev->dev, (void *)dec); */ + vfree((void *)dec); + return 0; +} + +static struct platform_driver ammvdec_avs2_driver = { + .probe = ammvdec_avs2_probe, + .remove = ammvdec_avs2_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = MULTI_DRIVER_NAME, + } +}; +#endif +static struct mconfig avs2_configs[] = { + MC_PU32("bit_depth_luma", &bit_depth_luma), + MC_PU32("bit_depth_chroma", &bit_depth_chroma), + MC_PU32("frame_width", &frame_width), + MC_PU32("frame_height", &frame_height), + MC_PU32("debug", &debug), + MC_PU32("radr", &radr), + MC_PU32("rval", &rval), + MC_PU32("pop_shorts", &pop_shorts), + MC_PU32("dbg_cmd", 
&dbg_cmd), + MC_PU32("dbg_skip_decode_index", &dbg_skip_decode_index), + MC_PU32("endian", &endian), + MC_PU32("step", &step), + MC_PU32("udebug_flag", &udebug_flag), + MC_PU32("decode_pic_begin", &decode_pic_begin), + MC_PU32("slice_parse_begin", &slice_parse_begin), + MC_PU32("i_only_flag", &i_only_flag), + MC_PU32("error_handle_policy", &error_handle_policy), + MC_PU32("buf_alloc_width", &buf_alloc_width), + MC_PU32("buf_alloc_height", &buf_alloc_height), + MC_PU32("buf_alloc_depth", &buf_alloc_depth), + MC_PU32("buf_alloc_size", &buf_alloc_size), + MC_PU32("buffer_mode", &buffer_mode), + MC_PU32("buffer_mode_dbg", &buffer_mode_dbg), + MC_PU32("max_buf_num", &max_buf_num), + MC_PU32("dynamic_buf_num_margin", &dynamic_buf_num_margin), + MC_PU32("mem_map_mode", &mem_map_mode), + MC_PU32("double_write_mode", &double_write_mode), + MC_PU32("enable_mem_saving", &enable_mem_saving), + MC_PU32("force_w_h", &force_w_h), + MC_PU32("force_fps", &force_fps), + MC_PU32("max_decoding_time", &max_decoding_time), + MC_PU32("on_no_keyframe_skiped", &on_no_keyframe_skiped), + MC_PU32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("decode_timeout_val", &decode_timeout_val), +}; +static struct mconfig_node avs2_node; + +static int __init amvdec_avs2_driver_init_module(void) +{ + +#ifdef AVS2_10B_MMU + + struct BuffInfo_s *p_buf_info; + + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + p_buf_info = &amvavs2_workbuff_spec[2]; + else + p_buf_info = &amvavs2_workbuff_spec[1]; + } else + p_buf_info = &amvavs2_workbuff_spec[0]; + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) { + p_buf_info = &amvavs2_workbuff_spec[5]; + } else + p_buf_info = &amvavs2_workbuff_spec[3]; + } + + init_buff_spec(NULL, p_buf_info); + work_buf_size = + (p_buf_info->end_adr - p_buf_info->start_adr + + 0xffff) & (~0xffff); 
+ +#endif + pr_debug("amvdec_avs2 module init\n"); + +#ifdef ERROR_HANDLE_DEBUG + dbg_nal_skip_flag = 0; + dbg_nal_skip_count = 0; +#endif + udebug_flag = 0; + decode_pic_begin = 0; + slice_parse_begin = 0; + step = 0; + buf_alloc_size = 0; + + if (platform_driver_register(&ammvdec_avs2_driver)) { + pr_err("failed to register ammvdec_avs2 driver\n"); + return -ENODEV; + } + + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D)) { + amvdec_avs2_profile.name = "avs2_unsupport"; + } else if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SM1) { + if (vdec_is_support_4k()) + amvdec_avs2_profile.profile = + "4k, 10bit, dwrite, compressed"; + else + amvdec_avs2_profile.profile = + "10bit, dwrite, compressed"; + } else { + /* cpu id larger than sm1 support 8k */ + amvdec_avs2_profile.profile = + "8k, 10bit, dwrite, compressed"; + } + + vcodec_profile_register(&amvdec_avs2_profile); + + INIT_REG_NODE_CONFIGS("media.decoder", &avs2_node, + "avs2-v4l", avs2_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_AVS2, 1); + + return 0; +} + +static void __exit amvdec_avs2_driver_remove_module(void) +{ + pr_debug("amvdec_avs2 module remove.\n"); + platform_driver_unregister(&ammvdec_avs2_driver); +} + +/****************************************/ + +module_param(bit_depth_luma, uint, 0664); +MODULE_PARM_DESC(bit_depth_luma, "\n amvdec_avs2 bit_depth_luma\n"); + +module_param(bit_depth_chroma, uint, 0664); +MODULE_PARM_DESC(bit_depth_chroma, "\n amvdec_avs2 bit_depth_chroma\n"); + +module_param(frame_width, uint, 0664); +MODULE_PARM_DESC(frame_width, "\n amvdec_avs2 frame_width\n"); + +module_param(frame_height, uint, 0664); +MODULE_PARM_DESC(frame_height, "\n amvdec_avs2 frame_height\n"); + +module_param(debug, uint, 0664); +MODULE_PARM_DESC(debug, "\n amvdec_avs2 debug\n"); + +module_param(debug_again, uint, 0664); +MODULE_PARM_DESC(debug_again, "\n amvdec_avs2 debug_again\n"); + +module_param(radr, uint, 0664); 
+MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\nrval\n"); + +module_param(pop_shorts, uint, 0664); +MODULE_PARM_DESC(pop_shorts, "\nrval\n"); + +module_param(dbg_cmd, uint, 0664); +MODULE_PARM_DESC(dbg_cmd, "\ndbg_cmd\n"); + +module_param(dbg_skip_decode_index, uint, 0664); +MODULE_PARM_DESC(dbg_skip_decode_index, "\ndbg_skip_decode_index\n"); + +module_param(endian, uint, 0664); +MODULE_PARM_DESC(endian, "\nrval\n"); + +module_param(step, uint, 0664); +MODULE_PARM_DESC(step, "\n amvdec_avs2 step\n"); + +module_param(decode_pic_begin, uint, 0664); +MODULE_PARM_DESC(decode_pic_begin, "\n amvdec_avs2 decode_pic_begin\n"); + +module_param(slice_parse_begin, uint, 0664); +MODULE_PARM_DESC(slice_parse_begin, "\n amvdec_avs2 slice_parse_begin\n"); + +module_param(i_only_flag, uint, 0664); +MODULE_PARM_DESC(i_only_flag, "\n amvdec_avs2 i_only_flag\n"); + +module_param(error_handle_policy, uint, 0664); +MODULE_PARM_DESC(error_handle_policy, "\n amvdec_avs2 error_handle_policy\n"); + +module_param(re_search_seq_threshold, uint, 0664); +MODULE_PARM_DESC(re_search_seq_threshold, "\n amvdec_avs2 re_search_seq_threshold\n"); + +module_param(buf_alloc_width, uint, 0664); +MODULE_PARM_DESC(buf_alloc_width, "\n buf_alloc_width\n"); + +module_param(buf_alloc_height, uint, 0664); +MODULE_PARM_DESC(buf_alloc_height, "\n buf_alloc_height\n"); + +module_param(buf_alloc_depth, uint, 0664); +MODULE_PARM_DESC(buf_alloc_depth, "\n buf_alloc_depth\n"); + +module_param(buf_alloc_size, uint, 0664); +MODULE_PARM_DESC(buf_alloc_size, "\n buf_alloc_size\n"); + +module_param(buffer_mode, uint, 0664); +MODULE_PARM_DESC(buffer_mode, "\n buffer_mode\n"); + +module_param(buffer_mode_dbg, uint, 0664); +MODULE_PARM_DESC(buffer_mode_dbg, "\n buffer_mode_dbg\n"); +/*USE_BUF_BLOCK*/ +module_param(max_buf_num, uint, 0664); +MODULE_PARM_DESC(max_buf_num, "\n max_buf_num\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); 
+MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +#ifdef CONSTRAIN_MAX_BUF_NUM +module_param(run_ready_max_vf_only_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_vf_only_num, "\n run_ready_max_vf_only_num\n"); + +module_param(run_ready_display_q_num, uint, 0664); +MODULE_PARM_DESC(run_ready_display_q_num, "\n run_ready_display_q_num\n"); + +module_param(run_ready_max_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_buf_num, "\n run_ready_max_buf_num\n"); +#endif + +module_param(mv_buf_margin, uint, 0664); +MODULE_PARM_DESC(mv_buf_margin, "\n mv_buf_margin\n"); + +module_param(run_ready_min_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_min_buf_num, "\n run_ready_min_buf_num\n"); + +/**/ + +module_param(mem_map_mode, uint, 0664); +MODULE_PARM_DESC(mem_map_mode, "\n mem_map_mode\n"); + +module_param(double_write_mode, uint, 0664); +MODULE_PARM_DESC(double_write_mode, "\n double_write_mode\n"); + +module_param(enable_mem_saving, uint, 0664); +MODULE_PARM_DESC(enable_mem_saving, "\n enable_mem_saving\n"); + +module_param(force_w_h, uint, 0664); +MODULE_PARM_DESC(force_w_h, "\n force_w_h\n"); + +module_param(force_fps, uint, 0664); +MODULE_PARM_DESC(force_fps, "\n force_fps\n"); + +module_param(max_decoding_time, uint, 0664); +MODULE_PARM_DESC(max_decoding_time, "\n max_decoding_time\n"); + +module_param(on_no_keyframe_skiped, uint, 0664); +MODULE_PARM_DESC(on_no_keyframe_skiped, "\n on_no_keyframe_skiped\n"); + + +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n avs2 start_decode_buf_level\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, + "\n avs2 decode_timeout_val\n"); + +module_param_array(decode_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(display_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, + &max_decode_instance_num, 0664); + 
+module_param_array(run_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(input_empty, uint, + &max_decode_instance_num, 0664); + +module_param_array(not_run_ready, uint, + &max_decode_instance_num, 0664); + +module_param(video_signal_type, uint, 0664); +MODULE_PARM_DESC(video_signal_type, "\n amvdec_avs2 video_signal_type\n"); + +module_param(force_video_signal_type, uint, 0664); +MODULE_PARM_DESC(force_video_signal_type, "\n amvdec_avs2 force_video_signal_type\n"); + +module_param(enable_force_video_signal_type, uint, 0664); +MODULE_PARM_DESC(enable_force_video_signal_type, "\n amvdec_avs2 enable_force_video_signal_type\n"); + +module_param(force_bufspec, uint, 0664); +MODULE_PARM_DESC(force_bufspec, "\n amvdec_h265 force_bufspec\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_h265 udebug_flag\n"); + +module_param(udebug_pause_pos, uint, 0664); +MODULE_PARM_DESC(udebug_pause_pos, "\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, + "\n amvdec_avs2 pre_decode_buf_level\n"); + +module_param(again_threshold, uint, 0664); +MODULE_PARM_DESC(again_threshold, "\n again_threshold\n"); + + +module_param(force_disp_pic_index, int, 0664); +MODULE_PARM_DESC(force_disp_pic_index, + "\n amvdec_h265 force_disp_pic_index\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n without_display_mode\n"); + +module_param(mv_buf_dynamic_alloc, uint, 0664); +MODULE_PARM_DESC(mv_buf_dynamic_alloc, "\n mv_buf_dynamic_alloc\n"); + +module_init(amvdec_avs2_driver_init_module); +module_exit(amvdec_avs2_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC avs2 Video Decoder Driver"); 
+MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <tim.yao@amlogic.com>");
diff --git a/drivers/frame_provider/decoder_v4l/avs2/vavs2.h b/drivers/frame_provider/decoder_v4l/avs2/vavs2.h new file mode 100644 index 0000000..6b51f61 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/avs2/vavs2.h
@@ -0,0 +1,26 @@ +/* + * drivers/amlogic/amports/vavs2.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * +*/ + +#ifndef VAVS2_H +#define VAVS2_H + +#define AVS2_10B_MMU +#define MV_USE_FIXED_BUF + +void adapt_coef_probs(int pic_count, int prev_kf, int cur_kf, int pre_fc, +unsigned int *prev_prob, unsigned int *cur_prob, unsigned int *count); +#endif
diff --git a/drivers/frame_provider/decoder_v4l/avs_multi/Makefile b/drivers/frame_provider/decoder_v4l/avs_multi/Makefile new file mode 100644 index 0000000..10da3ab --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/avs_multi/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_AVS_MULTI) += amvdec_mavs_v4l.o +amvdec_mavs_v4l-objs += avs_multi.o
diff --git a/drivers/frame_provider/decoder_v4l/avs_multi/avs_multi.c b/drivers/frame_provider/decoder_v4l/avs_multi/avs_multi.c new file mode 100644 index 0000000..3ad9385 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/avs_multi/avs_multi.c
@@ -0,0 +1,5016 @@ +/* + * drivers/amlogic/amports/vavs.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../../stream_input/amports/streambuf_reg.h" +#include "../../decoder/utils/amvdec.h" +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/dma-mapping.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/slab.h> +#include "avs_multi.h" +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../../decoder/utils/decoder_mmu_box.h" +#include "../../decoder/utils/decoder_bmmu_box.h" +#include "../../decoder/utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include <linux/amlogic/tee.h> +#include "../../decoder/utils/vdec_feature.h" + +#define DEBUG_MULTI_FLAG 0 +/* +#define DEBUG_WITH_SINGLE_MODE +#define 
DEBUG_MULTI_WITH_AUTOMODE +#define DEBUG_MULTI_FRAME_INS +*/ + + +#define USE_DYNAMIC_BUF_NUM + +#define DRIVER_NAME "ammvdec_avs_v4l" + + +#define MULTI_DRIVER_NAME "ammvdec_avs_v4l" + +#define ENABLE_USER_DATA + +#if 1/* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +#define NV21 +#endif + +#define USE_AVS_SEQ_INFO +#define HANDLE_AVS_IRQ +#define DEBUG_PTS + +#define CHECK_INTERVAL (HZ/100) + +#define I_PICTURE 0 +#define P_PICTURE 1 +#define B_PICTURE 2 + +#define LMEM_BUF_SIZE (0x500 * 2) + +/* #define ORI_BUFFER_START_ADDR 0x81000000 */ +#define ORI_BUFFER_START_ADDR 0x80000000 + +#define INTERLACE_FLAG 0x80 +#define TOP_FIELD_FIRST_FLAG 0x40 + +/* protocol registers */ +#define AVS_PIC_RATIO AV_SCRATCH_0 +#define AVS_PIC_WIDTH AV_SCRATCH_1 +#define AVS_PIC_HEIGHT AV_SCRATCH_2 +#define AVS_FRAME_RATE AV_SCRATCH_3 + +/*#define AVS_ERROR_COUNT AV_SCRATCH_6*/ +#define AVS_SOS_COUNT AV_SCRATCH_7 +#define AVS_BUFFERIN AV_SCRATCH_8 +#define AVS_BUFFEROUT AV_SCRATCH_9 +#define AVS_REPEAT_COUNT AV_SCRATCH_A +#define AVS_TIME_STAMP AV_SCRATCH_B +#define AVS_OFFSET_REG AV_SCRATCH_C +#define MEM_OFFSET_REG AV_SCRATCH_F +#define AVS_ERROR_RECOVERY_MODE AV_SCRATCH_G +#define DECODE_PIC_COUNT AV_SCRATCH_G + +#define DECODE_MODE AV_SCRATCH_6 +#define DECODE_MODE_SINGLE 0x0 +#define DECODE_MODE_MULTI_FRAMEBASE 0x1 +#define DECODE_MODE_MULTI_STREAMBASE 0x2 +#define DECODE_MODE_MULTI_STREAMBASE_CONT 0x3 + +#define DECODE_STATUS AV_SCRATCH_H +#define DECODE_STATUS_PIC_DONE 0x1 +#define DECODE_STATUS_DECODE_BUF_EMPTY 0x2 +#define DECODE_STATUS_SEARCH_BUF_EMPTY 0x3 +#define DECODE_STATUS_SKIP_PIC_DONE 0x4 +#define DECODE_SEARCH_HEAD 0xff + +#define DECODE_STOP_POS AV_SCRATCH_J + +#define DECODE_LMEM_BUF_ADR AV_SCRATCH_I + +#define DECODE_CFG AV_SCRATCH_K + +#define VF_POOL_SIZE 64 +#define PUT_INTERVAL (HZ/100) + +#if 1 /*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8*/ +#define INT_AMVENCODER INT_DOS_MAILBOX_1 +#else +/* #define AMVENC_DEV_VERSION "AML-MT" */ +#define INT_AMVENCODER 
INT_MAILBOX_1A +#endif + +#ifdef USE_DYNAMIC_BUF_NUM +static unsigned int buf_spec_reg[] = { + AV_SCRATCH_0, + AV_SCRATCH_1, + AV_SCRATCH_2, + AV_SCRATCH_3, + AV_SCRATCH_7, /*AVS_SOS_COUNT*/ + AV_SCRATCH_D, /*DEBUG_REG2*/ + AV_SCRATCH_E, /*DEBUG_REG1*/ + AV_SCRATCH_M /*user_data_poc_number*/ +}; +#endif + +#define DEBUG_REG1 AV_SCRATCH_E +#define DEBUG_REG2 AV_SCRATCH_D + + +static void check_timer_func(struct timer_list *timer); +static void vavs_work(struct work_struct *work); + +#define DEC_CONTROL_FLAG_FORCE_2500_1080P_INTERLACE 0x0001 +static u32 dec_control = DEC_CONTROL_FLAG_FORCE_2500_1080P_INTERLACE; + + +#define VPP_VD1_POSTBLEND (1 << 10) + +static int debug; +static unsigned int debug_mask = 0xff; + +/*for debug*/ +/* + udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode more print + bit 3, enable ucdode detail print + bit [31:16] not 0, pos to dump lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) + + avs only: + bit [8], disable empty muitl-instance handling + bit [9], enable writting of VC1_CONTROL_REG in ucode +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 udebug_pause_decode_idx; + +static u32 udebug_pause_ins_id; + +static u32 force_fps; + +#ifdef DEBUG_MULTI_FRAME_INS +static u32 delay; +#endif + +static u32 step; + +static u32 start_decoding_delay; + +#define AVS_DEV_NUM 9 +static unsigned int max_decode_instance_num = AVS_DEV_NUM; +static unsigned int max_process_time[AVS_DEV_NUM]; +static unsigned int max_get_frame_interval[AVS_DEV_NUM]; +static unsigned int run_count[AVS_DEV_NUM]; +static unsigned int ins_udebug_flag[AVS_DEV_NUM]; +#ifdef DEBUG_MULTI_FRAME_INS +static unsigned int max_run_count[AVS_DEV_NUM]; +#endif +/* 
+error_handle_policy: +*/ +static unsigned int error_handle_policy = 3; + +static u32 again_threshold = 0; /*0x40;*/ + +static unsigned int decode_timeout_val = 200; +static unsigned int start_decode_buf_level = 0x8000; + +/******************************** +firmware_sel + 0: use avsp_trans long cabac ucode; + 1: not use avsp_trans long cabac ucode + in ucode: + #define USE_EXT_BUFFER_ASSIGNMENT + #undef USE_DYNAMIC_BUF_NUM +********************************/ +static int firmware_sel; +static int disable_longcabac_trans = 1; +static int pre_decode_buf_level = 0x800; + + +static struct vframe_s *vavs_vf_peek(void *); +static struct vframe_s *vavs_vf_get(void *); +static void vavs_vf_put(struct vframe_s *, void *); +static int vavs_vf_states(struct vframe_states *states, void *); +static int vavs_event_cb(int type, void *data, void *private_data); + +static const char vavs_dec_id[] = "vavs-dev"; + +#define PROVIDER_NAME "decoder.avs" +static DEFINE_SPINLOCK(lock); +static DEFINE_MUTEX(vavs_mutex); + +static const struct vframe_operations_s vavs_vf_provider = { + .peek = vavs_vf_peek, + .get = vavs_vf_get, + .put = vavs_vf_put, + .event_cb = vavs_event_cb, + .vf_states = vavs_vf_states, +}; +/* +static void *mm_blk_handle; +*/ +static struct vframe_provider_s vavs_vf_prov; + +#define VF_BUF_NUM_MAX 16 +#ifdef DEBUG_MULTI_FRAME_INS +#define WORKSPACE_SIZE (16 * SZ_1M) +#else +#define WORKSPACE_SIZE (4 * SZ_1M) +#endif +#ifdef AVSP_LONG_CABAC +#define MAX_BMMU_BUFFER_NUM (VF_BUF_NUM_MAX + 2) +#define WORKSPACE_SIZE_A (MAX_CODED_FRAME_SIZE + LOCAL_HEAP_SIZE) +#else +#define MAX_BMMU_BUFFER_NUM (VF_BUF_NUM_MAX + 1) +#endif + +#define RV_AI_BUFF_START_ADDR 0x01a00000 +#define LONG_CABAC_RV_AI_BUFF_START_ADDR 0x00000000 + +/* 4 buffers not enough for multi inc*/ +static u32 vf_buf_num = 8; +/*static u32 vf_buf_num_used;*/ +static u32 canvas_base = 128; +#ifdef NV21 +static int canvas_num = 2; /*NV21*/ +#else +static int canvas_num = 3; +#endif + +#if 0 +static struct vframe_s 
vfpool[VF_POOL_SIZE]; +/*static struct vframe_s vfpool2[VF_POOL_SIZE];*/ +static struct vframe_s *cur_vfpool; +static unsigned char recover_flag; +static s32 vfbuf_use[VF_BUF_NUM_MAX]; +static u32 saved_resolution; +static u32 frame_width, frame_height, frame_dur, frame_prog; +static struct timer_list recycle_timer; +static u32 stat; +#endif +static u32 buf_size = 32 * 1024 * 1024; +#if 0 +static u32 buf_offset; +static u32 avi_flag; +static u32 vavs_ratio; +static u32 pic_type; +#endif +static u32 pts_by_offset = 1; +#if 0 +static u32 total_frame; +static u32 next_pts; +static unsigned char throw_pb_flag; +#ifdef DEBUG_PTS +static u32 pts_hit, pts_missed, pts_i_hit, pts_i_missed; +#endif +#endif +static u32 radr, rval; +static u32 dbg_cmd; +#if 0 +static struct dec_sysinfo vavs_amstream_dec_info; +static struct vdec_info *gvs; +static u32 fr_hint_status; +static struct work_struct notify_work; +static struct work_struct set_clk_work; +static bool is_reset; +#endif +/*static struct vdec_s *vdec;*/ + +#ifdef AVSP_LONG_CABAC +static struct work_struct long_cabac_wd_work; +void *es_write_addr_virt; +dma_addr_t es_write_addr_phy; + +void *bitstream_read_tmp; +dma_addr_t bitstream_read_tmp_phy; +void *avsp_heap_adr; +static uint long_cabac_busy; +#endif + +#if 0 +#ifdef ENABLE_USER_DATA +static void *user_data_buffer; +static dma_addr_t user_data_buffer_phys; +#endif +static DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); +static DECLARE_KFIFO(recycle_q, struct vframe_s *, VF_POOL_SIZE); +#endif +static inline u32 index2canvas(u32 index) +{ + const u32 canvas_tab[VF_BUF_NUM_MAX] = { + 0x010100, 0x030302, 0x050504, 0x070706, + 0x090908, 0x0b0b0a, 0x0d0d0c, 0x0f0f0e, + 0x111110, 0x131312, 0x151514, 0x171716, + 0x191918, 0x1b1b1a, 0x1d1d1c, 0x1f1f1e, + }; + const u32 canvas_tab_3[4] = { + 0x010100, 0x040403, 0x070706, 0x0a0a09 + }; + + if (canvas_num == 2) + return canvas_tab[index] + 
(canvas_base << 16) + + (canvas_base << 8) + canvas_base; + + return canvas_tab_3[index] + (canvas_base << 16) + + (canvas_base << 8) + canvas_base; +} + +static const u32 frame_rate_tab[16] = { + 96000 / 30, /* forbidden */ + 96000000 / 23976, /* 24000/1001 (23.967) */ + 96000 / 24, + 96000 / 25, + 9600000 / 2997, /* 30000/1001 (29.97) */ + 96000 / 30, + 96000 / 50, + 9600000 / 5994, /* 60000/1001 (59.94) */ + 96000 / 60, + /* > 8 reserved, use 24 */ + 96000 / 24, 96000 / 24, 96000 / 24, 96000 / 24, + 96000 / 24, 96000 / 24, 96000 / 24 +}; + +#define DECODE_BUFFER_NUM_MAX VF_BUF_NUM_MAX +#define PIC_PTS_NUM 64 +struct buf_pool_s { + unsigned detached; + struct vframe_s vf; +}; + +#define buf_of_vf(vf) container_of(vf, struct buf_pool_s, vf) + +struct pic_pts_s { + u32 pts; + u64 pts64; + u64 timestamp; + unsigned short decode_pic_count; +}; + +struct vdec_avs_hw_s { + spinlock_t lock; + unsigned char m_ins_flag; + struct platform_device *platform_dev; + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(recycle_q, struct vframe_s *, VF_POOL_SIZE); + struct buf_pool_s vfpool[VF_POOL_SIZE]; + s32 vfbuf_use[VF_BUF_NUM_MAX]; + unsigned char again_flag; + unsigned char recover_flag; + u32 frame_width; + u32 frame_height; + u32 frame_dur; + u32 frame_prog; + u32 saved_resolution; + u32 avi_flag; + u32 vavs_ratio; + u32 pic_type; + + u32 vf_buf_num_used; + u32 total_frame; + u32 next_pts; + unsigned char throw_pb_flag; + struct pic_pts_s pic_pts[PIC_PTS_NUM]; + int pic_pts_wr_pos; + +#ifdef DEBUG_PTS + u32 pts_hit; + u32 pts_missed; + u32 pts_i_hit; + u32 pts_i_missed; +#endif +#ifdef ENABLE_USER_DATA + struct work_struct userdata_push_work; + void *user_data_buffer; + dma_addr_t user_data_buffer_phys; +#endif + dma_addr_t lmem_addr; + ulong lmem_phy_addr; + + u32 buf_offset; + + struct dec_sysinfo vavs_amstream_dec_info; + struct vdec_info *gvs; + u32 fr_hint_status; + struct 
work_struct set_clk_work; + bool is_reset; + + /*debug*/ + u32 ucode_pause_pos; + /**/ + u32 decode_pic_count; + u8 reset_decode_flag; + u32 display_frame_count; + u32 buf_status; + u32 pre_parser_wr_ptr; + /* + buffer_status &= ~buf_recycle_status + */ + u32 buf_recycle_status; + u32 seqinfo; + u32 ctx_valid; + u32 dec_control; + void *mm_blk_handle; + struct vframe_chunk_s *chunk; + u32 stat; + u8 init_flag; + unsigned long buf_start; + u32 buf_size; + + u32 reg_scratch_0; + u32 reg_scratch_1; + u32 reg_scratch_2; + u32 reg_scratch_3; + u32 reg_scratch_4; + u32 reg_scratch_5; + u32 reg_scratch_6; + u32 reg_scratch_7; + u32 reg_scratch_8; + u32 reg_scratch_9; + u32 reg_scratch_A; + u32 reg_scratch_B; + u32 reg_scratch_C; + u32 reg_scratch_D; + u32 reg_scratch_E; + u32 reg_scratch_F; + u32 reg_scratch_G; + u32 reg_scratch_H; + u32 reg_scratch_I; + u32 reg_mb_width; + u32 reg_viff_bit_cnt; + u32 reg_canvas_addr; + u32 reg_dbkr_canvas_addr; + u32 reg_dbkw_canvas_addr; + u32 reg_anc2_canvas_addr; + u32 reg_anc0_canvas_addr; + u32 reg_anc1_canvas_addr; + u32 reg_anc3_canvas_addr; + u32 reg_anc4_canvas_addr; + u32 reg_anc5_canvas_addr; + u32 slice_ver_pos_pic_type; + u32 vc1_control_reg; + u32 avs_co_mb_wr_addr; + u32 slice_start_byte_01; + u32 slice_start_byte_23; + u32 vcop_ctrl_reg; + u32 iqidct_control; + u32 rv_ai_mb_count; + u32 slice_qp; + u32 dc_scaler; + u32 avsp_iq_wq_param_01; + u32 avsp_iq_wq_param_23; + u32 avsp_iq_wq_param_45; + u32 avs_co_mb_rd_addr; + u32 dblk_mb_wid_height; + u32 mc_pic_w_h; + u32 avs_co_mb_rw_ctl; + u32 vld_decode_control; + + struct timer_list check_timer; + u32 decode_timeout_count; + unsigned long int start_process_time; + u32 last_vld_level; + u32 eos; + u32 canvas_spec[DECODE_BUFFER_NUM_MAX]; + struct canvas_config_s canvas_config[DECODE_BUFFER_NUM_MAX][2]; + + s32 refs[2]; + int dec_result; + struct timer_list recycle_timer; + struct work_struct work; + struct work_struct notify_work; + atomic_t error_handler_run; + struct 
work_struct fatal_error_wd_work; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; +/* for error handling */ + u32 run_count; + u32 not_run_ready; + u32 input_empty; + atomic_t prepare_num; + atomic_t put_num; + atomic_t peek_num; + atomic_t get_num; + u32 drop_frame_count; + u32 buffer_not_ready; + int frameinfo_enable; + struct firmware_s *fw; + u32 old_udebug_flag; + u32 decode_status_skip_pic_done_flag; + u32 decode_decode_cont_start_code; + int vdec_pg_enable_flag; + char vdec_name[32]; + char pts_name[32]; + char new_q_name[32]; + char disp_q_name[32]; +}; + +static void reset_process_time(struct vdec_avs_hw_s *hw); +static void start_process_time(struct vdec_avs_hw_s *hw); +static void vavs_save_regs(struct vdec_avs_hw_s *hw); + +struct vdec_avs_hw_s *ghw; + +#define MULTI_INSTANCE_PROVIDER_NAME "vdec.avs" + +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_ERROR 3 +#define DEC_RESULT_FORCE_EXIT 4 +#define DEC_RESULT_EOS 5 +#define DEC_RESULT_GET_DATA 6 +#define DEC_RESULT_GET_DATA_RETRY 7 +#define DEC_RESULT_USERDATA 8 + +#define DECODE_ID(hw) (hw->m_ins_flag? 
hw_to_vdec(hw)->id : 0) + +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_RUN_FLOW 0X0001 +#define PRINT_FLAG_DECODING 0x0002 +#define PRINT_FLAG_PTS 0x0004 +#define PRINT_FLAG_VFRAME_DETAIL 0x0010 +#define PRINT_FLAG_VLD_DETAIL 0x0020 +#define PRINT_FLAG_DEC_DETAIL 0x0040 +#define PRINT_FLAG_BUFFER_DETAIL 0x0080 +#define PRINT_FLAG_FORCE_DONE 0x0100 +#define PRINT_FLAG_COUNTER 0X0200 +#define PRINT_FRAMEBASE_DATA 0x0400 +#define PRINT_FLAG_PARA_DATA 0x1000 +#define DEBUG_FLAG_PREPARE_MORE_INPUT 0x2000 +#define DEBUG_FLAG_PRINT_REG 0x4000 +#define DEBUG_FLAG_DISABLE_TIMEOUT 0x10000 +#define DEBUG_WAIT_DECODE_DONE_WHEN_STOP 0x20000 +#define DEBUG_PIC_DONE_WHEN_UCODE_PAUSE 0x40000 + + +#undef DEBUG_REG +#ifdef DEBUG_REG +static void WRITE_VREG_DBG2(unsigned adr, unsigned val) +{ + if (debug & DEBUG_FLAG_PRINT_REG) + pr_info("%s(%x, %x)\n", __func__, adr, val); + if (adr != 0) + WRITE_VREG(adr, val); +} + +#undef WRITE_VREG +#define WRITE_VREG WRITE_VREG_DBG2 +#endif + +#undef pr_info +#define pr_info printk +static int debug_print(struct vdec_avs_hw_s *hw, + int flag, const char *fmt, ...) +{ +#define AVS_PRINT_BUF 256 + unsigned char buf[AVS_PRINT_BUF]; + int len = 0; + int index = 0; + if (hw) + index = hw->m_ins_flag ? DECODE_ID(hw) : 0; + if (hw == NULL || + (flag == 0) || + ((debug_mask & + (1 << index)) + && (debug & flag))) { + va_list args; + + va_start(args, fmt); + if (hw) + len = sprintf(buf, "[%d]", index); + vsnprintf(buf + len, AVS_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; +} + +static int debug_print_cont(struct vdec_avs_hw_s *hw, + int flag, const char *fmt, ...) +{ + unsigned char buf[AVS_PRINT_BUF]; + int len = 0; + int index = 0; + if (hw) + index = hw->m_ins_flag ? 
DECODE_ID(hw) : 0; + if (hw == NULL || + (flag == 0) || + ((debug_mask & + (1 << index)) + && (debug & flag))) { + va_list args; + + va_start(args, fmt); + vsnprintf(buf + len, AVS_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; +} + +static void avs_pts_check_in(struct vdec_avs_hw_s *hw, + unsigned short decode_pic_count, struct vframe_chunk_s *chunk) +{ + if (chunk) + debug_print(hw, PRINT_FLAG_PTS, + "%s %d (wr pos %d), pts %d pts64 %ld timestamp %ld\n", + __func__, decode_pic_count, hw->pic_pts_wr_pos, + chunk->pts, (u64)(chunk->pts64), (u64)(chunk->timestamp)); + else + debug_print(hw, PRINT_FLAG_PTS, + "%s %d, chunk is null\n", + __func__, decode_pic_count); + + if (chunk) { + hw->pic_pts[hw->pic_pts_wr_pos].pts = chunk->pts; + hw->pic_pts[hw->pic_pts_wr_pos].pts64 = chunk->pts64; + hw->pic_pts[hw->pic_pts_wr_pos].timestamp = chunk->timestamp; + } else { + hw->pic_pts[hw->pic_pts_wr_pos].pts = 0; + hw->pic_pts[hw->pic_pts_wr_pos].pts64 = 0; + hw->pic_pts[hw->pic_pts_wr_pos].timestamp = 0; + } + hw->pic_pts[hw->pic_pts_wr_pos].decode_pic_count + = decode_pic_count; + hw->pic_pts_wr_pos++; + if (hw->pic_pts_wr_pos >= PIC_PTS_NUM) + hw->pic_pts_wr_pos = 0; + return; +} + +static void clear_pts_buf(struct vdec_avs_hw_s *hw) +{ + int i; + debug_print(hw, PRINT_FLAG_PTS, + "%s\n", __func__); + hw->pic_pts_wr_pos = 0; + for (i = 0; i < PIC_PTS_NUM; i++) { + hw->pic_pts[hw->pic_pts_wr_pos].pts = 0; + hw->pic_pts[hw->pic_pts_wr_pos].pts64 = 0; + hw->pic_pts[hw->pic_pts_wr_pos].timestamp = 0; + hw->pic_pts[hw->pic_pts_wr_pos].decode_pic_count = 0; + } +} + +static int set_vframe_pts(struct vdec_avs_hw_s *hw, + unsigned short decode_pic_count, struct vframe_s *vf) +{ + int i; + int ret = -1; + for (i = 0; i < PIC_PTS_NUM; i++) { + if (hw->pic_pts[i].decode_pic_count == decode_pic_count) { + vf->pts = hw->pic_pts[i].pts; + vf->pts_us64 = hw->pic_pts[i].pts64; + vf->timestamp = hw->pic_pts[i].timestamp; + ret = 0; + debug_print(hw, 
PRINT_FLAG_PTS, + "%s %d (rd pos %d), pts %d pts64 %ld timestamp %ld\n", + __func__, decode_pic_count, i, + vf->pts, vf->pts_us64, vf->timestamp); + + break; + } + } + return ret; +} + +static void avs_vf_notify_receiver(struct vdec_avs_hw_s *hw, + const char *provider_name, int event_type, void *data) +{ + if (hw->m_ins_flag) + vf_notify_receiver(hw_to_vdec(hw)->vf_provider_name, + event_type, data); + else + vf_notify_receiver(provider_name, event_type, data); +} + +static void set_frame_info(struct vdec_avs_hw_s *hw, struct vframe_s *vf, + unsigned int *duration) +{ + int ar = 0; + + unsigned int pixel_ratio = READ_VREG(AVS_PIC_RATIO); + atomic_add(1, &hw->prepare_num); +#ifndef USE_AVS_SEQ_INFO + if (hw->vavs_amstream_dec_info.width > 0 + && hw->vavs_amstream_dec_info.height > 0) { + vf->width = hw->vavs_amstream_dec_info.width; + vf->height = hw->vavs_amstream_dec_info.height; + } else +#endif + { + vf->width = READ_VREG(AVS_PIC_WIDTH); + vf->height = READ_VREG(AVS_PIC_HEIGHT); + hw->frame_width = vf->width; + hw->frame_height = vf->height; + /* pr_info("%s: (%d,%d)\n", __func__,vf->width, vf->height);*/ + } + +#ifndef USE_AVS_SEQ_INFO + if (hw->vavs_amstream_dec_info.rate > 0) + *duration = hw->vavs_amstream_dec_info.rate; + else +#endif + { + *duration = frame_rate_tab[READ_VREG(AVS_FRAME_RATE) & 0xf]; + /* pr_info("%s: duration = %d\n", __func__, *duration); */ + hw->frame_dur = *duration; + schedule_work(&hw->notify_work); + } + + if (hw->vavs_ratio == 0) { + /* always stretch to 16:9 */ + vf->ratio_control |= (0x90 << + DISP_RATIO_ASPECT_RATIO_BIT); + vf->sar_width = 1; + vf->sar_height = 1; + } else { + switch (pixel_ratio) { + case 1: + vf->sar_width = 1; + vf->sar_height = 1; + ar = (vf->height * hw->vavs_ratio) / vf->width; + break; + case 2: + vf->sar_width = 4; + vf->sar_height = 3; + ar = (vf->height * 3 * hw->vavs_ratio) / (vf->width * 4); + break; + case 3: + vf->sar_width = 16; + vf->sar_height = 9; + ar = (vf->height * 9 * hw->vavs_ratio) / 
(vf->width * 16); + break; + case 4: + vf->sar_width = 221; + vf->sar_height = 100; + ar = (vf->height * 100 * hw->vavs_ratio) / (vf->width * + 221); + break; + default: + vf->sar_width = 1; + vf->sar_height = 1; + ar = (vf->height * hw->vavs_ratio) / vf->width; + break; + } + } + + ar = min(ar, DISP_RATIO_ASPECT_RATIO_MAX); + + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + /*vf->ratio_control |= DISP_RATIO_FORCECONFIG | DISP_RATIO_KEEPRATIO; */ + + vf->flag = 0; + buf_of_vf(vf)->detached = 0; + +} + +#ifdef ENABLE_USER_DATA + +/*static struct work_struct userdata_push_work;*/ +/* +#define DUMP_LAST_REPORTED_USER_DATA +*/ +static void userdata_push_process(struct vdec_avs_hw_s *hw) +{ + unsigned int user_data_flags; + unsigned int user_data_wp; + unsigned int user_data_length; + struct userdata_poc_info_t user_data_poc; +#ifdef DUMP_LAST_REPORTED_USER_DATA + int user_data_len; + int wp_start; + unsigned char *pdata; + int nLeft; +#endif + + user_data_flags = READ_VREG(AV_SCRATCH_N); + user_data_wp = (user_data_flags >> 16) & 0xffff; + user_data_length = user_data_flags & 0x7fff; + +#ifdef DUMP_LAST_REPORTED_USER_DATA + dma_sync_single_for_cpu(amports_get_dma_device(), + hw->user_data_buffer_phys, USER_DATA_SIZE, + DMA_FROM_DEVICE); + + if (user_data_length & 0x07) + user_data_len = (user_data_length + 8) & 0xFFFFFFF8; + else + user_data_len = user_data_length; + + if (user_data_wp >= user_data_len) { + wp_start = user_data_wp - user_data_len; + + pdata = (unsigned char *)hw->user_data_buffer; + pdata += wp_start; + nLeft = user_data_len; + while (nLeft >= 8) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + } else { + wp_start = user_data_wp + + USER_DATA_SIZE - user_data_len; + + pdata = (unsigned char *)hw->user_data_buffer; + pdata += wp_start; + nLeft = USER_DATA_SIZE - wp_start; + + while (nLeft >= 8) { + pr_info("%02x %02x 
%02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + + pdata = (unsigned char *)hw->user_data_buffer; + nLeft = user_data_wp; + while (nLeft >= 8) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + } +#endif + +/* + pr_info("pocinfo 0x%x, poc %d, wp 0x%x, len %d\n", + READ_VREG(AV_SCRATCH_L), READ_VREG(AV_SCRATCH_M), + user_data_wp, user_data_length); +*/ + user_data_poc.poc_info = READ_VREG(AV_SCRATCH_L); + user_data_poc.poc_number = READ_VREG(AV_SCRATCH_M); + + WRITE_VREG(AV_SCRATCH_N, 0); +/* + wakeup_userdata_poll(user_data_poc, user_data_wp, + (unsigned long)hw->user_data_buffer, + USER_DATA_SIZE, user_data_length); +*/ +} + +static void userdata_push_do_work(struct work_struct *work) +{ + struct vdec_avs_hw_s *hw = + container_of(work, struct vdec_avs_hw_s, userdata_push_work); + userdata_push_process(hw); +} + +static u8 UserDataHandler(struct vdec_avs_hw_s *hw) +{ + unsigned int user_data_flags; + + user_data_flags = READ_VREG(AV_SCRATCH_N); + if (user_data_flags & (1 << 15)) { /* data ready */ + if (hw->m_ins_flag) { + hw->dec_result = DEC_RESULT_USERDATA; + vdec_schedule_work(&hw->work); + return 1; + } else + schedule_work(&hw->userdata_push_work); + } + return 0; +} +#endif + + +static inline void avs_update_gvs(struct vdec_avs_hw_s *hw) +{ + if (hw->gvs->frame_height != hw->frame_height) { + hw->gvs->frame_width = hw->frame_width; + hw->gvs->frame_height = hw->frame_height; + } + if (hw->gvs->frame_dur != hw->frame_dur) { + hw->gvs->frame_dur = hw->frame_dur; + if (hw->frame_dur != 0) + hw->gvs->frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ? 
+ 96000 / hw->frame_dur : (96000 / hw->frame_dur +1); + else + hw->gvs->frame_rate = -1; + } + + hw->gvs->status = hw->stat; + hw->gvs->error_count = READ_VREG(AV_SCRATCH_C); + hw->gvs->drop_frame_count = hw->drop_frame_count; + +} + +#ifdef HANDLE_AVS_IRQ +static irqreturn_t vavs_isr(int irq, void *dev_id) +#else +static void vavs_isr(void) +#endif +{ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + return IRQ_WAKE_THREAD; +} +/* + *static int run_flag = 1; + *static int step_flag; + */ +static int error_recovery_mode; /*0: blocky 1: mosaic*/ +/* + *static uint error_watchdog_threshold=10; + *static uint error_watchdog_count; + *static uint error_watchdog_buf_threshold = 0x4000000; + */ + +static struct vframe_s *vavs_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)op_arg; + atomic_add(1, &hw->peek_num); + if (step == 2) + return NULL; + if (hw->recover_flag) + return NULL; + + if (kfifo_peek(&hw->display_q, &vf)) { + if (vf) { + if (force_fps & 0x100) { + u32 rate = force_fps & 0xff; + + if (rate) + vf->duration = 96000/rate; + else + vf->duration = 0; + } + + } + return vf; + } + + return NULL; + +} + +static struct vframe_s *vavs_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)op_arg; + unsigned long flags; + + if (hw->recover_flag) + return NULL; + + if (step == 2) + return NULL; + else if (step == 1) + step = 2; + + spin_lock_irqsave(&lock, flags); + if (kfifo_get(&hw->display_q, &vf)) { + if (vf) { + vf->index_disp = atomic_read(&hw->get_num); + atomic_add(1, &hw->get_num); + if (force_fps & 0x100) { + u32 rate = force_fps & 0xff; + + if (rate) + vf->duration = 96000/rate; + else + vf->duration = 0; + } + + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "%s, index = %d, w %d h %d, type 0x%x detached %d\n", + __func__, + vf->index, + vf->width, + vf->height, + vf->type, + buf_of_vf(vf)->detached); + } + spin_unlock_irqrestore(&lock, flags); + return vf; + } + 
spin_unlock_irqrestore(&lock, flags); + + return NULL; + +} + +static void vavs_vf_put(struct vframe_s *vf, void *op_arg) +{ + int i; + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)op_arg; + + if (vf) { + atomic_add(1, &hw->put_num); + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "%s, index = %d, w %d h %d, type 0x%x detached 0x%x\n", + __func__, + vf->index, + vf->width, + vf->height, + vf->type, + buf_of_vf(vf)->detached); + } + if (hw->recover_flag) + return; + + for (i = 0; i < VF_POOL_SIZE; i++) { + if (vf == &hw->vfpool[i].vf) + break; + } + if (i < VF_POOL_SIZE) + + kfifo_put(&hw->recycle_q, (const struct vframe_s *)vf); + +} + +static int vavs_event_cb(int type, void *data, void *private_data) +{ + struct vdec_avs_hw_s *hw = (struct vdec_avs_hw_s *)private_data; + + if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(hw_to_vdec(hw)); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +static int vavs_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + /*if (!(hw->stat & STAT_VDEC_RUN)) + return -1;*/ + if (!hw) + return -1; + + vstatus->frame_width = hw->frame_width; + vstatus->frame_height = hw->frame_height; + if (hw->frame_dur != 0) + vstatus->frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ? 
+ 96000 / hw->frame_dur : (96000 / hw->frame_dur +1); + else + vstatus->frame_rate = -1; + vstatus->error_count = READ_VREG(AV_SCRATCH_C); + vstatus->status = hw->stat; + vstatus->bit_rate = hw->gvs->bit_rate; + vstatus->frame_dur = hw->frame_dur; + vstatus->frame_data = hw->gvs->frame_data; + vstatus->total_data = hw->gvs->total_data; + vstatus->frame_count = hw->gvs->frame_count; + vstatus->error_frame_count = hw->gvs->error_frame_count; + vstatus->drop_frame_count = hw->gvs->drop_frame_count; + vstatus->i_decoded_frames = hw->gvs->i_decoded_frames; + vstatus->i_lost_frames = hw->gvs->i_lost_frames; + vstatus->i_concealed_frames = hw->gvs->i_concealed_frames; + vstatus->p_decoded_frames = hw->gvs->p_decoded_frames; + vstatus->p_lost_frames = hw->gvs->p_lost_frames; + vstatus->p_concealed_frames = hw->gvs->p_concealed_frames; + vstatus->b_decoded_frames = hw->gvs->b_decoded_frames; + vstatus->b_lost_frames = hw->gvs->b_lost_frames; + vstatus->b_concealed_frames = hw->gvs->b_concealed_frames; + vstatus->total_data = hw->gvs->total_data; + vstatus->samp_cnt = hw->gvs->samp_cnt; + vstatus->offset = hw->gvs->offset; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + +static int vavs_set_isreset(struct vdec_s *vdec, int isreset) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + + hw->is_reset = isreset; + return 0; +} + +static int vavs_vdec_info_init(struct vdec_avs_hw_s *hw) +{ + + hw->gvs = kzalloc(sizeof(struct vdec_info), GFP_KERNEL); + if (NULL == hw->gvs) { + pr_info("the struct of vdec status malloc failed.\n"); + return -ENOMEM; + } + + return 0; +} +/****************************************/ +static int vavs_canvas_init(struct vdec_avs_hw_s *hw) +{ + int i, ret; + u32 canvas_width, canvas_height; + u32 decbuf_size, decbuf_y_size, decbuf_uv_size; + unsigned long buf_start; + int need_alloc_buf_num; + struct vdec_s *vdec = NULL; + + if (hw->m_ins_flag) + vdec = hw_to_vdec(hw); + + if 
(buf_size <= 0x00400000) { + /* SD only */ + canvas_width = 768; + canvas_height = 576; + decbuf_y_size = 0x80000; + decbuf_uv_size = 0x20000; + decbuf_size = 0x100000; + } else { + /* HD & SD */ + canvas_width = 1920; + canvas_height = 1088; + decbuf_y_size = 0x200000; + decbuf_uv_size = 0x80000; + decbuf_size = 0x300000; + } + +#ifdef AVSP_LONG_CABAC + need_alloc_buf_num = hw->vf_buf_num_used + 2; +#else + need_alloc_buf_num = hw->vf_buf_num_used + 1; +#endif + for (i = 0; i < need_alloc_buf_num; i++) { + + if (i == (need_alloc_buf_num - 1)) + decbuf_size = WORKSPACE_SIZE; +#ifdef AVSP_LONG_CABAC + else if (i == (need_alloc_buf_num - 2)) + decbuf_size = WORKSPACE_SIZE_A; +#endif + ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, i, + decbuf_size, DRIVER_NAME, &buf_start); + if (ret < 0) + return ret; + if (i == (need_alloc_buf_num - 1)) { + if (firmware_sel == 1) + hw->buf_offset = buf_start - + RV_AI_BUFF_START_ADDR; + else + hw->buf_offset = buf_start - + LONG_CABAC_RV_AI_BUFF_START_ADDR; + continue; + } +#ifdef AVSP_LONG_CABAC + else if (i == (need_alloc_buf_num - 2)) { + avsp_heap_adr = codec_mm_phys_to_virt(buf_start); + continue; + } +#endif + if (hw->m_ins_flag) { + unsigned canvas; + + if (vdec->parallel_dec == 1) { + unsigned tmp; + if (canvas_u(hw->canvas_spec[i]) == 0xff) { + tmp = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~(0xffff << 8); + hw->canvas_spec[i] |= tmp << 8; + hw->canvas_spec[i] |= tmp << 16; + } + if (canvas_y(hw->canvas_spec[i]) == 0xff) { + tmp = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~0xff; + hw->canvas_spec[i] |= tmp; + } + canvas = hw->canvas_spec[i]; + } else { + canvas = vdec->get_canvas(i, 2); + hw->canvas_spec[i] = canvas; + } + + hw->canvas_config[i][0].phy_addr = + buf_start; + hw->canvas_config[i][0].width = + canvas_width; + hw->canvas_config[i][0].height = + canvas_height; + hw->canvas_config[i][0].block_mode = + CANVAS_BLKMODE_32X32; + + 
hw->canvas_config[i][1].phy_addr = + buf_start + decbuf_y_size; + hw->canvas_config[i][1].width = + canvas_width; + hw->canvas_config[i][1].height = + canvas_height / 2; + hw->canvas_config[i][1].block_mode = + CANVAS_BLKMODE_32X32; + + } else { +#ifdef NV21 + config_cav_lut_ex(canvas_base + canvas_num * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(canvas_base + canvas_num * i + 1, + buf_start + + decbuf_y_size, canvas_width, + canvas_height / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); +#else + config_cav_lut_ex(canvas_num * i + 0, + buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(canvas_num * i + 1, + buf_start + + decbuf_y_size, canvas_width / 2, + canvas_height / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); + config_cav_lut_ex(canvas_num * i + 2, + buf_start + + decbuf_y_size + decbuf_uv_size, + canvas_width / 2, canvas_height / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_32X32, 0, VDEC_1); +#endif + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "canvas config %d, addr %p\n", i, + (void *)buf_start); + } + } + return 0; +} + +static void vavs_recover(struct vdec_avs_hw_s *hw) +{ + vavs_canvas_init(hw); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 9) | (1 << 8)); + WRITE_VREG(DOS_SW_RESET0, 0); + + if (firmware_sel == 1) { + WRITE_VREG(POWER_CTL_VLD, 0x10); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 2, + MEM_FIFO_CNT_BIT, 2); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 8, + MEM_LEVEL_CNT_BIT, 6); + } + + + if (firmware_sel == 0) { + /* fixed canvas index */ + WRITE_VREG(AV_SCRATCH_0, canvas_base); + WRITE_VREG(AV_SCRATCH_1, hw->vf_buf_num_used); + } else { + int ii; +#ifndef 
USE_DYNAMIC_BUF_NUM + for (ii = 0; ii < 4; ii++) { + WRITE_VREG(AV_SCRATCH_0 + ii, + (canvas_base + canvas_num * ii) | + ((canvas_base + canvas_num * ii + 1) + << 8) | + ((canvas_base + canvas_num * ii + 1) + << 16) + ); + } +#else + for (ii = 0; ii < hw->vf_buf_num_used; ii += 2) { + WRITE_VREG(buf_spec_reg[ii >> 1], + (canvas_base + canvas_num * ii) | + ((canvas_base + canvas_num * ii + 1) + << 8) | + ((canvas_base + canvas_num * ii + 2) + << 16) | + ((canvas_base + canvas_num * ii + 3) + << 24) + ); + } +#endif + } + + /* notify ucode the buffer offset */ + WRITE_VREG(AV_SCRATCH_F, hw->buf_offset); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + +#ifndef USE_DYNAMIC_BUF_NUM + WRITE_VREG(AVS_SOS_COUNT, 0); +#endif + WRITE_VREG(AVS_BUFFERIN, 0); + WRITE_VREG(AVS_BUFFEROUT, 0); + if (error_recovery_mode) + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 0); + else + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 1); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); +#ifndef USE_DYNAMIC_BUF_NUM /* def DEBUG_UCODE */ + WRITE_VREG(AV_SCRATCH_D, 0); +#endif + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + +#ifdef PIC_DC_NEED_CLEAR + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 31); +#endif + +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) { + WRITE_VREG(LONG_CABAC_DES_ADDR, es_write_addr_phy); + WRITE_VREG(LONG_CABAC_REQ, 0); + WRITE_VREG(LONG_CABAC_PIC_SIZE, 0); + WRITE_VREG(LONG_CABAC_SRC_ADDR, 0); + } +#endif + WRITE_VREG(AV_SCRATCH_5, 0); + +} + +#define MBY_MBX MB_MOTION_MODE /*0xc07*/ +#define AVS_CO_MB_WR_ADDR 0xc38 +#define AVS_CO_MB_RW_CTL 0xc3d +#define AVS_CO_MB_RD_ADDR 0xc39 +#define AVSP_IQ_WQ_PARAM_01 0x0e19 +#define AVSP_IQ_WQ_PARAM_23 0x0e1a +#define AVSP_IQ_WQ_PARAM_45 0x0e1b + +static void vavs_save_regs(struct vdec_avs_hw_s *hw) +{ + hw->reg_scratch_0 = READ_VREG(AV_SCRATCH_0); + 
hw->reg_scratch_1 = READ_VREG(AV_SCRATCH_1); + hw->reg_scratch_2 = READ_VREG(AV_SCRATCH_2); + hw->reg_scratch_3 = READ_VREG(AV_SCRATCH_3); + hw->reg_scratch_4 = READ_VREG(AV_SCRATCH_4); + hw->reg_scratch_5 = READ_VREG(AV_SCRATCH_5); + hw->reg_scratch_6 = READ_VREG(AV_SCRATCH_6); + hw->reg_scratch_7 = READ_VREG(AV_SCRATCH_7); + hw->reg_scratch_8 = READ_VREG(AV_SCRATCH_8); + hw->reg_scratch_9 = READ_VREG(AV_SCRATCH_9); + hw->reg_scratch_A = READ_VREG(AV_SCRATCH_A); + hw->reg_scratch_B = READ_VREG(AV_SCRATCH_B); + hw->reg_scratch_C = READ_VREG(AV_SCRATCH_C); + hw->reg_scratch_D = READ_VREG(AV_SCRATCH_D); + hw->reg_scratch_E = READ_VREG(AV_SCRATCH_E); + hw->reg_scratch_F = READ_VREG(AV_SCRATCH_F); + hw->reg_scratch_G = READ_VREG(AV_SCRATCH_G); + hw->reg_scratch_H = READ_VREG(AV_SCRATCH_H); + hw->reg_scratch_I = READ_VREG(AV_SCRATCH_I); + + hw->reg_mb_width = READ_VREG(MB_WIDTH); + hw->reg_viff_bit_cnt = READ_VREG(VIFF_BIT_CNT); + + hw->reg_canvas_addr = READ_VREG(REC_CANVAS_ADDR); + hw->reg_dbkr_canvas_addr = READ_VREG(DBKR_CANVAS_ADDR); + hw->reg_dbkw_canvas_addr = READ_VREG(DBKW_CANVAS_ADDR); + hw->reg_anc2_canvas_addr = READ_VREG(ANC2_CANVAS_ADDR); + hw->reg_anc0_canvas_addr = READ_VREG(ANC0_CANVAS_ADDR); + hw->reg_anc1_canvas_addr = READ_VREG(ANC1_CANVAS_ADDR); + hw->reg_anc3_canvas_addr = READ_VREG(ANC3_CANVAS_ADDR); + hw->reg_anc4_canvas_addr = READ_VREG(ANC4_CANVAS_ADDR); + hw->reg_anc5_canvas_addr = READ_VREG(ANC5_CANVAS_ADDR); + + hw->slice_ver_pos_pic_type = READ_VREG(SLICE_VER_POS_PIC_TYPE); + + hw->vc1_control_reg = READ_VREG(VC1_CONTROL_REG); + hw->avs_co_mb_wr_addr = READ_VREG(AVS_CO_MB_WR_ADDR); + hw->slice_start_byte_01 = READ_VREG(SLICE_START_BYTE_01); + hw->slice_start_byte_23 = READ_VREG(SLICE_START_BYTE_23); + hw->vcop_ctrl_reg = READ_VREG(VCOP_CTRL_REG); + hw->iqidct_control = READ_VREG(IQIDCT_CONTROL); + hw->rv_ai_mb_count = READ_VREG(RV_AI_MB_COUNT); + hw->slice_qp = READ_VREG(SLICE_QP); + + hw->dc_scaler = READ_VREG(DC_SCALER); + 
hw->avsp_iq_wq_param_01 = READ_VREG(AVSP_IQ_WQ_PARAM_01); + hw->avsp_iq_wq_param_23 = READ_VREG(AVSP_IQ_WQ_PARAM_23); + hw->avsp_iq_wq_param_45 = READ_VREG(AVSP_IQ_WQ_PARAM_45); + hw->avs_co_mb_rd_addr = READ_VREG(AVS_CO_MB_RD_ADDR); + hw->dblk_mb_wid_height = READ_VREG(DBLK_MB_WID_HEIGHT); + hw->mc_pic_w_h = READ_VREG(MC_PIC_W_H); + hw->avs_co_mb_rw_ctl = READ_VREG(AVS_CO_MB_RW_CTL); + + hw->vld_decode_control = READ_VREG(VLD_DECODE_CONTROL); +} + +static void vavs_restore_regs(struct vdec_avs_hw_s *hw) +{ + debug_print(hw, PRINT_FLAG_DECODING, + "%s scratch_8 (AVS_BUFFERIN) 0x%x, decode_pic_count = %d\n", + __func__, hw->reg_scratch_8, hw->decode_pic_count); + + WRITE_VREG(AV_SCRATCH_0, hw->reg_scratch_0); + WRITE_VREG(AV_SCRATCH_1, hw->reg_scratch_1); + WRITE_VREG(AV_SCRATCH_2, hw->reg_scratch_2); + WRITE_VREG(AV_SCRATCH_3, hw->reg_scratch_3); + WRITE_VREG(AV_SCRATCH_4, hw->reg_scratch_4); + WRITE_VREG(AV_SCRATCH_5, hw->reg_scratch_5); + WRITE_VREG(AV_SCRATCH_6, hw->reg_scratch_6); + WRITE_VREG(AV_SCRATCH_7, hw->reg_scratch_7); + WRITE_VREG(AV_SCRATCH_8, hw->reg_scratch_8); + WRITE_VREG(AV_SCRATCH_9, hw->reg_scratch_9); + WRITE_VREG(AV_SCRATCH_A, hw->reg_scratch_A); + WRITE_VREG(AV_SCRATCH_B, hw->reg_scratch_B); + WRITE_VREG(AV_SCRATCH_C, hw->reg_scratch_C); + WRITE_VREG(AV_SCRATCH_D, hw->reg_scratch_D); + WRITE_VREG(AV_SCRATCH_E, hw->reg_scratch_E); + WRITE_VREG(AV_SCRATCH_F, hw->reg_scratch_F); + WRITE_VREG(AV_SCRATCH_G, hw->reg_scratch_G); + WRITE_VREG(AV_SCRATCH_H, hw->reg_scratch_H); + WRITE_VREG(AV_SCRATCH_I, hw->reg_scratch_I); + + WRITE_VREG(MB_WIDTH, hw->reg_mb_width); + WRITE_VREG(VIFF_BIT_CNT, hw->reg_viff_bit_cnt); + + WRITE_VREG(REC_CANVAS_ADDR, hw->reg_canvas_addr); + WRITE_VREG(DBKR_CANVAS_ADDR, hw->reg_dbkr_canvas_addr); + WRITE_VREG(DBKW_CANVAS_ADDR, hw->reg_dbkw_canvas_addr); + WRITE_VREG(ANC2_CANVAS_ADDR, hw->reg_anc2_canvas_addr); + WRITE_VREG(ANC0_CANVAS_ADDR, hw->reg_anc0_canvas_addr); + WRITE_VREG(ANC1_CANVAS_ADDR, 
hw->reg_anc1_canvas_addr); + WRITE_VREG(ANC3_CANVAS_ADDR, hw->reg_anc3_canvas_addr); + WRITE_VREG(ANC4_CANVAS_ADDR, hw->reg_anc4_canvas_addr); + WRITE_VREG(ANC5_CANVAS_ADDR, hw->reg_anc5_canvas_addr); + + WRITE_VREG(SLICE_VER_POS_PIC_TYPE, hw->slice_ver_pos_pic_type); + + WRITE_VREG(VC1_CONTROL_REG, hw->vc1_control_reg); + WRITE_VREG(AVS_CO_MB_WR_ADDR, hw->avs_co_mb_wr_addr); + WRITE_VREG(SLICE_START_BYTE_01, hw->slice_start_byte_01); + WRITE_VREG(SLICE_START_BYTE_23, hw->slice_start_byte_23); + WRITE_VREG(VCOP_CTRL_REG, hw->vcop_ctrl_reg); + WRITE_VREG(IQIDCT_CONTROL, hw->iqidct_control); + WRITE_VREG(RV_AI_MB_COUNT, hw->rv_ai_mb_count); + WRITE_VREG(SLICE_QP, hw->slice_qp); + + WRITE_VREG(DC_SCALER, hw->dc_scaler); + WRITE_VREG(AVSP_IQ_WQ_PARAM_01, hw->avsp_iq_wq_param_01); + WRITE_VREG(AVSP_IQ_WQ_PARAM_23, hw->avsp_iq_wq_param_23); + WRITE_VREG(AVSP_IQ_WQ_PARAM_45, hw->avsp_iq_wq_param_45); + WRITE_VREG(AVS_CO_MB_RD_ADDR, hw->avs_co_mb_rd_addr); + WRITE_VREG(DBLK_MB_WID_HEIGHT, hw->dblk_mb_wid_height); + WRITE_VREG(MC_PIC_W_H, hw->mc_pic_w_h); + WRITE_VREG(AVS_CO_MB_RW_CTL, hw->avs_co_mb_rw_ctl); + + WRITE_VREG(VLD_DECODE_CONTROL, hw->vld_decode_control); + +} + +static int vavs_prot_init(struct vdec_avs_hw_s *hw) +{ + int r = 0; +#if DEBUG_MULTI_FLAG > 0 + if (hw->decode_pic_count == 0) { +#endif +#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) | (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 9) | (1 << 8)); + WRITE_VREG(DOS_SW_RESET0, 0); + +#else + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + READ_RESET_REG(RESET0_REGISTER); + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + + WRITE_RESET_REG(RESET2_REGISTER, RESET_PIC_DC | RESET_DBLK); +#endif +#if DEBUG_MULTI_FLAG > 0 + } +#endif + 
/***************** reset vld **********************************/ + WRITE_VREG(POWER_CTL_VLD, 0x10); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 2, MEM_FIFO_CNT_BIT, 2); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 8, MEM_LEVEL_CNT_BIT, 6); + /*************************************************************/ + if (hw->m_ins_flag) { + int i; + if (hw->decode_pic_count == 0) { + r = vavs_canvas_init(hw); +#ifndef USE_DYNAMIC_BUF_NUM + for (i = 0; i < 4; i++) { + WRITE_VREG(AV_SCRATCH_0 + i, + hw->canvas_spec[i] + ); + } +#else + for (i = 0; i < hw->vf_buf_num_used; i++) + WRITE_VREG(buf_spec_reg[i], 0); + for (i = 0; i < hw->vf_buf_num_used; i += 2) { + WRITE_VREG(buf_spec_reg[i >> 1], + (hw->canvas_spec[i] & 0xffff) | + ((hw->canvas_spec[i + 1] & 0xffff) + << 16) + ); + debug_print(hw, PRINT_FLAG_DECODING, + "%s WRITE_VREG(0x%x, 0x%x)\n", + __func__, buf_spec_reg[i >> 1], READ_VREG(buf_spec_reg[i >> 1])); + } +#endif + } else + vavs_restore_regs(hw); + + for (i = 0; i < hw->vf_buf_num_used; i++) { + config_cav_lut_ex(canvas_y(hw->canvas_spec[i]), + hw->canvas_config[i][0].phy_addr, + hw->canvas_config[i][0].width, + hw->canvas_config[i][0].height, + CANVAS_ADDR_NOWRAP, + hw->canvas_config[i][0].block_mode, + 0, VDEC_1); + + config_cav_lut_ex(canvas_u(hw->canvas_spec[i]), + hw->canvas_config[i][1].phy_addr, + hw->canvas_config[i][1].width, + hw->canvas_config[i][1].height, + CANVAS_ADDR_NOWRAP, + hw->canvas_config[i][1].block_mode, + 0, VDEC_1); + } + } else { + r = vavs_canvas_init(hw); +#ifdef NV21 + if (firmware_sel == 0) { + /* fixed canvas index */ + WRITE_VREG(AV_SCRATCH_0, canvas_base); + WRITE_VREG(AV_SCRATCH_1, hw->vf_buf_num_used); + } else { + int ii; +#ifndef USE_DYNAMIC_BUF_NUM + for (ii = 0; ii < 4; ii++) { + WRITE_VREG(AV_SCRATCH_0 + ii, + (canvas_base + canvas_num * ii) | + ((canvas_base + canvas_num * ii + 1) + << 8) | + ((canvas_base + canvas_num * ii + 1) + << 16) + ); + } +#else + for (ii = 0; ii < hw->vf_buf_num_used; ii += 2) { + 
WRITE_VREG(buf_spec_reg[ii >> 1], + (canvas_base + canvas_num * ii) | + ((canvas_base + canvas_num * ii + 1) + << 8) | + ((canvas_base + canvas_num * ii + 2) + << 16) | + ((canvas_base + canvas_num * ii + 3) + << 24) + ); + } +#endif + /* + *WRITE_VREG(AV_SCRATCH_0, 0x010100); + *WRITE_VREG(AV_SCRATCH_1, 0x040403); + *WRITE_VREG(AV_SCRATCH_2, 0x070706); + *WRITE_VREG(AV_SCRATCH_3, 0x0a0a09); + */ + } +#else + /* index v << 16 | u << 8 | y */ + WRITE_VREG(AV_SCRATCH_0, 0x020100); + WRITE_VREG(AV_SCRATCH_1, 0x050403); + WRITE_VREG(AV_SCRATCH_2, 0x080706); + WRITE_VREG(AV_SCRATCH_3, 0x0b0a09); +#endif + } + /* notify ucode the buffer offset */ + if (hw->decode_pic_count == 0) + WRITE_VREG(AV_SCRATCH_F, hw->buf_offset); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + if (hw->decode_pic_count == 0) { +#ifndef USE_DYNAMIC_BUF_NUM + WRITE_VREG(AVS_SOS_COUNT, 0); +#endif + WRITE_VREG(AVS_BUFFERIN, 0); + WRITE_VREG(AVS_BUFFEROUT, 0); + } + if (error_recovery_mode) + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 0); + else + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 1); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); +#ifndef USE_DYNAMIC_BUF_NUM /* def DEBUG_UCODE */ + if (hw->decode_pic_count == 0) + WRITE_VREG(AV_SCRATCH_D, 0); +#endif + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + /* V4L2_PIX_FMT_NV21 V4L2_PIX_FMT_NV21M */ + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + +#ifdef PIC_DC_NEED_CLEAR + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 31); +#endif + if (hw->m_ins_flag && start_decoding_delay > 0) + msleep(start_decoding_delay); + + //pr_info("+++++++++++++++++++++++++++++++\n"); + //pr_info("+++++++++++++++++++++++++++++++\n"); + //pr_info("+++++++++++++++++++++++++++++++\n"); +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) { + WRITE_VREG(LONG_CABAC_DES_ADDR, es_write_addr_phy); + WRITE_VREG(LONG_CABAC_REQ, 0); + 
WRITE_VREG(LONG_CABAC_PIC_SIZE, 0); + WRITE_VREG(LONG_CABAC_SRC_ADDR, 0); + } +#endif + +#ifdef ENABLE_USER_DATA + if (hw->decode_pic_count == 0) { + WRITE_VREG(AV_SCRATCH_N, (u32)(hw->user_data_buffer_phys - hw->buf_offset)); + pr_debug("AV_SCRATCH_N = 0x%x\n", READ_VREG(AV_SCRATCH_N)); + } else + WRITE_VREG(AV_SCRATCH_N, 0); +#endif + if (hw->m_ins_flag) { + if (vdec_frame_based(hw_to_vdec(hw))) + WRITE_VREG(DECODE_MODE, DECODE_MODE_MULTI_FRAMEBASE); + else { + if (hw->decode_status_skip_pic_done_flag) { + WRITE_VREG(DECODE_CFG, hw->decode_decode_cont_start_code); + WRITE_VREG(DECODE_MODE, DECODE_MODE_MULTI_STREAMBASE_CONT); + } else + WRITE_VREG(DECODE_MODE, DECODE_MODE_MULTI_STREAMBASE); + } + WRITE_VREG(DECODE_LMEM_BUF_ADR, (u32)hw->lmem_phy_addr); + } else + WRITE_VREG(DECODE_MODE, DECODE_MODE_SINGLE); + + if (ins_udebug_flag[DECODE_ID(hw)] && + (ins_udebug_flag[DECODE_ID(hw)] >> 16) == hw->decode_pic_count) { + WRITE_VREG(DECODE_STOP_POS, + ins_udebug_flag[DECODE_ID(hw)] & 0xffff); + } + else + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + hw->old_udebug_flag = udebug_flag; + + return r; +} + + +#ifdef AVSP_LONG_CABAC +static unsigned char es_write_addr[MAX_CODED_FRAME_SIZE] __aligned(64); +#endif +static void vavs_local_init(struct vdec_avs_hw_s *hw) +{ + int i; + + hw->vf_buf_num_used = vf_buf_num; + + hw->vavs_ratio = hw->vavs_amstream_dec_info.ratio; + + hw->avi_flag = (unsigned long) hw->vavs_amstream_dec_info.param; + + hw->frame_width = hw->frame_height = hw->frame_dur = hw->frame_prog = 0; + + hw->throw_pb_flag = 1; + + hw->total_frame = 0; + hw->saved_resolution = 0; + hw->next_pts = 0; + +#ifdef DEBUG_PTS + hw->pts_hit = hw->pts_missed = hw->pts_i_hit = hw->pts_i_missed = 0; +#endif + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->recycle_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &hw->vfpool[i].vf; + + hw->vfpool[i].vf.index = hw->vf_buf_num_used; + hw->vfpool[i].vf.bufWidth = 1920; + 
hw->vfpool[i].detached = 0; + kfifo_put(&hw->newframe_q, vf); + } + for (i = 0; i < hw->vf_buf_num_used; i++) + hw->vfbuf_use[i] = 0; + + /*cur_vfpool = vfpool;*/ + + if (hw->recover_flag == 1) + return; + + if (hw->mm_blk_handle) { + pr_info("decoder_bmmu_box_free\n"); + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + + hw->mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER); + if (hw->mm_blk_handle == NULL) + pr_info("Error, decoder_bmmu_box_alloc_box fail\n"); + +} + +static int vavs_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)op_arg; + + + spin_lock_irqsave(&lock, flags); + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&hw->newframe_q); + states->buf_avail_num = kfifo_len(&hw->display_q); + states->buf_recycle_num = kfifo_len(&hw->recycle_q); + if (step == 2) + states->buf_avail_num = 0; + spin_unlock_irqrestore(&lock, flags); + return 0; +} + +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER +static void vavs_ppmgr_reset(void) +{ + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_RESET, NULL); + + vavs_local_init(ghw); + + pr_info("vavs: vf_ppmgr_reset\n"); +} +#endif + +static void vavs_local_reset(struct vdec_avs_hw_s *hw) +{ + mutex_lock(&vavs_mutex); + hw->recover_flag = 1; + pr_info("error, local reset\n"); + amvdec_stop(); + msleep(100); + avs_vf_notify_receiver(hw, PROVIDER_NAME, VFRAME_EVENT_PROVIDER_RESET, NULL); + vavs_local_init(hw); + vavs_recover(hw); + +#ifdef ENABLE_USER_DATA + reset_userdata_fifo(1); +#endif + + amvdec_start(); + hw->recover_flag = 0; +#if 0 + error_watchdog_count = 0; + + pr_info("pc %x stream buf wp %x rp %x level %x\n", + READ_VREG(MPC_E), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); +#endif + + + + 
mutex_unlock(&vavs_mutex); +} + +#if 0 +static struct work_struct fatal_error_wd_work; +static struct work_struct notify_work; +static atomic_t error_handler_run = ATOMIC_INIT(0); +#endif +static void vavs_fatal_error_handler(struct work_struct *work) +{ + struct vdec_avs_hw_s *hw = + container_of(work, struct vdec_avs_hw_s, fatal_error_wd_work); + if (debug & AVS_DEBUG_OLD_ERROR_HANDLE) { + mutex_lock(&vavs_mutex); + pr_info("vavs fatal error reset !\n"); + amvdec_stop(); +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vavs_ppmgr_reset(); +#else + vf_light_unreg_provider(&vavs_vf_prov); + vavs_local_init(hw); + vf_reg_provider(&vavs_vf_prov); +#endif + vavs_recover(hw); + amvdec_start(); + mutex_unlock(&vavs_mutex); + } else { + pr_info("avs fatal_error_handler\n"); + vavs_local_reset(hw); + } + atomic_set(&hw->error_handler_run, 0); +} + +static void vavs_notify_work(struct work_struct *work) +{ + struct vdec_avs_hw_s *hw = + container_of(work, struct vdec_avs_hw_s, notify_work); + if (hw->fr_hint_status == VDEC_NEED_HINT) { + avs_vf_notify_receiver(hw, PROVIDER_NAME , + VFRAME_EVENT_PROVIDER_FR_HINT , + (void *)((unsigned long)hw->frame_dur)); + hw->fr_hint_status = VDEC_HINTED; + } + return; +} + +static void avs_set_clk(struct work_struct *work) +{ + struct vdec_avs_hw_s *hw = + container_of(work, struct vdec_avs_hw_s, set_clk_work); + if (hw->frame_dur > 0 && hw->saved_resolution != + hw->frame_width * hw->frame_height * (96000 / hw->frame_dur)) { + int fps = 96000 / hw->frame_dur; + + hw->saved_resolution = hw->frame_width * hw->frame_height * fps; + if (firmware_sel == 0 && + (debug & AVS_DEBUG_USE_FULL_SPEED)) { + vdec_source_changed(VFORMAT_AVS, + 4096, 2048, 60); + } else { + vdec_source_changed(VFORMAT_AVS, + hw->frame_width, hw->frame_height, fps); + } + + } +} + +#ifdef DEBUG_MULTI_WITH_AUTOMODE +int delay_count = 0; +#endif +static void vavs_put_timer_func(struct timer_list *arg) +{ + struct vdec_avs_hw_s *hw = container_of(arg, + struct vdec_avs_hw_s, 
recycle_timer); + struct timer_list *timer = &hw->recycle_timer; + +#ifndef HANDLE_AVS_IRQ + vavs_isr(); +#endif +#ifdef DEBUG_MULTI_WITH_AUTOMODE + if (delay_count > 0) { + if (delay_count == 1) + amvdec_start(); + delay_count--; + } +#endif + if (READ_VREG(AVS_SOS_COUNT)) { + if (!error_recovery_mode) { +#if 0 + if (debug & AVS_DEBUG_OLD_ERROR_HANDLE) { + mutex_lock(&vavs_mutex); + pr_info("vavs fatal error reset !\n"); + amvdec_stop(); +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vavs_ppmgr_reset(); +#else + vf_light_unreg_provider(&vavs_vf_prov); + vavs_local_init(); + vf_reg_provider(&vavs_vf_prov); +#endif + vavs_recover(); + amvdec_start(); + mutex_unlock(&vavs_mutex); + } else { + vavs_local_reset(); + } +#else + if (!atomic_read(&hw->error_handler_run)) { + atomic_set(&hw->error_handler_run, 1); + pr_info("AVS_SOS_COUNT = %d\n", + READ_VREG(AVS_SOS_COUNT)); + pr_info("WP = 0x%x, RP = 0x%x, LEVEL = 0x%x, AVAIL = 0x%x, CUR_PTR = 0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL), + READ_VREG(VLD_MEM_VIFIFO_CURR_PTR)); + schedule_work(&hw->fatal_error_wd_work); + } +#endif + } + } +#if 0 + if (long_cabac_busy == 0 && + error_watchdog_threshold > 0 && + kfifo_len(&hw->display_q) == 0 && + READ_VREG(VLD_MEM_VIFIFO_LEVEL) > + error_watchdog_buf_threshold) { + pr_info("newq %d dispq %d recyq %d\r\n", + kfifo_len(&hw->newframe_q), + kfifo_len(&hw->display_q), + kfifo_len(&hw->recycle_q)); + pr_info("pc %x stream buf wp %x rp %x level %x\n", + READ_VREG(MPC_E), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + error_watchdog_count++; + if (error_watchdog_count >= error_watchdog_threshold) + vavs_local_reset(); + } else + error_watchdog_count = 0; +#endif + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, 
READ_VREG(radr)); + rval = 0; + radr = 0; + } + if ((hw->ucode_pause_pos != 0) && + (hw->ucode_pause_pos != 0xffffffff) && + udebug_pause_pos != hw->ucode_pause_pos) { + hw->ucode_pause_pos = 0; + WRITE_VREG(DEBUG_REG1, 0); + } + + if (!kfifo_is_empty(&hw->recycle_q) && (READ_VREG(AVS_BUFFERIN) == 0)) { + struct vframe_s *vf; + + if (kfifo_get(&hw->recycle_q, &vf)) { + if ((vf->index < hw->vf_buf_num_used) && + (--hw->vfbuf_use[vf->index] == 0)) { + debug_print(hw, PRINT_FLAG_DECODING, + "%s WRITE_VREG(AVS_BUFFERIN, 0x%x) for vf index of %d\n", + __func__, + ~(1 << vf->index), vf->index); + WRITE_VREG(AVS_BUFFERIN, ~(1 << vf->index)); + vf->index = hw->vf_buf_num_used; + } + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + } + + } + + schedule_work(&hw->set_clk_work); + + timer->expires = jiffies + PUT_INTERVAL; + + add_timer(timer); +} + +#ifdef AVSP_LONG_CABAC + +static void long_cabac_do_work(struct work_struct *work) +{ + int status = 0; + struct vdec_avs_hw_s *hw = gw; +#ifdef PERFORMANCE_DEBUG + pr_info("enter %s buf level (new %d, display %d, recycle %d)\r\n", + __func__, + kfifo_len(&hw->newframe_q), + kfifo_len(&hw->display_q), + kfifo_len(&hw->recycle_q) + ); +#endif + mutex_lock(&vavs_mutex); + long_cabac_busy = 1; + while (READ_VREG(LONG_CABAC_REQ)) { + if (process_long_cabac() < 0) { + status = -1; + break; + } + } + long_cabac_busy = 0; + mutex_unlock(&vavs_mutex); +#ifdef PERFORMANCE_DEBUG + pr_info("exit %s buf level (new %d, display %d, recycle %d)\r\n", + __func__, + kfifo_len(&hw->newframe_q), + kfifo_len(&hw->display_q), + kfifo_len(&hw->recycle_q) + ); +#endif + if (status < 0) { + pr_info("transcoding error, local reset\r\n"); + vavs_local_reset(hw); + } + +} +#endif + +#ifdef AVSP_LONG_CABAC +static void init_avsp_long_cabac_buf(void) +{ +#if 0 + es_write_addr_phy = (unsigned long)codec_mm_alloc_for_dma( + "vavs", + PAGE_ALIGN(MAX_CODED_FRAME_SIZE)/PAGE_SIZE, + 0, CODEC_MM_FLAGS_DMA_CPU); + es_write_addr_virt = 
codec_mm_phys_to_virt(es_write_addr_phy); + +#elif 0 + es_write_addr_virt = + (void *)dma_alloc_coherent(amports_get_dma_device(), + MAX_CODED_FRAME_SIZE, &es_write_addr_phy, + GFP_KERNEL); +#else + /*es_write_addr_virt = kmalloc(MAX_CODED_FRAME_SIZE, GFP_KERNEL); + * es_write_addr_virt = (void *)__get_free_pages(GFP_KERNEL, + * get_order(MAX_CODED_FRAME_SIZE)); + */ + es_write_addr_virt = &es_write_addr[0]; + if (es_write_addr_virt == NULL) { + pr_err("%s: failed to alloc es_write_addr_virt buffer\n", + __func__); + return; + } + + es_write_addr_phy = dma_map_single(amports_get_dma_device(), + es_write_addr_virt, + MAX_CODED_FRAME_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(amports_get_dma_device(), + es_write_addr_phy)) { + pr_err("%s: failed to map es_write_addr_virt buffer\n", + __func__); + /*kfree(es_write_addr_virt);*/ + es_write_addr_virt = NULL; + return; + } +#endif + + +#ifdef BITSTREAM_READ_TMP_NO_CACHE + bitstream_read_tmp = + (void *)dma_alloc_coherent(amports_get_dma_device(), + SVA_STREAM_BUF_SIZE, &bitstream_read_tmp_phy, + GFP_KERNEL); + +#else + + bitstream_read_tmp = kmalloc(SVA_STREAM_BUF_SIZE, GFP_KERNEL); + /*bitstream_read_tmp = (void *)__get_free_pages(GFP_KERNEL, + *get_order(MAX_CODED_FRAME_SIZE)); + */ + if (bitstream_read_tmp == NULL) { + pr_err("%s: failed to alloc bitstream_read_tmp buffer\n", + __func__); + return; + } + + bitstream_read_tmp_phy = dma_map_single(amports_get_dma_device(), + bitstream_read_tmp, + SVA_STREAM_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(amports_get_dma_device(), + bitstream_read_tmp_phy)) { + pr_err("%s: failed to map rpm buffer\n", __func__); + kfree(bitstream_read_tmp); + bitstream_read_tmp = NULL; + return; + } +#endif +} +#endif + + +static s32 vavs_init(struct vdec_avs_hw_s *hw) +{ + int ret, size = -1; + struct firmware_s *fw; + u32 fw_size = 0x1000 * 16; + /*char *buf = vmalloc(0x1000 * 16); + + if (IS_ERR_OR_NULL(buf)) + return -ENOMEM; + */ + fw = vmalloc(sizeof(struct firmware_s) + 
fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + pr_info("vavs_init\n"); + //init_timer(&hw->recycle_timer); + + //hw->stat |= STAT_TIMER_INIT; + + //amvdec_enable(); + + //vdec_enable_DMC(NULL); + + vavs_local_init(hw); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) + size = get_firmware_data(VIDEO_DEC_AVS_MULTI, fw->data); + else { + if (firmware_sel == 1) + size = get_firmware_data(VIDEO_DEC_AVS_NOCABAC, fw->data); +#ifdef AVSP_LONG_CABAC + else { + init_avsp_long_cabac_buf(); + size = get_firmware_data(VIDEO_DEC_AVS_MULTI, fw->data); + } +#endif + } + + if (size < 0) { + amvdec_disable(); + pr_err("get firmware fail."); + vfree(fw); + return -1; + } + + fw->len = size; + hw->fw = fw; + + if (hw->m_ins_flag) { + timer_setup(&hw->check_timer, check_timer_func, 0); + //init_timer(&hw->check_timer); + //hw->check_timer.data = (ulong) hw; + //hw->check_timer.function = check_timer_func; + hw->check_timer.expires = jiffies + CHECK_INTERVAL; + + + //add_timer(&hw->check_timer); + hw->stat |= STAT_TIMER_ARM; + + INIT_WORK(&hw->work, vavs_work); + + hw->fw = fw; + return 0; + } + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) + ret = amvdec_loadmc_ex(VFORMAT_AVS, NULL, fw->data); + else if (firmware_sel == 1) + ret = amvdec_loadmc_ex(VFORMAT_AVS, "avs_no_cabac", fw->data); + else + ret = amvdec_loadmc_ex(VFORMAT_AVS, NULL, fw->data); + + if (ret < 0) { + amvdec_disable(); + /*vfree(buf);*/ + pr_err("AVS: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + return -EBUSY; + } + + /*vfree(buf);*/ + + hw->stat |= STAT_MC_LOAD; + + + /* enable AMRISC side protocol */ + ret = vavs_prot_init(hw); + if (ret < 0) + return ret; + +#ifdef HANDLE_AVS_IRQ + if (vdec_request_irq(VDEC_IRQ_1, vavs_isr, + "vavs-irq", (void *)hw)) { + amvdec_disable(); + pr_info("vavs irq register error.\n"); + return -ENOENT; + } +#endif + + hw->stat |= STAT_ISR_REG; + +#ifdef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_provider_init(&vavs_vf_prov, PROVIDER_NAME, &vavs_vf_provider, hw); + vf_reg_provider(&vavs_vf_prov); + avs_vf_notify_receiver(hw, PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); +#else + vf_provider_init(&vavs_vf_prov, PROVIDER_NAME, &vavs_vf_provider, hw); + vf_reg_provider(&vavs_vf_prov); +#endif + + if (hw->vavs_amstream_dec_info.rate != 0) { + if (!hw->is_reset) + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *)((unsigned long) + hw->vavs_amstream_dec_info.rate)); + hw->fr_hint_status = VDEC_HINTED; + } else + hw->fr_hint_status = VDEC_NEED_HINT; + + hw->stat |= STAT_VF_HOOK; + + timer_setup(&hw->recycle_timer, vavs_put_timer_func, 0); + //hw->recycle_timer.data = (ulong)(hw); + //hw->recycle_timer.function = vavs_put_timer_func; + hw->recycle_timer.expires = jiffies + PUT_INTERVAL; + + add_timer(&hw->recycle_timer); + + hw->stat |= STAT_TIMER_ARM; + +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) + INIT_WORK(&long_cabac_wd_work, long_cabac_do_work); +#endif + vdec_source_changed(VFORMAT_AVS, + 1920, 1080, 30); +#ifdef DEBUG_MULTI_WITH_AUTOMODE + if (start_decoding_delay == 0) + amvdec_start(); + else + delay_count = start_decoding_delay/10; +#else + amvdec_start(); +#endif + hw->stat |= STAT_VDEC_RUN; + return 0; +} + +static int amvdec_avs_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_avs_hw_s *hw = NULL; + + if (pdata == NULL) { + pr_info("amvdec_avs memory resource undefined.\n"); + 
return -EFAULT; + } + + hw = (struct vdec_avs_hw_s *)vzalloc(sizeof(struct vdec_avs_hw_s)); + if (hw == NULL) { + pr_info("\nammvdec_avs decoder driver alloc failed\n"); + return -ENOMEM; + } + pdata->private = hw; + ghw = hw; + atomic_set(&hw->error_handler_run, 0); + hw->m_ins_flag = 0; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM || disable_longcabac_trans) + firmware_sel = 1; + + if (firmware_sel == 1) { +#ifndef USE_DYNAMIC_BUF_NUM + vf_buf_num = 4; +#endif + canvas_base = 0; + canvas_num = 3; + } else { + + canvas_base = 128; + canvas_num = 2; /*NV21*/ + } + + + if (pdata->sys_info) + hw->vavs_amstream_dec_info = *pdata->sys_info; + + pr_info("%s (%d,%d) %d\n", __func__, hw->vavs_amstream_dec_info.width, + hw->vavs_amstream_dec_info.height, hw->vavs_amstream_dec_info.rate); + + pdata->dec_status = vavs_dec_status; + pdata->set_isreset = vavs_set_isreset; + hw->is_reset = 0; + + pdata->user_data_read = NULL; + pdata->reset_userdata_fifo = NULL; + + vavs_vdec_info_init(hw); + +#ifdef ENABLE_USER_DATA + if (NULL == hw->user_data_buffer) { + hw->user_data_buffer = + dma_alloc_coherent(amports_get_dma_device(), + USER_DATA_SIZE, + &hw->user_data_buffer_phys, GFP_KERNEL); + if (!hw->user_data_buffer) { + pr_info("%s: Can not allocate hw->user_data_buffer\n", + __func__); + return -ENOMEM; + } + pr_debug("hw->user_data_buffer = 0x%p, hw->user_data_buffer_phys = 0x%x\n", + hw->user_data_buffer, (u32)hw->user_data_buffer_phys); + } +#endif + INIT_WORK(&hw->set_clk_work, avs_set_clk); + if (vavs_init(hw) < 0) { + pr_info("amvdec_avs init failed.\n"); + kfree(hw->gvs); + hw->gvs = NULL; + pdata->dec_status = NULL; + if (hw->fw) + vfree(hw->fw); + hw->fw = NULL; + return -ENODEV; + } + /*vdec = pdata;*/ + + INIT_WORK(&hw->fatal_error_wd_work, vavs_fatal_error_handler); + atomic_set(&hw->error_handler_run, 0); +#ifdef ENABLE_USER_DATA + INIT_WORK(&hw->userdata_push_work, userdata_push_do_work); +#endif + INIT_WORK(&hw->notify_work, vavs_notify_work); + + return 
0; +} + +static int amvdec_avs_remove(struct platform_device *pdev) +{ + struct vdec_avs_hw_s *hw = ghw; + + cancel_work_sync(&hw->fatal_error_wd_work); + atomic_set(&hw->error_handler_run, 0); +#ifdef ENABLE_USER_DATA + cancel_work_sync(&hw->userdata_push_work); +#endif + cancel_work_sync(&hw->notify_work); + cancel_work_sync(&hw->set_clk_work); + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)vavs_dec_id); + hw->stat &= ~STAT_ISR_REG; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->recycle_timer); + hw->stat &= ~STAT_TIMER_ARM; + } +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) { + mutex_lock(&vavs_mutex); + cancel_work_sync(&long_cabac_wd_work); + mutex_unlock(&vavs_mutex); + + if (es_write_addr_virt) { +#if 0 + codec_mm_free_for_dma("vavs", es_write_addr_phy); +#else + dma_unmap_single(amports_get_dma_device(), + es_write_addr_phy, + MAX_CODED_FRAME_SIZE, DMA_FROM_DEVICE); + /*kfree(es_write_addr_virt);*/ + es_write_addr_virt = NULL; +#endif + } + +#ifdef BITSTREAM_READ_TMP_NO_CACHE + if (bitstream_read_tmp) { + dma_free_coherent(amports_get_dma_device(), + SVA_STREAM_BUF_SIZE, bitstream_read_tmp, + bitstream_read_tmp_phy); + bitstream_read_tmp = NULL; + } +#else + if (bitstream_read_tmp) { + dma_unmap_single(amports_get_dma_device(), + bitstream_read_tmp_phy, + SVA_STREAM_BUF_SIZE, DMA_FROM_DEVICE); + kfree(bitstream_read_tmp); + bitstream_read_tmp = NULL; + } +#endif + } +#endif + if (hw->stat & STAT_VF_HOOK) { + if (hw->fr_hint_status == VDEC_HINTED && !hw->is_reset) + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_END_HINT, NULL); + hw->fr_hint_status = VDEC_NO_NEED_HINT; + vf_unreg_provider(&vavs_vf_prov); + hw->stat &= ~STAT_VF_HOOK; + } + +#ifdef ENABLE_USER_DATA + if (hw->user_data_buffer != NULL) { + dma_free_coherent( + amports_get_dma_device(), + USER_DATA_SIZE, + hw->user_data_buffer, + 
hw->user_data_buffer_phys); + hw->user_data_buffer = NULL; + hw->user_data_buffer_phys = 0; + } +#endif + + if (hw->fw) { + vfree(hw->fw); + hw->fw = NULL; + } + + //amvdec_disable(); + //vdec_disable_DMC(NULL); + + hw->pic_type = 0; + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } +#ifdef DEBUG_PTS + pr_debug("pts hit %d, pts missed %d, i hit %d, missed %d\n", hw->pts_hit, + hw->pts_missed, hw->pts_i_hit, hw->pts_i_missed); + pr_debug("total frame %d, hw->avi_flag %d, rate %d\n", hw->total_frame, hw->avi_flag, + hw->vavs_amstream_dec_info.rate); +#endif + kfree(hw->gvs); + hw->gvs = NULL; + vfree(hw); + return 0; +} + +/****************************************/ +#if 0 +static struct platform_driver amvdec_avs_driver = { + .probe = amvdec_avs_probe, + .remove = amvdec_avs_remove, + .driver = { + .name = DRIVER_NAME, + } +}; +#endif + +static void recycle_frames(struct vdec_avs_hw_s *hw); + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + int ret = 1; + unsigned buf_busy_mask = (1 << hw->vf_buf_num_used) - 1; +#ifdef DEBUG_MULTI_FRAME_INS + if ((DECODE_ID(hw) == 0) && run_count[0] > run_count[1] && + run_count[1] < max_run_count[1]) + return 0; + + if ((DECODE_ID(hw) == 1) && run_count[1] >= run_count[0] && + run_count[0] < max_run_count[0]) + return 0; + + if (max_run_count[DECODE_ID(hw)] > 0 && + run_count[DECODE_ID(hw)] >= max_run_count[DECODE_ID(hw)]) + return 0; +#endif + if (vdec_stream_based(vdec) && (hw->init_flag == 0) + && pre_decode_buf_level != 0) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = vdec->input.size + wp - rp; + else + level = wp - rp; + + if (level < pre_decode_buf_level) { + hw->not_run_ready++; + return 0; + } + } + + if (hw->reset_decode_flag == 0 && + hw->again_flag == 0 && + (hw->buf_status & 
buf_busy_mask) == buf_busy_mask) { + recycle_frames(hw); + if (hw->buf_recycle_status == 0) + ret = 0; + } + + if (again_threshold > 0 && + hw->pre_parser_wr_ptr != 0 && + hw->again_flag && + (!vdec_frame_based(vdec))) { + u32 parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_rp); + if (parser_wr_ptr >= hw->pre_parser_wr_ptr && + (parser_wr_ptr - hw->pre_parser_wr_ptr) < + again_threshold) { + int r = vdec_sync_input(vdec); + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "%s buf lelvel:%x\n", __func__, r); + ret = 0; + } + } + + if (ret) + hw->not_run_ready = 0; + else + hw->not_run_ready++; + + if (ret != 0) { + if (vdec->parallel_dec == 1) + return (unsigned long)(CORE_MASK_VDEC_1); + else + return (unsigned long)(CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + } else + return 0; +} + +static void vavs_work(struct work_struct *work) +{ + struct vdec_avs_hw_s *hw = + container_of(work, struct vdec_avs_hw_s, work); + struct vdec_s *vdec = hw_to_vdec(hw); + if (hw->dec_result != DEC_RESULT_AGAIN) + debug_print(hw, PRINT_FLAG_RUN_FLOW, + "ammvdec_avs: vavs_work,result=%d,status=%d\n", + hw->dec_result, hw_to_vdec(hw)->next_status); + hw->again_flag = 0; + if (hw->dec_result == DEC_RESULT_USERDATA) { + userdata_push_process(hw); + return; + } else if (hw->dec_result == DEC_RESULT_DONE) { + + if (!hw->ctx_valid) + hw->ctx_valid = 1; +#ifdef DEBUG_MULTI_FRAME_INS + msleep(delay); +#endif + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + } else if (hw->dec_result == DEC_RESULT_AGAIN + && (hw_to_vdec(hw)->next_status != + VDEC_STATUS_DISCONNECTED)) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + hw->again_flag = 1; + if (!vdec_has_more_input(hw_to_vdec(hw))) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } + } else if (hw->dec_result == DEC_RESULT_GET_DATA + && (hw_to_vdec(hw)->next_status != + VDEC_STATUS_DISCONNECTED)) { + if (!vdec_has_more_input(hw_to_vdec(hw))) { + hw->dec_result = DEC_RESULT_EOS; + 
vdec_schedule_work(&hw->work); + return; + } + debug_print(hw, PRINT_FLAG_VLD_DETAIL, + "%s DEC_RESULT_GET_DATA %x %x %x\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP)); + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + vdec_clean_input(hw_to_vdec(hw)); + return; + } else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) { + debug_print(hw, PRINT_FLAG_ERROR, + "%s: force exit\n", __func__); + if (hw->stat & STAT_ISR_REG) { + amvdec_stop(); + /*disable mbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 0); + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + } else if (hw->dec_result == DEC_RESULT_EOS) { + debug_print(hw, PRINT_FLAG_DECODING, + "%s: end of stream\n", __func__); + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + hw->eos = 1; + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + vdec_clean_input(hw_to_vdec(hw)); + } + if (hw->stat & STAT_VDEC_RUN) { +#if DEBUG_MULTI_FLAG == 1 +#else + amvdec_stop(); +#endif + hw->stat &= ~STAT_VDEC_RUN; + } + /*wait_vmmpeg12_search_done(hw);*/ + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + if (hw->dec_result == DEC_RESULT_DONE) + hw->buf_recycle_status = 0; + debug_print(hw, PRINT_FLAG_RUN_FLOW, "work end %d\n", hw->dec_result); + if (vdec->parallel_dec == 1) + vdec_core_finish_run(hw_to_vdec(hw), CORE_MASK_VDEC_1); + else + vdec_core_finish_run(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + if (hw->vdec_cb) { + hw->vdec_cb(hw_to_vdec(hw), hw->vdec_cb_arg); + debug_print(hw, 0x80000, + "%s:\n", __func__); + } +} + + +static void reset_process_time(struct vdec_avs_hw_s *hw) +{ + if (!hw->m_ins_flag) + return; + if (hw->start_process_time) { + unsigned process_time = + 1000 * (jiffies - hw->start_process_time) / HZ; + hw->start_process_time = 0; + if (process_time > max_process_time[DECODE_ID(hw)]) + max_process_time[DECODE_ID(hw)] = 
process_time; + } +} +static void start_process_time(struct vdec_avs_hw_s *hw) +{ + hw->decode_timeout_count = 2; + hw->start_process_time = jiffies; +} + +static void handle_decoding_error(struct vdec_avs_hw_s *hw) +{ + int i; + unsigned long flags; + struct vframe_s *vf; + spin_lock_irqsave(&lock, flags); + for (i = 0; i < VF_POOL_SIZE; i++) { + vf = &hw->vfpool[i].vf; + if (vf->index < hw->vf_buf_num_used) { + hw->vfpool[i].detached = 1; + hw->vfbuf_use[vf->index] = 0; + } + } + if (error_handle_policy & 0x2) { + while (!kfifo_is_empty(&hw->display_q)) { + if (kfifo_get(&hw->display_q, &vf)) { + if (buf_of_vf(vf)->detached !=0) { + debug_print(hw, PRINT_FLAG_DECODING, + "%s recycle %d => newframe_q\n", + __func__, + vf->index); + vf->index = hw->vf_buf_num_used; + buf_of_vf(vf)->detached = 0; + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + } + } + + } + } + clear_pts_buf(hw); + hw->decode_pic_count = 0; + hw->reset_decode_flag = 1; + hw->pre_parser_wr_ptr = 0; + hw->buf_status = 0; + hw->throw_pb_flag = 1; + spin_unlock_irqrestore(&lock, flags); +} + +static void timeout_process(struct vdec_avs_hw_s *hw) +{ + struct vdec_s *vdec = hw_to_vdec(hw); + amvdec_stop(); + if (error_handle_policy & 0x1) { + handle_decoding_error(hw); + } else { + vavs_save_regs(hw); + + //if (hw->decode_pic_count == 0) + hw->decode_pic_count++; + if ((hw->decode_pic_count & 0xffff) == 0) { + /*make ucode do not handle it as first picture*/ + hw->decode_pic_count++; + } + } + hw->dec_result = DEC_RESULT_DONE; + + debug_print(hw, PRINT_FLAG_ERROR, + "%s decoder timeout, status=%d, level=%d, bit_cnt=0x%x\n", + __func__, vdec->status, READ_VREG(VLD_MEM_VIFIFO_LEVEL), READ_VREG(VIFF_BIT_CNT)); + reset_process_time(hw); + vdec_schedule_work(&hw->work); +} + + +static void recycle_frame_bufferin(struct vdec_avs_hw_s *hw) +{ + if (!kfifo_is_empty(&hw->recycle_q) && (READ_VREG(AVS_BUFFERIN) == 0)) { + struct vframe_s *vf; + + if (kfifo_get(&hw->recycle_q, &vf)) { + if 
(buf_of_vf(vf)->detached) { + debug_print(hw, 0, + "%s recycle detached vf, index=%d detched %d used %d\n", + __func__, vf->index, + buf_of_vf(vf)->detached, + hw->vfbuf_use[vf->index]); + } + if ((vf->index < hw->vf_buf_num_used) && + (buf_of_vf(vf)->detached == 0) && + (--hw->vfbuf_use[vf->index] == 0)) { + hw->buf_recycle_status |= (1 << vf->index); + WRITE_VREG(AVS_BUFFERIN, ~(1 << vf->index)); + debug_print(hw, PRINT_FLAG_DECODING, + "%s WRITE_VREG(AVS_BUFFERIN, 0x%x) for vf index of %d => buf_recycle_status 0x%x\n", + __func__, + READ_VREG(AVS_BUFFERIN), vf->index, + hw->buf_recycle_status); + } + vf->index = hw->vf_buf_num_used; + buf_of_vf(vf)->detached = 0; + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + } + + } + +} + +static void recycle_frames(struct vdec_avs_hw_s *hw) +{ + while (!kfifo_is_empty(&hw->recycle_q)) { + struct vframe_s *vf; + + if (kfifo_get(&hw->recycle_q, &vf)) { + if (buf_of_vf(vf)->detached) { + debug_print(hw, 0, + "%s recycle detached vf, index=%d detched %d used %d\n", + __func__, vf->index, + buf_of_vf(vf)->detached, + hw->vfbuf_use[vf->index]); + } + + + if ((vf->index < hw->vf_buf_num_used) && + (buf_of_vf(vf)->detached == 0) && + (--hw->vfbuf_use[vf->index] == 0)) { + hw->buf_recycle_status |= (1 << vf->index); + debug_print(hw, PRINT_FLAG_DECODING, + "%s for vf index of %d => buf_recycle_status 0x%x\n", + __func__, + vf->index, + hw->buf_recycle_status); + } + vf->index = hw->vf_buf_num_used; + buf_of_vf(vf)->detached = 0; + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + } + + } + +} + + +static void check_timer_func(struct timer_list *timer) +{ + struct vdec_avs_hw_s *hw = container_of(timer, + struct vdec_avs_hw_s, check_timer); + struct vdec_s *vdec = hw_to_vdec(hw); + unsigned int timeout_val = decode_timeout_val; + unsigned long flags; + + if (hw->m_ins_flag && + (debug & + DEBUG_WAIT_DECODE_DONE_WHEN_STOP) == 0 && + vdec->next_status == + VDEC_STATUS_DISCONNECTED) { + hw->dec_result = 
DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + debug_print(hw, + 0, "vdec requested to be disconnected\n"); + return; + } + + /*recycle*/ + if (!hw->m_ins_flag) { + spin_lock_irqsave(&lock, flags); + recycle_frame_bufferin(hw); + spin_unlock_irqrestore(&lock, flags); + } + + if (hw->m_ins_flag) { + if ((READ_VREG(AV_SCRATCH_5) & 0xf) != 0 && + (READ_VREG(AV_SCRATCH_5) & 0xff00) != 0){ + /*ucode buffer empty*/ + if ((kfifo_len(&hw->recycle_q) == 0) && + (kfifo_len(&hw->display_q) == 0)) { + debug_print(hw, + 0, "AV_SCRATCH_5=0x%x, recover ucode buffer_status\n", + READ_VREG(AV_SCRATCH_5)); + WRITE_VREG(AV_SCRATCH_5, 0x10); + /*let ucode to recover buffer_status*/ + } + } + } + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + + if (udebug_flag != hw->old_udebug_flag) { + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + hw->old_udebug_flag = udebug_flag; + } + if (dbg_cmd != 0) { + if (dbg_cmd == 1) { + int r = vdec_sync_input(vdec); + dbg_cmd = 0; + pr_info( + "vdec_sync_input=>0x%x, (lev %x, wp %x rp %x, prp %x, pwp %x)\n", + r, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + STBUF_READ(&vdec->vbuf, get_rp), + STBUF_READ(&vdec->vbuf, get_wp)); + } + } + + if ((debug & DEBUG_FLAG_DISABLE_TIMEOUT) == 0 && + (timeout_val > 0) && + (hw->start_process_time > 0) && + ((1000 * (jiffies - hw->start_process_time) / HZ) + > timeout_val)) { + if (hw->last_vld_level == READ_VREG(VLD_MEM_VIFIFO_LEVEL)) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + timeout_process(hw); + } + hw->last_vld_level = READ_VREG(VLD_MEM_VIFIFO_LEVEL); + } + + if (READ_VREG(AVS_SOS_COUNT)) { + if (!error_recovery_mode) { + amvdec_stop(); + if (error_handle_policy & 0x1) { + handle_decoding_error(hw); + } else { + vavs_save_regs(hw); + 
+ //if (hw->decode_pic_count == 0) + hw->decode_pic_count++; + if ((hw->decode_pic_count & 0xffff) == 0) { + /*make ucode do not handle it as first picture*/ + hw->decode_pic_count++; + } + } + hw->dec_result = DEC_RESULT_DONE; + + debug_print(hw, PRINT_FLAG_ERROR, + "%s decoder error, status=%d, level=%d, AVS_SOS_COUNT=0x%x\n", + __func__, vdec->status, READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(AVS_SOS_COUNT)); + reset_process_time(hw); + vdec_schedule_work(&hw->work); + } + } + + if ((hw->ucode_pause_pos != 0) && + (hw->ucode_pause_pos != 0xffffffff) && + udebug_pause_pos != hw->ucode_pause_pos) { + hw->ucode_pause_pos = 0; + WRITE_VREG(DEBUG_REG1, 0); + } + + if (vdec->next_status == VDEC_STATUS_DISCONNECTED) { + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + pr_info("vdec requested to be disconnected\n"); + return; + } + + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static int avs_hw_ctx_restore(struct vdec_avs_hw_s *hw) +{ + /*int r = 0;*/ + vavs_prot_init(hw); + + return 0; +} + +static unsigned char get_data_check_sum + (struct vdec_avs_hw_s *hw, int size) +{ + int jj; + int sum = 0; + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + for (jj = 0; jj < size; jj++) + sum += data[jj]; + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static void run(struct vdec_s *vdec, unsigned long mask, +void (*callback)(struct vdec_s *, void *), + void *arg) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + int save_reg; + int size, ret; + if (!hw->vdec_pg_enable_flag) { + hw->vdec_pg_enable_flag = 1; + amvdec_enable(); + } + save_reg = READ_VREG(POWER_CTL_VLD); + /* reset everything except DOS_TOP[1] and APB_CBUS[0]*/ + debug_print(hw, PRINT_FLAG_RUN_FLOW,"run in\n"); + if (vdec_stream_based(vdec)) { + 
hw->pre_parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + } +#if 1 +#if DEBUG_MULTI_FLAG > 0 + if (hw->decode_pic_count == 0) { +#endif + WRITE_VREG(DOS_SW_RESET0, 0xfffffff0); + WRITE_VREG(DOS_SW_RESET0, 0); + WRITE_VREG(POWER_CTL_VLD, save_reg); + hw->run_count++; + run_count[DECODE_ID(hw)] = hw->run_count; + vdec_reset_core(vdec); +#if DEBUG_MULTI_FLAG > 0 + } +#endif +#else + vdec_reset_core(vdec); +#endif + hw->vdec_cb_arg = arg; + hw->vdec_cb = callback; + + size = vdec_prepare_input(vdec, &hw->chunk); + if (debug & DEBUG_FLAG_PREPARE_MORE_INPUT) { + if (size < start_decode_buf_level) { + /*debug_print(hw, PRINT_FLAG_VLD_DETAIL, + "DEC_RESULT_AGAIN %x %x %x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP));*/ + + hw->input_empty++; + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + return; + } + } else { + if (size < 0) { + hw->input_empty++; + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + return; + } + } + if (input_frame_based(vdec)) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + if (debug & PRINT_FLAG_RUN_FLOW + ) { + debug_print(hw, 0, + "%s decode_pic_count %d buf_recycle_status 0x%x: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. 
%02x %02x %02x %02x\n", + __func__, hw->decode_pic_count, + hw->buf_recycle_status, + size, get_data_check_sum(hw, size), + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + } + if (debug & PRINT_FRAMEBASE_DATA + ) { + int jj; + + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + debug_print(hw, + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + debug_print(hw, + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + debug_print(hw, + PRINT_FRAMEBASE_DATA, + "\n"); + } + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } else + debug_print(hw, PRINT_FLAG_RUN_FLOW, + "%s decode_pic_count %d buf_recycle_status 0x%x: %x %x %x %x %x size 0x%x\n", + __func__, + hw->decode_pic_count, + hw->buf_recycle_status, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + STBUF_READ(&vdec->vbuf, get_rp), + STBUF_READ(&vdec->vbuf, get_wp), + size); + + + hw->input_empty = 0; + debug_print(hw, PRINT_FLAG_RUN_FLOW, + "%s,%d, size=%d\n", __func__, __LINE__, size); + + /*vdec_enable_input(vdec); + need run after VC1_CONTROL_REG is configured + */ + hw->init_flag = 1; + + if (hw->chunk) + debug_print(hw, PRINT_FLAG_RUN_FLOW, + "input chunk offset %d, size %d\n", + hw->chunk->offset, hw->chunk->size); + + hw->dec_result = DEC_RESULT_NONE; + /*vdec->mc_loaded = 0;*/ + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else { + ret = amvdec_vdec_loadmc_buf_ex(VFORMAT_AVS, "avs_multi", vdec, + hw->fw->data, hw->fw->len); + if (ret < 0) { + pr_err("[%d] %s: the %s fw loading failed, err: %x\n", vdec->id, + hw->fw->name, tee_enabled() ? 
"TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_AVS; + } + if (avs_hw_ctx_restore(hw) < 0) { + hw->dec_result = DEC_RESULT_ERROR; + debug_print(hw, PRINT_FLAG_ERROR, + "ammvdec_avs: error HW context restore\n"); + vdec_schedule_work(&hw->work); + return; + } + + /* + This configureation of VC1_CONTROL_REG will + pop bits (even no data in the stream buffer) if input is enabled, + so it can only be configured before vdec_enable_input() is called. + So move this code from ucode to here + */ +#define DISABLE_DBLK_HCMD 0 +#define DISABLE_MC_HCMD 0 + WRITE_VREG(VC1_CONTROL_REG, (DISABLE_DBLK_HCMD<<6) | + (DISABLE_MC_HCMD<<5) | (1 << 7) | (0xc <<8) | (1<<14)); + if (vdec_frame_based(vdec)) { + size = hw->chunk->size + + (hw->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + } + + + vdec_enable_input(vdec); + /**/ + + /*wmb();*/ + hw->stat |= STAT_MC_LOAD; + hw->last_vld_level = 0; + + debug_print(hw, PRINT_FLAG_DECODING, + "%s READ_VREG(AVS_BUFFERIN)=0x%x, recycle_q num %d\n", + __func__, READ_VREG(AVS_BUFFERIN), + kfifo_len(&hw->recycle_q)); + + WRITE_VREG(VIFF_BIT_CNT, size * 8); + if (hw->reset_decode_flag) + WRITE_VREG(DECODE_STATUS, 0); + else { + recycle_frames(hw); + avs_pts_check_in(hw, + hw->decode_pic_count & 0xffff, + hw->chunk); + + WRITE_VREG(DECODE_STATUS, + (hw->decode_pic_count & 0xffff) | + ((~hw->buf_recycle_status) << 16)); + } + + hw->reset_decode_flag = 0; + //hw->decode_status_skip_pic_done_flag = 0; + start_process_time(hw); +#if DEBUG_MULTI_FLAG == 1 + if (hw->decode_pic_count > 0) + WRITE_VREG(DECODE_STATUS, 0xff); + else +#endif + amvdec_start(); + hw->stat |= STAT_VDEC_RUN; + + hw->stat |= STAT_TIMER_ARM; + + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static void reset(struct vdec_s *vdec) +{ +} + +static irqreturn_t vmavs_isr_thread_fn(struct vdec_s *vdec, int irq) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s 
*)vdec->private; + u32 reg; + struct vframe_s *vf = NULL; + u32 dur; + u32 repeat_count; + u32 picture_type; + u32 buffer_index; + u32 frame_size; + bool force_interlaced_frame = false; + unsigned int pts, pts_valid = 0, offset = 0; + u64 pts_us64; + u32 debug_tag; + u32 buffer_status_debug; + //struct vdec_avs_hw_s *hw = (struct vdec_avs_hw_s *)dev_id; + + /*if (debug & AVS_DEBUG_UCODE) { + if (READ_VREG(AV_SCRATCH_E) != 0) { + pr_info("dbg%x: %x\n", READ_VREG(AV_SCRATCH_E), + READ_VREG(AV_SCRATCH_D)); + WRITE_VREG(AV_SCRATCH_E, 0); + } + }*/ + + debug_print(hw, PRINT_FLAG_RUN_FLOW, "READ_VREG(AVS_BUFFEROUT) 0x%x, READ_VREG(DECODE_STATUS) 0x%x READ_VREG(AV_SCRATCH_N) 0x%x, READ_VREG(DEBUG_REG1) 0x%x\n", + READ_VREG(AVS_BUFFEROUT),READ_VREG(DECODE_STATUS), READ_VREG(AV_SCRATCH_N), READ_VREG(DEBUG_REG1)); + + debug_tag = READ_VREG(DEBUG_REG1); + buffer_status_debug = debug_tag >> 16; + debug_tag &= 0xffff; + /* if (debug_tag & 0x10000) { + int i; + dma_sync_single_for_cpu( + amports_get_dma_device(), + hw->lmem_phy_addr, + LMEM_BUF_SIZE, + DMA_FROM_DEVICE); + + debug_print(hw, 0, + "LMEM<tag %x>:\n", debug_tag); + + for (i = 0; i < 0x400; i += 4) { + int ii; + unsigned short *lmem_ptr = hw->lmem_addr; + if ((i & 0xf) == 0) + debug_print_cont(hw, 0, "%03x: ", i); + for (ii = 0; ii < 4; ii++) { + debug_print_cont(hw, 0, "%04x ", + lmem_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + debug_print_cont(hw, 0, "\n"); + } + + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == hw->decode_pic_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_VREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + } + else if (debug_tag & 0x20000) + hw->ucode_pause_pos = 0xffffffff; + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_VREG(DEBUG_REG1, 0); + } else*/ if (debug_tag != 0) { + debug_print(hw, 1, + "dbg%x: %x buffer_status 0x%x l/w/r 
%x %x %x bitcnt %x AVAIL %x\n", + debug_tag, + READ_VREG(DEBUG_REG2), + buffer_status_debug, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VIFF_BIT_CNT), + READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL)); + + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == hw->decode_pic_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_VREG(DEBUG_REG2)) && + (udebug_pause_ins_id == 0 || + DECODE_ID(hw) == (udebug_pause_ins_id -1))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + if (debug & DEBUG_PIC_DONE_WHEN_UCODE_PAUSE) { + hw->decode_pic_count++; + if ((hw->decode_pic_count & 0xffff) == 0) { + /*make ucode do not handle it as first picture*/ + hw->decode_pic_count++; + } + reset_process_time(hw); + hw->dec_result = DEC_RESULT_DONE; + amvdec_stop(); + vavs_save_regs(hw); + debug_print(hw, PRINT_FLAG_DECODING, + "%s ucode pause, force done, decode_pic_count = %d, bit_cnt=0x%x\n", + __func__, + hw->decode_pic_count, + READ_VREG(VIFF_BIT_CNT)); + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + } + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_VREG(DEBUG_REG1, 0); + return IRQ_HANDLED; + } else { + debug_print(hw, PRINT_FLAG_DECODING, + "%s decode_status 0x%x, buffer_status 0x%x\n", + __func__, + READ_VREG(DECODE_STATUS), + buffer_status_debug); + } + +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0 && READ_VREG(LONG_CABAC_REQ)) { +#ifdef PERFORMANCE_DEBUG + pr_info("%s:schedule long_cabac_wd_work\r\n", __func__); +#endif + pr_info("schedule long_cabac_wd_work and requested from %d\n", + (READ_VREG(LONG_CABAC_REQ) >> 8)&0xFF); + schedule_work(&long_cabac_wd_work); + } +#endif + +#ifdef ENABLE_USER_DATA + if (UserDataHandler(hw)) + return IRQ_HANDLED; +#endif + reg = READ_VREG(AVS_BUFFEROUT); + if (reg) { + unsigned short decode_pic_count + = READ_VREG(DECODE_PIC_COUNT); + 
debug_print(hw, PRINT_FLAG_DECODING, "AVS_BUFFEROUT=0x%x decode_pic_count %d\n", + reg, decode_pic_count); + if (pts_by_offset) { + offset = READ_VREG(AVS_OFFSET_REG); + debug_print(hw, PRINT_FLAG_DECODING, "AVS OFFSET=%x\n", offset); + if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) { + if (pts_lookup_offset_us64(PTS_TYPE_VIDEO, offset, &pts, + &frame_size, 0, &pts_us64) == 0) { + pts_valid = 1; +#ifdef DEBUG_PTS + hw->pts_hit++; +#endif + } else { +#ifdef DEBUG_PTS + hw->pts_missed++; +#endif + } + } + } + + repeat_count = READ_VREG(AVS_REPEAT_COUNT); +#ifdef USE_DYNAMIC_BUF_NUM + buffer_index = + ((reg & 0x7) + + (((reg >> 8) & 0x3) << 3) - 1) & 0x1f; +#else + if (firmware_sel == 0) + buffer_index = + ((reg & 0x7) + + (((reg >> 8) & 0x3) << 3) - 1) & 0x1f; + else + buffer_index = + ((reg & 0x7) - 1) & 3; +#endif + picture_type = (reg >> 3) & 7; +#ifdef DEBUG_PTS + if (picture_type == I_PICTURE) { + /* pr_info("I offset 0x%x, pts_valid %d\n", + * offset, pts_valid); + */ + if (!pts_valid) + hw->pts_i_missed++; + else + hw->pts_i_hit++; + } +#endif + + if ((dec_control & DEC_CONTROL_FLAG_FORCE_2500_1080P_INTERLACE) + && hw->frame_width == 1920 && hw->frame_height == 1080) { + force_interlaced_frame = true; + } + + if (hw->throw_pb_flag && picture_type != I_PICTURE) { + + debug_print(hw, PRINT_FLAG_DECODING, + "%s WRITE_VREG(AVS_BUFFERIN, 0x%x) for throwing picture with type of %d\n", + __func__, + ~(1 << buffer_index), picture_type); + + WRITE_VREG(AVS_BUFFERIN, ~(1 << buffer_index)); + } else if (reg & INTERLACE_FLAG || force_interlaced_frame) { /* interlace */ + hw->throw_pb_flag = 0; + + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "interlace, picture type %d\n", + picture_type); + + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + set_frame_info(hw, vf, &dur); + vf->bufWidth = 1920; + hw->pic_type = 2; + if ((picture_type == I_PICTURE) && pts_valid) { + vf->pts = pts; + 
vf->pts_us64 = pts_us64; + if ((repeat_count > 1) && hw->avi_flag) { + /* hw->next_pts = pts + + * (hw->vavs_amstream_dec_info.rate * + * repeat_count >> 1)*15/16; + */ + hw->next_pts = + pts + + (dur * repeat_count >> 1) * + 15 / 16; + } else + hw->next_pts = 0; + } else { + vf->pts = hw->next_pts; + if (vf->pts == 0) { + vf->pts_us64 = 0; + } + if ((repeat_count > 1) && hw->avi_flag) { + /* vf->duration = + * hw->vavs_amstream_dec_info.rate * + * repeat_count >> 1; + */ + vf->duration = dur * repeat_count >> 1; + if (hw->next_pts != 0) { + hw->next_pts += + ((vf->duration) - + ((vf->duration) >> 4)); + } + } else { + /* vf->duration = + * hw->vavs_amstream_dec_info.rate >> 1; + */ + vf->duration = dur >> 1; + hw->next_pts = 0; + } + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->duration_pulldown = 0; + if (force_interlaced_frame) { + vf->type = VIDTYPE_INTERLACE_TOP; + }else{ + vf->type = + (reg & TOP_FIELD_FIRST_FLAG) + ? VIDTYPE_INTERLACE_TOP + : VIDTYPE_INTERLACE_BOTTOM; + } +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + if (hw->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + + vf->canvas0_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas0_config[1] = hw->canvas_config[buffer_index][1]; + + vf->canvas1_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas1_config[1] = hw->canvas_config[buffer_index][1]; + } else + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "buffer_index %d, canvas addr %x\n", + buffer_index, vf->canvas0Addr); + vf->pts = (pts_valid)?pts:0; + //vf->pts_us64 = (pts_valid) ? 
pts_us64 : 0; + hw->vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, + buffer_index); + + if (hw->m_ins_flag && vdec_frame_based(hw_to_vdec(hw))) + set_vframe_pts(hw, decode_pic_count, vf); + + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = + (((u64)vf->duration << 32) & 0xffffffff00000000) | offset; + vf->pts = 0; + } + + debug_print(hw, PRINT_FLAG_PTS, + "interlace1 vf->pts = %d, vf->pts_us64 = %lld, pts_valid = %d\n", vf->pts, vf->pts_us64, pts_valid); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->timestamp); + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + pr_info("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + set_frame_info(hw, vf, &dur); + vf->bufWidth = 1920; + if (force_interlaced_frame) + vf->pts = 0; + else + vf->pts = hw->next_pts; + + if (vf->pts == 0) { + vf->pts_us64 = 0; + } + + if ((repeat_count > 1) && hw->avi_flag) { + /* vf->duration = hw->vavs_amstream_dec_info.rate * + * repeat_count >> 1; + */ + vf->duration = dur * repeat_count >> 1; + if (hw->next_pts != 0) { + hw->next_pts += + ((vf->duration) - + ((vf->duration) >> 4)); + } + } else { + /* vf->duration = hw->vavs_amstream_dec_info.rate + * >> 1; + */ + vf->duration = dur >> 1; + hw->next_pts = 0; + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->duration_pulldown = 0; + if (force_interlaced_frame) { + vf->type = VIDTYPE_INTERLACE_BOTTOM; + } else { + vf->type = + (reg & TOP_FIELD_FIRST_FLAG) ? 
+ VIDTYPE_INTERLACE_BOTTOM : + VIDTYPE_INTERLACE_TOP; + } +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + if (hw->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + + vf->canvas0_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas0_config[1] = hw->canvas_config[buffer_index][1]; + + vf->canvas1_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas1_config[1] = hw->canvas_config[buffer_index][1]; + } else + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + vf->pts_us64 = 0; + hw->vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, + buffer_index); + + if (hw->m_ins_flag && vdec_frame_based(hw_to_vdec(hw))) + set_vframe_pts(hw, decode_pic_count, vf); + + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = (u64)-1; + vf->pts = 0; + } + debug_print(hw, PRINT_FLAG_PTS, + "interlace2 vf->pts = %d, vf->pts_us64 = %lld, pts_valid = %d\n", vf->pts, vf->pts_us64, pts_valid); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->timestamp); + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + hw->total_frame++; + } else { /* progressive */ + hw->throw_pb_flag = 0; + + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "progressive picture type %d\n", + picture_type); + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + pr_info + ("fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + set_frame_info(hw, vf, &dur); + vf->bufWidth = 1920; + hw->pic_type = 1; + + if ((picture_type == I_PICTURE) && pts_valid) { + vf->pts = pts; + if ((repeat_count > 1) && hw->avi_flag) { + /* hw->next_pts = pts + + * (hw->vavs_amstream_dec_info.rate * + * repeat_count)*15/16; + */ + hw->next_pts = + pts + + (dur * repeat_count) * 15 / 16; + } else + hw->next_pts = 0; + } else { + vf->pts = hw->next_pts; + if 
(vf->pts == 0) { + vf->pts_us64 = 0; + } + if ((repeat_count > 1) && hw->avi_flag) { + /* vf->duration = + * hw->vavs_amstream_dec_info.rate * + * repeat_count; + */ + vf->duration = dur * repeat_count; + if (hw->next_pts != 0) { + hw->next_pts += + ((vf->duration) - + ((vf->duration) >> 4)); + } + } else { + /* vf->duration = + * hw->vavs_amstream_dec_info.rate; + */ + vf->duration = dur; + hw->next_pts = 0; + } + } + vf->signal_type = 0; + vf->index = buffer_index; + vf->duration_pulldown = 0; + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; +#ifdef NV21 + vf->type |= VIDTYPE_VIU_NV21; +#endif + if (hw->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + + vf->canvas0_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas0_config[1] = hw->canvas_config[buffer_index][1]; + + vf->canvas1_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas1_config[1] = hw->canvas_config[buffer_index][1]; + } else + vf->canvas0Addr = vf->canvas1Addr = + index2canvas(buffer_index); + vf->type_original = vf->type; + + vf->pts = (pts_valid)?pts:0; + //vf->pts_us64 = (pts_valid) ? 
pts_us64 : 0; + debug_print(hw, PRINT_FLAG_VFRAME_DETAIL, + "buffer_index %d, canvas addr %x\n", + buffer_index, vf->canvas0Addr); + debug_print(hw, PRINT_FLAG_PTS, + "progressive vf->pts = %d, vf->pts_us64 = %lld, pts_valid = %d\n", vf->pts, vf->pts_us64, pts_valid); + hw->vfbuf_use[buffer_index]++; + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, + buffer_index); + + if (hw->m_ins_flag && vdec_frame_based(hw_to_vdec(hw))) + set_vframe_pts(hw, decode_pic_count, vf); + + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = + (((u64)vf->duration << 32) & 0xffffffff00000000) | offset; + vf->pts = 0; + } + decoder_do_frame_check(hw_to_vdec(hw), vf); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->timestamp); + ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q)); + ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q)); + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + hw->total_frame++; + } + + /*count info*/ + vdec_count_info(hw->gvs, 0, offset); + if (offset) { + if (picture_type == I_PICTURE) { + hw->gvs->i_decoded_frames++; + } else if (picture_type == P_PICTURE) { + hw->gvs->p_decoded_frames++; + } else if (picture_type == B_PICTURE) { + hw->gvs->b_decoded_frames++; + } + } + avs_update_gvs(hw); + vdec_fill_vdec_frame(hw_to_vdec(hw), NULL, hw->gvs, vf, 0); + + /* pr_info("PicType = %d, PTS = 0x%x\n", + * picture_type, vf->pts); + */ + WRITE_VREG(AVS_BUFFEROUT, 0); + } + //WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + + if (hw->m_ins_flag) { + u32 status_reg = READ_VREG(DECODE_STATUS); + u32 decode_status = status_reg & 0xff; + if (hw->dec_result == DEC_RESULT_DONE || + hw->dec_result == DEC_RESULT_AGAIN) { + debug_print(hw, PRINT_FLAG_DECODING, + "%s !!! 
READ_VREG(DECODE_STATUS) = 0x%x, decode_status 0x%x, buf_status 0x%x, dec_result = 0x%x, decode_pic_count = %d bit_cnt=0x%x\n", + __func__, status_reg, decode_status, + hw->buf_status, + hw->dec_result, hw->decode_pic_count, + READ_VREG(VIFF_BIT_CNT)); + return IRQ_HANDLED; + } else if (decode_status == DECODE_STATUS_PIC_DONE || + decode_status == DECODE_STATUS_SKIP_PIC_DONE) { + hw->buf_status = (status_reg >> 16) & 0xffff; + if (decode_status == DECODE_STATUS_SKIP_PIC_DONE) { + hw->decode_status_skip_pic_done_flag = 1; + hw->decode_decode_cont_start_code = (status_reg >> 8) & 0xff; + } else + hw->decode_status_skip_pic_done_flag = 0; + hw->decode_pic_count++; + if ((hw->decode_pic_count & 0xffff) == 0) { + /*make ucode do not handle it as first picture*/ + hw->decode_pic_count++; + } + reset_process_time(hw); + hw->dec_result = DEC_RESULT_DONE; +#if DEBUG_MULTI_FLAG == 1 + WRITE_VREG(DECODE_STATUS, 0); +#else + amvdec_stop(); +#endif + vavs_save_regs(hw); + debug_print(hw, PRINT_FLAG_DECODING, + "%s %s, READ_VREG(DECODE_STATUS) = 0x%x, decode_status 0x%x, buf_status 0x%x, dec_result = 0x%x, decode_pic_count = %d, bit_cnt=0x%x\n", + __func__, + (decode_status == DECODE_STATUS_PIC_DONE) ? 
+ "DECODE_STATUS_PIC_DONE" : "DECODE_STATUS_SKIP_PIC_DONE", + status_reg, decode_status, + hw->buf_status, + hw->dec_result, hw->decode_pic_count, + READ_VREG(VIFF_BIT_CNT)); + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } else if (decode_status == DECODE_STATUS_DECODE_BUF_EMPTY || + decode_status == DECODE_STATUS_SEARCH_BUF_EMPTY) { + hw->buf_status = (status_reg >> 16) & 0xffff; + reset_process_time(hw); +#if DEBUG_MULTI_FLAG == 1 + WRITE_VREG(DECODE_STATUS, 0); +#else + amvdec_stop(); +#endif + if (vdec_frame_based(hw_to_vdec(hw))) { + hw->dec_result = DEC_RESULT_DONE; + //if (hw->decode_pic_count == 0) { + hw->decode_pic_count++; + //} + if ((hw->decode_pic_count & 0xffff) == 0) { + /*make ucode do not handle it as first picture*/ + hw->decode_pic_count++; + } + vavs_save_regs(hw); + } else + hw->dec_result = DEC_RESULT_AGAIN; + + debug_print(hw, PRINT_FLAG_DECODING, + "%s BUF_EMPTY, READ_VREG(DECODE_STATUS) = 0x%x, decode_status 0x%x, buf_status 0x%x, scratch_8 (AVS_BUFFERIN) 0x%x, dec_result = 0x%x, decode_pic_count = %d, bit_cnt=0x%x, hw->decode_status_skip_pic_done_flag = %d, hw->decode_decode_cont_start_code = 0x%x\n", + __func__, status_reg, decode_status, + hw->buf_status, + hw->reg_scratch_8, + hw->dec_result, hw->decode_pic_count, + READ_VREG(VIFF_BIT_CNT), hw->decode_status_skip_pic_done_flag, hw->decode_decode_cont_start_code); + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + } + + +#ifdef HANDLE_AVS_IRQ + return IRQ_HANDLED; +#else + return; +#endif +} + +static irqreturn_t vmavs_isr(struct vdec_s *vdec, int irq) +{ + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + return IRQ_WAKE_THREAD; + //return vavs_isr(0, hw); + +} + +static void vmavs_dump_state(struct vdec_s *vdec) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + int i; + debug_print(hw, 0, + "====== %s\n", __func__); + + debug_print(hw, 0, + "width/height (%d/%d), dur %d\n", + hw->frame_width, + hw->frame_height, + hw->frame_dur + ); + + 
debug_print(hw, 0, + "is_framebase(%d), decode_status 0x%x, buf_status 0x%x, buf_recycle_status 0x%x, throw %d, eos %d, state 0x%x, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d\n", + vdec_frame_based(vdec), + READ_VREG(DECODE_STATUS) & 0xff, + hw->buf_status, + hw->buf_recycle_status, + hw->throw_pb_flag, + hw->eos, + hw->stat, + hw->dec_result, + hw->decode_pic_count, + hw->display_frame_count, + hw->run_count, + hw->not_run_ready, + hw->input_empty + ); + + if (vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + debug_print(hw, 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + + debug_print(hw, 0, + "%s, newq(%d/%d), dispq(%d/%d)recycleq(%d/%d) drop %d vf peek %d, prepare/get/put (%d/%d/%d)\n", + __func__, + kfifo_len(&hw->newframe_q), + VF_POOL_SIZE, + kfifo_len(&hw->display_q), + VF_POOL_SIZE, + kfifo_len(&hw->recycle_q), + VF_POOL_SIZE, + hw->drop_frame_count, + hw->peek_num, + hw->prepare_num, + hw->get_num, + hw->put_num + ); + + debug_print(hw, 0, "vfbuf_use:\n"); + for (i = 0; i < hw->vf_buf_num_used; i++) + debug_print(hw, 0, "%d: vf_buf_use %d\n", + i, hw->vfbuf_use[i]); + + debug_print(hw, 0, + "DECODE_STATUS=0x%x\n", + READ_VREG(DECODE_STATUS)); + debug_print(hw, 0, + "MPC_E=0x%x\n", + READ_VREG(MPC_E)); + debug_print(hw, 0, + "DECODE_MODE=0x%x\n", + READ_VREG(DECODE_MODE)); + debug_print(hw, 0, + "wait_buf_status, AV_SCRATCH_5=0x%x\n", + READ_VREG(AV_SCRATCH_5)); + debug_print(hw, 0, + "MBY_MBX=0x%x\n", + READ_VREG(MBY_MBX)); + debug_print(hw, 0, + "VIFF_BIT_CNT=0x%x\n", + READ_VREG(VIFF_BIT_CNT)); + debug_print(hw, 0, + "VLD_MEM_VIFIFO_LEVEL=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + debug_print(hw, 0, + "VLD_MEM_VIFIFO_WP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP)); + debug_print(hw, 0, + "VLD_MEM_VIFIFO_RP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_RP)); + debug_print(hw, 0, + 
"PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + debug_print(hw, 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (vdec_frame_based(vdec) && + (debug & PRINT_FRAMEBASE_DATA) + ) { + int jj; + if (hw->chunk && hw->chunk->block && + hw->chunk->size > 0) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, hw->chunk->size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + debug_print(hw, 0, + "frame data size 0x%x\n", + hw->chunk->size); + for (jj = 0; jj < hw->chunk->size; jj++) { + if ((jj & 0xf) == 0) + debug_print(hw, + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + debug_print_cont(hw, + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + debug_print_cont(hw, + PRINT_FRAMEBASE_DATA, + "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } + +} + + int ammvdec_avs_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_avs_hw_s *hw = NULL; + int r = 0; + + if (vdec_get_debug_flags() & 0x8) + return amvdec_avs_probe(pdev); + + pr_info("ammvdec_avs probe start.\n"); + + if (pdata == NULL) { + pr_info("ammvdec_avs platform data undefined.\n"); + return -EFAULT; + } + + hw = (struct vdec_avs_hw_s *)vzalloc(sizeof(struct vdec_avs_hw_s)); + if (hw == NULL) { + pr_info("\nammvdec_avs decoder driver alloc failed\n"); + return -ENOMEM; + } + /*atomic_set(&hw->error_handler_run, 0);*/ + hw->m_ins_flag = 1; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM || disable_longcabac_trans) + firmware_sel = 1; + + if (firmware_sel == 1) { +#ifndef USE_DYNAMIC_BUF_NUM + vf_buf_num = 4; +#endif + canvas_base = 0; + canvas_num = 3; + } else { + pr_info("Error, do not support longcabac work around!!!"); + r = -ENOMEM; + goto error1; + } + + if (pdata->sys_info) + hw->vavs_amstream_dec_info = *pdata->sys_info; + + 
hw->is_reset = 0; + pdata->user_data_read = NULL; + pdata->reset_userdata_fifo = NULL; + + pdata->private = hw; + pdata->dec_status = vavs_dec_status; + pdata->set_isreset = vavs_set_isreset; + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vmavs_isr; + pdata->threaded_irq_handler = vmavs_isr_thread_fn; + pdata->dump_state = vmavs_dump_state; + + snprintf(hw->vdec_name, sizeof(hw->vdec_name), + "avs-%d", pdev->id); + snprintf(hw->pts_name, sizeof(hw->pts_name), + "%s-timestamp", hw->vdec_name); + snprintf(hw->new_q_name, sizeof(hw->new_q_name), + "%s-newframe_q", hw->vdec_name); + snprintf(hw->disp_q_name, sizeof(hw->disp_q_name), + "%s-dispframe_q", hw->vdec_name); + + vavs_vdec_info_init(hw); + +#ifdef ENABLE_USER_DATA + if (NULL == hw->user_data_buffer) { + hw->user_data_buffer = + dma_alloc_coherent(amports_get_dma_device(), + USER_DATA_SIZE, + &hw->user_data_buffer_phys, GFP_KERNEL); + if (!hw->user_data_buffer) { + pr_info("%s: Can not allocate hw->user_data_buffer\n", + __func__); + r = -ENOMEM; + goto error2; + } + pr_debug("hw->user_data_buffer = 0x%p, hw->user_data_buffer_phys = 0x%x\n", + hw->user_data_buffer, (u32)hw->user_data_buffer_phys); + } +#endif + /*hw->lmem_addr = kmalloc(LMEM_BUF_SIZE, GFP_KERNEL); + if (hw->lmem_addr == NULL) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + return -1; + } + hw->lmem_phy_addr = dma_map_single(amports_get_dma_device(), + hw->lmem_addr, LMEM_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(amports_get_dma_device(), + hw->lmem_phy_addr)) { + pr_err("%s: failed to map lmem buffer\n", __func__); + kfree(hw->lmem_addr); + hw->lmem_addr = NULL; + return -1; + }*/ + /*INIT_WORK(&hw->set_clk_work, avs_set_clk);*/ + hw->lmem_addr = (dma_addr_t)dma_alloc_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, (dma_addr_t *)&hw->lmem_phy_addr, GFP_KERNEL); + if (hw->lmem_addr == 0) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + r = -1; + goto 
error3; + } + + if (vavs_init(hw) < 0) { + pr_info("amvdec_avs init failed.\n"); + r = -ENODEV; + goto error4; + } + + /*INIT_WORK(&hw->fatal_error_wd_work, vavs_fatal_error_handler); + atomic_set(&hw->error_handler_run, 0);*/ +#if 0 +#ifdef ENABLE_USER_DATA + INIT_WORK(&hw->userdata_push_work, userdata_push_do_work); +#endif +#endif + INIT_WORK(&hw->notify_work, vavs_notify_work); + + if (pdata->use_vfm_path) { + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + hw->frameinfo_enable = 1; + } + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->canvas_spec[i] = 0xffffff; + } + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vavs_vf_provider, hw); + + platform_set_drvdata(pdev, pdata); + + hw->platform_dev = pdev; + + vdec_set_prepare_level(pdata, start_decode_buf_level); + + vdec_set_vframe_comm(pdata, DRIVER_NAME); + + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + + /*INIT_WORK(&hw->userdata_push_work, userdata_push_do_work);*/ + + return 0; + +error4: + dma_free_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, (void *)hw->lmem_addr, + hw->lmem_phy_addr); +error3: + dma_free_coherent( + amports_get_dma_device(), + USER_DATA_SIZE, + hw->user_data_buffer, + hw->user_data_buffer_phys); +error2: + kfree(hw->gvs); + hw->gvs = NULL; + pdata->dec_status = NULL; +error1: + vfree(hw); + return r; +} + + int ammvdec_avs_remove(struct platform_device *pdev) +{ + + if (vdec_get_debug_flags() & 0x8) + return amvdec_avs_remove(pdev); + else { + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec = hw_to_vdec(hw); + int i; + + if (hw->stat & 
STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + + cancel_work_sync(&hw->work); + cancel_work_sync(&hw->notify_work); + + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1); + else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + + if (vdec->parallel_dec == 1) { + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + vdec->free_canvas_ex(canvas_y(hw->canvas_spec[i]), vdec->id); + vdec->free_canvas_ex(canvas_u(hw->canvas_spec[i]), vdec->id); + } + } + #ifdef ENABLE_USER_DATA + if (hw->user_data_buffer != NULL) { + dma_free_coherent( + amports_get_dma_device(), + USER_DATA_SIZE, + hw->user_data_buffer, + hw->user_data_buffer_phys); + hw->user_data_buffer = NULL; + hw->user_data_buffer_phys = 0; + } + #endif + /*if (hw->lmem_addr) { + dma_unmap_single(amports_get_dma_device(), + hw->lmem_phy_addr, LMEM_BUF_SIZE, DMA_FROM_DEVICE); + kfree(hw->lmem_addr); + hw->lmem_addr = NULL; + }*/ + if (hw->lmem_addr) { + dma_free_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, (void *)hw->lmem_addr, + hw->lmem_phy_addr); + hw->lmem_addr = 0; + } + + if (hw->fw) { + vfree(hw->fw); + hw->fw = NULL; + } + + pr_info("ammvdec_avs removed.\n"); + if (hw->gvs) { + kfree(hw->gvs); + hw->gvs = NULL; + } + + vfree(hw); + return 0; + } +} + + +#ifdef DEBUG_MULTI_WITH_AUTOMODE +struct stream_buf_s *get_vbuf(void); +s32 esparser_init(struct stream_buf_s *buf, struct vdec_s *vdec); + + +static s32 vavs_init2(struct vdec_avs_hw_s *hw) +{ + int size = -1; + struct firmware_s *fw; + u32 fw_size = 0x1000 * 16; + + fw = vmalloc(sizeof(struct 
firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + pr_info("vavs_init\n"); + + amvdec_enable(); + + + vavs_local_init(hw); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) + size = get_firmware_data(VIDEO_DEC_AVS_MULTI, fw->data); + else { + if (firmware_sel == 1) + size = get_firmware_data(VIDEO_DEC_AVS_NOCABAC, fw->data); +#ifdef AVSP_LONG_CABAC + else { + init_avsp_long_cabac_buf(); + size = get_firmware_data(VIDEO_DEC_AVS_MULTI, fw->data); + } +#endif + } + + if (size < 0) { + amvdec_disable(); + pr_err("get firmware fail."); + /*vfree(buf);*/ + return -1; + } + + fw->len = size; + hw->fw = fw; + if (hw->m_ins_flag) { + init_timer(&hw->check_timer); + hw->check_timer.data = (ulong) hw; + hw->check_timer.function = check_timer_func; + hw->check_timer.expires = jiffies + CHECK_INTERVAL; + + + //add_timer(&hw->check_timer); + hw->stat |= STAT_TIMER_ARM; + + INIT_WORK(&hw->work, vavs_work); + + hw->fw = fw; + } + return 0; +} + +unsigned int debug_flag2; +static int vavs_prot_init2(struct vdec_avs_hw_s *hw, unsigned char post_flag) +{ + int r = 0; + /* + * 2: assist + * 3: vld_reset + * 4: vld_part_reset + * 5: vfifo reset + * 6: iqidct + * 7: mc + * 8: dblk + * 9: pic_dc + * 10: psc + * 11: mcpu + * 12: ccpu + * 13: ddr + * 14: afifo + */ + unsigned char run_flag; +#ifdef OOO + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) /*| (1 << 4)*/); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6) /*| (1 << 4)*/); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1 << 9) | (1 << 8)); + WRITE_VREG(DOS_SW_RESET0, 0); +#endif + /***************** reset vld **********************************/ +#ifdef OOO + WRITE_VREG(POWER_CTL_VLD, 0x10); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 2, MEM_FIFO_CNT_BIT, 2); + WRITE_VREG_BITS(VLD_MEM_VIFIFO_CONTROL, 8, MEM_LEVEL_CNT_BIT, 6); +#endif + if (start_decoding_delay & 0x80000) + msleep(start_decoding_delay&0xffff); + +if (debug_flag2 & 
0x1) + run_flag = post_flag; +else + run_flag = !post_flag; +if (run_flag) { + if (hw->m_ins_flag) { + int i; + if (hw->decode_pic_count == 0) { + r = vavs_canvas_init(hw); +#ifndef USE_DYNAMIC_BUF_NUM + for (i = 0; i < 4; i++) { + WRITE_VREG(AV_SCRATCH_0 + i, + hw->canvas_spec[i] + ); + } +#else + for (i = 0; i < hw->vf_buf_num_used; i += 2) { + WRITE_VREG(buf_spec_reg[i >> 1], + (hw->canvas_spec[i] & 0xffff) | + ((hw->canvas_spec[i + 1] & 0xffff) + << 16) + ); + } +#endif + } else + vavs_restore_regs(hw); + + for (i = 0; i < hw->vf_buf_num_used; i++) { + config_cav_lut_ex(canvas_y(hw->canvas_spec[i]), + hw->canvas_config[i][0].phy_addr, + hw->canvas_config[i][0].width, + hw->canvas_config[i][0].height, + CANVAS_ADDR_NOWRAP, + hw->canvas_config[i][0].block_mode, + 0, VDEC_1); + + config_cav_lut_ex(canvas_u(hw->canvas_spec[i]), + hw->canvas_config[i][1].phy_addr, + hw->canvas_config[i][1].width, + hw->canvas_config[i][1].height, + CANVAS_ADDR_NOWRAP, + hw->canvas_config[i][1].block_mode, + 0, VDEC_1); + } + } +} + +if (debug_flag2 & 0x2) + run_flag = post_flag; +else + run_flag = !post_flag; +if (run_flag) { + + /* notify ucode the buffer offset */ + if (hw->decode_pic_count == 0) + WRITE_VREG(AV_SCRATCH_F, hw->buf_offset); +#ifdef OOO + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); +#endif + } + if (start_decoding_delay & 0x40000) + msleep(start_decoding_delay&0xffff); + + if (debug_flag2 & 0x4) + run_flag = post_flag; + else + run_flag = !post_flag; + if (run_flag) { + if (hw->decode_pic_count == 0) { +#ifndef USE_DYNAMIC_BUF_NUM + WRITE_VREG(AVS_SOS_COUNT, 0); +#endif + WRITE_VREG(AVS_BUFFERIN, 0); + WRITE_VREG(AVS_BUFFEROUT, 0); + } + if (error_recovery_mode) + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 0); + else + WRITE_VREG(AVS_ERROR_RECOVERY_MODE, 1); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); +} + +if (debug_flag2 & 0x8) + run_flag = 
post_flag; +else + run_flag = !post_flag; +if (run_flag) { + +#ifndef USE_DYNAMIC_BUF_NUM /* def DEBUG_UCODE */ + if (hw->decode_pic_count == 0) + WRITE_VREG(AV_SCRATCH_D, 0); +#endif + if (start_decoding_delay & 0x10000) + msleep(start_decoding_delay&0xffff); +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + /* V4L2_PIX_FMT_NV21 V4L2_PIX_FMT_NV21M */ + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + + if (start_decoding_delay & 0x20000) + msleep(start_decoding_delay&0xffff); + + +#ifdef PIC_DC_NEED_CLEAR + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 31); +#endif +} +if (debug_flag2 & 0x10) + run_flag = post_flag; +else + run_flag = !post_flag; +if (run_flag) { +#ifdef ENABLE_USER_DATA + if (firmware_sel == 0) { + pr_info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! firmware_sel is 0\n"); + WRITE_VREG(AV_SCRATCH_N, (u32)(hw->user_data_buffer_phys - hw->buf_offset)); + pr_debug("AV_SCRATCH_N = 0x%x\n", READ_VREG(AV_SCRATCH_N)); + } +#endif +} + +if (debug_flag2 & 0x20) + run_flag = post_flag; +else + run_flag = !post_flag; +if (run_flag) { + if (hw->m_ins_flag) { + if (vdec_frame_based(hw_to_vdec(hw))) + WRITE_VREG(DECODE_MODE, DECODE_MODE_MULTI_FRAMEBASE); + else + WRITE_VREG(DECODE_MODE, DECODE_MODE_MULTI_STREAMBASE); + WRITE_VREG(DECODE_LMEM_BUF_ADR, (u32)hw->lmem_phy_addr); + } else + WRITE_VREG(DECODE_MODE, DECODE_MODE_SINGLE); + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + hw->old_udebug_flag = udebug_flag; +} + return r; +} + +static void init_hw(struct vdec_s *vdec) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + int ret; + pr_info("%s, %d\n", __func__, __LINE__); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM) + ret = amvdec_loadmc_ex(VFORMAT_AVS, NULL, hw->fw->data); + else if (firmware_sel == 1) + ret = amvdec_loadmc_ex(VFORMAT_AVS, "avs_no_cabac", hw->fw->data); + else + ret = amvdec_loadmc_ex(VFORMAT_AVS, NULL, hw->fw->data); + + if (ret < 0) { + amvdec_disable(); + /*vfree(buf);*/ + pr_err("AVS: the %s fw loading failed, 
err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + } + pr_info("%s, %d\n", __func__, __LINE__); + + /*vfree(buf);*/ + + hw->stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + ret = vavs_prot_init2(hw, 0); + if (ret < 0) + return; + pr_info("%s, %d\n", __func__, __LINE__); + +} + + +static unsigned long run_ready2(struct vdec_s *vdec, unsigned long mask) +{ + return 1; +} + +static void run2(struct vdec_s *vdec, unsigned long mask, +void (*callback)(struct vdec_s *, void *), + void *arg) +{ + struct vdec_avs_hw_s *hw = + (struct vdec_avs_hw_s *)vdec->private; + pr_info("%s, %d\n", __func__, __LINE__); + + vavs_prot_init2(hw, 1); + + vdec_source_changed(VFORMAT_AVS, + 1920, 1080, 30); + + amvdec_start(); + + hw->stat |= STAT_VDEC_RUN; + pr_info("%s %d\n", __func__, __LINE__); + +} + +static int ammvdec_avs_probe2(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_avs_hw_s *hw = NULL; + + pr_info("ammvdec_avs probe start.\n"); + + if (pdata == NULL) { + pr_info("ammvdec_avs platform data undefined.\n"); + return -EFAULT; + } + pr_info("%s %d\n", __func__, __LINE__); + + hw = (struct vdec_avs_hw_s *)vzalloc(sizeof(struct vdec_avs_hw_s)); + if (hw == NULL) { + pr_info("\nammvdec_avs decoder driver alloc failed\n"); + return -ENOMEM; + } + pr_info("%s %d\n", __func__, __LINE__); + /*atomic_set(&hw->error_handler_run, 0);*/ + hw->m_ins_flag = 1; + pr_info("%s %d\n", __func__, __LINE__); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXM || disable_longcabac_trans) + firmware_sel = 1; + pr_info("%s %d\n", __func__, __LINE__); + + if (firmware_sel == 1) { +#ifndef USE_DYNAMIC_BUF_NUM + vf_buf_num = 4; +#endif + canvas_base = 0; + canvas_num = 3; + } else { + pr_info("Error, do not support longcabac work around!!!"); + return -ENOMEM; + } + pr_info("%s %d\n", __func__, __LINE__); + + if (pdata->sys_info) + hw->vavs_amstream_dec_info = *pdata->sys_info; + pr_info("%s %d\n", __func__, 
__LINE__); + + hw->is_reset = 0; + pdata->user_data_read = NULL; + pdata->reset_userdata_fifo = NULL; + + pr_info("%s %d\n", __func__, __LINE__); + + pdata->private = hw; + pdata->dec_status = vavs_dec_status; + pdata->set_isreset = vavs_set_isreset; + pdata->run_ready = run_ready2; + pdata->run = run2; + pdata->reset = reset; + pdata->irq_handler = vmavs_isr; + pdata->threaded_irq_handler = vmavs_isr_thread_fn; + pdata->dump_state = vmavs_dump_state; + + pr_info("%s %d\n", __func__, __LINE__); + + vavs_vdec_info_init(hw); + + pr_info("%s %d\n", __func__, __LINE__); + +#ifdef ENABLE_USER_DATA + if (NULL == hw->user_data_buffer) { + hw->user_data_buffer = + dma_alloc_coherent(amports_get_dma_device(), + USER_DATA_SIZE, + &hw->user_data_buffer_phys, GFP_KERNEL); + if (!hw->user_data_buffer) { + pr_info("%s: Can not allocate hw->user_data_buffer\n", + __func__); + return -ENOMEM; + } + pr_debug("hw->user_data_buffer = 0x%p, hw->user_data_buffer_phys = 0x%x\n", + hw->user_data_buffer, (u32)hw->user_data_buffer_phys); + } +#endif + hw->lmem_addr = kmalloc(LMEM_BUF_SIZE, GFP_KERNEL); + if (hw->lmem_addr == NULL) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + return -1; + } + hw->lmem_phy_addr = dma_map_single(amports_get_dma_device(), + hw->lmem_addr, LMEM_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(amports_get_dma_device(), + hw->lmem_phy_addr)) { + pr_err("%s: failed to map lmem buffer\n", __func__); + kfree(hw->lmem_addr); + hw->lmem_addr = NULL; + return -1; + } + + pr_info("%s %d\n", __func__, __LINE__); + + /*INIT_WORK(&hw->set_clk_work, avs_set_clk);*/ + + pr_info("%s %d\n", __func__, __LINE__); + + if (vavs_init2(hw) < 0) { + pr_info("amvdec_avs init failed.\n"); + kfree(hw->gvs); + hw->gvs = NULL; + pdata->dec_status = NULL; + return -ENODEV; + } + /*vdec = pdata;*/ + pr_info("%s, %d\n", __func__, __LINE__); + +if (hw->m_ins_flag) { + INIT_WORK(&hw->notify_work, vavs_notify_work); +#if 1 + if (pdata->use_vfm_path) { + 
snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + hw->frameinfo_enable = 1; + } + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->canvas_spec[i] = 0xffffff; + } + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vavs_vf_provider, hw); + + platform_set_drvdata(pdev, pdata); + + hw->platform_dev = pdev; + + vdec_set_prepare_level(pdata, start_decode_buf_level); + + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + pr_info("%s, %d\n", __func__, __LINE__); +#endif +}else{ + /*INIT_WORK(&hw->fatal_error_wd_work, vavs_fatal_error_handler); + atomic_set(&hw->error_handler_run, 0);*/ +#ifdef ENABLE_USER_DATA + INIT_WORK(&hw->userdata_push_work, userdata_push_do_work); +#endif + INIT_WORK(&hw->notify_work, vavs_notify_work); +} + + init_hw(pdata); + return 0; +} + +static int ammvdec_avs_remove2(struct platform_device *pdev) +{ + struct vdec_avs_hw_s *hw = ghw; + + cancel_work_sync(&hw->fatal_error_wd_work); + atomic_set(&hw->error_handler_run, 0); +#ifdef ENABLE_USER_DATA + cancel_work_sync(&hw->userdata_push_work); +#endif + cancel_work_sync(&hw->notify_work); + cancel_work_sync(&hw->set_clk_work); + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)vavs_dec_id); + hw->stat &= ~STAT_ISR_REG; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->recycle_timer); + hw->stat &= ~STAT_TIMER_ARM; + } +#ifdef AVSP_LONG_CABAC + if (firmware_sel == 0) { + mutex_lock(&vavs_mutex); + cancel_work_sync(&long_cabac_wd_work); + mutex_unlock(&vavs_mutex); + + if (es_write_addr_virt) { +#if 0 + codec_mm_free_for_dma("vavs", 
es_write_addr_phy); +#else + dma_unmap_single(amports_get_dma_device(), + es_write_addr_phy, + MAX_CODED_FRAME_SIZE, DMA_FROM_DEVICE); + /*kfree(es_write_addr_virt);*/ + es_write_addr_virt = NULL; +#endif + } + +#ifdef BITSTREAM_READ_TMP_NO_CACHE + if (bitstream_read_tmp) { + dma_free_coherent(amports_get_dma_device(), + SVA_STREAM_BUF_SIZE, bitstream_read_tmp, + bitstream_read_tmp_phy); + bitstream_read_tmp = NULL; + } +#else + if (bitstream_read_tmp) { + dma_unmap_single(amports_get_dma_device(), + bitstream_read_tmp_phy, + SVA_STREAM_BUF_SIZE, DMA_FROM_DEVICE); + kfree(bitstream_read_tmp); + bitstream_read_tmp = NULL; + } +#endif + } +#endif + if (hw->stat & STAT_VF_HOOK) { + if (hw->fr_hint_status == VDEC_HINTED && !hw->is_reset) + avs_vf_notify_receiver(hw, PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_END_HINT, NULL); + hw->fr_hint_status = VDEC_NO_NEED_HINT; + vf_unreg_provider(&vavs_vf_prov); + hw->stat &= ~STAT_VF_HOOK; + } + +#ifdef ENABLE_USER_DATA + if (hw->user_data_buffer != NULL) { + dma_free_coherent( + amports_get_dma_device(), + USER_DATA_SIZE, + hw->user_data_buffer, + hw->user_data_buffer_phys); + hw->user_data_buffer = NULL; + hw->user_data_buffer_phys = 0; + } +#endif + + if (hw->fw) { + vfree(hw->fw); + hw->fw = NULL; + } + + amvdec_disable(); + /*vdec_disable_DMC(NULL);*/ + + hw->pic_type = 0; + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } +#ifdef DEBUG_PTS + pr_debug("pts hit %d, pts missed %d, i hit %d, missed %d\n", hw->pts_hit, + hw->pts_missed, hw->pts_i_hit, hw->pts_i_missed); + pr_debug("total frame %d, hw->avi_flag %d, rate %d\n", hw->total_frame, hw->avi_flag, + hw->vavs_amstream_dec_info.rate); +#endif + kfree(hw->gvs); + hw->gvs = NULL; + vfree(hw); + return 0; +} +#endif + +static struct platform_driver ammvdec_avs_driver = { +#ifdef DEBUG_MULTI_WITH_AUTOMODE + .probe = ammvdec_avs_probe2, + .remove = ammvdec_avs_remove2, +#else + .probe = ammvdec_avs_probe, + .remove = 
ammvdec_avs_remove, +#endif +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = MULTI_DRIVER_NAME, + } +}; + +static struct codec_profile_t ammvdec_avs_profile = { + .name = "AVS-V4L", + .profile = "" +}; + +static struct mconfig mavs_configs[] = { + /*MC_PU32("stat", &stat), + MC_PU32("debug_flag", &debug_flag), + MC_PU32("error_recovery_mode", &error_recovery_mode), + MC_PU32("hw->pic_type", &hw->pic_type), + MC_PU32("radr", &radr), + MC_PU32("vf_buf_num", &vf_buf_num), + MC_PU32("vf_buf_num_used", &vf_buf_num_used), + MC_PU32("canvas_base", &canvas_base), + MC_PU32("firmware_sel", &firmware_sel), + */ +}; +static struct mconfig_node mavs_node; + + +static int __init ammvdec_avs_driver_init_module(void) +{ + pr_debug("ammvdec_avs module init\n"); + + if (platform_driver_register(&ammvdec_avs_driver)) { + pr_err("failed to register ammvdec_avs driver\n"); + return -ENODEV; + } + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB) + ammvdec_avs_profile.profile = "mavs+"; + + vcodec_profile_register(&ammvdec_avs_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &mavs_node, + "mavs-v4l", mavs_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_AVS, 1); + return 0; +} + + + +static void __exit ammvdec_avs_driver_remove_module(void) +{ + pr_debug("ammvdec_avs module remove.\n"); + + platform_driver_unregister(&ammvdec_avs_driver); +} + +/****************************************/ +/* +module_param(stat, uint, 0664); +MODULE_PARM_DESC(stat, "\n amvdec_avs stat\n"); +*/ +/****************************************** + *module_param(run_flag, uint, 0664); + *MODULE_PARM_DESC(run_flag, "\n run_flag\n"); + * + *module_param(step_flag, uint, 0664); + *MODULE_PARM_DESC(step_flag, "\n step_flag\n"); + ******************************************* + */ +module_param(step, uint, 0664); +MODULE_PARM_DESC(step, "\n step\n"); + +module_param(debug, uint, 0664); +MODULE_PARM_DESC(debug, "\n debug\n"); + 
+/* Debug/tuning module parameters (writable via
+ * /sys/module/<module>/parameters). Descriptions follow the driver's
+ * existing terse "\n name\n" convention. */
+module_param(debug_mask, uint, 0664);
+MODULE_PARM_DESC(debug_mask, "\n debug_mask\n");
+
+module_param(error_recovery_mode, uint, 0664);
+MODULE_PARM_DESC(error_recovery_mode, "\n error_recovery_mode\n");
+
+/******************************************
+ *module_param(error_watchdog_threshold, uint, 0664);
+ *MODULE_PARM_DESC(error_watchdog_threshold, "\n error_watchdog_threshold\n");
+ *
+ *module_param(error_watchdog_buf_threshold, uint, 0664);
+ *MODULE_PARM_DESC(error_watchdog_buf_threshold,
+ * "\n error_watchdog_buf_threshold\n");
+ *******************************************
+ */
+/*
+module_param(pic_type, uint, 0444);
+MODULE_PARM_DESC(pic_type, "\n amdec_vas picture type\n");
+*/
+/* NOTE(review): radr/rval/dbg_cmd look like a register peek/poke debug
+ * interface (address / value / command) -- the code that consumes them is
+ * not visible in this chunk; confirm against the debug handler. */
+module_param(radr, uint, 0664);
+MODULE_PARM_DESC(radr, "\nradr\n");
+
+module_param(rval, uint, 0664);
+MODULE_PARM_DESC(rval, "\nrval\n");
+
+module_param(dbg_cmd, uint, 0664);
+MODULE_PARM_DESC(dbg_cmd, "\n dbg_cmd\n");
+
+/* Number of vframe buffers; forced to 4 in probe when firmware_sel == 1
+ * and USE_DYNAMIC_BUF_NUM is not defined. */
+module_param(vf_buf_num, uint, 0664);
+MODULE_PARM_DESC(vf_buf_num, "\nvf_buf_num\n");
+
+/*
+module_param(vf_buf_num_used, uint, 0664);
+MODULE_PARM_DESC(vf_buf_num_used, "\nvf_buf_num_used\n");
+*/
+module_param(canvas_base, uint, 0664);
+MODULE_PARM_DESC(canvas_base, "\ncanvas_base\n");
+
+
+/* Firmware selection: 1 loads the "avs_no_cabac" firmware in init_hw();
+ * probe forces 1 on GXM+ or when disable_longcabac_trans is set. */
+module_param(firmware_sel, uint, 0664);
+MODULE_PARM_DESC(firmware_sel, "\n firmware_sel\n");
+
+module_param(disable_longcabac_trans, uint, 0664);
+MODULE_PARM_DESC(disable_longcabac_trans, "\n disable_longcabac_trans\n");
+
+module_param(dec_control, uint, 0664);
+MODULE_PARM_DESC(dec_control, "\n amvdec_vavs decoder control\n");
+
+/* Buffer fill level handed to vdec_set_prepare_level() in probe. */
+module_param(start_decode_buf_level, int, 0664);
+MODULE_PARM_DESC(start_decode_buf_level,
+	"\n avs start_decode_buf_level\n");
+
+module_param(decode_timeout_val, uint, 0664);
+MODULE_PARM_DESC(decode_timeout_val,
+	"\n avs decode_timeout_val\n");
+
+module_param(error_handle_policy, uint, 0664);
+MODULE_PARM_DESC(error_handle_policy,
+	"\n avs error_handle_policy\n");
+
+module_param(again_threshold, uint, 0664);
+MODULE_PARM_DESC(again_threshold, "\n again_threshold\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_h265 udebug_flag\n"); + +module_param(udebug_pause_pos, uint, 0664); +MODULE_PARM_DESC(udebug_pause_pos, "\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +module_param(udebug_pause_ins_id, uint, 0664); +MODULE_PARM_DESC(udebug_pause_ins_id, "\n udebug_pause_ins_id\n"); + +module_param(start_decoding_delay, uint, 0664); +MODULE_PARM_DESC(start_decoding_delay, "\n start_decoding_delay\n"); + +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, + "\n ammvdec_mavs pre_decode_buf_level\n"); + + +#ifdef DEBUG_MULTI_WITH_AUTOMODE +module_param(debug_flag2, uint, 0664); +MODULE_PARM_DESC(debug_flag2, "\n debug_flag2\n"); +#endif +module_param(force_fps, uint, 0664); +MODULE_PARM_DESC(force_fps, "\n force_fps\n"); + +#ifdef DEBUG_MULTI_FRAME_INS +module_param(delay, uint, 0664); +MODULE_PARM_DESC(delay, "\n delay\n"); + +module_param_array(max_run_count, uint, &max_decode_instance_num, 0664); + +#endif + +module_param_array(ins_udebug_flag, uint, &max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, &max_decode_instance_num, 0664); + +module_param_array(run_count, uint, &max_decode_instance_num, 0664); + +module_param_array(max_get_frame_interval, uint, + &max_decode_instance_num, 0664); + + +module_init(ammvdec_avs_driver_init_module); +module_exit(ammvdec_avs_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC AVS Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Qi Wang <qi.wang@amlogic.com>");
diff --git a/drivers/frame_provider/decoder_v4l/avs_multi/avs_multi.h b/drivers/frame_provider/decoder_v4l/avs_multi/avs_multi.h new file mode 100644 index 0000000..8922b40 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/avs_multi/avs_multi.h
@@ -0,0 +1,90 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description: shared declarations for the multi-instance AVS decoder
+* (long-CABAC work-around buffers and debug/dump bit flags).
+*/
+#ifndef AVS_H_
+#define AVS_H_
+
+#ifdef CONFIG_AMLOGIC_AVSP_LONG_CABAC
+#define AVSP_LONG_CABAC
+#endif
+/*#define BITSTREAM_READ_TMP_NO_CACHE*/
+
+#ifdef AVSP_LONG_CABAC
+/* Byte budget reserved for one coded frame's ES data. */
+#define MAX_CODED_FRAME_SIZE 1500000 /*!< bytes for one frame*/
+#define LOCAL_HEAP_SIZE (1024*1024*10)
+/*
+ *#define MAX_CODED_FRAME_SIZE 240000
+ *#define MAX_CODED_FRAME_SIZE 700000
+ */
+#define SVA_STREAM_BUF_SIZE 1024
+
+/* Long-CABAC work buffers; definitions live in the .c side
+ * (see the unmap/free paths in ammvdec_avs_remove2). */
+extern void *es_write_addr_virt;
+extern dma_addr_t es_write_addr_phy;
+
+extern void *bitstream_read_tmp;
+extern dma_addr_t bitstream_read_tmp_phy;
+extern void *avsp_heap_adr;
+
+int avs_get_debug_flag(void);
+
+int process_long_cabac(void);
+
+/* Layout of the LONG_CABAC_REQ scratch register:
+ * bit [6] - skip_mode_flag
+ * bit [5:4] - picture_type
+ * bit [3] - picture_structure (0-Field, 1-Frame)
+ * bit [2] - fixed_picture_qp
+ * bit [1] - progressive_sequence
+ * bit [0] - active
+ */
+#define LONG_CABAC_REQ AV_SCRATCH_K
+#define LONG_CABAC_SRC_ADDR AV_SCRATCH_H
+#define LONG_CABAC_DES_ADDR AV_SCRATCH_I
+/* bit[31:16] - vertical_size
+ * bit[15:0] - horizontal_size
+ */
+#define LONG_CABAC_PIC_SIZE AV_SCRATCH_J
+
+#endif
+
+/*
+ *#define PERFORMANCE_DEBUG
+ *#define DUMP_DEBUG
+ */
+/* Debug/dump bit flags (OR-able). */
+#define AVS_DEBUG_PRINT 0x01
+#define AVS_DEBUG_OLD_ERROR_HANDLE 0x10
+#define AVS_DEBUG_USE_FULL_SPEED 0x80
+#define AEC_DUMP 0x100
+#define STREAM_INFO_DUMP 0x200
+#define SLICE_INFO_DUMP 0x400
+#define MB_INFO_DUMP 0x800
+#define MB_NUM_DUMP 0x1000
+#define BLOCK_NUM_DUMP 0x2000
+#define COEFF_DUMP 0x4000
+#define ES_DUMP 0x8000
+#define DQUANT_DUMP 0x10000
+#define STREAM_INFO_DUMP_MORE 0x20000
+#define STREAM_INFO_DUMP_MORE2 0x40000
+
+/* NOTE(review): the next three externs duplicate declarations above, but
+ * the earlier copies are guarded by #ifdef AVSP_LONG_CABAC while these are
+ * unconditional -- they are needed when that config is off, so do not
+ * "clean up" the duplication without checking both build configurations. */
+extern void *es_write_addr_virt;
+extern void *bitstream_read_tmp;
+extern dma_addr_t bitstream_read_tmp_phy;
+int read_bitstream(unsigned char *Buf, int size);
+int u_v(int LenInBits, char *tracestring);
+
+#endif
diff --git a/drivers/frame_provider/decoder_v4l/h264_multi/Makefile b/drivers/frame_provider/decoder_v4l/h264_multi/Makefile new file mode 100644 index 0000000..48f2c5a --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/h264_multi/Makefile
@@ -0,0 +1,2 @@
+# Kbuild fragment: build the multi-instance H.264 v4l decoder module
+# amvdec_mh264_v4l.ko from vmh264.o and h264_dpb.o when
+# CONFIG_AMLOGIC_MEDIA_VDEC_H264_MULTI=m/y.
+obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_H264_MULTI) += amvdec_mh264_v4l.o
+amvdec_mh264_v4l-objs += vmh264.o h264_dpb.o
diff --git a/drivers/frame_provider/decoder_v4l/h264_multi/h264_dpb.c b/drivers/frame_provider/decoder_v4l/h264_multi/h264_dpb.c new file mode 100644 index 0000000..8ab097e --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/h264_multi/h264_dpb.c
@@ -0,0 +1,6036 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../decoder/utils/vdec.h" +#include "../../decoder/utils/amvdec.h" + +#include "h264_dpb.h" + +#define FRAME_NUM_MAX_SIZE 0x10000 + +#undef pr_info +#define pr_info printk +int dpb_print(int index, int debug_flag, const char *fmt, ...) +{ + if (((h264_debug_flag & debug_flag) && + ((1 << index) & h264_debug_mask)) + || (debug_flag == PRINT_FLAG_ERROR)) { + unsigned char *buf = kzalloc(512, GFP_ATOMIC); + int len = 0; + va_list args; + + if (!buf) + return 0; + + va_start(args, fmt); + len = sprintf(buf, "%d: ", index); + vsnprintf(buf + len, 512-len, fmt, args); + pr_debug("%s", buf); + va_end(args); + kfree(buf); + } + return 0; +} + +int dpb_print_cont(int index, int debug_flag, const char *fmt, ...) 
+{ + if (((h264_debug_flag & debug_flag) && + ((1 << index) & h264_debug_mask)) + || (debug_flag == PRINT_FLAG_ERROR)) { + unsigned char *buf = kzalloc(512, GFP_ATOMIC); + int len = 0; + va_list args; + + if (!buf) + return 0; + + va_start(args, fmt); + vsnprintf(buf + len, 512-len, fmt, args); + pr_info("%s", buf); + va_end(args); + kfree(buf); + } + return 0; +} + +unsigned char dpb_is_debug(int index, int debug_flag) +{ + if (((h264_debug_flag & debug_flag) && + ((1 << index) & h264_debug_mask)) + || (debug_flag == PRINT_FLAG_ERROR)) + return 1; + return 0; +} + +#define CHECK_VALID(list_size, mark) {\ + if (list_size > MAX_LIST_SIZE || list_size < 0) { \ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_ERROR, \ + "%s(%d): listXsize[%d] %d is larger than max size\r\n",\ + __func__, __LINE__, mark, list_size);\ + list_size = 0; \ + p_H264_Dpb->dpb_error_flag = __LINE__;\ + } \ + } + +static struct DecRefPicMarking_s + dummy_dec_ref_pic_marking_buffer + [DEC_REF_PIC_MARKING_BUFFER_NUM_MAX]; +static struct StorablePicture dummy_pic; +static struct FrameStore dummy_fs; +static struct StorablePicture *get_new_pic( + struct h264_dpb_stru *p_H264_Dpb, + enum PictureStructure structure, unsigned char is_output); + + +static void init_dummy_fs(void) +{ + dummy_fs.frame = &dummy_pic; + dummy_fs.top_field = &dummy_pic; + dummy_fs.bottom_field = &dummy_pic; + + dummy_pic.top_field = &dummy_pic; + dummy_pic.bottom_field = &dummy_pic; + dummy_pic.frame = &dummy_pic; + + dummy_pic.dec_ref_pic_marking_buffer = + &dummy_dec_ref_pic_marking_buffer[0]; +} + +enum { + LIST_0 = 0, + LIST_1 = 1, + BI_PRED = 2, + BI_PRED_L0 = 3, + BI_PRED_L1 = 4 +}; + +void ref_pic_list_reordering(struct h264_dpb_stru *p_H264_Dpb, + struct Slice *currSlice) +{ + /* struct VideoParameters *p_Vid = currSlice->p_Vid; + * byte dP_nr = assignSE2partition[currSlice->dp_mode][SE_HEADER]; + * DataPartition *partition = &(currSlice->partArr[dP_nr]); + * Bitstream *currStream = partition->bitstream; + */ + int 
i, j, val; + unsigned short *reorder_cmd = + &p_H264_Dpb->dpb_param.mmco.l0_reorder_cmd[0]; + /* alloc_ref_pic_list_reordering_buffer(currSlice); */ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + if (currSlice->slice_type != I_SLICE && + currSlice->slice_type != SI_SLICE) { + /* val = currSlice->ref_pic_list_reordering_flag[LIST_0] = + * read_u_1 ("SH: ref_pic_list_reordering_flag_l0", + * currStream, &p_Dec->UsedBits); + */ + if (reorder_cmd[0] != 3) { + val = currSlice-> + ref_pic_list_reordering_flag[LIST_0] = 1; + } else { + val = currSlice-> + ref_pic_list_reordering_flag[LIST_0] = 0; + } + if (val) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "%s, ref_pic_list_reordering_flag[LIST_0] is 1\n", + __func__); + + j = 0; + i = 0; + do { + val = currSlice-> + modification_of_pic_nums_idc[LIST_0][i] = + reorder_cmd[j++]; + /* read_ue_v( + * "SH: modification_of_pic_nums_idc_l0", + * currStream, &p_Dec->UsedBits); + */ + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "%d(%d):val %x\n", i, j, val); + if (j >= 66) { + currSlice-> + ref_pic_list_reordering_flag[LIST_0] = + 0; /* by rain */ + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, + "%s error\n", __func__); + break; + } + if (val == 0 || val == 1) { + currSlice-> + abs_diff_pic_num_minus1[LIST_0][i] = + reorder_cmd[j++]; + /* read_ue_v("SH: " + *"abs_diff_pic_num_minus1_l0", + *currStream, &p_Dec->UsedBits); + */ + } else { + if (val == 2) { + currSlice-> + long_term_pic_idx[LIST_0][i] = + reorder_cmd[j++]; + /* read_ue_v( + *"SH: long_term_pic_idx_l0", + *currStream, + *&p_Dec->UsedBits); + */ + } + } + i++; + /* assert (i>currSlice-> + * num_ref_idx_active[LIST_0]); + */ + if ( + +/* + * i>currSlice->num_ref_idx_active[LIST_0] || + */ + i >= REORDERING_COMMAND_MAX_SIZE) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "%s error %d %d\n", + __func__, i, + currSlice-> + num_ref_idx_active[LIST_0]); + 
currSlice-> + ref_pic_list_reordering_flag[LIST_0] = + 0; /* by rain */ + break; + } + if (j >= 66) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, "%s error\n", + __func__); + currSlice-> + ref_pic_list_reordering_flag[LIST_0] = + 0; /* by rain */ + break; + } + + } while (val != 3); + } + } + + if (currSlice->slice_type == B_SLICE) { + reorder_cmd = &p_H264_Dpb->dpb_param.mmco.l1_reorder_cmd[0]; + /* val = currSlice->ref_pic_list_reordering_flag[LIST_1] + *= read_u_1 ("SH: ref_pic_list_reordering_flag_l1", + *currStream, + *&p_Dec->UsedBits); + */ + + if (reorder_cmd[0] != 3) { + val = + currSlice->ref_pic_list_reordering_flag[LIST_1] = 1; + } else { + val = + currSlice->ref_pic_list_reordering_flag[LIST_1] = 0; + } + + if (val) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "%s, ref_pic_list_reordering_flag[LIST_1] is 1\n", + __func__); + + j = 0; + i = 0; + do { + val = currSlice-> + modification_of_pic_nums_idc[LIST_1][i] = + reorder_cmd[j++]; + /* read_ue_v( + *"SH: modification_of_pic_nums_idc_l1", + *currStream, + *&p_Dec->UsedBits); + */ + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "%d(%d):val %x\n", + i, j, val); + if (j >= 66) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, "%s error\n", + __func__); + currSlice-> + ref_pic_list_reordering_flag[LIST_1] = + 0; /* by rain */ + break; + } + if (val == 0 || val == 1) { + currSlice-> + abs_diff_pic_num_minus1[LIST_1][i] = + reorder_cmd[j++]; + /* read_ue_v( + *"SH: abs_diff_pic_num_minus1_l1", + *currStream, &p_Dec->UsedBits); + */ + } else { + if (val == 2) { + currSlice-> + long_term_pic_idx[LIST_1][i] = + reorder_cmd[j++]; + /* read_ue_v( + *"SH: long_term_pic_idx_l1", + *currStream, + *&p_Dec->UsedBits); + */ + } + } + i++; + /* assert(i>currSlice-> + * num_ref_idx_active[LIST_1]); + */ + if ( + /*i>currSlice->num_ref_idx_active[LIST_1] || */ + i >= REORDERING_COMMAND_MAX_SIZE) { + dpb_print(p_H264_Dpb->decoder_index, + 
PRINT_FLAG_DPB_DETAIL, + "%s error %d %d\n", + __func__, i, + currSlice-> + num_ref_idx_active[LIST_0]); + currSlice-> + ref_pic_list_reordering_flag[LIST_1] = + 0; /* by rain */ + break; + } + if (j >= 66) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, + "%s error\n", __func__); + break; + } + } while (val != 3); + } + } + + /* set reference index of redundant slices. */ + /* + *if (currSlice->redundant_pic_cnt && + *(currSlice->slice_type != I_SLICE)) + *{ + * currSlice->redundant_slice_ref_idx = + * currSlice->abs_diff_pic_num_minus1[LIST_0][0] + 1; + *} + */ +} + +void slice_prepare(struct h264_dpb_stru *p_H264_Dpb, + struct DecodedPictureBuffer *p_Dpb, + struct VideoParameters *p_Vid, + struct SPSParameters *sps, struct Slice *pSlice) +{ + int i, j; + /* p_Vid->active_sps = sps; */ + unsigned short *mmco_cmd = &p_H264_Dpb->dpb_param.mmco.mmco_cmd[0]; + /* for decode_poc */ + sps->pic_order_cnt_type = + p_H264_Dpb->dpb_param.l.data[PIC_ORDER_CNT_TYPE]; + sps->log2_max_pic_order_cnt_lsb_minus4 = + p_H264_Dpb->dpb_param.l.data[LOG2_MAX_PIC_ORDER_CNT_LSB] - 4; + sps->num_ref_frames_in_pic_order_cnt_cycle = + p_H264_Dpb-> + dpb_param.l.data[NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE]; + for (i = 0; i < 128; i++) + sps->offset_for_ref_frame[i] = + (short) p_H264_Dpb-> + dpb_param.mmco.offset_for_ref_frame_base[i]; + sps->offset_for_non_ref_pic = + (short) p_H264_Dpb->dpb_param.l.data[OFFSET_FOR_NON_REF_PIC]; + sps->offset_for_top_to_bottom_field = + (short) p_H264_Dpb->dpb_param.l.data + [OFFSET_FOR_TOP_TO_BOTTOM_FIELD]; + + pSlice->frame_num = p_H264_Dpb->dpb_param.dpb.frame_num; + pSlice->idr_flag = + (p_H264_Dpb->dpb_param.dpb.NAL_info_mmco & 0x1f) + == 5 ? 
1 : 0; + pSlice->nal_reference_idc = + (p_H264_Dpb->dpb_param.dpb.NAL_info_mmco >> 5) + & 0x3; + pSlice->pic_order_cnt_lsb = + p_H264_Dpb->dpb_param.dpb.pic_order_cnt_lsb; + pSlice->field_pic_flag = 0; + pSlice->bottom_field_flag = 0; + pSlice->delta_pic_order_cnt_bottom = val( + p_H264_Dpb->dpb_param.dpb.delta_pic_order_cnt_bottom); + pSlice->delta_pic_order_cnt[0] = val( + p_H264_Dpb->dpb_param.dpb.delta_pic_order_cnt_0); + pSlice->delta_pic_order_cnt[1] = val( + p_H264_Dpb->dpb_param.dpb.delta_pic_order_cnt_1); + + p_Vid->last_has_mmco_5 = 0; + /* last memory_management_control_operation is 5 */ + p_Vid->last_pic_bottom_field = 0; + p_Vid->max_frame_num = 1 << + (p_H264_Dpb->dpb_param.l.data[LOG2_MAX_FRAME_NUM]); + + /**/ + pSlice->structure = (p_H264_Dpb-> + dpb_param.l.data[NEW_PICTURE_STRUCTURE] == 3) ? + FRAME : p_H264_Dpb->dpb_param.l.data[NEW_PICTURE_STRUCTURE]; + if (pSlice->structure == FRAME) { + pSlice->field_pic_flag = 0; + pSlice->bottom_field_flag = 0; + } else { + pSlice->field_pic_flag = 1; + if (pSlice->structure == TOP_FIELD) + pSlice->bottom_field_flag = 0; + else + pSlice->bottom_field_flag = 1; + } + pSlice->pic_struct = p_H264_Dpb->dpb_param.l.data[PICTURE_STRUCT]; + + sps->num_ref_frames = p_H264_Dpb-> + dpb_param.l.data[MAX_REFERENCE_FRAME_NUM]; + sps->profile_idc = + (p_H264_Dpb->dpb_param.l.data[PROFILE_IDC_MMCO] >> 8) & 0xff; + /*sps->max_dpb_size = p_H264_Dpb->dpb_param.l.data[MAX_DPB_SIZE];*/ + if (pSlice->idr_flag) { + pSlice->long_term_reference_flag = mmco_cmd[0] & 1; + pSlice->no_output_of_prior_pics_flag = (mmco_cmd[0] >> 1) & 1; + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "IDR: long_term_reference_flag %d no_output_of_prior_pics_flag %d\r\n", + pSlice->long_term_reference_flag, + pSlice->no_output_of_prior_pics_flag); + + p_H264_Dpb->long_term_reference_flag = pSlice->long_term_reference_flag; + + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "idr set pre_frame_num(%d) to frame_num 
(%d)\n", + p_Vid->pre_frame_num, pSlice->frame_num); + + p_Vid->pre_frame_num = pSlice->frame_num; + } else if (p_H264_Dpb->mDPB.first_pic_done == 0) { + /* by rain + handle the case when first slice is I instead of IDR + */ + p_Vid->pre_frame_num = pSlice->frame_num; + } + /* pSlice->adaptive_ref_pic_buffering_flag; */ + sps->log2_max_frame_num_minus4 = + p_H264_Dpb->dpb_param.l.data[LOG2_MAX_FRAME_NUM] - 4; + sps->frame_num_gap_allowed = p_H264_Dpb->dpb_param.l.data[FRAME_NUM_GAP_ALLOWED]; + + p_Vid->non_conforming_stream = + p_H264_Dpb->dpb_param.l.data[NON_CONFORMING_STREAM]; + p_Vid->recovery_point = + p_H264_Dpb->dpb_param.l.data[RECOVERY_POINT]; + switch (p_H264_Dpb->dpb_param.l.data[SLICE_TYPE]) { + case I_Slice: + pSlice->slice_type = I_SLICE; + break; + case P_Slice: + pSlice->slice_type = P_SLICE; + break; + case B_Slice: + pSlice->slice_type = B_SLICE; + break; + default: + pSlice->slice_type = NUM_SLICE_TYPES; + break; + } + + pSlice->num_ref_idx_active[LIST_0] = + p_H264_Dpb->dpb_param.dpb.num_ref_idx_l0_active_minus1 + + 1; + /* p_H264_Dpb->dpb_param.l.data[PPS_NUM_REF_IDX_L0_ACTIVE_MINUS1]; */ + pSlice->num_ref_idx_active[LIST_1] = + p_H264_Dpb->dpb_param.dpb.num_ref_idx_l1_active_minus1 + + 1; + /* p_H264_Dpb->dpb_param.l.data[PPS_NUM_REF_IDX_L1_ACTIVE_MINUS1]; */ + + pSlice->p_Vid = p_Vid; + pSlice->p_Dpb = p_Dpb; + /* + p_H264_Dpb->colocated_buf_size = + p_H264_Dpb->dpb_param.l.data[FRAME_SIZE_IN_MB] * 96;*/ + pSlice->first_mb_in_slice = + p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE]; + pSlice->mode_8x8_flags = p_H264_Dpb->dpb_param.l.data[MODE_8X8_FLAGS]; + pSlice->picture_structure_mmco = + p_H264_Dpb->dpb_param.dpb.picture_structure_mmco; + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s slice_type is %d, num_ref_idx_active[0,1]=%d,%d nal_reference_idc %d pic struct 0x%x(mmco stru 0x%x)\n", + __func__, pSlice->slice_type, + pSlice->num_ref_idx_active[LIST_0], + pSlice->num_ref_idx_active[LIST_1], + 
pSlice->nal_reference_idc, + pSlice->structure, + pSlice->picture_structure_mmco); +#ifdef ERROR_CHECK + if (pSlice->num_ref_idx_active[LIST_0] >= MAX_LIST_SIZE) { + pSlice->num_ref_idx_active[LIST_0] = MAX_LIST_SIZE - 1; + p_H264_Dpb->dpb_error_flag = __LINE__; + } + if (pSlice->num_ref_idx_active[LIST_1] >= MAX_LIST_SIZE) { + pSlice->num_ref_idx_active[LIST_1] = MAX_LIST_SIZE - 1; + p_H264_Dpb->dpb_error_flag = __LINE__; + } +#endif + +#if 1 + /* dec_ref_pic_marking_buffer */ + pSlice->adaptive_ref_pic_buffering_flag = 0; + if (pSlice->nal_reference_idc) { + for (i = 0, j = 0; i < 44; j++) { + unsigned short val; + struct DecRefPicMarking_s *tmp_drpm = + &pSlice->dec_ref_pic_marking_buffer[j]; + memset(tmp_drpm, 0, sizeof(struct DecRefPicMarking_s)); + val = tmp_drpm-> + memory_management_control_operation = + mmco_cmd[i++]; + tmp_drpm->Next = NULL; + if (j > 0) { + pSlice-> + dec_ref_pic_marking_buffer[j - 1].Next = + tmp_drpm; + } + if (val == 0 || i >= 44) + break; + pSlice->adaptive_ref_pic_buffering_flag = 1; + if ((val == 1) || (val == 3)) { + tmp_drpm->difference_of_pic_nums_minus1 = + mmco_cmd[i++]; + } + if (val == 2) + tmp_drpm->long_term_pic_num = mmco_cmd[i++]; + if (i >= 44) + break; + if ((val == 3) || (val == 6)) + tmp_drpm->long_term_frame_idx = mmco_cmd[i++]; + if (val == 4) { + tmp_drpm->max_long_term_frame_idx_plus1 = + mmco_cmd[i++]; + } + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "dec_ref_pic_marking_buffer[%d]:operation %x diff_pic_minus1 %x long_pic_num %x long_frame_idx %x max_long_frame_idx_plus1 %x\n", + j, + tmp_drpm->memory_management_control_operation, + tmp_drpm->difference_of_pic_nums_minus1, + tmp_drpm->long_term_pic_num, + tmp_drpm->long_term_frame_idx, + tmp_drpm->max_long_term_frame_idx_plus1); + } + } + + ref_pic_list_reordering(p_H264_Dpb, pSlice); +#endif + + /*VUI*/ + p_H264_Dpb->vui_status = p_H264_Dpb->dpb_param.l.data[VUI_STATUS]; + p_H264_Dpb->aspect_ratio_idc = + 
p_H264_Dpb->dpb_param.l.data[ASPECT_RATIO_IDC]; + p_H264_Dpb->aspect_ratio_sar_width = + p_H264_Dpb->dpb_param.l.data[ASPECT_RATIO_SAR_WIDTH]; + p_H264_Dpb->aspect_ratio_sar_height = + p_H264_Dpb->dpb_param.l.data[ASPECT_RATIO_SAR_HEIGHT]; + + p_H264_Dpb->fixed_frame_rate_flag = p_H264_Dpb->dpb_param.l.data[ + FIXED_FRAME_RATE_FLAG]; + p_H264_Dpb->num_units_in_tick = + p_H264_Dpb->dpb_param.l.data[NUM_UNITS_IN_TICK]; + p_H264_Dpb->time_scale = p_H264_Dpb->dpb_param.l.data[TIME_SCALE] | + (p_H264_Dpb->dpb_param.l.data[TIME_SCALE + 1] << 16); + + p_H264_Dpb->bitstream_restriction_flag = + (p_H264_Dpb->dpb_param.l.data[SPS_FLAGS2] >> 3) & 0x1; + p_H264_Dpb->num_reorder_frames = + p_H264_Dpb->dpb_param.l.data[NUM_REORDER_FRAMES]; + p_H264_Dpb->max_dec_frame_buffering = + p_H264_Dpb->dpb_param.l.data[MAX_BUFFER_FRAME]; + + /**/ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s return\n", __func__); +} + +static void decode_poc(struct VideoParameters *p_Vid, struct Slice *pSlice) +{ + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Vid, + struct h264_dpb_stru, mVideo); + struct SPSParameters *active_sps = p_Vid->active_sps; + int i; + /* for POC mode 0: */ + unsigned int MaxPicOrderCntLsb = (1 << + (active_sps->log2_max_pic_order_cnt_lsb_minus4 + 4)); + + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DEBUG_POC, + "%s:pic_order_cnt_type %d, idr_flag %d last_has_mmco_5 %d last_pic_bottom_field %d pic_order_cnt_lsb %d PrevPicOrderCntLsb %d\r\n", + __func__, + active_sps->pic_order_cnt_type, + pSlice->idr_flag, + p_Vid->last_has_mmco_5, + p_Vid->last_pic_bottom_field, + pSlice->pic_order_cnt_lsb, + p_Vid->PrevPicOrderCntLsb + ); + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DEBUG_POC, + "%s:field_pic_flag %d, bottom_field_flag %d frame_num %d PreviousFrameNum %d PreviousFrameNumOffset %d ax_frame_num %d num_ref_frames_in_pic_order_cnt_cycle %d offset_for_non_ref_pic %d\r\n", + __func__, + pSlice->field_pic_flag, + pSlice->bottom_field_flag, + 
pSlice->frame_num, + p_Vid->PreviousFrameNum, + p_Vid->PreviousFrameNumOffset, + p_Vid->max_frame_num, + active_sps->num_ref_frames_in_pic_order_cnt_cycle, + active_sps->offset_for_non_ref_pic + ); + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DEBUG_POC, + "%s: delta_pic_order_cnt %d %d nal_reference_idc %d\r\n", + __func__, + pSlice->delta_pic_order_cnt[0], pSlice->delta_pic_order_cnt[1], + pSlice->nal_reference_idc + ); + + + switch (active_sps->pic_order_cnt_type) { + case 0: /* POC MODE 0 */ + /* 1st */ + if (pSlice->idr_flag) { + p_Vid->PrevPicOrderCntMsb = 0; + p_Vid->PrevPicOrderCntLsb = 0; + } else { + if (p_Vid->last_has_mmco_5) { + if (p_Vid->last_pic_bottom_field) { + p_Vid->PrevPicOrderCntMsb = 0; + p_Vid->PrevPicOrderCntLsb = 0; + } else { + p_Vid->PrevPicOrderCntMsb = 0; + p_Vid->PrevPicOrderCntLsb = + pSlice->toppoc; + } + } + } + /* Calculate the MSBs of current picture */ + if (pSlice->pic_order_cnt_lsb < p_Vid->PrevPicOrderCntLsb && + (p_Vid->PrevPicOrderCntLsb - pSlice->pic_order_cnt_lsb) >= + (MaxPicOrderCntLsb / 2)) + pSlice->PicOrderCntMsb = p_Vid->PrevPicOrderCntMsb + + MaxPicOrderCntLsb; + else if (pSlice->pic_order_cnt_lsb > + p_Vid->PrevPicOrderCntLsb && + (pSlice->pic_order_cnt_lsb - + p_Vid->PrevPicOrderCntLsb) > + (MaxPicOrderCntLsb / 2)) + pSlice->PicOrderCntMsb = p_Vid->PrevPicOrderCntMsb - + MaxPicOrderCntLsb; + else + pSlice->PicOrderCntMsb = p_Vid->PrevPicOrderCntMsb; + + /* 2nd */ + if (pSlice->field_pic_flag == 0) { + /* frame pix */ + pSlice->toppoc = pSlice->PicOrderCntMsb + + pSlice->pic_order_cnt_lsb; + pSlice->bottompoc = pSlice->toppoc + + pSlice->delta_pic_order_cnt_bottom; + pSlice->ThisPOC = pSlice->framepoc = + (pSlice->toppoc < pSlice->bottompoc) ? 
+ pSlice->toppoc : pSlice->bottompoc; + /* POC200301 */ + } else if (pSlice->bottom_field_flag == 0) { + /* top field */ + pSlice->ThisPOC = pSlice->toppoc = + pSlice->PicOrderCntMsb + + pSlice->pic_order_cnt_lsb; + } else { + /* bottom field */ + pSlice->ThisPOC = pSlice->bottompoc = + pSlice->PicOrderCntMsb + + pSlice->pic_order_cnt_lsb; + } + pSlice->framepoc = pSlice->ThisPOC; + + p_Vid->ThisPOC = pSlice->ThisPOC; + + /* if ( pSlice->frame_num != p_Vid->PreviousFrameNum) + * Seems redundant + */ + p_Vid->PreviousFrameNum = pSlice->frame_num; + + if (pSlice->nal_reference_idc) { + p_Vid->PrevPicOrderCntLsb = pSlice->pic_order_cnt_lsb; + p_Vid->PrevPicOrderCntMsb = pSlice->PicOrderCntMsb; + } + + break; + + case 1: /* POC MODE 1 */ + /* 1st */ + if (pSlice->idr_flag) { + p_Vid->FrameNumOffset = 0; /* first pix of IDRGOP */ + if (pSlice->frame_num) + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "frame_num not equal to zero in IDR picture %d", + -1020); + } else { + if (p_Vid->last_has_mmco_5) { + p_Vid->PreviousFrameNumOffset = 0; + p_Vid->PreviousFrameNum = 0; + } + if (pSlice->frame_num < p_Vid->PreviousFrameNum) { + /* not first pix of IDRGOP */ + p_Vid->FrameNumOffset = + p_Vid->PreviousFrameNumOffset + + p_Vid->max_frame_num; + } else { + p_Vid->FrameNumOffset = + p_Vid->PreviousFrameNumOffset; + } + } + + /* 2nd */ + if (active_sps->num_ref_frames_in_pic_order_cnt_cycle) + pSlice->AbsFrameNum = + p_Vid->FrameNumOffset + pSlice->frame_num; + else + pSlice->AbsFrameNum = 0; + if ((!pSlice->nal_reference_idc) && pSlice->AbsFrameNum > 0) + pSlice->AbsFrameNum--; + + /* 3rd */ + p_Vid->ExpectedDeltaPerPicOrderCntCycle = 0; + + if (active_sps->num_ref_frames_in_pic_order_cnt_cycle) + for (i = 0; i < (int) active_sps-> + num_ref_frames_in_pic_order_cnt_cycle; i++) { + p_Vid->ExpectedDeltaPerPicOrderCntCycle += + active_sps->offset_for_ref_frame[i]; + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DEBUG_POC, + "%s: offset_for_ref_frame %d\r\n", 
+ __func__, + active_sps-> + offset_for_ref_frame[i]); + } + + if (pSlice->AbsFrameNum) { + p_Vid->PicOrderCntCycleCnt = + (pSlice->AbsFrameNum - 1) / + active_sps-> + num_ref_frames_in_pic_order_cnt_cycle; + p_Vid->FrameNumInPicOrderCntCycle = + (pSlice->AbsFrameNum - 1) % + active_sps-> + num_ref_frames_in_pic_order_cnt_cycle; + p_Vid->ExpectedPicOrderCnt = + p_Vid->PicOrderCntCycleCnt * + p_Vid->ExpectedDeltaPerPicOrderCntCycle; + for (i = 0; i <= (int)p_Vid-> + FrameNumInPicOrderCntCycle; i++) { + p_Vid->ExpectedPicOrderCnt += + active_sps->offset_for_ref_frame[i]; + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DEBUG_POC, + "%s: offset_for_ref_frame %d\r\n", + __func__, + active_sps-> + offset_for_ref_frame[i]); + } + } else + p_Vid->ExpectedPicOrderCnt = 0; + + if (!pSlice->nal_reference_idc) + p_Vid->ExpectedPicOrderCnt += + active_sps->offset_for_non_ref_pic; + + if (pSlice->field_pic_flag == 0) { + /* frame pix */ + pSlice->toppoc = p_Vid->ExpectedPicOrderCnt + + pSlice->delta_pic_order_cnt[0]; + pSlice->bottompoc = pSlice->toppoc + + active_sps->offset_for_top_to_bottom_field + + pSlice->delta_pic_order_cnt[1]; + pSlice->ThisPOC = pSlice->framepoc = + (pSlice->toppoc < pSlice->bottompoc) ? 
+ pSlice->toppoc : pSlice->bottompoc; + /* POC200301 */ + } else if (pSlice->bottom_field_flag == 0) { + /* top field */ + pSlice->ThisPOC = pSlice->toppoc = + p_Vid->ExpectedPicOrderCnt + + pSlice->delta_pic_order_cnt[0]; + } else { + /* bottom field */ + pSlice->ThisPOC = pSlice->bottompoc = + p_Vid->ExpectedPicOrderCnt + + active_sps->offset_for_top_to_bottom_field + + pSlice->delta_pic_order_cnt[0]; + } + pSlice->framepoc = pSlice->ThisPOC; + + p_Vid->PreviousFrameNum = pSlice->frame_num; + p_Vid->PreviousFrameNumOffset = p_Vid->FrameNumOffset; + + break; + + + case 2: /* POC MODE 2 */ + if (pSlice->idr_flag) { /* IDR picture */ + p_Vid->FrameNumOffset = 0; /* first pix of IDRGOP */ + pSlice->ThisPOC = pSlice->framepoc = pSlice->toppoc = + pSlice->bottompoc = 0; + if (pSlice->frame_num) + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "frame_num not equal to zero in IDR picture %d", + -1020); + } else { + if (p_Vid->last_has_mmco_5) { + p_Vid->PreviousFrameNum = 0; + p_Vid->PreviousFrameNumOffset = 0; + } + if (pSlice->frame_num < p_Vid->PreviousFrameNum) + p_Vid->FrameNumOffset = + p_Vid->PreviousFrameNumOffset + + p_Vid->max_frame_num; + else + p_Vid->FrameNumOffset = + p_Vid->PreviousFrameNumOffset; + + pSlice->AbsFrameNum = p_Vid->FrameNumOffset + + pSlice->frame_num; + if (!pSlice->nal_reference_idc) + pSlice->ThisPOC = + (2 * pSlice->AbsFrameNum - 1); + else + pSlice->ThisPOC = (2 * pSlice->AbsFrameNum); + + if (pSlice->field_pic_flag == 0) + pSlice->toppoc = pSlice->bottompoc = + pSlice->framepoc = pSlice->ThisPOC; + else if (pSlice->bottom_field_flag == 0) + pSlice->toppoc = pSlice->framepoc = + pSlice->ThisPOC; + else + pSlice->bottompoc = pSlice->framepoc = + pSlice->ThisPOC; + } + + p_Vid->PreviousFrameNum = pSlice->frame_num; + p_Vid->PreviousFrameNumOffset = p_Vid->FrameNumOffset; + break; + + + default: + /* error must occurs */ + /* assert( 1==0 ); */ + break; + } +} + +void fill_frame_num_gap(struct VideoParameters *p_Vid, 
struct Slice *currSlice) +{ + struct h264_dpb_stru *p_H264_Dpb = + container_of(p_Vid, struct h264_dpb_stru, mVideo); + struct SPSParameters *active_sps = p_Vid->active_sps; + int CurrFrameNum; + int UnusedShortTermFrameNum; + struct StorablePicture *picture = NULL; + int tmp1 = currSlice->delta_pic_order_cnt[0]; + int tmp2 = currSlice->delta_pic_order_cnt[1]; + int ret; + + currSlice->delta_pic_order_cnt[0] = + currSlice->delta_pic_order_cnt[1] = 0; + + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "A gap in frame number is found, try to fill it.(pre_frame_num %d, max_frame_num %d\n", + p_Vid->pre_frame_num, p_Vid->max_frame_num + ); + + UnusedShortTermFrameNum = (p_Vid->pre_frame_num + 1) + % p_Vid->max_frame_num; + CurrFrameNum = currSlice->frame_num; /*p_Vid->frame_num;*/ + + while (CurrFrameNum != UnusedShortTermFrameNum) { + /*pr_err("CurrFrameNum = %d, UnusedShortTermFrameNum = %d\n", CurrFrameNum, UnusedShortTermFrameNum);*/ + /*picture = alloc_storable_picture + *(p_Vid, FRAME, p_Vid->width, + *p_Vid->height, + *p_Vid->width_cr, + *p_Vid->height_cr, 1); + */ + picture = get_new_pic(p_H264_Dpb, + p_H264_Dpb->mSlice.structure, + /*p_Vid->width, p_Vid->height, + *p_Vid->width_cr, + p_Vid->height_cr,*/ 1); + + if (picture == NULL) { + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, + "%s Error: get_new_pic return NULL\r\n", + __func__); + /*h264_debug_flag |= PRINT_FLAG_DUMP_DPB;*/ + dump_dpb(p_Dpb, 0); + return; + } + + picture->colocated_buf_index = -1; + picture->buf_spec_num = -1; + picture->buf_spec_is_alloced = 0; + + picture->coded_frame = 1; + picture->pic_num = UnusedShortTermFrameNum; + picture->frame_num = UnusedShortTermFrameNum; + picture->non_existing = 1; + picture->is_output = 1; + picture->used_for_reference = 1; + picture->adaptive_ref_pic_buffering_flag = 0; + #if (MVC_EXTENSION_ENABLE) + picture->view_id = currSlice->view_id; + #endif + + 
currSlice->frame_num = UnusedShortTermFrameNum; + if (active_sps->pic_order_cnt_type != 0) { + /*decode_poc(p_Vid, p_Vid->ppSliceList[0]);*/ + decode_poc(&p_H264_Dpb->mVideo, &p_H264_Dpb->mSlice); + } + picture->top_poc = currSlice->toppoc; + picture->bottom_poc = currSlice->bottompoc; + picture->frame_poc = currSlice->framepoc; + picture->poc = currSlice->framepoc; + + ret = store_picture_in_dpb(p_H264_Dpb, picture, 0); + if (ret == -1) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_ERROR, + "%s Error: store_picture_in_dpb failed, break\n", + __func__); + release_picture(p_H264_Dpb, picture); + bufmgr_force_recover(p_H264_Dpb); + return; + } else if (ret == -2) + release_picture(p_H264_Dpb, picture); + + picture = NULL; + p_Vid->pre_frame_num = UnusedShortTermFrameNum; + UnusedShortTermFrameNum = + (UnusedShortTermFrameNum + 1) % + p_Vid->max_frame_num; + } + currSlice->delta_pic_order_cnt[0] = tmp1; + currSlice->delta_pic_order_cnt[1] = tmp2; + currSlice->frame_num = CurrFrameNum; +} + +void dpb_init_global(struct h264_dpb_stru *p_H264_Dpb, + int id, int actual_dpb_size, int max_reference_size) +{ + int i; + + init_dummy_fs(); + + memset(&p_H264_Dpb->mDPB, 0, sizeof(struct DecodedPictureBuffer)); + + memset(&p_H264_Dpb->mSlice, 0, sizeof(struct Slice)); + memset(&p_H264_Dpb->mVideo, 0, sizeof(struct VideoParameters)); + memset(&p_H264_Dpb->mSPS, 0, sizeof(struct SPSParameters)); + + for (i = 0; i < DPB_SIZE_MAX; i++) { + memset(&(p_H264_Dpb->mFrameStore[i]), 0, + sizeof(struct FrameStore)); + } + + for (i = 0; i < MAX_PIC_BUF_NUM; i++) { + memset(&(p_H264_Dpb->m_PIC[i]), 0, + sizeof(struct StorablePicture)); + p_H264_Dpb->m_PIC[i].index = i; + } + p_H264_Dpb->decoder_index = id; + + /* make sure dpb_init_global + *can be called during decoding + *(in DECODE_STATE_IDLE or DECODE_STATE_READY state) + */ + p_H264_Dpb->mDPB.size = actual_dpb_size; + p_H264_Dpb->max_reference_size = max_reference_size; + p_H264_Dpb->poc_even_odd_flag = 0; +} + +static void 
init_picture(struct h264_dpb_stru *p_H264_Dpb, + struct Slice *currSlice, + struct StorablePicture *dec_picture) +{ + /* struct VideoParameters *p_Vid = &(p_H264_Dpb->mVideo); */ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s dec_picture %p\n", __func__, dec_picture); + dec_picture->top_poc = currSlice->toppoc; + dec_picture->bottom_poc = currSlice->bottompoc; + dec_picture->frame_poc = currSlice->framepoc; + switch (currSlice->structure) { + case TOP_FIELD: { + dec_picture->poc = currSlice->toppoc; + /* p_Vid->number *= 2; */ + break; + } + case BOTTOM_FIELD: { + dec_picture->poc = currSlice->bottompoc; + /* p_Vid->number = p_Vid->number * 2 + 1; */ + break; + } + case FRAME: { + dec_picture->poc = currSlice->framepoc; + break; + } + default: + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "p_Vid->structure not initialized %d\n", 235); + } + + /* dec_picture->slice_type = p_Vid->type; */ + dec_picture->used_for_reference = (currSlice->nal_reference_idc != 0); + dec_picture->idr_flag = currSlice->idr_flag; + dec_picture->no_output_of_prior_pics_flag = + currSlice->no_output_of_prior_pics_flag; + dec_picture->long_term_reference_flag = + currSlice->long_term_reference_flag; +#if 1 + dec_picture->adaptive_ref_pic_buffering_flag = + currSlice->adaptive_ref_pic_buffering_flag; + dec_picture->dec_ref_pic_marking_buffer = + &currSlice->dec_ref_pic_marking_buffer[0]; +#endif + /* currSlice->dec_ref_pic_marking_buffer = NULL; */ + + /* dec_picture->mb_aff_frame_flag = currSlice->mb_aff_frame_flag; */ + /* dec_picture->PicWidthInMbs = p_Vid->PicWidthInMbs; */ + + /* p_Vid->get_mb_block_pos = + * dec_picture->mb_aff_frame_flag ? get_mb_block_pos_mbaff : + * get_mb_block_pos_normal; + */ + /* p_Vid->getNeighbour = + * dec_picture->mb_aff_frame_flag ? 
getAffNeighbour : + * getNonAffNeighbour; + */ + + dec_picture->pic_num = currSlice->frame_num; + dec_picture->frame_num = currSlice->frame_num; + + /* dec_picture->recovery_frame = + * (unsigned int) ((int) currSlice->frame_num == + * p_Vid->recovery_frame_num); + */ + + dec_picture->coded_frame = (currSlice->structure == FRAME); + + /* dec_picture->chroma_format_idc = active_sps->chroma_format_idc; */ + + /* dec_picture->frame_mbs_only_flag = + * active_sps->frame_mbs_only_flag; + */ + /* dec_picture->frame_cropping_flag = + * active_sps->frame_cropping_flag; + */ + + if ((currSlice->picture_structure_mmco & 0x3) == 3) { + dec_picture->mb_aff_frame_flag = 1; + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s, picture_structure_mmco is %x, set mb_aff_frame_flag to 1\n", + __func__, + currSlice->picture_structure_mmco); + } + + if (currSlice->pic_struct < PIC_INVALID) { + dec_picture->pic_struct = currSlice->pic_struct; + } else { + dec_picture->pic_struct = PIC_INVALID; + } + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s pic_struct = %d\n", __func__, dec_picture->pic_struct); +} + +void dump_pic(struct h264_dpb_stru *p_H264_Dpb) +{ + int ii; + struct StorablePicture *pic; + for (ii = 0; ii < MAX_PIC_BUF_NUM; ii++) { + pic = &(p_H264_Dpb->m_PIC[ii]); + if (pic->is_used) { + dpb_print(p_H264_Dpb->decoder_index, 0, + "pic(%d,%d) poc %d is_used %d bufspec %d colbuf %d for_ref %d long_term %d pre_out %d output %d nonexist %d data_flag 0x%x\n", + ii, pic->index, + pic->poc, + pic->is_used, + pic->buf_spec_num, + pic->colocated_buf_index, + pic->used_for_reference, + pic->is_long_term, + pic->pre_output, + pic->is_output, + pic->non_existing, + pic->data_flag); + } + } +} + +/* +static void is_pic_used_by_dpb(struct h264_dpb_stru *p_H264_Dpb, + struct StorablePicture *pic) +{ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + unsigned i; + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->top_field == pic || + 
p_Dpb->fs[i]->bottom_field == pic || + p_Dpb->fs[i]->frame == pic + ) + break; + } + if (i < p_Dpb->used_size) + return 1; + return 0; +} +*/ + +static struct StorablePicture *get_new_pic(struct h264_dpb_stru *p_H264_Dpb, + enum PictureStructure structure, unsigned char is_output) +{ + struct StorablePicture *s = NULL; + struct StorablePicture *pic; + struct VideoParameters *p_Vid = &(p_H264_Dpb->mVideo); + /* recycle un-used pic */ + int ii = 0; + + for (ii = 0; ii < MAX_PIC_BUF_NUM; ii++) { + pic = &(p_H264_Dpb->m_PIC[ii]); + if (pic->is_used == 0) { + pic->is_used = 1; + s = pic; + break; + } + } + + if (s) { + s->buf_spec_is_alloced = 0; + s->pic_num = 0; + s->frame_num = 0; + s->long_term_frame_idx = 0; + s->long_term_pic_num = 0; + s->used_for_reference = 0; + s->is_long_term = 0; + s->non_existing = 0; + s->is_output = 0; + s->pre_output = 0; + s->max_slice_id = 0; + s->data_flag &= ~(ERROR_FLAG | NODISP_FLAG); +#if (MVC_EXTENSION_ENABLE) + s->view_id = -1; +#endif + + s->structure = structure; + +#if 0 + s->size_x = size_x; + s->size_y = size_y; + s->size_x_cr = size_x_cr; + s->size_y_cr = size_y_cr; + s->size_x_m1 = size_x - 1; + s->size_y_m1 = size_y - 1; + s->size_x_cr_m1 = size_x_cr - 1; + s->size_y_cr_m1 = size_y_cr - 1; + + s->top_field = p_Vid->no_reference_picture; + s->bottom_field = p_Vid->no_reference_picture; + s->frame = p_Vid->no_reference_picture; +#endif + /* s->dec_ref_pic_marking_buffer = NULL; */ + + s->coded_frame = 0; + s->mb_aff_frame_flag = 0; + + s->top_poc = s->bottom_poc = s->poc = 0; + s->seiHasTone_mapping = 0; + s->frame_mbs_only_flag = p_Vid->active_sps->frame_mbs_only_flag; + + if (!p_Vid->active_sps->frame_mbs_only_flag && + structure != FRAME) { + int i, j; + + for (j = 0; j < MAX_NUM_SLICES; j++) { + for (i = 0; i < 2; i++) { + /* s->listX[j][i] = + *calloc(MAX_LIST_SIZE, + *sizeof (struct StorablePicture *)); + *+1 for reordering ??? 
+ + *if (NULL == s->listX[j][i]) + *no_mem_exit("alloc_storable_picture: + *s->listX[i]"); + */ + } + } + } + } else + p_H264_Dpb->buf_alloc_fail = 1; + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s %p\n", __func__, s); + return s; +} + +static void free_picture(struct h264_dpb_stru *p_H264_Dpb, + struct StorablePicture *pic) +{ + if (pic == NULL || pic->index < 0 || + pic->index >= MAX_PIC_BUF_NUM) + return; + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s %p %d\n", __func__, pic, pic->index); + /* assert(pic->index<MAX_PIC_BUF_NUM); */ + p_H264_Dpb->m_PIC[pic->index].is_used = 0; +} + +static void gen_field_ref_ids(struct VideoParameters *p_Vid, + struct StorablePicture *p) +{ + int i, j; + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Vid, + struct h264_dpb_stru, mVideo); + /* ! Generate Frame parameters from field information. */ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + + /* copy the list; */ + for (j = 0; j < p_Vid->iSliceNumOfCurrPic; j++) { + if (p->listX[j][LIST_0]) { + p->listXsize[j][LIST_0] = + p_Vid->ppSliceList[j]->listXsize[LIST_0]; + for (i = 0; i < p->listXsize[j][LIST_0]; i++) + p->listX[j][LIST_0][i] = + p_Vid->ppSliceList[j]->listX[LIST_0][i]; + } + if (p->listX[j][LIST_1]) { + p->listXsize[j][LIST_1] = + p_Vid->ppSliceList[j]->listXsize[LIST_1]; + for (i = 0; i < p->listXsize[j][LIST_1]; i++) + p->listX[j][LIST_1][i] = + p_Vid->ppSliceList[j]->listX[LIST_1][i]; + } + } +} + +static void init_dpb(struct h264_dpb_stru *p_H264_Dpb, int type) +{ + unsigned int i; + struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo; + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + struct SPSParameters *active_sps = &p_H264_Dpb->mSPS; + + p_Vid->active_sps = active_sps; + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + + p_Dpb->p_Vid = p_Vid; + if (p_Dpb->init_done) { + /* free_dpb(p_Dpb); */ + if (p_Vid->no_reference_picture) { + 
free_picture(p_H264_Dpb, p_Vid->no_reference_picture); + p_Vid->no_reference_picture = NULL; + } + p_Dpb->init_done = 0; + } + + /* p_Dpb->size = 10; //active_sps->max_dpb_size; //16; + * getDpbSize(p_Vid, active_sps) + + * p_Vid->p_Inp->dpb_plus[type==2? 1: 0]; + * p_Dpb->size = active_sps->max_dpb_size; //16; + * getDpbSize(p_Vid, active_sps) + + * p_Vid->p_Inp->dpb_plus[type==2? 1: 0]; + * p_Dpb->size initialzie in vh264.c + */ + p_Dpb->num_ref_frames = active_sps->num_ref_frames; + /* p_Dpb->num_ref_frames initialzie in vh264.c */ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s dpb_size is %d num_ref_frames = %d (%d)\n", + __func__, p_Dpb->size, + p_Dpb->num_ref_frames, + active_sps->num_ref_frames); + if (active_sps->num_ref_frames == 0xffff) { + dpb_print(p_H264_Dpb->decoder_index, 0, + "!!!Warning, num_ref_frames = %d is invalid\n", + active_sps->num_ref_frames); + } + +#if 0 + /* ??? */ +#if (MVC_EXTENSION_ENABLE) + if ((unsigned int)active_sps->max_dec_frame_buffering < + active_sps->num_ref_frames) { +#else + if (p_Dpb->size < active_sps->num_ref_frames) { +#endif + error( + "DPB size at specified level is smaller than the specified number of reference frames. 
This is not allowed.\n", + 1000); + } +#endif + + p_Dpb->used_size = 0; + p_Dpb->last_picture = NULL; + + p_Dpb->ref_frames_in_buffer = 0; + p_Dpb->ltref_frames_in_buffer = 0; + +#if 0 + p_Dpb->fs = calloc(p_Dpb->size, sizeof(struct FrameStore *)); + if (NULL == p_Dpb->fs) + no_mem_exit("init_dpb: p_Dpb->fs"); + + p_Dpb->fs_ref = calloc(p_Dpb->size, sizeof(struct FrameStore *)); + if (NULL == p_Dpb->fs_ref) + no_mem_exit("init_dpb: p_Dpb->fs_ref"); + + p_Dpb->fs_ltref = calloc(p_Dpb->size, sizeof(struct FrameStore *)); + if (NULL == p_Dpb->fs_ltref) + no_mem_exit("init_dpb: p_Dpb->fs_ltref"); +#endif + +#if (MVC_EXTENSION_ENABLE) + p_Dpb->fs_ilref = calloc(1, sizeof(struct FrameStore *)); + if (NULL == p_Dpb->fs_ilref) + no_mem_exit("init_dpb: p_Dpb->fs_ilref"); +#endif + + for (i = 0; i < p_Dpb->size; i++) { + p_Dpb->fs[i] = &(p_H264_Dpb->mFrameStore[i]); + /* alloc_frame_store(); */ + p_Dpb->fs[i]->index = i; + p_Dpb->fs_ref[i] = NULL; + p_Dpb->fs_ltref[i] = NULL; + p_Dpb->fs[i]->layer_id = 0; /* MVC_INIT_VIEW_ID; */ +#if (MVC_EXTENSION_ENABLE) + p_Dpb->fs[i]->view_id = MVC_INIT_VIEW_ID; + p_Dpb->fs[i]->inter_view_flag[0] = + p_Dpb->fs[i]->inter_view_flag[1] = 0; + p_Dpb->fs[i]->anchor_pic_flag[0] = + p_Dpb->fs[i]->anchor_pic_flag[1] = 0; +#endif + } +#if (MVC_EXTENSION_ENABLE) + if (type == 2) { + p_Dpb->fs_ilref[0] = alloc_frame_store(); + /* These may need some cleanups */ + p_Dpb->fs_ilref[0]->view_id = MVC_INIT_VIEW_ID; + p_Dpb->fs_ilref[0]->inter_view_flag[0] = + p_Dpb->fs_ilref[0]->inter_view_flag[1] = 0; + p_Dpb->fs_ilref[0]->anchor_pic_flag[0] = + p_Dpb->fs_ilref[0]->anchor_pic_flag[1] = 0; + /* given that this is in a different buffer, + * do we even need proc_flag anymore? 
+ */ + } else + p_Dpb->fs_ilref[0] = NULL; +#endif + + /* + *for (i = 0; i < 6; i++) + *{ + *currSlice->listX[i] = + * calloc(MAX_LIST_SIZE, sizeof (struct StorablePicture *)); + * +1 for reordering + *if (NULL == currSlice->listX[i]) + *no_mem_exit("init_dpb: currSlice->listX[i]"); + *} + */ + /* allocate a dummy storable picture */ + if (!p_Vid->no_reference_picture) { + p_Vid->no_reference_picture = get_new_pic(p_H264_Dpb, + FRAME, + /*p_Vid->width, p_Vid->height, + *p_Vid->width_cr, p_Vid->height_cr, + */ + 1); + p_Vid->no_reference_picture->top_field = + p_Vid->no_reference_picture; + p_Vid->no_reference_picture->bottom_field = + p_Vid->no_reference_picture; + p_Vid->no_reference_picture->frame = + p_Vid->no_reference_picture; + } + p_Dpb->last_output_poc = INT_MIN; + +#if (MVC_EXTENSION_ENABLE) + p_Dpb->last_output_view_id = -1; +#endif + + p_Vid->last_has_mmco_5 = 0; + + init_colocate_buf(p_H264_Dpb, p_H264_Dpb->max_reference_size); + + p_Dpb->init_done = 1; + +#if 0 +/* ??? */ + /* picture error concealment */ + if (p_Vid->conceal_mode != 0 && !p_Vid->last_out_fs) + p_Vid->last_out_fs = alloc_frame_store(); +#endif +} + +static void dpb_split_field(struct h264_dpb_stru *p_H264_Dpb, + struct FrameStore *fs) +{ + struct StorablePicture *fs_top = NULL, *fs_btm = NULL; + struct StorablePicture *frame = fs->frame; + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s %p %p\n", __func__, fs, frame); + + fs->poc = frame->poc; + + if (!frame->frame_mbs_only_flag) { + fs_top = fs->top_field = get_new_pic(p_H264_Dpb, + TOP_FIELD, + /* frame->size_x, frame->size_y, + *frame->size_x_cr, frame->size_y_cr, + */ + 1); + fs_btm = fs->bottom_field = get_new_pic(p_H264_Dpb, + BOTTOM_FIELD, + /*frame->size_x, frame->size_y, + *frame->size_x_cr, frame->size_y_cr, + */ + 1); + if (fs_top == NULL || fs_btm == NULL) + return; +#if 1 +/* rain */ + fs_top->buf_spec_num = frame->buf_spec_num; + fs_btm->buf_spec_num = frame->buf_spec_num; + + 
fs_top->colocated_buf_index = frame->colocated_buf_index; + fs_btm->colocated_buf_index = frame->colocated_buf_index; + + fs_top->data_flag = frame->data_flag; + fs_btm->data_flag = frame->data_flag; +#endif + fs_top->poc = frame->top_poc; + fs_btm->poc = frame->bottom_poc; + +#if (MVC_EXTENSION_ENABLE) + fs_top->view_id = frame->view_id; + fs_btm->view_id = frame->view_id; +#endif + + fs_top->frame_poc = frame->frame_poc; + + fs_top->bottom_poc = fs_btm->bottom_poc = frame->bottom_poc; + fs_top->top_poc = fs_btm->top_poc = frame->top_poc; + fs_btm->frame_poc = frame->frame_poc; + + fs_top->used_for_reference = fs_btm->used_for_reference + = frame->used_for_reference; + fs_top->is_long_term = fs_btm->is_long_term + = frame->is_long_term; + fs->long_term_frame_idx = fs_top->long_term_frame_idx + = fs_btm->long_term_frame_idx + = frame->long_term_frame_idx; + + fs_top->coded_frame = fs_btm->coded_frame = 1; + fs_top->mb_aff_frame_flag = fs_btm->mb_aff_frame_flag + = frame->mb_aff_frame_flag; + + frame->top_field = fs_top; + frame->bottom_field = fs_btm; + frame->frame = frame; + fs_top->bottom_field = fs_btm; + fs_top->frame = frame; + fs_top->top_field = fs_top; + fs_btm->top_field = fs_top; + fs_btm->frame = frame; + fs_btm->bottom_field = fs_btm; + +#if (MVC_EXTENSION_ENABLE) + fs_top->view_id = fs_btm->view_id = fs->view_id; + fs_top->inter_view_flag = fs->inter_view_flag[0]; + fs_btm->inter_view_flag = fs->inter_view_flag[1]; +#endif + + fs_top->chroma_format_idc = fs_btm->chroma_format_idc = + frame->chroma_format_idc; + fs_top->iCodingType = fs_btm->iCodingType = frame->iCodingType; + } else { + fs->top_field = NULL; + fs->bottom_field = NULL; + frame->top_field = NULL; + frame->bottom_field = NULL; + frame->frame = frame; + } + +} + + +static void dpb_combine_field(struct h264_dpb_stru *p_H264_Dpb, + struct FrameStore *fs) +{ + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + + if (!fs->frame) { + fs->frame = 
get_new_pic(p_H264_Dpb, + FRAME, + /* fs->top_field->size_x, fs->top_field->size_y*2, + *fs->top_field->size_x_cr, fs->top_field->size_y_cr*2, + */ + 1); + } + if (!fs->frame) + return; +#if 1 +/* rain */ + fs->frame->buf_spec_num = fs->top_field->buf_spec_num; + fs->frame->colocated_buf_index = fs->top_field->colocated_buf_index; + fs->frame->data_flag = fs->top_field->data_flag; + fs->frame->slice_type = fs->top_field->slice_type; + if (fs->bottom_field) + fs->frame->data_flag |= (fs->bottom_field->data_flag & 0xf0); +#endif + + if (fs->bottom_field) { + fs->poc = fs->frame->poc = fs->frame->frame_poc = imin( + fs->top_field->poc, fs->bottom_field->poc); + + fs->bottom_field->frame_poc = fs->top_field->frame_poc = fs->frame->poc; + + fs->bottom_field->top_poc = fs->frame->top_poc = fs->top_field->poc; + fs->top_field->bottom_poc = fs->frame->bottom_poc = + fs->bottom_field->poc; + + fs->frame->used_for_reference = (fs->top_field->used_for_reference && + fs->bottom_field->used_for_reference); + fs->frame->is_long_term = (fs->top_field->is_long_term && + fs->bottom_field->is_long_term); + } + + if (fs->frame->is_long_term) + fs->frame->long_term_frame_idx = fs->long_term_frame_idx; + + fs->frame->top_field = fs->top_field; + if (fs->bottom_field) + fs->frame->bottom_field = fs->bottom_field; + fs->frame->frame = fs->frame; + + fs->frame->coded_frame = 0; + + fs->frame->chroma_format_idc = fs->top_field->chroma_format_idc; + fs->frame->frame_cropping_flag = fs->top_field->frame_cropping_flag; + if (fs->frame->frame_cropping_flag) { + fs->frame->frame_crop_top_offset = + fs->top_field->frame_crop_top_offset; + fs->frame->frame_crop_bottom_offset = + fs->top_field->frame_crop_bottom_offset; + fs->frame->frame_crop_left_offset = + fs->top_field->frame_crop_left_offset; + fs->frame->frame_crop_right_offset = + fs->top_field->frame_crop_right_offset; + } + if (fs->bottom_field) { + fs->top_field->frame = fs->bottom_field->frame = fs->frame; + fs->top_field->top_field = 
fs->top_field; + fs->top_field->bottom_field = fs->bottom_field; + fs->bottom_field->top_field = fs->top_field; + fs->bottom_field->bottom_field = fs->bottom_field; + } + + /**/ +#if (MVC_EXTENSION_ENABLE) + fs->frame->view_id = fs->view_id; +#endif + fs->frame->iCodingType = fs->top_field->iCodingType; + if (fs->bottom_field && fs->top_field->poc < fs->bottom_field->poc) { + fs->pts = fs->top_field->pts; + fs->pts64 = fs->top_field->pts64; + /*SWPL-7105 fix */ + if ((fs->frame->slice_type == B_SLICE) + && (!fs->bottom_field->pts) &&(!fs->bottom_field->pts64)) { + fs->pts = 0; + fs->pts64 = 0; + } + fs->offset_delimiter = fs->top_field->offset_delimiter; + fs->decoded_frame_size = fs->top_field->pic_size + fs->bottom_field->pic_size; + } else if (fs->bottom_field) { + fs->pts = fs->bottom_field->pts; + fs->pts64 = fs->bottom_field->pts64; + fs->offset_delimiter = fs->bottom_field->offset_delimiter; + fs->decoded_frame_size = fs->top_field->pic_size + fs->bottom_field->pic_size; + } + /* FIELD_CODING ;*/ +} + +static void calculate_frame_no(struct VideoParameters *p_Vid, + struct StorablePicture *p) +{ +#if 0 +/* ??? */ + InputParameters *p_Inp = p_Vid->p_Inp; + /* calculate frame number */ + int psnrPOC = p_Vid->active_sps->mb_adaptive_frame_field_flag ? 
+ p->poc / (p_Inp->poc_scale) : p->poc / (p_Inp->poc_scale); + + if (psnrPOC == 0) { /* && p_Vid->psnr_number) */ + p_Vid->idr_psnr_number = + p_Vid->g_nFrame * p_Vid->ref_poc_gap / (p_Inp->poc_scale); + } + p_Vid->psnr_number = imax(p_Vid->psnr_number, + p_Vid->idr_psnr_number + psnrPOC); + + p_Vid->frame_no = p_Vid->idr_psnr_number + psnrPOC; +#endif +} + +static void insert_picture_in_dpb(struct h264_dpb_stru *p_H264_Dpb, + struct FrameStore *fs, + struct StorablePicture *p, + unsigned char data_flag) +{ + struct vdec_frames_s *mvfrm = p_H264_Dpb->vdec->mvfrm; + struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo; + /* InputParameters *p_Inp = p_Vid->p_Inp; + * dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + * "insert (%s) pic with frame_num #%d, poc %d\n", + * (p->structure == FRAME)?"FRAME": + * (p->structure == TOP_FIELD)?"TOP_FIELD": + * "BOTTOM_FIELD", p->pic_num, p->poc); + * assert (p!=NULL); + * assert (fs!=NULL); + */ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s %p %p\n", __func__, fs, p); + p_H264_Dpb->dpb_frame_count++; + fs->dpb_frame_count = p_H264_Dpb->dpb_frame_count; +#if 1 +/* rain */ +/* p->buf_spec_num = fs->index; */ + p->data_flag = data_flag; + fs->data_flag |= data_flag; + fs->buf_spec_num = p->buf_spec_num; + fs->colocated_buf_index = p->colocated_buf_index; +#endif + p->slice_type = p_H264_Dpb->mSlice.slice_type; + switch (p->structure) { + case FRAME: + fs->frame = p; + fs->is_used = 3; + fs->slice_type = p->slice_type; + fs->frame_size = p->frame_size; + fs->offset_delimiter = p->offset_delimiter; + fs->decoded_frame_size = p->pic_size; + if (p->used_for_reference) { + fs->is_reference = 3; + fs->is_orig_reference = 3; + if (p->is_long_term) { + fs->is_long_term = 3; + fs->long_term_frame_idx = + p->long_term_frame_idx; + } + } + fs->pts = p->pts; + fs->pts64 = p->pts64; + fs->layer_id = p->layer_id; +#if (MVC_EXTENSION_ENABLE) + fs->view_id = p->view_id; + fs->inter_view_flag[0] = 
fs->inter_view_flag[1] = + p->inter_view_flag; + fs->anchor_pic_flag[0] = fs->anchor_pic_flag[1] = + p->anchor_pic_flag; +#endif + /* generate field views */ + /* return; */ + dpb_split_field(p_H264_Dpb, fs); + /* return; */ + break; + case TOP_FIELD: + fs->top_field = p; + fs->is_used |= 1; + fs->layer_id = p->layer_id; + if (fs->frame_size == 0) { + fs->slice_type = p->slice_type; +// fs->pts = p->pts; +// fs->pts64 = p->pts64; + } + fs->frame_size += p->frame_size; +#if (MVC_EXTENSION_ENABLE) + fs->view_id = p->view_id; + fs->inter_view_flag[0] = p->inter_view_flag; + fs->anchor_pic_flag[0] = p->anchor_pic_flag; +#endif + if (p->used_for_reference) { + fs->is_reference |= 1; + fs->is_orig_reference |= 1; + if (p->is_long_term) { + fs->is_long_term |= 1; + fs->long_term_frame_idx = + p->long_term_frame_idx; + } + } + if (fs->is_used == 3) { + /* generate frame view */ + dpb_combine_field(p_H264_Dpb, fs); + } else { + fs->poc = p->poc; + } + gen_field_ref_ids(p_Vid, p); + break; + case BOTTOM_FIELD: + fs->bottom_field = p; + fs->is_used |= 2; + fs->layer_id = p->layer_id; + if (fs->frame_size == 0) { + fs->slice_type = p->slice_type; +// fs->pts = p->pts; +// fs->pts64 = p->pts64; + } + fs->frame_size += p->frame_size; +#if (MVC_EXTENSION_ENABLE) + fs->view_id = p->view_id; + fs->inter_view_flag[1] = p->inter_view_flag; + fs->anchor_pic_flag[1] = p->anchor_pic_flag; +#endif + if (p->used_for_reference) { + fs->is_reference |= 2; + fs->is_orig_reference |= 2; + if (p->is_long_term) { + fs->is_long_term |= 2; + fs->long_term_frame_idx = + p->long_term_frame_idx; + } + } + if (fs->is_used == 3) { + /* generate frame view */ + dpb_combine_field(p_H264_Dpb, fs); + } else { + fs->poc = p->poc; + } + gen_field_ref_ids(p_Vid, p); + break; + } + fs->frame_num = p->pic_num; + fs->recovery_frame = p->recovery_frame; + + fs->is_output = p->is_output; + fs->pre_output = p->pre_output; + + /* picture qos infomation*/ + fs->max_mv = p->max_mv; + fs->avg_mv = p->avg_mv; + 
fs->min_mv = p->min_mv; + + fs->max_qp = p->max_qp; + fs->avg_qp = p->avg_qp; + fs->min_qp = p->min_qp; + + fs->max_skip = p->max_skip; + fs->avg_skip = p->avg_skip; + fs->min_skip = p->min_skip; + + if (fs->is_used == 3) { + calculate_frame_no(p_Vid, p); +#if 0 +/* ??? */ + if (-1 != p_Vid->p_ref && !p_Inp->silent) + find_snr(p_Vid, fs->frame, &p_Vid->p_ref); +#endif + //fs->pts = p->pts; + //fs->pts64 = p->pts64; + } + if ((fs->frame != NULL) && + ((fs->bottom_field != NULL) || (fs->top_field != NULL))) { + fs->last_field_timestamp = fs->timestamp; + fs->timestamp = p->timestamp; + } else { + fs->timestamp = p->timestamp; + } + if (mvfrm) { + fs->frame_size2 = mvfrm->frame_size; + fs->hw_decode_time = mvfrm->hw_decode_time; + } +} + +void reset_frame_store(struct h264_dpb_stru *p_H264_Dpb, + struct FrameStore *f) +{ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + + if (f) { + if (f->frame) { + free_picture(p_H264_Dpb, f->frame); + f->frame = NULL; + } + if (f->top_field) { + free_picture(p_H264_Dpb, f->top_field); + f->top_field = NULL; + } + if (f->bottom_field) { + free_picture(p_H264_Dpb, f->bottom_field); + f->bottom_field = NULL; + } + + /**/ + f->is_used = 0; + f->is_reference = 0; + f->is_long_term = 0; + f->is_orig_reference = 0; + + f->is_output = 0; + f->pre_output = 0; + f->show_frame = false; + + f->frame = NULL; + f->top_field = NULL; + f->bottom_field = NULL; + + /* free(f); */ + } +} + +void unmark_for_reference(struct DecodedPictureBuffer *p_Dpb, + struct FrameStore *fs) +{ + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s %p %p %p %p\n", __func__, + fs, fs->frame, fs->top_field, fs->bottom_field); + /* return; */ + if (fs->is_used & 1) { + if (fs->top_field) + fs->top_field->used_for_reference = 0; + } + if (fs->is_used & 2) { + if (fs->bottom_field) + fs->bottom_field->used_for_reference = 0; + } + if 
(fs->is_used == 3) { + if (fs->top_field && fs->bottom_field) { + fs->top_field->used_for_reference = 0; + fs->bottom_field->used_for_reference = 0; + } + fs->frame->used_for_reference = 0; + } + + fs->is_reference = 0; + +} + +static void unmark_for_long_term_reference(struct FrameStore *fs) +{ + if (fs->is_used & 1) { + if (fs->top_field) { + fs->top_field->used_for_reference = 0; + fs->top_field->is_long_term = 0; + } + } + if (fs->is_used & 2) { + if (fs->bottom_field) { + fs->bottom_field->used_for_reference = 0; + fs->bottom_field->is_long_term = 0; + } + } + if (fs->is_used == 3) { + if (fs->top_field && fs->bottom_field) { + fs->top_field->used_for_reference = 0; + fs->top_field->is_long_term = 0; + fs->bottom_field->used_for_reference = 0; + fs->bottom_field->is_long_term = 0; + } + fs->frame->used_for_reference = 0; + fs->frame->is_long_term = 0; + } + + fs->is_reference = 0; + fs->is_long_term = 0; +} + +int get_long_term_flag_by_buf_spec_num(struct h264_dpb_stru *p_H264_Dpb, + int buf_spec_num) +{ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + unsigned int i; + + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->buf_spec_num == buf_spec_num) + return p_Dpb->fs[i]->is_long_term; + } + return -1; +} + +static void update_pic_num(struct h264_dpb_stru *p_H264_Dpb) +{ + unsigned int i; + struct Slice *currSlice = &p_H264_Dpb->mSlice; + struct VideoParameters *p_Vid = currSlice->p_Vid; + struct DecodedPictureBuffer *p_Dpb = currSlice->p_Dpb; + struct SPSParameters *active_sps = p_Vid->active_sps; + int add_top = 0, add_bottom = 0; + int max_frame_num = 1 << (active_sps->log2_max_frame_num_minus4 + 4); + + if (currSlice->structure == FRAME) { + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL || + p_Dpb->fs_ref[i]->frame == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->is_used == 3) { + if ((p_Dpb->fs_ref[i]->frame-> + 
used_for_reference) && + (!p_Dpb->fs_ref[i]->frame-> + is_long_term)) { + if (p_Dpb->fs_ref[i]->frame_num > + currSlice->frame_num) { + p_Dpb->fs_ref[i]-> + frame_num_wrap = + p_Dpb->fs_ref[i]->frame_num + - max_frame_num; + } else { + p_Dpb->fs_ref[i]-> + frame_num_wrap = + p_Dpb->fs_ref[i]->frame_num; + } + p_Dpb->fs_ref[i]->frame->pic_num = + p_Dpb->fs_ref[i]->frame_num_wrap; + } + } + } + /* update long_term_pic_num */ + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ltref[i] == NULL || + p_Dpb->fs_ltref[i]->frame == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ltref[i]->is_used == 3) { + if (p_Dpb->fs_ltref[i]->frame->is_long_term) { + p_Dpb->fs_ltref[i]->frame-> + long_term_pic_num = + p_Dpb->fs_ltref[i]->frame-> + long_term_frame_idx; + } + } + } + } else { + if (currSlice->structure == TOP_FIELD) { + add_top = 1; + add_bottom = 0; + } else { + add_top = 0; + add_bottom = 1; + } + + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->is_reference) { + if (p_Dpb->fs_ref[i]->frame_num > currSlice-> + frame_num) { + p_Dpb->fs_ref[i]->frame_num_wrap = + p_Dpb->fs_ref[i]->frame_num - + max_frame_num; + } else { + p_Dpb->fs_ref[i]->frame_num_wrap = + p_Dpb->fs_ref[i]->frame_num; + } + if (p_Dpb->fs_ref[i]->is_reference & 1) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->top_field + == NULL) { + p_H264_Dpb->dpb_error_flag = + __LINE__; + continue; + } +#endif + p_Dpb->fs_ref[i]->top_field-> + pic_num = (2 * p_Dpb->fs_ref[i]-> + frame_num_wrap) + add_top; + } + if (p_Dpb->fs_ref[i]->is_reference & 2) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->bottom_field + == NULL) { + p_H264_Dpb->dpb_error_flag = + __LINE__; + continue; + } +#endif + p_Dpb->fs_ref[i]->bottom_field-> + pic_num = (2 * p_Dpb->fs_ref[i]-> + frame_num_wrap) + add_bottom; + } 
+ } + } + /* update long_term_pic_num */ + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ltref[i] == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ltref[i]->is_long_term & 1) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ltref[i]->top_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + p_Dpb->fs_ltref[i]->top_field-> + long_term_pic_num = 2 * + p_Dpb->fs_ltref[i]->top_field-> + long_term_frame_idx + add_top; + } + if (p_Dpb->fs_ltref[i]->is_long_term & 2) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ltref[i]->bottom_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + p_Dpb->fs_ltref[i]->bottom_field-> + long_term_pic_num = 2 * + p_Dpb->fs_ltref[i]->bottom_field-> + long_term_frame_idx + add_bottom; + } + } + } +} + +static void remove_frame_from_dpb(struct h264_dpb_stru *p_H264_Dpb, int pos) +{ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + struct FrameStore *fs = p_Dpb->fs[pos]; + struct FrameStore *tmp; + unsigned int i; + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s pos %d %p\n", __func__, pos, fs); + + /* dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + * "remove frame with frame_num #%d\n", fs->frame_num); + */ + switch (fs->is_used) { + case 3: + free_picture(p_H264_Dpb, fs->frame); + free_picture(p_H264_Dpb, fs->top_field); + free_picture(p_H264_Dpb, fs->bottom_field); + fs->frame = NULL; + fs->top_field = NULL; + fs->bottom_field = NULL; + break; + case 2: + free_picture(p_H264_Dpb, fs->bottom_field); + fs->bottom_field = NULL; + break; + case 1: + free_picture(p_H264_Dpb, fs->top_field); + fs->top_field = NULL; + break; + case 0: + break; + default: + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "invalid frame store type %x", 500); + } + fs->data_flag = 0; + fs->is_used = 0; + fs->is_long_term = 0; + fs->is_reference = 0; + fs->is_orig_reference = 0; + 
fs->frame_size = 0; + /* move empty framestore to end of buffer */ + tmp = p_Dpb->fs[pos]; + + for (i = pos; i < p_Dpb->used_size - 1; i++) + p_Dpb->fs[i] = p_Dpb->fs[i + 1]; + p_Dpb->fs[p_Dpb->used_size - 1] = tmp; + + if (p_Dpb->used_size) + p_Dpb->used_size--; +} + +int is_used_for_reference(struct FrameStore *fs) +{ + if (fs->is_reference) + return 1; + + if (fs->is_used == 3) { /* frame */ + if (fs->frame->used_for_reference) + return 1; + } + + if (fs->is_used & 1) { /* top field */ + if (fs->top_field) { + if (fs->top_field->used_for_reference) + return 1; + } + } + + if (fs->is_used & 2) { /* bottom field */ + if (fs->bottom_field) { + if (fs->bottom_field->used_for_reference) + return 1; + } + } + return 0; +} + +static int remove_unused_frame_from_dpb(struct h264_dpb_stru *p_H264_Dpb) +{ + unsigned int i; + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + /* check for frames that were already output and no longer + * used for reference + */ + for (i = 0; i < p_Dpb->used_size; i++) { + if ((!is_used_for_reference(p_Dpb->fs[i])) && + (p_Dpb->fs[i]->colocated_buf_index >= 0)) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "release_colocate_buf[%d] for fs[%d]\n", + p_Dpb->fs[i]->colocated_buf_index, i); + + release_colocate_buf(p_H264_Dpb, + p_Dpb->fs[i]->colocated_buf_index); /* rain */ + p_Dpb->fs[i]->colocated_buf_index = -1; + } + } + + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->is_output && + (!is_used_for_reference(p_Dpb->fs[i]))) { + release_buf_spec_num(p_H264_Dpb->vdec, + p_Dpb->fs[i]->buf_spec_num); + p_Dpb->fs[i]->buf_spec_num = -1; + remove_frame_from_dpb(p_H264_Dpb, i); + + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "%s[%d]\n", + __func__, i); + + return 1; + } + } + return 0; +} + +static int unmark_one_error_out_frame(struct h264_dpb_stru *p_H264_Dpb) +{ + int ret = 0; + unsigned i; + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + + for (i = 0; i < p_Dpb->used_size; 
i++) { + if (p_Dpb->fs[i]->is_output && + ((p_Dpb->fs[i]->data_flag & ERROR_FLAG) || + (p_Dpb->fs[i]->data_flag & NULL_FLAG)) + ) { + unmark_for_reference(p_Dpb, p_Dpb->fs[i]); + + ret = 1; + break; + } + } + return ret; +} + +static int unmark_one_out_frame(struct h264_dpb_stru *p_H264_Dpb) +{ + int ret = 0; + unsigned i; + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->is_output) { + unmark_for_reference(p_Dpb, p_Dpb->fs[i]); + + ret = 1; + } + } + return ret; +} +/* + force_flag, + 1, remove one error buf (is_out is 1) if there is no un-used buf + 2, remove one buf (is_out is 1) if there is no un-used buf +*/ +void bufmgr_h264_remove_unused_frame(struct h264_dpb_stru *p_H264_Dpb, + u8 force_flag) +{ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + int ret = 0; + unsigned char removed_flag = 0; + do { + ret = remove_unused_frame_from_dpb(p_H264_Dpb); + if (ret != 0) + removed_flag = 1; + } while (ret != 0); + if (removed_flag) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "%s\r\n", __func__); + dump_dpb(p_Dpb, 0); + } else if (force_flag == 2) { + if (unmark_one_out_frame(p_H264_Dpb)) { + dpb_print(p_H264_Dpb->decoder_index, + 0, "%s, Warnning, force unmark one frame\r\n", + __func__); + update_ref_list(p_Dpb); + remove_unused_frame_from_dpb(p_H264_Dpb); + dump_dpb(p_Dpb, 0); + } + } else if (force_flag == 1) { + if (unmark_one_error_out_frame(p_H264_Dpb)) { + dpb_print(p_H264_Dpb->decoder_index, + 0, "%s, unmark error frame\r\n", + __func__); + update_ref_list(p_Dpb); + remove_unused_frame_from_dpb(p_H264_Dpb); + dump_dpb(p_Dpb, 0); + } + } +} + +#ifdef OUTPUT_BUFFER_IN_C +int is_there_unused_frame_from_dpb(struct DecodedPictureBuffer *p_Dpb) +{ + unsigned int i; + + /* check for frames that were already output and no longer + * used for reference + */ + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->is_output && + 
(!is_used_for_reference(p_Dpb->fs[i]))) { + return 1; + } + } + return 0; +} +#endif + +static void get_smallest_poc(struct DecodedPictureBuffer *p_Dpb, int *poc, + int *pos) +{ + unsigned int i; + unsigned long flags; + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + struct vdec_s *vdec= (struct vdec_s *)p_H264_Dpb->vdec; + void *p = vh264_get_bufspec_lock(vdec); + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "%s\n", __func__); + if (p_Dpb->used_size < 1) { + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "Cannot determine smallest POC, DPB empty. %d\n", + 150); + } + + *pos = -1; + *poc = INT_MAX; + if (p == NULL) + return; + spin_lock_irqsave(p, flags); + for (i = 0; i < p_Dpb->used_size; i++) { +#ifdef OUTPUT_BUFFER_IN_C + /* rain */ + if ((*poc > p_Dpb->fs[i]->poc) && + (!p_Dpb->fs[i]->is_output) && + (!p_Dpb->fs[i]->pre_output)) { +#else + if ((*poc > p_Dpb->fs[i]->poc) && (!p_Dpb->fs[i]->is_output)) { +#endif + *poc = p_Dpb->fs[i]->poc; + *pos = i; + } + } + spin_unlock_irqrestore(p, flags); +} + +int output_frames(struct h264_dpb_stru *p_H264_Dpb, unsigned char flush_flag) +{ + int poc, pos; + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + int i; + int none_displayed_num = 0; + unsigned char fast_output_flag = 0; + + /* + * When the sei data(dv data) after the PIC done and no flush, + * it is necessary to output a frame of data until + * the number of frames not displayed is greater than the reorder. 
+ */ + if ((p_H264_Dpb->wait_aux_data_flag) && (!flush_flag)) { + int no_output_num = 0; + + for (i = 0; i < p_Dpb->used_size; i++) { + if ((!p_Dpb->fs[i]->is_output) && + (!p_Dpb->fs[i]->pre_output) && + ((p_Dpb->fs[i]->is_used == 3) || + (p_Dpb->fs[i]->data_flag & ERROR_FLAG))) + no_output_num++; + } + + if (no_output_num <= p_H264_Dpb->reorder_output) + return 0; + } + + if (!flush_flag) { + for (i = 0; i < p_Dpb->used_size; i++) { + if ((!p_Dpb->fs[i]->is_output) && + (!p_Dpb->fs[i]->pre_output) &&((p_Dpb->fs[i]->is_used == 3 + ||p_Dpb->fs[i]->data_flag & ERROR_FLAG ))) { + none_displayed_num++; + if ((p_H264_Dpb->first_insert_frame == FirstInsertFrm_IDLE || + p_H264_Dpb->first_insert_frame == FirstInsertFrm_RESET) + && (p_Dpb->fs[i]->is_used == 3) + && (p_Dpb->last_output_poc == INT_MIN)) { + if (p_H264_Dpb->first_insert_frame == FirstInsertFrm_IDLE) + fast_output_flag = 1; + p_H264_Dpb->first_insert_frame = FirstInsertFrm_OUT; + p_H264_Dpb->first_output_poc = p_Dpb->fs[i]->poc; + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s first insert frame i %d poc %d frame_num %x\n", + __func__, i, p_Dpb->fs[i]->poc, p_Dpb->fs[i]->frame_num); + } + + /*check poc even/odd*/ + if (p_H264_Dpb->poc_even_odd_flag == 0 && + p_H264_Dpb->decode_pic_count >= 3) + p_H264_Dpb->poc_even_odd_flag = 2; + if (p_Dpb->fs[i]->poc & 0x1) + p_H264_Dpb->poc_even_odd_flag = 1; + /**/ + + if ((p_H264_Dpb->fast_output_enable & 0x1) && + (p_Dpb->fs[i]->data_flag & IDR_FLAG)) + fast_output_flag = 1; + if ((p_H264_Dpb->fast_output_enable & 0x2) && + ((p_Dpb->fs[i]->poc - + p_Dpb->last_output_poc) + == 1)) + fast_output_flag = 1; + if ((p_H264_Dpb->fast_output_enable & 0x4) && + (p_H264_Dpb->poc_even_odd_flag == 2) && + (p_Dpb->fs[i]->is_used == 3) && + ((p_Dpb->fs[i]->poc - + p_Dpb->last_output_poc) + == 2)) + fast_output_flag = 1; + } + } + if (fast_output_flag) + ; + else if (none_displayed_num < + p_H264_Dpb->reorder_output) + return 0; + } + + get_smallest_poc(p_Dpb, &poc, 
&pos); + + if (pos == -1) + return 0; +#if 0 + if (is_used_for_reference(p_Dpb->fs[pos])) + return 0; +#endif + if (p_H264_Dpb->first_insert_frame == FirstInsertFrm_OUT) { + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s pos %d pos->poc %d first_output_poc %d \n", + __func__, pos, p_Dpb->fs[pos]->poc, p_H264_Dpb->first_output_poc); + + if (p_Dpb->fs[pos]->poc < p_H264_Dpb->first_output_poc) + p_Dpb->fs[pos]->data_flag |= NODISP_FLAG; + else if (p_Dpb->last_output_poc != INT_MIN) + p_H264_Dpb->first_insert_frame = FirstInsertFrm_SKIPDONE; + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s first_insert_frame %d \n", __func__, p_H264_Dpb->first_insert_frame); + } + if (prepare_display_buf(p_H264_Dpb->vdec, p_Dpb->fs[pos]) >= 0) { + if (!p_H264_Dpb->without_display_mode && + p_Dpb->fs[pos]->show_frame != false) + p_Dpb->fs[pos]->pre_output = 1; + } else { + if (h264_debug_flag & PRINT_FLAG_DPB_DETAIL) { + dpb_print(p_H264_Dpb->decoder_index, 0, + "%s[%d] poc:%d last_output_poc:%d poc_even_odd_flag:%d\n", + __func__, pos, poc, + p_Dpb->last_output_poc, + p_H264_Dpb->poc_even_odd_flag); + dump_dpb(p_Dpb, 1); + } + return 0; + } + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s[%d] poc %d last_output_poc %d poc_even_odd_flag %d\n", + __func__, pos, poc, + p_Dpb->last_output_poc, + p_H264_Dpb->poc_even_odd_flag); + + p_Dpb->last_output_poc = poc; + return 1; + +} + + +void flush_dpb(struct h264_dpb_stru *p_H264_Dpb) +{ + /* struct VideoParameters *p_Vid = p_Dpb->p_Vid; */ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + unsigned int i; + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + + /* diagnostics */ + /* dpb_print(p_H264_Dpb->decoder_index, + *PRINT_FLAG_DPB_DETAIL, + *"Flush remaining frames from the dpb." 
+ *"p_Dpb->size = %d, p_Dpb->used_size = %d\n", + *p_Dpb->size, p_Dpb->used_size); + */ + + if (!p_Dpb->init_done) + return; +/* if(p_Vid->conceal_mode == 0) */ +#if 0 +/* ??? */ + if (p_Vid->conceal_mode != 0) + conceal_non_ref_pics(p_Dpb, 0); +#endif + /* mark all frames unused */ + for (i = 0; i < p_Dpb->used_size; i++) { +#if MVC_EXTENSION_ENABLE + assert(p_Dpb->fs[i]->view_id == p_Dpb->layer_id); +#endif + unmark_for_reference(p_Dpb, p_Dpb->fs[i]); + + } + + while (remove_unused_frame_from_dpb(p_H264_Dpb)) + ; + + /* output frames in POC order */ + while (output_frames(p_H264_Dpb, 1)) + ; + + + p_Dpb->last_output_poc = INT_MIN; +} + +static int is_short_term_reference(struct DecodedPictureBuffer *p_Dpb, + struct FrameStore *fs) +{ + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + if (fs->is_used == 3) { /* frame */ + if ((fs->frame->used_for_reference) && + (!fs->frame->is_long_term)) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "[[%s 1]]", + __func__); + return 1; + } + } + + if (fs->is_used & 1) { /* top field */ + if (fs->top_field) { + if ((fs->top_field->used_for_reference) && + (!fs->top_field->is_long_term)) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "[[%s 2]]", + __func__); + return 1; + } + } + } + + if (fs->is_used & 2) { /* bottom field */ + if (fs->bottom_field) { + if ((fs->bottom_field->used_for_reference) && + (!fs->bottom_field->is_long_term)) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "[[%s 3]]", + __func__); + return 1; + } + } + } + return 0; +} + +static int is_long_term_reference(struct FrameStore *fs) +{ + + if (fs->is_used == 3) { /* frame */ + if ((fs->frame->used_for_reference) && + (fs->frame->is_long_term)) { + return 1; + } + } + + if (fs->is_used & 1) { /* top field */ + if (fs->top_field) { + if ((fs->top_field->used_for_reference) && + (fs->top_field->is_long_term)) { + return 1; + } + } + } + + if (fs->is_used & 2) { /* 
bottom field */ + if (fs->bottom_field) { + if ((fs->bottom_field->used_for_reference) && + (fs->bottom_field->is_long_term)) { + return 1; + } + } + } + return 0; +} + +void update_ref_list(struct DecodedPictureBuffer *p_Dpb) +{ + unsigned int i, j; + + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s (%d, %d)\n", __func__, p_Dpb->size, p_Dpb->used_size); + for (i = 0, j = 0; i < p_Dpb->used_size; i++) { +#if 1 + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "fs[%d]: fs %p frame %p is_reference %d %d %d\n", + i, p_Dpb->fs[i], p_Dpb->fs[i]->frame, + p_Dpb->fs[i]->frame != NULL ? + p_Dpb->fs[i]->frame->used_for_reference : 0, + p_Dpb->fs[i]->top_field != NULL ? + p_Dpb->fs[i]->top_field->used_for_reference : + 0, + p_Dpb->fs[i]->bottom_field != NULL ? + p_Dpb->fs[i]->bottom_field->used_for_reference : 0); +#endif + if (is_short_term_reference(p_Dpb, p_Dpb->fs[i])) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "fs_ref[%d]=fs[%d]: fs %p\n", j, i, p_Dpb->fs[i]); + p_Dpb->fs_ref[j++] = p_Dpb->fs[i]; + } + } + + p_Dpb->ref_frames_in_buffer = j; + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s dpb size is %d, %d\n", __func__, p_Dpb->size, j); + while (j < p_Dpb->size) { + /* dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + *"fs_ref[%d]=null\n", j); + */ + p_Dpb->fs_ref[j++] = NULL; + } +#ifdef ERROR_CHECK + for (i = 0; i < DPB_SIZE_MAX; i++) { + if (p_Dpb->fs_ref[i] == NULL) + p_Dpb->fs_ref[i] = &dummy_fs; + } +#endif +} + +static void update_ltref_list(struct DecodedPictureBuffer *p_Dpb) +{ + unsigned int i, j; + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + for (i = 0, j = 0; i < p_Dpb->used_size; i++) { + if (is_long_term_reference(p_Dpb->fs[i])) + p_Dpb->fs_ltref[j++] = 
p_Dpb->fs[i]; + } + + p_Dpb->ltref_frames_in_buffer = j; + + while (j < p_Dpb->size) + p_Dpb->fs_ltref[j++] = NULL; +#ifdef ERROR_CHECK + for (i = 0; i < DPB_SIZE_MAX; i++) { + if (p_Dpb->fs_ltref[i] == NULL) + p_Dpb->fs_ltref[i] = &dummy_fs; + } +#endif +} + +static void idr_memory_management(struct h264_dpb_stru *p_H264_Dpb, + struct StorablePicture *p) +{ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s ref_frames_in_buffer %d ltref_frames_in_buffer %d\n", + __func__, p_Dpb->ref_frames_in_buffer, + p_Dpb->ltref_frames_in_buffer); + + + if (p->no_output_of_prior_pics_flag) { + int i; + for (i = 0; i < p_Dpb->used_size; i++) { + unmark_for_reference(p_Dpb, p_Dpb->fs[i]); + if (p_Dpb->fs[i]->is_long_term) + unmark_for_long_term_reference(p_Dpb->fs[i]); + if (!p_Dpb->fs[i]->is_output && !p_Dpb->fs[i]->pre_output) + set_frame_output_flag(p_H264_Dpb, i); + } +#if 0 + /*???*/ + /* free all stored pictures */ + int i; + + for (i = 0; i < p_Dpb->used_size; i++) { + /* reset all reference settings + * free_frame_store(p_Dpb->fs[i]); + * p_Dpb->fs[i] = alloc_frame_store(); + */ + reset_frame_store(p_H264_Dpb, p_Dpb->fs[i]); /* ??? 
*/ + } + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) + p_Dpb->fs_ref[i] = NULL; + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) + p_Dpb->fs_ltref[i] = NULL; + p_Dpb->used_size = 0; +#endif + } else { + flush_dpb(p_H264_Dpb); + } + p_Dpb->last_picture = NULL; + + update_ref_list(p_Dpb); + update_ltref_list(p_Dpb); + p_Dpb->last_output_poc = INT_MIN; + + if (p->long_term_reference_flag) { + p_Dpb->max_long_term_pic_idx = 0; + p->is_long_term = 1; + p->long_term_frame_idx = 0; + } else { + p_Dpb->max_long_term_pic_idx = -1; + p->is_long_term = 0; + } + +#if (MVC_EXTENSION_ENABLE) + p_Dpb->last_output_view_id = -1; +#endif + +} + +static void sliding_window_memory_management( + struct DecodedPictureBuffer *p_Dpb, + struct StorablePicture *p) +{ + unsigned int i; + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + unsigned char slide_flag = 0; + unsigned int sliding_margin = imax( + 1, p_Dpb->num_ref_frames) - p_Dpb->ltref_frames_in_buffer; + /* assert (!p->idr_flag); */ + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s ref_frames_in_buffer %d ltref_frames_in_buffer %d\n", + __func__, p_Dpb->ref_frames_in_buffer, + p_Dpb->ltref_frames_in_buffer); + /* if this is a reference pic with sliding window, + unmark first ref frame */ + if (p_Dpb->ref_frames_in_buffer == sliding_margin) + slide_flag = 1; + /*else if ((h264_error_proc_policy & 0x8) && + (p_Dpb->ref_frames_in_buffer > sliding_margin)) + slide_flag = 1;*/ + + if (slide_flag) { + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->is_reference && + (!(p_Dpb->fs[i]->is_long_term))) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, "unmark %d\n", i); + unmark_for_reference(p_Dpb, p_Dpb->fs[i]); + update_ref_list(p_Dpb); + break; + } + } + } + + p->is_long_term = 0; +} + +static void check_num_ref(struct DecodedPictureBuffer *p_Dpb) +{ + if ((int)(p_Dpb->ltref_frames_in_buffer + + p_Dpb->ref_frames_in_buffer) > + imax(1, 
p_Dpb->num_ref_frames)) { + struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb, + struct h264_dpb_stru, mDPB); + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "Max. number of reference frames exceeded. Invalid stream. lt %d ref %d mum_ref %d\n", + p_Dpb->ltref_frames_in_buffer, + p_Dpb->ref_frames_in_buffer, + p_Dpb->num_ref_frames); + } +} + +void dump_dpb(struct DecodedPictureBuffer *p_Dpb, u8 force) +{ + unsigned int i; + unsigned char *buf = NULL; + unsigned int buf_size = 512, len = 0; + struct h264_dpb_stru *p_H264_Dpb = + container_of(p_Dpb, struct h264_dpb_stru, mDPB); + +#define DPB_STRCAT(args...) do { \ + len += snprintf(buf + len, \ + buf_size - len, ##args);\ + } while (0) + + if ((h264_debug_flag & PRINT_FLAG_DUMP_DPB) == 0 && + force == 0) + return; + + buf = kzalloc(buf_size, GFP_ATOMIC); + if (buf == NULL) + return; + + for (i = 0; i < p_Dpb->used_size; i++) { + len = 0; + memset(buf, 0, buf_size); + DPB_STRCAT("fn=%d is_used %d ", + p_Dpb->fs[i]->frame_num, + p_Dpb->fs[i]->is_used); + + if (p_Dpb->fs[i]->is_used & 1) { + if (p_Dpb->fs[i]->top_field) + DPB_STRCAT("T: poc=%d pic_num=%d ", + p_Dpb->fs[i]->top_field->poc, + p_Dpb->fs[i]->top_field->pic_num); + else + DPB_STRCAT("T: poc=%d ", + p_Dpb->fs[i]->frame->top_poc); + } + if (p_Dpb->fs[i]->is_used & 2) { + if (p_Dpb->fs[i]->bottom_field) + DPB_STRCAT("B: poc=%d pic_num=%d ", + p_Dpb->fs[i]->bottom_field->poc, + p_Dpb->fs[i]->bottom_field->pic_num); + else + DPB_STRCAT("B: poc=%d ", + p_Dpb->fs[i]->frame->bottom_poc); + } + if (p_Dpb->fs[i]->is_used == 3) { + if (p_Dpb->fs[i]->frame != NULL) + DPB_STRCAT("F: poc=%d pic_num=%d ", + p_Dpb->fs[i]->frame->poc, + p_Dpb->fs[i]->frame->pic_num); + else + DPB_STRCAT("fs[%d] frame is null ", i); + } + DPB_STRCAT("G: poc=%d) ", p_Dpb->fs[i]->poc); + if (p_Dpb->fs[i]->is_reference) + DPB_STRCAT("ref (%d) ", p_Dpb->fs[i]->is_reference); + if (p_Dpb->fs[i]->is_long_term) + DPB_STRCAT("lt_ref (%d) ", p_Dpb->fs[i]->is_reference); + if 
(p_Dpb->fs[i]->is_output) + DPB_STRCAT("out(displayed) "); + if (p_Dpb->fs[i]->pre_output) + DPB_STRCAT("pre_output(in dispq or displaying) "); + if (p_Dpb->fs[i]->is_used == 3) { + if (p_Dpb->fs[i]->frame != NULL && p_Dpb->fs[i]->frame->non_existing) + DPB_STRCAT("non_existing "); + else + DPB_STRCAT("fs[%d] frame is null ", i); + } + DPB_STRCAT("dpb_frame_count %d ", + p_Dpb->fs[i]->dpb_frame_count); + +#if (MVC_EXTENSION_ENABLE) + if (p_Dpb->fs[i]->is_reference) + DPB_STRCAT("view_id (%d) ", p_Dpb->fs[i]->view_id); +#endif + if (p_Dpb->fs[i]->data_flag) { + DPB_STRCAT("data_flag(0x%x)", + p_Dpb->fs[i]->data_flag); + } + DPB_STRCAT(" bufspec %d\n", + p_Dpb->fs[i]->buf_spec_num); + dpb_print(p_H264_Dpb->decoder_index, 0, "%s", buf); + } + + kfree(buf); +} + +/*! + ************************************************************************ + * \brief + * adaptive memory management + * + ************************************************************************ + */ + +static int get_pic_num_x(struct StorablePicture *p, + int difference_of_pic_nums_minus1) +{ + int currPicNum; + + if (p->structure == FRAME) + currPicNum = p->frame_num; + else + currPicNum = 2 * p->frame_num + 1; + + return currPicNum - (difference_of_pic_nums_minus1 + 1); +} + +/*! 
+ ************************************************************************ + * \brief + * Adaptive Memory Management: Mark short term picture unused + ************************************************************************ + */ +static void mm_unmark_short_term_for_reference(struct DecodedPictureBuffer + *p_Dpb, struct StorablePicture *p, + int difference_of_pic_nums_minus1) +{ + struct h264_dpb_stru *p_H264_Dpb = + container_of(p_Dpb, struct h264_dpb_stru, mDPB); + int picNumX; + + unsigned int i; + + picNumX = get_pic_num_x(p, difference_of_pic_nums_minus1); + + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i] == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p->structure == FRAME) { + if ((p_Dpb->fs_ref[i]->is_reference == 3) && + (p_Dpb->fs_ref[i]->is_long_term == 0)) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->frame == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->frame->pic_num == + picNumX) { + unmark_for_reference(p_Dpb, + p_Dpb->fs_ref[i]); + return; + } + } + } else { + if ((p_Dpb->fs_ref[i]->is_reference & 1) && + (!(p_Dpb->fs_ref[i]->is_long_term & 1))) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->top_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->top_field->pic_num == + picNumX) { + p_Dpb->fs_ref[i]-> + top_field->used_for_reference = 0; + p_Dpb->fs_ref[i]->is_reference &= 2; + if ((p_Dpb->fs_ref[i]->is_used == 3) +#ifdef ERROR_CHECK + && p_Dpb->fs_ref[i]->frame +#endif + ) { + p_Dpb->fs_ref[i]->frame-> + used_for_reference = 0; + } + return; + } + } + if ((p_Dpb->fs_ref[i]->is_reference & 2) && + (!(p_Dpb->fs_ref[i]->is_long_term & 2))) { +#ifdef ERROR_CHECK + if (p_Dpb->fs_ref[i]->bottom_field == NULL) { + p_H264_Dpb->dpb_error_flag = __LINE__; + continue; + } +#endif + if (p_Dpb->fs_ref[i]->bottom_field->pic_num == + picNumX) { + 
p_Dpb->fs_ref[i]->bottom_field-> + used_for_reference = 0; + p_Dpb->fs_ref[i]->is_reference &= 1; + if ((p_Dpb->fs_ref[i]->is_used == 3) +#ifdef ERROR_CHECK + && p_Dpb->fs_ref[i]->frame +#endif + ) { + p_Dpb->fs_ref[i]->frame-> + used_for_reference = 0; + } + return; + } + } + } + } +} + +/*! + ************************************************************************ + * \brief + * Adaptive Memory Management: Mark long term picture unused + ************************************************************************ + */ +static void mm_unmark_long_term_for_reference(struct DecodedPictureBuffer + *p_Dpb, struct StorablePicture *p, int long_term_pic_num) +{ + unsigned int i; + + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) { + if (p->structure == FRAME) { + if ((p_Dpb->fs_ltref[i]->is_reference == 3) && + (p_Dpb->fs_ltref[i]->is_long_term == 3)) { + if (p_Dpb->fs_ltref[i]->frame-> + long_term_pic_num == + long_term_pic_num) { + unmark_for_long_term_reference( + p_Dpb->fs_ltref[i]); + } + } + } else { + if ((p_Dpb->fs_ltref[i]->is_reference & 1) && + ((p_Dpb->fs_ltref[i]->is_long_term & 1))) { + if (p_Dpb->fs_ltref[i]->top_field-> + long_term_pic_num == + long_term_pic_num) { + p_Dpb->fs_ltref[i]->top_field-> + used_for_reference = 0; + p_Dpb->fs_ltref[i]->top_field-> + is_long_term = 0; + p_Dpb->fs_ltref[i]->is_reference &= 2; + p_Dpb->fs_ltref[i]->is_long_term &= 2; + if (p_Dpb->fs_ltref[i]->is_used == 3) { + p_Dpb->fs_ltref[i]->frame-> + used_for_reference = 0; + p_Dpb->fs_ltref[i]->frame-> + is_long_term = 0; + } + return; + } + } + if ((p_Dpb->fs_ltref[i]->is_reference & 2) && + ((p_Dpb->fs_ltref[i]->is_long_term & 2))) { + if (p_Dpb->fs_ltref[i]->bottom_field-> + long_term_pic_num == + long_term_pic_num) { + p_Dpb->fs_ltref[i]->bottom_field-> + used_for_reference = 0; + p_Dpb->fs_ltref[i]->bottom_field-> + is_long_term = 0; + p_Dpb->fs_ltref[i]->is_reference &= 1; + p_Dpb->fs_ltref[i]->is_long_term &= 1; + if (p_Dpb->fs_ltref[i]->is_used == 3) { + 
						p_Dpb->fs_ltref[i]->frame->
							used_for_reference = 0;
						p_Dpb->fs_ltref[i]->frame->
							is_long_term = 0;
					}
					return;
				}
			}
		}
	}
}


/*!
 ************************************************************************
 * \brief
 *    Mark a long-term reference frame or complementary
 *    field pair unused for reference
 ************************************************************************
 */
static void unmark_long_term_frame_for_reference_by_frame_idx(
	struct DecodedPictureBuffer *p_Dpb, int long_term_frame_idx)
{
	unsigned int i;

	for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) {
		if (p_Dpb->fs_ltref[i]->long_term_frame_idx ==
			long_term_frame_idx)
			unmark_for_long_term_reference(p_Dpb->fs_ltref[i]);
	}
}


/* Helper: unconditionally unmark fs_ltref[i] when a last_picture exists.
 * The original condition comparing against last_picture is kept below,
 * commented out, for reference.
 */
static void unmark1(struct DecodedPictureBuffer *p_Dpb,
	unsigned int curr_frame_num, int i)
{
	if (p_Dpb->last_picture) {
		/*if ((p_Dpb->last_picture != p_Dpb->fs_ltref[i]) ||
			p_Dpb->last_picture->frame_num != curr_frame_num) {*/
			unmark_for_long_term_reference(p_Dpb->fs_ltref[i]);
		/*} else {
			unmark_for_long_term_reference(p_Dpb->fs_ltref[i]);
		}*/
	}
}

/* Helper: unmark fs_ltref[i] unless it belongs to the current frame
 * (curr_pic_num is a field pic num, so >> 1 yields the frame number).
 */
static void unmark2(struct DecodedPictureBuffer *p_Dpb,
	int curr_pic_num, int i)
{
	if ((p_Dpb->fs_ltref[i]->frame_num) !=
		(unsigned int)(curr_pic_num >> 1))
		unmark_for_long_term_reference(p_Dpb->fs_ltref[i]);
}

/* Top-field variant: always unmark when the pair (3) or the top field (1)
 * is long-term; otherwise defer to unmark1/unmark2 depending on whether
 * the current picture itself is being marked.
 */
static void unmark3_top(struct DecodedPictureBuffer *p_Dpb,
	unsigned int curr_frame_num, int curr_pic_num, int mark_current, int i)
{
	if (p_Dpb->fs_ltref[i]->is_long_term == 3) {
		unmark_for_long_term_reference(p_Dpb->fs_ltref[i]);
	} else {
		if (p_Dpb->fs_ltref[i]->is_long_term == 1) {
			unmark_for_long_term_reference(p_Dpb->fs_ltref[i]);
		} else {
			if (mark_current)
				unmark1(p_Dpb, curr_frame_num, i);
			else
				unmark2(p_Dpb, curr_pic_num, i);
		}
	}
}

/* Bottom-field variant of unmark3_top (bit 2 = bottom field long-term) */
static void unmark3_bottom(struct DecodedPictureBuffer *p_Dpb,
	unsigned int curr_frame_num, int curr_pic_num, int mark_current, int i)
{
	if (p_Dpb->fs_ltref[i]->is_long_term ==
		2) {
		unmark_for_long_term_reference(p_Dpb->fs_ltref[i]);
	} else {
		if (mark_current)
			unmark1(p_Dpb, curr_frame_num, i);
		else
			unmark2(p_Dpb, curr_pic_num, i);
	}
}

/* Unmark long-term field(s) that share long_term_frame_idx, taking care
 * not to unmark the field paired with the current picture (see
 * unmark1/unmark2/unmark3_* helpers above).
 */
static void unmark_long_term_field_for_reference_by_frame_idx(
	struct DecodedPictureBuffer *p_Dpb, enum PictureStructure structure,
	int long_term_frame_idx, int mark_current, unsigned int curr_frame_num,
	int curr_pic_num)
{
	struct VideoParameters *p_Vid = p_Dpb->p_Vid;
	unsigned int i;

	/* assert(structure!=FRAME); */
	/* wrap negative field pic num into [0, 2*max_frame_num) */
	if (curr_pic_num < 0)
		curr_pic_num += (2 * p_Vid->max_frame_num);

	for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) {
		if (p_Dpb->fs_ltref[i]->long_term_frame_idx ==
			long_term_frame_idx) {
			if (structure == TOP_FIELD)
				unmark3_top(p_Dpb, curr_frame_num,
					curr_pic_num, mark_current, i);

			if (structure == BOTTOM_FIELD)
				unmark3_bottom(p_Dpb, curr_frame_num,
					curr_pic_num, mark_current, i);
		}
	}
}

/*!
 ************************************************************************
 * \brief
 *    mark a picture as long-term reference: find the short-term
 *    frame/field with pic_num == picNumX in fs_ref[] and promote it
 *    (and, where stored, its complementary field / frame view) to
 *    long-term with the given long_term_frame_idx.
 ************************************************************************
 */
static void mark_pic_long_term(struct DecodedPictureBuffer *p_Dpb,
	struct StorablePicture *p,
	int long_term_frame_idx, int picNumX)
{
	struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb,
		struct h264_dpb_stru, mDPB);
	unsigned int i;
	int add_top, add_bottom;

	if (p->structure == FRAME) {
		for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ref[i]->is_reference == 3) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i]->frame == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if ((!p_Dpb->fs_ref[i]->frame->
					is_long_term) &&
					(p_Dpb->fs_ref[i]->frame->pic_num ==
					picNumX)) {
					/* chained assignment continues on
					 * the next source line
					 */
					p_Dpb->fs_ref[i]->
						long_term_frame_idx =
						p_Dpb->fs_ref[i]->frame->
						long_term_frame_idx =
						long_term_frame_idx;
					p_Dpb->fs_ref[i]->frame->
						long_term_pic_num =
						long_term_frame_idx;
					p_Dpb->fs_ref[i]->frame->
						is_long_term = 1;

					/* propagate the long-term marking to
					 * both stored fields, if present
					 */
					if (p_Dpb->fs_ref[i]->top_field &&
						p_Dpb->fs_ref[i]->bottom_field) {
						p_Dpb->fs_ref[i]->top_field->
							long_term_frame_idx =
							p_Dpb->fs_ref[i]->
							bottom_field->
							long_term_frame_idx =
							long_term_frame_idx;
						p_Dpb->fs_ref[i]->top_field->
							long_term_pic_num =
							long_term_frame_idx;
						p_Dpb->fs_ref[i]->
							bottom_field->
							long_term_pic_num =
							long_term_frame_idx;

						p_Dpb->fs_ref[i]->top_field->
							is_long_term =
							p_Dpb->fs_ref[i]->
							bottom_field->
							is_long_term
							= 1;

					}
					p_Dpb->fs_ref[i]->is_long_term = 3;
					return;
				}
			}
		}
		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"Warning: reference frame for long term marking not found\n");
	} else {
		/* field coding: long_term_pic_num = 2*idx + (1 if same
		 * parity as current field, else 0)
		 */
		if (p->structure == TOP_FIELD) {
			add_top = 1;
			add_bottom = 0;
		} else {
			add_top = 0;
			add_bottom = 1;
		}
		for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ref[i]->is_reference & 1) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i]->top_field == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if ((!p_Dpb->fs_ref[i]->top_field->
					is_long_term) &&
					(p_Dpb->fs_ref[i]->top_field->pic_num ==
					picNumX)) {
					if ((p_Dpb->fs_ref[i]->
						is_long_term) &&
						(p_Dpb->fs_ref[i]->
						long_term_frame_idx !=
						long_term_frame_idx)) {
						dpb_print(p_H264_Dpb->
							decoder_index,
							PRINT_FLAG_DPB_DETAIL,
							"Warning: assigning long_term_frame_idx different from other field\n");
					}

					p_Dpb->fs_ref[i]->
						long_term_frame_idx =
						p_Dpb->fs_ref[i]->top_field->
						long_term_frame_idx
						= long_term_frame_idx;
					p_Dpb->fs_ref[i]->top_field->
						long_term_pic_num =
						2 * long_term_frame_idx +
						add_top;
					p_Dpb->fs_ref[i]->top_field->
						is_long_term = 1;
					p_Dpb->fs_ref[i]->is_long_term |= 1;
					/* both fields now long-term: promote
					 * the frame view as well
					 */
					if ((p_Dpb->fs_ref[i]->is_long_term
						== 3)
#ifdef ERROR_CHECK
						&& p_Dpb->fs_ref[i]->frame
#endif
						) {
						p_Dpb->fs_ref[i]->frame->
							is_long_term = 1;
						p_Dpb->fs_ref[i]->frame->
							long_term_frame_idx =
							p_Dpb->fs_ref[i]->
							frame->
							long_term_pic_num =
							long_term_frame_idx;
					}
					return;
				}
			}
			if (p_Dpb->fs_ref[i]->is_reference & 2) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i]->bottom_field == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if ((!p_Dpb->fs_ref[i]->bottom_field->
					is_long_term) &&
					(p_Dpb->fs_ref[i]->bottom_field->pic_num
					== picNumX)) {
					if ((p_Dpb->fs_ref[i]->
						is_long_term) &&
						(p_Dpb->fs_ref[i]->
						long_term_frame_idx !=
						long_term_frame_idx)) {
						dpb_print(p_H264_Dpb->
							decoder_index,
							PRINT_FLAG_DPB_DETAIL,
							"Warning: assigning long_term_frame_idx different from other field\n");
					}

					p_Dpb->fs_ref[i]->
						long_term_frame_idx =
						p_Dpb->fs_ref[i]->bottom_field
						->long_term_frame_idx
						= long_term_frame_idx;
					p_Dpb->fs_ref[i]->bottom_field->
						long_term_pic_num = 2 *
						long_term_frame_idx +
						add_bottom;
					p_Dpb->fs_ref[i]->bottom_field->
						is_long_term = 1;
					p_Dpb->fs_ref[i]->is_long_term |= 2;
					if ((p_Dpb->fs_ref[i]->
						is_long_term == 3)
#ifdef ERROR_CHECK
						&& p_Dpb->fs_ref[i]->frame
#endif
						) {
						p_Dpb->fs_ref[i]->frame->
							is_long_term = 1;
						p_Dpb->fs_ref[i]->frame->
							long_term_frame_idx =
							p_Dpb->fs_ref[i]->
							frame->
							long_term_pic_num =
							long_term_frame_idx;
					}
					return;
				}
			}
		}
		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"Warning: reference field for long term marking not found\n");
	}
}


/*!
 ************************************************************************
 * \brief
 *    Assign a long term frame index to a short term picture
 *    (MMCO op 3): first evict any existing long-term entries with the
 *    same index, then promote the picture with pic_num == picNumX.
 ************************************************************************
 */
static void mm_assign_long_term_frame_idx(struct DecodedPictureBuffer *p_Dpb,
	struct StorablePicture *p, int difference_of_pic_nums_minus1,
	int long_term_frame_idx)
{
	struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb,
		struct h264_dpb_stru, mDPB);
	int picNumX = get_pic_num_x(p, difference_of_pic_nums_minus1);

	/* remove frames/fields with same long_term_frame_idx */
	if (p->structure == FRAME) {
		unmark_long_term_frame_for_reference_by_frame_idx(p_Dpb,
			long_term_frame_idx);
	} else {
		unsigned int i;
		enum PictureStructure structure = FRAME;

		/* find which field of which frame store carries picNumX,
		 * so the eviction can spare the complementary field
		 */
		for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ref[i]->is_reference & 1) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i]->top_field == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if (p_Dpb->fs_ref[i]->top_field->
					pic_num == picNumX) {
					structure = TOP_FIELD;
					break;
				}
			}
			if (p_Dpb->fs_ref[i]->is_reference & 2) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i]->bottom_field == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if (p_Dpb->fs_ref[i]->bottom_field->
					pic_num == picNumX) {
					structure = BOTTOM_FIELD;
					break;
				}
			}
		}
		/* structure still FRAME means no matching field was found */
		if (structure == FRAME) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				"field for long term marking not found %d",
				200);
		}

		unmark_long_term_field_for_reference_by_frame_idx(p_Dpb,
			structure,
			long_term_frame_idx, 0, 0, picNumX);
	}

	mark_pic_long_term(p_Dpb, p, long_term_frame_idx, picNumX);
}

/*!
+ ************************************************************************ + * \brief + * Set new max long_term_frame_idx + ************************************************************************ + */ +static void mm_update_max_long_term_frame_idx(struct DecodedPictureBuffer + *p_Dpb, int max_long_term_frame_idx_plus1) +{ + unsigned int i; + + p_Dpb->max_long_term_pic_idx = max_long_term_frame_idx_plus1 - 1; + + /* check for invalid frames */ + for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) { + if (p_Dpb->fs_ltref[i]->long_term_frame_idx > + p_Dpb->max_long_term_pic_idx) { + unmark_for_long_term_reference(p_Dpb->fs_ltref[i]); + } + } +} + + +/*! + ************************************************************************ + * \brief + * Mark all long term reference pictures unused for reference + ************************************************************************ + */ +static void mm_unmark_all_long_term_for_reference(struct DecodedPictureBuffer + *p_Dpb) +{ + mm_update_max_long_term_frame_idx(p_Dpb, 0); +} + +/*! + ************************************************************************ + * \brief + * Mark all short term reference pictures unused for reference + ************************************************************************ + */ +static void mm_unmark_all_short_term_for_reference(struct DecodedPictureBuffer + *p_Dpb) +{ + unsigned int i; + + for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) + unmark_for_reference(p_Dpb, p_Dpb->fs_ref[i]); + update_ref_list(p_Dpb); +} + + +/*! 
 ************************************************************************
 * \brief
 *    Mark the current picture used for long term reference
 *    (MMCO op 6): evict same-index long-term entries, then flag the
 *    current picture itself as long-term.
 ************************************************************************
 */
static void mm_mark_current_picture_long_term(struct DecodedPictureBuffer
	*p_Dpb, struct StorablePicture *p, int long_term_frame_idx)
{
	/* remove long term pictures with same long_term_frame_idx */
	if (p->structure == FRAME) {
		unmark_long_term_frame_for_reference_by_frame_idx(p_Dpb,
			long_term_frame_idx);
	} else {
		unmark_long_term_field_for_reference_by_frame_idx(p_Dpb,
			p->structure, long_term_frame_idx,
			1, p->pic_num, 0);
	}

	p->is_long_term = 1;
	p->long_term_frame_idx = long_term_frame_idx;
}

/* Execute the picture's queued memory_management_control_operations
 * (MMCO 0..6) in order, updating ref/long-term lists after each one.
 */
static void adaptive_memory_management(struct h264_dpb_stru *p_H264_Dpb,
	struct StorablePicture *p)
{
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;
	struct DecRefPicMarking_s *tmp_drpm;
	struct VideoParameters *p_Vid = p_Dpb->p_Vid;

	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		"%s\n", __func__);
	p_Vid->last_has_mmco_5 = 0;

	/* assert (!p->idr_flag); */
	/* assert (p->adaptive_ref_pic_buffering_flag); */

	while (p->dec_ref_pic_marking_buffer) {
		tmp_drpm = p->dec_ref_pic_marking_buffer;
		switch (tmp_drpm->memory_management_control_operation) {
		case 0:
			/* MMCO 0 terminates the list; anything after it
			 * is a bitstream error
			 */
			if (tmp_drpm->Next != NULL)
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_ERROR,
					"error, memory_management_control_operation = 0 not last operation in buffer\n");
			break;
		case 1:
			mm_unmark_short_term_for_reference(p_Dpb, p,
				tmp_drpm->difference_of_pic_nums_minus1);
			update_ref_list(p_Dpb);
			break;
		case 2:
			mm_unmark_long_term_for_reference(p_Dpb, p,
				tmp_drpm->long_term_pic_num);
			update_ltref_list(p_Dpb);
			break;
		case 3:
			mm_assign_long_term_frame_idx(p_Dpb, p,
				tmp_drpm->difference_of_pic_nums_minus1,
				tmp_drpm->long_term_frame_idx);
			update_ref_list(p_Dpb);
			update_ltref_list(p_Dpb);
			break;
		case 4:
			mm_update_max_long_term_frame_idx(p_Dpb,
				tmp_drpm->max_long_term_frame_idx_plus1);
			update_ltref_list(p_Dpb);
			break;
		case 5:
			/* MMCO 5: reset all references; POC/frame_num are
			 * rebased below and the DPB is flushed
			 */
			mm_unmark_all_short_term_for_reference(p_Dpb);
			mm_unmark_all_long_term_for_reference(p_Dpb);
			p_Vid->last_has_mmco_5 = 1;
			break;
		case 6:
			mm_mark_current_picture_long_term(p_Dpb, p,
				tmp_drpm->long_term_frame_idx);
			check_num_ref(p_Dpb);
			break;
		default:
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_ERROR,
				"error, invalid memory_management_control_operation in buffer\n");
		}
		p->dec_ref_pic_marking_buffer = tmp_drpm->Next;
		/* free (tmp_drpm); */
	}
	if (p_Vid->last_has_mmco_5) {
		p->pic_num = p->frame_num = 0;

		switch (p->structure) {
		case TOP_FIELD: {
			/* p->poc = p->top_poc = p_Vid->toppoc =0; */
			p->poc = p->top_poc = 0;
			break;
		}
		case BOTTOM_FIELD: {
			/* p->poc = p->bottom_poc = p_Vid->bottompoc = 0; */
			p->poc = p->bottom_poc = 0;
			break;
		}
		case FRAME: {
			/* rebase both field POCs so the frame POC becomes
			 * their minimum, i.e. effectively zero-based
			 */
			p->top_poc -= p->poc;
			p->bottom_poc -= p->poc;

			/* p_Vid->toppoc = p->top_poc; */
			/* p_Vid->bottompoc = p->bottom_poc; */

			p->poc = imin(p->top_poc, p->bottom_poc);
			/* p_Vid->framepoc = p->poc; */
			break;
		}
		}
		/* currSlice->ThisPOC = p->poc; */
#if (MVC_EXTENSION_ENABLE)
		if (p->view_id == 0) {
			flush_dpb(p_Vid->p_Dpb_layer[0]);
			flush_dpb(p_Vid->p_Dpb_layer[1]);
		} else {
			flush_dpb(p_Dpb);
		}
#else
		flush_dpb(p_H264_Dpb);
#endif
	}
}


/* Store picture p in the DPB: run IDR or adaptive/sliding-window
 * reference marking, pair fields with their complementary field store,
 * output/evict frames as needed, and insert at the end of the buffer.
 * Returns 0 on success, -1 when the DPB is full, -2 on duplicate frame.
 */
int store_picture_in_dpb(struct h264_dpb_stru *p_H264_Dpb,
	struct StorablePicture *p,
	unsigned char data_flag)
{
	/* struct VideoParameters *p_Vid = p_Dpb->p_Vid; */
	struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo;
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;
	unsigned int i, frame_outside_count = 0;
#if 0
	int poc, pos;
#endif
	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		"%s p_Vid %p\n", __func__, p_Vid);

	/* picture error concealment */

	/* diagnostics */
	/* dpb_print(p_H264_Dpb->decoder_index,
	PRINT_FLAG_DPB_DETAIL,
	 * "Storing (%s) non-ref pic with frame_num #%d\n",
	 * (p->type == FRAME)?"FRAME":(p->type == TOP_FIELD)?
	 * "TOP_FIELD":"BOTTOM_FIELD", p->pic_num);
	 */
	/* if frame, check for new store, */
	/* assert (p!=NULL); */

	p_Vid->last_has_mmco_5 = 0;
	p_Vid->last_pic_bottom_field = (p->structure == BOTTOM_FIELD);
	if (p->idr_flag) {
		/* IDR: clear the whole reference state */
		idr_memory_management(p_H264_Dpb, p);
		if (p_H264_Dpb->first_insert_frame == FirstInsertFrm_OUT)
			p_H264_Dpb->first_insert_frame = FirstInsertFrm_SKIPDONE;
#if 0
/* ??? */
		/* picture error concealment */
		memset(p_Vid->pocs_in_dpb, 0, sizeof(int) * 100);
#endif
	} else {
#if 1
/* ??? */
		/* adaptive memory management */
		if (p->used_for_reference &&
			(p->adaptive_ref_pic_buffering_flag))
			adaptive_memory_management(p_H264_Dpb, p);
#endif
	}

	if ((p->structure == TOP_FIELD) || (p->structure == BOTTOM_FIELD)) {
		/* check for frame store with same pic_number: pair this
		 * field with the previously stored opposite field when
		 * their reference status matches
		 */
		if (p_Dpb->last_picture) {
			if ((int)p_Dpb->last_picture->frame_num ==
				p->pic_num) {
				if (((p->structure == TOP_FIELD) &&
					(p_Dpb->last_picture->is_used == 2)) ||
					((p->structure == BOTTOM_FIELD) &&
					(p_Dpb->last_picture->is_used == 1))) {
					if ((p->used_for_reference &&
						(p_Dpb->last_picture->
						is_orig_reference != 0)) ||
						(!p->used_for_reference &&
						(p_Dpb->last_picture->
						is_orig_reference == 0))) {
						insert_picture_in_dpb(
							p_H264_Dpb,
							p_Dpb->last_picture,
							p, data_flag);
						update_ref_list(p_Dpb);
						update_ltref_list(p_Dpb);
						dump_dpb(p_Dpb, 0);
						p_Dpb->last_picture = NULL;
						return 0;
					}
				}
			}
		}
	}
	/* this is a frame or a field which has no stored
	 * complementary field
	 */

	/* sliding window, if necessary */
	if ((!p->idr_flag) && (p->used_for_reference &&
		(!p->adaptive_ref_pic_buffering_flag))) {
		sliding_window_memory_management(p_Dpb, p);
	}

	/* picture error concealment */
	if (p_Vid->conceal_mode != 0) {
		for (i = 0; i < p_Dpb->size; i++)
			if (p_Dpb->fs[i]->is_reference)
				p_Dpb->fs[i]->concealment_reference = 1;
	}

	/* free unused frame stores and drain pending output */
	while (remove_unused_frame_from_dpb(p_H264_Dpb))
		;

	while (output_frames(p_H264_Dpb, 0))
		;

	/* check for duplicate frame number in short term reference buffer */
	if ((p->used_for_reference) && (!p->is_long_term)) {
		for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL)
				continue;
#endif
			if (p_Dpb->fs_ref[i]->frame_num == p->frame_num) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					"duplicate frame_num in short-term reference picture buffer %d\n",
					500);
				/* same decode pass: refuse the duplicate;
				 * otherwise release the stale reference
				 */
				if (p_Dpb->fs_ref[i]->dpb_frame_count == p_H264_Dpb->dpb_frame_count) {
					dpb_print(p_H264_Dpb->decoder_index,
						0, "duplicate frame, no insert to dpb\n");
					return -2;
				} else {
					dpb_print(p_H264_Dpb->decoder_index,
						0, "duplicate frame_num release defore ref\n");
					unmark_for_reference(p_Dpb, p_Dpb->fs_ref[i]);
					update_ref_list(p_Dpb);
				}
			}
		}
	}
	/* store at end of buffer */

	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		"%s p_Dpb->used_size %d\n", __func__, p_Dpb->used_size);
	if (p_Dpb->used_size >= p_Dpb->size) {
		dpb_print(p_H264_Dpb->decoder_index,
			PRINT_FLAG_ERROR,
			"%s Error: used_sizd %d is large than dpb size\r\n",
			__func__, p_Dpb->used_size);
		/*h264_debug_flag |= PRINT_FLAG_DUMP_DPB;*/
		dump_dpb(p_Dpb, 0);
		return -1;
	}

	insert_picture_in_dpb(p_H264_Dpb, p_Dpb->fs[p_Dpb->used_size],
		p, data_flag);

	/* picture error concealment */
	if (p->idr_flag)
		p_Vid->earlier_missing_poc = 0;

	/* remember an unpaired field so its complement can join it later */
	if (p->structure != FRAME)
		p_Dpb->last_picture = p_Dpb->fs[p_Dpb->used_size];
	else
		p_Dpb->last_picture = NULL;

	p_Dpb->used_size++;
#if 0
/* ???
 */
	if (p_Vid->conceal_mode != 0)
		p_Vid->pocs_in_dpb[p_Dpb->used_size - 1] = p->poc;
#endif
	update_ref_list(p_Dpb);
	update_ltref_list(p_Dpb);

	check_num_ref(p_Dpb);
	/* count frames awaiting output; currently informational only */
	for (i = 0; i < p_Dpb->used_size; i++) {
		if (p_Dpb->fs[i]->pre_output)
			frame_outside_count++;
	}

	/* i doubles as the fast-output flag for output_frames() below */
	if (p_H264_Dpb->fast_output_enable == H264_OUTPUT_MODE_FAST)
		i = 1;
	else
		i = 0;

	if (i || (p_H264_Dpb->first_insert_frame < FirstInsertFrm_SKIPDONE)) {
		while (output_frames(p_H264_Dpb, i))
			;
	}

	dump_dpb(p_Dpb, 0);
	p_Dpb->first_pic_done = 1; /*by rain*/

	return 0;
}

/* Post-picture bookkeeping: MMCO 5 resets the previous frame number. */
void bufmgr_post(struct h264_dpb_stru *p_H264_Dpb)
{
	/*VideoParameters *p_Vid = p_Dpb->p_Vid;*/
	struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo;

	if (p_Vid->last_has_mmco_5)
		p_Vid->pre_frame_num = 0;
}
/**********************************
 *
 * Initialize reference lists
 **********************************
 */
/* local qsort clone (kernel context: no libc qsort); context unused */
#define __COMPARE(context, p1, p2) comp(p1, p2)
#define __SHORTSORT(lo, hi, width, comp, context) \
	shortsort(lo, hi, width, comp)
#define CUTOFF 8 /* testing shows that this is good value */
#define STKSIZ (8*sizeof(void *) - 2)

#undef swap
static void swap(
	char *a,
	char *b,
	size_t width
)
{
	char tmp;

	if (a != b)
		/* Do the swap one character at a time to avoid potential
		 * alignment problems.
		 */
		while (width--) {
			tmp = *a;
			*a++ = *b;
			*b++ = tmp;
		}
}

/* selection sort for small sub-arrays (see CUTOFF) */
static void shortsort(
	char *lo,
	char *hi,
	size_t width,
	int (*comp)(const void *, const void *)
)
{
	char *p, *max;

	/* Note: in assertions below, i and j are alway inside original
	 * bound of array to sort.
	 */

	while (hi > lo) {
		/* A[i] <= A[j] for i <= j, j > hi */
		max = lo;
		for (p = lo + width; p <= hi; p += width) {
			/* A[i] <= A[max] for lo <= i < p */
			if (__COMPARE(context, p, max) > 0)
				max = p;
			/* A[i] <= A[max] for lo <= i <= p */
		}

		/* A[i] <= A[max] for lo <= i <= hi */

		swap(max, hi, width);

		/* A[i] <= A[hi] for i <= hi, so A[i] <= A[j] for i <= j,
		 * j >= hi
		 */

		hi -= width;

		/* A[i] <= A[j] for i <= j, j > hi, loop top condition
		 * established
		 */
	}
	/* A[i] <= A[j] for i <= j, j > lo, which implies A[i] <= A[j]
	 * for i < j, so array is sorted
	 */
}

/* iterative quicksort with median-of-three pivot and explicit stack
 * (no recursion: kernel stacks are small); shadows the libc name
 */
static void qsort(
	void *base,
	size_t num,
	size_t width,
	int (*comp)(const void *, const void *)
)
{
	char *lo, *hi;		/* ends of sub-array currently sorting */
	char *mid;		/* points to middle of subarray */
	char *loguy, *higuy;	/* traveling pointers for partition step */
	size_t size;		/* size of the sub-array */
	char *lostk[STKSIZ], *histk[STKSIZ];
	int stkptr;

/* stack for saving sub-array to be
 * processed
 */
#if 0
	/* validation section */
	_VALIDATE_RETURN_VOID(base != NULL || num == 0, EINVAL);
	_VALIDATE_RETURN_VOID(width > 0, EINVAL);
	_VALIDATE_RETURN_VOID(comp != NULL, EINVAL);
#endif
	if (num < 2)
		return;		/* nothing to do */

	stkptr = 0;		/* initialize stack */

	lo = (char *)base;
	hi = (char *)base + width * (num - 1);	/* initialize limits */

	/* this entry point is for pseudo-recursion calling: setting
	 * lo and hi and jumping to here is like recursion, but stkptr is
	 * preserved, locals aren't, so we preserve stuff on the stack
	 */
recurse:

	size = (hi - lo) / width + 1;	/* number of el's to sort */

	/* below a certain size, it is faster to use a O(n^2) sorting method */
	if (size <= CUTOFF) {
		__SHORTSORT(lo, hi, width, comp, context);
	} else {
		/* First we pick a partitioning element.
		 * The efficiency of
		 * the algorithm demands that we find one that is approximately
		 * the median of the values, but also that we select one fast.
		 * We choose the median of the first, middle, and last
		 * elements, to avoid bad performance in the face of already
		 * sorted data, or data that is made up of multiple sorted
		 * runs appended together. Testing shows that a
		 * median-of-three algorithm provides better performance than
		 * simply picking the middle element for the latter case.
		 */

		mid = lo + (size / 2) * width;	/* find middle element */

		/* Sort the first, middle, last elements into order */
		if (__COMPARE(context, lo, mid) > 0)
			swap(lo, mid, width);
		if (__COMPARE(context, lo, hi) > 0)
			swap(lo, hi, width);
		if (__COMPARE(context, mid, hi) > 0)
			swap(mid, hi, width);

		/* We now wish to partition the array into three pieces, one
		 * consisting of elements <= partition element, one of elements
		 * equal to the partition element, and one of elements > than
		 * it. This is done below; comments indicate conditions
		 * established at every step.
		 */

		loguy = lo;
		higuy = hi;

		/* Note that higuy decreases and loguy increases on every
		 * iteration, so loop must terminate.
		 */
		for (;;) {
			/* lo <= loguy < hi, lo < higuy <= hi,
			 * A[i] <= A[mid] for lo <= i <= loguy,
			 * A[i] > A[mid] for higuy <= i < hi,
			 * A[hi] >= A[mid]
			 */

			/* The doubled loop is to avoid calling comp(mid,mid),
			 * since some existing comparison funcs don't work
			 * when passed the same value for both pointers.
			 */

			if (mid > loguy) {
				do {
					loguy += width;
				} while (loguy < mid &&
					__COMPARE(context, loguy, mid) <= 0);
			}
			if (mid <= loguy) {
				do {
					loguy += width;
				} while (loguy <= hi &&
					__COMPARE(context, loguy, mid) <= 0);
			}

			/* lo < loguy <= hi+1, A[i] <= A[mid] for
			 * lo <= i < loguy,
			 * either loguy > hi or A[loguy] > A[mid]
			 */

			do {
				higuy -= width;
			} while (higuy > mid &&
				__COMPARE(context, higuy, mid) > 0);

			/* lo <= higuy < hi, A[i] > A[mid] for higuy < i < hi,
			 * either higuy == lo or A[higuy] <= A[mid]
			 */

			if (higuy < loguy)
				break;

			/* if loguy > hi or higuy == lo, then we would have
			 * exited, so A[loguy] > A[mid], A[higuy] <= A[mid],
			 * loguy <= hi, higuy > lo
			 */

			swap(loguy, higuy, width);

			/* If the partition element was moved, follow it.
			 * Only need to check for mid == higuy, since before
			 * the swap, A[loguy] > A[mid] implies loguy != mid.
			 */

			if (mid == higuy)
				mid = loguy;

			/* A[loguy] <= A[mid], A[higuy] > A[mid]; so condition
			 * at top of loop is re-established
			 */
		}

		/* A[i] <= A[mid] for lo <= i < loguy,
		 * A[i] > A[mid] for higuy < i < hi,
		 * A[hi] >= A[mid]
		 * higuy < loguy
		 * implying:
		 * higuy == loguy-1
		 * or higuy == hi - 1, loguy == hi + 1, A[hi] == A[mid]
		 */

		/* Find adjacent elements equal to the partition element. The
		 * doubled loop is to avoid calling comp(mid,mid), since some
		 * existing comparison funcs don't work when passed the same
		 * value for both pointers.
		 */

		higuy += width;
		if (mid < higuy) {
			do {
				higuy -= width;
			} while (higuy > mid &&
				__COMPARE(context, higuy, mid) == 0);
		}
		if (mid >= higuy) {
			do {
				higuy -= width;
			} while (higuy > lo &&
				__COMPARE(context, higuy, mid) == 0);
		}

		/* OK, now we have the following:
		 * higuy < loguy
		 * lo <= higuy <= hi
		 * A[i] <= A[mid] for lo <= i <= higuy
		 * A[i] == A[mid] for higuy < i < loguy
		 * A[i] > A[mid] for loguy <= i < hi
		 * A[hi] >= A[mid]
		 */

		/* We've finished the partition, now we want to sort the
		 * subarrays [lo, higuy] and [loguy, hi].
		 * We do the smaller one first to minimize stack usage.
		 * We only sort arrays of length 2 or more.
		 */

		if (higuy - lo >= hi - loguy) {
			if (lo < higuy) {
				lostk[stkptr] = lo;
				histk[stkptr] = higuy;
				++stkptr;
			}	/* save big recursion for later */

			if (loguy < hi) {
				lo = loguy;
				goto recurse;	/* do small recursion */
			}
		} else {
			if (loguy < hi) {
				lostk[stkptr] = loguy;
				histk[stkptr] = hi;
				++stkptr;	/* save big recursion for later */
			}

			if (lo < higuy) {
				hi = higuy;
				goto recurse;	/* do small recursion */
			}
		}
	}

	/* We have sorted the array, except for any pending sorts on the stack.
	 * Check if there are any, and do them.
	 */

	--stkptr;
	if (stkptr >= 0) {
		lo = lostk[stkptr];
		hi = histk[stkptr];
		goto recurse;	/* pop subarray from stack */
	} else
		return;		/* all subarrays done */
}

/*!
+ ************************************************************************ + * \brief + * compares two stored pictures by picture number for qsort in + * descending order + * + ************************************************************************ + */ +static inline int compare_pic_by_pic_num_desc(const void *arg1, + const void *arg2) +{ + int pic_num1 = (*(struct StorablePicture **)arg1)->pic_num; + int pic_num2 = (*(struct StorablePicture **)arg2)->pic_num; + + if (pic_num1 < pic_num2) + return 1; + if (pic_num1 > pic_num2) + return -1; + else + return 0; +} + +/*! + ************************************************************************ + * \brief + * compares two stored pictures by picture number for qsort in + * descending order + * + ************************************************************************ + */ +static inline int compare_pic_by_lt_pic_num_asc(const void *arg1, + const void *arg2) +{ + int long_term_pic_num1 = + (*(struct StorablePicture **)arg1)->long_term_pic_num; + int long_term_pic_num2 = + (*(struct StorablePicture **)arg2)->long_term_pic_num; + + if (long_term_pic_num1 < long_term_pic_num2) + return -1; + if (long_term_pic_num1 > long_term_pic_num2) + return 1; + else + return 0; +} + +/*! + ************************************************************************ + * \brief + * compares two frame stores by pic_num for qsort in descending order + * + ************************************************************************ + */ +static inline int compare_fs_by_frame_num_desc(const void *arg1, + const void *arg2) +{ + int frame_num_wrap1 = (*(struct FrameStore **)arg1)->frame_num_wrap; + int frame_num_wrap2 = (*(struct FrameStore **)arg2)->frame_num_wrap; + + if (frame_num_wrap1 < frame_num_wrap2) + return 1; + if (frame_num_wrap1 > frame_num_wrap2) + return -1; + else + return 0; +} + + +/*! 
+ ************************************************************************ + * \brief + * compares two frame stores by lt_pic_num for qsort in descending order + * + ************************************************************************ + */ +static inline int compare_fs_by_lt_pic_idx_asc(const void *arg1, + const void *arg2) +{ + int long_term_frame_idx1 = + (*(struct FrameStore **)arg1)->long_term_frame_idx; + int long_term_frame_idx2 = + (*(struct FrameStore **)arg2)->long_term_frame_idx; + + if (long_term_frame_idx1 < long_term_frame_idx2) + return -1; + else if (long_term_frame_idx1 > long_term_frame_idx2) + return 1; + else + return 0; +} + + +/*! + ************************************************************************ + * \brief + * compares two stored pictures by poc for qsort in ascending order + * + ************************************************************************ + */ +static inline int compare_pic_by_poc_asc(const void *arg1, const void *arg2) +{ + int poc1 = (*(struct StorablePicture **)arg1)->poc; + int poc2 = (*(struct StorablePicture **)arg2)->poc; + + if (poc1 < poc2) + return -1; + else if (poc1 > poc2) + return 1; + else + return 0; +} + + +/*! + ************************************************************************ + * \brief + * compares two stored pictures by poc for qsort in descending order + * + ************************************************************************ + */ +static inline int compare_pic_by_poc_desc(const void *arg1, const void *arg2) +{ + int poc1 = (*(struct StorablePicture **)arg1)->poc; + int poc2 = (*(struct StorablePicture **)arg2)->poc; + + if (poc1 < poc2) + return 1; + else if (poc1 > poc2) + return -1; + else + return 0; +} + + +/*! 
+ ************************************************************************ + * \brief + * compares two frame stores by poc for qsort in ascending order + * + ************************************************************************ + */ +static inline int compare_fs_by_poc_asc(const void *arg1, const void *arg2) +{ + int poc1 = (*(struct FrameStore **)arg1)->poc; + int poc2 = (*(struct FrameStore **)arg2)->poc; + + if (poc1 < poc2) + return -1; + else if (poc1 > poc2) + return 1; + else + return 0; +} + + +/*! + ************************************************************************ + * \brief + * compares two frame stores by poc for qsort in descending order + * + ************************************************************************ + */ +static inline int compare_fs_by_poc_desc(const void *arg1, const void *arg2) +{ + int poc1 = (*(struct FrameStore **)arg1)->poc; + int poc2 = (*(struct FrameStore **)arg2)->poc; + + if (poc1 < poc2) + return 1; + else if (poc1 > poc2) + return -1; + else + return 0; +} + +/*! + ************************************************************************ + * \brief + * returns true, if picture is short term reference picture + * + ************************************************************************ + */ +static inline int is_short_ref(struct StorablePicture *s) +{ +#ifdef ERROR_CHECK + return (s && + (s->used_for_reference) && (!(s->is_long_term))); +#else + return (s->used_for_reference) && (!(s->is_long_term)); +#endif +} + + +/*! + ************************************************************************ + * \brief + * returns true, if picture is long term reference picture + * + ************************************************************************ + */ +static inline int is_long_ref(struct StorablePicture *s) +{ +#ifdef ERROR_CHECK + return (s && + s->used_for_reference) && (s->is_long_term); +#else + return (s->used_for_reference) && (s->is_long_term); +#endif +} + +/*! 
 ************************************************************************
 * \brief
 *    Initialize reference lists for a P Slice
 *    (this header describes init_lists_p_slice() below; the field-list
 *    helper gen_pic_list_from_frame_list() is defined first)
 *
 ************************************************************************
 */
/*!
 ************************************************************************
 * \brief
 *    Generates an alternating field list from a given FrameStore list.
 *    Fields are taken from fs_list in order, alternating parity starting
 *    with the parity of currStructure; only fields accepted by is_ref()
 *    (short- or long-term depending on 'long_term') are appended to
 *    'list', and *list_size is advanced accordingly.
 *
 ************************************************************************
 */
static void gen_pic_list_from_frame_list(enum PictureStructure currStructure,
	struct FrameStore **fs_list, int list_idx,
	struct StorablePicture **list,
	char *list_size, int long_term)
{
	int top_idx = 0;
	int bot_idx = 0;

	/* pick the reference-test predicate once, up front */
	int (*is_ref)(struct StorablePicture *s) = (long_term) ? is_long_ref :
		is_short_ref;


	if (currStructure == TOP_FIELD) {
		/* same-parity field first: top, then bottom, alternating */
		while ((top_idx < list_idx) || (bot_idx < list_idx)) {
			for (; top_idx < list_idx; top_idx++) {
				/* is_used & 1: top field present */
				if (fs_list[top_idx]->is_used & 1) {
					if (is_ref(fs_list[top_idx]->
						top_field)) {
						/* short term ref pic */
						list[(short) *list_size] =
							fs_list[top_idx]->top_field;
						(*list_size)++;
						top_idx++;
						break;
					}
				}
			}
			for (; bot_idx < list_idx; bot_idx++) {
				/* is_used & 2: bottom field present */
				if (fs_list[bot_idx]->is_used & 2) {
					if (is_ref(fs_list[bot_idx]->
						bottom_field)) {
						/* short term ref pic */
						list[(short) *list_size] =
							fs_list[bot_idx]->bottom_field;
						(*list_size)++;
						bot_idx++;
						break;
					}
				}
			}
		}
	}
	if (currStructure == BOTTOM_FIELD) {
		/* mirror image: bottom field first, then top */
		while ((top_idx < list_idx) || (bot_idx < list_idx)) {
			for (; bot_idx < list_idx; bot_idx++) {
				if (fs_list[bot_idx]->is_used & 2) {
					if (is_ref(fs_list[bot_idx]->
						bottom_field)) {
						/* short term ref pic */
						list[(short) *list_size] =
							fs_list[bot_idx]->bottom_field;
						(*list_size)++;
						bot_idx++;
						break;
					}
				}
			}
			for (; top_idx < list_idx; top_idx++) {
				if (fs_list[top_idx]->is_used & 1) {
					if (is_ref(fs_list[top_idx]->
						top_field)) {
						/* short term ref pic */
						list[(short) *list_size] =
							fs_list[top_idx]->top_field;
						(*list_size)++;
						top_idx++;
						break;
					}
				}
			}
		}
	}
}

/*
 * Build listX[0] for a P slice: short-term references ordered by PicNum
 * (descending), followed by long-term references ordered by
 * LongTermPicNum (ascending). listX[1] is unused for P slices and is
 * sized to 0. Frame coding uses whole frames; field coding builds the
 * list from alternating fields via gen_pic_list_from_frame_list().
 */
static void init_lists_p_slice(struct Slice *currSlice)
{
	struct VideoParameters *p_Vid = currSlice->p_Vid;
	struct DecodedPictureBuffer *p_Dpb = currSlice->p_Dpb;
	struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb,
		struct h264_dpb_stru, mDPB);

	unsigned int i;

	int list0idx = 0;
	int listltidx = 0;

	struct FrameStore **fs_list0;
	struct FrameStore **fs_listlt;

#if (MVC_EXTENSION_ENABLE)
	currSlice->listinterviewidx0 = 0;
	currSlice->listinterviewidx1 = 0;
#endif

	if (currSlice->structure == FRAME) {
		/* collect short-term reference frames */
		for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			/* record error location but keep building the list */
			if (p_Dpb->fs_ref[i] == NULL ||
				p_Dpb->fs_ref[i]->frame == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ref[i]->is_used == 3) {
				if ((p_Dpb->fs_ref[i]->frame->
					used_for_reference) &&
					(!p_Dpb->fs_ref[i]->frame->
					is_long_term)) {
					currSlice->listX[0][list0idx++] =
						p_Dpb->fs_ref[i]->frame;
				}
			}
		}
		/* order list 0 by PicNum */
		qsort((void *)currSlice->listX[0], list0idx,
			sizeof(struct StorablePicture *),
			compare_pic_by_pic_num_desc);
		currSlice->listXsize[0] = (char) list0idx;
		CHECK_VALID(currSlice->listXsize[0], 0);
		if (h264_debug_flag & PRINT_FLAG_DPB_DETAIL) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				"listX[0] (PicNum): ");
			for (i = 0; i < list0idx; i++) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL, "%d ",
					currSlice->listX[0][i]->pic_num);
			}
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "\n");
		}
		/* long term handling: append after the short-term entries */
		for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) {
			if (p_Dpb->fs_ltref[i]->is_used == 3) {
				if (p_Dpb->fs_ltref[i]->frame->is_long_term) {
					currSlice->listX[0][list0idx++] =
						p_Dpb->fs_ltref[i]->frame;
				}
			}
		}
		/* sort only the long-term tail, by LongTermPicNum ascending */
		qsort((void *)&currSlice->listX[0][
			(short) currSlice->listXsize[0]],
			list0idx - currSlice->listXsize[0],
			sizeof(struct StorablePicture *),
			compare_pic_by_lt_pic_num_asc);
		currSlice->listXsize[0] = (char) list0idx;
		CHECK_VALID(currSlice->listXsize[0], 0);
	} else {
#if 0
		fs_list0 = calloc(p_Dpb->size, sizeof(struct FrameStore *));
		if (fs_list0 == NULL)
			no_mem_exit("init_lists: fs_list0");
		fs_listlt = calloc(p_Dpb->size, sizeof(struct FrameStore *));
		if (fs_listlt == NULL)
			no_mem_exit("init_lists: fs_listlt");
#else
		/* kernel port: use preallocated scratch arrays in the DPB */
		fs_list0 = &(p_Dpb->fs_list0[0]);
		fs_listlt = &(p_Dpb->fs_listlt[0]);
#endif
		for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ref[i]->is_reference)
				fs_list0[list0idx++] = p_Dpb->fs_ref[i];
		}

		qsort((void *)fs_list0, list0idx, sizeof(struct FrameStore *),
			compare_fs_by_frame_num_desc);

		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"fs_list0 (FrameNum): ");
		for (i = 0; i < list0idx; i++) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "%d ",
				fs_list0[i]->frame_num_wrap);
		}
		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"\n");

		currSlice->listXsize[0] = 0;
		/* expand FrameStore list to alternating-field picture list */
		gen_pic_list_from_frame_list(currSlice->structure, fs_list0,
			list0idx, currSlice->listX[0],
			&currSlice->listXsize[0], 0);

		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"listX[0] (PicNum): ");
		for (i = 0; i < currSlice->listXsize[0]; i++) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "%d ",
				currSlice->listX[0][i]->pic_num);
		}
		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"\n");

		/* long term handling */
		for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++)
			fs_listlt[listltidx++] = p_Dpb->fs_ltref[i];

		qsort((void *)fs_listlt, listltidx, sizeof(struct FrameStore *),
			compare_fs_by_lt_pic_idx_asc);

		gen_pic_list_from_frame_list(currSlice->structure, fs_listlt,
			listltidx, currSlice->listX[0],
			&currSlice->listXsize[0], 1);

		/* free(fs_list0); */
		/* free(fs_listlt); */
	}
	currSlice->listXsize[1] = 0;


	/* set max size: clamp to the active ref counts from the slice header */
	currSlice->listXsize[0] = (char) imin(currSlice->listXsize[0],
		currSlice->num_ref_idx_active[LIST_0]);
	currSlice->listXsize[1] = (char) imin(currSlice->listXsize[1],
		currSlice->num_ref_idx_active[LIST_1]);
	CHECK_VALID(currSlice->listXsize[0], 0);
	CHECK_VALID(currSlice->listXsize[1], 1);

	/* set the unused list entries to NULL */
	for (i = currSlice->listXsize[0]; i < (MAX_LIST_SIZE); i++)
		currSlice->listX[0][i] = p_Vid->no_reference_picture;
	for (i = currSlice->listXsize[1]; i < (MAX_LIST_SIZE); i++)
		currSlice->listX[1][i] = p_Vid->no_reference_picture;

#if PRINTREFLIST
#if (MVC_EXTENSION_ENABLE)
	/* print out for h264_debug_flag purpose */
	if ((p_Vid->profile_idc == MVC_HIGH ||
		p_Vid->profile_idc == STEREO_HIGH) &&
		currSlice->current_slice_nr == 0) {
		if (currSlice->listXsize[0] > 0) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "\n");
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				" ** (CurViewID:%d %d) %s Ref Pic List 0 ****\n",
				currSlice->view_id,
				currSlice->ThisPOC,
				currSlice->structure == FRAME ? "FRM" :
				(currSlice->structure == TOP_FIELD ?
				"TOP" : "BOT"));
			for (i = 0; i < (unsigned int)(currSlice->
				listXsize[0]); i++) { /* ref list 0 */
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					"   %2d -> POC: %4d PicNum: %4d ViewID: %d\n",
					i,
					currSlice->listX[0][i]->poc,
					currSlice->listX[0][i]->pic_num,
					currSlice->listX[0][i]->view_id);
			}
		}
	}
#endif
#endif
}


/*!
 ************************************************************************
 * \brief
 *    Initialize reference lists
 *
 ************************************************************************
 */
/*
 * Build the MBAFF field lists listX[2..5] from the frame lists
 * listX[0]/listX[1]: each frame entry contributes its top and bottom
 * fields, same-parity-first for lists 2/3 and opposite-parity-first for
 * lists 4/5. Sizes are twice the corresponding frame-list sizes.
 */
static void init_mbaff_lists(struct h264_dpb_stru *p_H264_Dpb,
			struct Slice *currSlice)
{
	unsigned int j;
	int i;
	struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo;
	/* pre-fill field lists 2..5 with the "no reference" sentinel */
	for (i = 2; i < 6; i++) {
		for (j = 0; j < MAX_LIST_SIZE; j++)
			currSlice->listX[i][j] = p_Vid->no_reference_picture;
		currSlice->listXsize[i] = 0;
	}

	for (i = 0; i < currSlice->listXsize[0]; i++) {
#ifdef ERROR_CHECK
		if (currSlice->listX[0][i] == NULL) {
			p_H264_Dpb->dpb_error_flag = __LINE__;
			pr_info(
				"error currSlice->listX[0][%d] is NULL\r\n", i);
			break;
		}
#endif
		currSlice->listX[2][2 * i] =
			currSlice->listX[0][i]->top_field;
		currSlice->listX[2][2 * i + 1] =
			currSlice->listX[0][i]->bottom_field;
		currSlice->listX[4][2 * i] =
			currSlice->listX[0][i]->bottom_field;
		currSlice->listX[4][2 * i + 1] =
			currSlice->listX[0][i]->top_field;
	}
	currSlice->listXsize[2] = currSlice->listXsize[4] =
		currSlice->listXsize[0] * 2;

	for (i = 0; i < currSlice->listXsize[1]; i++) {
#ifdef ERROR_CHECK
		if (currSlice->listX[1][i] == NULL) {
			p_H264_Dpb->dpb_error_flag = __LINE__;
			pr_info(
				"error currSlice->listX[1][%d] is NULL\r\n", i);
			break;
		}
#endif
		currSlice->listX[3][2 * i] =
			currSlice->listX[1][i]->top_field;
		currSlice->listX[3][2 * i + 1] =
			currSlice->listX[1][i]->bottom_field;
		currSlice->listX[5][2 * i] =
			currSlice->listX[1][i]->bottom_field;
		currSlice->listX[5][2 * i + 1] =
			currSlice->listX[1][i]->top_field;
	}
	currSlice->listXsize[3] = currSlice->listXsize[5] =
		currSlice->listXsize[1] * 2;
}



/* I slices use no reference lists; both list sizes are zero. */
static void init_lists_i_slice(struct Slice *currSlice)
{

#if (MVC_EXTENSION_ENABLE)
	currSlice->listinterviewidx0 = 0;
	currSlice->listinterviewidx1 = 0;
#endif

	currSlice->listXsize[0] = 0;
	currSlice->listXsize[1] = 0;
}

/*
 * Build listX[0] and listX[1] for a B slice.
 * listX[0]: past short-term refs by POC descending, then future refs by
 * POC ascending; listX[1] is the same set with past/future swapped.
 * Long-term refs are appended to both lists, ordered by LongTermPicNum.
 * If the two lists come out identical and hold more than one entry, the
 * first two entries of listX[1] are swapped (spec requirement).
 */
static void init_lists_b_slice(struct Slice *currSlice)
{
	struct VideoParameters *p_Vid = currSlice->p_Vid;
	struct DecodedPictureBuffer *p_Dpb = currSlice->p_Dpb;
	struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb,
		struct h264_dpb_stru, mDPB);

	unsigned int i;
	int j;

	int list0idx = 0;
	int list0idx_1 = 0;
	int listltidx = 0;

	struct FrameStore **fs_list0;
	struct FrameStore **fs_list1;
	struct FrameStore **fs_listlt;

#if (MVC_EXTENSION_ENABLE)
	currSlice->listinterviewidx0 = 0;
	currSlice->listinterviewidx1 = 0;
#endif

	{
		/* B-Slice */
		if (currSlice->structure == FRAME) {
			/* forward refs: POC <= current POC */
			for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i] == NULL ||
					p_Dpb->fs_ref[i]->frame == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if ((p_Dpb->fs_ref[i]->is_used == 3) &&
					((p_Dpb->fs_ref[i]->frame->
					used_for_reference) &&
					(!p_Dpb->fs_ref[i]->frame->
					is_long_term)) &&
					(currSlice->framepoc >=
					p_Dpb->fs_ref[i]->frame->poc)) {
					/* !KS use >= for error
					 *  concealment
					 */
					currSlice->listX[0][list0idx++] =
						p_Dpb->fs_ref[i]->frame;
				}
			}
			qsort((void *)currSlice->listX[0], list0idx,
				sizeof(struct StorablePicture *),
				compare_pic_by_poc_desc);

			/* get the backward reference picture
			 * (POC>current POC) in list0;
			 */
			list0idx_1 = list0idx;
			for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i] == NULL ||
					p_Dpb->fs_ref[i]->frame == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if ((p_Dpb->fs_ref[i]->is_used == 3) &&
					((p_Dpb->fs_ref[i]->frame->
					used_for_reference) &&
					(!p_Dpb->fs_ref[i]->frame->
					is_long_term)) &&
					(currSlice->framepoc <
					p_Dpb->fs_ref[i]->frame->poc)) {
					currSlice->
					listX[0][list0idx++] =
						p_Dpb->fs_ref[i]->frame;
				}
			}
			/* sort only the backward tail, POC ascending */
			qsort((void *)&currSlice->listX[0][list0idx_1],
				list0idx - list0idx_1,
				sizeof(struct StorablePicture *),
				compare_pic_by_poc_asc);

			/* listX[1] = backward part first, then forward part */
			for (j = 0; j < list0idx_1; j++) {
				currSlice->
				listX[1][list0idx - list0idx_1 + j] =
					currSlice->listX[0][j];
			}
			for (j = list0idx_1; j < list0idx; j++) {
				currSlice->listX[1][j - list0idx_1] =
					currSlice->listX[0][j];
			}

			currSlice->listXsize[0] = currSlice->listXsize[1] =
				(char) list0idx;
			CHECK_VALID(currSlice->listXsize[0], 0);
			CHECK_VALID(currSlice->listXsize[1], 1);

			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				"listX[0] (PicNum): ");
			for (i = 0; i < currSlice->listXsize[0]; i++) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL, "%d ",
					currSlice->listX[0][i]->pic_num);
			}
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "\n");
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				"listX[1] (PicNum): ");
			for (i = 0; i < currSlice->listXsize[1]; i++) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL, "%d ",
					currSlice->listX[1][i]->pic_num);
			}
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "\n");
			/* dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL,
			 *  "currSlice->listX[0] currPoc=%d (Poc): ",
			 *  p_Vid->framepoc);
			 *  for (i=0; i<currSlice->listXsize[0]; i++) {
			 *  dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL,
			 *  "%d ", currSlice->listX[0][i]->poc);
			 *  }
			 *  dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL, "\n");
			 *  dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL,
			 *  "currSlice->listX[1] currPoc=%d (Poc): ",
			 *  p_Vid->framepoc);
			 *  for (i=0; i<currSlice->listXsize[1]; i++) {
			 *  dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL,
			 *  "%d ",
			 *  currSlice->listX[1][i]->poc);
			 *  }
			 *  dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL, "\n");
			 */

			/* long term handling: append to both lists */
			for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) {
				if (p_Dpb->fs_ltref[i]->is_used == 3) {
					if (p_Dpb->fs_ltref[i]->frame->
						is_long_term) {
						currSlice->
						listX[0][list0idx] =
						p_Dpb->fs_ltref[i]->frame;
						currSlice->
						listX[1][list0idx++] =
						p_Dpb->fs_ltref[i]->frame;
					}
				}
			}
			qsort((void *)&currSlice->
				listX[0][(short) currSlice->listXsize[0]],
				list0idx - currSlice->listXsize[0],
				sizeof(struct StorablePicture *),
				compare_pic_by_lt_pic_num_asc);
			qsort((void *)&currSlice->
				listX[1][(short) currSlice->listXsize[0]],
				list0idx - currSlice->listXsize[0],
				sizeof(struct StorablePicture *),
				compare_pic_by_lt_pic_num_asc);
			currSlice->listXsize[0] = currSlice->listXsize[1] =
				(char) list0idx;
			CHECK_VALID(currSlice->listXsize[0], 0);
			CHECK_VALID(currSlice->listXsize[1], 1);
		} else {
#if 0
			fs_list0 = calloc(p_Dpb->size,
				sizeof(struct FrameStore *));
			if (fs_list0 == NULL)
				no_mem_exit("init_lists: fs_list0");
			fs_list1 = calloc(p_Dpb->size,
				sizeof(struct FrameStore *));
			if (fs_list1 == NULL)
				no_mem_exit("init_lists: fs_list1");
			fs_listlt = calloc(p_Dpb->size,
				sizeof(struct FrameStore *));
			if (fs_listlt == NULL)
				no_mem_exit("init_lists: fs_listlt");
#else
			/* kernel port: preallocated scratch arrays */
			fs_list0 = &(p_Dpb->fs_list0[0]);
			fs_list1 = &(p_Dpb->fs_list1[0]);
			fs_listlt = &(p_Dpb->fs_listlt[0]);

#endif
			currSlice->listXsize[0] = 0;
			/* NOTE(review): "= 1" looks odd but is never read
			 * in this branch and is reset to 0 below before the
			 * gen_pic_list_from_frame_list() calls; presumably
			 * leftover from the reference code — confirm before
			 * changing.
			 */
			currSlice->listXsize[1] = 1;

			for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i] == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if (p_Dpb->fs_ref[i]->is_used) {
					if (currSlice->ThisPOC >=
						p_Dpb->fs_ref[i]->poc) {
						fs_list0[list0idx++] =
							p_Dpb->fs_ref[i];
					}
				}
			}
			qsort((void *)fs_list0, list0idx,
				sizeof(struct FrameStore *),
				compare_fs_by_poc_desc);
			list0idx_1 = list0idx;
			for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i] == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if (p_Dpb->fs_ref[i]->is_used) {
					if (currSlice->ThisPOC <
						p_Dpb->fs_ref[i]->poc) {
						fs_list0[list0idx++] =
							p_Dpb->fs_ref[i];
					}
				}
			}
			qsort((void *)&fs_list0[list0idx_1],
				list0idx - list0idx_1,
				sizeof(struct FrameStore *),
				compare_fs_by_poc_asc);

			/* fs_list1 = backward part first, then forward part */
			for (j = 0; j < list0idx_1; j++) {
				fs_list1[list0idx - list0idx_1 + j] =
					fs_list0[j];
			}
			for (j = list0idx_1; j < list0idx; j++)
				fs_list1[j - list0idx_1] = fs_list0[j];

			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				"fs_list0 currPoc=%d (Poc): ",
				currSlice->ThisPOC);
			for (i = 0; i < list0idx; i++) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL, "%d ",
					fs_list0[i]->poc);
			}
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "\n");
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				"fs_list1 currPoc=%d (Poc): ",
				currSlice->ThisPOC);
			for (i = 0; i < list0idx; i++) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL, "%d ",
					fs_list1[i]->poc);
			}
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "\n");

			currSlice->listXsize[0] = 0;
			currSlice->listXsize[1] = 0;
			gen_pic_list_from_frame_list(currSlice->structure,
				fs_list0, list0idx,
				currSlice->listX[0],
				&currSlice->listXsize[0], 0);
			gen_pic_list_from_frame_list(currSlice->structure,
				fs_list1, list0idx,
				currSlice->listX[1],
				&currSlice->listXsize[1], 0);

			/* dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL,
			 *  "currSlice->listX[0] currPoc=%d (Poc): ",
			 *  p_Vid->framepoc);
			 *  for (i=0; i<currSlice->listXsize[0]; i++) {
			 *  dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL, "%d ",
			 *  currSlice->listX[0][i]->poc);
			 *  }
			 *  dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL, "\n");
			 */
			/* dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL,
			 *  "currSlice->listX[1] currPoc=%d (Poc): ",
			 *  p_Vid->framepoc);
			 *  for (i=0; i<currSlice->listXsize[1]; i++) {
			 *  dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL, "%d ",
			 *  currSlice->listX[1][i]->poc);
			 *  }
			 *  dpb_print(p_H264_Dpb->decoder_index,
			 *  PRINT_FLAG_DPB_DETAIL,
			 *  "\n");
			 */

			/* long term handling */
			for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++)
				fs_listlt[listltidx++] = p_Dpb->fs_ltref[i];

			qsort((void *)fs_listlt, listltidx,
				sizeof(struct FrameStore *),
				compare_fs_by_lt_pic_idx_asc);

			gen_pic_list_from_frame_list(currSlice->structure,
				fs_listlt, listltidx,
				currSlice->listX[0],
				&currSlice->listXsize[0], 1);
			gen_pic_list_from_frame_list(currSlice->structure,
				fs_listlt, listltidx,
				currSlice->listX[1],
				&currSlice->listXsize[1], 1);

			/* free(fs_list0); */
			/* free(fs_list1); */
			/* free(fs_listlt); */
		}
	}

	if ((currSlice->listXsize[0] == currSlice->listXsize[1]) &&
		(currSlice->listXsize[0] > 1)) {
		/* check if lists are identical,
		 *if yes swap first two elements of currSlice->listX[1]
		 */
		int diff = 0;

		for (j = 0; j < currSlice->listXsize[0]; j++) {
			if (currSlice->listX[0][j] !=
				currSlice->listX[1][j]) {
				diff = 1;
				break;
			}
		}
		if (!diff) {
			struct StorablePicture *tmp_s =
				currSlice->listX[1][0];
			currSlice->listX[1][0] = currSlice->listX[1][1];
			currSlice->listX[1][1] = tmp_s;
		}
	}

	/* set max size: clamp to active ref counts from the slice header */
	currSlice->listXsize[0] = (char) imin(currSlice->listXsize[0],
		currSlice->num_ref_idx_active[LIST_0]);
	currSlice->listXsize[1] = (char) imin(currSlice->listXsize[1],
		currSlice->num_ref_idx_active[LIST_1]);
	CHECK_VALID(currSlice->listXsize[0], 0);
	CHECK_VALID(currSlice->listXsize[1], 1);

	/* set the unused list entries to NULL */
	for (i = currSlice->listXsize[0]; i < (MAX_LIST_SIZE); i++)
		currSlice->listX[0][i] = p_Vid->no_reference_picture;
	for (i = currSlice->listXsize[1]; i < (MAX_LIST_SIZE); i++)
		currSlice->listX[1][i] = p_Vid->no_reference_picture;

#if PRINTREFLIST
#if (MVC_EXTENSION_ENABLE)
	/* print out for h264_debug_flag purpose */
	if ((p_Vid->profile_idc == MVC_HIGH ||
		p_Vid->profile_idc == STEREO_HIGH) &&
		currSlice->current_slice_nr == 0) {
		if ((currSlice->listXsize[0] > 0) ||
			(currSlice->listXsize[1] > 0))
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "\n");
		if (currSlice->listXsize[0] > 0) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				" ** (CurViewID:%d %d) %s Ref Pic List 0 ****\n",
				currSlice->view_id,
				currSlice->ThisPOC,
				currSlice->structure == FRAME ? "FRM" :
				(currSlice->structure == TOP_FIELD ?
				"TOP" : "BOT"));
			for (i = 0; i < (unsigned int)(currSlice->
				listXsize[0]); i++) { /* ref list 0 */
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					"   %2d -> POC: %4d PicNum: %4d ViewID: %d\n",
					i,
					currSlice->listX[0][i]->poc,
					currSlice->listX[0][i]->pic_num,
					currSlice->listX[0][i]->view_id);
			}
		}
		if (currSlice->listXsize[1] > 0) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				" ** (CurViewID:%d %d) %s Ref Pic List 1 ****\n",
				currSlice->view_id,
				currSlice->ThisPOC,
				currSlice->structure == FRAME ? "FRM" :
				(currSlice->structure == TOP_FIELD ? "TOP" :
				"BOT"));
			for (i = 0; i < (unsigned int)(currSlice->
				listXsize[1]); i++) { /* ref list 1 */
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					"   %2d -> POC: %4d PicNum: %4d ViewID: %d\n",
					i,
					currSlice->listX[1][i]->poc,
					currSlice->listX[1][i]->pic_num,
					currSlice->listX[1][i]->view_id);
			}
		}
	}
#endif
#endif
}

/*
 * Look up the short-term reference picture with pic_num == picNum for
 * the current slice's structure (frame, or matching-parity field).
 * Returns the "no reference picture" sentinel when nothing matches.
 */
static struct StorablePicture *get_short_term_pic(struct Slice *currSlice,
	struct DecodedPictureBuffer *p_Dpb, int picNum)
{
	unsigned int i;
	struct h264_dpb_stru *p_H264_Dpb = container_of(p_Dpb,
		struct h264_dpb_stru, mDPB);
	for (i = 0; i < p_Dpb->ref_frames_in_buffer; i++) {
		if (currSlice->structure == FRAME) {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ref[i]->is_reference == 3) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i]->frame == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if ((!p_Dpb->fs_ref[i]->frame->
					is_long_term) &&
					(p_Dpb->fs_ref[i]->frame->
					pic_num == picNum))
					return p_Dpb->fs_ref[i]->frame;
			}
		} else {
#ifdef ERROR_CHECK
			if (p_Dpb->fs_ref[i] == NULL) {
				p_H264_Dpb->dpb_error_flag = __LINE__;
				continue;
			}
#endif
			if (p_Dpb->fs_ref[i]->is_reference & 1) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i]->top_field == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if ((!p_Dpb->fs_ref[i]->top_field->
					is_long_term) &&
					(p_Dpb->fs_ref[i]->top_field->
					pic_num == picNum))
					return p_Dpb->fs_ref[i]->top_field;
			}
			if (p_Dpb->fs_ref[i]->is_reference & 2) {
#ifdef ERROR_CHECK
				if (p_Dpb->fs_ref[i]->bottom_field == NULL) {
					p_H264_Dpb->dpb_error_flag = __LINE__;
					continue;
				}
#endif
				if ((!p_Dpb->fs_ref[i]->bottom_field->
					is_long_term) &&
					(p_Dpb->fs_ref[i]->bottom_field->
					pic_num == picNum))
					return p_Dpb->fs_ref[i]->bottom_field;
			}
		}
	}

	return currSlice->p_Vid->no_reference_picture;
}


static void
reorder_short_term(struct Slice *currSlice, int cur_list,
		int num_ref_idx_lX_active_minus1,
		int picNumLX, int *refIdxLX)
{
	/* Spec 8.2.4.3.1: insert the short-term picture with picNumLX at
	 * *refIdxLX, shift the rest down, and compact out the duplicate.
	 */
	struct h264_dpb_stru *p_H264_Dpb = container_of(currSlice->p_Vid,
		struct h264_dpb_stru, mVideo);

	struct StorablePicture **RefPicListX = currSlice->listX[cur_list];
	int cIdx, nIdx;

	struct StorablePicture *picLX;

	picLX = get_short_term_pic(currSlice, currSlice->p_Dpb, picNumLX);

	/* shift entries from *refIdxLX onward one slot to the right */
	for (cIdx = num_ref_idx_lX_active_minus1 + 1; cIdx > *refIdxLX;
		cIdx--) {
		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"%s: RefPicListX[ %d ] = RefPicListX[ %d ]\n",
			__func__, cIdx, cIdx - 1);
		RefPicListX[cIdx] = RefPicListX[cIdx - 1];
	}

	dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
		"%s: RefPicListX[ %d ] = pic %x (%d)\n", __func__,
		*refIdxLX, picLX, picNumLX);

	RefPicListX[(*refIdxLX)++] = picLX;

	nIdx = *refIdxLX;

	/* compact: drop the old occurrence of picNumLX, keep everything else */
	for (cIdx = *refIdxLX; cIdx <= num_ref_idx_lX_active_minus1 + 1;
		cIdx++) {
		if (RefPicListX[cIdx])
			if ((RefPicListX[cIdx]->is_long_term) ||
				(RefPicListX[cIdx]->pic_num != picNumLX)) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					"%s: RefPicListX[ %d ] = RefPicListX[ %d ]\n",
					__func__, nIdx, cIdx);
				RefPicListX[nIdx++] = RefPicListX[cIdx];
			}
	}
}


/*
 * Look up the long-term reference picture with
 * long_term_pic_num == LongtermPicNum for the current slice structure.
 * Returns NULL when nothing matches (unlike get_short_term_pic, which
 * returns the no-reference sentinel).
 */
static struct StorablePicture *get_long_term_pic(struct Slice *currSlice,
	struct DecodedPictureBuffer *p_Dpb, int LongtermPicNum)
{
	unsigned int i;

	for (i = 0; i < p_Dpb->ltref_frames_in_buffer; i++) {
		if (currSlice->structure == FRAME) {
			if (p_Dpb->fs_ltref[i]->is_reference == 3)
				if ((p_Dpb->fs_ltref[i]->frame) &&
					(p_Dpb->fs_ltref[i]->frame->
					is_long_term) &&
					(p_Dpb->fs_ltref[i]->frame->
					long_term_pic_num ==
					LongtermPicNum))
					return p_Dpb->fs_ltref[i]->frame;
		} else {
			if (p_Dpb->fs_ltref[i]->is_reference & 1)
				if ((p_Dpb->fs_ltref[i]->top_field) &&
					(p_Dpb->fs_ltref[i]->top_field->
					is_long_term) &&
					(p_Dpb->fs_ltref[i]->top_field->
					long_term_pic_num ==
					LongtermPicNum))
					return p_Dpb->fs_ltref[i]->top_field;

			if (p_Dpb->fs_ltref[i]->is_reference & 2)
				if ((p_Dpb->fs_ltref[i]->bottom_field) &&
					(p_Dpb->fs_ltref[i]->bottom_field->
					is_long_term) &&
					(p_Dpb->fs_ltref[i]->bottom_field->
					long_term_pic_num ==
					LongtermPicNum))
					return p_Dpb->fs_ltref[i]->
						bottom_field;
		}
	}
	return NULL;
}

/*!
 ************************************************************************
 * \brief
 *    Reordering process for long-term reference pictures
 *    (spec 8.2.4.3.2: same shift-insert-compact scheme as
 *    reorder_short_term, keyed on long_term_pic_num)
 *
 ************************************************************************
 */
static void reorder_long_term(struct Slice *currSlice,
			struct StorablePicture **RefPicListX,
			int num_ref_idx_lX_active_minus1,
			int LongTermPicNum, int *refIdxLX)
{
	int cIdx, nIdx;

	struct StorablePicture *picLX;

	picLX = get_long_term_pic(currSlice, currSlice->p_Dpb, LongTermPicNum);

	for (cIdx = num_ref_idx_lX_active_minus1 + 1; cIdx > *refIdxLX; cIdx--)
		RefPicListX[cIdx] = RefPicListX[cIdx - 1];

	RefPicListX[(*refIdxLX)++] = picLX;

	nIdx = *refIdxLX;

	for (cIdx = *refIdxLX; cIdx <= num_ref_idx_lX_active_minus1 + 1;
		cIdx++) {
		if (RefPicListX[cIdx]) {
			if ((!RefPicListX[cIdx]->is_long_term) ||
				(RefPicListX[cIdx]->long_term_pic_num !=
				LongTermPicNum))
				RefPicListX[nIdx++] = RefPicListX[cIdx];
		}
	}
}

/*
 * Apply the slice header's ref_pic_list_modification commands to
 * listX[cur_list] (spec 8.2.4.3): idc 0/1 move a short-term picture by
 * abs_diff_pic_num, idc 2 moves a long-term picture, idc 3 terminates.
 */
static void reorder_ref_pic_list(struct Slice *currSlice, int cur_list)
{
	int *modification_of_pic_nums_idc =
		currSlice->modification_of_pic_nums_idc[cur_list];
	int *abs_diff_pic_num_minus1 =
		currSlice->abs_diff_pic_num_minus1[cur_list];
	int *long_term_pic_idx = currSlice->long_term_pic_idx[cur_list];
	int num_ref_idx_lX_active_minus1 =
		currSlice->num_ref_idx_active[cur_list] - 1;

	struct VideoParameters *p_Vid = currSlice->p_Vid;
	int i;

	int maxPicNum, currPicNum, picNumLXNoWrap, picNumLXPred, picNumLX;
	int refIdxLX = 0;

	/* field pictures use doubled pic-num space (spec 8.2.4.1) */
	if (currSlice->structure == FRAME) {
		maxPicNum = p_Vid->max_frame_num;
		currPicNum = currSlice->frame_num;
	} else {
		maxPicNum = 2 * p_Vid->max_frame_num;
		currPicNum = 2 * currSlice->frame_num + 1;
	}

	picNumLXPred = currPicNum;

	for (i = 0; i < REORDERING_COMMAND_MAX_SIZE &&
		modification_of_pic_nums_idc[i] != 3; i++) {
		if (modification_of_pic_nums_idc[i] > 3) {
			struct h264_dpb_stru *p_H264_Dpb =
				container_of(p_Vid, struct h264_dpb_stru, mVideo);
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_ERROR,
				"error, Invalid modification_of_pic_nums_idc command\n");
			/*h264_debug_flag = 0x1f;*/
			break;
		}
		if (modification_of_pic_nums_idc[i] < 2) {
			/* idc 0: subtract; idc 1: add — both modulo maxPicNum */
			if (modification_of_pic_nums_idc[i] == 0) {
				if (picNumLXPred - (abs_diff_pic_num_minus1[i] +
					1) < 0)
					picNumLXNoWrap = picNumLXPred -
					(abs_diff_pic_num_minus1[i] + 1) +
					maxPicNum;
				else
					picNumLXNoWrap = picNumLXPred -
					(abs_diff_pic_num_minus1[i] + 1);
			} else { /* (modification_of_pic_nums_idc[i] == 1) */
				if (picNumLXPred + (abs_diff_pic_num_minus1[i] +
					1) >= maxPicNum)
					picNumLXNoWrap = picNumLXPred +
					(abs_diff_pic_num_minus1[i] + 1) -
					maxPicNum;
				else
					picNumLXNoWrap = picNumLXPred +
					(abs_diff_pic_num_minus1[i] + 1);
			}
			picNumLXPred = picNumLXNoWrap;

			if (picNumLXNoWrap > currPicNum)
				picNumLX = picNumLXNoWrap - maxPicNum;
			else
				picNumLX = picNumLXNoWrap;

#if (MVC_EXTENSION_ENABLE)
			/* NOTE(review): this 6-arg call does not match the
			 * 5-parameter reorder_short_term defined above, so it
			 * presumably only compiles when MVC_EXTENSION_ENABLE
			 * evaluates to 0 — confirm against the build config.
			 */
			reorder_short_term(currSlice, cur_list,
				num_ref_idx_lX_active_minus1, picNumLX,
				&refIdxLX, -1);
#else
			reorder_short_term(currSlice, cur_list,
				num_ref_idx_lX_active_minus1, picNumLX,
				&refIdxLX);
#endif
		} else { /* (modification_of_pic_nums_idc[i] == 2) */
#if (MVC_EXTENSION_ENABLE)
			/* NOTE(review): same prototype mismatch as above */
			reorder_long_term(currSlice, currSlice->listX[cur_list],
				num_ref_idx_lX_active_minus1,
				long_term_pic_idx[i], &refIdxLX, -1);
#else
			reorder_long_term(currSlice, currSlice->listX[cur_list],
				num_ref_idx_lX_active_minus1,
				long_term_pic_idx[i], &refIdxLX);
#endif
		}

	}
	/* that's a definition */
	currSlice->listXsize[cur_list] =
		(char)(num_ref_idx_lX_active_minus1 + 1);
}

/*
 * Drive reordering for the current slice: apply list-0 modifications for
 * P/B slices and list-1 modifications for B slices, validate against the
 * no-reference sentinel, and emit optional debug dumps.
 */
static void reorder_lists(struct Slice *currSlice)
{
	struct VideoParameters *p_Vid = currSlice->p_Vid;
	struct h264_dpb_stru *p_H264_Dpb = container_of(p_Vid,
		struct h264_dpb_stru, mVideo);
	int i;

	if ((currSlice->slice_type != I_SLICE) &&
		(currSlice->slice_type != SI_SLICE)) {
		if (currSlice->ref_pic_list_reordering_flag[LIST_0])
			reorder_ref_pic_list(currSlice, LIST_0);
		if (p_Vid->no_reference_picture ==
			currSlice->
			listX[0][currSlice->num_ref_idx_active[LIST_0] - 1]) {
			if (p_Vid->non_conforming_stream)
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					"RefPicList0[ %d ] is equal to 'no reference picture'\n",
					currSlice->
					num_ref_idx_active[LIST_0] - 1);
			else
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					"RefPicList0 [ num_ref_idx_l0_active_minus1 ] is equal to 'no reference picture', invalid bitstream %d\n",
					500);
		}
		/* that's a definition */
		currSlice->listXsize[0] =
			(char) imin(currSlice->listXsize[0],
			currSlice->num_ref_idx_active[LIST_0]);
		CHECK_VALID(currSlice->listXsize[0], 0);
		if (h264_debug_flag & PRINT_FLAG_DPB_DETAIL) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				"listX[0] reorder (PicNum): ");
			for (i = 0; i < currSlice->listXsize[0]; i++) {
				dpb_print_cont(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL, "%d ",
					currSlice->listX[0][i]->pic_num);
			}
			dpb_print_cont(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "\n");
		}
	}

	if (currSlice->slice_type == B_SLICE) {
		if (currSlice->ref_pic_list_reordering_flag[LIST_1])
			reorder_ref_pic_list(currSlice, LIST_1);
		if (p_Vid->no_reference_picture ==
			currSlice->listX[1][currSlice->
			num_ref_idx_active[LIST_1] - 1]) {
			if (p_Vid->non_conforming_stream)
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					"RefPicList1[ %d ] is equal to 'no reference picture'\n",
					currSlice->
					num_ref_idx_active[LIST_1] - 1);
			else
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					"RefPicList1 [ num_ref_idx_l1_active_minus1 ] is equal to 'no reference picture', invalid bitstream %d\n",
					500);
		}
		/* that's a definition
		 * NOTE(review): unlike listXsize[0] above, this assignment is
		 * not clamped with imin() — presumably intentional per the
		 * spec's "definition", but confirm against the reference code.
		 */
		currSlice->listXsize[1] =
			(char)currSlice->num_ref_idx_active[LIST_1];
		if (h264_debug_flag & PRINT_FLAG_DPB_DETAIL) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				"listX[1] reorder (PicNum): ");
			for (i = 0; i < currSlice->listXsize[1]; i++) {
				if (currSlice->listX[1][i])
					dpb_print_cont(p_H264_Dpb->decoder_index,
						PRINT_FLAG_DPB_DETAIL, "%d ",
						currSlice->listX[1][i]->pic_num);
			}
			dpb_print_cont(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL, "\n");
		}
	}

	/* free_ref_pic_list_reordering_buffer(currSlice); */

	if (currSlice->slice_type == P_SLICE) {
#if PRINTREFLIST
		unsigned int i;
#if (MVC_EXTENSION_ENABLE)
		/* print out for h264_debug_flag purpose */
		if ((p_Vid->profile_idc == MVC_HIGH ||
			p_Vid->profile_idc == STEREO_HIGH) &&
			currSlice->current_slice_nr == 0) {
			if (currSlice->listXsize[0] > 0
				&& (h264_debug_flag & PRINT_FLAG_DPB_DETAIL)) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL, "\n");
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					" ** (FinalViewID:%d) %s Ref Pic List 0 ****\n",
					currSlice->view_id,
					currSlice->structure == FRAME ?
					"FRM" :
					(currSlice->structure == TOP_FIELD ?
					"TOP" : "BOT"));
				for (i = 0; i < (unsigned int)(currSlice->
					listXsize[0]); i++) { /* ref list 0 */
					dpb_print(p_H264_Dpb->decoder_index,
						PRINT_FLAG_DPB_DETAIL,
						"   %2d -> POC: %4d PicNum: %4d ViewID: %d\n",
						i,
						currSlice->listX[0][i]->poc,
						currSlice->listX[0][i]->
						pic_num,
						currSlice->listX[0][i]->
						view_id);
				}
			}
		}
#endif
#endif
	} else if (currSlice->slice_type == B_SLICE) {
#if PRINTREFLIST
		unsigned int i;
#if (MVC_EXTENSION_ENABLE)
		/* print out for h264_debug_flag purpose */
		if ((p_Vid->profile_idc == MVC_HIGH ||
			p_Vid->profile_idc == STEREO_HIGH) &&
			currSlice->current_slice_nr == 0) {
			if ((currSlice->listXsize[0] > 0) ||
				(currSlice->listXsize[1] > 0))
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL, "\n");
			if (currSlice->listXsize[0] > 0
				&& (h264_debug_flag & PRINT_FLAG_DPB_DETAIL)) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					" ** (FinalViewID:%d) %s Ref Pic List 0 ****\n",
					currSlice->view_id,
					currSlice->structure == FRAME ?
					"FRM" :
					(currSlice->structure == TOP_FIELD ?
					"TOP" : "BOT"));
				for (i = 0; i < (unsigned int)(currSlice->
					listXsize[0]); i++) { /* ref list 0 */
					dpb_print(p_H264_Dpb->decoder_index,
						PRINT_FLAG_DPB_DETAIL,
						"   %2d -> POC: %4d PicNum: %4d ViewID: %d\n",
						i,
						currSlice->listX[0][i]->poc,
						currSlice->listX[0][i]->
						pic_num,
						currSlice->listX[0][i]->
						view_id);
				}
			}
			if (currSlice->listXsize[1] > 0
				&& (h264_debug_flag & PRINT_FLAG_DPB_DETAIL)) {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_DPB_DETAIL,
					" ** (FinalViewID:%d) %s Ref Pic List 1 ****\n",
					currSlice->view_id,
					currSlice->structure == FRAME ?
					"FRM" :
					(currSlice->structure == TOP_FIELD ?
					"TOP" : "BOT"));
				for (i = 0; i < (unsigned int)(currSlice->
					listXsize[1]); i++) { /* ref list 1 */
					dpb_print(p_H264_Dpb->decoder_index,
						PRINT_FLAG_DPB_DETAIL,
						"   %2d -> POC: %4d PicNum: %4d ViewID: %d\n",
						i,
						currSlice->listX[1][i]->poc,
						currSlice->listX[1][i]->
						pic_num,
						currSlice->listX[1][i]->
						view_id);
				}
			}
		}
#endif

#endif
	}
}

/* Reset the colocated-buffer allocation bitmap and record the capacity. */
void init_colocate_buf(struct h264_dpb_stru *p_H264_Dpb, int count)
{
	p_H264_Dpb->colocated_buf_map = 0;
	p_H264_Dpb->colocated_buf_count = count;
}

/*
 * Claim the lowest free slot in the colocated-buffer bitmap.
 * Returns the slot index, or -1 (and sets buf_alloc_fail) when full.
 */
int allocate_colocate_buf(struct h264_dpb_stru *p_H264_Dpb)
{
	int i;

	for (i = 0; i < p_H264_Dpb->colocated_buf_count; i++) {
		if (((p_H264_Dpb->colocated_buf_map >> i) & 0x1) == 0) {
			p_H264_Dpb->colocated_buf_map |= (1 << i);
			break;
		}
	}
	if (i == p_H264_Dpb->colocated_buf_count) {
		i = -1;
		p_H264_Dpb->buf_alloc_fail = 1;
	}
	return i;
}

/*
 * Release a colocated-buffer slot previously claimed by
 * allocate_colocate_buf(); logs (but tolerates) out-of-range or
 * already-free indices. Negative indices are ignored. Always returns 0.
 */
int release_colocate_buf(struct h264_dpb_stru *p_H264_Dpb, int index)
{
	if (index >= 0) {
		if (index >= p_H264_Dpb->colocated_buf_count) {
			dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_ERROR,
				"%s error, index %d is bigger than buf count %d\n",
				__func__, index,
				p_H264_Dpb->colocated_buf_count);
		} else {
			if (((p_H264_Dpb->colocated_buf_map >>
				index) & 0x1) == 0x1) {
				p_H264_Dpb->colocated_buf_map &=
					(~(1 << index));
			} else {
				dpb_print(p_H264_Dpb->decoder_index,
					PRINT_FLAG_ERROR,
					"%s error, index %d is not allocated\n",
					__func__, index);
			}
		}
	}
	return 0;
}

/* Mark a frame store as output (not to be displayed) and dump the DPB. */
void set_frame_output_flag(struct h264_dpb_stru *p_H264_Dpb, int index)
{
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;

	p_H264_Dpb->mFrameStore[index].is_output = 1;
	p_H264_Dpb->mFrameStore[index].pre_output = 0;
	p_H264_Dpb->mFrameStore[index].show_frame = false;
	dump_dpb(p_Dpb, 0);
}

#if 0
void init_old_slice(OldSliceParams *p_old_slice)
{
	p_old_slice->field_pic_flag = 0;
	p_old_slice->pps_id = INT_MAX;
	p_old_slice->frame_num = INT_MAX;
	p_old_slice->nal_ref_idc =
		INT_MAX;
	p_old_slice->idr_flag = 0;

	p_old_slice->pic_oder_cnt_lsb = UINT_MAX;
	p_old_slice->delta_pic_oder_cnt_bottom = INT_MAX;

	p_old_slice->delta_pic_order_cnt[0] = INT_MAX;
	p_old_slice->delta_pic_order_cnt[1] = INT_MAX;
}


/* (dead code, #if 0) snapshot the current slice header fields into
 * p_old_slice for the next-picture comparison in is_new_picture().
 */
void copy_slice_info(struct Slice *currSlice, OldSliceParams *p_old_slice)
{
	struct VideoParameters *p_Vid = currSlice->p_Vid;

	p_old_slice->pps_id = currSlice->pic_parameter_set_id;
	p_old_slice->frame_num = currSlice->frame_num;
	/* p_Vid->frame_num; */
	p_old_slice->field_pic_flag =
		currSlice->field_pic_flag;
	/* p_Vid->field_pic_flag; */

	if (currSlice->field_pic_flag)
		p_old_slice->bottom_field_flag = currSlice->bottom_field_flag;

	p_old_slice->nal_ref_idc = currSlice->nal_reference_idc;
	p_old_slice->idr_flag = (byte) currSlice->idr_flag;

	if (currSlice->idr_flag)
		p_old_slice->idr_pic_id = currSlice->idr_pic_id;

	if (p_Vid->active_sps->pic_order_cnt_type == 0) {
		p_old_slice->pic_oder_cnt_lsb =
			currSlice->pic_order_cnt_lsb;
		p_old_slice->delta_pic_oder_cnt_bottom =
			currSlice->delta_pic_order_cnt_bottom;
	}

	if (p_Vid->active_sps->pic_order_cnt_type == 1) {
		p_old_slice->delta_pic_order_cnt[0] =
			currSlice->delta_pic_order_cnt[0];
		p_old_slice->delta_pic_order_cnt[1] =
			currSlice->delta_pic_order_cnt[1];
	}
#if (MVC_EXTENSION_ENABLE)
	p_old_slice->view_id = currSlice->view_id;
	p_old_slice->inter_view_flag = currSlice->inter_view_flag;
	p_old_slice->anchor_pic_flag = currSlice->anchor_pic_flag;
#endif
	p_old_slice->layer_id = currSlice->layer_id;
}

/* (dead code, #if 0) full JM-style first-slice-of-new-picture
 * detection: ORs together every slice-header field change listed in
 * H.264 7.4.1.2.4. Nonzero result => new picture.
 */
int is_new_picture(StorablePicture *dec_picture, struct Slice *currSlice,
	OldSliceParams *p_old_slice)
{
	struct VideoParameters *p_Vid = currSlice->p_Vid;

	int result = 0;

	result |= (dec_picture == NULL);

	result |= (p_old_slice->pps_id != currSlice->pic_parameter_set_id);

	result |= (p_old_slice->frame_num != currSlice->frame_num);

	result |= (p_old_slice->field_pic_flag != currSlice->field_pic_flag);

	if (currSlice->field_pic_flag && p_old_slice->field_pic_flag) {
		result |= (p_old_slice->bottom_field_flag !=
			currSlice->bottom_field_flag);
	}

	result |= (p_old_slice->nal_ref_idc !=
		currSlice->nal_reference_idc) &&
		((p_old_slice->nal_ref_idc == 0) ||
		(currSlice->nal_reference_idc == 0));
	result |= (p_old_slice->idr_flag != currSlice->idr_flag);

	if (currSlice->idr_flag && p_old_slice->idr_flag)
		result |= (p_old_slice->idr_pic_id != currSlice->idr_pic_id);

	if (p_Vid->active_sps->pic_order_cnt_type == 0) {
		result |= (p_old_slice->pic_oder_cnt_lsb !=
			currSlice->pic_order_cnt_lsb);
		if (p_Vid->active_pps->
			bottom_field_pic_order_in_frame_present_flag == 1 &&
			!currSlice->field_pic_flag) {
			result |= (p_old_slice->delta_pic_oder_cnt_bottom !=
				currSlice->delta_pic_order_cnt_bottom);
		}
	}

	if (p_Vid->active_sps->pic_order_cnt_type == 1) {
		if (!p_Vid->active_sps->delta_pic_order_always_zero_flag) {
			result |= (p_old_slice->delta_pic_order_cnt[0] !=
				currSlice->delta_pic_order_cnt[0]);
			if (p_Vid->active_pps->
				bottom_field_pic_order_in_frame_present_flag == 1 &&
				!currSlice->field_pic_flag) {
				result |= (p_old_slice->
					delta_pic_order_cnt[1] !=
					currSlice->delta_pic_order_cnt[1]);
			}
		}
	}

#if (MVC_EXTENSION_ENABLE)
	result |= (currSlice->view_id != p_old_slice->view_id);
	result |= (currSlice->inter_view_flag != p_old_slice->inter_view_flag);
	result |= (currSlice->anchor_pic_flag != p_old_slice->anchor_pic_flag);
#endif
	result |= (currSlice->layer_id != p_old_slice->layer_id);
	return result;
}
#else
/*
 * is_new_picture() - live variant: the hardware/ucode reports the
 * slice header in dpb_param, so a slice starts a new picture exactly
 * when FIRST_MB_IN_SLICE is 0. dec_picture and p_old_slice are
 * unused here but kept for signature compatibility with the #if 0
 * reference implementation.
 * Returns 1 for a new picture, 0 otherwise.
 */
int is_new_picture(struct StorablePicture *dec_picture,
	struct h264_dpb_stru *p_H264_Dpb,
	struct OldSliceParams *p_old_slice)
{
	int ret = 0;

	if (p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE] == 0)
		ret = 1;
	return ret;
}

#endif

/*
* release bufspec and pic for picture not in dpb buf
*/
int release_picture(struct h264_dpb_stru *p_H264_Dpb,
	struct StorablePicture *pic)
{
struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + + if (p_Dpb->last_picture == NULL) { + if (pic->colocated_buf_index >= 0) { + release_colocate_buf(p_H264_Dpb, + pic->colocated_buf_index); + pic->colocated_buf_index = -1; + } + release_buf_spec_num(p_H264_Dpb->vdec, pic->buf_spec_num); + } else { + if (pic->buf_spec_is_alloced == 1) + release_buf_spec_num(p_H264_Dpb->vdec, + pic->buf_spec_num); + } + + free_picture(p_H264_Dpb, pic); + return 0; +} + +#ifdef ERROR_HANDLE_TEST +/* +* remove all pictures in dpb and release bufspec/pic of them +*/ +void remove_dpb_pictures(struct h264_dpb_stru *p_H264_Dpb) +{ + /* struct VideoParameters *p_Vid = p_Dpb->p_Vid; */ + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + struct Slice *currSlice = &p_H264_Dpb->mSlice; + unsigned i, j; + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s\n", __func__); + + if (!p_Dpb->init_done) + return; + + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->colocated_buf_index >= 0) { + dpb_print(p_H264_Dpb->decoder_index, + PRINT_FLAG_DPB_DETAIL, + "release_colocate_buf[%d] for fs[%d]\n", + p_Dpb->fs[i]->colocated_buf_index, i); + + release_colocate_buf(p_H264_Dpb, + p_Dpb->fs[i]->colocated_buf_index); /* rain */ + p_Dpb->fs[i]->colocated_buf_index = -1; + } + if (!p_Dpb->fs[i]->pre_output) { + release_buf_spec_num(p_H264_Dpb->vdec, + p_Dpb->fs[i]->buf_spec_num); + p_Dpb->fs[i]->buf_spec_num = -1; + } + remove_frame_from_dpb(p_H264_Dpb, i); + } + + for (i = 0; i < p_Dpb->used_size; i++) { + p_Dpb->fs_ref[i] = NULL; + p_Dpb->fs_ltref[i] = NULL; + p_Dpb->fs_list0[i] = NULL; + p_Dpb->fs_list1[i] = NULL; + p_Dpb->fs_listlt[i] = NULL; + } + for (i = 0; i < 2; i++) { + currSlice->listXsize[i] = 0; + for (j = 0; j < (MAX_LIST_SIZE * 2); j++) + currSlice->listX[i][j] = NULL; + } + p_Dpb->ref_frames_in_buffer = 0; + p_Dpb->ltref_frames_in_buffer = 0; + p_Dpb->last_output_poc = INT_MIN; +} +#endif + +static void check_frame_store_same_pic_num(struct 
	DecodedPictureBuffer *p_Dpb,
	struct StorablePicture *p, struct Slice *currSlice)
{
	if (p_Dpb->last_picture) {
		/* same pic_num and opposite parity => complementary field */
		if ((int)p_Dpb->last_picture->frame_num == p->pic_num) {
			if (((p->structure == TOP_FIELD) &&
				(p_Dpb->last_picture->is_used == 2)) ||
				((p->structure == BOTTOM_FIELD) &&
				(p_Dpb->last_picture->is_used == 1))) {
				/* both fields must agree on being (non-)reference */
				if ((p->used_for_reference &&
					(p_Dpb->last_picture->
					is_orig_reference != 0)) ||
					(!p->used_for_reference &&
					(p_Dpb->last_picture->
					is_orig_reference == 0))) {
					/* share the first field's buffers */
					p->buf_spec_num =
						p_Dpb->last_picture->
						buf_spec_num;
					p->buf_spec_is_alloced = 0;
					p->colocated_buf_index = p_Dpb->
						last_picture->
						colocated_buf_index;
					/* copy the missing field POC from the
					 * stored complementary field
					 * NOTE(review): assumes bottom_field/
					 * top_field are non-NULL when is_used
					 * says that field exists — confirm.
					 */
					if (currSlice->structure ==
						TOP_FIELD) {
						p->bottom_poc =
							p_Dpb->last_picture->
							bottom_field->poc;
					} else {
						p->top_poc =
							p_Dpb->last_picture->
							top_field->poc;
					}
					p->frame_poc = imin(p->bottom_poc,
						p->top_poc);
				}
			}
		}
	}
}

/*
 * h264_slice_header_process() - per-slice DPB driver entry.
 *
 * Prepares the slice from the ucode-reported dpb_param, (re)inits the
 * DPB on first use, and for the first slice of a picture: detects
 * frame_num gaps (optionally concealing via fill_frame_num_gap(),
 * reporting through *frame_num_gap), decodes POC, allocates a new
 * StorablePicture plus its bufspec/co-located buffer, then builds and
 * reorders the reference lists for every slice.
 *
 * Returns: 1 first slice of a new picture, 0 subsequent slice,
 * -1 buffer allocation failure or post_picture_early() failure.
 */
int h264_slice_header_process(struct h264_dpb_stru *p_H264_Dpb, int *frame_num_gap)
{

	int new_pic_flag = 0;
	struct Slice *currSlice = &p_H264_Dpb->mSlice;
	struct VideoParameters *p_Vid = &p_H264_Dpb->mVideo;
	struct DecodedPictureBuffer *p_Dpb =
		&p_H264_Dpb->mDPB;
#if 0
	new_pic_flag = is_new_picture(p_H264_Dpb->mVideo.dec_picture,
		p_H264_Dpb,
		&p_H264_Dpb->mVideo.old_slice);

	if (new_pic_flag) { /* new picture */
		if (p_H264_Dpb->mVideo.dec_picture) {
			store_picture_in_dpb(p_H264_Dpb,
				p_H264_Dpb->mVideo.dec_picture);
			/* dump_dpb(&p_H264_Dpb->mDPB); */
		}
	}
#else
	/* dec_picture is cleared when the previous picture completes,
	 * so NULL here means this slice starts a new picture
	 */
	new_pic_flag = (p_H264_Dpb->mVideo.dec_picture == NULL);
#endif
	p_H264_Dpb->buf_alloc_fail = 0;
	p_H264_Dpb->dpb_error_flag = 0;
	slice_prepare(p_H264_Dpb, &p_H264_Dpb->mDPB, &p_H264_Dpb->mVideo,
		&p_H264_Dpb->mSPS, &p_H264_Dpb->mSlice);

	if (p_Dpb->num_ref_frames != p_H264_Dpb->mSPS.num_ref_frames) {
		dpb_print(p_H264_Dpb->decoder_index, 0,
			"num_ref_frames change from %d to %d\r\n",
			p_Dpb->num_ref_frames, p_H264_Dpb->mSPS.num_ref_frames);
		p_Dpb->num_ref_frames = p_H264_Dpb->mSPS.num_ref_frames;
	}
	/* if (p_Vid->active_sps != sps) { */
	if (p_H264_Dpb->mDPB.init_done == 0) {
		/*init_global_buffers(p_Vid, 0);
		 * ** * *if (!p_Vid->no_output_of_prior_pics_flag)
		 ** * *{
		 ** * * flush_dpb(p_Vid->p_Dpb_layer[0]);
		 ** * *}
		 ** * *init_dpb(p_Vid, p_Vid->p_Dpb_layer[0], 0);
		 */
		init_dpb(p_H264_Dpb, 0);
	}


	if (new_pic_flag) { /* new picture */
		/* "max_frmae_num" typo is in the original log string;
		 * left unchanged (runtime text)
		 */
		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"check frame_num gap: cur frame_num %d pre_frame_num %d max_frmae_num %d\r\n",
			currSlice->frame_num,
			p_Vid->pre_frame_num,
			p_Vid->max_frame_num);
		/* frame_num neither repeats nor increments by 1 => gap */
		if (p_Vid->recovery_point == 0 &&
			p_Vid->max_frame_num <= FRAME_NUM_MAX_SIZE &&
			currSlice->frame_num != p_Vid->pre_frame_num &&
			currSlice->frame_num !=
			(p_Vid->pre_frame_num + 1) % p_Vid->max_frame_num) {
			struct SPSParameters *active_sps = p_Vid->active_sps;
			/*if (active_sps->
			 *gaps_in_frame_num_value_allowed_flag
			 *== 0) {
			 * error("An unintentional
			 * loss of pictures occurs! Exit\n",
			 * 100);
			 *}
			 *if (p_Vid->conceal_mode == 0)
			 */
			if (active_sps->frame_num_gap_allowed)
				fill_frame_num_gap(p_Vid, currSlice);
			*frame_num_gap = 1;
		}

		if (currSlice->nal_reference_idc) {
			dpb_print(p_H264_Dpb->decoder_index,
				PRINT_FLAG_DPB_DETAIL,
				"nal_reference_idc not 0, set pre_frame_num(%d) to frame_num (%d)\n",
				p_Vid->pre_frame_num, currSlice->frame_num);
			p_Vid->pre_frame_num = currSlice->frame_num;
		}

		decode_poc(&p_H264_Dpb->mVideo, &p_H264_Dpb->mSlice);
		p_H264_Dpb->mVideo.dec_picture = get_new_pic(p_H264_Dpb,
			p_H264_Dpb->mSlice.structure,
			/*p_Vid->width, p_Vid->height,
			 * p_Vid->width_cr,
			 * p_Vid->height_cr,
			 */
			1);
		if (p_H264_Dpb->mVideo.dec_picture) {
			u32 offset_lo, offset_hi;
			struct DecodedPictureBuffer *p_Dpb =
				&p_H264_Dpb->mDPB;
			struct StorablePicture *p =
				p_H264_Dpb->mVideo.dec_picture;
			init_picture(p_H264_Dpb, &p_H264_Dpb->mSlice,
				p_H264_Dpb->mVideo.dec_picture);
#if 1
			/* rain */
			/* stream offset delimiter reported by ucode as
			 * two 16-bit halves
			 */
			offset_lo =
				p_H264_Dpb->dpb_param.l.data[OFFSET_DELIMITER_LO];
			offset_hi =
				p_H264_Dpb->dpb_param.l.data[OFFSET_DELIMITER_HI];
			p_H264_Dpb->mVideo.dec_picture->offset_delimiter =
				(offset_lo | offset_hi << 16);
			p_H264_Dpb->mVideo.dec_picture->buf_spec_num = -1;
			p_H264_Dpb->mVideo.dec_picture->
				colocated_buf_index = -1;
			update_pic_num(p_H264_Dpb);

			if ((currSlice->structure == TOP_FIELD) ||
				(currSlice->structure == BOTTOM_FIELD)) {
				/* check for frame store with same
				 * pic_number
				 */
				check_frame_store_same_pic_num(p_Dpb, p,
					currSlice);
			}

			/* still -1 => not sharing a field pair's bufspec;
			 * allocate a fresh one
			 */
			if (p_H264_Dpb->mVideo.dec_picture->buf_spec_num ==
				-1) {
				p_H264_Dpb->mVideo.dec_picture->buf_spec_num =
					get_free_buf_idx(p_H264_Dpb->vdec);
				if (p_H264_Dpb->mVideo.dec_picture->buf_spec_num
					< 0) {
					p_H264_Dpb->buf_alloc_fail = 1;
					p_H264_Dpb->mVideo.dec_picture->
						buf_spec_is_alloced = 0;
				} else
					p_H264_Dpb->mVideo.dec_picture->
						buf_spec_is_alloced = 1;

				if (p_H264_Dpb->mVideo.dec_picture->
					used_for_reference) {
					p_H264_Dpb->mVideo.dec_picture->
						colocated_buf_index =
						allocate_colocate_buf(
							p_H264_Dpb);
				}
			}
#endif
			if (post_picture_early(p_H264_Dpb->vdec,
				p_H264_Dpb->mVideo.dec_picture->buf_spec_num))
				return -1;
		}
	}

	if (p_H264_Dpb->buf_alloc_fail)
		return -1;

	/* build initial reference lists per slice type, then apply RPLR */
	if (p_H264_Dpb->mSlice.slice_type == P_SLICE)
		init_lists_p_slice(&p_H264_Dpb->mSlice);
	else if (p_H264_Dpb->mSlice.slice_type == B_SLICE)
		init_lists_b_slice(&p_H264_Dpb->mSlice);
	else
		init_lists_i_slice(&p_H264_Dpb->mSlice);

	reorder_lists(&p_H264_Dpb->mSlice);

	if (p_H264_Dpb->mSlice.structure == FRAME)
		init_mbaff_lists(p_H264_Dpb, &p_H264_Dpb->mSlice);

	if (new_pic_flag)
		return 1;

	return 0;
}

/*
 * get_cur_slice_picture_struct() - picture structure (FRAME/
 * TOP_FIELD/BOTTOM_FIELD) of the slice currently held in mSlice.
 */
enum PictureStructure get_cur_slice_picture_struct(
	struct h264_dpb_stru *p_H264_Dpb)
{
	struct Slice *currSlice = &p_H264_Dpb->mSlice;
	return currSlice->structure;
}

/*
 * is_pic_in_dpb() - 1 if @pic is referenced by any frame store in
 * the DPB (as frame, top or bottom field), else 0.
 */
static unsigned char is_pic_in_dpb(struct h264_dpb_stru *p_H264_Dpb,
	struct StorablePicture *pic)
{
	unsigned char ret = 0;
	int i;
	struct DecodedPictureBuffer *p_Dpb =
		&p_H264_Dpb->mDPB;
	for (i = 0; i < p_Dpb->used_size; i++) {
		if (p_Dpb->fs[i]->top_field == pic ||
			p_Dpb->fs[i]->bottom_field == pic ||
			p_Dpb->fs[i]->frame == pic) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * dpb_check_ref_list_error() - validate the current reference lists.
 * Returns 0 when OK (or when the check is skipped for the first
 * output picture); nonzero error codes otherwise:
 *  1/2 list0/list1 entry not present in the DPB,
 *  3/4 list0/list1 entry is a non_existing concealment frame,
 *  5/6 NULL entry in list0/list1. (body continues below)
 */
int dpb_check_ref_list_error(
	struct h264_dpb_stru *p_H264_Dpb)
{
	int i;
	/*int j;*/
	struct Slice *currSlice = &p_H264_Dpb->mSlice;
	/* in first output, ignore ref check */
	if ((p_H264_Dpb->first_insert_frame == FirstInsertFrm_OUT) &&
		(p_H264_Dpb->mVideo.dec_picture) &&
		p_H264_Dpb->first_output_poc > p_H264_Dpb->mVideo.dec_picture->poc) {

		dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL,
			"p_H264_Dpb->first_output_poc %d, p_H264_Dpb->mVideo.dec_picture->poc %d\n",
			p_H264_Dpb->first_output_poc, p_H264_Dpb->mVideo.dec_picture->poc);
		return 0;
	}
	if ((currSlice->slice_type != I_SLICE) &&
		(currSlice->slice_type != SI_SLICE)) {
		for (i = 0; i < currSlice->listXsize[0];
i++) { + /*for (j = i + 1; j < currSlice->listXsize[0]; j++) { + if(currSlice->listX[0][i]->pic_num == + currSlice->listX[0][j]->pic_num) + return 1; + }*/ + if (currSlice->listX[0][i] == NULL) + return 5; + if (!is_pic_in_dpb(p_H264_Dpb, + currSlice->listX[0][i])) + return 1; + if (currSlice->listX[0][i]->frame && + currSlice->listX[0][i]->frame->non_existing) + return 3; + } + } + + if (currSlice->slice_type == B_SLICE) { + for (i = 0; i < currSlice->listXsize[1]; i++) { + /*for (j = i + 1; j < currSlice->listXsize[1]; j++) { + if(currSlice->listX[1][i]->pic_num == + currSlice->listX[1][j]->pic_num) + return 2; + } + for (j = 0; j < currSlice->listXsize[0]; j++) { + if(currSlice->listX[1][i]->pic_num == + currSlice->listX[0][j]->pic_num) + return 3; + }*/ + if (currSlice->listX[1][i] == NULL) + return 6; + if (!is_pic_in_dpb(p_H264_Dpb, + currSlice->listX[1][i])) + return 2; + if (currSlice->listX[1][i]->frame && + currSlice->listX[1][i]->frame->non_existing) + return 4; +#if 0 + if (currSlice->listXsize[0] == 1 && + currSlice->listXsize[1] == 1 && + currSlice->listX[1][0] == + currSlice->listX[0][0]) + return 3; +#endif + } + } + return 0; +} +
diff --git a/drivers/frame_provider/decoder_v4l/h264_multi/h264_dpb.h b/drivers/frame_provider/decoder_v4l/h264_multi/h264_dpb.h new file mode 100644 index 0000000..e1d1ab4 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/h264_multi/h264_dpb.h
@@ -0,0 +1,1008 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef H264_DPB_H_ +#define H264_DPB_H_ + +#define ERROR_CHECK + +#define OUTPUT_BUFFER_IN_C + +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_VDEC_STATUS 0X0001 +#define PRINT_FLAG_UCODE_EVT 0x0002 +#define PRINT_FLAG_MMU_DETAIL 0x0004 +#define PRINT_FLAG_ERRORFLAG_DBG 0x0008 +#define PRINT_FLAG_DPB_DETAIL 0x0010 +#define PRINT_FLAG_DEC_DETAIL 0x0020 +#define PRINT_FLAG_VDEC_DETAIL 0x0040 +#define PRINT_FLAG_DUMP_DPB 0x0080 +#define PRINT_FRAMEBASE_DATA 0x0100 +#define PRINT_FLAG_DEBUG_POC 0x0200 +#define RRINT_FLAG_RPM 0x0400 +#define DEBUG_DISABLE_RUNREADY_RMBUF 0x0800 +#define PRINT_FLAG_DUMP_BUFSPEC 0x1000 +#define PRINT_FLAG_FCC_STATUS 0x2000 +#define PRINT_FLAG_SEI_DETAIL 0x4000 +#define PRINT_FLAG_V4L_DETAIL 0x8000 +#define DISABLE_ERROR_HANDLE 0x10000 +#define DEBUG_DUMP_STAT 0x80000 +#define DEBUG_TIMEOUT_DEC_STAT 0x800000 + +/*setting canvas mode and endian. + if this flag is set, value of canvas mode + will according to the value of mem_map_mode. + endian will be forced set to 0 in + CANVAS_BLKMODE_LINEAR mode. + otherwise picture will display abnormal. 
+ if this flag is not set, value of canvas mode + will be determined by the user speace config. + endian will be set 7 in CANVAS_BLKMODE_LINEAR mode. +*/ +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 + +#define MVC_EXTENSION_ENABLE 0 +#define PRINTREFLIST 0 + +#define MAX_LIST_SIZE 33 + +#define H264_OUTPUT_MODE_NORMAL 0x4 +#define H264_OUTPUT_MODE_FAST 0x8 + +//#define FALSE 0 + +#define H264_SLICE_HEAD_DONE 0x01 +#define H264_PIC_DATA_DONE 0x02 +/*#define H264_SPS_DONE 0x03*/ +/*#define H264_PPS_DONE 0x04*/ +/*#define H264_SLICE_DATA_DONE 0x05*/ +/*#define H264_DATA_END 0x06*/ + +#define H264_CONFIG_REQUEST 0x11 +#define H264_DATA_REQUEST 0x12 +#define H264_WRRSP_REQUEST 0x13 +#define H264_WRRSP_DONE 0x14 + +#define H264_DECODE_BUFEMPTY 0x20 +#define H264_DECODE_TIMEOUT 0x21 +#define H264_SEARCH_BUFEMPTY 0x22 +#define H264_DECODE_OVER_SIZE 0x23 + +#define VIDEO_SIGNAL_LOW 0x26 +#define VIDEO_SIGNAL_HIGHT 0x27 + + +#define H264_FIND_NEXT_PIC_NAL 0x50 +#define H264_FIND_NEXT_DVEL_NAL 0x51 +#define H264_AUX_DATA_READY 0x52 + +#define H264_SEI_DATA_READY 0x53 +#define H264_SEI_DATA_DONE 0x54 + + /* 0x8x, search state*/ +#define H264_STATE_SEARCH_AFTER_SPS 0x80 +#define H264_STATE_SEARCH_AFTER_PPS 0x81 +#define H264_STATE_PARSE_SLICE_HEAD 0x82 +#define H264_STATE_SEARCH_HEAD 0x83 + /**/ +#define H264_ACTION_SEARCH_HEAD 0xf0 +#define H264_ACTION_DECODE_SLICE 0xf1 +#define H264_ACTION_CONFIG_DONE 0xf2 +#define H264_ACTION_DECODE_NEWPIC 0xf3 +#define H264_ACTION_DECODE_START 0xff + +#define RPM_BEGIN 0x0 +#define RPM_END 0x400 + +#define val(s) (s[0]|(s[1]<<16)) + +#define FRAME_IN_DPB 24 +#define DPB_OFFSET 0x100 +#define MMCO_OFFSET 0x200 +union param { +#if 0 +#define H_TIME_STAMP_START 0X00 +#define H_TIME_STAMP_END 0X17 +#define PTS_ZERO_0 0X18 +#define PTS_ZERO_1 0X19 +#endif +#define FIXED_FRAME_RATE_FLAG 0X21 + +#define OFFSET_DELIMITER_LO 0x2f +#define OFFSET_DELIMITER_HI 0x30 + + +#define SLICE_IPONLY_BREAK 0X5C +#define PREV_MAX_REFERENCE_FRAME_NUM 0X5D +#define 
EOS 0X5E +#define FRAME_PACKING_TYPE 0X5F +#define OLD_POC_PAR_1 0X60 +#define OLD_POC_PAR_2 0X61 +#define PREV_MBX 0X62 +#define PREV_MBY 0X63 +#define ERROR_SKIP_MB_NUM 0X64 +#define ERROR_MB_STATUS 0X65 +#define L0_PIC0_STATUS 0X66 +#define TIMEOUT_COUNTER 0X67 +#define BUFFER_SIZE 0X68 +#define BUFFER_SIZE_HI 0X69 +#define CROPPING_LEFT_RIGHT 0X6A +#define CROPPING_TOP_BOTTOM 0X6B +#if 1 + /* sps_flags2: + *bit 3, bitstream_restriction_flag + *bit 2, pic_struct_present_flag + *bit 1, vcl_hrd_parameters_present_flag + *bit 0, nal_hrd_parameters_present_flag + */ +#define SPS_FLAGS2 0x6c +#define NUM_REORDER_FRAMES 0x6d +#else +#define POC_SELECT_NEED_SWAP 0X6C +#define POC_SELECT_SWAP 0X6D +#endif +#define MAX_BUFFER_FRAME 0X6E + +#define NON_CONFORMING_STREAM 0X70 +#define RECOVERY_POINT 0X71 +#define POST_CANVAS 0X72 +#define POST_CANVAS_H 0X73 +#define SKIP_PIC_COUNT 0X74 +#define TARGET_NUM_SCALING_LIST 0X75 +#define FF_POST_ONE_FRAME 0X76 +#define PREVIOUS_BIT_CNT 0X77 +#define MB_NOT_SHIFT_COUNT 0X78 +#define PIC_STATUS 0X79 +#define FRAME_COUNTER 0X7A +#define NEW_SLICE_TYPE 0X7B +#define NEW_PICTURE_STRUCTURE 0X7C +#define NEW_FRAME_NUM 0X7D +#define NEW_IDR_PIC_ID 0X7E +#define IDR_PIC_ID 0X7F + +/* h264 LOCAL */ +#define NAL_UNIT_TYPE 0X80 +#define NAL_REF_IDC 0X81 +#define SLICE_TYPE 0X82 +#define LOG2_MAX_FRAME_NUM 0X83 +#define FRAME_MBS_ONLY_FLAG 0X84 +#define PIC_ORDER_CNT_TYPE 0X85 +#define LOG2_MAX_PIC_ORDER_CNT_LSB 0X86 +#define PIC_ORDER_PRESENT_FLAG 0X87 +#define REDUNDANT_PIC_CNT_PRESENT_FLAG 0X88 +#define PIC_INIT_QP_MINUS26 0X89 +#define DEBLOCKING_FILTER_CONTROL_PRESENT_FLAG 0X8A +#define NUM_SLICE_GROUPS_MINUS1 0X8B +#define MODE_8X8_FLAGS 0X8C +#define ENTROPY_CODING_MODE_FLAG 0X8D +#define SLICE_QUANT 0X8E +#define TOTAL_MB_HEIGHT 0X8F +#define PICTURE_STRUCTURE 0X90 +#define TOP_INTRA_TYPE 0X91 +#define RV_AI_STATUS 0X92 +#define AI_READ_START 0X93 +#define AI_WRITE_START 0X94 +#define AI_CUR_BUFFER 0X95 +#define AI_DMA_BUFFER 0X96 
+#define AI_READ_OFFSET 0X97 +#define AI_WRITE_OFFSET 0X98 +#define AI_WRITE_OFFSET_SAVE 0X99 +#define RV_AI_BUFF_START 0X9A +#define I_PIC_MB_COUNT 0X9B +#define AI_WR_DCAC_DMA_CTRL 0X9C +#define SLICE_MB_COUNT 0X9D +#define PICTYPE 0X9E +#define SLICE_GROUP_MAP_TYPE 0X9F +#define MB_TYPE 0XA0 +#define MB_AFF_ADDED_DMA 0XA1 +#define PREVIOUS_MB_TYPE 0XA2 +#define WEIGHTED_PRED_FLAG 0XA3 +#define WEIGHTED_BIPRED_IDC 0XA4 +/* bit 3:2 - PICTURE_STRUCTURE + * bit 1 - MB_ADAPTIVE_FRAME_FIELD_FLAG + * bit 0 - FRAME_MBS_ONLY_FLAG + */ +#define MBFF_INFO 0XA5 +#define TOP_INTRA_TYPE_TOP 0XA6 + +#define RV_AI_BUFF_INC 0xa7 + +#define DEFAULT_MB_INFO_LO 0xa8 + +/* 0 -- no need to read + * 1 -- need to wait Left + * 2 -- need to read Intra + * 3 -- need to read back MV + */ +#define NEED_READ_TOP_INFO 0xa9 +/* 0 -- idle + * 1 -- wait Left + * 2 -- reading top Intra + * 3 -- reading back MV + */ +#define READ_TOP_INFO_STATE 0xaa +#define DCAC_MBX 0xab +#define TOP_MB_INFO_OFFSET 0xac +#define TOP_MB_INFO_RD_IDX 0xad +#define TOP_MB_INFO_WR_IDX 0xae + +#define VLD_NO_WAIT 0 +#define VLD_WAIT_BUFFER 1 +#define VLD_WAIT_HOST 2 +#define VLD_WAIT_GAP 3 + +#define VLD_WAITING 0xaf + +#define MB_X_NUM 0xb0 +/* #define MB_WIDTH 0xb1 */ +#define MB_HEIGHT 0xb2 +#define MBX 0xb3 +#define TOTAL_MBY 0xb4 +#define INTR_MSK_SAVE 0xb5 + +/* #define has_time_stamp 0xb6 */ +#define NEED_DISABLE_PPE 0xb6 +#define IS_NEW_PICTURE 0XB7 +#define PREV_NAL_REF_IDC 0XB8 +#define PREV_NAL_UNIT_TYPE 0XB9 +#define FRAME_MB_COUNT 0XBA +#define SLICE_GROUP_UCODE 0XBB +#define SLICE_GROUP_CHANGE_RATE 0XBC +#define SLICE_GROUP_CHANGE_CYCLE_LEN 0XBD +#define DELAY_LENGTH 0XBE +#define PICTURE_STRUCT 0XBF +/* #define pre_picture_struct 0xc0 */ +#define DCAC_PREVIOUS_MB_TYPE 0xc1 + +#define TIME_STAMP 0XC2 +#define H_TIME_STAMP 0XC3 +#define VPTS_MAP_ADDR 0XC4 +#define H_VPTS_MAP_ADDR 0XC5 + +/*#define MAX_DPB_SIZE 0XC6*/ +#define PIC_INSERT_FLAG 0XC7 + +#define TIME_STAMP_START 0XC8 +#define TIME_STAMP_END 
0XDF + +#define OFFSET_FOR_NON_REF_PIC 0XE0 +#define OFFSET_FOR_TOP_TO_BOTTOM_FIELD 0XE2 +#define MAX_REFERENCE_FRAME_NUM 0XE4 +#define FRAME_NUM_GAP_ALLOWED 0XE5 +#define NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE 0XE6 +#define PROFILE_IDC_MMCO 0XE7 +#define LEVEL_IDC_MMCO 0XE8 +#define FRAME_SIZE_IN_MB 0XE9 +#define DELTA_PIC_ORDER_ALWAYS_ZERO_FLAG 0XEA +#define PPS_NUM_REF_IDX_L0_ACTIVE_MINUS1 0XEB +#define PPS_NUM_REF_IDX_L1_ACTIVE_MINUS1 0XEC +#define CURRENT_SPS_ID 0XED +#define CURRENT_PPS_ID 0XEE +/* bit 0 - sequence parameter set may change + * bit 1 - picture parameter set may change + * bit 2 - new dpb just inited + * bit 3 - IDR picture not decoded yet + * bit 5:4 - 0: mb level code loaded 1: picture + * level code loaded 2: slice level code loaded + */ +#define DECODE_STATUS 0XEF +#define FIRST_MB_IN_SLICE 0XF0 +#define PREV_MB_WIDTH 0XF1 +#define PREV_FRAME_SIZE_IN_MB 0XF2 +/*#define MAX_REFERENCE_FRAME_NUM_IN_MEM 0XF3*/ +/* bit 0 - aspect_ratio_info_present_flag + * bit 1 - timing_info_present_flag + * bit 2 - nal_hrd_parameters_present_flag + * bit 3 - vcl_hrd_parameters_present_flag + * bit 4 - pic_struct_present_flag + * bit 5 - bitstream_restriction_flag + */ +#define VUI_STATUS 0XF4 +#define ASPECT_RATIO_IDC 0XF5 +#define ASPECT_RATIO_SAR_WIDTH 0XF6 +#define ASPECT_RATIO_SAR_HEIGHT 0XF7 +#define NUM_UNITS_IN_TICK 0XF8 +#define TIME_SCALE 0XFA +#define CURRENT_PIC_INFO 0XFC +#define DPB_BUFFER_INFO 0XFD +#define REFERENCE_POOL_INFO 0XFE +#define REFERENCE_LIST_INFO 0XFF + struct{ + unsigned short data[RPM_END-RPM_BEGIN]; + } l; + struct{ + unsigned short dump[DPB_OFFSET]; + unsigned short dpb_base[FRAME_IN_DPB<<3]; + + unsigned short dpb_max_buffer_frame; + unsigned short actual_dpb_size; + + unsigned short colocated_buf_status; + + unsigned short num_forward_short_term_reference_pic; + unsigned short num_short_term_reference_pic; + unsigned short num_reference_pic; + + unsigned short current_dpb_index; + unsigned short current_decoded_frame_num; + 
unsigned short current_reference_frame_num; + + unsigned short l0_size; + unsigned short l1_size; + + /* [6:5] : nal_ref_idc */ + /* [4:0] : nal_unit_type */ + unsigned short NAL_info_mmco; + + /* [1:0] : 00 - top field, 01 - bottom field, + * 10 - frame, 11 - mbaff frame + */ + unsigned short picture_structure_mmco; + + unsigned short frame_num; + unsigned short pic_order_cnt_lsb; + + unsigned short num_ref_idx_l0_active_minus1; + unsigned short num_ref_idx_l1_active_minus1; + + unsigned short PrevPicOrderCntLsb; + unsigned short PreviousFrameNum; + + /* 32 bits variables */ + unsigned short delta_pic_order_cnt_bottom[2]; + unsigned short delta_pic_order_cnt_0[2]; + unsigned short delta_pic_order_cnt_1[2]; + + unsigned short PrevPicOrderCntMsb[2]; + unsigned short PrevFrameNumOffset[2]; + + unsigned short frame_pic_order_cnt[2]; + unsigned short top_field_pic_order_cnt[2]; + unsigned short bottom_field_pic_order_cnt[2]; + + unsigned short colocated_mv_addr_start[2]; + unsigned short colocated_mv_addr_end[2]; + unsigned short colocated_mv_wr_addr[2]; + + unsigned short frame_crop_left_offset; + unsigned short frame_crop_right_offset; + unsigned short frame_crop_top_offset; + unsigned short frame_crop_bottom_offset; + unsigned short chroma_format_idc; + } dpb; + struct { + unsigned short dump[MMCO_OFFSET]; + + /* array base address for offset_for_ref_frame */ + unsigned short offset_for_ref_frame_base[128]; + + /* 0 - Index in DPB + * 1 - Picture Flag + * [ 2] : 0 - short term reference, + * 1 - long term reference + * [ 1] : bottom field + * [ 0] : top field + * 2 - Picture Number (short term or long term) low 16 bits + * 3 - Picture Number (short term or long term) high 16 bits + */ + unsigned short reference_base[128]; + + /* command and parameter, until command is 3 */ + unsigned short l0_reorder_cmd[66]; + unsigned short l1_reorder_cmd[66]; + + /* command and parameter, until command is 0 */ + unsigned short mmco_cmd[44]; + + unsigned short l0_base[40]; + 
unsigned short l1_base[40]; + } mmco; + struct { + /* from ucode lmem, do not change this struct */ + } p; +}; + + +struct StorablePicture; +struct VideoParameters; +struct DecodedPictureBuffer; + +/* New enum for field processing */ +enum PictureStructure { + FRAME, + TOP_FIELD, + BOTTOM_FIELD +}; + +typedef enum { + PIC_SINGLE_FRAME = 0, + PIC_TOP, + PIC_BOT, + PIC_TOP_BOT, + PIC_BOT_TOP, + PIC_TOP_BOT_TOP = 5, + PIC_BOT_TOP_BOT, + PIC_DOUBLE_FRAME, + PIC_TRIPLE_FRAME, + PIC_INVALID, +} PicStruct_E; + +#define I_Slice 2 +#define P_Slice 5 +#define B_Slice 6 +#define P_Slice_0 0 +#define B_Slice_1 1 +#define I_Slice_7 7 + +enum SliceType { + P_SLICE = 0, + B_SLICE = 1, + I_SLICE = 2, + SP_SLICE = 3, + SI_SLICE = 4, + NUM_SLICE_TYPES = 5 +}; + +enum ProfileIDC { + FREXT_CAVLC444 = 44, /*!< YUV 4:4:4/14 "CAVLC 4:4:4"*/ + BASELINE = 66, /*!< YUV 4:2:0/8 "Baseline"*/ + MAIN = 77, /*!< YUV 4:2:0/8 "Main"*/ + EXTENDED = 88, /*!< YUV 4:2:0/8 "Extended"*/ + FREXT_HP = 100, /*!< YUV 4:2:0/8 "High"*/ + FREXT_Hi10P = 110, /*!< YUV 4:2:0/10 "High 10"*/ + FREXT_Hi422 = 122, /*!< YUV 4:2:2/10 "High 4:2:2"*/ + FREXT_Hi444 = 244, /*!< YUV 4:4:4/14 "High 4:4:4"*/ + MVC_HIGH = 118, /*!< YUV 4:2:0/8 "Multiview High"*/ + STEREO_HIGH = 128 /*!< YUV 4:2:0/8 "Stereo High"*/ +}; + +enum FirstInsertFrm_State { + FirstInsertFrm_IDLE = 0, + FirstInsertFrm_OUT = 1, + FirstInsertFrm_RESET = 2, + FirstInsertFrm_SKIPDONE = 3, +}; + + +struct SPSParameters { + unsigned int profile_idc; + unsigned int level_idc; + int pic_order_cnt_type; + int log2_max_pic_order_cnt_lsb_minus4; + int num_ref_frames_in_pic_order_cnt_cycle; + short offset_for_ref_frame[128]; + short offset_for_non_ref_pic; + short offset_for_top_to_bottom_field; + + /**/ + int frame_mbs_only_flag; + int num_ref_frames; + int max_dpb_size; + int log2_max_frame_num_minus4; + int frame_num_gap_allowed; +}; + +#define DEC_REF_PIC_MARKING_BUFFER_NUM_MAX 45 +struct DecRefPicMarking_s { + int memory_management_control_operation; + int 
difference_of_pic_nums_minus1; + int long_term_pic_num; + int long_term_frame_idx; + int max_long_term_frame_idx_plus1; + struct DecRefPicMarking_s *Next; +}; + +#define REORDERING_COMMAND_MAX_SIZE 33 +struct Slice { + int first_mb_in_slice; + int mode_8x8_flags; + int picture_structure_mmco; + + int frame_num; + int idr_flag; + int toppoc; + int bottompoc; + int framepoc; + int pic_order_cnt_lsb; + int PicOrderCntMsb; + unsigned char field_pic_flag; + unsigned char bottom_field_flag; + int ThisPOC; + int nal_reference_idc; + int AbsFrameNum; + int delta_pic_order_cnt_bottom; + int delta_pic_order_cnt[2]; + + /**/ + char listXsize[6]; + struct StorablePicture *listX[6][MAX_LIST_SIZE * 2]; + + /**/ + enum PictureStructure structure; + int long_term_reference_flag; + int no_output_of_prior_pics_flag; + int adaptive_ref_pic_buffering_flag; + + struct VideoParameters *p_Vid; + struct DecodedPictureBuffer *p_Dpb; + int num_ref_idx_active[2]; /* number of available list references */ + + /*modification*/ + int slice_type; /* slice type */ + int ref_pic_list_reordering_flag[2]; + int modification_of_pic_nums_idc[2][REORDERING_COMMAND_MAX_SIZE]; + int abs_diff_pic_num_minus1[2][REORDERING_COMMAND_MAX_SIZE]; + int long_term_pic_idx[2][REORDERING_COMMAND_MAX_SIZE]; + /**/ + unsigned char dec_ref_pic_marking_buffer_valid; + struct DecRefPicMarking_s + dec_ref_pic_marking_buffer[DEC_REF_PIC_MARKING_BUFFER_NUM_MAX]; + int pic_struct; +}; + +struct OldSliceParams { + unsigned int field_pic_flag; + unsigned int frame_num; + int nal_ref_idc; + unsigned int pic_oder_cnt_lsb; + int delta_pic_oder_cnt_bottom; + int delta_pic_order_cnt[2]; + unsigned char bottom_field_flag; + unsigned char idr_flag; + int idr_pic_id; + int pps_id; +#if (MVC_EXTENSION_ENABLE) + int view_id; + int inter_view_flag; + int anchor_pic_flag; +#endif + int layer_id; +}; + +struct VideoParameters { + int PrevPicOrderCntMsb; + int PrevPicOrderCntLsb; + unsigned char last_has_mmco_5; + unsigned char 
last_pic_bottom_field; + int ThisPOC; + int PreviousFrameNum; + int FrameNumOffset; + int PreviousFrameNumOffset; + int max_frame_num; + unsigned int pre_frame_num; + int ExpectedDeltaPerPicOrderCntCycle; + int PicOrderCntCycleCnt; + int FrameNumInPicOrderCntCycle; + int ExpectedPicOrderCnt; + + /**/ + struct SPSParameters *active_sps; + struct Slice **ppSliceList; + int iSliceNumOfCurrPic; + int conceal_mode; + int earlier_missing_poc; + int pocs_in_dpb[100]; + + struct OldSliceParams old_slice; + /**/ + struct StorablePicture *dec_picture; + struct StorablePicture *no_reference_picture; + + /*modification*/ + int non_conforming_stream; + int recovery_point; +}; + +static inline int imin(int a, int b) +{ + return ((a) < (b)) ? (a) : (b); +} + +static inline int imax(int a, int b) +{ + return ((a) > (b)) ? (a) : (b); +} + +#define MAX_PIC_BUF_NUM 128 +#define MAX_NUM_SLICES 50 + +struct StorablePicture { +/**/ + int width; + int height; + + int y_canvas_index; + int u_canvas_index; + int v_canvas_index; +/**/ + int index; + unsigned char is_used; + + enum PictureStructure structure; + + int poc; + int top_poc; + int bottom_poc; + int frame_poc; + unsigned int frame_num; + unsigned int recovery_frame; + + int pic_num; + int buf_spec_num; + int buf_spec_is_alloced; + int colocated_buf_index; + int long_term_pic_num; + int long_term_frame_idx; + + unsigned char is_long_term; + int used_for_reference; + int is_output; +#if 1 + /* rain */ + int pre_output; +#endif + int non_existing; + int separate_colour_plane_flag; + + short max_slice_id; + + int size_x, size_y, size_x_cr, size_y_cr; + int size_x_m1, size_y_m1, size_x_cr_m1, size_y_cr_m1; + int coded_frame; + int mb_aff_frame_flag; + unsigned int PicWidthInMbs; + unsigned int PicSizeInMbs; + int iLumaPadY, iLumaPadX; + int iChromaPadY, iChromaPadX; + + /* for mb aff, if frame for referencing the top field */ + struct StorablePicture *top_field; + /* for mb aff, if frame for referencing the bottom field */ + struct 
StorablePicture *bottom_field; + /* for mb aff, if field for referencing the combined frame */ + struct StorablePicture *frame; + + int slice_type; + int idr_flag; + int no_output_of_prior_pics_flag; + int long_term_reference_flag; + int adaptive_ref_pic_buffering_flag; + + int chroma_format_idc; + int frame_mbs_only_flag; + int frame_cropping_flag; + int frame_crop_left_offset; + int frame_crop_right_offset; + int frame_crop_top_offset; + int frame_crop_bottom_offset; + int qp; + int chroma_qp_offset[2]; + int slice_qp_delta; + /* stores the memory management control operations */ + struct DecRefPicMarking_s *dec_ref_pic_marking_buffer; + + /* picture error concealment */ + /*indicates if this is a concealed picture */ + int concealed_pic; + + /* variables for tone mapping */ + int seiHasTone_mapping; + int tone_mapping_model_id; + int tonemapped_bit_depth; + /* imgpel* tone_mapping_lut; tone mapping look up table */ + + int proc_flag; +#if (MVC_EXTENSION_ENABLE) + int view_id; + int inter_view_flag; + int anchor_pic_flag; +#endif + int iLumaStride; + int iChromaStride; + int iLumaExpandedHeight; + int iChromaExpandedHeight; + /* imgpel **cur_imgY; for more efficient get_block_luma */ + int no_ref; + int iCodingType; + + char listXsize[MAX_NUM_SLICES][2]; + struct StorablePicture **listX[MAX_NUM_SLICES][2]; + int layer_id; + u32 offset_delimiter; + u32 pts; + u64 pts64; + u64 timestamp; + unsigned char data_flag; + int pic_struct; + + /* picture qos infomation*/ + int frame_size; + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; + u32 pic_size; +}; + +struct FrameStore { + /* rain */ + int buf_spec_num; + /* rain */ + int colocated_buf_index; + + /* 0=empty; 1=top; 2=bottom; 3=both fields (or frame) */ + int is_used; + /* 0=not used for ref; 1=top used; 2=bottom used; + * 3=both fields (or frame) used + */ + int is_reference; + /* 0=not used for ref; 1=top used; 2=bottom used; + * 
3=both fields (or frame) used + */ + int is_long_term; + /* original marking by nal_ref_idc: 0=not used for ref; 1=top used; + * 2=bottom used; 3=both fields (or frame) used + */ + int is_orig_reference; + + int is_non_existent; + + unsigned int frame_num; + unsigned int recovery_frame; + + int frame_num_wrap; + int long_term_frame_idx; + int is_output; +#if 1 + /* rain */ + int pre_output; + /* index in gFrameStore */ + int index; +#define I_FLAG 0x01 +#define IDR_FLAG 0x02 +#define ERROR_FLAG 0x10 +#define NULL_FLAG 0x20 +#define NODISP_FLAG 0x80 + unsigned char data_flag; +#endif + int poc; + + /* picture error concealment */ + int concealment_reference; + + struct StorablePicture *frame; + struct StorablePicture *top_field; + struct StorablePicture *bottom_field; + +#if (MVC_EXTENSION_ENABLE) + int view_id; + int inter_view_flag[2]; + int anchor_pic_flag[2]; +#endif + int layer_id; + u32 offset_delimiter; + u32 pts; + u64 pts64; + u64 timestamp; + + + /* picture qos infomation*/ + int slice_type; + int frame_size; + + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; + int dpb_frame_count; + u32 hw_decode_time; + u32 frame_size2; // For recording the chunk->size in frame mode + bool show_frame; + struct dma_fence *fence; + u32 decoded_frame_size; + u64 last_field_timestamp; +}; + +/* #define DPB_SIZE_MAX 16 */ +#define DPB_SIZE_MAX 32 +struct DecodedPictureBuffer { + struct VideoParameters *p_Vid; + /* InputParameters *p_Inp; ??? 
*/ + struct FrameStore *fs[DPB_SIZE_MAX]; + struct FrameStore *fs_ref[DPB_SIZE_MAX]; + struct FrameStore *fs_ltref[DPB_SIZE_MAX]; + /* inter-layer reference (for multi-layered codecs) */ + struct FrameStore *fs_ilref[DPB_SIZE_MAX]; + /**/ + struct FrameStore *fs_list0[DPB_SIZE_MAX]; + struct FrameStore *fs_list1[DPB_SIZE_MAX]; + struct FrameStore *fs_listlt[DPB_SIZE_MAX]; + + /**/ + unsigned int size; + unsigned int used_size; + unsigned int ref_frames_in_buffer; + unsigned int ltref_frames_in_buffer; + int last_output_poc; +#if (MVC_EXTENSION_ENABLE) + int last_output_view_id; +#endif + int max_long_term_pic_idx; + + + int init_done; + int first_pic_done; /*by rain*/ + int num_ref_frames; + + struct FrameStore *last_picture; + unsigned int used_size_il; + int layer_id; + + /* DPB related function; */ +}; + +struct h264_dpb_stru { + struct vdec_s *vdec; + int decoder_index; + + union param dpb_param; + + int decode_idx; + int buf_num; + int curr_POC; + int reorder_pic_num; + unsigned int dec_dpb_size; + u8 fast_output_enable; + /*poc_even_flag: + 0, init; 1, odd; 2, even*/ + u8 poc_even_odd_flag; + u32 decode_pic_count; + /**/ + unsigned int max_reference_size; + + unsigned int colocated_buf_map; + unsigned int colocated_buf_count; + unsigned int colocated_mv_addr_start; + unsigned int colocated_mv_addr_end; + unsigned int colocated_buf_size; + + struct DecodedPictureBuffer mDPB; + struct Slice mSlice; + struct VideoParameters mVideo; + struct SPSParameters mSPS; + + struct StorablePicture m_PIC[MAX_PIC_BUF_NUM]; + struct FrameStore mFrameStore[DPB_SIZE_MAX]; + + /*vui*/ + unsigned int vui_status; + unsigned int num_units_in_tick; + unsigned int time_scale; + unsigned int fixed_frame_rate_flag; + unsigned int aspect_ratio_idc; + unsigned int aspect_ratio_sar_width; + unsigned int aspect_ratio_sar_height; + u8 bitstream_restriction_flag; + u16 num_reorder_frames; + u16 max_dec_frame_buffering; + + unsigned int frame_crop_left_offset; + unsigned int 
frame_crop_right_offset; + unsigned int frame_crop_top_offset; + unsigned int frame_crop_bottom_offset; + unsigned int chroma_format_idc; + + unsigned int dec_dpb_status; + unsigned int last_dpb_status; + unsigned char buf_alloc_fail; + unsigned int dpb_error_flag; + unsigned int reorder_output; + unsigned int first_insert_frame; + int first_output_poc; + int dpb_frame_count; + /* need wait aux data when there is data after pic done in dv stream */ + bool wait_aux_data_flag; + u32 without_display_mode; + int long_term_reference_flag; +}; + + +extern unsigned int h264_debug_flag; +extern unsigned int h264_debug_mask; + +int dpb_print(int indext, int debug_flag, const char *fmt, ...); + +int dpb_print_cont(int index, int debug_flag, const char *fmt, ...); + +unsigned char dpb_is_debug(int index, int debug_flag); + +int prepare_display_buf(struct vdec_s *vdec, struct FrameStore *frame); + +int release_buf_spec_num(struct vdec_s *vdec, int buf_spec_num); + +void set_frame_output_flag(struct h264_dpb_stru *p_H264_Dpb, int index); + +int is_there_unused_frame_from_dpb(struct DecodedPictureBuffer *p_Dpb); + +int h264_slice_header_process(struct h264_dpb_stru *p_H264_Dpb, int *frame_num_gap); + +void dpb_init_global(struct h264_dpb_stru *p_H264_Dpb, + int id, int actual_dpb_size, int max_reference_size); + +void init_colocate_buf(struct h264_dpb_stru *p_H264_Dpb, int count); + +int release_colocate_buf(struct h264_dpb_stru *p_H264_Dpb, int index); + +int get_free_buf_idx(struct vdec_s *vdec); + +int store_picture_in_dpb(struct h264_dpb_stru *p_H264_Dpb, + struct StorablePicture *p, unsigned char data_flag); + +int release_picture(struct h264_dpb_stru *p_H264_Dpb, + struct StorablePicture *pic); + +void remove_dpb_pictures(struct h264_dpb_stru *p_H264_Dpb); + +void bufmgr_post(struct h264_dpb_stru *p_H264_Dpb); + +void bufmgr_force_recover(struct h264_dpb_stru *p_H264_Dpb); + +int get_long_term_flag_by_buf_spec_num(struct h264_dpb_stru *p_H264_Dpb, + int buf_spec_num); + 
+void bufmgr_h264_remove_unused_frame(struct h264_dpb_stru *p_H264_Dpb, + u8 force_flag); + +void flush_dpb(struct h264_dpb_stru *p_H264_Dpb); + +void print_pic_info(int decindex, const char *info, + struct StorablePicture *pic, + int slice_type); +void dump_dpb(struct DecodedPictureBuffer *p_Dpb, u8 force); + +void dump_pic(struct h264_dpb_stru *p_H264_Dpb); + +void * vh264_get_bufspec_lock(struct vdec_s *vdec); + +enum PictureStructure get_cur_slice_picture_struct( + struct h264_dpb_stru *p_H264_Dpb); + +int dpb_check_ref_list_error( + struct h264_dpb_stru *p_H264_Dpb); + +void unmark_for_reference(struct DecodedPictureBuffer *p_Dpb, + struct FrameStore *fs); + +void update_ref_list(struct DecodedPictureBuffer *p_Dpb); + +int post_picture_early(struct vdec_s *vdec, int index); + +int is_used_for_reference(struct FrameStore *fs); + +#endif
diff --git a/drivers/frame_provider/decoder_v4l/h264_multi/vh264.h b/drivers/frame_provider/decoder_v4l/h264_multi/vh264.h new file mode 100644 index 0000000..6c8e4ad --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/h264_multi/vh264.h
@@ -0,0 +1,27 @@ +/* + * drivers/amlogic/media/frame_provider/decoder/h264/vh264.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef VH264_H +#define VH264_H + +extern int query_video_status(int type, int *value); + +/* extern s32 vh264_init(void); */ + +extern s32 vh264_release(void); + +#endif /* VMPEG4_H */
diff --git a/drivers/frame_provider/decoder_v4l/h264_multi/vmh264.c b/drivers/frame_provider/decoder_v4l/h264_multi/vmh264.c new file mode 100644 index 0000000..78539de --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/h264_multi/vmh264.c
@@ -0,0 +1,11464 @@ +/* + * drivers/amlogic/amports/vh264.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/random.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/tsync.h> +#include <linux/workqueue.h> +#include <linux/dma-mapping.h> +#include <linux/atomic.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../../decoder/utils/vdec_input.h" +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../decoder/utils/vdec.h" +#include "../../decoder/utils/amvdec.h" +#include "vh264.h" +#include "../../../stream_input/amports/streambuf.h" +#include <linux/delay.h> +#include 
<linux/amlogic/media/codec_mm/configs.h> +#include "../../decoder/utils/decoder_mmu_box.h" +#include "../../decoder/utils/decoder_bmmu_box.h" +#include "../../decoder/utils/firmware.h" +#include <linux/uaccess.h> +#include "../../decoder/utils/config_parser.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../../decoder/utils/vdec_v4l2_buffer_ops.h" +#include <linux/crc32.h> +#include <media/v4l2-mem2mem.h> +#include "../../decoder/utils/vdec_feature.h" + +#define DETECT_WRONG_MULTI_SLICE + +/* +to enable DV of frame mode +#define DOLBY_META_SUPPORT in ucode +*/ + +#undef pr_info +#define pr_info printk +#define VDEC_DW +#define DEBUG_UCODE +#define MEM_NAME "codec_m264" +#define MULTI_INSTANCE_FRAMEWORK +/* #define ONE_COLOCATE_BUF_PER_DECODE_BUF */ +#include "h264_dpb.h" +/* #define SEND_PARAM_WITH_REG */ + +#define DRIVER_NAME "ammvdec_h264_v4l" +#define DRIVER_HEADER_NAME "ammvdec_h264_header" + +#define CHECK_INTERVAL (HZ/100) + +#define SEI_DATA_SIZE (8*1024) +#define SEI_ITU_DATA_SIZE (4*1024) + +#define RATE_MEASURE_NUM 8 +#define RATE_CORRECTION_THRESHOLD 5 +#define RATE_2397_FPS 4004 /* 23.97 */ +#define RATE_25_FPS 3840 /* 25 */ +#define RATE_2997_FPS 3203 /* 29.97 */ +#define RATE_5994_FPS 1601 /* 59.94 */ +#define RATE_11990_FPS 800 /* 119.90 */ +#define DUR2PTS(x) ((x)*90/96) +#define PTS2DUR(x) ((x)*96/90) +#define DUR2PTS_REM(x) (x*90 - DUR2PTS(x)*96) +#define FIX_FRAME_RATE_CHECK_IFRAME_NUM 2 + +#define FIX_FRAME_RATE_OFF 0 +#define FIX_FRAME_RATE_ON 1 +#define FIX_FRAME_RATE_SMOOTH_CHECKING 2 + +#define DEC_CONTROL_FLAG_FORCE_2997_1080P_INTERLACE 0x0001 +#define DEC_CONTROL_FLAG_FORCE_2500_576P_INTERLACE 0x0002 +#define DEC_CONTROL_FLAG_FORCE_RATE_2397_FPS_FIX_FRAME_RATE 0x0010 +#define DEC_CONTROL_FLAG_FORCE_RATE_2997_FPS_FIX_FRAME_RATE 0x0020 + +#define DECODE_ID(hw) (hw_to_vdec(hw)->id) + +#define RATE_MEASURE_NUM 8 +#define RATE_CORRECTION_THRESHOLD 5 +#define RATE_24_FPS 4004 /* 23.97 */ +#define RATE_25_FPS 3840 /* 25 */ 
+#define DUR2PTS(x) ((x)*90/96) +#define PTS2DUR(x) ((x)*96/90) +#define DUR2PTS_REM(x) (x*90 - DUR2PTS(x)*96) +#define FIX_FRAME_RATE_CHECK_IDRFRAME_NUM 2 + +#define ALIGN_WIDTH(x) (ALIGN((x), 64)) +#define ALIGN_HEIGHT(x) (ALIGN((x), 32)) + +#define H264_DEV_NUM 9 + +#define CONSTRAIN_MAX_BUF_NUM + +#define H264_MMU +#define VIDEO_SIGNAL_TYPE_AVAILABLE_MASK 0x20000000 +#define INVALID_IDX -1 /* Invalid buffer index.*/ + +static int mmu_enable; +/*mmu do not support mbaff*/ +static int force_enable_mmu = 0; +unsigned int h264_debug_flag; /* 0xa0000000; */ +unsigned int h264_debug_mask = 0xff; + /* + *h264_debug_cmd: + * 0x1xx, force decoder id of xx to be disconnected + */ +unsigned int h264_debug_cmd; + +static int ref_b_frame_error_max_count = 50; + +static unsigned int dec_control = + DEC_CONTROL_FLAG_FORCE_2997_1080P_INTERLACE | + DEC_CONTROL_FLAG_FORCE_2500_576P_INTERLACE; + +static unsigned int force_rate_streambase; +static unsigned int force_rate_framebase; +static unsigned int force_disp_bufspec_num; +static unsigned int fixed_frame_rate_mode; +static unsigned int error_recovery_mode_in; +static int start_decode_buf_level = 0x4000; +static int pre_decode_buf_level = 0x1000; +static int stream_mode_start_num = 4; +static unsigned int colocate_old_cal; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +/*to make reorder size difference of bl and el not too big*/ +static unsigned int reorder_dpb_size_margin_dv = 16; +#endif +static unsigned int reorder_dpb_size_margin = 6; +static unsigned int reference_buf_margin = 4; + +#ifdef CONSTRAIN_MAX_BUF_NUM +static u32 run_ready_max_vf_only_num; +static u32 run_ready_display_q_num; + /*0: not check + 0xff: mDPB.size + */ +static u32 run_ready_max_buf_num = 0xff; +#endif + +static u32 run_ready_min_buf_num = 2; + +#define VDEC_ASSIST_CANVAS_BLK32 0x5 + + +static unsigned int max_alloc_buf_count; +static unsigned int decode_timeout_val = 100; +static unsigned int errordata_timeout_val = 50; +static unsigned int 
get_data_timeout_val = 2000; +#if 1 +/* H264_DATA_REQUEST does not work, disable it, +decode has error for data in none continuous address +*/ +static unsigned int frame_max_data_packet; +#else +static unsigned int frame_max_data_packet = 8; +#endif +static unsigned int radr; +static unsigned int rval; +static u32 endian = 0xff0; + +/* + udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode detail print + bit 3, disable ucode watchdog + bit [31:16] not 0, pos to dump lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 udebug_pause_decode_idx; + +static unsigned int disp_vframe_valve_level; + +static unsigned int max_decode_instance_num = H264_DEV_NUM; +static unsigned int decode_frame_count[H264_DEV_NUM]; +static unsigned int display_frame_count[H264_DEV_NUM]; +static unsigned int max_process_time[H264_DEV_NUM]; +static unsigned int max_get_frame_interval[H264_DEV_NUM]; +static unsigned int run_count[H264_DEV_NUM]; +static unsigned int input_empty[H264_DEV_NUM]; +static unsigned int not_run_ready[H264_DEV_NUM]; +static unsigned int ref_frame_mark_flag[H264_DEV_NUM] = +{1, 1, 1, 1, 1, 1, 1, 1, 1}; + +#define VDEC_CLOCK_ADJUST_FRAME 30 +static unsigned int clk_adj_frame_count; + +/* + *bit[3:0]: 0, run ; 1, pause; 3, step + *bit[4]: 1, schedule run + */ +static unsigned int step[H264_DEV_NUM]; + +#define AUX_BUF_ALIGN(adr) ((adr + 0xf) & (~0xf)) +static u32 prefix_aux_buf_size = (16 * 1024); +static u32 suffix_aux_buf_size; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static u32 dv_toggle_prov_name; + +static u32 dolby_meta_with_el; +#endif + +/* + bit[8] + 0: use sys_info[bit 3] + not 0:use 
i_only_flag[7:0] + bit[7:0]: + bit 0, 1: only display I picture; + bit 1, 1: only decode I picture; +*/ +static unsigned int i_only_flag; + +/* + error_proc_policy: + bit[0] send_error_frame_flag; + (valid when bit[31] is 1, otherwise use sysinfo) + bit[1] do not decode if config_decode_buf() fail + bit[2] force release buf if in deadlock + bit[3] force sliding window ref_frames_in_buffer > num_ref_frames + bit[4] check inactive of receiver + bit[5] reset buffmgr if in deadlock + bit[6] reset buffmgr if bufspec, collocate buf, pic alloc fail + bit[7] reset buffmgr if dpb error + + bit[8] check total mbx/mby of decoded frame + bit[9] check ERROR_STATUS_REG + bit[10] check reference list + bit[11] mark error if dpb error + bit[12] i_only when error happen + bit[13] 0: mark error according to last pic, 1: ignore mark error + bit[14] 0: result done when timeout from ucode. 1: reset bufmgr when timeout. + bit[15] 1: dpb_frame_count If the dpb_frame_count difference is large, it moves out of the DPB buffer. + bit[16] 1: check slice header number. + bit[17] 1: If the decoded Mb count is insufficient but greater than the threshold, it is considered the correct frame. + bit[18] 1: time out status, store pic to dpb buffer. + bit[19] 1: If a lot b frames are wrong consecutively, the DPB queue reset. + bit[20] 1: fixed some error stream will lead to the diffusion of the error, resulting playback stuck. + bit[21] 1: fixed DVB loop playback cause jetter issue. + bit[22] 1: In streaming mode, support for discarding data. + bit[23] 0: set error flag on frame number gap error and drop it, 1: ignore error. 
+*/ +static unsigned int error_proc_policy = 0x3fCfb6; /*0x1f14*/ + +static unsigned int v4l_error_policy = 0x8017C3B5; //default + +/* + error_skip_count: + bit[11:0] error skip frame count + bit[15:12] error skip i picture count +*/ +static unsigned int error_skip_count = (0x2 << 12) | 0x40; + +static unsigned int force_sliding_margin; +/* + bit[1:0]: + 0, start playing from any frame + 1, start playing from I frame + bit[15:8]: the count of skip frames after first I + 2, start playing from second I frame (decode from the first I) + bit[15:8]: the max count of skip frames after first I + 3, start playing from IDR +*/ +static unsigned int first_i_policy = 1; + +/* + fast_output_enable: + bit [0], output frame if there is IDR in list + bit [1], output frame if the current poc is 1 big than the previous poc + bit [2], if even poc only, output frame ifthe cuurent poc + is 2 big than the previous poc + bit [3], ip only +*/ +static unsigned int fast_output_enable = H264_OUTPUT_MODE_NORMAL; + +static unsigned int enable_itu_t35 = 1; + +static unsigned int frmbase_cont_bitlevel = 0x40; + +static unsigned int frmbase_cont_bitlevel2 = 0x1; + +static unsigned int check_slice_num = 30; + +static unsigned int mb_count_threshold = 5; /*percentage*/ + +#define MH264_USERDATA_ENABLE + +/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */ +/* hevc->double_write_mode: + 0, no double write + 1, 1:1 ratio + 2, (1/4):(1/4) ratio + 3, (1/4):(1/4) ratio, with both compressed frame included + 4, (1/2):(1/2) ratio + 0x10, double write only + 0x10000: vdec dw horizotal 1/2 + 0x20000: vdec dw horizotal/vertical 1/2 +*/ +static u32 double_write_mode; +static u32 without_display_mode; + +static int loop_playback_poc_threshold = 400; +static int poc_threshold = 50; + +static u32 lookup_check_conut = 30; + + +/* + *[3:0] 0: default use config from omx. + * 1: force enable fence. + * 2: disable fence. + *[7:4] 0: fence use for driver. + * 1: fence fd use for app. 
+ */ +static u32 force_config_fence; + +static u32 adjust_dpb_size = 13; + +#define IS_VDEC_DW(hw) (hw->double_write_mode >> 16 & 0xf) + +static void vmh264_dump_state(struct vdec_s *vdec); + +#define is_in_parsing_state(status) \ + ((status == H264_ACTION_SEARCH_HEAD) || \ + ((status & 0xf0) == 0x80)) + +#define is_interlace(frame) \ + ((frame->frame &&\ + frame->top_field &&\ + frame->bottom_field &&\ + (!frame->frame->coded_frame)) || \ + (frame->frame && \ + frame->frame->coded_frame && \ + (!frame->frame->frame_mbs_only_flag) && \ + frame->frame->structure == FRAME)) + +static inline bool close_to(int a, int b, int m) +{ + return (abs(a - b) < m) ? true : false; +} + +#if 0 +#define h264_alloc_hw_stru(dev, size, opt) devm_kzalloc(dev, size, opt) +#define h264_free_hw_stru(dev, hw) devm_kfree(dev, hw) +#else +#define h264_alloc_hw_stru(dev, size, opt) vzalloc(size) +#define h264_free_hw_stru(dev, hw) vfree(hw) +#endif + +/* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +#define NV21 +/* #endif */ + +/* 12M for L41 */ +#define MAX_DPB_BUFF_SIZE (12*1024*1024) +#define DEFAULT_MEM_SIZE (32*1024*1024) +#define AVIL_DPB_BUFF_SIZE 0x01ec2000 + +#define DEF_BUF_START_ADDR 0x00000000 +#define mem_sps_base 0x01c3c00 +#define mem_pps_base 0x01cbc00 +/*#define V_BUF_ADDR_OFFSET (0x13e000)*/ +u32 V_BUF_ADDR_OFFSET = 0x200000; +#define DCAC_READ_MARGIN (64 * 1024) + + +#define EXTEND_SAR 0xff +#define BUFSPEC_POOL_SIZE 64 +#define VF_POOL_SIZE 64 +#define VF_POOL_NUM 2 +#define MAX_VF_BUF_NUM 27 +#define BMMU_MAX_BUFFERS (BUFSPEC_POOL_SIZE + 3) +#define BMMU_REF_IDX (BUFSPEC_POOL_SIZE) +#define BMMU_DPB_IDX (BUFSPEC_POOL_SIZE + 1) +#define BMMU_EXTIF_IDX (BUFSPEC_POOL_SIZE + 2) +#define EXTIF_BUF_SIZE (0x10000 * 2) + +#define HEADER_BUFFER_IDX(n) (n) +#define VF_BUFFER_IDX(n) (n) + + +#define PUT_INTERVAL (HZ/100) +#define NO_DISP_WD_COUNT (3 * HZ / PUT_INTERVAL) + +#define MMU_MAX_BUFFERS BUFSPEC_POOL_SIZE +#define SWITCHING_STATE_OFF 0 +#define SWITCHING_STATE_ON_CMD3 1 
+#define SWITCHING_STATE_ON_CMD1 2 + + + +#define INCPTR(p) ptr_atomic_wrap_inc(&p) + +#define SLICE_TYPE_I 2 +#define SLICE_TYPE_P 5 +#define SLICE_TYPE_B 6 + +struct buffer_spec_s { + /* + used: + -1, none allocated + 0, allocated, free + 1, used by dpb + 2, in disp queue; + 3, in disp queue, isolated, + do not use for dpb when vf_put; + 4, to release + 5, in disp queue, isolated (but not to release) + do not use for dpb when vf_put; + */ + unsigned int used; + unsigned int info0; + unsigned int info1; + unsigned int info2; + unsigned int y_addr; + unsigned int u_addr; + unsigned int v_addr; + + int y_canvas_index; + int u_canvas_index; + int v_canvas_index; + +#ifdef VDEC_DW + unsigned int vdec_dw_y_addr; + unsigned int vdec_dw_u_addr; + unsigned int vdec_dw_v_addr; + + int vdec_dw_y_canvas_index; + int vdec_dw_u_canvas_index; + int vdec_dw_v_canvas_index; +#ifdef NV21 + struct canvas_config_s vdec_dw_canvas_config[2]; +#else + struct canvas_config_s vdec_dw_canvas_config[3]; +#endif +#endif + +#ifdef NV21 + struct canvas_config_s canvas_config[2]; +#else + struct canvas_config_s canvas_config[3]; +#endif + unsigned long cma_alloc_addr; + unsigned int buf_adr; +#ifdef H264_MMU + unsigned long alloc_header_addr; +#endif + char *aux_data_buf; + int aux_data_size; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + unsigned char dv_enhance_exist; +#endif + int canvas_pos; + int vf_ref; + /*unsigned int comp_body_size;*/ + unsigned int dw_y_adr; + unsigned int dw_u_v_adr; + int fs_idx; + int ctx_buf_idx; +}; + +#define AUX_DATA_SIZE(pic) (hw->buffer_spec[pic->buf_spec_num].aux_data_size) +#define AUX_DATA_BUF(pic) (hw->buffer_spec[pic->buf_spec_num].aux_data_buf) +#define DEL_EXIST(h, p) (h->buffer_spec[p->buf_spec_num].dv_enhance_exist) + + +#define vdec_dw_spec2canvas(x) \ + (((x)->vdec_dw_v_canvas_index << 16) | \ + ((x)->vdec_dw_u_canvas_index << 8) | \ + ((x)->vdec_dw_y_canvas_index << 0)) + + +#define spec2canvas(x) \ + (((x)->v_canvas_index << 16) | \ + 
/* NOTE(review): continuation of a canvas-index packing macro whose opening
 * lines are above this chunk; kept verbatim. */
	((x)->u_canvas_index << 8) | \
	((x)->y_canvas_index << 0))

/* vframe_s.index packs two indices: frame index in the low byte,
 * buffer-spec index in the second byte. */
#define FRAME_INDEX(vf_index) (vf_index & 0xff)
#define BUFSPEC_INDEX(vf_index) ((vf_index >> 8) & 0xff)
#define VF_INDEX(frm_idx, bufspec_idx) (frm_idx | (bufspec_idx << 8))

/* Forward declarations for the vframe-provider callbacks and the
 * deferred-work handlers defined later in this file. */
static struct vframe_s *vh264_vf_peek(void *);
static struct vframe_s *vh264_vf_get(void *);
static void vh264_vf_put(struct vframe_s *, void *);
static int vh264_vf_states(struct vframe_states *states, void *);
static int vh264_event_cb(int type, void *data, void *private_data);
static void vh264_work(struct work_struct *work);
static void vh264_timeout_work(struct work_struct *work);
static void vh264_notify_work(struct work_struct *work);
#ifdef MH264_USERDATA_ENABLE
static void user_data_ready_notify_work(struct work_struct *work);
static void vmh264_wakeup_userdata_poll(struct vdec_s *vdec);
#endif

static const char vh264_dec_id[] = "vh264-dev";

#define PROVIDER_NAME "vdec.h264"

/* vframe provider vtable registered under PROVIDER_NAME. */
static const struct vframe_operations_s vf_provider_ops = {
	.peek = vh264_vf_peek,
	.get = vh264_vf_get,
	.put = vh264_vf_put,
	.event_cb = vh264_event_cb,
	.vf_states = vh264_vf_states,
};

/* Values stored in hw->dec_result to tell vh264_work() how the last
 * decode pass ended. */
#define DEC_RESULT_NONE 0
#define DEC_RESULT_DONE 1
#define DEC_RESULT_AGAIN 2
#define DEC_RESULT_CONFIG_PARAM 3
#define DEC_RESULT_GET_DATA 4
#define DEC_RESULT_GET_DATA_RETRY 5
#define DEC_RESULT_ERROR 6
#define DEC_RESULT_EOS 7
#define DEC_RESULT_FORCE_EXIT 8
#define DEC_RESULT_TIMEOUT 9
#define DEC_RESULT_NEED_MORE_BUFFER 10


/*
 *static const char *dec_result_str[] = {
 * "DEC_RESULT_NONE ",
 * "DEC_RESULT_DONE ",
 * "DEC_RESULT_AGAIN ",
 * "DEC_RESULT_CONFIG_PARAM",
 * "DEC_RESULT_GET_DATA ",
 * "DEC_RESULT_GET_DA_RETRY",
 * "DEC_RESULT_ERROR ",
 *};
 */

#define UCODE_IP_ONLY 2
#define UCODE_IP_ONLY_PARAM 1

/* Offsets of the microcode sections inside the loaded firmware image
 * (presumably; layout is dictated by the ucode build — TODO confirm). */
#define MC_OFFSET_HEADER 0x0000
#define MC_OFFSET_DATA 0x1000
#define MC_OFFSET_MMCO 0x2000
#define MC_OFFSET_LIST 0x3000
#define MC_OFFSET_SLICE 0x4000
#define MC_OFFSET_MAIN 0x5000

#define MC_TOTAL_SIZE ((20+16)*SZ_1K)
#define MC_SWAP_SIZE (4*SZ_1K)
#define MODE_ERROR 0
#define MODE_FULL 1

#define DFS_HIGH_THEASHOLD 3 /* sic: misspelling kept, may be used elsewhere */

/* Aliases for the AV_SCRATCH_* mailbox registers shared with the decoder
 * firmware; the names describe each register's role in this driver. */
#define INIT_FLAG_REG AV_SCRATCH_2
#define HEAD_PADING_REG AV_SCRATCH_3
#define UCODE_WATCHDOG_REG AV_SCRATCH_7
#define LMEM_DUMP_ADR AV_SCRATCH_L
#define DEBUG_REG1 AV_SCRATCH_M
#define DEBUG_REG2 AV_SCRATCH_N
#define FRAME_COUNTER_REG AV_SCRATCH_I
#define RPM_CMD_REG AV_SCRATCH_A
#define H264_DECODE_SIZE AV_SCRATCH_E
#define H264_DECODE_MODE AV_SCRATCH_4
#define H264_DECODE_SEQINFO AV_SCRATCH_5
#define H264_AUX_ADR AV_SCRATCH_C
#define H264_AUX_DATA_SIZE AV_SCRATCH_H

#define H264_DECODE_INFO M4_CONTROL_REG /* 0xc29 */
#define DPB_STATUS_REG AV_SCRATCH_J
/* NOTE(review): ERROR_STATUS_REG and NAL_SEARCH_CTL alias the same scratch
 * register (AV_SCRATCH_9) — presumably time-multiplexed by the firmware. */
#define ERROR_STATUS_REG AV_SCRATCH_9
	/*
	NAL_SEARCH_CTL: bit 0, enable itu_t35
	NAL_SEARCH_CTL: bit 1, enable mmu
	NAL_SEARCH_CTL: bit 2, detect frame_mbs_only_flag whether switch resolution
	NAL_SEARCH_CTL: bit 15,bitstream_restriction_flag
	*/
#define NAL_SEARCH_CTL AV_SCRATCH_9
#define MBY_MBX MB_MOTION_MODE /*0xc07*/

/* Values written to H264_DECODE_MODE. */
#define DECODE_MODE_SINGLE 0x0
#define DECODE_MODE_MULTI_FRAMEBASE 0x1
#define DECODE_MODE_MULTI_STREAMBASE 0x2
#define DECODE_MODE_MULTI_DVBAL 0x3
#define DECODE_MODE_MULTI_DVENL 0x4
static DEFINE_MUTEX(vmh264_mutex);
static DEFINE_MUTEX(reset_mutex);



#ifdef MH264_USERDATA_ENABLE

/* One userdata record: metadata plus its position in the ring buffer. */
struct mh264_userdata_record_t {
	struct userdata_meta_info_t meta_info;
	u32 rec_start;
	u32 rec_len;
};

struct mh264_ud_record_wait_node_t {
	struct list_head list;
	struct mh264_userdata_record_t ud_record;
};
#define USERDATA_FIFO_NUM 256
#define MAX_FREE_USERDATA_NODES 5

/* Ring buffer of userdata records exposed through the userdata poll API. */
struct mh264_userdata_info_t {
	struct mh264_userdata_record_t records[USERDATA_FIFO_NUM];
	u8 *data_buf;
	u8 *data_buf_end;
	u32 buf_len;
	u32 read_index;
	u32 write_index;
	u32 last_wp;
};


#endif

/* Set of vframes currently guarded by a fence. */
struct mh264_fence_vf_t {
	u32 used_size;
	struct vframe_s *fence_vf[VF_POOL_SIZE];
};


/* Per-instance decoder state (definition continues past this block). */
struct vdec_h264_hw_s {
	spinlock_t
	lock;
	spinlock_t bufspec_lock; /* protects buffer_spec[] / vfpool (see buf_spec_init) */
	int id;
	struct platform_device *platform_dev;
	unsigned long cma_alloc_addr;
	/* struct page *collocate_cma_alloc_pages; */
	unsigned long collocate_cma_alloc_addr;

	/* aux (SEI/DV metadata) DMA buffer */
	u32 prefix_aux_size;
	u32 suffix_aux_size;
	void *aux_addr;
	dma_addr_t aux_phy_addr;

	/* buffer for store all sei data */
	void *sei_data_buf;
	u32 sei_data_len;

	/* buffer for storing one itu35 recored */
	void *sei_itu_data_buf;
	u32 sei_itu_data_len;

	/* recycle buffer for user data storing all itu35 records */
	void *sei_user_data_buffer;
	u32 sei_user_data_wp;
#ifdef MH264_USERDATA_ENABLE
	struct work_struct user_data_ready_work;
#endif
	struct StorablePicture *last_dec_picture;

	ulong lmem_phy_addr;
	dma_addr_t lmem_addr;

	void *bmmu_box; /* block-MMU allocator handle (decoder_bmmu_box_*) */
#ifdef H264_MMU
	/* HEVC-core MMU state used when decoding through the hevc datapath */
	void *mmu_box;
	void *frame_mmu_map_addr;
	dma_addr_t frame_mmu_map_phy_addr;
	u32 hevc_cur_buf_idx;
	u32 losless_comp_body_size;
	u32 losless_comp_body_size_sao;
	u32 losless_comp_header_size;
	u32 mc_buffer_size_u_v;
	u32 mc_buffer_size_u_v_h;
	u32 is_idr_frame;
	u32 is_new_pic;
	u32 frame_done;
	u32 frame_busy;
	unsigned long extif_addr;
	int double_write_mode;
	int mmu_enable;
#endif

	/* free vframes / vframes ready for display */
	DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE);
	DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE);

	int cur_pool;
	struct vframe_s vfpool[VF_POOL_NUM][VF_POOL_SIZE];
	struct buffer_spec_s buffer_spec[BUFSPEC_POOL_SIZE];
	struct vframe_s switching_fense_vf; /* sic: "fense" kept for compatibility */
	struct h264_dpb_stru dpb;
	u8 init_flag;
	u8 first_sc_checked;
	u8 has_i_frame;
	u8 config_bufmgr_done;
	u32 max_reference_size;
	u32 decode_pic_count;
	u32 reflist_error_count;
	int start_search_pos;
	u32 reg_iqidct_control;
	bool reg_iqidct_control_init_flag;
	u32 reg_vcop_ctrl_reg;
	u32 reg_rv_ai_mb_count;
	u32 vld_dec_control;
	struct vframe_s vframe_dummy;

	unsigned char buffer_empty_flag;

	/* current stream geometry / timing */
	u32 frame_width;
	u32 frame_height;
	u32 frame_dur;
	u32 frame_prog;
	u32 frame_packing_type;

	struct vframe_chunk_s *chunk;

	u32 stat;
	unsigned long buf_start;
	u32 buf_offset;
	u32 buf_size;
	/* u32 ucode_map_start; */
	u32 pts_outside;
	u32 sync_outside;
	u32 vh264_ratio;
	u32 vh264_rotation;
	u32 use_idr_framerate;

	/* fields parsed from the sequence header / VUI */
	u32 seq_info;
	u32 seq_info2;
	u32 video_signal_from_vui; /*to do .. */
	u32 timing_info_present_flag;
	u32 fixed_frame_rate_flag;
	u32 bitstream_restriction_flag;
	u32 num_reorder_frames;
	u32 max_dec_frame_buffering;
	u32 iframe_count;
	u32 aspect_ratio_info;
	u32 num_units_in_tick;
	u32 time_scale;
	u32 h264_ar;
	/* PTS tracking / frame-duration estimation */
	bool h264_first_valid_pts_ready;
	u32 h264pts1;
	u32 h264pts2;
	u32 pts_duration;
	u32 h264_pts_count;
	u32 duration_from_pts_done;
	u32 pts_unstable;
	u32 unstable_pts;
	u32 last_checkout_pts;
	u32 max_refer_buf;

	s32 vh264_stream_switching_state;
	struct vframe_s *p_last_vf;
	u32 last_pts;
	u32 last_pts_remainder;
	u32 last_duration;
	u32 last_mb_width, last_mb_height;
	bool check_pts_discontinue;
	bool pts_discontinue;
	u32 wait_buffer_counter;
	u32 first_offset;
	u32 first_pts;
	u64 first_pts64;
	bool first_pts_cached;
	u64 last_pts64;
#if 0
	void *sei_data_buffer;
	dma_addr_t sei_data_buffer_phys;
#endif

	uint error_recovery_mode;
	/* picture size in macroblocks */
	uint mb_total;
	uint mb_width;
	uint mb_height;

	uint i_only;
	int skip_frame_count;
	bool no_poc_reorder_flag;
	bool send_error_frame_flag;
	dma_addr_t mc_dma_handle;
	void *mc_cpu_addr;
	int vh264_reset;

	atomic_t vh264_active;

	struct dec_sysinfo vh264_amstream_dec_info;

	int dec_result; /* one of DEC_RESULT_* */
	u32 timeout_processing;
	struct work_struct work;
	struct work_struct notify_work;
	struct work_struct timeout_work;
	void (*vdec_cb)(struct vdec_s *, void *);
	void *vdec_cb_arg;

	struct timer_list check_timer;

	/**/
	unsigned int last_frame_time;
	atomic_t vf_pre_count;
	atomic_t vf_get_count;
	atomic_t vf_put_count;

	/* timeout handle */
	unsigned long int start_process_time;
	unsigned int last_mby_mbx;
	unsigned int last_vld_level;
	unsigned int decode_timeout_count;
	unsigned int timeout_num;
	unsigned int search_dataempty_num;
	unsigned int decode_timeout_num;
	unsigned int decode_dataempty_num;
	unsigned int buffer_empty_recover_num;

	unsigned get_data_count;
	unsigned get_data_start_time;
	/**/

	/*log*/
	unsigned int packet_write_success_count;
	unsigned int packet_write_EAGAIN_count;
	unsigned int packet_write_ENOMEM_count;
	unsigned int packet_write_EFAULT_count;
	unsigned int total_read_size_pre;
	unsigned int total_read_size;
	unsigned int frame_count_pre;
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
	u8 switch_dvlayer_flag;
	u8 got_valid_nal;
#endif
	u8 eos;
	u8 data_flag;
	u32 no_error_count;
	u32 no_error_i_count;
	/*
	NODISP_FLAG
	*/
	u8 dec_flag;

	u32 ucode_pause_pos;

	/* buffer-manager reset bookkeeping */
	u8 reset_bufmgr_flag;
	u32 reset_bufmgr_count;
	ulong timeout;
	u32 timeout_flag;
	u32 cfg_param1;
	u32 cfg_param2;
	u32 cfg_param3;
	u32 cfg_param4;
	u32 cfg_bitstream_restriction_flag;
	int valve_count;
	u8 next_again_flag;
	u32 pre_parser_wr_ptr;
	struct firmware_s *fw;
	struct firmware_s *fw_mmu;
#ifdef MH264_USERDATA_ENABLE
	/*user data*/
	struct mutex userdata_mutex;
	struct mh264_userdata_info_t userdata_info;
	struct mh264_userdata_record_t ud_record;
	int wait_for_udr_send;
#endif
	u32 no_mem_count;
	u32 canvas_mode;
	/* v4l2 integration state */
	bool is_used_v4l;
	void *v4l2_ctx;
	bool v4l_params_parsed;
	wait_queue_head_t wait_q;
	u32 reg_g_status;
	struct mutex chunks_mutex;
	int need_cache_size;
	u64 sc_start_time;
	u8 frmbase_cont_flag;
	struct vframe_qos_s vframe_qos;
	int frameinfo_enable;
	bool first_head_check_flag;
	unsigned int height_aspect_ratio;
	unsigned int width_aspect_ratio;
	unsigned int first_i_policy;
	u32 reorder_dpb_size_margin;
	bool wait_reset_done_flag;
#ifdef DETECT_WRONG_MULTI_SLICE
	unsigned int multi_slice_pic_check_count;
	/* multi_slice_pic_flag:
	   0, unknown;
	   1, single slice;
	   2, multi slice
	 */
	unsigned int multi_slice_pic_flag;
	unsigned int picture_slice_count;
	unsigned int cur_picture_slice_count;
	unsigned char force_slice_as_picture_flag;
	unsigned int last_picture_slice_count;
	unsigned int first_pre_frame_num;
#endif
	u32 res_ch_flag;
	u32 b_frame_error_count;
	struct vdec_info gvs;
	u32 kpi_first_i_comming; /* sic: "comming" kept */
	u32 kpi_first_i_decoded;
	int sidebind_type;
	int sidebind_channel_id;
	u32 low_latency_mode;
	int ip_field_error_count;
	int buffer_wrap[BUFSPEC_POOL_SIZE];
	int loop_flag;
	int loop_last_poc;
	/* fence-based display path */
	bool enable_fence;
	int fence_usage;
	bool discard_dv_data;
	u32 metadata_config_flag;
	int vdec_pg_enable_flag;
	u32 save_reg_f;
	u32 start_bit_cnt;
	u32 right_frame_count;
	u32 wrong_frame_count;
	u32 error_frame_width;
	u32 error_frame_height;
	ulong fb_token;
	struct mh264_fence_vf_t fence_vf_s;
	struct mutex fence_mutex;
	u32 no_decoder_buffer_flag;
	u32 video_signal_type;
	bool need_free_aux_data;
	u32 error_proc_policy;
	struct trace_decoder_name trace;
	int csd_change_flag;
};

static u32 again_threshold;

/* Forward declarations for helpers defined later in this file. */
static void timeout_process(struct vdec_h264_hw_s *hw);
static void dump_bufspec(struct vdec_h264_hw_s *hw,
	const char *caller);
static void h264_reconfig(struct vdec_h264_hw_s *hw);
static void h264_reset_bufmgr_v4l(struct vdec_s *vdec, int flush_flag);
static void vh264_local_init(struct vdec_h264_hw_s *hw, bool is_reset);
static int vh264_hw_ctx_restore(struct vdec_h264_hw_s *hw);
static int vh264_stop(struct vdec_h264_hw_s *hw);
static s32 vh264_init(struct vdec_h264_hw_s *hw);
static void set_frame_info(struct vdec_h264_hw_s *hw, struct vframe_s *vf,
	u32 index);
static void release_aux_data(struct vdec_h264_hw_s *hw,
	int buf_spec_num);
#ifdef ERROR_HANDLE_TEST
static void h264_clear_dpb(struct vdec_h264_hw_s *hw);
#endif

/* Commands exchanged with the HEVC-core firmware via SYS_COMMAND. */
#define H265_PUT_SAO_4K_SET 0x03
#define H265_ABORT_SAO_4K_SET 0x04
#define H265_ABORT_SAO_4K_SET_DONE 0x05
/* Scratch registers used as a mailbox to the HEVC-core firmware. */
#define SYS_COMMAND HEVC_ASSIST_SCRATCH_0
#define H265_CHECK_AXI_INFO_BASE HEVC_ASSIST_SCRATCH_8
#define H265_SAO_4K_SET_BASE HEVC_ASSIST_SCRATCH_9
#define H265_SAO_4K_SET_COUNT HEVC_ASSIST_SCRATCH_A
#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464


/* Raw register offsets for the HEVC compressed-frame (CM) datapath. */
#define HEVC_CM_HEADER_START_ADDR 0x3628
#define HEVC_CM_BODY_START_ADDR 0x3626
#define HEVC_CM_BODY_LENGTH 0x3627
#define HEVC_CM_HEADER_LENGTH 0x3629
#define HEVC_CM_HEADER_OFFSET 0x362b
#define HEVC_SAO_CTRL9 0x362d
#define HEVCD_MPP_DECOMP_CTL3 0x34c4
#define HEVCD_MPP_VDEC_MCR_CTL 0x34c8
#define HEVC_DBLK_CFGB 0x350b
#define HEVC_ASSIST_MMU_MAP_ADDR 0x3009

#define H265_DW_NO_SCALE
#define H265_MEM_MAP_MODE 0 /*0:linear 1:32x32 2:64x32*/
#define H265_LOSLESS_COMPRESS_MODE
/* Max number of 4K pages mappable for one frame; map is 4 bytes/entry. */
#define MAX_FRAME_4K_NUM 0x1200
#define FRAME_MMU_MAP_SIZE (MAX_FRAME_4K_NUM * 4)

/* 0:linear 1:32x32 2:64x32 ; m8baby test1902 */
static u32 mem_map_mode = H265_MEM_MAP_MODE;

#define MAX_SIZE_4K (4096 * 2304)
#define MAX_SIZE_2K (1920 * 1088)



/*
 * Return true if w x h exceeds the platform limit (2K on T5D, 4K otherwise)
 * or if either dimension is negative. Uses "w > max / h" to avoid
 * overflowing w * h; h == 0 is treated as not oversize.
 */
static int is_oversize(int w, int h)
{
	int max = MAX_SIZE_4K;

	if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D)
		max = MAX_SIZE_2K;

	if (w < 0 || h < 0)
		return true;

	if (h != 0 && (w > max / h))
		return true;

	return false;
}

static void vmh264_udc_fill_vpts(struct vdec_h264_hw_s *hw,
	int frame_type,
	u32 vpts,
	u32 vpts_valid);
static int compute_losless_comp_body_size(int width,
	int height, int bit_depth_10);
static int compute_losless_comp_header_size(int width, int height);

/*
 * Allocate MMU pages for one picture: map buf_spec index pic_idx to a
 * hardware buffer index via CURR_CANVAS_CTRL, size the compressed body,
 * and allocate that many 4K pages from hw->mmu_box.
 * Returns 0 on success, -1 if the page count exceeds MAX_FRAME_4K_NUM
 * or the box allocation fails.
 */
static int hevc_alloc_mmu(struct vdec_h264_hw_s *hw, int pic_idx,
	int pic_width, int pic_height, u16 bit_depth,
	unsigned int *mmu_index_adr) {
	int cur_buf_idx;
	int bit_depth_10 = (bit_depth != 0x00);
	int picture_size;
	u32 cur_mmu_4k_number;

	WRITE_VREG(CURR_CANVAS_CTRL, pic_idx<<24);
	cur_buf_idx = READ_VREG(CURR_CANVAS_CTRL)&0xff;
	picture_size = compute_losless_comp_body_size(pic_width,
		pic_height, bit_depth_10);
	/* round the body size up to whole 4K pages */
	cur_mmu_4k_number = ((picture_size+(1<<12)-1) >> 12);
	dpb_print(DECODE_ID(hw),
		PRINT_FLAG_MMU_DETAIL,
		"alloc_mmu new_fb_idx %d picture_size %d cur_mmu_4k_number %d\n",
		cur_buf_idx, picture_size, cur_mmu_4k_number);

	if (cur_mmu_4k_number > MAX_FRAME_4K_NUM) {
		pr_err("hevc_alloc_mmu cur_mmu_4k_number %d unsupport\n",
			cur_mmu_4k_number);
		return -1;
	}

	return decoder_mmu_box_alloc_idx(
		hw->mmu_box,
		cur_buf_idx,
		cur_mmu_4k_number,
		mmu_index_adr);
}

/*
 * Size in bytes of the lossless-compressed frame body for a width x height
 * picture, computed from 64x32-pixel tiles. Per-tile byte counts differ
 * between the MMU and non-MMU builds (vendor-calibrated constants).
 */
static int compute_losless_comp_body_size(int width,
	int height, int bit_depth_10)
{
	int width_x64;
	int height_x32;
	int bsize;

	width_x64 = width + 63;
	width_x64 >>= 6;

	height_x32 = height + 31;
	height_x32 >>= 5;

#ifdef H264_MMU
	bsize = (bit_depth_10 ? 4096 : 3264) * width_x64*height_x32;
#else
	bsize = (bit_depth_10 ? 4096 : 3072) * width_x64*height_x32;
#endif
	return bsize;
}

/*
 * Size in bytes of the compressed-frame header: 128 bytes per 64x64 block
 * with MMU, 32 bytes per 128x64 block without.
 */
static int compute_losless_comp_header_size(int width, int height)
{
	int width_x64;
	int width_x128;
	int height_x64;
	int hsize;

	width_x64 = width + 63;
	width_x64 >>= 6;

	width_x128 = width + 127;
	width_x128 >>= 7;

	height_x64 = height + 63;
	height_x64 >>= 6;

#ifdef H264_MMU
	hsize = 128*width_x64*height_x64;
#else
	hsize = 32*width_x128*height_x64;
#endif
	return hsize;
}

/*
 * Compute the double-write buffer size for the current frame geometry.
 * Writes the 64KB-aligned per-plane size (in 64KB units) to
 * *pdw_buffer_size_u_v_h and returns the total byte size (0 when
 * double-write is disabled). (Definition continues in the next chunk.)
 */
static int get_dw_size(struct vdec_h264_hw_s *hw, u32 *pdw_buffer_size_u_v_h)
{
	int pic_width, pic_height;
	int lcu_size = 16;
	int dw_buf_size;
	u32 dw_buffer_size_u_v;
	u32 dw_buffer_size_u_v_h;
	int dw_mode = hw->double_write_mode;

	pic_width = hw->frame_width;
	pic_height = hw->frame_height;

	if (dw_mode) {
		/* scale dimensions down by the double-write ratio */
		int pic_width_dw = pic_width /
			get_double_write_ratio(hw->double_write_mode);
		int pic_height_dw = pic_height /
			get_double_write_ratio(hw->double_write_mode);

		int pic_width_lcu_dw = (pic_width_dw % lcu_size) ?
			pic_width_dw / lcu_size + 1 :
			pic_width_dw / lcu_size;
		int pic_height_lcu_dw = (pic_height_dw % lcu_size) ?
			pic_height_dw / lcu_size + 1 :
			pic_height_dw / lcu_size;
		int lcu_total_dw = pic_width_lcu_dw * pic_height_lcu_dw;


		dw_buffer_size_u_v = lcu_total_dw * lcu_size * lcu_size / 2;
		dw_buffer_size_u_v_h = (dw_buffer_size_u_v + 0xffff) >> 16;
		/*64k alignment*/
		dw_buf_size = ((dw_buffer_size_u_v_h << 16) * 3);
		*pdw_buffer_size_u_v_h = dw_buffer_size_u_v_h;
	} else {
		*pdw_buffer_size_u_v_h = 0;
		dw_buf_size = 0;
	}

	return dw_buf_size;
}


/*
 * Program the canvas-to-AXI translation table for every DPB buffer:
 * allocate (or, when restore != 0, reuse) one physical buffer per DPB
 * slot holding compressed header + body (+ optional double-write planes),
 * write its address into the hardware table, and fill in the per-buffer
 * double-write canvas configs.
 */
static void hevc_mcr_config_canv2axitbl(struct vdec_h264_hw_s *hw, int restore)
{
	int i, size;
	u32 canvas_addr;
	unsigned long maddr;
	int num_buff = hw->dpb.mDPB.size;
	int dw_size = 0;
	u32 dw_buffer_size_u_v_h;
	u32 blkmode = hw->canvas_mode;
	int dw_mode = hw->double_write_mode;

	/* identity-map canvas indices i -> (i, i, i) */
	canvas_addr = ANC0_CANVAS_ADDR;
	for (i = 0; i < num_buff; i++)
		WRITE_VREG((canvas_addr + i), i | (i << 8) | (i << 16));

	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, (0x1 << 1) | (0x1 << 2));
	size = hw->losless_comp_body_size + hw->losless_comp_header_size;


	dw_size = get_dw_size(hw, &dw_buffer_size_u_v_h);
	size += dw_size;
	if (size > 0)
		size += 0x10000; /* slack for the 64KB alignment done below */

	dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL,
		"dw_buffer_size_u_v_h = %d, dw_size = 0x%x, size = 0x%x\n",
		dw_buffer_size_u_v_h, dw_size, size);

	dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL,
		"body_size = %d, header_size = %d, body_size_sao = %d\n",
		hw->losless_comp_body_size,
		hw->losless_comp_header_size,
		hw->losless_comp_body_size_sao);

	for (i = 0; i < num_buff; i++) {
		if (!restore) {
			if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box,
				HEADER_BUFFER_IDX(i), size,
				DRIVER_HEADER_NAME, &maddr) < 0) {
				dpb_print(DECODE_ID(hw), 0,
					"%s malloc compress header failed %d\n",
					DRIVER_HEADER_NAME, i);
				return;
			}
		} else
			maddr = hw->buffer_spec[i].alloc_header_addr;
		/* table entries are in 32-byte units */
		WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, maddr >> 5);
		hw->buffer_spec[i].alloc_header_addr = maddr;
		dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL,
			"%s : canvas: %d axiaddr:%x size 0x%x\n",
			__func__, i, (u32)maddr, size);

		if (dw_mode) {
			u32 addr;
			int canvas_w;
			int canvas_h;

			canvas_w = hw->frame_width /
				get_double_write_ratio(hw->double_write_mode);
			canvas_h = hw->frame_height /
				get_double_write_ratio(hw->double_write_mode);

			if (hw->canvas_mode == 0)
				canvas_w = ALIGN(canvas_w, 32);
			else
				canvas_w = ALIGN(canvas_w, 64);
			canvas_h = ALIGN(canvas_h, 32);

			/* double-write planes live after the compressed header,
			 * rounded up to a 64KB boundary */
			hw->buffer_spec[i].dw_y_adr =
				maddr + hw->losless_comp_header_size;

			hw->buffer_spec[i].dw_y_adr =
				((hw->buffer_spec[i].dw_y_adr + 0xffff) >> 16)
				<< 16;
			hw->buffer_spec[i].dw_u_v_adr =
				hw->buffer_spec[i].dw_y_adr
				+ (dw_buffer_size_u_v_h << 16) * 2;


			hw->buffer_spec[i].buf_adr
				= hw->buffer_spec[i].dw_y_adr;
			addr = hw->buffer_spec[i].buf_adr;


			dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL,
				"dw_y_adr = 0x%x, dw_u_v_adr = 0x%x, y_addr = 0x%x, u_addr = 0x%x, v_addr = 0x%x, width = %d, height = %d\n",
				hw->buffer_spec[i].dw_y_adr,
				hw->buffer_spec[i].dw_u_v_adr,
				hw->buffer_spec[i].y_addr,
				hw->buffer_spec[i].u_addr,
				hw->buffer_spec[i].v_addr,
				canvas_w,
				canvas_h);

			hw->buffer_spec[i].canvas_config[0].phy_addr =
				hw->buffer_spec[i].dw_y_adr;
			hw->buffer_spec[i].canvas_config[0].width = canvas_w;
			hw->buffer_spec[i].canvas_config[0].height = canvas_h;
			hw->buffer_spec[i].canvas_config[0].block_mode =
				blkmode;
			hw->buffer_spec[i].canvas_config[0].endian = 7;

			hw->buffer_spec[i].canvas_config[1].phy_addr =
				hw->buffer_spec[i].dw_u_v_adr;
			hw->buffer_spec[i].canvas_config[1].width = canvas_w;
			hw->buffer_spec[i].canvas_config[1].height = canvas_h;
			hw->buffer_spec[i].canvas_config[1].block_mode =
				blkmode;
			hw->buffer_spec[i].canvas_config[1].endian = 7;
		}
	}
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1);

	/* zero the first 32 anchor-canvas entries */
	WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (0<<1) | 1);
	for (i = 0; i < 32; i++)
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
	return;
}
/*
 * Load both reference lists of the current slice into the anchor-canvas
 * table so motion compensation can resolve buf_spec_num -> canvas.
 * Bails out early on the first NULL entry in either list.
 */
static void hevc_mcr_config_mc_ref(struct vdec_h264_hw_s *hw)
{
	u32 i;
	u32 ref_canv;
	struct Slice *pSlice = &(hw->dpb.mSlice);
	/*REFLIST[0]*/
	for (i = 0; i < (unsigned int)(pSlice->listXsize[0]); i++) {
		struct StorablePicture *ref = pSlice->listX[0][i];
		if (ref == NULL)
			return;
		WRITE_VREG(CURR_CANVAS_CTRL, ref->buf_spec_num<<24);
		ref_canv = READ_VREG(CURR_CANVAS_CTRL)&0xffffff;
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			(ref->buf_spec_num & 0x3f) << 8);
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, ref_canv);
	}
	/*REFLIST[1]*/
	for (i = 0; i < (unsigned int)(pSlice->listXsize[1]); i++) {
		struct StorablePicture *ref = pSlice->listX[1][i];
		if (ref == NULL)
			return;
		WRITE_VREG(CURR_CANVAS_CTRL, ref->buf_spec_num<<24);
		ref_canv = READ_VREG(CURR_CANVAS_CTRL)&0xffffff;
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			(ref->buf_spec_num & 0x3f) << 8);
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, ref_canv);
	}
	return;
}

/*
 * Configure the motion-compensation reference cache (MCRCC) for the
 * current slice type: disabled for I slices; primed with the first
 * reference of each list for B slices (falling back to listX[1][1]
 * when both firsts resolve to the same canvas) and with the first two
 * list-0 references for P slices.
 */
static void hevc_mcr_config_mcrcc(struct vdec_h264_hw_s *hw)
{
	u32 rdata32;
	u32 rdata32_2;
	u32 slice_type;
	struct StorablePicture *ref;
	struct Slice *pSlice;
	slice_type = hw->dpb.mSlice.slice_type;
	pSlice = &(hw->dpb.mSlice);
	WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); /* reset mcrcc */
	if (slice_type == I_SLICE) {
		WRITE_VREG(HEVCD_MCRCC_CTL1, 0x0); /* leave disabled for intra */
		return;
	}
	if (slice_type == B_SLICE) {
		ref = pSlice->listX[0][0];
		if (ref == NULL)
			return;
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			((ref->buf_spec_num & 0x3f) << 8));
		rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
		rdata32 = rdata32 & 0xffff;
		rdata32 = rdata32 | (rdata32 << 16);
		WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32);

		ref = pSlice->listX[1][0];
		if (ref == NULL)
			return;
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			((ref->buf_spec_num & 0x3f) << 8));
		rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
		rdata32_2 = rdata32_2 & 0xffff;
		rdata32_2 = rdata32_2 | (rdata32_2 << 16);
		if (rdata32 == rdata32_2) {
			/* both lists start on the same canvas; try the
			 * second list-1 reference instead */
			ref = pSlice->listX[1][1];
			if (ref == NULL)
				return;
			WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
				((ref->buf_spec_num & 0x3f) << 8));
			rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
			rdata32_2 = rdata32_2 & 0xffff;
			rdata32_2 = rdata32_2 | (rdata32_2 << 16);
		}
		WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32_2);
	} else { /*P-PIC*/
		ref = pSlice->listX[0][0];
		if (ref == NULL)
			return;
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			((ref->buf_spec_num & 0x3f) << 8));
		rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
		rdata32 = rdata32 & 0xffff;
		rdata32 = rdata32 | (rdata32 << 16);
		WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32);

		ref = pSlice->listX[0][1];
		if (ref == NULL)
			return;
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			((ref->buf_spec_num & 0x3f) << 8));
		rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
		rdata32 = rdata32 & 0xffff;
		rdata32 = rdata32 | (rdata32 << 16);
		WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32);
	}
	WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); /* enable mcrcc progressive-mode */
	return;
}


/*
 * One-time global programming of the HEVC-core SAO/IPP path for this
 * picture geometry: LCU grid, compressed-frame sizes, double-write
 * scaling and endianness. (Definition continues in the next chunk.)
 */
static void hevc_mcr_sao_global_hw_init(struct vdec_h264_hw_s *hw,
	u32 width, u32 height) {
	u32 data32;
	u32 lcu_x_num, lcu_y_num;
	u32 lcu_total;
	u32 mc_buffer_size_u_v;
	u32 mc_buffer_size_u_v_h;
	int dw_mode = hw->double_write_mode;

	/*lcu_x_num = (width + 15) >> 4;*/
	// width need to be round to 64 pixel -- case0260 1/10/2020
	lcu_x_num = (((width + 63) >> 6) << 2);
	lcu_y_num = (height + 15) >> 4;
	lcu_total = lcu_x_num * lcu_y_num;

	hw->mc_buffer_size_u_v = mc_buffer_size_u_v = lcu_total*16*16/2;
	hw->mc_buffer_size_u_v_h =
		mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff)>>16;

	hw->losless_comp_body_size = 0;

	hw->losless_comp_body_size_sao =
		compute_losless_comp_body_size(width, height, 0);
	hw->losless_comp_header_size =
		compute_losless_comp_header_size(width, height);

	WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x1); /*sw reset ipp10b_top*/
	WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x0); /*sw reset ipp10b_top*/

	/*
	 setup lcu_size = 16*/
	WRITE_VREG(HEVCD_IPP_TOP_LCUCONFIG, 16); /*set lcu size = 16*/
	/*pic_width/pic_height*/
	WRITE_VREG(HEVCD_IPP_TOP_FRMCONFIG,
		(height & 0xffff) << 16 | (width & 0xffff));
	/* bitdepth_luma = 8*/
	/* bitdepth_chroma = 8*/
	WRITE_VREG(HEVCD_IPP_BITDEPTH_CONFIG, 0x0);/*set bit-depth 8 */

#ifdef H265_LOSLESS_COMPRESS_MODE
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4));
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0);
#else
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31);
#endif
	data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG);
	data32 &= (~0x30);
	data32 |= (hw->canvas_mode << 4);
	WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32);

	WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,
		(0x80 << 20) | (0x80 << 10) | (0xff));

	WRITE_VREG(HEVCD_MPP_VDEC_MCR_CTL, 0x1 | (0x1 << 4));

	/*comfig vdec:h264:mdec to use hevc mcr/mcrcc/decomp*/
	WRITE_VREG(MDEC_PIC_DC_MUX_CTRL,
		READ_VREG(MDEC_PIC_DC_MUX_CTRL) | 0x1 << 31);
	/* ipp_enable*/
	WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x1 << 1);

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) {
		WRITE_VREG(HEVC_DBLK_CFG1, 0x2); // set ctusize==16
		WRITE_VREG(HEVC_DBLK_CFG2, ((height & 0xffff)<<16) | (width & 0xffff));
		/* deblock config varies with the double-write mode */
		if (dw_mode & 0x10)
			WRITE_VREG(HEVC_DBLK_CFGB, 0x40405603);
		else if (dw_mode)
			WRITE_VREG(HEVC_DBLK_CFGB, 0x40405703);
		else
			WRITE_VREG(HEVC_DBLK_CFGB, 0x40405503);
	}

	data32 = READ_VREG(HEVC_SAO_CTRL0);
	data32 &= (~0xf);
	data32 |= 0x4;
	WRITE_VREG(HEVC_SAO_CTRL0, data32);
	WRITE_VREG(HEVC_SAO_PIC_SIZE, (height & 0xffff) << 16 |
		(width & 0xffff));
	data32 = ((lcu_x_num-1) | (lcu_y_num-1) << 16);

	WRITE_VREG(HEVC_SAO_PIC_SIZE_LCU, data32);
	data32 = (lcu_x_num | lcu_y_num << 16);
	WRITE_VREG(HEVC_SAO_TILE_SIZE_LCU, data32);
	data32 = (mc_buffer_size_u_v_h << 16) << 1;
	WRITE_VREG(HEVC_SAO_Y_LENGTH, data32);
	data32 = (mc_buffer_size_u_v_h << 16);
	WRITE_VREG(HEVC_SAO_C_LENGTH, data32);

	data32 = READ_VREG(HEVC_SAO_CTRL1);
	data32 &= (~0x3000);
	data32 &= (~0xff0);
	data32 |= endian; /* Big-Endian per 64-bit */

	if (hw->mmu_enable && (dw_mode & 0x10))
		data32 |= ((hw->canvas_mode << 12) |1);
	else if (hw->mmu_enable && dw_mode)
		data32 |= ((hw->canvas_mode << 12));
	else
		data32 |= ((hw->canvas_mode << 12)|2);

	WRITE_VREG(HEVC_SAO_CTRL1, data32);

#ifdef H265_DW_NO_SCALE
	WRITE_VREG(HEVC_SAO_CTRL5, READ_VREG(HEVC_SAO_CTRL5) & ~(0xff << 16));
	if (hw->mmu_enable && dw_mode) {
		data32 = READ_VREG(HEVC_SAO_CTRL5);
		data32 &= (~(0xff << 16));
		if (dw_mode == 2 ||
			dw_mode == 3)
			data32 |= (0xff<<16);
		else if (dw_mode == 4)
			data32 |= (0x33<<16);
		WRITE_VREG(HEVC_SAO_CTRL5, data32);
	}


#endif


#ifdef H265_LOSLESS_COMPRESS_MODE
	data32 = READ_VREG(HEVC_SAO_CTRL5);
	data32 |= (1<<9); /*8-bit smem-mode*/
	WRITE_VREG(HEVC_SAO_CTRL5, data32);

	WRITE_VREG(HEVC_CM_BODY_LENGTH, hw->losless_comp_body_size_sao);
	WRITE_VREG(HEVC_CM_HEADER_OFFSET, hw->losless_comp_body_size);
	WRITE_VREG(HEVC_CM_HEADER_LENGTH, hw->losless_comp_header_size);
#endif

#ifdef H265_LOSLESS_COMPRESS_MODE
	WRITE_VREG(HEVC_SAO_CTRL9, READ_VREG(HEVC_SAO_CTRL9) | (0x1 << 1));
	WRITE_VREG(HEVC_SAO_CTRL5, READ_VREG(HEVC_SAO_CTRL5) | (0x1 << 10));
#endif

	WRITE_VREG(HEVC_SAO_CTRL9, READ_VREG(HEVC_SAO_CTRL9) | 0x1 << 7);

	memset(hw->frame_mmu_map_addr, 0, FRAME_MMU_MAP_SIZE);

	WRITE_VREG(MDEC_EXTIF_CFG0, hw->extif_addr);
	WRITE_VREG(MDEC_EXTIF_CFG1, 0x80000000);
	return;
}

/* Record whether the upcoming picture is new and whether it is an IDR;
 * consumed by hevc_sao_set_pic_buffer(). */
static void hevc_sao_set_slice_type(struct vdec_h264_hw_s *hw,
	u32 is_new_pic, u32 is_idr)
{
	hw->is_new_pic = is_new_pic;
	hw->is_idr_frame = is_idr;
	return;
}

/*
 * Per-picture SAO setup: resolve the picture's compressed-body and
 * double-write addresses, allocate its MMU pages, and hand the 4K page
 * set to the firmware. Only acts when a new picture was flagged via
 * hevc_sao_set_slice_type(). (Definition continues in the next chunk.)
 */
static void hevc_sao_set_pic_buffer(struct vdec_h264_hw_s *hw,
	struct StorablePicture *pic) {
	u32 mc_y_adr;
	u32 mc_u_v_adr;
	u32 dw_y_adr;
	u32 dw_u_v_adr;
	u32 canvas_addr;
	int ret;
	int dw_mode = hw->double_write_mode;
	if (hw->is_new_pic != 1)
		return;

	if (hw->is_idr_frame) {
		/* William TBD */
		memset(hw->frame_mmu_map_addr, 0, FRAME_MMU_MAP_SIZE);
	}

	/* look up the AXI addresses recorded for this buf_spec by
	 * hevc_mcr_config_canv2axitbl() */
	WRITE_VREG(CURR_CANVAS_CTRL, pic->buf_spec_num << 24);
	canvas_addr = READ_VREG(CURR_CANVAS_CTRL)&0xffffff;
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, (0x0 << 1) |
		(0x0 << 2) | ((canvas_addr & 0xff) << 8));
	mc_y_adr = READ_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA) << 5;
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, (0x0 << 1) |
		(0x0 << 2) | (((canvas_addr >> 8) & 0xff) << 8));
	mc_u_v_adr = READ_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA) << 5;
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1);


	if (dw_mode) {
		dw_y_adr = hw->buffer_spec[pic->buf_spec_num].dw_y_adr;
		dw_u_v_adr = hw->buffer_spec[pic->buf_spec_num].dw_u_v_adr;
	} else {
		dw_y_adr = 0;
		dw_u_v_adr = 0;
	}
#ifdef H265_LOSLESS_COMPRESS_MODE
	if (dw_mode)
		WRITE_VREG(HEVC_SAO_Y_START_ADDR, dw_y_adr);
	WRITE_VREG(HEVC_CM_BODY_START_ADDR, mc_y_adr);
#ifdef H264_MMU
	WRITE_VREG(HEVC_CM_HEADER_START_ADDR, mc_y_adr);
#else
	WRITE_VREG(HEVC_CM_HEADER_START_ADDR,
		(mc_y_adr + hw->losless_comp_body_size));
#endif
#else
	WRITE_VREG(HEVC_SAO_Y_START_ADDR, mc_y_adr);
#endif

#ifndef H265_LOSLESS_COMPRESS_MODE
	WRITE_VREG(HEVC_SAO_C_START_ADDR, mc_u_v_adr);
#else
	if (dw_mode)
		WRITE_VREG(HEVC_SAO_C_START_ADDR, dw_u_v_adr);
#endif

/* NOTE(review): this guard tests LOSLESS_COMPRESS_MODE while the others
 * test H265_LOSLESS_COMPRESS_MODE — possibly intentional (different knob),
 * possibly a typo; verify against the vendor tree before changing. */
#ifndef LOSLESS_COMPRESS_MODE
	if (dw_mode) {
		WRITE_VREG(HEVC_SAO_Y_WPTR, mc_y_adr);
		WRITE_VREG(HEVC_SAO_C_WPTR, mc_u_v_adr);
	}
#else
	WRITE_VREG(HEVC_SAO_Y_WPTR, dw_y_adr);
	WRITE_VREG(HEVC_SAO_C_WPTR, dw_u_v_adr);
#endif

	ret = hevc_alloc_mmu(hw, pic->buf_spec_num,
		(hw->mb_width << 4), (hw->mb_height << 4), 0x0,
		hw->frame_mmu_map_addr);
	if (ret != 0) {
		dpb_print(DECODE_ID(hw),
			PRINT_FLAG_MMU_DETAIL, "can't alloc need mmu1,idx %d ret =%d\n",
			pic->buf_spec_num,
			ret);
		return;
	}

	/*Reset SAO + Enable SAO slice_start*/
	if (hw->mmu_enable && get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
		WRITE_VREG(HEVC_DBLK_CFG0, 0x1); // reset buffer32x4 in lpf for every picture
	WRITE_VREG(HEVC_SAO_INT_STATUS,
		READ_VREG(HEVC_SAO_INT_STATUS) | 0x1 << 28);
	WRITE_VREG(HEVC_SAO_INT_STATUS,
		READ_VREG(HEVC_SAO_INT_STATUS) | 0x1 << 31);
	/*pr_info("hevc_sao_set_pic_buffer:mc_y_adr: %x\n", mc_y_adr);*/
	/*Send coommand to hevc-code to supply 4k buffers to sao*/

	if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) {
		WRITE_VREG(H265_SAO_4K_SET_BASE, (u32)hw->frame_mmu_map_phy_addr);
		WRITE_VREG(H265_SAO_4K_SET_COUNT, MAX_FRAME_4K_NUM);
	} else
		WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR, (u32)hw->frame_mmu_map_phy_addr);
	WRITE_VREG(SYS_COMMAND, H265_PUT_SAO_4K_SET);
	hw->frame_busy = 1;
	return;
}


/* Cache the hardware buffer index for buf_spec_num in hw->hevc_cur_buf_idx. */
static void hevc_set_unused_4k_buff_idx(struct vdec_h264_hw_s *hw,
	u32 buf_spec_num) {
	WRITE_VREG(CURR_CANVAS_CTRL, buf_spec_num<<24);
	hw->hevc_cur_buf_idx = READ_VREG(CURR_CANVAS_CTRL)&0xff;
	dpb_print(DECODE_ID(hw),
		PRINT_FLAG_MMU_DETAIL, " %s cur_buf_idx %d buf_spec_num %d\n",
		__func__, hw->hevc_cur_buf_idx, buf_spec_num);
	return;
}


/*
 * Wait (up to 1s each) for the SAO interrupt and for the CM core to go
 * idle, then ack the interrupt and mark the frame done.
 * NOTE(review): both loops busy-spin without cpu_relax()/usleep_range();
 * consider relaxing if this ever runs in a preemptible context.
 */
static void hevc_set_frame_done(struct vdec_h264_hw_s *hw)
{
	ulong timeout = jiffies + HZ;
	dpb_print(DECODE_ID(hw),
		PRINT_FLAG_MMU_DETAIL, "hevc_frame_done...set\n");
	while ((READ_VREG(HEVC_SAO_INT_STATUS) & 0x1) == 0) {
		if (time_after(jiffies, timeout)) {
			dpb_print(DECODE_ID(hw),
				PRINT_FLAG_MMU_DETAIL, " %s..timeout!\n", __func__);
			break;
		}
	}
	timeout = jiffies + HZ;
	while (READ_VREG(HEVC_CM_CORE_STATUS) & 0x1) {
		if (time_after(jiffies, timeout)) {
			dpb_print(DECODE_ID(hw),
				PRINT_FLAG_MMU_DETAIL, " %s cm_core..timeout!\n", __func__);
			break;
		}
	}
	WRITE_VREG(HEVC_SAO_INT_STATUS, 0x1);
	hw->frame_done = 1;
	return;
}

/*
 * Drop the DPB's current decoding picture (if any), clearing its error
 * flag, and — in MMU mode — complete the hardware frame-done handshake.
 */
static void release_cur_decoding_buf(struct vdec_h264_hw_s *hw)
{
	struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
	if (p_H264_Dpb->mVideo.dec_picture) {
		release_picture(p_H264_Dpb,
			p_H264_Dpb->mVideo.dec_picture);
		p_H264_Dpb->mVideo.dec_picture->data_flag &= ~ERROR_FLAG;
		p_H264_Dpb->mVideo.dec_picture = NULL;
		if (hw->mmu_enable)
			hevc_set_frame_done(hw);
	}
}

/*
 * Wait for SAO to finish the current picture, then — when a frame was
 * in flight and completed — abort the 4K-set command on pre-G12A parts
 * and stop the HEVC core. Each wait is bounded by 1s.
 */
static void hevc_sao_wait_done(struct vdec_h264_hw_s *hw)
{
	ulong timeout = jiffies + HZ;
	dpb_print(DECODE_ID(hw),
		PRINT_FLAG_MMU_DETAIL, "hevc_sao_wait_done...start\n");
	while ((READ_VREG(HEVC_SAO_INT_STATUS) >> 31)) {
		if (time_after(jiffies, timeout)) {
			dpb_print(DECODE_ID(hw),
				PRINT_FLAG_MMU_DETAIL,
				"hevc_sao_wait_done...wait timeout!\n");
			break;
		}
	}
	timeout = jiffies + HZ;
	if ((hw->frame_busy == 1) && (hw->frame_done == 1) ) {
		if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) {
			WRITE_VREG(SYS_COMMAND, H265_ABORT_SAO_4K_SET);
			while ((READ_VREG(SYS_COMMAND) & 0xff) !=
				H265_ABORT_SAO_4K_SET_DONE) {
				if (time_after(jiffies, timeout)) {
					dpb_print(DECODE_ID(hw),
						PRINT_FLAG_MMU_DETAIL,
						"wait h265_abort_sao_4k_set_done timeout!\n");
					break;
				}
			}
		}
		amhevc_stop();
		hw->frame_busy = 0;
		hw->frame_done = 0;
		dpb_print(DECODE_ID(hw),
			PRINT_FLAG_MMU_DETAIL,
			"sao wait done ,hevc stop!\n");
	}
	return;
}
/*
 * Re-initialize the buffer-spec table: detach unreferenced vframes from
 * the outgoing pool, advance to the next vframe pool, and reset the
 * buffer_spec 'used' states. (Definition continues in the next chunk.)
 */
static void buf_spec_init(struct vdec_h264_hw_s *hw, bool buffer_reset_flag)
{
	int i;
	unsigned long flags;
	spin_lock_irqsave(&hw->bufspec_lock, flags);

	for (i = 0; i < VF_POOL_SIZE; i++) {
		struct vframe_s *vf = &hw->vfpool[hw->cur_pool][i];
		u32 ref_idx = BUFSPEC_INDEX(vf->index);
		if ((vf->index != -1) &&
			(hw->buffer_spec[ref_idx].vf_ref == 0) &&
			(hw->buffer_spec[ref_idx].used != -1)) {
			vf->index = -1;
		}
	}

	hw->cur_pool++;
	if (hw->cur_pool >= VF_POOL_NUM)
		hw->cur_pool = 0;

	for (i = 0; i < VF_POOL_SIZE; i++) {
		struct vframe_s *vf = &hw->vfpool[hw->cur_pool][i];
		u32 ref_idx = BUFSPEC_INDEX(vf->index);
		if ((vf->index != -1) &&
			(hw->buffer_spec[ref_idx].vf_ref == 0) &&
			(hw->buffer_spec[ref_idx].used != -1)) {
			vf->index = -1;
		}
	}
	/* buffers are alloced when error reset, v4l must find buffer by buffer_wrap[] */
	if (hw->reset_bufmgr_flag && buffer_reset_flag) {
		for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
			if (hw->buffer_spec[i].used == 1 ||
				hw->buffer_spec[i].used == 2)
				hw->buffer_spec[i].used = 0;
		}
	} else {
		/* full reset: invalidate every buffer spec */
		for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
			hw->buffer_spec[i].used = -1;
			hw->buffer_spec[i].canvas_pos = -1;
			hw->buffer_wrap[i] = -1;
		}
	}

	if (dpb_is_debug(DECODE_ID(hw),
		PRINT_FLAG_DUMP_BUFSPEC))
		dump_bufspec(hw, __func__);
	spin_unlock_irqrestore(&hw->bufspec_lock, flags);
}


/*is active in buf management */
static unsigned char is_buf_spec_in_use(struct vdec_h264_hw_s *hw,
	int buf_spec_num)
{
	unsigned char ret = 0;
	/* used states 1/2/3/5 mean the spec is owned somewhere in the
	 * decode/display pipeline; 0 and -1 mean free/invalid */
	if (hw->buffer_spec[buf_spec_num].used == 1 ||
		hw->buffer_spec[buf_spec_num].used == 2 ||
		hw->buffer_spec[buf_spec_num].used == 3 ||
		hw->buffer_spec[buf_spec_num].used == 5)
		ret = 1;
	return ret;
}

/* True when the spec's buffer has been handed to the display queue
 * (used states 2/3/5 — a subset of is_buf_spec_in_use()). */
static unsigned char is_buf_spec_in_disp_q(struct vdec_h264_hw_s *hw,
	int buf_spec_num)
{
	unsigned char ret = 0;
	if (hw->buffer_spec[buf_spec_num].used == 2 ||
		hw->buffer_spec[buf_spec_num].used == 3 ||
		hw->buffer_spec[buf_spec_num].used == 5)
		ret = 1;
	return ret;
}

/*
 * Allocate (or verify) the pixel buffer for buffer_spec[i].
 * MMU mode: only checks that the compressed-header buffer exists.
 * Non-MMU mode: sizes a NV21-style buffer from mb_total (plus optional
 * VDEC double-write planes), allocates it from the bmmu box, zeroes it
 * for non-secure streams, and fills in the canvas configs.
 * Returns 0 on success, -1 on allocation failure (retryable).
 */
static int alloc_one_buf_spec(struct vdec_h264_hw_s *hw, int i)
{
	struct vdec_s *vdec = hw_to_vdec(hw);
	if (hw->mmu_enable) {
		if (hw->buffer_spec[i].alloc_header_addr)
			return 0;
		else
			return -1;
	} else {

		/* luma = mb_total*256 bytes, chroma = mb_total*128 bytes */
		int buf_size = (hw->mb_total << 8) + (hw->mb_total << 7);
		int addr;
#ifdef VDEC_DW
		int orig_buf_size;
		orig_buf_size = buf_size;
		/* extra planes for the VDEC double-write down-scaled copy */
		if (IS_VDEC_DW(hw) == 1)
			buf_size += (hw->mb_total << 7) + (hw->mb_total << 6);
		else if (IS_VDEC_DW(hw) == 2)
			buf_size += (hw->mb_total << 6) + (hw->mb_total << 5);
		else if (IS_VDEC_DW(hw) == 4)
			buf_size += (hw->mb_total << 4) + (hw->mb_total << 3);
		else if (IS_VDEC_DW(hw) == 8)
			buf_size += (hw->mb_total << 2) + (hw->mb_total << 1);
		if (IS_VDEC_DW(hw)) {
			u32 align_size;
			/* add align padding size for blk64x32: (mb_w<<4)*32, (mb_h<<4)*64 */
			align_size = ((hw->mb_width << 9) + (hw->mb_height << 10)) / IS_VDEC_DW(hw);
			/* double align padding size for uv*/
			align_size <<= 1;
			buf_size += align_size + PAGE_SIZE;
		}
#endif
		if (hw->buffer_spec[i].cma_alloc_addr)
			return 0;

		if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, i,
			PAGE_ALIGN(buf_size), DRIVER_NAME,
			&hw->buffer_spec[i].cma_alloc_addr) < 0) {
			hw->buffer_spec[i].cma_alloc_addr = 0;
			/* after several consecutive failures, flag a fatal
			 * OOM and request a buffer-manager reset */
			if (hw->no_mem_count++ > 3) {
				hw->stat |= DECODER_FATAL_ERROR_NO_MEM;
				hw->reset_bufmgr_flag = 1;
			}
			dpb_print(DECODE_ID(hw), 0,
				"%s, fail to alloc buf for bufspec%d, try later\n",
				__func__, i
			);
			return -1;
		} else {
			hw->no_mem_count = 0;
			hw->stat &= ~DECODER_FATAL_ERROR_NO_MEM;
		}
		if (!vdec_secure(vdec)) {
			/*init internal buf*/
			char *tmpbuf = (char *)codec_mm_phys_to_virt(hw->buffer_spec[i].cma_alloc_addr);
			if (tmpbuf) {
				memset(tmpbuf, 0, PAGE_ALIGN(buf_size));
				codec_mm_dma_flush(tmpbuf,
					PAGE_ALIGN(buf_size),
					DMA_TO_DEVICE);
			} else {
				/* not in the kernel direct map; vmap it */
				tmpbuf = codec_mm_vmap(hw->buffer_spec[i].cma_alloc_addr, PAGE_ALIGN(buf_size));
				if (tmpbuf) {
					memset(tmpbuf, 0, PAGE_ALIGN(buf_size));
					codec_mm_dma_flush(tmpbuf,
						PAGE_ALIGN(buf_size),
						DMA_TO_DEVICE);
					codec_mm_unmap_phyaddr(tmpbuf);
				}
			}
		}
		hw->buffer_spec[i].buf_adr =
			hw->buffer_spec[i].cma_alloc_addr;
		addr = hw->buffer_spec[i].buf_adr;


		hw->buffer_spec[i].y_addr = addr;
		addr += hw->mb_total << 8;
		/* U and V share one interleaved chroma plane */
		hw->buffer_spec[i].u_addr = addr;
		hw->buffer_spec[i].v_addr = addr;
		addr += hw->mb_total << 7;

		hw->buffer_spec[i].canvas_config[0].phy_addr =
			hw->buffer_spec[i].y_addr;
		hw->buffer_spec[i].canvas_config[0].width =
			hw->mb_width << 4;
		hw->buffer_spec[i].canvas_config[0].height =
			hw->mb_height << 4;
		hw->buffer_spec[i].canvas_config[0].block_mode =
			hw->canvas_mode;

		hw->buffer_spec[i].canvas_config[1].phy_addr =
			hw->buffer_spec[i].u_addr;
		hw->buffer_spec[i].canvas_config[1].width =
			hw->mb_width << 4;
		hw->buffer_spec[i].canvas_config[1].height =
			hw->mb_height << 3;
		hw->buffer_spec[i].canvas_config[1].block_mode =
			hw->canvas_mode;
		dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
			"%s, alloc buf for bufspec%d\n",
			__func__, i);
#ifdef VDEC_DW
		if (!IS_VDEC_DW(hw))
			return 0;
		else {
			/* shift pairs derive the scaled plane dimensions
			 * from the double-write ratio */
			int w_shift = 3, h_shift = 3;

			if (IS_VDEC_DW(hw) == 1) {
				w_shift = 3;
				h_shift = 4;
			} else if (IS_VDEC_DW(hw) == 2) {
				w_shift = 3;
				h_shift = 3;
			} else if (IS_VDEC_DW(hw) == 4) {
				w_shift = 2;
				h_shift = 2;
			} else if (IS_VDEC_DW(hw) == 8) {
				w_shift = 1;
				h_shift = 1;
			}

			addr = hw->buffer_spec[i].cma_alloc_addr + PAGE_ALIGN(orig_buf_size);
			hw->buffer_spec[i].vdec_dw_y_addr = addr;
			addr += ALIGN_WIDTH(hw->mb_width << w_shift) * ALIGN_HEIGHT(hw->mb_height << h_shift);
			hw->buffer_spec[i].vdec_dw_u_addr = addr;
			hw->buffer_spec[i].vdec_dw_v_addr = addr;
			addr += hw->mb_total << (w_shift + h_shift - 1);

			hw->buffer_spec[i].vdec_dw_canvas_config[0].phy_addr =
				hw->buffer_spec[i].vdec_dw_y_addr;
			hw->buffer_spec[i].vdec_dw_canvas_config[0].width =
				ALIGN_WIDTH(hw->mb_width << w_shift);
			hw->buffer_spec[i].vdec_dw_canvas_config[0].height =
				ALIGN_HEIGHT(hw->mb_height << h_shift);
			hw->buffer_spec[i].vdec_dw_canvas_config[0].block_mode =
				hw->canvas_mode;

			hw->buffer_spec[i].vdec_dw_canvas_config[1].phy_addr =
				hw->buffer_spec[i].vdec_dw_u_addr;
			hw->buffer_spec[i].vdec_dw_canvas_config[1].width =
				ALIGN_WIDTH(hw->mb_width << w_shift);
			hw->buffer_spec[i].vdec_dw_canvas_config[1].height =
				ALIGN_HEIGHT(hw->mb_height << (h_shift - 1));
			hw->buffer_spec[i].vdec_dw_canvas_config[1].block_mode =
				hw->canvas_mode;
			dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
				"%s, vdec_dw: alloc buf for bufspec%d blkmod %d\n",
				__func__, i, hw->canvas_mode);
		}
#endif
	}
	return 0;
}

/* task_ops adapter: return a displayed vframe to the decoder. */
static void vh264_put_video_frame(void *vdec_ctx, struct vframe_s *vf)
{
	vh264_vf_put(vf, vdec_ctx);
}

/* task_ops adapter: fetch the next decoded vframe. */
static void vh264_get_video_frame(void *vdec_ctx, struct vframe_s **vf)
{
	*vf = vh264_vf_get(vdec_ctx);
}

/* NOTE(review): initializer continues past this chunk; kept verbatim. */
static struct task_ops_s task_dec_ops = {
	.type = TASK_TYPE_DEC,
	.get_vframe =
vh264_get_video_frame, + .put_vframe = vh264_put_video_frame, +}; + +static int alloc_one_buf_spec_from_queue(struct vdec_h264_hw_s *hw, int idx) +{ + int ret = 0; + struct aml_vcodec_ctx *ctx = NULL; + struct buffer_spec_s *bs = &hw->buffer_spec[idx]; + struct canvas_config_s *y_canvas_cfg = NULL; + struct canvas_config_s *c_canvas_cfg = NULL; + struct vdec_v4l2_buffer *fb = NULL; + unsigned int y_addr = 0, c_addr = 0; + + if (IS_ERR_OR_NULL(hw->v4l2_ctx)) { + pr_err("the v4l context has err.\n"); + return -1; + } + + if (bs->cma_alloc_addr) + return 0; + + ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), try alloc from v4l queue buf size: %d\n", + ctx->id, __func__, + (hw->mb_total << 8) + (hw->mb_total << 7)); + + ret = ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC); + if (ret < 0) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] get fb fail.\n", ctx->id); + return ret; + } + + bs->cma_alloc_addr = (unsigned long)fb; + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), cma alloc addr: 0x%x, out %d dec %d\n", + ctx->id, __func__, bs->cma_alloc_addr, + ctx->cap_pool.out, ctx->cap_pool.dec); + + if (fb->num_planes == 1) { + y_addr = fb->m.mem[0].addr; + c_addr = fb->m.mem[0].addr + fb->m.mem[0].offset; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + } else if (fb->num_planes == 2) { + y_addr = fb->m.mem[0].addr; + c_addr = fb->m.mem[1].addr; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + fb->m.mem[1].bytes_used = fb->m.mem[1].size; + } + + fb->task->attach(fb->task, &task_dec_ops, hw_to_vdec(hw)); + fb->status = FB_ST_DECODER; + + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), y_addr: %x, size: %u\n", + ctx->id, __func__, y_addr, fb->m.mem[0].size); + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), c_addr: %x, size: %u\n", + ctx->id, __func__, c_addr, fb->m.mem[1].size); + + bs->y_addr = y_addr; + bs->u_addr = c_addr; + bs->v_addr 
= c_addr; + + y_canvas_cfg = &bs->canvas_config[0]; + c_canvas_cfg = &bs->canvas_config[1]; + + y_canvas_cfg->phy_addr = y_addr; + y_canvas_cfg->width = hw->mb_width << 4; + y_canvas_cfg->height = hw->mb_height << 4; + y_canvas_cfg->block_mode = hw->canvas_mode; + //fb->m.mem[0].bytes_used = y_canvas_cfg->width * y_canvas_cfg->height; + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), y_w: %d, y_h: %d\n", ctx->id, __func__, + y_canvas_cfg->width,y_canvas_cfg->height); + + c_canvas_cfg->phy_addr = c_addr; + c_canvas_cfg->width = hw->mb_width << 4; + c_canvas_cfg->height = hw->mb_height << 3; + c_canvas_cfg->block_mode = hw->canvas_mode; + //fb->m.mem[1].bytes_used = c_canvas_cfg->width * c_canvas_cfg->height; + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), c_w: %d, c_h: %d\n", ctx->id, __func__, + c_canvas_cfg->width, c_canvas_cfg->height); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), alloc buf for bufspec%d\n", ctx->id, __func__, idx); + + return ret; +} + +static void config_decode_canvas(struct vdec_h264_hw_s *hw, int i) +{ + int blkmode = hw->canvas_mode; + int endian = 0; + + if (blkmode == CANVAS_BLKMODE_LINEAR) { + if ((h264_debug_flag & IGNORE_PARAM_FROM_CONFIG) == 0) + endian = 7; + else + endian = 0; + } + + if (hw->is_used_v4l) + endian = 7; + + config_cav_lut_ex(hw->buffer_spec[i]. + y_canvas_index, + hw->buffer_spec[i].y_addr, + hw->mb_width << 4, + hw->mb_height << 4, + CANVAS_ADDR_NOWRAP, + blkmode, + endian, + VDEC_1); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32, + (1 << 11) | /* canvas_blk32_wr */ + (blkmode << 10) | /* canvas_blk32*/ + (1 << 8) | /* canvas_index_wr*/ + (hw->buffer_spec[i].y_canvas_index << 0) /* canvas index*/ + ); + } + + config_cav_lut_ex(hw->buffer_spec[i]. 
+ u_canvas_index, + hw->buffer_spec[i].u_addr, + hw->mb_width << 4, + hw->mb_height << 3, + CANVAS_ADDR_NOWRAP, + blkmode, + endian, + VDEC_1); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32, + (1 << 11) | + (blkmode << 10) | + (1 << 8) | + (hw->buffer_spec[i].u_canvas_index << 0)); + } + + WRITE_VREG(ANC0_CANVAS_ADDR + hw->buffer_spec[i].canvas_pos, + spec2canvas(&hw->buffer_spec[i])); + + +#ifdef VDEC_DW + if (!IS_VDEC_DW(hw)) + return; + else { + config_cav_lut_ex(hw->buffer_spec[i]. + vdec_dw_y_canvas_index, + hw->buffer_spec[i].vdec_dw_canvas_config[0].phy_addr, + hw->buffer_spec[i].vdec_dw_canvas_config[0].width, + hw->buffer_spec[i].vdec_dw_canvas_config[0].height, + CANVAS_ADDR_NOWRAP, + blkmode, + endian, + VDEC_1); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32, + (1 << 11) | + (blkmode << 10) | + (1 << 8) | + (hw->buffer_spec[i].vdec_dw_y_canvas_index << 0)); + } + + config_cav_lut_ex(hw->buffer_spec[i]. + vdec_dw_u_canvas_index, + hw->buffer_spec[i].vdec_dw_canvas_config[1].phy_addr, + hw->buffer_spec[i].vdec_dw_canvas_config[1].width, + hw->buffer_spec[i].vdec_dw_canvas_config[1].height, + CANVAS_ADDR_NOWRAP, + blkmode, + endian, + VDEC_1); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32, + (1 << 11) | + (blkmode << 10) | + (1 << 8) | + (hw->buffer_spec[i].vdec_dw_u_canvas_index << 0)); + } + } +#endif +} + +static void config_decode_canvas_ex(struct vdec_h264_hw_s *hw, int i) +{ + u32 blkmode = hw->canvas_mode; + int canvas_w; + int canvas_h; + + canvas_w = hw->frame_width / + get_double_write_ratio(hw->double_write_mode); + canvas_h = hw->frame_height / + get_double_write_ratio(hw->double_write_mode); + + if (hw->canvas_mode == 0) + canvas_w = ALIGN(canvas_w, 32); + else + canvas_w = ALIGN(canvas_w, 64); + canvas_h = ALIGN(canvas_h, 32); + + config_cav_lut_ex(hw->buffer_spec[i]. 
+ y_canvas_index, + hw->buffer_spec[i].dw_y_adr, + canvas_w, + canvas_h, + CANVAS_ADDR_NOWRAP, + blkmode, + 7, + VDEC_HEVC); + + config_cav_lut_ex(hw->buffer_spec[i]. + u_canvas_index, + hw->buffer_spec[i].dw_u_v_adr, + canvas_w, + canvas_h, + CANVAS_ADDR_NOWRAP, + blkmode, + 7, + VDEC_HEVC); +} + +static int v4l_get_free_buffer_spec(struct vdec_h264_hw_s *hw) +{ + int i; + + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (hw->buffer_spec[i].cma_alloc_addr == 0) + return i; + } + + return -1; +} + +static int v4l_find_buffer_spec_idx(struct vdec_h264_hw_s *hw, unsigned int v4l_indx) +{ + int i; + + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (hw->buffer_wrap[i] == v4l_indx) + return i; + } + return -1; +} + +static int v4l_get_free_buf_idx(struct vdec_s *vdec) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + struct aml_vcodec_ctx * v4l = hw->v4l2_ctx; + struct v4l_buff_pool *pool = &v4l->cap_pool; + struct buffer_spec_s *pic = NULL; + int i, rt, idx = INVALID_IDX; + ulong flags; + u32 state = 0, index; + + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0; i < pool->in; ++i) { + state = (pool->seq[i] >> 16); + index = (pool->seq[i] & 0xffff); + + switch (state) { + case V4L_CAP_BUFF_IN_DEC: + rt = v4l_find_buffer_spec_idx(hw, index); + if (rt >= 0) { + pic = &hw->buffer_spec[rt]; + if ((pic->vf_ref == 0) && + (pic->used == 0) && + pic->cma_alloc_addr) { + idx = rt; + } + } + break; + case V4L_CAP_BUFF_IN_M2M: + rt = v4l_get_free_buffer_spec(hw); + if (rt >= 0) { + pic = &hw->buffer_spec[rt]; + if (!alloc_one_buf_spec_from_queue(hw, rt)) { + struct vdec_v4l2_buffer *fb; + config_decode_canvas(hw, rt); + fb = (struct vdec_v4l2_buffer *)pic->cma_alloc_addr; + hw->buffer_wrap[rt] = fb->buf_idx; + idx = rt; + } + } + break; + default: + break; + } + + if (idx != INVALID_IDX) { + pic->used = 1; + break; + } + } + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + + if (idx < 0) { + dpb_print(DECODE_ID(hw), 0, "%s fail, state %d\n", 
__func__, state); + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + dpb_print(DECODE_ID(hw), 0, "%s, %d\n", + __func__, hw->buffer_wrap[i]); + } + vmh264_dump_state(vdec); + } else { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *)pic->cma_alloc_addr; + + fb->status = FB_ST_DECODER; + + v4l->aux_infos.bind_sei_buffer(v4l, &pic->aux_data_buf, + &pic->aux_data_size, &pic->ctx_buf_idx); + } + + return idx; +} + +int get_free_buf_idx(struct vdec_s *vdec) +{ + int i; + unsigned long addr, flags; + int index = -1; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + int buf_total = BUFSPEC_POOL_SIZE; + + if (hw->is_used_v4l) + return v4l_get_free_buf_idx(vdec); + + spin_lock_irqsave(&hw->bufspec_lock, flags); + /*hw->start_search_pos = 0;*/ + for (i = hw->start_search_pos; i < buf_total; i++) { + if (hw->mmu_enable) + addr = hw->buffer_spec[i].alloc_header_addr; + else + addr = hw->buffer_spec[i].cma_alloc_addr; + + if (hw->buffer_spec[i].vf_ref == 0 && + hw->buffer_spec[i].used == 0 && addr) { + hw->buffer_spec[i].used = 1; + hw->start_search_pos = i+1; + index = i; + hw->buffer_wrap[i] = index; + break; + } + } + if (index < 0) { + for (i = 0; i < hw->start_search_pos; i++) { + if (hw->mmu_enable) + addr = hw->buffer_spec[i].alloc_header_addr; + else + addr = hw->buffer_spec[i].cma_alloc_addr; + + if (hw->buffer_spec[i].vf_ref == 0 && + hw->buffer_spec[i].used == 0 && addr) { + hw->buffer_spec[i].used = 1; + hw->start_search_pos = i+1; + index = i; + hw->buffer_wrap[i] = index; + break; + } + } + } + + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + if (hw->start_search_pos >= buf_total) + hw->start_search_pos = 0; + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "%s, buf_spec_num %d\n", __func__, index); + + if (index < 0) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "%s fail\n", __func__); + vmh264_dump_state(vdec); + } + + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + return index; 
+} + +int release_buf_spec_num(struct vdec_s *vdec, int buf_spec_num) +{ + /*u32 cur_buf_idx;*/ + unsigned long flags; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL, + "%s buf_spec_num %d used %d\n", + __func__, buf_spec_num, + buf_spec_num > 0 ? hw->buffer_spec[buf_spec_num].used : 0); + if (buf_spec_num >= 0 && + buf_spec_num < BUFSPEC_POOL_SIZE + ) { + spin_lock_irqsave(&hw->bufspec_lock, flags); + hw->buffer_spec[buf_spec_num].used = 0; + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + if (hw->mmu_enable) { + /*WRITE_VREG(CURR_CANVAS_CTRL, buf_spec_num<<24); + cur_buf_idx = READ_VREG(CURR_CANVAS_CTRL); + cur_buf_idx = cur_buf_idx&0xff;*/ + decoder_mmu_box_free_idx(hw->mmu_box, buf_spec_num); + } + release_aux_data(hw, buf_spec_num); + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + return 0; +} + +static void config_buf_specs(struct vdec_s *vdec) +{ + int i, j; + unsigned long flags; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + int mode = IS_VDEC_DW(hw) ? 
2 : 1; + + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0, j = 0; + j < hw->dpb.mDPB.size + && i < BUFSPEC_POOL_SIZE; + i++) { + int canvas; + if (hw->buffer_spec[i].used != -1) + continue; + if (vdec->parallel_dec == 1) { + if (hw->buffer_spec[i].y_canvas_index == -1) + hw->buffer_spec[i].y_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].u_canvas_index == -1) { + hw->buffer_spec[i].u_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->buffer_spec[i].v_canvas_index = hw->buffer_spec[i].u_canvas_index; + } +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + if (hw->buffer_spec[i].vdec_dw_y_canvas_index == -1) + hw->buffer_spec[i].vdec_dw_y_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].vdec_dw_u_canvas_index == -1) { + hw->buffer_spec[i].vdec_dw_u_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->buffer_spec[i].vdec_dw_v_canvas_index = + hw->buffer_spec[i].vdec_dw_u_canvas_index; + } + } +#endif + } else { + canvas = vdec->get_canvas(j * mode, 2); + hw->buffer_spec[i].y_canvas_index = canvas_y(canvas); + hw->buffer_spec[i].u_canvas_index = canvas_u(canvas); + hw->buffer_spec[i].v_canvas_index = canvas_v(canvas); + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DPB_DETAIL, + "config canvas (%d) %x for bufspec %d\r\n", + j, canvas, i); +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + canvas = vdec->get_canvas(j * mode + 1, 2); + hw->buffer_spec[i].vdec_dw_y_canvas_index = canvas_y(canvas); + hw->buffer_spec[i].vdec_dw_u_canvas_index = canvas_u(canvas); + hw->buffer_spec[i].vdec_dw_v_canvas_index = canvas_v(canvas); + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DPB_DETAIL, + "vdec_dw: config canvas (%d) %x for bufspec %d\r\n", + j, canvas, i); + } +#endif + } + + hw->buffer_spec[i].used = 0; + hw->buffer_spec[i].canvas_pos = j; + + + j++; + } + spin_unlock_irqrestore(&hw->bufspec_lock, flags); +} + +static void config_buf_specs_ex(struct vdec_s *vdec) +{ + int i, j; 
+ unsigned long flags; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + int mode = IS_VDEC_DW(hw) ? 2 : 1; + + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0, j = 0; + j < hw->dpb.mDPB.size + && i < BUFSPEC_POOL_SIZE; + i++) { + int canvas = 0; + if (hw->buffer_spec[i].used != -1) + continue; + if (vdec->parallel_dec == 1) { + if (hw->buffer_spec[i].y_canvas_index == -1) + hw->buffer_spec[i].y_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].u_canvas_index == -1) { + hw->buffer_spec[i].u_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->buffer_spec[i].v_canvas_index = hw->buffer_spec[i].u_canvas_index; + } +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + if (hw->buffer_spec[i].vdec_dw_y_canvas_index == -1) + hw->buffer_spec[i].vdec_dw_y_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].vdec_dw_u_canvas_index == -1) { + hw->buffer_spec[i].vdec_dw_u_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->buffer_spec[i].vdec_dw_v_canvas_index = + hw->buffer_spec[i].vdec_dw_u_canvas_index; + } + } +#endif + } else { + canvas = vdec->get_canvas(j* mode, 2); + hw->buffer_spec[i].y_canvas_index = canvas_y(canvas); + hw->buffer_spec[i].u_canvas_index = canvas_u(canvas); + hw->buffer_spec[i].v_canvas_index = canvas_v(canvas); + + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DPB_DETAIL, + "config canvas (%d) %x for bufspec %d\r\n", + j, canvas, i); +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + canvas = vdec->get_canvas(j*mode + 1, 2); + hw->buffer_spec[i].vdec_dw_y_canvas_index = canvas_y(canvas); + hw->buffer_spec[i].vdec_dw_u_canvas_index = canvas_u(canvas); + hw->buffer_spec[i].vdec_dw_v_canvas_index = canvas_v(canvas); + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DPB_DETAIL, + "vdec_dw: config canvas (%d) %x for bufspec %d\r\n", + j, canvas, i); + } +#endif + } + + hw->buffer_spec[i].used = 0; + hw->buffer_spec[i].alloc_header_addr = 0; + 
		hw->buffer_spec[i].canvas_pos = j;

		j++;
	}
	spin_unlock_irqrestore(&hw->bufspec_lock, flags);
}

/*
 * Free the backing memory of buffer specs marked for deallocation
 * (used == 4), or of every spec when @release_all is set.  Non-MMU specs
 * return their bmmu allocation (unless v4l owns the fb); MMU specs free
 * the header allocation.  The spec fields are cleared under bufspec_lock;
 * the box free calls themselves happen outside the spinlock.
 */
static void dealloc_buf_specs(struct vdec_h264_hw_s *hw,
	unsigned char release_all)
{
	int i;
	unsigned long flags;
	unsigned char dealloc_flag = 0;

	for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
		if (hw->buffer_spec[i].used == 4 ||
			release_all) {
			dealloc_flag = 1;
			dpb_print(DECODE_ID(hw),
				PRINT_FLAG_DPB_DETAIL,
				"%s buf_spec_num %d\n",
				__func__, i
				);
			spin_lock_irqsave
				(&hw->bufspec_lock, flags);
			hw->buffer_spec[i].used = -1;
			spin_unlock_irqrestore
				(&hw->bufspec_lock, flags);
			release_aux_data(hw, i);

			if (!hw->mmu_enable) {
				if (hw->buffer_spec[i].cma_alloc_addr) {
					/* In v4l mode cma_alloc_addr is an fb
					 * handle owned by the v4l queue, not a
					 * bmmu allocation — do not free it. */
					if (!hw->is_used_v4l) {
						decoder_bmmu_box_free_idx(
							hw->bmmu_box,
							i);
					}
					spin_lock_irqsave
						(&hw->bufspec_lock, flags);
					hw->buffer_spec[i].cma_alloc_addr = 0;
					hw->buffer_spec[i].buf_adr = 0;
					spin_unlock_irqrestore
						(&hw->bufspec_lock, flags);
				}
			} else {
				if (hw->buffer_spec[i].alloc_header_addr) {
					decoder_mmu_box_free_idx(
						hw->mmu_box,
						i);
					spin_lock_irqsave
						(&hw->bufspec_lock, flags);
					hw->buffer_spec[i].alloc_header_addr = 0;
					hw->buffer_spec[i].buf_adr = 0;
					spin_unlock_irqrestore
						(&hw->bufspec_lock, flags);
				}
			}
		}
	}
	if (dealloc_flag &&
		dpb_is_debug(DECODE_ID(hw),
			PRINT_FLAG_DUMP_BUFSPEC))
		dump_bufspec(hw, __func__);
	return;
}

/*
 * Report whether a buffer spec is (or can be made) available for decode.
 * v4l mode: counts idle bound specs plus queue-ready fbs and compares
 * against run_ready_min_buf_num.  Legacy mode: returns 1 immediately if an
 * idle allocated spec exists; otherwise tries to allocate the idle spec
 * with the lowest canvas_pos (after recycling used == 4 specs), bounded by
 * max_alloc_buf_count.
 */
unsigned char have_free_buf_spec(struct vdec_s *vdec)
{
	int i;
	unsigned long addr;
	struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
	struct aml_vcodec_ctx * ctx = hw->v4l2_ctx;
	int canvas_pos_min = BUFSPEC_POOL_SIZE;
	int index = -1;
	int ret = 0;
	int allocated_count = 0;

	if (hw->is_used_v4l) {
		struct h264_dpb_stru *dpb = &hw->dpb;
		int free_count = 0;
		int used_count = 0;

		/* trigger to parse head data. */
		if (!hw->v4l_params_parsed)
			return 1;

		if (dpb->mDPB.used_size >= dpb->mDPB.size - 1)
			return 0;

		if (ctx->cap_pool.dec < hw->dpb.mDPB.size) {
			if (ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) {
				free_count =
					v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) + 1;
			}
		}

		for (i = 0; i < hw->dpb.mDPB.size; i++) {
			if (hw->buffer_spec[i].used == 0 &&
				hw->buffer_spec[i].vf_ref == 0 &&
				hw->buffer_spec[i].cma_alloc_addr) {
				free_count++;
			} else if (hw->buffer_spec[i].cma_alloc_addr)
				used_count++;
		}

		ATRACE_COUNTER("V_ST_DEC-free_buff_count", free_count);
		ATRACE_COUNTER("V_ST_DEC-used_buff_count", used_count);

		return free_count >= run_ready_min_buf_num ? 1 : 0;
	}

	for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
		if (hw->mmu_enable)
			addr = hw->buffer_spec[i].alloc_header_addr;
		else
			addr = hw->buffer_spec[i].cma_alloc_addr;
		if (hw->buffer_spec[i].used == 0 &&
			hw->buffer_spec[i].vf_ref == 0) {
			if (addr)
				return 1;
			/* Remember the idle-but-unallocated spec with the
			 * lowest canvas position as the allocation candidate. */
			if (hw->buffer_spec[i].canvas_pos < canvas_pos_min) {
				canvas_pos_min = hw->buffer_spec[i].canvas_pos;
				index = i;
			}
		}
		if (addr)
			allocated_count++;
	}
	if (index >= 0) {
		mutex_lock(&vmh264_mutex);
		dealloc_buf_specs(hw, 0);
		if (max_alloc_buf_count == 0 ||
			allocated_count < max_alloc_buf_count) {
			if (alloc_one_buf_spec(hw, index) >= 0)
				ret = 1;
		}
		mutex_unlock(&vmh264_mutex);
	}

	return ret;
}

/*
 * Translate a canvas position into the index of the buffer spec holding
 * it (counting only specs with canvas_pos >= 0).  Returns -1 if not found.
 */
static int get_buf_spec_by_canvas_pos(struct vdec_h264_hw_s *hw,
	int canvas_pos)
{
	int i;
	int j = 0;

	for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
		if (hw->buffer_spec[i].canvas_pos >= 0) {
			if (j == canvas_pos)
				return i;
			j++;
		}
	}
	return -1;
}

/*
 * Attach the memory handles backing spec @index to vframe @vf so the
 * consumer can pin the allocation: mmu box handle (+ bmmu header) for
 * scatter frames, bmmu handle otherwise; NULL handles for index < 0.
 */
static void update_vf_memhandle(struct vdec_h264_hw_s *hw,
	struct vframe_s *vf, int index)
{
	if (index < 0) {
		vf->mem_handle = NULL;
		vf->mem_head_handle = NULL;
	} else if (vf->type & VIDTYPE_SCATTER) {
		vf->mem_handle =
			decoder_mmu_box_get_mem_handle(
				hw->mmu_box, index);
		vf->mem_head_handle =
			decoder_bmmu_box_get_mem_handle(
				hw->bmmu_box, HEADER_BUFFER_IDX(index));
	} else {
		vf->mem_handle =
			decoder_bmmu_box_get_mem_handle(
				hw->bmmu_box, VF_BUFFER_IDX(index));
		/* vf->mem_head_handle =
			decoder_bmmu_box_get_mem_handle(
				hw->bmmu_box, HEADER_BUFFER_IDX(index));*/
	}
	return;
}

/*
 * Decide whether this frame must be forced through the deinterlace path.
 * Matches known fake-progressive cases (29.97 1080 / 25fps 576) selected
 * by dec_control flags.  Disabled for secure streams and for v4l.
 * NOTE(review): the final hw->is_used_v4l branch is unreachable — v4l
 * already returned 0 above; looks like dead code left from a refactor.
 */
static int check_force_interlace(struct vdec_h264_hw_s *hw,
	struct FrameStore *frame)
{
	struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
	int bForceInterlace = 0;

	/* no di in secure mode, disable force di */
	if (vdec_secure(hw_to_vdec(hw)))
		return 0;

	if (hw->is_used_v4l)
		return 0;

	if ((dec_control & DEC_CONTROL_FLAG_FORCE_2997_1080P_INTERLACE)
		&& hw->bitstream_restriction_flag
		&& (hw->frame_width == 1920)
		&& (hw->frame_height >= 1080) /* For being compatible with a fake progressive stream which is interlaced actually*/
		&& (hw->frame_dur == 3203 || (hw->frame_dur == 3840 && p_H264_Dpb->mSPS.profile_idc == 100 &&
		p_H264_Dpb->mSPS.level_idc == 40))) {
		bForceInterlace = 1;
	} else if ((dec_control & DEC_CONTROL_FLAG_FORCE_2500_576P_INTERLACE)
		&& (hw->frame_width == 720)
		&& (hw->frame_height == 576)
		&& (hw->frame_dur == 3840)) {
		bForceInterlace = 1;
	}
	if (hw->is_used_v4l && (bForceInterlace == 0) && frame->frame) {
		bForceInterlace = (frame->frame->mb_aff_frame_flag)?1:0;
	}
	return bForceInterlace;
}

/*
 * Copy per-frame QoS statistics (slice type, size, pts, mv/qp/skip
 * min/avg/max) from @frame into hw->vframe_qos and bump its counter.
 */
static void fill_frame_info(struct vdec_h264_hw_s *hw, struct FrameStore *frame)
{
	struct vframe_qos_s *vframe_qos = &hw->vframe_qos;

	if (frame->slice_type == I_SLICE)
		vframe_qos->type = 1;
	else if (frame->slice_type == P_SLICE)
		vframe_qos->type = 2;
	else if (frame->slice_type == B_SLICE)
		vframe_qos->type = 3;

	if (input_frame_based(hw_to_vdec(hw)))
		vframe_qos->size = frame->frame_size2;
	else
		vframe_qos->size = frame->frame_size;
	vframe_qos->pts = frame->pts64;

	vframe_qos->max_mv = frame->max_mv;
	vframe_qos->avg_mv = frame->avg_mv;
	vframe_qos->min_mv = frame->min_mv;
/*
	pr_info("mv: max:%d, avg:%d, min:%d\n",
		vframe_qos->max_mv,
		vframe_qos->avg_mv,
		vframe_qos->min_mv);
*/

	vframe_qos->max_qp = frame->max_qp;
	vframe_qos->avg_qp = frame->avg_qp;
	vframe_qos->min_qp = frame->min_qp;
/*
	pr_info("qp: max:%d, avg:%d, min:%d\n",
		vframe_qos->max_qp,
		vframe_qos->avg_qp,
		vframe_qos->min_qp);
*/

	vframe_qos->max_skip = frame->max_skip;
	vframe_qos->avg_skip = frame->avg_skip;
	vframe_qos->min_skip = frame->min_skip;
/*
	pr_info("skip: max:%d, avg:%d, min:%d\n",
		vframe_qos->max_skip,
		vframe_qos->avg_skip,
		vframe_qos->min_skip);
*/
	vframe_qos->num++;
}

/* 1 if the frame store holds a complete frame whose slice type is I. */
static int is_iframe(struct FrameStore *frame) {

	if (frame->frame && frame->frame->slice_type == I_SLICE) {
		return 1;
	}
	return 0;
}

/*
 * Pre-display processing of a DPB frame store: validate the bufspec
 * index, apply the error-skip policy, repair missing-field cases, look up
 * or extrapolate the stream pts, refine frame_dur from pts spacing
 * (SWPL-18973), update error statistics, and decide show_frame.
 * Returns 0 when the frame may proceed to output, -1 on invalid index or
 * forced recycle.
 */
static int post_prepare_process(struct vdec_s *vdec, struct FrameStore *frame)
{
	struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
	int buffer_index = frame->buf_spec_num;

	if (buffer_index < 0 || buffer_index >= BUFSPEC_POOL_SIZE) {
		dpb_print(DECODE_ID(hw), 0,
			"%s, buffer_index 0x%x is beyond range\n",
			__func__, buffer_index);
		return -1;
	}

	if (force_disp_bufspec_num & 0x100) {
		/*recycle directly*/
		if (hw->buffer_spec[frame->buf_spec_num].used != 3 &&
			hw->buffer_spec[frame->buf_spec_num].used != 5)
			set_frame_output_flag(&hw->dpb, frame->index);

		/*make pre_output not set*/
		return -1;
	}
	if (hw->error_proc_policy & 0x1000) {
		/* error_skip_count packs (i-frame count << 12 | frame count). */
		int error_skip_i_count = (error_skip_count >> 12) & 0xf;
		int error_skip_frame_count = error_skip_count & 0xfff;

		if (((hw->no_error_count < error_skip_frame_count)
			&& (error_skip_i_count == 0 ||
			hw->no_error_i_count < error_skip_i_count))
			&& (!(frame->data_flag & I_FLAG)))
			frame->data_flag |= ERROR_FLAG;
	}

	dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG,
		"%s, buffer_index 0x%x frame_error %x poc %d hw error %x hw error_proc_policy %x\n",
		__func__, buffer_index,
		frame->data_flag & ERROR_FLAG,
		frame->poc, hw->data_flag & ERROR_FLAG,
		hw->error_proc_policy);

	/* Field pair never completed: tolerate in i_only mode, otherwise
	 * mark the frame in error. */
	if (frame->frame == NULL &&
		((frame->is_used == 1 && frame->top_field)
		|| (frame->is_used == 2 && frame->bottom_field))) {
		if (hw->i_only) {
			if (frame->is_used == 1)
				dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
					"%s No bottom_field !! frame_num %d used %d\n",
					__func__, frame->frame_num, frame->is_used);
			if (frame->is_used == 2)
				dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
					"%s No top_field !! frame_num %d used %d\n",
					__func__, frame->frame_num, frame->is_used);
		}
		else {
			frame->data_flag |= ERROR_FLAG;
			dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG,
				"%s Error frame_num %d used %d\n",
				__func__, frame->frame_num, frame->is_used);
		}
	}
	if (vdec_stream_based(vdec) && !(frame->data_flag & NODISP_FLAG)) {
		if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) {
			if ((pts_lookup_offset_us64(PTS_TYPE_VIDEO,
				frame->offset_delimiter, &frame->pts, &frame->frame_size,
				0, &frame->pts64) == 0)) {
				if ((lookup_check_conut && (atomic_read(&hw->vf_pre_count) > lookup_check_conut) &&
					(hw->wrong_frame_count > hw->right_frame_count)) &&
					((frame->decoded_frame_size * 2 < frame->frame_size))) {
					/*resolve many frame only one check in pts, cause playback unsmooth issue*/
					frame->pts64 = hw->last_pts64 +DUR2PTS(hw->frame_dur) ;
					frame->pts = hw->last_pts + DUR2PTS(hw->frame_dur);
				}
				hw->right_frame_count++;
			} else {
				/* Lookup failed: extrapolate from last pts. */
				frame->pts64 = hw->last_pts64 +DUR2PTS(hw->frame_dur) ;
				frame->pts = hw->last_pts + DUR2PTS(hw->frame_dur);
				hw->wrong_frame_count++;
			}
		}

		dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
			"%s error= 0x%x poc = %d offset= 0x%x pts= 0x%x last_pts =0x%x pts64 = %lld last_pts64= %lld duration = %d\n",
			__func__, (frame->data_flag & ERROR_FLAG), frame->poc,
			frame->offset_delimiter, frame->pts,hw->last_pts,
			frame->pts64, hw->last_pts64, hw->frame_dur);
		hw->last_pts64 = frame->pts64;
		hw->last_pts = frame->pts;
	}

	/* SWPL-18973 96000/15=6400, less than 15fps check */
	if ((!hw->duration_from_pts_done) && (hw->frame_dur > 6400ULL)) {
		if ((check_force_interlace(hw, frame)) &&
			(frame->slice_type == I_SLICE) &&
			(hw->pts_outside)) {
			if ((!hw->h264_pts_count) || (!hw->h264pts1)) {
				hw->h264pts1 = frame->pts;
				hw->h264_pts_count = 0;
			} else if (frame->pts > hw->h264pts1) {
				u32 calc_dur =
					PTS2DUR(frame->pts - hw->h264pts1);

				calc_dur = ((calc_dur/hw->h264_pts_count) << 1);
				/* Accept the measured duration only when it
				 * agrees with the declared one within ±200. */
				if (hw->frame_dur < (calc_dur + 200) &&
					hw->frame_dur > (calc_dur - 200)) {
					hw->frame_dur >>= 1;
					vdec_schedule_work(&hw->notify_work);
					dpb_print(DECODE_ID(hw), 0,
						"correct frame_dur %d, calc_dur %d, count %d\n",
						hw->frame_dur, (calc_dur >> 1), hw->h264_pts_count);
					hw->duration_from_pts_done = 1;
					hw->h264_pts_count = 0;
				}
			}
		}
		hw->h264_pts_count++;
	}

	if (frame->data_flag & ERROR_FLAG) {
		vdec_count_info(&hw->gvs, 1, 0);
		if (frame->slice_type == I_SLICE) {
			hw->gvs.i_concealed_frames++;
		} else if (frame->slice_type == P_SLICE) {
			hw->gvs.p_concealed_frames++;
		} else if (frame->slice_type == B_SLICE) {
			hw->gvs.b_concealed_frames++;
		}
		if (!hw->send_error_frame_flag) {
			hw->gvs.drop_frame_count++;
			if (frame->slice_type == I_SLICE) {
				hw->gvs.i_lost_frames++;
			} else if (frame->slice_type == P_SLICE) {
				hw->gvs.p_lost_frames++;
			} else if (frame->slice_type == B_SLICE) {
				hw->gvs.b_lost_frames++;
			}
		}

	}

	/* Suppress display for no-display/null/error frames (unless error
	 * frames are explicitly requested) and for non-I frames in i_only
	 * mode; fence mode always shows. */
	if ((!hw->enable_fence) &&
		((frame->data_flag & NODISP_FLAG) ||
		(frame->data_flag & NULL_FLAG) ||
		((!hw->send_error_frame_flag) &&
		(frame->data_flag & ERROR_FLAG)) ||
		((hw->i_only & 0x1) &&
		(!(frame->data_flag & I_FLAG))))) {
		frame->show_frame = false;
		return 0;
	}

	if (dpb_is_debug(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL)) {
		dpb_print(DECODE_ID(hw), 0,
			"%s, fs[%d] poc %d, buf_spec_num %d\n",
			__func__, frame->index, frame->poc,
			frame->buf_spec_num);
		print_pic_info(DECODE_ID(hw), "predis_frm",
			frame->frame, -1);
		print_pic_info(DECODE_ID(hw), "predis_top",
			frame->top_field, -1);
		print_pic_info(DECODE_ID(hw), "predis_bot",
			frame->bottom_field, -1);
	}

	frame->show_frame = true;

	return 0;
}

/*
 * Emit @frame to the display path: build one vframe (progressive) or two
 * (interlaced / forced-interlace), fill pts/timestamp, canvas or AFBC
 * compression info, fence and memory handles, then account the spec's
 * vf_ref.  NOTE: this function continues past the end of this chunk; the
 * text below is the visible head only.
 */
static int post_video_frame(struct vdec_s *vdec, struct FrameStore *frame)
{
	struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
	struct vframe_s *vf = NULL;
	int buffer_index = frame->buf_spec_num;
	struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx;
	struct vdec_v4l2_buffer *fb = NULL;
	ulong nv_order = VIDTYPE_VIU_NV21;
	int bForceInterlace = 0;
	int vf_count = 1;
	int i;

	/* swap uv */
	if (hw->is_used_v4l) {
		if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) ||
			(v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M))
			nv_order = VIDTYPE_VIU_NV12;
	}

	if (!is_interlace(frame))
		vf_count = 1;
	else
		vf_count = 2;

	bForceInterlace = check_force_interlace(hw, frame);
	if (bForceInterlace)
		vf_count = 2;

	if (!hw->enable_fence)
		hw->buffer_spec[buffer_index].vf_ref = 0;
	fill_frame_info(hw, frame);

	if ((hw->is_used_v4l) &&
		((vdec->prog_only) || (!v4l2_ctx->vpp_is_need)))
		vf_count = 1;

	for (i = 0; i < vf_count; i++) {
		if (kfifo_get(&hw->newframe_q, &vf) == 0 ||
			vf == NULL) {
			dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
				"%s fatal error, no available buffer slot.\n",
				__func__);
			return -1;
		}
		vf->duration_pulldown = 0;
		/* Unstable pts streams get zeroed timing except for I frames. */
		if (!(is_iframe(frame)) && hw->unstable_pts) {
			vf->pts = 0;
			vf->pts_us64 = 0;
			vf->timestamp = 0;
			vf->index = VF_INDEX(frame->index, buffer_index);
		} else {
			if ((i == 0) && (vf_count == 2))
				vf->timestamp = frame->last_field_timestamp;
			else
				vf->timestamp = frame->timestamp;
			vf->pts = frame->pts;
			vf->pts_us64 = frame->pts64;
			if ((i > 0) && v4l2_ctx->second_field_pts_mode) {
				vf->timestamp = 0;
			}
			vf->index = VF_INDEX(frame->index, buffer_index);
		}

		if (hw->is_used_v4l) {
			vf->v4l_mem_handle
				= hw->buffer_spec[buffer_index].cma_alloc_addr;
			fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
		}

		if (hw->enable_fence) {
			/* fill fence information. */
			if (hw->fence_usage == FENCE_USE_FOR_DRIVER)
				vf->fence = frame->fence;
		}

		if (hw->mmu_enable) {
			if (hw->double_write_mode & 0x10) {
				/* double write only */
				vf->compBodyAddr = 0;
				vf->compHeadAddr = 0;
			} else {
				/*head adr*/
				vf->compHeadAddr =
					hw->buffer_spec[buffer_index].alloc_header_addr;
				/*body adr*/
				vf->compBodyAddr = 0;
				vf->canvas0Addr = vf->canvas1Addr = 0;
			}

			vf->type = VIDTYPE_SCATTER;

			if (hw->double_write_mode) {
				vf->type |= VIDTYPE_PROGRESSIVE
					| VIDTYPE_VIU_FIELD;
				vf->type |= nv_order;
				if (hw->double_write_mode == 3)
					vf->type |= VIDTYPE_COMPRESS;

				vf->canvas0Addr = vf->canvas1Addr = -1;
				vf->plane_num = 2;
				vf->canvas0_config[0] =
					hw->buffer_spec[buffer_index].
					canvas_config[0];
				vf->canvas0_config[1] =
					hw->buffer_spec[buffer_index].
					canvas_config[1];

				vf->canvas1_config[0] =
					hw->buffer_spec[buffer_index].
					canvas_config[0];
				vf->canvas1_config[1] =
					hw->buffer_spec[buffer_index].
					canvas_config[1];

			} else {
				vf->type |=
					VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD;
				vf->canvas0Addr = vf->canvas1Addr = 0;
			}

			vf->bitdepth =
				BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8;

			vf->compWidth = hw->frame_width;
			vf->compHeight = hw->frame_height;
		} else {
			vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD |
				nv_order;

			vf->canvas0Addr = vf->canvas1Addr =
				spec2canvas(&hw->buffer_spec[buffer_index]);
#ifdef VDEC_DW
			if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T7) {
				if (IS_VDEC_DW(hw))
					vf->canvas0Addr = vf->canvas1Addr =
						vdec_dw_spec2canvas(&hw->buffer_spec[buffer_index]);
			} else {
				if (IS_VDEC_DW(hw))
					vf->canvas0Addr = vf->canvas1Addr = -1;
			}
#endif

		}
		if (frame->data_flag & ERROR_FLAG) {
			vf->frame_type |= V4L2_BUF_FLAG_ERROR;
		}

		set_frame_info(hw, vf, buffer_index);
		if (hw->discard_dv_data) {
			vf->discard_dv_data = true;
		}

		if (hw->mmu_enable && hw->double_write_mode) {
			vf->width = hw->frame_width /
				get_double_write_ratio(hw->double_write_mode);
			vf->height = hw->frame_height /
				get_double_write_ratio(hw->double_write_mode);
		}

		if (frame->slice_type == I_SLICE) {
			vf->frame_type |= V4L2_BUF_FLAG_KEYFRAME;
		} else if (frame->slice_type == P_SLICE) {
			vf->frame_type |= V4L2_BUF_FLAG_PFRAME;
		} else if (frame->slice_type == B_SLICE) {
			vf->frame_type |= V4L2_BUF_FLAG_BFRAME;
		}

		vf->flag = 0;
		if (frame->data_flag & I_FLAG)
			vf->flag |= VFRAME_FLAG_SYNCFRAME;
		if (frame->data_flag & ERROR_FLAG)
			vf->flag |= VFRAME_FLAG_ERROR_RECOVERY;
		update_vf_memhandle(hw, vf, buffer_index);

		if (!hw->enable_fence) {
			hw->buffer_spec[buffer_index].used = 2;
			hw->buffer_spec[buffer_index].vf_ref++;
		}

		dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL,
			"%s %d frame = %p top_field = %p bottom_field = %p\n", __func__, __LINE__, frame->frame,
			frame->top_field, frame->bottom_field);

		if (frame->frame != NULL) {
			dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL,
				"%s %d coded_frame 
= %d frame_mbs_only_flag = %d structure = %d\n", __func__, __LINE__, + frame->frame->coded_frame, frame->frame->frame_mbs_only_flag, frame->frame->structure); + } + + if (bForceInterlace || is_interlace(frame)) { + vf->type = + VIDTYPE_INTERLACE_FIRST | + nv_order; + + if (frame->frame != NULL && + (frame->frame->pic_struct == PIC_TOP_BOT || + frame->frame->pic_struct == PIC_BOT_TOP) && + frame->frame->coded_frame) { + if (frame->frame != NULL && frame->frame->pic_struct == PIC_TOP_BOT) { + vf->type |= (i == 0 ? + VIDTYPE_INTERLACE_TOP : + VIDTYPE_INTERLACE_BOTTOM); + } else if (frame->frame != NULL && frame->frame->pic_struct == PIC_BOT_TOP) { + vf->type |= (i == 0 ? + VIDTYPE_INTERLACE_BOTTOM : + VIDTYPE_INTERLACE_TOP); + } + } else if (frame->top_field != NULL && frame->bottom_field != NULL) {/*top first*/ + if (frame->top_field->poc <= frame->bottom_field->poc) + vf->type |= (i == 0 ? + VIDTYPE_INTERLACE_TOP : + VIDTYPE_INTERLACE_BOTTOM); + else + vf->type |= (i == 0 ? + VIDTYPE_INTERLACE_BOTTOM : + VIDTYPE_INTERLACE_TOP); + } else { + vf->type |= (i == 0 ? 
+ VIDTYPE_INTERLACE_TOP : + VIDTYPE_INTERLACE_BOTTOM); + } + vf->duration = vf->duration/2; + if (i == 1) { + vf->pts = 0; + vf->pts_us64 = 0; + } + + if (frame->frame) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "%s %d type = 0x%x pic_struct = %d pts = 0x%x pts_us64 = 0x%llx bForceInterlace = %d\n", + __func__, __LINE__, vf->type, frame->frame->pic_struct, + vf->pts, vf->pts_us64, bForceInterlace); + } + } + + if (hw->i_only) { + if (vf_count == 1 && frame->is_used == 1 && frame->top_field + && frame->bottom_field == NULL && frame->frame == NULL) { + vf->type = + VIDTYPE_INTERLACE_FIRST | + nv_order; + vf->type |= VIDTYPE_INTERLACE_TOP; + vf->duration = vf->duration/2; + } + + if (vf_count == 1 && frame->is_used == 2 && frame->bottom_field + && frame->top_field == NULL && frame->frame == NULL) { + vf->type = + VIDTYPE_INTERLACE_FIRST | + nv_order; + vf->type |= VIDTYPE_INTERLACE_BOTTOM; + vf->duration = vf->duration/2; + } + } + + /*vf->ratio_control |= (0x3FF << DISP_RATIO_ASPECT_RATIO_BIT);*/ + vf->sar_width = hw->width_aspect_ratio; + vf->sar_height = hw->height_aspect_ratio; + if (!vdec->vbuf.use_ptsserv && vdec_stream_based(vdec)) { + /* offset for tsplayer pts lookup */ + if (i == 0) { + vf->pts_us64 = + (((u64)vf->duration << 32) & + 0xffffffff00000000) | frame->offset_delimiter; + vf->pts = 0; + } else { + vf->pts_us64 = (u64)-1; + vf->pts = 0; + } + } + atomic_add(1, &hw->vf_pre_count); + vdec_vframe_ready(hw_to_vdec(hw), vf); + if (!frame->show_frame) { + vh264_vf_put(vf, vdec); + atomic_add(1, &hw->vf_get_count); + continue; + } + + if (i == 0) { + struct vdec_s *pvdec; + struct vdec_info vs; + + pvdec = hw_to_vdec(hw); + memset(&vs, 0, sizeof(struct vdec_info)); + pvdec->dec_status(pvdec, &vs); + decoder_do_frame_check(pvdec, vf); + vdec_fill_vdec_frame(pvdec, &hw->vframe_qos, &vs, vf, frame->hw_decode_time); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "[%s:%d] i_decoded_frame = %d p_decoded_frame = %d b_decoded_frame = %d\n", + 
__func__, __LINE__,vs.i_decoded_frames,vs.p_decoded_frames,vs.b_decoded_frames); + } + + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->trace.pts_name, vf->timestamp); + ATRACE_COUNTER(hw->trace.disp_q_name, kfifo_len(&hw->display_q)); + ATRACE_COUNTER(hw->trace.new_q_name, kfifo_len(&hw->newframe_q)); + vdec->vdec_fps_detec(vdec->id); +#ifdef AUX_DATA_CRC + decoder_do_aux_data_check(vdec, hw->buffer_spec[buffer_index].aux_data_buf, + hw->buffer_spec[buffer_index].aux_data_size); +#endif + + dpb_print(DECODE_ID(hw), PRINT_FLAG_SEI_DETAIL, "aux_data_size: %d, signal_type: 0x%x\n", + hw->buffer_spec[buffer_index].aux_data_size, hw->video_signal_type); + + if (dpb_is_debug(DECODE_ID(hw), PRINT_FLAG_SEI_DETAIL)) { + int i = 0; + PR_INIT(128); + + for (i = 0; i < hw->buffer_spec[buffer_index].aux_data_size; i++) { + PR_FILL("%02x ", hw->buffer_spec[buffer_index].aux_data_buf[i]); + if (((i + 1) & 0xf) == 0) + PR_INFO(hw->id); + } + PR_INFO(hw->id); + } + + if (hw->is_used_v4l) { + if ((hw->buffer_spec[buffer_index].aux_data_size == 0) && + (frame->slice_type == I_SLICE) && + (atomic_read(&hw->vf_pre_count) == 1)) { + hw->need_free_aux_data = true; + } + + if (hw->need_free_aux_data) { + v4l2_ctx->aux_infos.free_one_sei_buffer(v4l2_ctx, + &hw->buffer_spec[buffer_index].aux_data_buf, + &hw->buffer_spec[buffer_index].aux_data_size, + hw->buffer_spec[buffer_index].ctx_buf_idx); + } else { + if (!hw->discard_dv_data) + v4l2_ctx->aux_infos.bind_dv_buffer(v4l2_ctx, + &vf->src_fmt.comp_buf, + &vf->src_fmt.md_buf); + update_vframe_src_fmt(vf, + hw->buffer_spec[buffer_index].aux_data_buf, + hw->buffer_spec[buffer_index].aux_data_size, + false, vdec->vf_provider_name, NULL); + } + } + + if (without_display_mode == 0) { + if (hw->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vh264_vf_put(vh264_vf_get(vdec), vdec); + } else { + set_meta_data_to_vf(vf, UVM_META_DATA_VF_BASE_INFOS, hw->v4l2_ctx); + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + 
fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } else + vh264_vf_put(vh264_vf_get(vdec), vdec); + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + + return 0; +} + +int post_picture_early(struct vdec_s *vdec, int index) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + struct h264_dpb_stru *dpb_stru = &hw->dpb; + struct FrameStore fs; + u32 offset_lo, offset_hi; + + if (!hw->enable_fence) + return 0; + + /* create fence for each buffers. */ + if (vdec_timeline_create_fence(vdec->sync)) + return -1; + + memset(&fs, 0, sizeof(fs)); + + fs.buf_spec_num = index; + fs.fence = vdec->sync->fence; + fs.slice_type = dpb_stru->mSlice.slice_type; + fs.dpb_frame_count = dpb_stru->dpb_frame_count; + + offset_lo = dpb_stru->dpb_param.l.data[OFFSET_DELIMITER_LO]; + offset_hi = dpb_stru->dpb_param.l.data[OFFSET_DELIMITER_HI]; + fs.offset_delimiter = (offset_lo | offset_hi << 16); + + if (hw->chunk) { + fs.pts = hw->chunk->pts; + fs.pts64 = hw->chunk->pts64; + fs.timestamp = hw->chunk->timestamp; + } + fs.show_frame = true; + post_video_frame(vdec, &fs); + + display_frame_count[DECODE_ID(hw)]++; + return 0; +} + +int prepare_display_buf(struct vdec_s *vdec, struct FrameStore *frame) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *)vdec->private; + + if (hw->enable_fence) { + int i, j, used_size, ret; + int signed_count = 0; + struct vframe_s *signed_fence[VF_POOL_SIZE]; + + post_prepare_process(vdec, frame); + + if (!frame->show_frame) + pr_info("do not display.\n"); + + hw->buffer_spec[frame->buf_spec_num].used = 2; + hw->buffer_spec[frame->buf_spec_num].vf_ref = 1; + hw->buffer_spec[frame->buf_spec_num].fs_idx = frame->index; + + /* notify signal to wake up wq of fence. 
*/ + vdec_timeline_increase(vdec->sync, 1); + + mutex_lock(&hw->fence_mutex); + used_size = hw->fence_vf_s.used_size; + if (used_size) { + for (i = 0, j = 0; i < VF_POOL_SIZE && j < used_size; i++) { + if (hw->fence_vf_s.fence_vf[i] != NULL) { + ret = dma_fence_get_status(hw->fence_vf_s.fence_vf[i]->fence); + if (ret == 1) { + signed_fence[signed_count] = hw->fence_vf_s.fence_vf[i]; + hw->fence_vf_s.fence_vf[i] = NULL; + hw->fence_vf_s.used_size--; + signed_count++; + } + j++; + } + } + } + mutex_unlock(&hw->fence_mutex); + if (signed_count != 0) { + for (i = 0; i < signed_count; i++) + vh264_vf_put(signed_fence[i], vdec); + } + + return 0; + } + + if (post_prepare_process(vdec, frame)) + return -1; + + if (post_video_frame(vdec, frame)) + return -1; + + display_frame_count[DECODE_ID(hw)]++; + return 0; +} + +int notify_v4l_eos(struct vdec_s *vdec) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = &hw->vframe_dummy; + struct vdec_v4l2_buffer *fb = NULL; + int index = INVALID_IDX; + ulong expires; + + if (hw->eos) { + expires = jiffies + msecs_to_jiffies(2000); + while (!have_free_buf_spec(vdec)) { + if (time_after(jiffies, expires)) { + pr_err("[%d] H264 isn't enough buff for notify eos.\n", ctx->id); + return 0; + } + } + + index = v4l_get_free_buf_idx(vdec); + if (INVALID_IDX == index) { + pr_err("[%d] H264 EOS get free buff fail.\n", ctx->id); + return 0; + } + + fb = (struct vdec_v4l2_buffer *) + hw->buffer_spec[index].cma_alloc_addr; + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + vf->v4l_mem_handle = (ulong)fb; + + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + + ATRACE_COUNTER(hw->trace.pts_name, vf->timestamp); + + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + + pr_info("[%d] H264 EOS 
notify.\n", ctx->id); + } + + return 0; +} + +/****************** + * Hardware config + */ +char *slice_type_name[] = { + "P_SLICE ", + "B_SLICE ", + "I_SLICE ", + "SP_SLICE", + "SI_SLICE", +}; + +char *picture_structure_name[] = { + "FRAME", + "TOP_FIELD", + "BOTTOM_FIELD" +}; + +void print_pic_info(int decindex, const char *info, + struct StorablePicture *pic, + int slice_type) +{ + if (pic) + dpb_print(decindex, PRINT_FLAG_DEC_DETAIL, + "%s: %s (original %s), %s, mb_aff_frame_flag %d poc %d, pic_num %d, buf_spec_num %d data_flag 0x%x\n", + info, + picture_structure_name[pic->structure], + pic->coded_frame ? "Frame" : "Field", + (slice_type < 0 || + slice_type >= (sizeof(slice_type_name) / sizeof(slice_type_name[0]))) ? "" : slice_type_name[slice_type], + pic->mb_aff_frame_flag, + pic->poc, + pic->pic_num, + pic->buf_spec_num, + pic->data_flag); +} + +static void reset_process_time(struct vdec_h264_hw_s *hw) +{ + if (hw->start_process_time) { + unsigned process_time = + 1000 * (jiffies - hw->start_process_time) / HZ; + hw->start_process_time = 0; + if (process_time > max_process_time[DECODE_ID(hw)]) + max_process_time[DECODE_ID(hw)] = process_time; + } +} + +static void start_process_time(struct vdec_h264_hw_s *hw) +{ + hw->decode_timeout_count = 10; + hw->start_process_time = jiffies; +} + +static void config_aux_buf(struct vdec_h264_hw_s *hw) +{ + WRITE_VREG(H264_AUX_ADR, hw->aux_phy_addr); + WRITE_VREG(H264_AUX_DATA_SIZE, + ((hw->prefix_aux_size >> 4) << 16) | + (hw->suffix_aux_size >> 4) + ); +} + +/* +* dv_meta_flag: 1, dolby meta only; 2, not include dolby meta +*/ +static void set_aux_data(struct vdec_h264_hw_s *hw, + struct StorablePicture *pic, unsigned char suffix_flag, + unsigned char dv_meta_flag, struct vdec_h264_hw_s *hw_b) +{ + int i; + unsigned short *aux_adr; + unsigned size_reg_val = + READ_VREG(H264_AUX_DATA_SIZE); + unsigned aux_count = 0; + int aux_size = 0; + struct vdec_h264_hw_s *hw_buf = hw_b ? 
hw_b : hw; + if (pic == NULL || pic->buf_spec_num < 0 || pic->buf_spec_num >= BUFSPEC_POOL_SIZE + || (!is_buf_spec_in_use(hw, pic->buf_spec_num))) + return; + + if (suffix_flag) { + aux_adr = (unsigned short *) + (hw_buf->aux_addr + + hw_buf->prefix_aux_size); + aux_count = + ((size_reg_val & 0xffff) << 4) + >> 1; + aux_size = + hw_buf->suffix_aux_size; + } else { + aux_adr = + (unsigned short *)hw_buf->aux_addr; + aux_count = + ((size_reg_val >> 16) << 4) + >> 1; + aux_size = + hw_buf->prefix_aux_size; + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_SEI_DETAIL)) { + dpb_print(DECODE_ID(hw), 0, + "%s:poc %d old size %d count %d,suf %d dv_flag %d\r\n", + __func__, pic->poc, AUX_DATA_SIZE(pic), + aux_count, suffix_flag, dv_meta_flag); + } + if (aux_size > 0 && aux_count > 0) { + int heads_size = 0; + + for (i = 0; i < aux_count; i++) { + unsigned char tag = aux_adr[i] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + heads_size += 8; + else if (dv_meta_flag == 1 && tag == 0x1) + heads_size += 8; + else if (dv_meta_flag == 2 && tag != 0x1) + heads_size += 8; + } + } + + if (AUX_DATA_BUF(pic)) { + unsigned char valid_tag = 0; + unsigned char *h = + AUX_DATA_BUF(pic) + + AUX_DATA_SIZE(pic); + unsigned char *p = h + 8; + int len = 0; + int padding_len = 0; + for (i = 0; i < aux_count; i += 4) { + int ii; + unsigned char tag = aux_adr[i + 3] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + valid_tag = 1; + else if (dv_meta_flag == 1 + && tag == 0x1) + valid_tag = 1; + else if (dv_meta_flag == 2 + && tag != 0x1) + valid_tag = 1; + else + valid_tag = 0; + if (valid_tag && len > 0) { + AUX_DATA_SIZE(pic) += + (len + 8); + h[0] = + (len >> 24) & 0xff; + h[1] = + (len >> 16) & 0xff; + h[2] = + (len >> 8) & 0xff; + h[3] = + (len >> 0) & 0xff; + h[6] = + (padding_len >> 8) + & 0xff; + h[7] = + (padding_len) & 0xff; + h += (len + 8); + p += 8; + len = 0; + padding_len = 0; + } + if (valid_tag) { + h[4] = tag; + h[5] = 0; + h[6] = 0; + h[7] = 0; 
+ } + } + if (valid_tag) { + for (ii = 0; ii < 4; ii++) { + unsigned short aa = + aux_adr[i + 3 + - ii]; + *p = aa & 0xff; + p++; + len++; + /*if ((aa >> 8) == 0xff) + padding_len++;*/ + } + } + } + if (len > 0) { + AUX_DATA_SIZE(pic) += (len + 8); + h[0] = (len >> 24) & 0xff; + h[1] = (len >> 16) & 0xff; + h[2] = (len >> 8) & 0xff; + h[3] = (len >> 0) & 0xff; + h[6] = (padding_len >> 8) & 0xff; + h[7] = (padding_len) & 0xff; + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_SEI_DETAIL)) { + dpb_print(DECODE_ID(hw), 0, + "aux: (size %d) suffix_flag %d\n", + AUX_DATA_SIZE(pic), suffix_flag); + for (i = 0; i < AUX_DATA_SIZE(pic); i++) { + dpb_print_cont(DECODE_ID(hw), 0, + "%02x ", AUX_DATA_BUF(pic)[i]); + if (((i + 1) & 0xf) == 0) + dpb_print_cont( + DECODE_ID(hw), + 0, "\n"); + } + dpb_print_cont(DECODE_ID(hw), + 0, "\n"); + } + } + } + +} + +static void release_aux_data(struct vdec_h264_hw_s *hw, + int buf_spec_num) +{ +#if 0 + kfree(hw->buffer_spec[buf_spec_num].aux_data_buf); + hw->buffer_spec[buf_spec_num].aux_data_buf = NULL; + hw->buffer_spec[buf_spec_num].aux_data_size = 0; +#endif +} + +static void dump_aux_buf(struct vdec_h264_hw_s *hw) +{ + int i; + unsigned short *aux_adr = + (unsigned short *) + hw->aux_addr; + unsigned aux_size = + (READ_VREG(H264_AUX_DATA_SIZE) + >> 16) << 4; + + if (hw->prefix_aux_size > 0) { + dpb_print(DECODE_ID(hw), + 0, + "prefix aux: (size %d)\n", + aux_size); + for (i = 0; i < + (aux_size >> 1); i++) { + dpb_print_cont(DECODE_ID(hw), + 0, + "%04x ", + *(aux_adr + i)); + if (((i + 1) & 0xf) + == 0) + dpb_print_cont( + DECODE_ID(hw), + 0, "\n"); + } + } + if (hw->suffix_aux_size > 0) { + aux_adr = (unsigned short *) + (hw->aux_addr + + hw->prefix_aux_size); + aux_size = + (READ_VREG(H264_AUX_DATA_SIZE) & 0xffff) + << 4; + dpb_print(DECODE_ID(hw), + 0, + "suffix aux: (size %d)\n", + aux_size); + for (i = 0; i < + (aux_size >> 1); i++) { + dpb_print_cont(DECODE_ID(hw), + 0, + "%04x ", *(aux_adr + i)); + if (((i + 1) & 0xf) == 0) + 
dpb_print_cont(DECODE_ID(hw), + 0, "\n"); + } + } +} + +#ifdef VDEC_DW + +struct vdec_dw_param_set{ + char dw_x_shrink_1st; + char dw_x_shrink_2nd; + char dw_x_shrink_3rd; + char dw_y_shrink_1st; + char dw_y_shrink_2nd; + char dw_y_shrink_3rd; + char dw_merge_8to16; + char dw_merge_16to32; + char dw_dma_blk_mode; + char dw_bwsave_mode; +}; +//#define FOR_LPDDR4_EFFICIENCY + +static void h264_vdec_dw_cfg(struct vdec_h264_hw_s *hw, int canvas_pos) +{ + u32 data32 = 0, stride = 0; + struct vdec_dw_param_set *p = NULL; + struct vdec_dw_param_set dw_param_set_pool[] = { + /*x1, x2, x3, y1, y2, y3, m8t6, m16to32 */ + //{0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, /* 1/1, 1/1 */ + {1, 0, 0, 0, 0, 0, 0, 0, 0, 1}, /* 1/2, 1/1 */ + {1, 0, 0, 1, 0, 0, 0, 0, 0, 1}, /* 1/2, 1/2 */ + //{1, 0, 0, 1, 1, 0, 0, 0, 0, 1}, /* 1/4, 1/2 */ + {2, 0, 1, 1, 3, 0, 0, 1, 0, 1}, /* 1/4, 1/4 */ + //{1, 1, 1, 0, 1, 1, 1, 1, 0, 1}, /*> 1080p 1/8, 1/4 */ + {1, 1, 1, 1, 1, 1, 1, 1, 0, 1}, /*> 1080p 1/8, 1/8 */ + }; + + if (IS_VDEC_DW(hw)) + p = &dw_param_set_pool[__ffs(IS_VDEC_DW(hw))]; + else + return; + + WRITE_VREG(MDEC_DOUBLEW_CFG3, + hw->buffer_spec[canvas_pos].vdec_dw_y_addr); // luma start address + WRITE_VREG(MDEC_DOUBLEW_CFG4, + hw->buffer_spec[canvas_pos].vdec_dw_u_addr); // chroma start address + + stride = ALIGN_WIDTH((hw->mb_width << 4) / (IS_VDEC_DW(hw))); + if ((IS_VDEC_DW(hw)) == 1) //width 1/2 + stride >>= 1; + data32 = (stride << 16) | stride; + WRITE_VREG(MDEC_DOUBLEW_CFG5, data32); // chroma stride | luma stride + + data32 = 0; + p->dw_dma_blk_mode = hw->canvas_mode; + data32 |= ((p->dw_x_shrink_1st << 0 ) | // 1st down-scale horizontal, 00:no-scale 01:1/2avg 10:left 11:right + (p->dw_y_shrink_1st << 2 ) | // 1st down-scale vertical, 00:no-scale 01:1/2avg 10:up 11:down + (p->dw_x_shrink_2nd << 4 ) | // 2nd down-scale horizontal, 00:no-scale 01:1/2avg 10:left 11:right + (p->dw_y_shrink_2nd << 6 ) | // 2nd down-scale vertical, 00:no-scale 01:1/2avg 10:up 11:down + (p->dw_x_shrink_3rd << 8 ) | 
// 3rd down-scale horizontal, 00:no-scale 01:1/2avg 10:left 11:right + (p->dw_y_shrink_3rd << 10) | // 3rd down-scale vertical, 00:no-scale 01:1/2avg 10:up 11:down + (p->dw_merge_8to16 << 12 ) | // 8->16 horizontal block merge for better ddr efficiency + (p->dw_merge_16to32 << 13) | // 16->32 horizontal block merge for better ddr efficiency + (p->dw_dma_blk_mode << 14) | // DMA block mode, 0:linear 1:32x32 2:64x32 +#ifdef FOR_LPDDR4_EFFICIENCY + (1 << 19) | +#endif + (p->dw_bwsave_mode << 22)); // Save line buffers to save band width + WRITE_VREG(MDEC_DOUBLEW_CFG1, data32); // add some special tests here + + data32 = 0; + data32 |= (1 << 0) | (0 << 27); + WRITE_VREG(MDEC_DOUBLEW_CFG0, data32); // Double Write Enable | source from dblk + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "vdec_double_write mode %d\n", + IS_VDEC_DW(hw)); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "param {%d, %d, %d, %d, %d, %d, %d, %d, %d}\n", + p->dw_x_shrink_1st, + p->dw_y_shrink_1st, + p->dw_x_shrink_2nd, + p->dw_y_shrink_2nd, + p->dw_x_shrink_3rd, + p->dw_y_shrink_3rd, + p->dw_merge_8to16, + p->dw_merge_16to32, + p->dw_dma_blk_mode); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "cfg0,1,3,4,5 = {%x, %x, %x, %x, %x}\n", + READ_VREG(MDEC_DOUBLEW_CFG0), + READ_VREG(MDEC_DOUBLEW_CFG1), + READ_VREG(MDEC_DOUBLEW_CFG3), + READ_VREG(MDEC_DOUBLEW_CFG4), + READ_VREG(MDEC_DOUBLEW_CFG5)); +} +#endif + +static void config_decode_mode(struct vdec_h264_hw_s *hw) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + struct vdec_s *vdec = hw_to_vdec(hw); +#endif + if (input_frame_based(hw_to_vdec(hw))) + WRITE_VREG(H264_DECODE_MODE, + DECODE_MODE_MULTI_FRAMEBASE); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (vdec->slave) + WRITE_VREG(H264_DECODE_MODE, + (hw->got_valid_nal << 8) | + DECODE_MODE_MULTI_DVBAL); + else if (vdec->master) + WRITE_VREG(H264_DECODE_MODE, + (hw->got_valid_nal << 8) | + DECODE_MODE_MULTI_DVENL); +#endif + else + 
WRITE_VREG(H264_DECODE_MODE, + DECODE_MODE_MULTI_STREAMBASE); + WRITE_VREG(H264_DECODE_SEQINFO, + hw->seq_info2); + WRITE_VREG(HEAD_PADING_REG, 0); + + if (hw->init_flag == 0) + WRITE_VREG(INIT_FLAG_REG, 0); + else + WRITE_VREG(INIT_FLAG_REG, 1); +} + +int config_decode_buf(struct vdec_h264_hw_s *hw, struct StorablePicture *pic) +{ + /* static int count = 0; */ + int ret = 0; + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + struct Slice *pSlice = &(p_H264_Dpb->mSlice); + unsigned int colocate_adr_offset; + unsigned int val; + struct StorablePicture *last_pic = hw->last_dec_picture; + +#ifdef ONE_COLOCATE_BUF_PER_DECODE_BUF + int colocate_buf_index; +#endif +#define H264_BUFFER_INFO_INDEX PMV3_X /* 0xc24 */ +#define H264_BUFFER_INFO_DATA PMV2_X /* 0xc22 */ +#define H264_CURRENT_POC_IDX_RESET LAST_SLICE_MV_ADDR /* 0xc30 */ +#define H264_CURRENT_POC LAST_MVY /* 0xc32 shared with conceal MV */ + +#define H264_CO_MB_WR_ADDR VLD_C38 /* 0xc38 */ +/* bit 31:30 -- L1[0] picture coding structure, + * 00 - top field, 01 - bottom field, + * 10 - frame, 11 - mbaff frame + * bit 29 - L1[0] top/bot for B field pciture , 0 - top, 1 - bot + * bit 28:0 h264_co_mb_mem_rd_addr[31:3] + * -- only used for B Picture Direct mode [2:0] will set to 3'b000 + */ +#define H264_CO_MB_RD_ADDR VLD_C39 /* 0xc39 */ + +/* bit 15 -- flush co_mb_data to DDR -- W-Only + * bit 14 -- h264_co_mb_mem_wr_addr write Enable -- W-Only + * bit 13 -- h264_co_mb_info_wr_ptr write Enable -- W-Only + * bit 9 -- soft_reset -- W-Only + * bit 8 -- upgent + * bit 7:2 -- h264_co_mb_mem_wr_addr + * bit 1:0 -- h264_co_mb_info_wr_ptr + */ +#define H264_CO_MB_RW_CTL VLD_C3D /* 0xc3d */ +#define DCAC_DDR_BYTE64_CTL 0x0e1d + unsigned long canvas_adr; + unsigned int ref_reg_val; + unsigned int one_ref_cfg = 0; + int h264_buffer_info_data_write_count; + int i, j; + unsigned int colocate_wr_adr; + unsigned int colocate_rd_adr; + unsigned char use_direct_8x8; + int canvas_pos; + canvas_pos = 
hw->buffer_spec[pic->buf_spec_num].canvas_pos; + WRITE_VREG(H264_CURRENT_POC_IDX_RESET, 0); + WRITE_VREG(H264_CURRENT_POC, pic->frame_poc); + WRITE_VREG(H264_CURRENT_POC, pic->top_poc); + WRITE_VREG(H264_CURRENT_POC, pic->bottom_poc); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "%s: pic_num is %d, poc is %d (%d, %d, %d), buf_spec_num %d canvas_pos %d\n", + __func__, pic->pic_num, pic->poc, pic->frame_poc, + pic->top_poc, pic->bottom_poc, pic->buf_spec_num, + canvas_pos); + print_pic_info(DECODE_ID(hw), "cur", pic, pSlice->slice_type); + +#ifdef VDEC_DW + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T7) { + if (IS_VDEC_DW(hw) && pic->mb_aff_frame_flag) + WRITE_VREG(MDEC_DOUBLEW_CFG0, + (READ_VREG(MDEC_DOUBLEW_CFG0) & (~(1 << 30)))); + } +#endif + WRITE_VREG(CURR_CANVAS_CTRL, canvas_pos << 24); + canvas_adr = READ_VREG(CURR_CANVAS_CTRL) & 0xffffff; + + if (!hw->mmu_enable) { + WRITE_VREG(REC_CANVAS_ADDR, canvas_adr); + WRITE_VREG(DBKR_CANVAS_ADDR, canvas_adr); + WRITE_VREG(DBKW_CANVAS_ADDR, canvas_adr); +#ifdef VDEC_DW + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T7) { + WRITE_VREG(MDEC_DOUBLEW_CFG1, + (hw->buffer_spec[canvas_pos].vdec_dw_y_canvas_index | + (hw->buffer_spec[canvas_pos].vdec_dw_u_canvas_index << 8))); + } else { + h264_vdec_dw_cfg(hw, canvas_pos); + } +#endif + } else + hevc_sao_set_pic_buffer(hw, pic); + + if (pic->mb_aff_frame_flag) + hw->buffer_spec[pic->buf_spec_num].info0 = 0xf4c0; + else if (pic->structure == TOP_FIELD) + hw->buffer_spec[pic->buf_spec_num].info0 = 0xf400; + else if (pic->structure == BOTTOM_FIELD) + hw->buffer_spec[pic->buf_spec_num].info0 = 0xf440; + else + hw->buffer_spec[pic->buf_spec_num].info0 = 0xf480; + + if (pic->bottom_poc < pic->top_poc) + hw->buffer_spec[pic->buf_spec_num].info0 |= 0x100; + + hw->buffer_spec[pic->buf_spec_num].info1 = pic->top_poc; + hw->buffer_spec[pic->buf_spec_num].info2 = pic->bottom_poc; + WRITE_VREG(H264_BUFFER_INFO_INDEX, 16); + + for (j = 0; j < hw->dpb.mDPB.size; j++) { + int 
long_term_flag; + i = get_buf_spec_by_canvas_pos(hw, j); + if (i < 0) + break; + long_term_flag = + get_long_term_flag_by_buf_spec_num(p_H264_Dpb, i); + if (long_term_flag > 0) { + if (long_term_flag & 0x1) + hw->buffer_spec[i].info0 |= (1 << 4); + else + hw->buffer_spec[i].info0 &= ~(1 << 4); + + if (long_term_flag & 0x2) + hw->buffer_spec[i].info0 |= (1 << 5); + else + hw->buffer_spec[i].info0 &= ~(1 << 5); + } + + if (i == pic->buf_spec_num) + WRITE_VREG(H264_BUFFER_INFO_DATA, + hw->buffer_spec[i].info0 | 0xf); + else + WRITE_VREG(H264_BUFFER_INFO_DATA, + hw->buffer_spec[i].info0); + WRITE_VREG(H264_BUFFER_INFO_DATA, hw->buffer_spec[i].info1); + WRITE_VREG(H264_BUFFER_INFO_DATA, hw->buffer_spec[i].info2); + } + + /* config reference buffer */ + if (hw->mmu_enable) { + hevc_mcr_config_mc_ref(hw); + hevc_mcr_config_mcrcc(hw); + } + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "list0 size %d\n", pSlice->listXsize[0]); + WRITE_VREG(H264_BUFFER_INFO_INDEX, 0); + ref_reg_val = 0; + j = 0; + h264_buffer_info_data_write_count = 0; + + //disable this read cache when frame width <= 64 (4MBs) + //IQIDCT_CONTROL, bit[16] dcac_dma_read_cache_disable + if (hw->frame_width <= 64) { + SET_VREG_MASK(IQIDCT_CONTROL,(1 << 16)); + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)) + // Disable DDR_BYTE64_CACHE + WRITE_VREG(DCAC_DDR_BYTE64_CTL, + (READ_VREG(DCAC_DDR_BYTE64_CTL) & (~0xf)) | 0xa); + } + else + CLEAR_VREG_MASK(IQIDCT_CONTROL,(1 << 16)); + + if (last_pic) + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, + "last_pic->data_flag %x slice_type %x last_pic->slice_type %x\n", + last_pic->data_flag, pSlice->slice_type, last_pic->slice_type); + if (!hw->i_only && !(hw->error_proc_policy & 0x2000) && + last_pic && (last_pic->data_flag & ERROR_FLAG) + && (!(last_pic->slice_type == B_SLICE)) + && (!(pSlice->slice_type == I_SLICE))) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, + "no i/idr error mark\n"); + hw->data_flag |= ERROR_FLAG; + pic->data_flag |= 
ERROR_FLAG; + } + + for (i = 0; i < (unsigned int)(pSlice->listXsize[0]); i++) { + /*ref list 0 */ + struct StorablePicture *ref = pSlice->listX[0][i]; + unsigned int cfg; + /* bit[6:5] - frame/field info, + * 01 - top, 10 - bottom, 11 - frame + */ + #ifdef ERROR_CHECK + if (ref == NULL) { + hw->data_flag |= ERROR_FLAG; + pic->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " ref list0 NULL\n"); + return -1; + } + if ((ref->data_flag & ERROR_FLAG) && ref_frame_mark_flag[DECODE_ID(hw)]) { + hw->data_flag |= ERROR_FLAG; + pic->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " ref error mark1 \n"); + } + + if (hw->error_proc_policy & 0x80000) { + if (ref_b_frame_error_max_count && + ref->slice_type == B_SLICE) { + if (ref->data_flag & ERROR_FLAG) + hw->b_frame_error_count++; + else + hw->b_frame_error_count = 0; + if (hw->b_frame_error_count > ref_b_frame_error_max_count) { + hw->b_frame_error_count = 0; + dpb_print(DECODE_ID(hw), 0, + "error %d B frame, reset dpb buffer\n", + ref_b_frame_error_max_count); + return -1; + } + } + } + + if (ref->data_flag & NULL_FLAG) + hw->data_flag |= NULL_FLAG; +#endif + canvas_pos = hw->buffer_spec[ref->buf_spec_num].canvas_pos; + + if (ref->structure == TOP_FIELD) + cfg = 0x1; + else if (ref->structure == BOTTOM_FIELD) + cfg = 0x2; + else /* FRAME */ + cfg = 0x3; + + one_ref_cfg = (canvas_pos & 0x1f) | (cfg << 5); + ref_reg_val <<= 8; + ref_reg_val |= one_ref_cfg; + j++; + + if (j == 4) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "H264_BUFFER_INFO_DATA: %x\n", ref_reg_val); + WRITE_VREG(H264_BUFFER_INFO_DATA, ref_reg_val); + h264_buffer_info_data_write_count++; + j = 0; + } + print_pic_info(DECODE_ID(hw), "list0", + pSlice->listX[0][i], -1); + } + if (j != 0) { + while (j != 4) { + ref_reg_val <<= 8; + ref_reg_val |= one_ref_cfg; + j++; + } + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "H264_BUFFER_INFO_DATA: %x\n", + ref_reg_val); + 
WRITE_VREG(H264_BUFFER_INFO_DATA, ref_reg_val); + h264_buffer_info_data_write_count++; + } + ref_reg_val = (one_ref_cfg << 24) | (one_ref_cfg<<16) | + (one_ref_cfg << 8) | one_ref_cfg; + for (i = h264_buffer_info_data_write_count; i < 8; i++) + WRITE_VREG(H264_BUFFER_INFO_DATA, ref_reg_val); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "list1 size %d\n", pSlice->listXsize[1]); + WRITE_VREG(H264_BUFFER_INFO_INDEX, 8); + ref_reg_val = 0; + j = 0; + + for (i = 0; i < (unsigned int)(pSlice->listXsize[1]); i++) { + /* ref list 0 */ + struct StorablePicture *ref = pSlice->listX[1][i]; + unsigned int cfg; + /* bit[6:5] - frame/field info, + * 01 - top, 10 - bottom, 11 - frame + */ + + #ifdef ERROR_CHECK + if (ref == NULL) { + hw->data_flag |= ERROR_FLAG; + pic->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " ref error list1 NULL\n"); + return -2; + } + if ((ref->data_flag & ERROR_FLAG) && (ref_frame_mark_flag[DECODE_ID(hw)])) { + pic->data_flag |= ERROR_FLAG; + hw->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " ref error mark2\n"); + } + if (ref->data_flag & NULL_FLAG) + hw->data_flag |= NULL_FLAG; +#endif + canvas_pos = hw->buffer_spec[ref->buf_spec_num].canvas_pos; + if (ref->structure == TOP_FIELD) + cfg = 0x1; + else if (ref->structure == BOTTOM_FIELD) + cfg = 0x2; + else /* FRAME */ + cfg = 0x3; + one_ref_cfg = (canvas_pos & 0x1f) | (cfg << 5); + ref_reg_val <<= 8; + ref_reg_val |= one_ref_cfg; + j++; + + if (j == 4) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "H264_BUFFER_INFO_DATA: %x\n", + ref_reg_val); + WRITE_VREG(H264_BUFFER_INFO_DATA, ref_reg_val); + j = 0; + } + print_pic_info(DECODE_ID(hw), "list1", + pSlice->listX[1][i], -1); + } + if (j != 0) { + while (j != 4) { + ref_reg_val <<= 8; + ref_reg_val |= one_ref_cfg; + j++; + } + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "H264_BUFFER_INFO_DATA: %x\n", ref_reg_val); + WRITE_VREG(H264_BUFFER_INFO_DATA, ref_reg_val); + } + 
+ /* configure co-locate buffer */ + while ((READ_VREG(H264_CO_MB_RW_CTL) >> 11) & 0x1) + ; + if ((pSlice->mode_8x8_flags & 0x4) && + (pSlice->mode_8x8_flags & 0x2)) + use_direct_8x8 = 1; + else + use_direct_8x8 = 0; + +#ifndef ONE_COLOCATE_BUF_PER_DECODE_BUF + colocate_adr_offset = + ((pic->structure == FRAME && pic->mb_aff_frame_flag == 0) + ? 1 : 2) * 96; + if (use_direct_8x8) + colocate_adr_offset >>= 2; + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "colocate buf size of each mb 0x%x first_mb_in_slice 0x%x colocate_adr_offset 0x%x\r\n", + colocate_adr_offset, pSlice->first_mb_in_slice, + colocate_adr_offset * pSlice->first_mb_in_slice); + + colocate_adr_offset *= pSlice->first_mb_in_slice; + + if ((pic->colocated_buf_index >= 0) && + (pic->colocated_buf_index < p_H264_Dpb->colocated_buf_count)) { + colocate_wr_adr = p_H264_Dpb->colocated_mv_addr_start + + ((p_H264_Dpb->colocated_buf_size * + pic->colocated_buf_index) + >> (use_direct_8x8 ? 2 : 0)); + if ((colocate_wr_adr + p_H264_Dpb->colocated_buf_size) > + p_H264_Dpb->colocated_mv_addr_end) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "Error, colocate buf is not enough, index is %d\n", + pic->colocated_buf_index); + ret = -3; + } + val = colocate_wr_adr + colocate_adr_offset; + WRITE_VREG(H264_CO_MB_WR_ADDR, val); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "WRITE_VREG(H264_CO_MB_WR_ADDR) = %x, first_mb_in_slice %x pic_structure %x colocate_adr_offset %x mode_8x8_flags %x colocated_buf_size %x\n", + val, pSlice->first_mb_in_slice, pic->structure, + colocate_adr_offset, pSlice->mode_8x8_flags, + p_H264_Dpb->colocated_buf_size); + } else { + WRITE_VREG(H264_CO_MB_WR_ADDR, 0xffffffff); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "WRITE_VREG(H264_CO_MB_WR_ADDR) = 0xffffffff\n"); + } +#else + colocate_buf_index = hw->buffer_spec[pic->buf_spec_num].canvas_pos; + colocate_adr_offset = + ((pic->structure == FRAME && pic->mb_aff_frame_flag == 0) ? 
1 : 2) * 96; + if (use_direct_8x8) + colocate_adr_offset >>= 2; + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "colocate buf size of each mb 0x%x first_mb_in_slice 0x%x colocate_adr_offset 0x%x\r\n", + colocate_adr_offset, pSlice->first_mb_in_slice, + colocate_adr_offset * pSlice->first_mb_in_slice); + + colocate_adr_offset *= pSlice->first_mb_in_slice; + + colocate_wr_adr = p_H264_Dpb->colocated_mv_addr_start + + ((p_H264_Dpb->colocated_buf_size * colocate_buf_index) >> + (use_direct_8x8 ? 2 : 0)); + + if ((colocate_wr_adr + p_H264_Dpb->colocated_buf_size) > + p_H264_Dpb->colocated_mv_addr_end) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "Error, colocate buf is not enough, col buf index is %d\n", + colocate_buf_index); + ret = -4; + } + val = colocate_wr_adr + colocate_adr_offset; + WRITE_VREG(H264_CO_MB_WR_ADDR, val); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "WRITE_VREG(H264_CO_MB_WR_ADDR) = %x, first_mb_in_slice %x pic_structure %x colocate_adr_offset %x mode_8x8_flags %x colocated_buf_size %x\n", + val, pSlice->first_mb_in_slice, pic->structure, + colocate_adr_offset, pSlice->mode_8x8_flags, + p_H264_Dpb->colocated_buf_size); +#endif + if (pSlice->listXsize[1] > 0) { + struct StorablePicture *colocate_pic = pSlice->listX[1][0]; + /* H264_CO_MB_RD_ADDR[bit 31:30], + * original picture structure of L1[0], + * 00 - top field, 01 - bottom field, + * 10 - frame, 11 - mbaff frame + */ + int l10_structure, cur_structure; + int cur_colocate_ref_type; + /* H264_CO_MB_RD_ADDR[bit 29], top/bot for B field pciture, + * 0 - top, 1 - bot + */ + unsigned int val; + unsigned int colocate_rd_adr_offset; + unsigned int mby_mbx; + unsigned int mby, mbx; +#ifdef ERROR_CHECK + if (colocate_pic == NULL) { + hw->data_flag |= ERROR_FLAG; + pic->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " colocate error pic NULL\n"); + return -5; + } + if (colocate_pic->data_flag & ERROR_FLAG) { + pic->data_flag |= ERROR_FLAG; + hw->data_flag 
|= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG, " colocare ref error mark\n"); + } + if (colocate_pic->data_flag & NULL_FLAG) + hw->data_flag |= NULL_FLAG; +#endif + + if (colocate_pic->mb_aff_frame_flag) + l10_structure = 3; + else { + if (colocate_pic->coded_frame) + l10_structure = 2; + else + l10_structure = (colocate_pic->structure == + BOTTOM_FIELD) ? 1 : 0; + } + + //ALLEGRO_FIX, ported from single mode ucode + mby_mbx = READ_VREG(MBY_MBX); + mby = pSlice->first_mb_in_slice / hw->mb_width; + mbx = pSlice->first_mb_in_slice % hw->mb_width; + if (pic->mb_aff_frame_flag) + cur_structure = 3; + else { + if (pic->coded_frame) + cur_structure = 2; + else + cur_structure = (pic->structure == + BOTTOM_FIELD) ? 1 : 0; + } + if (cur_structure < 2) { + //current_field_structure + if (l10_structure != 2) { + colocate_rd_adr_offset = pSlice->first_mb_in_slice * 2; + } else { + // field_ref_from_frame co_mv_rd_addr : + // mby*2*mb_width + mbx + colocate_rd_adr_offset = mby * 2 * hw->mb_width + mbx; + } + + } else { + //current_frame_structure + if (l10_structure < 2) { + //calculate_co_mv_offset_frame_ref_field: + // frame_ref_from_field co_mv_rd_addr : + // (mby/2*mb_width+mbx)*2 + colocate_rd_adr_offset = ((mby / 2) * hw->mb_width + mbx) * 2; + } else if (cur_structure == 2) { + colocate_rd_adr_offset = pSlice->first_mb_in_slice; + } else { + //mbaff frame case1196 + colocate_rd_adr_offset = pSlice->first_mb_in_slice * 2; + } + + } + + colocate_rd_adr_offset *= 96; + if (use_direct_8x8) + colocate_rd_adr_offset >>= 2; + + if (colocate_old_cal) + colocate_rd_adr_offset = colocate_adr_offset; + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "first_mb_in_slice 0x%x 0x%x 0x%x (MBY_MBX reg 0x%x) use_direct_8x8 %d cur %d (mb_aff_frame_flag %d, coded_frame %d structure %d) col %d (mb_aff_frame_flag %d, coded_frame %d structure %d) offset 0x%x rdoffset 0x%x\n", + pSlice->first_mb_in_slice, mby, mbx, mby_mbx, use_direct_8x8, + cur_structure, 
pic->mb_aff_frame_flag, pic->coded_frame, pic->structure, + l10_structure, colocate_pic->mb_aff_frame_flag, colocate_pic->coded_frame, colocate_pic->structure, + colocate_adr_offset, + colocate_rd_adr_offset); + +#if 0 + /*case0016, p16, + *cur_colocate_ref_type should be configured base on current pic + */ + if (pic->structure == FRAME && + pic->mb_aff_frame_flag) + cur_colocate_ref_type = 0; + else if (pic->structure == BOTTOM_FIELD) + cur_colocate_ref_type = 1; + else + cur_colocate_ref_type = 0; +#else + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + " CUR TMP DEBUG : mb_aff_frame_flag : %d, structure : %d coded_frame %d\n", + pic->mb_aff_frame_flag, + pic->structure, + pic->coded_frame); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + " COL TMP DEBUG : mb_aff_frame_flag : %d, structure : %d coded_frame %d\n", + colocate_pic->mb_aff_frame_flag, + colocate_pic->structure, + colocate_pic->coded_frame); + if (pic->structure == FRAME || pic->mb_aff_frame_flag) { + cur_colocate_ref_type = + (abs(pic->poc - colocate_pic->top_poc) + < abs(pic->poc - + colocate_pic->bottom_poc)) ? 0 : 1; + } else + cur_colocate_ref_type = + (colocate_pic->structure + == BOTTOM_FIELD) ? 1 : 0; +#endif + +#ifndef ONE_COLOCATE_BUF_PER_DECODE_BUF + if ((colocate_pic->colocated_buf_index >= 0) && + (colocate_pic->colocated_buf_index < + p_H264_Dpb->colocated_buf_count)) { + colocate_rd_adr = p_H264_Dpb->colocated_mv_addr_start + + ((p_H264_Dpb->colocated_buf_size * + colocate_pic->colocated_buf_index) + >> (use_direct_8x8 ? 
2 : 0)); + if ((colocate_rd_adr + p_H264_Dpb->colocated_buf_size) > + p_H264_Dpb->colocated_mv_addr_end) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, + "Error, colocate buf is not enough, index is %d\n", + colocate_pic->colocated_buf_index); + ret = -6; + } + /* bit 31:30 -- L1[0] picture coding structure, + * 00 - top field, 01 - bottom field, + * 10 - frame, 11 - mbaff frame + * bit 29 - L1[0] top/bot for B field pciture, + * 0 - top, 1 - bot + * bit 28:0 h264_co_mb_mem_rd_addr[31:3] + * -- only used for B Picture Direct mode + * [2:0] will set to 3'b000 + */ + /* #define H264_CO_MB_RD_ADDR VLD_C39 0xc39 */ + val = ((colocate_rd_adr+colocate_rd_adr_offset) >> 3) | + (l10_structure << 30) | + (cur_colocate_ref_type << 29); + WRITE_VREG(H264_CO_MB_RD_ADDR, val); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "co idx %d, WRITE_VREG(H264_CO_MB_RD_ADDR) = %x, addr %x L1(0) pic_structure %d mbaff %d\n", + colocate_pic->colocated_buf_index, + val, colocate_rd_adr + colocate_rd_adr_offset, + colocate_pic->structure, + colocate_pic->mb_aff_frame_flag); + } else { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "Error, reference pic has no colocated buf\n"); + ret = -7; + } +#else + colocate_buf_index = + hw->buffer_spec[colocate_pic->buf_spec_num].canvas_pos; + colocate_rd_adr = p_H264_Dpb->colocated_mv_addr_start + + ((p_H264_Dpb->colocated_buf_size * + colocate_buf_index) + >> (use_direct_8x8 ? 
2 : 0)); + if ((colocate_rd_adr + p_H264_Dpb->colocated_buf_size) > + p_H264_Dpb->colocated_mv_addr_end) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "Error, colocate buf is not enough, col buf index is %d\n", + colocate_buf_index); + ret = -8; + } + /* bit 31:30 -- L1[0] picture coding structure, + * 00 - top field, 01 - bottom field, + * 10 - frame, 11 - mbaff frame + * bit 29 - L1[0] top/bot for B field pciture, + * 0 - top, 1 - bot + * bit 28:0 h264_co_mb_mem_rd_addr[31:3] + * -- only used for B Picture Direct mode + * [2:0] will set to 3'b000 + */ + /* #define H264_CO_MB_RD_ADDR VLD_C39 0xc39 */ + val = ((colocate_rd_adr+colocate_rd_adr_offset)>>3) | + (l10_structure << 30) | (cur_colocate_ref_type << 29); + WRITE_VREG(H264_CO_MB_RD_ADDR, val); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "WRITE_VREG(H264_CO_MB_RD_ADDR) = %x, L1(0) pic_structure %d mbaff %d\n", + val, colocate_pic->structure, + colocate_pic->mb_aff_frame_flag); +#endif + } + return ret; +} + +static int vh264_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + struct vdec_s *vdec = op_arg; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + + spin_lock_irqsave(&hw->lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&hw->newframe_q); + states->buf_avail_num = kfifo_len(&hw->display_q); + + spin_unlock_irqrestore(&hw->lock, flags); + + return 0; +} + +static struct vframe_s *vh264_vf_peek(void *op_arg) +{ + struct vframe_s *vf[2] = {0, 0}; + struct vdec_s *vdec = op_arg; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + + if (!hw) + return NULL; + + if (force_disp_bufspec_num & 0x100) { + if (force_disp_bufspec_num & 0x200) + return NULL; + return &hw->vframe_dummy; + } + + if (kfifo_out_peek(&hw->display_q, (void *)&vf, 2)) { + if (vf[1]) { + vf[0]->next_vf_pts_valid = true; + vf[0]->next_vf_pts = vf[1]->pts; + } else + vf[0]->next_vf_pts_valid = false; + return vf[0]; + } + 
+ return NULL; +} + +static struct vframe_s *vh264_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_s *vdec = op_arg; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + ulong nv_order = VIDTYPE_VIU_NV21; + + if (!hw) + return NULL; + + /* swap uv */ + if (hw->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + nv_order = VIDTYPE_VIU_NV12; + } + + if (force_disp_bufspec_num & 0x100) { + int buffer_index = force_disp_bufspec_num & 0xff; + if (force_disp_bufspec_num & 0x200) + return NULL; + + vf = &hw->vframe_dummy; + vf->duration_pulldown = 0; + vf->pts = 0; + vf->pts_us64 = 0; + set_frame_info(hw, vf, buffer_index); + vf->flag = 0; + if (hw->mmu_enable) { + if (hw->double_write_mode & 0x10) { + /* double write only */ + vf->compBodyAddr = 0; + vf->compHeadAddr = 0; + } else { + /*head adr*/ + vf->compHeadAddr = + hw->buffer_spec[buffer_index].alloc_header_addr; + /*body adr*/ + vf->compBodyAddr = 0; + vf->canvas0Addr = vf->canvas1Addr = 0; + } + + vf->type = VIDTYPE_SCATTER; + + if (hw->double_write_mode) { + vf->type |= VIDTYPE_PROGRESSIVE + | VIDTYPE_VIU_FIELD; + vf->type |= nv_order; + if (hw->double_write_mode == 3) + vf->type |= VIDTYPE_COMPRESS; + + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + hw->buffer_spec[buffer_index]. + canvas_config[0]; + vf->canvas0_config[1] = + hw->buffer_spec[buffer_index]. + canvas_config[1]; + + vf->canvas1_config[0] = + hw->buffer_spec[buffer_index]. + canvas_config[0]; + vf->canvas1_config[1] = + hw->buffer_spec[buffer_index]. 
+ canvas_config[1]; + } else { + vf->type |= + VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; + vf->canvas0Addr = vf->canvas1Addr = 0; + } + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + + vf->compWidth = hw->frame_width; + vf->compHeight = hw->frame_height; + + if (hw->double_write_mode) { + vf->width = hw->frame_width / + get_double_write_ratio(hw->double_write_mode); + vf->height = hw->frame_height / + get_double_write_ratio(hw->double_write_mode); + } + } else { + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | + nv_order; + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(&hw->buffer_spec[buffer_index]); + } + + /*vf->mem_handle = decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, buffer_index);*/ + update_vf_memhandle(hw, vf, buffer_index); + force_disp_bufspec_num |= 0x200; + return vf; + } + + if (kfifo_get(&hw->display_q, &vf)) { + int time = jiffies; + unsigned int frame_interval = + 1000*(time - hw->last_frame_time)/HZ; + struct vframe_s *next_vf = NULL; + ATRACE_COUNTER(hw->trace.disp_q_name, kfifo_len(&hw->display_q)); + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_VDEC_DETAIL)) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int frame_index = FRAME_INDEX(vf->index); + if (frame_index < 0 || + frame_index >= DPB_SIZE_MAX) { + dpb_print(DECODE_ID(hw), 0, + "%s vf index 0x%x error\r\n", + __func__, vf->index); + } else { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "%s buf_spec_num %d vf %p poc %d dur %d pts %d interval %dms, ts: %lld\n", + __func__, BUFSPEC_INDEX(vf->index), vf, + p_H264_Dpb->mFrameStore[frame_index].poc, + vf->duration, vf->pts, frame_interval, vf->timestamp); + } + } + if (hw->last_frame_time > 0) { + if (frame_interval > + max_get_frame_interval[DECODE_ID(hw)]) + max_get_frame_interval[DECODE_ID(hw)] + = frame_interval; + } + hw->last_frame_time = time; + vf->index_disp = atomic_read(&hw->vf_get_count); + atomic_add(1, &hw->vf_get_count); + if (kfifo_peek(&hw->display_q, &next_vf) && next_vf) { + 
vf->next_vf_pts_valid = true; + vf->next_vf_pts = next_vf->pts; + } else + vf->next_vf_pts_valid = false; + + return vf; + } + + return NULL; +} + +static bool vf_valid_check(struct vframe_s *vf, struct vdec_h264_hw_s *hw) { + int i,j; + if (hw->is_used_v4l) + return true; + for (i = 0; i < VF_POOL_SIZE; i++) { + for (j = 0; j < VF_POOL_NUM; j ++) { + if (vf == &(hw->vfpool[j][i]) || vf == &hw->vframe_dummy) + return true; + } + } + dpb_print(DECODE_ID(hw), 0, " invalid vf been put, vf = %p\n", vf); + for (i = 0; i < VF_POOL_SIZE; i++) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "dump vf [%d]= %p\n", i, &(hw->vfpool[hw->cur_pool][i])); + } + return false; +} + +static void vh264_vf_put(struct vframe_s *vf, void *op_arg) +{ + struct vdec_s *vdec = op_arg; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + unsigned long flags; + int buf_spec_num; + int frame_index; + + if (vf == (&hw->vframe_dummy)) + return; + + if (!vf) + return; + + if (vf->index == -1) { + dpb_print(DECODE_ID(hw), 0, + "Warning: %s vf %p invalid index\r\n", + __func__, vf); + return; + } + + if (hw->enable_fence && vf->fence) { + int ret, i; + + mutex_lock(&hw->fence_mutex); + ret = dma_fence_get_status(vf->fence); + if (ret == 0) { + for (i = 0; i < VF_POOL_SIZE; i++) { + if (hw->fence_vf_s.fence_vf[i] == NULL) { + hw->fence_vf_s.fence_vf[i] = vf; + hw->fence_vf_s.used_size++; + mutex_unlock(&hw->fence_mutex); + return; + } + } + } + mutex_unlock(&hw->fence_mutex); + } + + buf_spec_num = BUFSPEC_INDEX(vf->index); + if (hw->enable_fence) + frame_index = hw->buffer_spec[buf_spec_num].fs_idx; + else + frame_index = FRAME_INDEX(vf->index); + + if (frame_index < 0 || + frame_index >= DPB_SIZE_MAX || + buf_spec_num < 0 || + buf_spec_num >= BUFSPEC_POOL_SIZE) { + dpb_print(DECODE_ID(hw), 0, + "%s vf index 0x%x error\r\n", + __func__, vf->index); + return; + } + /*get_buf_spec_idx_by_canvas_config(hw, + &vf->canvas0_config[0]);*/ + + if (hw->enable_fence && vf->fence) { + 
vdec_fence_put(vf->fence); + vf->fence = NULL; + } + + if (vf->meta_data_buf) { + vf->meta_data_buf = NULL; + vf->meta_data_size = 0; + } + + spin_lock_irqsave(&hw->bufspec_lock, flags); + if (hw->buffer_spec[buf_spec_num].used == 2) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s %p to fs[%d], poc %d buf_spec_num %d used %d vf_ref %d\n", + __func__, vf, frame_index, + p_H264_Dpb->mFrameStore[frame_index].poc, + buf_spec_num, + hw->buffer_spec[buf_spec_num].used, + hw->buffer_spec[buf_spec_num].vf_ref); + hw->buffer_spec[buf_spec_num].vf_ref--; + if (hw->buffer_spec[buf_spec_num].vf_ref <= 0) + set_frame_output_flag(&hw->dpb, frame_index); + } else { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s %p isolated vf, buf_spec_num %d used %d vf_ref %d\n", + __func__, vf, buf_spec_num, + hw->buffer_spec[buf_spec_num].used, + hw->buffer_spec[buf_spec_num].vf_ref); + hw->buffer_spec[buf_spec_num].vf_ref--; + if (hw->buffer_spec[buf_spec_num].vf_ref <= 0) { + if (hw->buffer_spec[buf_spec_num].used == 3) + hw->buffer_spec[buf_spec_num].used = 4; + else if (hw->buffer_spec[buf_spec_num].used == 5) + hw->buffer_spec[buf_spec_num].used = 0; + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + + } + + if (hw->is_used_v4l) { + struct buffer_spec_s *pic = &hw->buffer_spec[buf_spec_num]; + + if (vf->v4l_mem_handle != pic->cma_alloc_addr) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "H264 update fb handle, old:%llx, new:%llx\n", + pic->cma_alloc_addr, vf->v4l_mem_handle); + + pic->cma_alloc_addr = vf->v4l_mem_handle; + } + } + + if (vf && (vf_valid_check(vf, hw) == true)) { + atomic_add(1, &hw->vf_put_count); + kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->trace.new_q_name, kfifo_len(&hw->newframe_q)); + } +#define ASSIST_MBOX1_IRQ_REG VDEC_ASSIST_MBOX1_IRQ_REG + if (hw->buffer_empty_flag) + WRITE_VREG(ASSIST_MBOX1_IRQ_REG, 0x1); + 
spin_unlock_irqrestore(&hw->bufspec_lock, flags); +} + + +void * vh264_get_bufspec_lock(struct vdec_s *vdec) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + if (hw) + return (&hw->bufspec_lock); + else + return NULL; +} +static int vh264_event_cb(int type, void *data, void *op_arg) +{ + unsigned long flags; + struct vdec_s *vdec = op_arg; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + + if (type & VFRAME_EVENT_RECEIVER_GET_AUX_DATA) { + struct provider_aux_req_s *req = + (struct provider_aux_req_s *)data; + int buf_spec_num; + + if (!req->vf) { + req->aux_size = atomic_read(&hw->vf_put_count); + return 0; + } + buf_spec_num = BUFSPEC_INDEX(req->vf->index); + spin_lock_irqsave(&hw->lock, flags); + req->aux_buf = NULL; + req->aux_size = 0; + if (buf_spec_num >= 0 && + buf_spec_num < BUFSPEC_POOL_SIZE && + is_buf_spec_in_disp_q(hw, buf_spec_num) + ) { + req->aux_buf = + hw->buffer_spec[buf_spec_num].aux_data_buf; + req->aux_size = + hw->buffer_spec[buf_spec_num].aux_data_size; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + req->dv_enhance_exist = + hw->buffer_spec[buf_spec_num].dv_enhance_exist; +#else + req->dv_enhance_exist = 0; +#endif + } + spin_unlock_irqrestore(&hw->lock, flags); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s(type 0x%x vf buf_spec_num 0x%x)=>size 0x%x\n", + __func__, type, buf_spec_num, req->aux_size); + } else if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(vdec); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +static void set_frame_info(struct vdec_h264_hw_s *hw, struct vframe_s *vf, + u32 index) +{ + int endian_tmp; + struct canvas_config_s *p_canvas_config; + int force_rate = input_frame_based(hw_to_vdec(hw)) ? 
+ force_rate_framebase : force_rate_streambase; + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "%s (%d,%d) dur %d, vf %p, index %d\n", __func__, + hw->frame_width, hw->frame_height, hw->frame_dur, vf, index); + + /* signal_type */ + if (hw->video_signal_from_vui & VIDEO_SIGNAL_TYPE_AVAILABLE_MASK) { + vf->signal_type = hw->video_signal_from_vui; + if (hw->is_used_v4l) { + struct aml_vdec_hdr_infos hdr; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + memset(&hdr, 0, sizeof(hdr)); + hdr.signal_type = hw->video_signal_from_vui; + vdec_v4l_set_hdr_infos(ctx, &hdr); + } + } else + vf->signal_type = 0; + hw->video_signal_type = vf->signal_type; + + vf->width = hw->frame_width; + vf->height = hw->frame_height; + if (force_rate) { + if (force_rate == -1) + vf->duration = 0; + else + vf->duration = 96000/force_rate; + } else + vf->duration = hw->frame_dur; + vf->ratio_control = + (min(hw->h264_ar, (u32) DISP_RATIO_ASPECT_RATIO_MAX)) << + DISP_RATIO_ASPECT_RATIO_BIT; + vf->orientation = hw->vh264_rotation; + + vf->sidebind_type = hw->sidebind_type; + vf->sidebind_channel_id = hw->sidebind_channel_id; + + if (hw->mmu_enable) + return; + + vf->canvas0Addr = vf->canvas1Addr = -1; +#ifdef NV21 + vf->plane_num = 2; +#else + vf->plane_num = 3; +#endif + + if (IS_VDEC_DW(hw)) { + if (IS_VDEC_DW(hw) == 1) + vf->width = hw->frame_width / 2; + else + vf->width = (hw->frame_width / IS_VDEC_DW(hw)); + vf->height = (hw->frame_height / IS_VDEC_DW(hw)); + p_canvas_config = &hw->buffer_spec[index].vdec_dw_canvas_config[0]; + } else + p_canvas_config = &hw->buffer_spec[index].canvas_config[0]; + + vf->canvas0_config[0] = p_canvas_config[0]; + vf->canvas0_config[1] = p_canvas_config[1]; + vf->canvas1_config[0] = p_canvas_config[0]; + vf->canvas1_config[1] = p_canvas_config[1]; +#ifndef NV21 + vf->canvas0_config[2] = p_canvas_config[2]; + vf->canvas1_config[2] = p_canvas_config[2]; +#endif + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) { + endian_tmp = 
(hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 7 : 0; + } else { + endian_tmp = (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 0 : 7; + } + vf->canvas0_config[0].endian = endian_tmp; + vf->canvas0_config[1].endian = endian_tmp; + vf->canvas1_config[0].endian = endian_tmp; + vf->canvas1_config[1].endian = endian_tmp; +#ifndef NV21 + vf->canvas0_config[2].endian = endian_tmp; + vf->canvas1_config[2].endian = endian_tmp; +#endif +} + +static void get_picture_qos_info(struct StorablePicture *picture) +{ + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + unsigned char a[3]; + unsigned char i, j, t; + unsigned long data; + + get_random_bytes(&data, sizeof(unsigned long)); + if (picture->slice_type == I_SLICE) + data = 0; + a[0] = data & 0xff; + a[1] = (data >> 8) & 0xff; + a[2] = (data >> 16) & 0xff; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_mv = a[2]; + picture->avg_mv = a[1]; + picture->min_mv = a[0]; + /* + pr_info("mv data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + */ + + get_random_bytes(&data, sizeof(unsigned long)); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_qp = a[2]; + picture->avg_qp = a[1]; + picture->min_qp = a[0]; + /* + pr_info("qp data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + */ + + get_random_bytes(&data, sizeof(unsigned long)); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = 
a[i]; + a[i] = t; + } + } + picture->max_skip = a[2]; + picture->avg_skip = a[1]; + picture->min_skip = a[0]; + + + /* + pr_info("skip data %x a[0]= %x a[1]= %x a[2]= %x\n", + data,a[0], a[1], a[2]); + */ + } else { + uint32_t blk88_y_count; + uint32_t blk88_c_count; + uint32_t blk22_mv_count; + uint32_t rdata32; + int32_t mv_hi; + int32_t mv_lo; + uint32_t rdata32_l; + uint32_t mvx_L0_hi; + uint32_t mvy_L0_hi; + uint32_t mvx_L1_hi; + uint32_t mvy_L1_hi; + int64_t value; + uint64_t temp_value; +/* +#define DEBUG_QOS +*/ +#ifdef DEBUG_QOS + int pic_number = picture->poc; +#endif + + picture->max_mv = 0; + picture->avg_mv = 0; + picture->min_mv = 0; + + picture->max_skip = 0; + picture->avg_skip = 0; + picture->min_skip = 0; + + picture->max_qp = 0; + picture->avg_qp = 0; + picture->min_qp = 0; + + + + + + /* set rd_idx to 0 */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, 0); + blk88_y_count = READ_VREG(VDEC_PIC_QUALITY_DATA); + if (blk88_y_count == 0) { +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_y_sum */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_y_count, + rdata32, blk88_y_count); +#endif + picture->avg_qp = rdata32/blk88_y_count; + /* intra_y_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif + /* skipped_y_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif + picture->avg_skip = rdata32*100/blk88_y_count; + /* coeff_non_zero_y_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y 
ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_y_count*1)), + '%', rdata32); +#endif + /* blk66_c_count */ + blk88_c_count = READ_VREG(VDEC_PIC_QUALITY_DATA); + if (blk88_c_count == 0) { +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_c_sum */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_c_count, + rdata32, blk88_c_count); +#endif + /* intra_c_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* skipped_cu_c_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* coeff_non_zero_c_count */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_c_count*1)), + '%', rdata32); +#endif + + /* 1'h0, qp_c_max[6:0], 1'h0, qp_c_min[6:0], + 1'h0, qp_y_max[6:0], 1'h0, qp_y_min[6:0] */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y QP min : %d\n", + pic_number, (rdata32>>0)&0xff); +#endif + picture->min_qp = (rdata32>>0)&0xff; + +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] Y QP max : %d\n", + pic_number, (rdata32>>8)&0xff); +#endif + picture->max_qp = (rdata32>>8)&0xff; + +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] C QP min : %d\n", + pic_number, (rdata32>>16)&0xff); + pr_info(" [Picture %d Quality] C QP max : %d\n", + pic_number, (rdata32>>24)&0xff); +#endif + + /* blk22_mv_count */ + blk22_mv_count = READ_VREG(VDEC_PIC_QUALITY_DATA); + if 
(blk22_mv_count == 0) { +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] NO MV Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* mvy_L1_count[39:32], mvx_L1_count[39:32], + mvy_L0_count[39:32], mvx_L0_count[39:32] */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + /* should all be 0x00 or 0xff */ +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MV AVG High Bits: 0x%X\n", + pic_number, rdata32); +#endif + mvx_L0_hi = ((rdata32>>0)&0xff); + mvy_L0_hi = ((rdata32>>8)&0xff); + mvx_L1_hi = ((rdata32>>16)&0xff); + mvy_L1_hi = ((rdata32>>24)&0xff); + + /* mvx_L0_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvx_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvx_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + value = div_s64(value, blk22_mv_count); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L0 AVG : %d (%lld/%d)\n", + pic_number, (int)(value), + value, blk22_mv_count); +#endif + picture->avg_mv = value; + + /* mvy_L0_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvy_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvy_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVY_L0 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvx_L1_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvx_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvx_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvy_L1_count[31:0] */ + rdata32_l = READ_VREG(VDEC_PIC_QUALITY_DATA); + temp_value = mvy_L1_hi; + 
temp_value = (temp_value << 32) | rdata32_l; + if (mvy_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVY_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* {mvx_L0_max, mvx_L0_min} // format : {sign, abs[14:0]} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L0 MAX : %d\n", + pic_number, mv_hi); +#endif + picture->max_mv = mv_hi; + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] MVX_L0 MIN : %d\n", + pic_number, mv_lo); +#endif + picture->min_mv = mv_lo; + +#ifdef DEBUG_QOS + /* {mvy_L0_max, mvy_L0_min} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + pr_info(" [Picture %d Quality] MVY_L0 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + pr_info(" [Picture %d Quality] MVY_L0 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvx_L1_max, mvx_L1_min} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + pr_info(" [Picture %d Quality] MVX_L1 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + pr_info(" [Picture %d Quality] MVX_L1 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvy_L1_max, mvy_L1_min} */ + rdata32 = READ_VREG(VDEC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + pr_info(" [Picture %d Quality] MVY_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + pr_info(" [Picture %d Quality] 
MVY_L1 MIN : %d\n", + pic_number, mv_lo); +#endif + + rdata32 = READ_VREG(VDEC_PIC_QUALITY_CTRL); +#ifdef DEBUG_QOS + pr_info(" [Picture %d Quality] After Read : VDEC_PIC_QUALITY_CTRL : 0x%x\n", + pic_number, rdata32); +#endif + /* reset all counts */ + WRITE_VREG(VDEC_PIC_QUALITY_CTRL, (1<<8)); + } +} + +static int get_dec_dpb_size(struct vdec_h264_hw_s *hw, int mb_width, + int mb_height, int level_idc) +{ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int pic_size = mb_width * mb_height * 384; + int size = 0, size_vui; + + switch (level_idc) { + case 9: + size = 152064; + break; + case 10: + size = 152064; + break; + case 11: + size = 345600; + break; + case 12: + size = 912384; + break; + case 13: + size = 912384; + break; + case 20: + size = 912384; + break; + case 21: + size = 1824768; + break; + case 22: + size = 3110400; + break; + case 30: + size = 3110400; + break; + case 31: + size = 6912000; + break; + case 32: + size = 7864320; + break; + case 40: + size = 12582912; + break; + case 41: + size = 12582912; + break; + case 42: + size = 13369344; + break; + case 50: + size = 42393600; + break; + case 51: + case 52: + default: + size = 70778880; + break; + } + + size /= pic_size; + size = imin(size, 16); + dpb_print(DECODE_ID(hw), 0, + "level_idc = %d pic_size = %d size = %d\n", + level_idc, pic_size, size); + if (p_H264_Dpb->bitstream_restriction_flag) { + if ((int)p_H264_Dpb->max_dec_frame_buffering > size) { + dpb_print(DECODE_ID(hw), 0, + "max_dec_frame_buffering larger than MaxDpbSize.\n"); + } + size_vui = imax (1, p_H264_Dpb->max_dec_frame_buffering); + if (size_vui < size) { + dpb_print(DECODE_ID(hw), 0, + "Warning: max_dec_frame_buffering(%d) is less than DPB size(%d) calculated from Profile/Level.\n", + size_vui, size); + } + size = size_vui; + } + + size += 2; /* need two more buffer */ + + if (!hw->discard_dv_data) { + size += 1; + dpb_print(DECODE_ID(hw), 0, "dv stream need one more buffer.\n"); + } + + return size; +} + +static int 
get_dec_dpb_size_active(struct vdec_h264_hw_s *hw, u32 param1, u32 param4) +{ + int mb_width, mb_total; + int mb_height = 0; + int dec_dpb_size; + int level_idc = param4 & 0xff; + + mb_width = param1 & 0xff; + mb_total = (param1 >> 8) & 0xffff; + if (!mb_width && mb_total) /*for 4k2k*/ + mb_width = 256; + if (mb_width) + mb_height = mb_total/mb_width; + if (mb_width <= 0 || mb_height <= 0 || + is_oversize(mb_width << 4, mb_height << 4)) { + dpb_print(DECODE_ID(hw), 0, + "!!!wrong param1 0x%x mb_width/mb_height (0x%x/0x%x) %x\r\n", + param1, + mb_width, + mb_height); + hw->error_frame_width = mb_width << 4; + hw->error_frame_height = mb_height << 4; + return -1; + } + hw->error_frame_width = 0; + hw->error_frame_height = 0; + + dec_dpb_size = get_dec_dpb_size(hw , mb_width, mb_height, level_idc); + + if (hw->no_poc_reorder_flag) + dec_dpb_size = 1; + + return dec_dpb_size; +} + +static void vh264_config_canvs_for_mmu(struct vdec_h264_hw_s *hw) +{ + int i, j; + + if (hw->double_write_mode) { + mutex_lock(&vmh264_mutex); + if (hw->decode_pic_count == 0) { + for (j = 0; j < hw->dpb.mDPB.size; j++) { + i = get_buf_spec_by_canvas_pos(hw, j); + if (i >= 0) + config_decode_canvas_ex(hw, i); + } + } + mutex_unlock(&vmh264_mutex); + } +} + +static int vh264_set_params(struct vdec_h264_hw_s *hw, + u32 param1, u32 param2, u32 param3, u32 param4, bool buffer_reset_flag) +{ + int i, j; + int mb_width, mb_total; + int max_reference_size, level_idc; + int mb_height = 0; + unsigned long flags; + /*int mb_mv_byte;*/ + struct vdec_s *vdec = hw_to_vdec(hw); + u32 seq_info2; + int ret = 0; + int active_buffer_spec_num; + unsigned int buf_size; + unsigned int frame_mbs_only_flag; + unsigned int chroma_format_idc; + unsigned int crop_bottom, crop_right; + unsigned int used_reorder_dpb_size_margin + = hw->reorder_dpb_size_margin; + u8 *colocate_vaddr = NULL; + int dec_dpb_size_change = 0; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec->master || vdec->slave) + 
used_reorder_dpb_size_margin = + reorder_dpb_size_margin_dv; +#endif + seq_info2 = param1; + hw->seq_info = param2; + + mb_width = seq_info2 & 0xff; + mb_total = (seq_info2 >> 8) & 0xffff; + if (!mb_width && mb_total) /*for 4k2k*/ + mb_width = 256; + if (mb_width) + mb_height = mb_total/mb_width; + if (mb_width <= 0 || mb_height <= 0 || + is_oversize(mb_width << 4, mb_height << 4)) { + dpb_print(DECODE_ID(hw), 0, + "!!!wrong seq_info2 0x%x mb_width/mb_height (0x%x/0x%x) %x\r\n", + seq_info2, + mb_width, + mb_height); + hw->error_frame_width = mb_width << 4; + hw->error_frame_height = mb_height << 4; + return -1; + } + hw->error_frame_width = 0; + hw->error_frame_height = 0; + + dec_dpb_size_change = hw->dpb.dec_dpb_size != get_dec_dpb_size_active(hw, param1, param4); + + if (((seq_info2 != 0 && + hw->seq_info2 != seq_info2) || hw->csd_change_flag) && + hw->seq_info2 != 0 + ) { + if (((hw->seq_info2 & 0x80ffffff) != (param1 & 0x80ffffff)) || dec_dpb_size_change) { /*picture size changed*/ + h264_reconfig(hw); + } else { + /*someting changes and not including dpb_size, width, height, ...*/ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + u32 reg_val = param4; + max_reference_size = (reg_val >> 8) & 0xff; + hw->dpb.reorder_output = max_reference_size; + + hw->cfg_param1 = param1; + hw->cfg_bitstream_restriction_flag = hw->bitstream_restriction_flag; + + hw->seq_info2 = seq_info2; + + if (p_H264_Dpb->bitstream_restriction_flag && + p_H264_Dpb->num_reorder_frames <= p_H264_Dpb->max_dec_frame_buffering && + p_H264_Dpb->num_reorder_frames >= 0) { + hw->dpb.reorder_output = hw->num_reorder_frames + 1; + } + + hw->max_reference_size = + max_reference_size + reference_buf_margin; + + if (hw->max_reference_size > MAX_VF_BUF_NUM) + hw->max_reference_size = MAX_VF_BUF_NUM; + hw->dpb.max_reference_size = hw->max_reference_size; + } + } + + if (hw->config_bufmgr_done == 0) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + u32 reg_val; + int sub_width_c = 0, sub_height_c = 0; + + 
hw->cfg_param1 = param1; + hw->cfg_param2 = param2; + hw->cfg_param3 = param3; + hw->cfg_param4 = param4; + hw->cfg_bitstream_restriction_flag = hw->bitstream_restriction_flag; + + hw->seq_info2 = seq_info2; + dpb_print(DECODE_ID(hw), 0, + "AV_SCRATCH_1 = %x, AV_SCRATCH_2 %x\r\n", + seq_info2, hw->seq_info); + + dpb_init_global(&hw->dpb, + DECODE_ID(hw), 0, 0); + + p_H264_Dpb->fast_output_enable = fast_output_enable; + /*mb_mv_byte = (seq_info2 & 0x80000000) ? 24 : 96;*/ + + if (hw->enable_fence) + p_H264_Dpb->fast_output_enable = H264_OUTPUT_MODE_FAST; + +#if 1 + /*crop*/ + /* AV_SCRATCH_2 + bit 15: frame_mbs_only_flag + bit 13-14: chroma_format_idc */ + frame_mbs_only_flag = (hw->seq_info >> 15) & 0x01; + if (p_H264_Dpb->mSPS.profile_idc != 100 && + p_H264_Dpb->mSPS.profile_idc != 110 && + p_H264_Dpb->mSPS.profile_idc != 122 && + p_H264_Dpb->mSPS.profile_idc != 144) { + p_H264_Dpb->chroma_format_idc = 1; + } + chroma_format_idc = p_H264_Dpb->chroma_format_idc; + + /* @AV_SCRATCH_6.31-16 = (left << 8 | right ) << 1 + @AV_SCRATCH_6.15-0 = (top << 8 | bottom ) << + (2 - frame_mbs_only_flag) */ + + switch (chroma_format_idc) { + case 1: + sub_width_c = 2; + sub_height_c = 2; + break; + + case 2: + sub_width_c = 2; + sub_height_c = 1; + break; + + case 3: + sub_width_c = 1; + sub_height_c = 1; + break; + + default: + break; + } + + if (chroma_format_idc == 0) { + crop_right = p_H264_Dpb->frame_crop_right_offset; + crop_bottom = p_H264_Dpb->frame_crop_bottom_offset * + (2 - frame_mbs_only_flag); + } else { + crop_right = sub_width_c * p_H264_Dpb->frame_crop_right_offset; + crop_bottom = sub_height_c * p_H264_Dpb->frame_crop_bottom_offset * + (2 - frame_mbs_only_flag); + } + + p_H264_Dpb->mSPS.frame_mbs_only_flag = frame_mbs_only_flag; + hw->frame_width = mb_width << 4; + hw->frame_height = mb_height << 4; + + hw->frame_width = hw->frame_width - crop_right; + hw->frame_height = hw->frame_height - crop_bottom; + + dpb_print(DECODE_ID(hw), 0, + "chroma_format_idc = %d 
frame_mbs_only_flag %d, crop_bottom %d, frame_height %d,\n", + chroma_format_idc, frame_mbs_only_flag, crop_bottom, hw->frame_height); + dpb_print(DECODE_ID(hw), 0, + "mb_height %d,crop_right %d, frame_width %d, mb_width %d\n", + mb_height, crop_right, + hw->frame_width, mb_width); + + if (hw->frame_height == 1088 && (crop_right != 0 || crop_bottom != 0)) + hw->frame_height = 1080; +#endif + reg_val = param4; + level_idc = reg_val & 0xff; + p_H264_Dpb->mSPS.level_idc = level_idc; + max_reference_size = (reg_val >> 8) & 0xff; + hw->dpb.reorder_output = max_reference_size; + hw->dpb.dec_dpb_size = + get_dec_dpb_size(hw , mb_width, mb_height, level_idc); + if (!hw->mmu_enable) { + mb_width = (mb_width+3) & 0xfffffffc; + mb_height = (mb_height+3) & 0xfffffffc; + } + mb_total = mb_width * mb_height; + hw->mb_width = mb_width; + hw->mb_height = mb_height; + hw->mb_total = mb_total; + if (hw->mmu_enable) + hevc_mcr_sao_global_hw_init(hw, + (hw->mb_width << 4), (hw->mb_height << 4)); + + dpb_print(DECODE_ID(hw), 0, + "mb height/widht/total: %x/%x/%x level_idc %x max_ref_num %x\n", + mb_height, mb_width, mb_total, + level_idc, max_reference_size); + + p_H264_Dpb->colocated_buf_size = mb_total * 96; + + dpb_print(DECODE_ID(hw), 0, + "restriction_flag=%d, max_dec_frame_buffering=%d, dec_dpb_size=%d num_reorder_frames %d used_reorder_dpb_size_margin %d\n", + hw->bitstream_restriction_flag, + hw->max_dec_frame_buffering, + hw->dpb.dec_dpb_size, + hw->num_reorder_frames, + used_reorder_dpb_size_margin); + + if (p_H264_Dpb->bitstream_restriction_flag && + p_H264_Dpb->num_reorder_frames <= p_H264_Dpb->max_dec_frame_buffering && + p_H264_Dpb->num_reorder_frames >= 0) { + hw->dpb.reorder_output = hw->num_reorder_frames + 1; + } + + active_buffer_spec_num = + hw->dpb.dec_dpb_size + + used_reorder_dpb_size_margin; + hw->max_reference_size = + max_reference_size + reference_buf_margin; + + if (active_buffer_spec_num > MAX_VF_BUF_NUM) { + active_buffer_spec_num = MAX_VF_BUF_NUM; + 
hw->dpb.dec_dpb_size = active_buffer_spec_num + - used_reorder_dpb_size_margin; + dpb_print(DECODE_ID(hw), 0, + "active_buffer_spec_num is larger than MAX %d, set dec_dpb_size to %d\n", + MAX_VF_BUF_NUM, hw->dpb.dec_dpb_size); + } + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vdec_pic_info pic; + + vdec_v4l_get_pic_info(ctx, &pic); + + active_buffer_spec_num = pic.dpb_frames + + pic.dpb_margin; + } + + hw->dpb.mDPB.size = active_buffer_spec_num; + if (hw->max_reference_size > MAX_VF_BUF_NUM) + hw->max_reference_size = MAX_VF_BUF_NUM; + hw->dpb.max_reference_size = hw->max_reference_size; + + if (hw->no_poc_reorder_flag) + hw->dpb.dec_dpb_size = 1; + dpb_print(DECODE_ID(hw), 0, + "%s active_buf_spec_num %d dec_dpb_size %d collocate_buf_num %d\r\n", + __func__, active_buffer_spec_num, + hw->dpb.dec_dpb_size, + hw->max_reference_size); + + if (hw->kpi_first_i_comming == 0) { + hw->kpi_first_i_comming = 1; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "[vdec_kpi][%s] First I frame comming.\n", __func__); + } + + buf_size = (hw->mb_total << 8) + (hw->mb_total << 7); + + mutex_lock(&vmh264_mutex); + if (!hw->mmu_enable) { + if (!buffer_reset_flag) + config_buf_specs(vdec); + i = get_buf_spec_by_canvas_pos(hw, 0); + + if (hw->is_used_v4l) { + if (i != -1) { + pr_info("v4l: delay alloc the buffer.\n"); + } + } else { + if ((i != -1) && alloc_one_buf_spec(hw, i) >= 0) + config_decode_canvas(hw, i); + else + ret = -1; + } + } else { + if (hw->double_write_mode) { + config_buf_specs_ex(vdec); + } else { + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0, j = 0; + j < active_buffer_spec_num + && i < BUFSPEC_POOL_SIZE; + i++) { + if (hw->buffer_spec[i].used != -1) + continue; + hw->buffer_spec[i].used = 0; + hw->buffer_spec[i]. 
+ alloc_header_addr = 0; + hw->buffer_spec[i].canvas_pos = j; + j++; + } + spin_unlock_irqrestore(&hw->bufspec_lock, + flags); + } + hevc_mcr_config_canv2axitbl(hw, 0); + } + mutex_unlock(&vmh264_mutex); + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, __func__); + +#ifdef ONE_COLOCATE_BUF_PER_DECODE_BUF + buf_size = PAGE_ALIGN( + p_H264_Dpb->colocated_buf_size * + active_buffer_spec_num); +#else + buf_size = PAGE_ALIGN( + p_H264_Dpb->colocated_buf_size * + hw->max_reference_size); +#endif + + if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, BMMU_REF_IDX, + buf_size, DRIVER_NAME, + &hw->collocate_cma_alloc_addr) < 0) + return -1; + if (!vdec_secure(vdec)) { + /* clear for some mosaic problem after reset bufmgr */ + colocate_vaddr = codec_mm_vmap(hw->collocate_cma_alloc_addr, buf_size); + if (colocate_vaddr != NULL) { + memset(colocate_vaddr, 0, buf_size); + codec_mm_dma_flush(colocate_vaddr, buf_size, DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(colocate_vaddr); + } + } + + hw->dpb.colocated_mv_addr_start = + hw->collocate_cma_alloc_addr; +#ifdef ONE_COLOCATE_BUF_PER_DECODE_BUF + hw->dpb.colocated_mv_addr_end = + hw->dpb.colocated_mv_addr_start + + (p_H264_Dpb->colocated_buf_size * + active_buffer_spec_num); +#else + hw->dpb.colocated_mv_addr_end = + hw->dpb.colocated_mv_addr_start + + (p_H264_Dpb->colocated_buf_size * + hw->max_reference_size); +#endif + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "callocate cma, %lx, %x\n", + hw->collocate_cma_alloc_addr, + hw->dpb.colocated_mv_addr_start); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "colocated_mv_addr_start %x colocated_mv_addr_end %x\n", + hw->dpb.colocated_mv_addr_start, + hw->dpb.colocated_mv_addr_end); + if (!hw->mmu_enable) { + mutex_lock(&vmh264_mutex); + if (ret >= 0 && hw->decode_pic_count == 0) { + int buf_cnt; + /* h264_reconfig: alloc later*/ + buf_cnt = hw->dpb.mDPB.size; + + for (j = 1; j < buf_cnt; j++) { + i = get_buf_spec_by_canvas_pos(hw, j); + + if 
(hw->is_used_v4l) { + pr_info("v4l: delay alloc the buffer.\n"); + break; + } else if (alloc_one_buf_spec(hw, i) < 0) + break; + + config_decode_canvas(hw, i); + } + } + mutex_unlock(&vmh264_mutex); + } else { + vh264_config_canvs_for_mmu(hw); + } + + hw->config_bufmgr_done = 1; + + /*end of config_bufmgr_done */ + } + + return ret; +} + +static void vui_config(struct vdec_h264_hw_s *hw) +{ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int aspect_ratio_info_present_flag, aspect_ratio_idc; + /*time*/ + hw->num_units_in_tick = p_H264_Dpb->num_units_in_tick; + hw->time_scale = p_H264_Dpb->time_scale; + hw->timing_info_present_flag = p_H264_Dpb->vui_status & 0x2; + + hw->bitstream_restriction_flag = + p_H264_Dpb->bitstream_restriction_flag; + hw->num_reorder_frames = + p_H264_Dpb->num_reorder_frames; + hw->max_dec_frame_buffering = + p_H264_Dpb->max_dec_frame_buffering; + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "vui_config: pdb %d, %d, %d\n", + p_H264_Dpb->bitstream_restriction_flag, + p_H264_Dpb->num_reorder_frames, + p_H264_Dpb->max_dec_frame_buffering); + + hw->fixed_frame_rate_flag = 0; + if (hw->timing_info_present_flag) { + hw->fixed_frame_rate_flag = + p_H264_Dpb->fixed_frame_rate_flag; + + if (hw->is_used_v4l && (p_H264_Dpb->dpb_param.l.data[SLICE_TYPE] == I_Slice) && + (hw->num_units_in_tick != 0)) { + if (hw->num_units_in_tick % 1001 == 0) { + int multiple = hw->num_units_in_tick / 1001; + + if (hw->time_scale / multiple == 120000) { + hw->frame_dur = RATE_11990_FPS; + if (hw->fixed_frame_rate_flag == 1) + hw->frame_dur = RATE_5994_FPS; + } else if (hw->time_scale / multiple == 60000) { + hw->frame_dur = RATE_5994_FPS; + if (hw->fixed_frame_rate_flag == 1) + hw->frame_dur = RATE_2997_FPS; + } else if ((hw->time_scale / multiple == 48000) && + (hw->fixed_frame_rate_flag == 1)) { + hw->frame_dur = RATE_2397_FPS; + } else if (hw->time_scale / multiple == 30000) { + hw->frame_dur = RATE_2997_FPS; + } else if (hw->time_scale / multiple == 24000) { + 
hw->frame_dur = RATE_2397_FPS; + } + } else { + u32 frame_rate = hw->time_scale / hw->num_units_in_tick; + + if (hw->fixed_frame_rate_flag == 1) { + frame_rate = frame_rate / 2; + } + hw->frame_dur = 96000 / frame_rate; + } + } + + if (((hw->num_units_in_tick * 120) >= hw->time_scale && + ((!hw->sync_outside) || + (!hw->frame_dur))) + && hw->num_units_in_tick && hw->time_scale) { + if (hw->use_idr_framerate || + hw->fixed_frame_rate_flag || + !hw->frame_dur || + !hw->duration_from_pts_done + /*|| vh264_running*/) { + u32 frame_dur_es = + div_u64(96000ULL * 2 * hw->num_units_in_tick, + hw->time_scale); + if (hw->frame_dur != frame_dur_es) { + hw->h264_first_valid_pts_ready = false; + hw->h264pts1 = 0; + hw->h264pts2 = 0; + hw->h264_pts_count = 0; + hw->duration_from_pts_done = 0; + fixed_frame_rate_mode = + FIX_FRAME_RATE_OFF; + hw->pts_duration = 0; + hw->frame_dur = frame_dur_es; + if (!hw->fixed_frame_rate_flag && (p_H264_Dpb->mSPS.profile_idc != BASELINE)) { + if (frame_dur_es == 7680) + hw->frame_dur = frame_dur_es /2; + } + vdec_schedule_work(&hw->notify_work); + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DEC_DETAIL, + "frame_dur %d from timing_info\n", + hw->frame_dur); + } + + /*hack to avoid use ES frame duration when + *it's half of the rate from system info + * sometimes the encoder is given a wrong + * frame rate but the system side information + * is more reliable + *if ((frame_dur * 2) != frame_dur_es) { + * frame_dur = frame_dur_es; + *} + */ + } + } + } else { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "H.264: timing_info not present\n"); + } + + /*aspect ratio*/ + aspect_ratio_info_present_flag = + p_H264_Dpb->vui_status & 0x1; + aspect_ratio_idc = p_H264_Dpb->aspect_ratio_idc; + + if (aspect_ratio_info_present_flag) { + if (aspect_ratio_idc == EXTEND_SAR) { + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = + p_H264_Dpb->aspect_ratio_sar_height; + hw->width_aspect_ratio = + p_H264_Dpb->aspect_ratio_sar_width; + } else { + /* pr_info("v264dec: 
aspect_ratio_idc = %d\n", + aspect_ratio_idc); */ + + switch (aspect_ratio_idc) { + case 1: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 1; + hw->width_aspect_ratio = 1; + break; + case 2: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 11; + hw->width_aspect_ratio = 12; + break; + case 3: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 11; + hw->width_aspect_ratio = 10; + break; + case 4: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 11; + hw->width_aspect_ratio = 16; + break; + case 5: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 33; + hw->width_aspect_ratio = 40; + break; + case 6: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 11; + hw->width_aspect_ratio = 24; + break; + case 7: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 11; + hw->width_aspect_ratio = 20; + break; + case 8: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 11; + hw->width_aspect_ratio = 32; + break; + case 9: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 33; + hw->width_aspect_ratio = 80; + break; + case 10: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 11; + hw->width_aspect_ratio = 18; + break; + case 11: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 11; + hw->width_aspect_ratio = 15; + break; + case 12: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 33; + hw->width_aspect_ratio = 64; + break; + case 13: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 99; + hw->width_aspect_ratio = 160; + break; + case 14: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 3; + hw->width_aspect_ratio = 4; + break; + case 15: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 2; + hw->width_aspect_ratio = 3; + break; + case 16: + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 1; + hw->width_aspect_ratio = 2; + break; + default: + if (hw->vh264_ratio >> 16) { + hw->h264_ar = (hw->frame_height * + (hw->vh264_ratio & 0xffff) * + 0x100 + + ((hw->vh264_ratio >> 16) * + hw->frame_width / 2)) / + ((hw->vh264_ratio >> 16) * + hw->frame_width); + 
hw->height_aspect_ratio = 1; + hw->width_aspect_ratio = 1; + } else { + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 1; + hw->width_aspect_ratio = 1; + } + break; + } + } + } else { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "v264dec: aspect_ratio not available from source\n"); + if (hw->vh264_ratio >> 16) { + /* high 16 bit is width, low 16 bit is height */ + hw->h264_ar = + ((hw->vh264_ratio & 0xffff) * + hw->frame_height * 0x100 + + (hw->vh264_ratio >> 16) * + hw->frame_width / 2) / + ((hw->vh264_ratio >> 16) * + hw->frame_width); + hw->height_aspect_ratio = 1; + hw->width_aspect_ratio = 1; + } else { + hw->h264_ar = 0x3ff; + hw->height_aspect_ratio = 1; + hw->width_aspect_ratio = 1; + } + } + + if (hw->pts_unstable && (hw->fixed_frame_rate_flag == 0)) { + if (((hw->frame_dur == RATE_2397_FPS) + && (dec_control + & DEC_CONTROL_FLAG_FORCE_RATE_2397_FPS_FIX_FRAME_RATE)) + || ((RATE_2997_FPS == + hw->frame_dur) && + (dec_control & + DEC_CONTROL_FLAG_FORCE_RATE_2997_FPS_FIX_FRAME_RATE))) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "force fix frame rate\n"); + hw->fixed_frame_rate_flag = 0x40; + } + } + + /*video_signal_from_vui: to do .. 
*/ +} + +static void bufmgr_recover(struct vdec_h264_hw_s *hw) +{ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + + bufmgr_h264_remove_unused_frame(p_H264_Dpb, 2); + if (hw->error_proc_policy & 0x20) { + if (!hw->is_used_v4l) + hw->reset_bufmgr_flag = 1; + } +} + +void bufmgr_force_recover(struct h264_dpb_stru *p_H264_Dpb) +{ + struct vdec_h264_hw_s *hw = + container_of(p_H264_Dpb, struct vdec_h264_hw_s, dpb); + + dpb_print(DECODE_ID(hw), 0, "call %s\n", __func__); + + bufmgr_h264_remove_unused_frame(p_H264_Dpb, 2); + hw->reset_bufmgr_flag = 1; +} + +#ifdef CONSTRAIN_MAX_BUF_NUM +static int get_vf_ref_only_buf_count(struct vdec_h264_hw_s *hw) +{ + int i; + int count = 0; + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (is_buf_spec_in_disp_q(hw, i) && + hw->buffer_spec[i].vf_ref > 0) + count++; + } + return count; +} + +static int get_used_buf_count(struct vdec_h264_hw_s *hw) +{ + int i; + int count = 0; + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (is_buf_spec_in_use(hw, i)) + count++; + } + return count; +} +#endif + + +static bool is_buffer_available(struct vdec_s *vdec) +{ + bool buffer_available = 1; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)(vdec->private); + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + int i, frame_outside_count = 0, inner_size = 0; + if ((kfifo_len(&hw->newframe_q) <= 0) || + ((hw->config_bufmgr_done) && (!have_free_buf_spec(vdec))) || + ((p_H264_Dpb->mDPB.init_done) && + (p_H264_Dpb->mDPB.used_size >= (p_H264_Dpb->mDPB.size - 1)) && + (is_there_unused_frame_from_dpb(&p_H264_Dpb->mDPB) == 0))) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "%s, empty, newq(%d), free_spec(%d), initdon(%d), used_size(%d/%d), unused_fr_dpb(%d)\n", + __func__, + kfifo_len(&hw->newframe_q), + have_free_buf_spec(vdec), + p_H264_Dpb->mDPB.init_done, + p_H264_Dpb->mDPB.used_size, p_H264_Dpb->mDPB.size, + is_there_unused_frame_from_dpb(&p_H264_Dpb->mDPB) + ); + buffer_available = 
0; + if (dpb_is_debug(DECODE_ID(hw), + DEBUG_DISABLE_RUNREADY_RMBUF)) + return buffer_available; + + if ((hw->error_proc_policy & 0x4) && + (hw->error_proc_policy & 0x8)) { + if ((kfifo_len(&hw->display_q) <= 0) && + (p_H264_Dpb->mDPB.used_size >= + (p_H264_Dpb->mDPB.size - 1)) && + (p_Dpb->ref_frames_in_buffer > + (imax( + 1, p_Dpb->num_ref_frames) + - p_Dpb->ltref_frames_in_buffer + + force_sliding_margin))){ + bufmgr_recover(hw); + } else { + bufmgr_h264_remove_unused_frame(p_H264_Dpb, 1); + } + } else if ((hw->error_proc_policy & 0x4) && + (kfifo_len(&hw->display_q) <= 0) && + ((p_H264_Dpb->mDPB.used_size >= + (p_H264_Dpb->mDPB.size - 1)) || + (!have_free_buf_spec(vdec))) && + (hw->discard_dv_data)) { + unsigned long flags; + spin_lock_irqsave(&hw->bufspec_lock, flags); + + for (i = 0; i < p_Dpb->used_size; i++) { + if (p_Dpb->fs[i]->pre_output) + frame_outside_count++; + else if (p_Dpb->fs[i]->is_output && !is_used_for_reference(p_Dpb->fs[i])) { + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + bufmgr_h264_remove_unused_frame(p_H264_Dpb, 0); + return 0; + } + } + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + inner_size = p_Dpb->size - frame_outside_count; + + if (inner_size >= p_H264_Dpb->dec_dpb_size) { + if (p_H264_Dpb->mDPB.used_size >= + p_H264_Dpb->mDPB.size) { + bufmgr_recover(hw); + } else if (p_H264_Dpb->mDPB.used_size >= + (p_H264_Dpb->mDPB.size - 1)) { + if (inner_size > p_H264_Dpb->dec_dpb_size) { + bufmgr_recover(hw); + } + } + } + bufmgr_h264_remove_unused_frame(p_H264_Dpb, 0); + } else if ((hw->error_proc_policy & 0x8) && + (p_Dpb->ref_frames_in_buffer > + (imax( + 1, p_Dpb->num_ref_frames) + - p_Dpb->ltref_frames_in_buffer + + force_sliding_margin))) + bufmgr_recover(hw); + else + bufmgr_h264_remove_unused_frame(p_H264_Dpb, 1); + + if (hw->reset_bufmgr_flag == 1) + buffer_available = 1; + } + + if (hw->is_used_v4l) + buffer_available = have_free_buf_spec(vdec); + + return buffer_available; +} + +#define AUX_TAG_SEI 0x2 + +#define 
SEI_BUFFERING_PERIOD 0 +#define SEI_PicTiming 1 +#define SEI_USER_DATA 4 +#define SEI_RECOVERY_POINT 6 + +/* + ************************************************************************* + * Function:Reads bits from the bitstream buffer + * Input: + byte buffer[] + containing sei message data bits + int totbitoffset + bit offset from start of partition + int bytecount + total bytes in bitstream + int numbits + number of bits to read + * Output: + int *info + * Return: + -1: failed + > 0: the count of bit read + * Attention: + ************************************************************************* + */ + +static int get_bits(unsigned char buffer[], + int totbitoffset, + int *info, + int bytecount, + int numbits) +{ + register int inf; + long byteoffset; + int bitoffset; + + int bitcounter = numbits; + + byteoffset = totbitoffset / 8; + bitoffset = 7 - (totbitoffset % 8); + + inf = 0; + while (numbits) { + inf <<= 1; + inf |= (buffer[byteoffset] & (0x01 << bitoffset)) >> bitoffset; + numbits--; + bitoffset--; + if (bitoffset < 0) { + byteoffset++; + bitoffset += 8; + if (byteoffset > bytecount) + return -1; + } + } + + *info = inf; + + + return bitcounter; +} + +static int parse_one_sei_record(struct vdec_h264_hw_s *hw, + u8 *sei_data_buf, + u8 *sei_data_buf_end) +{ + int payload_type; + int payload_size; + u8 *p_sei; + int temp = 0; + int bit_offset; + int read_size; + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + + p_sei = sei_data_buf; + read_size = 0; + payload_type = 0; + do { + if (p_sei >= sei_data_buf_end) + return read_size; + + payload_type += *p_sei; + read_size++; + } while (*p_sei++ == 255); + + + payload_size = 0; + do { + if (p_sei >= sei_data_buf_end) + return read_size; + + payload_size += *p_sei; + read_size++; + } while (*p_sei++ == 255); + + + if (p_sei + payload_size > sei_data_buf_end) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "%s: payload_type = %d, payload_size = %d is over\n", + __func__, payload_type, payload_size); + return 
read_size; + } + bit_offset = 0; + + if (payload_size <= 0) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "%s warning: this is a null sei message for payload_type = %d\n", + __func__, payload_type); + return read_size; + } + p_H264_Dpb->vui_status = p_H264_Dpb->dpb_param.l.data[VUI_STATUS]; + switch (payload_type) { + case SEI_BUFFERING_PERIOD: + break; + case SEI_PicTiming: + if (p_H264_Dpb->vui_status & 0xc) { + int cpb_removal_delay; + int dpb_output_delay; + u32 delay_len; + + delay_len = p_H264_Dpb->dpb_param.l.data[DELAY_LENGTH]; + cpb_removal_delay + = (delay_len & 0x1F) + 1; + dpb_output_delay + = ((delay_len >> 5) & 0x1F) + 1; + + get_bits(p_sei, bit_offset, + &temp, payload_size, + dpb_output_delay+cpb_removal_delay); + bit_offset += dpb_output_delay+cpb_removal_delay; + } + if (p_H264_Dpb->vui_status & 0x10) { + get_bits(p_sei, bit_offset, &temp, payload_size, 4); + bit_offset += 4; + p_H264_Dpb->dpb_param.l.data[PICTURE_STRUCT] = temp; + } + break; + case SEI_USER_DATA: + if (enable_itu_t35) { + int i; + int j; + int data_len; + u8 *user_data_buf; + + user_data_buf + = hw->sei_itu_data_buf + hw->sei_itu_data_len; + /* user data length should be align with 8 bytes, + if not, then padding with zero*/ + for (i = 0; i < payload_size; i += 8) { + if (hw->sei_itu_data_len + i >= SEI_ITU_DATA_SIZE) + break; // Avoid out-of-bound writing + for (j = 0; j < 8; j++) { + int index; + + index = i+7-j; + if (index >= payload_size) + user_data_buf[i+j] = 0; + else + user_data_buf[i+j] + = p_sei[i+7-j]; + } + } + + data_len = payload_size; + if (payload_size % 8) + data_len = ((payload_size + 8) >> 3) << 3; + + hw->sei_itu_data_len += data_len; + if (hw->sei_itu_data_len >= SEI_ITU_DATA_SIZE) + hw->sei_itu_data_len = SEI_ITU_DATA_SIZE; + /* + dpb_print(DECODE_ID(hw), 0, + "%s: user data, and len = %d:\n", + __func__, hw->sei_itu_data_len); + */ + } + break; + case SEI_RECOVERY_POINT: + p_H264_Dpb->dpb_param.l.data[RECOVERY_POINT] = 1; + break; + } + + return 
read_size + payload_size; +} + +static void parse_sei_data(struct vdec_h264_hw_s *hw, + u8 *sei_data_buf, + int len) +{ + char *p_sei; + char *p_sei_end; + int parsed_size; + int read_size; + + + p_sei = sei_data_buf; + p_sei_end = p_sei + len; + parsed_size = 0; + while (parsed_size < len) { + read_size = parse_one_sei_record(hw, p_sei, p_sei_end); + p_sei += read_size; + parsed_size += read_size; + if (*p_sei == 0x80) { + p_sei++; + parsed_size++; + } + } +} + +static void check_decoded_pic_error(struct vdec_h264_hw_s *hw) +{ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + struct StorablePicture *p = p_H264_Dpb->mVideo.dec_picture; + unsigned mby_mbx = READ_VREG(MBY_MBX); + unsigned mb_total = (hw->seq_info2 >> 8) & 0xffff; + unsigned mb_width = hw->seq_info2 & 0xff; + unsigned decode_mb_count; + if (!mb_width && mb_total) /*for 4k2k*/ + mb_width = 256; + decode_mb_count = ((mby_mbx & 0xff) * mb_width + + (((mby_mbx >> 8) & 0xff) + 1)); + if ((mby_mbx == 0) && (p_H264_Dpb->dec_dpb_status != H264_SLICE_HEAD_DONE)) { + dpb_print(DECODE_ID(hw), 0, + "mby_mbx is zero\n"); + return; + } + if (get_cur_slice_picture_struct(p_H264_Dpb) != FRAME) + mb_total /= 2; + + if ((hw->error_proc_policy & 0x200) && + READ_VREG(ERROR_STATUS_REG) != 0) { + p->data_flag |= ERROR_FLAG; + } + + if (error_proc_policy & 0x100 && !(p->data_flag & ERROR_FLAG)) { + if (decode_mb_count < mb_total) { + p->data_flag |= ERROR_FLAG; + if (((error_proc_policy & 0x20000) && + decode_mb_count >= mb_total * (100 - mb_count_threshold) / 100)) { + p->data_flag &= ~ERROR_FLAG; + } + } + } + + if ((hw->error_proc_policy & 0x100000) && + hw->last_dec_picture && + (hw->last_dec_picture->slice_type == I_SLICE) && + (hw->dpb.mSlice.slice_type == P_SLICE)) { + if ((p->data_flag & ERROR_FLAG) && + (decode_mb_count >= mb_total)) { + hw->ip_field_error_count++; + if (hw->ip_field_error_count == 4) { + unsigned int i; + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + for (i = 0; i < 
/*
 * vh264_pic_done_proc() - finalize the picture the hardware just finished.
 *
 * Called from the threaded ISR when the ucode reports a picture (or a
 * forced picture boundary) is done.  Stamps PTS/timestamp onto the decoded
 * picture, runs error checks, optionally applies the loop-playback
 * heuristic (error_proc_policy bit 21), stores the picture into the DPB,
 * and updates per-instance decode counters/flags.
 *
 * @vdec: decoder instance; hw state is in vdec->private.
 * Return: always 0.
 */
static int vh264_pic_done_proc(struct vdec_s *vdec)
{
	struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)(vdec->private);
	struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
	int ret;
	int i;
	struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB;

	/* Record hardware decode duration for frame-rate statistics. */
	if (vdec->mvfrm)
		vdec->mvfrm->hw_decode_time =
		local_clock() - vdec->mvfrm->hw_decode_start;

	/*
	 * Frame-based input with a large remaining bit count means more
	 * pictures are packed in this chunk: set frmbase_cont_flag so the
	 * caller keeps searching instead of finishing the run.
	 */
	if (input_frame_based(vdec) &&
		(!(hw->i_only & 0x2)) &&
		frmbase_cont_bitlevel != 0 &&
		READ_VREG(VIFF_BIT_CNT) >
		frmbase_cont_bitlevel) {
		/*handle the case: multi pictures in one packet*/
		dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
			"%s H264_PIC_DATA_DONE decode slice count %d, continue (bitcnt 0x%x)\n",
			__func__,
			hw->decode_pic_count,
			READ_VREG(VIFF_BIT_CNT));
		hw->frmbase_cont_flag = 1;
	} else
		hw->frmbase_cont_flag = 0;

	if (p_H264_Dpb->mVideo.dec_picture) {
		get_picture_qos_info(p_H264_Dpb->mVideo.dec_picture);
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
		DEL_EXIST(hw,
			p_H264_Dpb->mVideo.dec_picture) = 0;
		if (vdec->master) {
			/* Enhance-layer instance: mark EL presence on the
			 * base layer's last decoded picture. */
			struct vdec_h264_hw_s *hw_ba =
				(struct vdec_h264_hw_s *)
				vdec->master->private;
			if (hw_ba->last_dec_picture)
				DEL_EXIST(hw_ba,
					hw_ba->last_dec_picture)
					= 1;
		}
#endif
		/* chunks_mutex guards hw->chunk against the work handler. */
		mutex_lock(&hw->chunks_mutex);
		if (hw->chunk) {
			/* Frame-based input: PTS comes with the chunk. */
			p_H264_Dpb->mVideo.dec_picture->pts =
				hw->chunk->pts;
			p_H264_Dpb->mVideo.dec_picture->pts64 =
				hw->chunk->pts64;
			p_H264_Dpb->mVideo.dec_picture->timestamp =
				hw->chunk->timestamp;
#ifdef MH264_USERDATA_ENABLE
			vmh264_udc_fill_vpts(hw,
				p_H264_Dpb->mSlice.slice_type,
				hw->chunk->pts, 1);
#endif

#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
		} else if (vdec->master) {
			/*dv enhance layer,
			do not checkout pts*/
			struct StorablePicture *pic =
				p_H264_Dpb->mVideo.dec_picture;
			pic->pts = 0;
			pic->pts64 = 0;
#endif
		} else {
			/* Stream-based input: look the PTS up by the
			 * picture's stream offset. */
			struct StorablePicture *pic =
				p_H264_Dpb->mVideo.dec_picture;
			u32 offset = pic->offset_delimiter;
			pic->pic_size = (hw->start_bit_cnt - READ_VREG(VIFF_BIT_CNT)) >> 3;
			if (pts_pickout_offset_us64(PTS_TYPE_VIDEO,
				offset, &pic->pts, 0, &pic->pts64)) {
				/* Lookup failed: fall back to 0 (no PTS). */
				pic->pts = 0;
				pic->pts64 = 0;
#ifdef MH264_USERDATA_ENABLE
				vmh264_udc_fill_vpts(hw,
					p_H264_Dpb->mSlice.slice_type,
					pic->pts, 0);
#endif
			} else {
#ifdef MH264_USERDATA_ENABLE
				vmh264_udc_fill_vpts(hw,
					p_H264_Dpb->mSlice.slice_type,
					pic->pts, 1);
#endif
			}

		}
		mutex_unlock(&hw->chunks_mutex);

		check_decoded_pic_error(hw);
#ifdef ERROR_HANDLE_TEST
		if ((hw->data_flag & ERROR_FLAG)
			&& (hw->error_proc_policy & 0x80)) {
			/* Test policy: on error, drop everything and
			 * restart clean. */
			release_cur_decoding_buf(hw);
			h264_clear_dpb(hw);
			hw->dec_flag = 0;
			hw->data_flag = 0;
			hw->skip_frame_count = 0;
			hw->has_i_frame = 0;
			hw->no_error_count = 0xfff;
			hw->no_error_i_count = 0xf;
		} else
#endif
		if (hw->error_proc_policy & 0x200000) {
			/*
			 * Loop-playback detection: a sudden large POC jump
			 * backwards (current poc + threshold < a buffered
			 * frame's poc) suggests the stream restarted.  After
			 * 5 consecutive hits, force-output the stale frames.
			 */
			if (!hw->loop_flag) {
				for (i = 0; i < p_Dpb->used_size; i++) {
					if ((p_H264_Dpb->mVideo.dec_picture->poc + loop_playback_poc_threshold < p_Dpb->fs[i]->poc) &&
						!p_Dpb->fs[i]->is_output &&
						!p_Dpb->fs[i]->pre_output) {
						hw->loop_flag = 1;
						hw->loop_last_poc = p_H264_Dpb->mVideo.dec_picture->poc;
						break;
					}
				}
			} else {
				if ((p_H264_Dpb->mVideo.dec_picture->poc >= hw->loop_last_poc - poc_threshold) &&
					(p_H264_Dpb->mVideo.dec_picture->poc <= hw->loop_last_poc + poc_threshold)) {
					if (hw->loop_flag >= 5) {
						for (i = 0; i < p_Dpb->used_size; i++) {
							if ((hw->loop_last_poc + loop_playback_poc_threshold < p_Dpb->fs[i]->poc) &&
								!p_Dpb->fs[i]->is_output &&
								!p_Dpb->fs[i]->pre_output) {
								p_Dpb->fs[i]->is_output = 1;
							}
						}
						hw->loop_flag = 0;
					} else
						hw->loop_flag++;
				} else
					hw->loop_flag = 0;
			}
		}
		p_H264_Dpb->wait_aux_data_flag = ((!hw->discard_dv_data) && (hw->frmbase_cont_flag));
		ret = store_picture_in_dpb(p_H264_Dpb,
			p_H264_Dpb->mVideo.dec_picture,
			hw->data_flag | hw->dec_flag |
			p_H264_Dpb->mVideo.dec_picture->data_flag);

		if (ret == -1) {
			/* DPB store failed hard: recover the buffer manager. */
			release_cur_decoding_buf(hw);
			bufmgr_force_recover(p_H264_Dpb);
		} else if (ret == -2) {
			release_cur_decoding_buf(hw);
		} else {
			/* Stored OK: update error streaks and counters. */
			if (hw->data_flag & ERROR_FLAG) {
				hw->no_error_count = 0;
				hw->no_error_i_count = 0;
			} else {
				hw->no_error_count++;
				if (hw->data_flag & I_FLAG)
					hw->no_error_i_count++;
			}
			if (hw->mmu_enable)
				hevc_set_unused_4k_buff_idx(hw,
					p_H264_Dpb->mVideo.dec_picture->buf_spec_num);
			bufmgr_post(p_H264_Dpb);
			hw->last_dec_picture =
				p_H264_Dpb->mVideo.dec_picture;
			p_H264_Dpb->mVideo.dec_picture = NULL;
			/* dump_dpb(&p_H264_Dpb->mDPB); */
			hw->has_i_frame = 1;
			if (hw->mmu_enable)
				hevc_set_frame_done(hw);
			hw->decode_pic_count++;
			p_H264_Dpb->decode_pic_count = hw->decode_pic_count;
			if (hw->skip_frame_count > 0) {
				/*skip n frame after first I */
				hw->skip_frame_count--;
				if (hw->skip_frame_count == 0)
					hw->dec_flag &= (~NODISP_FLAG);
			} else if (hw->skip_frame_count < -1) {
				/*skip n frame after first I until second I */
				hw->skip_frame_count++;
				if (hw->skip_frame_count == -1)
					hw->dec_flag &= (~NODISP_FLAG);
			}
		}
	}
	return 0;
}
+ dec_picture->buf_spec_num); + bufmgr_post(p_H264_Dpb); + hw->last_dec_picture = + p_H264_Dpb->mVideo.dec_picture; + p_H264_Dpb->mVideo.dec_picture = NULL; + /* dump_dpb(&p_H264_Dpb->mDPB); */ + hw->has_i_frame = 1; + if (hw->mmu_enable) + hevc_set_frame_done(hw); + hw->decode_pic_count++; + p_H264_Dpb->decode_pic_count = hw->decode_pic_count; + if (hw->skip_frame_count > 0) { + /*skip n frame after first I */ + hw->skip_frame_count--; + if (hw->skip_frame_count == 0) + hw->dec_flag &= (~NODISP_FLAG); + } else if (hw->skip_frame_count < -1) { + /*skip n frame after first I until second I */ + hw->skip_frame_count++; + if (hw->skip_frame_count == -1) + hw->dec_flag &= (~NODISP_FLAG); + } + } + } + return 0; +} + +static irqreturn_t vh264_isr_thread_fn(struct vdec_s *vdec, int irq) +{ + int i; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)(vdec->private); + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + unsigned int dec_dpb_status = p_H264_Dpb->dec_dpb_status; + u32 debug_tag; + + if (dec_dpb_status == H264_SLICE_HEAD_DONE || + p_H264_Dpb->dec_dpb_status == H264_CONFIG_REQUEST) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_START); + } + else if (dec_dpb_status == H264_PIC_DATA_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_PIC_DONE_START); + } + else if (dec_dpb_status == H264_SEI_DATA_READY) + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_SEI_START); + else if (dec_dpb_status == H264_AUX_DATA_READY) + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_AUX_START); + + if (dec_dpb_status == H264_CONFIG_REQUEST) { +#if 1 + unsigned short *p = (unsigned short *)hw->lmem_addr; + for (i = 0; i < (RPM_END-RPM_BEGIN); i += 4) { + int ii; + for (ii = 0; ii < 4; ii++) { + p_H264_Dpb->dpb_param.l.data[i+ii] = + p[i+3-ii]; + if (dpb_is_debug(DECODE_ID(hw), + RRINT_FLAG_RPM)) { + if (((i + ii) & 0xf) == 0) + dpb_print(DECODE_ID(hw), + 0, "%04x:", + i); + dpb_print_cont(DECODE_ID(hw), + 0, 
"%04x ", + p[i+3-ii]); + if (((i + ii + 1) & 0xf) == 0) + dpb_print_cont( + DECODE_ID(hw), + 0, "\r\n"); + } + } + } + + if (p_H264_Dpb->bitstream_restriction_flag != + ((p_H264_Dpb->dpb_param.l.data[SPS_FLAGS2] >> 3) & 0x1)) { + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "p_H264_Dpb->bitstream_restriction_flag 0x%x, new 0x%x\n", + p_H264_Dpb->bitstream_restriction_flag, ((p_H264_Dpb->dpb_param.l.data[SPS_FLAGS2] >> 3) & 0x1)); + hw->csd_change_flag = 1; + } + p_H264_Dpb->bitstream_restriction_flag = + (p_H264_Dpb->dpb_param.l.data[SPS_FLAGS2] >> 3) & 0x1; + p_H264_Dpb->num_reorder_frames = + p_H264_Dpb->dpb_param.l.data[NUM_REORDER_FRAMES]; + p_H264_Dpb->max_dec_frame_buffering = + p_H264_Dpb->dpb_param.l.data[MAX_BUFFER_FRAME]; + + dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL, + "H264_CONFIG_REQUEST: pdb %d, %d, %d\n", + p_H264_Dpb->bitstream_restriction_flag, + p_H264_Dpb->num_reorder_frames, + p_H264_Dpb->max_dec_frame_buffering); + hw->bitstream_restriction_flag = + p_H264_Dpb->bitstream_restriction_flag; + hw->num_reorder_frames = + p_H264_Dpb->num_reorder_frames; + hw->max_dec_frame_buffering = + p_H264_Dpb->max_dec_frame_buffering; + + /*crop*/ + p_H264_Dpb->chroma_format_idc = p_H264_Dpb->dpb_param.dpb.chroma_format_idc; + p_H264_Dpb->frame_crop_left_offset = p_H264_Dpb->dpb_param.dpb.frame_crop_left_offset; + p_H264_Dpb->frame_crop_right_offset = p_H264_Dpb->dpb_param.dpb.frame_crop_right_offset; + p_H264_Dpb->frame_crop_top_offset = p_H264_Dpb->dpb_param.dpb.frame_crop_top_offset; + p_H264_Dpb->frame_crop_bottom_offset = p_H264_Dpb->dpb_param.dpb.frame_crop_bottom_offset; + + dpb_print(p_H264_Dpb->decoder_index, PRINT_FLAG_DPB_DETAIL, + "%s chroma_format_idc %d crop offset: left %d right %d top %d bottom %d\n", + __func__, p_H264_Dpb->chroma_format_idc, + p_H264_Dpb->frame_crop_left_offset, + p_H264_Dpb->frame_crop_right_offset, + p_H264_Dpb->frame_crop_top_offset, + p_H264_Dpb->frame_crop_bottom_offset); +#endif + + 
WRITE_VREG(DPB_STATUS_REG, H264_ACTION_CONFIG_DONE); + reset_process_time(hw); + hw->reg_iqidct_control = READ_VREG(IQIDCT_CONTROL); + hw->reg_iqidct_control_init_flag = 1; + hw->dec_result = DEC_RESULT_CONFIG_PARAM; +#ifdef DETECT_WRONG_MULTI_SLICE + /*restart check count and set 'unknown'*/ + dpb_print(DECODE_ID(hw), PRINT_FLAG_UCODE_EVT, + "%s MULTI_SLICE_DETECT (check_count %d slice_count %d cur_slice_count %d flag %d), H264_CONFIG_REQUEST => restart check\n", + __func__, + hw->multi_slice_pic_check_count, + hw->picture_slice_count, + hw->cur_picture_slice_count, + hw->multi_slice_pic_flag); + + hw->multi_slice_pic_check_count = 0; + hw->multi_slice_pic_flag = 0; + hw->picture_slice_count = 0; +#endif + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + vdec_schedule_work(&hw->work); + } else if (dec_dpb_status == H264_SLICE_HEAD_DONE) { + u16 data_hight; + u16 data_low; + u32 video_signal; + + int slice_header_process_status = 0; + int I_flag; + int frame_num_gap = 0; + union param dpb_param_bak; + /*unsigned char is_idr;*/ + unsigned short *p = (unsigned short *)hw->lmem_addr; + unsigned mb_width = hw->seq_info2 & 0xff; + unsigned short first_mb_in_slice; + unsigned int decode_mb_count, mby_mbx; + struct StorablePicture *pic = p_H264_Dpb->mVideo.dec_picture; + reset_process_time(hw); + hw->frmbase_cont_flag = 0; + + if ((pic != NULL) && (pic->mb_aff_frame_flag == 1)) + first_mb_in_slice = p[FIRST_MB_IN_SLICE + 3] * 2; + else + first_mb_in_slice = p[FIRST_MB_IN_SLICE + 3]; + +#ifdef DETECT_WRONG_MULTI_SLICE + hw->cur_picture_slice_count++; + + if ((hw->error_proc_policy & 0x10000) && + (hw->cur_picture_slice_count > 1) && + (first_mb_in_slice == 0) && + (hw->multi_slice_pic_flag == 0)) + hw->multi_slice_pic_check_count = 0; + + if ((hw->error_proc_policy & 0x10000) && + (hw->cur_picture_slice_count > 1) && + (hw->multi_slice_pic_flag == 1)) { + dpb_print(DECODE_ID(hw), 0, + "%s MULTI_SLICE_DETECT (check_count %d slice_count %d 
cur_slice_count %d flag %d), WRONG_MULTI_SLICE detected, insert picture\n", + __func__, + hw->multi_slice_pic_check_count, + hw->picture_slice_count, + hw->cur_picture_slice_count, + hw->multi_slice_pic_flag); + + mby_mbx = READ_VREG(MBY_MBX); + decode_mb_count = ((mby_mbx & 0xff) * mb_width + + (((mby_mbx >> 8) & 0xff) + 1)); + + if (first_mb_in_slice == decode_mb_count && + first_mb_in_slice != 0) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s first_mb_in_slice = %d \n", + __func__, first_mb_in_slice); + + hw->multi_slice_pic_flag = 0; + hw->multi_slice_pic_check_count = 0; + } else if (hw->cur_picture_slice_count > hw->last_picture_slice_count) { + vh264_pic_done_proc(vdec); + //if (p_H264_Dpb->mDPB.used_size == p_H264_Dpb->mDPB.size) { + if (!have_free_buf_spec(vdec)) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, "dpb full, wait buffer\n"); + p_H264_Dpb->mVideo.pre_frame_num = hw->first_pre_frame_num; + hw->last_picture_slice_count = hw->cur_picture_slice_count; + hw->no_decoder_buffer_flag = 1; + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + } + else { + if (p_H264_Dpb->mVideo.dec_picture) { + if (p_H264_Dpb->mVideo.dec_picture->colocated_buf_index >= 0) { + release_colocate_buf(p_H264_Dpb, + p_H264_Dpb->mVideo.dec_picture->colocated_buf_index); + p_H264_Dpb->mVideo.dec_picture->colocated_buf_index = -1; + } + } + release_cur_decoding_buf(hw); + } + } +#endif + + hw->reg_iqidct_control = READ_VREG(IQIDCT_CONTROL); + hw->reg_iqidct_control_init_flag = 1; + hw->reg_vcop_ctrl_reg = READ_VREG(VCOP_CTRL_REG); + hw->reg_rv_ai_mb_count = READ_VREG(RV_AI_MB_COUNT); + hw->vld_dec_control = READ_VREG(VLD_DECODE_CONTROL); + if (input_frame_based(vdec) && + frmbase_cont_bitlevel2 != 0 && + READ_VREG(VIFF_BIT_CNT) < + frmbase_cont_bitlevel2 && + hw->get_data_count >= 0x70000000) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s H264_SLICE_HEAD_DONE with small bitcnt %d, goto empty_proc\n", + 
__func__, + READ_VREG(VIFF_BIT_CNT)); + + goto empty_proc; + } + +#if 0 + if (p_H264_Dpb->mVideo.dec_picture == NULL) { + if (!is_buffer_available(vdec)) { + hw->buffer_empty_flag = 1; + dpb_print(DECODE_ID(hw), + PRINT_FLAG_UCODE_EVT, + "%s, buffer_empty, newframe_q(%d), have_free_buf_spec(%d), init_done(%d), used_size(%d/%d), is_there_unused_frame_from_dpb(%d)\n", + __func__, + kfifo_len(&hw->newframe_q), + have_free_buf_spec(vdec), + p_H264_Dpb->mDPB.init_done, + p_H264_Dpb->mDPB.used_size, + p_H264_Dpb->mDPB.size, + is_there_unused_frame_from_dpb( + &p_H264_Dpb->mDPB)); + return IRQ_HANDLED; + } + } + + hw->buffer_empty_flag = 0; +#endif +#ifdef SEND_PARAM_WITH_REG + for (i = 0; i < (RPM_END-RPM_BEGIN); i++) { + unsigned int data32; + + do { + data32 = READ_VREG(RPM_CMD_REG); + /* printk("%x\n", data32); */ + } while ((data32&0x10000) == 0); + p_H264_Dpb->dpb_param.l.data[i] = data32 & 0xffff; + WRITE_VREG(RPM_CMD_REG, 0); + /* printk("%x:%x\n", i,data32); */ + } +#else + dpb_param_bak = p_H264_Dpb->dpb_param; + ATRACE_COUNTER(hw->trace.decode_header_time_name, TRACE_HEADER_RPM_START); + for (i = 0; i < (RPM_END-RPM_BEGIN); i += 4) { + int ii; + + for (ii = 0; ii < 4; ii++) { + p_H264_Dpb->dpb_param.l.data[i+ii] = + p[i+3-ii]; + if (dpb_is_debug(DECODE_ID(hw), + RRINT_FLAG_RPM)) { + if (((i + ii) & 0xf) == 0) + dpb_print(DECODE_ID(hw), + 0, "%04x:", + i); + dpb_print_cont(DECODE_ID(hw), + 0, "%04x ", + p[i+3-ii]); + if (((i + ii + 1) & 0xf) == 0) + dpb_print_cont( + DECODE_ID(hw), + 0, "\r\n"); + } + } + } + ATRACE_COUNTER(hw->trace.decode_header_time_name, TRACE_HEADER_RPM_END); +#endif +#ifdef DETECT_WRONG_MULTI_SLICE + + if (p_H264_Dpb->mVideo.dec_picture && + hw->multi_slice_pic_flag == 2 && + (p_H264_Dpb->dpb_param.l.data[SLICE_TYPE] != dpb_param_bak.l.data[SLICE_TYPE] || + dpb_param_bak.l.data[FIRST_MB_IN_SLICE] > p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE])) { + dpb_print(DECODE_ID(hw), 0, + "decode next pic, save before, SLICE_TYPE BAK %d, 
SLICE_TYPE %d, FIRST_MB_IN_SLICE BAK %d, FIRST_MB_IN_SLICE %d\n", + dpb_param_bak.l.data[SLICE_TYPE], p_H264_Dpb->dpb_param.l.data[SLICE_TYPE], + dpb_param_bak.l.data[FIRST_MB_IN_SLICE], p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE]); + vh264_pic_done_proc(vdec); + } +#endif + data_low = p_H264_Dpb->dpb_param.l.data[VIDEO_SIGNAL_LOW]; + data_hight = p_H264_Dpb->dpb_param.l.data[VIDEO_SIGNAL_HIGHT]; + + video_signal = (data_hight << 16) | data_low; + hw->video_signal_from_vui = + ((video_signal & 0xffff) << 8) | + ((video_signal & 0xff0000) >> 16) | + ((video_signal & 0x3f000000)); + + + /*dpb_print(DECODE_ID(hw), + 0, + "video_signal_from_vui:0x%x, " + "data_low:0x%x, data_hight:0x%x\n", + hw->video_signal_from_vui, + data_low, + data_hight);*/ + + parse_sei_data(hw, hw->sei_data_buf, hw->sei_data_len); + + if (hw->config_bufmgr_done == 0) { + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + dpb_print(DECODE_ID(hw), + PRINT_FLAG_UCODE_EVT, + "config_bufmgr not done, discard frame\n"); + return IRQ_HANDLED; + } else if ((hw->first_i_policy & 0x3) != 0) { + unsigned char is_i_slice = + (p_H264_Dpb->dpb_param.l.data[SLICE_TYPE] + == I_Slice) + ? 
1 : 0; + unsigned char is_idr = + ((p_H264_Dpb->dpb_param.dpb.NAL_info_mmco & 0x1f) + == 5); + if ((hw->first_i_policy & 0x3) == 0x3) + is_i_slice = is_idr; + if (!is_i_slice) { + if (hw->has_i_frame == 0) { + amvdec_stop(); + vdec->mc_loaded = 0; + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + dpb_print(DECODE_ID(hw), + PRINT_FLAG_UCODE_EVT, + "has_i_frame is 0, discard none I(DR) frame silce_type %d is_idr %d\n", p_H264_Dpb->dpb_param.l.data[SLICE_TYPE], is_idr); + return IRQ_HANDLED; + } + } else { + if (hw->skip_frame_count < 0 || is_idr) { + /* second I */ + hw->dec_flag &= (~NODISP_FLAG); + hw->skip_frame_count = 0; + } + if (hw->has_i_frame == 0 && + (!is_idr)) { + int skip_count = + (hw->first_i_policy >> 8) & 0xff; + /* first I (not IDR) */ + if ((hw->first_i_policy & 0x3) == 2) + hw->skip_frame_count = + -1 - skip_count; + else + hw->skip_frame_count = + skip_count; + if (hw->skip_frame_count != 0) + hw->dec_flag |= NODISP_FLAG; + } + } + } + dpb_print(DECODE_ID(hw), PRINT_FLAG_UCODE_EVT, + "current dpb index %d, poc %d, top/bot poc (%d,%d)\n", + p_H264_Dpb->dpb_param.dpb.current_dpb_index, + val(p_H264_Dpb->dpb_param.dpb.frame_pic_order_cnt), + val(p_H264_Dpb->dpb_param.dpb.top_field_pic_order_cnt), + val(p_H264_Dpb->dpb_param.dpb.top_field_pic_order_cnt)); + I_flag = (p_H264_Dpb->dpb_param.l.data[SLICE_TYPE] == I_Slice) + ? 
I_FLAG : 0; + + if ((hw->i_only & 0x2) && (I_flag & I_FLAG)) + flush_dpb(p_H264_Dpb); + + if ((hw->i_only & 0x2) && (!(I_flag & I_FLAG)) && + (p_H264_Dpb->mSlice.structure == FRAME)) { + hw->data_flag = NULL_FLAG; + goto pic_done_proc; + } + + slice_header_process_status = + h264_slice_header_process(p_H264_Dpb, &frame_num_gap); + if (hw->mmu_enable) + hevc_sao_set_slice_type(hw, + slice_header_process_status, + hw->dpb.mSlice.idr_flag); + vui_config(hw); + + if (slice_header_process_status == -1) { + amvdec_stop(); + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + + if (p_H264_Dpb->mVideo.dec_picture) { + int cfg_ret = 0; + bool field_pic_flag = false; + unsigned mby_mbx = READ_VREG(MBY_MBX); + struct StorablePicture *p = + p_H264_Dpb->mVideo.dec_picture; + + if (slice_header_process_status == 1) { + if (!p_H264_Dpb->mSPS.frame_mbs_only_flag) { + field_pic_flag = + (p_H264_Dpb->mSlice.structure == TOP_FIELD || + p_H264_Dpb->mSlice.structure == BOTTOM_FIELD) ? + true : false; + } + + vdec_set_profile_level(vdec, p_H264_Dpb->mSPS.profile_idc, + p_H264_Dpb->mSPS.level_idc); + + if (!field_pic_flag && (((p_H264_Dpb->mSPS.profile_idc == BASELINE) && + (p_H264_Dpb->dec_dpb_size < 2)) || + (((unsigned long)(hw->vh264_amstream_dec_info + .param)) & 0x8) || hw->low_latency_mode & 0x8)) { + p_H264_Dpb->fast_output_enable = + H264_OUTPUT_MODE_FAST; + } + else + p_H264_Dpb->fast_output_enable + = fast_output_enable; + if (hw->enable_fence) + p_H264_Dpb->fast_output_enable = H264_OUTPUT_MODE_FAST; + + hw->data_flag = I_flag; + if ((p_H264_Dpb-> + dpb_param.dpb.NAL_info_mmco & 0x1f) + == 5) + hw->data_flag |= IDR_FLAG; + if ((p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE]) && !mby_mbx) { + p->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), + PRINT_FLAG_VDEC_STATUS, + "one slice error in muulti-slice first_mb 0x%x mby_mbx 0x%x slice_type %d\n", + p_H264_Dpb->dpb_param.l. 
+ data[FIRST_MB_IN_SLICE], + READ_VREG(MBY_MBX), + p->slice_type); + } + dpb_print(DECODE_ID(hw), + PRINT_FLAG_VDEC_STATUS, + "==================> frame count %d to skip %d\n", + hw->decode_pic_count+1, + hw->skip_frame_count); + } else if (hw->error_proc_policy & 0x100){ + unsigned decode_mb_count = + ((mby_mbx & 0xff) * hw->mb_width + + (((mby_mbx >> 8) & 0xff) + 1)); + if (decode_mb_count < + ((p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE]) * + (1 + p->mb_aff_frame_flag)) && decode_mb_count) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_VDEC_STATUS, + "Error detect! first_mb 0x%x mby_mbx 0x%x decode_mb 0x%x\n", + p_H264_Dpb->dpb_param.l. + data[FIRST_MB_IN_SLICE], + READ_VREG(MBY_MBX), + decode_mb_count); + p->data_flag |= ERROR_FLAG; + }/* else if (!p_H264_Dpb->dpb_param.l.data[FIRST_MB_IN_SLICE] && decode_mb_count) { + p->data_flag |= ERROR_FLAG; + goto pic_done_proc; + }*/ + } + + if (!I_flag && frame_num_gap && !p_H264_Dpb->long_term_reference_flag) { + if (!(hw->error_proc_policy & 0x800000)) { + hw->data_flag |= ERROR_FLAG; + p_H264_Dpb->mVideo.dec_picture->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), 0, "frame number gap error\n"); + } + } + + if (hw->error_proc_policy & 0x400) { + int ret = dpb_check_ref_list_error(p_H264_Dpb); + if (ret != 0) { + hw->reflist_error_count ++; + dpb_print(DECODE_ID(hw), 0, + "reference list error %d frame count %d to skip %d reflist_error_count %d\n", + ret, + hw->decode_pic_count+1, + hw->skip_frame_count, + hw->reflist_error_count); + + p_H264_Dpb->mVideo.dec_picture->data_flag = NODISP_FLAG; + if (((hw->error_proc_policy & 0x80) + && ((hw->dec_flag & + NODISP_FLAG) == 0)) ||(hw->reflist_error_count > 50)) { + hw->reset_bufmgr_flag = 1; + hw->reflist_error_count =0; + amvdec_stop(); + vdec->mc_loaded = 0; + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + } else + hw->reflist_error_count = 0; + } + if ((hw->error_proc_policy & 0x800) && (!(hw->i_only & 0x2)) + && 
p_H264_Dpb->dpb_error_flag != 0) { + dpb_print(DECODE_ID(hw), 0, + "dpb error %d\n", + p_H264_Dpb->dpb_error_flag); + hw->data_flag |= ERROR_FLAG; + p_H264_Dpb->mVideo.dec_picture->data_flag |= ERROR_FLAG; + if ((hw->error_proc_policy & 0x80) && + ((hw->dec_flag & NODISP_FLAG) == 0)) { + hw->reset_bufmgr_flag = 1; + amvdec_stop(); + vdec->mc_loaded = 0; + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + } + ATRACE_COUNTER(hw->trace.decode_header_time_name, TRACE_HEADER_REGISTER_START); + cfg_ret = config_decode_buf(hw, + p_H264_Dpb->mVideo.dec_picture); + ATRACE_COUNTER(hw->trace.decode_header_time_name, TRACE_HEADER_REGISTER_END); + if (cfg_ret < 0) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "config_decode_buf fail (%d)\n", + cfg_ret); + if (hw->error_proc_policy & 0x2) { + release_cur_decoding_buf(hw); + /*hw->data_flag |= ERROR_FLAG;*/ + hw->reset_bufmgr_flag = 1; + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } else + hw->data_flag |= ERROR_FLAG; + p_H264_Dpb->mVideo.dec_picture->data_flag |= ERROR_FLAG; + } + } + + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + + if (slice_header_process_status == 1) + WRITE_VREG(DPB_STATUS_REG, H264_ACTION_DECODE_NEWPIC); + else + WRITE_VREG(DPB_STATUS_REG, H264_ACTION_DECODE_SLICE); + hw->last_mby_mbx = 0; + hw->last_vld_level = 0; + start_process_time(hw); + } else if (dec_dpb_status == H264_PIC_DATA_DONE + ||((dec_dpb_status == H264_DATA_REQUEST) && input_frame_based(vdec))) { +#ifdef DETECT_WRONG_MULTI_SLICE + dpb_print(DECODE_ID(hw), PRINT_FLAG_UCODE_EVT, + "%s MULTI_SLICE_DETECT (check_count %d slice_count %d cur_slice_count %d flag %d), H264_PIC_DATA_DONE\n", + __func__, + hw->multi_slice_pic_check_count, + hw->picture_slice_count, + hw->cur_picture_slice_count, + hw->multi_slice_pic_flag); + + if (hw->multi_slice_pic_check_count < check_slice_num) { + hw->multi_slice_pic_check_count++; + if 
(hw->cur_picture_slice_count != + hw->picture_slice_count) { + /*restart check count and set 'unknown'*/ + hw->multi_slice_pic_check_count = 0; + hw->multi_slice_pic_flag = 0; + } + hw->picture_slice_count = + hw->cur_picture_slice_count; + } else if (hw->multi_slice_pic_check_count >= check_slice_num) { + if (hw->picture_slice_count > 1) + hw->multi_slice_pic_flag = 2; + else + hw->multi_slice_pic_flag = 1; + } +#endif + +pic_done_proc: + reset_process_time(hw); + if ((dec_dpb_status == H264_SEARCH_BUFEMPTY) || + (dec_dpb_status == H264_DECODE_BUFEMPTY) || + (dec_dpb_status == H264_DECODE_TIMEOUT) || + ((dec_dpb_status == H264_DATA_REQUEST) && input_frame_based(vdec))) { + hw->data_flag |= ERROR_FLAG; + if (hw->dpb.mVideo.dec_picture) + hw->dpb.mVideo.dec_picture->data_flag |= ERROR_FLAG; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "%s, mark err_frame\n", __func__); + } + vh264_pic_done_proc(vdec); + + if (hw->frmbase_cont_flag) { + bufmgr_h264_remove_unused_frame(p_H264_Dpb, 0); + if (!have_free_buf_spec(vdec)) { + hw->dec_result = DEC_RESULT_NEED_MORE_BUFFER; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + /*do not DEC_RESULT_GET_DATA*/ + hw->get_data_count = 0x7fffffff; + WRITE_VREG(DPB_STATUS_REG, H264_ACTION_SEARCH_HEAD); + decode_frame_count[DECODE_ID(hw)]++; + if (p_H264_Dpb->mSlice.slice_type == I_SLICE) { + hw->gvs.i_decoded_frames++; + } else if (p_H264_Dpb->mSlice.slice_type == P_SLICE) { + hw->gvs.p_decoded_frames++; + } else if (p_H264_Dpb->mSlice.slice_type == B_SLICE) { + hw->gvs.b_decoded_frames++; + } + start_process_time(hw); + return IRQ_HANDLED; + } + amvdec_stop(); + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s %s decode slice count %d\n", + __func__, + (dec_dpb_status == H264_PIC_DATA_DONE) ? + "H264_PIC_DATA_DONE" : + (dec_dpb_status == H264_FIND_NEXT_PIC_NAL) ? 
+ "H264_FIND_NEXT_PIC_NAL" : "H264_FIND_NEXT_DVEL_NAL", + hw->decode_pic_count); + if (hw->kpi_first_i_decoded == 0) { + hw->kpi_first_i_decoded = 1; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "[vdec_kpi][%s] First I frame decoded.\n", __func__); + } + /* WRITE_VREG(DPB_STATUS_REG, H264_ACTION_SEARCH_HEAD); */ + hw->dec_result = DEC_RESULT_DONE; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec->slave && + dec_dpb_status == H264_FIND_NEXT_DVEL_NAL) { + struct vdec_h264_hw_s *hw_el = + (struct vdec_h264_hw_s *)(vdec->slave->private); + hw_el->got_valid_nal = 0; + hw->switch_dvlayer_flag = 1; + } else if (vdec->master && + dec_dpb_status == H264_FIND_NEXT_PIC_NAL) { + struct vdec_h264_hw_s *hw_bl = + (struct vdec_h264_hw_s *)(vdec->master->private); + hw_bl->got_valid_nal = 0; + hw->switch_dvlayer_flag = 1; + } else { + hw->switch_dvlayer_flag = 0; + hw->got_valid_nal = 1; + } +#endif + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + } else if ( + (dec_dpb_status == H264_FIND_NEXT_PIC_NAL) || + (dec_dpb_status == H264_FIND_NEXT_DVEL_NAL)) { + goto pic_done_proc; +#endif + } else if (dec_dpb_status == H264_AUX_DATA_READY) { + reset_process_time(hw); + if (READ_VREG(H264_AUX_DATA_SIZE) != 0) { + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_SEI_DETAIL)) + dump_aux_buf(hw); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec_frame_based(vdec)) { + if (hw->last_dec_picture) + set_aux_data(hw, + hw->last_dec_picture, 0, 0, NULL); + } else if (vdec->dolby_meta_with_el || vdec->slave) { + if (hw->last_dec_picture) + set_aux_data(hw, hw->last_dec_picture, + 0, 0, NULL); + } else { + if (vdec->master) { + struct vdec_h264_hw_s *hw_bl = + (struct vdec_h264_hw_s *) + (vdec->master->private); + if (hw_bl->last_dec_picture != NULL) { + set_aux_data(hw_bl, + hw_bl->last_dec_picture, + 0, 1, hw); + } + 
set_aux_data(hw, + hw->last_dec_picture, + 0, 2, NULL); + } + } +#else + if (hw->last_dec_picture) + set_aux_data(hw, + hw->last_dec_picture, 0, 0, NULL); +#endif + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + hw->switch_dvlayer_flag = 0; + hw->got_valid_nal = 1; +#endif + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s H264_AUX_DATA_READY\n", __func__); + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + } else if (/*(dec_dpb_status == H264_DATA_REQUEST) ||*/ + (dec_dpb_status == H264_SEARCH_BUFEMPTY) || + (dec_dpb_status == H264_DECODE_BUFEMPTY) || + (dec_dpb_status == H264_DECODE_TIMEOUT)) { +empty_proc: + reset_process_time(hw); + if ((hw->error_proc_policy & 0x40000) && + ((dec_dpb_status == H264_DECODE_TIMEOUT) || + (!hw->frmbase_cont_flag && (dec_dpb_status == H264_SEARCH_BUFEMPTY || dec_dpb_status == H264_DECODE_BUFEMPTY) && input_frame_based(vdec)))) + goto pic_done_proc; + if (!hw->frmbase_cont_flag) + release_cur_decoding_buf(hw); + + if (input_frame_based(vdec) || + (READ_VREG(VLD_MEM_VIFIFO_LEVEL) > 0x200)) { + if (h264_debug_flag & + DISABLE_ERROR_HANDLE) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, + "%s decoding error, level 0x%x\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + goto send_again; + } + amvdec_stop(); + vdec->mc_loaded = 0; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s %s\n", __func__, + (dec_dpb_status == H264_SEARCH_BUFEMPTY) ? + "H264_SEARCH_BUFEMPTY" : + (dec_dpb_status == H264_DECODE_BUFEMPTY) ? + "H264_DECODE_BUFEMPTY" : + (dec_dpb_status == H264_DECODE_TIMEOUT) ? 
+ "H264_DECODE_TIMEOUT" : + "OTHER"); + hw->dec_result = DEC_RESULT_DONE; + + if (dec_dpb_status == H264_SEARCH_BUFEMPTY) + hw->search_dataempty_num++; + else if (dec_dpb_status == H264_DECODE_TIMEOUT) { + hw->decode_timeout_num++; + if (hw->error_proc_policy & 0x4000) { + hw->data_flag |= ERROR_FLAG; + if ((p_H264_Dpb->last_dpb_status == H264_DECODE_TIMEOUT) || + (p_H264_Dpb->last_dpb_status == H264_PIC_DATA_DONE) || + ((p_H264_Dpb->last_dpb_status == H264_SLICE_HEAD_DONE) && + (p_H264_Dpb->mSlice.slice_type != B_SLICE))) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, "%s last dpb status 0x%x need bugmgr reset \n", + p_H264_Dpb->last_dpb_status, __func__); + hw->reset_bufmgr_flag = 1; + } + } + } else if (dec_dpb_status == H264_DECODE_BUFEMPTY) + hw->decode_dataempty_num++; + if (!hw->frmbase_cont_flag) + hw->data_flag |= ERROR_FLAG; + + vdec_schedule_work(&hw->work); + } else { + /* WRITE_VREG(DPB_STATUS_REG, H264_ACTION_INIT); */ +#ifdef DETECT_WRONG_MULTI_SLICE + if (hw->error_proc_policy & 0x10000) { + p_H264_Dpb->mVideo.pre_frame_num = hw->first_pre_frame_num; + } + hw->last_picture_slice_count = hw->cur_picture_slice_count; +#endif + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_AGAIN\n", __func__); +send_again: + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + } + } else if (dec_dpb_status == H264_DATA_REQUEST) { + reset_process_time(hw); + if (input_frame_based(vdec)) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_VDEC_STATUS, + "%s H264_DATA_REQUEST (%d)\n", + __func__, hw->get_data_count); + hw->dec_result = DEC_RESULT_GET_DATA; + hw->reg_iqidct_control = READ_VREG(IQIDCT_CONTROL); + hw->reg_iqidct_control_init_flag = 1; + hw->get_data_start_time = jiffies; + hw->get_data_count++; + if (hw->get_data_count >= frame_max_data_packet) + goto empty_proc; + vdec_schedule_work(&hw->work); + } else + goto empty_proc; + } else if (dec_dpb_status == H264_DECODE_OVER_SIZE) { + dpb_print(DECODE_ID(hw), 0, + "vmh264 decode 
oversize !!\n"); + release_cur_decoding_buf(hw); + hw->data_flag |= ERROR_FLAG; + hw->stat |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + reset_process_time(hw); + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } else if (dec_dpb_status == H264_SEI_DATA_READY) { + int aux_data_len; + aux_data_len = + (READ_VREG(H264_AUX_DATA_SIZE) >> 16) << 4; + + if (aux_data_len > SEI_DATA_SIZE) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "sei data size more than 4K: %d, discarded it\n", + hw->sei_itu_data_len); + hw->sei_itu_data_len = 0; + } + + if (aux_data_len != 0) { + u8 *trans_data_buf; + u8 *sei_data_buf; + u8 swap_byte; + +#if 0 + dump_aux_buf(hw); +#endif + trans_data_buf = (u8 *)hw->aux_addr; + + if (trans_data_buf[7] == AUX_TAG_SEI) { + int left_len; + + sei_data_buf = (u8 *)hw->sei_data_buf + + hw->sei_data_len; + left_len = SEI_DATA_SIZE - hw->sei_data_len; + if (aux_data_len/2 <= left_len) { + for (i = 0; i < aux_data_len/2; i++) + sei_data_buf[i] + = trans_data_buf[i*2]; + + aux_data_len = aux_data_len / 2; + for (i = 0; i < aux_data_len; i = i+4) { + swap_byte = sei_data_buf[i]; + sei_data_buf[i] + = sei_data_buf[i+3]; + sei_data_buf[i+3] = swap_byte; + + swap_byte = sei_data_buf[i+1]; + sei_data_buf[i+1] + = sei_data_buf[i+2]; + sei_data_buf[i+2] = swap_byte; + } + + for (i = aux_data_len-1; i >= 0; i--) + if (sei_data_buf[i] != 0) + break; + + hw->sei_data_len += i+1; + } else + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, + "sei data size %d and more than left space: %d, discarded it\n", + hw->sei_itu_data_len, + left_len); + } + } + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + WRITE_VREG(DPB_STATUS_REG, H264_SEI_DATA_DONE); + + return IRQ_HANDLED; + } + + + /* ucode debug */ + debug_tag = READ_VREG(DEBUG_REG1); + if (debug_tag & 0x10000) { + unsigned short *p = (unsigned short *)hw->lmem_addr; + + dpb_print(DECODE_ID(hw), 0, + "LMEM<tag %x>:\n", debug_tag); + for (i = 0; i < 0x400; i += 4) { + 
int ii; + if ((i & 0xf) == 0) + dpb_print_cont(DECODE_ID(hw), 0, + "%03x: ", i); + for (ii = 0; ii < 4; ii++) + dpb_print_cont(DECODE_ID(hw), 0, + "%04x ", p[i+3-ii]); + if (((i+ii) & 0xf) == 0) + dpb_print_cont(DECODE_ID(hw), 0, + "\n"); + } + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == + hw->decode_pic_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_VREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + } + else if (debug_tag & 0x20000) + hw->ucode_pause_pos = 0xffffffff; + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_VREG(DEBUG_REG1, 0); + } else if (debug_tag != 0) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_UCODE_EVT, + "dbg%x: %x\n", debug_tag, + READ_VREG(DEBUG_REG2)); + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == + hw->decode_pic_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_VREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + } + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_VREG(DEBUG_REG1, 0); + } + /**/ + return IRQ_HANDLED; +} + +static irqreturn_t vh264_isr(struct vdec_s *vdec, int irq) +{ + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)(vdec->private); + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + if (!hw) + return IRQ_HANDLED; + + if (hw->eos) + return IRQ_HANDLED; + + p_H264_Dpb->vdec = vdec; + p_H264_Dpb->dec_dpb_status = READ_VREG(DPB_STATUS_REG); + + if (p_H264_Dpb->dec_dpb_status == H264_SLICE_HEAD_DONE || + p_H264_Dpb->dec_dpb_status == H264_CONFIG_REQUEST) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_HEAD_DONE); + } + else if (p_H264_Dpb->dec_dpb_status == H264_PIC_DATA_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_PIC_DONE); + } + else if 
(p_H264_Dpb->dec_dpb_status == H264_SEI_DATA_READY) + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_SEI_DONE); + else if (p_H264_Dpb->dec_dpb_status == H264_AUX_DATA_READY) + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_AUX_DONE); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_UCODE_EVT, + "%s DPB_STATUS_REG: 0x%x, run(%d) last_state (%x) ERROR_STATUS_REG 0x%x, sb (0x%x 0x%x 0x%x) bitcnt 0x%x mby_mbx 0x%x\n", + __func__, + p_H264_Dpb->dec_dpb_status, + run_count[DECODE_ID(hw)], + hw->dec_result, + READ_VREG(ERROR_STATUS_REG), + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VIFF_BIT_CNT), + READ_VREG(MBY_MBX)); + + ATRACE_COUNTER("V_ST_DEC-decode_state", p_H264_Dpb->dec_dpb_status); + + if (p_H264_Dpb->dec_dpb_status == H264_WRRSP_REQUEST) { + if (hw->mmu_enable) + hevc_sao_wait_done(hw); + WRITE_VREG(DPB_STATUS_REG, H264_WRRSP_DONE); + return IRQ_HANDLED; + } + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_END); + return IRQ_WAKE_THREAD; + +} + +static void timeout_process(struct vdec_h264_hw_s *hw) +{ + struct vdec_s *vdec = hw_to_vdec(hw); + + /* + * In this very timeout point,the vh264_work arrives, + * or in some cases the system become slow, then come + * this second timeout. In both cases we return. 
+ */ + if (work_pending(&hw->work) || + work_busy(&hw->work) || + work_busy(&hw->timeout_work) || + work_pending(&hw->timeout_work)) { + pr_err("%s h264[%d] work pending, do nothing.\n",__func__, vdec->id); + return; + } + + hw->timeout_num++; + amvdec_stop(); + vdec->mc_loaded = 0; + if (hw->mmu_enable) { + hevc_set_frame_done(hw); + hevc_sao_wait_done(hw); + } + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, "%s decoder timeout, DPB_STATUS_REG 0x%x\n", __func__, READ_VREG(DPB_STATUS_REG)); + release_cur_decoding_buf(hw); + hw->dec_result = DEC_RESULT_TIMEOUT; + hw->data_flag |= ERROR_FLAG; + + if (work_pending(&hw->work)) + return; + vdec_schedule_work(&hw->timeout_work); +} + +static void dump_bufspec(struct vdec_h264_hw_s *hw, + const char *caller) +{ + int i; + dpb_print(DECODE_ID(hw), 0, + "%s in %s:\n", __func__, caller); + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (hw->buffer_spec[i].used == -1) + continue; + dpb_print(DECODE_ID(hw), 0, + "bufspec (%d): used %d adr 0x%x(%lx) canvas(%d) vf_ref(%d) ", + i, hw->buffer_spec[i].used, + hw->buffer_spec[i].buf_adr, + hw->buffer_spec[i].cma_alloc_addr, + hw->buffer_spec[i].canvas_pos, + hw->buffer_spec[i].vf_ref + ); +#ifdef CONFIG_AM_VDEC_DV + dpb_print_cont(DECODE_ID(hw), 0, + "dv_el_exist %d", + hw->buffer_spec[i].dv_enhance_exist + ); +#endif + dpb_print_cont(DECODE_ID(hw), 0, "\n"); + } + +} + +static void vmh264_dump_state(struct vdec_s *vdec) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *)(vdec->private); + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + dpb_print(DECODE_ID(hw), 0, + "====== %s\n", __func__); + dpb_print(DECODE_ID(hw), 0, + "width/height (%d/%d), num_reorder_frames %d dec_dpb_size %d dpb size(bufspec count) %d max_reference_size(collocate count) %d i_only %d signal_type 0x%x send_err %d\n", + hw->frame_width, + hw->frame_height, + hw->num_reorder_frames, + hw->dpb.dec_dpb_size, + hw->dpb.mDPB.size, + hw->max_reference_size, + hw->i_only, + hw->video_signal_type, + 
hw->send_error_frame_flag + ); + + dpb_print(DECODE_ID(hw), 0, + "is_framebase(%d), eos %d, state 0x%x, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d bufmgr_reset_cnt %d error_frame_count = %d, drop_frame_count = %d\n", + input_frame_based(vdec), + hw->eos, + hw->stat, + hw->dec_result, + decode_frame_count[DECODE_ID(hw)], + display_frame_count[DECODE_ID(hw)], + run_count[DECODE_ID(hw)], + not_run_ready[DECODE_ID(hw)], + input_empty[DECODE_ID(hw)], + hw->reset_bufmgr_count, + hw->gvs.error_frame_count, + hw->gvs.drop_frame_count + ); + +#ifdef DETECT_WRONG_MULTI_SLICE + dpb_print(DECODE_ID(hw), 0, + "MULTI_SLICE_DETECT (check_count %d slice_count %d cur_slice_count %d flag %d)\n", + hw->multi_slice_pic_check_count, + hw->picture_slice_count, + hw->cur_picture_slice_count, + hw->multi_slice_pic_flag); +#endif + if (!hw->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + dpb_print(DECODE_ID(hw), 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + + dpb_print(DECODE_ID(hw), 0, + "%s, newq(%d/%d), dispq(%d/%d) vf prepare/get/put (%d/%d/%d), free_spec(%d), initdon(%d), used_size(%d/%d), unused_fr_dpb(%d) fast_output_enable %x \n", + __func__, + kfifo_len(&hw->newframe_q), + VF_POOL_SIZE, + kfifo_len(&hw->display_q), + VF_POOL_SIZE, + hw->vf_pre_count, + hw->vf_get_count, + hw->vf_put_count, + have_free_buf_spec(vdec), + p_H264_Dpb->mDPB.init_done, + p_H264_Dpb->mDPB.used_size, p_H264_Dpb->mDPB.size, + is_there_unused_frame_from_dpb(&p_H264_Dpb->mDPB), + p_H264_Dpb->fast_output_enable + ); + + dump_dpb(&p_H264_Dpb->mDPB, 1); + dump_pic(p_H264_Dpb); + dump_bufspec(hw, __func__); + + dpb_print(DECODE_ID(hw), 0, + "DPB_STATUS_REG=0x%x\n", + READ_VREG(DPB_STATUS_REG)); + dpb_print(DECODE_ID(hw), 0, + "MPC_E=0x%x\n", + READ_VREG(MPC_E)); + dpb_print(DECODE_ID(hw), 0, + 
"H264_DECODE_MODE=0x%x\n", + READ_VREG(H264_DECODE_MODE)); + dpb_print(DECODE_ID(hw), 0, + "MBY_MBX=0x%x\n", + READ_VREG(MBY_MBX)); + dpb_print(DECODE_ID(hw), 0, + "H264_DECODE_SIZE=0x%x\n", + READ_VREG(H264_DECODE_SIZE)); + dpb_print(DECODE_ID(hw), 0, + "VIFF_BIT_CNT=0x%x\n", + READ_VREG(VIFF_BIT_CNT)); + dpb_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_LEVEL=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + dpb_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_WP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP)); + dpb_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_RP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_RP)); + dpb_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + dpb_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (input_frame_based(vdec) && + dpb_is_debug(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA) + ) { + int jj; + if (hw->chunk && hw->chunk->block && + hw->chunk->size > 0) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, hw->chunk->size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + dpb_print(DECODE_ID(hw), 0, + "frame data size 0x%x\n", + hw->chunk->size); + for (jj = 0; jj < hw->chunk->size; jj++) { + if ((jj & 0xf) == 0) + dpb_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + dpb_print_cont(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + dpb_print_cont(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } +} + + +static void check_timer_func(struct timer_list *timer) +{ + struct vdec_h264_hw_s *hw = container_of(timer, + struct vdec_h264_hw_s, check_timer); + struct vdec_s *vdec = hw_to_vdec(hw); + int error_skip_frame_count = error_skip_count & 0xfff; + unsigned int timeout_val = decode_timeout_val; + if (timeout_val != 0 && + hw->no_error_count < 
error_skip_frame_count) + timeout_val = errordata_timeout_val; + if ((h264_debug_cmd & 0x100) != 0 && + DECODE_ID(hw) == (h264_debug_cmd & 0xff)) { + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + pr_info("vdec %d is forced to be disconnected\n", + h264_debug_cmd & 0xff); + h264_debug_cmd = 0; + return; + } + if ((h264_debug_cmd & 0x200) != 0 && + DECODE_ID(hw) == (h264_debug_cmd & 0xff)) { + pr_debug("vdec %d is forced to reset bufmgr\n", + h264_debug_cmd & 0xff); + hw->reset_bufmgr_flag = 1; + h264_debug_cmd = 0; + return; + } + + if (vdec->next_status == VDEC_STATUS_DISCONNECTED && + !hw->is_used_v4l) { + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + pr_debug("vdec requested to be disconnected\n"); + return; + } + + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + + if (((h264_debug_flag & DISABLE_ERROR_HANDLE) == 0) && + (timeout_val > 0) && + (hw->start_process_time > 0) && + ((1000 * (jiffies - hw->start_process_time) / HZ) + > timeout_val) + ) { + u32 dpb_status = READ_VREG(DPB_STATUS_REG); + u32 mby_mbx = READ_VREG(MBY_MBX); + if ((dpb_status == H264_ACTION_DECODE_NEWPIC) || + (dpb_status == H264_ACTION_DECODE_SLICE) || + (dpb_status == H264_SEI_DATA_DONE) || + (dpb_status == H264_STATE_SEARCH_HEAD) || + (dpb_status == H264_SLICE_HEAD_DONE) || + (dpb_status == H264_SEI_DATA_READY)) { + if (h264_debug_flag & DEBUG_TIMEOUT_DEC_STAT) + pr_debug("%s dpb_status = 0x%x last_mby_mbx = %u mby_mbx = %u\n", + __func__, dpb_status, hw->last_mby_mbx, mby_mbx); + + if (hw->last_mby_mbx == mby_mbx) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + { + reset_process_time(hw); + timeout_process(hw); + } + } else + start_process_time(hw); + } else if (is_in_parsing_state(dpb_status)) { + if (hw->last_vld_level == + 
READ_VREG(VLD_MEM_VIFIFO_LEVEL)) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + { + reset_process_time(hw); + timeout_process(hw); + } + } + } + hw->last_vld_level = + READ_VREG(VLD_MEM_VIFIFO_LEVEL); + hw->last_mby_mbx = mby_mbx; + } + + if ((hw->ucode_pause_pos != 0) && + (hw->ucode_pause_pos != 0xffffffff) && + udebug_pause_pos != hw->ucode_pause_pos) { + hw->ucode_pause_pos = 0; + WRITE_VREG(DEBUG_REG1, 0); + } + + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static int dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + u32 ar, ar_tmp; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + + if (!hw) + return -1; + + vstatus->frame_width = hw->frame_width; + vstatus->frame_height = hw->frame_height; + if (hw->error_frame_width && + hw->error_frame_height) { + vstatus->frame_width = hw->error_frame_width; + vstatus->frame_height = hw->error_frame_height; + } + if (hw->frame_dur != 0) { + vstatus->frame_dur = hw->frame_dur; + vstatus->frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ? 
+ 96000 / hw->frame_dur : (96000 / hw->frame_dur +1); + } + else + vstatus->frame_rate = -1; + vstatus->error_count = hw->gvs.error_frame_count; + vstatus->status = hw->stat; + if (hw->h264_ar == 0x3ff) + ar_tmp = (0x100 * + hw->frame_height * hw->height_aspect_ratio) / + (hw->frame_width * hw->width_aspect_ratio); + else + ar_tmp = hw->h264_ar; + ar = min_t(u32, + ar_tmp, + DISP_RATIO_ASPECT_RATIO_MAX); + vstatus->ratio_control = + ar << DISP_RATIO_ASPECT_RATIO_BIT; + + vstatus->error_frame_count = hw->gvs.error_frame_count; + vstatus->drop_frame_count = hw->gvs.drop_frame_count; + vstatus->frame_count = decode_frame_count[DECODE_ID(hw)]; + vstatus->i_decoded_frames = hw->gvs.i_decoded_frames; + vstatus->i_lost_frames = hw->gvs.i_lost_frames; + vstatus->i_concealed_frames = hw->gvs.i_concealed_frames; + vstatus->p_decoded_frames = hw->gvs.p_decoded_frames; + vstatus->p_lost_frames = hw->gvs.p_lost_frames; + vstatus->p_concealed_frames = hw->gvs.p_concealed_frames; + vstatus->b_decoded_frames = hw->gvs.b_decoded_frames; + vstatus->b_lost_frames = hw->gvs.b_lost_frames; + vstatus->b_concealed_frames = hw->gvs.b_concealed_frames; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s-%02d", DRIVER_NAME, hw->id); + + return 0; +} + +static int vh264_hw_ctx_restore(struct vdec_h264_hw_s *hw) +{ + int i, j; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + hw->frmbase_cont_flag = 0; + + /* if (hw->init_flag == 0) { */ + if (h264_debug_flag & 0x40000000) { + /* if (1) */ + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s, reset register\n", __func__); + + while (READ_VREG(DCAC_DMA_CTRL) & 0x8000) + ; + while (READ_VREG(LMEM_DMA_CTRL) & 0x8000) + ; /* reg address is 0x350 */ + +#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + WRITE_VREG(DOS_SW_RESET0, (1<<7) | (1<<6) | (1<<4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + + WRITE_VREG(DOS_SW_RESET0, (1<<7) | (1<<6) | 
(1<<4)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(DOS_SW_RESET0, (1<<9) | (1<<8)); + WRITE_VREG(DOS_SW_RESET0, 0); + + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + READ_VREG(DOS_SW_RESET0); + +#else + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + READ_RESET_REG(RESET0_REGISTER); + WRITE_RESET_REG(RESET0_REGISTER, + RESET_IQIDCT | RESET_MC | RESET_VLD_PART); + + WRITE_RESET_REG(RESET2_REGISTER, RESET_PIC_DC | RESET_DBLK); +#endif + WRITE_VREG(POWER_CTL_VLD, + READ_VREG(POWER_CTL_VLD) | (0 << 10) | + (1 << 9) | (1 << 6)); + } else { + /* WRITE_VREG(POWER_CTL_VLD, + * READ_VREG(POWER_CTL_VLD) | (0 << 10) | (1 << 9) ); + */ + WRITE_VREG(POWER_CTL_VLD, + READ_VREG(POWER_CTL_VLD) | + (0 << 10) | (1 << 9) | (1 << 6)); + } + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1<<17); +#endif + + /* cbcr_merge_swap_en */ + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + else + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + } else { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + else + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + } + + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 0xbf << 24); + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 0xbf << 24); + + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 31); + if (hw->mmu_enable) { + SET_VREG_MASK(MDEC_PIC_DC_MUX_CTRL, 1<<31); + /* sw reset to extif hardware */ + SET_VREG_MASK(MDEC_EXTIF_CFG1, 1<<30); + CLEAR_VREG_MASK(MDEC_EXTIF_CFG1, 1<<30); + } else { + 
CLEAR_VREG_MASK(MDEC_PIC_DC_MUX_CTRL, 1 << 31); + WRITE_VREG(MDEC_EXTIF_CFG1, 0); + } + + +#if 1 /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + /* pr_info("vh264 meson8 prot init\n"); */ + WRITE_VREG(MDEC_PIC_DC_THRESH, 0x404038aa); +#endif + +#ifdef VDEC_DW + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T7) { + if (IS_VDEC_DW(hw)) { + u32 data = ((1 << 30) |(1 << 0) |(1 << 8)); + + if (IS_VDEC_DW(hw) == 2) + data |= (1 << 9); + WRITE_VREG(MDEC_DOUBLEW_CFG0, data); /* Double Write Enable*/ + } + } +#endif + if (hw->dpb.mDPB.size > 0) { + WRITE_VREG(AV_SCRATCH_7, (hw->max_reference_size << 24) | + (hw->dpb.mDPB.size << 16) | + (hw->dpb.mDPB.size << 8)); + + for (j = 0; j < hw->dpb.mDPB.size; j++) { + i = get_buf_spec_by_canvas_pos(hw, j); + if (i < 0) + break; + + if (!hw->mmu_enable && + hw->buffer_spec[i].cma_alloc_addr) + config_decode_canvas(hw, i); + if (hw->mmu_enable && hw->double_write_mode) + config_decode_canvas_ex(hw, i); + } + } else { + WRITE_VREG(AV_SCRATCH_0, 0); + WRITE_VREG(AV_SCRATCH_9, 0); + } + + if (hw->init_flag == 0) + WRITE_VREG(DPB_STATUS_REG, 0); + else + WRITE_VREG(DPB_STATUS_REG, H264_ACTION_DECODE_START); + + WRITE_VREG(FRAME_COUNTER_REG, hw->decode_pic_count); + WRITE_VREG(AV_SCRATCH_8, hw->buf_offset); + if (!tee_enabled()) + WRITE_VREG(AV_SCRATCH_G, hw->mc_dma_handle); + + /* hw->error_recovery_mode = (error_recovery_mode != 0) ? 
+ * error_recovery_mode : error_recovery_mode_in; + */ + /* WRITE_VREG(AV_SCRATCH_F, + * (READ_VREG(AV_SCRATCH_F) & 0xffffffc3) ); + */ + WRITE_VREG(AV_SCRATCH_F, (hw->save_reg_f & 0xffffffc3) | + ((error_recovery_mode_in & 0x1) << 4)); + /*if (hw->ucode_type == UCODE_IP_ONLY_PARAM) + SET_VREG_MASK(AV_SCRATCH_F, 1 << 6); + else*/ + CLEAR_VREG_MASK(AV_SCRATCH_F, 1 << 6); + + WRITE_VREG(LMEM_DUMP_ADR, (u32)hw->lmem_phy_addr); +#if 1 /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + WRITE_VREG(MDEC_PIC_DC_THRESH, 0x404038aa); +#endif + + WRITE_VREG(DEBUG_REG1, 0); + WRITE_VREG(DEBUG_REG2, 0); + + /*Because CSD data is not found at playback start, + the IQIDCT_CONTROL register is not saved, + the initialized value 0x200 of IQIDCT_CONTROL is set*/ + if (hw->init_flag && (hw->reg_iqidct_control_init_flag == 0)) + WRITE_VREG(IQIDCT_CONTROL, 0x200); + + if (hw->reg_iqidct_control) + WRITE_VREG(IQIDCT_CONTROL, hw->reg_iqidct_control); + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "IQIDCT_CONTROL = 0x%x\n", READ_VREG(IQIDCT_CONTROL)); + + if (hw->reg_vcop_ctrl_reg) + WRITE_VREG(VCOP_CTRL_REG, hw->reg_vcop_ctrl_reg); + if (hw->vld_dec_control) + WRITE_VREG(VLD_DECODE_CONTROL, hw->vld_dec_control); + return 0; +} + +static int vmh264_set_trickmode(struct vdec_s *vdec, unsigned long trickmode) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *)vdec->private; + if (i_only_flag & 0x100) + return 0; + if (trickmode == TRICKMODE_I) + hw->i_only = 0x3; + else if (trickmode == TRICKMODE_NONE) + hw->i_only = 0x0; + return 0; +} + +static unsigned char amvdec_enable_flag; +static void vh264_local_init(struct vdec_h264_hw_s *hw, bool is_reset) +{ + int i; + hw->init_flag = 0; + hw->first_sc_checked= 0; + hw->eos = 0; + hw->valve_count = 0; + hw->config_bufmgr_done = 0; + hw->start_process_time = 0; + hw->has_i_frame = 0; + hw->no_error_count = 0xfff; + hw->no_error_i_count = 0xf; + + hw->dec_flag = 0; + hw->data_flag = 0; + hw->skip_frame_count = 0; + 
hw->reg_iqidct_control = 0; + hw->reg_iqidct_control_init_flag = 0; + hw->reg_vcop_ctrl_reg = 0; + hw->reg_rv_ai_mb_count = 0; + hw->vld_dec_control = 0; + hw->decode_timeout_count = 0; + hw->no_mem_count = 0; + hw->vh264_ratio = hw->vh264_amstream_dec_info.ratio; + /* vh264_ratio = 0x100; */ + + hw->vh264_rotation = (((unsigned long) + hw->vh264_amstream_dec_info.param) >> 16) & 0xffff; + + hw->frame_prog = 0; + hw->frame_width = hw->vh264_amstream_dec_info.width; + hw->frame_height = hw->vh264_amstream_dec_info.height; + hw->frame_dur = hw->vh264_amstream_dec_info.rate; + hw->pts_outside = ((unsigned long) + hw->vh264_amstream_dec_info.param) & 0x01; + hw->sync_outside = ((unsigned long) + hw->vh264_amstream_dec_info.param & 0x02) >> 1; + hw->use_idr_framerate = ((unsigned long) + hw->vh264_amstream_dec_info.param & 0x04) >> 2; + hw->max_refer_buf = !(((unsigned long) + hw->vh264_amstream_dec_info.param & 0x10) >> 4); + if (hw->frame_dur < 96000/960) { + /*more than 960fps,it should not be a correct value, + *give default 30fps + */ + hw->frame_dur = 96000/30; + } + + hw->unstable_pts = (((unsigned long) hw->vh264_amstream_dec_info.param & 0x40) >> 6); + + hw->first_i_policy = first_i_policy; + + pr_info("H264 sysinfo: %dx%d duration=%d, pts_outside=%d\n", + hw->frame_width, hw->frame_height, hw->frame_dur, hw->pts_outside); + pr_debug("sync_outside=%d, use_idr_framerate=%d, is_used_v4l: %d\n", + hw->sync_outside, hw->use_idr_framerate, hw->is_used_v4l); + + if (i_only_flag & 0x100) + hw->i_only = i_only_flag & 0xff; + if (hw->i_only) + hw->dpb.first_insert_frame = FirstInsertFrm_SKIPDONE; + + if ((unsigned long) hw->vh264_amstream_dec_info.param + & 0x08) + hw->no_poc_reorder_flag = 1; + + error_recovery_mode_in = 1; /*ucode control?*/ + if (hw->error_proc_policy & 0x80000000) + hw->send_error_frame_flag = hw->error_proc_policy & 0x1; + else if ((unsigned long) hw->vh264_amstream_dec_info.param & 0x20) + hw->send_error_frame_flag = 0; /*Don't display mark err 
frames*/ + + if (!is_reset) { + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &(hw->vfpool[hw->cur_pool][i]); + hw->vfpool[hw->cur_pool][i].index = -1; /* VF_BUF_NUM; */ + hw->vfpool[hw->cur_pool][i].bufWidth = 1920; + kfifo_put(&hw->newframe_q, vf); + } + } + + hw->duration_from_pts_done = 0; + + hw->p_last_vf = NULL; + hw->vh264_stream_switching_state = SWITCHING_STATE_OFF; + hw->hevc_cur_buf_idx = 0xffff; + + init_waitqueue_head(&hw->wait_q); + + return; +} + +static s32 vh264_init(struct vdec_h264_hw_s *hw) +{ + int size = -1; + int fw_size = 0x1000 * 16; + int fw_mmu_size = 0x1000 * 16; + struct firmware_s *fw = NULL, *fw_mmu = NULL; + + /* int trickmode_fffb = 0; */ + + /* pr_info("\nvh264_init\n"); */ + /* init_timer(&hw->recycle_timer); */ + + /* timer init */ + timer_setup(&hw->check_timer, check_timer_func, 0); + hw->check_timer.expires = jiffies + CHECK_INTERVAL; + + /* add_timer(&hw->check_timer); */ + hw->stat |= STAT_TIMER_ARM; + hw->stat |= STAT_ISR_REG; + + mutex_init(&hw->chunks_mutex); + vh264_local_init(hw, false); + INIT_WORK(&hw->work, vh264_work); + INIT_WORK(&hw->notify_work, vh264_notify_work); + INIT_WORK(&hw->timeout_work, vh264_timeout_work); +#ifdef MH264_USERDATA_ENABLE + INIT_WORK(&hw->user_data_ready_work, user_data_ready_notify_work); +#endif + + /*if (!amvdec_enable_flag) { + amvdec_enable_flag = true; + amvdec_enable(); + if (hw->mmu_enable) + amhevc_enable(); + }*/ + if (hw->mmu_enable) { + + hw->frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + FRAME_MMU_MAP_SIZE, + &hw->frame_mmu_map_phy_addr, GFP_KERNEL); + if (hw->frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -ENOMEM; + } + } + + fw = vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + size = get_firmware_data(VIDEO_DEC_H264_MULTI, fw->data); + if (size < 0) { + pr_err("get firmware 
fail.\n"); + vfree(fw); + return -1; + } + + fw->len = size; + hw->fw = fw; + + if (hw->mmu_enable) { + fw_mmu = vmalloc(sizeof(struct firmware_s) + fw_mmu_size); + if (IS_ERR_OR_NULL(fw_mmu)) + return -ENOMEM; + + size = get_firmware_data(VIDEO_DEC_H264_MULTI_MMU, fw_mmu->data); + if (size < 0) { + pr_err("get mmu fw fail.\n"); + vfree(fw_mmu); + return -1; + } + + fw_mmu->len = size; + hw->fw_mmu = fw_mmu; + } + + if (!tee_enabled()) { + /* -- ucode loading (amrisc and swap code) */ + hw->mc_cpu_addr = + dma_alloc_coherent(amports_get_dma_device(), MC_TOTAL_SIZE, + &hw->mc_dma_handle, GFP_KERNEL); + if (!hw->mc_cpu_addr) { + amvdec_enable_flag = false; + amvdec_disable(); + hw->vdec_pg_enable_flag = 0; + if (hw->mmu_enable) + amhevc_disable(); + pr_info("vh264_init: Can not allocate mc memory.\n"); + return -ENOMEM; + } + + /*pr_info("264 ucode swap area: phyaddr %p, cpu vaddr %p\n", + (void *)hw->mc_dma_handle, hw->mc_cpu_addr); + */ + + /*ret = amvdec_loadmc_ex(VFORMAT_H264, NULL, buf);*/ + + /*header*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_HEADER, + fw->data + 0x4000, MC_SWAP_SIZE); + /*data*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_DATA, + fw->data + 0x2000, MC_SWAP_SIZE); + /*mmco*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_MMCO, + fw->data + 0x6000, MC_SWAP_SIZE); + /*list*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_LIST, + fw->data + 0x3000, MC_SWAP_SIZE); + /*slice*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_SLICE, + fw->data + 0x5000, MC_SWAP_SIZE); + /*main*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_MAIN, + fw->data, 0x2000); + /*data*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_MAIN + 0x2000, + fw->data + 0x2000, 0x1000); + /*slice*/ + memcpy((u8 *) hw->mc_cpu_addr + MC_OFFSET_MAIN + 0x3000, + fw->data + 0x5000, 0x1000); + } + +#if 1 /* #ifdef BUFFER_MGR_IN_C */ + hw->lmem_addr = (dma_addr_t)dma_alloc_coherent(amports_get_dma_device(), + PAGE_SIZE, (dma_addr_t *)&hw->lmem_phy_addr, GFP_KERNEL); + + if (hw->lmem_addr == 0) { + 
pr_err("%s: failed to alloc lmem buffer\n", __func__); + return -1; + } + pr_debug("%s, phy_addr=%lx vaddr=%p\n", + __func__, hw->lmem_phy_addr, (void *)hw->lmem_addr); + + if (prefix_aux_buf_size > 0 || + suffix_aux_buf_size > 0) { + u32 aux_buf_size; + hw->prefix_aux_size = AUX_BUF_ALIGN(prefix_aux_buf_size); + hw->suffix_aux_size = AUX_BUF_ALIGN(suffix_aux_buf_size); + aux_buf_size = hw->prefix_aux_size + hw->suffix_aux_size; + hw->aux_addr = dma_alloc_coherent(amports_get_dma_device(), + aux_buf_size, &hw->aux_phy_addr, + GFP_KERNEL); + if (hw->aux_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + return -1; + } + + hw->sei_data_buf = kmalloc(SEI_DATA_SIZE, GFP_KERNEL); + if (hw->sei_data_buf == NULL) { + pr_err("%s: failed to alloc sei itu data buffer\n", + __func__); + return -1; + } + hw->sei_itu_data_buf = kmalloc(SEI_ITU_DATA_SIZE, GFP_KERNEL); + if (hw->sei_itu_data_buf == NULL) { + pr_err("%s: failed to alloc sei itu data buffer\n", + __func__); + dma_free_coherent(amports_get_dma_device(), + hw->prefix_aux_size + hw->suffix_aux_size, hw->aux_addr, + hw->aux_phy_addr); + hw->aux_addr = NULL; + kfree(hw->sei_data_buf); + hw->sei_data_buf = NULL; + + return -1; + } + + if (NULL == hw->sei_user_data_buffer) { + hw->sei_user_data_buffer = kmalloc(USER_DATA_SIZE, + GFP_KERNEL); + if (!hw->sei_user_data_buffer) { + pr_info("%s: Can not allocate sei_data_buffer\n", + __func__); + dma_free_coherent(amports_get_dma_device(), + hw->prefix_aux_size + hw->suffix_aux_size, hw->aux_addr, + hw->aux_phy_addr); + hw->aux_addr = NULL; + kfree(hw->sei_data_buf); + hw->sei_data_buf = NULL; + kfree(hw->sei_itu_data_buf); + hw->sei_itu_data_buf = NULL; + + return -1; + } + hw->sei_user_data_wp = 0; + } + } +/* BUFFER_MGR_IN_C */ +#endif + hw->stat |= STAT_MC_LOAD; + + /* add memory barrier */ + wmb(); + + return 0; +} + +static int vh264_stop(struct vdec_h264_hw_s *hw) +{ + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; 
+ } +#ifdef VDEC_DW + WRITE_VREG(MDEC_DOUBLEW_CFG0, 0); + WRITE_VREG(MDEC_DOUBLEW_CFG1, 0); +#endif +#ifdef MH264_USERDATA_ENABLE + cancel_work_sync(&hw->user_data_ready_work); +#endif + cancel_work_sync(&hw->notify_work); + cancel_work_sync(&hw->timeout_work); + cancel_work_sync(&hw->work); + + if (hw->stat & STAT_MC_LOAD) { + if (hw->mc_cpu_addr != NULL) { + dma_free_coherent(amports_get_dma_device(), + MC_TOTAL_SIZE, hw->mc_cpu_addr, + hw->mc_dma_handle); + hw->mc_cpu_addr = NULL; + } + if (hw->frame_mmu_map_addr != NULL) { + dma_free_coherent(amports_get_dma_device(), + FRAME_MMU_MAP_SIZE, hw->frame_mmu_map_addr, + hw->frame_mmu_map_phy_addr); + hw->frame_mmu_map_addr = NULL; + } + + } + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + if (hw->lmem_addr) { + dma_free_coherent(amports_get_dma_device(), + PAGE_SIZE, (void *)hw->lmem_addr, + hw->lmem_phy_addr); + hw->lmem_addr = 0; + } + + if (hw->aux_addr) { + dma_free_coherent(amports_get_dma_device(), + hw->prefix_aux_size + hw->suffix_aux_size, hw->aux_addr, + hw->aux_phy_addr); + hw->aux_addr = NULL; + } + if (hw->sei_data_buf != NULL) { + kfree(hw->sei_data_buf); + hw->sei_data_buf = NULL; + } + if (hw->sei_itu_data_buf != NULL) { + kfree(hw->sei_itu_data_buf); + hw->sei_itu_data_buf = NULL; + } + if (hw->sei_user_data_buffer != NULL) { + kfree(hw->sei_user_data_buffer); + hw->sei_user_data_buffer = NULL; + } + /* amvdec_disable(); */ + + vfree(hw->fw); + hw->fw = NULL; + + if (hw->mmu_enable) { + vfree(hw->fw_mmu); + hw->fw_mmu = NULL; + } + + dpb_print(DECODE_ID(hw), 0, + "%s\n", + __func__); + return 0; +} + +static void wait_vmh264_search_done(struct vdec_h264_hw_s *hw) +{ + u32 vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP); + int count = 0; + do { + usleep_range(100, 500); + if (vld_rp == READ_VREG(VLD_MEM_VIFIFO_RP)) + break; + if (count > 2000) { + dpb_print(DECODE_ID(hw), + PRINT_FLAG_ERROR, "%s timeout count %d vld_rp 0x%x VLD_MEM_VIFIFO_RP 0x%x\n", 
+ __func__, count, vld_rp, READ_VREG(VLD_MEM_VIFIFO_RP)); + break; + } else + vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP); + count++; + } while (1); +} + +static void vh264_notify_work(struct work_struct *work) +{ + struct vdec_h264_hw_s *hw = container_of(work, + struct vdec_h264_hw_s, notify_work); + struct vdec_s *vdec = hw_to_vdec(hw); + + if (hw->is_used_v4l) + return; + + if (vdec->fr_hint_state == VDEC_NEED_HINT) { + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *)((unsigned long)hw->frame_dur)); + vdec->fr_hint_state = VDEC_HINTED; + } + + return; +} + +#ifdef MH264_USERDATA_ENABLE +static void vmh264_reset_udr_mgr(struct vdec_h264_hw_s *hw) +{ + hw->wait_for_udr_send = 0; + hw->sei_itu_data_len = 0; + memset(&hw->ud_record, 0, sizeof(hw->ud_record)); +} + +static void vmh264_crate_userdata_manager( + struct vdec_h264_hw_s *hw, + u8 *userdata_buf, + int buf_len) +{ + if (hw) { + + + mutex_init(&hw->userdata_mutex); + + memset(&hw->userdata_info, 0, + sizeof(struct mh264_userdata_info_t)); + hw->userdata_info.data_buf = userdata_buf; + hw->userdata_info.buf_len = buf_len; + hw->userdata_info.data_buf_end = userdata_buf + buf_len; + + vmh264_reset_udr_mgr(hw); + + } +} + +static void vmh264_destroy_userdata_manager(struct vdec_h264_hw_s *hw) +{ + if (hw) + memset(&hw->userdata_info, + 0, + sizeof(struct mh264_userdata_info_t)); +} + +/* +#define DUMP_USERDATA_RECORD +*/ +#ifdef DUMP_USERDATA_RECORD + +#define MAX_USER_DATA_SIZE 3145728 +static void *user_data_buf; +static unsigned char *pbuf_start; +static int total_len; +static int bskip; +static int n_userdata_id; + +static void print_data(unsigned char *pdata, + int len, + unsigned int poc_number, + unsigned int flag, + unsigned int duration, + unsigned int vpts, + unsigned int vpts_valid, + int rec_id) +{ + int nLeft; + + nLeft = len; +#if 0 + pr_info("%d len:%d, flag:%d, dur:%d, vpts:0x%x, valid:%d, poc:%d\n", + rec_id, len, flag, + duration, vpts, vpts_valid, poc_number); 
+#endif + pr_info("%d len = %d, flag = %d, vpts = 0x%x\n", + rec_id, len, flag, vpts); + + if (len == 96) { + int i; + nLeft = 72; + while (nLeft >= 16) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7], + pdata[8], pdata[9], pdata[10], pdata[11], + pdata[12], pdata[13], pdata[14], pdata[15]); + nLeft -= 16; + pdata += 16; + } + + + while (nLeft > 0) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + + i = 0; + nLeft = 96-72; + while (i < nLeft) { + if (pdata[0] != 0) { + pr_info("some data error\n"); + break; + } + pdata++; + i++; + } + } else { + while (nLeft >= 16) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7], + pdata[8], pdata[9], pdata[10], pdata[11], + pdata[12], pdata[13], pdata[14], pdata[15]); + nLeft -= 16; + pdata += 16; + } + + + while (nLeft > 0) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } + + } +} + +static void push_to_buf(struct vdec_h264_hw_s *hw, + u8 *pdata, + int len, + struct userdata_meta_info_t *pmeta); + +static void dump_userdata_record(struct vdec_h264_hw_s *hw, + struct mh264_userdata_record_t *record) +{ + if (record && hw) { + u8 *pdata; + + pdata = hw->userdata_info.data_buf + record->rec_start; +/* + print_data(pdata, + record->rec_len, + record->meta_info.flags, + record->meta_info.duration, + record->meta_info.vpts, + record->meta_info.vpts_valid, + n_record_id); +*/ + push_to_buf(hw, pdata, record->rec_len, &record->meta_info); + n_userdata_id++; + } +} + + +static void push_to_buf(struct vdec_h264_hw_s *hw, + u8 *pdata, 
int len, + struct userdata_meta_info_t *pmeta) +{ + u32 *pLen; + int info_cnt; + u8 *pbuf_end; + + if (!user_data_buf) + return; + + if (bskip) { + pr_info("over size, skip\n"); + return; + } + info_cnt = 0; + pLen = (u32 *)pbuf_start; + + *pLen = len; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->poc_number; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->duration; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->flags; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts_valid; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + + *pLen = n_userdata_id; + pbuf_start += sizeof(u32); + info_cnt++; + pLen++; + + + + pbuf_end = (u8 *)hw->sei_user_data_buffer + USER_DATA_SIZE; + if (pdata + len > pbuf_end) { + int first_section_len; + + first_section_len = pbuf_end - pdata; + memcpy(pbuf_start, pdata, first_section_len); + pdata = (u8 *)hw->sei_user_data_buffer; + pbuf_start += first_section_len; + memcpy(pbuf_start, pdata, len - first_section_len); + pbuf_start += len - first_section_len; + } else { + memcpy(pbuf_start, pdata, len); + pbuf_start += len; + } + + total_len += len + info_cnt * sizeof(u32); + if (total_len >= MAX_USER_DATA_SIZE-4096) + bskip = 1; +} + +static void show_user_data_buf(void) +{ + u8 *pbuf; + int len; + unsigned int flag; + unsigned int duration; + unsigned int vpts; + unsigned int vpts_valid; + unsigned int poc_number; + int rec_id; + + pr_info("show user data buf\n"); + pbuf = user_data_buf; + + while (pbuf < pbuf_start) { + u32 *pLen; + + pLen = (u32 *)pbuf; + + len = *pLen; + pLen++; + pbuf += sizeof(u32); + + poc_number = *pLen; + pLen++; + pbuf += sizeof(u32); + + duration = *pLen; + pLen++; + pbuf += sizeof(u32); + + flag = *pLen; + pLen++; + pbuf += sizeof(u32); + + vpts = *pLen; + pLen++; + pbuf += sizeof(u32); + + vpts_valid = *pLen; + pLen++; + pbuf += 
sizeof(u32); + + rec_id = *pLen; + pLen++; + pbuf += sizeof(u32); + + print_data(pbuf, len, poc_number, flag, + duration, vpts, + vpts_valid, rec_id); + pbuf += len; + msleep(30); + } +} + +static int vmh264_init_userdata_dump(void) +{ + user_data_buf = kmalloc(MAX_USER_DATA_SIZE, GFP_KERNEL); + if (user_data_buf) + return 1; + else + return 0; +} + +static void vmh264_dump_userdata(void) +{ + if (user_data_buf) { + show_user_data_buf(); + kfree(user_data_buf); + user_data_buf = NULL; + } +} + +static void vmh264_reset_user_data_buf(void) +{ + total_len = 0; + pbuf_start = user_data_buf; + bskip = 0; + n_userdata_id = 0; +} +#endif + + +static void vmh264_udc_fill_vpts(struct vdec_h264_hw_s *hw, + int frame_type, + u32 vpts, + u32 vpts_valid) +{ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + + unsigned char *pdata; + u8 *pmax_sei_data_buffer; + u8 *sei_data_buf; + int i; + int wp; + int data_length; + struct mh264_userdata_record_t *p_userdata_rec; + + +#ifdef MH264_USERDATA_ENABLE + struct userdata_meta_info_t meta_info; + memset(&meta_info, 0, sizeof(meta_info)); +#endif + + if (hw->sei_itu_data_len <= 0) + return; + + pdata = (u8 *)hw->sei_user_data_buffer + hw->sei_user_data_wp; + pmax_sei_data_buffer = (u8 *)hw->sei_user_data_buffer + USER_DATA_SIZE; + sei_data_buf = (u8 *)hw->sei_itu_data_buf; + for (i = 0; i < hw->sei_itu_data_len; i++) { + *pdata++ = sei_data_buf[i]; + if (pdata >= pmax_sei_data_buffer) + pdata = (u8 *)hw->sei_user_data_buffer; + } + + hw->sei_user_data_wp = (hw->sei_user_data_wp + + hw->sei_itu_data_len) % USER_DATA_SIZE; + hw->sei_itu_data_len = 0; + +#ifdef MH264_USERDATA_ENABLE + meta_info.duration = hw->frame_dur; + meta_info.flags |= (VFORMAT_H264 << 3); + + meta_info.vpts = vpts; + meta_info.vpts_valid = vpts_valid; + meta_info.poc_number = + p_H264_Dpb->mVideo.dec_picture->poc; + + + wp = hw->sei_user_data_wp; + + if (hw->sei_user_data_wp > hw->userdata_info.last_wp) + data_length = wp - hw->userdata_info.last_wp; + else + 
data_length = wp + hw->userdata_info.buf_len + - hw->userdata_info.last_wp; + + if (data_length & 0x7) + data_length = (((data_length + 8) >> 3) << 3); + + p_userdata_rec = &hw->ud_record; + p_userdata_rec->meta_info = meta_info; + p_userdata_rec->rec_start = hw->userdata_info.last_wp; + p_userdata_rec->rec_len = data_length; + hw->userdata_info.last_wp = wp; + + p_userdata_rec->meta_info.flags |= + p_H264_Dpb->mVideo.dec_picture->pic_struct << 12; + + hw->wait_for_udr_send = 1; + vdec_schedule_work(&hw->user_data_ready_work); +#endif +} + + +static void user_data_ready_notify_work(struct work_struct *work) +{ + struct vdec_h264_hw_s *hw = container_of(work, + struct vdec_h264_hw_s, user_data_ready_work); + + + mutex_lock(&hw->userdata_mutex); + + hw->userdata_info.records[hw->userdata_info.write_index] + = hw->ud_record; + hw->userdata_info.write_index++; + if (hw->userdata_info.write_index >= USERDATA_FIFO_NUM) + hw->userdata_info.write_index = 0; + + mutex_unlock(&hw->userdata_mutex); + +#ifdef DUMP_USERDATA_RECORD + dump_userdata_record(hw, &hw->ud_record); +#endif + vdec_wakeup_userdata_poll(hw_to_vdec(hw)); + + hw->wait_for_udr_send = 0; +} + +static int vmh264_user_data_read(struct vdec_s *vdec, + struct userdata_param_t *puserdata_para) +{ + struct vdec_h264_hw_s *hw = NULL; + int rec_ri, rec_wi; + int rec_len; + u8 *rec_data_start; + u8 *pdest_buf; + struct mh264_userdata_record_t *p_userdata_rec; + u32 data_size; + u32 res; + int copy_ok = 1; + + hw = (struct vdec_h264_hw_s *)vdec->private; + + pdest_buf = puserdata_para->pbuf_addr; + + mutex_lock(&hw->userdata_mutex); + +/* + pr_info("ri = %d, wi = %d\n", + lg_p_mpeg12_userdata_info->read_index, + lg_p_mpeg12_userdata_info->write_index); +*/ + rec_ri = hw->userdata_info.read_index; + rec_wi = hw->userdata_info.write_index; + + if (rec_ri == rec_wi) { + mutex_unlock(&hw->userdata_mutex); + return 0; + } + + p_userdata_rec = hw->userdata_info.records + rec_ri; + + rec_len = p_userdata_rec->rec_len; + 
rec_data_start = p_userdata_rec->rec_start + hw->userdata_info.data_buf; +/* + pr_info("rec_len:%d, rec_start:%d, buf_len:%d\n", + p_userdata_rec->rec_len, + p_userdata_rec->rec_start, + puserdata_para->buf_len); +*/ + if (rec_len <= puserdata_para->buf_len) { + /* dvb user data buffer is enought to + copy the whole recored. */ + data_size = rec_len; + if (rec_data_start + data_size + > hw->userdata_info.data_buf_end) { + int first_section_len; + + first_section_len = hw->userdata_info.buf_len - + p_userdata_rec->rec_start; + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + first_section_len); + if (res) { + pr_info("p1 read not end res=%d, request=%d\n", + res, first_section_len); + copy_ok = 0; + + p_userdata_rec->rec_len -= + first_section_len - res; + p_userdata_rec->rec_start += + first_section_len - res; + puserdata_para->data_size = + first_section_len - res; + } else { + res = (u32)copy_to_user( + (void *)(pdest_buf+first_section_len), + (void *)hw->userdata_info.data_buf, + data_size - first_section_len); + if (res) { + pr_info("p2 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + p_userdata_rec->rec_len -= + data_size - res; + p_userdata_rec->rec_start = + data_size - first_section_len - res; + puserdata_para->data_size = + data_size - res; + } + } else { + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + data_size); + if (res) { + pr_info("p3 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start += data_size - res; + puserdata_para->data_size = data_size - res; + } + + if (copy_ok) { + hw->userdata_info.read_index++; + if (hw->userdata_info.read_index >= USERDATA_FIFO_NUM) + hw->userdata_info.read_index = 0; + } + } else { + /* dvb user data buffer is not enought + to copy the whole recored. 
*/ + data_size = puserdata_para->buf_len; + if (rec_data_start + data_size + > hw->userdata_info.data_buf_end) { + int first_section_len; + + first_section_len = hw->userdata_info.buf_len - + p_userdata_rec->rec_start; + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + first_section_len); + if (res) { + pr_info("p4 read not end res=%d, request=%d\n", + res, first_section_len); + copy_ok = 0; + p_userdata_rec->rec_len -= + first_section_len - res; + p_userdata_rec->rec_start += + first_section_len - res; + puserdata_para->data_size = + first_section_len - res; + } else { + /* first secton copy is ok*/ + res = (u32)copy_to_user( + (void *)(pdest_buf+first_section_len), + (void *)hw->userdata_info.data_buf, + data_size - first_section_len); + if (res) { + pr_info("p5 read not end res=%d, request=%d\n", + res, + data_size - first_section_len); + copy_ok = 0; + } + + p_userdata_rec->rec_len -= + data_size - res; + p_userdata_rec->rec_start = + data_size - first_section_len - res; + puserdata_para->data_size = + data_size - res; + } + } else { + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + data_size); + if (res) { + pr_info("p6 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start += data_size - res; + puserdata_para->data_size = data_size - res; + } + + if (copy_ok) { + hw->userdata_info.read_index++; + if (hw->userdata_info.read_index >= USERDATA_FIFO_NUM) + hw->userdata_info.read_index = 0; + } + + } + puserdata_para->meta_info = p_userdata_rec->meta_info; + + if (hw->userdata_info.read_index <= hw->userdata_info.write_index) + puserdata_para->meta_info.records_in_que = + hw->userdata_info.write_index - + hw->userdata_info.read_index; + else + puserdata_para->meta_info.records_in_que = + hw->userdata_info.write_index + + USERDATA_FIFO_NUM - + hw->userdata_info.read_index; + + puserdata_para->version = (0<<24|0<<16|0<<8|1); + + 
mutex_unlock(&hw->userdata_mutex); + + return 1; +} + +static void vmh264_reset_userdata_fifo(struct vdec_s *vdec, int bInit) +{ + struct vdec_h264_hw_s *hw = NULL; + + hw = (struct vdec_h264_hw_s *)vdec->private; + + if (hw) { + mutex_lock(&hw->userdata_mutex); + pr_info("vmh264_reset_userdata_fifo: bInit: %d, ri: %d, wi: %d\n", + bInit, + hw->userdata_info.read_index, + hw->userdata_info.write_index); + hw->userdata_info.read_index = 0; + hw->userdata_info.write_index = 0; + + if (bInit) + hw->userdata_info.last_wp = 0; + mutex_unlock(&hw->userdata_mutex); + } +} + +static void vmh264_wakeup_userdata_poll(struct vdec_s *vdec) +{ + amstream_wakeup_userdata_poll(vdec); +} + +#endif + +static int vmh264_get_ps_info(struct vdec_h264_hw_s *hw, + u32 param1, u32 param2, u32 param3, u32 param4, + struct aml_vdec_ps_infos *ps) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + struct vdec_s *vdec = hw_to_vdec(hw); +#endif + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vdec_pic_info pic; + int mb_width, mb_total; + int mb_height = 0; + int active_buffer_spec_num, dec_dpb_size; + int max_reference_size ,level_idc; + u32 frame_mbs_only_flag; + u32 chroma_format_idc; + u32 crop_bottom, crop_right; + int sub_width_c = 0, sub_height_c = 0; + u32 frame_width, frame_height; + u32 used_reorder_dpb_size_margin + = hw->reorder_dpb_size_margin; + + level_idc = param4 & 0xff; + max_reference_size = (param4 >> 8) & 0xff; + hw->dpb.mSPS.level_idc = level_idc; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec->master || vdec->slave) + used_reorder_dpb_size_margin = + reorder_dpb_size_margin_dv; +#endif + mb_width = param1 & 0xff; + mb_total = (param1 >> 8) & 0xffff; + if (!mb_width && mb_total) /*for 4k2k*/ + mb_width = 256; + if (mb_width) + mb_height = mb_total/mb_width; + if (mb_width <= 0 || mb_height <= 0 || + is_oversize(mb_width << 4, mb_height << 4)) { + dpb_print(DECODE_ID(hw), 0, + "!!!wrong param1 0x%x 
mb_width/mb_height (0x%x/0x%x) %x\r\n", + param1, + mb_width, + mb_height); + hw->error_frame_width = mb_width << 4; + hw->error_frame_height = mb_height << 4; + return -1; + } + hw->error_frame_width = 0; + hw->error_frame_height = 0; + + dec_dpb_size = get_dec_dpb_size(hw , mb_width, mb_height, level_idc); + + dpb_print(DECODE_ID(hw), 0, + "v4l restriction:%d, max buffering:%d, DPB size:%d, reorder frames:%d, margin:%d\n", + hw->bitstream_restriction_flag, + hw->max_dec_frame_buffering, + dec_dpb_size, + hw->num_reorder_frames, + used_reorder_dpb_size_margin); + + active_buffer_spec_num = + dec_dpb_size + + used_reorder_dpb_size_margin; + + if (active_buffer_spec_num > MAX_VF_BUF_NUM) { + active_buffer_spec_num = MAX_VF_BUF_NUM; + dec_dpb_size = active_buffer_spec_num + - used_reorder_dpb_size_margin; + } + + hw->dpb.mDPB.size = active_buffer_spec_num; + + if (hw->no_poc_reorder_flag) + dec_dpb_size = 1; + + /* + * crop + * AV_SCRATCH_2 + * bit 15: frame_mbs_only_flag + * bit 13-14: chroma_format_idc + */ + hw->seq_info = param2; + frame_mbs_only_flag = (hw->seq_info >> 15) & 0x01; + if (hw->dpb.mSPS.profile_idc != 100 && + hw->dpb.mSPS.profile_idc != 110 && + hw->dpb.mSPS.profile_idc != 122 && + hw->dpb.mSPS.profile_idc != 144) { + hw->dpb.chroma_format_idc = 1; + } + chroma_format_idc = hw->dpb.chroma_format_idc; + + /* + * AV_SCRATCH_6 bit 31-16 = (left << 8 | right ) << 1 + * AV_SCRATCH_6 bit 15-0 = (top << 8 | bottom ) << + * (2 - frame_mbs_only_flag) + */ + switch (chroma_format_idc) { + case 1: + sub_width_c = 2; + sub_height_c = 2; + break; + + case 2: + sub_width_c = 2; + sub_height_c = 1; + break; + + case 3: + sub_width_c = 1; + sub_height_c = 1; + break; + + default: + break; + } + + if (chroma_format_idc == 0) { + crop_right = hw->dpb.frame_crop_right_offset; + crop_bottom = hw->dpb.frame_crop_bottom_offset * + (2 - frame_mbs_only_flag); + } else { + crop_right = sub_width_c * hw->dpb.frame_crop_right_offset; + crop_bottom = sub_height_c * 
hw->dpb.frame_crop_bottom_offset * + (2 - frame_mbs_only_flag); + } + + frame_width = mb_width << 4; + frame_height = mb_height << 4; + + frame_width = frame_width - crop_right; + frame_height = frame_height - crop_bottom; + + ps->profile = level_idc; + ps->ref_frames = max_reference_size; + ps->mb_width = mb_width; + ps->mb_height = mb_height; + ps->visible_width = frame_width; + ps->visible_height = frame_height; + ps->coded_width = ALIGN(mb_width << 4, 64); + ps->coded_height = ALIGN(mb_height << 4, 64); + ps->dpb_frames = dec_dpb_size + 1; /* +1 for two frames in one packet */ + ps->dpb_margin = used_reorder_dpb_size_margin; + ps->dpb_size = active_buffer_spec_num; + ps->field = frame_mbs_only_flag ? + V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED; + + /* update reoder and margin num. */ + if (hw->res_ch_flag) { + vdec_v4l_get_pic_info(ctx, &pic); + ps->dpb_frames = pic.dpb_frames; + ps->dpb_margin = pic.dpb_margin; + } + + if ((ps->dpb_frames >= 16) && (ps->coded_width > 1280) && + (ps->coded_height > 768)) { + if (ps->field == V4L2_FIELD_NONE) { + ps->dpb_frames = adjust_dpb_size; + } else { + ps->dpb_frames = adjust_dpb_size - 2; + } + } + + dpb_print(DECODE_ID(hw), 0, + "Res:%dx%d, DPB size:%d, margin:%d, scan:%s\n", + ps->visible_width, ps->visible_height, + ps->dpb_frames, ps->dpb_margin, + (ps->field == V4L2_FIELD_NONE) ? 
"P" : "I"); + + return 0; +} + +static int v4l_res_change(struct vdec_h264_hw_s *hw, + u32 param1, u32 param2, + u32 param3, u32 param4) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int ret = 0; + int dec_dpb_size_change = hw->dpb.dec_dpb_size != get_dec_dpb_size_active(hw, param1, param4); + + if (ctx->param_sets_from_ucode && + hw->res_ch_flag == 0) { + if (((param1 != 0 && + hw->seq_info2 != param1) || hw->csd_change_flag) && + hw->seq_info2 != 0) { + if (((hw->seq_info2 & 0x80ffffff) != (param1 & 0x80ffffff)) || dec_dpb_size_change) { /*picture size changed*/ + struct aml_vdec_ps_infos ps; + dpb_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "h264 res_change\n"); + if (vmh264_get_ps_info(hw, param1, + param2, param3, param4, &ps) < 0) { + dpb_print(DECODE_ID(hw), 0, + "set parameters error\n"); + } + hw->v4l_params_parsed = false; + vdec_v4l_set_ps_infos(ctx, &ps); + vdec_v4l_res_ch_event(ctx); + hw->res_ch_flag = 1; + ctx->v4l_resolution_change = 1; + hw->bitstream_restriction_flag = hw->cfg_bitstream_restriction_flag; // restore the old value when v4l res change + amvdec_stop(); + if (hw->mmu_enable) + amhevc_stop(); + hw->eos = 1; + flush_dpb(p_H264_Dpb); + //del_timer_sync(&hw->check_timer); + ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__); + notify_v4l_eos(hw_to_vdec(hw)); + ATRACE_COUNTER("V_ST_DEC-submit_eos", 0); + ret = 1; + } + } + } + + return ret; + +} + +static void vh264_work_implement(struct vdec_h264_hw_s *hw, + struct vdec_s *vdec, int from) +{ + /* finished decoding one frame or error, + * notify vdec core to switch context + */ + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + + if (hw->dec_result == DEC_RESULT_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_START); + } else if (hw->dec_result == DEC_RESULT_AGAIN) + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_AGAIN); + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "%s 
dec_result %d %x %x %x\n", + __func__, + hw->dec_result, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP)); + + ATRACE_COUNTER("V_ST_DEC-work_state", hw->dec_result); + + if (!hw->mmu_enable) { + mutex_lock(&vmh264_mutex); + dealloc_buf_specs(hw, 0); + mutex_unlock(&vmh264_mutex); + } + hw->save_reg_f = READ_VREG(AV_SCRATCH_F); + hw->dpb.last_dpb_status = hw->dpb.dec_dpb_status; + if (hw->dec_result == DEC_RESULT_CONFIG_PARAM) { + u32 param1 = READ_VREG(AV_SCRATCH_1); + u32 param2 = READ_VREG(AV_SCRATCH_2); + u32 param3 = READ_VREG(AV_SCRATCH_6); + u32 param4 = READ_VREG(AV_SCRATCH_B); + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (hw->is_used_v4l && + ctx->param_sets_from_ucode) { + if (!v4l_res_change(hw, param1, param2, param3, param4)) { + if (!hw->v4l_params_parsed) { + struct aml_vdec_ps_infos ps; + dpb_print(DECODE_ID(hw), + PRINT_FLAG_DEC_DETAIL, + "h264 parsered csd data\n"); + if (vmh264_get_ps_info(hw, + param1, param2, + param3, param4, &ps) < 0) { + dpb_print(DECODE_ID(hw), 0, + "set parameters error\n"); + } + hw->v4l_params_parsed = true; + vdec_v4l_set_ps_infos(ctx, &ps); + + amvdec_stop(); + if (hw->mmu_enable) + amhevc_stop(); + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_START); + } else { + if (vh264_set_params(hw, param1, + param2, param3, param4, false) < 0) { + hw->init_flag = 0; + dpb_print(DECODE_ID(hw), 0, "set parameters error, init_flag: %u\n", + hw->init_flag); + } + + WRITE_VREG(AV_SCRATCH_0, (hw->max_reference_size<<24) | + (hw->dpb.mDPB.size<<16) | + (hw->dpb.mDPB.size<<8)); + hw->res_ch_flag = 0; + start_process_time(hw); + return; + } + } + } else { + if (vh264_set_params(hw, param1, + param2, param3, param4, false) < 0) { + hw->init_flag = 0; + dpb_print(DECODE_ID(hw), 0, "set parameters error, init_flag: %u\n", + hw->init_flag); + } + + WRITE_VREG(AV_SCRATCH_0, (hw->max_reference_size<<24) | + (hw->dpb.mDPB.size<<16) | + 
(hw->dpb.mDPB.size<<8)); + start_process_time(hw); + return; + } + } else + if (((hw->dec_result == DEC_RESULT_GET_DATA) || + (hw->dec_result == DEC_RESULT_GET_DATA_RETRY)) + && (hw_to_vdec(hw)->next_status != + VDEC_STATUS_DISCONNECTED)) { + if (!vdec_has_more_input(vdec)) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } + + if (hw->dec_result == DEC_RESULT_GET_DATA) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA %x %x %x\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP)); + mutex_lock(&hw->chunks_mutex); + vdec_vframe_dirty(vdec, hw->chunk); + hw->chunk = NULL; + mutex_unlock(&hw->chunks_mutex); + vdec_clean_input(vdec); + } + if ((hw->dec_result == DEC_RESULT_GET_DATA_RETRY) && + ((1000 * (jiffies - hw->get_data_start_time) / HZ) + > get_data_timeout_val)) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA_RETRY timeout\n", + __func__); + goto result_done; + } + if (is_buffer_available(vdec)) { + int r; + int decode_size; + r = vdec_prepare_input(vdec, &hw->chunk); + if (r < 0 && (hw_to_vdec(hw)->next_status != + VDEC_STATUS_DISCONNECTED)) { + hw->dec_result = DEC_RESULT_GET_DATA_RETRY; + + dpb_print(DECODE_ID(hw), + PRINT_FLAG_VDEC_DETAIL, + "vdec_prepare_input: Insufficient data\n"); + vdec_schedule_work(&hw->work); + return; + } + hw->dec_result = DEC_RESULT_NONE; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: chunk size 0x%x\n", + __func__, hw->chunk->size); + + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA)) { + int jj; + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap( + hw->chunk->block->start + + hw->chunk->offset, r); + else + data = ((u8 *) + hw->chunk->block->start_virt) + + hw->chunk->offset; + + for (jj = 0; jj < r; jj++) { + if ((jj & 0xf) == 0) + dpb_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + dpb_print_cont(DECODE_ID(hw), 
+ PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + dpb_print_cont(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + WRITE_VREG(POWER_CTL_VLD, + READ_VREG(POWER_CTL_VLD) | + (0 << 10) | (1 << 9) | (1 << 6)); + WRITE_VREG(H264_DECODE_INFO, (1<<13)); + decode_size = hw->chunk->size + + (hw->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + WRITE_VREG(H264_DECODE_SIZE, decode_size); + WRITE_VREG(VIFF_BIT_CNT, decode_size * 8); + vdec_enable_input(vdec); + + WRITE_VREG(DPB_STATUS_REG, H264_ACTION_SEARCH_HEAD); + start_process_time(hw); + } else{ + if (hw_to_vdec(hw)->next_status + != VDEC_STATUS_DISCONNECTED) { + hw->dec_result = DEC_RESULT_GET_DATA_RETRY; + vdec_schedule_work(&hw->work); + } + } + return; + } else if (hw->dec_result == DEC_RESULT_DONE || + hw->dec_result == DEC_RESULT_TIMEOUT) { + /* if (!hw->ctx_valid) + hw->ctx_valid = 1; */ + if ((hw->dec_result == DEC_RESULT_TIMEOUT) && + !hw->i_only && (hw->error_proc_policy & 0x2)) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + dpb_print(DECODE_ID(hw), 0, + "%s, decode timeout flush dpb\n", + __func__); + flush_dpb(p_H264_Dpb); + } +result_done: + { + if (hw->error_proc_policy & 0x8000) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int i; + struct DecodedPictureBuffer *p_Dpb = &p_H264_Dpb->mDPB; + + for (i = 0; i < p_Dpb->used_size; i++) { + int i_flag = p_Dpb->fs[i]->bottom_field || p_Dpb->fs[i]->top_field; + int threshold = (i_flag || (hw->max_reference_size >= 12)) ? 
((50 + p_Dpb->used_size) * 2) : 50 + p_Dpb->used_size; + if ((p_Dpb->fs[i]->dpb_frame_count + threshold + < p_H264_Dpb->dpb_frame_count) && + p_Dpb->fs[i]->is_reference && + !p_Dpb->fs[i]->is_long_term && + p_Dpb->fs[i]->is_output) { + dpb_print(DECODE_ID(hw), + 0, + "unmark reference dpb_frame_count diffrence large in dpb\n"); + unmark_for_reference(p_Dpb, p_Dpb->fs[i]); + update_ref_list(p_Dpb); + } + } + } + } + if (hw->mmu_enable + && hw->frame_busy && hw->frame_done) { + long used_4k_num; + hevc_sao_wait_done(hw); + if (hw->hevc_cur_buf_idx != 0xffff) { + used_4k_num = + (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16); + if (used_4k_num >= 0) + dpb_print(DECODE_ID(hw), + PRINT_FLAG_MMU_DETAIL, + "release unused buf , used_4k_num %ld index %d\n", + used_4k_num, hw->hevc_cur_buf_idx); + hevc_mmu_dma_check(hw_to_vdec(hw)); + decoder_mmu_box_free_idx_tail( + hw->mmu_box, + hw->hevc_cur_buf_idx, + used_4k_num); + hw->hevc_cur_buf_idx = 0xffff; + } + } + decode_frame_count[DECODE_ID(hw)]++; + if (hw->dpb.mSlice.slice_type == I_SLICE) { + hw->gvs.i_decoded_frames++; + } else if (hw->dpb.mSlice.slice_type == P_SLICE) { + hw->gvs.p_decoded_frames++; + } else if (hw->dpb.mSlice.slice_type == B_SLICE) { + hw->gvs.b_decoded_frames++; + } + amvdec_stop(); + + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s dec_result %d %x %x %x\n", + __func__, + hw->dec_result, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP)); + mutex_lock(&hw->chunks_mutex); + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + hw->chunk = NULL; + mutex_unlock(&hw->chunks_mutex); + } else if (hw->dec_result == DEC_RESULT_AGAIN) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + if (!vdec_has_more_input(vdec) && (hw_to_vdec(hw)->next_status != + VDEC_STATUS_DISCONNECTED) && (hw->no_decoder_buffer_flag == 0)) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } + amvdec_stop(); + if 
(hw->mmu_enable) + amhevc_stop(); + hw->no_decoder_buffer_flag = 0; + hw->next_again_flag = 1; + } else if (hw->dec_result == DEC_RESULT_EOS) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: end of stream\n", + __func__); + amvdec_stop(); + if (hw->mmu_enable) + amhevc_stop(); + hw->eos = 1; + flush_dpb(p_H264_Dpb); + if (hw->is_used_v4l) { + ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__); + notify_v4l_eos(hw_to_vdec(hw)); + ATRACE_COUNTER("V_ST_DEC-submit_eos", 0); + } + mutex_lock(&hw->chunks_mutex); + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + hw->chunk = NULL; + mutex_unlock(&hw->chunks_mutex); + vdec_clean_input(vdec); + } else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: force exit\n", + __func__); + amvdec_stop(); + if (hw->mmu_enable) + amhevc_stop(); + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + } else if (hw->dec_result == DEC_RESULT_NEED_MORE_BUFFER) { + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + bufmgr_h264_remove_unused_frame(p_H264_Dpb, 0); + if (!have_free_buf_spec(vdec)) { + if (vdec->next_status == VDEC_STATUS_DISCONNECTED) + hw->dec_result = DEC_RESULT_AGAIN; + else + hw->dec_result = DEC_RESULT_NEED_MORE_BUFFER; + vdec_schedule_work(&hw->work); + } else { + hw->get_data_count = 0x7fffffff; + WRITE_VREG(DPB_STATUS_REG, H264_ACTION_SEARCH_HEAD); + decode_frame_count[DECODE_ID(hw)]++; + if (p_H264_Dpb->mSlice.slice_type == I_SLICE) { + hw->gvs.i_decoded_frames++; + } else if (p_H264_Dpb->mSlice.slice_type == P_SLICE) { + hw->gvs.p_decoded_frames++; + } else if (p_H264_Dpb->mSlice.slice_type == B_SLICE) { + hw->gvs.b_decoded_frames++; + } + start_process_time(hw); + } + return; + } + + if (p_H264_Dpb->mVideo.dec_picture) { + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s, release decoded picture\n", __func__); + release_cur_decoding_buf(hw); + } + + 
WRITE_VREG(ASSIST_MBOX1_MASK, 0); + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; +#ifdef DETECT_WRONG_MULTI_SLICE + if (hw->dec_result != DEC_RESULT_AGAIN) + hw->last_picture_slice_count = 0; +#endif + ATRACE_COUNTER(hw->trace.decode_work_time_name, TRACE_WORK_WAIT_SEARCH_DONE_START); + wait_vmh264_search_done(hw); + ATRACE_COUNTER(hw->trace.decode_work_time_name, TRACE_WORK_WAIT_SEARCH_DONE_END); + /* mark itself has all HW resource released and input released */ + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (hw->switch_dvlayer_flag) { + if (vdec->slave) + vdec_set_next_sched(vdec, vdec->slave); + else if (vdec->master) + vdec_set_next_sched(vdec, vdec->master); + } else if (vdec->slave || vdec->master) + vdec_set_next_sched(vdec, vdec); +#endif + + if (from == 1) { + /* This is a timeout work */ + if (work_pending(&hw->work)) { + /* + * The vh264_work arrives at the last second, + * give it a chance to handle the scenario. + */ + return; + } + } + + if (hw->dec_result == DEC_RESULT_DONE || hw->dec_result == DEC_RESULT_CONFIG_PARAM) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_END); + } + + /* mark itself has all HW resource released and input released */ + if (vdec->parallel_dec == 1) { + if (hw->mmu_enable == 0) + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1); + else + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + } else + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + wake_up_interruptible(&hw->wait_q); + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode && + !hw->v4l_params_parsed) + vdec_v4l_write_frame_sync(ctx); + } + + ATRACE_COUNTER("V_ST_DEC-chunk_size", 0); + + if (hw->vdec_cb) + hw->vdec_cb(hw_to_vdec(hw), hw->vdec_cb_arg); +} + + +static void vh264_work(struct work_struct *work) +{ + struct vdec_h264_hw_s *hw = container_of(work, + struct vdec_h264_hw_s, work); + struct vdec_s 
*vdec = hw_to_vdec(hw);

	vh264_work_implement(hw, vdec, 0);
}


/*
 * Deferred handler for a decode timeout.  Runs the same completion path as
 * the normal work item (vh264_work_implement) but with from_timeout = 1.
 * If the regular work item is already queued it wins and this handler
 * backs off; timeout_processing is then checked by run_ready() below so the
 * scheduler does not hand the core back to us while timeout recovery is
 * still in flight.
 */
static void vh264_timeout_work(struct work_struct *work)
{
	struct vdec_h264_hw_s *hw = container_of(work,
		struct vdec_h264_hw_s, timeout_work);
	struct vdec_s *vdec = hw_to_vdec(hw);

	/* the normal completion work is pending; let it run instead */
	if (work_pending(&hw->work))
		return;

	hw->timeout_processing = 1;
	vh264_work_implement(hw, vdec, 1);
}

/*
 * Scheduler callback: decide whether this decoder instance can be given
 * the hardware core(s) now.
 *
 * Returns a mask of cores to claim (CORE_MASK_VDEC_1, optionally with
 * CORE_MASK_HEVC when the MMU path is active or parallel_dec != 1),
 * or 0 when the instance is not ready.  The 'mask' argument is not
 * consulted in this implementation.
 *
 * The checks are order-dependent: pending work, stream prefill level,
 * EOS/fatal state, display-queue valving, "again" throttling, and finally
 * output-buffer availability (with v4l2-specific refinements).
 */
static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask)
{
	bool ret = 0;
	struct vdec_h264_hw_s *hw =
		(struct vdec_h264_hw_s *)vdec->private;
	int tvp = vdec_secure(hw_to_vdec(hw)) ?
		CODEC_MM_FLAGS_TVP : 0;

	/* timeout recovery still running: refuse the core until both the
	 * normal and the timeout work items have fully drained */
	if (hw->timeout_processing &&
		(work_pending(&hw->work) || work_busy(&hw->work) ||
		work_pending(&hw->timeout_work) || work_busy(&hw->timeout_work))) {
		dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL,
			"h264 work pending, not ready for run.\n");
		return 0;
	}
	hw->timeout_processing = 0;
	/* one-time scatter-cache probe for the MMU path; logged for perf */
	if (!hw->first_sc_checked && hw->mmu_enable) {
		int size = decoder_mmu_box_sc_check(hw->mmu_box, tvp);
		hw->first_sc_checked =1;
		dpb_print(DECODE_ID(hw), 0,
			"vmh264 cached=%d need_size=%d speed= %d ms\n",
			size, (hw->need_cache_size >> PAGE_SHIFT),
			(int)(get_jiffies_64() - hw->sc_start_time) * 1000/HZ);
	}

	/* stream mode, before first init: wait until the ring buffer holds
	 * at least pre_decode_buf_level bytes (wrap-around handled) */
	if (vdec_stream_based(vdec) && (hw->init_flag == 0)
		&& pre_decode_buf_level != 0) {
		u32 rp, wp, level;

		rp = STBUF_READ(&vdec->vbuf, get_rp);
		wp = STBUF_READ(&vdec->vbuf, get_wp);
		if (wp < rp)
			level = vdec->input.size + wp - rp;
		else
			level = wp - rp;

		if (level < pre_decode_buf_level)
			return 0;
	}

#ifndef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
	/* without DV support a slave-driving master never runs here */
	if (vdec->master)
		return 0;
#endif
	if (hw->eos)
		return 0;

	if (hw->stat & DECODER_FATAL_ERROR_NO_MEM)
		return 0;

	/* valve: once the display queue is full enough, only let every
	 * third request through (valve_count cycles 2 -> 1 -> 0) */
	if (disp_vframe_valve_level &&
		kfifo_len(&hw->display_q) >=
		disp_vframe_valve_level) {
		hw->valve_count--;
		if (hw->valve_count <= 0)
			hw->valve_count = 2;
		else
			return 0;
	}
	/* stream mode after a DEC_RESULT_AGAIN: hold off until the parser
	 * has advanced the write pointer by at least again_threshold */
	if (hw->next_again_flag &&
		(!vdec_frame_based(vdec))) {
		u32 parser_wr_ptr = STBUF_READ(&vdec->vbuf, get_wp);
		if (parser_wr_ptr >= hw->pre_parser_wr_ptr &&
			(parser_wr_ptr - hw->pre_parser_wr_ptr) <
			again_threshold) {
			int r = vdec_sync_input(vdec);
			dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL,
				"%s buf lelvel:%x\n", __func__, r);
			return 0;
		}
	}

	if (h264_debug_flag & 0x20000000) {
		/* debug override: always claim ready */
		/* pr_info("%s, a\n", __func__); */
		ret = 1;
	} else
		ret = is_buffer_available(vdec);

#ifdef CONSTRAIN_MAX_BUF_NUM
	if (ret && (hw->dpb.mDPB.size > 0)) { /*make sure initialized*/
		if (run_ready_max_vf_only_num > 0 &&
			get_vf_ref_only_buf_count(hw) >=
			run_ready_max_vf_only_num
			)
			ret = 0;
		if (run_ready_display_q_num > 0 &&
			kfifo_len(&hw->display_q) >=
			run_ready_display_q_num)
			ret = 0;
		/*avoid more buffers consumed when
		switching resolution*/
		if (run_ready_max_buf_num == 0xff &&
			get_used_buf_count(hw) >
			hw->dpb.mDPB.size)
			ret = 0;
		else if (run_ready_max_buf_num &&
			get_used_buf_count(hw) >=
			run_ready_max_buf_num)
			ret = 0;
		if (ret == 0)
			bufmgr_h264_remove_unused_frame(&hw->dpb, 0);
	}
#endif
	/* v4l2 path: refine the decision against the v4l capture pool */
	if (hw->is_used_v4l) {
		struct aml_vcodec_ctx *ctx =
			(struct aml_vcodec_ctx *)(hw->v4l2_ctx);

		if (ctx->param_sets_from_ucode) {
			if (hw->v4l_params_parsed) {
				if (ctx->cap_pool.dec < hw->dpb.mDPB.size) {
					if (is_buffer_available(vdec))
						ret = 1;
					else
						ret = 0;
				}
			} else {
				/* params not parsed yet; block only while a
				 * resolution change is in progress */
				if (ctx->v4l_resolution_change)
					ret = 0;
			}
		} else if (!ctx->v4l_codec_dpb_ready) {
			if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) <
				run_ready_min_buf_num)
				ret = 0;
		}
	}

	/* debug counter: consecutive not-ready decisions per instance */
	if (ret)
		not_run_ready[DECODE_ID(hw)] = 0;
	else
		not_run_ready[DECODE_ID(hw)]++;
	if (vdec->parallel_dec == 1) {
		if (hw->mmu_enable == 0)
			return ret ? (CORE_MASK_VDEC_1) : 0;
		else
			return ret ? (CORE_MASK_VDEC_1 | CORE_MASK_HEVC) : 0;
	} else
		return ret ? (CORE_MASK_VDEC_1 | CORE_MASK_HEVC) : 0;
}

/*
 * Byte-wise checksum of the current input chunk, used only for debug
 * logging in run().  Maps the chunk when it is not already CPU-visible
 * and unmaps it afterwards.  Note the return type is unsigned char, so
 * callers see only the low 8 bits of the accumulated sum.
 */
static unsigned char get_data_check_sum
	(struct vdec_h264_hw_s *hw, int size)
{
	int jj;
	int sum = 0;
	u8 *data = NULL;

	if (!hw->chunk->block->is_mapped)
		data = codec_mm_vmap(hw->chunk->block->start +
			hw->chunk->offset, size);
	else
		data = ((u8 *)hw->chunk->block->start_virt) +
			hw->chunk->offset;

	for (jj = 0; jj < size; jj++)
		sum += data[jj];

	if (!hw->chunk->block->is_mapped)
		codec_mm_unmap_phyaddr(data);
	return sum;
}

/*
 * Scheduler callback: the instance has been granted the core(s); prepare
 * input, (re)load firmware, restore hardware context and kick decoding.
 * 'callback(vdec, arg)' is stashed in hw and invoked from the work
 * handler when this run completes.
 */
static void run(struct vdec_s *vdec, unsigned long mask,
	void (*callback)(struct vdec_s *, void *), void *arg)
{
	struct vdec_h264_hw_s *hw =
		(struct vdec_h264_hw_s *)vdec->private;
	struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
	int size, ret = -1;
	if (!hw->vdec_pg_enable_flag) {
		hw->vdec_pg_enable_flag = 1;
		amvdec_enable();
		if (hw->mmu_enable)
			amhevc_enable();
	}
	ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_RUN_START);

	run_count[DECODE_ID(hw)]++;
	vdec_reset_core(vdec);
	if (hw->mmu_enable)
		hevc_reset_core(vdec);
	hw->vdec_cb_arg = arg;
	hw->vdec_cb = callback;

#ifdef DETECT_WRONG_MULTI_SLICE
	hw->cur_picture_slice_count = 0;
#endif

	/* display queue longer than the vframe pool can never be valid;
	 * force a buffer-manager reset */
	if (kfifo_len(&hw->display_q) > VF_POOL_SIZE) {
		hw->reset_bufmgr_flag = 1;
		dpb_print(DECODE_ID(hw), 0,
			"kfifo len:%d invaild, need bufmgr reset\n",
			kfifo_len(&hw->display_q));
	}

	/* snapshot the parser write pointer for the "again" throttle
	 * evaluated in run_ready() */
	if (vdec_stream_based(vdec)) {
		hw->pre_parser_wr_ptr =
			STBUF_READ(&vdec->vbuf, get_wp);
		hw->next_again_flag = 0;
	}

	if (hw->reset_bufmgr_flag ||
		((hw->error_proc_policy & 0x40) &&
		p_H264_Dpb->buf_alloc_fail)) {
		h264_reset_bufmgr_v4l(vdec, 1);
		//flag must clear after reset for v4l buf_spec_init use
		hw->reset_bufmgr_flag = 0;
	}

	/* debug hook: bits 12..15 of h264_debug_cmd select an instance to
	 * force through h264_reconfig() once */
	if (h264_debug_cmd & 0xf000) {
		if (((h264_debug_cmd >> 12) & 0xf)
			== (DECODE_ID(hw) + 1)) {
			h264_reconfig(hw);
			h264_debug_cmd &= (~0xf000);
		}
	}
	/* hw->chunk = vdec_prepare_input(vdec); */
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
if (vdec->slave || vdec->master) + vdec_set_flag(vdec, VDEC_FLAG_SELF_INPUT_CONTEXT); +#endif + size = vdec_prepare_input(vdec, &hw->chunk); + if ((size < 0) || + (input_frame_based(vdec) && hw->chunk == NULL)) { + input_empty[DECODE_ID(hw)]++; + hw->dec_result = DEC_RESULT_AGAIN; + + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_DETAIL, + "vdec_prepare_input: Insufficient data\n"); + + vdec_schedule_work(&hw->work); + return; + } + + ATRACE_COUNTER("V_ST_DEC-chunk_size", size); + + input_empty[DECODE_ID(hw)] = 0; + + hw->dec_result = DEC_RESULT_NONE; + hw->get_data_count = 0; + hw->csd_change_flag = 0; +#if 0 + pr_info("VLD_MEM_VIFIFO_LEVEL = 0x%x, rp = 0x%x, wp = 0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_MEM_VIFIFO_WP)); +#endif + + if (input_frame_based(vdec) && !vdec_secure(vdec)) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_VDEC_STATUS) + ) { + dpb_print(DECODE_ID(hw), 0, + "%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. 
%02x %02x %02x %02x\n", + __func__, size, get_data_check_sum(hw, size), + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + } + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA) + ) { + int jj; + + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + dpb_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + dpb_print_cont(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + dpb_print_cont(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "\n"); + } + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } else + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: %x %x %x %x %x size 0x%x\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + STBUF_READ(&vdec->vbuf, get_rp), + STBUF_READ(&vdec->vbuf, get_wp), + size); + + start_process_time(hw); + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + WRITE_VREG(AV_SCRATCH_G, hw->reg_g_status); + } else { + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_START); + ret = amvdec_vdec_loadmc_ex(VFORMAT_H264, "mh264", vdec, hw->fw->data); + if (ret < 0) { + amvdec_enable_flag = false; + amvdec_disable(); + hw->vdec_pg_enable_flag = 0; + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "MH264 the %s fw loading failed, err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + vdec->mc_type = VFORMAT_H264; + hw->reg_g_status = READ_VREG(AV_SCRATCH_G); + if (hw->mmu_enable) { + ret = amhevc_loadmc_ex(VFORMAT_H264, "mh264_mmu", + hw->fw_mmu->data); + if (ret < 0) { + amvdec_enable_flag = false; + amhevc_disable(); + dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "MH264_MMU the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + vdec->mc_type = ((1 << 16) | VFORMAT_H264); + } + vdec->mc_loaded = 0; + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_END); + } + vmh264_reset_udr_mgr(hw); + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_START); + if (vh264_hw_ctx_restore(hw) < 0) { + vdec_schedule_work(&hw->work); + return; + } + if (error_proc_policy & 0x10000) { + hw->first_pre_frame_num = p_H264_Dpb->mVideo.pre_frame_num; + } + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_END); + if (input_frame_based(vdec)) { + int decode_size = 0; + + decode_size = hw->chunk->size + + (hw->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + WRITE_VREG(H264_DECODE_INFO, (1<<13)); + WRITE_VREG(H264_DECODE_SIZE, decode_size); + WRITE_VREG(VIFF_BIT_CNT, decode_size * 8); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = hw->chunk->size; + } else { + if (size <= 0) + size = 0x7fffffff; /*error happen*/ + WRITE_VREG(H264_DECODE_INFO, (1<<13)); + WRITE_VREG(H264_DECODE_SIZE, size); + WRITE_VREG(VIFF_BIT_CNT, size * 8); + hw->start_bit_cnt = size * 8; + } + config_aux_buf(hw); + config_decode_mode(hw); + vdec_enable_input(vdec); + WRITE_VREG(NAL_SEARCH_CTL, 0); + hw->sei_data_len = 0; + if (enable_itu_t35) + WRITE_VREG(NAL_SEARCH_CTL, READ_VREG(NAL_SEARCH_CTL) | 0x1); + if (!hw->init_flag) { + if (hw->mmu_enable) + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | 0x2); + else + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) & (~0x2)); + } + WRITE_VREG(NAL_SEARCH_CTL, READ_VREG(NAL_SEARCH_CTL) | (1 << 2) | (hw->bitstream_restriction_flag << 15)); + + if (udebug_flag) + WRITE_VREG(AV_SCRATCH_K, udebug_flag); + hw->stat |= STAT_TIMER_ARM; + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + + if (hw->mmu_enable) + SET_VREG_MASK(VDEC_ASSIST_MMC_CTRL1, 1 << 3); + 
else + CLEAR_VREG_MASK(VDEC_ASSIST_MMC_CTRL1, 1 << 3); + } + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + amvdec_start(); + if (hw->mmu_enable /*&& !hw->frame_busy && !hw->frame_done*/) { + WRITE_VREG(HEVC_ASSIST_SCRATCH_0, 0x0); + amhevc_start(); + if (hw->config_bufmgr_done) { + hevc_mcr_sao_global_hw_init(hw, + (hw->mb_width << 4), (hw->mb_height << 4)); + hevc_mcr_config_canv2axitbl(hw, 1); + } + } + + /* if (hw->init_flag) { */ + WRITE_VREG(DPB_STATUS_REG, H264_ACTION_SEARCH_HEAD); + /* } */ + + hw->init_flag = 1; + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_RUN_END); +} + +static void clear_refer_bufs(struct vdec_h264_hw_s *hw) +{ + int i; + ulong flags; + + if (hw->is_used_v4l) { + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + hw->buffer_spec[i].used = -1; + hw->buffer_spec[i].cma_alloc_addr = 0; + hw->buffer_spec[i].buf_adr = 0; + } + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + } + + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &(hw->vfpool[hw->cur_pool][i]); + hw->vfpool[hw->cur_pool][i].index = -1; /* VF_BUF_NUM; */ + hw->vfpool[hw->cur_pool][i].bufWidth = 1920; + kfifo_put(&hw->newframe_q, vf); + } +} + +static void reset(struct vdec_s *vdec) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *)vdec->private; + + pr_info("vmh264 reset\n"); + + cancel_work_sync(&hw->work); + cancel_work_sync(&hw->notify_work); + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + if (hw->mmu_enable) + amhevc_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + hw->eos = 0; + hw->decode_pic_count = 0; + + reset_process_time(hw); + h264_reset_bufmgr_v4l(vdec, 0); + clear_refer_bufs(hw); + + atomic_set(&hw->vf_pre_count, 0); + atomic_set(&hw->vf_get_count, 0); + atomic_set(&hw->vf_put_count, 0); + + 
dpb_print(DECODE_ID(hw), 0, "%s\n", __func__); +} + +static void h264_reconfig(struct vdec_h264_hw_s *hw) +{ + int i; + unsigned long flags; + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + struct vdec_s *vdec = hw_to_vdec(hw); + dpb_print(DECODE_ID(hw), 0, + "%s\n", __func__); + /* after calling flush_dpb() and bufmgr_h264_remove_unused_frame(), + all buffers are in display queue (used == 2), + or free (used == 0) + */ + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, "pre h264_reconfig"); + + flush_dpb(p_H264_Dpb); + bufmgr_h264_remove_unused_frame(p_H264_Dpb, 0); + + if (hw->collocate_cma_alloc_addr) { + decoder_bmmu_box_free_idx( + hw->bmmu_box, + BMMU_REF_IDX); + hw->collocate_cma_alloc_addr = 0; + hw->dpb.colocated_mv_addr_start = 0; + hw->dpb.colocated_mv_addr_end = 0; + } + spin_lock_irqsave(&hw->bufspec_lock, flags); + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + if (vdec->parallel_dec == 1) { + vdec->free_canvas_ex(hw->buffer_spec[i].y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].u_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].v_canvas_index, vdec->id); + hw->buffer_spec[i].y_canvas_index = -1; + hw->buffer_spec[i].u_canvas_index = -1; + hw->buffer_spec[i].v_canvas_index = -1; +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_u_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_v_canvas_index, vdec->id); + hw->buffer_spec[i].vdec_dw_y_canvas_index = -1; + hw->buffer_spec[i].vdec_dw_u_canvas_index = -1; + hw->buffer_spec[i].vdec_dw_v_canvas_index = -1; +#endif + } + } + /*make sure buffers not put back to bufmgr when + vf_put is called*/ + if (hw->buffer_spec[i].used == 2) + hw->buffer_spec[i].used = 3; + + /* ready to release "free buffers" + */ + if (hw->buffer_spec[i].used == 0) + hw->buffer_spec[i].used = 4; + + hw->buffer_spec[i].canvas_pos 
= -1; + + if (hw->buffer_spec[i].used == 4 && + hw->buffer_spec[i].vf_ref != 0 && + hw->buffer_spec[i].cma_alloc_addr) { + hw->buffer_spec[i].used = 3; + } + } + spin_unlock_irqrestore(&hw->bufspec_lock, flags); + hw->has_i_frame = 0; + hw->config_bufmgr_done = 0; + + if (hw->is_used_v4l) { + mutex_lock(&vmh264_mutex); + dealloc_buf_specs(hw, 1); + mutex_unlock(&vmh264_mutex); + } + + if (dpb_is_debug(DECODE_ID(hw), + PRINT_FLAG_DUMP_BUFSPEC)) + dump_bufspec(hw, "after h264_reconfig"); +} + +#ifdef ERROR_HANDLE_TEST +static void h264_clear_dpb(struct vdec_h264_hw_s *hw) +{ + int i; + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s\n", __func__); + remove_dpb_pictures(p_H264_Dpb); + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + /*make sure buffers not put back to bufmgr when + vf_put is called*/ + if (hw->buffer_spec[i].used == 2) + hw->buffer_spec[i].used = 5; + } + +} +#endif + +static void h264_reset_bufmgr_v4l(struct vdec_s *vdec, int flush_flag) +{ + ulong timeout; + struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private; + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; +#if 0 + struct h264_dpb_stru *p_H264_Dpb = &hw->dpb; + int actual_dpb_size, max_reference_size; + int reorder_pic_num; + unsigned int colocated_buf_size; + unsigned int colocated_mv_addr_start; + unsigned int colocated_mv_addr_end; + dpb_print(DECODE_ID(hw), 0, + "%s\n", __func__); + + for (i = 0; i < VF_POOL_SIZE; i++) + hw->vfpool[hw->cur_pool][i].index = -1; /* VF_BUF_NUM; */ + + actual_dpb_size = p_H264_Dpb->mDPB.size; + max_reference_size = p_H264_Dpb->max_reference_size; + reorder_pic_num = p_H264_Dpb->reorder_pic_num; + + colocated_buf_size = p_H264_Dpb->colocated_buf_size; + colocated_mv_addr_start = p_H264_Dpb->colocated_mv_addr_start; + colocated_mv_addr_end = p_H264_Dpb->colocated_mv_addr_end; + + hw->cur_pool++; + if (hw->cur_pool >= VF_POOL_NUM) + hw->cur_pool = 0; + + INIT_KFIFO(hw->display_q); + 
INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &(hw->vfpool[hw->cur_pool][i]); + hw->vfpool[hw->cur_pool][i].index = -1; /* VF_BUF_NUM; */ + hw->vfpool[hw->cur_pool][i].bufWidth = 1920; + kfifo_put(&hw->newframe_q, vf); + } + + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) + hw->buffer_spec[i].used = 0; + + dpb_init_global(&hw->dpb, + DECODE_ID(hw), 0, 0); + p_H264_Dpb->mDPB.size = actual_dpb_size; + p_H264_Dpb->max_reference_size = max_reference_size; + p_H264_Dpb->reorder_pic_num = reorder_pic_num; + + p_H264_Dpb->colocated_buf_size = colocated_buf_size; + p_H264_Dpb->colocated_mv_addr_start = colocated_mv_addr_start; + p_H264_Dpb->colocated_mv_addr_end = colocated_mv_addr_end; + + p_H264_Dpb->fast_output_enable = fast_output_enable; + hw->has_i_frame = 0; +#else + mutex_lock(&reset_mutex); + dpb_print(DECODE_ID(hw), 0, + "%s frame count %d to skip %d\n\n", + __func__, hw->decode_pic_count+1, + hw->skip_frame_count); + + /* If the caller is from reset, then we don't call flush_dbp */ + if (flush_flag) + flush_dpb(&hw->dpb); + + if (!hw->is_used_v4l) { + timeout = jiffies + HZ; + while (kfifo_len(&hw->display_q) > 0) { + if (time_after(jiffies, timeout)) + break; + schedule(); + } + } + + buf_spec_init(hw, true); + + vh264_local_init(hw, true); + /*hw->decode_pic_count = 0; + hw->seq_info2 = 0;*/ + + if (vh264_set_params(hw, + hw->cfg_param1, + hw->cfg_param2, + hw->cfg_param3, + hw->cfg_param4, hw->reset_bufmgr_flag) < 0) + hw->stat |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + else + hw->stat &= (~DECODER_FATAL_ERROR_SIZE_OVERFLOW); + + /*drop 3 frames after reset bufmgr if bit0 is set 1 */ + if (first_i_policy & 0x01) + hw->first_i_policy = (3 << 8) | first_i_policy; + + p_H264_Dpb->first_insert_frame = FirstInsertFrm_RESET; + + if (hw->stat & DECODER_FATAL_ERROR_SIZE_OVERFLOW) + hw->init_flag = 0; + else + hw->init_flag = 1; + + hw->reset_bufmgr_count++; + mutex_unlock(&reset_mutex); +#endif +} + +int 
ammvdec_h264_mmu_init(struct vdec_h264_hw_s *hw) +{ + int ret = -1; + int tvp_flag = vdec_secure(hw_to_vdec(hw)) ? + CODEC_MM_FLAGS_TVP : 0; + int buf_size = 64; + + pr_debug("ammvdec_h264_mmu_init tvp = 0x%x mmu_enable %d\n", + tvp_flag, hw->mmu_enable); + hw->need_cache_size = buf_size * SZ_1M; + hw->sc_start_time = get_jiffies_64(); + if (hw->mmu_enable && !hw->mmu_box) { + hw->mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME, + hw->id, + MMU_MAX_BUFFERS, + hw->need_cache_size, + tvp_flag); + if (!hw->mmu_box) { + pr_err("h264 4k alloc mmu box failed!!\n"); + return -1; + } + ret = 0; + } + if (!hw->bmmu_box) { + hw->bmmu_box = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + hw->id, + BMMU_MAX_BUFFERS, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + tvp_flag); + if (hw->bmmu_box) + ret = 0; + } + return ret; +} +int ammvdec_h264_mmu_release(struct vdec_h264_hw_s *hw) +{ + if (hw->mmu_box) { + decoder_mmu_box_free(hw->mmu_box); + hw->mmu_box = NULL; + } + if (hw->bmmu_box) { + decoder_bmmu_box_free(hw->bmmu_box); + hw->bmmu_box = NULL; + } + return 0; +} + +static int ammvdec_h264_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_h264_hw_s *hw = NULL; + char *tmpbuf; + int config_val; + + if (pdata == NULL) { + pr_info("\nammvdec_h264 memory resource undefined.\n"); + return -EFAULT; + } + + hw = (struct vdec_h264_hw_s *)h264_alloc_hw_stru(&pdev->dev, + sizeof(struct vdec_h264_hw_s), GFP_KERNEL); + if (hw == NULL) { + pr_info("\nammvdec_h264 device data allocation failed\n"); + return -ENOMEM; + } + hw->id = pdev->id; + hw->platform_dev = pdev; + + snprintf(hw->trace.vdec_name, sizeof(hw->trace.vdec_name), + "h264-%d", hw->id); + snprintf(hw->trace.pts_name, sizeof(hw->trace.pts_name), + "%s-timestamp", hw->trace.vdec_name); + snprintf(hw->trace.new_q_name, sizeof(hw->trace.new_q_name), + "%s-newframe_q", hw->trace.vdec_name); + snprintf(hw->trace.disp_q_name, 
sizeof(hw->trace.disp_q_name), + "%s-dispframe_q", hw->trace.vdec_name); + snprintf(hw->trace.decode_time_name, sizeof(hw->trace.decode_time_name), + "decoder_time%d", pdev->id); + snprintf(hw->trace.decode_run_time_name, sizeof(hw->trace.decode_run_time_name), + "decoder_run_time%d", pdev->id); + snprintf(hw->trace.decode_header_time_name, sizeof(hw->trace.decode_header_time_name), + "decoder_header_time%d", pdev->id); + snprintf(hw->trace.decode_work_time_name, sizeof(hw->trace.decode_work_time_name), + "decoder_work_time%d", pdev->id); + + /* the ctx from v4l2 driver. */ + hw->v4l2_ctx = pdata->private; + + platform_set_drvdata(pdev, pdata); + + hw->mmu_enable = 0; + hw->first_head_check_flag = 0; + + if (pdata->sys_info) + hw->vh264_amstream_dec_info = *pdata->sys_info; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) + force_enable_mmu = 1; + + if (force_enable_mmu && pdata->sys_info && + (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TXLX) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_GXLX) && + (pdata->sys_info->height * pdata->sys_info->width + > 1920 * 1088)) + hw->mmu_enable = 1; + + if (hw->mmu_enable && + (pdata->frame_base_video_path == FRAME_BASE_PATH_IONVIDEO)) { + hw->mmu_enable = 0; + pr_info("ionvideo needs disable mmu, path= %d \n", + pdata->frame_base_video_path); + } + + if (ammvdec_h264_mmu_init(hw)) { + h264_free_hw_stru(&pdev->dev, (void *)hw); + pr_info("\nammvdec_h264 mmu alloc failed!\n"); + return -ENOMEM; + } + + if (pdata->config_len) { + dpb_print(DECODE_ID(hw), 0, "pdata->config=%s\n", pdata->config); + /*use ptr config for doubel_write_mode, etc*/ + if (get_config_int(pdata->config, + "mh264_double_write_mode", &config_val) == 0) + hw->double_write_mode = config_val; + else + hw->double_write_mode = double_write_mode; + + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hw->is_used_v4l = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_buffer_margin", + &config_val) == 0) + 
hw->reorder_dpb_size_margin = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + hw->canvas_mode = config_val; + if (get_config_int(pdata->config, + "parm_v4l_low_latency_mode", + &config_val) == 0) { + hw->low_latency_mode = (config_val & 1) ? 0x8:0; + hw->enable_fence = (config_val & 2) ? 1 : 0; + } + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hw->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hw->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, + "parm_enable_fence", + &config_val) == 0) + hw->enable_fence = config_val; + + if (get_config_int(pdata->config, + "parm_fence_usage", + &config_val) == 0) + hw->fence_usage = config_val; + + if (hw->is_used_v4l) { + if (get_config_int(pdata->config, + "parm_v4l_metadata_config_flag", + &config_val) == 0) { + hw->metadata_config_flag = config_val; + hw->discard_dv_data = hw->metadata_config_flag & VDEC_CFG_FLAG_DV_NEGATIVE; + if (config_val & VDEC_CFG_FLAG_DIS_ERR_POLICY) { + hw->error_proc_policy = v4l_error_policy; //default + } else { + hw->error_proc_policy = error_proc_policy; + } + } else { + hw->discard_dv_data = 1; //default + hw->error_proc_policy = error_proc_policy; + } + } else { + if (get_config_int(pdata->config, + "negative_dv", + &config_val) == 0) { + hw->discard_dv_data = config_val; + } else { + hw->discard_dv_data = 1; //default + } + } + /*if (get_config_int(pdata->config, + "parm_v4l_duration", + &config_val) == 0) + vdec_frame_rate_uevent(config_val);*/ + if (hw->discard_dv_data) + dpb_print(DECODE_ID(hw), 0, "discard dv data\n"); + } else + hw->double_write_mode = double_write_mode; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) + hw->double_write_mode = 3; + + if (force_config_fence) { + hw->enable_fence = true; + hw->fence_usage = (force_config_fence >> 4) & 0xf; + if (force_config_fence & 0x2) + hw->enable_fence = 
false; + dpb_print(DECODE_ID(hw), 0, + "enable fence: %d, fence usage: %d\n", + hw->enable_fence, hw->fence_usage); + } + + if (!hw->is_used_v4l) { + hw->reorder_dpb_size_margin = reorder_dpb_size_margin; + hw->canvas_mode = mem_map_mode; + + if ((h264_debug_flag & IGNORE_PARAM_FROM_CONFIG) == 0) + hw->canvas_mode = pdata->canvas_mode; + } + + if (hw->is_used_v4l && (hw->v4l2_ctx != NULL)) { + struct aml_vcodec_ctx *ctx = hw->v4l2_ctx; + + ctx->aux_infos.alloc_buffer(ctx, SEI_TYPE); + + if (!hw->discard_dv_data) + ctx->aux_infos.alloc_buffer(ctx, DV_TYPE); + } + + if (hw->mmu_enable) { + hw->canvas_mode = CANVAS_BLKMODE_LINEAR; + hw->double_write_mode &= 0xffff; + } + + if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) && hw->enable_fence) { + hw->canvas_mode = 1; + } + + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + hw->buffer_spec[i].y_canvas_index = -1; + hw->buffer_spec[i].u_canvas_index = -1; + hw->buffer_spec[i].v_canvas_index = -1; +#ifdef VDEC_DW + if (IS_VDEC_DW(hw)) { + hw->buffer_spec[i].vdec_dw_y_canvas_index = -1; + hw->buffer_spec[i].vdec_dw_u_canvas_index = -1; + hw->buffer_spec[i].vdec_dw_v_canvas_index = -1; + } +#endif + } + } + + dpb_print(DECODE_ID(hw), 0, + "%s mmu_enable %d double_write_mode 0x%x\n", + __func__, hw->mmu_enable, hw->double_write_mode); + + pdata->private = hw; + pdata->dec_status = dec_status; + pdata->set_trickmode = vmh264_set_trickmode; + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vh264_isr; + pdata->threaded_irq_handler = vh264_isr_thread_fn; + pdata->dump_state = vmh264_dump_state; + +#ifdef MH264_USERDATA_ENABLE + pdata->wakeup_userdata_poll = vmh264_wakeup_userdata_poll; + pdata->user_data_read = vmh264_user_data_read; + pdata->reset_userdata_fifo = vmh264_reset_userdata_fifo; +#else + pdata->wakeup_userdata_poll = NULL; + pdata->user_data_read = NULL; + pdata->reset_userdata_fifo = NULL; +#endif + if (pdata->use_vfm_path) { 
+ snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + hw->frameinfo_enable = 1; + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (vdec_dual(pdata)) { + if (dv_toggle_prov_name) /*debug purpose*/ + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVBL_PROVIDER_NAME : + VFM_DEC_DVEL_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVEL_PROVIDER_NAME : + VFM_DEC_DVBL_PROVIDER_NAME); + } +#endif + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + PROVIDER_NAME ".%02x", pdev->id & 0xff); + + if (!hw->is_used_v4l) + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vf_provider_ops, pdata); + + platform_set_drvdata(pdev, pdata); + + buf_spec_init(hw, false); + + hw->platform_dev = pdev; + +#ifdef DUMP_USERDATA_RECORD + vmh264_init_userdata_dump(); + vmh264_reset_user_data_buf(); +#endif + if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, BMMU_DPB_IDX, + V_BUF_ADDR_OFFSET, DRIVER_NAME, &hw->cma_alloc_addr) < 0) { + h264_free_hw_stru(&pdev->dev, (void *)hw); + pdata->dec_status = NULL; + return -ENOMEM; + } + + hw->buf_offset = hw->cma_alloc_addr - DEF_BUF_START_ADDR + + DCAC_READ_MARGIN; + if (hw->mmu_enable) { + u32 extif_size = EXTIF_BUF_SIZE; + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) + extif_size <<= 1; + if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, BMMU_EXTIF_IDX, + extif_size, DRIVER_NAME, &hw->extif_addr) < 0) { + h264_free_hw_stru(&pdev->dev, (void *)hw); + pdata->dec_status = NULL; + return -ENOMEM; + } + } + if (!vdec_secure(pdata)) { +#if 1 + /*init internal buf*/ + tmpbuf = (char *)codec_mm_phys_to_virt(hw->cma_alloc_addr); + if (tmpbuf) { + memset(tmpbuf, 0, V_BUF_ADDR_OFFSET); + codec_mm_dma_flush(tmpbuf, + V_BUF_ADDR_OFFSET, + DMA_TO_DEVICE); + } else { + tmpbuf = codec_mm_vmap(hw->cma_alloc_addr, + V_BUF_ADDR_OFFSET); + if (tmpbuf) { + 
memset(tmpbuf, 0, V_BUF_ADDR_OFFSET); + codec_mm_dma_flush(tmpbuf, + V_BUF_ADDR_OFFSET, + DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(tmpbuf); + } + } +#else + /*init sps/pps internal buf 64k*/ + tmpbuf = (char *)codec_mm_phys_to_virt(hw->cma_alloc_addr + + (mem_sps_base - DEF_BUF_START_ADDR)); + memset(tmpbuf, 0, 0x10000); + dma_sync_single_for_device(amports_get_dma_device(), + hw->cma_alloc_addr + + (mem_sps_base - DEF_BUF_START_ADDR), + 0x10000, DMA_TO_DEVICE); +#endif + } + /**/ + +#if 0 + if (NULL == hw->sei_data_buffer) { + hw->sei_data_buffer = + dma_alloc_coherent(amports_get_dma_device(), + USER_DATA_SIZE, + &hw->sei_data_buffer_phys, GFP_KERNEL); + if (!hw->sei_data_buffer) { + pr_info("%s: Can not allocate sei_data_buffer\n", + __func__); + ammvdec_h264_mmu_release(hw); + h264_free_hw_stru(&pdev->dev, (void *)hw); + return -ENOMEM; + } + /* pr_info("buffer 0x%x, phys 0x%x, remap 0x%x\n", + sei_data_buffer, sei_data_buffer_phys, + (u32)sei_data_buffer_remap); */ + } +#endif + dpb_print(DECODE_ID(hw), 0, "ammvdec_h264 mem-addr=%lx,buff_offset=%x,buf_start=%lx\n", + pdata->mem_start, hw->buf_offset, hw->cma_alloc_addr); + + vdec_source_changed(VFORMAT_H264, 3840, 2160, 60); + + if (hw->mmu_enable) + hevc_source_changed(VFORMAT_HEVC, 3840, 2160, 60); + + if (vh264_init(hw) < 0) { + pr_info("\nammvdec_h264 init failed.\n"); + ammvdec_h264_mmu_release(hw); + h264_free_hw_stru(&pdev->dev, (void *)hw); + pdata->dec_status = NULL; + return -ENODEV; + } +#ifdef MH264_USERDATA_ENABLE + vmh264_crate_userdata_manager(hw, + hw->sei_user_data_buffer, + USER_DATA_SIZE); +#endif + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_init(pdata); +#endif + + vdec_set_prepare_level(pdata, start_decode_buf_level); + if (pdata->parallel_dec == 1) { + if (hw->mmu_enable == 0) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + } else + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + 
| CORE_MASK_COMBINE); + + atomic_set(&hw->vh264_active, 1); + vdec_set_vframe_comm(pdata, DRIVER_NAME); + display_frame_count[DECODE_ID(hw)] = 0; + decode_frame_count[DECODE_ID(hw)] = 0; + hw->dpb.without_display_mode = without_display_mode; + mutex_init(&hw->fence_mutex); + if (hw->enable_fence) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + pdata->sync = vdec_sync_get(); + if (!pdata->sync) { + dpb_print(DECODE_ID(hw), 0, "alloc fence timeline error\n"); + ammvdec_h264_mmu_release(hw); + h264_free_hw_stru(&pdev->dev, (void *)hw); + pdata->dec_status = NULL; + return -ENODEV; + } + ctx->sync = pdata->sync; + pdata->sync->usage = hw->fence_usage; + /* creat timeline. */ + vdec_timeline_create(pdata->sync, DRIVER_NAME); + vdec_timeline_get(pdata->sync); + } + + return 0; +} + +static void vdec_fence_release(struct vdec_h264_hw_s *hw, + struct vdec_sync *sync) +{ + ulong expires; + + /* clear display pool. */ + clear_refer_bufs(hw); + + /* notify signal to wake up all fences. */ + vdec_timeline_increase(sync, VF_POOL_SIZE); + + expires = jiffies + msecs_to_jiffies(2000); + while (!check_objs_all_signaled(sync)) { + if (time_after(jiffies, expires)) { + pr_err("wait fence signaled timeout.\n"); + break; + } + } + + pr_info("fence start release\n"); + + /* decreases refcnt of timeline. 
*/ + vdec_timeline_put(sync); +} + +static int ammvdec_h264_remove(struct platform_device *pdev) +{ + struct vdec_h264_hw_s *hw = + (struct vdec_h264_hw_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + int i; + + struct vdec_s *vdec = hw_to_vdec(hw); + + if (vdec->next_status == VDEC_STATUS_DISCONNECTED + && (vdec->status == VDEC_STATUS_ACTIVE)) { + dpb_print(DECODE_ID(hw), 0, + "%s force exit %d\n", __func__, __LINE__); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + wait_event_interruptible_timeout(hw->wait_q, + (vdec->status == VDEC_STATUS_CONNECTED), + msecs_to_jiffies(1000)); /* wait for work done */ + } + + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) + release_aux_data(hw, i); + + atomic_set(&hw->vh264_active, 0); + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + + vh264_stop(hw); +#ifdef MH264_USERDATA_ENABLE +#ifdef DUMP_USERDATA_RECORD + vmh264_dump_userdata(); +#endif + vmh264_destroy_userdata_manager(hw); +#endif + /* vdec_source_changed(VFORMAT_H264, 0, 0, 0); */ + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_exit(vdec); +#endif + + atomic_set(&hw->vh264_active, 0); + if (vdec->parallel_dec == 1) { + if (hw->mmu_enable == 0) + vdec_core_release(vdec, CORE_MASK_VDEC_1); + else + vdec_core_release(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC | + CORE_MASK_COMBINE); + } else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + if (vdec->parallel_dec == 1) { + for (i = 0; i < BUFSPEC_POOL_SIZE; i++) { + vdec->free_canvas_ex(hw->buffer_spec[i].y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].u_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].v_canvas_index, vdec->id); + if (IS_VDEC_DW(hw)) { + vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_u_canvas_index, vdec->id); 
+ vdec->free_canvas_ex(hw->buffer_spec[i].vdec_dw_v_canvas_index, vdec->id); + } + } + } + + if (hw->enable_fence) + vdec_fence_release(hw, vdec->sync); + + ammvdec_h264_mmu_release(hw); + h264_free_hw_stru(&pdev->dev, (void *)hw); + clk_adj_frame_count = 0; + + return 0; +} + +/****************************************/ + +static struct platform_driver ammvdec_h264_driver = { + .probe = ammvdec_h264_probe, + .remove = ammvdec_h264_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t ammvdec_h264_profile = { + .name = "H.264-V4L", + .profile = "" +}; + +static struct mconfig hm264_configs[] = { + MC_PU32("h264_debug_flag", &h264_debug_flag), + MC_PI32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("fixed_frame_rate_mode", &fixed_frame_rate_mode), + MC_PU32("decode_timeout_val", &decode_timeout_val), + MC_PU32("reorder_dpb_size_margin", &reorder_dpb_size_margin), + MC_PU32("reference_buf_margin", &reference_buf_margin), + MC_PU32("radr", &radr), + MC_PU32("rval", &rval), + MC_PU32("h264_debug_mask", &h264_debug_mask), + MC_PU32("h264_debug_cmd", &h264_debug_cmd), + MC_PI32("force_rate_streambase", &force_rate_streambase), + MC_PI32("dec_control", &dec_control), + MC_PI32("force_rate_framebase", &force_rate_framebase), + MC_PI32("force_disp_bufspec_num", &force_disp_bufspec_num), + MC_PU32("prefix_aux_buf_size", &prefix_aux_buf_size), + MC_PU32("suffix_aux_buf_size", &suffix_aux_buf_size), +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + MC_PU32("reorder_dpb_size_margin_dv", &reorder_dpb_size_margin_dv), + MC_PU32("dv_toggle_prov_name", &dv_toggle_prov_name), + MC_PU32("dolby_meta_with_el", &dolby_meta_with_el), +#endif + MC_PU32("i_only_flag", &i_only_flag), + MC_PU32("force_rate_streambase", &force_rate_streambase), +}; +static struct mconfig_node hm264_node; + + +static int __init ammvdec_h264_driver_init_module(void) +{ + 
pr_info("ammvdec_h264 module init\n"); + if (platform_driver_register(&ammvdec_h264_driver)) { + pr_info("failed to register ammvdec_h264 driver\n"); + return -ENODEV; + } + + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TXLX) { + ammvdec_h264_profile.profile = + "4k, dwrite, compressed, frame_dv, fence"; + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXTVBB) { + ammvdec_h264_profile.profile = "4k, frame_dv, fence"; + } + } else { + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D || is_cpu_s4_s805x2()) { + ammvdec_h264_profile.profile = + "dwrite, compressed, frame_dv, v4l"; + } else { + ammvdec_h264_profile.profile = + "dwrite, compressed, v4l"; + } + } + + vcodec_profile_register(&ammvdec_h264_profile); + + INIT_REG_NODE_CONFIGS("media.decoder", &hm264_node, + "mh264-v4l", hm264_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_H264, 1); + return 0; +} + +static void __exit ammvdec_h264_driver_remove_module(void) +{ + pr_info("ammvdec_h264 module remove.\n"); + + platform_driver_unregister(&ammvdec_h264_driver); +} + +/****************************************/ +module_param(h264_debug_flag, uint, 0664); +MODULE_PARM_DESC(h264_debug_flag, "\n ammvdec_h264 h264_debug_flag\n"); + +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n ammvdec_h264 start_decode_buf_level\n"); + +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, "\n ammvdec_h264 pre_decode_buf_level\n"); + +module_param(fixed_frame_rate_mode, uint, 0664); +MODULE_PARM_DESC(fixed_frame_rate_mode, "\namvdec_h264 fixed_frame_rate_mode\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, "\n amvdec_h264 decode_timeout_val\n"); + +module_param(errordata_timeout_val, uint, 0664); +MODULE_PARM_DESC(errordata_timeout_val, "\n amvdec_h264 errordata_timeout_val\n"); + +module_param(get_data_timeout_val, uint, 0664); 
+MODULE_PARM_DESC(get_data_timeout_val, "\n amvdec_h264 get_data_timeout_val\n"); + +module_param(frame_max_data_packet, uint, 0664); +MODULE_PARM_DESC(frame_max_data_packet, "\n amvdec_h264 frame_max_data_packet\n"); + +module_param(reorder_dpb_size_margin, uint, 0664); +MODULE_PARM_DESC(reorder_dpb_size_margin, "\n ammvdec_h264 reorder_dpb_size_margin\n"); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(reorder_dpb_size_margin_dv, uint, 0664); +MODULE_PARM_DESC(reorder_dpb_size_margin_dv, + "\n ammvdec_h264 reorder_dpb_size_margin_dv\n"); +#endif + +module_param(reference_buf_margin, uint, 0664); +MODULE_PARM_DESC(reference_buf_margin, "\n ammvdec_h264 reference_buf_margin\n"); + +#ifdef CONSTRAIN_MAX_BUF_NUM +module_param(run_ready_max_vf_only_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_vf_only_num, "\n run_ready_max_vf_only_num\n"); + +module_param(run_ready_display_q_num, uint, 0664); +MODULE_PARM_DESC(run_ready_display_q_num, "\n run_ready_display_q_num\n"); + +module_param(run_ready_max_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_buf_num, "\n run_ready_max_buf_num\n"); +#endif + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\nrval\n"); + +module_param(h264_debug_mask, uint, 0664); +MODULE_PARM_DESC(h264_debug_mask, "\n amvdec_h264 h264_debug_mask\n"); + +module_param(h264_debug_cmd, uint, 0664); +MODULE_PARM_DESC(h264_debug_cmd, "\n amvdec_h264 h264_debug_cmd\n"); + +module_param(force_rate_streambase, int, 0664); +MODULE_PARM_DESC(force_rate_streambase, "\n amvdec_h264 force_rate_streambase\n"); + +module_param(dec_control, int, 0664); +MODULE_PARM_DESC(dec_control, "\n amvdec_h264 dec_control\n"); + +module_param(force_rate_framebase, int, 0664); +MODULE_PARM_DESC(force_rate_framebase, "\n amvdec_h264 force_rate_framebase\n"); + +module_param(force_disp_bufspec_num, int, 0664); +MODULE_PARM_DESC(force_disp_bufspec_num, "\n amvdec_h264 
force_disp_bufspec_num\n"); + +module_param(V_BUF_ADDR_OFFSET, int, 0664); +MODULE_PARM_DESC(V_BUF_ADDR_OFFSET, "\n amvdec_h264 V_BUF_ADDR_OFFSET\n"); + +module_param(prefix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(prefix_aux_buf_size, "\n prefix_aux_buf_size\n"); + +module_param(suffix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(suffix_aux_buf_size, "\n suffix_aux_buf_size\n"); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(dv_toggle_prov_name, uint, 0664); +MODULE_PARM_DESC(dv_toggle_prov_name, "\n dv_toggle_prov_name\n"); + +module_param(dolby_meta_with_el, uint, 0664); +MODULE_PARM_DESC(dolby_meta_with_el, "\n dolby_meta_with_el\n"); + +#endif + +module_param(fast_output_enable, uint, 0664); +MODULE_PARM_DESC(fast_output_enable, "\n amvdec_h264 fast_output_enable\n"); + +module_param(error_proc_policy, uint, 0664); +MODULE_PARM_DESC(error_proc_policy, "\n amvdec_h264 error_proc_policy\n"); + +module_param(v4l_error_policy, uint, 0664); +MODULE_PARM_DESC(v4l_error_policy, "\n amvdec_h264 v4l_error_policy\n"); + + +module_param(error_skip_count, uint, 0664); +MODULE_PARM_DESC(error_skip_count, "\n amvdec_h264 error_skip_count\n"); + +module_param(force_sliding_margin, uint, 0664); +MODULE_PARM_DESC(force_sliding_margin, "\n amvdec_h264 force_sliding_margin\n"); + +module_param(i_only_flag, uint, 0664); +MODULE_PARM_DESC(i_only_flag, "\n amvdec_h264 i_only_flag\n"); + +module_param(first_i_policy, uint, 0664); +MODULE_PARM_DESC(first_i_policy, "\n amvdec_h264 first_i_policy\n"); + +module_param(frmbase_cont_bitlevel, uint, 0664); +MODULE_PARM_DESC(frmbase_cont_bitlevel, + "\n amvdec_h264 frmbase_cont_bitlevel\n"); + +module_param(frmbase_cont_bitlevel2, uint, 0664); +MODULE_PARM_DESC(frmbase_cont_bitlevel2, + "\n amvdec_h264 frmbase_cont_bitlevel\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_mh264 udebug_flag\n"); + +module_param(udebug_pause_pos, uint, 0664); +MODULE_PARM_DESC(udebug_pause_pos, 
"\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +module_param(max_alloc_buf_count, uint, 0664); +MODULE_PARM_DESC(max_alloc_buf_count, "\n amvdec_h264 max_alloc_buf_count\n"); + +module_param(enable_itu_t35, uint, 0664); +MODULE_PARM_DESC(enable_itu_t35, "\n amvdec_h264 enable_itu_t35\n"); + +module_param(endian, uint, 0664); +MODULE_PARM_DESC(endian, "\nrval\n"); + +module_param(mmu_enable, uint, 0664); +MODULE_PARM_DESC(mmu_enable, "\n mmu_enable\n"); + +module_param(force_enable_mmu, uint, 0664); +MODULE_PARM_DESC(force_enable_mmu, "\n force_enable_mmu\n"); + +module_param(again_threshold, uint, 0664); +MODULE_PARM_DESC(again_threshold, "\n again_threshold\n"); + +module_param(stream_mode_start_num, uint, 0664); +MODULE_PARM_DESC(stream_mode_start_num, "\n stream_mode_start_num\n"); + +module_param(colocate_old_cal, uint, 0664); +MODULE_PARM_DESC(colocate_old_cal, "\n amvdec_mh264 colocate_old_cal\n"); + +/* +module_param(trigger_task, uint, 0664); +MODULE_PARM_DESC(trigger_task, "\n amvdec_h264 trigger_task\n"); +*/ +module_param_array(decode_frame_count, uint, &max_decode_instance_num, 0664); + +module_param_array(display_frame_count, uint, &max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, &max_decode_instance_num, 0664); + +module_param_array(run_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(not_run_ready, uint, + &max_decode_instance_num, 0664); + +module_param_array(input_empty, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_get_frame_interval, uint, + &max_decode_instance_num, 0664); + +module_param_array(step, uint, &max_decode_instance_num, 0664); + +module_param_array(ref_frame_mark_flag, uint, &max_decode_instance_num, 0664); + +module_param(disp_vframe_valve_level, 
uint, 0664); +MODULE_PARM_DESC(disp_vframe_valve_level, "\n disp_vframe_valve_level\n"); + +module_param(double_write_mode, uint, 0664); +MODULE_PARM_DESC(double_write_mode, "\n double_write_mode\n"); + +module_param(mem_map_mode, uint, 0664); +MODULE_PARM_DESC(mem_map_mode, "\n mem_map_mode\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n without_display_mode\n"); + +module_param(check_slice_num, uint, 0664); +MODULE_PARM_DESC(check_slice_num, "\n check_slice_num\n"); + +module_param(mb_count_threshold, uint, 0664); +MODULE_PARM_DESC(mb_count_threshold, "\n mb_count_threshold\n"); + +module_param(loop_playback_poc_threshold, int, 0664); +MODULE_PARM_DESC(loop_playback_poc_threshold, "\n loop_playback_poc_threshold\n"); + +module_param(poc_threshold, int, 0664); +MODULE_PARM_DESC(poc_threshold, "\n poc_threshold\n"); + +module_param(force_config_fence, uint, 0664); +MODULE_PARM_DESC(force_config_fence, "\n force enable fence\n"); + +module_param(adjust_dpb_size, uint, 0664); +MODULE_PARM_DESC(adjust_dpb_size, "\n adjust dpb size\n"); + +module_init(ammvdec_h264_driver_init_module); +module_exit(ammvdec_h264_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC H264 Video Decoder Driver"); +MODULE_LICENSE("GPL");
diff --git a/drivers/frame_provider/decoder_v4l/h265/Makefile b/drivers/frame_provider/decoder_v4l/h265/Makefile new file mode 100644 index 0000000..51a37f5 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/h265/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_H265) += amvdec_h265_v4l.o +amvdec_h265_v4l-objs += vh265.o
diff --git a/drivers/frame_provider/decoder_v4l/h265/vh265.c b/drivers/frame_provider/decoder_v4l/h265/vh265.c new file mode 100644 index 0000000..cc36fab --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/h265/vh265.c
@@ -0,0 +1,15687 @@ +/* + * drivers/amlogic/amports/vh265.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/timer.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../../decoder/utils/decoder_mmu_box.h" +#include "../../decoder/utils/decoder_bmmu_box.h" +#include "../../decoder/utils/config_parser.h" +#include "../../decoder/utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include 
"../../decoder/utils/vdec_v4l2_buffer_ops.h" +#include <media/v4l2-mem2mem.h> +#include <linux/crc32.h> + +/* +to enable DV of frame mode +#define DOLBY_META_SUPPORT in ucode +*/ + +#define HEVC_8K_LFTOFFSET_FIX +#define SUPPORT_LONG_TERM_RPS + +//#define CO_MV_COMPRESS + +#define CONSTRAIN_MAX_BUF_NUM + +#define SWAP_HEVC_UCODE +#define DETREFILL_ENABLE + +#define AGAIN_HAS_THRESHOLD +/*#define TEST_NO_BUF*/ +#define HEVC_PIC_STRUCT_SUPPORT +#define MULTI_INSTANCE_SUPPORT +#define USE_UNINIT_SEMA + + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ +#define MPRED_8K_MV_BUF_SIZE (0x120000*4) +#define MPRED_4K_MV_BUF_SIZE (0x120000) +#define MPRED_MV_BUF_SIZE (0x3fc00) + +#define MMU_COMPRESS_HEADER_SIZE_1080P 0x10000 +#define MMU_COMPRESS_HEADER_SIZE_4K 0x48000 +#define MMU_COMPRESS_HEADER_SIZE_8K 0x120000 +#define DB_NUM 20 + +#define MAX_FRAME_4K_NUM 0x1200 +#define MAX_FRAME_8K_NUM ((MAX_FRAME_4K_NUM) * 4) + +//#define FRAME_MMU_MAP_SIZE (MAX_FRAME_4K_NUM * 4) +#define H265_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_7 + +#define HEVC_ASSIST_MMU_MAP_ADDR 0x3009 + +#define HEVC_CM_HEADER_START_ADDR 0x3628 +#define HEVC_SAO_MMU_VH1_ADDR 0x363b +#define HEVC_SAO_MMU_VH0_ADDR 0x363a + +#define HEVC_DBLK_CFGB 0x350b +#define HEVCD_MPP_DECOMP_AXIURG_CTL 0x34c7 +#define SWAP_HEVC_OFFSET (3 * 0x1000) + +#define MEM_NAME "codec_265" +/* #include <mach/am_regs.h> */ +#include <linux/amlogic/media/utils/vdec_reg.h> + +#include "../../decoder/utils/vdec.h" +#include "../../decoder/utils/amvdec.h" +#include <linux/amlogic/media/video_sink/video.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../../decoder/utils/vdec_feature.h" + +#define SEND_LMEM_WITH_RPM +#define SUPPORT_10BIT +/* #define ERROR_HANDLE_DEBUG */ + +#ifndef STAT_KTHREAD +#define STAT_KTHREAD 0x40 +#endif + +#ifdef MULTI_INSTANCE_SUPPORT +#define MAX_DECODE_INSTANCE_NUM 9 +#define MULTI_DRIVER_NAME "ammvdec_h265_v4l" +#endif +#define DRIVER_NAME 
"amvdec_h265_v4l" +#define DRIVER_HEADER_NAME "amvdec_h265_header" + +#define PUT_INTERVAL (HZ/100) +#define ERROR_SYSTEM_RESET_COUNT 200 + +#define PTS_NORMAL 0 +#define PTS_NONE_REF_USE_DURATION 1 + +#define PTS_MODE_SWITCHING_THRESHOLD 3 +#define PTS_MODE_SWITCHING_RECOVERY_THREASHOLD 3 + +#define DUR2PTS(x) ((x)*90/96) + +#define MAX_SIZE_8K (8192 * 4608) +#define MAX_SIZE_4K (4096 * 2304) +#define MAX_SIZE_2K (1920 * 1088) + +#define IS_8K_SIZE(w, h) (((w) * (h)) > MAX_SIZE_4K) +#define IS_4K_SIZE(w, h) (((w) * (h)) > (1920*1088)) + +#define SEI_UserDataITU_T_T35 4 +#define INVALID_IDX -1 /* Invalid buffer index.*/ + +static struct semaphore h265_sema; + +struct hevc_state_s; +static int hevc_print(struct hevc_state_s *hevc, + int debug_flag, const char *fmt, ...); +static int hevc_print_cont(struct hevc_state_s *hevc, + int debug_flag, const char *fmt, ...); +static int vh265_vf_states(struct vframe_states *states, void *); +static struct vframe_s *vh265_vf_peek(void *); +static struct vframe_s *vh265_vf_get(void *); +static void vh265_vf_put(struct vframe_s *, void *); +static int vh265_event_cb(int type, void *data, void *private_data); +#ifdef MULTI_INSTANCE_SUPPORT +static int vmh265_stop(struct hevc_state_s *hevc); +static s32 vh265_init(struct vdec_s *vdec); +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask); +static void reset_process_time(struct hevc_state_s *hevc); +static void start_process_time(struct hevc_state_s *hevc); +static void restart_process_time(struct hevc_state_s *hevc); +static void timeout_process(struct hevc_state_s *hevc); +#else +static s32 vh265_init(struct hevc_state_s *hevc); +#endif +static void vh265_prot_init(struct hevc_state_s *hevc); +static int vh265_local_init(struct hevc_state_s *hevc); +static void vh265_check_timer_func(struct timer_list *timer); +static void config_decode_mode(struct hevc_state_s *hevc); + +static const char vh265_dec_id[] = "vh265-dev"; + +#define PROVIDER_NAME "decoder.h265" 
+#define MULTI_INSTANCE_PROVIDER_NAME "vdec.h265" + +static const struct vframe_operations_s vh265_vf_provider = { + .peek = vh265_vf_peek, + .get = vh265_vf_get, + .put = vh265_vf_put, + .event_cb = vh265_event_cb, + .vf_states = vh265_vf_states, +}; + +static struct vframe_provider_s vh265_vf_prov; + +static u32 bit_depth_luma; +static u32 bit_depth_chroma; +static u32 video_signal_type; +static int start_decode_buf_level = 0x8000; +static unsigned int decode_timeout_val = 200; + +static u32 run_ready_min_buf_num = 1; +static u32 disable_ip_mode; +static u32 print_lcu_error = 1; +/*data_resend_policy: + bit 0, stream base resend data when decoding buf empty +*/ +static u32 data_resend_policy = 1; +static int poc_num_margin = 1000; +static int poc_error_limit = 30; + +static u32 dirty_time_threshold = 2000; +static u32 dirty_count_threshold = 200; +static u32 dirty_buffersize_threshold = 0x800000; + + +#define VIDEO_SIGNAL_TYPE_AVAILABLE_MASK 0x20000000 +/* +static const char * const video_format_names[] = { + "component", "PAL", "NTSC", "SECAM", + "MAC", "unspecified", "unspecified", "unspecified" +}; + +static const char * const color_primaries_names[] = { + "unknown", "bt709", "undef", "unknown", + "bt470m", "bt470bg", "smpte170m", "smpte240m", + "film", "bt2020" +}; + +static const char * const transfer_characteristics_names[] = { + "unknown", "bt709", "undef", "unknown", + "bt470m", "bt470bg", "smpte170m", "smpte240m", + "linear", "log100", "log316", "iec61966-2-4", + "bt1361e", "iec61966-2-1", "bt2020-10", "bt2020-12", + "smpte-st-2084", "smpte-st-428" +}; + +static const char * const matrix_coeffs_names[] = { + "GBR", "bt709", "undef", "unknown", + "fcc", "bt470bg", "smpte170m", "smpte240m", + "YCgCo", "bt2020nc", "bt2020c" +}; +*/ +#ifdef SUPPORT_10BIT +#define HEVC_CM_BODY_START_ADDR 0x3626 +#define HEVC_CM_BODY_LENGTH 0x3627 +#define HEVC_CM_HEADER_LENGTH 0x3629 +#define HEVC_CM_HEADER_OFFSET 0x362b +#define HEVC_SAO_CTRL9 0x362d +#define 
LOSLESS_COMPRESS_MODE +/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */ +/* double_write_mode: + * 0, no double write; + * 1, 1:1 ratio; + * 2, (1/4):(1/4) ratio; + * 3, (1/4):(1/4) ratio, with both compressed frame included + * 4, (1/2):(1/2) ratio; + * 5, (1/2):(1/2) ratio, with both compressed frame included + * 8, (1/8):(1/8) ratio, from t7 + * 0x10, double write only + * 0x100, if > 1080p,use mode 4,else use mode 1; + * 0x200, if > 1080p,use mode 2,else use mode 1; + * 0x300, if > 720p, use mode 4, else use mode 1; + * 0x1000,if > 1080p,use mode 3, else if > 960*540, use mode 4, else use mode 1; + */ +static u32 double_write_mode; + +/*#define DECOMP_HEADR_SURGENT*/ + +static u32 mem_map_mode; /* 0:linear 1:32x32 2:64x32 ; m8baby test1902 */ +static u32 enable_mem_saving = 1; +static u32 workaround_enable; +static u32 force_w_h; +#endif +static u32 force_fps; +static u32 pts_unstable; +#define H265_DEBUG_BUFMGR 0x01 +#define H265_DEBUG_BUFMGR_MORE 0x02 +#define H265_DEBUG_DETAIL 0x04 +#define H265_DEBUG_REG 0x08 +#define H265_DEBUG_MAN_SEARCH_NAL 0x10 +#define H265_DEBUG_MAN_SKIP_NAL 0x20 +#define H265_DEBUG_DISPLAY_CUR_FRAME 0x40 +#define H265_DEBUG_FORCE_CLK 0x80 +#define H265_DEBUG_SEND_PARAM_WITH_REG 0x100 +#define H265_DEBUG_NO_DISPLAY 0x200 +#define H265_DEBUG_DISCARD_NAL 0x400 +#define H265_DEBUG_OUT_PTS 0x800 +#define H265_DEBUG_DUMP_PIC_LIST 0x1000 +#define H265_DEBUG_PRINT_SEI 0x2000 +#define H265_DEBUG_PIC_STRUCT 0x4000 +#define H265_DEBUG_HAS_AUX_IN_SLICE 0x8000 +#define H265_DEBUG_DIS_LOC_ERROR_PROC 0x10000 +#define H265_DEBUG_DIS_SYS_ERROR_PROC 0x20000 +#define H265_NO_CHANG_DEBUG_FLAG_IN_CODE 0x40000 +#define H265_DEBUG_TRIG_SLICE_SEGMENT_PROC 0x80000 +#define H265_DEBUG_HW_RESET 0x100000 +#define H265_CFG_CANVAS_IN_DECODE 0x200000 +#define H265_DEBUG_DV 0x400000 +#define H265_DEBUG_NO_EOS_SEARCH_DONE 0x800000 +#define H265_DEBUG_NOT_USE_LAST_DISPBUF 0x1000000 +#define H265_DEBUG_IGNORE_CONFORMANCE_WINDOW 0x2000000 
+#define H265_DEBUG_WAIT_DECODE_DONE_WHEN_STOP 0x4000000 +#ifdef MULTI_INSTANCE_SUPPORT +#define PRINT_FLAG_ERROR 0x0 +#define IGNORE_PARAM_FROM_CONFIG 0x08000000 +#define PRINT_FRAMEBASE_DATA 0x10000000 +#define PRINT_FLAG_VDEC_STATUS 0x20000000 +#define PRINT_FLAG_VDEC_DETAIL 0x40000000 +#define PRINT_FLAG_V4L_DETAIL 0x80000000 +#endif + +#define BUF_POOL_SIZE 32 +#define MAX_BUF_NUM 24 +#define MAX_REF_PIC_NUM 24 +#define MAX_REF_ACTIVE 16 + +#ifdef MV_USE_FIXED_BUF +#define BMMU_MAX_BUFFERS (BUF_POOL_SIZE + 1) +#define VF_BUFFER_IDX(n) (n) +#define BMMU_WORKSPACE_ID (BUF_POOL_SIZE) +#else +#define BMMU_MAX_BUFFERS (BUF_POOL_SIZE + 1 + MAX_REF_PIC_NUM) +#define VF_BUFFER_IDX(n) (n) +#define BMMU_WORKSPACE_ID (BUF_POOL_SIZE) +#define MV_BUFFER_IDX(n) (BUF_POOL_SIZE + 1 + n) +#endif + +#define HEVC_MV_INFO 0x310d +#define HEVC_QP_INFO 0x3137 +#define HEVC_SKIP_INFO 0x3136 + +const u32 h265_version = 201602101; +static u32 debug_mask = 0xffffffff; +static u32 log_mask; +static u32 debug; +static u32 radr; +static u32 rval; +static u32 dbg_cmd; +static u32 dump_nal; +static u32 dbg_skip_decode_index; +/* + * bit 0~3, for HEVCD_IPP_AXIIF_CONFIG endian config + * bit 8~23, for HEVC_SAO_CTRL1 endian config + */ +static u32 endian; +#define HEVC_CONFIG_BIG_ENDIAN ((0x880 << 8) | 0x8) +#define HEVC_CONFIG_LITTLE_ENDIAN ((0xff0 << 8) | 0xf) + +#ifdef ERROR_HANDLE_DEBUG +static u32 dbg_nal_skip_flag; + /* bit[0], skip vps; bit[1], skip sps; bit[2], skip pps */ +static u32 dbg_nal_skip_count; +#endif +/*for debug*/ +static u32 force_bufspec; + +/* + udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode detail print + bit [31:16] not 0, pos to dump lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when 
DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 udebug_pause_decode_idx; + +static u32 decode_pic_begin; +static uint slice_parse_begin; +static u32 step; +static bool is_reset; + +#ifdef CONSTRAIN_MAX_BUF_NUM +static u32 run_ready_max_vf_only_num; +static u32 run_ready_display_q_num; + /*0: not check + 0xff: work_pic_num + */ +static u32 run_ready_max_buf_num = 0xff; +#endif + +static u32 dynamic_buf_num_margin = 7; +static u32 buf_alloc_width; +static u32 buf_alloc_height; + +static u32 max_buf_num = 16; +static u32 buf_alloc_size; +/*static u32 re_config_pic_flag;*/ +/* + *bit[0]: 0, + *bit[1]: 0, always release cma buffer when stop + *bit[1]: 1, never release cma buffer when stop + *bit[0]: 1, when stop, release cma buffer if blackout is 1; + *do not release cma buffer is blackout is not 1 + * + *bit[2]: 0, when start decoding, check current displayed buffer + * (only for buffer decoded by h265) if blackout is 0 + * 1, do not check current displayed buffer + * + *bit[3]: 1, if blackout is not 1, do not release current + * displayed cma buffer always. + */ +/* set to 1 for fast play; + * set to 8 for other case of "keep last frame" + */ +static u32 buffer_mode = 1; + +/* buffer_mode_dbg: debug only*/ +static u32 buffer_mode_dbg = 0xffff0000; +/**/ +/* + *bit[1:0]PB_skip_mode: 0, start decoding at begin; + *1, start decoding after first I; + *2, only decode and display none error picture; + *3, start decoding and display after IDR,etc + *bit[31:16] PB_skip_count_after_decoding (decoding but not display), + *only for mode 0 and 1. 
+ */ +static u32 nal_skip_policy = 2; + +/* + *bit 0, 1: only display I picture; + *bit 1, 1: only decode I picture; + */ +static u32 i_only_flag; +static u32 skip_nal_count = 500; +/* +bit 0, fast output first I picture +*/ +static u32 fast_output_enable = 1; + +static u32 frmbase_cont_bitlevel = 0x60; + +/* +use_cma: 1, use both reserver memory and cma for buffers +2, only use cma for buffers +*/ +static u32 use_cma = 2; + +#define AUX_BUF_ALIGN(adr) ((adr + 0xf) & (~0xf)) +/* +static u32 prefix_aux_buf_size = (16 * 1024); +static u32 suffix_aux_buf_size; +*/ +static u32 prefix_aux_buf_size = (12 * 1024); +static u32 suffix_aux_buf_size = (12 * 1024); + +static u32 max_decoding_time; +/* + *error handling + */ +/*error_handle_policy: + *bit 0: 0, auto skip error_skip_nal_count nals before error recovery; + *1, skip error_skip_nal_count nals before error recovery; + *bit 1 (valid only when bit0 == 1): + *1, wait vps/sps/pps after error recovery; + *bit 2 (valid only when bit0 == 0): + *0, auto search after error recovery (hevc_recover() called); + *1, manual search after error recovery + *(change to auto search after get IDR: WRITE_VREG(NAL_SEARCH_CTL, 0x2)) + * + *bit 4: 0, set error_mark after reset/recover + * 1, do not set error_mark after reset/recover + * + *bit 5: 0, check total lcu for every picture + * 1, do not check total lcu + * + *bit 6: 0, do not check head error + * 1, check head error + * + *bit 7: 0, allow to print over decode + * 1, NOT allow to print over decode + * + *bit 8: 0, use interlace policy + * 1, NOT use interlace policy + *bit 9: 0, discard dirty data on playback start + * 1, do not discard dirty data on playback start + * + */ + +static u32 error_handle_policy; +static u32 error_skip_nal_count = 6; +static u32 error_handle_threshold = 30; +static u32 error_handle_nal_skip_threshold = 10; +static u32 error_handle_system_threshold = 30; +static u32 interlace_enable = 1; +static u32 fr_hint_status; + + /* + *parser_sei_enable: + * bit 
0, sei; + * bit 1, sei_suffix (fill aux buf) + * bit 2, fill sei to aux buf (when bit 0 is 1) + * bit 8, debug flag + */ +static u32 parser_sei_enable; +static u32 parser_dolby_vision_enable = 1; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static u32 dolby_meta_with_el; +static u32 dolby_el_flush_th = 2; +#endif +/* this is only for h265 mmu enable */ + +static u32 mmu_enable = 1; +static u32 mmu_enable_force; +static u32 work_buf_size; +static unsigned int force_disp_pic_index; +static unsigned int disp_vframe_valve_level; +static int pre_decode_buf_level = 0x1000; +static unsigned int pic_list_debug; +#ifdef HEVC_8K_LFTOFFSET_FIX + /* performance_profile: bit 0, multi slice in ucode + */ +static unsigned int performance_profile = 1; +#endif +#ifdef MULTI_INSTANCE_SUPPORT +static unsigned int max_decode_instance_num + = MAX_DECODE_INSTANCE_NUM; +static unsigned int decode_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int display_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int max_process_time[MAX_DECODE_INSTANCE_NUM]; +static unsigned int max_get_frame_interval[MAX_DECODE_INSTANCE_NUM]; +static unsigned int run_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int input_empty[MAX_DECODE_INSTANCE_NUM]; +static unsigned int not_run_ready[MAX_DECODE_INSTANCE_NUM]; +static unsigned int ref_frame_mark_flag[MAX_DECODE_INSTANCE_NUM] = +{1, 1, 1, 1, 1, 1, 1, 1, 1}; + +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +static unsigned char get_idx(struct hevc_state_s *hevc); +#endif + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static u32 dv_toggle_prov_name; + +static u32 dv_debug; + +static u32 force_bypass_dvenl; +#endif +#endif + +/* + *[3:0] 0: default use config from omx. + * 1: force enable fence. + * 2: disable fence. + *[7:4] 0: fence use for driver. + * 1: fence fd use for app. 
+ */ +static u32 force_config_fence; + +/* + *The parameter sps_max_dec_pic_buffering_minus1_0+1 + *in SPS is the minimum DPB size required for stream + *(note: this parameter does not include the frame + *currently being decoded) +1 (decoding the current + *frame) +1 (decoding the current frame will only + *update refrence frame information, such as reference + *relation, when the next frame is decoded) + */ +static u32 detect_stuck_buffer_margin = 3; + + +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +#define get_dbg_flag(hevc) ((debug_mask & (1 << hevc->index)) ? debug : 0) +#define get_dbg_flag2(hevc) ((debug_mask & (1 << get_idx(hevc))) ? debug : 0) +#define is_log_enable(hevc) ((log_mask & (1 << hevc->index)) ? 1 : 0) +#else +#define get_dbg_flag(hevc) debug +#define get_dbg_flag2(hevc) debug +#define is_log_enable(hevc) (log_mask ? 1 : 0) +#define get_valid_double_write_mode(hevc) double_write_mode +#define get_buf_alloc_width(hevc) buf_alloc_width +#define get_buf_alloc_height(hevc) buf_alloc_height +#define get_dynamic_buf_num_margin(hevc) dynamic_buf_num_margin +#endif +#define get_buffer_mode(hevc) buffer_mode + + +static DEFINE_SPINLOCK(lock); +struct task_struct *h265_task = NULL; +#undef DEBUG_REG +#ifdef DEBUG_REG +void WRITE_VREG_DBG(unsigned adr, unsigned val) +{ + if (debug & H265_DEBUG_REG) + pr_info("%s(%x, %x)\n", __func__, adr, val); + WRITE_VREG(adr, val); +} + +#undef WRITE_VREG +#define WRITE_VREG WRITE_VREG_DBG +#endif +extern u32 trickmode_i; + +static DEFINE_MUTEX(vh265_mutex); + +static DEFINE_MUTEX(vh265_log_mutex); + +//static struct vdec_info *gvs; + +static u32 without_display_mode; + +static u32 mv_buf_dynamic_alloc; + +/************************************************** + * + *h265 buffer management include + * + *************************************************** + */ +enum NalUnitType { + NAL_UNIT_CODED_SLICE_TRAIL_N = 0, /* 0 */ + NAL_UNIT_CODED_SLICE_TRAIL_R, /* 1 */ + + NAL_UNIT_CODED_SLICE_TSA_N, /* 2 */ + /* Current name in the 
spec: TSA_R */ + NAL_UNIT_CODED_SLICE_TLA, /* 3 */ + + NAL_UNIT_CODED_SLICE_STSA_N, /* 4 */ + NAL_UNIT_CODED_SLICE_STSA_R, /* 5 */ + + NAL_UNIT_CODED_SLICE_RADL_N, /* 6 */ + /* Current name in the spec: RADL_R */ + NAL_UNIT_CODED_SLICE_DLP, /* 7 */ + + NAL_UNIT_CODED_SLICE_RASL_N, /* 8 */ + /* Current name in the spec: RASL_R */ + NAL_UNIT_CODED_SLICE_TFD, /* 9 */ + + NAL_UNIT_RESERVED_10, + NAL_UNIT_RESERVED_11, + NAL_UNIT_RESERVED_12, + NAL_UNIT_RESERVED_13, + NAL_UNIT_RESERVED_14, + NAL_UNIT_RESERVED_15, + + /* Current name in the spec: BLA_W_LP */ + NAL_UNIT_CODED_SLICE_BLA, /* 16 */ + /* Current name in the spec: BLA_W_DLP */ + NAL_UNIT_CODED_SLICE_BLANT, /* 17 */ + NAL_UNIT_CODED_SLICE_BLA_N_LP, /* 18 */ + /* Current name in the spec: IDR_W_DLP */ + NAL_UNIT_CODED_SLICE_IDR, /* 19 */ + NAL_UNIT_CODED_SLICE_IDR_N_LP, /* 20 */ + NAL_UNIT_CODED_SLICE_CRA, /* 21 */ + NAL_UNIT_RESERVED_22, + NAL_UNIT_RESERVED_23, + + NAL_UNIT_RESERVED_24, + NAL_UNIT_RESERVED_25, + NAL_UNIT_RESERVED_26, + NAL_UNIT_RESERVED_27, + NAL_UNIT_RESERVED_28, + NAL_UNIT_RESERVED_29, + NAL_UNIT_RESERVED_30, + NAL_UNIT_RESERVED_31, + + NAL_UNIT_VPS, /* 32 */ + NAL_UNIT_SPS, /* 33 */ + NAL_UNIT_PPS, /* 34 */ + NAL_UNIT_ACCESS_UNIT_DELIMITER, /* 35 */ + NAL_UNIT_EOS, /* 36 */ + NAL_UNIT_EOB, /* 37 */ + NAL_UNIT_FILLER_DATA, /* 38 */ + NAL_UNIT_SEI, /* 39 Prefix SEI */ + NAL_UNIT_SEI_SUFFIX, /* 40 Suffix SEI */ + NAL_UNIT_RESERVED_41, + NAL_UNIT_RESERVED_42, + NAL_UNIT_RESERVED_43, + NAL_UNIT_RESERVED_44, + NAL_UNIT_RESERVED_45, + NAL_UNIT_RESERVED_46, + NAL_UNIT_RESERVED_47, + NAL_UNIT_UNSPECIFIED_48, + NAL_UNIT_UNSPECIFIED_49, + NAL_UNIT_UNSPECIFIED_50, + NAL_UNIT_UNSPECIFIED_51, + NAL_UNIT_UNSPECIFIED_52, + NAL_UNIT_UNSPECIFIED_53, + NAL_UNIT_UNSPECIFIED_54, + NAL_UNIT_UNSPECIFIED_55, + NAL_UNIT_UNSPECIFIED_56, + NAL_UNIT_UNSPECIFIED_57, + NAL_UNIT_UNSPECIFIED_58, + NAL_UNIT_UNSPECIFIED_59, + NAL_UNIT_UNSPECIFIED_60, + NAL_UNIT_UNSPECIFIED_61, + NAL_UNIT_UNSPECIFIED_62, + 
NAL_UNIT_UNSPECIFIED_63, + NAL_UNIT_INVALID, +}; + +/* --------------------------------------------------- */ +/* Amrisc Software Interrupt */ +/* --------------------------------------------------- */ +#define AMRISC_STREAM_EMPTY_REQ 0x01 +#define AMRISC_PARSER_REQ 0x02 +#define AMRISC_MAIN_REQ 0x04 + +/* --------------------------------------------------- */ +/* HEVC_DEC_STATUS define */ +/* --------------------------------------------------- */ +#define HEVC_DEC_IDLE 0x0 +#define HEVC_NAL_UNIT_VPS 0x1 +#define HEVC_NAL_UNIT_SPS 0x2 +#define HEVC_NAL_UNIT_PPS 0x3 +#define HEVC_NAL_UNIT_CODED_SLICE_SEGMENT 0x4 +#define HEVC_CODED_SLICE_SEGMENT_DAT 0x5 +#define HEVC_SLICE_DECODING 0x6 +#define HEVC_NAL_UNIT_SEI 0x7 +#define HEVC_SLICE_SEGMENT_DONE 0x8 +#define HEVC_NAL_SEARCH_DONE 0x9 +#define HEVC_DECPIC_DATA_DONE 0xa +#define HEVC_DECPIC_DATA_ERROR 0xb +#define HEVC_SEI_DAT 0xc +#define HEVC_SEI_DAT_DONE 0xd +#define HEVC_NAL_DECODE_DONE 0xe +#define HEVC_OVER_DECODE 0xf + +#define HEVC_DATA_REQUEST 0x12 + +#define HEVC_DECODE_BUFEMPTY 0x20 +#define HEVC_DECODE_TIMEOUT 0x21 +#define HEVC_SEARCH_BUFEMPTY 0x22 +#define HEVC_DECODE_OVER_SIZE 0x23 +#define HEVC_DECODE_BUFEMPTY2 0x24 +#define HEVC_FIND_NEXT_PIC_NAL 0x50 +#define HEVC_FIND_NEXT_DVEL_NAL 0x51 + +#define HEVC_DUMP_LMEM 0x30 + +#define HEVC_4k2k_60HZ_NOT_SUPPORT 0x80 +#define HEVC_DISCARD_NAL 0xf0 +#define HEVC_ACTION_DEC_CONT 0xfd +#define HEVC_ACTION_ERROR 0xfe +#define HEVC_ACTION_DONE 0xff + +/* --------------------------------------------------- */ +/* Include "parser_cmd.h" */ +/* --------------------------------------------------- */ +#define PARSER_CMD_SKIP_CFG_0 0x0000090b + +#define PARSER_CMD_SKIP_CFG_1 0x1b14140f + +#define PARSER_CMD_SKIP_CFG_2 0x001b1910 + +#define PARSER_CMD_NUMBER 37 + +/************************************************** + * + *h265 buffer management + * + *************************************************** + */ +/* #define BUFFER_MGR_ONLY */ +/* #define 
CONFIG_HEVC_CLK_FORCED_ON */ +/* #define ENABLE_SWAP_TEST */ +#define MCRCC_ENABLE +#define INVALID_POC 0x80000000 + +#define HEVC_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0 +#define HEVC_RPM_BUFFER HEVC_ASSIST_SCRATCH_1 +#define HEVC_SHORT_TERM_RPS HEVC_ASSIST_SCRATCH_2 +#define HEVC_VPS_BUFFER HEVC_ASSIST_SCRATCH_3 +#define HEVC_SPS_BUFFER HEVC_ASSIST_SCRATCH_4 +#define HEVC_PPS_BUFFER HEVC_ASSIST_SCRATCH_5 +#define HEVC_SAO_UP HEVC_ASSIST_SCRATCH_6 +#define HEVC_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7 +#define HEVC_STREAM_SWAP_BUFFER2 HEVC_ASSIST_SCRATCH_8 +#define HEVC_sao_mem_unit HEVC_ASSIST_SCRATCH_9 +#define HEVC_SAO_ABV HEVC_ASSIST_SCRATCH_A +#define HEVC_sao_vb_size HEVC_ASSIST_SCRATCH_B +#define HEVC_SAO_VB HEVC_ASSIST_SCRATCH_C +#define HEVC_SCALELUT HEVC_ASSIST_SCRATCH_D +#define HEVC_WAIT_FLAG HEVC_ASSIST_SCRATCH_E +#define RPM_CMD_REG HEVC_ASSIST_SCRATCH_F +#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_F +#ifdef ENABLE_SWAP_TEST +#define HEVC_STREAM_SWAP_TEST HEVC_ASSIST_SCRATCH_L +#endif + +/*#define HEVC_DECODE_PIC_BEGIN_REG HEVC_ASSIST_SCRATCH_M*/ +/*#define HEVC_DECODE_PIC_NUM_REG HEVC_ASSIST_SCRATCH_N*/ +#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N + /*do not define ENABLE_SWAP_TEST*/ +#define HEVC_AUX_ADR HEVC_ASSIST_SCRATCH_L +#define HEVC_AUX_DATA_SIZE HEVC_ASSIST_SCRATCH_M + +#define DEBUG_REG1 HEVC_ASSIST_SCRATCH_G +#define DEBUG_REG2 HEVC_ASSIST_SCRATCH_H +/* + *ucode parser/search control + *bit 0: 0, header auto parse; 1, header manual parse + *bit 1: 0, auto skip for noneseamless stream; 1, no skip + *bit [3:2]: valid when bit1==0; + *0, auto skip nal before first vps/sps/pps/idr; + *1, auto skip nal before first vps/sps/pps + *2, auto skip nal before first vps/sps/pps, + * and not decode until the first I slice (with slice address of 0) + * + *3, auto skip before first I slice (nal_type >=16 && nal_type<=21) + *bit [15:4] nal skip count (valid when bit0 == 1 (manual mode) ) + *bit [16]: for NAL_UNIT_EOS when bit0 is 0: + * 0, send SEARCH_DONE 
to arm ; 1, do not send SEARCH_DONE to arm + *bit [17]: for NAL_SEI when bit0 is 0: + * 0, do not parse/fetch SEI in ucode; + * 1, parse/fetch SEI in ucode + *bit [18]: for NAL_SEI_SUFFIX when bit0 is 0: + * 0, do not fetch NAL_SEI_SUFFIX to aux buf; + * 1, fetch NAL_SEL_SUFFIX data to aux buf + *bit [19]: + * 0, parse NAL_SEI in ucode + * 1, fetch NAL_SEI to aux buf + *bit [20]: for DOLBY_VISION_META + * 0, do not fetch DOLBY_VISION_META to aux buf + * 1, fetch DOLBY_VISION_META to aux buf + */ +#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I + /*read only*/ +#define CUR_NAL_UNIT_TYPE HEVC_ASSIST_SCRATCH_J + /* + [15 : 8] rps_set_id + [7 : 0] start_decoding_flag + */ +#define HEVC_DECODE_INFO HEVC_ASSIST_SCRATCH_1 + /*set before start decoder*/ +#define HEVC_DECODE_MODE HEVC_ASSIST_SCRATCH_J +#define HEVC_DECODE_MODE2 HEVC_ASSIST_SCRATCH_H +#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K + +#define DECODE_MODE_SINGLE 0x0 +#define DECODE_MODE_MULTI_FRAMEBASE 0x1 +#define DECODE_MODE_MULTI_STREAMBASE 0x2 +#define DECODE_MODE_MULTI_DVBAL 0x3 +#define DECODE_MODE_MULTI_DVENL 0x4 + +#define MAX_INT 0x7FFFFFFF + +#define RPM_BEGIN 0x100 +#define modification_list_cur 0x148 +#define RPM_END 0x180 +#ifdef SUPPORT_LONG_TERM_RPS +/* + */ +#define RPS_END 0x8000 +#define RPS_LT_BIT 14 +#define RPS_USED_BIT 13 +#define RPS_SIGN_BIT 12 + + +#else +#define RPS_END 0x8000 +#define RPS_USED_BIT 14 +#define RPS_SIGN_BIT 13 +#endif +/* MISC_FLAG0 */ +#define PCM_LOOP_FILTER_DISABLED_FLAG_BIT 0 +#define PCM_ENABLE_FLAG_BIT 1 +#define LOOP_FILER_ACROSS_TILES_ENABLED_FLAG_BIT 2 +#define PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT 3 +#define DEBLOCKING_FILTER_OVERRIDE_ENABLED_FLAG_BIT 4 +#define PPS_DEBLOCKING_FILTER_DISABLED_FLAG_BIT 5 +#define DEBLOCKING_FILTER_OVERRIDE_FLAG_BIT 6 +#define SLICE_DEBLOCKING_FILTER_DISABLED_FLAG_BIT 7 +#define SLICE_SAO_LUMA_FLAG_BIT 8 +#define SLICE_SAO_CHROMA_FLAG_BIT 9 +#define SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT 10 + +union param_u { + 
struct { + unsigned short data[RPM_END - RPM_BEGIN]; + } l; + struct { + /* from ucode lmem, do not change this struct */ + unsigned short CUR_RPS[0x10]; + unsigned short num_ref_idx_l0_active; + unsigned short num_ref_idx_l1_active; + unsigned short slice_type; + unsigned short slice_temporal_mvp_enable_flag; + unsigned short dependent_slice_segment_flag; + unsigned short slice_segment_address; + unsigned short num_title_rows_minus1; + unsigned short pic_width_in_luma_samples; + unsigned short pic_height_in_luma_samples; + unsigned short log2_min_coding_block_size_minus3; + unsigned short log2_diff_max_min_coding_block_size; + unsigned short log2_max_pic_order_cnt_lsb_minus4; + unsigned short POClsb; + unsigned short collocated_from_l0_flag; + unsigned short collocated_ref_idx; + unsigned short log2_parallel_merge_level; + unsigned short five_minus_max_num_merge_cand; + unsigned short sps_num_reorder_pics_0; + unsigned short modification_flag; + unsigned short tiles_enabled_flag; + unsigned short num_tile_columns_minus1; + unsigned short num_tile_rows_minus1; + unsigned short tile_width[12]; + unsigned short tile_height[8]; + unsigned short misc_flag0; + unsigned short pps_beta_offset_div2; + unsigned short pps_tc_offset_div2; + unsigned short slice_beta_offset_div2; + unsigned short slice_tc_offset_div2; + unsigned short pps_cb_qp_offset; + unsigned short pps_cr_qp_offset; + unsigned short first_slice_segment_in_pic_flag; + unsigned short m_temporalId; + unsigned short m_nalUnitType; + + unsigned short vui_num_units_in_tick_hi; + unsigned short vui_num_units_in_tick_lo; + unsigned short vui_time_scale_hi; + unsigned short vui_time_scale_lo; + unsigned short bit_depth; + unsigned short profile_etc; + unsigned short sei_frame_field_info; + unsigned short video_signal_type; + unsigned short modification_list[0x20]; + unsigned short conformance_window_flag; + unsigned short conf_win_left_offset; + unsigned short conf_win_right_offset; + unsigned short 
conf_win_top_offset; + unsigned short conf_win_bottom_offset; + unsigned short chroma_format_idc; + unsigned short color_description; + unsigned short aspect_ratio_idc; + unsigned short sar_width; + unsigned short sar_height; + unsigned short sps_max_dec_pic_buffering_minus1_0; + } p; +}; + +#define RPM_BUF_SIZE (0x80*2) +/* non mmu mode lmem size : 0x400, mmu mode : 0x500*/ +#define LMEM_BUF_SIZE (0x500 * 2) + +struct buff_s { + u32 buf_start; + u32 buf_size; + u32 buf_end; +}; + +struct BuffInfo_s { + u32 max_width; + u32 max_height; + unsigned int start_adr; + unsigned int end_adr; + struct buff_s ipp; + struct buff_s sao_abv; + struct buff_s sao_vb; + struct buff_s short_term_rps; + struct buff_s vps; + struct buff_s sps; + struct buff_s pps; + struct buff_s sao_up; + struct buff_s swap_buf; + struct buff_s swap_buf2; + struct buff_s scalelut; + struct buff_s dblk_para; + struct buff_s dblk_data; + struct buff_s dblk_data2; + struct buff_s mmu_vbh; + struct buff_s cm_header; + struct buff_s mpred_above; +#ifdef MV_USE_FIXED_BUF + struct buff_s mpred_mv; +#endif + struct buff_s rpm; + struct buff_s lmem; +}; + +//#define VBH_BUF_SIZE (2 * 16 * 2304) +//#define VBH_BUF_COUNT 4 + + /*mmu_vbh buf is used by HEVC_SAO_MMU_VH0_ADDR, HEVC_SAO_MMU_VH1_ADDR*/ +#define VBH_BUF_SIZE_1080P 0x3000 +#define VBH_BUF_SIZE_4K 0x5000 +#define VBH_BUF_SIZE_8K 0xa000 +#define VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh.buf_size / 2) + /*mmu_vbh_dw buf is used by HEVC_SAO_MMU_VH0_ADDR2,HEVC_SAO_MMU_VH1_ADDR2, + HEVC_DW_VH0_ADDDR, HEVC_DW_VH1_ADDDR*/ +#define DW_VBH_BUF_SIZE_1080P (VBH_BUF_SIZE_1080P * 2) +#define DW_VBH_BUF_SIZE_4K (VBH_BUF_SIZE_4K * 2) +#define DW_VBH_BUF_SIZE_8K (VBH_BUF_SIZE_8K * 2) +#define DW_VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh_dw.buf_size / 4) + +/* necessary 4K page size align for t7/t3 decoder and after */ +#define WORKBUF_ALIGN(addr) (ALIGN(addr, PAGE_SIZE)) + +#define WORK_BUF_SPEC_NUM 6 +static struct BuffInfo_s amvh265_workbuff_spec[WORK_BUF_SPEC_NUM] = 
{ + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x30000, + }, + .sao_vb = { + .buf_size = 0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = + * 32Kbytes (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = { +#ifdef SUPPORT_10BIT + .buf_size = 0x40000, +#else + /* DBLK -> Max 256(4096/16) LCU, each para + *512bytes(total:0x20000), data 1024bytes(total:0x40000) + */ + .buf_size = 0x20000, +#endif + }, + .dblk_data = { + .buf_size = 0x40000, + }, + .dblk_data2 = { + .buf_size = 0x80000 * 2, + }, /*dblk data for adapter*/ + .mmu_vbh = { + .buf_size = 0x5000, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = {/* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (MAX_REF_PIC_NUM + 1), + }, +#endif + .mpred_above = { + .buf_size = 0x8000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = {/* 1080p, 0x40000 per buffer */ + .buf_size = 0x40000 * MAX_REF_PIC_NUM, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x500 * 2, + } + }, + { + 
.max_width = 4096, + .max_height = 2048, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x30000, + }, + .sao_vb = { + .buf_size = 0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + * (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, each para + * 512bytes(total:0x20000), + * data 1024bytes(total:0x40000) + */ + .buf_size = 0x20000, + }, + .dblk_data = { + .buf_size = 0x80000, + }, + .dblk_data2 = { + .buf_size = 0x80000, + }, /*dblk data for adapter*/ + .mmu_vbh = { + .buf_size = 0x5000, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = {/*0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (MAX_REF_PIC_NUM + 1), + }, +#endif + .mpred_above = { + .buf_size = 0x8000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = MPRED_4K_MV_BUF_SIZE * MAX_REF_PIC_NUM, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x500 * 2, + } + }, + + { + 
.max_width = 4096*2, + .max_height = 2048*2, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000*2, + }, + .sao_abv = { + .buf_size = 0x30000*2, + }, + .sao_vb = { + .buf_size = 0x30000*2, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .sps = { + // SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .pps = { + // PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total 0x2000 bytes + .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0x2800*2, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x8000*2, + }, + .dblk_para = {.buf_size = 0x40000*2, }, // dblk parameter + .dblk_data = {.buf_size = 0x80000*2, }, // dblk data for left/top + .dblk_data2 = {.buf_size = 0x80000*2, }, // dblk data for adapter + .mmu_vbh = { + .buf_size = 0x5000*2, //2*16*2304/4, 4K + }, +#if 0 + .cm_header = { + .buf_size = MMU_COMPRESS_8K_HEADER_SIZE * + MAX_REF_PIC_NUM, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif + .mpred_above = { + .buf_size = 0x8000*2, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + .buf_size = MPRED_8K_MV_BUF_SIZE * MAX_REF_PIC_NUM, //4k2k , 0x120000 per buffer + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x500 * 2, + }, + }, + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = {/*checked*/ + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x1e00, + }, + .sao_abv = { + 
.buf_size = 0, //0x30000, + }, + .sao_vb = { + .buf_size = 0, //0x30000, + }, + .short_term_rps = {/*checked*/ + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = {/*checked*/ + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = {/*checked*/ + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = {/*checked*/ + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0, //0x2800, + }, + .swap_buf = {/*checked*/ + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = {/*checked*/ + .buf_size = 0x800, + }, + .scalelut = {/*checked*/ + /* support up to 32 SCALELUT 1024x32 = + * 32Kbytes (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = {.buf_size = 0x14500, }, // dblk parameter + .dblk_data = {.buf_size = 0x62800, }, // dblk data for left/top + .dblk_data2 = {.buf_size = 0x22800, }, // dblk data for adapter + .mmu_vbh = {/*checked*/ + .buf_size = VBH_BUF_SIZE_1080P, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = {/*checked*//* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE_1080P * + (MAX_REF_PIC_NUM + 1), + }, +#endif + .mpred_above = {/*checked*/ + .buf_size = 0x1e00, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = {/*checked*//* 1080p, 0x40000 per buffer */ + .buf_size = MPRED_MV_BUF_SIZE * MAX_REF_PIC_NUM, + }, +#endif + .rpm = {/*checked*/ + .buf_size = RPM_BUF_SIZE, + }, + .lmem = {/*checked*/ + .buf_size = 0x500 * 2, + } + }, + { + .max_width = 4096, + .max_height = 2048, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x4000, + }, + 
.sao_abv = { + .buf_size = 0, //0x30000, + }, + .sao_vb = { + .buf_size = 0, //0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0, //0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + * (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = {.buf_size = 0x19100, }, // dblk parameter + .dblk_data = {.buf_size = 0x88800, }, // dblk data for left/top + .dblk_data2 = {.buf_size = 0x48800, }, // dblk data for adapter + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_4K, /*2*16*2304/4, 4K*/ + }, +#if 0 + .cm_header = {/*0x44000 = ((1088*2*1024*4)/32/4)*(32/8)*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE_4K * + (MAX_REF_PIC_NUM + 1), + }, +#endif + .mpred_above = { + .buf_size = 0x4000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = MPRED_4K_MV_BUF_SIZE * MAX_REF_PIC_NUM, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x500 * 2, + } + }, + + { + .max_width = 4096*2, + .max_height = 2048*2, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000*2, + }, + .sao_abv = { + .buf_size = 0, //0x30000*2, + }, + .sao_vb = { + 
.buf_size = 0, //0x30000*2, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .sps = { + // SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .pps = { + // PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total 0x2000 bytes + .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0, //0x2800*2, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x8000, //0x8000*2, + }, + .dblk_para = {.buf_size = 0x32100, }, // dblk parameter + .dblk_data = {.buf_size = 0x110800, }, // dblk data for left/top + .dblk_data2 = {.buf_size = 0x90800, }, // dblk data for adapter + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_8K, //2*16*2304/4, 4K + }, +#if 0 + .cm_header = { + .buf_size = MMU_COMPRESS_HEADER_SIZE_8K * + MAX_REF_PIC_NUM, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif + .mpred_above = { + .buf_size = 0x8000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + .buf_size = MPRED_8K_MV_BUF_SIZE * MAX_REF_PIC_NUM, //4k2k , 0x120000 per buffer + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x500 * 2, + }, + } +}; + +static void init_buff_spec(struct hevc_state_s *hevc, + struct BuffInfo_s *buf_spec) +{ + buf_spec->ipp.buf_start = + WORKBUF_ALIGN(buf_spec->start_adr); + buf_spec->sao_abv.buf_start = + WORKBUF_ALIGN(buf_spec->ipp.buf_start + buf_spec->ipp.buf_size); + buf_spec->sao_vb.buf_start = + WORKBUF_ALIGN(buf_spec->sao_abv.buf_start + buf_spec->sao_abv.buf_size); + buf_spec->short_term_rps.buf_start = + 
WORKBUF_ALIGN(buf_spec->sao_vb.buf_start + buf_spec->sao_vb.buf_size); + buf_spec->vps.buf_start = + WORKBUF_ALIGN(buf_spec->short_term_rps.buf_start + buf_spec->short_term_rps.buf_size); + buf_spec->sps.buf_start = + WORKBUF_ALIGN(buf_spec->vps.buf_start + buf_spec->vps.buf_size); + buf_spec->pps.buf_start = + WORKBUF_ALIGN(buf_spec->sps.buf_start + buf_spec->sps.buf_size); + buf_spec->sao_up.buf_start = + WORKBUF_ALIGN(buf_spec->pps.buf_start + buf_spec->pps.buf_size); + buf_spec->swap_buf.buf_start = + WORKBUF_ALIGN(buf_spec->sao_up.buf_start + buf_spec->sao_up.buf_size); + buf_spec->swap_buf2.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf.buf_start + buf_spec->swap_buf.buf_size); + buf_spec->scalelut.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf2.buf_start + buf_spec->swap_buf2.buf_size); + buf_spec->dblk_para.buf_start = + WORKBUF_ALIGN(buf_spec->scalelut.buf_start + buf_spec->scalelut.buf_size); + buf_spec->dblk_data.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_para.buf_start + buf_spec->dblk_para.buf_size); + buf_spec->dblk_data2.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data.buf_start + buf_spec->dblk_data.buf_size); + buf_spec->mmu_vbh.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data2.buf_start + buf_spec->dblk_data2.buf_size); + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size); +#ifdef MV_USE_FIXED_BUF + buf_spec->mpred_mv.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_mv.buf_start + buf_spec->mpred_mv.buf_size); +#else + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); +#endif + buf_spec->lmem.buf_start = + WORKBUF_ALIGN(buf_spec->rpm.buf_start + buf_spec->rpm.buf_size); + buf_spec->end_adr = + WORKBUF_ALIGN(buf_spec->lmem.buf_start + buf_spec->lmem.buf_size); + + if (hevc && get_dbg_flag2(hevc)) { + hevc_print(hevc, 0, + "%s 
workspace (%x %x) size = %x\n", __func__, + buf_spec->start_adr, buf_spec->end_adr, + buf_spec->end_adr - buf_spec->start_adr); + + hevc_print(hevc, 0, + "ipp.buf_start :%x\n", + buf_spec->ipp.buf_start); + hevc_print(hevc, 0, + "sao_abv.buf_start :%x\n", + buf_spec->sao_abv.buf_start); + hevc_print(hevc, 0, + "sao_vb.buf_start :%x\n", + buf_spec->sao_vb.buf_start); + hevc_print(hevc, 0, + "short_term_rps.buf_start :%x\n", + buf_spec->short_term_rps.buf_start); + hevc_print(hevc, 0, + "vps.buf_start :%x\n", + buf_spec->vps.buf_start); + hevc_print(hevc, 0, + "sps.buf_start :%x\n", + buf_spec->sps.buf_start); + hevc_print(hevc, 0, + "pps.buf_start :%x\n", + buf_spec->pps.buf_start); + hevc_print(hevc, 0, + "sao_up.buf_start :%x\n", + buf_spec->sao_up.buf_start); + hevc_print(hevc, 0, + "swap_buf.buf_start :%x\n", + buf_spec->swap_buf.buf_start); + hevc_print(hevc, 0, + "swap_buf2.buf_start :%x\n", + buf_spec->swap_buf2.buf_start); + hevc_print(hevc, 0, + "scalelut.buf_start :%x\n", + buf_spec->scalelut.buf_start); + hevc_print(hevc, 0, + "dblk_para.buf_start :%x\n", + buf_spec->dblk_para.buf_start); + hevc_print(hevc, 0, + "dblk_data.buf_start :%x\n", + buf_spec->dblk_data.buf_start); + hevc_print(hevc, 0, + "dblk_data2.buf_start :%x\n", + buf_spec->dblk_data2.buf_start); + hevc_print(hevc, 0, + "mpred_above.buf_start :%x\n", + buf_spec->mpred_above.buf_start); +#ifdef MV_USE_FIXED_BUF + hevc_print(hevc, 0, + "mpred_mv.buf_start :%x\n", + buf_spec->mpred_mv.buf_start); +#endif + if ((get_dbg_flag2(hevc) + & + H265_DEBUG_SEND_PARAM_WITH_REG) + == 0) { + hevc_print(hevc, 0, + "rpm.buf_start :%x\n", + buf_spec->rpm.buf_start); + } + } + +} + +enum SliceType { + B_SLICE, + P_SLICE, + I_SLICE +}; + +/*USE_BUF_BLOCK*/ +struct BUF_s { + ulong start_adr; + u32 size; + u32 luma_size; + ulong header_addr; + u32 header_size; + int used_flag; + ulong v4l_ref_buf_addr; + ulong chroma_addr; + u32 chroma_size; +} /*BUF_t */; + +/* level 6, 6.1 maximum slice number is 800; other is 
200 */ +#define MAX_SLICE_NUM 800 +struct PIC_s { + int index; + int scatter_alloc; + int BUF_index; + int mv_buf_index; + int POC; + int decode_idx; + int slice_type; + int RefNum_L0; + int RefNum_L1; + int num_reorder_pic; + int stream_offset; + unsigned char referenced; + unsigned char output_mark; + unsigned char recon_mark; + unsigned char output_ready; + unsigned char error_mark; + //dis_mark = 0:discard mark,dis_mark = 1:no discard mark + unsigned char dis_mark; + /**/ int slice_idx; + int m_aiRefPOCList0[MAX_SLICE_NUM][16]; + int m_aiRefPOCList1[MAX_SLICE_NUM][16]; +#ifdef SUPPORT_LONG_TERM_RPS + unsigned char long_term_ref; + unsigned char m_aiRefLTflgList0[MAX_SLICE_NUM][16]; + unsigned char m_aiRefLTflgList1[MAX_SLICE_NUM][16]; +#endif + /*buffer */ + unsigned int header_adr; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + unsigned char dv_enhance_exist; +#endif + char *aux_data_buf; + int aux_data_size; + unsigned long cma_alloc_addr; + struct page *alloc_pages; + unsigned int mpred_mv_wr_start_addr; + int mv_size; + unsigned int mc_y_adr; + unsigned int mc_u_v_adr; +#ifdef SUPPORT_10BIT + /*unsigned int comp_body_size;*/ + unsigned int dw_y_adr; + unsigned int dw_u_v_adr; +#endif + u32 luma_size; + u32 chroma_size; + + int mc_canvas_y; + int mc_canvas_u_v; + int width; + int height; + + int y_canvas_index; + int uv_canvas_index; +#ifdef MULTI_INSTANCE_SUPPORT + struct canvas_config_s canvas_config[2]; +#endif +#ifdef SUPPORT_10BIT + int mem_saving_mode; + u32 bit_depth_luma; + u32 bit_depth_chroma; +#endif +#ifdef LOSLESS_COMPRESS_MODE + unsigned int losless_comp_body_size; +#endif + unsigned char pic_struct; + int vf_ref; + + u32 pts; + u64 pts64; + u64 timestamp; + + u32 aspect_ratio_idc; + u32 sar_width; + u32 sar_height; + u32 double_write_mode; + u32 video_signal_type; + unsigned short conformance_window_flag; + unsigned short conf_win_left_offset; + unsigned short conf_win_right_offset; + unsigned short conf_win_top_offset; + unsigned 
short conf_win_bottom_offset; + unsigned short chroma_format_idc; + + /* picture qos infomation*/ + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; + + u32 hw_decode_time; + u32 frame_size; // For frame base mode + bool ip_mode; + u32 hdr10p_data_size; + char *hdr10p_data_buf; + struct dma_fence *fence; + bool show_frame; + int ctx_buf_idx; +} /*PIC_t */; + +#define MAX_TILE_COL_NUM 10 +#define MAX_TILE_ROW_NUM 20 +struct tile_s { + int width; + int height; + int start_cu_x; + int start_cu_y; + + unsigned int sao_vb_start_addr; + unsigned int sao_abv_start_addr; +}; + +#define SEI_MASTER_DISPLAY_COLOR_MASK 0x00000001 +#define SEI_CONTENT_LIGHT_LEVEL_MASK 0x00000002 +#define SEI_HDR10PLUS_MASK 0x00000004 +#define SEI_HDR_CUVA_MASK 0x00000008 + + +#define VF_POOL_SIZE 32 + +#ifdef MULTI_INSTANCE_SUPPORT +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_CONFIG_PARAM 3 +#define DEC_RESULT_ERROR 4 +#define DEC_INIT_PICLIST 5 +#define DEC_UNINIT_PICLIST 6 +#define DEC_RESULT_GET_DATA 7 +#define DEC_RESULT_GET_DATA_RETRY 8 +#define DEC_RESULT_EOS 9 +#define DEC_RESULT_FORCE_EXIT 10 +#define DEC_RESULT_FREE_CANVAS 11 + + +static void vh265_work(struct work_struct *work); +static void vh265_timeout_work(struct work_struct *work); +static void vh265_notify_work(struct work_struct *work); + +#endif + +struct debug_log_s { + struct list_head list; + uint8_t data; /*will alloc more size*/ +}; + +struct mh265_fence_vf_t { + u32 used_size; + struct vframe_s *fence_vf[VF_POOL_SIZE]; +}; + +struct hevc_state_s { +#ifdef MULTI_INSTANCE_SUPPORT + struct platform_device *platform_dev; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + struct vframe_chunk_s *chunk; + int dec_result; + u32 timeout_processing; + struct work_struct work; + struct work_struct timeout_work; + struct work_struct notify_work; + struct work_struct set_clk_work; + 
/* timeout handle */ + unsigned long int start_process_time; + unsigned int last_lcu_idx; + unsigned int decode_timeout_count; + unsigned int timeout_num; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + unsigned char switch_dvlayer_flag; + unsigned char no_switch_dvlayer_count; + unsigned char bypass_dvenl_enable; + unsigned char bypass_dvenl; +#endif + unsigned char start_parser_type; + /*start_decoding_flag: + vps/pps/sps/idr info from ucode*/ + unsigned char start_decoding_flag; + unsigned char rps_set_id; + unsigned char eos; + int pic_decoded_lcu_idx; + u8 over_decode; + u8 empty_flag; +#endif + struct vframe_s vframe_dummy; + char *provider_name; + int index; + struct device *cma_dev; + unsigned char m_ins_flag; + unsigned char dolby_enhance_flag; + unsigned long buf_start; + u32 buf_size; + u32 mv_buf_size; + + struct BuffInfo_s work_space_buf_store; + struct BuffInfo_s *work_space_buf; + + u8 aux_data_dirty; + u32 prefix_aux_size; + u32 suffix_aux_size; + void *aux_addr; + void *rpm_addr; + void *lmem_addr; + dma_addr_t aux_phy_addr; + dma_addr_t rpm_phy_addr; + dma_addr_t lmem_phy_addr; + + unsigned int pic_list_init_flag; + unsigned int use_cma_flag; + + unsigned short *rpm_ptr; + unsigned short *lmem_ptr; + unsigned short *debug_ptr; + int debug_ptr_size; + int pic_w; + int pic_h; + int lcu_x_num; + int lcu_y_num; + int lcu_total; + int lcu_size; + int lcu_size_log2; + int lcu_x_num_pre; + int lcu_y_num_pre; + int first_pic_after_recover; + + int num_tile_col; + int num_tile_row; + int tile_enabled; + int tile_x; + int tile_y; + int tile_y_x; + int tile_start_lcu_x; + int tile_start_lcu_y; + int tile_width_lcu; + int tile_height_lcu; + + int slice_type; + unsigned int slice_addr; + unsigned int slice_segment_addr; + + unsigned char interlace_flag; + unsigned char curr_pic_struct; + unsigned char frame_field_info_present_flag; + + unsigned short sps_num_reorder_pics_0; + unsigned short misc_flag0; + int m_temporalId; + int m_nalUnitType; + int 
TMVPFlag; + int isNextSliceSegment; + int LDCFlag; + int m_pocRandomAccess; + int plevel; + int MaxNumMergeCand; + + int new_pic; + int new_tile; + int curr_POC; + int iPrevPOC; +#ifdef MULTI_INSTANCE_SUPPORT + int decoded_poc; + struct PIC_s *decoding_pic; +#endif + int iPrevTid0POC; + int list_no; + int RefNum_L0; + int RefNum_L1; + int ColFromL0Flag; + int LongTerm_Curr; + int LongTerm_Col; + int Col_POC; + int LongTerm_Ref; +#ifdef MULTI_INSTANCE_SUPPORT + int m_pocRandomAccess_bak; + int curr_POC_bak; + int iPrevPOC_bak; + int iPrevTid0POC_bak; + unsigned char start_parser_type_bak; + unsigned char start_decoding_flag_bak; + unsigned char rps_set_id_bak; + int pic_decoded_lcu_idx_bak; + int decode_idx_bak; +#endif + struct PIC_s *cur_pic; + struct PIC_s *col_pic; + int skip_flag; + int decode_idx; + int slice_idx; + unsigned char have_vps; + unsigned char have_sps; + unsigned char have_pps; + unsigned char have_valid_start_slice; + unsigned char wait_buf; + unsigned char error_flag; + unsigned int error_skip_nal_count; + long used_4k_num; + + unsigned char + ignore_bufmgr_error; /* bit 0, for decoding; + bit 1, for displaying + bit 1 must be set if bit 0 is 1*/ + int PB_skip_mode; + int PB_skip_count_after_decoding; +#ifdef SUPPORT_10BIT + int mem_saving_mode; +#endif +#ifdef LOSLESS_COMPRESS_MODE + unsigned int losless_comp_body_size; +#endif + int pts_mode; + int last_lookup_pts; + int last_pts; + u64 last_lookup_pts_us64; + u64 last_pts_us64; + u32 shift_byte_count_lo; + u32 shift_byte_count_hi; + int pts_mode_switching_count; + int pts_mode_recovery_count; + + int pic_num; + + /**/ + union param_u param; + + struct tile_s m_tile[MAX_TILE_ROW_NUM][MAX_TILE_COL_NUM]; + + struct timer_list timer; + struct BUF_s m_BUF[BUF_POOL_SIZE]; + struct BUF_s m_mv_BUF[MAX_REF_PIC_NUM]; + struct PIC_s *m_PIC[MAX_REF_PIC_NUM]; + + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + 
DECLARE_KFIFO(pending_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + + u32 stat; + u32 frame_width; + u32 frame_height; + u32 frame_dur; + u32 frame_ar; + u32 bit_depth_luma; + u32 bit_depth_chroma; + u32 video_signal_type; + u32 video_signal_type_debug; + u32 saved_resolution; + bool get_frame_dur; + u32 error_watchdog_count; + u32 error_skip_nal_wt_cnt; + u32 error_system_watchdog_count; + +#ifdef DEBUG_PTS + unsigned long pts_missed; + unsigned long pts_hit; +#endif + struct dec_sysinfo vh265_amstream_dec_info; + unsigned char init_flag; + unsigned char first_sc_checked; + unsigned char uninit_list; + u32 start_decoding_time; + + int show_frame_num; +#ifdef USE_UNINIT_SEMA + struct semaphore h265_uninit_done_sema; +#endif + int fatal_error; + + + u32 sei_present_flag; + void *frame_mmu_map_addr; + dma_addr_t frame_mmu_map_phy_addr; + unsigned int mmu_mc_buf_start; + unsigned int mmu_mc_buf_end; + unsigned int mmu_mc_start_4k_adr; + void *mmu_box; + void *bmmu_box; + int mmu_enable; + + unsigned int dec_status; + + /* data for SEI_MASTER_DISPLAY_COLOR */ + unsigned int primaries[3][2]; + unsigned int white_point[2]; + unsigned int luminance[2]; + /* data for SEI_CONTENT_LIGHT_LEVEL */ + unsigned int content_light_level[2]; + + struct PIC_s *pre_top_pic; + struct PIC_s *pre_bot_pic; + +#ifdef MULTI_INSTANCE_SUPPORT + int double_write_mode; + int dynamic_buf_num_margin; + int start_action; + int save_buffer_mode; +#endif + u32 i_only; + struct list_head log_list; + u32 ucode_pause_pos; + u32 start_shift_bytes; + + atomic_t vf_pre_count; + atomic_t vf_get_count; + atomic_t vf_put_count; +#ifdef SWAP_HEVC_UCODE + dma_addr_t mc_dma_handle; + void *mc_cpu_addr; + int swap_size; + ulong swap_addr; +#endif +#ifdef DETREFILL_ENABLE + dma_addr_t detbuf_adr; + u16 *detbuf_adr_virt; + u8 delrefill_check; +#endif + u8 head_error_flag; + int valve_count; + struct firmware_s *fw; + int max_pic_w; + int max_pic_h; +#ifdef AGAIN_HAS_THRESHOLD + u8 
next_again_flag;
	u32 pre_parser_wr_ptr;
#endif
	u32 ratio_control;
	u32 first_pic_flag;
	u32 decode_size;
	/* protects hevc->chunk life-cycle between decode and reset paths */
	struct mutex chunks_mutex;
	int need_cache_size;
	u64 sc_start_time;
	u32 skip_nal_count;
	bool is_swap;
	bool is_4k;

	int frameinfo_enable;
	struct vframe_qos_s vframe_qos;
	/* v4l2 (multi-instance) vs amstream legacy path selector */
	bool is_used_v4l;
	void *v4l2_ctx;
	bool v4l_params_parsed;
	u32 mem_map_mode;
	u32 performance_profile;
	struct vdec_info *gvs;
	bool ip_mode;
	u32 kpi_first_i_comming;
	u32 kpi_first_i_decoded;
	int sidebind_type;
	int sidebind_channel_id;
	u32 again_count;
	u64 again_timeout_jiffies;
	u32 pre_parser_video_rp;
	u32 pre_parser_video_wp;
	bool dv_duallayer;
	u32 poc_error_count;
	u32 timeout_flag;
	ulong timeout;
	bool discard_dv_data;
	bool enable_fence;
	int fence_usage;
	int buffer_wrap[MAX_REF_PIC_NUM];
	int low_latency_flag;
	u32 metadata_config_flag;
	int last_width;
	int last_height;
	int used_buf_num;
	u32 dirty_shift_flag;
	u32 endian;
	ulong fb_token;
	int send_frame_flag;
	struct vdec_v4l2_buffer *pair_fb[2];
	struct mh265_fence_vf_t fence_vf_s;
	/* protects fence_vf_s between decode and vf_put paths */
	struct mutex fence_mutex;
	bool resolution_change;
	dma_addr_t rdma_phy_adr;
	unsigned *rdma_adr;
	bool no_need_aux_data;
	struct trace_decoder_name trace;
} /*hevc_stru_t */;

#ifdef AGAIN_HAS_THRESHOLD
static u32 again_threshold;
#endif
#ifdef SEND_LMEM_WITH_RPM
/*
 * Read one 16-bit parameter out of the LMEM mirror.
 * NOTE(review): the index expression mirrors the offset inside each
 * 4-halfword group (0<->3, 1<->2) — presumably a HW endianness fixup
 * of the DMA'd LMEM image; confirm against the ucode layout.
 */
#define get_lmem_params(hevc, ladr) \
	hevc->lmem_ptr[ladr - (ladr & 0x3) + 3 - (ladr & 0x3)]


/*
 * Size (bytes) of the frame MMU map: 4 bytes per page entry,
 * 8K-capable SoCs (>= SM1) need the larger table.
 */
static int get_frame_mmu_map_size(void)
{
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)
		return (MAX_FRAME_8K_NUM * 4);

	return (MAX_FRAME_4K_NUM * 4);
}

/*
 * Return true when w x h exceeds what this SoC can decode.
 * Max pixel budget: 8K for >= SM1, 2K for T5D, else 4K.
 */
static int is_oversize(int w, int h)
{
	int max = (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)?
MAX_SIZE_8K : MAX_SIZE_4K;

	if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D)
		max = MAX_SIZE_2K;

	if (w < 0 || h < 0)
		return true;

	/* w * h > max, written divide-style to avoid integer overflow */
	if (h != 0 && (w > max / h))
		return true;

	return false;
}

/*
 * Stricter oversize check: additionally rejects zero dimensions and
 * per-axis limits (8192x4608 on >= SM1, else 4096x2304).
 * Non-static: also used outside this file.
 */
int is_oversize_ex(int w, int h)
{
	int max = (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) ?
		MAX_SIZE_8K : MAX_SIZE_4K;

	if (w == 0 || h == 0)
		return true;
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) {
		if (w > 8192 || h > 4608)
			return true;
	} else {
		if (w > 4096 || h > 2304)
			return true;
	}

	if (w < 0 || h < 0)
		return true;

	/* overall pixel-count limit, overflow-safe form of w * h > max */
	if (h != 0 && (w > max / h))
		return true;

	return false;
}


/*
 * Sanity-check PCM bit depths parsed by the ucode (read back via LMEM).
 * Sets hevc->head_error_flag when the PCM depth exceeds the stream's
 * nominal bit depth.  Only active when error_handle_policy bit 6 is set.
 */
void check_head_error(struct hevc_state_s *hevc)
{
#define pcm_enabled_flag                                  0x040
#define pcm_sample_bit_depth_luma                         0x041
#define pcm_sample_bit_depth_chroma                       0x042
	hevc->head_error_flag = 0;
	if ((error_handle_policy & 0x40) == 0)
		return;
	if (get_lmem_params(hevc, pcm_enabled_flag)) {
		uint16_t pcm_depth_luma = get_lmem_params(
			hevc, pcm_sample_bit_depth_luma);
		uint16_t pcm_sample_chroma = get_lmem_params(
			hevc, pcm_sample_bit_depth_chroma);
		if (pcm_depth_luma >
			hevc->bit_depth_luma ||
			pcm_sample_chroma >
			hevc->bit_depth_chroma) {
			hevc_print(hevc, 0,
				"error, pcm bit depth %d, %d is greater than normal bit depth %d, %d\n",
				pcm_depth_luma,
				pcm_sample_chroma,
				hevc->bit_depth_luma,
				hevc->bit_depth_chroma);
			hevc->head_error_flag = 1;
		}
	}
}
#endif

#ifdef SUPPORT_10BIT
/* Losless compression body buffer size 4K per 64x32 (jt) */
static int compute_losless_comp_body_size(struct hevc_state_s *hevc,
	int width, int height, int mem_saving_mode)
{
	int width_x64;
	int height_x32;
	int bsize;

	/* round dimensions up to whole 64x32 compression tiles */
	width_x64 = width + 63;
	width_x64 >>= 6;

	height_x32 = height + 31;
	height_x32 >>= 5;
	if (mem_saving_mode == 1 && hevc->mmu_enable)
		bsize = 3200 * width_x64 * height_x32;
	else if (mem_saving_mode == 1)
		bsize = 3072 * width_x64 * height_x32;
	else
		bsize = 4096 *
width_x64 * height_x32;

	return bsize;
}

/* Losless compression header buffer size 32bytes per 128x64 (jt) */
static int compute_losless_comp_header_size(int width, int height)
{
	int width_x128;
	int height_x64;
	int hsize;

	/* round up to whole 128x64 header tiles, 32 bytes each */
	width_x128 = width + 127;
	width_x128 >>= 7;

	height_x64 = height + 63;
	height_x64 >>= 6;

	hsize = 32*width_x128*height_x64;

	return hsize;
}
#endif

/*
 * Append a "<jiffies> <decode_idx> ..." formatted entry to
 * hevc->log_list (drained later by dump_log).  Serialized by
 * vh265_log_mutex.  Always returns 0.
 */
static int add_log(struct hevc_state_s *hevc,
	const char *fmt, ...)
{
#define HEVC_LOG_BUF	196
	struct debug_log_s *log_item;
	unsigned char buf[HEVC_LOG_BUF];
	int len = 0;
	va_list args;
	mutex_lock(&vh265_log_mutex);
	va_start(args, fmt);
	len = sprintf(buf, "<%ld> <%05d> ",
		jiffies, hevc->decode_idx);
	len += vsnprintf(buf + len,
		HEVC_LOG_BUF - len, fmt, args);
	va_end(args);
	/*
	 * NOTE(review): allocation is sizeof(struct) + len but strcpy
	 * writes len + 1 bytes; this relies on debug_log_s.data
	 * contributing the terminator byte — confirm its declaration.
	 */
	log_item = kmalloc(
		sizeof(struct debug_log_s) + len,
		GFP_KERNEL);
	if (log_item) {
		INIT_LIST_HEAD(&log_item->list);
		strcpy(&log_item->data, buf);
		list_add_tail(&log_item->list,
			&hevc->log_list);
	}
	mutex_unlock(&vh265_log_mutex);
	return 0;
}

/* Print and free every queued log entry collected by add_log(). */
static void dump_log(struct hevc_state_s *hevc)
{
	int i = 0;
	struct debug_log_s *log_item, *tmp;
	mutex_lock(&vh265_log_mutex);
	list_for_each_entry_safe(log_item, tmp, &hevc->log_list, list) {
		hevc_print(hevc, 0,
			"[LOG%04d]%s\n",
			i++,
			&log_item->data);
		list_del(&log_item->list);
		kfree(log_item);
	}
	mutex_unlock(&vh265_log_mutex);
}

/*
 * A picture is skipped when it carries an error mark and the
 * "ignore bufmgr error" policy bit is clear.
 */
static unsigned char is_skip_decoding(struct hevc_state_s *hevc,
	struct PIC_s *pic)
{
	if (pic->error_mark
		&& ((hevc->ignore_bufmgr_error & 0x1) == 0))
		return 1;
	return 0;
}

/* POC of picture slot idx, or INVALID_POC when the slot is unused. */
static int get_pic_poc(struct hevc_state_s *hevc,
	unsigned int idx)
{
	if (idx != 0xff
		&& idx < MAX_REF_PIC_NUM
		&& hevc->m_PIC[idx])
		return hevc->m_PIC[idx]->POC;
	return INVALID_POC;
}

#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
/*
 * Effective double-write mode: per-instance value unless the global
 * module parameter has its force bit (bit 31) set.
 */
static int get_valid_double_write_mode(struct hevc_state_s *hevc)
{
	return (hevc->m_ins_flag &&
		((double_write_mode & 0x80000000)
== 0)) ?
		hevc->double_write_mode :
		(double_write_mode & 0x7fffffff);
}

/*
 * Effective extra-buffer margin: per-instance value unless the global
 * module parameter has its force bit (bit 31) set.
 */
static int get_dynamic_buf_num_margin(struct hevc_state_s *hevc)
{
	return (hevc->m_ins_flag &&
		((dynamic_buf_num_margin & 0x80000000) == 0)) ?
		hevc->dynamic_buf_num_margin :
		(dynamic_buf_num_margin & 0x7fffffff);
}
#endif

/*
 * Resolve the double-write mode actually programmed into the HW.
 * The 0xN00 auto modes pick a compression ratio from the picture size;
 * v4l instances delegate the decision to the v4l2 context.
 */
static int get_double_write_mode(struct hevc_state_s *hevc)
{
	u32 valid_dw_mode = get_valid_double_write_mode(hevc);
	int w = hevc->pic_w;
	int h = hevc->pic_h;
	u32 dw = 0x1; /*1:1*/

	if (hevc->is_used_v4l) {
		unsigned int out;

		vdec_v4l_get_dw_mode(hevc->v4l2_ctx, &out);
		dw = out;
		return dw;
	}

	switch (valid_dw_mode) {
	case 0x100:
		/* auto: scale down only above 1080p */
		if (w > 1920 && h > 1088)
			dw = 0x4; /*1:2*/
		break;
	case 0x200:
		if (w > 1920 && h > 1088)
			dw = 0x2; /*1:4*/
		break;
	case 0x300:
		/* auto: scale down only above 720p */
		if (w > 1280 && h > 720)
			dw = 0x4; /*1:2*/
		break;
	case 0x1000:
		if (w * h > 1920 * 1080)
			dw = 3;
		else if (w * h > 960 * 540)
			dw = 5;
		else
			dw = 1;
		break;
	default:
		dw = valid_dw_mode;
		break;
	}
	return dw;
}

#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
/* Instance index, used by the debug_mask gating in hevc_print(). */
static unsigned char get_idx(struct hevc_state_s *hevc)
{
	return hevc->index;
}
#endif

#undef pr_info
#define pr_info printk
/*
 * Instance-aware debug print.  Emits when flag == 0 (unconditional),
 * or when this instance is enabled in debug_mask AND flag intersects
 * the global debug bits.  Output is prefixed with "[index]".
 * Always returns 0.
 */
static int hevc_print(struct hevc_state_s *hevc,
	int flag, const char *fmt, ...)
{
#define HEVC_PRINT_BUF		512
	unsigned char buf[HEVC_PRINT_BUF];
	int len = 0;
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	if (hevc == NULL ||
		(flag == 0) ||
		((debug_mask &
		(1 << hevc->index))
		&& (debug & flag))) {
#endif
		va_list args;

		va_start(args, fmt);
		if (hevc)
			len = sprintf(buf, "[%d]", hevc->index);
		vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args);
		pr_debug("%s", buf);
		va_end(args);
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	}
#endif
	return 0;
}

/* Continuation print: same gating as hevc_print() but no "[index]" prefix. */
static int hevc_print_cont(struct hevc_state_s *hevc,
	int flag, const char *fmt, ...)
{
	unsigned char buf[HEVC_PRINT_BUF];
	int len = 0;
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	if (hevc == NULL ||
		(flag == 0) ||
		((debug_mask &
		(1 << hevc->index))
		&& (debug & flag))) {
#endif
		va_list args;

		va_start(args, fmt);
		vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args);
		pr_info("%s", buf);
		va_end(args);
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	}
#endif
	return 0;
}

static void put_mv_buf(struct hevc_state_s *hevc,
	struct PIC_s *pic);

static void update_vf_memhandle(struct hevc_state_s *hevc,
	struct vframe_s *vf, struct PIC_s *pic);

static void set_canvas(struct hevc_state_s *hevc, struct PIC_s *pic);

static void release_aux_data(struct hevc_state_s *hevc,
	struct PIC_s *pic);
static void release_pic_mmu_buf(struct hevc_state_s *hevc, struct PIC_s *pic);

#ifdef MULTI_INSTANCE_SUPPORT
/*
 * Snapshot the parser/bufmgr state before starting a decode pass so an
 * aborted pass (input underrun) can be rolled back by
 * restore_decode_state().
 */
static void backup_decode_state(struct hevc_state_s *hevc)
{
	hevc->m_pocRandomAccess_bak = hevc->m_pocRandomAccess;
	hevc->curr_POC_bak = hevc->curr_POC;
	hevc->iPrevPOC_bak = hevc->iPrevPOC;
	hevc->iPrevTid0POC_bak = hevc->iPrevTid0POC;
	hevc->start_parser_type_bak = hevc->start_parser_type;
	hevc->start_decoding_flag_bak = hevc->start_decoding_flag;
	hevc->rps_set_id_bak = hevc->rps_set_id;
	hevc->pic_decoded_lcu_idx_bak = hevc->pic_decoded_lcu_idx;
	hevc->decode_idx_bak = hevc->decode_idx;

}

/*
 * Roll back to the state captured by backup_decode_state() when a
 * decode pass must be retried.  If no more input is pending, only the
 * decoded-LCU progress counter is refreshed from HW.  The partially
 * decoded picture (if any) is recycled.
 */
static void restore_decode_state(struct hevc_state_s *hevc)
{
	struct vdec_s *vdec = hw_to_vdec(hevc);
	if (!vdec_has_more_input(vdec)) {
		hevc->pic_decoded_lcu_idx =
			READ_VREG(HEVC_PARSER_LCU_START)
			& 0xffffff;
		return;
	}
	hevc_print(hevc, PRINT_FLAG_VDEC_STATUS,
		"%s: discard pic index 0x%x\n",
		__func__, hevc->decoding_pic ?
		hevc->decoding_pic->index : 0xff);
	if (hevc->decoding_pic) {
		/* drop the half-decoded picture back into the free pool */
		hevc->decoding_pic->error_mark = 0;
		hevc->decoding_pic->output_ready = 0;
		hevc->decoding_pic->show_frame = false;
		hevc->decoding_pic->output_mark = 0;
		hevc->decoding_pic->referenced = 0;
		hevc->decoding_pic->POC = INVALID_POC;
		put_mv_buf(hevc, hevc->decoding_pic);
		release_aux_data(hevc, hevc->decoding_pic);
		hevc->decoding_pic = NULL;
	}
	/*if (vdec_stream_based(vdec) &&
		(hevc->decode_idx - hevc->decode_idx_bak > 1)) {
		int i;
		hevc_print(hevc, 0, "decode_idx %d, decode_idx_bak %d\n",
			hevc->decode_idx, hevc->decode_idx_bak);
		for (i = 0; i < MAX_REF_PIC_NUM; i++) {
			struct PIC_s *pic;
			pic = hevc->m_PIC[i];
			if (pic == NULL ||
				(pic->index == -1) ||
				(pic->BUF_index == -1) ||
				(pic->POC == INVALID_POC))
				continue;
			if ((pic->decode_idx >= hevc->decode_idx_bak) &&
				pic->decode_idx != (hevc->decode_idx - 1)) {
				hevc_print(hevc, 0, "release error buffer\n");
				pic->error_mark = 0;
				pic->output_ready = 0;
				pic->show_frame = false;
				pic->output_mark = 0;
				pic->referenced = 0;
				pic->POC = INVALID_POC;
				put_mv_buf(hevc, pic);
				release_aux_data(hevc, pic);
			}
		}
	}*/
	hevc->decode_idx = hevc->decode_idx_bak;
	hevc->m_pocRandomAccess = hevc->m_pocRandomAccess_bak;
	hevc->curr_POC = hevc->curr_POC_bak;
	hevc->iPrevPOC = hevc->iPrevPOC_bak;
	hevc->iPrevTid0POC = hevc->iPrevTid0POC_bak;
	hevc->start_parser_type = hevc->start_parser_type_bak;
	hevc->start_decoding_flag = hevc->start_decoding_flag_bak;
	hevc->rps_set_id = hevc->rps_set_id_bak;
	hevc->pic_decoded_lcu_idx = hevc->pic_decoded_lcu_idx_bak;

	if (hevc->pic_list_init_flag == 1)
		hevc->pic_list_init_flag = 0;
	/*if (hevc->decode_idx == 0)
		hevc->start_decoding_flag = 0;*/

	hevc->slice_idx = 0;
	hevc->used_4k_num = -1;
}
#endif

/*
 * Reset the per-instance decoder state to its power-on defaults.
 * Called at init and on error recovery; does not free buffers, only
 * (re)initializes bookkeeping fields.
 */
static void hevc_init_stru(struct hevc_state_s *hevc,
	struct BuffInfo_s *buf_spec_i)
{
	int i;
	INIT_LIST_HEAD(&hevc->log_list);
	hevc->work_space_buf =
buf_spec_i;
	hevc->prefix_aux_size = 0;
	hevc->suffix_aux_size = 0;
	hevc->aux_addr = NULL;
	hevc->rpm_addr = NULL;
	hevc->lmem_addr = NULL;

	hevc->curr_POC = INVALID_POC;

	hevc->pic_list_init_flag = 0;
	hevc->use_cma_flag = 0;
	hevc->decode_idx = 0;
	hevc->slice_idx = 0;
	hevc->new_pic = 0;
	hevc->new_tile = 0;
	hevc->iPrevPOC = 0;
	hevc->list_no = 0;
	/* int m_uiMaxCUWidth = 1<<7; */
	/* int m_uiMaxCUHeight = 1<<7; */
	hevc->m_pocRandomAccess = MAX_INT;
	hevc->tile_enabled = 0;
	hevc->tile_x = 0;
	hevc->tile_y = 0;
	hevc->iPrevTid0POC = 0;
	hevc->slice_addr = 0;
	hevc->slice_segment_addr = 0;
	hevc->skip_flag = 0;
	hevc->misc_flag0 = 0;

	hevc->cur_pic = NULL;
	hevc->col_pic = NULL;
	hevc->wait_buf = 0;
	hevc->error_flag = 0;
	hevc->head_error_flag = 0;
	hevc->error_skip_nal_count = 0;
	hevc->have_vps = 0;
	hevc->have_sps = 0;
	hevc->have_pps = 0;
	hevc->have_valid_start_slice = 0;

	hevc->pts_mode = PTS_NORMAL;
	hevc->last_pts = 0;
	hevc->last_lookup_pts = 0;
	hevc->last_pts_us64 = 0;
	hevc->last_lookup_pts_us64 = 0;
	hevc->pts_mode_switching_count = 0;
	hevc->pts_mode_recovery_count = 0;

	/* nal_skip_policy: low 2 bits = skip mode, high 16 = post-decode count */
	hevc->PB_skip_mode = nal_skip_policy & 0x3;
	hevc->PB_skip_count_after_decoding = (nal_skip_policy >> 16) & 0xffff;
	if (hevc->PB_skip_mode == 0)
		hevc->ignore_bufmgr_error = 0x1;
	else
		hevc->ignore_bufmgr_error = 0x0;

	if (hevc->is_used_v4l) {
		for (i = 0; i < MAX_REF_PIC_NUM; i++) {
			if (hevc->m_PIC[i] != NULL) {
				char *addr = NULL;
				int size;
				unsigned long flags;

				/* wipe the slot but keep its aux-data buffer */
				spin_lock_irqsave(&lock, flags);
				addr = hevc->m_PIC[i]->aux_data_buf;
				size = hevc->m_PIC[i]->aux_data_size;

				memset(hevc->m_PIC[i], 0 ,sizeof(struct PIC_s));
				hevc->m_PIC[i]->index = i;
				hevc->m_PIC[i]->aux_data_buf = addr;
				hevc->m_PIC[i]->aux_data_size = size;
				spin_unlock_irqrestore(&lock, flags);
			}
		}
	}

	hevc->pic_num = 0;
	hevc->lcu_x_num_pre = 0;
	hevc->lcu_y_num_pre = 0;
	hevc->first_pic_after_recover = 0;

	hevc->pre_top_pic = NULL;
	hevc->pre_bot_pic = NULL;

	hevc->sei_present_flag = 0;
	hevc->valve_count = 0;
	hevc->first_pic_flag = 0;
#ifdef MULTI_INSTANCE_SUPPORT
	hevc->decoded_poc = INVALID_POC;
	hevc->start_process_time = 0;
	hevc->last_lcu_idx = 0;
	hevc->decode_timeout_count = 0;
	hevc->timeout_num = 0;
	hevc->eos = 0;
	hevc->pic_decoded_lcu_idx = -1;
	hevc->over_decode = 0;
	hevc->used_4k_num = -1;
	hevc->start_decoding_flag = 0;
	hevc->rps_set_id = 0;
	backup_decode_state(hevc);
#endif
#ifdef DETREFILL_ENABLE
	hevc->detbuf_adr = 0;
	hevc->detbuf_adr_virt = NULL;
#endif
}

static int post_picture_early(struct vdec_s *vdec, int index);
static int prepare_display_buf(struct vdec_s *vdec, struct PIC_s *pic);
static int H265_alloc_mmu(struct hevc_state_s *hevc,
	struct PIC_s *new_pic, unsigned short bit_depth,
	unsigned int *mmu_index_adr);

#ifdef DETREFILL_ENABLE
#define DETREFILL_BUF_SIZE (4 * 0x4000)
#define HEVC_SAO_DBG_MODE0 0x361e
#define HEVC_SAO_DBG_MODE1 0x361f
#define HEVC_SAO_CTRL10 0x362e
#define HEVC_SAO_CTRL11 0x362f
/*
 * Allocate the coherent DMA buffer the AMRISC uses to exchange
 * compressed-body data during the "detrefill" error-repair pass.
 * Idempotent; returns 0 on success, -1 on allocation failure.
 */
static int init_detrefill_buf(struct hevc_state_s *hevc)
{
	if (hevc->detbuf_adr_virt)
		return 0;

	hevc->detbuf_adr_virt =
		(void *)dma_alloc_coherent(amports_get_dma_device(),
		DETREFILL_BUF_SIZE, &hevc->detbuf_adr,
		GFP_KERNEL);

	if (hevc->detbuf_adr_virt == NULL) {
		/* NOTE(review): message typo "ETREFILL" is in the original */
		pr_err("%s: failed to alloc ETREFILL_BUF\n", __func__);
		return -1;
	}
	return 0;
}

/* Free the detrefill DMA buffer allocated by init_detrefill_buf(). */
static void uninit_detrefill_buf(struct hevc_state_s *hevc)
{
	if (hevc->detbuf_adr_virt) {
		dma_free_coherent(amports_get_dma_device(),
			DETREFILL_BUF_SIZE, hevc->detbuf_adr_virt,
			hevc->detbuf_adr);

		hevc->detbuf_adr_virt = NULL;
		hevc->detbuf_adr = 0;
	}
}

/*
 * convert uncompressed frame buffer data from/to ddr
 */
static void convUnc8x4blk(uint16_t* blk8x4Luma,
	uint16_t* blk8x4Cb, uint16_t* blk8x4Cr, uint16_t* cmBodyBuf, int32_t direction)
{
	/* direction == 0: unpack 10-bit samples from the packed body words */
	if (direction == 0) {
		blk8x4Luma[3 + 0 * 8] = ((cmBodyBuf[0]
>> 0)) & 0x3ff;
		/*
		 * NOTE(review): fixed unpack table for the HW's 10-bit
		 * packed compressed-body layout (16-bit words, samples
		 * straddle word boundaries) — verify against SoC docs
		 * before touching any index or shift below.
		 */
		blk8x4Luma[3 + 1 * 8] = ((cmBodyBuf[1] << 6)
			| (cmBodyBuf[0] >> 10)) & 0x3ff;
		blk8x4Luma[3 + 2 * 8] = ((cmBodyBuf[1] >> 4)) & 0x3ff;
		blk8x4Luma[3 + 3 * 8] = ((cmBodyBuf[2] << 2)
			| (cmBodyBuf[1] >> 14)) & 0x3ff;
		blk8x4Luma[7 + 0 * 8] = ((cmBodyBuf[3] << 8)
			| (cmBodyBuf[2] >> 8)) & 0x3ff;
		blk8x4Luma[7 + 1 * 8] = ((cmBodyBuf[3] >> 2)) & 0x3ff;
		blk8x4Luma[7 + 2 * 8] = ((cmBodyBuf[4] << 4)
			| (cmBodyBuf[3] >> 12)) & 0x3ff;
		blk8x4Luma[7 + 3 * 8] = ((cmBodyBuf[4] >> 6)) & 0x3ff;
		blk8x4Cb [0 + 0 * 4] = ((cmBodyBuf[5] >> 0)) & 0x3ff;
		blk8x4Cr [0 + 0 * 4] = ((cmBodyBuf[6] << 6)
			| (cmBodyBuf[5] >> 10)) & 0x3ff;
		blk8x4Cb [0 + 1 * 4] = ((cmBodyBuf[6] >> 4)) & 0x3ff;
		blk8x4Cr [0 + 1 * 4] = ((cmBodyBuf[7] << 2)
			| (cmBodyBuf[6] >> 14)) & 0x3ff;

		blk8x4Luma[0 + 0 * 8] = ((cmBodyBuf[0 + 8] >> 0)) & 0x3ff;
		blk8x4Luma[1 + 0 * 8] = ((cmBodyBuf[1 + 8] << 6) |
			(cmBodyBuf[0 + 8] >> 10)) & 0x3ff;
		blk8x4Luma[2 + 0 * 8] = ((cmBodyBuf[1 + 8] >> 4)) & 0x3ff;
		blk8x4Luma[0 + 1 * 8] = ((cmBodyBuf[2 + 8] << 2) |
			(cmBodyBuf[1 + 8] >> 14)) & 0x3ff;
		blk8x4Luma[1 + 1 * 8] = ((cmBodyBuf[3 + 8] << 8) |
			(cmBodyBuf[2 + 8] >> 8)) & 0x3ff;
		blk8x4Luma[2 + 1 * 8] = ((cmBodyBuf[3 + 8] >> 2)) & 0x3ff;
		blk8x4Luma[0 + 2 * 8] = ((cmBodyBuf[4 + 8] << 4) |
			(cmBodyBuf[3 + 8] >> 12)) & 0x3ff;
		blk8x4Luma[1 + 2 * 8] = ((cmBodyBuf[4 + 8] >> 6)) & 0x3ff;
		blk8x4Luma[2 + 2 * 8] = ((cmBodyBuf[5 + 8] >> 0)) & 0x3ff;
		blk8x4Luma[0 + 3 * 8] = ((cmBodyBuf[6 + 8] << 6) |
			(cmBodyBuf[5 + 8] >> 10)) & 0x3ff;
		blk8x4Luma[1 + 3 * 8] = ((cmBodyBuf[6 + 8] >> 4)) & 0x3ff;
		blk8x4Luma[2 + 3 * 8] = ((cmBodyBuf[7 + 8] << 2) |
			(cmBodyBuf[6 + 8] >> 14)) & 0x3ff;

		blk8x4Luma[4 + 0 * 8] = ((cmBodyBuf[0 + 16] >> 0)) & 0x3ff;
		blk8x4Luma[5 + 0 * 8] = ((cmBodyBuf[1 + 16] << 6) |
			(cmBodyBuf[0 + 16] >> 10)) & 0x3ff;
		blk8x4Luma[6 + 0 * 8] = ((cmBodyBuf[1 + 16] >> 4)) & 0x3ff;
		blk8x4Luma[4 + 1 * 8] = ((cmBodyBuf[2 + 16] << 2) |
			(cmBodyBuf[1 + 16] >> 14)) & 0x3ff;
		blk8x4Luma[5 + 1 * 8] = ((cmBodyBuf[3 + 16] << 8) |
			(cmBodyBuf[2 + 16] >> 8)) & 0x3ff;
		blk8x4Luma[6 + 1 * 8] = ((cmBodyBuf[3 + 16] >> 2)) & 0x3ff;
		blk8x4Luma[4 + 2 * 8] = ((cmBodyBuf[4 + 16] << 4) |
			(cmBodyBuf[3 + 16] >> 12)) & 0x3ff;
		blk8x4Luma[5 + 2 * 8] = ((cmBodyBuf[4 + 16] >> 6)) & 0x3ff;
		blk8x4Luma[6 + 2 * 8] = ((cmBodyBuf[5 + 16] >> 0)) & 0x3ff;
		blk8x4Luma[4 + 3 * 8] = ((cmBodyBuf[6 + 16] << 6) |
			(cmBodyBuf[5 + 16] >> 10)) & 0x3ff;
		blk8x4Luma[5 + 3 * 8] = ((cmBodyBuf[6 + 16] >> 4)) & 0x3ff;
		blk8x4Luma[6 + 3 * 8] = ((cmBodyBuf[7 + 16] << 2) |
			(cmBodyBuf[6 + 16] >> 14)) & 0x3ff;

		blk8x4Cb[1 + 0 * 4] = ((cmBodyBuf[0 + 24] >> 0)) & 0x3ff;
		blk8x4Cr[1 + 0 * 4] = ((cmBodyBuf[1 + 24] << 6) |
			(cmBodyBuf[0 + 24] >> 10)) & 0x3ff;
		blk8x4Cb[2 + 0 * 4] = ((cmBodyBuf[1 + 24] >> 4)) & 0x3ff;
		blk8x4Cr[2 + 0 * 4] = ((cmBodyBuf[2 + 24] << 2) |
			(cmBodyBuf[1 + 24] >> 14)) & 0x3ff;
		blk8x4Cb[3 + 0 * 4] = ((cmBodyBuf[3 + 24] << 8) |
			(cmBodyBuf[2 + 24] >> 8)) & 0x3ff;
		blk8x4Cr[3 + 0 * 4] = ((cmBodyBuf[3 + 24] >> 2)) & 0x3ff;
		blk8x4Cb[1 + 1 * 4] = ((cmBodyBuf[4 + 24] << 4) |
			(cmBodyBuf[3 + 24] >> 12)) & 0x3ff;
		blk8x4Cr[1 + 1 * 4] = ((cmBodyBuf[4 + 24] >> 6)) & 0x3ff;
		blk8x4Cb[2 + 1 * 4] = ((cmBodyBuf[5 + 24] >> 0)) & 0x3ff;
		blk8x4Cr[2 + 1 * 4] = ((cmBodyBuf[6 + 24] << 6) |
			(cmBodyBuf[5 + 24] >> 10)) & 0x3ff;
		blk8x4Cb[3 + 1 * 4] = ((cmBodyBuf[6 + 24] >> 4)) & 0x3ff;
		blk8x4Cr[3 + 1 * 4] = ((cmBodyBuf[7 + 24] << 2) |
			(cmBodyBuf[6 + 24] >> 14)) & 0x3ff;
	} else {
		/* direction != 0: repack samples into the packed body words */
		cmBodyBuf[0 + 8 * 0] = (blk8x4Luma[3 + 1 * 8] << 10) |
			blk8x4Luma[3 + 0 * 8];
		cmBodyBuf[1 + 8 * 0] = (blk8x4Luma[3 + 3 * 8] << 14) |
			(blk8x4Luma[3 + 2 * 8] << 4) | (blk8x4Luma[3 + 1 * 8] >> 6);
		cmBodyBuf[2 + 8 * 0] = (blk8x4Luma[7 + 0 * 8] << 8) |
			(blk8x4Luma[3 + 3 * 8] >> 2);
		cmBodyBuf[3 + 8 * 0] = (blk8x4Luma[7 + 2 * 8] << 12) |
			(blk8x4Luma[7 + 1 * 8] << 2) | (blk8x4Luma[7 + 0 * 8] >>8);
		cmBodyBuf[4 + 8 * 0] = (blk8x4Luma[7 + 3 * 8] << 6) |
			(blk8x4Luma[7 + 2 * 8] >>4);
		cmBodyBuf[5 + 8 * 0] = (blk8x4Cr[0 + 0 * 4] << 10) |
			blk8x4Cb[0 + 0 * 4];
		cmBodyBuf[6 + 8 * 0] = (blk8x4Cr[0 + 1 * 4] << 14) |
			(blk8x4Cb[0 + 1 * 4] << 4) | (blk8x4Cr[0 + 0 * 4] >> 6);
		cmBodyBuf[7 + 8 * 0] = (0<< 8) | (blk8x4Cr[0 + 1 * 4] >> 2);

		cmBodyBuf[0 + 8 * 1] = (blk8x4Luma[1 + 0 * 8] << 10) |
			blk8x4Luma[0 + 0 * 8];
		cmBodyBuf[1 + 8 * 1] = (blk8x4Luma[0 + 1 * 8] << 14) |
			(blk8x4Luma[2 + 0 * 8] << 4) | (blk8x4Luma[1 + 0 * 8] >> 6);
		cmBodyBuf[2 + 8 * 1] = (blk8x4Luma[1 + 1 * 8] << 8) |
			(blk8x4Luma[0 + 1 * 8] >> 2);
		cmBodyBuf[3 + 8 * 1] = (blk8x4Luma[0 + 2 * 8] << 12) |
			(blk8x4Luma[2 + 1 * 8] << 2) | (blk8x4Luma[1 + 1 * 8] >>8);
		cmBodyBuf[4 + 8 * 1] = (blk8x4Luma[1 + 2 * 8] << 6) |
			(blk8x4Luma[0 + 2 * 8] >>4);
		cmBodyBuf[5 + 8 * 1] = (blk8x4Luma[0 + 3 * 8] << 10) |
			blk8x4Luma[2 + 2 * 8];
		cmBodyBuf[6 + 8 * 1] = (blk8x4Luma[2 + 3 * 8] << 14) |
			(blk8x4Luma[1 + 3 * 8] << 4) | (blk8x4Luma[0 + 3 * 8] >> 6);
		cmBodyBuf[7 + 8 * 1] = (0<< 8) | (blk8x4Luma[2 + 3 * 8] >> 2);

		cmBodyBuf[0 + 8 * 2] = (blk8x4Luma[5 + 0 * 8] << 10) |
			blk8x4Luma[4 + 0 * 8];
		cmBodyBuf[1 + 8 * 2] = (blk8x4Luma[4 + 1 * 8] << 14) |
			(blk8x4Luma[6 + 0 * 8] << 4) | (blk8x4Luma[5 + 0 * 8] >> 6);
		cmBodyBuf[2 + 8 * 2] = (blk8x4Luma[5 + 1 * 8] << 8) |
			(blk8x4Luma[4 + 1 * 8] >> 2);
		cmBodyBuf[3 + 8 * 2] = (blk8x4Luma[4 + 2 * 8] << 12) |
			(blk8x4Luma[6 + 1 * 8] << 2) | (blk8x4Luma[5 + 1 * 8] >>8);
		cmBodyBuf[4 + 8 * 2] = (blk8x4Luma[5 + 2 * 8] << 6) |
			(blk8x4Luma[4 + 2 * 8] >>4);
		cmBodyBuf[5 + 8 * 2] = (blk8x4Luma[4 + 3 * 8] << 10) |
			blk8x4Luma[6 + 2 * 8];
		cmBodyBuf[6 + 8 * 2] = (blk8x4Luma[6 + 3 * 8] << 14) |
			(blk8x4Luma[5 + 3 * 8] << 4) | (blk8x4Luma[4 + 3 * 8] >> 6);
		cmBodyBuf[7 + 8 * 2] = (0<< 8) | (blk8x4Luma[6 + 3 * 8] >> 2);

		cmBodyBuf[0 + 8 * 3] = (blk8x4Cr[1 + 0 * 4] << 10) |
			blk8x4Cb[1 + 0 * 4];
		cmBodyBuf[1 + 8 * 3] = (blk8x4Cr[2 + 0 * 4] << 14) |
			(blk8x4Cb[2 + 0 * 4] << 4) | (blk8x4Cr[1 + 0 * 4] >> 6);
		cmBodyBuf[2 + 8 * 3] = (blk8x4Cb[3 + 0 * 4] << 8) |
			(blk8x4Cr[2 + 0 * 4] >> 2);
		cmBodyBuf[3 + 8 * 3] = (blk8x4Cb[1 + 1 * 4] << 12) |
			(blk8x4Cr[3 + 0 * 4] << 2) | (blk8x4Cb[3 + 0 * 4] >>8);
		cmBodyBuf[4 + 8 * 3] = (blk8x4Cr[1 + 1 * 4] << 6) |
			(blk8x4Cb[1 + 1 * 4] >>4);
		cmBodyBuf[5 + 8 * 3] = (blk8x4Cr[2 + 1 * 4] << 10) |
			blk8x4Cb[2 + 1 * 4];
		cmBodyBuf[6 + 8 * 3] = (blk8x4Cr[3 + 1 * 4] << 14) |
			(blk8x4Cb[3 + 1 * 4] << 4) | (blk8x4Cr[2 + 1 * 4] >> 6);
		cmBodyBuf[7 + 8 * 3] = (0 << 8) | (blk8x4Cr[3 + 1 * 4] >> 2);
	}
}

/*
 * Repair one corrupted CTU of the compressed frame buffer with AMRISC
 * help: HW dumps the body data into detbuf, the CPU patches the
 * damaged 8x4 blocks (extrapolating from neighbours), then HW writes
 * the result back.  Busy-waits on HEVC_SAO_DBG_MODE1 for each phase.
 */
static void corrRefillWithAmrisc (
	struct hevc_state_s *hevc,
	uint32_t cmHeaderBaseAddr,
	uint32_t picWidth,
	uint32_t ctuPosition)
{
	int32_t i;
	uint16_t ctux = (ctuPosition>>16) & 0xffff;
	uint16_t ctuy = (ctuPosition>> 0) & 0xffff;
	int32_t aboveCtuAvailable = (ctuy) ? 1 : 0;

	uint16_t *cmBodyBuf = NULL;

	uint32_t pic_width_x64_pre = picWidth + 0x3f;
	uint32_t pic_width_x64 = pic_width_x64_pre >> 6;
	uint32_t stride64x64 = pic_width_x64 * 128;
	uint32_t addr_offset64x64_abv = stride64x64 *
		(aboveCtuAvailable ?
ctuy - 1 : ctuy) + 128 * ctux;
	uint32_t addr_offset64x64_cur = stride64x64*ctuy + 128 * ctux;
	uint32_t cmHeaderAddrAbv = cmHeaderBaseAddr + addr_offset64x64_abv;
	uint32_t cmHeaderAddrCur = cmHeaderBaseAddr + addr_offset64x64_cur;
	unsigned int tmpData32;

	uint16_t blkBuf0Y[32];
	uint16_t blkBuf0Cb[8];
	uint16_t blkBuf0Cr[8];
	uint16_t blkBuf1Y[32];
	uint16_t blkBuf1Cb[8];
	uint16_t blkBuf1Cr[8];
	int32_t blkBufCnt = 0;

	int32_t blkIdx;
	PR_INIT(128);

	cmBodyBuf = vzalloc(sizeof(uint16_t) * 32 * 18);
	if (!cmBodyBuf)
		return;

	/* point HW at the above/current CTU headers and the scratch buffer */
	WRITE_VREG(HEVC_SAO_CTRL10, cmHeaderAddrAbv);
	WRITE_VREG(HEVC_SAO_CTRL11, cmHeaderAddrCur);
	WRITE_VREG(HEVC_SAO_DBG_MODE0, hevc->detbuf_adr);
	WRITE_VREG(HEVC_SAO_DBG_MODE1, 2);

	for (i = 0; i < 32 * 18; i++)
		cmBodyBuf[i] = 0;

	hevc_print(hevc, H265_DEBUG_BUFMGR_MORE,
		"%s, %d\n", __func__, __LINE__);
	/* wait for AMRISC to finish dumping body data into detbuf */
	do {
		tmpData32 = READ_VREG(HEVC_SAO_DBG_MODE1);
	} while (tmpData32);
	hevc_print(hevc, H265_DEBUG_BUFMGR_MORE,
		"%s, %d\n", __func__, __LINE__);

	hevc_print(hevc, H265_DEBUG_DETAIL,
		"cmBodyBuf from detbuf:\n");
	for (i = 0; i < 32 * 18; i++) {
		cmBodyBuf[i] = hevc->detbuf_adr_virt[i];
		if (get_dbg_flag(hevc) &
			H265_DEBUG_DETAIL) {
			if ((i & 0xf) == 0)
				PR_INFO(hevc->index);
			PR_FILL("%02x ", cmBodyBuf[i]);
		}
	}
	PR_INFO(hevc->index);

	for (i = 0; i < 32; i++)
		blkBuf0Y[i] = 0;
	for (i = 0; i < 8; i++)
		blkBuf0Cb[i] = 0;
	for (i = 0; i < 8; i++)
		blkBuf0Cr[i] = 0;
	for (i = 0; i < 32; i++)
		blkBuf1Y[i] = 0;
	for (i = 0; i < 8; i++)
		blkBuf1Cb[i] = 0;
	for (i = 0; i < 8; i++)
		blkBuf1Cr[i] = 0;

	/* 18 8x4 blocks: first 2 from the above CTU, rest from current */
	for (blkIdx = 0; blkIdx < 18; blkIdx++) {
		int32_t inAboveCtu = (blkIdx<2) ? 1 : 0;
		int32_t restoreEnable = (blkIdx>0) ? 1 : 0;
		uint16_t* blkY = (blkBufCnt==0) ? blkBuf0Y : blkBuf1Y ;
		uint16_t* blkCb = (blkBufCnt==0) ? blkBuf0Cb : blkBuf1Cb;
		uint16_t* blkCr = (blkBufCnt==0) ?
			blkBuf0Cr : blkBuf1Cr;
		uint16_t* cmBodyBufNow = cmBodyBuf + (blkIdx * 32);

		if (!aboveCtuAvailable && inAboveCtu)
			continue;

		/* detRefillBuf --> 8x4block*/
		convUnc8x4blk(blkY, blkCb, blkCr, cmBodyBufNow, 0);

		if (restoreEnable) {
			/*
			 * Patch the damaged right columns (x=3..7 luma,
			 * x=1..3 chroma) by extrapolating from the intact
			 * left samples; small offsets presumably dither
			 * the repair — fixed table from the vendor.
			 */
			blkY[3 + 0 * 8] = blkY[2 + 0 * 8] + 2;
			blkY[4 + 0 * 8] = blkY[1 + 0 * 8] + 3;
			blkY[5 + 0 * 8] = blkY[0 + 0 * 8] + 1;
			blkY[6 + 0 * 8] = blkY[0 + 0 * 8] + 2;
			blkY[7 + 0 * 8] = blkY[1 + 0 * 8] + 2;
			blkY[3 + 1 * 8] = blkY[2 + 1 * 8] + 1;
			blkY[4 + 1 * 8] = blkY[1 + 1 * 8] + 2;
			blkY[5 + 1 * 8] = blkY[0 + 1 * 8] + 2;
			blkY[6 + 1 * 8] = blkY[0 + 1 * 8] + 2;
			blkY[7 + 1 * 8] = blkY[1 + 1 * 8] + 3;
			blkY[3 + 2 * 8] = blkY[2 + 2 * 8] + 3;
			blkY[4 + 2 * 8] = blkY[1 + 2 * 8] + 1;
			blkY[5 + 2 * 8] = blkY[0 + 2 * 8] + 3;
			blkY[6 + 2 * 8] = blkY[0 + 2 * 8] + 3;
			blkY[7 + 2 * 8] = blkY[1 + 2 * 8] + 3;
			blkY[3 + 3 * 8] = blkY[2 + 3 * 8] + 0;
			blkY[4 + 3 * 8] = blkY[1 + 3 * 8] + 0;
			blkY[5 + 3 * 8] = blkY[0 + 3 * 8] + 1;
			blkY[6 + 3 * 8] = blkY[0 + 3 * 8] + 2;
			blkY[7 + 3 * 8] = blkY[1 + 3 * 8] + 1;
			blkCb[1 + 0 * 4] = blkCb[0 + 0 * 4];
			blkCb[2 + 0 * 4] = blkCb[0 + 0 * 4];
			blkCb[3 + 0 * 4] = blkCb[0 + 0 * 4];
			blkCb[1 + 1 * 4] = blkCb[0 + 1 * 4];
			blkCb[2 + 1 * 4] = blkCb[0 + 1 * 4];
			blkCb[3 + 1 * 4] = blkCb[0 + 1 * 4];
			blkCr[1 + 0 * 4] = blkCr[0 + 0 * 4];
			blkCr[2 + 0 * 4] = blkCr[0 + 0 * 4];
			blkCr[3 + 0 * 4] = blkCr[0 + 0 * 4];
			blkCr[1 + 1 * 4] = blkCr[0 + 1 * 4];
			blkCr[2 + 1 * 4] = blkCr[0 + 1 * 4];
			blkCr[3 + 1 * 4] = blkCr[0 + 1 * 4];

			/*Store data back to DDR*/
			convUnc8x4blk(blkY, blkCb, blkCr, cmBodyBufNow, 1);
		}

		blkBufCnt = (blkBufCnt==1) ?
			0 : blkBufCnt + 1;
	}

	hevc_print(hevc, H265_DEBUG_DETAIL,
		"cmBodyBuf to detbuf:\n");
	for (i = 0; i < 32 * 18; i++) {
		hevc->detbuf_adr_virt[i] = cmBodyBuf[i];
		if (get_dbg_flag(hevc) &
			H265_DEBUG_DETAIL) {
			if ((i & 0xf) == 0)
				PR_INFO(hevc->index);
			PR_FILL("%02x ", cmBodyBuf[i]);
		}
	}

	PR_INFO(hevc->index);

	/* phase 3: ask AMRISC to write the patched data back to DDR */
	WRITE_VREG(HEVC_SAO_DBG_MODE1, 3);
	hevc_print(hevc, H265_DEBUG_BUFMGR_MORE,
		"%s, %d\n", __func__, __LINE__);
	do {
		tmpData32 = READ_VREG(HEVC_SAO_DBG_MODE1);
	} while (tmpData32);
	hevc_print(hevc, H265_DEBUG_BUFMGR_MORE,
		"%s, %d\n", __func__, __LINE__);
	vfree(cmBodyBuf);
}

/*
 * Poll the ucode's error report register and run the AMRISC-assisted
 * refill for every corrupted CTU it flagged (up to 3 per report).
 */
static void delrefill(struct hevc_state_s *hevc)
{
	/*
	 * corrRefill
	 */
	/*HEVC_SAO_DBG_MODE0: picGlobalVariable
	[31:30]error number
	[29:20]error2([9:7]tilex[6:0]ctuy)
	[19:10]error1 [9:0]error0*/
	uint32_t detResult = READ_VREG(HEVC_ASSIST_SCRATCH_3);
	uint32_t errorIdx;
	uint32_t errorNum = (detResult>>30);

	if (detResult) {
		hevc_print(hevc, H265_DEBUG_BUFMGR,
			"[corrRefillWithAmrisc] detResult=%08x\n", detResult);
		for (errorIdx = 0; errorIdx < errorNum; errorIdx++) {
			uint32_t errorPos = errorIdx * 10;
			uint32_t errorResult = (detResult >> errorPos) & 0x3ff;
			uint32_t tilex = (errorResult >> 7) - 1;
			uint16_t ctux = hevc->m_tile[0][tilex].start_cu_x +
				hevc->m_tile[0][tilex].width - 1;
			uint16_t ctuy = (uint16_t)(errorResult & 0x7f);
			uint32_t ctuPosition = (ctux<< 16) + ctuy;
			hevc_print(hevc, H265_DEBUG_BUFMGR,
				"Idx:%d tilex:%d ctu(%d(0x%x), %d(0x%x))\n",
				errorIdx,tilex,ctux,ctux, ctuy,ctuy);
			corrRefillWithAmrisc(
				hevc,
				(uint32_t)hevc->cur_pic->header_adr,
				hevc->pic_w,
				ctuPosition);
		}

		WRITE_VREG(HEVC_ASSIST_SCRATCH_3, 0); /*clear status*/
		WRITE_VREG(HEVC_SAO_DBG_MODE0, 0);
		WRITE_VREG(HEVC_SAO_DBG_MODE1, 1);
	}
}
#endif

/*
 * Drain the 128 RPM parameter words pushed by the ucode.  Each read
 * busy-waits for the valid bit (0x10000) and acks by clearing the reg.
 */
static void get_rpm_param(union param_u *params)
{
	int i;
	unsigned int data32;

	for (i = 0; i < 128; i++) {
		do {
			data32 = READ_VREG(RPM_CMD_REG);
			/*
hevc_print(hevc, 0, "%x\n", data32); */
		} while ((data32 & 0x10000) == 0);
		params->l.data[i] = data32 & 0xffff;
		/* hevc_print(hevc, 0, "%x\n", data32); */
		WRITE_VREG(RPM_CMD_REG, 0);
	}
}

/*
 * Find the picture with the given POC; when several buffers share the
 * POC, prefer the most recently decoded one.  NULL if not found.
 */
static struct PIC_s *get_pic_by_POC(struct hevc_state_s *hevc, int POC)
{
	int i;
	struct PIC_s *pic;
	struct PIC_s *ret_pic = NULL;
	if (POC == INVALID_POC)
		return NULL;
	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		pic = hevc->m_PIC[i];
		if (pic == NULL || pic->index == -1 ||
			pic->BUF_index == -1)
			continue;
		if (pic->POC == POC) {
			if (ret_pic == NULL)
				ret_pic = pic;
			else {
				if (pic->decode_idx > ret_pic->decode_idx)
					ret_pic = pic;
			}
		}
	}
	return ret_pic;
}

/*
 * Like get_pic_by_POC() but only considers pictures still referenced
 * AND matching the current stream resolution.
 */
static struct PIC_s *get_ref_pic_by_POC(struct hevc_state_s *hevc, int POC)
{
	int i;
	struct PIC_s *pic;
	struct PIC_s *ret_pic = NULL;

	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		pic = hevc->m_PIC[i];
		if (pic == NULL || pic->index == -1 ||
			pic->BUF_index == -1)
			continue;
		/*Add width and height of ref picture detection,
		resolved incorrectly referenced frame.*/
		if ((pic->POC == POC) && (pic->referenced) &&
			(hevc->pic_w == pic->width) &&
			(hevc->pic_h == pic->height)) {
			if (ret_pic == NULL)
				ret_pic = pic;
			else {
				if (pic->decode_idx > ret_pic->decode_idx)
					ret_pic = pic;
			}
		}
	}

	return ret_pic;
}

/* floor(log2(val)); returns (unsigned)-1 for val == 0 */
static unsigned int log2i(unsigned int val)
{
	unsigned int ret = -1;

	while (val != 0) {
		val >>= 1;
		ret++;
	}
	return ret;
}

static int init_buf_spec(struct hevc_state_s *hevc);


/* Release the frame MMU box and the BMMU box (incl. its workspace slot). */
static void uninit_mmu_buffers(struct hevc_state_s *hevc)
{
	if (hevc->mmu_box) {
		decoder_mmu_box_free(hevc->mmu_box);
		hevc->mmu_box = NULL;
	}

	if (hevc->bmmu_box) {
		/* release workspace */
		decoder_bmmu_box_free_idx(hevc->bmmu_box,
			BMMU_WORKSPACE_ID);
		decoder_bmmu_box_free(hevc->bmmu_box);
		hevc->bmmu_box = NULL;
	}
}

/* return in MB */
static int hevc_max_mmu_buf_size(int max_w, int max_h)
{
	int buf_size = 64;

	/* streams up to 1080p need only 24 MB of MMU pages */
	if ((max_w *
max_h) > 0 &&
		(max_w * max_h) <= 1920*1088) {
		buf_size = 24;
	}
	return buf_size;
}

/*
 * Allocate the decoder's memory boxes: the frame MMU box (skipped for
 * v4l instances, which manage it elsewhere) and, when bmmu_flag is set,
 * the BMMU box for linear buffers.  TVP (secure) flag is propagated.
 * Returns 0 on success, -1 on failure (mmu_box freed on bmmu failure).
 */
static int init_mmu_buffers(struct hevc_state_s *hevc, bool bmmu_flag)
{
	int tvp_flag = vdec_secure(hw_to_vdec(hevc)) ?
		CODEC_MM_FLAGS_TVP : 0;
	int buf_size = hevc_max_mmu_buf_size(hevc->max_pic_w,
		hevc->max_pic_h);

	if (get_dbg_flag(hevc)) {
		hevc_print(hevc, 0, "%s max_w %d max_h %d\n",
			__func__, hevc->max_pic_w, hevc->max_pic_h);
	}

	hevc->need_cache_size = buf_size * SZ_1M;
	hevc->sc_start_time = get_jiffies_64();
	if (hevc->mmu_enable && !hevc->is_used_v4l) {
		hevc->mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME,
			hevc->index,
			MAX_REF_PIC_NUM,
			buf_size * SZ_1M,
			tvp_flag
			);
		if (!hevc->mmu_box) {
			pr_err("h265 alloc mmu box failed!!\n");
			return -1;
		}
	}

	if (!bmmu_flag)
		return 0;

	hevc->bmmu_box = decoder_bmmu_box_alloc_box(DRIVER_NAME,
		hevc->index,
		BMMU_MAX_BUFFERS,
		4 + PAGE_SHIFT,
		CODEC_MM_FLAGS_CMA_CLEAR |
		CODEC_MM_FLAGS_FOR_VDECODER |
		tvp_flag);
	if (!hevc->bmmu_box) {
		if (hevc->mmu_box)
			decoder_mmu_box_free(hevc->mmu_box);
		hevc->mmu_box = NULL;
		pr_err("h265 alloc mmu box failed!!\n");
		return -1;
	}
	return 0;
}

/* Per-picture buffer geometry (LCU count and MC buffer sizes). */
struct buf_stru_s
{
	int lcu_total;
	int mc_buffer_size_h;
	int mc_buffer_size_u_v_h;
};

#ifndef MV_USE_FIXED_BUF
/*
 * Free every allocated motion-vector buffer and detach them from any
 * picture that still points at one.
 */
static void dealloc_mv_bufs(struct hevc_state_s *hevc)
{
	int i;
	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		if (hevc->m_mv_BUF[i].start_adr) {
			if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR)
				hevc_print(hevc, 0,
					"dealloc mv buf(%d) adr 0x%p size 0x%x used_flag %d\n",
					i, hevc->m_mv_BUF[i].start_adr,
					hevc->m_mv_BUF[i].size,
					hevc->m_mv_BUF[i].used_flag);
			decoder_bmmu_box_free_idx(
				hevc->bmmu_box,
				MV_BUFFER_IDX(i));
			hevc->m_mv_BUF[i].start_adr = 0;
			hevc->m_mv_BUF[i].size = 0;
			hevc->m_mv_BUF[i].used_flag = 0;
		}
	}
	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		if (hevc->m_PIC[i] != NULL)
			hevc->m_PIC[i]->mv_buf_index = -1;
	}
}

static int
alloc_mv_buf(struct hevc_state_s *hevc, int i) +{ + int ret = 0; + /*get_cma_alloc_ref();*/ /*DEBUG_TMP*/ + if (decoder_bmmu_box_alloc_buf_phy + (hevc->bmmu_box, + MV_BUFFER_IDX(i), hevc->mv_buf_size, + DRIVER_NAME, + &hevc->m_mv_BUF[i].start_adr) < 0) { + hevc->m_mv_BUF[i].start_adr = 0; + ret = -1; + } else { + hevc->m_mv_BUF[i].size = hevc->mv_buf_size; + hevc->m_mv_BUF[i].used_flag = 0; + ret = 0; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "MV Buffer %d: start_adr %p size %x\n", + i, + (void *)hevc->m_mv_BUF[i].start_adr, + hevc->m_mv_BUF[i].size); + } + if (!vdec_secure(hw_to_vdec(hevc)) && (hevc->m_mv_BUF[i].start_adr)) { + void *mem_start_virt; + mem_start_virt = + codec_mm_phys_to_virt(hevc->m_mv_BUF[i].start_adr); + if (mem_start_virt) { + memset(mem_start_virt, 0, hevc->m_mv_BUF[i].size); + codec_mm_dma_flush(mem_start_virt, + hevc->m_mv_BUF[i].size, DMA_TO_DEVICE); + } else { + mem_start_virt = codec_mm_vmap( + hevc->m_mv_BUF[i].start_adr, + hevc->m_mv_BUF[i].size); + if (mem_start_virt) { + memset(mem_start_virt, 0, hevc->m_mv_BUF[i].size); + codec_mm_dma_flush(mem_start_virt, + hevc->m_mv_BUF[i].size, + DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(mem_start_virt); + } else { + /*not virt for tvp playing, + may need clear on ucode.*/ + pr_err("ref %s mem_start_virt failed\n", __func__); + } + } + } + } + /*put_cma_alloc_ref();*/ /*DEBUG_TMP*/ + return ret; +} +#endif + +static int get_mv_buf(struct hevc_state_s *hevc, struct PIC_s *pic) +{ +#ifdef MV_USE_FIXED_BUF + if (pic && pic->index >= 0) { + int mv_size; + if (IS_8K_SIZE(pic->width, pic->height)) + mv_size = MPRED_8K_MV_BUF_SIZE; + else if (IS_4K_SIZE(pic->width, pic->height)) + mv_size = MPRED_4K_MV_BUF_SIZE; /*0x120000*/ + else + mv_size = MPRED_MV_BUF_SIZE; + + pic->mpred_mv_wr_start_addr = + hevc->work_space_buf->mpred_mv.buf_start + + (pic->index * mv_size); + pic->mv_size = mv_size; + } + return 0; +#else + int i; + int ret = -1; + int new_size; + if 
(mv_buf_dynamic_alloc) { + int MV_MEM_UNIT = + hevc->lcu_size_log2 == 6 ? 0x200 : hevc->lcu_size_log2 == + 5 ? 0x80 : 0x20; + int extended_pic_width = (pic->width + hevc->lcu_size -1) + & (~(hevc->lcu_size - 1)); + int extended_pic_height = (pic->height + hevc->lcu_size -1) + & (~(hevc->lcu_size - 1)); + int lcu_x_num = extended_pic_width / hevc->lcu_size; + int lcu_y_num = extended_pic_height / hevc->lcu_size; + new_size = lcu_x_num * lcu_y_num * MV_MEM_UNIT; + hevc->mv_buf_size = (new_size + 0xffff) & (~0xffff); + } else { + if (IS_8K_SIZE(pic->width, pic->height)) + new_size = MPRED_8K_MV_BUF_SIZE; + else if (IS_4K_SIZE(pic->width, pic->height)) + new_size = MPRED_4K_MV_BUF_SIZE; /*0x120000*/ + else + new_size = MPRED_MV_BUF_SIZE; + + if (new_size != hevc->mv_buf_size) { + dealloc_mv_bufs(hevc); + hevc->mv_buf_size = new_size; + } + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + if (hevc->m_mv_BUF[i].start_adr && + hevc->m_mv_BUF[i].used_flag == 0) { + hevc->m_mv_BUF[i].used_flag = 1; + ret = i; + break; + } + } + } + if (ret < 0) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + if (hevc->m_mv_BUF[i].start_adr == 0) { + if (alloc_mv_buf(hevc, i) >= 0) { + hevc->m_mv_BUF[i].used_flag = 1; + ret = i; + } + break; + } + } + } + + if (ret >= 0) { + pic->mv_buf_index = ret; + pic->mv_size = hevc->m_mv_BUF[ret].size; + pic->mpred_mv_wr_start_addr = + (hevc->m_mv_BUF[ret].start_adr + 0xffff) & + (~0xffff); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s => %d (0x%x) size 0x%x\n", + __func__, ret, + pic->mpred_mv_wr_start_addr, + pic->mv_size); + + } else { + hevc_print(hevc, 0, + "%s: Error, mv buf is not enough\n", + __func__); + } + return ret; + +#endif +} + +static void put_mv_buf(struct hevc_state_s *hevc, + struct PIC_s *pic) +{ +#ifndef MV_USE_FIXED_BUF + int i = pic->mv_buf_index; + if (i < 0 || i >= MAX_REF_PIC_NUM) { + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s: index %d beyond range\n", + __func__, i); + return; + } + if (mv_buf_dynamic_alloc) { + 
hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s(%d)\n", + __func__, i); + + decoder_bmmu_box_free_idx( + hevc->bmmu_box, + MV_BUFFER_IDX(i)); + hevc->m_mv_BUF[i].start_adr = 0; + hevc->m_mv_BUF[i].size = 0; + hevc->m_mv_BUF[i].used_flag = 0; + pic->mv_buf_index = -1; + return; + } + + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s(%d): used_flag(%d)\n", + __func__, i, + hevc->m_mv_BUF[i].used_flag); + + if (hevc->m_mv_BUF[i].start_adr && + hevc->m_mv_BUF[i].used_flag) + hevc->m_mv_BUF[i].used_flag = 0; + pic->mv_buf_index = -1; +#endif +} + +static int cal_current_buf_size(struct hevc_state_s *hevc, + struct buf_stru_s *buf_stru) +{ + + int buf_size; + int pic_width = hevc->pic_w; + int pic_height = hevc->pic_h; + int lcu_size = hevc->lcu_size; + int pic_width_lcu = (pic_width % lcu_size) ? pic_width / lcu_size + + 1 : pic_width / lcu_size; + int pic_height_lcu = (pic_height % lcu_size) ? pic_height / lcu_size + + 1 : pic_height / lcu_size; + /*SUPPORT_10BIT*/ + int losless_comp_header_size = compute_losless_comp_header_size + (pic_width, pic_height); + /*always alloc buf for 10bit*/ + int losless_comp_body_size = compute_losless_comp_body_size + (hevc, pic_width, pic_height, 0); + int mc_buffer_size = losless_comp_header_size + + losless_comp_body_size; + int mc_buffer_size_h = (mc_buffer_size + 0xffff) >> 16; + int mc_buffer_size_u_v_h = 0; + + int dw_mode = get_double_write_mode(hevc); + + if (hevc->mmu_enable) { + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (IS_8K_SIZE(hevc->pic_w, hevc->pic_h))) + buf_size = ((MMU_COMPRESS_HEADER_SIZE_8K + 0xffff) >> 16) + << 16; + else + buf_size = ((MMU_COMPRESS_HEADER_SIZE_4K + 0xffff) >> 16) + << 16; + } else + buf_size = 0; + + if (dw_mode) { + int pic_width_dw = pic_width / + get_double_write_ratio(dw_mode); + int pic_height_dw = pic_height / + get_double_write_ratio(dw_mode); + + int pic_width_lcu_dw = (pic_width_dw % lcu_size) ? 
+ pic_width_dw / lcu_size + 1 : + pic_width_dw / lcu_size; + int pic_height_lcu_dw = (pic_height_dw % lcu_size) ? + pic_height_dw / lcu_size + 1 : + pic_height_dw / lcu_size; + int lcu_total_dw = pic_width_lcu_dw * pic_height_lcu_dw; + + int mc_buffer_size_u_v = lcu_total_dw * lcu_size * lcu_size / 2; + mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16; + /*64k alignment*/ + buf_size += ((mc_buffer_size_u_v_h << 16) * 3); + } + + if ((!hevc->mmu_enable) && + ((dw_mode & 0x10) == 0)) { + /* use compress mode without mmu, + need buf for compress decoding*/ + buf_size += (mc_buffer_size_h << 16); + } + + /*in case start adr is not 64k alignment*/ + if (buf_size > 0) + buf_size += 0x10000; + + if (buf_stru) { + buf_stru->lcu_total = pic_width_lcu * pic_height_lcu; + buf_stru->mc_buffer_size_h = mc_buffer_size_h; + buf_stru->mc_buffer_size_u_v_h = mc_buffer_size_u_v_h; + } + + hevc_print(hevc, PRINT_FLAG_V4L_DETAIL,"pic width: %d, pic height: %d, headr: %d, body: %d, size h: %d, size uvh: %d, buf size: %x\n", + pic_width, pic_height, losless_comp_header_size, + losless_comp_body_size, mc_buffer_size_h, + mc_buffer_size_u_v_h, buf_size); + + return buf_size; +} + +static int hevc_get_header_size(int w, int h) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (IS_8K_SIZE(w, h))) + return ALIGN(MMU_COMPRESS_HEADER_SIZE_8K, 0x10000); + else + return ALIGN(MMU_COMPRESS_HEADER_SIZE_4K, 0x10000); +} + +static void hevc_put_video_frame(void *vdec_ctx, struct vframe_s *vf) +{ + vh265_vf_put(vf, vdec_ctx); +} + +static void hevc_get_video_frame(void *vdec_ctx, struct vframe_s **vf) +{ + *vf = vh265_vf_get(vdec_ctx); +} + +static struct internal_comp_buf* v4lfb_to_icomp_buf( + struct hevc_state_s *hevc, + struct vdec_v4l2_buffer *fb) +{ + struct aml_video_dec_buf *aml_fb = NULL; + struct aml_vcodec_ctx * v4l2_ctx = hevc->v4l2_ctx; + + aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer); + return &v4l2_ctx->comp_bufs[aml_fb->internal_index]; +} 

/*
 * Like v4lfb_to_icomp_buf() but starting from a picture index: the v4l2
 * buffer pointer was stashed in m_PIC[index]->cma_alloc_addr by
 * v4l_alloc_buf().
 */
static struct internal_comp_buf* index_to_icomp_buf(
		struct hevc_state_s *hevc, int index)
{
	struct aml_video_dec_buf *aml_fb = NULL;
	struct aml_vcodec_ctx * v4l2_ctx = hevc->v4l2_ctx;
	struct vdec_v4l2_buffer *fb = NULL;

	fb = (struct vdec_v4l2_buffer *)
		hevc->m_PIC[index]->cma_alloc_addr;
	aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer);
	return &v4l2_ctx->comp_bufs[aml_fb->internal_index];
}

/* vframe get/put hooks attached to each v4l2 frame buffer's task */
static struct task_ops_s task_dec_ops = {
	.type		= TASK_TYPE_DEC,
	.get_vframe	= hevc_get_video_frame,
	.put_vframe	= hevc_put_video_frame,
};

/*
 * v4l path: obtain an output frame buffer from the v4l2 fb_ops pool,
 * attach the decoder task ops, and record plane addresses/sizes in both
 * m_BUF[pic->index] and @pic.  Supports single-plane (chroma at
 * mem[0].offset within one allocation) and two-plane layouts.
 *
 * Returns fb_ops.alloc()'s result: < 0 on failure.
 */
static int v4l_alloc_buf(struct hevc_state_s *hevc, struct PIC_s *pic)
{
	int ret = -1;
	int i = pic->index;
	struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)hevc->v4l2_ctx;
	struct vdec_v4l2_buffer *fb = NULL;

	ret = ctx->fb_ops.alloc(&ctx->fb_ops, hevc->fb_token, &fb, AML_FB_REQ_DEC);
	if (ret < 0) {
		hevc_print(hevc, 0, "[%d] H265 get buffer fail.\n", ctx->id);
		return ret;
	}

	fb->task->attach(fb->task, &task_dec_ops, hw_to_vdec(hevc));
	fb->status = FB_ST_DECODER;

	if (hevc->mmu_enable) {
		/* compressed header storage comes from the v4l comp buf */
		struct internal_comp_buf *ibuf = v4lfb_to_icomp_buf(hevc, fb);
		hevc->m_BUF[i].header_addr = ibuf->header_addr;
	}

	hevc->m_BUF[i].used_flag = 1;
	/* keep the fb pointer so index_to_icomp_buf()/release can find it */
	hevc->m_BUF[i].v4l_ref_buf_addr = (ulong)fb;
	pic->cma_alloc_addr = hevc->m_BUF[i].v4l_ref_buf_addr;
	if (fb->num_planes == 1) {
		hevc->m_BUF[i].start_adr = fb->m.mem[0].addr;
		hevc->m_BUF[i].luma_size = fb->m.mem[0].offset;
		hevc->m_BUF[i].size = fb->m.mem[0].size;
		fb->m.mem[0].bytes_used = fb->m.mem[0].size;
		pic->dw_y_adr = hevc->m_BUF[i].start_adr;
		pic->dw_u_v_adr = pic->dw_y_adr + hevc->m_BUF[i].luma_size;
		pic->luma_size = fb->m.mem[0].offset;
		pic->chroma_size = fb->m.mem[0].size - fb->m.mem[0].offset;
	} else if (fb->num_planes == 2) {
		hevc->m_BUF[i].start_adr = fb->m.mem[0].addr;
		hevc->m_BUF[i].luma_size = fb->m.mem[0].size;
		hevc->m_BUF[i].chroma_addr = fb->m.mem[1].addr;
		hevc->m_BUF[i].chroma_size = fb->m.mem[1].size;
		hevc->m_BUF[i].size = fb->m.mem[0].size + fb->m.mem[1].size;
		fb->m.mem[0].bytes_used = fb->m.mem[0].size;
		fb->m.mem[1].bytes_used = fb->m.mem[1].size;
		pic->dw_y_adr = hevc->m_BUF[i].start_adr;
		pic->dw_u_v_adr = hevc->m_BUF[i].chroma_addr;
		pic->luma_size = fb->m.mem[0].size;
		pic->chroma_size = fb->m.mem[1].size;
	}

	return ret;
}

/*
 * Non-v4l path: allocate one frame buffer (sized by
 * cal_current_buf_size()) into the first free m_BUF slot via the bmmu
 * box, then zero + flush it.  Failing on one of the first 9 buffers is
 * fatal (DECODER_FATAL_ERROR_NO_MEM); beyond that the decoder can run
 * with fewer buffers.
 *
 * Returns 0 on success, -1 on failure or if a fatal error is already
 * latched.
 */
static int alloc_buf(struct hevc_state_s *hevc)
{
	int i;
	int ret = -1;
	int buf_size = cal_current_buf_size(hevc, NULL);

	if (hevc->fatal_error & DECODER_FATAL_ERROR_NO_MEM)
		return ret;

	/* find the first unallocated slot */
	for (i = 0; i < BUF_POOL_SIZE; i++) {
		if (hevc->m_BUF[i].start_adr == 0)
			break;
	}
	if (i < BUF_POOL_SIZE) {
		if (buf_size > 0) {
			ret = decoder_bmmu_box_alloc_buf_phy
				(hevc->bmmu_box,
				VF_BUFFER_IDX(i), buf_size,
				DRIVER_NAME,
				&hevc->m_BUF[i].start_adr);
			if (ret < 0) {
				hevc->m_BUF[i].start_adr = 0;
				if (i <= 8) {
					/* fewer than 9 buffers cannot sustain
					 * decode: latch fatal no-mem */
					hevc->fatal_error |=
						DECODER_FATAL_ERROR_NO_MEM;
					hevc_print(hevc, PRINT_FLAG_ERROR,
						"%s[%d], size: %d, no mem fatal err\n",
						__func__, i, buf_size);
				}
			}

			if (ret >= 0) {
				hevc->m_BUF[i].size = buf_size;
				hevc->m_BUF[i].used_flag = 0;
				ret = 0;

				if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
					hevc_print(hevc, 0,
						"Buffer %d: start_adr %p size %x\n",
						i,
						(void *)hevc->m_BUF[i].start_adr,
						hevc->m_BUF[i].size);
				}
				/*flush the buffer make sure no cache dirty*/
				if (!vdec_secure(hw_to_vdec(hevc)) && (hevc->m_BUF[i].start_adr)) {
					void *mem_start_virt;
					mem_start_virt =
						codec_mm_phys_to_virt(hevc->m_BUF[i].start_adr);
					if (mem_start_virt) {
						memset(mem_start_virt, 0, hevc->m_BUF[i].size);
						codec_mm_dma_flush(mem_start_virt,
							hevc->m_BUF[i].size, DMA_TO_DEVICE);
					} else {
						/* no linear mapping: let codec_mm
						 * clear by physical address */
						codec_mm_memset(hevc->m_BUF[i].start_adr,
							0, hevc->m_BUF[i].size);
					}
				}
			}
			/*put_cma_alloc_ref();*/ /*DEBUG_TMP*/
		} else
			ret = 0;
	}

	if (ret >= 0) {
		if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
			hevc_print(hevc, 0,
				"alloc buf(%d) for %d/%d size 0x%x) => %p\n",
				i, hevc->pic_w, hevc->pic_h,
				buf_size,
				hevc->m_BUF[i].start_adr);
		}
	} else {
		if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
			hevc_print(hevc, 0,
				"alloc buf(%d) for %d/%d size 0x%x) => Fail!!!\n",
				i, hevc->pic_w, hevc->pic_h,
				buf_size);
		}
	}
	return ret;
}

/* Mark frame-buffer slot @i reusable (bounds-checked, no free). */
static void set_buf_unused(struct hevc_state_s *hevc, int i)
{
	if (i >= 0 && i < BUF_POOL_SIZE)
		hevc->m_BUF[i].used_flag = 0;
}

/*
 * Free every allocated-but-unused frame buffer.  In v4l mode the memory
 * belongs to the v4l2 framework, so only the local bookkeeping is
 * cleared; otherwise the bmmu allocation is released too.
 */
static void dealloc_unused_buf(struct hevc_state_s *hevc)
{
	int i;
	for (i = 0; i < BUF_POOL_SIZE; i++) {
		if (hevc->m_BUF[i].start_adr &&
			hevc->m_BUF[i].used_flag == 0) {
			if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
				hevc_print(hevc, 0,
					"dealloc buf(%d) adr 0x%p size 0x%x\n",
					i, hevc->m_BUF[i].start_adr,
					hevc->m_BUF[i].size);
			}
			if (!hevc->is_used_v4l)
				decoder_bmmu_box_free_idx(
					hevc->bmmu_box,
					VF_BUFFER_IDX(i));
			hevc->m_BUF[i].start_adr = 0;
			hevc->m_BUF[i].header_addr = 0;
			hevc->m_BUF[i].size = 0;
		}
	}
}

/*
 * Release the frame buffer bound to @pic (regardless of used_flag) and
 * detach pic->BUF_index.  Same v4l-vs-bmmu ownership rule as
 * dealloc_unused_buf().
 */
static void dealloc_pic_buf(struct hevc_state_s *hevc,
	struct PIC_s *pic)
{
	int i = pic->BUF_index;
	pic->BUF_index = -1;
	if (i >= 0 &&
		i < BUF_POOL_SIZE &&
		hevc->m_BUF[i].start_adr) {
		if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
			hevc_print(hevc, 0,
				"dealloc buf(%d) adr 0x%p size 0x%x\n",
				i, hevc->m_BUF[i].start_adr,
				hevc->m_BUF[i].size);
		}

		if (!hevc->is_used_v4l)
			decoder_bmmu_box_free_idx(
				hevc->bmmu_box,
				VF_BUFFER_IDX(i));
		hevc->m_BUF[i].used_flag = 0;
		hevc->m_BUF[i].start_adr = 0;
		hevc->m_BUF[i].header_addr = 0;
		hevc->m_BUF[i].size = 0;
	}
}

/*
 * Number of working pictures the v4l parser needs: the stream's DPB
 * requirement (sps_max_dec_pic_buffering_minus1_0 + 1) plus 2 extra
 * (multi-instance reference-clear latency and EOS flush), capped at
 * max_buf_num.
 */
static int v4l_parser_work_pic_num(struct hevc_state_s *hevc)
{
	int used_buf_num = 0;

	pr_debug("margin = %d, sps_max_dec_pic_buffering_minus1_0 = %d\n",
		get_dynamic_buf_num_margin(hevc),
		hevc->param.p.sps_max_dec_pic_buffering_minus1_0);

	used_buf_num = hevc->param.p.sps_max_dec_pic_buffering_minus1_0 + 1;
	/*
	1. need one more for multi instance, as apply_ref_pic_set()
	has no chanch to run to clear referenced flag in some case
	2. for eos add more buffer to flush.
	*/
	used_buf_num += 2;

	if (hevc->save_buffer_mode)
		hevc_print(hevc, 0,
			"save buf _mode : dynamic_buf_num_margin %d ----> %d \n",
			dynamic_buf_num_margin, hevc->dynamic_buf_num_margin);

	if (used_buf_num > max_buf_num)
		used_buf_num = max_buf_num;
	return used_buf_num;
}

/* Count pictures that are allocated and in use (index >= 0). */
static int get_alloc_pic_count(struct hevc_state_s *hevc)
{
	int alloc_pic_count = 0;
	int i;
	struct PIC_s *pic;
	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		pic = hevc->m_PIC[i];
		if (pic && pic->index >= 0)
			alloc_pic_count++;
	}
	return alloc_pic_count;
}

/*
 * v4l counterpart of config_pic(): wire @pic to its (already allocated)
 * buffer slot — header address for mmu mode, canvas ids, and for
 * dw_mode 0x10 the mc addresses mirror the double-write planes set by
 * v4l_alloc_buf().  Always returns 0.
 */
static int v4l_config_pic(struct hevc_state_s *hevc, struct PIC_s *pic)
{
	int i = pic->index;
	int dw_mode = get_double_write_mode(hevc);

	if (hevc->mmu_enable)
		pic->header_adr = hevc->m_BUF[i].header_addr;

	pic->BUF_index = i;
	pic->POC = INVALID_POC;
	pic->mc_canvas_y = pic->index;
	pic->mc_canvas_u_v = pic->index;

	if (dw_mode & 0x10) {
		/* separate luma/chroma canvases in this mode */
		pic->mc_canvas_y = (pic->index << 1);
		pic->mc_canvas_u_v = (pic->index << 1) + 1;
		pic->mc_y_adr = pic->dw_y_adr;
		pic->mc_u_v_adr = pic->dw_u_v_adr;
	}

	return 0;
}

/*
 * Bind @pic to a free buffer slot large enough for the current stream
 * and lay out the header / reference / double-write regions inside it
 * (all sub-regions 64K aligned).  Returns 0 on success, -1 when no
 * suitable slot is free.
 */
static int config_pic(struct hevc_state_s *hevc, struct PIC_s *pic)
{
	int ret = -1;
	int i;
	/*int lcu_size_log2 = hevc->lcu_size_log2;
	int MV_MEM_UNIT=lcu_size_log2==
	6 ? 0x100 : lcu_size_log2==5 ? 0x40 : 0x10;*/
	/*int MV_MEM_UNIT = lcu_size_log2 == 6 ? 0x200 : lcu_size_log2 ==
	5 ? 0x80 : 0x20;
	int mpred_mv_end = hevc->work_space_buf->mpred_mv.buf_start +
	hevc->work_space_buf->mpred_mv.buf_size;*/
	unsigned int y_adr = 0;
	struct buf_stru_s buf_stru;
	int buf_size = cal_current_buf_size(hevc, &buf_stru);
	int dw_mode = get_double_write_mode(hevc);

	/* claim the first free slot that is big enough */
	for (i = 0; i < BUF_POOL_SIZE; i++) {
		if (hevc->m_BUF[i].start_adr != 0 &&
			hevc->m_BUF[i].used_flag == 0 &&
			buf_size <= hevc->m_BUF[i].size) {
			hevc->m_BUF[i].used_flag = 1;
			break;
		}
	}

	if (i >= BUF_POOL_SIZE)
		return -1;

	if (hevc->mmu_enable) {
		/* compressed header first, then the working area */
		pic->header_adr = hevc->m_BUF[i].start_adr;
		if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
			(IS_8K_SIZE(hevc->pic_w, hevc->pic_h)))
			y_adr = hevc->m_BUF[i].start_adr +
				MMU_COMPRESS_HEADER_SIZE_8K;
		else
			y_adr = hevc->m_BUF[i].start_adr +
				MMU_COMPRESS_HEADER_SIZE_4K;
	} else
		y_adr = hevc->m_BUF[i].start_adr;

	y_adr = ((y_adr + 0xffff) >> 16) << 16; /*64k alignment*/

	pic->POC = INVALID_POC;
	/*ensure get_pic_by_POC()
	not get the buffer not decoded*/
	pic->BUF_index = i;

	if ((!hevc->mmu_enable) &&
		((dw_mode & 0x10) == 0)
		) {
		/* compressed reference frame lives inline in this buffer */
		pic->mc_y_adr = y_adr;
		y_adr += (buf_stru.mc_buffer_size_h << 16);
	}
	pic->mc_canvas_y = pic->index;
	pic->mc_canvas_u_v = pic->index;
	if (dw_mode & 0x10) {
		pic->mc_y_adr = y_adr;
		pic->mc_u_v_adr = y_adr +
			((buf_stru.mc_buffer_size_u_v_h << 16) << 1);
		pic->mc_canvas_y = (pic->index << 1);
		pic->mc_canvas_u_v = (pic->index << 1) + 1;

		pic->dw_y_adr = pic->mc_y_adr;
		pic->dw_u_v_adr = pic->mc_u_v_adr;
	} else if (dw_mode) {
		pic->dw_y_adr = y_adr;
		pic->dw_u_v_adr = pic->dw_y_adr +
			((buf_stru.mc_buffer_size_u_v_h << 16) << 1);
	}

	if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
		hevc_print(hevc, 0,
			"%s index %d BUF_index %d mc_y_adr %x\n",
			__func__, pic->index,
			pic->BUF_index, pic->mc_y_adr);
		if (hevc->mmu_enable &&
			dw_mode)
			hevc_print(hevc, 0,
				"mmu double write adr %ld\n",
				pic->cma_alloc_addr);
	}
	ret = 0;

	return ret;
}

/*
 * (Re)build the picture pool: optionally pre-allocate frame buffers
 * (non-v4l only), then create or reset each PIC_s, preserving any
 * already-attached aux_data across the reset.  Non-v4l mode also binds
 * buffers (config_pic) and canvases immediately; v4l defers both.
 */
static void init_pic_list(struct hevc_state_s *hevc)
{
	int i;
	int init_buf_num = hevc->used_buf_num;
	int dw_mode = get_double_write_mode(hevc);
	struct vdec_s *vdec = hw_to_vdec(hevc);
	/*alloc decoder buf will be delay if work on v4l. */
	if (!hevc->is_used_v4l) {
		for (i = 0; i < init_buf_num; i++) {
			if (alloc_buf(hevc) < 0) {
				if (i <= 8) {
					/*if alloced (i+1)>=9
					don't send errors.*/
					hevc->fatal_error |=
						DECODER_FATAL_ERROR_NO_MEM;
				}
				break;
			}
		}
	}

	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		struct PIC_s *pic = hevc->m_PIC[i];

		if (!pic) {
			pic = vzalloc(sizeof(struct PIC_s));
			if (pic == NULL) {
				hevc_print(hevc, 0,
					"%s: alloc pic %d fail!!!\n",
					__func__, i);
				break;
			}
			hevc->m_PIC[i] = pic;
		} else {
			/* keep the aux_data allocation across the reset */
			char *aux_data_tmp;
			u32 aux_data_size;

			aux_data_tmp = pic->aux_data_buf;
			aux_data_size = pic->aux_data_size;
			memset(pic, 0, sizeof(struct PIC_s));
			pic->aux_data_buf = aux_data_tmp;
			pic->aux_data_size = aux_data_size;
		}

		pic->index = i;
		pic->BUF_index = -1;
		pic->mv_buf_index = -1;
		if (vdec->parallel_dec == 1) {
			pic->y_canvas_index = -1;
			pic->uv_canvas_index = -1;
		}

		pic->width = hevc->pic_w;
		pic->height = hevc->pic_h;
		pic->double_write_mode = dw_mode;
		pic->POC = INVALID_POC;

		/*config canvas will be delay if work on v4l. */
		if (!hevc->is_used_v4l) {
			if (config_pic(hevc, pic) < 0) {
				if (get_dbg_flag(hevc))
					hevc_print(hevc, 0,
						"Config_pic %d fail\n", pic->index);
				pic->index = -1;
				i++;
				break;
			}

			if (pic->double_write_mode)
				set_canvas(hevc, pic);
		}
	}
}

/*
 * Tear down the picture pool: free MV buffers (dynamic builds),
 * release per-pic canvases (parallel decode), aux data, and the PIC_s
 * structures themselves.
 */
static void uninit_pic_list(struct hevc_state_s *hevc)
{
	struct vdec_s *vdec = hw_to_vdec(hevc);
	int i;
#ifndef MV_USE_FIXED_BUF
	dealloc_mv_bufs(hevc);
#endif
	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		struct PIC_s *pic = hevc->m_PIC[i];

		if (pic) {
			if (vdec->parallel_dec == 1) {
				vdec->free_canvas_ex(pic->y_canvas_index, vdec->id);
				vdec->free_canvas_ex(pic->uv_canvas_index, vdec->id);
			}
			release_aux_data(hevc, pic);
			vfree(pic);
			hevc->m_PIC[i] = NULL;
		}
	}
}

#ifdef LOSLESS_COMPRESS_MODE
/*
 * Program the compressed-frame (losless) decode head registers for the
 * current resolution: decompression control, body/header layout, and in
 * mmu mode the two SAO vertical-history buffers.
 */
static void init_decode_head_hw(struct hevc_state_s *hevc)
{

	struct BuffInfo_s *buf_spec = hevc->work_space_buf;
	unsigned int data32;

	int losless_comp_header_size =
		compute_losless_comp_header_size(hevc->pic_w,
			hevc->pic_h);
	int losless_comp_body_size = compute_losless_comp_body_size(hevc,
		hevc->pic_w, hevc->pic_h, hevc->mem_saving_mode);

	hevc->losless_comp_body_size = losless_comp_body_size;


	if (hevc->mmu_enable) {
		WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4));
		WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0);
	} else {
		if (hevc->mem_saving_mode == 1)
			WRITE_VREG(HEVCD_MPP_DECOMP_CTL1,
				(1 << 3) | ((workaround_enable & 2) ? 1 : 0));
		else
			WRITE_VREG(HEVCD_MPP_DECOMP_CTL1,
				((workaround_enable & 2) ? 1 : 0));
		WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, (losless_comp_body_size >> 5));
		/*
		 *WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,(0xff<<20) | (0xff<<10) | 0xff);
		 * //8-bit mode
		 */
	}
	WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size);
	WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size);
	WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size);

	if (hevc->mmu_enable) {
		/* split the vbh workspace in half for the two banks */
		WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR, buf_spec->mmu_vbh.buf_start);
		WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR,
			buf_spec->mmu_vbh.buf_start +
			buf_spec->mmu_vbh.buf_size/2);
		data32 = READ_VREG(HEVC_SAO_CTRL9);
		data32 |= 0x1;
		WRITE_VREG(HEVC_SAO_CTRL9, data32);

		/* use HEVC_CM_HEADER_START_ADDR */
		data32 = READ_VREG(HEVC_SAO_CTRL5);
		data32 |= (1<<10);
		WRITE_VREG(HEVC_SAO_CTRL5, data32);
	}

	if (!hevc->m_ins_flag)
		hevc_print(hevc, 0,
			"%s: (%d, %d) body_size 0x%x header_size 0x%x\n",
			__func__, hevc->pic_w, hevc->pic_h,
			losless_comp_body_size, losless_comp_header_size);

}
#endif
#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464

/*
 * Load the per-picture frame addresses into the hardware's anc2axi
 * table (GXL+ uses the auto-incrementing DATA register, older parts
 * the CMD register), zero the IPP canvas registers, and program the
 * decode head when compressed references are in use.  Stops at the
 * first unconfigured picture slot.
 */
static void init_pic_list_hw(struct hevc_state_s *hevc)
{
	int i;
	int cur_pic_num = MAX_REF_PIC_NUM;
	int dw_mode = get_double_write_mode(hevc);
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL)
		WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR,
			(0x1 << 1) | (0x1 << 2));
	else
		WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x0);

	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		if (hevc->m_PIC[i] == NULL ||
			hevc->m_PIC[i]->index == -1) {
			cur_pic_num = i;
			break;
		}
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) {
			/* mmu mode feeds the header address instead of the
			 * luma frame address */
			if (hevc->mmu_enable && ((dw_mode & 0x10) == 0))
				WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA,
					hevc->m_PIC[i]->header_adr>>5);
			else
				WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA,
					hevc->m_PIC[i]->mc_y_adr >> 5);
		} else
			WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
				hevc->m_PIC[i]->mc_y_adr |
				(hevc->m_PIC[i]->mc_canvas_y << 8) | 0x1);
		if (dw_mode & 0x10) {
			if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) {
				WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA,
					hevc->m_PIC[i]->mc_u_v_adr >> 5);
			}
			else
				WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
					hevc->m_PIC[i]->mc_u_v_adr |
					(hevc->m_PIC[i]->mc_canvas_u_v << 8)
					| 0x1);
		}
	}
	if (cur_pic_num == 0)
		return;

	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1);

	/* Zero out canvas registers in IPP -- avoid simulation X */
	WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
		(0 << 8) | (0 << 1) | 1);
	for (i = 0; i < 32; i++)
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);

#ifdef LOSLESS_COMPRESS_MODE
	if ((dw_mode & 0x10) == 0)
		init_decode_head_hw(hevc);
#endif

}


/* Debug dump of every active picture's buffer-manager state. */
static void dump_pic_list(struct hevc_state_s *hevc)
{
	int i;
	struct PIC_s *pic;
	PR_INIT(256);

	hevc_print(hevc, 0,
		"pic_list_init_flag is %d\r\n", hevc->pic_list_init_flag);
	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		pic = hevc->m_PIC[i];
		if (pic == NULL || pic->index == -1)
			continue;
		PR_FILL("index %d buf_idx %d mv_idx %d decode_idx:%d, POC:%d, referenced:%d (LT %d), ",
			pic->index, pic->BUF_index,
#ifndef MV_USE_FIXED_BUF
			pic->mv_buf_index,
#else
			-1,
#endif
			pic->decode_idx, pic->POC, pic->referenced
#ifdef SUPPORT_LONG_TERM_RPS
			, pic->long_term_ref
#else
			, 0
#endif
			);
		PR_FILL("num_reorder_pic:%d, output_mark:%d, error_mark:%d w/h %d,%d",
			pic->num_reorder_pic, pic->output_mark, pic->error_mark,
			pic->width, pic->height);
		PR_FILL("output_ready:%d, mv_wr_start %x vf_ref %d",
			pic->output_ready, pic->mpred_mv_wr_start_addr,
			pic->vf_ref);
		PR_INFO(hevc->index);
	}
}

/* Drop the referenced flag on all pictures and return their MV bufs. */
static void clear_referenced_flag(struct hevc_state_s *hevc)
{
	int i;
	struct PIC_s *pic;
	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		pic = hevc->m_PIC[i];
		if (pic == NULL || pic->index == -1)
			continue;
		if (pic->referenced) {
			pic->referenced = 0;
			put_mv_buf(hevc, pic);
		}
	}
}

/* Invalidate every active picture's POC (e.g. across a stream reset). */
static void clear_poc_flag(struct hevc_state_s *hevc)
{
	int i;
	struct PIC_s *pic;
	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		pic = hevc->m_PIC[i];
		if (pic == NULL || pic->index == -1)
			continue;
		pic->POC = INVALID_POC;
	}
}

/*
 * Pick the next picture to send for display, honoring reorder depth.
 *
 * i_only & 0x4: output strictly in decode order (trick mode) and drop
 * the reference immediately.  Normal mode: optionally fast-path the
 * very first I frame, then choose the lowest-POC pending picture and
 * release it only once enough pictures are queued (num_reorder_pic),
 * on @flush_flag, or when the DPB is effectively full.  Dolby-Vision
 * dual-layer decode holds back the current picture until its enhancement
 * data has arrived.
 *
 * Returns the picture to output, or NULL if none is due yet.
 * NOTE(review): "byeond"/"fisrt" typos are in runtime strings and a
 * local name, left as-is here.
 */
static struct PIC_s *output_pic(struct hevc_state_s *hevc,
		unsigned char flush_flag)
{
	int num_pic_not_yet_display = 0;
	int i, fisrt_pic_flag = 0;
	struct PIC_s *pic;
	struct PIC_s *pic_display = NULL;
	struct vdec_s *vdec = hw_to_vdec(hevc);

	if (hevc->i_only & 0x4) {
		for (i = 0; i < MAX_REF_PIC_NUM; i++) {
			pic = hevc->m_PIC[i];
			if (pic == NULL ||
				(pic->index == -1) ||
				(pic->BUF_index == -1) ||
				(pic->POC == INVALID_POC))
				continue;
			if (pic->output_mark) {
				if (pic_display) {
					if (pic->decode_idx <
						pic_display->decode_idx)
						pic_display = pic;

				} else
					pic_display = pic;

			}
		}
		if (pic_display) {
			pic_display->output_mark = 0;
			pic_display->recon_mark = 0;
			pic_display->output_ready = 1;
			pic_display->referenced = 0;
			put_mv_buf(hevc, pic_display);
		}
	} else {
		for (i = 0; i < MAX_REF_PIC_NUM; i++) {
			pic = hevc->m_PIC[i];
			if (pic == NULL ||
				(pic->index == -1) ||
				(pic->BUF_index == -1) ||
				(pic->POC == INVALID_POC))
				continue;
			if (pic->output_mark)
				num_pic_not_yet_display++;
			if (pic->slice_type == 2 &&
				atomic_read(&hevc->vf_pre_count) == 0 &&
				fast_output_enable & 0x1) {
				/*fast output for first I picture*/
				pic->num_reorder_pic = 0;
				if (vdec->master || vdec->slave)
					pic_display = pic;
				fisrt_pic_flag = 1;
				hevc_print(hevc, 0, "VH265: output first frame\n");
			}
		}

		for (i = 0; i < MAX_REF_PIC_NUM; i++) {
			pic = hevc->m_PIC[i];
			if (pic == NULL ||
				(pic->index == -1) ||
				(pic->BUF_index == -1) ||
				(pic->POC == INVALID_POC))
				continue;
			if (pic->output_mark) {
				if (pic_display) {
					/* lowest POC wins; decode order breaks ties */
					if (pic->POC < pic_display->POC)
						pic_display = pic;
					else if ((pic->POC == pic_display->POC)
						&& (pic->decode_idx <
							pic_display->
							decode_idx))
						pic_display
						= pic;
				} else
					pic_display = pic;
			}
		}
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
		/* dv wait cur_pic all data get,
		some data may get after picture output */
		if ((vdec->master || vdec->slave)
			&& (pic_display == hevc->cur_pic) &&
			(!flush_flag) &&
			(hevc->bypass_dvenl && !dolby_meta_with_el)
			&& (!fisrt_pic_flag))
			pic_display = NULL;
#endif
		if (pic_display) {
			if ((num_pic_not_yet_display >
				pic_display->num_reorder_pic)
				|| flush_flag) {
				pic_display->output_mark = 0;
				pic_display->recon_mark = 0;
				pic_display->output_ready = 1;
			} else if (num_pic_not_yet_display >=
				(MAX_REF_PIC_NUM - 1)) {
				/* DPB is full: force the picture out even
				 * though reorder depth says to wait */
				pic_display->output_mark = 0;
				pic_display->recon_mark = 0;
				pic_display->output_ready = 1;
				hevc_print(hevc, 0,
					"Warning, num_reorder_pic %d is byeond buf num\n",
					pic_display->num_reorder_pic);
			} else
				pic_display = NULL;
		}
	}

	/* after the fast-output I frame, hold the second picture back once
	 * so normal reordering can take over */
	if (pic_display && hevc->sps_num_reorder_pics_0 &&
		(atomic_read(&hevc->vf_pre_count) == 1) && (hevc->first_pic_flag == 1)) {
		pic_display = NULL;
		hevc->first_pic_flag = 2;
	}
	return pic_display;
}

/*
 * Program the hardware canvas tables for the current picture's L0/L1
 * reference lists.  A reference that is missing, has mismatched
 * dimensions, or carries an error mark (with ref_frame_mark_flag set)
 * propagates error_mark to @cur_pic.  Returns 0.
 */
static int config_mc_buffer(struct hevc_state_s *hevc, struct PIC_s *cur_pic)
{
	int i;
	struct PIC_s *pic;

	if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR)
		hevc_print(hevc, 0,
			"config_mc_buffer entered .....\n");
	if (cur_pic->slice_type != 2) {	/* P and B pic */
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			(0 << 8) | (0 << 1) | 1);
		for (i = 0; i < cur_pic->RefNum_L0; i++) {
			pic =
				get_ref_pic_by_POC(hevc,
					cur_pic->
					m_aiRefPOCList0[cur_pic->
					slice_idx][i]);
			if (pic) {
				if ((pic->width != hevc->pic_w) ||
					(pic->height != hevc->pic_h)) {
					hevc_print(hevc, 0,
						"%s: Wrong reference pic (poc %d) width/height %d/%d\n",
						__func__, pic->POC,
						pic->width, pic->height);
					cur_pic->error_mark = 1;
				}
				if (pic->error_mark && (ref_frame_mark_flag[hevc->index]))
					cur_pic->error_mark = 1;
				WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR,
					(pic->mc_canvas_u_v << 16)
					| (pic->mc_canvas_u_v
						<< 8) |
					pic->mc_canvas_y);
				if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
					hevc_print_cont(hevc, 0,
						"refid %x mc_canvas_u_v %x",
						i, pic->mc_canvas_u_v);
					hevc_print_cont(hevc, 0,
						" mc_canvas_y %x\n",
						pic->mc_canvas_y);
				}
			} else
				cur_pic->error_mark = 1;

			if (pic == NULL || pic->error_mark) {
				hevc_print(hevc, 0,
					"Error %s, %dth poc (%d) %s",
					__func__, i,
					cur_pic->m_aiRefPOCList0[cur_pic->
					slice_idx][i],
					pic ? "has error" :
					"not in list0");
			}
		}
	}
	if (cur_pic->slice_type == 0) {	/* B pic */
		if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR)
			hevc_print(hevc, 0,
				"config_mc_buffer RefNum_L1\n");
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
			(16 << 8) | (0 << 1) | 1);

		for (i = 0; i < cur_pic->RefNum_L1; i++) {
			pic =
				get_ref_pic_by_POC(hevc,
					cur_pic->
					m_aiRefPOCList1[cur_pic->
					slice_idx][i]);
			if (pic) {
				if ((pic->width != hevc->pic_w) ||
					(pic->height != hevc->pic_h)) {
					hevc_print(hevc, 0,
						"%s: Wrong reference pic (poc %d) width/height %d/%d\n",
						__func__, pic->POC,
						pic->width, pic->height);
					cur_pic->error_mark = 1;
				}

				if (pic->error_mark && (ref_frame_mark_flag[hevc->index]))
					cur_pic->error_mark = 1;
				WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR,
					(pic->mc_canvas_u_v << 16)
					| (pic->mc_canvas_u_v
						<< 8) |
					pic->mc_canvas_y);
				if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
					hevc_print_cont(hevc, 0,
						"refid %x mc_canvas_u_v %x",
						i, pic->mc_canvas_u_v);
					hevc_print_cont(hevc, 0,
						" mc_canvas_y %x\n",
						pic->mc_canvas_y);
				}
			} else
				cur_pic->error_mark = 1;

			if (pic == NULL || pic->error_mark) {
				hevc_print(hevc, 0,
					"Error %s, %dth poc (%d) %s",
					__func__, i,
					cur_pic->m_aiRefPOCList1[cur_pic->
					slice_idx][i],
					pic ? "has error" :
					"not in list1");
			}
		}
	}
	return 0;
}

#ifdef SUPPORT_LONG_TERM_RPS
/* Is some referenced picture with POC @poc marked as a long-term ref? */
static unsigned char is_ref_long_term(struct hevc_state_s *hevc, int poc)
{
	int ii;
	struct PIC_s *pic;
	for (ii = 0; ii < MAX_REF_PIC_NUM; ii++) {
		pic = hevc->m_PIC[ii];
		if (pic == NULL ||
			pic->index == -1 ||
			pic->BUF_index == -1
			)
			continue;

		if (pic->referenced && pic->POC == poc
			&& pic->long_term_ref)
			return 1;
	}
	return 0;
}

#endif

/*
 * Apply the current slice's reference picture set: any picture whose
 * POC is not named by CUR_RPS (and is not the current picture) loses
 * its referenced flag and returns its MV buffer.  CUR_RPS entries hold
 * a signed POC delta in the low bits with direction/used/long-term
 * flag bits above (RPS_USED_BIT / RPS_LT_BIT), terminated by
 * RPS_END / bit15.
 */
static void apply_ref_pic_set(struct hevc_state_s *hevc, int cur_poc,
	union param_u *params)
{
	int ii, i;
	int poc_tmp;
	struct PIC_s *pic;
	unsigned char is_referenced;
	/* hevc_print(hevc, 0,
	"%s cur_poc %d\n", __func__, cur_poc); */
	if (pic_list_debug & 0x2) {
		pr_err("cur poc %d\n", cur_poc);
	}
	for (ii = 0; ii < MAX_REF_PIC_NUM; ii++) {
		pic = hevc->m_PIC[ii];
		if (pic == NULL ||
			pic->index == -1 ||
			pic->BUF_index == -1
			)
			continue;

#ifdef SUPPORT_LONG_TERM_RPS
		/* re-derived below from the RPS LT bit for kept refs */
		pic->long_term_ref = 0;
#endif
		if ((pic->referenced == 0 || pic->POC == cur_poc))
			continue;
		is_referenced = 0;

		for (i = 0; i < 16; i++) {
			int delt;
#ifdef SUPPORT_LONG_TERM_RPS
			if (params->p.CUR_RPS[i] == RPS_END)
				break;
#else
			if (params->p.CUR_RPS[i] & 0x8000)
				break;
#endif
			delt =
				params->p.CUR_RPS[i] &
				((1 << (RPS_USED_BIT - 1)) - 1);
			if (params->p.CUR_RPS[i] & (1 << (RPS_USED_BIT - 1))) {
				/* negative delta, stored as a complement */
				poc_tmp =
					cur_poc - ((1 << (RPS_USED_BIT - 1)) -
						delt);
			} else
				poc_tmp = cur_poc + delt;
			if (poc_tmp == pic->POC) {
#ifdef SUPPORT_LONG_TERM_RPS
				if (params->p.CUR_RPS[i] & (1 << (RPS_LT_BIT)))
					pic->long_term_ref = 1;
				if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE)
					hevc_print(hevc, 0, "%d: CUR_RPS 0x%x, LT %d\n",
						i, params->p.CUR_RPS[i],
						pic->long_term_ref);
#endif
				is_referenced = 1;
				break;
			}
		}
		if (is_referenced == 0) {
			pic->referenced = 0;
			put_mv_buf(hevc, pic);
			/* hevc_print(hevc, 0,
			"set poc %d reference to 0\n", pic->POC); */
			if (pic_list_debug & 0x2) {
				pr_err("set poc %d reference to 0\n", pic->POC);
			}
		}
	}

}

/*
 * Build the L0/L1 reference POC lists for the current slice from
 * CUR_RPS (split into before/after/long-term sets), applying the
 * optional reference-list modification syntax.
 * NOTE: definition continues beyond this chunk.
 */
static void set_ref_pic_list(struct hevc_state_s *hevc, union param_u *params)
{
	struct PIC_s *pic = hevc->cur_pic;
	int i, rIdx;
	int num_neg = 0;
	int num_pos = 0;
	int total_num;
	int num_ref_idx_l0_active =
		(params->p.num_ref_idx_l0_active >
		MAX_REF_ACTIVE) ? MAX_REF_ACTIVE :
		params->p.num_ref_idx_l0_active;
	int num_ref_idx_l1_active =
		(params->p.num_ref_idx_l1_active >
		MAX_REF_ACTIVE) ? MAX_REF_ACTIVE :
		params->p.num_ref_idx_l1_active;

	int RefPicSetStCurr0[16];
	int RefPicSetStCurr1[16];
#ifdef SUPPORT_LONG_TERM_RPS
	int num_lt = 0;
	int RefPicSetLtCurr[16];
#endif
	PR_INIT(128);

	for (i = 0; i < 16; i++) {
		RefPicSetStCurr0[i] = 0;
		RefPicSetStCurr1[i] = 0;
		pic->m_aiRefPOCList0[pic->slice_idx][i] = 0;
		pic->m_aiRefPOCList1[pic->slice_idx][i] = 0;
	}
	for (i = 0; i < 16; i++) {
#ifdef SUPPORT_LONG_TERM_RPS
		if (params->p.CUR_RPS[i] == RPS_END)
			break;
#else
		if (params->p.CUR_RPS[i] & 0x8000)
			break;
#endif
		if ((params->p.CUR_RPS[i] >> RPS_USED_BIT) & 1) {
			int delt =
				params->p.CUR_RPS[i] &
				((1 << (RPS_USED_BIT - 1)) - 1);

			if ((params->p.CUR_RPS[i] >> (RPS_USED_BIT - 1)) & 1) {
#ifdef SUPPORT_LONG_TERM_RPS
				if ((params->p.CUR_RPS[i] >> RPS_LT_BIT) & 1) {
					RefPicSetLtCurr[num_lt] =
						pic->POC - ((1 << (RPS_USED_BIT - 1)) -
							delt);
					num_lt++;
					continue;
				}
#endif

				RefPicSetStCurr0[num_neg] =
					pic->POC - ((1 << (RPS_USED_BIT - 1)) -
						delt);
				/* hevc_print(hevc, 0,
				 * "RefPicSetStCurr0 %x %x %x\n",
				 * RefPicSetStCurr0[num_neg], pic->POC,
				 * (0x800-(params[i]&0x7ff)));
				 */
				num_neg++;
			} else {
#ifdef SUPPORT_LONG_TERM_RPS
				if ((params->p.CUR_RPS[i] >> RPS_LT_BIT) & 1) {
					RefPicSetLtCurr[num_lt] = pic->POC + delt;
					num_lt++;
					continue;
				}
#endif
				RefPicSetStCurr1[num_pos] = pic->POC + delt;
				/* hevc_print(hevc, 0,
				 * "RefPicSetStCurr1 %d\n",
				 * RefPicSetStCurr1[num_pos]);
				 */
				num_pos++;
			}
		}
	}
#ifdef SUPPORT_LONG_TERM_RPS
	total_num = num_neg + num_pos + num_lt;
#else
	total_num = num_neg + num_pos;
#endif
	if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
		hevc_print(hevc, 0,
			"%s: curpoc %d slice_type %d, total %d ",
			__func__, pic->POC, params->p.slice_type, total_num);
#ifdef SUPPORT_LONG_TERM_RPS
		hevc_print_cont(hevc, 0,
			"num_neg %d num_lt %d num_list0 %d num_list1 %d\n",
			num_neg, num_lt, num_ref_idx_l0_active, num_ref_idx_l1_active);
#else
		hevc_print_cont(hevc, 0,
			"num_neg %d num_list0 %d num_list1 %d\n",
			num_neg, num_ref_idx_l0_active, num_ref_idx_l1_active);
#endif

	}

	if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
		hevc_print(hevc, 0,
			"HEVC Stream buf start ");
		hevc_print_cont(hevc, 0,
			"%x end %x wr %x rd %x lev %x ctl %x intctl %x\n",
			READ_VREG(HEVC_STREAM_START_ADDR),
			READ_VREG(HEVC_STREAM_END_ADDR),
			READ_VREG(HEVC_STREAM_WR_PTR),
			READ_VREG(HEVC_STREAM_RD_PTR),
			READ_VREG(HEVC_STREAM_LEVEL),
			READ_VREG(HEVC_STREAM_FIFO_CTL),
			READ_VREG(HEVC_PARSER_INT_CONTROL));
	}

	if (total_num > 0) {
		if (params->p.modification_flag & 0x1) {
			if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR)
				hevc_print(hevc, 0, "ref0 POC (modification):");
			for (rIdx = 0; rIdx < num_ref_idx_l0_active; rIdx++) {
				int cIdx = params->p.modification_list[rIdx];

				pic->m_aiRefPOCList0[pic->slice_idx][rIdx] =
#ifdef SUPPORT_LONG_TERM_RPS
					cIdx >= (num_neg + num_pos) ?
					RefPicSetLtCurr[cIdx - num_neg - num_pos] :
#endif
					(cIdx >=
					num_neg ? RefPicSetStCurr1[cIdx -
					num_neg] :
					RefPicSetStCurr0[cIdx]);
				if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
					PR_FILL("%d ",
						pic->m_aiRefPOCList0[pic->
						slice_idx]
						[rIdx]);
				}
			}
		} else {
			if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR)
				hevc_print(hevc, 0, "ref0 POC:");
			for (rIdx = 0; rIdx < num_ref_idx_l0_active; rIdx++) {
				int cIdx = rIdx % total_num;

				pic->m_aiRefPOCList0[pic->slice_idx][rIdx] =
#ifdef SUPPORT_LONG_TERM_RPS
					cIdx >= (num_neg + num_pos) ?
					RefPicSetLtCurr[cIdx - num_neg - num_pos] :
#endif
					(cIdx >=
					num_neg ? RefPicSetStCurr1[cIdx -
					num_neg] :
					RefPicSetStCurr0[cIdx]);
				if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
					PR_FILL("%d ",
						pic->m_aiRefPOCList0[pic->
						slice_idx]
						[rIdx]);
				}
			}
		}
		if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR)
			PR_INFO(hevc->index);
		if (params->p.slice_type == B_SLICE) {
			if (params->p.modification_flag & 0x2) {
				if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR)
					hevc_print(hevc, 0,
						"ref1 POC (modification):");
				for (rIdx = 0; rIdx < num_ref_idx_l1_active;
					rIdx++) {
					int cIdx;

					if (params->p.modification_flag & 0x1) {
						cIdx =
							params->p.
							modification_list
							[num_ref_idx_l0_active +
							rIdx];
					} else {
						cIdx =
							params->p.
							modification_list[rIdx];
					}
					pic->m_aiRefPOCList1[pic->
						slice_idx][rIdx] =
#ifdef SUPPORT_LONG_TERM_RPS
						cIdx >= (num_neg + num_pos) ?
						RefPicSetLtCurr[cIdx - num_neg - num_pos] :
#endif
						(cIdx >=
						num_pos ?
						RefPicSetStCurr0[cIdx - num_pos]
						: RefPicSetStCurr1[cIdx]);
					if (get_dbg_flag(hevc) &
						H265_DEBUG_BUFMGR) {
						PR_FILL("%d ",
							pic->
							m_aiRefPOCList1[pic->
							slice_idx]
							[rIdx]);
					}
				}
			} else {
				if (get_dbg_flag(hevc) &
					H265_DEBUG_BUFMGR)
					hevc_print(hevc, 0, "ref1 POC:");
				for (rIdx = 0; rIdx < num_ref_idx_l1_active;
					rIdx++) {
					int cIdx = rIdx % total_num;

					pic->m_aiRefPOCList1[pic->
						slice_idx][rIdx] =
#ifdef SUPPORT_LONG_TERM_RPS
						cIdx >= (num_neg + num_pos) ?
						RefPicSetLtCurr[cIdx - num_neg - num_pos] :
#endif
						(cIdx >=
						num_pos ?
						RefPicSetStCurr0[cIdx -
						num_pos]
						: RefPicSetStCurr1[cIdx]);
					if (get_dbg_flag(hevc) &
						H265_DEBUG_BUFMGR) {
						PR_FILL("%d ",
							pic->
							m_aiRefPOCList1[pic->
							slice_idx]
							[rIdx]);
					}
				}
			}
			if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR)
				PR_INFO(hevc->index);
		}
	}
	/*set m_PIC */
	pic->slice_type = (params->p.slice_type == I_SLICE) ? 2 :
		(params->p.slice_type == P_SLICE) ? 1 :
		(params->p.slice_type == B_SLICE) ?
0 : 3; + pic->RefNum_L0 = num_ref_idx_l0_active; + pic->RefNum_L1 = num_ref_idx_l1_active; +} + +static void update_tile_info(struct hevc_state_s *hevc, int pic_width_cu, + int pic_height_cu, int sao_mem_unit, + union param_u *params) +{ + int i, j; + int start_cu_x, start_cu_y; + int sao_vb_size = (sao_mem_unit + (2 << 4)) * pic_height_cu; + int sao_abv_size = sao_mem_unit * pic_width_cu; + PR_INIT(128); +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + int tmpRefillLcuSize = 1 << + (params->p.log2_min_coding_block_size_minus3 + + 3 + params->p.log2_diff_max_min_coding_block_size); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%x, %x, %x, %x\n", + params->p.slice_segment_address, + params->p.bit_depth, + params->p.tiles_enabled_flag, + tmpRefillLcuSize); + if (params->p.slice_segment_address == 0 && + params->p.bit_depth != 0 && + (params->p.tiles_enabled_flag & 1) && + tmpRefillLcuSize == 64) + hevc->delrefill_check = 1; + else + hevc->delrefill_check = 0; + } +#endif + + hevc->tile_enabled = params->p.tiles_enabled_flag & 1; + if (params->p.tiles_enabled_flag & 1) { + hevc->num_tile_col = params->p.num_tile_columns_minus1 + 1; + hevc->num_tile_row = params->p.num_tile_rows_minus1 + 1; + + if (hevc->num_tile_row > MAX_TILE_ROW_NUM + || hevc->num_tile_row <= 0) { + hevc->num_tile_row = 1; + hevc_print(hevc, 0, + "%s: num_tile_rows_minus1 (%d) error!!\n", + __func__, params->p.num_tile_rows_minus1); + } + if (hevc->num_tile_col > MAX_TILE_COL_NUM + || hevc->num_tile_col <= 0) { + hevc->num_tile_col = 1; + hevc_print(hevc, 0, + "%s: num_tile_columns_minus1 (%d) error!!\n", + __func__, params->p.num_tile_columns_minus1); + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "%s pic_w_cu %d pic_h_cu %d tile_enabled ", + __func__, pic_width_cu, pic_height_cu); + hevc_print_cont(hevc, 0, + "num_tile_col %d num_tile_row %d:\n", + hevc->num_tile_col, hevc->num_tile_row); + } + + if 
(params->p.tiles_enabled_flag & 2) { /* uniform flag */ + int w = pic_width_cu / hevc->num_tile_col; + int h = pic_height_cu / hevc->num_tile_row; + + start_cu_y = 0; + for (i = 0; i < hevc->num_tile_row; i++) { + start_cu_x = 0; + for (j = 0; j < hevc->num_tile_col; j++) { + if (j == (hevc->num_tile_col - 1)) { + hevc->m_tile[i][j].width = + pic_width_cu - + start_cu_x; + } else + hevc->m_tile[i][j].width = w; + if (i == (hevc->num_tile_row - 1)) { + hevc->m_tile[i][j].height = + pic_height_cu - + start_cu_y; + } else + hevc->m_tile[i][j].height = h; + hevc->m_tile[i][j].start_cu_x + = start_cu_x; + hevc->m_tile[i][j].start_cu_y + = start_cu_y; + hevc->m_tile[i][j].sao_vb_start_addr = + hevc->work_space_buf->sao_vb. + buf_start + j * sao_vb_size; + hevc->m_tile[i][j].sao_abv_start_addr = + hevc->work_space_buf->sao_abv. + buf_start + i * sao_abv_size; + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + PR_FILL( + "{y=%d, x=%d w %d h %d ", + i, j, hevc->m_tile[i][j].width, + hevc->m_tile[i][j].height); + PR_FILL( + "start_x %d start_y %d ", + hevc->m_tile[i][j].start_cu_x, + hevc->m_tile[i][j].start_cu_y); + PR_FILL( + "sao_vb_start 0x%x ", + hevc->m_tile[i][j]. + sao_vb_start_addr); + PR_FILL( + "sao_abv_start 0x%x}\n", + hevc->m_tile[i][j]. 
+ sao_abv_start_addr); + PR_INFO(hevc->index); + } + start_cu_x += hevc->m_tile[i][j].width; + + } + start_cu_y += hevc->m_tile[i][0].height; + } + } else { + start_cu_y = 0; + for (i = 0; i < hevc->num_tile_row; i++) { + start_cu_x = 0; + for (j = 0; j < hevc->num_tile_col; j++) { + if (j == (hevc->num_tile_col - 1)) { + hevc->m_tile[i][j].width = + pic_width_cu - + start_cu_x; + } else { + hevc->m_tile[i][j].width = + params->p.tile_width[j]; + } + if (i == (hevc->num_tile_row - 1)) { + hevc->m_tile[i][j].height = + pic_height_cu - + start_cu_y; + } else { + hevc->m_tile[i][j].height = + params-> + p.tile_height[i]; + } + hevc->m_tile[i][j].start_cu_x + = start_cu_x; + hevc->m_tile[i][j].start_cu_y + = start_cu_y; + hevc->m_tile[i][j].sao_vb_start_addr = + hevc->work_space_buf->sao_vb. + buf_start + j * sao_vb_size; + hevc->m_tile[i][j].sao_abv_start_addr = + hevc->work_space_buf->sao_abv. + buf_start + i * sao_abv_size; + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + PR_FILL( + "{y=%d, x=%d w %d h %d ", + i, j, hevc->m_tile[i][j].width, + hevc->m_tile[i][j].height); + PR_FILL( + "start_x %d start_y %d ", + hevc->m_tile[i][j].start_cu_x, + hevc->m_tile[i][j].start_cu_y); + PR_FILL( + "sao_vb_start 0x%x ", + hevc->m_tile[i][j]. + sao_vb_start_addr); + PR_FILL( + "sao_abv_start 0x%x}\n", + hevc->m_tile[i][j]. 
+ sao_abv_start_addr); + PR_INFO(hevc->index); + } + start_cu_x += hevc->m_tile[i][j].width; + } + start_cu_y += hevc->m_tile[i][0].height; + } + } + } else { + hevc->num_tile_col = 1; + hevc->num_tile_row = 1; + hevc->m_tile[0][0].width = pic_width_cu; + hevc->m_tile[0][0].height = pic_height_cu; + hevc->m_tile[0][0].start_cu_x = 0; + hevc->m_tile[0][0].start_cu_y = 0; + hevc->m_tile[0][0].sao_vb_start_addr = + hevc->work_space_buf->sao_vb.buf_start; + hevc->m_tile[0][0].sao_abv_start_addr = + hevc->work_space_buf->sao_abv.buf_start; + } +} + +static int get_tile_index(struct hevc_state_s *hevc, int cu_adr, + int pic_width_lcu) +{ + int cu_x; + int cu_y; + int tile_x = 0; + int tile_y = 0; + int i; + + if (pic_width_lcu == 0) { + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "%s Error, pic_width_lcu is 0, pic_w %d, pic_h %d\n", + __func__, hevc->pic_w, hevc->pic_h); + } + return -1; + } + cu_x = cu_adr % pic_width_lcu; + cu_y = cu_adr / pic_width_lcu; + if (hevc->tile_enabled) { + for (i = 0; i < hevc->num_tile_col; i++) { + if (cu_x >= hevc->m_tile[0][i].start_cu_x) + tile_x = i; + else + break; + } + for (i = 0; i < hevc->num_tile_row; i++) { + if (cu_y >= hevc->m_tile[i][0].start_cu_y) + tile_y = i; + else + break; + } + } + return (tile_x) | (tile_y << 8); +} + +static void print_scratch_error(int error_num) +{ +#if 0 + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + " ERROR : HEVC_ASSIST_SCRATCH_TEST Error : %d\n", + error_num); + } +#endif +} + +static void hevc_config_work_space_hw(struct hevc_state_s *hevc) +{ + struct BuffInfo_s *buf_spec = hevc->work_space_buf; + + if (get_dbg_flag(hevc)) + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s %x %x %x %x %x %x %x %x %x %x %x %x %x\n", + __func__, + buf_spec->ipp.buf_start, + buf_spec->start_adr, + buf_spec->short_term_rps.buf_start, + buf_spec->vps.buf_start, + buf_spec->sps.buf_start, + buf_spec->pps.buf_start, + buf_spec->sao_up.buf_start, + buf_spec->swap_buf.buf_start, + 
buf_spec->swap_buf2.buf_start, + buf_spec->scalelut.buf_start, + buf_spec->dblk_para.buf_start, + buf_spec->dblk_data.buf_start, + buf_spec->dblk_data2.buf_start); + WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, buf_spec->ipp.buf_start); + if ((get_dbg_flag(hevc) & H265_DEBUG_SEND_PARAM_WITH_REG) == 0) + WRITE_VREG(HEVC_RPM_BUFFER, (u32)hevc->rpm_phy_addr); + WRITE_VREG(HEVC_SHORT_TERM_RPS, buf_spec->short_term_rps.buf_start); + WRITE_VREG(HEVC_VPS_BUFFER, buf_spec->vps.buf_start); + WRITE_VREG(HEVC_SPS_BUFFER, buf_spec->sps.buf_start); + WRITE_VREG(HEVC_PPS_BUFFER, buf_spec->pps.buf_start); + WRITE_VREG(HEVC_SAO_UP, buf_spec->sao_up.buf_start); + if (hevc->mmu_enable) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR, hevc->frame_mmu_map_phy_addr); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "write HEVC_ASSIST_MMU_MAP_ADDR\n"); + } else + WRITE_VREG(H265_MMU_MAP_BUFFER, hevc->frame_mmu_map_phy_addr); + } /*else + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER, + buf_spec->swap_buf.buf_start); + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, buf_spec->swap_buf2.buf_start);*/ + WRITE_VREG(HEVC_SCALELUT, buf_spec->scalelut.buf_start); +#ifdef HEVC_8K_LFTOFFSET_FIX + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + if (buf_spec->max_width <= 4096 && buf_spec->max_height <= 2304) + WRITE_VREG(HEVC_DBLK_CFG3, 0x4010); + else + WRITE_VREG(HEVC_DBLK_CFG3, 0x8020); + //WRITE_VREG(HEVC_DBLK_CFG3, 0x808020); /*offset should x2 if 8k*/ + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "write HEVC_DBLK_CFG3 to %x\n", READ_VREG(HEVC_DBLK_CFG3)); + } +#endif + /* cfg_p_addr */ + WRITE_VREG(HEVC_DBLK_CFG4, buf_spec->dblk_para.buf_start); + /* cfg_d_addr */ + WRITE_VREG(HEVC_DBLK_CFG5, buf_spec->dblk_data.buf_start); + + WRITE_VREG(HEVC_DBLK_CFGE, buf_spec->dblk_data2.buf_start); + + WRITE_VREG(LMEM_DUMP_ADR, (u32)hevc->lmem_phy_addr); +} + +static void parser_cmd_write(void) +{ + u32 i; + const unsigned short parser_cmd[PARSER_CMD_NUMBER] = { + 0x0401, 0x8401, 
0x0800, 0x0402, 0x9002, 0x1423, + 0x8CC3, 0x1423, 0x8804, 0x9825, 0x0800, 0x04FE, + 0x8406, 0x8411, 0x1800, 0x8408, 0x8409, 0x8C2A, + 0x9C2B, 0x1C00, 0x840F, 0x8407, 0x8000, 0x8408, + 0x2000, 0xA800, 0x8410, 0x04DE, 0x840C, 0x840D, + 0xAC00, 0xA000, 0x08C0, 0x08E0, 0xA40E, 0xFC00, + 0x7C00 + }; + for (i = 0; i < PARSER_CMD_NUMBER; i++) + WRITE_VREG(HEVC_PARSER_CMD_WRITE, parser_cmd[i]); +} + +static void hevc_init_decoder_hw(struct hevc_state_s *hevc, + int decode_pic_begin, int decode_pic_num) +{ + unsigned int data32; + int i; +#if 0 + if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) { + /* Set MCR fetch priorities*/ + data32 = 0x1 | (0x1 << 2) | (0x1 <<3) | + (24 << 4) | (32 << 11) | (24 << 18) | (32 << 25); + WRITE_VREG(HEVCD_MPP_DECOMP_AXIURG_CTL, data32); + } +#endif +#if 1 + /* m8baby test1902 */ + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, + "%s\n", __func__); + data32 = READ_VREG(HEVC_PARSER_VERSION); + if (data32 != 0x00010001) { + print_scratch_error(25); + return; + } + WRITE_VREG(HEVC_PARSER_VERSION, 0x5a5a55aa); + data32 = READ_VREG(HEVC_PARSER_VERSION); + if (data32 != 0x5a5a55aa) { + print_scratch_error(26); + return; + } +#if 0 + /* test Parser Reset */ + /* reset iqit to start mem init again */ + WRITE_VREG(DOS_SW_RESET3, (1 << 14) | + (1 << 3) /* reset_whole parser */ + ); + WRITE_VREG(DOS_SW_RESET3, 0); /* clear reset_whole parser */ + data32 = READ_VREG(HEVC_PARSER_VERSION); + if (data32 != 0x00010001) + hevc_print(hevc, 0, + "Test Parser Fatal Error\n"); +#endif + /* reset iqit to start mem init again */ + WRITE_VREG(DOS_SW_RESET3, (1 << 14) + ); + CLEAR_VREG_MASK(HEVC_CABAC_CONTROL, 1); + CLEAR_VREG_MASK(HEVC_PARSER_CORE_CONTROL, 1); + +#endif + if (!hevc->m_ins_flag) { + data32 = READ_VREG(HEVC_STREAM_CONTROL); + data32 = data32 | (1 << 0); /* stream_fetch_enable */ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) + data32 |= (0xf << 25); /*arwlen_axi_max*/ + WRITE_VREG(HEVC_STREAM_CONTROL, data32); + } + 
data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x00000100) { + print_scratch_error(29); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x00000300) { + print_scratch_error(30); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x12345678); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x9abcdef0); + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x12345678) { + print_scratch_error(31); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x9abcdef0) { + print_scratch_error(32); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x00000100); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x00000300); + + data32 = READ_VREG(HEVC_PARSER_INT_CONTROL); + data32 &= 0x03ffffff; + data32 = data32 | (3 << 29) | (2 << 26) | (1 << 24) + | /* stream_buffer_empty_int_amrisc_enable */ + (1 << 22) | /* stream_fifo_empty_int_amrisc_enable*/ + (1 << 7) | /* dec_done_int_cpu_enable */ + (1 << 4) | /* startcode_found_int_cpu_enable */ + (0 << 3) | /* startcode_found_int_amrisc_enable */ + (1 << 0) /* parser_int_enable */ + ; + WRITE_VREG(HEVC_PARSER_INT_CONTROL, data32); + + data32 = READ_VREG(HEVC_SHIFT_STATUS); + data32 = data32 | (1 << 1) | /* emulation_check_on */ + (1 << 0) /* startcode_check_on */ + ; + WRITE_VREG(HEVC_SHIFT_STATUS, data32); + + WRITE_VREG(HEVC_SHIFT_CONTROL, (3 << 6) |/* sft_valid_wr_position */ + (2 << 4) | /* emulate_code_length_sub_1 */ + (2 << 1) | /* start_code_length_sub_1 */ + (1 << 0) /* stream_shift_enable */ + ); + + WRITE_VREG(HEVC_CABAC_CONTROL, (1 << 0) /* cabac_enable */ + ); + /* hevc_parser_core_clk_en */ + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, (1 << 0) + ); + + WRITE_VREG(HEVC_DEC_STATUS_REG, 0); + + /* Initial IQIT_SCALELUT memory -- just to avoid X in simulation */ + if (is_rdma_enable()) + rdma_back_end_work(hevc->rdma_phy_adr, RDMA_SIZE); + else { + WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0);/*cfg_p_addr*/ + for (i = 0; i < 1024; i++) + WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, 0); + } + +#ifdef 
ENABLE_SWAP_TEST + WRITE_VREG(HEVC_STREAM_SWAP_TEST, 100); +#endif + + /*WRITE_VREG(HEVC_DECODE_PIC_BEGIN_REG, 0);*/ + /*WRITE_VREG(HEVC_DECODE_PIC_NUM_REG, 0xffffffff);*/ + WRITE_VREG(HEVC_DECODE_SIZE, 0); + /*WRITE_VREG(HEVC_DECODE_COUNT, 0);*/ + /* Send parser_cmd */ + WRITE_VREG(HEVC_PARSER_CMD_WRITE, (1 << 16) | (0 << 0)); + + parser_cmd_write(); + + WRITE_VREG(HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2); + + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + /* (1 << 8) | // sao_sw_pred_enable */ + (1 << 5) | /* parser_sao_if_en */ + (1 << 2) | /* parser_mpred_if_en */ + (1 << 0) /* parser_scaler_if_en */ + ); + + /* Changed to Start MPRED in microcode */ + /* + * hevc_print(hevc, 0, "[test.c] Start MPRED\n"); + * WRITE_VREG(HEVC_MPRED_INT_STATUS, + * (1<<31) + * ); + */ + + WRITE_VREG(HEVCD_IPP_TOP_CNTL, (0 << 1) | /* enable ipp */ + (1 << 0) /* software reset ipp and mpp */ + ); + WRITE_VREG(HEVCD_IPP_TOP_CNTL, (1 << 1) | /* enable ipp */ + (0 << 0) /* software reset ipp and mpp */ + ); + + if (get_double_write_mode(hevc) & 0x10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, + 0x1 << 31 /*/Enable NV21 reference read mode for MC*/ + ); + +} + +static void decoder_hw_reset(void) +{ + int i; + unsigned int data32; + + /* reset iqit to start mem init again */ + WRITE_VREG(DOS_SW_RESET3, (1 << 14) + ); + CLEAR_VREG_MASK(HEVC_CABAC_CONTROL, 1); + CLEAR_VREG_MASK(HEVC_PARSER_CORE_CONTROL, 1); + + data32 = READ_VREG(HEVC_STREAM_CONTROL); + data32 = data32 | (1 << 0) /* stream_fetch_enable */ + ; + WRITE_VREG(HEVC_STREAM_CONTROL, data32); + + data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x00000100) { + print_scratch_error(29); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x00000300) { + print_scratch_error(30); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x12345678); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x9abcdef0); + 
data32 = READ_VREG(HEVC_SHIFT_STARTCODE); + if (data32 != 0x12345678) { + print_scratch_error(31); + return; + } + data32 = READ_VREG(HEVC_SHIFT_EMULATECODE); + if (data32 != 0x9abcdef0) { + print_scratch_error(32); + return; + } + WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x00000100); + WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x00000300); + + data32 = READ_VREG(HEVC_PARSER_INT_CONTROL); + data32 &= 0x03ffffff; + data32 = data32 | (3 << 29) | (2 << 26) | (1 << 24) + | /* stream_buffer_empty_int_amrisc_enable */ + (1 << 22) | /*stream_fifo_empty_int_amrisc_enable */ + (1 << 7) | /* dec_done_int_cpu_enable */ + (1 << 4) | /* startcode_found_int_cpu_enable */ + (0 << 3) | /* startcode_found_int_amrisc_enable */ + (1 << 0) /* parser_int_enable */ + ; + WRITE_VREG(HEVC_PARSER_INT_CONTROL, data32); + + data32 = READ_VREG(HEVC_SHIFT_STATUS); + data32 = data32 | (1 << 1) | /* emulation_check_on */ + (1 << 0) /* startcode_check_on */ + ; + WRITE_VREG(HEVC_SHIFT_STATUS, data32); + + WRITE_VREG(HEVC_SHIFT_CONTROL, (3 << 6) |/* sft_valid_wr_position */ + (2 << 4) | /* emulate_code_length_sub_1 */ + (2 << 1) | /* start_code_length_sub_1 */ + (1 << 0) /* stream_shift_enable */ + ); + + WRITE_VREG(HEVC_CABAC_CONTROL, (1 << 0) /* cabac_enable */ + ); + /* hevc_parser_core_clk_en */ + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, (1 << 0) + ); + + /* Initial IQIT_SCALELUT memory -- just to avoid X in simulation */ + WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0); /* cfg_p_addr */ + for (i = 0; i < 1024; i++) + WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, 0); + + /* Send parser_cmd */ + WRITE_VREG(HEVC_PARSER_CMD_WRITE, (1 << 16) | (0 << 0)); + + parser_cmd_write(); + + WRITE_VREG(HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2); + + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + /* (1 << 8) | // sao_sw_pred_enable */ + (1 << 5) | /* parser_sao_if_en */ + (1 << 2) | /* parser_mpred_if_en */ + (1 << 0) /* 
parser_scaler_if_en */ + ); + + WRITE_VREG(HEVCD_IPP_TOP_CNTL, (0 << 1) | /* enable ipp */ + (1 << 0) /* software reset ipp and mpp */ + ); + WRITE_VREG(HEVCD_IPP_TOP_CNTL, (1 << 1) | /* enable ipp */ + (0 << 0) /* software reset ipp and mpp */ + ); +} + +#ifdef CONFIG_HEVC_CLK_FORCED_ON +static void config_hevc_clk_forced_on(void) +{ + unsigned int rdata32; + /* IQIT */ + rdata32 = READ_VREG(HEVC_IQIT_CLK_RST_CTRL); + WRITE_VREG(HEVC_IQIT_CLK_RST_CTRL, rdata32 | (0x1 << 2)); + + /* DBLK */ + rdata32 = READ_VREG(HEVC_DBLK_CFG0); + WRITE_VREG(HEVC_DBLK_CFG0, rdata32 | (0x1 << 2)); + + /* SAO */ + rdata32 = READ_VREG(HEVC_SAO_CTRL1); + WRITE_VREG(HEVC_SAO_CTRL1, rdata32 | (0x1 << 2)); + + /* MPRED */ + rdata32 = READ_VREG(HEVC_MPRED_CTRL1); + WRITE_VREG(HEVC_MPRED_CTRL1, rdata32 | (0x1 << 24)); + + /* PARSER */ + rdata32 = READ_VREG(HEVC_STREAM_CONTROL); + WRITE_VREG(HEVC_STREAM_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_SHIFT_CONTROL); + WRITE_VREG(HEVC_SHIFT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_CABAC_CONTROL); + WRITE_VREG(HEVC_CABAC_CONTROL, rdata32 | (0x1 << 13)); + rdata32 = READ_VREG(HEVC_PARSER_CORE_CONTROL); + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_INT_CONTROL); + WRITE_VREG(HEVC_PARSER_INT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_IF_CONTROL); + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + rdata32 | (0x3 << 5) | (0x3 << 2) | (0x3 << 0)); + + /* IPP */ + rdata32 = READ_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG); + WRITE_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG, rdata32 | 0xffffffff); + + /* MCRCC */ + rdata32 = READ_VREG(HEVCD_MCRCC_CTL1); + WRITE_VREG(HEVCD_MCRCC_CTL1, rdata32 | (0x1 << 3)); +} +#endif + +#ifdef MCRCC_ENABLE +static void config_mcrcc_axi_hw(struct hevc_state_s *hevc, int slice_type) +{ + unsigned int rdata32; + unsigned int rdata32_2; + int l0_cnt = 0; + int l1_cnt = 0x7fff; + + if (get_double_write_mode(hevc) & 0x10) { + l0_cnt = 
hevc->cur_pic->RefNum_L0; + l1_cnt = hevc->cur_pic->RefNum_L1; + } + + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); /* reset mcrcc */ + + if (slice_type == 2) { /* I-PIC */ + /* remove reset -- disables clock */ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x0); + return; + } + + if (slice_type == 0) { /* B-PIC */ + /* Programme canvas0 */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 0); + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + /* Programme canvas1 */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (1 << 1) | 0); + rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32_2 = rdata32_2 & 0xffff; + rdata32_2 = rdata32_2 | (rdata32_2 << 16); + if (rdata32 == rdata32_2 && l1_cnt > 1) { + rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32_2 = rdata32_2 & 0xffff; + rdata32_2 = rdata32_2 | (rdata32_2 << 16); + } + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32_2); + } else { /* P-PIC */ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (1 << 1) | 0); + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + if (l0_cnt == 1) { + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32); + } else { + /* Programme canvas1 */ + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32); + } + } + /* enable mcrcc progressive-mode */ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); +} +#endif + +static void config_title_hw(struct hevc_state_s *hevc, int sao_vb_size, + int sao_mem_unit) +{ + WRITE_VREG(HEVC_sao_mem_unit, sao_mem_unit); + WRITE_VREG(HEVC_SAO_ABV, hevc->work_space_buf->sao_abv.buf_start); + WRITE_VREG(HEVC_sao_vb_size, sao_vb_size); + WRITE_VREG(HEVC_SAO_VB, hevc->work_space_buf->sao_vb.buf_start); +} + +static 
u32 init_aux_size; +static int aux_data_is_avaible(struct hevc_state_s *hevc) +{ + u32 reg_val; + + reg_val = READ_VREG(HEVC_AUX_DATA_SIZE); + if (reg_val != 0 && reg_val != init_aux_size) + return 1; + else + return 0; +} + +static void config_aux_buf(struct hevc_state_s *hevc) +{ + WRITE_VREG(HEVC_AUX_ADR, hevc->aux_phy_addr); + init_aux_size = ((hevc->prefix_aux_size >> 4) << 16) | + (hevc->suffix_aux_size >> 4); + WRITE_VREG(HEVC_AUX_DATA_SIZE, init_aux_size); +} + +static void config_mpred_hw(struct hevc_state_s *hevc) +{ + int i; + unsigned int data32; + struct PIC_s *cur_pic = hevc->cur_pic; + struct PIC_s *col_pic = hevc->col_pic; + int AMVP_MAX_NUM_CANDS_MEM = 3; + int AMVP_MAX_NUM_CANDS = 2; + int NUM_CHROMA_MODE = 5; + int DM_CHROMA_IDX = 36; + int above_ptr_ctrl = 0; + int buffer_linear = 1; + int cu_size_log2 = 3; + + int mpred_mv_rd_start_addr; + int mpred_curr_lcu_x; + int mpred_curr_lcu_y; + int mpred_above_buf_start; + int mpred_mv_rd_ptr; + int mpred_mv_rd_ptr_p1; + int mpred_mv_rd_end_addr; + int MV_MEM_UNIT; + int mpred_mv_wr_ptr; + int *ref_poc_L0, *ref_poc_L1; + + int above_en; + int mv_wr_en; + int mv_rd_en; + int col_isIntra; + + if (hevc->slice_type != 2) { + above_en = 1; + mv_wr_en = 1; + mv_rd_en = 1; + col_isIntra = 0; + } else { + above_en = 1; + mv_wr_en = 1; + mv_rd_en = 0; + col_isIntra = 0; + } + + mpred_mv_rd_start_addr = col_pic->mpred_mv_wr_start_addr; + data32 = READ_VREG(HEVC_MPRED_CURR_LCU); + mpred_curr_lcu_x = data32 & 0xffff; + mpred_curr_lcu_y = (data32 >> 16) & 0xffff; + + MV_MEM_UNIT = + hevc->lcu_size_log2 == 6 ? 0x200 : hevc->lcu_size_log2 == + 5 ? 
0x80 : 0x20; + mpred_mv_rd_ptr = + mpred_mv_rd_start_addr + (hevc->slice_addr * MV_MEM_UNIT); + + mpred_mv_rd_ptr_p1 = mpred_mv_rd_ptr + MV_MEM_UNIT; + mpred_mv_rd_end_addr = + mpred_mv_rd_start_addr + + col_pic->mv_size; + //((hevc->lcu_x_num * hevc->lcu_y_num) * MV_MEM_UNIT); + + mpred_above_buf_start = hevc->work_space_buf->mpred_above.buf_start; + + mpred_mv_wr_ptr = + cur_pic->mpred_mv_wr_start_addr + + (hevc->slice_addr * MV_MEM_UNIT); + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "cur pic index %d col pic index %d\n", cur_pic->index, + col_pic->index); + } + + WRITE_VREG(HEVC_MPRED_MV_WR_START_ADDR, + cur_pic->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_RD_START_ADDR, mpred_mv_rd_start_addr); + + data32 = ((hevc->lcu_x_num - hevc->tile_width_lcu) * MV_MEM_UNIT); + WRITE_VREG(HEVC_MPRED_MV_WR_ROW_JUMP, data32); + WRITE_VREG(HEVC_MPRED_MV_RD_ROW_JUMP, data32); + + data32 = READ_VREG(HEVC_MPRED_CTRL0); + data32 = ((hevc->slice_type & 3) | + (hevc->new_pic & 1) << 2 | + (hevc->new_tile & 1) << 3 | + (hevc->isNextSliceSegment & 1)<< 4 | + (hevc->TMVPFlag & 1)<< 5 | + (hevc->LDCFlag & 1) << 6 | + (hevc->ColFromL0Flag & 1)<< 7 | + (above_ptr_ctrl & 1)<< 8 | + (above_en & 1) << 9 | + (mv_wr_en & 1) << 10 | + (mv_rd_en & 1)<< 11 | + (col_isIntra & 1)<< 12 | + (buffer_linear & 1)<< 13 | + (hevc->LongTerm_Curr & 1) << 14 | + (hevc->LongTerm_Col & 1) << 15 | + (hevc->lcu_size_log2 & 0xf) << 16 | + (cu_size_log2 & 0xf) << 20 | (hevc->plevel & 0x7) << 24); + data32 &= ~(1<< 28); + WRITE_VREG(HEVC_MPRED_CTRL0, data32); + + data32 = READ_VREG(HEVC_MPRED_CTRL1); + data32 = ( +#if 0 + /* no set in m8baby test1902 */ + /* Don't override clk_forced_on , */ + (data32 & (0x1 << 24)) | +#endif + hevc->MaxNumMergeCand | + AMVP_MAX_NUM_CANDS << 4 | + AMVP_MAX_NUM_CANDS_MEM << 8 | + NUM_CHROMA_MODE << 12 | DM_CHROMA_IDX << 16); + WRITE_VREG(HEVC_MPRED_CTRL1, data32); + + data32 = (hevc->pic_w | hevc->pic_h << 16); + WRITE_VREG(HEVC_MPRED_PIC_SIZE, 
data32); + + data32 = ((hevc->lcu_x_num - 1) | (hevc->lcu_y_num - 1) << 16); + WRITE_VREG(HEVC_MPRED_PIC_SIZE_LCU, data32); + + data32 = (hevc->tile_start_lcu_x | hevc->tile_start_lcu_y << 16); + WRITE_VREG(HEVC_MPRED_TILE_START, data32); + + data32 = (hevc->tile_width_lcu | hevc->tile_height_lcu << 16); + WRITE_VREG(HEVC_MPRED_TILE_SIZE_LCU, data32); + + data32 = (hevc->RefNum_L0 | hevc->RefNum_L1 << 8 | 0 + /* col_RefNum_L0<<16| */ + /* col_RefNum_L1<<24 */ + ); + WRITE_VREG(HEVC_MPRED_REF_NUM, data32); + +#ifdef SUPPORT_LONG_TERM_RPS + data32 = 0; + for (i = 0; i < hevc->RefNum_L0; i++) { + if (is_ref_long_term(hevc, + cur_pic->m_aiRefPOCList0 + [cur_pic->slice_idx][i])) + data32 = data32 | (1 << i); + } + for (i = 0; i < hevc->RefNum_L1; i++) { + if (is_ref_long_term(hevc, + cur_pic->m_aiRefPOCList1 + [cur_pic->slice_idx][i])) + data32 = data32 | (1 << (i + 16)); + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "LongTerm_Ref 0x%x\n", data32); + } +#else + data32 = hevc->LongTerm_Ref; +#endif + WRITE_VREG(HEVC_MPRED_LT_REF, data32); + + data32 = 0; + for (i = 0; i < hevc->RefNum_L0; i++) + data32 = data32 | (1 << i); + WRITE_VREG(HEVC_MPRED_REF_EN_L0, data32); + + data32 = 0; + for (i = 0; i < hevc->RefNum_L1; i++) + data32 = data32 | (1 << i); + WRITE_VREG(HEVC_MPRED_REF_EN_L1, data32); + + WRITE_VREG(HEVC_MPRED_CUR_POC, hevc->curr_POC); + WRITE_VREG(HEVC_MPRED_COL_POC, hevc->Col_POC); + + /* below MPRED Ref_POC_xx_Lx registers must follow Ref_POC_xx_L0 -> + * Ref_POC_xx_L1 in pair write order!!! 
+ */ + ref_poc_L0 = &(cur_pic->m_aiRefPOCList0[cur_pic->slice_idx][0]); + ref_poc_L1 = &(cur_pic->m_aiRefPOCList1[cur_pic->slice_idx][0]); + + WRITE_VREG(HEVC_MPRED_L0_REF00_POC, ref_poc_L0[0]); + WRITE_VREG(HEVC_MPRED_L1_REF00_POC, ref_poc_L1[0]); + + WRITE_VREG(HEVC_MPRED_L0_REF01_POC, ref_poc_L0[1]); + WRITE_VREG(HEVC_MPRED_L1_REF01_POC, ref_poc_L1[1]); + + WRITE_VREG(HEVC_MPRED_L0_REF02_POC, ref_poc_L0[2]); + WRITE_VREG(HEVC_MPRED_L1_REF02_POC, ref_poc_L1[2]); + + WRITE_VREG(HEVC_MPRED_L0_REF03_POC, ref_poc_L0[3]); + WRITE_VREG(HEVC_MPRED_L1_REF03_POC, ref_poc_L1[3]); + + WRITE_VREG(HEVC_MPRED_L0_REF04_POC, ref_poc_L0[4]); + WRITE_VREG(HEVC_MPRED_L1_REF04_POC, ref_poc_L1[4]); + + WRITE_VREG(HEVC_MPRED_L0_REF05_POC, ref_poc_L0[5]); + WRITE_VREG(HEVC_MPRED_L1_REF05_POC, ref_poc_L1[5]); + + WRITE_VREG(HEVC_MPRED_L0_REF06_POC, ref_poc_L0[6]); + WRITE_VREG(HEVC_MPRED_L1_REF06_POC, ref_poc_L1[6]); + + WRITE_VREG(HEVC_MPRED_L0_REF07_POC, ref_poc_L0[7]); + WRITE_VREG(HEVC_MPRED_L1_REF07_POC, ref_poc_L1[7]); + + WRITE_VREG(HEVC_MPRED_L0_REF08_POC, ref_poc_L0[8]); + WRITE_VREG(HEVC_MPRED_L1_REF08_POC, ref_poc_L1[8]); + + WRITE_VREG(HEVC_MPRED_L0_REF09_POC, ref_poc_L0[9]); + WRITE_VREG(HEVC_MPRED_L1_REF09_POC, ref_poc_L1[9]); + + WRITE_VREG(HEVC_MPRED_L0_REF10_POC, ref_poc_L0[10]); + WRITE_VREG(HEVC_MPRED_L1_REF10_POC, ref_poc_L1[10]); + + WRITE_VREG(HEVC_MPRED_L0_REF11_POC, ref_poc_L0[11]); + WRITE_VREG(HEVC_MPRED_L1_REF11_POC, ref_poc_L1[11]); + + WRITE_VREG(HEVC_MPRED_L0_REF12_POC, ref_poc_L0[12]); + WRITE_VREG(HEVC_MPRED_L1_REF12_POC, ref_poc_L1[12]); + + WRITE_VREG(HEVC_MPRED_L0_REF13_POC, ref_poc_L0[13]); + WRITE_VREG(HEVC_MPRED_L1_REF13_POC, ref_poc_L1[13]); + + WRITE_VREG(HEVC_MPRED_L0_REF14_POC, ref_poc_L0[14]); + WRITE_VREG(HEVC_MPRED_L1_REF14_POC, ref_poc_L1[14]); + + WRITE_VREG(HEVC_MPRED_L0_REF15_POC, ref_poc_L0[15]); + WRITE_VREG(HEVC_MPRED_L1_REF15_POC, ref_poc_L1[15]); + + if (hevc->new_pic) { + WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, mpred_above_buf_start); 
+ WRITE_VREG(HEVC_MPRED_MV_WPTR, mpred_mv_wr_ptr); + /* WRITE_VREG(HEVC_MPRED_MV_RPTR,mpred_mv_rd_ptr); */ + WRITE_VREG(HEVC_MPRED_MV_RPTR, mpred_mv_rd_start_addr); + } else if (!hevc->isNextSliceSegment) { + /* WRITE_VREG(HEVC_MPRED_MV_RPTR,mpred_mv_rd_ptr_p1); */ + WRITE_VREG(HEVC_MPRED_MV_RPTR, mpred_mv_rd_ptr); + } + + WRITE_VREG(HEVC_MPRED_MV_RD_END_ADDR, mpred_mv_rd_end_addr); +} + +static void config_sao_hw(struct hevc_state_s *hevc, union param_u *params) +{ + unsigned int data32, data32_2; + int misc_flag0 = hevc->misc_flag0; + int slice_deblocking_filter_disabled_flag = 0; + + int mc_buffer_size_u_v = + hevc->lcu_total * hevc->lcu_size * hevc->lcu_size / 2; + int mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16; + struct PIC_s *cur_pic = hevc->cur_pic; + struct aml_vcodec_ctx * v4l2_ctx = hevc->v4l2_ctx; + PR_INIT(128); + + data32 = READ_VREG(HEVC_SAO_CTRL0); + data32 &= (~0xf); + data32 |= hevc->lcu_size_log2; + WRITE_VREG(HEVC_SAO_CTRL0, data32); + + data32 = (hevc->pic_w | hevc->pic_h << 16); + WRITE_VREG(HEVC_SAO_PIC_SIZE, data32); + + data32 = ((hevc->lcu_x_num - 1) | (hevc->lcu_y_num - 1) << 16); + WRITE_VREG(HEVC_SAO_PIC_SIZE_LCU, data32); + + if (hevc->new_pic) + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0xffffffff); +#ifdef LOSLESS_COMPRESS_MODE +/*SUPPORT_10BIT*/ + if ((get_double_write_mode(hevc) & 0x10) == 0) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) + WRITE_VREG(HEVC_SAO_CTRL26, 0); + + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 &= (~(0xff << 16)); + if ((get_double_write_mode(hevc) == 8) || + (get_double_write_mode(hevc) == 9)) { + data32 |= (0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + WRITE_VREG(HEVC_SAO_CTRL26, 0xf); + } else { + if (get_double_write_mode(hevc) == 2 || + get_double_write_mode(hevc) == 3) + data32 |= (0xff<<16); + else if (get_double_write_mode(hevc) == 4 || + get_double_write_mode(hevc) == 5) + data32 |= (0x33<<16); + + if (hevc->mem_saving_mode == 1) + data32 |= (1 << 9); + else + data32 &= ~(1 
<< 9); + if (workaround_enable & 1) + data32 |= (1 << 7); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } + } + data32 = cur_pic->mc_y_adr; + if (get_double_write_mode(hevc)) + WRITE_VREG(HEVC_SAO_Y_START_ADDR, cur_pic->dw_y_adr); + + if ((get_double_write_mode(hevc) & 0x10) == 0) + WRITE_VREG(HEVC_CM_BODY_START_ADDR, data32); + + if (hevc->mmu_enable) + WRITE_VREG(HEVC_CM_HEADER_START_ADDR, cur_pic->header_adr); +#else + data32 = cur_pic->mc_y_adr; + WRITE_VREG(HEVC_SAO_Y_START_ADDR, data32); +#endif + data32 = (mc_buffer_size_u_v_h << 16) << 1; + WRITE_VREG(HEVC_SAO_Y_LENGTH, data32); + +#ifdef LOSLESS_COMPRESS_MODE +/*SUPPORT_10BIT*/ + if (get_double_write_mode(hevc)) + WRITE_VREG(HEVC_SAO_C_START_ADDR, cur_pic->dw_u_v_adr); +#else + data32 = cur_pic->mc_u_v_adr; + WRITE_VREG(HEVC_SAO_C_START_ADDR, data32); +#endif + data32 = (mc_buffer_size_u_v_h << 16); + WRITE_VREG(HEVC_SAO_C_LENGTH, data32); + + if (hevc->is_used_v4l) { + WRITE_VREG(HEVC_SAO_Y_LENGTH, cur_pic->luma_size); + WRITE_VREG(HEVC_SAO_C_LENGTH, cur_pic->chroma_size); + if (debug & PRINT_FLAG_V4L_DETAIL) { + pr_info("[%d] config pic, id: %d, Y:(%x, %d) C:(%x, %d).\n", + v4l2_ctx->id, cur_pic->index, + cur_pic->dw_y_adr, cur_pic->luma_size, + cur_pic->dw_u_v_adr, cur_pic->chroma_size); + } + } + +#ifdef LOSLESS_COMPRESS_MODE +/*SUPPORT_10BIT*/ + if (get_double_write_mode(hevc)) { + WRITE_VREG(HEVC_SAO_Y_WPTR, cur_pic->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_WPTR, cur_pic->dw_u_v_adr); + } +#else + /* multi tile to do... 
*/ + data32 = cur_pic->mc_y_adr; + WRITE_VREG(HEVC_SAO_Y_WPTR, data32); + + data32 = cur_pic->mc_u_v_adr; + WRITE_VREG(HEVC_SAO_C_WPTR, data32); +#endif + /* DBLK CONFIG HERE */ + if (hevc->new_pic) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + data32 = (0xff << 8) | (0x0 << 0); + else + data32 = (0x57 << 8) | /* 1st/2nd write both enable*/ + (0x0 << 0); /* h265 video format*/ + + if (hevc->pic_w >= 1280) + data32 |= (0x1 << 4); /*dblk pipeline mode=1 for performance*/ + data32 &= (~0x300); /*[8]:first write enable (compress) [9]:double write enable (uncompress)*/ + if (get_double_write_mode(hevc) == 0) + data32 |= (0x1 << 8); /*enable first write*/ + else if (get_double_write_mode(hevc) == 0x10) + data32 |= (0x1 << 9); /*double write only*/ + else + data32 |= ((0x1 << 8) |(0x1 << 9)); + + WRITE_VREG(HEVC_DBLK_CFGB, data32); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "[DBLK DEBUG] HEVC1 CFGB : 0x%x\n", data32); + } + data32 = (hevc->pic_w | hevc->pic_h << 16); + WRITE_VREG(HEVC_DBLK_CFG2, data32); + + if ((misc_flag0 >> PCM_ENABLE_FLAG_BIT) & 0x1) { + data32 = + ((misc_flag0 >> + PCM_LOOP_FILTER_DISABLED_FLAG_BIT) & + 0x1) << 3; + } else + data32 = 0; + data32 |= + (((params->p.pps_cb_qp_offset & 0x1f) << 4) | + ((params->p.pps_cr_qp_offset + & 0x1f) << + 9)); + data32 |= + (hevc->lcu_size == + 64) ? 0 : ((hevc->lcu_size == 32) ? 1 : 2); + data32 |= (hevc->pic_w <= 64) ? 
(1 << 20) : 0; + WRITE_VREG(HEVC_DBLK_CFG1, data32); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + /*if (debug & 0x80) {*/ + data32 = 1 << 28; /* Debug only: sts1 chooses dblk_main*/ + WRITE_VREG(HEVC_DBLK_STS1 + 4, data32); /* 0x3510 */ + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "[DBLK DEBUG] HEVC1 STS1 : 0x%x\n", + data32); + /*}*/ + } + } +#if 0 + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + data32 |= (hevc->mem_map_mode << + 12); + +/* [13:12] axi_aformat, + * 0-Linear, 1-32x32, 2-64x32 + */ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + data32 |= (hevc->mem_map_mode << + 4); + +/* [5:4] -- address_format + * 00:linear 01:32x32 10:64x32 + */ + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#else + /* m8baby test1902 */ + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + /* [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 */ + data32 |= (hevc->mem_map_mode << 12); + data32 &= (~0xff0); + data32 |= ((hevc->endian >> 8) & 0xfff); /* data32 |= 0x670; Big-Endian per 64-bit */ + data32 &= (~0x3); /*[1]:dw_disable [0]:cm_disable*/ + if (get_double_write_mode(hevc) == 0) + data32 |= 0x2; /*disable double write*/ + else if (get_double_write_mode(hevc) & 0x10) + data32 |= 0x1; /*disable cm*/ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + unsigned int data; + data = (0x57 << 8) | /* 1st/2nd write both enable*/ + (0x0 << 0); /* h265 video format*/ + if (hevc->pic_w >= 1280) + data |= (0x1 << 4); /*dblk pipeline mode=1 for performance*/ + data &= (~0x300); /*[8]:first write enable (compress) [9]:double write enable (uncompress)*/ + if (get_double_write_mode(hevc) == 0) + data |= (0x1 << 8); /*enable first write*/ + else if (get_double_write_mode(hevc) & 0x10) + data |= (0x1 << 9); /*double write only*/ + else + data |= ((0x1 << 8) |(0x1 << 9)); + + WRITE_VREG(HEVC_DBLK_CFGB, data); + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "[DBLK DEBUG] HEVC1 CFGB : 
0x%x\n", data); + } + + /* swap uv */ + if (hevc->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV21M)) + data32 &= ~(1 << 8); /* NV21 */ + else + data32 |= (1 << 8); /* NV12 */ + } + data32 &= (~(3 << 14)); + data32 |= (2 << 14); + /* + * [31:24] ar_fifo1_axi_thred + * [23:16] ar_fifo0_axi_thred + * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes + * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + * [11:08] axi_lendian_C + * [07:04] axi_lendian_Y + * [3] reserved + * [2] clk_forceon + * [1] dw_disable:disable double write output + * [0] cm_disable:disable compress output + */ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + if (get_double_write_mode(hevc) & 0x10) { + /* [23:22] dw_v1_ctrl + *[21:20] dw_v0_ctrl + *[19:18] dw_h1_ctrl + *[17:16] dw_h0_ctrl + */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /*set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /* [5:4] -- address_format 00:linear 01:32x32 10:64x32 */ + data32 |= (hevc->mem_map_mode << 4); + data32 &= (~0xF); + data32 |= (hevc->endian & 0xf); /* valid only when double write only */ + + /* swap uv */ + if (hevc->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV21M)) + data32 |= (1 << 12); /* NV21 */ + else + data32 &= ~(1 << 12); /* NV12 */ + } + data32 &= (~(3 << 8)); + data32 |= (2 << 8); + /* + * [3:0] little_endian + * [5:4] address_format 00:linear 01:32x32 10:64x32 + * [7:6] reserved + * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte + * [11:10] reserved + * [12] CbCr_byte_swap + * [31:13] reserved + */ + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#endif + data32 = 0; + data32_2 = READ_VREG(HEVC_SAO_CTRL0); + data32_2 &= (~0x300); + /* slice_deblocking_filter_disabled_flag = 0; + * ucode has handle it , so read it from ucode directly 
+ */ + if (hevc->tile_enabled) { + data32 |= + ((misc_flag0 >> + LOOP_FILER_ACROSS_TILES_ENABLED_FLAG_BIT) & + 0x1) << 0; + data32_2 |= + ((misc_flag0 >> + LOOP_FILER_ACROSS_TILES_ENABLED_FLAG_BIT) & + 0x1) << 8; + } + slice_deblocking_filter_disabled_flag = (misc_flag0 >> + SLICE_DEBLOCKING_FILTER_DISABLED_FLAG_BIT) & + 0x1; /* ucode has handle it,so read it from ucode directly */ + if ((misc_flag0 & (1 << DEBLOCKING_FILTER_OVERRIDE_ENABLED_FLAG_BIT)) + && (misc_flag0 & (1 << DEBLOCKING_FILTER_OVERRIDE_FLAG_BIT))) { + /* slice_deblocking_filter_disabled_flag = + * (misc_flag0>>SLICE_DEBLOCKING_FILTER_DISABLED_FLAG_BIT)&0x1; + * //ucode has handle it , so read it from ucode directly + */ + data32 |= slice_deblocking_filter_disabled_flag << 2; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + PR_FILL("(1,%x)", data32); + if (!slice_deblocking_filter_disabled_flag) { + data32 |= (params->p.slice_beta_offset_div2 & 0xf) << 3; + data32 |= (params->p.slice_tc_offset_div2 & 0xf) << 7; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + PR_FILL("(2,%x)", data32); + } + } else { + data32 |= + ((misc_flag0 >> + PPS_DEBLOCKING_FILTER_DISABLED_FLAG_BIT) & + 0x1) << 2; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + PR_FILL("(3,%x)", data32); + if (((misc_flag0 >> PPS_DEBLOCKING_FILTER_DISABLED_FLAG_BIT) & + 0x1) == 0) { + data32 |= (params->p.pps_beta_offset_div2 & 0xf) << 3; + data32 |= (params->p.pps_tc_offset_div2 & 0xf) << 7; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + PR_FILL("(4,%x)", data32); + } + } + if ((misc_flag0 & (1 << PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT)) + && ((misc_flag0 & (1 << SLICE_SAO_LUMA_FLAG_BIT)) + || (misc_flag0 & (1 << SLICE_SAO_CHROMA_FLAG_BIT)) + || (!slice_deblocking_filter_disabled_flag))) { + data32 |= + ((misc_flag0 >> + SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT) + & 0x1) << 1; + data32_2 |= + ((misc_flag0 >> + SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT) + & 0x1) << 9; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + 
PR_FILL("(5,%x)\n", data32); + } else { + data32 |= + ((misc_flag0 >> + PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT) + & 0x1) << 1; + data32_2 |= + ((misc_flag0 >> + PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED_FLAG_BIT) + & 0x1) << 9; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + PR_FILL("(6,%x)\n", data32); + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + PR_INFO(hevc->index); + + WRITE_VREG(HEVC_DBLK_CFG9, data32); + WRITE_VREG(HEVC_SAO_CTRL0, data32_2); +} + +#ifdef TEST_NO_BUF +static unsigned char test_flag = 1; +#endif + +static void pic_list_process(struct hevc_state_s *hevc) +{ + int work_pic_num = hevc->used_buf_num; + int alloc_pic_count = 0; + int i; + struct PIC_s *pic; + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + alloc_pic_count++; + if (pic->output_mark == 0 && pic->referenced == 0 + && pic->output_ready == 0 + && (pic->width != hevc->pic_w || + pic->height != hevc->pic_h) + ) { + set_buf_unused(hevc, pic->BUF_index); + pic->BUF_index = -1; + if (alloc_pic_count > work_pic_num) { + pic->width = 0; + pic->height = 0; + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + release_pic_mmu_buf(hevc, pic); + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + pic->index = -1; + } else { + pic->width = hevc->pic_w; + pic->height = hevc->pic_h; + } + } + } + if (alloc_pic_count < work_pic_num) { + int new_count = alloc_pic_count; + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic && pic->index == -1) { + pic->index = i; + pic->BUF_index = -1; + pic->width = hevc->pic_w; + pic->height = hevc->pic_h; + new_count++; + if (new_count >= + work_pic_num) + break; + } + } + + } + dealloc_unused_buf(hevc); + if (get_alloc_pic_count(hevc) + != alloc_pic_count) { + hevc_print_cont(hevc, 0, + "%s: work_pic_num is %d, Change alloc_pic_count from %d to %d\n", + __func__, + work_pic_num, + 
alloc_pic_count, + get_alloc_pic_count(hevc)); + } +} + +static struct PIC_s *get_new_pic(struct hevc_state_s *hevc, + union param_u *rpm_param) +{ + struct vdec_s *vdec = hw_to_vdec(hevc); + struct PIC_s *new_pic = NULL; + struct PIC_s *pic; + int i; + int ret; + + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + + if (pic->output_mark == 0 && pic->referenced == 0 + && pic->output_ready == 0 + && pic->width == hevc->pic_w + && pic->height == hevc->pic_h + && pic->vf_ref == 0 + ) { + if (new_pic) { + if (new_pic->POC != INVALID_POC) { + if (pic->POC == INVALID_POC || + pic->POC < new_pic->POC) + new_pic = pic; + } + } else + new_pic = pic; + } + } + + if (new_pic == NULL) + return NULL; + + if (new_pic->BUF_index < 0) { + if (alloc_buf(hevc) < 0) + return NULL; + else { + if (config_pic(hevc, new_pic) < 0) { + dealloc_pic_buf(hevc, new_pic); + return NULL; + } + } + new_pic->width = hevc->pic_w; + new_pic->height = hevc->pic_h; + set_canvas(hevc, new_pic); + + init_pic_list_hw(hevc); + } + + if (new_pic) { + new_pic->double_write_mode = + get_double_write_mode(hevc); + if (new_pic->double_write_mode) + set_canvas(hevc, new_pic); + +#ifdef TEST_NO_BUF + if (test_flag) { + test_flag = 0; + return NULL; + } else + test_flag = 1; +#endif + if (get_mv_buf(hevc, new_pic) < 0) + return NULL; + + if (hevc->mmu_enable) { + ret = H265_alloc_mmu(hevc, new_pic, + rpm_param->p.bit_depth, + hevc->frame_mmu_map_addr); + if (ret != 0) { + put_mv_buf(hevc, new_pic); + hevc_print(hevc, 0, + "can't alloc need mmu1,idx %d ret =%d\n", + new_pic->decode_idx, + ret); + return NULL; + } + } + new_pic->referenced = 1; + new_pic->decode_idx = hevc->decode_idx; + new_pic->slice_idx = 0; + new_pic->referenced = 1; + new_pic->output_mark = 0; + new_pic->recon_mark = 0; + new_pic->error_mark = 0; + new_pic->dis_mark = 0; + /* new_pic->output_ready = 0; */ + new_pic->num_reorder_pic = rpm_param->p.sps_num_reorder_pics_0; + 
new_pic->ip_mode = (!new_pic->num_reorder_pic && + !(vdec->slave || vdec->master) && + !disable_ip_mode) ? true : false; + new_pic->losless_comp_body_size = hevc->losless_comp_body_size; + new_pic->POC = hevc->curr_POC; + new_pic->pic_struct = hevc->curr_pic_struct; + if (new_pic->aux_data_buf) + release_aux_data(hevc, new_pic); + new_pic->mem_saving_mode = + hevc->mem_saving_mode; + new_pic->bit_depth_luma = + hevc->bit_depth_luma; + new_pic->bit_depth_chroma = + hevc->bit_depth_chroma; + new_pic->video_signal_type = + hevc->video_signal_type; + + new_pic->conformance_window_flag = + hevc->param.p.conformance_window_flag; + new_pic->conf_win_left_offset = + hevc->param.p.conf_win_left_offset; + new_pic->conf_win_right_offset = + hevc->param.p.conf_win_right_offset; + new_pic->conf_win_top_offset = + hevc->param.p.conf_win_top_offset; + new_pic->conf_win_bottom_offset = + hevc->param.p.conf_win_bottom_offset; + new_pic->chroma_format_idc = + hevc->param.p.chroma_format_idc; + + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s: index %d, buf_idx %d, decode_idx %d, POC %d\n", + __func__, new_pic->index, + new_pic->BUF_index, new_pic->decode_idx, + new_pic->POC); + + } + if (pic_list_debug & 0x1) { + dump_pic_list(hevc); + pr_err("\n*******************************************\n"); + } + + return new_pic; +} + +static int get_free_fb_idx(struct hevc_state_s *hevc) +{ + int i; + + for (i = 0; i < hevc->used_buf_num; ++i) { + if (hevc->m_PIC[i] == NULL) + continue; + + if ((hevc->m_PIC[i]->referenced == 0) && + (hevc->m_PIC[i]->vf_ref == 0) && + (!hevc->m_PIC[i]->cma_alloc_addr)) + break; + } + + return (hevc->m_PIC[i] && + (i != hevc->used_buf_num)) ? 
i : -1; +} + +static struct PIC_s *v4l_get_new_pic(struct hevc_state_s *hevc, + union param_u *rpm_param) +{ + struct vdec_s *vdec = hw_to_vdec(hevc); + int ret; + struct aml_vcodec_ctx * v4l = hevc->v4l2_ctx; + struct v4l_buff_pool *pool = &v4l->cap_pool; + struct PIC_s *new_pic = NULL; + struct PIC_s *pic = NULL; + int i, j, idx; + + for (i = 0; i < pool->in; ++i) { + u32 state = (pool->seq[i] >> 16); + u32 index = (pool->seq[i] & 0xffff); + + switch (state) { + case V4L_CAP_BUFF_IN_DEC: + for (j = 0; j < hevc->used_buf_num; j++) { + pic = hevc->m_PIC[j]; + if (pic == NULL || pic->index == -1) + continue; + + if (pic->output_mark == 0 && + pic->referenced == 0 && + pic->output_ready == 0 && + pic->width == hevc->pic_w && + pic->height == hevc->pic_h && + pic->vf_ref == 0 && + pic->cma_alloc_addr) { + if (new_pic) { + if (new_pic->POC != INVALID_POC) { + if (pic->POC == INVALID_POC || + pic->POC < new_pic->POC) + new_pic = pic; + } + } else + new_pic = pic; + } + } + break; + case V4L_CAP_BUFF_IN_M2M: + idx = get_free_fb_idx(hevc); + if (idx < 0) + break; + pic = hevc->m_PIC[idx]; + pic->width = hevc->pic_w; + pic->height = hevc->pic_h; + hevc->buffer_wrap[idx] = index; + if ((pic->index != -1) && + !v4l_alloc_buf(hevc, pic)) { + v4l_config_pic(hevc, pic); + init_pic_list_hw(hevc); + new_pic = pic; + } + break; + default: + break; + } + + if (new_pic) + break; + } + + if (new_pic == NULL) + return NULL; + + /* for notify eos. 
*/ + if (!rpm_param) + return new_pic; + + new_pic->double_write_mode = get_double_write_mode(hevc); + if (new_pic->double_write_mode) + set_canvas(hevc, new_pic); + + if (get_mv_buf(hevc, new_pic) < 0) + return NULL; + + if (hevc->mmu_enable) { + ret = H265_alloc_mmu(hevc, new_pic, + rpm_param->p.bit_depth, + hevc->frame_mmu_map_addr); + if (ret != 0) { + put_mv_buf(hevc, new_pic); + hevc_print(hevc, 0, + "can't alloc need mmu1,idx %d ret =%d\n", + new_pic->decode_idx, ret); + return NULL; + } + } + + new_pic->referenced = 1; + new_pic->decode_idx = hevc->decode_idx; + new_pic->slice_idx = 0; + new_pic->referenced = 1; + new_pic->output_mark = 0; + new_pic->recon_mark = 0; + new_pic->error_mark = 0; + new_pic->dis_mark = 0; + /* new_pic->output_ready = 0; */ + new_pic->num_reorder_pic = rpm_param->p.sps_num_reorder_pics_0; + new_pic->ip_mode = hevc->low_latency_flag ? true : + (!new_pic->num_reorder_pic && + !(vdec->slave || vdec->master) && + !disable_ip_mode) ? true : false; + new_pic->losless_comp_body_size = hevc->losless_comp_body_size; + new_pic->POC = hevc->curr_POC; + new_pic->pic_struct = hevc->curr_pic_struct; + + v4l->aux_infos.bind_sei_buffer(v4l, &new_pic->aux_data_buf, + &new_pic->aux_data_size, &new_pic->ctx_buf_idx); + + new_pic->mem_saving_mode = + hevc->mem_saving_mode; + new_pic->bit_depth_luma = + hevc->bit_depth_luma; + new_pic->bit_depth_chroma = + hevc->bit_depth_chroma; + new_pic->video_signal_type = + hevc->video_signal_type; + + new_pic->conformance_window_flag = + hevc->param.p.conformance_window_flag; + new_pic->conf_win_left_offset = + hevc->param.p.conf_win_left_offset; + new_pic->conf_win_right_offset = + hevc->param.p.conf_win_right_offset; + new_pic->conf_win_top_offset = + hevc->param.p.conf_win_top_offset; + new_pic->conf_win_bottom_offset = + hevc->param.p.conf_win_bottom_offset; + new_pic->chroma_format_idc = + hevc->param.p.chroma_format_idc; + + if (new_pic) { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer 
*)new_pic->cma_alloc_addr; + + fb->status = FB_ST_DECODER; + } + + hevc_print(hevc, H265_DEBUG_BUFMGR, + "%s: index %d, buf_idx %d, decode_idx %d, POC %d\n", + __func__, new_pic->index, + new_pic->BUF_index, new_pic->decode_idx, + new_pic->POC); + + return new_pic; +} + +static int get_display_pic_num(struct hevc_state_s *hevc) +{ + int i; + struct PIC_s *pic; + int num = 0; + + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || + pic->index == -1) + continue; + + if (pic->output_ready == 1) + num++; + } + return num; +} + +static void flush_output(struct hevc_state_s *hevc, struct PIC_s *pic) +{ + struct PIC_s *pic_display; + + if (pic) { + /*PB skip control */ + if (pic->error_mark == 0 && hevc->PB_skip_mode == 1) { + /* start decoding after first I */ + hevc->ignore_bufmgr_error |= 0x1; + } + if (hevc->ignore_bufmgr_error & 1) { + if (hevc->PB_skip_count_after_decoding > 0) + hevc->PB_skip_count_after_decoding--; + else { + /* start displaying */ + hevc->ignore_bufmgr_error |= 0x2; + } + } + if (pic->POC != INVALID_POC && !pic->ip_mode) + pic->output_mark = 1; + pic->recon_mark = 1; + } + do { + pic_display = output_pic(hevc, 1); + + if (pic_display) { + pic_display->referenced = 0; + put_mv_buf(hevc, pic_display); + if ((pic_display->error_mark + && ((hevc->ignore_bufmgr_error & 0x2) == 0)) + || (get_dbg_flag(hevc) & + H265_DEBUG_DISPLAY_CUR_FRAME) + || (get_dbg_flag(hevc) & + H265_DEBUG_NO_DISPLAY)) { + pic_display->output_ready = 0; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] Display: POC %d, ", + pic_display->POC); + hevc_print_cont(hevc, 0, + "decoding index %d ==> ", + pic_display->decode_idx); + hevc_print_cont(hevc, 0, + "Debug mode or error, recycle it\n"); + } + /* + * Here the pic/frame error_mark is 1, + * and it won't be displayed, so increase + * the drop count + */ + hevc->gvs->drop_frame_count++; + if (pic_display->slice_type == I_SLICE) { + hevc->gvs->i_lost_frames++; + } else 
if (pic_display->slice_type == P_SLICE) { + hevc->gvs->p_lost_frames++; + } else if (pic_display->slice_type == B_SLICE) { + hevc->gvs->b_lost_frames++; + } + /* error frame count also need increase */ + hevc->gvs->error_frame_count++; + if (pic_display->slice_type == I_SLICE) { + hevc->gvs->i_concealed_frames++; + } else if (pic_display->slice_type == P_SLICE) { + hevc->gvs->p_concealed_frames++; + } else if (pic_display->slice_type == B_SLICE) { + hevc->gvs->b_concealed_frames++; + } + } else { + if (hevc->i_only & 0x1 + && pic_display->slice_type != 2) { + pic_display->output_ready = 0; + } else { + prepare_display_buf(hw_to_vdec(hevc), pic_display); + if (get_dbg_flag(hevc) + & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] flush Display: POC %d, ", + pic_display->POC); + hevc_print_cont(hevc, 0, + "decoding index %d\n", + pic_display->decode_idx); + } + } + } + } + } while (pic_display); + clear_referenced_flag(hevc); +} + +/* +* dv_meta_flag: 1, dolby meta only; 2, not include dolby meta +*/ +static void set_aux_data(struct hevc_state_s *hevc, + struct PIC_s *pic, unsigned char suffix_flag, + unsigned char dv_meta_flag) +{ + int i; + unsigned short *aux_adr; + unsigned int size_reg_val = + READ_VREG(HEVC_AUX_DATA_SIZE); + unsigned int aux_count = 0; + int aux_size = 0; + if (pic == NULL || 0 == aux_data_is_avaible(hevc)) + return; + + if (hevc->aux_data_dirty || + hevc->m_ins_flag == 0) { + + hevc->aux_data_dirty = 0; + } + + if (suffix_flag) { + aux_adr = (unsigned short *) + (hevc->aux_addr + + hevc->prefix_aux_size); + aux_count = + ((size_reg_val & 0xffff) << 4) + >> 1; + aux_size = + hevc->suffix_aux_size; + } else { + aux_adr = + (unsigned short *)hevc->aux_addr; + aux_count = + ((size_reg_val >> 16) << 4) + >> 1; + aux_size = + hevc->prefix_aux_size; + } + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) { + hevc_print(hevc, 0, + "%s:pic 0x%p old size %d count %d,suf %d dv_flag %d\r\n", + __func__, pic, pic->aux_data_size, + aux_count, suffix_flag, 
dv_meta_flag); + } + + if (aux_count > aux_size) { + hevc_print(hevc, 0, + "%s:aux_count(%d) is over size\n", __func__, aux_count); + aux_count = 0; + } + if (aux_size > 0 && aux_count > 0) { + int heads_size = 0; + + for (i = 0; i < aux_count; i++) { + unsigned char tag = aux_adr[i] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + heads_size += 8; + else if (dv_meta_flag == 1 && tag == 0x1) + heads_size += 8; + else if (dv_meta_flag == 2 && tag != 0x1) + heads_size += 8; + } + } + + if (pic->aux_data_buf) { + unsigned char valid_tag = 0; + unsigned char *h = + pic->aux_data_buf + + pic->aux_data_size; + unsigned char *p = h + 8; + int len = 0; + int padding_len = 0; + + for (i = 0; i < aux_count; i += 4) { + int ii; + unsigned char tag = aux_adr[i + 3] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + valid_tag = 1; + else if (dv_meta_flag == 1 + && tag == 0x1) + valid_tag = 1; + else if (dv_meta_flag == 2 + && tag != 0x1) + valid_tag = 1; + else + valid_tag = 0; + if (valid_tag && len > 0) { + pic->aux_data_size += + (len + 8); + h[0] = (len >> 24) + & 0xff; + h[1] = (len >> 16) + & 0xff; + h[2] = (len >> 8) + & 0xff; + h[3] = (len >> 0) + & 0xff; + h[6] = + (padding_len >> 8) + & 0xff; + h[7] = (padding_len) + & 0xff; + h += (len + 8); + p += 8; + len = 0; + padding_len = 0; + } + if (valid_tag) { + h[4] = tag; + h[5] = 0; + h[6] = 0; + h[7] = 0; + } + } + if (valid_tag) { + for (ii = 0; ii < 4; ii++) { + unsigned short aa = + aux_adr[i + 3 + - ii]; + *p = aa & 0xff; + p++; + len++; + /*if ((aa >> 8) == 0xff) + padding_len++;*/ + } + } + } + if (len > 0) { + pic->aux_data_size += (len + 8); + h[0] = (len >> 24) & 0xff; + h[1] = (len >> 16) & 0xff; + h[2] = (len >> 8) & 0xff; + h[3] = (len >> 0) & 0xff; + h[6] = (padding_len >> 8) & 0xff; + h[7] = (padding_len) & 0xff; + } + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) { + PR_INIT(128); + hevc_print(hevc, 0, + "aux: (size %d) suffix_flag %d\n", + pic->aux_data_size, 
suffix_flag); + for (i = 0; i < pic->aux_data_size; i++) { + PR_FILL("%02x ", pic->aux_data_buf[i]); + if (((i + 1) & 0xf) == 0) + PR_INFO(hevc->index); + } + PR_INFO(hevc->index); + } + } + } + +} + +static void release_aux_data(struct hevc_state_s *hevc, + struct PIC_s *pic) +{ +#if 0 + if (pic->aux_data_buf) { + vfree(pic->aux_data_buf); + if ((run_count[hevc->index] & 63) == 0) + vm_unmap_aliases(); + } + pic->aux_data_buf = NULL; + pic->aux_data_size = 0; +#endif +} + +static int recycle_mmu_buf_tail(struct hevc_state_s *hevc, + bool check_dma) +{ + hevc_print(hevc, + H265_DEBUG_BUFMGR_MORE, + "%s pic index %d scatter_alloc %d page_start %d\n", + "decoder_mmu_box_free_idx_tail", + hevc->cur_pic->index, + hevc->cur_pic->scatter_alloc, + hevc->used_4k_num); + if (check_dma) + hevc_mmu_dma_check(hw_to_vdec(hevc)); + + if (hevc->is_used_v4l) { + int index = hevc->cur_pic->BUF_index; + struct internal_comp_buf *ibuf = + index_to_icomp_buf(hevc, index); + + decoder_mmu_box_free_idx_tail( + ibuf->mmu_box, + ibuf->index, + hevc->used_4k_num); + } else { + decoder_mmu_box_free_idx_tail( + hevc->mmu_box, + hevc->cur_pic->index, + hevc->used_4k_num); + } + hevc->cur_pic->scatter_alloc = 2; + hevc->used_4k_num = -1; + return 0; +} + +static inline void hevc_pre_pic(struct hevc_state_s *hevc, + struct PIC_s *pic) +{ + + /* prev pic */ + /*if (hevc->curr_POC != 0) {*/ + int decoded_poc = hevc->iPrevPOC; +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + decoded_poc = hevc->decoded_poc; + hevc->decoded_poc = INVALID_POC; + } +#endif + if (hevc->m_nalUnitType != NAL_UNIT_CODED_SLICE_IDR + && hevc->m_nalUnitType != + NAL_UNIT_CODED_SLICE_IDR_N_LP) { + struct PIC_s *pic_display; + + pic = get_pic_by_POC(hevc, decoded_poc); + if (pic && (pic->POC != INVALID_POC)) { + struct vdec_s *vdec = hw_to_vdec(hevc); + + /*PB skip control */ + if (pic->error_mark == 0 + && hevc->PB_skip_mode == 1) { + /* start decoding after + * first I + */ + hevc->ignore_bufmgr_error |= 0x1; + } 
+ if (hevc->ignore_bufmgr_error & 1) { + if (hevc->PB_skip_count_after_decoding > 0) { + hevc->PB_skip_count_after_decoding--; + } else { + /* start displaying */ + hevc->ignore_bufmgr_error |= 0x2; + } + } + if (hevc->mmu_enable + && ((hevc->double_write_mode & 0x10) == 0)) { + if (!hevc->m_ins_flag) { + hevc->used_4k_num = + READ_VREG(HEVC_SAO_MMU_STATUS) >> 16; + + if ((!is_skip_decoding(hevc, pic)) && + (hevc->used_4k_num >= 0) && + (hevc->cur_pic->scatter_alloc + == 1)) { + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + recycle_mmu_buf_tail(hevc, true); + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + } + } + } + if (!pic->ip_mode) + pic->output_mark = 1; + pic->recon_mark = 1; + pic->dis_mark = 1; + if (vdec->mvfrm) { + pic->frame_size = vdec->mvfrm->frame_size; + pic->hw_decode_time = (u32)vdec->mvfrm->hw_decode_time; + } + } + do { + pic_display = output_pic(hevc, 0); + + if (pic_display) { + if ((pic_display->error_mark && + ((hevc->ignore_bufmgr_error & + 0x2) == 0)) + || (get_dbg_flag(hevc) & + H265_DEBUG_DISPLAY_CUR_FRAME) + || (get_dbg_flag(hevc) & + H265_DEBUG_NO_DISPLAY)) { + pic_display->output_ready = 0; + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] Display: POC %d, ", + pic_display->POC); + hevc_print_cont(hevc, 0, + "decoding index %d ==> ", + pic_display-> + decode_idx); + hevc_print_cont(hevc, 0, + "Debug or err,recycle it\n"); + } + /* + * Here the pic/frame error_mark is 1, + * and it won't be displayed, so increase + * the drop count + */ + hevc->gvs->drop_frame_count++; + if (pic_display->slice_type == I_SLICE) { + hevc->gvs->i_lost_frames++; + }else if (pic_display->slice_type == P_SLICE) { + hevc->gvs->p_lost_frames++; + } else if (pic_display->slice_type == B_SLICE) { + hevc->gvs->b_lost_frames++; + } + /* error frame count also need increase */ + hevc->gvs->error_frame_count++; + if (pic_display->slice_type == I_SLICE) { + 
hevc->gvs->i_concealed_frames++; + } else if (pic_display->slice_type == P_SLICE) { + hevc->gvs->p_concealed_frames++; + } else if (pic_display->slice_type == B_SLICE) { + hevc->gvs->b_concealed_frames++; + } + } else { + if (hevc->i_only & 0x1 + && pic_display-> + slice_type != 2) { + pic_display->output_ready = 0; + } else { + prepare_display_buf + (hw_to_vdec(hevc), + pic_display); + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] Display: POC %d, ", + pic_display->POC); + hevc_print_cont(hevc, 0, + "decoding index %d\n", + pic_display-> + decode_idx); + } + } + } + } + } while (pic_display); + } else { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] current pic is IDR, "); + hevc_print(hevc, 0, + "clear referenced flag of all buffers\n"); + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) + dump_pic_list(hevc); + if (atomic_read(&hevc->vf_pre_count) == 1 && + hevc->first_pic_flag == 1) { + hevc->first_pic_flag = 2; + pic = NULL; + } + else + pic = get_pic_by_POC(hevc, decoded_poc); + + flush_output(hevc, pic); + } + +} + +static void check_pic_decoded_error_pre(struct hevc_state_s *hevc, + int decoded_lcu) +{ + int current_lcu_idx = decoded_lcu; + if (decoded_lcu < 0) + return; + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "cur lcu idx = %d, (total %d)\n", + current_lcu_idx, hevc->lcu_total); + } + if ((error_handle_policy & 0x20) == 0 && hevc->cur_pic != NULL) { + if (hevc->first_pic_after_recover) { + if (current_lcu_idx != + ((hevc->lcu_x_num_pre*hevc->lcu_y_num_pre) - 1)) + hevc->cur_pic->error_mark = 1; + } else { + if (hevc->lcu_x_num_pre != 0 + && hevc->lcu_y_num_pre != 0 + && current_lcu_idx != 0 + && current_lcu_idx < + ((hevc->lcu_x_num_pre*hevc->lcu_y_num_pre) - 1)) + hevc->cur_pic->error_mark = 1; + } + if (hevc->cur_pic->error_mark) { + if (print_lcu_error) + hevc_print(hevc, 0, + "cur lcu idx = %d, (total %d), set error_mark\n", + current_lcu_idx, + 
hevc->lcu_x_num_pre*hevc->lcu_y_num_pre); + if (is_log_enable(hevc)) + add_log(hevc, + "cur lcu idx = %d, (total %d), set error_mark", + current_lcu_idx, + hevc->lcu_x_num_pre * + hevc->lcu_y_num_pre); + + } + + } + if (hevc->cur_pic && hevc->head_error_flag) { + hevc->cur_pic->error_mark = 1; + hevc_print(hevc, 0, + "head has error, set error_mark\n"); + } + + if ((error_handle_policy & 0x80) == 0) { + if (hevc->over_decode && hevc->cur_pic) { + hevc_print(hevc, 0, + "over decode, set error_mark\n"); + hevc->cur_pic->error_mark = 1; + } + } + + hevc->lcu_x_num_pre = hevc->lcu_x_num; + hevc->lcu_y_num_pre = hevc->lcu_y_num; +} + +static void check_pic_decoded_error(struct hevc_state_s *hevc, + int decoded_lcu) +{ + int current_lcu_idx = decoded_lcu; + if (decoded_lcu < 0) + return; + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "cur lcu idx = %d, (total %d)\n", + current_lcu_idx, hevc->lcu_total); + } + if ((error_handle_policy & 0x20) == 0 && hevc->cur_pic != NULL) { + if (hevc->lcu_x_num != 0 + && hevc->lcu_y_num != 0 + && current_lcu_idx != 0 + && current_lcu_idx < + ((hevc->lcu_x_num*hevc->lcu_y_num) - 1)) + hevc->cur_pic->error_mark = 1; + + if (hevc->cur_pic->error_mark) { + if (print_lcu_error) + hevc_print(hevc, 0, + "cur lcu idx = %d, (total %d), set error_mark\n", + current_lcu_idx, + hevc->lcu_x_num*hevc->lcu_y_num); + if (((hevc->i_only & 0x4) == 0) && hevc->cur_pic->POC && ( hevc->cur_pic->slice_type == 0) + && ((hevc->cur_pic->POC + MAX_BUF_NUM) < hevc->iPrevPOC)) { + hevc_print(hevc, 0, + "Flush.. 
num_reorder_pic %d pic->POC %d hevc->iPrevPOC %d\n", + hevc->sps_num_reorder_pics_0,hevc->cur_pic->POC ,hevc->iPrevPOC); + flush_output(hevc, get_pic_by_POC(hevc, hevc->cur_pic->POC )); + } + if (is_log_enable(hevc)) + add_log(hevc, + "cur lcu idx = %d, (total %d), set error_mark", + current_lcu_idx, + hevc->lcu_x_num * + hevc->lcu_y_num); + + } + + } + if (hevc->cur_pic && hevc->head_error_flag) { + hevc->cur_pic->error_mark = 1; + hevc_print(hevc, 0, + "head has error, set error_mark\n"); + } + + if ((error_handle_policy & 0x80) == 0) { + if (hevc->over_decode && hevc->cur_pic) { + hevc_print(hevc, 0, + "over decode, set error_mark\n"); + hevc->cur_pic->error_mark = 1; + } + } +} + +/* only when we decoded one field or one frame, +we can call this function to get qos info*/ +static void get_picture_qos_info(struct hevc_state_s *hevc) +{ + struct PIC_s *picture = hevc->cur_pic; + +/* +#define DEBUG_QOS +*/ + + if (!hevc->cur_pic) + return; + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + unsigned char a[3]; + unsigned char i, j, t; + unsigned long data; + + data = READ_VREG(HEVC_MV_INFO); + if (picture->slice_type == I_SLICE) + data = 0; + a[0] = data & 0xff; + a[1] = (data >> 8) & 0xff; + a[2] = (data >> 16) & 0xff; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_mv = a[2]; + picture->avg_mv = a[1]; + picture->min_mv = a[0]; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "mv data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); +#endif + + data = READ_VREG(HEVC_QP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_qp = a[2]; + 
picture->avg_qp = a[1]; + picture->min_qp = a[0]; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "qp data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); +#endif + + data = READ_VREG(HEVC_SKIP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + picture->max_skip = a[2]; + picture->avg_skip = a[1]; + picture->min_skip = a[0]; + +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "skip data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); +#endif + } else { + uint32_t blk88_y_count; + uint32_t blk88_c_count; + uint32_t blk22_mv_count; + uint32_t rdata32; + int32_t mv_hi; + int32_t mv_lo; + uint32_t rdata32_l; + uint32_t mvx_L0_hi; + uint32_t mvy_L0_hi; + uint32_t mvx_L1_hi; + uint32_t mvy_L1_hi; + int64_t value; + uint64_t temp_value; +#ifdef DEBUG_QOS + int pic_number = picture->POC; +#endif + + picture->max_mv = 0; + picture->avg_mv = 0; + picture->min_mv = 0; + + picture->max_skip = 0; + picture->avg_skip = 0; + picture->min_skip = 0; + + picture->max_qp = 0; + picture->avg_qp = 0; + picture->min_qp = 0; + + + +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "slice_type:%d, poc:%d\n", + picture->slice_type, + picture->POC); +#endif + /* set rd_idx to 0 */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, 0); + + blk88_y_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_y_count == 0) { +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_y_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] Y QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_y_count, + rdata32, blk88_y_count); +#endif + picture->avg_qp = rdata32/blk88_y_count; + /* 
intra_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] Y intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif + /* skipped_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] Y skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); +#endif + picture->avg_skip = rdata32*100/blk88_y_count; + /* coeff_non_zero_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] Y ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_y_count*1)), + '%', rdata32); +#endif + /* blk66_c_count */ + blk88_c_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_c_count == 0) { +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] NO Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_c_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] C QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_c_count, + rdata32, blk88_c_count); +#endif + /* intra_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] C intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* skipped_cu_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] C skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); +#endif + /* coeff_non_zero_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] C ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_c_count*1)), + '%', rdata32); +#endif + + /* 1'h0, 
qp_c_max[6:0], 1'h0, qp_c_min[6:0], + 1'h0, qp_y_max[6:0], 1'h0, qp_y_min[6:0] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "[Picture %d Quality] Y QP min : %d\n", + pic_number, (rdata32>>0)&0xff); +#endif + picture->min_qp = (rdata32>>0)&0xff; + +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "[Picture %d Quality] Y QP max : %d\n", + pic_number, (rdata32>>8)&0xff); +#endif + picture->max_qp = (rdata32>>8)&0xff; + +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "[Picture %d Quality] C QP min : %d\n", + pic_number, (rdata32>>16)&0xff); + hevc_print(hevc, 0, "[Picture %d Quality] C QP max : %d\n", + pic_number, (rdata32>>24)&0xff); +#endif + + /* blk22_mv_count */ + blk22_mv_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk22_mv_count == 0) { +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] NO MV Data yet.\n", + pic_number); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* mvy_L1_count[39:32], mvx_L1_count[39:32], + mvy_L0_count[39:32], mvx_L0_count[39:32] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + /* should all be 0x00 or 0xff */ +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] MV AVG High Bits: 0x%X\n", + pic_number, rdata32); +#endif + mvx_L0_hi = ((rdata32>>0)&0xff); + mvy_L0_hi = ((rdata32>>8)&0xff); + mvx_L1_hi = ((rdata32>>16)&0xff); + mvy_L1_hi = ((rdata32>>24)&0xff); + + /* mvx_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvx_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + value = div_s64(value, blk22_mv_count); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] MVX_L0 AVG : %d (%lld/%d)\n", + pic_number, (int)value, + value, blk22_mv_count); +#endif + picture->avg_mv = value; + + /* mvy_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L0_hi; + temp_value = 
(temp_value << 32) | rdata32_l; + + if (mvy_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] MVY_L0 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvx_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvx_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] MVX_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* mvy_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvy_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] MVY_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); +#endif + + /* {mvx_L0_max, mvx_L0_min} // format : {sign, abs[14:0]} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "[Picture %d Quality] MVX_L0 MAX : %d\n", + pic_number, mv_hi); +#endif + picture->max_mv = mv_hi; + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; +#ifdef DEBUG_QOS + hevc_print(hevc, 0, "[Picture %d Quality] MVX_L0 MIN : %d\n", + pic_number, mv_lo); +#endif + picture->min_mv = mv_lo; + +#ifdef DEBUG_QOS + /* {mvy_L0_max, mvy_L0_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + hevc_print(hevc, 0, "[Picture %d Quality] MVY_L0 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + 
mv_lo = 0x8000 - mv_lo; + + hevc_print(hevc, 0, "[Picture %d Quality] MVY_L0 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvx_L1_max, mvx_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + hevc_print(hevc, 0, "[Picture %d Quality] MVX_L1 MAX : %d\n", + pic_number, mv_hi); + + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + hevc_print(hevc, 0, "[Picture %d Quality] MVX_L1 MIN : %d\n", + pic_number, mv_lo); + + + /* {mvy_L1_max, mvy_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + hevc_print(hevc, 0, "[Picture %d Quality] MVY_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + hevc_print(hevc, 0, "[Picture %d Quality] MVY_L1 MIN : %d\n", + pic_number, mv_lo); +#endif + + rdata32 = READ_VREG(HEVC_PIC_QUALITY_CTRL); +#ifdef DEBUG_QOS + hevc_print(hevc, 0, + "[Picture %d Quality] After Read : VDEC_PIC_QUALITY_CTRL : 0x%x\n", + pic_number, rdata32); +#endif + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + } +} + +static int hevc_slice_segment_header_process(struct hevc_state_s *hevc, + union param_u *rpm_param, + int decode_pic_begin) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + struct vdec_s *vdec = hw_to_vdec(hevc); +#endif + int i; + int lcu_x_num_div; + int lcu_y_num_div; + int Col_ref; + int dbg_skip_flag = 0; + + if (hevc->wait_buf == 0) { + hevc->sps_num_reorder_pics_0 = + rpm_param->p.sps_num_reorder_pics_0; + hevc->ip_mode = (!hevc->sps_num_reorder_pics_0 && + !(vdec->slave || vdec->master) && + !disable_ip_mode) ? 
true : false; + hevc->m_temporalId = rpm_param->p.m_temporalId; + hevc->m_nalUnitType = rpm_param->p.m_nalUnitType; + hevc->interlace_flag = + (rpm_param->p.profile_etc >> 2) & 0x1; + hevc->curr_pic_struct = + (rpm_param->p.sei_frame_field_info >> 3) & 0xf; + + hevc->frame_field_info_present_flag = + (rpm_param->p.sei_frame_field_info >> 8) & 0x1; + + if (hevc->frame_field_info_present_flag) { + if (hevc->curr_pic_struct == 0 + || hevc->curr_pic_struct == 7 + || hevc->curr_pic_struct == 8) + hevc->interlace_flag = 0; + } + + hevc_print(hevc, H265_DEBUG_PIC_STRUCT, + "frame_field_info_present_flag = %d curr_pic_struct = %d interlace_flag = %d\n", + hevc->frame_field_info_present_flag, + hevc->curr_pic_struct, + hevc->interlace_flag); + + /* if (interlace_enable == 0 || hevc->m_ins_flag) */ + if (interlace_enable == 0) + hevc->interlace_flag = 0; + if (interlace_enable & 0x100) + hevc->interlace_flag = interlace_enable & 0x1; + if (hevc->interlace_flag == 0) + hevc->curr_pic_struct = 0; + /* if(hevc->m_nalUnitType == NAL_UNIT_EOS){ */ + /* + *hevc->m_pocRandomAccess = MAX_INT; + * //add to fix RAP_B_Bossen_1 + */ + /* } */ + hevc->misc_flag0 = rpm_param->p.misc_flag0; + if (rpm_param->p.first_slice_segment_in_pic_flag == 0) { + hevc->slice_segment_addr = + rpm_param->p.slice_segment_address; + if (!rpm_param->p.dependent_slice_segment_flag) + hevc->slice_addr = hevc->slice_segment_addr; + } else { + hevc->slice_segment_addr = 0; + hevc->slice_addr = 0; + } + + hevc->iPrevPOC = hevc->curr_POC; + hevc->slice_type = (rpm_param->p.slice_type == I_SLICE) ? 2 : + (rpm_param->p.slice_type == P_SLICE) ? 1 : + (rpm_param->p.slice_type == B_SLICE) ? 0 : 3; + /* hevc->curr_predFlag_L0=(hevc->slice_type==2) ? 0:1; */ + /* hevc->curr_predFlag_L1=(hevc->slice_type==0) ? 1:0; */ + hevc->TMVPFlag = rpm_param->p.slice_temporal_mvp_enable_flag; + hevc->isNextSliceSegment = + rpm_param->p.dependent_slice_segment_flag ? 
1 : 0; + if (is_oversize_ex(rpm_param->p.pic_width_in_luma_samples, + rpm_param->p.pic_height_in_luma_samples)) { + hevc_print(hevc, 0, "over size : %u x %u.\n", + rpm_param->p.pic_width_in_luma_samples, rpm_param->p.pic_height_in_luma_samples); + if ((!hevc->m_ins_flag) && + ((debug & + H265_NO_CHANG_DEBUG_FLAG_IN_CODE) == 0)) + debug |= (H265_DEBUG_DIS_LOC_ERROR_PROC | + H265_DEBUG_DIS_SYS_ERROR_PROC); + return 3; + } + + if (hevc->pic_w != rpm_param->p.pic_width_in_luma_samples + || hevc->pic_h != + rpm_param->p.pic_height_in_luma_samples) { + hevc_print(hevc, 0, + "Pic Width/Height Change (%d,%d)=>(%d,%d), interlace %d\n", + hevc->pic_w, hevc->pic_h, + rpm_param->p.pic_width_in_luma_samples, + rpm_param->p.pic_height_in_luma_samples, + hevc->interlace_flag); + + hevc->pic_w = rpm_param->p.pic_width_in_luma_samples; + hevc->pic_h = rpm_param->p.pic_height_in_luma_samples; + hevc->frame_width = hevc->pic_w; + hevc->frame_height = hevc->pic_h; +#ifdef LOSLESS_COMPRESS_MODE + if (/*re_config_pic_flag == 0 &&*/ + (get_double_write_mode(hevc) & 0x10) == 0) + init_decode_head_hw(hevc); +#endif + } + + if (hevc->bit_depth_chroma > 10 || + hevc->bit_depth_luma > 10) { + hevc_print(hevc, 0, "unsupport bitdepth : %u,%u\n", + hevc->bit_depth_chroma, + hevc->bit_depth_luma); + if (!hevc->m_ins_flag) + debug |= (H265_DEBUG_DIS_LOC_ERROR_PROC | + H265_DEBUG_DIS_SYS_ERROR_PROC); + hevc->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + return 4; + } + + /* it will cause divide 0 error */ + if (hevc->pic_w == 0 || hevc->pic_h == 0) { + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "Fatal Error, pic_w = %d, pic_h = %d\n", + hevc->pic_w, hevc->pic_h); + } + return 3; + } + pic_list_process(hevc); + + hevc->lcu_size = + 1 << (rpm_param->p.log2_min_coding_block_size_minus3 + + 3 + rpm_param-> + p.log2_diff_max_min_coding_block_size); + if (hevc->lcu_size == 0) { + hevc_print(hevc, 0, + "Error, lcu_size = 0 (%d,%d)\n", + rpm_param->p. 
+ log2_min_coding_block_size_minus3, + rpm_param->p. + log2_diff_max_min_coding_block_size); + return 3; + } + hevc->lcu_size_log2 = log2i(hevc->lcu_size); + lcu_x_num_div = (hevc->pic_w / hevc->lcu_size); + lcu_y_num_div = (hevc->pic_h / hevc->lcu_size); + hevc->lcu_x_num = + ((hevc->pic_w % hevc->lcu_size) == + 0) ? lcu_x_num_div : lcu_x_num_div + 1; + hevc->lcu_y_num = + ((hevc->pic_h % hevc->lcu_size) == + 0) ? lcu_y_num_div : lcu_y_num_div + 1; + hevc->lcu_total = hevc->lcu_x_num * hevc->lcu_y_num; + + if (hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_IDR + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_IDR_N_LP) { + hevc->curr_POC = 0; + if ((hevc->m_temporalId - 1) == 0) + hevc->iPrevTid0POC = hevc->curr_POC; + } else { + int iMaxPOClsb = + 1 << (rpm_param->p. + log2_max_pic_order_cnt_lsb_minus4 + 4); + int iPrevPOClsb; + int iPrevPOCmsb; + int iPOCmsb; + int iPOClsb = rpm_param->p.POClsb; + + if (iMaxPOClsb == 0) { + hevc_print(hevc, 0, + "error iMaxPOClsb is 0\n"); + return 3; + } + + iPrevPOClsb = hevc->iPrevTid0POC % iMaxPOClsb; + iPrevPOCmsb = hevc->iPrevTid0POC - iPrevPOClsb; + + if ((iPOClsb < iPrevPOClsb) + && ((iPrevPOClsb - iPOClsb) >= + (iMaxPOClsb / 2))) + iPOCmsb = iPrevPOCmsb + iMaxPOClsb; + else if ((iPOClsb > iPrevPOClsb) + && ((iPOClsb - iPrevPOClsb) > + (iMaxPOClsb / 2))) + iPOCmsb = iPrevPOCmsb - iMaxPOClsb; + else + iPOCmsb = iPrevPOCmsb; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "iPrePOC%d iMaxPOClsb%d iPOCmsb%d iPOClsb%d\n", + hevc->iPrevTid0POC, iMaxPOClsb, iPOCmsb, + iPOClsb); + } + if (hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_BLA + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLANT + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLA_N_LP) { + /* For BLA picture types, POCmsb is set to 0. 
*/ + iPOCmsb = 0; + } + hevc->curr_POC = (iPOCmsb + iPOClsb); + if ((hevc->m_temporalId - 1) == 0) + hevc->iPrevTid0POC = hevc->curr_POC; + else { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "m_temporalID is %d\n", + hevc->m_temporalId); + } + } + } + hevc->RefNum_L0 = + (rpm_param->p.num_ref_idx_l0_active > + MAX_REF_ACTIVE) ? MAX_REF_ACTIVE : rpm_param->p. + num_ref_idx_l0_active; + hevc->RefNum_L1 = + (rpm_param->p.num_ref_idx_l1_active > + MAX_REF_ACTIVE) ? MAX_REF_ACTIVE : rpm_param->p. + num_ref_idx_l1_active; + + /* if(curr_POC==0x10) dump_lmem(); */ + + /* skip RASL pictures after CRA/BLA pictures */ + if (hevc->m_pocRandomAccess == MAX_INT) {/* first picture */ + if (hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_CRA || + hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_BLA + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLANT + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLA_N_LP) + hevc->m_pocRandomAccess = hevc->curr_POC; + else + hevc->m_pocRandomAccess = -MAX_INT; + } else if (hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_BLA + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLANT + || hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_BLA_N_LP) + hevc->m_pocRandomAccess = hevc->curr_POC; + else if ((hevc->curr_POC < hevc->m_pocRandomAccess) && + (nal_skip_policy >= 3) && + (hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_RASL_N || + hevc->m_nalUnitType == + NAL_UNIT_CODED_SLICE_TFD)) { /* skip */ + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "RASL picture with POC %d < %d ", + hevc->curr_POC, hevc->m_pocRandomAccess); + hevc_print(hevc, 0, + "RandomAccess point POC), skip it\n"); + } + return 1; + } + + WRITE_VREG(HEVC_WAIT_FLAG, READ_VREG(HEVC_WAIT_FLAG) | 0x2); + hevc->skip_flag = 0; + /**/ + /* if((iPrevPOC != curr_POC)){ */ + if (rpm_param->p.slice_segment_address == 0) { + struct PIC_s *pic = NULL; + + hevc->new_pic = 1; +#ifdef MULTI_INSTANCE_SUPPORT + if (!hevc->m_ins_flag) +#endif + check_pic_decoded_error_pre(hevc, 
+ READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff); + if (vdec_stream_based(vdec) && ((READ_VREG(HEVC_PARSER_LCU_START) & 0xffffff) != 0)) { + if (hevc->cur_pic) + hevc->cur_pic->error_mark = 1; + } + /**/ if (use_cma == 0) { + if (hevc->pic_list_init_flag == 0) { + init_pic_list(hevc); + init_pic_list_hw(hevc); + init_buf_spec(hevc); + hevc->pic_list_init_flag = 3; + } + } + if (!hevc->m_ins_flag) { + if (hevc->cur_pic) + get_picture_qos_info(hevc); + } + hevc->first_pic_after_recover = 0; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) + dump_pic_list(hevc); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec->master) { + struct hevc_state_s *hevc_ba = + (struct hevc_state_s *) + vdec->master->private; + if (hevc_ba->cur_pic != NULL) { + hevc_ba->cur_pic->dv_enhance_exist = 1; + hevc_print(hevc, H265_DEBUG_DV, + "To decode el (poc %d) => set bl (poc %d) dv_enhance_exist flag\n", + hevc->curr_POC, hevc_ba->cur_pic->POC); + } + } + if (vdec->master == NULL && + vdec->slave == NULL) + set_aux_data(hevc, + hevc->cur_pic, 1, 0); /*suffix*/ + if (hevc->bypass_dvenl && !dolby_meta_with_el) + set_aux_data(hevc, + hevc->cur_pic, 0, 1); /*dv meta only*/ +#else + set_aux_data(hevc, hevc->cur_pic, 1, 0); +#endif + + /* prev pic */ + hevc_pre_pic(hevc, pic); + /* + *update referenced of old pictures + *(cur_pic->referenced is 1 and not updated) + */ + apply_ref_pic_set(hevc, hevc->curr_POC, + rpm_param); + + /*if (hevc->mmu_enable) + recycle_mmu_bufs(hevc);*/ + + /* new pic */ + hevc->cur_pic = hevc->is_used_v4l ? 
+ v4l_get_new_pic(hevc, rpm_param) : + get_new_pic(hevc, rpm_param); + if (hevc->cur_pic == NULL) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) + dump_pic_list(hevc); + hevc->wait_buf = 1; + return -1; + } +#ifdef MULTI_INSTANCE_SUPPORT + hevc->decoding_pic = hevc->cur_pic; + if (!hevc->m_ins_flag) + hevc->over_decode = 0; +#endif +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + hevc->cur_pic->dv_enhance_exist = 0; + if (vdec->slave) + hevc_print(hevc, H265_DEBUG_DV, + "Clear bl (poc %d) dv_enhance_exist flag\n", + hevc->curr_POC); + if (vdec->master == NULL && + vdec->slave == NULL) + set_aux_data(hevc, + hevc->cur_pic, 0, 0); /*prefix*/ + + if (hevc->bypass_dvenl && !dolby_meta_with_el) + set_aux_data(hevc, + hevc->cur_pic, 0, 2); /*pre sei only*/ +#else + set_aux_data(hevc, hevc->cur_pic, 0, 0); +#endif + if (get_dbg_flag(hevc) & H265_DEBUG_DISPLAY_CUR_FRAME) { + hevc->cur_pic->output_ready = 1; + hevc->cur_pic->stream_offset = + READ_VREG(HEVC_SHIFT_BYTE_COUNT); + prepare_display_buf(vdec, hevc->cur_pic); + hevc->wait_buf = 2; + return -1; + } + } else { + if (get_dbg_flag(hevc) & H265_DEBUG_HAS_AUX_IN_SLICE) { +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (vdec->master == NULL && + vdec->slave == NULL) { + set_aux_data(hevc, hevc->cur_pic, 1, 0); + set_aux_data(hevc, hevc->cur_pic, 0, 0); + } +#else + set_aux_data(hevc, hevc->cur_pic, 1, 0); + set_aux_data(hevc, hevc->cur_pic, 0, 0); +#endif + } + if (hevc->pic_list_init_flag != 3 + || hevc->cur_pic == NULL) { + /* make it dec from the first slice segment */ + return 3; + } + hevc->cur_pic->slice_idx++; + hevc->new_pic = 0; + } + } else { + if (hevc->wait_buf == 1) { + pic_list_process(hevc); + + hevc->cur_pic = hevc->is_used_v4l ? 
+ v4l_get_new_pic(hevc, rpm_param) : + get_new_pic(hevc, rpm_param); + if (hevc->cur_pic == NULL) + return -1; + + if (!hevc->m_ins_flag) + hevc->over_decode = 0; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + hevc->cur_pic->dv_enhance_exist = 0; + if (vdec->master == NULL && + vdec->slave == NULL) + set_aux_data(hevc, hevc->cur_pic, 0, 0); +#else + set_aux_data(hevc, hevc->cur_pic, 0, 0); +#endif + hevc->wait_buf = 0; + } else if (hevc->wait_buf == + 2) { + if (get_display_pic_num(hevc) > + 1) + return -1; + hevc->wait_buf = 0; + } + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) + dump_pic_list(hevc); + } + + if (hevc->new_pic) { +#if 1 + /*SUPPORT_10BIT*/ + int sao_mem_unit = + (hevc->lcu_size == 16 ? 9 : + hevc->lcu_size == + 32 ? 14 : 24) << 4; +#else + int sao_mem_unit = ((hevc->lcu_size / 8) * 2 + 4) << 4; +#endif + int pic_height_cu = + (hevc->pic_h + hevc->lcu_size - 1) / hevc->lcu_size; + int pic_width_cu = + (hevc->pic_w + hevc->lcu_size - 1) / hevc->lcu_size; + int sao_vb_size = (sao_mem_unit + (2 << 4)) * pic_height_cu; + + /* int sao_abv_size = sao_mem_unit*pic_width_cu; */ + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "==>%s dec idx %d, struct %d interlace %d pic idx %d\n", + __func__, + hevc->decode_idx, + hevc->curr_pic_struct, + hevc->interlace_flag, + hevc->cur_pic->index); + } + if (dbg_skip_decode_index != 0 && + hevc->decode_idx == dbg_skip_decode_index) + dbg_skip_flag = 1; + + hevc->decode_idx++; + update_tile_info(hevc, pic_width_cu, pic_height_cu, + sao_mem_unit, rpm_param); + + config_title_hw(hevc, sao_vb_size, sao_mem_unit); + } + + if (hevc->iPrevPOC != hevc->curr_POC) { + hevc->new_tile = 1; + hevc->tile_x = 0; + hevc->tile_y = 0; + hevc->tile_y_x = 0; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "new_tile (new_pic) tile_x=%d, tile_y=%d\n", + hevc->tile_x, hevc->tile_y); + } + } else if (hevc->tile_enabled) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + 
hevc_print(hevc, 0, + "slice_segment_address is %d\n", + rpm_param->p.slice_segment_address); + } + hevc->tile_y_x = + get_tile_index(hevc, rpm_param->p.slice_segment_address, + (hevc->pic_w + + hevc->lcu_size - + 1) / hevc->lcu_size); + if ((hevc->tile_y_x != (hevc->tile_x | (hevc->tile_y << 8))) + && (hevc->tile_y_x != -1)) { + hevc->new_tile = 1; + hevc->tile_x = hevc->tile_y_x & 0xff; + hevc->tile_y = (hevc->tile_y_x >> 8) & 0xff; + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "new_tile seg adr %d tile_x=%d, tile_y=%d\n", + rpm_param->p.slice_segment_address, + hevc->tile_x, hevc->tile_y); + } + } else + hevc->new_tile = 0; + } else + hevc->new_tile = 0; + + if ((hevc->tile_x > (MAX_TILE_COL_NUM - 1)) + || (hevc->tile_y > (MAX_TILE_ROW_NUM - 1))) + hevc->new_tile = 0; + + if (hevc->new_tile) { + hevc->tile_start_lcu_x = + hevc->m_tile[hevc->tile_y][hevc->tile_x].start_cu_x; + hevc->tile_start_lcu_y = + hevc->m_tile[hevc->tile_y][hevc->tile_x].start_cu_y; + hevc->tile_width_lcu = + hevc->m_tile[hevc->tile_y][hevc->tile_x].width; + hevc->tile_height_lcu = + hevc->m_tile[hevc->tile_y][hevc->tile_x].height; + } + + set_ref_pic_list(hevc, rpm_param); + + Col_ref = rpm_param->p.collocated_ref_idx; + + hevc->LDCFlag = 0; + if (rpm_param->p.slice_type != I_SLICE) { + hevc->LDCFlag = 1; + for (i = 0; (i < hevc->RefNum_L0) && hevc->LDCFlag; i++) { + if (hevc->cur_pic-> + m_aiRefPOCList0[hevc->cur_pic->slice_idx][i] > + hevc->curr_POC) + hevc->LDCFlag = 0; + } + if (rpm_param->p.slice_type == B_SLICE) { + for (i = 0; (i < hevc->RefNum_L1) + && hevc->LDCFlag; i++) { + if (hevc->cur_pic-> + m_aiRefPOCList1[hevc->cur_pic-> + slice_idx][i] > + hevc->curr_POC) + hevc->LDCFlag = 0; + } + } + } + + hevc->ColFromL0Flag = rpm_param->p.collocated_from_l0_flag; + + hevc->plevel = + rpm_param->p.log2_parallel_merge_level; + hevc->MaxNumMergeCand = 5 - rpm_param->p.five_minus_max_num_merge_cand; + + hevc->LongTerm_Curr = 0; /* to do ... 
*/ + hevc->LongTerm_Col = 0; /* to do ... */ + + hevc->list_no = 0; + if (rpm_param->p.slice_type == B_SLICE) + hevc->list_no = 1 - hevc->ColFromL0Flag; + if (hevc->list_no == 0) { + if (Col_ref < hevc->RefNum_L0) { + hevc->Col_POC = + hevc->cur_pic->m_aiRefPOCList0[hevc->cur_pic-> + slice_idx][Col_ref]; + } else + hevc->Col_POC = INVALID_POC; + } else { + if (Col_ref < hevc->RefNum_L1) { + hevc->Col_POC = + hevc->cur_pic->m_aiRefPOCList1[hevc->cur_pic-> + slice_idx][Col_ref]; + } else + hevc->Col_POC = INVALID_POC; + } + + hevc->LongTerm_Ref = 0; /* to do ... */ + + if (hevc->slice_type != 2) { + /* if(hevc->i_only==1){ */ + /* return 0xf; */ + /* } */ + + if (hevc->Col_POC != INVALID_POC) { + hevc->col_pic = get_ref_pic_by_POC(hevc, hevc->Col_POC); + if (hevc->col_pic == NULL) { + hevc->cur_pic->error_mark = 1; + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "WRONG,fail to get the pic Col_POC\n"); + } + if (is_log_enable(hevc)) + add_log(hevc, + "WRONG,fail to get the pic Col_POC"); + } else if (hevc->col_pic->error_mark || hevc->col_pic->dis_mark == 0) { + hevc->col_pic->error_mark = 1; + hevc->cur_pic->error_mark = 1; + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "WRONG, Col_POC error_mark is 1\n"); + } + if (is_log_enable(hevc)) + add_log(hevc, + "WRONG, Col_POC error_mark is 1"); + } else { + if ((hevc->col_pic->width + != hevc->pic_w) || + (hevc->col_pic->height + != hevc->pic_h)) { + hevc_print(hevc, 0, + "Wrong reference pic (poc %d) width/height %d/%d\n", + hevc->col_pic->POC, + hevc->col_pic->width, + hevc->col_pic->height); + hevc->cur_pic->error_mark = 1; + } + + } + + if (hevc->cur_pic->error_mark + && ((hevc->ignore_bufmgr_error & 0x1) == 0)) { + /*count info*/ + vdec_count_info(hevc->gvs, hevc->cur_pic->error_mark, + hevc->cur_pic->stream_offset); + if (hevc->cur_pic->slice_type == I_SLICE) { + hevc->gvs->i_decoded_frames++; + } else if (hevc->cur_pic->slice_type == P_SLICE) { + hevc->gvs->p_decoded_frames++; + } else if 
(hevc->cur_pic->slice_type == B_SLICE) { + hevc->gvs->b_decoded_frames++; + } + if (hevc->cur_pic->error_mark) { + if (hevc->cur_pic->slice_type == I_SLICE) { + hevc->gvs->i_concealed_frames++; + } else if (hevc->cur_pic->slice_type == P_SLICE) { + hevc->gvs->p_concealed_frames++; + } else if (hevc->cur_pic->slice_type == B_SLICE) { + hevc->gvs->b_concealed_frames++; + } + } + if (hevc->PB_skip_mode == 2) { + hevc->gvs->drop_frame_count++; + if (rpm_param->p.slice_type == I_SLICE) { + hevc->gvs->i_lost_frames++; + } else if (rpm_param->p.slice_type == P_SLICE) { + hevc->gvs->p_lost_frames++; + } else if (rpm_param->p.slice_type == B_SLICE) { + hevc->gvs->b_lost_frames++; + } + } + } + + if (is_skip_decoding(hevc, + hevc->cur_pic)) { + return 2; + } + } else + hevc->col_pic = hevc->cur_pic; + } /* */ + if (hevc->col_pic == NULL) + hevc->col_pic = hevc->cur_pic; +#ifdef BUFFER_MGR_ONLY + return 0xf; +#else + if ((decode_pic_begin > 0 && hevc->decode_idx <= decode_pic_begin) + || (dbg_skip_flag)) + return 0xf; +#endif + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_START); + config_mc_buffer(hevc, hevc->cur_pic); + + if (is_skip_decoding(hevc, + hevc->cur_pic)) { + if (get_dbg_flag(hevc)) + hevc_print(hevc, 0, + "Discard this picture index %d\n", + hevc->cur_pic->index); + /*count info*/ + vdec_count_info(hevc->gvs, hevc->cur_pic->error_mark, + hevc->cur_pic->stream_offset); + if (hevc->cur_pic->slice_type == I_SLICE) { + hevc->gvs->i_decoded_frames++; + } else if (hevc->cur_pic->slice_type == P_SLICE) { + hevc->gvs->p_decoded_frames++; + } else if (hevc->cur_pic->slice_type == B_SLICE) { + hevc->gvs->b_decoded_frames++; + } + if (hevc->cur_pic->error_mark) { + if (hevc->cur_pic->slice_type == I_SLICE) { + hevc->gvs->i_concealed_frames++; + } else if (hevc->cur_pic->slice_type == P_SLICE) { + hevc->gvs->p_concealed_frames++; + } else if (hevc->cur_pic->slice_type == B_SLICE) { + hevc->gvs->b_concealed_frames++; + } + } + if 
(hevc->PB_skip_mode == 2) { + hevc->gvs->drop_frame_count++; + if (rpm_param->p.slice_type == I_SLICE) { + hevc->gvs->i_lost_frames++; + } else if (rpm_param->p.slice_type == P_SLICE) { + hevc->gvs->p_lost_frames++; + } else if (rpm_param->p.slice_type == B_SLICE) { + hevc->gvs->b_lost_frames++; + } + } + return 2; + } +#ifdef MCRCC_ENABLE + config_mcrcc_axi_hw(hevc, hevc->cur_pic->slice_type); +#endif + if (!hevc->tile_width_lcu || !hevc->tile_height_lcu) + return -1; + config_mpred_hw(hevc); + + config_sao_hw(hevc, rpm_param); + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_START); + if ((hevc->slice_type != 2) && (hevc->i_only & 0x2)) + return 0xf; + + if (post_picture_early(vdec, hevc->cur_pic->index)) + return -1; + + return 0; +} + +/* return page number */ +static int hevc_mmu_page_num(struct hevc_state_s *hevc, + int w, int h, int save_mode) +{ + int picture_size; + int page_num; + int max_frame_num; + + picture_size = compute_losless_comp_body_size(hevc, w, + h, save_mode); + page_num = ((picture_size + PAGE_SIZE - 1) >> PAGE_SHIFT); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + max_frame_num = MAX_FRAME_8K_NUM; + else + max_frame_num = MAX_FRAME_4K_NUM; + + if (page_num > max_frame_num) { + hevc_print(hevc, 0, "over max !! 
0x%x width %d height %d\n", + page_num, w, h); + return -1; + } + return page_num; +} + +static int H265_alloc_mmu(struct hevc_state_s *hevc, struct PIC_s *new_pic, + unsigned short bit_depth, unsigned int *mmu_index_adr) { + int bit_depth_10 = (bit_depth != 0x00); + int cur_mmu_4k_number; + int ret; + + if (get_double_write_mode(hevc) == 0x10) + return 0; + + cur_mmu_4k_number = hevc_mmu_page_num(hevc, new_pic->width, + new_pic->height, !bit_depth_10); + if (cur_mmu_4k_number < 0) + return -1; + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + if (hevc->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(hevc, new_pic->BUF_index); + + ret = decoder_mmu_box_alloc_idx( + ibuf->mmu_box, + ibuf->index, + ibuf->frame_buffer_size, + mmu_index_adr); + } else { + ret = decoder_mmu_box_alloc_idx( + hevc->mmu_box, + new_pic->index, + cur_mmu_4k_number, + mmu_index_adr); + } + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + new_pic->scatter_alloc = 1; + + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s pic index %d page count(%d) ret =%d\n", + __func__, new_pic->index, + cur_mmu_4k_number, ret); + return ret; +} + + +static void release_pic_mmu_buf(struct hevc_state_s *hevc, + struct PIC_s *pic) +{ + hevc_print(hevc, H265_DEBUG_BUFMGR_MORE, + "%s pic index %d scatter_alloc %d\n", + __func__, pic->index, + pic->scatter_alloc); + + if (hevc->mmu_enable + && !(hevc->double_write_mode & 0x10) + && pic->scatter_alloc) { + if (!hevc->is_used_v4l) + decoder_mmu_box_free_idx(hevc->mmu_box, pic->index); + else { + struct internal_comp_buf *ibuf = + ibuf = index_to_icomp_buf(hevc, pic->BUF_index); + decoder_mmu_box_free_idx(ibuf->mmu_box, ibuf->index); + } + } + pic->scatter_alloc = 0; +} + +/* + ************************************************* + * + *h265 buffer management end + * + ************************************************** + */ +static struct hevc_state_s *gHevc; + +static void 
hevc_local_uninit(struct hevc_state_s *hevc) +{ + hevc->rpm_ptr = NULL; + hevc->lmem_ptr = NULL; + +#ifdef SWAP_HEVC_UCODE + if (hevc->is_swap && get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + if (hevc->mc_cpu_addr != NULL) { + dma_free_coherent(amports_get_dma_device(), + hevc->swap_size, hevc->mc_cpu_addr, + hevc->mc_dma_handle); + hevc->mc_cpu_addr = NULL; + } + + } +#endif +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) + uninit_detrefill_buf(hevc); +#endif + if (hevc->aux_addr) { + dma_free_coherent(amports_get_dma_device(), + hevc->prefix_aux_size + hevc->suffix_aux_size, hevc->aux_addr, + hevc->aux_phy_addr); + hevc->aux_addr = NULL; + } + if (hevc->rpm_addr) { + dma_free_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, hevc->rpm_addr, + hevc->rpm_phy_addr); + hevc->rpm_addr = NULL; + } + if (hevc->lmem_addr) { + dma_free_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, hevc->lmem_addr, + hevc->lmem_phy_addr); + hevc->lmem_addr = NULL; + } + + if (hevc->mmu_enable && hevc->frame_mmu_map_addr) { + if (hevc->frame_mmu_map_phy_addr) + dma_free_coherent(amports_get_dma_device(), + get_frame_mmu_map_size(), hevc->frame_mmu_map_addr, + hevc->frame_mmu_map_phy_addr); + + hevc->frame_mmu_map_addr = NULL; + } + + //pr_err("[%s line %d] hevc->gvs=0x%p operation\n",__func__, __LINE__, hevc->gvs); +} + +static int hevc_local_init(struct hevc_state_s *hevc) +{ + int ret = -1; + struct BuffInfo_s *cur_buf_info = NULL; + + memset(&hevc->param, 0, sizeof(union param_u)); + + cur_buf_info = &hevc->work_space_buf_store; + if (force_bufspec) { + memcpy(cur_buf_info, &amvh265_workbuff_spec[force_bufspec & 0xf], + sizeof(struct BuffInfo_s)); + pr_info("force buffer spec %d\n", force_bufspec & 0xf); + } else { + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + memcpy(cur_buf_info, &amvh265_workbuff_spec[2], 
/* 4k */ + sizeof(struct BuffInfo_s)); + else + memcpy(cur_buf_info, &amvh265_workbuff_spec[1], /* 4k */ + sizeof(struct BuffInfo_s)); + } else { + memcpy(cur_buf_info, &amvh265_workbuff_spec[0], /* 1080p */ + sizeof(struct BuffInfo_s)); + } + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) { + memcpy(cur_buf_info, &amvh265_workbuff_spec[5], /* 4k */ + sizeof(struct BuffInfo_s)); + } else { + memcpy(cur_buf_info, &amvh265_workbuff_spec[3], /* 1080p */ + sizeof(struct BuffInfo_s)); + } + } + } + + cur_buf_info->start_adr = hevc->buf_start; + init_buff_spec(hevc, cur_buf_info); + + hevc_init_stru(hevc, cur_buf_info); + + hevc->bit_depth_luma = 8; + hevc->bit_depth_chroma = 8; + hevc->video_signal_type = 0; + hevc->video_signal_type_debug = 0; + bit_depth_luma = hevc->bit_depth_luma; + bit_depth_chroma = hevc->bit_depth_chroma; + video_signal_type = hevc->video_signal_type; + + if ((get_dbg_flag(hevc) & H265_DEBUG_SEND_PARAM_WITH_REG) == 0) { + hevc->rpm_addr = dma_alloc_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, &hevc->rpm_phy_addr, GFP_KERNEL); + if (hevc->rpm_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + return -1; + } + hevc->rpm_ptr = hevc->rpm_addr; + } + + if (prefix_aux_buf_size > 0 || + suffix_aux_buf_size > 0) { + u32 aux_buf_size; + + hevc->prefix_aux_size = AUX_BUF_ALIGN(prefix_aux_buf_size); + hevc->suffix_aux_size = AUX_BUF_ALIGN(suffix_aux_buf_size); + aux_buf_size = hevc->prefix_aux_size + hevc->suffix_aux_size; + hevc->aux_addr =dma_alloc_coherent(amports_get_dma_device(), + aux_buf_size, &hevc->aux_phy_addr, GFP_KERNEL); + if (hevc->aux_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + return -1; + } + } + + hevc->lmem_addr = dma_alloc_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, &hevc->lmem_phy_addr, GFP_KERNEL); + if (hevc->lmem_addr == NULL) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + return -1; + } + 
hevc->lmem_ptr = hevc->lmem_addr; + + if (hevc->mmu_enable) { + hevc->frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + get_frame_mmu_map_size(), + &hevc->frame_mmu_map_phy_addr, GFP_KERNEL); + if (hevc->frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(hevc->frame_mmu_map_addr, 0, get_frame_mmu_map_size()); + } + ret = 0; + return ret; +} + +/* + ******************************************* + * Mailbox command + ******************************************* + */ +#define CMD_FINISHED 0 +#define CMD_ALLOC_VIEW 1 +#define CMD_FRAME_DISPLAY 3 +#define CMD_DEBUG 10 + + +#define DECODE_BUFFER_NUM_MAX 32 +#define DISPLAY_BUFFER_NUM 6 + +#define video_domain_addr(adr) (adr&0x7fffffff) +#define DECODER_WORK_SPACE_SIZE 0x800000 + +#define spec2canvas(x) \ + (((x)->uv_canvas_index << 16) | \ + ((x)->uv_canvas_index << 8) | \ + ((x)->y_canvas_index << 0)) + + +static void set_canvas(struct hevc_state_s *hevc, struct PIC_s *pic) +{ + struct vdec_s *vdec = hw_to_vdec(hevc); + int canvas_w = ALIGN(pic->width, 64)/4; + int canvas_h = ALIGN(pic->height, 32)/4; + int blkmode = hevc->mem_map_mode; + + /*CANVAS_BLKMODE_64X32*/ +#ifdef SUPPORT_10BIT + if (pic->double_write_mode) { + canvas_w = pic->width / + get_double_write_ratio(pic->double_write_mode); + canvas_h = pic->height / + get_double_write_ratio(pic->double_write_mode); + + canvas_w = ALIGN(canvas_w, 64); + canvas_h = ALIGN(canvas_h, 32); + + if (vdec->parallel_dec == 1) { + if (pic->y_canvas_index == -1) + pic->y_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + if (pic->uv_canvas_index == -1) + pic->uv_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + } else { + pic->y_canvas_index = 128 + pic->index * 2; + pic->uv_canvas_index = 128 + pic->index * 2 + 1; + } + + config_cav_lut_ex(pic->y_canvas_index, + pic->dw_y_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 
0 : 7, VDEC_HEVC); + config_cav_lut_ex(pic->uv_canvas_index, pic->dw_u_v_adr, + canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 0 : 7, VDEC_HEVC); +#ifdef MULTI_INSTANCE_SUPPORT + pic->canvas_config[0].phy_addr = + pic->dw_y_adr; + pic->canvas_config[0].width = + canvas_w; + pic->canvas_config[0].height = + canvas_h; + pic->canvas_config[0].block_mode = + blkmode; + pic->canvas_config[0].endian = hevc->is_used_v4l ? 0 : 7; + + pic->canvas_config[1].phy_addr = + pic->dw_u_v_adr; + pic->canvas_config[1].width = + canvas_w; + pic->canvas_config[1].height = + canvas_h; + pic->canvas_config[1].block_mode = + blkmode; + pic->canvas_config[1].endian = hevc->is_used_v4l ? 0 : 7; + + ATRACE_COUNTER(hevc->trace.set_canvas0_addr, pic->canvas_config[0].phy_addr); + hevc_print(hevc, H265_DEBUG_PIC_STRUCT,"%s(canvas0 addr:0x%x)\n", + __func__, pic->canvas_config[0].phy_addr); +#else + ATRACE_COUNTER(hevc->trace.set_canvas0_addr, spec2canvas(pic)); + hevc_print(hevc, H265_DEBUG_PIC_STRUCT,"%s(canvas0 addr:0x%x)\n", + __func__, spec2canvas(pic)); +#endif + } else { + if (!hevc->mmu_enable) { + /* to change after 10bit VPU is ready ... */ + if (vdec->parallel_dec == 1) { + if (pic->y_canvas_index == -1) + pic->y_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + pic->uv_canvas_index = pic->y_canvas_index; + } else { + pic->y_canvas_index = 128 + pic->index; + pic->uv_canvas_index = 128 + pic->index; + } + + config_cav_lut_ex(pic->y_canvas_index, + pic->mc_y_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 0 : 7, VDEC_HEVC); + config_cav_lut_ex(pic->uv_canvas_index, pic->mc_u_v_adr, + canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 
0 : 7, VDEC_HEVC); + } + ATRACE_COUNTER(hevc->trace.set_canvas0_addr, spec2canvas(pic)); + hevc_print(hevc, H265_DEBUG_PIC_STRUCT,"%s(canvas0 addr:0x%x)\n", + __func__, spec2canvas(pic)); + } +#else + if (vdec->parallel_dec == 1) { + if (pic->y_canvas_index == -1) + pic->y_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + if (pic->uv_canvas_index == -1) + pic->uv_canvas_index = vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + } else { + pic->y_canvas_index = 128 + pic->index * 2; + pic->uv_canvas_index = 128 + pic->index * 2 + 1; + } + + + config_cav_lut_ex(pic->y_canvas_index, pic->mc_y_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 0 : 7, VDEC_HEVC); + config_cav_lut_ex(pic->uv_canvas_index, pic->mc_u_v_adr, + canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, hevc->is_used_v4l ? 0 : 7, VDEC_HEVC); + + ATRACE_COUNTER(hevc->trace.set_canvas0_addr, spec2canvas(pic)); + hevc_print(hevc, H265_DEBUG_PIC_STRUCT,"%s(canvas0 addr:0x%x)\n", + __func__, spec2canvas(pic)); +#endif +} + +static int init_buf_spec(struct hevc_state_s *hevc) +{ + int pic_width = hevc->pic_w; + int pic_height = hevc->pic_h; + + /* hevc_print(hevc, 0, + *"%s1: %d %d\n", __func__, hevc->pic_w, hevc->pic_h); + */ + hevc_print(hevc, 0, + "%s2 %d %d\n", __func__, pic_width, pic_height); + /* pic_width = hevc->pic_w; */ + /* pic_height = hevc->pic_h; */ + + if (hevc->frame_width == 0 || hevc->frame_height == 0) { + hevc->frame_width = pic_width; + hevc->frame_height = pic_height; + + } + + return 0; +} + +static int parse_sei(struct hevc_state_s *hevc, + struct PIC_s *pic, char *sei_buf, uint32_t size) +{ + char *p = sei_buf; + char *p_sei; + uint16_t header; + uint16_t nal_unit_type; + uint16_t payload_type, payload_size; + int i, j; + + if (size < 2) + return 0; + header = *p++; + header <<= 8; + header += *p++; + nal_unit_type = header >> 9; + if ((nal_unit_type != NAL_UNIT_SEI) + && (nal_unit_type != NAL_UNIT_SEI_SUFFIX)) + return 0; + while (p+4 <= 
sei_buf+size) { + payload_type = *p++; + if (payload_type == 0xff) { + payload_type += *p++; + } + payload_size = *p++; + if (payload_size == 0xff) { + payload_size += *p++; + } + + if (p+payload_size <= sei_buf+size) { + switch (payload_type) { + case SEI_PicTiming: + if ((parser_sei_enable & 0x4) && + hevc->frame_field_info_present_flag) { + p_sei = p; + hevc->curr_pic_struct = (*p_sei >> 4)&0x0f; + pic->pic_struct = hevc->curr_pic_struct; + if (get_dbg_flag(hevc) & + H265_DEBUG_PIC_STRUCT) { + hevc_print(hevc, 0, + "parse result pic_struct = %d\n", + hevc->curr_pic_struct); + } + } + break; + case SEI_UserDataITU_T_T35: + p_sei = p; + if (p_sei[0] == 0xB5 + && p_sei[1] == 0x00 + && p_sei[2] == 0x3C + && p_sei[3] == 0x00 + && p_sei[4] == 0x01 + && p_sei[5] == 0x04) { + char *new_buf; + hevc->sei_present_flag |= SEI_HDR10PLUS_MASK; + new_buf = vzalloc(payload_size); + if (new_buf) { + memcpy(new_buf, p_sei, payload_size); + pic->hdr10p_data_buf = new_buf; + pic->hdr10p_data_size = payload_size; + } else { + hevc_print(hevc, 0, + "%s:hdr10p data vzalloc size(%d) fail\n", + __func__, payload_size); + pic->hdr10p_data_buf = NULL; + pic->hdr10p_data_size = 0; + } + } else if (p_sei[0] == 0x26 + && p_sei[1] == 0x00 + && p_sei[2] == 0x04 + && p_sei[3] == 0x00 + && p_sei[4] == 0x05) { + hevc->sei_present_flag |= SEI_HDR_CUVA_MASK; + + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) { + PR_INIT(128); + hevc_print(hevc, 0, + "hdr cuva data: (size %d)\n", + payload_size); + for (i = 0; i < payload_size; i++) { + PR_FILL("%02x ", p_sei[i]); + if (((i + 1) & 0xf) == 0) + PR_INFO(hevc->index); + } + PR_INFO(hevc->index); + } + } + + break; + case SEI_MasteringDisplayColorVolume: + /*hevc_print(hevc, 0, + "sei type: primary display color volume %d, size %d\n", + payload_type, + payload_size);*/ + /* master_display_colour */ + p_sei = p; + for (i = 0; i < 3; i++) { + for (j = 0; j < 2; j++) { + hevc->primaries[i][j] + = (*p_sei<<8) + | *(p_sei+1); + p_sei += 2; + } + } + for (i = 
0; i < 2; i++) { + hevc->white_point[i] + = (*p_sei<<8) + | *(p_sei+1); + p_sei += 2; + } + for (i = 0; i < 2; i++) { + hevc->luminance[i] + = (*p_sei<<24) + | (*(p_sei+1)<<16) + | (*(p_sei+2)<<8) + | *(p_sei+3); + p_sei += 4; + } + hevc->sei_present_flag |= + SEI_MASTER_DISPLAY_COLOR_MASK; + /*for (i = 0; i < 3; i++) + for (j = 0; j < 2; j++) + hevc_print(hevc, 0, + "\tprimaries[%1d][%1d] = %04x\n", + i, j, + hevc->primaries[i][j]); + hevc_print(hevc, 0, + "\twhite_point = (%04x, %04x)\n", + hevc->white_point[0], + hevc->white_point[1]); + hevc_print(hevc, 0, + "\tmax,min luminance = %08x, %08x\n", + hevc->luminance[0], + hevc->luminance[1]);*/ + break; + case SEI_ContentLightLevel: + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "sei type: max content light level %d, size %d\n", + payload_type, payload_size); + /* content_light_level */ + p_sei = p; + hevc->content_light_level[0] + = (*p_sei<<8) | *(p_sei+1); + p_sei += 2; + hevc->content_light_level[1] + = (*p_sei<<8) | *(p_sei+1); + p_sei += 2; + hevc->sei_present_flag |= + SEI_CONTENT_LIGHT_LEVEL_MASK; + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "\tmax cll = %04x, max_pa_cll = %04x\n", + hevc->content_light_level[0], + hevc->content_light_level[1]); + break; + default: + break; + } + } + p += payload_size; + } + return 0; +} + +static unsigned calc_ar(unsigned idc, unsigned sar_w, unsigned sar_h, + unsigned w, unsigned h) +{ + unsigned ar; + + if (idc == 255) { + ar = div_u64(256ULL * sar_h * h, + sar_w * w); + } else { + switch (idc) { + case 1: + ar = 0x100 * h / w; + break; + case 2: + ar = 0x100 * h * 11 / (w * 12); + break; + case 3: + ar = 0x100 * h * 11 / (w * 10); + break; + case 4: + ar = 0x100 * h * 11 / (w * 16); + break; + case 5: + ar = 0x100 * h * 33 / (w * 40); + break; + case 6: + ar = 0x100 * h * 11 / (w * 24); + break; + case 7: + ar = 0x100 * h * 11 / (w * 20); + break; + case 8: + ar = 0x100 * h * 11 / (w * 32); + break; + case 9: + ar = 
0x100 * h * 33 / (w * 80); + break; + case 10: + ar = 0x100 * h * 11 / (w * 18); + break; + case 11: + ar = 0x100 * h * 11 / (w * 15); + break; + case 12: + ar = 0x100 * h * 33 / (w * 64); + break; + case 13: + ar = 0x100 * h * 99 / (w * 160); + break; + case 14: + ar = 0x100 * h * 3 / (w * 4); + break; + case 15: + ar = 0x100 * h * 2 / (w * 3); + break; + case 16: + ar = 0x100 * h * 1 / (w * 2); + break; + default: + ar = h * 0x100 / w; + break; + } + } + + return ar; +} + +static void set_frame_info(struct hevc_state_s *hevc, struct vframe_s *vf, + struct PIC_s *pic) +{ + unsigned int ar; + int i, j; + char *p; + unsigned size = 0; + unsigned type = 0; + struct vframe_master_display_colour_s *vf_dp + = &vf->prop.master_display_colour; + + vf->width = pic->width / + get_double_write_ratio(pic->double_write_mode); + vf->height = pic->height / + get_double_write_ratio(pic->double_write_mode); + + vf->duration = hevc->frame_dur; + vf->duration_pulldown = 0; + vf->flag = 0; + + ar = min_t(u32, hevc->frame_ar, DISP_RATIO_ASPECT_RATIO_MAX); + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + + + if (((pic->aspect_ratio_idc == 255) && + pic->sar_width && + pic->sar_height) || + ((pic->aspect_ratio_idc != 255) && + (pic->width))) { + ar = min_t(u32, + calc_ar(pic->aspect_ratio_idc, + pic->sar_width, + pic->sar_height, + pic->width, + pic->height), + DISP_RATIO_ASPECT_RATIO_MAX); + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + vf->ratio_control <<= hevc->interlace_flag; + } + hevc->ratio_control = vf->ratio_control; + if (pic->aux_data_buf + && pic->aux_data_size) { + /* parser sei */ + p = pic->aux_data_buf; + while (p < pic->aux_data_buf + + pic->aux_data_size - 8) { + size = *p++; + size = (size << 8) | *p++; + size = (size << 8) | *p++; + size = (size << 8) | *p++; + type = *p++; + type = (type << 8) | *p++; + type = (type << 8) | *p++; + type = (type << 8) | *p++; + if (type == 0x02000000) { + /* hevc_print(hevc, 0, + "sei(%d)\n", size); */ + 
parse_sei(hevc, pic, p, size); + } + p += size; + } + } + if (hevc->video_signal_type & VIDEO_SIGNAL_TYPE_AVAILABLE_MASK) { + vf->signal_type = pic->video_signal_type; + if (hevc->sei_present_flag & SEI_HDR10PLUS_MASK) { + u32 data; + data = vf->signal_type; + data = data & 0xFFFF00FF; + data = data | (0x30<<8); + vf->signal_type = data; + } + + if (hevc->sei_present_flag & SEI_HDR_CUVA_MASK) { + u32 data; + data = vf->signal_type; + data = data & 0x7FFFFFFF; + data = data | (1<<31); + vf->signal_type = data; + } + } + else + vf->signal_type = 0; + hevc->video_signal_type_debug = vf->signal_type; + + /* master_display_colour */ + if (hevc->sei_present_flag & SEI_MASTER_DISPLAY_COLOR_MASK) { + for (i = 0; i < 3; i++) + for (j = 0; j < 2; j++) + vf_dp->primaries[i][j] = hevc->primaries[i][j]; + for (i = 0; i < 2; i++) { + vf_dp->white_point[i] = hevc->white_point[i]; + vf_dp->luminance[i] + = hevc->luminance[i]; + } + vf_dp->present_flag = 1; + } else + vf_dp->present_flag = 0; + + /* content_light_level */ + if (hevc->sei_present_flag & SEI_CONTENT_LIGHT_LEVEL_MASK) { + vf_dp->content_light_level.max_content + = hevc->content_light_level[0]; + vf_dp->content_light_level.max_pic_average + = hevc->content_light_level[1]; + vf_dp->content_light_level.present_flag = 1; + } else + vf_dp->content_light_level.present_flag = 0; + + if (hevc->is_used_v4l && + ((hevc->video_signal_type & VIDEO_SIGNAL_TYPE_AVAILABLE_MASK) || + (hevc->sei_present_flag & SEI_HDR10PLUS_MASK) || + (vf_dp->present_flag) || + (vf_dp->content_light_level.present_flag))) { + struct aml_vdec_hdr_infos hdr; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hevc->v4l2_ctx); + + memset(&hdr, 0, sizeof(hdr)); + hdr.signal_type = vf->signal_type; + hdr.color_parms = *vf_dp; + vdec_v4l_set_hdr_infos(ctx, &hdr); + } + + if ((hevc->sei_present_flag & SEI_HDR10PLUS_MASK) && (pic->hdr10p_data_buf != NULL) + && (pic->hdr10p_data_size != 0)) { + if (pic->hdr10p_data_size <= 128) { + char *new_buf; + 
new_buf = kzalloc(pic->hdr10p_data_size, GFP_ATOMIC); + + if (new_buf) { + memcpy(new_buf, pic->hdr10p_data_buf, pic->hdr10p_data_size); + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) { + PR_INIT(128); + hevc_print(hevc, 0, + "hdr10p data: (size %d)\n", + pic->hdr10p_data_size); + for (i = 0; i < pic->hdr10p_data_size; i++) { + PR_FILL("%02x ", pic->hdr10p_data_buf[i]); + if (((i + 1) & 0xf) == 0) + PR_INFO(hevc->index); + } + PR_INFO(hevc->index); + } + + vf->hdr10p_data_size = pic->hdr10p_data_size; + vf->hdr10p_data_buf = new_buf; + set_meta_data_to_vf(vf, UVM_META_DATA_HDR10P_DATA, hevc->v4l2_ctx); + } else { + hevc_print(hevc, 0, + "%s:hdr10p data vzalloc size(%d) fail\n", + __func__, pic->hdr10p_data_size); + vf->hdr10p_data_buf = NULL; + vf->hdr10p_data_size = 0; + } + } + + vfree(pic->hdr10p_data_buf); + pic->hdr10p_data_buf = NULL; + pic->hdr10p_data_size = 0; + } + + vf->sidebind_type = hevc->sidebind_type; + vf->sidebind_channel_id = hevc->sidebind_channel_id; +} + +static int vh265_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; +#ifdef MULTI_INSTANCE_SUPPORT + struct vdec_s *vdec = op_arg; + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; +#else + struct hevc_state_s *hevc = (struct hevc_state_s *)op_arg; +#endif + + spin_lock_irqsave(&lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&hevc->newframe_q); + states->buf_avail_num = kfifo_len(&hevc->display_q); + + if (step == 2) + states->buf_avail_num = 0; + spin_unlock_irqrestore(&lock, flags); + return 0; +} + +static struct vframe_s *vh265_vf_peek(void *op_arg) +{ + struct vframe_s *vf[2] = {0, 0}; +#ifdef MULTI_INSTANCE_SUPPORT + struct vdec_s *vdec = op_arg; + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; +#else + struct hevc_state_s *hevc = (struct hevc_state_s *)op_arg; +#endif + + if (step == 2) + return NULL; + + if (force_disp_pic_index & 0x100) { + if (force_disp_pic_index & 0x200) 
+ return NULL; + return &hevc->vframe_dummy; + } + + + if (kfifo_out_peek(&hevc->display_q, (void *)&vf, 2)) { + if (vf[1]) { + vf[0]->next_vf_pts_valid = true; + vf[0]->next_vf_pts = vf[1]->pts; + } else + vf[0]->next_vf_pts_valid = false; + return vf[0]; + } + + return NULL; +} + +static struct vframe_s *vh265_vf_get(void *op_arg) +{ + struct vframe_s *vf; +#ifdef MULTI_INSTANCE_SUPPORT + struct vdec_s *vdec = op_arg; + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; +#else + struct hevc_state_s *hevc = (struct hevc_state_s *)op_arg; +#endif + + if (step == 2) + return NULL; + else if (step == 1) + step = 2; + +#if 0 + if (force_disp_pic_index & 0x100) { + int buffer_index = force_disp_pic_index & 0xff; + struct PIC_s *pic = NULL; + if (buffer_index >= 0 + && buffer_index < MAX_REF_PIC_NUM) + pic = hevc->m_PIC[buffer_index]; + if (pic == NULL) + return NULL; + if (force_disp_pic_index & 0x200) + return NULL; + + vf = &hevc->vframe_dummy; + if (get_double_write_mode(hevc)) { + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | + VIDTYPE_VIU_NV21; + if (hevc->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + pic->canvas_config[0]; + vf->canvas0_config[1] = + pic->canvas_config[1]; + + vf->canvas1_config[0] = + pic->canvas_config[0]; + vf->canvas1_config[1] = + pic->canvas_config[1]; + } else { + vf->canvas0Addr = vf->canvas1Addr + = spec2canvas(pic); + } + } else { + vf->canvas0Addr = vf->canvas1Addr = 0; + vf->type = VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + vf->compWidth = pic->width; + vf->compHeight = pic->height; + update_vf_memhandle(hevc, vf, pic); + switch (hevc->bit_depth_luma) { + case 9: + vf->bitdepth = BITDEPTH_Y9 | BITDEPTH_U9 | BITDEPTH_V9; + break; + case 10: + vf->bitdepth = BITDEPTH_Y10 | BITDEPTH_U10 + | BITDEPTH_V10; + break; + default: + vf->bitdepth = BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + break; + } + if 
((vf->type & VIDTYPE_COMPRESS) == 0) + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + if (hevc->mem_saving_mode == 1) + vf->bitdepth |= BITDEPTH_SAVING_MODE; + vf->duration_pulldown = 0; + vf->pts = 0; + vf->pts_us64 = 0; + set_frame_info(hevc, vf); + + vf->width = pic->width / + get_double_write_ratio(pic->double_write_mode); + vf->height = pic->height / + get_double_write_ratio(pic->double_write_mode); + + force_disp_pic_index |= 0x200; + return vf; + } +#endif + + if (kfifo_get(&hevc->display_q, &vf)) { + struct vframe_s *next_vf = NULL; + + ATRACE_COUNTER(hevc->trace.vf_get_name, (long)vf); + ATRACE_COUNTER(hevc->trace.disp_q_name, kfifo_len(&hevc->display_q)); +#ifdef MULTI_INSTANCE_SUPPORT + ATRACE_COUNTER(hevc->trace.set_canvas0_addr, vf->canvas0_config[0].phy_addr); +#else + ATRACE_COUNTER(hevc->trace.get_canvas0_addr, vf->canvas0Addr); +#endif + + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) { + hevc_print(hevc, 0, + "%s(vf 0x%p type %d index 0x%x poc %d/%d) pts(%d,%d) dur %d\n", + __func__, vf, vf->type, vf->index, + get_pic_poc(hevc, vf->index & 0xff), + get_pic_poc(hevc, (vf->index >> 8) & 0xff), + vf->pts, vf->pts_us64, + vf->duration); +#ifdef MULTI_INSTANCE_SUPPORT + hevc_print(hevc, 0, "get canvas0 addr:0x%x\n", vf->canvas0_config[0].phy_addr); +#else + hevc_print(hevc, 0, "get canvas0 addr:0x%x\n", vf->canvas0Addr); +#endif + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (get_dbg_flag(hevc) & H265_DEBUG_DV) { + struct PIC_s *pic = hevc->m_PIC[vf->index & 0xff]; + hevc_print(hevc, 0, "pic 0x%p aux size %d:\n", + pic, pic->aux_data_size); + if (pic->aux_data_buf && pic->aux_data_size > 0) { + PR_INIT(128); + int i; + for (i = 0; i < pic->aux_data_size; i++) { + PR_FILL("%02x ", pic->aux_data_buf[i]); + if (((i + 1) & 0xf) == 0) + PR_INFO(hevc->index); + } + PR_INFO(hevc->index); + } + } +#endif + hevc->show_frame_num++; + vf->index_disp = atomic_read(&hevc->vf_get_count); + atomic_add(1, &hevc->vf_get_count); + + if 
(kfifo_peek(&hevc->display_q, &next_vf) && next_vf) { + vf->next_vf_pts_valid = true; + vf->next_vf_pts = next_vf->pts; + } else + vf->next_vf_pts_valid = false; + + return vf; + } + + return NULL; +} +static bool vf_valid_check(struct vframe_s *vf, struct hevc_state_s *hevc) { + int i; + for (i = 0; i < VF_POOL_SIZE; i++) { + if (vf == &hevc->vfpool[i] || vf == &hevc->vframe_dummy) + return true; + } + hevc_print(hevc, 0," h265 invalid vf been put, vf = %p\n", vf); + for (i = 0; i < VF_POOL_SIZE; i++) { + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS,"valid vf[%d]= %p \n", i, &hevc->vfpool[i]); + } + return false; +} + +static void vh265_vf_put(struct vframe_s *vf, void *op_arg) +{ + unsigned long flags; +#ifdef MULTI_INSTANCE_SUPPORT + struct vdec_s *vdec = op_arg; + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; +#else + struct hevc_state_s *hevc = (struct hevc_state_s *)op_arg; +#endif + unsigned char index_top; + unsigned char index_bot; + + if (!vf) + return; + if (vf == (&hevc->vframe_dummy)) + return; + if (vf && (vf_valid_check(vf, hevc) == false)) + return; + + if (hevc->enable_fence && vf->fence) { + int ret, i; + + mutex_lock(&hevc->fence_mutex); + ret = dma_fence_get_status(vf->fence); + if (ret == 0) { + for (i = 0; i < VF_POOL_SIZE; i++) { + if (hevc->fence_vf_s.fence_vf[i] == NULL) { + hevc->fence_vf_s.fence_vf[i] = vf; + hevc->fence_vf_s.used_size++; + mutex_unlock(&hevc->fence_mutex); + return; + } + } + } + mutex_unlock(&hevc->fence_mutex); + } + + ATRACE_COUNTER(hevc->trace.vf_put_name, (long)vf); +#ifdef MULTI_INSTANCE_SUPPORT + ATRACE_COUNTER(hevc->trace.put_canvas0_addr, vf->canvas0_config[0].phy_addr); +#else + ATRACE_COUNTER(hevc->trace.put_canvas0_addr, vf->canvas0Addr); +#endif + index_top = vf->index & 0xff; + index_bot = (vf->index >> 8) & 0xff; + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "%s(vf 0x%p type %d index 0x%x put canvas0 addr:0x%x)\n", + __func__, vf, vf->type, vf->index +#ifdef 
MULTI_INSTANCE_SUPPORT + , vf->canvas0_config[0].phy_addr +#else + , vf->canvas0Addr +#endif + ); + atomic_add(1, &hevc->vf_put_count); + spin_lock_irqsave(&lock, flags); + kfifo_put(&hevc->newframe_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hevc->trace.new_q_name, kfifo_len(&hevc->newframe_q)); + if (hevc->enable_fence && vf->fence) { + vdec_fence_put(vf->fence); + vf->fence = NULL; + } + + if (vf->hdr10p_data_buf) { + kfree(vf->hdr10p_data_buf); + vf->hdr10p_data_buf = NULL; + vf->hdr10p_data_size = 0; + } + + if (vf->meta_data_buf) { + vf->meta_data_buf = NULL; + vf->meta_data_size = 0; + } + + if (!hevc->interlace_flag && + (vf->v4l_mem_handle != + hevc->m_PIC[index_top]->cma_alloc_addr)) { + hevc_print(hevc, PRINT_FLAG_V4L_DETAIL, + "H265 update fb handle, old:%llx, new:%llx\n", + hevc->m_PIC[index_top]->cma_alloc_addr, + vf->v4l_mem_handle); + + hevc->m_PIC[index_top]->cma_alloc_addr + = vf->v4l_mem_handle; + } + + if (index_top != 0xff + && index_top < MAX_REF_PIC_NUM + && hevc->m_PIC[index_top]) { + if (hevc->m_PIC[index_top]->vf_ref > 0) { + hevc->m_PIC[index_top]->vf_ref--; + + if (hevc->m_PIC[index_top]->vf_ref == 0) { + hevc->m_PIC[index_top]->output_ready = 0; + hevc->m_PIC[index_top]->show_frame = false; + + if (hevc->wait_buf != 0) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + } + } + } + + if (index_bot != 0xff + && index_bot < MAX_REF_PIC_NUM + && hevc->m_PIC[index_bot]) { + if (hevc->m_PIC[index_bot]->vf_ref > 0) { + hevc->m_PIC[index_bot]->vf_ref--; + + if (hevc->m_PIC[index_bot]->vf_ref == 0) { + hevc->m_PIC[index_bot]->output_ready = 0; + hevc->m_PIC[index_bot]->show_frame = false; + + if (hevc->wait_buf != 0) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + } + } + } + + spin_unlock_irqrestore(&lock, flags); +} + + +static int vh265_event_cb(int type, void *data, void *op_arg) +{ + unsigned long flags; +#ifdef MULTI_INSTANCE_SUPPORT + struct vdec_s *vdec = op_arg; + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; 
+#else + struct hevc_state_s *hevc = (struct hevc_state_s *)op_arg; +#endif + if (type & VFRAME_EVENT_RECEIVER_RESET) { +#if 0 + amhevc_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_light_unreg_provider(&vh265_vf_prov); +#endif + spin_lock_irqsave(&hevc->lock, flags); + vh265_local_init(); + vh265_prot_init(); + spin_unlock_irqrestore(&hevc->lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vh265_vf_prov); +#endif + amhevc_start(); +#endif + } else if (type & VFRAME_EVENT_RECEIVER_GET_AUX_DATA) { + struct provider_aux_req_s *req = + (struct provider_aux_req_s *)data; + unsigned char index; + + if (!req->vf) { + req->aux_size = atomic_read(&hevc->vf_put_count); + return 0; + } + spin_lock_irqsave(&lock, flags); + index = req->vf->index & 0xff; + req->aux_buf = NULL; + req->aux_size = 0; + req->format = VFORMAT_HEVC; + if (req->bot_flag) + index = (req->vf->index >> 8) & 0xff; + if (index != 0xff + && index < MAX_REF_PIC_NUM + && hevc->m_PIC[index]) { + req->aux_buf = hevc->m_PIC[index]->aux_data_buf; + req->aux_size = hevc->m_PIC[index]->aux_data_size; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (hevc->bypass_dvenl && !dolby_meta_with_el) + req->dv_enhance_exist = false; + else + req->dv_enhance_exist = + hevc->m_PIC[index]->dv_enhance_exist; + if (vdec_frame_based(vdec) && (hevc->dv_duallayer == true)) + req->dv_enhance_exist = 1; + hevc_print(hevc, H265_DEBUG_DV, + "query dv_enhance_exist for (pic 0x%p, vf 0x%p, poc %d index %d) flag => %d, aux sizd 0x%x\n", + hevc->m_PIC[index], + req->vf, + hevc->m_PIC[index]->POC, index, + req->dv_enhance_exist, req->aux_size); +#else + req->dv_enhance_exist = 0; +#endif + } + spin_unlock_irqrestore(&lock, flags); + + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "%s(type 0x%x vf index 0x%x)=>size 0x%x\n", + __func__, type, index, req->aux_size); + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (type & 
VFRAME_EVENT_RECEIVER_DOLBY_BYPASS_EL) { + if ((force_bypass_dvenl & 0x80000000) == 0) { + hevc_print(hevc, 0, + "%s: VFRAME_EVENT_RECEIVER_DOLBY_BYPASS_EL\n", + __func__); + hevc->bypass_dvenl_enable = 1; + } + } +#endif + else if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(vdec); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +static void get_pair_fb(struct hevc_state_s *hevc, struct vframe_s *vf) +{ + int i; + for (i = 0; i < 2; i ++) { + if (hevc->pair_fb[i] == NULL) { + hevc->pair_fb[i] = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + break; + } + } + + if (i >= 2) { + hevc->pair_fb[0] = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + hevc->pair_fb[1] = NULL; + } +} + +static void clear_pair_fb(struct hevc_state_s *hevc) +{ + int i; + for (i = 0; i < 2; i ++) + hevc->pair_fb[i] = NULL; +} + +static bool v4l_output_dw_with_compress(struct hevc_state_s *hevc, int dw) +{ + if ((!hevc->is_used_v4l) || (dw == 0x10) || + IS_8K_SIZE(hevc->frame_width, hevc->frame_height) || + hevc->interlace_flag) + return false; + + return true; +} + +#ifdef HEVC_PIC_STRUCT_SUPPORT +static int process_pending_vframe(struct hevc_state_s *hevc, + struct PIC_s *pair_pic, unsigned char pair_frame_top_flag) +{ + struct vframe_s *vf; + + if (!pair_pic) + return -1; + + hevc_print(hevc, H265_DEBUG_PIC_STRUCT, + "%s: pair_pic index 0x%x %s\n", __func__, pair_pic->index, + pair_frame_top_flag ? 
"top" : "bot"); + + if (kfifo_len(&hevc->pending_q) > 1) { + unsigned long flags; + int index1; + int index2; + /* do not pending more than 1 frame */ + if (kfifo_get(&hevc->pending_q, &vf) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + if (get_dbg_flag(hevc) & H265_DEBUG_PIC_STRUCT) + hevc_print(hevc, 0, + "%s warning(1), vf=>display_q: (index 0x%x), vf 0x%px\n", + __func__, vf->index, vf); + + if (v4l_output_dw_with_compress(hevc, pair_pic->double_write_mode)) { + vf->type |= VIDTYPE_COMPRESS; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + + /* recycle vframe */ + atomic_add(1, &hevc->vf_pre_count); + spin_lock_irqsave(&lock, flags); + kfifo_put(&hevc->newframe_q, (const struct vframe_s *)vf); + index1 = vf->index & 0xff; + index2 = (vf->index >> 8) & 0xff; + if (index1 >= MAX_REF_PIC_NUM && + index2 >= MAX_REF_PIC_NUM) { + spin_unlock_irqrestore(&lock, flags); + return -1; + } + + if (index1 < MAX_REF_PIC_NUM) { + hevc->m_PIC[index1]->vf_ref = 0; + hevc->m_PIC[index1]->output_ready = 0; + } + if (index2 < MAX_REF_PIC_NUM) { + hevc->m_PIC[index2]->vf_ref = 0; + hevc->m_PIC[index2]->output_ready = 0; + } + if (hevc->wait_buf != 0) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + spin_unlock_irqrestore(&lock, flags); + + ATRACE_COUNTER(hevc->trace.pts_name, vf->timestamp); + } + + if (kfifo_peek(&hevc->pending_q, &vf)) { + if (kfifo_get(&hevc->pending_q, &vf) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + if (vf == NULL) + return -1; + + if (pair_pic == NULL || pair_pic->vf_ref <= 0) { + /* + *if pair_pic is recycled (pair_pic->vf_ref <= 0), + *do not use it + */ + hevc_print(hevc, H265_DEBUG_PIC_STRUCT, + "%s warning(2), vf=>display_q: (index 0x%x)\n", + __func__, vf->index); + + if (v4l_output_dw_with_compress(hevc, pair_pic->double_write_mode)) { + vf->type |= VIDTYPE_COMPRESS; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + atomic_add(1, 
&hevc->vf_pre_count); + vdec_vframe_ready(hw_to_vdec(hevc), vf); + hevc->send_frame_flag = 1; + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(hevc->trace.pts_name, vf->timestamp); + } else if ((!pair_frame_top_flag) && (((vf->index >> 8) & 0xff) == 0xff)) { + if (v4l_output_dw_with_compress(hevc, pair_pic->double_write_mode)) { + vf->type |= VIDTYPE_COMPRESS; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + + vf->index &= 0xff; + vf->index |= (pair_pic->index << 8); + pair_pic->vf_ref++; + vdec_vframe_ready(hw_to_vdec(hevc), vf); + hevc->send_frame_flag = 1; + get_pair_fb(hevc, vf); + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(hevc->trace.pts_name, vf->timestamp); + atomic_add(1, &hevc->vf_pre_count); + hevc_print(hevc, H265_DEBUG_PIC_STRUCT, + "%s vf => display_q: (index 0x%x), w %d, h %d, type 0x%x\n", + __func__, vf->index, vf->width, vf->height, vf->type); + } else if (pair_frame_top_flag && ((vf->index & 0xff) == 0xff)) { + if (v4l_output_dw_with_compress(hevc, pair_pic->double_write_mode)) { + vf->type |= VIDTYPE_COMPRESS; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + + vf->index &= 0xff00; + vf->index |= pair_pic->index; + pair_pic->vf_ref++; + vdec_vframe_ready(hw_to_vdec(hevc), vf); + hevc->send_frame_flag = 1; + get_pair_fb(hevc, vf); + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(hevc->trace.pts_name, vf->timestamp); + atomic_add(1, &hevc->vf_pre_count); + hevc_print(hevc, H265_DEBUG_PIC_STRUCT, + "%s vf => display_q: (index 0x%x), w %d, h %d, type 0x%x\n", + __func__, vf->index, vf->width, vf->height, vf->type); + } + } + return 0; +} +#endif +static void update_vf_memhandle(struct hevc_state_s *hevc, + struct vframe_s *vf, struct PIC_s *pic) +{ + vf->mem_handle = NULL; + vf->mem_head_handle = NULL; + + /* keeper not needed for v4l solution */ + if (hevc->is_used_v4l) + return; + + if (vf->type & VIDTYPE_SCATTER) { + vf->mem_handle 
= + decoder_mmu_box_get_mem_handle( + hevc->mmu_box, pic->index); + vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + hevc->bmmu_box, VF_BUFFER_IDX(pic->BUF_index)); + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hevc->bmmu_box, VF_BUFFER_IDX(pic->BUF_index)); + vf->mem_head_handle = NULL; + /*vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + hevc->bmmu_box, VF_BUFFER_IDX(BUF_index));*/ + } + return; +} + +static void fill_frame_info(struct hevc_state_s *hevc, + struct PIC_s *pic, unsigned int framesize, unsigned int pts) +{ + struct vframe_qos_s *vframe_qos = &hevc->vframe_qos; + if (hevc->m_nalUnitType == NAL_UNIT_CODED_SLICE_IDR) + vframe_qos->type = 4; + else if (pic->slice_type == I_SLICE) + vframe_qos->type = 1; + else if (pic->slice_type == P_SLICE) + vframe_qos->type = 2; + else if (pic->slice_type == B_SLICE) + vframe_qos->type = 3; +/* +#define SHOW_QOS_INFO +*/ + if (input_frame_based(hw_to_vdec(hevc))) + vframe_qos->size = pic->frame_size; + else + vframe_qos->size = framesize; + vframe_qos->pts = pts; +#ifdef SHOW_QOS_INFO + hevc_print(hevc, 0, "slice:%d, poc:%d\n", pic->slice_type, pic->POC); +#endif + + + vframe_qos->max_mv = pic->max_mv; + vframe_qos->avg_mv = pic->avg_mv; + vframe_qos->min_mv = pic->min_mv; +#ifdef SHOW_QOS_INFO + hevc_print(hevc, 0, "mv: max:%d, avg:%d, min:%d\n", + vframe_qos->max_mv, + vframe_qos->avg_mv, + vframe_qos->min_mv); +#endif + + vframe_qos->max_qp = pic->max_qp; + vframe_qos->avg_qp = pic->avg_qp; + vframe_qos->min_qp = pic->min_qp; +#ifdef SHOW_QOS_INFO + hevc_print(hevc, 0, "qp: max:%d, avg:%d, min:%d\n", + vframe_qos->max_qp, + vframe_qos->avg_qp, + vframe_qos->min_qp); +#endif + + vframe_qos->max_skip = pic->max_skip; + vframe_qos->avg_skip = pic->avg_skip; + vframe_qos->min_skip = pic->min_skip; +#ifdef SHOW_QOS_INFO + hevc_print(hevc, 0, "skip: max:%d, avg:%d, min:%d\n", + vframe_qos->max_skip, + vframe_qos->avg_skip, + vframe_qos->min_skip); +#endif + + vframe_qos->num++; + 
+} + +static inline void hevc_update_gvs(struct hevc_state_s *hevc) +{ + if (hevc->gvs->frame_height != hevc->frame_height) { + hevc->gvs->frame_width = hevc->frame_width; + hevc->gvs->frame_height = hevc->frame_height; + } + if (hevc->gvs->frame_dur != hevc->frame_dur) { + hevc->gvs->frame_dur = hevc->frame_dur; + if (hevc->frame_dur != 0) + hevc->gvs->frame_rate = ((96000 * 10 / hevc->frame_dur) % 10) < 5 ? + 96000 / hevc->frame_dur : (96000 / hevc->frame_dur +1); + else + hevc->gvs->frame_rate = -1; + } + hevc->gvs->error_count = hevc->gvs->error_frame_count; + hevc->gvs->status = hevc->stat | hevc->fatal_error; + if (hevc->gvs->ratio_control != hevc->ratio_control) + hevc->gvs->ratio_control = hevc->ratio_control; +} + +static void put_vf_to_display_q(struct hevc_state_s *hevc, struct vframe_s *vf) +{ + atomic_add(1, &hevc->vf_pre_count); + decoder_do_frame_check(hw_to_vdec(hevc), vf); + vdec_vframe_ready(hw_to_vdec(hevc), vf); + hevc->send_frame_flag = 1; + kfifo_put(&hevc->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hevc->trace.pts_name, vf->timestamp); +} + +static int post_prepare_process(struct vdec_s *vdec, struct PIC_s *frame) +{ + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; + + if (force_disp_pic_index & 0x100) { + /*recycle directly*/ + frame->output_ready = 0; + frame->show_frame = false; + hevc_print(hevc, 0, "discard show frame.\n"); + return 0; + } + + frame->show_frame = true; + + return 0; +} + +static int post_video_frame(struct vdec_s *vdec, struct PIC_s *pic) +{ + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; + struct vframe_s *vf = NULL; + int stream_offset = pic->stream_offset; + unsigned short slice_type = pic->slice_type; + ulong nv_order = VIDTYPE_VIU_NV21; + u32 frame_size = 0; + struct vdec_info tmp4x; + struct aml_vcodec_ctx * v4l2_ctx = hevc->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + int index; + + hevc->send_frame_flag = 0; + /* swap uv */ + if (hevc->is_used_v4l) { + if 
((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + nv_order = VIDTYPE_VIU_NV12; + } + + if (kfifo_get(&hevc->newframe_q, &vf) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + + if (vf) { + /*hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: pic index 0x%x\n", + __func__, pic->index);*/ + + if (hevc->is_used_v4l) { + vf->v4l_mem_handle = pic->cma_alloc_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + if (hevc->mmu_enable) { + vf->mm_box.bmmu_box = hevc->bmmu_box; + vf->mm_box.bmmu_idx = VF_BUFFER_IDX(hevc->buffer_wrap[pic->BUF_index]); + vf->mm_box.mmu_box = hevc->mmu_box; + vf->mm_box.mmu_idx = hevc->buffer_wrap[pic->BUF_index]; + } + } + + if (hevc->enable_fence) { + /* fill fence information. */ + if (hevc->fence_usage == FENCE_USE_FOR_DRIVER) + vf->fence = pic->fence; + } + +#ifdef MULTI_INSTANCE_SUPPORT + if (vdec_frame_based(vdec)) { + vf->pts = pic->pts; + vf->pts_us64 = pic->pts64; + vf->timestamp = pic->timestamp; + } + /* if (pts_lookup_offset(PTS_TYPE_VIDEO, + stream_offset, &vf->pts, 0) != 0) { */ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (vdec->master == NULL) { +#else + else { +#endif +#endif + hevc_print(hevc, H265_DEBUG_OUT_PTS, + "call pts_lookup_offset_us64(0x%x)\n", + stream_offset); + if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) { + if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, stream_offset, &vf->pts, + &frame_size, 0, &vf->pts_us64) != 0) { +#ifdef DEBUG_PTS + hevc->pts_missed++; +#endif + vf->pts = 0; + vf->pts_us64 = 0; + } else { +#ifdef DEBUG_PTS + hevc->pts_hit++; +#endif + } + } + +#ifdef MULTI_INSTANCE_SUPPORT +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + } else { + vf->pts = 0; + vf->pts_us64 = 0; + } +#else + } +#endif +#endif + + if (pts_unstable && (hevc->frame_dur > 0)) + hevc->pts_mode = PTS_NONE_REF_USE_DURATION; + + fill_frame_info(hevc, pic, frame_size, vf->pts); + + if (vf->pts 
!= 0) + hevc->last_lookup_pts = vf->pts; + + if ((hevc->pts_mode == PTS_NONE_REF_USE_DURATION) + && (slice_type != 2)) + vf->pts = hevc->last_pts + DUR2PTS(hevc->frame_dur); + hevc->last_pts = vf->pts; + + if (vf->pts_us64 != 0) + hevc->last_lookup_pts_us64 = vf->pts_us64; + + if ((hevc->pts_mode == PTS_NONE_REF_USE_DURATION) + && (slice_type != 2)) { + vf->pts_us64 = + hevc->last_pts_us64 + + (DUR2PTS(hevc->frame_dur) * 100 / 9); + } + hevc->last_pts_us64 = vf->pts_us64; + if ((get_dbg_flag(hevc) & H265_DEBUG_OUT_PTS) != 0) { + hevc_print(hevc, 0, + "H265 dec out pts: vf->pts=%d, vf->pts_us64 = %lld, ts: %llu\n", + vf->pts, vf->pts_us64, vf->timestamp); + } + + /* + *vf->index: + *(1) vf->type is VIDTYPE_INTERLACE + * and vf->canvas0Addr != vf->canvas1Addr, + * vf->index[7:0] is the index of top pic + * vf->index[15:8] is the index of bot pic + *(2) other cases, + * only vf->index[7:0] is used + * vf->index[15:8] == 0xff + */ + vf->index = 0xff00 | pic->index; +#if 1 +/*SUPPORT_10BIT*/ + if (pic->double_write_mode & 0x10) { + /* double write only */ + vf->compBodyAddr = 0; + vf->compHeadAddr = 0; + } else { + + if (hevc->mmu_enable) { + vf->compBodyAddr = 0; + vf->compHeadAddr = pic->header_adr; + } else { + vf->compBodyAddr = pic->mc_y_adr; /*body adr*/ + vf->compHeadAddr = pic->mc_y_adr + + pic->losless_comp_body_size; + vf->mem_head_handle = NULL; + } + + /*head adr*/ + vf->canvas0Addr = vf->canvas1Addr = 0; + } + if (pic->double_write_mode) { + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; + vf->type |= nv_order; + + if (v4l_output_dw_with_compress(hevc, pic->double_write_mode)) { + vf->type |= VIDTYPE_COMPRESS; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag && + (get_dbg_flag(hevc) + & H265_CFG_CANVAS_IN_DECODE) == 0) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + pic->canvas_config[0]; + vf->canvas0_config[1] = + pic->canvas_config[1]; + + 
vf->canvas1_config[0] = + pic->canvas_config[0]; + vf->canvas1_config[1] = + pic->canvas_config[1]; + + } else +#endif + vf->canvas0Addr = vf->canvas1Addr + = spec2canvas(pic); + } else { + vf->canvas0Addr = vf->canvas1Addr = 0; + vf->type = VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; + if (hevc->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + vf->compWidth = pic->width; + vf->compHeight = pic->height; + update_vf_memhandle(hevc, vf, pic); + switch (pic->bit_depth_luma) { + case 9: + vf->bitdepth = BITDEPTH_Y9; + break; + case 10: + vf->bitdepth = BITDEPTH_Y10; + break; + default: + vf->bitdepth = BITDEPTH_Y8; + break; + } + switch (pic->bit_depth_chroma) { + case 9: + vf->bitdepth |= (BITDEPTH_U9 | BITDEPTH_V9); + break; + case 10: + vf->bitdepth |= (BITDEPTH_U10 | BITDEPTH_V10); + break; + default: + vf->bitdepth |= (BITDEPTH_U8 | BITDEPTH_V8); + break; + } + if ((vf->type & VIDTYPE_COMPRESS) == 0) + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + if (pic->mem_saving_mode == 1) + vf->bitdepth |= BITDEPTH_SAVING_MODE; +#else + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; + vf->type |= nv_order; + vf->canvas0Addr = vf->canvas1Addr = spec2canvas(pic); +#endif + set_frame_info(hevc, vf, pic); + if (hevc->discard_dv_data) { + vf->discard_dv_data = true; + } + + vf->width = pic->width; + vf->height = pic->height; + + if (force_w_h != 0) { + vf->width = (force_w_h >> 16) & 0xffff; + vf->height = force_w_h & 0xffff; + } + if (force_fps & 0x100) { + u32 rate = force_fps & 0xff; + + if (rate) + vf->duration = 96000/rate; + else + vf->duration = 0; + } + if (force_fps & 0x200) { + vf->pts = 0; + vf->pts_us64 = 0; + } + if (!vdec->vbuf.use_ptsserv && vdec_stream_based(vdec)) { + vf->pts_us64 = stream_offset; + vf->pts = 0; + } + /* + * !!! to do ... 
+ * need move below code to get_new_pic(), + * hevc->xxx can only be used by current decoded pic + */ + if (pic->conformance_window_flag && + (get_dbg_flag(hevc) & + H265_DEBUG_IGNORE_CONFORMANCE_WINDOW) == 0) { + unsigned int SubWidthC, SubHeightC; + + switch (pic->chroma_format_idc) { + case 1: + SubWidthC = 2; + SubHeightC = 2; + break; + case 2: + SubWidthC = 2; + SubHeightC = 1; + break; + default: + SubWidthC = 1; + SubHeightC = 1; + break; + } + vf->width -= SubWidthC * + (pic->conf_win_left_offset + + pic->conf_win_right_offset); + vf->height -= SubHeightC * + (pic->conf_win_top_offset + + pic->conf_win_bottom_offset); + + vf->compWidth -= SubWidthC * + (pic->conf_win_left_offset + + pic->conf_win_right_offset); + vf->compHeight -= SubHeightC * + (pic->conf_win_top_offset + + pic->conf_win_bottom_offset); + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, + "conformance_window %d, %d, %d, %d, %d => cropped width %d, height %d com_w %d com_h %d\n", + pic->chroma_format_idc, + pic->conf_win_left_offset, + pic->conf_win_right_offset, + pic->conf_win_top_offset, + pic->conf_win_bottom_offset, + vf->width, vf->height, vf->compWidth, vf->compHeight); + } + + vf->width = vf->width / + get_double_write_ratio(pic->double_write_mode); + vf->height = vf->height / + get_double_write_ratio(pic->double_write_mode); + + if (hevc->is_used_v4l && (vdec->prog_only || (!v4l2_ctx->vpp_is_need))) + pic->pic_struct = 0; + + vf->height <<= hevc->interlace_flag; + /* vf->compHeight <<= hevc->interlace_flag; */ + vf->canvas0_config[0].height <<= hevc->interlace_flag; + vf->canvas0_config[1].height <<= hevc->interlace_flag; + +#ifdef HEVC_PIC_STRUCT_SUPPORT + if (pic->pic_struct == 3 || pic->pic_struct == 4) { + struct vframe_s *vf2; + + hevc_print(hevc, H265_DEBUG_PIC_STRUCT, + "pic_struct = %d index 0x%x\n", + pic->pic_struct, pic->index); + + if (kfifo_get(&hevc->newframe_q, &vf2) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + 
return -1; + } + + pic->vf_ref = 2; + vf->duration = vf->duration>>1; + memcpy(vf2, vf, sizeof(struct vframe_s)); + + if (v4l2_ctx->second_field_pts_mode) { + vf2->timestamp = 0; + } + if (pic->pic_struct == 3) { + vf->type = VIDTYPE_INTERLACE_TOP + | nv_order; + vf2->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order; + } else { + vf->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order; + vf2->type = VIDTYPE_INTERLACE_TOP + | nv_order; + } + if (pic->show_frame) { + put_vf_to_display_q(hevc, vf); + atomic_add(1, &hevc->vf_pre_count); + vdec_vframe_ready(hw_to_vdec(hevc), vf2); + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf2); + ATRACE_COUNTER(hevc->trace.pts_name, vf2->timestamp); + } else { + vh265_vf_put(vf, vdec); + vh265_vf_put(vf2, vdec); + atomic_add(2, &hevc->vf_get_count); + atomic_add(2, &hevc->vf_pre_count); + return 0; + } + } else if (pic->pic_struct == 5 + || pic->pic_struct == 6) { + struct vframe_s *vf2, *vf3; + + hevc_print(hevc, H265_DEBUG_PIC_STRUCT, + "pic_struct = %d index 0x%x\n", + pic->pic_struct, pic->index); + + if (kfifo_get(&hevc->newframe_q, &vf2) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + if (kfifo_get(&hevc->newframe_q, &vf3) == 0) { + hevc_print(hevc, 0, + "fatal error, no available buffer slot."); + return -1; + } + pic->vf_ref = 3; + vf->duration = vf->duration/3; + memcpy(vf2, vf, sizeof(struct vframe_s)); + memcpy(vf3, vf, sizeof(struct vframe_s)); + + if (v4l2_ctx->second_field_pts_mode) { + vf2->timestamp = 0; + vf3->timestamp = 0; + } + + if (pic->pic_struct == 5) { + vf->type = VIDTYPE_INTERLACE_TOP + | nv_order; + vf2->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order; + vf3->type = VIDTYPE_INTERLACE_TOP + | nv_order; + } else { + vf->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order; + vf2->type = VIDTYPE_INTERLACE_TOP + | nv_order; + vf3->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order; + } + if (pic->show_frame) { + put_vf_to_display_q(hevc, vf); + atomic_add(1, &hevc->vf_pre_count); 
+ vdec_vframe_ready(hw_to_vdec(hevc), vf2); + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf2); + ATRACE_COUNTER(hevc->trace.pts_name, vf2->timestamp); + atomic_add(1, &hevc->vf_pre_count); + vdec_vframe_ready(hw_to_vdec(hevc), vf3); + kfifo_put(&hevc->display_q, + (const struct vframe_s *)vf3); + ATRACE_COUNTER(hevc->trace.pts_name, vf3->timestamp); + } else { + vh265_vf_put(vf, vdec); + vh265_vf_put(vf2, vdec); + vh265_vf_put(vf3, vdec); + atomic_add(3, &hevc->vf_get_count); + atomic_add(3, &hevc->vf_pre_count); + return 0; + } + } else if (pic->pic_struct == 9 + || pic->pic_struct == 10) { + hevc_print(hevc, H265_DEBUG_PIC_STRUCT, + "pic_struct = %d index 0x%x\n", + pic->pic_struct, pic->index); + + pic->vf_ref = 1; + /* process previous pending vf*/ + process_pending_vframe(hevc, + pic, (pic->pic_struct == 9)); + + if (pic->show_frame) { + decoder_do_frame_check(vdec, vf); + vdec_vframe_ready(vdec, vf); + /* process current vf */ + kfifo_put(&hevc->pending_q, + (const struct vframe_s *)vf); + if (pic->pic_struct == 9) { + vf->type = VIDTYPE_INTERLACE_TOP + | nv_order | VIDTYPE_VIU_FIELD; + process_pending_vframe(hevc, + hevc->pre_bot_pic, 0); + } else { + vf->type = VIDTYPE_INTERLACE_BOTTOM | + nv_order | VIDTYPE_VIU_FIELD; + vf->index = (pic->index << 8) | 0xff; + process_pending_vframe(hevc, + hevc->pre_top_pic, 1); + } + + if (atomic_read(&hevc->vf_pre_count) == 0) + atomic_add(1, &hevc->vf_pre_count); + + /**/ + if (pic->pic_struct == 9) + hevc->pre_top_pic = pic; + else + hevc->pre_bot_pic = pic; + } else { + vh265_vf_put(vf, vdec); + atomic_add(1, &hevc->vf_get_count); + atomic_add(1, &hevc->vf_pre_count); + return 0; + } + } else if (pic->pic_struct == 11 + || pic->pic_struct == 12) { + + hevc_print(hevc, H265_DEBUG_PIC_STRUCT, + "pic_struct = %d index 0x%x\n", + pic->pic_struct, pic->index); + pic->vf_ref = 1; + + /* process previous pending vf*/ + process_pending_vframe(hevc, pic, + (pic->pic_struct == 11)); + + /* put current into pending q 
*/ + if (pic->pic_struct == 11) + vf->type = VIDTYPE_INTERLACE_TOP | + nv_order | VIDTYPE_VIU_FIELD; + else { + vf->type = VIDTYPE_INTERLACE_BOTTOM | + nv_order | VIDTYPE_VIU_FIELD; + vf->index = (pic->index << 8) | 0xff; + } + if (pic->show_frame) { + decoder_do_frame_check(vdec, vf); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hevc->pending_q, + (const struct vframe_s *)vf); + if (atomic_read(&hevc->vf_pre_count) == 0) + atomic_add(1, &hevc->vf_pre_count); + + /**/ + if (pic->pic_struct == 11) + hevc->pre_top_pic = pic; + else + hevc->pre_bot_pic = pic; + } else { + vh265_vf_put(vf, vdec); + atomic_add(1, &hevc->vf_get_count); + atomic_add(1, &hevc->vf_pre_count); + return 0; + } + } else { + pic->vf_ref = 1; + + hevc_print(hevc, H265_DEBUG_PIC_STRUCT, + "pic_struct = %d index 0x%x\n", + pic->pic_struct, pic->index); + + switch (pic->pic_struct) { + case 7: + vf->duration <<= 1; + break; + case 8: + vf->duration = vf->duration * 3; + break; + case 1: + vf->type = VIDTYPE_INTERLACE_TOP | + nv_order | VIDTYPE_VIU_FIELD; + process_pending_vframe(hevc, pic, 1); + hevc->pre_top_pic = pic; + break; + case 2: + vf->type = VIDTYPE_INTERLACE_BOTTOM + | nv_order + | VIDTYPE_VIU_FIELD; + process_pending_vframe(hevc, pic, 0); + hevc->pre_bot_pic = pic; + break; + } + + if (pic->show_frame) { + put_vf_to_display_q(hevc, vf); + } else { + vh265_vf_put(vf, vdec); + atomic_add(1, &hevc->vf_get_count); + atomic_add(1, &hevc->vf_pre_count); + return 0; + } + } +#else + vf->type_original = vf->type; + pic->vf_ref = 1; + put_vf_to_display_q(hevc, vf); +#endif + ATRACE_COUNTER(hevc->trace.new_q_name, kfifo_len(&hevc->newframe_q)); + ATRACE_COUNTER(hevc->trace.disp_q_name, kfifo_len(&hevc->display_q)); + /*count info*/ + vdec_count_info(hevc->gvs, 0, stream_offset); + if (pic->slice_type == I_SLICE) { + hevc->gvs->i_decoded_frames++; + vf->frame_type |= V4L2_BUF_FLAG_KEYFRAME; + } else if (pic->slice_type == P_SLICE) { + hevc->gvs->p_decoded_frames++; + vf->frame_type |= 
V4L2_BUF_FLAG_PFRAME; + } else if (pic->slice_type == B_SLICE) { + hevc->gvs->b_decoded_frames++; + vf->frame_type |= V4L2_BUF_FLAG_BFRAME; + } + hevc_update_gvs(hevc); + memcpy(&tmp4x, hevc->gvs, sizeof(struct vdec_info)); + tmp4x.bit_depth_luma = pic->bit_depth_luma; + tmp4x.bit_depth_chroma = pic->bit_depth_chroma; + tmp4x.double_write_mode = pic->double_write_mode; + vdec_fill_vdec_frame(vdec, &hevc->vframe_qos, &tmp4x, vf, pic->hw_decode_time); + vdec->vdec_fps_detec(vdec->id); + hevc_print(hevc, H265_DEBUG_BUFMGR, + "%s(type %d index 0x%x poc %d/%d) pts(%d,%d) dur %d\n", + __func__, vf->type, vf->index, + get_pic_poc(hevc, vf->index & 0xff), + get_pic_poc(hevc, (vf->index >> 8) & 0xff), + vf->pts, vf->pts_us64, + vf->duration); + if (pic->pic_struct == 10 || pic->pic_struct == 12) { + index = (vf->index >> 8) & 0xff; + } else { + index = vf->index & 0xff; + } +#ifdef AUX_DATA_CRC + if (index <= MAX_REF_PIC_NUM) + decoder_do_aux_data_check(vdec, hevc->m_PIC[index]->aux_data_buf, + hevc->m_PIC[index]->aux_data_size); +#endif + + hevc_print(hevc, H265_DEBUG_PRINT_SEI, + "aux_data_size:%d, signal_type: %d, sei_present_flag: %d\n", + hevc->m_PIC[index]->aux_data_size, hevc->video_signal_type, hevc->sei_present_flag); + + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) { + int i = 0; + PR_INIT(128); + for (i = 0; i < hevc->m_PIC[index]->aux_data_size; i++) { + PR_FILL("%02x ", hevc->m_PIC[index]->aux_data_buf[i]); + if (((i + 1) & 0xf) == 0) + PR_INFO(hevc->index); + } + PR_INFO(hevc->index); + } + + if (hevc->is_used_v4l) { + if ((hevc->m_PIC[index]->aux_data_size == 0) && + (pic->slice_type == I_SLICE) && + (atomic_read(&hevc->vf_pre_count) == 1)) { + hevc->no_need_aux_data = true; + } + + if (hevc->no_need_aux_data) { + v4l2_ctx->aux_infos.free_one_sei_buffer(v4l2_ctx, + &hevc->m_PIC[index]->aux_data_buf, + &hevc->m_PIC[index]->aux_data_size, + hevc->m_PIC[index]->ctx_buf_idx); + } else { + if (!hevc->discard_dv_data) + 
v4l2_ctx->aux_infos.bind_dv_buffer(v4l2_ctx, &vf->src_fmt.comp_buf, + &vf->src_fmt.md_buf); + + update_vframe_src_fmt(vf, + hevc->m_PIC[index]->aux_data_buf, + hevc->m_PIC[index]->aux_data_size, + hevc->dv_duallayer, hevc->provider_name, NULL); + } + } + + /*if (pic->vf_ref == hevc->vf_pre_count) {*/ + if (hevc->kpi_first_i_decoded == 0) { + hevc->kpi_first_i_decoded = 1; + pr_debug("[vdec_kpi][%s] First I frame decoded.\n", + __func__); + } + + if (without_display_mode == 0) { + if (hevc->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vh265_vf_put(vh265_vf_get(vdec), vdec); + } else { + if (hevc->send_frame_flag == 1) { + while (kfifo_len(&hevc->display_q)) { + if (hevc->pair_fb[0] != NULL && hevc->pair_fb[1] != NULL) { + set_meta_data_to_vf(vf, UVM_META_DATA_VF_BASE_INFOS, hevc->v4l2_ctx); + ATRACE_COUNTER("VC_OUT_DEC-submit", hevc->pair_fb[0]->buf_idx); + hevc->pair_fb[0]->task->submit(hevc->pair_fb[0]->task, TASK_TYPE_DEC); + ATRACE_COUNTER("VC_OUT_DEC-submit", hevc->pair_fb[1]->buf_idx); + hevc->pair_fb[1]->task->submit(hevc->pair_fb[1]->task, TASK_TYPE_DEC); + clear_pair_fb(hevc); + } else { + set_meta_data_to_vf(vf, UVM_META_DATA_VF_BASE_INFOS, hevc->v4l2_ctx); + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } + } + } + } else { + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } + else + vh265_vf_put(vh265_vf_get(vdec), vdec); + } + + return 0; +} + +static int post_picture_early(struct vdec_s *vdec, int index) +{ + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; + struct PIC_s *pic = hevc->m_PIC[index]; + + if (!hevc->enable_fence) + return 0; + + /* create fence for each buffers. 
*/ + if (vdec_timeline_create_fence(vdec->sync)) + return -1; + + pic->fence = vdec->sync->fence; + pic->stream_offset = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + + if (hevc->chunk) { + pic->pts = hevc->chunk->pts; + pic->pts64 = hevc->chunk->pts64; + pic->timestamp = hevc->chunk->timestamp; + } + pic->show_frame = true; + post_video_frame(vdec, pic); + + display_frame_count[hevc->index]++; + + return 0; +} + +static int prepare_display_buf(struct vdec_s *vdec, struct PIC_s *frame) +{ + struct hevc_state_s *hevc = + (struct hevc_state_s *)vdec->private; + + if (hevc->enable_fence) { + int i, j, used_size, ret; + int signed_count = 0; + struct vframe_s *signed_fence[VF_POOL_SIZE]; + + post_prepare_process(vdec, frame); + + if (!frame->show_frame) + pr_info("do not display.\n"); + + hevc->m_PIC[frame->index]->vf_ref = 1; + + /* notify signal to wake up wq of fence. */ + vdec_timeline_increase(vdec->sync, 1); + mutex_lock(&hevc->fence_mutex); + used_size = hevc->fence_vf_s.used_size; + if (used_size) { + for (i = 0, j = 0; i < VF_POOL_SIZE && j < used_size; i++) { + if (hevc->fence_vf_s.fence_vf[i] != NULL) { + ret = dma_fence_get_status(hevc->fence_vf_s.fence_vf[i]->fence); + if (ret == 1) { + signed_fence[signed_count] = hevc->fence_vf_s.fence_vf[i]; + hevc->fence_vf_s.fence_vf[i] = NULL; + hevc->fence_vf_s.used_size--; + signed_count++; + } + j++; + } + } + } + mutex_unlock(&hevc->fence_mutex); + if (signed_count != 0) { + for (i = 0; i < signed_count; i++) + vh265_vf_put(signed_fence[i], vdec); + } + return 0; + } + + if (post_prepare_process(vdec, frame)) + return -1; + + if (post_video_frame(vdec, frame)) + return -1; + + display_frame_count[hevc->index]++; + return 0; +} + +static bool is_avaliable_buffer(struct hevc_state_s *hevc); + +static int notify_v4l_eos(struct vdec_s *vdec) +{ + struct hevc_state_s *hw = (struct hevc_state_s *)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = &hw->vframe_dummy; + 
struct vdec_v4l2_buffer *fb = NULL; + static struct PIC_s *pic = NULL; + ulong expires; + + if (hw->eos) { + expires = jiffies + msecs_to_jiffies(2000); + while (!is_avaliable_buffer(hw)) { + if (time_after(jiffies, expires)) { + pr_err("[%d] H265 isn't enough buff for notify eos.\n", ctx->id); + return 0; + } + } + + pic = v4l_get_new_pic(hw, NULL); + if (NULL == pic) { + pr_err("[%d] H265 EOS get free buff fail.\n", ctx->id); + return 0; + } + + fb = (struct vdec_v4l2_buffer *) + pic->cma_alloc_addr; + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + vf->v4l_mem_handle = (ulong)fb; + + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + + pr_info("[%d] H265 EOS notify.\n", ctx->id); + } + + return 0; +} + +static void process_nal_sei(struct hevc_state_s *hevc, + int payload_type, int payload_size) +{ + unsigned short data; + + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "\tsei message: payload_type = 0x%02x, payload_size = 0x%02x\n", + payload_type, payload_size); + + if (payload_type == 137) { + int i, j; + /* MASTERING_DISPLAY_COLOUR_VOLUME */ + if (payload_size >= 24) { + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "\tsei MASTERING_DISPLAY_COLOUR_VOLUME available\n"); + for (i = 0; i < 3; i++) { + for (j = 0; j < 2; j++) { + data = + (READ_HREG(HEVC_SHIFTED_DATA) >> 16); + hevc->primaries[i][j] = data; + WRITE_HREG(HEVC_SHIFT_COMMAND, + (1<<7)|16); + if (get_dbg_flag(hevc) & + H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "\t\tprimaries[%1d][%1d] = %04x\n", + i, j, hevc->primaries[i][j]); + } + } + for (i = 0; i < 2; i++) { + data = (READ_HREG(HEVC_SHIFTED_DATA) >> 16); + hevc->white_point[i] = data; + WRITE_HREG(HEVC_SHIFT_COMMAND, (1<<7)|16); + if (get_dbg_flag(hevc) & H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + 
"\t\twhite_point[%1d] = %04x\n", + i, hevc->white_point[i]); + } + for (i = 0; i < 2; i++) { + data = (READ_HREG(HEVC_SHIFTED_DATA) >> 16); + hevc->luminance[i] = data << 16; + WRITE_HREG(HEVC_SHIFT_COMMAND, + (1<<7)|16); + data = + (READ_HREG(HEVC_SHIFTED_DATA) >> 16); + hevc->luminance[i] |= data; + WRITE_HREG(HEVC_SHIFT_COMMAND, + (1<<7)|16); + if (get_dbg_flag(hevc) & + H265_DEBUG_PRINT_SEI) + hevc_print(hevc, 0, + "\t\tluminance[%1d] = %08x\n", + i, hevc->luminance[i]); + } + hevc->sei_present_flag |= SEI_MASTER_DISPLAY_COLOR_MASK; + } + payload_size -= 24; + while (payload_size > 0) { + data = (READ_HREG(HEVC_SHIFTED_DATA) >> 24); + payload_size--; + WRITE_HREG(HEVC_SHIFT_COMMAND, (1<<7)|8); + hevc_print(hevc, 0, "\t\tskip byte %02x\n", data); + } + } +} + +static int hevc_recover(struct hevc_state_s *hevc) +{ + int ret = -1; + u32 rem; + u64 shift_byte_count64; + unsigned int hevc_shift_byte_count; + unsigned int hevc_stream_start_addr; + unsigned int hevc_stream_end_addr; + unsigned int hevc_stream_rd_ptr; + unsigned int hevc_stream_wr_ptr; + unsigned int hevc_stream_control; + unsigned int hevc_stream_fifo_ctl; + unsigned int hevc_stream_buf_size; + struct vdec_s *vdec = hw_to_vdec(hevc); + + mutex_lock(&vh265_mutex); +#if 0 + for (i = 0; i < (hevc->debug_ptr_size / 2); i += 4) { + int ii; + + for (ii = 0; ii < 4; ii++) + hevc_print(hevc, 0, + "%04x ", hevc->debug_ptr[i + 3 - ii]); + if (((i + ii) & 0xf) == 0) + hevc_print(hevc, 0, "\n"); + } +#endif +#define ES_VID_MAN_RD_PTR (1<<0) + if (!hevc->init_flag) { + hevc_print(hevc, 0, "h265 has stopped, recover return!\n"); + mutex_unlock(&vh265_mutex); + return ret; + } + amhevc_stop(); + msleep(20); + ret = 0; + /* reset */ + if (vdec_stream_based(vdec)) { + STBUF_WRITE(&vdec->vbuf, set_rp, + READ_VREG(HEVC_STREAM_RD_PTR)); + + if (!vdec->vbuf.no_parser) + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, + ES_VID_MAN_RD_PTR); + } + + hevc_stream_start_addr = READ_VREG(HEVC_STREAM_START_ADDR); + hevc_stream_end_addr = 
READ_VREG(HEVC_STREAM_END_ADDR); + hevc_stream_rd_ptr = READ_VREG(HEVC_STREAM_RD_PTR); + hevc_stream_wr_ptr = READ_VREG(HEVC_STREAM_WR_PTR); + hevc_stream_control = READ_VREG(HEVC_STREAM_CONTROL); + hevc_stream_fifo_ctl = READ_VREG(HEVC_STREAM_FIFO_CTL); + hevc_stream_buf_size = hevc_stream_end_addr - hevc_stream_start_addr; + + /* HEVC streaming buffer will reset and restart + * from current hevc_stream_rd_ptr position + */ + /* calculate HEVC_SHIFT_BYTE_COUNT value with the new position. */ + hevc_shift_byte_count = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + if ((hevc->shift_byte_count_lo & (1 << 31)) + && ((hevc_shift_byte_count & (1 << 31)) == 0)) + hevc->shift_byte_count_hi++; + + hevc->shift_byte_count_lo = hevc_shift_byte_count; + shift_byte_count64 = ((u64)(hevc->shift_byte_count_hi) << 32) | + hevc->shift_byte_count_lo; + div_u64_rem(shift_byte_count64, hevc_stream_buf_size, &rem); + shift_byte_count64 -= rem; + shift_byte_count64 += hevc_stream_rd_ptr - hevc_stream_start_addr; + + if (rem > (hevc_stream_rd_ptr - hevc_stream_start_addr)) + shift_byte_count64 += hevc_stream_buf_size; + + hevc->shift_byte_count_lo = (u32)shift_byte_count64; + hevc->shift_byte_count_hi = (u32)(shift_byte_count64 >> 32); + + WRITE_VREG(DOS_SW_RESET3, + /* (1<<2)| */ + (1 << 3) | (1 << 4) | (1 << 8) | + (1 << 11) | (1 << 12) | (1 << 14) + | (1 << 15) | (1 << 17) | (1 << 18) | (1 << 19)); + WRITE_VREG(DOS_SW_RESET3, 0); + + WRITE_VREG(HEVC_STREAM_START_ADDR, hevc_stream_start_addr); + WRITE_VREG(HEVC_STREAM_END_ADDR, hevc_stream_end_addr); + WRITE_VREG(HEVC_STREAM_RD_PTR, hevc_stream_rd_ptr); + WRITE_VREG(HEVC_STREAM_WR_PTR, hevc_stream_wr_ptr); + WRITE_VREG(HEVC_STREAM_CONTROL, hevc_stream_control); + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, hevc->shift_byte_count_lo); + WRITE_VREG(HEVC_STREAM_FIFO_CTL, hevc_stream_fifo_ctl); + + hevc_config_work_space_hw(hevc); + decoder_hw_reset(); + + hevc->have_vps = 0; + hevc->have_sps = 0; + hevc->have_pps = 0; + + hevc->have_valid_start_slice = 0; + + 
if (get_double_write_mode(hevc) & 0x10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, + 0x1 << 31 /*/Enable NV21 reference read mode for MC*/ + ); + + WRITE_VREG(HEVC_WAIT_FLAG, 1); + /* clear mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + /* enable mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 1); + /* disable PSCALE for hardware sharing */ + WRITE_VREG(HEVC_PSCALE_CTRL, 0); + + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_VID_MAN_RD_PTR); + + WRITE_VREG(DEBUG_REG1, 0x0); + + if ((error_handle_policy & 1) == 0) { + if ((error_handle_policy & 4) == 0) { + /* ucode auto mode, and do not check vps/sps/pps/idr */ + WRITE_VREG(NAL_SEARCH_CTL, + 0xc); + } else { + WRITE_VREG(NAL_SEARCH_CTL, 0x1);/* manual parser NAL */ + } + } else { + WRITE_VREG(NAL_SEARCH_CTL, 0x1);/* manual parser NAL */ + } + + if (get_dbg_flag(hevc) & H265_DEBUG_NO_EOS_SEARCH_DONE) + WRITE_VREG(NAL_SEARCH_CTL, READ_VREG(NAL_SEARCH_CTL) | 0x10000); + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) + | ((parser_sei_enable & 0x7) << 17)); +/*#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION*/ + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | + ((parser_dolby_vision_enable & 0x1) << 20)); +/*#endif*/ + config_decode_mode(hevc); + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + + /* if (amhevc_loadmc(vh265_mc) < 0) { */ + /* amhevc_disable(); */ + /* return -EBUSY; */ + /* } */ +#if 0 + for (i = 0; i < (hevc->debug_ptr_size / 2); i += 4) { + int ii; + + for (ii = 0; ii < 4; ii++) { + /* hevc->debug_ptr[i+3-ii]=ttt++; */ + hevc_print(hevc, 0, + "%04x ", hevc->debug_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + hevc_print(hevc, 0, "\n"); + } +#endif + init_pic_list_hw(hevc); + + hevc_print(hevc, 0, "%s HEVC_SHIFT_BYTE_COUNT=0x%x\n", __func__, + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + +#ifdef SWAP_HEVC_UCODE + if (!tee_enabled() && hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, hevc->mc_dma_handle); + 
/*pr_info("write swap buffer %x\n", (u32)(hevc->mc_dma_handle));*/ + } +#endif + amhevc_start(); + + /* skip, search next start code */ + WRITE_VREG(HEVC_WAIT_FLAG, READ_VREG(HEVC_WAIT_FLAG) & (~0x2)); + hevc->skip_flag = 1; +#ifdef ERROR_HANDLE_DEBUG + if (dbg_nal_skip_count & 0x20000) { + dbg_nal_skip_count &= ~0x20000; + mutex_unlock(&vh265_mutex); + return ret; + } +#endif + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); +#ifdef MULTI_INSTANCE_SUPPORT + if (!hevc->m_ins_flag) +#endif + hevc->first_pic_after_recover = 1; + mutex_unlock(&vh265_mutex); + return ret; +} + +static void dump_aux_buf(struct hevc_state_s *hevc) +{ + int i; + unsigned short *aux_adr = + (unsigned short *) + hevc->aux_addr; + unsigned int aux_size = + (READ_VREG(HEVC_AUX_DATA_SIZE) + >> 16) << 4; + PR_INIT(128); + + if (hevc->prefix_aux_size > 0) { + hevc_print(hevc, 0, + "prefix aux: (size %d)\n", + aux_size); + if (aux_size > hevc->prefix_aux_size) { + hevc_print(hevc, 0, + "%s:aux_size(%d) is over size\n", __func__, aux_size); + return ; + } + for (i = 0; i < (aux_size >> 1); i++) { + PR_FILL("%04x ", *(aux_adr + i)); + if (((i + 1) & 0xf) == 0) + PR_INFO(hevc->index);; + } + PR_INFO(hevc->index);; + } + if (hevc->suffix_aux_size > 0) { + aux_adr = (unsigned short *) + (hevc->aux_addr + + hevc->prefix_aux_size); + aux_size = + (READ_VREG(HEVC_AUX_DATA_SIZE) & 0xffff) + << 4; + hevc_print(hevc, 0, + "suffix aux: (size %d)\n", + aux_size); + if (aux_size > hevc->suffix_aux_size) { + hevc_print(hevc, 0, + "%s:aux_size(%d) is over size\n", __func__, aux_size); + return ; + } + for (i = 0; i < + (aux_size >> 1); i++) { + PR_FILL("%04x ", *(aux_adr + i)); + if (((i + 1) & 0xf) == 0) + PR_INFO(hevc->index); + } + PR_INFO(hevc->index); + } +} + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static void dolby_get_meta(struct hevc_state_s *hevc) +{ + struct vdec_s *vdec = hw_to_vdec(hevc); + + if 
(get_dbg_flag(hevc) & + H265_DEBUG_PRINT_SEI) + dump_aux_buf(hevc); + if (vdec->dolby_meta_with_el || vdec->slave) { + set_aux_data(hevc, + hevc->cur_pic, 0, 0); + } else if (vdec->master) { + struct hevc_state_s *hevc_ba = + (struct hevc_state_s *) + vdec->master->private; + /*do not use hevc_ba*/ + set_aux_data(hevc, + hevc_ba->cur_pic, + 0, 1); + set_aux_data(hevc, + hevc->cur_pic, 0, 2); + } else if (vdec_frame_based(vdec)) { + set_aux_data(hevc, + hevc->cur_pic, 1, 0); + } +} +#endif + +static void read_decode_info(struct hevc_state_s *hevc) +{ + uint32_t decode_info = + READ_HREG(HEVC_DECODE_INFO); + hevc->start_decoding_flag |= + (decode_info & 0xff); + hevc->rps_set_id = (decode_info >> 8) & 0xff; +} + +static int vh265_get_ps_info(struct hevc_state_s *hevc, + union param_u *rpm_param, + struct aml_vdec_ps_infos *ps) +{ + u32 SubWidthC, SubHeightC; + u32 width = rpm_param->p.pic_width_in_luma_samples; + u32 height = rpm_param->p.pic_height_in_luma_samples; + + switch (rpm_param->p.chroma_format_idc) { + case 1: + SubWidthC = 2; + SubHeightC = 2; + break; + case 2: + SubWidthC = 2; + SubHeightC = 1; + break; + default: + SubWidthC = 1; + SubHeightC = 1; + break; + } + + width -= SubWidthC * + (rpm_param->p.conf_win_left_offset + + rpm_param->p.conf_win_right_offset); + height -= SubHeightC * + (rpm_param->p.conf_win_top_offset + + rpm_param->p.conf_win_bottom_offset); + + hevc->last_width = rpm_param->p.pic_width_in_luma_samples; + hevc->last_height = rpm_param->p.pic_height_in_luma_samples; + hevc->sps_num_reorder_pics_0 = + rpm_param->p.sps_num_reorder_pics_0; + + height <<= hevc->interlace_flag; + ps->visible_width = width; + ps->visible_height = height; + ps->coded_width = ALIGN(width, 64); + ps->coded_height = ALIGN(height, 64); + ps->field = hevc->interlace_flag ? 
V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE; + ps->dpb_frames = v4l_parser_work_pic_num(hevc); + ps->dpb_margin = get_dynamic_buf_num_margin(hevc); + + return 0; +} + +static int vh265_get_cfg_info(struct hevc_state_s *hevc, + union param_u *rpm_param, + struct aml_vdec_cfg_infos *cfg) +{ + /* force h265 interlace video to double write 1*/ + if (hevc->interlace_flag) { + cfg->double_write_mode = 1; + hevc->double_write_mode = 1; + hevc_print(hevc, 0, + "hevc interlace force dw 1\n"); + } + cfg->init_width = rpm_param->p.pic_width_in_luma_samples; + cfg->init_height = rpm_param->p.pic_height_in_luma_samples + << hevc->interlace_flag; + + return 0; +} + +static void get_comp_buf_info(struct hevc_state_s *hevc, + struct vdec_comp_buf_info *info) +{ + u16 bit_depth = hevc->param.p.bit_depth; + int w = hevc->param.p.pic_width_in_luma_samples; + int h = hevc->param.p.pic_height_in_luma_samples; + + info->max_size = hevc_max_mmu_buf_size( + hevc->max_pic_w, + hevc->max_pic_h); + info->header_size = hevc_get_header_size(w,h); + info->frame_buffer_size = hevc_mmu_page_num( + hevc, w, h, bit_depth == 0); + + pr_info("hevc get comp info: %d %d %d\n", + info->max_size, info->header_size, + info->frame_buffer_size); +} + +static int is_interlace(struct hevc_state_s *hevc) +{ + int pic_struct = (hevc->param.p.sei_frame_field_info >> 3) & 0xf; + int frame_field_info_present_flag = + (hevc->param.p.sei_frame_field_info >> 8) & 0x1; + + if ((hevc->param.p.profile_etc & 0xc) == 0x4 + && (frame_field_info_present_flag + && (pic_struct == 0 + || pic_struct == 7 + || pic_struct == 8))) + return 0; + else if ((hevc->param.p.profile_etc & 0xc) == 0x4) + return 1; + + return 0; +} + +static void hevc_interlace_check(struct hevc_state_s *hevc, + union param_u *rpm_param) +{ + int w, h; + + w = hevc->param.p.pic_width_in_luma_samples; + h = hevc->param.p.pic_height_in_luma_samples; + /* interlace check, 4k force no interlace */ + if ((interlace_enable != 0) && + (!IS_4K_SIZE(w, h)) && + 
(is_interlace(hevc))) { + hevc->interlace_flag = 1; + hevc->frame_ar = (hevc->pic_h * 0x100 / hevc->pic_w) * 2; + hevc_print(hevc, 0, + "interlace (%d, %d), profile_etc %x, ar 0x%x, dw %d\n", + hevc->pic_w, hevc->pic_h, hevc->param.p.profile_etc, hevc->frame_ar, + get_double_write_mode(hevc)); + } +} + +static int v4l_res_change(struct hevc_state_s *hevc, union param_u *rpm_param) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hevc->v4l2_ctx); + int i, ret = 0; + + if (ctx->param_sets_from_ucode) { + struct aml_vdec_ps_infos ps; + int width = rpm_param->p.pic_width_in_luma_samples; + int height = rpm_param->p.pic_height_in_luma_samples; + + if ((hevc->last_width != 0 && + hevc->last_height != 0) && + (hevc->last_width != width || + hevc->last_height != height)) { + hevc_print(hevc, 0, + "v4l_res_change Pic Width/Height Change (%d,%d)=>(%d,%d), interlace %d\n", + hevc->last_width, hevc->last_height, + width, + height, + hevc->interlace_flag); + + if (get_valid_double_write_mode(hevc) != 16) { + struct vdec_comp_buf_info info; + + get_comp_buf_info(hevc, &info); + vdec_v4l_set_comp_buf_info(ctx, &info); + } + vh265_get_ps_info(hevc, &hevc->param, &ps); + vdec_v4l_set_ps_infos(ctx, &ps); + vdec_v4l_res_ch_event(ctx); + hevc->v4l_params_parsed = false; + ctx->v4l_resolution_change = 1; + hevc->eos = 1; + hevc->resolution_change = true; + + /* + * marks frame valid on the dpb is the ouput state, + * then via flush_output all frames can be flushed out. 
+ */ + for (i = 0; i < MAX_REF_PIC_NUM; ++i) { + if ((hevc->m_PIC[i] == NULL) || + (hevc->m_PIC[i]->index == -1) || + (hevc->m_PIC[i]->BUF_index == -1)) + continue; + + if ((hevc->m_PIC[i]->POC != INVALID_POC) && + (hevc->m_PIC[i]->output_ready == 0) && + hevc->m_PIC[i]->referenced && + (hevc->m_PIC[i]->POC >= hevc->decoded_poc)) { + hevc->m_PIC[i]->output_mark = 1; + } + } + + flush_output(hevc, NULL); + //del_timer_sync(&hevc->timer); + ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__); + notify_v4l_eos(hw_to_vdec(hevc)); + ATRACE_COUNTER("V_ST_DEC-submit_eos", 0); + + ret = 1; + } + } + + return ret; +} + +static int hevc_skip_nal(struct hevc_state_s *hevc) +{ + if ((hevc->pic_h == 96) && (hevc->pic_w == 160) && + (get_double_write_mode(hevc) == 0x10)) { + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TXLX) { + if (hevc->skip_nal_count < skip_nal_count) + return 1; + } else { + if (hevc->skip_nal_count < 1) + return 1; + } + } + return 0; +} + +static irqreturn_t vh265_isr_thread_fn(int irq, void *data) +{ + struct hevc_state_s *hevc = (struct hevc_state_s *) data; + unsigned int dec_status = hevc->dec_status; + int i, ret; + + struct vdec_s *vdec = hw_to_vdec(hevc); + + if (dec_status == HEVC_SLICE_SEGMENT_DONE) { + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_START); + } + else if (dec_status == HEVC_DECPIC_DATA_DONE) { + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_THREAD_PIC_DONE_START); + } + + if (hevc->eos) + return IRQ_HANDLED; + if ( +#ifdef MULTI_INSTANCE_SUPPORT + (!hevc->m_ins_flag) && +#endif + hevc->error_flag == 1) { + if ((error_handle_policy & 0x10) == 0) { + if (hevc->cur_pic) { + int current_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff; + if (current_lcu_idx < + ((hevc->lcu_x_num*hevc->lcu_y_num)-1)) + hevc->cur_pic->error_mark = 1; + + } + } + if ((error_handle_policy & 1) == 0) { + hevc->error_skip_nal_count = 1; + /* manual search nal, skip error_skip_nal_count + * of nal and trigger the 
HEVC_NAL_SEARCH_DONE irq + */ + WRITE_VREG(NAL_SEARCH_CTL, + (error_skip_nal_count << 4) | 0x1); + } else { + hevc->error_skip_nal_count = error_skip_nal_count; + WRITE_VREG(NAL_SEARCH_CTL, 0x1);/* manual parser NAL */ + } + if ((get_dbg_flag(hevc) & H265_DEBUG_NO_EOS_SEARCH_DONE) +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + || vdec->master + || vdec->slave +#endif + ) { + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | 0x10000); + } + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) + | ((parser_sei_enable & 0x7) << 17)); +/*#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION*/ + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | + ((parser_dolby_vision_enable & 0x1) << 20)); +/*#endif*/ + config_decode_mode(hevc); + /* search new nal */ + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); + + /* hevc_print(hevc, 0, + *"%s: error handle\n", __func__); + */ + hevc->error_flag = 2; + return IRQ_HANDLED; + } else if ( +#ifdef MULTI_INSTANCE_SUPPORT + (!hevc->m_ins_flag) && +#endif + hevc->error_flag == 3) { + hevc_print(hevc, 0, "error_flag=3, hevc_recover\n"); + hevc_recover(hevc); + hevc->error_flag = 0; + + if ((error_handle_policy & 0x10) == 0) { + if (hevc->cur_pic) { + int current_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff; + if (current_lcu_idx < + ((hevc->lcu_x_num*hevc->lcu_y_num)-1)) + hevc->cur_pic->error_mark = 1; + + } + } + if ((error_handle_policy & 1) == 0) { + /* need skip some data when + * error_flag of 3 is triggered, + */ + /* to avoid hevc_recover() being called + * for many times at the same bitstream position + */ + hevc->error_skip_nal_count = 1; + /* manual search nal, skip error_skip_nal_count + * of nal and trigger the HEVC_NAL_SEARCH_DONE irq + */ + WRITE_VREG(NAL_SEARCH_CTL, + (error_skip_nal_count << 4) | 0x1); + } + + if ((error_handle_policy & 0x2) == 0) { + hevc->have_vps = 1; + hevc->have_sps = 1; + 
hevc->have_pps = 1; + } + return IRQ_HANDLED; + } + if (!hevc->m_ins_flag) { + i = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + if ((hevc->shift_byte_count_lo & (1 << 31)) + && ((i & (1 << 31)) == 0)) + hevc->shift_byte_count_hi++; + hevc->shift_byte_count_lo = i; + } +#ifdef MULTI_INSTANCE_SUPPORT + mutex_lock(&hevc->chunks_mutex); + if ((dec_status == HEVC_DECPIC_DATA_DONE || + dec_status == HEVC_FIND_NEXT_PIC_NAL || + dec_status == HEVC_FIND_NEXT_DVEL_NAL) + && (hevc->chunk)) { + hevc->cur_pic->pts = hevc->chunk->pts; + hevc->cur_pic->pts64 = hevc->chunk->pts64; + hevc->cur_pic->timestamp = hevc->chunk->timestamp; + } + mutex_unlock(&hevc->chunks_mutex); + + if (dec_status == HEVC_DECODE_BUFEMPTY || + dec_status == HEVC_DECODE_BUFEMPTY2) { + if (hevc->m_ins_flag) { + read_decode_info(hevc); + if (vdec_frame_based(hw_to_vdec(hevc))) { + hevc->empty_flag = 1; + /*suffix sei or dv meta*/ + set_aux_data(hevc, hevc->cur_pic, 1, 0); + goto pic_done; + } else { + if ( +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + vdec->master || + vdec->slave || +#endif + (data_resend_policy & 0x1)) { + hevc->dec_result = DEC_RESULT_AGAIN; + amhevc_stop(); + restore_decode_state(hevc); + } else + hevc->dec_result = DEC_RESULT_GET_DATA; + } + reset_process_time(hevc); + vdec_schedule_work(&hevc->work); + } + return IRQ_HANDLED; + } else if ((dec_status == HEVC_SEARCH_BUFEMPTY) || + (dec_status == HEVC_NAL_DECODE_DONE) + ) { + if (hevc->m_ins_flag) { + read_decode_info(hevc); + if (vdec_frame_based(hw_to_vdec(hevc))) { + /*hevc->dec_result = DEC_RESULT_GET_DATA;*/ + hevc->empty_flag = 1; + /*suffix sei or dv meta*/ + set_aux_data(hevc, hevc->cur_pic, 1, 0); + goto pic_done; + } else { + hevc->dec_result = DEC_RESULT_AGAIN; + amhevc_stop(); + restore_decode_state(hevc); + } + + reset_process_time(hevc); + vdec_schedule_work(&hevc->work); + } + + return IRQ_HANDLED; + } else if (dec_status == HEVC_DECPIC_DATA_DONE) { + if (hevc->m_ins_flag) { + struct PIC_s *pic; + struct PIC_s 
*pic_display; + int decoded_poc; + + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_time = + local_clock() - vdec->mvfrm->hw_decode_start; +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + if (hevc->detbuf_adr_virt && hevc->delrefill_check + && READ_VREG(HEVC_SAO_DBG_MODE0)) + hevc->delrefill_check = 2; + } +#endif + hevc->empty_flag = 0; +pic_done: + if (vdec->master == NULL && vdec->slave == NULL && + hevc->empty_flag == 0) { + hevc->over_decode = + (READ_VREG(HEVC_SHIFT_STATUS) >> 15) & 0x1; + if (hevc->over_decode) + hevc_print(hevc, 0, + "!!!Over decode %d\n", __LINE__); + } + if (input_frame_based(hw_to_vdec(hevc)) && + frmbase_cont_bitlevel != 0 && + (hevc->decode_size > READ_VREG(HEVC_SHIFT_BYTE_COUNT)) && + (hevc->decode_size - (READ_VREG(HEVC_SHIFT_BYTE_COUNT)) + > frmbase_cont_bitlevel)) { + check_pic_decoded_error(hevc, READ_VREG(HEVC_PARSER_LCU_START) & 0xffffff); + /*handle the case: multi pictures in one packet*/ + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s has more data index= %d, size=0x%x shiftcnt=0x%x)\n", + __func__, + hevc->decode_idx, hevc->decode_size, + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + start_process_time(hevc); + return IRQ_HANDLED; + } + + read_decode_info(hevc); + get_picture_qos_info(hevc); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + hevc->start_parser_type = 0; + hevc->switch_dvlayer_flag = 0; +#endif + hevc->decoded_poc = hevc->curr_POC; + hevc->decoding_pic = NULL; + hevc->dec_result = DEC_RESULT_DONE; +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) + if (hevc->delrefill_check != 2) +#endif + + amhevc_stop(); + + reset_process_time(hevc); + + if ((!input_stream_based(vdec) && + atomic_read(&hevc->vf_pre_count) == 0) || hevc->ip_mode) { + decoded_poc = hevc->curr_POC; + pic = get_pic_by_POC(hevc, decoded_poc); + if (pic && (pic->POC != INVALID_POC)) { + /*PB skip 
control */ + if (pic->error_mark == 0 + && hevc->PB_skip_mode == 1) { + /* start decoding after + * first I + */ + hevc->ignore_bufmgr_error |= 0x1; + } + if (hevc->ignore_bufmgr_error & 1) { + if (hevc->PB_skip_count_after_decoding > 0) { + hevc->PB_skip_count_after_decoding--; + } else { + /* start displaying */ + hevc->ignore_bufmgr_error |= 0x2; + } + } + if (hevc->mmu_enable + && ((hevc->double_write_mode & 0x10) == 0)) { + if (!hevc->m_ins_flag) { + hevc->used_4k_num = + READ_VREG(HEVC_SAO_MMU_STATUS) >> 16; + + if ((!is_skip_decoding(hevc, pic)) && + (hevc->used_4k_num >= 0) && + (hevc->cur_pic->scatter_alloc + == 1)) + recycle_mmu_buf_tail(hevc, false); + } + } + + pic->output_mark = 1; + pic->recon_mark = 1; + if (vdec->mvfrm) { + pic->frame_size = + vdec->mvfrm->frame_size; + pic->hw_decode_time = + (u32)vdec->mvfrm->hw_decode_time; + } + } + /*Detects the first frame whether has an over decode error*/ + if (vdec->master == NULL && vdec->slave == NULL && + hevc->empty_flag == 0) { + hevc->over_decode = + (READ_VREG(HEVC_SHIFT_STATUS) >> 15) & 0x1; + if (hevc->over_decode) + hevc_print(hevc, 0, + "!!!Over decode %d\n", __LINE__); + } + check_pic_decoded_error(hevc, + READ_VREG(HEVC_PARSER_LCU_START) & 0xffffff); + if (hevc->cur_pic != NULL && + (READ_VREG(HEVC_PARSER_LCU_START) & 0xffffff) == 0 + && (hevc->lcu_x_num * hevc->lcu_y_num != 1)) + hevc->cur_pic->error_mark = 1; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +force_output: +#endif + pic_display = output_pic(hevc, 1); + + if (pic_display) { + if ((pic_display->error_mark && + ((hevc->ignore_bufmgr_error & + 0x2) == 0)) + || (get_dbg_flag(hevc) & + H265_DEBUG_DISPLAY_CUR_FRAME) + || (get_dbg_flag(hevc) & + H265_DEBUG_NO_DISPLAY)) { + pic_display->output_ready = 0; + if (get_dbg_flag(hevc) & + H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "[BM] Display: POC %d, ", + pic_display->POC); + hevc_print_cont(hevc, 0, + "decoding index %d ==> ", + pic_display-> + decode_idx); + hevc_print_cont(hevc, 
0, + "Debug or err,recycle it\n"); + } + } else { + if ((pic_display-> + slice_type != 2) && !pic_display->ip_mode) { + pic_display->output_ready = 0; + } else { + prepare_display_buf + (hw_to_vdec(hevc), + pic_display); + hevc->first_pic_flag = 1; + } + } + } + } + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + vdec_schedule_work(&hevc->work); + } + + return IRQ_HANDLED; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + } else if (dec_status == HEVC_FIND_NEXT_PIC_NAL || + dec_status == HEVC_FIND_NEXT_DVEL_NAL) { + if (hevc->m_ins_flag) { + unsigned char next_parser_type = + READ_HREG(CUR_NAL_UNIT_TYPE) & 0xff; + read_decode_info(hevc); + + if (vdec->slave && + dec_status == HEVC_FIND_NEXT_DVEL_NAL) { + /*cur is base, found enhance*/ + struct hevc_state_s *hevc_el = + (struct hevc_state_s *) + vdec->slave->private; + hevc->switch_dvlayer_flag = 1; + hevc->no_switch_dvlayer_count = 0; + hevc_el->start_parser_type = + next_parser_type; + hevc_print(hevc, H265_DEBUG_DV, + "switch (poc %d) to el\n", + hevc->cur_pic ? + hevc->cur_pic->POC : + INVALID_POC); + } else if (vdec->master && + dec_status == HEVC_FIND_NEXT_PIC_NAL) { + /*cur is enhance, found base*/ + struct hevc_state_s *hevc_ba = + (struct hevc_state_s *) + vdec->master->private; + hevc->switch_dvlayer_flag = 1; + hevc->no_switch_dvlayer_count = 0; + hevc_ba->start_parser_type = + next_parser_type; + hevc_print(hevc, H265_DEBUG_DV, + "switch (poc %d) to bl\n", + hevc->cur_pic ? + hevc->cur_pic->POC : + INVALID_POC); + } else { + hevc->switch_dvlayer_flag = 0; + hevc->start_parser_type = + next_parser_type; + hevc->no_switch_dvlayer_count++; + hevc_print(hevc, H265_DEBUG_DV, + "%s: no_switch_dvlayer_count = %d\n", + vdec->master ? 
"el" : "bl", + hevc->no_switch_dvlayer_count); + if (vdec->slave && + dolby_el_flush_th != 0 && + hevc->no_switch_dvlayer_count > + dolby_el_flush_th) { + struct hevc_state_s *hevc_el = + (struct hevc_state_s *) + vdec->slave->private; + struct PIC_s *el_pic; + check_pic_decoded_error(hevc_el, + hevc_el->pic_decoded_lcu_idx); + el_pic = get_pic_by_POC(hevc_el, + hevc_el->curr_POC); + hevc_el->curr_POC = INVALID_POC; + hevc_el->m_pocRandomAccess = MAX_INT; + flush_output(hevc_el, el_pic); + hevc_el->decoded_poc = INVALID_POC; /* + already call flush_output*/ + hevc_el->decoding_pic = NULL; + hevc->no_switch_dvlayer_count = 0; + if (get_dbg_flag(hevc) & H265_DEBUG_DV) + hevc_print(hevc, 0, + "no el anymore, flush_output el\n"); + } + } + hevc->decoded_poc = hevc->curr_POC; + hevc->decoding_pic = NULL; + hevc->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + reset_process_time(hevc); + if (aux_data_is_avaible(hevc)) + dolby_get_meta(hevc); + if(hevc->cur_pic && hevc->cur_pic->slice_type == 2 && + atomic_read(&hevc->vf_pre_count) == 0) { + hevc_print(hevc, 0, + "first slice_type %x no_switch_dvlayer_count %x\n", + hevc->cur_pic->slice_type, + hevc->no_switch_dvlayer_count); + goto force_output; + } + vdec_schedule_work(&hevc->work); + } + + return IRQ_HANDLED; +#endif + } + +#endif + + if (dec_status == HEVC_SEI_DAT) { + if (!hevc->m_ins_flag) { + int payload_type = + READ_HREG(CUR_NAL_UNIT_TYPE) & 0xffff; + int payload_size = + (READ_HREG(CUR_NAL_UNIT_TYPE) >> 16) & 0xffff; + process_nal_sei(hevc, + payload_type, payload_size); + } + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_SEI_DAT_DONE); + } else if (dec_status == HEVC_NAL_SEARCH_DONE) { + int naltype = READ_HREG(CUR_NAL_UNIT_TYPE); + int parse_type = HEVC_DISCARD_NAL; + + hevc->error_watchdog_count = 0; + hevc->error_skip_nal_wt_cnt = 0; +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) + reset_process_time(hevc); +#endif + if (slice_parse_begin > 0 && + get_dbg_flag(hevc) & H265_DEBUG_DISCARD_NAL) { + 
hevc_print(hevc, 0, + "nal type %d, discard %d\n", naltype, + slice_parse_begin); + if (naltype <= NAL_UNIT_CODED_SLICE_CRA) + slice_parse_begin--; + } + if (naltype == NAL_UNIT_EOS) { + struct PIC_s *pic; + bool eos_in_head = false; + + hevc_print(hevc, 0, "get NAL_UNIT_EOS, flush output\n"); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if ((!hevc->discard_dv_data) && aux_data_is_avaible(hevc)) { + if (hevc->decoding_pic) + dolby_get_meta(hevc); + } +#endif + /*Detects frame whether has an over decode error*/ + if (vdec->master == NULL && vdec->slave == NULL && + hevc->empty_flag == 0 && input_stream_based(vdec)) { + hevc->over_decode = + (READ_VREG(HEVC_SHIFT_STATUS) >> 15) & 0x1; + if (hevc->over_decode) + hevc_print(hevc, 0, + "!!!Over decode %d\n", __LINE__); + } + check_pic_decoded_error(hevc, + hevc->pic_decoded_lcu_idx); + pic = get_pic_by_POC(hevc, hevc->curr_POC); + hevc->curr_POC = INVALID_POC; + /* add to fix RAP_B_Bossen_1 */ + hevc->m_pocRandomAccess = MAX_INT; + flush_output(hevc, pic); + clear_poc_flag(hevc); + if (input_frame_based(vdec)) { + u32 shiftbyte = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + if (shiftbyte < 0x8 && (hevc->decode_size - shiftbyte) > 0x100) { + hevc_print(hevc, 0," shiftbytes 0x%x decode_size 0x%x\n", shiftbyte, hevc->decode_size); + eos_in_head = true; + } + } + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_DISCARD_NAL); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); + + /* eos is in the head of the chunk and followed by sps/pps/IDR + * so need to go on decoding + */ + if (eos_in_head) + return IRQ_HANDLED; + +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + hevc->decoded_poc = INVALID_POC; /* + already call flush_output*/ + hevc->decoding_pic = NULL; + hevc->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + + vdec_schedule_work(&hevc->work); + } +#endif + return IRQ_HANDLED; + } + + if ( +#ifdef MULTI_INSTANCE_SUPPORT + (!hevc->m_ins_flag) && +#endif + hevc->error_skip_nal_count > 0) 
{ + hevc_print(hevc, 0, + "nal type %d, discard %d\n", naltype, + hevc->error_skip_nal_count); + hevc->error_skip_nal_count--; + if (hevc->error_skip_nal_count == 0) { + hevc_recover(hevc); + hevc->error_flag = 0; + if ((error_handle_policy & 0x2) == 0) { + hevc->have_vps = 1; + hevc->have_sps = 1; + hevc->have_pps = 1; + } + return IRQ_HANDLED; + } + } else if (naltype == NAL_UNIT_VPS) { + parse_type = HEVC_NAL_UNIT_VPS; + hevc->have_vps = 1; +#ifdef ERROR_HANDLE_DEBUG + if (dbg_nal_skip_flag & 1) + parse_type = HEVC_DISCARD_NAL; +#endif + } else if (hevc->have_vps) { + if (naltype == NAL_UNIT_SPS) { + parse_type = HEVC_NAL_UNIT_SPS; + hevc->have_sps = 1; +#ifdef ERROR_HANDLE_DEBUG + if (dbg_nal_skip_flag & 2) + parse_type = HEVC_DISCARD_NAL; +#endif + } else if (naltype == NAL_UNIT_PPS) { + parse_type = HEVC_NAL_UNIT_PPS; + hevc->have_pps = 1; +#ifdef ERROR_HANDLE_DEBUG + if (dbg_nal_skip_flag & 4) + parse_type = HEVC_DISCARD_NAL; +#endif + } else if (hevc->have_sps && hevc->have_pps) { + int seg = HEVC_NAL_UNIT_CODED_SLICE_SEGMENT; + + if ((naltype == NAL_UNIT_CODED_SLICE_IDR) || + (naltype == + NAL_UNIT_CODED_SLICE_IDR_N_LP) + || (naltype == + NAL_UNIT_CODED_SLICE_CRA) + || (naltype == + NAL_UNIT_CODED_SLICE_BLA) + || (naltype == + NAL_UNIT_CODED_SLICE_BLANT) + || (naltype == + NAL_UNIT_CODED_SLICE_BLA_N_LP) + ) { + if (slice_parse_begin > 0) { + hevc_print(hevc, 0, + "discard %d, for debugging\n", + slice_parse_begin); + slice_parse_begin--; + } else { + parse_type = seg; + } + hevc->have_valid_start_slice = 1; + } else if (naltype <= + NAL_UNIT_CODED_SLICE_CRA + && (hevc->have_valid_start_slice + || (hevc->PB_skip_mode != 3))) { + if (slice_parse_begin > 0) { + hevc_print(hevc, 0, + "discard %d, dd\n", + slice_parse_begin); + slice_parse_begin--; + } else + parse_type = seg; + + } + } + } + if (hevc->have_vps && hevc->have_sps && hevc->have_pps + && hevc->have_valid_start_slice && + hevc->error_flag == 0) { + if ((get_dbg_flag(hevc) & + 
H265_DEBUG_MAN_SEARCH_NAL) == 0 + /* && (!hevc->m_ins_flag)*/) { + /* auot parser NAL; do not check + *vps/sps/pps/idr + */ + WRITE_VREG(NAL_SEARCH_CTL, 0x2); + } + + if ((get_dbg_flag(hevc) & + H265_DEBUG_NO_EOS_SEARCH_DONE) +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + || vdec->master + || vdec->slave +#endif + ) { + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | + 0x10000); + } + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) + | ((parser_sei_enable & 0x7) << 17)); +/*#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION*/ + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | + ((parser_dolby_vision_enable & 0x1) << 20)); +/*#endif*/ + config_decode_mode(hevc); + } + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) { + hevc_print(hevc, 0, + "naltype = %d parse_type %d\n %d %d %d %d\n", + naltype, parse_type, hevc->have_vps, + hevc->have_sps, hevc->have_pps, + hevc->have_valid_start_slice); + } + + WRITE_VREG(HEVC_DEC_STATUS_REG, parse_type); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) + start_process_time(hevc); +#endif + } else if (dec_status == HEVC_SLICE_SEGMENT_DONE) { +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + reset_process_time(hevc); + read_decode_info(hevc); + + } +#endif + if (hevc->start_decoding_time > 0) { + u32 process_time = 1000* + (jiffies - hevc->start_decoding_time)/HZ; + if (process_time > max_decoding_time) + max_decoding_time = process_time; + } + + hevc->error_watchdog_count = 0; + if (hevc->pic_list_init_flag == 2) { + hevc->pic_list_init_flag = 3; + hevc_print(hevc, 0, "set pic_list_init_flag to 3\n"); + if (hevc->kpi_first_i_comming == 0) { + hevc->kpi_first_i_comming = 1; + pr_debug("[vdec_kpi][%s] First I frame coming.\n", + __func__); + } + } else if (hevc->wait_buf == 0) { + u32 vui_time_scale; + u32 vui_num_units_in_tick; + unsigned char reconfig_flag = 0; + + if (get_dbg_flag(hevc) & 
H265_DEBUG_SEND_PARAM_WITH_REG) + get_rpm_param(&hevc->param); + else { + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_START); + for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) { + int ii; + + for (ii = 0; ii < 4; ii++) { + hevc->param.l.data[i + ii] = + hevc->rpm_ptr[i + 3 + - ii]; + } + } + ATRACE_COUNTER(hevc->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_END); +#ifdef SEND_LMEM_WITH_RPM + check_head_error(hevc); +#endif + } + + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) { + PR_INIT(128); + + hevc_print(hevc, 0, + "rpm_param: (%d)\n", hevc->slice_idx); + hevc->slice_idx++; + + for (i = 0; i < (RPM_END - RPM_BEGIN); i++) { + PR_FILL("%04x ", hevc->param.l.data[i]); + if (((i + 1) & 0xf) == 0) + PR_INFO(hevc->index); + } + PR_INFO(hevc->index); + + hevc_print(hevc, 0, + "vui_timing_info: %x, %x, %x, %x\n", + hevc->param.p.vui_num_units_in_tick_hi, + hevc->param.p.vui_num_units_in_tick_lo, + hevc->param.p.vui_time_scale_hi, + hevc->param.p.vui_time_scale_lo); + } + + if (aux_data_is_avaible(hevc)) { + static struct PIC_s pic; + u32 size = 0, type = 0; + char *p; + + memset(&pic, 0, sizeof(pic)); + + pic.aux_data_buf = vzalloc(hevc->prefix_aux_size); + + set_aux_data(hevc, &pic, 0, 0); + + if (pic.aux_data_buf + && pic.aux_data_size) { + hevc->frame_field_info_present_flag = + (hevc->param.p.sei_frame_field_info >> 8) & 0x1; + + /* parser sei */ + p = pic.aux_data_buf; + while (p < pic.aux_data_buf + + pic.aux_data_size - 8) { + size = *p++; + size = (size << 8) | *p++; + size = (size << 8) | *p++; + size = (size << 8) | *p++; + type = *p++; + type = (type << 8) | *p++; + type = (type << 8) | *p++; + type = (type << 8) | *p++; + if (type == 0x02000000) { + /* hevc_print(hevc, 0, "sei(%d)\n", size);*/ + parse_sei(hevc, &pic, p, size); + } + p += size; + } + + hevc->param.p.sei_frame_field_info &= + ~(0xf << 3); + hevc->param.p.sei_frame_field_info |= + (pic.pic_struct << 3); + } + + if (pic.hdr10p_data_buf) + 
vfree(pic.hdr10p_data_buf); + if (pic.aux_data_buf) + vfree(pic.aux_data_buf); + } + + if (hevc->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hevc->v4l2_ctx); + if (!v4l_res_change(hevc, &hevc->param)) { + if (ctx->param_sets_from_ucode && !hevc->v4l_params_parsed) { + struct aml_vdec_ps_infos ps; + struct aml_vdec_cfg_infos cfg; + int log = hevc->param.p.log2_min_coding_block_size_minus3; + int log_s = hevc->param.p.log2_diff_max_min_coding_block_size; + + hevc->pic_w = hevc->param.p.pic_width_in_luma_samples; + hevc->pic_h = hevc->param.p.pic_height_in_luma_samples; + hevc->lcu_size = 1 << (log + 3 + log_s); + + pr_debug("set ucode parse\n"); + hevc_interlace_check(hevc, &hevc->param); + if (hevc->interlace_flag) { + vh265_get_cfg_info(hevc, &hevc->param, &cfg); + vdec_v4l_set_cfg_infos(ctx, &cfg); + } + if (get_valid_double_write_mode(hevc) != 16) { + struct vdec_comp_buf_info info; + + get_comp_buf_info(hevc, &info); + vdec_v4l_set_comp_buf_info(ctx, &info); + } + vh265_get_ps_info(hevc, &hevc->param, &ps); + /*notice the v4l2 codec.*/ + vdec_v4l_set_ps_infos(ctx, &ps); + hevc->v4l_params_parsed = true; + hevc->dec_result = DEC_RESULT_AGAIN; + amhevc_stop(); + restore_decode_state(hevc); + reset_process_time(hevc); + vdec_schedule_work(&hevc->work); + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + return IRQ_HANDLED; + } else { + struct vdec_pic_info pic; + + vdec_v4l_get_pic_info(ctx, &pic); + hevc->used_buf_num = pic.dpb_frames + + pic.dpb_margin; + if (hevc->used_buf_num > MAX_BUF_NUM) + hevc->used_buf_num = MAX_BUF_NUM; + } + } else { + pr_debug("resolution change\n"); + hevc->dec_result = DEC_RESULT_AGAIN; + amhevc_stop(); + restore_decode_state(hevc); + reset_process_time(hevc); + vdec_schedule_work(&hevc->work); + return IRQ_HANDLED; + } + } + + if ( +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + vdec->master == NULL && + vdec->slave == NULL && +#endif + aux_data_is_avaible(hevc) + ) { + 
+ if (get_dbg_flag(hevc) & + H265_DEBUG_PRINT_SEI) + dump_aux_buf(hevc); + } + + vui_time_scale = + (u32)(hevc->param.p.vui_time_scale_hi << 16) | + hevc->param.p.vui_time_scale_lo; + vui_num_units_in_tick = + (u32)(hevc->param. + p.vui_num_units_in_tick_hi << 16) | + hevc->param. + p.vui_num_units_in_tick_lo; + if (hevc->bit_depth_luma != + ((hevc->param.p.bit_depth & 0xf) + 8)) { + reconfig_flag = 1; + hevc_print(hevc, 0, "Bit depth luma = %d\n", + (hevc->param.p.bit_depth & 0xf) + 8); + } + if (hevc->bit_depth_chroma != + (((hevc->param.p.bit_depth >> 4) & 0xf) + 8)) { + reconfig_flag = 1; + hevc_print(hevc, 0, "Bit depth chroma = %d\n", + ((hevc->param.p.bit_depth >> 4) & + 0xf) + 8); + } + hevc->bit_depth_luma = + (hevc->param.p.bit_depth & 0xf) + 8; + hevc->bit_depth_chroma = + ((hevc->param.p.bit_depth >> 4) & 0xf) + 8; + bit_depth_luma = hevc->bit_depth_luma; + bit_depth_chroma = hevc->bit_depth_chroma; +#ifdef SUPPORT_10BIT + if (hevc->bit_depth_luma == 8 && + hevc->bit_depth_chroma == 8 && + enable_mem_saving) + hevc->mem_saving_mode = 1; + else + hevc->mem_saving_mode = 0; +#endif + if (reconfig_flag && + (get_double_write_mode(hevc) & 0x10) == 0) + init_decode_head_hw(hevc); + + if ((vui_time_scale != 0) + && (vui_num_units_in_tick != 0)) { + hevc->frame_dur = + div_u64(96000ULL * + vui_num_units_in_tick, + vui_time_scale); + if (hevc->get_frame_dur != true) + vdec_schedule_work( + &hevc->notify_work); + + hevc->get_frame_dur = true; + //hevc->gvs->frame_dur = hevc->frame_dur; + } + + if (hevc->video_signal_type != + ((hevc->param.p.video_signal_type << 16) + | hevc->param.p.color_description)) { + u32 v = hevc->param.p.video_signal_type; + u32 c = hevc->param.p.color_description; +#if 0 + if (v & 0x2000) { + hevc_print(hevc, 0, + "video_signal_type present:\n"); + hevc_print(hevc, 0, " %s %s\n", + video_format_names[(v >> 10) & 7], + ((v >> 9) & 1) ? 
+ "full_range" : "limited"); + if (v & 0x100) { + hevc_print(hevc, 0, + " color_description present:\n"); + hevc_print(hevc, 0, + " color_primarie = %s\n", + color_primaries_names + [v & 0xff]); + hevc_print(hevc, 0, + " transfer_characteristic = %s\n", + transfer_characteristics_names + [(c >> 8) & 0xff]); + hevc_print(hevc, 0, + " matrix_coefficient = %s\n", + matrix_coeffs_names[c & 0xff]); + } + } +#endif + hevc->video_signal_type = (v << 16) | c; + video_signal_type = hevc->video_signal_type; + } + + if (use_cma && + (hevc->param.p.slice_segment_address == 0) + && (hevc->pic_list_init_flag == 0)) { + int log = hevc->param.p.log2_min_coding_block_size_minus3; + int log_s = hevc->param.p.log2_diff_max_min_coding_block_size; + + hevc->pic_w = hevc->param.p.pic_width_in_luma_samples; + hevc->pic_h = hevc->param.p.pic_height_in_luma_samples; + hevc->lcu_size = 1 << (log + 3 + log_s); + hevc->lcu_size_log2 = log2i(hevc->lcu_size); + if (performance_profile &&( (!is_oversize(hevc->pic_w, hevc->pic_h)) && IS_8K_SIZE(hevc->pic_w,hevc->pic_h))) + hevc->performance_profile = 1; + else + hevc->performance_profile = 0; + hevc_print(hevc, 0, "hevc->performance_profile %d\n", hevc->performance_profile); + if (hevc->pic_w == 0 || hevc->pic_h == 0 + || hevc->lcu_size == 0 + || is_oversize(hevc->pic_w, hevc->pic_h) + || hevc_skip_nal(hevc)) { + /* skip search next start code */ + WRITE_VREG(HEVC_WAIT_FLAG, READ_VREG(HEVC_WAIT_FLAG) + & (~0x2)); + if ((hevc->pic_h == 96) && (hevc->pic_w == 160)) + hevc->skip_nal_count++; + hevc->skip_flag = 1; + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) + start_process_time(hevc); +#endif + } else { + hevc->sps_num_reorder_pics_0 = + hevc->param.p.sps_num_reorder_pics_0; + hevc->ip_mode = hevc->low_latency_flag ? 
true : + (!hevc->sps_num_reorder_pics_0 && + !(vdec->slave || vdec->master) && + !disable_ip_mode) ? true : false; + hevc->pic_list_init_flag = 1; + #if 0 + if ((!IS_4K_SIZE(hevc->pic_w, hevc->pic_h)) && + ((hevc->param.p.profile_etc & 0xc) == 0x4) + && (interlace_enable != 0)) { + hevc->double_write_mode = 1; + hevc->interlace_flag = 1; + hevc->frame_ar = (hevc->pic_h * 0x100 / hevc->pic_w) * 2; + hevc_print(hevc, 0, + "interlace (%d, %d), profile_etc %x, ar 0x%x, dw %d\n", + hevc->pic_w, hevc->pic_h, hevc->param.p.profile_etc, hevc->frame_ar, + get_double_write_mode(hevc)); + /* When dw changed from 0x10 to 1, the mmu_box is NULL */ + if (!hevc->mmu_box && init_mmu_buffers(hevc, 0) != 0) { + hevc->dec_result = DEC_RESULT_FORCE_EXIT; + hevc->fatal_error |= + DECODER_FATAL_ERROR_NO_MEM; + vdec_schedule_work(&hevc->work); + hevc_print(hevc, + 0, "can not alloc mmu box, force exit\n"); + return IRQ_HANDLED; + } + } + #endif +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + vdec_schedule_work(&hevc->work); + } else +#endif + up(&h265_sema); + hevc_print(hevc, 0, "set pic_list_init_flag 1\n"); + } + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + return IRQ_HANDLED; + } + +} + ret = + hevc_slice_segment_header_process(hevc, + &hevc->param, decode_pic_begin); + if (ret < 0) { +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + hevc->wait_buf = 0; + hevc->dec_result = DEC_RESULT_AGAIN; + amhevc_stop(); + restore_decode_state(hevc); + reset_process_time(hevc); + vdec_schedule_work(&hevc->work); + return IRQ_HANDLED; + } +#else + ; +#endif + } else if (ret == 0) { + if ((hevc->new_pic) && (hevc->cur_pic)) { + hevc->cur_pic->stream_offset = + READ_VREG(HEVC_SHIFT_BYTE_COUNT); + hevc_print(hevc, H265_DEBUG_OUT_PTS, + "read stream_offset = 0x%x\n", + hevc->cur_pic->stream_offset); + hevc->cur_pic->aspect_ratio_idc = + hevc->param.p.aspect_ratio_idc; + hevc->cur_pic->sar_width = + hevc->param.p.sar_width; + hevc->cur_pic->sar_height 
= + hevc->param.p.sar_height; + } + + WRITE_VREG(HEVC_DEC_STATUS_REG, + HEVC_CODED_SLICE_SEGMENT_DAT); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); + + hevc->start_decoding_time = jiffies; +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) + start_process_time(hevc); +#endif +#if 1 + /*to do..., copy aux data to hevc->cur_pic*/ +#endif +#ifdef MULTI_INSTANCE_SUPPORT + } else if (hevc->m_ins_flag) { + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s, bufmgr ret %d skip, DEC_RESULT_DONE\n", + __func__, ret); + hevc->decoded_poc = INVALID_POC; + hevc->decoding_pic = NULL; + hevc->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + reset_process_time(hevc); + vdec_schedule_work(&hevc->work); +#endif + } else { + /* skip, search next start code */ + hevc->gvs->drop_frame_count++; + if (hevc->cur_pic->slice_type == I_SLICE) { + hevc->gvs->i_lost_frames++; + } else if (hevc->cur_pic->slice_type == P_SLICE) { + hevc->gvs->i_lost_frames++; + } else if (hevc->cur_pic->slice_type == B_SLICE) { + hevc->gvs->i_lost_frames++; + } + WRITE_VREG(HEVC_WAIT_FLAG, READ_VREG(HEVC_WAIT_FLAG) & (~0x2)); + hevc->skip_flag = 1; + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + /* Interrupt Amrisc to excute */ + WRITE_VREG(HEVC_MCPU_INTR_REQ, AMRISC_MAIN_REQ); + } + + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + + } else if (dec_status == HEVC_DECODE_OVER_SIZE) { + hevc_print(hevc, 0 , "hevc decode oversize !!\n"); +#ifdef MULTI_INSTANCE_SUPPORT + if (!hevc->m_ins_flag) + debug |= (H265_DEBUG_DIS_LOC_ERROR_PROC | + H265_DEBUG_DIS_SYS_ERROR_PROC); +#endif + hevc->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + } + return IRQ_HANDLED; +} + +static void wait_hevc_search_done(struct hevc_state_s *hevc) +{ + int count = 0; + WRITE_VREG(HEVC_SHIFT_STATUS, 0); + while (READ_VREG(HEVC_STREAM_CONTROL) & 0x2) { + msleep(20); + count++; + if (count > 100) { + hevc_print(hevc, 0, "%s timeout\n", __func__); + break; + } + } 
+} +static irqreturn_t vh265_isr(int irq, void *data) +{ + int i, temp; + unsigned int dec_status; + struct hevc_state_s *hevc = (struct hevc_state_s *)data; + u32 debug_tag; + dec_status = READ_VREG(HEVC_DEC_STATUS_REG); + + if (dec_status == HEVC_SLICE_SEGMENT_DONE) { + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_HEAD_DONE); + } + else if (dec_status == HEVC_DECPIC_DATA_DONE) { + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_PIC_DONE); + } + + if (hevc->init_flag == 0) + return IRQ_HANDLED; + + ATRACE_COUNTER("V_ST_DEC-decode_state", dec_status); + + hevc->dec_status = dec_status; + if (is_log_enable(hevc)) + add_log(hevc, + "isr: status = 0x%x dec info 0x%x lcu 0x%x shiftbyte 0x%x shiftstatus 0x%x", + dec_status, READ_HREG(HEVC_DECODE_INFO), + READ_VREG(HEVC_MPRED_CURR_LCU), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_STATUS)); + + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) + hevc_print(hevc, 0, + "265 isr dec status = 0x%x dec info 0x%x shiftbyte 0x%x shiftstatus 0x%x\n", + dec_status, READ_HREG(HEVC_DECODE_INFO), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_STATUS)); + + debug_tag = READ_HREG(DEBUG_REG1); + if (debug_tag & 0x10000) { + PR_INIT(128); + hevc_print(hevc, 0, + "LMEM<tag %x>:\n", READ_HREG(DEBUG_REG1)); + + if (hevc->mmu_enable) + temp = 0x500; + else + temp = 0x400; + for (i = 0; i < temp; i += 4) { + int ii; + if ((i & 0xf) == 0) + PR_FILL("%03x: ", i); + for (ii = 0; ii < 4; ii++) { + PR_FILL("%04x ", + hevc->lmem_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + PR_INFO(hevc->index); + } + + PR_INFO(hevc->index); + + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == hevc->decode_idx) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_HREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hevc->ucode_pause_pos = udebug_pause_pos; + } + else if (debug_tag & 0x20000) + hevc->ucode_pause_pos = 0xffffffff; + if 
(hevc->ucode_pause_pos)
+ reset_process_time(hevc);
+ else
+ WRITE_HREG(DEBUG_REG1, 0);
+ } else if (debug_tag != 0) {
+ /* non-LMEM ucode debug tag: dump stream level / write / read pointers */
+ hevc_print(hevc, 0,
+ "dbg%x: %x l/w/r %x %x %x\n", READ_HREG(DEBUG_REG1),
+ READ_HREG(DEBUG_REG2),
+ READ_VREG(HEVC_STREAM_LEVEL),
+ READ_VREG(HEVC_STREAM_WR_PTR),
+ READ_VREG(HEVC_STREAM_RD_PTR));
+ if (((udebug_pause_pos & 0xffff)
+ == (debug_tag & 0xffff)) &&
+ (udebug_pause_decode_idx == 0 ||
+ udebug_pause_decode_idx == hevc->decode_idx) &&
+ (udebug_pause_val == 0 ||
+ udebug_pause_val == READ_HREG(DEBUG_REG2))) {
+ udebug_pause_pos &= 0xffff;
+ hevc->ucode_pause_pos = udebug_pause_pos;
+ }
+ /* leave the ucode paused (don't ack DEBUG_REG1) while a pause is armed */
+ if (hevc->ucode_pause_pos)
+ reset_process_time(hevc);
+ else
+ WRITE_HREG(DEBUG_REG1, 0);
+ return IRQ_HANDLED;
+ }
+
+
+ if (hevc->pic_list_init_flag == 1)
+ return IRQ_HANDLED;
+
+ if (!hevc->m_ins_flag) {
+ if (dec_status == HEVC_OVER_DECODE) {
+ hevc->over_decode = 1;
+ /* NOTE(review): comma operator after hevc_print() — compiles, but ';' was likely intended */
+ hevc_print(hevc, 0,
+ "isr: over decode\n"),
+ WRITE_VREG(HEVC_DEC_STATUS_REG, 0);
+ return IRQ_HANDLED;
+ }
+ }
+ ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_ISR_END);
+ /* defer the real work to vh265_isr_thread_fn */
+ return IRQ_WAKE_THREAD;
+
+}
+
+/*
+ * vh265_set_clk() - deferred work (set_clk_work): re-evaluate the decoder
+ * clock for the current resolution/frame rate.  fps is derived from
+ * frame_dur (96000 ticks per second); when hevc_source_changed() reports a
+ * change (> 0) the width*height*fps product is cached in saved_resolution,
+ * which is what the watchdog compares before re-queuing this work.
+ * NOTE(review): frame_dur == 0 would divide by zero here — the scheduling
+ * site in vh265_check_timer_func only queues this when frame_dur > 0.
+ */
+static void vh265_set_clk(struct work_struct *work)
+{
+ struct hevc_state_s *hevc = container_of(work,
+ struct hevc_state_s, set_clk_work);
+
+ int fps = 96000 / hevc->frame_dur;
+
+ if (hevc_source_changed(VFORMAT_HEVC,
+ hevc->frame_width, hevc->frame_height, fps) > 0)
+ hevc->saved_resolution = hevc->frame_width *
+ hevc->frame_height * fps;
+}
+
+/*
+ * vh265_check_timer_func() - periodic watchdog callback, re-armed every
+ * PUT_INTERVAL.  Before init completes it only re-arms itself; afterwards
+ * it checks decode timeouts / error conditions (body continues below).
+ */
+static void vh265_check_timer_func(struct timer_list *timer)
+{
+ struct hevc_state_s *hevc = container_of(timer,
+ struct hevc_state_s, timer);
+ unsigned char empty_flag;
+ unsigned int buf_level;
+
+ enum receviver_start_e state = RECEIVER_INACTIVE;
+
+ if (hevc->init_flag == 0) {
+ if (hevc->stat & STAT_TIMER_ARM) {
+ mod_timer(&hevc->timer, jiffies + PUT_INTERVAL);
+ }
+ return;
+ }
+#ifdef MULTI_INSTANCE_SUPPORT
+ if (hevc->m_ins_flag &&
+ (get_dbg_flag(hevc) &
+ H265_DEBUG_WAIT_DECODE_DONE_WHEN_STOP) == 0 &&
+
hw_to_vdec(hevc)->next_status == + VDEC_STATUS_DISCONNECTED && + !hevc->is_used_v4l) { + hevc->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hevc->work); + hevc_print(hevc, + 0, "vdec requested to be disconnected\n"); + return; + } + + if (hevc->m_ins_flag) { + if (((get_dbg_flag(hevc) & + H265_DEBUG_DIS_LOC_ERROR_PROC) == 0) && + (decode_timeout_val > 0) && + (hevc->start_process_time > 0) && + ((1000 * (jiffies - hevc->start_process_time) / HZ) + > decode_timeout_val) + ) { + u32 dec_status = READ_VREG(HEVC_DEC_STATUS_REG); + int current_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START)&0xffffff; + if (dec_status == HEVC_CODED_SLICE_SEGMENT_DAT) { + if (hevc->last_lcu_idx == current_lcu_idx) { + if (hevc->decode_timeout_count > 0) + hevc->decode_timeout_count--; + if (hevc->decode_timeout_count == 0) + timeout_process(hevc); + } else + restart_process_time(hevc); + hevc->last_lcu_idx = current_lcu_idx; + } else { + hevc->pic_decoded_lcu_idx = current_lcu_idx; + timeout_process(hevc); + } + } + } else { +#endif + if (hevc->m_ins_flag == 0 && + vf_get_receiver(hevc->provider_name)) { + state = + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + if ((state == RECEIVER_STATE_NULL) + || (state == RECEIVER_STATE_NONE)) + state = RECEIVER_INACTIVE; + } else + state = RECEIVER_INACTIVE; + + empty_flag = (READ_VREG(HEVC_PARSER_INT_STATUS) >> 6) & 0x1; + /* error watchdog */ + if (hevc->m_ins_flag == 0 && + (empty_flag == 0) + && (hevc->pic_list_init_flag == 0 + || hevc->pic_list_init_flag + == 3)) { + /* decoder has input */ + if ((get_dbg_flag(hevc) & + H265_DEBUG_DIS_LOC_ERROR_PROC) == 0) { + + buf_level = READ_VREG(HEVC_STREAM_LEVEL); + /* receiver has no buffer to recycle */ + if ((state == RECEIVER_INACTIVE) && + (kfifo_is_empty(&hevc->display_q) && + buf_level > 0x200) + ) { + if (hevc->error_flag == 0) { + hevc->error_watchdog_count++; + if (hevc->error_watchdog_count == + error_handle_threshold) { + hevc_print(hevc, 0, 
+ "H265 dec err local reset.\n"); + hevc->error_flag = 1; + hevc->error_watchdog_count = 0; + hevc->error_skip_nal_wt_cnt = 0; + hevc-> + error_system_watchdog_count++; + WRITE_VREG + (HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + } + } else if (hevc->error_flag == 2) { + int th = + error_handle_nal_skip_threshold; + hevc->error_skip_nal_wt_cnt++; + if (hevc->error_skip_nal_wt_cnt + == th) { + hevc->error_flag = 3; + hevc->error_watchdog_count = 0; + hevc-> + error_skip_nal_wt_cnt = 0; + WRITE_VREG + (HEVC_ASSIST_MBOX0_IRQ_REG, + 0x1); + } + } + } + } + + if ((get_dbg_flag(hevc) + & H265_DEBUG_DIS_SYS_ERROR_PROC) == 0) + /* receiver has no buffer to recycle */ + if ((state == RECEIVER_INACTIVE) && + (kfifo_is_empty(&hevc->display_q)) + ) { /* no buffer to recycle */ + if ((get_dbg_flag(hevc) & + H265_DEBUG_DIS_LOC_ERROR_PROC) != + 0) + hevc->error_system_watchdog_count++; + if (hevc->error_system_watchdog_count == + error_handle_system_threshold) { + /* and it lasts for a while */ + hevc_print(hevc, 0, + "H265 dec fatal error watchdog.\n"); + hevc-> + error_system_watchdog_count = 0; + hevc->fatal_error |= DECODER_FATAL_ERROR_UNKNOWN; + } + } + } else { + hevc->error_watchdog_count = 0; + hevc->error_system_watchdog_count = 0; + } +#ifdef MULTI_INSTANCE_SUPPORT + } +#endif + if ((hevc->ucode_pause_pos != 0) && + (hevc->ucode_pause_pos != 0xffffffff) && + udebug_pause_pos != hevc->ucode_pause_pos) { + hevc->ucode_pause_pos = 0; + WRITE_HREG(DEBUG_REG1, 0); + } + + if (get_dbg_flag(hevc) & H265_DEBUG_DUMP_PIC_LIST) { + dump_pic_list(hevc); + debug &= ~H265_DEBUG_DUMP_PIC_LIST; + } + if (get_dbg_flag(hevc) & H265_DEBUG_TRIG_SLICE_SEGMENT_PROC) { + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); + debug &= ~H265_DEBUG_TRIG_SLICE_SEGMENT_PROC; + } +#ifdef TEST_NO_BUF + if (hevc->wait_buf) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); +#endif + if (get_dbg_flag(hevc) & H265_DEBUG_HW_RESET) { + hevc->error_skip_nal_count = error_skip_nal_count; + WRITE_VREG(HEVC_DEC_STATUS_REG, 
HEVC_ACTION_DONE); + + debug &= ~H265_DEBUG_HW_RESET; + } + +#ifdef ERROR_HANDLE_DEBUG + if ((dbg_nal_skip_count > 0) && ((dbg_nal_skip_count & 0x10000) != 0)) { + hevc->error_skip_nal_count = dbg_nal_skip_count & 0xffff; + dbg_nal_skip_count &= ~0x10000; + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + } +#endif + + if (radr != 0) { +#ifdef SUPPORT_LONG_TERM_RPS + if ((radr >> 24) != 0) { + int count = radr >> 24; + int adr = radr & 0xffffff; + int i; + for (i = 0; i < count; i++) + pr_info("READ_VREG(%x)=%x\n", adr+i, READ_VREG(adr+i)); + } else +#endif + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + if (dbg_cmd != 0) { + if (dbg_cmd == 1) { + u32 disp_laddr; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB && + get_double_write_mode(hevc) == 0) { + disp_laddr = + READ_VCBUS_REG(AFBC_BODY_BADDR) << 4; + } else { + struct canvas_s cur_canvas; + + canvas_read((READ_VCBUS_REG(VD1_IF0_CANVAS0) + & 0xff), &cur_canvas); + disp_laddr = cur_canvas.addr; + } + hevc_print(hevc, 0, + "current displayed buffer address %x\r\n", + disp_laddr); + } + dbg_cmd = 0; + } + /*don't changed at start.*/ + if (hevc->m_ins_flag == 0 && + hevc->get_frame_dur && hevc->show_frame_num > 60 && + hevc->frame_dur > 0 && hevc->saved_resolution != + hevc->frame_width * hevc->frame_height * + (96000 / hevc->frame_dur)) + vdec_schedule_work(&hevc->set_clk_work); + + mod_timer(timer, jiffies + PUT_INTERVAL); +} + +static int h265_task_handle(void *data) +{ + int ret = 0; + struct hevc_state_s *hevc = (struct hevc_state_s *)data; + + set_user_nice(current, -10); + while (1) { + if (use_cma == 0) { + hevc_print(hevc, 0, + "ERROR: use_cma can not be changed dynamically\n"); + } + ret = down_interruptible(&h265_sema); + if ((hevc->init_flag != 0) && (hevc->pic_list_init_flag == 1)) { + init_pic_list(hevc); + init_pic_list_hw(hevc); + 
init_buf_spec(hevc);
+ hevc->pic_list_init_flag = 2;
+ hevc_print(hevc, 0, "set pic_list_init_flag to 2\n");
+
+ /* kick the ucode now that the picture list is ready */
+ WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1);
+
+ }
+
+ if (hevc->uninit_list) {
+ /*USE_BUF_BLOCK*/
+ uninit_pic_list(hevc);
+ hevc_print(hevc, 0, "uninit list\n");
+ hevc->uninit_list = 0;
+#ifdef USE_UNINIT_SEMA
+ if (use_cma) {
+ /* signal uninit completion, then park until kthread_stop() */
+ up(&hevc->h265_uninit_done_sema);
+ while (!kthread_should_stop())
+ msleep(1);
+ break;
+ }
+#endif
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * vh265_free_cmabuf() - serialized on vh265_mutex; returns immediately
+ * while the decoder is initialized.
+ * NOTE(review): the visible body only locks, checks init_flag and unlocks —
+ * no CMA buffer is actually released here.  Confirm whether the free logic
+ * was intentionally removed or is performed elsewhere.
+ */
+void vh265_free_cmabuf(void)
+{
+ struct hevc_state_s *hevc = gHevc;
+
+ mutex_lock(&vh265_mutex);
+
+ if (hevc->init_flag) {
+ mutex_unlock(&vh265_mutex);
+ return;
+ }
+
+ mutex_unlock(&vh265_mutex);
+}
+
+/*
+ * vh265_dec_status() - fill @vstatus with the decoder's current state:
+ * dimensions, rounded frame rate, error counters and the per-slice-type
+ * decoded/lost/concealed statistics kept in hevc->gvs.
+ * Returns 0 on success, -1 when the private state pointer is missing.
+ */
+#ifdef MULTI_INSTANCE_SUPPORT
+int vh265_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
+#else
+int vh265_dec_status(struct vdec_info *vstatus)
+#endif
+{
+#ifdef MULTI_INSTANCE_SUPPORT
+ struct hevc_state_s *hevc =
+ (struct hevc_state_s *)vdec->private;
+#else
+ struct hevc_state_s *hevc = gHevc;
+#endif
+ if (!hevc)
+ return -1;
+
+ vstatus->frame_width = hevc->pic_w;
+ /* for hevc interlace for disp height x2 */
+ vstatus->frame_height =
+ (hevc->pic_h << hevc->interlace_flag);
+ /* round 96000/frame_dur to the nearest integer fps */
+ if (hevc->frame_dur != 0)
+ vstatus->frame_rate = ((96000 * 10 / hevc->frame_dur) % 10) < 5 ? 
+ 96000 / hevc->frame_dur : (96000 / hevc->frame_dur +1);
+ else
+ vstatus->frame_rate = -1;
+ /*
+ * NOTE(review): hevc->gvs is dereferenced unconditionally here, yet the
+ * block just below is guarded by "if (hevc->gvs)" — if gvs can be NULL
+ * these reads crash first, so the guard ordering looks wrong.
+ */
+ vstatus->error_count = hevc->gvs->error_frame_count;
+ vstatus->status = hevc->stat | hevc->fatal_error;
+ vstatus->bit_rate = hevc->gvs->bit_rate;
+ vstatus->frame_dur = hevc->frame_dur;
+ if (hevc->gvs) {
+ vstatus->bit_rate = hevc->gvs->bit_rate;
+ vstatus->frame_data = hevc->gvs->frame_data;
+ vstatus->total_data = hevc->gvs->total_data;
+ vstatus->frame_count = hevc->gvs->frame_count;
+ vstatus->error_frame_count = hevc->gvs->error_frame_count;
+ vstatus->drop_frame_count = hevc->gvs->drop_frame_count;
+ vstatus->i_decoded_frames = hevc->gvs->i_decoded_frames;
+ vstatus->i_lost_frames = hevc->gvs->i_lost_frames;
+ vstatus->i_concealed_frames = hevc->gvs->i_concealed_frames;
+ vstatus->p_decoded_frames = hevc->gvs->p_decoded_frames;
+ vstatus->p_lost_frames = hevc->gvs->p_lost_frames;
+ vstatus->p_concealed_frames = hevc->gvs->p_concealed_frames;
+ vstatus->b_decoded_frames = hevc->gvs->b_decoded_frames;
+ vstatus->b_lost_frames = hevc->gvs->b_lost_frames;
+ vstatus->b_concealed_frames = hevc->gvs->b_concealed_frames;
+ vstatus->samp_cnt = hevc->gvs->samp_cnt;
+ vstatus->offset = hevc->gvs->offset;
+ }
+
+ snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name),
+ "%s", DRIVER_NAME);
+ vstatus->ratio_control = hevc->ratio_control;
+ return 0;
+}
+
+/*
+ * vh265_set_isreset() - record a reset request in the module-wide
+ * is_reset flag.  Always returns 0; @vdec is unused here.
+ */
+int vh265_set_isreset(struct vdec_s *vdec, int isreset)
+{
+ is_reset = isreset;
+ return 0;
+}
+
+/*
+ * vh265_vdec_info_init() - allocate the statistics block (hevc->gvs) and
+ * register the driver name for vframe comm info.
+ * Returns 0 on success, -ENOMEM when the allocation fails.
+ */
+static int vh265_vdec_info_init(struct hevc_state_s *hevc)
+{
+ hevc->gvs = kzalloc(sizeof(struct vdec_info), GFP_KERNEL);
+ //pr_err("[%s line %d] hevc->gvs=0x%p operation\n",__func__, __LINE__, hevc->gvs);
+ if (NULL == hevc->gvs) {
+ pr_info("the struct of vdec status malloc failed.\n");
+ return -ENOMEM;
+ }
+ vdec_set_vframe_comm(hw_to_vdec(hevc), DRIVER_NAME);
+ return 0;
+}
+
+#if 0
+static void H265_DECODE_INIT(void)
+{
+ /* enable hevc clocks */
+ WRITE_VREG(DOS_GCLK_EN3, 0xffffffff);
+ /* 
*************************************************************** */
+ /* Power ON HEVC */
+ /* *************************************************************** */
+ /* Powerup HEVC */
+ WRITE_VREG(P_AO_RTI_GEN_PWR_SLEEP0,
+ READ_VREG(P_AO_RTI_GEN_PWR_SLEEP0) & (~(0x3 << 6)));
+ WRITE_VREG(DOS_MEM_PD_HEVC, 0x0);
+ WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) | (0x3ffff << 2));
+ WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) & (~(0x3ffff << 2)));
+ /* remove isolations */
+ WRITE_VREG(AO_RTI_GEN_PWR_ISO0,
+ READ_VREG(AO_RTI_GEN_PWR_ISO0) & (~(0x3 << 10)));
+
+}
+#endif
+
+/*
+ * vh265_set_trickmode() - translate a trick-mode request into the
+ * module-wide trickmode_i / i_only_flag knobs (0x02, 0x03 and 0x07 are
+ * extended I-only variants).  Unrecognized values leave both flags
+ * untouched.  Always returns 0.
+ */
+int vh265_set_trickmode(struct vdec_s *vdec, unsigned long trickmode)
+{
+ struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private;
+ hevc_print(hevc, 0, "[%s %d] trickmode:%lu\n", __func__, __LINE__, trickmode);
+
+ if (trickmode == TRICKMODE_I) {
+ trickmode_i = 1;
+ i_only_flag = 0x1;
+ } else if (trickmode == TRICKMODE_NONE) {
+ trickmode_i = 0;
+ i_only_flag = 0x0;
+ } else if (trickmode == 0x02) {
+ trickmode_i = 0;
+ i_only_flag = 0x02;
+ } else if (trickmode == 0x03) {
+ trickmode_i = 1;
+ i_only_flag = 0x03;
+ } else if (trickmode == 0x07) {
+ trickmode_i = 1;
+ i_only_flag = 0x07;
+ }
+ //hevc_print(hevc, 0, "i_only_flag: %d trickmode_i:%d\n", i_only_flag, trickmode_i);
+
+ return 0;
+}
+
+/*
+ * config_decode_mode() - select the HEVC_DECODE_MODE register value from
+ * the instance mode (single / multi frame-based / stream-based, with
+ * Dolby-Vision master/slave variants when enabled).
+ */
+static void config_decode_mode(struct hevc_state_s *hevc)
+{
+#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
+ struct vdec_s *vdec = hw_to_vdec(hevc);
+#endif
+ unsigned decode_mode;
+#ifdef HEVC_8K_LFTOFFSET_FIX
+ if (hevc->performance_profile)
+ WRITE_VREG(NAL_SEARCH_CTL,
+ READ_VREG(NAL_SEARCH_CTL) | (1 << 21));
+#endif
+ if (!hevc->m_ins_flag)
+ decode_mode = DECODE_MODE_SINGLE;
+ else if (vdec_frame_based(hw_to_vdec(hevc)))
+ decode_mode =
+ DECODE_MODE_MULTI_FRAMEBASE;
+#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
+ else if (vdec->slave) {
+ if (force_bypass_dvenl & 0x80000000)
+ hevc->bypass_dvenl = force_bypass_dvenl & 0x1;
+ else
+ hevc->bypass_dvenl = 
hevc->bypass_dvenl_enable; + if (dolby_meta_with_el && hevc->bypass_dvenl) { + hevc->bypass_dvenl = 0; + hevc_print(hevc, 0, + "NOT support bypass_dvenl when meta_with_el\n"); + } + if (hevc->bypass_dvenl) + decode_mode = + (hevc->start_parser_type << 8) + | DECODE_MODE_MULTI_STREAMBASE; + else + decode_mode = + (hevc->start_parser_type << 8) + | DECODE_MODE_MULTI_DVBAL; + } else if (vdec->master) + decode_mode = + (hevc->start_parser_type << 8) + | DECODE_MODE_MULTI_DVENL; +#endif + else + decode_mode = + DECODE_MODE_MULTI_STREAMBASE; + + if (hevc->m_ins_flag) + decode_mode |= + (hevc->start_decoding_flag << 16); + /* set MBX0 interrupt flag */ + decode_mode |= (0x80 << 24); + WRITE_VREG(HEVC_DECODE_MODE, decode_mode); + WRITE_VREG(HEVC_DECODE_MODE2, + hevc->rps_set_id); +} + +static void vh265_prot_init(struct hevc_state_s *hevc) +{ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + struct vdec_s *vdec = hw_to_vdec(hevc); +#endif + /* H265_DECODE_INIT(); */ + + hevc_config_work_space_hw(hevc); + + hevc_init_decoder_hw(hevc, 0, 0xffffffff); + + WRITE_VREG(HEVC_WAIT_FLAG, 1); + + /* WRITE_VREG(P_HEVC_MPSR, 1); */ + + /* clear mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 1); + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(HEVC_PSCALE_CTRL, 0); + + WRITE_VREG(DEBUG_REG1, 0x0 | (dump_nal << 8)); + + if ((get_dbg_flag(hevc) & + (H265_DEBUG_MAN_SKIP_NAL | + H265_DEBUG_MAN_SEARCH_NAL)) + /*||hevc->m_ins_flag*/ + ) { + WRITE_VREG(NAL_SEARCH_CTL, 0x1); /* manual parser NAL */ + } else { + /* check vps/sps/pps/i-slice in ucode */ + unsigned ctl_val = 0x8; + if (hevc->PB_skip_mode == 0) + ctl_val = 0x4; /* check vps/sps/pps only in ucode */ + else if (hevc->PB_skip_mode == 3) + ctl_val = 0x0; /* check vps/sps/pps/idr in ucode */ + /*if (((error_handle_policy & 0x200) == 0) && + input_stream_based(vdec)) + ctl_val = 0x1;*/ + WRITE_VREG(NAL_SEARCH_CTL, ctl_val); + } + if 
((get_dbg_flag(hevc) & H265_DEBUG_NO_EOS_SEARCH_DONE) +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + || vdec->master + || vdec->slave +#endif + ) + WRITE_VREG(NAL_SEARCH_CTL, READ_VREG(NAL_SEARCH_CTL) | 0x10000); + + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) + | ((parser_sei_enable & 0x7) << 17)); +/*#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION*/ + WRITE_VREG(NAL_SEARCH_CTL, + READ_VREG(NAL_SEARCH_CTL) | + ((parser_dolby_vision_enable & 0x1) << 20)); +/*#endif*/ + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + + config_decode_mode(hevc); + config_aux_buf(hevc); +#ifdef SWAP_HEVC_UCODE + if (!tee_enabled() && hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, hevc->mc_dma_handle); + /*pr_info("write swap buffer %x\n", (u32)(hevc->mc_dma_handle));*/ + } +#endif +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + WRITE_VREG(HEVC_SAO_DBG_MODE0, 0); + WRITE_VREG(HEVC_SAO_DBG_MODE1, 0); + } +#endif +} + +static int vh265_local_init(struct hevc_state_s *hevc) +{ + int i; + int ret = -1; + struct vdec_s *vdec = hw_to_vdec(hevc); + +#ifdef DEBUG_PTS + hevc->pts_missed = 0; + hevc->pts_hit = 0; +#endif + hevc->saved_resolution = 0; + hevc->get_frame_dur = false; + hevc->frame_width = hevc->vh265_amstream_dec_info.width; + hevc->frame_height = hevc->vh265_amstream_dec_info.height; + if (is_oversize(hevc->frame_width, hevc->frame_height)) { + pr_info("over size : %u x %u.\n", + hevc->frame_width, hevc->frame_height); + hevc->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + return ret; + } + + if (hevc->max_pic_w && hevc->max_pic_h) { + hevc->is_4k = !(hevc->max_pic_w && hevc->max_pic_h) || + ((hevc->max_pic_w * hevc->max_pic_h) > + 1920 * 1088) ? true : false; + } else { + hevc->is_4k = !(hevc->frame_width && hevc->frame_height) || + ((hevc->frame_width * hevc->frame_height) > + 1920 * 1088) ? 
true : false; + } + + hevc->frame_dur = + (hevc->vh265_amstream_dec_info.rate == + 0) ? 3600 : hevc->vh265_amstream_dec_info.rate; + //hevc->gvs->frame_dur = hevc->frame_dur; + if (hevc->frame_width && hevc->frame_height) + hevc->frame_ar = hevc->frame_height * 0x100 / hevc->frame_width; + + if (i_only_flag) + hevc->i_only = i_only_flag & 0xff; + else if ((unsigned long) hevc->vh265_amstream_dec_info.param + & 0x08) + hevc->i_only = 0x7; + else + hevc->i_only = 0x0; + hevc->error_watchdog_count = 0; + hevc->sei_present_flag = 0; + if (vdec->sys_info) + pts_unstable = ((unsigned long)vdec->sys_info->param + & 0x40) >> 6; + hevc_print(hevc, 0, + "h265:pts_unstable=%d\n", pts_unstable); +/* + *TODO:FOR VERSION + */ + hevc_print(hevc, 0, + "h265: ver (%d,%d) decinfo: %dx%d rate=%d\n", h265_version, + 0, hevc->frame_width, hevc->frame_height, hevc->frame_dur); + + if (hevc->frame_dur == 0) + hevc->frame_dur = 96000 / 24; + + INIT_KFIFO(hevc->display_q); + INIT_KFIFO(hevc->newframe_q); + INIT_KFIFO(hevc->pending_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &hevc->vfpool[i]; + + hevc->vfpool[i].index = -1; + kfifo_put(&hevc->newframe_q, vf); + } + + if (!hevc->resolution_change) + ret = hevc_local_init(hevc); + else + ret = 0; + + return ret; +} +#ifdef MULTI_INSTANCE_SUPPORT +static s32 vh265_init(struct vdec_s *vdec) +{ + struct hevc_state_s *hevc = (struct hevc_state_s *)vdec->private; +#else +static s32 vh265_init(struct hevc_state_s *hevc) +{ + +#endif + int ret, size = -1; + int fw_size = 0x1000 * 16; + struct firmware_s *fw = NULL; + + timer_setup(&hevc->timer, vh265_check_timer_func, 0); + + hevc->stat |= STAT_TIMER_INIT; + + if (hevc->m_ins_flag) { +#ifdef USE_UNINIT_SEMA + sema_init(&hevc->h265_uninit_done_sema, 0); +#endif + INIT_WORK(&hevc->work, vh265_work); + INIT_WORK(&hevc->timeout_work, vh265_timeout_work); + } + + if (vh265_local_init(hevc) < 0) + return -EBUSY; + + mutex_init(&hevc->chunks_mutex); + 
INIT_WORK(&hevc->notify_work, vh265_notify_work); + INIT_WORK(&hevc->set_clk_work, vh265_set_clk); + + fw = vzalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + if (hevc->mmu_enable) + if (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_GXM) + size = get_firmware_data(VIDEO_DEC_HEVC_MMU, fw->data); + else { + if (!hevc->is_4k) { + /* if an older version of the fw was loaded, */ + /* needs try to load noswap fw because the */ + /* old fw package dose not contain the swap fw.*/ + size = get_firmware_data( + VIDEO_DEC_HEVC_MMU_SWAP, fw->data); + if (size < 0) + size = get_firmware_data( + VIDEO_DEC_HEVC_MMU, fw->data); + else if (size) + hevc->is_swap = true; + } else + size = get_firmware_data(VIDEO_DEC_HEVC_MMU, + fw->data); + } + else + size = get_firmware_data(VIDEO_DEC_HEVC, fw->data); + + if (size < 0) { + pr_err("get firmware fail.\n"); + vfree(fw); + return -1; + } + + fw->len = size; + +#ifdef SWAP_HEVC_UCODE + if (!tee_enabled() && hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + if (hevc->mmu_enable) { + hevc->swap_size = (4 * (4 * SZ_1K)); /*max 4 swap code, each 0x400*/ + hevc->mc_cpu_addr = + dma_alloc_coherent(amports_get_dma_device(), + hevc->swap_size, + &hevc->mc_dma_handle, GFP_KERNEL); + if (!hevc->mc_cpu_addr) { + amhevc_disable(); + pr_info("vh265 mmu swap ucode loaded fail.\n"); + return -ENOMEM; + } + + memcpy((u8 *) hevc->mc_cpu_addr, fw->data + SWAP_HEVC_OFFSET, + hevc->swap_size); + + hevc_print(hevc, 0, + "vh265 mmu ucode swap loaded %x\n", + hevc->mc_dma_handle); + } + } +#endif + +#ifdef MULTI_INSTANCE_SUPPORT + if (hevc->m_ins_flag) { + //hevc->timer.data = (ulong) hevc; + //hevc->timer.function = vh265_check_timer_func; + hevc->timer.expires = jiffies + PUT_INTERVAL; + + hevc->fw = fw; + hevc->init_flag = 1; + + return 0; + } +#endif + amhevc_enable(); + + if (hevc->mmu_enable) + if (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_GXM) + ret = amhevc_loadmc_ex(VFORMAT_HEVC, 
"h265_mmu", fw->data); + else { + if (!hevc->is_4k) { + /* if an older version of the fw was loaded, */ + /* needs try to load noswap fw because the */ + /* old fw package dose not contain the swap fw. */ + ret = amhevc_loadmc_ex(VFORMAT_HEVC, + "hevc_mmu_swap", fw->data); + if (ret < 0) + ret = amhevc_loadmc_ex(VFORMAT_HEVC, + "h265_mmu", fw->data); + else + hevc->is_swap = true; + } else + ret = amhevc_loadmc_ex(VFORMAT_HEVC, + "h265_mmu", fw->data); + } + else + ret = amhevc_loadmc_ex(VFORMAT_HEVC, NULL, fw->data); + + if (ret < 0) { + amhevc_disable(); + vfree(fw); + pr_err("H265: the %s fw loading failed, err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + return -EBUSY; + } + + vfree(fw); + + hevc->stat |= STAT_MC_LOAD; + +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) + init_detrefill_buf(hevc); +#endif + /* enable AMRISC side protocol */ + vh265_prot_init(hevc); + + if (vdec_request_threaded_irq(VDEC_IRQ_0, vh265_isr, + vh265_isr_thread_fn, + IRQF_ONESHOT,/*run thread on this irq disabled*/ + "vh265-irq", (void *)hevc)) { + hevc_print(hevc, 0, "vh265 irq register error.\n"); + amhevc_disable(); + return -ENOENT; + } + + hevc->stat |= STAT_ISR_REG; + hevc->provider_name = PROVIDER_NAME; + +#ifdef MULTI_INSTANCE_SUPPORT + if (!hevc->is_used_v4l) { + vf_provider_init(&vh265_vf_prov, hevc->provider_name, + &vh265_vf_provider, vdec); + vf_reg_provider(&vh265_vf_prov); + vf_notify_receiver(hevc->provider_name, VFRAME_EVENT_PROVIDER_START, + NULL); + if (hevc->frame_dur != 0) { + if (!is_reset) { + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)hevc->frame_dur)); + fr_hint_status = VDEC_HINTED; + } + } else + fr_hint_status = VDEC_NEED_HINT; + } +#else + vf_provider_init(&vh265_vf_prov, PROVIDER_NAME, &vh265_vf_provider, + hevc); + vf_reg_provider(&vh265_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); + if (hevc->frame_dur 
!= 0) { + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)hevc->frame_dur)); + fr_hint_status = VDEC_HINTED; + } else + fr_hint_status = VDEC_NEED_HINT; +#endif + hevc->stat |= STAT_VF_HOOK; + + //hevc->timer.data = (ulong) hevc; + //hevc->timer.function = vh265_check_timer_func; + hevc->timer.expires = jiffies + PUT_INTERVAL; + + add_timer(&hevc->timer); + + hevc->stat |= STAT_TIMER_ARM; + + if (use_cma) { +#ifdef USE_UNINIT_SEMA + sema_init(&hevc->h265_uninit_done_sema, 0); +#endif + if (h265_task == NULL) { + sema_init(&h265_sema, 1); + h265_task = + kthread_run(h265_task_handle, hevc, + "kthread_h265"); + } + } + /* hevc->stat |= STAT_KTHREAD; */ +#if 0 + if (get_dbg_flag(hevc) & H265_DEBUG_FORCE_CLK) { + hevc_print(hevc, 0, "%s force clk\n", __func__); + WRITE_VREG(HEVC_IQIT_CLK_RST_CTRL, + READ_VREG(HEVC_IQIT_CLK_RST_CTRL) | + ((1 << 2) | (1 << 1))); + WRITE_VREG(HEVC_DBLK_CFG0, + READ_VREG(HEVC_DBLK_CFG0) | ((1 << 2) | + (1 << 1) | 0x3fff0000));/* 2,29:16 */ + WRITE_VREG(HEVC_SAO_CTRL1, READ_VREG(HEVC_SAO_CTRL1) | + (1 << 2)); /* 2 */ + WRITE_VREG(HEVC_MPRED_CTRL1, READ_VREG(HEVC_MPRED_CTRL1) | + (1 << 24)); /* 24 */ + WRITE_VREG(HEVC_STREAM_CONTROL, + READ_VREG(HEVC_STREAM_CONTROL) | + (1 << 15)); /* 15 */ + WRITE_VREG(HEVC_CABAC_CONTROL, READ_VREG(HEVC_CABAC_CONTROL) | + (1 << 13)); /* 13 */ + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, + READ_VREG(HEVC_PARSER_CORE_CONTROL) | + (1 << 15)); /* 15 */ + WRITE_VREG(HEVC_PARSER_INT_CONTROL, + READ_VREG(HEVC_PARSER_INT_CONTROL) | + (1 << 15)); /* 15 */ + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + READ_VREG(HEVC_PARSER_IF_CONTROL) | ((1 << 6) | + (1 << 3) | (1 << 1))); /* 6, 3, 1 */ + WRITE_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG, 0xffffffff); /* 31:0 */ + WRITE_VREG(HEVCD_MCRCC_CTL1, READ_VREG(HEVCD_MCRCC_CTL1) | + (1 << 3)); /* 3 */ + } +#endif +#ifdef SWAP_HEVC_UCODE + if (!tee_enabled() && hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + 
WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, hevc->mc_dma_handle);
+ /*pr_info("write swap buffer %x\n", (u32)(hevc->mc_dma_handle));*/
+ }
+#endif
+
+#ifndef MULTI_INSTANCE_SUPPORT
+ set_vdec_func(&vh265_dec_status);
+#endif
+ amhevc_start();
+
+ WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, 0);
+
+ hevc->stat |= STAT_VDEC_RUN;
+ hevc->init_flag = 1;
+ error_handle_threshold = 30;
+ /* pr_info("%d, vh265_init, RP=0x%x\n",
+ * __LINE__, READ_VREG(HEVC_STREAM_RD_PTR));
+ */
+
+ return 0;
+}
+
+/*
+ * check_dirty_data() - measure the stream-buffer backlog between the read
+ * pointer and the pre-parser write pointer (with wrap handling).  If more
+ * than 1 MiB is pending and no input swap is in flight, discard roughly
+ * half of it — rounded down to 512 KiB (1 << 19) granularity — by
+ * advancing the read pointer, and flag the discard on both vdec and hevc.
+ * Returns 1 when the backlog exceeded the threshold, 0 otherwise.
+ */
+static int check_dirty_data(struct vdec_s *vdec)
+{
+ struct hevc_state_s *hevc =
+ (struct hevc_state_s *)(vdec->private);
+ struct vdec_input_s *input = &vdec->input;
+ u32 wp, rp, level;
+ u32 rp_set;
+
+ rp = STBUF_READ(&vdec->vbuf, get_rp);
+ wp = hevc->pre_parser_wr_ptr;
+
+ if (wp > rp)
+ level = wp - rp;
+ else
+ level = wp + vdec->input.size - rp;
+
+ if (level > 0x100000) {
+ u32 skip_size = ((level >> 1) >> 19) << 19;
+ if (!vdec->input.swap_valid) {
+ hevc_print(hevc , 0, "h265 start data discard level 0x%x, buffer level 0x%x, RP 0x%x, WP 0x%x\n",
+ ((level >> 1) >> 19) << 19, level, rp, wp);
+ /* advance RP by skip_size, wrapping inside the ring buffer */
+ if (wp >= rp) {
+ rp_set = rp + skip_size;
+ }
+ else if ((rp + skip_size) < (input->start + input->size)) {
+ rp_set = rp + skip_size;
+ } else {
+ rp_set = rp + skip_size - input->size;
+ }
+ STBUF_WRITE(&vdec->vbuf, set_rp, rp_set);
+ vdec->discard_start_data_flag = 1;
+ vdec->input.stream_cookie += skip_size;
+ hevc->dirty_shift_flag = 1;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+#ifdef MULTI_INSTANCE_SUPPORT
+/*
+ * reset_process_time() - close the decode timing window opened by
+ * start_process_time(); records the per-instance maximum in milliseconds.
+ */
+static void reset_process_time(struct hevc_state_s *hevc)
+{
+ if (hevc->start_process_time) {
+ unsigned int process_time =
+ 1000 * (jiffies - hevc->start_process_time) / HZ;
+ hevc->start_process_time = 0;
+ if (process_time > max_process_time[hevc->index])
+ max_process_time[hevc->index] = process_time;
+ }
+}
+
+/*
+ * start_process_time() - open the decode watchdog window: stamp the start
+ * time, allow two timeout ticks, and reset LCU progress tracking.
+ */
+static void start_process_time(struct hevc_state_s *hevc)
+{
+ hevc->start_process_time = jiffies;
+ hevc->decode_timeout_count = 2;
+ hevc->last_lcu_idx = 0;
+}
+
+static 
void restart_process_time(struct hevc_state_s *hevc)
{
	/* re-arm the decode watchdog without clearing last_lcu_idx */
	hevc->start_process_time = jiffies;
	hevc->decode_timeout_count = 2;
}

/*
 * timeout_process() - decode-watchdog expiry handler (timer context).
 *
 * If the normal or timeout work is already pending/running, do nothing and
 * let it finish.  Otherwise stop the HEVC core, mark the current picture as
 * errored, declare the frame done and hand off to the timeout work item.
 */
static void timeout_process(struct hevc_state_s *hevc)
{
	/*
	 * In this very timeout point,the vh265_work arrives,
	 * or in some cases the system become slow, then come
	 * this second timeout. In both cases we return.
	 */
	if (work_pending(&hevc->work) ||
			work_busy(&hevc->work) ||
			work_busy(&hevc->timeout_work) ||
			work_pending(&hevc->timeout_work)) {
		pr_err("%s h265[%d] work pending, do nothing.\n",__func__, hevc->index);
		return;
	}

	hevc->timeout_num++;
	/* stop the core before inspecting decode state */
	amhevc_stop();
	read_decode_info(hevc);

	hevc_print(hevc,
		0, "%s decoder timeout\n", __func__);
	check_pic_decoded_error(hevc,
		hevc->pic_decoded_lcu_idx);
	/*The current decoded frame is marked
	error when the decode timeout*/
	if (hevc->cur_pic != NULL)
		hevc->cur_pic->error_mark = 1;
	hevc->decoded_poc = hevc->curr_POC;
	hevc->decoding_pic = NULL;
	hevc->dec_result = DEC_RESULT_DONE;
	reset_process_time(hevc);

	/* last chance: if the normal work got queued meanwhile, let it run */
	if (work_pending(&hevc->work))
		return;
	vdec_schedule_work(&hevc->timeout_work);
}

#ifdef CONSTRAIN_MAX_BUF_NUM
/*
 * get_vf_ref_only_buf_count() - count DPB pictures that are only waiting on
 * the video-frame consumer: not pending output selection, not referenced,
 * but already pushed to the display path (output_ready == 1).
 */
static int get_vf_ref_only_buf_count(struct hevc_state_s *hevc)
{
	struct PIC_s *pic;
	int i;
	int count = 0;
	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		pic = hevc->m_PIC[i];
		if (pic == NULL || pic->index == -1)
			continue;
		if (pic->output_mark == 0 && pic->referenced == 0
			&& pic->output_ready == 1)
			count++;
	}

	return count;
}

/*
 * get_used_buf_count() - count DPB pictures still in use for any reason
 * (pending output, referenced by the decoder, or held by the display path).
 */
static int get_used_buf_count(struct hevc_state_s *hevc)
{
	struct PIC_s *pic;
	int i;
	int count = 0;
	for (i = 0; i < MAX_REF_PIC_NUM; i++) {
		pic = hevc->m_PIC[i];
		if (pic == NULL || pic->index == -1)
			continue;
		if (pic->output_mark != 0 || pic->referenced != 0
			|| pic->output_ready != 0)
			count++;
	}

	return count;
}
#endif

/*
 * is_avaliable_buffer() - [sic] v4l2 path: check whether enough free
 * capture buffers exist to start another decode.  Counts fully idle,
 * CMA-backed DPB entries plus (when the pool is not yet full) buffers the
 * v4l2 framebuffer ops can still provide, and compares the total against
 * run_ready_min_buf_num.
 */
static bool is_avaliable_buffer(struct hevc_state_s *hevc)
{
	struct aml_vcodec_ctx *ctx =
		(struct aml_vcodec_ctx
*)(hevc->v4l2_ctx); + struct PIC_s *pic = NULL; + int i, free_count = 0; + int used_count = 0; + + if ((hevc->used_buf_num == 0) || + (ctx->cap_pool.dec < hevc->used_buf_num)) { + if (ctx->fb_ops.query(&ctx->fb_ops, &hevc->fb_token)) { + free_count = + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) + 1; + } + } + + for (i = 0; i < hevc->used_buf_num; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || + pic->index == -1 || + pic->BUF_index == -1) + continue; + + if ((pic->output_mark == 0) && + (pic->referenced == 0) && + (pic->output_ready == 0) && + (pic->vf_ref == 0) && + pic->cma_alloc_addr) { + free_count++; + } else if (pic->cma_alloc_addr) + used_count++; + } + + ATRACE_COUNTER("V_ST_DEC-free_buff_count", free_count); + ATRACE_COUNTER("V_ST_DEC-used_buff_count", used_count); + + return free_count >= run_ready_min_buf_num ? 1 : 0; +} + +static unsigned char is_new_pic_available(struct hevc_state_s *hevc) +{ + struct PIC_s *new_pic = NULL; + struct PIC_s *pic; + /* recycle un-used pic */ + int i; + int ref_pic = 0; + struct vdec_s *vdec = hw_to_vdec(hevc); + unsigned long flags; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hevc->v4l2_ctx); + /*return 1 if pic_list is not initialized yet*/ + if (hevc->pic_list_init_flag != 3) + return 1; + + spin_lock_irqsave(&lock, flags); + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1 || pic->BUF_index == -1) + continue; + if (pic->referenced == 1) + ref_pic++; + if (pic->output_mark == 0 && pic->referenced == 0 + && pic->output_ready == 0 + && pic->vf_ref == 0 + ) { + if (new_pic) { + if (pic->POC < new_pic->POC) + new_pic = pic; + } else + new_pic = pic; + } + } + if (!hevc->is_used_v4l && new_pic == NULL) { + enum receviver_start_e state = RECEIVER_INACTIVE; + if (vf_get_receiver(vdec->vf_provider_name)) { + state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + if ((state == RECEIVER_STATE_NULL) + || (state == 
RECEIVER_STATE_NONE)) + state = RECEIVER_INACTIVE; + } + if (state == RECEIVER_INACTIVE) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + int poc = INVALID_POC; + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if ((pic->referenced == 0) && + (pic->error_mark == 1) && + (pic->output_mark == 1)) { + if (poc == INVALID_POC || (pic->POC < poc)) { + new_pic = pic; + poc = pic->POC; + } + } + } + if (new_pic) { + new_pic->referenced = 0; + new_pic->output_mark = 0; + put_mv_buf(hevc, new_pic); + hevc_print(hevc, 0, "force release error pic %d recieve_state %d \n", new_pic->POC, state); + } else { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if ((pic->referenced == 1) && (pic->error_mark == 1)) { + spin_unlock_irqrestore(&lock, flags); + flush_output(hevc, pic); + hevc_print(hevc, 0, "DPB error, neeed fornce flush recieve_state %d \n", state); + return 0; + } + } + } + } + } + + if ((new_pic == NULL) || + (hevc->is_used_v4l && + (ctx->param_sets_from_ucode) && + (hevc->v4l_params_parsed) && + (ctx->cap_pool.dec < hevc->used_buf_num) && !is_avaliable_buffer(hevc))) { + int decode_count = 0; + + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1 || pic->BUF_index == -1) + continue; + if ((pic->output_ready == 0) && (pic->output_mark != 0 || + pic->referenced != 0 || + pic->vf_ref != 0)) + decode_count++; + } + + if (decode_count >= + hevc->param.p.sps_max_dec_pic_buffering_minus1_0 + detect_stuck_buffer_margin) { + if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR_MORE) + dump_pic_list(hevc); + if (!(error_handle_policy & 0x400)) { + spin_unlock_irqrestore(&lock, flags); + flush_output(hevc, NULL); + hevc_print(hevc, H265_DEBUG_BUFMGR, "flush dpb, ref_error_count %d, sps_max_dec_pic_buffering_minus1_0 %d\n", + decode_count, hevc->param.p.sps_max_dec_pic_buffering_minus1_0); + return 0; + } + } + } + + spin_unlock_irqrestore(&lock, 
flags); + + return (new_pic != NULL) ? 1 : 0; +} + +static void check_buffer_status(struct hevc_state_s *hevc) +{ + int i; + struct PIC_s *new_pic = NULL; + struct PIC_s *pic; + struct vdec_s *vdec = hw_to_vdec(hevc); + + enum receviver_start_e state = RECEIVER_INACTIVE; + + if (hevc->is_used_v4l) + return; + + if (vf_get_receiver(vdec->vf_provider_name)) { + state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + if ((state == RECEIVER_STATE_NULL) + || (state == RECEIVER_STATE_NONE)) + state = RECEIVER_INACTIVE; + } + if (hevc->timeout_flag == false) + hevc->timeout = jiffies + HZ / 2; + + if (state == RECEIVER_INACTIVE) + hevc->timeout_flag = true; + else + hevc->timeout_flag = false; + + if (state == RECEIVER_INACTIVE && hevc->timeout_flag && + time_after(jiffies, hevc->timeout)) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + int poc = INVALID_POC; + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if ((pic->referenced == 0) && + (pic->error_mark == 1) && + (pic->output_mark == 1)) { + if (poc == INVALID_POC || (pic->POC < poc)) { + new_pic = pic; + poc = pic->POC; + } + } + } + if (new_pic) { + new_pic->referenced = 0; + new_pic->output_mark = 0; + put_mv_buf(hevc, new_pic); + hevc_print(hevc, 0, "check_buffer_status force release error pic %d recieve_state %d \n", new_pic->POC, state); + } else { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + pic = hevc->m_PIC[i]; + if (pic == NULL || pic->index == -1) + continue; + if ((pic->referenced == 1) && (pic->error_mark == 1)) { + flush_output(hevc, pic); + hevc_print(hevc, 0, "check_buffer_status DPB error, neeed fornce flush recieve_state %d \n", state); + break; + } + } + } + } +} + + +static int vmh265_stop(struct hevc_state_s *hevc) +{ + if (hevc->stat & STAT_TIMER_ARM) { + del_timer_sync(&hevc->timer); + hevc->stat &= ~STAT_TIMER_ARM; + } + if (hevc->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hevc->stat &= ~STAT_VDEC_RUN; + } + if (hevc->stat & 
STAT_ISR_REG) {
		vdec_free_irq(VDEC_IRQ_0, (void *)hevc);
		hevc->stat &= ~STAT_ISR_REG;
	}

	/* non-v4l path: end the frame-rate hint and unhook the vf provider */
	if (!hevc->is_used_v4l && hevc->stat & STAT_VF_HOOK) {
		if (fr_hint_status == VDEC_HINTED)
			vf_notify_receiver(hevc->provider_name,
				VFRAME_EVENT_PROVIDER_FR_END_HINT,
				NULL);
		fr_hint_status = VDEC_NO_NEED_HINT;
		vf_unreg_provider(&vh265_vf_prov);
		hevc->stat &= ~STAT_VF_HOOK;
	}

	hevc_local_uninit(hevc);

	if (hevc->gvs)
		kfree(hevc->gvs);
	hevc->gvs = NULL;

	if (use_cma) {
		/* ask the work handler to free canvases, then wait for it */
		hevc->uninit_list = 1;
		reset_process_time(hevc);
		hevc->dec_result = DEC_RESULT_FREE_CANVAS;
		vdec_schedule_work(&hevc->work);
		flush_work(&hevc->work);
#ifdef USE_UNINIT_SEMA
		if (hevc->init_flag) {
			down(&hevc->h265_uninit_done_sema);
		}
#else
		while (hevc->uninit_list) /* wait uninit complete */
			msleep(20);
#endif
	}
	hevc->init_flag = 0;
	hevc->first_sc_checked = 0;
	cancel_work_sync(&hevc->notify_work);
	cancel_work_sync(&hevc->set_clk_work);
	cancel_work_sync(&hevc->timeout_work);
	cancel_work_sync(&hevc->work);
	uninit_mmu_buffers(hevc);

	vfree(hevc->fw);
	hevc->fw = NULL;

	dump_log(hevc);
	return 0;
}

/*
 * get_data_check_sum() - CRC32 over the current frame-based input chunk,
 * used for debug logging.  Maps the chunk if the block is not already
 * virtually mapped.
 *
 * NOTE(review): declared unsigned char, so callers receive only the low
 * 8 bits of the CRC; the full value appears in the log line — confirm
 * whether the truncation is intentional.
 */
static unsigned char get_data_check_sum
	(struct hevc_state_s *hevc, int size)
{
	int sum = 0;
	u8 *data = NULL;

	if (!hevc->chunk->block->is_mapped)
		data = codec_mm_vmap(hevc->chunk->block->start +
			hevc->chunk->offset, size);
	else
		data = ((u8 *)hevc->chunk->block->start_virt) +
			hevc->chunk->offset;

	sum = crc32_le(0, data, size);

	hevc_print(hevc, PRINT_FLAG_VDEC_STATUS,
		"%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. 
%02x %02x %02x %02x\n", + __func__, size, sum, + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + + if (!hevc->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static void vh265_notify_work(struct work_struct *work) +{ + struct hevc_state_s *hevc = + container_of(work, + struct hevc_state_s, + notify_work); + struct vdec_s *vdec = hw_to_vdec(hevc); + + if (hevc->is_used_v4l) + return; + +#ifdef MULTI_INSTANCE_SUPPORT + if (vdec->fr_hint_state == VDEC_NEED_HINT) { + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)hevc->frame_dur)); + vdec->fr_hint_state = VDEC_HINTED; + } else if (fr_hint_status == VDEC_NEED_HINT) { + vf_notify_receiver(hevc->provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)hevc->frame_dur)); + fr_hint_status = VDEC_HINTED; + } +#else + if (fr_hint_status == VDEC_NEED_HINT) + vf_notify_receiver(PROVIDER_NAME, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)hevc->frame_dur)); + fr_hint_status = VDEC_HINTED; + } +#endif + + return; +} + +static void vh265_work_implement(struct hevc_state_s *hevc, + struct vdec_s *vdec,int from) +{ + ATRACE_COUNTER("V_ST_DEC-work_state", hevc->dec_result); + + if (hevc->dec_result == DEC_RESULT_AGAIN) + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_WORKER_AGAIN); + if (hevc->dec_result != DEC_RESULT_NONE) + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_WORKER_START); + if (hevc->dec_result == DEC_RESULT_FREE_CANVAS) { + /*USE_BUF_BLOCK*/ + uninit_pic_list(hevc); + hevc->uninit_list = 0; +#ifdef USE_UNINIT_SEMA + up(&hevc->h265_uninit_done_sema); +#endif + return; + } + + /* finished decoding one frame or error, + * notify vdec core to switch context + */ + if (hevc->pic_list_init_flag == 1 + && (hevc->dec_result != DEC_RESULT_FORCE_EXIT)) { + hevc->pic_list_init_flag = 2; + init_pic_list(hevc); + 
init_pic_list_hw(hevc); + init_buf_spec(hevc); + hevc_print(hevc, 0, + "set pic_list_init_flag to 2\n"); + + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); + return; + } + + hevc_print(hevc, PRINT_FLAG_VDEC_DETAIL, + "%s dec_result %d %x %x %x\n", + __func__, + hevc->dec_result, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR)); + + if (((hevc->dec_result == DEC_RESULT_GET_DATA) || + (hevc->dec_result == DEC_RESULT_GET_DATA_RETRY)) + && (hw_to_vdec(hevc)->next_status != + VDEC_STATUS_DISCONNECTED)) { + if (!vdec_has_more_input(vdec)) { + hevc->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hevc->work); + return; + } + if (!input_frame_based(vdec)) { + int r = vdec_sync_input(vdec); + if (r >= 0x200) { + WRITE_VREG(HEVC_DECODE_SIZE, + READ_VREG(HEVC_DECODE_SIZE) + r); + + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA %x %x %x mpc %x size 0x%x\n", + __func__, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + READ_VREG(HEVC_MPC_E), r); + + start_process_time(hevc); + if (READ_VREG(HEVC_DEC_STATUS_REG) + == HEVC_DECODE_BUFEMPTY2) + WRITE_VREG(HEVC_DEC_STATUS_REG, + HEVC_ACTION_DONE); + else + WRITE_VREG(HEVC_DEC_STATUS_REG, + HEVC_ACTION_DEC_CONT); + } else { + hevc->dec_result = DEC_RESULT_GET_DATA_RETRY; + vdec_schedule_work(&hevc->work); + } + return; + } + + /*below for frame_base*/ + if (hevc->dec_result == DEC_RESULT_GET_DATA) { + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA %x %x %x mpc %x\n", + __func__, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + READ_VREG(HEVC_MPC_E)); + mutex_lock(&hevc->chunks_mutex); + vdec_vframe_dirty(vdec, hevc->chunk); + hevc->chunk = NULL; + mutex_unlock(&hevc->chunks_mutex); + vdec_clean_input(vdec); + } + + /*if (is_new_pic_available(hevc)) {*/ + if (run_ready(vdec, VDEC_HEVC)) { + int r; + int decode_size; + + r = 
vdec_prepare_input(vdec, &hevc->chunk); + if (r < 0) { + hevc->dec_result = DEC_RESULT_GET_DATA_RETRY; + + hevc_print(hevc, + PRINT_FLAG_VDEC_DETAIL, + "amvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&hevc->work); + return; + } + hevc->dec_result = DEC_RESULT_NONE; + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: chunk size 0x%x sum 0x%x mpc %x\n", + __func__, r, + (get_dbg_flag(hevc) & PRINT_FLAG_VDEC_STATUS) ? + get_data_check_sum(hevc, r) : 0, + READ_VREG(HEVC_MPC_E)); + + if (get_dbg_flag(hevc) & PRINT_FRAMEBASE_DATA) { + int jj; + u8 *data = NULL; + PR_INIT(128); + + if (!hevc->chunk->block->is_mapped) + data = codec_mm_vmap( + hevc->chunk->block->start + + hevc->chunk->offset, r); + else + data = ((u8 *) + hevc->chunk->block->start_virt) + + hevc->chunk->offset; + + for (jj = 0; jj < r; jj++) { + if ((jj & 0xf) == 0) + PR_FILL("%06x:", jj); + PR_FILL("%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + PR_INFO(hevc->index); + } + PR_INFO(hevc->index); + + if (!hevc->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + + decode_size = hevc->chunk->size + + (hevc->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + WRITE_VREG(HEVC_DECODE_SIZE, + READ_VREG(HEVC_DECODE_SIZE) + decode_size); + + vdec_enable_input(vdec); + + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: mpc %x\n", + __func__, READ_VREG(HEVC_MPC_E)); + + start_process_time(hevc); + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + } else{ + hevc->dec_result = DEC_RESULT_GET_DATA_RETRY; + + /*hevc_print(hevc, PRINT_FLAG_VDEC_DETAIL, + * "amvdec_vh265: Insufficient data\n"); + */ + + vdec_schedule_work(&hevc->work); + } + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_WORKER_END); + return; + } else if (hevc->dec_result == DEC_RESULT_DONE) { + /* if (!hevc->ctx_valid) + hevc->ctx_valid = 1; */ + int i; + decode_frame_count[hevc->index]++; +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) { + if (hevc->delrefill_check == 2) { + 
delrefill(hevc); + amhevc_stop(); + } + } +#endif + if (hevc->mmu_enable && ((hevc->double_write_mode & 0x10) == 0)) { + hevc->used_4k_num = + READ_VREG(HEVC_SAO_MMU_STATUS) >> 16; + if (hevc->used_4k_num >= 0 && + hevc->cur_pic && + hevc->cur_pic->scatter_alloc + == 1) + recycle_mmu_buf_tail(hevc, hevc->m_ins_flag); + } + hevc->pic_decoded_lcu_idx = + READ_VREG(HEVC_PARSER_LCU_START) + & 0xffffff; + + if (vdec->master == NULL && vdec->slave == NULL && + hevc->empty_flag == 0) { + hevc->over_decode = + (READ_VREG(HEVC_SHIFT_STATUS) >> 15) & 0x1; + if (hevc->over_decode) + hevc_print(hevc, 0, + "!!!Over decode\n"); + } + + if (is_log_enable(hevc)) + add_log(hevc, + "%s dec_result %d lcu %d used_mmu %d shiftbyte 0x%x decbytes 0x%x", + __func__, + hevc->dec_result, + hevc->pic_decoded_lcu_idx, + hevc->used_4k_num, + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_BYTE_COUNT) - + hevc->start_shift_bytes + ); + + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s dec_result %d (%x %x %x) lcu %d used_mmu %d shiftbyte 0x%x decbytes 0x%x\n", + __func__, + hevc->dec_result, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + hevc->pic_decoded_lcu_idx, + hevc->used_4k_num, + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_BYTE_COUNT) - + hevc->start_shift_bytes + ); + + hevc->used_4k_num = -1; + + check_pic_decoded_error(hevc, + hevc->pic_decoded_lcu_idx); + if ((error_handle_policy & 0x100) == 0 && hevc->cur_pic) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + struct PIC_s *pic; + pic = hevc->m_PIC[i]; + if (!pic || pic->index == -1) + continue; + if ((hevc->cur_pic->POC + poc_num_margin < pic->POC) && (pic->referenced == 0) && + (pic->output_mark == 1) && (pic->output_ready == 0)) { + hevc->poc_error_count++; + break; + } + } + if (i == MAX_REF_PIC_NUM) + hevc->poc_error_count = 0; + if (hevc->poc_error_count >= poc_error_limit) { + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + struct PIC_s *pic; + pic = 
hevc->m_PIC[i]; + if (!pic || pic->index == -1) + continue; + if ((hevc->cur_pic->POC + poc_num_margin < pic->POC) && (pic->referenced == 0) && + (pic->output_mark == 1) && (pic->output_ready == 0)) { + pic->output_mark = 0; + hevc_print(hevc, 0, "DPB poc error, remove error frame\n"); + } + } + } + } + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +#if 1 + if (vdec->slave) { + if (dv_debug & 0x1) + vdec_set_flag(vdec->slave, + VDEC_FLAG_SELF_INPUT_CONTEXT); + else + vdec_set_flag(vdec->slave, + VDEC_FLAG_OTHER_INPUT_CONTEXT); + } +#else + if (vdec->slave) { + if (no_interleaved_el_slice) + vdec_set_flag(vdec->slave, + VDEC_FLAG_INPUT_KEEP_CONTEXT); + /* this will move real HW pointer for input */ + else + vdec_set_flag(vdec->slave, 0); + /* this will not move real HW pointer + *and SL layer decoding + *will start from same stream position + *as current BL decoder + */ + } +#endif +#endif +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + hevc->shift_byte_count_lo + = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + if (vdec->slave) { + /*cur is base, found enhance*/ + struct hevc_state_s *hevc_el = + (struct hevc_state_s *) + vdec->slave->private; + if (hevc_el) + hevc_el->shift_byte_count_lo = + hevc->shift_byte_count_lo; + } else if (vdec->master) { + /*cur is enhance, found base*/ + struct hevc_state_s *hevc_ba = + (struct hevc_state_s *) + vdec->master->private; + if (hevc_ba) + hevc_ba->shift_byte_count_lo = + hevc->shift_byte_count_lo; + } +#endif + mutex_lock(&hevc->chunks_mutex); + vdec_vframe_dirty(hw_to_vdec(hevc), hevc->chunk); + hevc->chunk = NULL; + mutex_unlock(&hevc->chunks_mutex); + } else if (hevc->dec_result == DEC_RESULT_AGAIN) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + if (!vdec_has_more_input(vdec)) { + hevc->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hevc->work); + return; + } +#ifdef AGAIN_HAS_THRESHOLD + hevc->next_again_flag = 1; +#endif + if (input_stream_based(vdec)) { + u32 rp, 
wp, level; + struct vdec_input_s *input = &vdec->input; + rp = STBUF_READ(&vdec->vbuf, get_rp);; + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = input->size + wp - rp; + else + level = wp - rp; + if ((level >= dirty_buffersize_threshold) && + (hevc->pre_parser_video_rp == + STBUF_READ(&vdec->vbuf, get_rp)) && + (hevc->pre_parser_video_wp == + STBUF_READ(&vdec->vbuf, get_wp))) { + if (hevc->again_count == 0) { + hevc->again_timeout_jiffies = + get_jiffies_64() + dirty_time_threshold * HZ/1000; + } + hevc->again_count++; + } + else + hevc->again_count = 0; + + hevc->pre_parser_video_rp = STBUF_READ(&vdec->vbuf, get_rp); + hevc->pre_parser_video_wp = STBUF_READ(&vdec->vbuf, get_wp); + + if (((hevc->again_count > dirty_count_threshold) && + time_after64(get_jiffies_64(), hevc->again_timeout_jiffies))) { + mutex_lock(&hevc->chunks_mutex); + hevc->again_count = 0; + vdec_vframe_dirty(hw_to_vdec(hevc), hevc->chunk); + hevc->chunk = NULL; + hevc_print(hevc, PRINT_FLAG_VDEC_DETAIL, + "Discard dirty data\n"); + mutex_unlock(&hevc->chunks_mutex); + } else if ((((error_handle_policy & 0x200) == 0) && + (hevc->pic_list_init_flag == 0))) { + check_dirty_data(vdec); + } + } + } else if (hevc->dec_result == DEC_RESULT_EOS) { + struct PIC_s *pic; + hevc->eos = 1; +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if ((!hevc->discard_dv_data) && aux_data_is_avaible(hevc)) + if (hevc->decoding_pic) + dolby_get_meta(hevc); +#endif + check_pic_decoded_error(hevc, + hevc->pic_decoded_lcu_idx); + pic = get_pic_by_POC(hevc, hevc->curr_POC); + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: end of stream, last dec poc %d => 0x%pf\n", + __func__, hevc->curr_POC, pic); + flush_output(hevc, pic); + /* dummy vf with eos flag to backend */ + if (hevc->is_used_v4l) { + ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__); + notify_v4l_eos(hw_to_vdec(hevc)); + ATRACE_COUNTER("V_ST_DEC-submit_eos", 0); + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + 
hevc->shift_byte_count_lo + = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + if (vdec->slave) { + /*cur is base, found enhance*/ + struct hevc_state_s *hevc_el = + (struct hevc_state_s *) + vdec->slave->private; + if (hevc_el) + hevc_el->shift_byte_count_lo = + hevc->shift_byte_count_lo; + } else if (vdec->master) { + /*cur is enhance, found base*/ + struct hevc_state_s *hevc_ba = + (struct hevc_state_s *) + vdec->master->private; + if (hevc_ba) + hevc_ba->shift_byte_count_lo = + hevc->shift_byte_count_lo; + } +#endif + mutex_lock(&hevc->chunks_mutex); + vdec_vframe_dirty(hw_to_vdec(hevc), hevc->chunk); + hevc->chunk = NULL; + mutex_unlock(&hevc->chunks_mutex); + } else if (hevc->dec_result == DEC_RESULT_FORCE_EXIT) { + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: force exit\n", + __func__); + if (hevc->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hevc->stat &= ~STAT_VDEC_RUN; + } + if (hevc->stat & STAT_ISR_REG) { + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0); + vdec_free_irq(VDEC_IRQ_0, (void *)hevc); + hevc->stat &= ~STAT_ISR_REG; + } + hevc_print(hevc, 0, "%s: force exit end\n", + __func__); + } + + if (hevc->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hevc->stat &= ~STAT_VDEC_RUN; + } + + if (hevc->stat & STAT_TIMER_ARM) { + del_timer_sync(&hevc->timer); + hevc->stat &= ~STAT_TIMER_ARM; + } + + ATRACE_COUNTER(hevc->trace.decode_work_time_name, TRACE_WORK_WAIT_SEARCH_DONE_START); + wait_hevc_search_done(hevc); + ATRACE_COUNTER(hevc->trace.decode_work_time_name, TRACE_WORK_WAIT_SEARCH_DONE_END); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (hevc->switch_dvlayer_flag) { + if (vdec->slave) + vdec_set_next_sched(vdec, vdec->slave); + else if (vdec->master) + vdec_set_next_sched(vdec, vdec->master); + } else if (vdec->slave || vdec->master) + vdec_set_next_sched(vdec, vdec); +#endif + + if (from == 1) { + /* This is a timeout work */ + if (work_pending(&hevc->work)) { + /* + * The vh265_work arrives at the last second, + * give it a chance to handle the scenario. 
	 */
			return;
			//cancel_work_sync(&hevc->work);//reserved for future considraion
		}
	}

	ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_WORKER_END);

	/* mark itself has all HW resource released and input released */
	if (vdec->parallel_dec == 1)
		vdec_core_finish_run(vdec, CORE_MASK_HEVC);
	else
		vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC);

	if (hevc->is_used_v4l) {
		struct aml_vcodec_ctx *ctx =
			(struct aml_vcodec_ctx *)(hevc->v4l2_ctx);

		/* let a waiting v4l2 caller proceed while headers are unparsed */
		if (ctx->param_sets_from_ucode &&
			!hevc->v4l_params_parsed)
			vdec_v4l_write_frame_sync(ctx);
	}

	ATRACE_COUNTER("V_ST_DEC-chunk_size", 0);

	/* hand control back to the vdec core scheduler */
	if (hevc->vdec_cb)
		hevc->vdec_cb(hw_to_vdec(hevc), hevc->vdec_cb_arg);
}

/* normal bottom-half entry: from == 0 */
static void vh265_work(struct work_struct *work)
{
	struct hevc_state_s *hevc = container_of(work,
		struct hevc_state_s, work);
	struct vdec_s *vdec = hw_to_vdec(hevc);

	vh265_work_implement(hevc, vdec, 0);
}

/* timeout bottom-half entry: defers to vh265_work if it is already queued */
static void vh265_timeout_work(struct work_struct *work)
{
	struct hevc_state_s *hevc = container_of(work,
		struct hevc_state_s, timeout_work);
	struct vdec_s *vdec = hw_to_vdec(hevc);

	if (work_pending(&hevc->work))
		return;
	hevc->timeout_processing = 1;
	vh265_work_implement(hevc, vdec, 1);
}


/* re-program protocol registers before each run (context restore) */
static int vh265_hw_ctx_restore(struct hevc_state_s *hevc)
{
	/* new to do ... */
	vh265_prot_init(hevc);
	return 0;
}
/*
 * run_ready() - scheduler callback: return the HW core mask this instance
 * can use for the next decode, or 0 when it must not be scheduled yet
 * (pending work, stream level too low, AGAIN throttling, buffer limits,
 * v4l capture-buffer availability).
 */
static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask)
{
	struct hevc_state_s *hevc =
		(struct hevc_state_s *)vdec->private;
	int tvp = vdec_secure(hw_to_vdec(hevc)) ?
+ CODEC_MM_FLAGS_TVP : 0; + bool ret = 0; + if (step == 0x12) + return 0; + else if (step == 0x11) + step = 0x12; + + if (hevc->fatal_error & DECODER_FATAL_ERROR_NO_MEM) + return 0; + + if (hevc->eos) + return 0; + if (hevc->timeout_processing && + (work_pending(&hevc->work) || + work_busy(&hevc->work) || + work_busy(&hevc->timeout_work) || + work_pending(&hevc->timeout_work))) { + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "h265 work pending,not ready for run.\n"); + return 0; + } + hevc->timeout_processing = 0; + if (!hevc->first_sc_checked && hevc->mmu_enable) { + int size; + void * mmu_box; + + if (hevc->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hevc->v4l2_ctx); + mmu_box = ctx->mmu_box; + } else + mmu_box = hevc->mmu_box; + + size = decoder_mmu_box_sc_check(mmu_box, tvp); + hevc->first_sc_checked =1; + hevc_print(hevc, 0, + "vh265 cached=%d need_size=%d speed= %d ms\n", + size, (hevc->need_cache_size >> PAGE_SHIFT), + (int)(get_jiffies_64() - hevc->sc_start_time) * 1000/HZ); + } + if (vdec_stream_based(vdec) && (hevc->init_flag == 0) + && pre_decode_buf_level != 0) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = vdec->input.size + wp - rp; + else + level = wp - rp; + + if (level < pre_decode_buf_level) + return 0; + } + +#ifdef AGAIN_HAS_THRESHOLD + if (hevc->next_again_flag && + (!vdec_frame_based(vdec))) { + u32 parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + if (parser_wr_ptr >= hevc->pre_parser_wr_ptr && + (parser_wr_ptr - hevc->pre_parser_wr_ptr) < + again_threshold) { + int r = vdec_sync_input(vdec); + hevc_print(hevc, + PRINT_FLAG_VDEC_DETAIL, "%s buf lelvel:%x\n", __func__, r); + return 0; + } + } +#endif + + if (disp_vframe_valve_level && + kfifo_len(&hevc->display_q) >= + disp_vframe_valve_level) { + hevc->valve_count--; + if (hevc->valve_count <= 0) + hevc->valve_count = 2; + else + return 0; + } + + ret = 
is_new_pic_available(hevc); + if (!ret) { + hevc_print(hevc, + PRINT_FLAG_VDEC_DETAIL, "%s=>%d\r\n", + __func__, ret); + } + +#ifdef CONSTRAIN_MAX_BUF_NUM + if (hevc->pic_list_init_flag == 3 && !hevc->is_used_v4l) { + if (run_ready_max_vf_only_num > 0 && + get_vf_ref_only_buf_count(hevc) >= + run_ready_max_vf_only_num + ) + ret = 0; + if (run_ready_display_q_num > 0 && + kfifo_len(&hevc->display_q) >= + run_ready_display_q_num) + ret = 0; + + /*avoid more buffers consumed when + switching resolution*/ + if (run_ready_max_buf_num == 0xff && + (get_used_buf_count(hevc) >= + v4l_parser_work_pic_num(hevc) + + get_dynamic_buf_num_margin(hevc))) { + check_buffer_status(hevc); + ret = 0; + } + else if (run_ready_max_buf_num && + get_used_buf_count(hevc) >= + run_ready_max_buf_num) + ret = 0; + } +#endif + + if (hevc->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hevc->v4l2_ctx); + + if (ctx->param_sets_from_ucode) { + if (hevc->v4l_params_parsed) { + ret = is_avaliable_buffer(hevc) ? 1 : 0; + } else { + ret = ctx->v4l_resolution_change ? 0 : 1; + } + } else if (!ctx->v4l_codec_dpb_ready) { + if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < + run_ready_min_buf_num) + ret = 0; + } + } + + if (ret) + not_run_ready[hevc->index] = 0; + else + not_run_ready[hevc->index]++; + if (vdec->parallel_dec == 1) + return ret ? (CORE_MASK_HEVC) : 0; + else + return ret ? 
(CORE_MASK_VDEC_1 | CORE_MASK_HEVC) : 0; +} + +static void run(struct vdec_s *vdec, unsigned long mask, + void (*callback)(struct vdec_s *, void *), void *arg) +{ + struct hevc_state_s *hevc = + (struct hevc_state_s *)vdec->private; + int r, loadr = 0; + unsigned char check_sum = 0; + + run_count[hevc->index]++; + hevc->vdec_cb_arg = arg; + hevc->vdec_cb = callback; + hevc->aux_data_dirty = 1; + + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_RUN_START); + hevc_reset_core(vdec); + +#ifdef AGAIN_HAS_THRESHOLD + if (vdec_stream_based(vdec)) { + hevc->pre_parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + hevc->next_again_flag = 0; + } +#endif + r = vdec_prepare_input(vdec, &hevc->chunk); + if (r < 0) { + input_empty[hevc->index]++; + hevc->dec_result = DEC_RESULT_AGAIN; + hevc_print(hevc, PRINT_FLAG_VDEC_DETAIL, + "ammvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&hevc->work); + return; + } + + ATRACE_COUNTER("V_ST_DEC-chunk_size", r); + + input_empty[hevc->index] = 0; + hevc->dec_result = DEC_RESULT_NONE; + if (vdec_frame_based(vdec) && + ((get_dbg_flag(hevc) & PRINT_FLAG_VDEC_STATUS) + || is_log_enable(hevc)) && + !vdec_secure(vdec)) + check_sum = get_data_check_sum(hevc, r); + + if (is_log_enable(hevc)) + add_log(hevc, + "%s: size 0x%x sum 0x%x shiftbyte 0x%x", + __func__, r, + check_sum, + READ_VREG(HEVC_SHIFT_BYTE_COUNT) + ); + if ((hevc->dirty_shift_flag == 1) && !(vdec->input.swap_valid)) { + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, vdec->input.stream_cookie); + } + hevc->start_shift_bytes = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + + hevc_print(hevc, PRINT_FLAG_VDEC_STATUS, + "%s: size 0x%x sum 0x%x (%x %x %x %x %x) byte count %x\n", + __func__, r, + check_sum, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + STBUF_READ(&vdec->vbuf, get_rp), + STBUF_READ(&vdec->vbuf, get_wp), + hevc->start_shift_bytes + ); + if ((get_dbg_flag(hevc) & PRINT_FRAMEBASE_DATA) && + input_frame_based(vdec) && + 
!vdec_secure(vdec)) { + int jj; + u8 *data = NULL; + PR_INIT(128); + + if (!hevc->chunk->block->is_mapped) + data = codec_mm_vmap(hevc->chunk->block->start + + hevc->chunk->offset, r); + else + data = ((u8 *)hevc->chunk->block->start_virt) + + hevc->chunk->offset; + + for (jj = 0; jj < r; jj++) { + if ((jj & 0xf) == 0) + PR_FILL("%06x:", jj); + PR_FILL("%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + PR_INFO(hevc->index); + } + PR_INFO(hevc->index); + + if (!hevc->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + ATRACE_COUNTER(hevc->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_START); + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + if (tee_enabled() && hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, hevc->swap_addr); + } else { + if (hevc->mmu_enable) + if (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_GXM) + loadr = amhevc_vdec_loadmc_ex(VFORMAT_HEVC, vdec, + "h265_mmu", hevc->fw->data); + else { + if (!hevc->is_4k) { + /* if an older version of the fw was loaded, */ + /* needs try to load noswap fw because the */ + /* old fw package dose not contain the swap fw.*/ + loadr = amhevc_vdec_loadmc_ex( + VFORMAT_HEVC, vdec, + "hevc_mmu_swap", + hevc->fw->data); + if (loadr < 0) + loadr = amhevc_vdec_loadmc_ex( + VFORMAT_HEVC, vdec, + "h265_mmu", + hevc->fw->data); + else + hevc->is_swap = true; + } else + loadr = amhevc_vdec_loadmc_ex( + VFORMAT_HEVC, vdec, + "h265_mmu", hevc->fw->data); + } + else + loadr = amhevc_vdec_loadmc_ex(VFORMAT_HEVC, vdec, + NULL, hevc->fw->data); + + if (loadr < 0) { + amhevc_disable(); + hevc_print(hevc, 0, "H265: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", loadr); + hevc->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hevc->work); + return; + } + + if (tee_enabled() && hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) + hevc->swap_addr = READ_VREG(HEVC_STREAM_SWAP_BUFFER2); +#ifdef DETREFILL_ENABLE + if (hevc->is_swap && + get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_GXM) + init_detrefill_buf(hevc); +#endif + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_HEVC; + } + ATRACE_COUNTER(hevc->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_END); + + ATRACE_COUNTER(hevc->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_START); + if (vh265_hw_ctx_restore(hevc) < 0) { + vdec_schedule_work(&hevc->work); + return; + } + ATRACE_COUNTER(hevc->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_END); + vdec_enable_input(vdec); + + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + + if (vdec_frame_based(vdec)) { + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, 0); + r = hevc->chunk->size + + (hevc->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + hevc->decode_size = r; + if (vdec->mvfrm) + vdec->mvfrm->frame_size = hevc->chunk->size; + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else { + if (vdec->master || vdec->slave) + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, + hevc->shift_byte_count_lo); + } +#endif + WRITE_VREG(HEVC_DECODE_SIZE, r); + /*WRITE_VREG(HEVC_DECODE_COUNT, hevc->decode_idx);*/ + hevc->init_flag = 1; + + if (hevc->pic_list_init_flag == 3) + init_pic_list_hw(hevc); + + backup_decode_state(hevc); + + start_process_time(hevc); + mod_timer(&hevc->timer, jiffies); + hevc->stat |= STAT_TIMER_ARM; + hevc->stat |= STAT_ISR_REG; + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + amhevc_start(); + hevc->stat |= STAT_VDEC_RUN; + + ATRACE_COUNTER(hevc->trace.decode_time_name, DECODER_RUN_END); +} + +static void aml_free_canvas(struct vdec_s *vdec) +{ + int i; + struct hevc_state_s *hevc = + (struct hevc_state_s *)vdec->private; + + for (i = 0; i < MAX_REF_PIC_NUM; i++) 
{ + struct PIC_s *pic = hevc->m_PIC[i]; + + if (pic) { + if (vdec->parallel_dec == 1) { + vdec->free_canvas_ex(pic->y_canvas_index, vdec->id); + vdec->free_canvas_ex(pic->uv_canvas_index, vdec->id); + } + pic->cma_alloc_addr = 0; + } + hevc->buffer_wrap[i] = i; + } +} + +static void reset(struct vdec_s *vdec) +{ + struct hevc_state_s *hevc = + (struct hevc_state_s *)vdec->private; + int i; + + cancel_work_sync(&hevc->work); + cancel_work_sync(&hevc->notify_work); + if (hevc->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hevc->stat &= ~STAT_VDEC_RUN; + } + + if (hevc->stat & STAT_TIMER_ARM) { + del_timer_sync(&hevc->timer); + hevc->stat &= ~STAT_TIMER_ARM; + } + hevc->dec_result = DEC_RESULT_NONE; + reset_process_time(hevc); + hevc->pic_list_init_flag = 0; + dealloc_mv_bufs(hevc); + aml_free_canvas(vdec); + if (!hevc->resolution_change) + hevc_local_uninit(hevc); + if (vh265_local_init(hevc) < 0) + pr_debug(" %s local init fail\n", __func__); + for (i = 0; i < BUF_POOL_SIZE; i++) { + hevc->m_BUF[i].start_adr = 0; + } + + atomic_set(&hevc->vf_pre_count, 0); + atomic_set(&hevc->vf_get_count, 0); + atomic_set(&hevc->vf_put_count, 0); + hevc->eos = 0; + hevc->resolution_change = false; + + hevc_print(hevc, PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__); +} + +static irqreturn_t vh265_irq_cb(struct vdec_s *vdec, int irq) +{ + struct hevc_state_s *hevc = + (struct hevc_state_s *)vdec->private; + + return vh265_isr(0, hevc); +} + +static irqreturn_t vh265_threaded_irq_cb(struct vdec_s *vdec, int irq) +{ + struct hevc_state_s *hevc = + (struct hevc_state_s *)vdec->private; + + return vh265_isr_thread_fn(0, hevc); +} +#endif + +/****************************************/ +#ifdef CONFIG_PM +static int h265_suspend(struct device *dev) +{ + amhevc_suspend(to_platform_device(dev), dev->power.power_state); + return 0; +} + +static int h265_resume(struct device *dev) +{ + amhevc_resume(to_platform_device(dev)); + return 0; +} + +static const struct dev_pm_ops h265_pm_ops = { + 
SET_SYSTEM_SLEEP_PM_OPS(h265_suspend, h265_resume) +}; +#endif + +#ifdef MULTI_INSTANCE_SUPPORT +static void vh265_dump_state(struct vdec_s *vdec) +{ + int i; + struct hevc_state_s *hevc = + (struct hevc_state_s *)vdec->private; + hevc_print(hevc, 0, + "====== %s\n", __func__); + + hevc_print(hevc, 0, + "width/height (%d/%d), reorder_pic_num %d ip_mode %d buf count(bufspec size) %d, video_signal_type 0x%x, is_swap %d i_only 0x%x\n", + hevc->frame_width, + hevc->frame_height, + hevc->sps_num_reorder_pics_0, + hevc->ip_mode, + hevc->used_buf_num, + hevc->video_signal_type_debug, + hevc->is_swap, + hevc->i_only + ); + + hevc_print(hevc, 0, + "is_framebase(%d), eos %d, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d\n", + input_frame_based(vdec), + hevc->eos, + hevc->dec_result, + decode_frame_count[hevc->index], + display_frame_count[hevc->index], + run_count[hevc->index], + not_run_ready[hevc->index], + input_empty[hevc->index] + ); + + if (hevc->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + hevc_print(hevc, 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + + hevc_print(hevc, 0, + "%s, newq(%d/%d), dispq(%d/%d), vf prepare/get/put (%d/%d/%d), pic_list_init_flag(%d), is_new_pic_available(%d)\n", + __func__, + kfifo_len(&hevc->newframe_q), + VF_POOL_SIZE, + kfifo_len(&hevc->display_q), + VF_POOL_SIZE, + hevc->vf_pre_count, + hevc->vf_get_count, + hevc->vf_put_count, + hevc->pic_list_init_flag, + is_new_pic_available(hevc) + ); + + dump_pic_list(hevc); + + for (i = 0; i < BUF_POOL_SIZE; i++) { + hevc_print(hevc, 0, + "Buf(%d) start_adr 0x%x header_addr 0x%x size 0x%x used %d\n", + i, + hevc->m_BUF[i].start_adr, + hevc->m_BUF[i].header_addr, + hevc->m_BUF[i].size, + hevc->m_BUF[i].used_flag); + } + + for (i = 0; i < MAX_REF_PIC_NUM; i++) { + hevc_print(hevc, 0, + "mv_Buf(%d) 
start_adr 0x%x size 0x%x used %d\n", + i, + hevc->m_mv_BUF[i].start_adr, + hevc->m_mv_BUF[i].size, + hevc->m_mv_BUF[i].used_flag); + } + + hevc_print(hevc, 0, + "HEVC_DEC_STATUS_REG=0x%x\n", + READ_VREG(HEVC_DEC_STATUS_REG)); + hevc_print(hevc, 0, + "HEVC_MPC_E=0x%x\n", + READ_VREG(HEVC_MPC_E)); + hevc_print(hevc, 0, + "HEVC_DECODE_MODE=0x%x\n", + READ_VREG(HEVC_DECODE_MODE)); + hevc_print(hevc, 0, + "HEVC_DECODE_MODE2=0x%x\n", + READ_VREG(HEVC_DECODE_MODE2)); + hevc_print(hevc, 0, + "NAL_SEARCH_CTL=0x%x\n", + READ_VREG(NAL_SEARCH_CTL)); + hevc_print(hevc, 0, + "HEVC_PARSER_LCU_START=0x%x\n", + READ_VREG(HEVC_PARSER_LCU_START)); + hevc_print(hevc, 0, + "HEVC_DECODE_SIZE=0x%x\n", + READ_VREG(HEVC_DECODE_SIZE)); + hevc_print(hevc, 0, + "HEVC_SHIFT_BYTE_COUNT=0x%x\n", + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + hevc_print(hevc, 0, + "HEVC_STREAM_START_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_START_ADDR)); + hevc_print(hevc, 0, + "HEVC_STREAM_END_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_END_ADDR)); + hevc_print(hevc, 0, + "HEVC_STREAM_LEVEL=0x%x\n", + READ_VREG(HEVC_STREAM_LEVEL)); + hevc_print(hevc, 0, + "HEVC_STREAM_WR_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_WR_PTR)); + hevc_print(hevc, 0, + "HEVC_STREAM_RD_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_RD_PTR)); + hevc_print(hevc, 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + hevc_print(hevc, 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (input_frame_based(vdec) && + (get_dbg_flag(hevc) & PRINT_FRAMEBASE_DATA) + ) { + int jj; + PR_INIT(128); + + if (hevc->chunk && hevc->chunk->block && + hevc->chunk->size > 0) { + u8 *data = NULL; + if (!hevc->chunk->block->is_mapped) + data = codec_mm_vmap(hevc->chunk->block->start + + hevc->chunk->offset, hevc->chunk->size); + else + data = ((u8 *)hevc->chunk->block->start_virt) + + hevc->chunk->offset; + hevc_print(hevc, 0, + "frame data size 0x%x\n", + hevc->chunk->size); + for (jj = 0; jj < hevc->chunk->size; jj++) { + if ((jj & 0xf) == 0) + 
PR_FILL("%06x:", jj); + PR_FILL("%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + PR_INFO(hevc->index); + } + PR_INFO(hevc->index); + + if (!hevc->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } + +} + + +static int ammvdec_h265_probe(struct platform_device *pdev) +{ + + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct hevc_state_s *hevc = NULL; + int ret; + int i; +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + int config_val; +#endif + + if (pdata == NULL) { + pr_info("\nammvdec_h265 memory resource undefined.\n"); + return -EFAULT; + } + + /* hevc = (struct hevc_state_s *)devm_kzalloc(&pdev->dev, + sizeof(struct hevc_state_s), GFP_KERNEL); */ + hevc = vmalloc(sizeof(struct hevc_state_s)); + if (hevc == NULL) { + pr_info("\nammvdec_h265 device data allocation failed\n"); + return -ENOMEM; + } + memset(hevc, 0, sizeof(struct hevc_state_s)); + + /* the ctx from v4l2 driver. */ + hevc->v4l2_ctx = pdata->private; + + pdata->private = hevc; + pdata->dec_status = vh265_dec_status; + pdata->set_trickmode = vh265_set_trickmode; + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vh265_irq_cb; + pdata->threaded_irq_handler = vh265_threaded_irq_cb; + pdata->dump_state = vh265_dump_state; + + hevc->index = pdev->id; + hevc->m_ins_flag = 1; + + + if (is_rdma_enable()) { + hevc->rdma_adr = dma_alloc_coherent(amports_get_dma_device(), RDMA_SIZE, &hevc->rdma_phy_adr, GFP_KERNEL); + for (i = 0; i < SCALELUT_DATA_WRITE_NUM; i++) { + hevc->rdma_adr[i * 4] = HEVC_IQIT_SCALELUT_WR_ADDR & 0xfff; + hevc->rdma_adr[i * 4 + 1] = i; + hevc->rdma_adr[i * 4 + 2] = HEVC_IQIT_SCALELUT_DATA & 0xfff; + hevc->rdma_adr[i * 4 + 3] = 0; + if (i == SCALELUT_DATA_WRITE_NUM - 1) { + hevc->rdma_adr[i * 4 + 2] = (HEVC_IQIT_SCALELUT_DATA & 0xfff) | 0x20000; + } + } + } + snprintf(hevc->trace.vdec_name, sizeof(hevc->trace.vdec_name), + "h265-%d", hevc->index); + snprintf(hevc->trace.pts_name, sizeof(hevc->trace.pts_name), 
+ "%s-timestamp", hevc->trace.vdec_name); + snprintf(hevc->trace.vf_get_name, sizeof(hevc->trace.vf_get_name), + "%s-vf_get", hevc->trace.vdec_name); + snprintf(hevc->trace.vf_put_name, sizeof(hevc->trace.vf_put_name), + "%s-vf_put", hevc->trace.vdec_name); + snprintf(hevc->trace.set_canvas0_addr, sizeof(hevc->trace.set_canvas0_addr), + "%s-set_canvas0_addr", hevc->trace.vdec_name); + snprintf(hevc->trace.get_canvas0_addr, sizeof(hevc->trace.get_canvas0_addr), + "%s-get_canvas0_addr", hevc->trace.vdec_name); + snprintf(hevc->trace.put_canvas0_addr, sizeof(hevc->trace.put_canvas0_addr), + "%s-put_canvas0_addr", hevc->trace.vdec_name); + snprintf(hevc->trace.new_q_name, sizeof(hevc->trace.new_q_name), + "%s-newframe_q", hevc->trace.vdec_name); + snprintf(hevc->trace.disp_q_name, sizeof(hevc->trace.disp_q_name), + "%s-dispframe_q", hevc->trace.vdec_name); + snprintf(hevc->trace.new_q_name, sizeof(hevc->trace.new_q_name), + "%s-newframe_q", hevc->trace.vdec_name); + snprintf(hevc->trace.disp_q_name, sizeof(hevc->trace.disp_q_name), + "%s-dispframe_q", hevc->trace.vdec_name); + snprintf(hevc->trace.decode_time_name, sizeof(hevc->trace.decode_time_name), + "decoder_time%d", pdev->id); + snprintf(hevc->trace.decode_run_time_name, sizeof(hevc->trace.decode_run_time_name), + "decoder_run_time%d", pdev->id); + snprintf(hevc->trace.decode_header_memory_time_name, sizeof(hevc->trace.decode_header_memory_time_name), + "decoder_header_time%d", pdev->id); + snprintf(hevc->trace.decode_work_time_name, sizeof(hevc->trace.decode_work_time_name), + "decoder_work_time%d", pdev->id); + + if (pdata->use_vfm_path) { + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + hevc->frameinfo_enable = 1; + } +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (vdec_dual(pdata)) { + struct hevc_state_s *hevc_pair = NULL; + + if (dv_toggle_prov_name) /*debug purpose*/ + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? 
VFM_DEC_DVBL_PROVIDER_NAME : + VFM_DEC_DVEL_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVEL_PROVIDER_NAME : + VFM_DEC_DVBL_PROVIDER_NAME); + hevc->dolby_enhance_flag = pdata->master ? 1 : 0; + if (pdata->master) + hevc_pair = (struct hevc_state_s *) + pdata->master->private; + else if (pdata->slave) + hevc_pair = (struct hevc_state_s *) + pdata->slave->private; + if (hevc_pair) + hevc->shift_byte_count_lo = + hevc_pair->shift_byte_count_lo; + } +#endif + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + + hevc->provider_name = pdata->vf_provider_name; + platform_set_drvdata(pdev, pdata); + + hevc->platform_dev = pdev; + + if (((get_dbg_flag(hevc) & IGNORE_PARAM_FROM_CONFIG) == 0) && + pdata->config_len) { +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + /*use ptr config for doubel_write_mode, etc*/ + hevc_print(hevc, 0, "pdata->config=%s\n", pdata->config); + + if (get_config_int(pdata->config, "hevc_double_write_mode", + &config_val) == 0) + hevc->double_write_mode = config_val; + else + hevc->double_write_mode = double_write_mode; + + if (get_config_int(pdata->config, "save_buffer_mode", + &config_val) == 0) + hevc->save_buffer_mode = config_val; + else + hevc->save_buffer_mode = 0; + + /*use ptr config for max_pic_w, etc*/ + if (get_config_int(pdata->config, "hevc_buf_width", + &config_val) == 0) { + hevc->max_pic_w = config_val; + } + if (get_config_int(pdata->config, "hevc_buf_height", + &config_val) == 0) { + hevc->max_pic_h = config_val; + } + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hevc->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hevc->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hevc->is_used_v4l = config_val; + + if 
(get_config_int(pdata->config, + "parm_v4l_buffer_margin", + &config_val) == 0) + hevc->dynamic_buf_num_margin = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + hevc->mem_map_mode = config_val; + + if (get_config_int(pdata->config, "negative_dv", + &config_val) == 0) { + hevc->discard_dv_data = config_val; + hevc_print(hevc, 0, "discard dv data\n"); + } + + if (get_config_int(pdata->config, + "parm_enable_fence", + &config_val) == 0) + hevc->enable_fence = config_val; + + if (get_config_int(pdata->config, + "parm_fence_usage", + &config_val) == 0) + hevc->fence_usage = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_low_latency_mode", + &config_val) == 0) { + hevc->low_latency_flag = (config_val & 1) ? 1 : 0; + hevc->enable_fence = (config_val & 2) ? 1 : 0; + } + + if (get_config_int(pdata->config, + "parm_v4l_metadata_config_flag", + &config_val) == 0) { + hevc->metadata_config_flag = config_val; + hevc->discard_dv_data = hevc->metadata_config_flag & VDEC_CFG_FLAG_DV_NEGATIVE; + hevc->dv_duallayer = hevc->metadata_config_flag & VDEC_CFG_FLAG_DV_TWOLARYER; + if (hevc->discard_dv_data) + hevc_print(hevc, 0, "discard dv data\n"); + if (hevc->dv_duallayer) + hevc_print(hevc, 0, "dv_duallayer\n"); + } + /*if (get_config_int(pdata->config, + "parm_v4l_duration", + &config_val) == 0) + vdec_frame_rate_uevent(config_val);*/ +#endif + } else { + if (pdata->sys_info) + hevc->vh265_amstream_dec_info = *pdata->sys_info; + else { + hevc->vh265_amstream_dec_info.width = 0; + hevc->vh265_amstream_dec_info.height = 0; + hevc->vh265_amstream_dec_info.rate = 30; + } + hevc->double_write_mode = double_write_mode; + } + + if (!hevc->is_used_v4l) + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vh265_vf_provider, pdata); + + if (force_config_fence) { + hevc->enable_fence = true; + hevc->fence_usage = (force_config_fence >> 4) & 0xf; + if (force_config_fence & 0x2) + hevc->enable_fence = false; 
+ hevc_print(hevc, 0, + "enable fence: %d, fence usage: %d\n", + hevc->enable_fence, hevc->fence_usage); + } + + if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) && + (hevc->double_write_mode == 3)) + hevc->double_write_mode = 0x1000; + + hevc->endian = HEVC_CONFIG_LITTLE_ENDIAN; + if (!hevc->is_used_v4l) { + /* get valid double write from configure or node */ + //hevc->double_write_mode = get_double_write_mode(hevc); + if (hevc->save_buffer_mode && dynamic_buf_num_margin > 2) + hevc->dynamic_buf_num_margin = dynamic_buf_num_margin -2; + else + hevc->dynamic_buf_num_margin = dynamic_buf_num_margin; + + hevc->mem_map_mode = mem_map_mode; + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) + hevc->endian = HEVC_CONFIG_BIG_ENDIAN; + } + if (endian) + hevc->endian = endian; + + if (mmu_enable_force == 0) { + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL + || hevc->double_write_mode == 0x10) + hevc->mmu_enable = 0; + else + hevc->mmu_enable = 1; + } + + if (hevc->is_used_v4l) { + if (hevc->double_write_mode & 0x10) + hevc->mmu_enable = 0; + else + hevc->mmu_enable = 1; + } + + if (hevc->is_used_v4l && (hevc->v4l2_ctx != NULL)) { + struct aml_vcodec_ctx *ctx = hevc->v4l2_ctx; + + ctx->aux_infos.alloc_buffer(ctx, SEI_TYPE); + + if (!hevc->discard_dv_data) + ctx->aux_infos.alloc_buffer(ctx, DV_TYPE); + } + + if (init_mmu_buffers(hevc, 1) < 0) { + hevc_print(hevc, 0, + "\n 265 mmu init failed!\n"); + mutex_unlock(&vh265_mutex); + /* devm_kfree(&pdev->dev, (void *)hevc);*/ + if (hevc) + vfree((void *)hevc); + pdata->dec_status = NULL; + return -EFAULT; + } +#if 0 + hevc->buf_start = pdata->mem_start; + hevc->buf_size = pdata->mem_end - pdata->mem_start + 1; +#else + + ret = decoder_bmmu_box_alloc_buf_phy(hevc->bmmu_box, + BMMU_WORKSPACE_ID, work_buf_size, + DRIVER_NAME, &hevc->buf_start); + if (ret < 0) { + uninit_mmu_buffers(hevc); + /* devm_kfree(&pdev->dev, (void *)hevc); */ + if (hevc) + vfree((void *)hevc); + pdata->dec_status = NULL; + 
mutex_unlock(&vh265_mutex); + return ret; + } + hevc->buf_size = work_buf_size; +#endif + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXTVBB) && + (parser_sei_enable & 0x100) == 0) + parser_sei_enable = 7; + hevc->init_flag = 0; + hevc->first_sc_checked = 0; + hevc->uninit_list = 0; + hevc->fatal_error = 0; + hevc->show_frame_num = 0; + + /* + *hevc->mc_buf_spec.buf_end = pdata->mem_end + 1; + *for (i = 0; i < WORK_BUF_SPEC_NUM; i++) + * amvh265_workbuff_spec[i].start_adr = pdata->mem_start; + */ + if (get_dbg_flag(hevc)) { + hevc_print(hevc, 0, + "===H.265 decoder mem resource 0x%lx size 0x%x\n", + hevc->buf_start, hevc->buf_size); + } + + hevc_print(hevc, 0, + "dynamic_buf_num_margin=%d\n", + hevc->dynamic_buf_num_margin); + hevc_print(hevc, 0, + "double_write_mode=%d\n", + hevc->double_write_mode); + + hevc->cma_dev = pdata->cma_dev; + vh265_vdec_info_init(hevc); + + if (vh265_init(pdata) < 0) { + hevc_print(hevc, 0, + "\namvdec_h265 init failed.\n"); + hevc_local_uninit(hevc); + if (hevc->gvs) + kfree(hevc->gvs); + hevc->gvs = NULL; + uninit_mmu_buffers(hevc); + /* devm_kfree(&pdev->dev, (void *)hevc); */ + if (hevc) + vfree((void *)hevc); + pdata->dec_status = NULL; + return -ENODEV; + } + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_init(pdata); +#endif + + vdec_set_prepare_level(pdata, start_decode_buf_level); + + /*set the max clk for smooth playing...*/ + hevc_source_changed(VFORMAT_HEVC, + 3840, 2160, 60); + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_HEVC); + else + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + mutex_init(&hevc->fence_mutex); + if (hevc->enable_fence) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hevc->v4l2_ctx); + + pdata->sync = vdec_sync_get(); + if (!pdata->sync) { + hevc_print(hevc, 0, "alloc fence timeline error\n"); + hevc_local_uninit(hevc); + if (hevc->gvs) + kfree(hevc->gvs); + hevc->gvs = NULL; + uninit_mmu_buffers(hevc); + /* 
devm_kfree(&pdev->dev, (void *)hevc); */ + if (hevc) + vfree((void *)hevc); + pdata->dec_status = NULL; + return -ENODEV; + } + ctx->sync = pdata->sync; + pdata->sync->usage = hevc->fence_usage; + /* creat timeline. */ + vdec_timeline_create(pdata->sync, DRIVER_NAME); + vdec_timeline_get(pdata->sync); + } + + return 0; +} + +static void vdec_fence_release(struct hevc_state_s *hw, + struct vdec_sync *sync) +{ + ulong expires; + + /* notify signal to wake up all fences. */ + vdec_timeline_increase(sync, VF_POOL_SIZE); + + expires = jiffies + msecs_to_jiffies(2000); + while (!check_objs_all_signaled(sync)) { + if (time_after(jiffies, expires)) { + pr_err("wait fence signaled timeout.\n"); + break; + } + } + + /* decreases refcnt of timeline. */ + vdec_timeline_put(sync); +} + +static int ammvdec_h265_remove(struct platform_device *pdev) +{ + struct hevc_state_s *hevc = + (struct hevc_state_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec; + + if (hevc == NULL) + return 0; + vdec = hw_to_vdec(hevc); + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_exit(vdec); +#endif + + //pr_err("%s [pid=%d,tgid=%d]\n", __func__, current->pid, current->tgid); + if (get_dbg_flag(hevc)) + hevc_print(hevc, 0, "%s\r\n", __func__); + + vmh265_stop(hevc); + + /* vdec_source_changed(VFORMAT_H264, 0, 0, 0); */ + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hevc), CORE_MASK_HEVC); + else + vdec_core_release(hw_to_vdec(hevc), CORE_MASK_HEVC); + + vdec_set_status(hw_to_vdec(hevc), VDEC_STATUS_DISCONNECTED); + + if (hevc->enable_fence) + vdec_fence_release(hevc, vdec->sync); + if (is_rdma_enable()) + dma_free_coherent(amports_get_dma_device(), RDMA_SIZE, hevc->rdma_adr, hevc->rdma_phy_adr); + vfree((void *)hevc); + + return 0; +} + +static struct platform_driver ammvdec_h265_driver = { + .probe = ammvdec_h265_probe, + .remove = ammvdec_h265_remove, + .driver = { + .name = MULTI_DRIVER_NAME, +#ifdef CONFIG_PM + .pm = &h265_pm_ops, +#endif + } +}; 
+#endif + +static struct codec_profile_t amvdec_h265_profile = { + .name = "H.265-V4L", + .profile = "" +}; + +static struct mconfig h265_configs[] = { + MC_PU32("use_cma", &use_cma), + MC_PU32("bit_depth_luma", &bit_depth_luma), + MC_PU32("bit_depth_chroma", &bit_depth_chroma), + MC_PU32("video_signal_type", &video_signal_type), +#ifdef ERROR_HANDLE_DEBUG + MC_PU32("dbg_nal_skip_flag", &dbg_nal_skip_flag), + MC_PU32("dbg_nal_skip_count", &dbg_nal_skip_count), +#endif + MC_PU32("radr", &radr), + MC_PU32("rval", &rval), + MC_PU32("dbg_cmd", &dbg_cmd), + MC_PU32("dbg_skip_decode_index", &dbg_skip_decode_index), + MC_PU32("endian", &endian), + MC_PU32("step", &step), + MC_PU32("udebug_flag", &udebug_flag), + MC_PU32("decode_pic_begin", &decode_pic_begin), + MC_PU32("slice_parse_begin", &slice_parse_begin), + MC_PU32("nal_skip_policy", &nal_skip_policy), + MC_PU32("i_only_flag", &i_only_flag), + MC_PU32("error_handle_policy", &error_handle_policy), + MC_PU32("error_handle_threshold", &error_handle_threshold), + MC_PU32("error_handle_nal_skip_threshold", + &error_handle_nal_skip_threshold), + MC_PU32("error_handle_system_threshold", + &error_handle_system_threshold), + MC_PU32("error_skip_nal_count", &error_skip_nal_count), + MC_PU32("debug", &debug), + MC_PU32("debug_mask", &debug_mask), + MC_PU32("buffer_mode", &buffer_mode), + MC_PU32("double_write_mode", &double_write_mode), + MC_PU32("buf_alloc_width", &buf_alloc_width), + MC_PU32("buf_alloc_height", &buf_alloc_height), + MC_PU32("dynamic_buf_num_margin", &dynamic_buf_num_margin), + MC_PU32("max_buf_num", &max_buf_num), + MC_PU32("buf_alloc_size", &buf_alloc_size), + MC_PU32("buffer_mode_dbg", &buffer_mode_dbg), + MC_PU32("mem_map_mode", &mem_map_mode), + MC_PU32("enable_mem_saving", &enable_mem_saving), + MC_PU32("force_w_h", &force_w_h), + MC_PU32("force_fps", &force_fps), + MC_PU32("max_decoding_time", &max_decoding_time), + MC_PU32("prefix_aux_buf_size", &prefix_aux_buf_size), + MC_PU32("suffix_aux_buf_size", 
&suffix_aux_buf_size), + MC_PU32("interlace_enable", &interlace_enable), + MC_PU32("pts_unstable", &pts_unstable), + MC_PU32("parser_sei_enable", &parser_sei_enable), + MC_PU32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("decode_timeout_val", &decode_timeout_val), + MC_PU32("parser_dolby_vision_enable", &parser_dolby_vision_enable), +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + MC_PU32("dv_toggle_prov_name", &dv_toggle_prov_name), + MC_PU32("dv_debug", &dv_debug), +#endif +}; +static struct mconfig_node decoder_265_node; + +static int __init amvdec_h265_driver_init_module(void) +{ + struct BuffInfo_s *p_buf_info; + + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + p_buf_info = &amvh265_workbuff_spec[2]; + else + p_buf_info = &amvh265_workbuff_spec[1]; + } else + p_buf_info = &amvh265_workbuff_spec[0]; + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) + p_buf_info = &amvh265_workbuff_spec[5]; + else + p_buf_info = &amvh265_workbuff_spec[3]; + } + + init_buff_spec(NULL, p_buf_info); + work_buf_size = + (p_buf_info->end_adr - p_buf_info->start_adr + + 0xffff) & (~0xffff); + + pr_debug("amvdec_h265 module init\n"); + error_handle_policy = 0; + +#ifdef ERROR_HANDLE_DEBUG + dbg_nal_skip_flag = 0; + dbg_nal_skip_count = 0; +#endif + udebug_flag = 0; + decode_pic_begin = 0; + slice_parse_begin = 0; + step = 0; + buf_alloc_size = 0; + + if (platform_driver_register(&ammvdec_h265_driver)) { + pr_err("failed to register ammvdec_h265 driver\n"); + return -ENODEV; + } + +#if 1/*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8*/ + if (!has_hevc_vdec()) { + /* not support hevc */ + amvdec_h265_profile.name = "hevc_unsupport"; + } + if ((vdec_is_support_4k())) { + if (is_meson_m8m2_cpu()) { + /* m8m2 support 4k */ + amvdec_h265_profile.profile = "4k"; + } else if (get_cpu_major_id() >= 
AM_MESON_CPU_MAJOR_ID_SM1) { + amvdec_h265_profile.profile = + "8k, 8bit, 10bit, dwrite, compressed, frame_dv, fence, uvm"; + }else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB) { + amvdec_h265_profile.profile = + "4k, 8bit, 10bit, dwrite, compressed, frame_dv, fence, uvm"; + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_MG9TV) + amvdec_h265_profile.profile = "4k"; + } else { + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D || is_cpu_s4_s805x2()) { + amvdec_h265_profile.profile = + "8bit, 10bit, dwrite, compressed, frame_dv, uvm, v4l"; + } else { + amvdec_h265_profile.profile = + "8bit, 10bit, dwrite, compressed, v4l"; + } + } +#endif + if (codec_mm_get_total_size() < 80 * SZ_1M) { + pr_info("amvdec_h265 default mmu enabled.\n"); + mmu_enable = 1; + } + + vcodec_profile_register(&amvdec_h265_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &decoder_265_node, + "h265-v4l", h265_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_HEVC, 1); + return 0; +} + +static void __exit amvdec_h265_driver_remove_module(void) +{ + pr_debug("amvdec_h265 module remove.\n"); + + platform_driver_unregister(&ammvdec_h265_driver); + +} + +/****************************************/ +/* + *module_param(stat, uint, 0664); + *MODULE_PARM_DESC(stat, "\n amvdec_h265 stat\n"); + */ +module_param(use_cma, uint, 0664); +MODULE_PARM_DESC(use_cma, "\n amvdec_h265 use_cma\n"); + +module_param(bit_depth_luma, uint, 0664); +MODULE_PARM_DESC(bit_depth_luma, "\n amvdec_h265 bit_depth_luma\n"); + +module_param(bit_depth_chroma, uint, 0664); +MODULE_PARM_DESC(bit_depth_chroma, "\n amvdec_h265 bit_depth_chroma\n"); + +module_param(video_signal_type, uint, 0664); +MODULE_PARM_DESC(video_signal_type, "\n amvdec_h265 video_signal_type\n"); + +#ifdef ERROR_HANDLE_DEBUG +module_param(dbg_nal_skip_flag, uint, 0664); +MODULE_PARM_DESC(dbg_nal_skip_flag, "\n amvdec_h265 dbg_nal_skip_flag\n"); + +module_param(dbg_nal_skip_count, uint, 0664); +MODULE_PARM_DESC(dbg_nal_skip_count, 
"\n amvdec_h265 dbg_nal_skip_count\n"); +#endif + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\n radr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\n rval\n"); + +module_param(dbg_cmd, uint, 0664); +MODULE_PARM_DESC(dbg_cmd, "\n dbg_cmd\n"); + +module_param(dump_nal, uint, 0664); +MODULE_PARM_DESC(dump_nal, "\n dump_nal\n"); + +module_param(dbg_skip_decode_index, uint, 0664); +MODULE_PARM_DESC(dbg_skip_decode_index, "\n dbg_skip_decode_index\n"); + +module_param(endian, uint, 0664); +MODULE_PARM_DESC(endian, "\n rval\n"); + +module_param(step, uint, 0664); +MODULE_PARM_DESC(step, "\n amvdec_h265 step\n"); + +module_param(decode_pic_begin, uint, 0664); +MODULE_PARM_DESC(decode_pic_begin, "\n amvdec_h265 decode_pic_begin\n"); + +module_param(slice_parse_begin, uint, 0664); +MODULE_PARM_DESC(slice_parse_begin, "\n amvdec_h265 slice_parse_begin\n"); + +module_param(nal_skip_policy, uint, 0664); +MODULE_PARM_DESC(nal_skip_policy, "\n amvdec_h265 nal_skip_policy\n"); + +module_param(i_only_flag, uint, 0664); +MODULE_PARM_DESC(i_only_flag, "\n amvdec_h265 i_only_flag\n"); + +module_param(fast_output_enable, uint, 0664); +MODULE_PARM_DESC(fast_output_enable, "\n amvdec_h265 fast_output_enable\n"); + +module_param(error_handle_policy, uint, 0664); +MODULE_PARM_DESC(error_handle_policy, "\n amvdec_h265 error_handle_policy\n"); + +module_param(error_handle_threshold, uint, 0664); +MODULE_PARM_DESC(error_handle_threshold, + "\n amvdec_h265 error_handle_threshold\n"); + +module_param(error_handle_nal_skip_threshold, uint, 0664); +MODULE_PARM_DESC(error_handle_nal_skip_threshold, + "\n amvdec_h265 error_handle_nal_skip_threshold\n"); + +module_param(error_handle_system_threshold, uint, 0664); +MODULE_PARM_DESC(error_handle_system_threshold, + "\n amvdec_h265 error_handle_system_threshold\n"); + +module_param(error_skip_nal_count, uint, 0664); +MODULE_PARM_DESC(error_skip_nal_count, + "\n amvdec_h265 error_skip_nal_count\n"); + 
+module_param(skip_nal_count, uint, 0664); +MODULE_PARM_DESC(skip_nal_count, "\n skip_nal_count\n"); + +module_param(debug, uint, 0664); +MODULE_PARM_DESC(debug, "\n amvdec_h265 debug\n"); + +module_param(debug_mask, uint, 0664); +MODULE_PARM_DESC(debug_mask, "\n amvdec_h265 debug mask\n"); + +module_param(log_mask, uint, 0664); +MODULE_PARM_DESC(log_mask, "\n amvdec_h265 log_mask\n"); + +module_param(buffer_mode, uint, 0664); +MODULE_PARM_DESC(buffer_mode, "\n buffer_mode\n"); + +module_param(double_write_mode, uint, 0664); +MODULE_PARM_DESC(double_write_mode, "\n double_write_mode\n"); + +module_param(buf_alloc_width, uint, 0664); +MODULE_PARM_DESC(buf_alloc_width, "\n buf_alloc_width\n"); + +module_param(buf_alloc_height, uint, 0664); +MODULE_PARM_DESC(buf_alloc_height, "\n buf_alloc_height\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +module_param(max_buf_num, uint, 0664); +MODULE_PARM_DESC(max_buf_num, "\n max_buf_num\n"); + +module_param(buf_alloc_size, uint, 0664); +MODULE_PARM_DESC(buf_alloc_size, "\n buf_alloc_size\n"); + +#ifdef CONSTRAIN_MAX_BUF_NUM +module_param(run_ready_max_vf_only_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_vf_only_num, "\n run_ready_max_vf_only_num\n"); + +module_param(run_ready_display_q_num, uint, 0664); +MODULE_PARM_DESC(run_ready_display_q_num, "\n run_ready_display_q_num\n"); + +module_param(run_ready_max_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_max_buf_num, "\n run_ready_max_buf_num\n"); + +module_param(run_ready_min_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_min_buf_num, "\n run_ready_min_buf_num\n"); +#endif + +#if 0 +module_param(re_config_pic_flag, uint, 0664); +MODULE_PARM_DESC(re_config_pic_flag, "\n re_config_pic_flag\n"); +#endif + +module_param(buffer_mode_dbg, uint, 0664); +MODULE_PARM_DESC(buffer_mode_dbg, "\n buffer_mode_dbg\n"); + +module_param(mem_map_mode, uint, 0664); +MODULE_PARM_DESC(mem_map_mode, 
"\n mem_map_mode\n"); + +module_param(enable_mem_saving, uint, 0664); +MODULE_PARM_DESC(enable_mem_saving, "\n enable_mem_saving\n"); + +module_param(force_w_h, uint, 0664); +MODULE_PARM_DESC(force_w_h, "\n force_w_h\n"); + +module_param(force_fps, uint, 0664); +MODULE_PARM_DESC(force_fps, "\n force_fps\n"); + +module_param(max_decoding_time, uint, 0664); +MODULE_PARM_DESC(max_decoding_time, "\n max_decoding_time\n"); + +module_param(prefix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(prefix_aux_buf_size, "\n prefix_aux_buf_size\n"); + +module_param(suffix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(suffix_aux_buf_size, "\n suffix_aux_buf_size\n"); + +module_param(interlace_enable, uint, 0664); +MODULE_PARM_DESC(interlace_enable, "\n interlace_enable\n"); +module_param(pts_unstable, uint, 0664); +MODULE_PARM_DESC(pts_unstable, "\n amvdec_h265 pts_unstable\n"); +module_param(parser_sei_enable, uint, 0664); +MODULE_PARM_DESC(parser_sei_enable, "\n parser_sei_enable\n"); + +module_param(parser_dolby_vision_enable, uint, 0664); +MODULE_PARM_DESC(parser_dolby_vision_enable, + "\n parser_dolby_vision_enable\n"); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(dolby_meta_with_el, uint, 0664); +MODULE_PARM_DESC(dolby_meta_with_el, + "\n dolby_meta_with_el\n"); + +module_param(dolby_el_flush_th, uint, 0664); +MODULE_PARM_DESC(dolby_el_flush_th, + "\n dolby_el_flush_th\n"); +#endif +module_param(mmu_enable, uint, 0664); +MODULE_PARM_DESC(mmu_enable, "\n mmu_enable\n"); + +module_param(mmu_enable_force, uint, 0664); +MODULE_PARM_DESC(mmu_enable_force, "\n mmu_enable_force\n"); + +#ifdef MULTI_INSTANCE_SUPPORT +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n h265 start_decode_buf_level\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, + "\n h265 decode_timeout_val\n"); + +module_param(print_lcu_error, uint, 0664); +MODULE_PARM_DESC(print_lcu_error, + "\n h265 
print_lcu_error\n"); + +module_param(data_resend_policy, uint, 0664); +MODULE_PARM_DESC(data_resend_policy, + "\n h265 data_resend_policy\n"); + +module_param(poc_num_margin, int, 0664); +MODULE_PARM_DESC(poc_num_margin, + "\n h265 poc_num_margin\n"); + +module_param(poc_error_limit, int, 0664); +MODULE_PARM_DESC(poc_error_limit, + "\n h265 poc_error_limit\n"); + +module_param_array(decode_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(display_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_get_frame_interval, + uint, &max_decode_instance_num, 0664); + +module_param_array(run_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(input_empty, uint, + &max_decode_instance_num, 0664); + +module_param_array(not_run_ready, uint, + &max_decode_instance_num, 0664); + +module_param_array(ref_frame_mark_flag, uint, + &max_decode_instance_num, 0664); + +#endif +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(dv_toggle_prov_name, uint, 0664); +MODULE_PARM_DESC(dv_toggle_prov_name, "\n dv_toggle_prov_name\n"); + +module_param(dv_debug, uint, 0664); +MODULE_PARM_DESC(dv_debug, "\n dv_debug\n"); + +module_param(force_bypass_dvenl, uint, 0664); +MODULE_PARM_DESC(force_bypass_dvenl, "\n force_bypass_dvenl\n"); +#endif + +#ifdef AGAIN_HAS_THRESHOLD +module_param(again_threshold, uint, 0664); +MODULE_PARM_DESC(again_threshold, "\n again_threshold\n"); +#endif + +module_param(force_disp_pic_index, int, 0664); +MODULE_PARM_DESC(force_disp_pic_index, + "\n amvdec_h265 force_disp_pic_index\n"); + +module_param(frmbase_cont_bitlevel, uint, 0664); +MODULE_PARM_DESC(frmbase_cont_bitlevel, "\n frmbase_cont_bitlevel\n"); + +module_param(force_bufspec, uint, 0664); +MODULE_PARM_DESC(force_bufspec, "\n amvdec_h265 force_bufspec\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_h265 
udebug_flag\n"); + +module_param(udebug_pause_pos, uint, 0664); +MODULE_PARM_DESC(udebug_pause_pos, "\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, "\n ammvdec_h264 pre_decode_buf_level\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +module_param(disp_vframe_valve_level, uint, 0664); +MODULE_PARM_DESC(disp_vframe_valve_level, "\n disp_vframe_valve_level\n"); + +module_param(pic_list_debug, uint, 0664); +MODULE_PARM_DESC(pic_list_debug, "\n pic_list_debug\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n amvdec_h265 without_display_mode\n"); + +#ifdef HEVC_8K_LFTOFFSET_FIX +module_param(performance_profile, uint, 0664); +MODULE_PARM_DESC(performance_profile, "\n amvdec_h265 performance_profile\n"); +#endif +module_param(disable_ip_mode, uint, 0664); +MODULE_PARM_DESC(disable_ip_mode, "\n amvdec_h265 disable ip_mode\n"); + +module_param(dirty_time_threshold, uint, 0664); +MODULE_PARM_DESC(dirty_time_threshold, "\n dirty_time_threshold\n"); + +module_param(dirty_count_threshold, uint, 0664); +MODULE_PARM_DESC(dirty_count_threshold, "\n dirty_count_threshold\n"); + +module_param(dirty_buffersize_threshold, uint, 0664); +MODULE_PARM_DESC(dirty_buffersize_threshold, "\n dirty_buffersize_threshold\n"); + +module_param(force_config_fence, uint, 0664); +MODULE_PARM_DESC(force_config_fence, "\n force enable fence\n"); + +module_param(mv_buf_dynamic_alloc, uint, 0664); +MODULE_PARM_DESC(mv_buf_dynamic_alloc, "\n mv_buf_dynamic_alloc\n"); + +module_param(detect_stuck_buffer_margin, uint, 0664); +MODULE_PARM_DESC(detect_stuck_buffer_margin, "\n detect_stuck_buffer_margin\n"); + +module_init(amvdec_h265_driver_init_module); 
+module_exit(amvdec_h265_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC h265 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <tim.yao@amlogic.com>");
diff --git a/drivers/frame_provider/decoder_v4l/h265/vh265.h b/drivers/frame_provider/decoder_v4l/h265/vh265.h new file mode 100644 index 0000000..11de11a --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/h265/vh265.h
@@ -0,0 +1,27 @@ +/* + * drivers/amlogic/amports/vh265.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef VH265_H +#define VH265_H + +extern u32 get_blackout_policy(void); + +extern s32 vh265_init(void); + +extern s32 vh265_release(void); + +#endif /* VMPEG4_H */
diff --git a/drivers/frame_provider/decoder_v4l/mjpeg/Makefile b/drivers/frame_provider/decoder_v4l/mjpeg/Makefile new file mode 100644 index 0000000..23819c6 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/mjpeg/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_MJPEG_MULTI) += amvdec_mmjpeg_v4l.o +amvdec_mmjpeg_v4l-objs += vmjpeg_multi.o
diff --git a/drivers/frame_provider/decoder_v4l/mjpeg/vmjpeg_multi.c b/drivers/frame_provider/decoder_v4l/mjpeg/vmjpeg_multi.c new file mode 100644 index 0000000..2f0bb96 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/mjpeg/vmjpeg_multi.c
@@ -0,0 +1,1877 @@ +/* + * drivers/amlogic/amports/vmjpeg.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" + +#include "../../decoder/utils/vdec_input.h" +#include "../../decoder/utils/vdec.h" +#include "../../decoder/utils/amvdec.h" +#include "../../decoder/utils/decoder_mmu_box.h" +#include "../../decoder/utils/decoder_bmmu_box.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../../decoder/utils/firmware.h" +#include "../../decoder/utils/vdec_v4l2_buffer_ops.h" +#include "../../decoder/utils/config_parser.h" +#include <media/v4l2-mem2mem.h> +#include 
"../../decoder/utils/vdec_feature.h" + +#define MEM_NAME "codec_mmjpeg" + +#define DRIVER_NAME "ammvdec_mjpeg_v4l" +#define CHECK_INTERVAL (HZ/100) + +/* protocol register usage + * AV_SCRATCH_4 : decode buffer spec + * AV_SCRATCH_5 : decode buffer index + */ + +#define MREG_DECODE_PARAM AV_SCRATCH_2 /* bit 0-3: pico_addr_mode */ +/* bit 15-4: reference height */ +#define MREG_TO_AMRISC AV_SCRATCH_8 +#define MREG_FROM_AMRISC AV_SCRATCH_9 +#define MREG_FRAME_OFFSET AV_SCRATCH_A +#define DEC_STATUS_REG AV_SCRATCH_F +#define MREG_PIC_WIDTH AV_SCRATCH_B +#define MREG_PIC_HEIGHT AV_SCRATCH_C +#define DECODE_STOP_POS AV_SCRATCH_K + +#define PICINFO_BUF_IDX_MASK 0x0007 +#define PICINFO_AVI1 0x0080 +#define PICINFO_INTERLACE 0x0020 +#define PICINFO_INTERLACE_AVI1_BOT 0x0010 +#define PICINFO_INTERLACE_FIRST 0x0010 + +#define VF_POOL_SIZE 64 +#define DECODE_BUFFER_NUM_MAX 16 +#define DECODE_BUFFER_NUM_DEF 4 +#define MAX_BMMU_BUFFER_NUM DECODE_BUFFER_NUM_MAX + +#define DEFAULT_MEM_SIZE (32*SZ_1M) + +#define INVALID_IDX (-1) /* Invalid buffer index.*/ + +static int debug_enable; +static u32 udebug_flag; +#define DECODE_ID(hw) (hw_to_vdec(hw)->id) + +static unsigned int radr; +static unsigned int rval; +#define VMJPEG_DEV_NUM 9 +static unsigned int max_decode_instance_num = VMJPEG_DEV_NUM; +static unsigned int max_process_time[VMJPEG_DEV_NUM]; +static unsigned int decode_timeout_val = 200; +static struct vframe_s *vmjpeg_vf_peek(void *); +static struct vframe_s *vmjpeg_vf_get(void *); +static void vmjpeg_vf_put(struct vframe_s *, void *); +static int vmjpeg_vf_states(struct vframe_states *states, void *); +static int vmjpeg_event_cb(int type, void *data, void *private_data); +static void vmjpeg_work(struct work_struct *work); +static int notify_v4l_eos(struct vdec_s *vdec); +static int pre_decode_buf_level = 0x800; +static int start_decode_buf_level = 0x2000; +static u32 without_display_mode; +static u32 dynamic_buf_num_margin; +static u32 run_ready_min_buf_num = 2; +#undef 
pr_info +#define pr_info printk +unsigned int mmjpeg_debug_mask = 0xff; +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_RUN_FLOW 0X0001 +#define PRINT_FLAG_TIMEINFO 0x0002 +#define PRINT_FLAG_UCODE_DETAIL 0x0004 +#define PRINT_FLAG_VLD_DETAIL 0x0008 +#define PRINT_FLAG_DEC_DETAIL 0x0010 +#define PRINT_FLAG_BUFFER_DETAIL 0x0020 +#define PRINT_FLAG_RESTORE 0x0040 +#define PRINT_FRAME_NUM 0x0080 +#define PRINT_FLAG_FORCE_DONE 0x0100 +#define PRINT_FRAMEBASE_DATA 0x0400 +#define PRINT_FLAG_TIMEOUT_STATUS 0x1000 +#define PRINT_FLAG_V4L_DETAIL 0x8000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 + +int mmjpeg_debug_print(int index, int debug_flag, const char *fmt, ...) +{ + if (((debug_enable & debug_flag) && + ((1 << index) & mmjpeg_debug_mask)) + || (debug_flag == PRINT_FLAG_ERROR)) { + unsigned char *buf = kzalloc(512, GFP_ATOMIC); + int len = 0; + va_list args; + + if (!buf) + return 0; + + va_start(args, fmt); + len = sprintf(buf, "%d: ", index); + vsnprintf(buf + len, 512-len, fmt, args); + pr_info("%s", buf); + va_end(args); + kfree(buf); + } + return 0; +} + +static const char vmjpeg_dec_id[] = "vmmjpeg-dev"; + +#define PROVIDER_NAME "vdec.mjpeg" +static const struct vframe_operations_s vf_provider_ops = { + .peek = vmjpeg_vf_peek, + .get = vmjpeg_vf_get, + .put = vmjpeg_vf_put, + .event_cb = vmjpeg_event_cb, + .vf_states = vmjpeg_vf_states, +}; + +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_ERROR 3 +#define DEC_RESULT_FORCE_EXIT 4 +#define DEC_RESULT_EOS 5 +#define DEC_DECODE_TIMEOUT 0x21 + + +struct buffer_spec_s { + unsigned int y_addr; + unsigned int u_addr; + unsigned int v_addr; + + int y_canvas_index; + int u_canvas_index; + int v_canvas_index; + + struct canvas_config_s canvas_config[3]; + unsigned long cma_alloc_addr; + int cma_alloc_count; + unsigned int buf_adr; + ulong v4l_ref_buf_addr; +}; + +#define spec2canvas(x) \ + (((x)->v_canvas_index << 16) | \ + ((x)->u_canvas_index << 8) | \ + 
((x)->y_canvas_index << 0)) + +struct vdec_mjpeg_hw_s { + spinlock_t lock; + struct mutex vmjpeg_mutex; + + struct platform_device *platform_dev; + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + + struct vframe_s vfpool[VF_POOL_SIZE]; + struct vframe_s vframe_dummy; + + struct buffer_spec_s buffer_spec[DECODE_BUFFER_NUM_MAX]; + s32 vfbuf_use[DECODE_BUFFER_NUM_MAX]; + + u32 frame_width; + u32 frame_height; + u32 frame_dur; + u32 saved_resolution; + u8 init_flag; + u32 stat; + u32 dec_result; + unsigned long buf_start; + u32 buf_size; + void *mm_blk_handle; + struct dec_sysinfo vmjpeg_amstream_dec_info; + + struct vframe_chunk_s *chunk; + struct work_struct work; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + struct firmware_s *fw; + struct timer_list check_timer; + u32 decode_timeout_count; + u32 start_process_time; + u32 last_vld_level; + u8 eos; + u32 frame_num; + u32 run_count; + u32 not_run_ready; + u32 buffer_not_ready; + u32 input_empty; + atomic_t peek_num; + atomic_t get_num; + atomic_t put_num; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + int buf_num; + int dynamic_buf_num_margin; + int sidebind_type; + int sidebind_channel_id; + u32 res_ch_flag; + u32 canvas_mode; + u32 canvas_endian; + ulong fb_token; + char vdec_name[32]; + char pts_name[32]; + char new_q_name[32]; + char disp_q_name[32]; +}; + +static void reset_process_time(struct vdec_mjpeg_hw_s *hw); + +static void set_frame_info(struct vdec_mjpeg_hw_s *hw, struct vframe_s *vf) +{ + u32 temp; + u32 temp_endian; + + temp = READ_VREG(MREG_PIC_WIDTH); + if (temp > 1920) + vf->width = hw->frame_width = 1920; + else if (temp > 0) + vf->width = hw->frame_width = temp; + temp = READ_VREG(MREG_PIC_HEIGHT); + if (temp > 1088) + vf->height = hw->frame_height = 1088; + else if (temp > 0) + vf->height = hw->frame_height = temp; + vf->duration = hw->frame_dur; + vf->ratio_control = 
DISP_RATIO_ASPECT_RATIO_MAX << DISP_RATIO_ASPECT_RATIO_BIT; + vf->sar_width = 1; + vf->sar_height = 1; + vf->duration_pulldown = 0; + vf->flag = 0; + + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 3; + + vf->canvas0_config[0] = hw->buffer_spec[vf->index].canvas_config[0]; + vf->canvas0_config[1] = hw->buffer_spec[vf->index].canvas_config[1]; + vf->canvas0_config[2] = hw->buffer_spec[vf->index].canvas_config[2]; + + vf->canvas1_config[0] = hw->buffer_spec[vf->index].canvas_config[0]; + vf->canvas1_config[1] = hw->buffer_spec[vf->index].canvas_config[1]; + vf->canvas1_config[2] = hw->buffer_spec[vf->index].canvas_config[2]; + + /* mjpeg convert endian to match display. */ + if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5W)) { + temp_endian = (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 7 : 0; + } else { + temp_endian = (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 0 : 7; + } + + vf->canvas0_config[0].endian = temp_endian; + vf->canvas0_config[1].endian = temp_endian; + vf->canvas0_config[2].endian = temp_endian; + vf->canvas1_config[0].endian = temp_endian; + vf->canvas1_config[1].endian = temp_endian; + vf->canvas1_config[2].endian = temp_endian; + + vf->sidebind_type = hw->sidebind_type; + vf->sidebind_channel_id = hw->sidebind_channel_id; +} + +static irqreturn_t vmjpeg_isr(struct vdec_s *vdec, int irq) +{ + struct vdec_mjpeg_hw_s *hw = + (struct vdec_mjpeg_hw_s *)(vdec->private); + + if (!hw) + return IRQ_HANDLED; + + if (hw->eos) + return IRQ_HANDLED; + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + return IRQ_WAKE_THREAD; +} + +static int vmjpeg_get_ps_info(struct vdec_mjpeg_hw_s *hw, int width, int height, struct aml_vdec_ps_infos *ps) +{ + ps->visible_width = width; + ps->visible_height = height; + ps->coded_width = ALIGN(width, 64); + ps->coded_height = ALIGN(height, 64); + ps->dpb_size = hw->buf_num; + ps->dpb_frames = 
DECODE_BUFFER_NUM_DEF; + ps->dpb_margin = hw->dynamic_buf_num_margin; + ps->field = V4L2_FIELD_NONE; + + return 0; +} + +static int v4l_res_change(struct vdec_mjpeg_hw_s *hw, int width, int height) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int ret = 0; + + if (ctx->param_sets_from_ucode && + hw->res_ch_flag == 0) { + struct aml_vdec_ps_infos ps; + + if ((hw->frame_width != 0 && + hw->frame_height != 0) && + (hw->frame_width != width || + hw->frame_height != height)) { + mmjpeg_debug_print(DECODE_ID(hw), 0, + "v4l_res_change Pic Width/Height Change (%d,%d)=>(%d,%d)\n", + hw->frame_width, hw->frame_height, + width, + height); + vmjpeg_get_ps_info(hw, width, height, &ps); + vdec_v4l_set_ps_infos(ctx, &ps); + vdec_v4l_res_ch_event(ctx); + hw->v4l_params_parsed = false; + hw->res_ch_flag = 1; + ctx->v4l_resolution_change = 1; + hw->eos = 1; + ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__); + notify_v4l_eos(hw_to_vdec(hw)); + ATRACE_COUNTER("V_ST_DEC-submit_eos", 0); + + ret = 1; + } + } + + return ret; +} + +static irqreturn_t vmjpeg_isr_thread_fn(struct vdec_s *vdec, int irq) +{ + struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)(vdec->private); + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + u32 reg; + struct vframe_s *vf = NULL; + u32 index, offset = 0, pts; + u64 pts_us64; + u32 frame_size; + + if (READ_VREG(AV_SCRATCH_D) != 0 && + (debug_enable & PRINT_FLAG_UCODE_DETAIL)) { + pr_info("dbg%x: %x\n", READ_VREG(AV_SCRATCH_D), + READ_VREG(AV_SCRATCH_E)); + WRITE_VREG(AV_SCRATCH_D, 0); + return IRQ_HANDLED; + } + + if (READ_VREG(DEC_STATUS_REG) == 1) { + if (hw->is_used_v4l) { + int frame_width = READ_VREG(MREG_PIC_WIDTH); + int frame_height = READ_VREG(MREG_PIC_HEIGHT); + + if (!v4l_res_change(hw, frame_width, frame_height)) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) { + struct 
aml_vdec_ps_infos ps; + + vmjpeg_get_ps_info(hw, frame_width, frame_height, &ps); + hw->v4l_params_parsed = true; + vdec_v4l_set_ps_infos(ctx, &ps); + reset_process_time(hw); + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + } else { + struct vdec_pic_info pic; + + if (!hw->buf_num) { + vdec_v4l_get_pic_info(ctx, &pic); + hw->buf_num = pic.dpb_frames + + pic.dpb_margin; + if (hw->buf_num > DECODE_BUFFER_NUM_MAX) + hw->buf_num = DECODE_BUFFER_NUM_MAX; + } + + WRITE_VREG(DEC_STATUS_REG, 0); + + hw->res_ch_flag = 1; + } + } else { + reset_process_time(hw); + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + } + } else + WRITE_VREG(DEC_STATUS_REG, 0); + return IRQ_HANDLED; + } + reset_process_time(hw); + + reg = READ_VREG(MREG_FROM_AMRISC); + index = READ_VREG(AV_SCRATCH_5) & 0xffffff; + + if (index >= hw->buf_num) { + pr_err("fatal error, invalid buffer index."); + return IRQ_HANDLED; + } + + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + pr_info( + "fatal error, no available buffer slot."); + return IRQ_HANDLED; + } + + if (hw->is_used_v4l) { + vf->v4l_mem_handle + = hw->buffer_spec[index].v4l_ref_buf_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), v4l mem handle: 0x%lx\n", + ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id, + __func__, vf->v4l_mem_handle); + } + + vf->index = index; + set_frame_info(hw, vf); + + vf->type = VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD; + /* vf->pts = (pts_valid) ? pts : 0; */ + /* vf->pts_us64 = (pts_valid) ? 
pts_us64 : 0; */ + + if (hw->chunk) { + vf->pts = hw->chunk->pts; + vf->pts_us64 = hw->chunk->pts64; + vf->timestamp = hw->chunk->timestamp; + } else { + offset = READ_VREG(MREG_FRAME_OFFSET); + if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) { + if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, offset, &pts, + &frame_size, 3000, + &pts_us64) == 0) { + vf->pts = pts; + vf->pts_us64 = pts_us64; + } else { + vf->pts = 0; + vf->pts_us64 = 0; + } + } + if (!vdec->vbuf.use_ptsserv && vdec_stream_based(vdec)) { + vf->pts_us64 = offset; + vf->pts = 0; + } + } + vf->orientation = 0; + hw->vfbuf_use[index]++; + + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, index); + decoder_do_frame_check(vdec, vf); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->timestamp); + ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q)); + ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q)); + hw->frame_num++; + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FRAME_NUM, + "%s:frame num:%d,pts=%d,pts64=%lld. 
dur=%d\n", + __func__, hw->frame_num, + vf->pts, vf->pts_us64, vf->duration); + vdec->vdec_fps_detec(vdec->id); + if (without_display_mode == 0) { + if (hw->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vmjpeg_vf_put(vmjpeg_vf_get(vdec), vdec); + } else { + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, + NULL); + } + } else + vmjpeg_vf_put(vmjpeg_vf_get(vdec), vdec); + + hw->dec_result = DEC_RESULT_DONE; + + vdec_schedule_work(&hw->work); + + return IRQ_HANDLED; +} + +static int valid_vf_check(struct vframe_s *vf, struct vdec_mjpeg_hw_s *hw) +{ + int i; + + if (!vf || (vf->index == -1)) + return 0; + + for (i = 0; i < VF_POOL_SIZE; i++) { + if (vf == &hw->vfpool[i]) + return 1; + } + + return 0; +} + +static struct vframe_s *vmjpeg_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_s *vdec = op_arg; + struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)vdec->private; + + if (!hw) + return NULL; + atomic_add(1, &hw->peek_num); + if (kfifo_peek(&hw->display_q, &vf)) + return vf; + + return NULL; +} + +static struct vframe_s *vmjpeg_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_s *vdec = op_arg; + struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)vdec->private; + + if (!hw) + return NULL; + + if (kfifo_get(&hw->display_q, &vf)) { + vf->index_disp = atomic_read(&hw->get_num); + atomic_add(1, &hw->get_num); + ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q)); + return vf; + } + return NULL; +} + +static void vmjpeg_vf_put(struct vframe_s *vf, void *op_arg) +{ + struct vdec_s *vdec = op_arg; + struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)vdec->private; + + if (!valid_vf_check(vf, hw)) { + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "invalid vf: %lx\n", (ulong)vf); + return ; + } + + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FRAME_NUM, + "%s:put_num:%d\n", 
__func__, hw->put_num); + + if (vf->v4l_mem_handle != + hw->buffer_spec[vf->index].v4l_ref_buf_addr) { + hw->buffer_spec[vf->index].v4l_ref_buf_addr + = vf->v4l_mem_handle; + + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "MJPEG update fb handle, old:%llx, new:%llx\n", + hw->buffer_spec[vf->index].v4l_ref_buf_addr, + vf->v4l_mem_handle); + } + + hw->vfbuf_use[vf->index]--; + + kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q)); + atomic_add(1, &hw->put_num); +} + +static int vmjpeg_event_cb(int type, void *data, void *op_arg) +{ + struct vdec_s *vdec = op_arg; + + if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(vdec); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +static int vmjpeg_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + struct vdec_s *vdec = op_arg; + struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)vdec->private; + + spin_lock_irqsave(&hw->lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&hw->newframe_q); + states->buf_avail_num = kfifo_len(&hw->display_q); + states->buf_recycle_num = 0; + + spin_unlock_irqrestore(&hw->lock, flags); + + return 0; +} + +static int vmjpeg_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)vdec->private; + + if (!hw) + return -1; + + vstatus->frame_width = hw->frame_width; + vstatus->frame_height = hw->frame_height; + if (0 != hw->frame_dur) + vstatus->frame_rate = 96000 / hw->frame_dur; + else + vstatus->frame_rate = 96000; + vstatus->error_count = 0; + vstatus->status = hw->stat; + + return 0; +} + +static void init_scaler(void) +{ + /* 4 point triangle */ + const unsigned int filt_coef[] = { + 0x20402000, 0x20402000, 0x1f3f2101, 
0x1f3f2101, + 0x1e3e2202, 0x1e3e2202, 0x1d3d2303, 0x1d3d2303, + 0x1c3c2404, 0x1c3c2404, 0x1b3b2505, 0x1b3b2505, + 0x1a3a2606, 0x1a3a2606, 0x19392707, 0x19392707, + 0x18382808, 0x18382808, 0x17372909, 0x17372909, + 0x16362a0a, 0x16362a0a, 0x15352b0b, 0x15352b0b, + 0x14342c0c, 0x14342c0c, 0x13332d0d, 0x13332d0d, + 0x12322e0e, 0x12322e0e, 0x11312f0f, 0x11312f0f, + 0x10303010 + }; + int i; + + /* pscale enable, PSCALE cbus bmem enable */ + WRITE_VREG(PSCALE_CTRL, 0xc000); + + /* write filter coefs */ + WRITE_VREG(PSCALE_BMEM_ADDR, 0); + for (i = 0; i < 33; i++) { + WRITE_VREG(PSCALE_BMEM_DAT, 0); + WRITE_VREG(PSCALE_BMEM_DAT, filt_coef[i]); + } + + /* Y horizontal initial info */ + WRITE_VREG(PSCALE_BMEM_ADDR, 37 * 2); + /* [35]: buf repeat pix0, + * [34:29] => buf receive num, + * [28:16] => buf blk x, + * [15:0] => buf phase + */ + WRITE_VREG(PSCALE_BMEM_DAT, 0x0008); + WRITE_VREG(PSCALE_BMEM_DAT, 0x60000000); + + /* C horizontal initial info */ + WRITE_VREG(PSCALE_BMEM_ADDR, 41 * 2); + WRITE_VREG(PSCALE_BMEM_DAT, 0x0008); + WRITE_VREG(PSCALE_BMEM_DAT, 0x60000000); + + /* Y vertical initial info */ + WRITE_VREG(PSCALE_BMEM_ADDR, 39 * 2); + WRITE_VREG(PSCALE_BMEM_DAT, 0x0008); + WRITE_VREG(PSCALE_BMEM_DAT, 0x60000000); + + /* C vertical initial info */ + WRITE_VREG(PSCALE_BMEM_ADDR, 43 * 2); + WRITE_VREG(PSCALE_BMEM_DAT, 0x0008); + WRITE_VREG(PSCALE_BMEM_DAT, 0x60000000); + + /* Y horizontal phase step */ + WRITE_VREG(PSCALE_BMEM_ADDR, 36 * 2 + 1); + /* [19:0] => Y horizontal phase step */ + WRITE_VREG(PSCALE_BMEM_DAT, 0x10000); + /* C horizontal phase step */ + WRITE_VREG(PSCALE_BMEM_ADDR, 40 * 2 + 1); + /* [19:0] => C horizontal phase step */ + WRITE_VREG(PSCALE_BMEM_DAT, 0x10000); + + /* Y vertical phase step */ + WRITE_VREG(PSCALE_BMEM_ADDR, 38 * 2 + 1); + /* [19:0] => Y vertical phase step */ + WRITE_VREG(PSCALE_BMEM_DAT, 0x10000); + /* C vertical phase step */ + WRITE_VREG(PSCALE_BMEM_ADDR, 42 * 2 + 1); + /* [19:0] => C horizontal phase step */ + 
WRITE_VREG(PSCALE_BMEM_DAT, 0x10000); + + /* reset pscaler */ +#if 1/*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6*/ + WRITE_VREG(DOS_SW_RESET0, (1 << 10)); + WRITE_VREG(DOS_SW_RESET0, 0); +#else + WRITE_RESET_REG(RESET2_REGISTER, RESET_PSCALE); +#endif + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SC2) { + READ_RESET_REG(RESET2_REGISTER); + READ_RESET_REG(RESET2_REGISTER); + READ_RESET_REG(RESET2_REGISTER); + } + WRITE_VREG(PSCALE_RST, 0x7); + WRITE_VREG(PSCALE_RST, 0x0); +} + +static void vmjpeg_dump_state(struct vdec_s *vdec) +{ + struct vdec_mjpeg_hw_s *hw = + (struct vdec_mjpeg_hw_s *)(vdec->private); + mmjpeg_debug_print(DECODE_ID(hw), 0, + "====== %s\n", __func__); + mmjpeg_debug_print(DECODE_ID(hw), 0, + "width/height (%d/%d) buf_num %d\n", + hw->frame_width, + hw->frame_height, + hw->buf_num + ); + mmjpeg_debug_print(DECODE_ID(hw), 0, + "is_framebase(%d), eos %d, state 0x%x, dec_result 0x%x dec_frm %d put_frm %d run %d not_run_ready %d input_empty %d\n", + input_frame_based(vdec), + hw->eos, + hw->stat, + hw->dec_result, + hw->frame_num, + hw->put_num, + hw->run_count, + hw->not_run_ready, + hw->input_empty + ); + if (!hw->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + mmjpeg_debug_print(DECODE_ID(hw), 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + mmjpeg_debug_print(DECODE_ID(hw), 0, + "%s, newq(%d/%d), dispq(%d/%d) vf peek/get/put (%d/%d/%d)\n", + __func__, + kfifo_len(&hw->newframe_q), + VF_POOL_SIZE, + kfifo_len(&hw->display_q), + VF_POOL_SIZE, + hw->peek_num, + hw->get_num, + hw->put_num + ); + mmjpeg_debug_print(DECODE_ID(hw), 0, + "VIFF_BIT_CNT=0x%x\n", + READ_VREG(VIFF_BIT_CNT)); + mmjpeg_debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_LEVEL=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + mmjpeg_debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_WP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP)); + 
mmjpeg_debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_RP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_RP)); + mmjpeg_debug_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + mmjpeg_debug_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + if (input_frame_based(vdec) && + debug_enable & PRINT_FRAMEBASE_DATA + ) { + int jj; + if (hw->chunk && hw->chunk->block && + hw->chunk->size > 0) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, hw->chunk->size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + mmjpeg_debug_print(DECODE_ID(hw), 0, + "frame data size 0x%x\n", + hw->chunk->size); + for (jj = 0; jj < hw->chunk->size; jj++) { + if ((jj & 0xf) == 0) + mmjpeg_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + mmjpeg_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + mmjpeg_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } +} +static void reset_process_time(struct vdec_mjpeg_hw_s *hw) +{ + if (hw->start_process_time) { + unsigned process_time = + 1000 * (jiffies - hw->start_process_time) / HZ; + hw->start_process_time = 0; + if (process_time > max_process_time[DECODE_ID(hw)]) + max_process_time[DECODE_ID(hw)] = process_time; + } +} + +static void start_process_time(struct vdec_mjpeg_hw_s *hw) +{ + hw->decode_timeout_count = 2; + hw->start_process_time = jiffies; +} + +static void timeout_process(struct vdec_mjpeg_hw_s *hw) +{ + amvdec_stop(); + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "%s decoder timeout\n", __func__); + hw->dec_result = DEC_RESULT_DONE; + reset_process_time(hw); + vdec_schedule_work(&hw->work); +} + +static void check_timer_func(struct timer_list *timer) +{ + struct vdec_mjpeg_hw_s *hw = 
container_of(timer, + struct vdec_mjpeg_hw_s, check_timer); + struct vdec_s *vdec = hw_to_vdec(hw); + int timeout_val = decode_timeout_val; + + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_VLD_DETAIL, + "%s: status:nstatus=%d:%d\n", + __func__, vdec->status, vdec->next_status); + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_VLD_DETAIL, + "%s: %d,buftl=%x:%x:%x:%x\n", + __func__, __LINE__, + READ_VREG(VLD_MEM_VIFIFO_BUF_CNTL), + STBUF_READ(&vdec->vbuf, get_wp), + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP)); + + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + + if (((debug_enable & PRINT_FLAG_TIMEOUT_STATUS) == 0) && + (timeout_val > 0) && + (hw->start_process_time > 0) && + ((1000 * (jiffies - hw->start_process_time) / HZ) + > timeout_val)) { + if (hw->last_vld_level == READ_VREG(VLD_MEM_VIFIFO_LEVEL)) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + timeout_process(hw); + } + hw->last_vld_level = READ_VREG(VLD_MEM_VIFIFO_LEVEL); + } + + if (READ_VREG(DEC_STATUS_REG) == DEC_DECODE_TIMEOUT) { + pr_info("ucode DEC_DECODE_TIMEOUT\n"); + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + timeout_process(hw); + WRITE_VREG(DEC_STATUS_REG, 0); + } + + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static void mjpeg_put_video_frame(void *vdec_ctx, struct vframe_s *vf) +{ + vmjpeg_vf_put(vf, vdec_ctx); +} + +static void mjpeg_get_video_frame(void *vdec_ctx, struct vframe_s **vf) +{ + *vf = vmjpeg_vf_get(vdec_ctx); +} + +static struct task_ops_s task_dec_ops = { + .type = TASK_TYPE_DEC, + .get_vframe = mjpeg_get_video_frame, + .put_vframe = mjpeg_put_video_frame, +}; + +static int vmjpeg_v4l_alloc_buff_config_canvas(struct vdec_mjpeg_hw_s *hw, int i) +{ + int ret; + u32 canvas; + 
ulong decbuf_start = 0, decbuf_u_start = 0, decbuf_v_start = 0; + int decbuf_y_size = 0, decbuf_u_size = 0, decbuf_v_size = 0; + u32 canvas_width = 0, canvas_height = 0; + struct vdec_s *vdec = hw_to_vdec(hw); + struct vdec_v4l2_buffer *fb = NULL; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (hw->buffer_spec[i].v4l_ref_buf_addr) { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *) + hw->buffer_spec[i].v4l_ref_buf_addr; + + fb->status = FB_ST_DECODER; + return 0; + } + + ret = ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC); + if (ret < 0) { + mmjpeg_debug_print(DECODE_ID(hw), 0, + "[%d] get fb fail.\n", + ((struct aml_vcodec_ctx *) + (hw->v4l2_ctx))->id); + return ret; + } + + fb->task->attach(fb->task, &task_dec_ops, hw_to_vdec(hw)); + fb->status = FB_ST_DECODER; + + if (!hw->frame_width || !hw->frame_height) { + struct vdec_pic_info pic; + vdec_v4l_get_pic_info(ctx, &pic); + hw->frame_width = pic.visible_width; + hw->frame_height = pic.visible_height; + mmjpeg_debug_print(DECODE_ID(hw), 0, + "[%d] set %d x %d from IF layer\n", ctx->id, + hw->frame_width, hw->frame_height); + } + + hw->buffer_spec[i].v4l_ref_buf_addr = (ulong)fb; + if (fb->num_planes == 1) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].offset; + decbuf_u_start = decbuf_start + decbuf_y_size; + decbuf_u_size = decbuf_y_size / 4; + decbuf_v_start = decbuf_u_start + decbuf_u_size; + decbuf_v_size = decbuf_u_size; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + } else if (fb->num_planes == 2) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].size; + decbuf_u_start = fb->m.mem[1].addr; + decbuf_u_size = fb->m.mem[1].size >> 1; + decbuf_v_start = decbuf_u_start + decbuf_u_size; + decbuf_v_size = decbuf_u_size; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + 
fb->m.mem[0].bytes_used = fb->m.mem[0].size; + fb->m.mem[1].bytes_used = fb->m.mem[1].size; + } else if (fb->num_planes == 3) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].size; + decbuf_u_start = fb->m.mem[1].addr; + decbuf_u_size = fb->m.mem[1].size; + decbuf_v_start = fb->m.mem[2].addr; + decbuf_v_size = fb->m.mem[2].size; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + fb->m.mem[1].bytes_used = fb->m.mem[1].size; + fb->m.mem[2].bytes_used = fb->m.mem[2].size; + } + + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] v4l ref buf addr: 0x%x\n", ctx->id, fb); + + if (vdec->parallel_dec == 1) { + if (hw->buffer_spec[i].y_canvas_index == -1) + hw->buffer_spec[i].y_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].u_canvas_index == -1) + hw->buffer_spec[i].u_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + if (hw->buffer_spec[i].v_canvas_index == -1) + hw->buffer_spec[i].v_canvas_index = + vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + } else { + canvas = vdec->get_canvas(i, 3); + hw->buffer_spec[i].y_canvas_index = canvas_y(canvas); + hw->buffer_spec[i].u_canvas_index = canvas_u(canvas); + hw->buffer_spec[i].v_canvas_index = canvas_v(canvas); + } + + hw->buffer_spec[i].canvas_config[0].phy_addr = + decbuf_start; + hw->buffer_spec[i].canvas_config[0].width = + canvas_width; + hw->buffer_spec[i].canvas_config[0].height = + canvas_height; + hw->buffer_spec[i].canvas_config[0].block_mode = + hw->canvas_mode; + hw->buffer_spec[i].canvas_config[0].endian = + (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 
7 : 0; + + config_cav_lut(hw->buffer_spec[i].y_canvas_index, + &hw->buffer_spec[i].canvas_config[0], VDEC_1); + + hw->buffer_spec[i].canvas_config[1].phy_addr = + decbuf_u_start; + hw->buffer_spec[i].canvas_config[1].width = + canvas_width / 2; + hw->buffer_spec[i].canvas_config[1].height = + canvas_height / 2; + hw->buffer_spec[i].canvas_config[1].block_mode = + hw->canvas_mode; + hw->buffer_spec[i].canvas_config[1].endian = + (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 7 : 0; + + config_cav_lut(hw->buffer_spec[i].u_canvas_index, + &hw->buffer_spec[i].canvas_config[1], VDEC_1); + + hw->buffer_spec[i].canvas_config[2].phy_addr = + decbuf_v_start; + hw->buffer_spec[i].canvas_config[2].width = + canvas_width / 2; + hw->buffer_spec[i].canvas_config[2].height = + canvas_height / 2; + hw->buffer_spec[i].canvas_config[2].block_mode = + hw->canvas_mode; + hw->buffer_spec[i].canvas_config[2].endian = + (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 7 : 0; + + config_cav_lut(hw->buffer_spec[i].v_canvas_index, + &hw->buffer_spec[i].canvas_config[2], VDEC_1); + + return 0; +} + +static int find_free_buffer(struct vdec_mjpeg_hw_s *hw) +{ + int i; + + for (i = 0; i < hw->buf_num; i++) { + if (hw->vfbuf_use[i] == 0) + break; + } + + if ((i == hw->buf_num) && + (hw->buf_num != 0)) { + return -1; + } + + if (vmjpeg_v4l_alloc_buff_config_canvas(hw, i)) + return -1; + + return i; +} + +static int vmjpeg_hw_ctx_restore(struct vdec_mjpeg_hw_s *hw) +{ + int index = -1; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + int i = 0; + + if (hw->v4l_params_parsed) { + struct vdec_pic_info pic; + + if (!hw->buf_num) { + vdec_v4l_get_pic_info(v4l2_ctx, &pic); + hw->buf_num = pic.dpb_frames + + pic.dpb_margin; + if (hw->buf_num > DECODE_BUFFER_NUM_MAX) + hw->buf_num = DECODE_BUFFER_NUM_MAX; + } + + index = find_free_buffer(hw); + if ((index < 0) || (index >= hw->buf_num)) + return -1; + + for (i = 0; i < hw->buf_num; i++) { + if (hw->buffer_spec[i].v4l_ref_buf_addr) { + 
config_cav_lut(hw->buffer_spec[i].y_canvas_index, + &hw->buffer_spec[i].canvas_config[0], VDEC_1); + config_cav_lut(hw->buffer_spec[i].u_canvas_index, + &hw->buffer_spec[i].canvas_config[1], VDEC_1); + config_cav_lut(hw->buffer_spec[i].v_canvas_index, + &hw->buffer_spec[i].canvas_config[2], VDEC_1); + } + } + + /* find next decode buffer index */ + WRITE_VREG(AV_SCRATCH_4, spec2canvas(&hw->buffer_spec[index])); + WRITE_VREG(AV_SCRATCH_5, index | 1 << 24); + + } else + WRITE_VREG(AV_SCRATCH_5, 1 << 24); + + WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6)); + WRITE_VREG(DOS_SW_RESET0, 0); + init_scaler(); + + /* clear buffer IN/OUT registers */ + WRITE_VREG(MREG_TO_AMRISC, 0); + WRITE_VREG(MREG_FROM_AMRISC, 0); + + WRITE_VREG(MCPU_INTR_MSK, 0xffff); + WRITE_VREG(MREG_DECODE_PARAM, (hw->frame_height << 4) | 0x8000); + + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + /* set interrupt mapping for vld */ + WRITE_VREG(ASSIST_AMR1_INT8, 8); +#if 1/*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6*/ + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + return 0; +} + +static s32 vmjpeg_init(struct vdec_s *vdec) +{ + int i; + int size = -1, fw_size = 0x1000 * 16; + struct firmware_s *fw = NULL; + struct vdec_mjpeg_hw_s *hw = + (struct vdec_mjpeg_hw_s *)vdec->private; + + fw = vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + size = get_firmware_data(VIDEO_DEC_MJPEG_MULTI, fw->data); + if (size < 0) { + pr_err("get firmware fail."); + vfree(fw); + return -1; + } + + fw->len = size; + hw->fw = fw; + + if (hw->is_used_v4l) { + hw->frame_width = 0; + hw->frame_height = 0; + } else { + hw->frame_width = hw->vmjpeg_amstream_dec_info.width; + hw->frame_height = hw->vmjpeg_amstream_dec_info.height; + } + hw->frame_dur = ((hw->vmjpeg_amstream_dec_info.rate) ? 
+ hw->vmjpeg_amstream_dec_info.rate : 3840); + hw->saved_resolution = 0; + hw->eos = 0; + hw->init_flag = 0; + hw->frame_num = 0; + hw->run_count = 0; + hw->not_run_ready = 0; + hw->input_empty = 0; + atomic_set(&hw->peek_num, 0); + atomic_set(&hw->get_num, 0); + atomic_set(&hw->put_num, 0); + + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->vfbuf_use[i] = 0; + + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &hw->vfpool[i]; + + hw->vfpool[i].index = -1; + kfifo_put(&hw->newframe_q, vf); + } + + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + + hw->mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER); + + timer_setup(&hw->check_timer, check_timer_func, 0); + hw->check_timer.expires = jiffies + CHECK_INTERVAL; + /*add_timer(&hw->check_timer);*/ + hw->stat |= STAT_TIMER_ARM; + hw->stat |= STAT_ISR_REG; + + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + INIT_WORK(&hw->work, vmjpeg_work); + pr_info("w:h=%d:%d\n", hw->frame_width, hw->frame_height); + return 0; +} + +static bool is_avaliable_buffer(struct vdec_mjpeg_hw_s *hw) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int i, free_count = 0; + int used_count = 0; + + if ((hw->buf_num == 0) || + (ctx->cap_pool.dec < hw->buf_num)) { + if (ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) { + free_count = + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) + 1; + } + } + + for (i = 0; i < hw->buf_num; ++i) { + if ((hw->vfbuf_use[i] == 0) && + hw->buffer_spec[i].v4l_ref_buf_addr) { + free_count++; + } else if (hw->buffer_spec[i].v4l_ref_buf_addr) + used_count++; + } + + ATRACE_COUNTER("V_ST_DEC-free_buff_count", free_count); + ATRACE_COUNTER("V_ST_DEC-used_buff_count", used_count); + + return free_count >= run_ready_min_buf_num ? 
1 : 0; +} + +static unsigned long run_ready(struct vdec_s *vdec, + unsigned long mask) +{ + struct vdec_mjpeg_hw_s *hw = + (struct vdec_mjpeg_hw_s *)vdec->private; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int ret = 0; + + hw->not_run_ready++; + + if (hw->eos) + return 0; + + if (vdec_stream_based(vdec) && (hw->init_flag == 0) + && pre_decode_buf_level != 0) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = vdec->input.size + wp - rp; + else + level = wp - rp; + + if (level < pre_decode_buf_level) + return 0; + } + + if (hw->v4l_params_parsed) { + ret = is_avaliable_buffer(hw) ? 1 : 0; + } else { + ret = ctx->v4l_resolution_change ? 0 : 1; + } + + hw->not_run_ready = 0; + hw->buffer_not_ready = 0; + + return ret ? CORE_MASK_VDEC_1 : 0; +} + +static void run(struct vdec_s *vdec, unsigned long mask, + void (*callback)(struct vdec_s *, void *), void *arg) +{ + struct vdec_mjpeg_hw_s *hw = + (struct vdec_mjpeg_hw_s *)vdec->private; + int ret; + + hw->vdec_cb_arg = arg; + hw->vdec_cb = callback; + + hw->run_count++; + vdec_reset_core(vdec); + + ret = vdec_prepare_input(vdec, &hw->chunk); + if (ret <= 0) { + hw->input_empty++; + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "%s: %d,r=%d,buftl=%x:%x:%x\n", + __func__, __LINE__, ret, + READ_VREG(VLD_MEM_VIFIFO_BUF_CNTL), + STBUF_READ(&vdec->vbuf, get_rp), + READ_VREG(VLD_MEM_VIFIFO_WP)); + + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + return; + } + + ATRACE_COUNTER("V_ST_DEC-chunk_size", ret); + + hw->input_empty = 0; + hw->dec_result = DEC_RESULT_NONE; + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else { + ret = amvdec_vdec_loadmc_ex(VFORMAT_MJPEG, "mmjpeg", vdec, hw->fw->data); + if (ret < 0) { + pr_err("[%d] MMJPEG: the %s fw loading failed, err: %x\n", + vdec->id, tee_enabled() ? 
"TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_MJPEG; + } +/* if (amvdec_vdec_loadmc_buf_ex(vdec, hw->fw->data, hw->fw->len) < 0) { + pr_err("%s: Error amvdec_loadmc fail\n", __func__); + return; + }*/ + + if (vmjpeg_hw_ctx_restore(hw) < 0) { + hw->dec_result = DEC_RESULT_ERROR; + mmjpeg_debug_print(DECODE_ID(hw), 0, + "amvdec_mmjpeg: error HW context restore\n"); + vdec_schedule_work(&hw->work); + return; + } +#if 0 + vdec_enable_input(vdec); + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +#endif + hw->stat |= STAT_MC_LOAD; + start_process_time(hw); + hw->last_vld_level = 0; + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); + amvdec_start(); + vdec_enable_input(vdec); + hw->stat |= STAT_VDEC_RUN; + hw->init_flag = 1; + + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "%s (0x%x 0x%x 0x%x) vldcrl 0x%x bitcnt 0x%x powerctl 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VLD_DECODE_CONTROL), + READ_VREG(VIFF_BIT_CNT), + READ_VREG(POWER_CTL_VLD), + READ_VREG(VLD_MEM_VIFIFO_START_PTR), + READ_VREG(VLD_MEM_VIFIFO_CURR_PTR), + READ_VREG(VLD_MEM_VIFIFO_CONTROL), + READ_VREG(VLD_MEM_VIFIFO_BUF_CNTL), + READ_VREG(VLD_MEM_VIFIFO_END_PTR)); +} +static void wait_vmjpeg_search_done(struct vdec_mjpeg_hw_s *hw) +{ + u32 vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP); + int count = 0; + + do { + usleep_range(100, 500); + if (vld_rp == READ_VREG(VLD_MEM_VIFIFO_RP)) + break; + if (count > 1000) { + mmjpeg_debug_print(DECODE_ID(hw), 0, + "%s, count %d vld_rp 0x%x VLD_MEM_VIFIFO_RP 0x%x\n", + __func__, count, vld_rp, READ_VREG(VLD_MEM_VIFIFO_RP)); + break; + } else + vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP); + count++; + } while (1); +} + +static int notify_v4l_eos(struct vdec_s *vdec) +{ + struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s 
*)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = &hw->vframe_dummy; + struct vdec_v4l2_buffer *fb = NULL; + int index = INVALID_IDX; + ulong expires; + + if (hw->eos) { + expires = jiffies + msecs_to_jiffies(2000); + while (!is_avaliable_buffer(hw)) { + if (time_after(jiffies, expires)) { + pr_err("[%d] MJPEG isn't enough buff for notify eos.\n", ctx->id); + return 0; + } + } + + index = find_free_buffer(hw); + if (INVALID_IDX == index) { + pr_err("[%d] MJPEG EOS get free buff fail.\n", ctx->id); + return 0; + } + + fb = (struct vdec_v4l2_buffer *) + hw->buffer_spec[index].v4l_ref_buf_addr; + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->v4l_mem_handle = (ulong)fb; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + + pr_info("[%d] mjpeg EOS notify.\n", ctx->id); + } + + return 0; +} + +static void vmjpeg_work(struct work_struct *work) +{ + struct vdec_mjpeg_hw_s *hw = container_of(work, + struct vdec_mjpeg_hw_s, work); + struct vdec_s *vdec = hw_to_vdec(hw); + + mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_BUFFER_DETAIL, + "%s: result=%d,len=%d:%d\n", + __func__, hw->dec_result, + kfifo_len(&hw->newframe_q), + kfifo_len(&hw->display_q)); + + ATRACE_COUNTER("V_ST_DEC-work_state", hw->dec_result); + + if (hw->dec_result == DEC_RESULT_DONE) { + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + hw->chunk = NULL; + } else if (hw->dec_result == DEC_RESULT_AGAIN) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + if (!vdec_has_more_input(hw_to_vdec(hw))) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + /*pr_info("%s: return\n", + __func__);*/ + return; + } + } else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) { + pr_info("%s: force exit\n", 
__func__); + if (hw->stat & STAT_ISR_REG) { + amvdec_stop(); + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + } else if (hw->dec_result == DEC_RESULT_EOS) { + pr_info("%s: end of stream\n", __func__); + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + hw->eos = 1; + if (hw->is_used_v4l) { + ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__); + notify_v4l_eos(vdec); + ATRACE_COUNTER("V_ST_DEC-submit_eos", 0); + } + + vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk); + hw->chunk = NULL; + vdec_clean_input(hw_to_vdec(hw)); + } + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + /*disable mbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 0); + wait_vmjpeg_search_done(hw); + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode && + !hw->v4l_params_parsed) + vdec_v4l_write_frame_sync(ctx); + } + + /* mark itself has all HW resource released and input released */ + if (vdec->parallel_dec == 1) + vdec_core_finish_run(hw_to_vdec(hw), CORE_MASK_VDEC_1); + else { + vdec_core_finish_run(hw_to_vdec(hw), CORE_MASK_VDEC_1 + | CORE_MASK_HEVC); + } + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + + if (hw->vdec_cb) + hw->vdec_cb(hw_to_vdec(hw), hw->vdec_cb_arg); +} + +static int vmjpeg_stop(struct vdec_mjpeg_hw_s *hw) +{ + pr_info("%s ...count = %d\n", __func__, hw->frame_num); + + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + pr_info("%s amvdec_stop\n", __func__); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + cancel_work_sync(&hw->work); + hw->init_flag = 0; + + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + + if (hw->fw) { + 
vfree(hw->fw); + hw->fw = NULL; + } + + return 0; +} + +static void reset(struct vdec_s *vdec) +{ + struct vdec_mjpeg_hw_s *hw = + (struct vdec_mjpeg_hw_s *)vdec->private; + int i; + + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + flush_work(&hw->work); + reset_process_time(hw); + + for (i = 0; i < hw->buf_num; i++) { + hw->buffer_spec[i].v4l_ref_buf_addr = 0; + hw->vfbuf_use[i] = 0; + } + + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &hw->vfpool[i]; + + memset((void *)vf, 0, sizeof(*vf)); + hw->vfpool[i].index = -1; + kfifo_put(&hw->newframe_q, vf); + } + hw->eos = 0; + + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + vdec->free_canvas_ex(hw->buffer_spec[i].y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].u_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].v_canvas_index, vdec->id); + hw->buffer_spec[i].y_canvas_index = -1; + hw->buffer_spec[i].u_canvas_index = -1; + hw->buffer_spec[i].v_canvas_index = -1; + } + + hw->eos = 0; + hw->buf_num = 0; + hw->frame_width = 0; + hw->frame_height = 0; + + atomic_set(&hw->peek_num, 0); + atomic_set(&hw->get_num, 0); + atomic_set(&hw->put_num, 0); + + pr_info("mjpeg: reset.\n"); +} + +static int ammvdec_mjpeg_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_mjpeg_hw_s *hw = NULL; + int config_val = 0; + + if (pdata == NULL) { + pr_info("ammvdec_mjpeg memory resource undefined.\n"); + return -EFAULT; + } + + hw = vzalloc(sizeof(struct vdec_mjpeg_hw_s)); + if (hw == NULL) { + pr_info("\nammvdec_mjpeg device data allocation failed\n"); + return -ENOMEM; + } + + /* the ctx from v4l2 driver. 
*/ + hw->v4l2_ctx = pdata->private; + + pdata->private = hw; + pdata->dec_status = vmjpeg_dec_status; + + pdata->run = run; + pdata->run_ready = run_ready; + pdata->reset = reset; + pdata->irq_handler = vmjpeg_isr; + pdata->threaded_irq_handler = vmjpeg_isr_thread_fn; + pdata->dump_state = vmjpeg_dump_state; + + snprintf(hw->vdec_name, sizeof(hw->vdec_name), + "vmjpeg-%d", pdev->id); + snprintf(hw->pts_name, sizeof(hw->pts_name), + "%s-timestamp", hw->vdec_name); + snprintf(hw->new_q_name, sizeof(hw->new_q_name), + "%s-newframe_q", hw->vdec_name); + snprintf(hw->disp_q_name, sizeof(hw->disp_q_name), + "%s-dispframe_q", hw->vdec_name); + + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + hw->buffer_spec[i].y_canvas_index = -1; + hw->buffer_spec[i].u_canvas_index = -1; + hw->buffer_spec[i].v_canvas_index = -1; + } + } + + if (pdata->use_vfm_path) + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + PROVIDER_NAME ".%02x", pdev->id & 0xff); + + platform_set_drvdata(pdev, pdata); + hw->platform_dev = pdev; + + if (((debug_enable & IGNORE_PARAM_FROM_CONFIG) == 0) && pdata->config_len) { + mmjpeg_debug_print(DECODE_ID(hw), 0, "pdata->config: %s\n", pdata->config); + if (get_config_int(pdata->config, "parm_v4l_buffer_margin", + &config_val) == 0) + hw->dynamic_buf_num_margin = config_val; + else + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + hw->canvas_mode = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_endian", + &config_val) == 0) + hw->canvas_endian = config_val; + + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hw->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hw->sidebind_channel_id = config_val; + + if 
(get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hw->is_used_v4l = config_val; + + /*if (get_config_int(pdata->config, + "parm_v4l_duration", + &config_val) == 0) + vdec_frame_rate_uevent(config_val);*/ + } else { + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + } + + if (!hw->is_used_v4l) { + vf_provider_init(&pdata->vframe_provider, + pdata->vf_provider_name, &vf_provider_ops, pdata); + } + + platform_set_drvdata(pdev, pdata); + + hw->platform_dev = pdev; + + vdec_source_changed(VFORMAT_MJPEG, + 1920, 1080, 60); + if (vmjpeg_init(pdata) < 0) { + pr_info("ammvdec_mjpeg init failed.\n"); + if (hw) { + vfree(hw); + hw = NULL; + } + pdata->dec_status = NULL; + return -ENODEV; + } + vdec_set_prepare_level(pdata, start_decode_buf_level); + + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + + return 0; +} + +static int ammvdec_mjpeg_remove(struct platform_device *pdev) +{ + struct vdec_mjpeg_hw_s *hw = + (struct vdec_mjpeg_hw_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec; + int i; + + if (!hw) + return -1; + vdec = hw_to_vdec(hw); + + vmjpeg_stop(hw); + + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1); + else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + if (vdec->parallel_dec == 1) { + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + vdec->free_canvas_ex(hw->buffer_spec[i].y_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].u_canvas_index, vdec->id); + vdec->free_canvas_ex(hw->buffer_spec[i].v_canvas_index, vdec->id); + } + } + + vfree(hw); + + pr_info("%s\n", __func__); + return 0; +} + +/****************************************/ + +static struct platform_driver ammvdec_mjpeg_driver = { + .probe = ammvdec_mjpeg_probe, + 
.remove = ammvdec_mjpeg_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t ammvdec_mjpeg_profile = { + .name = "MJPEG-V4L", + .profile = "" +}; + +static int __init ammvdec_mjpeg_driver_init_module(void) +{ + if (platform_driver_register(&ammvdec_mjpeg_driver)) { + pr_err("failed to register ammvdec_mjpeg driver\n"); + return -ENODEV; + } + vcodec_profile_register(&ammvdec_mjpeg_profile); + vcodec_feature_register(VFORMAT_MJPEG, 1); + return 0; +} + +static void __exit ammvdec_mjpeg_driver_remove_module(void) +{ + platform_driver_unregister(&ammvdec_mjpeg_driver); +} + +/****************************************/ +module_param(debug_enable, uint, 0664); +MODULE_PARM_DESC(debug_enable, "\n debug enable\n"); +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, + "\n ammvdec_h264 pre_decode_buf_level\n"); +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_mmpeg12 udebug_flag\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, "\n ammvdec_mjpeg decode_timeout_val\n"); + +module_param_array(max_process_time, uint, &max_decode_instance_num, 0664); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(start_decode_buf_level, uint, 0664); +MODULE_PARM_DESC(start_decode_buf_level, "\nstart_decode_buf_level\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\nrval\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n without_display_mode\n"); + +module_init(ammvdec_mjpeg_driver_init_module); +module_exit(ammvdec_mjpeg_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC MJMPEG Video Decoder Driver"); +MODULE_LICENSE("GPL"); 
+MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
diff --git a/drivers/frame_provider/decoder_v4l/mpeg12/Makefile b/drivers/frame_provider/decoder_v4l/mpeg12/Makefile new file mode 100644 index 0000000..6419ed6 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/mpeg12/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_MPEG2_MULTI) += amvdec_mmpeg12_v4l.o +amvdec_mmpeg12_v4l-objs += vmpeg12_multi.o
diff --git a/drivers/frame_provider/decoder_v4l/mpeg12/vmpeg12_multi.c b/drivers/frame_provider/decoder_v4l/mpeg12/vmpeg12_multi.c new file mode 100644 index 0000000..f67fd71 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/mpeg12/vmpeg12_multi.c
@@ -0,0 +1,3945 @@ +/* + * drivers/amlogic/amports/vmpeg12.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * +*/ + +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/random.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/sched/clock.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../../decoder/utils/vdec_input.h" +#include "../../decoder/utils/vdec.h" +#include "../../decoder/utils/amvdec.h" +#include "../../decoder/utils/decoder_mmu_box.h" +#include "../../decoder/utils/decoder_bmmu_box.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../../decoder/utils/config_parser.h" +#include 
"../../decoder/utils/firmware.h" +#include "../../decoder/utils/vdec_v4l2_buffer_ops.h" +#include "../../decoder/utils/config_parser.h" +#include <media/v4l2-mem2mem.h> +#include "../../decoder/utils/vdec_feature.h" + +#define MEM_NAME "codec_mmpeg12" +#define CHECK_INTERVAL (HZ/100) + +#define DRIVER_NAME "ammvdec_mpeg12_v4l" +#define MREG_REF0 AV_SCRATCH_2 +#define MREG_REF1 AV_SCRATCH_3 +/* protocol registers */ +#define MREG_SEQ_INFO AV_SCRATCH_4 +#define MREG_PIC_INFO AV_SCRATCH_5 +#define MREG_PIC_WIDTH AV_SCRATCH_6 +#define MREG_PIC_HEIGHT AV_SCRATCH_7 +#define MREG_INPUT AV_SCRATCH_8 /*input_type*/ +#define MREG_BUFFEROUT AV_SCRATCH_9 /*FROM_AMRISC_REG*/ + +#define MREG_CMD AV_SCRATCH_A +#define MREG_CO_MV_START AV_SCRATCH_B +#define MREG_ERROR_COUNT AV_SCRATCH_C +#define MREG_FRAME_OFFSET AV_SCRATCH_D +#define MREG_WAIT_BUFFER AV_SCRATCH_E +#define MREG_FATAL_ERROR AV_SCRATCH_F + +#define MREG_CC_ADDR AV_SCRATCH_0 +#define AUX_BUF_ALIGN(adr) ((adr + 0xf) & (~0xf)) + +#define GET_SLICE_TYPE(type) ("IPB##"[((type&PICINFO_TYPE_MASK)>>16)&0x3]) +#define PICINFO_ERROR 0x80000000 +#define PICINFO_TYPE_MASK 0x00030000 +#define PICINFO_TYPE_I 0x00000000 +#define PICINFO_TYPE_P 0x00010000 +#define PICINFO_TYPE_B 0x00020000 +#define PICINFO_PROG 0x8000 +#define PICINFO_RPT_FIRST 0x4000 +#define PICINFO_TOP_FIRST 0x2000 +#define PICINFO_FRAME 0x1000 +#define TOP_FIELD 0x1000 +#define BOTTOM_FIELD 0x2000 +#define FRAME_PICTURE 0x3000 +#define FRAME_PICTURE_MASK 0x3000 + +#define SEQINFO_EXT_AVAILABLE 0x80000000 +#define SEQINFO_PROG 0x00010000 +#define CCBUF_SIZE (5*1024) + +#define VF_POOL_SIZE 64 +#define DECODE_BUFFER_NUM_MAX 16 +#define DECODE_BUFFER_NUM_DEF 8 +#define MAX_BMMU_BUFFER_NUM (DECODE_BUFFER_NUM_MAX + 1) + +#define PUT_INTERVAL (HZ/100) +#define WORKSPACE_SIZE (4*SZ_64K) /*swap&ccbuf&matirx&MV*/ +#define CTX_LMEM_SWAP_OFFSET 0 +#define CTX_CCBUF_OFFSET 0x800 +#define CTX_QUANT_MATRIX_OFFSET (CTX_CCBUF_OFFSET + 5*1024) +#define CTX_CO_MV_OFFSET 
(CTX_QUANT_MATRIX_OFFSET + 1*1024) +#define CTX_DECBUF_OFFSET (CTX_CO_MV_OFFSET + 0x11000) + +#define DEFAULT_MEM_SIZE (32*SZ_1M) + +#define INVALID_IDX (-1) /* Invalid buffer index.*/ + +static int pre_decode_buf_level = 0x800; +static int start_decode_buf_level = 0x4000; +static u32 dec_control; +static u32 error_frame_skip_level = 1; +static u32 udebug_flag; +static unsigned int radr; +static unsigned int rval; + +static u32 without_display_mode; +static u32 dynamic_buf_num_margin = 2; + +#define VMPEG12_DEV_NUM 9 +static unsigned int max_decode_instance_num = VMPEG12_DEV_NUM; +static unsigned int max_process_time[VMPEG12_DEV_NUM]; +static unsigned int decode_timeout_val = 200; +#define INCPTR(p) ptr_atomic_wrap_inc(&p) + +#define DEC_CONTROL_FLAG_FORCE_2500_720_576_INTERLACE 0x0002 +#define DEC_CONTROL_FLAG_FORCE_3000_704_480_INTERLACE 0x0004 +#define DEC_CONTROL_FLAG_FORCE_2500_704_576_INTERLACE 0x0008 +#define DEC_CONTROL_FLAG_FORCE_2500_544_576_INTERLACE 0x0010 +#define DEC_CONTROL_FLAG_FORCE_2500_480_576_INTERLACE 0x0020 +#define DEC_CONTROL_INTERNAL_MASK 0x0fff +#define DEC_CONTROL_FLAG_FORCE_SEQ_INTERLACE 0x1000 + +#define INTERLACE_SEQ_ALWAYS + +#if 1/* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +#define NV21 +#endif + +#define AGAIN_HAS_THRESHOLD + +#ifdef AGAIN_HAS_THRESHOLD +static u32 again_threshold; +#endif + +/* +#define DUMP_USER_DATA +*/ + +enum { + FRAME_REPEAT_TOP, + FRAME_REPEAT_BOT, + FRAME_REPEAT_NONE +}; + +/*Send by AV_SCRATCH_9*/ +#define MPEG12_PIC_DONE 1 +#define MPEG12_DATA_EMPTY 2 +#define MPEG12_SEQ_END 3 +#define MPEG12_DATA_REQUEST 4 + +/*Send by AV_SCRATCH_G*/ +#define MPEG12_V4L2_INFO_NOTIFY 1 +/*Send by AV_SCRATCH_J*/ +#define MPEG12_USERDATA_DONE 0x8000 + +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_ERROR 3 +#define DEC_RESULT_FORCE_EXIT 4 +#define DEC_RESULT_EOS 5 +#define DEC_RESULT_GET_DATA 6 +#define DEC_RESULT_GET_DATA_RETRY 7 + +#define DEC_DECODE_TIMEOUT 0x21 
+#define DECODE_ID(hw) (hw_to_vdec(hw)->id) +#define DECODE_STOP_POS AV_SCRATCH_K + +struct mmpeg2_userdata_record_t { + struct userdata_meta_info_t meta_info; + u32 rec_start; + u32 rec_len; +}; + +#define USERDATA_FIFO_NUM 256 +#define MAX_FREE_USERDATA_NODES 5 + +struct mmpeg2_userdata_info_t { + struct mmpeg2_userdata_record_t records[USERDATA_FIFO_NUM]; + u8 *data_buf; + u8 *data_buf_end; + u32 buf_len; + u32 read_index; + u32 write_index; + u32 last_wp; +}; +#define MAX_UD_RECORDS 5 + +struct pic_info_t { + u32 buffer_info; + u32 index; + u32 offset; + u32 width; + u32 height; + u32 pts; + u64 pts64; + bool pts_valid; + ulong v4l_ref_buf_addr; + u32 hw_decode_time; + u32 frame_size; // For frame base mode + u64 timestamp; + u64 last_timestamp; +}; + +struct vdec_mpeg12_hw_s { + spinlock_t lock; + struct platform_device *platform_dev; + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + struct vframe_s vframe_dummy; + s32 vfbuf_use[DECODE_BUFFER_NUM_MAX]; + s32 ref_use[DECODE_BUFFER_NUM_MAX]; + u32 frame_width; + u32 frame_height; + u32 frame_dur; + u32 frame_prog; + u32 seqinfo; + u32 ctx_valid; + u32 dec_control; + void *mm_blk_handle; + struct vframe_chunk_s *chunk; + u32 stat; + u8 init_flag; + unsigned long buf_start; + u32 buf_size; + u32 reg_pic_width; + u32 reg_pic_height; + u32 reg_mpeg1_2_reg; + u32 reg_pic_head_info; + u32 reg_f_code_reg; + u32 reg_slice_ver_pos_pic_type; + u32 reg_vcop_ctrl_reg; + u32 reg_mb_info; + u32 reg_signal_type; + u32 dec_num; + struct timer_list check_timer; + u32 decode_timeout_count; + unsigned long int start_process_time; + u32 last_vld_level; + u32 eos; + + struct pic_info_t pics[DECODE_BUFFER_NUM_MAX]; + u32 canvas_spec[DECODE_BUFFER_NUM_MAX]; + u64 lastpts64; + u32 last_chunk_pts; + struct canvas_config_s canvas_config[DECODE_BUFFER_NUM_MAX][2]; + struct dec_sysinfo vmpeg12_amstream_dec_info; + + s32 
refs[2]; + int dec_result; + u32 timeout_processing; + struct work_struct work; + struct work_struct timeout_work; + struct work_struct notify_work; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + dma_addr_t ccbuf_phyAddress; + void *ccbuf_phyAddress_virt; + u32 cc_buf_size; + unsigned long ccbuf_phyAddress_is_remaped_nocache; + u32 frame_rpt_state; +/* for error handling */ + s32 frame_force_skip_flag; + s32 error_frame_skip_level; + s32 wait_buffer_counter; + u32 first_i_frame_ready; + u32 run_count; + u32 not_run_ready; + u32 input_empty; + atomic_t disp_num; + atomic_t put_num; + atomic_t peek_num; + atomic_t get_num; + u32 drop_frame_count; + u32 buffer_not_ready; + u32 ratio_control; + int frameinfo_enable; + struct firmware_s *fw; + u32 canvas_mode; +#ifdef AGAIN_HAS_THRESHOLD + u32 pre_parser_wr_ptr; + u8 next_again_flag; +#endif + + struct work_struct userdata_push_work; + struct mutex userdata_mutex; + struct mmpeg2_userdata_info_t userdata_info; + struct mmpeg2_userdata_record_t ud_record[MAX_UD_RECORDS]; + int cur_ud_idx; + u8 *user_data_buffer; + int wait_for_udr_send; + u32 ucode_cc_last_wp; + u32 notify_ucode_cc_last_wp; + u32 notify_data_cc_last_wp; + u32 userdata_wp_ctx; +#ifdef DUMP_USER_DATA +#define MAX_USER_DATA_SIZE 1572864 + void *user_data_dump_buf; + unsigned char *pdump_buf_cur_start; + int total_len; + int bskip; + int n_userdata_id; + u32 reference[MAX_UD_RECORDS]; +#endif + int tvp_flag; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + u32 buf_num; + u32 dynamic_buf_num_margin; + struct vdec_info gvs; + struct vframe_qos_s vframe_qos; + u32 res_ch_flag; + u32 i_only; + u32 kpi_first_i_comming; + u32 kpi_first_i_decoded; + int sidebind_type; + int sidebind_channel_id; + u32 profile_idc; + u32 level_idc; + int dec_again_cnt; + int vdec_pg_enable_flag; + ulong fb_token; + bool force_prog_only; + char vdec_name[32]; + char pts_name[32]; + char new_q_name[32]; + char disp_q_name[32]; + u32 
chunk_header_offset; + u32 chunk_res_size; + u64 first_field_timestamp; + u64 first_field_timestamp_valid; + u32 report_field; +}; +static void vmpeg12_local_init(struct vdec_mpeg12_hw_s *hw); +static int vmpeg12_hw_ctx_restore(struct vdec_mpeg12_hw_s *hw); +static void reset_process_time(struct vdec_mpeg12_hw_s *hw); +static void vmpeg12_workspace_init(struct vdec_mpeg12_hw_s *hw); +static void flush_output(struct vdec_mpeg12_hw_s *hw); +static struct vframe_s *vmpeg_vf_peek(void *); +static struct vframe_s *vmpeg_vf_get(void *); +static void vmpeg_vf_put(struct vframe_s *, void *); +static int vmpeg_vf_states(struct vframe_states *states, void *); +static int vmpeg_event_cb(int type, void *data, void *private_data); +static int notify_v4l_eos(struct vdec_s *vdec); +static void start_process_time_set(struct vdec_mpeg12_hw_s *hw); +static int debug_enable; +/*static struct work_struct userdata_push_work;*/ +#undef pr_info +#define pr_info printk +unsigned int mpeg12_debug_mask = 0xff; +/*static int counter_max = 5;*/ +static u32 run_ready_min_buf_num = 2; +static int dirty_again_threshold = 100; +static int error_proc_policy = 0x1; + +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_RUN_FLOW 0X0001 +#define PRINT_FLAG_TIMEINFO 0x0002 +#define PRINT_FLAG_UCODE_DETAIL 0x0004 +#define PRINT_FLAG_VLD_DETAIL 0x0008 +#define PRINT_FLAG_DEC_DETAIL 0x0010 +#define PRINT_FLAG_BUFFER_DETAIL 0x0020 +#define PRINT_FLAG_RESTORE 0x0040 +#define PRINT_FRAME_NUM 0x0080 +#define PRINT_FLAG_FORCE_DONE 0x0100 +#define PRINT_FLAG_COUNTER 0X0200 +#define PRINT_FRAMEBASE_DATA 0x0400 +#define PRINT_FLAG_VDEC_STATUS 0x0800 +#define PRINT_FLAG_PARA_DATA 0x1000 +#define PRINT_FLAG_USERDATA_DETAIL 0x2000 +#define PRINT_FLAG_TIMEOUT_STATUS 0x4000 +#define PRINT_FLAG_V4L_DETAIL 0x8000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 + + + +int debug_print(int index, int debug_flag, const char *fmt, ...) 
{
	/*
	 * Per-instance debug logger.
	 *
	 * A message is printed when the requested @debug_flag is enabled in
	 * the module-wide debug_enable mask AND this decoder instance's bit
	 * is set in mpeg12_debug_mask, or unconditionally when
	 * @debug_flag == PRINT_FLAG_ERROR (which is 0, so it can never match
	 * the mask test and gets its own OR'd check).
	 *
	 * Always returns 0, including on allocation failure.
	 */
	if (((debug_enable & debug_flag) &&
		((1 << index) & mpeg12_debug_mask))
		|| (debug_flag == PRINT_FLAG_ERROR)) {
		/* GFP_ATOMIC: presumably callable from atomic/irq context — TODO confirm */
		unsigned char *buf = kzalloc(512, GFP_ATOMIC);
		int len = 0;
		va_list args;

		if (!buf)
			return 0;

		va_start(args, fmt);
		/* Prefix every message with the decoder instance id. */
		len = sprintf(buf, "%d: ", index);
		/* Bound the formatted payload to the remaining buffer space. */
		vsnprintf(buf + len, 512-len, fmt, args);
		pr_info("%s", buf);
		va_end(args);
		kfree(buf);
	}
	return 0;
}


/*static bool is_reset;*/
#define PROVIDER_NAME "vdec.mpeg12"

/* vframe provider callbacks exposed to the display pipeline. */
static const struct vframe_operations_s vf_provider_ops = {
	.peek = vmpeg_vf_peek,
	.get = vmpeg_vf_get,
	.put = vmpeg_vf_put,
	.event_cb = vmpeg_event_cb,
	.vf_states = vmpeg_vf_states,
};


/*
 * Frame durations in 96 kHz ticks (96000 / fps), indexed by the 4-bit
 * MPEG frame_rate_code read from MREG_SEQ_INFO bits [7:4] (see
 * set_frame_info()).  The x1000 entries (96000000/23976, 9600000/2997,
 * 9600000/5994) handle the fractional NTSC rates 23.976/29.97/59.94.
 * Codes above 8 are reserved by the spec and fall back to 24 fps.
 */
static const u32 frame_rate_tab[16] = {
	96000 / 30, 96000000 / 23976, 96000 / 24, 96000 / 25,
	9600000 / 2997, 96000 / 30, 96000 / 50, 9600000 / 5994,
	96000 / 60,
	/* > 8 reserved, use 24 */
	96000 / 24, 96000 / 24, 96000 / 24, 96000 / 24,
	96000 / 24, 96000 / 24, 96000 / 24
};

/* task_ops_s adapter: release a vframe back to this decoder. */
static void mpeg12_put_video_frame(void *vdec_ctx, struct vframe_s *vf)
{
	vmpeg_vf_put(vf, vdec_ctx);
}

/* task_ops_s adapter: fetch the next decoded vframe from this decoder. */
static void mpeg12_get_video_frame(void *vdec_ctx, struct vframe_s **vf)
{
	*vf = vmpeg_vf_get(vdec_ctx);
}

/* Ops table attached to each v4l2 buffer's task in the alloc path below. */
static struct task_ops_s task_dec_ops = {
	.type = TASK_TYPE_DEC,
	.get_vframe = mpeg12_get_video_frame,
	.put_vframe = mpeg12_put_video_frame,
};

/*
 * Allocate (or re-claim) the v4l2 capture buffer for picture slot @i and
 * program the two hardware canvases (Y plane, then UV plane) that the
 * decoder core writes into.  Returns 0 on success, negative on
 * allocation failure.
 */
static int vmpeg12_v4l_alloc_buff_config_canvas(struct vdec_mpeg12_hw_s *hw, int i)
{
	int ret;
	u32 canvas;
	ulong decbuf_start = 0, decbuf_uv_start = 0;
	int decbuf_y_size = 0, decbuf_uv_size = 0;
	u32 canvas_width = 0, canvas_height = 0;
	struct vdec_s *vdec = hw_to_vdec(hw);
	struct vdec_v4l2_buffer *fb = NULL;
	struct aml_vcodec_ctx *ctx =
		(struct aml_vcodec_ctx *)(hw->v4l2_ctx);

	/*
	 * Slot already has a v4l buffer attached: just mark it as owned by
	 * the decoder again — no new allocation, no canvas reprogramming.
	 */
	if (hw->pics[i].v4l_ref_buf_addr) {
		/* NOTE: this inner 'fb' intentionally shadows the outer one. */
		struct vdec_v4l2_buffer *fb =
			(struct vdec_v4l2_buffer *)
			hw->pics[i].v4l_ref_buf_addr;

		fb->status = FB_ST_DECODER;
		return 0;
	}

	ret = ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb,
AML_FB_REQ_DEC); + if (ret < 0) { + debug_print(DECODE_ID(hw), 0, + "[%d] get fb fail %d/%d.\n", + ctx->id, i, hw->buf_num); + return ret; + } + + fb->task->attach(fb->task, &task_dec_ops, hw_to_vdec(hw)); + fb->status = FB_ST_DECODER; + + if (!hw->frame_width || !hw->frame_height) { + struct vdec_pic_info pic; + vdec_v4l_get_pic_info(ctx, &pic); + hw->frame_width = pic.visible_width; + hw->frame_height = pic.visible_height; + debug_print(DECODE_ID(hw), 0, + "[%d] set %d x %d from IF layer\n", ctx->id, + hw->frame_width, hw->frame_height); + } + + hw->pics[i].v4l_ref_buf_addr = (ulong)fb; + if (fb->num_planes == 1) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].offset; + decbuf_uv_start = decbuf_start + decbuf_y_size; + decbuf_uv_size = decbuf_y_size / 2; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + } else if (fb->num_planes == 2) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].size; + decbuf_uv_start = fb->m.mem[1].addr; + decbuf_uv_size = fb->m.mem[1].size; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + fb->m.mem[0].bytes_used = decbuf_y_size; + fb->m.mem[1].bytes_used = decbuf_uv_size; + } + + debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), v4l ref buf addr: 0x%x\n", + ctx->id, __func__, fb); + + if (vdec->parallel_dec == 1) { + u32 tmp; + if (canvas_u(hw->canvas_spec[i]) == 0xff) { + tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~(0xffff << 8); + hw->canvas_spec[i] |= tmp << 8; + hw->canvas_spec[i] |= tmp << 16; + } + if (canvas_y(hw->canvas_spec[i]) == 0xff) { + tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~0xff; + hw->canvas_spec[i] |= tmp; + } + canvas = hw->canvas_spec[i]; + } else { + canvas = vdec->get_canvas(i, 2); + hw->canvas_spec[i] = canvas; + } + + hw->canvas_config[i][0].phy_addr = 
decbuf_start; + hw->canvas_config[i][0].width = canvas_width; + hw->canvas_config[i][0].height = canvas_height; + hw->canvas_config[i][0].block_mode = hw->canvas_mode; + hw->canvas_config[i][0].endian = + (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 7 : 0; + config_cav_lut(canvas_y(canvas), &hw->canvas_config[i][0], VDEC_1); + + hw->canvas_config[i][1].phy_addr = decbuf_uv_start; + hw->canvas_config[i][1].width = canvas_width; + hw->canvas_config[i][1].height = canvas_height / 2; + hw->canvas_config[i][1].block_mode = hw->canvas_mode; + hw->canvas_config[i][1].endian = + (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 7 : 0; + config_cav_lut(canvas_u(canvas), &hw->canvas_config[i][1], VDEC_1); + + debug_print(DECODE_ID(hw), PRINT_FLAG_BUFFER_DETAIL, + "[%d] %s(), canvas: 0x%x mode: %d y: %x uv: %x w: %d h: %d\n", + ctx->id, __func__, canvas, hw->canvas_mode, + decbuf_start, decbuf_uv_start, + canvas_width, canvas_height); + + return 0; +} + +static int find_free_buffer(struct vdec_mpeg12_hw_s *hw) +{ + int i; + + for (i = 0; i < hw->buf_num; i++) { + if ((hw->vfbuf_use[i] == 0) && + (hw->ref_use[i] == 0)) + break; + } + + if ((i == hw->buf_num) && + (hw->buf_num != 0)) { + return -1; + } + + if (vmpeg12_v4l_alloc_buff_config_canvas(hw, i)) + return -1; + + return i; +} + +static u32 spec_to_index(struct vdec_mpeg12_hw_s *hw, u32 spec) +{ + u32 i; + + for (i = 0; i < hw->buf_num; i++) { + if (hw->canvas_spec[i] == spec) + return i; + } + + return hw->buf_num; +} + +/* +[SE][BUG-145343][huanghang] fixed:mpeg2 frame qos info notify */ +static void fill_frame_info(struct vdec_mpeg12_hw_s *hw, u32 slice_type, + int frame_size, u32 pts) +{ + unsigned char a[3]; + unsigned char i, j, t; + unsigned long data; + struct vframe_qos_s *vframe_qos = &hw->vframe_qos; + + vframe_qos->type = ((slice_type & PICINFO_TYPE_MASK) == + PICINFO_TYPE_I) ? 1 : + ((slice_type & + PICINFO_TYPE_MASK) == + PICINFO_TYPE_P) ? 
2 : 3;
	/* (tail of the slice-type ternary above: 1 = I, 2 = P, 3 = B/other) */
	vframe_qos->size = frame_size;
	vframe_qos->pts = pts;

	/*
	 * NOTE(review): everything below SYNTHESIZES the QoS statistics.
	 * max/avg/min mv, qp and skip are derived from get_random_bytes(),
	 * not from real decoder measurements — callers should treat these
	 * values as placeholders only (see the BUG-145343 note above).
	 */
	get_random_bytes(&data, sizeof(unsigned long));
	/* I frames report zero motion vectors. */
	if (vframe_qos->type == 1)
		data = 0;
	a[0] = data & 0xff;
	a[1] = (data >> 8) & 0xff;
	a[2] = (data >> 16) & 0xff;

	/*
	 * 3-element bubble sort, ascending, with a tweak: equal elements are
	 * bumped (a[i]++) so max/avg/min come out strictly distinct.
	 */
	for (i = 0; i < 3; i++) {
		for (j = i+1; j < 3; j++) {
			if (a[j] < a[i]) {
				t = a[j];
				a[j] = a[i];
				a[i] = t;
			} else if (a[j] == a[i]) {
				a[i]++;
				t = a[j];
				a[j] = a[i];
				a[i] = t;
			}
		}
	}
	vframe_qos->max_mv = a[2];
	vframe_qos->avg_mv = a[1];
	vframe_qos->min_mv = a[0];

	/* Same scheme for QP, with smaller per-field ranges (5/6/7 bits). */
	get_random_bytes(&data, sizeof(unsigned long));
	a[0] = data & 0x1f;
	a[1] = (data >> 8) & 0x3f;
	a[2] = (data >> 16) & 0x7f;

	for (i = 0; i < 3; i++) {
		for (j = i+1; j < 3; j++) {
			if (a[j] < a[i]) {
				t = a[j];
				a[j] = a[i];
				a[i] = t;
			} else if (a[j] == a[i]) {
				a[i]++;
				t = a[j];
				a[j] = a[i];
				a[i] = t;
			}
		}
	}
	vframe_qos->max_qp = a[2];
	vframe_qos->avg_qp = a[1];
	vframe_qos->min_qp = a[0];

	/* And again for the skipped-MB statistics. */
	get_random_bytes(&data, sizeof(unsigned long));
	a[0] = data & 0x1f;
	a[1] = (data >> 8) & 0x3f;
	a[2] = (data >> 16) & 0x7f;

	for (i = 0; i < 3; i++) {
		for (j = i + 1; j < 3; j++) {
			if (a[j] < a[i]) {
				t = a[j];
				a[j] = a[i];
				a[i] = t;
			} else if (a[j] == a[i]) {
				a[i]++;
				t = a[j];
				a[j] = a[i];
				a[i] = t;
			}
		}
	}
	vframe_qos->max_skip = a[2];
	vframe_qos->avg_skip = a[1];
	vframe_qos->min_skip = a[0];

	vframe_qos->num++;

	return;
}

/*
 * Fill the display-facing fields of @vf (size, duration, signal type,
 * aspect ratio, canvas configs, endianness) from the picture slot
 * selected by vf->index and the cached sequence registers.
 */
static void set_frame_info(struct vdec_mpeg12_hw_s *hw, struct vframe_s *vf)
{
	u32 ar_bits;
	u32 endian_tmp;
	u32 buffer_index = vf->index;

	vf->width = hw->pics[buffer_index].width;
	vf->height = hw->pics[buffer_index].height;

	if (hw->frame_dur > 0)
		vf->duration = hw->frame_dur;
	else {
		/*
		 * First frame with no known duration: derive it from the
		 * frame_rate_code in MREG_SEQ_INFO (cache it in hw->frame_dur
		 * as a side effect) and notify via the work queue.
		 */
		vf->duration = hw->frame_dur =
			frame_rate_tab[(READ_VREG(MREG_SEQ_INFO) >> 4) & 0xf];
		vdec_schedule_work(&hw->notify_work);
	}

	vf->signal_type = hw->reg_signal_type;

	if (hw->is_used_v4l) {
		struct aml_vdec_hdr_infos hdr;
		struct
aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + memset(&hdr, 0, sizeof(hdr)); + hdr.signal_type = hw->reg_signal_type; + vdec_v4l_set_hdr_infos(ctx, &hdr); + } + + ar_bits = READ_VREG(MREG_SEQ_INFO) & 0xf; + + if (ar_bits == 0x2) + vf->ratio_control = 0xc0 << DISP_RATIO_ASPECT_RATIO_BIT; + + else if (ar_bits == 0x3) + vf->ratio_control = 0x90 << DISP_RATIO_ASPECT_RATIO_BIT; + + else if (ar_bits == 0x4) + vf->ratio_control = 0x74 << DISP_RATIO_ASPECT_RATIO_BIT; + else + vf->ratio_control = 0; + + hw->ratio_control = vf->ratio_control; + + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + + vf->canvas0_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas0_config[1] = hw->canvas_config[buffer_index][1]; + vf->canvas1_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas1_config[1] = hw->canvas_config[buffer_index][1]; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) { + endian_tmp = (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 7 : 0; + } else { + endian_tmp = (hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 
0 : 7;
	/* (tail of the endian select above: T7 inverts the linear/non-linear
	 * endian choice relative to other SoCs) */
	}

	/* Apply the chosen endian to all four canvas configs of both fields. */
	vf->canvas0_config[0].endian = endian_tmp;
	vf->canvas0_config[1].endian = endian_tmp;
	vf->canvas1_config[0].endian = endian_tmp;
	vf->canvas1_config[1].endian = endian_tmp;

	vf->sidebind_type = hw->sidebind_type;
	vf->sidebind_channel_id = hw->sidebind_channel_id;

	debug_print(DECODE_ID(hw), PRINT_FLAG_PARA_DATA,
		"mpeg2dec: w(%d), h(%d), dur(%d), dur-ES(%d)\n",
		hw->frame_width, hw->frame_height, hw->frame_dur,
		frame_rate_tab[(READ_VREG(MREG_SEQ_INFO) >> 4) & 0xf]);
}

/*
 * Decide whether the picture described by @info should be dropped.
 *
 * With error skipping enabled: an errored picture is always skipped; at
 * level >= 2 an error additionally latches frame_force_skip_flag so
 * following pictures are skipped too, until a clean I picture clears the
 * flag.  Returns true when the caller should drop the frame.
 * NOTE(review): @vf is currently unused by this function.
 */
static bool error_skip(struct vdec_mpeg12_hw_s *hw,
	u32 info, struct vframe_s *vf)
{
	if (hw->error_frame_skip_level) {
		/* skip error frame */
		if ((info & PICINFO_ERROR) || (hw->frame_force_skip_flag)) {
			if ((info & PICINFO_ERROR) == 0) {
				/* clean I frame ends a forced-skip run */
				if ((info & PICINFO_TYPE_MASK) ==
					PICINFO_TYPE_I)
					hw->frame_force_skip_flag = 0;
			} else {
				if (hw->error_frame_skip_level >= 2)
					hw->frame_force_skip_flag = 1;
			}
			if ((info & PICINFO_ERROR)
				|| (hw->frame_force_skip_flag))
				return true;
		}
	}
	return false;
}

/*
 * Snapshot the decoder core's sequence/picture registers into @hw so the
 * context can be restored later (vmpeg12_hw_ctx_restore).  @reg == 3
 * invalidates the saved context instead of saving.
 */
static inline void vmpeg12_save_hw_context(struct vdec_mpeg12_hw_s *hw, u32 reg)
{
	if (reg == 3) {
		hw->ctx_valid = 0;
		//pr_info("%s, hw->userdata_wp_ctx %d\n", __func__, hw->userdata_wp_ctx);
	} else {
		hw->seqinfo = READ_VREG(MREG_SEQ_INFO);
		hw->reg_pic_width = READ_VREG(MREG_PIC_WIDTH);
		hw->reg_pic_height = READ_VREG(MREG_PIC_HEIGHT);
		hw->reg_mpeg1_2_reg = READ_VREG(MPEG1_2_REG);
		hw->reg_pic_head_info = READ_VREG(PIC_HEAD_INFO);
		hw->reg_f_code_reg = READ_VREG(F_CODE_REG);
		hw->reg_slice_ver_pos_pic_type = READ_VREG(SLICE_VER_POS_PIC_TYPE);
		hw->reg_vcop_ctrl_reg = READ_VREG(VCOP_CTRL_REG);
		hw->reg_mb_info = READ_VREG(MB_INFO);
		hw->reg_signal_type = READ_VREG(AV_SCRATCH_H);
		debug_print(DECODE_ID(hw), PRINT_FLAG_PARA_DATA,
			"signal_type = %x", hw->reg_signal_type);
		hw->ctx_valid = 1;
	}
}

/* Drop any userdata records that were staged but not yet sent. */
static void vmmpeg2_reset_udr_mgr(struct vdec_mpeg12_hw_s *hw)
{
	hw->wait_for_udr_send = 0;
	hw->cur_ud_idx = 0;
memset(&hw->ud_record, 0, sizeof(hw->ud_record)); +} + +static void vmmpeg2_crate_userdata_manager( + struct vdec_mpeg12_hw_s *hw, + u8 *userdata_buf, + int buf_len) +{ + if (hw) { + mutex_init(&hw->userdata_mutex); + + memset(&hw->userdata_info, 0, + sizeof(struct mmpeg2_userdata_info_t)); + hw->userdata_info.data_buf = userdata_buf; + hw->userdata_info.buf_len = buf_len; + hw->userdata_info.data_buf_end = userdata_buf + buf_len; + hw->userdata_wp_ctx = 0; + + vmmpeg2_reset_udr_mgr(hw); + } +} + +static void vmmpeg2_destroy_userdata_manager(struct vdec_mpeg12_hw_s *hw) +{ + if (hw) + memset(&hw->userdata_info, + 0, + sizeof(struct mmpeg2_userdata_info_t)); +} + +static void aml_swap_data(uint8_t *user_data, int ud_size) +{ + int swap_blocks, i, j, k, m; + unsigned char c_temp; + + /* swap byte order */ + swap_blocks = ud_size / 8; + for (i = 0; i < swap_blocks; i++) { + j = i * 8; + k = j + 7; + for (m = 0; m < 4; m++) { + c_temp = user_data[j]; + user_data[j++] = user_data[k]; + user_data[k--] = c_temp; + } + } +} + +#ifdef DUMP_USER_DATA +static void push_to_buf(struct vdec_mpeg12_hw_s *hw, + u8 *pdata, + int len, + struct userdata_meta_info_t *pmeta, + u32 reference) +{ + u32 *pLen; + int info_cnt; + u8 *pbuf_end; + + if (!hw->user_data_dump_buf) + return; + + if (hw->bskip) { + pr_info("over size, skip\n"); + return; + } + info_cnt = 0; + pLen = (u32 *)hw->pdump_buf_cur_start; + + *pLen = len; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->duration; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->flags; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = pmeta->vpts_valid; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + + *pLen = hw->n_userdata_id; + hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + *pLen = reference; + 
hw->pdump_buf_cur_start += sizeof(u32); + info_cnt++; + pLen++; + + pbuf_end = hw->userdata_info.data_buf_end; + if (pdata + len > pbuf_end) { + int first_section_len; + + first_section_len = pbuf_end - pdata; + memcpy(hw->pdump_buf_cur_start, pdata, first_section_len); + pdata = (u8 *)hw->userdata_info.data_buf; + hw->pdump_buf_cur_start += first_section_len; + memcpy(hw->pdump_buf_cur_start, pdata, len - first_section_len); + hw->pdump_buf_cur_start += len - first_section_len; + } else { + memcpy(hw->pdump_buf_cur_start, pdata, len); + hw->pdump_buf_cur_start += len; + } + + hw->total_len += len + info_cnt * sizeof(u32); + if (hw->total_len >= MAX_USER_DATA_SIZE-4096) + hw->bskip = 1; +} + +static void dump_userdata_info(struct vdec_mpeg12_hw_s *hw, + void *puser_data, + int len, + struct userdata_meta_info_t *pmeta, + u32 reference) +{ + u8 *pstart; + + pstart = (u8 *)puser_data; + +#ifdef DUMP_HEAD_INFO_DATA + push_to_buf(hw, pstart, len, pmeta, reference); +#else + push_to_buf(hw, pstart+8, len - 8, pmeta, reference); +#endif +} + + +static void print_data(unsigned char *pdata, + int len, + unsigned int flag, + unsigned int duration, + unsigned int vpts, + unsigned int vpts_valid, + int rec_id, + u32 reference) +{ + int nLeft; + + nLeft = len; + + pr_info("%d len:%d, flag:0x%x, dur:%d, vpts:0x%x, valid:%d, refer:%d\n", + rec_id, len, flag, + duration, vpts, vpts_valid, + reference); + while (nLeft >= 16) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7], + pdata[8], pdata[9], pdata[10], pdata[11], + pdata[12], pdata[13], pdata[14], pdata[15]); + nLeft -= 16; + pdata += 16; + } + + + while (nLeft > 0) { + pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n", + pdata[0], pdata[1], pdata[2], pdata[3], + pdata[4], pdata[5], pdata[6], pdata[7]); + nLeft -= 8; + pdata += 8; + } +} + +static void dump_data(u8 *pdata, + unsigned int 
user_data_length, + unsigned int flag, + unsigned int duration, + unsigned int vpts, + unsigned int vpts_valid, + int rec_id, + u32 reference) +{ + unsigned char szBuf[256]; + + + memset(szBuf, 0, 256); + memcpy(szBuf, pdata, user_data_length); + + aml_swap_data(szBuf, user_data_length); + + print_data(szBuf, + user_data_length, + flag, + duration, + vpts, + vpts_valid, + rec_id, + reference); +} + + +static void show_user_data_buf(struct vdec_mpeg12_hw_s *hw) +{ + u8 *pbuf; + int len; + unsigned int flag; + unsigned int duration; + unsigned int vpts; + unsigned int vpts_valid; + int rec_id; + u32 reference; + + pr_info("show user data buf\n"); + pbuf = hw->user_data_dump_buf; + + while (pbuf < hw->pdump_buf_cur_start) { + u32 *pLen; + + pLen = (u32 *)pbuf; + + len = *pLen; + pLen++; + pbuf += sizeof(u32); + + duration = *pLen; + pLen++; + pbuf += sizeof(u32); + + flag = *pLen; + pLen++; + pbuf += sizeof(u32); + + vpts = *pLen; + pLen++; + pbuf += sizeof(u32); + + vpts_valid = *pLen; + pLen++; + pbuf += sizeof(u32); + + rec_id = *pLen; + pLen++; + pbuf += sizeof(u32); + + reference = *pLen; + pLen++; + pbuf += sizeof(u32); + + + dump_data(pbuf, len, flag, duration, + vpts, vpts_valid, rec_id, reference); + pbuf += len; + msleep(30); + } +} + +static int amvdec_mmpeg12_init_userdata_dump(struct vdec_mpeg12_hw_s *hw) +{ + hw->user_data_dump_buf = kmalloc(MAX_USER_DATA_SIZE, GFP_KERNEL); + if (hw->user_data_dump_buf) + return 1; + else + return 0; +} + +static void amvdec_mmpeg12_uninit_userdata_dump(struct vdec_mpeg12_hw_s *hw) +{ + if (hw->user_data_dump_buf) { + show_user_data_buf(hw); + kfree(hw->user_data_dump_buf); + hw->user_data_dump_buf = NULL; + } +} + +static void reset_user_data_buf(struct vdec_mpeg12_hw_s *hw) +{ + hw->total_len = 0; + hw->pdump_buf_cur_start = hw->user_data_dump_buf; + hw->bskip = 0; + hw->n_userdata_id = 0; +} +#endif + +static void user_data_ready_notify(struct vdec_mpeg12_hw_s *hw, + u32 pts, u32 pts_valid) +{ + struct 
mmpeg2_userdata_record_t *p_userdata_rec; + int i; + + if (hw->wait_for_udr_send) { + for (i = 0; i < hw->cur_ud_idx; i++) { + mutex_lock(&hw->userdata_mutex); + + + p_userdata_rec = hw->userdata_info.records + + hw->userdata_info.write_index; + + hw->ud_record[i].meta_info.vpts_valid = pts_valid; + hw->ud_record[i].meta_info.vpts = pts; + + *p_userdata_rec = hw->ud_record[i]; +#ifdef DUMP_USER_DATA + dump_userdata_info(hw, + hw->userdata_info.data_buf + p_userdata_rec->rec_start, + p_userdata_rec->rec_len, + &p_userdata_rec->meta_info, + hw->reference[i]); + hw->n_userdata_id++; +#endif +/* + pr_info("notify: rec_start:%d, rec_len:%d, wi:%d, reference:%d\n", + p_userdata_rec->rec_start, + p_userdata_rec->rec_len, + hw->userdata_info.write_index, + hw->reference[i]); +*/ + hw->userdata_info.write_index++; + if (hw->userdata_info.write_index >= USERDATA_FIFO_NUM) + hw->userdata_info.write_index = 0; + + mutex_unlock(&hw->userdata_mutex); + + + vdec_wakeup_userdata_poll(hw_to_vdec(hw)); + } + hw->wait_for_udr_send = 0; + hw->cur_ud_idx = 0; + } + hw->notify_ucode_cc_last_wp = hw->ucode_cc_last_wp; + hw->notify_data_cc_last_wp = hw->userdata_info.last_wp; +} + +static int vmmpeg2_user_data_read(struct vdec_s *vdec, + struct userdata_param_t *puserdata_para) +{ + struct vdec_mpeg12_hw_s *hw = NULL; + int rec_ri, rec_wi; + int rec_len; + u8 *rec_data_start; + u8 *pdest_buf; + struct mmpeg2_userdata_record_t *p_userdata_rec; + u32 data_size; + u32 res; + int copy_ok = 1; + + hw = (struct vdec_mpeg12_hw_s *)vdec->private; + + pdest_buf = puserdata_para->pbuf_addr; + + mutex_lock(&hw->userdata_mutex); + +/* + pr_info("ri = %d, wi = %d\n", + hw->userdata_info.read_index, + hw->userdata_info.write_index); +*/ + rec_ri = hw->userdata_info.read_index; + rec_wi = hw->userdata_info.write_index; + + if (rec_ri == rec_wi) { + mutex_unlock(&hw->userdata_mutex); + return 0; + } + + p_userdata_rec = hw->userdata_info.records + rec_ri; + + rec_len = p_userdata_rec->rec_len; + 
rec_data_start = p_userdata_rec->rec_start + hw->userdata_info.data_buf; +/* + pr_info("ri:%d, wi:%d, rec_len:%d, rec_start:%d, buf_len:%d\n", + rec_ri, rec_wi, + p_userdata_rec->rec_len, + p_userdata_rec->rec_start, + puserdata_para->buf_len); +*/ + if (rec_len <= puserdata_para->buf_len) { + /* dvb user data buffer is enought to + copy the whole recored. */ + data_size = rec_len; + if (rec_data_start + data_size + > hw->userdata_info.data_buf_end) { + int first_section_len; + + first_section_len = hw->userdata_info.buf_len - + p_userdata_rec->rec_start; + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + first_section_len); + if (res) { + pr_info("p1 read not end res=%d, request=%d\n", + res, first_section_len); + copy_ok = 0; + + p_userdata_rec->rec_len -= + first_section_len - res; + p_userdata_rec->rec_start += + first_section_len - res; + puserdata_para->data_size = + first_section_len - res; + } else { + res = (u32)copy_to_user( + (void *)(pdest_buf+first_section_len), + (void *)hw->userdata_info.data_buf, + data_size - first_section_len); + if (res) { + pr_info("p2 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + p_userdata_rec->rec_len -= + data_size - res; + p_userdata_rec->rec_start = + data_size - first_section_len - res; + puserdata_para->data_size = + data_size - res; + } + } else { + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + data_size); + if (res) { + pr_info("p3 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start += data_size - res; + puserdata_para->data_size = data_size - res; + } + + if (copy_ok) { + hw->userdata_info.read_index++; + if (hw->userdata_info.read_index >= USERDATA_FIFO_NUM) + hw->userdata_info.read_index = 0; + } + } else { + /* dvb user data buffer is not enought + to copy the whole recored. 
*/ + data_size = puserdata_para->buf_len; + if (rec_data_start + data_size + > hw->userdata_info.data_buf_end) { + int first_section_len; + + first_section_len = hw->userdata_info.buf_len - + p_userdata_rec->rec_start; + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + first_section_len); + if (res) { + pr_info("p4 read not end res=%d, request=%d\n", + res, first_section_len); + copy_ok = 0; + p_userdata_rec->rec_len -= + first_section_len - res; + p_userdata_rec->rec_start += + first_section_len - res; + puserdata_para->data_size = + first_section_len - res; + } else { + /* first secton copy is ok*/ + res = (u32)copy_to_user( + (void *)(pdest_buf+first_section_len), + (void *)hw->userdata_info.data_buf, + data_size - first_section_len); + if (res) { + pr_info("p5 read not end res=%d, request=%d\n", + res, + data_size - first_section_len); + copy_ok = 0; + } + + p_userdata_rec->rec_len -= + data_size - res; + p_userdata_rec->rec_start = + data_size - first_section_len - res; + puserdata_para->data_size = + data_size - res; + } + } else { + res = (u32)copy_to_user((void *)pdest_buf, + (void *)rec_data_start, + data_size); + if (res) { + pr_info("p6 read not end res=%d, request=%d\n", + res, data_size); + copy_ok = 0; + } + + p_userdata_rec->rec_len -= data_size - res; + p_userdata_rec->rec_start += data_size - res; + puserdata_para->data_size = data_size - res; + } + + if (copy_ok) { + hw->userdata_info.read_index++; + if (hw->userdata_info.read_index >= USERDATA_FIFO_NUM) + hw->userdata_info.read_index = 0; + } + + } + puserdata_para->meta_info = p_userdata_rec->meta_info; + + if (hw->userdata_info.read_index <= hw->userdata_info.write_index) + puserdata_para->meta_info.records_in_que = + hw->userdata_info.write_index - + hw->userdata_info.read_index; + else + puserdata_para->meta_info.records_in_que = + hw->userdata_info.write_index + + USERDATA_FIFO_NUM - + hw->userdata_info.read_index; + + puserdata_para->version = (0<<24|0<<16|0<<8|1); + + 
mutex_unlock(&hw->userdata_mutex); + + + return 1; +} + +static void vmmpeg2_reset_userdata_fifo(struct vdec_s *vdec, int bInit) +{ + struct vdec_mpeg12_hw_s *hw = NULL; + + hw = (struct vdec_mpeg12_hw_s *)vdec->private; + + if (hw) { + mutex_lock(&hw->userdata_mutex); + pr_info("mpeg2_reset_userdata_fifo: bInit: %d, ri: %d, wi: %d\n", + bInit, + hw->userdata_info.read_index, + hw->userdata_info.write_index); + hw->userdata_info.read_index = 0; + hw->userdata_info.write_index = 0; + + if (bInit) + hw->userdata_info.last_wp = 0; + mutex_unlock(&hw->userdata_mutex); + } +} + +static void vmmpeg2_wakeup_userdata_poll(struct vdec_s *vdec) +{ + amstream_wakeup_userdata_poll(vdec); +} + +/* +#define PRINT_HEAD_INFO +*/ +static void userdata_push_do_work(struct work_struct *work) +{ + u32 reg; + u8 *pdata; + u8 *psrc_data; + u8 head_info[8]; + struct userdata_meta_info_t meta_info; + u32 wp; + u32 index; + u32 picture_struct; + u32 reference; + u32 picture_type; + u32 temp; + u32 data_length; + u32 data_start; + int i; + u32 offset; + u32 cur_wp; +#ifdef PRINT_HEAD_INFO + u8 *ptype_str; +#endif + struct mmpeg2_userdata_record_t *pcur_ud_rec; + + struct vdec_mpeg12_hw_s *hw = container_of(work, + struct vdec_mpeg12_hw_s, userdata_push_work); + + memset(&meta_info, 0, sizeof(meta_info)); + + meta_info.duration = hw->frame_dur; + + + reg = READ_VREG(AV_SCRATCH_J); + hw->userdata_wp_ctx = reg & (~(1<<16)); + meta_info.flags = ((reg >> 30) << 1); + meta_info.flags |= (VFORMAT_MPEG12 << 3); + /* check top_field_first flag */ + if ((reg >> 28) & 0x1) { + meta_info.flags |= (1 << 10); + meta_info.flags |= (((reg >> 29) & 0x1) << 11); + } + + cur_wp = reg & 0x7fff; + if (cur_wp == hw->ucode_cc_last_wp || (cur_wp >= AUX_BUF_ALIGN(CCBUF_SIZE))) { + debug_print(DECODE_ID(hw), 0, + "Null or Over size user data package: wp = %d\n", cur_wp); + WRITE_VREG(AV_SCRATCH_J, 0); + return; + } + + if (hw->cur_ud_idx >= MAX_UD_RECORDS) { + debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "UD 
Records over: %d, skip it\n", MAX_UD_RECORDS); + WRITE_VREG(AV_SCRATCH_J, 0); + hw->cur_ud_idx = 0; + return; + } + + if (cur_wp < hw->ucode_cc_last_wp) + hw->ucode_cc_last_wp = 0; + + offset = READ_VREG(AV_SCRATCH_I); + + codec_mm_dma_flush( + hw->ccbuf_phyAddress_virt, + CCBUF_SIZE, + DMA_FROM_DEVICE); + + mutex_lock(&hw->userdata_mutex); + if (hw->ccbuf_phyAddress_virt) { + pdata = (u8 *)hw->ccbuf_phyAddress_virt + hw->ucode_cc_last_wp; + memcpy(head_info, pdata, 8); + } else + memset(head_info, 0, 8); + mutex_unlock(&hw->userdata_mutex); + aml_swap_data(head_info, 8); + + wp = (head_info[0] << 8 | head_info[1]); + index = (head_info[2] << 8 | head_info[3]); + + picture_struct = (head_info[6] << 8 | head_info[7]); + temp = (head_info[4] << 8 | head_info[5]); + reference = temp & 0x3FF; + picture_type = (temp >> 10) & 0x7; + + if (debug_enable & PRINT_FLAG_USERDATA_DETAIL) + pr_info("index:%d, wp:%d, ref:%d, type:%d, struct:0x%x, u_last_wp:0x%x\n", + index, wp, reference, + picture_type, picture_struct, + hw->ucode_cc_last_wp); + + switch (picture_type) { + case 1: + /* pr_info("I type, pos:%d\n", + (meta_info.flags>>1)&0x3); */ + meta_info.flags |= (1<<7); +#ifdef PRINT_HEAD_INFO + ptype_str = " I"; +#endif + break; + case 2: + /* pr_info("P type, pos:%d\n", + (meta_info.flags>>1)&0x3); */ + meta_info.flags |= (2<<7); +#ifdef PRINT_HEAD_INFO + ptype_str = " P"; +#endif + break; + case 3: + /* pr_info("B type, pos:%d\n", + (meta_info.flags>>1)&0x3); */ + meta_info.flags |= (3<<7); +#ifdef PRINT_HEAD_INFO + ptype_str = " B"; +#endif + break; + case 4: + /* pr_info("D type, pos:%d\n", + (meta_info.flags>>1)&0x3); */ + meta_info.flags |= (4<<7); +#ifdef PRINT_HEAD_INFO + ptype_str = " D"; +#endif + break; + default: + /* pr_info("Unknown type:0x%x, pos:%d\n", + pheader->picture_coding_type, + (meta_info.flags>>1)&0x3); */ +#ifdef PRINT_HEAD_INFO + ptype_str = " U"; +#endif + break; + } +#ifdef PRINT_HEAD_INFO + pr_info("ref:%d, type:%s, ext:%d, first:%d, 
data_length:%d\n", + reference, ptype_str, + (reg >> 30), + (reg >> 28)&0x3, + reg & 0xffff); +#endif + data_length = cur_wp - hw->ucode_cc_last_wp; + data_start = reg & 0xffff; + psrc_data = (u8 *)hw->ccbuf_phyAddress_virt + hw->ucode_cc_last_wp; + + pdata = hw->userdata_info.data_buf + hw->userdata_info.last_wp; + for (i = 0; i < data_length && hw->ccbuf_phyAddress_virt != NULL && psrc_data; i++) { + *pdata++ = *psrc_data++; + if (pdata >= hw->userdata_info.data_buf_end) + pdata = hw->userdata_info.data_buf; + } + + pcur_ud_rec = hw->ud_record + hw->cur_ud_idx; + + pcur_ud_rec->meta_info = meta_info; + pcur_ud_rec->rec_start = hw->userdata_info.last_wp; + pcur_ud_rec->rec_len = data_length; + + hw->userdata_info.last_wp += data_length; + if (hw->userdata_info.last_wp >= USER_DATA_SIZE) + hw->userdata_info.last_wp %= USER_DATA_SIZE; + + hw->wait_for_udr_send = 1; + + hw->ucode_cc_last_wp = cur_wp; + + if (debug_enable & PRINT_FLAG_USERDATA_DETAIL) + pr_info("cur_wp:%d, rec_start:%d, rec_len:%d\n", + cur_wp, + pcur_ud_rec->rec_start, + pcur_ud_rec->rec_len); + +#ifdef DUMP_USER_DATA + hw->reference[hw->cur_ud_idx] = reference; +#endif + + hw->cur_ud_idx++; + WRITE_VREG(AV_SCRATCH_J, 0); +} + + +void userdata_pushed_drop(struct vdec_mpeg12_hw_s *hw) +{ + hw->userdata_info.last_wp = hw->notify_data_cc_last_wp; + hw->ucode_cc_last_wp = hw->notify_ucode_cc_last_wp; + hw->cur_ud_idx = 0; + hw->wait_for_udr_send = 0; + +} + + +static inline void hw_update_gvs(struct vdec_mpeg12_hw_s *hw) +{ + if (hw->gvs.frame_height != hw->frame_height) { + hw->gvs.frame_width = hw->frame_width; + hw->gvs.frame_height = hw->frame_height; + } + if (hw->gvs.frame_dur != hw->frame_dur) { + hw->gvs.frame_dur = hw->frame_dur; + if (hw->frame_dur != 0) + hw->gvs.frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ? 
				96000 / hw->frame_dur : (96000 / hw->frame_dur +1);
		else
			hw->gvs.frame_rate = -1;
	}
	if (hw->gvs.ratio_control != hw->ratio_control)
		hw->gvs.ratio_control = hw->ratio_control;

	hw->gvs.status = hw->stat;
	hw->gvs.error_count = hw->gvs.error_frame_count;
	hw->gvs.drop_frame_count = hw->drop_frame_count;

}

/*
 * prepare_display_buf() - wrap a decoded picture into vframe(s) and queue
 * them for display, or recycle the vframe when the picture must be dropped.
 * Interlaced pictures emit one vframe per field (2, or 3 with repeat first
 * field); progressive pictures emit a single vframe.
 * Returns 0 on success, -1 when no free vframe slot is available (in that
 * case dec_result is set to DEC_RESULT_ERROR and the work is scheduled).
 */
static int prepare_display_buf(struct vdec_mpeg12_hw_s *hw,
	struct pic_info_t *pic)
{
	u32 field_num = 0, i;
	u32 first_field_type = 0, type = 0;
	struct vframe_s *vf = NULL;
	u32 index = pic->index;
	u32 info = pic->buffer_info;
	struct vdec_s *vdec = hw_to_vdec(hw);
	struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx;
	struct vdec_v4l2_buffer *fb = NULL;
	ulong nv_order = VIDTYPE_VIU_NV21;
	bool pb_skip = false;

	/* swap uv */
	if (hw->is_used_v4l) {
		if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) ||
			(v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M))
			nv_order = VIDTYPE_VIU_NV12;
	}

#ifdef NV21
	type = nv_order;
#endif
	/* I-only trick mode: non-I pictures are dropped below. */
	if (hw->i_only) {
		pb_skip = 1;
	}

	user_data_ready_notify(hw, pic->pts, pic->pts_valid);

	if (hw->frame_prog & PICINFO_PROG) {
		field_num = 1;
		type |= VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | nv_order;
	} else {
#ifdef INTERLACE_SEQ_ALWAYS
		/* once an interlace seq, force interlace, to make di easy. */
		hw->dec_control |= DEC_CONTROL_FLAG_FORCE_SEQ_INTERLACE;
#endif
		hw->frame_rpt_state = FRAME_REPEAT_NONE;

		first_field_type = (info & PICINFO_TOP_FIRST) ?
			VIDTYPE_INTERLACE_TOP : VIDTYPE_INTERLACE_BOTTOM;
		/* repeat-first-field adds a third field vframe */
		field_num = (info & PICINFO_RPT_FIRST) ?
			3 : 2;
	}

	/* v4l2 path may force a single progressive frame output. */
	if ((hw->is_used_v4l) &&
		((vdec->prog_only) || (hw->report_field & V4L2_FIELD_NONE) ||
		(!v4l2_ctx->vpp_is_need))) {
		field_num = 1;
		type |= VIDTYPE_PROGRESSIVE | VIDTYPE_VIU_FIELD | nv_order;
	}

	for (i = 0; i < field_num; i++) {
		if (kfifo_get(&hw->newframe_q, &vf) == 0) {
			debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
				"fatal error, no available buffer slot.");
			hw->dec_result = DEC_RESULT_ERROR;
			vdec_schedule_work(&hw->work);
			return -1;
		}

		if (hw->is_used_v4l) {
			vf->v4l_mem_handle
				= hw->pics[index].v4l_ref_buf_addr;
			fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
			debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
				"[%d] %s(), v4l mem handle: 0x%lx\n",
				((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id,
				__func__, vf->v4l_mem_handle);
		}

		hw->vfbuf_use[index]++;
		vf->index = index;
		set_frame_info(hw, vf);
		if (field_num > 1) {
			/* Split the frame duration across the fields; a third
			 * (repeated) field carries the remainder as pulldown. */
			vf->duration = vf->duration / field_num;
			vf->duration_pulldown = (field_num == 3) ?
				(vf->duration >> 1):0;
			type = nv_order;
			if (i == 1) /* second field*/
				type |= (first_field_type == VIDTYPE_INTERLACE_TOP) ?
					VIDTYPE_INTERLACE_BOTTOM : VIDTYPE_INTERLACE_TOP;
			else
				type |= (first_field_type == VIDTYPE_INTERLACE_TOP) ?
					VIDTYPE_INTERLACE_TOP : VIDTYPE_INTERLACE_BOTTOM;
		} else {
			if ((hw->seqinfo & SEQINFO_EXT_AVAILABLE) &&
				(hw->seqinfo & SEQINFO_PROG)) {
				/* progressive sequence: repeat_first_field +
				 * top_field_first select x2 / x3 display. */
				if (info & PICINFO_RPT_FIRST) {
					if (info & PICINFO_TOP_FIRST)
						vf->duration *= 3;
					else
						vf->duration *= 2;
				}
				vf->duration_pulldown = 0;
			} else {
				vf->duration_pulldown =
					(info & PICINFO_RPT_FIRST) ?
					vf->duration >> 1 : 0;
			}
		}
		vf->duration += vf->duration_pulldown;
		vf->type = type;
		vf->orientation = 0;
		if (i > 0) {
			/* second/third field: no pts of its own */
			vf->pts = 0;
			vf->pts_us64 = 0;
			vf->timestamp = pic->timestamp;
			if (v4l2_ctx->second_field_pts_mode) {
				vf->timestamp = 0;
			}
		} else {
			vf->pts = (pic->pts_valid) ? pic->pts : 0;
			vf->pts_us64 = (pic->pts_valid) ?
				pic->pts64 : 0;
			if (field_num == 1)
				vf->timestamp = pic->timestamp;
			else
				vf->timestamp = pic->last_timestamp;
		}
		vf->type_original = vf->type;

		/* Drop path: damaged picture, or P/B arriving before the
		 * first clean I frame (also used by i_only trick mode). */
		if ((error_skip(hw, pic->buffer_info, vf)) ||
			(((hw->first_i_frame_ready == 0) || pb_skip) &&
			((PICINFO_TYPE_MASK & pic->buffer_info) !=
				PICINFO_TYPE_I))) {
			unsigned long flags;
			hw->drop_frame_count++;
			if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) {
				hw->gvs.i_lost_frames++;
			} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P) {
				hw->gvs.p_lost_frames++;
			} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_B) {
				hw->gvs.b_lost_frames++;
			}
			/* Though we drop it, it is still an error frame, count it.
			 * Because we've counted the error frame in vdec_count_info
			 * function, avoid count it twice.
			 */
			if (!(info & PICINFO_ERROR)) {
				hw->gvs.error_frame_count++;
				if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) {
					hw->gvs.i_concealed_frames++;
				} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P) {
					hw->gvs.p_concealed_frames++;
				} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_B) {
					hw->gvs.b_concealed_frames++;
				}
			}
			/* Recycle the vframe straight back to the free queue. */
			hw->vfbuf_use[index]--;
			spin_lock_irqsave(&hw->lock, flags);
			kfifo_put(&hw->newframe_q,
				(const struct vframe_s *)vf);
			spin_unlock_irqrestore(&hw->lock, flags);
		} else {
			debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO,
				"%s, vf: %lx, num[%d]: %d(%c), dur: %d, type: %x, pts: %d(%lld), ts(%lld)\n",
				__func__, (ulong)vf, i, hw->disp_num, GET_SLICE_TYPE(info),
				vf->duration, vf->type, vf->pts, vf->pts_us64, vf->timestamp);
			atomic_add(1, &hw->disp_num);
			if (i == 0) {
				decoder_do_frame_check(vdec, vf);
				hw_update_gvs(hw);
				vdec_fill_vdec_frame(vdec, &hw->vframe_qos,
					&hw->gvs, vf, pic->hw_decode_time);
			}
			vdec->vdec_fps_detec(vdec->id);
			vf->mem_handle =
				decoder_bmmu_box_get_mem_handle(
					hw->mm_blk_handle, index);
			if (!vdec->vbuf.use_ptsserv && vdec_stream_based(vdec)) {
				/* offset for tsplayer pts lookup */
				if (i == 0)
				{
					vf->pts_us64 =
						(((u64)vf->duration << 32) &
						0xffffffff00000000) | pic->offset;
					vf->pts = 0;
				} else {
					vf->pts_us64 = (u64)-1;
					vf->pts = 0;
				}
			}
			vdec_vframe_ready(vdec, vf);
			kfifo_put(&hw->display_q, (const struct vframe_s *)vf);
			ATRACE_COUNTER(hw->pts_name, vf->timestamp);
			ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q));
			ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q));
			/* if (hw->disp_num == 1) { */
			if (hw->kpi_first_i_decoded == 0) {
				hw->kpi_first_i_decoded = 1;
				debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
					"[vdec_kpi][%s] First I frame decoded.\n",
					__func__);
			}
			if (without_display_mode == 0) {
				if (hw->is_used_v4l) {
					if (v4l2_ctx->is_stream_off) {
						/* stream is off: consume immediately */
						vmpeg_vf_put(vmpeg_vf_get(vdec), vdec);
					} else {
						set_meta_data_to_vf(vf, UVM_META_DATA_VF_BASE_INFOS, hw->v4l2_ctx);
						ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx);
						fb->task->submit(fb->task, TASK_TYPE_DEC);
					}
				} else {
					vf_notify_receiver(vdec->vf_provider_name,
						VFRAME_EVENT_PROVIDER_VFRAME_READY,
						NULL);
				}
			} else
				vmpeg_vf_put(vmpeg_vf_get(vdec), vdec);


		}
	}
	return 0;
}

/*
 * force_interlace_check() - clear frame_prog (force interlaced handling)
 * for specific stream geometries/frame rates selected by hw->dec_control
 * flags, e.g. 720x576@25 when FORCE_2500_720_576_INTERLACE is set.
 * (frame_dur is in 96 kHz units: 3840 = 25 fps, 3200 = 30 fps.)
 */
static void force_interlace_check(struct vdec_mpeg12_hw_s *hw)
{
	if ((hw->dec_control &
		DEC_CONTROL_FLAG_FORCE_2500_720_576_INTERLACE) &&
		(hw->frame_width == 720) &&
		(hw->frame_height == 576) &&
		(hw->frame_dur == 3840)) {
		hw->frame_prog = 0;
	} else if ((hw->dec_control
		& DEC_CONTROL_FLAG_FORCE_3000_704_480_INTERLACE) &&
		(hw->frame_width == 704) &&
		(hw->frame_height == 480) &&
		(hw->frame_dur == 3200)) {
		hw->frame_prog = 0;
	} else if ((hw->dec_control
		& DEC_CONTROL_FLAG_FORCE_2500_704_576_INTERLACE) &&
		(hw->frame_width == 704) &&
		(hw->frame_height == 576) &&
		(hw->frame_dur == 3840)) {
		hw->frame_prog = 0;
	} else if ((hw->dec_control
		& DEC_CONTROL_FLAG_FORCE_2500_544_576_INTERLACE) &&
		(hw->frame_width == 544) &&
		(hw->frame_height == 576) &&
		(hw->frame_dur == 3840)) {
		hw->frame_prog = 0;
	} else if
		((hw->dec_control
		& DEC_CONTROL_FLAG_FORCE_2500_480_576_INTERLACE) &&
		(hw->frame_width == 480) &&
		(hw->frame_height == 576) &&
		(hw->frame_dur == 3840)) {
		hw->frame_prog = 0;
	} else if (hw->dec_control
		& DEC_CONTROL_FLAG_FORCE_SEQ_INTERLACE) {
		hw->frame_prog = 0;
	}

}

/*
 * update_reference() - rotate the two-slot reference list after an I/P
 * picture is decoded. Returns the index of the picture that should be
 * displayed now, or hw->buf_num (out of range) when nothing is shown yet.
 */
static int update_reference(struct vdec_mpeg12_hw_s *hw,
	int index)
{
	hw->ref_use[index]++;
	if (hw->refs[1] == -1) {
		hw->refs[1] = index;
		/*
		 * first pic need output to show
		 * usecnt do not decrease.
		 */
	} else if (hw->refs[0] == -1) {
		hw->refs[0] = hw->refs[1];
		hw->refs[1] = index;
		/* second pic do not output */
		index = hw->buf_num;
	} else {
		hw->ref_use[hw->refs[0]]--; /* old ref0 unused */
		hw->refs[0] = hw->refs[1];
		hw->refs[1] = index;
		index = hw->refs[0];
	}
	return index;
}

/* True when either reference picture is flagged as damaged. */
static bool is_ref_error(struct vdec_mpeg12_hw_s *hw)
{
	if ((hw->pics[hw->refs[0]].buffer_info & PICINFO_ERROR) ||
		(hw->pics[hw->refs[1]].buffer_info & PICINFO_ERROR))
		return 1;
	return 0;
}

/* Fill the v4l2 picture-setting info from the parsed stream geometry;
 * coded size is aligned up to 64. Always returns 0. */
static int vmpeg2_get_ps_info(struct vdec_mpeg12_hw_s *hw, int width, int height,
	bool frame_prog, struct aml_vdec_ps_infos *ps)
{
	ps->visible_width = width;
	ps->visible_height = height;
	ps->coded_width = ALIGN(width, 64);
	ps->coded_height = ALIGN(height, 64);
	ps->dpb_size = hw->buf_num;
	ps->dpb_frames = DECODE_BUFFER_NUM_DEF;
	ps->dpb_margin = hw->dynamic_buf_num_margin;
	ps->field = frame_prog ? V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED;
	/* force_prog_only overrides the stream's interlace flag */
	ps->field = hw->force_prog_only ?
		V4L2_FIELD_NONE : ps->field;

	return 0;
}

/*
 * v4l_res_change() - detect a mid-stream resolution change and, when one
 * happened, push new picture settings to the v4l2 layer, flush the current
 * output and signal EOS so the pipeline can re-negotiate buffers.
 * Returns 1 when a change was handled (caller should bail out), else 0.
 */
static int v4l_res_change(struct vdec_mpeg12_hw_s *hw, int width, int height, bool frame_prog)
{
	struct aml_vcodec_ctx *ctx =
		(struct aml_vcodec_ctx *)(hw->v4l2_ctx);
	int ret = 0;

	if (ctx->param_sets_from_ucode &&
		hw->res_ch_flag == 0) {
		struct aml_vdec_ps_infos ps;

		if ((hw->frame_width != 0 &&
			hw->frame_height != 0) &&
			(hw->frame_width != width ||
			hw->frame_height != height)) {
			debug_print(DECODE_ID(hw), 0,
				"v4l_res_change Pic Width/Height Change (%d,%d)=>(%d,%d)\n",
				hw->frame_width, hw->frame_height,
				width,
				height);
			vmpeg2_get_ps_info(hw, width, height, frame_prog, &ps);
			vdec_v4l_set_ps_infos(ctx, &ps);
			vdec_v4l_res_ch_event(ctx);
			hw->v4l_params_parsed = false;
			hw->res_ch_flag = 1;
			ctx->v4l_resolution_change = 1;
			hw->eos = 1;
			flush_output(hw);
			ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__);
			notify_v4l_eos(hw_to_vdec(hw));
			ATRACE_COUNTER("V_ST_DEC-submit_eos", 0);

			ret = 1;
		}
	}

	return ret;
}

/*
 * cal_chunk_offset_and_size() - record the offset/size of the part of the
 * current chunk that the decoder has not consumed yet (frame-based input),
 * compensating one VDEC_FIFO_ALIGN unit for fifo prefetch.
 */
void cal_chunk_offset_and_size(struct vdec_mpeg12_hw_s *hw)
{
	u32 consume_byte, res_byte;

	/* VIFF_BIT_CNT holds the remaining bits in the fifo. */
	res_byte = READ_VREG(VIFF_BIT_CNT) >> 3;

	if (hw->chunk->size > res_byte) {
		consume_byte = hw->chunk->size - res_byte;

		if (consume_byte > VDEC_FIFO_ALIGN) {
			consume_byte -= VDEC_FIFO_ALIGN;
			res_byte += VDEC_FIFO_ALIGN;
		}
		hw->chunk_header_offset = hw->chunk->offset + consume_byte;
		hw->chunk_res_size = res_byte;
	}
}

/*
 * vmpeg12_isr_thread_fn() - threaded half of the decoder interrupt.
 * Dispatches on the ucode mailbox registers: debug messages (AV_SCRATCH_M),
 * sequence/parameter ready (AV_SCRATCH_G), userdata ready (AV_SCRATCH_J),
 * then the decode result in MREG_BUFFEROUT (data request / data empty /
 * picture done).
 */
static irqreturn_t vmpeg12_isr_thread_fn(struct vdec_s *vdec, int irq)
{
	u32 reg, index, info, seqinfo, offset, pts, frame_size=0, tmp_h, tmp_w;
	u64 pts_us64 = 0;
	struct pic_info_t *new_pic, *disp_pic;
	struct vdec_mpeg12_hw_s *hw =
		(struct vdec_mpeg12_hw_s *)(vdec->private);

	/* ucode debug message mailbox */
	if (READ_VREG(AV_SCRATCH_M) != 0 &&
		(debug_enable & PRINT_FLAG_UCODE_DETAIL)) {

		debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
			"dbg %x: %x, level %x, wp %x, rp %x, cnt %x\n",
			READ_VREG(AV_SCRATCH_M), READ_VREG(AV_SCRATCH_N),
			READ_VREG(VLD_MEM_VIFIFO_LEVEL),
			READ_VREG(VLD_MEM_VIFIFO_WP),
			READ_VREG(VLD_MEM_VIFIFO_RP),
			READ_VREG(VIFF_BIT_CNT));
		WRITE_VREG(AV_SCRATCH_M, 0);
		return IRQ_HANDLED;
	}

	/* sequence header parsed by ucode */
	reg = READ_VREG(AV_SCRATCH_G);
	if (reg == 1) {
		if (hw->kpi_first_i_comming == 0) {
			hw->kpi_first_i_comming = 1;
			debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
				"[vdec_kpi][%s] First I frame coming.\n",
				__func__);
		}
		if (hw->is_used_v4l) {
			int frame_width = READ_VREG(MREG_PIC_WIDTH);
			int frame_height = READ_VREG(MREG_PIC_HEIGHT);
			int info = READ_VREG(MREG_SEQ_INFO);
			bool frame_prog = info & 0x10000;

			if (!v4l_res_change(hw, frame_width, frame_height, frame_prog)) {
				struct aml_vcodec_ctx *ctx =
					(struct aml_vcodec_ctx *)(hw->v4l2_ctx);
				if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) {
					/* first parameter set: report to v4l2 and retry */
					struct aml_vdec_ps_infos ps;

					vmpeg2_get_ps_info(hw, frame_width, frame_height, frame_prog, &ps);
					hw->v4l_params_parsed = true;
					hw->report_field = frame_prog ? V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED;
					vdec_v4l_set_ps_infos(ctx, &ps);
					cal_chunk_offset_and_size(hw);
					userdata_pushed_drop(hw);
					reset_process_time(hw);
					hw->dec_result = DEC_RESULT_AGAIN;
					vdec_schedule_work(&hw->work);
				} else {
					struct vdec_pic_info pic;

					if (!hw->buf_num) {
						vdec_v4l_get_pic_info(ctx, &pic);
						hw->buf_num = pic.dpb_frames +
							pic.dpb_margin;
						if (hw->buf_num > DECODE_BUFFER_NUM_MAX)
							hw->buf_num = DECODE_BUFFER_NUM_MAX;
					}

					WRITE_VREG(AV_SCRATCH_G, 0);

					hw->res_ch_flag = 0;
				}
			} else {
				/* resolution changed: drop staged data and retry */
				userdata_pushed_drop(hw);
				reset_process_time(hw);
				hw->dec_result = DEC_RESULT_AGAIN;
				vdec_schedule_work(&hw->work);
			}
		} else
			WRITE_VREG(AV_SCRATCH_G, 0);
		return IRQ_HANDLED;
	}

	/* userdata record pending: handle in the push work */
	reg = READ_VREG(AV_SCRATCH_J);
	if (reg & (1<<16)) {
		vdec_schedule_work(&hw->userdata_push_work);
		return IRQ_HANDLED;
	}

	reg = READ_VREG(MREG_BUFFEROUT);

	ATRACE_COUNTER("V_ST_DEC-decode_state", reg);

	if (reg == MPEG12_DATA_REQUEST) {
		debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
			"%s: data request, bcnt=%x\n",
			__func__, READ_VREG(VIFF_BIT_CNT));
		if (vdec_frame_based(vdec)) {
			reset_process_time(hw);
			hw->dec_result = DEC_RESULT_GET_DATA;
			vdec_schedule_work(&hw->work);
		}
	} else if (reg == MPEG12_DATA_EMPTY) {
		/*timeout when decoding next frame*/
		debug_print(DECODE_ID(hw), PRINT_FLAG_VLD_DETAIL,
			"%s: Insufficient data, lvl=%x ctrl=%x bcnt=%x\n",
			__func__,
			READ_VREG(VLD_MEM_VIFIFO_LEVEL),
			READ_VREG(VLD_MEM_VIFIFO_CONTROL),
			READ_VREG(VIFF_BIT_CNT));

		if (vdec_frame_based(vdec)) {
			userdata_pushed_drop(hw);
			hw->dec_result = DEC_RESULT_DONE;
			vdec_schedule_work(&hw->work);
		} else {
			hw->dec_result = DEC_RESULT_AGAIN;
			vdec_schedule_work(&hw->work);
			userdata_pushed_drop(hw);
			reset_process_time(hw);
		}
		return IRQ_HANDLED;
	} else { /* MPEG12_PIC_DONE, MPEG12_SEQ_END */
		reset_process_time(hw);

		info = READ_VREG(MREG_PIC_INFO);
		offset = READ_VREG(MREG_FRAME_OFFSET);
		index = spec_to_index(hw, READ_VREG(REC_CANVAS_ADDR));
		seqinfo = READ_VREG(MREG_SEQ_INFO);

		/* report profile/level changes (seqinfo bits [14:12]/[11:8]) */
		if (((seqinfo >> 8) & 0xff) &&
			((seqinfo >> 12 & 0x7) != hw->profile_idc ||
			(seqinfo >> 8 & 0xf) != hw->level_idc)) {
			hw->profile_idc = seqinfo >> 12 & 0x7;
			hw->level_idc = seqinfo >> 8 & 0xf;
			vdec_set_profile_level(vdec, hw->profile_idc, hw->level_idc);
			debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
				"profile_idc: %d level_idc: %d\n",
				hw->profile_idc, hw->level_idc);
		}

		if ((info & PICINFO_PROG) == 0 &&
			(info & FRAME_PICTURE_MASK) != FRAME_PICTURE) {
			hw->first_i_frame_ready = 1; /* for field struct case*/
		}
		if (index >= hw->buf_num) {
			debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
				"mmpeg12: invalid buf index: %d\n", index);
			hw->dec_result = DEC_RESULT_ERROR;
			vdec_schedule_work(&hw->work);
			return IRQ_HANDLED;
		}
		hw->dec_num++;
		hw->dec_result = DEC_RESULT_DONE;
		new_pic = &hw->pics[index];
		if (vdec->mvfrm) {
			new_pic->frame_size =
				vdec->mvfrm->frame_size;
			new_pic->hw_decode_time =
				local_clock() - vdec->mvfrm->hw_decode_start;
		}

		tmp_w = READ_VREG(MREG_PIC_WIDTH);
		tmp_h = READ_VREG(MREG_PIC_HEIGHT);

		new_pic->width = tmp_w;
		hw->frame_width = tmp_w;
		new_pic->height = tmp_h;
		hw->frame_height = tmp_h;

		new_pic->buffer_info = info;
		new_pic->offset = offset;
		new_pic->index = index;
		/* pts handling: I/P pictures take the chunk (or pts-server)
		 * timestamp; B pictures only record the last chunk pts. */
		if (((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) ||
			((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P)) {
			if (hw->chunk) {
				new_pic->pts_valid = hw->chunk->pts_valid;
				new_pic->pts = hw->chunk->pts;
				new_pic->pts64 = hw->chunk->pts64;
				if (hw->first_field_timestamp_valid)
					new_pic->last_timestamp = hw->first_field_timestamp;
				else
					new_pic->last_timestamp = hw->chunk->timestamp;
				hw->first_field_timestamp_valid = false;
				new_pic->timestamp = hw->chunk->timestamp;
				if (hw->last_chunk_pts == hw->chunk->pts) {
					new_pic->pts_valid = 0;
					debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO,
						"pts invalid\n");
				}
			} else {
				if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) {
					if (pts_lookup_offset_us64(PTS_TYPE_VIDEO, offset,
						&pts, &frame_size, 0, &pts_us64) == 0) {
						new_pic->pts_valid = true;
						new_pic->pts = pts;
						new_pic->pts64 = pts_us64;
					} else
						new_pic->pts_valid = false;
				}
			}
		} else {
			if (hw->chunk) {
				hw->last_chunk_pts = hw->chunk->pts;
				if (hw->first_field_timestamp_valid)
					new_pic->last_timestamp = hw->first_field_timestamp;
				else
					new_pic->last_timestamp = hw->chunk->timestamp;
				hw->first_field_timestamp_valid = false;
				new_pic->timestamp = hw->chunk->timestamp;
			}
			new_pic->pts_valid = false;
		}

		debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
			"mmpeg12: new_pic=%d, ind=%d, info=%x, seq=%x, offset=%d\n",
			hw->dec_num, index, info, seqinfo, offset);

		hw->frame_prog = info & PICINFO_PROG;
#if 1
		if ((seqinfo & SEQINFO_EXT_AVAILABLE) &&
			((seqinfo & SEQINFO_PROG) == 0))
			hw->frame_prog = 0;
#endif
		force_interlace_check(hw);

		/* B frames predicted from broken references are broken too. */
		if (is_ref_error(hw)) {
			if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_B)
				new_pic->buffer_info |= PICINFO_ERROR;
		}

		if (((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) ||
			((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P)) {
			index = update_reference(hw, index);
		} else {
			/* drop b frame before reference pic ready */
			if (hw->refs[0] == -1)
				index = hw->buf_num;
		}
		vmpeg12_save_hw_context(hw, reg);

		/* index >= buf_num means nothing to display for this cycle */
		if (index >= hw->buf_num) {
			if (hw->dec_num != 2) {
				debug_print(DECODE_ID(hw), 0,
					"mmpeg12: drop pic num %d, type %c, index %d, offset %x\n",
					hw->dec_num, GET_SLICE_TYPE(info), index, offset);
				hw->dec_result = DEC_RESULT_ERROR;
			}
			vdec_schedule_work(&hw->work);
			return IRQ_HANDLED;
		}

		disp_pic = &hw->pics[index];
		info = hw->pics[index].buffer_info;
		/* suppress duplicate pts values */
		if (disp_pic->pts_valid && hw->lastpts64 == disp_pic->pts64)
			disp_pic->pts_valid = false;
		if (disp_pic->pts_valid)
			hw->lastpts64 = disp_pic->pts64;

		if (input_frame_based(hw_to_vdec(hw)))
			frame_size = new_pic->frame_size;

		fill_frame_info(hw, info, frame_size, new_pic->pts);

		if ((hw->first_i_frame_ready == 0) &&
			((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) &&
			((info & PICINFO_ERROR) == 0)) {
			hw->first_i_frame_ready = 1;
		}

		debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
			"mmpeg12: disp_pic=%d(%c), ind=%d, offst=%x, pts=(%d,%lld,%lld)(%d)\n",
			hw->disp_num, GET_SLICE_TYPE(info), index, disp_pic->offset,
			disp_pic->pts, disp_pic->pts64,
			disp_pic->timestamp, disp_pic->pts_valid);

		prepare_display_buf(hw, disp_pic);
		vdec_schedule_work(&hw->work);
	}

	return IRQ_HANDLED;
}
/*
 * vmpeg12_isr() - hard-IRQ half: update per-type decoded/concealed frame
 * counters, clear the mailbox, then wake the threaded handler.
 */
static irqreturn_t vmpeg12_isr(struct vdec_s *vdec, int irq)
{
	u32 info, offset;
	struct vdec_mpeg12_hw_s *hw =
		(struct vdec_mpeg12_hw_s *)(vdec->private);
	if (hw->eos)
		return IRQ_HANDLED;
	info = READ_VREG(MREG_PIC_INFO);
	offset = READ_VREG(MREG_FRAME_OFFSET);

	vdec_count_info(&hw->gvs, info & PICINFO_ERROR, offset);
	if (info &PICINFO_ERROR) {
		if ((info
			& PICINFO_TYPE_MASK) == PICINFO_TYPE_I) {
			hw->gvs.i_concealed_frames++;
		} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P) {
			hw->gvs.p_concealed_frames++;
		} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_B) {
			hw->gvs.b_concealed_frames++;
		}
	}
	if (offset) {
		if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_I) {
			hw->gvs.i_decoded_frames++;
		} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_P) {
			hw->gvs.p_decoded_frames++;
		} else if ((info & PICINFO_TYPE_MASK) == PICINFO_TYPE_B) {
			hw->gvs.b_decoded_frames++;
		}
	}

	WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1);

	return IRQ_WAKE_THREAD;
}

/* Deferred work: send the frame-rate hint to the vframe receiver once
 * (non-v4l path only). */
static void vmpeg12_notify_work(struct work_struct *work)
{
	struct vdec_mpeg12_hw_s *hw = container_of(work,
		struct vdec_mpeg12_hw_s, notify_work);
	struct vdec_s *vdec = hw_to_vdec(hw);

	if (!hw->is_used_v4l && vdec->fr_hint_state == VDEC_NEED_HINT) {
		vf_notify_receiver(vdec->vf_provider_name,
			VFRAME_EVENT_PROVIDER_FR_HINT,
			(void *)((unsigned long)hw->frame_dur));
		vdec->fr_hint_state = VDEC_HINTED;
	}
}

/* Poll until the VLD read pointer stops moving (ucode search finished),
 * bailing out after ~1000 iterations. */
static void wait_vmmpeg12_search_done(struct vdec_mpeg12_hw_s *hw)
{
	u32 vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP);
	int count = 0;

	do {
		usleep_range(100, 500);
		if (vld_rp == READ_VREG(VLD_MEM_VIFIFO_RP))
			break;
		if (count > 1000) {
			debug_print(DECODE_ID(hw), 0,
				"%s, count %d vld_rp 0x%x VLD_MEM_VIFIFO_RP 0x%x\n",
				__func__, count, vld_rp, READ_VREG(VLD_MEM_VIFIFO_RP));
			break;
		} else
			vld_rp = READ_VREG(VLD_MEM_VIFIFO_RP);
		count++;
	} while (1);
}

/* Output the reference frame still held back at EOS / flush time. */
static void flush_output(struct vdec_mpeg12_hw_s *hw)
{
	int index = hw->refs[1];

	/* video only one frame need not flush.
*/ + if (hw->dec_num < 2) + return; + + if ((hw->refs[0] >= 0) && + (hw->refs[0] < hw->buf_num)) + hw->ref_use[hw->refs[0]] = 0; + + if (index >= 0 && index < hw->buf_num) { + hw->ref_use[index] = 0; + prepare_display_buf(hw, &hw->pics[index]); + } +} + +static bool is_avaliable_buffer(struct vdec_mpeg12_hw_s *hw); + +static int notify_v4l_eos(struct vdec_s *vdec) +{ + struct vdec_mpeg12_hw_s *hw = (struct vdec_mpeg12_hw_s *)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = &hw->vframe_dummy; + struct vdec_v4l2_buffer *fb = NULL; + int index = INVALID_IDX; + ulong expires; + + if (hw->eos) { + expires = jiffies + msecs_to_jiffies(2000); + while (!is_avaliable_buffer(hw)) { + if (time_after(jiffies, expires)) { + pr_err("[%d] MPEG2 isn't enough buff for notify eos.\n", ctx->id); + return 0; + } + } + + index = find_free_buffer(hw); + if (INVALID_IDX == index) { + pr_err("[%d] MPEG2 EOS get free buff fail.\n", ctx->id); + return 0; + } + + fb = (struct vdec_v4l2_buffer *) + hw->pics[index].v4l_ref_buf_addr; + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->v4l_mem_handle = (ulong)fb; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->timestamp); + + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + + pr_info("[%d] mpeg12 EOS notify.\n", ctx->id); + } + + return 0; +} + +static void vmpeg12_work_implement(struct vdec_mpeg12_hw_s *hw, + struct vdec_s *vdec, int from) +{ + int r; + + if (hw->dec_result != DEC_RESULT_DONE) + debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "%s, result=%d, status=%d\n", __func__, + hw->dec_result, vdec->next_status); + + ATRACE_COUNTER("V_ST_DEC-work_state", hw->dec_result); + + if (hw->dec_result == DEC_RESULT_DONE) { + if (vdec->input.swap_valid) + hw->dec_again_cnt = 0; + vdec_vframe_dirty(vdec, 
hw->chunk); + hw->chunk = NULL; + hw->chunk_header_offset = 0; + hw->chunk_res_size = 0; + } else if (hw->dec_result == DEC_RESULT_AGAIN && + (vdec->next_status != VDEC_STATUS_DISCONNECTED)) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + if (!vdec_has_more_input(vdec)) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } +#ifdef AGAIN_HAS_THRESHOLD + hw->next_again_flag = 1; +#endif + //hw->dec_again_cnt++; + } else if (hw->dec_result == DEC_RESULT_GET_DATA && + vdec->next_status != VDEC_STATUS_DISCONNECTED) { + if (!vdec_has_more_input(vdec)) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } + debug_print(DECODE_ID(hw), PRINT_FLAG_VLD_DETAIL, + "%s DEC_RESULT_GET_DATA %x %x %x\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP)); + if (hw->chunk != NULL) { + hw->first_field_timestamp = hw->chunk->timestamp; + hw->first_field_timestamp_valid = true; + } + + vdec_vframe_dirty(vdec, hw->chunk); + hw->chunk_header_offset = 0; + hw->chunk_res_size = 0; + hw->chunk = NULL; + vdec_clean_input(vdec); + + r = vdec_prepare_input(vdec, &hw->chunk); + if (r < 0) { + hw->input_empty++; + reset_process_time(hw); + hw->dec_result = DEC_RESULT_GET_DATA; + debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "%s: Insufficient data, get data retry\n", __func__); + vdec_schedule_work(&hw->work); + return; + } + + hw->input_empty = 0; + if (vdec_frame_based(vdec) && (hw->chunk != NULL)) { + r = hw->chunk->size + + (hw->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + WRITE_VREG(VIFF_BIT_CNT, r * 8); + if (vdec->mvfrm) + vdec->mvfrm->frame_size += hw->chunk->size; + } + debug_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: %x %x %x size %d, bitcnt %d\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + r, READ_VREG(VIFF_BIT_CNT)); + 
vdec_enable_input(vdec); + hw->dec_result = DEC_RESULT_NONE; + hw->last_vld_level = 0; + start_process_time_set(hw); + hw->init_flag = 1; + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); + WRITE_VREG(MREG_BUFFEROUT, 0); + return; + } else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) { + debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "%s: force exit\n", __func__); + if (hw->stat & STAT_ISR_REG) { + amvdec_stop(); + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + } else if (hw->dec_result == DEC_RESULT_EOS) { + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + hw->eos = 1; + vdec_vframe_dirty(vdec, hw->chunk); + hw->chunk_header_offset = 0; + hw->chunk_res_size = 0; + hw->chunk = NULL; + vdec_clean_input(vdec); + flush_output(hw); + if (hw->is_used_v4l) { + ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__); + notify_v4l_eos(vdec); + ATRACE_COUNTER("V_ST_DEC-submit_eos", 0); + } + + debug_print(DECODE_ID(hw), 0, + "%s: end of stream, num %d(%d)\n", + __func__, hw->disp_num, hw->dec_num); + } + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + /*disable mbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 0); + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + wait_vmmpeg12_search_done(hw); + + if (from == 1) { + /*This is a timeout work*/ + if (work_pending(&hw->work)) { + pr_err("timeout work return befor finishing."); + /* + * The vmpeg12_work arrives at the last second, + * give it a chance to handle the scenario. 
+ */ + return; + } + } + + if (vdec->parallel_dec == 1) + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1); + else + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode && + !hw->v4l_params_parsed) + vdec_v4l_write_frame_sync(ctx); + } + + ATRACE_COUNTER("V_ST_DEC-chunk_size", 0); + + if (hw->vdec_cb) + hw->vdec_cb(vdec, hw->vdec_cb_arg); +} + +static void vmpeg12_work(struct work_struct *work) +{ + struct vdec_mpeg12_hw_s *hw = + container_of(work, struct vdec_mpeg12_hw_s, work); + struct vdec_s *vdec = hw_to_vdec(hw); + + vmpeg12_work_implement(hw, vdec, 0); +} +static void vmpeg12_timeout_work(struct work_struct *work) +{ + struct vdec_mpeg12_hw_s *hw = + container_of(work, struct vdec_mpeg12_hw_s, timeout_work); + struct vdec_s *vdec = hw_to_vdec(hw); + + if (work_pending(&hw->work)) { + pr_err("timeout work return befor executing."); + return; + } + + hw->timeout_processing = 1; + vmpeg12_work_implement(hw, vdec, 1); +} + +static struct vframe_s *vmpeg_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_s *vdec = op_arg; + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + atomic_add(1, &hw->peek_num); + if (kfifo_peek(&hw->display_q, &vf)) + return vf; + + return NULL; +} + +static struct vframe_s *vmpeg_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_s *vdec = op_arg; + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + + if (kfifo_get(&hw->display_q, &vf)) { + vf->index_disp = atomic_read(&hw->get_num); + atomic_add(1, &hw->get_num); + ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q)); + return vf; + } + return NULL; +} + +static int mpeg12_valid_vf_check(struct vframe_s *vf, struct vdec_mpeg12_hw_s *hw) +{ + int i; + + if (vf == NULL || (vf->index == -1)) + return 0; + + for (i = 0; i < VF_POOL_SIZE; i++) { + if (vf == &hw->vfpool[i]) 
			return 1;
	}

	return 0;
}

/* vframe provider op: consumer returns a frame; recycle it into the
 * free queue and drop its buffer use count. */
static void vmpeg_vf_put(struct vframe_s *vf, void *op_arg)
{
	struct vdec_s *vdec = op_arg;
	struct vdec_mpeg12_hw_s *hw =
		(struct vdec_mpeg12_hw_s *)vdec->private;
	unsigned long flags;

	if (!mpeg12_valid_vf_check(vf, hw)) {
		debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
			"invalid vf: %lx\n", (ulong)vf);
		return ;
	}

	if (vf->meta_data_buf) {
		vf->meta_data_buf = NULL;
		vf->meta_data_size = 0;
	}

	/* consumer may have re-bound the v4l buffer; track the new handle */
	if (vf->v4l_mem_handle !=
		hw->pics[vf->index].v4l_ref_buf_addr) {
		hw->pics[vf->index].v4l_ref_buf_addr
			= vf->v4l_mem_handle;

		debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
			"MPEG12 update fb handle, old:%llx, new:%llx\n",
			hw->pics[vf->index].v4l_ref_buf_addr,
			vf->v4l_mem_handle);
	}
	spin_lock_irqsave(&hw->lock, flags);
	hw->vfbuf_use[vf->index]--;
	if (hw->vfbuf_use[vf->index] < 0) {
		debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
			"warn: vf %lx, index %d putback repetitive, set use to 0\n", (ulong)vf, vf->index);
		hw->vfbuf_use[vf->index] = 0;
	}
	atomic_add(1, &hw->put_num);
	debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
		"%s: vf: %lx, index: %d, use: %d\n", __func__, (ulong)vf,
		vf->index, hw->vfbuf_use[vf->index]);

	kfifo_put(&hw->newframe_q,
		(const struct vframe_s *)vf);
	ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q));
	spin_unlock_irqrestore(&hw->lock, flags);
}

/* Receiver event callback: answer secure-state queries; other requests
 * get 0xffffffff. */
static int vmpeg_event_cb(int type, void *data, void *op_arg)
{
	struct vdec_s *vdec = op_arg;

	if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) {
		struct provider_state_req_s *req =
			(struct provider_state_req_s *)data;
		if (req->req_type == REQ_STATE_SECURE)
			req->req_result[0] = vdec_secure(vdec);
		else
			req->req_result[0] = 0xffffffff;
	}

	return 0;
}

/* Report vframe pool occupancy to the consumer. */
static int vmpeg_vf_states(struct vframe_states *states, void *op_arg)
{
	unsigned long flags;
	struct vdec_s *vdec = op_arg;
	struct vdec_mpeg12_hw_s *hw =
		(struct vdec_mpeg12_hw_s *)vdec->private;

	spin_lock_irqsave(&hw->lock, flags);

	states->vf_pool_size = VF_POOL_SIZE;
	states->buf_free_num = kfifo_len(&hw->newframe_q);
	states->buf_avail_num = kfifo_len(&hw->display_q);
	states->buf_recycle_num = 0;

	spin_unlock_irqrestore(&hw->lock, flags);
	return 0;
}
/* Fill vstatus with the current decoder statistics snapshot. */
static int vmmpeg12_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
{
	struct vdec_mpeg12_hw_s *hw =
		(struct vdec_mpeg12_hw_s *)vdec->private;

	if (!hw)
		return -1;

	vstatus->frame_width = hw->frame_width;
	vstatus->frame_height = hw->frame_height;
	if (hw->frame_dur != 0)
		/* fps = round(96000 / frame_dur), 96 kHz time base */
		vstatus->frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ?
			96000 / hw->frame_dur : (96000 / hw->frame_dur +1);
	else
		vstatus->frame_rate = -1;
	vstatus->error_count = READ_VREG(AV_SCRATCH_C);
	vstatus->status = hw->stat;
	vstatus->bit_rate = hw->gvs.bit_rate;
	vstatus->frame_dur = hw->frame_dur;
	vstatus->frame_data = hw->gvs.frame_data;
	vstatus->total_data = hw->gvs.total_data;
	vstatus->frame_count = hw->gvs.frame_count;
	vstatus->error_frame_count = hw->gvs.error_frame_count;
	vstatus->drop_frame_count = hw->drop_frame_count;
	vstatus->i_decoded_frames = hw->gvs.i_decoded_frames;
	vstatus->i_lost_frames = hw->gvs.i_lost_frames;
	vstatus->i_concealed_frames = hw->gvs.i_concealed_frames;
	vstatus->p_decoded_frames = hw->gvs.p_decoded_frames;
	vstatus->p_lost_frames = hw->gvs.p_lost_frames;
	vstatus->p_concealed_frames = hw->gvs.p_concealed_frames;
	vstatus->b_decoded_frames = hw->gvs.b_decoded_frames;
	vstatus->b_lost_frames = hw->gvs.b_lost_frames;
	vstatus->b_concealed_frames = hw->gvs.b_concealed_frames;
	vstatus->total_data = hw->gvs.total_data;
	vstatus->samp_cnt = hw->gvs.samp_cnt;
	vstatus->offset = hw->gvs.offset;
	vstatus->ratio_control = hw->ratio_control;
	snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name),
		"%s", DRIVER_NAME);

	return 0;
}

/****************************************/
/* Allocate the co-located MV workspace and the CC (closed caption)
 * buffer, then program their addresses into the decoder registers. */
static void vmpeg12_workspace_init(struct vdec_mpeg12_hw_s
*hw) +{ + int ret; + + ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, + DECODE_BUFFER_NUM_MAX, + WORKSPACE_SIZE, + DRIVER_NAME, + &hw->buf_start); + if (ret < 0) { + pr_err("mpeg2 workspace alloc size %d failed.\n", + WORKSPACE_SIZE); + return; + } + + if (!hw->ccbuf_phyAddress_virt) { + hw->cc_buf_size = AUX_BUF_ALIGN(CCBUF_SIZE); + hw->ccbuf_phyAddress_virt = + dma_alloc_coherent(amports_get_dma_device(), + hw->cc_buf_size, &hw->ccbuf_phyAddress, + GFP_KERNEL); + if (hw->ccbuf_phyAddress_virt == NULL) { + pr_err("%s: failed to alloc cc buffer\n", __func__); + return; + } + } + + WRITE_VREG(MREG_CO_MV_START, hw->buf_start); + WRITE_VREG(MREG_CC_ADDR, hw->ccbuf_phyAddress); + + return; +} + +static void vmpeg2_dump_state(struct vdec_s *vdec) +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)(vdec->private); + u32 i; + debug_print(DECODE_ID(hw), 0, + "====== %s\n", __func__); + debug_print(DECODE_ID(hw), 0, + "width/height (%d/%d),i_first %d, buf_num %d\n", + hw->frame_width, + hw->frame_height, + hw->first_i_frame_ready, + hw->buf_num + ); + debug_print(DECODE_ID(hw), 0, + "is_framebase(%d), eos %d, state 0x%x, dec_result 0x%x dec_frm %d put_frm %d run %d not_run_ready %d,input_empty %d\n", + vdec_frame_based(vdec), + hw->eos, + hw->stat, + hw->dec_result, + hw->dec_num, + hw->put_num, + hw->run_count, + hw->not_run_ready, + hw->input_empty + ); + + for (i = 0; i < hw->buf_num; i++) { + debug_print(DECODE_ID(hw), 0, + "index %d, used %d, ref %d\n", i, + hw->vfbuf_use[i], hw->ref_use[i]); + } + + if (!hw->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + debug_print(DECODE_ID(hw), 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + debug_print(DECODE_ID(hw), 0, + "%s, newq(%d/%d), dispq(%d/%d) vf pre/get/put (%d/%d/%d),drop=%d, buffer_not_ready %d\n", + __func__, + 
kfifo_len(&hw->newframe_q), + VF_POOL_SIZE, + kfifo_len(&hw->display_q), + VF_POOL_SIZE, + hw->disp_num, + hw->get_num, + hw->put_num, + hw->drop_frame_count, + hw->buffer_not_ready + ); + debug_print(DECODE_ID(hw), 0, + "VIFF_BIT_CNT=0x%x\n", + READ_VREG(VIFF_BIT_CNT)); + debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_LEVEL=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_WP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP)); + debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_RP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_RP)); + debug_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + debug_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (vdec_frame_based(vdec) && + debug_enable & PRINT_FRAMEBASE_DATA + ) { + int jj; + if (hw->chunk && hw->chunk->block && + hw->chunk->size > 0) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, hw->chunk->size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + debug_print(DECODE_ID(hw), 0, + "frame data size 0x%x\n", + hw->chunk->size); + for (jj = 0; jj < hw->chunk->size; jj++) { + if ((jj & 0xf) == 0) + debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } +} + +static void reset_process_time(struct vdec_mpeg12_hw_s *hw) +{ + if (hw->start_process_time) { + unsigned process_time = + 1000 * (jiffies - hw->start_process_time) / HZ; + hw->start_process_time = 0; + if (process_time > max_process_time[DECODE_ID(hw)]) + max_process_time[DECODE_ID(hw)] = process_time; + } +} + +static void start_process_time_set(struct vdec_mpeg12_hw_s *hw) +{ + if ((hw->refs[1] 
!= -1) && (hw->refs[0] == -1)) + hw->decode_timeout_count = 1; + else + hw->decode_timeout_count = 10; + hw->start_process_time = jiffies; +} +static void timeout_process(struct vdec_mpeg12_hw_s *hw) +{ + struct vdec_s *vdec = hw_to_vdec(hw); + + if (work_pending(&hw->work) || + work_busy(&hw->work) || + work_busy(&hw->timeout_work) || + work_pending(&hw->timeout_work)) { + pr_err("%s mpeg12[%d] timeout_process return befor do anything.\n",__func__, vdec->id); + return; + } + reset_process_time(hw); + amvdec_stop(); + debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "%s decoder timeout, status=%d, level=%d\n", + __func__, vdec->status, READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + hw->dec_result = DEC_RESULT_DONE; + if ((hw->refs[1] != -1) && (hw->refs[0] != -1)) + hw->first_i_frame_ready = 0; + + /* + * In this very timeout point,the vmpeg12_work arrives, + * let it to handle the scenario. + */ + if (work_pending(&hw->work)) { + pr_err("%s mpeg12[%d] return befor schedule.", __func__, vdec->id); + return; + } + vdec_schedule_work(&hw->timeout_work); +} + +static void check_timer_func(struct timer_list *timer) +{ + struct vdec_mpeg12_hw_s *hw = container_of(timer, + struct vdec_mpeg12_hw_s, check_timer); + unsigned int timeout_val = decode_timeout_val; + + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + + if (((debug_enable & PRINT_FLAG_TIMEOUT_STATUS) == 0) && + (timeout_val > 0) && + (hw->start_process_time > 0) && + ((1000 * (jiffies - hw->start_process_time) / HZ) + > timeout_val)) { + if (hw->last_vld_level == READ_VREG(VLD_MEM_VIFIFO_LEVEL)) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + timeout_process(hw); + } + hw->last_vld_level = READ_VREG(VLD_MEM_VIFIFO_LEVEL); + } + + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static int 
vmpeg12_hw_ctx_restore(struct vdec_mpeg12_hw_s *hw) +{ + u32 index = -1; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + int i; + + if (!hw->init_flag) + vmpeg12_workspace_init(hw); + + if (hw->v4l_params_parsed) { + struct vdec_pic_info pic; + + if (!hw->buf_num) { + vdec_v4l_get_pic_info(v4l2_ctx, &pic); + hw->buf_num = pic.dpb_frames + + pic.dpb_margin; + if (hw->buf_num > DECODE_BUFFER_NUM_MAX) + hw->buf_num = DECODE_BUFFER_NUM_MAX; + } + + index = find_free_buffer(hw); + if ((index < 0) || (index >= hw->buf_num)) + return -1; + + WRITE_VREG(MREG_CO_MV_START, hw->buf_start); + WRITE_VREG(MREG_CC_ADDR, hw->ccbuf_phyAddress); + + for (i = 0; i < hw->buf_num; i++) { + if (hw->pics[i].v4l_ref_buf_addr) { + config_cav_lut(canvas_y(hw->canvas_spec[i]), + &hw->canvas_config[i][0], VDEC_1); + config_cav_lut(canvas_u(hw->canvas_spec[i]), + &hw->canvas_config[i][1], VDEC_1); + } + } + + /* prepare REF0 & REF1 + points to the past two IP buffers + prepare REC_CANVAS_ADDR and ANC2_CANVAS_ADDR + points to the output buffer*/ + WRITE_VREG(MREG_REF0, + (hw->refs[0] == -1) ? 0xffffffff : + hw->canvas_spec[hw->refs[0]]); + WRITE_VREG(MREG_REF1, + (hw->refs[1] == -1) ? 0xffffffff : + hw->canvas_spec[hw->refs[1]]); + WRITE_VREG(REC_CANVAS_ADDR, hw->canvas_spec[index]); + WRITE_VREG(ANC2_CANVAS_ADDR, hw->canvas_spec[index]); + + debug_print(DECODE_ID(hw), PRINT_FLAG_RESTORE, + "%s,ref0=0x%x, ref1=0x%x,rec=0x%x, ctx_valid=%d,index=%d\n", + __func__, + READ_VREG(MREG_REF0), + READ_VREG(MREG_REF1), + READ_VREG(REC_CANVAS_ADDR), + hw->ctx_valid, index); + } + + /* set to mpeg1 default */ + WRITE_VREG(MPEG1_2_REG, + (hw->ctx_valid) ? hw->reg_mpeg1_2_reg : 0); + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + /* for Mpeg1 default value */ + WRITE_VREG(PIC_HEAD_INFO, + (hw->ctx_valid) ? 
hw->reg_pic_head_info : 0x380); + /* disable mpeg4 */ + WRITE_VREG(M4_CONTROL_REG, 0); + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + /* clear buffer IN/OUT registers */ + WRITE_VREG(MREG_BUFFEROUT, 0); + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + /* set reference width and height */ + if ((hw->frame_width != 0) && (hw->frame_height != 0)) + WRITE_VREG(MREG_CMD, + (hw->frame_width << 16) | hw->frame_height); + else + WRITE_VREG(MREG_CMD, 0); + + debug_print(DECODE_ID(hw), PRINT_FLAG_RESTORE, + "0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", + hw->frame_width, hw->frame_height, hw->seqinfo, + hw->reg_f_code_reg, hw->reg_slice_ver_pos_pic_type, + hw->reg_mb_info); + + WRITE_VREG(MREG_PIC_WIDTH, hw->reg_pic_width); + WRITE_VREG(MREG_PIC_HEIGHT, hw->reg_pic_height); + WRITE_VREG(MREG_SEQ_INFO, hw->seqinfo); + WRITE_VREG(F_CODE_REG, hw->reg_f_code_reg); + WRITE_VREG(SLICE_VER_POS_PIC_TYPE, + hw->reg_slice_ver_pos_pic_type); + WRITE_VREG(MB_INFO, hw->reg_mb_info); + WRITE_VREG(VCOP_CTRL_REG, hw->reg_vcop_ctrl_reg); + WRITE_VREG(AV_SCRATCH_H, hw->reg_signal_type); + + if (READ_VREG(MREG_ERROR_COUNT) != 0 || + READ_VREG(MREG_FATAL_ERROR) == 1) + debug_print(DECODE_ID(hw), PRINT_FLAG_RESTORE, + "err_cnt:%d fa_err:%d\n", + READ_VREG(MREG_ERROR_COUNT), + READ_VREG(MREG_FATAL_ERROR)); + + /* clear error count */ + WRITE_VREG(MREG_ERROR_COUNT, 0); + /*Use MREG_FATAL_ERROR bit1, the ucode determine + whether to report the interruption of width and + height information,in order to be compatible + with the old version of ucode. 
+ 1: Report the width and height information + 0: No Report + bit0: + 1: Use cma cc buffer for new driver + 0: use codec mm cc buffer for old driver + */ + WRITE_VREG(MREG_FATAL_ERROR, 3); + /* clear wait buffer status */ + WRITE_VREG(MREG_WAIT_BUFFER, 0); +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1<<17); +#endif + + /* cbcr_merge_swap_en */ + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + else + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + } else { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + else + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + } + + if (!hw->ctx_valid) + WRITE_VREG(AV_SCRATCH_J, hw->userdata_wp_ctx); + + if (hw->chunk) { + /*frame based input*/ + WRITE_VREG(MREG_INPUT, + (hw->chunk->offset & 7) | (1<<7) | (hw->ctx_valid<<6)); + } else { + /*stream based input*/ + WRITE_VREG(MREG_INPUT, (hw->ctx_valid<<6)); + } + return 0; +} + +static void vmpeg12_local_init(struct vdec_mpeg12_hw_s *hw) +{ + int i; + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf; + vf = &hw->vfpool[i]; + hw->vfpool[i].index = DECODE_BUFFER_NUM_MAX; + kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf); + } + + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + hw->vfbuf_use[i] = 0; + hw->ref_use[i] = 0; + } + + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + + hw->mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + hw->tvp_flag); + + hw->eos = 0; + hw->frame_width = 
hw->frame_height = 0; + hw->frame_dur = hw->frame_prog = 0; + hw->frame_force_skip_flag = 0; + hw->wait_buffer_counter = 0; + hw->first_i_frame_ready = 0; + hw->dec_control &= DEC_CONTROL_INTERNAL_MASK; + hw->refs[0] = -1; + hw->refs[1] = -1; + hw->dec_num = 0; + hw->run_count = 0; + hw->not_run_ready = 0; + hw->input_empty = 0; + hw->drop_frame_count = 0; + hw->buffer_not_ready = 0; + hw->start_process_time = 0; + hw->init_flag = 0; + hw->dec_again_cnt = 0; + hw->error_frame_skip_level = error_frame_skip_level; + atomic_set(&hw->disp_num, 0); + atomic_set(&hw->put_num, 0); + atomic_set(&hw->get_num, 0); + atomic_set(&hw->peek_num, 0); + + if (dec_control) + hw->dec_control = dec_control; +} + +static s32 vmpeg12_init(struct vdec_mpeg12_hw_s *hw) +{ + int size; + u32 fw_size = 16*0x1000; + struct firmware_s *fw; + + vmpeg12_local_init(hw); + + fw = vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + pr_debug("get firmware ...\n"); + size = get_firmware_data(VIDEO_DEC_MPEG12_MULTI, fw->data); + if (size < 0) { + pr_err("get firmware fail.\n"); + vfree(fw); + return -1; + } + + fw->len = size; + hw->fw = fw; + + INIT_WORK(&hw->userdata_push_work, userdata_push_do_work); + INIT_WORK(&hw->work, vmpeg12_work); + INIT_WORK(&hw->timeout_work, vmpeg12_timeout_work); + INIT_WORK(&hw->notify_work, vmpeg12_notify_work); + + if (NULL == hw->user_data_buffer) { + hw->user_data_buffer = kmalloc(USER_DATA_SIZE, + GFP_KERNEL); + if (!hw->user_data_buffer) { + pr_info("%s: Can not allocate user_data_buffer\n", + __func__); + return -1; + } + } + + vmmpeg2_crate_userdata_manager(hw, + hw->user_data_buffer, + USER_DATA_SIZE); + + //amvdec_enable(); + timer_setup(&hw->check_timer, check_timer_func, 0); + //init_timer(&hw->check_timer); + //hw->check_timer.data = (unsigned long)hw; + //hw->check_timer.function = check_timer_func; + hw->check_timer.expires = jiffies + CHECK_INTERVAL; + + hw->stat |= STAT_TIMER_ARM; + hw->stat |= STAT_ISR_REG; + 
+ hw->buf_start = 0; + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + + return 0; +} + +static bool is_avaliable_buffer(struct vdec_mpeg12_hw_s *hw) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int i, free_count = 0; + int used_count = 0; + + if ((hw->buf_num == 0) || + (ctx->cap_pool.dec < hw->buf_num)) { + if (ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) { + free_count = + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) + 1; + } + } + + for (i = 0; i < hw->buf_num; ++i) { + if ((hw->vfbuf_use[i] == 0) && + (hw->ref_use[i] == 0) && + hw->pics[i].v4l_ref_buf_addr) { + free_count++; + } else if (hw->pics[i].v4l_ref_buf_addr) + used_count++; + } + + ATRACE_COUNTER("V_ST_DEC-free_buff_count", free_count); + ATRACE_COUNTER("V_ST_DEC-used_buff_count", used_count); + + return free_count >= run_ready_min_buf_num ? 1 : 0; +} + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int ret = 0; + + if (hw->eos) + return 0; + + if (hw->timeout_processing && + (work_pending(&hw->work) || work_busy(&hw->work) || + work_pending(&hw->timeout_work) || work_busy(&hw->timeout_work))) { + debug_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "mpeg12 work pending,not ready for run.\n"); + return 0; + } + hw->timeout_processing = 0; + if (vdec_stream_based(vdec) && (hw->init_flag == 0) + && pre_decode_buf_level != 0) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = vdec->input.size + wp - rp; + else + level = wp - rp; + + if (level < pre_decode_buf_level) { + hw->not_run_ready++; + return 0; + } + } + +#ifdef AGAIN_HAS_THRESHOLD + if (hw->next_again_flag&& + (!vdec_frame_based(vdec))) { + u32 parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + if (parser_wr_ptr >= hw->pre_parser_wr_ptr && + (parser_wr_ptr - 
hw->pre_parser_wr_ptr) < + again_threshold) { + int r = vdec_sync_input(vdec); + debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "%s buf level%x\n", + __func__, r); + return 0; + } + } +#endif + + if (hw->v4l_params_parsed) { + ret = is_avaliable_buffer(hw) ? 1 : 0; + } else { + ret = ctx->v4l_resolution_change ? 0 : 1; + } + + hw->not_run_ready = 0; + hw->buffer_not_ready = 0; + + return ret ? CORE_MASK_VDEC_1 : 0; +} + +static unsigned char get_data_check_sum + (struct vdec_mpeg12_hw_s *hw, int size) +{ + int jj; + int sum = 0; + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + for (jj = 0; jj < size; jj++) + sum += data[jj]; + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static int check_dirty_data(struct vdec_s *vdec) +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)(vdec->private); + u32 wp, rp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + + if (wp > rp) + level = wp - rp; + else + level = wp + vdec->input.size - rp ; + + if (hw->next_again_flag && + hw->pre_parser_wr_ptr != + STBUF_READ(&vdec->vbuf, get_wp)) + hw->dec_again_cnt++; + if ((level > (vdec->input.size * 2 / 3) ) && + (hw->dec_again_cnt > dirty_again_threshold)) { + debug_print(DECODE_ID(hw), 0, "mpeg12 data skipped %x, level %x\n", ((level / 2) >> 20) << 20, level); + if (vdec->input.swap_valid) { + vdec_stream_skip_data(vdec, ((level / 2) >> 20) << 20); + hw->dec_again_cnt = 0; + } + return 1; + } + return 0; +} + + +static void run(struct vdec_s *vdec, unsigned long mask, +void (*callback)(struct vdec_s *, void *), + void *arg) +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + int save_reg; + int size, ret; + if (!hw->vdec_pg_enable_flag) { + hw->vdec_pg_enable_flag = 1; + amvdec_enable(); + } + 
save_reg = READ_VREG(POWER_CTL_VLD); + /* reset everything except DOS_TOP[1] and APB_CBUS[0]*/ + WRITE_VREG(DOS_SW_RESET0, 0xfffffff0); + WRITE_VREG(DOS_SW_RESET0, 0); + WRITE_VREG(POWER_CTL_VLD, save_reg); + hw->run_count++; + vdec_reset_core(vdec); + hw->vdec_cb_arg = arg; + hw->vdec_cb = callback; + + if ((vdec_stream_based(vdec)) && + (error_proc_policy & 0x1) && + check_dirty_data(vdec)) { + hw->dec_result = DEC_RESULT_AGAIN; + if (!vdec->input.swap_valid) { + debug_print(DECODE_ID(hw), 0, "mpeg12 start dirty data skipped\n"); + vdec_prepare_input(vdec, &hw->chunk); + hw->dec_result = DEC_RESULT_DONE; + } + vdec_schedule_work(&hw->work); + return; + } + +#ifdef AGAIN_HAS_THRESHOLD + if (vdec_stream_based(vdec)) { + hw->pre_parser_wr_ptr = + STBUF_READ(&vdec->vbuf, get_wp); + hw->next_again_flag = 0; + } +#endif + + if ((vdec_frame_based(vdec)) && (hw->chunk_header_offset != 0) && + (!hw->v4l_params_parsed) && (hw->chunk != NULL) && + (hw->chunk_res_size != 0)) { + hw->chunk->offset = hw->chunk_header_offset; + hw->chunk->size = hw->chunk_res_size; + hw->chunk_header_offset = 0; + hw->chunk_res_size = 0; + debug_print(DECODE_ID(hw), 0, "Multiple heads are parsed in a chunk and resolution changed.\n"); + } + + size = vdec_prepare_input(vdec, &hw->chunk); + if (size < 0) { + hw->input_empty++; + hw->dec_result = DEC_RESULT_AGAIN; + + debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "vdec_prepare_input: Insufficient data\n"); + vdec_schedule_work(&hw->work); + return; + } + + ATRACE_COUNTER("V_ST_DEC-chunk_size", size); + + hw->input_empty = 0; + if ((vdec_frame_based(vdec)) && + (hw->chunk != NULL)) { + size = hw->chunk->size + + (hw->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + WRITE_VREG(VIFF_BIT_CNT, size * 8); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = hw->chunk->size; + } + if (vdec_frame_based(vdec) && !vdec_secure(vdec)) { + /* HW needs padding (NAL start) for frame ending */ + char* tail = (char *)hw->chunk->block->start_virt; + + tail += 
hw->chunk->offset + hw->chunk->size; + tail[0] = 0; + tail[1] = 0; + tail[2] = 1; + tail[3] = 0; + codec_mm_dma_flush(tail, 4, DMA_TO_DEVICE); + } + + if (vdec_frame_based(vdec) && debug_enable && !vdec_secure(vdec)) { + u8 *data = NULL; + if (hw->chunk) + debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "run: chunk offset 0x%x, size %d\n", + hw->chunk->offset, hw->chunk->size); + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + if (debug_enable & PRINT_FLAG_VDEC_STATUS + ) { + debug_print(DECODE_ID(hw), 0, + "%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. %02x %02x %02x %02x\n", + __func__, size, get_data_check_sum(hw, size), + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + } + if (debug_enable & PRINT_FRAMEBASE_DATA + ) { + int jj; + debug_print(DECODE_ID(hw), PRINT_FRAMEBASE_DATA, + "frame data:\n"); + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + pr_info("%06x:", jj); + pr_info("%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + pr_info("\n"); + } + pr_info("\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } else { + debug_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: %x %x %x %x %x size 0x%x, bitcnt %d\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + STBUF_READ(&vdec->vbuf, get_rp), + STBUF_READ(&vdec->vbuf, get_wp), + size, READ_VREG(VIFF_BIT_CNT)); + } + + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else { + ret = amvdec_vdec_loadmc_buf_ex(VFORMAT_MPEG12, "mmpeg12", vdec, + hw->fw->data, hw->fw->len); + if (ret < 0) { + pr_err("[%d] %s: the %s fw loading failed, err: %x\n", vdec->id, + hw->fw->name, tee_enabled() ? 
"TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_MPEG12; + } + + if (vmpeg12_hw_ctx_restore(hw) < 0) { + hw->dec_result = DEC_RESULT_ERROR; + debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "ammvdec_mpeg12: error HW context restore\n"); + vdec_schedule_work(&hw->work); + return; + } + /*wmb();*/ + hw->dec_result = DEC_RESULT_NONE; + hw->stat |= STAT_MC_LOAD; + vdec_enable_input(vdec); + hw->last_vld_level = 0; + start_process_time_set(hw); + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + amvdec_start(); + hw->stat |= STAT_VDEC_RUN; + hw->stat |= STAT_TIMER_ARM; + hw->init_flag = 1; + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static void reset(struct vdec_s *vdec) +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + int i; + + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + flush_work(&hw->work); + flush_work(&hw->timeout_work); + flush_work(&hw->notify_work); + flush_work(&hw->userdata_push_work); + reset_process_time(hw); + + for (i = 0; i < hw->buf_num; i++) { + hw->pics[i].v4l_ref_buf_addr = 0; + hw->vfbuf_use[i] = 0; + hw->ref_use[i] = 0; + } + + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &hw->vfpool[i]; + + memset((void *)vf, 0, sizeof(*vf)); + hw->vfpool[i].index = -1; + kfifo_put(&hw->newframe_q, vf); + } + + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + vdec->free_canvas_ex(canvas_y(hw->canvas_spec[i]), vdec->id); + vdec->free_canvas_ex(canvas_u(hw->canvas_spec[i]), vdec->id); + hw->canvas_spec[i] = 0xffffff; + } + + hw->refs[0] = -1; + hw->refs[1] = -1; + hw->ctx_valid = 0; + hw->dec_num = 0; + hw->eos = 0; + hw->buf_num = 0; + hw->frame_width = 0; + hw->frame_height = 0; + hw->first_i_frame_ready = 0; + hw->first_field_timestamp = 0; + 
hw->first_field_timestamp_valid = false; + + atomic_set(&hw->disp_num, 0); + atomic_set(&hw->get_num, 0); + atomic_set(&hw->put_num, 0); + + pr_info("mpeg12: reset.\n"); +} + +static int vmpeg12_set_trickmode(struct vdec_s *vdec, unsigned long trickmode) +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *)vdec->private; + if (!hw) + return 0; + + if (trickmode == TRICKMODE_I) { + hw->i_only = 0x3; + //trickmode_i = 1; + } else if (trickmode == TRICKMODE_NONE) { + hw->i_only = 0x0; + //trickmode_i = 0; + } + return 0; +} + +static int ammvdec_mpeg12_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_mpeg12_hw_s *hw = NULL; + int config_val = 0; + + pr_info("ammvdec_mpeg12 probe start.\n"); + + if (pdata == NULL) { + pr_info("ammvdec_mpeg12 platform data undefined.\n"); + return -EFAULT; + } + + hw = vzalloc(sizeof(struct vdec_mpeg12_hw_s)); + if (hw == NULL) { + pr_info("\nammvdec_mpeg12 decoder driver alloc failed\n"); + return -ENOMEM; + } + + /* the ctx from v4l2 driver. 
*/ + hw->v4l2_ctx = pdata->private; + + pdata->private = hw; + pdata->dec_status = vmmpeg12_dec_status; + pdata->set_trickmode = vmpeg12_set_trickmode; + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vmpeg12_isr; + pdata->threaded_irq_handler = vmpeg12_isr_thread_fn; + pdata->dump_state = vmpeg2_dump_state; + + pdata->user_data_read = vmmpeg2_user_data_read; + pdata->reset_userdata_fifo = vmmpeg2_reset_userdata_fifo; + pdata->wakeup_userdata_poll = vmmpeg2_wakeup_userdata_poll; + + snprintf(hw->vdec_name, sizeof(hw->vdec_name), + "mpeg12-%d", pdev->id); + snprintf(hw->pts_name, sizeof(hw->pts_name), + "%s-timestamp", hw->vdec_name); + snprintf(hw->new_q_name, sizeof(hw->new_q_name), + "%s-newframe_q", hw->vdec_name); + snprintf(hw->disp_q_name, sizeof(hw->disp_q_name), + "%s-dispframe_q", hw->vdec_name); + + if (pdata->use_vfm_path) { + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + hw->frameinfo_enable = 1; + } + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + PROVIDER_NAME ".%02x", pdev->id & 0xff); + + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vf_provider_ops, pdata); + + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->canvas_spec[i] = 0xffffff; + } + platform_set_drvdata(pdev, pdata); + + hw->canvas_mode = pdata->canvas_mode; + if (pdata->config_len) { + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hw->is_used_v4l = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + hw->canvas_mode = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_metadata_config_flag", + &config_val) == 0) + hw->force_prog_only = (config_val & VDEC_CFG_FLAG_PROG_ONLY) ? 
1 : 0; + + /*if (get_config_int(pdata->config, + "parm_v4l_duration", + &config_val) == 0) + vdec_frame_rate_uevent(config_val);*/ + + if ((debug_enable & IGNORE_PARAM_FROM_CONFIG) == 0 && + get_config_int(pdata->config, + "parm_v4l_buffer_margin", + &config_val) == 0) + hw->dynamic_buf_num_margin= config_val; + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hw->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hw->sidebind_channel_id = config_val; + } + hw->platform_dev = pdev; + hw->chunk_header_offset = 0; + hw->chunk_res_size = 0; + hw->first_field_timestamp = 0; + hw->first_field_timestamp_valid = false; + + if (hw->force_prog_only) { + pdata->prog_only = 1; + debug_print(DECODE_ID(hw), 0, + "forced progressive output\n"); + } + + hw->tvp_flag = vdec_secure(pdata) ? CODEC_MM_FLAGS_TVP : 0; + if (pdata->sys_info) + hw->vmpeg12_amstream_dec_info = *pdata->sys_info; + + debug_print(DECODE_ID(hw), 0, + "%s, sysinfo: %dx%d, tvp_flag = 0x%x\n", + __func__, + hw->vmpeg12_amstream_dec_info.width, + hw->vmpeg12_amstream_dec_info.height, + hw->tvp_flag); + + if (vmpeg12_init(hw) < 0) { + pr_info("ammvdec_mpeg12 init failed.\n"); + if (hw) { + vfree(hw); + hw = NULL; + } + pdata->dec_status = NULL; + return -ENODEV; + } + vdec_set_prepare_level(pdata, start_decode_buf_level); + + vdec_set_vframe_comm(pdata, DRIVER_NAME); + + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } +#ifdef DUMP_USER_DATA + amvdec_mmpeg12_init_userdata_dump(hw); + reset_user_data_buf(hw); +#endif + + /*INIT_WORK(&userdata_push_work, userdata_push_do_work);*/ + return 0; +} + +static int ammvdec_mpeg12_remove(struct platform_device *pdev) + +{ + struct vdec_mpeg12_hw_s *hw = + (struct vdec_mpeg12_hw_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec = 
hw_to_vdec(hw); + int i; + + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + cancel_work_sync(&hw->userdata_push_work); + cancel_work_sync(&hw->notify_work); + cancel_work_sync(&hw->work); + cancel_work_sync(&hw->timeout_work); + + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1); + else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + + if (vdec->parallel_dec == 1) { + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + vdec->free_canvas_ex(canvas_y(hw->canvas_spec[i]), vdec->id); + vdec->free_canvas_ex(canvas_u(hw->canvas_spec[i]), vdec->id); + } + } + + if (hw->ccbuf_phyAddress_virt) { + dma_free_coherent(amports_get_dma_device(),hw->cc_buf_size, + hw->ccbuf_phyAddress_virt, hw->ccbuf_phyAddress); + hw->ccbuf_phyAddress_virt = NULL; + hw->ccbuf_phyAddress = 0; + } + + if (hw->user_data_buffer != NULL) { + kfree(hw->user_data_buffer); + hw->user_data_buffer = NULL; + } + vmmpeg2_destroy_userdata_manager(hw); + +#ifdef DUMP_USER_DATA + amvdec_mmpeg12_uninit_userdata_dump(hw); +#endif + + if (hw->fw) { + vfree(hw->fw); + hw->fw = NULL; + } + + vfree(hw); + + pr_info("ammvdec_mpeg12 removed.\n"); + + return 0; +} + +/****************************************/ + +static struct platform_driver ammvdec_mpeg12_driver = { + .probe = ammvdec_mpeg12_probe, + .remove = ammvdec_mpeg12_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t ammvdec_mpeg12_profile = { + .name = "MPEG12-V4L", + .profile = 
"" +}; + +static struct mconfig mmpeg12_configs[] = { + MC_PU32("radr", &radr), + MC_PU32("rval", &rval), + MC_PU32("dec_control", &dec_control), + MC_PU32("error_frame_skip_level", &error_frame_skip_level), + MC_PU32("decode_timeout_val", &decode_timeout_val), + MC_PU32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("pre_decode_buf_level", &pre_decode_buf_level), + MC_PU32("debug_enable", &debug_enable), + MC_PU32("udebug_flag", &udebug_flag), + MC_PU32("without_display_mode", &without_display_mode), + MC_PU32("dynamic_buf_num_margin", &dynamic_buf_num_margin), +#ifdef AGAIN_HAS_THRESHOLD + MC_PU32("again_threshold", &again_threshold), +#endif +}; +static struct mconfig_node mmpeg12_node; + +static int __init ammvdec_mpeg12_driver_init_module(void) +{ + pr_info("ammvdec_mpeg12 module init\n"); + + if (platform_driver_register(&ammvdec_mpeg12_driver)) { + pr_info("failed to register ammvdec_mpeg12 driver\n"); + return -ENODEV; + } + vcodec_profile_register(&ammvdec_mpeg12_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &mmpeg12_node, + "mmpeg12-v4l", mmpeg12_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_MPEG12, 1); + return 0; +} + +static void __exit ammvdec_mpeg12_driver_remove_module(void) +{ + pr_info("ammvdec_mpeg12 module exit.\n"); + platform_driver_unregister(&ammvdec_mpeg12_driver); +} + +/****************************************/ +module_param(dec_control, uint, 0664); +MODULE_PARM_DESC(dec_control, "\n ammvdec_mpeg12 decoder control\n"); +module_param(error_frame_skip_level, uint, 0664); +MODULE_PARM_DESC(error_frame_skip_level, + "\n ammvdec_mpeg12 error_frame_skip_level\n"); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\nrval\n"); + +module_param(debug_enable, uint, 0664); +MODULE_PARM_DESC(debug_enable, + "\n ammvdec_mpeg12 debug enable\n"); +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, + "\n 
ammvdec_mpeg12 pre_decode_buf_level\n"); + +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n ammvdec_mpeg12 start_decode_buf_level\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, "\n ammvdec_mpeg12 decode_timeout_val\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n ammvdec_mpeg12 dynamic_buf_num_margin\n"); + +module_param_array(max_process_time, uint, &max_decode_instance_num, 0664); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n ammvdec_mpeg12 udebug_flag\n"); + +module_param(dirty_again_threshold, int, 0664); +MODULE_PARM_DESC(dirty_again_threshold, "\n ammvdec_mpeg12 dirty_again_threshold\n"); + + +#ifdef AGAIN_HAS_THRESHOLD +module_param(again_threshold, uint, 0664); +MODULE_PARM_DESC(again_threshold, "\n again_threshold\n"); +#endif + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n ammvdec_mpeg12 without_display_mode\n"); + +module_param(error_proc_policy, uint, 0664); +MODULE_PARM_DESC(error_proc_policy, "\n ammvdec_mpeg12 error_proc_policy\n"); + +module_init(ammvdec_mpeg12_driver_init_module); +module_exit(ammvdec_mpeg12_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC MULTI MPEG1/2 Video Decoder Driver"); +MODULE_LICENSE("GPL"); + +
diff --git a/drivers/frame_provider/decoder_v4l/mpeg4/Makefile b/drivers/frame_provider/decoder_v4l/mpeg4/Makefile new file mode 100644 index 0000000..7164940 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/mpeg4/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_MPEG4_MULTI) += amvdec_mmpeg4_v4l.o +amvdec_mmpeg4_v4l-objs += vmpeg4_multi.o
diff --git a/drivers/frame_provider/decoder_v4l/mpeg4/vmpeg4_multi.c b/drivers/frame_provider/decoder_v4l/mpeg4/vmpeg4_multi.c new file mode 100644 index 0000000..1cd8f44 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/mpeg4/vmpeg4_multi.c
@@ -0,0 +1,3003 @@ +/* + * drivers/amlogic/amports/vmpeg4.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include <linux/amlogic/media/registers/register.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../../decoder/utils/amvdec.h" +#include "../../decoder/utils/vdec_input.h" +#include "../../decoder/utils/vdec.h" +#include "../../decoder/utils/firmware.h" +#include "../../decoder/utils/decoder_mmu_box.h" +#include "../../decoder/utils/decoder_bmmu_box.h" +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../../decoder/utils/firmware.h" +#include "../../decoder/utils/vdec_v4l2_buffer_ops.h" +#include 
"../../decoder/utils/config_parser.h" +#include <media/v4l2-mem2mem.h> +#include "../../decoder/utils/vdec_feature.h" + +#define DRIVER_NAME "ammvdec_mpeg4_v4l" + +#define MEM_NAME "codec_mmpeg4" + +#define DEBUG_PTS + +#define NV21 +#define I_PICTURE 0 +#define P_PICTURE 1 +#define B_PICTURE 2 +#define GET_PIC_TYPE(type) ("IPB####"[type&0x3]) + +#define ORI_BUFFER_START_ADDR 0x01000000 +#define DEFAULT_MEM_SIZE (32*SZ_1M) + +#define INTERLACE_FLAG 0x80 +#define TOP_FIELD_FIRST_FLAG 0x40 + +/* protocol registers */ +#define MREG_REF0 AV_SCRATCH_1 +#define MREG_REF1 AV_SCRATCH_2 +#define MP4_PIC_RATIO AV_SCRATCH_5 +#define MP4_RATE AV_SCRATCH_3 +#define MP4_ERR_COUNT AV_SCRATCH_6 +#define MP4_PIC_WH AV_SCRATCH_7 +#define MREG_INPUT AV_SCRATCH_8 +#define MREG_BUFFEROUT AV_SCRATCH_9 +#define MP4_NOT_CODED_CNT AV_SCRATCH_A +#define MP4_VOP_TIME_INC AV_SCRATCH_B +#define MP4_OFFSET_REG AV_SCRATCH_C +#define MP4_VOS_INFO AV_SCRATCH_D +#define MP4_SYS_RATE AV_SCRATCH_E +#define MEM_OFFSET_REG AV_SCRATCH_F +#define MP4_PIC_INFO AV_SCRATCH_H + +#define PARC_FORBIDDEN 0 +#define PARC_SQUARE 1 +#define PARC_CIF 2 +#define PARC_10_11 3 +#define PARC_16_11 4 +#define PARC_40_33 5 +#define PARC_RESERVED 6 +/* values between 6 and 14 are reserved */ +#define PARC_EXTENDED 15 + +#define VF_POOL_SIZE 64 +#define DECODE_BUFFER_NUM_MAX 16 +#define DECODE_BUFFER_NUM_DEF 8 +#define PUT_INTERVAL (HZ/100) +#define MAX_BMMU_BUFFER_NUM (DECODE_BUFFER_NUM_MAX + 1) +#define WORKSPACE_SIZE (12*SZ_64K) + +#define CTX_LMEM_SWAP_OFFSET 0 +#define CTX_QUANT_MATRIX_OFFSET 0x800 +/* dcac buffer must align at 4k boundary */ +#define CTX_DCAC_BUF_OFFSET 0x1000 +#define CTX_DECBUF_OFFSET (0x0c0000 + 0x1000) + +#define RATE_DETECT_COUNT 5 +#define DURATION_UNIT 96000 +#define PTS_UNIT 90000 +#define CHECK_INTERVAL (HZ/100) + +#define DUR2PTS(x) ((x) - ((x) >> 4)) + +/* 96000/(60fps* 2field) = 800, 96000/10fps = 9600 */ +#define MPEG4_VALID_DUR(x) ((x < 9600) && (x > 799)) + +#define 
MAX_MPEG4_SUPPORT_SIZE (1920*1088) + +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_ERROR 3 +#define DEC_RESULT_FORCE_EXIT 4 +#define DEC_RESULT_EOS 5 +#define DEC_RESULT_UNFINISH 6 +#define DEC_RESULT_ERROR_SZIE 7 + +#define DEC_DECODE_TIMEOUT 0x21 +#define DECODE_ID(hw) (hw_to_vdec(hw)->id) +#define DECODE_STOP_POS AV_SCRATCH_K + +#define INVALID_IDX (-1) /* Invalid buffer index.*/ + +static u32 udebug_flag; + +static struct vframe_s *vmpeg_vf_peek(void *); +static struct vframe_s *vmpeg_vf_get(void *); +static void vmpeg_vf_put(struct vframe_s *, void *); +static int vmpeg_vf_states(struct vframe_states *states, void *); +static int vmpeg_event_cb(int type, void *data, void *private_data); +static int notify_v4l_eos(struct vdec_s *vdec); + +static int pre_decode_buf_level = 0x800; +static int start_decode_buf_level = 0x4000; +static int debug_enable; +static unsigned int radr; +static unsigned int rval; +/* 0x40bit = 8byte */ +static unsigned int frmbase_cont_bitlevel = 0x40; +static unsigned int dynamic_buf_num_margin; + +#define VMPEG4_DEV_NUM 9 +static unsigned int max_decode_instance_num = VMPEG4_DEV_NUM; +static unsigned int max_process_time[VMPEG4_DEV_NUM]; +static unsigned int decode_timeout_val = 200; + +static u32 without_display_mode; + +#undef pr_info +#define pr_info printk +unsigned int mpeg4_debug_mask = 0xff; +static u32 run_ready_min_buf_num = 2; + + +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_RUN_FLOW 0X0001 +#define PRINT_FLAG_TIMEINFO 0x0002 +#define PRINT_FLAG_UCODE_DETAIL 0x0004 +#define PRINT_FLAG_VLD_DETAIL 0x0008 +#define PRINT_FLAG_DEC_DETAIL 0x0010 +#define PRINT_FLAG_BUFFER_DETAIL 0x0020 +#define PRINT_FLAG_RESTORE 0x0040 +#define PRINT_FRAME_NUM 0x0080 +#define PRINT_FLAG_FORCE_DONE 0x0100 +#define PRINT_FLAG_COUNTER 0X0200 +#define PRINT_FRAMEBASE_DATA 0x0400 +#define PRINT_FLAG_VDEC_STATUS 0x0800 +#define PRINT_FLAG_TIMEOUT_STATUS 0x1000 +#define PRINT_FLAG_V4L_DETAIL 
0x8000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 + +int mmpeg4_debug_print(int index, int debug_flag, const char *fmt, ...) +{ + if (((debug_enable & debug_flag) && + ((1 << index) & mpeg4_debug_mask)) + || (debug_flag == PRINT_FLAG_ERROR)) { + unsigned char *buf = kzalloc(512, GFP_ATOMIC); + int len = 0; + va_list args; + + if (!buf) + return 0; + + va_start(args, fmt); + len = sprintf(buf, "%d: ", index); + vsnprintf(buf + len, 512-len, fmt, args); + pr_info("%s", buf); + va_end(args); + kfree(buf); + } + return 0; +} + +struct pic_info_t { + int index; + u32 pic_type; + u32 pic_info; + u32 pts; + u64 pts64; + bool pts_valid; + u32 duration; + u32 repeat_cnt; + ulong v4l_ref_buf_addr; + u32 hw_decode_time; + u32 frame_size; // For frame base mode; + u64 timestamp; + u32 offset; + u32 height; + u32 width; +}; + +struct vdec_mpeg4_hw_s { + spinlock_t lock; + struct platform_device *platform_dev; + /* struct device *cma_dev; */ + + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + struct vframe_s vframe_dummy; + s32 vfbuf_use[DECODE_BUFFER_NUM_MAX]; + u32 frame_width; + u32 frame_height; + u32 frame_dur; + u32 frame_prog; + + u32 ctx_valid; + u32 reg_vcop_ctrl_reg; + u32 reg_pic_head_info; + u32 reg_mpeg1_2_reg; + u32 reg_slice_qp; + u32 reg_mp4_pic_wh; + u32 reg_mp4_rate; + u32 reg_mb_info; + u32 reg_dc_ac_ctrl; + u32 reg_iqidct_control; + u32 reg_resync_marker_length; + u32 reg_rv_ai_mb_count; + struct timer_list check_timer; + u32 decode_timeout_count; + u32 timeout_cnt; + unsigned long int start_process_time; + + u32 last_vld_level; + u8 init_flag; + u32 eos; + void *mm_blk_handle; + + struct vframe_chunk_s *chunk; + u32 chunk_offset; + u32 chunk_size; + u32 chunk_frame_count; + u32 stat; + unsigned long buf_start; + u32 buf_size; + /* + unsigned long cma_alloc_addr; + int cma_alloc_count; + */ + u32 vmpeg4_ratio; + u64 vmpeg4_ratio64; + u32 
rate_detect; + u32 vmpeg4_rotation; + u32 total_frame; + u32 last_vop_time_inc; + u32 last_duration; + u32 last_anch_pts; + u32 vop_time_inc_since_last_anch; + u32 frame_num_since_last_anch; + u64 last_anch_pts_us64; + + u32 last_pts; + u64 last_pts64; + u32 pts_hit; + u32 pts_missed; + u32 pts_i_hit; + u32 pts_i_missed; + struct pic_info_t pic[DECODE_BUFFER_NUM_MAX]; + u32 canvas_spec[DECODE_BUFFER_NUM_MAX]; +#ifdef NV21 + struct canvas_config_s canvas_config[DECODE_BUFFER_NUM_MAX][2]; +#else + struct canvas_config_s canvas_config[DECODE_BUFFER_NUM_MAX][3]; +#endif + struct dec_sysinfo vmpeg4_amstream_dec_info; + + s32 refs[2]; + int dec_result; + struct work_struct work; + + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + u32 frame_num; + u32 sys_mp4_rate; + u32 run_count; + u32 not_run_ready; + u32 buffer_not_ready; + u32 input_empty; + atomic_t peek_num; + atomic_t get_num; + atomic_t put_num; + u32 first_i_frame_ready; + u32 drop_frame_count; + u32 unstable_pts; + u32 last_dec_pts; + + struct firmware_s *fw; + u32 blkmode; + wait_queue_head_t wait_q; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + u32 buf_num; + u32 dynamic_buf_num_margin; + u32 i_only; + int sidebind_type; + int sidebind_channel_id; + u32 res_ch_flag; + u32 profile_idc; + u32 level_idc; + unsigned int i_decoded_frames; + unsigned int i_lost_frames; + unsigned int i_concealed_frames; + unsigned int p_decoded_frames; + unsigned int p_lost_frames; + unsigned int p_concealed_frames; + unsigned int b_decoded_frames; + unsigned int b_lost_frames; + unsigned int b_concealed_frames; + int vdec_pg_enable_flag; + ulong fb_token; + char vdec_name[32]; + char pts_name[32]; + char new_q_name[32]; + char disp_q_name[32]; +}; +static void vmpeg4_local_init(struct vdec_mpeg4_hw_s *hw); +static int vmpeg4_hw_ctx_restore(struct vdec_mpeg4_hw_s *hw); +static unsigned char + get_data_check_sum(struct vdec_mpeg4_hw_s *hw, int size); +static void flush_output(struct 
vdec_mpeg4_hw_s * hw); + +#define PROVIDER_NAME "vdec.mpeg4" + +/* + *int query_video_status(int type, int *value); + */ +static const struct vframe_operations_s vf_provider_ops = { + .peek = vmpeg_vf_peek, + .get = vmpeg_vf_get, + .put = vmpeg_vf_put, + .event_cb = vmpeg_event_cb, + .vf_states = vmpeg_vf_states, +}; + +static unsigned char aspect_ratio_table[16] = { + PARC_FORBIDDEN, + PARC_SQUARE, + PARC_CIF, + PARC_10_11, + PARC_16_11, + PARC_40_33, + PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, + PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, PARC_RESERVED, + PARC_RESERVED, PARC_EXTENDED +}; + +static void reset_process_time(struct vdec_mpeg4_hw_s *hw); + +static void mpeg4_put_video_frame(void *vdec_ctx, struct vframe_s *vf) +{ + vmpeg_vf_put(vf, vdec_ctx); +} + +static void mpeg4_get_video_frame(void *vdec_ctx, struct vframe_s **vf) +{ + *vf = vmpeg_vf_get(vdec_ctx); +} + +static struct task_ops_s task_dec_ops = { + .type = TASK_TYPE_DEC, + .get_vframe = mpeg4_get_video_frame, + .put_vframe = mpeg4_put_video_frame, +}; + +static int vmpeg4_v4l_alloc_buff_config_canvas(struct vdec_mpeg4_hw_s *hw, int i) +{ + int ret; + u32 canvas; + ulong decbuf_start = 0, decbuf_uv_start = 0; + int decbuf_y_size = 0, decbuf_uv_size = 0; + u32 canvas_width = 0, canvas_height = 0; + struct vdec_s *vdec = hw_to_vdec(hw); + struct vdec_v4l2_buffer *fb = NULL; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (hw->pic[i].v4l_ref_buf_addr) { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *) + hw->pic[i].v4l_ref_buf_addr; + + fb->status = FB_ST_DECODER; + return 0; + } + + ret = ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC); + if (ret < 0) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "[%d] get fb fail.\n", + ((struct aml_vcodec_ctx *) + (hw->v4l2_ctx))->id); + return ret; + } + + fb->task->attach(fb->task, &task_dec_ops, hw_to_vdec(hw)); + fb->status = FB_ST_DECODER; + + if (!hw->frame_width || 
!hw->frame_height) { + struct vdec_pic_info pic; + vdec_v4l_get_pic_info(ctx, &pic); + hw->frame_width = pic.visible_width; + hw->frame_height = pic.visible_height; + mmpeg4_debug_print(DECODE_ID(hw), 0, + "[%d] set %d x %d from IF layer\n", ctx->id, + hw->frame_width, hw->frame_height); + } + + hw->pic[i].v4l_ref_buf_addr = (ulong)fb; + if (fb->num_planes == 1) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].offset; + decbuf_uv_start = decbuf_start + decbuf_y_size; + decbuf_uv_size = decbuf_y_size / 2; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + } else if (fb->num_planes == 2) { + decbuf_start = fb->m.mem[0].addr; + decbuf_y_size = fb->m.mem[0].size; + decbuf_uv_start = fb->m.mem[1].addr; + decbuf_uv_size = fb->m.mem[1].size; + canvas_width = ALIGN(hw->frame_width, 64); + canvas_height = ALIGN(hw->frame_height, 64); + fb->m.mem[0].bytes_used = decbuf_y_size; + fb->m.mem[1].bytes_used = decbuf_uv_size; + } + + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), v4l ref buf addr: 0x%x\n", + ctx->id, __func__, fb); + + if (vdec->parallel_dec == 1) { + u32 tmp; + if (canvas_y(hw->canvas_spec[i]) == 0xff) { + tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~0xff; + hw->canvas_spec[i] |= tmp; + } + if (canvas_u(hw->canvas_spec[i]) == 0xff) { + tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id); + hw->canvas_spec[i] &= ~(0xffff << 8); + hw->canvas_spec[i] |= tmp << 8; + hw->canvas_spec[i] |= tmp << 16; + } + canvas = hw->canvas_spec[i]; + } else { + canvas = vdec->get_canvas(i, 2); + hw->canvas_spec[i] = canvas; + } + + hw->canvas_config[i][0].phy_addr = decbuf_start; + hw->canvas_config[i][0].width = canvas_width; + hw->canvas_config[i][0].height = canvas_height; + hw->canvas_config[i][0].block_mode = hw->blkmode; + if (hw->blkmode == CANVAS_BLKMODE_LINEAR) + hw->canvas_config[i][0].endian = 7; + else 
+ hw->canvas_config[i][0].endian = 0; + config_cav_lut(canvas_y(canvas), + &hw->canvas_config[i][0], VDEC_1); + + hw->canvas_config[i][1].phy_addr = + decbuf_uv_start; + hw->canvas_config[i][1].width = canvas_width; + hw->canvas_config[i][1].height = (canvas_height >> 1); + hw->canvas_config[i][1].block_mode = hw->blkmode; + if (hw->blkmode == CANVAS_BLKMODE_LINEAR) + hw->canvas_config[i][1].endian = 7; + else + hw->canvas_config[i][1].endian = 0; + config_cav_lut(canvas_u(canvas), + &hw->canvas_config[i][1], VDEC_1); + + return 0; +} + +static int find_free_buffer(struct vdec_mpeg4_hw_s *hw) +{ + int i; + + for (i = 0; i < hw->buf_num; i++) { + if (hw->vfbuf_use[i] == 0) + break; + } + + if ((i == hw->buf_num) && + (hw->buf_num != 0)) { + return -1; + } + + if (vmpeg4_v4l_alloc_buff_config_canvas(hw, i)) + return -1; + + return i; +} + +static int spec_to_index(struct vdec_mpeg4_hw_s *hw, u32 spec) +{ + int i; + + for (i = 0; i < hw->buf_num; i++) { + if (hw->canvas_spec[i] == spec) + return i; + } + + return -1; +} + +static void set_frame_info(struct vdec_mpeg4_hw_s *hw, struct vframe_s *vf, + int buffer_index) +{ + int ar = 0; + int endian_tmp; + unsigned int num = 0; + unsigned int den = 0; + unsigned int pixel_ratio = READ_VREG(MP4_PIC_RATIO); + + if (hw->vmpeg4_ratio64 != 0) { + num = hw->vmpeg4_ratio64>>32; + den = hw->vmpeg4_ratio64 & 0xffffffff; + } else { + num = hw->vmpeg4_ratio>>16; + den = hw->vmpeg4_ratio & 0xffff; + + } + if ((num == 0) || (den == 0)) { + num = 1; + den = 1; + } + + if (hw->vmpeg4_ratio == 0) { + vf->ratio_control |= (0x90 << DISP_RATIO_ASPECT_RATIO_BIT); + /* always stretch to 16:9 */ + } else if (pixel_ratio > 0x0f) { + num = (pixel_ratio >> 8) * + hw->frame_width * num; + ar = div_u64((pixel_ratio & 0xff) * + hw->frame_height * den * 0x100ULL + + (num >> 1), num); + } else { + switch (aspect_ratio_table[pixel_ratio]) { + case 0: + num = hw->frame_width * num; + ar = (hw->frame_height * den * + 0x100 + (num >> 1)) / num; + break; 
+ case 1: + num = vf->width * num; + ar = (vf->height * den * 0x100 + (num >> 1)) / num; + break; + case 2: + num = (vf->width * 12) * num; + ar = (vf->height * den * 0x100 * 11 + + ((num) >> 1)) / num; + break; + case 3: + num = (vf->width * 10) * num; + ar = (vf->height * den * 0x100 * 11 + (num >> 1)) / + num; + break; + case 4: + num = (vf->width * 16) * num; + ar = (vf->height * den * 0x100 * 11 + (num >> 1)) / + num; + break; + case 5: + num = (vf->width * 40) * num; + ar = (vf->height * den * 0x100 * 33 + (num >> 1)) / + num; + break; + default: + num = vf->width * num; + ar = (vf->height * den * 0x100 + (num >> 1)) / num; + break; + } + } + + vf->sidebind_type = hw->sidebind_type; + vf->sidebind_channel_id = hw->sidebind_channel_id; + + ar = min(ar, DISP_RATIO_ASPECT_RATIO_MAX); + + vf->signal_type = 0; + vf->type_original = vf->type; + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + vf->canvas0Addr = vf->canvas1Addr = -1; +#ifdef NV21 + vf->plane_num = 2; +#else + vf->plane_num = 3; +#endif + vf->canvas0_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas0_config[1] = hw->canvas_config[buffer_index][1]; +#ifndef NV21 + vf->canvas0_config[2] = hw->canvas_config[buffer_index][2]; +#endif + vf->canvas1_config[0] = hw->canvas_config[buffer_index][0]; + vf->canvas1_config[1] = hw->canvas_config[buffer_index][1]; +#ifndef NV21 + vf->canvas1_config[2] = hw->canvas_config[buffer_index][2]; +#endif + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) { + endian_tmp = (hw->blkmode == CANVAS_BLKMODE_LINEAR) ? 7 : 0; + } else { + endian_tmp = (hw->blkmode == CANVAS_BLKMODE_LINEAR) ? 
0 : 7; + } + /* mpeg4 convert endian to match display */ + vf->canvas0_config[0].endian = endian_tmp; + vf->canvas0_config[1].endian = endian_tmp; + vf->canvas1_config[0].endian = endian_tmp; + vf->canvas1_config[1].endian = endian_tmp; +#ifndef NV21 + vf->canvas0_config[2].endian = endian_tmp; + vf->canvas1_config[2].endian = endian_tmp; +#endif +} + +static inline void vmpeg4_save_hw_context(struct vdec_mpeg4_hw_s *hw) +{ + hw->reg_mpeg1_2_reg = READ_VREG(MPEG1_2_REG); + hw->reg_vcop_ctrl_reg = READ_VREG(VCOP_CTRL_REG); + hw->reg_pic_head_info = READ_VREG(PIC_HEAD_INFO); + hw->reg_slice_qp = READ_VREG(SLICE_QP); + hw->reg_mp4_pic_wh = READ_VREG(MP4_PIC_WH); + hw->reg_mp4_rate = READ_VREG(MP4_RATE); + hw->reg_mb_info = READ_VREG(MB_INFO); + hw->reg_dc_ac_ctrl = READ_VREG(DC_AC_CTRL); + hw->reg_iqidct_control = READ_VREG(IQIDCT_CONTROL); + hw->reg_resync_marker_length = READ_VREG(RESYNC_MARKER_LENGTH); + hw->reg_rv_ai_mb_count = READ_VREG(RV_AI_MB_COUNT); +} + +static int update_ref(struct vdec_mpeg4_hw_s *hw, int index) +{ + hw->vfbuf_use[index]++; + + if (hw->refs[1] == -1) { + hw->refs[1] = index; + index = -1; + } else if (hw->refs[0] == -1) { + hw->refs[0] = hw->refs[1]; + hw->refs[1] = index; + index = hw->refs[0]; + } else { + hw->vfbuf_use[hw->refs[0]]--; + hw->refs[0] = hw->refs[1]; + hw->refs[1] = index; + index = hw->refs[0]; + } + + return index; +} + +static int prepare_display_buf(struct vdec_mpeg4_hw_s * hw, + struct pic_info_t *pic) +{ + struct vframe_s *vf = NULL; + struct vdec_s *vdec = hw_to_vdec(hw); + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + ulong nv_order = VIDTYPE_VIU_NV21; + int index = pic->index; + bool pb_skip = false; + unsigned long flags; + + /* swap uv */ + if (hw->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + nv_order = VIDTYPE_VIU_NV12; + if (vdec->prog_only || (!v4l2_ctx->vpp_is_need)) + pic->pic_info &= 
~INTERLACE_FLAG; + } + + if (hw->i_only) + pb_skip = 1; + + if (pic->pic_info & INTERLACE_FLAG) { + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "fatal error, no available buffer slot."); + return -1; + } + + if (hw->is_used_v4l) { + vf->v4l_mem_handle + = hw->pic[index].v4l_ref_buf_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), v4l mem handle: 0x%lx\n", + ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id, + __func__, vf->v4l_mem_handle); + } + + vf->index = pic->index; + vf->width = pic->width; + vf->height = pic->height; + vf->bufWidth = 1920; + vf->flag = 0; + vf->orientation = hw->vmpeg4_rotation; + vf->pts = pic->pts; + vf->pts_us64 = pic->pts64; + vf->timestamp = pic->timestamp; + vf->duration = pic->duration >> 1; + vf->duration_pulldown = 0; + vf->type = (pic->pic_info & TOP_FIELD_FIRST_FLAG) ? + VIDTYPE_INTERLACE_TOP : VIDTYPE_INTERLACE_BOTTOM; +#ifdef NV21 + vf->type |= nv_order; +#endif + set_frame_info(hw, vf, pic->index); + + hw->vfbuf_use[pic->index]++; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "field0: pts %d, pts64 %lld, w %d, h %d, dur %d\n", + vf->pts, vf->pts_us64, vf->width, vf->height, vf->duration); + + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = + (((u64)vf->duration << 32) & + 0xffffffff00000000) | pic->offset; + vf->pts = 0; + } + if (((hw->first_i_frame_ready == 0) || pb_skip) + && (pic->pic_type != I_PICTURE)) { + hw->drop_frame_count++; + if (pic->pic_type == I_PICTURE) { + hw->i_lost_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_lost_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_lost_frames++; + } + hw->vfbuf_use[index]--; + spin_lock_irqsave(&hw->lock, flags); + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + spin_unlock_irqrestore(&hw->lock, flags); + return 0; + } else { + vf->mem_handle = + 
decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, index); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->timestamp); + hw->frame_num++; + if (pic->pic_type == I_PICTURE) { + hw->i_decoded_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_decoded_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_decoded_frames++; + } + if (without_display_mode == 0) { + if (hw->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } else { + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } else + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } + + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "error, no available buf.\n"); + hw->dec_result = DEC_RESULT_ERROR; + return -1; + } + + if (hw->is_used_v4l) { + vf->v4l_mem_handle + = hw->pic[index].v4l_ref_buf_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), v4l mem handle: 0x%lx\n", + ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id, + __func__, vf->v4l_mem_handle); + } + + vf->index = pic->index; + vf->width = pic->width; + vf->height = pic->height; + vf->bufWidth = 1920; + vf->flag = 0; + vf->orientation = hw->vmpeg4_rotation; + vf->pts = 0; + vf->pts_us64 = 0; + vf->timestamp = pic->timestamp; + if (v4l2_ctx->second_field_pts_mode) { + vf->timestamp = 0; + } + + vf->duration = pic->duration >> 1; + vf->duration_pulldown = 0; + vf->type = (pic->pic_info & TOP_FIELD_FIRST_FLAG) ? 
+ VIDTYPE_INTERLACE_BOTTOM : VIDTYPE_INTERLACE_TOP; +#ifdef NV21 + vf->type |= nv_order; +#endif + set_frame_info(hw, vf, pic->index); + + hw->vfbuf_use[pic->index]++; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "filed1: pts %d, pts64 %lld, w %d, h %d, dur: %d\n", + vf->pts, vf->pts_us64, vf->width, vf->height, vf->duration); + + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = (u64)-1; + vf->pts = 0; + } + if (((hw->first_i_frame_ready == 0) || pb_skip) + && (pic->pic_type != I_PICTURE)) { + hw->drop_frame_count++; + if (pic->pic_type == I_PICTURE) { + hw->i_lost_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_lost_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_lost_frames++; + } + hw->vfbuf_use[index]--; + spin_lock_irqsave(&hw->lock, flags); + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + spin_unlock_irqrestore(&hw->lock, flags); + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, index); + decoder_do_frame_check(vdec, vf); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, + (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, vf->timestamp); + vdec->vdec_fps_detec(vdec->id); + hw->frame_num++; + if (pic->pic_type == I_PICTURE) { + hw->i_decoded_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_decoded_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_decoded_frames++; + } + if (without_display_mode == 0) { + if (hw->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } else { + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } else + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } + } else { + /* progressive */ + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "error, no available buf\n"); + hw->dec_result = DEC_RESULT_ERROR; + 
return -1; + } + + if (hw->is_used_v4l) { + vf->v4l_mem_handle + = hw->pic[index].v4l_ref_buf_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "[%d] %s(), v4l mem handle: 0x%lx\n", + ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id, + __func__, vf->v4l_mem_handle); + } + + vf->index = index; + vf->width = hw->frame_width; + vf->height = hw->frame_height; + vf->bufWidth = 1920; + vf->flag = 0; + vf->orientation = hw->vmpeg4_rotation; + vf->pts = pic->pts; + vf->pts_us64 = pic->pts64; + vf->timestamp = pic->timestamp; + vf->duration = pic->duration; + vf->duration_pulldown = pic->repeat_cnt * + pic->duration; +#ifdef NV21 + vf->type = VIDTYPE_PROGRESSIVE | + VIDTYPE_VIU_FIELD | nv_order; +#else + vf->type = VIDTYPE_PROGRESSIVE | + VIDTYPE_VIU_FIELD; +#endif + set_frame_info(hw, vf, index); + + hw->vfbuf_use[index]++; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "prog: pts %d, pts64 %lld, w %d, h %d, dur %d\n", + vf->pts, vf->pts_us64, vf->width, vf->height, vf->duration); + + if (vdec_stream_based(vdec) && (!vdec->vbuf.use_ptsserv)) { + vf->pts_us64 = + (((u64)vf->duration << 32) & + 0xffffffff00000000) | pic->offset; + vf->pts = 0; + } + if (((hw->first_i_frame_ready == 0) || pb_skip) + && (pic->pic_type != I_PICTURE)) { + hw->drop_frame_count++; + if (pic->pic_type == I_PICTURE) { + hw->i_lost_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_lost_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_lost_frames++; + } + hw->vfbuf_use[index]--; + spin_lock_irqsave(&hw->lock, flags); + kfifo_put(&hw->newframe_q, + (const struct vframe_s *)vf); + spin_unlock_irqrestore(&hw->lock, flags); + } else { + struct vdec_info vinfo; + + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->mm_blk_handle, index); + decoder_do_frame_check(vdec, vf); + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->pts_name, 
vf->timestamp); + ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q)); + ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q)); + vdec->vdec_fps_detec(vdec->id); + hw->frame_num++; + if (pic->pic_type == I_PICTURE) { + hw->i_decoded_frames++; + } else if (pic->pic_type == P_PICTURE) { + hw->p_decoded_frames++; + } else if (pic->pic_type == B_PICTURE) { + hw->b_decoded_frames++; + } + vdec->dec_status(vdec, &vinfo); + vdec_fill_vdec_frame(vdec, NULL, + &vinfo, vf, pic->hw_decode_time); + if (without_display_mode == 0) { + if (hw->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } else { + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } else + vmpeg_vf_put(vmpeg_vf_get(vdec), vdec); + } + + } + return 0; +} + +static void vmpeg4_prepare_input(struct vdec_mpeg4_hw_s *hw) +{ + struct vdec_s *vdec = hw_to_vdec(hw); + struct vdec_input_s *input = &vdec->input; + struct vframe_block_list_s *block = NULL; + struct vframe_chunk_s *chunk = hw->chunk; + int dummy; + + if (chunk == NULL) + return; + + /* full reset to HW input */ + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0); + + /* reset VLD fifo for all vdec */ + WRITE_VREG(DOS_SW_RESET0, (1<<5) | (1<<4) | (1<<3)); + WRITE_VREG(DOS_SW_RESET0, 0); + + WRITE_VREG(POWER_CTL_VLD, 1 << 4); + + /* + *setup HW decoder input buffer (VLD context) + * based on input->type and input->target + */ + if (input_frame_based(input)) { + block = chunk->block; + + WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, block->start); + WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, block->start + + block->size - 8); + WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR, + round_down(block->start + hw->chunk_offset, + VDEC_FIFO_ALIGN)); + + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1); + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0); + + /* set to manual mode */ + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 
2); + WRITE_VREG(VLD_MEM_VIFIFO_RP, + round_down(block->start + hw->chunk_offset, + VDEC_FIFO_ALIGN)); + dummy = hw->chunk_offset + hw->chunk_size + + VLD_PADDING_SIZE; + if (dummy >= block->size) + dummy -= block->size; + WRITE_VREG(VLD_MEM_VIFIFO_WP, + round_down(block->start + dummy, + VDEC_FIFO_ALIGN)); + + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3); + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2); + + WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, + (0x11 << 16) | (1<<10) | (7<<3)); + + } +} + +static int vmpeg4_get_ps_info(struct vdec_mpeg4_hw_s *hw, int width, int height, int interlace, struct aml_vdec_ps_infos *ps) +{ + ps->visible_width = width; + ps->visible_height = height; + ps->coded_width = ALIGN(width, 64); + ps->coded_height = ALIGN(height, 64); + ps->dpb_size = hw->buf_num; + ps->dpb_frames = DECODE_BUFFER_NUM_DEF; + ps->dpb_margin = hw->dynamic_buf_num_margin; + ps->field = interlace ? V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE; + + return 0; +} + +static int v4l_res_change(struct vdec_mpeg4_hw_s *hw, int width, int height, int interlace) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int ret = 0; + + if (ctx->param_sets_from_ucode && + hw->res_ch_flag == 0) { + struct aml_vdec_ps_infos ps; + + if ((hw->frame_width != 0 && + hw->frame_height != 0) && + (hw->frame_width != width || + hw->frame_height != height)) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "v4l_res_change Pic Width/Height Change (%d,%d)=>(%d,%d)\n", + hw->frame_width, hw->frame_height, + width, + height); + vmpeg4_get_ps_info(hw, width, height, interlace, &ps); + vdec_v4l_set_ps_infos(ctx, &ps); + vdec_v4l_res_ch_event(ctx); + hw->v4l_params_parsed = false; + hw->res_ch_flag = 1; + ctx->v4l_resolution_change = 1; + hw->eos = 1; + flush_output(hw); + ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__); + notify_v4l_eos(hw_to_vdec(hw)); + ATRACE_COUNTER("V_ST_DEC-submit_eos", 0); + + ret = 1; + } + } + + return ret; +} + +static irqreturn_t vmpeg4_isr_thread_fn(struct vdec_s *vdec, 
int irq) +{ + u32 reg; + u32 picture_type; + int index; + u32 pts, offset = 0; + u64 pts_us64 = 0; + u32 frame_size, dec_w, dec_h; + u32 time_increment_resolution, fixed_vop_rate, vop_time_inc, vos_info; + u32 repeat_cnt, duration = 3200; + struct pic_info_t *dec_pic, *disp_pic; + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)(vdec->private); + if (hw->eos) + return IRQ_HANDLED; + + if (READ_VREG(MP4_PIC_INFO) == 1) { + if (hw->is_used_v4l) { + int frame_width = READ_VREG(MP4_PIC_WH)>> 16; + int frame_height = READ_VREG(MP4_PIC_WH) & 0xffff; + int interlace = (READ_VREG(MP4_PIC_RATIO) & 0x80000000) >> 31; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_BUFFER_DETAIL, + "interlace = %d\n", interlace); + if (!v4l_res_change(hw, frame_width, frame_height, interlace)) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) { + struct aml_vdec_ps_infos ps; + + vmpeg4_get_ps_info(hw, frame_width, frame_height, interlace, &ps); + hw->v4l_params_parsed = true; + vdec_v4l_set_ps_infos(ctx, &ps); + reset_process_time(hw); + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + } else { + struct vdec_pic_info pic; + + if (!hw->buf_num) { + vdec_v4l_get_pic_info(ctx, &pic); + hw->buf_num = pic.dpb_frames + + pic.dpb_margin; + if (hw->buf_num > DECODE_BUFFER_NUM_MAX) + hw->buf_num = DECODE_BUFFER_NUM_MAX; + } + + WRITE_VREG(MP4_PIC_INFO, 0); + + hw->res_ch_flag = 0; + } + } else { + reset_process_time(hw); + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + } + } else + WRITE_VREG(MP4_PIC_INFO, 0); + return IRQ_HANDLED; + } + + if ((hw->is_used_v4l) && !hw->v4l_params_parsed) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "The head was not found, can not to decode\n"); + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + if (READ_VREG(AV_SCRATCH_M) != 0 && 
+ (debug_enable & PRINT_FLAG_UCODE_DETAIL)) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_UCODE_DETAIL, + "dbg %x: %x, level %x, wp %x, rp %x, cnt %x\n", + READ_VREG(AV_SCRATCH_M), READ_VREG(AV_SCRATCH_N), + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + READ_VREG(VIFF_BIT_CNT)); + WRITE_VREG(AV_SCRATCH_M, 0); + return IRQ_HANDLED; + } + reg = READ_VREG(MREG_BUFFEROUT); + + ATRACE_COUNTER("V_ST_DEC-decode_state", reg); + + time_increment_resolution = READ_VREG(MP4_RATE); + fixed_vop_rate = time_increment_resolution >> 16; + time_increment_resolution &= 0xffff; + if (time_increment_resolution > 0 && + fixed_vop_rate == 0) + hw->sys_mp4_rate = time_increment_resolution; + + if (hw->vmpeg4_amstream_dec_info.rate == 0) { + if ((fixed_vop_rate != 0) && + (time_increment_resolution != 0)) { + hw->vmpeg4_amstream_dec_info.rate = fixed_vop_rate * + DURATION_UNIT / time_increment_resolution; + } else if (time_increment_resolution == 0 + && hw->sys_mp4_rate > 0) + time_increment_resolution = hw->sys_mp4_rate; + } + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "time_inc_res = %d, fixed_vop_rate = %d, rate = %d\n", + time_increment_resolution, fixed_vop_rate, + hw->vmpeg4_amstream_dec_info.rate); + + if (reg == 2) { + /* timeout when decoding next frame */ + + /* for frame based case, insufficient result may happen + * at the beginning when only VOL head is available save + * HW context also, such as for the QTable from VCOP register + */ + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FLAG_VLD_DETAIL, + "%s, level = %x, vfifo_ctrl = %x, bitcnt = %d\n", + __func__, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_CONTROL), + READ_VREG(VIFF_BIT_CNT)); + + if (vdec_frame_based(vdec)) { + vmpeg4_save_hw_context(hw); + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); + } else { + reset_process_time(hw); + hw->dec_result = DEC_RESULT_AGAIN; + vdec_schedule_work(&hw->work); + } + return 
IRQ_HANDLED; + } else { + reset_process_time(hw); + picture_type = (reg >> 3) & 7; + repeat_cnt = READ_VREG(MP4_NOT_CODED_CNT); + vop_time_inc = READ_VREG(MP4_VOP_TIME_INC); + vos_info = READ_VREG(MP4_VOS_INFO); + if ((vos_info & 0xff) && + (((vos_info >> 4) & 0xf) != hw->profile_idc || + (vos_info & 0xf) != hw->level_idc)) { + hw->profile_idc = vos_info >> 4 & 0xf; + hw->level_idc = vos_info & 0xf; + vdec_set_profile_level(vdec, hw->profile_idc, hw->level_idc); + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL, + "profile_idc: %d level_idc: %d\n", + hw->profile_idc, hw->level_idc); + } + + index = spec_to_index(hw, READ_VREG(REC_CANVAS_ADDR)); + if (index < 0) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "invalid buffer index %d. rec = %x\n", + index, READ_VREG(REC_CANVAS_ADDR)); + hw->dec_result = DEC_RESULT_ERROR; + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + hw->dec_result = DEC_RESULT_DONE; + dec_pic = &hw->pic[index]; + if (vdec->mvfrm) { + dec_pic->frame_size = vdec->mvfrm->frame_size; + dec_pic->hw_decode_time = + local_clock() - vdec->mvfrm->hw_decode_start; + } + dec_pic->pts_valid = false; + dec_pic->pts = 0; + dec_pic->pts64 = 0; + dec_pic->timestamp = 0; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_BUFFER_DETAIL, + "new pic: index=%d, used=%d, repeat=%d, time_inc=%d\n", + index, hw->vfbuf_use[index], repeat_cnt, vop_time_inc); + + dec_w = READ_VREG(MP4_PIC_WH)>> 16; + dec_h = READ_VREG(MP4_PIC_WH) & 0xffff; + if (dec_w != 0) { + hw->frame_width = dec_w; + dec_pic->width = dec_w; + } + if (dec_h != 0) { + hw->frame_height = dec_h; + dec_pic->height = dec_h; + } + hw->res_ch_flag = 0; + + if (hw->vmpeg4_amstream_dec_info.rate == 0) { + if (vop_time_inc < hw->last_vop_time_inc) { + duration = vop_time_inc + + time_increment_resolution - + hw->last_vop_time_inc; + } else { + duration = vop_time_inc - + hw->last_vop_time_inc; + } + + if (duration == hw->last_duration) { + hw->rate_detect++; + if ((hw->rate_detect >= RATE_DETECT_COUNT) 
&& + (time_increment_resolution != 0)) { + hw->vmpeg4_amstream_dec_info.rate = + duration * DURATION_UNIT / + time_increment_resolution; + duration = + hw->vmpeg4_amstream_dec_info.rate; + } + } else { + hw->rate_detect = 0; + hw->last_duration = duration; + } + if (MPEG4_VALID_DUR(duration)) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "warn: duration %x, set 0\n", duration); + duration = 0; + } + } else { + duration = hw->vmpeg4_amstream_dec_info.rate; +#if 0 + pr_info("info rate = %d, ucode rate = 0x%x:0x%x\n", + hw->vmpeg4_amstream_dec_info.rate, + READ_VREG(MP4_RATE), vop_time_inc); +#endif + } + + /* frame mode with unstable pts */ + if (hw->unstable_pts && hw->chunk) { + dec_pic->pts_valid = hw->chunk->pts_valid; + dec_pic->pts = hw->chunk->pts; + dec_pic->pts64 = hw->chunk->pts64; + dec_pic->timestamp = hw->chunk->timestamp; + if ((B_PICTURE == picture_type) || + (hw->last_dec_pts == dec_pic->pts)) + dec_pic->pts_valid = 0; + + hw->last_dec_pts = dec_pic->pts; + } else if ((I_PICTURE == picture_type) || + (P_PICTURE == picture_type)) { + offset = READ_VREG(MP4_OFFSET_REG); + if (hw->chunk) { + dec_pic->pts_valid = hw->chunk->pts_valid; + dec_pic->pts = hw->chunk->pts; + dec_pic->pts64 = hw->chunk->pts64; + dec_pic->timestamp = hw->chunk->timestamp; + } else { + dec_pic->offset = offset; + if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) { + if (pts_lookup_offset_us64(PTS_TYPE_VIDEO, offset, + &pts, &frame_size, 3000, &pts_us64) == 0) { + dec_pic->pts_valid = true; + dec_pic->pts = pts; + dec_pic->pts64 = pts_us64; + hw->pts_hit++; + } else { + dec_pic->pts_valid = false; + hw->pts_missed++; + } + } + } + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "%c, offset=0x%x, pts=0x%x(%d), index=%d, used=%d\n", + GET_PIC_TYPE(picture_type), offset, dec_pic->pts, + dec_pic->pts_valid, index, hw->vfbuf_use[index]); + } else if (B_PICTURE == picture_type) { + if (hw->chunk) { + dec_pic->pts_valid = hw->chunk->pts_valid; + 
dec_pic->pts = hw->chunk->pts; + dec_pic->pts64 = hw->chunk->pts64; + dec_pic->timestamp = hw->chunk->timestamp; + } + } + + dec_pic->index = index; + dec_pic->pic_info = reg; + dec_pic->pic_type = picture_type; + dec_pic->duration = duration; + hw->vfbuf_use[index] = 0; + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "mmpeg4: pic_num: %d, index %d, type %c, pts %x\n", + hw->frame_num, index, + GET_PIC_TYPE(picture_type), + dec_pic->pts); + + /* buffer management */ + if ((picture_type == I_PICTURE) || + (picture_type == P_PICTURE)) { + index = update_ref(hw, index); + } else { + /* drop B frame or disp immediately. + * depend on if there are two ref frames + */ + if (hw->refs[1] == -1) + index = -1; + } + vmpeg4_save_hw_context(hw); + if (index < 0) { + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + disp_pic = &hw->pic[index]; + if ((hw->first_i_frame_ready == 0) && + (I_PICTURE == disp_pic->pic_type)) + hw->first_i_frame_ready = 1; + + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "disp: index=%d, pts=%x(%d), used=%d, picout=%c(dec=%c)\n", + index, disp_pic->pts, disp_pic->pts_valid, + hw->vfbuf_use[index], + GET_PIC_TYPE(disp_pic->pic_type), + GET_PIC_TYPE(picture_type)); + + if (disp_pic->pts_valid) { + hw->last_anch_pts = disp_pic->pts; + hw->last_anch_pts_us64 = disp_pic->pts64; + hw->frame_num_since_last_anch = 0; + hw->vop_time_inc_since_last_anch = 0; + } else if (vdec_stream_based(vdec)) { + disp_pic->pts = hw->last_anch_pts; + disp_pic->pts64 = hw->last_anch_pts_us64; + + if ((time_increment_resolution != 0) && + (fixed_vop_rate == 0) && + (hw->vmpeg4_amstream_dec_info.rate == 0)) { + /* variable PTS rate */ + /*bug on variable pts calc, + *do as dixed vop first if we + *have rate setting before. 
+ */ + if (vop_time_inc > hw->last_vop_time_inc) { + duration = vop_time_inc - + hw->last_vop_time_inc; + } else { + duration = vop_time_inc + + time_increment_resolution - + hw->last_vop_time_inc; + } + + hw->vop_time_inc_since_last_anch += duration; + + disp_pic->pts += hw->vop_time_inc_since_last_anch * + PTS_UNIT / time_increment_resolution; + disp_pic->pts64 += (hw->vop_time_inc_since_last_anch * + PTS_UNIT / time_increment_resolution) * + 100 / 9; + + if (hw->vop_time_inc_since_last_anch > + (1 << 14)) { + /* avoid overflow */ + hw->last_anch_pts = disp_pic->pts; + hw->last_anch_pts_us64 = disp_pic->pts64; + hw->vop_time_inc_since_last_anch = 0; + } + } else { + /* fixed VOP rate */ + hw->frame_num_since_last_anch++; + disp_pic->pts += DUR2PTS(hw->frame_num_since_last_anch * + hw->vmpeg4_amstream_dec_info.rate); + disp_pic->pts64 += DUR2PTS( + hw->frame_num_since_last_anch * + hw->vmpeg4_amstream_dec_info.rate) * 100 / 9; + + if (hw->frame_num_since_last_anch > (1 << 15)) { + /* avoid overflow */ + hw->last_anch_pts = disp_pic->pts; + hw->last_anch_pts_us64 = disp_pic->pts64; + hw->frame_num_since_last_anch = 0; + } + } + } else if (hw->unstable_pts && hw->chunk && + MPEG4_VALID_DUR(duration)) { + /* invalid pts calc */ + hw->frame_num_since_last_anch = hw->chunk_frame_count; + disp_pic->pts = hw->last_anch_pts + + DUR2PTS(hw->frame_num_since_last_anch * + duration); + disp_pic->pts64 = hw->last_anch_pts_us64 + + DUR2PTS(hw->frame_num_since_last_anch * + duration) * 100 / 9; + + if (hw->frame_num_since_last_anch > (1 << 15)) { + /* avoid overflow */ + hw->last_anch_pts = disp_pic->pts; + hw->last_anch_pts_us64 = disp_pic->pts64; + hw->frame_num_since_last_anch = 0; + } else + disp_pic->pts_valid = 1; + } + + if (vdec_frame_based(vdec) && + (hw->unstable_pts) && + MPEG4_VALID_DUR(duration)) { + + u32 threshold = DUR2PTS(duration) >> 3; + + if (disp_pic->pts <= (hw->last_pts + threshold)) { + disp_pic->pts = hw->last_pts + DUR2PTS(duration); + disp_pic->pts64 = 
hw->last_pts64 + + (DUR2PTS(duration)*100/9); + } + if (!disp_pic->pts_valid) { + disp_pic->pts = 0; + disp_pic->pts64 = 0; + disp_pic->timestamp = 0; + } + } + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_TIMEINFO, + "disp: pic_type %c, pts %d(%lld), diff %d, cnt %d, disp_pic->timestamp %llu\n", + GET_PIC_TYPE(disp_pic->pic_type), + disp_pic->pts, + disp_pic->pts64, + disp_pic->pts - hw->last_pts, + hw->chunk_frame_count, + disp_pic->timestamp); + hw->last_pts = disp_pic->pts; + hw->last_pts64 = disp_pic->pts64; + hw->frame_dur = duration; + disp_pic->duration = duration; + disp_pic->repeat_cnt = repeat_cnt; + + prepare_display_buf(hw, disp_pic); + + hw->total_frame += repeat_cnt + 1; + hw->last_vop_time_inc = vop_time_inc; + + if (vdec_frame_based(vdec) && + (frmbase_cont_bitlevel != 0) && + (hw->first_i_frame_ready)) { + u32 consume_byte, res_byte, bitcnt; + + bitcnt = READ_VREG(VIFF_BIT_CNT); + res_byte = bitcnt >> 3; + + if (hw->chunk_size > res_byte) { + if (bitcnt > frmbase_cont_bitlevel) { + consume_byte = hw->chunk_size - res_byte; + + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "%s, size %d, consume %d, res %d\n", __func__, + hw->chunk_size, consume_byte, res_byte); + + if (consume_byte > VDEC_FIFO_ALIGN) { + consume_byte -= VDEC_FIFO_ALIGN; + res_byte += VDEC_FIFO_ALIGN; + } + hw->chunk_offset += consume_byte; + hw->chunk_size = res_byte; + hw->dec_result = DEC_RESULT_UNFINISH; + hw->chunk_frame_count++; + hw->unstable_pts = 1; + } else { + hw->chunk_size = 0; + hw->chunk_offset = 0; + } + } else { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "error: bitbyte %d hw->chunk_size %d\n", res_byte, hw->chunk_size); + hw->chunk_size = 0; + hw->chunk_offset = 0; + } + } + vdec_schedule_work(&hw->work); + } + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FRAME_NUM, + "%s: frame num:%d\n", __func__, hw->frame_num); + + return IRQ_HANDLED; +} + +static irqreturn_t vmpeg4_isr(struct vdec_s *vdec, int irq) +{ + struct vdec_mpeg4_hw_s *hw = + (struct 
vdec_mpeg4_hw_s *)(vdec->private); + + if (hw->eos) + return IRQ_HANDLED; + + return IRQ_WAKE_THREAD; +} + +static void flush_output(struct vdec_mpeg4_hw_s * hw) +{ + struct pic_info_t *pic; + + if (hw->vfbuf_use[hw->refs[1]] > 0) { + pic = &hw->pic[hw->refs[1]]; + prepare_display_buf(hw, pic); + } +} + +static bool is_avaliable_buffer(struct vdec_mpeg4_hw_s *hw); + +static int notify_v4l_eos(struct vdec_s *vdec) +{ + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = &hw->vframe_dummy; + struct vdec_v4l2_buffer *fb = NULL; + int index = INVALID_IDX; + ulong expires; + + if (hw->eos) { + expires = jiffies + msecs_to_jiffies(2000); + while (!is_avaliable_buffer(hw)) { + if (time_after(jiffies, expires)) { + pr_err("[%d] MPEG4 isn't enough buff for notify eos.\n", ctx->id); + return 0; + } + } + + index = find_free_buffer(hw); + if (INVALID_IDX == index) { + pr_err("[%d] MPEG4 EOS get free buff fail.\n", ctx->id); + return 0; + } + + fb = (struct vdec_v4l2_buffer *) + hw->pic[index].v4l_ref_buf_addr; + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->v4l_mem_handle = (ulong)fb; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + ATRACE_COUNTER(hw->pts_name, vf->timestamp); + + pr_info("[%d] mpeg4 EOS notify.\n", ctx->id); + } + + return 0; +} + +static void vmpeg4_work(struct work_struct *work) +{ + struct vdec_mpeg4_hw_s *hw = + container_of(work, struct vdec_mpeg4_hw_s, work); + struct vdec_s *vdec = hw_to_vdec(hw); + + /* finished decoding one frame or error, + * notify vdec core to switch context + */ + if (hw->dec_result != DEC_RESULT_DONE) + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "vmpeg4_work: result=%d,status=%d\n", + hw->dec_result, 
hw_to_vdec(hw)->next_status); + + ATRACE_COUNTER("V_ST_DEC-work_state", hw->dec_result); + + if (hw->dec_result == DEC_RESULT_UNFINISH) { + if (!hw->ctx_valid) + hw->ctx_valid = 1; + + } else if ((hw->dec_result == DEC_RESULT_DONE) || + ((!hw->is_used_v4l) && (input_frame_based(&vdec->input)) && hw->chunk)) { + if (!hw->ctx_valid) + hw->ctx_valid = 1; + + vdec_vframe_dirty(vdec, hw->chunk); + hw->chunk = NULL; + } else if (hw->dec_result == DEC_RESULT_AGAIN + && (vdec->next_status != VDEC_STATUS_DISCONNECTED)) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + if (!vdec_has_more_input(vdec)) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } + } else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "%s: force exit\n", __func__); + if (hw->stat & STAT_ISR_REG) { + amvdec_stop(); + vdec_free_irq(VDEC_IRQ_1, (void *)hw); + hw->stat &= ~STAT_ISR_REG; + } + } else if (hw->dec_result == DEC_RESULT_EOS) { + hw->eos = 1; + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + vdec_vframe_dirty(vdec, hw->chunk); + hw->chunk = NULL; + vdec_clean_input(vdec); + flush_output(hw); + + if (hw->is_used_v4l) { + ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__); + notify_v4l_eos(vdec); + ATRACE_COUNTER("V_ST_DEC-submit_eos", 0); + } + + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s: eos flushed, frame_num %d\n", + __func__, hw->frame_num); + } else if (hw->dec_result == DEC_RESULT_ERROR_SZIE) { + if (!vdec_has_more_input(vdec)) { + hw->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&hw->work); + return; + } else { + vdec_vframe_dirty(vdec, hw->chunk); + hw->chunk = NULL; + } + } + + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + /*disable mbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 0); + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + + if (hw->is_used_v4l) { + 
struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode && + !hw->v4l_params_parsed) + vdec_v4l_write_frame_sync(ctx); + } + + /* mark itself has all HW resource released and input released */ + if (vdec->parallel_dec == 1) + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1); + else + vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + ATRACE_COUNTER("V_ST_DEC-chunk_size", 0); + + wake_up_interruptible(&hw->wait_q); + if (hw->vdec_cb) + hw->vdec_cb(vdec, hw->vdec_cb_arg); +} + +static struct vframe_s *vmpeg_vf_peek(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_s *vdec = op_arg; + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + + if (!hw) + return NULL; + atomic_add(1, &hw->peek_num); + if (kfifo_peek(&hw->display_q, &vf)) + return vf; + + return NULL; +} + +static struct vframe_s *vmpeg_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct vdec_s *vdec = op_arg; + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + + if (kfifo_get(&hw->display_q, &vf)) { + vf->index_disp = atomic_read(&hw->get_num); + atomic_add(1, &hw->get_num); + ATRACE_COUNTER(hw->disp_q_name, kfifo_len(&hw->display_q)); + return vf; + } + return NULL; +} + +static int valid_vf_check(struct vframe_s *vf, struct vdec_mpeg4_hw_s *hw) +{ + int i; + + if (!vf || (vf->index == -1)) + return 0; + + for (i = 0; i < VF_POOL_SIZE; i++) { + if (vf == &hw->vfpool[i]) + return 1; + } + + return 0; +} + + +static void vmpeg_vf_put(struct vframe_s *vf, void *op_arg) +{ + struct vdec_s *vdec = op_arg; + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + unsigned long flags; + + if (!valid_vf_check(vf, hw)) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR, + "invalid vf: %lx\n", (ulong)vf); + return ; + } + + if (vf->v4l_mem_handle != + hw->pic[vf->index].v4l_ref_buf_addr) { + hw->pic[vf->index].v4l_ref_buf_addr + = vf->v4l_mem_handle; + + 
mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL, + "MPEG4 update fb handle, old:%llx, new:%llx\n", + hw->pic[vf->index].v4l_ref_buf_addr, + vf->v4l_mem_handle); + } + + hw->vfbuf_use[vf->index]--; + atomic_add(1, &hw->put_num); + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FRAME_NUM, + "%s: put num:%d\n",__func__, hw->put_num); + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_BUFFER_DETAIL, + "index=%d, used=%d\n", vf->index, hw->vfbuf_use[vf->index]); + spin_lock_irqsave(&hw->lock, flags); + kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf); + spin_unlock_irqrestore(&hw->lock, flags); + ATRACE_COUNTER(hw->new_q_name, kfifo_len(&hw->newframe_q)); +} + +static int vmpeg_event_cb(int type, void *data, void *op_arg) +{ + struct vdec_s *vdec = op_arg; + + if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(vdec); + else + req->req_result[0] = 0xffffffff; + } + + return 0; +} + +static int vmpeg_vf_states(struct vframe_states *states, void *op_arg) +{ + unsigned long flags; + struct vdec_s *vdec = op_arg; + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + + spin_lock_irqsave(&hw->lock, flags); + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&hw->newframe_q); + states->buf_avail_num = kfifo_len(&hw->display_q); + states->buf_recycle_num = 0; + + spin_unlock_irqrestore(&hw->lock, flags); + + return 0; +} + + +static int dec_status(struct vdec_s *vdec, struct vdec_info *vstatus) +{ + struct vdec_mpeg4_hw_s *hw = + (struct vdec_mpeg4_hw_s *)vdec->private; + + if (!hw) + return -1; + + vstatus->frame_width = hw->frame_width; + vstatus->frame_height = hw->frame_height; + if (0 != hw->vmpeg4_amstream_dec_info.rate) + vstatus->frame_rate = ((DURATION_UNIT * 10 / hw->vmpeg4_amstream_dec_info.rate) % 10) < 5 ? 
+ DURATION_UNIT / hw->vmpeg4_amstream_dec_info.rate : (DURATION_UNIT / hw->vmpeg4_amstream_dec_info.rate +1); + else + vstatus->frame_rate = -1; + vstatus->error_count = READ_VREG(MP4_ERR_COUNT); + vstatus->status = hw->stat; + vstatus->frame_dur = hw->frame_dur; + vstatus->error_frame_count = READ_VREG(MP4_ERR_COUNT); + vstatus->drop_frame_count = hw->drop_frame_count; + vstatus->frame_count =hw->frame_num; + vstatus->i_decoded_frames = hw->i_decoded_frames; + vstatus->i_lost_frames = hw->i_lost_frames; + vstatus->i_concealed_frames = hw->i_concealed_frames; + vstatus->p_decoded_frames = hw->p_decoded_frames; + vstatus->p_lost_frames = hw->p_lost_frames; + vstatus->p_concealed_frames = hw->p_concealed_frames; + vstatus->b_decoded_frames = hw->b_decoded_frames; + vstatus->b_lost_frames = hw->b_lost_frames; + vstatus->b_concealed_frames = hw->b_concealed_frames; + snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name), + "%s", DRIVER_NAME); + + return 0; +} + +/****************************************/ +static int vmpeg4_workspace_init(struct vdec_mpeg4_hw_s *hw) +{ + int ret; + void *buf = NULL; + + ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, + DECODE_BUFFER_NUM_MAX, + WORKSPACE_SIZE, + DRIVER_NAME, + &hw->buf_start); + if (ret < 0) { + pr_err("mpeg4 workspace alloc size %d failed.\n", + WORKSPACE_SIZE); + return ret; + } + + /* notify ucode the buffer start address */ + buf = codec_mm_vmap(hw->buf_start, WORKSPACE_SIZE); + if (buf) { + memset(buf, 0, WORKSPACE_SIZE); + codec_mm_dma_flush(buf, + WORKSPACE_SIZE, DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(buf); + } + + WRITE_VREG(MEM_OFFSET_REG, hw->buf_start); + + return 0; +} + +static void vmpeg4_dump_state(struct vdec_s *vdec) +{ + struct vdec_mpeg4_hw_s *hw = + (struct vdec_mpeg4_hw_s *)(vdec->private); + u32 i; + mmpeg4_debug_print(DECODE_ID(hw), 0, + "====== %s\n", __func__); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "width/height (%d/%d), i_fram:%d, buffer_not_ready %d, buf_num %d\n", + 
hw->frame_width, + hw->frame_height, + hw->first_i_frame_ready, + hw->buffer_not_ready, + hw->buf_num + ); + for (i = 0; i < hw->buf_num; i++) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "index %d, used %d\n", i, hw->vfbuf_use[i]); + } + + mmpeg4_debug_print(DECODE_ID(hw), 0, + "is_framebase(%d), eos %d, state 0x%x, dec_result 0x%x dec_frm %d\n", + vdec_frame_based(vdec), + hw->eos, + hw->stat, + hw->dec_result, + hw->frame_num + ); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "is_framebase(%d), put_frm %d run %d not_run_ready %d input_empty %d,drop %d\n", + vdec_frame_based(vdec), + hw->put_num, + hw->run_count, + hw->not_run_ready, + hw->input_empty, + hw->drop_frame_count + ); + + if (!hw->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s, newq(%d/%d), dispq(%d/%d) vf peek/get/put (%d/%d/%d)\n", + __func__, + kfifo_len(&hw->newframe_q), VF_POOL_SIZE, + kfifo_len(&hw->display_q), VF_POOL_SIZE, + hw->peek_num, hw->get_num, hw->put_num + ); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "VIFF_BIT_CNT=0x%x\n", + READ_VREG(VIFF_BIT_CNT)); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_LEVEL=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_LEVEL)); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_WP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_WP)); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "VLD_MEM_VIFIFO_RP=0x%x\n", + READ_VREG(VLD_MEM_VIFIFO_RP)); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (vdec_frame_based(vdec) && + debug_enable & PRINT_FRAMEBASE_DATA) { + int jj; + if (hw->chunk && hw->chunk->block && + hw->chunk->size > 0) { + 
u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, hw->chunk->size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + mmpeg4_debug_print(DECODE_ID(hw), 0, + "frame data size 0x%x\n", + hw->chunk->size); + for (jj = 0; jj < hw->chunk->size; jj++) { + if ((jj & 0xf) == 0) + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } +} + +static void reset_process_time(struct vdec_mpeg4_hw_s *hw) +{ + if (hw->start_process_time) { + unsigned process_time = + 1000 * (jiffies - hw->start_process_time) / HZ; + hw->start_process_time = 0; + if (process_time > max_process_time[DECODE_ID(hw)]) + max_process_time[DECODE_ID(hw)] = process_time; + } +} +static void start_process_time(struct vdec_mpeg4_hw_s *hw) +{ + hw->decode_timeout_count = 2; + hw->start_process_time = jiffies; +} + +static void timeout_process(struct vdec_mpeg4_hw_s *hw) +{ + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s decoder timeout %d\n", __func__, hw->timeout_cnt); + if (vdec_frame_based((hw_to_vdec(hw)))) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s frame_num %d, chunk size 0x%x, chksum 0x%x\n", + __func__, + hw->frame_num, + hw->chunk->size, + get_data_check_sum(hw, hw->chunk->size)); + } + hw->timeout_cnt++; + /* timeout: data droped, frame_num inaccurate*/ + hw->frame_num++; + reset_process_time(hw); + hw->first_i_frame_ready = 0; + hw->dec_result = DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); +} + + +static void check_timer_func(struct timer_list *timer) +{ + struct vdec_mpeg4_hw_s *hw = container_of(timer, + struct vdec_mpeg4_hw_s, 
check_timer); + unsigned int timeout_val = decode_timeout_val; + + if (radr != 0) { + if (rval != 0) { + WRITE_VREG(radr, rval); + pr_info("WRITE_VREG(%x,%x)\n", radr, rval); + } else + pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr)); + rval = 0; + radr = 0; + } + + if (((debug_enable & PRINT_FLAG_TIMEOUT_STATUS) == 0) && + (timeout_val > 0) && + (hw->start_process_time > 0) && + ((1000 * (jiffies - hw->start_process_time) / HZ) + > timeout_val)) { + if (hw->last_vld_level == READ_VREG(VLD_MEM_VIFIFO_LEVEL)) { + if (hw->decode_timeout_count > 0) + hw->decode_timeout_count--; + if (hw->decode_timeout_count == 0) + timeout_process(hw); + } + hw->last_vld_level = READ_VREG(VLD_MEM_VIFIFO_LEVEL); + } + + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static int vmpeg4_hw_ctx_restore(struct vdec_mpeg4_hw_s *hw) +{ + int index; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + int i; + + if (!hw->init_flag) + vmpeg4_workspace_init(hw); + + if (hw->v4l_params_parsed) { + struct vdec_pic_info pic; + + if (!hw->buf_num) { + vdec_v4l_get_pic_info(v4l2_ctx, &pic); + hw->buf_num = pic.dpb_frames + + pic.dpb_margin; + if (hw->buf_num > DECODE_BUFFER_NUM_MAX) + hw->buf_num = DECODE_BUFFER_NUM_MAX; + } + + index = find_free_buffer(hw); + if ((index < 0) || (index >= hw->buf_num)) + return -1; + + WRITE_VREG(MEM_OFFSET_REG, hw->buf_start); + + for (i = 0; i < hw->buf_num; i++) { + if (hw->pic[i].v4l_ref_buf_addr) { + config_cav_lut(canvas_y(hw->canvas_spec[i]), + &hw->canvas_config[i][0], VDEC_1); + config_cav_lut(canvas_u(hw->canvas_spec[i]), + &hw->canvas_config[i][1], VDEC_1); + } + } + + /* prepare REF0 & REF1 + * points to the past two IP buffers + * prepare REC_CANVAS_ADDR and ANC2_CANVAS_ADDR + * points to the output buffer + */ + if (hw->refs[0] == -1) { + WRITE_VREG(MREG_REF0, (hw->refs[1] == -1) ? 
0xffffffff : + hw->canvas_spec[hw->refs[1]]); + } else { + WRITE_VREG(MREG_REF0, hw->canvas_spec[hw->refs[0]]); + } + WRITE_VREG(MREG_REF1, (hw->refs[1] == -1) ? 0xffffffff : + hw->canvas_spec[hw->refs[1]]); + if ((hw->is_used_v4l) && (index == 0xffffff)) { + WRITE_VREG(REC_CANVAS_ADDR, 0xffffff); + WRITE_VREG(ANC2_CANVAS_ADDR, 0xffffff); + } else { + WRITE_VREG(REC_CANVAS_ADDR, hw->canvas_spec[index]); + WRITE_VREG(ANC2_CANVAS_ADDR, hw->canvas_spec[index]); + } + + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RESTORE, + "restore ref0=0x%x, ref1=0x%x, rec=0x%x, ctx_valid=%d,index=%d\n", + READ_VREG(MREG_REF0), + READ_VREG(MREG_REF1), + READ_VREG(REC_CANVAS_ADDR), + hw->ctx_valid, index); + } + + /* disable PSCALE for hardware sharing */ + WRITE_VREG(PSCALE_CTRL, 0); + + WRITE_VREG(MREG_BUFFEROUT, 0x10000); + + /* clear mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_CLR_REG, 1); + + /* enable mailbox interrupt */ + WRITE_VREG(ASSIST_MBOX1_MASK, 1); + + /* clear repeat count */ + WRITE_VREG(MP4_NOT_CODED_CNT, 0); + +#ifdef NV21 + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17); +#endif + + /* cbcr_merge_swap_en */ + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + else + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + } else { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + else + CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16); + } + +#if 1/* /MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + WRITE_VREG(MDEC_PIC_DC_THRESH, 0x404038aa); +#endif + + WRITE_VREG(MP4_PIC_WH, (hw->ctx_valid) ? 
+ hw->reg_mp4_pic_wh : + ((hw->frame_width << 16) | hw->frame_height)); + WRITE_VREG(MP4_SYS_RATE, hw->vmpeg4_amstream_dec_info.rate); + + if (hw->ctx_valid) { + WRITE_VREG(DC_AC_CTRL, hw->reg_dc_ac_ctrl); + WRITE_VREG(IQIDCT_CONTROL, hw->reg_iqidct_control); + WRITE_VREG(RESYNC_MARKER_LENGTH, hw->reg_resync_marker_length); + WRITE_VREG(RV_AI_MB_COUNT, hw->reg_rv_ai_mb_count); + } + WRITE_VREG(MPEG1_2_REG, (hw->ctx_valid) ? hw->reg_mpeg1_2_reg : 1); + WRITE_VREG(VCOP_CTRL_REG, hw->reg_vcop_ctrl_reg); + WRITE_VREG(PIC_HEAD_INFO, hw->reg_pic_head_info); + WRITE_VREG(SLICE_QP, hw->reg_slice_qp); + WRITE_VREG(MB_INFO, hw->reg_mb_info); + + if (vdec_frame_based(hw_to_vdec(hw)) && hw->chunk) { + /* frame based input */ + WRITE_VREG(MREG_INPUT, (hw->chunk->offset & 7) | (1<<7) | + (hw->ctx_valid<<6)); + } else { + /* stream based input */ + WRITE_VREG(MREG_INPUT, (hw->ctx_valid<<6)); + } + + return 0; +} + +static void vmpeg4_local_init(struct vdec_mpeg4_hw_s *hw) +{ + int i; + + hw->vmpeg4_ratio = hw->vmpeg4_amstream_dec_info.ratio; + + hw->vmpeg4_ratio64 = hw->vmpeg4_amstream_dec_info.ratio64; + + hw->vmpeg4_rotation = + (((unsigned long)hw->vmpeg4_amstream_dec_info.param) >> 16) & 0xffff; + hw->sys_mp4_rate = hw->vmpeg4_amstream_dec_info.rate; + if (hw->is_used_v4l) { + hw->frame_width = 0; + hw->frame_height = 0; + } else { + hw->frame_width = hw->vmpeg4_amstream_dec_info.width; + hw->frame_height = hw->vmpeg4_amstream_dec_info.height; + } + + hw->frame_dur = 0; + hw->frame_prog = 0; + hw->unstable_pts = + (((unsigned long) hw->vmpeg4_amstream_dec_info.param & 0x40) >> 6); + mmpeg4_debug_print(DECODE_ID(hw), 0, + "param = 0x%x unstable_pts = %d\n", + hw->vmpeg4_amstream_dec_info.param, + hw->unstable_pts); + hw->last_dec_pts = -1; + + hw->total_frame = 0; + + hw->last_anch_pts = 0; + + hw->last_anch_pts_us64 = 0; + + hw->last_vop_time_inc = hw->last_duration = 0; + + hw->vop_time_inc_since_last_anch = 0; + hw->last_pts = 0; + hw->last_pts64 = 0; + 
hw->frame_num_since_last_anch = 0; + hw->frame_num = 0; + hw->run_count = 0; + hw->not_run_ready = 0; + hw->input_empty = 0; + atomic_set(&hw->peek_num, 0); + atomic_set(&hw->get_num, 0); + atomic_set(&hw->put_num, 0); + + hw->pts_hit = hw->pts_missed = hw->pts_i_hit = hw->pts_i_missed = 0; + hw->refs[0] = -1; + hw->refs[1] = -1; + hw->first_i_frame_ready = 0; + hw->drop_frame_count = 0; + hw->buffer_not_ready = 0; + hw->init_flag = 0; + hw->dec_result = DEC_RESULT_NONE; + hw->timeout_cnt = 0; + + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->vfbuf_use[i] = 0; + + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &hw->vfpool[i]; + + hw->vfpool[i].index = DECODE_BUFFER_NUM_MAX; + kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf); + } + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + hw->mm_blk_handle = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + 0, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER); + + INIT_WORK(&hw->work, vmpeg4_work); + + init_waitqueue_head(&hw->wait_q); +} + +static s32 vmmpeg4_init(struct vdec_mpeg4_hw_s *hw) +{ + int trickmode_fffb = 0; + int size = -1, fw_size = 0x1000 * 16; + struct firmware_s *fw = NULL; + + fw = vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + if (hw->vmpeg4_amstream_dec_info.format == + VIDEO_DEC_FORMAT_MPEG4_5) { + size = get_firmware_data(VIDEO_DEC_MPEG4_5_MULTI, fw->data); + strncpy(fw->name, "mmpeg4_mc_5", sizeof(fw->name)); + } else if (hw->vmpeg4_amstream_dec_info.format == + VIDEO_DEC_FORMAT_H263) { + size = get_firmware_data(VIDEO_DEC_H263_MULTI, fw->data); + strncpy(fw->name, "mh263_mc", sizeof(fw->name)); + } else + pr_err("unsupport mpeg4 sub format %d\n", + hw->vmpeg4_amstream_dec_info.format); + pr_info("mmpeg4 get fw %s, size %x\n", fw->name, size); + if (size < 0) { + 
pr_err("get firmware failed."); + vfree(fw); + return -1; + } + + fw->len = size; + hw->fw = fw; + + query_video_status(0, &trickmode_fffb); + + pr_info("%s\n", __func__); + + //amvdec_enable(); + + timer_setup(&hw->check_timer, check_timer_func, 0); + //init_timer(&hw->check_timer); + //hw->check_timer.data = (unsigned long)hw; + //hw->check_timer.function = check_timer_func; + hw->check_timer.expires = jiffies + CHECK_INTERVAL; + hw->stat |= STAT_TIMER_ARM; + hw->eos = 0; + WRITE_VREG(DECODE_STOP_POS, udebug_flag); + + vmpeg4_local_init(hw); + wmb(); + + return 0; +} + +static bool is_avaliable_buffer(struct vdec_mpeg4_hw_s *hw) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int i, free_count = 0; + int used_count = 0; + + if ((hw->buf_num == 0) || + (ctx->cap_pool.dec < hw->buf_num)) { + if (ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) { + free_count = + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) + 1; + } + } + + for (i = 0; i < hw->buf_num; ++i) { + if ((hw->vfbuf_use[i] == 0) && + hw->pic[i].v4l_ref_buf_addr) { + free_count++; + } else if (hw->pic[i].v4l_ref_buf_addr) + used_count++; + } + + ATRACE_COUNTER("V_ST_DEC-free_buff_count", free_count); + ATRACE_COUNTER("V_ST_DEC-used_buff_count", used_count); + + return free_count >= run_ready_min_buf_num ? 
1 : 0; +} + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct vdec_mpeg4_hw_s *hw = + (struct vdec_mpeg4_hw_s *)vdec->private; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int ret = 0; + + if (hw->eos) + return 0; + + if (vdec_stream_based(vdec) && (hw->init_flag == 0) + && pre_decode_buf_level != 0) { + u32 rp, wp, level; + + rp = STBUF_READ(&vdec->vbuf, get_rp); + wp = STBUF_READ(&vdec->vbuf, get_wp); + if (wp < rp) + level = vdec->input.size + wp - rp; + else + level = wp - rp; + if (level < pre_decode_buf_level) { + hw->not_run_ready++; + return 0; + } + } + + if (hw->v4l_params_parsed) { + ret = is_avaliable_buffer(hw) ? 1 : 0; + } else { + ret = ctx->v4l_resolution_change ? 0 : 1; + } + + hw->not_run_ready = 0; + hw->buffer_not_ready = 0; + + return ret ? CORE_MASK_VDEC_1 : 0; +} + +static unsigned char get_data_check_sum + (struct vdec_mpeg4_hw_s *hw, int size) +{ + int jj; + int sum = 0; + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk->offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk->offset; + + for (jj = 0; jj < size; jj++) + sum += data[jj]; + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static void run(struct vdec_s *vdec, unsigned long mask, + void (*callback)(struct vdec_s *, void *), void *arg) +{ + struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private; + int size = 0, ret = 0; + if (!hw->vdec_pg_enable_flag) { + hw->vdec_pg_enable_flag = 1; + amvdec_enable(); + } + hw->run_count++; + hw->vdec_cb_arg = arg; + hw->vdec_cb = callback; + vdec_reset_core(vdec); + + if ((vdec_frame_based(vdec)) && + (hw->dec_result == DEC_RESULT_UNFINISH)) { + vmpeg4_prepare_input(hw); + size = hw->chunk_size; + } else { + size = vdec_prepare_input(vdec, &hw->chunk); + if (size < 4) { /*less than start code size 00 00 01 xx*/ + hw->input_empty++; + 
hw->dec_result = DEC_RESULT_ERROR_SZIE; + vdec_schedule_work(&hw->work); + return; + } + if ((vdec_frame_based(vdec)) && + (hw->chunk != NULL)) { + hw->chunk_offset = hw->chunk->offset; + hw->chunk_size = hw->chunk->size; + hw->chunk_frame_count = 0; + } + } + + ATRACE_COUNTER("V_ST_DEC-chunk_size", size); + + if (vdec_frame_based(vdec) && !vdec_secure(vdec)) { + /* HW needs padding (NAL start) for frame ending */ + char* tail = (char *)hw->chunk->block->start_virt; + + tail += hw->chunk->offset + hw->chunk->size; + tail[0] = 0; + tail[1] = 0; + tail[2] = 1; + tail[3] = 0xb6; + codec_mm_dma_flush(tail, 4, DMA_TO_DEVICE); + } + if (vdec_frame_based(vdec) && + (debug_enable & 0xc00)) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->chunk_offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->chunk_offset; + + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS, + "%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. 
%02x %02x %02x %02x\n", + __func__, size, get_data_check_sum(hw, size), + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + + if (debug_enable & PRINT_FRAMEBASE_DATA) { + int jj; + + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%06x:", jj); + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + mmpeg4_debug_print(DECODE_ID(hw), + PRINT_FRAMEBASE_DATA, + "\n"); + } + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "%s, size=%d, %x %x %x %x %x\n", + __func__, size, + READ_VREG(VLD_MEM_VIFIFO_LEVEL), + READ_VREG(VLD_MEM_VIFIFO_WP), + READ_VREG(VLD_MEM_VIFIFO_RP), + STBUF_READ(&vdec->vbuf, get_rp), + STBUF_READ(&vdec->vbuf, get_wp)); + + hw->dec_result = DEC_RESULT_NONE; + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else { + ret = amvdec_vdec_loadmc_buf_ex(VFORMAT_MPEG4,hw->fw->name, vdec, + hw->fw->data, hw->fw->len); + if (ret < 0) { + pr_err("[%d] %s: the %s fw loading failed, err: %x\n", vdec->id, + hw->fw->name, tee_enabled() ? 
"TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_MPEG4; + } + if (vmpeg4_hw_ctx_restore(hw) < 0) { + hw->dec_result = DEC_RESULT_ERROR; + mmpeg4_debug_print(DECODE_ID(hw), 0, + "amvdec_mpeg4: error HW context restore\n"); + vdec_schedule_work(&hw->work); + return; + } + if (vdec_frame_based(vdec)) { + size = hw->chunk_size + + (hw->chunk_offset & (VDEC_FIFO_ALIGN - 1)); + WRITE_VREG(VIFF_BIT_CNT, size * 8); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = hw->chunk->size; + } + hw->input_empty = 0; + hw->last_vld_level = 0; + start_process_time(hw); + vdec_enable_input(vdec); + /* wmb before ISR is handled */ + wmb(); + + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + amvdec_start(); + hw->stat |= STAT_VDEC_RUN; + hw->stat |= STAT_TIMER_ARM; + hw->init_flag = 1; + mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL); +} + +static int vmpeg4_stop(struct vdec_mpeg4_hw_s *hw) +{ + cancel_work_sync(&hw->work); + + if (hw->mm_blk_handle) { + decoder_bmmu_box_free(hw->mm_blk_handle); + hw->mm_blk_handle = NULL; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->check_timer); + hw->stat &= ~STAT_TIMER_ARM; + } + + if (hw->fw) { + vfree(hw->fw); + hw->fw = NULL; + } + return 0; +} +static void reset(struct vdec_s *vdec) +{ + struct vdec_mpeg4_hw_s *hw = + (struct vdec_mpeg4_hw_s *)vdec->private; + int i; + + if (hw->stat & STAT_VDEC_RUN) { + amvdec_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + hw->dec_result = DEC_RESULT_NONE; + flush_work(&hw->work); + reset_process_time(hw); + + for (i = 0; i < hw->buf_num; i++) { + hw->pic[i].v4l_ref_buf_addr = 0; + hw->vfbuf_use[i] = 0; + } + + INIT_KFIFO(hw->display_q); + INIT_KFIFO(hw->newframe_q); + + for (i = 0; i < VF_POOL_SIZE; i++) { + const struct vframe_s *vf = &hw->vfpool[i]; + + memset((void *)vf, 0, sizeof(*vf)); + hw->vfpool[i].index = -1; + kfifo_put(&hw->newframe_q, vf); + } + + 
for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + vdec->free_canvas_ex(canvas_y(hw->canvas_spec[i]), vdec->id); + vdec->free_canvas_ex(canvas_u(hw->canvas_spec[i]), vdec->id); + hw->canvas_spec[i] = 0xffffff; + } + + hw->refs[0] = -1; + hw->refs[1] = -1; + hw->ctx_valid = 0; + hw->eos = 0; + hw->buf_num = 0; + hw->frame_width = 0; + hw->frame_height = 0; + hw->first_i_frame_ready = 0; + + atomic_set(&hw->peek_num, 0); + atomic_set(&hw->get_num, 0); + atomic_set(&hw->put_num, 0); + + pr_info("mpeg4: reset.\n"); +} + +static int vmpeg4_set_trickmode(struct vdec_s *vdec, unsigned long trickmode) +{ + struct vdec_mpeg4_hw_s *hw = + (struct vdec_mpeg4_hw_s *)vdec->private; + if (!hw) + return 0; + + if (trickmode == TRICKMODE_I) { + hw->i_only = 0x3; + trickmode_i = 1; + } else if (trickmode == TRICKMODE_NONE) { + hw->i_only = 0x0; + trickmode_i = 0; + } + return 0; +} + +static int ammvdec_mpeg4_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + struct vdec_mpeg4_hw_s *hw = NULL; + int config_val = 0; + + if (pdata == NULL) { + pr_err("%s memory resource undefined.\n", __func__); + return -EFAULT; + } + + hw = vmalloc(sizeof(struct vdec_mpeg4_hw_s)); + if (hw == NULL) { + pr_err("\namvdec_mpeg4 decoder driver alloc failed\n"); + return -ENOMEM; + } + memset(hw, 0, sizeof(struct vdec_mpeg4_hw_s)); + + /* the ctx from v4l2 driver. 
*/ + hw->v4l2_ctx = pdata->private; + + pdata->private = hw; + pdata->dec_status = dec_status; + /* pdata->set_trickmode = set_trickmode; */ + pdata->set_trickmode = vmpeg4_set_trickmode; + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vmpeg4_isr; + pdata->threaded_irq_handler = vmpeg4_isr_thread_fn; + pdata->dump_state = vmpeg4_dump_state; + + snprintf(hw->vdec_name, sizeof(hw->vdec_name), + "mpeg4-%d", pdev->id); + snprintf(hw->pts_name, sizeof(hw->pts_name), + "%s-timestamp", hw->vdec_name); + snprintf(hw->new_q_name, sizeof(hw->new_q_name), + "%s-newframe_q", hw->vdec_name); + snprintf(hw->disp_q_name, sizeof(hw->disp_q_name), + "%s-dispframe_q", hw->vdec_name); + + if (pdata->use_vfm_path) + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + PROVIDER_NAME ".%02x", pdev->id & 0xff); + + platform_set_drvdata(pdev, pdata); + hw->platform_dev = pdev; + + if (((debug_enable & IGNORE_PARAM_FROM_CONFIG) == 0) && pdata->config_len) { + mmpeg4_debug_print(DECODE_ID(hw), 0, "pdata->config: %s\n", pdata->config); + if (get_config_int(pdata->config, "parm_v4l_buffer_margin", + &config_val) == 0) + hw->dynamic_buf_num_margin = config_val; + else + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + } else { + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + } + + if (pdata->parallel_dec == 1) { + int i; + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) + hw->canvas_spec[i] = 0xffffff; + } + + hw->blkmode = pdata->canvas_mode; + + if (pdata->sys_info) { + hw->vmpeg4_amstream_dec_info = *pdata->sys_info; + if ((hw->vmpeg4_amstream_dec_info.height != 0) && + (hw->vmpeg4_amstream_dec_info.width > + (MAX_MPEG4_SUPPORT_SIZE/hw->vmpeg4_amstream_dec_info.height))) { + pr_info("ammvdec_mpeg4: oversize, unsupport: %d*%d\n", + hw->vmpeg4_amstream_dec_info.width, + hw->vmpeg4_amstream_dec_info.height); + pdata->dec_status = 
NULL; + vfree((void *)hw); + hw = NULL; + return -EFAULT; + } + mmpeg4_debug_print(DECODE_ID(hw), 0, + "sysinfo: %d x %d, rate: %d\n", + hw->vmpeg4_amstream_dec_info.width, + hw->vmpeg4_amstream_dec_info.height, + hw->vmpeg4_amstream_dec_info.rate); + } + if (((debug_enable & IGNORE_PARAM_FROM_CONFIG) == 0) && pdata->config_len) { + mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW, + "pdata->config: %s\n", pdata->config); + if (get_config_int(pdata->config, "parm_v4l_buffer_margin", + &config_val) == 0) + hw->dynamic_buf_num_margin = config_val; + else + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hw->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hw->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hw->is_used_v4l = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + hw->blkmode = config_val; + + /*if (get_config_int(pdata->config, + "parm_v4l_duration", + &config_val) == 0) + vdec_frame_rate_uevent(config_val);*/ + } else + hw->dynamic_buf_num_margin = dynamic_buf_num_margin; + + if (hw->is_used_v4l) { + vf_provider_init(&pdata->vframe_provider, + pdata->vf_provider_name, &vf_provider_ops, pdata); + } + + if (vmmpeg4_init(hw) < 0) { + pr_err("%s init failed.\n", __func__); + + if (hw) { + vfree((void *)hw); + hw = NULL; + } + pdata->dec_status = NULL; + return -ENODEV; + } + vdec_set_prepare_level(pdata, start_decode_buf_level); + + vdec_set_vframe_comm(pdata, DRIVER_NAME); + + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_VDEC_1); + else { + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + } + + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s end.\n", __func__); + return 0; +} + +static int ammvdec_mpeg4_remove(struct platform_device 
*pdev) +{ + struct vdec_mpeg4_hw_s *hw = + (struct vdec_mpeg4_hw_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec = hw_to_vdec(hw); + int i; + + if (vdec->next_status == VDEC_STATUS_DISCONNECTED + && (vdec->status == VDEC_STATUS_ACTIVE)) { + mmpeg4_debug_print(DECODE_ID(hw), 0, + "%s force exit %d\n", __func__, __LINE__); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + wait_event_interruptible_timeout(hw->wait_q, + (vdec->status == VDEC_STATUS_CONNECTED), + msecs_to_jiffies(1000)); /* wait for work done */ + } + + vmpeg4_stop(hw); + + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1); + else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + + if (vdec->parallel_dec == 1) { + for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) { + vdec->free_canvas_ex(canvas_y(hw->canvas_spec[i]), vdec->id); + vdec->free_canvas_ex(canvas_u(hw->canvas_spec[i]), vdec->id); + } + } + + mmpeg4_debug_print(DECODE_ID(hw), 0, "%s\n", __func__); + vfree((void *)hw); + hw = NULL; + + return 0; +} + +/****************************************/ + +static struct platform_driver ammvdec_mpeg4_driver = { + .probe = ammvdec_mpeg4_probe, + .remove = ammvdec_mpeg4_remove, +#ifdef CONFIG_PM + .suspend = amvdec_suspend, + .resume = amvdec_resume, +#endif + .driver = { + .name = DRIVER_NAME, + } +}; + +static struct codec_profile_t amvdec_mpeg4_profile = { + .name = "MPEG4-V4L", + .profile = "" +}; + +static int __init ammvdec_mpeg4_driver_init_module(void) +{ + pr_info("%s \n", __func__); + + if (platform_driver_register(&ammvdec_mpeg4_driver)) { + pr_err("failed to register ammvdec_mpeg4 driver\n"); + return -ENODEV; + } + vcodec_profile_register(&amvdec_mpeg4_profile); + vcodec_feature_register(VFORMAT_MPEG4, 1); + return 0; +} + +static void __exit ammvdec_mpeg4_driver_remove_module(void) +{ + pr_info("ammvdec_mpeg4 module 
remove.\n"); + + platform_driver_unregister(&ammvdec_mpeg4_driver); +} + +/****************************************/ +module_param(debug_enable, uint, 0664); +MODULE_PARM_DESC(debug_enable, + "\n ammvdec_mpeg4 debug enable\n"); + +module_param(frmbase_cont_bitlevel, uint, 0664); +MODULE_PARM_DESC(frmbase_cont_bitlevel, "\nfrmbase_cont_bitlevel\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\nradr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\nrval\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, "\n ammvdec_mpeg4 decode_timeout_val\n"); + +module_param_array(max_process_time, uint, &max_decode_instance_num, 0664); + +module_param(pre_decode_buf_level, int, 0664); +MODULE_PARM_DESC(pre_decode_buf_level, + "\n ammvdec_mpeg4 pre_decode_buf_level\n"); + +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n ammvdec_mpeg4 start_decode_buf_level\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n ammvdec_mpeg4 udebug_flag\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n ammvdec_mpeg4 without_display_mode\n"); + +module_init(ammvdec_mpeg4_driver_init_module); +module_exit(ammvdec_mpeg4_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC MPEG4 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>"); +
diff --git a/drivers/frame_provider/decoder_v4l/vav1/Makefile b/drivers/frame_provider/decoder_v4l/vav1/Makefile new file mode 100644 index 0000000..7d9a267 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/vav1/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_AV1) += amvdec_av1_v4l.o +amvdec_av1_v4l-objs += vav1.o av1_bufmgr.o av1_film_grain.o
diff --git a/drivers/frame_provider/decoder_v4l/vav1/aom_av1_define.h b/drivers/frame_provider/decoder_v4l/vav1/aom_av1_define.h new file mode 100644 index 0000000..69e63f1 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/vav1/aom_av1_define.h
@@ -0,0 +1,190 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +enum NalUnitType +{ + NAL_UNIT_CODED_SLICE_TRAIL_N = 0, // 0 + NAL_UNIT_CODED_SLICE_TRAIL_R, // 1 + + NAL_UNIT_CODED_SLICE_TSA_N, // 2 + NAL_UNIT_CODED_SLICE_TLA, // 3 // Current name in the spec: TSA_R + + NAL_UNIT_CODED_SLICE_STSA_N, // 4 + NAL_UNIT_CODED_SLICE_STSA_R, // 5 + + NAL_UNIT_CODED_SLICE_RADL_N, // 6 + NAL_UNIT_CODED_SLICE_DLP, // 7 // Current name in the spec: RADL_R + + NAL_UNIT_CODED_SLICE_RASL_N, // 8 + NAL_UNIT_CODED_SLICE_TFD, // 9 // Current name in the spec: RASL_R + + NAL_UNIT_RESERVED_10, + NAL_UNIT_RESERVED_11, + NAL_UNIT_RESERVED_12, + NAL_UNIT_RESERVED_13, + NAL_UNIT_RESERVED_14, + NAL_UNIT_RESERVED_15, + + NAL_UNIT_CODED_SLICE_BLA, // 16 // Current name in the spec: BLA_W_LP + NAL_UNIT_CODED_SLICE_BLANT, // 17 // Current name in the spec: BLA_W_DLP + NAL_UNIT_CODED_SLICE_BLA_N_LP, // 18 + NAL_UNIT_CODED_SLICE_IDR, // 19 // Current name in the spec: IDR_W_DLP + NAL_UNIT_CODED_SLICE_IDR_N_LP, // 20 + NAL_UNIT_CODED_SLICE_CRA, // 21 + NAL_UNIT_RESERVED_22, + NAL_UNIT_RESERVED_23, + + NAL_UNIT_RESERVED_24, + NAL_UNIT_RESERVED_25, + NAL_UNIT_RESERVED_26, + NAL_UNIT_RESERVED_27, + NAL_UNIT_RESERVED_28, + NAL_UNIT_RESERVED_29, + 
NAL_UNIT_RESERVED_30, + NAL_UNIT_RESERVED_31, + + NAL_UNIT_VPS, // 32 + NAL_UNIT_SPS, // 33 + NAL_UNIT_PPS, // 34 + NAL_UNIT_ACCESS_UNIT_DELIMITER, // 35 + NAL_UNIT_EOS, // 36 + NAL_UNIT_EOB, // 37 + NAL_UNIT_FILLER_DATA, // 38 + NAL_UNIT_SEI, // 39 Prefix SEI + NAL_UNIT_SEI_SUFFIX, // 40 Suffix SEI + NAL_UNIT_RESERVED_41, + NAL_UNIT_RESERVED_42, + NAL_UNIT_RESERVED_43, + NAL_UNIT_RESERVED_44, + NAL_UNIT_RESERVED_45, + NAL_UNIT_RESERVED_46, + NAL_UNIT_RESERVED_47, + NAL_UNIT_UNSPECIFIED_48, + NAL_UNIT_UNSPECIFIED_49, + NAL_UNIT_UNSPECIFIED_50, + NAL_UNIT_UNSPECIFIED_51, + NAL_UNIT_UNSPECIFIED_52, + NAL_UNIT_UNSPECIFIED_53, + NAL_UNIT_UNSPECIFIED_54, + NAL_UNIT_UNSPECIFIED_55, + NAL_UNIT_UNSPECIFIED_56, + NAL_UNIT_UNSPECIFIED_57, + NAL_UNIT_UNSPECIFIED_58, + NAL_UNIT_UNSPECIFIED_59, + NAL_UNIT_UNSPECIFIED_60, + NAL_UNIT_UNSPECIFIED_61, + NAL_UNIT_UNSPECIFIED_62, + NAL_UNIT_UNSPECIFIED_63, + NAL_UNIT_INVALID, +}; + +int forbidden_zero_bit; +int m_nalUnitType; +int m_reservedZero6Bits; +int m_temporalId; + +//--------------------------------------------------- +// Amrisc Software Interrupt +//--------------------------------------------------- +#define AMRISC_STREAM_EMPTY_REQ 0x01 +#define AMRISC_PARSER_REQ 0x02 +#define AMRISC_MAIN_REQ 0x04 + +//--------------------------------------------------- +// AOM_AV1_DEC_STATUS (HEVC_DEC_STATUS) define +//--------------------------------------------------- + /*command*/ +#define AOM_AV1_DEC_IDLE 0 +#define AOM_AV1_DEC_FRAME_HEADER 1 +#define AOM_AV1_DEC_TILE_END 2 +#define AOM_AV1_DEC_TG_END 3 +#define AOM_AV1_DEC_LCU_END 4 +#define AOM_AV1_DECODE_SLICE 5 +#define AOM_AV1_SEARCH_HEAD 6 +#define AOM_AV1_DUMP_LMEM 7 +#define AOM_AV1_FGS_PARAM_CONT 8 +#define AOM_AV1_FGS_PARAM_CONT 8 +#define AOM_AV1_PIC_END_CONT 9 + /*status*/ +#define AOM_AV1_DEC_PIC_END 0xe0 + /*AOM_AV1_FGS_PARA: + Bit[11] - 0 Read, 1 - Write + Bit[10:8] - film_grain_params_ref_idx, For Write request + */ +#define AOM_AV1_FGS_PARAM 0xe1 +#define 
AOM_AV1_DEC_PIC_END_PRE 0xe2 +#define AOM_AV1_HEAD_PARSER_DONE 0xf0 +#define AOM_AV1_HEAD_SEARCH_DONE 0xf1 +#define AOM_AV1_SEQ_HEAD_PARSER_DONE 0xf2 +#define AOM_AV1_FRAME_HEAD_PARSER_DONE 0xf3 +#define AOM_AV1_FRAME_PARSER_DONE 0xf4 +#define AOM_AV1_REDUNDANT_FRAME_HEAD_PARSER_DONE 0xf5 +#define HEVC_ACTION_DONE 0xff + + +//--------------------------------------------------- +// Include "parser_cmd.h" +//--------------------------------------------------- +#define PARSER_CMD_SKIP_CFG_0 0x0000090b + +#define PARSER_CMD_SKIP_CFG_1 0x1b14140f + +#define PARSER_CMD_SKIP_CFG_2 0x001b1910 + +#define PARSER_CMD_NUMBER 37 + +unsigned short parser_cmd[PARSER_CMD_NUMBER] = { +0x0401, +0x8401, +0x0800, +0x0402, +0x9002, +0x1423, +0x8CC3, +0x1423, +0x8804, +0x9825, +0x0800, +0x04FE, +0x8406, +0x8411, +0x1800, +0x8408, +0x8409, +0x8C2A, +0x9C2B, +0x1C00, +0x840F, +0x8407, +0x8000, +0x8408, +0x2000, +0xA800, +0x8410, +0x04DE, +0x840C, +0x840D, +0xAC00, +0xA000, +0x08C0, +0x08E0, +0xA40E, +0xFC00, +0x7C00 +};
diff --git a/drivers/frame_provider/decoder_v4l/vav1/av1_bufmgr.c b/drivers/frame_provider/decoder_v4l/vav1/av1_bufmgr.c new file mode 100755 index 0000000..a863654 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/vav1/av1_bufmgr.c
@@ -0,0 +1,3426 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#else +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/amlogic/media/canvas/canvas.h> + +#undef pr_info +#define pr_info printk + +#define __COMPARE(context, p1, p2) comp(p1, p2) +#define __SHORTSORT(lo, hi, width, comp, context) \ + shortsort(lo, hi, width, comp) +#define CUTOFF 8 /* testing shows that this is good value */ +#define STKSIZ (8*sizeof(void *) - 2) + +#undef swap +static void swap(char *a, char *b, size_t width) +{ + char tmp; + + if (a != b) + /* Do the swap one character at a time to avoid potential + * alignment problems. + */ + while (width--) { + tmp = *a; + *a++ = *b; + *b++ = tmp; + } +} + +static void shortsort(char *lo, char *hi, size_t width, + int (*comp)(const void *, const void *)) +{ + char *p, *max; + + /* Note: in assertions below, i and j are alway inside original + * bound of array to sort. 
+ */ + while (hi > lo) { + /* A[i] <= A[j] for i <= j, j > hi */ + max = lo; + for (p = lo + width; p <= hi; p += width) { + /* A[i] <= A[max] for lo <= i < p */ + if (__COMPARE(context, p, max) > 0) + max = p; + /* A[i] <= A[max] for lo <= i <= p */ + } + /* A[i] <= A[max] for lo <= i <= hi */ + swap(max, hi, width); + + /* A[i] <= A[hi] for i <= hi, so A[i] <= A[j] for i <= j, + * j >= hi + */ + hi -= width; + + /* A[i] <= A[j] for i <= j, j > hi, loop top condition + * established + */ + } +} + +static void qsort(void *base, size_t num, size_t width, + int (*comp)(const void *, const void *)) +{ + char *lo, *hi; /* ends of sub-array currently sorting */ + char *mid; /* points to middle of subarray */ + char *loguy, *higuy; /* traveling pointers for partition step */ + size_t size; /* size of the sub-array */ + char *lostk[STKSIZ], *histk[STKSIZ]; + int stkptr; + +/* stack for saving sub-array to be + * processed + */ +#if 0 + /* validation section */ + _VALIDATE_RETURN_VOID(base != NULL || num == 0, EINVAL); + _VALIDATE_RETURN_VOID(width > 0, EINVAL); + _VALIDATE_RETURN_VOID(comp != NULL, EINVAL); +#endif + if (num < 2) + return; /* nothing to do */ + + stkptr = 0; /* initialize stack */ + lo = (char *)base; + hi = (char *)base + width * (num - 1); /* initialize limits */ + + /* this entry point is for pseudo-recursion calling: setting + * lo and hi and jumping to here is like recursion, but stkptr is + * preserved, locals aren't, so we preserve stuff on the stack + */ +recurse: + + size = (hi - lo) / width + 1; /* number of el's to sort */ + + /* below a certain size, it is faster to use a O(n^2) sorting method */ + if (size <= CUTOFF) { + __SHORTSORT(lo, hi, width, comp, context); + } else { + /* First we pick a partitioning element. The efficiency of + * the algorithm demands that we find one that is approximately + * the median of the values, but also that we select one fast. 
+ * We choose the median of the first, middle, and last + * elements, to avoid bad performance in the face of already + * sorted data, or data that is made up of multiple sorted + * runs appended together. Testing shows that a + * median-of-three algorithm provides better performance than + * simply picking the middle element for the latter case. + */ + + mid = lo + (size / 2) * width; /* find middle element */ + + /* Sort the first, middle, last elements into order */ + if (__COMPARE(context, lo, mid) > 0) + swap(lo, mid, width); + if (__COMPARE(context, lo, hi) > 0) + swap(lo, hi, width); + if (__COMPARE(context, mid, hi) > 0) + swap(mid, hi, width); + + /* We now wish to partition the array into three pieces, one + * consisting of elements <= partition element, one of elements + * equal to the partition element, and one of elements > than + * it. This is done below; comments indicate conditions + * established at every step. + */ + + loguy = lo; + higuy = hi; + + /* Note that higuy decreases and loguy increases on every + * iteration, so loop must terminate. + */ + for (;;) { + /* lo <= loguy < hi, lo < higuy <= hi, + * A[i] <= A[mid] for lo <= i <= loguy, + * A[i] > A[mid] for higuy <= i < hi, + * A[hi] >= A[mid] + */ + + /* The doubled loop is to avoid calling comp(mid,mid), + * since some existing comparison funcs don't work + * when passed the same value for both pointers. 
+ */ + + if (mid > loguy) { + do { + loguy += width; + } while (loguy < mid && + __COMPARE(context, loguy, mid) <= 0); + } + if (mid <= loguy) { + do { + loguy += width; + } while (loguy <= hi && + __COMPARE(context, loguy, mid) <= 0); + } + + /* lo < loguy <= hi+1, A[i] <= A[mid] for + * lo <= i < loguy, + * either loguy > hi or A[loguy] > A[mid] + */ + + do { + higuy -= width; + } while (higuy > mid && + __COMPARE(context, higuy, mid) > 0); + + /* lo <= higuy < hi, A[i] > A[mid] for higuy < i < hi, + * either higuy == lo or A[higuy] <= A[mid] + */ + + if (higuy < loguy) + break; + + /* if loguy > hi or higuy == lo, then we would have + * exited, so A[loguy] > A[mid], A[higuy] <= A[mid], + * loguy <= hi, higuy > lo + */ + + swap(loguy, higuy, width); + + /* If the partition element was moved, follow it. + * Only need to check for mid == higuy, since before + * the swap, A[loguy] > A[mid] implies loguy != mid. + */ + + if (mid == higuy) + mid = loguy; + + /* A[loguy] <= A[mid], A[higuy] > A[mid]; so condition + * at top of loop is re-established + */ + } + + /* A[i] <= A[mid] for lo <= i < loguy, + * A[i] > A[mid] for higuy < i < hi, + * A[hi] >= A[mid] + * higuy < loguy + * implying: + * higuy == loguy-1 + * or higuy == hi - 1, loguy == hi + 1, A[hi] == A[mid] + */ + + /* Find adjacent elements equal to the partition element. The + * doubled loop is to avoid calling comp(mid,mid), since some + * existing comparison funcs don't work when passed the same + * value for both pointers. 
+ */ + + higuy += width; + if (mid < higuy) { + do { + higuy -= width; + } while (higuy > mid && + __COMPARE(context, higuy, mid) == 0); + } + if (mid >= higuy) { + do { + higuy -= width; + } while (higuy > lo && + __COMPARE(context, higuy, mid) == 0); + } + + /* OK, now we have the following: + * higuy < loguy + * lo <= higuy <= hi + * A[i] <= A[mid] for lo <= i <= higuy + * A[i] == A[mid] for higuy < i < loguy + * A[i] > A[mid] for loguy <= i < hi + * A[hi] >= A[mid] + */ + + /* We've finished the partition, now we want to sort the + * subarrays [lo, higuy] and [loguy, hi]. + * We do the smaller one first to minimize stack usage. + * We only sort arrays of length 2 or more. + */ + + if (higuy - lo >= hi - loguy) { + if (lo < higuy) { + lostk[stkptr] = lo; + histk[stkptr] = higuy; + ++stkptr; + } /* save big recursion for later */ + + if (loguy < hi) { + lo = loguy; + goto recurse; /* do small recursion */ + } + } else { + if (loguy < hi) { + lostk[stkptr] = loguy; + histk[stkptr] = hi; + ++stkptr; /* save big recursion for later */ + } + + if (lo < higuy) { + hi = higuy; + goto recurse; /* do small recursion */ + } + } + } + + /* We have sorted the array, except for any pending sorts on the stack. + * Check if there are any, and do them. 
+ */ + + --stkptr; + if (stkptr >= 0) { + lo = lostk[stkptr]; + hi = histk[stkptr]; + goto recurse; /* pop subarray from stack */ + } else + return; /* all subarrays done */ +} + +#endif + +#include "av1_global.h" +int aom_realloc_frame_buffer(AV1_COMMON *cm, PIC_BUFFER_CONFIG *pic, + int width, int height, unsigned int order_hint); +void dump_params(AV1Decoder *pbi, union param_u *params); + +#define assert(a) +#define IMPLIES(a) + +int new_compressed_data_count = 0; + +static int valid_ref_frame_size(int ref_width, int ref_height, + int this_width, int this_height) { + return 2 * this_width >= ref_width && 2 * this_height >= ref_height && + this_width <= 16 * ref_width && this_height <= 16 * ref_height; +} + +#ifdef SUPPORT_SCALE_FACTOR +// Note: Expect val to be in q4 precision +static inline int scaled_x(int val, const struct scale_factors *sf) { + const int off = + (sf->x_scale_fp - (1 << REF_SCALE_SHIFT)) * (1 << (SUBPEL_BITS - 1)); + const int64_t tval = (int64_t)val * sf->x_scale_fp + off; + return (int)ROUND_POWER_OF_TWO_SIGNED_64(tval, + REF_SCALE_SHIFT - SCALE_EXTRA_BITS); +} + +// Note: Expect val to be in q4 precision +static inline int scaled_y(int val, const struct scale_factors *sf) { + const int off = + (sf->y_scale_fp - (1 << REF_SCALE_SHIFT)) * (1 << (SUBPEL_BITS - 1)); + const int64_t tval = (int64_t)val * sf->y_scale_fp + off; + return (int)ROUND_POWER_OF_TWO_SIGNED_64(tval, + REF_SCALE_SHIFT - SCALE_EXTRA_BITS); +} + +// Note: Expect val to be in q4 precision +static int unscaled_value(int val, const struct scale_factors *sf) { + (void)sf; + return val << SCALE_EXTRA_BITS; +} + +static int get_fixed_point_scale_factor(int other_size, int this_size) { + // Calculate scaling factor once for each reference frame + // and use fixed point scaling factors in decoding and encoding routines. + // Hardware implementations can calculate scale factor in device driver + // and use multiplication and shifting on hardware instead of division. 
+ return ((other_size << REF_SCALE_SHIFT) + this_size / 2) / this_size; +} + +// Given the fixed point scale, calculate coarse point scale. +static int fixed_point_scale_to_coarse_point_scale(int scale_fp) { + return ROUND_POWER_OF_TWO(scale_fp, REF_SCALE_SHIFT - SCALE_SUBPEL_BITS); +} + + +void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w, + int other_h, int this_w, int this_h) { + if (!valid_ref_frame_size(other_w, other_h, this_w, this_h)) { + sf->x_scale_fp = REF_INVALID_SCALE; + sf->y_scale_fp = REF_INVALID_SCALE; + return; + } + + sf->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w); + sf->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h); + + sf->x_step_q4 = fixed_point_scale_to_coarse_point_scale(sf->x_scale_fp); + sf->y_step_q4 = fixed_point_scale_to_coarse_point_scale(sf->y_scale_fp); + + if (av1_is_scaled(sf)) { + sf->scale_value_x = scaled_x; + sf->scale_value_y = scaled_y; + } else { + sf->scale_value_x = unscaled_value; + sf->scale_value_y = unscaled_value; + } +#ifdef ORI_CODE + // AV1 convolve functions + // Special case convolve functions should produce the same result as + // av1_convolve_2d. + // subpel_x_qn == 0 && subpel_y_qn == 0 + sf->convolve[0][0][0] = av1_convolve_2d_copy_sr; + // subpel_x_qn == 0 + sf->convolve[0][1][0] = av1_convolve_y_sr; + // subpel_y_qn == 0 + sf->convolve[1][0][0] = av1_convolve_x_sr; + // subpel_x_qn != 0 && subpel_y_qn != 0 + sf->convolve[1][1][0] = av1_convolve_2d_sr; + // subpel_x_qn == 0 && subpel_y_qn == 0 + sf->convolve[0][0][1] = av1_dist_wtd_convolve_2d_copy; + // subpel_x_qn == 0 + sf->convolve[0][1][1] = av1_dist_wtd_convolve_y; + // subpel_y_qn == 0 + sf->convolve[1][0][1] = av1_dist_wtd_convolve_x; + // subpel_x_qn != 0 && subpel_y_qn != 0 + sf->convolve[1][1][1] = av1_dist_wtd_convolve_2d; + // AV1 High BD convolve functions + // Special case convolve functions should produce the same result as + // av1_highbd_convolve_2d. 
+ // subpel_x_qn == 0 && subpel_y_qn == 0 + sf->highbd_convolve[0][0][0] = av1_highbd_convolve_2d_copy_sr; + // subpel_x_qn == 0 + sf->highbd_convolve[0][1][0] = av1_highbd_convolve_y_sr; + // subpel_y_qn == 0 + sf->highbd_convolve[1][0][0] = av1_highbd_convolve_x_sr; + // subpel_x_qn != 0 && subpel_y_qn != 0 + sf->highbd_convolve[1][1][0] = av1_highbd_convolve_2d_sr; + // subpel_x_qn == 0 && subpel_y_qn == 0 + sf->highbd_convolve[0][0][1] = av1_highbd_dist_wtd_convolve_2d_copy; + // subpel_x_qn == 0 + sf->highbd_convolve[0][1][1] = av1_highbd_dist_wtd_convolve_y; + // subpel_y_qn == 0 + sf->highbd_convolve[1][0][1] = av1_highbd_dist_wtd_convolve_x; + // subpel_x_qn != 0 && subpel_y_qn != 0 + sf->highbd_convolve[1][1][1] = av1_highbd_dist_wtd_convolve_2d; +#endif +} +#endif + +static RefCntBuffer *assign_cur_frame_new_fb(AV1_COMMON *const cm) { + // Release the previously-used frame-buffer + int new_fb_idx; + if (cm->cur_frame != NULL) { + --cm->cur_frame->ref_count; + cm->cur_frame = NULL; + } + + // Assign a new framebuffer + new_fb_idx = get_free_frame_buffer(cm); + if (new_fb_idx == INVALID_IDX) return NULL; + + cm->buffer_pool->frame_bufs[new_fb_idx].buf.v4l_buf_index = new_fb_idx; + cm->cur_frame = &cm->buffer_pool->frame_bufs[new_fb_idx]; + cm->cur_frame->buf.buf_8bit_valid = 0; +#ifdef AML + cm->cur_frame->buf.index = new_fb_idx; +#endif +#ifdef ORI_CODE + av1_zero(cm->cur_frame->interp_filter_selected); +#endif + return cm->cur_frame; +} + +// Modify 'lhs_ptr' to reference the buffer at 'rhs_ptr', and update the ref +// counts accordingly. +static void assign_frame_buffer_p(RefCntBuffer **lhs_ptr, + RefCntBuffer *rhs_ptr) { + RefCntBuffer *const old_ptr = *lhs_ptr; + if (old_ptr != NULL) { + assert(old_ptr->ref_count > 0); + // One less reference to the buffer at 'old_ptr', so decrease ref count. + --old_ptr->ref_count; + } + + *lhs_ptr = rhs_ptr; + // One more reference to the buffer at 'rhs_ptr', so increase ref count. 
+ ++rhs_ptr->ref_count; +} + +AV1Decoder *av1_decoder_create(BufferPool *const pool, AV1_COMMON *cm) { + int i; + +#ifndef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + AV1Decoder *pbi = (AV1Decoder *)malloc(sizeof(*pbi)); +#else + AV1Decoder *pbi = (AV1Decoder *)vmalloc(sizeof(AV1Decoder)); +#endif + if (!pbi) return NULL; + memset(pbi, 0, sizeof(*pbi)); + + // The jmp_buf is valid only for the duration of the function that calls + // setjmp(). Therefore, this function must reset the 'setjmp' field to 0 + // before it returns. + + pbi->common = cm; + cm->error.setjmp = 1; + +#ifdef ORI_CODE + memset(cm->fc, 0, sizeof(*cm->fc)); + memset(cm->default_frame_context, 0, sizeof(*cm->default_frame_context)); +#endif + pbi->need_resync = 1; + + // Initialize the references to not point to any frame buffers. + for (i = 0; i < REF_FRAMES; i++) { + cm->ref_frame_map[i] = NULL; + cm->next_ref_frame_map[i] = NULL; +#ifdef AML + cm->next_used_ref_frame_map[i] = NULL; +#endif + } + + cm->current_frame.frame_number = 0; + pbi->decoding_first_frame = 1; + pbi->common->buffer_pool = pool; + + cm->seq_params.bit_depth = AOM_BITS_8; + +#ifdef ORI_CODE + cm->alloc_mi = dec_alloc_mi; + cm->free_mi = dec_free_mi; + cm->setup_mi = dec_setup_mi; + + av1_loop_filter_init(cm); + + av1_qm_init(cm); + av1_loop_restoration_precal(); +#if CONFIG_ACCOUNTING + pbi->acct_enabled = 1; + aom_accounting_init(&pbi->accounting); +#endif +#endif + cm->error.setjmp = 0; + +#ifdef ORI_CODE + aom_get_worker_interface()->init(&pbi->lf_worker); + pbi->lf_worker.thread_name = "aom lf worker"; +#endif + + return pbi; +} + +static void reset_frame_buffers(AV1Decoder *const pbi); + +void av1_bufmgr_ctx_reset(AV1Decoder *pbi, BufferPool *const pool, AV1_COMMON *cm) +{ + u32 save_w, save_h; + + if (!pbi || !pool || !cm) + return; + + reset_frame_buffers(pbi); + memset(pbi, 0, sizeof(*pbi)); + /*save w,h for resolution change after seek */ + save_w = cm->width; + save_h = cm->height; + memset(cm, 0, sizeof(*cm)); + + 
cm->current_frame.frame_number = 0; + cm->seq_params.bit_depth = AOM_BITS_8; + cm->error.setjmp = 0; + cm->width = save_w; + cm->height = save_h; + + pbi->bufmgr_proc_count = 0; + pbi->need_resync = 1; + pbi->decoding_first_frame = 1; + pbi->num_output_frames = 0; + pbi->common = cm; + pbi->common->buffer_pool = pool; +} + +int release_fb_cb(void *cb_priv, aom_codec_frame_buffer_t *fb) { +#if 0 + InternalFrameBuffer *const int_fb = (InternalFrameBuffer *)fb->priv; + (void)cb_priv; + if (int_fb) int_fb->in_use = 0; +#endif + return 0; +} + +static void decrease_ref_count(AV1Decoder *pbi, RefCntBuffer *const buf, + BufferPool *const pool) { + if (buf != NULL) { + --buf->ref_count; + // Reference counts should never become negative. If this assertion fails, + // there is a bug in our reference count management. + assert(buf->ref_count >= 0); + // A worker may only get a free framebuffer index when calling get_free_fb. + // But the raw frame buffer is not set up until we finish decoding header. + // So if any error happens during decoding header, frame_bufs[idx] will not + // have a valid raw frame buffer. 
+ if (buf->ref_count == 0 +#ifdef ORI_CODE + && buf->raw_frame_buffer.data +#endif + ) { +#ifdef AML + av1_release_buf(pbi, buf); +#endif + release_fb_cb(pool->cb_priv, &buf->raw_frame_buffer); + buf->raw_frame_buffer.data = NULL; + buf->raw_frame_buffer.size = 0; + buf->raw_frame_buffer.priv = NULL; + } + } +} + +void clear_frame_buf_ref_count(AV1Decoder *pbi) +{ + int i; + + for (i = 0; i < pbi->num_output_frames; i++) { + decrease_ref_count(pbi, pbi->output_frames[i], + pbi->common->buffer_pool); + } + pbi->num_output_frames = 0; +} + +static void swap_frame_buffers(AV1Decoder *pbi, int frame_decoded) { + int ref_index = 0, mask; + AV1_COMMON *const cm = pbi->common; + BufferPool *const pool = cm->buffer_pool; + unsigned long flags; + + if (frame_decoded) { + int check_on_show_existing_frame; + lock_buffer_pool(pool, flags); + + // In ext-tile decoding, the camera frame header is only decoded once. So, + // we don't release the references here. + if (!pbi->camera_frame_header_ready) { + // If we are not holding reference buffers in cm->next_ref_frame_map, + // assert that the following two for loops are no-ops. + assert(IMPLIES(!pbi->hold_ref_buf, + cm->current_frame.refresh_frame_flags == 0)); + assert(IMPLIES(!pbi->hold_ref_buf, + cm->show_existing_frame && !pbi->reset_decoder_state)); + + // The following two for loops need to release the reference stored in + // cm->ref_frame_map[ref_index] before transferring the reference stored + // in cm->next_ref_frame_map[ref_index] to cm->ref_frame_map[ref_index]. 
+ for (mask = cm->current_frame.refresh_frame_flags; mask; mask >>= 1) { + decrease_ref_count(pbi, cm->ref_frame_map[ref_index], pool); + cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index]; + cm->next_ref_frame_map[ref_index] = NULL; + ++ref_index; + } + + check_on_show_existing_frame = + !cm->show_existing_frame || pbi->reset_decoder_state; + for (; ref_index < REF_FRAMES && check_on_show_existing_frame; + ++ref_index) { + decrease_ref_count(pbi, cm->ref_frame_map[ref_index], pool); + cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index]; + cm->next_ref_frame_map[ref_index] = NULL; + } + } + + if (cm->show_existing_frame || cm->show_frame) { + if (pbi->output_all_layers) { + // Append this frame to the output queue + if (pbi->num_output_frames >= MAX_NUM_SPATIAL_LAYERS) { + // We can't store the new frame anywhere, so drop it and return an + // error + cm->cur_frame->buf.corrupted = 1; + decrease_ref_count(pbi, cm->cur_frame, pool); + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + } else { + pbi->output_frames[pbi->num_output_frames] = cm->cur_frame; + pbi->num_output_frames++; + } + } else { + // Replace any existing output frame + assert(pbi->num_output_frames == 0 || pbi->num_output_frames == 1); + if (pbi->num_output_frames > 0) { + decrease_ref_count(pbi, pbi->output_frames[0], pool); + } + if (cm->cur_frame) { + pbi->output_frames[0] = cm->cur_frame; + pbi->num_output_frames = 1; + } + } + } else { + decrease_ref_count(pbi, cm->cur_frame, pool); + } + + unlock_buffer_pool(pool, flags); + } else { + // The code here assumes we are not holding reference buffers in + // cm->next_ref_frame_map. If this assertion fails, we are leaking the + // frame buffer references in cm->next_ref_frame_map. 
+ assert(IMPLIES(!pbi->camera_frame_header_ready, !pbi->hold_ref_buf)); + // Nothing was decoded, so just drop this frame buffer + lock_buffer_pool(pool, flags); + decrease_ref_count(pbi, cm->cur_frame, pool); + unlock_buffer_pool(pool, flags); + } + cm->cur_frame = NULL; + + if (!pbi->camera_frame_header_ready) { + pbi->hold_ref_buf = 0; + + // Invalidate these references until the next frame starts. + for (ref_index = 0; ref_index < INTER_REFS_PER_FRAME; ref_index++) { + cm->remapped_ref_idx[ref_index] = INVALID_IDX; + } + } +} + +void aom_internal_error(struct aom_internal_error_info *info, + aom_codec_err_t error, const char *fmt, ...) { + va_list ap; + + info->error_code = error; + info->has_detail = 0; + + if (fmt) { + size_t sz = sizeof(info->detail); + + info->has_detail = 1; + va_start(ap, fmt); + vsnprintf(info->detail, sz - 1, fmt, ap); + va_end(ap); + info->detail[sz - 1] = '\0'; + } +#ifdef ORI_CODE + if (info->setjmp) longjmp(info->jmp, info->error_code); +#endif +} + +#ifdef ORI_CODE +void av1_zero_unused_internal_frame_buffers(InternalFrameBufferList *list) { + int i; + + assert(list != NULL); + + for (i = 0; i < list->num_internal_frame_buffers; ++i) { + if (list->int_fb[i].data && !list->int_fb[i].in_use) + memset(list->int_fb[i].data, 0, list->int_fb[i].size); + } +} +#endif + +// Release the references to the frame buffers in cm->ref_frame_map and reset +// all elements of cm->ref_frame_map to NULL. +static void reset_ref_frame_map(AV1Decoder *const pbi) { + AV1_COMMON *const cm = pbi->common; + BufferPool *const pool = cm->buffer_pool; + int i; + + for (i = 0; i < REF_FRAMES; i++) { + decrease_ref_count(pbi, cm->ref_frame_map[i], pool); + cm->ref_frame_map[i] = NULL; +#ifdef AML + cm->next_used_ref_frame_map[i] = NULL; +#endif + } +} + +// Generate next_ref_frame_map. 
+static void generate_next_ref_frame_map(AV1Decoder *const pbi) { + AV1_COMMON *const cm = pbi->common; + BufferPool *const pool = cm->buffer_pool; + unsigned long flags; + int ref_index = 0; + int mask; + + lock_buffer_pool(pool, flags); + // cm->next_ref_frame_map holds references to frame buffers. After storing a + // frame buffer index in cm->next_ref_frame_map, we need to increase the + // frame buffer's ref_count. + for (mask = cm->current_frame.refresh_frame_flags; mask; mask >>= 1) { + if (mask & 1) { + cm->next_ref_frame_map[ref_index] = cm->cur_frame; + } else { + cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index]; + } + if (cm->next_ref_frame_map[ref_index] != NULL) + ++cm->next_ref_frame_map[ref_index]->ref_count; + ++ref_index; + } + + for (; ref_index < REF_FRAMES; ++ref_index) { + cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index]; + if (cm->next_ref_frame_map[ref_index] != NULL) + ++cm->next_ref_frame_map[ref_index]->ref_count; + } + unlock_buffer_pool(pool, flags); + pbi->hold_ref_buf = 1; +} + +// If the refresh_frame_flags bitmask is set, update reference frame id values +// and mark frames as valid for reference. 
// Record 'frame_id' in every reference slot selected by the current
// refresh_frame_flags bitmask and mark those slots valid for referencing.
// Note: assert() is compiled out by the local '#define assert(a)' earlier in
// this file, so the frame_id_numbers_present_flag check is documentation only.
static void update_ref_frame_id(AV1_COMMON *const cm, int frame_id) {
  int i;
  int refresh_frame_flags = cm->current_frame.refresh_frame_flags;
  assert(cm->seq_params.frame_id_numbers_present_flag);
  for (i = 0; i < REF_FRAMES; i++) {
    if ((refresh_frame_flags >> i) & 1) {
      cm->ref_frame_id[i] = frame_id;
      cm->valid_for_referencing[i] = 1;
    }
  }
}

// Handle the state reset performed when a show_existing_frame picture is
// treated as a key frame: force KEY_FRAME type, refresh every reference slot,
// invalidate the remapped indices and rebuild next_ref_frame_map.
static void show_existing_frame_reset(AV1Decoder *const pbi,
                                      int existing_frame_idx) {
  AV1_COMMON *const cm = pbi->common;
  int i;
  assert(cm->show_existing_frame);

  cm->current_frame.frame_type = KEY_FRAME;

  // All REF_FRAMES slots get refreshed by this (pseudo) key frame.
  cm->current_frame.refresh_frame_flags = (1 << REF_FRAMES) - 1;

  for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
    cm->remapped_ref_idx[i] = INVALID_IDX;
  }

  if (pbi->need_resync) {
    reset_ref_frame_map(pbi);
    pbi->need_resync = 0;
  }

  // Note that the displayed frame must be valid for referencing in order to
  // have been selected.
  if (cm->seq_params.frame_id_numbers_present_flag) {
    cm->current_frame_id = cm->ref_frame_id[existing_frame_idx];
    update_ref_frame_id(cm, cm->current_frame_id);
  }

  cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_DISABLED;

  generate_next_ref_frame_map(pbi);

#ifdef ORI_CODE
  // Reload the adapted CDFs from when we originally coded this keyframe
  *cm->fc = cm->next_ref_frame_map[existing_frame_idx]->frame_context;
#endif
}

// Drop all held references (ref_frame_map / next_ref_frame_map) and clear the
// order-hint bookkeeping of every frame buffer that is now unreferenced.
static void reset_frame_buffers(AV1Decoder *const pbi) {
  AV1_COMMON *const cm = pbi->common;
  RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
  int i;
  unsigned long flags;

  // We have not stored any references to frame buffers in
  // cm->next_ref_frame_map, so we can directly reset it to all NULL.
  for (i = 0; i < REF_FRAMES; ++i) {
    cm->next_ref_frame_map[i] = NULL;
  }

  lock_buffer_pool(cm->buffer_pool, flags);
  reset_ref_frame_map(pbi);
  assert(cm->cur_frame->ref_count == 1);
  for (i = 0; i < FRAME_BUFFERS; ++i) {
    // Reset all unreferenced frame buffers. We can also reset cm->cur_frame
    // because we are the sole owner of cm->cur_frame.
    if (frame_bufs[i].ref_count > 0 && &frame_bufs[i] != cm->cur_frame) {
      continue;
    }
    frame_bufs[i].order_hint = 0;
    av1_zero(frame_bufs[i].ref_order_hints);
  }
#ifdef ORI_CODE
  av1_zero_unused_internal_frame_buffers(&cm->buffer_pool->int_frame_buffers);
#endif
  unlock_buffer_pool(cm->buffer_pool, flags);
}

// True for frames that carry no inter prediction (key / intra-only frames).
static int frame_is_intra_only(const AV1_COMMON *const cm) {
  return cm->current_frame.frame_type == KEY_FRAME ||
         cm->current_frame.frame_type == INTRA_ONLY_FRAME;
}

// True when the current frame is a switch frame (S_FRAME).
static int frame_is_sframe(const AV1_COMMON *cm) {
  return cm->current_frame.frame_type == S_FRAME;
}

// These functions take a reference frame label between LAST_FRAME and
// EXTREF_FRAME inclusive. Note that this is different to the indexing
// previously used by the frame_refs[] array.
static int get_ref_frame_map_idx(const AV1_COMMON *const cm,
                                 const MV_REFERENCE_FRAME ref_frame) {
  return (ref_frame >= LAST_FRAME && ref_frame <= EXTREF_FRAME)
             ? cm->remapped_ref_idx[ref_frame - LAST_FRAME]
             : INVALID_IDX;
}

// Resolve a reference-frame label to its buffer, or NULL when the label is
// out of range or the slot is unmapped.
static RefCntBuffer *get_ref_frame_buf(
    const AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) {
  const int map_idx = get_ref_frame_map_idx(cm, ref_frame);
  return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : NULL;
}
#ifdef SUPPORT_SCALE_FACTOR
// Same lookup as get_ref_frame_buf(), but returns the per-slot scale factors.
static struct scale_factors *get_ref_scale_factors(
    AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) {
  const int map_idx = get_ref_frame_map_idx(cm, ref_frame);
  return (map_idx != INVALID_IDX) ? &cm->ref_scale_factors[map_idx] : NULL;
}
#endif
// Buffer of the primary reference frame, or NULL when PRIMARY_REF_NONE.
// primary_ref_frame is an offset relative to LAST_FRAME, hence the "+ 1"
// to convert it back into a MV_REFERENCE_FRAME label.
static RefCntBuffer *get_primary_ref_frame_buf(
    const AV1_COMMON *const cm) {
  int map_idx;
  if (cm->primary_ref_frame == PRIMARY_REF_NONE) return NULL;
  map_idx = get_ref_frame_map_idx(cm, cm->primary_ref_frame + 1);
  return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : NULL;
}

// Signed wrap-around distance (a - b) in order-hint space: the difference is
// reinterpreted as a 'bits'-wide two's-complement value. Returns 0 when order
// hints are disabled.
static int get_relative_dist(const OrderHintInfo *oh, int a, int b) {
  int bits;
  int m;
  int diff;
  if (!oh->enable_order_hint) return 0;

  bits = oh->order_hint_bits_minus_1 + 1;

  assert(bits >= 1);
  assert(a >= 0 && a < (1 << bits));
  assert(b >= 0 && b < (1 << bits));

  diff = a - b;
  m = 1 << (bits - 1);
  // Sign-extend the low 'bits' bits of diff.
  diff = (diff & (m - 1)) - (diff & m);
  return diff;
}

// Copy the frame dimensions out of 'params'. num_bits_width/num_bits_height
// are unused here: the sizes arrive pre-parsed in 'params' (presumably filled
// in by the HW parser/ucode — TODO confirm) instead of being read bit-by-bit
// from the stream as in upstream libaom.
void av1_read_frame_size(union param_u *params, int num_bits_width,
                         int num_bits_height, int *width, int *height, int* dec_width) {
  *width = params->p.frame_width;
  *height = params->p.frame_height;//aom_rb_read_literal(rb, num_bits_height) + 1;
#ifdef AML
  *dec_width = params->p.dec_frame_width;
#endif
}

// Intra-only frames are always SINGLE_REFERENCE; otherwise the mode comes
// from the parsed reference_mode flag.
static REFERENCE_MODE read_frame_reference_mode(
    const AV1_COMMON *cm, union param_u *params) {
  if (frame_is_intra_only(cm)) {
    return SINGLE_REFERENCE;
  } else {
    return params->p.reference_mode ? REFERENCE_MODE_SELECT : SINGLE_REFERENCE;
  }
}

static inline int calc_mi_size(int len) {
  // len is in mi units. Align to a multiple of SBs.
  return ALIGN_POWER_OF_TWO(len, MAX_MIB_SIZE_LOG2);
}

// Derive the mode-info (mi) and macroblock (mb) grid dimensions from the
// frame size and store them in 'cm'.
void av1_set_mb_mi(AV1_COMMON *cm, int width, int height) {
  // Ensure that the decoded width and height are both multiples of
  // 8 luma pixels (note: this may only be a multiple of 4 chroma pixels if
  // subsampling is used).
  // This simplifies the implementation of various experiments,
  // eg. cdef, which operates on units of 8x8 luma pixels.
  const int aligned_width = ALIGN_POWER_OF_TWO(width, 3);
  const int aligned_height = ALIGN_POWER_OF_TWO(height, 3);
  av1_print2(AV1_DEBUG_BUFMGR_DETAIL, " [PICTURE] av1_set_mb_mi (%d X %d)\n", width, height);

  cm->mi_cols = aligned_width >> MI_SIZE_LOG2;
  cm->mi_rows = aligned_height >> MI_SIZE_LOG2;
  cm->mi_stride = calc_mi_size(cm->mi_cols);

  // One macroblock covers a 4x4 block of mi units; round up.
  cm->mb_cols = (cm->mi_cols + 2) >> 2;
  cm->mb_rows = (cm->mi_rows + 2) >> 2;
  cm->MBs = cm->mb_rows * cm->mb_cols;

#if CONFIG_LPF_MASK
  alloc_loop_filter_mask(cm);
#endif
}

// Recompute the mi/mb grids for the given size. Returns 0 on success,
// 1 on failure. Without ORI_CODE no allocation is actually performed, so the
// failure path after 'return 0' below is unreachable; it is kept for parity
// with the upstream libaom implementation.
int av1_alloc_context_buffers(AV1_COMMON *cm, int width, int height) {
#ifdef ORI_CODE
  int new_mi_size;
#endif
  av1_set_mb_mi(cm, width, height);
#ifdef ORI_CODE
  new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows);
  if (cm->mi_alloc_size < new_mi_size) {
    cm->free_mi(cm);
    if (cm->alloc_mi(cm, new_mi_size)) goto fail;
  }
#endif
  return 0;

#ifdef ORI_CODE
fail:
#endif
  // clear the mi_* values to force a realloc on resync
  av1_set_mb_mi(cm, 0, 0);
#ifdef ORI_CODE
  av1_free_context_buffers(cm);
#endif
  return 1;
}

#ifndef USE_SCALED_WIDTH_FROM_UCODE
// Downscale *dim by SCALE_NUMERATOR/denom with rounding, clamped per spec.
static void calculate_scaled_size_helper(int *dim, int denom) {
  if (denom != SCALE_NUMERATOR) {
    // We need to ensure the constraint in "Appendix A" of the spec:
    // * FrameWidth is greater than or equal to 16
    // * FrameHeight is greater than or equal to 16
    // For this, we clamp the downscaled dimension to at least 16. One
    // exception: if original dimension itself was < 16, then we keep the
    // downscaled dimension to be same as the original, to ensure that resizing
    // is valid.
    const int min_dim = AOMMIN(16, *dim);
    // Use this version if we need *dim to be even
    // *width = (*width * SCALE_NUMERATOR + denom) / (2 * denom);
    // *width <<= 1;
    *dim = (*dim * SCALE_NUMERATOR + denom / 2) / (denom);
    *dim = AOMMAX(*dim, min_dim);
  }
}
#ifdef ORI_CODE
void av1_calculate_scaled_size(int *width, int *height, int resize_denom) {
  calculate_scaled_size_helper(width, resize_denom);
  calculate_scaled_size_helper(height, resize_denom);
}
#endif
// Superres scales the width only; height is intentionally untouched.
void av1_calculate_scaled_superres_size(int *width, int *height,
                                        int superres_denom) {
  (void)height;
  calculate_scaled_size_helper(width, superres_denom);
}
#endif

// Record the upscaled (display) size in 'cm' and shrink *width to the
// superres decoding width. With USE_SCALED_WIDTH_FROM_UCODE the scaled sizes
// come pre-computed in 'params'; otherwise they are derived from the
// superres_scale_denominator as in upstream libaom.
static void setup_superres(AV1_COMMON *const cm, union param_u *params,
                           int *width, int *height) {
#ifdef USE_SCALED_WIDTH_FROM_UCODE
  cm->superres_upscaled_width = params->p.frame_width_scaled;
  cm->superres_upscaled_height = params->p.frame_height;


  *width = params->p.dec_frame_width;
  *height = params->p.frame_height;
  av1_print2(AV1_DEBUG_BUFMGR_DETAIL, " [PICTURE] set decoding size to (%d X %d) scaled size to (%d X %d)\n",
             *width, *height,
             cm->superres_upscaled_width,
             cm->superres_upscaled_height);
#else
  cm->superres_upscaled_width = *width;
  cm->superres_upscaled_height = *height;

  const SequenceHeader *const seq_params = &cm->seq_params;
  if (!seq_params->enable_superres) return;

  //if (aom_rb_read_bit(-1, defmark, rb)) {
  if (params->p.superres_scale_denominator != SCALE_NUMERATOR) {
#ifdef ORI_CODE
    cm->superres_scale_denominator =
        (uint8_t)aom_rb_read_literal(-1, defmark, rb, SUPERRES_SCALE_BITS);
    cm->superres_scale_denominator += SUPERRES_SCALE_DENOMINATOR_MIN;
#else
    cm->superres_scale_denominator = params->p.superres_scale_denominator;
#endif
    // Don't edit cm->width or cm->height directly, or the buffers won't get
    // resized correctly
    av1_calculate_scaled_superres_size(width, height,
                                       cm->superres_scale_denominator);
  } else {
    // 1:1 scaling - ie. no scaling, scale not provided
    cm->superres_scale_denominator = SCALE_NUMERATOR;
  }
/*!USE_SCALED_WIDTH_FROM_UCODE*/
#endif
}

// Resize the mi/mb context grids when the frame size changed, then mirror the
// final size into the current frame buffer. Errors are reported through
// aom_internal_error() (non-fatal here since longjmp is compiled out).
static void resize_context_buffers(AV1_COMMON *cm, int width, int height) {
#if CONFIG_SIZE_LIMIT
  if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                       "Dimensions of %dx%d beyond allowed size of %dx%d.",
                       width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
#endif
  if (cm->width != width || cm->height != height) {
    const int new_mi_rows =
        ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
    const int new_mi_cols =
        ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;

    // Allocations in av1_alloc_context_buffers() depend on individual
    // dimensions as well as the overall size.
    if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
      if (av1_alloc_context_buffers(cm, width, height)) {
        // The cm->mi_* values have been cleared and any existing context
        // buffers have been freed. Clear cm->width and cm->height to be
        // consistent and to force a realloc next time.
        cm->width = 0;
        cm->height = 0;
        aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                           "Failed to allocate context buffers");
      }
    } else {
      av1_set_mb_mi(cm, width, height);
    }
#ifdef ORI_CODE
    av1_init_context_buffers(cm);
#endif
    cm->width = width;
    cm->height = height;
  }

#ifdef ORI_CODE
  ensure_mv_buffer(cm->cur_frame, cm);
#endif
  cm->cur_frame->width = cm->width;
  cm->cur_frame->height = cm->height;
}

// (Re)allocate the current frame buffer for cm->width x cm->height under the
// pool lock, then stamp the sequence-header color/format metadata and the
// render size onto it.
static void setup_buffer_pool(AV1_COMMON *cm) {
  BufferPool *const pool = cm->buffer_pool;
  const SequenceHeader *const seq_params = &cm->seq_params;
  unsigned long flags;

  lock_buffer_pool(pool, flags);
  if (aom_realloc_frame_buffer(cm, &cm->cur_frame->buf,
        cm->width, cm->height, cm->cur_frame->order_hint)) {
    unlock_buffer_pool(pool, flags);
    aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
                       "Failed to allocate frame buffer");
  }
  unlock_buffer_pool(pool, flags);

  cm->cur_frame->buf.bit_depth = (unsigned int)seq_params->bit_depth;
  cm->cur_frame->buf.color_primaries = seq_params->color_primaries;
  cm->cur_frame->buf.transfer_characteristics =
      seq_params->transfer_characteristics;
  cm->cur_frame->buf.matrix_coefficients = seq_params->matrix_coefficients;
  cm->cur_frame->buf.monochrome = seq_params->monochrome;
  cm->cur_frame->buf.chroma_sample_position =
      seq_params->chroma_sample_position;
  cm->cur_frame->buf.color_range = seq_params->color_range;
  cm->cur_frame->buf.render_width = cm->render_width;
  cm->cur_frame->buf.render_height = cm->render_height;
}

// Determine the frame size (explicit override from 'params', or the sequence
// maxima), apply superres, resize the context grids and allocate the frame
// buffer.
static void setup_frame_size(AV1_COMMON *cm, int frame_size_override_flag, union param_u *params) {
  const SequenceHeader *const seq_params = &cm->seq_params;
  int width, height, dec_width;

  if (frame_size_override_flag) {
    int num_bits_width = seq_params->num_bits_width;
    int num_bits_height = seq_params->num_bits_height;
    av1_read_frame_size(params, num_bits_width, num_bits_height, &width, &height, &dec_width);
#ifdef AML
    cm->dec_width = dec_width;
#endif
    if (width > seq_params->max_frame_width ||
        height > seq_params->max_frame_height) {
      aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                         "Frame dimensions are larger than the maximum values");
    }
  } else {
    width = seq_params->max_frame_width;
    height = seq_params->max_frame_height;
#ifdef AML
    cm->dec_width = dec_width = params->p.dec_frame_width;
#endif
  }
  setup_superres(cm, params, &width, &height);
  resize_context_buffers(cm, width, height);
#ifdef ORI_CODE
  setup_render_size(cm, params);
#endif
  setup_buffer_pool(cm);
}

// A reference is format-compatible iff bit depth and chroma subsampling
// match the current frame exactly.
static int valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth,
                                   int ref_xss, int ref_yss,
                                   aom_bit_depth_t this_bit_depth,
                                   int this_xss, int this_yss) {
  return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
         ref_yss == this_yss;
}

// Frame-size-with-refs path: take the size from the first reference selected
// in valid_ref_frame_bits, or fall back to reading it from 'params'; then
// validate all referenced frames for size ratio and color format before
// allocating the frame buffer.
static void setup_frame_size_with_refs(AV1_COMMON *cm, union param_u *params) {
  int width, height, dec_width;
  int found = 0;
  int has_valid_ref_frame = 0;
  int i;
  SequenceHeader *seq_params;
  for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
    /*if (aom_rb_read_bit(rb)) {*/
    if (params->p.valid_ref_frame_bits & (1<<i)) {
      const RefCntBuffer *const ref_buf = get_ref_frame_buf(cm, i);
      // This will never be NULL in a normal stream, as streams are required to
      // have a shown keyframe before any inter frames, which would refresh all
      // the reference buffers. However, it might be null if we're starting in
      // the middle of a stream, and static analysis will error if we don't do
      // a null check here.
      if (ref_buf == NULL) {
        aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                           "Invalid condition: invalid reference buffer");
      } else {
        const PIC_BUFFER_CONFIG *const buf = &ref_buf->buf;
        width = buf->y_crop_width;
        height = buf->y_crop_height;
        cm->render_width = buf->render_width;
        cm->render_height = buf->render_height;
        setup_superres(cm, params, &width, &height);
        resize_context_buffers(cm, width, height);
        found = 1;
        break;
      }
    }
  }

  seq_params = &cm->seq_params;
  if (!found) {
    int num_bits_width = seq_params->num_bits_width;
    int num_bits_height = seq_params->num_bits_height;

    av1_read_frame_size(params, num_bits_width, num_bits_height, &width, &height, &dec_width);
#ifdef AML
    cm->dec_width = dec_width;
#endif
    setup_superres(cm, params, &width, &height);
    resize_context_buffers(cm, width, height);
#ifdef ORI_CODE
    setup_render_size(cm, rb);
#endif
  }

  if (width <= 0 || height <= 0)
    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                       "Invalid frame size");

  // Check to make sure at least one of frames that this frame references
  // has valid dimensions.
  for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
    const RefCntBuffer *const ref_frame = get_ref_frame_buf(cm, i);
    if (ref_frame != NULL) {
      has_valid_ref_frame |=
          valid_ref_frame_size(ref_frame->buf.y_crop_width,
                               ref_frame->buf.y_crop_height, width, height);
    }
  }
  if (!has_valid_ref_frame)
    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                       "Referenced frame has invalid size");
  for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
    const RefCntBuffer *const ref_frame = get_ref_frame_buf(cm, i);
    if (ref_frame != NULL) {
      if (!valid_ref_frame_img_fmt(
              ref_frame->buf.bit_depth, ref_frame->buf.subsampling_x,
              ref_frame->buf.subsampling_y, seq_params->bit_depth,
              seq_params->subsampling_x, seq_params->subsampling_y))
        aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
                           "Referenced frame has incompatible color format");
    }
  }
  setup_buffer_pool(cm);
}

// Per-reference bookkeeping used when ordering references by display offset.
typedef struct {
  int map_idx;        // frame map index
  RefCntBuffer *buf;  // frame buffer
  int sort_idx;       // index based on the offset to be used for sorting
} REF_FRAME_INFO;

// Compares the sort_idx fields. If they are equal, then compares the map_idx
// fields to break the tie. This ensures a stable sort.
+static int compare_ref_frame_info(const void *arg_a, const void *arg_b) { + const REF_FRAME_INFO *info_a = (REF_FRAME_INFO *)arg_a; + const REF_FRAME_INFO *info_b = (REF_FRAME_INFO *)arg_b; + + const int sort_idx_diff = info_a->sort_idx - info_b->sort_idx; + if (sort_idx_diff != 0) return sort_idx_diff; + return info_a->map_idx - info_b->map_idx; +} + + +/* +for av1_setup_motion_field() +*/ +static int motion_field_projection(AV1_COMMON *cm, + MV_REFERENCE_FRAME start_frame, int dir) { +#ifdef ORI_CODE + TPL_MV_REF *tpl_mvs_base = cm->tpl_mvs; + int ref_offset[REF_FRAMES] = { 0 }; +#endif + MV_REFERENCE_FRAME rf; + const RefCntBuffer *const start_frame_buf = + get_ref_frame_buf(cm, start_frame); + int start_frame_order_hint; + unsigned int const *ref_order_hints; + int cur_order_hint; + int start_to_current_frame_offset; + +#ifdef AML + int i; + //av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "$$$$$$$$$$$%s:cm->mv_ref_id_index = %d, start_frame=%d\n", __func__, cm->mv_ref_id_index, start_frame); + cm->mv_ref_id[cm->mv_ref_id_index] = start_frame; + for (i = 0; i < REF_FRAMES; i++) { + cm->mv_ref_offset[cm->mv_ref_id_index][i]=0; + } + cm->mv_cal_tpl_mvs[cm->mv_ref_id_index]=0; + cm->mv_ref_id_index++; +#endif + if (start_frame_buf == NULL) return 0; + + if (start_frame_buf->frame_type == KEY_FRAME || + start_frame_buf->frame_type == INTRA_ONLY_FRAME) + return 0; + + if (start_frame_buf->mi_rows != cm->mi_rows || + start_frame_buf->mi_cols != cm->mi_cols) + return 0; + + start_frame_order_hint = start_frame_buf->order_hint; + ref_order_hints = + &start_frame_buf->ref_order_hints[0]; + cur_order_hint = cm->cur_frame->order_hint; + start_to_current_frame_offset = get_relative_dist( + &cm->seq_params.order_hint_info, start_frame_order_hint, cur_order_hint); + + for (rf = LAST_FRAME; rf <= INTER_REFS_PER_FRAME; ++rf) { + cm->mv_ref_offset[cm->mv_ref_id_index-1][rf] = get_relative_dist(&cm->seq_params.order_hint_info, + start_frame_order_hint, + ref_order_hints[rf - LAST_FRAME]); 
+  }
+#ifdef AML
+  /* Mark the reserved slot as having valid offsets (projection accepted). */
+  cm->mv_cal_tpl_mvs[cm->mv_ref_id_index-1]=1;
+#endif
+  /* Callers pass dir == 2 for past references (LAST/LAST2); flip the sign
+   * of the start->current offset in that case. */
+  if (dir == 2) start_to_current_frame_offset = -start_to_current_frame_offset;
+#ifdef ORI_CODE
+  MV_REF *mv_ref_base = start_frame_buf->mvs;
+  const int mvs_rows = (cm->mi_rows + 1) >> 1;
+  const int mvs_cols = (cm->mi_cols + 1) >> 1;
+
+  for (int blk_row = 0; blk_row < mvs_rows; ++blk_row) {
+    for (int blk_col = 0; blk_col < mvs_cols; ++blk_col) {
+      MV_REF *mv_ref = &mv_ref_base[blk_row * mvs_cols + blk_col];
+      MV fwd_mv = mv_ref->mv.as_mv;
+
+      if (mv_ref->ref_frame > INTRA_FRAME) {
+        int_mv this_mv;
+        int mi_r, mi_c;
+        const int ref_frame_offset = ref_offset[mv_ref->ref_frame];
+
+        int pos_valid =
+            abs(ref_frame_offset) <= MAX_FRAME_DISTANCE &&
+            ref_frame_offset > 0 &&
+            abs(start_to_current_frame_offset) <= MAX_FRAME_DISTANCE;
+
+        if (pos_valid) {
+          get_mv_projection(&this_mv.as_mv, fwd_mv,
+                            start_to_current_frame_offset, ref_frame_offset);
+          pos_valid = get_block_position(cm, &mi_r, &mi_c, blk_row, blk_col,
+                                         this_mv.as_mv, dir >> 1);
+        }
+
+        if (pos_valid) {
+          const int mi_offset = mi_r * (cm->mi_stride >> 1) + mi_c;
+
+          tpl_mvs_base[mi_offset].mfmv0.as_mv.row = fwd_mv.row;
+          tpl_mvs_base[mi_offset].mfmv0.as_mv.col = fwd_mv.col;
+          tpl_mvs_base[mi_offset].ref_frame_offset = ref_frame_offset;
+        }
+      }
+    }
+  }
+#endif
+  return 1;
+}
+
+#ifdef AML
+static int setup_motion_field_debug_count = 0;
+#endif
+/*
+ * Set up temporal-MV prediction state for the current frame: clears
+ * cm->ref_frame_side, classifies each reference as before/at/after the
+ * current frame in display order, then runs motion_field_projection() for
+ * LAST, BWDREF, ALTREF2, ALTREF and LAST2 subject to the MFMV_STACK_SIZE
+ * budget (ref_stamp).  No-op when order hints are disabled.
+ */
+void av1_setup_motion_field(AV1_COMMON *cm) {
+  const OrderHintInfo *const order_hint_info = &cm->seq_params.order_hint_info;
+  int ref_frame;
+  int size;
+  int cur_order_hint;
+  const RefCntBuffer *ref_buf[INTER_REFS_PER_FRAME];
+  int ref_order_hint[INTER_REFS_PER_FRAME];
+  int ref_stamp;
+  memset(cm->ref_frame_side, 0, sizeof(cm->ref_frame_side));
+  if (!order_hint_info->enable_order_hint) return;
+#ifdef ORI_CODE
+  TPL_MV_REF *tpl_mvs_base = cm->tpl_mvs;
+#endif
+  /* 'size' is consumed only by the compiled-out ORI_CODE clear loop below. */
+  size = ((cm->mi_rows + MAX_MIB_SIZE) >> 1) * (cm->mi_stride >> 1);
+#ifdef ORI_CODE
+  for (int idx = 0; idx < size; ++idx) {
+    
tpl_mvs_base[idx].mfmv0.as_int = INVALID_MV;
+    tpl_mvs_base[idx].ref_frame_offset = 0;
+  }
+#endif
+  cur_order_hint = cm->cur_frame->order_hint;
+
+  /* Cache each reference's buffer/order hint and record its display-order
+   * position relative to the current frame:
+   *   ref_frame_side =  1 -> reference lies after the current frame,
+   *   ref_frame_side = -1 -> same order hint as the current frame. */
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
+    const int ref_idx = ref_frame - LAST_FRAME;
+    const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref_frame);
+    int order_hint = 0;
+
+    if (buf != NULL) order_hint = buf->order_hint;
+
+    ref_buf[ref_idx] = buf;
+    ref_order_hint[ref_idx] = order_hint;
+
+    if (get_relative_dist(order_hint_info, order_hint, cur_order_hint) > 0)
+      cm->ref_frame_side[ref_frame] = 1;
+    else if (order_hint == cur_order_hint)
+      cm->ref_frame_side[ref_frame] = -1;
+  }
+  /* Budget: at most MFMV_STACK_SIZE projections (ref_stamp counts down). */
+  ref_stamp = MFMV_STACK_SIZE - 1;
+#ifdef AML
+  cm->mv_ref_id_index = 0;
+  av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s(%d) mi_cols %d mi_rows %d\n",
+    __func__, setup_motion_field_debug_count++,
+    cm->mi_cols,
+    cm->mi_rows
+    );
+#endif
+  if (ref_buf[LAST_FRAME - LAST_FRAME] != NULL) {
+    const int alt_of_lst_order_hint =
+        ref_buf[LAST_FRAME - LAST_FRAME]
+            ->ref_order_hints[ALTREF_FRAME - LAST_FRAME];
+
+    /* Skip projection from LAST when it is detected as an overlay: the
+     * ALTREF order hint recorded inside LAST equals GOLDEN's order hint. */
+    const int is_lst_overlay =
+        (alt_of_lst_order_hint == ref_order_hint[GOLDEN_FRAME - LAST_FRAME]);
+    if (!is_lst_overlay) motion_field_projection(cm, LAST_FRAME, 2);
+    --ref_stamp;
+  }
+
+  if (get_relative_dist(order_hint_info,
+                        ref_order_hint[BWDREF_FRAME - LAST_FRAME],
+                        cur_order_hint) > 0) {
+    if (motion_field_projection(cm, BWDREF_FRAME, 0)) --ref_stamp;
+  }
+
+  if (get_relative_dist(order_hint_info,
+                        ref_order_hint[ALTREF2_FRAME - LAST_FRAME],
+                        cur_order_hint) > 0) {
+    if (motion_field_projection(cm, ALTREF2_FRAME, 0)) --ref_stamp;
+  }
+
+  if (get_relative_dist(order_hint_info,
+                        ref_order_hint[ALTREF_FRAME - LAST_FRAME],
+                        cur_order_hint) > 0 &&
+      ref_stamp >= 0)
+    if (motion_field_projection(cm, ALTREF_FRAME, 0)) --ref_stamp;
+
+  if (ref_stamp >= 0) motion_field_projection(cm, LAST2_FRAME, 2);
+}
+
+
+/* Record ref_info->map_idx as the remapped buffer index for slot frame_idx. */
+static void set_ref_frame_info(int *remapped_ref_idx, int frame_idx,
+                               REF_FRAME_INFO *ref_info)
{ + assert(frame_idx >= 0 && frame_idx < INTER_REFS_PER_FRAME); + + remapped_ref_idx[frame_idx] = ref_info->map_idx; + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "+++++++++++++%s:remapped_ref_idx[%d]=0x%x\n", __func__, frame_idx, ref_info->map_idx); +} + + +void av1_set_frame_refs(AV1_COMMON *const cm, int *remapped_ref_idx, + int lst_map_idx, int gld_map_idx) { + int lst_frame_sort_idx = -1; + int gld_frame_sort_idx = -1; + int i; + //assert(cm->seq_params.order_hint_info.enable_order_hint); + //assert(cm->seq_params.order_hint_info.order_hint_bits_minus_1 >= 0); + const int cur_order_hint = (int)cm->current_frame.order_hint; + const int cur_frame_sort_idx = + 1 << cm->seq_params.order_hint_info.order_hint_bits_minus_1; + + REF_FRAME_INFO ref_frame_info[REF_FRAMES]; + int ref_flag_list[INTER_REFS_PER_FRAME] = { 0, 0, 0, 0, 0, 0, 0 }; + int bwd_start_idx; + int bwd_end_idx; + int fwd_start_idx, fwd_end_idx; + int ref_idx; + static const MV_REFERENCE_FRAME ref_frame_list[INTER_REFS_PER_FRAME - 2] = { + LAST2_FRAME, LAST3_FRAME, BWDREF_FRAME, ALTREF2_FRAME, ALTREF_FRAME + }; + + for (i = 0; i < REF_FRAMES; ++i) { + const int map_idx = i; + RefCntBuffer *buf; + int offset; + + ref_frame_info[i].map_idx = map_idx; + ref_frame_info[i].sort_idx = -1; + + buf = cm->ref_frame_map[map_idx]; + ref_frame_info[i].buf = buf; + + if (buf == NULL) continue; + // If this assertion fails, there is a reference leak. + assert(buf->ref_count > 0); + + offset = (int)buf->order_hint; + ref_frame_info[i].sort_idx = + (offset == -1) ? -1 + : cur_frame_sort_idx + + get_relative_dist(&cm->seq_params.order_hint_info, + offset, cur_order_hint); + assert(ref_frame_info[i].sort_idx >= -1); + + if (map_idx == lst_map_idx) lst_frame_sort_idx = ref_frame_info[i].sort_idx; + if (map_idx == gld_map_idx) gld_frame_sort_idx = ref_frame_info[i].sort_idx; + } + + // Confirm both LAST_FRAME and GOLDEN_FRAME are valid forward reference + // frames. 
+  if (lst_frame_sort_idx == -1 || lst_frame_sort_idx >= cur_frame_sort_idx) {
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+                       "Inter frame requests a look-ahead frame as LAST");
+  }
+  if (gld_frame_sort_idx == -1 || gld_frame_sort_idx >= cur_frame_sort_idx) {
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+                       "Inter frame requests a look-ahead frame as GOLDEN");
+  }
+
+  // Sort ref frames based on their frame_offset values.
+  qsort(ref_frame_info, REF_FRAMES, sizeof(REF_FRAME_INFO),
+        compare_ref_frame_info);
+
+  // Identify forward and backward reference frames.
+  // Forward reference: offset < order_hint
+  // Backward reference: offset >= order_hint
+  /* After the sort: [0, fwd_start_idx) are invalid slots (sort_idx == -1),
+   * [fwd_start_idx, fwd_end_idx] are forward refs, the rest backward. */
+  fwd_start_idx = 0;
+  fwd_end_idx = REF_FRAMES - 1;
+
+  for (i = 0; i < REF_FRAMES; i++) {
+    if (ref_frame_info[i].sort_idx == -1) {
+      fwd_start_idx++;
+      continue;
+    }
+
+    if (ref_frame_info[i].sort_idx >= cur_frame_sort_idx) {
+      fwd_end_idx = i - 1;
+      break;
+    }
+  }
+
+  bwd_start_idx = fwd_end_idx + 1;
+  bwd_end_idx = REF_FRAMES - 1;
+
+  // === Backward Reference Frames ===
+
+  // == ALTREF_FRAME ==
+  /* ALTREF gets the farthest backward ref, BWDREF and ALTREF2 the nearest. */
+  if (bwd_start_idx <= bwd_end_idx) {
+    set_ref_frame_info(remapped_ref_idx, ALTREF_FRAME - LAST_FRAME,
+                       &ref_frame_info[bwd_end_idx]);
+    ref_flag_list[ALTREF_FRAME - LAST_FRAME] = 1;
+    bwd_end_idx--;
+  }
+
+  // == BWDREF_FRAME ==
+  if (bwd_start_idx <= bwd_end_idx) {
+    set_ref_frame_info(remapped_ref_idx, BWDREF_FRAME - LAST_FRAME,
+                       &ref_frame_info[bwd_start_idx]);
+    ref_flag_list[BWDREF_FRAME - LAST_FRAME] = 1;
+    bwd_start_idx++;
+  }
+
+  // == ALTREF2_FRAME ==
+  if (bwd_start_idx <= bwd_end_idx) {
+    set_ref_frame_info(remapped_ref_idx, ALTREF2_FRAME - LAST_FRAME,
+                       &ref_frame_info[bwd_start_idx]);
+    ref_flag_list[ALTREF2_FRAME - LAST_FRAME] = 1;
+  }
+
+  // === Forward Reference Frames ===
+
+  /* Pin LAST and GOLDEN to the slots the bitstream signaled. */
+  for (i = fwd_start_idx; i <= fwd_end_idx; ++i) {
+    // == LAST_FRAME ==
+    if (ref_frame_info[i].map_idx == lst_map_idx) {
+      set_ref_frame_info(remapped_ref_idx, LAST_FRAME - LAST_FRAME,
+                         &ref_frame_info[i]);
+      ref_flag_list[LAST_FRAME - LAST_FRAME] = 1;
+    }
+
+    // == GOLDEN_FRAME ==
+    if (ref_frame_info[i].map_idx == gld_map_idx) {
+      set_ref_frame_info(remapped_ref_idx, GOLDEN_FRAME - LAST_FRAME,
+                         &ref_frame_info[i]);
+      ref_flag_list[GOLDEN_FRAME - LAST_FRAME] = 1;
+    }
+  }
+
+  assert(ref_flag_list[LAST_FRAME - LAST_FRAME] == 1 &&
+         ref_flag_list[GOLDEN_FRAME - LAST_FRAME] == 1);
+
+  // == LAST2_FRAME ==
+  // == LAST3_FRAME ==
+  // == BWDREF_FRAME ==
+  // == ALTREF2_FRAME ==
+  // == ALTREF_FRAME ==
+
+  // Set up the reference frames in the anti-chronological order.
+  for (ref_idx = 0; ref_idx < (INTER_REFS_PER_FRAME - 2); ref_idx++) {
+    const MV_REFERENCE_FRAME ref_frame = ref_frame_list[ref_idx];
+
+    if (ref_flag_list[ref_frame - LAST_FRAME] == 1) continue;
+
+    /* Skip forward slots already pinned to LAST or GOLDEN. */
+    while (fwd_start_idx <= fwd_end_idx &&
+           (ref_frame_info[fwd_end_idx].map_idx == lst_map_idx ||
+            ref_frame_info[fwd_end_idx].map_idx == gld_map_idx)) {
+      fwd_end_idx--;
+    }
+    if (fwd_start_idx > fwd_end_idx) break;
+
+    set_ref_frame_info(remapped_ref_idx, ref_frame - LAST_FRAME,
+                       &ref_frame_info[fwd_end_idx]);
+    ref_flag_list[ref_frame - LAST_FRAME] = 1;
+
+    fwd_end_idx--;
+  }
+
+  // Assign all the remaining frame(s), if any, to the earliest reference frame.
+  for (; ref_idx < (INTER_REFS_PER_FRAME - 2); ref_idx++) {
+    const MV_REFERENCE_FRAME ref_frame = ref_frame_list[ref_idx];
+    if (ref_flag_list[ref_frame - LAST_FRAME] == 1) continue;
+    set_ref_frame_info(remapped_ref_idx, ref_frame - LAST_FRAME,
+                       &ref_frame_info[fwd_start_idx]);
+    ref_flag_list[ref_frame - LAST_FRAME] = 1;
+  }
+
+  /* Every inter reference slot must have been filled. */
+  for (i = 0; i < INTER_REFS_PER_FRAME; i++) {
+    assert(ref_flag_list[i] == 1);
+  }
+}
+
+/* Copy the current frame's order hint into cm->cur_frame and snapshot each
+ * reference's order hint into cur_frame->ref_order_hints[] (read back later
+ * by motion_field_projection()/av1_setup_motion_field()). */
+void av1_setup_frame_buf_refs(AV1_COMMON *cm) {
+  MV_REFERENCE_FRAME ref_frame;
+  cm->cur_frame->order_hint = cm->current_frame.order_hint;
+
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+    const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref_frame);
+    if (buf != NULL)
+      cm->cur_frame->ref_order_hints[ref_frame - LAST_FRAME] = buf->order_hint;
+  }
+}
+
+/* ref_frame_sign_bias[ref] = 1 when the reference comes after the current
+ * frame in display order, 0 otherwise; all zeros when order hints are
+ * disabled or the reference buffer is missing. */
+void av1_setup_frame_sign_bias(AV1_COMMON *cm) {
+  MV_REFERENCE_FRAME ref_frame;
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+    const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref_frame);
+    if (cm->seq_params.order_hint_info.enable_order_hint && buf != NULL) {
+      const int ref_order_hint = buf->order_hint;
+      cm->ref_frame_sign_bias[ref_frame] =
+          (get_relative_dist(&cm->seq_params.order_hint_info, ref_order_hint,
+                             (int)cm->current_frame.order_hint) <= 0)
+              ? 
0
+              : 1;
+    } else {
+      cm->ref_frame_sign_bias[ref_frame] = 0;
+    }
+  }
+}
+
+
+/*
+ * Decide whether skip mode is allowed for this frame and pick its two
+ * reference indices: nearest forward + nearest backward reference, or (if
+ * no backward reference exists) the two nearest forward references.
+ * Disabled for intra frames, SINGLE_REFERENCE mode, or when order hints
+ * are off.
+ */
+void av1_setup_skip_mode_allowed(AV1_COMMON *cm)
+{
+  const OrderHintInfo *const order_hint_info = &cm->seq_params.order_hint_info;
+  SkipModeInfo *const skip_mode_info = &cm->current_frame.skip_mode_info;
+  int i;
+  int cur_order_hint;
+  /* [0] tracks the nearest forward ref (-1 = none yet),
+   * [1] the nearest backward ref (INT_MAX = none yet). */
+  int ref_order_hints[2] = { -1, INT_MAX };
+  int ref_idx[2] = { INVALID_IDX, INVALID_IDX };
+
+  skip_mode_info->skip_mode_allowed = 0;
+  skip_mode_info->ref_frame_idx_0 = INVALID_IDX;
+  skip_mode_info->ref_frame_idx_1 = INVALID_IDX;
+  av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "av1_setup_skip_mode_allowed %d %d %d\n", order_hint_info->enable_order_hint,
+    frame_is_intra_only(cm),
+    cm->current_frame.reference_mode);
+  if (!order_hint_info->enable_order_hint || frame_is_intra_only(cm) ||
+      cm->current_frame.reference_mode == SINGLE_REFERENCE)
+    return;
+
+  cur_order_hint = cm->current_frame.order_hint;
+
+  // Identify the nearest forward and backward references.
+  for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
+    const RefCntBuffer *const buf = get_ref_frame_buf(cm, LAST_FRAME + i);
+    int ref_order_hint;
+    if (buf == NULL) continue;
+
+    ref_order_hint = buf->order_hint;
+    if (get_relative_dist(order_hint_info, ref_order_hint, cur_order_hint) < 0) {
+      // Forward reference
+      if (ref_order_hints[0] == -1 ||
+          get_relative_dist(order_hint_info, ref_order_hint,
+                            ref_order_hints[0]) > 0) {
+        ref_order_hints[0] = ref_order_hint;
+        ref_idx[0] = i;
+      }
+    } else if (get_relative_dist(order_hint_info, ref_order_hint,
+                                 cur_order_hint) > 0) {
+      // Backward reference
+      if (ref_order_hints[1] == INT_MAX ||
+          get_relative_dist(order_hint_info, ref_order_hint,
+                            ref_order_hints[1]) < 0) {
+        ref_order_hints[1] = ref_order_hint;
+        ref_idx[1] = i;
+      }
+    }
+  }
+
+  if (ref_idx[0] != INVALID_IDX && ref_idx[1] != INVALID_IDX) {
+    // == Bi-directional prediction ==
+    skip_mode_info->skip_mode_allowed = 1;
+    skip_mode_info->ref_frame_idx_0 = AOMMIN(ref_idx[0], ref_idx[1]);
+    
skip_mode_info->ref_frame_idx_1 = AOMMAX(ref_idx[0], ref_idx[1]);
+  } else if (ref_idx[0] != INVALID_IDX && ref_idx[1] == INVALID_IDX) {
+    // == Forward prediction only ==
+    // Identify the second nearest forward reference.
+    ref_order_hints[1] = -1;
+    for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
+      const RefCntBuffer *const buf = get_ref_frame_buf(cm, LAST_FRAME + i);
+      int ref_order_hint;
+      if (buf == NULL) continue;
+
+      ref_order_hint = buf->order_hint;
+      if ((ref_order_hints[0] != -1 &&
+           get_relative_dist(order_hint_info, ref_order_hint, ref_order_hints[0]) < 0) &&
+          (ref_order_hints[1] == -1 ||
+           get_relative_dist(order_hint_info, ref_order_hint, ref_order_hints[1]) > 0)) {
+        // Second closest forward reference
+        ref_order_hints[1] = ref_order_hint;
+        ref_idx[1] = i;
+      }
+    }
+    if (ref_order_hints[1] != -1) {
+      skip_mode_info->skip_mode_allowed = 1;
+      skip_mode_info->ref_frame_idx_0 = AOMMIN(ref_idx[0], ref_idx[1]);
+      skip_mode_info->ref_frame_idx_1 = AOMMAX(ref_idx[0], ref_idx[1]);
+    }
+  }
+  av1_print2(AV1_DEBUG_BUFMGR_DETAIL,
+    "skip_mode_info: skip_mode_allowed 0x%x 0x%x 0x%x\n",
+    cm->current_frame.skip_mode_info.skip_mode_allowed,
+    cm->current_frame.skip_mode_info.ref_frame_idx_0,
+    cm->current_frame.skip_mode_info.ref_frame_idx_1);
+}
+
+/* True when the frame header may code allow_ref_frame_mvs: error resilience
+ * off, ref-frame-MVs and order hints enabled, and not an intra-only frame. */
+static inline int frame_might_allow_ref_frame_mvs(const AV1_COMMON *cm) {
+  return !cm->error_resilient_mode &&
+         cm->seq_params.order_hint_info.enable_ref_frame_mvs &&
+         cm->seq_params.order_hint_info.enable_order_hint &&
+         !frame_is_intra_only(cm);
+}
+
+#ifdef ORI_CODE
+/*
+* segmentation
+*/
+/* Per-feature signedness of segment data (indexed by SEG_LVL_*). */
+static const int seg_feature_data_signed[SEG_LVL_MAX] = {
+  1, 1, 1, 1, 1, 0, 0, 0
+};
+
+/* Per-feature maximum magnitude of segment data. */
+static const int seg_feature_data_max[SEG_LVL_MAX] = { MAXQ,
+                                                       MAX_LOOP_FILTER,
+                                                       MAX_LOOP_FILTER,
+                                                       MAX_LOOP_FILTER,
+                                                       MAX_LOOP_FILTER,
+                                                       7,
+                                                       0,
+                                                       0 };
+
+
+/* Deep-copy segmentation feature masks/data plus the derived
+ * segid_preskip/last_active_segid fields from src to dst. */
+static inline void segfeatures_copy(struct segmentation *dst,
+                                    const struct segmentation *src) {
+  int i, j;
+  for (i = 0; i < MAX_SEGMENTS; i++) {
+    dst->feature_mask[i] = 
src->feature_mask[i];
+    for (j = 0; j < SEG_LVL_MAX; j++) {
+      dst->feature_data[i][j] = src->feature_data[i][j];
+    }
+  }
+  dst->segid_preskip = src->segid_preskip;
+  dst->last_active_segid = src->last_active_segid;
+}
+
+/* Reset all segmentation feature data and enable masks to zero. */
+static void av1_clearall_segfeatures(struct segmentation *seg) {
+  av1_zero(seg->feature_data);
+  av1_zero(seg->feature_mask);
+}
+
+/* Enable feature 'feature_id' for segment 'segment_id'. */
+static void av1_enable_segfeature(struct segmentation *seg, int segment_id,
+                                  int feature_id) {
+  seg->feature_mask[segment_id] |= 1 << feature_id;
+}
+
+/* Recompute derived fields: segid_preskip (set if any enabled feature is at
+ * or above SEG_LVL_REF_FRAME) and last_active_segid (highest segment index
+ * with any enabled feature). */
+void av1_calculate_segdata(struct segmentation *seg) {
+  seg->segid_preskip = 0;
+  seg->last_active_segid = 0;
+  for (int i = 0; i < MAX_SEGMENTS; i++) {
+    for (int j = 0; j < SEG_LVL_MAX; j++) {
+      if (seg->feature_mask[i] & (1 << j)) {
+        seg->segid_preskip |= (j >= SEG_LVL_REF_FRAME);
+        seg->last_active_segid = i;
+      }
+    }
+  }
+}
+
+/* Maximum allowed magnitude for the given feature's data. */
+static int av1_seg_feature_data_max(int feature_id) {
+  return seg_feature_data_max[feature_id];
+}
+
+/* Nonzero when the given feature's data is signed. */
+static int av1_is_segfeature_signed(int feature_id) {
+  return seg_feature_data_signed[feature_id];
+}
+
+/* Store seg_data for (segment_id, feature_id); asserts that the value
+ * respects the feature's signedness and maximum magnitude. */
+static void av1_set_segdata(struct segmentation *seg, int segment_id,
+                            int feature_id, int seg_data) {
+  if (seg_data < 0) {
+    assert(seg_feature_data_signed[feature_id]);
+    assert(-seg_data <= seg_feature_data_max[feature_id]);
+  } else {
+    assert(seg_data <= seg_feature_data_max[feature_id]);
+  }
+
+  seg->feature_data[segment_id][feature_id] = seg_data;
+}
+
+/* Clamp 'value' into the inclusive range [low, high]. */
+static inline int clamp(int value, int low, int high) {
+  return value < low ? low : (value > high ? 
high : value);
+}
+
+/* Parse/apply segmentation parameters for the current frame from 'params'
+ * (which replaces the original bitstream reads, see the commented
+ * aom_rb_read_* calls): handles the disabled case, the update flags, the
+ * per-segment/per-feature data, then mirrors the result into
+ * cm->cur_frame->seg. */
+static void setup_segmentation(AV1_COMMON *const cm,
+                               union param_u *params) {
+  struct segmentation *const seg = &cm->seg;
+
+  seg->update_map = 0;
+  seg->update_data = 0;
+  seg->temporal_update = 0;
+
+  seg->enabled = params->p.seg_enabled; //aom_rb_read_bit(-1, defmark, rb);
+  if (!seg->enabled) {
+    /* Segmentation off: clear the map and publish an all-zero seg struct. */
+    if (cm->cur_frame->seg_map)
+      memset(cm->cur_frame->seg_map, 0, (cm->mi_rows * cm->mi_cols));
+
+    memset(seg, 0, sizeof(*seg));
+    segfeatures_copy(&cm->cur_frame->seg, seg);
+    return;
+  }
+  /* Previous frame's seg map is reusable only at identical mi dimensions. */
+  if (cm->seg.enabled && cm->prev_frame &&
+      (cm->mi_rows == cm->prev_frame->mi_rows) &&
+      (cm->mi_cols == cm->prev_frame->mi_cols)) {
+    cm->last_frame_seg_map = cm->prev_frame->seg_map;
+  } else {
+    cm->last_frame_seg_map = NULL;
+  }
+  // Read update flags
+  if (cm->primary_ref_frame == PRIMARY_REF_NONE) {
+    // These frames can't use previous frames, so must signal map + features
+    seg->update_map = 1;
+    seg->temporal_update = 0;
+    seg->update_data = 1;
+  } else {
+    seg->update_map = params->p.seg_update_map; // aom_rb_read_bit(-1, defmark, rb);
+    if (seg->update_map) {
+      seg->temporal_update = params->p.seg_temporal_update; //aom_rb_read_bit(-1, defmark, rb);
+    } else {
+      seg->temporal_update = 0;
+    }
+    seg->update_data = params->p.seg_update_data; //aom_rb_read_bit(-1, defmark, rb);
+  }
+
+  // Segmentation data update
+  if (seg->update_data) {
+    av1_clearall_segfeatures(seg);
+
+    for (int i = 0; i < MAX_SEGMENTS; i++) {
+      for (int j = 0; j < SEG_LVL_MAX; j++) {
+        int data = 0;
+        const int feature_enabled = params->p.seg_feature_enabled ;//aom_rb_read_bit(-1, defmark, rb);
+        if (feature_enabled) {
+          av1_enable_segfeature(seg, i, j);
+
+          const int data_max = av1_seg_feature_data_max(j);
+          const int data_min = -data_max;
+          /*
+          const int ubits = get_unsigned_bits(data_max);
+
+          if (av1_is_segfeature_signed(j)) {
+            data = aom_rb_read_inv_signed_literal(-1, defmark, rb, ubits);
+          } else {
+            data = aom_rb_read_literal(-1, defmark, rb, ubits);
+          }*/
+          data = 
params->p.seg_data;
+          data = clamp(data, data_min, data_max);
+        }
+        av1_set_segdata(seg, i, j, data);
+      }
+    }
+    av1_calculate_segdata(seg);
+  } else if (cm->prev_frame) {
+    /* No data update: inherit features from the previous frame. */
+    segfeatures_copy(seg, &cm->prev_frame->seg);
+  }
+  segfeatures_copy(&cm->cur_frame->seg, seg);
+}
+#endif
+
+/**/
+
+
+int av1_decode_frame_headers_and_setup(AV1Decoder *pbi, int trailing_bits_present, union param_u *params)
+{
+  AV1_COMMON *const cm = pbi->common;
+  /*
+  read_uncompressed_header()
+  */
+  const SequenceHeader *const seq_params = &cm->seq_params;
+  CurrentFrame *const current_frame = &cm->current_frame;
+  //MACROBLOCKD *const xd = &pbi->mb;
+  BufferPool *const pool = cm->buffer_pool;
+  RefCntBuffer *const frame_bufs = pool->frame_bufs;
+  int i;
+  int frame_size_override_flag;
+  unsigned long flags;
+
+  if (!pbi->sequence_header_ready) {
+    aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+                       "No sequence header");
+  }
+  cm->last_frame_type = current_frame->frame_type;
+
+  if (seq_params->reduced_still_picture_hdr) {
+    cm->show_existing_frame = 0;
+    cm->show_frame = 1;
+    current_frame->frame_type = KEY_FRAME;
+    if (pbi->sequence_header_changed) {
+      // This is the start of a new coded video sequence.
+      pbi->sequence_header_changed = 0;
+      pbi->decoding_first_frame = 1;
+      reset_frame_buffers(pbi);
+    }
+    cm->error_resilient_mode = 1;
+  } else {
+    cm->show_existing_frame = params->p.show_existing_frame;
+    pbi->reset_decoder_state = 0;
+    if (cm->show_existing_frame) {
+      int existing_frame_idx;
+      RefCntBuffer *frame_to_show;
+      if (pbi->sequence_header_changed) {
+        aom_internal_error(
+            &cm->error, AOM_CODEC_CORRUPT_FRAME,
+            "New sequence header starts with a show_existing_frame.");
+      }
+      // Show an existing frame directly.
+ existing_frame_idx = params->p.existing_frame_idx; //aom_rb_read_literal(rb, 3); + frame_to_show = cm->ref_frame_map[existing_frame_idx]; + if (frame_to_show == NULL) { + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "Buffer does not contain a decoded frame"); + return 0; + } + if (seq_params->decoder_model_info_present_flag && + cm->timing_info.equal_picture_interval == 0) { + cm->frame_presentation_time = params->p.frame_presentation_time; + //read_temporal_point_info(cm); + } + if (seq_params->frame_id_numbers_present_flag) { + //int frame_id_length = seq_params->frame_id_length; + int display_frame_id = params->p.display_frame_id; //aom_rb_read_literal(rb, frame_id_length); + /* Compare display_frame_id with ref_frame_id and check valid for + * referencing */ + if (display_frame_id != cm->ref_frame_id[existing_frame_idx] || + cm->valid_for_referencing[existing_frame_idx] == 0) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Reference buffer frame ID mismatch"); + } + lock_buffer_pool(pool, flags); + assert(frame_to_show->ref_count > 0); + // cm->cur_frame should be the buffer referenced by the return value + // of the get_free_fb() call in av1_receive_compressed_data(), and + // generate_next_ref_frame_map() has not been called, so ref_count + // should still be 1. + assert(cm->cur_frame->ref_count == 1); + // assign_frame_buffer_p() decrements ref_count directly rather than + // call decrease_ref_count(). If cm->cur_frame->raw_frame_buffer has + // already been allocated, it will not be released by + // assign_frame_buffer_p()! 
+ assert(!cm->cur_frame->raw_frame_buffer.data); + + if (check_buff_has_show(frame_to_show)) { + frame_to_show->buf.v4l_buf_index = cm->cur_frame->buf.index; + frame_to_show->buf.repeat_count ++; + cm->cur_frame->buf.repeat_pic = &frame_to_show->buf; + } + + frame_to_show->buf.timestamp = cm->cur_frame->buf.timestamp; + + assign_frame_buffer_p(&cm->cur_frame, frame_to_show); + pbi->reset_decoder_state = frame_to_show->frame_type == KEY_FRAME; + unlock_buffer_pool(pool, flags); + +#ifdef ORI_CODE + cm->lf.filter_level[0] = 0; + cm->lf.filter_level[1] = 0; +#endif + cm->show_frame = 1; + + // Section 6.8.2: It is a requirement of bitstream conformance that when + // show_existing_frame is used to show a previous frame, that the value + // of showable_frame for the previous frame was equal to 1. + if (!frame_to_show->showable_frame) { + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "Buffer does not contain a showable frame"); + } + // Section 6.8.2: It is a requirement of bitstream conformance that when + // show_existing_frame is used to show a previous frame with + // RefFrameType[ frame_to_show_map_idx ] equal to KEY_FRAME, that the + // frame is output via the show_existing_frame mechanism at most once. + if (pbi->reset_decoder_state) frame_to_show->showable_frame = 0; + +#ifdef ORI_CODE + cm->film_grain_params = frame_to_show->film_grain_params; +#endif + if (pbi->reset_decoder_state) { + show_existing_frame_reset(pbi, existing_frame_idx); + } else { + current_frame->refresh_frame_flags = 0; + } + + return 0; + } + + current_frame->frame_type = (FRAME_TYPE)params->p.frame_type; //aom_rb_read_literal(rb, 2); + if (pbi->sequence_header_changed) { + if (current_frame->frame_type == KEY_FRAME) { + // This is the start of a new coded video sequence. 
+ pbi->sequence_header_changed = 0; + pbi->decoding_first_frame = 1; + reset_frame_buffers(pbi); + } else { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Sequence header has changed without a keyframe."); + } + } + cm->show_frame = params->p.show_frame; //aom_rb_read_bit(rb); + if (seq_params->still_picture && + (current_frame->frame_type != KEY_FRAME || !cm->show_frame)) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Still pictures must be coded as shown keyframes"); + } + cm->showable_frame = current_frame->frame_type != KEY_FRAME; + if (cm->show_frame) { + if (seq_params->decoder_model_info_present_flag && + cm->timing_info.equal_picture_interval == 0) + cm->frame_presentation_time = params->p.frame_presentation_time; + //read_temporal_point_info(cm); + } else { + // See if this frame can be used as show_existing_frame in future + cm->showable_frame = params->p.showable_frame;//aom_rb_read_bit(rb); + } + cm->cur_frame->show_frame = cm->show_frame; + cm->cur_frame->showable_frame = cm->showable_frame; + cm->error_resilient_mode = + frame_is_sframe(cm) || + (current_frame->frame_type == KEY_FRAME && cm->show_frame) + ? 
1 + : params->p.error_resilient_mode; //aom_rb_read_bit(rb); + } + +#ifdef ORI_CODE + cm->disable_cdf_update = aom_rb_read_bit(rb); + if (seq_params->force_screen_content_tools == 2) { + cm->allow_screen_content_tools = aom_rb_read_bit(rb); + } else { + cm->allow_screen_content_tools = seq_params->force_screen_content_tools; + } + + if (cm->allow_screen_content_tools) { + if (seq_params->force_integer_mv == 2) { + cm->cur_frame_force_integer_mv = aom_rb_read_bit(rb); + } else { + cm->cur_frame_force_integer_mv = seq_params->force_integer_mv; + } + } else { + cm->cur_frame_force_integer_mv = 0; + } +#endif + + frame_size_override_flag = 0; + cm->allow_intrabc = 0; + cm->primary_ref_frame = PRIMARY_REF_NONE; + + if (!seq_params->reduced_still_picture_hdr) { + if (seq_params->frame_id_numbers_present_flag) { + int frame_id_length = seq_params->frame_id_length; + int diff_len = seq_params->delta_frame_id_length; + int prev_frame_id = 0; + int have_prev_frame_id = + !pbi->decoding_first_frame && + !(current_frame->frame_type == KEY_FRAME && cm->show_frame); + if (have_prev_frame_id) { + prev_frame_id = cm->current_frame_id; + } + cm->current_frame_id = params->p.current_frame_id; //aom_rb_read_literal(rb, frame_id_length); + + if (have_prev_frame_id) { + int diff_frame_id; + if (cm->current_frame_id > prev_frame_id) { + diff_frame_id = cm->current_frame_id - prev_frame_id; + } else { + diff_frame_id = + (1 << frame_id_length) + cm->current_frame_id - prev_frame_id; + } + /* Check current_frame_id for conformance */ + if (prev_frame_id == cm->current_frame_id || + diff_frame_id >= (1 << (frame_id_length - 1))) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Invalid value of current_frame_id"); + } + } + /* Check if some frames need to be marked as not valid for referencing */ + for (i = 0; i < REF_FRAMES; i++) { + if (current_frame->frame_type == KEY_FRAME && cm->show_frame) { + cm->valid_for_referencing[i] = 0; + } else if (cm->current_frame_id - (1 << 
diff_len) > 0) { + if (cm->ref_frame_id[i] > cm->current_frame_id || + cm->ref_frame_id[i] < cm->current_frame_id - (1 << diff_len)) + cm->valid_for_referencing[i] = 0; + } else { + if (cm->ref_frame_id[i] > cm->current_frame_id && + cm->ref_frame_id[i] < (1 << frame_id_length) + + cm->current_frame_id - (1 << diff_len)) + cm->valid_for_referencing[i] = 0; + } + } + } + + frame_size_override_flag = frame_is_sframe(cm) ? 1 : params->p.frame_size_override_flag; //aom_rb_read_bit(rb); + + current_frame->order_hint = params->p.order_hint; /*aom_rb_read_literal( + rb, seq_params->order_hint_info.order_hint_bits_minus_1 + 1);*/ + current_frame->frame_number = current_frame->order_hint; + + if (!cm->error_resilient_mode && !frame_is_intra_only(cm)) { + cm->primary_ref_frame = params->p.primary_ref_frame;//aom_rb_read_literal(rb, PRIMARY_REF_BITS); + } + } + + if (seq_params->decoder_model_info_present_flag) { + cm->buffer_removal_time_present = params->p.buffer_removal_time_present; //aom_rb_read_bit(rb); + if (cm->buffer_removal_time_present) { + int op_num; + for (op_num = 0; + op_num < seq_params->operating_points_cnt_minus_1 + 1; op_num++) { + if (cm->op_params[op_num].decoder_model_param_present_flag) { + if ((((seq_params->operating_point_idc[op_num] >> + cm->temporal_layer_id) & + 0x1) && + ((seq_params->operating_point_idc[op_num] >> + (cm->spatial_layer_id + 8)) & + 0x1)) || + seq_params->operating_point_idc[op_num] == 0) { + cm->op_frame_timing[op_num].buffer_removal_time = + params->p.op_frame_timing[op_num]; + /*aom_rb_read_unsigned_literal( + rb, cm->buffer_model.buffer_removal_time_length);*/ + } else { + cm->op_frame_timing[op_num].buffer_removal_time = 0; + } + } else { + cm->op_frame_timing[op_num].buffer_removal_time = 0; + } + } + } + } + if (current_frame->frame_type == KEY_FRAME) { + if (!cm->show_frame) { // unshown keyframe (forward keyframe) + current_frame->refresh_frame_flags = params->p.refresh_frame_flags; //aom_rb_read_literal(rb, REF_FRAMES); 
+ } else { // shown keyframe + current_frame->refresh_frame_flags = (1 << REF_FRAMES) - 1; + } + + for (i = 0; i < INTER_REFS_PER_FRAME; ++i) { + cm->remapped_ref_idx[i] = INVALID_IDX; + } + if (pbi->need_resync) { + reset_ref_frame_map(pbi); + pbi->need_resync = 0; + } + } else { + if (current_frame->frame_type == INTRA_ONLY_FRAME) { + current_frame->refresh_frame_flags = params->p.refresh_frame_flags; //aom_rb_read_literal(rb, REF_FRAMES); + if (current_frame->refresh_frame_flags == 0xFF) { + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "Intra only frames cannot have refresh flags 0xFF"); + } + if (pbi->need_resync) { + reset_ref_frame_map(pbi); + pbi->need_resync = 0; + } + } else if (pbi->need_resync != 1) { /* Skip if need resync */ + current_frame->refresh_frame_flags = + frame_is_sframe(cm) ? 0xFF : params->p.refresh_frame_flags; //aom_rb_read_literal(rb, REF_FRAMES); + } + } + + if (!frame_is_intra_only(cm) || current_frame->refresh_frame_flags != 0xFF) { + // Read all ref frame order hints if error_resilient_mode == 1 + if (cm->error_resilient_mode && + seq_params->order_hint_info.enable_order_hint) { + int ref_idx; + for (ref_idx = 0; ref_idx < REF_FRAMES; ref_idx++) { + // Read order hint from bit stream + unsigned int order_hint = params->p.ref_order_hint[ref_idx];/*aom_rb_read_literal( + rb, seq_params->order_hint_info.order_hint_bits_minus_1 + 1);*/ + // Get buffer + RefCntBuffer *buf = cm->ref_frame_map[ref_idx]; + int buf_idx; + if (buf == NULL || order_hint != buf->order_hint) { + if (buf != NULL) { + lock_buffer_pool(pool, flags); + decrease_ref_count(pbi, buf, pool); + unlock_buffer_pool(pool, flags); + } + // If no corresponding buffer exists, allocate a new buffer with all + // pixels set to neutral grey. 
+ buf_idx = get_free_frame_buffer(cm); + if (buf_idx == INVALID_IDX) { + aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, + "Unable to find free frame buffer"); + } + buf = &frame_bufs[buf_idx]; + lock_buffer_pool(pool, flags); + if (aom_realloc_frame_buffer(cm, &buf->buf, seq_params->max_frame_width, + seq_params->max_frame_height, buf->order_hint)) { + decrease_ref_count(pbi, buf, pool); + unlock_buffer_pool(pool, flags); + aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, + "Failed to allocate frame buffer"); + } + unlock_buffer_pool(pool, flags); +#ifdef ORI_CODE + set_planes_to_neutral_grey(seq_params, &buf->buf, 0); +#endif + cm->ref_frame_map[ref_idx] = buf; + buf->order_hint = order_hint; + } + } + } + } + + if (current_frame->frame_type == KEY_FRAME) { + setup_frame_size(cm, frame_size_override_flag, params); +#ifdef ORI_CODE + if (cm->allow_screen_content_tools && !av1_superres_scaled(cm)) + cm->allow_intrabc = aom_rb_read_bit(rb); +#endif + cm->allow_ref_frame_mvs = 0; + cm->prev_frame = NULL; + } else { + cm->allow_ref_frame_mvs = 0; + + if (current_frame->frame_type == INTRA_ONLY_FRAME) { +#ifdef ORI_CODE + cm->cur_frame->film_grain_params_present = + seq_params->film_grain_params_present; +#endif + setup_frame_size(cm, frame_size_override_flag, params); +#ifdef ORI_CODE + if (cm->allow_screen_content_tools && !av1_superres_scaled(cm)) + cm->allow_intrabc = aom_rb_read_bit(rb); +#endif + } else if (pbi->need_resync != 1) { /* Skip if need resync */ + int frame_refs_short_signaling = 0; + // Frame refs short signaling is off when error resilient mode is on. 
+ if (seq_params->order_hint_info.enable_order_hint) + frame_refs_short_signaling = params->p.frame_refs_short_signaling;//aom_rb_read_bit(rb); + + if (frame_refs_short_signaling) { + // == LAST_FRAME == + const int lst_ref = params->p.lst_ref; //aom_rb_read_literal(rb, REF_FRAMES_LOG2); + const RefCntBuffer *const lst_buf = cm->ref_frame_map[lst_ref]; + + // == GOLDEN_FRAME == + const int gld_ref = params->p.gld_ref; //aom_rb_read_literal(rb, REF_FRAMES_LOG2); + const RefCntBuffer *const gld_buf = cm->ref_frame_map[gld_ref]; + + // Most of the time, streams start with a keyframe. In that case, + // ref_frame_map will have been filled in at that point and will not + // contain any NULLs. However, streams are explicitly allowed to start + // with an intra-only frame, so long as they don't then signal a + // reference to a slot that hasn't been set yet. That's what we are + // checking here. + if (lst_buf == NULL) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Inter frame requests nonexistent reference"); + if (gld_buf == NULL) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Inter frame requests nonexistent reference"); + + av1_set_frame_refs(cm, cm->remapped_ref_idx, lst_ref, gld_ref); + } + + for (i = 0; i < INTER_REFS_PER_FRAME; ++i) { + int ref = 0; + if (!frame_refs_short_signaling) { + ref = params->p.remapped_ref_idx[i];//aom_rb_read_literal(rb, REF_FRAMES_LOG2); + + // Most of the time, streams start with a keyframe. In that case, + // ref_frame_map will have been filled in at that point and will not + // contain any NULLs. However, streams are explicitly allowed to start + // with an intra-only frame, so long as they don't then signal a + // reference to a slot that hasn't been set yet. That's what we are + // checking here. 
+ if (cm->ref_frame_map[ref] == NULL) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Inter frame requests nonexistent reference"); + cm->remapped_ref_idx[i] = ref; + } else { + ref = cm->remapped_ref_idx[i]; + } + + cm->ref_frame_sign_bias[LAST_FRAME + i] = 0; + + if (seq_params->frame_id_numbers_present_flag) { + int frame_id_length = seq_params->frame_id_length; + //int diff_len = seq_params->delta_frame_id_length; + int delta_frame_id_minus_1 = params->p.delta_frame_id_minus_1[i];//aom_rb_read_literal(rb, diff_len); + int ref_frame_id = + ((cm->current_frame_id - (delta_frame_id_minus_1 + 1) + + (1 << frame_id_length)) % + (1 << frame_id_length)); + // Compare values derived from delta_frame_id_minus_1 and + // refresh_frame_flags. Also, check valid for referencing + if (ref_frame_id != cm->ref_frame_id[ref] || + cm->valid_for_referencing[ref] == 0) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Reference buffer frame ID mismatch"); + } + } + + if (!cm->error_resilient_mode && frame_size_override_flag) { + setup_frame_size_with_refs(cm, params); + } else { + setup_frame_size(cm, frame_size_override_flag, params); + } +#ifdef ORI_CODE + if (cm->cur_frame_force_integer_mv) { + cm->allow_high_precision_mv = 0; + } else { + cm->allow_high_precision_mv = aom_rb_read_bit(rb); + } + cm->interp_filter = read_frame_interp_filter(rb); + cm->switchable_motion_mode = aom_rb_read_bit(rb); +#endif + } + + cm->prev_frame = get_primary_ref_frame_buf(cm); + if (cm->primary_ref_frame != PRIMARY_REF_NONE && + get_primary_ref_frame_buf(cm) == NULL) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Reference frame containing this frame's initial " + "frame context is unavailable."); + } +#if 0 + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%d,%d,%d,%d\n",cm->error_resilient_mode, + cm->seq_params.order_hint_info.enable_ref_frame_mvs, + cm->seq_params.order_hint_info.enable_order_hint,frame_is_intra_only(cm)); + + 
printf("frame_might_allow_ref_frame_mvs()=>%d, current_frame->frame_type=%d, pbi->need_resync=%d, params->p.allow_ref_frame_mvs=%d\n", + frame_might_allow_ref_frame_mvs(cm), current_frame->frame_type, pbi->need_resync, + params->p.allow_ref_frame_mvs); +#endif + if (!(current_frame->frame_type == INTRA_ONLY_FRAME) && + pbi->need_resync != 1) { + if (frame_might_allow_ref_frame_mvs(cm)) + cm->allow_ref_frame_mvs = params->p.allow_ref_frame_mvs; //aom_rb_read_bit(-1, "<allow_ref_frame_mvs>", rb); + else + cm->allow_ref_frame_mvs = 0; + +#ifdef SUPPORT_SCALE_FACTOR + for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { + const RefCntBuffer *const ref_buf = get_ref_frame_buf(cm, i); + struct scale_factors *const ref_scale_factors = + get_ref_scale_factors(cm, i); + if (ref_buf != NULL) { +#ifdef AML + av1_setup_scale_factors_for_frame( + ref_scale_factors, ref_buf->buf.y_crop_width, + ref_buf->buf.y_crop_height, cm->dec_width, cm->height); +#else + av1_setup_scale_factors_for_frame( + ref_scale_factors, ref_buf->buf.y_crop_width, + ref_buf->buf.y_crop_height, cm->width, cm->height); +#endif + } + if (ref_scale_factors) { + if ((!av1_is_valid_scale(ref_scale_factors))) + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "Reference frame has invalid dimensions"); + } + } +#endif + } + } + + av1_setup_frame_buf_refs(cm); + + av1_setup_frame_sign_bias(cm); + + cm->cur_frame->frame_type = current_frame->frame_type; + + if (seq_params->frame_id_numbers_present_flag) { + update_ref_frame_id(cm, cm->current_frame_id); + } +#ifdef ORI_CODE + const int might_bwd_adapt = + !(seq_params->reduced_still_picture_hdr) && !(cm->disable_cdf_update); + if (might_bwd_adapt) { + cm->refresh_frame_context = aom_rb_read_bit(rb) + ? 
REFRESH_FRAME_CONTEXT_DISABLED + : REFRESH_FRAME_CONTEXT_BACKWARD; + } else { + cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_DISABLED; + } +#endif + + cm->cur_frame->buf.bit_depth = seq_params->bit_depth; + cm->cur_frame->buf.color_primaries = seq_params->color_primaries; + cm->cur_frame->buf.transfer_characteristics = + seq_params->transfer_characteristics; + cm->cur_frame->buf.matrix_coefficients = seq_params->matrix_coefficients; + cm->cur_frame->buf.monochrome = seq_params->monochrome; + cm->cur_frame->buf.chroma_sample_position = + seq_params->chroma_sample_position; + cm->cur_frame->buf.color_range = seq_params->color_range; + cm->cur_frame->buf.render_width = cm->render_width; + cm->cur_frame->buf.render_height = cm->render_height; + + if (pbi->need_resync) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Keyframe / intra-only frame required to reset decoder" + " state"); + } + + generate_next_ref_frame_map(pbi); + +#ifdef ORI_CODE + if (cm->allow_intrabc) { + // Set parameters corresponding to no filtering. 
+ struct loopfilter *lf = &cm->lf; + lf->filter_level[0] = 0; + lf->filter_level[1] = 0; + cm->cdef_info.cdef_bits = 0; + cm->cdef_info.cdef_strengths[0] = 0; + cm->cdef_info.nb_cdef_strengths = 1; + cm->cdef_info.cdef_uv_strengths[0] = 0; + cm->rst_info[0].frame_restoration_type = RESTORE_NONE; + cm->rst_info[1].frame_restoration_type = RESTORE_NONE; + cm->rst_info[2].frame_restoration_type = RESTORE_NONE; + } + + read_tile_info(pbi, rb); + if (!av1_is_min_tile_width_satisfied(cm)) { + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Minimum tile width requirement not satisfied"); + } + + setup_quantization(cm, rb); + xd->bd = (int)seq_params->bit_depth; + + if (cm->num_allocated_above_context_planes < av1_num_planes(cm) || + cm->num_allocated_above_context_mi_col < cm->mi_cols || + cm->num_allocated_above_contexts < cm->tile_rows) { + av1_free_above_context_buffers(cm, cm->num_allocated_above_contexts); + if (av1_alloc_above_context_buffers(cm, cm->tile_rows)) + aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR, + "Failed to allocate context buffers"); + } + + if (cm->primary_ref_frame == PRIMARY_REF_NONE) { + av1_setup_past_independence(cm); + } + + setup_segmentation(cm, params); + + cm->delta_q_info.delta_q_res = 1; + cm->delta_q_info.delta_lf_res = 1; + cm->delta_q_info.delta_lf_present_flag = 0; + cm->delta_q_info.delta_lf_multi = 0; + cm->delta_q_info.delta_q_present_flag = + cm->base_qindex > 0 ? 
aom_rb_read_bit(-1, defmark, rb) : 0; + if (cm->delta_q_info.delta_q_present_flag) { + xd->current_qindex = cm->base_qindex; + cm->delta_q_info.delta_q_res = 1 << aom_rb_read_literal(-1, defmark, rb, 2); + if (!cm->allow_intrabc) + cm->delta_q_info.delta_lf_present_flag = aom_rb_read_bit(-1, defmark, rb); + if (cm->delta_q_info.delta_lf_present_flag) { + cm->delta_q_info.delta_lf_res = 1 << aom_rb_read_literal(-1, defmark, rb, 2); + cm->delta_q_info.delta_lf_multi = aom_rb_read_bit(-1, defmark, rb); + av1_reset_loop_filter_delta(xd, av1_num_planes(cm)); + } + } + + xd->cur_frame_force_integer_mv = cm->cur_frame_force_integer_mv; + + for (int i = 0; i < MAX_SEGMENTS; ++i) { + const int qindex = av1_get_qindex(&cm->seg, i, cm->base_qindex); + xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 && + cm->u_dc_delta_q == 0 && cm->u_ac_delta_q == 0 && + cm->v_dc_delta_q == 0 && cm->v_ac_delta_q == 0; + xd->qindex[i] = qindex; + } + cm->coded_lossless = is_coded_lossless(cm, xd); + cm->all_lossless = cm->coded_lossless && !av1_superres_scaled(cm); + setup_segmentation_dequant(cm, xd); + if (cm->coded_lossless) { + cm->lf.filter_level[0] = 0; + cm->lf.filter_level[1] = 0; + } + if (cm->coded_lossless || !seq_params->enable_cdef) { + cm->cdef_info.cdef_bits = 0; + cm->cdef_info.cdef_strengths[0] = 0; + cm->cdef_info.cdef_uv_strengths[0] = 0; + } + if (cm->all_lossless || !seq_params->enable_restoration) { + cm->rst_info[0].frame_restoration_type = RESTORE_NONE; + cm->rst_info[1].frame_restoration_type = RESTORE_NONE; + cm->rst_info[2].frame_restoration_type = RESTORE_NONE; + } + setup_loopfilter(cm, rb); + + if (!cm->coded_lossless && seq_params->enable_cdef) { + setup_cdef(cm, rb); + } + if (!cm->all_lossless && seq_params->enable_restoration) { + decode_restoration_mode(cm, rb); + } + + cm->tx_mode = read_tx_mode(cm, rb); +#endif + + current_frame->reference_mode = read_frame_reference_mode(cm, params); + +#ifdef ORI_CODE + if (current_frame->reference_mode != 
SINGLE_REFERENCE) + setup_compound_reference_mode(cm); + + +#endif + + av1_setup_skip_mode_allowed(cm); + + /* + the point that ucode send send_bufmgr_info + and wait bufmgr code to return is_skip_mode_allowed + */ + + /* + read_uncompressed_header() end + */ + + av1_setup_motion_field(cm); +#ifdef AML + cm->cur_frame->mi_cols = cm->mi_cols; + cm->cur_frame->mi_rows = cm->mi_rows; + cm->cur_frame->dec_width = cm->dec_width; + + /* + superres_post_decode(AV1Decoder *pbi) => + av1_superres_upscale(cm, pool); => + aom_realloc_frame_buffer( + frame_to_show, cm->superres_upscaled_width, + cm->superres_upscaled_height, seq_params->subsampling_x, + seq_params->subsampling_y, seq_params->use_highbitdepth, + AOM_BORDER_IN_PIXELS, cm->byte_alignment, fb, cb, cb_priv) + */ + aom_realloc_frame_buffer(cm, &cm->cur_frame->buf, + cm->superres_upscaled_width, cm->superres_upscaled_height, + cm->cur_frame->order_hint); +#endif + return 0; +} + +static int are_seq_headers_consistent(const SequenceHeader *seq_params_old, + const SequenceHeader *seq_params_new) { + return !memcmp(seq_params_old, seq_params_new, sizeof(SequenceHeader)); +} + +aom_codec_err_t aom_get_num_layers_from_operating_point_idc( + int operating_point_idc, unsigned int *number_spatial_layers, + unsigned int *number_temporal_layers) { + // derive number of spatial/temporal layers from operating_point_idc + + if (!number_spatial_layers || !number_temporal_layers) + return AOM_CODEC_INVALID_PARAM; + + if (operating_point_idc == 0) { + *number_temporal_layers = 1; + *number_spatial_layers = 1; + } else { + int j; + *number_spatial_layers = 0; + *number_temporal_layers = 0; + for (j = 0; j < MAX_NUM_SPATIAL_LAYERS; j++) { + *number_spatial_layers += + (operating_point_idc >> (j + MAX_NUM_TEMPORAL_LAYERS)) & 0x1; + } + for (j = 0; j < MAX_NUM_TEMPORAL_LAYERS; j++) { + *number_temporal_layers += (operating_point_idc >> j) & 0x1; + } + } + + return AOM_CODEC_OK; +} + +void av1_read_sequence_header(AV1_COMMON *cm, union 
param_u *params, + SequenceHeader *seq_params) { +#ifdef ORI_CODE + const int num_bits_width = aom_rb_read_literal(-1, "<num_bits_width>", rb, 4) + 1; + const int num_bits_height = aom_rb_read_literal(-1, "<num_bits_height>", rb, 4) + 1; + const int max_frame_width = aom_rb_read_literal(-1, "<max_frame_width>", rb, num_bits_width) + 1; + const int max_frame_height = aom_rb_read_literal(-1, "<max_frame_height>", rb, num_bits_height) + 1; + + seq_params->num_bits_width = num_bits_width; + seq_params->num_bits_height = num_bits_height; +#endif + seq_params->max_frame_width = params->p.max_frame_width; //max_frame_width; + seq_params->max_frame_height = params->p.max_frame_height; //max_frame_height; + + if (seq_params->reduced_still_picture_hdr) { + seq_params->frame_id_numbers_present_flag = 0; + } else { + seq_params->frame_id_numbers_present_flag = params->p.frame_id_numbers_present_flag; //aom_rb_read_bit(-1, "<frame_id_numbers_present_flag>", rb); + } + if (seq_params->frame_id_numbers_present_flag) { + // We must always have delta_frame_id_length < frame_id_length, + // in order for a frame to be referenced with a unique delta. + // Avoid wasting bits by using a coding that enforces this restriction. 
+#ifdef ORI_CODE + seq_params->delta_frame_id_length = aom_rb_read_literal(-1, "<delta_frame_id_length>", rb, 4) + 2; + seq_params->frame_id_length = params->p.frame_id_length + aom_rb_read_literal(-1, "<frame_id_length>", rb, 3) + seq_params->delta_frame_id_length + 1; +#else + seq_params->delta_frame_id_length = params->p.delta_frame_id_length; + seq_params->frame_id_length = params->p.frame_id_length + seq_params->delta_frame_id_length + 1; +#endif + if (seq_params->frame_id_length > 16) + aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, + "Invalid frame_id_length"); + } +#ifdef ORI_CODE + setup_sb_size(seq_params, rb); + seq_params->enable_filter_intra = aom_rb_read_bit(-1, "<enable_filter_intra>", rb); + seq_params->enable_intra_edge_filter = aom_rb_read_bit(-1, "<enable_intra_edge_filter>", rb); +#endif + + if (seq_params->reduced_still_picture_hdr) { + seq_params->enable_interintra_compound = 0; + seq_params->enable_masked_compound = 0; + seq_params->enable_warped_motion = 0; + seq_params->enable_dual_filter = 0; + seq_params->order_hint_info.enable_order_hint = 0; + seq_params->order_hint_info.enable_dist_wtd_comp = 0; + seq_params->order_hint_info.enable_ref_frame_mvs = 0; + seq_params->force_screen_content_tools = 2; // SELECT_SCREEN_CONTENT_TOOLS + seq_params->force_integer_mv = 2; // SELECT_INTEGER_MV + seq_params->order_hint_info.order_hint_bits_minus_1 = -1; + } else { +#ifdef ORI_CODE + seq_params->enable_interintra_compound = aom_rb_read_bit(-1, "<enable_interintra_compound>", rb); + seq_params->enable_masked_compound = aom_rb_read_bit(-1, "<enable_masked_compound>", rb); + seq_params->enable_warped_motion = aom_rb_read_bit(-1, "<enable_warped_motion>", rb); + seq_params->enable_dual_filter = aom_rb_read_bit(-1, "<enable_dual_filter>", rb); +#endif + seq_params->order_hint_info.enable_order_hint = params->p.enable_order_hint; //aom_rb_read_bit(-1, "<order_hint_info.enable_order_hint>", rb); + seq_params->order_hint_info.enable_dist_wtd_comp = 
+ seq_params->order_hint_info.enable_order_hint ? params->p.enable_dist_wtd_comp : 0; //aom_rb_read_bit(-1, "<order_hint_info.enable_dist_wtd_comp>", rb) : 0; + seq_params->order_hint_info.enable_ref_frame_mvs = + seq_params->order_hint_info.enable_order_hint ? params->p.enable_ref_frame_mvs : 0; //aom_rb_read_bit(-1, "<order_hint_info.enable_ref_frame_mvs>", rb) : 0; + +#ifdef ORI_CODE + if (aom_rb_read_bit(-1, defmark, rb)) { + seq_params->force_screen_content_tools = + 2; // SELECT_SCREEN_CONTENT_TOOLS + } else { + seq_params->force_screen_content_tools = aom_rb_read_bit(-1, defmark, rb); + } + + if (seq_params->force_screen_content_tools > 0) { + if (aom_rb_read_bit(-1, defmark, rb)) { + seq_params->force_integer_mv = 2; // SELECT_INTEGER_MV + } else { + seq_params->force_integer_mv = aom_rb_read_bit(-1, defmark, rb); + } + } else { + seq_params->force_integer_mv = 2; // SELECT_INTEGER_MV + } +#endif + seq_params->order_hint_info.order_hint_bits_minus_1 = + seq_params->order_hint_info.enable_order_hint + ? params->p.order_hint_bits_minus_1 /*aom_rb_read_literal(-1, "<order_hint_info.order_hint_bits_minus_1>", rb, 3)*/ + : -1; + } + seq_params->enable_superres = params->p.enable_superres; //aom_rb_read_bit(-1, defmark, rb); + +#ifdef ORI_CODE + seq_params->enable_cdef = aom_rb_read_bit(-1, defmark, rb); + seq_params->enable_restoration = aom_rb_read_bit(-1, defmark, rb); +#endif +} + +#ifdef ORI_CODE +void av1_read_op_parameters_info(AV1_COMMON *const cm, + struct aom_read_bit_buffer *rb, int op_num) { + // The cm->op_params array has MAX_NUM_OPERATING_POINTS + 1 elements. 
+ if (op_num > MAX_NUM_OPERATING_POINTS) { + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "AV1 does not support %d decoder model operating points", + op_num + 1); + } + + cm->op_params[op_num].decoder_buffer_delay = aom_rb_read_unsigned_literal(-1, defmark, + rb, cm->buffer_model.encoder_decoder_buffer_delay_length); + + cm->op_params[op_num].encoder_buffer_delay = aom_rb_read_unsigned_literal(-1, defmark, + rb, cm->buffer_model.encoder_decoder_buffer_delay_length); + + cm->op_params[op_num].low_delay_mode_flag = aom_rb_read_bit(-1, defmark, rb); +} +#endif + +static int is_valid_seq_level_idx(AV1_LEVEL seq_level_idx) { + return seq_level_idx < SEQ_LEVELS || seq_level_idx == SEQ_LEVEL_MAX; +} + +static uint32_t read_sequence_header_obu(AV1Decoder *pbi, + union param_u *params) { + AV1_COMMON *const cm = pbi->common; + int i; + int operating_point; + // Verify rb has been configured to report errors. + //assert(rb->error_handler); + + // Use a local variable to store the information as we decode. At the end, + // if no errors have occurred, cm->seq_params is updated. 
+ SequenceHeader sh = cm->seq_params; + SequenceHeader *const seq_params = &sh; + + seq_params->profile = params->p.profile; //av1_read_profile(rb); + if (seq_params->profile > CONFIG_MAX_DECODE_PROFILE) { + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + return 0; + } + + // Still picture or not + seq_params->still_picture = params->p.still_picture; //aom_rb_read_bit(-1, "<still_picture>", rb); + seq_params->reduced_still_picture_hdr = params->p.reduced_still_picture_hdr; //aom_rb_read_bit(-1, "<reduced_still_picture_hdr>", rb); + // Video must have reduced_still_picture_hdr = 0 + if (!seq_params->still_picture && seq_params->reduced_still_picture_hdr) { + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + return 0; + } + + if (seq_params->reduced_still_picture_hdr) { + cm->timing_info_present = 0; + seq_params->decoder_model_info_present_flag = 0; + seq_params->display_model_info_present_flag = 0; + seq_params->operating_points_cnt_minus_1 = 0; + seq_params->operating_point_idc[0] = 0; + //if (!read_bitstream_level(0, "<seq_level_idx>", &seq_params->seq_level_idx[0], rb)) { + if (!is_valid_seq_level_idx(params->p.seq_level_idx[0])) { + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + return 0; + } + seq_params->tier[0] = 0; + cm->op_params[0].decoder_model_param_present_flag = 0; + cm->op_params[0].display_model_param_present_flag = 0; + } else { + cm->timing_info_present = params->p.timing_info_present; //aom_rb_read_bit(-1, "<timing_info_present>", rb); // timing_info_present_flag + if (cm->timing_info_present) { +#ifdef ORI_CODE + av1_read_timing_info_header(cm, rb); +#endif + seq_params->decoder_model_info_present_flag = params->p.decoder_model_info_present_flag; //aom_rb_read_bit(-1, "<decoder_model_info_present_flag>", rb); +#ifdef ORI_CODE + if (seq_params->decoder_model_info_present_flag) + av1_read_decoder_model_info(cm, rb); +#endif + } else { + seq_params->decoder_model_info_present_flag = 0; + } +#ifdef ORI_CODE + 
seq_params->display_model_info_present_flag = aom_rb_read_bit(-1, "<display_model_info_present_flag>", rb); +#endif + seq_params->operating_points_cnt_minus_1 = params->p.operating_points_cnt_minus_1; + //aom_rb_read_literal(-1, "<operating_points_cnt_minus_1>", rb, OP_POINTS_CNT_MINUS_1_BITS); + for (i = 0; i < seq_params->operating_points_cnt_minus_1 + 1; i++) { + seq_params->operating_point_idc[i] = params->p.operating_point_idc[i]; + //aom_rb_read_literal(i, "<operating_point_idc>", rb, OP_POINTS_IDC_BITS); + //if (!read_bitstream_level(i, "<seq_level_idx>", &seq_params->seq_level_idx[i], rb)) { + if (!is_valid_seq_level_idx(params->p.seq_level_idx[i])) { + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + return 0; + } + // This is the seq_level_idx[i] > 7 check in the spec. seq_level_idx 7 + // is equivalent to level 3.3. +#ifdef ORI_CODE + if (seq_params->seq_level_idx[i] >= SEQ_LEVEL_4_0) + seq_params->tier[i] = aom_rb_read_bit(i, "<tier>", rb); + else + seq_params->tier[i] = 0; +#endif + if (seq_params->decoder_model_info_present_flag) { + cm->op_params[i].decoder_model_param_present_flag = params->p.decoder_model_param_present_flag[i]; //aom_rb_read_bit(-1, defmark, rb); +#ifdef ORI_CODE + if (cm->op_params[i].decoder_model_param_present_flag) + av1_read_op_parameters_info(cm, rb, i); +#endif + } else { + cm->op_params[i].decoder_model_param_present_flag = 0; + } +#ifdef ORI_CODE + if (cm->timing_info_present && + (cm->timing_info.equal_picture_interval || + cm->op_params[i].decoder_model_param_present_flag)) { + cm->op_params[i].bitrate = av1_max_level_bitrate( + seq_params->profile, seq_params->seq_level_idx[i], + seq_params->tier[i]); + // Level with seq_level_idx = 31 returns a high "dummy" bitrate to pass + // the check + if (cm->op_params[i].bitrate == 0) + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "AV1 does not support this combination of " + "profile, level, and tier."); + // Buffer size in bits/s is bitrate in bits/s * 1 s + 
cm->op_params[i].buffer_size = cm->op_params[i].bitrate; + } +#endif + if (cm->timing_info_present && cm->timing_info.equal_picture_interval && + !cm->op_params[i].decoder_model_param_present_flag) { + // When the decoder_model_parameters are not sent for this op, set + // the default ones that can be used with the resource availability mode + cm->op_params[i].decoder_buffer_delay = 70000; + cm->op_params[i].encoder_buffer_delay = 20000; + cm->op_params[i].low_delay_mode_flag = 0; + } + +#ifdef ORI_CODE + if (seq_params->display_model_info_present_flag) { + cm->op_params[i].display_model_param_present_flag = aom_rb_read_bit(-1, defmark, rb); + if (cm->op_params[i].display_model_param_present_flag) { + cm->op_params[i].initial_display_delay = + aom_rb_read_literal(-1, defmark, rb, 4) + 1; + if (cm->op_params[i].initial_display_delay > 10) + aom_internal_error( + &cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "AV1 does not support more than 10 decoded frames delay"); + } else { + cm->op_params[i].initial_display_delay = 10; + } + } else { + cm->op_params[i].display_model_param_present_flag = 0; + cm->op_params[i].initial_display_delay = 10; + } +#endif + } + } + // This decoder supports all levels. 
Choose operating point provided by + // external means + operating_point = pbi->operating_point; + if (operating_point < 0 || + operating_point > seq_params->operating_points_cnt_minus_1) + operating_point = 0; + pbi->current_operating_point = + seq_params->operating_point_idc[operating_point]; + if (aom_get_num_layers_from_operating_point_idc( + pbi->current_operating_point, &cm->number_spatial_layers, + &cm->number_temporal_layers) != AOM_CODEC_OK) { + cm->error.error_code = AOM_CODEC_ERROR; + return 0; + } + + av1_read_sequence_header(cm, params, seq_params); +#ifdef ORI_CODE + av1_read_color_config(rb, pbi->allow_lowbitdepth, seq_params, &cm->error); + if (!(seq_params->subsampling_x == 0 && seq_params->subsampling_y == 0) && + !(seq_params->subsampling_x == 1 && seq_params->subsampling_y == 1) && + !(seq_params->subsampling_x == 1 && seq_params->subsampling_y == 0)) { + aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, + "Only 4:4:4, 4:2:2 and 4:2:0 are currently supported, " + "%d %d subsampling is not supported.\n", + seq_params->subsampling_x, seq_params->subsampling_y); + } + seq_params->film_grain_params_present = aom_rb_read_bit(-1, "<film_grain_params_present>", rb); + + if (av1_check_trailing_bits(pbi, rb) != 0) { + // cm->error.error_code is already set. + return 0; + } +#endif + + // If a sequence header has been decoded before, we check if the new + // one is consistent with the old one. 
+ if (pbi->sequence_header_ready) { + if (!are_seq_headers_consistent(&cm->seq_params, seq_params)) + pbi->sequence_header_changed = 1; + } + + cm->seq_params = *seq_params; + pbi->sequence_header_ready = 1; + return 0; + +} + +int aom_decode_frame_from_obus(AV1Decoder *pbi, union param_u *params, int obu_type) +{ + AV1_COMMON *const cm = pbi->common; + ObuHeader obu_header; + int frame_decoding_finished = 0; + uint32_t frame_header_size = 0; + + //struct aom_read_bit_buffer rb; + size_t payload_size = 0; + size_t decoded_payload_size = 0; + size_t obu_payload_offset = 0; + //size_t bytes_read = 0; + + memset(&obu_header, 0, sizeof(obu_header)); +#ifdef ORI_CODE + pbi->seen_frame_header = 0; +#else + /* set in the test.c*/ +#endif + + obu_header.type = obu_type; + pbi->cur_obu_type = obu_header.type; + if (av1_is_debug(AOM_DEBUG_PRINT_LIST_INFO)) + dump_params(pbi, params); + switch (obu_header.type) { + case OBU_SEQUENCE_HEADER: + decoded_payload_size = read_sequence_header_obu(pbi, params); + if (cm->error.error_code != AOM_CODEC_OK) return -1; + break; + + case OBU_FRAME_HEADER: + case OBU_REDUNDANT_FRAME_HEADER: + case OBU_FRAME: + if (obu_header.type == OBU_REDUNDANT_FRAME_HEADER) { + if (!pbi->seen_frame_header) { + cm->error.error_code = AOM_CODEC_CORRUPT_FRAME; + return -1; + } + } else { + // OBU_FRAME_HEADER or OBU_FRAME. + if (pbi->seen_frame_header) { + cm->error.error_code = AOM_CODEC_CORRUPT_FRAME; + return -1; + } + } + // Only decode first frame header received + if (!pbi->seen_frame_header || + (cm->large_scale_tile && !pbi->camera_frame_header_ready)) { + frame_header_size = av1_decode_frame_headers_and_setup( + pbi, /*&rb, data, p_data_end,*/obu_header.type != OBU_FRAME, params); + pbi->seen_frame_header = 1; + if (!pbi->ext_tile_debug && cm->large_scale_tile) + pbi->camera_frame_header_ready = 1; + } else { + // TODO(wtc): Verify that the frame_header_obu is identical to the + // original frame_header_obu. 
For now just skip frame_header_size + // bytes in the bit buffer. + if (frame_header_size > payload_size) { + cm->error.error_code = AOM_CODEC_CORRUPT_FRAME; + return -1; + } + assert(rb.bit_offset == 0); +#ifdef ORI_CODE + rb.bit_offset = 8 * frame_header_size; +#endif + } + + decoded_payload_size = frame_header_size; + pbi->frame_header_size = frame_header_size; + + if (cm->show_existing_frame) { + if (obu_header.type == OBU_FRAME) { + cm->error.error_code = AOM_CODEC_UNSUP_BITSTREAM; + return -1; + } + frame_decoding_finished = 1; + pbi->seen_frame_header = 0; + break; + } + + // In large scale tile coding, decode the common camera frame header + // before any tile list OBU. + if (!pbi->ext_tile_debug && pbi->camera_frame_header_ready) { + frame_decoding_finished = 1; + // Skip the rest of the frame data. + decoded_payload_size = payload_size; + // Update data_end. +#ifdef ORI_CODE + *p_data_end = data_end; +#endif + break; + } +#if 0 //def AML + frame_decoding_finished = 1; +#endif + if (obu_header.type != OBU_FRAME) break; + obu_payload_offset = frame_header_size; + // Byte align the reader before reading the tile group. + // byte_alignment() has set cm->error.error_code if it returns -1. +#ifdef ORI_CODE + if (byte_alignment(cm, &rb)) return -1; + AOM_FALLTHROUGH_INTENDED; // fall through to read tile group. 
+#endif + default: + break; + } + return frame_decoding_finished; +} + +int get_buffer_index(AV1Decoder *pbi, RefCntBuffer *buffer) +{ + AV1_COMMON *const cm = pbi->common; + int i = -1; + + if (buffer) { + for (i = 0; i < FRAME_BUFFERS; i++) { + RefCntBuffer *buf = + &cm->buffer_pool->frame_bufs[i]; + if (buf == buffer) { + break; + } + } + } + return i; +} + +void dump_buffer(RefCntBuffer *buf) +{ + int i; + pr_info("ref_count %d, vf_ref %d, order_hint %d, w/h(%d,%d) showable_frame %d frame_type %d canvas(%d,%d) w/h(%d,%d) mi_c/r(%d,%d) header 0x%x ref_deltas(", + buf->ref_count, buf->buf.vf_ref, buf->order_hint, buf->width, buf->height, buf->showable_frame, buf->frame_type, + buf->buf.mc_canvas_y, buf->buf.mc_canvas_u_v, + buf->buf.y_crop_width, buf->buf.y_crop_height, + buf->mi_cols, buf->mi_rows, + buf->buf.header_adr); + for (i = 0; i < REF_FRAMES; i++) + pr_info("%d,", buf->ref_deltas[i]); + pr_info("), ref_order_hints("); + + for (i = 0; i < INTER_REFS_PER_FRAME; i++) + pr_info("%d ", buf->ref_order_hints[i]); + pr_info(")"); +} + +void dump_ref_buffer_info(AV1Decoder *pbi, int i) +{ + AV1_COMMON *const cm = pbi->common; + pr_info("remapped_ref_idx %d, ref_frame_sign_bias %d, ref_frame_id %d, valid_for_referencing %d ref_frame_side %d ref_frame_map idx %d, next_ref_frame_map idx %d", + cm->remapped_ref_idx[i], + cm->ref_frame_sign_bias[i], + cm->ref_frame_id[i], + cm->valid_for_referencing[i], + cm->ref_frame_side[i], + get_buffer_index(pbi, cm->ref_frame_map[i]), + get_buffer_index(pbi, cm->next_ref_frame_map[i])); +} + +void dump_mv_refs(AV1Decoder *pbi) +{ + int i, j; + AV1_COMMON *const cm = pbi->common; + for (i = 0; i < cm->mv_ref_id_index; i++) { + pr_info("%d: ref_id %d cal_tpl_mvs %d mv_ref_offset: ", + i, cm->mv_ref_id[i], cm->mv_cal_tpl_mvs[i]); + for (j = 0; j < REF_FRAMES; j++) + pr_info("%d ", cm->mv_ref_offset[i][j]); + pr_info("\n"); + } +} + +void dump_ref_spec_bufs(AV1Decoder *pbi) +{ + int i; + AV1_COMMON *const cm = pbi->common; + for (i 
= 0; i < INTER_REFS_PER_FRAME; ++i) { + PIC_BUFFER_CONFIG *pic_config = av1_get_ref_frame_spec_buf(cm, LAST_FRAME + i); + if (pic_config == NULL) continue; + pr_info("%d: index %d order_hint %d header 0x%x dw_header 0x%x canvas(%d,%d) mv_wr_start 0x%x lcu_total %d\n", + i, pic_config->index, + pic_config->order_hint, + pic_config->header_adr, +#ifdef AOM_AV1_MMU_DW + pic_config->header_dw_adr, +#else + 0, +#endif + pic_config->mc_canvas_y, + pic_config->mc_canvas_u_v, + pic_config->mpred_mv_wr_start_addr, + pic_config->lcu_total + ); + } +} + +#ifdef SUPPORT_SCALE_FACTOR +void dump_scale_factors(AV1Decoder *pbi) +{ + int i; + AV1_COMMON *const cm = pbi->common; + for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { + struct scale_factors *const sf = + get_ref_scale_factors(cm, i); + if (sf) + pr_info("%d: is_scaled %d x_scale_fp %d, y_scale_fp %d\n", + i, av1_is_scaled(sf), + sf->x_scale_fp, sf->y_scale_fp); + else + pr_info("%d: sf null\n", i); + } +} + +#endif + +void dump_buffer_status(AV1Decoder *pbi) +{ + int i; + AV1_COMMON *const cm = pbi->common; + BufferPool *const pool = cm->buffer_pool; + unsigned long flags; + + lock_buffer_pool(pool, flags); + + pr_info("%s: pbi %p cm %p cur_frame %p\n", __func__, pbi, cm, cm->cur_frame); + + pr_info("Buffer Pool:\n"); + for (i = 0; i < FRAME_BUFFERS; i++) { + RefCntBuffer *buf = + &cm->buffer_pool->frame_bufs[i]; + pr_info("%d: ", i); + if (buf) + dump_buffer(buf); + pr_info("\n"); + } + + if (cm->prev_frame) { + pr_info("prev_frame (%d): ", + get_buffer_index(pbi, cm->prev_frame)); + dump_buffer(cm->prev_frame); + pr_info("\n"); + } + if (cm->cur_frame) { + pr_info("cur_frame (%d): ", + get_buffer_index(pbi, cm->cur_frame)); + dump_buffer(cm->cur_frame); + pr_info("\n"); + } + pr_info("REF_FRAMES Info(ref buf is ref_frame_map[remapped_ref_idx[i-1]], i=1~7):\n"); + for (i = 0; i < REF_FRAMES; i++) { + pr_info("%d: ", i); + dump_ref_buffer_info(pbi, i); + pr_info("\n"); + } + pr_info("Ref Spec Buffers:\n"); + 
dump_ref_spec_bufs(pbi); + + pr_info("MV refs:\n"); + dump_mv_refs(pbi); + +#ifdef SUPPORT_SCALE_FACTOR + pr_info("Scale factors:\n"); + dump_scale_factors(pbi); +#endif + unlock_buffer_pool(pool, flags); +} + + +struct param_dump_item_s { + unsigned int size; + char* name; + unsigned int adr_off; +} param_dump_items[] = { + {1, "profile", (unsigned long)&(((union param_u *)0)->p.profile )}, + {1, "still_picture", (unsigned long)&(((union param_u *)0)->p.still_picture )}, + {1, "reduced_still_picture_hdr", (unsigned long)&(((union param_u *)0)->p.reduced_still_picture_hdr )}, + {1, "decoder_model_info_present_flag", (unsigned long)&(((union param_u *)0)->p.decoder_model_info_present_flag)}, + {1, "max_frame_width", (unsigned long)&(((union param_u *)0)->p.max_frame_width )}, + {1, "max_frame_height", (unsigned long)&(((union param_u *)0)->p.max_frame_height )}, + {1, "frame_id_numbers_present_flag", (unsigned long)&(((union param_u *)0)->p.frame_id_numbers_present_flag )}, + {1, "delta_frame_id_length", (unsigned long)&(((union param_u *)0)->p.delta_frame_id_length )}, + {1, "frame_id_length", (unsigned long)&(((union param_u *)0)->p.frame_id_length )}, + {1, "order_hint_bits_minus_1", (unsigned long)&(((union param_u *)0)->p.order_hint_bits_minus_1 )}, + {1, "enable_order_hint", (unsigned long)&(((union param_u *)0)->p.enable_order_hint )}, + {1, "enable_dist_wtd_comp", (unsigned long)&(((union param_u *)0)->p.enable_dist_wtd_comp )}, + {1, "enable_ref_frame_mvs", (unsigned long)&(((union param_u *)0)->p.enable_ref_frame_mvs )}, + {1, "enable_superres", (unsigned long)&(((union param_u *)0)->p.enable_superres )}, + {1, "superres_scale_denominator", (unsigned long)&(((union param_u *)0)->p.superres_scale_denominator )}, + {1, "show_existing_frame", (unsigned long)&(((union param_u *)0)->p.show_existing_frame )}, + {1, "frame_type", (unsigned long)&(((union param_u *)0)->p.frame_type )}, + {1, "show_frame", (unsigned long)&(((union param_u *)0)->p.show_frame )}, + 
{1, "e.r.r.o.r_resilient_mode", (unsigned long)&(((union param_u *)0)->p.error_resilient_mode )}, + {1, "refresh_frame_flags", (unsigned long)&(((union param_u *)0)->p.refresh_frame_flags )}, + {1, "showable_frame", (unsigned long)&(((union param_u *)0)->p.showable_frame )}, + {1, "current_frame_id", (unsigned long)&(((union param_u *)0)->p.current_frame_id )}, + {1, "frame_size_override_flag", (unsigned long)&(((union param_u *)0)->p.frame_size_override_flag )}, + {1, "order_hint", (unsigned long)&(((union param_u *)0)->p.order_hint )}, + {1, "primary_ref_frame", (unsigned long)&(((union param_u *)0)->p.primary_ref_frame )}, + {1, "frame_refs_short_signaling", (unsigned long)&(((union param_u *)0)->p.frame_refs_short_signaling )}, + {1, "frame_width", (unsigned long)&(((union param_u *)0)->p.frame_width )}, + {1, "dec_frame_width", (unsigned long)&(((union param_u *)0)->p.dec_frame_width )}, + {1, "frame_width_scaled", (unsigned long)&(((union param_u *)0)->p.frame_width_scaled )}, + {1, "frame_height", (unsigned long)&(((union param_u *)0)->p.frame_height )}, + {1, "reference_mode", (unsigned long)&(((union param_u *)0)->p.reference_mode )}, + {1, "update_parameters", (unsigned long)&(((union param_u *)0)->p.update_parameters )}, + {1, "film_grain_params_ref_idx", (unsigned long)&(((union param_u *)0)->p.film_grain_params_ref_idx )}, + {1, "allow_ref_frame_mvs", (unsigned long)&(((union param_u *)0)->p.allow_ref_frame_mvs )}, + {1, "lst_ref", (unsigned long)&(((union param_u *)0)->p.lst_ref )}, + {1, "gld_ref", (unsigned long)&(((union param_u *)0)->p.gld_ref )}, + {INTER_REFS_PER_FRAME, "remapped_ref_idx", (unsigned long)&(((union param_u *)0)->p.remapped_ref_idx[0] )}, + {INTER_REFS_PER_FRAME, "delta_frame_id_minus_1", (unsigned long)&(((union param_u *)0)->p.delta_frame_id_minus_1[0] )}, + {REF_FRAMES, "ref_order_hint", (unsigned long)&(((union param_u *)0)->p.ref_order_hint[0] )}, +}; + +void dump_params(AV1Decoder *pbi, union param_u *params) +{ + int i, j; 
+ unsigned char *start_adr = (unsigned char*)params; + + pr_info("============ params:\n"); + for (i = 0; i < sizeof(param_dump_items) / sizeof(param_dump_items[0]); i++) { + for (j = 0; j < param_dump_items[i].size; j++) { + if (param_dump_items[i].size > 1) + pr_info("%s(%d): 0x%x\n", + param_dump_items[i].name, j, + *((unsigned short*)(start_adr + param_dump_items[i].adr_off + j * 2))); + else + pr_info("%s: 0x%x\n", param_dump_items[i].name, + *((unsigned short*)(start_adr + param_dump_items[i].adr_off + j * 2))); + } + } +} + +/*static void raw_write_image(AV1Decoder *pbi, PIC_BUFFER_CONFIG *sd) +{ + printf("$$$$$$$ output image\n"); +}*/ + +/* + return 0, need decoding data + 1, decoding done + -1, decoding error + +*/ +int av1_bufmgr_process(AV1Decoder *pbi, union param_u *params, + unsigned char new_compressed_data, int obu_type) +{ + AV1_COMMON *const cm = pbi->common; + int j; + // Release any pending output frames from the previous decoder_decode call. + // We need to do this even if the decoder is being flushed or the input + // arguments are invalid. 
+ BufferPool *const pool = cm->buffer_pool; + int frame_decoded; + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s: pbi %p cm %p cur_frame %p\n", __func__, pbi, cm, cm->cur_frame); + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s: new_compressed_data= %d\n", __func__, new_compressed_data); + for (j = 0; j < pbi->num_output_frames; j++) { + decrease_ref_count(pbi, pbi->output_frames[j], pool); + } + pbi->num_output_frames = 0; + // + if (new_compressed_data) { + if (assign_cur_frame_new_fb(cm) == NULL) { + cm->error.error_code = AOM_CODEC_MEM_ERROR; + return -1; + } + pbi->seen_frame_header = 0; + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "New_compressed_data (%d)\n", new_compressed_data_count++); + + } + + frame_decoded = + aom_decode_frame_from_obus(pbi, params, obu_type); + + if (pbi->cur_obu_type == OBU_FRAME_HEADER || + pbi->cur_obu_type == OBU_REDUNDANT_FRAME_HEADER || + pbi->cur_obu_type == OBU_FRAME) { + if (av1_is_debug(AOM_DEBUG_PRINT_LIST_INFO)) { + pr_info("after bufmgr (frame_decoded %d seen_frame_header %d): ", + frame_decoded, pbi->seen_frame_header); + dump_buffer_status(pbi); + } + } + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s: pbi %p cm %p cur_frame %p\n", __func__, pbi, cm, cm->cur_frame); + + return frame_decoded; + +} + +int av1_get_raw_frame(AV1Decoder *pbi, size_t index, PIC_BUFFER_CONFIG **sd) { + if (index >= pbi->num_output_frames) return -1; + *sd = &pbi->output_frames[index]->buf; + //*grain_params = &pbi->output_frames[index]->film_grain_params; + //aom_clear_system_state(); + return 0; +} + +int av1_bufmgr_postproc(AV1Decoder *pbi, unsigned char frame_decoded) +{ + PIC_BUFFER_CONFIG *sd = NULL; + int index; +#if 0 + if (frame_decoded) { + printf("before swap_frame_buffers: "); + dump_buffer_status(pbi); + } +#endif + swap_frame_buffers(pbi, frame_decoded); + if (frame_decoded) { + if (av1_is_debug(AOM_DEBUG_PRINT_LIST_INFO)) { + pr_info("after swap_frame_buffers: "); + dump_buffer_status(pbi); + } + } + if (frame_decoded) { + pbi->decoding_first_frame = 0; + 
} + + + for (index = 0;;index++) { + if (av1_get_raw_frame(pbi, index, &sd) < 0) + break; + if (sd) + av1_raw_write_image(pbi, sd); + } + return 0; +} + +int aom_realloc_frame_buffer(AV1_COMMON *cm, PIC_BUFFER_CONFIG *pic, + int width, int height, unsigned int order_hint) +{ + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s, index 0x%x, width 0x%x, height 0x%x order_hint 0x%x\n", + __func__, pic->index, width, height, order_hint); + pic->y_crop_width = width; + pic->y_crop_height = height; + pic->order_hint = order_hint; + return 0; +} + + +unsigned char av1_frame_is_inter(const AV1_COMMON *const cm) { + unsigned char is_inter = cm->cur_frame && (cm->cur_frame->frame_type != KEY_FRAME) + && (cm->current_frame.frame_type != INTRA_ONLY_FRAME); + return is_inter; +} + +PIC_BUFFER_CONFIG *av1_get_ref_frame_spec_buf( + const AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) { + RefCntBuffer *buf = get_ref_frame_buf(cm, ref_frame); + if (buf) { + buf->buf.order_hint = buf->order_hint; + return &(buf->buf); + } + return NULL; +} + +struct scale_factors *av1_get_ref_scale_factors( + AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame) +{ + return get_ref_scale_factors(cm, ref_frame); +} + +void av1_set_next_ref_frame_map(AV1Decoder *pbi) { + int ref_index = 0; + int mask; + AV1_COMMON *const cm = pbi->common; + int check_on_show_existing_frame; + av1_print2(AV1_DEBUG_BUFMGR_DETAIL, "%s, %d, mask 0x%x, show_existing_frame %d, reset_decoder_state %d\n", + __func__, pbi->camera_frame_header_ready, + cm->current_frame.refresh_frame_flags, + cm->show_existing_frame, + pbi->reset_decoder_state + ); + if (!pbi->camera_frame_header_ready) { + for (mask = cm->current_frame.refresh_frame_flags; mask; mask >>= 1) { + cm->next_used_ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index]; + ++ref_index; + } + + check_on_show_existing_frame = + !cm->show_existing_frame || pbi->reset_decoder_state; + for (; ref_index < REF_FRAMES && check_on_show_existing_frame; + ++ref_index) 
{ + cm->next_used_ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index]; + } + } +} + +unsigned int av1_get_next_used_ref_info( + const AV1_COMMON *const cm, int i) { + /* + i = 0~1 orde_hint map + i = 2~10 size map[i-2] + */ + unsigned int info = 0; + int j; + if (i < 2) { + /*next_used_ref_frame_map has 8 items*/ + for (j = 0; j < 4; j++) { + RefCntBuffer *buf = + cm->next_used_ref_frame_map[(i * 4) + j]; + if (buf) + info |= ((buf->buf.order_hint & 0xff) + << (j * 8)); + } + } else if (i < 10) { + RefCntBuffer *buf = + cm->next_used_ref_frame_map[i-2]; + if (buf) + info = (buf->buf.y_crop_width << 16) | (buf->buf.y_crop_height & 0xffff); + } else { + for (j = 0; j < 4; j++) { + RefCntBuffer *buf = + cm->next_used_ref_frame_map[((i - 10) * 4) + j]; + if (buf) + info |= ((buf->buf.index & 0xff) + << (j * 8)); + } + } + return info; +} + +RefCntBuffer *av1_get_primary_ref_frame_buf( + const AV1_COMMON *const cm) +{ + return get_primary_ref_frame_buf(cm); +}
diff --git a/drivers/frame_provider/decoder_v4l/vav1/av1_film_grain.c b/drivers/frame_provider/decoder_v4l/vav1/av1_film_grain.c new file mode 100644 index 0000000..a19762c --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/vav1/av1_film_grain.c
@@ -0,0 +1,1194 @@ +/* + * Copyright (c) 2016, Alliance for Open Media. All rights reserved + * + * This source code is subject to the terms of the BSD 2 Clause License and + * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License + * was not distributed with this source code in the LICENSE file, you can + * obtain it at www.aomedia.org/license/software. If the Alliance for Open + * Media Patent License 1.0 was not distributed with this source code in the + * PATENTS file, you can obtain it at www.aomedia.org/license/patent. + */ + +/*!\file + * \brief Describes film grain parameters and film grain synthesis + * + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/kthread.h> +#include <linux/sched/clock.h> +#include "vav1.h" + +#define FILM_GRAIN_REG_SIZE 39 + +#define MID(a, min, max) ((a > max) ? max : ((a > min) ? 
a : min)) + +#define gauss_bits 11 + +#define luma_subblock_size_y 32 +#define luma_subblock_size_x 32 + +#define left_pad 3 +#define right_pad 3 // padding to offset for AR coefficients +#define top_pad 3 +#define bottom_pad 0 +#define ar_padding 3 // maximum lag used for stabilization of AR coefficients + +#define luma_grain_block_offt 0x120 //18 * 16 +#define cb_grain_block_offt 0x1520 //(18 + 320) * 16 + +struct aom_film_grain_t { + u8 apply_grain; + + u8 update_parameters; + + // 8 bit values + s32 scaling_points_y[14][2]; + u8 num_y_points; // value: 0..14 + + // 8 bit values + s32 scaling_points_cb[10][2]; + u8 num_cb_points; // value: 0..10 + + // 8 bit values + s32 scaling_points_cr[10][2]; + u8 num_cr_points; // value: 0..10 + + u8 scaling_shift; // values : 8..11 + + s32 ar_coeff_lag; // values: 0..3 + + // 8 bit values + s32 ar_coeffs_y[24]; + s32 ar_coeffs_cb[25]; + s32 ar_coeffs_cr[25]; + + // Shift value: AR coeffs range + // 6: [-2, 2) + // 7: [-1, 1) + // 8: [-0.5, 0.5) + // 9: [-0.25, 0.25) + u8 ar_coeff_shift; // values : 6..9 + + s8 cb_mult; // 8 bits + s8 cb_luma_mult; // 8 bits + s16 cb_offset; // 9 bits + + s8 cr_mult; // 8 bits + s8 cr_luma_mult; // 8 bits + s16 cr_offset; // 9 bits + + u8 overlap_flag; + + u8 clip_to_restricted_range; + + u8 bit_depth; // video bit depth + + u8 mc_identity; + + u8 chroma_scaling_from_luma; + + u8 grain_scale_shift; + + u16 random_seed; + + u16 random_register_y; + u16 random_register_cb; + u16 random_register_cr; + + s32 scaling_delta_y[14]; + s32 scaling_delta_cb[10]; + s32 scaling_delta_cr[10]; +}; + + +static u32 debug_fgs; +module_param(debug_fgs, uint, 0664); + +// Samples with Gaussian distribution in the range of [-2048, 2047] (12 bits) +// with zero mean and standard deviation of about 512. +// should be divided by 4 for 10-bit range and 16 for 8-bit range. 
+static const s32 gaussian_seq[2048] = { + 56, 568, -180, 172, 124, -84, 172, -64, -900, 24, 820, + 224, 1248, 996, 272, -8, -916, -388, -732, -104, -188, 800, + 112, -652, -320, -376, 140, -252, 492, -168, 44, -788, 588, + -584, 500, -228, 12, 680, 272, -476, 972, -100, 652, 368, + 432, -196, -720, -192, 1000, -332, 652, -136, -552, -604, -4, + 192, -220, -136, 1000, -52, 372, -96, -624, 124, -24, 396, + 540, -12, -104, 640, 464, 244, -208, -84, 368, -528, -740, + 248, -968, -848, 608, 376, -60, -292, -40, -156, 252, -292, + 248, 224, -280, 400, -244, 244, -60, 76, -80, 212, 532, + 340, 128, -36, 824, -352, -60, -264, -96, -612, 416, -704, + 220, -204, 640, -160, 1220, -408, 900, 336, 20, -336, -96, + -792, 304, 48, -28, -1232, -1172, -448, 104, -292, -520, 244, + 60, -948, 0, -708, 268, 108, 356, -548, 488, -344, -136, + 488, -196, -224, 656, -236, -1128, 60, 4, 140, 276, -676, + -376, 168, -108, 464, 8, 564, 64, 240, 308, -300, -400, + -456, -136, 56, 120, -408, -116, 436, 504, -232, 328, 844, + -164, -84, 784, -168, 232, -224, 348, -376, 128, 568, 96, + -1244, -288, 276, 848, 832, -360, 656, 464, -384, -332, -356, + 728, -388, 160, -192, 468, 296, 224, 140, -776, -100, 280, + 4, 196, 44, -36, -648, 932, 16, 1428, 28, 528, 808, + 772, 20, 268, 88, -332, -284, 124, -384, -448, 208, -228, + -1044, -328, 660, 380, -148, -300, 588, 240, 540, 28, 136, + -88, -436, 256, 296, -1000, 1400, 0, -48, 1056, -136, 264, + -528, -1108, 632, -484, -592, -344, 796, 124, -668, -768, 388, + 1296, -232, -188, -200, -288, -4, 308, 100, -168, 256, -500, + 204, -508, 648, -136, 372, -272, -120, -1004, -552, -548, -384, + 548, -296, 428, -108, -8, -912, -324, -224, -88, -112, -220, + -100, 996, -796, 548, 360, -216, 180, 428, -200, -212, 148, + 96, 148, 284, 216, -412, -320, 120, -300, -384, -604, -572, + -332, -8, -180, -176, 696, 116, -88, 628, 76, 44, -516, + 240, -208, -40, 100, -592, 344, -308, -452, -228, 20, 916, + -1752, -136, -340, -804, 140, 40, 512, 340, 248, 184, -492, + 
896, -156, 932, -628, 328, -688, -448, -616, -752, -100, 560, + -1020, 180, -800, -64, 76, 576, 1068, 396, 660, 552, -108, + -28, 320, -628, 312, -92, -92, -472, 268, 16, 560, 516, + -672, -52, 492, -100, 260, 384, 284, 292, 304, -148, 88, + -152, 1012, 1064, -228, 164, -376, -684, 592, -392, 156, 196, + -524, -64, -884, 160, -176, 636, 648, 404, -396, -436, 864, + 424, -728, 988, -604, 904, -592, 296, -224, 536, -176, -920, + 436, -48, 1176, -884, 416, -776, -824, -884, 524, -548, -564, + -68, -164, -96, 692, 364, -692, -1012, -68, 260, -480, 876, + -1116, 452, -332, -352, 892, -1088, 1220, -676, 12, -292, 244, + 496, 372, -32, 280, 200, 112, -440, -96, 24, -644, -184, + 56, -432, 224, -980, 272, -260, 144, -436, 420, 356, 364, + -528, 76, 172, -744, -368, 404, -752, -416, 684, -688, 72, + 540, 416, 92, 444, 480, -72, -1416, 164, -1172, -68, 24, + 424, 264, 1040, 128, -912, -524, -356, 64, 876, -12, 4, + -88, 532, 272, -524, 320, 276, -508, 940, 24, -400, -120, + 756, 60, 236, -412, 100, 376, -484, 400, -100, -740, -108, + -260, 328, -268, 224, -200, -416, 184, -604, -564, -20, 296, + 60, 892, -888, 60, 164, 68, -760, 216, -296, 904, -336, + -28, 404, -356, -568, -208, -1480, -512, 296, 328, -360, -164, + -1560, -776, 1156, -428, 164, -504, -112, 120, -216, -148, -264, + 308, 32, 64, -72, 72, 116, 176, -64, -272, 460, -536, + -784, -280, 348, 108, -752, -132, 524, -540, -776, 116, -296, + -1196, -288, -560, 1040, -472, 116, -848, -1116, 116, 636, 696, + 284, -176, 1016, 204, -864, -648, -248, 356, 972, -584, -204, + 264, 880, 528, -24, -184, 116, 448, -144, 828, 524, 212, + -212, 52, 12, 200, 268, -488, -404, -880, 824, -672, -40, + 908, -248, 500, 716, -576, 492, -576, 16, 720, -108, 384, + 124, 344, 280, 576, -500, 252, 104, -308, 196, -188, -8, + 1268, 296, 1032, -1196, 436, 316, 372, -432, -200, -660, 704, + -224, 596, -132, 268, 32, -452, 884, 104, -1008, 424, -1348, + -280, 4, -1168, 368, 476, 696, 300, -8, 24, 180, -592, + -196, 388, 304, 500, 724, -160, 
244, -84, 272, -256, -420, + 320, 208, -144, -156, 156, 364, 452, 28, 540, 316, 220, + -644, -248, 464, 72, 360, 32, -388, 496, -680, -48, 208, + -116, -408, 60, -604, -392, 548, -840, 784, -460, 656, -544, + -388, -264, 908, -800, -628, -612, -568, 572, -220, 164, 288, + -16, -308, 308, -112, -636, -760, 280, -668, 432, 364, 240, + -196, 604, 340, 384, 196, 592, -44, -500, 432, -580, -132, + 636, -76, 392, 4, -412, 540, 508, 328, -356, -36, 16, + -220, -64, -248, -60, 24, -192, 368, 1040, 92, -24, -1044, + -32, 40, 104, 148, 192, -136, -520, 56, -816, -224, 732, + 392, 356, 212, -80, -424, -1008, -324, 588, -1496, 576, 460, + -816, -848, 56, -580, -92, -1372, -112, -496, 200, 364, 52, + -140, 48, -48, -60, 84, 72, 40, 132, -356, -268, -104, + -284, -404, 732, -520, 164, -304, -540, 120, 328, -76, -460, + 756, 388, 588, 236, -436, -72, -176, -404, -316, -148, 716, + -604, 404, -72, -88, -888, -68, 944, 88, -220, -344, 960, + 472, 460, -232, 704, 120, 832, -228, 692, -508, 132, -476, + 844, -748, -364, -44, 1116, -1104, -1056, 76, 428, 552, -692, + 60, 356, 96, -384, -188, -612, -576, 736, 508, 892, 352, + -1132, 504, -24, -352, 324, 332, -600, -312, 292, 508, -144, + -8, 484, 48, 284, -260, -240, 256, -100, -292, -204, -44, + 472, -204, 908, -188, -1000, -256, 92, 1164, -392, 564, 356, + 652, -28, -884, 256, 484, -192, 760, -176, 376, -524, -452, + -436, 860, -736, 212, 124, 504, -476, 468, 76, -472, 552, + -692, -944, -620, 740, -240, 400, 132, 20, 192, -196, 264, + -668, -1012, -60, 296, -316, -828, 76, -156, 284, -768, -448, + -832, 148, 248, 652, 616, 1236, 288, -328, -400, -124, 588, + 220, 520, -696, 1032, 768, -740, -92, -272, 296, 448, -464, + 412, -200, 392, 440, -200, 264, -152, -260, 320, 1032, 216, + 320, -8, -64, 156, -1016, 1084, 1172, 536, 484, -432, 132, + 372, -52, -256, 84, 116, -352, 48, 116, 304, -384, 412, + 924, -300, 528, 628, 180, 648, 44, -980, -220, 1320, 48, + 332, 748, 524, -268, -720, 540, -276, 564, -344, -208, -196, + 436, 896, 88, 
-392, 132, 80, -964, -288, 568, 56, -48, + -456, 888, 8, 552, -156, -292, 948, 288, 128, -716, -292, + 1192, -152, 876, 352, -600, -260, -812, -468, -28, -120, -32, + -44, 1284, 496, 192, 464, 312, -76, -516, -380, -456, -1012, + -48, 308, -156, 36, 492, -156, -808, 188, 1652, 68, -120, + -116, 316, 160, -140, 352, 808, -416, 592, 316, -480, 56, + 528, -204, -568, 372, -232, 752, -344, 744, -4, 324, -416, + -600, 768, 268, -248, -88, -132, -420, -432, 80, -288, 404, + -316, -1216, -588, 520, -108, 92, -320, 368, -480, -216, -92, + 1688, -300, 180, 1020, -176, 820, -68, -228, -260, 436, -904, + 20, 40, -508, 440, -736, 312, 332, 204, 760, -372, 728, + 96, -20, -632, -520, -560, 336, 1076, -64, -532, 776, 584, + 192, 396, -728, -520, 276, -188, 80, -52, -612, -252, -48, + 648, 212, -688, 228, -52, -260, 428, -412, -272, -404, 180, + 816, -796, 48, 152, 484, -88, -216, 988, 696, 188, -528, + 648, -116, -180, 316, 476, 12, -564, 96, 476, -252, -364, + -376, -392, 556, -256, -576, 260, -352, 120, -16, -136, -260, + -492, 72, 556, 660, 580, 616, 772, 436, 424, -32, -324, + -1268, 416, -324, -80, 920, 160, 228, 724, 32, -516, 64, + 384, 68, -128, 136, 240, 248, -204, -68, 252, -932, -120, + -480, -628, -84, 192, 852, -404, -288, -132, 204, 100, 168, + -68, -196, -868, 460, 1080, 380, -80, 244, 0, 484, -888, + 64, 184, 352, 600, 460, 164, 604, -196, 320, -64, 588, + -184, 228, 12, 372, 48, -848, -344, 224, 208, -200, 484, + 128, -20, 272, -468, -840, 384, 256, -720, -520, -464, -580, + 112, -120, 644, -356, -208, -608, -528, 704, 560, -424, 392, + 828, 40, 84, 200, -152, 0, -144, 584, 280, -120, 80, + -556, -972, -196, -472, 724, 80, 168, -32, 88, 160, -688, + 0, 160, 356, 372, -776, 740, -128, 676, -248, -480, 4, + -364, 96, 544, 232, -1032, 956, 236, 356, 20, -40, 300, + 24, -676, -596, 132, 1120, -104, 532, -1096, 568, 648, 444, + 508, 380, 188, -376, -604, 1488, 424, 24, 756, -220, -192, + 716, 120, 920, 688, 168, 44, -460, 568, 284, 1144, 1160, + 600, 424, 888, 656, 
-356, -320, 220, 316, -176, -724, -188, + -816, -628, -348, -228, -380, 1012, -452, -660, 736, 928, 404, + -696, -72, -268, -892, 128, 184, -344, -780, 360, 336, 400, + 344, 428, 548, -112, 136, -228, -216, -820, -516, 340, 92, + -136, 116, -300, 376, -244, 100, -316, -520, -284, -12, 824, + 164, -548, -180, -128, 116, -924, -828, 268, -368, -580, 620, + 192, 160, 0, -1676, 1068, 424, -56, -360, 468, -156, 720, + 288, -528, 556, -364, 548, -148, 504, 316, 152, -648, -620, + -684, -24, -376, -384, -108, -920, -1032, 768, 180, -264, -508, + -1268, -260, -60, 300, -240, 988, 724, -376, -576, -212, -736, + 556, 192, 1092, -620, -880, 376, -56, -4, -216, -32, 836, + 268, 396, 1332, 864, -600, 100, 56, -412, -92, 356, 180, + 884, -468, -436, 292, -388, -804, -704, -840, 368, -348, 140, + -724, 1536, 940, 372, 112, -372, 436, -480, 1136, 296, -32, + -228, 132, -48, -220, 868, -1016, -60, -1044, -464, 328, 916, + 244, 12, -736, -296, 360, 468, -376, -108, -92, 788, 368, + -56, 544, 400, -672, -420, 728, 16, 320, 44, -284, -380, + -796, 488, 132, 204, -596, -372, 88, -152, -908, -636, -572, + -624, -116, -692, -200, -56, 276, -88, 484, -324, 948, 864, + 1000, -456, -184, -276, 292, -296, 156, 676, 320, 160, 908, + -84, -1236, -288, -116, 260, -372, -644, 732, -756, -96, 84, + 344, -520, 348, -688, 240, -84, 216, -1044, -136, -676, -396, + -1500, 960, -40, 176, 168, 1516, 420, -504, -344, -364, -360, + 1216, -940, -380, -212, 252, -660, -708, 484, -444, -152, 928, + -120, 1112, 476, -260, 560, -148, -344, 108, -196, 228, -288, + 504, 560, -328, -88, 288, -1008, 460, -228, 468, -836, -196, + 76, 388, 232, 412, -1168, -716, -644, 756, -172, -356, -504, + 116, 432, 528, 48, 476, -168, -608, 448, 160, -532, -272, + 28, -676, -12, 828, 980, 456, 520, 104, -104, 256, -344, + -4, -28, -368, -52, -524, -572, -556, -200, 768, 1124, -208, + -512, 176, 232, 248, -148, -888, 604, -600, -304, 804, -156, + -212, 488, -192, -804, -256, 368, -360, -916, -328, 228, -240, + -448, -472, 856, 
-556, -364, 572, -12, -156, -368, -340, 432, + 252, -752, -152, 288, 268, -580, -848, -592, 108, -76, 244, + 312, -716, 592, -80, 436, 360, 4, -248, 160, 516, 584, + 732, 44, -468, -280, -292, -156, -588, 28, 308, 912, 24, + 124, 156, 180, -252, 944, -924, -772, -520, -428, -624, 300, + -212, -1144, 32, -724, 800, -1128, -212, -1288, -848, 180, -416, + 440, 192, -576, -792, -76, -1080, 80, -532, -352, -132, 380, + -820, 148, 1112, 128, 164, 456, 700, -924, 144, -668, -384, + 648, -832, 508, 552, -52, -100, -656, 208, -568, 748, -88, + 680, 232, 300, 192, -408, -1012, -152, -252, -268, 272, -876, + -664, -648, -332, -136, 16, 12, 1152, -28, 332, -536, 320, + -672, -460, -316, 532, -260, 228, -40, 1052, -816, 180, 88, + -496, -556, -672, -368, 428, 92, 356, 404, -408, 252, 196, + -176, -556, 792, 268, 32, 372, 40, 96, -332, 328, 120, + 372, -900, -40, 472, -264, -592, 952, 128, 656, 112, 664, + -232, 420, 4, -344, -464, 556, 244, -416, -32, 252, 0, + -412, 188, -696, 508, -476, 324, -1096, 656, -312, 560, 264, + -136, 304, 160, -64, -580, 248, 336, -720, 560, -348, -288, + -276, -196, -500, 852, -544, -236, -1128, -992, -776, 116, 56, + 52, 860, 884, 212, -12, 168, 1020, 512, -552, 924, -148, + 716, 188, 164, -340, -520, -184, 880, -152, -680, -208, -1156, + -300, -528, -472, 364, 100, -744, -1056, -32, 540, 280, 144, + -676, -32, -232, -280, -224, 96, 568, -76, 172, 148, 148, + 104, 32, -296, -32, 788, -80, 32, -16, 280, 288, 944, + 428, -484 +}; + +static inline void fg_data_copy_unsigned( + s32 *to, u8 *from, s32 size, u8 endian_swap, s32 offt) +{ + s32 i; + + if (endian_swap) { + for (i = 0; i < size; i += 4) { + to[i] = from[i + 3] - offt; + to[i + 1] = from[i + 2] - offt; + to[i + 2] = from[i + 1] - offt; + to[i + 3] = from[i] - offt; + } + } else { + for (i = 0; i < size; i++) { + to[i] = from[i]; + } + } +} + +static inline void fg_data_copy_signed( + s32 *to, s8 *from, s32 size, u8 endian_swap, s32 offt) +{ + s32 i; + + if (endian_swap) { + for (i = 0; i < 
size; i += 4) { + to[i] = (s32)(from[i + 3] - offt); + to[i + 1] = (s32)(from[i + 2] - offt); + to[i + 2] = (s32)(from[i + 1] - offt); + to[i + 3] = (s32)(from[i] - offt); + } + } else { + for (i = 0; i < size; i++) { + to[i] = from[i]; + } + } +} + + +static void fg_info_print(struct aom_film_grain_t *p) +{ + s32 i; + + pr_info("apply_grain: %d\n", p->apply_grain); + pr_info("update_parameters: %d\n", p->update_parameters); + + if (p->num_y_points > 0) { + for (i = 0; i < 14; i++) { + pr_info("scaling_points_y[%d][0]: %x, [1]: %x\n", i, p->scaling_points_y[i][0], p->scaling_points_y[i][1]); + } + } + pr_info("num_y_points: %d\n", p->num_y_points); + + if (p->num_cb_points > 0) { + for (i = 0; i < 10; i++) { + pr_info("scaling_points_cb[%d][0]: %x, [1]: %x\n", i, p->scaling_points_cb[i][0], p->scaling_points_cb[i][1]); + } + } + pr_info("num_cb_points: %d\n", p->num_cb_points); + + if (p->num_cb_points > 0) { + for (i = 0; i < 10; i++) { + pr_info("scaling_points_cr[%d][0]: %x, [1]: %x\n", i, p->scaling_points_cr[i][0], p->scaling_points_cr[i][1]); + } + } + pr_info("num_cr_points: %d\n", p->num_cr_points); + pr_info("scaling_shift: %d\n", p->scaling_shift); + pr_info("ar_coeff_lag: 0x%x\n", p->ar_coeff_lag); + + for (i = 0; i < 24; i++) { + pr_info("ar_coeffs_y[%d]: %x\n", i, p->ar_coeffs_y[i]); + } + for (i = 0; i < 25; i++) { + pr_info("ar_coeffs_cb[%d]: %x\n", i, p->ar_coeffs_cb[i]); + } + for (i = 0; i < 25; i++) { + pr_info("ar_coeffs_cr[%d]: %x\n", i, p->ar_coeffs_cr[i]); + } + pr_info("ar_coeff_shift: %d\n", p->ar_coeff_shift); + pr_info("cb_mult: 0x%x\n", p->cb_mult); + pr_info("cb_luma_mult: 0x%x\n", p->cb_luma_mult); + pr_info("cb_offset: 0x%x\n", p->cb_offset); + + pr_info("cr_mult: 0x%x\n", p->cr_mult); + pr_info("cr_luma_mult: 0x%x\n", p->cr_luma_mult); + pr_info("cr_offset: 0x%x\n", p->cr_offset); + + pr_info("overlap_flag: %d\n", p->overlap_flag); + pr_info("clip_to_restricted_range: %d\n", p->clip_to_restricted_range); + pr_info("bit_depth: %d\n", 
p->bit_depth); + pr_info("mc_identity: %d\n", p->mc_identity); + pr_info("chroma_scaling_from_luma: %d\n", p->chroma_scaling_from_luma); + pr_info("grain_scale_shift: %d\n", p->grain_scale_shift); + pr_info("random_seed: 0x%x\n", p->random_seed); +} + +static void film_grain_data_parse(struct aom_film_grain_t *para, u32 fgs_ctrl, u32 *fgs_data) +{ + /* index 0 + bit[30] - bit_depth_10 + bit[29] - mc_identity + bit[28] - num_pos_chroma_one_more + bit[27:24] - ar_coeff_shift + bit[23:20] - scaling_shift + bit[19] - overlap_flag + bit[18] - clip_to_restricted_range + bit[17] - chroma_scaling_from_luma + */ + para->bit_depth = (fgs_data[0] & (1 << 30)) ? 10 : 8; // video bit depth + para->mc_identity = (fgs_data[0] & (1 << 29)) ? 1 : 0; + para->ar_coeff_shift = (fgs_data[0] >> 24) & 0xf;; // values : 6..9 + para->scaling_shift = (fgs_data[0] >> 20) & 0xf; // values : 8..11 + para->overlap_flag = (fgs_data[0] >> 19) & 0x1; + para->clip_to_restricted_range = (fgs_data[0] >> 18) & 0x1; + para->chroma_scaling_from_luma = (fgs_data[0] >> 17) & 0x1; + + /* index 1 + bit[31:30] - grain_scale_shift + bit[29:28] - ar_coeff_lag + bit[27:20] - ar_coeffs_cr[24] + bit[19:12] - ar_coeffs_cb[24] + bit[11:8] - num_cr_points + bit[7:4] - num_cb_points + bit[3:0] - num_y_points + */ + para->grain_scale_shift = (fgs_data[1] >> 30) & 0x3; + para->ar_coeff_lag = (fgs_data[1] >> 28) & 0x3; // values: 0..3 + para->ar_coeffs_cr[24] = (fgs_data[1] >> 20) & 0xff; + para->ar_coeffs_cr[24] -= 128; + para->ar_coeffs_cb[24] = (fgs_data[1] >> 12) & 0xff; + para->ar_coeffs_cb[24] -= 128; + para->num_cr_points = (fgs_data[1] >> 8) & 0xf; // value: 0..10 + para->num_cb_points = (fgs_data[1] >> 4) & 0xf; // value: 0..10 + para->num_y_points = (fgs_data[1] >> 0) & 0xf; // value: 0..14 + + /* index 2~8 scaling_points_y[14][2] */ + fg_data_copy_unsigned(¶->scaling_points_y[0][0], (u8 *)&fgs_data[2], para->num_y_points * 2, 1, 0); // 8 bit values // brian swap ? 
+ + /* index 9-13 scaling_points_cb[10][2] */ + fg_data_copy_unsigned(¶->scaling_points_cb[0][0], (u8 *)&fgs_data[9], para->num_cb_points * 2, 1, 0); // 8 bit values // brian chroma_scaling_from_luma and swap ? + + // 14-18 -- scaling_points_cr[10][2] + fg_data_copy_unsigned(¶->scaling_points_cr[0][0], (u8 *)&fgs_data[14], para->num_cr_points * 2, 1, 0); // 8 bit values // brian chroma_scaling_from_luma and swap ? + + // 19-24 -- ar_coeffs_y[0-23] + fg_data_copy_unsigned(para->ar_coeffs_y, (u8 *)&fgs_data[19], 24, 1, 128); // brian chroma_scaling_from_luma and swap ? + + // 25-30 -- ar_coeffs_cb[0-23] + fg_data_copy_unsigned(para->ar_coeffs_cb, (u8 *)&fgs_data[25], 24, 1, 128); // brian chroma_scaling_from_luma ? + + // 31-36 -- ar_coeffs_cr[0-23] + fg_data_copy_unsigned(para->ar_coeffs_cr, (u8 *)&fgs_data[31], 24, 1, 128); // brian chroma_scaling_from_luma ? + + /* index 37 + bit[31:24] - cb_mult + bit[23:16] - cb_luma_mult + bit[15:7] - cb_offset + */ + if (para->num_cb_points > 0) { // brian chroma_scaling_from_luma ? + para->cb_mult = (fgs_data[37] >> 24) & 0xff; // 8 bits + para->cb_luma_mult = (fgs_data[37] >> 16) & 0xff; // 8 bits + para->cb_offset = (fgs_data[37] >> 7) & 0x1ff; // 9 bits + } else { + para->cb_mult = 0; // 8 bits + para->cb_luma_mult = 0; // 8 bits + para->cb_offset = 0; // 9 bits + } + + /* index 38 + bit[31:24] - cr_mult + bit[23:16] - cr_luma_mult + bit[15:7] - cr_offset + */ + if (para->num_cr_points > 0) { // brian chroma_scaling_from_luma ? 
+ para->cr_mult = (fgs_data[38] >> 24) & 0xff; // 8 bits + para->cr_luma_mult = (fgs_data[38] >> 16) & 0xff; // 8 bits + para->cr_offset = (fgs_data[38] >> 7) & 0x1ff; // 9 bits + } else { + para->cr_mult = 0; // 8 bits + para->cr_luma_mult = 0; // 8 bits + para->cr_offset = 0; // 9 bits + } + /* + bit[31:16] - random_seed + bit[06] - apply_cr (RO) //assign apply_cr = num_cr_points>0 | chroma_scaling_from_luma; + bit[05] - apply_cb (RO) //assign apply_cb = num_cb_points>0 | chroma_scaling_from_luma; + bit[04] - apply_lu (RO) //assign apply_lu = num_y_points>0; + bit[03] - fgs_not_bypass : 0=fgs bypass 1:=fgs not bypass (default=0) + bit[02] - update_parameters + bit[01] - apply_grain + bit[00] - film gran start + */ + para->apply_grain = (fgs_ctrl >> 1) & 0x1; + para->update_parameters = (fgs_ctrl >> 2) & 0x1; + para->random_seed = (fgs_ctrl >> 16) & 0xffff; +} + +#define DEFAULT_ALIGNMENT (2 * sizeof(void *)) + +static inline void *fgs_alloc(u32 size) +{ + void *addr; + size = size + DEFAULT_ALIGNMENT - 1 + sizeof(size_t); + addr = vzalloc(size); + if (addr == NULL) + addr = vzalloc(size); + + return addr; +} + +static void init_arrays(struct aom_film_grain_t *params, + s32 ***pred_pos_luma_p, s32 ***pred_pos_chroma_p, + s32 **luma_grain_block, s32 **cb_grain_block, s32 **cr_grain_block, + s32 luma_grain_samples, s32 luma_grain_stride, + s32 chroma_grain_samples, s32 chroma_grain_stride) +{ + s32 num_pos_luma = 2 * params->ar_coeff_lag * (params->ar_coeff_lag + 1); + s32 num_pos_chroma = num_pos_luma; + s32 row, col; + s32 pos_ar_index = 0; + s32 **pred_pos_luma; + s32 **pred_pos_chroma; + + if (params->num_y_points > 0) + ++num_pos_chroma; + + if (debug_fgs & DEBUG_FGS_DETAIL) { + pr_info("num_pos_luma %d, sizeof(*pred_pos_luma):%ld\n", num_pos_luma, sizeof(*pred_pos_luma)); + pr_info("num_pos_chroma %d, sizeof(*pred_pos_chroma):%ld\n", num_pos_chroma, sizeof(*pred_pos_chroma)); + } + pred_pos_luma = (s32 **)fgs_alloc(sizeof(*pred_pos_luma) * num_pos_luma); + + 
for (row = 0; row < num_pos_luma; row++) { + pred_pos_luma[row] = (s32 *)fgs_alloc(sizeof(**pred_pos_luma) * 3); + } + + pred_pos_chroma = (s32 **)fgs_alloc(sizeof(*pred_pos_chroma) * num_pos_chroma); + + for (row = 0; row < num_pos_chroma; row++) { + pred_pos_chroma[row] = (s32 *)fgs_alloc(sizeof(**pred_pos_chroma) * 3); + } + + for (row = -params->ar_coeff_lag; row < 0; row++) { + for (col = -params->ar_coeff_lag; + col < params->ar_coeff_lag + 1; col++) { + pred_pos_luma[pos_ar_index][0] = row * luma_grain_stride; + pred_pos_luma[pos_ar_index][1] = col; + pred_pos_luma[pos_ar_index][2] = 0; + + pred_pos_chroma[pos_ar_index][0] = row * chroma_grain_stride; + pred_pos_chroma[pos_ar_index][1] = col; + pred_pos_chroma[pos_ar_index][2] = 0; + ++pos_ar_index; + } + } + + for (col = -params->ar_coeff_lag; col < 0; col++) { + pred_pos_luma[pos_ar_index][0] = 0; + pred_pos_luma[pos_ar_index][1] = col; + pred_pos_luma[pos_ar_index][2] = 0; + + pred_pos_chroma[pos_ar_index][0] = 0; + pred_pos_chroma[pos_ar_index][1] = col; + pred_pos_chroma[pos_ar_index][2] = 0; + + ++pos_ar_index; + } + + if (params->num_y_points > 0) { + pred_pos_chroma[pos_ar_index][0] = 0; + pred_pos_chroma[pos_ar_index][1] = 0; + pred_pos_chroma[pos_ar_index][2] = 1; + } + + *pred_pos_luma_p = pred_pos_luma; + *pred_pos_chroma_p = pred_pos_chroma; + *luma_grain_block = + (s32 *)fgs_alloc(sizeof(**luma_grain_block) * luma_grain_samples); + if (debug_fgs & DEBUG_FGS_DETAIL) { + pr_info("luma block size %ld, luma_grain_samples:%d\n", sizeof(**luma_grain_block) * luma_grain_samples, luma_grain_samples); + } + *cb_grain_block = + (s32 *)fgs_alloc(sizeof(**cb_grain_block) * chroma_grain_samples); + *cr_grain_block = + (s32*)fgs_alloc(sizeof(**cr_grain_block) * chroma_grain_samples); + if (debug_fgs & DEBUG_FGS_DETAIL) { + pr_info("chroma block size %ld, chroma_grain_samples:%d\n", sizeof(**cb_grain_block) * chroma_grain_samples, chroma_grain_samples); + } +} + +static void dealloc_arrays(struct 
aom_film_grain_t *params, + s32 ***pred_pos_luma, s32 ***pred_pos_chroma, + s32 **luma_grain_block, s32 **cb_grain_block, s32 **cr_grain_block) +{ + s32 num_pos_luma = 2 * params->ar_coeff_lag * (params->ar_coeff_lag + 1); + s32 num_pos_chroma = num_pos_luma; + s32 row; + + if (params->num_y_points > 0) + ++num_pos_chroma; + + for (row = 0; row < num_pos_luma; row++) { + vfree((*pred_pos_luma)[row]); + } + vfree(*pred_pos_luma); + + for (row = 0; row < num_pos_chroma; row++) { + vfree((*pred_pos_chroma)[row]); + } + vfree((*pred_pos_chroma)); + + vfree(*luma_grain_block); + + vfree(*cb_grain_block); + + vfree(*cr_grain_block); +} + +// get a number between 0 and 2^bits - 1 +static inline s32 get_random_number(u16 *random_register, s32 bits) +{ + u16 bit; + + bit = ((*random_register >> 0) ^ (*random_register >> 1) ^ + (*random_register >> 3) ^ (*random_register >> 12)) & 1; + *random_register = (*random_register >> 1) | (bit << 15); + return (*random_register >> (16 - bits)) & ((1 << bits) - 1); +} + +static inline void init_random_generator(u16 *random_register, u16 seed, s32 luma_line) +{ + // same for the picture + u16 msb = (seed >> 8) & 0xff; + u16 lsb = seed & 0xff; + s32 luma_num = luma_line >> 5; + + *random_register = (msb << 8) + lsb; + *random_register ^= ((luma_num * 37 + 178) & 255) << 8; + *random_register ^= ((luma_num * 173 + 105) & 255); +} + +static void generate_luma_grain_block(struct aom_film_grain_t *params, + s32 **pred_pos_luma, s32 *luma_grain_block, + s32 luma_block_size_y, s32 luma_block_size_x, s32 luma_grain_stride, + s8 *table_ptr) +{ + s32 x_start = left_pad + ar_padding * 2; + s32 x_end = luma_block_size_x - right_pad - (ar_padding * 2) - 1; + s32 y_start = top_pad + ar_padding * 2; + s32 y_end = luma_block_size_y - bottom_pad - 1; + s32 x_pad_start = left_pad; + s32 x_pad_end = luma_block_size_x - right_pad - 1; + s32 y_pad_start = top_pad; + s32 y_pad_end = luma_block_size_y - bottom_pad - 1; + u8 cnt = 0; + s32 grain_block[4], 
grain_val; + u32 bit_mask = (1 << params->bit_depth) - 1; + s8 gauss_sec_shift = 12 - params->bit_depth + params->grain_scale_shift; + s32 gauss_sec_val = (1 << gauss_sec_shift) >> 1; + u32 num_pos_luma = 2 * params->ar_coeff_lag * (params->ar_coeff_lag + 1); + s32 rounding_offset = (1 << (params->ar_coeff_shift - 1)); + u32 i, j, pos, block_id = 0; + s32 random_num; + s32 grain_center = 128 << (params->bit_depth - 8); + s32 grain_min = 0 - grain_center; + s32 grain_max = (256 << (params->bit_depth - 8)) - 1 - grain_center; + s32 pred[30]; + + table_ptr += luma_grain_block_offt; + params->random_register_y = params->random_seed; + + if (params->num_y_points == 0) + return; + + if (debug_fgs & DEBUG_FGS_DETAIL) { + pr_info("-->gauss_sec_shift = %d \n",gauss_sec_shift); + pr_info("-->ar_coeff_shift = %x \n",params->ar_coeff_shift); + pr_info("-->rounding_offset = %x \n",rounding_offset); + } + + for (pos = 0; pos < num_pos_luma; pos++) + pred[pos] = pred_pos_luma[pos][0] + pred_pos_luma[pos][1]; + + for (i = 0; i < luma_block_size_y; i++) { + for (j = 0; j < luma_block_size_x; j++) { + random_num = + get_random_number(¶ms->random_register_y, gauss_bits); + grain_val = (gaussian_seq[random_num] + gauss_sec_val) >> gauss_sec_shift; + luma_grain_block[block_id] = grain_val; + if (i >= y_pad_start && i <= y_pad_end && + j >= x_pad_start && j <= x_pad_end) { + s32 wsum = 0; + + for (pos = 0; pos < num_pos_luma; pos++) + wsum += params->ar_coeffs_y[pos] * + luma_grain_block[block_id + pred[pos]]; + wsum += rounding_offset; + wsum = wsum >> params->ar_coeff_shift; + grain_val = MID(grain_val + wsum, grain_min, grain_max); + luma_grain_block[block_id] = grain_val; + if (i >= y_start && i <= y_end && + j >= x_start && j <= x_end) { + grain_block[cnt++] = grain_val & bit_mask; + if (cnt == 4) { + table_ptr[0] = grain_block[0] & 0xff; + table_ptr[1] = (grain_block[1] & 0x3f) << 2; + table_ptr[1] |= (grain_block[0] >> 8) & 3; + table_ptr[2] = (grain_block[2] & 0xf) << 4; + 
table_ptr[2] |= (grain_block[1] >> 6) & 0xf; + table_ptr[3] = (grain_block[3] & 3) << 6; + table_ptr[3] |= (grain_block[2] >> 4) & 0x3f; + table_ptr[4] = (grain_block[3] >> 2) & 0xff; + table_ptr += 5; + cnt = 0; + } + } + } + block_id++; + } + } +} + +static s32 generate_chroma_grain_blocks(struct aom_film_grain_t *params, + s32 **pred_pos_chroma, s32 *luma_grain_block, s32 *cb_grain_block, + s32 *cr_grain_block, s32 luma_grain_stride, s32 chroma_block_size_y, + s32 chroma_block_size_x, s32 chroma_grain_stride, + s32 chroma_subsamp_y, s32 chroma_subsamp_x, + s8 *table_ptr) +{ + s8 gauss_sec_shift = 12 - params->bit_depth + params->grain_scale_shift; + s32 gauss_sec_val = (1 << gauss_sec_shift) >> 1; + s32 i, j, k, l, pos; + u32 num_pos_chroma = 2 * params->ar_coeff_lag * (params->ar_coeff_lag + 1); + s32 rounding_offset = (1 << (params->ar_coeff_shift - 1)); + s32 chroma_grain_samples = chroma_block_size_y * chroma_block_size_x; + s32 cb_block[2], cr_block[2], cb_grain_val, cr_grain_val; + u32 bit_mask = (1 << params->bit_depth) - 1; + s32 x_start = left_pad + ar_padding * (2 >> chroma_subsamp_x); + s32 x_end = chroma_block_size_x - right_pad - (ar_padding * (2 >> chroma_subsamp_x)) - 1; + s32 y_start = top_pad + ar_padding * (2 >> chroma_subsamp_x); + s32 y_end = chroma_block_size_y - bottom_pad - 1; + s32 x_pad_start = left_pad; + s32 x_pad_end = chroma_block_size_x - right_pad - 1; + s32 y_pad_start = top_pad; + s32 y_pad_end = chroma_block_size_y - bottom_pad - 1; + u8 cnt; + s32 random_num_cb, random_num_cr; + u32 block_id; + u8 do_cb = 1, do_cr = 1; + s32 grain_center = 128 << (params->bit_depth - 8); + s32 grain_min = 0 - grain_center; + s32 grain_max = (256 << (params->bit_depth - 8)) - 1 - grain_center; + s32 pred[30]; + u8 subsamp = chroma_subsamp_y + chroma_subsamp_x; + u8 subsamp_val = (1 << (chroma_subsamp_y + chroma_subsamp_x)) >> 1; + + table_ptr += cb_grain_block_offt; + + if (params->num_y_points > 0) + ++num_pos_chroma; + + for (pos = 0; pos < 
num_pos_chroma; pos++) + pred[pos] = pred_pos_chroma[pos][0] + pred_pos_chroma[pos][1]; + + if (!params->num_cb_points && !params->chroma_scaling_from_luma) { + memset(cb_grain_block, 0, sizeof(*cb_grain_block) * chroma_grain_samples); + do_cb = 0; + } else { + init_random_generator( + ¶ms->random_register_cb, + params->random_seed, 7 << 5); + } + if (!params->num_cr_points && !params->chroma_scaling_from_luma) { + memset(cr_grain_block, 0, sizeof(*cr_grain_block) * chroma_grain_samples); + do_cr = 0; + } else { + init_random_generator( + ¶ms->random_register_cr, + params->random_seed, 11 << 5); + + } + + block_id = 0; + for (i = 0; i < chroma_block_size_y; i++) { + for (j = 0; j < chroma_block_size_x; j++) { + if (do_cb) { + random_num_cb = get_random_number( + ¶ms->random_register_cb, gauss_bits); + cb_grain_val = + (gaussian_seq[random_num_cb] + gauss_sec_val) + >> gauss_sec_shift; + cb_grain_block[block_id] = cb_grain_val; + } + if (do_cr) { + random_num_cr = get_random_number( + ¶ms->random_register_cr, gauss_bits); + cr_grain_val = + (gaussian_seq[random_num_cr] + gauss_sec_val) + >> gauss_sec_shift; + cr_grain_block[block_id] = cr_grain_val; + } + if (i >= y_pad_start && i <= y_pad_end && + j >= x_pad_start && j <= x_pad_end) { + s32 wsum_cb = 0; + s32 wsum_cr = 0; + + for (pos = 0; pos < num_pos_chroma; pos++) { + if (pred_pos_chroma[pos][2] == 0) { + wsum_cb += params->ar_coeffs_cb[pos] * + cb_grain_block[block_id + pred[pos]]; + wsum_cr += params->ar_coeffs_cr[pos] * + cr_grain_block[block_id + pred[pos]]; + } else if (pred_pos_chroma[pos][2] == 1) { + s32 av_luma = 0; + s32 luma_coord_y = ((i - top_pad) << chroma_subsamp_y) + top_pad; + s32 luma_coord_x = ((j - left_pad) << chroma_subsamp_x) + left_pad; + + for (k = luma_coord_y; + k < luma_coord_y + chroma_subsamp_y + 1; k++) + for (l = luma_coord_x; + l < luma_coord_x + chroma_subsamp_x + 1; l++) { + av_luma += luma_grain_block[k * luma_grain_stride + l]; + } + av_luma = (av_luma + subsamp_val) >> 
subsamp; + wsum_cb += params->ar_coeffs_cb[pos] * av_luma; + wsum_cr += params->ar_coeffs_cr[pos] * av_luma; + } else { + pr_info( + "Grain synthesis: prediction between two chroma components is " + "not supported!"); + return -1; + } + } + if (do_cb) { + wsum_cb += rounding_offset; + wsum_cb = wsum_cb >> params->ar_coeff_shift; + cb_grain_val = + MID(cb_grain_val + wsum_cb, grain_min, grain_max); + cb_grain_block[block_id] = cb_grain_val; + } + if (do_cr) { + wsum_cr += rounding_offset; + wsum_cr = wsum_cr >> params->ar_coeff_shift; + cr_grain_val = + MID(cr_grain_val + wsum_cr, grain_min, grain_max); + cr_grain_block[block_id] = cr_grain_val; + } + if (i >= y_start && i <= y_end && j >= x_start && j <= x_end) { + cb_block[cnt] = cb_grain_val & bit_mask; + cr_block[cnt++] = cr_grain_val & bit_mask; + if (cnt == 2) { + table_ptr[0] = cb_block[0] & 0xff; + table_ptr[1] = (cr_block[0] & 0x3f) << 2; + table_ptr[1] |= (cb_block[0] >> 8) & 3; + table_ptr[2] = (cb_block[1] & 0xf) << 4; + table_ptr[2] |= (cr_block[0] >> 6) & 0xf; + table_ptr[3] = (cr_block[1] & 3) << 6; + table_ptr[3] |= (cb_block[1] >> 4) & 0x3f; + table_ptr[4] = (cr_block[1] >> 2) & 0xff; + table_ptr += 5; + cnt = 0; + } + } + } + block_id++; + } + } + return 0; +} + +static void init_scaling_function( + s32 scaling_points[][2], u8 num_points, s32 scaling_delta[]) +{ + s64 delta; + s32 delta_y, delta_x, point; + + if (num_points == 0) return; + + for (point = 0; point < num_points - 1; point++) { + delta_y = scaling_points[point + 1][1] - scaling_points[point][1]; + delta_x = scaling_points[point + 1][0] - scaling_points[point][0]; + + delta = delta_y * ((65536 + (delta_x >> 1)) / delta_x); + scaling_delta[point]=delta; + } +} + +void convert_to_fg_table(struct aom_film_grain_t *params, u8 *table_ptr) +{ + s32 i, j; + u32 total = 0; + + if (params->num_y_points > 0 || + params->num_cb_points > 0 || + params->num_cr_points > 0) { + table_ptr[0] = + ((params->apply_grain & 0x1) << 0) | //1bit [0] + 
((params->overlap_flag & 0x1) << 1) | //1bit [1] + ((params->chroma_scaling_from_luma & 0x1) << 2) | //1bit [2] + ((params->clip_to_restricted_range & 0x1) << 3) | //1bit [3] + ((params->mc_identity & 0x1) << 4); //1bit [4] + table_ptr[1] = (params->random_seed >> 0) & 0xff; //random_seed: 16bits + table_ptr[2] = (params->random_seed >> 8) & 0xff; + table_ptr[3] = + ((params->num_y_points & 0xf) << 0) | //4bit 0...14 + ((params->num_cb_points & 0xf) << 4); //4bit 0...10 + table_ptr[4] = + ((params->num_cr_points & 0xf) << 0) | //4bit 0...10 + ((params->scaling_shift & 0xf) << 4); //4bit: 8...11 + table_ptr[5] = params->cb_luma_mult; //8bit + table_ptr[6] = params->cb_mult; //8bit + table_ptr[7] = ((params->cb_offset >> 0) & 0xff); //cb_offset:9bit + table_ptr[8] = ((params->cb_offset >> 8) & 0x1); + table_ptr[9] = params->cr_luma_mult; //8bit + table_ptr[10] = params->cr_mult; //8bit + table_ptr[11] = ((params->cr_offset >> 0) & 0xff); //cr_offset:9bit + table_ptr[12] = ((params->cr_offset >> 8) & 0x1); + table_ptr[13] = 0; + table_ptr[14] = 0; + table_ptr[15] = 0; + } else { + memset(table_ptr, 0, 16 * sizeof(char)); + } + total = 1; + table_ptr += 16; + //y_scale (7x128bit) + if (params->num_y_points > 0) { + for (i = 0; i < 14; i += 2) { + j = i + 1; + table_ptr[0] = params->scaling_points_y[i][0]; //x + table_ptr[1] = params->scaling_points_y[i][1]; //y + //delta, it be signed, so 25bit + table_ptr[2] = (params->scaling_delta_y[i] >> 0) & 0xff; + table_ptr[3] = (params->scaling_delta_y[i] >> 8) & 0xff; + table_ptr[4] = (params->scaling_delta_y[i] >> 16) & 0xff; + table_ptr[5] = (params->scaling_delta_y[i] >> 24) & 0xff; + table_ptr[6] = params->scaling_points_y[j][0]; + table_ptr[7] = params->scaling_points_y[j][1]; + table_ptr[8] = + (j == 13) ? 0 : ((params->scaling_delta_y[j] >> 0) & 0xff); + table_ptr[9] = + (j == 13) ? 0 : ((params->scaling_delta_y[j] >> 8) & 0xff); + table_ptr[10] = + (j == 13) ? 
0 : ((params->scaling_delta_y[j] >> 16) & 0xff); + table_ptr[11] = + (j == 13) ? 0 : ((params->scaling_delta_y[j] >> 24) & 0xff); + table_ptr[12] = 0; + table_ptr[13] = 0; + table_ptr[14] = 0; + table_ptr[15] = 0; + table_ptr += 16; + } + } else { + memset(table_ptr, 0, 16 * 7 * sizeof(char)); + table_ptr += 16 * 7; + } + total += 7; + if (params->num_cb_points > 0) { + for (i = 0; i < 10; i += 2) { + j = i + 1; + table_ptr[0] = params->scaling_points_cb[i][0]; + table_ptr[1] = params->scaling_points_cb[i][1]; + //delta, it be signed, so 25bit + table_ptr[2] = (params->scaling_delta_cb[i] >> 0) & 0xff; + table_ptr[3] = (params->scaling_delta_cb[i] >> 8) & 0xff; + table_ptr[4] = (params->scaling_delta_cb[i] >> 16) & 0xff; + table_ptr[5] = (params->scaling_delta_cb[i] >> 24) & 0xff; + table_ptr[6] = params->scaling_points_cb[j][0]; + table_ptr[7] = params->scaling_points_cb[j][1]; + table_ptr[8] = + (j == 9) ? 0 : ((params->scaling_delta_cb[j] >> 0) & 0xff); + table_ptr[9] = + (j == 9) ? 0 : ((params->scaling_delta_cb[j] >> 8) & 0xff); + table_ptr[10] = + (j == 9) ? 0 : ((params->scaling_delta_cb[j] >> 16) & 0xff); + table_ptr[11] = + (j == 9) ? 
0 : ((params->scaling_delta_cb[j] >> 24) & 0xff); + table_ptr[12] = 0; + table_ptr[13] = 0; + table_ptr[14] = 0; + table_ptr[15] = 0; + table_ptr += 16; + } + } else { + memset(table_ptr, 0, 16 * 5 * sizeof(char)); + table_ptr += 16 * 5; + } + total += 5; + if (params->num_cr_points > 0) { + for (i = 0; i < 10; i += 2) { + j = i + 1; + table_ptr[0] = params->scaling_points_cr[i][0]; + table_ptr[1] = params->scaling_points_cr[i][1]; + //delta, it be signed, so 25bit + table_ptr[2] = (params->scaling_delta_cr[i] >> 0) & 0xff; + table_ptr[3] = (params->scaling_delta_cr[i] >> 8) & 0xff; + table_ptr[4] = (params->scaling_delta_cr[i] >> 16) & 0xff; + table_ptr[5] = (params->scaling_delta_cr[i] >> 24) & 0xff; + table_ptr[6] = params->scaling_points_cr[j][0]; + table_ptr[7] = params->scaling_points_cr[j][1]; + table_ptr[8] = + (j == 9) ? 0 : ((params->scaling_delta_cr[j] >> 0) & 0xff); + table_ptr[9] = + (j == 9) ? 0 : ((params->scaling_delta_cr[j] >> 8) & 0xff); + table_ptr[10] = + (j == 9) ? 0 : ((params->scaling_delta_cr[j] >> 16) & 0xff); + table_ptr[11] = + (j == 9) ? 
0 : ((params->scaling_delta_cr[j] >> 24) & 0xff); + table_ptr[12] = 0; + table_ptr[13] = 0; + table_ptr[14] = 0; + table_ptr[15] = 0; + table_ptr += 16; + } + } else { + memset(table_ptr, 0, 16 * 5 * sizeof(char)); + table_ptr += 16 * 5; + } + //total += 5; + //total += 480;//320; +} + +void av1_add_film_grain_run(char *fg_table_buf, struct aom_film_grain_t *params) +{ + s32 **pred_pos_luma; + s32 **pred_pos_chroma; + s32 *luma_grain_block; + s32 *cb_grain_block; + s32 *cr_grain_block; + s32 luma_block_size_y, luma_block_size_x; + s32 chroma_block_size_y, chroma_block_size_x; + s32 chroma_subblock_size_y, chroma_subblock_size_x; + s32 chroma_subsamp_x = 1; + s32 chroma_subsamp_y = 1; + + ulong start_time, step_time; + u32 time1, time2, time3, time4, time5, time6; + + if (debug_fgs & DEBUG_FGS_CONSUME_TIME) + start_time = step_time = local_clock(); + + chroma_subblock_size_y = luma_subblock_size_y >> chroma_subsamp_y; + chroma_subblock_size_x = luma_subblock_size_x >> chroma_subsamp_x; + + // Initial padding is only needed for generation of + // film grain templates (to stabilize the AR process) + // Only a 64x64 luma and 32x32 chroma part of a template + // is used later for adding grain, padding can be discarded + + luma_block_size_y = top_pad + 2 * ar_padding + + luma_subblock_size_y * 2 + bottom_pad; + luma_block_size_x = left_pad + 2 * ar_padding + + luma_subblock_size_x * 2 + 2 * ar_padding + right_pad; + + chroma_block_size_y = top_pad + (2 >> chroma_subsamp_y) * ar_padding + + chroma_subblock_size_y * 2 + bottom_pad; + chroma_block_size_x = left_pad + (2 >> chroma_subsamp_x) * ar_padding + + chroma_subblock_size_x * 2 + + (2 >> chroma_subsamp_x) * ar_padding + right_pad; + + init_arrays(params, &pred_pos_luma, + &pred_pos_chroma, &luma_grain_block, + &cb_grain_block, &cr_grain_block, + luma_block_size_y * luma_block_size_x, + luma_block_size_x, + chroma_block_size_y * chroma_block_size_x, + chroma_block_size_x); + + if (debug_fgs & DEBUG_FGS_CONSUME_TIME) { 
+ time1 = div64_u64(local_clock() - step_time, 1000); + step_time = local_clock(); + } + + generate_luma_grain_block(params, + pred_pos_luma, luma_grain_block, + luma_block_size_y, luma_block_size_x, + luma_block_size_x, fg_table_buf); + + if (debug_fgs & DEBUG_FGS_CONSUME_TIME) { + time2 = div64_u64(local_clock() - step_time, 1000); + step_time = local_clock(); + } + + generate_chroma_grain_blocks(params, + pred_pos_chroma, luma_grain_block, + cb_grain_block, cr_grain_block, + luma_block_size_x, + chroma_block_size_y, chroma_block_size_x, + chroma_block_size_x, + chroma_subsamp_y, chroma_subsamp_x, + fg_table_buf); + + if (debug_fgs & DEBUG_FGS_CONSUME_TIME) { + time3 = div64_u64(local_clock() - step_time, 1000); + step_time = local_clock(); + } + + init_scaling_function( + params->scaling_points_y, + params->num_y_points, + params->scaling_delta_y); + + if (!params->chroma_scaling_from_luma) { + init_scaling_function( + params->scaling_points_cb, + params->num_cb_points, + params->scaling_delta_cb); + init_scaling_function( + params->scaling_points_cr, + params->num_cr_points, + params->scaling_delta_cr); + } + + if (debug_fgs & DEBUG_FGS_CONSUME_TIME) { + time4 = div64_u64(local_clock() - step_time, 1000); + step_time = local_clock(); + } + + convert_to_fg_table(params, fg_table_buf); + + if (debug_fgs & DEBUG_FGS_CONSUME_TIME) { + time5 = div64_u64(local_clock() - step_time, 1000); + step_time = local_clock(); + } + + dealloc_arrays(params, + &pred_pos_luma, &pred_pos_chroma, + &luma_grain_block, + &cb_grain_block, &cr_grain_block); + + if (debug_fgs & DEBUG_FGS_CONSUME_TIME) { + time6 = div64_u64(local_clock() - step_time, 1000); + pr_info("fgs consume time %d (%d, %d, %d, %d, %d, %d)us\n", + div64_u64(local_clock() - start_time, 1000), time1, time2, time3, time4, time5, time6); + } +} + +/* Film Grain Entry */ +#if 0 +int pic_film_grain_run(char *fg_table_addr, u32 fgs_ctrl, u32 *fgs_data) +#endif +int pic_film_grain_run(u32 frame_count, char *fg_table_addr, 
u32 fgs_ctrl, u32 *fgs_data) + +{ + struct aom_film_grain_t fg_params; + + if (debug_fgs & DEBUG_FGS_REGS) { + int i; + pr_info("film grain ctrl: 0x%08x\n", fgs_ctrl); + for (i = 0; i < FILM_GRAIN_REG_SIZE; i++) { + pr_info("film grain reg[%02d]: 0x%08x\n", i, fgs_data[i]); + } + } + + film_grain_data_parse(&fg_params, fgs_ctrl, fgs_data); + + if (debug_fgs & DEBUG_FGS_REGS_PARSE) { + fg_info_print(&fg_params); + } + + av1_add_film_grain_run(fg_table_addr, &fg_params); + + if (debug_fgs & DEBUG_FGS_TABLE_DUMP) { + #define FGS_TABLE_SIZE (512 * 128 / 8) + struct file *fg_fp = NULL; + char file[256] = {0}; + + snprintf(file, sizeof(file), "/data/tmp/fgs_table_%d.bin", frame_count - 1); + fg_fp = filp_open(file, O_CREAT | O_RDWR | O_TRUNC, 0666); + if (IS_ERR(fg_fp)) { + fg_fp = NULL; + printk(KERN_ERR"open %s failed\n", file); + } else { + kernel_write(fg_fp, fg_table_addr, FGS_TABLE_SIZE, &fg_fp->f_pos); + filp_close(fg_fp, current->files); + fg_fp = NULL; + } + } + return 0; +} +EXPORT_SYMBOL(pic_film_grain_run); + +int get_debug_fgs(void) +{ + return debug_fgs; +} +EXPORT_SYMBOL(get_debug_fgs); +
diff --git a/drivers/frame_provider/decoder_v4l/vav1/av1_global.h b/drivers/frame_provider/decoder_v4l/vav1/av1_global.h new file mode 100644 index 0000000..0c53ff8 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/vav1/av1_global.h
@@ -0,0 +1,2360 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef AV1_GLOBAL_H_ +#define AV1_GLOBAL_H_ +#define AOM_AV1_MMU_DW +#ifndef HAVE_NEON +#define HAVE_NEON 0 +#endif +#ifndef CONFIG_ACCOUNTING +#define CONFIG_ACCOUNTING 0 +#endif +#ifndef CONFIG_INSPECTION +#define CONFIG_INSPECTION 0 +#endif +#ifndef CONFIG_LPF_MASK +#define CONFIG_LPF_MASK 0 +#endif +#ifndef CONFIG_SIZE_LIMIT +#define CONFIG_SIZE_LIMIT 0 +#endif + +#define SUPPORT_SCALE_FACTOR +#define USE_SCALED_WIDTH_FROM_UCODE +#define AML +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +#define AML_DEVICE +#endif +#ifdef BUFMGR_FOR_SIM +#define printf io_printf +#endif + +#ifndef INT_MAX +#define INT_MAX 0x7FFFFFFF +#endif +#define AOMMIN(x, y) (((x) < (y)) ? (x) : (y)) +#define AOMMAX(x, y) (((x) > (y)) ? 
(x) : (y)) + +/* + *typedef char int8_t; + *#ifndef BUFMGR_FOR_SIM + *typedef unsigned char uint8_t; + *#endif + *typedef unsigned int uint32_t; + *typedef int int32_t; + *typedef long long int64_t; + */ + +#ifdef AML +#define AOM_AV1_MMU +#define FILM_GRAIN_REG_SIZE 39 +typedef struct buff_s +{ + uint32_t buf_start; + uint32_t buf_size; + uint32_t buf_end; +} buff_t; + +typedef struct BuffInfo_s +{ + uint32_t max_width; + uint32_t max_height; + uint32_t start_adr; + uint32_t end_adr; + buff_t ipp; + buff_t sao_abv; + buff_t sao_vb; + buff_t short_term_rps; + buff_t vps; + buff_t seg_map; + buff_t daala_top; + buff_t sao_up; + buff_t swap_buf; + buff_t cdf_buf; + buff_t gmc_buf; + buff_t scalelut; + buff_t dblk_para; + buff_t dblk_data; + buff_t cdef_data; + buff_t ups_data; +#ifdef AOM_AV1_MMU + buff_t mmu_vbh; + buff_t cm_header; +#endif +#ifdef AOM_AV1_MMU_DW + buff_t mmu_vbh_dw; + buff_t cm_header_dw; +#endif + buff_t fgs_table; + buff_t mpred_above; + buff_t mpred_mv; + buff_t rpm; + buff_t lmem; +} BuffInfo_t; +#endif + +#if 0 +#define va_start(v,l) __builtin_va_start(v,l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v,l) __builtin_va_arg(v,l) +#endif + +/* +mem.h +*/ +#if (defined(__GNUC__) && __GNUC__) || defined(__SUNPRO_C) +#define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n))) +#elif defined(_MSC_VER) +#define DECLARE_ALIGNED(n, typ, val) __declspec(align(n)) typ val +#else +#warning No alignment directives known for this compiler. +#define DECLARE_ALIGNED(n, typ, val) typ val +#endif + +/* Indicates that the usage of the specified variable has been audited to assure + * that it's safe to use uninitialized. Silences 'may be used uninitialized' + * warnings on gcc. 
+ */ +#if defined(__GNUC__) && __GNUC__ +#define UNINITIALIZED_IS_SAFE(x) x = x +#else +#define UNINITIALIZED_IS_SAFE(x) x +#endif + +#if HAVE_NEON && defined(_MSC_VER) +#define __builtin_prefetch(x) +#endif + +/* Shift down with rounding for use when n >= 0, value >= 0 */ +#define ROUND_POWER_OF_TWO(value, n) (((value) + (((1 << (n)) >> 1))) >> (n)) + +/* Shift down with rounding for signed integers, for use when n >= 0 */ +#define ROUND_POWER_OF_TWO_SIGNED(value, n) \ + (((value) < 0) ? -ROUND_POWER_OF_TWO(-(value), (n)) \ + : ROUND_POWER_OF_TWO((value), (n))) + +/* Shift down with rounding for use when n >= 0, value >= 0 for (64 bit) */ +#define ROUND_POWER_OF_TWO_64(value, n) \ + (((value) + ((((int64_t)1 << (n)) >> 1))) >> (n)) +/* Shift down with rounding for signed integers, for use when n >= 0 (64 bit) */ +#define ROUND_POWER_OF_TWO_SIGNED_64(value, n) \ + (((value) < 0) ? -ROUND_POWER_OF_TWO_64(-(value), (n)) \ + : ROUND_POWER_OF_TWO_64((value), (n))) + +/* shift right or left depending on sign of n */ +#define RIGHT_SIGNED_SHIFT(value, n) \ + ((n) < 0 ? 
((value) << (-(n))) : ((value) >> (n))) + +#define ALIGN_POWER_OF_TWO(value, n) \ + (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1)) + +#define DIVIDE_AND_ROUND(x, y) (((x) + ((y) >> 1)) / (y)) + +#define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1)) +#define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1)) + +#ifdef AML +#define TYPEDEF typedef +#define UENUM1BYTE(enumvar) enumvar +#define SENUM1BYTE(enumvar) enumvar +#define UENUM2BYTE(enumvar) enumvar +#define SENUM2BYTE(enumvar) enumvar +#define UENUM4BYTE(enumvar) enumvar +#define SENUM4BYTE(enumvar) enumvar + +#else +#define TYPEDEF +/*!\brief force enum to be unsigned 1 byte*/ +#define UENUM1BYTE(enumvar) \ + ; \ + typedef uint8_t enumvar + +/*!\brief force enum to be signed 1 byte*/ +#define SENUM1BYTE(enumvar) \ + ; \ + typedef int8_t enumvar + +/*!\brief force enum to be unsigned 2 byte*/ +#define UENUM2BYTE(enumvar) \ + ; \ + typedef uint16_t enumvar + +/*!\brief force enum to be signed 2 byte*/ +#define SENUM2BYTE(enumvar) \ + ; \ + typedef int16_t enumvar + +/*!\brief force enum to be unsigned 4 byte*/ +#define UENUM4BYTE(enumvar) \ + ; \ + typedef uint32_t enumvar + +/*!\brief force enum to be unsigned 4 byte*/ +#define SENUM4BYTE(enumvar) \ + ; \ + typedef int32_t enumvar +#endif + + +/* +#include "enums.h" +*/ +#undef MAX_SB_SIZE + +// Max superblock size +#define MAX_SB_SIZE_LOG2 7 +#define MAX_SB_SIZE (1 << MAX_SB_SIZE_LOG2) +#define MAX_SB_SQUARE (MAX_SB_SIZE * MAX_SB_SIZE) + +// Min superblock size +#define MIN_SB_SIZE_LOG2 6 + +// Pixels per Mode Info (MI) unit +#define MI_SIZE_LOG2 2 +#define MI_SIZE (1 << MI_SIZE_LOG2) + +// MI-units per max superblock (MI Block - MIB) +#define MAX_MIB_SIZE_LOG2 (MAX_SB_SIZE_LOG2 - MI_SIZE_LOG2) +#define MAX_MIB_SIZE (1 << MAX_MIB_SIZE_LOG2) + +// MI-units per min superblock +#define MIN_MIB_SIZE_LOG2 (MIN_SB_SIZE_LOG2 - MI_SIZE_LOG2) + +// Mask to extract MI offset within max MIB +#define MAX_MIB_MASK (MAX_MIB_SIZE - 1) + +// 
Maximum number of tile rows and tile columns +#define MAX_TILE_ROWS 64 +#define MAX_TILE_COLS 64 + +#define MAX_VARTX_DEPTH 2 + +#define MI_SIZE_64X64 (64 >> MI_SIZE_LOG2) +#define MI_SIZE_128X128 (128 >> MI_SIZE_LOG2) + +#define MAX_PALETTE_SQUARE (64 * 64) +// Maximum number of colors in a palette. +#define PALETTE_MAX_SIZE 8 +// Minimum number of colors in a palette. +#define PALETTE_MIN_SIZE 2 + +#define FRAME_OFFSET_BITS 5 +#define MAX_FRAME_DISTANCE ((1 << FRAME_OFFSET_BITS) - 1) + +// 4 frame filter levels: y plane vertical, y plane horizontal, +// u plane, and v plane +#define FRAME_LF_COUNT 4 +#define DEFAULT_DELTA_LF_MULTI 0 +#define MAX_MODE_LF_DELTAS 2 + +#define DIST_PRECISION_BITS 4 +#define DIST_PRECISION (1 << DIST_PRECISION_BITS) // 16 + +#define PROFILE_BITS 3 +// The following three profiles are currently defined. +// Profile 0. 8-bit and 10-bit 4:2:0 and 4:0:0 only. +// Profile 1. 8-bit and 10-bit 4:4:4 +// Profile 2. 8-bit and 10-bit 4:2:2 +// 12-bit 4:0:0, 4:2:2 and 4:4:4 +// Since we have three bits for the profiles, it can be extended later. +TYPEDEF enum { + PROFILE_0, + PROFILE_1, + PROFILE_2, + MAX_PROFILES, +} SENUM1BYTE(BITSTREAM_PROFILE); + +#define OP_POINTS_CNT_MINUS_1_BITS 5 +#define OP_POINTS_IDC_BITS 12 + +// Note: Some enums use the attribute 'packed' to use smallest possible integer +// type, so that we can save memory when they are used in structs/arrays. 
+ +typedef enum ATTRIBUTE_PACKED { + BLOCK_4X4, + BLOCK_4X8, + BLOCK_8X4, + BLOCK_8X8, + BLOCK_8X16, + BLOCK_16X8, + BLOCK_16X16, + BLOCK_16X32, + BLOCK_32X16, + BLOCK_32X32, + BLOCK_32X64, + BLOCK_64X32, + BLOCK_64X64, + BLOCK_64X128, + BLOCK_128X64, + BLOCK_128X128, + BLOCK_4X16, + BLOCK_16X4, + BLOCK_8X32, + BLOCK_32X8, + BLOCK_16X64, + BLOCK_64X16, + BLOCK_SIZES_ALL, + BLOCK_SIZES = BLOCK_4X16, + BLOCK_INVALID = 255, + BLOCK_LARGEST = (BLOCK_SIZES - 1) +} BLOCK_SIZE2; + +// 4X4, 8X8, 16X16, 32X32, 64X64, 128X128 +#define SQR_BLOCK_SIZES 6 + +TYPEDEF enum { + PARTITION_NONE, + PARTITION_HORZ, + PARTITION_VERT, + PARTITION_SPLIT, + PARTITION_HORZ_A, // HORZ split and the top partition is split again + PARTITION_HORZ_B, // HORZ split and the bottom partition is split again + PARTITION_VERT_A, // VERT split and the left partition is split again + PARTITION_VERT_B, // VERT split and the right partition is split again + PARTITION_HORZ_4, // 4:1 horizontal partition + PARTITION_VERT_4, // 4:1 vertical partition + EXT_PARTITION_TYPES, + PARTITION_TYPES = PARTITION_SPLIT + 1, + PARTITION_INVALID = 255 +} UENUM1BYTE(PARTITION_TYPE); + +typedef char PARTITION_CONTEXT; +#define PARTITION_PLOFFSET 4 // number of probability models per block size +#define PARTITION_BLOCK_SIZES 5 +#define PARTITION_CONTEXTS (PARTITION_BLOCK_SIZES * PARTITION_PLOFFSET) + +// block transform size +TYPEDEF enum { + TX_4X4, // 4x4 transform + TX_8X8, // 8x8 transform + TX_16X16, // 16x16 transform + TX_32X32, // 32x32 transform + TX_64X64, // 64x64 transform + TX_4X8, // 4x8 transform + TX_8X4, // 8x4 transform + TX_8X16, // 8x16 transform + TX_16X8, // 16x8 transform + TX_16X32, // 16x32 transform + TX_32X16, // 32x16 transform + TX_32X64, // 32x64 transform + TX_64X32, // 64x32 transform + TX_4X16, // 4x16 transform + TX_16X4, // 16x4 transform + TX_8X32, // 8x32 transform + TX_32X8, // 32x8 transform + TX_16X64, // 16x64 transform + TX_64X16, // 64x16 transform + TX_SIZES_ALL, // Includes 
rectangular transforms + TX_SIZES = TX_4X8, // Does NOT include rectangular transforms + TX_SIZES_LARGEST = TX_64X64, + TX_INVALID = 255 // Invalid transform size +} UENUM1BYTE(TX_SIZE); + +#define TX_SIZE_LUMA_MIN (TX_4X4) +/* We don't need to code a transform size unless the allowed size is at least + one more than the minimum. */ +#define TX_SIZE_CTX_MIN (TX_SIZE_LUMA_MIN + 1) + +// Maximum tx_size categories +#define MAX_TX_CATS (TX_SIZES - TX_SIZE_CTX_MIN) +#define MAX_TX_DEPTH 2 + +#define MAX_TX_SIZE_LOG2 (6) +#define MAX_TX_SIZE (1 << MAX_TX_SIZE_LOG2) +#define MIN_TX_SIZE_LOG2 2 +#define MIN_TX_SIZE (1 << MIN_TX_SIZE_LOG2) +#define MAX_TX_SQUARE (MAX_TX_SIZE * MAX_TX_SIZE) + +// Pad 4 extra columns to remove horizontal availability check. +#define TX_PAD_HOR_LOG2 2 +#define TX_PAD_HOR 4 +// Pad 6 extra rows (2 on top and 4 on bottom) to remove vertical availability +// check. +#define TX_PAD_TOP 0 +#define TX_PAD_BOTTOM 4 +#define TX_PAD_VER (TX_PAD_TOP + TX_PAD_BOTTOM) +// Pad 16 extra bytes to avoid reading overflow in SIMD optimization. 
// Extra bytes appended after the padded transform block so SIMD loads past the
// end of the data cannot fault.
#define TX_PAD_END 16
#define TX_PAD_2D ((32 + TX_PAD_HOR) * (32 + TX_PAD_VER) + TX_PAD_END)

// Number of maximum size transform blocks in the maximum size superblock
#define MAX_TX_BLOCKS_IN_MAX_SB_LOG2 ((MAX_SB_SIZE_LOG2 - MAX_TX_SIZE_LOG2) * 2)
#define MAX_TX_BLOCKS_IN_MAX_SB (1 << MAX_TX_BLOCKS_IN_MAX_SB_LOG2)

// frame transform mode
// NOTE(review): TYPEDEF / UENUM1BYTE are presumably macros defined earlier in
// this file that declare the enum as a 1-byte type -- confirm at their
// definition site. Enumerator values below are fixed by the AV1 bitstream;
// do not reorder.
TYPEDEF enum {
  ONLY_4X4,         // use only 4x4 transform
  TX_MODE_LARGEST,  // transform size is the largest possible for pu size
  TX_MODE_SELECT,   // transform specified for each block
  TX_MODES,
} UENUM1BYTE(TX_MODE);

// 1D tx types
TYPEDEF enum {
  DCT_1D,
  ADST_1D,
  FLIPADST_1D,
  IDTX_1D,
  TX_TYPES_1D,
} UENUM1BYTE(TX_TYPE_1D);

// 2D transform types: each entry names the vertical then horizontal 1D kernel.
TYPEDEF enum {
  DCT_DCT,            // DCT in both horizontal and vertical
  ADST_DCT,           // ADST in vertical, DCT in horizontal
  DCT_ADST,           // DCT in vertical, ADST in horizontal
  ADST_ADST,          // ADST in both directions
  FLIPADST_DCT,       // FLIPADST in vertical, DCT in horizontal
  DCT_FLIPADST,       // DCT in vertical, FLIPADST in horizontal
  FLIPADST_FLIPADST,  // FLIPADST in both directions
  ADST_FLIPADST,      // ADST in vertical, FLIPADST in horizontal
  FLIPADST_ADST,      // FLIPADST in vertical, ADST in horizontal
  IDTX,               // Identity in both directions
  V_DCT,              // DCT in vertical, identity in horizontal
  H_DCT,              // Identity in vertical, DCT in horizontal
  V_ADST,             // ADST in vertical, identity in horizontal
  H_ADST,             // Identity in vertical, ADST in horizontal
  V_FLIPADST,         // FLIPADST in vertical, identity in horizontal
  H_FLIPADST,         // Identity in vertical, FLIPADST in horizontal
  TX_TYPES,
} UENUM1BYTE(TX_TYPE);

// Pairs of interpolation filters (vertical filter, horizontal filter).
TYPEDEF enum {
  REG_REG,
  REG_SMOOTH,
  REG_SHARP,
  SMOOTH_REG,
  SMOOTH_SMOOTH,
  SMOOTH_SHARP,
  SHARP_REG,
  SHARP_SMOOTH,
  SHARP_SHARP,
} UENUM1BYTE(DUAL_FILTER_TYPE);

// Sets of transform types that may be signalled, from most to least
// restrictive.
TYPEDEF enum {
  // DCT only
  EXT_TX_SET_DCTONLY,
  // DCT + Identity only
  EXT_TX_SET_DCT_IDTX,
  // Discrete Trig transforms w/o flip (4) + Identity (1)
  EXT_TX_SET_DTT4_IDTX,
  // Discrete Trig transforms w/o flip (4) + Identity (1) + 1D Hor/vert DCT (2)
  EXT_TX_SET_DTT4_IDTX_1DDCT,
  // Discrete Trig transforms w/ flip (9) + Identity (1) + 1D Hor/Ver DCT (2)
  EXT_TX_SET_DTT9_IDTX_1DDCT,
  // Discrete Trig transforms w/ flip (9) + Identity (1) + 1D Hor/Ver (6)
  EXT_TX_SET_ALL16,
  EXT_TX_SET_TYPES
} UENUM1BYTE(TxSetType);

// True for the 2D (non-identity-based) entries of TX_TYPE, i.e. those before
// IDTX in the enum above.
#define IS_2D_TRANSFORM(tx_type) (tx_type < IDTX)

#define EXT_TX_SIZES 4       // number of sizes that use extended transforms
#define EXT_TX_SETS_INTER 4  // Sets of transform selections for INTER
#define EXT_TX_SETS_INTRA 3  // Sets of transform selections for INTRA

// Bit flags selecting individual reference frames; OR them together to build
// a reference mask.
TYPEDEF enum {
  AOM_LAST_FLAG = 1 << 0,
  AOM_LAST2_FLAG = 1 << 1,
  AOM_LAST3_FLAG = 1 << 2,
  AOM_GOLD_FLAG = 1 << 3,
  AOM_BWD_FLAG = 1 << 4,
  AOM_ALT2_FLAG = 1 << 5,
  AOM_ALT_FLAG = 1 << 6,
  AOM_REFFRAME_ALL = (1 << 7) - 1
} UENUM1BYTE(AOM_REFFRAME);

TYPEDEF enum {
  UNIDIR_COMP_REFERENCE,
  BIDIR_COMP_REFERENCE,
  COMP_REFERENCE_TYPES,
} UENUM1BYTE(COMP_REFERENCE_TYPE);

/*enum { PLANE_TYPE_Y, PLANE_TYPE_UV, PLANE_TYPES } UENUM1BYTE(PLANE_TYPE);*/

// Chroma-from-Luma (CfL) alpha codebook parameters.
#define CFL_ALPHABET_SIZE_LOG2 4
#define CFL_ALPHABET_SIZE (1 << CFL_ALPHABET_SIZE_LOG2)
#define CFL_MAGS_SIZE ((2 << CFL_ALPHABET_SIZE_LOG2) + 1)
#define CFL_IDX_U(idx) (idx >> CFL_ALPHABET_SIZE_LOG2)
#define CFL_IDX_V(idx) (idx & (CFL_ALPHABET_SIZE - 1))

/*enum { CFL_PRED_U, CFL_PRED_V, CFL_PRED_PLANES } UENUM1BYTE(CFL_PRED_TYPE);*/

TYPEDEF enum {
  CFL_SIGN_ZERO,
  CFL_SIGN_NEG,
  CFL_SIGN_POS,
  CFL_SIGNS
} UENUM1BYTE(CFL_SIGN_TYPE);

TYPEDEF enum {
  CFL_DISALLOWED,
  CFL_ALLOWED,
  CFL_ALLOWED_TYPES
} UENUM1BYTE(CFL_ALLOWED_TYPE);

// CFL_SIGN_ZERO,CFL_SIGN_ZERO is invalid
#define CFL_JOINT_SIGNS (CFL_SIGNS * CFL_SIGNS - 1)
// CFL_SIGN_U is equivalent to (js + 1) / 3 for js in 0 to 8
#define CFL_SIGN_U(js) (((js + 1) * 11) >> 5)
// CFL_SIGN_V is equivalent to (js + 1) % 3 for js in 0 to 8
#define CFL_SIGN_V(js) ((js + 1) - CFL_SIGNS * CFL_SIGN_U(js))

// There is no context when the alpha for a given plane is zero.
// So there are 2 fewer contexts than joint signs.
#define CFL_ALPHA_CONTEXTS (CFL_JOINT_SIGNS + 1 - CFL_SIGNS)
#define CFL_CONTEXT_U(js) (js + 1 - CFL_SIGNS)
// Also, the contexts are symmetric under swapping the planes.
#define CFL_CONTEXT_V(js) \
  (CFL_SIGN_V(js) * CFL_SIGNS + CFL_SIGN_U(js) - CFL_SIGNS)

TYPEDEF enum {
  PALETTE_MAP,
  COLOR_MAP_TYPES,
} UENUM1BYTE(COLOR_MAP_TYPE);

// Number of colors in a palette (2..8).
TYPEDEF enum {
  TWO_COLORS,
  THREE_COLORS,
  FOUR_COLORS,
  FIVE_COLORS,
  SIX_COLORS,
  SEVEN_COLORS,
  EIGHT_COLORS,
  PALETTE_SIZES
} UENUM1BYTE(PALETTE_SIZE);

TYPEDEF enum {
  PALETTE_COLOR_ONE,
  PALETTE_COLOR_TWO,
  PALETTE_COLOR_THREE,
  PALETTE_COLOR_FOUR,
  PALETTE_COLOR_FIVE,
  PALETTE_COLOR_SIX,
  PALETTE_COLOR_SEVEN,
  PALETTE_COLOR_EIGHT,
  PALETTE_COLORS
} UENUM1BYTE(PALETTE_COLOR);

// Note: All directional predictors must be between V_PRED and D67_PRED (both
// inclusive).
// Intra modes come first, then single-reference inter modes, then compound
// inter modes; the *_START/*_END aliases below carve those ranges out of the
// same enumeration.
TYPEDEF enum {
  DC_PRED,        // Average of above and left pixels
  V_PRED,         // Vertical
  H_PRED,         // Horizontal
  D45_PRED,       // Directional 45 degree
  D135_PRED,      // Directional 135 degree
  D113_PRED,      // Directional 113 degree
  D157_PRED,      // Directional 157 degree
  D203_PRED,      // Directional 203 degree
  D67_PRED,       // Directional 67 degree
  SMOOTH_PRED,    // Combination of horizontal and vertical interpolation
  SMOOTH_V_PRED,  // Vertical interpolation
  SMOOTH_H_PRED,  // Horizontal interpolation
  PAETH_PRED,     // Predict from the direction of smallest gradient
  NEARESTMV,
  NEARMV,
  GLOBALMV,
  NEWMV,
  // Compound ref compound modes
  NEAREST_NEARESTMV,
  NEAR_NEARMV,
  NEAREST_NEWMV,
  NEW_NEARESTMV,
  NEAR_NEWMV,
  NEW_NEARMV,
  GLOBAL_GLOBALMV,
  NEW_NEWMV,
  MB_MODE_COUNT,
  INTRA_MODE_START = DC_PRED,
  INTRA_MODE_END = NEARESTMV,
  INTRA_MODE_NUM = INTRA_MODE_END - INTRA_MODE_START,
  SINGLE_INTER_MODE_START = NEARESTMV,
  SINGLE_INTER_MODE_END = NEAREST_NEARESTMV,
  SINGLE_INTER_MODE_NUM = SINGLE_INTER_MODE_END - SINGLE_INTER_MODE_START,
  COMP_INTER_MODE_START = NEAREST_NEARESTMV,
  COMP_INTER_MODE_END = MB_MODE_COUNT,
  COMP_INTER_MODE_NUM = COMP_INTER_MODE_END - COMP_INTER_MODE_START,
  INTER_MODE_START = NEARESTMV,
  INTER_MODE_END = MB_MODE_COUNT,
  INTRA_MODES = PAETH_PRED + 1,  // PAETH_PRED has to be the last intra mode.
  INTRA_INVALID = MB_MODE_COUNT  // For uv_mode in inter blocks
} UENUM1BYTE(PREDICTION_MODE);

// TODO(ltrudeau) Do we really want to pack this?
// TODO(ltrudeau) Do we match with PREDICTION_MODE?
// Chroma intra modes: mirrors the intra part of PREDICTION_MODE plus CfL.
TYPEDEF enum {
  UV_DC_PRED,        // Average of above and left pixels
  UV_V_PRED,         // Vertical
  UV_H_PRED,         // Horizontal
  UV_D45_PRED,       // Directional 45 degree
  UV_D135_PRED,      // Directional 135 degree
  UV_D113_PRED,      // Directional 113 degree
  UV_D157_PRED,      // Directional 157 degree
  UV_D203_PRED,      // Directional 203 degree
  UV_D67_PRED,       // Directional 67 degree
  UV_SMOOTH_PRED,    // Combination of horizontal and vertical interpolation
  UV_SMOOTH_V_PRED,  // Vertical interpolation
  UV_SMOOTH_H_PRED,  // Horizontal interpolation
  UV_PAETH_PRED,     // Predict from the direction of smallest gradient
  UV_CFL_PRED,       // Chroma-from-Luma
  UV_INTRA_MODES,
  UV_MODE_INVALID,  // For uv_mode in inter blocks
} UENUM1BYTE(UV_PREDICTION_MODE);

TYPEDEF enum {
  SIMPLE_TRANSLATION,
  OBMC_CAUSAL,    // 2-sided OBMC
  WARPED_CAUSAL,  // 2-sided WARPED
  MOTION_MODES
} UENUM1BYTE(MOTION_MODE);

TYPEDEF enum {
  II_DC_PRED,
  II_V_PRED,
  II_H_PRED,
  II_SMOOTH_PRED,
  INTERINTRA_MODES
} UENUM1BYTE(INTERINTRA_MODE);

TYPEDEF enum {
  COMPOUND_AVERAGE,
  COMPOUND_DISTWTD,
  COMPOUND_WEDGE,
  COMPOUND_DIFFWTD,
  COMPOUND_TYPES,
  // Number of mask-based compound types (wedge + diffwtd).
  MASKED_COMPOUND_TYPES = 2,
} UENUM1BYTE(COMPOUND_TYPE);

TYPEDEF enum {
  FILTER_DC_PRED,
  FILTER_V_PRED,
  FILTER_H_PRED,
  FILTER_D157_PRED,
  FILTER_PAETH_PRED,
  FILTER_INTRA_MODES,
} UENUM1BYTE(FILTER_INTRA_MODE);

// AV1 operating levels, encoded as (major - 2) * 4 + minor.
TYPEDEF enum {
  SEQ_LEVEL_2_0,
  SEQ_LEVEL_2_1,
  SEQ_LEVEL_2_2,
  SEQ_LEVEL_2_3,
  SEQ_LEVEL_3_0,
  SEQ_LEVEL_3_1,
  SEQ_LEVEL_3_2,
  SEQ_LEVEL_3_3,
  SEQ_LEVEL_4_0,
  SEQ_LEVEL_4_1,
  SEQ_LEVEL_4_2,
  SEQ_LEVEL_4_3,
  SEQ_LEVEL_5_0,
  SEQ_LEVEL_5_1,
  SEQ_LEVEL_5_2,
  SEQ_LEVEL_5_3,
  SEQ_LEVEL_6_0,
  SEQ_LEVEL_6_1,
  SEQ_LEVEL_6_2,
  SEQ_LEVEL_6_3,
  SEQ_LEVEL_7_0,
  SEQ_LEVEL_7_1,
  SEQ_LEVEL_7_2,
  SEQ_LEVEL_7_3,
  SEQ_LEVELS,
  SEQ_LEVEL_MAX = 31
} UENUM1BYTE(AV1_LEVEL);

#define LEVEL_BITS 5

#define DIRECTIONAL_MODES 8
#define MAX_ANGLE_DELTA 3
#define ANGLE_STEP 3

#define INTER_MODES (1 + NEWMV - NEARESTMV)

#define INTER_COMPOUND_MODES (1 + NEW_NEWMV - NEAREST_NEARESTMV)

// Entropy-coding context counts for the various syntax elements below.
#define SKIP_CONTEXTS 3
#define SKIP_MODE_CONTEXTS 3

#define COMP_INDEX_CONTEXTS 6
#define COMP_GROUP_IDX_CONTEXTS 6

#define NMV_CONTEXTS 3

#define NEWMV_MODE_CONTEXTS 6
#define GLOBALMV_MODE_CONTEXTS 2
#define REFMV_MODE_CONTEXTS 6
#define DRL_MODE_CONTEXTS 3

// Bit-field layout of the packed inter-mode context value.
#define GLOBALMV_OFFSET 3
#define REFMV_OFFSET 4

#define NEWMV_CTX_MASK ((1 << GLOBALMV_OFFSET) - 1)
#define GLOBALMV_CTX_MASK ((1 << (REFMV_OFFSET - GLOBALMV_OFFSET)) - 1)
#define REFMV_CTX_MASK ((1 << (8 - REFMV_OFFSET)) - 1)

#define COMP_NEWMV_CTXS 5
#define INTER_MODE_CONTEXTS 8

#define DELTA_Q_SMALL 3
#define DELTA_Q_PROBS (DELTA_Q_SMALL)
#define DEFAULT_DELTA_Q_RES_PERCEPTUAL 4
#define DEFAULT_DELTA_Q_RES_OBJECTIVE 4

#define DELTA_LF_SMALL 3
#define DELTA_LF_PROBS (DELTA_LF_SMALL)
#define DEFAULT_DELTA_LF_RES 2

/* Segment Feature Masks */
#define MAX_MV_REF_CANDIDATES 2

#define MAX_REF_MV_STACK_SIZE 8
#define REF_CAT_LEVEL 640

#define INTRA_INTER_CONTEXTS 4
#define COMP_INTER_CONTEXTS 5
#define REF_CONTEXTS 3

#define COMP_REF_TYPE_CONTEXTS 5
#define UNI_COMP_REF_CONTEXTS 3

#define TXFM_PARTITION_CONTEXTS ((TX_SIZES - TX_8X8) * 6 - 3)
#ifdef ORI_CODE
typedef uint8_t TXFM_CONTEXT;
#endif
// An enum for single reference types (and some derived values).
// Single-reference frame indices. NONE_FRAME (-1) means "no reference";
// the derived counts below are computed from the fixed enumerator positions.
enum {
  NONE_FRAME = -1,
  INTRA_FRAME,
  LAST_FRAME,
  LAST2_FRAME,
  LAST3_FRAME,
  GOLDEN_FRAME,
  BWDREF_FRAME,
  ALTREF2_FRAME,
  ALTREF_FRAME,
  REF_FRAMES,

  // Extra/scratch reference frame. It may be:
  // - used to update the ALTREF2_FRAME ref (see lshift_bwd_ref_frames()), or
  // - updated from ALTREF2_FRAME ref (see rshift_bwd_ref_frames()).
  EXTREF_FRAME = REF_FRAMES,

  // Number of inter (non-intra) reference types.
  INTER_REFS_PER_FRAME = ALTREF_FRAME - LAST_FRAME + 1,

  // Number of forward (aka past) reference types.
  FWD_REFS = GOLDEN_FRAME - LAST_FRAME + 1,

  // Number of backward (aka future) reference types.
  BWD_REFS = ALTREF_FRAME - BWDREF_FRAME + 1,

  SINGLE_REFS = FWD_REFS + BWD_REFS,
};

#define REF_FRAMES_LOG2 3

// REF_FRAMES for the cm->ref_frame_map array, 1 scratch frame for the new
// frame in cm->cur_frame, INTER_REFS_PER_FRAME for scaled references on the
// encoder in the cpi->scaled_ref_buf array.
#define FRAME_BUFFERS (REF_FRAMES + 1 + INTER_REFS_PER_FRAME)

// Map a forward/backward reference enum value to a 0-based array offset.
#define FWD_RF_OFFSET(ref) (ref - LAST_FRAME)
#define BWD_RF_OFFSET(ref) (ref - BWDREF_FRAME)

// Uni-directional compound reference pairs (both refs on the same side).
TYPEDEF enum {
  LAST_LAST2_FRAMES,      // { LAST_FRAME, LAST2_FRAME }
  LAST_LAST3_FRAMES,      // { LAST_FRAME, LAST3_FRAME }
  LAST_GOLDEN_FRAMES,     // { LAST_FRAME, GOLDEN_FRAME }
  BWDREF_ALTREF_FRAMES,   // { BWDREF_FRAME, ALTREF_FRAME }
  LAST2_LAST3_FRAMES,     // { LAST2_FRAME, LAST3_FRAME }
  LAST2_GOLDEN_FRAMES,    // { LAST2_FRAME, GOLDEN_FRAME }
  LAST3_GOLDEN_FRAMES,    // { LAST3_FRAME, GOLDEN_FRAME }
  BWDREF_ALTREF2_FRAMES,  // { BWDREF_FRAME, ALTREF2_FRAME }
  ALTREF2_ALTREF_FRAMES,  // { ALTREF2_FRAME, ALTREF_FRAME }
  TOTAL_UNIDIR_COMP_REFS,
  // NOTE: UNIDIR_COMP_REFS is the number of uni-directional reference pairs
  // that are explicitly signaled.
  UNIDIR_COMP_REFS = BWDREF_ALTREF_FRAMES + 1,
} UENUM1BYTE(UNIDIR_COMP_REF);

#define TOTAL_COMP_REFS (FWD_REFS * BWD_REFS + TOTAL_UNIDIR_COMP_REFS)

#define COMP_REFS (FWD_REFS * BWD_REFS + UNIDIR_COMP_REFS)

// NOTE: A limited number of unidirectional reference pairs can be signalled for
// compound prediction. The use of skip mode, on the other hand, makes it
// possible to have a reference pair not listed for explicit signaling.
#define MODE_CTX_REF_FRAMES (REF_FRAMES + TOTAL_COMP_REFS)

// Note: It includes single and compound references. So, it can take values from
// NONE_FRAME to (MODE_CTX_REF_FRAMES - 1). Hence, it is not defined as an enum.
typedef int8_t MV_REFERENCE_FRAME;

// Loop-restoration filter types.
TYPEDEF enum {
  RESTORE_NONE,
  RESTORE_WIENER,
  RESTORE_SGRPROJ,
  RESTORE_SWITCHABLE,
  RESTORE_SWITCHABLE_TYPES = RESTORE_SWITCHABLE,
  RESTORE_TYPES = 4,
} UENUM1BYTE(RestorationType);

// Picture prediction structures (0-12 are predefined) in scalability metadata.
TYPEDEF enum {
  SCALABILITY_L1T2 = 0,
  SCALABILITY_L1T3 = 1,
  SCALABILITY_L2T1 = 2,
  SCALABILITY_L2T2 = 3,
  SCALABILITY_L2T3 = 4,
  SCALABILITY_S2T1 = 5,
  SCALABILITY_S2T2 = 6,
  SCALABILITY_S2T3 = 7,
  SCALABILITY_L2T1h = 8,
  SCALABILITY_L2T2h = 9,
  SCALABILITY_L2T3h = 10,
  SCALABILITY_S2T1h = 11,
  SCALABILITY_S2T2h = 12,
  SCALABILITY_S2T3h = 13,
  SCALABILITY_SS = 14
} UENUM1BYTE(SCALABILITY_STRUCTURES);

#define SUPERRES_SCALE_BITS 3
// NOTE(review): SCALE_NUMERATOR is defined further down this file (scale.h
// section); macro expansion is deferred, so the forward reference is fine.
#define SUPERRES_SCALE_DENOMINATOR_MIN (SCALE_NUMERATOR + 1)

// In large_scale_tile coding, external references are used.
#define MAX_EXTERNAL_REFERENCES 128
#define MAX_TILES 512

// Build-configuration switches (hard-coded off for this driver port).
#define CONFIG_MULTITHREAD 0
#define CONFIG_ENTROPY_STATS 0

#define CONFIG_MAX_DECODE_PROFILE 2

/*
from:
seg_common.h
*/
#ifdef ORI_CODE

#define MAX_SEGMENTS 8
#define SEG_TREE_PROBS (MAX_SEGMENTS - 1)

#define SEG_TEMPORAL_PRED_CTXS 3
#define SPATIAL_PREDICTION_PROBS 3

enum {
  SEG_LVL_ALT_Q,       // Use alternate Quantizer ....
  SEG_LVL_ALT_LF_Y_V,  // Use alternate loop filter value on y plane vertical
  SEG_LVL_ALT_LF_Y_H,  // Use alternate loop filter value on y plane horizontal
  SEG_LVL_ALT_LF_U,    // Use alternate loop filter value on u plane
  SEG_LVL_ALT_LF_V,    // Use alternate loop filter value on v plane
  SEG_LVL_REF_FRAME,   // Optional Segment reference frame
  SEG_LVL_SKIP,        // Optional Segment (0,0) + skip mode
  SEG_LVL_GLOBALMV,
  SEG_LVL_MAX
} UENUM1BYTE(SEG_LVL_FEATURES);

// Per-frame segmentation state (only compiled in the original libaom build).
struct segmentation {
  uint8_t enabled;
  uint8_t update_map;
  uint8_t update_data;
  uint8_t temporal_update;

  int16_t feature_data[MAX_SEGMENTS][SEG_LVL_MAX];
  unsigned int feature_mask[MAX_SEGMENTS];
  int last_active_segid;  // The highest numbered segment id that has some
                          // enabled feature.
  uint8_t segid_preskip;  // Whether the segment id will be read before the
                          // skip syntax element.
                          // 1: the segment id will be read first.
                          // 0: the skip syntax element will be read first.
};

/*
from av1_loopfilter.h
*/
#define MAX_LOOP_FILTER 63

/* from
quant_common.h:
*/
#define MAXQ 255

#endif

/*
from:
aom/av1/common/common.h
*/
// Zero a single object / a counted array. Note av1_zero takes the object
// itself (not a pointer); av1_zero_array takes a pointer and element count.
#define av1_zero(dest) memset(&(dest), 0, sizeof(dest))
#define av1_zero_array(dest, n) memset(dest, 0, n * sizeof(*(dest)))
/*
from:
aom/av1/common/alloccommon.h
*/
#define INVALID_IDX -1  // Invalid buffer index.

/*
from:
aom/av1/common/timing.h
*/
// Display timing info from the sequence header.
typedef struct aom_timing {
  uint32_t num_units_in_display_tick;
  uint32_t time_scale;
  int equal_picture_interval;
  uint32_t num_ticks_per_picture;
} aom_timing_info_t;

// Decoder model info from the sequence header.
typedef struct aom_dec_model_info {
  uint32_t num_units_in_decoding_tick;
  int encoder_decoder_buffer_delay_length;
  int buffer_removal_time_length;
  int frame_presentation_time_length;
} aom_dec_model_info_t;

// Per-operating-point decoder model parameters.
typedef struct aom_dec_model_op_parameters {
  int decoder_model_param_present_flag;
  int64_t bitrate;
  int64_t buffer_size;
  uint32_t decoder_buffer_delay;
  uint32_t encoder_buffer_delay;
  int low_delay_mode_flag;
  int display_model_param_present_flag;
  int initial_display_delay;
} aom_dec_model_op_parameters_t;

typedef struct aom_op_timing_info_t {
  uint32_t buffer_removal_time;
} aom_op_timing_info_t;
/*
from:
aom/aom_codec.h
*/
/*!\brief OBU types. */
typedef enum {
  OBU_SEQUENCE_HEADER = 1,
  OBU_TEMPORAL_DELIMITER = 2,
  OBU_FRAME_HEADER = 3,
  OBU_TILE_GROUP = 4,
  OBU_METADATA = 5,
  OBU_FRAME = 6,
  OBU_REDUNDANT_FRAME_HEADER = 7,
  OBU_TILE_LIST = 8,
  OBU_PADDING = 15,
} OBU_TYPE;

typedef enum aom_bit_depth {
  AOM_BITS_8 = 8,   /**< 8 bits */
  AOM_BITS_10 = 10, /**< 10 bits */
  AOM_BITS_12 = 12, /**< 12 bits */
} aom_bit_depth_t;

/*!\brief Algorithm return codes */
typedef enum {
  /*!\brief Operation completed without error */
  AOM_CODEC_OK,

  /*!\brief Unspecified error */
  AOM_CODEC_ERROR,

  /*!\brief Memory operation failed */
  AOM_CODEC_MEM_ERROR,

  /*!\brief ABI version mismatch */
  AOM_CODEC_ABI_MISMATCH,

  /*!\brief Algorithm does not have required capability */
  AOM_CODEC_INCAPABLE,

  /*!\brief The given bitstream is not supported.
   *
   * The bitstream was unable to be parsed at the highest level. The decoder
   * is unable to proceed. This error \ref SHOULD be treated as fatal to the
   * stream.
   */
  AOM_CODEC_UNSUP_BITSTREAM,

  /*!\brief Encoded bitstream uses an unsupported feature
   *
   * The decoder does not implement a feature required by the encoder. This
   * return code should only be used for features that prevent future
   * pictures from being properly decoded. This error \ref MAY be treated as
   * fatal to the stream or \ref MAY be treated as fatal to the current GOP.
   */
  AOM_CODEC_UNSUP_FEATURE,

  /*!\brief The coded data for this stream is corrupt or incomplete
   *
   * There was a problem decoding the current frame. This return code
   * should only be used for failures that prevent future pictures from
   * being properly decoded. This error \ref MAY be treated as fatal to the
   * stream or \ref MAY be treated as fatal to the current GOP. If decoding
   * is continued for the current GOP, artifacts may be present.
   */
  AOM_CODEC_CORRUPT_FRAME,

  /*!\brief An application-supplied parameter is not valid.
   *
   */
  AOM_CODEC_INVALID_PARAM,

  /*!\brief An iterator reached the end of list.
   *
   */
  AOM_CODEC_LIST_END

} aom_codec_err_t;

typedef struct cfg_options {
  /*!\brief Reflects if ext_partition should be enabled
   *
   * If this value is non-zero it enables the feature
   */
  unsigned int ext_partition;
} cfg_options_t;

/*
from:
aom/av1/common/obu_util.h
*/
// Parsed OBU header fields.
typedef struct {
  size_t size;  // Size (1 or 2 bytes) of the OBU header (including the
                // optional OBU extension header) in the bitstream.
  OBU_TYPE type;
  int has_size_field;
  int has_extension;
  // The following fields come from the OBU extension header and therefore are
  // only used if has_extension is true.
  int temporal_layer_id;
  int spatial_layer_id;
} ObuHeader;


/*
from:
aom/internal/aom_codec_internal.h
*/

// Last-error bookkeeping for a codec instance.
struct aom_internal_error_info {
  aom_codec_err_t error_code;
  int has_detail;
  char detail[80];
  int setjmp;  // Boolean: whether 'jmp' is valid.
#ifdef ORI_CODE
  jmp_buf jmp;
#endif
};

/*
from:
aom/aom_frame_buffer.h
*/
typedef struct aom_codec_frame_buffer {
  uint8_t *data; /**< Pointer to the data buffer */
  size_t size;   /**< Size of data in bytes */
  void *priv;    /**< Frame's private data */
} aom_codec_frame_buffer_t;

/*
from:
aom/aom_image.h
*/
#define AOM_IMAGE_ABI_VERSION (5) /**<\hideinitializer*/

#define AOM_IMG_FMT_PLANAR 0x100  /**< Image is a planar format. */
#define AOM_IMG_FMT_UV_FLIP 0x200 /**< V plane precedes U in memory. */
/** 0x400 used to signal alpha channel, skipping for backwards compatibility. */
#define AOM_IMG_FMT_HIGHBITDEPTH 0x800 /**< Image uses 16bit framebuffer. */

/*!\brief List of supported image formats */
typedef enum aom_img_fmt {
  AOM_IMG_FMT_NONE,
  AOM_IMG_FMT_YV12 =
      AOM_IMG_FMT_PLANAR | AOM_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
  AOM_IMG_FMT_I420 = AOM_IMG_FMT_PLANAR | 2,
  AOM_IMG_FMT_AOMYV12 = AOM_IMG_FMT_PLANAR | AOM_IMG_FMT_UV_FLIP |
                        3, /** < planar 4:2:0 format with aom color space */
  AOM_IMG_FMT_AOMI420 = AOM_IMG_FMT_PLANAR | 4,
  AOM_IMG_FMT_I422 = AOM_IMG_FMT_PLANAR | 5,
  AOM_IMG_FMT_I444 = AOM_IMG_FMT_PLANAR | 6,
  AOM_IMG_FMT_I42016 = AOM_IMG_FMT_I420 | AOM_IMG_FMT_HIGHBITDEPTH,
  AOM_IMG_FMT_YV1216 = AOM_IMG_FMT_YV12 | AOM_IMG_FMT_HIGHBITDEPTH,
  AOM_IMG_FMT_I42216 = AOM_IMG_FMT_I422 | AOM_IMG_FMT_HIGHBITDEPTH,
  AOM_IMG_FMT_I44416 = AOM_IMG_FMT_I444 | AOM_IMG_FMT_HIGHBITDEPTH,
} aom_img_fmt_t; /**< alias for enum aom_img_fmt */

/*!\brief List of supported color primaries */
typedef enum aom_color_primaries {
  AOM_CICP_CP_RESERVED_0 = 0,   /**< For future use */
  AOM_CICP_CP_BT_709 = 1,       /**< BT.709 */
  AOM_CICP_CP_UNSPECIFIED = 2,  /**< Unspecified */
  AOM_CICP_CP_RESERVED_3 = 3,   /**< For future use */
  AOM_CICP_CP_BT_470_M = 4,     /**< BT.470 System M (historical) */
  AOM_CICP_CP_BT_470_B_G = 5,   /**< BT.470 System B, G (historical) */
  AOM_CICP_CP_BT_601 = 6,       /**< BT.601 */
  AOM_CICP_CP_SMPTE_240 = 7,    /**< SMPTE 240 */
  AOM_CICP_CP_GENERIC_FILM =
      8, /**< Generic film (color filters using illuminant C) */
  AOM_CICP_CP_BT_2020 = 9,      /**< BT.2020, BT.2100 */
  AOM_CICP_CP_XYZ = 10,         /**< SMPTE 428 (CIE 1921 XYZ) */
  AOM_CICP_CP_SMPTE_431 = 11,   /**< SMPTE RP 431-2 */
  AOM_CICP_CP_SMPTE_432 = 12,   /**< SMPTE EG 432-1 */
  AOM_CICP_CP_RESERVED_13 = 13, /**< For future use (values 13 - 21) */
  AOM_CICP_CP_EBU_3213 = 22,    /**< EBU Tech. 3213-E */
  AOM_CICP_CP_RESERVED_23 = 23  /**< For future use (values 23 - 255) */
} aom_color_primaries_t;        /**< alias for enum aom_color_primaries */

/*!\brief List of supported transfer functions */
typedef enum aom_transfer_characteristics {
  AOM_CICP_TC_RESERVED_0 = 0,  /**< For future use */
  AOM_CICP_TC_BT_709 = 1,      /**< BT.709 */
  AOM_CICP_TC_UNSPECIFIED = 2, /**< Unspecified */
  AOM_CICP_TC_RESERVED_3 = 3,  /**< For future use */
  AOM_CICP_TC_BT_470_M = 4,    /**< BT.470 System M (historical) */
  AOM_CICP_TC_BT_470_B_G = 5,  /**< BT.470 System B, G (historical) */
  AOM_CICP_TC_BT_601 = 6,      /**< BT.601 */
  AOM_CICP_TC_SMPTE_240 = 7,   /**< SMPTE 240 M */
  AOM_CICP_TC_LINEAR = 8,      /**< Linear */
  AOM_CICP_TC_LOG_100 = 9,     /**< Logarithmic (100 : 1 range) */
  AOM_CICP_TC_LOG_100_SQRT10 =
      10,                      /**< Logarithmic (100 * Sqrt(10) : 1 range) */
  AOM_CICP_TC_IEC_61966 = 11,  /**< IEC 61966-2-4 */
  AOM_CICP_TC_BT_1361 = 12,    /**< BT.1361 */
  AOM_CICP_TC_SRGB = 13,       /**< sRGB or sYCC*/
  AOM_CICP_TC_BT_2020_10_BIT = 14, /**< BT.2020 10-bit systems */
  AOM_CICP_TC_BT_2020_12_BIT = 15, /**< BT.2020 12-bit systems */
  AOM_CICP_TC_SMPTE_2084 = 16,     /**< SMPTE ST 2084, ITU BT.2100 PQ */
  AOM_CICP_TC_SMPTE_428 = 17,      /**< SMPTE ST 428 */
  AOM_CICP_TC_HLG = 18,            /**< BT.2100 HLG, ARIB STD-B67 */
  AOM_CICP_TC_RESERVED_19 = 19     /**< For future use (values 19-255) */
} aom_transfer_characteristics_t;  /**< alias for enum aom_transfer_function */

/*!\brief List of supported matrix coefficients */
typedef enum aom_matrix_coefficients {
  AOM_CICP_MC_IDENTITY = 0,    /**< Identity matrix */
  AOM_CICP_MC_BT_709 = 1,      /**< BT.709 */
  AOM_CICP_MC_UNSPECIFIED = 2, /**< Unspecified */
  AOM_CICP_MC_RESERVED_3 = 3,  /**< For future use */
  AOM_CICP_MC_FCC = 4,         /**< US FCC 73.628 */
  AOM_CICP_MC_BT_470_B_G = 5,  /**< BT.470 System B, G (historical) */
  AOM_CICP_MC_BT_601 = 6,      /**< BT.601 */
  AOM_CICP_MC_SMPTE_240 = 7,   /**< SMPTE 240 M */
  AOM_CICP_MC_SMPTE_YCGCO = 8, /**< YCgCo */
  AOM_CICP_MC_BT_2020_NCL =
      9, /**< BT.2020 non-constant luminance, BT.2100 YCbCr */
  AOM_CICP_MC_BT_2020_CL = 10, /**< BT.2020 constant luminance */
  AOM_CICP_MC_SMPTE_2085 = 11, /**< SMPTE ST 2085 YDzDx */
  AOM_CICP_MC_CHROMAT_NCL =
      12, /**< Chromaticity-derived non-constant luminance */
  AOM_CICP_MC_CHROMAT_CL = 13, /**< Chromaticity-derived constant luminance */
  AOM_CICP_MC_ICTCP = 14,      /**< BT.2100 ICtCp */
  AOM_CICP_MC_RESERVED_15 = 15 /**< For future use (values 15-255) */
} aom_matrix_coefficients_t;

/*!\brief List of supported color range */
typedef enum aom_color_range {
  AOM_CR_STUDIO_RANGE = 0, /**< Y [16..235], UV [16..240] */
  AOM_CR_FULL_RANGE = 1    /**< YUV/RGB [0..255] */
} aom_color_range_t;       /**< alias for enum aom_color_range */

/*!\brief List of chroma sample positions */
typedef enum aom_chroma_sample_position {
  AOM_CSP_UNKNOWN = 0,   /**< Unknown */
  AOM_CSP_VERTICAL = 1,  /**< Horizontally co-located with luma(0, 0)*/
                         /**< sample, between two vertical samples */
  AOM_CSP_COLOCATED = 2, /**< Co-located with luma(0, 0) sample */
  AOM_CSP_RESERVED = 3   /**< Reserved value */
} aom_chroma_sample_position_t; /**< alias for enum aom_transfer_function */

/*
from:
aom/aom_scale/yv12config.h
*/
// Frame buffer descriptor. The anonymous unions let each geometry pair be
// addressed either by named luma/chroma fields or as a 2-element array
// indexed by plane.
typedef struct PIC_BUFFER_CONFIG_s {
  union {
    struct {
      int y_width;
      int uv_width;
    };
    int widths[2];
  };
  union {
    struct {
      int y_height;
      int uv_height;
    };
    int heights[2];
  };
  union {
    struct {
      int y_crop_width;
      int uv_crop_width;
    };
    int crop_widths[2];
  };
  union {
    struct {
      int y_crop_height;
      int uv_crop_height;
    };
    int crop_heights[2];
  };
  union {
    struct {
      int y_stride;
      int uv_stride;
    };
    int strides[2];
  };
  union {
    struct {
      uint8_t *y_buffer;
      uint8_t *u_buffer;
      uint8_t *v_buffer;
    };
    uint8_t *buffers[3];
  };

  // Indicate whether y_buffer, u_buffer, and v_buffer points to the internally
  // allocated memory or external buffers.
  int use_external_reference_buffers;
  // This is needed to store y_buffer, u_buffer, and v_buffer when set reference
  // uses an external reference, and restore those buffer pointers after the
  // external reference frame is no longer used.
  uint8_t *store_buf_adr[3];

  // If the frame is stored in a 16-bit buffer, this stores an 8-bit version
  // for use in global motion detection. It is allocated on-demand.
  uint8_t *y_buffer_8bit;
  int buf_8bit_valid;

  uint8_t *buffer_alloc;
  size_t buffer_alloc_sz;
  int border;
  size_t frame_size;
  int subsampling_x;
  int subsampling_y;
  unsigned int bit_depth;
  aom_color_primaries_t color_primaries;
  aom_transfer_characteristics_t transfer_characteristics;
  aom_matrix_coefficients_t matrix_coefficients;
  uint8_t monochrome;
  aom_chroma_sample_position_t chroma_sample_position;
  aom_color_range_t color_range;
  int render_width;
  int render_height;

  int corrupted;
  int flags;

#ifdef AML
  // Amlogic driver-specific per-picture state (decode bookkeeping and
  // hardware buffer addresses).
  int32_t index;
  int32_t decode_idx;
  int32_t slice_type;
  int32_t RefNum_L0;
  int32_t RefNum_L1;
  int32_t num_reorder_pic;
  int32_t stream_offset;
  uint8_t referenced;
  uint8_t output_mark;
  uint8_t recon_mark;
  uint8_t output_ready;
  uint8_t error_mark;
  /**/
  int32_t slice_idx;
  /*buffer*/
  uint32_t fgs_table_adr;
  uint32_t sfgs_table_phy;
  char *sfgs_table_ptr;
#ifdef AOM_AV1_MMU
  uint32_t header_adr;
#endif
#ifdef AOM_AV1_MMU_DW
  uint32_t header_dw_adr;
#endif
  uint32_t mpred_mv_wr_start_addr;
  uint32_t mc_y_adr;
  uint32_t mc_u_v_adr;
  int32_t mc_canvas_y;
  int32_t mc_canvas_u_v;

  int32_t lcu_total;
  /**/
  unsigned int order_hint;
#endif
#ifdef AML_DEVICE
  int mv_buf_index;
  unsigned long cma_alloc_addr;
  int BUF_index;
  int buf_size;
  int comp_body_size;
  unsigned int dw_y_adr;
  unsigned int dw_u_v_adr;
  int double_write_mode;
  int y_canvas_index;
  int uv_canvas_index;
  int vf_ref;
  struct canvas_config_s canvas_config[2];
  char *aux_data_buf;
  int aux_data_size;
  u32 pts;
  u64 pts64;
  /* picture qos information */
  int max_qp;
  int avg_qp;
  int min_qp;
  int max_skip;
  int avg_skip;
  int min_skip;
  int max_mv;
  int min_mv;
  int avg_mv;
#endif
  u64 timestamp;
  u32 hw_decode_time;
  u32 frame_size2;  // For frame base mode
  int ctx_buf_idx;
  int v4l_buf_index;
  int repeat_count;
  struct PIC_BUFFER_CONFIG_s *repeat_pic;
} PIC_BUFFER_CONFIG;

/*
from:
common/blockd.h
*/
TYPEDEF enum {
  KEY_FRAME = 0,
  INTER_FRAME = 1,
  INTRA_ONLY_FRAME = 2,  // replaces intra-only
  S_FRAME = 3,
  FRAME_TYPES,
} UENUM1BYTE(FRAME_TYPE);

/*from:
mv.h
*/
#ifdef ORI_CODE
typedef struct mv32 {
  int32_t row;
  int32_t col;
} MV32;
#endif
/*from:
 aom_filter.h
*/
// Sub-pixel interpolation precision constants.
#define SUBPEL_BITS 4
#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
#define SUBPEL_SHIFTS (1 << SUBPEL_BITS)
#define SUBPEL_TAPS 8

#define SCALE_SUBPEL_BITS 10
#define SCALE_SUBPEL_SHIFTS (1 << SCALE_SUBPEL_BITS)
#define SCALE_SUBPEL_MASK (SCALE_SUBPEL_SHIFTS - 1)
#define SCALE_EXTRA_BITS (SCALE_SUBPEL_BITS - SUBPEL_BITS)
#define SCALE_EXTRA_OFF ((1 << SCALE_EXTRA_BITS) / 2)

#define RS_SUBPEL_BITS 6
#define RS_SUBPEL_MASK ((1 << RS_SUBPEL_BITS) - 1)
#define RS_SCALE_SUBPEL_BITS 14
#define RS_SCALE_SUBPEL_MASK ((1 << RS_SCALE_SUBPEL_BITS) - 1)
#define RS_SCALE_EXTRA_BITS (RS_SCALE_SUBPEL_BITS - RS_SUBPEL_BITS)
#define RS_SCALE_EXTRA_OFF (1 << (RS_SCALE_EXTRA_BITS - 1))

/*from:
scale.h
*/
#define SCALE_NUMERATOR 8

#define REF_SCALE_SHIFT 14
#define REF_NO_SCALE (1 << REF_SCALE_SHIFT)
#define REF_INVALID_SCALE -1

// Fixed-point scaling between a reference frame and the current frame.
struct scale_factors {
  int x_scale_fp;  // horizontal fixed point scale factor
  int y_scale_fp;  // vertical fixed point scale factor
  int x_step_q4;
  int y_step_q4;

  int (*scale_value_x)(int val, const struct scale_factors *sf);
  int (*scale_value_y)(int val, const struct scale_factors *sf);
#ifdef ORI_CODE
  // convolve_fn_ptr[subpel_x != 0][subpel_y != 0][is_compound]
  aom_convolve_fn_t convolve[2][2][2];
  aom_highbd_convolve_fn_t highbd_convolve[2][2][2];
#endif
};

#ifdef ORI_CODE
MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
#endif
void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
                                       int other_h, int this_w, int this_h);

// Returns non-zero when both fixed-point scale factors hold a valid value
// (i.e. neither is REF_INVALID_SCALE). Caller must pass a non-NULL sf.
static inline int av1_is_valid_scale(const struct scale_factors *sf) {
#ifdef ORI_CODE
  assert(sf != NULL);
#endif
  return sf->x_scale_fp != REF_INVALID_SCALE &&
         sf->y_scale_fp != REF_INVALID_SCALE;
}

// Returns non-zero when the scale factors are valid AND at least one axis
// actually scales (differs from REF_NO_SCALE, the 1:1 ratio).
static inline int av1_is_scaled(const struct scale_factors *sf) {
#ifdef ORI_CODE
  assert(sf != NULL);
#endif
  return av1_is_valid_scale(sf) &&
         (sf->x_scale_fp != REF_NO_SCALE || sf->y_scale_fp != REF_NO_SCALE);
}


/*
from:
common/onyxc_int.h
*/

#define CDEF_MAX_STRENGTHS 16

/* Constant values while waiting for the sequence header */
#define FRAME_ID_LENGTH 15
#define DELTA_FRAME_ID_LENGTH 14

#define FRAME_CONTEXTS (FRAME_BUFFERS + 1)
// Extra frame context which is always kept at default values
#define FRAME_CONTEXT_DEFAULTS (FRAME_CONTEXTS - 1)
#define PRIMARY_REF_BITS 3
#define PRIMARY_REF_NONE 7

#define NUM_PING_PONG_BUFFERS 2

#define MAX_NUM_TEMPORAL_LAYERS 8
#define MAX_NUM_SPATIAL_LAYERS 4
/* clang-format off */
// clang-format seems to think this is a pointer dereference and not a
// multiplication.
#define MAX_NUM_OPERATING_POINTS \
  MAX_NUM_TEMPORAL_LAYERS * MAX_NUM_SPATIAL_LAYERS
/* clang-format on*/

// TODO(jingning): Turning this on to set up transform coefficient
// processing timer.
#define TXCOEFF_TIMER 0
#define TXCOEFF_COST_TIMER 0

// How reference selection is signalled for the frame.
TYPEDEF enum {
  SINGLE_REFERENCE = 0,
  COMPOUND_REFERENCE = 1,
  REFERENCE_MODE_SELECT = 2,
  REFERENCE_MODES = 3,
} UENUM1BYTE(REFERENCE_MODE);

TYPEDEF enum {
  /**
   * Frame context updates are disabled
   */
  REFRESH_FRAME_CONTEXT_DISABLED,
  /**
   * Update frame context to values resulting from backward probability
   * updates based on entropy/counts in the decoded frame
   */
  REFRESH_FRAME_CONTEXT_BACKWARD,
} UENUM1BYTE(REFRESH_FRAME_CONTEXT_MODE);

#define MFMV_STACK_SIZE 3

#ifdef AML
// Bytes per motion-vector record in the hardware MV buffer.
#define MV_REF_SIZE 8
#endif

#ifdef ORI_CODE
typedef struct {
  int_mv mfmv0;
  uint8_t ref_frame_offset;
} TPL_MV_REF;
typedef struct {
  int_mv mv;
  MV_REFERENCE_FRAME ref_frame;
} MV_REF;
#endif

// Reference-counted frame buffer; one element of BufferPool::frame_bufs.
typedef struct RefCntBuffer_s {
  // For a RefCntBuffer, the following are reference-holding variables:
  // - cm->ref_frame_map[]
  // - cm->cur_frame
  // - cm->scaled_ref_buf[] (encoder only)
  // - cm->next_ref_frame_map[] (decoder only)
  // - pbi->output_frame_index[] (decoder only)
  // With that definition, 'ref_count' is the number of reference-holding
  // variables that are currently referencing this buffer.
  // For example:
  // - suppose this buffer is at index 'k' in the buffer pool, and
  // - Total 'n' of the variables / array elements above have value 'k' (that
  //   is, they are pointing to buffer at index 'k').
  // Then, pool->frame_bufs[k].ref_count = n.
  int ref_count;

  unsigned int order_hint;
  unsigned int ref_order_hints[INTER_REFS_PER_FRAME];

  int intra_only;
  int segmentation_enabled;
  unsigned int segment_feature[8];
#ifdef AML
  // Driver-side segmentation / loop-filter snapshots kept per buffer.
  int segmentation_update_map;
  int prev_segmentation_enabled;
  int seg_mi_rows;
  int seg_mi_cols;

  unsigned int seg_lf_info_y[8];
  unsigned int seg_lf_info_c[8];
  int8_t ref_deltas[REF_FRAMES];
  int8_t mode_deltas[MAX_MODE_LF_DELTAS];
#endif
  //MV_REF *mvs;
  uint8_t *seg_map;
#ifdef ORI_CODE
  struct segmentation seg;
#endif

  int mi_rows;
  int mi_cols;
  // Width and height give the size of the buffer (before any upscaling, unlike
  // the sizes that can be derived from the buf structure)
  int width;
  int height;
#ifdef ORI_CODE
  WarpedMotionParams global_motion[REF_FRAMES];
#endif
  int showable_frame;  // frame can be used as show existing frame in future
  uint8_t film_grain_params_present;
#ifdef ORI_CODE
  aom_film_grain_t film_grain_params;
#endif
#ifdef AML
  int dec_width;
  uint8_t film_grain_reg_valid;
  wait_queue_head_t wait_sfgs;
  atomic_t fgs_done;
  uint32_t film_grain_reg[FILM_GRAIN_REG_SIZE];
  uint32_t film_grain_ctrl;
#endif
  aom_codec_frame_buffer_t raw_frame_buffer;
  PIC_BUFFER_CONFIG buf;
#ifdef ORI_CODE
  hash_table hash_table;
#endif
  FRAME_TYPE frame_type;

  // This is only used in the encoder but needs to be indexed per ref frame
  // so it's extremely convenient to keep it here.
#ifdef ORI_CODE
  int interp_filter_selected[SWITCHABLE];
#endif
  // Inter frame reference frame delta for loop filter
  // (under AML the same fields live in the AML section above instead).
#ifndef AML
  int8_t ref_deltas[REF_FRAMES];
#endif
#ifdef ORI_CODE
  // 0 = ZERO_MV, MV
  int8_t mode_deltas[MAX_MODE_LF_DELTAS];

  FRAME_CONTEXT frame_context;
#endif
  int show_frame;
} RefCntBuffer;

// Pool of all reference-counted frame buffers shared by the decoder.
typedef struct BufferPool_s {
// Protect BufferPool from being accessed by several FrameWorkers at
// the same time during frame parallel decode.
// TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
// TODO(wtc): Remove this. See
// https://chromium-review.googlesource.com/c/webm/libvpx/+/560630.
#if CONFIG_MULTITHREAD
  pthread_mutex_t pool_mutex;
#endif

  // Private data associated with the frame buffer callbacks.
  void *cb_priv;
#ifdef ORI_CODE
  aom_get_frame_buffer_cb_fn_t get_fb_cb;
  aom_release_frame_buffer_cb_fn_t release_fb_cb;
#endif
  RefCntBuffer frame_bufs[FRAME_BUFFERS];

#ifdef ORI_CODE
  // Frame buffers allocated internally by the codec.
  InternalFrameBufferList int_frame_buffers;
#endif
#ifdef AML_DEVICE
  spinlock_t lock;
#endif
} BufferPool;

// CDEF (constrained directional enhancement filter) frame parameters.
typedef struct {
  int cdef_pri_damping;
  int cdef_sec_damping;
  int nb_cdef_strengths;
  int cdef_strengths[CDEF_MAX_STRENGTHS];
  int cdef_uv_strengths[CDEF_MAX_STRENGTHS];
  int cdef_bits;
} CdefInfo;

typedef struct {
  int delta_q_present_flag;
  // Resolution of delta quant
  int delta_q_res;
  int delta_lf_present_flag;
  // Resolution of delta lf level
  int delta_lf_res;
  // This is a flag for number of deltas of loop filter level
  // 0: use 1 delta, for y_vertical, y_horizontal, u, and v
  // 1: use separate deltas for each filter level
  int delta_lf_multi;
} DeltaQInfo;

typedef struct {
  int enable_order_hint;        // 0 - disable order hint, and related tools
  int order_hint_bits_minus_1;  // dist_wtd_comp, ref_frame_mvs,
                                // frame_sign_bias
                                // if 0, enable_dist_wtd_comp and
                                // enable_ref_frame_mvs must be set as 0.
  int enable_dist_wtd_comp;     // 0 - disable dist-wtd compound modes
                                // 1 - enable it
  int enable_ref_frame_mvs;     // 0 - disable ref frame mvs
                                // 1 - enable it
} OrderHintInfo;

// Sequence header structure.
// Note: All syntax elements of sequence_header_obu that need to be
// bit-identical across multiple sequence headers must be part of this struct,
// so that consistency is checked by are_seq_headers_consistent() function.
+typedef struct SequenceHeader { + int num_bits_width; + int num_bits_height; + int max_frame_width; + int max_frame_height; + uint8_t frame_id_numbers_present_flag; + int frame_id_length; + int delta_frame_id_length; + BLOCK_SIZE2 sb_size; // Size of the superblock used for this frame + int mib_size; // Size of the superblock in units of MI blocks + int mib_size_log2; // Log 2 of above. + + OrderHintInfo order_hint_info; + + uint8_t force_screen_content_tools; // 0 - force off + // 1 - force on + // 2 - adaptive + uint8_t still_picture; // Video is a single frame still picture + uint8_t reduced_still_picture_hdr; // Use reduced header for still picture + uint8_t force_integer_mv; // 0 - Don't force. MV can use subpel + // 1 - force to integer + // 2 - adaptive + uint8_t enable_filter_intra; // enables/disables filterintra + uint8_t enable_intra_edge_filter; // enables/disables edge upsampling + uint8_t enable_interintra_compound; // enables/disables interintra_compound + uint8_t enable_masked_compound; // enables/disables masked compound + uint8_t enable_dual_filter; // 0 - disable dual interpolation filter + // 1 - enable vert/horz filter selection + uint8_t enable_warped_motion; // 0 - disable warp for the sequence + // 1 - enable warp for the sequence + uint8_t enable_superres; // 0 - Disable superres for the sequence + // and no frame level superres flag + // 1 - Enable superres for the sequence + // enable per-frame superres flag + uint8_t enable_cdef; // To turn on/off CDEF + uint8_t enable_restoration; // To turn on/off loop restoration + BITSTREAM_PROFILE profile; + + // Operating point info. + int operating_points_cnt_minus_1; + int operating_point_idc[MAX_NUM_OPERATING_POINTS]; + uint8_t display_model_info_present_flag; + uint8_t decoder_model_info_present_flag; + AV1_LEVEL seq_level_idx[MAX_NUM_OPERATING_POINTS]; + uint8_t tier[MAX_NUM_OPERATING_POINTS]; // seq_tier in the spec. One bit: 0 + // or 1. + + // Color config. 
+ aom_bit_depth_t bit_depth; // AOM_BITS_8 in profile 0 or 1, + // AOM_BITS_10 or AOM_BITS_12 in profile 2 or 3. + uint8_t use_highbitdepth; // If true, we need to use 16bit frame buffers. + uint8_t monochrome; // Monochorme video + aom_color_primaries_t color_primaries; + aom_transfer_characteristics_t transfer_characteristics; + aom_matrix_coefficients_t matrix_coefficients; + int color_range; + int subsampling_x; // Chroma subsampling for x + int subsampling_y; // Chroma subsampling for y + aom_chroma_sample_position_t chroma_sample_position; + uint8_t separate_uv_delta_q; + uint8_t film_gry_dequant_QTXain_params_present; +} SequenceHeader; + +typedef struct { + int skip_mode_allowed; + int skip_mode_flag; + int ref_frame_idx_0; + int ref_frame_idx_1; +} SkipModeInfo; + +typedef struct { + FRAME_TYPE frame_type; + REFERENCE_MODE reference_mode; + + unsigned int order_hint; + unsigned int frame_number; + SkipModeInfo skip_mode_info; + int refresh_frame_flags; // Which ref frames are overwritten by this frame + int frame_refs_short_signaling; +} CurrentFrame; + +typedef struct AV1_Common_s { + CurrentFrame current_frame; + struct aom_internal_error_info error; + int width; + int height; + int render_width; + int render_height; + int timing_info_present; + aom_timing_info_t timing_info; + int buffer_removal_time_present; + aom_dec_model_info_t buffer_model; + aom_dec_model_op_parameters_t op_params[MAX_NUM_OPERATING_POINTS + 1]; + aom_op_timing_info_t op_frame_timing[MAX_NUM_OPERATING_POINTS + 1]; + uint32_t frame_presentation_time; + + int context_update_tile_id; +#ifdef SUPPORT_SCALE_FACTOR + // Scale of the current frame with respect to itself. + struct scale_factors sf_identity; +#endif + RefCntBuffer *prev_frame; + + // TODO(hkuang): Combine this with cur_buf in macroblockd. 
+ RefCntBuffer *cur_frame; + + // For encoder, we have a two-level mapping from reference frame type to the + // corresponding buffer in the buffer pool: + // * 'remapped_ref_idx[i - 1]' maps reference type 'i' (range: LAST_FRAME ... + // EXTREF_FRAME) to a remapped index 'j' (in range: 0 ... REF_FRAMES - 1) + // * Later, 'cm->ref_frame_map[j]' maps the remapped index 'j' to a pointer to + // the reference counted buffer structure RefCntBuffer, taken from the buffer + // pool cm->buffer_pool->frame_bufs. + // + // LAST_FRAME, ..., EXTREF_FRAME + // | | + // v v + // remapped_ref_idx[LAST_FRAME - 1], ..., remapped_ref_idx[EXTREF_FRAME - 1] + // | | + // v v + // ref_frame_map[], ..., ref_frame_map[] + // + // Note: INTRA_FRAME always refers to the current frame, so there's no need to + // have a remapped index for the same. + int remapped_ref_idx[REF_FRAMES]; + +#ifdef SUPPORT_SCALE_FACTOR + struct scale_factors ref_scale_factors[REF_FRAMES]; +#endif + // For decoder, ref_frame_map[i] maps reference type 'i' to a pointer to + // the buffer in the buffer pool 'cm->buffer_pool.frame_bufs'. + // For encoder, ref_frame_map[j] (where j = remapped_ref_idx[i]) maps + // remapped reference index 'j' (that is, original reference type 'i') to + // a pointer to the buffer in the buffer pool 'cm->buffer_pool.frame_bufs'. + RefCntBuffer *ref_frame_map[REF_FRAMES]; + + // Prepare ref_frame_map for the next frame. + // Only used in frame parallel decode. 
+ RefCntBuffer *next_ref_frame_map[REF_FRAMES]; +#ifdef AML + RefCntBuffer *next_used_ref_frame_map[REF_FRAMES]; +#endif + FRAME_TYPE last_frame_type; /* last frame's frame type for motion search.*/ + + int show_frame; + int showable_frame; // frame can be used as show existing frame in future + int show_existing_frame; + + uint8_t disable_cdf_update; + int allow_high_precision_mv; + uint8_t cur_frame_force_integer_mv; // 0 the default in AOM, 1 only integer + + uint8_t allow_screen_content_tools; + int allow_intrabc; + int allow_warped_motion; + + // MBs, mb_rows/cols is in 16-pixel units; mi_rows/cols is in + // MB_MODE_INFO (8-pixel) units. + int MBs; + int mb_rows, mi_rows; + int mb_cols, mi_cols; + int mi_stride; + + /* profile settings */ + TX_MODE tx_mode; + +#if CONFIG_ENTROPY_STATS + int coef_cdf_category; +#endif + + int base_qindex; + int y_dc_delta_q; + int u_dc_delta_q; + int v_dc_delta_q; + int u_ac_delta_q; + int v_ac_delta_q; + +#ifdef ORI_CODE + // The dequantizers below are true dequantizers used only in the + // dequantization process. They have the same coefficient + // shift/scale as TX. + int16_t y_dequant_QTX[MAX_SEGMENTS][2]; + int16_t u_dequant_QTX[MAX_SEGMENTS][2]; + int16_t v_dequant_QTX[MAX_SEGMENTS][2]; + + // Global quant matrix tables + const qm_val_t *giqmatrix[NUM_QM_LEVELS][3][TX_SIZES_ALL]; + const qm_val_t *gqmatrix[NUM_QM_LEVELS][3][TX_SIZES_ALL]; + + // Local quant matrix tables for each frame + const qm_val_t *y_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL]; + const qm_val_t *u_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL]; + const qm_val_t *v_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL]; +#endif + // Encoder + int using_qmatrix; + int qm_y; + int qm_u; + int qm_v; + int min_qmlevel; + int max_qmlevel; + int use_quant_b_adapt; + + /* We allocate a MB_MODE_INFO struct for each macroblock, together with + an extra row on top and column on the left to simplify prediction. 
*/ + int mi_alloc_size; + +#ifdef ORI_CODE + MB_MODE_INFO *mip; /* Base of allocated array */ + MB_MODE_INFO *mi; /* Corresponds to upper left visible macroblock */ + + // TODO(agrange): Move prev_mi into encoder structure. + // prev_mip and prev_mi will only be allocated in encoder. + MB_MODE_INFO *prev_mip; /* MB_MODE_INFO array 'mip' from last decoded frame */ + MB_MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */ + + // Separate mi functions between encoder and decoder. + int (*alloc_mi)(struct AV1Common *cm, int mi_size); + void (*free_mi)(struct AV1Common *cm); + void (*setup_mi)(struct AV1Common *cm); + + // Grid of pointers to 8x8 MB_MODE_INFO structs. Any 8x8 not in the visible + // area will be NULL. + MB_MODE_INFO **mi_grid_base; + MB_MODE_INFO **mi_grid_visible; + MB_MODE_INFO **prev_mi_grid_base; + MB_MODE_INFO **prev_mi_grid_visible; +#endif + // Whether to use previous frames' motion vectors for prediction. + int allow_ref_frame_mvs; + + uint8_t *last_frame_seg_map; + +#ifdef ORI_CODE + InterpFilter interp_filter; +#endif + int switchable_motion_mode; +#ifdef ORI_CODE + loop_filter_info_n lf_info; +#endif + // The denominator of the superres scale; the numerator is fixed. + uint8_t superres_scale_denominator; + int superres_upscaled_width; + int superres_upscaled_height; + +#ifdef ORI_CODE + RestorationInfo rst_info[MAX_MB_PLANE]; +#endif + // Pointer to a scratch buffer used by self-guided restoration + int32_t *rst_tmpbuf; +#ifdef ORI_CODE + RestorationLineBuffers *rlbs; +#endif + // Output of loop restoration + PIC_BUFFER_CONFIG rst_frame; + + // Flag signaling how frame contexts should be updated at the end of + // a frame decode + REFRESH_FRAME_CONTEXT_MODE refresh_frame_context; + + int ref_frame_sign_bias[REF_FRAMES]; /* Two state 0, 1 */ + +#ifdef ORI_CODE + struct loopfilter lf; + struct segmentation seg; +#endif + + int coded_lossless; // frame is fully lossless at the coded resolution. 
+ int all_lossless; // frame is fully lossless at the upscaled resolution. + + int reduced_tx_set_used; + +#ifdef ORI_CODE + // Context probabilities for reference frame prediction + MV_REFERENCE_FRAME comp_fwd_ref[FWD_REFS]; + MV_REFERENCE_FRAME comp_bwd_ref[BWD_REFS]; + + FRAME_CONTEXT *fc; /* this frame entropy */ + FRAME_CONTEXT *default_frame_context; +#endif + int primary_ref_frame; + + int error_resilient_mode; + + int tile_cols, tile_rows; + + int max_tile_width_sb; + int min_log2_tile_cols; + int max_log2_tile_cols; + int max_log2_tile_rows; + int min_log2_tile_rows; + int min_log2_tiles; + int max_tile_height_sb; + int uniform_tile_spacing_flag; + int log2_tile_cols; // only valid for uniform tiles + int log2_tile_rows; // only valid for uniform tiles + int tile_col_start_sb[MAX_TILE_COLS + 1]; // valid for 0 <= i <= tile_cols + int tile_row_start_sb[MAX_TILE_ROWS + 1]; // valid for 0 <= i <= tile_rows + int tile_width, tile_height; // In MI units + int min_inner_tile_width; // min width of non-rightmost tile + + unsigned int large_scale_tile; + unsigned int single_tile_decoding; + + int byte_alignment; + int skip_loop_filter; + int skip_film_grain; + + // External BufferPool passed from outside. + BufferPool *buffer_pool; + +#ifdef ORI_CODE + PARTITION_CONTEXT **above_seg_context; + ENTROPY_CONTEXT **above_context[MAX_MB_PLANE]; + TXFM_CONTEXT **above_txfm_context; + WarpedMotionParams global_motion[REF_FRAMES]; + aom_film_grain_t film_grain_params; + + CdefInfo cdef_info; + DeltaQInfo delta_q_info; // Delta Q and Delta LF parameters +#endif + int num_tg; + SequenceHeader seq_params; + int current_frame_id; + int ref_frame_id[REF_FRAMES]; + int valid_for_referencing[REF_FRAMES]; +#ifdef ORI_CODE + TPL_MV_REF *tpl_mvs; +#endif + int tpl_mvs_mem_size; + // TODO(jingning): This can be combined with sign_bias later. 
+ int8_t ref_frame_side[REF_FRAMES]; + + int is_annexb; + + int temporal_layer_id; + int spatial_layer_id; + unsigned int number_temporal_layers; + unsigned int number_spatial_layers; + int num_allocated_above_context_mi_col; + int num_allocated_above_contexts; + int num_allocated_above_context_planes; + +#if TXCOEFF_TIMER + int64_t cum_txcoeff_timer; + int64_t txcoeff_timer; + int txb_count; +#endif + +#if TXCOEFF_COST_TIMER + int64_t cum_txcoeff_cost_timer; + int64_t txcoeff_cost_timer; + int64_t txcoeff_cost_count; +#endif + const cfg_options_t *options; + int is_decoding; +#ifdef AML + int mv_ref_offset[MV_REF_SIZE][REF_FRAMES]; + int mv_ref_id[MV_REF_SIZE]; + unsigned char mv_cal_tpl_mvs[MV_REF_SIZE]; + int mv_ref_id_index; + int prev_fb_idx; + int new_fb_idx; + int32_t dec_width; +#endif +#ifdef AML_DEVICE + int cur_fb_idx_mmu; +#ifdef AOM_AV1_MMU_DW + int cur_fb_idx_mmu_dw; +#endif + int current_video_frame; + int use_prev_frame_mvs; + int frame_type; + int intra_only; + struct RefCntBuffer_s frame_refs[INTER_REFS_PER_FRAME]; + +#endif +} AV1_COMMON; + + +/* +from: + decoder/decoder.h +*/ + +typedef struct EXTERNAL_REFERENCES { + PIC_BUFFER_CONFIG refs[MAX_EXTERNAL_REFERENCES]; + int num; +} EXTERNAL_REFERENCES; + +typedef struct AV1Decoder { + //DECLARE_ALIGNED(32, MACROBLOCKD, mb); + + //DECLARE_ALIGNED(32, AV1_COMMON, common); + AV1_COMMON *common; + +#ifdef ORI_CODE + AVxWorker lf_worker; + AV1LfSync lf_row_sync; + AV1LrSync lr_row_sync; + AV1LrStruct lr_ctxt; + AVxWorker *tile_workers; + int num_workers; + DecWorkerData *thread_data; + ThreadData td; + TileDataDec *tile_data; + int allocated_tiles; + TileBufferDec tile_buffers[MAX_TILE_ROWS][MAX_TILE_COLS]; + AV1DecTileMT tile_mt_info; +#endif + + // Each time the decoder is called, we expect to receive a full temporal unit. + // This can contain up to one shown frame per spatial layer in the current + // operating point (note that some layers may be entirely omitted). 
+ // If the 'output_all_layers' option is true, we save all of these shown + // frames so that they can be returned to the application. If the + // 'output_all_layers' option is false, then we only output one image per + // temporal unit. + // + // Note: The saved buffers are released at the start of the next time the + // application calls aom_codec_decode(). + int output_all_layers; + RefCntBuffer *output_frames[MAX_NUM_SPATIAL_LAYERS]; + size_t num_output_frames; // How many frames are queued up so far? + + // In order to properly support random-access decoding, we need + // to behave slightly differently for the very first frame we decode. + // So we track whether this is the first frame or not. + int decoding_first_frame; + + int allow_lowbitdepth; + int max_threads; + int inv_tile_order; + int need_resync; // wait for key/intra-only frame. + int hold_ref_buf; // Boolean: whether we are holding reference buffers in + // common.next_ref_frame_map. + int reset_decoder_state; + + int tile_size_bytes; + int tile_col_size_bytes; + int dec_tile_row, dec_tile_col; // always -1 for non-VR tile encoding +#if CONFIG_ACCOUNTING + int acct_enabled; + Accounting accounting; +#endif + int tg_size; // Number of tiles in the current tilegroup + int tg_start; // First tile in the current tilegroup + int tg_size_bit_offset; + int sequence_header_ready; + int sequence_header_changed; +#if CONFIG_INSPECTION + aom_inspect_cb inspect_cb; + void *inspect_ctx; +#endif + int operating_point; + int current_operating_point; + int seen_frame_header; + + // State if the camera frame header is already decoded while + // large_scale_tile = 1. 
+ int camera_frame_header_ready; + size_t frame_header_size; +#ifdef ORI_CODE + DataBuffer obu_size_hdr; +#endif + int output_frame_width_in_tiles_minus_1; + int output_frame_height_in_tiles_minus_1; + int tile_count_minus_1; + uint32_t coded_tile_data_size; + unsigned int ext_tile_debug; // for ext-tile software debug & testing + unsigned int row_mt; + EXTERNAL_REFERENCES ext_refs; + PIC_BUFFER_CONFIG tile_list_outbuf; + +#ifdef ORI_CODE + CB_BUFFER *cb_buffer_base; +#endif + int cb_buffer_alloc_size; + + int allocated_row_mt_sync_rows; + +#if CONFIG_MULTITHREAD + pthread_mutex_t *row_mt_mutex_; + pthread_cond_t *row_mt_cond_; +#endif + +#ifdef ORI_CODE + AV1DecRowMTInfo frame_row_mt_info; +#endif + +#ifdef AML + unsigned char pred_inter_read_enable; + int cur_obu_type; + int decode_idx; + int bufmgr_proc_count; + int obu_frame_frame_head_come_after_tile; + uint32_t frame_width; + uint32_t frame_height; + BuffInfo_t* work_space_buf; + buff_t* mc_buf; + //unsigned short *rpm_ptr; + void *private_data; + u32 pre_stream_offset; +#endif +} AV1Decoder; + +#define RPM_BEGIN 0x200 +#define RPM_END 0x280 + +typedef union param_u { + struct { + unsigned short data[RPM_END - RPM_BEGIN]; + } l; + struct { + /*sequence head*/ + unsigned short profile; + unsigned short still_picture; + unsigned short reduced_still_picture_hdr; + unsigned short decoder_model_info_present_flag; + unsigned short max_frame_width; + unsigned short max_frame_height; + unsigned short frame_id_numbers_present_flag; + unsigned short delta_frame_id_length; + unsigned short frame_id_length; + unsigned short order_hint_bits_minus_1; + unsigned short enable_order_hint; + unsigned short enable_dist_wtd_comp; + unsigned short enable_ref_frame_mvs; + + /*frame head*/ + unsigned short show_existing_frame; + unsigned short frame_type; + unsigned short show_frame; + unsigned short error_resilient_mode; + unsigned short refresh_frame_flags; + unsigned short showable_frame; + unsigned short current_frame_id; + 
unsigned short frame_size_override_flag; + unsigned short order_hint; + unsigned short primary_ref_frame; + unsigned short frame_refs_short_signaling; + unsigned short frame_width; + unsigned short dec_frame_width; + unsigned short frame_width_scaled; + unsigned short frame_height; + unsigned short reference_mode; + unsigned short allow_ref_frame_mvs; + unsigned short superres_scale_denominator; + unsigned short lst_ref; + unsigned short gld_ref; + unsigned short existing_frame_idx; + + unsigned short remapped_ref_idx[INTER_REFS_PER_FRAME]; + unsigned short delta_frame_id_minus_1[INTER_REFS_PER_FRAME]; + unsigned short ref_order_hint[REF_FRAMES]; + /*other not in reference*/ + unsigned short bit_depth; + unsigned short seq_flags; + unsigned short update_parameters; + unsigned short film_grain_params_ref_idx; + + /*loop_filter & segmentation*/ + unsigned short loop_filter_sharpness_level; + unsigned short loop_filter_mode_ref_delta_enabled; + unsigned short loop_filter_ref_deltas_0; + unsigned short loop_filter_ref_deltas_1; + unsigned short loop_filter_ref_deltas_2; + unsigned short loop_filter_ref_deltas_3; + unsigned short loop_filter_ref_deltas_4; + unsigned short loop_filter_ref_deltas_5; + unsigned short loop_filter_ref_deltas_6; + unsigned short loop_filter_ref_deltas_7; + unsigned short loop_filter_mode_deltas_0; + unsigned short loop_filter_mode_deltas_1; + unsigned short loop_filter_level_0; + unsigned short loop_filter_level_1; + unsigned short loop_filter_level_u; + unsigned short loop_filter_level_v; + + unsigned short segmentation_enabled; + /* + SEG_LVL_ALT_LF_Y_V feature_enable: seg_lf_info_y[bit7] + SEG_LVL_ALT_LF_Y_V data: seg_lf_info_y[bit0~6] + SEG_LVL_ALT_LF_Y_H feature enable: seg_lf_info_y[bit15] + SEG_LVL_ALT_LF_Y_H data: seg_lf_info_y[bit8~14] + */ + unsigned short seg_lf_info_y[8]; + /* + SEG_LVL_ALT_LF_U feature_enable: seg_lf_info_y[bit7] + SEG_LVL_ALT_LF_U data: seg_lf_info_y[bit0~6] + SEG_LVL_ALT_LF_V feature enable: 
seg_lf_info_y[bit15] + SEG_LVL_ALT_LF_V data: seg_lf_info_y[bit8~14] + */ + unsigned short seg_lf_info_c[8]; + unsigned short video_signal_type; + unsigned short color_description; + + unsigned short mmu_used_num; + unsigned short dw_mmu_used_num; + unsigned short seq_flags_2; + unsigned short film_grain_present_flag; + + /*ucode end*/ + /*other*/ + unsigned short enable_superres; + + /*seqence not use*/ + unsigned short operating_points_cnt_minus_1; + unsigned short operating_point_idc[MAX_NUM_OPERATING_POINTS]; + unsigned short seq_level_idx[MAX_NUM_OPERATING_POINTS]; + unsigned short decoder_model_param_present_flag[MAX_NUM_OPERATING_POINTS]; + unsigned short timing_info_present; + /*frame head not use*/ + unsigned short display_frame_id; + unsigned short frame_presentation_time; + unsigned short buffer_removal_time_present; + unsigned short op_frame_timing[MAX_NUM_OPERATING_POINTS + 1]; + unsigned short valid_ref_frame_bits; + + } p; +}param_t; + +PIC_BUFFER_CONFIG *av1_get_ref_frame_spec_buf( + const AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame); + +int av1_bufmgr_process(AV1Decoder *pbi, union param_u *params, + unsigned char new_compressed_data, int obu_type); + +struct scale_factors *av1_get_ref_scale_factors( + AV1_COMMON *const cm, const MV_REFERENCE_FRAME ref_frame); + +void av1_set_next_ref_frame_map(AV1Decoder *pbi); + +unsigned int av1_get_next_used_ref_info( + const AV1_COMMON *const cm, int i); + +void av1_release_buf(AV1Decoder *pbi, RefCntBuffer *const buf); + +int av1_bufmgr_postproc(AV1Decoder *pbi, unsigned char frame_decoded); + +AV1Decoder *av1_decoder_create(BufferPool *const pool, AV1_COMMON *cm); + +unsigned char av1_frame_is_inter(const AV1_COMMON *const cm); + +RefCntBuffer *av1_get_primary_ref_frame_buf( + const AV1_COMMON *const cm); + +void av1_raw_write_image(AV1Decoder *pbi, PIC_BUFFER_CONFIG *sd); + +int get_free_frame_buffer(struct AV1_Common_s *cm); + +int check_buff_has_show(struct RefCntBuffer_s *frame_buf); + +void 
av1_bufmgr_ctx_reset(AV1Decoder *pbi, BufferPool *const pool, AV1_COMMON *cm); + +#if 1 +#define lock_buffer_pool(pool, flags) \ + spin_lock_irqsave(&pool->lock, flags) + +#define unlock_buffer_pool(pool, flags) \ + spin_unlock_irqrestore(&pool->lock, flags) +#else +#define lock_buffer_pool(pool, flags) flags=1; + +#define unlock_buffer_pool(pool, flags) flags=0; + +#endif + +#define AV1_DEBUG_BUFMGR 0x01 +#define AV1_DEBUG_BUFMGR_MORE 0x02 +#define AV1_DEBUG_BUFMGR_DETAIL 0x04 +#define AV1_DEBUG_TIMEOUT_INFO 0x08 +#define AV1_DEBUG_OUT_PTS 0x10 +#define AOM_DEBUG_HW_MORE 0x20 +#define AOM_DEBUG_VFRAME 0x40 +#define AOM_DEBUG_PRINT_LIST_INFO 0x80 +#define AOM_AV1_DEBUG_SEND_PARAM_WITH_REG 0x100 +#define AV1_DEBUG_IGNORE_VF_REF 0x200 +#define AV1_DEBUG_DBG_LF_PRINT 0x400 +#define AV1_DEBUG_REG 0x800 +#define AOM_DEBUG_BUFMGR_ONLY 0x1000 +#define AOM_DEBUG_AUX_DATA 0x2000 +#define AV1_DEBUG_QOS_INFO 0x4000 +#define AOM_DEBUG_DW_DISP_MAIN 0x8000 +#define AV1_DEBUG_DIS_LOC_ERROR_PROC 0x10000 +#define AOM_DEBUG_DIS_RECYCLE_MMU_TAIL 0x20000 +#define AV1_DEBUG_DUMP_PIC_LIST 0x40000 +#define AV1_DEBUG_TRIG_SLICE_SEGMENT_PROC 0x80000 +#define AOM_DEBUG_USE_FIXED_MV_BUF_SIZE 0x100000 +#define AV1_DEBUG_LOAD_UCODE_FROM_FILE 0x200000 +#define AV1_DEBUG_FORCE_SEND_AGAIN 0x400000 +#define AV1_DEBUG_DUMP_DATA 0x800000 +#define AV1_DEBUG_CACHE 0x1000000 +#define AV1_DEBUG_CACHE_HIT_RATE 0x2000000 +#define AV1_DEBUG_SEI_DETAIL 0x4000000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 +#if 1 +/*def MULTI_INSTANCE_SUPPORT*/ +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_V4L_DETAIL 0x10000000 +#define PRINT_FLAG_VDEC_STATUS 0x20000000 +#define PRINT_FLAG_VDEC_DETAIL 0x40000000 +#define PRINT_FLAG_VDEC_DATA 0x80000000 +#endif + +int av1_print2(int flag, const char *fmt, ...); + +unsigned char av1_is_debug(int flag); + +#endif +
diff --git a/drivers/frame_provider/decoder_v4l/vav1/vav1.c b/drivers/frame_provider/decoder_v4l/vav1/vav1.c new file mode 100755 index 0000000..132341b --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/vav1/vav1.c
@@ -0,0 +1,12061 @@ + /* + * drivers/amlogic/amports/vav1.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/sched/clock.h> +#include <linux/sched.h> +#include <linux/sched/rt.h> +#include <uapi/linux/sched/types.h> +#include <linux/signal.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/slab.h> +#include <linux/amlogic/tee.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../../decoder/utils/decoder_mmu_box.h" +#include "../../decoder/utils/decoder_bmmu_box.h" +#include <linux/crc32.h> + +#define MEM_NAME "codec_av1" +/* #include <mach/am_regs.h> */ +#include <linux/amlogic/media/utils/vdec_reg.h> +#include 
"../../decoder/utils/vdec.h" +#include "../../decoder/utils/amvdec.h" +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +#include "../../decoder/utils/vdec_profile.h" +#endif + +#include <linux/amlogic/media/video_sink/video.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../../decoder/utils/config_parser.h" +#include "../../decoder/utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../../../amvdec_ports/vdec_drv_base.h" + +//#define DEBUG_UCODE_LOG +#define DEBUG_CMD +#define DEBUG_CRC_ERROR + +#define SUPPORT_V4L2 +//#define DEBUG_USE_VP9_DEVICE_NAME +//#define BUFMGR_ONLY_OLD_CHIP + +#ifdef SUPPORT_V4L2 +#include "../../decoder/utils/vdec_v4l2_buffer_ops.h" +#include <media/v4l2-mem2mem.h> +#endif +#include "../../../amvdec_ports/utils/common.h" +#include "../../decoder/utils/vdec_feature.h" +#include "../../decoder/utils/vdec_ge2d_utils.h" + +#define AML +#include "aom_av1_define.h" +#include "av1_global.h" + +#define DUMP_FILMGRAIN +#define MIX_STREAM_SUPPORT +//#define MV_USE_FIXED_BUF +//#define USE_SPEC_BUF_FOR_MMU_HEAD + +#define AOM_AV1_DBLK_INIT +#define AOM_AV1_UPSCALE_INIT + +#define USE_DEC_PIC_END + +#define SANITY_CHECK +#define CO_MV_COMPRESS + +#include "vav1.h" + +#define FGS_TABLE_SIZE (512 * 128 / 8) + +#define AV1_GMC_PARAM_BUFF_ADDR 0x316d +#define HEVCD_MPP_DECOMP_AXIURG_CTL 0x34c7 +#define HEVC_FGS_IDX 0x3660 +#define HEVC_FGS_DATA 0x3661 +#define HEVC_FGS_CTRL 0x3662 +#define AV1_SKIP_MODE_INFO 0x316c +#define AV1_QUANT_WR 0x3146 +#define AV1_SEG_W_ADDR 0x3165 +#define AV1_SEG_R_ADDR 0x3166 +#define AV1_REF_SEG_INFO 0x3171 +#define HEVC_ASSIST_PIC_SIZE_FB_READ 0x300d +#define PARSER_REF_SCALE_ENBL 0x316b +#define HEVC_MPRED_MV_RPTR_1 0x3263 +#define HEVC_MPRED_MV_RPTR_2 0x3264 +#define HEVC_SAO_CTRL9 0x362d +#define HEVC_FGS_TABLE_START 0x3666 +#define HEVC_FGS_TABLE_LENGTH 0x3667 +#define HEVC_DBLK_CDEF0 0x3515 +#define HEVC_DBLK_CDEF1 0x3516 +#define HEVC_DBLK_UPS1 0x351c +#define HEVC_DBLK_UPS2 
0x351d +#define HEVC_DBLK_UPS3 0x351e +#define HEVC_DBLK_UPS4 0x351f +#define HEVC_DBLK_UPS5 0x3520 +#define AV1_UPSCALE_X0_QN 0x316e +#define AV1_UPSCALE_STEP_QN 0x316f +#define HEVC_DBLK_DBLK0 0x3523 +#define HEVC_DBLK_DBLK1 0x3524 +#define HEVC_DBLK_DBLK2 0x3525 + +#define HW_MASK_FRONT 0x1 +#define HW_MASK_BACK 0x2 + +#define AV1D_MPP_REFINFO_TBL_ACCCONFIG 0x3442 +#define AV1D_MPP_REFINFO_DATA 0x3443 +#define AV1D_MPP_REF_SCALE_ENBL 0x3441 +#define HEVC_MPRED_CTRL4 0x324c +#define HEVC_CM_HEADER_START_ADDR 0x3628 +#define HEVC_DBLK_CFGB 0x350b +#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464 +#define HEVC_SAO_MMU_VH1_ADDR 0x363b +#define HEVC_SAO_MMU_VH0_ADDR 0x363a + +#define HEVC_MV_INFO 0x310d +#define HEVC_QP_INFO 0x3137 +#define HEVC_SKIP_INFO 0x3136 + +#define HEVC_CM_BODY_LENGTH2 0x3663 +#define HEVC_CM_HEADER_OFFSET2 0x3664 +#define HEVC_CM_HEADER_LENGTH2 0x3665 + +#define HEVC_CM_HEADER_START_ADDR2 0x364a +#define HEVC_SAO_MMU_DMA_CTRL2 0x364c +#define HEVC_SAO_MMU_VH0_ADDR2 0x364d +#define HEVC_SAO_MMU_VH1_ADDR2 0x364e +#define HEVC_SAO_MMU_STATUS2 0x3650 +#define HEVC_DW_VH0_ADDDR 0x365e +#define HEVC_DW_VH1_ADDDR 0x365f + +#ifdef BUFMGR_ONLY_OLD_CHIP +#undef AV1_SKIP_MODE_INFO +#define AV1_SKIP_MODE_INFO HEVC_ASSIST_SCRATCH_B +#endif + + +#define AOM_AV1_DEC_IDLE 0 +#define AOM_AV1_DEC_FRAME_HEADER 1 +#define AOM_AV1_DEC_TILE_END 2 +#define AOM_AV1_DEC_TG_END 3 +#define AOM_AV1_DEC_LCU_END 4 +#define AOM_AV1_DECODE_SLICE 5 +#define AOM_AV1_SEARCH_HEAD 6 +#define AOM_AV1_DUMP_LMEM 7 +#define AOM_AV1_FGS_PARAM_CONT 8 +#define AOM_AV1_DISCARD_NAL 0x10 +#define AOM_AV1_RESULT_NEED_MORE_BUFFER 0x11 +#define DEC_RESULT_UNFINISH 0x12 + +/*status*/ +#define AOM_AV1_DEC_PIC_END 0xe0 +/*AOM_AV1_FGS_PARA: +Bit[11] - 0 Read, 1 - Write +Bit[10:8] - film_grain_params_ref_idx, For Write request +*/ +#define AOM_AV1_FGS_PARAM 0xe1 +#define AOM_AV1_DEC_PIC_END_PRE 0xe2 +#define AOM_AV1_HEAD_PARSER_DONE 0xf0 +#define AOM_AV1_HEAD_SEARCH_DONE 0xf1 +#define 
AOM_AV1_SEQ_HEAD_PARSER_DONE 0xf2 +#define AOM_AV1_FRAME_HEAD_PARSER_DONE 0xf3 +#define AOM_AV1_FRAME_PARSER_DONE 0xf4 +#define AOM_AV1_REDUNDANT_FRAME_HEAD_PARSER_DONE 0xf5 +#define HEVC_ACTION_DONE 0xff + +#define AOM_DECODE_BUFEMPTY 0x20 +#define AOM_DECODE_TIMEOUT 0x21 +#define AOM_SEARCH_BUFEMPTY 0x22 +#define AOM_DECODE_OVER_SIZE 0x23 +#define AOM_EOS 0x24 +#define AOM_NAL_DECODE_DONE 0x25 + +#define VF_POOL_SIZE 32 + +#undef pr_info +#define pr_info printk + +#define DECODE_MODE_SINGLE ((0x80 << 24) | 0) +#define DECODE_MODE_MULTI_STREAMBASE ((0x80 << 24) | 1) +#define DECODE_MODE_MULTI_FRAMEBASE ((0x80 << 24) | 2) +#define DECODE_MODE_SINGLE_LOW_LATENCY ((0x80 << 24) | 3) +#define DECODE_MODE_MULTI_FRAMEBASE_NOHEAD ((0x80 << 24) | 4) + +#define AV1_TRIGGER_FRAME_DONE 0x100 +#define AV1_TRIGGER_FRAME_ENABLE 0x200 + +#define MV_MEM_UNIT 0x240 +/*--------------------------------------------------- + * Include "parser_cmd.h" + *--------------------------------------------------- + */ +#define PARSER_CMD_SKIP_CFG_0 0x0000090b + +#define PARSER_CMD_SKIP_CFG_1 0x1b14140f + +#define PARSER_CMD_SKIP_CFG_2 0x001b1910 + +#define PARSER_CMD_NUMBER 37 + +/*#define HEVC_PIC_STRUCT_SUPPORT*/ +/* to remove, fix build error */ + +/*#define CODEC_MM_FLAGS_FOR_VDECODER 0*/ + +#define MULTI_INSTANCE_SUPPORT +#define SUPPORT_10BIT +/* #define ERROR_HANDLE_DEBUG */ + +#ifndef STAT_KTHREAD +#define STAT_KTHREAD 0x40 +#endif + +#ifdef MULTI_INSTANCE_SUPPORT +#define MAX_DECODE_INSTANCE_NUM 9 + +#ifdef DEBUG_USE_VP9_DEVICE_NAME +#define MULTI_DRIVER_NAME "ammvdec_vp9_v4l" +#else +#define MULTI_DRIVER_NAME "ammvdec_av1_v4l" +#endif + +#define AUX_BUF_ALIGN(adr) ((adr + 0xf) & (~0xf)) +#ifdef DEBUG_UCODE_LOG +static u32 prefix_aux_buf_size; +static u32 suffix_aux_buf_size; +#else +static u32 prefix_aux_buf_size = (16 * 1024); +static u32 suffix_aux_buf_size; +#endif +#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD) +//#define UCODE_LOG_BUF_SIZE (16 * 1024) +#define 
UCODE_LOG_BUF_SIZE (1024 * 1024) +#endif +static unsigned int max_decode_instance_num + = MAX_DECODE_INSTANCE_NUM; +static unsigned int decode_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int display_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int max_process_time[MAX_DECODE_INSTANCE_NUM]; +static unsigned int run_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int input_empty[MAX_DECODE_INSTANCE_NUM]; +static unsigned int not_run_ready[MAX_DECODE_INSTANCE_NUM]; +#ifdef AOM_AV1_MMU_DW +static unsigned int dw_mmu_enable[MAX_DECODE_INSTANCE_NUM]; +#endif + +static u32 decode_timeout_val = 600; +static u32 enable_single_slice = 1; +static int start_decode_buf_level = 0x8000; +//static u32 work_buf_size; +static u32 force_pts_unstable; +static u32 mv_buf_margin = REF_FRAMES; +static u32 mv_buf_dynamic_alloc; +static u32 force_max_one_mv_buffer_size; + +/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */ +/* double_write_mode: + * 0, no double write; + * 1, 1:1 ratio; + * 2, (1/4):(1/4) ratio; + * 3, (1/4):(1/4) ratio, with both compressed frame included + * 4, (1/2):(1/2) ratio; + * 5, (1/2):(1/2) ratio, with both compressed frame included + * 8, (1/8):(1/8) ratio; + * 0x10, double write only + * 0x20, mmu double write + * 0x100, if > 1080p,use mode 4,else use mode 1; + * 0x200, if > 1080p,use mode 2,else use mode 1; + * 0x300, if > 720p, use mode 4, else use mode 1; + */ +static u32 double_write_mode; + +#ifdef DEBUG_USE_VP9_DEVICE_NAME +#define DRIVER_NAME "amvdec_vp9_v4l" +#define MODULE_NAME "amvdec_vp9_v4l" +#define DRIVER_HEADER_NAME "amvdec_vp9_header" +#else +#define DRIVER_NAME "amvdec_av1_v4l" +#define DRIVER_HEADER_NAME "amvdec_av1_header" +#endif + +#define PUT_INTERVAL (HZ/100) +#define ERROR_SYSTEM_RESET_COUNT 200 + +#define PTS_NORMAL 0 +#define PTS_NONE_REF_USE_DURATION 1 + +#define PTS_MODE_SWITCHING_THRESHOLD 3 +#define PTS_MODE_SWITCHING_RECOVERY_THREASHOLD 3 + +#define DUR2PTS(x) ((x)*90/96) +#define 
PTS2DUR(x) ((x)*96/90) +#define PTS2DUR_u64(x) (div_u64((x)*96, 90)) + +struct AV1HW_s; +static int vav1_vf_states(struct vframe_states *states, void *); +static struct vframe_s *vav1_vf_peek(void *); +static struct vframe_s *vav1_vf_get(void *); +static void vav1_vf_put(struct vframe_s *, void *); +static int vav1_event_cb(int type, void *data, void *private_data); +#ifdef MULTI_INSTANCE_SUPPORT +static s32 vav1_init(struct vdec_s *vdec); +#else +static s32 vav1_init(struct AV1HW_s *hw); +#endif +static void vav1_prot_init(struct AV1HW_s *hw, u32 mask); +static int vav1_local_init(struct AV1HW_s *hw, bool reset_flag); +static void vav1_put_timer_func(struct timer_list *timer); +static void dump_data(struct AV1HW_s *hw, int size); +static unsigned int get_data_check_sum + (struct AV1HW_s *hw, int size); +static void dump_pic_list(struct AV1HW_s *hw); +static int vav1_mmu_map_alloc(struct AV1HW_s *hw); +static void vav1_mmu_map_free(struct AV1HW_s *hw); +static int av1_alloc_mmu( + struct AV1HW_s *hw, + int cur_buf_idx, + int pic_width, + int pic_height, + unsigned short bit_depth, + unsigned int *mmu_index_adr); + +#ifdef DEBUG_USE_VP9_DEVICE_NAME +static const char vav1_dec_id[] = "vvp9-dev"; + +#define PROVIDER_NAME "decoder.vp9" +#define MULTI_INSTANCE_PROVIDER_NAME "vdec.vp9" +#else +static const char vav1_dec_id[] = "vav1-dev"; + +#define PROVIDER_NAME "decoder.av1" +#define MULTI_INSTANCE_PROVIDER_NAME "vdec.av1" +#endif +#define DV_PROVIDER_NAME "dvbldec" + +static const struct vframe_operations_s vav1_vf_provider = { + .peek = vav1_vf_peek, + .get = vav1_vf_get, + .put = vav1_vf_put, + .event_cb = vav1_event_cb, + .vf_states = vav1_vf_states, +}; + +static struct vframe_provider_s vav1_vf_prov; + +static u32 bit_depth_luma; +static u32 bit_depth_chroma; +static u32 frame_width; +static u32 frame_height; +static u32 video_signal_type; +static u32 on_no_keyframe_skiped; +static u32 without_display_mode; +static u32 v4l_bitstream_id_enable = 1; + +#ifdef 
CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static u32 force_dv_enable; +#endif + +#define PROB_SIZE (496 * 2 * 4) +#define PROB_BUF_SIZE (0x5000) +#define COUNT_BUF_SIZE (0x300 * 4 * 4) +/*compute_losless_comp_body_size(4096, 2304, 1) = 18874368(0x1200000)*/ +#define MAX_FRAME_4K_NUM 0x1200 +#define MAX_FRAME_8K_NUM 0x4800 + +#define HEVC_ASSIST_MMU_MAP_ADDR 0x3009 + + +/*USE_BUF_BLOCK*/ +struct BUF_s { + int index; + unsigned int alloc_flag; + /*buffer */ + unsigned int cma_page_count; + unsigned long alloc_addr; + unsigned long start_adr; + unsigned int size; + + unsigned int free_start_adr; + ulong v4l_ref_buf_addr; + ulong header_addr; + u32 header_size; + u32 luma_size; + ulong chroma_addr; + u32 chroma_size; + ulong header_dw_addr; +} /*BUF_t */; + +struct MVBUF_s { + unsigned long start_adr; + unsigned int size; + int used_flag; +} /*MVBUF_t */; + +/*#define TEST_WR_PTR_INC*/ +/*#define WR_PTR_INC_NUM 128*/ +#define WR_PTR_INC_NUM 1 + +//#define SIMULATION +#define DOS_PROJECT +#undef MEMORY_MAP_IN_REAL_CHIP + +/*#undef DOS_PROJECT*/ +/*#define MEMORY_MAP_IN_REAL_CHIP*/ + +/*#define CONFIG_HEVC_CLK_FORCED_ON*/ +/*#define ENABLE_SWAP_TEST*/ +#ifndef BUFMGR_ONLY_OLD_CHIP +#define MCRCC_ENABLE +#endif + +#ifdef AV1_10B_NV21 +#else +#define LOSLESS_COMPRESS_MODE +#endif + +static u32 get_picture_qos; + +static u32 disable_repeat; + +static u32 debug; +static u32 disable_fg; + +static bool is_reset; +/*for debug*/ +static u32 force_bufspec; +/* + udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode detail print + bit [31:16] not 0, pos to dump lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 
udebug_pause_decode_idx; + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +static u32 dv_toggle_prov_name; +#endif + +static u32 run_ready_min_buf_num = 2; +#ifdef DEBUG_CRC_ERROR +/* + bit[4] fill zero in header before starting + bit[5] dump mmu header always + + bit[6] dump mv buffer always + + bit[8] delay after decoding + bit[31~16] delayed mseconds +*/ +static u32 crc_debug_flag; +#endif +#ifdef DEBUG_CMD +static u32 header_dump_size = 0x10000; +static u32 debug_cmd_wait_count; +static u32 debug_cmd_wait_type; +#endif +#define DEBUG_REG +#ifdef DEBUG_REG +void AV1_WRITE_VREG_DBG2(unsigned int adr, unsigned int val, int line) +{ + if (debug & AV1_DEBUG_REG) + pr_info("%d:%s(%x, %x)\n", line, __func__, adr, val); + if (adr != 0) + WRITE_VREG(adr, val); +} + +#undef WRITE_VREG +#define WRITE_VREG(a,v) AV1_WRITE_VREG_DBG2(a,v,__LINE__) +#endif + +#define FRAME_CNT_WINDOW_SIZE 59 +#define RATE_CORRECTION_THRESHOLD 5 +/************************************************** + +AV1 buffer management start + +***************************************************/ +#define MMU_COMPRESS_HEADER_SIZE_1080P 0x10000 +#define MMU_COMPRESS_HEADER_SIZE_4K 0x48000 +#define MMU_COMPRESS_HEADER_SIZE_8K 0x120000 + +//#define MMU_COMPRESS_HEADER_SIZE 0x48000 +//#define MAX_ONE_MV_BUFFER_SIZE 0x260000 +//#define MAX_ONE_MV_BUFFER_SIZE 0x130000 + +#define MAX_ONE_MV_BUFFER_SIZE_1080P 0x20400 +#define MAX_ONE_MV_BUFFER_SIZE_4K 0x91400 +#define MAX_ONE_MV_BUFFER_SIZE_8K 0x244800 +/*to support tm2revb and sc2*/ +#define MAX_ONE_MV_BUFFER_SIZE_1080P_TM2REVB 0x26400 +#define MAX_ONE_MV_BUFFER_SIZE_4K_TM2REVB 0xac400 +#define MAX_ONE_MV_BUFFER_SIZE_8K_TM2REVB 0x2b0800 + +static int vav1_mmu_compress_header_size(struct AV1HW_s *hw); + +//#define MMU_COMPRESS_HEADER_SIZE 0x48000 +//#define MMU_COMPRESS_HEADER_SIZE_DW MMU_COMPRESS_HEADER_SIZE +//#define MMU_COMPRESS_8K_HEADER_SIZE (0x48000*4) +#define MAX_SIZE_8K (8192 * 4608) +#define MAX_SIZE_4K (4096 * 2304) +#define MAX_SIZE_2K (1920 * 
1088) +#define IS_8K_SIZE(w, h) (((w) * (h)) > MAX_SIZE_4K) +#define IS_4K_SIZE(w, h) (((w) * (h)) > (1920*1088)) + +#define INVALID_IDX -1 /* Invalid buffer index.*/ + + +/*4 scratch frames for the new frames to support a maximum of 4 cores decoding + *in parallel, 3 for scaled references on the encoder. + *TODO(hkuang): Add ondemand frame buffers instead of hardcoding the number + * // of framebuffers. + *TODO(jkoleszar): These 3 extra references could probably come from the + *normal reference pool. + */ +//#define FRAME_BUFFERS (REF_FRAMES + 16) +//#define REF_FRAMES_4K (6) +#define REF_FRAMES_4K REF_FRAMES + +#ifdef USE_SPEC_BUF_FOR_MMU_HEAD +#define HEADER_FRAME_BUFFERS (0) +#elif (defined AOM_AV1_MMU_DW) +#define HEADER_FRAME_BUFFERS (2 * FRAME_BUFFERS) +#else +#define HEADER_FRAME_BUFFERS (FRAME_BUFFERS) +#endif +#define MAX_BUF_NUM (FRAME_BUFFERS) +#define MV_BUFFER_NUM FRAME_BUFFERS + +//#define FRAME_CONTEXTS_LOG2 2 +//#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2) +/*buffer + header buffer + workspace*/ +#ifdef MV_USE_FIXED_BUF +#define MAX_BMMU_BUFFER_NUM (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + 1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n) +#define WORK_SPACE_BUF_ID (FRAME_BUFFERS + HEADER_FRAME_BUFFERS) +#else +#define MAX_BMMU_BUFFER_NUM \ + (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + MV_BUFFER_NUM + 1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n) +#define MV_BUFFER_IDX(n) (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + n) +#define WORK_SPACE_BUF_ID \ + (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + MV_BUFFER_NUM) +#endif +#ifdef AOM_AV1_MMU_DW +#define DW_HEADER_BUFFER_IDX(n) (HEADER_BUFFER_IDX(HEADER_FRAME_BUFFERS/2) + n) +#endif + + +static void set_canvas(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic_config); + +static void fill_frame_info(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *frame, + unsigned int framesize, + unsigned int pts); + + +static int 
compute_losless_comp_body_size(int width, int height, + uint8_t is_bit_depth_10); + +void clear_frame_buf_ref_count(AV1Decoder *pbi); + +#ifdef MULTI_INSTANCE_SUPPORT +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_CONFIG_PARAM 3 +#define DEC_RESULT_ERROR 4 +#define DEC_INIT_PICLIST 5 +#define DEC_UNINIT_PICLIST 6 +#define DEC_RESULT_GET_DATA 7 +#define DEC_RESULT_GET_DATA_RETRY 8 +#define DEC_RESULT_EOS 9 +#define DEC_RESULT_FORCE_EXIT 10 +#define DEC_RESULT_DISCARD_DATA 11 + +#define DEC_S1_RESULT_NONE 0 +#define DEC_S1_RESULT_DONE 1 +#define DEC_S1_RESULT_FORCE_EXIT 2 +#define DEC_S1_RESULT_TEST_TRIGGER_DONE 0xf0 + +#ifdef FB_DECODING_TEST_SCHEDULE +#define TEST_SET_NONE 0 +#define TEST_SET_PIC_DONE 1 +#define TEST_SET_S2_DONE 2 +#endif + +static void av1_work(struct work_struct *work); +#endif + +#ifdef DUMP_FILMGRAIN +u32 fg_dump_index = 0xff; +#endif + +#ifdef AOM_AV1_DBLK_INIT +struct loop_filter_info_n_s; +struct loopfilter; +struct segmentation_lf; +#endif + +struct vav1_assit_task { + bool use_sfgs; + bool running; + struct mutex assit_mutex; + struct semaphore sem; + struct task_struct *task; + void *private; +}; + +struct AV1HW_s { + AV1Decoder *pbi; + union param_u aom_param; + unsigned char frame_decoded; + unsigned char one_compressed_data_done; + unsigned char new_compressed_data; +#if 1 +/*def CHECK_OBU_REDUNDANT_FRAME_HEADER*/ + int obu_frame_frame_head_come_after_tile; +#endif + unsigned char index; + + struct device *cma_dev; + struct platform_device *platform_dev; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + struct vframe_chunk_s *chunk; + int dec_result; + struct work_struct work; + struct work_struct set_clk_work; + u32 start_shift_bytes; + u32 data_size; + + struct BuffInfo_s work_space_buf_store; + unsigned long buf_start; + u32 buf_size; + u32 cma_alloc_count; + unsigned long cma_alloc_addr; + uint8_t eos; + unsigned long int start_process_time; + unsigned 
last_lcu_idx; + int decode_timeout_count; + unsigned timeout_num; + int save_buffer_mode; + + int double_write_mode; + + long used_4k_num; + + unsigned char m_ins_flag; + char *provider_name; + union param_u param; + int frame_count; + int pic_count; + u32 stat; + struct timer_list timer; + u32 frame_dur; + u32 frame_ar; + int fatal_error; + uint8_t init_flag; + uint8_t config_next_ref_info_flag; + uint8_t first_sc_checked; + uint8_t process_busy; +#define PROC_STATE_INIT 0 +#define PROC_STATE_DECODESLICE 1 +#define PROC_STATE_SENDAGAIN 2 + uint8_t process_state; + u32 ucode_pause_pos; + + int show_frame_num; + struct buff_s mc_buf_spec; + struct dec_sysinfo vav1_amstream_dec_info; + void *rpm_addr; + void *lmem_addr; + dma_addr_t rpm_phy_addr; + dma_addr_t lmem_phy_addr; + unsigned short *lmem_ptr; + unsigned short *debug_ptr; +#ifdef DUMP_FILMGRAIN + dma_addr_t fg_phy_addr; + unsigned char *fg_ptr; + void *fg_addr; +#endif + u32 fgs_valid; + + u8 aux_data_dirty; + u32 prefix_aux_size; + u32 suffix_aux_size; + void *aux_addr; + dma_addr_t aux_phy_addr; + char *dv_data_buf; + int dv_data_size; +#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD) + void *ucode_log_addr; + dma_addr_t ucode_log_phy_addr; +#endif + + void *prob_buffer_addr; + void *count_buffer_addr; + dma_addr_t prob_buffer_phy_addr; + dma_addr_t count_buffer_phy_addr; + + void *frame_mmu_map_addr; + dma_addr_t frame_mmu_map_phy_addr; +#ifdef AOM_AV1_MMU_DW + void *dw_frame_mmu_map_addr; + dma_addr_t dw_frame_mmu_map_phy_addr; +#endif + unsigned int use_cma_flag; + + struct BUF_s m_BUF[MAX_BUF_NUM]; + struct MVBUF_s m_mv_BUF[MV_BUFFER_NUM]; + u32 used_buf_num; + u32 mv_buf_margin; + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(pending_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + atomic_t vf_pre_count; + atomic_t vf_get_count; + atomic_t vf_put_count; + int buf_num; + int 
pic_num; + int lcu_size_log2; + unsigned int losless_comp_body_size; + + u32 video_signal_type; + + u32 pts_unstable; + bool av1_first_pts_ready; + bool dur_recalc_flag; + u8 first_pts_index; + u32 frame_mode_pts_save[FRAME_BUFFERS]; + u64 frame_mode_pts64_save[FRAME_BUFFERS]; + + int last_pts; + u64 last_pts_us64; + u64 shift_byte_count; + + u32 shift_byte_count_lo; + u32 shift_byte_count_hi; + int pts_mode_switching_count; + int pts_mode_recovery_count; + bool get_frame_dur; + + u32 saved_resolution; + /**/ + struct AV1_Common_s common; + struct RefCntBuffer_s *cur_buf; + int refresh_frame_flags; + uint8_t need_resync; + uint8_t hold_ref_buf; + uint8_t ready_for_new_data; + struct BufferPool_s av1_buffer_pool; + + struct BuffInfo_s *work_space_buf; + + struct buff_s *mc_buf; + + unsigned int frame_width; + unsigned int frame_height; + + unsigned short *rpm_ptr; + int init_pic_w; + int init_pic_h; + int lcu_total; + + int current_lcu_size; + + int slice_type; + + int skip_flag; + int decode_idx; + int result_done_count; + uint8_t has_keyframe; + uint8_t has_sequence; + uint8_t wait_buf; + uint8_t error_flag; + + /* bit 0, for decoding; bit 1, for displaying */ + uint8_t ignore_bufmgr_error; + int PB_skip_mode; + int PB_skip_count_after_decoding; + /*hw*/ + + /**/ + struct vdec_info *gvs; + + u32 pre_stream_offset; + + unsigned int dec_status; + u32 last_put_idx; + int new_frame_displayed; + void *mmu_box; + void *bmmu_box; + int mmu_enable; +#ifdef AOM_AV1_MMU_DW + void *mmu_box_dw; + int dw_mmu_enable; +#endif + struct vframe_master_display_colour_s vf_dp; + struct firmware_s *fw; + int max_pic_w; + int max_pic_h; + int buffer_spec_index; + int32_t max_one_mv_buffer_size; + + int need_cache_size; + u64 sc_start_time; + bool postproc_done; + int low_latency_flag; + bool no_head; + bool pic_list_init_done; + bool pic_list_init_done2; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + int frameinfo_enable; + struct vframe_qos_s vframe_qos; + +#ifdef 
AOM_AV1_DBLK_INIT + /* + * malloc may not work in real chip, please allocate memory for the following structures + */ + struct loop_filter_info_n_s *lfi; + struct loopfilter *lf; + struct segmentation_lf *seg_4lf; +#endif + u32 mem_map_mode; + u32 dynamic_buf_num_margin; + struct vframe_s vframe_dummy; + u32 res_ch_flag; + int buffer_wrap[FRAME_BUFFERS]; + int sidebind_type; + int sidebind_channel_id; + u32 cur_obu_type; + u32 multi_frame_cnt; + u32 endian; + u32 run_ready_min_buf_num; + int one_package_frame_cnt; + ulong fb_token; + bool wait_more_buf; + dma_addr_t rdma_phy_adr; + unsigned *rdma_adr; + u32 aux_data_size; + bool no_need_aux_data; + struct trace_decoder_name trace; + struct vav1_assit_task assit_task; + int film_grain_present; + ulong fg_table_handle; + struct vdec_ge2d *ge2d; + u32 data_offset; + u32 data_invalid; + u32 consume_byte; +}; +static void av1_dump_state(struct vdec_s *vdec); + +int av1_print(struct AV1HW_s *hw, + int flag, const char *fmt, ...) +{ +#define HEVC_PRINT_BUF 512 + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + + if (hw == NULL || + (flag == 0) || + (debug & flag)) { + va_list args; + + va_start(args, fmt); + if (hw) + len = sprintf(buf, "[%d]", hw->index); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; +} + +unsigned char av1_is_debug(int flag) +{ + if ((flag == 0) || (debug & flag)) + return 1; + + return 0; +} + +int av1_print2(int flag, const char *fmt, ...) +{ + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + + if ((flag == 0) || + (debug & flag)) { + va_list args; + + va_start(args, fmt); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; + +} + +static int is_oversize(int w, int h) +{ + int max = (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)? 
+ MAX_SIZE_8K : MAX_SIZE_4K; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) + max = MAX_SIZE_2K; + + if (w <= 0 || h <= 0) + return true; + + if (h != 0 && (w > max / h)) + return true; + + return false; +} + +static int v4l_alloc_and_config_pic(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic); + +static inline bool close_to(int a, int b, int m) +{ + return (abs(a - b) < m) ? true : false; +} + +#ifdef MULTI_INSTANCE_SUPPORT +static int av1_print_cont(struct AV1HW_s *hw, + int flag, const char *fmt, ...) +{ + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + + if (hw == NULL || + (flag == 0) || + (debug & flag)) { + va_list args; + + va_start(args, fmt); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_info("%s", buf); + va_end(args); + } + return 0; +} + +static void trigger_schedule(struct AV1HW_s *hw) +{ + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (ctx->param_sets_from_ucode && + !hw->v4l_params_parsed) + vdec_v4l_write_frame_sync(ctx); + } + + ATRACE_COUNTER("V_ST_DEC-chunk_size", 0); + + if (hw->vdec_cb) + hw->vdec_cb(hw_to_vdec(hw), hw->vdec_cb_arg); +} + +static void reset_process_time(struct AV1HW_s *hw) +{ + if (hw->start_process_time) { + unsigned process_time = + 1000 * (jiffies - hw->start_process_time) / HZ; + hw->start_process_time = 0; + if (process_time > max_process_time[hw->index]) + max_process_time[hw->index] = process_time; + } +} + +static void start_process_time(struct AV1HW_s *hw) +{ + hw->start_process_time = jiffies; + hw->decode_timeout_count = 0; + hw->last_lcu_idx = 0; +} + +static void timeout_process(struct AV1HW_s *hw) +{ + reset_process_time(hw); + if (hw->process_busy) { + av1_print(hw, + 0, "%s decoder timeout but process_busy\n", __func__); + if (debug) + av1_print(hw, 0, "debug disable timeout notify\n"); + return; + } + hw->timeout_num++; + amhevc_stop(); + + av1_print(hw, + 0, "%s decoder timeout\n", __func__); + + hw->dec_result = 
DEC_RESULT_DONE; + vdec_schedule_work(&hw->work); +} + +static u32 get_valid_double_write_mode(struct AV1HW_s *hw) +{ + u32 dw = ((double_write_mode & 0x80000000) == 0) ? + hw->double_write_mode : + (double_write_mode & 0x7fffffff); + if ((dw & 0x20) && + ((dw & 0xf) == 2 || (dw & 0xf) == 3)) { + pr_info("MMU doueble write 1:4 not supported !!!\n"); + dw = 0; + } + return dw; +} + +static int get_double_write_mode(struct AV1HW_s *hw) +{ + u32 valid_dw_mode = get_valid_double_write_mode(hw); + u32 dw; + int w, h; + struct AV1_Common_s *cm = &hw->common; + struct PIC_BUFFER_CONFIG_s *cur_pic_config; + + if (hw->is_used_v4l) { + unsigned int out; + + vdec_v4l_get_dw_mode(hw->v4l2_ctx, &out); + dw = out; + return dw; + } + + cur_pic_config = &cm->cur_frame->buf; + w = cur_pic_config->y_crop_width; + h = cur_pic_config->y_crop_height; + + dw = 0x1; /*1:1*/ + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + default: + dw = valid_dw_mode; + break; + } + return dw; +} + +/* for double write buf alloc */ +static int get_double_write_mode_init(struct AV1HW_s *hw) +{ + u32 valid_dw_mode = get_valid_double_write_mode(hw); + u32 dw; + int w = hw->init_pic_w; + int h = hw->init_pic_h; + + dw = 0x1; /*1:1*/ + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + default: + dw = valid_dw_mode; + break; + } + return dw; +} +#endif + +#define FILM_GRAIN_TASK +#ifdef FILM_GRAIN_TASK + +static u32 use_sfgs; +module_param(use_sfgs, uint, 0664); + +void film_grain_task_wakeup(struct AV1HW_s *hw) +{ + u32 fg_reg0, fg_reg1, num_y_points, num_cb_points, num_cr_points; + struct AV1_Common_s *cm = &hw->common; + + if 
(!hw->assit_task.use_sfgs || hw->eos) + return; + + fg_reg0 = cm->cur_frame->film_grain_reg[0]; + fg_reg1 = cm->cur_frame->film_grain_reg[1]; + num_y_points = fg_reg1 & 0xf; + num_cr_points = (fg_reg1 >> 8) & 0xf; + num_cb_points = (fg_reg1 >> 4) & 0xf; + if ((num_y_points > 0) || + ((num_cb_points > 0) | ((fg_reg0 >> 17) & 0x1)) || + ((num_cr_points > 0) | ((fg_reg0 >> 17) & 0x1))) + hw->fgs_valid = 1; + else + hw->fgs_valid = 0; + + if (cm->cur_frame) { + init_waitqueue_head(&cm->cur_frame->wait_sfgs); + if (((get_debug_fgs() & DEBUG_FGS_BYPASS) == 0) + && hw->fgs_valid) { + atomic_set(&cm->cur_frame->fgs_done, 0); + hw->assit_task.private = cm->cur_frame; + up(&hw->assit_task.sem); + } else { + atomic_set(&cm->cur_frame->fgs_done, 1); + } + } +} +EXPORT_SYMBOL(film_grain_task_wakeup); + +static int film_grain_task(void *args) +{ + struct AV1HW_s *hw = (struct AV1HW_s *)args; + struct vav1_assit_task *assit = &hw->assit_task; + struct sched_param param = {.sched_priority = MAX_RT_PRIO/2}; + RefCntBuffer *cur_frame; + + sched_setscheduler(current, SCHED_FIFO, ¶m); + + allow_signal(SIGTERM); + + while (down_interruptible(&assit->sem) == 0) { + if (assit->running == false) + break; + + if (assit->private == NULL) + continue; + + mutex_lock(&assit->assit_mutex); + cur_frame = (RefCntBuffer *)assit->private; + if ((!hw->eos) && (atomic_read(&cur_frame->fgs_done) == 0)) { + pic_film_grain_run(hw->frame_count, cur_frame->buf.sfgs_table_ptr, + cur_frame->film_grain_ctrl, cur_frame->film_grain_reg); + atomic_set(&cur_frame->fgs_done, 1); + wake_up_interruptible(&cur_frame->wait_sfgs); + assit->private = NULL; + vdec_sync_irq(VDEC_IRQ_0); + } + mutex_unlock(&assit->assit_mutex); + } + + while (!kthread_should_stop()) { + usleep_range(500, 1000); + } + + return 0; +} + +static int film_grain_task_create(struct AV1HW_s *hw) +{ + struct vav1_assit_task *assit = &hw->assit_task; + + mutex_init(&assit->assit_mutex); + + if ((!vdec_secure(hw_to_vdec(hw)) || (get_cpu_major_id() 
!= AM_MESON_CPU_MAJOR_ID_SC2)) + && !use_sfgs) + return 0; + + assit->use_sfgs = 1; + sema_init(&assit->sem, 0); + + assit->task = kthread_run(film_grain_task, hw, "fgs_task"); + if (IS_ERR(assit->task)) { + pr_err("%s, creat film grain task thread faild %ld\n", + __func__, PTR_ERR(assit->task)); + return PTR_ERR(assit->task); + } + assit->running = true; + assit->private = NULL; + av1_print(hw, 0, "%s, task %px create sucess\n", __func__, assit->task); + + return 0; +} + +static void film_grain_task_exit(struct AV1HW_s *hw) +{ + struct vav1_assit_task *assit = &hw->assit_task; + + if ((!assit->use_sfgs) || (IS_ERR(assit->task))) + return; + + assit->running = false; + up(&assit->sem); + + if (assit->task) { + kthread_stop(assit->task); + assit->task = NULL; + } + assit->use_sfgs = 0; + av1_print(hw, 0, "%s, task kthread stoped\n", __func__); +} +#endif + + +/* return page number */ +static int av1_mmu_page_num(struct AV1HW_s *hw, + int w, int h, int save_mode) +{ + int picture_size; + int cur_mmu_4k_number, max_frame_num; + + picture_size = compute_losless_comp_body_size(w, h, save_mode); + cur_mmu_4k_number = ((picture_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + max_frame_num = MAX_FRAME_8K_NUM; + else + max_frame_num = MAX_FRAME_4K_NUM; + + if (cur_mmu_4k_number > max_frame_num) { + pr_err("over max !! 
cur_mmu_4k_number 0x%x width %d height %d\n", + cur_mmu_4k_number, w, h); + return -1; + } + + return cur_mmu_4k_number; +} + +static struct internal_comp_buf* v4lfb_to_icomp_buf( + struct AV1HW_s *hw, + struct vdec_v4l2_buffer *fb) +{ + struct aml_video_dec_buf *aml_fb = NULL; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + + aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer); + + return &v4l2_ctx->comp_bufs[aml_fb->internal_index]; +} + +static struct internal_comp_buf* index_to_icomp_buf( + struct AV1HW_s *hw, int index) +{ + struct aml_video_dec_buf *aml_fb = NULL; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + + fb = (struct vdec_v4l2_buffer *) + hw->m_BUF[index].v4l_ref_buf_addr; + aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer); + + return &v4l2_ctx->comp_bufs[aml_fb->internal_index]; +} + +//#define MAX_4K_NUM 0x1200 +int av1_alloc_mmu( + struct AV1HW_s *hw, + int cur_buf_idx, + int pic_width, + int pic_height, + unsigned short bit_depth, + unsigned int *mmu_index_adr) +{ + int ret = 0; + int bit_depth_10 = (bit_depth == AOM_BITS_10); + int cur_mmu_4k_number; + + if (get_double_write_mode(hw) & 0x10) + return 0; + + if (bit_depth >= AOM_BITS_12) { + hw->fatal_error = DECODER_FATAL_ERROR_SIZE_OVERFLOW; + pr_err("fatal_error, un support bit depth 12!\n\n"); + return -1; + } + + cur_mmu_4k_number = av1_mmu_page_num(hw, + pic_width, + pic_height, + bit_depth_10); + if (cur_mmu_4k_number < 0) + return -1; + ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + if (hw->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(hw, cur_buf_idx); + + ret = decoder_mmu_box_alloc_idx( + ibuf->mmu_box, + ibuf->index, + ibuf->frame_buffer_size, + mmu_index_adr); + } else { + ret = decoder_mmu_box_alloc_idx( + hw->mmu_box, + cur_buf_idx, + cur_mmu_4k_number, + mmu_index_adr); + } + ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, 
TRACE_HEADER_MEMORY_END); + return ret; +} + +#ifdef AOM_AV1_MMU_DW +static int compute_losless_comp_body_size_dw(int width, int height, + uint8_t is_bit_depth_10); + +int av1_alloc_mmu_dw( + struct AV1HW_s *hw, + int cur_buf_idx, + int pic_width, + int pic_height, + unsigned short bit_depth, + unsigned int *mmu_index_adr) +{ + int ret = 0; + int bit_depth_10 = (bit_depth == AOM_BITS_10); + int picture_size; + int cur_mmu_4k_number, max_frame_num; + + if (get_double_write_mode(hw) & 0x10) + return 0; + if (bit_depth >= AOM_BITS_12) { + hw->fatal_error = DECODER_FATAL_ERROR_SIZE_OVERFLOW; + pr_err("fatal_error, un support bit depth 12!\n\n"); + return -1; + } + picture_size = compute_losless_comp_body_size_dw(pic_width, pic_height, + bit_depth_10); + cur_mmu_4k_number = ((picture_size + (1 << 12) - 1) >> 12); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + max_frame_num = MAX_FRAME_8K_NUM; + else + max_frame_num = MAX_FRAME_4K_NUM; + + if (cur_mmu_4k_number > max_frame_num) { + pr_err("over max !! 
cur_mmu_4k_number 0x%x width %d height %d\n", + cur_mmu_4k_number, pic_width, pic_height); + return -1; + } + if (hw->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(hw, cur_buf_idx); + + ret = decoder_mmu_box_alloc_idx( + ibuf->mmu_box_dw, + ibuf->index, + ibuf->frame_buffer_size, + mmu_index_adr); + } else { + ret = decoder_mmu_box_alloc_idx( + hw->mmu_box_dw, + cur_buf_idx, + cur_mmu_4k_number, + mmu_index_adr); + } + return ret; +} +#endif + +#ifndef MV_USE_FIXED_BUF +static void dealloc_mv_bufs(struct AV1HW_s *hw) +{ + int i; + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (hw->m_mv_BUF[i].start_adr) { + if (debug) + pr_info( + "dealloc mv buf(%d) adr %ld size 0x%x used_flag %d\n", + i, hw->m_mv_BUF[i].start_adr, + hw->m_mv_BUF[i].size, + hw->m_mv_BUF[i].used_flag); + decoder_bmmu_box_free_idx( + hw->bmmu_box, + MV_BUFFER_IDX(i)); + hw->m_mv_BUF[i].start_adr = 0; + hw->m_mv_BUF[i].size = 0; + hw->m_mv_BUF[i].used_flag = 0; + } + } +} + +static int alloc_mv_buf(struct AV1HW_s *hw, + int i, int size) +{ + int ret = 0; + + if (hw->m_mv_BUF[i].start_adr && + size > hw->m_mv_BUF[i].size) { + dealloc_mv_bufs(hw); + } else if (hw->m_mv_BUF[i].start_adr) + return 0; + + if (decoder_bmmu_box_alloc_buf_phy + (hw->bmmu_box, + MV_BUFFER_IDX(i), size, + DRIVER_NAME, + &hw->m_mv_BUF[i].start_adr) < 0) { + hw->m_mv_BUF[i].start_adr = 0; + ret = -1; + } else { + hw->m_mv_BUF[i].size = size; + hw->m_mv_BUF[i].used_flag = 0; + ret = 0; + if (debug) { + pr_info( + "MV Buffer %d: start_adr %p size %x\n", + i, + (void *)hw->m_mv_BUF[i].start_adr, + hw->m_mv_BUF[i].size); + } + } + return ret; +} + +static int cal_mv_buf_size(struct AV1HW_s *hw, int pic_width, int pic_height) +{ + unsigned lcu_size = hw->current_lcu_size; + int extended_pic_width = (pic_width + lcu_size -1) + & (~(lcu_size - 1)); + int extended_pic_height = (pic_height + lcu_size -1) + & (~(lcu_size - 1)); + + int lcu_x_num = extended_pic_width / lcu_size; + int lcu_y_num = extended_pic_height / 
lcu_size; + int size_a, size_b, size; + + if (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_SC2) + /*tm2revb and sc2*/ + size_a = lcu_x_num * lcu_y_num * 16 * + ((lcu_size == 64) ? 16 : 64); + else + size_a = lcu_x_num * lcu_y_num * 16 * + ((lcu_size == 64) ? 19 : 76); + + size_b = lcu_x_num * ((lcu_y_num >> 3) + + (lcu_y_num & 0x7)) * 16; + size = ((size_a + size_b) + 0xffff) & (~0xffff); + + if (debug & AOM_DEBUG_USE_FIXED_MV_BUF_SIZE) + size = hw->max_one_mv_buffer_size; + if (force_max_one_mv_buffer_size) + size = force_max_one_mv_buffer_size; + return size; +} + +static int init_mv_buf_list(struct AV1HW_s *hw) +{ + int i; + int ret = 0; + int count = MV_BUFFER_NUM; + int pic_width = hw->init_pic_w; + int pic_height = hw->init_pic_h; + int size = cal_mv_buf_size(hw, pic_width, pic_height); + + if (mv_buf_dynamic_alloc) + return 0; +#if 0 + if (mv_buf_margin > 0) + count = REF_FRAMES + mv_buf_margin; + if (hw->init_pic_w > 2048 && hw->init_pic_h > 1088) + count = REF_FRAMES_4K + mv_buf_margin; +#else + if (debug) + pr_info("%s, calculated mv size 0x%x\n", + __func__, size); + + if ((hw->is_used_v4l) && !IS_8K_SIZE(pic_width, pic_height)) { + if (vdec_is_support_4k()) + size = 0xb0000; + else + size = 0x30000; + } + + if (hw->init_pic_w > 4096 && hw->init_pic_h > 2048) + count = REF_FRAMES_4K + hw->mv_buf_margin; + else if (hw->init_pic_w > 2048 && hw->init_pic_h > 1088) + count = REF_FRAMES_4K + hw->mv_buf_margin; + else + count = REF_FRAMES + hw->mv_buf_margin; + +#endif + if (debug) { + pr_info("%s w:%d, h:%d, count: %d, size 0x%x\n", + __func__, hw->init_pic_w, hw->init_pic_h, + count, size); + } + + for (i = 0; + i < count && i < MV_BUFFER_NUM; i++) { + if (alloc_mv_buf(hw, i, size) < 0) { + ret = -1; + break; + } + } + return ret; +} + +static int get_mv_buf(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + int i; + int ret = -1; + if (mv_buf_dynamic_alloc) { + int size = cal_mv_buf_size(hw, + pic_config->y_crop_width, 
pic_config->y_crop_height); + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (hw->m_mv_BUF[i].start_adr == 0) { + ret = i; + break; + } + } + if (i == MV_BUFFER_NUM) { + pr_info( + "%s: Error, mv buf MV_BUFFER_NUM is not enough\n", + __func__); + return ret; + } + + if (alloc_mv_buf(hw, ret, size) >= 0) { + pic_config->mv_buf_index = ret; + pic_config->mpred_mv_wr_start_addr = + (hw->m_mv_BUF[ret].start_adr + 0xffff) & + (~0xffff); + } else { + pr_info( + "%s: Error, mv buf alloc fail\n", + __func__); + } + return ret; + } + + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (hw->m_mv_BUF[i].start_adr && + hw->m_mv_BUF[i].used_flag == 0) { + hw->m_mv_BUF[i].used_flag = 1; + ret = i; + break; + } + } + + if (ret >= 0) { + pic_config->mv_buf_index = ret; + pic_config->mpred_mv_wr_start_addr = + (hw->m_mv_BUF[ret].start_adr + 0xffff) & + (~0xffff); + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info( + "%s => %d (%d) size 0x%x\n", + __func__, ret, + pic_config->mpred_mv_wr_start_addr, + hw->m_mv_BUF[ret].size); + } else { + pr_info( + "%s: Error, mv buf is not enough\n", + __func__); + } + return ret; +} +static void put_mv_buf(struct AV1HW_s *hw, + int *mv_buf_index) +{ + int i = *mv_buf_index; + if (i >= MV_BUFFER_NUM) { + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info( + "%s: index %d beyond range\n", + __func__, i); + return; + } + if (mv_buf_dynamic_alloc) { + if (hw->m_mv_BUF[i].start_adr) { + if (debug) + pr_info( + "dealloc mv buf(%d) adr %ld size 0x%x used_flag %d\n", + i, hw->m_mv_BUF[i].start_adr, + hw->m_mv_BUF[i].size, + hw->m_mv_BUF[i].used_flag); + decoder_bmmu_box_free_idx( + hw->bmmu_box, + MV_BUFFER_IDX(i)); + hw->m_mv_BUF[i].start_adr = 0; + hw->m_mv_BUF[i].size = 0; + hw->m_mv_BUF[i].used_flag = 0; + } + *mv_buf_index = -1; + return; + } + + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info( + "%s(%d): used_flag(%d)\n", + __func__, i, + hw->m_mv_BUF[i].used_flag); + + *mv_buf_index = -1; + if (hw->m_mv_BUF[i].start_adr && + hw->m_mv_BUF[i].used_flag) + 
hw->m_mv_BUF[i].used_flag = 0; +} +static void put_un_used_mv_bufs(struct AV1HW_s *hw) +{ + struct AV1_Common_s *const cm = &hw->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + int i; + for (i = 0; i < hw->used_buf_num; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.index != -1) && + (frame_bufs[i].buf.mv_buf_index >= 0) + ) + put_mv_buf(hw, &frame_bufs[i].buf.mv_buf_index); + } +} +#endif + +static void init_pic_list_hw(struct AV1HW_s *pbi); + +static void update_hide_frame_timestamp(struct AV1HW_s *hw) +{ + RefCntBuffer *const frame_bufs = hw->common.buffer_pool->frame_bufs; + int i; + + for (i = 0; i < hw->used_buf_num; ++i) { + if ((!frame_bufs[i].show_frame) && + (frame_bufs[i].showable_frame) && + (!frame_bufs[i].buf.vf_ref) && + (frame_bufs[i].buf.BUF_index != -1)) { + frame_bufs[i].buf.timestamp = hw->chunk->timestamp; + av1_print(hw, AV1_DEBUG_OUT_PTS, + "%s, update %d hide frame ts: %lld\n", + __func__, i, frame_bufs[i].buf.timestamp); + } + } +} + +int check_buff_has_show(struct RefCntBuffer_s *frame_buf) +{ + int ret = 1; + + if (disable_repeat || + ((frame_buf->buf.vf_ref == 0) && + (frame_buf->buf.index != -1) && + frame_buf->buf.cma_alloc_addr)) { + ret = 0; + if (debug & AV1_DEBUG_BUFMGR) + pr_info("existing buff can use\n"); + } else { + if (debug & AV1_DEBUG_BUFMGR) + pr_info("existing buff can't use\n"); + } + return ret; +} + +static int get_free_fb_idx(AV1_COMMON *cm) +{ + int i; + RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; + + for (i = 0; i < FRAME_BUFFERS; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.repeat_count == 0)) + break; + } + + return (i != FRAME_BUFFERS) ? 
i : -1; +} + +static int v4l_get_free_fb(struct AV1HW_s *hw) +{ + struct AV1_Common_s *const cm = &hw->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + struct aml_vcodec_ctx * v4l = hw->v4l2_ctx; + struct v4l_buff_pool *pool = &v4l->cap_pool; + struct PIC_BUFFER_CONFIG_s *pic = NULL; + struct PIC_BUFFER_CONFIG_s *free_pic = NULL; + ulong flags; + int idx, i; + + lock_buffer_pool(cm->buffer_pool, flags); + + for (i = 0; i < pool->in; ++i) { + u32 state = (pool->seq[i] >> 16); + u32 index = (pool->seq[i] & 0xffff); + + switch (state) { + case V4L_CAP_BUFF_IN_DEC: + pic = &frame_bufs[i].buf; + if ((frame_bufs[i].ref_count == 0) && + (pic->vf_ref == 0) && + (pic->repeat_count == 0) && + (pic->index != -1) && + pic->cma_alloc_addr) { + free_pic = pic; + } + break; + case V4L_CAP_BUFF_IN_M2M: + idx = get_free_fb_idx(cm); + if (idx < 0) + break; + + pic = &frame_bufs[idx].buf; + pic->y_crop_width = hw->frame_width; + pic->y_crop_height = hw->frame_height; + hw->buffer_wrap[idx] = index; + if (!v4l_alloc_and_config_pic(hw, pic)) { + set_canvas(hw, pic); + init_pic_list_hw(hw); + free_pic = pic; + } + break; + default: + break; + } + + if (free_pic) { + if (frame_bufs[i].buf.use_external_reference_buffers) { + // If this frame buffer's y_buffer, u_buffer, and v_buffer point to the + // external reference buffers. Restore the buffer pointers to point to the + // internally allocated memory. 
+ PIC_BUFFER_CONFIG *ybf = &frame_bufs[i].buf; + + ybf->y_buffer = ybf->store_buf_adr[0]; + ybf->u_buffer = ybf->store_buf_adr[1]; + ybf->v_buffer = ybf->store_buf_adr[2]; + ybf->use_external_reference_buffers = 0; + } + + frame_bufs[i].ref_count = 1; + break; + } + } + + if (free_pic && hw->chunk) { + free_pic->timestamp = hw->chunk->timestamp; + update_hide_frame_timestamp(hw); + } + + unlock_buffer_pool(cm->buffer_pool, flags); + + if (free_pic) { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *) + hw->m_BUF[free_pic->index].v4l_ref_buf_addr; + + fb->status = FB_ST_DECODER; + + v4l->aux_infos.bind_sei_buffer(v4l, &free_pic->aux_data_buf, + &free_pic->aux_data_size, &free_pic->ctx_buf_idx); + + } + + if (debug & AV1_DEBUG_OUT_PTS) { + if (free_pic) { + pr_debug("%s, idx: %d, ts: %lld\n", + __func__, free_pic->index, free_pic->timestamp); + } else { + pr_debug("%s, av1 get free pic null\n", __func__); + } + } + + return free_pic ? free_pic->index : INVALID_IDX; +} + +static int get_free_fb(AV1_COMMON *cm) { + RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; + unsigned long flags; + int i; + + lock_buffer_pool(cm->buffer_pool, flags); + for (i = 0; i < FRAME_BUFFERS; ++i) { + if (frame_bufs[i].ref_count == 0 +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + && frame_bufs[i].buf.vf_ref == 0 +#endif + ) + break; + } + + if (i != FRAME_BUFFERS) { + if (frame_bufs[i].buf.use_external_reference_buffers) { + // If this frame buffer's y_buffer, u_buffer, and v_buffer point to the + // external reference buffers. Restore the buffer pointers to point to the + // internally allocated memory. + PIC_BUFFER_CONFIG *ybf = &frame_bufs[i].buf; + ybf->y_buffer = ybf->store_buf_adr[0]; + ybf->u_buffer = ybf->store_buf_adr[1]; + ybf->v_buffer = ybf->store_buf_adr[2]; + ybf->use_external_reference_buffers = 0; + } + + frame_bufs[i].ref_count = 1; + } else { + // We should never run out of free buffers. If this assertion fails, there + // is a reference leak. 
+ //assert(0 && "Ran out of free frame buffers. Likely a reference leak."); + // Reset i to be INVALID_IDX to indicate no free buffer found. + i = INVALID_IDX; + } + + unlock_buffer_pool(cm->buffer_pool, flags); + return i; +} + +int get_free_frame_buffer(struct AV1_Common_s *cm) +{ + struct AV1HW_s *hw = container_of(cm, struct AV1HW_s, common); + + return hw->is_used_v4l ? v4l_get_free_fb(hw) : get_free_fb(cm); +} + +static int get_free_buf_count(struct AV1HW_s *hw) +{ + struct AV1_Common_s *const cm = &hw->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int i, free_buf_count = 0; + + //clear_frame_buf_ref_count(hw->pbi); + + if (hw->is_used_v4l) { + for (i = 0; i < hw->used_buf_num; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.repeat_count == 0) && + frame_bufs[i].buf.cma_alloc_addr) { + free_buf_count++; + } + } + + if (ctx->cap_pool.dec < hw->used_buf_num) { + if (ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) { + free_buf_count += + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) + 1; + } + } + + /* trigger to parse head data. 
*/ + if (!hw->v4l_params_parsed) { + free_buf_count = hw->run_ready_min_buf_num; + } + if ((debug & AV1_DEBUG_BUFMGR_MORE) && + (free_buf_count <= 0)) { + pr_info("%s, free count %d, m2m_ready %d\n", + __func__, + free_buf_count, + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)); + } + } else { + for (i = 0; i < hw->used_buf_num; ++i) + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.index != -1)) { + free_buf_count++; + } + } + + return free_buf_count; +} + +int aom_bufmgr_init(struct AV1HW_s *hw, struct BuffInfo_s *buf_spec_i, + struct buff_s *mc_buf_i) { + struct AV1_Common_s *cm = &hw->common; + if (debug) + pr_info("%s %d %p\n", __func__, __LINE__, hw->pbi); + hw->frame_count = 0; + hw->pic_count = 0; + hw->pre_stream_offset = 0; + spin_lock_init(&cm->buffer_pool->lock); + cm->prev_fb_idx = INVALID_IDX; + cm->new_fb_idx = INVALID_IDX; + hw->used_4k_num = -1; + cm->cur_fb_idx_mmu = INVALID_IDX; + pr_debug + ("After aom_bufmgr_init, prev_fb_idx : %d, new_fb_idx : %d\r\n", + cm->prev_fb_idx, cm->new_fb_idx); + hw->need_resync = 1; + + cm->current_video_frame = 0; + hw->ready_for_new_data = 1; + + /* private init */ + hw->work_space_buf = buf_spec_i; + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL) || + (get_double_write_mode(hw) == 0x10)) { + hw->mc_buf = mc_buf_i; + } + + hw->rpm_addr = NULL; + hw->lmem_addr = NULL; + + hw->use_cma_flag = 0; + hw->decode_idx = 0; + hw->result_done_count = 0; + /*int m_uiMaxCUWidth = 1<<7;*/ + /*int m_uiMaxCUHeight = 1<<7;*/ + hw->has_keyframe = 0; + hw->has_sequence = 0; + hw->skip_flag = 0; + hw->wait_buf = 0; + hw->error_flag = 0; + + hw->last_pts = 0; + hw->last_pts_us64 = 0; + hw->shift_byte_count = 0; + hw->shift_byte_count_lo = 0; + hw->shift_byte_count_hi = 0; + hw->pts_mode_switching_count = 0; + hw->pts_mode_recovery_count = 0; + + hw->buf_num = 0; + hw->pic_num = 0; + + return 0; +} + +/* +struct AV1HW_s av1_decoder; +union param_u av1_param; +*/ 
+/************************************************** + * + *AV1 buffer management end + * + *************************************************** + */ + + +#define HEVC_CM_BODY_START_ADDR 0x3626 +#define HEVC_CM_BODY_LENGTH 0x3627 +#define HEVC_CM_HEADER_LENGTH 0x3629 +#define HEVC_CM_HEADER_OFFSET 0x362b + +#define LOSLESS_COMPRESS_MODE + +/*#define DECOMP_HEADR_SURGENT*/ +#ifdef AV1_10B_NV21 +static u32 mem_map_mode = 2 /* 0:linear 1:32x32 2:64x32*/ +#else +static u32 mem_map_mode; /* 0:linear 1:32x32 2:64x32 ; m8baby test1902 */ +#endif +static u32 enable_mem_saving = 1; +static u32 force_w_h; + +static u32 force_fps; + + +const u32 av1_version = 201602101; +static u32 debug; +static u32 radr; +static u32 rval; +static u32 pop_shorts; +static u32 dbg_cmd; +static u32 dbg_skip_decode_index; +/* + * bit 0~3, for HEVCD_IPP_AXIIF_CONFIG endian config + * bit 8~23, for HEVC_SAO_CTRL1 endian config + */ +static u32 endian; +#define HEVC_CONFIG_BIG_ENDIAN ((0x880 << 8) | 0x8) +#define HEVC_CONFIG_LITTLE_ENDIAN ((0xff0 << 8) | 0xf) + +static u32 multi_frames_in_one_pack = 1; +#ifdef ERROR_HANDLE_DEBUG +static u32 dbg_nal_skip_flag; + /* bit[0], skip vps; bit[1], skip sps; bit[2], skip pps */ +static u32 dbg_nal_skip_count; +#endif +/*for debug*/ +static u32 decode_pic_begin; +static uint slice_parse_begin; +static u32 step; +#ifdef MIX_STREAM_SUPPORT +static u32 buf_alloc_width = 4096; +static u32 buf_alloc_height = 2304; +static u32 av1_max_pic_w = 4096; +static u32 av1_max_pic_h = 2304; + +static u32 dynamic_buf_num_margin = 3; +#else +static u32 buf_alloc_width; +static u32 buf_alloc_height; +static u32 dynamic_buf_num_margin = 7; +#endif +static u32 buf_alloc_depth = 10; +static u32 buf_alloc_size; +/* + *bit[0]: 0, + * bit[1]: 0, always release cma buffer when stop + * bit[1]: 1, never release cma buffer when stop + *bit[0]: 1, when stop, release cma buffer if blackout is 1; + *do not release cma buffer is blackout is not 1 + * + *bit[2]: 0, when start decoding, 
check current displayed buffer + * (only for buffer decoded by AV1) if blackout is 0 + * 1, do not check current displayed buffer + * + *bit[3]: 1, if blackout is not 1, do not release current + * displayed cma buffer always. + */ +/* set to 1 for fast play; + * set to 8 for other case of "keep last frame" + */ +static u32 buffer_mode = 1; +/* buffer_mode_dbg: debug only*/ +static u32 buffer_mode_dbg = 0xffff0000; +/**/ + +/* + *bit 0, 1: only display I picture; + *bit 1, 1: only decode I picture; + */ +static u32 i_only_flag; + +static u32 low_latency_flag; + +static u32 no_head; + +static u32 max_decoding_time; +/* + *error handling + */ +/*error_handle_policy: + *bit 0: 0, auto skip error_skip_nal_count nals before error recovery; + *1, skip error_skip_nal_count nals before error recovery; + *bit 1 (valid only when bit0 == 1): + *1, wait vps/sps/pps after error recovery; + *bit 2 (valid only when bit0 == 0): + *0, auto search after error recovery (av1_recover() called); + *1, manual search after error recovery + *(change to auto search after get IDR: WRITE_VREG(NAL_SEARCH_CTL, 0x2)) + * + *bit 4: 0, set error_mark after reset/recover + * 1, do not set error_mark after reset/recover + *bit 5: 0, check total lcu for every picture + * 1, do not check total lcu + * + */ + +static u32 error_handle_policy; +/*static u32 parser_sei_enable = 1;*/ +#define MAX_BUF_NUM_NORMAL 16 +/*less bufs num 12 caused frame drop, nts failed*/ +#define MAX_BUF_NUM_LESS 14 +static u32 max_buf_num = MAX_BUF_NUM_NORMAL; +#define MAX_BUF_NUM_SAVE_BUF 8 + +static DEFINE_MUTEX(vav1_mutex); +#ifndef MULTI_INSTANCE_SUPPORT +static struct device *cma_dev; +#endif +#define HEVC_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0 +#define HEVC_FG_STATUS HEVC_ASSIST_SCRATCH_B +#define HEVC_RPM_BUFFER HEVC_ASSIST_SCRATCH_1 +#define AOM_AV1_ADAPT_PROB_REG HEVC_ASSIST_SCRATCH_3 +#define AOM_AV1_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_4 // changed to use HEVC_ASSIST_MMU_MAP_ADDR +#define AOM_AV1_DAALA_TOP_BUFFER 
HEVC_ASSIST_SCRATCH_5 +//#define HEVC_SAO_UP HEVC_ASSIST_SCRATCH_6 +//#define HEVC_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7 +#define AOM_AV1_CDF_BUFFER_W HEVC_ASSIST_SCRATCH_8 +#define AOM_AV1_CDF_BUFFER_R HEVC_ASSIST_SCRATCH_9 +#define AOM_AV1_COUNT_SWAP_BUFFER HEVC_ASSIST_SCRATCH_A +#define AOM_AV1_SEG_MAP_BUFFER_W AV1_SEG_W_ADDR // HEVC_ASSIST_SCRATCH_B +#define AOM_AV1_SEG_MAP_BUFFER_R AV1_SEG_R_ADDR // HEVC_ASSIST_SCRATCH_C +//#define HEVC_sao_vb_size HEVC_ASSIST_SCRATCH_B +//#define HEVC_SAO_VB HEVC_ASSIST_SCRATCH_C +//#define HEVC_SCALELUT HEVC_ASSIST_SCRATCH_D +#define HEVC_WAIT_FLAG HEVC_ASSIST_SCRATCH_E +#define RPM_CMD_REG HEVC_ASSIST_SCRATCH_F +//#define HEVC_STREAM_SWAP_TEST HEVC_ASSIST_SCRATCH_L + +#ifdef MULTI_INSTANCE_SUPPORT +#define HEVC_DECODE_COUNT HEVC_ASSIST_SCRATCH_M +#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N +#else +#define HEVC_DECODE_PIC_BEGIN_REG HEVC_ASSIST_SCRATCH_M +#define HEVC_DECODE_PIC_NUM_REG HEVC_ASSIST_SCRATCH_N +#endif +#define AOM_AV1_SEGMENT_FEATURE AV1_QUANT_WR + +#define DEBUG_REG1 HEVC_ASSIST_SCRATCH_G +#define DEBUG_REG2 HEVC_ASSIST_SCRATCH_H + +#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_I +#define CUR_NAL_UNIT_TYPE HEVC_ASSIST_SCRATCH_J +#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K + +#define PIC_END_LCU_COUNT HEVC_ASSIST_SCRATCH_2 + +#define HEVC_AUX_ADR HEVC_ASSIST_SCRATCH_L +#define HEVC_AUX_DATA_SIZE HEVC_ASSIST_SCRATCH_7 +#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD) +#define HEVC_DBG_LOG_ADR HEVC_ASSIST_SCRATCH_C +#ifdef DEBUG_CMD +#define HEVC_D_ADR HEVC_ASSIST_SCRATCH_4 +#endif +#endif +/* + *ucode parser/search control + *bit 0: 0, header auto parse; 1, header manual parse + *bit 1: 0, auto skip for noneseamless stream; 1, no skip + *bit [3:2]: valid when bit1==0; + *0, auto skip nal before first vps/sps/pps/idr; + *1, auto skip nal before first vps/sps/pps + *2, auto skip nal before first vps/sps/pps, + * and not decode until the first I slice (with slice address of 0) + * + *3, auto skip before 
first I slice (nal_type >=16 && nal_type<=21) + *bit [15:4] nal skip count (valid when bit0 == 1 (manual mode) ) + *bit [16]: for NAL_UNIT_EOS when bit0 is 0: + * 0, send SEARCH_DONE to arm ; 1, do not send SEARCH_DONE to arm + *bit [17]: for NAL_SEI when bit0 is 0: + * 0, do not parse SEI in ucode; 1, parse SEI in ucode + *bit [31:20]: used by ucode for debug purpose + */ +#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I + /*[31:24] chip feature + 31: 0, use MBOX1; 1, use MBOX0 + [24:16] debug + 0x1, bufmgr only + */ +#define DECODE_MODE HEVC_ASSIST_SCRATCH_J +#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K + +#define RPM_BUF_SIZE ((RPM_END - RPM_BEGIN) * 2) +#define LMEM_BUF_SIZE (0x600 * 2) + +/* +#ifdef MAP_8K +static u32 seg_map_size = 0xd8000; +#else +static u32 seg_map_size = 0x36000; +#endif +*/ +//static u32 seg_map_size = 0x36000; + +//#define VBH_BUF_COUNT 4 +//#define VBH_BUF_SIZE_1080P ((((2 * 16 * 1088) + 0xffff) & (~0xffff)) * VBH_BUF_COUNT) +//#define VBH_BUF_SIZE_4K ((((2 * 16 * 2304) + 0xffff) & (~0xffff))) * VBH_BUF_COUNT) +//#define VBH_BUF_SIZE_8K ((((2 * 16 * 4608) + 0xffff) & (~0xffff))) * VBH_BUF_COUNT) + + /*mmu_vbh buf is used by HEVC_SAO_MMU_VH0_ADDR, HEVC_SAO_MMU_VH1_ADDR*/ +#define VBH_BUF_SIZE_1080P 0x3000 +#define VBH_BUF_SIZE_4K 0x5000 +#define VBH_BUF_SIZE_8K 0xa000 +#define VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh.buf_size / 2) + /*mmu_vbh_dw buf is used by HEVC_SAO_MMU_VH0_ADDR2,HEVC_SAO_MMU_VH1_ADDR2, + HEVC_DW_VH0_ADDDR, HEVC_DW_VH1_ADDDR*/ +#define DW_VBH_BUF_SIZE_1080P (VBH_BUF_SIZE_1080P * 2) +#define DW_VBH_BUF_SIZE_4K (VBH_BUF_SIZE_4K * 2) +#define DW_VBH_BUF_SIZE_8K (VBH_BUF_SIZE_8K * 2) +#define DW_VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh_dw.buf_size / 4) + +/* necessary 4K page size align for t7/t3 decoder and after. 
fix case1440 dec timeout */ +#define WORKBUF_ALIGN(addr) (ALIGN(addr, PAGE_SIZE)) + +#define WORK_BUF_SPEC_NUM 3 + +static struct BuffInfo_s aom_workbuff_spec[WORK_BUF_SPEC_NUM] = { + { //8M bytes + .max_width = 1920, + .max_height = 1088, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x1E00, + }, + .sao_abv = { + .buf_size = 0x0, //0x30000, + }, + .sao_vb = { + .buf_size = 0x0, //0x30000, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .seg_map = { + // SEGMENT MAP AREA - 1920x1088/4/4 * 3 bits = 0xBF40 Bytes * 16 = 0xBF400 + .buf_size = 0xBF400, + }, + .daala_top = { + // DAALA TOP STORE AREA - 224 Bytes (use 256 Bytes for LPDDR4) per 128. Total 4096/128*256 = 0x2000 + .buf_size = 0xf00, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0x0, //0x2800, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .cdf_buf = { + // for context store/load 1024x256 x16 = 512K bytes 16*0x8000 + .buf_size = 0x80000, + }, + .gmc_buf = { + // for gmc_parameter store/load 128 x 16 = 2K bytes 0x800 + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x0, //0x8000, + }, + .dblk_para = { + // DBLK -> Max 256(4096/16) LCU, each para 1024bytes(total:0x40000), data 1024bytes(total:0x40000) + .buf_size = 0xd00, /*0xc40*/ + }, + .dblk_data = { + .buf_size = 0x49000, + }, + .cdef_data = { + .buf_size = 0x22400, + }, + .ups_data = { + .buf_size = 0x36000, + }, + .fgs_table = { + .buf_size = FGS_TABLE_SIZE * FRAME_BUFFERS, // 512x128bits + }, +#ifdef AOM_AV1_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_1080P, //2*16*(more than 2304)/4, 4K + }, + 
.cm_header = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_1080P * FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + #else + .buf_size = 0, + #endif + }, +#endif +#ifdef AOM_AV1_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_1080P, //2*16*(more than 2304)/4, 4K + }, + .cm_header_dw = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_1080P*FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + #else + .buf_size = 0, + #endif + }, +#endif + .mpred_above = { + .buf_size = 0x2800, /*round from 0x2760*/ /* 2 * size of hw*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + .buf_size = MAX_ONE_MV_BUFFER_SIZE_1080P_TM2REVB * FRAME_BUFFERS,/*round from 203A0*/ //1080p, 0x40000 per buffer + }, +#endif + .rpm = { + .buf_size = 0x80*2, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { +#ifdef VPU_FILMGRAIN_DUMP + .max_width = 640, + .max_height = 480, +#else + .max_width = 4096, + .max_height = 2304, +#endif + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x0, //0x30000, + }, + .sao_vb = { + .buf_size = 0x0, //0x30000, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .seg_map = { + // SEGMENT MAP AREA - 4096x2304/4/4 * 3 bits = 0x36000 Bytes * 16 = 0x360000 + .buf_size = 0x360000, + }, + .daala_top = { + // DAALA TOP STORE AREA - 224 Bytes (use 256 Bytes for LPDDR4) per 128. 
Total 4096/128*256 = 0x2000 + .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0x0, //0x2800, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .cdf_buf = { + // for context store/load 1024x256 x16 = 512K bytes 16*0x8000 + .buf_size = 0x80000, + }, + .gmc_buf = { + // for gmc_parameter store/load 128 x 16 = 2K bytes 0x800 + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x0, //0x8000, + }, + .dblk_para = { + // DBLK -> Max 256(4096/16) LCU, each para 1024bytes(total:0x40000), data 1024bytes(total:0x40000) + .buf_size = 0x1a00, /*0x1980*/ + }, + .dblk_data = { + .buf_size = 0x52800, + }, + .cdef_data = { + .buf_size = 0x24a00, + }, + .ups_data = { + .buf_size = 0x6f000, + }, + .fgs_table = { + .buf_size = FGS_TABLE_SIZE * FRAME_BUFFERS, // 512x128bits + }, +#ifdef AOM_AV1_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_4K, //2*16*(more than 2304)/4, 4K + }, + .cm_header = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_4K*FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + #else + .buf_size = 0, + #endif + }, +#endif +#ifdef AOM_AV1_MMU_DW + .mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_4K, //2*16*(more than 2304)/4, 4K + }, + .cm_header_dw = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_4K*FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + #else + .buf_size = 0, + #endif + }, +#endif + .mpred_above = { + .buf_size = 0x5400, /* 2 * size of hw*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = MAX_ONE_MV_BUFFER_SIZE_4K_TM2REVB * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = 0x80*2, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + + }, + { + .max_width = 8192, + 
.max_height = 4608, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x0, //0x30000, + }, + .sao_vb = { + .buf_size = 0x0, //0x30000, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .seg_map = { + // SEGMENT MAP AREA - 4096x2304/4/4 * 3 bits = 0x36000 Bytes * 16 = 0x360000 + .buf_size = 0xd80000, + }, + .daala_top = { + // DAALA TOP STORE AREA - 224 Bytes (use 256 Bytes for LPDDR4) per 128. Total 4096/128*256 = 0x2000 + .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0x0, //0x2800, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .cdf_buf = { + // for context store/load 1024x256 x16 = 512K bytes 16*0x8000 + .buf_size = 0x80000, + }, + .gmc_buf = { + // for gmc_parameter store/load 128 x 16 = 2K bytes 0x800 + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x0, //0x8000, + }, + .dblk_para = { + // DBLK -> Max 256(4096/16) LCU, each para 1024bytes(total:0x40000), data 1024bytes(total:0x40000) + .buf_size = 0x3300, /*0x32a0*/ + }, + .dblk_data = { + .buf_size = 0xa4800, + }, + .cdef_data = { + .buf_size = 0x29200, + }, + .ups_data = { + .buf_size = 0xdb000, + }, + .fgs_table = { + .buf_size = FGS_TABLE_SIZE * FRAME_BUFFERS, // 512x128bits + }, +#ifdef AOM_AV1_MMU + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_8K, //2*16*(more than 2304)/4, 4K + }, + .cm_header = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_8K*FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) +#else + .buf_size = 0, +#endif + }, +#endif +#ifdef AOM_AV1_MMU_DW + 
.mmu_vbh_dw = { + .buf_size = DW_VBH_BUF_SIZE_8K, //2*16*(more than 2304)/4, 4K + }, + .cm_header_dw = { + #ifdef USE_SPEC_BUF_FOR_MMU_HEAD + .buf_size = MMU_COMPRESS_HEADER_SIZE_8K*FRAME_BUFFERS, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + #else + .buf_size = 0, + #endif + }, +#endif + .mpred_above = { + .buf_size = 0xA800, /* 2 * size of hw*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = MAX_ONE_MV_BUFFER_SIZE_8K_TM2REVB * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = 0x80*2, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + + } +}; + + +/* +* AUX DATA Process +*/ +static u32 init_aux_size; +static int aux_data_is_avaible(struct AV1HW_s *hw) +{ + u32 reg_val; + + reg_val = READ_VREG(HEVC_AUX_DATA_SIZE); + if (reg_val != 0 && reg_val != init_aux_size) + return 1; + else + return 0; +} + +static void config_aux_buf(struct AV1HW_s *hw) +{ + WRITE_VREG(HEVC_AUX_ADR, hw->aux_phy_addr); + init_aux_size = ((hw->prefix_aux_size >> 4) << 16) | + (hw->suffix_aux_size >> 4); + WRITE_VREG(HEVC_AUX_DATA_SIZE, init_aux_size); +} + +/* +* dv_meta_flag: 1, dolby meta (T35) only; 2, not include dolby meta (T35) +*/ +static void set_aux_data(struct AV1HW_s *hw, + char **aux_data_buf, int *aux_data_size, + unsigned char suffix_flag, + unsigned char dv_meta_flag) +{ + int i; + unsigned short *aux_adr; + unsigned int size_reg_val = + READ_VREG(HEVC_AUX_DATA_SIZE); + unsigned int aux_count = 0; + int aux_size = 0; + if (0 == aux_data_is_avaible(hw)) + return; + + if (hw->aux_data_dirty || + hw->m_ins_flag == 0) { + + hw->aux_data_dirty = 0; + } + + if (suffix_flag) { + aux_adr = (unsigned short *) + (hw->aux_addr + + hw->prefix_aux_size); + aux_count = + ((size_reg_val & 0xffff) << 4) + >> 1; + aux_size = + hw->suffix_aux_size; + } else { + aux_adr = + (unsigned short *)hw->aux_addr; + aux_count = + ((size_reg_val >> 16) << 4) + >> 1; + aux_size = + 
hw->prefix_aux_size; + } + if (debug & AV1_DEBUG_BUFMGR_MORE) { + av1_print(hw, 0, + "%s:old size %d count %d,suf %d dv_flag %d\r\n", + __func__, *aux_data_size, + aux_count, suffix_flag, dv_meta_flag); + } + if (aux_size > 0 && aux_count > 0) { + int heads_size = 0; + + for (i = 0; i < aux_count; i++) { + unsigned char tag = aux_adr[i] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + heads_size += 8; + else if (dv_meta_flag == 1 && tag == 0x14) + heads_size += 8; + else if (dv_meta_flag == 2 && tag != 0x14) + heads_size += 8; + } + } + + if (*aux_data_buf) { + unsigned char valid_tag = 0; + unsigned char *h = + *aux_data_buf + + *aux_data_size; + unsigned char *p = h + 8; + int len = 0; + int padding_len = 0; + + for (i = 0; i < aux_count; i += 4) { + int ii; + unsigned char tag = aux_adr[i + 3] >> 8; + if (tag != 0 && tag != 0xff) { + if (dv_meta_flag == 0) + valid_tag = 1; + else if (dv_meta_flag == 1 + && tag == 0x14) + valid_tag = 1; + else if (dv_meta_flag == 2 + && tag != 0x14) + valid_tag = 1; + else + valid_tag = 0; + if (valid_tag && len > 0) { + *aux_data_size += + (len + 8); + h[0] = (len >> 24) + & 0xff; + h[1] = (len >> 16) + & 0xff; + h[2] = (len >> 8) + & 0xff; + h[3] = (len >> 0) + & 0xff; + h[6] = + (padding_len >> 8) + & 0xff; + h[7] = (padding_len) + & 0xff; + h += (len + 8); + p += 8; + len = 0; + padding_len = 0; + } + if (valid_tag) { + h[4] = tag; + h[5] = 0; + h[6] = 0; + h[7] = 0; + } + } + if (valid_tag) { + for (ii = 0; ii < 4; ii++) { + unsigned short aa = + aux_adr[i + 3 + - ii]; + *p = aa & 0xff; + p++; + len++; + if ((aa >> 8) == 0xff) + padding_len++; + } + } + } + if (len > 0) { + *aux_data_size += (len + 8); + h[0] = (len >> 24) & 0xff; + h[1] = (len >> 16) & 0xff; + h[2] = (len >> 8) & 0xff; + h[3] = (len >> 0) & 0xff; + h[6] = (padding_len >> 8) & 0xff; + h[7] = (padding_len) & 0xff; + } + if (debug & AV1_DEBUG_BUFMGR_MORE) { + av1_print(hw, 0, + "aux: (size %d) suffix_flag %d\n", + *aux_data_size, suffix_flag); 
+ for (i = 0; i < *aux_data_size; i++) { + av1_print_cont(hw, 0, + "%02x ", (*aux_data_buf)[i]); + if (((i + 1) & 0xf) == 0) + av1_print_cont(hw, 0, "\n"); + } + av1_print_cont(hw, 0, "\n"); + } + } + } + +} + +static void set_dv_data(struct AV1HW_s *hw) +{ + set_aux_data(hw, &hw->dv_data_buf, + &hw->dv_data_size, 0, 1); + +} + +static void set_pic_aux_data(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic, unsigned char suffix_flag, + unsigned char dv_meta_flag) +{ + if (pic == NULL) + return; + set_aux_data(hw, &pic->aux_data_buf, + &pic->aux_data_size, suffix_flag, dv_meta_flag); +} + +static void copy_dv_data(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic) +{ + if (pic->aux_data_buf) { + if (debug & AV1_DEBUG_BUFMGR_MORE) { + av1_print(hw, 0, + "%s: (size %d) pic index %d\n", + __func__, + hw->dv_data_size, pic->index); + } + memcpy(pic->aux_data_buf + pic->aux_data_size, hw->dv_data_buf, hw->dv_data_size); + pic->aux_data_size += hw->dv_data_size; + memset(hw->dv_data_buf, 0, hw->aux_data_size); + hw->dv_data_size = 0; + } +} + +static void release_aux_data(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic) +{ +#if 0 + if (pic->aux_data_buf) + vfree(pic->aux_data_buf); + pic->aux_data_buf = NULL; + pic->aux_data_size = 0; +#endif +} + +static void dump_aux_buf(struct AV1HW_s *hw) +{ + int i; + unsigned short *aux_adr = + (unsigned short *) + hw->aux_addr; + unsigned int aux_size = + (READ_VREG(HEVC_AUX_DATA_SIZE) + >> 16) << 4; + + if (hw->prefix_aux_size > 0) { + av1_print(hw, 0, + "prefix aux: (size %d)\n", + aux_size); + for (i = 0; i < + (aux_size >> 1); i++) { + av1_print_cont(hw, 0, + "%04x ", + *(aux_adr + i)); + if (((i + 1) & 0xf) + == 0) + av1_print_cont(hw, + 0, "\n"); + } + } + if (hw->suffix_aux_size > 0) { + aux_adr = (unsigned short *) + (hw->aux_addr + + hw->prefix_aux_size); + aux_size = + (READ_VREG(HEVC_AUX_DATA_SIZE) & 0xffff) + << 4; + av1_print(hw, 0, + "suffix aux: (size %d)\n", + aux_size); + for (i = 0; i < + (aux_size 
>> 1); i++) { + av1_print_cont(hw, 0, + "%04x ", *(aux_adr + i)); + if (((i + 1) & 0xf) == 0) + av1_print_cont(hw, 0, "\n"); + } + } +} + +/* +* +*/ + +/*Losless compression body buffer size 4K per 64x32 (jt)*/ +static int compute_losless_comp_body_size(int width, int height, + uint8_t is_bit_depth_10) +{ + int width_x64; + int height_x32; + int bsize; + + width_x64 = width + 63; + width_x64 >>= 6; + height_x32 = height + 31; + height_x32 >>= 5; + bsize = (is_bit_depth_10?4096:3200)*width_x64*height_x32; + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info("%s(%d,%d,%d)=>%d\n", + __func__, width, height, + is_bit_depth_10, bsize); + + return bsize; +} + +/* Losless compression header buffer size 32bytes per 128x64 (jt)*/ +static int compute_losless_comp_header_size(int width, int height) +{ + int width_x128; + int height_x64; + int hsize; + + width_x128 = width + 127; + width_x128 >>= 7; + height_x64 = height + 63; + height_x64 >>= 6; + + hsize = 32 * width_x128 * height_x64; + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info("%s(%d,%d)=>%d\n", + __func__, width, height, + hsize); + + return hsize; +} + +#ifdef AOM_AV1_MMU_DW +static int compute_losless_comp_body_size_dw(int width, int height, + uint8_t is_bit_depth_10) +{ + + return compute_losless_comp_body_size(width, height, is_bit_depth_10); +} + +/* Losless compression header buffer size 32bytes per 128x64 (jt)*/ +static int compute_losless_comp_header_size_dw(int width, int height) +{ + return compute_losless_comp_header_size(width, height); +} +#endif + +static void init_buff_spec(struct AV1HW_s *hw, + struct BuffInfo_s *buf_spec) +{ + void *mem_start_virt; + + buf_spec->ipp.buf_start = + WORKBUF_ALIGN(buf_spec->start_adr); + buf_spec->sao_abv.buf_start = + WORKBUF_ALIGN(buf_spec->ipp.buf_start + buf_spec->ipp.buf_size); + buf_spec->sao_vb.buf_start = + WORKBUF_ALIGN(buf_spec->sao_abv.buf_start + buf_spec->sao_abv.buf_size); + buf_spec->short_term_rps.buf_start = + WORKBUF_ALIGN(buf_spec->sao_vb.buf_start + 
		buf_spec->sao_vb.buf_size);
	/* Continuation of init_buff_spec(): sequential workspace layout. */
	buf_spec->vps.buf_start =
		WORKBUF_ALIGN(buf_spec->short_term_rps.buf_start + buf_spec->short_term_rps.buf_size);
	buf_spec->seg_map.buf_start =
		WORKBUF_ALIGN(buf_spec->vps.buf_start + buf_spec->vps.buf_size);
	buf_spec->daala_top.buf_start =
		WORKBUF_ALIGN(buf_spec->seg_map.buf_start + buf_spec->seg_map.buf_size);
	buf_spec->sao_up.buf_start =
		WORKBUF_ALIGN(buf_spec->daala_top.buf_start + buf_spec->daala_top.buf_size);
	buf_spec->swap_buf.buf_start =
		WORKBUF_ALIGN(buf_spec->sao_up.buf_start + buf_spec->sao_up.buf_size);
	buf_spec->cdf_buf.buf_start =
		WORKBUF_ALIGN(buf_spec->swap_buf.buf_start + buf_spec->swap_buf.buf_size);
	buf_spec->gmc_buf.buf_start =
		WORKBUF_ALIGN(buf_spec->cdf_buf.buf_start + buf_spec->cdf_buf.buf_size);
	buf_spec->scalelut.buf_start =
		WORKBUF_ALIGN(buf_spec->gmc_buf.buf_start + buf_spec->gmc_buf.buf_size);
	buf_spec->dblk_para.buf_start =
		WORKBUF_ALIGN(buf_spec->scalelut.buf_start + buf_spec->scalelut.buf_size);
	buf_spec->dblk_data.buf_start =
		WORKBUF_ALIGN(buf_spec->dblk_para.buf_start + buf_spec->dblk_para.buf_size);
	buf_spec->cdef_data.buf_start =
		WORKBUF_ALIGN(buf_spec->dblk_data.buf_start + buf_spec->dblk_data.buf_size);
	buf_spec->ups_data.buf_start =
		WORKBUF_ALIGN(buf_spec->cdef_data.buf_start + buf_spec->cdef_data.buf_size);
	buf_spec->fgs_table.buf_start =
		WORKBUF_ALIGN(buf_spec->ups_data.buf_start + buf_spec->ups_data.buf_size);
#ifdef AOM_AV1_MMU
	/* MMU (compressed-frame) headers live after the film-grain table. */
	buf_spec->mmu_vbh.buf_start =
		WORKBUF_ALIGN(buf_spec->fgs_table.buf_start + buf_spec->fgs_table.buf_size);
	buf_spec->cm_header.buf_start =
		WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size);
#ifdef AOM_AV1_MMU_DW
	/* Double-write MMU headers follow the primary compressed headers. */
	buf_spec->mmu_vbh_dw.buf_start =
		WORKBUF_ALIGN(buf_spec->cm_header.buf_start + buf_spec->cm_header.buf_size);
	buf_spec->cm_header_dw.buf_start =
		WORKBUF_ALIGN(buf_spec->mmu_vbh_dw.buf_start + buf_spec->mmu_vbh_dw.buf_size);
	buf_spec->mpred_above.buf_start =
		
		WORKBUF_ALIGN(buf_spec->cm_header_dw.buf_start + buf_spec->cm_header_dw.buf_size);
#else
	buf_spec->mpred_above.buf_start =
		WORKBUF_ALIGN(buf_spec->cm_header.buf_start + buf_spec->cm_header.buf_size);
#endif
#else
	buf_spec->mpred_above.buf_start =
		WORKBUF_ALIGN(buf_spec->fgs_table.buf_start + buf_spec->fgs_table.buf_size);
#endif
#ifdef MV_USE_FIXED_BUF
	/* Fixed MV mode: colocated-MV buffers are carved from the workspace. */
	buf_spec->mpred_mv.buf_start =
		WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size);
	buf_spec->rpm.buf_start =
		WORKBUF_ALIGN(buf_spec->mpred_mv.buf_start + buf_spec->mpred_mv.buf_size);
#else
	buf_spec->rpm.buf_start =
		WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size);
#endif
	buf_spec->lmem.buf_start =
		WORKBUF_ALIGN(buf_spec->rpm.buf_start + buf_spec->rpm.buf_size);
	buf_spec->end_adr =
		WORKBUF_ALIGN(buf_spec->lmem.buf_start + buf_spec->lmem.buf_size);

	/* Callers may pass hw == NULL just to compute the layout. */
	if (!hw)
		return;

	if (!vdec_secure(hw_to_vdec(hw))) {
		/* Zero the deblock-parameter buffer and flush it to DRAM. */
		mem_start_virt =
			codec_mm_phys_to_virt(buf_spec->dblk_para.buf_start);
		if (mem_start_virt) {
			memset(mem_start_virt, 0,
				buf_spec->dblk_para.buf_size);
			codec_mm_dma_flush(mem_start_virt,
				buf_spec->dblk_para.buf_size,
				DMA_TO_DEVICE);
		} else {
			/* Not kernel-mapped: vmap temporarily, clear, unmap. */
			mem_start_virt = codec_mm_vmap(
				buf_spec->dblk_para.buf_start,
				buf_spec->dblk_para.buf_size);
			if (mem_start_virt) {
				memset(mem_start_virt, 0,
					buf_spec->dblk_para.buf_size);
				codec_mm_dma_flush(mem_start_virt,
					buf_spec->dblk_para.buf_size,
					DMA_TO_DEVICE);
				codec_mm_unmap_phyaddr(mem_start_virt);
			} else {
				/*not virt for tvp playing,
				may need clear on ucode.*/
				pr_err("mem_start_virt failed\n");
			}
		}
	}

	if (debug) {
		pr_info("%s workspace (%x %x) size = %x\n", __func__,
			buf_spec->start_adr, buf_spec->end_adr,
			buf_spec->end_adr - buf_spec->start_adr);
	}

	if (debug) {
		pr_info("ipp.buf_start :%x\n",
			buf_spec->ipp.buf_start);
		pr_info("sao_abv.buf_start :%x\n",
			buf_spec->sao_abv.buf_start);
		pr_info("sao_vb.buf_start
:%x\n",
			buf_spec->sao_vb.buf_start);
		pr_info("short_term_rps.buf_start :%x\n",
			buf_spec->short_term_rps.buf_start);
		pr_info("vps.buf_start :%x\n",
			buf_spec->vps.buf_start);
		pr_info("seg_map.buf_start :%x\n",
			buf_spec->seg_map.buf_start);
		pr_info("daala_top.buf_start :%x\n",
			buf_spec->daala_top.buf_start);
		pr_info("swap_buf.buf_start :%x\n",
			buf_spec->swap_buf.buf_start);
		pr_info("cdf_buf.buf_start :%x\n",
			buf_spec->cdf_buf.buf_start);
		pr_info("gmc_buf.buf_start :%x\n",
			buf_spec->gmc_buf.buf_start);
		pr_info("scalelut.buf_start :%x\n",
			buf_spec->scalelut.buf_start);
		pr_info("dblk_para.buf_start :%x\n",
			buf_spec->dblk_para.buf_start);
		pr_info("dblk_data.buf_start :%x\n",
			buf_spec->dblk_data.buf_start);
		pr_info("cdef_data.buf_start :%x\n",
			buf_spec->cdef_data.buf_start);
		pr_info("ups_data.buf_start :%x\n",
			buf_spec->ups_data.buf_start);

#ifdef AOM_AV1_MMU
		pr_info("mmu_vbh.buf_start :%x\n",
			buf_spec->mmu_vbh.buf_start);
#endif
		pr_info("mpred_above.buf_start :%x\n",
			buf_spec->mpred_above.buf_start);
#ifdef MV_USE_FIXED_BUF
		pr_info("mpred_mv.buf_start :%x\n",
			buf_spec->mpred_mv.buf_start);
#endif
		if ((debug & AOM_AV1_DEBUG_SEND_PARAM_WITH_REG) == 0) {
			pr_info("rpm.buf_start :%x\n",
				buf_spec->rpm.buf_start);
		}
	}
}

/*
 * Release every MMU/BMMU box owned by the decoder (and, without fixed MV
 * buffers, the dynamically-allocated MV buffers first).
 */
static void uninit_mmu_buffers(struct AV1HW_s *hw)
{
#ifndef MV_USE_FIXED_BUF
	dealloc_mv_bufs(hw);
#endif
	if (hw->mmu_box)
		decoder_mmu_box_free(hw->mmu_box);
	hw->mmu_box = NULL;

#ifdef AOM_AV1_MMU_DW
	if (hw->mmu_box_dw)
		decoder_mmu_box_free(hw->mmu_box_dw);
	hw->mmu_box_dw = NULL;
#endif
	if (hw->bmmu_box)
		decoder_bmmu_box_free(hw->bmmu_box);
	hw->bmmu_box = NULL;
}

/*
 * Number of LCUs covering a w x h frame: width is rounded up to 64,
 * height to 32, then each dimension divided by lcu_size rounding up.
 */
static int calc_luc_quantity(int lcu_size, u32 w, u32 h)
{
	int pic_width_64 = (w + 63) & (~0x3f);
	int pic_height_32 = (h + 31) & (~0x1f);
	int pic_width_lcu = (pic_width_64 % lcu_size) ?
		pic_width_64 / lcu_size + 1 : pic_width_64 / lcu_size;
	int pic_height_lcu = (pic_height_32 % lcu_size) ?
		pic_height_32 / lcu_size + 1 : pic_height_32 / lcu_size;

	return pic_width_lcu * pic_height_lcu;
}

/* return in MB */
/*
 * MMU buffer budget by resolution tier: 4 MB up to 1280x736, 12 MB up to
 * 1080p, 48 MB above that (default).
 */
static int av1_max_mmu_buf_size(int max_w, int max_h)
{
	int buf_size = 48;

	if ((max_w * max_h > 1280*736) &&
		(max_w * max_h <= 1920*1088)) {
		buf_size = 12;
	} else if ((max_w * max_h > 0) &&
		(max_w * max_h <= 1280*736)) {
		buf_size = 4;
	}

	return buf_size;
}

/* Compressed-header size tier: 8K (SM1+ only), 4K, or 1080p. */
static int av1_get_header_size(int w, int h)
{
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		IS_8K_SIZE(w, h))
		return MMU_COMPRESS_HEADER_SIZE_8K;
	if (IS_4K_SIZE(w, h))
		return MMU_COMPRESS_HEADER_SIZE_4K;

	return MMU_COMPRESS_HEADER_SIZE_1080P;
}

/* task_ops callback: return a vframe to the decoder. */
static void av1_put_video_frame(void *vdec_ctx, struct vframe_s *vf)
{
	vav1_vf_put(vf, vdec_ctx);
}

/* task_ops callback: fetch the next decoded vframe. */
static void av1_get_video_frame(void *vdec_ctx, struct vframe_s **vf)
{
	*vf = vav1_vf_get(vdec_ctx);
}

static struct task_ops_s task_dec_ops = {
	.type		= TASK_TYPE_DEC,
	.get_vframe	= av1_get_video_frame,
	.put_vframe	= av1_put_video_frame,
};

/*
 * V4L2 path: obtain a capture buffer from the v4l2 framebuffer ops and wire
 * its addresses (luma/chroma, compressed headers, MV buffer, film-grain
 * table) into the given picture config.
 */
static int v4l_alloc_and_config_pic(struct AV1HW_s *hw,
	struct PIC_BUFFER_CONFIG_s *pic)
{
	int ret = -1;
	int i = pic->index;
	int dw_mode = get_double_write_mode_init(hw);
	int lcu_total = calc_luc_quantity(hw->current_lcu_size,
		hw->frame_width, hw->frame_height);
#ifdef MV_USE_FIXED_BUF
	u32 mpred_mv_end = hw->work_space_buf->mpred_mv.buf_start +
		hw->work_space_buf->mpred_mv.buf_size;
//#ifdef USE_DYNAMIC_MV_BUFFER
//	int32_t MV_MEM_UNIT = (lcu_size == 128) ?
(19*4*16) : (19*16); +// int32_t mv_buffer_size = (lcu_total*MV_MEM_UNIT); +//#else + int32_t mv_buffer_size = hw->max_one_mv_buffer_size; +//#endif +#endif + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)hw->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + + if (i < 0) + return ret; + + ret = ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC); + if (ret < 0) { + av1_print(hw, 0, "[%d] AV1 get buffer fail.\n", ctx->id); + return ret; + } + + fb->task->attach(fb->task, &task_dec_ops, hw); + fb->status = FB_ST_DECODER; + + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + struct internal_comp_buf *ibuf = v4lfb_to_icomp_buf(hw, fb); + + hw->m_BUF[i].header_addr = ibuf->header_addr; + if (debug & AV1_DEBUG_BUFMGR_MORE) { + pr_info("MMU header_adr %d: %ld\n", + i, hw->m_BUF[i].header_addr); + } + } + + if (get_double_write_mode(hw) & 0x20) { + struct internal_comp_buf *ibuf = v4lfb_to_icomp_buf(hw, fb); + + hw->m_BUF[i].header_dw_addr = ibuf->header_dw_addr; + if (debug & AV1_DEBUG_BUFMGR_MORE) { + pr_info("MMU header_adr %d: %ld\n", + i, hw->m_BUF[i].header_addr); + } + } + pic->repeat_pic = NULL; + pic->repeat_count = 0; +#ifdef MV_USE_FIXED_BUF + if ((hw->work_space_buf->mpred_mv.buf_start + + ((i + 1) * mv_buffer_size)) + <= mpred_mv_end) { +#endif + hw->m_BUF[i].v4l_ref_buf_addr = (ulong)fb; + pic->cma_alloc_addr = fb->m.mem[0].addr; + if (fb->num_planes == 1) { + hw->m_BUF[i].start_adr = fb->m.mem[0].addr; + hw->m_BUF[i].luma_size = fb->m.mem[0].offset; + hw->m_BUF[i].size = fb->m.mem[0].size; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + pic->dw_y_adr = hw->m_BUF[i].start_adr; + pic->dw_u_v_adr = pic->dw_y_adr + hw->m_BUF[i].luma_size; + } else if (fb->num_planes == 2) { + hw->m_BUF[i].start_adr = fb->m.mem[0].addr; + hw->m_BUF[i].size = fb->m.mem[0].size; + hw->m_BUF[i].chroma_addr = fb->m.mem[1].addr; + hw->m_BUF[i].chroma_size = fb->m.mem[1].size; + fb->m.mem[0].bytes_used = 
			fb->m.mem[0].size;
		fb->m.mem[1].bytes_used = fb->m.mem[1].size;
		pic->dw_y_adr = hw->m_BUF[i].start_adr;
		pic->dw_u_v_adr = hw->m_BUF[i].chroma_addr;
	}

	/* config frame buffer */
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) &&
		(get_double_write_mode(hw) != 0x10)) {
		pic->header_adr = hw->m_BUF[i].header_addr;
	}

	if (get_double_write_mode(hw) & 0x20) {
		pic->header_dw_adr = hw->m_BUF[i].header_dw_addr;
	}

	pic->BUF_index = i;
	pic->lcu_total = lcu_total;
	pic->mc_canvas_y = pic->index;
	pic->mc_canvas_u_v = pic->index;

	/* dw_mode bit4: separate luma/chroma canvases (2 per picture). */
	if (dw_mode & 0x10) {
		pic->mc_canvas_y = (pic->index << 1);
		pic->mc_canvas_u_v = (pic->index << 1) + 1;
	}

#ifdef MV_USE_FIXED_BUF
	pic->mpred_mv_wr_start_addr =
		hw->work_space_buf->mpred_mv.buf_start +
		(pic->index * mv_buffer_size);
#endif

#ifdef DUMP_FILMGRAIN
	if (pic->index == fg_dump_index) {
		pic->fgs_table_adr = hw->fg_phy_addr;
		pr_info("set buffer %d film grain table 0x%x\n",
			pic->index, pic->fgs_table_adr);
	} else {
#endif
	if (hw->assit_task.use_sfgs) {
		pic->sfgs_table_phy = hw->fg_phy_addr + (pic->index * FGS_TABLE_SIZE);
		pic->sfgs_table_ptr = hw->fg_ptr + (pic->index * FGS_TABLE_SIZE);
	}
	pic->fgs_table_adr = hw->work_space_buf->fgs_table.buf_start +
		(pic->index * FGS_TABLE_SIZE);
	}

	if (debug) {

		pr_info("%s index %d BUF_index %d ",
			__func__, pic->index,
			pic->BUF_index);
		pr_info("comp_body_size %x comp_buf_size %x ",
			pic->comp_body_size,
			pic->buf_size);
		pr_info("mpred_mv_wr_start_adr %d\n",
			pic->mpred_mv_wr_start_addr);
		pr_info("dw_y_adr %d, pic_config->dw_u_v_adr =%d\n",
			pic->dw_y_adr,
			pic->dw_u_v_adr);
	}
#ifdef MV_USE_FIXED_BUF
	}
#endif
	return ret;
}

/*
 * Non-V4L2 path: size and allocate the frame buffer for one picture config
 * (compressed surfaces, double-write surface, MV buffer, film-grain table).
 */
static int config_pic(struct AV1HW_s *hw,
	struct PIC_BUFFER_CONFIG_s *pic_config)
{
	int ret = -1;
	int i;
	int pic_width = hw->init_pic_w;
	int pic_height = hw->init_pic_h;
	//int lcu_size = ((params->p.seq_flags >> 6) & 0x1) ?
128 : 64;
	int lcu_size = hw->current_lcu_size;

	/* Round frame up to 64x32, then to whole LCUs. */
	int pic_width_64 = (pic_width + 63) & (~0x3f);
	int pic_height_32 = (pic_height + 31) & (~0x1f);
	int pic_width_lcu = (pic_width_64 % lcu_size) ?
		pic_width_64 / lcu_size + 1
		: pic_width_64 / lcu_size;
	int pic_height_lcu = (pic_height_32 % lcu_size) ?
		pic_height_32 / lcu_size + 1
		: pic_height_32 / lcu_size;
	int lcu_total = pic_width_lcu * pic_height_lcu;
#ifdef MV_USE_FIXED_BUF
	u32 mpred_mv_end = hw->work_space_buf->mpred_mv.buf_start +
		hw->work_space_buf->mpred_mv.buf_size;
//#ifdef USE_DYNAMIC_MV_BUFFER
//	int32_t MV_MEM_UNIT = (lcu_size == 128) ? (19*4*16) : (19*16);
//	int32_t mv_buffer_size = (lcu_total*MV_MEM_UNIT);
//#else
	int32_t mv_buffer_size = hw->max_one_mv_buffer_size;
//#endif

#endif

	u32 y_adr = 0;
	int buf_size = 0;

	int losless_comp_header_size =
		compute_losless_comp_header_size(pic_width,
			pic_height);
	int losless_comp_body_size = compute_losless_comp_body_size(pic_width,
		pic_height, buf_alloc_depth == 10);
	int mc_buffer_size = losless_comp_header_size + losless_comp_body_size;
	int mc_buffer_size_h = (mc_buffer_size + 0xffff) >> 16;
	int mc_buffer_size_u_v = 0;
	int mc_buffer_size_u_v_h = 0;
	int dw_mode = get_double_write_mode_init(hw);

	hw->lcu_total = lcu_total;

	/* Double-write enabled and not DW-MMU (bit5): size the DW surface. */
	if (dw_mode && (dw_mode & 0x20) == 0) {
		int pic_width_dw = pic_width /
			get_double_write_ratio(dw_mode & 0xf);
		int pic_height_dw = pic_height /
			get_double_write_ratio(dw_mode & 0xf);

		int pic_width_64_dw = (pic_width_dw + 63) & (~0x3f);
		int pic_height_32_dw = (pic_height_dw + 31) & (~0x1f);
		int pic_width_lcu_dw = (pic_width_64_dw % lcu_size) ?
			pic_width_64_dw / lcu_size + 1
			: pic_width_64_dw / lcu_size;
		int pic_height_lcu_dw = (pic_height_32_dw % lcu_size) ?
			pic_height_32_dw / lcu_size + 1
			: pic_height_32_dw / lcu_size;
		int lcu_total_dw = pic_width_lcu_dw * pic_height_lcu_dw;
		mc_buffer_size_u_v = lcu_total_dw * lcu_size * lcu_size / 2;
		mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16;
		/*64k alignment*/
		buf_size = ((mc_buffer_size_u_v_h << 16) * 3);
		buf_size = ((buf_size + 0xffff) >> 16) << 16;
	}

	if (mc_buffer_size & 0xffff) /*64k alignment*/
		mc_buffer_size_h += 1;
	if ((!hw->mmu_enable) && ((dw_mode & 0x10) == 0))
		buf_size += (mc_buffer_size_h << 16);

#ifdef USE_SPEC_BUF_FOR_MMU_HEAD
	/* Compressed headers are carved out of the shared workspace. */
	if (hw->mmu_enable) {
		pic_config->header_adr =
			hw->work_space_buf->cm_header.buf_start +
			(pic_config->index * vav1_mmu_compress_header_size(hw));

#ifdef AOM_AV1_MMU_DW
		if (hw->dw_mmu_enable) {
			pic_config->header_dw_adr =
				hw->work_space_buf->cm_header_dw.buf_start +
				(pic_config->index * vav1_mmu_compress_header_size(hw));

		}
#endif
	}

#else
/*!USE_SPEC_BUF_FOR_MMU_HEAD*/
	/* Compressed headers were allocated per-picture via the bmmu box. */
	if (hw->mmu_enable) {
		pic_config->header_adr = decoder_bmmu_box_get_phy_addr(
			hw->bmmu_box, HEADER_BUFFER_IDX(pic_config->index));

#ifdef AOM_AV1_MMU_DW
		if (hw->dw_mmu_enable) {
			pic_config->header_dw_adr = decoder_bmmu_box_get_phy_addr(
				hw->bmmu_box, DW_HEADER_BUFFER_IDX(pic_config->index));

		}
		if (debug & AV1_DEBUG_BUFMGR_MORE) {
			pr_info("MMU dw header_adr (%d, %d) %d: %d\n",
				hw->dw_mmu_enable,
				DW_HEADER_BUFFER_IDX(pic_config->index),
				pic_config->index,
				pic_config->header_dw_adr);
		}
#endif

		if (debug & AV1_DEBUG_BUFMGR_MORE) {
			pr_info("MMU header_adr %d: %d\n",
				pic_config->index, pic_config->header_adr);
		}
	}
#endif

	i = pic_config->index;
#ifdef MV_USE_FIXED_BUF
	/* Only proceed if pic i's fixed MV slot fits inside the MV region. */
	if ((hw->work_space_buf->mpred_mv.buf_start +
		((i + 1) * mv_buffer_size))
		<= mpred_mv_end
		) {
#endif
	if (buf_size > 0) {
		ret = decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box,
			VF_BUFFER_IDX(i),
			buf_size, DRIVER_NAME,
			&pic_config->cma_alloc_addr);
		if (ret < 0) {
			pr_info(
			
			"decoder_bmmu_box_alloc_buf_phy idx %d size %d fail\n",
			VF_BUFFER_IDX(i),
			buf_size
			);
			return ret;
		}

		if (pic_config->cma_alloc_addr)
			y_adr = pic_config->cma_alloc_addr;
		else {
			pr_info(
			"decoder_bmmu_box_alloc_buf_phy idx %d size %d return null\n",
			VF_BUFFER_IDX(i),
			buf_size
			);
			return -1;
		}
	}
	{
		/*ensure get_pic_by_POC()
		not get the buffer not decoded*/
		pic_config->BUF_index = i;
		pic_config->lcu_total = lcu_total;

		pic_config->comp_body_size = losless_comp_body_size;
		pic_config->buf_size = buf_size;

		pic_config->mc_canvas_y = pic_config->index;
		pic_config->mc_canvas_u_v = pic_config->index;
		/* dw_mode bit4: split luma/chroma canvases per picture. */
		if (dw_mode & 0x10) {
			pic_config->dw_y_adr = y_adr;
			pic_config->dw_u_v_adr = y_adr +
				((mc_buffer_size_u_v_h << 16) << 1);

			pic_config->mc_canvas_y =
				(pic_config->index << 1);
			pic_config->mc_canvas_u_v =
				(pic_config->index << 1) + 1;
		} else if (dw_mode && (dw_mode & 0x20) == 0) {
			pic_config->dw_y_adr = y_adr;
			pic_config->dw_u_v_adr = pic_config->dw_y_adr +
				((mc_buffer_size_u_v_h << 16) << 1);
		}
#ifdef MV_USE_FIXED_BUF
		pic_config->mpred_mv_wr_start_addr =
			hw->work_space_buf->mpred_mv.buf_start +
			(pic_config->index * mv_buffer_size);
#endif
#ifdef DUMP_FILMGRAIN
		if (pic_config->index == fg_dump_index) {
			pic_config->fgs_table_adr = hw->fg_phy_addr;
			pr_info("set buffer %d film grain table 0x%x\n",
				pic_config->index, pic_config->fgs_table_adr);
		} else {
#endif
		if (hw->assit_task.use_sfgs) {
			pic_config->sfgs_table_phy = hw->fg_phy_addr + (pic_config->index * FGS_TABLE_SIZE);
			pic_config->sfgs_table_ptr = hw->fg_ptr + (pic_config->index * FGS_TABLE_SIZE);
		}
		pic_config->fgs_table_adr =
			hw->work_space_buf->fgs_table.buf_start +
			(pic_config->index * FGS_TABLE_SIZE);
		}

		if (debug) {
			pr_info
			("%s index %d BUF_index %d ",
			__func__, pic_config->index,
			pic_config->BUF_index);
			pr_info
			("comp_body_size %x comp_buf_size %x ",
			pic_config->comp_body_size,
			
			pic_config->buf_size);
			pr_info
			("mpred_mv_wr_start_adr %d\n",
			pic_config->mpred_mv_wr_start_addr);
			pr_info("dw_y_adr %d, pic_config->dw_u_v_adr =%d\n",
				pic_config->dw_y_adr,
				pic_config->dw_u_v_adr);
		}
		ret = 0;
	}
#ifdef MV_USE_FIXED_BUF
	}
#endif
	return ret;
}

#ifndef USE_SPEC_BUF_FOR_MMU_HEAD
/* Per-picture compressed-header size, by maximum stream resolution tier. */
static int vav1_mmu_compress_header_size(struct AV1HW_s *hw)
{
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h))
		return (MMU_COMPRESS_HEADER_SIZE_8K);

	if (IS_4K_SIZE(hw->max_pic_w, hw->max_pic_h))
		return MMU_COMPRESS_HEADER_SIZE_4K;

	return (MMU_COMPRESS_HEADER_SIZE_1080P);
}
#endif
/*#define FRAME_MMU_MAP_SIZE (MAX_FRAME_4K_NUM * 4)*/
/* Size of the MMU map for one frame: 4 bytes per page entry. */
static int vav1_frame_mmu_map_size(struct AV1HW_s *hw)
{
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h))
		return (MAX_FRAME_8K_NUM * 4);

	return (MAX_FRAME_4K_NUM * 4);
}

#ifdef AOM_AV1_MMU_DW
/* Double-write MMU map size: same sizing rule as the primary map. */
static int vaom_dw_frame_mmu_map_size(struct AV1HW_s *hw)
{
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h))
		return (MAX_FRAME_8K_NUM * 4);

	return (MAX_FRAME_4K_NUM * 4);
}
#endif

/*
 * Initialize every picture buffer in the frame-buffer pool: allocate
 * compressed headers (non-V4L2 path), reset indices/canvases and, when not
 * using V4L2, allocate and configure the actual frame storage.
 */
static void init_pic_list(struct AV1HW_s *hw)
{
	int i;
	struct AV1_Common_s *cm = &hw->common;
	struct PIC_BUFFER_CONFIG_s *pic_config;
	struct vdec_s *vdec = hw_to_vdec(hw);

#ifndef USE_SPEC_BUF_FOR_MMU_HEAD
	u32 header_size;

	if (!hw->is_used_v4l && (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) &&
		(get_double_write_mode(hw) != 0x10)) {
		header_size = vav1_mmu_compress_header_size(hw);
		/*alloc AV1 compress header first*/
		for (i = 0; i < hw->used_buf_num; i++) {
			unsigned long buf_addr;
			if (decoder_bmmu_box_alloc_buf_phy
				(hw->bmmu_box,
				HEADER_BUFFER_IDX(i), header_size,
				DRIVER_HEADER_NAME,
				&buf_addr) < 0) {
				av1_print(hw, 0, "%s malloc compress header failed %d\n",
					DRIVER_HEADER_NAME, i);
				hw->fatal_error |=
					DECODER_FATAL_ERROR_NO_MEM;
				return;
			}
#ifdef AOM_AV1_MMU_DW
			if (hw->dw_mmu_enable) {
				if (decoder_bmmu_box_alloc_buf_phy
					(hw->bmmu_box,
					DW_HEADER_BUFFER_IDX(i), header_size,
					DRIVER_HEADER_NAME,
					&buf_addr) < 0) {
					av1_print(hw, 0, "%s malloc compress dw header failed %d\n",
						DRIVER_HEADER_NAME, i);
					hw->fatal_error |= DECODER_FATAL_ERROR_NO_MEM;
					return;
				}
			}
#endif
		}
	}
#endif
	for (i = 0; i < hw->used_buf_num; i++) {
		pic_config = &cm->buffer_pool->frame_bufs[i].buf;
		pic_config->index = i;
		pic_config->BUF_index = -1;
		pic_config->mv_buf_index = -1;
		if (vdec->parallel_dec == 1) {
			pic_config->y_canvas_index = -1;
			pic_config->uv_canvas_index = -1;
		}
		pic_config->y_crop_width = hw->init_pic_w;
		pic_config->y_crop_height = hw->init_pic_h;
		pic_config->double_write_mode = get_double_write_mode(hw);
		hw->buffer_wrap[i] = i;

		if (!hw->is_used_v4l) {
			if (config_pic(hw, pic_config) < 0) {
				if (debug)
					av1_print(hw, 0, "Config_pic %d fail\n",
						pic_config->index);
				pic_config->index = -1;
				break;
			}

			if (pic_config->double_write_mode &&
				(pic_config->double_write_mode & 0x20) == 0) {
				set_canvas(hw, pic_config);
			}
		}
	}
	/* Mark the remaining slots (after a config_pic failure) unused. */
	for (; i < hw->used_buf_num; i++) {
		pic_config = &cm->buffer_pool->frame_bufs[i].buf;
		pic_config->index = -1;
		pic_config->BUF_index = -1;
		pic_config->mv_buf_index = -1;
		hw->buffer_wrap[i] = i;
		if (vdec->parallel_dec == 1) {
			pic_config->y_canvas_index = -1;
			pic_config->uv_canvas_index = -1;
		}
	}
	av1_print(hw, AV1_DEBUG_BUFMGR, "%s ok, used_buf_num = %d\n",
		__func__, hw->used_buf_num);

}

/*
 * Program the per-picture frame addresses (compressed header or DW luma /
 * chroma) into the HEVCD_MPP_ANC2AXI table and clear the IPP canvases.
 */
static void init_pic_list_hw(struct AV1HW_s *hw)
{
	int i;
	struct AV1_Common_s *cm = &hw->common;
	struct PIC_BUFFER_CONFIG_s *pic_config;
	/*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x0);*/
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR,
		(0x1 << 1) | (0x1 << 2));


	for (i = 0; i < hw->used_buf_num; i++) {
		pic_config = &cm->buffer_pool->frame_bufs[i].buf;
		if
		(pic_config->index < 0)
			break;

		if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) &&
			(get_double_write_mode(hw) != 0x10)) {
			/* Addresses are programmed in 32-byte units (>> 5). */
			WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA,
				pic_config->header_adr >> 5);
		} else {
			/*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
			 *	pic_config->mc_y_adr
			 *	| (pic_config->mc_canvas_y << 8) | 0x1);
			 */
			WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA,
				pic_config->dw_y_adr >> 5);
		}
#ifndef LOSLESS_COMPRESS_MODE
		/*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
		 *	pic_config->mc_u_v_adr
		 *	| (pic_config->mc_canvas_u_v << 8)| 0x1);
		 */
		WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA,
			pic_config->dw_u_v_adr >> 5);
#else
		if (pic_config->double_write_mode & 0x10) {
			WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA,
				pic_config->dw_u_v_adr >> 5);
		}
#endif
	}
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1);

#ifdef CHANGE_REMOVED
	/*Zero out canvas registers in IPP -- avoid simulation X*/
	WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
		(0 << 8) | (0 << 1) | 1);
#else
	WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
		(1 << 8) | (0 << 1) | 1);
#endif
	for (i = 0; i < 32; i++)
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
}


/* Debug helper: print the state of every slot in the frame buffer pool. */
static void dump_pic_list(struct AV1HW_s *hw)
{
	struct AV1_Common_s *const cm = &hw->common;
	struct PIC_BUFFER_CONFIG_s *pic_config;
	int i;
	for (i = 0; i < FRAME_BUFFERS; i++) {
		pic_config = &cm->buffer_pool->frame_bufs[i].buf;
		av1_print(hw, 0,
			"Buf(%d) index %d mv_buf_index %d ref_count %d vf_ref %d repeat_count %d dec_idx %d slice_type %d w/h %d/%d adr%ld\n",
			i,
			pic_config->index,
#ifndef MV_USE_FIXED_BUF
			pic_config->mv_buf_index,
#else
			-1,
#endif
			cm->buffer_pool->
			frame_bufs[i].ref_count,
			pic_config->vf_ref,
			pic_config->repeat_count,
			pic_config->decode_idx,
			pic_config->slice_type,
			pic_config->y_crop_width,
			pic_config->y_crop_height,
			pic_config->cma_alloc_addr
			);
	}
	return;
}

/* Buffer-manager release hook; body intentionally compiled out (#if 0). */
void av1_release_buf(AV1Decoder *pbi, RefCntBuffer *const buf)
{

#if 0

//def CHANGE_DONE + struct AV1HW_s *hw = (struct AV1HW_s *)(pbi->private_data); + if (!hw->mmu_enable) + return; + //release_buffer_4k(&av1_mmumgr_m, buf->buf.index); + decoder_mmu_box_free_idx(hw->mmu_box, buf->buf.index); +#ifdef AOM_AV1_MMU_DW + //release_buffer_4k(&av1_mmumgr_dw, buf->buf.index); + decoder_mmu_box_free_idx(hw->mmu_box_dw, buf->buf.index); +#endif + +#endif +} + +void av1_release_bufs(struct AV1HW_s *hw) +{ + AV1_COMMON *cm = &hw->common; + RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; + int i; + + for (i = 0; i < FRAME_BUFFERS; ++i) { + if (frame_bufs[i].buf.vf_ref == 0 && + frame_bufs[i].ref_count == 0 && + frame_bufs[i].buf.index >= 0) { + if (frame_bufs[i].buf.aux_data_buf) + release_aux_data(hw, &frame_bufs[i].buf); + } + } +} + +#ifdef DEBUG_CMD +static void d_fill_zero(struct AV1HW_s *hw, unsigned int phyadr, int size) +{ + WRITE_VREG(HEVC_DBG_LOG_ADR, phyadr); + WRITE_VREG(DEBUG_REG1, + 0x20000000 | size); + debug_cmd_wait_count = 0; + debug_cmd_wait_type = 1; + while ((READ_VREG(DEBUG_REG1) & 0x1) == 0 + && debug_cmd_wait_count < 0x7fffffff) { + debug_cmd_wait_count++; + } + + WRITE_VREG(DEBUG_REG1, 0); + debug_cmd_wait_type = 0; +} + +static void d_dump(struct AV1HW_s *hw, unsigned int phyadr, int size, + struct file *fp, loff_t *wr_off) +{ + + int jj; + unsigned char *data = (unsigned char *) + (hw->ucode_log_addr); + WRITE_VREG(HEVC_DBG_LOG_ADR, hw->ucode_log_phy_addr); + + WRITE_VREG(HEVC_D_ADR, phyadr); + WRITE_VREG(DEBUG_REG1, + 0x10000000 | size); + + debug_cmd_wait_count = 0; + debug_cmd_wait_type = 3; + while ((READ_VREG(DEBUG_REG1) & 0x1) == 0 + && debug_cmd_wait_count < 0x7fffffff) { + debug_cmd_wait_count++; + } + + if (fp) { + vfs_write(fp, data, + size, wr_off); + + } else { + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + av1_print(hw, 0, + "%06x:", jj); + av1_print_cont(hw, 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + av1_print_cont(hw, 0, + "\n"); + } + av1_print(hw, 0, "\n"); + } + 
	WRITE_VREG(DEBUG_REG1, 0);
	debug_cmd_wait_type = 0;

}

/* Zero the colocated-MV buffer of @pic_config through the ucode helper. */
static void mv_buffer_fill_zero(struct AV1HW_s *hw, struct PIC_BUFFER_CONFIG_s *pic_config)
{
	pr_info("fill dummy data pic index %d colocate addreses %x size %x\n",
		pic_config->index, pic_config->mpred_mv_wr_start_addr,
		hw->m_mv_BUF[pic_config->mv_buf_index].size);
	d_fill_zero(hw, pic_config->mpred_mv_wr_start_addr,
		hw->m_mv_BUF[pic_config->mv_buf_index].size);
}

/*
 * Dump a picture's colocated-MV buffer to /data/tmp/colocate<N> in
 * UCODE_LOG_BUF_SIZE chunks.
 * NOTE(review): relies on get_fs()/set_fs(KERNEL_DS) and vfs_write(),
 * which were removed from mainline kernels (v5.10+) — this debug path only
 * builds on the older vendor kernel this driver targets.
 */
static void dump_mv_buffer(struct AV1HW_s *hw, struct PIC_BUFFER_CONFIG_s *pic_config)
{

	unsigned int adr, size;
	unsigned int adr_end = pic_config->mpred_mv_wr_start_addr +
		hw->m_mv_BUF[pic_config->mv_buf_index].size;
	mm_segment_t old_fs;
	loff_t off = 0;
	int mode = O_CREAT | O_WRONLY | O_TRUNC;
	char file[64];
	struct file *fp;
	sprintf(&file[0], "/data/tmp/colocate%d", hw->frame_count-1);
	fp = filp_open(file, mode, 0666);
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	for (adr = pic_config->mpred_mv_wr_start_addr;
		adr < adr_end;
		adr += UCODE_LOG_BUF_SIZE) {
		size = UCODE_LOG_BUF_SIZE;
		if (size > (adr_end - adr))
			size = adr_end - adr;
		pr_info("dump pic index %d colocate addreses %x size %x\n",
			pic_config->index, adr, size);
		d_dump(hw, adr, size, fp, &off);
	}
	set_fs(old_fs);
	vfs_fsync(fp, 0);

	filp_close(fp, current->files);
}

#endif

/*
 * Program the hardware with the current picture's dimensions and the
 * compressed-surface (lossless) sizes derived from them.
 */
static int config_pic_size(struct AV1HW_s *hw, unsigned short bit_depth)
{
	uint32_t data32;
	struct AV1_Common_s *cm = &hw->common;
	struct PIC_BUFFER_CONFIG_s *cur_pic_config = &cm->cur_frame->buf;
	int losless_comp_header_size, losless_comp_body_size;
#ifdef AOM_AV1_MMU_DW
	int losless_comp_header_size_dw, losless_comp_body_size_dw;
#endif
	av1_print(hw, AOM_DEBUG_HW_MORE,
		" #### config_pic_size ####, bit_depth = %d\n", bit_depth);

	frame_width = cur_pic_config->y_crop_width;
	frame_height = cur_pic_config->y_crop_height;
	cur_pic_config->bit_depth = bit_depth;
	cur_pic_config->double_write_mode = get_double_write_mode(hw);

	/*
	use fixed maximum size // 128x128/4/4*3-bits = 384 Bytes
	seg_map_size =
	((frame_width + 127) >> 7) * ((frame_height + 127) >> 7) * 384 ;
	*/
	WRITE_VREG(HEVC_PARSER_PICTURE_SIZE,
		(frame_height << 16) | frame_width);
#ifdef DUAL_DECODE
#else
	WRITE_VREG(HEVC_ASSIST_PIC_SIZE_FB_READ,
		(frame_height << 16) | frame_width);
#endif
#ifdef AOM_AV1_MMU

	//alloc_mmu(&av1_mmumgr_m, cm->cur_frame->buf.index, frame_width, frame_height, bit_depth);
#endif
#ifdef AOM_AV1_MMU_DW

	//alloc_mmu(&av1_mmumgr_dw, cm->cur_frame->buf.index, frame_width, frame_height, bit_depth);
	losless_comp_header_size_dw =
		compute_losless_comp_header_size_dw(frame_width, frame_height);
	losless_comp_body_size_dw =
		compute_losless_comp_body_size_dw(frame_width, frame_height,
			(bit_depth == AOM_BITS_10));
#endif

	losless_comp_header_size =
		compute_losless_comp_header_size
		(frame_width, frame_height);
	losless_comp_body_size =
		compute_losless_comp_body_size(frame_width,
			frame_height, (bit_depth == AOM_BITS_10));

	cur_pic_config->comp_body_size = losless_comp_body_size;

	av1_print(hw, AOM_DEBUG_HW_MORE,
		"%s: width %d height %d depth %d head_size 0x%x body_size 0x%x\r\n",
		__func__, frame_width, frame_height, bit_depth,
		losless_comp_header_size, losless_comp_body_size);
#ifdef LOSLESS_COMPRESS_MODE
	/* SAO_CTRL5 bit9: cleared for 10-bit, set for 8-bit. */
	data32 = READ_VREG(HEVC_SAO_CTRL5);
	if (bit_depth == AOM_BITS_10)
		data32 &= ~(1<<9);
	else
		data32 |= (1<<9);

	WRITE_VREG(HEVC_SAO_CTRL5, data32);

	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) &&
		(get_double_write_mode(hw) != 0x10)) {
		WRITE_VREG(HEVCD_MPP_DECOMP_CTL1,(0x1<< 4)); // bit[4] : paged_mem_mode
	} else {
		if (bit_depth == AOM_BITS_10)
			WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0<<3)); // bit[3] smem mdoe
		else
			WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (1<<3)); // bit[3] smem mdoe
	}
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL2,
		(losless_comp_body_size >> 5));
	/*
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,
	(0xff<<20) | (0xff<<10) | 0xff);
//8-bit mode + */ + WRITE_VREG(HEVC_CM_BODY_LENGTH, + losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, + losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, + losless_comp_header_size); + + if (get_double_write_mode(hw) & 0x10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); + +#else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1,0x1 << 31); +#endif +#ifdef AOM_AV1_MMU_DW + if (get_double_write_mode(hw) & 0x20) { + WRITE_VREG(HEVC_CM_BODY_LENGTH2, losless_comp_body_size_dw); + WRITE_VREG(HEVC_CM_HEADER_OFFSET2, losless_comp_body_size_dw); + WRITE_VREG(HEVC_CM_HEADER_LENGTH2, losless_comp_header_size_dw); + } +#endif + return 0; + +} + +static int config_mc_buffer(struct AV1HW_s *hw, unsigned short bit_depth, unsigned char inter_flag) +{ + int32_t i; + AV1_COMMON *cm = &hw->common; + PIC_BUFFER_CONFIG* cur_pic_config = &cm->cur_frame->buf; + uint8_t scale_enable = 0; + + av1_print(hw, AOM_DEBUG_HW_MORE, + " #### config_mc_buffer %s ####\n", + inter_flag ? "inter" : "intra"); + +#ifdef DEBUG_PRINT + if (debug&AOM_AV1_DEBUG_BUFMGR) + av1_print(hw, AOM_DEBUG_HW_MORE, + "config_mc_buffer entered .....\n"); +#endif + + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0<<1) | 1); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (cur_pic_config->order_hint<<24) | + (cur_pic_config->mc_canvas_u_v<<16) | + (cur_pic_config->mc_canvas_u_v<<8)| + cur_pic_config->mc_canvas_y); + for (i = LAST_FRAME; i <= ALTREF_FRAME; i++) { + PIC_BUFFER_CONFIG *pic_config; //cm->frame_refs[i].buf; + if (inter_flag) + pic_config = av1_get_ref_frame_spec_buf(cm, i); + else + pic_config = cur_pic_config; + if (pic_config) { + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic_config->order_hint<<24) | + (pic_config->mc_canvas_u_v<<16) | + (pic_config->mc_canvas_u_v<<8) | + pic_config->mc_canvas_y); + if (inter_flag) + av1_print(hw, AOM_DEBUG_HW_MORE, + "refid 0x%x mc_canvas_u_v 0x%x mc_canvas_y 0x%x order_hint 0x%x\n", + i, pic_config->mc_canvas_u_v, + pic_config->mc_canvas_y, 
pic_config->order_hint); + } else { + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); + } + } + + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (0 << 1) | 1); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (cur_pic_config->order_hint << 24) | + (cur_pic_config->mc_canvas_u_v << 16) | + (cur_pic_config->mc_canvas_u_v << 8) | + cur_pic_config->mc_canvas_y); + for (i = LAST_FRAME; i <= ALTREF_FRAME; i++) { + PIC_BUFFER_CONFIG *pic_config; + if (inter_flag) + pic_config = av1_get_ref_frame_spec_buf(cm, i); + else + pic_config = cur_pic_config; + + if (pic_config) { + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic_config->order_hint << 24)| + (pic_config->mc_canvas_u_v << 16) | + (pic_config->mc_canvas_u_v << 8) | + pic_config->mc_canvas_y); + } else { + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); + } + } + + WRITE_VREG(AV1D_MPP_REFINFO_TBL_ACCCONFIG, + (0x1 << 2) | (0x0 <<3)); // auto_inc start index:0 field:0 + for (i = 0; i <= ALTREF_FRAME; i++) { + int32_t ref_pic_body_size; + struct scale_factors * sf = NULL; + PIC_BUFFER_CONFIG *pic_config; + + if (inter_flag && i >= LAST_FRAME) + pic_config = av1_get_ref_frame_spec_buf(cm, i); + else + pic_config = cur_pic_config; + + if (pic_config) { + ref_pic_body_size = + compute_losless_comp_body_size(pic_config->y_crop_width, + pic_config->y_crop_height, (bit_depth == AOM_BITS_10)); + + WRITE_VREG(AV1D_MPP_REFINFO_DATA, pic_config->y_crop_width); + WRITE_VREG(AV1D_MPP_REFINFO_DATA, pic_config->y_crop_height); + if (inter_flag && i >= LAST_FRAME) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "refid %d: ref width/height(%d,%d), cur width/height(%d,%d) ref_pic_body_size 0x%x\n", + i, pic_config->y_crop_width, pic_config->y_crop_height, + cur_pic_config->y_crop_width, cur_pic_config->y_crop_height, + ref_pic_body_size); + } + } else { + ref_pic_body_size = 0; + WRITE_VREG(AV1D_MPP_REFINFO_DATA, 0); + WRITE_VREG(AV1D_MPP_REFINFO_DATA, 0); + } + + if (inter_flag && i >= LAST_FRAME) + sf = 
av1_get_ref_scale_factors(cm, i); + + if ((sf != NULL) && av1_is_scaled(sf)) { + scale_enable |= (1 << i); + } + + if (sf) { + WRITE_VREG(AV1D_MPP_REFINFO_DATA, sf->x_scale_fp); + WRITE_VREG(AV1D_MPP_REFINFO_DATA, sf->y_scale_fp); + + av1_print(hw, AOM_DEBUG_HW_MORE, + "x_scale_fp %d, y_scale_fp %d\n", + sf->x_scale_fp, sf->y_scale_fp); + } else { + WRITE_VREG(AV1D_MPP_REFINFO_DATA, REF_NO_SCALE); //1<<14 + WRITE_VREG(AV1D_MPP_REFINFO_DATA, REF_NO_SCALE); + } + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + WRITE_VREG(AV1D_MPP_REFINFO_DATA, 0); + } else + WRITE_VREG(AV1D_MPP_REFINFO_DATA, ref_pic_body_size >> 5); + + } + WRITE_VREG(AV1D_MPP_REF_SCALE_ENBL, scale_enable); + WRITE_VREG(PARSER_REF_SCALE_ENBL, scale_enable); + av1_print(hw, AOM_DEBUG_HW_MORE, + "WRITE_VREG(PARSER_REF_SCALE_ENBL, 0x%x)\n", + scale_enable); + return 0; +} + +static void clear_mpred_hw(struct AV1HW_s *hw) +{ + unsigned int data32; + + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 &= (~(1 << 6)); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); +} + +static void config_mpred_hw(struct AV1HW_s *hw, unsigned char inter_flag) +{ + AV1_COMMON *cm = &hw->common; + PIC_BUFFER_CONFIG *cur_pic_config = &cm->cur_frame->buf; + //PIC_BUFFER_CONFIG *last_frame_pic_config = NULL; + int i, j, pos, reg_i; + int mv_cal_tpl_count = 0; + unsigned int mv_ref_id[MFMV_STACK_SIZE] = {0, 0, 0}; + unsigned ref_offset_reg[] = { + HEVC_MPRED_L0_REF06_POC, + HEVC_MPRED_L0_REF07_POC, + HEVC_MPRED_L0_REF08_POC, + HEVC_MPRED_L0_REF09_POC, + HEVC_MPRED_L0_REF10_POC, + HEVC_MPRED_L0_REF11_POC, + }; + unsigned ref_buf_reg[] = { + HEVC_MPRED_L0_REF03_POC, + HEVC_MPRED_L0_REF04_POC, + HEVC_MPRED_L0_REF05_POC + }; + unsigned ref_offset_val[6] = + {0, 0, 0, 0, 0, 0}; + unsigned ref_buf_val[3] = {0, 0, 0}; + + uint32_t data32; + int32_t mpred_curr_lcu_x; + int32_t mpred_curr_lcu_y; + //int32_t mpred_mv_rd_end_addr; + + av1_print(hw, AOM_DEBUG_HW_MORE, + " #### config_mpred_hw 
####\n"); + + /*if (cm->prev_frame) + last_frame_pic_config = &cm->prev_frame->buf; + mpred_mv_rd_end_addr = last_frame_pic_config->mpred_mv_wr_start_addr + + (last_frame_pic_config->lcu_total * MV_MEM_UNIT); + */ + + data32 = READ_VREG(HEVC_MPRED_CURR_LCU); + mpred_curr_lcu_x =data32 & 0xffff; + mpred_curr_lcu_y =(data32>>16) & 0xffff; + + av1_print(hw, AOM_DEBUG_HW_MORE, + "cur pic index %d\n", cur_pic_config->index); + /*printk("cur pic index %d col pic index %d\n", + cur_pic_config->index, last_frame_pic_config->index);*/ + + //WRITE_VREG(HEVC_MPRED_CTRL3,0x24122412); +#ifdef CO_MV_COMPRESS + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) { + WRITE_VREG(HEVC_MPRED_CTRL3,0x10151015); // 'd10, 'd21 for AV1 + } else { + WRITE_VREG(HEVC_MPRED_CTRL3,0x13151315); // 'd19, 'd21 for AV1 + } +#else + WRITE_VREG(HEVC_MPRED_CTRL3,0x13151315); // 'd19, 'd21 for AV1 +#endif + WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, + hw->pbi->work_space_buf->mpred_above.buf_start); + +#if 0 + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 &= (~(1<<6)); + data32 |= (cm->use_prev_frame_mvs << 6); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); +#endif + if (inter_flag) { + /* config sign_bias */ + //data32 = (cm->cur_frame_force_integer_mv & 0x1) << 9; + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 &= (~(0xff << 12)); + //for (i = LAST_FRAME; i <= ALTREF_FRAME; i++) { + /* HEVC_MPRED_CTRL4[bit 12] is for cm->ref_frame_sign_bias[0] + instead of cm->ref_frame_sign_bias[LAST_FRAME] */ + for (i = 0; i <= ALTREF_FRAME; i++) { + data32 |= ((cm->ref_frame_sign_bias[i] & 0x1) << (12 + i)); + } + WRITE_VREG(HEVC_MPRED_CTRL4, data32); + av1_print(hw, AOM_DEBUG_HW_MORE, + "WRITE_VREG(HEVC_MPRED_CTRL4, 0x%x)\n", data32); + } +#if 1 + data32 = ((cm->seq_params.order_hint_info.enable_order_hint << 27) | + (cm->seq_params.order_hint_info.order_hint_bits_minus_1 << 24) | + (cm->cur_frame->order_hint << 16 )); +#ifdef CO_MV_COMPRESS + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) { + data32 |= (0x10 << 
8) | (0x10 << 0); + } else { + data32 |= (0x13 << 8) | (0x13 << 0); + } +#else + data32 |= (0x13 << 8) | (0x13 << 0); +#endif + +#else + data32 = READ_VREG(HEVC_MPRED_L0_REF00_POC); + data32 &= (~(0xff << 16)); + data32 |= (cm->cur_frame->order_hint & 0xff); + data32 &= (~(1 << 27)); + data32 |= (cm->seq_params.order_hint_info.enable_order_hint << 27); +#endif + WRITE_VREG(HEVC_MPRED_L0_REF00_POC, data32); + av1_print(hw, AOM_DEBUG_HW_MORE, + "WRITE_VREG(HEVC_MPRED_L0_REF00_POC, 0x%x)\n", data32); + + if (inter_flag) { + /* config ref_buf id and order hint */ + data32 = 0; + pos = 25; + reg_i = 0; + for (i = ALTREF_FRAME; i >= LAST_FRAME; i--) { + PIC_BUFFER_CONFIG *pic_config = + av1_get_ref_frame_spec_buf(cm, i); + if (pic_config) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "pic_config for %d th ref: index %d, reg[%d] pos %d\n", + i, pic_config->index, reg_i, pos); + data32 |= ((pic_config->index < 0)? 0 : pic_config->index) << pos; + } else + av1_print(hw, AOM_DEBUG_HW_MORE, + "pic_config is null for %d th ref\n", i); + if (pos == 0) { + //WRITE_VREG(ref_buf_reg[reg_i], data32); + ref_buf_val[reg_i] = data32; + av1_print(hw, AOM_DEBUG_HW_MORE, + "ref_buf_reg[%d], WRITE_VREG(0x%x, 0x%x)\n", + reg_i, ref_buf_reg[reg_i], data32); + reg_i++; + data32 = 0; + pos = 24; //for P_HEVC_MPRED_L0_REF04_POC + } else { + if (pos == 24) + pos -= 8; //for P_HEVC_MPRED_L0_REF04_POC + else + pos -= 5; //for P_HEVC_MPRED_L0_REF03_POC + } + } + for (i = ALTREF_FRAME; i >= LAST_FRAME; i--) { + PIC_BUFFER_CONFIG *pic_config = + av1_get_ref_frame_spec_buf(cm, i); + if (pic_config) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "pic_config for %d th ref: order_hint %d, reg[%d] pos %d\n", + i, pic_config->order_hint, reg_i, pos); + data32 |= ((pic_config->index < 0)? 
0 : pic_config->order_hint) << pos; + } else + av1_print(hw, AOM_DEBUG_HW_MORE, + "pic_config is null for %d th ref\n", i); + if (pos == 0) { + //WRITE_VREG(ref_buf_reg[reg_i], data32); + ref_buf_val[reg_i] = data32; + av1_print(hw, AOM_DEBUG_HW_MORE, + "ref_buf_reg[%d], WRITE_VREG(0x%x, 0x%x)\n", + reg_i, ref_buf_reg[reg_i], data32); + reg_i++; + data32 = 0; + pos = 24; + } else + pos -= 8; + } + if (pos != 24) { + //WRITE_VREG(ref_buf_reg[reg_i], data32); + ref_buf_val[reg_i] = data32; + av1_print(hw, AOM_DEBUG_HW_MORE, + "ref_buf_reg[%d], WRITE_VREG(0x%x, 0x%x)\n", + reg_i, ref_buf_reg[reg_i], data32); + } + /* config ref_offset */ + data32 = 0; + pos = 24; + mv_cal_tpl_count = 0; + reg_i = 0; + for (i = 0; i < cm->mv_ref_id_index; i++) { + if (cm->mv_cal_tpl_mvs[i]) { + mv_ref_id[mv_cal_tpl_count] = cm->mv_ref_id[i]; + mv_cal_tpl_count++; + for (j = LAST_FRAME; j <= ALTREF_FRAME; j++) { + /*offset can be negative*/ + unsigned char offval = + cm->mv_ref_offset[i][j] & 0xff; + data32 |= (offval << pos); + if (pos == 0) { + //WRITE_VREG(ref_offset_reg[reg_i], data32); + ref_offset_val[reg_i] = data32; + av1_print(hw, AOM_DEBUG_HW_MORE, + "ref_offset_reg[%d], WRITE_VREG(0x%x, 0x%x)\n", + reg_i, ref_offset_reg[reg_i], data32); + reg_i++; + data32 = 0; + pos = 24; + } else + pos -= 8; + } + } + } + if (pos != 24) { + //WRITE_VREG(ref_offset_reg[reg_i], data32); + ref_offset_val[reg_i] = data32; + av1_print(hw, AOM_DEBUG_HW_MORE, + "ref_offset_reg[%d], WRITE_VREG(0x%x, 0x%x)\n", + reg_i, ref_offset_reg[reg_i], data32); + } + + data32 = ref_offset_val[5] | //READ_VREG(HEVC_MPRED_L0_REF11_POC) | + mv_cal_tpl_count | (mv_ref_id[0] << 2) | + (mv_ref_id[1] << 5) | (mv_ref_id[2] << 8); + ref_offset_val[5] = data32; + //WRITE_VREG(HEVC_MPRED_L0_REF11_POC, data32); + av1_print(hw, AOM_DEBUG_HW_MORE, + "WRITE_VREG(HEVC_MPRED_L0_REF11_POC 0x%x, 0x%x)\n", + HEVC_MPRED_L0_REF11_POC, data32); + } + for (i = 0; i < 3; i++) + WRITE_VREG(ref_buf_reg[i], ref_buf_val[i]); + for (i = 0; 
i < 6; i++) + WRITE_VREG(ref_offset_reg[i], ref_offset_val[i]); + + WRITE_VREG(HEVC_MPRED_MV_WR_START_ADDR, + cur_pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_WPTR, + cur_pic_config->mpred_mv_wr_start_addr); + + if (inter_flag) { + for (i = 0; i < mv_cal_tpl_count; i++) { + PIC_BUFFER_CONFIG *pic_config = + av1_get_ref_frame_spec_buf(cm, mv_ref_id[i]); + if (pic_config == NULL) + continue; + if (i == 0) { + WRITE_VREG(HEVC_MPRED_MV_RD_START_ADDR, + pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_RPTR, + pic_config->mpred_mv_wr_start_addr); + } else if (i == 1) { + WRITE_VREG(HEVC_MPRED_L0_REF01_POC, + pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_RPTR_1, + pic_config->mpred_mv_wr_start_addr); + } else if (i == 2) { + WRITE_VREG(HEVC_MPRED_L0_REF02_POC, + pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_RPTR_2, + pic_config->mpred_mv_wr_start_addr); + } else { + av1_print(hw, AOM_DEBUG_HW_MORE, + "%s: mv_ref_id error\n", __func__); + } + } + } + data32 = READ_VREG(HEVC_MPRED_CTRL0); + data32 &= ~((1 << 10) | (1 << 11)); + data32 |= (1 << 10); /*write enable*/ + av1_print(hw, AOM_DEBUG_HW_MORE, + "current_frame.frame_type=%d, cur_frame->frame_type=%d, allow_ref_frame_mvs=%d\n", + cm->current_frame.frame_type, cm->cur_frame->frame_type, + cm->allow_ref_frame_mvs); + + if (av1_frame_is_inter(&hw->common)) { + if (cm->allow_ref_frame_mvs) { + data32 |= (1 << 11); /*read enable*/ + } + } + av1_print(hw, AOM_DEBUG_HW_MORE, + "WRITE_VREG(HEVC_MPRED_CTRL0 0x%x, 0x%x)\n", + HEVC_MPRED_CTRL0, data32); + WRITE_VREG(HEVC_MPRED_CTRL0, data32); + /* + printk("config_mpred: (%x) wr_start_addr %x from indx %d; + (%x) rd_start_addr %x from index %d\n", + cur_pic_config, cur_pic_config->mpred_mv_wr_start_addr, cur_pic_config->index, + last_frame_pic_config, last_frame_pic_config->mpred_mv_wr_start_addr, last_frame_pic_config->index); + data32 = ((pbi->lcu_x_num - pbi->tile_width_lcu)*MV_MEM_UNIT); + 
WRITE_VREG(HEVC_MPRED_MV_WR_ROW_JUMP,data32); + WRITE_VREG(HEVC_MPRED_MV_RD_ROW_JUMP,data32); + + WRITE_VREG(HEVC_MPRED_MV_RD_END_ADDR, mpred_mv_rd_end_addr); + */ +} + +static void config_sao_hw(struct AV1HW_s *hw, union param_u *params) +{ + /* + !!!!!!!!!!!!!!!!!!!!!!!!!TODO .... !!!!!!!!!!! + mem_map_mode, endian, get_double_write_mode + */ + AV1_COMMON *cm = &hw->common; + PIC_BUFFER_CONFIG* pic_config = &cm->cur_frame->buf; + uint32_t data32; + int32_t lcu_size = + ((params->p.seq_flags >> 6) & 0x1) ? 128 : 64; + int32_t mc_buffer_size_u_v = + pic_config->lcu_total*lcu_size*lcu_size/2; + int32_t mc_buffer_size_u_v_h = + (mc_buffer_size_u_v + 0xffff)>>16; //64k alignment + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] #### config_sao_hw ####, lcu_size %d\n", lcu_size); + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] lcu_total : %d\n", pic_config->lcu_total); + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] mc_y_adr : 0x%x\n", pic_config->mc_y_adr); + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] mc_u_v_adr : 0x%x\n", pic_config->mc_u_v_adr); + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] header_adr : 0x%x\n", pic_config->header_adr); +#ifdef AOM_AV1_MMU_DW + if (get_double_write_mode(hw) & 0x20) + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] header_dw_adr : 0x%x\n", pic_config->header_dw_adr); +#endif + data32 = READ_VREG(HEVC_SAO_CTRL9) | (1 << 1); + WRITE_VREG(HEVC_SAO_CTRL9, data32); + + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 |= (0x1 << 14); /* av1 mode */ + data32 |= (0xff << 16); /* dw {v1,v0,h1,h0} ctrl_y_cbus */ + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + WRITE_VREG(HEVC_SAO_CTRL0, + lcu_size == 128 ? 
0x7 : 0x6); /*lcu_size_log2*/ +#ifdef LOSLESS_COMPRESS_MODE + WRITE_VREG(HEVC_CM_BODY_START_ADDR, pic_config->mc_y_adr); +#ifdef AOM_AV1_MMU + WRITE_VREG(HEVC_CM_HEADER_START_ADDR, pic_config->header_adr); +#endif +#ifdef AOM_AV1_MMU_DW + if (get_double_write_mode(hw) & 0x20) { + WRITE_VREG(HEVC_CM_HEADER_START_ADDR2, pic_config->header_dw_adr); + } +#endif +#else +/*!LOSLESS_COMPRESS_MODE*/ + WRITE_VREG(HEVC_SAO_Y_START_ADDR, pic_config->mc_y_adr); +#endif + + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] sao_body_addr:%x\n", pic_config->mc_y_adr); + //printk("[config_sao_hw] sao_header_addr:%x\n", pic_config->mc_y_adr + losless_comp_body_size ); + +#ifdef VPU_FILMGRAIN_DUMP + // Let Microcode to increase + // WRITE_VREG(HEVC_FGS_TABLE_START, pic_config->fgs_table_adr); +#else + WRITE_VREG(HEVC_FGS_TABLE_START, pic_config->fgs_table_adr); +#endif + WRITE_VREG(HEVC_FGS_TABLE_LENGTH, FGS_TABLE_SIZE * 8); + av1_print(hw, AOM_DEBUG_HW_MORE, + "[config_sao_hw] fgs_table adr:0x%x , length 0x%x bits\n", + pic_config->fgs_table_adr, FGS_TABLE_SIZE * 8); + + data32 = (mc_buffer_size_u_v_h<<16)<<1; + //printk("data32 = %x, mc_buffer_size_u_v_h = %x, lcu_total = %x\n", data32, mc_buffer_size_u_v_h, pic_config->lcu_total); + WRITE_VREG(HEVC_SAO_Y_LENGTH ,data32); + +#ifndef LOSLESS_COMPRESS_MODE + WRITE_VREG(HEVC_SAO_C_START_ADDR, pic_config->mc_u_v_adr); +#else +#endif + + data32 = (mc_buffer_size_u_v_h<<16); + WRITE_VREG(HEVC_SAO_C_LENGTH ,data32); + +#ifndef LOSLESS_COMPRESS_MODE + /* multi tile to do... 
*/ + WRITE_VREG(HEVC_SAO_Y_WPTR, pic_config->mc_y_adr); + + WRITE_VREG(HEVC_SAO_C_WPTR, pic_config->mc_u_v_adr); +#else + if (get_double_write_mode(hw) && + (get_double_write_mode(hw) & 0x20) == 0) { + WRITE_VREG(HEVC_SAO_Y_START_ADDR, pic_config->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_START_ADDR, pic_config->dw_u_v_adr); + WRITE_VREG(HEVC_SAO_Y_WPTR, pic_config->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_WPTR, pic_config->dw_u_v_adr); + } else { + //WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0xffffffff); + //WRITE_VREG(HEVC_SAO_C_START_ADDR, 0xffffffff); + } +#endif + + +#ifndef AOM_AV1_NV21 +#ifdef AOM_AV1_MMU_DW + if (get_double_write_mode(hw) & 0x20) { + } +#endif +#endif + +#ifdef AOM_AV1_NV21 +#ifdef DOS_PROJECT + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + data32 |= (hw->mem_map_mode << 12); // [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + data32 &= (~0x3); + data32 |= 0x1; // [1]:dw_disable [0]:cm_disable + WRITE_VREG(HEVC_SAO_CTRL1, data32); + + data32 = READ_VREG(HEVC_SAO_CTRL5); // [23:22] dw_v1_ctrl [21:20] dw_v0_ctrl [19:18] dw_h1_ctrl [17:16] dw_h0_ctrl + data32 &= ~(0xff << 16); // set them all 0 for AOM_AV1_NV21 (no down-scale) + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + data32 |= (hw->mem_map_mode << 4); // [5:4] -- address_format 00:linear 01:32x32 10:64x32 + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#else +// m8baby test1902 + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + data32 |= (hw->mem_map_mode << 12); // [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + data32 &= (~0xff0); + //data32 |= 0x670; // Big-Endian per 64-bit + data32 |= 0x880; // Big-Endian per 64-bit + data32 &= (~0x3); + data32 |= 0x1; // [1]:dw_disable [0]:cm_disable + WRITE_VREG(HEVC_SAO_CTRL1, data32); + + data32 = READ_VREG(HEVC_SAO_CTRL5); // [23:22] dw_v1_ctrl [21:20] dw_v0_ctrl [19:18] dw_h1_ctrl [17:16] dw_h0_ctrl + data32 &= ~(0xff << 16); // set them all 0 for AOM_AV1_NV21 (no 
down-scale) + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + data32 |= (hw->mem_map_mode << 4); // [5:4] -- address_format 00:linear 01:32x32 10:64x32 + data32 &= (~0xF); + data32 |= 0x8; // Big-Endian per 64-bit + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#endif +#else +/*CHANGE_DONE nnn*/ + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + data32 |= (hw->mem_map_mode << 12); /* [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 */ + data32 &= (~0xff0); + /* data32 |= 0x670; // Big-Endian per 64-bit */ +#ifdef AOM_AV1_MMU_DW + if ((get_double_write_mode(hw) & 0x20) == 0) + data32 |= ((hw->endian >> 8) & 0xfff); /* Big-Endian per 64-bit */ +#else + data32 |= ((hw->endian >> 8) & 0xfff); /* Big-Endian per 64-bit */ +#endif + data32 &= (~0x3); /*[1]:dw_disable [0]:cm_disable*/ + if (get_double_write_mode(hw) == 0) + data32 |= 0x2; /*disable double write*/ + else if (get_double_write_mode(hw) & 0x10) + data32 |= 0x1; /*disable cm*/ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { /* >= G12A dw write control */ + unsigned int data; + data = READ_VREG(HEVC_DBLK_CFGB); + data &= (~0x300); /*[8]:first write enable (compress) [9]:double write enable (uncompress)*/ + if (get_double_write_mode(hw) == 0) + data |= (0x1 << 8); /*enable first write*/ + else if (get_double_write_mode(hw) & 0x10) + data |= (0x1 << 9); /*double write only*/ + else + data |= ((0x1 << 8) |(0x1 << 9)); + WRITE_VREG(HEVC_DBLK_CFGB, data); + } + + /* swap uv */ + if (hw->is_used_v4l) { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + data32 &= ~(1 << 8); /* NV21 */ + else + data32 |= (1 << 8); /* NV12 */ + } + data32 &= (~(3 << 14)); + data32 |= (2 << 14); + /* + * [31:24] ar_fifo1_axi_thred + * [23:16] ar_fifo0_axi_thred + * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes + * [13:12] axi_aformat, 0-Linear, 1-32x32, 
2-64x32 + * [11:08] axi_lendian_C + * [07:04] axi_lendian_Y + * [3] reserved + * [2] clk_forceon + * [1] dw_disable:disable double write output + * [0] cm_disable:disable compress output + */ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + + if (get_double_write_mode(hw) & 0x10) { + /* [23:22] dw_v1_ctrl + *[21:20] dw_v0_ctrl + *[19:18] dw_h1_ctrl + *[17:16] dw_h0_ctrl + */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /*set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } else { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) + WRITE_VREG(HEVC_SAO_CTRL26, 0); + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 &= (~(0xff << 16)); + if ((get_double_write_mode(hw) & 0xf) == 8) { + WRITE_VREG(HEVC_SAO_CTRL26, 0xf); + data32 |= (0xff << 16); + } else if ((get_double_write_mode(hw) & 0xf) == 2 || + (get_double_write_mode(hw) & 0xf) == 3) + data32 |= (0xff<<16); + else if ((get_double_write_mode(hw) & 0xf) == 4 || + (get_double_write_mode(hw) & 0xf) == 5) + data32 |= (0x33<<16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /* [5:4] -- address_format 00:linear 01:32x32 10:64x32 */ + data32 |= (hw->mem_map_mode << 4); + data32 &= (~0xf); + data32 |= (hw->endian & 0xf); /* valid only when double write only */ + + /* swap uv */ + if (hw->is_used_v4l) { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + data32 |= (1 << 12); /* NV21 */ + else + data32 &= ~(1 << 12); /* NV12 */ + } + data32 &= (~(3 << 8)); + data32 |= (2 << 8); + /* + * [3:0] little_endian + * [5:4] address_format 00:linear 01:32x32 10:64x32 + * [7:6] reserved + * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte + * [11:10] reserved + * [12] CbCr_byte_swap + * [31:13] reserved + */ + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); + +#endif + +} + + +#ifdef AOM_AV1_DBLK_INIT +/* + * Defines, 
declarations, sub-functions for av1 de-block loop filter Thr/Lvl table update + * - struct segmentation_lf is for loop filter only (removed something) + * - function "av1_loop_filter_init" and "av1_loop_filter_frame_init" will be instantiated in C_Entry + * - av1_loop_filter_init run once before decoding start + * - av1_loop_filter_frame_init run before every frame decoding start + * - set video format to AOM_AV1 is in av1_loop_filter_init + */ +#define MAX_LOOP_FILTER 63 +#define MAX_MODE_LF_DELTAS 2 +#define MAX_SEGMENTS 8 +#define MAX_MB_PLANE 3 + +typedef enum { + SEG_LVL_ALT_Q, // Use alternate Quantizer .... + SEG_LVL_ALT_LF_Y_V, // Use alternate loop filter value on y plane vertical + SEG_LVL_ALT_LF_Y_H, // Use alternate loop filter value on y plane horizontal + SEG_LVL_ALT_LF_U, // Use alternate loop filter value on u plane + SEG_LVL_ALT_LF_V, // Use alternate loop filter value on v plane + SEG_LVL_REF_FRAME, // Optional Segment reference frame + SEG_LVL_SKIP, // Optional Segment (0,0) + skip mode + SEG_LVL_GLOBALMV, + SEG_LVL_MAX +} SEG_LVL_FEATURES; + +static const SEG_LVL_FEATURES seg_lvl_lf_lut[MAX_MB_PLANE][2] = { + { SEG_LVL_ALT_LF_Y_V, SEG_LVL_ALT_LF_Y_H }, + { SEG_LVL_ALT_LF_U, SEG_LVL_ALT_LF_U }, + { SEG_LVL_ALT_LF_V, SEG_LVL_ALT_LF_V } +}; + +struct segmentation_lf { // for loopfilter only + uint8_t enabled; + /* + SEG_LVL_ALT_LF_Y_V feature_enable: seg_lf_info_y[bit7] + SEG_LVL_ALT_LF_Y_V data: seg_lf_info_y[bit0~6] + SEG_LVL_ALT_LF_Y_H feature enable: seg_lf_info_y[bit15] + SEG_LVL_ALT_LF_Y_H data: seg_lf_info_y[bit8~14] + */ + uint16_t seg_lf_info_y[8]; + /* + SEG_LVL_ALT_LF_U feature_enable: seg_lf_info_c[bit7] + SEG_LVL_ALT_LF_U data: seg_lf_info_c[bit0~6] + SEG_LVL_ALT_LF_V feature enable: seg_lf_info_c[bit15] + SEG_LVL_ALT_LF_V data: seg_lf_info_c[bit8~14] + */ + uint16_t seg_lf_info_c[8]; +}; + +typedef struct { + uint8_t mblim; + uint8_t lim; + uint8_t hev_thr; +} loop_filter_thresh; + +typedef struct loop_filter_info_n_s { + 
loop_filter_thresh lfthr[MAX_LOOP_FILTER + 1]; + uint8_t lvl[MAX_MB_PLANE][MAX_SEGMENTS][2][REF_FRAMES][MAX_MODE_LF_DELTAS]; +} loop_filter_info_n; + +struct loopfilter { + int32_t filter_level[2]; + int32_t filter_level_u; + int32_t filter_level_v; + + int32_t sharpness_level; + + uint8_t mode_ref_delta_enabled; + uint8_t mode_ref_delta_update; + + // 0 = Intra, Last, Last2+Last3, + // GF, BRF, ARF2, ARF + int8_t ref_deltas[REF_FRAMES]; + + // 0 = ZERO_MV, MV + int8_t mode_deltas[MAX_MODE_LF_DELTAS]; + + int32_t combine_vert_horz_lf; + + int32_t lf_pic_cnt; + +//#if LOOP_FILTER_BITMASK + //LoopFilterMask *lfm; + //size_t lfm_num; + //int lfm_stride; + //LpfSuperblockInfo neighbor_sb_lpf_info; +//#endif // LOOP_FILTER_BITMASK +}; +#ifdef DBG_LPF_DBLK_LVL +static int32_t myclamp(int32_t value, int32_t low, int32_t high) { + return value < low ? low : (value > high ? high : value); +} +#endif +/*static int8_t extend_sign_7bits(uint8_t value) { + return (((value>>6) & 0x1)<<7) | (value&0x7f); +}*/ + +// convert data to int8_t variable +// value : signed data (with any bitwidth<8) which is assigned to uint8_t variable as an input +// bw : bitwidth of signed data, (from 1 to 7) +static int8_t conv2int8 (uint8_t value, uint8_t bw) { + if (bw<1 || bw>7) return (int8_t)value; + else { + const uint8_t data_bits = value & ((1<<bw)-1); + const uint8_t sign_bit = (value>>(bw-1)) & 0x1; + const uint8_t sign_bit_ext = sign_bit | sign_bit<<1 | sign_bit<<2 | sign_bit<<3 | sign_bit<<4 | sign_bit<<5 | sign_bit<<6 | sign_bit<<7; + return (int8_t)((sign_bit_ext<<bw) | data_bits); + } +} + +static void av1_update_sharpness(loop_filter_info_n *lfi, int32_t sharpness_lvl) { + int32_t lvl; + + // For each possible value for the loop filter fill out limits + for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) { + // Set loop filter parameters that control sharpness. 
+ int32_t block_inside_limit = + lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4)); + + if (sharpness_lvl > 0) { + if (block_inside_limit > (9 - sharpness_lvl)) + block_inside_limit = (9 - sharpness_lvl); + } + + if (block_inside_limit < 1) + block_inside_limit = 1; + + lfi->lfthr[lvl].lim = (uint8_t)block_inside_limit; + lfi->lfthr[lvl].mblim = (uint8_t)(2 * (lvl + 2) + block_inside_limit); + } +} + +// instantiate this function once when decode is started +void av1_loop_filter_init(loop_filter_info_n *lfi, struct loopfilter *lf) { + int32_t i; + uint32_t data32; + + // init limits for given sharpness + av1_update_sharpness(lfi, lf->sharpness_level); + + // Write to register + for (i = 0; i < 32; i++) { + uint32_t thr; + thr = ((lfi->lfthr[i*2+1].lim & 0x3f)<<8) | + (lfi->lfthr[i*2+1].mblim & 0xff); + thr = (thr<<16) | ((lfi->lfthr[i*2].lim & 0x3f)<<8) | + (lfi->lfthr[i*2].mblim & 0xff); + WRITE_VREG(HEVC_DBLK_CFG9, thr); + } + // video format is AOM_AV1 + data32 = (0x57 << 8) | // 1st/2nd write both enable + (0x4 << 0); // aom_av1 video format + WRITE_VREG(HEVC_DBLK_CFGB, data32); + av1_print2(AOM_DEBUG_HW_MORE, + "[DBLK DEBUG] CFGB : 0x%x\n", data32); +} + +// perform this function per frame +void av1_loop_filter_frame_init(AV1Decoder* pbi, struct segmentation_lf *seg, + loop_filter_info_n *lfi, + struct loopfilter *lf, + int32_t pic_width) { + BuffInfo_t* buf_spec = pbi->work_space_buf; + int32_t i; +#ifdef DBG_LPF_DBLK_LVL + int32_t dir; + int32_t filt_lvl[MAX_MB_PLANE], filt_lvl_r[MAX_MB_PLANE]; + int32_t plane; + int32_t seg_id; +#endif + // n_shift is the multiplier for lf_deltas + // the multiplier is 1 for when filter_lvl is between 0 and 31; + // 2 when filter_lvl is between 32 and 63 + + // update limits if sharpness has changed + av1_update_sharpness(lfi, lf->sharpness_level); + + // Write to register + for (i = 0; i < 32; i++) { + uint32_t thr; + thr = ((lfi->lfthr[i*2+1].lim & 0x3f)<<8) + | (lfi->lfthr[i*2+1].mblim & 0xff); + thr = (thr<<16) | 
((lfi->lfthr[i*2].lim & 0x3f)<<8) + | (lfi->lfthr[i*2].mblim & 0xff); + WRITE_VREG(HEVC_DBLK_CFG9, thr); + } +#ifdef DBG_LPF_DBLK_LVL + filt_lvl[0] = lf->filter_level[0]; + filt_lvl[1] = lf->filter_level_u; + filt_lvl[2] = lf->filter_level_v; + + filt_lvl_r[0] = lf->filter_level[1]; + filt_lvl_r[1] = lf->filter_level_u; + filt_lvl_r[2] = lf->filter_level_v; + +#ifdef DBG_LPF_PRINT + printk("LF_PRINT: pic_cnt(%d) base_filter_level(%d,%d,%d,%d)\n", + lf->lf_pic_cnt, lf->filter_level[0], + lf->filter_level[1], lf->filter_level_u, lf->filter_level_v); +#endif + + for (plane = 0; plane < 3; plane++) { + if (plane == 0 && !filt_lvl[0] && !filt_lvl_r[0]) + break; + else if (plane == 1 && !filt_lvl[1]) + continue; + else if (plane == 2 && !filt_lvl[2]) + continue; + + for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) { // MAX_SEGMENTS==8 + for (dir = 0; dir < 2; ++dir) { + int32_t lvl_seg = (dir == 0) ? filt_lvl[plane] : filt_lvl_r[plane]; + //assert(plane >= 0 && plane <= 2); + const uint8_t seg_lf_info_y0 = seg->seg_lf_info_y[seg_id] & 0xff; + const uint8_t seg_lf_info_y1 = (seg->seg_lf_info_y[seg_id]>>8) & 0xff; + const uint8_t seg_lf_info_u = seg->seg_lf_info_c[seg_id] & 0xff; + const uint8_t seg_lf_info_v = (seg->seg_lf_info_c[seg_id]>>8) & 0xff; + const uint8_t seg_lf_info = (plane==2) ? seg_lf_info_v : (plane==1) ? + seg_lf_info_u : ((dir==0) ? seg_lf_info_y0 : seg_lf_info_y1); + const int8_t seg_lf_active = ((seg->enabled) && ((seg_lf_info>>7) & 0x1)); + const int8_t seg_lf_data = conv2int8(seg_lf_info,7); +#ifdef DBG_LPF_PRINT + const int8_t seg_lf_data_clip = (seg_lf_data>63) ? 63 : + (seg_lf_data<-63) ? 
-63 : seg_lf_data; +#endif + if (seg_lf_active) { + lvl_seg = myclamp(lvl_seg + (int32_t)seg_lf_data, 0, MAX_LOOP_FILTER); + } + +#ifdef DBG_LPF_PRINT + printk("LF_PRINT:plane(%d) seg_id(%d) dir(%d) seg_lf_info(%d,0x%x),lvl_seg(0x%x)\n", + plane,seg_id,dir,seg_lf_active,seg_lf_data_clip,lvl_seg); +#endif + + if (!lf->mode_ref_delta_enabled) { + // we could get rid of this if we assume that deltas are set to + // zero when not in use; encoder always uses deltas + memset(lfi->lvl[plane][seg_id][dir], lvl_seg, + sizeof(lfi->lvl[plane][seg_id][dir])); + } else { + int32_t ref, mode; + const int32_t scale = 1 << (lvl_seg >> 5); + const int32_t intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale; + lfi->lvl[plane][seg_id][dir][INTRA_FRAME][0] = + myclamp(intra_lvl, 0, MAX_LOOP_FILTER); +#ifdef DBG_LPF_PRINT + printk("LF_PRINT:ref_deltas[INTRA_FRAME](%d)\n",lf->ref_deltas[INTRA_FRAME]); +#endif + for (ref = LAST_FRAME; ref < REF_FRAMES; ++ref) { // LAST_FRAME==1 REF_FRAMES==8 + for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) { // MAX_MODE_LF_DELTAS==2 + const int32_t inter_lvl = + lvl_seg + lf->ref_deltas[ref] * scale + + lf->mode_deltas[mode] * scale; + lfi->lvl[plane][seg_id][dir][ref][mode] = + myclamp(inter_lvl, 0, MAX_LOOP_FILTER); +#ifdef DBG_LPF_PRINT + printk("LF_PRINT:ref_deltas(%d) mode_deltas(%d)\n", + lf->ref_deltas[ref], lf->mode_deltas[mode]); +#endif + } + } + } + } + } + } + +#ifdef DBG_LPF_PRINT + for (i = 0; i <= MAX_LOOP_FILTER; i++) { + printk("LF_PRINT:(%2d) thr=%d,blim=%3d,lim=%2d\n", + i, lfi->lfthr[i].hev_thr, + lfi->lfthr[i].mblim, lfi->lfthr[i].lim); + } + for (plane = 0; plane < 3; plane++) { + for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) { // MAX_SEGMENTS==8 + for (dir = 0; dir < 2; ++dir) { + int32_t mode; + for (mode = 0; mode < 2; ++mode) { + printk("assign 
{lvl[%d][%d][%d][0][%d],lvl[%d][%d][%d][1][%d],lvl[%d][%d][%d][2][%d],lvl[%d][%d][%d][3][%d],lvl[%d][%d][%d][4][%d],lvl[%d][%d][%d][5][%d],lvl[%d][%d][%d][6][%d],lvl[%d][%d][%d][7][%d]}={6'd%2d,6'd%2d,6'd%2d,6'd%2d,6'd%2d,6'd%2d,6'd%2d,6'd%2d};\n", + plane, seg_id, dir, mode, + plane, seg_id, dir, mode, + plane, seg_id, dir, mode, + plane, seg_id, dir, mode, + plane, seg_id, dir, mode, + plane, seg_id, dir, mode, + plane, seg_id, dir, mode, + plane, seg_id, dir, mode, + lfi->lvl[plane][seg_id][dir][0][mode], + lfi->lvl[plane][seg_id][dir][1][mode], + lfi->lvl[plane][seg_id][dir][2][mode], + lfi->lvl[plane][seg_id][dir][3][mode], + lfi->lvl[plane][seg_id][dir][4][mode], + lfi->lvl[plane][seg_id][dir][5][mode], + lfi->lvl[plane][seg_id][dir][6][mode], + lfi->lvl[plane][seg_id][dir][7][mode]); + } + } + } + } +#endif + // Write to register + for (i = 0; i < 192; i++) { + uint32_t level; + level = ((lfi->lvl[i>>6&3][i>>3&7][1][i&7][1] & 0x3f)<<24) | + ((lfi->lvl[i>>6&3][i>>3&7][1][i&7][0] & 0x3f)<<16) | + ((lfi->lvl[i>>6&3][i>>3&7][0][i&7][1] & 0x3f)<<8) | + (lfi->lvl[i>>6&3][i>>3&7][0][i&7][0] & 0x3f); + if (!lf->filter_level[0] && !lf->filter_level[1]) + level = 0; + WRITE_VREG(HEVC_DBLK_CFGA, level); + } +#endif // DBG_LPF_DBLK_LVL + +#ifdef DBG_LPF_DBLK_FORCED_OFF + if (lf->lf_pic_cnt == 2) { + printk("LF_PRINT: pic_cnt(%d) dblk forced off !!!\n", lf->lf_pic_cnt); + WRITE_VREG(HEVC_DBLK_DBLK0, 0); + } else + WRITE_VREG(HEVC_DBLK_DBLK0, + lf->filter_level[0] | lf->filter_level[1] << 6 | + lf->filter_level_u << 12 | lf->filter_level_v << 18); +#else + WRITE_VREG(HEVC_DBLK_DBLK0, + lf->filter_level[0] | lf->filter_level[1]<<6 | + lf->filter_level_u<<12 | lf->filter_level_v<<18); +#endif + for (i =0; i < 10; i++) + WRITE_VREG(HEVC_DBLK_DBLK1, + ((i<2) ? 
lf->mode_deltas[i&1] : lf->ref_deltas[(i-2)&7])); + for (i = 0; i < 8; i++) + WRITE_VREG(HEVC_DBLK_DBLK2, + (uint32_t)(seg->seg_lf_info_y[i]) | (uint32_t)(seg->seg_lf_info_c[i]<<16)); + + // Set P_HEVC_DBLK_CFGB again + { + uint32_t lpf_data32 = READ_VREG(HEVC_DBLK_CFGB); + if (lf->mode_ref_delta_enabled) + lpf_data32 |= (0x1<<28); // mode_ref_delta_enabled + else + lpf_data32 &= ~(0x1<<28); + if (seg->enabled) + lpf_data32 |= (0x1<<29); // seg enable + else + lpf_data32 &= ~(0x1<<29); + if (pic_width >= 1280) + lpf_data32 |= (0x1 << 4); // dblk pipeline mode=1 for performance + else + lpf_data32 &= ~(0x3 << 4); + WRITE_VREG(HEVC_DBLK_CFGB, lpf_data32); + } + // Set CDEF + WRITE_VREG(HEVC_DBLK_CDEF0, buf_spec->cdef_data.buf_start); + { + uint32_t cdef_data32 = (READ_VREG(HEVC_DBLK_CDEF1) & 0xffffff00); + cdef_data32 |= 17; // TODO ERROR :: cdef temp dma address left offset +#ifdef DBG_LPF_CDEF_NO_PIPELINE + cdef_data32 |= (1<<17); // cdef test no pipeline for very small picture +#endif + WRITE_VREG(HEVC_DBLK_CDEF1, cdef_data32); + } + // Picture count + lf->lf_pic_cnt++; +} +#endif // #ifdef AOM_AV1_DBLK_INIT + +#ifdef AOM_AV1_UPSCALE_INIT +/* + * these functions here for upscaling updated in every picture + */ +#define RS_SUBPEL_BITS 6 +#define RS_SUBPEL_MASK ((1 << RS_SUBPEL_BITS) - 1) +#define RS_SCALE_SUBPEL_BITS 14 +#define RS_SCALE_SUBPEL_MASK ((1 << RS_SCALE_SUBPEL_BITS) - 1) +#define RS_SCALE_EXTRA_BITS (RS_SCALE_SUBPEL_BITS - RS_SUBPEL_BITS) +#define RS_SCALE_EXTRA_OFF (1 << (RS_SCALE_EXTRA_BITS - 1)) + +static int32_t av1_get_upscale_convolve_step(int32_t in_length, int32_t out_length) { + return ((in_length << RS_SCALE_SUBPEL_BITS) + out_length / 2) / out_length; +} + +static int32_t get_upscale_convolve_x0(int32_t in_length, int32_t out_length, + int32_t x_step_qn) { + const int32_t err = out_length * x_step_qn - (in_length << RS_SCALE_SUBPEL_BITS); + const int32_t x0 = + (-((out_length - in_length) << (RS_SCALE_SUBPEL_BITS - 1)) + + out_length / 2) / + 
out_length + + RS_SCALE_EXTRA_OFF - err / 2; + return (int32_t)((uint32_t)x0 & RS_SCALE_SUBPEL_MASK); +} + +void av1_upscale_frame_init(AV1Decoder* pbi, AV1_COMMON *cm, param_t* params) +{ + BuffInfo_t* buf_spec = pbi->work_space_buf; + //uint32_t data32; + const int32_t width = cm->dec_width; + const int32_t superres_upscaled_width = cm->superres_upscaled_width; + const int32_t x_step_qn_luma = av1_get_upscale_convolve_step(width, superres_upscaled_width); + const int32_t x0_qn_luma = get_upscale_convolve_x0(width, superres_upscaled_width, x_step_qn_luma); + const int32_t x_step_qn_chroma = av1_get_upscale_convolve_step((width+1)>>1, (superres_upscaled_width+1)>>1); + const int32_t x0_qn_chroma = get_upscale_convolve_x0((width+1)>>1, (superres_upscaled_width+1)>>1, x_step_qn_chroma); + av1_print2(AOM_DEBUG_HW_MORE, + "UPS_PRINT: width(%d -> %d)\n", + width, superres_upscaled_width); + av1_print2(AOM_DEBUG_HW_MORE, + "UPS_PRINT: xstep(%d,%d)(0x%X, 0x%X) x0qn(%d,%d)(0x%X, 0x%X)\n", + x_step_qn_luma,x_step_qn_chroma, + x_step_qn_luma,x_step_qn_chroma, + x0_qn_luma,x0_qn_chroma, + x0_qn_luma,x0_qn_chroma); + WRITE_VREG(HEVC_DBLK_UPS1, buf_spec->ups_data.buf_start); + WRITE_VREG(HEVC_DBLK_UPS2, x0_qn_luma); // x0_qn y + WRITE_VREG(HEVC_DBLK_UPS3, x0_qn_chroma); // x0_qn c + WRITE_VREG(HEVC_DBLK_UPS4, x_step_qn_luma); // x_step y + WRITE_VREG(HEVC_DBLK_UPS5, x_step_qn_chroma); // x_step c + WRITE_VREG(AV1_UPSCALE_X0_QN, (x0_qn_chroma<<16)|x0_qn_luma); + WRITE_VREG(AV1_UPSCALE_STEP_QN, (x_step_qn_chroma<<16)|x_step_qn_luma); + +/* + * TileR calculation here if cm needs an exactly accurate value + */ +//#define AV1_UPSCALE_TILER_CALCULATION +#ifdef AV1_UPSCALE_TILER_CALCULATION + uint32_t upscl_enabled = 1; // 1 just for example, actually this is use_superres flag + uint32_t tiler_x = 192; // 192 just for example, actually this is tile end + uint32_t ux; + uint32_t ux_tiler,ux_tiler_rnd32; + uint32_t xqn_y; + uint32_t xqn_c; + uint32_t tiler_x_y = tiler_x - 8 - 3; // 
dblk/cdef left-shift-8 plus upscaling extra-3 + uint32_t tiler_x_c = (tiler_x/2) - 4 - 3; // dblk/cdef left-shift-4 plus upscaling extra-3 + + xqn_y = x0_qn_luma; + xqn_c = x0_qn_chroma; + ux_tiler = 0; + ux_tiler_rnd32 = 0; + for (ux=0; ux<16384; ux+=8) { + uint32_t x1qn_y = xqn_y + x_step_qn_luma *( 7+3); // extra-3 is for lrf + uint32_t x1qn_c = xqn_c + x_step_qn_chroma*( 3+3); // extra-3 is for lrf + uint32_t x1qn_y_nxt = xqn_y + x_step_qn_luma *(8+7+3); // extra-3 is for lrf + uint32_t x1qn_c_nxt = xqn_c + x_step_qn_chroma*(4+3+3); // extra-3 is for lrf + + uint32_t x1_y = upscl_enabled ? (x1qn_y>>14) : ux +7+3; + uint32_t x1_c = upscl_enabled ? (x1qn_c>>14) : (ux/2)+3+3; + uint32_t x1_y_nxt = upscl_enabled ? (x1qn_y_nxt>>14) : ux +8+7+3; + uint32_t x1_c_nxt = upscl_enabled ? (x1qn_c_nxt>>14) : (ux/2)+4+3+3; + + if ((x1_y<tiler_x_y && x1_c<tiler_x_c) && + (x1_y_nxt>=tiler_x_y || x1_c_nxt>=tiler_x_c)) { + ux_tiler = ux; + ux_tiler_rnd32 = (ux_tiler/32 + (ux_tiler%32 ? 1 : 0)) * 32; + break; + } + + xqn_y += x_step_qn_luma*8; + xqn_c += x_step_qn_chroma*4; + } + + av1_print(hw, AOM_DEBUG_HW_MORE, + "UPS_PRINT: xqn_y(0x%x), xqn_c(0x%x), x1qn_y(0x%x), x1qn_c(0x%x)\n", + xqn_y, xqn_c, x1qn_y, x1qn_c); + av1_print(hw, AOM_DEBUG_HW_MORE, + "UPS_PRINT: ux_tiler(%d)(0x%x), ux_tiler_rnd32(%d)(0x%x)\n", + ux_tiler, ux_tiler, ux_tiler_rnd32, ux_tiler_rnd32); +#endif + + // TEMP write lrf register here + //WRITE_VREG(HEVC_DBLK_LRF0, 1<<0 | 1<<2); // LRF UNIT SIZE + //WRITE_VREG(HEVC_DBLK_LRF1, 3<<0 | 1<<8 | 1<<16 | 1<<24); // LRF UNIT NUMBER + + // TEMP Global Enables write here + /* + const uint32_t dblk_enable = (!cm->allow_intrabc && !cm->single_tile_decoding && (cm->lf.filter_level[0] || cm->lf.filter_level[1])); + const uint32_t cdef_enable = (!cm->allow_intrabc && !cm->single_tile_decoding && !cm->skip_loop_filter && !cm->coded_lossless && (cm->cdef_bits || cm->cdef_strengths[0] || cm->cdef_uv_strengths[0])); + printk("LPF_ENABLES : dblk(%d) cdef(%d)\n", dblk_enable, 
cdef_enable); + data32 = READ_VREG(HEVC_DBLK_CFGB ); + data32 &= ~(0xf<<20); + data32 |= (dblk_enable<<20); + data32 |= (cdef_enable<<23); + WRITE_VREG(HEVC_DBLK_CFGB, data32); + */ +} + +#endif // #ifdef AOM_AV1_UPSCALE_INIT + +static void release_dblk_struct(struct AV1HW_s *hw) +{ +#ifdef AOM_AV1_DBLK_INIT + if (hw->lfi) + vfree(hw->lfi); + if (hw->lf) + vfree(hw->lf); + if (hw->seg_4lf) + vfree(hw->seg_4lf); + hw->lfi = NULL; + hw->lf = NULL; + hw->seg_4lf = NULL; +#endif +} + +static int init_dblk_struc(struct AV1HW_s *hw) +{ +#ifdef AOM_AV1_DBLK_INIT + hw->lfi = vmalloc(sizeof(loop_filter_info_n)); + hw->lf = vmalloc(sizeof(struct loopfilter)); + hw->seg_4lf = vmalloc(sizeof(struct segmentation_lf)); + + if (hw->lfi == NULL || hw->lf == NULL || hw->seg_4lf == NULL) { + printk("[test.c] aom_loop_filter init malloc error!!!\n"); + release_dblk_struct(hw); + return -1; + } + + hw->lf->mode_ref_delta_enabled = 1; // set default here + hw->lf->mode_ref_delta_update = 1; // set default here + hw->lf->sharpness_level = 0; // init to 0 + hw->lf->lf_pic_cnt = 0; // init to 0 +#endif + return 0; +} + +static void config_dblk_hw(struct AV1HW_s *hw) +{ + AV1Decoder *pbi = hw->pbi; + AV1_COMMON *cm = &hw->common; + loop_filter_info_n *lfi = hw->lfi; + struct loopfilter *lf = hw->lf; + struct segmentation_lf *seg_4lf = hw->seg_4lf; + BuffInfo_t* buf_spec = pbi->work_space_buf; + PIC_BUFFER_CONFIG* cur_pic_config = &cm->cur_frame->buf; + PIC_BUFFER_CONFIG* prev_pic_config = &cm->prev_frame->buf; + int i; + +#ifdef AOM_AV1_DBLK_INIT +#ifdef DUAL_DECODE +#else + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c ref_delta] cur_frame : %p prev_frame : %p - %p \n", + cm->cur_frame, cm->prev_frame, + av1_get_primary_ref_frame_buf(cm)); + // get lf parameters from parser + lf->mode_ref_delta_enabled = + (hw->aom_param.p.loop_filter_mode_ref_delta_enabled & 1); + lf->mode_ref_delta_update = + ((hw->aom_param.p.loop_filter_mode_ref_delta_enabled >> 1) & 1); + lf->sharpness_level = + 
hw->aom_param.p.loop_filter_sharpness_level; + if (((hw->aom_param.p.loop_filter_mode_ref_delta_enabled)&3) == 3) { // enabled but and update + if (cm->prev_frame <= 0) { + // already initialized in Microcode + lf->ref_deltas[0] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_0),7); + lf->ref_deltas[1] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_0>>8),7); + lf->ref_deltas[2] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_1),7); + lf->ref_deltas[3] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_1>>8),7); + lf->ref_deltas[4] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_2),7); + lf->ref_deltas[5] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_2>>8),7); + lf->ref_deltas[6] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_3),7); + lf->ref_deltas[7] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_3>>8),7); + lf->mode_deltas[0] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_mode_deltas_0),7); + lf->mode_deltas[1] = conv2int8((uint8_t)(hw->aom_param.p.loop_filter_mode_deltas_0>>8),7); + } else { + lf->ref_deltas[0] = (hw->aom_param.p.loop_filter_ref_deltas_0 & 0x80) ? + conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_0),7) : + cm->prev_frame->ref_deltas[0]; + lf->ref_deltas[1] = (hw->aom_param.p.loop_filter_ref_deltas_0 & 0x8000) ? + conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_0>>8),7) : + cm->prev_frame->ref_deltas[1]; + lf->ref_deltas[2] = (hw->aom_param.p.loop_filter_ref_deltas_1 & 0x80) ? + conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_1),7) : + cm->prev_frame->ref_deltas[2]; + lf->ref_deltas[3] = (hw->aom_param.p.loop_filter_ref_deltas_1 & 0x8000) ? + conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_1>>8),7) : + cm->prev_frame->ref_deltas[3]; + lf->ref_deltas[4] = (hw->aom_param.p.loop_filter_ref_deltas_2 & 0x80) ? 
+ conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_2),7) : + cm->prev_frame->ref_deltas[4]; + lf->ref_deltas[5] = (hw->aom_param.p.loop_filter_ref_deltas_2 & 0x8000) ? + conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_2>>8),7) : + cm->prev_frame->ref_deltas[5]; + lf->ref_deltas[6] = (hw->aom_param.p.loop_filter_ref_deltas_3 & 0x80) ? + conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_3),7) : + cm->prev_frame->ref_deltas[6]; + lf->ref_deltas[7] = (hw->aom_param.p.loop_filter_ref_deltas_3 & 0x8000) ? + conv2int8((uint8_t)(hw->aom_param.p.loop_filter_ref_deltas_3>>8),7) : + cm->prev_frame->ref_deltas[7]; + lf->mode_deltas[0] = (hw->aom_param.p.loop_filter_mode_deltas_0 & 0x80) ? + conv2int8((uint8_t)(hw->aom_param.p.loop_filter_mode_deltas_0),7) : + cm->prev_frame->mode_deltas[0]; + lf->mode_deltas[1] = (hw->aom_param.p.loop_filter_mode_deltas_0 & 0x8000) ? + conv2int8((uint8_t)(hw->aom_param.p.loop_filter_mode_deltas_0>>8),7) : + cm->prev_frame->mode_deltas[1]; + } + } //else if (hw->aom_param.p.loop_filter_mode_ref_delta_enabled == 1) { // enabled but no update + else { // match c code -- not enabled, still need to copy prev to used for next + if ((cm->prev_frame <= 0) | (hw->aom_param.p.loop_filter_mode_ref_delta_enabled & 4)) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] mode_ref_delta set to default\n"); + lf->ref_deltas[0] = conv2int8((uint8_t)1,7); + lf->ref_deltas[1] = conv2int8((uint8_t)0,7); + lf->ref_deltas[2] = conv2int8((uint8_t)0,7); + lf->ref_deltas[3] = conv2int8((uint8_t)0,7); + lf->ref_deltas[4] = conv2int8((uint8_t)0xff,7); + lf->ref_deltas[5] = conv2int8((uint8_t)0,7); + lf->ref_deltas[6] = conv2int8((uint8_t)0xff,7); + lf->ref_deltas[7] = conv2int8((uint8_t)0xff,7); + lf->mode_deltas[0] = conv2int8((uint8_t)0,7); + lf->mode_deltas[1] = conv2int8((uint8_t)0,7); + } else { + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] mode_ref_delta copy from prev_frame\n"); + lf->ref_deltas[0] = cm->prev_frame->ref_deltas[0]; + 
lf->ref_deltas[1] = cm->prev_frame->ref_deltas[1]; + lf->ref_deltas[2] = cm->prev_frame->ref_deltas[2]; + lf->ref_deltas[3] = cm->prev_frame->ref_deltas[3]; + lf->ref_deltas[4] = cm->prev_frame->ref_deltas[4]; + lf->ref_deltas[5] = cm->prev_frame->ref_deltas[5]; + lf->ref_deltas[6] = cm->prev_frame->ref_deltas[6]; + lf->ref_deltas[7] = cm->prev_frame->ref_deltas[7]; + lf->mode_deltas[0] = cm->prev_frame->mode_deltas[0]; + lf->mode_deltas[1] = cm->prev_frame->mode_deltas[1]; + } + } + lf->filter_level[0] = hw->aom_param.p.loop_filter_level_0; + lf->filter_level[1] = hw->aom_param.p.loop_filter_level_1; + lf->filter_level_u = hw->aom_param.p.loop_filter_level_u; + lf->filter_level_v = hw->aom_param.p.loop_filter_level_v; + + cm->cur_frame->ref_deltas[0] = lf->ref_deltas[0]; + cm->cur_frame->ref_deltas[1] = lf->ref_deltas[1]; + cm->cur_frame->ref_deltas[2] = lf->ref_deltas[2]; + cm->cur_frame->ref_deltas[3] = lf->ref_deltas[3]; + cm->cur_frame->ref_deltas[4] = lf->ref_deltas[4]; + cm->cur_frame->ref_deltas[5] = lf->ref_deltas[5]; + cm->cur_frame->ref_deltas[6] = lf->ref_deltas[6]; + cm->cur_frame->ref_deltas[7] = lf->ref_deltas[7]; + cm->cur_frame->mode_deltas[0] = lf->mode_deltas[0]; + cm->cur_frame->mode_deltas[1] = lf->mode_deltas[1]; + + // get seg_4lf parameters from parser + seg_4lf->enabled = hw->aom_param.p.segmentation_enabled & 1; + cm->cur_frame->segmentation_enabled = hw->aom_param.p.segmentation_enabled & 1; + cm->cur_frame->intra_only = (hw->aom_param.p.segmentation_enabled >> 2) & 1; + cm->cur_frame->segmentation_update_map = (hw->aom_param.p.segmentation_enabled >> 3) & 1; + + if (hw->aom_param.p.segmentation_enabled & 1) { // segmentation_enabled + if (hw->aom_param.p.segmentation_enabled & 2) { // segmentation_update_data + for (i = 0; i < MAX_SEGMENTS; i++) { + seg_4lf->seg_lf_info_y[i] = hw->aom_param.p.seg_lf_info_y[i]; + seg_4lf->seg_lf_info_c[i] = hw->aom_param.p.seg_lf_info_c[i]; + #ifdef DBG_LPF_PRINT + printk(" read seg_lf_info [%d] : 0x%x, 
0x%x\n", + i, seg_4lf->seg_lf_info_y[i], seg_4lf->seg_lf_info_c[i]); + #endif + } + } // segmentation_update_data + else { // no segmentation_update_data + if (cm->prev_frame <= 0) { + for (i=0;i<MAX_SEGMENTS;i++) { + seg_4lf->seg_lf_info_y[i] = 0; + seg_4lf->seg_lf_info_c[i] = 0; + } + } else { + for (i = 0; i < MAX_SEGMENTS; i++) { + seg_4lf->seg_lf_info_y[i] = cm->prev_frame->seg_lf_info_y[i]; + seg_4lf->seg_lf_info_c[i] = cm->prev_frame->seg_lf_info_c[i]; + #ifdef DBG_LPF_PRINT + printk(" Refrence seg_lf_info [%d] : 0x%x, 0x%x\n", + i, seg_4lf->seg_lf_info_y[i], seg_4lf->seg_lf_info_c[i]); + #endif + } + } + } // no segmentation_update_data + } // segmentation_enabled + else { + for (i=0;i<MAX_SEGMENTS;i++) { + seg_4lf->seg_lf_info_y[i] = 0; + seg_4lf->seg_lf_info_c[i] = 0; + } + } // NOT segmentation_enabled + for (i=0;i<MAX_SEGMENTS;i++) { + cm->cur_frame->seg_lf_info_y[i] = seg_4lf->seg_lf_info_y[i]; + cm->cur_frame->seg_lf_info_c[i] = seg_4lf->seg_lf_info_c[i]; +#ifdef DBG_LPF_PRINT + printk(" SAVE seg_lf_info [%d] : 0x%x, 0x%x\n", + i, cm->cur_frame->seg_lf_info_y[i], + cm->cur_frame->seg_lf_info_c[i]); +#endif + } + + /* + * Update loop filter Thr/Lvl table for every frame + */ + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] av1_loop_filter_frame_init (run before every frame decoding start)\n"); + av1_loop_filter_frame_init(pbi, seg_4lf, lfi, lf, cm->dec_width); +#endif // not DUAL_DECODE +#endif + +#ifdef AOM_AV1_UPSCALE_INIT + /* + * init for upscaling + */ + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] av1_upscale_frame_init (run before every frame decoding start)\n"); + av1_upscale_frame_init(pbi, + pbi->common, &hw->aom_param); +#endif // #ifdef AOM_AV1_UPSCALE_INIT + + //BuffInfo_t* buf_spec = pbi->work_space_buf; + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] cur_frame : %p prev_frame : %p - %p \n", + cm->cur_frame, cm->prev_frame, av1_get_primary_ref_frame_buf(cm)); + if (cm->cur_frame <= 0) { + WRITE_VREG(AOM_AV1_CDF_BUFFER_W, 
buf_spec->cdf_buf.buf_start); + WRITE_VREG(AOM_AV1_SEG_MAP_BUFFER_W, buf_spec->seg_map.buf_start); + } + else { + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] Config WRITE CDF_BUF/SEG_MAP_BUF : %d\n", + cur_pic_config->index); + WRITE_VREG(AOM_AV1_CDF_BUFFER_W, + buf_spec->cdf_buf.buf_start + (0x8000*cur_pic_config->index)); + WRITE_VREG(AOM_AV1_SEG_MAP_BUFFER_W, + buf_spec->seg_map.buf_start + ((buf_spec->seg_map.buf_size / 16) * cur_pic_config->index)); + } + cm->cur_frame->seg_mi_rows = cm->cur_frame->mi_rows; + cm->cur_frame->seg_mi_cols = cm->cur_frame->mi_cols; + if (cm->prev_frame <= 0) { + WRITE_VREG(AOM_AV1_CDF_BUFFER_R, buf_spec->cdf_buf.buf_start); + WRITE_VREG(AOM_AV1_SEG_MAP_BUFFER_R, buf_spec->seg_map.buf_start); + } else { + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] Config READ CDF_BUF/SEG_MAP_BUF : %d\n", + prev_pic_config->index); + WRITE_VREG(AOM_AV1_CDF_BUFFER_R, + buf_spec->cdf_buf.buf_start + (0x8000*prev_pic_config->index)); + WRITE_VREG(AOM_AV1_SEG_MAP_BUFFER_R, + buf_spec->seg_map.buf_start + ((buf_spec->seg_map.buf_size / 16) * prev_pic_config->index)); + + // segmentation_enabled but no segmentation_update_data + if ((hw->aom_param.p.segmentation_enabled & 3) == 1) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] segfeatures_copy from prev_frame\n"); + for (i = 0; i < 8; i++) { + WRITE_VREG(AOM_AV1_SEGMENT_FEATURE, + cm->prev_frame->segment_feature[i]); + } + } + // segmentation_enabled but no segmentation_update_map + if ((hw->aom_param.p.segmentation_enabled & 9) == 1) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "[test.c] seg_map_size copy from prev_frame\n"); + cm->cur_frame->seg_mi_rows = cm->prev_frame->seg_mi_rows; + cm->cur_frame->seg_mi_cols = cm->prev_frame->seg_mi_cols; + } + } +#ifdef PRINT_HEVC_DATA_PATH_MONITOR + { + uint32_t total_clk_count; + uint32_t path_transfer_count; + uint32_t path_wait_count; + float path_wait_ratio; + if (pbi->decode_idx > 1) { + WRITE_VREG(HEVC_PATH_MONITOR_CTRL, 0); // Disabble monitor and set rd_idx 
to 0 + total_clk_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + + WRITE_VREG(HEVC_PATH_MONITOR_CTRL, (1<<4)); // Disabble monitor and set rd_idx to 0 + + // parser --> iqit + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) + path_wait_ratio = 0.0; + else + path_wait_ratio = + (float)path_wait_count/(float)path_transfer_count; + printk("[P%d HEVC PATH] Parser/IQIT/IPP/DBLK/OW/DDR/CMD WAITING \% : %.2f", + pbi->decode_idx - 2, + path_wait_ratio); + + // iqit --> ipp + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) + path_wait_ratio = 0.0; + else + path_wait_ratio = (float)path_wait_count/(float)path_transfer_count; + printk(" %.2f", path_wait_ratio); + + // dblk <-- ipp + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) + path_wait_ratio = 0.0; + else + path_wait_ratio = (float)path_wait_count/(float)path_transfer_count; + printk(" %.2f", path_wait_ratio); + + // dblk --> ow + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) + path_wait_ratio = 0.0; + else path_wait_ratio = + (float)path_wait_count/(float)path_transfer_count; + printk(" %.2f", path_wait_ratio); + + // <--> DDR + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) + path_wait_ratio = 0.0; + else path_wait_ratio = + (float)path_wait_count/(float)path_transfer_count; + printk(" %.2f", path_wait_ratio); + + // CMD + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) + path_wait_ratio = 0.0; + else + path_wait_ratio = 
(float)path_wait_count/(float)path_transfer_count; + printk(" %.2f\n", path_wait_ratio); + } + } + +#endif + +} + +static void aom_config_work_space_hw(struct AV1HW_s *hw, u32 mask) +{ + struct BuffInfo_s *buf_spec = hw->work_space_buf; + unsigned int data32; + av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__, __LINE__); + if (debug && hw->init_flag == 0) + av1_print(hw, AOM_DEBUG_HW_MORE, "%s %x %x %x %x %x %x %x %x\n", + __func__, + buf_spec->ipp.buf_start, + buf_spec->start_adr, + buf_spec->short_term_rps.buf_start, + buf_spec->sao_up.buf_start, + buf_spec->swap_buf.buf_start, + buf_spec->scalelut.buf_start, + buf_spec->dblk_para.buf_start, + buf_spec->dblk_data.buf_start); + if (mask & HW_MASK_FRONT) { + av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__, __LINE__); + if ((debug & AOM_AV1_DEBUG_SEND_PARAM_WITH_REG) == 0) + WRITE_VREG(HEVC_RPM_BUFFER, (u32)hw->rpm_phy_addr); + + /*WRITE_VREG(HEVC_STREAM_SWAP_BUFFER, + buf_spec->swap_buf.buf_start);*/ + WRITE_VREG(LMEM_DUMP_ADR, (u32)hw->lmem_phy_addr); + + } + + av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__, __LINE__); + + WRITE_VREG(AOM_AV1_DAALA_TOP_BUFFER, + buf_spec->daala_top.buf_start); + WRITE_VREG(AV1_GMC_PARAM_BUFF_ADDR, + buf_spec->gmc_buf.buf_start); + + WRITE_VREG(HEVC_DBLK_CFG4, + buf_spec->dblk_para.buf_start); // cfg_addr_cif + WRITE_VREG(HEVC_DBLK_CFG5, + buf_spec->dblk_data.buf_start); // cfg_addr_xio + + if (mask & HW_MASK_BACK) { +#ifdef LOSLESS_COMPRESS_MODE + int losless_comp_header_size = + compute_losless_comp_header_size(hw->init_pic_w, + hw->init_pic_h); + int losless_comp_body_size = + compute_losless_comp_body_size(hw->init_pic_w, + hw->init_pic_h, buf_alloc_depth == 10); +#endif +#ifdef AOM_AV1_MMU_DW + int losless_comp_header_size_dw = + compute_losless_comp_header_size_dw(hw->init_pic_w, + hw->init_pic_h); + int losless_comp_body_size_dw = + compute_losless_comp_body_size_dw(hw->init_pic_w, + hw->init_pic_h, buf_alloc_depth == 10); +#endif + 
WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, + buf_spec->ipp.buf_start); + //WRITE_VREG(HEVC_SAO_UP, buf_spec->sao_up.buf_start); + //WRITE_VREG(HEVC_SCALELUT, buf_spec->scalelut.buf_start); +#ifdef CHANGE_REMOVED + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + /* cfg_addr_adp*/ + WRITE_VREG(HEVC_DBLK_CFGE, buf_spec->dblk_para.buf_start); + if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info("Write HEVC_DBLK_CFGE\n"); + } +#endif + /* cfg_p_addr */ + WRITE_VREG(HEVC_DBLK_CFG4, buf_spec->dblk_para.buf_start); + /* cfg_d_addr */ + WRITE_VREG(HEVC_DBLK_CFG5, buf_spec->dblk_data.buf_start); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + if (buf_spec->max_width <= 4096 && buf_spec->max_height <= 2304) + WRITE_VREG(HEVC_DBLK_CFG3, 0x404010); //default value + else + WRITE_VREG(HEVC_DBLK_CFG3, 0x808020); // make left storage 2 x 4k] + av1_print(hw, AV1_DEBUG_BUFMGR_MORE, + "HEVC_DBLK_CFG3 = %x\n", READ_VREG(HEVC_DBLK_CFG3)); + } + +#ifdef LOSLESS_COMPRESS_MODE + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + /*bit[4] : paged_mem_mode*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4)); +#ifdef CHANGE_REMOVED + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SM1) +#endif + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0); + } else { + /*if (cur_pic_config->bit_depth == AOM_BITS_10) + * WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0<<3)); + */ + /*bit[3] smem mdoe*/ + /*else WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (1<<3));*/ + /*bit[3] smem mdoe*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, + (losless_comp_body_size >> 5)); + } + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, + (losless_comp_body_size >> 5));*/ + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL3, + (0xff<<20) | (0xff<<10) | 0xff);*/ + /*8-bit mode */ + WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size); + if (get_double_write_mode(hw) & 0x10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 
<< 31); +#else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#endif + + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR, buf_spec->mmu_vbh.buf_start); + WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR, buf_spec->mmu_vbh.buf_start + + VBH_BUF_SIZE(buf_spec)); + /*data32 = READ_VREG(HEVC_SAO_CTRL9);*/ + /*data32 |= 0x1;*/ + /*WRITE_VREG(HEVC_SAO_CTRL9, data32);*/ + + /* use HEVC_CM_HEADER_START_ADDR */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 |= (1<<10); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } +#ifdef AOM_AV1_MMU_DW + data32 = READ_VREG(HEVC_SAO_CTRL5); + if (get_double_write_mode(hw) & 0x20) { + u32 data_tmp; + data_tmp = READ_VREG(HEVC_SAO_CTRL9); + data_tmp |= (1<<10); + WRITE_VREG(HEVC_SAO_CTRL9, data_tmp); + + WRITE_VREG(HEVC_CM_BODY_LENGTH2,losless_comp_body_size_dw); + WRITE_VREG(HEVC_CM_HEADER_OFFSET2,losless_comp_body_size_dw); + WRITE_VREG(HEVC_CM_HEADER_LENGTH2,losless_comp_header_size_dw); + + WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR2, buf_spec->mmu_vbh_dw.buf_start); + WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR2, buf_spec->mmu_vbh_dw.buf_start + + DW_VBH_BUF_SIZE(buf_spec)); + + WRITE_VREG(HEVC_DW_VH0_ADDDR, buf_spec->mmu_vbh_dw.buf_start + + (2 * DW_VBH_BUF_SIZE(buf_spec))); + WRITE_VREG(HEVC_DW_VH1_ADDDR, buf_spec->mmu_vbh_dw.buf_start + + (3 * DW_VBH_BUF_SIZE(buf_spec))); + + /* use HEVC_CM_HEADER_START_ADDR */ + data32 |= (1<<15); + } else + data32 &= ~(1<<15); + WRITE_VREG(HEVC_SAO_CTRL5, data32); +#endif + + WRITE_VREG(LMEM_DUMP_ADR, (u32)hw->lmem_phy_addr); +#ifdef CHANGE_REMOVED + + WRITE_VREG(AV1_SEG_MAP_BUFFER, buf_spec->seg_map.buf_start); + + /**/ + WRITE_VREG(AV1_PROB_SWAP_BUFFER, hw->prob_buffer_phy_addr); + WRITE_VREG(AV1_COUNT_SWAP_BUFFER, hw->count_buffer_phy_addr); + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) + WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR, 
hw->frame_mmu_map_phy_addr); + else + WRITE_VREG(AV1_MMU_MAP_BUFFER, hw->frame_mmu_map_phy_addr); + } +#else + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + WRITE_VREG(HEVC_SAO_MMU_DMA_CTRL, hw->frame_mmu_map_phy_addr); + } +#ifdef AOM_AV1_MMU_DW + if (get_double_write_mode(hw) & 0x20) { + WRITE_VREG(HEVC_SAO_MMU_DMA_CTRL2, hw->dw_frame_mmu_map_phy_addr); + //default of 0xffffffff will disable dw + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0); + WRITE_VREG(HEVC_SAO_C_START_ADDR, 0); + } +#endif +#endif +#ifdef CO_MV_COMPRESS + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T5D) { + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 |= (1 << 1); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); + } +#endif + } + + config_aux_buf(hw); +} + +#ifdef MCRCC_ENABLE +static u32 mcrcc_cache_alg_flag = 1; +static void mcrcc_perfcount_reset(struct AV1HW_s *hw); +static void decomp_perfcount_reset(struct AV1HW_s *hw); +#endif + +static void aom_init_decoder_hw(struct AV1HW_s *hw, u32 mask) +{ + unsigned int data32; + int i; + const unsigned short parser_cmd[PARSER_CMD_NUMBER] = { + 0x0401, 0x8401, 0x0800, 0x0402, 0x9002, 0x1423, + 0x8CC3, 0x1423, 0x8804, 0x9825, 0x0800, 0x04FE, + 0x8406, 0x8411, 0x1800, 0x8408, 0x8409, 0x8C2A, + 0x9C2B, 0x1C00, 0x840F, 0x8407, 0x8000, 0x8408, + 0x2000, 0xA800, 0x8410, 0x04DE, 0x840C, 0x840D, + 0xAC00, 0xA000, 0x08C0, 0x08E0, 0xA40E, 0xFC00, + 0x7C00 + }; +#if 0 + if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) { + /* Set MCR fetch priorities*/ + data32 = 0x1 | (0x1 << 2) | (0x1 <<3) | + (24 << 4) | (32 << 11) | (24 << 18) | (32 << 25); + WRITE_VREG(HEVCD_MPP_DECOMP_AXIURG_CTL, data32); + } +#endif + /*if (debug & AV1_DEBUG_BUFMGR_MORE) + pr_info("%s\n", __func__);*/ + if (mask & HW_MASK_FRONT) { + data32 = READ_VREG(HEVC_PARSER_INT_CONTROL); +#ifdef CHANGE_REMOVED +#if 1 + /* set bit 31~29 to 3 if HEVC_STREAM_FIFO_CTL[29] is 1 */ + data32 &= ~(7 << 29); + data32 |= (3 << 29); +#endif + data32 = data32 | + (1 
<< 24) |/*stream_buffer_empty_int_amrisc_enable*/ + (1 << 22) |/*stream_fifo_empty_int_amrisc_enable*/ + (1 << 7) |/*dec_done_int_cpu_enable*/ + (1 << 4) |/*startcode_found_int_cpu_enable*/ + (0 << 3) |/*startcode_found_int_amrisc_enable*/ + (1 << 0) /*parser_int_enable*/ + ; +#else + data32 = data32 & 0x03ffffff; + data32 = data32 | + (3 << 29) | // stream_buffer_empty_int_ctl ( 0x200 interrupt) + (3 << 26) | // stream_fifo_empty_int_ctl ( 4 interrupt) + (1 << 24) | // stream_buffer_empty_int_amrisc_enable + (1 << 22) | // stream_fifo_empty_int_amrisc_enable +#ifdef AOM_AV1_HED_FB +#ifdef DUAL_DECODE + // For HALT CCPU test. Use Pull inside CCPU to generate interrupt + // (1 << 9) | // fed_fb_slice_done_int_amrisc_enable +#else + (1 << 10) | // fed_fb_slice_done_int_cpu_enable +#endif +#endif + (1 << 7) | // dec_done_int_cpu_enable + (1 << 4) | // startcode_found_int_cpu_enable + (0 << 3) | // startcode_found_int_amrisc_enable + (1 << 0) // parser_int_enable + ; +#endif + WRITE_VREG(HEVC_PARSER_INT_CONTROL, data32); + + data32 = READ_VREG(HEVC_SHIFT_STATUS); + data32 = data32 | + (0 << 1) |/*emulation_check_off AV1 + do not have emulation*/ + (1 << 0)/*startcode_check_on*/ + ; + WRITE_VREG(HEVC_SHIFT_STATUS, data32); + WRITE_VREG(HEVC_SHIFT_CONTROL, + (0 << 14) | /*disable_start_code_protect*/ + (1 << 10) | /*length_zero_startcode_en for AV1*/ + (1 << 9) | /*length_valid_startcode_en for AV1*/ + (3 << 6) | /*sft_valid_wr_position*/ + (2 << 4) | /*emulate_code_length_sub_1*/ + (3 << 1) | /*start_code_length_sub_1 + AV1 use 0x00000001 as startcode (4 Bytes)*/ + (1 << 0) /*stream_shift_enable*/ + ); + + WRITE_VREG(HEVC_CABAC_CONTROL, + (1 << 0)/*cabac_enable*/ + ); + + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, + (1 << 0)/* hevc_parser_core_clk_en*/ + ); + + WRITE_VREG(HEVC_DEC_STATUS_REG, 0); + } + + if (mask & HW_MASK_BACK) { + /*Initial IQIT_SCALELUT memory + -- just to avoid X in simulation*/ + if (is_rdma_enable()) + rdma_back_end_work(hw->rdma_phy_adr, RDMA_SIZE); + 
else { + WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0);/*cfg_p_addr*/ + for (i = 0; i < 1024; i++) + WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, 0); + } + } + + if (mask & HW_MASK_FRONT) { + u32 decode_mode; +/* +#ifdef ENABLE_SWAP_TEST + WRITE_VREG(HEVC_STREAM_SWAP_TEST, 100); +#else + WRITE_VREG(HEVC_STREAM_SWAP_TEST, 0); +#endif +*/ +#ifdef MULTI_INSTANCE_SUPPORT + if (!hw->m_ins_flag) { + if (hw->low_latency_flag) + decode_mode = DECODE_MODE_SINGLE_LOW_LATENCY; + else + decode_mode = DECODE_MODE_SINGLE; + } else if (vdec_frame_based(hw_to_vdec(hw))) + decode_mode = hw->no_head ? + DECODE_MODE_MULTI_FRAMEBASE_NOHEAD : + DECODE_MODE_MULTI_FRAMEBASE; + else + decode_mode = DECODE_MODE_MULTI_STREAMBASE; + if (debug & AOM_DEBUG_BUFMGR_ONLY) + decode_mode |= (1 << 16); + WRITE_VREG(DECODE_MODE, decode_mode); + WRITE_VREG(HEVC_DECODE_SIZE, 0); + WRITE_VREG(HEVC_DECODE_COUNT, 0); +#else + WRITE_VREG(DECODE_MODE, DECODE_MODE_SINGLE); + WRITE_VREG(HEVC_DECODE_PIC_BEGIN_REG, 0); + WRITE_VREG(HEVC_DECODE_PIC_NUM_REG, 0x7fffffff); /*to remove*/ +#endif + /*Send parser_cmd*/ + WRITE_VREG(HEVC_PARSER_CMD_WRITE, (1 << 16) | (0 << 0)); + for (i = 0; i < PARSER_CMD_NUMBER; i++) + WRITE_VREG(HEVC_PARSER_CMD_WRITE, parser_cmd[i]); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2); + + + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + /* (1 << 8) |*/ /*sao_sw_pred_enable*/ + (1 << 5) | /*parser_sao_if_en*/ + (1 << 2) | /*parser_mpred_if_en*/ + (1 << 0) /*parser_scaler_if_en*/ + ); + } + + if (mask & HW_MASK_BACK) { + /*Changed to Start MPRED in microcode*/ + /* + pr_info("[test.c] Start MPRED\n"); + WRITE_VREG(HEVC_MPRED_INT_STATUS, + (1<<31) + ); + */ + WRITE_VREG(HEVCD_IPP_TOP_CNTL, + (0 << 1) | /*enable ipp*/ + (1 << 0) /*software reset ipp and mpp*/ + ); +#ifdef CHANGE_REMOVED + WRITE_VREG(HEVCD_IPP_TOP_CNTL, + (1 << 1) | /*enable ipp*/ + (0 << 0) /*software reset ipp 
and mpp*/ + ); +#else + WRITE_VREG(HEVCD_IPP_TOP_CNTL, + (3 << 4) | // av1 + (1 << 1) | /*enable ipp*/ + (0 << 0) /*software reset ipp and mpp*/ + ); +#endif + if (get_double_write_mode(hw) & 0x10) { + /*Enable NV21 reference read mode for MC*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); + } +#ifdef MCRCC_ENABLE + /*Initialize mcrcc and decomp perf counters*/ + if (mcrcc_cache_alg_flag && + hw->init_flag == 0) { + mcrcc_perfcount_reset(hw); + decomp_perfcount_reset(hw); + } +#endif + } +#ifdef CHANGE_REMOVED +#else +// Set MCR fetch priorities + data32 = 0x1 | (0x1 << 2) | (0x1 <<3) | + (24 << 4) | (32 << 11) | (24 << 18) | (32 << 25); + WRITE_VREG(HEVCD_MPP_DECOMP_AXIURG_CTL, data32); +#endif + return; +} + + +#ifdef CONFIG_HEVC_CLK_FORCED_ON +static void config_av1_clk_forced_on(void) +{ + unsigned int rdata32; + /*IQIT*/ + rdata32 = READ_VREG(HEVC_IQIT_CLK_RST_CTRL); + WRITE_VREG(HEVC_IQIT_CLK_RST_CTRL, rdata32 | (0x1 << 2)); + + /* DBLK*/ + rdata32 = READ_VREG(HEVC_DBLK_CFG0); + WRITE_VREG(HEVC_DBLK_CFG0, rdata32 | (0x1 << 2)); + + /* SAO*/ + rdata32 = READ_VREG(HEVC_SAO_CTRL1); + WRITE_VREG(HEVC_SAO_CTRL1, rdata32 | (0x1 << 2)); + + /*MPRED*/ + rdata32 = READ_VREG(HEVC_MPRED_CTRL1); + WRITE_VREG(HEVC_MPRED_CTRL1, rdata32 | (0x1 << 24)); + + /* PARSER*/ + rdata32 = READ_VREG(HEVC_STREAM_CONTROL); + WRITE_VREG(HEVC_STREAM_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_SHIFT_CONTROL); + WRITE_VREG(HEVC_SHIFT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_CABAC_CONTROL); + WRITE_VREG(HEVC_CABAC_CONTROL, rdata32 | (0x1 << 13)); + rdata32 = READ_VREG(HEVC_PARSER_CORE_CONTROL); + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_INT_CONTROL); + WRITE_VREG(HEVC_PARSER_INT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_IF_CONTROL); + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + rdata32 | (0x1 << 6) | (0x1 << 3) | (0x1 << 1)); + + /*IPP*/ + rdata32 = 
READ_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG); + WRITE_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG, rdata32 | 0xffffffff); + + /* MCRCC*/ + rdata32 = READ_VREG(HEVCD_MCRCC_CTL1); + WRITE_VREG(HEVCD_MCRCC_CTL1, rdata32 | (0x1 << 3)); +} +#endif + + +static int vav1_mmu_map_alloc(struct AV1HW_s *hw) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + u32 mmu_map_size = vav1_frame_mmu_map_size(hw); + hw->frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + mmu_map_size, + &hw->frame_mmu_map_phy_addr, GFP_KERNEL); + if (hw->frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(hw->frame_mmu_map_addr, 0, mmu_map_size); + } +#ifdef AOM_AV1_MMU_DW + if (get_double_write_mode(hw) & 0x20) { + u32 mmu_map_size = vaom_dw_frame_mmu_map_size(hw); + hw->dw_frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + mmu_map_size, + &hw->dw_frame_mmu_map_phy_addr, GFP_KERNEL); + if (hw->dw_frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(hw->dw_frame_mmu_map_addr, 0, mmu_map_size); + } +#endif + return 0; +} + + +static void vav1_mmu_map_free(struct AV1HW_s *hw) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + u32 mmu_map_size = vav1_frame_mmu_map_size(hw); + if (hw->frame_mmu_map_addr) { + if (hw->frame_mmu_map_phy_addr) + dma_free_coherent(amports_get_dma_device(), + mmu_map_size, + hw->frame_mmu_map_addr, + hw->frame_mmu_map_phy_addr); + hw->frame_mmu_map_addr = NULL; + } + } +#ifdef AOM_AV1_MMU_DW + if (get_double_write_mode(hw) & 0x20) { + u32 mmu_map_size = vaom_dw_frame_mmu_map_size(hw); + if (hw->dw_frame_mmu_map_addr) { + if (hw->dw_frame_mmu_map_phy_addr) + dma_free_coherent(amports_get_dma_device(), + mmu_map_size, + hw->dw_frame_mmu_map_addr, + hw->dw_frame_mmu_map_phy_addr); + hw->dw_frame_mmu_map_addr = NULL; + } + } +#endif +} 
+ + +static void av1_local_uninit(struct AV1HW_s *hw, bool reset_flag) +{ + hw->rpm_ptr = NULL; + hw->lmem_ptr = NULL; + + if (!reset_flag) { + hw->fg_ptr = NULL; + if (hw->fg_addr) { + if (hw->fg_phy_addr) + codec_mm_dma_free_coherent(hw->fg_table_handle); + hw->fg_addr = NULL; + } + } + + if (hw->rpm_addr) { + dma_free_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, + hw->rpm_addr, + hw->rpm_phy_addr); + hw->rpm_addr = NULL; + } + if (hw->aux_addr) { + dma_free_coherent(amports_get_dma_device(), + hw->prefix_aux_size + hw->suffix_aux_size, hw->aux_addr, + hw->aux_phy_addr); + hw->aux_addr = NULL; + } +#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD) + if (hw->ucode_log_addr) { + dma_free_coherent(amports_get_dma_device(), + UCODE_LOG_BUF_SIZE, hw->ucode_log_addr, + hw->ucode_log_phy_addr); + hw->ucode_log_addr = NULL; + } +#endif + if (hw->lmem_addr) { + if (hw->lmem_phy_addr) + dma_free_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, hw->lmem_addr, + hw->lmem_phy_addr); + hw->lmem_addr = NULL; + } + + vav1_mmu_map_free(hw); + + if (hw->gvs) + vfree(hw->gvs); + hw->gvs = NULL; +} + +static int av1_local_init(struct AV1HW_s *hw, bool reset_flag) +{ + int ret = -1; + /*int losless_comp_header_size, losless_comp_body_size;*/ + + struct BuffInfo_s *cur_buf_info = NULL; + + memset(&hw->param, 0, sizeof(union param_u)); +#ifdef MULTI_INSTANCE_SUPPORT + cur_buf_info = &hw->work_space_buf_store; + hw->pbi->work_space_buf = cur_buf_info; +#if 0 + if (vdec_is_support_4k()) { + memcpy(cur_buf_info, &aom_workbuff_spec[1], /* 8k */ + sizeof(struct BuffInfo_s)); + } else + memcpy(cur_buf_info, &aom_workbuff_spec[0],/* 1080p */ + sizeof(struct BuffInfo_s)); +#endif + memcpy(cur_buf_info, &aom_workbuff_spec[hw->buffer_spec_index], + sizeof(struct BuffInfo_s)); + + cur_buf_info->start_adr = hw->buf_start; + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL) || + (get_double_write_mode(hw) == 0x10)) + hw->mc_buf_spec.buf_end = hw->buf_start + hw->buf_size; + +#else 
+/*! MULTI_INSTANCE_SUPPORT*/ +#if 0 + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + cur_buf_info = &aom_workbuff_spec[1];/* 8k work space */ + else + cur_buf_info = &aom_workbuff_spec[1];/* 4k2k work space */ + } else + cur_buf_info = &aom_workbuff_spec[0];/* 1080p work space */ +#endif + memcpy(cur_buf_info, &aom_workbuff_spec[hw->buffer_spec_index], + sizeof(struct BuffInfo_s)); +#endif + + init_buff_spec(hw, cur_buf_info); + aom_bufmgr_init(hw, cur_buf_info, NULL); + + if (!vdec_is_support_4k() + && (buf_alloc_width > 1920 && buf_alloc_height > 1088)) { + buf_alloc_width = 1920; + buf_alloc_height = 1088; + if (hw->max_pic_w > 1920 && hw->max_pic_h > 1088) { + hw->max_pic_w = 1920; + hw->max_pic_h = 1088; + } + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + buf_alloc_width = 8192; + buf_alloc_height = 4608; + } + + hw->init_pic_w = hw->max_pic_w ? hw->max_pic_w : + (hw->vav1_amstream_dec_info.width ? hw->vav1_amstream_dec_info.width : + (buf_alloc_width ? buf_alloc_width : hw->work_space_buf->max_width)); + hw->init_pic_h = hw->max_pic_h ? hw->max_pic_h : + (hw->vav1_amstream_dec_info.height ? hw->vav1_amstream_dec_info.height : + (buf_alloc_height ? 
buf_alloc_height : hw->work_space_buf->max_height)); + + hw->pbi->frame_width = hw->init_pic_w; + hw->pbi->frame_height = hw->init_pic_h; + + /* video is not support unaligned with 64 in tl1 + ** vdec canvas mode will be linear when dump yuv is set + */ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (get_double_write_mode(hw) != 0) && + (((hw->max_pic_w % 64) != 0) || + (hw->vav1_amstream_dec_info.width % 64) != 0)) { + if (hw_to_vdec(hw)->canvas_mode != + CANVAS_BLKMODE_LINEAR) + hw->mem_map_mode = 2; + else { + hw->mem_map_mode = 0; + av1_print(hw, AOM_DEBUG_HW_MORE, "vdec blkmod linear, force mem_map_mode 0\n"); + } + } + +#if 0 +//ndef MV_USE_FIXED_BUF + if (init_mv_buf_list(hw) < 0) { + pr_err("%s: init_mv_buf_list fail\n", __func__); + return -1; + } +#endif + + hw->mv_buf_margin = mv_buf_margin; + + hw->pts_unstable = ((unsigned long)(hw->vav1_amstream_dec_info.param) + & 0x40) >> 6; + + if ((debug & AOM_AV1_DEBUG_SEND_PARAM_WITH_REG) == 0) { + hw->rpm_addr = dma_alloc_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, + &hw->rpm_phy_addr, GFP_KERNEL); + if (hw->rpm_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + return -1; + } + hw->rpm_ptr = hw->rpm_addr; + } + + if (prefix_aux_buf_size > 0 || + suffix_aux_buf_size > 0) { + u32 aux_buf_size; + + hw->prefix_aux_size = AUX_BUF_ALIGN(prefix_aux_buf_size); + hw->suffix_aux_size = AUX_BUF_ALIGN(suffix_aux_buf_size); + aux_buf_size = hw->prefix_aux_size + hw->suffix_aux_size; + hw->aux_addr = dma_alloc_coherent(amports_get_dma_device(), + aux_buf_size, &hw->aux_phy_addr, GFP_KERNEL); + if (hw->aux_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + goto dma_alloc_fail; + } + } +#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD) + //if (udebug_flag & 0x8) { + hw->ucode_log_addr = dma_alloc_coherent(amports_get_dma_device(), + UCODE_LOG_BUF_SIZE, &hw->ucode_log_phy_addr, GFP_KERNEL); + if (hw->ucode_log_addr == NULL) { + hw->ucode_log_phy_addr = 0; + 
} + pr_info("%s: alloc ucode log buffer %p\n", + __func__, hw->ucode_log_addr); + //} +#endif + + if (!reset_flag) { + int alloc_num = 1; + if ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_SC2) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5W)) { + alloc_num = FRAME_BUFFERS; + } + hw->fg_addr = codec_mm_dma_alloc_coherent(&hw->fg_table_handle, + (ulong *)&hw->fg_phy_addr,FGS_TABLE_SIZE * alloc_num, MEM_NAME); + if (hw->fg_addr == NULL) { + pr_err("%s: failed to alloc fg buffer\n", __func__); + } + hw->fg_ptr = hw->fg_addr; + pr_info("%s, alloc fg table addr %lx, size 0x%x\n", __func__, + (ulong)hw->fg_phy_addr, FGS_TABLE_SIZE * alloc_num); + } + + hw->lmem_addr = dma_alloc_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, + &hw->lmem_phy_addr, GFP_KERNEL); + if (hw->lmem_addr == NULL) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + goto dma_alloc_fail; + } + hw->lmem_ptr = hw->lmem_addr; + + vdec_set_vframe_comm(hw_to_vdec(hw), DRIVER_NAME); + ret = vav1_mmu_map_alloc(hw); + if (ret < 0) + goto dma_alloc_fail; + + return ret; + +dma_alloc_fail: + av1_local_uninit(hw, reset_flag); + return -1; +} + + +#define spec2canvas(x) \ + (((x)->uv_canvas_index << 16) | \ + ((x)->uv_canvas_index << 8) | \ + ((x)->y_canvas_index << 0)) + + +static void set_canvas(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + struct vdec_s *vdec = hw_to_vdec(hw); + int canvas_w = ALIGN(pic_config->y_crop_width, 64)/4; + int canvas_h = ALIGN(pic_config->y_crop_height, 32)/4; + int blkmode = hw->mem_map_mode; + /*CANVAS_BLKMODE_64X32*/ + if (pic_config->double_write_mode) { + canvas_w = pic_config->y_crop_width / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + canvas_h = pic_config->y_crop_height / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + + /* sao ctrl1 config aligned with 64, so aligned with 64 same */ + 
		canvas_w = ALIGN(canvas_w, 64);
		canvas_h = ALIGN(canvas_h, 32);

		if (vdec->parallel_dec == 1) {
			/* canvas ids come from the shared allocator;
			 * -1 means "not assigned yet", fetch once and keep */
			if (pic_config->y_canvas_index == -1)
				pic_config->y_canvas_index =
					vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id);
			if (pic_config->uv_canvas_index == -1)
				pic_config->uv_canvas_index =
					vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id);
		} else {
			/* fixed layout: two canvases per buffer from id 128 */
			pic_config->y_canvas_index = 128 + pic_config->index * 2;
			pic_config->uv_canvas_index = 128 + pic_config->index * 2 + 1;
		}

		/* endian is 0 on the v4l path, 7 otherwise */
		config_cav_lut_ex(pic_config->y_canvas_index,
			pic_config->dw_y_adr, canvas_w, canvas_h,
			CANVAS_ADDR_NOWRAP, blkmode, hw->is_used_v4l ? 0 : 7, VDEC_HEVC);
		config_cav_lut_ex(pic_config->uv_canvas_index,
			pic_config->dw_u_v_adr, canvas_w, canvas_h,
			CANVAS_ADDR_NOWRAP, blkmode, hw->is_used_v4l ? 0 : 7, VDEC_HEVC);

#ifdef MULTI_INSTANCE_SUPPORT
		/* mirror the canvas setup into pic_config so the vframe
		 * path (prepare_display_buf) can hand it to consumers */
		pic_config->canvas_config[0].phy_addr =
			pic_config->dw_y_adr;
		pic_config->canvas_config[0].width =
			canvas_w;
		pic_config->canvas_config[0].height =
			canvas_h;
		pic_config->canvas_config[0].block_mode =
			blkmode;
		pic_config->canvas_config[0].endian = hw->is_used_v4l ? 0 : 7;

		pic_config->canvas_config[1].phy_addr =
			pic_config->dw_u_v_adr;
		pic_config->canvas_config[1].width =
			canvas_w;
		pic_config->canvas_config[1].height =
			canvas_h;
		pic_config->canvas_config[1].block_mode =
			blkmode;
		pic_config->canvas_config[1].endian = hw->is_used_v4l ?
			0 : 7;
#endif
	}
}

/*
 * set_frame_info() - fill the display-related vframe fields (duration,
 * signal type, aspect ratio, sidebind info) from decoder state, and on
 * the v4l path forward HDR metadata to the v4l layer.
 */
static void set_frame_info(struct AV1HW_s *hw, struct vframe_s *vf)
{
	unsigned int ar;
	vf->duration = hw->frame_dur;
	vf->duration_pulldown = 0;
	vf->flag = 0;
	vf->prop.master_display_colour = hw->vf_dp;
	vf->signal_type = hw->video_signal_type;
	/* aspect ratio as height/width in 8.8 fixed point, clamped */
	if (vf->compWidth && vf->compHeight)
		hw->frame_ar = vf->compHeight * 0x100 / vf->compWidth;
	ar = min_t(u32, hw->frame_ar, DISP_RATIO_ASPECT_RATIO_MAX);
	vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT);

	if (hw->is_used_v4l && (vf->signal_type != 0)) {
		struct aml_vdec_hdr_infos hdr;
		struct aml_vcodec_ctx *ctx =
			(struct aml_vcodec_ctx *)(hw->v4l2_ctx);

		memset(&hdr, 0, sizeof(hdr));
		hdr.signal_type = vf->signal_type;
		hdr.color_parms = hw->vf_dp;
		/* luminance is scaled down by 1000 for the v4l interface */
		hdr.color_parms.luminance[0] = hdr.color_parms.luminance[0] / 1000;
		vdec_v4l_set_hdr_infos(ctx, &hdr);
	}

	vf->sidebind_type = hw->sidebind_type;
	vf->sidebind_channel_id = hw->sidebind_channel_id;
}

/*
 * vav1_vf_states() - vframe provider callback reporting queue fill
 * levels.  When the module-level 'step' control is 2, available frames
 * are reported as 0 (appears to be a single-step debug mode; see
 * vav1_vf_get() which advances step 1 -> 2 -- confirm).
 */
static int vav1_vf_states(struct vframe_states *states, void *op_arg)
{
	struct AV1HW_s *hw = (struct AV1HW_s *)op_arg;

	states->vf_pool_size = VF_POOL_SIZE;
	states->buf_free_num = kfifo_len(&hw->newframe_q);
	states->buf_avail_num = kfifo_len(&hw->display_q);

	if (step == 2)
		states->buf_avail_num = 0;
	return 0;
}

/*
 * vav1_vf_peek() - vframe provider callback: return the next displayable
 * frame without dequeuing it.  Peeks up to two entries so it can also
 * pre-fill next_vf_pts on the head frame when a successor exists.
 */
static struct vframe_s *vav1_vf_peek(void *op_arg)
{
	struct vframe_s *vf[2] = {0, 0};
	struct AV1HW_s *hw = (struct AV1HW_s *)op_arg;

	if (step == 2)
		return NULL;

	/* note: vf is an array; &vf and vf are the same address here,
	 * so this copies up to 2 pointers into vf[] */
	if (kfifo_out_peek(&hw->display_q, (void *)&vf, 2)) {
		if (vf[1]) {
			vf[0]->next_vf_pts_valid = true;
			vf[0]->next_vf_pts = vf[1]->pts;
		} else
			vf[0]->next_vf_pts_valid = false;
		return vf[0];
	}

	return NULL;
}

/*
 * vav1_vf_get() - vframe provider callback: dequeue the next displayable
 * frame.  step 1 -> 2 implements single-step (one frame, then stop).
 */
static struct vframe_s *vav1_vf_get(void *op_arg)
{
	struct vframe_s *vf;
	struct AV1HW_s *hw = (struct AV1HW_s *)op_arg;

	if (step == 2)
		return NULL;
	else if (step == 1)
		step = 2;

	if (kfifo_get(&hw->display_q, &vf)) {
		struct vframe_s *next_vf = NULL;
		uint8_t index =
vf->index & 0xff; + ATRACE_COUNTER(hw->trace.disp_q_name, kfifo_len(&hw->display_q)); + if (index < hw->used_buf_num || + (vf->type & VIDTYPE_V4L_EOS)) { + vf->index_disp = atomic_read(&hw->vf_get_count); + atomic_add(1, &hw->vf_get_count); + if (debug & AOM_DEBUG_VFRAME) { + struct BufferPool_s *pool = hw->common.buffer_pool; + struct PIC_BUFFER_CONFIG_s *pic = + &pool->frame_bufs[index].buf; + unsigned long flags; + lock_buffer_pool(hw->common.buffer_pool, flags); + av1_print(hw, AOM_DEBUG_VFRAME, "%s vf %px index 0x%x type 0x%x w/h %d/%d, aux size %d, pts %d, %lld, ts: %llu\n", + __func__, vf, vf->index, vf->type, + vf->width, vf->height, + pic->aux_data_size, + vf->pts, + vf->pts_us64, + vf->timestamp); + unlock_buffer_pool(hw->common.buffer_pool, flags); + } + + if (kfifo_peek(&hw->display_q, &next_vf) && next_vf) { + vf->next_vf_pts_valid = true; + vf->next_vf_pts = next_vf->pts; + } else + vf->next_vf_pts_valid = false; +#ifdef DUMP_FILMGRAIN + if (index == fg_dump_index) { + unsigned long flags; + int ii; + lock_buffer_pool(hw->common.buffer_pool, flags); + pr_info("FGS_TABLE for buffer %d:\n", index); + for (ii = 0; ii < FGS_TABLE_SIZE; ii++) { + pr_info("%02x ", hw->fg_ptr[ii]); + if (((ii+ 1) & 0xf) == 0) + pr_info("\n"); + } + unlock_buffer_pool(hw->common.buffer_pool, flags); + } +#endif + + return vf; + } + } + return NULL; +} + +static void vav1_vf_put(struct vframe_s *vf, void *op_arg) +{ + struct AV1HW_s *hw = (struct AV1HW_s *)op_arg; + uint8_t index = vf->index & 0xff; + unsigned long flags; + + if ((vf == NULL) || (hw == NULL)) + return; + + kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->trace.new_q_name, kfifo_len(&hw->newframe_q)); + atomic_add(1, &hw->vf_put_count); + if (debug & AOM_DEBUG_VFRAME) { + lock_buffer_pool(hw->common.buffer_pool, flags); + av1_print(hw, AOM_DEBUG_VFRAME, "%s index 0x%x type 0x%x w/h %d/%d, pts %d, %lld, ts: %llu\n", + __func__, vf->index, vf->type, + vf->width, vf->height, + vf->pts, 
+ vf->pts_us64, + vf->timestamp); + unlock_buffer_pool(hw->common.buffer_pool, flags); + } + + if (index < hw->used_buf_num) { + struct AV1_Common_s *cm = &hw->common; + struct BufferPool_s *pool = cm->buffer_pool; + PIC_BUFFER_CONFIG *pic = &pool->frame_bufs[index].buf; + + if (vf->v4l_mem_handle != + hw->m_BUF[pic->BUF_index].v4l_ref_buf_addr) { + av1_print(hw, PRINT_FLAG_V4L_DETAIL, + "AV1 update fb handle, old:%llx, new:%llx\n", + hw->m_BUF[pic->BUF_index].v4l_ref_buf_addr, + vf->v4l_mem_handle); + + hw->m_BUF[pic->BUF_index].v4l_ref_buf_addr = + vf->v4l_mem_handle; + } + + lock_buffer_pool(hw->common.buffer_pool, flags); + if (pic->repeat_pic) { + if (pic->repeat_pic->repeat_count > 0) + pic->repeat_pic->repeat_count --; + else + av1_print(hw, PRINT_FLAG_ERROR, "repeat_count <= 0 pic:%px\n", pic); + pic->repeat_pic = NULL; + } + if ((debug & AV1_DEBUG_IGNORE_VF_REF) == 0) { + if (pool->frame_bufs[index].buf.vf_ref > 0) + pool->frame_bufs[index].buf.vf_ref--; + } + if (hw->wait_buf) + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); + + hw->last_put_idx = index; + hw->new_frame_displayed++; + + if (hw->wait_more_buf) { + hw->wait_more_buf = false; + hw->dec_result = AOM_AV1_RESULT_NEED_MORE_BUFFER; + vdec_schedule_work(&hw->work); + } + + unlock_buffer_pool(hw->common.buffer_pool, flags); + } +} + +static int vav1_event_cb(int type, void *data, void *op_arg) +{ + unsigned long flags; + struct AV1HW_s *hw = (struct AV1HW_s *)op_arg; + struct AV1_Common_s *cm = &hw->common; + struct BufferPool_s *pool = cm->buffer_pool; + + if (type & VFRAME_EVENT_RECEIVER_RESET) { +#if 0 + unsigned long flags; + + amhevc_stop(); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_light_unreg_provider(&vav1_vf_prov); +#endif + spin_lock_irqsave(&hw->lock, flags); + vav1_local_init(); + vav1_prot_init(); + spin_unlock_irqrestore(&hw->lock, flags); +#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER + vf_reg_provider(&vav1_vf_prov); +#endif + amhevc_start(); +#endif + } else if (type & 
VFRAME_EVENT_RECEIVER_GET_AUX_DATA) { + struct provider_aux_req_s *req = + (struct provider_aux_req_s *)data; + unsigned char index; + + lock_buffer_pool(hw->common.buffer_pool, flags); + index = req->vf->index & 0xff; + req->aux_buf = NULL; + req->aux_size = 0; + if (req->bot_flag) + index = (req->vf->index >> 8) & 0xff; + if (index != 0xff + && index < hw->used_buf_num) { + struct PIC_BUFFER_CONFIG_s *pic_config = + &pool->frame_bufs[index].buf; + req->aux_buf = pic_config->aux_data_buf; + req->aux_size = pic_config->aux_data_size; +#if 0 +//def CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + if (hw->bypass_dvenl && !dolby_meta_with_el) + req->dv_enhance_exist = false; + else + req->dv_enhance_exist = + pic_config->dv_enhance_exist; + av1_print(hw, AOM_DEBUG_VFRAME, + "query dv_enhance_exist for pic (vf 0x%p, poc %d index %d) flag => %d, aux sizd 0x%x\n", + req->vf, + pic_config->POC, index, + req->dv_enhance_exist, req->aux_size); +#else + req->dv_enhance_exist = 0; +#endif + } + unlock_buffer_pool(hw->common.buffer_pool, flags); + + if (debug & AOM_DEBUG_AUX_DATA) + av1_print(hw, 0, + "%s(type 0x%x vf index 0x%x)=>size 0x%x\n", + __func__, type, index, req->aux_size); + } else if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) { + struct provider_state_req_s *req = + (struct provider_state_req_s *)data; + if (req->req_type == REQ_STATE_SECURE) + req->req_result[0] = vdec_secure(hw_to_vdec(hw)); + else + req->req_result[0] = 0xffffffff; + } + return 0; +} + +void av1_inc_vf_ref(struct AV1HW_s *hw, int index) +{ + struct AV1_Common_s *cm = &hw->common; + + if ((debug & AV1_DEBUG_IGNORE_VF_REF) == 0) { + cm->buffer_pool->frame_bufs[index].buf.vf_ref++; + + av1_print(hw, AV1_DEBUG_BUFMGR_MORE, "%s index = %d new vf_ref = %d\r\n", + __func__, index, + cm->buffer_pool->frame_bufs[index].buf.vf_ref); + } +} +#if 0 +static int frame_duration_adapt(struct AV1HW_s *hw, struct vframe_s *vf, u32 valid) +{ + u32 old_duration, pts_duration = 0; + u32 pts = vf->pts; + + if 
(hw->get_frame_dur == true) + return true; + + hw->frame_cnt_window++; + if (!(hw->av1_first_pts_ready == 1)) { + if (valid) { + hw->pts1 = pts; + hw->frame_cnt_window = 0; + hw->duration_from_pts_done = 0; + hw->av1_first_pts_ready = 1; + } else { + return false; + } + } else { + if (pts < hw->pts1) { + if (hw->frame_cnt_window > FRAME_CNT_WINDOW_SIZE) { + hw->pts1 = pts; + hw->frame_cnt_window = 0; + } + } + + if (valid && (hw->frame_cnt_window > FRAME_CNT_WINDOW_SIZE) && + (pts > hw->pts1) && (hw->duration_from_pts_done == 0)) { + old_duration = hw->frame_dur; + hw->pts2 = pts; + pts_duration = (((hw->pts2 - hw->pts1) * 16) / + (hw->frame_cnt_window * 15)); + + if (close_to(pts_duration, old_duration, 2000)) { + hw->frame_dur = pts_duration; + av1_print(hw, AV1_DEBUG_OUT_PTS, + "use calc duration %d\n", pts_duration); + } + + if (hw->duration_from_pts_done == 0) { + if (close_to(pts_duration, old_duration, RATE_CORRECTION_THRESHOLD)) { + hw->duration_from_pts_done = 1; + } else { + if (!close_to(pts_duration, + old_duration, 1000) && + !close_to(pts_duration, + hw->frame_dur, 1000) && + close_to(pts_duration, + hw->last_duration, 200)) { + /* frame_dur must + * wrong,recover it. 
+ */ + hw->frame_dur = pts_duration; + } + hw->pts1 = hw->pts2; + hw->frame_cnt_window = 0; + hw->duration_from_pts_done = 0; + } + } + hw->last_duration = pts_duration; + } + } + return true; +} +#endif + +static void update_vf_memhandle(struct AV1HW_s *hw, + struct vframe_s *vf, struct PIC_BUFFER_CONFIG_s *pic) +{ + /* keeper not needed for v4l solution */ + if (hw->is_used_v4l) + return; + + if (pic->index < 0) { + vf->mem_handle = NULL; + vf->mem_head_handle = NULL; + vf->mem_dw_handle = NULL; + } else if (vf->type & VIDTYPE_SCATTER) { +#ifdef AOM_AV1_MMU_DW + if (pic->double_write_mode & 0x20 && + (debug & AOM_DEBUG_DW_DISP_MAIN) == 0) { + vf->mem_handle = + decoder_mmu_box_get_mem_handle( + hw->mmu_box_dw, pic->index); + vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, + DW_HEADER_BUFFER_IDX(pic->BUF_index)); + vf->mem_dw_handle = NULL; + } else +#endif + { + vf->mem_handle = + decoder_mmu_box_get_mem_handle( + hw->mmu_box, pic->index); + vf->mem_head_handle = + decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, + HEADER_BUFFER_IDX(pic->BUF_index)); + if (get_double_write_mode(hw) == 3) + vf->mem_dw_handle = + decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, + VF_BUFFER_IDX(pic->BUF_index)); + else + vf->mem_dw_handle = NULL; + } +#ifdef USE_SPEC_BUF_FOR_MMU_HEAD + vf->mem_head_handle = NULL; +#endif + } else { + vf->mem_handle = + decoder_bmmu_box_get_mem_handle( + hw->bmmu_box, VF_BUFFER_IDX(pic->BUF_index)); + vf->mem_head_handle = NULL; + vf->mem_dw_handle = NULL; + /*vf->mem_head_handle = + *decoder_bmmu_box_get_mem_handle( + *hw->bmmu_box, VF_BUFFER_IDX(BUF_index)); + */ + } +} + +static inline void av1_update_gvs(struct AV1HW_s *hw, struct vframe_s *vf, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + if (hw->gvs->frame_height != pic_config->y_crop_height) { + hw->gvs->frame_width = pic_config->y_crop_width; + hw->gvs->frame_height = pic_config->y_crop_height; + } + if (hw->gvs->frame_dur != hw->frame_dur) { + hw->gvs->frame_dur = 
hw->frame_dur; + if (hw->frame_dur != 0) + hw->gvs->frame_rate = ((96000 * 10 / hw->frame_dur) % 10) < 5 ? + 96000 / hw->frame_dur : (96000 / hw->frame_dur +1); + else + hw->gvs->frame_rate = -1; + } + if (vf && hw->gvs->ratio_control != vf->ratio_control) + hw->gvs->ratio_control = vf->ratio_control; + + hw->gvs->status = hw->stat | hw->fatal_error; + hw->gvs->error_count = hw->gvs->error_frame_count; + +} + +static int prepare_display_buf(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + struct vframe_s *vf = NULL; + struct vdec_info tmp4x; + int stream_offset = pic_config->stream_offset; + struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + ulong nv_order = VIDTYPE_VIU_NV21; + u32 pts_valid = 0, pts_us64_valid = 0; + u32 frame_size; + int i, reclac_flag = 0; + + av1_print(hw, AOM_DEBUG_VFRAME, "%s index = %d\r\n", __func__, pic_config->index); + if (kfifo_get(&hw->newframe_q, &vf) == 0) { + av1_print(hw, 0, "fatal error, no available buffer slot."); + return -1; + } + + /* swap uv */ + if (hw->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + nv_order = VIDTYPE_VIU_NV12; + } + + if (pic_config->double_write_mode && + (pic_config->double_write_mode & 0x20) == 0) + set_canvas(hw, pic_config); + + display_frame_count[hw->index]++; + if (vf) { + if (!force_pts_unstable && hw->av1_first_pts_ready) { + if ((pic_config->pts == 0) || ((pic_config->pts <= hw->last_pts) && + (pic_config->pts64 <= hw->last_pts_us64))) { + for (i = (FRAME_BUFFERS - 1); i > 0; i--) { + if ((hw->last_pts == hw->frame_mode_pts_save[i]) || + (hw->last_pts_us64 == hw->frame_mode_pts64_save[i])) { + pic_config->pts = hw->frame_mode_pts_save[i - 1]; + pic_config->pts64 = hw->frame_mode_pts64_save[i - 1]; + break; + } + } + + if ((i == 0) || (pic_config->pts <= hw->last_pts)) { + av1_print(hw, AV1_DEBUG_OUT_PTS, + "no found pts %d, set 0. 
%d, %d\n", + i, pic_config->pts, hw->last_pts); + pic_config->pts = 0; + pic_config->pts64 = 0; + } + } + } + + if (hw->is_used_v4l) { + vf->v4l_mem_handle + = hw->m_BUF[pic_config->v4l_buf_index].v4l_ref_buf_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + vf->mm_box.bmmu_box = hw->bmmu_box; + vf->mm_box.bmmu_idx = HEADER_BUFFER_IDX(hw->buffer_wrap[pic_config->v4l_buf_index]); + vf->mm_box.mmu_box = hw->mmu_box; + vf->mm_box.mmu_idx = hw->buffer_wrap[pic_config->v4l_buf_index]; + } + } + +#ifdef MULTI_INSTANCE_SUPPORT + if (vdec_frame_based(hw_to_vdec(hw))) { + vf->pts = pic_config->pts; + vf->pts_us64 = pic_config->pts64; + + if (hw->is_used_v4l && v4l_bitstream_id_enable) + vf->timestamp = pic_config->timestamp; + else + vf->timestamp = pic_config->pts64; + + if (vf->pts != 0 || vf->pts_us64 != 0) { + pts_valid = 1; + pts_us64_valid = 1; + } else { + pts_valid = 0; + pts_us64_valid = 0; + } + } else +#endif + if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, stream_offset, &vf->pts, + &frame_size, 0, + &vf->pts_us64) != 0) { +#ifdef DEBUG_PTS + hw->pts_missed++; +#endif + vf->pts = 0; + vf->pts_us64 = 0; + pts_valid = 0; + pts_us64_valid = 0; + } else { +#ifdef DEBUG_PTS + hw->pts_hit++; +#endif + pts_valid = 1; + pts_us64_valid = 1; + } + + if (hw->av1_first_pts_ready) { + if (hw->frame_dur && ((vf->pts == 0) || (vf->pts_us64 == 0))) { + vf->pts = hw->last_pts + DUR2PTS(hw->frame_dur); + vf->pts_us64 = hw->last_pts_us64 + + (DUR2PTS(hw->frame_dur) * 100 / 9); + reclac_flag = 1; + } + + if (!close_to(vf->pts, (hw->last_pts + DUR2PTS(hw->frame_dur)), 100)) { + vf->pts = hw->last_pts + DUR2PTS(hw->frame_dur); + vf->pts_us64 = hw->last_pts_us64 + + (DUR2PTS(hw->frame_dur) * 100 / 9); + reclac_flag = 2; + } + + if (hw->is_used_v4l) + reclac_flag = 0; + + /* try find the closed pts in saved pts pool */ + if (reclac_flag) { + for (i = 0; i < FRAME_BUFFERS - 1; 
i++) { + if ((hw->frame_mode_pts_save[i] > vf->pts) && + (hw->frame_mode_pts_save[i + 1] < vf->pts)) { + if ((hw->frame_mode_pts_save[i] - vf->pts) > + (vf->pts - hw->frame_mode_pts_save[i + 1])) { + vf->pts = hw->frame_mode_pts_save[i + 1]; + vf->pts_us64 = hw->frame_mode_pts64_save[i + 1]; + } else { + vf->pts = hw->frame_mode_pts_save[i]; + vf->pts_us64 = hw->frame_mode_pts64_save[i]; + } + break; + } + } + if (i == (FRAME_BUFFERS - 1)) + hw->dur_recalc_flag = 1; + } + } else { + av1_print(hw, AV1_DEBUG_OUT_PTS, + "first pts %d change to save[%d] %d\n", + vf->pts, hw->first_pts_index - 1, + hw->frame_mode_pts_save[hw->first_pts_index - 1]); + vf->pts = hw->frame_mode_pts_save[hw->first_pts_index - 1]; + vf->pts_us64 = hw->frame_mode_pts64_save[hw->first_pts_index - 1]; + } + hw->last_pts = vf->pts; + hw->last_pts_us64 = vf->pts_us64; + hw->av1_first_pts_ready = true; + av1_print(hw, AV1_DEBUG_OUT_PTS, + "av1 output slice type %d, dur %d, pts %d, pts64 %lld, ts: %llu\n", + pic_config->slice_type, hw->frame_dur, vf->pts, vf->pts_us64, vf->timestamp); + + fill_frame_info(hw, pic_config, frame_size, vf->pts); + + vf->index = 0xff00 | pic_config->v4l_buf_index; + if (pic_config->double_write_mode & 0x10) { + /* double write only */ + vf->compBodyAddr = 0; + vf->compHeadAddr = 0; +#ifdef AOM_AV1_MMU_DW + vf->dwBodyAddr = 0; + vf->dwHeadAddr = 0; +#endif + } else { + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + vf->compBodyAddr = 0; + vf->compHeadAddr = pic_config->header_adr; + if (((get_debug_fgs() & DEBUG_FGS_BYPASS) == 0) + && hw->assit_task.use_sfgs) + vf->fgs_table_adr = pic_config->sfgs_table_phy; + else + vf->fgs_table_adr = pic_config->fgs_table_adr; + vf->fgs_valid = hw->fgs_valid; +#ifdef AOM_AV1_MMU_DW + vf->dwBodyAddr = 0; + vf->dwHeadAddr = 0; + if (pic_config->double_write_mode & 0x20) { + u32 mode = pic_config->double_write_mode & 0xf; + if (mode == 5 || mode == 3) + vf->dwHeadAddr = 
pic_config->header_dw_adr; + else if ((mode == 1 || mode == 2 || mode == 4) + && (debug & AOM_DEBUG_DW_DISP_MAIN) == 0) { + vf->compHeadAddr = pic_config->header_dw_adr; + vf->fgs_valid = 0; + av1_print(hw, AOM_DEBUG_VFRAME, + "Use dw mmu for display\n"); + } + } +#endif + } else { + /*vf->compBodyAddr = pic_config->mc_y_adr; + *vf->compHeadAddr = pic_config->mc_y_adr + + *pic_config->comp_body_size; */ + /*head adr*/ + } + vf->canvas0Addr = vf->canvas1Addr = 0; + } + if (pic_config->double_write_mode && + (pic_config->double_write_mode & 0x20) == 0) { + vf->type = VIDTYPE_PROGRESSIVE | + VIDTYPE_VIU_FIELD; + vf->type |= nv_order; + if ((pic_config->double_write_mode == 3 || + pic_config->double_write_mode == 5) && + (!IS_8K_SIZE(pic_config->y_crop_width, + pic_config->y_crop_height))) { + vf->type |= VIDTYPE_COMPRESS; + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + vf->type |= VIDTYPE_SCATTER; + } + } + if (hw->is_used_v4l && pic_config->double_write_mode != 16 && + (!IS_8K_SIZE(pic_config->y_crop_width, + pic_config->y_crop_height))) { + if ((get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_S4 && + get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_S4D) + || (!hw->film_grain_present) || (get_double_write_mode(hw) != 1)) { + vf->type |= VIDTYPE_COMPRESS | VIDTYPE_SCATTER; + } + } +#ifdef MULTI_INSTANCE_SUPPORT + if (hw->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + pic_config->canvas_config[0]; + vf->canvas0_config[1] = + pic_config->canvas_config[1]; + vf->canvas1_config[0] = + pic_config->canvas_config[0]; + vf->canvas1_config[1] = + pic_config->canvas_config[1]; + + } else +#endif + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(pic_config); + } else { + vf->canvas0Addr = vf->canvas1Addr = 0; + vf->type = VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + vf->type |= 
VIDTYPE_SCATTER; + } + } + + switch (pic_config->bit_depth) { + case AOM_BITS_8: + vf->bitdepth = BITDEPTH_Y8 | + BITDEPTH_U8 | BITDEPTH_V8; + break; + case AOM_BITS_10: + case AOM_BITS_12: + vf->bitdepth = BITDEPTH_Y10 | + BITDEPTH_U10 | BITDEPTH_V10; + break; + default: + vf->bitdepth = BITDEPTH_Y10 | + BITDEPTH_U10 | BITDEPTH_V10; + break; + } + if ((vf->type & VIDTYPE_COMPRESS) == 0) + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + if (pic_config->bit_depth == AOM_BITS_8) + vf->bitdepth |= BITDEPTH_SAVING_MODE; + + /* if ((vf->width!=pic_config->width)| + * (vf->height!=pic_config->height)) + */ + /* pr_info("aaa: %d/%d, %d/%d\n", + vf->width,vf->height, pic_config->width, + pic_config->height); */ + vf->width = pic_config->y_crop_width / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + vf->height = pic_config->y_crop_height / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + if (force_w_h != 0) { + vf->width = (force_w_h >> 16) & 0xffff; + vf->height = force_w_h & 0xffff; + } + if ((pic_config->double_write_mode & 0x20) && + ((pic_config->double_write_mode & 0xf) == 2 || + (pic_config->double_write_mode & 0xf) == 4)) { + vf->compWidth = pic_config->y_crop_width / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + vf->compHeight = pic_config->y_crop_height / + get_double_write_ratio( + pic_config->double_write_mode & 0xf); + } else { + vf->compWidth = pic_config->y_crop_width; + vf->compHeight = pic_config->y_crop_height; + } + set_frame_info(hw, vf); + if (force_fps & 0x100) { + u32 rate = force_fps & 0xff; + + if (rate) + vf->duration = 96000/rate; + else + vf->duration = 0; + } + update_vf_memhandle(hw, vf, pic_config); + + av1_inc_vf_ref(hw, pic_config->v4l_buf_index); + + vdec_vframe_ready(hw_to_vdec(hw), vf); + if (pic_config->double_write_mode && + ((pic_config->double_write_mode & 0x20) == 0) && + (pic_config->v4l_buf_index != pic_config->BUF_index)) { + struct PIC_BUFFER_CONFIG_s 
*dst_pic = + &hw->common.buffer_pool->frame_bufs[pic_config->v4l_buf_index].buf; + struct PIC_BUFFER_CONFIG_s *src_pic = + &hw->common.buffer_pool->frame_bufs[pic_config->BUF_index].buf; + struct vdec_ge2d_info ge2d_info; + + av1_print(hw, PRINT_FLAG_V4L_DETAIL, + "ge2d copy start v4l_buf_index:%d repeat_buff_index:%d\n", + pic_config->v4l_buf_index, + pic_config->BUF_index); + ge2d_info.dst_vf = vf; + ge2d_info.src_canvas0Addr = ge2d_info.src_canvas1Addr = 0; + if (dst_pic->double_write_mode) { +#ifdef MULTI_INSTANCE_SUPPORT + if (hw->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + ge2d_info.src_canvas0Addr = ge2d_info.src_canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + dst_pic->canvas_config[0]; + vf->canvas0_config[1] = + dst_pic->canvas_config[1]; + vf->canvas1_config[0] = + dst_pic->canvas_config[0]; + vf->canvas1_config[1] = + dst_pic->canvas_config[1]; + ge2d_info.src_canvas0_config[0] = + src_pic->canvas_config[0]; + ge2d_info.src_canvas0_config[1] = + src_pic->canvas_config[1]; + ge2d_info.src_canvas1_config[0] = + src_pic->canvas_config[0]; + ge2d_info.src_canvas1_config[1] = + src_pic->canvas_config[1]; + } else +#endif + { + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(dst_pic); + ge2d_info.src_canvas0Addr = ge2d_info.src_canvas1Addr = + spec2canvas(src_pic); + } + } + + if (!hw->ge2d) { + int mode = nv_order == VIDTYPE_VIU_NV21 ? 
GE2D_MODE_CONVERT_NV21 : GE2D_MODE_CONVERT_NV12; + mode |= GE2D_MODE_CONVERT_LE; + vdec_ge2d_init(&hw->ge2d, mode); + } + vdec_ge2d_copy_data(hw->ge2d, &ge2d_info); + av1_print(hw, PRINT_FLAG_V4L_DETAIL, "ge2d copy done\n"); + } + decoder_do_frame_check(hw_to_vdec(hw), vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(hw->trace.pts_name, vf->timestamp); + ATRACE_COUNTER(hw->trace.new_q_name, kfifo_len(&hw->newframe_q)); + ATRACE_COUNTER(hw->trace.disp_q_name, kfifo_len(&hw->display_q)); + + atomic_add(1, &hw->vf_pre_count); + /*count info*/ + hw->gvs->frame_dur = hw->frame_dur; + vdec_count_info(hw->gvs, 0, stream_offset); + hw_to_vdec(hw)->vdec_fps_detec(hw_to_vdec(hw)->id); + +#ifdef AUX_DATA_CRC + decoder_do_aux_data_check(hw_to_vdec(hw), pic_config->aux_data_buf, + pic_config->aux_data_size); +#endif + + av1_print(hw, AV1_DEBUG_SEI_DETAIL, "%s aux_data_size = %d\n", + __func__, pic_config->aux_data_size); + + if (debug & AV1_DEBUG_SEI_DETAIL) { + int i = 0; + PR_INIT(128); + for (i = 0; i < pic_config->aux_data_size; i++) { + PR_FILL("%02x ", pic_config->aux_data_buf[i]); + if (((i + 1) & 0xf) == 0) + PR_INFO(hw->index); + } + PR_INFO(hw->index); + } + + if (hw->is_used_v4l) { + if ((pic_config->aux_data_size == 0) && + (pic_config->slice_type == KEY_FRAME) && + (atomic_read(&hw->vf_pre_count) == 1)) { + hw->no_need_aux_data = true; + } + + if (hw->no_need_aux_data) { + v4l2_ctx->aux_infos.free_buffer(v4l2_ctx, DV_TYPE); + v4l2_ctx->aux_infos.free_one_sei_buffer(v4l2_ctx, + &pic_config->aux_data_buf, + &pic_config->aux_data_size, + pic_config->ctx_buf_idx); + } else { + v4l2_ctx->aux_infos.bind_dv_buffer(v4l2_ctx, &vf->src_fmt.comp_buf, + &vf->src_fmt.md_buf); + } + + update_vframe_src_fmt(vf, + pic_config->aux_data_buf, + pic_config->aux_data_size, + false, hw->provider_name, NULL); + } + + av1_update_gvs(hw, vf, pic_config); + memcpy(&tmp4x, hw->gvs, sizeof(struct vdec_info)); + tmp4x.bit_depth_luma = bit_depth_luma; + 
tmp4x.bit_depth_chroma = bit_depth_chroma; + tmp4x.double_write_mode = pic_config->double_write_mode; + vdec_fill_vdec_frame(hw_to_vdec(hw), &hw->vframe_qos, &tmp4x, vf, pic_config->hw_decode_time); + if (without_display_mode == 0) { + if (hw->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vav1_vf_put(vav1_vf_get(hw), hw); + } else { + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(hw->provider_name, + VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } else + vav1_vf_put(vav1_vf_get(hw), hw); + } + + return 0; +} + +void av1_raw_write_image(AV1Decoder *pbi, PIC_BUFFER_CONFIG *sd) +{ + sd->stream_offset = pbi->pre_stream_offset; + prepare_display_buf((struct AV1HW_s *)(pbi->private_data), sd); + pbi->pre_stream_offset = READ_VREG(HEVC_SHIFT_BYTE_COUNT); +} + +static bool is_avaliable_buffer(struct AV1HW_s *hw); + +static int notify_v4l_eos(struct vdec_s *vdec) +{ + struct AV1HW_s *hw = (struct AV1HW_s *)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = &hw->vframe_dummy; + struct vdec_v4l2_buffer *fb = NULL; + int index = INVALID_IDX; + ulong expires; + + if (hw->eos) { + expires = jiffies + msecs_to_jiffies(2000); + while (!is_avaliable_buffer(hw)) { + if (time_after(jiffies, expires)) { + pr_err("[%d] AV1 isn't enough buff for notify eos.\n", ctx->id); + return 0; + } + } + + index = v4l_get_free_fb(hw); + if (INVALID_IDX == index) { + pr_err("[%d] AV1 EOS get free buff fail.\n", ctx->id); + return 0; + } + + fb = (struct vdec_v4l2_buffer *) + hw->m_BUF[index].v4l_ref_buf_addr; + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + vf->v4l_mem_handle = (ulong)fb; + + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + + av1_print(hw, 
0, "[%d] AV1 EOS notify.\n", ctx->id); + } + + return 0; +} + +static void get_rpm_param(union param_u *params) +{ + int i; + unsigned int data32; + + if (debug & AV1_DEBUG_BUFMGR) + pr_info("enter %s\r\n", __func__); + for (i = 0; i < 128; i++) { + do { + data32 = READ_VREG(RPM_CMD_REG); + /*pr_info("%x\n", data32);*/ + } while ((data32 & 0x10000) == 0); + params->l.data[i] = data32&0xffff; + /*pr_info("%x\n", data32);*/ + WRITE_VREG(RPM_CMD_REG, 0); + } + if (debug & AV1_DEBUG_BUFMGR) + pr_info("leave %s\r\n", __func__); +} + +#ifdef CHANGE_REMOVED +static int recycle_mmu_buf_tail(struct AV1HW_s *hw, + bool check_dma) +{ + struct AV1_Common_s *const cm = &hw->common; + + hw->used_4k_num = + READ_VREG(HEVC_SAO_MMU_STATUS) >> 16; + + av1_print(hw, 0, "pic index %d page_start %d\n", + cm->cur_fb_idx_mmu, hw->used_4k_num); + + if (check_dma) + hevc_mmu_dma_check(hw_to_vdec(hw)); + + if (hw->is_used_v4l) { + int index = cm->cur_fb_idx_mmu; + struct internal_comp_buf *ibuf = + index_to_icomp_buf(hw, index); + + decoder_mmu_box_free_idx_tail( + ibuf->mmu_box, + ibuf->index, + hw->used_4k_num); + } else { + decoder_mmu_box_free_idx_tail( + hw->mmu_box, + cm->cur_fb_idx_mmu, + hw->used_4k_num); + } + + cm->cur_fb_idx_mmu = INVALID_IDX; + hw->used_4k_num = -1; + + return 0; +} +#endif + +#ifdef CHANGE_REMOVED +static void av1_recycle_mmu_buf_tail(struct AV1HW_s *hw) +{ + struct AV1_Common_s *const cm = &hw->common; + if (get_double_write_mode(hw) & 0x10) + return; + + if (cm->cur_fb_idx_mmu != INVALID_IDX) { + recycle_mmu_buf_tail(hw, + ((hw->used_4k_num == -1) && + hw->m_ins_flag) ? 
1 : 0); + } +} +#endif + +static void av1_recycle_mmu_buf(struct AV1HW_s *hw) +{ + struct AV1_Common_s *const cm = &hw->common; + + if (hw->is_used_v4l) + return; + + if (get_double_write_mode(hw) & 0x10) + return; + if (cm->cur_fb_idx_mmu != INVALID_IDX) { + decoder_mmu_box_free_idx(hw->mmu_box, + cm->cur_fb_idx_mmu); + + cm->cur_fb_idx_mmu = INVALID_IDX; + hw->used_4k_num = -1; + } +} + +static void dec_again_process(struct AV1HW_s *hw) +{ + amhevc_stop(); + hw->dec_result = DEC_RESULT_AGAIN; + if (hw->process_state == + PROC_STATE_DECODESLICE) { + hw->process_state = + PROC_STATE_SENDAGAIN; + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + av1_recycle_mmu_buf(hw); + } + } + reset_process_time(hw); + vdec_schedule_work(&hw->work); +} + +static void read_film_grain_reg(struct AV1HW_s *hw) +{ + AV1_COMMON *cm = &hw->common; + int i; + if (cm->cur_frame == NULL) { + av1_print(hw, AOM_DEBUG_HW_MORE, "%s, cur_frame not exist!!!\n", __func__); + return; + } else + av1_print(hw, AOM_DEBUG_HW_MORE, "%s\n", __func__); + WRITE_VREG(HEVC_FGS_IDX, 0); + for (i = 0; i < FILM_GRAIN_REG_SIZE; i++) { + cm->cur_frame->film_grain_reg[i] = READ_VREG(HEVC_FGS_DATA); + } + cm->cur_frame->film_grain_reg_valid = 1; + cm->cur_frame->film_grain_ctrl = READ_VREG(HEVC_FGS_CTRL); +} + +static void config_film_grain_reg(struct AV1HW_s *hw, int film_grain_params_ref_idx) +{ + AV1_COMMON *cm = &hw->common; + int i; + unsigned char found = 0; + RefCntBuffer *buf; + + av1_print(hw, AOM_DEBUG_HW_MORE, + " ## %s frome reference idx %d\n", + __func__, film_grain_params_ref_idx); + for (i = 0; i < INTER_REFS_PER_FRAME; ++i) { + if (film_grain_params_ref_idx == cm->remapped_ref_idx[i]) { + found = 1; + break; + } + } + if (!found) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "%s Error, Invalid film grain reference idx %d\n", + __func__, film_grain_params_ref_idx); + return; + } + buf = cm->ref_frame_map[film_grain_params_ref_idx]; + + if 
(buf->film_grain_reg_valid == 0) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "%s Error, film grain register data invalid for reference idx %d\n", + __func__, film_grain_params_ref_idx); + return; + } + + if (cm->cur_frame == NULL) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "%s, cur_frame not exist!!!\n", __func__); + } + WRITE_VREG(HEVC_FGS_IDX, 0); + for (i = 0; i < FILM_GRAIN_REG_SIZE; i++) { + WRITE_VREG(HEVC_FGS_DATA, buf->film_grain_reg[i]); + if (cm->cur_frame) + cm->cur_frame->film_grain_reg[i] = buf->film_grain_reg[i]; + } + if (cm->cur_frame) + cm->cur_frame->film_grain_reg_valid = 1; + WRITE_VREG(HEVC_FGS_CTRL, READ_VREG(HEVC_FGS_CTRL) | 1); // set fil_grain_start + if (cm->cur_frame) + cm->cur_frame->film_grain_ctrl = READ_VREG(HEVC_FGS_CTRL); +} + +void config_next_ref_info_hw(struct AV1HW_s *hw) +{ + int j; + AV1_COMMON *const cm = &hw->common; + + av1_set_next_ref_frame_map(hw->pbi); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SC2) + WRITE_VREG(HEVC_PARSER_MEM_WR_ADDR, 0x11a0); + else + WRITE_VREG(HEVC_PARSER_MEM_WR_ADDR, 0x1000); + + for (j = 0; j < 12; j++) { + unsigned int info = + av1_get_next_used_ref_info(cm, j); + + WRITE_VREG(HEVC_PARSER_MEM_RW_DATA, info); + av1_print(hw, AOM_DEBUG_HW_MORE, + "config next ref info %d 0x%x\n", j, info); + } +} + + + +#ifdef PRINT_HEVC_DATA_PATH_MONITOR +void datapath_monitor(struct AV1HW_s *hw) +{ + uint32_t total_clk_count; + uint32_t path_transfer_count; + uint32_t path_wait_count; + float path_wait_ratio; + if (pbi->decode_idx > 1) { + WRITE_VREG(HEVC_PATH_MONITOR_CTRL, 0); // Disabble monitor and set rd_idx to 0 + total_clk_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + + WRITE_VREG(HEVC_PATH_MONITOR_CTRL, (1<<4)); // Disabble monitor and set rd_idx to 0 + +// parser --> iqit + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) path_wait_ratio = 0.0; + else path_wait_ratio = 
(float)path_wait_count/(float)path_transfer_count; + printk("[P%d HEVC PATH] Parser/IQIT/IPP/DBLK/OW/DDR/CMD WAITING \% : %.2f", + pbi->decode_idx - 2, path_wait_ratio); + +// iqit --> ipp + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) path_wait_ratio = 0.0; + else path_wait_ratio = (float)path_wait_count/(float)path_transfer_count; + printk(" %.2f", path_wait_ratio); + +// dblk <-- ipp + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) path_wait_ratio = 0.0; + else path_wait_ratio = (float)path_wait_count/(float)path_transfer_count; + printk(" %.2f", path_wait_ratio); + +// dblk --> ow + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) path_wait_ratio = 0.0; + else path_wait_ratio = (float)path_wait_count/(float)path_transfer_count; + printk(" %.2f", path_wait_ratio); + +// <--> DDR + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) path_wait_ratio = 0.0; + else path_wait_ratio = (float)path_wait_count/(float)path_transfer_count; + printk(" %.2f", path_wait_ratio); + +// CMD + path_transfer_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + path_wait_count = READ_VREG(HEVC_PATH_MONITOR_DATA); + if (path_transfer_count == 0) path_wait_ratio = 0.0; + else path_wait_ratio = (float)path_wait_count/(float)path_transfer_count; + printk(" %.2f\n", path_wait_ratio); + } +} + +#endif + +#ifdef MCRCC_ENABLE + +static int mcrcc_hit_rate; +static int mcrcc_bypass_rate; + +#define C_Reg_Wr WRITE_VREG +static void C_Reg_Rd(unsigned int adr, unsigned int *val) +{ + *val = READ_VREG(adr); +} + +static void mcrcc_perfcount_reset(struct AV1HW_s *hw) +{ + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, + "[cache_util.c] 
Entered mcrcc_perfcount_reset...\n"); + C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x1); + C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x0); + return; +} + +static unsigned raw_mcr_cnt_total_prev; +static unsigned hit_mcr_0_cnt_total_prev; +static unsigned hit_mcr_1_cnt_total_prev; +static unsigned byp_mcr_cnt_nchcanv_total_prev; +static unsigned byp_mcr_cnt_nchoutwin_total_prev; + +static void mcrcc_get_hitrate(struct AV1HW_s *hw, unsigned reset_pre) +{ + unsigned delta_hit_mcr_0_cnt; + unsigned delta_hit_mcr_1_cnt; + unsigned delta_raw_mcr_cnt; + unsigned delta_mcr_cnt_nchcanv; + unsigned delta_mcr_cnt_nchoutwin; + + unsigned tmp; + unsigned raw_mcr_cnt; + unsigned hit_mcr_cnt; + unsigned byp_mcr_cnt_nchoutwin; + unsigned byp_mcr_cnt_nchcanv; + int hitrate; + + if (reset_pre) { + raw_mcr_cnt_total_prev = 0; + hit_mcr_0_cnt_total_prev = 0; + hit_mcr_1_cnt_total_prev = 0; + byp_mcr_cnt_nchcanv_total_prev = 0; + byp_mcr_cnt_nchoutwin_total_prev = 0; + } + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] Entered mcrcc_get_hitrate...\n"); + C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x0<<1)); + C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &raw_mcr_cnt); + C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x1<<1)); + C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &hit_mcr_cnt); + C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x2<<1)); + C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &byp_mcr_cnt_nchoutwin); + C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x3<<1)); + C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &byp_mcr_cnt_nchcanv); + + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "raw_mcr_cnt_total: %d\n",raw_mcr_cnt); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "hit_mcr_cnt_total: %d\n",hit_mcr_cnt); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "byp_mcr_cnt_nchoutwin_total: %d\n",byp_mcr_cnt_nchoutwin); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "byp_mcr_cnt_nchcanv_total: %d\n",byp_mcr_cnt_nchcanv); + + delta_raw_mcr_cnt = raw_mcr_cnt - raw_mcr_cnt_total_prev; + delta_mcr_cnt_nchcanv = 
byp_mcr_cnt_nchcanv - byp_mcr_cnt_nchcanv_total_prev; + delta_mcr_cnt_nchoutwin = byp_mcr_cnt_nchoutwin - byp_mcr_cnt_nchoutwin_total_prev; + raw_mcr_cnt_total_prev = raw_mcr_cnt; + byp_mcr_cnt_nchcanv_total_prev = byp_mcr_cnt_nchcanv; + byp_mcr_cnt_nchoutwin_total_prev = byp_mcr_cnt_nchoutwin; + + C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x4<<1)); + C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &tmp); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "miss_mcr_0_cnt_total: %d\n",tmp); + C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x5<<1)); + C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &tmp); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "miss_mcr_1_cnt_total: %d\n",tmp); + C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x6<<1)); + C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &tmp); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "hit_mcr_0_cnt_total: %d\n",tmp); + delta_hit_mcr_0_cnt = tmp - hit_mcr_0_cnt_total_prev; + hit_mcr_0_cnt_total_prev = tmp; + C_Reg_Wr(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x7<<1)); + C_Reg_Rd(HEVCD_MCRCC_PERFMON_DATA, &tmp); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "hit_mcr_1_cnt_total: %d\n",tmp); + delta_hit_mcr_1_cnt = tmp - hit_mcr_1_cnt_total_prev; + hit_mcr_1_cnt_total_prev = tmp; + + if ( delta_raw_mcr_cnt != 0 ) { + hitrate = 100 * delta_hit_mcr_0_cnt/ delta_raw_mcr_cnt; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "CANV0_HIT_RATE : %d\n", hitrate); + hitrate = 100 * delta_hit_mcr_1_cnt/delta_raw_mcr_cnt; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "CANV1_HIT_RATE : %d\n", hitrate); + hitrate = 100 * delta_mcr_cnt_nchcanv/delta_raw_mcr_cnt; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "NONCACH_CANV_BYP_RATE : %d\n", hitrate); + hitrate = 100 * delta_mcr_cnt_nchoutwin/delta_raw_mcr_cnt; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "CACHE_OUTWIN_BYP_RATE : %d\n", hitrate); + } + + if (raw_mcr_cnt != 0) + { + hitrate = 100*hit_mcr_cnt/raw_mcr_cnt; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "MCRCC_HIT_RATE : %d\n", hitrate); + hitrate = 100*(byp_mcr_cnt_nchoutwin + 
byp_mcr_cnt_nchcanv)/raw_mcr_cnt; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "MCRCC_BYP_RATE : %d\n", hitrate); + } else { + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "MCRCC_HIT_RATE : na\n"); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "MCRCC_BYP_RATE : na\n"); + } + mcrcc_hit_rate = 100*hit_mcr_cnt/raw_mcr_cnt; + mcrcc_bypass_rate = 100*(byp_mcr_cnt_nchoutwin + byp_mcr_cnt_nchcanv)/raw_mcr_cnt; + + return; +} + +static void decomp_perfcount_reset(struct AV1HW_s *hw) +{ + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] Entered decomp_perfcount_reset...\n"); + C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x1); + C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x0); + return; +} + +static void decomp_get_hitrate(struct AV1HW_s *hw) +{ + unsigned raw_mcr_cnt; + unsigned hit_mcr_cnt; + int hitrate; + + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] Entered decomp_get_hitrate...\n"); + C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x0<<1)); + C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &raw_mcr_cnt); + C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x1<<1)); + C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &hit_mcr_cnt); + + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "hcache_raw_cnt_total: %d\n",raw_mcr_cnt); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "hcache_hit_cnt_total: %d\n",hit_mcr_cnt); + + if ( raw_mcr_cnt != 0 ) { + hitrate = 100*hit_mcr_cnt/raw_mcr_cnt; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_HCACHE_HIT_RATE : %.2f\%\n", hitrate); + } else { + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_HCACHE_HIT_RATE : na\n"); + } + C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x2<<1)); + C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &raw_mcr_cnt); + C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x3<<1)); + C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &hit_mcr_cnt); + + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "dcache_raw_cnt_total: %d\n",raw_mcr_cnt); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "dcache_hit_cnt_total: 
%d\n",hit_mcr_cnt); + + if ( raw_mcr_cnt != 0 ) { + hitrate = 100*hit_mcr_cnt/raw_mcr_cnt; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_DCACHE_HIT_RATE : %d\n", hitrate); + + //hitrate = ((float)hit_mcr_cnt/(float)raw_mcr_cnt); + //hitrate = (mcrcc_hit_rate + (mcrcc_bypass_rate * hitrate))*100; + hitrate = mcrcc_hit_rate + (mcrcc_bypass_rate * hit_mcr_cnt/raw_mcr_cnt); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "MCRCC_DECOMP_DCACHE_EFFECTIVE_HIT_RATE : %d\n", hitrate); + + } else { + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_DCACHE_HIT_RATE : na\n"); + } + + return; +} + +static void decomp_get_comprate(struct AV1HW_s *hw) +{ + unsigned raw_ucomp_cnt; + unsigned fast_comp_cnt; + unsigned slow_comp_cnt; + int comprate; + + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] Entered decomp_get_comprate...\n"); + C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x4<<1)); + C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &fast_comp_cnt); + C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x5<<1)); + C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &slow_comp_cnt); + C_Reg_Wr(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x6<<1)); + C_Reg_Rd(HEVCD_MPP_DECOMP_PERFMON_DATA, &raw_ucomp_cnt); + + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "decomp_fast_comp_total: %d\n",fast_comp_cnt); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "decomp_slow_comp_total: %d\n",slow_comp_cnt); + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "decomp_raw_uncomp_total: %d\n",raw_ucomp_cnt); + + if ( raw_ucomp_cnt != 0 ) + { + comprate = 100*(fast_comp_cnt + slow_comp_cnt)/raw_ucomp_cnt; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_COMP_RATIO : %d\n", comprate); + } else + { + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "DECOMP_COMP_RATIO : na\n"); + } + return; +} + +static void dump_hit_rate(struct AV1HW_s *hw) +{ + if (debug & AV1_DEBUG_CACHE_HIT_RATE) { + mcrcc_get_hitrate(hw, hw->m_ins_flag); + decomp_get_hitrate(hw); + decomp_get_comprate(hw); + } +} + +static uint32_t 
mcrcc_get_abs_frame_distance(struct AV1HW_s *hw, uint32_t refid, uint32_t ref_ohint, uint32_t curr_ohint, uint32_t ohint_bits_min1) +{ + int32_t diff_ohint0; + int32_t diff_ohint1; + uint32_t abs_dist; + uint32_t m; + uint32_t m_min1; + + diff_ohint0 = ref_ohint - curr_ohint; + + m = (1 << ohint_bits_min1); + m_min1 = m -1; + + diff_ohint1 = (diff_ohint0 & m_min1 ) - (diff_ohint0 & m); + + abs_dist = (diff_ohint1 < 0) ? -diff_ohint1 : diff_ohint1; + + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, + "[cache_util.c] refid:%0x ref_orderhint:%0x curr_orderhint:%0x orderhint_bits_min1:%0x abd_dist:%0x\n", + refid, ref_ohint, curr_ohint, ohint_bits_min1,abs_dist); + + return abs_dist; +} + +static void config_mcrcc_axi_hw_nearest_ref(struct AV1HW_s *hw) +{ + uint32_t i; + uint32_t rdata32; + uint32_t dist_array[8]; + uint32_t refcanvas_array[2]; + uint32_t orderhint_bits; + unsigned char is_inter; + AV1_COMMON *cm = &hw->common; + PIC_BUFFER_CONFIG *curr_pic_config; + int32_t curr_orderhint; + int cindex0 = LAST_FRAME; + uint32_t last_ref_orderhint_dist = 1023; // large distance + uint32_t curr_ref_orderhint_dist = 1023; // large distance + int cindex1; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, + "[test.c] #### config_mcrcc_axi_hw ####\n"); + + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); // reset mcrcc + + is_inter = av1_frame_is_inter(&hw->common); //((pbi->common.frame_type != KEY_FRAME) && (!pbi->common.intra_only)) ? 
1 : 0; + if ( !is_inter ) { // I-PIC + //WRITE_VREG(HEVCD_MCRCC_CTL1, 0x1); // remove reset -- disables clock + WRITE_VREG(HEVCD_MCRCC_CTL2, 0xffffffff); // Replace with current-frame canvas + WRITE_VREG(HEVCD_MCRCC_CTL3, 0xffffffff); // + WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); // enable mcrcc progressive-mode + return; + } + +#if 0 + //printk("before call mcrcc_get_hitrate\r\n"); + mcrcc_get_hitrate(hw); + decomp_get_hitrate(hw); + decomp_get_comprate(hw); +#endif + + // Find absolute orderhint delta + curr_pic_config = &cm->cur_frame->buf; + curr_orderhint = curr_pic_config->order_hint; + orderhint_bits = cm->seq_params.order_hint_info.order_hint_bits_minus_1; + for (i = LAST_FRAME; i <= ALTREF_FRAME; i++) { + int32_t ref_orderhint = 0; + PIC_BUFFER_CONFIG *pic_config; + //int32_t tmp; + pic_config = av1_get_ref_frame_spec_buf(cm,i); + if (pic_config) + ref_orderhint = pic_config->order_hint; + //tmp = curr_orderhint - ref_orderhint; + //dist_array[i] = (tmp < 0) ? -tmp : tmp; + dist_array[i] = mcrcc_get_abs_frame_distance(hw, i,ref_orderhint, curr_orderhint, orderhint_bits); + } + // Get smallest orderhint distance refid + for (i = LAST_FRAME; i <= ALTREF_FRAME; i++) { + PIC_BUFFER_CONFIG *pic_config; + pic_config = av1_get_ref_frame_spec_buf(cm, i); + curr_ref_orderhint_dist = dist_array[i]; + if ( curr_ref_orderhint_dist < last_ref_orderhint_dist) { + cindex0 = i; + last_ref_orderhint_dist = curr_ref_orderhint_dist; + } + } + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (cindex0 << 8) | (1<<1) | 0); + refcanvas_array[0] = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR) & 0xffff; + + last_ref_orderhint_dist = 1023; // large distance + curr_ref_orderhint_dist = 1023; // large distance + // Get 2nd smallest orderhint distance refid + cindex1 = LAST_FRAME; + for (i = LAST_FRAME; i <= ALTREF_FRAME; i++) { + PIC_BUFFER_CONFIG *pic_config; + pic_config = av1_get_ref_frame_spec_buf(cm, i); + curr_ref_orderhint_dist = dist_array[i]; + 
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (i << 8) | (1<<1) | 0); + refcanvas_array[1] = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR) & 0xffff; + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] curr_ref_orderhint_dist:%x last_ref_orderhint_dist:%x refcanvas_array[0]:%x refcanvas_array[1]:%x\n", + curr_ref_orderhint_dist, last_ref_orderhint_dist, refcanvas_array[0],refcanvas_array[1]); + if ((curr_ref_orderhint_dist < last_ref_orderhint_dist) && (refcanvas_array[0] != refcanvas_array[1])) { + cindex1 = i; + last_ref_orderhint_dist = curr_ref_orderhint_dist; + } + } + + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (cindex0 << 8) | (1<<1) | 0); + refcanvas_array[0] = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (cindex1 << 8) | (1<<1) | 0); + refcanvas_array[1] = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + + av1_print(hw, AV1_DEBUG_CACHE_HIT_RATE, "[cache_util.c] refcanvas_array[0](index %d):%x refcanvas_array[1](index %d):%x\n", + cindex0, refcanvas_array[0], cindex1, refcanvas_array[1]); + + // lowest delta_picnum + rdata32 = refcanvas_array[0]; + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | ( rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + // 2nd-lowest delta_picnum + rdata32 = refcanvas_array[1]; + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | ( rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32); + + WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); // enable mcrcc progressive-mode + return; +} + + +#endif + +int av1_continue_decoding(struct AV1HW_s *hw, int obu_type) +{ + int ret = 0; +#ifdef SANITY_CHECK + param_t* params = &hw->aom_param; +#endif +#if 1 + //def CHANGE_DONE + AV1Decoder *pbi = hw->pbi; + AV1_COMMON *const cm = pbi->common; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int i; + + av1_print(hw, AOM_DEBUG_HW_MORE, + "%s: pbi %p cm %p cur_frame %p %d has_seq %d has_keyframe %d\n", + __func__, pbi, cm, cm->cur_frame, + pbi->bufmgr_proc_count, + 
hw->has_sequence, + hw->has_keyframe); + + if (hw->has_sequence == 0) { + av1_print(hw, 0, + "no sequence head, skip\n"); + if (!hw->m_ins_flag) + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + return -2; + } else if (hw->has_keyframe == 0 && + hw->aom_param.p.frame_type != KEY_FRAME){ + av1_print(hw, 0, + "no key frame, skip\n"); + on_no_keyframe_skiped++; + if (!hw->m_ins_flag) + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + return -2; + } + hw->has_keyframe = 1; + on_no_keyframe_skiped = 0; + + if (hw->is_used_v4l && ctx->param_sets_from_ucode) + hw->res_ch_flag = 0; + + //pre_decode_idx = pbi->decode_idx; + if (pbi->bufmgr_proc_count == 0 || + hw->one_compressed_data_done) { + hw->new_compressed_data = 1; + hw->one_compressed_data_done = 0; + } else { + hw->new_compressed_data = 0; + } +#ifdef SANITY_CHECK + ret = 0; + av1_print(hw, AOM_DEBUG_HW_MORE, + "Check Picture size, max (%d, %d), width/height (%d, %d), dec_width %d\n", + params->p.max_frame_width, + params->p.max_frame_height, + params->p.frame_width_scaled, + params->p.frame_height, + params->p.dec_frame_width + ); + + if (/*params->p.max_frame_width > MAX_PIC_WIDTH || + params->p.max_frame_height > MAX_PIC_HEIGHT ||*/ + (params->p.frame_width_scaled * params->p.frame_height) > MAX_SIZE_8K || + (params->p.dec_frame_width * params->p.frame_height) > MAX_SIZE_8K || + params->p.frame_width_scaled <= 0 || + params->p.dec_frame_width <= 0 || + params->p.frame_height <= 0) { + av1_print(hw, 0, "!!Picture size error, max (%d, %d), width/height (%d, %d), dec_width %d\n", + params->p.max_frame_width, + params->p.max_frame_height, + params->p.frame_width_scaled, + params->p.frame_height, + params->p.dec_frame_width + ); + ret = -1; + } +#endif + if (ret >= 0) { + ret = av1_bufmgr_process(pbi, &hw->aom_param, + hw->new_compressed_data, obu_type); + if (ret < 0) + return -1; + av1_print(hw, AOM_DEBUG_HW_MORE, + "%s: pbi %p cm %p cur_frame %p\n", + __func__, pbi, cm, cm->cur_frame); + + 
av1_print(hw, AOM_DEBUG_HW_MORE, + "1+++++++++++++++++++++++++++++++++++%d %p\n", + ret, cm->cur_frame); + if (cm->cur_frame) { + init_waitqueue_head(&cm->cur_frame->wait_sfgs); + atomic_set(&cm->cur_frame->fgs_done, 1); + } + if (hw->new_compressed_data) + WRITE_VREG(PIC_END_LCU_COUNT, 0); + } + if (ret > 0) { + /* the case when cm->show_existing_frame is 1 */ + /*case 3016*/ + av1_print(hw, AOM_DEBUG_HW_MORE, + "Decoding done (index=%d, show_existing_frame = %d)\n", + cm->cur_frame? cm->cur_frame->buf.index:-1, + cm->show_existing_frame + ); + + if (cm->cur_frame) { + PIC_BUFFER_CONFIG* cur_pic_config = &cm->cur_frame->buf; + if (debug & + AV1_DEBUG_BUFMGR_MORE) + dump_aux_buf(hw); + set_pic_aux_data(hw, + cur_pic_config, 0, 0); + } + config_next_ref_info_hw(hw); + + av1_print(hw, AOM_DEBUG_HW_MORE, + "aom_bufmgr_process=> %d,decode done, AOM_AV1_SEARCH_HEAD\r\n", ret); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + pbi->decode_idx++; + pbi->bufmgr_proc_count++; + hw->frame_decoded = 1; + return 0; + } else if (ret < 0) { + hw->frame_decoded = 1; + av1_print(hw, AOM_DEBUG_HW_MORE, + "aom_bufmgr_process=> %d, bufmgr e.r.r.o.r. %d, AOM_AV1_SEARCH_HEAD\r\n", + ret, cm->error.error_code); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + return 0; + } + else if (ret == 0) { + PIC_BUFFER_CONFIG* cur_pic_config = &cm->cur_frame->buf; + PIC_BUFFER_CONFIG* prev_pic_config = &cm->prev_frame->buf; + //struct segmentation_lf *seg_4lf = &hw->seg_4lf_store; + if (debug & + AV1_DEBUG_BUFMGR_MORE) + dump_aux_buf(hw); + set_dv_data(hw); + if (cm->show_frame && + hw->dv_data_buf != NULL) + copy_dv_data(hw, cur_pic_config); + /* to do:.. 
+ set_pic_aux_data(hw, + cur_pic_config, 0, 2);*/ + hw->frame_decoded = 0; + pbi->bufmgr_proc_count++; + if (hw->new_compressed_data == 0) { + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_DECODE_SLICE); + return 0; + } + av1_print(hw, AOM_DEBUG_HW_MORE, + " [PICTURE %d] cm->cur_frame->mi_size : (%d X %d) y_crop_size :(%d X %d)\n", + hw->frame_count, + cm->cur_frame->mi_cols, + cm->cur_frame->mi_rows, + cur_pic_config->y_crop_width, + cur_pic_config->y_crop_height); + if (cm->prev_frame > 0) { + av1_print(hw, AOM_DEBUG_HW_MORE, + " [SEGMENT] cm->prev_frame->segmentation_enabled : %d\n", + cm->prev_frame->segmentation_enabled); + av1_print(hw, AOM_DEBUG_HW_MORE, + " [SEGMENT] cm->prev_frame->mi_size : (%d X %d)\n", + cm->prev_frame->mi_cols, cm->prev_frame->mi_rows); + } + cm->cur_frame->prev_segmentation_enabled = (cm->prev_frame > 0) ? + (cm->prev_frame->segmentation_enabled & (cm->prev_frame->segmentation_update_map + | cm->prev_frame->prev_segmentation_enabled) & + (cm->cur_frame->mi_rows == cm->prev_frame->mi_rows) & + (cm->cur_frame->mi_cols == cm->prev_frame->mi_cols)) : 0; + WRITE_VREG(AV1_SKIP_MODE_INFO, + (cm->cur_frame->prev_segmentation_enabled << 31) | + (((cm->prev_frame > 0) ? cm->prev_frame->intra_only : 0) << 30) | + (((cm->prev_frame > 0) ? prev_pic_config->index : 0x1f) << 24) | + (((cm->cur_frame > 0) ? 
cur_pic_config->index : 0x1f) << 16) | + (cm->current_frame.skip_mode_info.ref_frame_idx_0 & 0xf) | + ((cm->current_frame.skip_mode_info.ref_frame_idx_1 & 0xf) << 4) | + (cm->current_frame.skip_mode_info.skip_mode_allowed << 8)); + cur_pic_config->decode_idx = pbi->decode_idx; + + av1_print(hw, AOM_DEBUG_HW_MORE, + "Decode Frame Data %d frame_type %d (%d) bufmgr_proc_count %d\n", + pbi->decode_idx, + cm->cur_frame->frame_type, + cm->current_frame.frame_type, + pbi->bufmgr_proc_count); + pbi->decode_idx++; + hw->frame_count++; + cur_pic_config->slice_type = cm->cur_frame->frame_type; + if (hw->chunk) { + av1_print(hw, AV1_DEBUG_OUT_PTS, + "%s, config pic pts %d, pts64 %lld, ts: %lld\n", + __func__, hw->chunk->pts, hw->chunk->pts64, hw->chunk->timestamp); + cur_pic_config->pts = hw->chunk->pts; + cur_pic_config->pts64 = hw->chunk->pts64; + + if (hw->is_used_v4l && !v4l_bitstream_id_enable) { + cur_pic_config->pts64 = hw->chunk->timestamp; + hw->chunk->timestamp = 0; + } + + hw->chunk->pts = 0; + hw->chunk->pts64 = 0; + } + ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_START); +#ifdef DUAL_DECODE +#else + config_pic_size(hw, hw->aom_param.p.bit_depth); +#endif + if (get_mv_buf(hw, &cm->cur_frame->buf) < 0) { + av1_print(hw, 0, + "%s: Error get_mv_buf fail\n", + __func__); + ret = -1; + } + + if (ret >= 0 && (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + ret = av1_alloc_mmu(hw, + cm->cur_frame->buf.index, + cur_pic_config->y_crop_width, + cur_pic_config->y_crop_height, + hw->aom_param.p.bit_depth, + hw->frame_mmu_map_addr); + if (ret >= 0) + cm->cur_fb_idx_mmu = cm->cur_frame->buf.index; + else + pr_err("can't alloc need mmu1,idx %d ret =%d\n", + cm->cur_frame->buf.index, ret); +#ifdef AOM_AV1_MMU_DW + if (get_double_write_mode(hw) & 0x20) { + ret = av1_alloc_mmu_dw(hw, + cm->cur_frame->buf.index, + cur_pic_config->y_crop_width, + cur_pic_config->y_crop_height, + 
hw->aom_param.p.bit_depth, + hw->dw_frame_mmu_map_addr); + if (ret >= 0) + cm->cur_fb_idx_mmu_dw = cm->cur_frame->buf.index; + else + pr_err("can't alloc need dw mmu1,idx %d ret =%d\n", + cm->cur_frame->buf.index, ret); + } +#endif +#ifdef DEBUG_CRC_ERROR + if (crc_debug_flag & 0x40) + mv_buffer_fill_zero(hw, &cm->cur_frame->buf); +#endif + } else { + ret = 0; + } + if (av1_frame_is_inter(&hw->common)) { + //if ((pbi->common.frame_type != KEY_FRAME) && (!pbi->common.intra_only)) { +#ifdef DUAL_DECODE +#else + config_mc_buffer(hw, hw->aom_param.p.bit_depth, 1); +#endif + config_mpred_hw(hw, 1); + } + else { + config_mc_buffer(hw, hw->aom_param.p.bit_depth, 0); + clear_mpred_hw(hw); + config_mpred_hw(hw, 0); + } +#ifdef DUAL_DECODE +#else +#ifdef MCRCC_ENABLE + config_mcrcc_axi_hw_nearest_ref(hw); +#endif + config_sao_hw(hw, &hw->aom_param); +#endif + + config_dblk_hw(hw); + + /* store segment_feature before shared sub-module run to fix mosaic on t5d */ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SC2) + WRITE_VREG(HEVC_PARSER_MEM_WR_ADDR, 0x11b0 + (cur_pic_config->index)); + else + WRITE_VREG(HEVC_PARSER_MEM_WR_ADDR, 0x1010 + (cur_pic_config->index)); + if (hw->aom_param.p.segmentation_enabled & 1) // segmentation_enabled + WRITE_VREG(HEVC_PARSER_MEM_RW_DATA, READ_VREG(AV1_REF_SEG_INFO)); + else + WRITE_VREG(HEVC_PARSER_MEM_RW_DATA, 0); + + av1_print(hw, AOM_DEBUG_HW_MORE, "HEVC_DEC_STATUS_REG <= AOM_AV1_DECODE_SLICE\n"); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_DECODE_SLICE); + + // Save segment_feature while hardware decoding + if (hw->seg_4lf->enabled) { + for (i = 0; i < 8; i++) { + cm->cur_frame->segment_feature[i] = READ_VREG(AOM_AV1_SEGMENT_FEATURE); + } + } else { + for (i = 0; i < 8; i++) { + cm->cur_frame->segment_feature[i] = (0x80000000 | (i << 22)); + } + } + } else { + av1_print(hw, AOM_DEBUG_HW_MORE, "Sequence head, Search next start code\n"); + cm->prev_fb_idx = INVALID_IDX; + //skip, search next start code + WRITE_VREG(HEVC_DEC_STATUS_REG, 
AOM_AV1_DECODE_SLICE); + } + ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_END); + return ret; + +#else + + bit_depth_luma = av1_param.p.bit_depth; + bit_depth_chroma = av1_param.p.bit_depth; + + if (hw->process_state != PROC_STATE_SENDAGAIN) { + ret = av1_bufmgr_process(hw, &av1_param); + if (!hw->m_ins_flag) + hw->result_done_count++; + } else { + union param_u *params = &av1_param; + if (hw->mmu_enable && ((hw->double_write_mode & 0x10) == 0)) { + ret = av1_alloc_mmu(hw, + cm->new_fb_idx, + params->p.width, + params->p.height, + params->p.bit_depth, + hw->frame_mmu_map_addr); + if (ret >= 0) + cm->cur_fb_idx_mmu = cm->new_fb_idx; + else + pr_err("can't alloc need mmu1,idx %d ret =%d\n", + cm->new_fb_idx, ret); + } else { + ret = 0; + } + WRITE_VREG(HEVC_PARSER_PICTURE_SIZE, + (params->p.height << 16) | params->p.width); + } + if (ret < 0) { + pr_info("av1_bufmgr_process=> %d, AV1_10B_DISCARD_NAL\r\n", ret); + WRITE_VREG(HEVC_DEC_STATUS_REG, AV1_10B_DISCARD_NAL); + cm->show_frame = 0; + if (hw->mmu_enable) + av1_recycle_mmu_buf(hw); + + if (hw->m_ins_flag) { + hw->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + vdec_schedule_work(&hw->work); + } + return ret; + } else if (ret == 0) { + struct PIC_BUFFER_CONFIG_s *cur_pic_config + = &cm->cur_frame->buf; + cur_pic_config->decode_idx = hw->frame_count; + + if (hw->process_state != PROC_STATE_SENDAGAIN) { + if (!hw->m_ins_flag) { + hw->frame_count++; + decode_frame_count[hw->index] + = hw->frame_count; + } + if (hw->chunk) { + cur_pic_config->pts = hw->chunk->pts; + cur_pic_config->pts64 = hw->chunk->pts64; + } + } + /*pr_info("Decode Frame Data %d\n", hw->frame_count);*/ + config_pic_size(hw, av1_param.p.bit_depth); + + if ((hw->common.frame_type != KEY_FRAME) + && (!hw->common.intra_only)) { + config_mc_buffer(hw, av1_param.p.bit_depth); + config_mpred_hw(hw); + } else { + clear_mpred_hw(hw); + } +#ifdef MCRCC_ENABLE + if (mcrcc_cache_alg_flag) + config_mcrcc_axi_hw_new(hw); + else 
+ config_mcrcc_axi_hw(hw); +#endif + config_sao_hw(hw, &av1_param); + /*pr_info("HEVC_DEC_STATUS_REG <= AV1_10B_DECODE_SLICE\n");*/ + WRITE_VREG(HEVC_DEC_STATUS_REG, AV1_10B_DECODE_SLICE); + } else { + pr_info("Skip search next start code\n"); + cm->prev_fb_idx = INVALID_IDX; + /*skip, search next start code*/ + WRITE_VREG(HEVC_DEC_STATUS_REG, AV1_10B_DECODE_SLICE); + } + hw->process_state = PROC_STATE_DECODESLICE; + if (hw->mmu_enable && ((hw->double_write_mode & 0x10) == 0)) { + if (hw->last_put_idx < hw->used_buf_num) { + struct RefCntBuffer_s *frame_bufs = + cm->buffer_pool->frame_bufs; + int i = hw->last_put_idx; + /*free not used buffers.*/ + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.index != -1)) { + if (pbi->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(pbi, i); + + decoder_mmu_box_free_idx(ibuf->mmu_box, i); + } else { + decoder_mmu_box_free_idx(pbi->mmu_box, i); + } + } + hw->last_put_idx = -1; + } + } + return ret; +#endif +} + +static void fill_frame_info(struct AV1HW_s *hw, + struct PIC_BUFFER_CONFIG_s *frame, + unsigned int framesize, + unsigned int pts) +{ + struct vframe_qos_s *vframe_qos = &hw->vframe_qos; + + if (frame->slice_type == KEY_FRAME) + vframe_qos->type = 1; + else if (frame->slice_type == INTER_FRAME) + vframe_qos->type = 2; +/* +#define SHOW_QOS_INFO +*/ + vframe_qos->size = framesize; + vframe_qos->pts = pts; +#ifdef SHOW_QOS_INFO + av1_print(hw, 0, "slice:%d\n", frame->slice_type); +#endif + vframe_qos->max_mv = frame->max_mv; + vframe_qos->avg_mv = frame->avg_mv; + vframe_qos->min_mv = frame->min_mv; +#ifdef SHOW_QOS_INFO + av1_print(hw, 0, "mv: max:%d, avg:%d, min:%d\n", + vframe_qos->max_mv, + vframe_qos->avg_mv, + vframe_qos->min_mv); +#endif + vframe_qos->max_qp = frame->max_qp; + vframe_qos->avg_qp = frame->avg_qp; + vframe_qos->min_qp = frame->min_qp; +#ifdef SHOW_QOS_INFO + av1_print(hw, 0, "qp: max:%d, avg:%d, min:%d\n", + vframe_qos->max_qp, + 
vframe_qos->avg_qp, + vframe_qos->min_qp); +#endif + vframe_qos->max_skip = frame->max_skip; + vframe_qos->avg_skip = frame->avg_skip; + vframe_qos->min_skip = frame->min_skip; +#ifdef SHOW_QOS_INFO + av1_print(hw, 0, "skip: max:%d, avg:%d, min:%d\n", + vframe_qos->max_skip, + vframe_qos->avg_skip, + vframe_qos->min_skip); +#endif + vframe_qos->num++; + /* + if (hw->frameinfo_enable) + vdec_fill_frame_info(vframe_qos, 1); + */ +} + +/* only when we decoded one field or one frame, +we can call this function to get qos info*/ +static void get_picture_qos_info(struct AV1HW_s *hw) +{ + struct PIC_BUFFER_CONFIG_s *frame = &hw->cur_buf->buf; + + if (!frame) + return; + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + unsigned char a[3]; + unsigned char i, j, t; + unsigned long data; + + data = READ_VREG(HEVC_MV_INFO); + if (frame->slice_type == KEY_FRAME) + data = 0; + a[0] = data & 0xff; + a[1] = (data >> 8) & 0xff; + a[2] = (data >> 16) & 0xff; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_mv = a[2]; + frame->avg_mv = a[1]; + frame->min_mv = a[0]; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "mv data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + + data = READ_VREG(HEVC_QP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_qp = a[2]; + frame->avg_qp = a[1]; + frame->min_qp = a[0]; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "qp data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + + data = READ_VREG(HEVC_SKIP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 
0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_skip = a[2]; + frame->avg_skip = a[1]; + frame->min_skip = a[0]; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "skip data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + } else { + uint32_t blk88_y_count; + uint32_t blk88_c_count; + uint32_t blk22_mv_count; + uint32_t rdata32; + int32_t mv_hi; + int32_t mv_lo; + uint32_t rdata32_l; + uint32_t mvx_L0_hi; + uint32_t mvy_L0_hi; + uint32_t mvx_L1_hi; + uint32_t mvy_L1_hi; + int64_t value; + uint64_t temp_value; + int pic_number = frame->decode_idx; + + frame->max_mv = 0; + frame->avg_mv = 0; + frame->min_mv = 0; + + frame->max_skip = 0; + frame->avg_skip = 0; + frame->min_skip = 0; + + frame->max_qp = 0; + frame->avg_qp = 0; + frame->min_qp = 0; + + av1_print(hw, AV1_DEBUG_QOS_INFO, "slice_type:%d, poc:%d\n", + frame->slice_type, + pic_number); + + /* set rd_idx to 0 */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, 0); + + blk88_y_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_y_count == 0) { + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] NO Data yet.\n", + pic_number); + + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_y_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_y_count, + rdata32, blk88_y_count); + + frame->avg_qp = rdata32/blk88_y_count; + /* intra_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); + + /* skipped_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y skipped rate : %d%c (%d)\n", + 
pic_number, rdata32*100/blk88_y_count, + '%', rdata32); + + frame->avg_skip = rdata32*100/blk88_y_count; + /* coeff_non_zero_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_y_count*1)), + '%', rdata32); + + /* blk66_c_count */ + blk88_c_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_c_count == 0) { + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] NO Data yet.\n", + pic_number); + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_c_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_c_count, + rdata32, blk88_c_count); + + /* intra_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); + + /* skipped_cu_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); + + /* coeff_non_zero_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_c_count*1)), + '%', rdata32); + + /* 1'h0, qp_c_max[6:0], 1'h0, qp_c_min[6:0], + 1'h0, qp_y_max[6:0], 1'h0, qp_y_min[6:0] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP min : %d\n", + pic_number, (rdata32>>0)&0xff); + + frame->min_qp = (rdata32>>0)&0xff; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP max : %d\n", + pic_number, (rdata32>>8)&0xff); + + frame->max_qp = (rdata32>>8)&0xff; + + av1_print(hw, 
AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP min : %d\n", + pic_number, (rdata32>>16)&0xff); + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP max : %d\n", + pic_number, (rdata32>>24)&0xff); + + /* blk22_mv_count */ + blk22_mv_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk22_mv_count == 0) { + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] NO MV Data yet.\n", + pic_number); + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* mvy_L1_count[39:32], mvx_L1_count[39:32], + mvy_L0_count[39:32], mvx_L0_count[39:32] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + /* should all be 0x00 or 0xff */ + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MV AVG High Bits: 0x%X\n", + pic_number, rdata32); + + mvx_L0_hi = ((rdata32>>0)&0xff); + mvy_L0_hi = ((rdata32>>8)&0xff); + mvx_L1_hi = ((rdata32>>16)&0xff); + mvy_L1_hi = ((rdata32>>24)&0xff); + + /* mvx_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvx_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + value = div_s64(value, blk22_mv_count); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 AVG : %d (%lld/%d)\n", + pic_number, (int)value, + value, blk22_mv_count); + + frame->avg_mv = value; + + /* mvy_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvy_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* mvx_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvx_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | 
temp_value; + else + value = temp_value; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* mvy_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvy_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* {mvx_L0_max, mvx_L0_min} // format : {sign, abs[14:0]} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 MAX : %d\n", + pic_number, mv_hi); + + frame->max_mv = mv_hi; + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 MIN : %d\n", + pic_number, mv_lo); + + frame->min_mv = mv_lo; + + /* {mvy_L0_max, mvy_L0_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 MIN : %d\n", + pic_number, mv_lo); + + /* {mvx_L1_max, mvx_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] 
MVX_L1 MIN : %d\n", + pic_number, mv_lo); + + /* {mvy_L1_max, mvy_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L1 MIN : %d\n", + pic_number, mv_lo); + + rdata32 = READ_VREG(HEVC_PIC_QUALITY_CTRL); + + av1_print(hw, AV1_DEBUG_QOS_INFO, + "[Picture %d Quality] After Read : VDEC_PIC_QUALITY_CTRL : 0x%x\n", + pic_number, rdata32); + + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + } +} + +static int load_param(struct AV1HW_s *hw, union param_u *params, uint32_t dec_status) +{ + int i; + unsigned long flags; + int head_type = 0; + if (dec_status == AOM_AV1_SEQ_HEAD_PARSER_DONE) + head_type = OBU_SEQUENCE_HEADER; + else if (dec_status == AOM_AV1_FRAME_HEAD_PARSER_DONE) + head_type = OBU_FRAME_HEADER; + else if (dec_status == AOM_AV1_FRAME_PARSER_DONE) + head_type = OBU_FRAME; + else if (dec_status == AOM_AV1_REDUNDANT_FRAME_HEAD_PARSER_DONE) + head_type = OBU_REDUNDANT_FRAME_HEADER; + else { + //printf("Error, dec_status of 0x%x, not supported!!!\n", dec_status); + return -1; + } + av1_print2(AOM_DEBUG_HW_MORE, "load_param: ret 0x%x\n", head_type); + ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_START); + if (debug&AOM_AV1_DEBUG_SEND_PARAM_WITH_REG) { + get_rpm_param(params); + } + else { + for (i = 0; i < (RPM_END-RPM_BEGIN); i += 4) { + int32_t ii; + for (ii = 0; ii < 4; ii++) { + params->l.data[i+ii]=hw->rpm_ptr[i+3-ii]; + } + } + } + ATRACE_COUNTER(hw->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_END); + params->p.enable_ref_frame_mvs = (params->p.seq_flags >> 7) & 0x1; + params->p.enable_superres = (params->p.seq_flags >> 15) & 0x1; + + if (debug & 
AV1_DEBUG_BUFMGR_MORE) { + lock_buffer_pool(hw->common.buffer_pool, flags); + pr_info("aom_param: (%d)\n", hw->pbi->decode_idx); + //pbi->slice_idx++; + for ( i = 0; i < (RPM_END-RPM_BEGIN); i++) { + pr_info("%04x ", params->l.data[i]); + if (((i + 1) & 0xf) == 0) + pr_info("\n"); + } + unlock_buffer_pool(hw->common.buffer_pool, flags); + } + return head_type; +} + +static int av1_postproc(struct AV1HW_s *hw) +{ + if (hw->postproc_done) + return 0; + hw->postproc_done = 1; + return av1_bufmgr_postproc(hw->pbi, hw->frame_decoded); +} + +static void vav1_get_comp_buf_info(struct AV1HW_s *hw, + struct vdec_comp_buf_info *info) +{ + u16 bit_depth = hw->param.p.bit_depth; + + info->max_size = av1_max_mmu_buf_size( + hw->max_pic_w, + hw->max_pic_h); + info->header_size = av1_get_header_size( + hw->frame_width, + hw->frame_height); + info->frame_buffer_size = av1_mmu_page_num( + hw, hw->frame_width, + hw->frame_height, + bit_depth == 0); +} + +static int vav1_get_ps_info(struct AV1HW_s *hw, struct aml_vdec_ps_infos *ps) +{ + ps->visible_width = hw->frame_width; + ps->visible_height = hw->frame_height; + ps->coded_width = ALIGN(hw->frame_width, 64); + ps->coded_height = ALIGN(hw->frame_height, 64); + ps->dpb_size = hw->used_buf_num; + ps->dpb_margin = hw->dynamic_buf_num_margin; + if (hw->frame_width > 1920 && hw->frame_height > 1088) + ps->dpb_frames = 8; + else + ps->dpb_frames = 10; + + ps->dpb_frames += 2; + + if (ps->dpb_margin + ps->dpb_frames > MAX_BUF_NUM_NORMAL) { + u32 delta; + delta = ps->dpb_margin + ps->dpb_frames - MAX_BUF_NUM_NORMAL; + ps->dpb_margin -= delta; + hw->dynamic_buf_num_margin = ps->dpb_margin; + } + ps->field = V4L2_FIELD_NONE; + + return 0; +} + +static int v4l_res_change(struct AV1HW_s *hw) +{ + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct AV1_Common_s *const cm = &hw->common; + int ret = 0; + + if (ctx->param_sets_from_ucode && + hw->res_ch_flag == 0) { + struct aml_vdec_ps_infos ps; + struct 
vdec_comp_buf_info comp; + + if ((cm->width != 0 && + cm->height != 0) && + (hw->frame_width != cm->width || + hw->frame_height != cm->height)) { + + av1_print(hw, 0, + "%s (%d,%d)=>(%d,%d)\r\n", __func__, cm->width, + cm->height, hw->frame_width, hw->frame_height); + + if (get_valid_double_write_mode(hw) != 16) { + vav1_get_comp_buf_info(hw, &comp); + vdec_v4l_set_comp_buf_info(ctx, &comp); + } + + vav1_get_ps_info(hw, &ps); + vdec_v4l_set_ps_infos(ctx, &ps); + vdec_v4l_res_ch_event(ctx); + hw->v4l_params_parsed = false; + hw->res_ch_flag = 1; + ctx->v4l_resolution_change = 1; + mutex_lock(&hw->assit_task.assit_mutex); + hw->eos = 1; + + av1_postproc(hw); + + ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__); + notify_v4l_eos(hw_to_vdec(hw)); + ATRACE_COUNTER("V_ST_DEC-submit_eos", 0); + mutex_unlock(&hw->assit_task.assit_mutex); + ret = 1; + } + } + + return ret; +} + +static int work_space_size_update(struct AV1HW_s *hw) +{ + int workbuf_size, cma_alloc_cnt, ret; + struct BuffInfo_s *p_buf_info = &hw->work_space_buf_store; + + /* only for 8k workspace update */ + if (!IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h)) + return 0; + + if (hw->cma_alloc_addr && hw->buf_size) { + memcpy(p_buf_info, &aom_workbuff_spec[2], + sizeof(struct BuffInfo_s)); + workbuf_size = (p_buf_info->end_adr - + p_buf_info->start_adr + 0xffff) & (~0xffff); + cma_alloc_cnt = PAGE_ALIGN(workbuf_size) / PAGE_SIZE; + if (hw->cma_alloc_count < cma_alloc_cnt) { + decoder_bmmu_box_free_idx(hw->bmmu_box, WORK_SPACE_BUF_ID); + hw->buffer_spec_index = 2; + hw->cma_alloc_count = cma_alloc_cnt; + ret = decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, + WORK_SPACE_BUF_ID, hw->cma_alloc_count * PAGE_SIZE, + DRIVER_NAME, &hw->cma_alloc_addr); + if(ret < 0) { + hw->fatal_error |= DECODER_FATAL_ERROR_NO_MEM; + pr_err("8k workspace recalloc failed\n"); + return ret; + } + hw->buf_start = hw->cma_alloc_addr; + hw->buf_size = workbuf_size; + pr_info("8k work_space_buf recalloc, size 0x%x\n", hw->buf_size); + 
p_buf_info->start_adr = hw->buf_start; + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL) || + (get_double_write_mode(hw) == 0x10)) { + hw->mc_buf_spec.buf_end = hw->buf_start + hw->buf_size; + } + init_buff_spec(hw, p_buf_info); + hw->work_space_buf = p_buf_info; + hw->pbi->work_space_buf = p_buf_info; + } + } + + return 0; +} + +static irqreturn_t vav1_isr_thread_fn(int irq, void *data) +{ + struct AV1HW_s *hw = (struct AV1HW_s *)data; + unsigned int dec_status = hw->dec_status; + int obu_type; + int ret = 0; + + if (dec_status == AOM_AV1_FRAME_HEAD_PARSER_DONE || + dec_status == AOM_AV1_SEQ_HEAD_PARSER_DONE || + dec_status == AOM_AV1_FRAME_PARSER_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_START); + } + else if (dec_status == AOM_AV1_DEC_PIC_END || + dec_status == AOM_NAL_DECODE_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_PIC_DONE_START); + } + + if (hw->eos) + return IRQ_HANDLED; + hw->wait_buf = 0; + if ((dec_status == AOM_NAL_DECODE_DONE) || + (dec_status == AOM_SEARCH_BUFEMPTY) || + (dec_status == AOM_DECODE_BUFEMPTY) + ) { + if (hw->m_ins_flag) { + reset_process_time(hw); + if (!vdec_frame_based(hw_to_vdec(hw))) + dec_again_process(hw); + else { + hw->dec_result = DEC_RESULT_DONE; + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + vdec_schedule_work(&hw->work); + } + } + hw->process_busy = 0; + return IRQ_HANDLED; + } else if (dec_status == AOM_AV1_DEC_PIC_END) { + struct AV1_Common_s *const cm = &hw->common; + struct PIC_BUFFER_CONFIG_s *frame = &cm->cur_frame->buf; + struct vdec_s *vdec = hw_to_vdec(hw); +#if 1 + u32 fg_reg0, fg_reg1, num_y_points, num_cb_points, num_cr_points; + WRITE_VREG(HEVC_FGS_IDX, 0); + fg_reg0 = READ_VREG(HEVC_FGS_DATA); + fg_reg1 = READ_VREG(HEVC_FGS_DATA); + num_y_points = fg_reg1 & 0xf; + num_cr_points = (fg_reg1 >> 8) & 0xf; + num_cb_points = (fg_reg1 >> 4) & 0xf; + if ((num_y_points > 0) || + ((num_cb_points > 0) | ((fg_reg0 >> 17) & 0x1)) 
|| + ((num_cr_points > 0) | ((fg_reg0 >> 17) & 0x1))) + hw->fgs_valid = 1; + else + hw->fgs_valid = 0; + av1_print(hw, AOM_DEBUG_HW_MORE, + "fg_data0 0x%x fg_data1 0x%x fg_valid %d\n", + fg_reg0, fg_reg1, hw->fgs_valid); +#else + if (READ_VREG(HEVC_FGS_CTRL) & + ((1 << 4) | (1 << 5) | (1 << 6))) + hw->fgs_valid = 1; + else + hw->fgs_valid = 0; +#endif + decode_frame_count[hw->index] = hw->frame_count; + if (hw->m_ins_flag) { +#ifdef USE_DEC_PIC_END + if (READ_VREG(PIC_END_LCU_COUNT) != 0) { + hw->frame_decoded = 1; + if (cm->cur_frame && vdec->mvfrm && frame) { + frame->hw_decode_time = + local_clock() - vdec->mvfrm->hw_decode_start; + frame->frame_size2 = vdec->mvfrm->frame_size; + } + hw->gvs->frame_count = hw->frame_count; + /* + In c module, multi obus are put in one packet, which is decoded + with av1_receive_compressed_data(). + For STREAM_MODE or SINGLE_MODE, there is no packet boundary, + we assume each packet must and only include one picture of data (LCUs) + or cm->show_existing_frame is 1 + */ + av1_print(hw, AOM_DEBUG_HW_MORE, + "Decoding done (index %d), fgs_valid %d data_size 0x%x shiftbyte 0x%x\n", + cm->cur_frame? 
cm->cur_frame->buf.index:-1, + hw->fgs_valid, + hw->data_size, + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + hw->config_next_ref_info_flag = 1; /*to do: low_latency_flag case*/ + //config_next_ref_info_hw(hw); + } +#endif + + if (get_picture_qos) + get_picture_qos_info(hw); + + reset_process_time(hw); + + if (hw->m_ins_flag && + (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10) && + (debug & AOM_DEBUG_DIS_RECYCLE_MMU_TAIL) == 0) { + long used_4k_num = + (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16); + if ((cm->cur_frame != NULL) && (cm->cur_fb_idx_mmu != INVALID_IDX)) { + hevc_mmu_dma_check(hw_to_vdec(hw)); + + av1_print(hw, AOM_DEBUG_HW_MORE, "mmu free tail, index %d used_num 0x%x\n", + cm->cur_fb_idx_mmu, used_4k_num); + if (hw->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(hw, cm->cur_fb_idx_mmu); + + decoder_mmu_box_free_idx_tail( + ibuf->mmu_box, + ibuf->index, + used_4k_num); +#ifdef AOM_AV1_MMU_DW + if (get_double_write_mode(hw) & 0x20) { + used_4k_num = + (READ_VREG(HEVC_SAO_MMU_STATUS2) >> 16); + decoder_mmu_box_free_idx_tail( + ibuf->mmu_box_dw, + ibuf->index, + used_4k_num); + av1_print(hw, AOM_DEBUG_HW_MORE, "dw mmu free tail, index %d used_num 0x%x\n", + cm->cur_frame->buf.index, used_4k_num); + } +#endif + cm->cur_fb_idx_mmu = INVALID_IDX; + } else { + decoder_mmu_box_free_idx_tail(hw->mmu_box, + cm->cur_frame->buf.index, used_4k_num); +#ifdef AOM_AV1_MMU_DW + if (get_double_write_mode(hw) & 0x20) { + used_4k_num = + (READ_VREG(HEVC_SAO_MMU_STATUS2) >> 16); + decoder_mmu_box_free_idx_tail(hw->mmu_box_dw, + cm->cur_frame->buf.index, used_4k_num); + av1_print(hw, AOM_DEBUG_HW_MORE, "dw mmu free tail, index %d used_num 0x%x\n", + cm->cur_frame->buf.index, used_4k_num); + } +#endif + } + } + } + + if (hw->assit_task.use_sfgs) { + ulong start_time; + start_time = local_clock(); + if (cm->cur_frame) + wait_event_interruptible_timeout(cm->cur_frame->wait_sfgs, + (atomic_read(&cm->cur_frame->fgs_done) == 1), 
msecs_to_jiffies(50)); + if (get_debug_fgs() & DEBUG_FGS_CONSUME_TIME) { + pr_info("%s, pic %d, fgs_valid %d, wait consume time %d us\n", __func__, + hw->frame_count - 1, hw->fgs_valid, + div64_u64(local_clock() - start_time, 1000)); + } + } + if (hw->low_latency_flag) + av1_postproc(hw); + + if (multi_frames_in_one_pack && + hw->frame_decoded && + READ_VREG(HEVC_SHIFT_BYTE_COUNT) < hw->data_size) { + if (enable_single_slice == 1) { + hw->consume_byte = READ_VREG(HEVC_SHIFT_BYTE_COUNT) - 4; + hw->dec_result = DEC_RESULT_UNFINISH; + amhevc_stop(); +#ifdef MCRCC_ENABLE + if (mcrcc_cache_alg_flag) + dump_hit_rate(hw); +#endif + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + vdec_schedule_work(&hw->work); + }else { +#ifdef DEBUG_CRC_ERROR + if ((crc_debug_flag & 0x40) && cm->cur_frame) + dump_mv_buffer(hw, &cm->cur_frame->buf); +#endif + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + av1_print(hw, AOM_DEBUG_HW_MORE, + "PIC_END, fgs_valid %d search head ...\n", + hw->fgs_valid); + if (hw->config_next_ref_info_flag) + config_next_ref_info_hw(hw); + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + } + } else { + hw->data_size = 0; + hw->data_offset = 0; +#ifdef DEBUG_CRC_ERROR + if ((crc_debug_flag & 0x40) && cm->cur_frame) + dump_mv_buffer(hw, &cm->cur_frame->buf); +#endif + hw->dec_result = DEC_RESULT_DONE; + amhevc_stop(); +#ifdef MCRCC_ENABLE + if (mcrcc_cache_alg_flag) + dump_hit_rate(hw); +#endif + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_EDN); + vdec_schedule_work(&hw->work); + } + } else { + av1_print(hw, AOM_DEBUG_HW_MORE, + "PIC_END, fgs_valid %d search head ...\n", + hw->fgs_valid); +#ifdef USE_DEC_PIC_END + if (READ_VREG(PIC_END_LCU_COUNT) != 0) { + hw->frame_decoded = 1; + /* + In c module, multi obus are put in one packet, which is decoded + with av1_receive_compressed_data(). 
+ For STREAM_MODE or SINGLE_MODE, there is no packet boundary, + we assume each packet must and only include one picture of data (LCUs) + or cm->show_existing_frame is 1 + */ + if (cm->cur_frame) + av1_print(hw, AOM_DEBUG_HW_MORE, "Decoding done (index %d)\n", + cm->cur_frame? cm->cur_frame->buf.index:-1); + config_next_ref_info_hw(hw); + } +#endif + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + /* + if (debug & + AV1_DEBUG_BUFMGR_MORE) + dump_aux_buf(hw); + set_aux_data(hw, + &cm->cur_frame->buf, 0, 0); + */ + if (hw->low_latency_flag) { + av1_postproc(hw); + vdec_profile(hw_to_vdec(hw), VDEC_PROFILE_EVENT_CB); + if (debug & PRINT_FLAG_VDEC_DETAIL) + pr_info("%s AV1 frame done \n", __func__); + } + } + + start_process_time(hw); + hw->process_busy = 0; + return IRQ_HANDLED; + } + + if (dec_status == AOM_EOS) { + if (hw->m_ins_flag) + reset_process_time(hw); + + av1_print(hw, AOM_DEBUG_HW_MORE, "AV1_EOS, flush buffer\r\n"); + + av1_postproc(hw); + + av1_print(hw, AOM_DEBUG_HW_MORE, "send AV1_10B_DISCARD_NAL\r\n"); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_DISCARD_NAL); + hw->process_busy = 0; + if (hw->m_ins_flag) { + hw->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + vdec_schedule_work(&hw->work); + } + return IRQ_HANDLED; + } else if (dec_status == AOM_DECODE_OVER_SIZE) { + av1_print(hw, AOM_DEBUG_HW_MORE, "av1 decode oversize !!\n"); + /*debug |= (AV1_DEBUG_DIS_LOC_ERROR_PROC | + AV1_DEBUG_DIS_SYS_ERROR_PROC);*/ + hw->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + hw->process_busy = 0; + if (hw->m_ins_flag) + reset_process_time(hw); + return IRQ_HANDLED; + } + + obu_type = load_param(hw, &hw->aom_param, dec_status); + if (obu_type < 0) { + hw->process_busy = 0; + return IRQ_HANDLED; + } + + if (obu_type == OBU_SEQUENCE_HEADER) { + int next_lcu_size; + + av1_bufmgr_process(hw->pbi, &hw->aom_param, 0, obu_type); + + if ((hw->max_pic_w < hw->aom_param.p.max_frame_width) || + (hw->max_pic_h < hw->aom_param.p.max_frame_height)) { + av1_print(hw, 0, 
"%s, max size change (%d, %d) -> (%d, %d)\n", + __func__, hw->max_pic_w, hw->max_pic_h, + hw->aom_param.p.max_frame_width, hw->aom_param.p.max_frame_height); + + vav1_mmu_map_free(hw); + + hw->max_pic_w = hw->aom_param.p.max_frame_width; + hw->max_pic_h = hw->aom_param.p.max_frame_height; + hw->init_pic_w = hw->max_pic_w; + hw->init_pic_h = hw->max_pic_h; + hw->pbi->frame_width = hw->init_pic_w; + hw->pbi->frame_height = hw->init_pic_h; + + vav1_mmu_map_alloc(hw); + + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + WRITE_VREG(HEVC_SAO_MMU_DMA_CTRL, hw->frame_mmu_map_phy_addr); + } +#ifdef AOM_AV1_MMU_DW + if (get_double_write_mode(hw) & 0x20) { + WRITE_VREG(HEVC_SAO_MMU_DMA_CTRL2, hw->dw_frame_mmu_map_phy_addr); + //default of 0xffffffff will disable dw + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0); + WRITE_VREG(HEVC_SAO_C_START_ADDR, 0); + } +#endif + + /*v4l2 alloc new mv when max size changed */ + if (hw->is_used_v4l && IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h)) { + /* now less than 8k use fix mv buf size */ + if (hw->pic_list_init_done) { + if (init_mv_buf_list(hw) < 0) + pr_err("%s: !!!!Error, reinit_mv_buf_list fail\n", __func__); + } + } + } + + bit_depth_luma = hw->aom_param.p.bit_depth; + bit_depth_chroma = hw->aom_param.p.bit_depth; + hw->film_grain_present = hw->aom_param.p.film_grain_present_flag; + + next_lcu_size = ((hw->aom_param.p.seq_flags >> 6) & 0x1) ? 
128 : 64; + hw->video_signal_type = (hw->aom_param.p.video_signal_type << 16 + | hw->aom_param.p.color_description); + + if (next_lcu_size != hw->current_lcu_size) { + av1_print(hw, AOM_DEBUG_HW_MORE, + " ## lcu_size changed from %d to %d\n", + hw->current_lcu_size, next_lcu_size); + hw->current_lcu_size = next_lcu_size; + } + + av1_print(hw, AOM_DEBUG_HW_MORE, + "AOM_AV1_SEQ_HEAD_PARSER_DONE, search head ...\n"); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_SEARCH_HEAD); + + hw->process_busy = 0; + hw->has_sequence = 1; + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + return IRQ_HANDLED; + } + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + hw->frame_width = hw->common.seq_params.max_frame_width; + hw->frame_height = hw->common.seq_params.max_frame_height; + + if (hw->frame_width == 0 || hw->frame_height == 0) { + hw->dec_result = DEC_RESULT_DISCARD_DATA; + hw->process_busy = 0; + amhevc_stop(); + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + + if (!v4l_res_change(hw)) { + if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) { + struct aml_vdec_ps_infos ps; + struct vdec_comp_buf_info comp; + + pr_info("set ucode parse\n"); + + ctx->film_grain_present = hw->film_grain_present; + if (ctx->film_grain_present && + !disable_fg && + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_S4 || + get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_S4D)) { +#ifdef AOM_AV1_MMU_DW + ctx->config.parm.dec.cfg.double_write_mode = 0x21; + pr_info("AV1 has fg, use dw 0x21!\n"); + if (hw->dw_frame_mmu_map_addr == NULL) { + u32 mmu_map_size = vaom_dw_frame_mmu_map_size(hw); + hw->dw_frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + mmu_map_size, + &hw->dw_frame_mmu_map_phy_addr, GFP_KERNEL); + if (hw->dw_frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(hw->dw_frame_mmu_map_addr, 0, mmu_map_size); + } +#endif + } + if 
(get_valid_double_write_mode(hw) != 16) { + vav1_get_comp_buf_info(hw, &comp); + vdec_v4l_set_comp_buf_info(ctx, &comp); + } + vav1_get_ps_info(hw, &ps); + /*notice the v4l2 codec.*/ + vdec_v4l_set_ps_infos(ctx, &ps); + hw->v4l_params_parsed = true; + work_space_size_update(hw); + hw->postproc_done = 0; + hw->process_busy = 0; + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + dec_again_process(hw); + return IRQ_HANDLED; + } else { + struct vdec_pic_info pic; + + if (!hw->pic_list_init_done) { + vdec_v4l_get_pic_info(ctx, &pic); + hw->used_buf_num = pic.dpb_frames + + pic.dpb_margin; + + if (IS_4K_SIZE(hw->init_pic_w, hw->init_pic_h)) { + hw->used_buf_num = MAX_BUF_NUM_LESS + pic.dpb_margin; + if (hw->used_buf_num > REF_FRAMES_4K) + hw->mv_buf_margin = hw->used_buf_num - REF_FRAMES_4K + 1; + } + + if (IS_8K_SIZE(hw->max_pic_w, hw->max_pic_h)) { + hw->double_write_mode = 4; + hw->used_buf_num = MAX_BUF_NUM_LESS; + if (hw->used_buf_num > REF_FRAMES_4K) + hw->mv_buf_margin = hw->used_buf_num - REF_FRAMES_4K + 1; + if (((hw->max_pic_w % 64) != 0) && + (hw_to_vdec(hw)->canvas_mode != CANVAS_BLKMODE_LINEAR)) + hw->mem_map_mode = 2; + av1_print(hw, 0, + "force 8k double write 4, mem_map_mode %d\n", hw->mem_map_mode); + } + + if (hw->used_buf_num > MAX_BUF_NUM) + hw->used_buf_num = MAX_BUF_NUM; + + init_pic_list(hw); + init_pic_list_hw(hw); +#ifndef MV_USE_FIXED_BUF + if (init_mv_buf_list(hw) < 0) { + pr_err("%s: !!!!Error, init_mv_buf_list fail\n", __func__); + } +#endif + hw->pic_list_init_done = true; + } + } + } else { + hw->postproc_done = 0; + hw->process_busy = 0; + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + dec_again_process(hw); + return IRQ_HANDLED; + } + } + +#ifndef USE_DEC_PIC_END + //if (pbi->wait_buf) { + if (pbi->bufmgr_proc_count > 0) { + if (READ_VREG(PIC_END_LCU_COUNT) != 0) { + hw->frame_decoded = 1; + /* + In c module, multi obus are put in one packet, which is decoded + with 
av1_receive_compressed_data(). + For STREAM_MODE or SINGLE_MODE, there is no packet boundary, + we assume each packet must and only include one picture of data (LCUs) + or cm->show_existing_frame is 1 + */ + if (cm->cur_frame) + av1_print(hw, AOM_DEBUG_HW_MORE, "Decoding done (index %d)\n", + cm->cur_frame? cm->cur_frame->buf.index:-1); + } + } +#endif +#if 1 +/*def CHECK_OBU_REDUNDANT_FRAME_HEADER*/ + if (debug & AOM_DEBUG_BUFMGR_ONLY) { + if (READ_VREG(PIC_END_LCU_COUNT) != 0) + hw->obu_frame_frame_head_come_after_tile = 0; + + if (obu_type == OBU_FRAME_HEADER || + obu_type == OBU_FRAME) { + hw->obu_frame_frame_head_come_after_tile = 1; + } else if (obu_type == OBU_REDUNDANT_FRAME_HEADER && + hw->obu_frame_frame_head_come_after_tile == 0) { + if (hw->frame_decoded == 1) { + av1_print(hw, AOM_DEBUG_HW_MORE, + "Warning, OBU_REDUNDANT_FRAME_HEADER come without OBU_FRAME or OBU_FRAME_HEAD\n"); + hw->frame_decoded = 0; + } + } + } +#endif + if (hw->frame_decoded) + hw->one_compressed_data_done = 1; + + if (hw->m_ins_flag) + reset_process_time(hw); + + if (hw->process_state != PROC_STATE_SENDAGAIN + ) { + if (hw->one_compressed_data_done) { + av1_postproc(hw); + av1_release_bufs(hw); +#ifndef MV_USE_FIXED_BUF + put_un_used_mv_bufs(hw); +#endif + } + } + + if (hw->one_package_frame_cnt) { + if (get_free_buf_count(hw) <= 0) { + hw->dec_result = AOM_AV1_RESULT_NEED_MORE_BUFFER; + hw->cur_obu_type = obu_type; + hw->process_busy = 0; + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + vdec_schedule_work(&hw->work); + return IRQ_HANDLED; + } + } + hw->one_package_frame_cnt++; + + ret = av1_continue_decoding(hw, obu_type); + hw->postproc_done = 0; + hw->process_busy = 0; + + if (hw->m_ins_flag) { + if (ret >= 0) + start_process_time(hw); + else { + hw->dec_result = DEC_RESULT_DONE; + amhevc_stop(); + vdec_schedule_work(&hw->work); + } + } + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END); + return IRQ_HANDLED; +} + +static 
irqreturn_t vav1_isr(int irq, void *data) +{ + int i; + unsigned int dec_status; + struct AV1HW_s *hw = (struct AV1HW_s *)data; + //struct AV1_Common_s *const cm = &hw->common; + uint debug_tag; + + WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1); + + dec_status = READ_VREG(HEVC_DEC_STATUS_REG) & 0xff; + + if (dec_status == AOM_AV1_FRAME_HEAD_PARSER_DONE || + dec_status == AOM_AV1_SEQ_HEAD_PARSER_DONE || + dec_status == AOM_AV1_FRAME_PARSER_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_HEAD_DONE); + } + else if (dec_status == AOM_AV1_DEC_PIC_END || + dec_status == AOM_NAL_DECODE_DONE) { + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_PIC_DONE); + } + if (!hw) + return IRQ_HANDLED; + if (hw->init_flag == 0) + return IRQ_HANDLED; + if (hw->process_busy) {/*on process.*/ + pr_err("err: %s, process busy\n", __func__); + return IRQ_HANDLED; + } + + ATRACE_COUNTER("V_ST_DEC-decode_state", dec_status); + + hw->dec_status = dec_status; + hw->process_busy = 1; + if (debug & AV1_DEBUG_BUFMGR) + av1_print(hw, AV1_DEBUG_BUFMGR, + "av1 isr (%d) dec status = 0x%x (0x%x), lcu 0x%x shiftbyte 0x%x shifted_data 0x%x (%x %x lev %x, wr %x, rd %x) log %x\n", + irq, + dec_status, READ_VREG(HEVC_DEC_STATUS_REG), + READ_VREG(HEVC_PARSER_LCU_START), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFTED_DATA), + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), +#ifdef DEBUG_UCODE_LOG + READ_VREG(HEVC_DBG_LOG_ADR) +#else + 0 +#endif + ); +#ifdef DEBUG_UCODE_LOG + if ((udebug_flag & 0x8) && + (hw->ucode_log_addr != 0) && + (READ_VREG(HEVC_DEC_STATUS_REG) & 0x100)) { + unsigned long flags; + unsigned short *log_adr = + (unsigned short *)hw->ucode_log_addr; + lock_buffer_pool(hw->pbi->common.buffer_pool, flags); + while (*(log_adr + 3)) { + pr_info("dbg%04x %04x %04x %04x\n", + *(log_adr + 3), *(log_adr + 2), *(log_adr + 1), *(log_adr + 0) + ); + 
log_adr += 4; + } + unlock_buffer_pool(hw->pbi->common.buffer_pool, flags); + } +#endif + debug_tag = READ_HREG(DEBUG_REG1); + if (debug_tag & 0x10000) { + pr_info("LMEM<tag %x>:\n", READ_HREG(DEBUG_REG1)); + for (i = 0; i < 0x400; i += 4) { + int ii; + if ((i & 0xf) == 0) + pr_info("%03x: ", i); + for (ii = 0; ii < 4; ii++) { + pr_info("%04x ", + hw->lmem_ptr[i + 3 - ii]); + } + if (((i + ii) & 0xf) == 0) + pr_info("\n"); + } + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == hw->result_done_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_HREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + } + else if (debug_tag & 0x20000) + hw->ucode_pause_pos = 0xffffffff; + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_HREG(DEBUG_REG1, 0); + } else if (debug_tag != 0) { + pr_info( + "dbg%x: %x lcu %x\n", READ_HREG(DEBUG_REG1), + READ_HREG(DEBUG_REG2), + READ_VREG(HEVC_PARSER_LCU_START)); + + if (((udebug_pause_pos & 0xffff) + == (debug_tag & 0xffff)) && + (udebug_pause_decode_idx == 0 || + udebug_pause_decode_idx == hw->result_done_count) && + (udebug_pause_val == 0 || + udebug_pause_val == READ_HREG(DEBUG_REG2))) { + udebug_pause_pos &= 0xffff; + hw->ucode_pause_pos = udebug_pause_pos; + } + if (hw->ucode_pause_pos) + reset_process_time(hw); + else + WRITE_HREG(DEBUG_REG1, 0); + hw->process_busy = 0; + return IRQ_HANDLED; + } + + //if (READ_VREG(HEVC_FG_STATUS) == AOM_AV1_FGS_PARAM) { + if (hw->dec_status == AOM_AV1_FGS_PARAM) { + uint32_t status_val = READ_VREG(HEVC_FG_STATUS); + WRITE_VREG(HEVC_FG_STATUS, AOM_AV1_FGS_PARAM_CONT); + WRITE_VREG(HEVC_DEC_STATUS_REG, AOM_AV1_FGS_PARAM_CONT); + // Bit[11] - 0 Read, 1 - Write + // Bit[10:8] - film_grain_params_ref_idx // For Write request + if ((status_val >> 11) & 0x1) { + uint32_t film_grain_params_ref_idx = (status_val >> 8) & 0x7; + config_film_grain_reg(hw, 
film_grain_params_ref_idx); + } + else + read_film_grain_reg(hw); + + film_grain_task_wakeup(hw); + + hw->process_busy = 0; + return IRQ_HANDLED; + } + + if (!hw->m_ins_flag) { + av1_print(hw, AV1_DEBUG_BUFMGR, + "error flag = %d\n", hw->error_flag); + if (hw->error_flag == 1) { + hw->error_flag = 2; + hw->process_busy = 0; + return IRQ_HANDLED; + } else if (hw->error_flag == 3) { + hw->process_busy = 0; + return IRQ_HANDLED; + } + if (get_free_buf_count(hw) <= 0) { + /* + if (hw->wait_buf == 0) + pr_info("set wait_buf to 1\r\n"); + */ + hw->wait_buf = 1; + hw->process_busy = 0; + av1_print(hw, AV1_DEBUG_BUFMGR, + "free buf not enough = %d\n", + get_free_buf_count(hw)); + return IRQ_HANDLED; + } + } + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_ISR_END); + return IRQ_WAKE_THREAD; +} + +static void av1_set_clk(struct work_struct *work) +{ + struct AV1HW_s *hw = container_of(work, + struct AV1HW_s, set_clk_work); + int fps = 96000 / hw->frame_dur; + + if (hevc_source_changed(VFORMAT_AV1, + frame_width, frame_height, fps) > 0) + hw->saved_resolution = frame_width * + frame_height * fps; +} + +static void vav1_put_timer_func(struct timer_list *timer) +{ + struct AV1HW_s *hw = container_of(timer, + struct AV1HW_s, timer); + uint8_t empty_flag; + unsigned int buf_level; + + enum receviver_start_e state = RECEIVER_INACTIVE; + + if (hw->m_ins_flag) { + if (hw_to_vdec(hw)->next_status + == VDEC_STATUS_DISCONNECTED) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + if (!hw->is_used_v4l || ctx->is_stream_off) { + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + pr_debug("vdec requested to be disconnected\n"); + return; + } + } + } + if (hw->init_flag == 0) { + if (hw->stat & STAT_TIMER_ARM) { + timer->expires = jiffies + PUT_INTERVAL; + add_timer(&hw->timer); + } + return; + } + if (hw->m_ins_flag == 0) { + if (vf_get_receiver(hw->provider_name)) { + state = + vf_notify_receiver(hw->provider_name, + 
VFRAME_EVENT_PROVIDER_QUREY_STATE,
                NULL);
            if ((state == RECEIVER_STATE_NULL)
                || (state == RECEIVER_STATE_NONE))
                state = RECEIVER_INACTIVE;
        } else
            state = RECEIVER_INACTIVE;

        /* bit 6 of the parser int status = stream-empty indication */
        empty_flag = (READ_VREG(HEVC_PARSER_INT_STATUS) >> 6) & 0x1;
        /* error watchdog */
        if (empty_flag == 0) {
            /* decoder has input */
            if ((debug & AV1_DEBUG_DIS_LOC_ERROR_PROC) == 0) {

                buf_level = READ_VREG(HEVC_STREAM_LEVEL);
                /* receiver has no buffer to recycle */
                if ((state == RECEIVER_INACTIVE) &&
                    (kfifo_is_empty(&hw->display_q) &&
                    buf_level > 0x200)
                    ) {
                    /* kick the mailbox IRQ to re-enter the ISR path */
                    WRITE_VREG
                    (HEVC_ASSIST_MBOX0_IRQ_REG,
                     0x1);
                }
            }

        }
    }
#ifdef MULTI_INSTANCE_SUPPORT
    else {
        /*
         * Multi-instance decode timeout watchdog: if no LCU progress is
         * seen for decode_timeout_val ms, either run timeout processing
         * (input available) or re-schedule the chunk (stream empty).
         */
        av1_print(hw, AV1_DEBUG_TIMEOUT_INFO, "timeout!!!start_process_time %ld\n",
            hw->start_process_time);
        if (
            (decode_timeout_val > 0) &&
            (hw->start_process_time > 0) &&
            ((1000 * (jiffies - hw->start_process_time) / HZ)
                > decode_timeout_val)
        ) {
            int current_lcu_idx =
                READ_VREG(HEVC_PARSER_LCU_START)
                & 0xffffff;
            av1_print(hw, AV1_DEBUG_TIMEOUT_INFO, "timeout!!!current_lcu_idx = %u last_lcu_idx = %u decode_timeout_count = %d\n",
                current_lcu_idx, hw->last_lcu_idx, hw->decode_timeout_count);
            if (hw->last_lcu_idx == current_lcu_idx) {
                /* no progress since last tick: count down to timeout */
                if (hw->decode_timeout_count > 0)
                    hw->decode_timeout_count--;
                if (hw->decode_timeout_count == 0) {
                    if (input_frame_based(
                        hw_to_vdec(hw)) ||
                        (READ_VREG(HEVC_STREAM_LEVEL) > 0x200))
                        timeout_process(hw);
                    else {
                        av1_print(hw, 0,
                            "timeout & empty, again\n");
                        dec_again_process(hw);
                    }
                }
            } else {
                /* progress made: restart the timeout window */
                start_process_time(hw);
                hw->last_lcu_idx = current_lcu_idx;
            }
        }
    }
#endif

    /* clear a stale ucode pause once the debug knob moved elsewhere */
    if ((hw->ucode_pause_pos != 0) &&
        (hw->ucode_pause_pos != 0xffffffff) &&
        udebug_pause_pos != hw->ucode_pause_pos) {
        hw->ucode_pause_pos = 0;
        WRITE_HREG(DEBUG_REG1, 0);
    }
#ifdef MULTI_INSTANCE_SUPPORT
    if (debug & AV1_DEBUG_DUMP_DATA) {
        /* one-shot chunk dump; flag is self-clearing */
        debug &= ~AV1_DEBUG_DUMP_DATA;
        av1_print(hw, 0,
            "%s: chunk size 0x%x off 0x%x sum 0x%x\n",
            __func__,
            hw->data_size,
            hw->data_offset,
            get_data_check_sum(hw, hw->data_size)
            );
        dump_data(hw, hw->data_size);
    }
#endif
    if (debug & AV1_DEBUG_DUMP_PIC_LIST) {
        /*dump_pic_list(hw);*/
        av1_dump_state(hw_to_vdec(hw));
        debug &= ~AV1_DEBUG_DUMP_PIC_LIST;
    }
    if (debug & AV1_DEBUG_TRIG_SLICE_SEGMENT_PROC) {
        WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1);
        debug &= ~AV1_DEBUG_TRIG_SLICE_SEGMENT_PROC;
    }
    /*if (debug & AV1_DEBUG_HW_RESET) {
    }*/

    /* debugfs-style register peek/poke: radr[31:24]=count, [23:0]=addr */
    if (radr != 0) {
        if ((radr >> 24) != 0) {
            int count = radr >> 24;
            int adr = radr & 0xffffff;
            int i;
            for (i = 0; i < count; i++)
                pr_info("READ_VREG(%x)=%x\n", adr+i, READ_VREG(adr+i));
        } else if (rval != 0) {
            WRITE_VREG(radr, rval);
            pr_info("WRITE_VREG(%x,%x)\n", radr, rval);
        } else
            pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr));
        rval = 0;
        radr = 0;
    }
    /* debug: pop N 16-bit words from the stream shifter and checksum them */
    if (pop_shorts != 0) {
        int i;
        u32 sum = 0;

        pr_info("pop stream 0x%x shorts\r\n", pop_shorts);
        for (i = 0; i < pop_shorts; i++) {
            u32 data =
                (READ_HREG(HEVC_SHIFTED_DATA) >> 16);
            WRITE_HREG(HEVC_SHIFT_COMMAND,
                (1<<7)|16);
            if ((i & 0xf) == 0)
                pr_info("%04x:", i);
            pr_info("%04x ", data);
            if (((i + 1) & 0xf) == 0)
                pr_info("\r\n");
            sum += data;
        }
        pr_info("\r\nsum = %x\r\n", sum);
        pop_shorts = 0;
    }
    if (dbg_cmd != 0) {
        if (dbg_cmd == 1) {
            /* print the physical address of the buffer on display */
            u32 disp_laddr;

            if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB &&
                get_double_write_mode(hw) == 0) {
                disp_laddr =
                    READ_VCBUS_REG(AFBC_BODY_BADDR) << 4;
            } else {
                struct canvas_s cur_canvas;

                canvas_read((READ_VCBUS_REG(VD1_IF0_CANVAS0)
                    & 0xff), &cur_canvas);
                disp_laddr = cur_canvas.addr;
            }
            pr_info("current displayed buffer address %x\r\n",
                disp_laddr);
        }
        dbg_cmd = 0;
    }
    /*don't changed at start.*/
    if (hw->get_frame_dur && hw->show_frame_num > 60 &&
        hw->frame_dur > 0 && hw->saved_resolution !=
        frame_width * frame_height *
        (96000 / hw->frame_dur))
        vdec_schedule_work(&hw->set_clk_work);

    /* re-arm the watchdog */
    timer->expires = jiffies + PUT_INTERVAL;
    add_timer(timer);
}


/*
 * vav1_dec_status() - fill a vdec_info snapshot for the status ioctl.
 * Returns 0 on success, -1 when the private context is missing.
 */
int vav1_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
{
    struct AV1HW_s *av1 =
        (struct AV1HW_s *)vdec->private;

    if (!av1)
        return -1;

    vstatus->frame_width = frame_width;
    vstatus->frame_height = frame_height;
    /* frame_dur is in 1/96000 s units, hence rate = 96000 / dur */
    if (av1->frame_dur != 0)
        vstatus->frame_rate = 96000 / av1->frame_dur;
    else
        vstatus->frame_rate = -1;
    vstatus->error_count = 0;
    vstatus->status = av1->stat | av1->fatal_error;
    vstatus->frame_dur = av1->frame_dur;
//#ifndef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
    vstatus->bit_rate = av1->gvs->bit_rate;
    vstatus->frame_data = av1->gvs->frame_data;
    vstatus->total_data = av1->gvs->total_data;
    vstatus->frame_count = av1->gvs->frame_count;
    vstatus->error_frame_count = av1->gvs->error_frame_count;
    vstatus->drop_frame_count = av1->gvs->drop_frame_count;
    vstatus->samp_cnt = av1->gvs->samp_cnt;
    vstatus->offset = av1->gvs->offset;
    snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name),
        "%s", DRIVER_NAME);
//#endif
    return 0;
}

/* vav1_set_isreset() - record the reset state requested by the vdec core. */
int vav1_set_isreset(struct vdec_s *vdec, int isreset)
{
    is_reset = isreset;
    return 0;
}

#if 0
/* Legacy power-up sequence, superseded by the common amhevc_* helpers. */
static void AV1_DECODE_INIT(void)
{
    /* enable av1 clocks */
    WRITE_VREG(DOS_GCLK_EN3, 0xffffffff);
    /* *************************************************************** */
    /* Power ON HEVC */
    /* *************************************************************** */
    /* Powerup HEVC */
    WRITE_VREG(AO_RTI_GEN_PWR_SLEEP0,
        READ_VREG(AO_RTI_GEN_PWR_SLEEP0) & (~(0x3 << 6)));
    WRITE_VREG(DOS_MEM_PD_HEVC, 0x0);
    WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) | (0x3ffff << 2));
    WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) & (~(0x3ffff << 2)));
    /* remove isolations */
    WRITE_VREG(AO_RTI_GEN_PWR_ISO0,
        READ_VREG(AO_RTI_GEN_PWR_ISO0) & (~(0x3 << 10)));

}
#endif

/*
 * vav1_prot_init() - program the AMRISC-side decode protocol registers.
 * @mask selects front-end (stream/parser) and/or back-end (pixel) setup.
 */
static void vav1_prot_init(struct AV1HW_s *hw, u32 mask)
{
    unsigned int data32;
    /* AV1_DECODE_INIT(); */
    av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__,
__LINE__);

    aom_config_work_space_hw(hw, mask);
    if (mask & HW_MASK_BACK) {
        //to do: .. for single instance, called after init_pic_list()
        if (hw->m_ins_flag)
            init_pic_list_hw(hw);
    }

    aom_init_decoder_hw(hw, mask);

#ifdef AOM_AV1_DBLK_INIT
    av1_print(hw, AOM_DEBUG_HW_MORE,
        "[test.c] av1_loop_filter_init (run once before decoding start)\n");
    av1_loop_filter_init(hw->lfi, hw->lf);
#endif
    /* everything below is front-end (stream fetch / parser) setup only */
    if ((mask & HW_MASK_FRONT) == 0)
        return;
#if 1
    if (debug & AV1_DEBUG_BUFMGR_MORE)
        pr_info("%s\n", __func__);
    data32 = READ_VREG(HEVC_STREAM_CONTROL);
    data32 = data32 |
        (1 << 0)/*stream_fetch_enable*/
        ;
    WRITE_VREG(HEVC_STREAM_CONTROL, data32);

    if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) {
        if (debug & AV1_DEBUG_BUFMGR)
            pr_info("[test.c] Config STREAM_FIFO_CTL\n");
        data32 = READ_VREG(HEVC_STREAM_FIFO_CTL);
        data32 = data32 |
            (1 << 29) // stream_fifo_hole
            ;
        WRITE_VREG(HEVC_STREAM_FIFO_CTL, data32);
    }
#if 0
    /* disabled register read/write self-test of the shifter */
    data32 = READ_VREG(HEVC_SHIFT_STARTCODE);
    if (data32 != 0x00000100) {
        pr_info("av1 prot init error %d\n", __LINE__);
        return;
    }
    data32 = READ_VREG(HEVC_SHIFT_EMULATECODE);
    if (data32 != 0x00000300) {
        pr_info("av1 prot init error %d\n", __LINE__);
        return;
    }
    WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x12345678);
    WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x9abcdef0);
    data32 = READ_VREG(HEVC_SHIFT_STARTCODE);
    if (data32 != 0x12345678) {
        pr_info("av1 prot init error %d\n", __LINE__);
        return;
    }
    data32 = READ_VREG(HEVC_SHIFT_EMULATECODE);
    if (data32 != 0x9abcdef0) {
        pr_info("av1 prot init error %d\n", __LINE__);
        return;
    }
#endif
    WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x000000001);
    WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x00000300);
#endif



    WRITE_VREG(HEVC_WAIT_FLAG, 1);

    /* WRITE_VREG(HEVC_MPSR, 1); */

    /* clear mailbox interrupt */
    WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1);

    /* enable mailbox interrupt */
    WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 1);

    /* disable PSCALE for hardware sharing */
    WRITE_VREG(HEVC_PSCALE_CTRL, 0);

    WRITE_VREG(DEBUG_REG1, 0x0);
    /*check vps/sps/pps/i-slice in ucode*/
    WRITE_VREG(NAL_SEARCH_CTL, 0x8);

    WRITE_VREG(DECODE_STOP_POS, udebug_flag);
//#if (defined DEBUG_UCODE_LOG) || (defined DEBUG_CMD)
//	WRITE_VREG(HEVC_DBG_LOG_ADR, hw->ucode_log_phy_addr);
//#endif
}

/*
 * vav1_local_init() - (re)initialize per-instance software state:
 * statistics, frame duration, vframe pools and the buffer manager.
 * Returns the result of av1_local_init(), or -1 on allocation failure.
 * NOTE(review): hw->gvs is vzalloc'd unconditionally here — presumably
 * callers never invoke this twice without freeing; verify to rule out a
 * leak on re-init.
 */
static int vav1_local_init(struct AV1HW_s *hw, bool reset_flag)
{
    int i;
    int ret;
    int width, height;

    hw->gvs = vzalloc(sizeof(struct vdec_info));
    if (NULL == hw->gvs) {
        pr_info("the struct of vdec status malloc failed.\n");
        return -1;
    }
#ifdef DEBUG_PTS
    hw->pts_missed = 0;
    hw->pts_hit = 0;
#endif
    hw->new_frame_displayed = 0;
    hw->last_put_idx = -1;
    hw->saved_resolution = 0;
    hw->get_frame_dur = false;
    on_no_keyframe_skiped = 0;
    hw->first_pts_index = 0;
    hw->dur_recalc_flag = 0;
    hw->av1_first_pts_ready = false;
    width = hw->vav1_amstream_dec_info.width;
    height = hw->vav1_amstream_dec_info.height;
    /* default duration 3200 (= 30fps in 1/96000 s units) when unspecified */
    hw->frame_dur =
        (hw->vav1_amstream_dec_info.rate ==
        0) ? 3200 : hw->vav1_amstream_dec_info.rate;
    if (width && height)
        hw->frame_ar = height * 0x100 / width;
/*
 *TODO:FOR VERSION
 */
    pr_info("av1: ver (%d,%d) decinfo: %dx%d rate=%d\n", av1_version,
        0, width, height, hw->frame_dur);

    if (hw->frame_dur == 0)
        hw->frame_dur = 96000 / 24;

    INIT_KFIFO(hw->display_q);
    INIT_KFIFO(hw->newframe_q);

    for (i = 0; i < FRAME_BUFFERS; i++) {
        hw->buffer_wrap[i] = i;
    }

    /* all vframes start unassigned (index -1) in the free queue */
    for (i = 0; i < VF_POOL_SIZE; i++) {
        const struct vframe_s *vf = &hw->vfpool[i];

        hw->vfpool[i].index = -1;
        kfifo_put(&hw->newframe_q, vf);
    }

    ret = av1_local_init(hw, reset_flag);

    if (force_pts_unstable) {
        if (!hw->pts_unstable) {
            hw->pts_unstable =
                (hw->vav1_amstream_dec_info.rate == 0)?1:0;
            pr_info("set pts unstable\n");
        }
    }

    return ret;
}


/*
 * vav1_init() - one-time bring-up of a decoder instance: local state,
 * firmware load, timer/work setup.  In multi-instance mode the firmware
 * is kept in hw->fw and loading is deferred; otherwise the microcode is
 * loaded, the IRQ is requested and the hardware is started here.
 */
#ifdef MULTI_INSTANCE_SUPPORT
static s32 vav1_init(struct vdec_s *vdec)
{
    struct AV1HW_s *hw = (struct AV1HW_s *)vdec->private;
#else
static s32 vav1_init(struct AV1HW_s *hw)
{
#endif
    int ret;
    int i;
    int fw_size = 0x1000 * 16;
    struct firmware_s *fw = NULL;

    hw->stat |= STAT_TIMER_INIT;

    if (vav1_local_init(hw, false) < 0)
        return -EBUSY;

    fw = vmalloc(sizeof(struct firmware_s) + fw_size);
    if (IS_ERR_OR_NULL(fw))
        return -ENOMEM;

    av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__, __LINE__);
#ifdef DEBUG_USE_VP9_DEVICE_NAME
    if (get_firmware_data(VIDEO_DEC_VP9_MMU, fw->data) < 0) {
#else
    if (get_firmware_data(VIDEO_DEC_AV1_MMU, fw->data) < 0) {
#endif
        pr_err("get firmware fail.\n");
        printk("%s %d\n", __func__, __LINE__);
        vfree(fw);
        return -1;
    }
    av1_print(hw, AOM_DEBUG_HW_MORE, "%s %d\n", __func__, __LINE__);
    fw->len = fw_size;

    INIT_WORK(&hw->set_clk_work, av1_set_clk);
    timer_setup(&hw->timer, vav1_put_timer_func, 0);

    /* zero out the film-grain parameter registers */
    for (i = 0; i < FILM_GRAIN_REG_SIZE; i++) {
        WRITE_VREG(HEVC_FGS_DATA, 0);
    }
    WRITE_VREG(HEVC_FGS_CTRL, 0);

#ifdef MULTI_INSTANCE_SUPPORT
    if (hw->m_ins_flag) {
        hw->timer.expires =
jiffies + PUT_INTERVAL;

        /*add_timer(&hw->timer);

        hw->stat |= STAT_TIMER_ARM;
        hw->stat |= STAT_ISR_REG;*/

        INIT_WORK(&hw->work, av1_work);
        /* keep the firmware image; it is loaded per run in multi-instance */
        hw->fw = fw;

        return 0; /*multi instance return */
    }
#endif
    amhevc_enable();

    ret = amhevc_loadmc_ex(VFORMAT_AV1, NULL, fw->data);
    if (ret < 0) {
        amhevc_disable();
        vfree(fw);
        pr_err("AV1: the %s fw loading failed, err: %x\n",
            tee_enabled() ? "TEE" : "local", ret);
        return -EBUSY;
    }

    /* single-instance: firmware already in the core, image no longer needed */
    vfree(fw);

    hw->stat |= STAT_MC_LOAD;

    /* enable AMRISC side protocol */
    vav1_prot_init(hw, HW_MASK_FRONT | HW_MASK_BACK);

    if (vdec_request_threaded_irq(VDEC_IRQ_0,
            vav1_isr,
            vav1_isr_thread_fn,
            IRQF_ONESHOT,/*run thread on this irq disabled*/
            "vav1-irq", (void *)hw)) {
        pr_info("vav1 irq register error.\n");
        amhevc_disable();
        return -ENOENT;
    }

    hw->stat |= STAT_ISR_REG;
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
    if (force_dv_enable)
        hw->provider_name = DV_PROVIDER_NAME;
    else
#endif
        hw->provider_name = PROVIDER_NAME;
#ifdef MULTI_INSTANCE_SUPPORT
    /* non-v4l path: register as a vframe provider and advertise fps */
    if (!hw->is_used_v4l) {
        vf_provider_init(&vav1_vf_prov, hw->provider_name,
            &vav1_vf_provider, hw);
        vf_reg_provider(&vav1_vf_prov);
        vf_notify_receiver(hw->provider_name, VFRAME_EVENT_PROVIDER_START, NULL);
        if (hw->frame_dur != 0) {
            if (!is_reset)
                vf_notify_receiver(hw->provider_name,
                    VFRAME_EVENT_PROVIDER_FR_HINT,
                    (void *)
                    ((unsigned long)hw->frame_dur));
        }
    }
#else
    vf_provider_init(&vav1_vf_prov, hw->provider_name, &vav1_vf_provider,
        hw);
    vf_reg_provider(&vav1_vf_prov);
    vf_notify_receiver(hw->provider_name, VFRAME_EVENT_PROVIDER_START, NULL);
    if (!is_reset)
        vf_notify_receiver(hw->provider_name, VFRAME_EVENT_PROVIDER_FR_HINT,
            (void *)((unsigned long)hw->frame_dur));
#endif
    hw->stat |= STAT_VF_HOOK;

    hw->timer.expires = jiffies + PUT_INTERVAL;
    add_timer(&hw->timer);

    hw->stat |= STAT_VDEC_RUN;
    hw->stat |= STAT_TIMER_ARM;

    amhevc_start();

    hw->init_flag = 1;
    hw->process_busy = 0;
    pr_info("%d, vav1_init, RP=0x%x\n",
        __LINE__, READ_VREG(HEVC_STREAM_RD_PTR));
    return 0;
}

/*
 * vmav1_stop() - tear down an instance: stop the core, free the IRQ and
 * timer, unregister the vframe provider (non-v4l) and release buffers.
 * Each step is gated on the corresponding STAT_* bit set during init.
 */
static int vmav1_stop(struct AV1HW_s *hw)
{
    hw->init_flag = 0;

    if (hw->stat & STAT_VDEC_RUN) {
        amhevc_stop();
        hw->stat &= ~STAT_VDEC_RUN;
    }
    if (hw->stat & STAT_ISR_REG) {
        vdec_free_irq(VDEC_IRQ_0, (void *)hw);
        hw->stat &= ~STAT_ISR_REG;
    }
    if (hw->stat & STAT_TIMER_ARM) {
        del_timer_sync(&hw->timer);
        hw->stat &= ~STAT_TIMER_ARM;
    }

    if (!hw->is_used_v4l && (hw->stat & STAT_VF_HOOK)) {
        if (!is_reset)
            vf_notify_receiver(hw->provider_name,
                VFRAME_EVENT_PROVIDER_FR_END_HINT,
                NULL);

        vf_unreg_provider(&vav1_vf_prov);
        hw->stat &= ~STAT_VF_HOOK;
    }
    av1_local_uninit(hw, false);
    reset_process_time(hw);
    /* flush pending work after the timer/IRQ are gone */
    cancel_work_sync(&hw->work);
    cancel_work_sync(&hw->set_clk_work);
    uninit_mmu_buffers(hw);
    if (hw->fw)
        vfree(hw->fw);
    hw->fw = NULL;
    return 0;
}

/*
 * amvdec_av1_mmu_init() - allocate the decoder MMU boxes (compressed
 * frame buffer page pools) and the BMMU box, sized by max resolution.
 * Returns 0 on success, -1 on any allocation failure.
 */
static int amvdec_av1_mmu_init(struct AV1HW_s *hw)
{
    int tvp_flag = vdec_secure(hw_to_vdec(hw)) ?
        CODEC_MM_FLAGS_TVP : 0;
    /* cache size in MB: 48 for >1080p, 12 for >720p..1080p, 4 for <=720p */
    int buf_size = 48;

    if ((hw->max_pic_w * hw->max_pic_h > 1280*736) &&
        (hw->max_pic_w * hw->max_pic_h <= 1920*1088)) {
        buf_size = 12;
    } else if ((hw->max_pic_w * hw->max_pic_h > 0) &&
        (hw->max_pic_w * hw->max_pic_h <= 1280*736)) {
        buf_size = 4;
    }
    hw->need_cache_size = buf_size * SZ_1M;
    hw->sc_start_time = get_jiffies_64();
    if (hw->mmu_enable && !hw->is_used_v4l) {
        int count = FRAME_BUFFERS;
        hw->mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME,
            hw->index /* * 2*/, count,
            hw->need_cache_size,
            tvp_flag
            );
        if (!hw->mmu_box) {
            pr_err("av1 alloc mmu box failed!!\n");
            return -1;
        }
#ifdef AOM_AV1_MMU_DW
        /* a second MMU box backs the double-write (dw mode 0x20) path */
        if (get_double_write_mode(hw) & 0x20) {
            hw->mmu_box_dw = decoder_mmu_box_alloc_box(DRIVER_NAME,
                hw->index /** 2 + 1*/, count,
                hw->need_cache_size,
                tvp_flag
                );
            if (!hw->mmu_box_dw) {
                pr_err("av1 alloc dw mmu box failed!!\n");
                return -1;
            }
        }
#endif

    }
    hw->bmmu_box = decoder_bmmu_box_alloc_box(
        DRIVER_NAME,
        hw->index,
        MAX_BMMU_BUFFER_NUM,
        4 + PAGE_SHIFT,
        CODEC_MM_FLAGS_CMA_CLEAR |
        CODEC_MM_FLAGS_FOR_VDECODER |
        tvp_flag);
    av1_print(hw, AV1_DEBUG_BUFMGR,
        "%s, MAX_BMMU_BUFFER_NUM = %d\n",
        __func__,
        MAX_BMMU_BUFFER_NUM);
    if (!hw->bmmu_box) {
        pr_err("av1 alloc bmmu box failed!!\n");
        return -1;
    }
    return 0;
}

/****************************************/
#ifdef CONFIG_PM
/* System sleep hooks: delegate to the shared amhevc core PM helpers. */
static int av1_suspend(struct device *dev)
{
    amhevc_suspend(to_platform_device(dev), dev->power.power_state);
    return 0;
}

static int av1_resume(struct device *dev)
{
    amhevc_resume(to_platform_device(dev));
    return 0;
}

static const struct dev_pm_ops av1_pm_ops = {
    SET_SYSTEM_SLEEP_PM_OPS(av1_suspend, av1_resume)
};
#endif

static struct codec_profile_t amvdec_av1_profile = {
    .name = "AV1-V4L",
    .profile = ""
};

/*
 * get_data_check_sum() - CRC32 of the current input chunk, mapping the
 * block first when it is not already kernel-mapped.  Debug-only helper.
 */
static unsigned int get_data_check_sum
    (struct AV1HW_s *hw, int size)
{
    int sum = 0;
    u8 *data = NULL;

    if (!hw->chunk->block->is_mapped)
        data = codec_mm_vmap(hw->chunk->block->start +
            hw->data_offset, size);
    else
        data = ((u8 *)hw->chunk->block->start_virt) +
            hw->data_offset;

    sum = crc32_le(0, data, size);

    if (!hw->chunk->block->is_mapped)
        codec_mm_unmap_phyaddr(data);
    return sum;
}

/*
 * dump_data() - hex-dump the current input chunk (plus the alignment
 * padding bytes that precede it) to the log.  Debug-only helper.
 */
static void dump_data(struct AV1HW_s *hw, int size)
{
    int jj;
    u8 *data = NULL;
    int padding_size = hw->data_offset &
        (VDEC_FIFO_ALIGN - 1);

    if (!hw->chunk->block->is_mapped)
        data = codec_mm_vmap(hw->chunk->block->start +
            hw->data_offset, size);
    else
        data = ((u8 *)hw->chunk->block->start_virt) +
            hw->data_offset;

    av1_print(hw, 0, "padding: ");
    for (jj = padding_size; jj > 0; jj--)
        av1_print_cont(hw,
            0,
            "%02x ", *(data - jj));
    av1_print_cont(hw, 0, "data adr %p\n",
        data);

    for (jj = 0; jj < size; jj++) {
        if ((jj & 0xf) == 0)
            av1_print(hw,
                0,
                "%06x:", jj);
        av1_print_cont(hw,
            0,
            "%02x ", data[jj]);
        if (((jj + 1) & 0xf) == 0)
            av1_print(hw,
                0,
                "\n");
    }
    av1_print(hw,
        0,
        "\n");

    if (!hw->chunk->block->is_mapped)
        codec_mm_unmap_phyaddr(data);
}

/*
 * av1_wait_cap_buf() - posted task that sleeps (up to 300ms) until a v4l
 * capture buffer becomes free or the stream is turned off, then
 * re-schedules the decoder work with the appropriate result code.
 */
static int av1_wait_cap_buf(void *args)
{
    struct AV1HW_s *hw =
        (struct AV1HW_s *) args;
    struct AV1_Common_s *const cm = &hw->common;
    struct aml_vcodec_ctx * ctx =
        (struct aml_vcodec_ctx *)hw->v4l2_ctx;
    ulong flags;
    int ret = 0;

    ret = wait_event_interruptible_timeout(ctx->cap_wq,
        (ctx->is_stream_off || (get_free_buf_count(hw) > 0)),
        msecs_to_jiffies(300));
    if (ret <= 0){
        av1_print(hw, PRINT_FLAG_V4L_DETAIL, "%s, wait cap buf timeout or err %d\n",
            __func__, ret);
    }

    /* wait_more_buf is only consumed under the buffer-pool lock */
    lock_buffer_pool(cm->buffer_pool, flags);
    if (hw->wait_more_buf) {
        hw->wait_more_buf = false;
        hw->dec_result = ctx->is_stream_off ?
        DEC_RESULT_FORCE_EXIT :
        AOM_AV1_RESULT_NEED_MORE_BUFFER;
        vdec_schedule_work(&hw->work);
    }
    unlock_buffer_pool(cm->buffer_pool, flags);

    av1_print(hw, PRINT_FLAG_V4L_DETAIL,
        "%s wait capture buffer end, ret:%d\n",
        __func__, ret);
    return 0;
}

/*
 * av1_work() - bottom-half workqueue handler.  Dispatches on
 * hw->dec_result (need-more-buffer, get-data, done, again, EOS,
 * force-exit, discard, unfinish), releases hardware resources and
 * notifies the vdec core that this instance's run has finished.
 */
static void av1_work(struct work_struct *work)
{
    struct AV1HW_s *hw = container_of(work,
        struct AV1HW_s, work);
    struct vdec_s *vdec = hw_to_vdec(hw);
    /* finished decoding one frame or error,
     * notify vdec core to switch context
     */
    if (hw->dec_result == DEC_RESULT_AGAIN)
        ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_AGAIN);
    if (hw->dec_result != AOM_AV1_RESULT_NEED_MORE_BUFFER)
        ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_START);
    av1_print(hw, PRINT_FLAG_VDEC_DETAIL,
        "%s dec_result %d %x %x %x\n",
        __func__,
        hw->dec_result,
        READ_VREG(HEVC_STREAM_LEVEL),
        READ_VREG(HEVC_STREAM_WR_PTR),
        READ_VREG(HEVC_STREAM_RD_PTR));

    ATRACE_COUNTER("V_ST_DEC-work_state", hw->dec_result);

    if (hw->dec_result == AOM_AV1_RESULT_NEED_MORE_BUFFER) {
        reset_process_time(hw);
        if (get_free_buf_count(hw) <= 0) {
            /* still starved: either bail (disconnecting) or post a
             * waiter task (av1_wait_cap_buf) to resume us later */
            struct AV1_Common_s *const cm = &hw->common;
            ulong flags;
            int ret;

            lock_buffer_pool(cm->buffer_pool, flags);

            hw->dec_result = AOM_AV1_RESULT_NEED_MORE_BUFFER;
            if (vdec->next_status == VDEC_STATUS_DISCONNECTED) {
                hw->dec_result = DEC_RESULT_AGAIN;
                vdec_schedule_work(&hw->work);
            } else {
                hw->wait_more_buf = true;
            }
            unlock_buffer_pool(cm->buffer_pool, flags);

            if (hw->wait_more_buf) {
                ATRACE_COUNTER("V_ST_DEC-wait_more_buff", __LINE__);
                ret = vdec_post_task(av1_wait_cap_buf, hw);
                if (ret != 0) {
                    /* waiter could not be created: retry via work */
                    pr_err("post task create failed!!!! ret %d\n", ret);
                    lock_buffer_pool(cm->buffer_pool, flags);
                    hw->wait_more_buf = false;
                    hw->dec_result = AOM_AV1_RESULT_NEED_MORE_BUFFER;
                    vdec_schedule_work(&hw->work);
                    unlock_buffer_pool(cm->buffer_pool, flags);
                }
            }

        } else {
            /* a buffer freed up: resume decoding the pending OBU */
            ATRACE_COUNTER("V_ST_DEC-wait_more_buff", 0);
            av1_release_bufs(hw);
            av1_continue_decoding(hw, hw->cur_obu_type);
            hw->postproc_done = 0;
            start_process_time(hw);
        }
        return;
    }

    if (((hw->dec_result == DEC_RESULT_GET_DATA) ||
        (hw->dec_result == DEC_RESULT_GET_DATA_RETRY))
        && (hw_to_vdec(hw)->next_status !=
        VDEC_STATUS_DISCONNECTED)) {
        if (!vdec_has_more_input(vdec)) {
            hw->dec_result = DEC_RESULT_EOS;
            vdec_schedule_work(&hw->work);
            return;
        }

        if (hw->dec_result == DEC_RESULT_GET_DATA) {
            av1_print(hw, PRINT_FLAG_VDEC_STATUS,
                "%s DEC_RESULT_GET_DATA %x %x %x\n",
                __func__,
                READ_VREG(HEVC_STREAM_LEVEL),
                READ_VREG(HEVC_STREAM_WR_PTR),
                READ_VREG(HEVC_STREAM_RD_PTR));
            vdec_vframe_dirty(vdec, hw->chunk);
            vdec_clean_input(vdec);
        }

        if (get_free_buf_count(hw) >=
            hw->run_ready_min_buf_num) {
            int r;
            int decode_size;
            r = vdec_prepare_input(vdec, &hw->chunk);
            if (r < 0) {
                hw->dec_result = DEC_RESULT_GET_DATA_RETRY;

                av1_print(hw,
                    PRINT_FLAG_VDEC_DETAIL,
                    "amvdec_vh265: Insufficient data\n");

                vdec_schedule_work(&hw->work);
                return;
            }
            hw->dec_result = DEC_RESULT_NONE;
            av1_print(hw, PRINT_FLAG_VDEC_STATUS,
                "%s: chunk size 0x%x sum 0x%x\n",
                __func__, r,
                (debug & PRINT_FLAG_VDEC_STATUS) ?
                get_data_check_sum(hw, r) : 0
                );

            if (debug & PRINT_FLAG_VDEC_DATA)
                dump_data(hw, hw->data_size);

            /* account for the fifo-alignment padding ahead of the data */
            decode_size = hw->data_size +
                (hw->data_offset & (VDEC_FIFO_ALIGN - 1));

            WRITE_VREG(HEVC_DECODE_SIZE,
                READ_VREG(HEVC_DECODE_SIZE) + decode_size);

            vdec_enable_input(vdec);

            /* tell the ucode the new input is ready */
            WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE);

            start_process_time(hw);

        } else {
            hw->dec_result = DEC_RESULT_GET_DATA_RETRY;

            av1_print(hw, PRINT_FLAG_VDEC_DETAIL,
                "amvdec_vh265: Insufficient data\n");

            vdec_schedule_work(&hw->work);
        }
        return;
    } else if (hw->dec_result == DEC_RESULT_DONE) {
        /* if (!hw->ctx_valid)
            hw->ctx_valid = 1; */
        hw->result_done_count++;
        hw->process_state = PROC_STATE_INIT;

        av1_print(hw, PRINT_FLAG_VDEC_STATUS,
            "%s (===> %d) dec_result %d (%d) %x %x %x shiftbytes 0x%x decbytes 0x%x\n",
            __func__,
            hw->frame_count,
            hw->dec_result,
            hw->result_done_count,
            READ_VREG(HEVC_STREAM_LEVEL),
            READ_VREG(HEVC_STREAM_WR_PTR),
            READ_VREG(HEVC_STREAM_RD_PTR),
            READ_VREG(HEVC_SHIFT_BYTE_COUNT),
            READ_VREG(HEVC_SHIFT_BYTE_COUNT) -
            hw->start_shift_bytes
            );
        vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk);
    } else if (hw->dec_result == DEC_RESULT_AGAIN) {
        /*
        stream base: stream buf empty or timeout
        frame base: vdec_prepare_input fail
        */
        if (!vdec_has_more_input(vdec)) {
            hw->dec_result = DEC_RESULT_EOS;
            vdec_schedule_work(&hw->work);
            return;
        }
    } else if (hw->dec_result == DEC_RESULT_EOS) {
        av1_print(hw, PRINT_FLAG_VDEC_STATUS,
            "%s: end of stream\n", __func__);
        mutex_lock(&hw->assit_task.assit_mutex);
        hw->eos = 1;
        av1_postproc(hw);

        if (hw->is_used_v4l) {
            ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__);
            notify_v4l_eos(hw_to_vdec(hw));
            ATRACE_COUNTER("V_ST_DEC-submit_eos", 0);
        }
        mutex_unlock(&hw->assit_task.assit_mutex);

        vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk);
    } else if (hw->dec_result == DEC_RESULT_FORCE_EXIT) {
        av1_print(hw, PRINT_FLAG_VDEC_STATUS,
            "%s: force exit\n",
            __func__);
        if (hw->stat & STAT_VDEC_RUN) {
            amhevc_stop();
            hw->stat &= ~STAT_VDEC_RUN;
        }

        if (hw->stat & STAT_ISR_REG) {
#ifdef MULTI_INSTANCE_SUPPORT
            if (!hw->m_ins_flag)
#endif
                WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0);
            vdec_free_irq(VDEC_IRQ_0, (void *)hw);
            hw->stat &= ~STAT_ISR_REG;
        }
    } else if (hw->dec_result == DEC_RESULT_DISCARD_DATA) {
        av1_print(hw, PRINT_FLAG_VDEC_STATUS,
            "%s (===> %d) dec_result %d (%d) %x %x %x shiftbytes 0x%x decbytes 0x%x discard pic!\n",
            __func__,
            hw->frame_count,
            hw->dec_result,
            hw->result_done_count,
            READ_VREG(HEVC_STREAM_LEVEL),
            READ_VREG(HEVC_STREAM_WR_PTR),
            READ_VREG(HEVC_STREAM_RD_PTR),
            READ_VREG(HEVC_SHIFT_BYTE_COUNT),
            READ_VREG(HEVC_SHIFT_BYTE_COUNT) -
            hw->start_shift_bytes
            );
        vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk);
    } else if (hw->dec_result == DEC_RESULT_UNFINISH) {
        hw->result_done_count++;
        hw->process_state = PROC_STATE_INIT;

        av1_print(hw, PRINT_FLAG_VDEC_STATUS,
            "%s (===> %d) dec_result %d (%d) %x %x %x shiftbytes 0x%x decbytes 0x%x\n",
            __func__,
            hw->frame_count,
            hw->dec_result,
            hw->result_done_count,
            READ_VREG(HEVC_STREAM_LEVEL),
            READ_VREG(HEVC_STREAM_WR_PTR),
            READ_VREG(HEVC_STREAM_RD_PTR),
            READ_VREG(HEVC_SHIFT_BYTE_COUNT),
            READ_VREG(HEVC_SHIFT_BYTE_COUNT) - hw->start_shift_bytes);
        amhevc_stop();
    }

    if (hw->stat & STAT_VDEC_RUN) {
        amhevc_stop();
        hw->stat &= ~STAT_VDEC_RUN;
    }

    if (hw->stat & STAT_TIMER_ARM) {
        del_timer_sync(&hw->timer);
        hw->stat &= ~STAT_TIMER_ARM;
    }
    ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_WORKER_END);
    /* mark itself has all HW resource released and input released */
    if (vdec->parallel_dec == 1)
        vdec_core_finish_run(vdec, CORE_MASK_HEVC);
    else
        vdec_core_finish_run(hw_to_vdec(hw), CORE_MASK_VDEC_1
            | CORE_MASK_HEVC);
    trigger_schedule(hw);
}

/*
 * av1_hw_ctx_restore() - re-program the full front/back-end protocol
 * state after the core has been handed back to this instance.
 */
static int av1_hw_ctx_restore(struct AV1HW_s *hw)
{
    vav1_prot_init(hw, HW_MASK_FRONT | HW_MASK_BACK);
    return 0;
}
+ +static bool is_avaliable_buffer(struct AV1HW_s *hw) +{ + AV1_COMMON *cm = &hw->common; + RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + int i, free_count = 0; + int used_count = 0; + + if ((hw->used_buf_num == 0) || + (ctx->cap_pool.dec < hw->used_buf_num)) { + if (ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) { + free_count = + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) + 1; + } + } + + for (i = 0; i < hw->used_buf_num; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.repeat_count == 0) && + (frame_bufs[i].buf.index >= 0) && + frame_bufs[i].buf.cma_alloc_addr) { + free_count++; + } else if (frame_bufs[i].buf.cma_alloc_addr) + used_count++; + } + + ATRACE_COUNTER("av1_free_buff_count", free_count); + ATRACE_COUNTER("av1_used_buff_count", used_count); + + return free_count >= hw->run_ready_min_buf_num ? 1 : 0; +} + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + int tvp = vdec_secure(hw_to_vdec(hw)) ? + CODEC_MM_FLAGS_TVP : 0; + unsigned long ret = 0; + + if (!hw->pic_list_init_done2 || hw->eos) + return ret; + + if (!hw->first_sc_checked && + (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) && + (get_double_write_mode(hw) != 0x10)) { + int size; + void * mmu_box; + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + mmu_box = ctx->mmu_box; + } else + mmu_box = hw->mmu_box; + + size = decoder_mmu_box_sc_check(mmu_box, tvp); + hw->first_sc_checked = 1; + av1_print(hw, 0, "av1 cached=%d need_size=%d speed= %d ms\n", + size, (hw->need_cache_size >> PAGE_SHIFT), + (int)(get_jiffies_64() - hw->sc_start_time) * 1000/HZ); +#ifdef AOM_AV1_MMU_DW + /*!!!!!! To do ... 
*/ + if (get_double_write_mode(hw) & 0x20) { + + } +#endif + } + + if (hw->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + + if (hw->v4l_params_parsed) { + if (is_avaliable_buffer(hw)) + ret = CORE_MASK_HEVC; + else + ret = 0; + } else { + if (ctx->v4l_resolution_change) + ret = 0; + else + ret = CORE_MASK_HEVC; + } + } + + if (ret) + not_run_ready[hw->index] = 0; + else + not_run_ready[hw->index]++; + + /*av1_print(hw, + PRINT_FLAG_VDEC_DETAIL, "%s mask %lx=>%lx\r\n", + __func__, mask, ret);*/ + return ret; +} + +static void av1_frame_mode_pts_save(struct AV1HW_s *hw) +{ + u64 i, valid_pts_diff_cnt, pts_diff_sum; + u64 in_pts_diff, last_valid_pts_diff, calc_dur; + + if (hw->chunk == NULL) + return; + /* no return when first pts is 0 */ + if (!hw->is_used_v4l && hw->first_pts_index) { + /* filtration pts 0 and continuous same pts */ + if ((hw->chunk->pts == 0) || + (hw->frame_mode_pts_save[0] == hw->chunk->pts)) + return; + + /* fps change, frame dur change to lower or higher, + * can't find closed pts in saved pool */ + if (hw->dur_recalc_flag || + (hw->last_pts > hw->chunk->pts)) { + hw->av1_first_pts_ready = 0; + hw->first_pts_index = 0; + hw->get_frame_dur = 0; + hw->dur_recalc_flag = 0; + memset(hw->frame_mode_pts_save, 0, + sizeof(hw->frame_mode_pts_save)); + memset(hw->frame_mode_pts64_save, 0, + sizeof(hw->frame_mode_pts64_save)); + } + } + av1_print(hw, AV1_DEBUG_OUT_PTS, + "run_front: pts %d, pts64 %lld, ts: %lld\n", + hw->chunk->pts, hw->chunk->pts64, hw->chunk->timestamp); + + for (i = (FRAME_BUFFERS - 1); i > 0; i--) { + hw->frame_mode_pts_save[i] = hw->frame_mode_pts_save[i - 1]; + hw->frame_mode_pts64_save[i] = hw->frame_mode_pts64_save[i - 1]; + } + hw->frame_mode_pts_save[0] = hw->chunk->pts; + hw->frame_mode_pts64_save[0] = hw->chunk->pts64; + + if (hw->is_used_v4l && !v4l_bitstream_id_enable) + hw->frame_mode_pts64_save[0] = hw->chunk->timestamp; + + if (hw->first_pts_index < 
ARRAY_SIZE(hw->frame_mode_pts_save)) + hw->first_pts_index++; + /* frame duration check, vdec_secure return for nts problem */ + if ((!hw->first_pts_index) || + hw->get_frame_dur || + vdec_secure(hw_to_vdec(hw))) + return; + valid_pts_diff_cnt = 0; + pts_diff_sum = 0; + + for (i = 0; i < FRAME_BUFFERS - 1; i++) { + if ((hw->frame_mode_pts_save[i] > hw->frame_mode_pts_save[i + 1]) && + (hw->frame_mode_pts_save[i + 1] != 0)) + in_pts_diff = hw->frame_mode_pts_save[i] + - hw->frame_mode_pts_save[i + 1]; + else + in_pts_diff = 0; + + if (in_pts_diff < 100 || + (valid_pts_diff_cnt && (!close_to(in_pts_diff, last_valid_pts_diff, 100)))) + in_pts_diff = 0; + else { + last_valid_pts_diff = in_pts_diff; + valid_pts_diff_cnt++; + } + + pts_diff_sum += in_pts_diff; + } + + if (!valid_pts_diff_cnt) { + av1_print(hw, AV1_DEBUG_OUT_PTS, "checked no avaliable pts\n"); + return; + } + + calc_dur = PTS2DUR_u64(div_u64(pts_diff_sum, valid_pts_diff_cnt)); + + if ((!close_to(calc_dur, hw->frame_dur, 10)) && + (calc_dur < 4800) && (calc_dur > 800)) { + av1_print(hw, 0, "change to calc dur %llu, old dur %u\n", calc_dur, hw->frame_dur); + hw->frame_dur = calc_dur; + hw->get_frame_dur = true; + } else { + if (hw->frame_count > FRAME_BUFFERS) + hw->get_frame_dur = true; + } +} + +static void run_front(struct vdec_s *vdec) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + int ret, size; + + run_count[hw->index]++; + /* hw->chunk = vdec_prepare_input(vdec); */ + hevc_reset_core(vdec); + + if ((vdec_frame_based(vdec)) && + (hw->dec_result == DEC_RESULT_UNFINISH)) { + u32 res_byte = hw->data_size - hw->consume_byte; + + av1_print(hw, AV1_DEBUG_BUFMGR, + "%s before, consume 0x%x, size 0x%x, offset 0x%x, res 0x%x\n", __func__, + hw->consume_byte, hw->data_size, hw->data_offset + hw->consume_byte, res_byte); + + hw->data_invalid = vdec_offset_prepare_input(vdec, hw->consume_byte, hw->data_offset, hw->data_size); + hw->data_offset -= (hw->data_invalid - hw->consume_byte); + 
hw->data_size += (hw->data_invalid - hw->consume_byte); + size = hw->data_size; + WRITE_VREG(HEVC_ASSIST_SCRATCH_C, hw->data_invalid); + + av1_print(hw, AV1_DEBUG_BUFMGR, + "%s after, consume 0x%x, size 0x%x, offset 0x%x, invalid 0x%x, res 0x%x\n", __func__, + hw->consume_byte, hw->data_size, hw->data_offset, hw->data_invalid, res_byte); + } else { + size = vdec_prepare_input(vdec, &hw->chunk); + if (size < 0) { + input_empty[hw->index]++; + + hw->dec_result = DEC_RESULT_AGAIN; + + av1_print(hw, PRINT_FLAG_VDEC_DETAIL, + "ammvdec_av1: Insufficient data\n"); + + vdec_schedule_work(&hw->work); + return; + } + if ((vdec_frame_based(vdec)) && + (hw->chunk != NULL)) { + hw->data_offset = hw->chunk->offset; + hw->data_size = size; + } + WRITE_VREG(HEVC_ASSIST_SCRATCH_C, 0); + } + + ATRACE_COUNTER("V_ST_DEC-chunk_size", size); + + input_empty[hw->index] = 0; + hw->dec_result = DEC_RESULT_NONE; + hw->start_shift_bytes = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + + av1_frame_mode_pts_save(hw); + if (debug & PRINT_FLAG_VDEC_STATUS) { + if (vdec_frame_based(vdec) && hw->chunk && !vdec_secure(vdec)) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap(hw->chunk->block->start + + hw->data_offset, size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->data_offset; + + //print_hex_debug(data, size, size > 64 ? 64 : size); + av1_print(hw, 0, + "%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. 
%02x %02x %02x %02x\n", + __func__, size, get_data_check_sum(hw, size), + data[0], data[1], data[2], data[3], + data[4], data[5], data[size - 4], + data[size - 3], data[size - 2], + data[size - 1]); + av1_print(hw, 0, + "%s frm cnt (%d): chunk (0x%x 0x%x) (%x %x %x %x %x) bytes 0x%x\n", + __func__, hw->frame_count, hw->data_size, hw->data_offset, + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + hw->start_shift_bytes); + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } else { + av1_print(hw, 0, + "%s (%d): size 0x%x (0x%x 0x%x) (%x %x %x %x %x) bytes 0x%x\n", + __func__, + hw->frame_count, size, + hw->chunk ? hw->data_size : 0, + hw->chunk ? hw->data_offset : 0, + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + hw->start_shift_bytes); + } + } + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_START); + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else { +#ifdef DEBUG_USE_VP9_DEVICE_NAME + ret = amhevc_loadmc_ex(VFORMAT_VP9, NULL, hw->fw->data); +#else + ret = amhevc_loadmc_ex(VFORMAT_AV1, NULL, hw->fw->data); +#endif + if (ret < 0) { + amhevc_disable(); + av1_print(hw, PRINT_FLAG_ERROR, + "AV1: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + hw->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&hw->work); + return; + } + vdec->mc_loaded = 1; +#ifdef DEBUG_USE_VP9_DEVICE_NAME + vdec->mc_type = VFORMAT_VP9; +#else + vdec->mc_type = VFORMAT_AV1; +#endif + } + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_END); + + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_START); + if (av1_hw_ctx_restore(hw) < 0) { + vdec_schedule_work(&hw->work); + return; + } + ATRACE_COUNTER(hw->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_END); + vdec_enable_input(vdec); + + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + + if (vdec_frame_based(vdec)) { + if (debug & PRINT_FLAG_VDEC_DATA) + dump_data(hw, hw->data_size); + + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, 0); + size = hw->data_size + + (hw->data_offset & (VDEC_FIFO_ALIGN - 1)); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = hw->data_size; + } + hw->data_size = size; + WRITE_VREG(HEVC_DECODE_SIZE, size); + WRITE_VREG(HEVC_DECODE_COUNT, hw->result_done_count); + WRITE_VREG(LMEM_DUMP_ADR, (u32)hw->lmem_phy_addr); + if (hw->config_next_ref_info_flag) + config_next_ref_info_hw(hw); + hw->config_next_ref_info_flag = 0; + hw->init_flag = 1; + + av1_print(hw, PRINT_FLAG_VDEC_DETAIL, + "%s: start hw (%x %x %x) HEVC_DECODE_SIZE 0x%x\n", + __func__, + READ_VREG(HEVC_DEC_STATUS_REG), + READ_VREG(HEVC_MPC_E), + READ_VREG(HEVC_MPSR), + READ_VREG(HEVC_DECODE_SIZE)); + + start_process_time(hw); + mod_timer(&hw->timer, jiffies); + hw->stat |= STAT_TIMER_ARM; + hw->stat |= STAT_ISR_REG; + amhevc_start(); + hw->stat |= STAT_VDEC_RUN; +} + +static void run(struct vdec_s *vdec, unsigned long mask, + void (*callback)(struct vdec_s *, void *), void *arg) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_RUN_START); + av1_print(hw, + PRINT_FLAG_VDEC_DETAIL, "%s mask %lx\r\n", + __func__, mask); + + run_count[hw->index]++; + if (vdec->mvfrm) + 
vdec->mvfrm->hw_decode_start = local_clock(); + hw->vdec_cb_arg = arg; + hw->vdec_cb = callback; + hw->one_package_frame_cnt = 0; + run_front(vdec); + ATRACE_COUNTER(hw->trace.decode_time_name, DECODER_RUN_END); +} + +static void av1_decode_ctx_reset(struct AV1HW_s *hw) +{ + struct AV1_Common_s *const cm = &hw->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + int i; + + for (i = 0; i < FRAME_BUFFERS; ++i) { + frame_bufs[i].ref_count = 0; + frame_bufs[i].buf.vf_ref = 0; + frame_bufs[i].buf.decode_idx = 0; + frame_bufs[i].buf.cma_alloc_addr = 0; + frame_bufs[i].buf.index = i; + frame_bufs[i].buf.BUF_index = -1; + frame_bufs[i].buf.mv_buf_index = -1; + frame_bufs[i].buf.repeat_pic = NULL; + frame_bufs[i].buf.repeat_count = 0; + } + + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (hw->m_mv_BUF[i].start_adr) { + if (mv_buf_dynamic_alloc) { + decoder_bmmu_box_free_idx(hw->bmmu_box, MV_BUFFER_IDX(i)); + hw->m_mv_BUF[i].start_adr = 0; + hw->m_mv_BUF[i].size = 0; + } + hw->m_mv_BUF[i].used_flag = 0; + } + } + + hw->one_compressed_data_done = 0; + hw->config_next_ref_info_flag = 0; + hw->init_flag = 0; + hw->first_sc_checked = 0; + hw->fatal_error = 0; + hw->show_frame_num = 0; + hw->postproc_done = 0; + hw->process_busy = 0; + hw->process_state = 0; + hw->frame_decoded = 0; + hw->eos = 0; + hw->dec_result = DEC_RESULT_NONE; +} + +static void reset(struct vdec_s *vdec) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + + cancel_work_sync(&hw->work); + cancel_work_sync(&hw->set_clk_work); + + if (hw->stat & STAT_VDEC_RUN) { + amhevc_stop(); + hw->stat &= ~STAT_VDEC_RUN; + } + + if (hw->stat & STAT_TIMER_ARM) { + del_timer_sync(&hw->timer); + hw->stat &= ~STAT_TIMER_ARM; + } + + reset_process_time(hw); + + av1_bufmgr_ctx_reset(hw->pbi, &hw->av1_buffer_pool, &hw->common); + hw->pbi->private_data = hw; + mutex_lock(&hw->assit_task.assit_mutex); + av1_local_uninit(hw, true); + if (vav1_local_init(hw, true) < 0) + av1_print(hw, 0, "%s 
local_init failed \r\n", __func__); + mutex_unlock(&hw->assit_task.assit_mutex); + + av1_decode_ctx_reset(hw); + + atomic_set(&hw->vf_pre_count, 0); + atomic_set(&hw->vf_get_count, 0); + atomic_set(&hw->vf_put_count, 0); + + if (hw->ge2d) { + vdec_ge2d_destroy(hw->ge2d); + hw->ge2d = NULL; + } + + av1_print(hw, PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__); +} + +static irqreturn_t av1_irq_cb(struct vdec_s *vdec, int irq) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + return vav1_isr(0, hw); +} + +static irqreturn_t av1_threaded_irq_cb(struct vdec_s *vdec, int irq) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + return vav1_isr_thread_fn(0, hw); +} + +static void av1_dump_state(struct vdec_s *vdec) +{ + struct AV1HW_s *hw = + (struct AV1HW_s *)vdec->private; + struct AV1_Common_s *const cm = &hw->common; + int i; + av1_print(hw, 0, "====== %s\n", __func__); + + av1_print(hw, 0, + "width/height (%d/%d), used_buf_num %d\n", + cm->width, + cm->height, + hw->used_buf_num + ); + + av1_print(hw, 0, + "is_framebase(%d), eos %d, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d low_latency %d no_head %d \n", + input_frame_based(vdec), + hw->eos, + hw->dec_result, + decode_frame_count[hw->index], + display_frame_count[hw->index], + run_count[hw->index], + not_run_ready[hw->index], + input_empty[hw->index], + hw->low_latency_flag, + hw->no_head + ); + + if (!hw->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + av1_print(hw, 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + + av1_print(hw, 0, + "%s, newq(%d/%d), dispq(%d/%d), vf prepare/get/put (%d/%d/%d), free_buf_count %d (min %d for run_ready)\n", + __func__, + kfifo_len(&hw->newframe_q), + VF_POOL_SIZE, + kfifo_len(&hw->display_q), + VF_POOL_SIZE, + hw->vf_pre_count, + hw->vf_get_count, + hw->vf_put_count, + 
get_free_buf_count(hw), + hw->run_ready_min_buf_num + ); + + dump_pic_list(hw); + + for (i = 0; i < MAX_BUF_NUM; i++) { + av1_print(hw, 0, + "mv_Buf(%d) start_adr 0x%x size 0x%x used %d\n", + i, + hw->m_mv_BUF[i].start_adr, + hw->m_mv_BUF[i].size, + hw->m_mv_BUF[i].used_flag); + } + + av1_print(hw, 0, + "HEVC_DEC_STATUS_REG=0x%x\n", + READ_VREG(HEVC_DEC_STATUS_REG)); + av1_print(hw, 0, + "HEVC_MPC_E=0x%x\n", + READ_VREG(HEVC_MPC_E)); + av1_print(hw, 0, + "DECODE_MODE=0x%x\n", + READ_VREG(DECODE_MODE)); + av1_print(hw, 0, + "NAL_SEARCH_CTL=0x%x\n", + READ_VREG(NAL_SEARCH_CTL)); + av1_print(hw, 0, + "HEVC_PARSER_LCU_START=0x%x\n", + READ_VREG(HEVC_PARSER_LCU_START)); + av1_print(hw, 0, + "HEVC_DECODE_SIZE=0x%x\n", + READ_VREG(HEVC_DECODE_SIZE)); + av1_print(hw, 0, + "HEVC_SHIFT_BYTE_COUNT=0x%x\n", + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + av1_print(hw, 0, + "HEVC_SHIFTED_DATA=0x%x\n", + READ_VREG(HEVC_SHIFTED_DATA)); + av1_print(hw, 0, + "HEVC_STREAM_START_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_START_ADDR)); + av1_print(hw, 0, + "HEVC_STREAM_END_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_END_ADDR)); + av1_print(hw, 0, + "HEVC_STREAM_LEVEL=0x%x\n", + READ_VREG(HEVC_STREAM_LEVEL)); + av1_print(hw, 0, + "HEVC_STREAM_WR_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_WR_PTR)); + av1_print(hw, 0, + "HEVC_STREAM_RD_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_RD_PTR)); + av1_print(hw, 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + av1_print(hw, 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (input_frame_based(vdec) && + (debug & PRINT_FLAG_VDEC_DATA) + ) { + int jj; + if (hw->chunk && hw->chunk->block && + hw->data_size > 0) { + u8 *data = NULL; + + if (!hw->chunk->block->is_mapped) + data = codec_mm_vmap( + hw->chunk->block->start + + hw->data_offset, + hw->data_size); + else + data = ((u8 *)hw->chunk->block->start_virt) + + hw->data_offset; + av1_print(hw, 0, + "frame data size 0x%x\n", + hw->data_size); + for (jj = 0; jj < hw->data_size; jj++) { + 
if ((jj & 0xf) == 0) + av1_print(hw, 0, + "%06x:", jj); + av1_print_cont(hw, 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + av1_print_cont(hw, 0, + "\n"); + } + + if (!hw->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } + +} + +static int ammvdec_av1_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s **)pdev->dev.platform_data; + int ret; + int config_val; + int i; + struct vframe_content_light_level_s content_light_level; + struct vframe_master_display_colour_s vf_dp; + u32 work_buf_size; + struct BuffInfo_s *p_buf_info; + struct AV1HW_s *hw = NULL; + + if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_TM2) || + (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) || + ((get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_TM2) && !is_meson_rev_b())) { + pr_err("av1 unsupported on cpu %d, is_tm2_revb %d\n", + get_cpu_major_id(), is_cpu_tm2_revb()); + return -EINVAL; + } + + if (pdata == NULL) { + av1_print(hw, 0, "\nammvdec_av1 memory resource undefined.\n"); + return -EFAULT; + } + memset(&vf_dp, 0, sizeof(struct vframe_master_display_colour_s)); + + hw = vzalloc(sizeof(struct AV1HW_s)); + if (hw == NULL) { + av1_print(hw, 0, "\nammvdec_av1 device data allocation failed\n"); + return -ENOMEM; + } + + if (init_dblk_struc(hw) < 0) { + av1_print(hw, 0, "\nammvdec_av1 device data allocation failed\n"); + vfree(hw); + return -ENOMEM; + } + + hw->pbi = av1_decoder_create(&hw->av1_buffer_pool, &hw->common); //&aom_decoder; + if (hw->pbi == NULL) { + av1_print(hw, 0, "\nammvdec_av1 device data allocation failed\n"); + release_dblk_struct(hw); + vfree(hw); + return -ENOMEM; + } + + hw->pbi->private_data = hw; + /* the ctx from v4l2 driver. 
*/ + hw->v4l2_ctx = pdata->private; + + pdata->private = hw; + pdata->dec_status = vav1_dec_status; + /* pdata->set_trickmode = set_trickmode; */ + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = av1_irq_cb; + pdata->threaded_irq_handler = av1_threaded_irq_cb; + pdata->dump_state = av1_dump_state; + + hw->index = pdev->id; + if (is_rdma_enable()) { + hw->rdma_adr = dma_alloc_coherent(amports_get_dma_device(), RDMA_SIZE, &hw->rdma_phy_adr, GFP_KERNEL); + for (i = 0; i < SCALELUT_DATA_WRITE_NUM; i++) { + hw->rdma_adr[i * 4] = HEVC_IQIT_SCALELUT_WR_ADDR & 0xfff; + hw->rdma_adr[i * 4 + 1] = i; + hw->rdma_adr[i * 4 + 2] = HEVC_IQIT_SCALELUT_DATA & 0xfff; + hw->rdma_adr[i * 4 + 3] = 0; + if (i == SCALELUT_DATA_WRITE_NUM - 1) { + hw->rdma_adr[i * 4 + 2] = (HEVC_IQIT_SCALELUT_DATA & 0xfff) | 0x20000; + } + } + } + snprintf(hw->trace.vdec_name, sizeof(hw->trace.vdec_name), + "av1-%d", hw->index); + snprintf(hw->trace.pts_name, sizeof(hw->trace.pts_name), + "%s-timestamp", hw->trace.vdec_name); + snprintf(hw->trace.new_q_name, sizeof(hw->trace.new_q_name), + "%s-newframe_q", hw->trace.vdec_name); + snprintf(hw->trace.disp_q_name, sizeof(hw->trace.disp_q_name), + "%s-dispframe_q", hw->trace.vdec_name); + snprintf(hw->trace.decode_time_name, sizeof(hw->trace.decode_time_name), + "decoder_time%d", pdev->id); + snprintf(hw->trace.decode_run_time_name, sizeof(hw->trace.decode_run_time_name), + "decoder_run_time%d", pdev->id); + snprintf(hw->trace.decode_header_memory_time_name, sizeof(hw->trace.decode_header_memory_time_name), + "decoder_header_time%d", pdev->id); + snprintf(hw->trace.decode_work_time_name, sizeof(hw->trace.decode_work_time_name), + "decoder_work_time%d", pdev->id); + if (pdata->use_vfm_path) + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + else if (vdec_dual(pdata)) { + struct AV1HW_s *hevc_pair = NULL; + + if 
(dv_toggle_prov_name) /*debug purpose*/ + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVBL_PROVIDER_NAME : + VFM_DEC_DVEL_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, + VDEC_PROVIDER_NAME_SIZE, + (pdata->master) ? VFM_DEC_DVEL_PROVIDER_NAME : + VFM_DEC_DVBL_PROVIDER_NAME); + if (pdata->master) + hevc_pair = (struct AV1HW_s *)pdata->master->private; + else if (pdata->slave) + hevc_pair = (struct AV1HW_s *)pdata->slave->private; + + if (hevc_pair) + hw->shift_byte_count_lo = hevc_pair->shift_byte_count_lo; + } +#endif + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + + hw->provider_name = pdata->vf_provider_name; + platform_set_drvdata(pdev, pdata); + + hw->platform_dev = pdev; + hw->video_signal_type = 0; + hw->m_ins_flag = 1; + + film_grain_task_create(hw); + + if (pdata->sys_info) { + hw->vav1_amstream_dec_info = *pdata->sys_info; + if ((unsigned long) hw->vav1_amstream_dec_info.param + & 0x08) { + hw->low_latency_flag = 1; + } else + hw->low_latency_flag = 0; + } else { + hw->vav1_amstream_dec_info.width = 0; + hw->vav1_amstream_dec_info.height = 0; + hw->vav1_amstream_dec_info.rate = 30; + } + + if ((debug & IGNORE_PARAM_FROM_CONFIG) == 0 && + pdata->config_len) { +#ifdef MULTI_INSTANCE_SUPPORT + int av1_buf_width = 0; + int av1_buf_height = 0; + /*use ptr config for doubel_write_mode, etc*/ + av1_print(hw, 0, "pdata->config=%s\n", pdata->config); + if (get_config_int(pdata->config, "av1_double_write_mode", + &config_val) == 0) + hw->double_write_mode = config_val; + else + hw->double_write_mode = double_write_mode; + + if (get_config_int(pdata->config, "save_buffer_mode", + &config_val) == 0) + hw->save_buffer_mode = config_val; + else + hw->save_buffer_mode = 0; + if (get_config_int(pdata->config, "av1_buf_width", + &config_val) == 0) { + av1_buf_width = config_val; + } + if (get_config_int(pdata->config, "av1_buf_height", + 
&config_val) == 0) { + av1_buf_height = config_val; + } + + if (get_config_int(pdata->config, "no_head", + &config_val) == 0) + hw->no_head = config_val; + else + hw->no_head = no_head; + + /*use ptr config for max_pic_w, etc*/ + if (get_config_int(pdata->config, "av1_max_pic_w", + &config_val) == 0) { + hw->max_pic_w = config_val; + } + if (get_config_int(pdata->config, "av1_max_pic_h", + &config_val) == 0) { + hw->max_pic_h = config_val; + } + if ((hw->max_pic_w * hw->max_pic_h) + < (av1_buf_width * av1_buf_height)) { + hw->max_pic_w = av1_buf_width; + hw->max_pic_h = av1_buf_height; + av1_print(hw, 0, "use buf resolution\n"); + } + + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + hw->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + hw->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + hw->is_used_v4l = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_buffer_margin", + &config_val) == 0) + hw->dynamic_buf_num_margin = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + hw->mem_map_mode = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_low_latency_mode", + &config_val) == 0) + hw->low_latency_flag = config_val; + /*if (get_config_int(pdata->config, + "parm_v4l_duration", + &config_val) == 0) + vdec_frame_rate_uevent(config_val);*/ + +#endif + if (get_config_int(pdata->config, "HDRStaticInfo", + &vf_dp.present_flag) == 0 + && vf_dp.present_flag == 1) { + get_config_int(pdata->config, "signal_type", + &hw->video_signal_type); + get_config_int(pdata->config, "mG.x", + &vf_dp.primaries[0][0]); + get_config_int(pdata->config, "mG.y", + &vf_dp.primaries[0][1]); + get_config_int(pdata->config, "mB.x", + &vf_dp.primaries[1][0]); + get_config_int(pdata->config, "mB.y", + &vf_dp.primaries[1][1]); + get_config_int(pdata->config, "mR.x", + 
&vf_dp.primaries[2][0]); + get_config_int(pdata->config, "mR.y", + &vf_dp.primaries[2][1]); + get_config_int(pdata->config, "mW.x", + &vf_dp.white_point[0]); + get_config_int(pdata->config, "mW.y", + &vf_dp.white_point[1]); + get_config_int(pdata->config, "mMaxDL", + &vf_dp.luminance[0]); + get_config_int(pdata->config, "mMinDL", + &vf_dp.luminance[1]); + vf_dp.content_light_level.present_flag = 1; + get_config_int(pdata->config, "mMaxCLL", + &content_light_level.max_content); + get_config_int(pdata->config, "mMaxFALL", + &content_light_level.max_pic_average); + vf_dp.content_light_level = content_light_level; + if (!hw->video_signal_type) { + hw->video_signal_type = (1 << 29) + | (5 << 26) /* unspecified */ + | (0 << 25) /* limit */ + | (1 << 24) /* color available */ + | (9 << 16) /* 2020 */ + | (16 << 8) /* 2084 */ + | (9 << 0); /* 2020 */ + } + } + hw->vf_dp = vf_dp; + } else { + u32 force_w, force_h; + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) { + force_w = 1920; + force_h = 1088; + } else { + force_w = 8192; + force_h = 4608; + } + if (hw->vav1_amstream_dec_info.width) + hw->max_pic_w = hw->vav1_amstream_dec_info.width; + else + hw->max_pic_w = force_w; + + if (hw->vav1_amstream_dec_info.height) + hw->max_pic_h = hw->vav1_amstream_dec_info.height; + else + hw->max_pic_h = force_h; + hw->double_write_mode = double_write_mode; + } + + hw->endian = HEVC_CONFIG_LITTLE_ENDIAN; + if (endian) + hw->endian = endian; + + if (is_oversize(hw->max_pic_w, hw->max_pic_h)) { + pr_err("over size: %dx%d, probe failed\n", + hw->max_pic_w, hw->max_pic_h); + return -1; + } + if (force_bufspec) { + hw->buffer_spec_index = force_bufspec & 0xf; + pr_info("force buffer spec %d\n", force_bufspec & 0xf); + } else if (vdec_is_support_4k()) { + hw->buffer_spec_index = 1; + } else + hw->buffer_spec_index = 0; + + if (hw->buffer_spec_index == 0) + hw->max_one_mv_buffer_size = + (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_SC2) ? 
+ MAX_ONE_MV_BUFFER_SIZE_1080P : MAX_ONE_MV_BUFFER_SIZE_1080P_TM2REVB; + else if (hw->buffer_spec_index == 1) + hw->max_one_mv_buffer_size = + (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_SC2) ? + MAX_ONE_MV_BUFFER_SIZE_4K : MAX_ONE_MV_BUFFER_SIZE_4K_TM2REVB; + else + hw->max_one_mv_buffer_size = + (get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_SC2) ? + MAX_ONE_MV_BUFFER_SIZE_8K : MAX_ONE_MV_BUFFER_SIZE_8K_TM2REVB; + + p_buf_info = &aom_workbuff_spec[hw->buffer_spec_index]; + work_buf_size = (p_buf_info->end_adr - p_buf_info->start_adr + + 0xffff) & (~0xffff); + + if (vdec_is_support_4k() && + (hw->max_pic_w * hw->max_pic_h < MAX_SIZE_4K)) { + hw->max_pic_w = 4096; + hw->max_pic_h = 2304; + } + av1_print(hw, 0, + "vdec_is_support_4k() %d max_pic_w %d max_pic_h %d buffer_spec_index %d work_buf_size 0x%x\n", + vdec_is_support_4k(), hw->max_pic_w, hw->max_pic_h, + hw->buffer_spec_index, work_buf_size); + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL || + hw->double_write_mode == 0x10) + hw->mmu_enable = 0; + else + hw->mmu_enable = 1; + + video_signal_type = hw->video_signal_type; + + if (pdata->sys_info) { + hw->vav1_amstream_dec_info = *pdata->sys_info; + if ((unsigned long) hw->vav1_amstream_dec_info.param + & 0x08) { + hw->low_latency_flag = 1; + } else + hw->low_latency_flag = 0; + } else { + hw->vav1_amstream_dec_info.width = 0; + hw->vav1_amstream_dec_info.height = 0; + hw->vav1_amstream_dec_info.rate = 30; + } + +#ifdef AOM_AV1_MMU_DW + hw->dw_mmu_enable = + get_double_write_mode_init(hw) & 0x20 ? 
1 : 0; + +#endif + av1_print(hw, 0, + "no_head %d low_latency %d video_signal_type 0x%x\n", + hw->no_head, hw->low_latency_flag, hw->video_signal_type); +#if 0 + hw->buf_start = pdata->mem_start; + hw->buf_size = pdata->mem_end - pdata->mem_start + 1; +#else + if (amvdec_av1_mmu_init(hw) < 0) { + pr_err("av1 alloc bmmu box failed!!\n"); + /* devm_kfree(&pdev->dev, (void *)hw); */ + vfree((void *)hw); + pdata->dec_status = NULL; + return -1; + } + + hw->cma_alloc_count = PAGE_ALIGN(work_buf_size) / PAGE_SIZE; + ret = decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, WORK_SPACE_BUF_ID, + hw->cma_alloc_count * PAGE_SIZE, DRIVER_NAME, + &hw->cma_alloc_addr); + if (ret < 0) { + uninit_mmu_buffers(hw); + /* devm_kfree(&pdev->dev, (void *)hw); */ + vfree((void *)hw); + pdata->dec_status = NULL; + return ret; + } + hw->buf_start = hw->cma_alloc_addr; + hw->buf_size = work_buf_size; +#endif + + hw->init_flag = 0; + hw->first_sc_checked = 0; + hw->fatal_error = 0; + hw->show_frame_num = 0; + hw->run_ready_min_buf_num = run_ready_min_buf_num; + + if (hw->is_used_v4l && (hw->v4l2_ctx != NULL)) { + struct aml_vcodec_ctx *ctx = hw->v4l2_ctx; + + ctx->aux_infos.alloc_buffer(ctx, SEI_TYPE | DV_TYPE); + } + + hw->aux_data_size = AUX_BUF_ALIGN(prefix_aux_buf_size) + + AUX_BUF_ALIGN(suffix_aux_buf_size); + hw->dv_data_buf = vmalloc(hw->aux_data_size); + hw->dv_data_size = 0; + + if (debug) { + av1_print(hw, AOM_DEBUG_HW_MORE, "===AV1 decoder mem resource 0x%lx size 0x%x\n", + hw->buf_start, + hw->buf_size); + } + + hw->cma_dev = pdata->cma_dev; + if (vav1_init(pdata) < 0) { + av1_print(hw, 0, "\namvdec_av1 init failed.\n"); + av1_local_uninit(hw, false); + uninit_mmu_buffers(hw); + /* devm_kfree(&pdev->dev, (void *)hw); */ + vfree((void *)hw); + pdata->dec_status = NULL; + return -ENODEV; + } + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_init(pdata); +#endif + + vdec_set_prepare_level(pdata, start_decode_buf_level); + hevc_source_changed(VFORMAT_AV1, 4096, 2048, 60); + + if 
(pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_HEVC); + else + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); + + hw->pic_list_init_done2 = true; + return 0; +} + +static int ammvdec_av1_remove(struct platform_device *pdev) +{ + struct AV1HW_s *hw = (struct AV1HW_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec = hw_to_vdec(hw); + int i; + if (debug) + av1_print(hw, AOM_DEBUG_HW_MORE, "amvdec_av1_remove\n"); + +#ifdef AUX_DATA_CRC + vdec_aux_data_check_exit(vdec); +#endif + + if (hw->dv_data_buf != NULL) { + vfree(hw->dv_data_buf); + hw->dv_data_buf = NULL; + } + + vmav1_stop(hw); + + film_grain_task_exit(hw); + + if (hw->ge2d) { + vdec_ge2d_destroy(hw->ge2d); + hw->ge2d = NULL; + } + + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(hw), CORE_MASK_HEVC); + else + vdec_core_release(hw_to_vdec(hw), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); + + vdec_set_status(hw_to_vdec(hw), VDEC_STATUS_DISCONNECTED); + + if (vdec->parallel_dec == 1) { + for (i = 0; i < FRAME_BUFFERS; i++) { + vdec->free_canvas_ex + (hw->common.buffer_pool->frame_bufs[i].buf.y_canvas_index, + vdec->id); + vdec->free_canvas_ex + (hw->common.buffer_pool->frame_bufs[i].buf.uv_canvas_index, + vdec->id); + } + } + +#ifdef DEBUG_PTS + pr_info("pts missed %ld, pts hit %ld, duration %d\n", + hw->pts_missed, hw->pts_hit, hw->frame_dur); +#endif + /* devm_kfree(&pdev->dev, (void *)hw); */ + if (is_rdma_enable()) + dma_free_coherent(amports_get_dma_device(), RDMA_SIZE, hw->rdma_adr, hw->rdma_phy_adr); + vfree(hw->pbi); + release_dblk_struct(hw); + vfree((void *)hw); + return 0; +} + +static struct platform_driver ammvdec_av1_driver = { + .probe = ammvdec_av1_probe, + .remove = ammvdec_av1_remove, + .driver = { + .name = MULTI_DRIVER_NAME, +#ifdef CONFIG_PM + .pm = &av1_pm_ops, +#endif + } +}; +#endif +static struct mconfig av1_configs[] = { + MC_PU32("bit_depth_luma", &bit_depth_luma), + 
MC_PU32("bit_depth_chroma", &bit_depth_chroma), + MC_PU32("frame_width", &frame_width), + MC_PU32("frame_height", &frame_height), + MC_PU32("debug", &debug), + MC_PU32("radr", &radr), + MC_PU32("rval", &rval), + MC_PU32("pop_shorts", &pop_shorts), + MC_PU32("dbg_cmd", &dbg_cmd), + MC_PU32("dbg_skip_decode_index", &dbg_skip_decode_index), + MC_PU32("endian", &endian), + MC_PU32("step", &step), + MC_PU32("udebug_flag", &udebug_flag), + MC_PU32("decode_pic_begin", &decode_pic_begin), + MC_PU32("slice_parse_begin", &slice_parse_begin), + MC_PU32("i_only_flag", &i_only_flag), + MC_PU32("error_handle_policy", &error_handle_policy), + MC_PU32("buf_alloc_width", &buf_alloc_width), + MC_PU32("buf_alloc_height", &buf_alloc_height), + MC_PU32("buf_alloc_depth", &buf_alloc_depth), + MC_PU32("buf_alloc_size", &buf_alloc_size), + MC_PU32("buffer_mode", &buffer_mode), + MC_PU32("buffer_mode_dbg", &buffer_mode_dbg), + MC_PU32("max_buf_num", &max_buf_num), + MC_PU32("dynamic_buf_num_margin", &dynamic_buf_num_margin), + MC_PU32("mem_map_mode", &mem_map_mode), + MC_PU32("double_write_mode", &double_write_mode), + MC_PU32("enable_mem_saving", &enable_mem_saving), + MC_PU32("force_w_h", &force_w_h), + MC_PU32("force_fps", &force_fps), + MC_PU32("max_decoding_time", &max_decoding_time), + MC_PU32("on_no_keyframe_skiped", &on_no_keyframe_skiped), + MC_PU32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("decode_timeout_val", &decode_timeout_val), + MC_PU32("av1_max_pic_w", &av1_max_pic_w), + MC_PU32("av1_max_pic_h", &av1_max_pic_h), +}; +static struct mconfig_node av1_node; + +static int __init amvdec_av1_driver_init_module(void) +{ + //struct BuffInfo_s *p_buf_info; + int i; +#ifdef BUFMGR_ONLY_OLD_CHIP + debug |= AOM_DEBUG_BUFMGR_ONLY; +#endif + /* + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + p_buf_info = &aom_workbuff_spec[1]; + else + p_buf_info = &aom_workbuff_spec[1]; + } else + p_buf_info = &aom_workbuff_spec[0]; + + 
init_buff_spec(NULL, p_buf_info); + work_buf_size = + (p_buf_info->end_adr - p_buf_info->start_adr + + 0xffff) & (~0xffff); + */ + for (i = 0; i < WORK_BUF_SPEC_NUM; i++) + init_buff_spec(NULL, &aom_workbuff_spec[i]); + + pr_debug("amvdec_av1 module init\n"); + + error_handle_policy = 0; + +#ifdef ERROR_HANDLE_DEBUG + dbg_nal_skip_flag = 0; + dbg_nal_skip_count = 0; +#endif + udebug_flag = 0; + decode_pic_begin = 0; + slice_parse_begin = 0; + step = 0; + buf_alloc_size = 0; + + if (platform_driver_register(&ammvdec_av1_driver)) { + pr_err("failed to register ammvdec_av1 driver\n"); + return -ENODEV; + } + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) { + amvdec_av1_profile.profile = + "10bit, dwrite, compressed, no_head, uvm"; + } else if (((get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2) || is_cpu_tm2_revb()) + && (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5)) { + amvdec_av1_profile.profile = + "8k, 10bit, dwrite, compressed, no_head, frame_dv, uvm"; + } else { + amvdec_av1_profile.name = "av1_unsupport"; + } + + vcodec_profile_register(&amvdec_av1_profile); + + INIT_REG_NODE_CONFIGS("media.decoder", &av1_node, + "av1-v4l", av1_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_AV1, 1); + + return 0; +} + +static void __exit amvdec_av1_driver_remove_module(void) +{ + pr_debug("amvdec_av1 module remove.\n"); + + platform_driver_unregister(&ammvdec_av1_driver); +} + +/****************************************/ +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(force_dv_enable, uint, 0664); +MODULE_PARM_DESC(force_dv_enable, "\n amvdec_av1 force_dv_enable\n"); +#endif + +module_param(bit_depth_luma, uint, 0664); +MODULE_PARM_DESC(bit_depth_luma, "\n amvdec_av1 bit_depth_luma\n"); + +module_param(bit_depth_chroma, uint, 0664); +MODULE_PARM_DESC(bit_depth_chroma, "\n amvdec_av1 bit_depth_chroma\n"); + +module_param(frame_width, uint, 0664); +MODULE_PARM_DESC(frame_width, "\n amvdec_av1 frame_width\n"); + +module_param(frame_height, 
uint, 0664); +MODULE_PARM_DESC(frame_height, "\n amvdec_av1 frame_height\n"); + +module_param(multi_frames_in_one_pack, uint, 0664); +MODULE_PARM_DESC(multi_frames_in_one_pack, "\n multi_frames_in_one_pack\n"); + +module_param(debug, uint, 0664); +MODULE_PARM_DESC(debug, "\n amvdec_av1 debug\n"); + +module_param(disable_fg, uint, 0664); +MODULE_PARM_DESC(disable_fg, "\n amvdec_av1 disable_fg\n"); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\n radr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\n rval\n"); + +module_param(pop_shorts, uint, 0664); +MODULE_PARM_DESC(pop_shorts, "\n rval\n"); + +module_param(dbg_cmd, uint, 0664); +MODULE_PARM_DESC(dbg_cmd, "\n dbg_cmd\n"); + +module_param(dbg_skip_decode_index, uint, 0664); +MODULE_PARM_DESC(dbg_skip_decode_index, "\n dbg_skip_decode_index\n"); + +module_param(endian, uint, 0664); +MODULE_PARM_DESC(endian, "\n rval\n"); + +module_param(disable_repeat, uint, 0664); +MODULE_PARM_DESC(disable_repeat, "\n disable_repeat\n"); + +module_param(step, uint, 0664); +MODULE_PARM_DESC(step, "\n amvdec_av1 step\n"); + +module_param(decode_pic_begin, uint, 0664); +MODULE_PARM_DESC(decode_pic_begin, "\n amvdec_av1 decode_pic_begin\n"); + +module_param(slice_parse_begin, uint, 0664); +MODULE_PARM_DESC(slice_parse_begin, "\n amvdec_av1 slice_parse_begin\n"); + +module_param(i_only_flag, uint, 0664); +MODULE_PARM_DESC(i_only_flag, "\n amvdec_av1 i_only_flag\n"); + +module_param(low_latency_flag, uint, 0664); +MODULE_PARM_DESC(low_latency_flag, "\n amvdec_av1 low_latency_flag\n"); + +module_param(no_head, uint, 0664); +MODULE_PARM_DESC(no_head, "\n amvdec_av1 no_head\n"); + +module_param(error_handle_policy, uint, 0664); +MODULE_PARM_DESC(error_handle_policy, "\n amvdec_av1 error_handle_policy\n"); + +module_param(buf_alloc_width, uint, 0664); +MODULE_PARM_DESC(buf_alloc_width, "\n buf_alloc_width\n"); + +module_param(buf_alloc_height, uint, 0664); +MODULE_PARM_DESC(buf_alloc_height, "\n 
buf_alloc_height\n"); + +module_param(buf_alloc_depth, uint, 0664); +MODULE_PARM_DESC(buf_alloc_depth, "\n buf_alloc_depth\n"); + +module_param(buf_alloc_size, uint, 0664); +MODULE_PARM_DESC(buf_alloc_size, "\n buf_alloc_size\n"); + +module_param(buffer_mode, uint, 0664); +MODULE_PARM_DESC(buffer_mode, "\n buffer_mode\n"); + +module_param(buffer_mode_dbg, uint, 0664); +MODULE_PARM_DESC(buffer_mode_dbg, "\n buffer_mode_dbg\n"); +/*USE_BUF_BLOCK*/ +module_param(max_buf_num, uint, 0664); +MODULE_PARM_DESC(max_buf_num, "\n max_buf_num\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +module_param(mv_buf_margin, uint, 0664); +MODULE_PARM_DESC(mv_buf_margin, "\n mv_buf_margin\n"); + +module_param(mv_buf_dynamic_alloc, uint, 0664); +MODULE_PARM_DESC(mv_buf_dynamic_alloc, "\n mv_buf_dynamic_alloc\n"); + +module_param(force_max_one_mv_buffer_size, uint, 0664); +MODULE_PARM_DESC(force_max_one_mv_buffer_size, "\n force_max_one_mv_buffer_size\n"); + +module_param(run_ready_min_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_min_buf_num, "\n run_ready_min_buf_num\n"); + +/**/ + +module_param(mem_map_mode, uint, 0664); +MODULE_PARM_DESC(mem_map_mode, "\n mem_map_mode\n"); + +#ifdef SUPPORT_10BIT +module_param(double_write_mode, uint, 0664); +MODULE_PARM_DESC(double_write_mode, "\n double_write_mode\n"); + +module_param(enable_mem_saving, uint, 0664); +MODULE_PARM_DESC(enable_mem_saving, "\n enable_mem_saving\n"); + +module_param(force_w_h, uint, 0664); +MODULE_PARM_DESC(force_w_h, "\n force_w_h\n"); +#endif + +module_param(force_fps, uint, 0664); +MODULE_PARM_DESC(force_fps, "\n force_fps\n"); + +module_param(max_decoding_time, uint, 0664); +MODULE_PARM_DESC(max_decoding_time, "\n max_decoding_time\n"); + +module_param(on_no_keyframe_skiped, uint, 0664); +MODULE_PARM_DESC(on_no_keyframe_skiped, "\n on_no_keyframe_skiped\n"); + +#ifdef MCRCC_ENABLE +module_param(mcrcc_cache_alg_flag, uint, 
0664); +MODULE_PARM_DESC(mcrcc_cache_alg_flag, "\n mcrcc_cache_alg_flag\n"); +#endif + +#ifdef MULTI_INSTANCE_SUPPORT +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n av1 start_decode_buf_level\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, + "\n av1 decode_timeout_val\n"); + +module_param(av1_max_pic_w, uint, 0664); +MODULE_PARM_DESC(av1_max_pic_w, "\n av1_max_pic_w\n"); + +module_param(av1_max_pic_h, uint, 0664); +MODULE_PARM_DESC(av1_max_pic_h, "\n av1_max_pic_h\n"); + +module_param_array(decode_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(display_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, + &max_decode_instance_num, 0664); + +module_param_array(run_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(input_empty, uint, + &max_decode_instance_num, 0664); + +module_param_array(not_run_ready, uint, + &max_decode_instance_num, 0664); + +#ifdef AOM_AV1_MMU_DW +module_param_array(dw_mmu_enable, uint, + &max_decode_instance_num, 0664); +#endif + +module_param(prefix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(prefix_aux_buf_size, "\n prefix_aux_buf_size\n"); + +module_param(suffix_aux_buf_size, uint, 0664); +MODULE_PARM_DESC(suffix_aux_buf_size, "\n suffix_aux_buf_size\n"); + +#endif + +#ifdef DUMP_FILMGRAIN +module_param(fg_dump_index, uint, 0664); +MODULE_PARM_DESC(fg_dump_index, "\n fg_dump_index\n"); +#endif + +module_param(get_picture_qos, uint, 0664); +MODULE_PARM_DESC(get_picture_qos, "\n amvdec_av1 get_picture_qos\n"); + +module_param(force_bufspec, uint, 0664); +MODULE_PARM_DESC(force_bufspec, "\n amvdec_h265 force_bufspec\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_h265 udebug_flag\n"); + +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION +module_param(dv_toggle_prov_name, uint, 0664); 
+MODULE_PARM_DESC(dv_toggle_prov_name, "\n dv_toggle_prov_name\n"); +#endif + +module_param(udebug_pause_pos, uint, 0664); +MODULE_PARM_DESC(udebug_pause_pos, "\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +#ifdef DEBUG_CRC_ERROR +module_param(crc_debug_flag, uint, 0664); +MODULE_PARM_DESC(crc_debug_flag, "\n crc_debug_flag\n"); +#endif + +#ifdef DEBUG_CMD +module_param(debug_cmd_wait_type, uint, 0664); +MODULE_PARM_DESC(debug_cmd_wait_type, "\n debug_cmd_wait_type\n"); + +module_param(debug_cmd_wait_count, uint, 0664); +MODULE_PARM_DESC(debug_cmd_wait_count, "\n debug_cmd_wait_count\n"); + +module_param(header_dump_size, uint, 0664); +MODULE_PARM_DESC(header_dump_size, "\n header_dump_size\n"); +#endif + +module_param(force_pts_unstable, uint, 0664); +MODULE_PARM_DESC(force_pts_unstable, "\n force_pts_unstable\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n without_display_mode\n"); + +module_param(v4l_bitstream_id_enable, uint, 0664); +MODULE_PARM_DESC(v4l_bitstream_id_enable, "\n v4l_bitstream_id_enable\n"); + +module_param(enable_single_slice, uint, 0664); +MODULE_PARM_DESC(enable_single_slice, "\n enable_single_slice\n"); + +module_init(amvdec_av1_driver_init_module); +module_exit(amvdec_av1_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC av1 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +
diff --git a/drivers/frame_provider/decoder_v4l/vav1/vav1.h b/drivers/frame_provider/decoder_v4l/vav1/vav1.h new file mode 100644 index 0000000..19c715c --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/vav1/vav1.h
@@ -0,0 +1,33 @@ +/* + * drivers/amlogic/amports/vav1.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef VAV1_H +#define VAV1_H + +#define DEBUG_FGS_REGS (1 << 0) //0x00000001 +#define DEBUG_FGS_REGS_PARSE (1 << 1) //0x00000002 +#define DEBUG_FGS_DETAIL (1 << 2) //0x00000004 +#define DEBUG_FGS_TABLE_DUMP (1 << 3) //0x00000008 +#define DEBUG_FGS_CONSUME_TIME (1 << 4) //0x00000010 +#define DEBUG_FGS_BYPASS (1 << 5) //0x00000020 + +int get_debug_fgs(void); +int pic_film_grain_run(u32 frame_count, char *fg_table_addr, u32 fgs_ctrl, u32 *fgs_data); + +void adapt_coef_probs(int pic_count, int prev_kf, int cur_kf, int pre_fc, +unsigned int *prev_prob, unsigned int *cur_prob, unsigned int *count); +#endif
diff --git a/drivers/frame_provider/decoder_v4l/vp9/Makefile b/drivers/frame_provider/decoder_v4l/vp9/Makefile new file mode 100644 index 0000000..ebbd910 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/vp9/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VDEC_VP9) += amvdec_vp9_v4l.o +amvdec_vp9_v4l-objs += vvp9.o
diff --git a/drivers/frame_provider/decoder_v4l/vp9/vvp9.c b/drivers/frame_provider/decoder_v4l/vp9/vvp9.c new file mode 100755 index 0000000..5857c68 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/vp9/vvp9.c
@@ -0,0 +1,12449 @@ + /* + * drivers/amlogic/amports/vvp9.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/amlogic/media/vfm/vframe_receiver.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/slab.h> +//#include <linux/amlogic/tee.h> +#include <uapi/linux/tee.h> +#include <linux/sched/clock.h> +#include "../../../stream_input/amports/amports_priv.h" +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../../decoder/utils/decoder_mmu_box.h" +#include "../../decoder/utils/decoder_bmmu_box.h" + +#define MEM_NAME "codec_vp9" +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../decoder/utils/vdec.h" +#include "../../decoder/utils/amvdec.h" +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC +#include "../../decoder/utils/vdec_profile.h" +#endif 
+#include "../../decoder/utils/vdec_ge2d_utils.h" + +#include <linux/amlogic/media/video_sink/video.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../../decoder/utils/config_parser.h" +#include "../../decoder/utils/firmware.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../../decoder/utils/vdec_v4l2_buffer_ops.h" +#include <media/v4l2-mem2mem.h> +#include "../../decoder/utils/vdec_feature.h" + +#define MIX_STREAM_SUPPORT + +#include "vvp9.h" + +/*#define SUPPORT_FB_DECODING*/ +/*#define FB_DECODING_TEST_SCHEDULE*/ + +#define CO_MV_COMPRESS +#define HW_MASK_FRONT 0x1 +#define HW_MASK_BACK 0x2 + +#define VP9D_MPP_REFINFO_TBL_ACCCONFIG 0x3442 +#define VP9D_MPP_REFINFO_DATA 0x3443 +#define VP9D_MPP_REF_SCALE_ENBL 0x3441 +#define HEVC_MPRED_CTRL4 0x324c +#define HEVC_CM_HEADER_START_ADDR 0x3628 +#define HEVC_DBLK_CFGB 0x350b +#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464 +#define HEVC_SAO_MMU_VH1_ADDR 0x363b +#define HEVC_SAO_MMU_VH0_ADDR 0x363a + +#define HEVC_MV_INFO 0x310d +#define HEVC_QP_INFO 0x3137 +#define HEVC_SKIP_INFO 0x3136 + +#define VP9_10B_DEC_IDLE 0 +#define VP9_10B_DEC_FRAME_HEADER 1 +#define VP9_10B_DEC_SLICE_SEGMENT 2 +#define VP9_10B_DECODE_SLICE 5 +#define VP9_10B_DISCARD_NAL 6 +#define VP9_DUMP_LMEM 7 +#define HEVC_DECPIC_DATA_DONE 0xa +#define HEVC_DECPIC_DATA_ERROR 0xb +#define HEVC_NAL_DECODE_DONE 0xe +#define HEVC_DECODE_BUFEMPTY 0x20 +#define HEVC_DECODE_TIMEOUT 0x21 +#define HEVC_SEARCH_BUFEMPTY 0x22 +#define HEVC_DECODE_OVER_SIZE 0x23 +#define HEVC_S2_DECODING_DONE 0x50 +#define VP9_HEAD_PARSER_DONE 0xf0 +#define VP9_HEAD_SEARCH_DONE 0xf1 +#define VP9_EOS 0xf2 +#define HEVC_ACTION_DONE 0xff + +#define VF_POOL_SIZE 32 + +#undef pr_info +#define pr_info printk + +#define DECODE_MODE_SINGLE ((0x80 << 24) | 0) +#define DECODE_MODE_MULTI_STREAMBASE ((0x80 << 24) | 1) +#define DECODE_MODE_MULTI_FRAMEBASE ((0x80 << 24) | 2) +#define DECODE_MODE_SINGLE_LOW_LATENCY ((0x80 << 24) | 3) +#define 
DECODE_MODE_MULTI_FRAMEBASE_NOHEAD ((0x80 << 24) | 4) + +#define VP9_TRIGGER_FRAME_DONE 0x100 +#define VP9_TRIGGER_FRAME_ENABLE 0x200 + +#define MV_MEM_UNIT 0x240 +/*--------------------------------------------------- + * Include "parser_cmd.h" + *--------------------------------------------------- + */ +#define PARSER_CMD_SKIP_CFG_0 0x0000090b + +#define PARSER_CMD_SKIP_CFG_1 0x1b14140f + +#define PARSER_CMD_SKIP_CFG_2 0x001b1910 + +#define PARSER_CMD_NUMBER 37 + +/*#define HEVC_PIC_STRUCT_SUPPORT*/ +/* to remove, fix build error */ + +/*#define CODEC_MM_FLAGS_FOR_VDECODER 0*/ + +#define MULTI_INSTANCE_SUPPORT +#define SUPPORT_10BIT +/* #define ERROR_HANDLE_DEBUG */ + +#ifndef STAT_KTHREAD +#define STAT_KTHREAD 0x40 +#endif + +#ifdef MULTI_INSTANCE_SUPPORT +#define MAX_DECODE_INSTANCE_NUM 9 +#define MULTI_DRIVER_NAME "ammvdec_vp9_v4l" + +static unsigned int max_decode_instance_num + = MAX_DECODE_INSTANCE_NUM; +static unsigned int decode_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int display_frame_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int max_process_time[MAX_DECODE_INSTANCE_NUM]; +static unsigned int run_count[MAX_DECODE_INSTANCE_NUM]; +static unsigned int input_empty[MAX_DECODE_INSTANCE_NUM]; +static unsigned int not_run_ready[MAX_DECODE_INSTANCE_NUM]; + +static u32 decode_timeout_val = 200; +static int start_decode_buf_level = 0x8000; +static u32 work_buf_size; + +static u32 force_pts_unstable; + +static u32 mv_buf_margin; + +static u32 mv_buf_dynamic_alloc; + +/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */ +/* double_write_mode: + * 0, no double write; + * 1, 1:1 ratio; + * 2, (1/4):(1/4) ratio; + * 3, (1/4):(1/4) ratio, with both compressed frame included + * 4, (1/2):(1/2) ratio; + * 8, (1/8):(1/8) ratio; + * 0x10, double write only + * 0x100, if > 1080p,use mode 4,else use mode 1; + * 0x200, if > 1080p,use mode 2,else use mode 1; + * 0x300, if > 720p, use mode 4, else use mode 1; + */ +static u32 
double_write_mode; + +#define DRIVER_NAME "amvdec_vp9_v4l" +#define DRIVER_HEADER_NAME "amvdec_vp9_header" + + +#define PUT_INTERVAL (HZ/100) +#define ERROR_SYSTEM_RESET_COUNT 200 + +#define PTS_NORMAL 0 +#define PTS_NONE_REF_USE_DURATION 1 + +#define PTS_MODE_SWITCHING_THRESHOLD 3 +#define PTS_MODE_SWITCHING_RECOVERY_THREASHOLD 3 + +#define DUR2PTS(x) ((x)*90/96) + +struct VP9Decoder_s; +static int vvp9_vf_states(struct vframe_states *states, void *); +static struct vframe_s *vvp9_vf_peek(void *); +static struct vframe_s *vvp9_vf_get(void *); +static void vvp9_vf_put(struct vframe_s *, void *); +static int vvp9_event_cb(int type, void *data, void *private_data); +#ifdef MULTI_INSTANCE_SUPPORT +static s32 vvp9_init(struct vdec_s *vdec); +#else +static s32 vvp9_init(struct VP9Decoder_s *pbi); +#endif +static void vvp9_prot_init(struct VP9Decoder_s *pbi, u32 mask); +static int vvp9_local_init(struct VP9Decoder_s *pbi); +static void vvp9_put_timer_func(struct timer_list *timer); +static void dump_data(struct VP9Decoder_s *pbi, int size); +static unsigned char get_data_check_sum + (struct VP9Decoder_s *pbi, int size); +static void dump_pic_list(struct VP9Decoder_s *pbi); +static int vp9_alloc_mmu( + struct VP9Decoder_s *pbi, + int cur_buf_idx, + int pic_width, + int pic_height, + unsigned short bit_depth, + unsigned int *mmu_index_adr); + +static const char vvp9_dec_id[] = "vvp9-dev"; + +#define PROVIDER_NAME "decoder.vp9" +#define MULTI_INSTANCE_PROVIDER_NAME "vdec.vp9" + +static const struct vframe_operations_s vvp9_vf_provider = { + .peek = vvp9_vf_peek, + .get = vvp9_vf_get, + .put = vvp9_vf_put, + .event_cb = vvp9_event_cb, + .vf_states = vvp9_vf_states, +}; + +static struct vframe_provider_s vvp9_vf_prov; + +static u32 bit_depth_luma; +static u32 bit_depth_chroma; +static u32 frame_width; +static u32 frame_height; +static u32 video_signal_type; + +static u32 on_no_keyframe_skiped; + +#define PROB_SIZE (496 * 2 * 4) +#define PROB_BUF_SIZE (0x5000) +#define 
COUNT_BUF_SIZE (0x300 * 4 * 4) +/*compute_losless_comp_body_size(4096, 2304, 1) = 18874368(0x1200000)*/ +#define MAX_FRAME_4K_NUM 0x1200 +#define MAX_FRAME_8K_NUM 0x4800 + +#define HEVC_ASSIST_MMU_MAP_ADDR 0x3009 + +#ifdef SUPPORT_FB_DECODING +/* register define */ +#define HEVC_ASSIST_HED_FB_W_CTL 0x3006 +#define HEVC_ASSIST_HED_FB_R_CTL 0x3007 +#define HEVC_ASSIST_HED_FB_ADDR 0x3008 +#define HEVC_ASSIST_FB_MMU_MAP_ADDR 0x300a +#define HEVC_ASSIST_FBD_MMU_MAP_ADDR 0x300b + + +#define MAX_STAGE_PAGE_NUM 0x1200 +#define STAGE_MMU_MAP_SIZE (MAX_STAGE_PAGE_NUM * 4) +#endif +static inline int div_r32(int64_t m, int n) +{ +/* + *return (int)(m/n) + */ +#ifndef CONFIG_ARM64 + int64_t qu = 0; + qu = div_s64(m, n); + return (int)qu; +#else + return (int)(m/n); +#endif +} + +/*USE_BUF_BLOCK*/ +struct BUF_s { + int index; + unsigned int alloc_flag; + /*buffer */ + unsigned int cma_page_count; + unsigned long alloc_addr; + unsigned long start_adr; + unsigned int size; + + unsigned int free_start_adr; + ulong v4l_ref_buf_addr; + ulong header_addr; + u32 header_size; + u32 luma_size; + ulong chroma_addr; + u32 chroma_size; +} /*BUF_t */; + +struct MVBUF_s { + unsigned long start_adr; + unsigned int size; + int used_flag; +} /*MVBUF_t */; + + /* #undef BUFMGR_ONLY to enable hardware configuration */ + +/*#define TEST_WR_PTR_INC*/ +/*#define WR_PTR_INC_NUM 128*/ +#define WR_PTR_INC_NUM 1 + +#define SIMULATION +#define DOS_PROJECT +#undef MEMORY_MAP_IN_REAL_CHIP + +/*#undef DOS_PROJECT*/ +/*#define MEMORY_MAP_IN_REAL_CHIP*/ + +/*#define BUFFER_MGR_ONLY*/ +/*#define CONFIG_HEVC_CLK_FORCED_ON*/ +/*#define ENABLE_SWAP_TEST*/ +#define MCRCC_ENABLE + +#define VP9_LPF_LVL_UPDATE +/*#define DBG_LF_PRINT*/ + +#ifdef VP9_10B_NV21 +#else +#define LOSLESS_COMPRESS_MODE +#endif + +#define DOUBLE_WRITE_YSTART_TEMP 0x02000000 +#define DOUBLE_WRITE_CSTART_TEMP 0x02900000 + +#define VP9_DEBUG_BUFMGR 0x01 +#define VP9_DEBUG_BUFMGR_MORE 0x02 +#define VP9_DEBUG_BUFMGR_DETAIL 0x04 +#define 
VP9_DEBUG_OUT_PTS 0x10 +#define VP9_DEBUG_SEND_PARAM_WITH_REG 0x100 +#define VP9_DEBUG_MERGE 0x200 +#define VP9_DEBUG_DBG_LF_PRINT 0x400 +#define VP9_DEBUG_REG 0x800 +#define VP9_DEBUG_2_STAGE 0x1000 +#define VP9_DEBUG_2_STAGE_MORE 0x2000 +#define VP9_DEBUG_QOS_INFO 0x4000 +#define VP9_DEBUG_DIS_LOC_ERROR_PROC 0x10000 +#define VP9_DEBUG_DIS_SYS_ERROR_PROC 0x20000 +#define VP9_DEBUG_DUMP_PIC_LIST 0x40000 +#define VP9_DEBUG_TRIG_SLICE_SEGMENT_PROC 0x80000 +#define VP9_DEBUG_NO_TRIGGER_FRAME 0x100000 +#define VP9_DEBUG_LOAD_UCODE_FROM_FILE 0x200000 +#define VP9_DEBUG_FORCE_SEND_AGAIN 0x400000 +#define VP9_DEBUG_DUMP_DATA 0x800000 +#define VP9_DEBUG_CACHE 0x1000000 +#define VP9_DEBUG_CACHE_HIT_RATE 0x2000000 +#define IGNORE_PARAM_FROM_CONFIG 0x8000000 +#ifdef MULTI_INSTANCE_SUPPORT +#define PRINT_FLAG_ERROR 0x0 +#define PRINT_FLAG_V4L_DETAIL 0x10000000 +#define PRINT_FLAG_VDEC_STATUS 0x20000000 +#define PRINT_FLAG_VDEC_DETAIL 0x40000000 +#define PRINT_FLAG_VDEC_DATA 0x80000000 +#endif +static u32 force_bufspec; +static u32 debug; +static bool is_reset; +/*for debug*/ +/* + udebug_flag: + bit 0, enable ucode print + bit 1, enable ucode detail print + bit [31:16] not 0, pos to dump lmem + bit 2, pop bits to lmem + bit [11:8], pre-pop bits for alignment (when bit 2 is 1) +*/ +static u32 udebug_flag; +/* + when udebug_flag[1:0] is not 0 + udebug_pause_pos not 0, + pause position +*/ +static u32 udebug_pause_pos; +/* + when udebug_flag[1:0] is not 0 + and udebug_pause_pos is not 0, + pause only when DEBUG_REG2 is equal to this val +*/ +static u32 udebug_pause_val; + +static u32 udebug_pause_decode_idx; + +static u32 disable_repeat; + +static u32 without_display_mode; + +static u32 v4l_bitstream_id_enable = 1; + +/* + *[3:0] 0: default use config from omx. + * 1: force enable fence. + * 2: disable fence. + *[7:4] 0: fence use for driver. + * 1: fence fd use for app. 
+ */ +static u32 force_config_fence; + +#define DEBUG_REG +#ifdef DEBUG_REG +void WRITE_VREG_DBG2(unsigned int adr, unsigned int val) +{ + if (debug & VP9_DEBUG_REG) + pr_info("%s(%x, %x)\n", __func__, adr, val); + if (adr != 0) + WRITE_VREG(adr, val); +} + +#undef WRITE_VREG +#define WRITE_VREG WRITE_VREG_DBG2 +#endif + +#define FRAME_CNT_WINDOW_SIZE 59 +#define RATE_CORRECTION_THRESHOLD 5 +/************************************************** + +VP9 buffer management start + +***************************************************/ +#define MMU_COMPRESS_HEADER_SIZE_1080P 0x10000 +#define MMU_COMPRESS_HEADER_SIZE_4K 0x48000 +#define MMU_COMPRESS_HEADER_SIZE_8K 0x120000 + + +//#define MMU_COMPRESS_HEADER_SIZE 0x48000 +//#define MMU_COMPRESS_HEADER_SIZE_DW 0x48000 +//#define MMU_COMPRESS_8K_HEADER_SIZE (MMU_COMPRESS_HEADER_SIZE * 4) + +#define MMU_COMPRESS_HEADER_SIZE 0x48000 +#define MMU_COMPRESS_8K_HEADER_SIZE (0x48000*4) +#define MAX_SIZE_8K (8192 * 4608) +#define MAX_SIZE_4K (4096 * 2304) +#define MAX_SIZE_2K (1920 * 1088) +#define IS_8K_SIZE(w, h) (((w) * (h)) > MAX_SIZE_4K) +#define IS_4K_SIZE(w, h) (((w) * (h)) > (1920*1088)) + +#define INVALID_IDX -1 /* Invalid buffer index.*/ + +#define RPM_BEGIN 0x200 +#define RPM_END 0x280 + +union param_u { + struct { + unsigned short data[RPM_END - RPM_BEGIN]; + } l; + struct { + /* from ucode lmem, do not change this struct */ + unsigned short profile; + unsigned short show_existing_frame; + unsigned short frame_to_show_idx; + unsigned short frame_type; /*1 bit*/ + unsigned short show_frame; /*1 bit*/ + unsigned short error_resilient_mode; /*1 bit*/ + unsigned short intra_only; /*1 bit*/ + unsigned short display_size_present; /*1 bit*/ + unsigned short reset_frame_context; + unsigned short refresh_frame_flags; + unsigned short width; + unsigned short height; + unsigned short display_width; + unsigned short display_height; + /* + *bit[11:8] - ref_frame_info_0 (ref(3-bits), ref_frame_sign_bias(1-bit)) + *bit[7:4] - 
ref_frame_info_1 (ref(3-bits), ref_frame_sign_bias(1-bit)) + *bit[3:0] - ref_frame_info_2 (ref(3-bits), ref_frame_sign_bias(1-bit)) + */ + unsigned short ref_info; + /* + *bit[2]: same_frame_size0 + *bit[1]: same_frame_size1 + *bit[0]: same_frame_size2 + */ + unsigned short same_frame_size; + + unsigned short mode_ref_delta_enabled; + unsigned short ref_deltas[4]; + unsigned short mode_deltas[2]; + unsigned short filter_level; + unsigned short sharpness_level; + unsigned short bit_depth; + unsigned short seg_quant_info[8]; + unsigned short seg_enabled; + unsigned short seg_abs_delta; + /* bit 15: feature enabled; bit 8, sign; bit[5:0], data */ + unsigned short seg_lf_info[8]; + } p; +}; + + +struct vpx_codec_frame_buffer_s { + uint8_t *data; /**< Pointer to the data buffer */ + size_t size; /**< Size of data in bytes */ + void *priv; /**< Frame's private data */ +}; + +enum vpx_color_space_t { + VPX_CS_UNKNOWN = 0, /**< Unknown */ + VPX_CS_BT_601 = 1, /**< BT.601 */ + VPX_CS_BT_709 = 2, /**< BT.709 */ + VPX_CS_SMPTE_170 = 3, /**< SMPTE.170 */ + VPX_CS_SMPTE_240 = 4, /**< SMPTE.240 */ + VPX_CS_BT_2020 = 5, /**< BT.2020 */ + VPX_CS_RESERVED = 6, /**< Reserved */ + VPX_CS_SRGB = 7 /**< sRGB */ +}; /**< alias for enum vpx_color_space */ + +enum vpx_bit_depth_t { + VPX_BITS_8 = 8, /**< 8 bits */ + VPX_BITS_10 = 10, /**< 10 bits */ + VPX_BITS_12 = 12, /**< 12 bits */ +}; + +#define MAX_SLICE_NUM 1024 +struct PIC_BUFFER_CONFIG_s { + int index; + int BUF_index; + int mv_buf_index; + int comp_body_size; + int buf_size; + int vf_ref; + int y_canvas_index; + int uv_canvas_index; +#ifdef MULTI_INSTANCE_SUPPORT + struct canvas_config_s canvas_config[2]; +#endif + int decode_idx; + int slice_type; + int stream_offset; + u32 pts; + u64 pts64; + u64 timestamp; + uint8_t error_mark; + /**/ + int slice_idx; + /*buffer*/ + unsigned long header_adr; + unsigned long mpred_mv_wr_start_addr; + int mv_size; + /*unsigned long mc_y_adr; + *unsigned long mc_u_v_adr; + */ + unsigned int 
dw_y_adr; + unsigned int dw_u_v_adr; + u32 luma_size; + u32 chroma_size; + int mc_canvas_y; + int mc_canvas_u_v; + + int lcu_total; + /**/ + int y_width; + int y_height; + int y_crop_width; + int y_crop_height; + int y_stride; + + int uv_width; + int uv_height; + int uv_crop_width; + int uv_crop_height; + int uv_stride; + + int alpha_width; + int alpha_height; + int alpha_stride; + + uint8_t *y_buffer; + uint8_t *u_buffer; + uint8_t *v_buffer; + uint8_t *alpha_buffer; + + uint8_t *buffer_alloc; + int buffer_alloc_sz; + int border; + int frame_size; + int subsampling_x; + int subsampling_y; + unsigned int bit_depth; + enum vpx_color_space_t color_space; + + int corrupted; + int flags; + unsigned long cma_alloc_addr; + + int double_write_mode; + + /* picture qos infomation*/ + int max_qp; + int avg_qp; + int min_qp; + int max_skip; + int avg_skip; + int min_skip; + int max_mv; + int min_mv; + int avg_mv; + + u32 hw_decode_time; + u32 frame_size2; // For frame base mode + + /* vdec sync. */ + struct dma_fence *fence; + + /* hdr10 plus data */ + u32 hdr10p_data_size; + char *hdr10p_data_buf; + int v4l_buf_index; + int repeat_count; + struct PIC_BUFFER_CONFIG_s *repeat_pic; +} PIC_BUFFER_CONFIG; + +enum BITSTREAM_PROFILE { + PROFILE_0, + PROFILE_1, + PROFILE_2, + PROFILE_3, + MAX_PROFILES +}; + +enum FRAME_TYPE { + KEY_FRAME = 0, + INTER_FRAME = 1, + FRAME_TYPES, +}; + +enum REFERENCE_MODE { + SINGLE_REFERENCE = 0, + COMPOUND_REFERENCE = 1, + REFERENCE_MODE_SELECT = 2, + REFERENCE_MODES = 3, +}; + +#define NONE -1 +#define INTRA_FRAME 0 +#define LAST_FRAME 1 +#define GOLDEN_FRAME 2 +#define ALTREF_FRAME 3 +#define MAX_REF_FRAMES 4 + +#define REFS_PER_FRAME 3 + +#define REF_FRAMES_LOG2 3 +#define REF_FRAMES (1 << REF_FRAMES_LOG2) +#define REF_FRAMES_4K (6) + +/*4 scratch frames for the new frames to support a maximum of 4 cores decoding + *in parallel, 3 for scaled references on the encoder. 
+ *TODO(hkuang): Add ondemand frame buffers instead of hardcoding the number + * // of framebuffers. + *TODO(jkoleszar): These 3 extra references could probably come from the + *normal reference pool. + */ +#define FRAME_BUFFERS (REF_FRAMES + 16) +#define HEADER_FRAME_BUFFERS (FRAME_BUFFERS) +#define MAX_BUF_NUM (FRAME_BUFFERS) +#define MV_BUFFER_NUM FRAME_BUFFERS +#ifdef SUPPORT_FB_DECODING +#define STAGE_MAX_BUFFERS 16 +#else +#define STAGE_MAX_BUFFERS 0 +#endif + +#define FRAME_CONTEXTS_LOG2 2 +#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2) +/*buffer + header buffer + workspace*/ +#ifdef MV_USE_FIXED_BUF +#define MAX_BMMU_BUFFER_NUM (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + 1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n) +#define WORK_SPACE_BUF_ID (FRAME_BUFFERS + HEADER_FRAME_BUFFERS) +#else +#define MAX_BMMU_BUFFER_NUM \ + (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + MV_BUFFER_NUM + 1) +#define VF_BUFFER_IDX(n) (n) +#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n) +#define MV_BUFFER_IDX(n) (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + n) +#define WORK_SPACE_BUF_ID \ + (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + MV_BUFFER_NUM) +#endif + +struct RefCntBuffer_s { + int ref_count; + /*MV_REF *mvs;*/ + int mi_rows; + int mi_cols; + struct vpx_codec_frame_buffer_s raw_frame_buffer; + struct PIC_BUFFER_CONFIG_s buf; + +/*The Following variables will only be used in frame parallel decode. + * + *frame_worker_owner indicates which FrameWorker owns this buffer. NULL means + *that no FrameWorker owns, or is decoding, this buffer. + *VP9Worker *frame_worker_owner; + * + *row and col indicate which position frame has been decoded to in real + *pixel unit. They are reset to -1 when decoding begins and set to INT_MAX + *when the frame is fully decoded. 
+ */ + int row; + int col; + + int show_frame; +} RefCntBuffer; + +struct RefBuffer_s { +/*TODO(dkovalev): idx is not really required and should be removed, now it + *is used in vp9_onyxd_if.c + */ + int idx; + struct PIC_BUFFER_CONFIG_s *buf; + /*struct scale_factors sf;*/ +} RefBuffer; + +struct InternalFrameBuffer_s { + uint8_t *data; + size_t size; + int in_use; +} InternalFrameBuffer; + +struct InternalFrameBufferList_s { + int num_internal_frame_buffers; + struct InternalFrameBuffer_s *int_fb; +} InternalFrameBufferList; + +struct BufferPool_s { +/*Protect BufferPool from being accessed by several FrameWorkers at + *the same time during frame parallel decode. + *TODO(hkuang): Try to use atomic variable instead of locking the whole pool. + * + *Private data associated with the frame buffer callbacks. + *void *cb_priv; + * + *vpx_get_frame_buffer_cb_fn_t get_fb_cb; + *vpx_release_frame_buffer_cb_fn_t release_fb_cb; + */ + + struct RefCntBuffer_s frame_bufs[FRAME_BUFFERS]; + +/*Frame buffers allocated internally by the codec.*/ + struct InternalFrameBufferList_s int_frame_buffers; + unsigned long flags; + spinlock_t lock; + +} BufferPool; + +#define lock_buffer_pool(pool, flags) \ + spin_lock_irqsave(&pool->lock, flags) + +#define unlock_buffer_pool(pool, flags) \ + spin_unlock_irqrestore(&pool->lock, flags) + +struct VP9_Common_s { + enum vpx_color_space_t color_space; + int width; + int height; + int display_width; + int display_height; + int last_width; + int last_height; + + int subsampling_x; + int subsampling_y; + + int use_highbitdepth;/*Marks if we need to use 16bit frame buffers.*/ + + struct PIC_BUFFER_CONFIG_s *frame_to_show; + struct RefCntBuffer_s *prev_frame; + + /*TODO(hkuang): Combine this with cur_buf in macroblockd.*/ + struct RefCntBuffer_s *cur_frame; + + int ref_frame_map[REF_FRAMES]; /* maps fb_idx to reference slot */ + + /*Prepare ref_frame_map for the next frame. + *Only used in frame parallel decode. 
+ */ + int next_ref_frame_map[REF_FRAMES]; + + /* TODO(jkoleszar): could expand active_ref_idx to 4, + *with 0 as intra, and roll new_fb_idx into it. + */ + + /*Each frame can reference REFS_PER_FRAME buffers*/ + struct RefBuffer_s frame_refs[REFS_PER_FRAME]; + + int prev_fb_idx; + int new_fb_idx; + int cur_fb_idx_mmu; + /*last frame's frame type for motion search*/ + enum FRAME_TYPE last_frame_type; + enum FRAME_TYPE frame_type; + + int show_frame; + int last_show_frame; + int show_existing_frame; + + /*Flag signaling that the frame is encoded using only INTRA modes.*/ + uint8_t intra_only; + uint8_t last_intra_only; + + int allow_high_precision_mv; + + /*Flag signaling that the frame context should be reset to default + *values. 0 or 1 implies don't reset, 2 reset just the context + *specified in the frame header, 3 reset all contexts. + */ + int reset_frame_context; + + /*MBs, mb_rows/cols is in 16-pixel units; mi_rows/cols is in + * MODE_INFO (8-pixel) units. + */ + int MBs; + int mb_rows, mi_rows; + int mb_cols, mi_cols; + int mi_stride; + + /*Whether to use previous frame's motion vectors for prediction.*/ + int use_prev_frame_mvs; + + int refresh_frame_context; /* Two state 0 = NO, 1 = YES */ + + int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */ + + /*struct loopfilter lf;*/ + /*struct segmentation seg;*/ + + /*TODO(hkuang):Remove this as it is the same as frame_parallel_decode*/ + /* in pbi.*/ + int frame_parallel_decode; /* frame-based threading.*/ + + /*Context probabilities for reference frame prediction*/ + /*MV_REFERENCE_FRAME comp_fixed_ref;*/ + /*MV_REFERENCE_FRAME comp_var_ref[2];*/ + enum REFERENCE_MODE reference_mode; + + /*FRAME_CONTEXT *fc; */ /* this frame entropy */ + /*FRAME_CONTEXT *frame_contexts; */ /*FRAME_CONTEXTS*/ + /*unsigned int frame_context_idx; *//* Context to use/update */ + /*FRAME_COUNTS counts;*/ + + unsigned int current_video_frame; + enum BITSTREAM_PROFILE profile; + + enum vpx_bit_depth_t bit_depth; + + int 
error_resilient_mode; + int frame_parallel_decoding_mode; + + int byte_alignment; + int skip_loop_filter; + + /*External BufferPool passed from outside.*/ + struct BufferPool_s *buffer_pool; + + int above_context_alloc_cols; +}; + +static void set_canvas(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config); +static int prepare_display_buf(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config); + +static void fill_frame_info(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *frame, + unsigned int framesize, + unsigned int pts); + +static struct PIC_BUFFER_CONFIG_s *get_frame_new_buffer(struct VP9_Common_s *cm) +{ + return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf; +} + +static void ref_cnt_fb(struct RefCntBuffer_s *bufs, int *idx, int new_idx) +{ + const int ref_index = *idx; + + if (ref_index >= 0 && bufs[ref_index].ref_count > 0) { + bufs[ref_index].ref_count--; + /*pr_info("[MMU DEBUG 2] dec ref_count[%d] : %d\r\n", + * ref_index, bufs[ref_index].ref_count); + */ + } + + *idx = new_idx; + + bufs[new_idx].ref_count++; + /*pr_info("[MMU DEBUG 3] inc ref_count[%d] : %d\r\n", + * new_idx, bufs[new_idx].ref_count); + */ +} + +int vp9_release_frame_buffer(struct vpx_codec_frame_buffer_s *fb) +{ + struct InternalFrameBuffer_s *const int_fb = + (struct InternalFrameBuffer_s *)fb->priv; + if (int_fb) + int_fb->in_use = 0; + return 0; +} + +static int compute_losless_comp_body_size(int width, int height, + uint8_t is_bit_depth_10); + +static void setup_display_size(struct VP9_Common_s *cm, union param_u *params, + int print_header_info) +{ + cm->display_width = cm->width; + cm->display_height = cm->height; + if (params->p.display_size_present) { + if (print_header_info) + pr_info(" * 1-bit display_size_present read : 1\n"); + cm->display_width = params->p.display_width; + cm->display_height = params->p.display_height; + /*vp9_read_frame_size(rb, &cm->display_width, + * &cm->display_height); + */ + } else { + if (print_header_info) + 
pr_info(" * 1-bit display_size_present read : 0\n"); + } +} + + +uint8_t print_header_info = 0; + +struct buff_s { + u32 buf_start; + u32 buf_size; + u32 buf_end; +} buff_t; + +struct BuffInfo_s { + u32 max_width; + u32 max_height; + u32 start_adr; + u32 end_adr; + struct buff_s ipp; + struct buff_s sao_abv; + struct buff_s sao_vb; + struct buff_s short_term_rps; + struct buff_s vps; + struct buff_s sps; + struct buff_s pps; + struct buff_s sao_up; + struct buff_s swap_buf; + struct buff_s swap_buf2; + struct buff_s scalelut; + struct buff_s dblk_para; + struct buff_s dblk_data; + struct buff_s seg_map; + struct buff_s mmu_vbh; + struct buff_s cm_header; + struct buff_s mpred_above; +#ifdef MV_USE_FIXED_BUF + struct buff_s mpred_mv; +#endif + struct buff_s rpm; + struct buff_s lmem; +} BuffInfo_t; +#ifdef MULTI_INSTANCE_SUPPORT +#define DEC_RESULT_NONE 0 +#define DEC_RESULT_DONE 1 +#define DEC_RESULT_AGAIN 2 +#define DEC_RESULT_CONFIG_PARAM 3 +#define DEC_RESULT_ERROR 4 +#define DEC_INIT_PICLIST 5 +#define DEC_UNINIT_PICLIST 6 +#define DEC_RESULT_GET_DATA 7 +#define DEC_RESULT_GET_DATA_RETRY 8 +#define DEC_RESULT_EOS 9 +#define DEC_RESULT_FORCE_EXIT 10 +#define DEC_RESULT_NEED_MORE_BUFFER 11 +#define DEC_V4L2_CONTINUE_DECODING 18 + +#define DEC_S1_RESULT_NONE 0 +#define DEC_S1_RESULT_DONE 1 +#define DEC_S1_RESULT_FORCE_EXIT 2 +#define DEC_S1_RESULT_TEST_TRIGGER_DONE 0xf0 + +#ifdef FB_DECODING_TEST_SCHEDULE +#define TEST_SET_NONE 0 +#define TEST_SET_PIC_DONE 1 +#define TEST_SET_S2_DONE 2 +#endif + +static void vp9_work(struct work_struct *work); +#endif +struct loop_filter_info_n; +struct loopfilter; +struct segmentation; + +#ifdef SUPPORT_FB_DECODING +static void mpred_process(struct VP9Decoder_s *pbi); +static void vp9_s1_work(struct work_struct *work); + +struct stage_buf_s { + int index; + unsigned short rpm[RPM_END - RPM_BEGIN]; +}; + +static unsigned int not_run2_ready[MAX_DECODE_INSTANCE_NUM]; + +static unsigned int run2_count[MAX_DECODE_INSTANCE_NUM]; + 
+#ifdef FB_DECODING_TEST_SCHEDULE +u32 stage_buf_num; /* = 16;*/ +#else +u32 stage_buf_num; +#endif +#endif + +struct vp9_fence_vf_t { + u32 used_size; + struct vframe_s *fence_vf[VF_POOL_SIZE]; +}; + +struct VP9Decoder_s { +#ifdef MULTI_INSTANCE_SUPPORT + unsigned char index; + + struct device *cma_dev; + struct platform_device *platform_dev; + void (*vdec_cb)(struct vdec_s *, void *); + void *vdec_cb_arg; + struct vframe_chunk_s *chunk; + int dec_result; + struct work_struct work; + struct work_struct recycle_mmu_work; + struct work_struct set_clk_work; + u32 start_shift_bytes; + + struct BuffInfo_s work_space_buf_store; + unsigned long buf_start; + u32 buf_size; + u32 cma_alloc_count; + unsigned long cma_alloc_addr; + uint8_t eos; + unsigned long int start_process_time; + unsigned last_lcu_idx; + int decode_timeout_count; + unsigned timeout_num; + int save_buffer_mode; + + int double_write_mode; +#endif + long used_4k_num; + + unsigned char m_ins_flag; + char *provider_name; + union param_u param; + int frame_count; + int pic_count; + u32 stat; + struct timer_list timer; + u32 frame_dur; + u32 frame_ar; + int fatal_error; + uint8_t init_flag; + uint8_t first_sc_checked; + uint8_t process_busy; +#define PROC_STATE_INIT 0 +#define PROC_STATE_DECODESLICE 1 +#define PROC_STATE_SENDAGAIN 2 + uint8_t process_state; + u32 ucode_pause_pos; + + int show_frame_num; + struct buff_s mc_buf_spec; + struct dec_sysinfo vvp9_amstream_dec_info; + void *rpm_addr; + void *lmem_addr; + dma_addr_t rpm_phy_addr; + dma_addr_t lmem_phy_addr; + unsigned short *lmem_ptr; + unsigned short *debug_ptr; + + void *prob_buffer_addr; + void *count_buffer_addr; + dma_addr_t prob_buffer_phy_addr; + dma_addr_t count_buffer_phy_addr; + + void *frame_mmu_map_addr; + dma_addr_t frame_mmu_map_phy_addr; + + unsigned int use_cma_flag; + + struct BUF_s m_BUF[MAX_BUF_NUM]; + struct MVBUF_s m_mv_BUF[MV_BUFFER_NUM]; + u32 used_buf_num; + DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE); + 
DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE); + DECLARE_KFIFO(pending_q, struct vframe_s *, VF_POOL_SIZE); + struct vframe_s vfpool[VF_POOL_SIZE]; + atomic_t vf_pre_count; + atomic_t vf_get_count; + atomic_t vf_put_count; + int buf_num; + int pic_num; + int lcu_size_log2; + unsigned int losless_comp_body_size; + + u32 video_signal_type; + + int pts_mode; + int last_lookup_pts; + int last_pts; + u64 last_lookup_pts_us64; + u64 last_pts_us64; + u64 shift_byte_count; + + u32 pts_unstable; + u32 frame_cnt_window; + u32 pts1, pts2; + u32 last_duration; + u32 duration_from_pts_done; + bool vp9_first_pts_ready; + + u32 shift_byte_count_lo; + u32 shift_byte_count_hi; + int pts_mode_switching_count; + int pts_mode_recovery_count; + + bool get_frame_dur; + u32 saved_resolution; + + /**/ + struct VP9_Common_s common; + struct RefCntBuffer_s *cur_buf; + int refresh_frame_flags; + uint8_t need_resync; + uint8_t hold_ref_buf; + uint8_t ready_for_new_data; + struct BufferPool_s vp9_buffer_pool; + + struct BuffInfo_s *work_space_buf; + + struct buff_s *mc_buf; + + unsigned int frame_width; + unsigned int frame_height; + + unsigned short *rpm_ptr; + int init_pic_w; + int init_pic_h; + int lcu_total; + int lcu_size; + + int slice_type; + + int skip_flag; + int decode_idx; + int slice_idx; + uint8_t has_keyframe; + uint8_t wait_buf; + uint8_t error_flag; + + /* bit 0, for decoding; bit 1, for displaying */ + uint8_t ignore_bufmgr_error; + int PB_skip_mode; + int PB_skip_count_after_decoding; + /*hw*/ + + /*lf*/ + int default_filt_lvl; + struct loop_filter_info_n *lfi; + struct loopfilter *lf; + struct segmentation *seg_4lf; + /**/ + struct vdec_info *gvs; + + u32 pre_stream_offset; + + unsigned int dec_status; + u32 last_put_idx; + int new_frame_displayed; + void *mmu_box; + void *bmmu_box; + int mmu_enable; + struct vframe_master_display_colour_s vf_dp; + struct firmware_s *fw; + int max_pic_w; + int max_pic_h; +#ifdef SUPPORT_FB_DECODING + int dec_s1_result; + int 
s1_test_cmd; + struct work_struct s1_work; + int used_stage_buf_num; + int s1_pos; + int s2_pos; + void *stage_mmu_map_addr; + dma_addr_t stage_mmu_map_phy_addr; + struct stage_buf_s *s1_buf; + struct stage_buf_s *s2_buf; + struct stage_buf_s *stage_bufs + [STAGE_MAX_BUFFERS]; + unsigned char run2_busy; + + int s1_mv_buf_index; + int s1_mv_buf_index_pre; + int s1_mv_buf_index_pre_pre; + unsigned long s1_mpred_mv_wr_start_addr; + unsigned long s1_mpred_mv_wr_start_addr_pre; + unsigned short s1_intra_only; + unsigned short s1_frame_type; + unsigned short s1_width; + unsigned short s1_height; + unsigned short s1_last_show_frame; + union param_u s1_param; + u8 back_not_run_ready; +#endif + int need_cache_size; + u64 sc_start_time; + bool postproc_done; + int low_latency_flag; + bool no_head; + bool pic_list_init_done; + bool pic_list_init_done2; + bool is_used_v4l; + void *v4l2_ctx; + bool v4l_params_parsed; + int frameinfo_enable; + struct vframe_qos_s vframe_qos; + u32 mem_map_mode; + u32 dynamic_buf_num_margin; + struct vframe_s vframe_dummy; + u32 res_ch_flag; + /*struct VP9Decoder_s vp9_decoder;*/ + union param_u vp9_param; + int sidebind_type; + int sidebind_channel_id; + bool enable_fence; + int fence_usage; + u32 frame_mode_pts_save[FRAME_BUFFERS]; + u64 frame_mode_pts64_save[FRAME_BUFFERS]; + int run_ready_min_buf_num; + int one_package_frame_cnt; + int buffer_wrap[FRAME_BUFFERS]; + int last_width; + int last_height; + u32 error_frame_width; + u32 error_frame_height; + u32 endian; + ulong fb_token; + bool wait_more_buf; + spinlock_t wait_buf_lock; + struct vp9_fence_vf_t fence_vf_s; + struct mutex fence_mutex; + dma_addr_t rdma_phy_adr; + unsigned *rdma_adr; + struct trace_decoder_name trace; + struct vdec_ge2d *ge2d; +}; + +static int vp9_print(struct VP9Decoder_s *pbi, + int flag, const char *fmt, ...) 
+{ +#define HEVC_PRINT_BUF 512 + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + + if (pbi == NULL || + (flag == 0) || + (debug & flag)) { + va_list args; + + va_start(args, fmt); + if (pbi) + len = sprintf(buf, "[%d]", pbi->index); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_debug("%s", buf); + va_end(args); + } + return 0; +} + +static int is_oversize(int w, int h) +{ + int max = (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)? + MAX_SIZE_8K : MAX_SIZE_4K; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D) + max = MAX_SIZE_2K; + + if (w <= 0 || h <= 0) + return true; + + if (h != 0 && (w > max / h)) + return true; + + return false; +} + +static int vvp9_mmu_compress_header_size(int w, int h) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + IS_8K_SIZE(w, h)) + return (MMU_COMPRESS_HEADER_SIZE_8K); + if (IS_4K_SIZE(w, h)) + return (MMU_COMPRESS_HEADER_SIZE_4K); + return (MMU_COMPRESS_HEADER_SIZE_1080P); +} + +/*#define FRAME_MMU_MAP_SIZE (MAX_FRAME_4K_NUM * 4)*/ +static int vvp9_frame_mmu_map_size(struct VP9Decoder_s *pbi) +{ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + IS_8K_SIZE(pbi->max_pic_w, pbi->max_pic_h)) + return (MAX_FRAME_8K_NUM << 2); + + return (MAX_FRAME_4K_NUM << 2); +} + +static int v4l_alloc_and_config_pic(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic); + +static void resize_context_buffers(struct VP9Decoder_s *pbi, + struct VP9_Common_s *cm, int width, int height) +{ + if (cm->width != width || cm->height != height) { + /* to do ..*/ + if (pbi != NULL) { + pbi->vp9_first_pts_ready = 0; + pbi->duration_from_pts_done = 0; + } + pr_info("%s (%d,%d)=>(%d,%d)\r\n", __func__, + cm->width, cm->height, + width, height); + cm->width = width; + cm->height = height; + } + /* + *if (cm->cur_frame->mvs == NULL || + * cm->mi_rows > cm->cur_frame->mi_rows || + * cm->mi_cols > cm->cur_frame->mi_cols) { + * resize_mv_buffer(cm); + *} + */ +} + +static int valid_ref_frame_size(int ref_width, 
int ref_height, + int this_width, int this_height) { + return 2 * this_width >= ref_width && + 2 * this_height >= ref_height && + this_width <= 16 * ref_width && + this_height <= 16 * ref_height; +} + +/* + *static int valid_ref_frame_img_fmt(enum vpx_bit_depth_t ref_bit_depth, + * int ref_xss, int ref_yss, + * enum vpx_bit_depth_t this_bit_depth, + * int this_xss, int this_yss) { + * return ref_bit_depth == this_bit_depth && ref_xss == this_xss && + * ref_yss == this_yss; + *} + */ + + +static int setup_frame_size( + struct VP9Decoder_s *pbi, + struct VP9_Common_s *cm, union param_u *params, + unsigned int *mmu_index_adr, + int print_header_info) { + int width, height; + struct BufferPool_s * const pool = cm->buffer_pool; + struct PIC_BUFFER_CONFIG_s *ybf; + int ret = 0; + + width = params->p.width; + height = params->p.height; + if (is_oversize(width, height)) { + pbi->error_frame_width = width; + pbi->error_frame_height = height; + vp9_print(pbi, 0, "%s, Error: Invalid frame size\n", __func__); + return -1; + } + pbi->error_frame_width = 0; + pbi->error_frame_height = 0; + + /*vp9_read_frame_size(rb, &width, &height);*/ + if (print_header_info) + pr_info(" * 16-bits w read : %d (width : %d)\n", width, height); + if (print_header_info) + pr_info + (" * 16-bits h read : %d (height : %d)\n", width, height); + + WRITE_VREG(HEVC_PARSER_PICTURE_SIZE, (height << 16) | width); +#ifdef VP9_10B_HED_FB + WRITE_VREG(HEVC_ASSIST_PIC_SIZE_FB_READ, (height << 16) | width); +#endif + if (pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) { + ret = vp9_alloc_mmu(pbi, + cm->new_fb_idx, + params->p.width, + params->p.height, + params->p.bit_depth, + mmu_index_adr); + if (ret != 0) { + pr_err("can't alloc need mmu1,idx %d ret =%d\n", + cm->new_fb_idx, + ret); + return ret; + } + cm->cur_fb_idx_mmu = cm->new_fb_idx; + } + + resize_context_buffers(pbi, cm, width, height); + setup_display_size(cm, params, print_header_info); +#if 0 + lock_buffer_pool(pool); + if 
(vp9_realloc_frame_buffer( + get_frame_new_buffer(cm), cm->width, cm->height, + cm->subsampling_x, cm->subsampling_y, +#if CONFIG_VP9_HIGHBITDEPTH + cm->use_highbitdepth, +#endif + VP9_DEC_BORDER_IN_PIXELS, + cm->byte_alignment, + &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, + pool->get_fb_cb, pool->cb_priv)) { + unlock_buffer_pool(pool); + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate frame buffer"); + } + unlock_buffer_pool(pool); +#else + /* porting */ + ybf = get_frame_new_buffer(cm); + if (!ybf) + return -1; + + ybf->y_crop_width = width; + ybf->y_crop_height = height; + ybf->bit_depth = params->p.bit_depth; +#endif + pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x; + pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y; + pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = + (unsigned int)cm->bit_depth; + pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space; + return ret; +} + +static int setup_frame_size_with_refs( + struct VP9Decoder_s *pbi, + struct VP9_Common_s *cm, + union param_u *params, + unsigned int *mmu_index_adr, + int print_header_info) { + + int width, height; + int found = 0, i; + int has_valid_ref_frame = 0; + struct PIC_BUFFER_CONFIG_s *ybf; + struct BufferPool_s * const pool = cm->buffer_pool; + int ret = 0; + + for (i = 0; i < REFS_PER_FRAME; ++i) { + if ((params->p.same_frame_size >> + (REFS_PER_FRAME - i - 1)) & 0x1) { + struct PIC_BUFFER_CONFIG_s *const buf = + cm->frame_refs[i].buf; + /*if (print_header_info) + * pr_info + * ("1-bit same_frame_size[%d] read : 1\n", i); + */ + width = buf->y_crop_width; + height = buf->y_crop_height; + /*if (print_header_info) + * pr_info + * (" - same_frame_size width : %d\n", width); + */ + /*if (print_header_info) + * pr_info + * (" - same_frame_size height : %d\n", height); + */ + found = 1; + break; + } else { + /*if (print_header_info) + * pr_info + * ("1-bit same_frame_size[%d] read : 0\n", i); + */ + } + } + + 
if (!found) { + /*vp9_read_frame_size(rb, &width, &height);*/ + width = params->p.width; + height = params->p.height; + /*if (print_header_info) + * pr_info + * (" * 16-bits w read : %d (width : %d)\n", + * width, height); + *if (print_header_info) + * pr_info + * (" * 16-bits h read : %d (height : %d)\n", + * width, height); + */ + } + + if (is_oversize(width, height)) { + pbi->error_frame_width = width; + pbi->error_frame_height = height; + vp9_print(pbi, 0, "%s, Error: Invalid frame size\n", __func__); + return -1; + } + pbi->error_frame_width = 0; + pbi->error_frame_height = 0; + + params->p.width = width; + params->p.height = height; + + WRITE_VREG(HEVC_PARSER_PICTURE_SIZE, (height << 16) | width); + if (pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) { + /*if(cm->prev_fb_idx >= 0) release_unused_4k(cm->prev_fb_idx); + *cm->prev_fb_idx = cm->new_fb_idx; + */ + /* pr_info + * ("[DEBUG DEBUG]Before alloc_mmu, + * prev_fb_idx : %d, new_fb_idx : %d\r\n", + * cm->prev_fb_idx, cm->new_fb_idx); + */ + ret = vp9_alloc_mmu(pbi, cm->new_fb_idx, + params->p.width, params->p.height, + params->p.bit_depth, mmu_index_adr); + if (ret != 0) { + pr_err("can't alloc need mmu,idx %d\r\n", + cm->new_fb_idx); + return ret; + } + cm->cur_fb_idx_mmu = cm->new_fb_idx; + } + + /*Check to make sure at least one of frames that this frame references + *has valid dimensions. 
+ */ + for (i = 0; i < REFS_PER_FRAME; ++i) { + struct RefBuffer_s * const ref_frame = &cm->frame_refs[i]; + + has_valid_ref_frame |= + valid_ref_frame_size(ref_frame->buf->y_crop_width, + ref_frame->buf->y_crop_height, + width, height); + } + if (!has_valid_ref_frame) { + pr_err("Error: Referenced frame has invalid size\r\n"); + return -1; + } +#if 0 + for (i = 0; i < REFS_PER_FRAME; ++i) { + struct RefBuffer_s * const ref_frame = + &cm->frame_refs[i]; + if (!valid_ref_frame_img_fmt( + ref_frame->buf->bit_depth, + ref_frame->buf->subsampling_x, + ref_frame->buf->subsampling_y, + cm->bit_depth, + cm->subsampling_x, + cm->subsampling_y)) + pr_err + ("Referenced frame incompatible color fmt\r\n"); + return -1; + } +#endif + resize_context_buffers(pbi, cm, width, height); + setup_display_size(cm, params, print_header_info); + +#if 0 + lock_buffer_pool(pool); + if (vp9_realloc_frame_buffer( + get_frame_new_buffer(cm), cm->width, cm->height, + cm->subsampling_x, cm->subsampling_y, +#if CONFIG_VP9_HIGHBITDEPTH + cm->use_highbitdepth, +#endif + VP9_DEC_BORDER_IN_PIXELS, + cm->byte_alignment, + &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, + pool->get_fb_cb, + pool->cb_priv)) { + unlock_buffer_pool(pool); + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate frame buffer"); + } + unlock_buffer_pool(pool); +#else + /* porting */ + ybf = get_frame_new_buffer(cm); + if (!ybf) + return -1; + + ybf->y_crop_width = width; + ybf->y_crop_height = height; + ybf->bit_depth = params->p.bit_depth; +#endif + pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x; + pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y; + pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = + (unsigned int)cm->bit_depth; + pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space; + return ret; +} + +static inline bool close_to(int a, int b, int m) +{ + return (abs(a - b) < m) ? 
true : false; +} + +#ifdef MULTI_INSTANCE_SUPPORT +static int vp9_print_cont(struct VP9Decoder_s *pbi, + int flag, const char *fmt, ...) +{ + unsigned char buf[HEVC_PRINT_BUF]; + int len = 0; + + if (pbi == NULL || + (flag == 0) || + (debug & flag)) { + va_list args; + + va_start(args, fmt); + vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args); + pr_debug("%s", buf); + va_end(args); + } + return 0; +} + +static void trigger_schedule(struct VP9Decoder_s *pbi) +{ + if (pbi->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + + if (ctx->param_sets_from_ucode && + !pbi->v4l_params_parsed) + vdec_v4l_write_frame_sync(ctx); + } + + ATRACE_COUNTER("V_ST_DEC-chunk_size", 0); + + if (pbi->vdec_cb) + pbi->vdec_cb(hw_to_vdec(pbi), pbi->vdec_cb_arg); +} + +static void reset_process_time(struct VP9Decoder_s *pbi) +{ + if (pbi->start_process_time) { + unsigned process_time = + 1000 * (jiffies - pbi->start_process_time) / HZ; + pbi->start_process_time = 0; + if (process_time > max_process_time[pbi->index]) + max_process_time[pbi->index] = process_time; + } +} + +static void start_process_time(struct VP9Decoder_s *pbi) +{ + pbi->start_process_time = jiffies; + pbi->decode_timeout_count = 0; + pbi->last_lcu_idx = 0; +} + +static void timeout_process(struct VP9Decoder_s *pbi) +{ + pbi->timeout_num++; + if (pbi->process_busy) { + vp9_print(pbi, + 0, "%s decoder timeout but process_busy\n", __func__); + return; + } + amhevc_stop(); + vp9_print(pbi, + 0, "%s decoder timeout\n", __func__); + + pbi->dec_result = DEC_RESULT_DONE; + reset_process_time(pbi); + vdec_schedule_work(&pbi->work); +} + +static u32 get_valid_double_write_mode(struct VP9Decoder_s *pbi) +{ + return ((double_write_mode & 0x80000000) == 0) ? 
+ pbi->double_write_mode : + (double_write_mode & 0x7fffffff); +} + +static int get_double_write_mode(struct VP9Decoder_s *pbi) +{ + u32 valid_dw_mode = get_valid_double_write_mode(pbi); + u32 dw; + int w, h; + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *cur_pic_config; + + if (pbi->is_used_v4l) { + unsigned int out; + + vdec_v4l_get_dw_mode(pbi->v4l2_ctx, &out); + dw = out; + return dw; + } + + /* mask for supporting double write value bigger than 0x100 */ + if (valid_dw_mode & 0xffffff00) { + if (!cm->cur_frame) + return 1;/*no valid frame,*/ + cur_pic_config = &cm->cur_frame->buf; + w = cur_pic_config->y_crop_width; + h = cur_pic_config->y_crop_height; + + dw = 0x1; /*1:1*/ + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + default: + break; + } + return dw; + } + + return valid_dw_mode; +} + +/* for double write buf alloc */ +static int get_double_write_mode_init(struct VP9Decoder_s *pbi) +{ + u32 valid_dw_mode = get_valid_double_write_mode(pbi); + u32 dw; + int w = pbi->init_pic_w; + int h = pbi->init_pic_h; + + dw = 0x1; /*1:1*/ + switch (valid_dw_mode) { + case 0x100: + if (w > 1920 && h > 1088) + dw = 0x4; /*1:2*/ + break; + case 0x200: + if (w > 1920 && h > 1088) + dw = 0x2; /*1:4*/ + break; + case 0x300: + if (w > 1280 && h > 720) + dw = 0x4; /*1:2*/ + break; + default: + dw = valid_dw_mode; + break; + } + return dw; +} +#endif + +//#define MAX_4K_NUM 0x1200 + +/* return page number */ +static int vp9_mmu_page_num(struct VP9Decoder_s *pbi, + int w, int h, int save_mode) +{ + int picture_size; + int cur_mmu_4k_number, max_frame_num; + + picture_size = compute_losless_comp_body_size(w, h, save_mode); + cur_mmu_4k_number = ((picture_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT); + + max_frame_num = (vvp9_frame_mmu_map_size(pbi) >> 2); + + if 
(cur_mmu_4k_number > max_frame_num) { + pr_err("over max !! cur_mmu_4k_number 0x%x width %d height %d\n", + cur_mmu_4k_number, w, h); + return -1; + } + + return cur_mmu_4k_number; +} + +static struct internal_comp_buf* v4lfb_to_icomp_buf( + struct VP9Decoder_s *pbi, + struct vdec_v4l2_buffer *fb) +{ + struct aml_video_dec_buf *aml_fb = NULL; + struct aml_vcodec_ctx * v4l2_ctx = pbi->v4l2_ctx; + + aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer); + return &v4l2_ctx->comp_bufs[aml_fb->internal_index]; +} + +static struct internal_comp_buf* index_to_icomp_buf( + struct VP9Decoder_s *pbi, int index) +{ + struct aml_video_dec_buf *aml_fb = NULL; + struct aml_vcodec_ctx * v4l2_ctx = pbi->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + + fb = (struct vdec_v4l2_buffer *) + pbi->m_BUF[index].v4l_ref_buf_addr; + aml_fb = container_of(fb, struct aml_video_dec_buf, frame_buffer); + return &v4l2_ctx->comp_bufs[aml_fb->internal_index]; +} + +int vp9_alloc_mmu( + struct VP9Decoder_s *pbi, + int cur_buf_idx, + int pic_width, + int pic_height, + unsigned short bit_depth, + unsigned int *mmu_index_adr) +{ + int ret; + int bit_depth_10 = (bit_depth == VPX_BITS_10); + int cur_mmu_4k_number; + + if (get_double_write_mode(pbi) == 0x10) + return 0; + + if (bit_depth >= VPX_BITS_12) { + pbi->fatal_error = DECODER_FATAL_ERROR_SIZE_OVERFLOW; + pr_err("fatal_error, un support bit depth 12!\n\n"); + return -1; + } + + cur_mmu_4k_number = vp9_mmu_page_num(pbi, + pic_width, + pic_height, + bit_depth_10); + if (cur_mmu_4k_number < 0) + return -1; + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + if (pbi->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(pbi, cur_buf_idx); + + ret = decoder_mmu_box_alloc_idx( + ibuf->mmu_box, + ibuf->index, + ibuf->frame_buffer_size, + mmu_index_adr); + } else { + ret = decoder_mmu_box_alloc_idx( + pbi->mmu_box, + cur_buf_idx, + cur_mmu_4k_number, + mmu_index_adr); + } + 
ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + return ret; +} + +#ifndef MV_USE_FIXED_BUF +static void dealloc_mv_bufs(struct VP9Decoder_s *pbi) +{ + int i; + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (pbi->m_mv_BUF[i].start_adr) { + if (debug) + pr_info( + "dealloc mv buf(%d) adr %ld size 0x%x used_flag %d\n", + i, pbi->m_mv_BUF[i].start_adr, + pbi->m_mv_BUF[i].size, + pbi->m_mv_BUF[i].used_flag); + decoder_bmmu_box_free_idx( + pbi->bmmu_box, + MV_BUFFER_IDX(i)); + pbi->m_mv_BUF[i].start_adr = 0; + pbi->m_mv_BUF[i].size = 0; + pbi->m_mv_BUF[i].used_flag = 0; + } + } +} + +static int alloc_mv_buf(struct VP9Decoder_s *pbi, + int i, int size) +{ + int ret = 0; + + if (pbi->m_mv_BUF[i].start_adr && + size > pbi->m_mv_BUF[i].size) { + dealloc_mv_bufs(pbi); + } else if (pbi->m_mv_BUF[i].start_adr) + return 0; + + if (decoder_bmmu_box_alloc_buf_phy + (pbi->bmmu_box, + MV_BUFFER_IDX(i), size, + DRIVER_NAME, + &pbi->m_mv_BUF[i].start_adr) < 0) { + pbi->m_mv_BUF[i].start_adr = 0; + ret = -1; + } else { + pbi->m_mv_BUF[i].size = size; + pbi->m_mv_BUF[i].used_flag = 0; + ret = 0; + if (debug) { + pr_info( + "MV Buffer %d: start_adr %px size %x\n", + i, + (void *)pbi->m_mv_BUF[i].start_adr, + pbi->m_mv_BUF[i].size); + } + } + return ret; +} + +static int cal_mv_buf_size(struct VP9Decoder_s *pbi, int pic_width, int pic_height) +{ + int lcu_size = 64; /*fixed 64*/ + int pic_width_64 = (pic_width + 63) & (~0x3f); + int pic_height_32 = (pic_height + 31) & (~0x1f); + int pic_width_lcu = (pic_width_64 % lcu_size) ? + pic_width_64 / lcu_size + 1 + : pic_width_64 / lcu_size; + int pic_height_lcu = (pic_height_32 % lcu_size) ? 
+ pic_height_32 / lcu_size + 1 + : pic_height_32 / lcu_size; + int lcu_total = pic_width_lcu * pic_height_lcu; + int size_a = lcu_total * 36 * 16; + int size_b = pic_width_lcu * 16 * + ((pic_height_lcu >> 3) + (pic_height_lcu & 0x7)); + int size = (size_a + size_b + 0xffff) & + (~0xffff); + + return size; +} + + +static int init_mv_buf_list(struct VP9Decoder_s *pbi) +{ + int i; + int ret = 0; + int count = MV_BUFFER_NUM; + int pic_width = pbi->init_pic_w; + int pic_height = pbi->init_pic_h; + int size = cal_mv_buf_size(pbi, pic_width, pic_height); + + if (mv_buf_dynamic_alloc) + return 0; + + if (mv_buf_margin > 0) + count = REF_FRAMES + mv_buf_margin; + + if (pbi->init_pic_w > 2048 && pbi->init_pic_h > 1088) + count = REF_FRAMES_4K + mv_buf_margin; + else + count = REF_FRAMES + mv_buf_margin; + + if (debug) { + pr_info("%s w:%d, h:%d, count: %d\n", + __func__, pbi->init_pic_w, pbi->init_pic_h, count); + } + + for (i = 0; + i < count && i < MV_BUFFER_NUM; i++) { + if (alloc_mv_buf(pbi, i, size) < 0) { + ret = -1; + break; + } + } + return ret; +} + +static int get_mv_buf(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + int i; + int ret = -1; + if (mv_buf_dynamic_alloc) { + union param_u *params = &pbi->vp9_param; + int size = cal_mv_buf_size(pbi, + params->p.width, params->p.height); + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (pbi->m_mv_BUF[i].start_adr == 0) { + ret = i; + break; + } + } + if (i == MV_BUFFER_NUM) { + pr_info( + "%s: Error, mv buf MV_BUFFER_NUM is not enough\n", + __func__); + return ret; + } + + if (alloc_mv_buf(pbi, ret, size) >= 0) { + pic_config->mv_buf_index = ret; + pic_config->mpred_mv_wr_start_addr = + (pbi->m_mv_BUF[ret].start_adr + 0xffff) & + (~0xffff); + pic_config->mv_size = size; + + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info( + "%s alloc => %d (%ld) size 0x%x\n", + __func__, ret, + pic_config->mpred_mv_wr_start_addr, + pic_config->mv_size); + } else { + pr_info( + "%s: Error, mv buf alloc fail\n", + 
__func__); + } + return ret; + } + + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (pbi->m_mv_BUF[i].start_adr && + pbi->m_mv_BUF[i].used_flag == 0) { + pbi->m_mv_BUF[i].used_flag = 1; + ret = i; + break; + } + } + + if (ret >= 0) { + pic_config->mv_buf_index = ret; + pic_config->mpred_mv_wr_start_addr = + (pbi->m_mv_BUF[ret].start_adr + 0xffff) & + (~0xffff); + pic_config->mv_size = pbi->m_mv_BUF[ret].size; + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info( + "%s => %d (%lx) size 0x%x\n", + __func__, ret, + pic_config->mpred_mv_wr_start_addr, + pic_config->mv_size); + } else { + pr_info( + "%s: Error, mv buf is not enough\n", + __func__); + } + return ret; +} + +static void put_mv_buf(struct VP9Decoder_s *pbi, + int *mv_buf_index) +{ + int i = *mv_buf_index; + if (i >= MV_BUFFER_NUM) { + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info( + "%s: index %d beyond range\n", + __func__, i); + return; + } + + if (mv_buf_dynamic_alloc) { + if (pbi->m_mv_BUF[i].start_adr) { + if (debug) + pr_info( + "dealloc mv buf(%d) adr %ld size 0x%x used_flag %d\n", + i, pbi->m_mv_BUF[i].start_adr, + pbi->m_mv_BUF[i].size, + pbi->m_mv_BUF[i].used_flag); + decoder_bmmu_box_free_idx( + pbi->bmmu_box, + MV_BUFFER_IDX(i)); + pbi->m_mv_BUF[i].start_adr = 0; + pbi->m_mv_BUF[i].size = 0; + pbi->m_mv_BUF[i].used_flag = 0; + } + *mv_buf_index = -1; + return; + } + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info( + "%s(%d): used_flag(%d)\n", + __func__, i, + pbi->m_mv_BUF[i].used_flag); + + *mv_buf_index = -1; + if (pbi->m_mv_BUF[i].start_adr && + pbi->m_mv_BUF[i].used_flag) + pbi->m_mv_BUF[i].used_flag = 0; +} + +static void put_un_used_mv_bufs(struct VP9Decoder_s *pbi) +{ + struct VP9_Common_s *const cm = &pbi->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + int i; + for (i = 0; i < pbi->used_buf_num; ++i) { + if ((&frame_bufs[i] != cm->prev_frame) && + (frame_bufs[i].buf.index != -1) && + (frame_bufs[i].buf.mv_buf_index >= 0) + ) + put_mv_buf(pbi, 
&frame_bufs[i].buf.mv_buf_index); + } +} + +#ifdef SUPPORT_FB_DECODING +static bool mv_buf_available(struct VP9Decoder_s *pbi) +{ + int i; + bool ret = 0; + for (i = 0; i < MV_BUFFER_NUM; i++) { + if (pbi->m_mv_BUF[i].start_adr && + pbi->m_mv_BUF[i].used_flag == 0) { + ret = 1; + break; + } + } + return ret; +} +#endif +#endif + +#ifdef SUPPORT_FB_DECODING +static void init_stage_buf(struct VP9Decoder_s *pbi) +{ + uint i; + for (i = 0; i < STAGE_MAX_BUFFERS + && i < stage_buf_num; i++) { + pbi->stage_bufs[i] = + vmalloc(sizeof(struct stage_buf_s)); + if (pbi->stage_bufs[i] == NULL) { + vp9_print(pbi, + 0, "%s vmalloc fail\n", __func__); + break; + } + pbi->stage_bufs[i]->index = i; + } + pbi->used_stage_buf_num = i; + pbi->s1_pos = 0; + pbi->s2_pos = 0; + pbi->s1_buf = NULL; + pbi->s2_buf = NULL; + pbi->s1_mv_buf_index = FRAME_BUFFERS; + pbi->s1_mv_buf_index_pre = FRAME_BUFFERS; + pbi->s1_mv_buf_index_pre_pre = FRAME_BUFFERS; + + if (pbi->used_stage_buf_num > 0) + vp9_print(pbi, + 0, "%s 2 stage decoding buf %d\n", + __func__, + pbi->used_stage_buf_num); +} + +static void uninit_stage_buf(struct VP9Decoder_s *pbi) +{ + int i; + for (i = 0; i < pbi->used_stage_buf_num; i++) { + if (pbi->stage_bufs[i]) + vfree(pbi->stage_bufs[i]); + pbi->stage_bufs[i] = NULL; + } + pbi->used_stage_buf_num = 0; + pbi->s1_pos = 0; + pbi->s2_pos = 0; + pbi->s1_buf = NULL; + pbi->s2_buf = NULL; +} + +static int get_s1_buf( + struct VP9Decoder_s *pbi) +{ + struct stage_buf_s *buf = NULL; + int ret = -1; + int buf_page_num = MAX_STAGE_PAGE_NUM; + int next_s1_pos = pbi->s1_pos + 1; + + if (next_s1_pos >= pbi->used_stage_buf_num) + next_s1_pos = 0; + if (next_s1_pos == pbi->s2_pos) { + pbi->s1_buf = NULL; + return ret; + } + + buf = pbi->stage_bufs[pbi->s1_pos]; + ret = decoder_mmu_box_alloc_idx( + pbi->mmu_box, + buf->index, + buf_page_num, + pbi->stage_mmu_map_addr); + if (ret < 0) { + vp9_print(pbi, 0, + "%s decoder_mmu_box_alloc fail for index %d (s1_pos %d s2_pos %d)\n", + __func__, 
buf->index, + pbi->s1_pos, pbi->s2_pos); + buf = NULL; + } else { + vp9_print(pbi, VP9_DEBUG_2_STAGE, + "%s decoder_mmu_box_alloc %d page for index %d (s1_pos %d s2_pos %d)\n", + __func__, buf_page_num, buf->index, + pbi->s1_pos, pbi->s2_pos); + } + pbi->s1_buf = buf; + return ret; +} + +static void inc_s1_pos(struct VP9Decoder_s *pbi) +{ + struct stage_buf_s *buf = + pbi->stage_bufs[pbi->s1_pos]; + + int used_page_num = +#ifdef FB_DECODING_TEST_SCHEDULE + MAX_STAGE_PAGE_NUM/2; +#else + (READ_VREG(HEVC_ASSIST_HED_FB_W_CTL) >> 16); +#endif + decoder_mmu_box_free_idx_tail(pbi->mmu_box, + FRAME_BUFFERS + buf->index, used_page_num); + + pbi->s1_pos++; + if (pbi->s1_pos >= pbi->used_stage_buf_num) + pbi->s1_pos = 0; + + vp9_print(pbi, VP9_DEBUG_2_STAGE, + "%s (used_page_num %d) for index %d (s1_pos %d s2_pos %d)\n", + __func__, used_page_num, buf->index, + pbi->s1_pos, pbi->s2_pos); +} + +#define s2_buf_available(pbi) (pbi->s1_pos != pbi->s2_pos) + +static int get_s2_buf( + struct VP9Decoder_s *pbi) +{ + int ret = -1; + struct stage_buf_s *buf = NULL; + if (s2_buf_available(pbi)) { + buf = pbi->stage_bufs[pbi->s2_pos]; + vp9_print(pbi, VP9_DEBUG_2_STAGE, + "%s for index %d (s1_pos %d s2_pos %d)\n", + __func__, buf->index, + pbi->s1_pos, pbi->s2_pos); + pbi->s2_buf = buf; + ret = 0; + } + return ret; +} + +static void inc_s2_pos(struct VP9Decoder_s *pbi) +{ + struct stage_buf_s *buf = + pbi->stage_bufs[pbi->s2_pos]; + decoder_mmu_box_free_idx(pbi->mmu_box, + FRAME_BUFFERS + buf->index); + pbi->s2_pos++; + if (pbi->s2_pos >= pbi->used_stage_buf_num) + pbi->s2_pos = 0; + vp9_print(pbi, VP9_DEBUG_2_STAGE, + "%s for index %d (s1_pos %d s2_pos %d)\n", + __func__, buf->index, + pbi->s1_pos, pbi->s2_pos); +} + +static int get_free_stage_buf_num(struct VP9Decoder_s *pbi) +{ + int num; + if (pbi->s1_pos >= pbi->s2_pos) + num = pbi->used_stage_buf_num - + (pbi->s1_pos - pbi->s2_pos) - 1; + else + num = (pbi->s2_pos - pbi->s1_pos) - 1; + return num; +} + +#ifndef 
FB_DECODING_TEST_SCHEDULE +static DEFINE_SPINLOCK(fb_core_spin_lock); + +static u8 is_s2_decoding_finished(struct VP9Decoder_s *pbi) +{ + /* to do: VLSI review + completion of last LCU decoding in BACK + */ + return 1; +} + +static void start_s1_decoding(struct VP9Decoder_s *pbi) +{ + /* to do: VLSI review + after parser, how to start LCU decoding in BACK + */ +} + +static void fb_reset_core(struct vdec_s *vdec, u32 mask) +{ + /* to do: VLSI review + 1. how to disconnect DMC for FRONT and BACK + 2. reset bit 13, 24, FRONT or BACK ?? + */ + + unsigned long flags; + u32 reset_bits = 0; + if (mask & HW_MASK_FRONT) + WRITE_VREG(HEVC_STREAM_CONTROL, 0); + spin_lock_irqsave(&fb_core_spin_lock, flags); + codec_dmcbus_write(DMC_REQ_CTRL, + codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 4))); + spin_unlock_irqrestore(&fb_core_spin_lock, flags); + + while (!(codec_dmcbus_read(DMC_CHAN_STS) + & (1 << 4))) + ; + + if ((mask & HW_MASK_FRONT) && + input_frame_based(vdec)) + WRITE_VREG(HEVC_STREAM_CONTROL, 0); + + /* + * 2: assist + * 3: parser + * 4: parser_state + * 8: dblk + * 11:mcpu + * 12:ccpu + * 13:ddr + * 14:iqit + * 15:ipp + * 17:qdct + * 18:mpred + * 19:sao + * 24:hevc_afifo + */ + if (mask & HW_MASK_FRONT) { + reset_bits = + (1<<3)|(1<<4)|(1<<11)| + (1<<12)|(1<<18); + } + if (mask & HW_MASK_BACK) { + reset_bits = + (1<<8)|(1<<13)|(1<<14)|(1<<15)| + (1<<17)|(1<<19)|(1<<24); + } + WRITE_VREG(DOS_SW_RESET3, reset_bits); +#if 0 + (1<<3)|(1<<4)|(1<<8)|(1<<11)| + (1<<12)|(1<<13)|(1<<14)|(1<<15)| + (1<<17)|(1<<18)|(1<<19)|(1<<24); +#endif + WRITE_VREG(DOS_SW_RESET3, 0); + + + spin_lock_irqsave(&fb_core_spin_lock, flags); + codec_dmcbus_write(DMC_REQ_CTRL, + codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 4)); + spin_unlock_irqrestore(&fb_core_spin_lock, flags); + +} +#endif + +#endif + +static void init_pic_list_hw(struct VP9Decoder_s *pbi); + +static int get_free_fb(struct VP9Decoder_s *pbi) +{ + struct VP9_Common_s *const cm = &pbi->common; + struct RefCntBuffer_s *const frame_bufs = 
cm->buffer_pool->frame_bufs; + int i; + unsigned long flags; + + lock_buffer_pool(cm->buffer_pool, flags); + if (debug & VP9_DEBUG_BUFMGR_MORE) { + for (i = 0; i < pbi->used_buf_num; ++i) { + pr_info("%s:%d, ref_count %d vf_ref %d index %d\r\n", + __func__, i, frame_bufs[i].ref_count, + frame_bufs[i].buf.vf_ref, + frame_bufs[i].buf.index); + } + } + for (i = 0; i < pbi->used_buf_num; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.index != -1) + ) + break; + } + if (i != pbi->used_buf_num) { + frame_bufs[i].ref_count = 1; + /*pr_info("[MMU DEBUG 1] set ref_count[%d] : %d\r\n", + i, frame_bufs[i].ref_count);*/ + } else { + /* Reset i to be INVALID_IDX to indicate + no free buffer found*/ + i = INVALID_IDX; + } + + unlock_buffer_pool(cm->buffer_pool, flags); + return i; +} + +static void update_hide_frame_timestamp(struct VP9Decoder_s *pbi) +{ + struct RefCntBuffer_s *const frame_bufs = + pbi->common.buffer_pool->frame_bufs; + int i; + + for (i = 0; i < pbi->used_buf_num; ++i) { + if ((!frame_bufs[i].show_frame) && + (!frame_bufs[i].buf.vf_ref) && + (frame_bufs[i].buf.BUF_index != -1)) { + frame_bufs[i].buf.timestamp = pbi->chunk->timestamp; + vp9_print(pbi, VP9_DEBUG_OUT_PTS, + "%s, update %d hide frame ts: %lld\n", + __func__, i, frame_bufs[i].buf.timestamp); + } + } +} + +static int get_free_fb_idx(struct VP9Decoder_s *pbi) +{ + int i; + struct VP9_Common_s *const cm = &pbi->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + + for (i = 0; i < pbi->used_buf_num; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.repeat_count == 0) && + (cm->cur_frame != &frame_bufs[i])) + break; + } + + return i; +} + +static int v4l_get_free_fb(struct VP9Decoder_s *pbi) +{ + struct VP9_Common_s *const cm = &pbi->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + struct aml_vcodec_ctx * v4l = pbi->v4l2_ctx; + 
struct v4l_buff_pool *pool = &v4l->cap_pool; + struct PIC_BUFFER_CONFIG_s *pic = NULL; + struct PIC_BUFFER_CONFIG_s *free_pic = NULL; + ulong flags; + int idx, i; + + lock_buffer_pool(cm->buffer_pool, flags); + + for (i = 0; i < pool->in; ++i) { + u32 state = (pool->seq[i] >> 16); + u32 index = (pool->seq[i] & 0xffff); + + switch (state) { + case V4L_CAP_BUFF_IN_DEC: + pic = &frame_bufs[i].buf; + if ((frame_bufs[i].ref_count == 0) && + (pic->vf_ref == 0) && + (pic->repeat_count == 0) && + (pic->index != -1) && + pic->cma_alloc_addr && + (cm->cur_frame != &frame_bufs[i])) { + free_pic = pic; + } + break; + case V4L_CAP_BUFF_IN_M2M: + idx = get_free_fb_idx(pbi); + pic = &frame_bufs[idx].buf; + pic->y_crop_width = pbi->frame_width; + pic->y_crop_height = pbi->frame_height; + pbi->buffer_wrap[idx] = index; + if (!v4l_alloc_and_config_pic(pbi, pic)) { + set_canvas(pbi, pic); + init_pic_list_hw(pbi); + free_pic = pic; + } + break; + default: + break; + } + + if (free_pic) { + frame_bufs[i].ref_count = 1; + break; + } + } + + if (free_pic && pbi->chunk) { + free_pic->timestamp = pbi->chunk->timestamp; + update_hide_frame_timestamp(pbi); + } + + unlock_buffer_pool(cm->buffer_pool, flags); + + if (free_pic) { + struct vdec_v4l2_buffer *fb = + (struct vdec_v4l2_buffer *) + pbi->m_BUF[i].v4l_ref_buf_addr; + + fb->status = FB_ST_DECODER; + } + + if (debug & VP9_DEBUG_OUT_PTS) { + if (free_pic) { + pr_debug("%s, idx: %d, ts: %lld\n", + __func__, free_pic->index, free_pic->timestamp); + } else { + pr_debug("%s, vp9 get free pic null\n", __func__); + dump_pic_list(pbi); + } + } + + return free_pic ? 
free_pic->index : INVALID_IDX; +} + +static int get_free_buf_count(struct VP9Decoder_s *pbi) +{ + struct VP9_Common_s *const cm = &pbi->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + int i, free_buf_count = 0; + + if (pbi->is_used_v4l) { + for (i = 0; i < pbi->used_buf_num; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.repeat_count == 0) && + frame_bufs[i].buf.cma_alloc_addr && + (cm->cur_frame != &frame_bufs[i])) { + free_buf_count++; + } + } + + if (ctx->cap_pool.dec < pbi->used_buf_num) { + if (ctx->fb_ops.query(&ctx->fb_ops, &pbi->fb_token)) { + free_buf_count += + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) + 1; + } + } + + /* trigger to parse head data. */ + if (!pbi->v4l_params_parsed) { + free_buf_count = pbi->run_ready_min_buf_num; + } + } else { + for (i = 0; i < pbi->used_buf_num; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.index != -1)) + free_buf_count++; + } + } + + return free_buf_count; +} + +static void decrease_ref_count(int idx, struct RefCntBuffer_s *const frame_bufs, + struct BufferPool_s *const pool) +{ + if (idx >= 0) { + --frame_bufs[idx].ref_count; + /*pr_info("[MMU DEBUG 7] dec ref_count[%d] : %d\r\n", idx, + * frame_bufs[idx].ref_count); + */ + /*A worker may only get a free framebuffer index when + *calling get_free_fb. But the private buffer is not set up + *until finish decoding header. So any error happens during + *decoding header, the frame_bufs will not have valid priv + *buffer. 
+ */ + + if (frame_bufs[idx].ref_count == 0 && + frame_bufs[idx].raw_frame_buffer.priv) + vp9_release_frame_buffer + (&frame_bufs[idx].raw_frame_buffer); + } +} + +static void generate_next_ref_frames(struct VP9Decoder_s *pbi) +{ + struct VP9_Common_s *const cm = &pbi->common; + struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs; + struct BufferPool_s *const pool = cm->buffer_pool; + int mask, ref_index = 0; + unsigned long flags; + + /* Generate next_ref_frame_map.*/ + lock_buffer_pool(pool, flags); + for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) { + if (mask & 1) { + cm->next_ref_frame_map[ref_index] = cm->new_fb_idx; + ++frame_bufs[cm->new_fb_idx].ref_count; + /*pr_info("[MMU DEBUG 4] inc ref_count[%d] : %d\r\n", + *cm->new_fb_idx, frame_bufs[cm->new_fb_idx].ref_count); + */ + } else + cm->next_ref_frame_map[ref_index] = + cm->ref_frame_map[ref_index]; + /* Current thread holds the reference frame.*/ + if (cm->ref_frame_map[ref_index] >= 0) { + ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count; + /*pr_info + *("[MMU DEBUG 5] inc ref_count[%d] : %d\r\n", + *cm->ref_frame_map[ref_index], + *frame_bufs[cm->ref_frame_map[ref_index]].ref_count); + */ + } + ++ref_index; + } + + for (; ref_index < REF_FRAMES; ++ref_index) { + cm->next_ref_frame_map[ref_index] = + cm->ref_frame_map[ref_index]; + /* Current thread holds the reference frame.*/ + if (cm->ref_frame_map[ref_index] >= 0) { + ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count; + /*pr_info("[MMU DEBUG 6] inc ref_count[%d] : %d\r\n", + *cm->ref_frame_map[ref_index], + *frame_bufs[cm->ref_frame_map[ref_index]].ref_count); + */ + } + } + unlock_buffer_pool(pool, flags); + return; +} + +static void refresh_ref_frames(struct VP9Decoder_s *pbi) + +{ + struct VP9_Common_s *const cm = &pbi->common; + struct BufferPool_s *pool = cm->buffer_pool; + struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs; + int mask, ref_index = 0; + unsigned long flags; + + lock_buffer_pool(pool, 
flags); + for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) { + const int old_idx = cm->ref_frame_map[ref_index]; + /*Current thread releases the holding of reference frame.*/ + decrease_ref_count(old_idx, frame_bufs, pool); + + /*Release the reference frame in reference map.*/ + if ((mask & 1) && old_idx >= 0) + decrease_ref_count(old_idx, frame_bufs, pool); + cm->ref_frame_map[ref_index] = + cm->next_ref_frame_map[ref_index]; + ++ref_index; + } + + /*Current thread releases the holding of reference frame.*/ + for (; ref_index < REF_FRAMES && !cm->show_existing_frame; + ++ref_index) { + const int old_idx = cm->ref_frame_map[ref_index]; + + decrease_ref_count(old_idx, frame_bufs, pool); + cm->ref_frame_map[ref_index] = + cm->next_ref_frame_map[ref_index]; + } + unlock_buffer_pool(pool, flags); + return; +} + +static int check_buff_has_show(struct RefCntBuffer_s *frame_buf) +{ + int ret = 1; + + if (disable_repeat || + ((frame_buf->buf.vf_ref == 0) && + (frame_buf->buf.index != -1) && + frame_buf->buf.cma_alloc_addr)) { + ret = 0; + if (debug & VP9_DEBUG_BUFMGR) + pr_info("existing buff can use\n"); + } else { + if (debug & VP9_DEBUG_BUFMGR) + pr_info("existing buff can't use\n"); + } + return ret; +} + +int vp9_bufmgr_process(struct VP9Decoder_s *pbi, union param_u *params) +{ + struct VP9_Common_s *const cm = &pbi->common; + struct BufferPool_s *pool = cm->buffer_pool; + struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs; + struct PIC_BUFFER_CONFIG_s *pic = NULL; + int i; + int ret; + + pbi->ready_for_new_data = 0; + + if ((pbi->has_keyframe == 0) && + (params->p.frame_type != KEY_FRAME) && + (!params->p.intra_only)){ + on_no_keyframe_skiped++; + pr_info("vp9_bufmgr_process no key frame return\n"); + return -2; + } + pbi->has_keyframe = 1; + on_no_keyframe_skiped = 0; +#if 0 + if (pbi->mmu_enable) { + if (!pbi->m_ins_flag) + pbi->used_4k_num = (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16); + if (cm->prev_fb_idx >= 0) { + 
decoder_mmu_box_free_idx_tail(pbi->mmu_box, + cm->prev_fb_idx, pbi->used_4k_num); + } + } +#endif + if (cm->new_fb_idx >= 0 + && frame_bufs[cm->new_fb_idx].ref_count == 0){ + vp9_release_frame_buffer + (&frame_bufs[cm->new_fb_idx].raw_frame_buffer); + } + /*pr_info("Before get_free_fb, prev_fb_idx : %d, new_fb_idx : %d\r\n", + cm->prev_fb_idx, cm->new_fb_idx);*/ +#ifndef MV_USE_FIXED_BUF + put_un_used_mv_bufs(pbi); + if (debug & VP9_DEBUG_BUFMGR_DETAIL) + dump_pic_list(pbi); +#endif + cm->new_fb_idx = pbi->is_used_v4l ? + v4l_get_free_fb(pbi) : + get_free_fb(pbi); + if (cm->new_fb_idx == INVALID_IDX) { + pr_info("get_free_fb error\r\n"); + return -1; + } + frame_bufs[cm->new_fb_idx].buf.v4l_buf_index = cm->new_fb_idx; +#ifndef MV_USE_FIXED_BUF +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num == 0) { +#endif + if (get_mv_buf(pbi, + &pool->frame_bufs[cm->new_fb_idx]. + buf) < 0) { + pr_info("get_mv_buf fail\r\n"); + return -1; + } + if (debug & VP9_DEBUG_BUFMGR_DETAIL) + dump_pic_list(pbi); +#ifdef SUPPORT_FB_DECODING + } +#endif +#endif + cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx]; + /*if (debug & VP9_DEBUG_BUFMGR) + pr_info("[VP9 DEBUG]%s(get_free_fb): %d\r\n", __func__, + cm->new_fb_idx);*/ + + pbi->cur_buf = &frame_bufs[cm->new_fb_idx]; + if (pbi->mmu_enable) { + /* moved to after picture size ready + *alloc_mmu(cm, params->p.width, params->p.height, + *params->p.bit_depth, pbi->frame_mmu_map_addr); + */ + cm->prev_fb_idx = cm->new_fb_idx; + } + /*read_uncompressed_header()*/ + cm->last_frame_type = cm->frame_type; + cm->last_intra_only = cm->intra_only; + cm->profile = params->p.profile; + if (cm->profile >= MAX_PROFILES) { + pr_err("Error: Unsupported profile %d\r\n", cm->profile); + return -1; + } + cm->show_existing_frame = params->p.show_existing_frame; + if (cm->show_existing_frame) { + /* Show an existing frame directly.*/ + int frame_to_show_idx = params->p.frame_to_show_idx; + int frame_to_show; + unsigned long flags; + if 
(frame_to_show_idx >= REF_FRAMES) { + pr_info("frame_to_show_idx %d exceed max index\r\n", + frame_to_show_idx); + return -1; + } + + frame_to_show = cm->ref_frame_map[frame_to_show_idx]; + /*pr_info("frame_to_show %d\r\n", frame_to_show);*/ + lock_buffer_pool(pool, flags); + if (frame_to_show < 0 || + frame_bufs[frame_to_show].ref_count < 1) { + unlock_buffer_pool(pool, flags); + pr_err + ("Error:Buffer %d does not contain a decoded frame", + frame_to_show); + return -1; + } + + if (check_buff_has_show(&frame_bufs[frame_to_show])) { + frame_bufs[frame_to_show].buf.repeat_count ++; + frame_bufs[frame_to_show].buf.v4l_buf_index = cm->new_fb_idx; + frame_bufs[cm->new_fb_idx].buf.repeat_pic = &frame_bufs[frame_to_show].buf; + } + frame_bufs[frame_to_show].buf.timestamp = frame_bufs[cm->new_fb_idx].buf.timestamp; + ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show); + update_hide_frame_timestamp(pbi); + unlock_buffer_pool(pool, flags); + pbi->refresh_frame_flags = 0; + /*cm->lf.filter_level = 0;*/ + cm->show_frame = 1; + cm->cur_frame->show_frame = 1; + + /* + *if (pbi->frame_parallel_decode) { + * for (i = 0; i < REF_FRAMES; ++i) + * cm->next_ref_frame_map[i] = + * cm->ref_frame_map[i]; + *} + */ + /* do not decode, search next start code */ + return 1; + } + cm->frame_type = params->p.frame_type; + cm->show_frame = params->p.show_frame; + cm->bit_depth = params->p.bit_depth; + cm->error_resilient_mode = params->p.error_resilient_mode; + cm->cur_frame->show_frame = cm->show_frame; + + if (cm->frame_type == KEY_FRAME) { + pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1; + + for (i = 0; i < REFS_PER_FRAME; ++i) { + cm->frame_refs[i].idx = INVALID_IDX; + cm->frame_refs[i].buf = NULL; + } + + ret = setup_frame_size(pbi, + cm, params, pbi->frame_mmu_map_addr, + print_header_info); + if (ret) + return -1; + if (pbi->need_resync) { + memset(&cm->ref_frame_map, -1, + sizeof(cm->ref_frame_map)); + pbi->need_resync = 0; + } + } else { + cm->intra_only = cm->show_frame ? 
0 : params->p.intra_only; + /*if (print_header_info) { + * if (cm->show_frame) + * pr_info + * ("intra_only set to 0 because of show_frame\n"); + * else + * pr_info + * ("1-bit intra_only read: %d\n", cm->intra_only); + *} + */ + + + cm->reset_frame_context = cm->error_resilient_mode ? + 0 : params->p.reset_frame_context; + if (print_header_info) { + if (cm->error_resilient_mode) + pr_info + ("reset to 0 error_resilient_mode\n"); + else + pr_info + (" * 2-bits reset_frame_context read : %d\n", + cm->reset_frame_context); + } + + if (cm->intra_only) { + if (cm->profile > PROFILE_0) { + /*read_bitdepth_colorspace_sampling(cm, + * rb, print_header_info); + */ + } else { + /*NOTE: The intra-only frame header + *does not include the specification + *of either the color format or + *color sub-sampling + *in profile 0. VP9 specifies that the default + *color format should be YUV 4:2:0 in this + *case (normative). + */ + cm->color_space = VPX_CS_BT_601; + cm->subsampling_y = cm->subsampling_x = 1; + cm->bit_depth = VPX_BITS_8; + cm->use_highbitdepth = 0; + } + + pbi->refresh_frame_flags = + params->p.refresh_frame_flags; + /*if (print_header_info) + * pr_info("*%d-bits refresh_frame read:0x%x\n", + * REF_FRAMES, pbi->refresh_frame_flags); + */ + ret = setup_frame_size(pbi, + cm, + params, + pbi->frame_mmu_map_addr, + print_header_info); + if (ret) + return -1; + if (pbi->need_resync) { + memset(&cm->ref_frame_map, -1, + sizeof(cm->ref_frame_map)); + pbi->need_resync = 0; + } + } else if (pbi->need_resync != 1) { /* Skip if need resync */ + pbi->refresh_frame_flags = + params->p.refresh_frame_flags; + if (print_header_info) + pr_info + ("*%d-bits refresh_frame read:0x%x\n", + REF_FRAMES, pbi->refresh_frame_flags); + for (i = 0; i < REFS_PER_FRAME; ++i) { + const int ref = + (params->p.ref_info >> + (((REFS_PER_FRAME-i-1)*4)+1)) + & 0x7; + const int idx = + cm->ref_frame_map[ref]; + struct RefBuffer_s * const ref_frame = + &cm->frame_refs[i]; + if (print_header_info) + 
pr_info("*%d-bits ref[%d]read:%d\n", + REF_FRAMES_LOG2, i, ref); + ref_frame->idx = idx; + ref_frame->buf = &frame_bufs[idx].buf; + cm->ref_frame_sign_bias[LAST_FRAME + i] + = (params->p.ref_info >> + ((REFS_PER_FRAME-i-1)*4)) & 0x1; + if (print_header_info) + pr_info("1bit ref_frame_sign_bias"); + /*pr_info + *("%dread: %d\n", + *LAST_FRAME+i, + *cm->ref_frame_sign_bias + *[LAST_FRAME + i]); + */ + /*pr_info + *("[VP9 DEBUG]%s(get ref):%d\r\n", + *__func__, ref_frame->idx); + */ + + } + + ret = setup_frame_size_with_refs( + pbi, + cm, + params, + pbi->frame_mmu_map_addr, + print_header_info); + if (ret) + return -1; + for (i = 0; i < REFS_PER_FRAME; ++i) { + /*struct RefBuffer_s *const ref_buf = + *&cm->frame_refs[i]; + */ + /* to do: + *vp9_setup_scale_factors_for_frame + */ + } + } + } + + pic = get_frame_new_buffer(cm); + if (!pic) + return -1; + + pic->bit_depth = cm->bit_depth; + pic->color_space = cm->color_space; + pic->slice_type = cm->frame_type; + + if (pbi->need_resync) { + pr_err + ("Error: Keyframe/intra-only frame required to reset\r\n"); + return -1; + } + generate_next_ref_frames(pbi); + pbi->hold_ref_buf = 1; + +#if 0 + if (frame_is_intra_only(cm) || cm->error_resilient_mode) + vp9_setup_past_independence(cm); + setup_loopfilter(&cm->lf, rb, print_header_info); + setup_quantization(cm, &pbi->mb, rb, print_header_info); + setup_segmentation(&cm->seg, rb, print_header_info); + setup_segmentation_dequant(cm, print_header_info); + + setup_tile_info(cm, rb, print_header_info); + sz = vp9_rb_read_literal(rb, 16); + if (print_header_info) + pr_info(" * 16-bits size read : %d (0x%x)\n", sz, sz); + + if (sz == 0) + vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, + "Invalid header size"); +#endif + /*end read_uncompressed_header()*/ + cm->use_prev_frame_mvs = !cm->error_resilient_mode && + cm->width == cm->last_width && + cm->height == cm->last_height && + !cm->last_intra_only && + cm->last_show_frame && + (cm->last_frame_type != KEY_FRAME); + + 
/*pr_info + *("set use_prev_frame_mvs to %d (last_width %d last_height %d", + *cm->use_prev_frame_mvs, cm->last_width, cm->last_height); + *pr_info + *(" last_intra_only %d last_show_frame %d last_frame_type %d)\n", + *cm->last_intra_only, cm->last_show_frame, cm->last_frame_type); + */ + + if (pbi->enable_fence && cm->show_frame) { + struct PIC_BUFFER_CONFIG_s *pic = &cm->cur_frame->buf; + struct vdec_s *vdec = hw_to_vdec(pbi); + + /* create fence for each buffers. */ + ret = vdec_timeline_create_fence(vdec->sync); + if (ret < 0) + return ret; + + pic->fence = vdec->sync->fence; + pic->bit_depth = cm->bit_depth; + pic->slice_type = cm->frame_type; + pic->stream_offset = pbi->pre_stream_offset; + + if (pbi->chunk) { + pic->pts = pbi->chunk->pts; + pic->pts64 = pbi->chunk->pts64; + pic->timestamp = pbi->chunk->timestamp; + } + + /* post video vframe. */ + prepare_display_buf(pbi, pic); + } + + return 0; +} + + +void swap_frame_buffers(struct VP9Decoder_s *pbi) +{ + int ref_index = 0; + struct VP9_Common_s *const cm = &pbi->common; + struct BufferPool_s *const pool = cm->buffer_pool; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + unsigned long flags; + refresh_ref_frames(pbi); + pbi->hold_ref_buf = 0; + cm->frame_to_show = get_frame_new_buffer(cm); + + if (cm->frame_to_show) { + /*if (!pbi->frame_parallel_decode || !cm->show_frame) {*/ + lock_buffer_pool(pool, flags); + --frame_bufs[cm->new_fb_idx].ref_count; + /*pr_info("[MMU DEBUG 8] dec ref_count[%d] : %d\r\n", cm->new_fb_idx, + * frame_bufs[cm->new_fb_idx].ref_count); + */ + unlock_buffer_pool(pool, flags); + /*}*/ + } + + /*Invalidate these references until the next frame starts.*/ + for (ref_index = 0; ref_index < 3; ref_index++) + cm->frame_refs[ref_index].idx = -1; +} + +#if 0 +static void check_resync(vpx_codec_alg_priv_t *const ctx, + const struct VP9Decoder_s *const pbi) +{ + /* Clear resync flag if worker got a key frame or intra only frame.*/ + if (ctx->need_resync == 1 && 
pbi->need_resync == 0 && + (pbi->common.intra_only || pbi->common.frame_type == KEY_FRAME)) + ctx->need_resync = 0; +} +#endif + +int vp9_get_raw_frame(struct VP9Decoder_s *pbi, struct PIC_BUFFER_CONFIG_s *sd) +{ + struct VP9_Common_s *const cm = &pbi->common; + int ret = -1; + + if (pbi->ready_for_new_data == 1) + return ret; + + pbi->ready_for_new_data = 1; + + /* no raw frame to show!!! */ + if (!cm->show_frame) + return ret; + + /* may not be get buff in v4l2 */ + if (!cm->frame_to_show) + return ret; + + pbi->ready_for_new_data = 1; + + *sd = *cm->frame_to_show; + ret = 0; + + return ret; +} + +int vp9_bufmgr_init(struct VP9Decoder_s *pbi, struct BuffInfo_s *buf_spec_i, + struct buff_s *mc_buf_i) { + struct VP9_Common_s *cm = &pbi->common; + + /*memset(pbi, 0, sizeof(struct VP9Decoder_s));*/ + pbi->frame_count = 0; + pbi->pic_count = 0; + pbi->pre_stream_offset = 0; + cm->buffer_pool = &pbi->vp9_buffer_pool; + spin_lock_init(&cm->buffer_pool->lock); + cm->prev_fb_idx = INVALID_IDX; + cm->new_fb_idx = INVALID_IDX; + pbi->used_4k_num = -1; + cm->cur_fb_idx_mmu = INVALID_IDX; + pr_debug + ("After vp9_bufmgr_init, prev_fb_idx : %d, new_fb_idx : %d\r\n", + cm->prev_fb_idx, cm->new_fb_idx); + pbi->need_resync = 1; + /* Initialize the references to not point to any frame buffers.*/ + memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); + memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map)); + cm->current_video_frame = 0; + pbi->ready_for_new_data = 1; + + /* private init */ + pbi->work_space_buf = buf_spec_i; + if (!pbi->mmu_enable) + pbi->mc_buf = mc_buf_i; + + pbi->rpm_addr = NULL; + pbi->lmem_addr = NULL; + + pbi->use_cma_flag = 0; + pbi->decode_idx = 0; + pbi->slice_idx = 0; + /*int m_uiMaxCUWidth = 1<<7;*/ + /*int m_uiMaxCUHeight = 1<<7;*/ + pbi->has_keyframe = 0; + pbi->skip_flag = 0; + pbi->wait_buf = 0; + pbi->error_flag = 0; + + pbi->pts_mode = PTS_NORMAL; + pbi->last_pts = 0; + pbi->last_lookup_pts = 0; + pbi->last_pts_us64 = 0; + 
pbi->last_lookup_pts_us64 = 0; + pbi->shift_byte_count = 0; + pbi->shift_byte_count_lo = 0; + pbi->shift_byte_count_hi = 0; + pbi->pts_mode_switching_count = 0; + pbi->pts_mode_recovery_count = 0; + + pbi->buf_num = 0; + pbi->pic_num = 0; + + return 0; +} + +int vp9_bufmgr_postproc(struct VP9Decoder_s *pbi) +{ + struct vdec_s *vdec = hw_to_vdec(pbi); + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s sd; + + if (pbi->postproc_done) + return 0; + pbi->postproc_done = 1; + swap_frame_buffers(pbi); + if (!cm->show_existing_frame) { + cm->last_show_frame = cm->show_frame; + cm->prev_frame = cm->cur_frame; +#if 0 + if (cm->seg.enabled && !pbi->frame_parallel_decode) + vp9_swap_current_and_last_seg_map(cm); +#endif + } + cm->last_width = cm->width; + cm->last_height = cm->height; + //pbi->last_width = cm->width; + //pbi->last_height = cm->height; + + if (cm->show_frame) + cm->current_video_frame++; + + if (vp9_get_raw_frame(pbi, &sd) == 0) { + /*pr_info("Display frame index %d\r\n", sd.index);*/ + sd.stream_offset = pbi->pre_stream_offset; + + if (pbi->enable_fence) { + int i, j, used_size, ret; + int signed_count = 0; + struct vframe_s *signed_fence[VF_POOL_SIZE]; + /* notify signal to wake up wq of fence. 
*/ + vdec_timeline_increase(vdec->sync, 1); + mutex_lock(&pbi->fence_mutex); + used_size = pbi->fence_vf_s.used_size; + if (used_size) { + for (i = 0, j = 0; i < VF_POOL_SIZE && j < used_size; i++) { + if (pbi->fence_vf_s.fence_vf[i] != NULL) { + ret = dma_fence_get_status(pbi->fence_vf_s.fence_vf[i]->fence); + if (ret == 1) { + signed_fence[signed_count] = pbi->fence_vf_s.fence_vf[i]; + pbi->fence_vf_s.fence_vf[i] = NULL; + pbi->fence_vf_s.used_size--; + signed_count++; + } + j++; + } + } + } + mutex_unlock(&pbi->fence_mutex); + if (signed_count != 0) { + for (i = 0; i < signed_count; i++) + vvp9_vf_put(signed_fence[i], vdec); + } + } else { + prepare_display_buf(pbi, &sd); + } + + pbi->pre_stream_offset = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + } + +/* else + * pr_info + * ("Not display this frame,ready_for_new_data%d show_frame%d\r\n", + * pbi->ready_for_new_data, cm->show_frame); + */ + return 0; +} + +/************************************************** + * + *VP9 buffer management end + * + *************************************************** + */ + + +#define HEVC_CM_BODY_START_ADDR 0x3626 +#define HEVC_CM_BODY_LENGTH 0x3627 +#define HEVC_CM_HEADER_LENGTH 0x3629 +#define HEVC_CM_HEADER_OFFSET 0x362b + +#define LOSLESS_COMPRESS_MODE + +/*#define DECOMP_HEADR_SURGENT*/ +#ifdef VP9_10B_NV21 +static u32 mem_map_mode = 2 /* 0:linear 1:32x32 2:64x32*/ +#else +static u32 mem_map_mode; /* 0:linear 1:32x32 2:64x32 ; m8baby test1902 */ +#endif +static u32 enable_mem_saving = 1; +static u32 force_w_h; + +static u32 force_fps; + + +const u32 vp9_version = 201602101; +static u32 debug; +static u32 radr; +static u32 rval; +static u32 pop_shorts; +static u32 dbg_cmd; +static u32 dbg_skip_decode_index; + +/* + * bit 0~3, for HEVCD_IPP_AXIIF_CONFIG endian config + * bit 8~23, for HEVC_SAO_CTRL1 endian config + */ +static u32 endian; +#define HEVC_CONFIG_BIG_ENDIAN ((0x880 << 8) | 0x8) +#define HEVC_CONFIG_LITTLE_ENDIAN ((0xff0 << 8) | 0xf) + +#ifdef ERROR_HANDLE_DEBUG +static u32 
dbg_nal_skip_flag; + /* bit[0], skip vps; bit[1], skip sps; bit[2], skip pps */ +static u32 dbg_nal_skip_count; +#endif +/*for debug*/ +static u32 decode_pic_begin; +static uint slice_parse_begin; +static u32 step; +#ifdef MIX_STREAM_SUPPORT +static u32 buf_alloc_width = 4096; +static u32 buf_alloc_height = 2304; +static u32 vp9_max_pic_w = 4096; +static u32 vp9_max_pic_h = 2304; + +static u32 dynamic_buf_num_margin; +#else +static u32 buf_alloc_width; +static u32 buf_alloc_height; +static u32 dynamic_buf_num_margin = 7; +#endif +static u32 buf_alloc_depth = 10; +static u32 buf_alloc_size; +/* + *bit[0]: 0, + * bit[1]: 0, always release cma buffer when stop + * bit[1]: 1, never release cma buffer when stop + *bit[0]: 1, when stop, release cma buffer if blackout is 1; + *do not release cma buffer is blackout is not 1 + * + *bit[2]: 0, when start decoding, check current displayed buffer + * (only for buffer decoded by vp9) if blackout is 0 + * 1, do not check current displayed buffer + * + *bit[3]: 1, if blackout is not 1, do not release current + * displayed cma buffer always. 
+ */ +/* set to 1 for fast play; + * set to 8 for other case of "keep last frame" + */ +static u32 buffer_mode = 1; +/* buffer_mode_dbg: debug only*/ +static u32 buffer_mode_dbg = 0xffff0000; +/**/ + +/* + *bit 0, 1: only display I picture; + *bit 1, 1: only decode I picture; + */ +static u32 i_only_flag; + +static u32 low_latency_flag; + +static u32 no_head; + +static u32 max_decoding_time; +/* + *error handling + */ +/*error_handle_policy: + *bit 0: 0, auto skip error_skip_nal_count nals before error recovery; + *1, skip error_skip_nal_count nals before error recovery; + *bit 1 (valid only when bit0 == 1): + *1, wait vps/sps/pps after error recovery; + *bit 2 (valid only when bit0 == 0): + *0, auto search after error recovery (vp9_recover() called); + *1, manual search after error recovery + *(change to auto search after get IDR: WRITE_VREG(NAL_SEARCH_CTL, 0x2)) + * + *bit 4: 0, set error_mark after reset/recover + * 1, do not set error_mark after reset/recover + *bit 5: 0, check total lcu for every picture + * 1, do not check total lcu + * + */ + +static u32 error_handle_policy; +/*static u32 parser_sei_enable = 1;*/ +#define MAX_BUF_NUM_NORMAL 15 +#define MAX_BUF_NUM_LESS 10 +static u32 max_buf_num = MAX_BUF_NUM_NORMAL; +#define MAX_BUF_NUM_SAVE_BUF 8 + +static u32 run_ready_min_buf_num = 2; + +static DEFINE_MUTEX(vvp9_mutex); +#ifndef MULTI_INSTANCE_SUPPORT +static struct device *cma_dev; +#endif + +#define HEVC_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0 +#define HEVC_RPM_BUFFER HEVC_ASSIST_SCRATCH_1 +#define HEVC_SHORT_TERM_RPS HEVC_ASSIST_SCRATCH_2 +#define VP9_ADAPT_PROB_REG HEVC_ASSIST_SCRATCH_3 +#define VP9_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_4 +#define HEVC_PPS_BUFFER HEVC_ASSIST_SCRATCH_5 +//#define HEVC_SAO_UP HEVC_ASSIST_SCRATCH_6 +#define HEVC_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7 +#define HEVC_STREAM_SWAP_BUFFER2 HEVC_ASSIST_SCRATCH_8 +#define VP9_PROB_SWAP_BUFFER HEVC_ASSIST_SCRATCH_9 +#define VP9_COUNT_SWAP_BUFFER HEVC_ASSIST_SCRATCH_A +#define 
VP9_SEG_MAP_BUFFER HEVC_ASSIST_SCRATCH_B +//#define HEVC_SCALELUT HEVC_ASSIST_SCRATCH_D +#define HEVC_WAIT_FLAG HEVC_ASSIST_SCRATCH_E +#define RPM_CMD_REG HEVC_ASSIST_SCRATCH_F +#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_F +#define HEVC_STREAM_SWAP_TEST HEVC_ASSIST_SCRATCH_L +#ifdef MULTI_INSTANCE_SUPPORT +#define HEVC_DECODE_COUNT HEVC_ASSIST_SCRATCH_M +#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N +#else +#define HEVC_DECODE_PIC_BEGIN_REG HEVC_ASSIST_SCRATCH_M +#define HEVC_DECODE_PIC_NUM_REG HEVC_ASSIST_SCRATCH_N +#endif +#define DEBUG_REG1 HEVC_ASSIST_SCRATCH_G +#define DEBUG_REG2 HEVC_ASSIST_SCRATCH_H + + +/* + *ucode parser/search control + *bit 0: 0, header auto parse; 1, header manual parse + *bit 1: 0, auto skip for noneseamless stream; 1, no skip + *bit [3:2]: valid when bit1==0; + *0, auto skip nal before first vps/sps/pps/idr; + *1, auto skip nal before first vps/sps/pps + *2, auto skip nal before first vps/sps/pps, + * and not decode until the first I slice (with slice address of 0) + * + *3, auto skip before first I slice (nal_type >=16 && nal_type<=21) + *bit [15:4] nal skip count (valid when bit0 == 1 (manual mode) ) + *bit [16]: for NAL_UNIT_EOS when bit0 is 0: + * 0, send SEARCH_DONE to arm ; 1, do not send SEARCH_DONE to arm + *bit [17]: for NAL_SEI when bit0 is 0: + * 0, do not parse SEI in ucode; 1, parse SEI in ucode + *bit [31:20]: used by ucode for debug purpose + */ +#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I + /*[31:24] chip feature + 31: 0, use MBOX1; 1, use MBOX0 + */ +#define DECODE_MODE HEVC_ASSIST_SCRATCH_J +#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K + +#ifdef MULTI_INSTANCE_SUPPORT +#define RPM_BUF_SIZE (0x400 * 2) +#else +#define RPM_BUF_SIZE (0x80*2) +#endif +#define LMEM_BUF_SIZE (0x400 * 2) + +//#define VBH_BUF_SIZE (2 * 16 * 2304) +//#define VBH_BUF_COUNT 4 + + /*mmu_vbh buf is used by HEVC_SAO_MMU_VH0_ADDR, HEVC_SAO_MMU_VH1_ADDR*/ +#define VBH_BUF_SIZE_1080P 0x3000 +#define VBH_BUF_SIZE_4K 0x5000 +#define VBH_BUF_SIZE_8K 
0xa000 +#define VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh.buf_size / 2) + /*mmu_vbh_dw buf is used by HEVC_SAO_MMU_VH0_ADDR2,HEVC_SAO_MMU_VH1_ADDR2, + HEVC_DW_VH0_ADDDR, HEVC_DW_VH1_ADDDR*/ +#define DW_VBH_BUF_SIZE_1080P (VBH_BUF_SIZE_1080P * 2) +#define DW_VBH_BUF_SIZE_4K (VBH_BUF_SIZE_4K * 2) +#define DW_VBH_BUF_SIZE_8K (VBH_BUF_SIZE_8K * 2) +#define DW_VBH_BUF_SIZE(bufspec) (bufspec->mmu_vbh_dw.buf_size / 4) + +/* necessary 4K page size align for t7/t3 decoder and after */ +#define WORKBUF_ALIGN(addr) (ALIGN(addr, PAGE_SIZE)) + +#define WORK_BUF_SPEC_NUM 6 +static struct BuffInfo_s amvvp9_workbuff_spec[WORK_BUF_SPEC_NUM] = { + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x30000, + }, + .sao_vb = { + .buf_size = 0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = + * 32Kbytes (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + *each para 1024bytes(total:0x40000), + *data 1024bytes(total:0x40000) + */ + .buf_size = 0x80000, + }, + .dblk_data = { + .buf_size 
= 0x80000, + }, + .seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0xd800, + }, + .mmu_vbh = { + .buf_size = 0x5000, /*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif + .mpred_above = { + .buf_size = 0x10000, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = {/* 1080p, 0x40000 per buffer */ + .buf_size = 0x40000 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096, + .max_height = 2304, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0x30000, + }, + .sao_vb = { + .buf_size = 0x30000, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0x2800, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + * (0x8000) + */ + .buf_size = 0x8000, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + *each para 1024bytes(total:0x40000), + *data 1024bytes(total:0x40000) + */ + .buf_size = 0x80000, + }, + .dblk_data = { + .buf_size = 0x80000, + }, + 
.seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0xd800, + }, + .mmu_vbh = { + .buf_size = 0x5000,/*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif + .mpred_above = { + .buf_size = 0x10000, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + * //4k2k , 0x100000 per buffer + */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = 0x120000 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096*2, + .max_height = 2304*2, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000*2, + }, + .sao_abv = { + .buf_size = 0x30000*2, + }, + .sao_vb = { + .buf_size = 0x30000*2, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .sps = { + // SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .pps = { + // PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total 0x2000 bytes + .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0x2800*2, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0x8000*2, + }, + .dblk_para = { + // DBLK -> Max 256(4096/16) LCU, each para 1024bytes(total:0x40000), data 1024bytes(total:0x40000) + .buf_size = 0x80000*2, + }, + .dblk_data = { + .buf_size = 0x80000*2, + }, + 
.seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0xd800*4, + }, + .mmu_vbh = { + .buf_size = 0x5000*2, //2*16*(more than 2304)/4, 4K + }, +#if 0 + .cm_header = { + //.buf_size = MMU_COMPRESS_HEADER_SIZE*8, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + .buf_size = MMU_COMPRESS_HEADER_SIZE*16, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif + .mpred_above = { + .buf_size = 0x10000*2, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = 0x120000 * FRAME_BUFFERS * 4, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + /* 8M bytes */ + .max_width = 1920, + .max_height = 1088, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x1e00, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = + * 32Kbytes (0x8000) + */ + .buf_size = 0, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + *each para 1024bytes(total:0x40000), + *data 1024bytes(total:0x40000) + */ + .buf_size 
= 0x49000, + }, + .dblk_data = { + .buf_size = 0x49000, + }, + .seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0x3000, //0x2fd0, + }, + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_1080P, /*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif + .mpred_above = { + .buf_size = 0x2200, //0x21c0, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = {/* 1080p, 0x40000 per buffer */ + .buf_size = 0x48200 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096, + .max_height = 2304, + .ipp = { + /* IPP work space calculation : + * 4096 * (Y+CbCr+Flags) = 12k, round to 16k + */ + .buf_size = 0x4000, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + /* SHORT_TERM_RPS - Max 64 set, 16 entry every set, + * total 64x16x2 = 2048 bytes (0x800) + */ + .buf_size = 0x800, + }, + .vps = { + /* VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .sps = { + /* SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, + * total 0x0800 bytes + */ + .buf_size = 0x800, + }, + .pps = { + /* PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, + * total 0x2000 bytes + */ + .buf_size = 0x2000, + }, + .sao_up = { + /* SAO UP STORE AREA - Max 640(10240/16) LCU, + * each has 16 bytes total 0x2800 bytes + */ + .buf_size = 0, + }, + .swap_buf = { + /* 256cyclex64bit = 2K bytes 0x800 + * (only 144 cycles valid) + */ + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + /* support up to 32 SCALELUT 1024x32 = 32Kbytes + * (0x8000) + */ + .buf_size = 0, + }, + .dblk_para = { + /* DBLK -> Max 256(4096/16) LCU, + *each para 1024bytes(total:0x40000), + *data 1024bytes(total:0x40000) + */ + .buf_size = 0x52800, 
+ }, + .dblk_data = { + .buf_size = 0x52800, + }, + .seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0xd800, + }, + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_4K,/*2*16*(more than 2304)/4, 4K*/ + }, +#if 0 + .cm_header = { + /*add one for keeper.*/ + .buf_size = MMU_COMPRESS_HEADER_SIZE * + (FRAME_BUFFERS + 1), + /* 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) */ + }, +#endif + .mpred_above = { + .buf_size = 0x4800, /* 2 * size of hevc*/ + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + /* .buf_size = 0x100000*16, + * //4k2k , 0x100000 per buffer + */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = 0x145400 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + }, + { + .max_width = 4096*2, + .max_height = 2304*2, + .ipp = { + // IPP work space calculation : 4096 * (Y+CbCr+Flags) = 12k, round to 16k + .buf_size = 0x4000*2, + }, + .sao_abv = { + .buf_size = 0, + }, + .sao_vb = { + .buf_size = 0, + }, + .short_term_rps = { + // SHORT_TERM_RPS - Max 64 set, 16 entry every set, total 64x16x2 = 2048 bytes (0x800) + .buf_size = 0x800, + }, + .vps = { + // VPS STORE AREA - Max 16 VPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .sps = { + // SPS STORE AREA - Max 16 SPS, each has 0x80 bytes, total 0x0800 bytes + .buf_size = 0x800, + }, + .pps = { + // PPS STORE AREA - Max 64 PPS, each has 0x80 bytes, total 0x2000 bytes + .buf_size = 0x2000, + }, + .sao_up = { + // SAO UP STORE AREA - Max 640(10240/16) LCU, each has 16 bytes total 0x2800 bytes + .buf_size = 0, + }, + .swap_buf = { + // 256cyclex64bit = 2K bytes 0x800 (only 144 cycles valid) + .buf_size = 0x800, + }, + .swap_buf2 = { + .buf_size = 0x800, + }, + .scalelut = { + // support up to 32 SCALELUT 1024x32 = 32Kbytes (0x8000) + .buf_size = 0, + }, + .dblk_para = { + // DBLK -> Max 256(4096/16) LCU, each para 1024bytes(total:0x40000), data 1024bytes(total:0x40000) + .buf_size = 0xa4800, + }, + .dblk_data = { + .buf_size 
= 0xa4800, + }, + .seg_map = { + /*4096x2304/64/64 *24 = 0xd800 Bytes*/ + .buf_size = 0x36000, + }, + .mmu_vbh = { + .buf_size = VBH_BUF_SIZE_8K, //2*16*(more than 2304)/4, 4K + }, +#if 0 + .cm_header = { + //.buf_size = MMU_COMPRESS_HEADER_SIZE*8, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + .buf_size = MMU_COMPRESS_HEADER_SIZE*16, // 0x44000 = ((1088*2*1024*4)/32/4)*(32/8) + }, +#endif + .mpred_above = { + .buf_size = 0x9000, + }, +#ifdef MV_USE_FIXED_BUF + .mpred_mv = { + //4k2k , 0x100000 per buffer */ + /* 4096x2304 , 0x120000 per buffer */ + .buf_size = 0x514800 * FRAME_BUFFERS, + }, +#endif + .rpm = { + .buf_size = RPM_BUF_SIZE, + }, + .lmem = { + .buf_size = 0x400 * 2, + } + } +}; + + +/*Losless compression body buffer size 4K per 64x32 (jt)*/ +int compute_losless_comp_body_size(int width, int height, + uint8_t is_bit_depth_10) +{ + int width_x64; + int height_x32; + int bsize; + + width_x64 = width + 63; + width_x64 >>= 6; + height_x32 = height + 31; + height_x32 >>= 5; + bsize = (is_bit_depth_10?4096:3200)*width_x64*height_x32; + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info("%s(%d,%d,%d)=>%d\n", + __func__, width, height, + is_bit_depth_10, bsize); + + return bsize; +} + +/* Losless compression header buffer size 32bytes per 128x64 (jt)*/ +static int compute_losless_comp_header_size(int width, int height) +{ + int width_x128; + int height_x64; + int hsize; + + width_x128 = width + 127; + width_x128 >>= 7; + height_x64 = height + 63; + height_x64 >>= 6; + + hsize = 32 * width_x128 * height_x64; + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info("%s(%d,%d)=>%d\n", + __func__, width, height, + hsize); + + return hsize; +} + +static void init_buff_spec(struct VP9Decoder_s *pbi, + struct BuffInfo_s *buf_spec) +{ + void *mem_start_virt; + + buf_spec->ipp.buf_start = + WORKBUF_ALIGN(buf_spec->start_adr); + buf_spec->sao_abv.buf_start = + WORKBUF_ALIGN(buf_spec->ipp.buf_start + buf_spec->ipp.buf_size); + buf_spec->sao_vb.buf_start = + 
WORKBUF_ALIGN(buf_spec->sao_abv.buf_start + buf_spec->sao_abv.buf_size); + buf_spec->short_term_rps.buf_start = + WORKBUF_ALIGN(buf_spec->sao_vb.buf_start + buf_spec->sao_vb.buf_size); + buf_spec->vps.buf_start = + WORKBUF_ALIGN(buf_spec->short_term_rps.buf_start + buf_spec->short_term_rps.buf_size); + buf_spec->sps.buf_start = + WORKBUF_ALIGN(buf_spec->vps.buf_start + buf_spec->vps.buf_size); + buf_spec->pps.buf_start = + WORKBUF_ALIGN(buf_spec->sps.buf_start + buf_spec->sps.buf_size); + buf_spec->sao_up.buf_start = + WORKBUF_ALIGN(buf_spec->pps.buf_start + buf_spec->pps.buf_size); + buf_spec->swap_buf.buf_start = + WORKBUF_ALIGN(buf_spec->sao_up.buf_start + buf_spec->sao_up.buf_size); + buf_spec->swap_buf2.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf.buf_start + buf_spec->swap_buf.buf_size); + buf_spec->scalelut.buf_start = + WORKBUF_ALIGN(buf_spec->swap_buf2.buf_start + buf_spec->swap_buf2.buf_size); + buf_spec->dblk_para.buf_start = + WORKBUF_ALIGN(buf_spec->scalelut.buf_start + buf_spec->scalelut.buf_size); + buf_spec->dblk_data.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_para.buf_start + buf_spec->dblk_para.buf_size); + buf_spec->seg_map.buf_start = + WORKBUF_ALIGN(buf_spec->dblk_data.buf_start + buf_spec->dblk_data.buf_size); + if (pbi == NULL || pbi->mmu_enable) { + buf_spec->mmu_vbh.buf_start = + WORKBUF_ALIGN(buf_spec->seg_map.buf_start + buf_spec->seg_map.buf_size); + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->mmu_vbh.buf_start + buf_spec->mmu_vbh.buf_size); + } else { + buf_spec->mpred_above.buf_start = + WORKBUF_ALIGN(buf_spec->seg_map.buf_start + buf_spec->seg_map.buf_size); + } +#ifdef MV_USE_FIXED_BUF + buf_spec->mpred_mv.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + buf_spec->mpred_above.buf_size); + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_mv.buf_start + buf_spec->mpred_mv.buf_size); +#else + buf_spec->rpm.buf_start = + WORKBUF_ALIGN(buf_spec->mpred_above.buf_start + 
buf_spec->mpred_above.buf_size); +#endif + buf_spec->lmem.buf_start = + WORKBUF_ALIGN(buf_spec->rpm.buf_start + buf_spec->rpm.buf_size); + buf_spec->end_adr = + WORKBUF_ALIGN(buf_spec->lmem.buf_start + buf_spec->lmem.buf_size); + + if (!pbi) + return; + + if (!vdec_secure(hw_to_vdec(pbi))) { + mem_start_virt = + codec_mm_phys_to_virt(buf_spec->dblk_para.buf_start); + if (mem_start_virt) { + memset(mem_start_virt, 0, + buf_spec->dblk_para.buf_size); + codec_mm_dma_flush(mem_start_virt, + buf_spec->dblk_para.buf_size, + DMA_TO_DEVICE); + } else { + mem_start_virt = codec_mm_vmap( + buf_spec->dblk_para.buf_start, + buf_spec->dblk_para.buf_size); + if (mem_start_virt) { + memset(mem_start_virt, 0, + buf_spec->dblk_para.buf_size); + codec_mm_dma_flush(mem_start_virt, + buf_spec->dblk_para.buf_size, + DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(mem_start_virt); + } else { + /*not virt for tvp playing, + may need clear on ucode.*/ + pr_err("mem_start_virt failed\n"); + } + } + } + + if (debug) { + pr_info("%s workspace (%x %x) size = %x\n", __func__, + buf_spec->start_adr, buf_spec->end_adr, + buf_spec->end_adr - buf_spec->start_adr); + } + + if (debug) { + pr_info("ipp.buf_start :%x\n", + buf_spec->ipp.buf_start); + pr_info("sao_abv.buf_start :%x\n", + buf_spec->sao_abv.buf_start); + pr_info("sao_vb.buf_start :%x\n", + buf_spec->sao_vb.buf_start); + pr_info("short_term_rps.buf_start :%x\n", + buf_spec->short_term_rps.buf_start); + pr_info("vps.buf_start :%x\n", + buf_spec->vps.buf_start); + pr_info("sps.buf_start :%x\n", + buf_spec->sps.buf_start); + pr_info("pps.buf_start :%x\n", + buf_spec->pps.buf_start); + pr_info("sao_up.buf_start :%x\n", + buf_spec->sao_up.buf_start); + pr_info("swap_buf.buf_start :%x\n", + buf_spec->swap_buf.buf_start); + pr_info("swap_buf2.buf_start :%x\n", + buf_spec->swap_buf2.buf_start); + pr_info("scalelut.buf_start :%x\n", + buf_spec->scalelut.buf_start); + pr_info("dblk_para.buf_start :%x\n", + buf_spec->dblk_para.buf_start); + 
pr_info("dblk_data.buf_start :%x\n", + buf_spec->dblk_data.buf_start); + pr_info("seg_map.buf_start :%x\n", + buf_spec->seg_map.buf_start); + if (pbi->mmu_enable) { + pr_info("mmu_vbh.buf_start :%x\n", + buf_spec->mmu_vbh.buf_start); + } + pr_info("mpred_above.buf_start :%x\n", + buf_spec->mpred_above.buf_start); +#ifdef MV_USE_FIXED_BUF + pr_info("mpred_mv.buf_start :%x\n", + buf_spec->mpred_mv.buf_start); +#endif + if ((debug & VP9_DEBUG_SEND_PARAM_WITH_REG) == 0) { + pr_info("rpm.buf_start :%x\n", + buf_spec->rpm.buf_start); + } + } +} + +/* cache_util.c */ +#define THODIYIL_MCRCC_CANVAS_ALGX 4 + +static u32 mcrcc_cache_alg_flag = THODIYIL_MCRCC_CANVAS_ALGX; + +static void mcrcc_perfcount_reset(void) +{ + if (debug & VP9_DEBUG_CACHE) + pr_info("[cache_util.c] Entered mcrcc_perfcount_reset...\n"); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x1); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)0x0); + return; +} + +static unsigned raw_mcr_cnt_total_prev; +static unsigned hit_mcr_0_cnt_total_prev; +static unsigned hit_mcr_1_cnt_total_prev; +static unsigned byp_mcr_cnt_nchcanv_total_prev; +static unsigned byp_mcr_cnt_nchoutwin_total_prev; + +static void mcrcc_get_hitrate(unsigned reset_pre) +{ + unsigned delta_hit_mcr_0_cnt; + unsigned delta_hit_mcr_1_cnt; + unsigned delta_raw_mcr_cnt; + unsigned delta_mcr_cnt_nchcanv; + unsigned delta_mcr_cnt_nchoutwin; + + unsigned tmp; + unsigned raw_mcr_cnt; + unsigned hit_mcr_cnt; + unsigned byp_mcr_cnt_nchoutwin; + unsigned byp_mcr_cnt_nchcanv; + int hitrate; + if (reset_pre) { + raw_mcr_cnt_total_prev = 0; + hit_mcr_0_cnt_total_prev = 0; + hit_mcr_1_cnt_total_prev = 0; + byp_mcr_cnt_nchcanv_total_prev = 0; + byp_mcr_cnt_nchoutwin_total_prev = 0; + } + if (debug & VP9_DEBUG_CACHE) + pr_info("[cache_util.c] Entered mcrcc_get_hitrate...\n"); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x0<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned 
int)(0x1<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x2<<1)); + byp_mcr_cnt_nchoutwin = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x3<<1)); + byp_mcr_cnt_nchcanv = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + + if (debug & VP9_DEBUG_CACHE) + pr_info("raw_mcr_cnt_total: %d\n", + raw_mcr_cnt); + if (debug & VP9_DEBUG_CACHE) + pr_info("hit_mcr_cnt_total: %d\n", + hit_mcr_cnt); + if (debug & VP9_DEBUG_CACHE) + pr_info("byp_mcr_cnt_nchoutwin_total: %d\n", + byp_mcr_cnt_nchoutwin); + if (debug & VP9_DEBUG_CACHE) + pr_info("byp_mcr_cnt_nchcanv_total: %d\n", + byp_mcr_cnt_nchcanv); + + delta_raw_mcr_cnt = raw_mcr_cnt - + raw_mcr_cnt_total_prev; + delta_mcr_cnt_nchcanv = byp_mcr_cnt_nchcanv - + byp_mcr_cnt_nchcanv_total_prev; + delta_mcr_cnt_nchoutwin = byp_mcr_cnt_nchoutwin - + byp_mcr_cnt_nchoutwin_total_prev; + raw_mcr_cnt_total_prev = raw_mcr_cnt; + byp_mcr_cnt_nchcanv_total_prev = byp_mcr_cnt_nchcanv; + byp_mcr_cnt_nchoutwin_total_prev = byp_mcr_cnt_nchoutwin; + + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x4<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & VP9_DEBUG_CACHE) + pr_info("miss_mcr_0_cnt_total: %d\n", tmp); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x5<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & VP9_DEBUG_CACHE) + pr_info("miss_mcr_1_cnt_total: %d\n", tmp); + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x6<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & VP9_DEBUG_CACHE) + pr_info("hit_mcr_0_cnt_total: %d\n", tmp); + delta_hit_mcr_0_cnt = tmp - hit_mcr_0_cnt_total_prev; + hit_mcr_0_cnt_total_prev = tmp; + WRITE_VREG(HEVCD_MCRCC_PERFMON_CTL, (unsigned int)(0x7<<1)); + tmp = READ_VREG(HEVCD_MCRCC_PERFMON_DATA); + if (debug & VP9_DEBUG_CACHE) + pr_info("hit_mcr_1_cnt_total: %d\n", tmp); + delta_hit_mcr_1_cnt = tmp - hit_mcr_1_cnt_total_prev; + hit_mcr_1_cnt_total_prev = 
tmp; + + if (delta_raw_mcr_cnt != 0) { + hitrate = 100 * delta_hit_mcr_0_cnt + / delta_raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("CANV0_HIT_RATE : %d\n", hitrate); + hitrate = 100 * delta_hit_mcr_1_cnt + / delta_raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("CANV1_HIT_RATE : %d\n", hitrate); + hitrate = 100 * delta_mcr_cnt_nchcanv + / delta_raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("NONCACH_CANV_BYP_RATE : %d\n", hitrate); + hitrate = 100 * delta_mcr_cnt_nchoutwin + / delta_raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("CACHE_OUTWIN_BYP_RATE : %d\n", hitrate); + } + + + if (raw_mcr_cnt != 0) { + hitrate = 100 * hit_mcr_cnt / raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("MCRCC_HIT_RATE : %d\n", hitrate); + hitrate = 100 * (byp_mcr_cnt_nchoutwin + byp_mcr_cnt_nchcanv) + / raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("MCRCC_BYP_RATE : %d\n", hitrate); + } else { + if (debug & VP9_DEBUG_CACHE) + pr_info("MCRCC_HIT_RATE : na\n"); + if (debug & VP9_DEBUG_CACHE) + pr_info("MCRCC_BYP_RATE : na\n"); + } + return; +} + + +static void decomp_perfcount_reset(void) +{ + if (debug & VP9_DEBUG_CACHE) + pr_info("[cache_util.c] Entered decomp_perfcount_reset...\n"); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x1); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)0x0); + return; +} + +static void decomp_get_hitrate(void) +{ + unsigned raw_mcr_cnt; + unsigned hit_mcr_cnt; + int hitrate; + if (debug & VP9_DEBUG_CACHE) + pr_info("[cache_util.c] Entered decomp_get_hitrate...\n"); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x0<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x1<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + + if (debug & VP9_DEBUG_CACHE) + pr_info("hcache_raw_cnt_total: %d\n", raw_mcr_cnt); + if (debug & VP9_DEBUG_CACHE) + pr_info("hcache_hit_cnt_total: %d\n", hit_mcr_cnt); + + if 
(raw_mcr_cnt != 0) { + hitrate = hit_mcr_cnt * 100 / raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_HCACHE_HIT_RATE : %d\n", hitrate); + } else { + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_HCACHE_HIT_RATE : na\n"); + } + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x2<<1)); + raw_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x3<<1)); + hit_mcr_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + + if (debug & VP9_DEBUG_CACHE) + pr_info("dcache_raw_cnt_total: %d\n", raw_mcr_cnt); + if (debug & VP9_DEBUG_CACHE) + pr_info("dcache_hit_cnt_total: %d\n", hit_mcr_cnt); + + if (raw_mcr_cnt != 0) { + hitrate = hit_mcr_cnt * 100 / raw_mcr_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_DCACHE_HIT_RATE : %d\n", hitrate); + } else { + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_DCACHE_HIT_RATE : na\n"); + } + return; +} + +static void decomp_get_comprate(void) +{ + unsigned raw_ucomp_cnt; + unsigned fast_comp_cnt; + unsigned slow_comp_cnt; + int comprate; + + if (debug & VP9_DEBUG_CACHE) + pr_info("[cache_util.c] Entered decomp_get_comprate...\n"); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x4<<1)); + fast_comp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x5<<1)); + slow_comp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + WRITE_VREG(HEVCD_MPP_DECOMP_PERFMON_CTL, (unsigned int)(0x6<<1)); + raw_ucomp_cnt = READ_VREG(HEVCD_MPP_DECOMP_PERFMON_DATA); + + if (debug & VP9_DEBUG_CACHE) + pr_info("decomp_fast_comp_total: %d\n", fast_comp_cnt); + if (debug & VP9_DEBUG_CACHE) + pr_info("decomp_slow_comp_total: %d\n", slow_comp_cnt); + if (debug & VP9_DEBUG_CACHE) + pr_info("decomp_raw_uncomp_total: %d\n", raw_ucomp_cnt); + + if (raw_ucomp_cnt != 0) { + comprate = (fast_comp_cnt + slow_comp_cnt) + * 100 / raw_ucomp_cnt; + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_COMP_RATIO : %d\n", comprate); + } else 
{ + if (debug & VP9_DEBUG_CACHE) + pr_info("DECOMP_COMP_RATIO : na\n"); + } + return; +} +/* cache_util.c end */ + +/*==================================================== + *======================================================================== + *vp9_prob define + *======================================================================== + */ +#define VP9_PARTITION_START 0 +#define VP9_PARTITION_SIZE_STEP (3 * 4) +#define VP9_PARTITION_ONE_SIZE (4 * VP9_PARTITION_SIZE_STEP) +#define VP9_PARTITION_KEY_START 0 +#define VP9_PARTITION_P_START VP9_PARTITION_ONE_SIZE +#define VP9_PARTITION_SIZE (2 * VP9_PARTITION_ONE_SIZE) +#define VP9_SKIP_START (VP9_PARTITION_START + VP9_PARTITION_SIZE) +#define VP9_SKIP_SIZE 4 /* only use 3*/ +#define VP9_TX_MODE_START (VP9_SKIP_START+VP9_SKIP_SIZE) +#define VP9_TX_MODE_8_0_OFFSET 0 +#define VP9_TX_MODE_8_1_OFFSET 1 +#define VP9_TX_MODE_16_0_OFFSET 2 +#define VP9_TX_MODE_16_1_OFFSET 4 +#define VP9_TX_MODE_32_0_OFFSET 6 +#define VP9_TX_MODE_32_1_OFFSET 9 +#define VP9_TX_MODE_SIZE 12 +#define VP9_COEF_START (VP9_TX_MODE_START+VP9_TX_MODE_SIZE) +#define VP9_COEF_BAND_0_OFFSET 0 +#define VP9_COEF_BAND_1_OFFSET (VP9_COEF_BAND_0_OFFSET + 3 * 3 + 1) +#define VP9_COEF_BAND_2_OFFSET (VP9_COEF_BAND_1_OFFSET + 6 * 3) +#define VP9_COEF_BAND_3_OFFSET (VP9_COEF_BAND_2_OFFSET + 6 * 3) +#define VP9_COEF_BAND_4_OFFSET (VP9_COEF_BAND_3_OFFSET + 6 * 3) +#define VP9_COEF_BAND_5_OFFSET (VP9_COEF_BAND_4_OFFSET + 6 * 3) +#define VP9_COEF_SIZE_ONE_SET 100 /* ((3 +5*6)*3 + 1 padding)*/ +#define VP9_COEF_4X4_START (VP9_COEF_START + 0 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_8X8_START (VP9_COEF_START + 4 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_16X16_START (VP9_COEF_START + 8 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_32X32_START (VP9_COEF_START + 12 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_SIZE_PLANE (2 * VP9_COEF_SIZE_ONE_SET) +#define VP9_COEF_SIZE (4 * 2 * 2 * VP9_COEF_SIZE_ONE_SET) +#define VP9_INTER_MODE_START (VP9_COEF_START+VP9_COEF_SIZE) 
+#define VP9_INTER_MODE_SIZE 24 /* only use 21 ( #*7)*/ +#define VP9_INTERP_START (VP9_INTER_MODE_START+VP9_INTER_MODE_SIZE) +#define VP9_INTERP_SIZE 8 +#define VP9_INTRA_INTER_START (VP9_INTERP_START+VP9_INTERP_SIZE) +#define VP9_INTRA_INTER_SIZE 4 +#define VP9_INTERP_INTRA_INTER_START VP9_INTERP_START +#define VP9_INTERP_INTRA_INTER_SIZE (VP9_INTERP_SIZE + VP9_INTRA_INTER_SIZE) +#define VP9_COMP_INTER_START \ + (VP9_INTERP_INTRA_INTER_START+VP9_INTERP_INTRA_INTER_SIZE) +#define VP9_COMP_INTER_SIZE 5 +#define VP9_COMP_REF_START (VP9_COMP_INTER_START+VP9_COMP_INTER_SIZE) +#define VP9_COMP_REF_SIZE 5 +#define VP9_SINGLE_REF_START (VP9_COMP_REF_START+VP9_COMP_REF_SIZE) +#define VP9_SINGLE_REF_SIZE 10 +#define VP9_REF_MODE_START VP9_COMP_INTER_START +#define VP9_REF_MODE_SIZE \ + (VP9_COMP_INTER_SIZE+VP9_COMP_REF_SIZE+VP9_SINGLE_REF_SIZE) +#define VP9_IF_Y_MODE_START (VP9_REF_MODE_START+VP9_REF_MODE_SIZE) +#define VP9_IF_Y_MODE_SIZE 36 +#define VP9_IF_UV_MODE_START (VP9_IF_Y_MODE_START+VP9_IF_Y_MODE_SIZE) +#define VP9_IF_UV_MODE_SIZE 92 /* only use 90*/ +#define VP9_MV_JOINTS_START (VP9_IF_UV_MODE_START+VP9_IF_UV_MODE_SIZE) +#define VP9_MV_JOINTS_SIZE 3 +#define VP9_MV_SIGN_0_START (VP9_MV_JOINTS_START+VP9_MV_JOINTS_SIZE) +#define VP9_MV_SIGN_0_SIZE 1 +#define VP9_MV_CLASSES_0_START (VP9_MV_SIGN_0_START+VP9_MV_SIGN_0_SIZE) +#define VP9_MV_CLASSES_0_SIZE 10 +#define VP9_MV_CLASS0_0_START (VP9_MV_CLASSES_0_START+VP9_MV_CLASSES_0_SIZE) +#define VP9_MV_CLASS0_0_SIZE 1 +#define VP9_MV_BITS_0_START (VP9_MV_CLASS0_0_START+VP9_MV_CLASS0_0_SIZE) +#define VP9_MV_BITS_0_SIZE 10 +#define VP9_MV_SIGN_1_START (VP9_MV_BITS_0_START+VP9_MV_BITS_0_SIZE) +#define VP9_MV_SIGN_1_SIZE 1 +#define VP9_MV_CLASSES_1_START \ + (VP9_MV_SIGN_1_START+VP9_MV_SIGN_1_SIZE) +#define VP9_MV_CLASSES_1_SIZE 10 +#define VP9_MV_CLASS0_1_START \ + (VP9_MV_CLASSES_1_START+VP9_MV_CLASSES_1_SIZE) +#define VP9_MV_CLASS0_1_SIZE 1 +#define VP9_MV_BITS_1_START \ + (VP9_MV_CLASS0_1_START+VP9_MV_CLASS0_1_SIZE) 
+#define VP9_MV_BITS_1_SIZE 10 +#define VP9_MV_CLASS0_FP_0_START \ + (VP9_MV_BITS_1_START+VP9_MV_BITS_1_SIZE) +#define VP9_MV_CLASS0_FP_0_SIZE 9 +#define VP9_MV_CLASS0_FP_1_START \ + (VP9_MV_CLASS0_FP_0_START+VP9_MV_CLASS0_FP_0_SIZE) +#define VP9_MV_CLASS0_FP_1_SIZE 9 +#define VP9_MV_CLASS0_HP_0_START \ + (VP9_MV_CLASS0_FP_1_START+VP9_MV_CLASS0_FP_1_SIZE) +#define VP9_MV_CLASS0_HP_0_SIZE 2 +#define VP9_MV_CLASS0_HP_1_START \ + (VP9_MV_CLASS0_HP_0_START+VP9_MV_CLASS0_HP_0_SIZE) +#define VP9_MV_CLASS0_HP_1_SIZE 2 +#define VP9_MV_START VP9_MV_JOINTS_START +#define VP9_MV_SIZE 72 /*only use 69*/ + +#define VP9_TOTAL_SIZE (VP9_MV_START + VP9_MV_SIZE) + + +/*======================================================================== + * vp9_count_mem define + *======================================================================== + */ +#define VP9_COEF_COUNT_START 0 +#define VP9_COEF_COUNT_BAND_0_OFFSET 0 +#define VP9_COEF_COUNT_BAND_1_OFFSET \ + (VP9_COEF_COUNT_BAND_0_OFFSET + 3*5) +#define VP9_COEF_COUNT_BAND_2_OFFSET \ + (VP9_COEF_COUNT_BAND_1_OFFSET + 6*5) +#define VP9_COEF_COUNT_BAND_3_OFFSET \ + (VP9_COEF_COUNT_BAND_2_OFFSET + 6*5) +#define VP9_COEF_COUNT_BAND_4_OFFSET \ + (VP9_COEF_COUNT_BAND_3_OFFSET + 6*5) +#define VP9_COEF_COUNT_BAND_5_OFFSET \ + (VP9_COEF_COUNT_BAND_4_OFFSET + 6*5) +#define VP9_COEF_COUNT_SIZE_ONE_SET 165 /* ((3 +5*6)*5 */ +#define VP9_COEF_COUNT_4X4_START \ + (VP9_COEF_COUNT_START + 0*VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_8X8_START \ + (VP9_COEF_COUNT_START + 4*VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_16X16_START \ + (VP9_COEF_COUNT_START + 8*VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_32X32_START \ + (VP9_COEF_COUNT_START + 12*VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_SIZE_PLANE (2 * VP9_COEF_COUNT_SIZE_ONE_SET) +#define VP9_COEF_COUNT_SIZE (4 * 2 * 2 * VP9_COEF_COUNT_SIZE_ONE_SET) + +#define VP9_INTRA_INTER_COUNT_START \ + (VP9_COEF_COUNT_START+VP9_COEF_COUNT_SIZE) +#define 
VP9_INTRA_INTER_COUNT_SIZE (4*2) +#define VP9_COMP_INTER_COUNT_START \ + (VP9_INTRA_INTER_COUNT_START+VP9_INTRA_INTER_COUNT_SIZE) +#define VP9_COMP_INTER_COUNT_SIZE (5*2) +#define VP9_COMP_REF_COUNT_START \ + (VP9_COMP_INTER_COUNT_START+VP9_COMP_INTER_COUNT_SIZE) +#define VP9_COMP_REF_COUNT_SIZE (5*2) +#define VP9_SINGLE_REF_COUNT_START \ + (VP9_COMP_REF_COUNT_START+VP9_COMP_REF_COUNT_SIZE) +#define VP9_SINGLE_REF_COUNT_SIZE (10*2) +#define VP9_TX_MODE_COUNT_START \ + (VP9_SINGLE_REF_COUNT_START+VP9_SINGLE_REF_COUNT_SIZE) +#define VP9_TX_MODE_COUNT_SIZE (12*2) +#define VP9_SKIP_COUNT_START \ + (VP9_TX_MODE_COUNT_START+VP9_TX_MODE_COUNT_SIZE) +#define VP9_SKIP_COUNT_SIZE (3*2) +#define VP9_MV_SIGN_0_COUNT_START \ + (VP9_SKIP_COUNT_START+VP9_SKIP_COUNT_SIZE) +#define VP9_MV_SIGN_0_COUNT_SIZE (1*2) +#define VP9_MV_SIGN_1_COUNT_START \ + (VP9_MV_SIGN_0_COUNT_START+VP9_MV_SIGN_0_COUNT_SIZE) +#define VP9_MV_SIGN_1_COUNT_SIZE (1*2) +#define VP9_MV_BITS_0_COUNT_START \ + (VP9_MV_SIGN_1_COUNT_START+VP9_MV_SIGN_1_COUNT_SIZE) +#define VP9_MV_BITS_0_COUNT_SIZE (10*2) +#define VP9_MV_BITS_1_COUNT_START \ + (VP9_MV_BITS_0_COUNT_START+VP9_MV_BITS_0_COUNT_SIZE) +#define VP9_MV_BITS_1_COUNT_SIZE (10*2) +#define VP9_MV_CLASS0_HP_0_COUNT_START \ + (VP9_MV_BITS_1_COUNT_START+VP9_MV_BITS_1_COUNT_SIZE) +#define VP9_MV_CLASS0_HP_0_COUNT_SIZE (2*2) +#define VP9_MV_CLASS0_HP_1_COUNT_START \ + (VP9_MV_CLASS0_HP_0_COUNT_START+VP9_MV_CLASS0_HP_0_COUNT_SIZE) +#define VP9_MV_CLASS0_HP_1_COUNT_SIZE (2*2) +/* Start merge_tree*/ +#define VP9_INTER_MODE_COUNT_START \ + (VP9_MV_CLASS0_HP_1_COUNT_START+VP9_MV_CLASS0_HP_1_COUNT_SIZE) +#define VP9_INTER_MODE_COUNT_SIZE (7*4) +#define VP9_IF_Y_MODE_COUNT_START \ + (VP9_INTER_MODE_COUNT_START+VP9_INTER_MODE_COUNT_SIZE) +#define VP9_IF_Y_MODE_COUNT_SIZE (10*4) +#define VP9_IF_UV_MODE_COUNT_START \ + (VP9_IF_Y_MODE_COUNT_START+VP9_IF_Y_MODE_COUNT_SIZE) +#define VP9_IF_UV_MODE_COUNT_SIZE (10*10) +#define VP9_PARTITION_P_COUNT_START \ + 
(VP9_IF_UV_MODE_COUNT_START+VP9_IF_UV_MODE_COUNT_SIZE) +#define VP9_PARTITION_P_COUNT_SIZE (4*4*4) +#define VP9_INTERP_COUNT_START \ + (VP9_PARTITION_P_COUNT_START+VP9_PARTITION_P_COUNT_SIZE) +#define VP9_INTERP_COUNT_SIZE (4*3) +#define VP9_MV_JOINTS_COUNT_START \ + (VP9_INTERP_COUNT_START+VP9_INTERP_COUNT_SIZE) +#define VP9_MV_JOINTS_COUNT_SIZE (1 * 4) +#define VP9_MV_CLASSES_0_COUNT_START \ + (VP9_MV_JOINTS_COUNT_START+VP9_MV_JOINTS_COUNT_SIZE) +#define VP9_MV_CLASSES_0_COUNT_SIZE (1*11) +#define VP9_MV_CLASS0_0_COUNT_START \ + (VP9_MV_CLASSES_0_COUNT_START+VP9_MV_CLASSES_0_COUNT_SIZE) +#define VP9_MV_CLASS0_0_COUNT_SIZE (1*2) +#define VP9_MV_CLASSES_1_COUNT_START \ + (VP9_MV_CLASS0_0_COUNT_START+VP9_MV_CLASS0_0_COUNT_SIZE) +#define VP9_MV_CLASSES_1_COUNT_SIZE (1*11) +#define VP9_MV_CLASS0_1_COUNT_START \ + (VP9_MV_CLASSES_1_COUNT_START+VP9_MV_CLASSES_1_COUNT_SIZE) +#define VP9_MV_CLASS0_1_COUNT_SIZE (1*2) +#define VP9_MV_CLASS0_FP_0_COUNT_START \ + (VP9_MV_CLASS0_1_COUNT_START+VP9_MV_CLASS0_1_COUNT_SIZE) +#define VP9_MV_CLASS0_FP_0_COUNT_SIZE (3*4) +#define VP9_MV_CLASS0_FP_1_COUNT_START \ + (VP9_MV_CLASS0_FP_0_COUNT_START+VP9_MV_CLASS0_FP_0_COUNT_SIZE) +#define VP9_MV_CLASS0_FP_1_COUNT_SIZE (3*4) + + +#define DC_PRED 0 /* Average of above and left pixels*/ +#define V_PRED 1 /* Vertical*/ +#define H_PRED 2 /* Horizontal*/ +#define D45_PRED 3 /*Directional 45 deg = round(arctan(1/1) * 180/pi)*/ +#define D135_PRED 4 /* Directional 135 deg = 180 - 45*/ +#define D117_PRED 5 /* Directional 117 deg = 180 - 63*/ +#define D153_PRED 6 /* Directional 153 deg = 180 - 27*/ +#define D207_PRED 7 /* Directional 207 deg = 180 + 27*/ +#define D63_PRED 8 /*Directional 63 deg = round(arctan(2/1) * 180/pi)*/ +#define TM_PRED 9 /*True-motion*/ + +int clip_prob(int p) +{ + return (p > 255) ? 255 : (p < 1) ? 
1 : p; +} + +#define ROUND_POWER_OF_TWO(value, n) \ + (((value) + (1 << ((n) - 1))) >> (n)) + +#define MODE_MV_COUNT_SAT 20 +static const int count_to_update_factor[MODE_MV_COUNT_SAT + 1] = { + 0, 6, 12, 19, 25, 32, 38, 44, 51, 57, 64, + 70, 76, 83, 89, 96, 102, 108, 115, 121, 128 +}; + +void vp9_tree_merge_probs(unsigned int *prev_prob, unsigned int *cur_prob, + int coef_node_start, int tree_left, int tree_right, int tree_i, + int node) { + + int prob_32, prob_res, prob_shift; + int pre_prob, new_prob; + int den, m_count, get_prob, factor; + + prob_32 = prev_prob[coef_node_start / 4 * 2]; + prob_res = coef_node_start & 3; + prob_shift = prob_res * 8; + pre_prob = (prob_32 >> prob_shift) & 0xff; + + den = tree_left + tree_right; + + if (den == 0) + new_prob = pre_prob; + else { + m_count = (den < MODE_MV_COUNT_SAT) ? + den : MODE_MV_COUNT_SAT; + get_prob = clip_prob( + div_r32(((int64_t)tree_left * 256 + (den >> 1)), + den)); + /*weighted_prob*/ + factor = count_to_update_factor[m_count]; + new_prob = ROUND_POWER_OF_TWO(pre_prob * (256 - factor) + + get_prob * factor, 8); + } + cur_prob[coef_node_start / 4 * 2] = (cur_prob[coef_node_start / 4 * 2] + & (~(0xff << prob_shift))) | (new_prob << prob_shift); + + /*pr_info(" - [%d][%d] 0x%02X --> 0x%02X (0x%X 0x%X) (%X)\n", + *tree_i, node, pre_prob, new_prob, tree_left, tree_right, + *cur_prob[coef_node_start/4*2]); + */ +} + + +/*void adapt_coef_probs(void)*/ +void adapt_coef_probs(int pic_count, int prev_kf, int cur_kf, int pre_fc, + unsigned int *prev_prob, unsigned int *cur_prob, unsigned int *count) +{ + /* 80 * 64bits = 0xF00 ( use 0x1000 4K bytes) + *unsigned int prev_prob[496*2]; + *unsigned int cur_prob[496*2]; + *0x300 * 128bits = 0x3000 (32K Bytes) + *unsigned int count[0x300*4]; + */ + + int tx_size, coef_tx_size_start, coef_count_tx_size_start; + int plane, coef_plane_start, coef_count_plane_start; + int type, coef_type_start, coef_count_type_start; + int band, coef_band_start, coef_count_band_start; + int 
cxt_num; + int cxt, coef_cxt_start, coef_count_cxt_start; + int node, coef_node_start, coef_count_node_start; + + int tree_i, tree_left, tree_right; + int mvd_i; + + int count_sat = 24; + /*int update_factor = 112;*/ /*If COEF_MAX_UPDATE_FACTOR_AFTER_KEY, + *use 128 + */ + /* If COEF_MAX_UPDATE_FACTOR_AFTER_KEY, use 128*/ + /*int update_factor = (pic_count == 1) ? 128 : 112;*/ + int update_factor = cur_kf ? 112 : + prev_kf ? 128 : 112; + + int prob_32; + int prob_res; + int prob_shift; + int pre_prob; + + int num, den; + int get_prob; + int m_count; + int factor; + + int new_prob; + + if (debug & VP9_DEBUG_MERGE) + pr_info + ("\n ##adapt_coef_probs (pre_fc : %d ,prev_kf : %d,cur_kf : %d)##\n\n", + pre_fc, prev_kf, cur_kf); + + /*adapt_coef_probs*/ + for (tx_size = 0; tx_size < 4; tx_size++) { + coef_tx_size_start = VP9_COEF_START + + tx_size * 4 * VP9_COEF_SIZE_ONE_SET; + coef_count_tx_size_start = VP9_COEF_COUNT_START + + tx_size * 4 * VP9_COEF_COUNT_SIZE_ONE_SET; + coef_plane_start = coef_tx_size_start; + coef_count_plane_start = coef_count_tx_size_start; + for (plane = 0; plane < 2; plane++) { + coef_type_start = coef_plane_start; + coef_count_type_start = coef_count_plane_start; + for (type = 0; type < 2; type++) { + coef_band_start = coef_type_start; + coef_count_band_start = coef_count_type_start; + for (band = 0; band < 6; band++) { + if (band == 0) + cxt_num = 3; + else + cxt_num = 6; + coef_cxt_start = coef_band_start; + coef_count_cxt_start = + coef_count_band_start; + for (cxt = 0; cxt < cxt_num; cxt++) { + const int n0 = + count[coef_count_cxt_start]; + const int n1 = + count[coef_count_cxt_start + 1]; + const int n2 = + count[coef_count_cxt_start + 2]; + const int neob = + count[coef_count_cxt_start + 3]; + const int nneob = + count[coef_count_cxt_start + 4]; + const unsigned int + branch_ct[3][2] = { + { neob, nneob }, + { n0, n1 + n2 }, + { n1, n2 } + }; + coef_node_start = + coef_cxt_start; + for + (node = 0; node < 3; node++) { + prob_32 = + 
prev_prob[ + coef_node_start + / 4 * 2]; + prob_res = + coef_node_start & 3; + prob_shift = + prob_res * 8; + pre_prob = + (prob_32 >> prob_shift) + & 0xff; + + /*get_binary_prob*/ + num = + branch_ct[node][0]; + den = + branch_ct[node][0] + + branch_ct[node][1]; + m_count = (den < + count_sat) + ? den : count_sat; + + get_prob = + (den == 0) ? 128u : + clip_prob( + div_r32(((int64_t) + num * 256 + + (den >> 1)), + den)); + + factor = + update_factor * m_count + / count_sat; + new_prob = + ROUND_POWER_OF_TWO + (pre_prob * + (256 - factor) + + get_prob * factor, 8); + + cur_prob[coef_node_start + / 4 * 2] = + (cur_prob + [coef_node_start + / 4 * 2] & (~(0xff << + prob_shift))) | + (new_prob << + prob_shift); + + coef_node_start += 1; + } + + coef_cxt_start = + coef_cxt_start + 3; + coef_count_cxt_start = + coef_count_cxt_start + + 5; + } + if (band == 0) { + coef_band_start += 10; + coef_count_band_start += 15; + } else { + coef_band_start += 18; + coef_count_band_start += 30; + } + } + coef_type_start += VP9_COEF_SIZE_ONE_SET; + coef_count_type_start += + VP9_COEF_COUNT_SIZE_ONE_SET; + } + coef_plane_start += 2 * VP9_COEF_SIZE_ONE_SET; + coef_count_plane_start += + 2 * VP9_COEF_COUNT_SIZE_ONE_SET; + } + } + + if (cur_kf == 0) { + /*mode_mv_merge_probs - merge_intra_inter_prob*/ + for (coef_count_node_start = VP9_INTRA_INTER_COUNT_START; + coef_count_node_start < (VP9_MV_CLASS0_HP_1_COUNT_START + + VP9_MV_CLASS0_HP_1_COUNT_SIZE); coef_count_node_start += 2) { + + if (coef_count_node_start == + VP9_INTRA_INTER_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_intra_inter_prob\n"); + coef_node_start = VP9_INTRA_INTER_START; + } else if (coef_count_node_start == + VP9_COMP_INTER_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_comp_inter_prob\n"); + coef_node_start = VP9_COMP_INTER_START; + } + /* + *else if (coef_count_node_start == + * VP9_COMP_REF_COUNT_START) { + * pr_info(" # merge_comp_inter_prob\n"); + * coef_node_start = 
VP9_COMP_REF_START; + *} + *else if (coef_count_node_start == + * VP9_SINGLE_REF_COUNT_START) { + * pr_info(" # merge_comp_inter_prob\n"); + * coef_node_start = VP9_SINGLE_REF_START; + *} + */ + else if (coef_count_node_start == + VP9_TX_MODE_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_tx_mode_probs\n"); + coef_node_start = VP9_TX_MODE_START; + } else if (coef_count_node_start == + VP9_SKIP_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_skip_probs\n"); + coef_node_start = VP9_SKIP_START; + } else if (coef_count_node_start == + VP9_MV_SIGN_0_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_sign_0\n"); + coef_node_start = VP9_MV_SIGN_0_START; + } else if (coef_count_node_start == + VP9_MV_SIGN_1_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_sign_1\n"); + coef_node_start = VP9_MV_SIGN_1_START; + } else if (coef_count_node_start == + VP9_MV_BITS_0_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_bits_0\n"); + coef_node_start = VP9_MV_BITS_0_START; + } else if (coef_count_node_start == + VP9_MV_BITS_1_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_bits_1\n"); + coef_node_start = VP9_MV_BITS_1_START; + } else if (coef_count_node_start == + VP9_MV_CLASS0_HP_0_COUNT_START) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_class0_hp\n"); + coef_node_start = VP9_MV_CLASS0_HP_0_START; + } + + + den = count[coef_count_node_start] + + count[coef_count_node_start + 1]; + + prob_32 = prev_prob[coef_node_start / 4 * 2]; + prob_res = coef_node_start & 3; + prob_shift = prob_res * 8; + pre_prob = (prob_32 >> prob_shift) & 0xff; + + if (den == 0) + new_prob = pre_prob; + else { + m_count = (den < MODE_MV_COUNT_SAT) ? 
+ den : MODE_MV_COUNT_SAT; + get_prob = + clip_prob( + div_r32(((int64_t)count[coef_count_node_start] + * 256 + (den >> 1)), + den)); + /*weighted_prob*/ + factor = count_to_update_factor[m_count]; + new_prob = + ROUND_POWER_OF_TWO(pre_prob * (256 - factor) + + get_prob * factor, 8); + } + cur_prob[coef_node_start / 4 * 2] = + (cur_prob[coef_node_start / 4 * 2] & + (~(0xff << prob_shift))) + | (new_prob << prob_shift); + + coef_node_start = coef_node_start + 1; + } + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_inter_mode_tree\n"); + coef_node_start = VP9_INTER_MODE_START; + coef_count_node_start = VP9_INTER_MODE_COUNT_START; + for (tree_i = 0; tree_i < 7; tree_i++) { + for (node = 0; node < 3; node++) { + switch (node) { + case 2: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 3]; + break; + case 1: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 3]; + break; + default: + tree_left = + count[coef_count_node_start + 2]; + tree_right = + count[coef_count_node_start + 0] + + count[coef_count_node_start + 1] + + count[coef_count_node_start + 3]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 4; + } + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_intra_mode_tree\n"); + coef_node_start = VP9_IF_Y_MODE_START; + coef_count_node_start = VP9_IF_Y_MODE_COUNT_START; + for (tree_i = 0; tree_i < 14; tree_i++) { + for (node = 0; node < 9; node++) { + switch (node) { + case 8: + tree_left = + count[coef_count_node_start+D153_PRED]; + tree_right = + count[coef_count_node_start+D207_PRED]; + break; + case 7: + tree_left = + count[coef_count_node_start+D63_PRED]; + tree_right = + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED]; + 
break; + case 6: + tree_left = + count[coef_count_node_start + D45_PRED]; + tree_right = + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED] + + count[coef_count_node_start+D63_PRED]; + break; + case 5: + tree_left = + count[coef_count_node_start+D135_PRED]; + tree_right = + count[coef_count_node_start+D117_PRED]; + break; + case 4: + tree_left = + count[coef_count_node_start+H_PRED]; + tree_right = + count[coef_count_node_start+D117_PRED] + + count[coef_count_node_start+D135_PRED]; + break; + case 3: + tree_left = + count[coef_count_node_start+H_PRED] + + count[coef_count_node_start+D117_PRED] + + count[coef_count_node_start+D135_PRED]; + tree_right = + count[coef_count_node_start+D45_PRED] + + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED] + + count[coef_count_node_start+D63_PRED]; + break; + case 2: + tree_left = + count[coef_count_node_start+V_PRED]; + tree_right = + count[coef_count_node_start+H_PRED] + + count[coef_count_node_start+D117_PRED] + + count[coef_count_node_start+D135_PRED] + + count[coef_count_node_start+D45_PRED] + + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED] + + count[coef_count_node_start+D63_PRED]; + break; + case 1: + tree_left = + count[coef_count_node_start+TM_PRED]; + tree_right = + count[coef_count_node_start+V_PRED] + + count[coef_count_node_start+H_PRED] + + count[coef_count_node_start+D117_PRED] + + count[coef_count_node_start+D135_PRED] + + count[coef_count_node_start+D45_PRED] + + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED] + + count[coef_count_node_start+D63_PRED]; + break; + default: + tree_left = + count[coef_count_node_start+DC_PRED]; + tree_right = + count[coef_count_node_start+TM_PRED] + + count[coef_count_node_start+V_PRED] + + count[coef_count_node_start+H_PRED] + + count[coef_count_node_start+D117_PRED] + + count[coef_count_node_start+D135_PRED] + + 
count[coef_count_node_start+D45_PRED] + + count[coef_count_node_start+D207_PRED] + + count[coef_count_node_start+D153_PRED] + + count[coef_count_node_start+D63_PRED]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 10; + } + + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_partition_tree\n"); + coef_node_start = VP9_PARTITION_P_START; + coef_count_node_start = VP9_PARTITION_P_COUNT_START; + for (tree_i = 0; tree_i < 16; tree_i++) { + for (node = 0; node < 3; node++) { + switch (node) { + case 2: + tree_left = + count[coef_count_node_start + 2]; + tree_right = + count[coef_count_node_start + 3]; + break; + case 1: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + default: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 4; + } + + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_switchable_interp_tree\n"); + coef_node_start = VP9_INTERP_START; + coef_count_node_start = VP9_INTERP_COUNT_START; + for (tree_i = 0; tree_i < 4; tree_i++) { + for (node = 0; node < 2; node++) { + switch (node) { + case 1: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 2]; + break; + default: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 2]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, 
tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 3; + } + + if (debug & VP9_DEBUG_MERGE) + pr_info("# merge_vp9_mv_joint_tree\n"); + coef_node_start = VP9_MV_JOINTS_START; + coef_count_node_start = VP9_MV_JOINTS_COUNT_START; + for (tree_i = 0; tree_i < 1; tree_i++) { + for (node = 0; node < 3; node++) { + switch (node) { + case 2: + tree_left = + count[coef_count_node_start + 2]; + tree_right = + count[coef_count_node_start + 3]; + break; + case 1: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + default: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, + tree_left, tree_right, tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 4; + } + + for (mvd_i = 0; mvd_i < 2; mvd_i++) { + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_mv_class_tree [%d] -\n", mvd_i); + coef_node_start = + mvd_i ? VP9_MV_CLASSES_1_START : VP9_MV_CLASSES_0_START; + coef_count_node_start = + mvd_i ? 
VP9_MV_CLASSES_1_COUNT_START + : VP9_MV_CLASSES_0_COUNT_START; + tree_i = 0; + for (node = 0; node < 10; node++) { + switch (node) { + case 9: + tree_left = + count[coef_count_node_start + 9]; + tree_right = + count[coef_count_node_start + 10]; + break; + case 8: + tree_left = + count[coef_count_node_start + 7]; + tree_right = + count[coef_count_node_start + 8]; + break; + case 7: + tree_left = + count[coef_count_node_start + 7] + + count[coef_count_node_start + 8]; + tree_right = + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + case 6: + tree_left = + count[coef_count_node_start + 6]; + tree_right = + count[coef_count_node_start + 7] + + count[coef_count_node_start + 8] + + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + case 5: + tree_left = + count[coef_count_node_start + 4]; + tree_right = + count[coef_count_node_start + 5]; + break; + case 4: + tree_left = + count[coef_count_node_start + 4] + + count[coef_count_node_start + 5]; + tree_right = + count[coef_count_node_start + 6] + + count[coef_count_node_start + 7] + + count[coef_count_node_start + 8] + + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + case 3: + tree_left = + count[coef_count_node_start + 2]; + tree_right = + count[coef_count_node_start + 3]; + break; + case 2: + tree_left = + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + tree_right = + count[coef_count_node_start + 4] + + count[coef_count_node_start + 5] + + count[coef_count_node_start + 6] + + count[coef_count_node_start + 7] + + count[coef_count_node_start + 8] + + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + case 1: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3] + + count[coef_count_node_start + 4] + + count[coef_count_node_start + 5] + + count[coef_count_node_start + 6] + + 
count[coef_count_node_start + 7] + + count[coef_count_node_start + 8] + + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + default: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3] + + count[coef_count_node_start + 4] + + count[coef_count_node_start + 5] + + count[coef_count_node_start + 6] + + count[coef_count_node_start + 7] + + count[coef_count_node_start + 8] + + count[coef_count_node_start + 9] + + count[coef_count_node_start + 10]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_mv_class0_tree [%d] -\n", mvd_i); + coef_node_start = + mvd_i ? VP9_MV_CLASS0_1_START : VP9_MV_CLASS0_0_START; + coef_count_node_start = + mvd_i ? VP9_MV_CLASS0_1_COUNT_START : + VP9_MV_CLASS0_0_COUNT_START; + tree_i = 0; + node = 0; + tree_left = count[coef_count_node_start + 0]; + tree_right = count[coef_count_node_start + 1]; + + vp9_tree_merge_probs(prev_prob, cur_prob, coef_node_start, + tree_left, tree_right, tree_i, node); + if (debug & VP9_DEBUG_MERGE) + pr_info(" # merge_vp9_mv_fp_tree_class0_fp [%d] -\n", + mvd_i); + coef_node_start = + mvd_i ? VP9_MV_CLASS0_FP_1_START : + VP9_MV_CLASS0_FP_0_START; + coef_count_node_start = + mvd_i ? 
VP9_MV_CLASS0_FP_1_COUNT_START : + VP9_MV_CLASS0_FP_0_COUNT_START; + for (tree_i = 0; tree_i < 3; tree_i++) { + for (node = 0; node < 3; node++) { + switch (node) { + case 2: + tree_left = + count[coef_count_node_start + 2]; + tree_right = + count[coef_count_node_start + 3]; + break; + case 1: + tree_left = + count[coef_count_node_start + 1]; + tree_right = + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + default: + tree_left = + count[coef_count_node_start + 0]; + tree_right = + count[coef_count_node_start + 1] + + count[coef_count_node_start + 2] + + count[coef_count_node_start + 3]; + break; + + } + + vp9_tree_merge_probs(prev_prob, cur_prob, + coef_node_start, tree_left, tree_right, + tree_i, node); + + coef_node_start = coef_node_start + 1; + } + coef_count_node_start = coef_count_node_start + 4; + } + + } /* for mvd_i (mvd_y or mvd_x)*/ +} + +} + +static void uninit_mmu_buffers(struct VP9Decoder_s *pbi) +{ +#ifndef MV_USE_FIXED_BUF + dealloc_mv_bufs(pbi); +#endif + if (pbi->mmu_box) { + decoder_mmu_box_free(pbi->mmu_box); + pbi->mmu_box = NULL; + } + + if (pbi->bmmu_box) { + decoder_bmmu_box_free(pbi->bmmu_box); + pbi->bmmu_box = NULL; + } +} + +static int calc_luc_quantity(u32 w, u32 h) +{ + int lcu_size = 64; /*fixed 64*/ + int pic_width_64 = (w + 63) & (~0x3f); + int pic_height_32 = (h + 31) & (~0x1f); + int pic_width_lcu = (pic_width_64 % lcu_size) ? + pic_width_64 / lcu_size + 1 : pic_width_64 / lcu_size; + int pic_height_lcu = (pic_height_32 % lcu_size) ? 
+ pic_height_32 / lcu_size + 1 : pic_height_32 / lcu_size; + + return pic_width_lcu * pic_height_lcu; +} + +/* return in MB */ +static int vp9_max_mmu_buf_size(int max_w, int max_h) +{ + int buf_size = 48; + + if ((max_w * max_h > 1280*736) && + (max_w * max_h <= 1920*1088)) { + buf_size = 12; + } else if ((max_w * max_h > 0) && + (max_w * max_h <= 1280*736)) { + buf_size = 4; + } + + return buf_size; +} + +static void vp9_put_video_frame(void *vdec_ctx, struct vframe_s *vf) +{ + vvp9_vf_put(vf, vdec_ctx); +} + +static void vp9_get_video_frame(void *vdec_ctx, struct vframe_s **vf) +{ + *vf = vvp9_vf_get(vdec_ctx); +} + +static struct task_ops_s task_dec_ops = { + .type = TASK_TYPE_DEC, + .get_vframe = vp9_get_video_frame, + .put_vframe = vp9_put_video_frame, +}; + +static int v4l_alloc_and_config_pic(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic) +{ + int ret = -1; + int i = pic->index; + int dw_mode = get_double_write_mode_init(pbi); + int lcu_total = calc_luc_quantity(pbi->frame_width, pbi->frame_height); +#ifdef MV_USE_FIXED_BUF + u32 mpred_mv_end = pbi->work_space_buf->mpred_mv.buf_start + + pbi->work_space_buf->mpred_mv.buf_size; + int mv_size = cal_mv_buf_size(pbi, pbi->frame_width, pbi->frame_height); +#endif + struct aml_vcodec_ctx * ctx = (struct aml_vcodec_ctx *)pbi->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + + if (i < 0) + return ret; + + ret = ctx->fb_ops.alloc(&ctx->fb_ops, pbi->fb_token, &fb, AML_FB_REQ_DEC); + if (ret < 0) { + vp9_print(pbi, 0, "[%d] VP9 get buffer fail.\n", ctx->id); + return ret; + } + + fb->task->attach(fb->task, &task_dec_ops, pbi); + fb->status = FB_ST_DECODER; + + if (pbi->mmu_enable) { + struct internal_comp_buf *ibuf = v4lfb_to_icomp_buf(pbi, fb); + + pbi->m_BUF[i].header_addr = ibuf->header_addr; + if (debug & VP9_DEBUG_BUFMGR_MORE) { + pr_info("MMU header_adr %d: %ld\n", + i, pbi->m_BUF[i].header_addr); + } + } + + pic->repeat_pic = NULL; + pic->repeat_count = 0; +#ifdef MV_USE_FIXED_BUF + if 
((pbi->work_space_buf->mpred_mv.buf_start + + ((i + 1) * mv_size)) + <= mpred_mv_end) { +#endif + pbi->m_BUF[i].v4l_ref_buf_addr = (ulong)fb; + pic->cma_alloc_addr = fb->m.mem[0].addr; + if (fb->num_planes == 1) { + pbi->m_BUF[i].start_adr = fb->m.mem[0].addr; + pbi->m_BUF[i].luma_size = fb->m.mem[0].offset; + pbi->m_BUF[i].size = fb->m.mem[0].size; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + pic->dw_y_adr = pbi->m_BUF[i].start_adr; + pic->dw_u_v_adr = pic->dw_y_adr + pbi->m_BUF[i].luma_size; + pic->luma_size = fb->m.mem[0].offset; + pic->chroma_size = fb->m.mem[0].size - fb->m.mem[0].offset; + } else if (fb->num_planes == 2) { + pbi->m_BUF[i].start_adr = fb->m.mem[0].addr; + pbi->m_BUF[i].size = fb->m.mem[0].size; + pbi->m_BUF[i].chroma_addr = fb->m.mem[1].addr; + pbi->m_BUF[i].chroma_size = fb->m.mem[1].size; + fb->m.mem[0].bytes_used = fb->m.mem[0].size; + fb->m.mem[1].bytes_used = fb->m.mem[1].size; + pic->dw_y_adr = pbi->m_BUF[i].start_adr; + pic->dw_u_v_adr = pbi->m_BUF[i].chroma_addr; + pic->luma_size = fb->m.mem[0].size; + pic->chroma_size = fb->m.mem[1].size; + } + + /* config frame buffer */ + if (pbi->mmu_enable) + pic->header_adr = pbi->m_BUF[i].header_addr; + + pic->BUF_index = i; + pic->lcu_total = lcu_total; + pic->mc_canvas_y = pic->index; + pic->mc_canvas_u_v = pic->index; + + if (dw_mode & 0x10) { + pic->mc_canvas_y = (pic->index << 1); + pic->mc_canvas_u_v = (pic->index << 1) + 1; + } + +#ifdef MV_USE_FIXED_BUF + pic->mpred_mv_wr_start_addr = + pbi->work_space_buf->mpred_mv.buf_start + + (pic->index * mv_size); + pic->mv_size = mv_size; +#endif + if (debug) { + pr_info("%s index %d BUF_index %d ", + __func__, pic->index, + pic->BUF_index); + pr_info("comp_body_size %x comp_buf_size %x ", + pic->comp_body_size, + pic->buf_size); + pr_info("mpred_mv_wr_start_adr %ld\n", + pic->mpred_mv_wr_start_addr); + pr_info("dw_y_adr %d, pic_config->dw_u_v_adr =%d\n", + pic->dw_y_adr, + pic->dw_u_v_adr); + } +#ifdef MV_USE_FIXED_BUF + } +#endif + return 
ret; +} + +static int config_pic(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + int ret = -1; + int i; + int pic_width = pbi->init_pic_w; + int pic_height = pbi->init_pic_h; + int lcu_size = 64; /*fixed 64*/ + int pic_width_64 = (pic_width + 63) & (~0x3f); + int pic_height_32 = (pic_height + 31) & (~0x1f); + int pic_width_lcu = (pic_width_64 % lcu_size) ? + pic_width_64 / lcu_size + 1 + : pic_width_64 / lcu_size; + int pic_height_lcu = (pic_height_32 % lcu_size) ? + pic_height_32 / lcu_size + 1 + : pic_height_32 / lcu_size; + int lcu_total = pic_width_lcu * pic_height_lcu; +#ifdef MV_USE_FIXED_BUF + u32 mpred_mv_end = pbi->work_space_buf->mpred_mv.buf_start + + pbi->work_space_buf->mpred_mv.buf_size; + int mv_size = cal_mv_buf_size(pbi, pbi->init_pic_w, pbi->init_pic_h); +#endif + u32 y_adr = 0; + int buf_size = 0; + + int losless_comp_header_size = + compute_losless_comp_header_size(pic_width, + pic_height); + int losless_comp_body_size = compute_losless_comp_body_size(pic_width, + pic_height, buf_alloc_depth == 10); + int mc_buffer_size = losless_comp_header_size + losless_comp_body_size; + int mc_buffer_size_h = (mc_buffer_size + 0xffff) >> 16; + int mc_buffer_size_u_v = 0; + int mc_buffer_size_u_v_h = 0; + int dw_mode = get_double_write_mode_init(pbi); + + pbi->lcu_total = lcu_total; + + if (dw_mode) { + int pic_width_dw = pic_width / + get_double_write_ratio(dw_mode); + int pic_height_dw = pic_height / + get_double_write_ratio(dw_mode); + + int pic_width_64_dw = (pic_width_dw + 63) & (~0x3f); + int pic_height_32_dw = (pic_height_dw + 31) & (~0x1f); + int pic_width_lcu_dw = (pic_width_64_dw % lcu_size) ? + pic_width_64_dw / lcu_size + 1 + : pic_width_64_dw / lcu_size; + int pic_height_lcu_dw = (pic_height_32_dw % lcu_size) ? 
+ pic_height_32_dw / lcu_size + 1 + : pic_height_32_dw / lcu_size; + int lcu_total_dw = pic_width_lcu_dw * pic_height_lcu_dw; + mc_buffer_size_u_v = lcu_total_dw * lcu_size * lcu_size / 2; + mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16; + /*64k alignment*/ + buf_size = ((mc_buffer_size_u_v_h << 16) * 3); + buf_size = ((buf_size + 0xffff) >> 16) << 16; + } + + if (mc_buffer_size & 0xffff) /*64k alignment*/ + mc_buffer_size_h += 1; + if ((!pbi->mmu_enable) && ((dw_mode & 0x10) == 0)) + buf_size += (mc_buffer_size_h << 16); + + if (pbi->mmu_enable) { + pic_config->header_adr = decoder_bmmu_box_get_phy_addr( + pbi->bmmu_box, HEADER_BUFFER_IDX(pic_config->index)); + + if (debug & VP9_DEBUG_BUFMGR_MORE) { + pr_info("MMU header_adr %d: %ld\n", + pic_config->index, pic_config->header_adr); + } + } + + i = pic_config->index; +#ifdef MV_USE_FIXED_BUF + if ((pbi->work_space_buf->mpred_mv.buf_start + + ((i + 1) * mv_size)) + <= mpred_mv_end + ) { +#endif + if (buf_size > 0) { + ret = decoder_bmmu_box_alloc_buf_phy(pbi->bmmu_box, + VF_BUFFER_IDX(i), + buf_size, DRIVER_NAME, + &pic_config->cma_alloc_addr); + if (ret < 0) { + pr_info( + "decoder_bmmu_box_alloc_buf_phy idx %d size %d fail\n", + VF_BUFFER_IDX(i), + buf_size + ); + return ret; + } + + if (pic_config->cma_alloc_addr) + y_adr = pic_config->cma_alloc_addr; + else { + pr_info( + "decoder_bmmu_box_alloc_buf_phy idx %d size %d return null\n", + VF_BUFFER_IDX(i), + buf_size + ); + return -1; + } + } + { + /*ensure get_pic_by_POC() + not get the buffer not decoded*/ + pic_config->BUF_index = i; + pic_config->lcu_total = lcu_total; + + pic_config->comp_body_size = losless_comp_body_size; + pic_config->buf_size = buf_size; + + pic_config->mc_canvas_y = pic_config->index; + pic_config->mc_canvas_u_v = pic_config->index; + if (dw_mode & 0x10) { + pic_config->dw_y_adr = y_adr; + pic_config->dw_u_v_adr = y_adr + + ((mc_buffer_size_u_v_h << 16) << 1); + + pic_config->mc_canvas_y = + (pic_config->index << 1); + 
pic_config->mc_canvas_u_v = + (pic_config->index << 1) + 1; + } else if (dw_mode) { + pic_config->dw_y_adr = y_adr; + pic_config->dw_u_v_adr = pic_config->dw_y_adr + + ((mc_buffer_size_u_v_h << 16) << 1); + } +#ifdef MV_USE_FIXED_BUF + pic_config->mpred_mv_wr_start_addr = + pbi->work_space_buf->mpred_mv.buf_start + + (pic_config->index * mv_size); + pic_config->mv_size = mv_size; +#endif + if (debug) { + pr_info + ("%s index %d BUF_index %d ", + __func__, pic_config->index, + pic_config->BUF_index); + pr_info + ("comp_body_size %x comp_buf_size %x ", + pic_config->comp_body_size, + pic_config->buf_size); + pr_info + ("mpred_mv_wr_start_adr %ld\n", + pic_config->mpred_mv_wr_start_addr); + pr_info("dw_y_adr %d, pic_config->dw_u_v_adr =%d\n", + pic_config->dw_y_adr, + pic_config->dw_u_v_adr); + } + ret = 0; + } +#ifdef MV_USE_FIXED_BUF + } +#endif + return ret; +} + + +static void init_pic_list(struct VP9Decoder_s *pbi) +{ + int i; + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *pic_config; + u32 header_size; + struct vdec_s *vdec = hw_to_vdec(pbi); + + if (!pbi->is_used_v4l && pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) { + header_size = vvp9_mmu_compress_header_size(pbi->max_pic_w, pbi->max_pic_h); + /*alloc VP9 compress header first*/ + for (i = 0; i < pbi->used_buf_num; i++) { + unsigned long buf_addr; + if (decoder_bmmu_box_alloc_buf_phy + (pbi->bmmu_box, + HEADER_BUFFER_IDX(i), header_size, + DRIVER_HEADER_NAME, + &buf_addr) < 0) { + pr_info("%s malloc compress header failed %d\n", + DRIVER_HEADER_NAME, i); + pbi->fatal_error |= DECODER_FATAL_ERROR_NO_MEM; + return; + } + } + } + for (i = 0; i < pbi->used_buf_num; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + pic_config->index = i; + pic_config->BUF_index = -1; + pic_config->mv_buf_index = -1; + if (vdec->parallel_dec == 1) { + pic_config->y_canvas_index = -1; + pic_config->uv_canvas_index = -1; + } + pic_config->y_crop_width = pbi->init_pic_w; + 
pic_config->y_crop_height = pbi->init_pic_h; + pic_config->double_write_mode = get_double_write_mode(pbi); + + if (!pbi->is_used_v4l) { + if (config_pic(pbi, pic_config) < 0) { + if (debug) + pr_info("Config_pic %d fail\n", + pic_config->index); + pic_config->index = -1; + break; + } + + if (pic_config->double_write_mode) { + set_canvas(pbi, pic_config); + } + } + } + for (; i < pbi->used_buf_num; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + pic_config->index = -1; + pic_config->BUF_index = -1; + pic_config->mv_buf_index = -1; + if (vdec->parallel_dec == 1) { + pic_config->y_canvas_index = -1; + pic_config->uv_canvas_index = -1; + } + } + pr_info("%s ok, used_buf_num = %d\n", + __func__, pbi->used_buf_num); +} + +static void init_pic_list_hw(struct VP9Decoder_s *pbi) +{ + int i; + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *pic_config; + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x0);*/ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, + (0x1 << 1) | (0x1 << 2)); + + for (i = 0; i < pbi->used_buf_num; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + if (pic_config->index < 0) + break; + + if (pbi->mmu_enable && ((pic_config->double_write_mode & 0x10) == 0)) { + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->header_adr >> 5); + } else { + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + * pic_config->mc_y_adr + * | (pic_config->mc_canvas_y << 8) | 0x1); + */ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->dw_y_adr >> 5); + } +#ifndef LOSLESS_COMPRESS_MODE + /*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, + * pic_config->mc_u_v_adr + * | (pic_config->mc_canvas_u_v << 8)| 0x1); + */ + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->header_adr >> 5); +#else + if (pic_config->double_write_mode & 0x10) { + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, + pic_config->dw_u_v_adr >> 5); + } +#endif + } + WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1); + + /*Zero out canvas registers in IPP -- avoid simulation X*/ 
+ WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 1); + for (i = 0; i < 32; i++) + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0); +} + +static void dump_pic_list(struct VP9Decoder_s *pbi) +{ + struct VP9_Common_s *const cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *pic_config; + int i; + for (i = 0; i < FRAME_BUFFERS; i++) { + pic_config = &cm->buffer_pool->frame_bufs[i].buf; + vp9_print(pbi, 0, + "Buf(%d) index %d mv_buf_index %d ref_count %d vf_ref %d repeat_count %d dec_idx %d slice_type %d w/h %d/%d adr: %x\n", + i, + pic_config->index, +#ifndef MV_USE_FIXED_BUF + pic_config->mv_buf_index, +#else + -1, +#endif + cm->buffer_pool-> + frame_bufs[i].ref_count, + pic_config->vf_ref, + pic_config->repeat_count, + pic_config->decode_idx, + pic_config->slice_type, + pic_config->y_crop_width, + pic_config->y_crop_height, + pic_config->cma_alloc_addr + ); + } + return; +} + +static int config_pic_size(struct VP9Decoder_s *pbi, unsigned short bit_depth) +{ +#ifdef LOSLESS_COMPRESS_MODE + unsigned int data32; +#endif + int losless_comp_header_size, losless_comp_body_size; + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *cur_pic_config = &cm->cur_frame->buf; + + frame_width = cur_pic_config->y_crop_width; + frame_height = cur_pic_config->y_crop_height; + cur_pic_config->bit_depth = bit_depth; + cur_pic_config->double_write_mode = get_double_write_mode(pbi); + losless_comp_header_size = + compute_losless_comp_header_size(cur_pic_config->y_crop_width, + cur_pic_config->y_crop_height); + losless_comp_body_size = + compute_losless_comp_body_size(cur_pic_config->y_crop_width, + cur_pic_config->y_crop_height, (bit_depth == VPX_BITS_10)); + cur_pic_config->comp_body_size = losless_comp_body_size; +#ifdef LOSLESS_COMPRESS_MODE + data32 = READ_VREG(HEVC_SAO_CTRL5); + if (bit_depth == VPX_BITS_10) + data32 &= ~(1 << 9); + else + data32 |= (1 << 9); + + WRITE_VREG(HEVC_SAO_CTRL5, data32); + + if (pbi->mmu_enable) { + /*bit[4] : 
paged_mem_mode*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4)); + } else { + /*bit[3] smem mdoe*/ + if (bit_depth == VPX_BITS_10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0 << 3)); + else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (1 << 3)); + } + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SM1) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, (losless_comp_body_size >> 5)); + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,(0xff<<20) | (0xff<<10) | 0xff);*/ + WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size); + if (get_double_write_mode(pbi) & 0x10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#endif + return 0; +} + +static int config_mc_buffer(struct VP9Decoder_s *pbi, unsigned short bit_depth) +{ + int i; + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *cur_pic_config = &cm->cur_frame->buf; + uint8_t scale_enable = 0; + + if (debug&VP9_DEBUG_BUFMGR_MORE) + pr_info("config_mc_buffer entered .....\n"); + + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (0 << 1) | 1); + for (i = 0; i < REFS_PER_FRAME; ++i) { + struct PIC_BUFFER_CONFIG_s *pic_config = cm->frame_refs[i].buf; + if (!pic_config) + continue; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic_config->mc_canvas_u_v << 16) + | (pic_config->mc_canvas_u_v << 8) + | pic_config->mc_canvas_y); + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info("refid %x mc_canvas_u_v %x mc_canvas_y %x\n", + i, pic_config->mc_canvas_u_v, + pic_config->mc_canvas_y); + } + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (16 << 8) | (0 << 1) | 1); + for (i = 0; i < REFS_PER_FRAME; ++i) { + struct PIC_BUFFER_CONFIG_s *pic_config = cm->frame_refs[i].buf; + if (!pic_config) + continue; + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, + (pic_config->mc_canvas_u_v << 16) + | (pic_config->mc_canvas_u_v << 8) + | pic_config->mc_canvas_y); + } + + 
/*auto_inc start index:0 field:0*/ + WRITE_VREG(VP9D_MPP_REFINFO_TBL_ACCCONFIG, 0x1 << 2); + /*index 0:last 1:golden 2:altref*/ + for (i = 0; i < REFS_PER_FRAME; i++) { + int ref_pic_body_size; + struct PIC_BUFFER_CONFIG_s *pic_config = cm->frame_refs[i].buf; + if (!pic_config) + continue; + WRITE_VREG(VP9D_MPP_REFINFO_DATA, pic_config->y_crop_width); + WRITE_VREG(VP9D_MPP_REFINFO_DATA, pic_config->y_crop_height); + + if (pic_config->y_crop_width != cur_pic_config->y_crop_width || + pic_config->y_crop_height != cur_pic_config->y_crop_height) { + scale_enable |= (1 << i); + } + ref_pic_body_size = + compute_losless_comp_body_size(pic_config->y_crop_width, + pic_config->y_crop_height, (bit_depth == VPX_BITS_10)); + WRITE_VREG(VP9D_MPP_REFINFO_DATA, + (pic_config->y_crop_width << 14) + / cur_pic_config->y_crop_width); + WRITE_VREG(VP9D_MPP_REFINFO_DATA, + (pic_config->y_crop_height << 14) + / cur_pic_config->y_crop_height); + if (pbi->mmu_enable) + WRITE_VREG(VP9D_MPP_REFINFO_DATA, 0); + else + WRITE_VREG(VP9D_MPP_REFINFO_DATA, ref_pic_body_size >> 5); + } + WRITE_VREG(VP9D_MPP_REF_SCALE_ENBL, scale_enable); + return 0; +} + +static void clear_mpred_hw(struct VP9Decoder_s *pbi) +{ + unsigned int data32; + + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 &= (~(1 << 6)); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); +} + +static void config_mpred_hw(struct VP9Decoder_s *pbi) +{ + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *cur_pic_config = &cm->cur_frame->buf; + struct PIC_BUFFER_CONFIG_s *last_frame_pic_config = + &cm->prev_frame->buf; + + unsigned int data32; + int mpred_curr_lcu_x; + int mpred_curr_lcu_y; + int mpred_mv_rd_end_addr; + + + mpred_mv_rd_end_addr = last_frame_pic_config->mpred_mv_wr_start_addr + + last_frame_pic_config->mv_size; + //+ (last_frame_pic_config->lcu_total * MV_MEM_UNIT); + + data32 = READ_VREG(HEVC_MPRED_CURR_LCU); + mpred_curr_lcu_x = data32 & 0xffff; + mpred_curr_lcu_y = (data32 >> 16) & 0xffff; + + if (debug & 
VP9_DEBUG_BUFMGR) + pr_info("cur pic_config index %d col pic_config index %d\n", + cur_pic_config->index, last_frame_pic_config->index); + WRITE_VREG(HEVC_MPRED_CTRL3, 0x24122412); + WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, + pbi->work_space_buf->mpred_above.buf_start); + + data32 = READ_VREG(HEVC_MPRED_CTRL4); + + data32 &= (~(1 << 6)); + data32 |= (cm->use_prev_frame_mvs << 6); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); + + WRITE_VREG(HEVC_MPRED_MV_WR_START_ADDR, + cur_pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_WPTR, cur_pic_config->mpred_mv_wr_start_addr); + + WRITE_VREG(HEVC_MPRED_MV_RD_START_ADDR, + last_frame_pic_config->mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_RPTR, + last_frame_pic_config->mpred_mv_wr_start_addr); + /*data32 = ((pbi->lcu_x_num - pbi->tile_width_lcu)*MV_MEM_UNIT);*/ + /*WRITE_VREG(HEVC_MPRED_MV_WR_ROW_JUMP,data32);*/ + /*WRITE_VREG(HEVC_MPRED_MV_RD_ROW_JUMP,data32);*/ + WRITE_VREG(HEVC_MPRED_MV_RD_END_ADDR, mpred_mv_rd_end_addr); + +} + +static void config_sao_hw(struct VP9Decoder_s *pbi, union param_u *params) +{ + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *pic_config = &cm->cur_frame->buf; + + unsigned int data32; + int lcu_size = 64; + int mc_buffer_size_u_v = + pic_config->lcu_total * lcu_size*lcu_size/2; + int mc_buffer_size_u_v_h = + (mc_buffer_size_u_v + 0xffff) >> 16;/*64k alignment*/ + struct aml_vcodec_ctx * v4l2_ctx = pbi->v4l2_ctx; + + if (get_double_write_mode(pbi)) { + WRITE_VREG(HEVC_SAO_Y_START_ADDR, pic_config->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_START_ADDR, pic_config->dw_u_v_adr); + WRITE_VREG(HEVC_SAO_Y_WPTR, pic_config->dw_y_adr); + WRITE_VREG(HEVC_SAO_C_WPTR, pic_config->dw_u_v_adr); + } else { + WRITE_VREG(HEVC_SAO_Y_START_ADDR, 0xffffffff); + WRITE_VREG(HEVC_SAO_C_START_ADDR, 0xffffffff); + } + if (pbi->mmu_enable) + WRITE_VREG(HEVC_CM_HEADER_START_ADDR, pic_config->header_adr); + + if (pbi->is_used_v4l) { + WRITE_VREG(HEVC_SAO_Y_LENGTH, pic_config->luma_size); + 
WRITE_VREG(HEVC_SAO_C_LENGTH, pic_config->chroma_size); + if (debug & PRINT_FLAG_V4L_DETAIL) { + pr_info("[%d] config pic, id: %d, Y:(%x, %d) C:(%x, %d).\n", + v4l2_ctx->id, pic_config->index, + pic_config->dw_y_adr, pic_config->luma_size, + pic_config->dw_u_v_adr, pic_config->chroma_size); + } + } else { + data32 = (mc_buffer_size_u_v_h << 16) << 1; + /*pr_info("data32=%x,mc_buffer_size_u_v_h=%x,lcu_total=%x\n", + data32, mc_buffer_size_u_v_h, pic_config->lcu_total);*/ + + WRITE_VREG(HEVC_SAO_Y_LENGTH, data32); + + data32 = (mc_buffer_size_u_v_h << 16); + WRITE_VREG(HEVC_SAO_C_LENGTH, data32); + } + +#ifdef VP9_10B_NV21 +#ifdef DOS_PROJECT + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + /*[13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32*/ + data32 |= (pbi->mem_map_mode << 12); + data32 &= (~0x3); + data32 |= 0x1; /* [1]:dw_disable [0]:cm_disable*/ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + /*[23:22] dw_v1_ctrl [21:20] dw_v0_ctrl [19:18] dw_h1_ctrl + * [17:16] dw_h0_ctrl + */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /*set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /*[5:4] address_format 00:linear 01:32x32 10:64x32*/ + data32 |= (pbi->mem_map_mode << 4); + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#else + /*m8baby test1902*/ + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~0x3000); + /*[13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32*/ + data32 |= (pbi->mem_map_mode << 12); + data32 &= (~0xff0); + /*data32 |= 0x670;*/ /*Big-Endian per 64-bit*/ + data32 |= 0x880; /*.Big-Endian per 64-bit */ + data32 &= (~0x3); + data32 |= 0x1; /*[1]:dw_disable [0]:cm_disable*/ + WRITE_VREG(HEVC_SAO_CTRL1, data32); + /* [23:22] dw_v1_ctrl [21:20] dw_v0_ctrl + *[19:18] dw_h1_ctrl [17:16] dw_h0_ctrl + */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /* set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + 
WRITE_VREG(HEVC_SAO_CTRL5, data32); + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /*[5:4] address_format 00:linear 01:32x32 10:64x32*/ + data32 |= (pbi->mem_map_mode << 4); + data32 &= (~0xF); + data32 |= 0x8; /*Big-Endian per 64-bit*/ + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#endif +#else + data32 = READ_VREG(HEVC_SAO_CTRL1); + data32 &= (~(3 << 14)); + data32 |= (2 << 14); /* line align with 64*/ + + data32 &= (~0x3000); + /* [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 */ + data32 |= (pbi->mem_map_mode << 12); + + data32 &= (~0xff0); + data32 |= ((pbi->endian >> 8) & 0xfff); /* Big-Endian per 64-bit */ + data32 &= (~0x3); /*[1]:dw_disable [0]:cm_disable*/ + if (get_double_write_mode(pbi) == 0) + data32 |= 0x2; /*disable double write*/ + else if (get_double_write_mode(pbi) & 0x10) + data32 |= 0x1; /*disable cm*/ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { /* >= G12A dw write control */ + unsigned int data; + data = READ_VREG(HEVC_DBLK_CFGB); + data &= (~0x300); /*[8]:first write enable (compress) [9]:double write enable (uncompress)*/ + if (get_double_write_mode(pbi) == 0) + data |= (0x1 << 8); /*enable first write*/ + else if (get_double_write_mode(pbi) & 0x10) + data |= (0x1 << 9); /*double write only*/ + else + data |= ((0x1 << 8) |(0x1 << 9)); + WRITE_VREG(HEVC_DBLK_CFGB, data); + } + + /* swap uv */ + if (pbi->is_used_v4l) { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + data32 &= ~(1 << 8); /* NV21 */ + else + data32 |= (1 << 8); /* NV12 */ + } + + /* + * [31:24] ar_fifo1_axi_thred + * [23:16] ar_fifo0_axi_thred + * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes + * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32 + * [11:08] axi_lendian_C + * [07:04] axi_lendian_Y + * [3] reserved + * [2] clk_forceon + * [1] dw_disable:disable double write output + * [0] cm_disable:disable compress output + */ + 
WRITE_VREG(HEVC_SAO_CTRL1, data32); + + if (get_double_write_mode(pbi) & 0x10) { + /* [23:22] dw_v1_ctrl + *[21:20] dw_v0_ctrl + *[19:18] dw_h1_ctrl + *[17:16] dw_h0_ctrl + */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + /*set them all 0 for H265_NV21 (no down-scale)*/ + data32 &= ~(0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } else { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) + WRITE_VREG(HEVC_SAO_CTRL26, 0); + + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 &= (~(0xff << 16)); + if (get_double_write_mode(pbi) == 8 || + get_double_write_mode(pbi) == 9) { + data32 |= (0xff << 16); + WRITE_VREG(HEVC_SAO_CTRL26, 0xf); + } else if (get_double_write_mode(pbi) == 2 || + get_double_write_mode(pbi) == 3) + data32 |= (0xff << 16); + else if (get_double_write_mode(pbi) == 4) + data32 |= (0x33 << 16); + WRITE_VREG(HEVC_SAO_CTRL5, data32); + } + + data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG); + data32 &= (~0x30); + /* [5:4] -- address_format 00:linear 01:32x32 10:64x32 */ + data32 |= (pbi->mem_map_mode << 4); + data32 &= (~0xf); + data32 |= (pbi->endian & 0xf); /* valid only when double write only */ + + /* swap uv */ + if (pbi->is_used_v4l) { + if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) || + (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M)) + data32 |= (1 << 12); /* NV21 */ + else + data32 &= ~(1 << 12); /* NV12 */ + } + data32 &= (~(3 << 8)); + data32 |= (2 << 8); /* line align with 64 for dw only */ + /* + * [3:0] little_endian + * [5:4] address_format 00:linear 01:32x32 10:64x32 + * [7:6] reserved + * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte + * [11:10] reserved + * [12] CbCr_byte_swap + * [31:13] reserved + */ + WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32); +#endif +} + +static void vp9_config_work_space_hw(struct VP9Decoder_s *pbi, u32 mask) +{ + struct BuffInfo_s *buf_spec = pbi->work_space_buf; + unsigned int data32; + int losless_comp_header_size, losless_comp_body_size; + + if (debug && 
pbi->init_flag == 0) + pr_info("%s w %d h %d %x %x %x %x %x %x %x %x %x %x %x %x\n", + __func__, + buf_spec->max_width, + buf_spec->max_height, + buf_spec->ipp.buf_start, + buf_spec->start_adr, + buf_spec->short_term_rps.buf_start, + buf_spec->vps.buf_start, + buf_spec->sps.buf_start, + buf_spec->pps.buf_start, + buf_spec->sao_up.buf_start, + buf_spec->swap_buf.buf_start, + buf_spec->swap_buf2.buf_start, + buf_spec->scalelut.buf_start, + buf_spec->dblk_para.buf_start, + buf_spec->dblk_data.buf_start); + + if (mask & HW_MASK_FRONT) { + if ((debug & VP9_DEBUG_SEND_PARAM_WITH_REG) == 0) + WRITE_VREG(HEVC_RPM_BUFFER, (u32)pbi->rpm_phy_addr); + + WRITE_VREG(HEVC_SHORT_TERM_RPS, + buf_spec->short_term_rps.buf_start); + /*WRITE_VREG(HEVC_VPS_BUFFER, buf_spec->vps.buf_start);*/ + /*WRITE_VREG(HEVC_SPS_BUFFER, buf_spec->sps.buf_start);*/ + WRITE_VREG(HEVC_PPS_BUFFER, buf_spec->pps.buf_start); + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER, + buf_spec->swap_buf.buf_start); + WRITE_VREG(HEVC_STREAM_SWAP_BUFFER2, + buf_spec->swap_buf2.buf_start); + WRITE_VREG(LMEM_DUMP_ADR, (u32)pbi->lmem_phy_addr); + + } + + if (mask & HW_MASK_BACK) { +#ifdef LOSLESS_COMPRESS_MODE + losless_comp_header_size = + compute_losless_comp_header_size(pbi->init_pic_w, + pbi->init_pic_h); + losless_comp_body_size = + compute_losless_comp_body_size(pbi->init_pic_w, + pbi->init_pic_h, buf_alloc_depth == 10); +#endif + WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, + buf_spec->ipp.buf_start); + //WRITE_VREG(HEVC_SAO_UP, buf_spec->sao_up.buf_start); + //WRITE_VREG(HEVC_SCALELUT, buf_spec->scalelut.buf_start); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + /* cfg_addr_adp*/ + WRITE_VREG(HEVC_DBLK_CFGE, buf_spec->dblk_para.buf_start); + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info("Write HEVC_DBLK_CFGE\n"); + } + /* cfg_p_addr */ + WRITE_VREG(HEVC_DBLK_CFG4, buf_spec->dblk_para.buf_start); + + /* cfg_d_addr */ + WRITE_VREG(HEVC_DBLK_CFG5, buf_spec->dblk_data.buf_start); + + if (get_cpu_major_id() >= 
AM_MESON_CPU_MAJOR_ID_SM1) { + /* + * data32 = (READ_VREG(P_HEVC_DBLK_CFG3)>>8) & 0xff; // xio left offset, default is 0x40 + * data32 = data32 * 2; + * data32 = (READ_VREG(P_HEVC_DBLK_CFG3)>>16) & 0xff; // adp left offset, default is 0x040 + * data32 = data32 * 2; + */ + if (buf_spec->max_width <= 4096 && buf_spec->max_height <= 2304) + WRITE_VREG(HEVC_DBLK_CFG3, 0x404010); //default value + else + WRITE_VREG(HEVC_DBLK_CFG3, 0x808020); // make left storage 2 x 4k] + vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE, + "HEVC_DBLK_CFG3 = %x\n", READ_VREG(HEVC_DBLK_CFG3)); + } +#ifdef LOSLESS_COMPRESS_MODE + if (pbi->mmu_enable) { + /*bit[4] : paged_mem_mode*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4)); + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SM1) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0); + } else { + /*if(cur_pic_config->bit_depth == VPX_BITS_10) + * WRITE_VREG(P_HEVCD_MPP_DECOMP_CTL1, (0<<3)); + */ + /*bit[3] smem mdoe*/ + /*else WRITE_VREG(P_HEVCD_MPP_DECOMP_CTL1, (1<<3));*/ + /*bit[3] smem mdoe*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, + (losless_comp_body_size >> 5)); + } + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, + (losless_comp_body_size >> 5));*/ + /*WRITE_VREG(HEVCD_MPP_DECOMP_CTL3, + (0xff<<20) | (0xff<<10) | 0xff);*/ + /*8-bit mode */ + WRITE_VREG(HEVC_CM_BODY_LENGTH, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_OFFSET, losless_comp_body_size); + WRITE_VREG(HEVC_CM_HEADER_LENGTH, losless_comp_header_size); + + if (get_double_write_mode(pbi) & 0x10) + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#else + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); +#endif + + if (pbi->mmu_enable) { + WRITE_VREG(HEVC_SAO_MMU_VH0_ADDR, buf_spec->mmu_vbh.buf_start); + WRITE_VREG(HEVC_SAO_MMU_VH1_ADDR, buf_spec->mmu_vbh.buf_start + + VBH_BUF_SIZE(buf_spec)); + /*data32 = READ_VREG(P_HEVC_SAO_CTRL9);*/ + /*data32 |= 0x1;*/ + /*WRITE_VREG(P_HEVC_SAO_CTRL9, data32);*/ + + /* use HEVC_CM_HEADER_START_ADDR */ + data32 = READ_VREG(HEVC_SAO_CTRL5); + data32 |= (1<<10); + 
WRITE_VREG(HEVC_SAO_CTRL5, data32); + } + + /* config mpred axi burst threshold */ + WRITE_VREG(HEVC_MPRED_CTRL3, 0x24122412); + +#ifdef CO_MV_COMPRESS + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) { + data32 = READ_VREG(HEVC_MPRED_CTRL4); + data32 |= (1 << 1); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); + } +#endif + WRITE_VREG(VP9_SEG_MAP_BUFFER, buf_spec->seg_map.buf_start); + + WRITE_VREG(LMEM_DUMP_ADR, (u32)pbi->lmem_phy_addr); + /**/ + WRITE_VREG(VP9_PROB_SWAP_BUFFER, pbi->prob_buffer_phy_addr); + WRITE_VREG(VP9_COUNT_SWAP_BUFFER, pbi->count_buffer_phy_addr); + if (pbi->mmu_enable) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) + WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR, pbi->frame_mmu_map_phy_addr); + else + WRITE_VREG(VP9_MMU_MAP_BUFFER, pbi->frame_mmu_map_phy_addr); + } + } +} + + +#ifdef VP9_LPF_LVL_UPDATE +/* + * Defines, declarations, sub-functions for vp9 de-block loop + filter Thr/Lvl table update + * - struct segmentation is for loop filter only (removed something) + * - function "vp9_loop_filter_init" and "vp9_loop_filter_frame_init" will + be instantiated in C_Entry + * - vp9_loop_filter_init run once before decoding start + * - vp9_loop_filter_frame_init run before every frame decoding start + * - set video format to VP9 is in vp9_loop_filter_init + */ +#define MAX_LOOP_FILTER 63 +#define MAX_REF_LF_DELTAS 4 +#define MAX_MODE_LF_DELTAS 2 +/*#define INTRA_FRAME 0*/ +/*#define LAST_FRAME 1*/ +/*#define MAX_REF_FRAMES 4*/ +#define SEGMENT_DELTADATA 0 +#define SEGMENT_ABSDATA 1 +#define MAX_SEGMENTS 8 +/*.#define SEG_TREE_PROBS (MAX_SEGMENTS-1)*/ +/*no use for loop filter, if this struct for common use, pls add it back*/ +/*#define PREDICTION_PROBS 3*/ +/* no use for loop filter, if this struct for common use, pls add it back*/ + +enum SEG_LVL_FEATURES { + SEG_LVL_ALT_Q = 0, /*Use alternate Quantizer ....*/ + SEG_LVL_ALT_LF = 1, /*Use alternate loop filter value...*/ + SEG_LVL_REF_FRAME = 2, /*Optional Segment reference frame*/ + 
SEG_LVL_SKIP = 3, /*Optional Segment (0,0) + skip mode*/ + SEG_LVL_MAX = 4 /*Number of features supported*/ +}; + +struct segmentation { + uint8_t enabled; + uint8_t update_map; + uint8_t update_data; + uint8_t abs_delta; + uint8_t temporal_update; + + /*no use for loop filter, if this struct + *for common use, pls add it back + */ + /*vp9_prob tree_probs[SEG_TREE_PROBS]; */ + /* no use for loop filter, if this struct + * for common use, pls add it back + */ + /*vp9_prob pred_probs[PREDICTION_PROBS];*/ + + int16_t feature_data[MAX_SEGMENTS][SEG_LVL_MAX]; + unsigned int feature_mask[MAX_SEGMENTS]; +}; + +struct loop_filter_thresh { + uint8_t mblim; + uint8_t lim; + uint8_t hev_thr; +}; + +struct loop_filter_info_n { + struct loop_filter_thresh lfthr[MAX_LOOP_FILTER + 1]; + uint8_t lvl[MAX_SEGMENTS][MAX_REF_FRAMES][MAX_MODE_LF_DELTAS]; +}; + +struct loopfilter { + int filter_level; + + int sharpness_level; + int last_sharpness_level; + + uint8_t mode_ref_delta_enabled; + uint8_t mode_ref_delta_update; + + /*0 = Intra, Last, GF, ARF*/ + signed char ref_deltas[MAX_REF_LF_DELTAS]; + signed char last_ref_deltas[MAX_REF_LF_DELTAS]; + + /*0 = ZERO_MV, MV*/ + signed char mode_deltas[MAX_MODE_LF_DELTAS]; + signed char last_mode_deltas[MAX_MODE_LF_DELTAS]; +}; + +static int vp9_clamp(int value, int low, int high) +{ + return value < low ? low : (value > high ? 
high : value); +} + +int segfeature_active(struct segmentation *seg, + int segment_id, + enum SEG_LVL_FEATURES feature_id) { + return seg->enabled && + (seg->feature_mask[segment_id] & (1 << feature_id)); +} + +int get_segdata(struct segmentation *seg, int segment_id, + enum SEG_LVL_FEATURES feature_id) { + return seg->feature_data[segment_id][feature_id]; +} + +static void vp9_update_sharpness(struct loop_filter_info_n *lfi, + int sharpness_lvl) +{ + int lvl; + /*For each possible value for the loop filter fill out limits*/ + for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) { + /*Set loop filter parameters that control sharpness.*/ + int block_inside_limit = lvl >> ((sharpness_lvl > 0) + + (sharpness_lvl > 4)); + + if (sharpness_lvl > 0) { + if (block_inside_limit > (9 - sharpness_lvl)) + block_inside_limit = (9 - sharpness_lvl); + } + + if (block_inside_limit < 1) + block_inside_limit = 1; + + lfi->lfthr[lvl].lim = (uint8_t)block_inside_limit; + lfi->lfthr[lvl].mblim = (uint8_t)(2 * (lvl + 2) + + block_inside_limit); + } +} + +/*instantiate this function once when decode is started*/ +void vp9_loop_filter_init(struct VP9Decoder_s *pbi) +{ + struct loop_filter_info_n *lfi = pbi->lfi; + struct loopfilter *lf = pbi->lf; + struct segmentation *seg_4lf = pbi->seg_4lf; + int i; + unsigned int data32; + + memset(lfi, 0, sizeof(struct loop_filter_info_n)); + memset(lf, 0, sizeof(struct loopfilter)); + memset(seg_4lf, 0, sizeof(struct segmentation)); + lf->sharpness_level = 0; /*init to 0 */ + /*init limits for given sharpness*/ + vp9_update_sharpness(lfi, lf->sharpness_level); + lf->last_sharpness_level = lf->sharpness_level; + /*init hev threshold const vectors (actually no use) + *for (i = 0; i <= MAX_LOOP_FILTER; i++) + * lfi->lfthr[i].hev_thr = (uint8_t)(i >> 4); + */ + + /*Write to register*/ + for (i = 0; i < 32; i++) { + unsigned int thr; + + thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f)<<8) | + (lfi->lfthr[i * 2 + 1].mblim & 0xff); + thr = (thr<<16) | 
((lfi->lfthr[i*2].lim & 0x3f)<<8) | + (lfi->lfthr[i * 2].mblim & 0xff); + WRITE_VREG(HEVC_DBLK_CFG9, thr); + } + + /*video format is VP9*/ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + data32 = (0x3 << 14) | // (dw fifo thres r and b) + (0x3 << 12) | // (dw fifo thres r or b) + (0x3 << 10) | // (dw fifo thres not r/b) + (0x3 << 8) | // 1st/2nd write both enable + (0x1 << 0); // vp9 video format + if (get_double_write_mode(pbi) == 0x10) + data32 &= (~0x100); + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + data32 = (0x57 << 8) | /*1st/2nd write both enable*/ + (0x1 << 0); /*vp9 video format*/ + if (get_double_write_mode(pbi) == 0x10) + data32 &= (~0x100); + } else + data32 = 0x40400001; + + WRITE_VREG(HEVC_DBLK_CFGB, data32); + if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info("[DBLK DEBUG] CFGB : 0x%x\n", data32); +} + /* perform this function per frame*/ +void vp9_loop_filter_frame_init(struct segmentation *seg, + struct loop_filter_info_n *lfi, struct loopfilter *lf, + int default_filt_lvl) { + int i; + int seg_id; + /*n_shift is the multiplier for lf_deltas + *the multiplier is 1 for when filter_lvl is between 0 and 31; + *2 when filter_lvl is between 32 and 63 + */ + const int scale = 1 << (default_filt_lvl >> 5); + + /*update limits if sharpness has changed*/ + if (lf->last_sharpness_level != lf->sharpness_level) { + vp9_update_sharpness(lfi, lf->sharpness_level); + lf->last_sharpness_level = lf->sharpness_level; + + /*Write to register*/ + for (i = 0; i < 32; i++) { + unsigned int thr; + + thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f) << 8) + | (lfi->lfthr[i * 2 + 1].mblim & 0xff); + thr = (thr << 16) | ((lfi->lfthr[i * 2].lim & 0x3f) << 8) + | (lfi->lfthr[i * 2].mblim & 0xff); + WRITE_VREG(HEVC_DBLK_CFG9, thr); + } + } + + for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) {/*MAX_SEGMENTS = 8*/ + int lvl_seg = default_filt_lvl; + + if (segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) { + const int data = get_segdata(seg, seg_id, + 
SEG_LVL_ALT_LF); + lvl_seg = vp9_clamp(seg->abs_delta == SEGMENT_ABSDATA ? + data : default_filt_lvl + data, + 0, MAX_LOOP_FILTER); +#ifdef DBG_LF_PRINT + pr_info("segfeature_active!!!seg_id=%d,lvl_seg=%d\n", seg_id, lvl_seg); +#endif + } + + if (!lf->mode_ref_delta_enabled) { + /*we could get rid of this if we assume that deltas are set to + *zero when not in use; encoder always uses deltas + */ + memset(lfi->lvl[seg_id], lvl_seg, sizeof(lfi->lvl[seg_id])); + } else { + int ref, mode; + const int intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME] + * scale; +#ifdef DBG_LF_PRINT + pr_info("LF_PRINT:vp9_loop_filter_frame_init,seg_id=%d\n", seg_id); + pr_info("ref_deltas[INTRA_FRAME]=%d\n", lf->ref_deltas[INTRA_FRAME]); +#endif + lfi->lvl[seg_id][INTRA_FRAME][0] = + vp9_clamp(intra_lvl, 0, MAX_LOOP_FILTER); + + for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) { + /* LAST_FRAME = 1, MAX_REF_FRAMES = 4*/ + for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) { + /*MAX_MODE_LF_DELTAS = 2*/ + const int inter_lvl = + lvl_seg + lf->ref_deltas[ref] * scale + + lf->mode_deltas[mode] * scale; +#ifdef DBG_LF_PRINT +#endif + lfi->lvl[seg_id][ref][mode] = + vp9_clamp(inter_lvl, 0, + MAX_LOOP_FILTER); + } + } + } + } + +#ifdef DBG_LF_PRINT + /*print out thr/lvl table per frame*/ + for (i = 0; i <= MAX_LOOP_FILTER; i++) { + pr_info("LF_PRINT:(%d)thr=%d,blim=%d,lim=%d\n", + i, lfi->lfthr[i].hev_thr, lfi->lfthr[i].mblim, + lfi->lfthr[i].lim); + } + for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) { + pr_info("LF_PRINT:lvl(seg_id=%d)(mode=0,%d,%d,%d,%d)\n", + seg_id, lfi->lvl[seg_id][0][0], + lfi->lvl[seg_id][1][0], lfi->lvl[seg_id][2][0], + lfi->lvl[seg_id][3][0]); + pr_info("i(mode=1,%d,%d,%d,%d)\n", lfi->lvl[seg_id][0][1], + lfi->lvl[seg_id][1][1], lfi->lvl[seg_id][2][1], + lfi->lvl[seg_id][3][1]); + } +#endif + + /*Write to register */ + for (i = 0; i < 16; i++) { + unsigned int level; + + level = ((lfi->lvl[i >> 1][3][i & 1] & 0x3f) << 24) | + ((lfi->lvl[i >> 1][2][i & 1] & 
0x3f) << 16) | + ((lfi->lvl[i >> 1][1][i & 1] & 0x3f) << 8) | + (lfi->lvl[i >> 1][0][i & 1] & 0x3f); + if (!default_filt_lvl) + level = 0; + WRITE_VREG(HEVC_DBLK_CFGA, level); + } +} +/* VP9_LPF_LVL_UPDATE */ +#endif + +static void vp9_init_decoder_hw(struct VP9Decoder_s *pbi, u32 mask) +{ + unsigned int data32; + int i; + const unsigned short parser_cmd[PARSER_CMD_NUMBER] = { + 0x0401, 0x8401, 0x0800, 0x0402, 0x9002, 0x1423, + 0x8CC3, 0x1423, 0x8804, 0x9825, 0x0800, 0x04FE, + 0x8406, 0x8411, 0x1800, 0x8408, 0x8409, 0x8C2A, + 0x9C2B, 0x1C00, 0x840F, 0x8407, 0x8000, 0x8408, + 0x2000, 0xA800, 0x8410, 0x04DE, 0x840C, 0x840D, + 0xAC00, 0xA000, 0x08C0, 0x08E0, 0xA40E, 0xFC00, + 0x7C00 + }; +#if 0 + if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) { + /* Set MCR fetch priorities*/ + data32 = 0x1 | (0x1 << 2) | (0x1 <<3) | + (24 << 4) | (32 << 11) | (24 << 18) | (32 << 25); + WRITE_VREG(HEVCD_MPP_DECOMP_AXIURG_CTL, data32); + } +#endif + /*if (debug & VP9_DEBUG_BUFMGR_MORE) + pr_info("%s\n", __func__);*/ + if (mask & HW_MASK_FRONT) { + data32 = READ_VREG(HEVC_PARSER_INT_CONTROL); +#if 1 + /* set bit 31~29 to 3 if HEVC_STREAM_FIFO_CTL[29] is 1 */ + data32 &= ~(7 << 29); + data32 |= (3 << 29); +#endif + data32 = data32 | + (1 << 24) |/*stream_buffer_empty_int_amrisc_enable*/ + (1 << 22) |/*stream_fifo_empty_int_amrisc_enable*/ + (1 << 7) |/*dec_done_int_cpu_enable*/ + (1 << 4) |/*startcode_found_int_cpu_enable*/ + (0 << 3) |/*startcode_found_int_amrisc_enable*/ + (1 << 0) /*parser_int_enable*/ + ; +#ifdef SUPPORT_FB_DECODING +#ifndef FB_DECODING_TEST_SCHEDULE + /*fed_fb_slice_done_int_cpu_enable*/ + if (pbi->used_stage_buf_num > 0) + data32 |= (1 << 10); +#endif +#endif + WRITE_VREG(HEVC_PARSER_INT_CONTROL, data32); + + data32 = READ_VREG(HEVC_SHIFT_STATUS); + data32 = data32 | + (0 << 1) |/*emulation_check_off VP9 + do not have emulation*/ + (1 << 0)/*startcode_check_on*/ + ; + WRITE_VREG(HEVC_SHIFT_STATUS, data32); + WRITE_VREG(HEVC_SHIFT_CONTROL, + (0 << 14) | 
/*disable_start_code_protect*/ + (1 << 10) | /*length_zero_startcode_en for VP9*/ + (1 << 9) | /*length_valid_startcode_en for VP9*/ + (3 << 6) | /*sft_valid_wr_position*/ + (2 << 4) | /*emulate_code_length_sub_1*/ + (3 << 1) | /*start_code_length_sub_1 + VP9 use 0x00000001 as startcode (4 Bytes)*/ + (1 << 0) /*stream_shift_enable*/ + ); + + WRITE_VREG(HEVC_CABAC_CONTROL, + (1 << 0)/*cabac_enable*/ + ); + + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, + (1 << 0)/* hevc_parser_core_clk_en*/ + ); + + + WRITE_VREG(HEVC_DEC_STATUS_REG, 0); + + } + + if (mask & HW_MASK_BACK) { + /*Initial IQIT_SCALELUT memory + -- just to avoid X in simulation*/ + if (is_rdma_enable()) + rdma_back_end_work(pbi->rdma_phy_adr, RDMA_SIZE); + else { + WRITE_VREG(HEVC_IQIT_SCALELUT_WR_ADDR, 0);/*cfg_p_addr*/ + for (i = 0; i < 1024; i++) + WRITE_VREG(HEVC_IQIT_SCALELUT_DATA, 0); + } + } + + if (mask & HW_MASK_FRONT) { + u32 decode_mode; +#ifdef ENABLE_SWAP_TEST + WRITE_VREG(HEVC_STREAM_SWAP_TEST, 100); +#else + WRITE_VREG(HEVC_STREAM_SWAP_TEST, 0); +#endif +#ifdef MULTI_INSTANCE_SUPPORT + if (!pbi->m_ins_flag) { + if (pbi->low_latency_flag) + decode_mode = DECODE_MODE_SINGLE_LOW_LATENCY; + else + decode_mode = DECODE_MODE_SINGLE; + } else if (vdec_frame_based(hw_to_vdec(pbi))) + decode_mode = pbi->no_head ? 
+ DECODE_MODE_MULTI_FRAMEBASE_NOHEAD : + DECODE_MODE_MULTI_FRAMEBASE; + else + decode_mode = DECODE_MODE_MULTI_STREAMBASE; +#ifdef SUPPORT_FB_DECODING +#ifndef FB_DECODING_TEST_SCHEDULE + if (pbi->used_stage_buf_num > 0) + decode_mode |= (0x01 << 24); +#endif +#endif + WRITE_VREG(DECODE_MODE, decode_mode); + WRITE_VREG(HEVC_DECODE_SIZE, 0); + WRITE_VREG(HEVC_DECODE_COUNT, 0); +#else + WRITE_VREG(DECODE_MODE, DECODE_MODE_SINGLE); + WRITE_VREG(HEVC_DECODE_PIC_BEGIN_REG, 0); + WRITE_VREG(HEVC_DECODE_PIC_NUM_REG, 0x7fffffff); /*to remove*/ +#endif + /*Send parser_cmd*/ + WRITE_VREG(HEVC_PARSER_CMD_WRITE, (1 << 16) | (0 << 0)); + for (i = 0; i < PARSER_CMD_NUMBER; i++) + WRITE_VREG(HEVC_PARSER_CMD_WRITE, parser_cmd[i]); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1); + WRITE_VREG(HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2); + + + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + /* (1 << 8) |*/ /*sao_sw_pred_enable*/ + (1 << 5) | /*parser_sao_if_en*/ + (1 << 2) | /*parser_mpred_if_en*/ + (1 << 0) /*parser_scaler_if_en*/ + ); + } + + if (mask & HW_MASK_BACK) { + /*Changed to Start MPRED in microcode*/ + /* + pr_info("[test.c] Start MPRED\n"); + WRITE_VREG(HEVC_MPRED_INT_STATUS, + (1<<31) + ); + */ + WRITE_VREG(HEVCD_IPP_TOP_CNTL, + (0 << 1) | /*enable ipp*/ + (1 << 0) /*software reset ipp and mpp*/ + ); + WRITE_VREG(HEVCD_IPP_TOP_CNTL, + (1 << 1) | /*enable ipp*/ + (0 << 0) /*software reset ipp and mpp*/ + ); + if (get_double_write_mode(pbi) & 0x10) { + /*Enable NV21 reference read mode for MC*/ + WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31); + } + + /*Initialize mcrcc and decomp perf counters*/ + if (mcrcc_cache_alg_flag && + pbi->init_flag == 0) { + mcrcc_perfcount_reset(); + decomp_perfcount_reset(); + } + } + return; +} + + +#ifdef CONFIG_HEVC_CLK_FORCED_ON +static void config_vp9_clk_forced_on(void) +{ + unsigned int rdata32; + /*IQIT*/ + rdata32 = READ_VREG(HEVC_IQIT_CLK_RST_CTRL); + 
WRITE_VREG(HEVC_IQIT_CLK_RST_CTRL, rdata32 | (0x1 << 2)); + + /* DBLK*/ + rdata32 = READ_VREG(HEVC_DBLK_CFG0); + WRITE_VREG(HEVC_DBLK_CFG0, rdata32 | (0x1 << 2)); + + /* SAO*/ + rdata32 = READ_VREG(HEVC_SAO_CTRL1); + WRITE_VREG(HEVC_SAO_CTRL1, rdata32 | (0x1 << 2)); + + /*MPRED*/ + rdata32 = READ_VREG(HEVC_MPRED_CTRL1); + WRITE_VREG(HEVC_MPRED_CTRL1, rdata32 | (0x1 << 24)); + + /* PARSER*/ + rdata32 = READ_VREG(HEVC_STREAM_CONTROL); + WRITE_VREG(HEVC_STREAM_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_SHIFT_CONTROL); + WRITE_VREG(HEVC_SHIFT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_CABAC_CONTROL); + WRITE_VREG(HEVC_CABAC_CONTROL, rdata32 | (0x1 << 13)); + rdata32 = READ_VREG(HEVC_PARSER_CORE_CONTROL); + WRITE_VREG(HEVC_PARSER_CORE_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_INT_CONTROL); + WRITE_VREG(HEVC_PARSER_INT_CONTROL, rdata32 | (0x1 << 15)); + rdata32 = READ_VREG(HEVC_PARSER_IF_CONTROL); + WRITE_VREG(HEVC_PARSER_IF_CONTROL, + rdata32 | (0x1 << 6) | (0x1 << 3) | (0x1 << 1)); + + /*IPP*/ + rdata32 = READ_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG); + WRITE_VREG(HEVCD_IPP_DYNCLKGATE_CONFIG, rdata32 | 0xffffffff); + + /* MCRCC*/ + rdata32 = READ_VREG(HEVCD_MCRCC_CTL1); + WRITE_VREG(HEVCD_MCRCC_CTL1, rdata32 | (0x1 << 3)); +} +#endif + + +#ifdef MCRCC_ENABLE +static void dump_hit_rate(struct VP9Decoder_s *pbi) +{ + if (debug & VP9_DEBUG_CACHE_HIT_RATE) { + mcrcc_get_hitrate(pbi->m_ins_flag); + decomp_get_hitrate(); + decomp_get_comprate(); + } +} + +static void config_mcrcc_axi_hw(struct VP9Decoder_s *pbi) +{ + unsigned int rdata32; + unsigned short is_inter; + /*pr_info("Entered config_mcrcc_axi_hw...\n");*/ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2);/* reset mcrcc*/ + is_inter = ((pbi->common.frame_type != KEY_FRAME) && + (!pbi->common.intra_only)) ? 
1 : 0; + if (!is_inter) { /* I-PIC*/ + /*remove reset -- disables clock*/ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x0); + return; + } + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + mcrcc_get_hitrate(pbi->m_ins_flag); + decomp_get_hitrate(); + decomp_get_comprate(); + } + + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (1 << 1) | 0); + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + /*Programme canvas1 */ + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32); + /*enable mcrcc progressive-mode*/ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); +} + +static void config_mcrcc_axi_hw_new(struct VP9Decoder_s *pbi) +{ + u32 curr_picnum = -1; + u32 lastref_picnum = -1; + u32 goldenref_picnum = -1; + u32 altref_picnum = -1; + + u32 lastref_delta_picnum; + u32 goldenref_delta_picnum; + u32 altref_delta_picnum; + + u32 rdata32; + + u32 lastcanvas; + u32 goldencanvas; + u32 altrefcanvas; + + u16 is_inter; + u16 lastref_inref; + u16 goldenref_inref; + u16 altref_inref; + + u32 refcanvas_array[3], utmp; + int deltapicnum_array[3], tmp; + + struct VP9_Common_s *cm = &pbi->common; + struct PIC_BUFFER_CONFIG_s *cur_pic_config + = &cm->cur_frame->buf; + curr_picnum = cur_pic_config->decode_idx; + if (cm->frame_refs[0].buf) + lastref_picnum = cm->frame_refs[0].buf->decode_idx; + if (cm->frame_refs[1].buf) + goldenref_picnum = cm->frame_refs[1].buf->decode_idx; + if (cm->frame_refs[2].buf) + altref_picnum = cm->frame_refs[2].buf->decode_idx; + + lastref_delta_picnum = (lastref_picnum >= curr_picnum) ? + (lastref_picnum - curr_picnum) : (curr_picnum - lastref_picnum); + goldenref_delta_picnum = (goldenref_picnum >= curr_picnum) ? 
+ (goldenref_picnum - curr_picnum) : + (curr_picnum - goldenref_picnum); + altref_delta_picnum = + (altref_picnum >= curr_picnum) ? + (altref_picnum - curr_picnum) : (curr_picnum - altref_picnum); + + lastref_inref = (cm->frame_refs[0].idx != INVALID_IDX) ? 1 : 0; + goldenref_inref = (cm->frame_refs[1].idx != INVALID_IDX) ? 1 : 0; + altref_inref = (cm->frame_refs[2].idx != INVALID_IDX) ? 1 : 0; + + if (debug & VP9_DEBUG_CACHE) + pr_info("%s--0--lastref_inref:%d goldenref_inref:%d altref_inref:%d\n", + __func__, lastref_inref, goldenref_inref, altref_inref); + + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); /* reset mcrcc */ + + is_inter = ((pbi->common.frame_type != KEY_FRAME) + && (!pbi->common.intra_only)) ? 1 : 0; + + if (!is_inter) { /* I-PIC */ + /* remove reset -- disables clock */ + WRITE_VREG(HEVCD_MCRCC_CTL1, 0x0); + return; + } + + if (!pbi->m_ins_flag) + dump_hit_rate(pbi); + + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (1<<1) | 0); + lastcanvas = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + goldencanvas = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + altrefcanvas = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + + if (debug & VP9_DEBUG_CACHE) + pr_info("[test.c] lastref_canv:%x goldenref_canv:%x altref_canv:%x\n", + lastcanvas, goldencanvas, altrefcanvas); + + altref_inref = ((altref_inref == 1) && + (altrefcanvas != (goldenref_inref + ? goldencanvas : 0xffffffff)) && + (altrefcanvas != (lastref_inref ? + lastcanvas : 0xffffffff))) ? 1 : 0; + goldenref_inref = ((goldenref_inref == 1) && + (goldencanvas != (lastref_inref ? + lastcanvas : 0xffffffff))) ? 1 : 0; + if (debug & VP9_DEBUG_CACHE) + pr_info("[test.c]--1--lastref_inref:%d goldenref_inref:%d altref_inref:%d\n", + lastref_inref, goldenref_inref, altref_inref); + + altref_delta_picnum = altref_inref ? altref_delta_picnum : 0x7fffffff; + goldenref_delta_picnum = goldenref_inref ? + goldenref_delta_picnum : 0x7fffffff; + lastref_delta_picnum = lastref_inref ? 
+ lastref_delta_picnum : 0x7fffffff; + if (debug & VP9_DEBUG_CACHE) + pr_info("[test.c]--1--lastref_delta_picnum:%d goldenref_delta_picnum:%d altref_delta_picnum:%d\n", + lastref_delta_picnum, goldenref_delta_picnum, + altref_delta_picnum); + /*ARRAY SORT HERE DELTA/CANVAS ARRAY SORT -- use DELTA*/ + + refcanvas_array[0] = lastcanvas; + refcanvas_array[1] = goldencanvas; + refcanvas_array[2] = altrefcanvas; + + deltapicnum_array[0] = lastref_delta_picnum; + deltapicnum_array[1] = goldenref_delta_picnum; + deltapicnum_array[2] = altref_delta_picnum; + + /* sort0 : 2-to-1 */ + if (deltapicnum_array[2] < deltapicnum_array[1]) { + utmp = refcanvas_array[2]; + refcanvas_array[2] = refcanvas_array[1]; + refcanvas_array[1] = utmp; + tmp = deltapicnum_array[2]; + deltapicnum_array[2] = deltapicnum_array[1]; + deltapicnum_array[1] = tmp; + } + /* sort1 : 1-to-0 */ + if (deltapicnum_array[1] < deltapicnum_array[0]) { + utmp = refcanvas_array[1]; + refcanvas_array[1] = refcanvas_array[0]; + refcanvas_array[0] = utmp; + tmp = deltapicnum_array[1]; + deltapicnum_array[1] = deltapicnum_array[0]; + deltapicnum_array[0] = tmp; + } + /* sort2 : 2-to-1 */ + if (deltapicnum_array[2] < deltapicnum_array[1]) { + utmp = refcanvas_array[2]; refcanvas_array[2] = + refcanvas_array[1]; refcanvas_array[1] = utmp; + tmp = deltapicnum_array[2]; deltapicnum_array[2] = + deltapicnum_array[1]; deltapicnum_array[1] = tmp; + } + if (mcrcc_cache_alg_flag == + THODIYIL_MCRCC_CANVAS_ALGX) { /*09/15/2017*/ + /* lowest delta_picnum */ + rdata32 = refcanvas_array[0]; + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + /* 2nd-lowest delta_picnum */ + rdata32 = refcanvas_array[1]; + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32); + } else { + /* previous version -- LAST/GOLDEN ALWAYS -- before 09/13/2017*/ + WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, + (0 << 8) | (1<<1) | 0); + 
rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32); + + /* Programme canvas1 */ + rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR); + rdata32 = rdata32 & 0xffff; + rdata32 = rdata32 | (rdata32 << 16); + WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32); + } + + WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0); /* enable mcrcc progressive-mode */ + return; +} + +#endif + + +static void free_lf_buf(struct VP9Decoder_s *pbi) +{ + if (pbi->lfi) + vfree(pbi->lfi); + if (pbi->lf) + vfree(pbi->lf); + if (pbi->seg_4lf) + vfree(pbi->seg_4lf); + pbi->lfi = NULL; + pbi->lf = NULL; + pbi->seg_4lf = NULL; +} + +static int alloc_lf_buf(struct VP9Decoder_s *pbi) +{ + pbi->lfi = vmalloc(sizeof(struct loop_filter_info_n)); + pbi->lf = vmalloc(sizeof(struct loopfilter)); + pbi->seg_4lf = vmalloc(sizeof(struct segmentation)); + if (pbi->lfi == NULL || pbi->lf == NULL || pbi->seg_4lf == NULL) { + free_lf_buf(pbi); + pr_err("[test.c] vp9_loop_filter init malloc error!!!\n"); + return -1; + } + return 0; +} + +static void vp9_local_uninit(struct VP9Decoder_s *pbi) +{ + pbi->rpm_ptr = NULL; + pbi->lmem_ptr = NULL; + if (pbi->rpm_addr) { + dma_free_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, + pbi->rpm_addr, + pbi->rpm_phy_addr); + pbi->rpm_addr = NULL; + } + if (pbi->lmem_addr) { + if (pbi->lmem_phy_addr) + dma_free_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, pbi->lmem_addr, + pbi->lmem_phy_addr); + pbi->lmem_addr = NULL; + } + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) && + (vdec_secure(hw_to_vdec(pbi)))) { + tee_vp9_prob_free((u32)pbi->prob_buffer_phy_addr); + pbi->prob_buffer_phy_addr = 0; + pbi->count_buffer_phy_addr = 0; + pbi->prob_buffer_addr = NULL; + pbi->count_buffer_addr = NULL; + } else { + if (pbi->prob_buffer_addr) { + if (pbi->prob_buffer_phy_addr) + dma_free_coherent(amports_get_dma_device(), + PROB_BUF_SIZE, pbi->prob_buffer_addr, + 
pbi->prob_buffer_phy_addr); + + pbi->prob_buffer_addr = NULL; + } + if (pbi->count_buffer_addr) { + if (pbi->count_buffer_phy_addr) + dma_free_coherent(amports_get_dma_device(), + COUNT_BUF_SIZE, pbi->count_buffer_addr, + pbi->count_buffer_phy_addr); + + pbi->count_buffer_addr = NULL; + } + } + if (pbi->mmu_enable) { + u32 mmu_map_size = vvp9_frame_mmu_map_size(pbi); + if (pbi->frame_mmu_map_addr) { + if (pbi->frame_mmu_map_phy_addr) + dma_free_coherent(amports_get_dma_device(), + mmu_map_size, + pbi->frame_mmu_map_addr, + pbi->frame_mmu_map_phy_addr); + pbi->frame_mmu_map_addr = NULL; + } + } +#ifdef SUPPORT_FB_DECODING + if (pbi->stage_mmu_map_addr) { + if (pbi->stage_mmu_map_phy_addr) + dma_free_coherent(amports_get_dma_device(), + STAGE_MMU_MAP_SIZE * STAGE_MAX_BUFFERS, + pbi->stage_mmu_map_addr, + pbi->stage_mmu_map_phy_addr); + pbi->stage_mmu_map_addr = NULL; + } + + uninit_stage_buf(pbi); +#endif + +#ifdef VP9_LPF_LVL_UPDATE + free_lf_buf(pbi); +#endif + if (pbi->gvs) + vfree(pbi->gvs); + pbi->gvs = NULL; +} + +static int vp9_local_init(struct VP9Decoder_s *pbi) +{ + int ret = -1; + /*int losless_comp_header_size, losless_comp_body_size;*/ + + struct BuffInfo_s *cur_buf_info = NULL; + + memset(&pbi->param, 0, sizeof(union param_u)); + memset(&pbi->common, 0, sizeof(struct VP9_Common_s)); +#ifdef MULTI_INSTANCE_SUPPORT + cur_buf_info = &pbi->work_space_buf_store; + if (force_bufspec) { + memcpy(cur_buf_info, &amvvp9_workbuff_spec[force_bufspec & 0xf], + sizeof(struct BuffInfo_s)); + pr_info("force buffer spec %d\n", force_bufspec & 0xf); + } else { + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + memcpy(cur_buf_info, &amvvp9_workbuff_spec[2], /* 8k */ + sizeof(struct BuffInfo_s)); + } else + memcpy(cur_buf_info, &amvvp9_workbuff_spec[1], /* 4k */ + sizeof(struct BuffInfo_s)); + } else + memcpy(cur_buf_info, &amvvp9_workbuff_spec[0],/* 1080p 
*/ + sizeof(struct BuffInfo_s)); + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) { + memcpy(cur_buf_info, &amvvp9_workbuff_spec[5], /* 8k */ + sizeof(struct BuffInfo_s)); + } else + memcpy(cur_buf_info, &amvvp9_workbuff_spec[3],/* 1080p */ + sizeof(struct BuffInfo_s)); + } + } + + cur_buf_info->start_adr = pbi->buf_start; + if (!pbi->mmu_enable) + pbi->mc_buf_spec.buf_end = pbi->buf_start + pbi->buf_size; + +#else +/*! MULTI_INSTANCE_SUPPORT*/ + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + cur_buf_info = &amvvp9_workbuff_spec[2];/* 8k work space */ + else + cur_buf_info = &amvvp9_workbuff_spec[1];/* 4k2k work space */ + } else + cur_buf_info = &amvvp9_workbuff_spec[0];/* 1080p work space */ + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) { + cur_buf_info = &amvvp9_workbuff_spec[5];/* 8k work space */ + } else + cur_buf_info = &amvvp9_workbuff_spec[3];/* 1080p work space */ + } + +#endif + + init_buff_spec(pbi, cur_buf_info); + vp9_bufmgr_init(pbi, cur_buf_info, NULL); + + if (!vdec_is_support_4k() + && (buf_alloc_width > 1920 && buf_alloc_height > 1088)) { + buf_alloc_width = 1920; + buf_alloc_height = 1088; + if (pbi->max_pic_w > 1920 && pbi->max_pic_h > 1088) { + pbi->max_pic_w = 1920; + pbi->max_pic_h = 1088; + } + } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) { + buf_alloc_width = 8192; + buf_alloc_height = 4608; + } + pbi->init_pic_w = pbi->max_pic_w ? pbi->max_pic_w : + (buf_alloc_width ? buf_alloc_width : + (pbi->vvp9_amstream_dec_info.width ? + pbi->vvp9_amstream_dec_info.width : + pbi->work_space_buf->max_width)); + pbi->init_pic_h = pbi->max_pic_h ? pbi->max_pic_h : + (buf_alloc_height ? buf_alloc_height : + (pbi->vvp9_amstream_dec_info.height ? 
+ pbi->vvp9_amstream_dec_info.height : + pbi->work_space_buf->max_height)); + + /* video is not support unaligned with 64 in tl1 + ** vdec canvas mode will be linear when dump yuv is set + */ + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) && + (pbi->double_write_mode != 0) && + (((pbi->max_pic_w % 64) != 0) || + (pbi->vvp9_amstream_dec_info.width % 64) != 0)) { + if (hw_to_vdec(pbi)->canvas_mode != + CANVAS_BLKMODE_LINEAR) + pbi->mem_map_mode = 2; + else { + pbi->mem_map_mode = 0; + pr_info("vdec blkmod linear, force mem_map_mode 0\n"); + } + } + +#ifndef MV_USE_FIXED_BUF + if (!pbi->is_used_v4l) { + if (init_mv_buf_list(pbi) < 0) { + pr_err("%s: init_mv_buf_list fail\n", __func__); + return -1; + } + } +#endif + + pbi->pts_unstable = ((unsigned long)(pbi->vvp9_amstream_dec_info.param) + & 0x40) >> 6; + + if ((debug & VP9_DEBUG_SEND_PARAM_WITH_REG) == 0) { + pbi->rpm_addr = dma_alloc_coherent(amports_get_dma_device(), + RPM_BUF_SIZE, + &pbi->rpm_phy_addr, GFP_KERNEL); + if (pbi->rpm_addr == NULL) { + pr_err("%s: failed to alloc rpm buffer\n", __func__); + return -1; + } + + pbi->rpm_ptr = pbi->rpm_addr; + } + + pbi->lmem_addr = dma_alloc_coherent(amports_get_dma_device(), + LMEM_BUF_SIZE, + &pbi->lmem_phy_addr, GFP_KERNEL); + if (pbi->lmem_addr == NULL) { + pr_err("%s: failed to alloc lmem buffer\n", __func__); + return -1; + } + pbi->lmem_ptr = pbi->lmem_addr; + + if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) && + (vdec_secure(hw_to_vdec(pbi)))) { + u32 prob_addr, id; + id = tee_vp9_prob_malloc(&prob_addr); + if (prob_addr <= 0) + pr_err("%s, tee[%d] malloc prob buf failed\n", __func__, id); + else { + pbi->prob_buffer_phy_addr = prob_addr; + pbi->count_buffer_phy_addr = pbi->prob_buffer_phy_addr + PROB_BUF_SIZE; + } + pbi->prob_buffer_addr = NULL; + pbi->count_buffer_addr = NULL; + } else { + pbi->prob_buffer_addr = dma_alloc_coherent(amports_get_dma_device(), + PROB_BUF_SIZE, + &pbi->prob_buffer_phy_addr, GFP_KERNEL); + if 
(pbi->prob_buffer_addr == NULL) { + pr_err("%s: failed to alloc prob_buffer\n", __func__); + return -1; + } + memset(pbi->prob_buffer_addr, 0, PROB_BUF_SIZE); + pbi->count_buffer_addr = dma_alloc_coherent(amports_get_dma_device(), + COUNT_BUF_SIZE, + &pbi->count_buffer_phy_addr, GFP_KERNEL); + if (pbi->count_buffer_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(pbi->count_buffer_addr, 0, COUNT_BUF_SIZE); + } + + if (pbi->mmu_enable) { + u32 mmu_map_size = vvp9_frame_mmu_map_size(pbi); + pbi->frame_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + mmu_map_size, + &pbi->frame_mmu_map_phy_addr, GFP_KERNEL); + if (pbi->frame_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(pbi->frame_mmu_map_addr, 0, COUNT_BUF_SIZE); + } +#ifdef SUPPORT_FB_DECODING + if (pbi->m_ins_flag && stage_buf_num > 0) { + pbi->stage_mmu_map_addr = + dma_alloc_coherent(amports_get_dma_device(), + STAGE_MMU_MAP_SIZE * STAGE_MAX_BUFFERS, + &pbi->stage_mmu_map_phy_addr, GFP_KERNEL); + if (pbi->stage_mmu_map_addr == NULL) { + pr_err("%s: failed to alloc count_buffer\n", __func__); + return -1; + } + memset(pbi->stage_mmu_map_addr, + 0, STAGE_MMU_MAP_SIZE * STAGE_MAX_BUFFERS); + + init_stage_buf(pbi); + } +#endif + + ret = 0; + return ret; +} + +/******************************************** + * Mailbox command + ********************************************/ +#define CMD_FINISHED 0 +#define CMD_ALLOC_VIEW 1 +#define CMD_FRAME_DISPLAY 3 +#define CMD_DEBUG 10 + + +#define DECODE_BUFFER_NUM_MAX 32 +#define DISPLAY_BUFFER_NUM 6 + +#define video_domain_addr(adr) (adr&0x7fffffff) +#define DECODER_WORK_SPACE_SIZE 0x800000 + +#define spec2canvas(x) \ + (((x)->uv_canvas_index << 16) | \ + ((x)->uv_canvas_index << 8) | \ + ((x)->y_canvas_index << 0)) + + +static void set_canvas(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + struct vdec_s *vdec = hw_to_vdec(pbi); 
+ int canvas_w = ALIGN(pic_config->y_crop_width, 64)/4; + int canvas_h = ALIGN(pic_config->y_crop_height, 32)/4; + int blkmode = pbi->mem_map_mode; + /*CANVAS_BLKMODE_64X32*/ + if (pic_config->double_write_mode) { + canvas_w = pic_config->y_crop_width / + get_double_write_ratio( + pic_config->double_write_mode); + canvas_h = pic_config->y_crop_height / + get_double_write_ratio( + pic_config->double_write_mode); + + /* sao ctrl1 reg alignline with 64, align with 64 */ + canvas_w = ALIGN(canvas_w, 64); + canvas_h = ALIGN(canvas_h, 32); + + if (vdec->parallel_dec == 1) { + if (pic_config->y_canvas_index == -1) + pic_config->y_canvas_index = + vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + if (pic_config->uv_canvas_index == -1) + pic_config->uv_canvas_index = + vdec->get_canvas_ex(CORE_MASK_HEVC, vdec->id); + } else { + pic_config->y_canvas_index = 128 + pic_config->index * 2; + pic_config->uv_canvas_index = 128 + pic_config->index * 2 + 1; + } + + config_cav_lut_ex(pic_config->y_canvas_index, + pic_config->dw_y_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, pbi->is_used_v4l ? 0 : 7, VDEC_HEVC); + config_cav_lut_ex(pic_config->uv_canvas_index, + pic_config->dw_u_v_adr, canvas_w, canvas_h, + CANVAS_ADDR_NOWRAP, blkmode, pbi->is_used_v4l ? 0 : 7, VDEC_HEVC); + +#ifdef MULTI_INSTANCE_SUPPORT + pic_config->canvas_config[0].phy_addr = + pic_config->dw_y_adr; + pic_config->canvas_config[0].width = + canvas_w; + pic_config->canvas_config[0].height = + canvas_h; + pic_config->canvas_config[0].block_mode = + blkmode; + pic_config->canvas_config[0].endian = pbi->is_used_v4l ? 0 : 7; + + pic_config->canvas_config[1].phy_addr = + pic_config->dw_u_v_adr; + pic_config->canvas_config[1].width = + canvas_w; + pic_config->canvas_config[1].height = + canvas_h; + pic_config->canvas_config[1].block_mode = + blkmode; + pic_config->canvas_config[1].endian = pbi->is_used_v4l ? 
0 : 7; +#endif + } +} + + +static void set_frame_info(struct VP9Decoder_s *pbi, struct vframe_s *vf) +{ + unsigned int ar; + vf->duration = pbi->frame_dur; + vf->duration_pulldown = 0; + vf->flag = 0; + vf->prop.master_display_colour = pbi->vf_dp; + vf->signal_type = pbi->video_signal_type; + if (vf->compWidth && vf->compHeight) + pbi->frame_ar = vf->compHeight * 0x100 / vf->compWidth; + ar = min_t(u32, pbi->frame_ar, DISP_RATIO_ASPECT_RATIO_MAX); + vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT); + + if (pbi->is_used_v4l && pbi->vf_dp.present_flag) { + struct aml_vdec_hdr_infos hdr; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + + memset(&hdr, 0, sizeof(hdr)); + hdr.signal_type = vf->signal_type; + hdr.color_parms = pbi->vf_dp; + + hdr.color_parms.luminance[0] = hdr.color_parms.luminance[0] / 10000; + + vdec_v4l_set_hdr_infos(ctx, &hdr); + } + + if ((pbi->chunk != NULL) && (pbi->chunk->hdr10p_data_buf != NULL) + && (pbi->chunk->hdr10p_data_size != 0)) { + if (pbi->chunk->hdr10p_data_size <= 128) { + char *new_buf; + int i = 0; + new_buf = kzalloc(pbi->chunk->hdr10p_data_size, GFP_ATOMIC); + + if (new_buf) { + memcpy(new_buf, pbi->chunk->hdr10p_data_buf, pbi->chunk->hdr10p_data_size); + if (debug & VP9_DEBUG_BUFMGR_MORE) { + vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE, + "hdr10p data: (size %d)\n", + pbi->chunk->hdr10p_data_size); + for (i = 0; i < pbi->chunk->hdr10p_data_size; i++) { + vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE, + "%02x ", pbi->chunk->hdr10p_data_buf[i]); + if (((i + 1) & 0xf) == 0) + vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE, "\n"); + } + vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE, "\n"); + } + + vf->hdr10p_data_size = pbi->chunk->hdr10p_data_size; + vf->hdr10p_data_buf = new_buf; + set_meta_data_to_vf(vf, UVM_META_DATA_HDR10P_DATA, pbi->v4l2_ctx); + } else { + vp9_print(pbi, 0, "%s:hdr10p data vzalloc size(%d) fail\n", + __func__, pbi->chunk->hdr10p_data_size); + vf->hdr10p_data_size = pbi->chunk->hdr10p_data_size; + 
vf->hdr10p_data_buf = new_buf; + } + } + + vfree(pbi->chunk->hdr10p_data_buf); + pbi->chunk->hdr10p_data_buf = NULL; + pbi->chunk->hdr10p_data_size = 0; + } + + vf->sidebind_type = pbi->sidebind_type; + vf->sidebind_channel_id = pbi->sidebind_channel_id; +} + +static int vvp9_vf_states(struct vframe_states *states, void *op_arg) +{ + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)op_arg; + + states->vf_pool_size = VF_POOL_SIZE; + states->buf_free_num = kfifo_len(&pbi->newframe_q); + states->buf_avail_num = kfifo_len(&pbi->display_q); + + if (step == 2) + states->buf_avail_num = 0; + return 0; +} + +static struct vframe_s *vvp9_vf_peek(void *op_arg) +{ + struct vframe_s *vf[2] = {0, 0}; + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)op_arg; + + if (step == 2) + return NULL; + + if (kfifo_out_peek(&pbi->display_q, (void *)&vf, 2)) { + if (vf[1]) { + vf[0]->next_vf_pts_valid = true; + vf[0]->next_vf_pts = vf[1]->pts; + } else + vf[0]->next_vf_pts_valid = false; + return vf[0]; + } + + return NULL; +} + +static struct vframe_s *vvp9_vf_get(void *op_arg) +{ + struct vframe_s *vf; + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)op_arg; + + if (step == 2) + return NULL; + else if (step == 1) + step = 2; + + if (kfifo_get(&pbi->display_q, &vf)) { + struct vframe_s *next_vf = NULL; + uint8_t index = vf->index & 0xff; + ATRACE_COUNTER(pbi->trace.disp_q_name, kfifo_len(&pbi->display_q)); + if (index < pbi->used_buf_num || + (vf->type & VIDTYPE_V4L_EOS)) { + vf->index_disp = atomic_read(&pbi->vf_get_count); + atomic_add(1, &pbi->vf_get_count); + + if (debug & VP9_DEBUG_BUFMGR) + pr_info("%s idx: %d, type 0x%x w/h %d/%d, pts %d, %lld, ts: %lld\n", + __func__, index, vf->type, + vf->width, vf->height, + vf->pts, + vf->pts_us64, + vf->timestamp); + + if (kfifo_peek(&pbi->display_q, &next_vf) && next_vf) { + vf->next_vf_pts_valid = true; + vf->next_vf_pts = next_vf->pts; + } else + vf->next_vf_pts_valid = false; + + return vf; + } + } + + return NULL; +} + +static void 
vvp9_vf_put(struct vframe_s *vf, void *op_arg)
{
	/*
	 * vframe provider .vf_put op: the receiver returns a displayed frame.
	 * Recycles the vframe slot into newframe_q, drops per-frame resources
	 * (fence, HDR10+ buffer, metadata pointer) and releases the decoder
	 * buffer reference so the core can reuse the picture buffer.
	 */
	struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)op_arg;
	uint8_t index;

	/* The dummy vframe (used e.g. for EOS signalling) is static — nothing
	 * to recycle.
	 */
	if (vf == (&pbi->vframe_dummy))
		return;

	if (!vf)
		return;

	if (pbi->enable_fence && vf->fence) {
		int ret, i;

		/* Fence not yet signalled (status 0): park the vframe in
		 * fence_vf_s and return without recycling; it will be
		 * reclaimed once the fence completes.
		 */
		mutex_lock(&pbi->fence_mutex);
		ret = dma_fence_get_status(vf->fence);
		if (ret == 0) {
			for (i = 0; i < VF_POOL_SIZE; i++) {
				if (pbi->fence_vf_s.fence_vf[i] == NULL) {
					pbi->fence_vf_s.fence_vf[i] = vf;
					pbi->fence_vf_s.used_size++;
					mutex_unlock(&pbi->fence_mutex);
					return;
				}
			}
		}
		mutex_unlock(&pbi->fence_mutex);
	}

	index = vf->index & 0xff;

	/* Fence already signalled (or error status): release our reference. */
	if (pbi->enable_fence && vf->fence) {
		vdec_fence_put(vf->fence);
		vf->fence = NULL;
	}

	if (vf->hdr10p_data_buf) {
		kfree(vf->hdr10p_data_buf);
		vf->hdr10p_data_buf = NULL;
		vf->hdr10p_data_size = 0;
	}

	/* meta_data_buf is only unreferenced here, not freed — presumably
	 * owned elsewhere; NOTE(review): confirm against the allocator.
	 */
	if (vf->meta_data_buf) {
		vf->meta_data_buf = NULL;
		vf->meta_data_size = 0;
	}

	kfifo_put(&pbi->newframe_q, (const struct vframe_s *)vf);
	ATRACE_COUNTER(pbi->trace.new_q_name, kfifo_len(&pbi->newframe_q));
	atomic_add(1, &pbi->vf_put_count);

	if (debug & VP9_DEBUG_BUFMGR)
		pr_info("%s vf:%px, idx: %d, type 0x%x w/h %d/%d, pts %d, %lld, ts: %lld\n",
			__func__, vf, index, vf->type,
			vf->width, vf->height,
			vf->pts,
			vf->pts_us64,
			vf->timestamp);

	if (index < pbi->used_buf_num) {
		struct VP9_Common_s *cm = &pbi->common;
		struct BufferPool_s *pool = cm->buffer_pool;
		struct PIC_BUFFER_CONFIG_s *pic = &pool->frame_bufs[index].buf;
		unsigned long flags;

		/* V4L may have rebound the capture buffer while the frame was
		 * out for display; resync our record of the fb handle.
		 */
		if (vf->v4l_mem_handle !=
			pbi->m_BUF[pic->BUF_index].v4l_ref_buf_addr) {
			vp9_print(pbi, PRINT_FLAG_V4L_DETAIL,
				"VP9 update fb handle, old:%llx, new:%llx\n",
				pbi->m_BUF[pic->BUF_index].v4l_ref_buf_addr,
				vf->v4l_mem_handle);

			pbi->m_BUF[pic->BUF_index].v4l_ref_buf_addr =
				vf->v4l_mem_handle;
		}

		lock_buffer_pool(pool, flags);
		/* Drop the repeat-frame linkage set up when this picture was
		 * emitted as a duplicate of another.
		 */
		if (pic->repeat_pic) {
			if (pic->repeat_pic->repeat_count > 0)
				pic->repeat_pic->repeat_count--;
			else
				vp9_print(pbi, PRINT_FLAG_ERROR, "repeat_count <= 0 pic:%px\n", pic);
			pic->repeat_pic = NULL;
		}
		if (pool->frame_bufs[index].buf.vf_ref > 0)
			pool->frame_bufs[index].buf.vf_ref--;

		/* If decode stalled waiting for a free buffer, kick the HEVC
		 * core mailbox IRQ so it re-evaluates.
		 */
		if (pbi->wait_buf)
			WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG,
				0x1);
		pbi->last_put_idx = index;
		pbi->new_frame_displayed++;
		unlock_buffer_pool(pool, flags);

		spin_lock_irqsave(&pbi->wait_buf_lock, flags);

		/* Decoder work item parked on "need more buffers": this put
		 * just freed one, so reschedule the work.
		 */
		if (pbi->wait_more_buf) {
			pbi->wait_more_buf = false;
			pbi->dec_result = DEC_RESULT_NEED_MORE_BUFFER;
			vdec_schedule_work(&pbi->work);
		}
		spin_unlock_irqrestore(&pbi->wait_buf_lock, flags);

#ifdef SUPPORT_FB_DECODING
		if (pbi->used_stage_buf_num > 0 &&
			pbi->back_not_run_ready)
			trigger_schedule(pbi);
#endif
	}

}


/*
 * vframe provider .event_cb op.  RECEIVER_RESET handling is compiled out
 * (#if 0); REQ_STATE answers the receiver's secure-mode query via
 * vdec_secure(), returning 0xffffffff for unknown request types.
 */
static int vvp9_event_cb(int type, void *data, void *private_data)
{
	struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)private_data;

	if (type & VFRAME_EVENT_RECEIVER_RESET) {
#if 0
		unsigned long flags;

		amhevc_stop();
#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER
		vf_light_unreg_provider(&vvp9_vf_prov);
#endif
		spin_lock_irqsave(&pbi->lock, flags);
		vvp9_local_init();
		vvp9_prot_init();
		spin_unlock_irqrestore(&pbi->lock, flags);
#ifndef CONFIG_AMLOGIC_POST_PROCESS_MANAGER
		vf_reg_provider(&vvp9_vf_prov);
#endif
		amhevc_start();
#endif
	} else if (type & VFRAME_EVENT_RECEIVER_REQ_STATE) {
		struct provider_state_req_s *req =
			(struct provider_state_req_s *)data;
		if (req->req_type == REQ_STATE_SECURE)
			req->req_result[0] = vdec_secure(hw_to_vdec(pbi));
		else
			req->req_result[0] = 0xffffffff;
	}

	return 0;
}

/*
 * Take an extra display reference on picture buffer 'index' (vf_ref is
 * the balance against the decrement in vvp9_vf_put()).  No locking here —
 * NOTE(review): callers appear responsible for buffer-pool serialization.
 */
void inc_vf_ref(struct VP9Decoder_s *pbi, int index)
{
	struct VP9_Common_s *cm = &pbi->common;

	cm->buffer_pool->frame_bufs[index].buf.vf_ref++;

	if (debug & VP9_DEBUG_BUFMGR_MORE)
		pr_info("%s index = %d new vf_ref = %d\r\n",
			__func__, index,
			cm->buffer_pool->frame_bufs[index].buf.vf_ref);
}

static int frame_duration_adapt(struct VP9Decoder_s *pbi, struct vframe_s *vf, u32
valid)
{
	/*
	 * Estimate the real frame duration from the PTS stream when the
	 * container-provided duration is absent or unstable.  Measures the
	 * PTS delta over a window of frame_cnt_window frames between pts1
	 * and pts2 and nudges pbi->frame_dur toward the measured value.
	 * 'valid' flags whether this frame carried a usable PTS.
	 * Returns false only while waiting for the first valid PTS.
	 */
	u32 old_duration, pts_duration = 0;
	u32 pts = vf->pts;

	/* Duration already established elsewhere — nothing to adapt. */
	if (pbi->get_frame_dur == true)
		return true;

	pbi->frame_cnt_window++;
	if (!(pbi->vp9_first_pts_ready == 1)) {
		/* Anchor the measurement window at the first valid PTS. */
		if (valid) {
			pbi->pts1 = pts;
			pbi->frame_cnt_window = 0;
			pbi->duration_from_pts_done = 0;
			pbi->vp9_first_pts_ready = 1;
		} else {
			return false;
		}
	} else {
		/* PTS went backwards (seek/wrap): re-anchor once the window
		 * is large enough.
		 */
		if (pts < pbi->pts1) {
			if (pbi->frame_cnt_window > FRAME_CNT_WINDOW_SIZE) {
				pbi->pts1 = pts;
				pbi->frame_cnt_window = 0;
			}
		}

		if (valid && (pbi->frame_cnt_window > FRAME_CNT_WINDOW_SIZE) &&
			(pts > pbi->pts1) && (pbi->duration_from_pts_done == 0)) {
			old_duration = pbi->frame_dur;
			pbi->pts2 = pts;
			/* Average duration over the window; the 16/15 factor
			 * is a fixed scaling — NOTE(review): rationale not
			 * visible here, presumably a rounding bias.
			 */
			pts_duration = (((pbi->pts2 - pbi->pts1) * 16) /
				(pbi->frame_cnt_window * 15));

			/* Close to the current duration: accept the measured
			 * value as a refinement.
			 */
			if (close_to(pts_duration, old_duration, 2000)) {
				pbi->frame_dur = pts_duration;
				if ((debug & VP9_DEBUG_OUT_PTS) != 0)
					pr_info("use calc duration %d\n", pts_duration);
			}

			if (pbi->duration_from_pts_done == 0) {
				if (close_to(pts_duration, old_duration, RATE_CORRECTION_THRESHOLD)) {
					/* Measurement agrees with current
					 * duration: lock it in.
					 */
					pbi->duration_from_pts_done = 1;
				} else {
					/* Measurement disagrees with both the
					 * old and current duration but matches
					 * the previous measurement: frame_dur
					 * must be wrong — recover it.
					 */
					if (!close_to(pts_duration,
						old_duration, 1000) &&
						!close_to(pts_duration,
						pbi->frame_dur, 1000) &&
						close_to(pts_duration,
						pbi->last_duration, 200)) {
						pbi->frame_dur = pts_duration;
					}
					/* Restart the window for another try. */
					pbi->pts1 = pbi->pts2;
					pbi->frame_cnt_window = 0;
					pbi->duration_from_pts_done = 0;
				}
			}
			pbi->last_duration = pts_duration;
		}
	}
	return true;
}

/*
 * Attach keeper handles for the buffers backing this vframe so they stay
 * alive while the frame is out for display.  Scatter (MMU, compressed)
 * frames use the mmu box for the body plus the bmmu box for the header;
 * non-scatter frames use a single bmmu allocation.  V4L manages buffer
 * lifetime itself, so keepers are skipped in that mode.
 */
static void update_vf_memhandle(struct VP9Decoder_s *pbi,
	struct vframe_s *vf, struct PIC_BUFFER_CONFIG_s *pic)
{
	vf->mem_handle = NULL;
	vf->mem_head_handle = NULL;
	vf->mem_dw_handle = NULL;

	/* keeper not needed for v4l solution */
	if (pbi->is_used_v4l)
		return;

	if (vf->type & VIDTYPE_SCATTER) {
		vf->mem_handle =
			decoder_mmu_box_get_mem_handle(
				pbi->mmu_box, pic->index);
		vf->mem_head_handle =
			decoder_bmmu_box_get_mem_handle(
				pbi->bmmu_box,
				HEADER_BUFFER_IDX(pic->BUF_index));
		/* double_write_mode 3 keeps a separate double-write buffer. */
		if (pbi->double_write_mode == 3)
			vf->mem_dw_handle =
				decoder_bmmu_box_get_mem_handle(
					pbi->bmmu_box,
					VF_BUFFER_IDX(pic->BUF_index));
		else
			vf->mem_dw_handle = NULL;
	} else {
		vf->mem_handle =
			decoder_bmmu_box_get_mem_handle(
				pbi->bmmu_box, VF_BUFFER_IDX(pic->BUF_index));
		vf->mem_head_handle = NULL;
		vf->mem_dw_handle = NULL;
		/*vf->mem_head_handle =
		 *decoder_bmmu_box_get_mem_handle(
		 *hevc->bmmu_box, VF_BUFFER_IDX(BUF_index));
		 */
	}
}

/*
 * Refresh the decoder-status (gvs) block: frame geometry, duration and a
 * rounded-to-nearest fps derived from the 96 kHz duration units
 * (frame_height/frame_width are module-scope values set elsewhere).
 */
static inline void pbi_update_gvs(struct VP9Decoder_s *pbi)
{
	if (pbi->gvs->frame_height != frame_height) {
		pbi->gvs->frame_width = frame_width;
		pbi->gvs->frame_height = frame_height;
	}
	if (pbi->gvs->frame_dur != pbi->frame_dur) {
		pbi->gvs->frame_dur = pbi->frame_dur;
		if (pbi->frame_dur != 0)
			/* round 96000/frame_dur to the nearest integer fps */
			pbi->gvs->frame_rate = ((96000 * 10 / pbi->frame_dur) % 10) < 5 ?
+ 96000 / pbi->frame_dur : (96000 / pbi->frame_dur +1); + else + pbi->gvs->frame_rate = -1; + } + pbi->gvs->status = pbi->stat | pbi->fatal_error; +} + +static int prepare_display_buf(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *pic_config) +{ + struct vframe_s *vf = NULL; + struct vdec_s *pvdec = hw_to_vdec(pbi); + int stream_offset = pic_config->stream_offset; + unsigned short slice_type = pic_config->slice_type; + struct aml_vcodec_ctx * v4l2_ctx = pbi->v4l2_ctx; + struct vdec_v4l2_buffer *fb = NULL; + ulong nv_order = VIDTYPE_VIU_NV21; + u32 pts_valid = 0, pts_us64_valid = 0; + u32 pts_save; + u64 pts_us64_save; + u32 frame_size = 0; + int i = 0; + + + if (debug & VP9_DEBUG_BUFMGR) + pr_info("%s index = %d\r\n", __func__, pic_config->index); + if (kfifo_get(&pbi->newframe_q, &vf) == 0) { + pr_info("fatal error, no available buffer slot."); + return -1; + } + + /* swap uv */ + if (pbi->is_used_v4l) { + if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) || + (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M)) + nv_order = VIDTYPE_VIU_NV12; + } + + if (pic_config->double_write_mode) + set_canvas(pbi, pic_config); + + display_frame_count[pbi->index]++; + if (vf) { + if (!force_pts_unstable) { + if ((pic_config->pts == 0) || ((pic_config->pts <= pbi->last_pts) && + (pic_config->pts64 <= pbi->last_pts_us64))) { + for (i = (FRAME_BUFFERS - 1); i > 0; i--) { + if ((pbi->last_pts == pbi->frame_mode_pts_save[i]) || + (pbi->last_pts_us64 == pbi->frame_mode_pts64_save[i])) { + pic_config->pts = pbi->frame_mode_pts_save[i - 1]; + pic_config->pts64 = pbi->frame_mode_pts64_save[i - 1]; + break; + } + } + if ((i == 0) || (pic_config->pts <= pbi->last_pts)) { + vp9_print(pbi, VP9_DEBUG_OUT_PTS, + "no found pts %d, set 0. 
%d, %d\n", + i, pic_config->pts, pbi->last_pts); + pic_config->pts = 0; + pic_config->pts64 = 0; + } + } + } + + if (pbi->is_used_v4l) { + vf->v4l_mem_handle + = pbi->m_BUF[pic_config->v4l_buf_index].v4l_ref_buf_addr; + fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle; + + if (pbi->mmu_enable) { + vf->mm_box.bmmu_box = pbi->bmmu_box; + vf->mm_box.bmmu_idx = HEADER_BUFFER_IDX(pbi->buffer_wrap[pic_config->v4l_buf_index]); + vf->mm_box.mmu_box = pbi->mmu_box; + vf->mm_box.mmu_idx = pbi->buffer_wrap[pic_config->index]; + } + } + + if (pbi->enable_fence) { + /* fill fence information. */ + if (pbi->fence_usage == FENCE_USE_FOR_DRIVER) + vf->fence = pic_config->fence; + } + +#ifdef MULTI_INSTANCE_SUPPORT + if (vdec_frame_based(pvdec)) { + vf->pts = pic_config->pts; + vf->pts_us64 = pic_config->pts64; + + if (pbi->is_used_v4l && v4l_bitstream_id_enable) + vf->timestamp = pic_config->timestamp; + else + vf->timestamp = pic_config->pts64; + + if (vf->pts != 0 || vf->pts_us64 != 0) { + pts_valid = 1; + pts_us64_valid = 1; + } else { + pts_valid = 0; + pts_us64_valid = 0; + } + } else +#endif + /* if (pts_lookup_offset(PTS_TYPE_VIDEO, + * stream_offset, &vf->pts, 0) != 0) { + */ + if ((pvdec->vbuf.no_parser == 0) || (pvdec->vbuf.use_ptsserv)) { + if (pts_lookup_offset_us64 + (PTS_TYPE_VIDEO, stream_offset, &vf->pts, + &frame_size, 0, + &vf->pts_us64) != 0) { +#ifdef DEBUG_PTS + pbi->pts_missed++; +#endif + vf->pts = 0; + vf->pts_us64 = 0; + pts_valid = 0; + pts_us64_valid = 0; + } else { +#ifdef DEBUG_PTS + pbi->pts_hit++; +#endif + pts_valid = 1; + pts_us64_valid = 1; + } + } + + fill_frame_info(pbi, pic_config, frame_size, vf->pts); + + pts_save = vf->pts; + pts_us64_save = vf->pts_us64; + if (pbi->is_used_v4l || pbi->pts_unstable) { + frame_duration_adapt(pbi, vf, pts_valid); + if (pbi->duration_from_pts_done) { + pbi->pts_mode = PTS_NONE_REF_USE_DURATION; + } else { + if (pts_valid || pts_us64_valid) + pbi->pts_mode = PTS_NORMAL; + } + } + + if ((pbi->pts_mode == 
PTS_NORMAL) && (vf->pts != 0) + && pbi->get_frame_dur) { + int pts_diff = (int)vf->pts - pbi->last_lookup_pts; + + if (pts_diff < 0) { + pbi->pts_mode_switching_count++; + pbi->pts_mode_recovery_count = 0; + + if (pbi->pts_mode_switching_count >= + PTS_MODE_SWITCHING_THRESHOLD) { + pbi->pts_mode = + PTS_NONE_REF_USE_DURATION; + pr_info + ("HEVC: switch to n_d mode.\n"); + } + + } else { + int p = PTS_MODE_SWITCHING_RECOVERY_THREASHOLD; + + pbi->pts_mode_recovery_count++; + if (pbi->pts_mode_recovery_count > p) { + pbi->pts_mode_switching_count = 0; + pbi->pts_mode_recovery_count = 0; + } + } + } + + if (vf->pts != 0) + pbi->last_lookup_pts = vf->pts; + + if ((pbi->pts_mode == PTS_NONE_REF_USE_DURATION) + && (slice_type != KEY_FRAME)) + vf->pts = pbi->last_pts + DUR2PTS(pbi->frame_dur); + pbi->last_pts = vf->pts; + + if (vf->pts_us64 != 0) + pbi->last_lookup_pts_us64 = vf->pts_us64; + + if ((pbi->pts_mode == PTS_NONE_REF_USE_DURATION) + && (slice_type != KEY_FRAME)) { + vf->pts_us64 = + pbi->last_pts_us64 + + (DUR2PTS(pbi->frame_dur) * 100 / 9); + } + pbi->last_pts_us64 = vf->pts_us64; + + if (pbi->pts_mode == PTS_NONE_REF_USE_DURATION) { + vf->disp_pts = vf->pts; + vf->disp_pts_us64 = vf->pts_us64; + vf->pts = pts_save; + vf->pts_us64 = pts_us64_save; + } else { + vf->disp_pts = 0; + vf->disp_pts_us64 = 0; + } + + vf->index = 0xff00 | pic_config->v4l_buf_index; + + if (pic_config->double_write_mode & 0x10) { + /* double write only */ + vf->compBodyAddr = 0; + vf->compHeadAddr = 0; + } else { + if (pbi->mmu_enable) { + vf->compBodyAddr = 0; + vf->compHeadAddr = pic_config->header_adr; + } else { + /*vf->compBodyAddr = pic_config->mc_y_adr; + *vf->compHeadAddr = pic_config->mc_y_adr + + *pic_config->comp_body_size; */ + /*head adr*/ + } + vf->canvas0Addr = vf->canvas1Addr = 0; + } + if (pic_config->double_write_mode) { + vf->type = VIDTYPE_PROGRESSIVE | + VIDTYPE_VIU_FIELD; + vf->type |= nv_order; + if ((pic_config->double_write_mode == 3) && + 
(!IS_8K_SIZE(pic_config->y_crop_width, + pic_config->y_crop_height))) { + vf->type |= VIDTYPE_COMPRESS; + if (pbi->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + if (pbi->is_used_v4l && pic_config->double_write_mode != 16 && + (!IS_8K_SIZE(pic_config->y_crop_width, + pic_config->y_crop_height))) + vf->type |= VIDTYPE_COMPRESS | VIDTYPE_SCATTER; +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + pic_config->canvas_config[0]; + vf->canvas0_config[1] = + pic_config->canvas_config[1]; + vf->canvas1_config[0] = + pic_config->canvas_config[0]; + vf->canvas1_config[1] = + pic_config->canvas_config[1]; + + } else +#endif + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(pic_config); + } else { + vf->canvas0Addr = vf->canvas1Addr = 0; + vf->type = VIDTYPE_COMPRESS | VIDTYPE_VIU_FIELD; + if (pbi->mmu_enable) + vf->type |= VIDTYPE_SCATTER; + } + + switch (pic_config->bit_depth) { + case VPX_BITS_8: + vf->bitdepth = BITDEPTH_Y8 | + BITDEPTH_U8 | BITDEPTH_V8; + break; + case VPX_BITS_10: + case VPX_BITS_12: + vf->bitdepth = BITDEPTH_Y10 | + BITDEPTH_U10 | BITDEPTH_V10; + break; + default: + vf->bitdepth = BITDEPTH_Y10 | + BITDEPTH_U10 | BITDEPTH_V10; + break; + } + if ((vf->type & VIDTYPE_COMPRESS) == 0) + vf->bitdepth = + BITDEPTH_Y8 | BITDEPTH_U8 | BITDEPTH_V8; + if (pic_config->bit_depth == VPX_BITS_8) + vf->bitdepth |= BITDEPTH_SAVING_MODE; + + /* if((vf->width!=pic_config->width)| + * (vf->height!=pic_config->height)) + */ + /* pr_info("aaa: %d/%d, %d/%d\n", + vf->width,vf->height, pic_config->width, + pic_config->height); */ + vf->width = pic_config->y_crop_width / + get_double_write_ratio( + pic_config->double_write_mode); + vf->height = pic_config->y_crop_height / + get_double_write_ratio( + pic_config->double_write_mode); + if (force_w_h != 0) { + vf->width = (force_w_h >> 16) & 0xffff; + vf->height = force_w_h & 0xffff; + } + vf->compWidth = pic_config->y_crop_width; + 
vf->compHeight = pic_config->y_crop_height; + set_frame_info(pbi, vf); + if (force_fps & 0x100) { + u32 rate = force_fps & 0xff; + + if (rate) + vf->duration = 96000/rate; + else + vf->duration = 0; + } + update_vf_memhandle(pbi, vf, pic_config); + if (vdec_stream_based(pvdec) && (!pvdec->vbuf.use_ptsserv)) { + vf->pts_us64 = stream_offset; + vf->pts = 0; + } + if ((debug & VP9_DEBUG_OUT_PTS) != 0) { + pr_info + ("VP9 dec out pts: pts_mode=%d,dur=%d,pts(%d,%lld,%lld)(%d,%lld)\n", + pbi->pts_mode, pbi->frame_dur, vf->pts, + vf->pts_us64, vf->timestamp, pts_save, + pts_us64_save); + } + if (!(pic_config->y_crop_width == 196 + && pic_config->y_crop_height == 196 + && (debug & VP9_DEBUG_NO_TRIGGER_FRAME) == 0 + && (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_TXLX))) { + struct vdec_info tmp4x; + + inc_vf_ref(pbi, pic_config->v4l_buf_index); + + vdec_vframe_ready(pvdec, vf); + if (pic_config->double_write_mode && + (pic_config->v4l_buf_index != pic_config->BUF_index)) { + struct PIC_BUFFER_CONFIG_s *dst_pic = + &pbi->common.buffer_pool->frame_bufs[pic_config->v4l_buf_index].buf; + struct PIC_BUFFER_CONFIG_s *src_pic = + &pbi->common.buffer_pool->frame_bufs[pic_config->BUF_index].buf; + struct vdec_ge2d_info ge2d_info; + + vp9_print(pbi, PRINT_FLAG_V4L_DETAIL, + "ge2d copy start v4l_buf_index:%d repeat_buff_index:%d\n", + pic_config->v4l_buf_index, + pic_config->BUF_index); + ge2d_info.dst_vf = vf; + ge2d_info.src_canvas0Addr = ge2d_info.src_canvas1Addr = 0; + if (dst_pic->double_write_mode) { +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) { + vf->canvas0Addr = vf->canvas1Addr = -1; + ge2d_info.src_canvas0Addr = ge2d_info.src_canvas1Addr = -1; + vf->plane_num = 2; + vf->canvas0_config[0] = + dst_pic->canvas_config[0]; + vf->canvas0_config[1] = + dst_pic->canvas_config[1]; + vf->canvas1_config[0] = + dst_pic->canvas_config[0]; + vf->canvas1_config[1] = + dst_pic->canvas_config[1]; + ge2d_info.src_canvas0_config[0] = + src_pic->canvas_config[0]; + 
ge2d_info.src_canvas0_config[1] = + src_pic->canvas_config[1]; + ge2d_info.src_canvas1_config[0] = + src_pic->canvas_config[0]; + ge2d_info.src_canvas1_config[1] = + src_pic->canvas_config[1]; + } else +#endif + { + vf->canvas0Addr = vf->canvas1Addr = + spec2canvas(dst_pic); + ge2d_info.src_canvas0Addr = ge2d_info.src_canvas1Addr = + spec2canvas(src_pic); + } + } + + if (!pbi->ge2d) { + int mode = nv_order == VIDTYPE_VIU_NV21 ? GE2D_MODE_CONVERT_NV21 : GE2D_MODE_CONVERT_NV12; + mode |= GE2D_MODE_CONVERT_LE; + vdec_ge2d_init(&pbi->ge2d, mode); + } + vdec_ge2d_copy_data(pbi->ge2d, &ge2d_info); + vp9_print(pbi, PRINT_FLAG_V4L_DETAIL, "ge2d copy done\n"); + } + decoder_do_frame_check(pvdec, vf); + kfifo_put(&pbi->display_q, (const struct vframe_s *)vf); + ATRACE_COUNTER(pbi->trace.pts_name, vf->timestamp); + ATRACE_COUNTER(pbi->trace.new_q_name, kfifo_len(&pbi->newframe_q)); + ATRACE_COUNTER(pbi->trace.disp_q_name, kfifo_len(&pbi->display_q)); + atomic_add(1, &pbi->vf_pre_count); + pbi_update_gvs(pbi); + /*count info*/ + vdec_count_info(pbi->gvs, 0, stream_offset); + if (stream_offset) { + if (slice_type == KEY_FRAME) { + pbi->gvs->i_decoded_frames++; + } else if (slice_type == INTER_FRAME) { + pbi->gvs->p_decoded_frames++; + } else if (slice_type == FRAME_TYPES) { + pbi->gvs->b_decoded_frames++; + } + } + memcpy(&tmp4x, pbi->gvs, sizeof(struct vdec_info)); + tmp4x.bit_depth_luma = pbi->vp9_param.p.bit_depth; + tmp4x.bit_depth_chroma = pbi->vp9_param.p.bit_depth; + tmp4x.double_write_mode = pic_config->double_write_mode; + vdec_fill_vdec_frame(pvdec, &pbi->vframe_qos, &tmp4x, + vf, pic_config->hw_decode_time); + pvdec->vdec_fps_detec(pvdec->id); + if (without_display_mode == 0) { + if (pbi->is_used_v4l) { + if (v4l2_ctx->is_stream_off) { + vvp9_vf_put(vvp9_vf_get(pbi), pbi); + } else { + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + } + } else { + vf_notify_receiver(pbi->provider_name, + 
VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL); + } + } else + vvp9_vf_put(vvp9_vf_get(pbi), pbi); + } else { + pbi->stat |= VP9_TRIGGER_FRAME_DONE; + hevc_source_changed(VFORMAT_VP9, 196, 196, 30); + pr_debug("[%s %d] drop trigger frame width %d height %d state 0x%x\n", + __func__, __LINE__, vf->width, + vf->height, pbi->stat); + } + } + + return 0; +} + +static bool is_avaliable_buffer(struct VP9Decoder_s *pbi); + +static int notify_v4l_eos(struct vdec_s *vdec) +{ + struct VP9Decoder_s *hw = (struct VP9Decoder_s *)vdec->private; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx); + struct vframe_s *vf = &hw->vframe_dummy; + struct vdec_v4l2_buffer *fb = NULL; + int index = INVALID_IDX; + ulong expires; + + if (hw->eos) { + expires = jiffies + msecs_to_jiffies(2000); + while (!is_avaliable_buffer(hw)) { + if (time_after(jiffies, expires)) { + pr_err("[%d] VP9 isn't enough buff for notify eos.\n", ctx->id); + return 0; + } + } + + index = v4l_get_free_fb(hw); + if (INVALID_IDX == index) { + pr_err("[%d] VP9 EOS get free buff fail.\n", ctx->id); + return 0; + } + + fb = (struct vdec_v4l2_buffer *) + hw->m_BUF[index].v4l_ref_buf_addr; + + vf->type |= VIDTYPE_V4L_EOS; + vf->timestamp = ULONG_MAX; + vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L; + vf->v4l_mem_handle = (ulong)fb; + + vdec_vframe_ready(vdec, vf); + kfifo_put(&hw->display_q, (const struct vframe_s *)vf); + + ATRACE_COUNTER("VC_OUT_DEC-submit", fb->buf_idx); + fb->task->submit(fb->task, TASK_TYPE_DEC); + + pr_info("[%d] VP9 EOS notify.\n", ctx->id); + } + + return 0; +} + +static void get_rpm_param(union param_u *params) +{ + int i; + unsigned int data32; + + if (debug & VP9_DEBUG_BUFMGR) + pr_info("enter %s\r\n", __func__); + for (i = 0; i < 128; i++) { + do { + data32 = READ_VREG(RPM_CMD_REG); + /*pr_info("%x\n", data32);*/ + } while ((data32 & 0x10000) == 0); + params->l.data[i] = data32&0xffff; + /*pr_info("%x\n", data32);*/ + WRITE_VREG(RPM_CMD_REG, 0); + } + if (debug & VP9_DEBUG_BUFMGR) + 
pr_info("leave %s\r\n", __func__); +} +static void debug_buffer_mgr_more(struct VP9Decoder_s *pbi) +{ + int i; + + if (!(debug & VP9_DEBUG_BUFMGR_MORE)) + return; + pr_info("vp9_param: (%d)\n", pbi->slice_idx); + for (i = 0; i < (RPM_END-RPM_BEGIN); i++) { + pr_info("%04x ", pbi->vp9_param.l.data[i]); + if (((i + 1) & 0xf) == 0) + pr_info("\n"); + } + pr_info("=============param==========\r\n"); + pr_info("profile %x\r\n", pbi->vp9_param.p.profile); + pr_info("show_existing_frame %x\r\n", + pbi->vp9_param.p.show_existing_frame); + pr_info("frame_to_show_idx %x\r\n", + pbi->vp9_param.p.frame_to_show_idx); + pr_info("frame_type %x\r\n", pbi->vp9_param.p.frame_type); + pr_info("show_frame %x\r\n", pbi->vp9_param.p.show_frame); + pr_info("e.r.r.o.r_resilient_mode %x\r\n", + pbi->vp9_param.p.error_resilient_mode); + pr_info("intra_only %x\r\n", pbi->vp9_param.p.intra_only); + pr_info("display_size_present %x\r\n", + pbi->vp9_param.p.display_size_present); + pr_info("reset_frame_context %x\r\n", + pbi->vp9_param.p.reset_frame_context); + pr_info("refresh_frame_flags %x\r\n", + pbi->vp9_param.p.refresh_frame_flags); + pr_info("bit_depth %x\r\n", pbi->vp9_param.p.bit_depth); + pr_info("width %x\r\n", pbi->vp9_param.p.width); + pr_info("height %x\r\n", pbi->vp9_param.p.height); + pr_info("display_width %x\r\n", pbi->vp9_param.p.display_width); + pr_info("display_height %x\r\n", pbi->vp9_param.p.display_height); + pr_info("ref_info %x\r\n", pbi->vp9_param.p.ref_info); + pr_info("same_frame_size %x\r\n", pbi->vp9_param.p.same_frame_size); + if (!(debug & VP9_DEBUG_DBG_LF_PRINT)) + return; + pr_info("mode_ref_delta_enabled: 0x%x\r\n", + pbi->vp9_param.p.mode_ref_delta_enabled); + pr_info("sharpness_level: 0x%x\r\n", + pbi->vp9_param.p.sharpness_level); + pr_info("ref_deltas: 0x%x, 0x%x, 0x%x, 0x%x\r\n", + pbi->vp9_param.p.ref_deltas[0], pbi->vp9_param.p.ref_deltas[1], + pbi->vp9_param.p.ref_deltas[2], pbi->vp9_param.p.ref_deltas[3]); + pr_info("mode_deltas: 0x%x, 0x%x\r\n", 
pbi->vp9_param.p.mode_deltas[0], + pbi->vp9_param.p.mode_deltas[1]); + pr_info("filter_level: 0x%x\r\n", pbi->vp9_param.p.filter_level); + pr_info("seg_enabled: 0x%x\r\n", pbi->vp9_param.p.seg_enabled); + pr_info("seg_abs_delta: 0x%x\r\n", pbi->vp9_param.p.seg_abs_delta); + pr_info("seg_lf_feature_enabled: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\r\n", + (pbi->vp9_param.p.seg_lf_info[0]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[1]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[2]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[3]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[4]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[5]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[6]>>15 & 1), + (pbi->vp9_param.p.seg_lf_info[7]>>15 & 1)); + pr_info("seg_lf_feature_data: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\r\n", + (pbi->vp9_param.p.seg_lf_info[0] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[1] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[2] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[3] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[4] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[5] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[6] & 0x13f), + (pbi->vp9_param.p.seg_lf_info[7] & 0x13f)); + +} + +static int recycle_mmu_buf_tail(struct VP9Decoder_s *pbi, + bool check_dma) +{ + struct VP9_Common_s *const cm = &pbi->common; + + if (pbi->used_4k_num == -1) { + pbi->used_4k_num = + READ_VREG(HEVC_SAO_MMU_STATUS) >> 16; + } + vp9_print(pbi, VP9_DEBUG_BUFMGR_MORE, + "pic index %d page_start %d\n", + cm->cur_fb_idx_mmu, pbi->used_4k_num); + + if (check_dma) + hevc_mmu_dma_check(hw_to_vdec(pbi)); + + if (pbi->is_used_v4l) { + int index = cm->cur_fb_idx_mmu; + struct internal_comp_buf *ibuf = + index_to_icomp_buf(pbi, index); + + decoder_mmu_box_free_idx_tail( + ibuf->mmu_box, + ibuf->index, + pbi->used_4k_num); + } else { + decoder_mmu_box_free_idx_tail( + pbi->mmu_box, + cm->cur_fb_idx_mmu, + pbi->used_4k_num); + } + + cm->cur_fb_idx_mmu = INVALID_IDX; + pbi->used_4k_num = -1; + + return 0; +} + +static void 
vp9_recycle_mmu_buf_tail(struct VP9Decoder_s *pbi)
{
	/*
	 * Release the unused tail pages of the current frame's MMU mapping
	 * (wrapper around recycle_mmu_buf_tail()).  Skipped entirely in
	 * double-write-only mode (0x10), where no MMU mapping exists.
	 * The DMA check is requested only when used_4k_num is still unknown
	 * and we are in multi-instance mode.
	 */
	struct VP9_Common_s *const cm = &pbi->common;

	if (pbi->double_write_mode & 0x10)
		return;
	if (cm->cur_fb_idx_mmu != INVALID_IDX) {
		recycle_mmu_buf_tail(pbi,
			((pbi->used_4k_num == -1) &&
			pbi->m_ins_flag) ? 1 : 0);
	}
}

#ifdef MULTI_INSTANCE_SUPPORT
/*
 * Free the whole MMU mapping of the current frame buffer (e.g. when the
 * frame is discarded).  Not used for V4L (buffers owned by v4l core) or
 * double-write-only mode.  Resets cur_fb_idx_mmu/used_4k_num bookkeeping.
 */
static void vp9_recycle_mmu_buf(struct VP9Decoder_s *pbi)
{
	struct VP9_Common_s *const cm = &pbi->common;

	if (pbi->is_used_v4l)
		return;

	if (pbi->double_write_mode & 0x10)
		return;
	if (cm->cur_fb_idx_mmu != INVALID_IDX) {
		decoder_mmu_box_free_idx(pbi->mmu_box,
			cm->cur_fb_idx_mmu);

		cm->cur_fb_idx_mmu = INVALID_IDX;
		pbi->used_4k_num = -1;
	}
}

/*
 * Workqueue shim for vp9_recycle_mmu_buf(): the free path may sleep, so
 * contexts that cannot sleep schedule this work instead of calling it
 * directly (see dec_again_process()).
 */
void vp9_recycle_mmu_work(struct work_struct *work)
{
	struct VP9Decoder_s *pbi = container_of(work,
		struct VP9Decoder_s, recycle_mmu_work);

	if (pbi)
		vp9_recycle_mmu_buf(pbi);
}
#endif


/*
 * Handle "decode again": stop the HEVC core, mark DEC_RESULT_AGAIN and
 * reschedule the decoder work.  If a slice was mid-decode, switch to
 * PROC_STATE_SENDAGAIN so the data is resubmitted, recycling the MMU
 * mapping first (via work, since the free path can sleep).
 */
static void dec_again_process(struct VP9Decoder_s *pbi)
{
	amhevc_stop();
	pbi->dec_result = DEC_RESULT_AGAIN;
	if (pbi->process_state ==
		PROC_STATE_DECODESLICE) {
		pbi->process_state =
			PROC_STATE_SENDAGAIN;
		if (pbi->mmu_enable) {
			/*
			 * Because vp9_recycle_mmu_buf has sleep function,we can't
			 * call it directly. Use a recycle_mmu_work to substitude it.
+ */ + vdec_schedule_work(&pbi->recycle_mmu_work); + } + } + reset_process_time(pbi); + vdec_schedule_work(&pbi->work); +} + +int continue_decoding(struct VP9Decoder_s *pbi) +{ + int ret; + int i; + struct VP9_Common_s *const cm = &pbi->common; + struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + debug_buffer_mgr_more(pbi); + + if (pbi->is_used_v4l && ctx->param_sets_from_ucode) + pbi->res_ch_flag = 0; + bit_depth_luma = pbi->vp9_param.p.bit_depth; + bit_depth_chroma = pbi->vp9_param.p.bit_depth; + + if ((pbi->vp9_param.p.bit_depth >= VPX_BITS_10) && + (get_double_write_mode(pbi) == 0x10)) { + pbi->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW; + pr_err("fatal err, bit_depth %d, unsupport dw 0x10\n", + pbi->vp9_param.p.bit_depth); + return -1; + } + + if (pbi->process_state != PROC_STATE_SENDAGAIN) { + ret = vp9_bufmgr_process(pbi, &pbi->vp9_param); + if (!pbi->m_ins_flag) + pbi->slice_idx++; + } else { + union param_u *params = &pbi->vp9_param; + if (pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) { + ret = vp9_alloc_mmu(pbi, + cm->new_fb_idx, + params->p.width, + params->p.height, + params->p.bit_depth, + pbi->frame_mmu_map_addr); + if (ret >= 0) + cm->cur_fb_idx_mmu = cm->new_fb_idx; + else + pr_err("can't alloc need mmu1,idx %d ret =%d\n", + cm->new_fb_idx, + ret); + } else { + ret = 0; + } + WRITE_VREG(HEVC_PARSER_PICTURE_SIZE, + (params->p.height << 16) | params->p.width); + } + if (ret < 0) { + pr_info("vp9_bufmgr_process=> %d, VP9_10B_DISCARD_NAL\r\n", + ret); + WRITE_VREG(HEVC_DEC_STATUS_REG, VP9_10B_DISCARD_NAL); + cm->show_frame = 0; + if (pbi->mmu_enable) { + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + vp9_recycle_mmu_buf(pbi); + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + } +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) { + pbi->dec_result = DEC_RESULT_DONE; +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num == 0) 
+#endif + amhevc_stop(); + vdec_schedule_work(&pbi->work); + } +#endif + return ret; + } else if (ret == 0) { + struct PIC_BUFFER_CONFIG_s *cur_pic_config + = &cm->cur_frame->buf; + cur_pic_config->decode_idx = pbi->frame_count; + + if (pbi->process_state != PROC_STATE_SENDAGAIN) { + if (!pbi->m_ins_flag) { + pbi->frame_count++; + decode_frame_count[pbi->index] + = pbi->frame_count; + } +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->chunk) { + cur_pic_config->pts = pbi->chunk->pts; + cur_pic_config->pts64 = pbi->chunk->pts64; + + if (pbi->is_used_v4l && !v4l_bitstream_id_enable) + cur_pic_config->pts64 = pbi->chunk->timestamp; + } +#endif + } + /*pr_info("Decode Frame Data %d\n", pbi->frame_count);*/ + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_START); + config_pic_size(pbi, pbi->vp9_param.p.bit_depth); + + if ((pbi->common.frame_type != KEY_FRAME) + && (!pbi->common.intra_only)) { + config_mc_buffer(pbi, pbi->vp9_param.p.bit_depth); +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num == 0) +#endif + config_mpred_hw(pbi); + } else { +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num == 0) +#endif + clear_mpred_hw(pbi); + } +#ifdef MCRCC_ENABLE + if (mcrcc_cache_alg_flag) + config_mcrcc_axi_hw_new(pbi); + else + config_mcrcc_axi_hw(pbi); +#endif + config_sao_hw(pbi, &pbi->vp9_param); + +#ifdef VP9_LPF_LVL_UPDATE + /* + * Get loop filter related picture level parameters from Parser + */ + pbi->lf->mode_ref_delta_enabled = pbi->vp9_param.p.mode_ref_delta_enabled; + pbi->lf->sharpness_level = pbi->vp9_param.p.sharpness_level; + for (i = 0; i < 4; i++) + pbi->lf->ref_deltas[i] = pbi->vp9_param.p.ref_deltas[i]; + for (i = 0; i < 2; i++) + pbi->lf->mode_deltas[i] = pbi->vp9_param.p.mode_deltas[i]; + pbi->default_filt_lvl = pbi->vp9_param.p.filter_level; + pbi->seg_4lf->enabled = pbi->vp9_param.p.seg_enabled; + pbi->seg_4lf->abs_delta = pbi->vp9_param.p.seg_abs_delta; + for (i = 0; i < MAX_SEGMENTS; i++) + 
pbi->seg_4lf->feature_mask[i] = (pbi->vp9_param.p.seg_lf_info[i] & + 0x8000) ? (1 << SEG_LVL_ALT_LF) : 0; + for (i = 0; i < MAX_SEGMENTS; i++) + pbi->seg_4lf->feature_data[i][SEG_LVL_ALT_LF] + = (pbi->vp9_param.p.seg_lf_info[i] + & 0x100) ? -(pbi->vp9_param.p.seg_lf_info[i] + & 0x3f) : (pbi->vp9_param.p.seg_lf_info[i] & 0x3f); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) { + /*Set pipeline mode*/ + uint32_t lpf_data32 = READ_VREG(HEVC_DBLK_CFGB); + /*dblk pipeline mode=1 for performance*/ + if (pbi->vp9_param.p.width >= 1280) + lpf_data32 |= (0x1 << 4); + else + lpf_data32 &= ~(0x3 << 4); + WRITE_VREG(HEVC_DBLK_CFGB, lpf_data32); + } + /* + * Update loop filter Thr/Lvl table for every frame + */ + /*pr_info + ("vp9_loop_filter (run before every frame decoding start)\n");*/ + vp9_loop_filter_frame_init(pbi->seg_4lf, + pbi->lfi, pbi->lf, pbi->default_filt_lvl); +#endif + /*pr_info("HEVC_DEC_STATUS_REG <= VP9_10B_DECODE_SLICE\n");*/ + WRITE_VREG(HEVC_DEC_STATUS_REG, VP9_10B_DECODE_SLICE); + } else { + vp9_print(pbi, VP9_DEBUG_BUFMGR, "Skip search next start code\n"); + cm->prev_fb_idx = INVALID_IDX; + /*skip, search next start code*/ + WRITE_VREG(HEVC_DEC_STATUS_REG, VP9_10B_DECODE_SLICE); + } + pbi->process_state = PROC_STATE_DECODESLICE; + if (pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) { + if (pbi->last_put_idx < pbi->used_buf_num) { + struct RefCntBuffer_s *frame_bufs = + cm->buffer_pool->frame_bufs; + int i = pbi->last_put_idx; + /*free not used buffers.*/ + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.index != -1)) { + if (pbi->is_used_v4l) { + struct internal_comp_buf *ibuf = + index_to_icomp_buf(pbi, i); + + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + decoder_mmu_box_free_idx(ibuf->mmu_box, ibuf->index); + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + } else { + 
ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START); + decoder_mmu_box_free_idx(pbi->mmu_box, i); + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END); + } + } + pbi->last_put_idx = -1; + } + } + ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_REGISTER_START); + return ret; +} + +static void fill_frame_info(struct VP9Decoder_s *pbi, + struct PIC_BUFFER_CONFIG_s *frame, + unsigned int framesize, + unsigned int pts) +{ + struct vframe_qos_s *vframe_qos = &pbi->vframe_qos; + + if (frame->slice_type == KEY_FRAME) + vframe_qos->type = 1; + else if (frame->slice_type == INTER_FRAME) + vframe_qos->type = 2; +/* +#define SHOW_QOS_INFO +*/ + if (input_frame_based(hw_to_vdec(pbi))) + vframe_qos->size = frame->frame_size2; + else + vframe_qos->size = framesize; + vframe_qos->pts = pts; +#ifdef SHOW_QOS_INFO + vp9_print(pbi, 0, "slice:%d\n", frame->slice_type); +#endif + vframe_qos->max_mv = frame->max_mv; + vframe_qos->avg_mv = frame->avg_mv; + vframe_qos->min_mv = frame->min_mv; +#ifdef SHOW_QOS_INFO + vp9_print(pbi, 0, "mv: max:%d, avg:%d, min:%d\n", + vframe_qos->max_mv, + vframe_qos->avg_mv, + vframe_qos->min_mv); +#endif + vframe_qos->max_qp = frame->max_qp; + vframe_qos->avg_qp = frame->avg_qp; + vframe_qos->min_qp = frame->min_qp; +#ifdef SHOW_QOS_INFO + vp9_print(pbi, 0, "qp: max:%d, avg:%d, min:%d\n", + vframe_qos->max_qp, + vframe_qos->avg_qp, + vframe_qos->min_qp); +#endif + vframe_qos->max_skip = frame->max_skip; + vframe_qos->avg_skip = frame->avg_skip; + vframe_qos->min_skip = frame->min_skip; +#ifdef SHOW_QOS_INFO + vp9_print(pbi, 0, "skip: max:%d, avg:%d, min:%d\n", + vframe_qos->max_skip, + vframe_qos->avg_skip, + vframe_qos->min_skip); +#endif + vframe_qos->num++; +} + +/* only when we decoded one field or one frame, +we can call this function to get qos info*/ +static void get_picture_qos_info(struct VP9Decoder_s *pbi) +{ + struct PIC_BUFFER_CONFIG_s *frame = 
&pbi->cur_buf->buf; + struct vdec_s *vdec = hw_to_vdec(pbi); + + if (!frame) + return; + if (vdec->mvfrm) { + frame->frame_size2 = vdec->mvfrm->frame_size; + frame->hw_decode_time = + local_clock() - vdec->mvfrm->hw_decode_start; + } + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) { + unsigned char a[3]; + unsigned char i, j, t; + unsigned long data; + + data = READ_VREG(HEVC_MV_INFO); + if (frame->slice_type == KEY_FRAME) + data = 0; + a[0] = data & 0xff; + a[1] = (data >> 8) & 0xff; + a[2] = (data >> 16) & 0xff; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_mv = a[2]; + frame->avg_mv = a[1]; + frame->min_mv = a[0]; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "mv data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + + data = READ_VREG(HEVC_QP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_qp = a[2]; + frame->avg_qp = a[1]; + frame->min_qp = a[0]; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "qp data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + + data = READ_VREG(HEVC_SKIP_INFO); + a[0] = data & 0x1f; + a[1] = (data >> 8) & 0x3f; + a[2] = (data >> 16) & 0x7f; + + for (i = 0; i < 3; i++) { + for (j = i+1; j < 3; j++) { + if (a[j] < a[i]) { + t = a[j]; + a[j] = a[i]; + a[i] = t; + } else if (a[j] == a[i]) { + a[i]++; + t = a[j]; + a[j] = a[i]; + a[i] = t; + } + } + } + frame->max_skip = a[2]; + frame->avg_skip = a[1]; + frame->min_skip = a[0]; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "skip data %x a[0]= %x a[1]= %x a[2]= %x\n", + data, a[0], a[1], a[2]); + } else { + uint32_t blk88_y_count; + uint32_t 
blk88_c_count; + uint32_t blk22_mv_count; + uint32_t rdata32; + int32_t mv_hi; + int32_t mv_lo; + uint32_t rdata32_l; + uint32_t mvx_L0_hi; + uint32_t mvy_L0_hi; + uint32_t mvx_L1_hi; + uint32_t mvy_L1_hi; + int64_t value; + uint64_t temp_value; + int pic_number = frame->decode_idx; + + frame->max_mv = 0; + frame->avg_mv = 0; + frame->min_mv = 0; + + frame->max_skip = 0; + frame->avg_skip = 0; + frame->min_skip = 0; + + frame->max_qp = 0; + frame->avg_qp = 0; + frame->min_qp = 0; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, "slice_type:%d, poc:%d\n", + frame->slice_type, + pic_number); + + /* set rd_idx to 0 */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, 0); + + blk88_y_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk88_y_count == 0) { + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] NO Data yet.\n", + pic_number); + + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_y_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_y_count, + rdata32, blk88_y_count); + + frame->avg_qp = rdata32/blk88_y_count; + /* intra_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); + + /* skipped_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_y_count, + '%', rdata32); + + frame->avg_skip = rdata32*100/blk88_y_count; + /* coeff_non_zero_y_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_y_count*1)), + '%', rdata32); + + /* blk66_c_count */ + blk88_c_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if 
(blk88_c_count == 0) { + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] NO Data yet.\n", + pic_number); + /* reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* qp_c_sum */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP AVG : %d (%d/%d)\n", + pic_number, rdata32/blk88_c_count, + rdata32, blk88_c_count); + + /* intra_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C intra rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); + + /* skipped_cu_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C skipped rate : %d%c (%d)\n", + pic_number, rdata32*100/blk88_c_count, + '%', rdata32); + + /* coeff_non_zero_c_count */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C ZERO_Coeff rate : %d%c (%d)\n", + pic_number, (100 - rdata32*100/(blk88_c_count*1)), + '%', rdata32); + + /* 1'h0, qp_c_max[6:0], 1'h0, qp_c_min[6:0], + 1'h0, qp_y_max[6:0], 1'h0, qp_y_min[6:0] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP min : %d\n", + pic_number, (rdata32>>0)&0xff); + + frame->min_qp = (rdata32>>0)&0xff; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] Y QP max : %d\n", + pic_number, (rdata32>>8)&0xff); + + frame->max_qp = (rdata32>>8)&0xff; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP min : %d\n", + pic_number, (rdata32>>16)&0xff); + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] C QP max : %d\n", + pic_number, (rdata32>>24)&0xff); + + /* blk22_mv_count */ + blk22_mv_count = READ_VREG(HEVC_PIC_QUALITY_DATA); + if (blk22_mv_count == 0) { + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] NO MV Data yet.\n", + pic_number); + /* 
reset all counts */ + WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8)); + return; + } + /* mvy_L1_count[39:32], mvx_L1_count[39:32], + mvy_L0_count[39:32], mvx_L0_count[39:32] */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + /* should all be 0x00 or 0xff */ + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MV AVG High Bits: 0x%X\n", + pic_number, rdata32); + + mvx_L0_hi = ((rdata32>>0)&0xff); + mvy_L0_hi = ((rdata32>>8)&0xff); + mvx_L1_hi = ((rdata32>>16)&0xff); + mvy_L1_hi = ((rdata32>>24)&0xff); + + /* mvx_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvx_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + value = div_s64(value, blk22_mv_count); + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 AVG : %d (%lld/%d)\n", + pic_number, (int)value, + value, blk22_mv_count); + + frame->avg_mv = value; + + /* mvy_L0_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L0_hi; + temp_value = (temp_value << 32) | rdata32_l; + + if (mvy_L0_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* mvx_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvx_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvx_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | temp_value; + else + value = temp_value; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* mvy_L1_count[31:0] */ + rdata32_l = READ_VREG(HEVC_PIC_QUALITY_DATA); + temp_value = mvy_L1_hi; + temp_value = (temp_value << 32) | rdata32_l; + if (mvy_L1_hi & 0x80) + value = 0xFFFFFFF000000000 | 
temp_value; + else + value = temp_value; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L1 AVG : %d (%lld/%d)\n", + pic_number, rdata32_l/blk22_mv_count, + value, blk22_mv_count); + + /* {mvx_L0_max, mvx_L0_min} // format : {sign, abs[14:0]} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 MAX : %d\n", + pic_number, mv_hi); + + frame->max_mv = mv_hi; + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L0 MIN : %d\n", + pic_number, mv_lo); + + frame->min_mv = mv_lo; + + /* {mvy_L0_max, mvy_L0_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L0 MIN : %d\n", + pic_number, mv_lo); + + /* {mvx_L1_max, mvx_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVX_L1 MIN : %d\n", + pic_number, mv_lo); + + /* {mvy_L1_max, mvy_L1_min} */ + rdata32 = READ_VREG(HEVC_PIC_QUALITY_DATA); + mv_hi = (rdata32>>16)&0xffff; + if (mv_hi & 0x8000) + mv_hi = 0x8000 - mv_hi; + + vp9_print(pbi, VP9_DEBUG_QOS_INFO, + "[Picture %d Quality] MVY_L1 MAX : %d\n", + pic_number, mv_hi); + + mv_lo = (rdata32>>0)&0xffff; + if (mv_lo & 0x8000) + mv_lo = 0x8000 - mv_lo; + + 
		vp9_print(pbi, VP9_DEBUG_QOS_INFO,
			"[Picture %d Quality] MVY_L1 MIN : %d\n",
			pic_number, mv_lo);

		/* Reading the control register after the data drain; value is
		 * only logged, not acted upon. */
		rdata32 = READ_VREG(HEVC_PIC_QUALITY_CTRL);

		vp9_print(pbi, VP9_DEBUG_QOS_INFO,
			"[Picture %d Quality] After Read : VDEC_PIC_QUALITY_CTRL : 0x%x\n",
			pic_number, rdata32);

		/* reset all counts */
		WRITE_VREG(HEVC_PIC_QUALITY_CTRL, (1<<8));
	}
}

/*
 * Fill @info with compressed-buffer (MMU) sizing for the current stream.
 * NOTE(review): max_size is computed from max_pic_w/max_pic_h while
 * header_size/frame_buffer_size use the current frame_width/frame_height —
 * presumably worst-case vs. current-frame sizing; confirm against callers.
 */
static void vvp9_get_comp_buf_info(struct VP9Decoder_s *pbi,
	struct vdec_comp_buf_info *info)
{
	u16 bit_depth = pbi->param.p.bit_depth;

	info->max_size = vp9_max_mmu_buf_size(
		pbi->max_pic_w,
		pbi->max_pic_h);
	info->header_size = vvp9_mmu_compress_header_size(
		pbi->frame_width,
		pbi->frame_height);
	info->frame_buffer_size = vp9_mmu_page_num(
		pbi, pbi->frame_width,
		pbi->frame_height,
		/* bit_depth == 0 selects the "is 8-bit?" flavor of page sizing */
		bit_depth == 0);
}

/*
 * Report picture/DPB parameters to the v4l2 layer.  Coded dimensions are
 * aligned up to 64; dpb_frames is chosen by resolution and then padded.
 * May shrink pbi->dynamic_buf_num_margin so frames+margin fits
 * MAX_BUF_NUM_NORMAL.  Always returns 0.
 */
static int vvp9_get_ps_info(struct VP9Decoder_s *pbi, struct aml_vdec_ps_infos *ps)
{
	ps->visible_width = pbi->frame_width;
	ps->visible_height = pbi->frame_height;
	ps->coded_width = ALIGN(pbi->frame_width, 64);
	ps->coded_height = ALIGN(pbi->frame_height, 64);
	ps->dpb_size = pbi->used_buf_num;
	ps->dpb_margin = pbi->dynamic_buf_num_margin;
	if (pbi->frame_width > 2048 && pbi->frame_height > 1088)
		ps->dpb_frames = 4; /* > level 4.1*/
	else
		ps->dpb_frames = 8; /* < level 4.1 */
	/*
	1. current decoding frame is not included in dpb;
	2. for frame push out, one more buffer necessary.
	3. Two consecutive frames cannot use the same buffer.
	*/
	ps->dpb_frames += 3;

	if (ps->dpb_margin + ps->dpb_frames > MAX_BUF_NUM_NORMAL) {
		u32 delta;
		delta = ps->dpb_margin + ps->dpb_frames - MAX_BUF_NUM_NORMAL;
		ps->dpb_margin -= delta;
		/* persist the reduced margin so later calls agree */
		pbi->dynamic_buf_num_margin = ps->dpb_margin;
	}
	ps->field = V4L2_FIELD_NONE;

	return 0;
}


/*
 * Detect a mid-stream resolution change (v4l path only).  When the parsed
 * frame dimensions differ from the last seen ones, push new buffer/PS info
 * to the v4l2 layer, flush via an EOS, and flag the resolution change.
 * Returns 1 if a change was handled, 0 otherwise.
 */
static int v4l_res_change(struct VP9Decoder_s *pbi)
{
	struct aml_vcodec_ctx *ctx =
		(struct aml_vcodec_ctx *)(pbi->v4l2_ctx);
	int ret = 0;

	if (ctx->param_sets_from_ucode &&
		pbi->res_ch_flag == 0) {
		struct aml_vdec_ps_infos ps;
		struct vdec_comp_buf_info comp;

		/* only a real change after a first valid size counts */
		if ((pbi->last_width != 0 &&
			pbi->last_height != 0) &&
			(pbi->frame_width != pbi->last_width ||
			pbi->frame_height != pbi->last_height)) {

			vp9_print(pbi, 0, "%s (%d,%d)=>(%d,%d)\r\n", __func__, pbi->last_width,
				pbi->last_height, pbi->frame_width, pbi->frame_height);

			/* dw mode 16 bypasses the compressed-buffer path */
			if (get_valid_double_write_mode(pbi) != 16) {
				vvp9_get_comp_buf_info(pbi, &comp);
				vdec_v4l_set_comp_buf_info(ctx, &comp);
			}

			vvp9_get_ps_info(pbi, &ps);
			vdec_v4l_set_ps_infos(ctx, &ps);
			vdec_v4l_res_ch_event(ctx);

			pbi->init_pic_w = pbi->frame_width;
			pbi->init_pic_h = pbi->frame_height;
			init_mv_buf_list(pbi);

			pbi->process_state = PROC_STATE_INIT;
			pbi->v4l_params_parsed = false;
			pbi->res_ch_flag = 1;
			ctx->v4l_resolution_change = 1;
			/* drain the old-resolution pipeline with an EOS */
			pbi->eos = 1;
			vp9_bufmgr_postproc(pbi);
			//del_timer_sync(&pbi->timer);
			ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__);
			notify_v4l_eos(hw_to_vdec(pbi));
			ATRACE_COUNTER("V_ST_DEC-submit_eos", 0);
			ret = 1;
		}
	}

	return ret;
}

/*
 * Threaded (bottom) half of the decoder interrupt: dispatches on the
 * dec_status latched by vvp9_isr() — header parsed, picture done, buffer
 * empty, EOS, oversize — and schedules the work items accordingly.
 */
static irqreturn_t vvp9_isr_thread_fn(int irq, void *data)
{
	struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)data;
	unsigned int dec_status = pbi->dec_status;
	int i;

	if (dec_status == VP9_HEAD_PARSER_DONE) {
		ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_START);
	}
	else if (dec_status == HEVC_DECPIC_DATA_DONE) {
		ATRACE_COUNTER(pbi->trace.decode_time_name,
			DECODER_ISR_THREAD_PIC_DONE_START);
	}

	/*if (pbi->wait_buf)
	 *	pr_info("set wait_buf to 0\r\n");
	 */
	if (pbi->eos)
		return IRQ_HANDLED;
	pbi->wait_buf = 0;
#ifdef MULTI_INSTANCE_SUPPORT
#ifdef SUPPORT_FB_DECODING
#ifdef FB_DECODING_TEST_SCHEDULE
	/* test hook: remap status codes to exercise the two-stage path */
	if (pbi->s1_test_cmd == TEST_SET_PIC_DONE)
		dec_status = HEVC_DECPIC_DATA_DONE;
	else if (pbi->s1_test_cmd == TEST_SET_S2_DONE
		&& dec_status == HEVC_DECPIC_DATA_DONE)
		dec_status = HEVC_S2_DECODING_DONE;
	pbi->s1_test_cmd = TEST_SET_NONE;
#else
	/*if (irq != VDEC_IRQ_0)
		dec_status = HEVC_S2_DECODING_DONE;*/
#endif
	if (dec_status == HEVC_S2_DECODING_DONE) {
		pbi->dec_result = DEC_RESULT_DONE;
		vdec_schedule_work(&pbi->work);
#ifdef FB_DECODING_TEST_SCHEDULE
		amhevc_stop();
		pbi->dec_s1_result = DEC_S1_RESULT_DONE;
		vdec_schedule_work(&pbi->s1_work);
#endif
	} else
#endif
	/* input exhausted mid-frame: either pull more data or finish */
	if ((dec_status == HEVC_NAL_DECODE_DONE) ||
			(dec_status == HEVC_SEARCH_BUFEMPTY) ||
			(dec_status == HEVC_DECODE_BUFEMPTY)
		) {
		if (pbi->m_ins_flag) {
			reset_process_time(pbi);
			if (!vdec_frame_based(hw_to_vdec(pbi)))
				dec_again_process(pbi);
			else {
				if (pbi->common.show_existing_frame) {
					pbi->dec_result = DEC_RESULT_DONE;
					amhevc_stop();
					vdec_schedule_work(&pbi->work);
				}
				else {
					pbi->dec_result = DEC_RESULT_GET_DATA;
					vdec_schedule_work(&pbi->work);
				}
			}
		}
		pbi->process_busy = 0;
		return IRQ_HANDLED;
	} else if (dec_status == HEVC_DECPIC_DATA_DONE) {
		/* a full picture decoded: collect QoS stats, finish the run */
		if (pbi->m_ins_flag) {
			get_picture_qos_info(pbi);
#ifdef SUPPORT_FB_DECODING
			if (pbi->used_stage_buf_num > 0) {
				reset_process_time(pbi);
				inc_s1_pos(pbi);
				trigger_schedule(pbi);
#ifdef FB_DECODING_TEST_SCHEDULE
				pbi->s1_test_cmd = TEST_SET_S2_DONE;
#else
				amhevc_stop();
				pbi->dec_s1_result = DEC_S1_RESULT_DONE;
				vdec_schedule_work(&pbi->s1_work);
#endif
			} else
#endif
			{
				reset_process_time(pbi);
				if (atomic_read(&pbi->vf_pre_count) == 0 || pbi->low_latency_flag)
					vp9_bufmgr_postproc(pbi);

				pbi->dec_result = DEC_RESULT_DONE;
				amhevc_stop();
				if (mcrcc_cache_alg_flag)
					dump_hit_rate(pbi);
				ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_THREAD_EDN);
				vdec_schedule_work(&pbi->work);
			}
		} else {
			if (pbi->low_latency_flag) {
				vp9_bufmgr_postproc(pbi);
				WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE);
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
				vdec_profile(hw_to_vdec(pbi), VDEC_PROFILE_EVENT_CB);
				if (debug & PRINT_FLAG_VDEC_DETAIL)
					pr_info("%s VP9 frame done \n", __func__);
#endif
			}
		}

		pbi->process_busy = 0;
		return IRQ_HANDLED;
	}
#endif

	if (dec_status == VP9_EOS) {
#ifdef MULTI_INSTANCE_SUPPORT
		if (pbi->m_ins_flag)
			reset_process_time(pbi);
#endif

		pr_info("VP9_EOS, flush buffer\r\n");

		vp9_bufmgr_postproc(pbi);

		pr_info("send VP9_10B_DISCARD_NAL\r\n");
		WRITE_VREG(HEVC_DEC_STATUS_REG, VP9_10B_DISCARD_NAL);
		pbi->process_busy = 0;
#ifdef MULTI_INSTANCE_SUPPORT
		if (pbi->m_ins_flag) {
			pbi->dec_result = DEC_RESULT_DONE;
			amhevc_stop();
			vdec_schedule_work(&pbi->work);
		}
#endif
		return IRQ_HANDLED;
	} else if (dec_status == HEVC_DECODE_OVER_SIZE) {
		pr_info("vp9 decode oversize !!\n");
		/* disable further error processing; mark fatal overflow */
		debug |= (VP9_DEBUG_DIS_LOC_ERROR_PROC |
			VP9_DEBUG_DIS_SYS_ERROR_PROC);
		pbi->fatal_error |= DECODER_FATAL_ERROR_SIZE_OVERFLOW;
#ifdef MULTI_INSTANCE_SUPPORT
		if (pbi->m_ins_flag)
			reset_process_time(pbi);
#endif
		return IRQ_HANDLED;
	}

	/* everything below handles VP9_HEAD_PARSER_DONE only */
	if (dec_status != VP9_HEAD_PARSER_DONE) {
		pbi->process_busy = 0;
		return IRQ_HANDLED;
	}

	/* no free output buffer: park the decode until one is recycled */
	if (pbi->m_ins_flag &&
			!get_free_buf_count(pbi) && pbi->pic_list_init_done) {
		vp9_print(pbi, VP9_DEBUG_OUT_PTS,
			"detected %d frames in a packet, ts: %lld.\n",
			pbi->one_package_frame_cnt + 1,
			pbi->chunk->timestamp);
		pbi->process_busy = 0;
		pbi->dec_result = DEC_RESULT_NEED_MORE_BUFFER;
		ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END);
		vdec_schedule_work(&pbi->work);
		return IRQ_HANDLED;
	}

	pbi->one_package_frame_cnt++;

#ifdef MULTI_INSTANCE_SUPPORT
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	if (pbi->m_ins_flag == 0 && pbi->low_latency_flag) {
		vdec_profile(hw_to_vdec(pbi), VDEC_PROFILE_EVENT_RUN);
		if (debug & PRINT_FLAG_VDEC_DETAIL)
			pr_info("%s VP9 frame header found \n", __func__);
	}
#endif
	if (pbi->m_ins_flag)
		reset_process_time(pbi);
#endif
	if (pbi->process_state != PROC_STATE_SENDAGAIN
#ifdef SUPPORT_FB_DECODING
		&& pbi->used_stage_buf_num == 0
#endif
		) {
		if (pbi->mmu_enable) {
			ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START);
			vp9_recycle_mmu_buf_tail(pbi);
			ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END);
		}

		if (pbi->frame_count > 0)
			vp9_bufmgr_postproc(pbi);
	}

	/* fetch the parsed frame header parameters (RPM block) */
	if (debug & VP9_DEBUG_SEND_PARAM_WITH_REG) {
		get_rpm_param(&pbi->vp9_param);
	} else {
#ifdef SUPPORT_FB_DECODING
		if (pbi->used_stage_buf_num > 0) {
			reset_process_time(pbi);
			get_s1_buf(pbi);

			if (get_mv_buf(pbi,
				&pbi->s1_mv_buf_index,
				&pbi->s1_mpred_mv_wr_start_addr
				) < 0) {
				vp9_print(pbi, 0,
					"%s: Error get_mv_buf fail\n",
					__func__);
			}

			if (pbi->s1_buf == NULL) {
				vp9_print(pbi, 0,
					"%s: Error get_s1_buf fail\n",
					__func__);
				pbi->process_busy = 0;
				return IRQ_HANDLED;
			}

			/* RPM words arrive in groups of 4 with reversed order */
			for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) {
				int ii;
				for (ii = 0; ii < 4; ii++) {
					pbi->s1_buf->rpm[i + 3 - ii] =
						pbi->rpm_ptr[i + 3 - ii];
					pbi->s1_param.l.data[i + ii] =
						pbi->rpm_ptr[i + 3 - ii];
				}
			}

			mpred_process(pbi);
#ifdef FB_DECODING_TEST_SCHEDULE
			pbi->dec_s1_result =
				DEC_S1_RESULT_TEST_TRIGGER_DONE;
			vdec_schedule_work(&pbi->s1_work);
#else
			WRITE_VREG(HEVC_ASSIST_FB_MMU_MAP_ADDR,
				pbi->stage_mmu_map_phy_addr +
				pbi->s1_buf->index * STAGE_MMU_MAP_SIZE);

			start_s1_decoding(pbi);
#endif
			start_process_time(pbi);
			pbi->process_busy = 0;
			return IRQ_HANDLED;
		} else
#endif
		{
			ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_START);
			for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) {
				int ii;
				for (ii = 0; ii < 4; ii++)
					pbi->vp9_param.l.data[i + ii] =
						pbi->rpm_ptr[i + 3 - ii];
			}
			ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_RPM_END);
		}
	}

	if (pbi->is_used_v4l) {
		struct aml_vcodec_ctx *ctx =
			(struct aml_vcodec_ctx *)(pbi->v4l2_ctx);

		pbi->frame_width = pbi->vp9_param.p.width;
		pbi->frame_height = pbi->vp9_param.p.height;

		if (is_oversize(pbi->frame_width, pbi->frame_height)) {
			continue_decoding(pbi);
			vp9_print(pbi, 0, "pic size(%d x %d) is oversize\n",
				pbi->frame_width, pbi->frame_height);
			if (pbi->m_ins_flag)
				start_process_time(pbi);
			pbi->postproc_done = 0;
			pbi->process_busy = 0;
			return IRQ_HANDLED;
		}

		if (!v4l_res_change(pbi)) {
			/* first-time parameter parse: publish PS/comp info */
			if (ctx->param_sets_from_ucode && !pbi->v4l_params_parsed) {
				struct aml_vdec_ps_infos ps;
				struct vdec_comp_buf_info comp;

				pr_debug("set ucode parse\n");
				if (get_valid_double_write_mode(pbi) != 16) {
					vvp9_get_comp_buf_info(pbi, &comp);
					vdec_v4l_set_comp_buf_info(ctx, &comp);
				}

				vvp9_get_ps_info(pbi, &ps);
				/*notice the v4l2 codec.*/
				vdec_v4l_set_ps_infos(ctx, &ps);

				pbi->init_pic_w = pbi->frame_width;
				pbi->init_pic_h = pbi->frame_height;

				pbi->last_width = pbi->frame_width;
				pbi->last_height = pbi->frame_height;

				pbi->v4l_params_parsed = true;
				pbi->postproc_done = 0;
				pbi->process_busy = 0;
				ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END);
				dec_again_process(pbi);
				return IRQ_HANDLED;
			} else {
				if (!pbi->pic_list_init_done) {
					struct vdec_pic_info pic;

					vdec_v4l_get_pic_info(ctx, &pic);
					pbi->used_buf_num = pic.dpb_frames +
						pic.dpb_margin;
					init_pic_list(pbi);
					init_pic_list_hw(pbi);

					init_mv_buf_list(pbi);
					pbi->pic_list_init_done = true;
				}
			}
		} else {
			/* resolution changed: restart the decode cycle */
			pbi->postproc_done = 0;
			pbi->process_busy = 0;
			ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END);
			dec_again_process(pbi);
			return IRQ_HANDLED;
		}
	}

	continue_decoding(pbi);
	pbi->postproc_done = 0;
	pbi->process_busy = 0;

#ifdef MULTI_INSTANCE_SUPPORT
	if (pbi->m_ins_flag)
		start_process_time(pbi);
#endif
	ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_THREAD_HEAD_END);
	return IRQ_HANDLED;
}

/*
 * Top (hard) half of the decoder interrupt: acks the mailbox, latches
 * dec_status, handles ucode debug taps and probability adaptation, then
 * wakes the threaded handler (IRQ_WAKE_THREAD) for the heavy work.
 */
static irqreturn_t vvp9_isr(int irq, void *data)
{
	int i;
	unsigned int dec_status;
	struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)data;
	unsigned int adapt_prob_status;
	uint debug_tag;

	WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1);
	dec_status = READ_VREG(HEVC_DEC_STATUS_REG);
	ATRACE_COUNTER("V_ST_DEC-decode_state", dec_status);

	/* NOTE(review): pbi is dereferenced here (pbi->trace...) before the
	 * !pbi check below, so that check cannot actually protect anything —
	 * confirm pbi can never be NULL at registration time. */
	if (dec_status == VP9_HEAD_PARSER_DONE) {
		ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_HEAD_DONE);
	}
	else if (dec_status == HEVC_DECPIC_DATA_DONE) {
		ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_PIC_DONE);
	}

	adapt_prob_status = READ_VREG(VP9_ADAPT_PROB_REG);
	if (!pbi)
		return IRQ_HANDLED;
	if (pbi->init_flag == 0)
		return IRQ_HANDLED;
	if (pbi->process_busy) { /*on process.*/
		vp9_print(pbi, PRINT_FLAG_VDEC_STATUS,
			"%s: process_busy isr return\n", __func__);
		return IRQ_HANDLED;
	}
	pbi->dec_status = dec_status;
	pbi->process_busy = 1;
	if (debug & VP9_DEBUG_BUFMGR)
		pr_info("vp9 isr (%d) dec status = 0x%x, lcu 0x%x shiftbyte 0x%x (%x %x lev %x, wr %x, rd %x)\n",
			irq,
			dec_status, READ_VREG(HEVC_PARSER_LCU_START),
			READ_VREG(HEVC_SHIFT_BYTE_COUNT),
			READ_VREG(HEVC_STREAM_START_ADDR),
			READ_VREG(HEVC_STREAM_END_ADDR),
			READ_VREG(HEVC_STREAM_LEVEL),
			READ_VREG(HEVC_STREAM_WR_PTR),
			READ_VREG(HEVC_STREAM_RD_PTR)
			);
#ifdef SUPPORT_FB_DECODING
	/*if (irq != VDEC_IRQ_0)
		return IRQ_WAKE_THREAD;*/
#endif

	/* ucode debug tap: bit 16 set requests an LMEM dump */
	debug_tag = READ_HREG(DEBUG_REG1);
	if (debug_tag & 0x10000) {
		pr_info("LMEM<tag %x>:\n", READ_HREG(DEBUG_REG1));
		for (i = 0; i < 0x400; i += 4) {
			int ii;
			if ((i & 0xf) == 0)
				pr_info("%03x: ", i);
			for (ii = 0; ii < 4; ii++) {
				pr_info("%04x ",
					pbi->lmem_ptr[i + 3 - ii]);
			}
			if (((i + ii) & 0xf) == 0)
				pr_info("\n");
		}

		/* optionally pause the ucode at a configured position */
		if ((udebug_pause_pos == (debug_tag & 0xffff)) &&
			(udebug_pause_decode_idx == 0 ||
			udebug_pause_decode_idx == pbi->slice_idx) &&
			(udebug_pause_val == 0 ||
			udebug_pause_val == READ_HREG(DEBUG_REG2)))
			pbi->ucode_pause_pos = udebug_pause_pos;
		else if (debug_tag & 0x20000)
			pbi->ucode_pause_pos = 0xffffffff;
		if (pbi->ucode_pause_pos)
			reset_process_time(pbi);
		else
			WRITE_HREG(DEBUG_REG1, 0);
	} else if (debug_tag != 0) {
		pr_info(
			"dbg%x: %x lcu %x\n", READ_HREG(DEBUG_REG1),
			READ_HREG(DEBUG_REG2),
			READ_VREG(HEVC_PARSER_LCU_START));
		if ((udebug_pause_pos == (debug_tag & 0xffff)) &&
			(udebug_pause_decode_idx == 0 ||
			udebug_pause_decode_idx == pbi->slice_idx) &&
			(udebug_pause_val == 0 ||
			udebug_pause_val == READ_HREG(DEBUG_REG2)))
			pbi->ucode_pause_pos = udebug_pause_pos;
		if (pbi->ucode_pause_pos)
			reset_process_time(pbi);
		else
			WRITE_HREG(DEBUG_REG1, 0);
		pbi->process_busy = 0;
		return IRQ_HANDLED;
	}

#ifdef MULTI_INSTANCE_SUPPORT
	if (!pbi->m_ins_flag) {
#endif
		if (pbi->error_flag == 1) {
			pbi->error_flag = 2;
			pbi->process_busy = 0;
			return IRQ_HANDLED;
		} else if (pbi->error_flag == 3) {
			pbi->process_busy = 0;
			return IRQ_HANDLED;
		}

		if (get_free_buf_count(pbi) <= 0) {
			/*
			if (pbi->wait_buf == 0)
				pr_info("set wait_buf to 1\r\n");
			*/
			pbi->wait_buf = 1;
			pbi->process_busy = 0;
			return IRQ_HANDLED;
		}
#ifdef MULTI_INSTANCE_SUPPORT
	}
#endif
	/* 0xfd in the low byte = ucode requests probability adaptation */
	if ((adapt_prob_status & 0xff) == 0xfd) {
		struct VP9_Common_s *const cm = &pbi->common;
		int pre_fc = 0;

		if (pbi->m_ins_flag)
			reset_process_time(pbi);

		if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) &&
			(vdec_secure(hw_to_vdec(pbi)))) {
			/* secure path: adaptation runs inside the TEE */
			pre_fc = ((cm->frame_type == KEY_FRAME) || (cm->intra_only)) ? 0 : 1;
			tee_vp9_prob_process(pre_fc, cm->last_frame_type,
				adapt_prob_status, (unsigned int)pbi->prob_buffer_phy_addr);
		} else {
			uint8_t *prev_prob_b, *cur_prob_b, *count_b;

			/*VP9_REQ_ADAPT_PROB*/
			pre_fc = ((cm->frame_type == KEY_FRAME) || (cm->intra_only));
			/* high bits of the status select the context slot */
			prev_prob_b = ((uint8_t *)pbi->prob_buffer_addr) +
				((adapt_prob_status >> 8) * 0x1000);
			cur_prob_b = ((uint8_t *)pbi->prob_buffer_addr) + 0x4000;
			count_b = (uint8_t *)pbi->count_buffer_addr;

			adapt_coef_probs(pbi->pic_count,
				(cm->last_frame_type == KEY_FRAME),
				pre_fc, (adapt_prob_status >> 8),
				(unsigned int *)prev_prob_b,
				(unsigned int *)cur_prob_b, (unsigned int *)count_b);

			/* persist adapted probs for the next frame */
			memcpy(prev_prob_b, cur_prob_b, PROB_SIZE);
		}

		WRITE_VREG(VP9_ADAPT_PROB_REG, 0);
		pbi->pic_count += 1;
#ifdef MULTI_INSTANCE_SUPPORT
		if (pbi->m_ins_flag)
			start_process_time(pbi);
#endif
	}
	ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_ISR_END);
	return IRQ_WAKE_THREAD;
}

/*
 * Workqueue callback: re-evaluate the HEVC core clock for the current
 * resolution/fps and remember the configured operating point.
 * Assumes pbi->frame_dur != 0 (vvp9_local_init falls back to 96000/24) —
 * confirm no caller schedules this before local init.
 */
static void vp9_set_clk(struct work_struct *work)
{
	struct VP9Decoder_s *pbi = container_of(work,
		struct VP9Decoder_s, set_clk_work);
	int fps = 96000 / pbi->frame_dur;

	if (hevc_source_changed(VFORMAT_VP9,
		frame_width, frame_height, fps) > 0)
		pbi->saved_resolution = frame_width *
			frame_height * fps;
}

/*
 * Periodic watchdog timer (PUT_INTERVAL): detects a disconnected vdec,
 * stalled streams, decode timeouts, and services a set of debugfs-style
 * module-parameter commands (radr/rval, pop_shorts, dbg_cmd, ...).
 * Re-arms itself at the end.
 */
static void vvp9_put_timer_func(struct timer_list *timer)
{
	struct VP9Decoder_s *pbi = container_of(timer,
		struct VP9Decoder_s, timer);
	enum receviver_start_e state = RECEIVER_INACTIVE;
	uint8_t empty_flag;
	unsigned int buf_level;

	if (pbi->m_ins_flag) {
		if (hw_to_vdec(pbi)->next_status
			== VDEC_STATUS_DISCONNECTED &&
			!pbi->is_used_v4l) {
#ifdef SUPPORT_FB_DECODING
			if (pbi->run2_busy)
				return;

			pbi->dec_s1_result = DEC_S1_RESULT_FORCE_EXIT;
			vdec_schedule_work(&pbi->s1_work);
#endif
			pbi->dec_result = DEC_RESULT_FORCE_EXIT;
			vdec_schedule_work(&pbi->work);
			pr_debug(
				"vdec requested to be disconnected\n");
			return;
		}
	}
	if (pbi->init_flag == 0) {
		if
		(pbi->stat & STAT_TIMER_ARM) {
			timer->expires = jiffies + PUT_INTERVAL;
			add_timer(&pbi->timer);
		}
		return;
	}
	if (pbi->m_ins_flag == 0) {
		/* query downstream receiver activity (non-multi-instance) */
		if (vf_get_receiver(pbi->provider_name)) {
			state =
				vf_notify_receiver(pbi->provider_name,
					VFRAME_EVENT_PROVIDER_QUREY_STATE,
					NULL);
			if ((state == RECEIVER_STATE_NULL)
				|| (state == RECEIVER_STATE_NONE))
				state = RECEIVER_INACTIVE;
		} else
			state = RECEIVER_INACTIVE;

		empty_flag = (READ_VREG(HEVC_PARSER_INT_STATUS) >> 6) & 0x1;
		/* error watchdog */
		if (empty_flag == 0) {
			/* decoder has input */
			if ((debug & VP9_DEBUG_DIS_LOC_ERROR_PROC) == 0) {

				buf_level = READ_VREG(HEVC_STREAM_LEVEL);
				/* receiver has no buffer to recycle */
				if ((state == RECEIVER_INACTIVE) &&
					(kfifo_is_empty(&pbi->display_q) &&
					buf_level > 0x200)
					) {
					/* kick the ISR to retry processing */
					WRITE_VREG
					(HEVC_ASSIST_MBOX0_IRQ_REG,
					0x1);
				}
			}

			if ((debug & VP9_DEBUG_DIS_SYS_ERROR_PROC) == 0) {
				/* receiver has no buffer to recycle */
				/*if ((state == RECEIVER_INACTIVE) &&
				 *	(kfifo_is_empty(&pbi->display_q))) {
				 *pr_info("vp9 something error,need reset\n");
				 *}
				 */
			}
		}
	}
#ifdef MULTI_INSTANCE_SUPPORT
	else {
		/* decode-timeout detection: if the LCU position has not
		 * advanced for several ticks, either time out or retry */
		if (
			(decode_timeout_val > 0) &&
			(pbi->start_process_time > 0) &&
			((1000 * (jiffies - pbi->start_process_time) / HZ)
				> decode_timeout_val)
		) {
			int current_lcu_idx =
				READ_VREG(HEVC_PARSER_LCU_START)
				& 0xffffff;
			if (pbi->last_lcu_idx == current_lcu_idx) {
				if (pbi->decode_timeout_count > 0)
					pbi->decode_timeout_count--;
				if (pbi->decode_timeout_count == 0) {
					if (input_frame_based(
						hw_to_vdec(pbi)) ||
						(READ_VREG(HEVC_STREAM_LEVEL) > 0x200))
						timeout_process(pbi);
					else {
						vp9_print(pbi, 0,
							"timeout & empty, again\n");
						dec_again_process(pbi);
					}
				}
			} else {
				start_process_time(pbi);
				pbi->last_lcu_idx = current_lcu_idx;
			}
		}
	}
#endif

	/* release a ucode debug pause once the requested position changes */
	if ((pbi->ucode_pause_pos != 0) &&
		(pbi->ucode_pause_pos != 0xffffffff) &&
		udebug_pause_pos != pbi->ucode_pause_pos) {
		pbi->ucode_pause_pos = 0;
		WRITE_HREG(DEBUG_REG1, 0);
	}
#ifdef MULTI_INSTANCE_SUPPORT
	if (debug & VP9_DEBUG_FORCE_SEND_AGAIN) {
		pr_info(
			"Force Send Again\r\n");
		debug &= ~VP9_DEBUG_FORCE_SEND_AGAIN;
		reset_process_time(pbi);
		pbi->dec_result = DEC_RESULT_AGAIN;
		if (pbi->process_state ==
			PROC_STATE_DECODESLICE) {
			if (pbi->mmu_enable)
				vp9_recycle_mmu_buf(pbi);
			pbi->process_state =
				PROC_STATE_SENDAGAIN;
		}
		amhevc_stop();

		vdec_schedule_work(&pbi->work);
	}

	if (debug & VP9_DEBUG_DUMP_DATA) {
		debug &= ~VP9_DEBUG_DUMP_DATA;
		vp9_print(pbi, 0,
			"%s: chunk size 0x%x off 0x%x sum 0x%x\n",
			__func__,
			pbi->chunk->size,
			pbi->chunk->offset,
			get_data_check_sum(pbi, pbi->chunk->size)
			);
		dump_data(pbi, pbi->chunk->size);
	}
#endif
	if (debug & VP9_DEBUG_DUMP_PIC_LIST) {
		dump_pic_list(pbi);
		debug &= ~VP9_DEBUG_DUMP_PIC_LIST;
	}
	if (debug & VP9_DEBUG_TRIG_SLICE_SEGMENT_PROC) {
		WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1);
		debug &= ~VP9_DEBUG_TRIG_SLICE_SEGMENT_PROC;
	}
	/*if (debug & VP9_DEBUG_HW_RESET) {
	}*/

	/* module-parameter register poke/peek (radr/rval) */
	if (radr != 0) {
		if (rval != 0) {
			WRITE_VREG(radr, rval);
			pr_info("WRITE_VREG(%x,%x)\n", radr, rval);
		} else
			pr_info("READ_VREG(%x)=%x\n", radr, READ_VREG(radr));
		rval = 0;
		radr = 0;
	}
	/* pop and dump N 16-bit words from the stream shifter */
	if (pop_shorts != 0) {
		int i;
		u32 sum = 0;

		pr_info("pop stream 0x%x shorts\r\n", pop_shorts);
		for (i = 0; i < pop_shorts; i++) {
			u32 data =
				(READ_HREG(HEVC_SHIFTED_DATA) >> 16);
			WRITE_HREG(HEVC_SHIFT_COMMAND,
				(1<<7)|16);
			if ((i & 0xf) == 0)
				pr_info("%04x:", i);
			pr_info("%04x ", data);
			if (((i + 1) & 0xf) == 0)
				pr_info("\r\n");
			sum += data;
		}
		pr_info("\r\nsum = %x\r\n", sum);
		pop_shorts = 0;
	}
	if (dbg_cmd != 0) {
		if (dbg_cmd == 1) {
			u32 disp_laddr;

			if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB &&
				get_double_write_mode(pbi) == 0) {
				disp_laddr =
					READ_VCBUS_REG(AFBC_BODY_BADDR) << 4;
			} else {
				struct canvas_s cur_canvas;

				canvas_read((READ_VCBUS_REG(VD1_IF0_CANVAS0)
					& 0xff), &cur_canvas);
				disp_laddr = cur_canvas.addr;
			}
			pr_info("current displayed buffer address %x\r\n",
				disp_laddr);
		}
		dbg_cmd = 0;
	}
	/*don't changed at start.*/
	if (pbi->get_frame_dur && pbi->show_frame_num > 60 &&
		pbi->frame_dur > 0 && pbi->saved_resolution !=
		frame_width * frame_height *
		(96000 / pbi->frame_dur))
		vdec_schedule_work(&pbi->set_clk_work);

	/* re-arm the watchdog */
	timer->expires = jiffies + PUT_INTERVAL;
	add_timer(timer);
}


/*
 * Fill @vstatus with decoder statistics for the status ioctl.
 * Returns 0 on success, -1 if the private context is missing.
 */
int vvp9_dec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
{
	struct VP9Decoder_s *vp9 =
		(struct VP9Decoder_s *)vdec->private;

	if (!vp9)
		return -1;

	vstatus->frame_width = frame_width;
	vstatus->frame_height = frame_height;
	if (vp9->error_frame_width &&
		vp9->error_frame_height) {
		vstatus->frame_width = vp9->error_frame_width;
		vstatus->frame_height = vp9->error_frame_height;
	}

	/* frame_dur is in 96 kHz ticks; round the fps to nearest integer */
	if (vp9->frame_dur != 0)
		vstatus->frame_rate = ((96000 * 10 / vp9->frame_dur) % 10) < 5 ?
			96000 / vp9->frame_dur : (96000 / vp9->frame_dur + 1);
	else
		vstatus->frame_rate = -1;
	vstatus->error_count = 0;
	vstatus->status = vp9->stat | vp9->fatal_error;
	vstatus->frame_dur = vp9->frame_dur;
	vstatus->bit_rate = vp9->gvs->bit_rate;
	vstatus->frame_data = vp9->gvs->frame_data;
	vstatus->total_data = vp9->gvs->total_data;
	vstatus->frame_count = vp9->gvs->frame_count;
	vstatus->error_frame_count = vp9->gvs->error_frame_count;
	vstatus->drop_frame_count = vp9->gvs->drop_frame_count;
	vstatus->i_decoded_frames = vp9->gvs->i_decoded_frames;
	vstatus->i_lost_frames = vp9->gvs->i_lost_frames;
	vstatus->i_concealed_frames = vp9->gvs->i_concealed_frames;
	vstatus->p_decoded_frames = vp9->gvs->p_decoded_frames;
	vstatus->p_lost_frames = vp9->gvs->p_lost_frames;
	vstatus->p_concealed_frames = vp9->gvs->p_concealed_frames;
	vstatus->b_decoded_frames = vp9->gvs->b_decoded_frames;
	vstatus->b_lost_frames = vp9->gvs->b_lost_frames;
	vstatus->b_concealed_frames = vp9->gvs->b_concealed_frames;
	/* NOTE(review): total_data is assigned twice (also above) — harmless
	 * but redundant; confirm which assignment is intended to stay. */
	vstatus->total_data = vp9->gvs->total_data;
	vstatus->samp_cnt = vp9->gvs->samp_cnt;
	vstatus->offset = vp9->gvs->offset;
	snprintf(vstatus->vdec_name, sizeof(vstatus->vdec_name),
		"%s", DRIVER_NAME);
	return 0;
}

/* Record the reset request flag from the vdec core; always returns 0. */
int vvp9_set_isreset(struct vdec_s *vdec, int isreset)
{
	is_reset = isreset;
	return 0;
}

#if 0
static void VP9_DECODE_INIT(void)
{
	/* enable vp9 clocks */
	WRITE_VREG(DOS_GCLK_EN3, 0xffffffff);
	/* *************************************************************** */
	/* Power ON HEVC */
	/* *************************************************************** */
	/* Powerup HEVC */
	WRITE_VREG(AO_RTI_GEN_PWR_SLEEP0,
		READ_VREG(AO_RTI_GEN_PWR_SLEEP0) & (~(0x3 << 6)));
	WRITE_VREG(DOS_MEM_PD_HEVC, 0x0);
	WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) | (0x3ffff << 2));
	WRITE_VREG(DOS_SW_RESET3, READ_VREG(DOS_SW_RESET3) & (~(0x3ffff << 2)));
	/* remove isolations */
	WRITE_VREG(AO_RTI_GEN_PWR_ISO0,
		READ_VREG(AO_RTI_GEN_PWR_ISO0) & (~(0x3 << 10)));

}
#endif

/*
 * Program the HEVC core for VP9 decoding: workspace, picture list, decoder
 * HW, loop filter, stream fetch, mailbox interrupts and (optionally) the
 * two-stage front/back decoding FIFOs, gated by @mask (HW_MASK_FRONT/BACK).
 */
static void vvp9_prot_init(struct VP9Decoder_s *pbi, u32 mask)
{
	unsigned int data32;
	/* VP9_DECODE_INIT(); */
	vp9_config_work_space_hw(pbi, mask);
	if (mask & HW_MASK_BACK)
		init_pic_list_hw(pbi);

	vp9_init_decoder_hw(pbi, mask);

#ifdef VP9_LPF_LVL_UPDATE
	if (mask & HW_MASK_BACK)
		vp9_loop_filter_init(pbi);
#endif

	/* everything below is front-end (stream side) setup only */
	if ((mask & HW_MASK_FRONT) == 0)
		return;
#if 1
	if (debug & VP9_DEBUG_BUFMGR_MORE)
		pr_info("%s\n", __func__);
	data32 = READ_VREG(HEVC_STREAM_CONTROL);
	data32 = data32 |
		(1 << 0)/*stream_fetch_enable*/
		;
	WRITE_VREG(HEVC_STREAM_CONTROL, data32);

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) {
		if (debug & VP9_DEBUG_BUFMGR)
			pr_info("[test.c] Config STREAM_FIFO_CTL\n");
		data32 = READ_VREG(HEVC_STREAM_FIFO_CTL);
		data32 = data32 |
			(1 << 29) // stream_fifo_hole
			;
		WRITE_VREG(HEVC_STREAM_FIFO_CTL, data32);
	}
#if 0
	data32 = READ_VREG(HEVC_SHIFT_STARTCODE);
	if (data32 != 0x00000100) {
		pr_info("vp9 prot init error %d\n", __LINE__);
		return;
	}
	data32 = READ_VREG(HEVC_SHIFT_EMULATECODE);
	if (data32 != 0x00000300) {
		pr_info("vp9 prot init error %d\n", __LINE__);
		return;
	}
	WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x12345678);
	WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x9abcdef0);
	data32 = READ_VREG(HEVC_SHIFT_STARTCODE);
	if (data32 != 0x12345678) {
		pr_info("vp9 prot init error %d\n", __LINE__);
		return;
	}
	data32 = READ_VREG(HEVC_SHIFT_EMULATECODE);
	if (data32 != 0x9abcdef0) {
		pr_info("vp9 prot init error %d\n", __LINE__);
		return;
	}
#endif
	WRITE_VREG(HEVC_SHIFT_STARTCODE, 0x000000001);
	WRITE_VREG(HEVC_SHIFT_EMULATECODE, 0x00000300);
#endif



	WRITE_VREG(HEVC_WAIT_FLAG, 1);

	/* WRITE_VREG(HEVC_MPSR, 1); */

	/* clear mailbox interrupt */
	WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1);

	/* enable mailbox interrupt */
	WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 1);

	/* disable PSCALE for hardware sharing */
	WRITE_VREG(HEVC_PSCALE_CTRL, 0);

	WRITE_VREG(DEBUG_REG1, 0x0);
	/*check vps/sps/pps/i-slice in ucode*/
	WRITE_VREG(NAL_SEARCH_CTL, 0x8);

	WRITE_VREG(DECODE_STOP_POS, udebug_flag);
#ifdef SUPPORT_FB_DECODING
#ifndef FB_DECODING_TEST_SCHEDULE
	if (pbi->used_stage_buf_num > 0) {
		if (mask & HW_MASK_FRONT) {
			data32 = READ_VREG(
				HEVC_ASSIST_HED_FB_W_CTL);
			data32 = data32 |
				(1 << 0) /*hed_fb_wr_en*/
				;
			WRITE_VREG(HEVC_ASSIST_HED_FB_W_CTL,
				data32);
		}
		if (mask & HW_MASK_BACK) {
			data32 = READ_VREG(
				HEVC_ASSIST_HED_FB_R_CTL);
			while (data32 & (1 << 7)) {
				/*wait finish*/
				data32 = READ_VREG(
					HEVC_ASSIST_HED_FB_R_CTL);
			}
			data32 &= (~(0x1 << 0));
			/*hed_fb_rd_addr_auto_rd*/
			data32 &= (~(0x1 << 1));
			/*rd_id = 0, hed_rd_map_auto_halt_num,
			after wr 2 ready, then start reading*/
			data32 |= (0x2 << 16);
			WRITE_VREG(HEVC_ASSIST_HED_FB_R_CTL,
				data32);

			data32 |= (0x1 << 11); /*hed_rd_map_auto_halt_en*/
			data32 |= (0x1 << 1); /*hed_fb_rd_addr_auto_rd*/
			data32 |= (0x1 << 0); /*hed_fb_rd_en*/
			WRITE_VREG(HEVC_ASSIST_HED_FB_R_CTL,
				data32);
		}

	}
#endif
#endif
}

/*
 * Software-side init: allocate loop-filter buffers and stats, derive the
 * initial frame duration/aspect from amstream info, and prime the vframe
 * pool/kfifos.  Returns vp9_local_init()'s result, or -1 on allocation
 * failure.
 */
static int vvp9_local_init(struct VP9Decoder_s *pbi)
{
	int i;
	int ret;
	int width, height;
	if (alloc_lf_buf(pbi) < 0)
		return -1;

	pbi->gvs = vzalloc(sizeof(struct vdec_info));
	if (NULL == pbi->gvs) {
		pr_info("the struct of vdec status malloc failed.\n");
		return -1;
	}
	vdec_set_vframe_comm(hw_to_vdec(pbi), DRIVER_NAME);
#ifdef DEBUG_PTS
	pbi->pts_missed = 0;
	pbi->pts_hit = 0;
#endif
	pbi->new_frame_displayed = 0;
	pbi->last_put_idx = -1;
	pbi->saved_resolution = 0;
	pbi->get_frame_dur = false;
	on_no_keyframe_skiped = 0;
	pbi->duration_from_pts_done = 0;
	pbi->vp9_first_pts_ready = 0;
	pbi->frame_cnt_window = 0;
	width = pbi->vvp9_amstream_dec_info.width;
	height = pbi->vvp9_amstream_dec_info.height;
	/* frame_dur in 96 kHz ticks; 3200 (= 30 fps) when rate is unknown */
	pbi->frame_dur =
		(pbi->vvp9_amstream_dec_info.rate ==
		0) ? 3200 : pbi->vvp9_amstream_dec_info.rate;
	if (width && height)
		pbi->frame_ar = height * 0x100 / width;
/*
 *TODO:FOR VERSION
 */
	pr_info("vp9: ver (%d,%d) decinfo: %dx%d rate=%d\n", vp9_version,
		0, width, height, pbi->frame_dur);

	if (pbi->frame_dur == 0)
		pbi->frame_dur = 96000 / 24;

	INIT_KFIFO(pbi->display_q);
	INIT_KFIFO(pbi->newframe_q);

	for (i = 0; i < VF_POOL_SIZE; i++) {
		const struct vframe_s *vf = &pbi->vfpool[i];

		/* index -1 marks the pool entry as unused */
		pbi->vfpool[i].index = -1;
		kfifo_put(&pbi->newframe_q, vf);
	}


	ret = vp9_local_init(pbi);

	if (!pbi->pts_unstable) {
		pbi->pts_unstable =
			(pbi->vvp9_amstream_dec_info.rate == 0)?1:0;
		pr_info("set pts unstable\n");
	}

	return ret;
}


#ifdef MULTI_INSTANCE_SUPPORT
static s32 vvp9_init(struct vdec_s *vdec)
{
	struct VP9Decoder_s *pbi = (struct VP9Decoder_s *)vdec->private;
#else
static s32 vvp9_init(struct VP9Decoder_s *pbi)
{
#endif
	int ret;
	int fw_size = 0x1000 * 16;
	struct firmware_s *fw = NULL;

	pbi->stat |= STAT_TIMER_INIT;

	if (vvp9_local_init(pbi) < 0)
		return -EBUSY;

	fw =
vmalloc(sizeof(struct firmware_s) + fw_size); + if (IS_ERR_OR_NULL(fw)) + return -ENOMEM; + + if (get_firmware_data(VIDEO_DEC_VP9_MMU, fw->data) < 0) { + pr_err("get firmware fail.\n"); + vfree(fw); + return -1; + } + + fw->len = fw_size; + + INIT_WORK(&pbi->set_clk_work, vp9_set_clk); + timer_setup(&pbi->timer, vvp9_put_timer_func, 0); + spin_lock_init(&pbi->wait_buf_lock); + +#ifdef MULTI_INSTANCE_SUPPORT + if (pbi->m_ins_flag) { + pbi->timer.expires = jiffies + PUT_INTERVAL; + + /*add_timer(&pbi->timer); + + pbi->stat |= STAT_TIMER_ARM; + pbi->stat |= STAT_ISR_REG;*/ + + INIT_WORK(&pbi->work, vp9_work); + INIT_WORK(&pbi->recycle_mmu_work, vp9_recycle_mmu_work); +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0) + INIT_WORK(&pbi->s1_work, vp9_s1_work); +#endif + pbi->fw = fw; + + return 0; + } +#endif + amhevc_enable(); + + init_pic_list(pbi); + + ret = amhevc_loadmc_ex(VFORMAT_VP9, NULL, fw->data); + if (ret < 0) { + amhevc_disable(); + vfree(fw); + pr_err("VP9: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + return -EBUSY; + } + + vfree(fw); + + pbi->stat |= STAT_MC_LOAD; + + /* enable AMRISC side protocol */ + vvp9_prot_init(pbi, HW_MASK_FRONT | HW_MASK_BACK); + + if (vdec_request_threaded_irq(VDEC_IRQ_0, + vvp9_isr, + vvp9_isr_thread_fn, + IRQF_ONESHOT,/*run thread on this irq disabled*/ + "vvp9-irq", (void *)pbi)) { + pr_info("vvp9 irq register error.\n"); + amhevc_disable(); + return -ENOENT; + } + + pbi->stat |= STAT_ISR_REG; + + pbi->provider_name = PROVIDER_NAME; +#ifdef MULTI_INSTANCE_SUPPORT + if (!pbi->is_used_v4l) { + vf_provider_init(&vvp9_vf_prov, PROVIDER_NAME, + &vvp9_vf_provider, pbi); + vf_reg_provider(&vvp9_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); + if (pbi->frame_dur != 0) { + if (!is_reset) + vf_notify_receiver(pbi->provider_name, + VFRAME_EVENT_PROVIDER_FR_HINT, + (void *) + ((unsigned long)pbi->frame_dur)); + } + } +#else + vf_provider_init(&vvp9_vf_prov, PROVIDER_NAME, &vvp9_vf_provider, + pbi); + vf_reg_provider(&vvp9_vf_prov); + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_START, NULL); + if (!is_reset) + vf_notify_receiver(PROVIDER_NAME, VFRAME_EVENT_PROVIDER_FR_HINT, + (void *)((unsigned long)pbi->frame_dur)); +#endif + pbi->stat |= STAT_VF_HOOK; + + pbi->timer.expires = jiffies + PUT_INTERVAL; + add_timer(&pbi->timer); + + pbi->stat |= STAT_VDEC_RUN; + + pbi->stat |= STAT_TIMER_ARM; + + amhevc_start(); + + pbi->init_flag = 1; + pbi->process_busy = 0; + pr_info("%d, vvp9_init, RP=0x%x\n", + __LINE__, READ_VREG(HEVC_STREAM_RD_PTR)); + return 0; +} + +static int vmvp9_stop(struct VP9Decoder_s *pbi) +{ + pbi->init_flag = 0; + + if (pbi->stat & STAT_VDEC_RUN) { + amhevc_stop(); + pbi->stat &= ~STAT_VDEC_RUN; + } + if (pbi->stat & STAT_ISR_REG) { + vdec_free_irq(VDEC_IRQ_0, (void *)pbi); + pbi->stat &= ~STAT_ISR_REG; + } + if (pbi->stat & STAT_TIMER_ARM) { + del_timer_sync(&pbi->timer); + pbi->stat &= ~STAT_TIMER_ARM; + } + + if (!pbi->is_used_v4l && (pbi->stat & 
STAT_VF_HOOK)) { + if (!is_reset) + vf_notify_receiver(pbi->provider_name, + VFRAME_EVENT_PROVIDER_FR_END_HINT, + NULL); + + vf_unreg_provider(&vvp9_vf_prov); + pbi->stat &= ~STAT_VF_HOOK; + } + vp9_local_uninit(pbi); + reset_process_time(pbi); + cancel_work_sync(&pbi->work); + cancel_work_sync(&pbi->recycle_mmu_work); +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0) + cancel_work_sync(&pbi->s1_work); +#endif + cancel_work_sync(&pbi->set_clk_work); + uninit_mmu_buffers(pbi); + if (pbi->fw) + vfree(pbi->fw); + pbi->fw = NULL; + return 0; +} + +static int amvdec_vp9_mmu_init(struct VP9Decoder_s *pbi) +{ + int tvp_flag = vdec_secure(hw_to_vdec(pbi)) ? + CODEC_MM_FLAGS_TVP : 0; + int buf_size = vp9_max_mmu_buf_size(pbi->max_pic_w, pbi->max_pic_h); + + pbi->need_cache_size = buf_size * SZ_1M; + pbi->sc_start_time = get_jiffies_64(); + if (pbi->mmu_enable && !pbi->is_used_v4l) { + pbi->mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME, + pbi->index, FRAME_BUFFERS, + pbi->need_cache_size, + tvp_flag + ); + if (!pbi->mmu_box) { + pr_err("vp9 alloc mmu box failed!!\n"); + return -1; + } + } + pbi->bmmu_box = decoder_bmmu_box_alloc_box( + DRIVER_NAME, + pbi->index, + MAX_BMMU_BUFFER_NUM, + 4 + PAGE_SHIFT, + CODEC_MM_FLAGS_CMA_CLEAR | + CODEC_MM_FLAGS_FOR_VDECODER | + tvp_flag); + if (!pbi->bmmu_box) { + pr_err("vp9 alloc bmmu box failed!!\n"); + return -1; + } + return 0; +} + +static void vdec_fence_release(struct VP9Decoder_s *pbi, + struct vdec_sync *sync) +{ + ulong expires; + + /* notify signal to wake up all fences. */ + vdec_timeline_increase(sync, VF_POOL_SIZE); + + expires = jiffies + msecs_to_jiffies(2000); + while (!check_objs_all_signaled(sync)) { + if (time_after(jiffies, expires)) { + pr_err("wait fence signaled timeout.\n"); + break; + } + } + + /* decreases refcnt of timeline. 
*/ + vdec_timeline_put(sync); +} + +/****************************************/ + +static struct codec_profile_t amvdec_vp9_profile = { + .name = "VP9-V4L", + .profile = "" +}; + +static unsigned char get_data_check_sum + (struct VP9Decoder_s *pbi, int size) +{ + int jj; + int sum = 0; + u8 *data = NULL; + + if (vdec_secure(hw_to_vdec(pbi))) + return 0; + + if (!pbi->chunk->block->is_mapped) + data = codec_mm_vmap(pbi->chunk->block->start + + pbi->chunk->offset, size); + else + data = ((u8 *)pbi->chunk->block->start_virt) + + pbi->chunk->offset; + + for (jj = 0; jj < size; jj++) + sum += data[jj]; + + vp9_print(pbi, PRINT_FLAG_VDEC_STATUS, + "%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. %02x %02x %02x %02x\n", + __func__, size, sum, + (size < 1) ? 0 : data[0], + (size < 2) ? 0 : data[1], + (size < 3) ? 0 : data[2], + (size < 4) ? 0 : data[3], + (size < 5) ? 0 : data[4], + (size < 6) ? 0 : data[5], + (size < 4) ? 0 : data[size - 4], + (size < 3) ? 0 : data[size - 3], + (size < 2) ? 0 : data[size - 2], + (size < 1) ? 
0 : data[size - 1]); + + if (!pbi->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + return sum; +} + +static void dump_data(struct VP9Decoder_s *pbi, int size) +{ + int jj; + u8 *data = NULL; + int padding_size = pbi->chunk->offset & + (VDEC_FIFO_ALIGN - 1); + + if (!pbi->chunk->block->is_mapped) + data = codec_mm_vmap(pbi->chunk->block->start + + pbi->chunk->offset, size); + else + data = ((u8 *)pbi->chunk->block->start_virt) + + pbi->chunk->offset; + + vp9_print(pbi, 0, "padding: "); + for (jj = padding_size; jj > 0; jj--) + vp9_print_cont(pbi, + 0, + "%02x ", *(data - jj)); + vp9_print_cont(pbi, 0, "data adr %p\n", + data); + + for (jj = 0; jj < size; jj++) { + if ((jj & 0xf) == 0) + vp9_print(pbi, + 0, + "%06x:", jj); + vp9_print_cont(pbi, + 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + vp9_print(pbi, + 0, + "\n"); + } + vp9_print(pbi, + 0, + "\n"); + + if (!pbi->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); +} + +static int vp9_wait_cap_buf(void *args) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *) args; + struct aml_vcodec_ctx * ctx = + (struct aml_vcodec_ctx *)pbi->v4l2_ctx; + ulong flags; + int ret = 0; + + ret = wait_event_interruptible_timeout(ctx->cap_wq, + (ctx->is_stream_off || (get_free_buf_count(pbi) > 0)), + msecs_to_jiffies(300)); + if (ret <= 0){ + pr_err("%s, wait cap buf timeout or err %d\n", + __func__, ret); + } + + spin_lock_irqsave(&pbi->wait_buf_lock, flags); + if (pbi->wait_more_buf) { + pbi->wait_more_buf = false; + pbi->dec_result = ctx->is_stream_off ? 
+ DEC_RESULT_FORCE_EXIT : + DEC_RESULT_NEED_MORE_BUFFER; + vdec_schedule_work(&pbi->work); + } + spin_unlock_irqrestore(&pbi->wait_buf_lock, flags); + + vp9_print(pbi, PRINT_FLAG_V4L_DETAIL, + "%s wait capture buffer end, ret:%d\n", + __func__, ret); + return 0; +} + +static void vp9_work(struct work_struct *work) +{ + struct VP9Decoder_s *pbi = container_of(work, + struct VP9Decoder_s, work); + struct vdec_s *vdec = hw_to_vdec(pbi); + /* finished decoding one frame or error, + * notify vdec core to switch context + */ + if (pbi->dec_result == DEC_RESULT_AGAIN) + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_WORKER_AGAIN); + + if (pbi->dec_result != DEC_RESULT_NEED_MORE_BUFFER) + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_WORKER_START); + vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL, + "%s dec_result %d %x %x %x\n", + __func__, + pbi->dec_result, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR)); + + ATRACE_COUNTER("V_ST_DEC-work_state", pbi->dec_result); + + if (pbi->dec_result == DEC_INIT_PICLIST) { + init_pic_list(pbi); + pbi->pic_list_init_done = true; + return; + } + + if (pbi->dec_result == DEC_RESULT_NEED_MORE_BUFFER) { + reset_process_time(pbi); + if (!get_free_buf_count(pbi)) { + ulong flags; + + spin_lock_irqsave(&pbi->wait_buf_lock, flags); + if (vdec->next_status == VDEC_STATUS_DISCONNECTED) { + pbi->dec_result = DEC_RESULT_AGAIN; + pbi->postproc_done = 0; + pbi->process_busy = 0; + vdec_schedule_work(&pbi->work); + } else { + pbi->wait_more_buf = true; + } + spin_unlock_irqrestore(&pbi->wait_buf_lock, flags); + + if (pbi->wait_more_buf) { + ATRACE_COUNTER("V_ST_DEC-wait_more_buff", __LINE__); + vdec_post_task(vp9_wait_cap_buf, pbi); + } + } else { + int i; + + ATRACE_COUNTER("V_ST_DEC-wait_more_buff", 0); + + if (pbi->mmu_enable) + vp9_recycle_mmu_buf_tail(pbi); + + if (pbi->frame_count > 0) + vp9_bufmgr_postproc(pbi); + + for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) { + int ii; + for (ii = 0; 
ii < 4; ii++) + pbi->vp9_param.l.data[i + ii] = + pbi->rpm_ptr[i + 3 - ii]; + } + continue_decoding(pbi); + pbi->postproc_done = 0; + pbi->process_busy = 0; + + start_process_time(pbi); + } + + return; + } + + if (((pbi->dec_result == DEC_RESULT_GET_DATA) || + (pbi->dec_result == DEC_RESULT_GET_DATA_RETRY)) + && (hw_to_vdec(pbi)->next_status != + VDEC_STATUS_DISCONNECTED)) { + if (!vdec_has_more_input(vdec)) { + pbi->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&pbi->work); + return; + } + + if (pbi->dec_result == DEC_RESULT_GET_DATA) { + vp9_print(pbi, PRINT_FLAG_VDEC_STATUS, + "%s DEC_RESULT_GET_DATA %x %x %x\n", + __func__, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR)); + vdec_vframe_dirty(vdec, pbi->chunk); + vdec_clean_input(vdec); + } + + if (get_free_buf_count(pbi) >= + pbi->run_ready_min_buf_num) { + int r; + int decode_size; + r = vdec_prepare_input(vdec, &pbi->chunk); + if (r < 0) { + pbi->dec_result = DEC_RESULT_GET_DATA_RETRY; + + vp9_print(pbi, + PRINT_FLAG_VDEC_DETAIL, + "amvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&pbi->work); + return; + } + pbi->dec_result = DEC_RESULT_NONE; + vp9_print(pbi, PRINT_FLAG_VDEC_STATUS, + "%s: chunk size 0x%x sum 0x%x\n", + __func__, r, + (debug & PRINT_FLAG_VDEC_STATUS) ? 
+ get_data_check_sum(pbi, r) : 0 + ); + + if (debug & PRINT_FLAG_VDEC_DATA) + dump_data(pbi, pbi->chunk->size); + + decode_size = pbi->chunk->size + + (pbi->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + + WRITE_VREG(HEVC_DECODE_SIZE, + READ_VREG(HEVC_DECODE_SIZE) + decode_size); + + vdec_enable_input(vdec); + + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + + start_process_time(pbi); + + } else{ + pbi->dec_result = DEC_RESULT_GET_DATA_RETRY; + + vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL, + "amvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&pbi->work); + } + return; + } else if (pbi->dec_result == DEC_RESULT_DONE) { +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0) { +#ifndef FB_DECODING_TEST_SCHEDULE + if (!is_s2_decoding_finished(pbi)) { + vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL, + "s2 decoding not done, check again later\n"); + vdec_schedule_work(&pbi->work); + } +#endif + inc_s2_pos(pbi); + if (mcrcc_cache_alg_flag) + dump_hit_rate(pbi); + } +#endif + /* if (!pbi->ctx_valid) + pbi->ctx_valid = 1; */ + pbi->slice_idx++; + pbi->frame_count++; + pbi->process_state = PROC_STATE_INIT; + decode_frame_count[pbi->index] = pbi->frame_count; + + if (pbi->mmu_enable) + pbi->used_4k_num = + (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16); + vp9_print(pbi, PRINT_FLAG_VDEC_STATUS, + "%s (===> %d) dec_result %d %x %x %x shiftbytes 0x%x decbytes 0x%x\n", + __func__, + pbi->frame_count, + pbi->dec_result, + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + READ_VREG(HEVC_SHIFT_BYTE_COUNT), + READ_VREG(HEVC_SHIFT_BYTE_COUNT) - + pbi->start_shift_bytes + ); + vdec_vframe_dirty(hw_to_vdec(pbi), pbi->chunk); + } else if (pbi->dec_result == DEC_RESULT_AGAIN) { + /* + stream base: stream buf empty or timeout + frame base: vdec_prepare_input fail + */ + if (!vdec_has_more_input(vdec)) { + pbi->dec_result = DEC_RESULT_EOS; + vdec_schedule_work(&pbi->work); + return; + } + } else if (pbi->dec_result == DEC_RESULT_EOS) { + 
vp9_print(pbi, PRINT_FLAG_VDEC_STATUS, + "%s: end of stream\n", + __func__); + pbi->eos = 1; + vp9_bufmgr_postproc(pbi); + + if (pbi->is_used_v4l) { + ATRACE_COUNTER("V_ST_DEC-submit_eos", __LINE__); + notify_v4l_eos(hw_to_vdec(pbi)); + ATRACE_COUNTER("V_ST_DEC-submit_eos", 0); + } + + vdec_vframe_dirty(hw_to_vdec(pbi), pbi->chunk); + } else if (pbi->dec_result == DEC_RESULT_FORCE_EXIT) { + vp9_print(pbi, PRINT_FLAG_VDEC_STATUS, + "%s: force exit\n", + __func__); + if (pbi->stat & STAT_VDEC_RUN) { + amhevc_stop(); + pbi->stat &= ~STAT_VDEC_RUN; + } + + if (pbi->stat & STAT_ISR_REG) { +#ifdef MULTI_INSTANCE_SUPPORT + if (!pbi->m_ins_flag) +#endif + WRITE_VREG(HEVC_ASSIST_MBOX0_MASK, 0); + vdec_free_irq(VDEC_IRQ_0, (void *)pbi); + pbi->stat &= ~STAT_ISR_REG; + } + } + if (pbi->stat & STAT_VDEC_RUN) { + amhevc_stop(); + pbi->stat &= ~STAT_VDEC_RUN; + } + + if (pbi->stat & STAT_TIMER_ARM) { + del_timer_sync(&pbi->timer); + pbi->stat &= ~STAT_TIMER_ARM; + } + if (pbi->dec_result != DEC_RESULT_NEED_MORE_BUFFER) + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_WORKER_END); + /* mark itself has all HW resource released and input released */ +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0) + vdec_core_finish_run(hw_to_vdec(pbi), CORE_MASK_HEVC_BACK); + else + vdec_core_finish_run(hw_to_vdec(pbi), CORE_MASK_VDEC_1 + | CORE_MASK_HEVC + | CORE_MASK_HEVC_FRONT + | CORE_MASK_HEVC_BACK + ); +#else + if (vdec->parallel_dec == 1) + vdec_core_finish_run(vdec, CORE_MASK_HEVC); + else + vdec_core_finish_run(hw_to_vdec(pbi), CORE_MASK_VDEC_1 + | CORE_MASK_HEVC); +#endif + trigger_schedule(pbi); +} + +static int vp9_hw_ctx_restore(struct VP9Decoder_s *pbi) +{ + /* new to do ... 
*/ +#if (!defined SUPPORT_FB_DECODING) + vvp9_prot_init(pbi, HW_MASK_FRONT | HW_MASK_BACK); +#elif (defined FB_DECODING_TEST_SCHEDULE) + vvp9_prot_init(pbi, HW_MASK_FRONT | HW_MASK_BACK); +#else + if (pbi->used_stage_buf_num > 0) + vvp9_prot_init(pbi, HW_MASK_FRONT); + else + vvp9_prot_init(pbi, HW_MASK_FRONT | HW_MASK_BACK); +#endif + return 0; +} + +static bool is_avaliable_buffer(struct VP9Decoder_s *pbi) +{ + struct VP9_Common_s *const cm = &pbi->common; + struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs; + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + int i, free_count = 0; + int used_count = 0; + + if ((pbi->used_buf_num == 0) || + (ctx->cap_pool.dec < pbi->used_buf_num)) { + if (ctx->fb_ops.query(&ctx->fb_ops, &pbi->fb_token)) { + free_count = + v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) + 1; + } + } + + for (i = 0; i < pbi->used_buf_num; ++i) { + if ((frame_bufs[i].ref_count == 0) && + (frame_bufs[i].buf.vf_ref == 0) && + (frame_bufs[i].buf.repeat_count == 0) && + frame_bufs[i].buf.cma_alloc_addr && + (cm->cur_frame != &frame_bufs[i])) { + free_count++; + } else if (frame_bufs[i].buf.cma_alloc_addr) + used_count++; + } + + ATRACE_COUNTER("V_ST_DEC-free_buff_count", free_count); + ATRACE_COUNTER("V_ST_DEC-used_buff_count", used_count); + + return free_count >= pbi->run_ready_min_buf_num ? 1 : 0; +} + +static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + int tvp = vdec_secure(hw_to_vdec(pbi)) ? 
+ CODEC_MM_FLAGS_TVP : 0; + unsigned long ret = 0; + + if (!pbi->pic_list_init_done2 || pbi->eos) + return ret; + if (!pbi->first_sc_checked && pbi->mmu_enable) { + int size; + void * mmu_box; + + if (pbi->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + mmu_box = ctx->mmu_box; + } else + mmu_box = pbi->mmu_box; + + size = decoder_mmu_box_sc_check(mmu_box, tvp); + pbi->first_sc_checked = 1; + vp9_print(pbi, 0, "vp9 cached=%d need_size=%d speed= %d ms\n", + size, (pbi->need_cache_size >> PAGE_SHIFT), + (int)(get_jiffies_64() - pbi->sc_start_time) * 1000/HZ); + } + +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0) { + if (mask & CORE_MASK_HEVC_FRONT) { + if (get_free_stage_buf_num(pbi) > 0 + && mv_buf_available(pbi)) + ret |= CORE_MASK_HEVC_FRONT; + } + if (mask & CORE_MASK_HEVC_BACK) { + if (s2_buf_available(pbi) && + (get_free_buf_count(pbi) >= + pbi->run_ready_min_buf_num)) { + ret |= CORE_MASK_HEVC_BACK; + pbi->back_not_run_ready = 0; + } else + pbi->back_not_run_ready = 1; +#if 0 + if (get_free_buf_count(pbi) < + run_ready_min_buf_num) + dump_pic_list(pbi); +#endif + } + } else if (get_free_buf_count(pbi) >= + pbi->run_ready_min_buf_num) + ret = CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_HEVC_FRONT + | CORE_MASK_HEVC_BACK; + + if (ret & CORE_MASK_HEVC_FRONT) + not_run_ready[pbi->index] = 0; + else + not_run_ready[pbi->index]++; + + if (ret & CORE_MASK_HEVC_BACK) + not_run2_ready[pbi->index] = 0; + else + not_run2_ready[pbi->index]++; + + vp9_print(pbi, + PRINT_FLAG_VDEC_DETAIL, "%s mask %lx=>%lx (%d %d %d %d)\r\n", + __func__, mask, ret, + get_free_stage_buf_num(pbi), + mv_buf_available(pbi), + s2_buf_available(pbi), + get_free_buf_count(pbi) + ); + + return ret; + +#else + + if (pbi->is_used_v4l) { + struct aml_vcodec_ctx *ctx = + (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + + if (pbi->v4l_params_parsed) { + if (is_avaliable_buffer(pbi)) + ret = CORE_MASK_HEVC; + else + ret = 0; + } else { + if 
(ctx->v4l_resolution_change) + ret = 0; + else + ret = CORE_MASK_HEVC; + } + } + + if (ret) + not_run_ready[pbi->index] = 0; + else + not_run_ready[pbi->index]++; + + vp9_print(pbi, + PRINT_FLAG_VDEC_DETAIL, "%s mask %lx=>%lx\r\n", + __func__, mask, ret); + return ret; +#endif +} + +static void vp9_frame_mode_pts_save(struct VP9Decoder_s *pbi) +{ + int i = 0; + + if (pbi->chunk == NULL) + return; + vp9_print(pbi, VP9_DEBUG_OUT_PTS, + "run front: pts %d, pts64 %lld, ts: %lld\n", + pbi->chunk->pts, pbi->chunk->pts64, pbi->chunk->timestamp); + for (i = (FRAME_BUFFERS - 1); i > 0; i--) { + pbi->frame_mode_pts_save[i] = pbi->frame_mode_pts_save[i - 1]; + pbi->frame_mode_pts64_save[i] = pbi->frame_mode_pts64_save[i - 1]; + } + pbi->frame_mode_pts_save[0] = pbi->chunk->pts; + pbi->frame_mode_pts64_save[0] = pbi->chunk->pts64; + + if (pbi->is_used_v4l && !v4l_bitstream_id_enable) + pbi->frame_mode_pts64_save[0] = pbi->chunk->timestamp; +} + +static void run_front(struct vdec_s *vdec) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + int ret, size; + + run_count[pbi->index]++; + /* pbi->chunk = vdec_prepare_input(vdec); */ +#if (!defined SUPPORT_FB_DECODING) + hevc_reset_core(vdec); +#elif (defined FB_DECODING_TEST_SCHEDULE) + hevc_reset_core(vdec); +#else + if (pbi->used_stage_buf_num > 0) + fb_reset_core(vdec, HW_MASK_FRONT); + else + hevc_reset_core(vdec); +#endif + + size = vdec_prepare_input(vdec, &pbi->chunk); + if (size < 0) { + input_empty[pbi->index]++; + + pbi->dec_result = DEC_RESULT_AGAIN; + + vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL, + "ammvdec_vh265: Insufficient data\n"); + + vdec_schedule_work(&pbi->work); + return; + } + + ATRACE_COUNTER("V_ST_DEC-chunk_size", size); + + input_empty[pbi->index] = 0; + pbi->dec_result = DEC_RESULT_NONE; + pbi->start_shift_bytes = READ_VREG(HEVC_SHIFT_BYTE_COUNT); + + vp9_frame_mode_pts_save(pbi); + + if (debug & PRINT_FLAG_VDEC_STATUS) { + int ii; + vp9_print(pbi, 0, + "%s (%d): size 0x%x (0x%x 0x%x) sum 
0x%x (%x %x %x %x %x) bytes 0x%x", + __func__, + pbi->frame_count, size, + pbi->chunk ? pbi->chunk->size : 0, + pbi->chunk ? pbi->chunk->offset : 0, + pbi->chunk ? ((vdec_frame_based(vdec) && + (debug & PRINT_FLAG_VDEC_STATUS)) ? + get_data_check_sum(pbi, size) : 0) : 0, + READ_VREG(HEVC_STREAM_START_ADDR), + READ_VREG(HEVC_STREAM_END_ADDR), + READ_VREG(HEVC_STREAM_LEVEL), + READ_VREG(HEVC_STREAM_WR_PTR), + READ_VREG(HEVC_STREAM_RD_PTR), + pbi->start_shift_bytes); + + if (!vdec_secure(hw_to_vdec(pbi)) && + vdec_frame_based(vdec) && pbi->chunk) { + u8 *data = NULL; + + if (!pbi->chunk->block->is_mapped) + data = codec_mm_vmap(pbi->chunk->block->start + + pbi->chunk->offset, 8); + else + data = ((u8 *)pbi->chunk->block->start_virt) + + pbi->chunk->offset; + + vp9_print_cont(pbi, 0, "data adr %p:", + data); + for (ii = 0; ii < 8; ii++) + vp9_print_cont(pbi, 0, "%02x ", + data[ii]); + + if (!pbi->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + vp9_print_cont(pbi, 0, "\r\n"); + } + ATRACE_COUNTER(pbi->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_START); + if (vdec->mc_loaded) { + /*firmware have load before, + and not changes to another. + ignore reload. + */ + } else { + ret = amhevc_loadmc_ex(VFORMAT_VP9, NULL, pbi->fw->data); + if (ret < 0) { + amhevc_disable(); + vp9_print(pbi, PRINT_FLAG_ERROR, + "VP9: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + pbi->dec_result = DEC_RESULT_FORCE_EXIT; + vdec_schedule_work(&pbi->work); + return; + } + vdec->mc_loaded = 1; + vdec->mc_type = VFORMAT_VP9; + } + ATRACE_COUNTER(pbi->trace.decode_run_time_name, TRACE_RUN_LOADING_FW_END); + + ATRACE_COUNTER(pbi->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_START); + if (vp9_hw_ctx_restore(pbi) < 0) { + vdec_schedule_work(&pbi->work); + return; + } + ATRACE_COUNTER(pbi->trace.decode_run_time_name, TRACE_RUN_LOADING_RESTORE_END); + vdec_enable_input(vdec); + + WRITE_VREG(HEVC_DEC_STATUS_REG, HEVC_ACTION_DONE); + + if (vdec_frame_based(vdec)) { + if (debug & PRINT_FLAG_VDEC_DATA) + dump_data(pbi, pbi->chunk->size); + + WRITE_VREG(HEVC_SHIFT_BYTE_COUNT, 0); + size = pbi->chunk->size + + (pbi->chunk->offset & (VDEC_FIFO_ALIGN - 1)); + if (vdec->mvfrm) + vdec->mvfrm->frame_size = pbi->chunk->size; + } + WRITE_VREG(HEVC_DECODE_SIZE, size); + WRITE_VREG(HEVC_DECODE_COUNT, pbi->slice_idx); + pbi->init_flag = 1; + + vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL, + "%s: start hevc (%x %x %x)\n", + __func__, + READ_VREG(HEVC_DEC_STATUS_REG), + READ_VREG(HEVC_MPC_E), + READ_VREG(HEVC_MPSR)); + + start_process_time(pbi); + mod_timer(&pbi->timer, jiffies); + pbi->stat |= STAT_TIMER_ARM; + pbi->stat |= STAT_ISR_REG; + amhevc_start(); + pbi->stat |= STAT_VDEC_RUN; +} + +#ifdef SUPPORT_FB_DECODING +static void mpred_process(struct VP9Decoder_s *pbi) +{ + union param_u *params = &pbi->s1_param; + unsigned char use_prev_frame_mvs = + !params->p.error_resilient_mode && + params->p.width == pbi->s1_width && + params->p.height == pbi->s1_height && + !pbi->s1_intra_only && + pbi->s1_last_show_frame && + (pbi->s1_frame_type != KEY_FRAME); + pbi->s1_width = params->p.width; + pbi->s1_height = params->p.height; + pbi->s1_frame_type = params->p.frame_type; + pbi->s1_intra_only = + (params->p.show_frame || + params->p.show_existing_frame) + ? 
0 : params->p.intra_only; + if ((pbi->s1_frame_type != KEY_FRAME) + && (!pbi->s1_intra_only)) { + unsigned int data32; + int mpred_mv_rd_end_addr; + + mpred_mv_rd_end_addr = + pbi->s1_mpred_mv_wr_start_addr_pre + + (pbi->lcu_total * MV_MEM_UNIT); + + WRITE_VREG(HEVC_MPRED_CTRL3, 0x24122412); + WRITE_VREG(HEVC_MPRED_ABV_START_ADDR, + pbi->work_space_buf-> + mpred_above.buf_start); + + data32 = READ_VREG(HEVC_MPRED_CTRL4); + + data32 &= (~(1 << 6)); + data32 |= (use_prev_frame_mvs << 6); + WRITE_VREG(HEVC_MPRED_CTRL4, data32); + + WRITE_VREG(HEVC_MPRED_MV_WR_START_ADDR, + pbi->s1_mpred_mv_wr_start_addr); + WRITE_VREG(HEVC_MPRED_MV_WPTR, + pbi->s1_mpred_mv_wr_start_addr); + + WRITE_VREG(HEVC_MPRED_MV_RD_START_ADDR, + pbi->s1_mpred_mv_wr_start_addr_pre); + WRITE_VREG(HEVC_MPRED_MV_RPTR, + pbi->s1_mpred_mv_wr_start_addr_pre); + + WRITE_VREG(HEVC_MPRED_MV_RD_END_ADDR, + mpred_mv_rd_end_addr); + + } else + clear_mpred_hw(pbi); + + if (!params->p.show_existing_frame) { + pbi->s1_mpred_mv_wr_start_addr_pre = + pbi->s1_mpred_mv_wr_start_addr; + pbi->s1_last_show_frame = + params->p.show_frame; + if (pbi->s1_mv_buf_index_pre_pre != MV_BUFFER_NUM) + put_mv_buf(pbi, &pbi->s1_mv_buf_index_pre_pre); + pbi->s1_mv_buf_index_pre_pre = + pbi->s1_mv_buf_index_pre; + pbi->s1_mv_buf_index_pre = pbi->s1_mv_buf_index; + } else + put_mv_buf(pbi, &pbi->s1_mv_buf_index); +} + +static void vp9_s1_work(struct work_struct *s1_work) +{ + struct VP9Decoder_s *pbi = container_of(s1_work, + struct VP9Decoder_s, s1_work); + vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL, + "%s dec_s1_result %d\n", + __func__, + pbi->dec_s1_result); + +#ifdef FB_DECODING_TEST_SCHEDULE + if (pbi->dec_s1_result == + DEC_S1_RESULT_TEST_TRIGGER_DONE) { + pbi->s1_test_cmd = TEST_SET_PIC_DONE; + WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG, 0x1); + } +#endif + if (pbi->dec_s1_result == DEC_S1_RESULT_DONE || + pbi->dec_s1_result == DEC_S1_RESULT_FORCE_EXIT) { + + vdec_core_finish_run(hw_to_vdec(pbi), + CORE_MASK_HEVC_FRONT); + + 
trigger_schedule(pbi); + /*pbi->dec_s1_result = DEC_S1_RESULT_NONE;*/ + } + +} + +static void run_back(struct vdec_s *vdec) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + int i; + run2_count[pbi->index]++; + if (debug & PRINT_FLAG_VDEC_STATUS) { + vp9_print(pbi, 0, + "%s", __func__); + } + pbi->run2_busy = 1; +#ifndef FB_DECODING_TEST_SCHEDULE + fb_reset_core(vdec, HW_MASK_BACK); + + vvp9_prot_init(pbi, HW_MASK_BACK); +#endif + vp9_recycle_mmu_buf_tail(pbi); + + if (pbi->frame_count > 0) + vp9_bufmgr_postproc(pbi); + + if (get_s2_buf(pbi) >= 0) { + for (i = 0; i < (RPM_END - RPM_BEGIN); i += 4) { + int ii; + for (ii = 0; ii < 4; ii++) + pbi->vp9_param.l.data[i + ii] = + pbi->s2_buf->rpm[i + 3 - ii]; + } +#ifndef FB_DECODING_TEST_SCHEDULE + WRITE_VREG(HEVC_ASSIST_FBD_MMU_MAP_ADDR, + pbi->stage_mmu_map_phy_addr + + pbi->s2_buf->index * STAGE_MMU_MAP_SIZE); +#endif + continue_decoding(pbi); + } + pbi->run2_busy = 0; +} +#endif + +static void run(struct vdec_s *vdec, unsigned long mask, + void (*callback)(struct vdec_s *, void *), void *arg) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_RUN_START); + vp9_print(pbi, + PRINT_FLAG_VDEC_DETAIL, "%s mask %lx\r\n", + __func__, mask); + + if (vdec->mvfrm) + vdec->mvfrm->hw_decode_start = local_clock(); + run_count[pbi->index]++; + pbi->vdec_cb_arg = arg; + pbi->vdec_cb = callback; + pbi->one_package_frame_cnt = 0; +#ifdef SUPPORT_FB_DECODING + if ((mask & CORE_MASK_HEVC) || + (mask & CORE_MASK_HEVC_FRONT)) + run_front(vdec); + + if ((pbi->used_stage_buf_num > 0) + && (mask & CORE_MASK_HEVC_BACK)) + run_back(vdec); +#else + run_front(vdec); +#endif + ATRACE_COUNTER(pbi->trace.decode_time_name, DECODER_RUN_END); +} + +static void vp9_decoder_ctx_reset(struct VP9Decoder_s *pbi) +{ + struct vdec_s *vdec = hw_to_vdec(pbi); + struct VP9_Common_s *const cm = &pbi->common; + struct RefCntBuffer_s *const frame_bufs = 
cm->buffer_pool->frame_bufs; + struct BufferPool_s *buffer_pool = cm->buffer_pool; + int i; + + cm->buffer_pool = buffer_pool; + + for (i = 0; i < FRAME_BUFFERS; ++i) { + frame_bufs[i].buf.index = i; + frame_bufs[i].ref_count = 0; + frame_bufs[i].buf.vf_ref = 0; + frame_bufs[i].buf.decode_idx = 0; + frame_bufs[i].buf.cma_alloc_addr = 0; + frame_bufs[i].buf.BUF_index = -1; + frame_bufs[i].buf.slice_type = 0; + frame_bufs[i].buf.repeat_pic = NULL; + frame_bufs[i].buf.repeat_count = 0; + } + + for (i = 0; i < MV_BUFFER_NUM; ++i) { + pbi->m_mv_BUF[i].used_flag = 0; + } + + for (i = 0; i < FRAME_BUFFERS; i++) { + pbi->buffer_wrap[i] = i; + } + + if (vdec->parallel_dec == 1) { + for (i = 0; i < FRAME_BUFFERS; i++) { + vdec->free_canvas_ex + (pbi->common.buffer_pool->frame_bufs[i].buf.y_canvas_index, + vdec->id); + vdec->free_canvas_ex + (pbi->common.buffer_pool->frame_bufs[i].buf.uv_canvas_index, + vdec->id); + } + } + + pbi->init_flag = 0; + pbi->first_sc_checked = 0; + pbi->fatal_error = 0; + pbi->show_frame_num = 0; + pbi->eos = 0; + pbi->postproc_done = 0; + pbi->process_busy = 0; +} + +static void reset(struct vdec_s *vdec) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + + cancel_work_sync(&pbi->set_clk_work); + cancel_work_sync(&pbi->work); + if (pbi->stat & STAT_VDEC_RUN) { + amhevc_stop(); + pbi->stat &= ~STAT_VDEC_RUN; + } + + if (pbi->stat & STAT_TIMER_ARM) { + del_timer_sync(&pbi->timer); + pbi->stat &= ~STAT_TIMER_ARM; + } + + reset_process_time(pbi); + + vp9_local_uninit(pbi); + if (vvp9_local_init(pbi) < 0) + vp9_print(pbi, 0, "%s local_init failed \r\n", __func__); + + vp9_decoder_ctx_reset(pbi); + + atomic_set(&pbi->vf_pre_count, 0); + atomic_set(&pbi->vf_get_count, 0); + atomic_set(&pbi->vf_put_count, 0); + + if (pbi->ge2d) { + vdec_ge2d_destroy(pbi->ge2d); + pbi->ge2d = NULL; + } + + vp9_print(pbi, 0, "%s\r\n", __func__); +} + +static irqreturn_t vp9_irq_cb(struct vdec_s *vdec, int irq) +{ + struct VP9Decoder_s *pbi = + (struct 
VP9Decoder_s *)vdec->private; + return vvp9_isr(0, pbi); +} + +static irqreturn_t vp9_threaded_irq_cb(struct vdec_s *vdec, int irq) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + return vvp9_isr_thread_fn(0, pbi); +} + +static void vp9_dump_state(struct vdec_s *vdec) +{ + struct VP9Decoder_s *pbi = + (struct VP9Decoder_s *)vdec->private; + struct VP9_Common_s *const cm = &pbi->common; + int i; + vp9_print(pbi, 0, "====== %s\n", __func__); + + vp9_print(pbi, 0, + "width/height (%d/%d), used_buf_num %d video_signal_type 0x%x\n", + cm->width, + cm->height, + pbi->used_buf_num, + pbi->video_signal_type + ); + + vp9_print(pbi, 0, + "is_framebase(%d), eos %d, dec_result 0x%x dec_frm %d disp_frm %d run %d not_run_ready %d input_empty %d low_latency %d no_head %d \n", + input_frame_based(vdec), + pbi->eos, + pbi->dec_result, + decode_frame_count[pbi->index], + display_frame_count[pbi->index], + run_count[pbi->index], + not_run_ready[pbi->index], + input_empty[pbi->index], + pbi->low_latency_flag, + pbi->no_head + ); + + if (!pbi->is_used_v4l && vf_get_receiver(vdec->vf_provider_name)) { + enum receviver_start_e state = + vf_notify_receiver(vdec->vf_provider_name, + VFRAME_EVENT_PROVIDER_QUREY_STATE, + NULL); + vp9_print(pbi, 0, + "\nreceiver(%s) state %d\n", + vdec->vf_provider_name, + state); + } + + vp9_print(pbi, 0, + "%s, newq(%d/%d), dispq(%d/%d), vf prepare/get/put (%d/%d/%d), free_buf_count %d (min %d for run_ready)\n", + __func__, + kfifo_len(&pbi->newframe_q), + VF_POOL_SIZE, + kfifo_len(&pbi->display_q), + VF_POOL_SIZE, + pbi->vf_pre_count, + pbi->vf_get_count, + pbi->vf_put_count, + get_free_buf_count(pbi), + pbi->run_ready_min_buf_num + ); + + dump_pic_list(pbi); + + for (i = 0; i < MAX_BUF_NUM; i++) { + vp9_print(pbi, 0, + "mv_Buf(%d) start_adr 0x%x size 0x%x used %d\n", + i, + pbi->m_mv_BUF[i].start_adr, + pbi->m_mv_BUF[i].size, + pbi->m_mv_BUF[i].used_flag); + } + + vp9_print(pbi, 0, + "HEVC_DEC_STATUS_REG=0x%x\n", + 
READ_VREG(HEVC_DEC_STATUS_REG)); + vp9_print(pbi, 0, + "HEVC_MPC_E=0x%x\n", + READ_VREG(HEVC_MPC_E)); + vp9_print(pbi, 0, + "DECODE_MODE=0x%x\n", + READ_VREG(DECODE_MODE)); + vp9_print(pbi, 0, + "NAL_SEARCH_CTL=0x%x\n", + READ_VREG(NAL_SEARCH_CTL)); + vp9_print(pbi, 0, + "HEVC_PARSER_LCU_START=0x%x\n", + READ_VREG(HEVC_PARSER_LCU_START)); + vp9_print(pbi, 0, + "HEVC_DECODE_SIZE=0x%x\n", + READ_VREG(HEVC_DECODE_SIZE)); + vp9_print(pbi, 0, + "HEVC_SHIFT_BYTE_COUNT=0x%x\n", + READ_VREG(HEVC_SHIFT_BYTE_COUNT)); + vp9_print(pbi, 0, + "HEVC_STREAM_START_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_START_ADDR)); + vp9_print(pbi, 0, + "HEVC_STREAM_END_ADDR=0x%x\n", + READ_VREG(HEVC_STREAM_END_ADDR)); + vp9_print(pbi, 0, + "HEVC_STREAM_LEVEL=0x%x\n", + READ_VREG(HEVC_STREAM_LEVEL)); + vp9_print(pbi, 0, + "HEVC_STREAM_WR_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_WR_PTR)); + vp9_print(pbi, 0, + "HEVC_STREAM_RD_PTR=0x%x\n", + READ_VREG(HEVC_STREAM_RD_PTR)); + vp9_print(pbi, 0, + "PARSER_VIDEO_RP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_rp)); + vp9_print(pbi, 0, + "PARSER_VIDEO_WP=0x%x\n", + STBUF_READ(&vdec->vbuf, get_wp)); + + if (input_frame_based(vdec) && + (debug & PRINT_FLAG_VDEC_DATA) + ) { + int jj; + if (pbi->chunk && pbi->chunk->block && + pbi->chunk->size > 0) { + u8 *data = NULL; + + if (!pbi->chunk->block->is_mapped) + data = codec_mm_vmap( + pbi->chunk->block->start + + pbi->chunk->offset, + pbi->chunk->size); + else + data = ((u8 *)pbi->chunk->block->start_virt) + + pbi->chunk->offset; + vp9_print(pbi, 0, + "frame data size 0x%x\n", + pbi->chunk->size); + for (jj = 0; jj < pbi->chunk->size; jj++) { + if ((jj & 0xf) == 0) + vp9_print(pbi, 0, + "%06x:", jj); + vp9_print_cont(pbi, 0, + "%02x ", data[jj]); + if (((jj + 1) & 0xf) == 0) + vp9_print_cont(pbi, 0, + "\n"); + } + + if (!pbi->chunk->block->is_mapped) + codec_mm_unmap_phyaddr(data); + } + } + +} + +static int ammvdec_vp9_probe(struct platform_device *pdev) +{ + struct vdec_s *pdata = *(struct vdec_s 
**)pdev->dev.platform_data; + int ret; + int i; + int config_val; + int transfer_val; + struct vframe_content_light_level_s content_light_level; + struct vframe_master_display_colour_s vf_dp; + + struct VP9Decoder_s *pbi = NULL; + struct aml_vcodec_ctx *ctx = NULL; + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL || + get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_TXL || + get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) { + pr_info("vp9 unsupported on cpu 0x%x\n", get_cpu_major_id()); + return -EINVAL; + } + pr_debug("%s\n", __func__); + + if (pdata == NULL) { + pr_info("\nammvdec_vp9 memory resource undefined.\n"); + return -EFAULT; + } + /*pbi = (struct VP9Decoder_s *)devm_kzalloc(&pdev->dev, + sizeof(struct VP9Decoder_s), GFP_KERNEL);*/ + memset(&vf_dp, 0, sizeof(struct vframe_master_display_colour_s)); + pbi = vmalloc(sizeof(struct VP9Decoder_s)); + if (pbi == NULL) { + pr_info("\nammvdec_vp9 device data allocation failed\n"); + return -ENOMEM; + } + memset(pbi, 0, sizeof(struct VP9Decoder_s)); + + /* the ctx from v4l2 driver. 
*/ + pbi->v4l2_ctx = pdata->private; + ctx = (struct aml_vcodec_ctx *)(pbi->v4l2_ctx); + pdata->private = pbi; + pdata->dec_status = vvp9_dec_status; + /* pdata->set_trickmode = set_trickmode; */ + pdata->run_ready = run_ready; + pdata->run = run; + pdata->reset = reset; + pdata->irq_handler = vp9_irq_cb; + pdata->threaded_irq_handler = vp9_threaded_irq_cb; + pdata->dump_state = vp9_dump_state; + + pbi->index = pdev->id; + + if (is_rdma_enable()) { + pbi->rdma_adr = dma_alloc_coherent(amports_get_dma_device(), RDMA_SIZE, &pbi->rdma_phy_adr, GFP_KERNEL); + for (i = 0; i < SCALELUT_DATA_WRITE_NUM; i++) { + pbi->rdma_adr[i * 4] = HEVC_IQIT_SCALELUT_WR_ADDR & 0xfff; + pbi->rdma_adr[i * 4 + 1] = i; + pbi->rdma_adr[i * 4 + 2] = HEVC_IQIT_SCALELUT_DATA & 0xfff; + pbi->rdma_adr[i * 4 + 3] = 0; + if (i == SCALELUT_DATA_WRITE_NUM - 1) { + pbi->rdma_adr[i * 4 + 2] = (HEVC_IQIT_SCALELUT_DATA & 0xfff) | 0x20000; + } + } + } + + snprintf(pbi->trace.vdec_name, sizeof(pbi->trace.vdec_name), + "vp9-%d", pbi->index); + snprintf(pbi->trace.pts_name, sizeof(pbi->trace.pts_name), + "%s-timestamp", pbi->trace.vdec_name); + snprintf(pbi->trace.new_q_name, sizeof(pbi->trace.new_q_name), + "%s-newframe_q", pbi->trace.vdec_name); + snprintf(pbi->trace.disp_q_name, sizeof(pbi->trace.disp_q_name), + "%s-dispframe_q", pbi->trace.vdec_name); + snprintf(pbi->trace.decode_time_name, sizeof(pbi->trace.decode_time_name), + "decoder_time%d", pdev->id); + snprintf(pbi->trace.decode_run_time_name, sizeof(pbi->trace.decode_run_time_name), + "decoder_run_time%d", pdev->id); + snprintf(pbi->trace.decode_header_memory_time_name, sizeof(pbi->trace.decode_header_memory_time_name), + "decoder_header_time%d", pdev->id); + + if (pdata->use_vfm_path) + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + VFM_DEC_PROVIDER_NAME); + else + snprintf(pdata->vf_provider_name, VDEC_PROVIDER_NAME_SIZE, + MULTI_INSTANCE_PROVIDER_NAME ".%02x", pdev->id & 0xff); + + pbi->provider_name = pdata->vf_provider_name; + 
platform_set_drvdata(pdev, pdata); + + pbi->platform_dev = pdev; + pbi->video_signal_type = 0; + pbi->m_ins_flag = 1; + if (!vdec_is_support_4k()) { + pbi->max_pic_w = 1920; + pbi->max_pic_h = 1088; + } else if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SM1) { + pbi->max_pic_w = 4096; + pbi->max_pic_h = 2304; + } else { + pbi->max_pic_w = 8192; + pbi->max_pic_h = 4608; + } + + if ((debug & IGNORE_PARAM_FROM_CONFIG) == 0 && + pdata->config_len) { +#ifdef MULTI_INSTANCE_SUPPORT + int vp9_buf_width = 0; + int vp9_buf_height = 0; + /*use ptr config for doubel_write_mode, etc*/ + vp9_print(pbi, 0, "pdata->config=%s\n", pdata->config); + if (get_config_int(pdata->config, "vp9_double_write_mode", + &config_val) == 0) + pbi->double_write_mode = config_val; + else + pbi->double_write_mode = double_write_mode; + + if (get_config_int(pdata->config, "save_buffer_mode", + &config_val) == 0) + pbi->save_buffer_mode = config_val; + else + pbi->save_buffer_mode = 0; + if (get_config_int(pdata->config, "vp9_buf_width", + &config_val) == 0) { + vp9_buf_width = config_val; + } + if (get_config_int(pdata->config, "vp9_buf_height", + &config_val) == 0) { + vp9_buf_height = config_val; + } + + if (get_config_int(pdata->config, "no_head", + &config_val) == 0) + pbi->no_head = config_val; + else + pbi->no_head = no_head; + + /*use ptr config for max_pic_w, etc*/ + if (get_config_int(pdata->config, "vp9_max_pic_w", + &config_val) == 0) { + pbi->max_pic_w = config_val; + } + if (get_config_int(pdata->config, "vp9_max_pic_h", + &config_val) == 0) { + pbi->max_pic_h = config_val; + } + + if ((pbi->max_pic_w * pbi->max_pic_h) + < (vp9_buf_width * vp9_buf_height)) { + pbi->max_pic_w = vp9_buf_width; + pbi->max_pic_h = vp9_buf_height; + vp9_print(pbi, 0, "use buf resolution\n"); + } + + if (get_config_int(pdata->config, "sidebind_type", + &config_val) == 0) + pbi->sidebind_type = config_val; + + if (get_config_int(pdata->config, "sidebind_channel_id", + &config_val) == 0) + 
pbi->sidebind_channel_id = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_codec_enable", + &config_val) == 0) + pbi->is_used_v4l = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_buffer_margin", + &config_val) == 0) + pbi->dynamic_buf_num_margin = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_canvas_mem_mode", + &config_val) == 0) + pbi->mem_map_mode = config_val; + + if (get_config_int(pdata->config, + "parm_enable_fence", + &config_val) == 0) + pbi->enable_fence = config_val; + + if (get_config_int(pdata->config, + "parm_fence_usage", + &config_val) == 0) + pbi->fence_usage = config_val; + + if (get_config_int(pdata->config, + "parm_v4l_low_latency_mode", + &config_val) == 0) { + pbi->low_latency_flag = (config_val & 1) ? 1 : 0; + pbi->enable_fence = (config_val & 2) ? 1 : 0; + } + + /*if (get_config_int(pdata->config, + "parm_v4l_duration", + &config_val) == 0) + vdec_frame_rate_uevent(config_val);*/ +#endif + if (get_config_int(pdata->config, "HDRStaticInfo", + &vf_dp.present_flag) == 0 + && vf_dp.present_flag == 1) { + get_config_int(pdata->config, "signal_type", + &pbi->video_signal_type); + get_config_int(pdata->config, "mG.x", + &vf_dp.primaries[0][0]); + get_config_int(pdata->config, "mG.y", + &vf_dp.primaries[0][1]); + get_config_int(pdata->config, "mB.x", + &vf_dp.primaries[1][0]); + get_config_int(pdata->config, "mB.y", + &vf_dp.primaries[1][1]); + get_config_int(pdata->config, "mR.x", + &vf_dp.primaries[2][0]); + get_config_int(pdata->config, "mR.y", + &vf_dp.primaries[2][1]); + get_config_int(pdata->config, "mW.x", + &vf_dp.white_point[0]); + get_config_int(pdata->config, "mW.y", + &vf_dp.white_point[1]); + get_config_int(pdata->config, "mMaxDL", + &vf_dp.luminance[0]); + get_config_int(pdata->config, "mMinDL", + &vf_dp.luminance[1]); + vf_dp.content_light_level.present_flag = 1; + get_config_int(pdata->config, "mMaxCLL", + &content_light_level.max_content); + get_config_int(pdata->config, "mMaxFALL", + 
&content_light_level.max_pic_average); + + get_config_int(pdata->config, "mTransfer", + &transfer_val); + + if (transfer_val == 0) + transfer_val = 16; + + vp9_print(pbi, 0, "transfer_val=%d\n",transfer_val); + + vf_dp.content_light_level = content_light_level; + if (!pbi->video_signal_type) { + pbi->video_signal_type = (1 << 29) + | (5 << 26) /* unspecified */ + | (0 << 25) /* limit */ + | (1 << 24) /* color available */ + | (9 << 16) /* 2020 */ + | (transfer_val << 8) /* 2084 */ + | (9 << 0); /* 2020 */ + } + } + pbi->vf_dp = vf_dp; + } else { + if (pdata->sys_info) { + pbi->vvp9_amstream_dec_info = *pdata->sys_info; + if ((pbi->vvp9_amstream_dec_info.width != 0) && + (pbi->vvp9_amstream_dec_info.height != 0)) { + pbi->max_pic_w = pbi->vvp9_amstream_dec_info.width; + pbi->max_pic_h = pbi->vvp9_amstream_dec_info.height; + } + } + /*pbi->vvp9_amstream_dec_info.width = 0; + pbi->vvp9_amstream_dec_info.height = 0; + pbi->vvp9_amstream_dec_info.rate = 30;*/ + pbi->double_write_mode = double_write_mode; + } + + if (!pbi->is_used_v4l) { + vf_provider_init(&pdata->vframe_provider, pdata->vf_provider_name, + &vvp9_vf_provider, pbi); + } + + if (no_head & 0x10) { + pbi->no_head = (no_head & 0xf); + } + + pbi->endian = HEVC_CONFIG_LITTLE_ENDIAN; + if (!pbi->is_used_v4l) { + pbi->mem_map_mode = mem_map_mode; + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) + pbi->endian = HEVC_CONFIG_BIG_ENDIAN; + } + if (endian) + pbi->endian = endian; + + if (pbi->is_used_v4l) + pbi->run_ready_min_buf_num = run_ready_min_buf_num - 1 ; + else + pbi->run_ready_min_buf_num = run_ready_min_buf_num; + + if (is_oversize(pbi->max_pic_w, pbi->max_pic_h)) { + pr_err("over size: %dx%d, probe failed\n", + pbi->max_pic_w, pbi->max_pic_h); + return -1; + } + + if (force_config_fence) { + pbi->enable_fence = true; + pbi->fence_usage = + (force_config_fence >> 4) & 0xf; + if (force_config_fence & 0x2) + pbi->enable_fence = false; + vp9_print(pbi, 0, "enable fence: %d, fence usage: %d\n", + 
pbi->enable_fence, pbi->fence_usage); + } + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL || + pbi->double_write_mode == 0x10) + pbi->mmu_enable = 0; + else + pbi->mmu_enable = 1; + + video_signal_type = pbi->video_signal_type; + + if (pdata->sys_info) { + pbi->vvp9_amstream_dec_info = *pdata->sys_info; + } else { + pbi->vvp9_amstream_dec_info.width = 0; + pbi->vvp9_amstream_dec_info.height = 0; + pbi->vvp9_amstream_dec_info.rate = 30; + } + + pbi->low_latency_flag = 1; + vp9_print(pbi, 0, + "no_head %d low_latency %d video_signal_type 0x%x\n", + pbi->no_head, pbi->low_latency_flag, pbi->video_signal_type); +#if 0 + pbi->buf_start = pdata->mem_start; + pbi->buf_size = pdata->mem_end - pdata->mem_start + 1; +#else + if (amvdec_vp9_mmu_init(pbi) < 0) { + pr_err("vp9 alloc bmmu box failed!!\n"); + /* devm_kfree(&pdev->dev, (void *)pbi); */ + vfree((void *)pbi); + pdata->dec_status = NULL; + return -1; + } + + pbi->cma_alloc_count = PAGE_ALIGN(work_buf_size) / PAGE_SIZE; + ret = decoder_bmmu_box_alloc_buf_phy(pbi->bmmu_box, WORK_SPACE_BUF_ID, + pbi->cma_alloc_count * PAGE_SIZE, DRIVER_NAME, + &pbi->cma_alloc_addr); + if (ret < 0) { + uninit_mmu_buffers(pbi); + /* devm_kfree(&pdev->dev, (void *)pbi); */ + vfree((void *)pbi); + pdata->dec_status = NULL; + return ret; + } + pbi->buf_start = pbi->cma_alloc_addr; + pbi->buf_size = work_buf_size; +#endif + + pbi->init_flag = 0; + pbi->first_sc_checked = 0; + pbi->fatal_error = 0; + pbi->show_frame_num = 0; + + if (debug) { + pr_info("===VP9 decoder mem resource 0x%lx size 0x%x\n", + pbi->buf_start, + pbi->buf_size); + } + + pbi->cma_dev = pdata->cma_dev; + + mutex_init(&pbi->fence_mutex); + + if (pbi->enable_fence) { + pdata->sync = vdec_sync_get(); + if (!pdata->sync) { + vp9_print(pbi, 0, "alloc fence timeline error\n"); + vp9_local_uninit(pbi); + uninit_mmu_buffers(pbi); + /* devm_kfree(&pdev->dev, (void *)pbi); */ + vfree((void *)pbi); + pdata->dec_status = NULL; + return -1; + } + ctx->sync = pdata->sync; + 
pdata->sync->usage = pbi->fence_usage; + vdec_timeline_create(pdata->sync, DRIVER_NAME); + vdec_timeline_get(pdata->sync); + } + + if (vvp9_init(pdata) < 0) { + pr_info("\namvdec_vp9 init failed.\n"); + vdec_timeline_put(pdata->sync); + vp9_local_uninit(pbi); + uninit_mmu_buffers(pbi); + /* devm_kfree(&pdev->dev, (void *)pbi); */ + vfree((void *)pbi); + pdata->dec_status = NULL; + return -ENODEV; + } + vdec_set_prepare_level(pdata, start_decode_buf_level); + hevc_source_changed(VFORMAT_VP9, + 4096, 2048, 60); +#ifdef SUPPORT_FB_DECODING + if (pbi->used_stage_buf_num > 0) + vdec_core_request(pdata, + CORE_MASK_HEVC_FRONT | CORE_MASK_HEVC_BACK); + else + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_HEVC_FRONT | CORE_MASK_HEVC_BACK + | CORE_MASK_COMBINE); +#else + if (pdata->parallel_dec == 1) + vdec_core_request(pdata, CORE_MASK_HEVC); + else + vdec_core_request(pdata, CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_COMBINE); +#endif + pbi->pic_list_init_done2 = true; + + return 0; +} + +static int ammvdec_vp9_remove(struct platform_device *pdev) +{ + struct VP9Decoder_s *pbi = (struct VP9Decoder_s *) + (((struct vdec_s *)(platform_get_drvdata(pdev)))->private); + struct vdec_s *vdec = hw_to_vdec(pbi); + int i; + if (debug) + pr_info("amvdec_vp9_remove\n"); + + vmvp9_stop(pbi); + + if (pbi->ge2d) { + vdec_ge2d_destroy(pbi->ge2d); + pbi->ge2d = NULL; + } + +#ifdef SUPPORT_FB_DECODING + vdec_core_release(hw_to_vdec(pbi), CORE_MASK_VDEC_1 | CORE_MASK_HEVC + | CORE_MASK_HEVC_FRONT | CORE_MASK_HEVC_BACK + ); +#else + if (vdec->parallel_dec == 1) + vdec_core_release(hw_to_vdec(pbi), CORE_MASK_HEVC); + else + vdec_core_release(hw_to_vdec(pbi), CORE_MASK_VDEC_1 | CORE_MASK_HEVC); +#endif + vdec_set_status(hw_to_vdec(pbi), VDEC_STATUS_DISCONNECTED); + + if (vdec->parallel_dec == 1) { + for (i = 0; i < FRAME_BUFFERS; i++) { + vdec->free_canvas_ex + (pbi->common.buffer_pool->frame_bufs[i].buf.y_canvas_index, + vdec->id); + vdec->free_canvas_ex + 
(pbi->common.buffer_pool->frame_bufs[i].buf.uv_canvas_index, + vdec->id); + } + } + + if (pbi->enable_fence) + vdec_fence_release(pbi, vdec->sync); + +#ifdef DEBUG_PTS + pr_info("pts missed %ld, pts hit %ld, duration %d\n", + pbi->pts_missed, pbi->pts_hit, pbi->frame_dur); +#endif + mem_map_mode = 0; + if (is_rdma_enable()) + dma_free_coherent(amports_get_dma_device(), RDMA_SIZE, pbi->rdma_adr, pbi->rdma_phy_adr); + + /* devm_kfree(&pdev->dev, (void *)pbi); */ + vfree((void *)pbi); + return 0; +} + +static struct platform_driver ammvdec_vp9_driver = { + .probe = ammvdec_vp9_probe, + .remove = ammvdec_vp9_remove, +#ifdef CONFIG_PM + .suspend = amhevc_suspend, + .resume = amhevc_resume, +#endif + .driver = { + .name = MULTI_DRIVER_NAME, + } +}; +#endif +static struct mconfig vp9_configs[] = { + MC_PU32("bit_depth_luma", &bit_depth_luma), + MC_PU32("bit_depth_chroma", &bit_depth_chroma), + MC_PU32("frame_width", &frame_width), + MC_PU32("frame_height", &frame_height), + MC_PU32("debug", &debug), + MC_PU32("radr", &radr), + MC_PU32("rval", &rval), + MC_PU32("pop_shorts", &pop_shorts), + MC_PU32("dbg_cmd", &dbg_cmd), + MC_PU32("dbg_skip_decode_index", &dbg_skip_decode_index), + MC_PU32("endian", &endian), + MC_PU32("step", &step), + MC_PU32("udebug_flag", &udebug_flag), + MC_PU32("decode_pic_begin", &decode_pic_begin), + MC_PU32("slice_parse_begin", &slice_parse_begin), + MC_PU32("i_only_flag", &i_only_flag), + MC_PU32("error_handle_policy", &error_handle_policy), + MC_PU32("buf_alloc_width", &buf_alloc_width), + MC_PU32("buf_alloc_height", &buf_alloc_height), + MC_PU32("buf_alloc_depth", &buf_alloc_depth), + MC_PU32("buf_alloc_size", &buf_alloc_size), + MC_PU32("buffer_mode", &buffer_mode), + MC_PU32("buffer_mode_dbg", &buffer_mode_dbg), + MC_PU32("max_buf_num", &max_buf_num), + MC_PU32("dynamic_buf_num_margin", &dynamic_buf_num_margin), + MC_PU32("mem_map_mode", &mem_map_mode), + MC_PU32("double_write_mode", &double_write_mode), + MC_PU32("enable_mem_saving", 
&enable_mem_saving), + MC_PU32("force_w_h", &force_w_h), + MC_PU32("force_fps", &force_fps), + MC_PU32("max_decoding_time", &max_decoding_time), + MC_PU32("on_no_keyframe_skiped", &on_no_keyframe_skiped), + MC_PU32("start_decode_buf_level", &start_decode_buf_level), + MC_PU32("decode_timeout_val", &decode_timeout_val), + MC_PU32("vp9_max_pic_w", &vp9_max_pic_w), + MC_PU32("vp9_max_pic_h", &vp9_max_pic_h), +}; +static struct mconfig_node vp9_node; + +static int __init amvdec_vp9_driver_init_module(void) +{ + + struct BuffInfo_s *p_buf_info; + + if (get_cpu_major_id() <= AM_MESON_CPU_MAJOR_ID_TM2 && !is_cpu_tm2_revb()) { + if (vdec_is_support_4k()) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) + p_buf_info = &amvvp9_workbuff_spec[2]; + else + p_buf_info = &amvvp9_workbuff_spec[1]; + } else + p_buf_info = &amvvp9_workbuff_spec[0]; + } else { //get_cpu_major_id() > AM_MESON_CPU_MAJOR_ID_TM2 || is_cpu_tm2_revb() + if (vdec_is_support_4k()) + p_buf_info = &amvvp9_workbuff_spec[5]; + else + p_buf_info = &amvvp9_workbuff_spec[4]; + } + + + init_buff_spec(NULL, p_buf_info); + work_buf_size = + (p_buf_info->end_adr - p_buf_info->start_adr + + 0xffff) & (~0xffff); + + pr_debug("amvdec_vp9 module init\n"); + + error_handle_policy = 0; + +#ifdef ERROR_HANDLE_DEBUG + dbg_nal_skip_flag = 0; + dbg_nal_skip_count = 0; +#endif + udebug_flag = 0; + decode_pic_begin = 0; + slice_parse_begin = 0; + step = 0; + buf_alloc_size = 0; + + if (platform_driver_register(&ammvdec_vp9_driver)) { + pr_err("failed to register ammvdec_vp9 driver\n"); + return -ENODEV; + } + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL || + get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_TXL || + get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5) { + amvdec_vp9_profile.name = "vp9_unsupport"; + } else if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) && + (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T5D)) { + amvdec_vp9_profile.profile = + "8k, 10bit, dwrite, compressed, fence, uvm"; + } else { + if 
(vdec_is_support_4k()) + amvdec_vp9_profile.profile = + "4k, 10bit, dwrite, compressed, fence, uvm"; + else + amvdec_vp9_profile.profile = + "10bit, dwrite, compressed, fence, uvm"; + } + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) + max_buf_num = MAX_BUF_NUM_LESS; + + vcodec_profile_register(&amvdec_vp9_profile); + INIT_REG_NODE_CONFIGS("media.decoder", &vp9_node, + "vp9-v4l", vp9_configs, CONFIG_FOR_RW); + vcodec_feature_register(VFORMAT_VP9, 1); + + return 0; +} + +static void __exit amvdec_vp9_driver_remove_module(void) +{ + pr_debug("amvdec_vp9 module remove.\n"); + + platform_driver_unregister(&ammvdec_vp9_driver); +} + +/****************************************/ + +module_param(bit_depth_luma, uint, 0664); +MODULE_PARM_DESC(bit_depth_luma, "\n amvdec_vp9 bit_depth_luma\n"); + +module_param(bit_depth_chroma, uint, 0664); +MODULE_PARM_DESC(bit_depth_chroma, "\n amvdec_vp9 bit_depth_chroma\n"); + +module_param(frame_width, uint, 0664); +MODULE_PARM_DESC(frame_width, "\n amvdec_vp9 frame_width\n"); + +module_param(frame_height, uint, 0664); +MODULE_PARM_DESC(frame_height, "\n amvdec_vp9 frame_height\n"); + +module_param(debug, uint, 0664); +MODULE_PARM_DESC(debug, "\n amvdec_vp9 debug\n"); + +module_param(radr, uint, 0664); +MODULE_PARM_DESC(radr, "\n radr\n"); + +module_param(rval, uint, 0664); +MODULE_PARM_DESC(rval, "\n rval\n"); + +module_param(pop_shorts, uint, 0664); +MODULE_PARM_DESC(pop_shorts, "\n rval\n"); + +module_param(dbg_cmd, uint, 0664); +MODULE_PARM_DESC(dbg_cmd, "\n dbg_cmd\n"); + +module_param(dbg_skip_decode_index, uint, 0664); +MODULE_PARM_DESC(dbg_skip_decode_index, "\n dbg_skip_decode_index\n"); + +module_param(endian, uint, 0664); +MODULE_PARM_DESC(endian, "\n rval\n"); + +module_param(step, uint, 0664); +MODULE_PARM_DESC(step, "\n amvdec_vp9 step\n"); + +module_param(decode_pic_begin, uint, 0664); +MODULE_PARM_DESC(decode_pic_begin, "\n amvdec_vp9 decode_pic_begin\n"); + +module_param(slice_parse_begin, uint, 0664); 
+MODULE_PARM_DESC(slice_parse_begin, "\n amvdec_vp9 slice_parse_begin\n"); + +module_param(i_only_flag, uint, 0664); +MODULE_PARM_DESC(i_only_flag, "\n amvdec_vp9 i_only_flag\n"); + +module_param(low_latency_flag, uint, 0664); +MODULE_PARM_DESC(low_latency_flag, "\n amvdec_vp9 low_latency_flag\n"); + +module_param(no_head, uint, 0664); +MODULE_PARM_DESC(no_head, "\n amvdec_vp9 no_head\n"); + +module_param(error_handle_policy, uint, 0664); +MODULE_PARM_DESC(error_handle_policy, "\n amvdec_vp9 error_handle_policy\n"); + +module_param(buf_alloc_width, uint, 0664); +MODULE_PARM_DESC(buf_alloc_width, "\n buf_alloc_width\n"); + +module_param(disable_repeat, uint, 0664); +MODULE_PARM_DESC(disable_repeat, "\n disable_repeat\n"); + +module_param(buf_alloc_height, uint, 0664); +MODULE_PARM_DESC(buf_alloc_height, "\n buf_alloc_height\n"); + +module_param(buf_alloc_depth, uint, 0664); +MODULE_PARM_DESC(buf_alloc_depth, "\n buf_alloc_depth\n"); + +module_param(buf_alloc_size, uint, 0664); +MODULE_PARM_DESC(buf_alloc_size, "\n buf_alloc_size\n"); + +module_param(buffer_mode, uint, 0664); +MODULE_PARM_DESC(buffer_mode, "\n buffer_mode\n"); + +module_param(buffer_mode_dbg, uint, 0664); +MODULE_PARM_DESC(buffer_mode_dbg, "\n buffer_mode_dbg\n"); +/*USE_BUF_BLOCK*/ +module_param(max_buf_num, uint, 0664); +MODULE_PARM_DESC(max_buf_num, "\n max_buf_num\n"); + +module_param(dynamic_buf_num_margin, uint, 0664); +MODULE_PARM_DESC(dynamic_buf_num_margin, "\n dynamic_buf_num_margin\n"); + +module_param(mv_buf_margin, uint, 0664); +MODULE_PARM_DESC(mv_buf_margin, "\n mv_buf_margin\n"); + +module_param(mv_buf_dynamic_alloc, uint, 0664); +MODULE_PARM_DESC(mv_buf_dynamic_alloc, "\n mv_buf_dynamic_alloc\n"); + +module_param(run_ready_min_buf_num, uint, 0664); +MODULE_PARM_DESC(run_ready_min_buf_num, "\n run_ready_min_buf_num\n"); + +/**/ + +module_param(mem_map_mode, uint, 0664); +MODULE_PARM_DESC(mem_map_mode, "\n mem_map_mode\n"); + +#ifdef SUPPORT_10BIT +module_param(double_write_mode, uint, 
0664); +MODULE_PARM_DESC(double_write_mode, "\n double_write_mode\n"); + +module_param(enable_mem_saving, uint, 0664); +MODULE_PARM_DESC(enable_mem_saving, "\n enable_mem_saving\n"); + +module_param(force_w_h, uint, 0664); +MODULE_PARM_DESC(force_w_h, "\n force_w_h\n"); +#endif + +module_param(force_fps, uint, 0664); +MODULE_PARM_DESC(force_fps, "\n force_fps\n"); + +module_param(max_decoding_time, uint, 0664); +MODULE_PARM_DESC(max_decoding_time, "\n max_decoding_time\n"); + +module_param(on_no_keyframe_skiped, uint, 0664); +MODULE_PARM_DESC(on_no_keyframe_skiped, "\n on_no_keyframe_skiped\n"); + +module_param(mcrcc_cache_alg_flag, uint, 0664); +MODULE_PARM_DESC(mcrcc_cache_alg_flag, "\n mcrcc_cache_alg_flag\n"); + +#ifdef MULTI_INSTANCE_SUPPORT +module_param(start_decode_buf_level, int, 0664); +MODULE_PARM_DESC(start_decode_buf_level, + "\n vp9 start_decode_buf_level\n"); + +module_param(decode_timeout_val, uint, 0664); +MODULE_PARM_DESC(decode_timeout_val, + "\n vp9 decode_timeout_val\n"); + +module_param(vp9_max_pic_w, uint, 0664); +MODULE_PARM_DESC(vp9_max_pic_w, "\n vp9_max_pic_w\n"); + +module_param(vp9_max_pic_h, uint, 0664); +MODULE_PARM_DESC(vp9_max_pic_h, "\n vp9_max_pic_h\n"); + +module_param_array(decode_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(display_frame_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(max_process_time, uint, + &max_decode_instance_num, 0664); + +module_param_array(run_count, uint, + &max_decode_instance_num, 0664); + +module_param_array(input_empty, uint, + &max_decode_instance_num, 0664); + +module_param_array(not_run_ready, uint, + &max_decode_instance_num, 0664); +#endif + +#ifdef SUPPORT_FB_DECODING +module_param_array(not_run2_ready, uint, + &max_decode_instance_num, 0664); + +module_param_array(run2_count, uint, + &max_decode_instance_num, 0664); + +module_param(stage_buf_num, uint, 0664); +MODULE_PARM_DESC(stage_buf_num, "\n amvdec_h265 stage_buf_num\n"); +#endif 
+module_param(force_bufspec, uint, 0664); +MODULE_PARM_DESC(force_bufspec, "\n amvdec_h265 force_bufspec\n"); + +module_param(udebug_flag, uint, 0664); +MODULE_PARM_DESC(udebug_flag, "\n amvdec_h265 udebug_flag\n"); + +module_param(udebug_pause_pos, uint, 0664); +MODULE_PARM_DESC(udebug_pause_pos, "\n udebug_pause_pos\n"); + +module_param(udebug_pause_val, uint, 0664); +MODULE_PARM_DESC(udebug_pause_val, "\n udebug_pause_val\n"); + +module_param(udebug_pause_decode_idx, uint, 0664); +MODULE_PARM_DESC(udebug_pause_decode_idx, "\n udebug_pause_decode_idx\n"); + +module_param(without_display_mode, uint, 0664); +MODULE_PARM_DESC(without_display_mode, "\n without_display_mode\n"); + +module_param(force_config_fence, uint, 0664); +MODULE_PARM_DESC(force_config_fence, "\n force enable fence\n"); + +module_param(force_pts_unstable, uint, 0664); +MODULE_PARM_DESC(force_pts_unstable, "\n force_pts_unstable\n"); + +module_param(v4l_bitstream_id_enable, uint, 0664); +MODULE_PARM_DESC(v4l_bitstream_id_enable, "\n v4l_bitstream_id_enable\n"); + +module_init(amvdec_vp9_driver_init_module); +module_exit(amvdec_vp9_driver_remove_module); + +MODULE_DESCRIPTION("AMLOGIC vp9 Video Decoder Driver"); +MODULE_LICENSE("GPL"); +
diff --git a/drivers/frame_provider/decoder_v4l/vp9/vvp9.h b/drivers/frame_provider/decoder_v4l/vp9/vvp9.h new file mode 100644 index 0000000..1db9d09 --- /dev/null +++ b/drivers/frame_provider/decoder_v4l/vp9/vvp9.h
@@ -0,0 +1,23 @@ +/* + * drivers/amlogic/amports/vvp9.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef VVP9_H +#define VVP9_H + +void adapt_coef_probs(int pic_count, int prev_kf, int cur_kf, int pre_fc, +unsigned int *prev_prob, unsigned int *cur_prob, unsigned int *count); +#endif
diff --git a/drivers/frame_sink/Makefile b/drivers/frame_sink/Makefile new file mode 100644 index 0000000..2b9754a --- /dev/null +++ b/drivers/frame_sink/Makefile
@@ -0,0 +1 @@ +obj-y += encoder/
diff --git a/drivers/frame_sink/encoder/Makefile b/drivers/frame_sink/encoder/Makefile new file mode 100644 index 0000000..a531bb7 --- /dev/null +++ b/drivers/frame_sink/encoder/Makefile
@@ -0,0 +1,4 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VENC_H264) += h264/ +obj-$(CONFIG_AMLOGIC_MEDIA_VENC_H265) += h265/ +obj-$(CONFIG_AMLOGIC_MEDIA_VENC_MULTI) += multi/ +obj-$(CONFIG_AMLOGIC_MEDIA_VENC_JPEG) += jpeg/
diff --git a/drivers/frame_sink/encoder/h264/Makefile b/drivers/frame_sink/encoder/h264/Makefile new file mode 100644 index 0000000..c12d7c3 --- /dev/null +++ b/drivers/frame_sink/encoder/h264/Makefile
@@ -0,0 +1 @@ +obj-m += encoder.o
diff --git a/drivers/frame_sink/encoder/h264/encoder.c b/drivers/frame_sink/encoder/h264/encoder.c new file mode 100644 index 0000000..c44ea56 --- /dev/null +++ b/drivers/frame_sink/encoder/h264/encoder.c
@@ -0,0 +1,5304 @@ +/* + * drivers/amlogic/amports/encoder.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * +*/ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/clk.h> +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/reset.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/ctype.h> +#include <linux/fs.h> +#include <linux/compat.h> +//#include <asm/segment.h> +#include <asm/uaccess.h> +#include <linux/buffer_head.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/canvas/canvas_mgr.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../../../frame_provider/decoder/utils/vdec_canvas_utils.h" +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../../frame_provider/decoder/utils/vdec.h" +#include <linux/delay.h> +#include <linux/poll.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/dma-contiguous.h> +#include <linux/kthread.h> +#include <linux/sched/rt.h> +#include <linux/amlogic/media/utils/amports_config.h> +#include "encoder.h" +#include "../../../frame_provider/decoder/utils/amvdec.h" +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include 
"../../../frame_provider/decoder/utils/vdec.h" +#include "../../../frame_provider/decoder/utils/vdec_power_ctrl.h" + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include <linux/amlogic/power_ctrl.h> +#include <dt-bindings/power/sc2-pd.h> +#include <dt-bindings/power/t3-pd.h> +#include <linux/amlogic/power_domain.h> +#include <linux/amlogic/power_ctrl.h> + +#include <linux/amlogic/media/utils/amlog.h> +#include "../../../stream_input/amports/amports_priv.h" +#include "../../../frame_provider/decoder/utils/firmware.h" +#include <linux/amlogic/media/registers/register.h> +#include <linux/of_reserved_mem.h> +#include <linux/version.h> + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,1) +#include <uapi/linux/sched/types.h> +#include <linux/sched/signal.h> +#endif + +#ifdef CONFIG_AM_JPEG_ENCODER +#include "jpegenc.h" +#endif + +#define MHz (1000000) + +#define CHECK_RET(_ret) if (ret) {enc_pr(LOG_ERROR, \ + "%s:%d:function call failed with result: %d\n",\ + __FUNCTION__, __LINE__, _ret);} + +#define ENCODE_NAME "encoder" +#define AMVENC_CANVAS_INDEX 0x64 +#define AMVENC_CANVAS_MAX_INDEX 0x6F + +#define MIN_SIZE amvenc_buffspec[0].min_buffsize +#define DUMP_INFO_BYTES_PER_MB 80 + +#define ADJUSTED_QP_FLAG 64 + +static s32 avc_device_major; +static struct device *amvenc_avc_dev; +#define DRIVER_NAME "amvenc_avc" +#define CLASS_NAME "amvenc_avc" +#define DEVICE_NAME "amvenc_avc" + +static struct encode_manager_s encode_manager; + +#define MULTI_SLICE_MC +#define H264_ENC_CBR +/* #define MORE_MODULE_PARAM */ + +#define ENC_CANVAS_OFFSET AMVENC_CANVAS_INDEX + +#define UCODE_MODE_FULL 0 + +/* #define ENABLE_IGNORE_FUNCTION */ + +static u32 ie_me_mb_type; +static u32 ie_me_mode; +static u32 ie_pippeline_block = 3; +static u32 ie_cur_ref_sel; +/* static u32 avc_endian = 6; */ +static u32 clock_level = 5; + +static u32 encode_print_level = LOG_DEBUG; +static u32 no_timeout; +static int nr_mode = -1; +static u32 qp_table_debug; +static u32 use_reset_control; +static u32 
use_ge2d; +static u32 dump_input; + +#ifdef H264_ENC_SVC +static u32 svc_enable = 0; /* Enable sac feature or not */ +static u32 svc_ref_conf = 0; /* Continuous no reference numbers */ +#endif + +struct hcodec_clks { + struct clk *hcodec_aclk; + //struct clk *hcodec_bclk; + //struct clk *hcodec_cclk; +}; + +static struct hcodec_clks s_hcodec_clks; +struct reset_control *hcodec_rst; + +static u32 me_mv_merge_ctl = + (0x1 << 31) | /* [31] me_merge_mv_en_16 */ + (0x1 << 30) | /* [30] me_merge_small_mv_en_16 */ + (0x1 << 29) | /* [29] me_merge_flex_en_16 */ + (0x1 << 28) | /* [28] me_merge_sad_en_16 */ + (0x1 << 27) | /* [27] me_merge_mv_en_8 */ + (0x1 << 26) | /* [26] me_merge_small_mv_en_8 */ + (0x1 << 25) | /* [25] me_merge_flex_en_8 */ + (0x1 << 24) | /* [24] me_merge_sad_en_8 */ + /* [23:18] me_merge_mv_diff_16 - MV diff <= n pixel can be merged */ + (0x12 << 18) | + /* [17:12] me_merge_mv_diff_8 - MV diff <= n pixel can be merged */ + (0x2b << 12) | + /* [11:0] me_merge_min_sad - SAD >= 0x180 can be merged with other MV */ + (0x80 << 0); + /* ( 0x4 << 18) | + * // [23:18] me_merge_mv_diff_16 - MV diff <= n pixel can be merged + */ + /* ( 0x3f << 12) | + * // [17:12] me_merge_mv_diff_8 - MV diff <= n pixel can be merged + */ + /* ( 0xc0 << 0); + * // [11:0] me_merge_min_sad - SAD >= 0x180 can be merged with other MV + */ + +static u32 me_mv_weight_01 = (0x40 << 24) | (0x30 << 16) | (0x20 << 8) | 0x30; +static u32 me_mv_weight_23 = (0x40 << 8) | 0x30; +static u32 me_sad_range_inc = 0x03030303; +static u32 me_step0_close_mv = 0x003ffc21; +static u32 me_f_skip_sad; +static u32 me_f_skip_weight; +static u32 me_sad_enough_01;/* 0x00018010; */ +static u32 me_sad_enough_23;/* 0x00000020; */ + +/* [31:0] NUM_ROWS_PER_SLICE_P */ +/* [15:0] NUM_ROWS_PER_SLICE_I */ +static u32 fixed_slice_cfg; + +/* y tnr */ +static unsigned int y_tnr_mc_en = 1; +static unsigned int y_tnr_txt_mode; +static unsigned int y_tnr_mot_sad_margin = 1; +static unsigned int y_tnr_mot_cortxt_rate = 1; 
+static unsigned int y_tnr_mot_distxt_ofst = 5; +static unsigned int y_tnr_mot_distxt_rate = 4; +static unsigned int y_tnr_mot_dismot_ofst = 4; +static unsigned int y_tnr_mot_frcsad_lock = 8; +static unsigned int y_tnr_mot2alp_frc_gain = 10; +static unsigned int y_tnr_mot2alp_nrm_gain = 216; +static unsigned int y_tnr_mot2alp_dis_gain = 128; +static unsigned int y_tnr_mot2alp_dis_ofst = 32; +static unsigned int y_tnr_alpha_min = 32; +static unsigned int y_tnr_alpha_max = 63; +static unsigned int y_tnr_deghost_os; +/* c tnr */ +static unsigned int c_tnr_mc_en = 1; +static unsigned int c_tnr_txt_mode; +static unsigned int c_tnr_mot_sad_margin = 1; +static unsigned int c_tnr_mot_cortxt_rate = 1; +static unsigned int c_tnr_mot_distxt_ofst = 5; +static unsigned int c_tnr_mot_distxt_rate = 4; +static unsigned int c_tnr_mot_dismot_ofst = 4; +static unsigned int c_tnr_mot_frcsad_lock = 8; +static unsigned int c_tnr_mot2alp_frc_gain = 10; +static unsigned int c_tnr_mot2alp_nrm_gain = 216; +static unsigned int c_tnr_mot2alp_dis_gain = 128; +static unsigned int c_tnr_mot2alp_dis_ofst = 32; +static unsigned int c_tnr_alpha_min = 32; +static unsigned int c_tnr_alpha_max = 63; +static unsigned int c_tnr_deghost_os; +/* y snr */ +static unsigned int y_snr_err_norm = 1; +static unsigned int y_snr_gau_bld_core = 1; +static int y_snr_gau_bld_ofst = -1; +static unsigned int y_snr_gau_bld_rate = 48; +static unsigned int y_snr_gau_alp0_min; +static unsigned int y_snr_gau_alp0_max = 63; +static unsigned int y_bld_beta2alp_rate = 16; +static unsigned int y_bld_beta_min; +static unsigned int y_bld_beta_max = 63; +/* c snr */ +static unsigned int c_snr_err_norm = 1; +static unsigned int c_snr_gau_bld_core = 1; +static int c_snr_gau_bld_ofst = -1; +static unsigned int c_snr_gau_bld_rate = 48; +static unsigned int c_snr_gau_alp0_min; +static unsigned int c_snr_gau_alp0_max = 63; +static unsigned int c_bld_beta2alp_rate = 16; +static unsigned int c_bld_beta_min; +static unsigned int 
c_bld_beta_max = 63; +static unsigned int qp_mode; + +static DEFINE_SPINLOCK(lock); + +#define ADV_MV_LARGE_16x8 1 +#define ADV_MV_LARGE_8x16 1 +#define ADV_MV_LARGE_16x16 1 + +/* me weight offset should not very small, it used by v1 me module. */ +/* the min real sad for me is 16 by hardware. */ +#define ME_WEIGHT_OFFSET 0x520 +#define I4MB_WEIGHT_OFFSET 0x655 +#define I16MB_WEIGHT_OFFSET 0x560 + +#define ADV_MV_16x16_WEIGHT 0x080 +#define ADV_MV_16_8_WEIGHT 0x0e0 +#define ADV_MV_8x8_WEIGHT 0x240 +#define ADV_MV_4x4x4_WEIGHT 0x3000 + +#define IE_SAD_SHIFT_I16 0x001 +#define IE_SAD_SHIFT_I4 0x001 +#define ME_SAD_SHIFT_INTER 0x001 + +#define STEP_2_SKIP_SAD 0 +#define STEP_1_SKIP_SAD 0 +#define STEP_0_SKIP_SAD 0 +#define STEP_2_SKIP_WEIGHT 0 +#define STEP_1_SKIP_WEIGHT 0 +#define STEP_0_SKIP_WEIGHT 0 + +#define ME_SAD_RANGE_0 0x1 /* 0x0 */ +#define ME_SAD_RANGE_1 0x0 +#define ME_SAD_RANGE_2 0x0 +#define ME_SAD_RANGE_3 0x0 + +/* use 0 for v3, 0x18 for v2 */ +#define ME_MV_PRE_WEIGHT_0 0x18 +/* use 0 for v3, 0x18 for v2 */ +#define ME_MV_PRE_WEIGHT_1 0x18 +#define ME_MV_PRE_WEIGHT_2 0x0 +#define ME_MV_PRE_WEIGHT_3 0x0 + +/* use 0 for v3, 0x18 for v2 */ +#define ME_MV_STEP_WEIGHT_0 0x18 +/* use 0 for v3, 0x18 for v2 */ +#define ME_MV_STEP_WEIGHT_1 0x18 +#define ME_MV_STEP_WEIGHT_2 0x0 +#define ME_MV_STEP_WEIGHT_3 0x0 + +#define ME_SAD_ENOUGH_0_DATA 0x00 +#define ME_SAD_ENOUGH_1_DATA 0x04 +#define ME_SAD_ENOUGH_2_DATA 0x11 +#define ADV_MV_8x8_ENOUGH_DATA 0x20 + +/* V4_COLOR_BLOCK_FIX */ +#define V3_FORCE_SKIP_SAD_0 0x10 +/* 4 Blocks */ +#define V3_FORCE_SKIP_SAD_1 0x60 +/* 16 Blocks + V3_SKIP_WEIGHT_2 */ +#define V3_FORCE_SKIP_SAD_2 0x250 +/* almost disable it -- use t_lac_coeff_2 output to F_ZERO is better */ +#define V3_ME_F_ZERO_SAD (ME_WEIGHT_OFFSET + 0x10) + +#define V3_IE_F_ZERO_SAD_I16 (I16MB_WEIGHT_OFFSET + 0x10) +#define V3_IE_F_ZERO_SAD_I4 (I4MB_WEIGHT_OFFSET + 0x20) + +#define V3_SKIP_WEIGHT_0 0x10 +/* 4 Blocks 8 separate search sad can be very low */ 
+#define V3_SKIP_WEIGHT_1 0x8 /* (4 * ME_MV_STEP_WEIGHT_1 + 0x100) */ +#define V3_SKIP_WEIGHT_2 0x3 + +#define V3_LEVEL_1_F_SKIP_MAX_SAD 0x0 +#define V3_LEVEL_1_SKIP_MAX_SAD 0x6 + +#define I4_ipred_weight_most 0x18 +#define I4_ipred_weight_else 0x28 + +#define C_ipred_weight_V 0x04 +#define C_ipred_weight_H 0x08 +#define C_ipred_weight_DC 0x0c + +#define I16_ipred_weight_V 0x04 +#define I16_ipred_weight_H 0x08 +#define I16_ipred_weight_DC 0x0c + +/* 0x00 same as disable */ +#define v3_left_small_max_ie_sad 0x00 +#define v3_left_small_max_me_sad 0x40 + +#define v5_use_small_diff_cnt 0 +#define v5_simple_mb_inter_all_en 1 +#define v5_simple_mb_inter_8x8_en 1 +#define v5_simple_mb_inter_16_8_en 1 +#define v5_simple_mb_inter_16x16_en 1 +#define v5_simple_mb_intra_en 1 +#define v5_simple_mb_C_en 0 +#define v5_simple_mb_Y_en 1 +#define v5_small_diff_Y 0x10 +#define v5_small_diff_C 0x18 +/* shift 8-bits, 2, 1, 0, -1, -2, -3, -4 */ +#define v5_simple_dq_setting 0x43210fed +#define v5_simple_me_weight_setting 0 + +#ifdef H264_ENC_CBR +#define CBR_TABLE_SIZE 0x800 +#define CBR_SHORT_SHIFT 12 /* same as disable */ +#define CBR_LONG_MB_NUM 2 +#define START_TABLE_ID 8 +#define CBR_LONG_THRESH 4 +#endif + +static u32 v3_mv_sad[64] = { + /* For step0 */ + 0x00000004, + 0x00010008, + 0x00020010, + 0x00030018, + 0x00040020, + 0x00050028, + 0x00060038, + 0x00070048, + 0x00080058, + 0x00090068, + 0x000a0080, + 0x000b0098, + 0x000c00b0, + 0x000d00c8, + 0x000e00e8, + 0x000f0110, + /* For step1 */ + 0x00100002, + 0x00110004, + 0x00120008, + 0x0013000c, + 0x00140010, + 0x00150014, + 0x0016001c, + 0x00170024, + 0x0018002c, + 0x00190034, + 0x001a0044, + 0x001b0054, + 0x001c0064, + 0x001d0074, + 0x001e0094, + 0x001f00b4, + /* For step2 */ + 0x00200006, + 0x0021000c, + 0x0022000c, + 0x00230018, + 0x00240018, + 0x00250018, + 0x00260018, + 0x00270030, + 0x00280030, + 0x00290030, + 0x002a0030, + 0x002b0030, + 0x002c0030, + 0x002d0030, + 0x002e0030, + 0x002f0050, + /* For step2 4x4-8x8 */ + 
0x00300001, + 0x00310002, + 0x00320002, + 0x00330004, + 0x00340004, + 0x00350004, + 0x00360004, + 0x00370006, + 0x00380006, + 0x00390006, + 0x003a0006, + 0x003b0006, + 0x003c0006, + 0x003d0006, + 0x003e0006, + 0x003f0006 +}; + +static struct BuffInfo_s amvenc_buffspec[] = { + { + .lev_id = 0, + .max_width = 1920, + .max_height = 1088, + .min_buffsize = 0x1400000, + .dct = { + .buf_start = 0, + .buf_size = 0x800000, /* 1920x1088x4 */ + }, + .dec0_y = { + .buf_start = 0x800000, + .buf_size = 0x300000, + }, + .dec1_y = { + .buf_start = 0xb00000, + .buf_size = 0x300000, + }, + .assit = { + .buf_start = 0xe10000, + .buf_size = 0xc0000, + }, + .bitstream = { + .buf_start = 0xf00000, + .buf_size = 0x100000, + }, + .scale_buff = { + .buf_start = 0x1000000, + .buf_size = 0x300000, + }, + .dump_info = { + .buf_start = 0x1300000, + .buf_size = 0xa0000, /* (1920x1088/256)x80 */ + }, + .cbr_info = { + .buf_start = 0x13b0000, + .buf_size = 0x2000, + } + } +}; + +enum ucode_type_e { + UCODE_GXL, + UCODE_TXL, + UCODE_G12A, + UCODE_MAX +}; + +const char *ucode_name[] = { + "gxl_h264_enc", + "txl_h264_enc_cavlc", + "ga_h264_enc_cabac", +}; + +static void dma_flush(u32 buf_start, u32 buf_size); +static void cache_flush(u32 buf_start, u32 buf_size); +static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr); +static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg); + +//static struct canvas_status_s canvas_stat[CANVAS_MAX_SIZE]; +//static struct canvas_status_s mdec_cav_stat[MDEC_CAV_LUT_MAX]; +/* +static struct canvas_config_s *mdec_cav_pool = NULL; + +static void cav_lut_info_store(u32 index, ulong addr, u32 width, + u32 height, u32 wrap, u32 blkmode, u32 endian) +{ + struct canvas_config_s *pool = NULL; + + if (index < 0 || index >= MDEC_CAV_LUT_MAX) { + pr_err("%s, error index %d\n", __func__, index); + return; + } + if (mdec_cav_pool == NULL) + mdec_cav_pool = vzalloc(sizeof(struct canvas_config_s) + * (MDEC_CAV_LUT_MAX + 1)); + + if (mdec_cav_pool == NULL) { + 
pr_err("%s failed, mdec_cav_pool null\n", __func__); + return; + } + pool = &mdec_cav_pool[index]; + pool->width = width; + pool->height = height; + pool->block_mode = blkmode; + pool->endian = endian; + pool->phy_addr = addr; +} + +*/ + +static struct file *file_open(const char *path, int flags, int rights) +{ + struct file *filp = NULL; + mm_segment_t oldfs; + int err = 0; + + oldfs = get_fs(); + //set_fs(get_ds()); + set_fs(KERNEL_DS); + filp = filp_open(path, flags, rights); + set_fs(oldfs); + if (IS_ERR(filp)) { + err = PTR_ERR(filp); + return NULL; + } + return filp; +} +static void file_close(struct file *file) +{ + filp_close(file, NULL); +} +/* +static int file_read(struct file *file, unsigned long long offset, unsigned char *data, unsigned int size) +{ + mm_segment_t oldfs; + int ret; + + oldfs = get_fs(); + set_fs(KERNEL_DS); + + ret = vfs_read(file, data, size, &offset); + + set_fs(oldfs); + return ret; +}*/ +static int file_write(struct file *file, unsigned long long offset, unsigned char *data, unsigned int size) +{ + mm_segment_t oldfs; + int ret; + + oldfs = get_fs(); + set_fs(KERNEL_DS); + + ret = vfs_write(file, data, size, &offset); + + set_fs(oldfs); + return ret; +} +static int file_sync(struct file *file) +{ + vfs_fsync(file, 0); + return 0; +} + +static void canvas_config_proxy(u32 index, ulong addr, u32 width, u32 height, + u32 wrap, u32 blkmode) { + unsigned long datah_temp, datal_temp; + + if (!is_support_vdec_canvas()) { + canvas_config(index, addr, width, height, wrap, blkmode); + } else { +#if 1 + ulong start_addr = addr >> 3; + u32 cav_width = (((width + 31)>>5)<<2); + u32 cav_height = height; + u32 x_wrap_en = 0; + u32 y_wrap_en = 0; + u32 blk_mode = 0;//blkmode; + u32 cav_endian = 0; + + datal_temp = (start_addr & 0x1fffffff) | + ((cav_width & 0x7 ) << 29 ); + + datah_temp = ((cav_width >> 3) & 0x1ff) | + ((cav_height & 0x1fff) <<9 ) | + ((x_wrap_en & 1) << 22 ) | + ((y_wrap_en & 1) << 23) | + ((blk_mode & 0x3) << 24) | + ( 
cav_endian << 26); + +#else + u32 endian = 0; + u32 addr_bits_l = ((((addr + 7) >> 3) & CANVAS_ADDR_LMASK) << CAV_WADDR_LBIT); + u32 width_l = ((((width + 7) >> 3) & CANVAS_WIDTH_LMASK) << CAV_WIDTH_LBIT); + u32 width_h = ((((width + 7) >> 3) >> CANVAS_WIDTH_LWID) << CAV_WIDTH_HBIT); + u32 height_h = (height & CANVAS_HEIGHT_MASK) << CAV_HEIGHT_HBIT; + u32 blkmod_h = (blkmode & CANVAS_BLKMODE_MASK) << CAV_BLKMODE_HBIT; + u32 switch_bits_ctl = (endian & 0xf) << CAV_ENDIAN_HBIT; + u32 wrap_h = (0 << 23); + datal_temp = addr_bits_l | width_l; + datah_temp = width_h | height_h | wrap_h | blkmod_h | switch_bits_ctl; +#endif + /* + if (core == VDEC_1) { + WRITE_VREG(MDEC_CAV_CFG0, 0); //[0]canv_mode, by default is non-canv-mode + WRITE_VREG(MDEC_CAV_LUT_DATAL, datal_temp); + WRITE_VREG(MDEC_CAV_LUT_DATAH, datah_temp); + WRITE_VREG(MDEC_CAV_LUT_ADDR, index); + } else if (core == VDEC_HCODEC) */ { + WRITE_HREG(HCODEC_MDEC_CAV_CFG0, 0); //[0]canv_mode, by default is non-canv-mode + WRITE_HREG(HCODEC_MDEC_CAV_LUT_DATAL, datal_temp); + WRITE_HREG(HCODEC_MDEC_CAV_LUT_DATAH, datah_temp); + WRITE_HREG(HCODEC_MDEC_CAV_LUT_ADDR, index); + } + + /* + cav_lut_info_store(index, addr, width, height, wrap, blkmode, 0); + + if (vdec_get_debug() & 0x40000000) { + pr_info("(%s %2d) addr: %lx, width: %d, height: %d, blkm: %d, endian: %d\n", + __func__, index, addr, width, height, blkmode, 0); + pr_info("data(h,l): 0x%8lx, 0x%8lx\n", datah_temp, datal_temp); + } + */ + } +} + +s32 hcodec_hw_reset(void) +{ + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2 && use_reset_control) { + reset_control_reset(hcodec_rst); + enc_pr(LOG_DEBUG, "request hcodec reset from application.\n"); + } + return 0; +} + +s32 hcodec_clk_prepare(struct device *dev, struct hcodec_clks *clks) +{ + int ret; + + clks->hcodec_aclk = devm_clk_get(dev, "cts_hcodec_aclk"); + + if (IS_ERR_OR_NULL(clks->hcodec_aclk)) { + enc_pr(LOG_ERROR, "failed to get hcodec aclk\n"); + return -1; + } + + ret = clk_set_rate(clks->hcodec_aclk, 
667 * MHz); + CHECK_RET(ret); + + ret = clk_prepare(clks->hcodec_aclk); + CHECK_RET(ret); + + enc_pr(LOG_ERROR, "hcodec_clk_a: %lu MHz\n", clk_get_rate(clks->hcodec_aclk) / 1000000); + + return 0; +} + +void hcodec_clk_unprepare(struct device *dev, struct hcodec_clks *clks) +{ + clk_unprepare(clks->hcodec_aclk); + devm_clk_put(dev, clks->hcodec_aclk); + + //clk_unprepare(clks->wave_bclk); + //devm_clk_put(dev, clks->wave_bclk); + + //clk_unprepare(clks->wave_aclk); + //devm_clk_put(dev, clks->wave_aclk); +} + +s32 hcodec_clk_config(u32 enable) +{ + if (enable) { + clk_enable(s_hcodec_clks.hcodec_aclk); + //clk_enable(s_hcodec_clks.wave_bclk); + //clk_enable(s_hcodec_clks.wave_cclk); + } else { + clk_disable(s_hcodec_clks.hcodec_aclk); + //clk_disable(s_hcodec_clks.wave_bclk); + //clk_disable(s_hcodec_clks.wave_aclk); + } + + return 0; +} + +static const char *select_ucode(u32 ucode_index) +{ + enum ucode_type_e ucode = UCODE_GXL; + + switch (ucode_index) { + case UCODE_MODE_FULL: + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A) + ucode = UCODE_G12A; + else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) + ucode = UCODE_TXL; + else /* (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) */ + ucode = UCODE_GXL; + break; + break; + default: + break; + } + return (const char *)ucode_name[ucode]; +} + +static void hcodec_prog_qtbl(struct encode_wq_s *wq) +{ + WRITE_HREG(HCODEC_Q_QUANT_CONTROL, + (0 << 23) | /* quant_table_addr */ + (1 << 22)); /* quant_table_addr_update */ + + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i4[0]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i4[1]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i4[2]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i4[3]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i4[4]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i4[5]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i4[6]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i4[7]); + + 
WRITE_HREG(HCODEC_Q_QUANT_CONTROL, + (8 << 23) | /* quant_table_addr */ + (1 << 22)); /* quant_table_addr_update */ + + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i16[0]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i16[1]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i16[2]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i16[3]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i16[4]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i16[5]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i16[6]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_i16[7]); + + WRITE_HREG(HCODEC_Q_QUANT_CONTROL, + (16 << 23) | /* quant_table_addr */ + (1 << 22)); /* quant_table_addr_update */ + + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_me[0]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_me[1]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_me[2]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_me[3]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_me[4]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_me[5]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_me[6]); + WRITE_HREG(HCODEC_QUANT_TABLE_DATA, + wq->quant_tbl_me[7]); +} + +static void InitEncodeWeight(void) +{ + me_mv_merge_ctl = + (0x1 << 31) | /* [31] me_merge_mv_en_16 */ + (0x1 << 30) | /* [30] me_merge_small_mv_en_16 */ + (0x1 << 29) | /* [29] me_merge_flex_en_16 */ + (0x1 << 28) | /* [28] me_merge_sad_en_16 */ + (0x1 << 27) | /* [27] me_merge_mv_en_8 */ + (0x1 << 26) | /* [26] me_merge_small_mv_en_8 */ + (0x1 << 25) | /* [25] me_merge_flex_en_8 */ + (0x1 << 24) | /* [24] me_merge_sad_en_8 */ + (0x12 << 18) | + /* [23:18] me_merge_mv_diff_16 - MV diff + * <= n pixel can be merged + */ + (0x2b << 12) | + /* [17:12] me_merge_mv_diff_8 - MV diff + * <= n pixel can be merged + */ + (0x80 << 0); + /* [11:0] me_merge_min_sad - SAD + * >= 0x180 can be merged with other MV + */ + + me_mv_weight_01 = 
(ME_MV_STEP_WEIGHT_1 << 24) | + (ME_MV_PRE_WEIGHT_1 << 16) | + (ME_MV_STEP_WEIGHT_0 << 8) | + (ME_MV_PRE_WEIGHT_0 << 0); + + me_mv_weight_23 = (ME_MV_STEP_WEIGHT_3 << 24) | + (ME_MV_PRE_WEIGHT_3 << 16) | + (ME_MV_STEP_WEIGHT_2 << 8) | + (ME_MV_PRE_WEIGHT_2 << 0); + + me_sad_range_inc = (ME_SAD_RANGE_3 << 24) | + (ME_SAD_RANGE_2 << 16) | + (ME_SAD_RANGE_1 << 8) | + (ME_SAD_RANGE_0 << 0); + + me_step0_close_mv = (0x100 << 10) | + /* me_step0_big_sad -- two MV sad + * diff bigger will use use 1 + */ + (2 << 5) | /* me_step0_close_mv_y */ + (2 << 0); /* me_step0_close_mv_x */ + + me_f_skip_sad = (0x00 << 24) | /* force_skip_sad_3 */ + (STEP_2_SKIP_SAD << 16) | /* force_skip_sad_2 */ + (STEP_1_SKIP_SAD << 8) | /* force_skip_sad_1 */ + (STEP_0_SKIP_SAD << 0); /* force_skip_sad_0 */ + + me_f_skip_weight = (0x00 << 24) | /* force_skip_weight_3 */ + /* force_skip_weight_2 */ + (STEP_2_SKIP_WEIGHT << 16) | + /* force_skip_weight_1 */ + (STEP_1_SKIP_WEIGHT << 8) | + /* force_skip_weight_0 */ + (STEP_0_SKIP_WEIGHT << 0); + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) { + me_f_skip_sad = 0; + me_f_skip_weight = 0; + me_mv_weight_01 = 0; + me_mv_weight_23 = 0; + } + + me_sad_enough_01 = (ME_SAD_ENOUGH_1_DATA << 12) | + /* me_sad_enough_1 */ + (ME_SAD_ENOUGH_0_DATA << 0) | + /* me_sad_enough_0 */ + (0 << 12) | /* me_sad_enough_1 */ + (0 << 0); /* me_sad_enough_0 */ + + me_sad_enough_23 = (ADV_MV_8x8_ENOUGH_DATA << 12) | + /* adv_mv_8x8_enough */ + (ME_SAD_ENOUGH_2_DATA << 0) | + /* me_sad_enough_2 */ + (0 << 12) | /* me_sad_enough_3 */ + (0 << 0); /* me_sad_enough_2 */ +} + +/*output stream buffer setting*/ +static void avc_init_output_buffer(struct encode_wq_s *wq) +{ + WRITE_HREG(HCODEC_VLC_VB_MEM_CTL, + ((1 << 31) | (0x3f << 24) | + (0x20 << 16) | (2 << 0))); + WRITE_HREG(HCODEC_VLC_VB_START_PTR, + wq->mem.BitstreamStart); + WRITE_HREG(HCODEC_VLC_VB_WR_PTR, + wq->mem.BitstreamStart); + WRITE_HREG(HCODEC_VLC_VB_SW_RD_PTR, + wq->mem.BitstreamStart); + 
WRITE_HREG(HCODEC_VLC_VB_END_PTR, + wq->mem.BitstreamEnd); + WRITE_HREG(HCODEC_VLC_VB_CONTROL, 1); + WRITE_HREG(HCODEC_VLC_VB_CONTROL, + ((0 << 14) | (7 << 3) | + (1 << 1) | (0 << 0))); +} + +/*input dct buffer setting*/ +static void avc_init_input_buffer(struct encode_wq_s *wq) +{ + WRITE_HREG(HCODEC_QDCT_MB_START_PTR, + wq->mem.dct_buff_start_addr); + WRITE_HREG(HCODEC_QDCT_MB_END_PTR, + wq->mem.dct_buff_end_addr); + WRITE_HREG(HCODEC_QDCT_MB_WR_PTR, + wq->mem.dct_buff_start_addr); + WRITE_HREG(HCODEC_QDCT_MB_RD_PTR, + wq->mem.dct_buff_start_addr); + WRITE_HREG(HCODEC_QDCT_MB_BUFF, 0); +} + +/*input reference buffer setting*/ +static void avc_init_reference_buffer(s32 canvas) +{ + WRITE_HREG(HCODEC_ANC0_CANVAS_ADDR, canvas); + WRITE_HREG(HCODEC_VLC_HCMD_CONFIG, 0); +} + +static void avc_init_assit_buffer(struct encode_wq_s *wq) +{ + WRITE_HREG(MEM_OFFSET_REG, wq->mem.assit_buffer_offset); +} + +/*deblock buffer setting, same as INI_CANVAS*/ +static void avc_init_dblk_buffer(s32 canvas) +{ + WRITE_HREG(HCODEC_REC_CANVAS_ADDR, canvas); + WRITE_HREG(HCODEC_DBKR_CANVAS_ADDR, canvas); + WRITE_HREG(HCODEC_DBKW_CANVAS_ADDR, canvas); +} + +static void avc_init_encoder(struct encode_wq_s *wq, bool idr) +{ + WRITE_HREG(HCODEC_VLC_TOTAL_BYTES, 0); + WRITE_HREG(HCODEC_VLC_CONFIG, 0x07); + WRITE_HREG(HCODEC_VLC_INT_CONTROL, 0); + + WRITE_HREG(HCODEC_ASSIST_AMR1_INT0, 0x15); + WRITE_HREG(HCODEC_ASSIST_AMR1_INT1, 0x8); + WRITE_HREG(HCODEC_ASSIST_AMR1_INT3, 0x14); + + WRITE_HREG(IDR_PIC_ID, wq->pic.idr_pic_id); + WRITE_HREG(FRAME_NUMBER, + (idr == true) ? 0 : wq->pic.frame_number); + WRITE_HREG(PIC_ORDER_CNT_LSB, + (idr == true) ? 
0 : wq->pic.pic_order_cnt_lsb); + + WRITE_HREG(LOG2_MAX_PIC_ORDER_CNT_LSB, + wq->pic.log2_max_pic_order_cnt_lsb); + WRITE_HREG(LOG2_MAX_FRAME_NUM, + wq->pic.log2_max_frame_num); + WRITE_HREG(ANC0_BUFFER_ID, 0); + WRITE_HREG(QPPICTURE, wq->pic.init_qppicture); +} + +static void avc_canvas_init(struct encode_wq_s *wq) +{ + u32 canvas_width, canvas_height; + u32 start_addr = wq->mem.buf_start; + canvas_width = ((wq->pic.encoder_width + 31) >> 5) << 5; + canvas_height = ((wq->pic.encoder_height + 15) >> 4) << 4; + + canvas_config_proxy(ENC_CANVAS_OFFSET, + start_addr + wq->mem.bufspec.dec0_y.buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(1 + ENC_CANVAS_OFFSET, + start_addr + wq->mem.bufspec.dec0_uv.buf_start, + canvas_width, canvas_height / 2, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); + /*here the third plane use the same address as the second plane*/ + canvas_config_proxy(2 + ENC_CANVAS_OFFSET, + start_addr + wq->mem.bufspec.dec0_uv.buf_start, + canvas_width, canvas_height / 2, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); + + canvas_config_proxy(3 + ENC_CANVAS_OFFSET, + start_addr + wq->mem.bufspec.dec1_y.buf_start, + canvas_width, canvas_height, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(4 + ENC_CANVAS_OFFSET, + start_addr + wq->mem.bufspec.dec1_uv.buf_start, + canvas_width, canvas_height / 2, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); + /*here the third plane use the same address as the second plane*/ + canvas_config_proxy(5 + ENC_CANVAS_OFFSET, + start_addr + wq->mem.bufspec.dec1_uv.buf_start, + canvas_width, canvas_height / 2, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); +} + +static void avc_buffspec_init(struct encode_wq_s *wq) +{ + u32 canvas_width, canvas_height; + u32 start_addr = wq->mem.buf_start; + u32 mb_w = (wq->pic.encoder_width + 15) >> 4; + u32 mb_h = (wq->pic.encoder_height + 15) >> 4; + u32 mbs = mb_w * mb_h; + + canvas_width = ((wq->pic.encoder_width + 
31) >> 5) << 5; + canvas_height = ((wq->pic.encoder_height + 15) >> 4) << 4; + + wq->mem.dct_buff_start_addr = start_addr + + wq->mem.bufspec.dct.buf_start; + wq->mem.dct_buff_end_addr = + wq->mem.dct_buff_start_addr + + wq->mem.bufspec.dct.buf_size - 1; + enc_pr(LOG_INFO, "dct_buff_start_addr is 0x%x, wq:%p.\n", + wq->mem.dct_buff_start_addr, (void *)wq); + + wq->mem.bufspec.dec0_uv.buf_start = + wq->mem.bufspec.dec0_y.buf_start + + canvas_width * canvas_height; + wq->mem.bufspec.dec0_uv.buf_size = canvas_width * canvas_height / 2; + wq->mem.bufspec.dec1_uv.buf_start = + wq->mem.bufspec.dec1_y.buf_start + + canvas_width * canvas_height; + wq->mem.bufspec.dec1_uv.buf_size = canvas_width * canvas_height / 2; + wq->mem.assit_buffer_offset = start_addr + + wq->mem.bufspec.assit.buf_start; + enc_pr(LOG_INFO, "assit_buffer_offset is 0x%x, wq: %p.\n", + wq->mem.assit_buffer_offset, (void *)wq); + /*output stream buffer config*/ + wq->mem.BitstreamStart = start_addr + + wq->mem.bufspec.bitstream.buf_start; + wq->mem.BitstreamEnd = + wq->mem.BitstreamStart + + wq->mem.bufspec.bitstream.buf_size - 1; + enc_pr(LOG_INFO, "BitstreamStart is 0x%x, wq: %p.\n", + wq->mem.BitstreamStart, (void *)wq); + + wq->mem.scaler_buff_start_addr = + wq->mem.buf_start + wq->mem.bufspec.scale_buff.buf_start; + wq->mem.dump_info_ddr_start_addr = + wq->mem.buf_start + wq->mem.bufspec.dump_info.buf_start; + enc_pr(LOG_INFO, + "CBR: dump_info_ddr_start_addr:%x.\n", + wq->mem.dump_info_ddr_start_addr); + enc_pr(LOG_INFO, "CBR: buf_start :%d.\n", + wq->mem.buf_start); + enc_pr(LOG_INFO, "CBR: dump_info.buf_start :%d.\n", + wq->mem.bufspec.dump_info.buf_start); + wq->mem.dump_info_ddr_size = + DUMP_INFO_BYTES_PER_MB * mbs; + wq->mem.dump_info_ddr_size = + (wq->mem.dump_info_ddr_size + PAGE_SIZE - 1) + & ~(PAGE_SIZE - 1); + wq->mem.cbr_info_ddr_start_addr = + wq->mem.buf_start + wq->mem.bufspec.cbr_info.buf_start; + wq->mem.cbr_info_ddr_size = + wq->mem.bufspec.cbr_info.buf_size; + 
wq->mem.cbr_info_ddr_virt_addr = + codec_mm_vmap(wq->mem.cbr_info_ddr_start_addr, + wq->mem.bufspec.cbr_info.buf_size); + + wq->mem.dblk_buf_canvas = + ((ENC_CANVAS_OFFSET + 2) << 16) | + ((ENC_CANVAS_OFFSET + 1) << 8) | + (ENC_CANVAS_OFFSET); + wq->mem.ref_buf_canvas = + ((ENC_CANVAS_OFFSET + 5) << 16) | + ((ENC_CANVAS_OFFSET + 4) << 8) | + (ENC_CANVAS_OFFSET + 3); +} + +static void avc_init_ie_me_parameter(struct encode_wq_s *wq, u32 quant) +{ + ie_cur_ref_sel = 0; + ie_pippeline_block = 12; + /* currently disable half and sub pixel */ + ie_me_mode = + (ie_pippeline_block & IE_PIPPELINE_BLOCK_MASK) << + IE_PIPPELINE_BLOCK_SHIFT; + + WRITE_HREG(IE_ME_MODE, ie_me_mode); + WRITE_HREG(IE_REF_SEL, ie_cur_ref_sel); + WRITE_HREG(IE_ME_MB_TYPE, ie_me_mb_type); +#ifdef MULTI_SLICE_MC + if (fixed_slice_cfg) + WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg); + else if (wq->pic.rows_per_slice != + (wq->pic.encoder_height + 15) >> 4) { + u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4; + + mb_per_slice = mb_per_slice * wq->pic.rows_per_slice; + WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice); + } else + WRITE_HREG(FIXED_SLICE_CFG, 0); +#else + WRITE_HREG(FIXED_SLICE_CFG, 0); +#endif +} + +/* for temp */ +#define HCODEC_MFDIN_REGC_MBLP (HCODEC_MFDIN_REGB_AMPC + 0x1) +#define HCODEC_MFDIN_REG0D (HCODEC_MFDIN_REGB_AMPC + 0x2) +#define HCODEC_MFDIN_REG0E (HCODEC_MFDIN_REGB_AMPC + 0x3) +#define HCODEC_MFDIN_REG0F (HCODEC_MFDIN_REGB_AMPC + 0x4) +#define HCODEC_MFDIN_REG10 (HCODEC_MFDIN_REGB_AMPC + 0x5) +#define HCODEC_MFDIN_REG11 (HCODEC_MFDIN_REGB_AMPC + 0x6) +#define HCODEC_MFDIN_REG12 (HCODEC_MFDIN_REGB_AMPC + 0x7) +#define HCODEC_MFDIN_REG13 (HCODEC_MFDIN_REGB_AMPC + 0x8) +#define HCODEC_MFDIN_REG14 (HCODEC_MFDIN_REGB_AMPC + 0x9) +#define HCODEC_MFDIN_REG15 (HCODEC_MFDIN_REGB_AMPC + 0xa) +#define HCODEC_MFDIN_REG16 (HCODEC_MFDIN_REGB_AMPC + 0xb) + +static void mfdin_basic(u32 input, u8 iformat, + u8 oformat, u32 picsize_x, u32 picsize_y, + u8 r2y_en, u8 nr, u8 ifmt_extra) +{ + u8 
dsample_en; /* Downsample Enable */ + u8 interp_en; /* Interpolation Enable */ + u8 y_size; /* 0:16 Pixels for y direction pickup; 1:8 pixels */ + u8 r2y_mode; /* RGB2YUV Mode, range(0~3) */ + /* mfdin_reg3_canv[25:24]; + * // bytes per pixel in x direction for index0, 0:half 1:1 2:2 3:3 + */ + u8 canv_idx0_bppx; + /* mfdin_reg3_canv[27:26]; + * // bytes per pixel in x direction for index1-2, 0:half 1:1 2:2 3:3 + */ + u8 canv_idx1_bppx; + /* mfdin_reg3_canv[29:28]; + * // bytes per pixel in y direction for index0, 0:half 1:1 2:2 3:3 + */ + u8 canv_idx0_bppy; + /* mfdin_reg3_canv[31:30]; + * // bytes per pixel in y direction for index1-2, 0:half 1:1 2:2 3:3 + */ + u8 canv_idx1_bppy; + u8 ifmt444, ifmt422, ifmt420, linear_bytes4p; + u8 nr_enable; + u8 cfg_y_snr_en; + u8 cfg_y_tnr_en; + u8 cfg_c_snr_en; + u8 cfg_c_tnr_en; + u32 linear_bytesperline; + s32 reg_offset; + bool linear_enable = false; + bool format_err = false; + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) { + if ((iformat == 7) && (ifmt_extra > 2)) + format_err = true; + } else if (iformat == 7) + format_err = true; + + if (format_err) { + enc_pr(LOG_ERROR, + "mfdin format err, iformat:%d, ifmt_extra:%d\n", + iformat, ifmt_extra); + return; + } + if (iformat != 7) + ifmt_extra = 0; + + ifmt444 = ((iformat == 1) || (iformat == 5) || (iformat == 8) || + (iformat == 9) || (iformat == 12)) ? 1 : 0; + if (iformat == 7 && ifmt_extra == 1) + ifmt444 = 1; + ifmt422 = ((iformat == 0) || (iformat == 10)) ? 1 : 0; + if (iformat == 7 && ifmt_extra != 1) + ifmt422 = 1; + ifmt420 = ((iformat == 2) || (iformat == 3) || (iformat == 4) || + (iformat == 11)) ? 1 : 0; + dsample_en = ((ifmt444 && (oformat != 2)) || + (ifmt422 && (oformat == 0))) ? 1 : 0; + interp_en = ((ifmt422 && (oformat == 2)) || + (ifmt420 && (oformat != 0))) ? 1 : 0; + y_size = (oformat != 0) ? 1 : 0; + if (iformat == 12) + y_size = 0; + r2y_mode = (r2y_en == 1) ? 1 : 0; /* Fixed to 1 (TODO) */ + canv_idx0_bppx = (iformat == 1) ? 3 : (iformat == 0) ? 
2 : 1; + canv_idx1_bppx = (iformat == 4) ? 0 : 1; + canv_idx0_bppy = 1; + canv_idx1_bppy = (iformat == 5) ? 1 : 0; + + if ((iformat == 8) || (iformat == 9) || (iformat == 12)) + linear_bytes4p = 3; + else if (iformat == 10) + linear_bytes4p = 2; + else if (iformat == 11) + linear_bytes4p = 1; + else + linear_bytes4p = 0; + if (iformat == 12) + linear_bytesperline = picsize_x * 4; + else + linear_bytesperline = picsize_x * linear_bytes4p; + + if (iformat < 8) + linear_enable = false; + else + linear_enable = true; + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXBB) { + reg_offset = -8; + /* nr_mode: 0:Disabled 1:SNR Only 2:TNR Only 3:3DNR */ + nr_enable = (nr) ? 1 : 0; + cfg_y_snr_en = ((nr == 1) || (nr == 3)) ? 1 : 0; + cfg_y_tnr_en = ((nr == 2) || (nr == 3)) ? 1 : 0; + cfg_c_snr_en = cfg_y_snr_en; + /* cfg_c_tnr_en = cfg_y_tnr_en; */ + cfg_c_tnr_en = 0; + + /* NR For Y */ + WRITE_HREG((HCODEC_MFDIN_REG0D + reg_offset), + ((cfg_y_snr_en << 0) | + (y_snr_err_norm << 1) | + (y_snr_gau_bld_core << 2) | + (((y_snr_gau_bld_ofst) & 0xff) << 6) | + (y_snr_gau_bld_rate << 14) | + (y_snr_gau_alp0_min << 20) | + (y_snr_gau_alp0_max << 26))); + WRITE_HREG((HCODEC_MFDIN_REG0E + reg_offset), + ((cfg_y_tnr_en << 0) | + (y_tnr_mc_en << 1) | + (y_tnr_txt_mode << 2) | + (y_tnr_mot_sad_margin << 3) | + (y_tnr_alpha_min << 7) | + (y_tnr_alpha_max << 13) | + (y_tnr_deghost_os << 19))); + WRITE_HREG((HCODEC_MFDIN_REG0F + reg_offset), + ((y_tnr_mot_cortxt_rate << 0) | + (y_tnr_mot_distxt_ofst << 8) | + (y_tnr_mot_distxt_rate << 4) | + (y_tnr_mot_dismot_ofst << 16) | + (y_tnr_mot_frcsad_lock << 24))); + WRITE_HREG((HCODEC_MFDIN_REG10 + reg_offset), + ((y_tnr_mot2alp_frc_gain << 0) | + (y_tnr_mot2alp_nrm_gain << 8) | + (y_tnr_mot2alp_dis_gain << 16) | + (y_tnr_mot2alp_dis_ofst << 24))); + WRITE_HREG((HCODEC_MFDIN_REG11 + reg_offset), + ((y_bld_beta2alp_rate << 0) | + (y_bld_beta_min << 8) | + (y_bld_beta_max << 14))); + + /* NR For C */ + WRITE_HREG((HCODEC_MFDIN_REG12 + reg_offset), + 
((cfg_y_snr_en << 0) | + (c_snr_err_norm << 1) | + (c_snr_gau_bld_core << 2) | + (((c_snr_gau_bld_ofst) & 0xff) << 6) | + (c_snr_gau_bld_rate << 14) | + (c_snr_gau_alp0_min << 20) | + (c_snr_gau_alp0_max << 26))); + + WRITE_HREG((HCODEC_MFDIN_REG13 + reg_offset), + ((cfg_c_tnr_en << 0) | + (c_tnr_mc_en << 1) | + (c_tnr_txt_mode << 2) | + (c_tnr_mot_sad_margin << 3) | + (c_tnr_alpha_min << 7) | + (c_tnr_alpha_max << 13) | + (c_tnr_deghost_os << 19))); + WRITE_HREG((HCODEC_MFDIN_REG14 + reg_offset), + ((c_tnr_mot_cortxt_rate << 0) | + (c_tnr_mot_distxt_ofst << 8) | + (c_tnr_mot_distxt_rate << 4) | + (c_tnr_mot_dismot_ofst << 16) | + (c_tnr_mot_frcsad_lock << 24))); + WRITE_HREG((HCODEC_MFDIN_REG15 + reg_offset), + ((c_tnr_mot2alp_frc_gain << 0) | + (c_tnr_mot2alp_nrm_gain << 8) | + (c_tnr_mot2alp_dis_gain << 16) | + (c_tnr_mot2alp_dis_ofst << 24))); + + WRITE_HREG((HCODEC_MFDIN_REG16 + reg_offset), + ((c_bld_beta2alp_rate << 0) | + (c_bld_beta_min << 8) | + (c_bld_beta_max << 14))); + + WRITE_HREG((HCODEC_MFDIN_REG1_CTRL + reg_offset), + (iformat << 0) | (oformat << 4) | + (dsample_en << 6) | (y_size << 8) | + (interp_en << 9) | (r2y_en << 12) | + (r2y_mode << 13) | (ifmt_extra << 16) | + (nr_enable << 19)); + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset), + (picsize_x << 16) | (picsize_y << 0)); + } else { + WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset), + (picsize_x << 14) | (picsize_y << 0)); + } + } else { + reg_offset = 0; + WRITE_HREG((HCODEC_MFDIN_REG1_CTRL + reg_offset), + (iformat << 0) | (oformat << 4) | + (dsample_en << 6) | (y_size << 8) | + (interp_en << 9) | (r2y_en << 12) | + (r2y_mode << 13)); + + WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset), + (picsize_x << 12) | (picsize_y << 0)); + } + + if (linear_enable == false) { + WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset), + (input & 0xffffff) | + (canv_idx1_bppy << 30) | + (canv_idx0_bppy << 28) | + (canv_idx1_bppx << 26) | + (canv_idx0_bppx 
<< 24)); + WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset), + (0 << 16) | (0 << 0)); + WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), 0); + } else { + WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset), + (canv_idx1_bppy << 30) | + (canv_idx0_bppy << 28) | + (canv_idx1_bppx << 26) | + (canv_idx0_bppx << 24)); + WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset), + (linear_bytes4p << 16) | (linear_bytesperline << 0)); + WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), input); + } + + if (iformat == 12) + WRITE_HREG((HCODEC_MFDIN_REG9_ENDN + reg_offset), + (2 << 0) | (1 << 3) | (0 << 6) | + (3 << 9) | (6 << 12) | (5 << 15) | + (4 << 18) | (7 << 21)); + else + WRITE_HREG((HCODEC_MFDIN_REG9_ENDN + reg_offset), + (7 << 0) | (6 << 3) | (5 << 6) | + (4 << 9) | (3 << 12) | (2 << 15) | + (1 << 18) | (0 << 21)); +} + +#ifdef CONFIG_AMLOGIC_MEDIA_GE2D +static int scale_frame(struct encode_wq_s *wq, + struct encode_request_s *request, + struct config_para_ex_s *ge2d_config, + u32 src_addr, bool canvas) +{ + struct ge2d_context_s *context = encode_manager.context; + int src_top, src_left, src_width, src_height; + struct canvas_s cs0, cs1, cs2, cd; + u32 src_canvas, dst_canvas; + u32 src_canvas_w, dst_canvas_w; + u32 src_h = request->src_h; + u32 dst_w = ((wq->pic.encoder_width + 15) >> 4) << 4; + u32 dst_h = ((wq->pic.encoder_height + 15) >> 4) << 4; + int input_format = GE2D_FORMAT_M24_NV21; + + src_top = request->crop_top; + src_left = request->crop_left; + src_width = request->src_w - src_left - request->crop_right; + src_height = request->src_h - src_top - request->crop_bottom; + enc_pr(LOG_INFO, "request->fmt=%d, %d %d, canvas=%d\n", request->fmt, FMT_NV21, FMT_BGR888, canvas); + + if (canvas) { + if ((request->fmt == FMT_NV21) + || (request->fmt == FMT_NV12)) { + src_canvas = src_addr & 0xffff; + input_format = GE2D_FORMAT_M24_NV21; + } else if (request->fmt == FMT_BGR888) { + src_canvas = src_addr & 0xffffff; + input_format = GE2D_FORMAT_S24_RGB; //Opposite color after 
ge2d + } else if (request->fmt == FMT_RGBA8888) { + src_canvas = src_addr & 0xffffff; + input_format = GE2D_FORMAT_S32_ABGR; + } else { + src_canvas = src_addr & 0xffffff; + input_format = GE2D_FORMAT_M24_YUV420; + } + } else { + if ((request->fmt == FMT_NV21) + || (request->fmt == FMT_NV12)) { + src_canvas_w = + ((request->src_w + 31) >> 5) << 5; + canvas_config(ENC_CANVAS_OFFSET + 9, + src_addr, + src_canvas_w, src_h, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config(ENC_CANVAS_OFFSET + 10, + src_addr + src_canvas_w * src_h, + src_canvas_w, src_h / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + src_canvas = + ((ENC_CANVAS_OFFSET + 10) << 8) + | (ENC_CANVAS_OFFSET + 9); + input_format = GE2D_FORMAT_M24_NV21; + } else if (request->fmt == FMT_BGR888) { + src_canvas_w = + ((request->src_w + 31) >> 5) << 5; + + canvas_config(ENC_CANVAS_OFFSET + 9, + src_addr, + src_canvas_w * 3, src_h, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + src_canvas = ENC_CANVAS_OFFSET + 9; + input_format = GE2D_FORMAT_S24_RGB; //Opposite color after ge2d + } else if (request->fmt == FMT_RGBA8888) { + src_canvas_w = + ((request->src_w + 31) >> 5) << 5; + canvas_config( + ENC_CANVAS_OFFSET + 9, + src_addr, + src_canvas_w * 4, + src_h, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + src_canvas = ENC_CANVAS_OFFSET + 9; + input_format = GE2D_FORMAT_S32_ABGR; //Opposite color after ge2d + } else { + src_canvas_w = + ((request->src_w + 63) >> 6) << 6; + canvas_config(ENC_CANVAS_OFFSET + 9, + src_addr, + src_canvas_w, src_h, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config(ENC_CANVAS_OFFSET + 10, + src_addr + src_canvas_w * src_h, + src_canvas_w / 2, src_h / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config(ENC_CANVAS_OFFSET + 11, + src_addr + src_canvas_w * src_h * 5 / 4, + src_canvas_w / 2, src_h / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + src_canvas = + ((ENC_CANVAS_OFFSET + 11) << 16) | + ((ENC_CANVAS_OFFSET + 10) << 8) | + 
(ENC_CANVAS_OFFSET + 9); + input_format = GE2D_FORMAT_M24_YUV420; + } + } + + dst_canvas_w = ((dst_w + 31) >> 5) << 5; + + canvas_config(ENC_CANVAS_OFFSET + 6, + wq->mem.scaler_buff_start_addr, + dst_canvas_w, dst_h, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); + + canvas_config(ENC_CANVAS_OFFSET + 7, + wq->mem.scaler_buff_start_addr + dst_canvas_w * dst_h, + dst_canvas_w, dst_h / 2, + CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); + + dst_canvas = ((ENC_CANVAS_OFFSET + 7) << 8) | + (ENC_CANVAS_OFFSET + 6); + + ge2d_config->alu_const_color = 0; + ge2d_config->bitmask_en = 0; + ge2d_config->src1_gb_alpha = 0; + ge2d_config->dst_xy_swap = 0; + canvas_read(src_canvas & 0xff, &cs0); + canvas_read((src_canvas >> 8) & 0xff, &cs1); + canvas_read((src_canvas >> 16) & 0xff, &cs2); + ge2d_config->src_planes[0].addr = cs0.addr; + ge2d_config->src_planes[0].w = dst_w * 4;//cs0.width; + ge2d_config->src_planes[0].h = dst_h;//cs0.height; + ge2d_config->src_planes[1].addr = cs1.addr; + ge2d_config->src_planes[1].w = cs1.width; + ge2d_config->src_planes[1].h = cs1.height; + ge2d_config->src_planes[2].addr = cs2.addr; + ge2d_config->src_planes[2].w = cs2.width; + ge2d_config->src_planes[2].h = cs2.height; + + canvas_read(dst_canvas & 0xff, &cd); + + ge2d_config->dst_planes[0].addr = cd.addr; + ge2d_config->dst_planes[0].w = dst_w * 4;//cd.width; + ge2d_config->dst_planes[0].h = dst_h;//cd.height; + ge2d_config->src_key.key_enable = 0; + ge2d_config->src_key.key_mask = 0; + ge2d_config->src_key.key_mode = 0; + ge2d_config->src_para.canvas_index = src_canvas; + ge2d_config->src_para.mem_type = CANVAS_TYPE_INVALID; + ge2d_config->src_para.format = input_format | GE2D_LITTLE_ENDIAN; + ge2d_config->src_para.fill_color_en = 0; + ge2d_config->src_para.fill_mode = 0; + ge2d_config->src_para.x_rev = 0; + ge2d_config->src_para.y_rev = 0; + ge2d_config->src_para.color = 0xffffffff; + ge2d_config->src_para.top = 0; + ge2d_config->src_para.left = 0; + ge2d_config->src_para.width = 
dst_w;//request->src_w; + ge2d_config->src_para.height = dst_h;//request->src_h; + ge2d_config->src2_para.mem_type = CANVAS_TYPE_INVALID; + ge2d_config->dst_para.canvas_index = dst_canvas; + ge2d_config->dst_para.mem_type = CANVAS_TYPE_INVALID; + ge2d_config->dst_para.format = + GE2D_FORMAT_M24_NV21 | GE2D_LITTLE_ENDIAN; + + if (wq->pic.encoder_width >= 1280 && wq->pic.encoder_height >= 720) { + ge2d_config->dst_para.format |= wq->pic.color_space; + } + + ge2d_config->dst_para.fill_color_en = 0; + ge2d_config->dst_para.fill_mode = 0; + ge2d_config->dst_para.x_rev = 0; + ge2d_config->dst_para.y_rev = 0; + ge2d_config->dst_para.color = 0; + ge2d_config->dst_para.top = 0; + ge2d_config->dst_para.left = 0; + ge2d_config->dst_para.width = dst_w; + ge2d_config->dst_para.height = dst_h; + ge2d_config->dst_para.x_rev = 0; + ge2d_config->dst_para.y_rev = 0; + + + if (ge2d_context_config_ex(context, ge2d_config) < 0) { + pr_err("++ge2d configing error.\n"); + return -1; + } + stretchblt_noalpha(context, src_left, src_top, src_width, src_height, + 0, 0, wq->pic.encoder_width, wq->pic.encoder_height); + return dst_canvas_w*dst_h * 3 / 2; +} +#endif + +static s32 dump_raw_input(struct encode_wq_s *wq, struct encode_request_s *request) { + u8 *data; + struct canvas_s cs0, cs1;//, cs2 + u32 y_addr, uv_addr, canvas_w, picsize_y; + u32 input = request->src; + //u8 iformat = MAX_FRAME_FMT; + struct file *filp; + if (request->type == CANVAS_BUFF) { + if ((request->fmt == FMT_NV21) || (request->fmt == FMT_NV12)) { + input = input & 0xffff; + canvas_read(input & 0xff, &cs0); + canvas_read((input >> 8) & 0xff, &cs1); + pr_err("dump raw input for canvas source\n"); + y_addr = cs0.addr; + uv_addr = cs1.addr; + + canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5; + picsize_y = wq->pic.encoder_height; + + data = (u8*)phys_to_virt(y_addr); + filp = file_open("/data/encoder.yuv", O_APPEND | O_RDWR, 0644); + if (filp) { + file_write(filp, 0, data, canvas_w * picsize_y); + file_sync(filp); + 
file_close(filp); + } else + pr_err("open encoder.yuv failed\n"); + + } + } + return 0; +} + +static s32 set_input_format(struct encode_wq_s *wq, + struct encode_request_s *request) +{ + s32 ret = 0; + u8 iformat = MAX_FRAME_FMT, oformat = MAX_FRAME_FMT, r2y_en = 0; + u32 picsize_x, picsize_y, src_addr; + u32 canvas_w = 0; + u32 input = request->src; + u32 input_y = 0; + u32 input_u = 0; + u32 input_v = 0; + u8 ifmt_extra = 0; + + if ((request->fmt == FMT_RGB565) || (request->fmt >= MAX_FRAME_FMT)) + return -1; + + if (dump_input) + dump_raw_input(wq, request); + + picsize_x = ((wq->pic.encoder_width + 15) >> 4) << 4; + picsize_y = ((wq->pic.encoder_height + 15) >> 4) << 4; + oformat = 0; + + if ((request->type == LOCAL_BUFF) + || (request->type == PHYSICAL_BUFF) + || (request->type == DMA_BUFF)) { + if ((request->type == LOCAL_BUFF) && + (request->flush_flag & AMVENC_FLUSH_FLAG_INPUT)) + dma_flush(wq->mem.dct_buff_start_addr, + request->framesize); + if (request->type == LOCAL_BUFF) { + input = wq->mem.dct_buff_start_addr; + src_addr = + wq->mem.dct_buff_start_addr; + } else if (request->type == DMA_BUFF) { + if (request->plane_num == 3) { + input_y = (unsigned long)request->dma_cfg[0].paddr; + input_u = (unsigned long)request->dma_cfg[1].paddr; + input_v = (unsigned long)request->dma_cfg[2].paddr; + } else if (request->plane_num == 2) { + input_y = (unsigned long)request->dma_cfg[0].paddr; + input_u = (unsigned long)request->dma_cfg[1].paddr; + input_v = input_u; + } else if (request->plane_num == 1) { + input_y = (unsigned long)request->dma_cfg[0].paddr; + if (request->fmt == FMT_NV21 + || request->fmt == FMT_NV12) { + input_u = input_y + picsize_x * picsize_y; + input_v = input_u; + } + if (request->fmt == FMT_YUV420) { + input_u = input_y + picsize_x * picsize_y; + input_v = input_u + picsize_x * picsize_y / 4; + } + } + src_addr = input_y; + picsize_y = wq->pic.encoder_height; + enc_pr(LOG_INFO, "dma addr[0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx]\n", + (unsigned 
long)request->dma_cfg[0].vaddr, + (unsigned long)request->dma_cfg[0].paddr, + (unsigned long)request->dma_cfg[1].vaddr, + (unsigned long)request->dma_cfg[1].paddr, + (unsigned long)request->dma_cfg[2].vaddr, + (unsigned long)request->dma_cfg[2].paddr); + } else { + src_addr = input; + picsize_y = wq->pic.encoder_height; + } + if (request->scale_enable) { +#ifdef CONFIG_AMLOGIC_MEDIA_GE2D + struct config_para_ex_s ge2d_config; + + memset(&ge2d_config, 0, + sizeof(struct config_para_ex_s)); + scale_frame( + wq, request, + &ge2d_config, + src_addr, + false); + iformat = 2; + r2y_en = 0; + input = ((ENC_CANVAS_OFFSET + 7) << 8) | + (ENC_CANVAS_OFFSET + 6); + ret = 0; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) { + /* + * for t3, after scaling before goto MFDIN, need to config canvas with scaler buffer + * */ + enc_pr(LOG_INFO, "reconfig with scaler buffer\n"); + canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5; + iformat = 2; + + canvas_config_proxy(ENC_CANVAS_OFFSET + 6, + wq->mem.scaler_buff_start_addr, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 7, + wq->mem.scaler_buff_start_addr + canvas_w * picsize_y, + canvas_w, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + + input = ((ENC_CANVAS_OFFSET + 7) << 8) | + (ENC_CANVAS_OFFSET + 6); + } + + goto MFDIN; +#else + enc_pr(LOG_ERROR, + "Warning: need enable ge2d for scale frame!\n"); + return -1; +#endif + } + if ((request->fmt <= FMT_YUV444_PLANE) || + (request->fmt >= FMT_YUV422_12BIT)) + r2y_en = 0; + else + r2y_en = 1; + + if (request->fmt >= FMT_YUV422_12BIT) { + iformat = 7; + ifmt_extra = request->fmt - FMT_YUV422_12BIT; + if (request->fmt == FMT_YUV422_12BIT) + canvas_w = picsize_x * 24 / 8; + else if (request->fmt == FMT_YUV444_10BIT) + canvas_w = picsize_x * 32 / 8; + else + canvas_w = (picsize_x * 20 + 7) / 8; + canvas_w = ((canvas_w + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET + 6, + input, + 
canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ENC_CANVAS_OFFSET + 6; + input = input & 0xff; + } else if (request->fmt == FMT_YUV422_SINGLE) + iformat = 10; + else if ((request->fmt == FMT_YUV444_SINGLE) + || (request->fmt == FMT_RGB888)) { + iformat = 1; + if (request->fmt == FMT_RGB888) + r2y_en = 1; + canvas_w = picsize_x * 3; + canvas_w = ((canvas_w + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET + 6, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ENC_CANVAS_OFFSET + 6; + } else if ((request->fmt == FMT_NV21) + || (request->fmt == FMT_NV12)) { + canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5; + iformat = (request->fmt == FMT_NV21) ? 2 : 3; + if (request->type == DMA_BUFF) { + canvas_config_proxy(ENC_CANVAS_OFFSET + 6, + input_y, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 7, + input_u, + canvas_w, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + } else { + canvas_config_proxy(ENC_CANVAS_OFFSET + 6, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 7, + input + canvas_w * picsize_y, + canvas_w, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + } + input = ((ENC_CANVAS_OFFSET + 7) << 8) | + (ENC_CANVAS_OFFSET + 6); + } else if (request->fmt == FMT_YUV420) { + iformat = 4; + canvas_w = ((wq->pic.encoder_width + 63) >> 6) << 6; + if (request->type == DMA_BUFF) { + canvas_config_proxy(ENC_CANVAS_OFFSET + 6, + input_y, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 7, + input_u, + canvas_w / 2, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 8, + input_v, + canvas_w / 2, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + } else { + canvas_config_proxy(ENC_CANVAS_OFFSET + 6, 
+ input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 7, + input + canvas_w * picsize_y, + canvas_w / 2, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 8, + input + canvas_w * picsize_y * 5 / 4, + canvas_w / 2, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + + } + input = ((ENC_CANVAS_OFFSET + 8) << 16) | + ((ENC_CANVAS_OFFSET + 7) << 8) | + (ENC_CANVAS_OFFSET + 6); + } else if ((request->fmt == FMT_YUV444_PLANE) + || (request->fmt == FMT_RGB888_PLANE)) { + if (request->fmt == FMT_RGB888_PLANE) + r2y_en = 1; + iformat = 5; + canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET + 6, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 7, + input + canvas_w * picsize_y, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 8, + input + canvas_w * picsize_y * 2, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ((ENC_CANVAS_OFFSET + 8) << 16) | + ((ENC_CANVAS_OFFSET + 7) << 8) | + (ENC_CANVAS_OFFSET + 6); + } else if (request->fmt == FMT_RGBA8888) { + r2y_en = 1; + iformat = 12; + } + ret = 0; + } else if (request->type == CANVAS_BUFF) { + + r2y_en = 0; + if (request->scale_enable) { +#ifdef CONFIG_AMLOGIC_MEDIA_GE2D + struct config_para_ex_s ge2d_config; + memset(&ge2d_config, 0, + sizeof(struct config_para_ex_s)); + scale_frame( + wq, request, + &ge2d_config, + input, true); + iformat = 2; + r2y_en = 0; + input = ((ENC_CANVAS_OFFSET + 7) << 8) | + (ENC_CANVAS_OFFSET + 6); + ret = 0; + + /* + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) { + pr_info("reconfig with scaler buffer\n"); + canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5; + iformat = 2; + + canvas_config_proxy(ENC_CANVAS_OFFSET + 6, + 
wq->mem.scaler_buff_start_addr, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 7, + wq->mem.scaler_buff_start_addr + canvas_w * picsize_y, + canvas_w, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + + input = ((ENC_CANVAS_OFFSET + 7) << 8) | + (ENC_CANVAS_OFFSET + 6); + } + */ + + goto MFDIN; +#else + enc_pr(LOG_ERROR, + "Warning: need enable ge2d for scale frame!\n"); + return -1; +#endif + } + //pr_err("request->type=%u\n", request->type); + if (request->fmt == FMT_YUV422_SINGLE) { + iformat = 0; + input = input & 0xff; + } else if (request->fmt == FMT_YUV444_SINGLE) { + iformat = 1; + input = input & 0xff; + } else if ((request->fmt == FMT_NV21) + || (request->fmt == FMT_NV12)) { + iformat = (request->fmt == FMT_NV21) ? 2 : 3; + input = input & 0xffff; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) { + struct canvas_s cs0, cs1;//, cs2 + u32 y_addr, uv_addr, canvas_w, picsize_y; + u8 iformat = MAX_FRAME_FMT; + canvas_read(input & 0xff, &cs0); + canvas_read((input >> 8) & 0xff, &cs1); + //pr_err("t3 canvas source input reconfig\n"); + y_addr = cs0.addr; + uv_addr = cs1.addr; + + canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5; + picsize_y = wq->pic.encoder_height; + iformat = (request->fmt == FMT_NV21) ? 
2 : 3; + + canvas_config_proxy( + ENC_CANVAS_OFFSET + 6, + y_addr, + canvas_w, + picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + + canvas_config_proxy( + ENC_CANVAS_OFFSET + 7, + uv_addr, + canvas_w, + picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + + input = ((ENC_CANVAS_OFFSET + 7) << 8) | + (ENC_CANVAS_OFFSET + 6); + } + } else if (request->fmt == FMT_YUV420) { + iformat = 4; + input = input & 0xffffff; + } else if ((request->fmt == FMT_YUV444_PLANE) + || (request->fmt == FMT_RGB888_PLANE)) { + if (request->fmt == FMT_RGB888_PLANE) + r2y_en = 1; + iformat = 5; + input = input & 0xffffff; + } else if ((request->fmt == FMT_YUV422_12BIT) + || (request->fmt == FMT_YUV444_10BIT) + || (request->fmt == FMT_YUV422_10BIT)) { + iformat = 7; + ifmt_extra = request->fmt - FMT_YUV422_12BIT; + input = input & 0xff; + } else + ret = -1; + } +#ifdef CONFIG_AMLOGIC_MEDIA_GE2D +MFDIN: +#endif + if (ret == 0) + mfdin_basic(input, iformat, oformat, + picsize_x, picsize_y, r2y_en, + request->nr_mode, ifmt_extra); + return ret; +} + +#ifdef H264_ENC_CBR +static void ConvertTable2Risc(void *table, u32 len) +{ + u32 i, j; + u16 temp; + u16 *tbl = (u16 *)table; + + if ((len < 8) || (len % 8) || (!table)) { + enc_pr(LOG_ERROR, "ConvertTable2Risc tbl %p, len %d error\n", + table, len); + return; + } + for (i = 0; i < len / 8; i++) { + j = i << 2; + temp = tbl[j]; + tbl[j] = tbl[j + 3]; + tbl[j + 3] = temp; + + temp = tbl[j + 1]; + tbl[j + 1] = tbl[j + 2]; + tbl[j + 2] = temp; + } + +} +#endif + +static void avc_prot_init(struct encode_wq_s *wq, + struct encode_request_s *request, u32 quant, bool IDR) +{ + u32 data32; + u32 pic_width, pic_height; + u32 pic_mb_nr; + u32 pic_mbx, pic_mby; + u32 i_pic_qp, p_pic_qp; + u32 i_pic_qp_c, p_pic_qp_c; + u32 pic_width_in_mb; + u32 slice_qp; + + pic_width = wq->pic.encoder_width; + pic_height = wq->pic.encoder_height; + pic_mb_nr = 0; + pic_mbx = 0; + pic_mby = 0; + i_pic_qp = quant; + p_pic_qp = quant; + + 
pic_width_in_mb = (pic_width + 15) / 16; + WRITE_HREG(HCODEC_HDEC_MC_OMEM_AUTO, + (1 << 31) | /* use_omem_mb_xy */ + ((pic_width_in_mb - 1) << 16)); /* omem_max_mb_x */ + + WRITE_HREG(HCODEC_VLC_ADV_CONFIG, + /* early_mix_mc_hcmd -- will enable in P Picture */ + (0 << 10) | + (1 << 9) | /* update_top_left_mix */ + (1 << 8) | /* p_top_left_mix */ + /* mv_cal_mixed_type -- will enable in P Picture */ + (0 << 7) | + /* mc_hcmd_mixed_type -- will enable in P Picture */ + (0 << 6) | + (1 << 5) | /* use_separate_int_control */ + (1 << 4) | /* hcmd_intra_use_q_info */ + (1 << 3) | /* hcmd_left_use_prev_info */ + (1 << 2) | /* hcmd_use_q_info */ + (1 << 1) | /* use_q_delta_quant */ + /* detect_I16_from_I4 use qdct detected mb_type */ + (0 << 0)); + + WRITE_HREG(HCODEC_QDCT_ADV_CONFIG, + (1 << 29) | /* mb_info_latch_no_I16_pred_mode */ + (1 << 28) | /* ie_dma_mbxy_use_i_pred */ + (1 << 27) | /* ie_dma_read_write_use_ip_idx */ + (1 << 26) | /* ie_start_use_top_dma_count */ + (1 << 25) | /* i_pred_top_dma_rd_mbbot */ + (1 << 24) | /* i_pred_top_dma_wr_disable */ + /* i_pred_mix -- will enable in P Picture */ + (0 << 23) | + (1 << 22) | /* me_ab_rd_when_intra_in_p */ + (1 << 21) | /* force_mb_skip_run_when_intra */ + /* mc_out_mixed_type -- will enable in P Picture */ + (0 << 20) | + (1 << 19) | /* ie_start_when_quant_not_full */ + (1 << 18) | /* mb_info_state_mix */ + /* mb_type_use_mix_result -- will enable in P Picture */ + (0 << 17) | + /* me_cb_ie_read_enable -- will enable in P Picture */ + (0 << 16) | + /* ie_cur_data_from_me -- will enable in P Picture */ + (0 << 15) | + (1 << 14) | /* rem_per_use_table */ + (0 << 13) | /* q_latch_int_enable */ + (1 << 12) | /* q_use_table */ + (0 << 11) | /* q_start_wait */ + (1 << 10) | /* LUMA_16_LEFT_use_cur */ + (1 << 9) | /* DC_16_LEFT_SUM_use_cur */ + (1 << 8) | /* c_ref_ie_sel_cur */ + (0 << 7) | /* c_ipred_perfect_mode */ + (1 << 6) | /* ref_ie_ul_sel */ + (1 << 5) | /* mb_type_use_ie_result */ + (1 << 4) | /* 
detect_I16_from_I4 */ + (1 << 3) | /* ie_not_wait_ref_busy */ + (1 << 2) | /* ie_I16_enable */ + (3 << 0)); /* ie_done_sel // fastest when waiting */ + + if (request != NULL) { + WRITE_HREG(HCODEC_IE_WEIGHT, + (request->i16_weight << 16) | + (request->i4_weight << 0)); + WRITE_HREG(HCODEC_ME_WEIGHT, + (request->me_weight << 0)); + WRITE_HREG(HCODEC_SAD_CONTROL_0, + /* ie_sad_offset_I16 */ + (request->i16_weight << 16) | + /* ie_sad_offset_I4 */ + (request->i4_weight << 0)); + WRITE_HREG(HCODEC_SAD_CONTROL_1, + /* ie_sad_shift_I16 */ + (IE_SAD_SHIFT_I16 << 24) | + /* ie_sad_shift_I4 */ + (IE_SAD_SHIFT_I4 << 20) | + /* me_sad_shift_INTER */ + (ME_SAD_SHIFT_INTER << 16) | + /* me_sad_offset_INTER */ + (request->me_weight << 0)); + wq->me_weight = request->me_weight; + wq->i4_weight = request->i4_weight; + wq->i16_weight = request->i16_weight; + } else { + WRITE_HREG(HCODEC_IE_WEIGHT, + (I16MB_WEIGHT_OFFSET << 16) | + (I4MB_WEIGHT_OFFSET << 0)); + WRITE_HREG(HCODEC_ME_WEIGHT, + (ME_WEIGHT_OFFSET << 0)); + WRITE_HREG(HCODEC_SAD_CONTROL_0, + /* ie_sad_offset_I16 */ + (I16MB_WEIGHT_OFFSET << 16) | + /* ie_sad_offset_I4 */ + (I4MB_WEIGHT_OFFSET << 0)); + WRITE_HREG(HCODEC_SAD_CONTROL_1, + /* ie_sad_shift_I16 */ + (IE_SAD_SHIFT_I16 << 24) | + /* ie_sad_shift_I4 */ + (IE_SAD_SHIFT_I4 << 20) | + /* me_sad_shift_INTER */ + (ME_SAD_SHIFT_INTER << 16) | + /* me_sad_offset_INTER */ + (ME_WEIGHT_OFFSET << 0)); + } + + WRITE_HREG(HCODEC_ADV_MV_CTL0, + (ADV_MV_LARGE_16x8 << 31) | + (ADV_MV_LARGE_8x16 << 30) | + (ADV_MV_8x8_WEIGHT << 16) | /* adv_mv_8x8_weight */ + /* adv_mv_4x4x4_weight should be set bigger */ + (ADV_MV_4x4x4_WEIGHT << 0)); + WRITE_HREG(HCODEC_ADV_MV_CTL1, + /* adv_mv_16x16_weight */ + (ADV_MV_16x16_WEIGHT << 16) | + (ADV_MV_LARGE_16x16 << 15) | + (ADV_MV_16_8_WEIGHT << 0)); /* adv_mv_16_8_weight */ + + hcodec_prog_qtbl(wq); + if (IDR) { + i_pic_qp = + wq->quant_tbl_i4[0] & 0xff; + i_pic_qp += + wq->quant_tbl_i16[0] & 0xff; + i_pic_qp /= 2; + p_pic_qp = i_pic_qp; + 
} else { + i_pic_qp = + wq->quant_tbl_i4[0] & 0xff; + i_pic_qp += + wq->quant_tbl_i16[0] & 0xff; + p_pic_qp = wq->quant_tbl_me[0] & 0xff; + slice_qp = (i_pic_qp + p_pic_qp) / 3; + i_pic_qp = slice_qp; + p_pic_qp = i_pic_qp; + } +#ifdef H264_ENC_CBR + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) { + data32 = READ_HREG(HCODEC_SAD_CONTROL_1); + data32 = data32 & 0xffff; /* remove sad shift */ + WRITE_HREG(HCODEC_SAD_CONTROL_1, data32); + WRITE_HREG(H264_ENC_CBR_TABLE_ADDR, + wq->mem.cbr_info_ddr_start_addr); + WRITE_HREG(H264_ENC_CBR_MB_SIZE_ADDR, + wq->mem.cbr_info_ddr_start_addr + + CBR_TABLE_SIZE); + WRITE_HREG(H264_ENC_CBR_CTL, + (wq->cbr_info.start_tbl_id << 28) | + (wq->cbr_info.short_shift << 24) | + (wq->cbr_info.long_mb_num << 16) | + (wq->cbr_info.long_th << 0)); + WRITE_HREG(H264_ENC_CBR_REGION_SIZE, + (wq->cbr_info.block_w << 16) | + (wq->cbr_info.block_h << 0)); + } +#endif + WRITE_HREG(HCODEC_QDCT_VLC_QUANT_CTL_0, + (0 << 19) | /* vlc_delta_quant_1 */ + (i_pic_qp << 13) | /* vlc_quant_1 */ + (0 << 6) | /* vlc_delta_quant_0 */ + (i_pic_qp << 0)); /* vlc_quant_0 */ + WRITE_HREG(HCODEC_QDCT_VLC_QUANT_CTL_1, + (14 << 6) | /* vlc_max_delta_q_neg */ + (13 << 0)); /* vlc_max_delta_q_pos */ + WRITE_HREG(HCODEC_VLC_PIC_SIZE, + pic_width | (pic_height << 16)); + WRITE_HREG(HCODEC_VLC_PIC_POSITION, + (pic_mb_nr << 16) | + (pic_mby << 8) | + (pic_mbx << 0)); + + /* synopsys parallel_case full_case */ + switch (i_pic_qp) { + case 0: + i_pic_qp_c = 0; + break; + case 1: + i_pic_qp_c = 1; + break; + case 2: + i_pic_qp_c = 2; + break; + case 3: + i_pic_qp_c = 3; + break; + case 4: + i_pic_qp_c = 4; + break; + case 5: + i_pic_qp_c = 5; + break; + case 6: + i_pic_qp_c = 6; + break; + case 7: + i_pic_qp_c = 7; + break; + case 8: + i_pic_qp_c = 8; + break; + case 9: + i_pic_qp_c = 9; + break; + case 10: + i_pic_qp_c = 10; + break; + case 11: + i_pic_qp_c = 11; + break; + case 12: + i_pic_qp_c = 12; + break; + case 13: + i_pic_qp_c = 13; + break; + case 14: + i_pic_qp_c 
= 14; + break; + case 15: + i_pic_qp_c = 15; + break; + case 16: + i_pic_qp_c = 16; + break; + case 17: + i_pic_qp_c = 17; + break; + case 18: + i_pic_qp_c = 18; + break; + case 19: + i_pic_qp_c = 19; + break; + case 20: + i_pic_qp_c = 20; + break; + case 21: + i_pic_qp_c = 21; + break; + case 22: + i_pic_qp_c = 22; + break; + case 23: + i_pic_qp_c = 23; + break; + case 24: + i_pic_qp_c = 24; + break; + case 25: + i_pic_qp_c = 25; + break; + case 26: + i_pic_qp_c = 26; + break; + case 27: + i_pic_qp_c = 27; + break; + case 28: + i_pic_qp_c = 28; + break; + case 29: + i_pic_qp_c = 29; + break; + case 30: + i_pic_qp_c = 29; + break; + case 31: + i_pic_qp_c = 30; + break; + case 32: + i_pic_qp_c = 31; + break; + case 33: + i_pic_qp_c = 32; + break; + case 34: + i_pic_qp_c = 32; + break; + case 35: + i_pic_qp_c = 33; + break; + case 36: + i_pic_qp_c = 34; + break; + case 37: + i_pic_qp_c = 34; + break; + case 38: + i_pic_qp_c = 35; + break; + case 39: + i_pic_qp_c = 35; + break; + case 40: + i_pic_qp_c = 36; + break; + case 41: + i_pic_qp_c = 36; + break; + case 42: + i_pic_qp_c = 37; + break; + case 43: + i_pic_qp_c = 37; + break; + case 44: + i_pic_qp_c = 37; + break; + case 45: + i_pic_qp_c = 38; + break; + case 46: + i_pic_qp_c = 38; + break; + case 47: + i_pic_qp_c = 38; + break; + case 48: + i_pic_qp_c = 39; + break; + case 49: + i_pic_qp_c = 39; + break; + case 50: + i_pic_qp_c = 39; + break; + default: + i_pic_qp_c = 39; + break; + } + + /* synopsys parallel_case full_case */ + switch (p_pic_qp) { + case 0: + p_pic_qp_c = 0; + break; + case 1: + p_pic_qp_c = 1; + break; + case 2: + p_pic_qp_c = 2; + break; + case 3: + p_pic_qp_c = 3; + break; + case 4: + p_pic_qp_c = 4; + break; + case 5: + p_pic_qp_c = 5; + break; + case 6: + p_pic_qp_c = 6; + break; + case 7: + p_pic_qp_c = 7; + break; + case 8: + p_pic_qp_c = 8; + break; + case 9: + p_pic_qp_c = 9; + break; + case 10: + p_pic_qp_c = 10; + break; + case 11: + p_pic_qp_c = 11; + break; + case 12: + p_pic_qp_c 
= 12; + break; + case 13: + p_pic_qp_c = 13; + break; + case 14: + p_pic_qp_c = 14; + break; + case 15: + p_pic_qp_c = 15; + break; + case 16: + p_pic_qp_c = 16; + break; + case 17: + p_pic_qp_c = 17; + break; + case 18: + p_pic_qp_c = 18; + break; + case 19: + p_pic_qp_c = 19; + break; + case 20: + p_pic_qp_c = 20; + break; + case 21: + p_pic_qp_c = 21; + break; + case 22: + p_pic_qp_c = 22; + break; + case 23: + p_pic_qp_c = 23; + break; + case 24: + p_pic_qp_c = 24; + break; + case 25: + p_pic_qp_c = 25; + break; + case 26: + p_pic_qp_c = 26; + break; + case 27: + p_pic_qp_c = 27; + break; + case 28: + p_pic_qp_c = 28; + break; + case 29: + p_pic_qp_c = 29; + break; + case 30: + p_pic_qp_c = 29; + break; + case 31: + p_pic_qp_c = 30; + break; + case 32: + p_pic_qp_c = 31; + break; + case 33: + p_pic_qp_c = 32; + break; + case 34: + p_pic_qp_c = 32; + break; + case 35: + p_pic_qp_c = 33; + break; + case 36: + p_pic_qp_c = 34; + break; + case 37: + p_pic_qp_c = 34; + break; + case 38: + p_pic_qp_c = 35; + break; + case 39: + p_pic_qp_c = 35; + break; + case 40: + p_pic_qp_c = 36; + break; + case 41: + p_pic_qp_c = 36; + break; + case 42: + p_pic_qp_c = 37; + break; + case 43: + p_pic_qp_c = 37; + break; + case 44: + p_pic_qp_c = 37; + break; + case 45: + p_pic_qp_c = 38; + break; + case 46: + p_pic_qp_c = 38; + break; + case 47: + p_pic_qp_c = 38; + break; + case 48: + p_pic_qp_c = 39; + break; + case 49: + p_pic_qp_c = 39; + break; + case 50: + p_pic_qp_c = 39; + break; + default: + p_pic_qp_c = 39; + break; + } + WRITE_HREG(HCODEC_QDCT_Q_QUANT_I, + (i_pic_qp_c << 22) | + (i_pic_qp << 16) | + ((i_pic_qp_c % 6) << 12) | + ((i_pic_qp_c / 6) << 8) | + ((i_pic_qp % 6) << 4) | + ((i_pic_qp / 6) << 0)); + + WRITE_HREG(HCODEC_QDCT_Q_QUANT_P, + (p_pic_qp_c << 22) | + (p_pic_qp << 16) | + ((p_pic_qp_c % 6) << 12) | + ((p_pic_qp_c / 6) << 8) | + ((p_pic_qp % 6) << 4) | + ((p_pic_qp / 6) << 0)); + +#ifdef ENABLE_IGNORE_FUNCTION + WRITE_HREG(HCODEC_IGNORE_CONFIG, + (1 << 31) 
| /* ignore_lac_coeff_en */ + (1 << 26) | /* ignore_lac_coeff_else (<1) */ + (1 << 21) | /* ignore_lac_coeff_2 (<1) */ + (2 << 16) | /* ignore_lac_coeff_1 (<2) */ + (1 << 15) | /* ignore_cac_coeff_en */ + (1 << 10) | /* ignore_cac_coeff_else (<1) */ + (1 << 5) | /* ignore_cac_coeff_2 (<1) */ + (3 << 0)); /* ignore_cac_coeff_1 (<2) */ + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) + WRITE_HREG(HCODEC_IGNORE_CONFIG_2, + (1 << 31) | /* ignore_t_lac_coeff_en */ + (1 << 26) | /* ignore_t_lac_coeff_else (<1) */ + (2 << 21) | /* ignore_t_lac_coeff_2 (<2) */ + (6 << 16) | /* ignore_t_lac_coeff_1 (<6) */ + (1<<15) | /* ignore_cdc_coeff_en */ + (0<<14) | /* ignore_t_lac_coeff_else_le_3 */ + (1<<13) | /* ignore_t_lac_coeff_else_le_4 */ + (1<<12) | /* ignore_cdc_only_when_empty_cac_inter */ + (1<<11) | /* ignore_cdc_only_when_one_empty_inter */ + /* ignore_cdc_range_max_inter 0-0, 1-1, 2-2, 3-3 */ + (2<<9) | + /* ignore_cdc_abs_max_inter 0-1, 1-2, 2-3, 3-4 */ + (0<<7) | + /* ignore_cdc_only_when_empty_cac_intra */ + (1<<5) | + /* ignore_cdc_only_when_one_empty_intra */ + (1<<4) | + /* ignore_cdc_range_max_intra 0-0, 1-1, 2-2, 3-3 */ + (1<<2) | + /* ignore_cdc_abs_max_intra 0-1, 1-2, 2-3, 3-4 */ + (0<<0)); + else + WRITE_HREG(HCODEC_IGNORE_CONFIG_2, + (1 << 31) | /* ignore_t_lac_coeff_en */ + (1 << 26) | /* ignore_t_lac_coeff_else (<1) */ + (1 << 21) | /* ignore_t_lac_coeff_2 (<1) */ + (5 << 16) | /* ignore_t_lac_coeff_1 (<5) */ + (0 << 0)); +#else + WRITE_HREG(HCODEC_IGNORE_CONFIG, 0); + WRITE_HREG(HCODEC_IGNORE_CONFIG_2, 0); +#endif + + WRITE_HREG(HCODEC_QDCT_MB_CONTROL, + (1 << 9) | /* mb_info_soft_reset */ + (1 << 0)); /* mb read buffer soft reset */ + + WRITE_HREG(HCODEC_QDCT_MB_CONTROL, + (1 << 28) | /* ignore_t_p8x8 */ + (0 << 27) | /* zero_mc_out_null_non_skipped_mb */ + (0 << 26) | /* no_mc_out_null_non_skipped_mb */ + (0 << 25) | /* mc_out_even_skipped_mb */ + (0 << 24) | /* mc_out_wait_cbp_ready */ + (0 << 23) | /* mc_out_wait_mb_type_ready */ + (1 << 29) | /* 
ie_start_int_enable */ + (1 << 19) | /* i_pred_enable */ + (1 << 20) | /* ie_sub_enable */ + (1 << 18) | /* iq_enable */ + (1 << 17) | /* idct_enable */ + (1 << 14) | /* mb_pause_enable */ + (1 << 13) | /* q_enable */ + (1 << 12) | /* dct_enable */ + (1 << 10) | /* mb_info_en */ + (0 << 3) | /* endian */ + (0 << 1) | /* mb_read_en */ + (0 << 0)); /* soft reset */ + + WRITE_HREG(HCODEC_SAD_CONTROL, + (0 << 3) | /* ie_result_buff_enable */ + (1 << 2) | /* ie_result_buff_soft_reset */ + (0 << 1) | /* sad_enable */ + (1 << 0)); /* sad soft reset */ + WRITE_HREG(HCODEC_IE_RESULT_BUFFER, 0); + + WRITE_HREG(HCODEC_SAD_CONTROL, + (1 << 3) | /* ie_result_buff_enable */ + (0 << 2) | /* ie_result_buff_soft_reset */ + (1 << 1) | /* sad_enable */ + (0 << 0)); /* sad soft reset */ + + WRITE_HREG(HCODEC_IE_CONTROL, + (1 << 30) | /* active_ul_block */ + (0 << 1) | /* ie_enable */ + (1 << 0)); /* ie soft reset */ + + WRITE_HREG(HCODEC_IE_CONTROL, + (1 << 30) | /* active_ul_block */ + (0 << 1) | /* ie_enable */ + (0 << 0)); /* ie soft reset */ + + WRITE_HREG(HCODEC_ME_SKIP_LINE, + (8 << 24) | /* step_3_skip_line */ + (8 << 18) | /* step_2_skip_line */ + (2 << 12) | /* step_1_skip_line */ + (0 << 6) | /* step_0_skip_line */ + (0 << 0)); + + WRITE_HREG(HCODEC_ME_MV_MERGE_CTL, me_mv_merge_ctl); + WRITE_HREG(HCODEC_ME_STEP0_CLOSE_MV, me_step0_close_mv); + WRITE_HREG(HCODEC_ME_SAD_ENOUGH_01, me_sad_enough_01); + WRITE_HREG(HCODEC_ME_SAD_ENOUGH_23, me_sad_enough_23); + WRITE_HREG(HCODEC_ME_F_SKIP_SAD, me_f_skip_sad); + WRITE_HREG(HCODEC_ME_F_SKIP_WEIGHT, me_f_skip_weight); + WRITE_HREG(HCODEC_ME_MV_WEIGHT_01, me_mv_weight_01); + WRITE_HREG(HCODEC_ME_MV_WEIGHT_23, me_mv_weight_23); + WRITE_HREG(HCODEC_ME_SAD_RANGE_INC, me_sad_range_inc); + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) { + WRITE_HREG(HCODEC_V5_SIMPLE_MB_CTL, 0); + WRITE_HREG(HCODEC_V5_SIMPLE_MB_CTL, + (v5_use_small_diff_cnt << 7) | + (v5_simple_mb_inter_all_en << 6) | + (v5_simple_mb_inter_8x8_en << 5) | + 
(v5_simple_mb_inter_16_8_en << 4) | + (v5_simple_mb_inter_16x16_en << 3) | + (v5_simple_mb_intra_en << 2) | + (v5_simple_mb_C_en << 1) | + (v5_simple_mb_Y_en << 0)); + WRITE_HREG(HCODEC_V5_MB_DIFF_SUM, 0); + WRITE_HREG(HCODEC_V5_SMALL_DIFF_CNT, + (v5_small_diff_C<<16) | + (v5_small_diff_Y<<0)); + if (qp_mode == 1) { + WRITE_HREG(HCODEC_V5_SIMPLE_MB_DQUANT, + 0); + } else { + WRITE_HREG(HCODEC_V5_SIMPLE_MB_DQUANT, + v5_simple_dq_setting); + } + WRITE_HREG(HCODEC_V5_SIMPLE_MB_ME_WEIGHT, + v5_simple_me_weight_setting); + /* txlx can remove it */ + WRITE_HREG(HCODEC_QDCT_CONFIG, 1 << 0); + } + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) { + WRITE_HREG(HCODEC_V4_FORCE_SKIP_CFG, + (i_pic_qp << 26) | /* v4_force_q_r_intra */ + (i_pic_qp << 20) | /* v4_force_q_r_inter */ + (0 << 19) | /* v4_force_q_y_enable */ + (5 << 16) | /* v4_force_qr_y */ + (6 << 12) | /* v4_force_qp_y */ + (0 << 0)); /* v4_force_skip_sad */ + + /* V3 Force skip */ + WRITE_HREG(HCODEC_V3_SKIP_CONTROL, + (1 << 31) | /* v3_skip_enable */ + (0 << 30) | /* v3_step_1_weight_enable */ + (1 << 28) | /* v3_mv_sad_weight_enable */ + (1 << 27) | /* v3_ipred_type_enable */ + (V3_FORCE_SKIP_SAD_1 << 12) | + (V3_FORCE_SKIP_SAD_0 << 0)); + WRITE_HREG(HCODEC_V3_SKIP_WEIGHT, + (V3_SKIP_WEIGHT_1 << 16) | + (V3_SKIP_WEIGHT_0 << 0)); + WRITE_HREG(HCODEC_V3_L1_SKIP_MAX_SAD, + (V3_LEVEL_1_F_SKIP_MAX_SAD << 16) | + (V3_LEVEL_1_SKIP_MAX_SAD << 0)); + WRITE_HREG(HCODEC_V3_L2_SKIP_WEIGHT, + (V3_FORCE_SKIP_SAD_2 << 16) | + (V3_SKIP_WEIGHT_2 << 0)); + if (request != NULL) { + unsigned int off1, off2; + + off1 = V3_IE_F_ZERO_SAD_I4 - I4MB_WEIGHT_OFFSET; + off2 = V3_IE_F_ZERO_SAD_I16 + - I16MB_WEIGHT_OFFSET; + WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0, + ((request->i16_weight + off2) << 16) | + ((request->i4_weight + off1) << 0)); + off1 = V3_ME_F_ZERO_SAD - ME_WEIGHT_OFFSET; + WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1, + (0 << 25) | + /* v3_no_ver_when_top_zero_en */ + (0 << 24) | + /* v3_no_hor_when_left_zero_en */ + (3 << 16) | /* 
type_hor break */ + ((request->me_weight + off1) << 0)); + } else { + WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0, + (V3_IE_F_ZERO_SAD_I16 << 16) | + (V3_IE_F_ZERO_SAD_I4 << 0)); + WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1, + (0 << 25) | + /* v3_no_ver_when_top_zero_en */ + (0 << 24) | + /* v3_no_hor_when_left_zero_en */ + (3 << 16) | /* type_hor break */ + (V3_ME_F_ZERO_SAD << 0)); + } + } else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) { + /* V3 Force skip */ + WRITE_HREG(HCODEC_V3_SKIP_CONTROL, + (1 << 31) | /* v3_skip_enable */ + (0 << 30) | /* v3_step_1_weight_enable */ + (1 << 28) | /* v3_mv_sad_weight_enable */ + (1 << 27) | /* v3_ipred_type_enable */ + (0 << 12) | /* V3_FORCE_SKIP_SAD_1 */ + (0 << 0)); /* V3_FORCE_SKIP_SAD_0 */ + WRITE_HREG(HCODEC_V3_SKIP_WEIGHT, + (V3_SKIP_WEIGHT_1 << 16) | + (V3_SKIP_WEIGHT_0 << 0)); + WRITE_HREG(HCODEC_V3_L1_SKIP_MAX_SAD, + (V3_LEVEL_1_F_SKIP_MAX_SAD << 16) | + (V3_LEVEL_1_SKIP_MAX_SAD << 0)); + WRITE_HREG(HCODEC_V3_L2_SKIP_WEIGHT, + (0 << 16) | /* V3_FORCE_SKIP_SAD_2 */ + (V3_SKIP_WEIGHT_2 << 0)); + WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0, + (0 << 16) | /* V3_IE_F_ZERO_SAD_I16 */ + (0 << 0)); /* V3_IE_F_ZERO_SAD_I4 */ + WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1, + (0 << 25) | /* v3_no_ver_when_top_zero_en */ + (0 << 24) | /* v3_no_hor_when_left_zero_en */ + (3 << 16) | /* type_hor break */ + (0 << 0)); /* V3_ME_F_ZERO_SAD */ + } + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) { + int i; + /* MV SAD Table */ + for (i = 0; i < 64; i++) + WRITE_HREG(HCODEC_V3_MV_SAD_TABLE, + v3_mv_sad[i]); + + /* IE PRED SAD Table*/ + WRITE_HREG(HCODEC_V3_IPRED_TYPE_WEIGHT_0, + (C_ipred_weight_H << 24) | + (C_ipred_weight_V << 16) | + (I4_ipred_weight_else << 8) | + (I4_ipred_weight_most << 0)); + WRITE_HREG(HCODEC_V3_IPRED_TYPE_WEIGHT_1, + (I16_ipred_weight_DC << 24) | + (I16_ipred_weight_H << 16) | + (I16_ipred_weight_V << 8) | + (C_ipred_weight_DC << 0)); + WRITE_HREG(HCODEC_V3_LEFT_SMALL_MAX_SAD, + (v3_left_small_max_me_sad << 16) | + 
(v3_left_small_max_ie_sad << 0)); + } + WRITE_HREG(HCODEC_IE_DATA_FEED_BUFF_INFO, 0); + WRITE_HREG(HCODEC_CURR_CANVAS_CTRL, 0); + data32 = READ_HREG(HCODEC_VLC_CONFIG); + data32 = data32 | (1 << 0); /* set pop_coeff_even_all_zero */ + WRITE_HREG(HCODEC_VLC_CONFIG, data32); + + WRITE_HREG(INFO_DUMP_START_ADDR, + wq->mem.dump_info_ddr_start_addr); + + /* clear mailbox interrupt */ + WRITE_HREG(HCODEC_IRQ_MBOX_CLR, 1); + + /* enable mailbox interrupt */ + WRITE_HREG(HCODEC_IRQ_MBOX_MASK, 1); +} + +void amvenc_reset(void) +{ + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2 && + use_reset_control) { + hcodec_hw_reset(); + } else { + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + WRITE_VREG(DOS_SW_RESET1, + (1 << 2) | (1 << 6) | + (1 << 7) | (1 << 8) | + (1 << 14) | (1 << 16) | + (1 << 17)); + WRITE_VREG(DOS_SW_RESET1, 0); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + } +} + +void amvenc_start(void) +{ + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2 && + use_reset_control) { + hcodec_hw_reset(); + } else { + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + WRITE_VREG(DOS_SW_RESET1, + (1 << 12) | (1 << 11)); + WRITE_VREG(DOS_SW_RESET1, 0); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + } + + WRITE_HREG(HCODEC_MPSR, 0x0001); +} + +void amvenc_stop(void) +{ + ulong timeout = jiffies + HZ; + + WRITE_HREG(HCODEC_MPSR, 0); + WRITE_HREG(HCODEC_CPSR, 0); + + while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) { + if (time_after(jiffies, timeout)) + break; + } + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2 && + use_reset_control) { + hcodec_hw_reset(); + } else { + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + + WRITE_VREG(DOS_SW_RESET1, + (1 << 12) | (1 << 11) | + (1 << 2) | (1 << 6) | + (1 << 7) | (1 << 8) | + (1 << 14) | (1 << 16) | + (1 << 17)); + + WRITE_VREG(DOS_SW_RESET1, 0); + + 
READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + } + +} + +static void __iomem *mc_addr; +static u32 mc_addr_map; +#define MC_SIZE (4096 * 8) +s32 amvenc_loadmc(const char *p, struct encode_wq_s *wq) +{ + ulong timeout; + s32 ret = 0; + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) { + char *buf = vmalloc(0x1000 * 16); + int ret = -1; + pr_err("load firmware for t3 avc encoder\n"); + if (get_firmware_data(VIDEO_ENC_H264, buf) < 0) { + //amvdec_disable(); + pr_err("get firmware for 264 enc fail!\n"); + vfree(buf); + return -1; + } + + WRITE_HREG(HCODEC_MPSR, 0); + WRITE_HREG(HCODEC_CPSR, 0); + ret = amvdec_loadmc_ex(VFORMAT_H264_ENC, NULL, buf); + + if (ret < 0) { + //amvdec_disable(); + vfree(buf); + pr_err("amvenc: the %s fw loading failed, err: %x\n", + tee_enabled() ? "TEE" : "local", ret); + return -EBUSY; + } + vfree(buf); + return 0; + } + + /* use static mempry*/ + if (mc_addr == NULL) { + mc_addr = kmalloc(MC_SIZE, GFP_KERNEL); + if (!mc_addr) { + enc_pr(LOG_ERROR, "avc loadmc iomap mc addr error.\n"); + return -ENOMEM; + } + } + + enc_pr(LOG_ALL, "avc encode ucode name is %s\n", p); + ret = get_data_from_name(p, (u8 *)mc_addr); + if (ret < 0) { + enc_pr(LOG_ERROR, + "avc microcode fail ret=%d, name: %s, wq:%p.\n", + ret, p, (void *)wq); + } + + mc_addr_map = dma_map_single( + &encode_manager.this_pdev->dev, + mc_addr, MC_SIZE, DMA_TO_DEVICE); + + /* mc_addr_map = wq->mem.assit_buffer_offset; */ + /* mc_addr = ioremap_wc(mc_addr_map, MC_SIZE); */ + /* memcpy(mc_addr, p, MC_SIZE); */ + enc_pr(LOG_ALL, "address 0 is 0x%x\n", *((u32 *)mc_addr)); + enc_pr(LOG_ALL, "address 1 is 0x%x\n", *((u32 *)mc_addr + 1)); + enc_pr(LOG_ALL, "address 2 is 0x%x\n", *((u32 *)mc_addr + 2)); + enc_pr(LOG_ALL, "address 3 is 0x%x\n", *((u32 *)mc_addr + 3)); + WRITE_HREG(HCODEC_MPSR, 0); + WRITE_HREG(HCODEC_CPSR, 0); + + /* Read CBUS register for timing */ + timeout = READ_HREG(HCODEC_MPSR); + timeout = READ_HREG(HCODEC_MPSR); + + timeout = 
jiffies + HZ; + + WRITE_HREG(HCODEC_IMEM_DMA_ADR, mc_addr_map); + WRITE_HREG(HCODEC_IMEM_DMA_COUNT, 0x1000); + //WRITE_HREG(HCODEC_IMEM_DMA_CTRL, (0x8000 | (7 << 16))); + WRITE_VREG(HCODEC_IMEM_DMA_CTRL, (0x8000 | (0xf << 16))); + + while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) { + if (time_before(jiffies, timeout)) + schedule(); + else { + enc_pr(LOG_ERROR, "hcodec load mc error\n"); + ret = -EBUSY; + break; + } + } + dma_unmap_single( + &encode_manager.this_pdev->dev, + mc_addr_map, MC_SIZE, DMA_TO_DEVICE); + return ret; +} + +const u32 fix_mc[] __aligned(8) = { + 0x0809c05a, 0x06696000, 0x0c780000, 0x00000000 +}; + + +/* + * DOS top level register access fix. + * When hcodec is running, a protocol register HCODEC_CCPU_INTR_MSK + * is set to make hcodec access one CBUS out of DOS domain once + * to work around a HW bug for 4k2k dual decoder implementation. + * If hcodec is not running, then a ucode is loaded and executed + * instead. + */ +/*void amvenc_dos_top_reg_fix(void) +{ + bool hcodec_on; + ulong flags; + + spin_lock_irqsave(&lock, flags); + + hcodec_on = vdec_on(VDEC_HCODEC); + + if ((hcodec_on) && (READ_VREG(HCODEC_MPSR) & 1)) { + WRITE_HREG(HCODEC_CCPU_INTR_MSK, 1); + spin_unlock_irqrestore(&lock, flags); + return; + } + + if (!hcodec_on) + vdec_poweron(VDEC_HCODEC); + + amhcodec_loadmc(fix_mc); + + amhcodec_start(); + + udelay(1000); + + amhcodec_stop(); + + if (!hcodec_on) + vdec_poweroff(VDEC_HCODEC); + + spin_unlock_irqrestore(&lock, flags); +} + +bool amvenc_avc_on(void) +{ + bool hcodec_on; + ulong flags; + + spin_lock_irqsave(&lock, flags); + + hcodec_on = vdec_on(VDEC_HCODEC); + hcodec_on &= (encode_manager.wq_count > 0); + + spin_unlock_irqrestore(&lock, flags); + return hcodec_on; +} +*/ + +static s32 avc_poweron(u32 clock) +{ + ulong flags; + u32 data32; + + data32 = 0; + amports_switch_gate("vdec", 1); + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + hcodec_clk_config(1); + udelay(20); + if (get_cpu_major_id() == 
AM_MESON_CPU_MAJOR_ID_T3) { + vdec_poweron(VDEC_HCODEC); + pr_err("vdec_poweron VDEC_HCODEC\n"); + } else { + pwr_ctrl_psci_smc(PDID_T3_DOS_HCODEC, PWR_ON); + pr_err("pwr_ctrl_psci_smc PDID_T3_DOS_HCODEC off\n"); + } + udelay(20); + /* + pr_err("hcodec powered on, hcodec clk rate:%ld, pwr_state:%d\n", + clk_get_rate(s_hcodec_clks.hcodec_aclk), + !pwr_ctrl_status_psci_smc(PDID_T3_DOS_HCODEC)); + */ + } else { + spin_lock_irqsave(&lock, flags); + WRITE_AOREG(AO_RTI_PWR_CNTL_REG0, + (READ_AOREG(AO_RTI_PWR_CNTL_REG0) & (~0x18))); + udelay(10); + /* Powerup HCODEC */ + /* [1:0] HCODEC */ + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & + ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 || + get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2) + ? ~0x1 : ~0x3)); + udelay(10); + spin_unlock_irqrestore(&lock, flags); + } + spin_lock_irqsave(&lock, flags); + WRITE_VREG(DOS_SW_RESET1, 0xffffffff); + WRITE_VREG(DOS_SW_RESET1, 0); + /* Enable Dos internal clock gating */ + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) { + WRITE_VREG_BITS(DOS_GCLK_EN0, 0x7fff, 12, 15); + /* + * WRITE_VREG(DOS_GCLK_EN0, 0xffffffff); + */ + } else + hvdec_clock_enable(clock); + /* Powerup HCODEC memories */ + WRITE_VREG(DOS_MEM_PD_HCODEC, 0x0); + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + } else { + /* Remove HCODEC ISO */ + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + READ_AOREG(AO_RTI_GEN_PWR_ISO0) & + ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 || + get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2) + ? 
~0x1 : ~0x30)); + } + udelay(10); + /* Disable auto-clock gate */ + WRITE_VREG(DOS_GEN_CTRL0, + (READ_VREG(DOS_GEN_CTRL0) | 0x1)); + WRITE_VREG(DOS_GEN_CTRL0, + (READ_VREG(DOS_GEN_CTRL0) & 0xFFFFFFFE)); + spin_unlock_irqrestore(&lock, flags); + mdelay(10); + + return 0; +} + +static s32 avc_poweroff(void) +{ + ulong flags; + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + hcodec_clk_config(0); + udelay(20); + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) { + vdec_poweroff(VDEC_HCODEC); + pr_err("vdec_poweroff VDEC_HCODEC\n"); + } else { + pwr_ctrl_psci_smc(PDID_T3_DOS_HCODEC, PWR_OFF); + pr_err("pwr_ctrl_psci_smc PDID_T3_DOS_HCODEC on\n"); + } + udelay(20); + } else { + /* enable HCODEC isolation */ + spin_lock_irqsave(&lock, flags); + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + READ_AOREG(AO_RTI_GEN_PWR_ISO0) | + ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 || + get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2) + ? 0x1 : 0x30)); + spin_unlock_irqrestore(&lock, flags); + } + spin_lock_irqsave(&lock, flags); + /* power off HCODEC memories */ + WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL); + + /* disable HCODEC clock */ + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) { + WRITE_VREG_BITS(DOS_GCLK_EN0, 0, 12, 15); + } else + hvdec_clock_disable(); + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + + } else { + /* HCODEC power off */ + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | + ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 || + get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2) + ? 
0x1 : 0x3)); + } + + spin_unlock_irqrestore(&lock, flags); + + /* release DOS clk81 clock gating */ + amports_switch_gate("vdec", 0); + return 0; +} + +static s32 reload_mc(struct encode_wq_s *wq) +{ + const char *p = select_ucode(encode_manager.ucode_index); + + amvenc_stop(); + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2 && use_reset_control) { + hcodec_hw_reset(); + } else { + WRITE_VREG(DOS_SW_RESET1, 0xffffffff); + WRITE_VREG(DOS_SW_RESET1, 0); + } + + udelay(10); + + WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x32); + enc_pr(LOG_INFO, "reload microcode\n"); + + if (amvenc_loadmc(p, wq) < 0) + return -EBUSY; + return 0; +} + +static void encode_isr_tasklet(ulong data) +{ + struct encode_manager_s *manager = (struct encode_manager_s *)data; + + enc_pr(LOG_INFO, "encoder is done %d\n", manager->encode_hw_status); + if (((manager->encode_hw_status == ENCODER_IDR_DONE) + || (manager->encode_hw_status == ENCODER_NON_IDR_DONE) + || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE) + || (manager->encode_hw_status == ENCODER_PICTURE_DONE)) + && (manager->process_irq)) { + wake_up_interruptible(&manager->event.hw_complete); + } +} + +/* irq function */ +static irqreturn_t enc_isr(s32 irq_number, void *para) +{ + struct encode_manager_s *manager = (struct encode_manager_s *)para; + + enc_pr(LOG_INFO, "*****ENC_ISR*****\n"); + WRITE_HREG(HCODEC_IRQ_MBOX_CLR, 1); + + manager->encode_hw_status = READ_HREG(ENCODER_STATUS); + if ((manager->encode_hw_status == ENCODER_IDR_DONE) + || (manager->encode_hw_status == ENCODER_NON_IDR_DONE) + || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE) + || (manager->encode_hw_status == ENCODER_PICTURE_DONE)) { + enc_pr(LOG_ALL, "encoder stage is %d\n", + manager->encode_hw_status); + } + + if (((manager->encode_hw_status == ENCODER_IDR_DONE) + || (manager->encode_hw_status == ENCODER_NON_IDR_DONE) + || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE) + || (manager->encode_hw_status == ENCODER_PICTURE_DONE)) + && 
(!manager->process_irq)) { + manager->process_irq = true; + if (manager->encode_hw_status != ENCODER_SEQUENCE_DONE) + manager->need_reset = true; + + tasklet_schedule(&manager->encode_tasklet); + } + return IRQ_HANDLED; +} + +static s32 convert_request(struct encode_wq_s *wq, u32 *cmd_info) +{ + int i = 0; + u8 *ptr; + u32 data_offset; + u32 cmd = cmd_info[0]; + unsigned long paddr = 0; + struct enc_dma_cfg *cfg = NULL; + s32 ret = 0; + struct platform_device *pdev; + + if (!wq) + return -1; + + memset(&wq->request, 0, sizeof(struct encode_request_s)); + wq->request.me_weight = ME_WEIGHT_OFFSET; + wq->request.i4_weight = I4MB_WEIGHT_OFFSET; + wq->request.i16_weight = I16MB_WEIGHT_OFFSET; + + if (cmd == ENCODER_SEQUENCE) { + wq->request.cmd = cmd; + wq->request.ucode_mode = cmd_info[1]; + wq->request.quant = cmd_info[2]; + wq->request.flush_flag = cmd_info[3]; + //wq->request.timeout = cmd_info[4]; + wq->request.timeout = 5000; /* 5000 ms */ + } else if ((cmd == ENCODER_IDR) || (cmd == ENCODER_NON_IDR)) { + wq->request.cmd = cmd; + wq->request.ucode_mode = cmd_info[1]; + wq->request.type = cmd_info[2]; + wq->request.fmt = cmd_info[3]; + wq->request.src = cmd_info[4]; + wq->request.framesize = cmd_info[5]; + wq->request.quant = cmd_info[6]; + wq->request.flush_flag = cmd_info[7]; + wq->request.timeout = cmd_info[8]; + wq->request.crop_top = cmd_info[9]; + wq->request.crop_bottom = cmd_info[10]; + wq->request.crop_left = cmd_info[11]; + wq->request.crop_right = cmd_info[12]; + wq->request.src_w = cmd_info[13]; + wq->request.src_h = cmd_info[14]; + wq->request.scale_enable = cmd_info[15]; + + enc_pr(LOG_INFO, "hwenc: wq->pic.encoder_width %d, ", + wq->pic.encoder_width); + enc_pr(LOG_INFO, "wq->pic.encoder_height:%d, request fmt=%d\n", + wq->pic.encoder_height, wq->request.fmt); + + if (wq->pic.encoder_width >= 1280 && wq->pic.encoder_height >= 720 + && wq->request.fmt == FMT_RGBA8888 && wq->pic.color_space != GE2D_FORMAT_BT601) { + wq->request.scale_enable = 1; + 
wq->request.src_w = wq->pic.encoder_width; + wq->request.src_h = wq->pic.encoder_height; + enc_pr(LOG_INFO, "hwenc: force wq->request.scale_enable=%d\n", wq->request.scale_enable); + } + /* + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3 && wq->request.type == CANVAS_BUFF) { + wq->request.scale_enable = 1; + wq->request.src_w = wq->pic.encoder_width; + wq->request.src_h = wq->pic.encoder_height; + enc_pr(LOG_DEBUG, "hwenc: t3 canvas source, force wq->request.scale_enable=%d\n", wq->request.scale_enable); + } + */ + + wq->request.nr_mode = + (nr_mode > 0) ? nr_mode : cmd_info[16]; + if (cmd == ENCODER_IDR) + wq->request.nr_mode = 0; + + data_offset = 17 + + (sizeof(wq->quant_tbl_i4) + + sizeof(wq->quant_tbl_i16) + + sizeof(wq->quant_tbl_me)) / 4; + + if (wq->request.quant == ADJUSTED_QP_FLAG) { + ptr = (u8 *) &cmd_info[17]; + memcpy(wq->quant_tbl_i4, ptr, + sizeof(wq->quant_tbl_i4)); + ptr += sizeof(wq->quant_tbl_i4); + memcpy(wq->quant_tbl_i16, ptr, + sizeof(wq->quant_tbl_i16)); + ptr += sizeof(wq->quant_tbl_i16); + memcpy(wq->quant_tbl_me, ptr, + sizeof(wq->quant_tbl_me)); + wq->request.i4_weight -= + cmd_info[data_offset++]; + wq->request.i16_weight -= + cmd_info[data_offset++]; + wq->request.me_weight -= + cmd_info[data_offset++]; + if (qp_table_debug) { + u8 *qp_tb = (u8 *)(&wq->quant_tbl_i4[0]); + + for (i = 0; i < 32; i++) { + enc_pr(LOG_INFO, "%d ", *qp_tb); + qp_tb++; + } + enc_pr(LOG_INFO, "\n"); + + qp_tb = (u8 *)(&wq->quant_tbl_i16[0]); + for (i = 0; i < 32; i++) { + enc_pr(LOG_INFO, "%d ", *qp_tb); + qp_tb++; + } + enc_pr(LOG_INFO, "\n"); + + qp_tb = (u8 *)(&wq->quant_tbl_me[0]); + for (i = 0; i < 32; i++) { + enc_pr(LOG_INFO, "%d ", *qp_tb); + qp_tb++; + } + enc_pr(LOG_INFO, "\n"); + } + } else { + memset(wq->quant_tbl_me, wq->request.quant, + sizeof(wq->quant_tbl_me)); + memset(wq->quant_tbl_i4, wq->request.quant, + sizeof(wq->quant_tbl_i4)); + memset(wq->quant_tbl_i16, wq->request.quant, + sizeof(wq->quant_tbl_i16)); + data_offset += 3; + } + 
//add qp range check + enc_pr(LOG_INFO, "wq->request.quant %d \n", wq->request.quant); + { + u8 *qp_tb = (u8 *)(&wq->quant_tbl_i4[0]); + for (i = 0; i < 32; i++) { + if (*qp_tb > 51) { + enc_pr(LOG_ERROR, "i4 %d ", *qp_tb); + *qp_tb = 51; + } + qp_tb++; + } + + qp_tb = (u8 *)(&wq->quant_tbl_i16[0]); + for (i = 0; i < 32; i++) { + if (*qp_tb > 51) { + enc_pr(LOG_ERROR, "i16 %d ", *qp_tb); + *qp_tb = 51; + } + qp_tb++; + } + + qp_tb = (u8 *)(&wq->quant_tbl_me[0]); + for (i = 0; i < 32; i++) { + if (*qp_tb > 51) { + enc_pr(LOG_ERROR, "me %d ", *qp_tb); + *qp_tb = 51; + } + qp_tb++; + } + } +#ifdef H264_ENC_CBR + wq->cbr_info.block_w = cmd_info[data_offset++]; + wq->cbr_info.block_h = cmd_info[data_offset++]; + wq->cbr_info.long_th = cmd_info[data_offset++]; + wq->cbr_info.start_tbl_id = cmd_info[data_offset++]; + wq->cbr_info.short_shift = CBR_SHORT_SHIFT; + wq->cbr_info.long_mb_num = CBR_LONG_MB_NUM; +#endif + data_offset = 17 + + (sizeof(wq->quant_tbl_i4) + + sizeof(wq->quant_tbl_i16) + + sizeof(wq->quant_tbl_me)) / 4 + 7; + + if (wq->request.type == DMA_BUFF) { + wq->request.plane_num = cmd_info[data_offset++]; + enc_pr(LOG_INFO, "wq->request.plane_num %d\n", + wq->request.plane_num); + if (wq->request.fmt == FMT_NV12 || + wq->request.fmt == FMT_NV21 || + wq->request.fmt == FMT_YUV420) { + if (wq->request.plane_num > 3) { + enc_pr(LOG_ERROR, "wq->request.plane_num is invalid %d.\n", + wq->request.plane_num); + return -1; + } + for (i = 0; i < wq->request.plane_num; i++) { + cfg = &wq->request.dma_cfg[i]; + cfg->dir = DMA_TO_DEVICE; + cfg->fd = cmd_info[data_offset++]; + pdev = encode_manager.this_pdev; + cfg->dev = &(pdev->dev); + + ret = enc_dma_buf_get_phys(cfg, &paddr); + if (ret < 0) { + enc_pr(LOG_ERROR, + "import fd %d failed\n", + cfg->fd); + cfg->paddr = NULL; + cfg->vaddr = NULL; + return -1; + } + cfg->paddr = (void *)paddr; + enc_pr(LOG_INFO, "vaddr %p\n", + cfg->vaddr); + } + } else { + enc_pr(LOG_ERROR, "error fmt = %d\n", + wq->request.fmt); + } + } + 
+ } else { + enc_pr(LOG_ERROR, "error cmd = %d, wq: %p.\n", + cmd, (void *)wq); + return -1; + } + wq->request.parent = wq; + return 0; +} + +void amvenc_avc_start_cmd(struct encode_wq_s *wq, + struct encode_request_s *request) +{ + u32 reload_flag = 0; + + if (request->ucode_mode != encode_manager.ucode_index) { + encode_manager.ucode_index = request->ucode_mode; + if (reload_mc(wq)) { + enc_pr(LOG_ERROR, + "reload mc fail, wq:%p\n", (void *)wq); + return; + } + reload_flag = 1; + encode_manager.need_reset = true; + } + + wq->hw_status = 0; + wq->output_size = 0; + wq->ucode_index = encode_manager.ucode_index; + + ie_me_mode = (0 & ME_PIXEL_MODE_MASK) << ME_PIXEL_MODE_SHIFT; + + if (encode_manager.need_reset) { + amvenc_stop(); + reload_flag = 1; + encode_manager.need_reset = false; + encode_manager.encode_hw_status = ENCODER_IDLE; + amvenc_reset(); + avc_canvas_init(wq); + avc_init_encoder(wq, (request->cmd == ENCODER_IDR) ? true : false); + avc_init_input_buffer(wq); + avc_init_output_buffer(wq); + + avc_prot_init( + wq, request, request->quant, + (request->cmd == ENCODER_IDR) ? 
true : false); + + avc_init_assit_buffer(wq); + + enc_pr(LOG_INFO, + "begin to new frame, request->cmd: %d, ucode mode: %d, wq:%p\n", + request->cmd, request->ucode_mode, (void *)wq); + } + + if ((request->cmd == ENCODER_IDR) || + (request->cmd == ENCODER_NON_IDR)) { +#ifdef H264_ENC_SVC + /* encode non reference frame or not */ + if (request->cmd == ENCODER_IDR) + wq->pic.non_ref_cnt = 0; //IDR reset counter + + if (wq->pic.enable_svc && wq->pic.non_ref_cnt) { + enc_pr(LOG_INFO, + "PIC is NON REF cmd %d cnt %d value 0x%x\n", + request->cmd, wq->pic.non_ref_cnt, + ENC_SLC_NON_REF); + WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_NON_REF); + } else { + enc_pr(LOG_INFO, + "PIC is REF cmd %d cnt %d val 0x%x\n", + request->cmd, wq->pic.non_ref_cnt, + ENC_SLC_REF); + WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_REF); + } +#else + /* if FW defined but not defined SVC in driver here*/ + WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_REF); +#endif + avc_init_dblk_buffer(wq->mem.dblk_buf_canvas); + avc_init_reference_buffer(wq->mem.ref_buf_canvas); + } + if ((request->cmd == ENCODER_IDR) || + (request->cmd == ENCODER_NON_IDR)) + set_input_format(wq, request); + + if (request->cmd == ENCODER_IDR) + ie_me_mb_type = HENC_MB_Type_I4MB; + else if (request->cmd == ENCODER_NON_IDR) + ie_me_mb_type = + (HENC_SKIP_RUN_AUTO << 16) | + (HENC_MB_Type_AUTO << 4) | + (HENC_MB_Type_AUTO << 0); + else + ie_me_mb_type = 0; + avc_init_ie_me_parameter(wq, request->quant); + +#ifdef MULTI_SLICE_MC + if (fixed_slice_cfg) + WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg); + else if (wq->pic.rows_per_slice != + (wq->pic.encoder_height + 15) >> 4) { + u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4; + + mb_per_slice = mb_per_slice * wq->pic.rows_per_slice; + WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice); + } else + WRITE_HREG(FIXED_SLICE_CFG, 0); +#else + WRITE_HREG(FIXED_SLICE_CFG, 0); +#endif + + encode_manager.encode_hw_status = request->cmd; + wq->hw_status = request->cmd; + WRITE_HREG(ENCODER_STATUS, 
request->cmd); + if ((request->cmd == ENCODER_IDR) + || (request->cmd == ENCODER_NON_IDR) + || (request->cmd == ENCODER_SEQUENCE) + || (request->cmd == ENCODER_PICTURE)) + encode_manager.process_irq = false; + + if (reload_flag) + amvenc_start(); + enc_pr(LOG_ALL, "amvenc_avc_start cmd out, request:%p.\n", (void*)request); +} + +static void dma_flush(u32 buf_start, u32 buf_size) +{ + if ((buf_start == 0) || (buf_size == 0)) + return; + dma_sync_single_for_device( + &encode_manager.this_pdev->dev, buf_start, + buf_size, DMA_TO_DEVICE); +} + +static void cache_flush(u32 buf_start, u32 buf_size) +{ + if ((buf_start == 0) || (buf_size == 0)) + return; + dma_sync_single_for_cpu( + &encode_manager.this_pdev->dev, buf_start, + buf_size, DMA_FROM_DEVICE); +} + +static u32 getbuffer(struct encode_wq_s *wq, u32 type) +{ + u32 ret = 0; + + switch (type) { + case ENCODER_BUFFER_INPUT: + ret = wq->mem.dct_buff_start_addr; + break; + case ENCODER_BUFFER_REF0: + ret = wq->mem.dct_buff_start_addr + + wq->mem.bufspec.dec0_y.buf_start; + break; + case ENCODER_BUFFER_REF1: + ret = wq->mem.dct_buff_start_addr + + wq->mem.bufspec.dec1_y.buf_start; + break; + case ENCODER_BUFFER_OUTPUT: + ret = wq->mem.BitstreamStart; + break; + case ENCODER_BUFFER_DUMP: + ret = wq->mem.dump_info_ddr_start_addr; + break; + case ENCODER_BUFFER_CBR: + ret = wq->mem.cbr_info_ddr_start_addr; + break; + default: + break; + } + return ret; +} + +s32 amvenc_avc_start(struct encode_wq_s *wq, u32 clock) +{ + const char *p = select_ucode(encode_manager.ucode_index); + + avc_poweron(clock); + + avc_canvas_init(wq); + + WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x32); + + if (amvenc_loadmc(p, wq) < 0) + return -EBUSY; + encode_manager.need_reset = true; + encode_manager.process_irq = false; + encode_manager.encode_hw_status = ENCODER_IDLE; + amvenc_reset(); + avc_init_encoder(wq, true); + avc_init_input_buffer(wq); /* dct buffer setting */ + avc_init_output_buffer(wq); /* output stream buffer */ + + ie_me_mode = (0 & 
ME_PIXEL_MODE_MASK) << ME_PIXEL_MODE_SHIFT;
	/* NOTE(review): this is the tail of amvenc_avc_start(); its head is
	 * above this view. From here it arms the IRQ, programs the per-frame
	 * buffers and kicks the microcode.
	 */
	avc_prot_init(wq, NULL, wq->pic.init_qppicture, true);
	if (request_irq(encode_manager.irq_num, enc_isr, IRQF_SHARED,
		"enc-irq", (void *)&encode_manager) == 0)
		encode_manager.irq_requested = true;
	else
		encode_manager.irq_requested = false;

	/* decoder buffer , need set before each frame start */
	avc_init_dblk_buffer(wq->mem.dblk_buf_canvas);
	/* reference buffer , need set before each frame start */
	avc_init_reference_buffer(wq->mem.ref_buf_canvas);
	avc_init_assit_buffer(wq); /* assitant buffer for microcode */
	ie_me_mb_type = 0;
	avc_init_ie_me_parameter(wq, wq->pic.init_qppicture);
	WRITE_HREG(ENCODER_STATUS, ENCODER_IDLE);

#ifdef MULTI_SLICE_MC
	/* Configure fixed-slice mode: module parameter wins; otherwise derive
	 * macroblocks-per-slice from rows_per_slice (picture height in MBs).
	 */
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
		(wq->pic.encoder_height + 15) >> 4) {
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif

	amvenc_start();

	return 0;
}

/*
 * amvenc_avc_stop() - release the encoder IRQ (if held), stop the
 * microcode and power the encoder block down.
 */
void amvenc_avc_stop(void)
{
	if ((encode_manager.irq_num >= 0) &&
		(encode_manager.irq_requested == true)) {
		encode_manager.irq_requested = false;
		free_irq(encode_manager.irq_num, &encode_manager);
	}
	amvenc_stop();
	avc_poweroff();
}

/*
 * avc_init() - (re)start the encoder for @wq with its selected ucode.
 * NOTE(review): the result of amvenc_avc_start() is only logged; this
 * function always returns 0, so callers never see a start failure.
 */
static s32 avc_init(struct encode_wq_s *wq)
{
	s32 r = 0;

	encode_manager.ucode_index = wq->ucode_index;
	r = amvenc_avc_start(wq, clock_level);

	enc_pr(LOG_DEBUG,
		"init avc encode. microcode %d, ret=%d, wq:%px\n",
		encode_manager.ucode_index, r, (void *)wq);
	return 0;
}

/*
 * amvenc_avc_light_reset() - recover from a stalled encode: full stop,
 * wait @value ms, then restart with the full ucode.
 */
static s32 amvenc_avc_light_reset(struct encode_wq_s *wq, u32 value)
{
	s32 r = 0;

	amvenc_avc_stop();

	mdelay(value);

	encode_manager.ucode_index = UCODE_MODE_FULL;
	r = amvenc_avc_start(wq, clock_level);

	enc_pr(LOG_DEBUG,
		"amvenc_avc_light_reset finish, wq:%px, ret=%d\n",
		(void *)wq, r);
	return r;
}

#ifdef CONFIG_CMA
/*
 * checkCMA() - how many encoder instances fit in the CMA pool
 * (pool size divided by the per-instance minimum buffer size).
 */
static u32 checkCMA(void)
{
	u32 ret;

	if (encode_manager.cma_pool_size > 0) {
		ret = encode_manager.cma_pool_size;
		ret = ret / MIN_SIZE;
	} else
		ret = 0;
	return ret;
}
#endif

/* file operation */
/*
 * amvenc_avc_open() - create a per-fd work queue and its DMA buffer.
 * Fails with -EBUSY if the shared hcodec is busy (JPEG) or no instance
 * slot is free, -ENOMEM if the buffer cannot be allocated.
 */
static s32 amvenc_avc_open(struct inode *inode, struct file *file)
{
	s32 r = 0;
	struct encode_wq_s *wq = NULL;

	file->private_data = NULL;
	enc_pr(LOG_DEBUG, "avc open\n");

#ifdef CONFIG_AM_JPEG_ENCODER
	if (jpegenc_on() == true) {
		enc_pr(LOG_ERROR,
			"hcodec in use for JPEG Encode now.\n");
		return -EBUSY;
	}
#endif

#ifdef CONFIG_CMA
	/* First open without reserved memory: size the instance limit
	 * from the CMA pool, once.
	 */
	if ((encode_manager.use_reserve == false) &&
		(encode_manager.check_cma == false)) {
		encode_manager.max_instance = checkCMA();
		if (encode_manager.max_instance > 0) {
			enc_pr(LOG_DEBUG,
				"amvenc_avc check CMA pool success, max instance: %d.\n",
				encode_manager.max_instance);
		} else {
			enc_pr(LOG_ERROR,
				"amvenc_avc CMA pool too small.\n");
		}
		encode_manager.check_cma = true;
	}
#endif

	wq = create_encode_work_queue();
	if (wq == NULL) {
		enc_pr(LOG_ERROR, "amvenc_avc create instance fail.\n");
		return -EBUSY;
	}

#ifdef CONFIG_CMA
	/* Reserved-memory instances get their buffer inside
	 * create_encode_work_queue(); CMA instances allocate here.
	 */
	if (encode_manager.use_reserve == false) {
		wq->mem.buf_start = codec_mm_alloc_for_dma(ENCODE_NAME,
			MIN_SIZE >> PAGE_SHIFT, 0,
			CODEC_MM_FLAGS_CPU);
		if (wq->mem.buf_start) {
			wq->mem.buf_size = MIN_SIZE;
			enc_pr(LOG_DEBUG,
				"allocating phys 0x%x, size %dk, wq:%p.\n",
				wq->mem.buf_start,
				wq->mem.buf_size >> 10, (void *)wq);
		} else {
			enc_pr(LOG_ERROR,
				"CMA failed to allocate dma buffer for %s, wq:%p.\n",
				encode_manager.this_pdev->name,
				(void *)wq);
			destroy_encode_work_queue(wq);
			return -ENOMEM;
		}
	}
#endif

	if (wq->mem.buf_start == 0 ||
		wq->mem.buf_size < MIN_SIZE) {
		enc_pr(LOG_ERROR,
			"alloc mem failed, start: 0x%x, size:0x%x, wq:%p.\n",
			wq->mem.buf_start,
			wq->mem.buf_size, (void *)wq);
		destroy_encode_work_queue(wq);
		return -ENOMEM;
	}

	memcpy(&wq->mem.bufspec, &amvenc_buffspec[0],
		sizeof(struct BuffInfo_s));

	enc_pr(LOG_DEBUG,
		"amvenc_avc memory config success, buff start:0x%x, size is 0x%x, wq:%p.\n",
		wq->mem.buf_start, wq->mem.buf_size, (void *)wq);

	file->private_data = (void *) wq;
	return r;
}

/*
 * amvenc_avc_release() - tear down the per-fd work queue created in open.
 */
static s32 amvenc_avc_release(struct inode *inode, struct file *file)
{
	struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;

	if (wq) {
		enc_pr(LOG_DEBUG, "avc release, wq:%p\n", (void *)wq);
		destroy_encode_work_queue(wq);
	}
	return 0;
}

/*
 * amvenc_avc_ioctl() - userspace control interface for the encoder.
 * NOTE(review): error paths return -1 rather than proper -errno values,
 * and put_user()/get_user() results are not checked — kept as-is since
 * userspace may depend on the current return convention.
 */
static long amvenc_avc_ioctl(struct file *file, u32 cmd, ulong arg)
{
	long r = 0;
	u32 amrisc_cmd = 0;
	struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;
#define MAX_ADDR_INFO_SIZE 52
	u32 addr_info[MAX_ADDR_INFO_SIZE + 4];
	ulong argV;
	u32 buf_start;
	s32 canvas = -1;
	struct canvas_s dst;
	u32 cpuid;
	memset(&dst, 0, sizeof(struct canvas_s));
	switch (cmd) {
	case AMVENC_AVC_IOC_GET_ADDR:
		/* Tell userspace which reference canvas set is current. */
		if ((wq->mem.ref_buf_canvas & 0xff) == (ENC_CANVAS_OFFSET))
			put_user(1, (u32 *)arg);
		else
			put_user(2, (u32 *)arg);
		break;
	case AMVENC_AVC_IOC_INPUT_UPDATE:
		break;
	case AMVENC_AVC_IOC_NEW_CMD:
		/* Copy an encode request descriptor in and queue it. */
		if (copy_from_user(addr_info, (void *)arg,
			MAX_ADDR_INFO_SIZE * sizeof(u32))) {
			enc_pr(LOG_ERROR,
				"avc get new cmd error, wq:%p.\n", (void *)wq);
			return -1;
		}
		r = convert_request(wq, addr_info);
		if (r == 0)
			r = encode_wq_add_request(wq);
		if (r) {
			enc_pr(LOG_ERROR,
				"avc add new request error, wq:%p.\n",
				(void *)wq);
		}
		break;
	case AMVENC_AVC_IOC_GET_STAGE:
		put_user(wq->hw_status, (u32 *)arg);
		break;
	case AMVENC_AVC_IOC_GET_OUTPUT_SIZE:
		/* Return bitstream size plus the mode-decision weights. */
		addr_info[0] = wq->output_size;
		addr_info[1] = wq->me_weight;
		addr_info[2] = wq->i4_weight;
		addr_info[3] = wq->i16_weight;
		r = copy_to_user((u32 *)arg,
			addr_info, 4 * sizeof(u32));
		break;
	case AMVENC_AVC_IOC_CONFIG_INIT:
		/* Configure picture geometry, then hand the internal buffer
		 * layout (start/size pairs) back to userspace.
		 */
		if (copy_from_user(addr_info, (void *)arg,
			MAX_ADDR_INFO_SIZE * sizeof(u32))) {
			enc_pr(LOG_ERROR,
				"avc config init error, wq:%p.\n", (void *)wq);
			return -1;
		}
		wq->ucode_index = UCODE_MODE_FULL;
#ifdef MULTI_SLICE_MC
		wq->pic.rows_per_slice = addr_info[1];
		enc_pr(LOG_DEBUG,
			"avc init -- rows_per_slice: %d, wq: %p.\n",
			wq->pic.rows_per_slice, (void *)wq);
#endif
		enc_pr(LOG_DEBUG,
			"avc init as mode %d, wq: %px.\n",
			wq->ucode_index, (void *)wq);

		/* Reject dimensions beyond what the buffer spec supports. */
		if (addr_info[2] > wq->mem.bufspec.max_width ||
			addr_info[3] > wq->mem.bufspec.max_height) {
			enc_pr(LOG_ERROR,
				"avc config init- encode size %dx%d is larger than supported (%dx%d). wq:%p.\n",
				addr_info[2], addr_info[3],
				wq->mem.bufspec.max_width,
				wq->mem.bufspec.max_height, (void *)wq);
			return -1;
		}

		wq->pic.encoder_width = addr_info[2];
		wq->pic.encoder_height = addr_info[3];
		pr_err("hwenc: AMVENC_AVC_IOC_CONFIG_INIT: w:%d, h:%d\n", wq->pic.encoder_width, wq->pic.encoder_height);

		wq->pic.color_space = addr_info[4];
		pr_err("hwenc: AMVENC_AVC_IOC_CONFIG_INIT, wq->pic.color_space=%#x\n", wq->pic.color_space);

		/*
		if (wq->pic.encoder_width *
			wq->pic.encoder_height >= 1280 * 720)
			clock_level = 6;
		else
			clock_level = 5;
		*/
		avc_buffspec_init(wq);
		complete(&encode_manager.event.request_in_com);
		addr_info[1] = wq->mem.bufspec.dct.buf_start;
		addr_info[2] = wq->mem.bufspec.dct.buf_size;
		addr_info[3] = wq->mem.bufspec.bitstream.buf_start;
		addr_info[4] = wq->mem.bufspec.bitstream.buf_size;
		addr_info[5] = wq->mem.bufspec.scale_buff.buf_start;
		addr_info[6] = wq->mem.bufspec.scale_buff.buf_size;
		addr_info[7] = wq->mem.bufspec.dump_info.buf_start;
		addr_info[8] =
			wq->mem.bufspec.dump_info.buf_size;
		addr_info[9] = wq->mem.bufspec.cbr_info.buf_start;
		addr_info[10] = wq->mem.bufspec.cbr_info.buf_size;
		r = copy_to_user((u32 *)arg, addr_info, 11*sizeof(u32));
		break;
	case AMVENC_AVC_IOC_FLUSH_CACHE:
		/* addr_info: [0]=buffer id, [1]=start offset, [2]=end offset */
		if (copy_from_user(addr_info, (void *)arg,
			MAX_ADDR_INFO_SIZE * sizeof(u32))) {
			enc_pr(LOG_ERROR,
				"avc flush cache error, wq: %p.\n", (void *)wq);
			return -1;
		}
		buf_start = getbuffer(wq, addr_info[0]);
		dma_flush(buf_start + addr_info[1],
			addr_info[2] - addr_info[1]);
		break;
	case AMVENC_AVC_IOC_FLUSH_DMA:
		if (copy_from_user(addr_info, (void *)arg,
			MAX_ADDR_INFO_SIZE * sizeof(u32))) {
			enc_pr(LOG_ERROR,
				"avc flush dma error, wq:%p.\n", (void *)wq);
			return -1;
		}
		buf_start = getbuffer(wq, addr_info[0]);
		cache_flush(buf_start + addr_info[1],
			addr_info[2] - addr_info[1]);
		break;
	case AMVENC_AVC_IOC_GET_BUFFINFO:
		put_user(wq->mem.buf_size, (u32 *)arg);
		break;
	case AMVENC_AVC_IOC_GET_DEVINFO:
		if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) {
			/* send the same id as GXTVBB to upper*/
			r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXTVBB,
				strlen(AMVENC_DEVINFO_GXTVBB));
		} else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXTVBB) {
			r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXTVBB,
				strlen(AMVENC_DEVINFO_GXTVBB));
		} else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) {
			r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXBB,
				strlen(AMVENC_DEVINFO_GXBB));
		} else if (get_cpu_type() == MESON_CPU_MAJOR_ID_MG9TV) {
			r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_G9,
				strlen(AMVENC_DEVINFO_G9));
		} else {
			r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_M8,
				strlen(AMVENC_DEVINFO_M8));
		}
		break;
	case AMVENC_AVC_IOC_SUBMIT:
		/* Maintain H.264 picture-order state per submitted frame. */
		get_user(amrisc_cmd, ((u32 *)arg));
		if (amrisc_cmd == ENCODER_IDR) {
			wq->pic.idr_pic_id++;
			if (wq->pic.idr_pic_id > 65535)
				wq->pic.idr_pic_id = 0;
			wq->pic.pic_order_cnt_lsb = 2;
			wq->pic.frame_number = 1;
		} else if (amrisc_cmd == ENCODER_NON_IDR) {
#ifdef H264_ENC_SVC
			/* only update when there is reference frame */
			if (wq->pic.enable_svc == 0 || wq->pic.non_ref_cnt == 0) {
				wq->pic.frame_number++;
				enc_pr(LOG_INFO, "Increase frame_num to %d\n",
					wq->pic.frame_number);
			}
#else
			wq->pic.frame_number++;
#endif

			wq->pic.pic_order_cnt_lsb += 2;
			if (wq->pic.frame_number > 65535)
				wq->pic.frame_number = 0;
		}
#ifdef H264_ENC_SVC
		/* only update when there is reference frame */
		if (wq->pic.enable_svc == 0 || wq->pic.non_ref_cnt == 0) {
			/* Swap deblock/reference canvases: this frame's
			 * output becomes the next frame's reference.
			 */
			amrisc_cmd = wq->mem.dblk_buf_canvas;
			wq->mem.dblk_buf_canvas = wq->mem.ref_buf_canvas;
			/* current dblk buffer as next reference buffer */
			wq->mem.ref_buf_canvas = amrisc_cmd;
			enc_pr(LOG_INFO,
				"switch buffer enable %d cnt %d\n",
				wq->pic.enable_svc, wq->pic.non_ref_cnt);
		}
		if (wq->pic.enable_svc) {
			wq->pic.non_ref_cnt ++;
			if (wq->pic.non_ref_cnt > wq->pic.non_ref_limit) {
				enc_pr(LOG_INFO, "Svc clear cnt %d conf %d\n",
					wq->pic.non_ref_cnt,
					wq->pic.non_ref_limit);
				wq->pic.non_ref_cnt = 0;
			} else
				enc_pr(LOG_INFO,"Svc increase non ref counter to %d\n",
					wq->pic.non_ref_cnt );
		}
#else
		amrisc_cmd = wq->mem.dblk_buf_canvas;
		wq->mem.dblk_buf_canvas = wq->mem.ref_buf_canvas;
		/* current dblk buffer as next reference buffer */
		wq->mem.ref_buf_canvas = amrisc_cmd;
#endif
		break;
	case AMVENC_AVC_IOC_READ_CANVAS:
		/* argV packs up to three canvas ids (one per byte). Return
		 * the first canvas address and the total span covered.
		 * NOTE(review): dma_flush() runs even when no canvas id was
		 * given (dst is then all-zero) — presumably harmless; verify.
		 */
		get_user(argV, ((u32 *)arg));
		canvas = argV;
		if (canvas & 0xff) {
			canvas_read(canvas & 0xff, &dst);
			addr_info[0] = dst.addr;
			if ((canvas & 0xff00) >> 8)
				canvas_read((canvas & 0xff00) >> 8, &dst);
			if ((canvas & 0xff0000) >> 16)
				canvas_read((canvas & 0xff0000) >> 16, &dst);
			addr_info[1] = dst.addr - addr_info[0] +
				dst.width * dst.height;
		} else {
			addr_info[0] = 0;
			addr_info[1] = 0;
		}
		dma_flush(dst.addr, dst.width * dst.height * 3 / 2);
		r = copy_to_user((u32 *)arg, addr_info, 2 * sizeof(u32));
		break;
	case AMVENC_AVC_IOC_MAX_INSTANCE:
		put_user(encode_manager.max_instance, (u32 *)arg);
		break;
	case AMVENC_AVC_IOC_QP_MODE:
		get_user(qp_mode, ((u32 *)arg));
		pr_info("qp_mode %d\n", qp_mode);
		break;
	case AMVENC_AVC_IOC_GET_CPU_ID:
		cpuid = (u32) get_cpu_major_id();
		pr_err("AMVENC_AVC_IOC_GET_CPU_ID return %u\n", cpuid);
		put_user(cpuid, (u32 *)arg);
		break;
	default:
		r = -1;
		break;
	}
	return r;
}

#ifdef CONFIG_COMPAT
/*
 * amvenc_avc_compat_ioctl() - 32-bit compat shim: translate the user
 * pointer and forward to the native ioctl handler.
 */
static long amvenc_avc_compat_ioctl(struct file *filp,
	unsigned int cmd, unsigned long args)
{
	unsigned long ret;

	args = (unsigned long)compat_ptr(args);
	ret = amvenc_avc_ioctl(filp, cmd, args);
	return ret;
}
#endif

/*
 * avc_mmap() - map the instance's DMA buffer into userspace.
 * A zero pgoff maps from the buffer start.
 */
static s32 avc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct encode_wq_s *wq = (struct encode_wq_s *)filp->private_data;
	ulong off = vma->vm_pgoff << PAGE_SHIFT;
	ulong vma_size = vma->vm_end - vma->vm_start;

	if (vma_size == 0) {
		enc_pr(LOG_ERROR, "vma_size is 0, wq:%p.\n", (void *)wq);
		return -EAGAIN;
	}
	if (!off)
		off += wq->mem.buf_start;
	enc_pr(LOG_ALL,
		"vma_size is %ld , off is %ld, wq:%p.\n",
		vma_size, off, (void *)wq);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
	/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
	if (remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		enc_pr(LOG_ERROR,
			"set_cached: failed remap_pfn_range, wq:%p.\n",
			(void *)wq);
		return -EAGAIN;
	}
	return 0;
}

/*
 * amvenc_avc_poll() - report readiness: POLLIN once a queued request has
 * completed (request_ready is decremented as the event is consumed).
 */
static u32 amvenc_avc_poll(struct file *file, poll_table *wait_table)
{
	struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;

	poll_wait(file, &wq->request_complete, wait_table);

	if (atomic_read(&wq->request_ready)) {
		atomic_dec(&wq->request_ready);
		return POLLIN | POLLRDNORM;
	}
	return 0;
}

static const struct file_operations amvenc_avc_fops = {
	.owner = THIS_MODULE,
	.open = amvenc_avc_open,
	.mmap = avc_mmap,
	.release = amvenc_avc_release,
	.unlocked_ioctl = amvenc_avc_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = amvenc_avc_compat_ioctl,
#endif
	.poll = amvenc_avc_poll,
};

/* work queue function */
/*
 * encode_process_request() - run one queued encode command on the HW,
 * wait for the ISR to flag completion, then collect sizes and flush the
 * relevant buffers. On timeout/failure the encoder is light-reset.
 * Called only from the monitor thread.
 */
static s32 encode_process_request(struct encode_manager_s *manager,
	struct encode_queue_item_s *pitem)
{
	s32 ret = 0;
	struct encode_wq_s *wq = pitem->request.parent;
	struct encode_request_s *request = &pitem->request;
	u32 timeout = (request->timeout == 0) ?
		1 : msecs_to_jiffies(request->timeout);
	u32 buf_start = 0;
	u32 size = 0;
	/* Luma+chroma bytes for a picture rounded to 32x16 alignment. */
	u32 flush_size = ((wq->pic.encoder_width + 31) >> 5 << 5) *
		((wq->pic.encoder_height + 15) >> 4 << 4) * 3 / 2;

	struct enc_dma_cfg *cfg = NULL;
	int i = 0;

#ifdef H264_ENC_CBR
	/* Push the CBR rate-control table to the microcode before a frame. */
	if (request->cmd == ENCODER_IDR || request->cmd == ENCODER_NON_IDR) {
		if (request->flush_flag & AMVENC_FLUSH_FLAG_CBR
			&& get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
			void *vaddr = wq->mem.cbr_info_ddr_virt_addr;
			ConvertTable2Risc(vaddr, 0xa00);
			//buf_start = getbuffer(wq, ENCODER_BUFFER_CBR);
			codec_mm_dma_flush(vaddr, wq->mem.cbr_info_ddr_size, DMA_TO_DEVICE);
		}
	}
#endif



Again:
	amvenc_avc_start_cmd(wq, request);

	if (no_timeout) {
		wait_event_interruptible(manager->event.hw_complete,
			(manager->encode_hw_status == ENCODER_IDR_DONE
			|| manager->encode_hw_status == ENCODER_NON_IDR_DONE
			|| manager->encode_hw_status == ENCODER_SEQUENCE_DONE
			|| manager->encode_hw_status == ENCODER_PICTURE_DONE));
	} else {
		wait_event_interruptible_timeout(manager->event.hw_complete,
			((manager->encode_hw_status == ENCODER_IDR_DONE)
			|| (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
			|| (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
			|| (manager->encode_hw_status == ENCODER_PICTURE_DONE)),
			timeout);
	}

	/* A SEQUENCE command chains straight into PICTURE (SPS then PPS). */
	if ((request->cmd == ENCODER_SEQUENCE) &&
		(manager->encode_hw_status == ENCODER_SEQUENCE_DONE)) {
		wq->sps_size = READ_HREG(HCODEC_VLC_TOTAL_BYTES);
		wq->hw_status = manager->encode_hw_status;
		request->cmd = ENCODER_PICTURE;
		goto Again;
	} else if ((request->cmd == ENCODER_PICTURE) &&
		(manager->encode_hw_status == ENCODER_PICTURE_DONE)) {
		wq->pps_size =
			READ_HREG(HCODEC_VLC_TOTAL_BYTES) - wq->sps_size;
		wq->hw_status = manager->encode_hw_status;
		if (request->flush_flag & AMVENC_FLUSH_FLAG_OUTPUT) {
			buf_start = getbuffer(wq, ENCODER_BUFFER_OUTPUT);
			cache_flush(buf_start,
				wq->sps_size + wq->pps_size);
		}
		/* Pack SPS size in the high half-word, PPS in the low. */
		wq->output_size = (wq->sps_size << 16) | wq->pps_size;
	} else {
		wq->hw_status = manager->encode_hw_status;

		if ((manager->encode_hw_status == ENCODER_IDR_DONE) ||
			(manager->encode_hw_status == ENCODER_NON_IDR_DONE)) {
			wq->output_size = READ_HREG(HCODEC_VLC_TOTAL_BYTES);

			if (request->flush_flag & AMVENC_FLUSH_FLAG_OUTPUT) {
				buf_start = getbuffer(wq, ENCODER_BUFFER_OUTPUT);
				cache_flush(buf_start, wq->output_size);
			}

			if (request->flush_flag & AMVENC_FLUSH_FLAG_DUMP) {
				buf_start = getbuffer(wq, ENCODER_BUFFER_DUMP);
				size = wq->mem.dump_info_ddr_size;
				cache_flush(buf_start, size);
				//enc_pr(LOG_DEBUG, "CBR flush dump_info done");
			}

			if (request->flush_flag & AMVENC_FLUSH_FLAG_REFERENCE) {
				u32 ref_id = ENCODER_BUFFER_REF0;

				if ((wq->mem.ref_buf_canvas & 0xff) == (ENC_CANVAS_OFFSET))
					ref_id = ENCODER_BUFFER_REF0;
				else
					ref_id = ENCODER_BUFFER_REF1;

				buf_start = getbuffer(wq, ref_id);
				cache_flush(buf_start, flush_size);
			}
		} else {
			/* Neither done-status arrived: the HW stalled or the
			 * wait was interrupted — dump state and reset.
			 */
			manager->encode_hw_status = ENCODER_ERROR;
			enc_pr(LOG_DEBUG, "avc encode light reset --- ");
			enc_pr(LOG_DEBUG,
				"frame type: %s, size: %dx%d, wq: %px\n",
				(request->cmd == ENCODER_IDR) ?
"IDR" : "P", + wq->pic.encoder_width, + wq->pic.encoder_height, (void *)wq); + enc_pr(LOG_DEBUG, + "mb info: 0x%x, encode status: 0x%x, dct status: 0x%x ", + READ_HREG(HCODEC_VLC_MB_INFO), + READ_HREG(ENCODER_STATUS), + READ_HREG(HCODEC_QDCT_STATUS_CTRL)); + enc_pr(LOG_DEBUG, + "vlc status: 0x%x, me status: 0x%x, risc pc:0x%x, debug:0x%x\n", + READ_HREG(HCODEC_VLC_STATUS_CTRL), + READ_HREG(HCODEC_ME_STATUS), + READ_HREG(HCODEC_MPC_E), + READ_HREG(DEBUG_REG)); + amvenc_avc_light_reset(wq, 30); + } + + for (i = 0; i < request->plane_num; i++) { + cfg = &request->dma_cfg[i]; + enc_pr(LOG_INFO, "request vaddr %p, paddr %p\n", + cfg->vaddr, cfg->paddr); + if (cfg->fd >= 0 && cfg->vaddr != NULL) + enc_dma_buf_unmap(cfg); + } + } + atomic_inc(&wq->request_ready); + wake_up_interruptible(&wq->request_complete); + return ret; +} + +s32 encode_wq_add_request(struct encode_wq_s *wq) +{ + struct encode_queue_item_s *pitem = NULL; + struct list_head *head = NULL; + struct encode_wq_s *tmp = NULL; + bool find = false; + + spin_lock(&encode_manager.event.sem_lock); + + head = &encode_manager.wq; + list_for_each_entry(tmp, head, list) { + if ((wq == tmp) && (wq != NULL)) { + find = true; + break; + } + } + + if (find == false) { + enc_pr(LOG_ERROR, "current wq (%p) doesn't register.\n", + (void *)wq); + goto error; + } + + if (list_empty(&encode_manager.free_queue)) { + enc_pr(LOG_ERROR, "work queue no space, wq:%p.\n", + (void *)wq); + goto error; + } + + pitem = list_entry(encode_manager.free_queue.next, + struct encode_queue_item_s, list); + + if (IS_ERR(pitem)) + goto error; + + memcpy(&pitem->request, &wq->request, sizeof(struct encode_request_s)); + + enc_pr(LOG_INFO, "new work request %p, vaddr %p, paddr %p\n", &pitem->request, + pitem->request.dma_cfg[0].vaddr,pitem->request.dma_cfg[0].paddr); + + memset(&wq->request, 0, sizeof(struct encode_request_s)); + wq->request.dma_cfg[0].fd = -1; + wq->request.dma_cfg[1].fd = -1; + wq->request.dma_cfg[2].fd = -1; + wq->hw_status = 
0; + wq->output_size = 0; + pitem->request.parent = wq; + list_move_tail(&pitem->list, &encode_manager.process_queue); + spin_unlock(&encode_manager.event.sem_lock); + + enc_pr(LOG_INFO, + "add new work ok, cmd:%d, ucode mode: %d, wq:%p.\n", + pitem->request.cmd, pitem->request.ucode_mode, + (void *)wq); + complete(&encode_manager.event.request_in_com);/* new cmd come in */ + + return 0; +error: + spin_unlock(&encode_manager.event.sem_lock); + + return -1; +} + +struct encode_wq_s *create_encode_work_queue(void) +{ + struct encode_wq_s *encode_work_queue = NULL; + bool done = false; + u32 i, max_instance; + struct Buff_s *reserve_buff; + + encode_work_queue = kzalloc(sizeof(struct encode_wq_s), GFP_KERNEL); + if (IS_ERR(encode_work_queue)) { + enc_pr(LOG_ERROR, "can't create work queue\n"); + return NULL; + } + max_instance = encode_manager.max_instance; + encode_work_queue->pic.init_qppicture = 26; + encode_work_queue->pic.log2_max_frame_num = 4; + encode_work_queue->pic.log2_max_pic_order_cnt_lsb = 4; + encode_work_queue->pic.idr_pic_id = 0; + encode_work_queue->pic.frame_number = 0; + encode_work_queue->pic.pic_order_cnt_lsb = 0; +#ifdef H264_ENC_SVC + /* Get settings from the global*/ + encode_work_queue->pic.enable_svc = svc_enable; + encode_work_queue->pic.non_ref_limit = svc_ref_conf; + encode_work_queue->pic.non_ref_cnt = 0; + enc_pr(LOG_INFO, "svc conf enable %d, duration %d\n", + encode_work_queue->pic.enable_svc, + encode_work_queue->pic.non_ref_limit); +#endif + encode_work_queue->ucode_index = UCODE_MODE_FULL; + +#ifdef H264_ENC_CBR + encode_work_queue->cbr_info.block_w = 16; + encode_work_queue->cbr_info.block_h = 9; + encode_work_queue->cbr_info.long_th = CBR_LONG_THRESH; + encode_work_queue->cbr_info.start_tbl_id = START_TABLE_ID; + encode_work_queue->cbr_info.short_shift = CBR_SHORT_SHIFT; + encode_work_queue->cbr_info.long_mb_num = CBR_LONG_MB_NUM; +#endif + init_waitqueue_head(&encode_work_queue->request_complete); + 
atomic_set(&encode_work_queue->request_ready, 0); + spin_lock(&encode_manager.event.sem_lock); + if (encode_manager.wq_count < encode_manager.max_instance) { + list_add_tail(&encode_work_queue->list, &encode_manager.wq); + encode_manager.wq_count++; + if (encode_manager.use_reserve == true) { + for (i = 0; i < max_instance; i++) { + reserve_buff = &encode_manager.reserve_buff[i]; + if (reserve_buff->used == false) { + encode_work_queue->mem.buf_start = + reserve_buff->buf_start; + encode_work_queue->mem.buf_size = + reserve_buff->buf_size; + reserve_buff->used = true; + done = true; + break; + } + } + } else + done = true; + } + spin_unlock(&encode_manager.event.sem_lock); + if (done == false) { + kfree(encode_work_queue); + encode_work_queue = NULL; + enc_pr(LOG_ERROR, "too many work queue!\n"); + } + return encode_work_queue; /* find it */ +} + +static void _destroy_encode_work_queue(struct encode_manager_s *manager, + struct encode_wq_s **wq, + struct encode_wq_s *encode_work_queue, + bool *find) +{ + struct list_head *head; + struct encode_wq_s *wp_tmp = NULL; + u32 i, max_instance; + struct Buff_s *reserve_buff; + u32 buf_start = encode_work_queue->mem.buf_start; + + max_instance = manager->max_instance; + head = &manager->wq; + list_for_each_entry_safe((*wq), wp_tmp, head, list) { + if ((*wq) && (*wq == encode_work_queue)) { + list_del(&(*wq)->list); + if (manager->use_reserve == true) { + for (i = 0; i < max_instance; i++) { + reserve_buff = + &manager->reserve_buff[i]; + if (reserve_buff->used == true && + buf_start == + reserve_buff->buf_start) { + reserve_buff->used = false; + break; + } + } + } + *find = true; + manager->wq_count--; + enc_pr(LOG_DEBUG, + "remove encode_work_queue %p success, %s line %d.\n", + (void *)encode_work_queue, + __func__, __LINE__); + break; + } + } +} + +s32 destroy_encode_work_queue(struct encode_wq_s *encode_work_queue) +{ + struct encode_queue_item_s *pitem, *tmp; + struct encode_wq_s *wq = NULL; + bool find = false; + + 
	struct list_head *head;

	if (encode_work_queue) {
		spin_lock(&encode_manager.event.sem_lock);
		/* If this wq is currently being processed, wait for the
		 * monitor thread to signal completion before removing it.
		 */
		if (encode_manager.current_wq == encode_work_queue) {
			encode_manager.remove_flag = true;
			spin_unlock(&encode_manager.event.sem_lock);
			enc_pr(LOG_DEBUG,
				"warning--Destroy the running queue, should not be here.\n");
			wait_for_completion(
				&encode_manager.event.process_complete);
			spin_lock(&encode_manager.event.sem_lock);
		} /* else we can delete it safely. */

		/* Return any still-queued requests of this wq to the free
		 * list so they are never executed against freed state.
		 */
		head = &encode_manager.process_queue;
		list_for_each_entry_safe(pitem, tmp, head, list) {
			if (pitem && pitem->request.parent ==
				encode_work_queue) {
				pitem->request.parent = NULL;
				enc_pr(LOG_DEBUG,
					"warning--remove not process request, should not be here.\n");
				list_move_tail(&pitem->list,
					&encode_manager.free_queue);
			}
		}

		_destroy_encode_work_queue(&encode_manager, &wq,
			encode_work_queue, &find);
		spin_unlock(&encode_manager.event.sem_lock);
#ifdef CONFIG_CMA
		if (encode_work_queue->mem.buf_start) {
			if (wq->mem.cbr_info_ddr_virt_addr != NULL) {
				codec_mm_unmap_phyaddr(wq->mem.cbr_info_ddr_virt_addr);
				wq->mem.cbr_info_ddr_virt_addr = NULL;
			}
			codec_mm_free_for_dma(
				ENCODE_NAME,
				encode_work_queue->mem.buf_start);
			encode_work_queue->mem.buf_start = 0;

		}
#endif
		kfree(encode_work_queue);
		/* Nudge the monitor so it notices the wq list may be empty. */
		complete(&encode_manager.event.request_in_com);
	}
	return 0;
}

/*
 * encode_monitor_thread() - RT kthread that serializes all hardware
 * access: initializes the encoder when the first wq appears, powers it
 * off when the last wq disappears, and runs queued requests one at a
 * time via encode_process_request().
 */
static s32 encode_monitor_thread(void *data)
{
	struct encode_manager_s *manager = (struct encode_manager_s *)data;
	struct encode_queue_item_s *pitem = NULL;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
	s32 ret = 0;

	enc_pr(LOG_DEBUG, "encode workqueue monitor start.\n");
	sched_setscheduler(current, SCHED_FIFO, &param);
	allow_signal(SIGTERM);

	/* setup current_wq here. */
	while (manager->process_queue_state != ENCODE_PROCESS_QUEUE_STOP) {
		if (kthread_should_stop())
			break;

		/* Block until a request arrives (or we are signalled). */
		ret = wait_for_completion_interruptible(
			&manager->event.request_in_com);

		if (ret == -ERESTARTSYS)
			break;

		if (kthread_should_stop())
			break;

		/* Lazy hardware init on the first registered wq. */
		if (manager->inited == false) {
			spin_lock(&manager->event.sem_lock);

			if (!list_empty(&manager->wq)) {
				struct encode_wq_s *first_wq =
					list_entry(manager->wq.next,
						struct encode_wq_s, list);
				manager->current_wq = first_wq;
				spin_unlock(&manager->event.sem_lock);

				if (first_wq) {
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
					if (!manager->context)
						manager->context =
							create_ge2d_work_queue();
#endif
					avc_init(first_wq);
					manager->inited = true;
				}
				spin_lock(&manager->event.sem_lock);
				manager->current_wq = NULL;
				spin_unlock(&manager->event.sem_lock);
				if (manager->remove_flag) {
					complete(
						&manager
						->event.process_complete);
					manager->remove_flag = false;
				}
			} else
				spin_unlock(&manager->event.sem_lock);
			continue;
		}

		spin_lock(&manager->event.sem_lock);
		pitem = NULL;

		/* No instances left: power down and wait for the next open. */
		if (list_empty(&manager->wq)) {
			spin_unlock(&manager->event.sem_lock);
			manager->inited = false;
			amvenc_avc_stop();

#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
			if (manager->context) {
				destroy_ge2d_work_queue(manager->context);
				manager->context = NULL;
			}
#endif

			enc_pr(LOG_DEBUG, "power off encode.\n");
			continue;
		} else if (!list_empty(&manager->process_queue)) {
			/* Dequeue the next request and mark it in-flight. */
			pitem = list_entry(manager->process_queue.next,
				struct encode_queue_item_s, list);
			list_del(&pitem->list);
			manager->current_item = pitem;
			manager->current_wq = pitem->request.parent;
		}

		spin_unlock(&manager->event.sem_lock);

		if (pitem) {
			encode_process_request(manager, pitem);
			spin_lock(&manager->event.sem_lock);
			list_add_tail(&pitem->list, &manager->free_queue);
			manager->current_item = NULL;
			manager->last_wq = manager->current_wq;
			manager->current_wq = NULL;
spin_unlock(&manager->event.sem_lock); + } + + if (manager->remove_flag) { + complete(&manager->event.process_complete); + manager->remove_flag = false; + } + } + while (!kthread_should_stop()) + msleep(20); + + enc_pr(LOG_DEBUG, "exit encode_monitor_thread.\n"); + return 0; +} + +static s32 encode_start_monitor(void) +{ + s32 ret = 0; + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) { + y_tnr_mot2alp_nrm_gain = 216; + y_tnr_mot2alp_dis_gain = 144; + c_tnr_mot2alp_nrm_gain = 216; + c_tnr_mot2alp_dis_gain = 144; + } else { + /* more tnr */ + y_tnr_mot2alp_nrm_gain = 144; + y_tnr_mot2alp_dis_gain = 96; + c_tnr_mot2alp_nrm_gain = 144; + c_tnr_mot2alp_dis_gain = 96; + } + + enc_pr(LOG_DEBUG, "encode start monitor.\n"); + encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_START; + encode_manager.encode_thread = kthread_run(encode_monitor_thread, + &encode_manager, "encode_monitor"); + if (IS_ERR(encode_manager.encode_thread)) { + ret = PTR_ERR(encode_manager.encode_thread); + encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_STOP; + enc_pr(LOG_ERROR, + "encode monitor : failed to start kthread (%d)\n", ret); + } + return ret; +} + +static s32 encode_stop_monitor(void) +{ + enc_pr(LOG_DEBUG, "stop encode monitor thread\n"); + if (encode_manager.encode_thread) { + spin_lock(&encode_manager.event.sem_lock); + if (!list_empty(&encode_manager.wq)) { + u32 count = encode_manager.wq_count; + + spin_unlock(&encode_manager.event.sem_lock); + enc_pr(LOG_ERROR, + "stop encode monitor thread error, active wq (%d) is not 0.\n", + count); + return -1; + } + spin_unlock(&encode_manager.event.sem_lock); + encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_STOP; + send_sig(SIGTERM, encode_manager.encode_thread, 1); + complete(&encode_manager.event.request_in_com); + kthread_stop(encode_manager.encode_thread); + encode_manager.encode_thread = NULL; + kfree(mc_addr); + mc_addr = NULL; + } + return 0; +} + +static s32 encode_wq_init(void) +{ + u32 i = 0; + struct 
encode_queue_item_s *pitem = NULL; + + enc_pr(LOG_DEBUG, "encode_wq_init.\n"); + + spin_lock_init(&encode_manager.event.sem_lock); + spin_lock(&encode_manager.event.sem_lock); + encode_manager.irq_requested = false; + init_completion(&encode_manager.event.request_in_com); + init_waitqueue_head(&encode_manager.event.hw_complete); + init_completion(&encode_manager.event.process_complete); + INIT_LIST_HEAD(&encode_manager.process_queue); + INIT_LIST_HEAD(&encode_manager.free_queue); + INIT_LIST_HEAD(&encode_manager.wq); + + tasklet_init(&encode_manager.encode_tasklet, + encode_isr_tasklet, + (ulong)&encode_manager); + + spin_unlock(&encode_manager.event.sem_lock); + for (i = 0; i < MAX_ENCODE_REQUEST; i++) { + pitem = kcalloc(1, + sizeof(struct encode_queue_item_s), + GFP_KERNEL); + if (IS_ERR(pitem)) { + enc_pr(LOG_ERROR, "can't request queue item memory.\n"); + return -1; + } + pitem->request.parent = NULL; + list_add_tail(&pitem->list, &encode_manager.free_queue); + } + spin_lock(&encode_manager.event.sem_lock); + encode_manager.current_wq = NULL; + encode_manager.last_wq = NULL; + encode_manager.encode_thread = NULL; + encode_manager.current_item = NULL; + encode_manager.wq_count = 0; + encode_manager.remove_flag = false; + spin_unlock(&encode_manager.event.sem_lock); + InitEncodeWeight(); + if (encode_start_monitor()) { + enc_pr(LOG_ERROR, "encode create thread error.\n"); + return -1; + } + return 0; +} + +static s32 encode_wq_uninit(void) +{ + struct encode_queue_item_s *pitem, *tmp; + struct list_head *head; + u32 count = 0; + s32 r = -1; + + enc_pr(LOG_DEBUG, "uninit encode wq.\n"); + if (encode_stop_monitor() == 0) { + if ((encode_manager.irq_num >= 0) && + (encode_manager.irq_requested == true)) { + free_irq(encode_manager.irq_num, &encode_manager); + encode_manager.irq_requested = false; + } + spin_lock(&encode_manager.event.sem_lock); + head = &encode_manager.process_queue; + list_for_each_entry_safe(pitem, tmp, head, list) { + if (pitem) { + 
				list_del(&pitem->list);
				kfree(pitem);
				count++;
			}
		}
		head = &encode_manager.free_queue;
		list_for_each_entry_safe(pitem, tmp, head, list) {
			if (pitem) {
				list_del(&pitem->list);
				kfree(pitem);
				count++;
			}
		}
		spin_unlock(&encode_manager.event.sem_lock);
		if (count == MAX_ENCODE_REQUEST)
			r = 0;
		else {
			enc_pr(LOG_ERROR, "lost some request item %d.\n",
				MAX_ENCODE_REQUEST - count);
		}
	}
	return r;
}

/*
 * encode_status_show() - sysfs "encode_status" read: snapshot the
 * manager state under the lock, dump it via enc_pr(), and return only
 * the max-instance line in @buf.
 */
static ssize_t encode_status_show(struct class *cla,
	struct class_attribute *attr, char *buf)
{
	u32 process_count = 0;
	u32 free_count = 0;
	struct encode_queue_item_s *pitem = NULL;
	struct encode_wq_s *current_wq = NULL;
	struct encode_wq_s *last_wq = NULL;
	struct list_head *head = NULL;
	s32 irq_num = 0;
	u32 hw_status = 0;
	u32 process_queue_state = 0;
	u32 wq_count = 0;
	u32 ucode_index;
	bool need_reset;
	bool process_irq;
	bool inited;
	bool use_reserve;
	struct Buff_s reserve_mem;
	u32 max_instance;
#ifdef CONFIG_CMA
	bool check_cma = false;
#endif

	spin_lock(&encode_manager.event.sem_lock);
	/* Bounded walks guard against a corrupted list looping forever. */
	head = &encode_manager.free_queue;
	list_for_each_entry(pitem, head, list) {
		free_count++;
		if (free_count > MAX_ENCODE_REQUEST)
			break;
	}

	head = &encode_manager.process_queue;
	list_for_each_entry(pitem, head, list) {
		process_count++;
		if (process_count > MAX_ENCODE_REQUEST)
			break;
	}

	current_wq = encode_manager.current_wq;
	last_wq = encode_manager.last_wq;
	pitem = encode_manager.current_item;
	irq_num = encode_manager.irq_num;
	hw_status = encode_manager.encode_hw_status;
	process_queue_state = encode_manager.process_queue_state;
	wq_count = encode_manager.wq_count;
	ucode_index = encode_manager.ucode_index;
	need_reset = encode_manager.need_reset;
	process_irq = encode_manager.process_irq;
	inited = encode_manager.inited;
	use_reserve = encode_manager.use_reserve;
	reserve_mem.buf_start = encode_manager.reserve_mem.buf_start;
	reserve_mem.buf_size = encode_manager.reserve_mem.buf_size;

	max_instance = encode_manager.max_instance;
#ifdef CONFIG_CMA
	check_cma = encode_manager.check_cma;
#endif

	spin_unlock(&encode_manager.event.sem_lock);

	enc_pr(LOG_DEBUG,
		"encode process queue count: %d, free queue count: %d.\n",
		process_count, free_count);
	enc_pr(LOG_DEBUG,
		"encode curent wq: %p, last wq: %p, wq count: %d, max_instance: %d.\n",
		current_wq, last_wq, wq_count, max_instance);
	if (current_wq)
		enc_pr(LOG_DEBUG,
			"encode curent wq -- encode width: %d, encode height: %d.\n",
			current_wq->pic.encoder_width,
			current_wq->pic.encoder_height);
	enc_pr(LOG_DEBUG,
		"encode curent pitem: %p, ucode_index: %d, hw_status: %d, need_reset: %s, process_irq: %s.\n",
		pitem, ucode_index, hw_status, need_reset ? "true" : "false",
		process_irq ? "true" : "false");
	enc_pr(LOG_DEBUG,
		"encode irq num: %d, inited: %s, process_queue_state: %d.\n",
		irq_num, inited ? "true" : "false", process_queue_state);
	if (use_reserve) {
		enc_pr(LOG_DEBUG,
			"encode use reserve memory, buffer start: 0x%x, size: %d MB.\n",
			reserve_mem.buf_start,
			reserve_mem.buf_size / SZ_1M);
	} else {
#ifdef CONFIG_CMA
		enc_pr(LOG_DEBUG, "encode check cma: %s.\n",
			check_cma ? "true" : "false");
#endif
	}
	return snprintf(buf, 40, "encode max instance: %d\n", max_instance);
}
/* Two class-attribute registration styles: pre-4.13 kernels use the
 * class_attrs array; newer ones use attribute groups.
 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(4,13,1)
static struct class_attribute amvenc_class_attrs[] = {
	__ATTR(encode_status,
		S_IRUGO | S_IWUSR,
		encode_status_show,
		NULL),
	__ATTR_NULL
};

static struct class amvenc_avc_class = {
	.name = CLASS_NAME,
	.class_attrs = amvenc_class_attrs,
};
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(4,13,1) */
static CLASS_ATTR_RO(encode_status);

static struct attribute *amvenc_avc_class_attrs[] = {
	&class_attr_encode_status.attr,
	NULL
};

ATTRIBUTE_GROUPS(amvenc_avc_class);

static struct class amvenc_avc_class = {
	.name = CLASS_NAME,
	.class_groups = amvenc_avc_class_groups,
};
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(4,13,1) */
/*
 * init_avc_device() - register the char device, class and device node.
 * NOTE(review): on device_create() failure the chrdev registration is
 * not unregistered — verify whether that leak matters to callers.
 */
s32 init_avc_device(void)
{
	s32 r = 0;

	r = register_chrdev(0, DEVICE_NAME, &amvenc_avc_fops);
	if (r <= 0) {
		enc_pr(LOG_ERROR, "register amvenc_avc device error.\n");
		return r;
	}
	avc_device_major = r;

	r = class_register(&amvenc_avc_class);
	if (r < 0) {
		enc_pr(LOG_ERROR, "error create amvenc_avc class.\n");
		return r;
	}

	amvenc_avc_dev = device_create(&amvenc_avc_class, NULL,
		MKDEV(avc_device_major, 0), NULL,
		DEVICE_NAME);

	if (IS_ERR(amvenc_avc_dev)) {
		enc_pr(LOG_ERROR, "create amvenc_avc device error.\n");
		class_unregister(&amvenc_avc_class);
		return -1;
	}
	return r;
}

/*
 * uninit_avc_device() - undo init_avc_device() in reverse order.
 */
s32 uninit_avc_device(void)
{
	if (amvenc_avc_dev)
		device_destroy(&amvenc_avc_class, MKDEV(avc_device_major, 0));

	class_destroy(&amvenc_avc_class);

	unregister_chrdev(avc_device_major, DEVICE_NAME);
	return 0;
}

/*
 * avc_mem_device_init() - reserved-memory hook: carve the DT-reserved
 * region into per-instance buffers sized amvenc_buffspec[0].min_buffsize.
 */
static s32 avc_mem_device_init(struct reserved_mem *rmem, struct device *dev)
{
	s32 r;
	struct resource res;

	if (!rmem) {
		enc_pr(LOG_ERROR,
			"Can not obtain I/O memory, and will allocate avc buffer!\n");
		r = -EFAULT;
		return r;
	}
	res.start = (phys_addr_t)rmem->base;
	res.end = res.start +
(phys_addr_t)rmem->size - 1; + encode_manager.reserve_mem.buf_start = res.start; + encode_manager.reserve_mem.buf_size = res.end - res.start + 1; + + if (encode_manager.reserve_mem.buf_size >= + amvenc_buffspec[0].min_buffsize) { + encode_manager.max_instance = + encode_manager.reserve_mem.buf_size / + amvenc_buffspec[0].min_buffsize; + if (encode_manager.max_instance > MAX_ENCODE_INSTANCE) + encode_manager.max_instance = MAX_ENCODE_INSTANCE; + encode_manager.reserve_buff = kzalloc( + encode_manager.max_instance * + sizeof(struct Buff_s), GFP_KERNEL); + if (encode_manager.reserve_buff) { + u32 i; + struct Buff_s *reserve_buff; + u32 max_instance = encode_manager.max_instance; + + for (i = 0; i < max_instance; i++) { + reserve_buff = &encode_manager.reserve_buff[i]; + reserve_buff->buf_start = + i * + amvenc_buffspec[0] + .min_buffsize + + encode_manager.reserve_mem.buf_start; + reserve_buff->buf_size = + encode_manager.reserve_mem.buf_start; + reserve_buff->used = false; + } + encode_manager.use_reserve = true; + r = 0; + enc_pr(LOG_DEBUG, + "amvenc_avc use reserve memory, buff start: 0x%x, size: 0x%x, max instance is %d\n", + encode_manager.reserve_mem.buf_start, + encode_manager.reserve_mem.buf_size, + encode_manager.max_instance); + } else { + enc_pr(LOG_ERROR, + "amvenc_avc alloc reserve buffer pointer fail. max instance is %d.\n", + encode_manager.max_instance); + encode_manager.max_instance = 0; + encode_manager.reserve_mem.buf_start = 0; + encode_manager.reserve_mem.buf_size = 0; + r = -ENOMEM; + } + } else { + enc_pr(LOG_ERROR, + "amvenc_avc memory resource too small, size is 0x%x. 
Need 0x%x bytes at least.\n", + encode_manager.reserve_mem.buf_size, + amvenc_buffspec[0] + .min_buffsize); + encode_manager.reserve_mem.buf_start = 0; + encode_manager.reserve_mem.buf_size = 0; + r = -ENOMEM; + } + return r; +} + +static s32 amvenc_avc_probe(struct platform_device *pdev) +{ + /* struct resource mem; */ + s32 res_irq; + s32 idx; + s32 r; + + enc_pr(LOG_INFO, "amvenc_avc probe start.\n"); + + encode_manager.this_pdev = pdev; +#ifdef CONFIG_CMA + encode_manager.check_cma = false; +#endif + encode_manager.reserve_mem.buf_start = 0; + encode_manager.reserve_mem.buf_size = 0; + encode_manager.use_reserve = false; + encode_manager.max_instance = 0; + encode_manager.reserve_buff = NULL; + + idx = of_reserved_mem_device_init(&pdev->dev); + + if (idx != 0) { + enc_pr(LOG_DEBUG, + "amvenc_avc_probe -- reserved memory config fail.\n"); + } + + + if (encode_manager.use_reserve == false) { +#ifndef CONFIG_CMA + enc_pr(LOG_ERROR, + "amvenc_avc memory is invaild, probe fail!\n"); + return -EFAULT; +#else + encode_manager.cma_pool_size = + (codec_mm_get_total_size() > (MIN_SIZE * 3)) ? 
+ (MIN_SIZE * 3) : codec_mm_get_total_size(); + enc_pr(LOG_DEBUG, + "amvenc_avc - cma memory pool size: %d MB\n", + (u32)encode_manager.cma_pool_size / SZ_1M); +#endif + } + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + if (hcodec_clk_prepare(&pdev->dev, &s_hcodec_clks)) { + //err = -ENOENT; + enc_pr(LOG_ERROR, "[%s:%d] probe hcodec enc failed\n", __FUNCTION__, __LINE__); + //goto ERROR_PROBE_DEVICE; + return -EINVAL; + } + } + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + hcodec_rst = devm_reset_control_get(&pdev->dev, "hcodec_rst"); + if (IS_ERR(hcodec_rst)) + pr_err("amvenc probe, hcodec get reset failed: %ld\n", PTR_ERR(hcodec_rst)); + } + + res_irq = platform_get_irq(pdev, 0); + if (res_irq < 0) { + enc_pr(LOG_ERROR, "[%s] get irq error!", __func__); + return -EINVAL; + } + + encode_manager.irq_num = res_irq; + if (encode_wq_init()) { + kfree(encode_manager.reserve_buff); + encode_manager.reserve_buff = NULL; + enc_pr(LOG_ERROR, "encode work queue init error.\n"); + return -EFAULT; + } + + r = init_avc_device(); + enc_pr(LOG_INFO, "amvenc_avc probe end.\n"); + + return r; +} + +static s32 amvenc_avc_remove(struct platform_device *pdev) +{ + kfree(encode_manager.reserve_buff); + encode_manager.reserve_buff = NULL; + if (encode_wq_uninit()) + enc_pr(LOG_ERROR, "encode work queue uninit error.\n"); + uninit_avc_device(); + hcodec_clk_unprepare(&pdev->dev, &s_hcodec_clks); + enc_pr(LOG_INFO, "amvenc_avc remove.\n"); + return 0; +} + +static const struct of_device_id amlogic_avcenc_dt_match[] = { + { + .compatible = "amlogic, amvenc_avc", + }, + {}, +}; + +static struct platform_driver amvenc_avc_driver = { + .probe = amvenc_avc_probe, + .remove = amvenc_avc_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = amlogic_avcenc_dt_match, + } +}; + +/* +static struct codec_profile_t amvenc_avc_profile = { + .name = "avc", + .profile = "" +}; +*/ + +static s32 __init amvenc_avc_driver_init_module(void) +{ + if (get_cpu_major_id() == 
AM_MESON_CPU_MAJOR_ID_T7) { + pr_err("T7 doesn't support hcodec avc encoder!!\n"); + return -1; + } + + enc_pr(LOG_INFO, "amvenc_avc module init\n"); + + if (platform_driver_register(&amvenc_avc_driver)) { + enc_pr(LOG_ERROR, + "failed to register amvenc_avc driver\n"); + return -ENODEV; + } + //vcodec_profile_register(&amvenc_avc_profile); + return 0; +} + +static void __exit amvenc_avc_driver_remove_module(void) +{ + enc_pr(LOG_INFO, "amvenc_avc module remove.\n"); + + platform_driver_unregister(&amvenc_avc_driver); +} + +static const struct reserved_mem_ops rmem_avc_ops = { + .device_init = avc_mem_device_init, +}; + +static s32 __init avc_mem_setup(struct reserved_mem *rmem) +{ + rmem->ops = &rmem_avc_ops; + enc_pr(LOG_DEBUG, "amvenc_avc reserved mem setup.\n"); + return 0; +} + +static int enc_dma_buf_map(struct enc_dma_cfg *cfg) +{ + long ret = -1; + int fd = -1; + struct dma_buf *dbuf = NULL; + struct dma_buf_attachment *d_att = NULL; + struct sg_table *sg = NULL; + void *vaddr = NULL; + struct device *dev = NULL; + enum dma_data_direction dir; + + if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) { + enc_pr(LOG_ERROR, "error input param\n"); + return -EINVAL; + } + enc_pr(LOG_INFO, "enc_dma_buf_map, fd %d\n", cfg->fd); + + fd = cfg->fd; + dev = cfg->dev; + dir = cfg->dir; + enc_pr(LOG_INFO, "enc_dma_buffer_map fd %d\n", fd); + + dbuf = dma_buf_get(fd); + if (dbuf == NULL) { + enc_pr(LOG_ERROR, "failed to get dma buffer,fd %d\n",fd); + return -EINVAL; + } + + d_att = dma_buf_attach(dbuf, dev); + if (d_att == NULL) { + enc_pr(LOG_ERROR, "failed to set dma attach\n"); + goto attach_err; + } + + sg = dma_buf_map_attachment(d_att, dir); + if (sg == NULL) { + enc_pr(LOG_ERROR, "failed to get dma sg\n"); + goto map_attach_err; + } + + ret = dma_buf_begin_cpu_access(dbuf, dir); + if (ret != 0) { + enc_pr(LOG_ERROR, "failed to access dma buff\n"); + goto access_err; + } + + vaddr = dma_buf_vmap(dbuf); + if (vaddr == NULL) { + enc_pr(LOG_ERROR, "failed to vmap 
dma buf\n"); + goto vmap_err; + } + cfg->dbuf = dbuf; + cfg->attach = d_att; + cfg->vaddr = vaddr; + cfg->sg = sg; + + return ret; + +vmap_err: + dma_buf_end_cpu_access(dbuf, dir); + +access_err: + dma_buf_unmap_attachment(d_att, sg, dir); + +map_attach_err: + dma_buf_detach(dbuf, d_att); + +attach_err: + dma_buf_put(dbuf); + + return ret; +} + +static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr) +{ + struct sg_table *sg_table; + struct page *page; + int ret; + enc_pr(LOG_INFO, "enc_dma_buf_get_phys in\n"); + + ret = enc_dma_buf_map(cfg); + if (ret < 0) { + enc_pr(LOG_ERROR, "gdc_dma_buf_map failed\n"); + return ret; + } + if (cfg->sg) { + sg_table = cfg->sg; + page = sg_page(sg_table->sgl); + *addr = PFN_PHYS(page_to_pfn(page)); + ret = 0; + } + enc_pr(LOG_INFO, "enc_dma_buf_get_phys 0x%lx\n", *addr); + return ret; +} + +static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg) +{ + int fd = -1; + struct dma_buf *dbuf = NULL; + struct dma_buf_attachment *d_att = NULL; + struct sg_table *sg = NULL; + void *vaddr = NULL; + struct device *dev = NULL; + enum dma_data_direction dir; + + if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL + || cfg->dbuf == NULL || cfg->vaddr == NULL + || cfg->attach == NULL || cfg->sg == NULL) { + enc_pr(LOG_ERROR, "Error input param\n"); + return; + } + + fd = cfg->fd; + dev = cfg->dev; + dir = cfg->dir; + dbuf = cfg->dbuf; + vaddr = cfg->vaddr; + d_att = cfg->attach; + sg = cfg->sg; + + dma_buf_vunmap(dbuf, vaddr); + + dma_buf_end_cpu_access(dbuf, dir); + + dma_buf_unmap_attachment(d_att, sg, dir); + + dma_buf_detach(dbuf, d_att); + + dma_buf_put(dbuf); + enc_pr(LOG_DEBUG, "enc_dma_buffer_unmap vaddr %p\n",(unsigned *)vaddr); +} + + +module_param(fixed_slice_cfg, uint, 0664); +MODULE_PARM_DESC(fixed_slice_cfg, "\n fixed_slice_cfg\n"); + +module_param(clock_level, uint, 0664); +MODULE_PARM_DESC(clock_level, "\n clock_level\n"); + +module_param(encode_print_level, uint, 0664); 
+MODULE_PARM_DESC(encode_print_level, "\n encode_print_level\n"); + +module_param(no_timeout, uint, 0664); +MODULE_PARM_DESC(no_timeout, "\n no_timeout flag for process request\n"); + +module_param(nr_mode, int, 0664); +MODULE_PARM_DESC(nr_mode, "\n nr_mode option\n"); + +module_param(qp_table_debug, uint, 0664); +MODULE_PARM_DESC(qp_table_debug, "\n print qp table\n"); + +module_param(use_reset_control, uint, 0664); +MODULE_PARM_DESC(use_reset_control, "\n use_reset_control\n"); + +module_param(use_ge2d, uint, 0664); +MODULE_PARM_DESC(use_ge2d, "\n use_ge2d\n"); + +module_param(dump_input, uint, 0664); +MODULE_PARM_DESC(dump_input, "\n dump_input\n"); + +#ifdef H264_ENC_SVC +module_param(svc_enable, uint, 0664); +MODULE_PARM_DESC(svc_enable, "\n svc enable\n"); +module_param(svc_ref_conf, uint, 0664); +MODULE_PARM_DESC(svc_ref_conf, "\n svc reference duration config\n"); +#endif + +#ifdef MORE_MODULE_PARAM +module_param(me_mv_merge_ctl, uint, 0664); +MODULE_PARM_DESC(me_mv_merge_ctl, "\n me_mv_merge_ctl\n"); + +module_param(me_step0_close_mv, uint, 0664); +MODULE_PARM_DESC(me_step0_close_mv, "\n me_step0_close_mv\n"); + +module_param(me_f_skip_sad, uint, 0664); +MODULE_PARM_DESC(me_f_skip_sad, "\n me_f_skip_sad\n"); + +module_param(me_f_skip_weight, uint, 0664); +MODULE_PARM_DESC(me_f_skip_weight, "\n me_f_skip_weight\n"); + +module_param(me_mv_weight_01, uint, 0664); +MODULE_PARM_DESC(me_mv_weight_01, "\n me_mv_weight_01\n"); + +module_param(me_mv_weight_23, uint, 0664); +MODULE_PARM_DESC(me_mv_weight_23, "\n me_mv_weight_23\n"); + +module_param(me_sad_range_inc, uint, 0664); +MODULE_PARM_DESC(me_sad_range_inc, "\n me_sad_range_inc\n"); + +module_param(me_sad_enough_01, uint, 0664); +MODULE_PARM_DESC(me_sad_enough_01, "\n me_sad_enough_01\n"); + +module_param(me_sad_enough_23, uint, 0664); +MODULE_PARM_DESC(me_sad_enough_23, "\n me_sad_enough_23\n"); + +module_param(y_tnr_mc_en, uint, 0664); +MODULE_PARM_DESC(y_tnr_mc_en, "\n y_tnr_mc_en option\n"); 
+module_param(y_tnr_txt_mode, uint, 0664); +MODULE_PARM_DESC(y_tnr_txt_mode, "\n y_tnr_txt_mode option\n"); +module_param(y_tnr_mot_sad_margin, uint, 0664); +MODULE_PARM_DESC(y_tnr_mot_sad_margin, "\n y_tnr_mot_sad_margin option\n"); +module_param(y_tnr_mot_cortxt_rate, uint, 0664); +MODULE_PARM_DESC(y_tnr_mot_cortxt_rate, "\n y_tnr_mot_cortxt_rate option\n"); +module_param(y_tnr_mot_distxt_ofst, uint, 0664); +MODULE_PARM_DESC(y_tnr_mot_distxt_ofst, "\n y_tnr_mot_distxt_ofst option\n"); +module_param(y_tnr_mot_distxt_rate, uint, 0664); +MODULE_PARM_DESC(y_tnr_mot_distxt_rate, "\n y_tnr_mot_distxt_rate option\n"); +module_param(y_tnr_mot_dismot_ofst, uint, 0664); +MODULE_PARM_DESC(y_tnr_mot_dismot_ofst, "\n y_tnr_mot_dismot_ofst option\n"); +module_param(y_tnr_mot_frcsad_lock, uint, 0664); +MODULE_PARM_DESC(y_tnr_mot_frcsad_lock, "\n y_tnr_mot_frcsad_lock option\n"); +module_param(y_tnr_mot2alp_frc_gain, uint, 0664); +MODULE_PARM_DESC(y_tnr_mot2alp_frc_gain, "\n y_tnr_mot2alp_frc_gain option\n"); +module_param(y_tnr_mot2alp_nrm_gain, uint, 0664); +MODULE_PARM_DESC(y_tnr_mot2alp_nrm_gain, "\n y_tnr_mot2alp_nrm_gain option\n"); +module_param(y_tnr_mot2alp_dis_gain, uint, 0664); +MODULE_PARM_DESC(y_tnr_mot2alp_dis_gain, "\n y_tnr_mot2alp_dis_gain option\n"); +module_param(y_tnr_mot2alp_dis_ofst, uint, 0664); +MODULE_PARM_DESC(y_tnr_mot2alp_dis_ofst, "\n y_tnr_mot2alp_dis_ofst option\n"); +module_param(y_tnr_alpha_min, uint, 0664); +MODULE_PARM_DESC(y_tnr_alpha_min, "\n y_tnr_alpha_min option\n"); +module_param(y_tnr_alpha_max, uint, 0664); +MODULE_PARM_DESC(y_tnr_alpha_max, "\n y_tnr_alpha_max option\n"); +module_param(y_tnr_deghost_os, uint, 0664); +MODULE_PARM_DESC(y_tnr_deghost_os, "\n y_tnr_deghost_os option\n"); + +module_param(c_tnr_mc_en, uint, 0664); +MODULE_PARM_DESC(c_tnr_mc_en, "\n c_tnr_mc_en option\n"); +module_param(c_tnr_txt_mode, uint, 0664); +MODULE_PARM_DESC(c_tnr_txt_mode, "\n c_tnr_txt_mode option\n"); +module_param(c_tnr_mot_sad_margin, uint, 
0664); +MODULE_PARM_DESC(c_tnr_mot_sad_margin, "\n c_tnr_mot_sad_margin option\n"); +module_param(c_tnr_mot_cortxt_rate, uint, 0664); +MODULE_PARM_DESC(c_tnr_mot_cortxt_rate, "\n c_tnr_mot_cortxt_rate option\n"); +module_param(c_tnr_mot_distxt_ofst, uint, 0664); +MODULE_PARM_DESC(c_tnr_mot_distxt_ofst, "\n c_tnr_mot_distxt_ofst option\n"); +module_param(c_tnr_mot_distxt_rate, uint, 0664); +MODULE_PARM_DESC(c_tnr_mot_distxt_rate, "\n c_tnr_mot_distxt_rate option\n"); +module_param(c_tnr_mot_dismot_ofst, uint, 0664); +MODULE_PARM_DESC(c_tnr_mot_dismot_ofst, "\n c_tnr_mot_dismot_ofst option\n"); +module_param(c_tnr_mot_frcsad_lock, uint, 0664); +MODULE_PARM_DESC(c_tnr_mot_frcsad_lock, "\n c_tnr_mot_frcsad_lock option\n"); +module_param(c_tnr_mot2alp_frc_gain, uint, 0664); +MODULE_PARM_DESC(c_tnr_mot2alp_frc_gain, "\n c_tnr_mot2alp_frc_gain option\n"); +module_param(c_tnr_mot2alp_nrm_gain, uint, 0664); +MODULE_PARM_DESC(c_tnr_mot2alp_nrm_gain, "\n c_tnr_mot2alp_nrm_gain option\n"); +module_param(c_tnr_mot2alp_dis_gain, uint, 0664); +MODULE_PARM_DESC(c_tnr_mot2alp_dis_gain, "\n c_tnr_mot2alp_dis_gain option\n"); +module_param(c_tnr_mot2alp_dis_ofst, uint, 0664); +MODULE_PARM_DESC(c_tnr_mot2alp_dis_ofst, "\n c_tnr_mot2alp_dis_ofst option\n"); +module_param(c_tnr_alpha_min, uint, 0664); +MODULE_PARM_DESC(c_tnr_alpha_min, "\n c_tnr_alpha_min option\n"); +module_param(c_tnr_alpha_max, uint, 0664); +MODULE_PARM_DESC(c_tnr_alpha_max, "\n c_tnr_alpha_max option\n"); +module_param(c_tnr_deghost_os, uint, 0664); +MODULE_PARM_DESC(c_tnr_deghost_os, "\n c_tnr_deghost_os option\n"); + +module_param(y_snr_err_norm, uint, 0664); +MODULE_PARM_DESC(y_snr_err_norm, "\n y_snr_err_norm option\n"); +module_param(y_snr_gau_bld_core, uint, 0664); +MODULE_PARM_DESC(y_snr_gau_bld_core, "\n y_snr_gau_bld_core option\n"); +module_param(y_snr_gau_bld_ofst, int, 0664); +MODULE_PARM_DESC(y_snr_gau_bld_ofst, "\n y_snr_gau_bld_ofst option\n"); +module_param(y_snr_gau_bld_rate, uint, 0664); 
+MODULE_PARM_DESC(y_snr_gau_bld_rate, "\n y_snr_gau_bld_rate option\n"); +module_param(y_snr_gau_alp0_min, uint, 0664); +MODULE_PARM_DESC(y_snr_gau_alp0_min, "\n y_snr_gau_alp0_min option\n"); +module_param(y_snr_gau_alp0_max, uint, 0664); +MODULE_PARM_DESC(y_snr_gau_alp0_max, "\n y_snr_gau_alp0_max option\n"); +module_param(y_bld_beta2alp_rate, uint, 0664); +MODULE_PARM_DESC(y_bld_beta2alp_rate, "\n y_bld_beta2alp_rate option\n"); +module_param(y_bld_beta_min, uint, 0664); +MODULE_PARM_DESC(y_bld_beta_min, "\n y_bld_beta_min option\n"); +module_param(y_bld_beta_max, uint, 0664); +MODULE_PARM_DESC(y_bld_beta_max, "\n y_bld_beta_max option\n"); + +module_param(c_snr_err_norm, uint, 0664); +MODULE_PARM_DESC(c_snr_err_norm, "\n c_snr_err_norm option\n"); +module_param(c_snr_gau_bld_core, uint, 0664); +MODULE_PARM_DESC(c_snr_gau_bld_core, "\n c_snr_gau_bld_core option\n"); +module_param(c_snr_gau_bld_ofst, int, 0664); +MODULE_PARM_DESC(c_snr_gau_bld_ofst, "\n c_snr_gau_bld_ofst option\n"); +module_param(c_snr_gau_bld_rate, uint, 0664); +MODULE_PARM_DESC(c_snr_gau_bld_rate, "\n c_snr_gau_bld_rate option\n"); +module_param(c_snr_gau_alp0_min, uint, 0664); +MODULE_PARM_DESC(c_snr_gau_alp0_min, "\n c_snr_gau_alp0_min option\n"); +module_param(c_snr_gau_alp0_max, uint, 0664); +MODULE_PARM_DESC(c_snr_gau_alp0_max, "\n c_snr_gau_alp0_max option\n"); +module_param(c_bld_beta2alp_rate, uint, 0664); +MODULE_PARM_DESC(c_bld_beta2alp_rate, "\n c_bld_beta2alp_rate option\n"); +module_param(c_bld_beta_min, uint, 0664); +MODULE_PARM_DESC(c_bld_beta_min, "\n c_bld_beta_min option\n"); +module_param(c_bld_beta_max, uint, 0664); +MODULE_PARM_DESC(c_bld_beta_max, "\n c_bld_beta_max option\n"); +#endif + +module_init(amvenc_avc_driver_init_module); +module_exit(amvenc_avc_driver_remove_module); +RESERVEDMEM_OF_DECLARE(amvenc_avc, "amlogic, amvenc-memory", avc_mem_setup); + +MODULE_DESCRIPTION("AMLOGIC AVC Video Encoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("simon.zheng 
<simon.zheng@amlogic.com>");
diff --git a/drivers/frame_sink/encoder/h264/encoder.h b/drivers/frame_sink/encoder/h264/encoder.h new file mode 100644 index 0000000..bd97bb6 --- /dev/null +++ b/drivers/frame_sink/encoder/h264/encoder.h
@@ -0,0 +1,503 @@ +/* + * drivers/amlogic/amports/encoder.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef __H264_H__ +#define __H264_H__ + +#include <linux/mutex.h> +#include <linux/semaphore.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/slab.h> + +#ifdef CONFIG_AMLOGIC_MEDIA_GE2D +#include <linux/amlogic/media/ge2d/ge2d.h> +#endif + +#include <linux/dma-buf.h> + +#define AMVENC_DEVINFO_M8 "AML-M8" +#define AMVENC_DEVINFO_G9 "AML-G9" +#define AMVENC_DEVINFO_GXBB "AML-GXBB" +#define AMVENC_DEVINFO_GXTVBB "AML-GXTVBB" +#define AMVENC_DEVINFO_GXL "AML-GXL" + +#define HCODEC_IRQ_MBOX_CLR HCODEC_ASSIST_MBOX2_CLR_REG +#define HCODEC_IRQ_MBOX_MASK HCODEC_ASSIST_MBOX2_MASK + +#define H264_ENC_SVC + +/* M8: 2550/10 = 255M GX: 2000/10 = 200M */ +#define HDEC_L0() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \ + (2 << 25) | (1 << 16) | (1 << 24) | \ + (0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL))) +/* M8: 2550/8 = 319M GX: 2000/8 = 250M */ +#define HDEC_L1() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \ + (0 << 25) | (1 << 16) | (1 << 24) | \ + (0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL))) +/* M8: 2550/7 = 364M GX: 2000/7 = 285M */ +#define HDEC_L2() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \ + (3 << 25) | (0 << 16) | (1 << 24) | \ + (0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL))) +/* M8: 2550/6 = 425M GX: 2000/6 = 333M */ +#define HDEC_L3() 
WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \ + (1 << 25) | (1 << 16) | (1 << 24) | \ + (0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL))) +/* M8: 2550/5 = 510M GX: 2000/5 = 400M */ +#define HDEC_L4() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \ + (2 << 25) | (0 << 16) | (1 << 24) | \ + (0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL))) +/* M8: 2550/4 = 638M GX: 2000/4 = 500M */ +#define HDEC_L5() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \ + (0 << 25) | (0 << 16) | (1 << 24) | \ + (0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL))) +/* M8: 2550/3 = 850M GX: 2000/3 = 667M */ +#define HDEC_L6() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \ + (1 << 25) | (0 << 16) | (1 << 24) | \ + (0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL))) + +#define hvdec_clock_enable(level) \ + do { \ + if (level == 0) \ + HDEC_L0(); \ + else if (level == 1) \ + HDEC_L1(); \ + else if (level == 2) \ + HDEC_L2(); \ + else if (level == 3) \ + HDEC_L3(); \ + else if (level == 4) \ + HDEC_L4(); \ + else if (level == 5) \ + HDEC_L5(); \ + else if (level == 6) \ + HDEC_L6(); \ + WRITE_VREG_BITS(DOS_GCLK_EN0, 0x7fff, 12, 15); \ + } while (0) + +#define hvdec_clock_disable() \ + do { \ + WRITE_VREG_BITS(DOS_GCLK_EN0, 0, 12, 15); \ + WRITE_HHI_REG_BITS(HHI_VDEC_CLK_CNTL, 0, 24, 1); \ + } while (0) + +#define LOG_ALL 0 +#define LOG_INFO 1 +#define LOG_DEBUG 2 +#define LOG_ERROR 3 + +#define enc_pr(level, x...) 
\ + do { \ + if (level >= encode_print_level) \ + printk(x); \ + } while (0) + +#define AMVENC_AVC_IOC_MAGIC 'E' + +#define AMVENC_AVC_IOC_GET_DEVINFO _IOW(AMVENC_AVC_IOC_MAGIC, 0xf0, u32) +#define AMVENC_AVC_IOC_MAX_INSTANCE _IOW(AMVENC_AVC_IOC_MAGIC, 0xf1, u32) +#define AMVENC_AVC_IOC_GET_CPU_ID _IOW(AMVENC_AVC_IOC_MAGIC, 0xf2, u32) + +#define AMVENC_AVC_IOC_GET_ADDR _IOW(AMVENC_AVC_IOC_MAGIC, 0x00, u32) +#define AMVENC_AVC_IOC_INPUT_UPDATE _IOW(AMVENC_AVC_IOC_MAGIC, 0x01, u32) +#define AMVENC_AVC_IOC_NEW_CMD _IOW(AMVENC_AVC_IOC_MAGIC, 0x02, u32) +#define AMVENC_AVC_IOC_GET_STAGE _IOW(AMVENC_AVC_IOC_MAGIC, 0x03, u32) +#define AMVENC_AVC_IOC_GET_OUTPUT_SIZE _IOW(AMVENC_AVC_IOC_MAGIC, 0x04, u32) +#define AMVENC_AVC_IOC_CONFIG_INIT _IOW(AMVENC_AVC_IOC_MAGIC, 0x05, u32) +#define AMVENC_AVC_IOC_FLUSH_CACHE _IOW(AMVENC_AVC_IOC_MAGIC, 0x06, u32) +#define AMVENC_AVC_IOC_FLUSH_DMA _IOW(AMVENC_AVC_IOC_MAGIC, 0x07, u32) +#define AMVENC_AVC_IOC_GET_BUFFINFO _IOW(AMVENC_AVC_IOC_MAGIC, 0x08, u32) +#define AMVENC_AVC_IOC_SUBMIT _IOW(AMVENC_AVC_IOC_MAGIC, 0x09, u32) +#define AMVENC_AVC_IOC_READ_CANVAS _IOW(AMVENC_AVC_IOC_MAGIC, 0x0a, u32) +#define AMVENC_AVC_IOC_QP_MODE _IOW(AMVENC_AVC_IOC_MAGIC, 0x0b, u32) + + + +#define IE_PIPPELINE_BLOCK_SHIFT 0 +#define IE_PIPPELINE_BLOCK_MASK 0x1f +#define ME_PIXEL_MODE_SHIFT 5 +#define ME_PIXEL_MODE_MASK 0x3 + +enum amvenc_mem_type_e { + LOCAL_BUFF = 0, + CANVAS_BUFF, + PHYSICAL_BUFF, + DMA_BUFF, + MAX_BUFF_TYPE +}; + +enum amvenc_frame_fmt_e { + FMT_YUV422_SINGLE = 0, + FMT_YUV444_SINGLE, + FMT_NV21, + FMT_NV12, + FMT_YUV420, + FMT_YUV444_PLANE, + FMT_RGB888, + FMT_RGB888_PLANE, + FMT_RGB565, + FMT_RGBA8888, + FMT_YUV422_12BIT, + FMT_YUV444_10BIT, + FMT_YUV422_10BIT, + FMT_BGR888, + MAX_FRAME_FMT +}; + +#define MAX_ENCODE_REQUEST 8 /* 64 */ + +#define MAX_ENCODE_INSTANCE 8 /* 64 */ + +#define ENCODE_PROCESS_QUEUE_START 0 +#define ENCODE_PROCESS_QUEUE_STOP 1 + +#define AMVENC_FLUSH_FLAG_INPUT 0x1 +#define AMVENC_FLUSH_FLAG_OUTPUT 0x2 
+#define AMVENC_FLUSH_FLAG_REFERENCE 0x4 +#define AMVENC_FLUSH_FLAG_INTRA_INFO 0x8 +#define AMVENC_FLUSH_FLAG_INTER_INFO 0x10 +#define AMVENC_FLUSH_FLAG_QP 0x20 +#define AMVENC_FLUSH_FLAG_DUMP 0x40 +#define AMVENC_FLUSH_FLAG_CBR 0x80 + +#define ENCODER_BUFFER_INPUT 0 +#define ENCODER_BUFFER_REF0 1 +#define ENCODER_BUFFER_REF1 2 +#define ENCODER_BUFFER_OUTPUT 3 +#define ENCODER_BUFFER_INTER_INFO 4 +#define ENCODER_BUFFER_INTRA_INFO 5 +#define ENCODER_BUFFER_QP 6 +#define ENCODER_BUFFER_DUMP 7 +#define ENCODER_BUFFER_CBR 8 + +struct encode_wq_s; + +struct enc_dma_cfg { + int fd; + void *dev; + void *vaddr; + void *paddr; + struct dma_buf *dbuf; + struct dma_buf_attachment *attach; + struct sg_table *sg; + enum dma_data_direction dir; +}; + +struct encode_request_s { + u32 quant; + u32 cmd; + u32 ucode_mode; + u32 src; + u32 framesize; + + u32 me_weight; + u32 i4_weight; + u32 i16_weight; + + u32 crop_top; + u32 crop_bottom; + u32 crop_left; + u32 crop_right; + u32 src_w; + u32 src_h; + u32 scale_enable; + + u32 nr_mode; + u32 flush_flag; + u32 timeout; + enum amvenc_mem_type_e type; + enum amvenc_frame_fmt_e fmt; + struct encode_wq_s *parent; + struct enc_dma_cfg dma_cfg[3]; + u32 plane_num; +}; + +struct encode_queue_item_s { + struct list_head list; + struct encode_request_s request; +}; + +struct Buff_s { + u32 buf_start; + u32 buf_size; + bool used; +}; + +struct BuffInfo_s { + u32 lev_id; + u32 min_buffsize; + u32 max_width; + u32 max_height; + struct Buff_s dct; + struct Buff_s dec0_y; + struct Buff_s dec0_uv; + struct Buff_s dec1_y; + struct Buff_s dec1_uv; + struct Buff_s assit; + struct Buff_s bitstream; + struct Buff_s scale_buff; + struct Buff_s dump_info; + struct Buff_s cbr_info; +}; + +struct encode_meminfo_s { + u32 buf_start; + u32 buf_size; + + u32 BitstreamStart; + u32 BitstreamEnd; + + /*input buffer define*/ + u32 dct_buff_start_addr; + u32 dct_buff_end_addr; + + /*microcode assitant buffer*/ + u32 assit_buffer_offset; + + u32 
scaler_buff_start_addr; + + u32 dump_info_ddr_start_addr; + u32 dump_info_ddr_size; + + u32 cbr_info_ddr_start_addr; + u32 cbr_info_ddr_size; + + u8 * cbr_info_ddr_virt_addr; + + s32 dblk_buf_canvas; + s32 ref_buf_canvas; + struct BuffInfo_s bufspec; +#ifdef CONFIG_CMA + struct page *venc_pages; +#endif +}; + +struct encode_picinfo_s { + u32 encoder_width; + u32 encoder_height; + + u32 rows_per_slice; + + u32 idr_pic_id; /* need reset as 0 for IDR */ + u32 frame_number; /* need plus each frame */ + /* need reset as 0 for IDR and plus 2 for NON-IDR */ + u32 pic_order_cnt_lsb; + + u32 log2_max_pic_order_cnt_lsb; + u32 log2_max_frame_num; + u32 init_qppicture; +#ifdef H264_ENC_SVC + u32 enable_svc; + u32 non_ref_limit; + u32 non_ref_cnt; +#endif + u32 color_space; +}; + +struct encode_cbr_s { + u16 block_w; + u16 block_h; + u16 long_th; + u8 start_tbl_id; + u8 short_shift; + u8 long_mb_num; +}; + +struct encode_wq_s { + struct list_head list; + + /* dev info */ + u32 ucode_index; + u32 hw_status; + u32 output_size; + + u32 sps_size; + u32 pps_size; + + u32 me_weight; + u32 i4_weight; + u32 i16_weight; + + u32 quant_tbl_i4[8]; + u32 quant_tbl_i16[8]; + u32 quant_tbl_me[8]; + + struct encode_meminfo_s mem; + struct encode_picinfo_s pic; + struct encode_request_s request; + struct encode_cbr_s cbr_info; + atomic_t request_ready; + wait_queue_head_t request_complete; +}; + +struct encode_event_s { + wait_queue_head_t hw_complete; + struct completion process_complete; + spinlock_t sem_lock; /* for queue switch and create destroy queue. 
*/ + struct completion request_in_com; +}; + +struct encode_manager_s { + struct list_head wq; + struct list_head process_queue; + struct list_head free_queue; + + u32 encode_hw_status; + u32 process_queue_state; + s32 irq_num; + u32 wq_count; + u32 ucode_index; + u32 max_instance; +#ifdef CONFIG_AMLOGIC_MEDIA_GE2D + struct ge2d_context_s *context; +#endif + bool irq_requested; + bool need_reset; + bool process_irq; + bool inited; /* power on encode */ + bool remove_flag; /* remove wq; */ + bool uninit_flag; /* power off encode */ + bool use_reserve; + +#ifdef CONFIG_CMA + bool check_cma; + ulong cma_pool_size; +#endif + struct platform_device *this_pdev; + struct Buff_s *reserve_buff; + struct encode_wq_s *current_wq; + struct encode_wq_s *last_wq; + struct encode_queue_item_s *current_item; + struct task_struct *encode_thread; + struct Buff_s reserve_mem; + struct encode_event_s event; + struct tasklet_struct encode_tasklet; +}; + +extern s32 encode_wq_add_request(struct encode_wq_s *wq); +extern struct encode_wq_s *create_encode_work_queue(void); +extern s32 destroy_encode_work_queue(struct encode_wq_s *encode_work_queue); + +/******************************************** + * AV Scratch Register Re-Define + ****************************************** * + */ +#define ENCODER_STATUS HCODEC_HENC_SCRATCH_0 +#define MEM_OFFSET_REG HCODEC_HENC_SCRATCH_1 +#define DEBUG_REG HCODEC_HENC_SCRATCH_2 +#define IDR_PIC_ID HCODEC_HENC_SCRATCH_5 +#define FRAME_NUMBER HCODEC_HENC_SCRATCH_6 +#define PIC_ORDER_CNT_LSB HCODEC_HENC_SCRATCH_7 +#define LOG2_MAX_PIC_ORDER_CNT_LSB HCODEC_HENC_SCRATCH_8 +#define LOG2_MAX_FRAME_NUM HCODEC_HENC_SCRATCH_9 +#define ANC0_BUFFER_ID HCODEC_HENC_SCRATCH_A +#define QPPICTURE HCODEC_HENC_SCRATCH_B + +#define IE_ME_MB_TYPE HCODEC_HENC_SCRATCH_D + +/* bit 0-4, IE_PIPPELINE_BLOCK + * bit 5 me half pixel in m8 + * disable i4x4 in gxbb + * bit 6 me step2 sub pixel in m8 + * disable i16x16 in gxbb + */ +#define IE_ME_MODE HCODEC_HENC_SCRATCH_E +#define 
IE_REF_SEL HCODEC_HENC_SCRATCH_F + +/* [31:0] NUM_ROWS_PER_SLICE_P */ +/* [15:0] NUM_ROWS_PER_SLICE_I */ +#define FIXED_SLICE_CFG HCODEC_HENC_SCRATCH_L + +/* For GX */ +#define INFO_DUMP_START_ADDR HCODEC_HENC_SCRATCH_I + +/* For CBR */ +#define H264_ENC_CBR_TABLE_ADDR HCODEC_HENC_SCRATCH_3 +#define H264_ENC_CBR_MB_SIZE_ADDR HCODEC_HENC_SCRATCH_4 +/* Bytes(Float) * 256 */ +#define H264_ENC_CBR_CTL HCODEC_HENC_SCRATCH_G +/* [31:28] : init qp table idx */ +/* [27:24] : short_term adjust shift */ +/* [23:16] : Long_term MB_Number between adjust, */ +/* [15:0] Long_term adjust threshold(Bytes) */ +#define H264_ENC_CBR_TARGET_SIZE HCODEC_HENC_SCRATCH_H +/* Bytes(Float) * 256 */ +#define H264_ENC_CBR_PREV_BYTES HCODEC_HENC_SCRATCH_J +#define H264_ENC_CBR_REGION_SIZE HCODEC_HENC_SCRATCH_J + +/* for SVC */ +#define H264_ENC_SVC_PIC_TYPE HCODEC_HENC_SCRATCH_K + +/* define for PIC header */ +#define ENC_SLC_REF 0x8410 +#define ENC_SLC_NON_REF 0x8010 + +/* --------------------------------------------------- */ +/* ENCODER_STATUS define */ +/* --------------------------------------------------- */ +#define ENCODER_IDLE 0 +#define ENCODER_SEQUENCE 1 +#define ENCODER_PICTURE 2 +#define ENCODER_IDR 3 +#define ENCODER_NON_IDR 4 +#define ENCODER_MB_HEADER 5 +#define ENCODER_MB_DATA 6 + +#define ENCODER_SEQUENCE_DONE 7 +#define ENCODER_PICTURE_DONE 8 +#define ENCODER_IDR_DONE 9 +#define ENCODER_NON_IDR_DONE 10 +#define ENCODER_MB_HEADER_DONE 11 +#define ENCODER_MB_DATA_DONE 12 + +#define ENCODER_NON_IDR_INTRA 13 +#define ENCODER_NON_IDR_INTER 14 + +#define ENCODER_ERROR 0xff + +/******************************************** + * defines for H.264 mb_type + ******************************************* + */ +#define HENC_MB_Type_PBSKIP 0x0 +#define HENC_MB_Type_PSKIP 0x0 +#define HENC_MB_Type_BSKIP_DIRECT 0x0 +#define HENC_MB_Type_P16x16 0x1 +#define HENC_MB_Type_P16x8 0x2 +#define HENC_MB_Type_P8x16 0x3 +#define HENC_MB_Type_SMB8x8 0x4 +#define HENC_MB_Type_SMB8x4 0x5 +#define 
HENC_MB_Type_SMB4x8 0x6 +#define HENC_MB_Type_SMB4x4 0x7 +#define HENC_MB_Type_P8x8 0x8 +#define HENC_MB_Type_I4MB 0x9 +#define HENC_MB_Type_I16MB 0xa +#define HENC_MB_Type_IBLOCK 0xb +#define HENC_MB_Type_SI4MB 0xc +#define HENC_MB_Type_I8MB 0xd +#define HENC_MB_Type_IPCM 0xe +#define HENC_MB_Type_AUTO 0xf + +#define HENC_MB_CBP_AUTO 0xff +#define HENC_SKIP_RUN_AUTO 0xffff + + +extern bool amvenc_avc_on(void); +#endif
diff --git a/drivers/frame_sink/encoder/h265/Makefile b/drivers/frame_sink/encoder/h265/Makefile new file mode 100644 index 0000000..833822c --- /dev/null +++ b/drivers/frame_sink/encoder/h265/Makefile
@@ -0,0 +1 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VENC_H265) += vpu.o
diff --git a/drivers/frame_sink/encoder/h265/vmm.h b/drivers/frame_sink/encoder/h265/vmm.h new file mode 100644 index 0000000..a32dd06 --- /dev/null +++ b/drivers/frame_sink/encoder/h265/vmm.h
@@ -0,0 +1,665 @@ +/* + * vmm.h + * + * memory allocator for VPU + * + * Copyright (C) 2006 - 2013 CHIPS&MEDIA INC. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef __CNM_VIDEO_MEMORY_MANAGEMENT_H__ +#define __CNM_VIDEO_MEMORY_MANAGEMENT_H__ + +#define VMEM_PAGE_SIZE (16 * 1024) +#define MAKE_KEY(_a, _b) (((vmem_key_t)_a) << 32 | _b) +#define KEY_TO_VALUE(_key) (_key >> 32) + +#define VMEM_P_ALLOC(_x) vmalloc(_x) +#define VMEM_P_FREE(_x) vfree(_x) + +#define VMEM_ASSERT \ + pr_info("VMEM_ASSERT at %s:%d\n", __FILE__, __LINE__) + + +#define VMEM_HEIGHT(_tree) (_tree == NULL ? -1 : _tree->height) + +#define MAX(_a, _b) (_a >= _b ? 
_a : _b) + +struct avl_node_t; +#define vmem_key_t unsigned long long + +struct vmem_info_t { + ulong total_pages; + ulong alloc_pages; + ulong free_pages; + ulong page_size; +}; + +struct page_t { + s32 pageno; + ulong addr; + s32 used; + s32 alloc_pages; + s32 first_pageno; +}; + +struct avl_node_t { + vmem_key_t key; + s32 height; + struct page_t *page; + struct avl_node_t *left; + struct avl_node_t *right; +}; + +struct video_mm_t { + struct avl_node_t *free_tree; + struct avl_node_t *alloc_tree; + struct page_t *page_list; + s32 num_pages; + ulong base_addr; + ulong mem_size; + s32 free_page_count; + s32 alloc_page_count; +}; + +enum rotation_dir_t { + LEFT, + RIGHT +}; + +struct avl_node_data_t { + s32 key; + struct page_t *page; +}; + +static struct avl_node_t *make_avl_node( + vmem_key_t key, + struct page_t *page) +{ + struct avl_node_t *node = + (struct avl_node_t *)VMEM_P_ALLOC(sizeof(struct avl_node_t)); + node->key = key; + node->page = page; + node->height = 0; + node->left = NULL; + node->right = NULL; + return node; +} + +static s32 get_balance_factor(struct avl_node_t *tree) +{ + s32 factor = 0; + + if (tree) + factor = VMEM_HEIGHT(tree->right) - VMEM_HEIGHT(tree->left); + return factor; +} + +/* + * Left Rotation + * + * A B + * \ / \ + * B => A C + * / \ \ + * D C D + * + */ +static struct avl_node_t *rotation_left(struct avl_node_t *tree) +{ + struct avl_node_t *rchild; + struct avl_node_t *lchild; + + if (tree == NULL) + return NULL; + + rchild = tree->right; + if (rchild == NULL) + return tree; + + lchild = rchild->left; + rchild->left = tree; + tree->right = lchild; + + tree->height = + MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1; + rchild->height = + MAX(VMEM_HEIGHT(rchild->left), VMEM_HEIGHT(rchild->right)) + 1; + return rchild; +} + + +/* + * Reft Rotation + * + * A B + * \ / \ + * B => D A + * / \ / + * D C C + * + */ +static struct avl_node_t *rotation_right(struct avl_node_t *tree) +{ + struct avl_node_t *rchild; + struct 
avl_node_t *lchild; + + if (tree == NULL) + return NULL; + + lchild = tree->left; + if (lchild == NULL) + return NULL; + + rchild = lchild->right; + lchild->right = tree; + tree->left = rchild; + + tree->height = + MAX(VMEM_HEIGHT(tree->left), + VMEM_HEIGHT(tree->right)) + 1; + lchild->height = + MAX(VMEM_HEIGHT(lchild->left), + VMEM_HEIGHT(lchild->right)) + 1; + return lchild; +} + +static struct avl_node_t *do_balance(struct avl_node_t *tree) +{ + s32 bfactor = 0, child_bfactor; + + bfactor = get_balance_factor(tree); + if (bfactor >= 2) { + child_bfactor = get_balance_factor(tree->right); + if (child_bfactor == 1 || child_bfactor == 0) { + tree = rotation_left(tree); + } else if (child_bfactor == -1) { + tree->right = rotation_right(tree->right); + tree = rotation_left(tree); + } else { + pr_info( + "invalid balancing factor: %d\n", + child_bfactor); + VMEM_ASSERT; + return NULL; + } + } else if (bfactor <= -2) { + child_bfactor = get_balance_factor(tree->left); + if (child_bfactor == -1 || child_bfactor == 0) { + tree = rotation_right(tree); + } else if (child_bfactor == 1) { + tree->left = rotation_left(tree->left); + tree = rotation_right(tree); + } else { + pr_info( + "invalid balancing factor: %d\n", + child_bfactor); + VMEM_ASSERT; + return NULL; + } + } + return tree; +} + +static struct avl_node_t *unlink_end_node( + struct avl_node_t *tree, + s32 dir, + struct avl_node_t **found_node) +{ + struct avl_node_t *node; + *found_node = NULL; + + if (tree == NULL) + return NULL; + + if (dir == LEFT) { + if (tree->left == NULL) { + *found_node = tree; + return NULL; + } + } else { + if (tree->right == NULL) { + *found_node = tree; + return NULL; + } + } + + if (dir == LEFT) { + node = tree->left; + tree->left = unlink_end_node(tree->left, LEFT, found_node); + if (tree->left == NULL) { + tree->left = (*found_node)->right; + (*found_node)->left = NULL; + (*found_node)->right = NULL; + } + } else { + node = tree->right; + tree->right = unlink_end_node(tree->right, 
RIGHT, found_node); + if (tree->right == NULL) { + tree->right = (*found_node)->left; + (*found_node)->left = NULL; + (*found_node)->right = NULL; + } + } + tree->height = + MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1; + return do_balance(tree); +} + + +static struct avl_node_t *avltree_insert( + struct avl_node_t *tree, + vmem_key_t key, + struct page_t *page) +{ + if (tree == NULL) { + tree = make_avl_node(key, page); + } else { + if (key >= tree->key) + tree->right = + avltree_insert(tree->right, key, page); + else + tree->left = + avltree_insert(tree->left, key, page); + } + tree = do_balance(tree); + tree->height = + MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1; + return tree; +} + +static struct avl_node_t *do_unlink(struct avl_node_t *tree) +{ + struct avl_node_t *node; + struct avl_node_t *end_node; + + node = unlink_end_node(tree->right, LEFT, &end_node); + if (node) { + tree->right = node; + } else { + node = + unlink_end_node(tree->left, RIGHT, &end_node); + if (node) + tree->left = node; + } + + if (node == NULL) { + node = tree->right ? tree->right : tree->left; + end_node = node; + } + + if (end_node) { + end_node->left = + (tree->left != end_node) ? + tree->left : end_node->left; + end_node->right = + (tree->right != end_node) ? 
+ tree->right : end_node->right; + end_node->height = + MAX(VMEM_HEIGHT(end_node->left), + VMEM_HEIGHT(end_node->right)) + 1; + } + tree = end_node; + return tree; +} + +static struct avl_node_t *avltree_remove( + struct avl_node_t *tree, + struct avl_node_t **found_node, + vmem_key_t key) +{ + *found_node = NULL; + if (tree == NULL) { + pr_info("failed to find key %d\n", (s32)key); + return NULL; + } + + if (key == tree->key) { + *found_node = tree; + tree = do_unlink(tree); + } else if (key > tree->key) { + tree->right = + avltree_remove(tree->right, found_node, key); + } else { + tree->left = + avltree_remove(tree->left, found_node, key); + } + + if (tree) + tree->height = + MAX(VMEM_HEIGHT(tree->left), + VMEM_HEIGHT(tree->right)) + 1; + + tree = do_balance(tree); + return tree; +} + +void avltree_free(struct avl_node_t *tree) +{ + if (tree == NULL) + return; + if (tree->left == NULL && tree->right == NULL) { + VMEM_P_FREE(tree); + return; + } + + avltree_free(tree->left); + tree->left = NULL; + avltree_free(tree->right); + tree->right = NULL; + VMEM_P_FREE(tree); +} + +static struct avl_node_t *remove_approx_value( + struct avl_node_t *tree, + struct avl_node_t **found, + vmem_key_t key) +{ + *found = NULL; + if (tree == NULL) + return NULL; + + if (key == tree->key) { + *found = tree; + tree = do_unlink(tree); + } else if (key > tree->key) { + tree->right = remove_approx_value(tree->right, found, key); + } else { + tree->left = remove_approx_value(tree->left, found, key); + if (*found == NULL) { + *found = tree; + tree = do_unlink(tree); + } + } + if (tree) + tree->height = + MAX(VMEM_HEIGHT(tree->left), + VMEM_HEIGHT(tree->right)) + 1; + tree = do_balance(tree); + return tree; +} + +static void set_blocks_free( + struct video_mm_t *mm, + s32 pageno, + s32 npages) +{ + s32 last_pageno = pageno + npages - 1; + s32 i; + struct page_t *page; + struct page_t *last_page; + + if (npages == 0) + VMEM_ASSERT; + + if (last_pageno >= mm->num_pages) { + pr_info( + 
"set_blocks_free: invalid last page number: %d\n", + last_pageno); + VMEM_ASSERT; + return; + } + + for (i = pageno; i <= last_pageno; i++) { + mm->page_list[i].used = 0; + mm->page_list[i].alloc_pages = 0; + mm->page_list[i].first_pageno = -1; + } + + page = &mm->page_list[pageno]; + page->alloc_pages = npages; + last_page = &mm->page_list[last_pageno]; + last_page->first_pageno = pageno; + mm->free_tree = + avltree_insert(mm->free_tree, MAKE_KEY(npages, pageno), page); +} + +static void set_blocks_alloc( + struct video_mm_t *mm, + s32 pageno, + s32 npages) +{ + s32 last_pageno = pageno + npages - 1; + s32 i; + struct page_t *page; + struct page_t *last_page; + + if (last_pageno >= mm->num_pages) { + pr_info( + "set_blocks_free: invalid last page number: %d\n", + last_pageno); + VMEM_ASSERT; + return; + } + + for (i = pageno; i <= last_pageno; i++) { + mm->page_list[i].used = 1; + mm->page_list[i].alloc_pages = 0; + mm->page_list[i].first_pageno = -1; + } + + page = &mm->page_list[pageno]; + page->alloc_pages = npages; + last_page = &mm->page_list[last_pageno]; + last_page->first_pageno = pageno; + mm->alloc_tree = + avltree_insert(mm->alloc_tree, MAKE_KEY(page->addr, 0), page); +} + + +s32 vmem_init(struct video_mm_t *mm, ulong addr, ulong size) +{ + s32 i; + + if (mm == NULL) + return -1; + + mm->base_addr = (addr + (VMEM_PAGE_SIZE - 1)) + & ~(VMEM_PAGE_SIZE - 1); + mm->mem_size = size & ~VMEM_PAGE_SIZE; + mm->num_pages = mm->mem_size / VMEM_PAGE_SIZE; + mm->free_tree = NULL; + mm->alloc_tree = NULL; + mm->free_page_count = mm->num_pages; + mm->alloc_page_count = 0; + mm->page_list = + (struct page_t *)VMEM_P_ALLOC( + mm->num_pages * sizeof(struct page_t)); + if (mm->page_list == NULL) { + pr_err("%s:%d failed to kmalloc(%zu)\n", + __func__, __LINE__, + mm->num_pages * sizeof(struct page_t)); + return -1; + } + + for (i = 0; i < mm->num_pages; i++) { + mm->page_list[i].pageno = i; + mm->page_list[i].addr = + mm->base_addr + i * VMEM_PAGE_SIZE; + 
mm->page_list[i].alloc_pages = 0; + mm->page_list[i].used = 0; + mm->page_list[i].first_pageno = -1; + } + set_blocks_free(mm, 0, mm->num_pages); + return 0; +} + +s32 vmem_exit(struct video_mm_t *mm) +{ + if (mm == NULL) { + pr_info("vmem_exit: invalid handle\n"); + return -1; + } + + if (mm->free_tree) + avltree_free(mm->free_tree); + if (mm->alloc_tree) + avltree_free(mm->alloc_tree); + + if (mm->page_list) { + VMEM_P_FREE(mm->page_list); + mm->page_list = NULL; + } + + mm->base_addr = 0; + mm->mem_size = 0; + mm->num_pages = 0; + mm->page_list = NULL; + mm->free_tree = NULL; + mm->alloc_tree = NULL; + mm->free_page_count = 0; + mm->alloc_page_count = 0; + return 0; +} + +ulong vmem_alloc(struct video_mm_t *mm, s32 size, ulong pid) +{ + struct avl_node_t *node; + struct page_t *free_page; + s32 npages, free_size; + s32 alloc_pageno; + ulong ptr; + + if (mm == NULL) { + pr_info("vmem_alloc: invalid handle\n"); + return -1; + } + + if (size <= 0) + return -1; + + npages = (size + VMEM_PAGE_SIZE - 1) / VMEM_PAGE_SIZE; + mm->free_tree = remove_approx_value(mm->free_tree, + &node, MAKE_KEY(npages, 0)); + + if (node == NULL) + return -1; + + free_page = node->page; + free_size = KEY_TO_VALUE(node->key); + alloc_pageno = free_page->pageno; + set_blocks_alloc(mm, alloc_pageno, npages); + if (npages != free_size) { + s32 free_pageno = alloc_pageno + npages; + + set_blocks_free(mm, free_pageno, (free_size-npages)); + } + VMEM_P_FREE(node); + + ptr = mm->page_list[alloc_pageno].addr; + mm->alloc_page_count += npages; + mm->free_page_count -= npages; + return ptr; +} + +s32 vmem_free(struct video_mm_t *mm, ulong ptr, ulong pid) +{ + ulong addr; + struct avl_node_t *found; + struct page_t *page; + s32 pageno, prev_free_pageno, next_free_pageno; + s32 prev_size, next_size; + s32 merge_page_no, merge_page_size, free_page_size; + + if (mm == NULL) { + pr_info("vmem_free: invalid handle\n"); + return -1; + } + + addr = ptr; + mm->alloc_tree = avltree_remove(mm->alloc_tree, 
&found, + MAKE_KEY(addr, 0)); + + if (found == NULL) { + pr_info("vmem_free: 0x%08x not found\n", (s32)addr); + VMEM_ASSERT; + return -1; + } + + /* find previous free block */ + page = found->page; + pageno = page->pageno; + free_page_size = page->alloc_pages; + prev_free_pageno = pageno - 1; + prev_size = -1; + if (prev_free_pageno >= 0) { + if (mm->page_list[prev_free_pageno].used == 0) { + prev_free_pageno = + mm->page_list[prev_free_pageno].first_pageno; + prev_size = + mm->page_list[prev_free_pageno].alloc_pages; + } + } + + /* find next free block */ + next_free_pageno = pageno + page->alloc_pages; + next_free_pageno = + (next_free_pageno == mm->num_pages) ? -1 : next_free_pageno; + next_size = -1; + if (next_free_pageno >= 0) { + if (mm->page_list[next_free_pageno].used == 0) { + next_size = + mm->page_list[next_free_pageno].alloc_pages; + } + } + VMEM_P_FREE(found); + + /* merge */ + merge_page_no = page->pageno; + merge_page_size = page->alloc_pages; + if (prev_size >= 0) { + mm->free_tree = avltree_remove(mm->free_tree, &found, + MAKE_KEY(prev_size, prev_free_pageno)); + if (found == NULL) { + VMEM_ASSERT; + return -1; + } + merge_page_no = found->page->pageno; + merge_page_size += found->page->alloc_pages; + VMEM_P_FREE(found); + } + if (next_size >= 0) { + mm->free_tree = avltree_remove(mm->free_tree, &found, + MAKE_KEY(next_size, next_free_pageno)); + if (found == NULL) { + VMEM_ASSERT; + return -1; + } + merge_page_size += found->page->alloc_pages; + VMEM_P_FREE(found); + } + page->alloc_pages = 0; + page->first_pageno = -1; + set_blocks_free(mm, merge_page_no, merge_page_size); + mm->alloc_page_count -= free_page_size; + mm->free_page_count += free_page_size; + return 0; +} + +s32 vmem_get_info(struct video_mm_t *mm, struct vmem_info_t *info) +{ + if (mm == NULL) { + pr_info("vmem_get_info: invalid handle\n"); + return -1; + } + + if (info == NULL) + return -1; + + info->total_pages = mm->num_pages; + info->alloc_pages = mm->alloc_page_count; + 
info->free_pages = mm->free_page_count; + info->page_size = VMEM_PAGE_SIZE; + return 0; +} +#endif /* __CNM_VIDEO_MEMORY_MANAGEMENT_H__ */
diff --git a/drivers/frame_sink/encoder/h265/vpu.c b/drivers/frame_sink/encoder/h265/vpu.c new file mode 100644 index 0000000..55cb054 --- /dev/null +++ b/drivers/frame_sink/encoder/h265/vpu.c
@@ -0,0 +1,2528 @@ +/* + * vpu.c + * + * linux device driver for VPU. + * + * Copyright (C) 2006 - 2013 CHIPS&MEDIA INC. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/dma-mapping.h> +#include <linux/wait.h> +#include <linux/list.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/uaccess.h> +#include <linux/cdev.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/reset.h> +#include <linux/clk.h> +#include <linux/compat.h> +#include <linux/of_reserved_mem.h> +#include <linux/of_address.h> +#include <linux/compat.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> + +#include <linux/amlogic/media/registers/cpu_version.h> +#include <linux/version.h> +#include "../../../frame_provider/decoder/utils/vdec_power_ctrl.h" +#include <linux/amlogic/media/utils/vdec_reg.h> +#include <linux/amlogic/power_ctrl.h> +#include <dt-bindings/power/sc2-pd.h> +#include <linux/amlogic/power_domain.h> +#include <linux/amlogic/power_ctrl.h> +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,1) +#include <linux/sched/signal.h> +#endif + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../../common/media_clock/switch/amports_gate.h" + +#include "vpu.h" +#include "vmm.h" + +/* definitions to be changed as customer configuration */ +/* if you 
want to have clock gating scheme frame by frame */ +/* #define VPU_SUPPORT_CLOCK_CONTROL */ + +//#define VPU_SUPPORT_CLOCK_CONTROL + + +#define VPU_PLATFORM_DEVICE_NAME "HevcEnc" +#define VPU_DEV_NAME "HevcEnc" +#define VPU_CLASS_NAME "HevcEnc" + +#ifndef VM_RESERVED /*for kernel up to 3.7.0 version*/ +#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP) +#endif + +#define MHz (1000000) + +#define VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE (64 * SZ_1M) + +#define LOG_ALL 0 +#define LOG_INFO 1 +#define LOG_DEBUG 2 +#define LOG_ERROR 3 + +#define enc_pr(level, x...) \ + do { \ + if (level >= print_level) \ + printk(x); \ + } while (0) + +static s32 print_level = LOG_DEBUG; +static s32 clock_level = 4; + +static s32 wave_clocka; +static s32 wave_clockb; +static s32 wave_clockc; + +static struct video_mm_t s_vmem; +static struct vpudrv_buffer_t s_video_memory = {0}; +static bool use_reserve; +static ulong cma_pool_size; + +/* end customer definition */ +static struct vpudrv_buffer_t s_instance_pool = {0}; +static struct vpudrv_buffer_t s_common_memory = {0}; +static struct vpu_drv_context_t s_vpu_drv_context; +static s32 s_vpu_major; +static struct device *hevcenc_dev; + +static s32 s_vpu_open_ref_count; +static s32 s_vpu_irq; +static bool s_vpu_irq_requested; + +static struct vpudrv_buffer_t s_vpu_register = {0}; + +static s32 s_interrupt_flag; +static wait_queue_head_t s_interrupt_wait_q; + +static spinlock_t s_vpu_lock = __SPIN_LOCK_UNLOCKED(s_vpu_lock); +static DEFINE_SEMAPHORE(s_vpu_sem); +static struct list_head s_vbp_head = LIST_HEAD_INIT(s_vbp_head); +static struct list_head s_inst_list_head = LIST_HEAD_INIT(s_inst_list_head); +static struct tasklet_struct hevc_tasklet; +static struct platform_device *hevc_pdev; + +static struct vpu_bit_firmware_info_t s_bit_firmware_info[MAX_NUM_VPU_CORE]; + +static struct vpu_dma_cfg dma_cfg[3]; + +struct vpu_clks { + struct clk *wave_aclk; + struct clk *wave_bclk; + struct clk *wave_cclk; +}; + +static struct vpu_clks s_vpu_clks; + 
+#define CHECK_RET(_ret) if (ret) {enc_pr(LOG_ERROR, \ + "%s:%d:function call failed with result: %d\n",\ + __FUNCTION__, __LINE__, _ret);} + +static u32 vpu_src_addr_config(struct vpu_dma_buf_info_t); +static void vpu_dma_buffer_unmap(struct vpu_dma_cfg *cfg); + +static void dma_flush(u32 buf_start, u32 buf_size) +{ + if (hevc_pdev) + dma_sync_single_for_device( + &hevc_pdev->dev, buf_start, + buf_size, DMA_TO_DEVICE); +} + +static void cache_flush(u32 buf_start, u32 buf_size) +{ + if (hevc_pdev) + dma_sync_single_for_cpu( + &hevc_pdev->dev, buf_start, + buf_size, DMA_FROM_DEVICE); +} + +s32 vpu_hw_reset(void) +{ + enc_pr(LOG_DEBUG, "request vpu reset from application.\n"); + return 0; +} + +s32 vpu_clk_prepare(struct device *dev, struct vpu_clks *clks) +{ + int ret; + + s32 new_clocka = 667; + s32 new_clockb = 400; + s32 new_clockc = 400; + + if (wave_clocka > 0) + new_clocka = wave_clocka; + if (wave_clockb > 0) + new_clockb = wave_clockb; + if (wave_clockc > 0) + new_clockc = wave_clockc; + + clks->wave_aclk = devm_clk_get(dev, "cts_wave420_aclk"); + if (IS_ERR_OR_NULL(clks->wave_aclk)) { + enc_pr(LOG_ERROR, "failed to get wave aclk\n"); + return -1; + } + + clks->wave_bclk = devm_clk_get(dev, "cts_wave420_bclk"); + if (IS_ERR_OR_NULL(clks->wave_aclk)) { + enc_pr(LOG_ERROR, "failed to get wave aclk\n"); + return -1; + } + + clks->wave_cclk = devm_clk_get(dev, "cts_wave420_cclk"); + if (IS_ERR_OR_NULL(clks->wave_aclk)) { + enc_pr(LOG_ERROR, "failed to get wave aclk\n"); + return -1; + } + + ret = clk_set_rate(clks->wave_aclk, new_clocka * MHz); + CHECK_RET(ret); + ret = clk_set_rate(clks->wave_bclk, new_clockb * MHz); + CHECK_RET(ret); + ret = clk_set_rate(clks->wave_cclk, new_clockc * MHz); + + CHECK_RET(ret); + ret = clk_prepare(clks->wave_aclk); + CHECK_RET(ret); + ret = clk_prepare(clks->wave_bclk); + CHECK_RET(ret); + ret = clk_prepare(clks->wave_cclk); + CHECK_RET(ret); + + enc_pr(LOG_ERROR, "wave_clk_a: %lu MHz\n", clk_get_rate(clks->wave_aclk) / 
1000000); + enc_pr(LOG_ERROR, "wave_clk_b: %lu MHz\n", clk_get_rate(clks->wave_bclk) / 1000000); + enc_pr(LOG_ERROR, "wave_clk_c: %lu MHz\n", clk_get_rate(clks->wave_cclk) / 1000000); + + return 0; +} + +void vpu_clk_unprepare(struct device *dev, struct vpu_clks *clks) +{ + clk_unprepare(clks->wave_cclk); + devm_clk_put(dev, clks->wave_cclk); + + clk_unprepare(clks->wave_bclk); + devm_clk_put(dev, clks->wave_bclk); + + clk_unprepare(clks->wave_aclk); + devm_clk_put(dev, clks->wave_aclk); +} + +s32 vpu_clk_config(u32 enable) +{ + if (enable) { + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + clk_enable(s_vpu_clks.wave_aclk); + clk_enable(s_vpu_clks.wave_bclk); + clk_enable(s_vpu_clks.wave_cclk); + } else { + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A) + HevcEnc_MoreClock_enable(); + HevcEnc_clock_enable(clock_level); + } + } else { + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + clk_disable(s_vpu_clks.wave_cclk); + clk_disable(s_vpu_clks.wave_bclk); + clk_disable(s_vpu_clks.wave_aclk); + } else { + HevcEnc_clock_disable(); + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A) + HevcEnc_MoreClock_disable(); + } + } + + return 0; +} + +static s32 vpu_alloc_dma_buffer(struct vpudrv_buffer_t *vb) +{ + if (!vb) + return -1; + + vb->phys_addr = (ulong)vmem_alloc(&s_vmem, vb->size, 0); + if ((ulong)vb->phys_addr == (ulong)-1) { + enc_pr(LOG_ERROR, + "Physical memory allocation error size=%d\n", vb->size); + return -1; + } + + enc_pr(LOG_INFO, "vpu_alloc_dma_buffer: vb->phys_addr 0x%lx \n",vb->phys_addr); + return 0; +} + +static void vpu_free_dma_buffer(struct vpudrv_buffer_t *vb) +{ + if (!vb) + return; + enc_pr(LOG_INFO, "vpu_free_dma_buffer 0x%lx\n",vb->phys_addr); + + if (vb->phys_addr) + vmem_free(&s_vmem, vb->phys_addr, 0); +} + +static s32 vpu_free_instances(struct file *filp) +{ + struct vpudrv_instanace_list_t *vil, *n; + struct vpudrv_instance_pool_t *vip; + void *vip_base; + + enc_pr(LOG_DEBUG, "vpu_free_instances\n"); + + list_for_each_entry_safe(vil, n, 
&s_inst_list_head, list) { + if (vil->filp == filp) { + vip_base = (void *)s_instance_pool.base; + enc_pr(LOG_INFO, + "free_instances instIdx=%d, coreIdx=%d, vip_base=%p\n", + (s32)vil->inst_idx, + (s32)vil->core_idx, + vip_base); + vip = (struct vpudrv_instance_pool_t *)vip_base; + if (vip) { + /* only first 4 byte is key point + * (inUse of CodecInst in vpuapi) + * to free the corresponding instance. + */ + memset(&vip->codecInstPool[vil->inst_idx], + 0x00, 4); + } + s_vpu_open_ref_count--; + list_del(&vil->list); + kfree(vil); + } + } + return 1; +} + +static s32 vpu_free_buffers(struct file *filp) +{ + struct vpudrv_buffer_pool_t *pool, *n; + struct vpudrv_buffer_t vb; + + enc_pr(LOG_DEBUG, "vpu_free_buffers\n"); + + list_for_each_entry_safe(pool, n, &s_vbp_head, list) { + if (pool->filp == filp) { + vb = pool->vb; + if (vb.phys_addr) { + vpu_free_dma_buffer(&vb); + list_del(&pool->list); + kfree(pool); + } + } + } + return 0; +} + +static u32 vpu_is_buffer_cached(struct file *filp, ulong vm_pgoff) +{ + struct vpudrv_buffer_pool_t *pool, *n; + struct vpudrv_buffer_t vb; + bool find = false; + u32 cached = 0; + + enc_pr(LOG_ALL, "[+]vpu_is_buffer_cached\n"); + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(pool, n, &s_vbp_head, list) { + if (pool->filp == filp) { + vb = pool->vb; + if (((vb.phys_addr >> PAGE_SHIFT) == vm_pgoff) + && find == false){ + cached = vb.cached; + find = true; + } + } + } + spin_unlock(&s_vpu_lock); + enc_pr(LOG_ALL, "[-]vpu_is_buffer_cached, ret:%d\n", cached); + return cached; +} + +static void hevcenc_isr_tasklet(ulong data) +{ + struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)data; + + enc_pr(LOG_INFO, "hevcenc_isr_tasklet interruput:0x%08lx\n", + dev->interrupt_reason); + if (dev->interrupt_reason) { + /* notify the interrupt to user space */ + if (dev->async_queue) { + enc_pr(LOG_ALL, "kill_fasync e %s\n", __func__); + kill_fasync(&dev->async_queue, SIGIO, POLL_IN); + } + s_interrupt_flag = 1; + 
wake_up_interruptible(&s_interrupt_wait_q); + } + enc_pr(LOG_ALL, "[-]%s\n", __func__); +} + +static irqreturn_t vpu_irq_handler(s32 irq, void *dev_id) +{ + struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)dev_id; + /* this can be removed. + * it also work in VPU_WaitInterrupt of API function + */ + u32 core; + ulong interrupt_reason = 0; + + enc_pr(LOG_ALL, "[+]%s\n", __func__); + + for (core = 0; core < MAX_NUM_VPU_CORE; core++) { + if (s_bit_firmware_info[core].size == 0) { + /* it means that we didn't get an information + * the current core from API layer. + * No core activated. + */ + enc_pr(LOG_ERROR, + "s_bit_firmware_info[core].size is zero\n"); + continue; + } + if (ReadVpuRegister(W4_VPU_VPU_INT_STS)) { + interrupt_reason = ReadVpuRegister(W4_VPU_INT_REASON); + WriteVpuRegister(W4_VPU_INT_REASON_CLEAR, + interrupt_reason); + WriteVpuRegister(W4_VPU_VINT_CLEAR, 0x1); + dev->interrupt_reason |= interrupt_reason; + } + enc_pr(LOG_INFO, + "intr_reason: 0x%08lx\n", dev->interrupt_reason); + } + if (dev->interrupt_reason) + tasklet_schedule(&hevc_tasklet); + enc_pr(LOG_ALL, "[-]%s\n", __func__); + return IRQ_HANDLED; +} + +static s32 vpu_open(struct inode *inode, struct file *filp) +{ + bool alloc_buffer = false; + s32 r = 0; + + enc_pr(LOG_DEBUG, "[+] %s, open_count=%d\n", __func__, + s_vpu_drv_context.open_count); + enc_pr(LOG_DEBUG, "vpu_open, calling process: %d:%s\n", current->pid, current->comm); + spin_lock(&s_vpu_lock); + s_vpu_drv_context.open_count++; + if (s_vpu_drv_context.open_count == 1) { + alloc_buffer = true; + } else { + r = -EBUSY; + enc_pr(LOG_ERROR, "vpu_open, device is busy, s_vpu_drv_context.open_count=%d\n", + s_vpu_drv_context.open_count); + s_vpu_drv_context.open_count--; + spin_unlock(&s_vpu_lock); + return r; + } + filp->private_data = (void *)(&s_vpu_drv_context); + spin_unlock(&s_vpu_lock); + if (alloc_buffer && !use_reserve) { +#ifdef CONFIG_CMA + s_video_memory.size = VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE; + 
s_video_memory.phys_addr = + (ulong)codec_mm_alloc_for_dma(VPU_DEV_NAME, + VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE >> PAGE_SHIFT, 0, 0); + if (s_video_memory.phys_addr) { + enc_pr(LOG_DEBUG, + "allocating phys 0x%lx, virt addr 0x%lx, size %dk\n", + s_video_memory.phys_addr, + s_video_memory.base, + s_video_memory.size >> 10); + if (vmem_init(&s_vmem, + s_video_memory.phys_addr, + s_video_memory.size) < 0) { + enc_pr(LOG_ERROR, "fail to init vmem system\n"); + r = -ENOMEM; + codec_mm_free_for_dma( + VPU_DEV_NAME, + (u32)s_video_memory.phys_addr); + vmem_exit(&s_vmem); + memset(&s_video_memory, 0, + sizeof(struct vpudrv_buffer_t)); + memset(&s_vmem, 0, + sizeof(struct video_mm_t)); + } + } else { + enc_pr(LOG_ERROR, + "CMA failed to allocate dma buffer for %s, phys: 0x%lx\n", + VPU_DEV_NAME, s_video_memory.phys_addr); + if (s_video_memory.phys_addr) + codec_mm_free_for_dma( + VPU_DEV_NAME, + (u32)s_video_memory.phys_addr); + s_video_memory.phys_addr = 0; + r = -ENOMEM; + } +#else + enc_pr(LOG_ERROR, + "No CMA and reserved memory for HevcEnc!!!\n"); + r = -ENOMEM; +#endif + } else if (!s_video_memory.phys_addr) { + enc_pr(LOG_ERROR, + "HevcEnc memory is not malloced!!!\n"); + r = -ENOMEM; + } + if (alloc_buffer) { + ulong flags; + u32 data32; + + if ((s_vpu_irq >= 0) && (s_vpu_irq_requested == false)) { + s32 err; + + err = request_irq(s_vpu_irq, vpu_irq_handler, 0, + "HevcEnc-irq", (void *)(&s_vpu_drv_context)); + if (err) { + enc_pr(LOG_ERROR, + "fail to register interrupt handler\n"); + spin_lock(&s_vpu_lock); + s_vpu_drv_context.open_count--; + spin_unlock(&s_vpu_lock); + return -EFAULT; + } + s_vpu_irq_requested = true; + } + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + } else + amports_switch_gate("vdec", 1); + + spin_lock_irqsave(&s_vpu_lock, flags); + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + //vpu_clk_config(1); + pwr_ctrl_psci_smc(PDID_SC2_DOS_WAVE, PWR_ON); + } else { + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & + 
(get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 + ? ~0x8 : ~(0x3<<24))); + } + udelay(10); + + if (get_cpu_type() <= MESON_CPU_MAJOR_ID_TXLX) { + data32 = 0x700; + data32 |= READ_VREG(DOS_SW_RESET4); + WRITE_VREG(DOS_SW_RESET4, data32); + data32 &= ~0x700; + WRITE_VREG(DOS_SW_RESET4, data32); + } else { + data32 = 0xf00; + data32 |= READ_VREG(DOS_SW_RESET4); + WRITE_VREG(DOS_SW_RESET4, data32); + data32 &= ~0xf00; + WRITE_VREG(DOS_SW_RESET4, data32); + } + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + pr_err("consider using reset control\n"); + } else { + WRITE_MPEG_REG(RESET0_REGISTER, data32 & ~(1<<21)); + WRITE_MPEG_REG(RESET0_REGISTER, data32 | (1<<21)); + READ_MPEG_REG(RESET0_REGISTER); + READ_MPEG_REG(RESET0_REGISTER); + READ_MPEG_REG(RESET0_REGISTER); + READ_MPEG_REG(RESET0_REGISTER); + } + +#ifndef VPU_SUPPORT_CLOCK_CONTROL + vpu_clk_config(1); +#endif + /* Enable wave420l_vpu_idle_rise_irq, + * Disable wave420l_vpu_idle_fall_irq + */ + WRITE_VREG(DOS_WAVE420L_CNTL_STAT, 0x1); + WRITE_VREG(DOS_MEM_PD_WAVE420L, 0x0); + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + + } else { + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + READ_AOREG(AO_RTI_GEN_PWR_ISO0) & + (get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 + ? 
~0x8 : ~(0x3<<12))); + } + spin_unlock_irqrestore(&s_vpu_lock, flags); + } + memset(dma_cfg, 0, sizeof(dma_cfg)); + dma_cfg[0].fd = -1; + dma_cfg[1].fd = -1; + dma_cfg[2].fd = -1; + + if (r != 0) { + spin_lock(&s_vpu_lock); + enc_pr(LOG_DEBUG, "vpu_open, error handling, r=%d, s_vpu_drv_context.open_count=%d\n", + r, s_vpu_drv_context.open_count); + s_vpu_drv_context.open_count--; + spin_unlock(&s_vpu_lock); + } + enc_pr(LOG_DEBUG, "[-] %s, ret: %d\n", __func__, r); + return r; +} + +static long vpu_ioctl(struct file *filp, u32 cmd, ulong arg) +{ + s32 ret = 0; + struct vpu_drv_context_t *dev = + (struct vpu_drv_context_t *)filp->private_data; + + switch (cmd) { + case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY: + { + struct vpudrv_buffer_pool_t *vbp; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n"); + ret = down_interruptible(&s_vpu_sem); + if (ret == 0) { + vbp = kzalloc(sizeof(*vbp), GFP_KERNEL); + if (!vbp) { + up(&s_vpu_sem); + return -ENOMEM; + } + + ret = copy_from_user(&(vbp->vb), + (struct vpudrv_buffer_t *)arg, + sizeof(struct vpudrv_buffer_t)); + if (ret) { + kfree(vbp); + up(&s_vpu_sem); + return -EFAULT; + } + + ret = vpu_alloc_dma_buffer(&(vbp->vb)); + if (ret == -1) { + ret = -ENOMEM; + kfree(vbp); + up(&s_vpu_sem); + break; + } + ret = copy_to_user((void __user *)arg, + &(vbp->vb), + sizeof(struct vpudrv_buffer_t)); + if (ret) { + kfree(vbp); + ret = -EFAULT; + up(&s_vpu_sem); + break; + } + + vbp->filp = filp; + spin_lock(&s_vpu_lock); + list_add(&vbp->list, &s_vbp_head); + spin_unlock(&s_vpu_lock); + + up(&s_vpu_sem); + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n"); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32: + { + struct vpudrv_buffer_pool_t *vbp; + struct compat_vpudrv_buffer_t buf32; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n"); + memset(&buf32, 0, sizeof(struct compat_vpudrv_buffer_t)); + ret = down_interruptible(&s_vpu_sem); + if (ret == 0) { + vbp = 
kzalloc(sizeof(*vbp), GFP_KERNEL); + if (!vbp) { + up(&s_vpu_sem); + return -ENOMEM; + } + + ret = copy_from_user(&buf32, + (struct compat_vpudrv_buffer_t *)arg, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret) { + kfree(vbp); + up(&s_vpu_sem); + return -EFAULT; + } + + vbp->vb.size = buf32.size; + vbp->vb.cached = buf32.cached; + vbp->vb.phys_addr = + (ulong)buf32.phys_addr; + vbp->vb.virt_addr = + (ulong)buf32.virt_addr; + ret = vpu_alloc_dma_buffer(&(vbp->vb)); + if (ret == -1) { + ret = -ENOMEM; + kfree(vbp); + up(&s_vpu_sem); + break; + } + + buf32.size = vbp->vb.size; + buf32.phys_addr = + (compat_ulong_t)vbp->vb.phys_addr; + buf32.virt_addr = + (compat_ulong_t)vbp->vb.virt_addr; + + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret) { + kfree(vbp); + ret = -EFAULT; + up(&s_vpu_sem); + break; + } + + vbp->filp = filp; + spin_lock(&s_vpu_lock); + list_add(&vbp->list, &s_vbp_head); + spin_unlock(&s_vpu_lock); + + up(&s_vpu_sem); + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n"); + } + break; +#endif + case VDI_IOCTL_FREE_PHYSICALMEMORY: + { + struct vpudrv_buffer_pool_t *vbp, *n; + struct vpudrv_buffer_t vb; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_FREE_PHYSICALMEMORY\n"); + ret = down_interruptible(&s_vpu_sem); + if (ret == 0) { + ret = copy_from_user(&vb, + (struct vpudrv_buffer_t *)arg, + sizeof(struct vpudrv_buffer_t)); + if (ret) { + up(&s_vpu_sem); + return -EACCES; + } + + if (vb.phys_addr) + vpu_free_dma_buffer(&vb); + + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(vbp, n, + &s_vbp_head, list) { + if (vbp->vb.phys_addr == vb.phys_addr) { + list_del(&vbp->list); + kfree(vbp); + break; + } + } + spin_unlock(&s_vpu_lock); + + up(&s_vpu_sem); + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_FREE_PHYSICALMEMORY\n"); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_FREE_PHYSICALMEMORY32: + { + struct vpudrv_buffer_pool_t *vbp, *n; + struct compat_vpudrv_buffer_t buf32; + struct vpudrv_buffer_t 
vb; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_FREE_PHYSICALMEMORY32\n"); + ret = down_interruptible(&s_vpu_sem); + if (ret == 0) { + ret = copy_from_user(&buf32, + (struct compat_vpudrv_buffer_t *)arg, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret) { + up(&s_vpu_sem); + return -EACCES; + } + + vb.size = buf32.size; + vb.phys_addr = + (ulong)buf32.phys_addr; + vb.virt_addr = + (ulong)buf32.virt_addr; + + if (vb.phys_addr) + vpu_free_dma_buffer(&vb); + + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(vbp, n, + &s_vbp_head, list) { + if ((compat_ulong_t)vbp->vb.base + == buf32.base) { + list_del(&vbp->list); + kfree(vbp); + break; + } + } + spin_unlock(&s_vpu_lock); + up(&s_vpu_sem); + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_FREE_PHYSICALMEMORY32\n"); + } + break; +#endif + case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO: + { + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n"); + if (s_video_memory.phys_addr != 0) { + ret = copy_to_user((void __user *)arg, + &s_video_memory, + sizeof(struct vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else { + ret = -EFAULT; + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n"); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32: + { + struct compat_vpudrv_buffer_t buf32; + + memset(&buf32, 0, sizeof(struct compat_vpudrv_buffer_t)); + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32\n"); + + buf32.size = s_video_memory.size; + buf32.phys_addr = + (compat_ulong_t)s_video_memory.phys_addr; + buf32.virt_addr = + (compat_ulong_t)s_video_memory.virt_addr; + if (s_video_memory.phys_addr != 0) { + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else { + ret = -EFAULT; + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32\n"); + } + break; +#endif + case VDI_IOCTL_WAIT_INTERRUPT: + { + struct vpudrv_intr_info_t info; + + enc_pr(LOG_ALL, + 
"[+]VDI_IOCTL_WAIT_INTERRUPT\n"); + ret = copy_from_user(&info, + (struct vpudrv_intr_info_t *)arg, + sizeof(struct vpudrv_intr_info_t)); + if (ret != 0) + return -EFAULT; + + ret = wait_event_interruptible_timeout( + s_interrupt_wait_q, + s_interrupt_flag != 0, + msecs_to_jiffies(info.timeout)); + if (!ret) { + ret = -ETIME; + break; + } + enc_pr(LOG_INFO, + "s_interrupt_flag(%d), reason(0x%08lx)\n", + s_interrupt_flag, dev->interrupt_reason); + if (dev->interrupt_reason & (1 << W4_INT_ENC_PIC)) { + u32 start, end, size, core = 0; + + start = ReadVpuRegister(W4_BS_RD_PTR); + end = ReadVpuRegister(W4_BS_WR_PTR); + size = ReadVpuRegister(W4_RET_ENC_PIC_BYTE); + enc_pr(LOG_INFO, "flush output buffer, "); + enc_pr(LOG_INFO, + "start:0x%x, end:0x%x, size:0x%x\n", + start, end, size); + if (end - start > size && end > start) + size = end - start; + if (size > 0) + cache_flush(start, size); + } + + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + + enc_pr(LOG_INFO, + "s_interrupt_flag(%d), reason(0x%08lx)\n", + s_interrupt_flag, dev->interrupt_reason); + + info.intr_reason = dev->interrupt_reason; + s_interrupt_flag = 0; + dev->interrupt_reason = 0; + ret = copy_to_user((void __user *)arg, + &info, sizeof(struct vpudrv_intr_info_t)); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_WAIT_INTERRUPT\n"); + if (ret != 0) + return -EFAULT; + } + break; + case VDI_IOCTL_SET_CLOCK_GATE: + { + u32 clkgate; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_SET_CLOCK_GATE\n"); + if (get_user(clkgate, (u32 __user *) arg)) + return -EFAULT; +#ifdef VPU_SUPPORT_CLOCK_CONTROL + vpu_clk_config(clkgate); +#endif + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_SET_CLOCK_GATE\n"); + } + break; + case VDI_IOCTL_GET_INSTANCE_POOL: + { + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_INSTANCE_POOL\n"); + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + break; + + if (s_instance_pool.base != 0) { + ret = copy_to_user((void __user *)arg, + &s_instance_pool, + sizeof(struct vpudrv_buffer_t)); + ret = (ret != 0) ? 
-EFAULT : 0; + } else { + ret = copy_from_user(&s_instance_pool, + (struct vpudrv_buffer_t *)arg, + sizeof(struct vpudrv_buffer_t)); + if (ret == 0) { + s_instance_pool.size = + PAGE_ALIGN( + s_instance_pool.size); + s_instance_pool.base = + (ulong)vmalloc( + s_instance_pool.size); + s_instance_pool.phys_addr = + s_instance_pool.base; + if (s_instance_pool.base == 0) { + ret = -EFAULT; + up(&s_vpu_sem); + break; + } + /*clearing memory*/ + memset((void *)s_instance_pool.base, + 0, s_instance_pool.size); + ret = copy_to_user((void __user *)arg, + &s_instance_pool, + sizeof(struct vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else + ret = -EFAULT; + } + up(&s_vpu_sem); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_INSTANCE_POOL\n"); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_GET_INSTANCE_POOL32: + { + struct compat_vpudrv_buffer_t buf32; + + memset(&buf32, 0, sizeof(struct compat_vpudrv_buffer_t)); + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_INSTANCE_POOL32\n"); + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + break; + if (s_instance_pool.base != 0) { + buf32.size = s_instance_pool.size; + buf32.phys_addr = + (compat_ulong_t) + s_instance_pool.phys_addr; + buf32.virt_addr = + (compat_ulong_t) + s_instance_pool.virt_addr; + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof(struct compat_vpudrv_buffer_t)); + ret = (ret != 0) ? 
-EFAULT : 0; + } else { + ret = copy_from_user(&buf32, + (struct compat_vpudrv_buffer_t *)arg, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret == 0) { + s_instance_pool.size = buf32.size; + s_instance_pool.size = + PAGE_ALIGN( + s_instance_pool.size); + s_instance_pool.base = + (ulong)vmalloc( + s_instance_pool.size); + s_instance_pool.phys_addr = + s_instance_pool.base; + buf32.size = + s_instance_pool.size; + buf32.phys_addr = + (compat_ulong_t) + s_instance_pool.phys_addr; + buf32.base = + (compat_ulong_t) + s_instance_pool.base; + buf32.virt_addr = + (compat_ulong_t) + s_instance_pool.virt_addr; + if (s_instance_pool.base == 0) { + ret = -EFAULT; + up(&s_vpu_sem); + break; + } + /*clearing memory*/ + memset((void *)s_instance_pool.base, + 0x0, s_instance_pool.size); + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof( + struct compat_vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else + ret = -EFAULT; + } + up(&s_vpu_sem); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_INSTANCE_POOL32\n"); + } + break; +#endif + case VDI_IOCTL_GET_COMMON_MEMORY: + { + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_COMMON_MEMORY\n"); + if (s_common_memory.phys_addr != 0) { + ret = copy_to_user((void __user *)arg, + &s_common_memory, + sizeof(struct vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else { + ret = copy_from_user(&s_common_memory, + (struct vpudrv_buffer_t *)arg, + sizeof(struct vpudrv_buffer_t)); + if (ret != 0) { + ret = -EFAULT; + break; + } + if (vpu_alloc_dma_buffer( + &s_common_memory) != -1) { + ret = copy_to_user((void __user *)arg, + &s_common_memory, + sizeof(struct vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else + ret = -EFAULT; + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_COMMON_MEMORY\n"); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_GET_COMMON_MEMORY32: + { + struct compat_vpudrv_buffer_t buf32; + + memset(&buf32, 0, sizeof(struct compat_vpudrv_buffer_t)); + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_COMMON_MEMORY32\n"); + + 
buf32.size = s_common_memory.size; + buf32.phys_addr = + (compat_ulong_t) + s_common_memory.phys_addr; + buf32.virt_addr = + (compat_ulong_t) + s_common_memory.virt_addr; + if (s_common_memory.phys_addr != 0) { + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else { + ret = copy_from_user(&buf32, + (struct compat_vpudrv_buffer_t *)arg, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret != 0) { + ret = -EFAULT; + break; + } + s_common_memory.size = buf32.size; + if (vpu_alloc_dma_buffer( + &s_common_memory) != -1) { + buf32.size = + s_common_memory.size; + buf32.phys_addr = + (compat_ulong_t) + s_common_memory.phys_addr; + buf32.virt_addr = + (compat_ulong_t) + s_common_memory.virt_addr; + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof( + struct compat_vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else + ret = -EFAULT; + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_COMMON_MEMORY32\n"); + } + break; +#endif + case VDI_IOCTL_OPEN_INSTANCE: + { + struct vpudrv_inst_info_t inst_info; + struct vpudrv_instanace_list_t *vil, *n; + + vil = kzalloc(sizeof(*vil), GFP_KERNEL); + if (!vil) + return -ENOMEM; + + if (copy_from_user(&inst_info, + (struct vpudrv_inst_info_t *)arg, + sizeof(struct vpudrv_inst_info_t))) + { + kfree(vil); + return -EFAULT; + } + + vil->inst_idx = inst_info.inst_idx; + vil->core_idx = inst_info.core_idx; + vil->filp = filp; + + spin_lock(&s_vpu_lock); + list_add(&vil->list, &s_inst_list_head); + + /* counting the current open instance number */ + inst_info.inst_open_count = 0; + list_for_each_entry_safe(vil, n, + &s_inst_list_head, list) { + if (vil->core_idx == inst_info.core_idx) + inst_info.inst_open_count++; + } + + /* flag just for that vpu is in opened or closed */ + s_vpu_open_ref_count++; + spin_unlock(&s_vpu_lock); + + if (copy_to_user((void __user *)arg, + &inst_info, + sizeof(struct vpudrv_inst_info_t))) { + kfree(vil); + return -EFAULT; + } + + 
enc_pr(LOG_DEBUG, + "VDI_IOCTL_OPEN_INSTANCE "); + enc_pr(LOG_DEBUG, + "core_idx=%d, inst_idx=%d, ", + (u32)inst_info.core_idx, + (u32)inst_info.inst_idx); + enc_pr(LOG_DEBUG, + "s_vpu_open_ref_count=%d, inst_open_count=%d\n", + s_vpu_open_ref_count, + inst_info.inst_open_count); + } + break; + case VDI_IOCTL_CLOSE_INSTANCE: + { + struct vpudrv_inst_info_t inst_info; + struct vpudrv_instanace_list_t *vil, *n; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_CLOSE_INSTANCE\n"); + if (copy_from_user(&inst_info, + (struct vpudrv_inst_info_t *)arg, + sizeof(struct vpudrv_inst_info_t))) + return -EFAULT; + + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(vil, n, + &s_inst_list_head, list) { + if (vil->inst_idx == inst_info.inst_idx && + vil->core_idx == inst_info.core_idx) { + list_del(&vil->list); + kfree(vil); + break; + } + } + + /* counting the current open instance number */ + inst_info.inst_open_count = 0; + list_for_each_entry_safe(vil, n, + &s_inst_list_head, list) { + if (vil->core_idx == inst_info.core_idx) + inst_info.inst_open_count++; + } + + /* flag just for that vpu is in opened or closed */ + s_vpu_open_ref_count--; + spin_unlock(&s_vpu_lock); + + if (copy_to_user((void __user *)arg, + &inst_info, + sizeof(struct vpudrv_inst_info_t))) + return -EFAULT; + + enc_pr(LOG_DEBUG, + "VDI_IOCTL_CLOSE_INSTANCE "); + enc_pr(LOG_DEBUG, + "core_idx=%d, inst_idx=%d, ", + (u32)inst_info.core_idx, + (u32)inst_info.inst_idx); + enc_pr(LOG_DEBUG, + "s_vpu_open_ref_count=%d, inst_open_count=%d\n", + s_vpu_open_ref_count, + inst_info.inst_open_count); + } + break; + case VDI_IOCTL_GET_INSTANCE_NUM: + { + struct vpudrv_inst_info_t inst_info; + struct vpudrv_instanace_list_t *vil, *n; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_INSTANCE_NUM\n"); + + ret = copy_from_user(&inst_info, + (struct vpudrv_inst_info_t *)arg, + sizeof(struct vpudrv_inst_info_t)); + if (ret != 0) + break; + + inst_info.inst_open_count = 0; + + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(vil, n, + 
&s_inst_list_head, list) { + if (vil->core_idx == inst_info.core_idx) + inst_info.inst_open_count++; + } + spin_unlock(&s_vpu_lock); + + ret = copy_to_user((void __user *)arg, + &inst_info, + sizeof(struct vpudrv_inst_info_t)); + + enc_pr(LOG_DEBUG, + "VDI_IOCTL_GET_INSTANCE_NUM "); + enc_pr(LOG_DEBUG, + "core_idx=%d, inst_idx=%d, open_count=%d\n", + (u32)inst_info.core_idx, + (u32)inst_info.inst_idx, + inst_info.inst_open_count); + } + break; + case VDI_IOCTL_RESET: + { + vpu_hw_reset(); + } + break; + case VDI_IOCTL_GET_REGISTER_INFO: + { + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_REGISTER_INFO\n"); + ret = copy_to_user((void __user *)arg, + &s_vpu_register, + sizeof(struct vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_REGISTER_INFO "); + enc_pr(LOG_ALL, + "s_vpu_register.phys_addr=0x%lx, ", + s_vpu_register.phys_addr); + enc_pr(LOG_ALL, + "s_vpu_register.virt_addr=0x%lx, ", + s_vpu_register.virt_addr); + enc_pr(LOG_ALL, + "s_vpu_register.size=0x%x\n", + s_vpu_register.size); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_GET_REGISTER_INFO32: + { + struct compat_vpudrv_buffer_t buf32; + + memset(&buf32, 0, sizeof(struct compat_vpudrv_buffer_t)); + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_REGISTER_INFO32\n"); + + buf32.size = s_vpu_register.size; + buf32.phys_addr = + (compat_ulong_t) + s_vpu_register.phys_addr; + buf32.virt_addr = + (compat_ulong_t) + s_vpu_register.virt_addr; + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof( + struct compat_vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_REGISTER_INFO32 "); + enc_pr(LOG_ALL, + "s_vpu_register.phys_addr=0x%lx, ", + s_vpu_register.phys_addr); + enc_pr(LOG_ALL, + "s_vpu_register.virt_addr=0x%lx, ", + s_vpu_register.virt_addr); + enc_pr(LOG_ALL, + "s_vpu_register.size=0x%x\n", + s_vpu_register.size); + } + break; + case VDI_IOCTL_FLUSH_BUFFER32: + { + struct vpudrv_buffer_pool_t *pool, *n; + struct compat_vpudrv_buffer_t 
buf32; + struct vpudrv_buffer_t vb; + bool find = false; + u32 cached = 0; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_FLUSH_BUFFER32\n"); + + ret = copy_from_user(&buf32, + (struct compat_vpudrv_buffer_t *)arg, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret) + return -EFAULT; + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(pool, n, + &s_vbp_head, list) { + if (pool->filp == filp) { + vb = pool->vb; + if (((compat_ulong_t)vb.phys_addr + == buf32.phys_addr) + && find == false){ + cached = vb.cached; + find = true; + } + } + } + spin_unlock(&s_vpu_lock); + if (find && cached) + dma_flush( + (u32)buf32.phys_addr, + (u32)buf32.size); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_FLUSH_BUFFER32\n"); + } + break; +#endif + case VDI_IOCTL_FLUSH_BUFFER: + { + struct vpudrv_buffer_pool_t *pool, *n; + struct vpudrv_buffer_t vb, buf; + bool find = false; + u32 cached = 0; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_FLUSH_BUFFER\n"); + + ret = copy_from_user(&buf, + (struct vpudrv_buffer_t *)arg, + sizeof(struct vpudrv_buffer_t)); + if (ret) + return -EFAULT; + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(pool, n, + &s_vbp_head, list) { + if (pool->filp == filp) { + vb = pool->vb; + if ((vb.phys_addr + == buf.phys_addr) + && find == false){ + cached = vb.cached; + find = true; + } + } + } + spin_unlock(&s_vpu_lock); + if (find && cached) + dma_flush( + (u32)buf.phys_addr, + (u32)buf.size); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_FLUSH_BUFFER\n"); + } + break; + case VDI_IOCTL_CONFIG_DMA: + { + struct vpu_dma_buf_info_t dma_info; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_CONFIG_DMA\n"); + if (copy_from_user(&dma_info, + (struct vpu_dma_buf_info_t *)arg, + sizeof(struct vpu_dma_buf_info_t))) + return -EFAULT; + + if (vpu_src_addr_config(dma_info)) { + enc_pr(LOG_ERROR, + "src addr config error\n"); + return -EFAULT; + } + + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_CONFIG_DMA %d, %d, %d\n", + dma_info.fd[0], + dma_info.fd[1], + dma_info.fd[2]); + } + break; + case VDI_IOCTL_UNMAP_DMA: + { + enc_pr(LOG_ALL, + 
"[+]VDI_IOCTL_UNMAP_DMA\n"); + + vpu_dma_buffer_unmap(&dma_cfg[0]); + if (dma_cfg[1].paddr != 0) { + vpu_dma_buffer_unmap(&dma_cfg[1]); + } + if (dma_cfg[2].paddr != 0) { + vpu_dma_buffer_unmap(&dma_cfg[2]); + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_UNMAP_DMA\n"); + } + break; + default: + { + enc_pr(LOG_ERROR, + "No such IOCTL, cmd is 0x%x\n", cmd); + ret = -EFAULT; + } + break; + } + return ret; +} + +#ifdef CONFIG_COMPAT +static long vpu_compat_ioctl(struct file *filp, u32 cmd, ulong arg) +{ + long ret; + + arg = (ulong)compat_ptr(arg); + ret = vpu_ioctl(filp, cmd, arg); + return ret; +} +#endif + +static ssize_t vpu_write(struct file *filp, + const char *buf, + size_t len, + loff_t *ppos) +{ + enc_pr(LOG_INFO, "vpu_write len=%d\n", (int)len); + + if (!buf) { + enc_pr(LOG_ERROR, "vpu_write buf = NULL error\n"); + return -EFAULT; + } + + if (len == sizeof(struct vpu_bit_firmware_info_t)) { + struct vpu_bit_firmware_info_t *bit_firmware_info; + + bit_firmware_info = + kmalloc(sizeof(struct vpu_bit_firmware_info_t), + GFP_KERNEL); + if (!bit_firmware_info) { + enc_pr(LOG_ERROR, + "vpu_write bit_firmware_info allocation error\n"); + return -EFAULT; + } + + if (copy_from_user(bit_firmware_info, buf, len)) { + enc_pr(LOG_ERROR, + "vpu_write copy_from_user error for bit_firmware_info\n"); + kfree(bit_firmware_info); + return -EFAULT; + } + + if (bit_firmware_info->size == + sizeof(struct vpu_bit_firmware_info_t)) { + enc_pr(LOG_INFO, + "vpu_write set bit_firmware_info coreIdx=0x%x, ", + bit_firmware_info->core_idx); + enc_pr(LOG_INFO, + "reg_base_offset=0x%x size=0x%x, bit_code[0]=0x%x\n", + bit_firmware_info->reg_base_offset, + bit_firmware_info->size, + bit_firmware_info->bit_code[0]); + + if (bit_firmware_info->core_idx + > MAX_NUM_VPU_CORE) { + enc_pr(LOG_ERROR, + "vpu_write coreIdx[%d] is ", + bit_firmware_info->core_idx); + enc_pr(LOG_ERROR, + "exceeded than MAX_NUM_VPU_CORE[%d]\n", + MAX_NUM_VPU_CORE); + kfree(bit_firmware_info); + return -ENODEV; + } + if 
(bit_firmware_info->core_idx >= MAX_NUM_VPU_CORE) + { + enc_pr(LOG_ERROR,"bit_firmware_info->core_idx %d is invalid!\n", bit_firmware_info->core_idx); + kfree(bit_firmware_info); + return -1; + } + memcpy((void *)&s_bit_firmware_info + [bit_firmware_info->core_idx], + bit_firmware_info, + sizeof(struct vpu_bit_firmware_info_t)); + kfree(bit_firmware_info); + return len; + } + kfree(bit_firmware_info); + } + return -1; +} + +static s32 vpu_release(struct inode *inode, struct file *filp) +{ + s32 ret = 0; + ulong flags; + + enc_pr(LOG_DEBUG, "vpu_release, calling process: %d:%s\n", current->pid, current->comm); + ret = down_interruptible(&s_vpu_sem); + enc_pr(LOG_DEBUG, "vpu_release, ret = %d\n", ret); + + spin_lock(&s_vpu_lock); + if (s_vpu_drv_context.open_count <= 0) { + enc_pr(LOG_DEBUG, "vpu_release, open_count=%d, already released or even not inited\n", + s_vpu_drv_context.open_count); + s_vpu_drv_context.open_count = 0; + spin_unlock(&s_vpu_lock); + goto exit_release; + } + spin_unlock(&s_vpu_lock); + + if (ret == 0) { + vpu_free_buffers(filp); + vpu_free_instances(filp); + + spin_lock(&s_vpu_lock); + enc_pr(LOG_DEBUG, "vpu_release, decrease open_count from %d\n", + s_vpu_drv_context.open_count); + + s_vpu_drv_context.open_count--; + spin_unlock(&s_vpu_lock); + if (s_vpu_drv_context.open_count == 0) { + enc_pr(LOG_DEBUG, + "vpu_release: s_interrupt_flag(%d), reason(0x%08lx)\n", + s_interrupt_flag, s_vpu_drv_context.interrupt_reason); + s_vpu_drv_context.interrupt_reason = 0; + s_interrupt_flag = 0; + if (s_instance_pool.base) { + enc_pr(LOG_DEBUG, "free instance pool\n"); + vfree((const void *)s_instance_pool.base); + s_instance_pool.base = 0; + } + if (s_common_memory.phys_addr) { + enc_pr(LOG_INFO, "vpu_release, s_common_memory 0x%lx\n",s_common_memory.phys_addr); + vpu_free_dma_buffer(&s_common_memory); + s_common_memory.phys_addr = 0; + } + + if (s_video_memory.phys_addr && !use_reserve) { + enc_pr(LOG_DEBUG, "vpu_release, s_video_memory 
0x%lx\n",s_video_memory.phys_addr); + codec_mm_free_for_dma( + VPU_DEV_NAME, + (u32)s_video_memory.phys_addr); + vmem_exit(&s_vmem); + memset(&s_video_memory, + 0, sizeof(struct vpudrv_buffer_t)); + memset(&s_vmem, + 0, sizeof(struct video_mm_t)); + } + + if ((s_vpu_irq >= 0) && (s_vpu_irq_requested == true)) { + free_irq(s_vpu_irq, &s_vpu_drv_context); + s_vpu_irq_requested = false; + } + spin_lock_irqsave(&s_vpu_lock, flags); + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + //vpu_clk_config(0); + pwr_ctrl_psci_smc(PDID_SC2_DOS_WAVE, PWR_OFF); + } else { + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + READ_AOREG(AO_RTI_GEN_PWR_ISO0) | + (get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 + ? 0x8 : (0x3<<12))); + } + + udelay(10); + + WRITE_VREG(DOS_MEM_PD_WAVE420L, 0xffffffff); +#ifndef VPU_SUPPORT_CLOCK_CONTROL + vpu_clk_config(0); +#endif + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + + } else { + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | + (get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 + ? 0x8 : (0x3<<24))); + } + + udelay(10); + spin_unlock_irqrestore(&s_vpu_lock, flags); + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + } else + amports_switch_gate("vdec", 0); + } + } +exit_release: + up(&s_vpu_sem); + return 0; +} + +static s32 vpu_fasync(s32 fd, struct file *filp, s32 mode) +{ + struct vpu_drv_context_t *dev = + (struct vpu_drv_context_t *)filp->private_data; + return fasync_helper(fd, filp, mode, &dev->async_queue); +} + +static s32 vpu_map_to_register(struct file *fp, struct vm_area_struct *vm) +{ + ulong pfn; + + vm->vm_flags |= VM_IO | VM_RESERVED; + vm->vm_page_prot = + pgprot_noncached(vm->vm_page_prot); + pfn = s_vpu_register.phys_addr >> PAGE_SHIFT; + return remap_pfn_range(vm, vm->vm_start, pfn, + vm->vm_end - vm->vm_start, + vm->vm_page_prot) ? 
-EAGAIN : 0; +} + +static s32 vpu_map_to_physical_memory( + struct file *fp, struct vm_area_struct *vm) +{ + vm->vm_flags |= VM_IO | VM_RESERVED; + if (vm->vm_pgoff == + (s_common_memory.phys_addr >> PAGE_SHIFT)) { + vm->vm_page_prot = + pgprot_noncached(vm->vm_page_prot); + } else { + if (vpu_is_buffer_cached(fp, vm->vm_pgoff) == 0) + vm->vm_page_prot = + pgprot_noncached(vm->vm_page_prot); + } + /* vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot); */ + if (!pfn_valid(vm->vm_pgoff)) { + enc_pr(LOG_ERROR, "%s invalid pfn\n", __FUNCTION__); + return -EAGAIN; + } + return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff, + vm->vm_end - vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0; +} + +static s32 vpu_map_to_instance_pool_memory( + struct file *fp, struct vm_area_struct *vm) +{ + s32 ret; + long length = vm->vm_end - vm->vm_start; + ulong start = vm->vm_start; + s8 *vmalloc_area_ptr = (s8 *)s_instance_pool.base; + ulong pfn; + + vm->vm_flags |= VM_RESERVED; + + /* loop over all pages, map it page individually */ + while (length > 0) { + pfn = vmalloc_to_pfn(vmalloc_area_ptr); + ret = remap_pfn_range(vm, start, pfn, + PAGE_SIZE, PAGE_SHARED); + if (ret < 0) + return ret; + start += PAGE_SIZE; + vmalloc_area_ptr += PAGE_SIZE; + length -= PAGE_SIZE; + } + return 0; +} + +/* + * @brief memory map interface for vpu file operation + * @return 0 on success or negative error code on error + */ +static s32 vpu_mmap(struct file *fp, struct vm_area_struct *vm) +{ + /* if (vm->vm_pgoff == (s_vpu_register.phys_addr >> PAGE_SHIFT)) */ + if ((vm->vm_end - vm->vm_start == s_vpu_register.size + 1) && + (vm->vm_pgoff == 0)) { + vm->vm_pgoff = (s_vpu_register.phys_addr >> PAGE_SHIFT); + return vpu_map_to_register(fp, vm); + } + + if (vm->vm_pgoff == 0) + return vpu_map_to_instance_pool_memory(fp, vm); + + return vpu_map_to_physical_memory(fp, vm); +} +static int vpu_dma_buffer_map(struct vpu_dma_cfg *cfg) +{ + int ret = -1; + int fd = -1; + struct dma_buf *dbuf = NULL; + struct 
dma_buf_attachment *d_att = NULL; + struct sg_table *sg = NULL; + void *vaddr = NULL; + struct device *dev = NULL; + enum dma_data_direction dir; + + if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) { + enc_pr(LOG_ERROR, "error dma param\n"); + return -EINVAL; + } + fd = cfg->fd; + dev = cfg->dev; + dir = cfg->dir; + + dbuf = dma_buf_get(fd); + if (dbuf == NULL) { + enc_pr(LOG_ERROR, "failed to get dma buffer,fd %d\n",fd); + return -EINVAL; + } + + d_att = dma_buf_attach(dbuf, dev); + if (d_att == NULL) { + enc_pr(LOG_ERROR, "failed to set dma attach\n"); + goto attach_err; + } + + sg = dma_buf_map_attachment(d_att, dir); + if (sg == NULL) { + enc_pr(LOG_ERROR, "failed to get dma sg\n"); + goto map_attach_err; + } + cfg->dbuf = dbuf; + cfg->attach = d_att; + cfg->vaddr = vaddr; + cfg->sg = sg; + + return 0; + +map_attach_err: + dma_buf_detach(dbuf, d_att); +attach_err: + dma_buf_put(dbuf); + + return ret; +} + +static void vpu_dma_buffer_unmap(struct vpu_dma_cfg *cfg) +{ + int fd = -1; + struct dma_buf *dbuf = NULL; + struct dma_buf_attachment *d_att = NULL; + struct sg_table *sg = NULL; + /*void *vaddr = NULL;*/ + struct device *dev = NULL; + enum dma_data_direction dir; + + if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL + || cfg->dbuf == NULL /*|| cfg->vaddr == NULL*/ + || cfg->attach == NULL || cfg->sg == NULL) { + enc_pr(LOG_ERROR, "unmap: Error dma param\n"); + return; + } + + fd = cfg->fd; + dev = cfg->dev; + dir = cfg->dir; + dbuf = cfg->dbuf; + d_att = cfg->attach; + sg = cfg->sg; + + dma_buf_unmap_attachment(d_att, sg, dir); + dma_buf_detach(dbuf, d_att); + dma_buf_put(dbuf); + + enc_pr(LOG_INFO, "vpu_dma_buffer_unmap fd %d\n",fd); +} + +static int vpu_dma_buffer_get_phys(struct vpu_dma_cfg *cfg, unsigned long *addr) +{ + struct sg_table *sg_table; + struct page *page; + int ret; + + ret = vpu_dma_buffer_map(cfg); + if (ret < 0) { + printk("vpu_dma_buffer_map failed\n"); + return ret; + } + if (cfg->sg) { + sg_table = cfg->sg; + page = 
sg_page(sg_table->sgl);
		*addr = PFN_PHYS(page_to_pfn(page));
		ret = 0;
	}
	enc_pr(LOG_INFO,"vpu_dma_buffer_get_phys\n");

	return ret;
}

/*
 * Program the WAVE420L source-address registers (Y/U/V) for one input
 * frame described by dma-buf fds. Supports 1/2/3-plane layouts in
 * YUV420, NV12 and NV21; for packed single-fd layouts the chroma
 * offsets are derived from width*height (4:2:0 => U plane is Y/4).
 * Returns 0 on success, (u32)-1 if any plane fails to import.
 */
static u32 vpu_src_addr_config(struct vpu_dma_buf_info_t info) {
	unsigned long phy_addr_y = 0;
	unsigned long phy_addr_u = 0;
	unsigned long phy_addr_v = 0;
	/* plane sizes for packed single-fd layouts (4:2:0 chroma = Y/4) */
	unsigned long Ysize = info.width * info.height;
	unsigned long Usize = Ysize >> 2;
	s32 ret = 0;
	/* presumably consumed by the Read/WriteVpuRegister macros -- confirm */
	u32 core = 0;

	/* import luma plane (always present) */
	dma_cfg[0].dir = DMA_TO_DEVICE;
	dma_cfg[0].fd = info.fd[0];
	dma_cfg[0].dev = &(hevc_pdev->dev);
	ret = vpu_dma_buffer_get_phys(&dma_cfg[0], &phy_addr_y);
	if (ret < 0) {
		enc_pr(LOG_ERROR, "import fd %d failed\n", info.fd[0]);
		return -1;
	}

	/* import chroma plane U when a second fd is supplied */
	if (info.num_planes >= 2) {
		dma_cfg[1].dir = DMA_TO_DEVICE;
		dma_cfg[1].fd = info.fd[1];
		dma_cfg[1].dev = &(hevc_pdev->dev);
		ret = vpu_dma_buffer_get_phys(&dma_cfg[1], &phy_addr_u);
		if (ret < 0) {
			enc_pr(LOG_ERROR, "import fd %d failed\n", info.fd[1]);
			return -1;
		}
	}

	/* import chroma plane V when a third fd is supplied */
	if (info.num_planes >= 3) {
		dma_cfg[2].dir = DMA_TO_DEVICE;
		dma_cfg[2].fd = info.fd[2];
		dma_cfg[2].dev = &(hevc_pdev->dev);
		ret = vpu_dma_buffer_get_phys(&dma_cfg[2], &phy_addr_v);
		if (ret < 0) {
			enc_pr(LOG_ERROR, "import fd %d failed\n", info.fd[2]);
			return -1;
		}
	}

	enc_pr(LOG_INFO, "vpu_src_addr_config phy_addr 0x%lx, 0x%lx, 0x%lx\n",
		phy_addr_y, phy_addr_u, phy_addr_v);

	/* remember the mapped addresses so VDI_IOCTL_UNMAP_DMA can undo them */
	dma_cfg[0].paddr = (void *)phy_addr_y;
	dma_cfg[1].paddr = (void *)phy_addr_u;
	dma_cfg[2].paddr = (void *)phy_addr_v;

	enc_pr(LOG_INFO, "info.num_planes %d, info.fmt %d\n",
		info.num_planes, info.fmt);

	WriteVpuRegister(W4_SRC_ADDR_Y, phy_addr_y);
	if (info.num_planes == 1) {
		/* single fd: chroma follows luma inside the same buffer */
		if (info.fmt == AMVENC_YUV420) {
			WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_y + Ysize);
			WriteVpuRegister(W4_SRC_ADDR_V, phy_addr_y + Ysize + Usize);
		} else if (info.fmt == AMVENC_NV12 || info.fmt == AMVENC_NV21 ) {
			/* interleaved UV: both registers point at the UV plane */
			WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_y + Ysize);
			WriteVpuRegister(W4_SRC_ADDR_V, phy_addr_y + Ysize);
		} else {
			enc_pr(LOG_ERROR, "not support fmt %d\n", info.fmt);
		}

	} else if (info.num_planes == 2) {
		if (info.fmt == AMVENC_NV12 || info.fmt == AMVENC_NV21 ) {
			WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_u);
			WriteVpuRegister(W4_SRC_ADDR_V, phy_addr_u);
		} else {
			enc_pr(LOG_ERROR, "not support fmt %d\n", info.fmt);
		}

	} else if (info.num_planes == 3) {
		if (info.fmt == AMVENC_YUV420) {
			WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_u);
			WriteVpuRegister(W4_SRC_ADDR_V, phy_addr_v);
		} else {
			enc_pr(LOG_ERROR, "not support fmt %d\n", info.fmt);
		}
	}
	return 0;

}

/* Character-device entry points for /dev/HevcEnc. */
static const struct file_operations vpu_fops = {
	.owner = THIS_MODULE,
	.open = vpu_open,
	.release = vpu_release,
	.write = vpu_write,
	.unlocked_ioctl = vpu_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vpu_compat_ioctl,
#endif
	.fasync = vpu_fasync,
	.mmap = vpu_mmap,
};

/* sysfs 'encode_status'/'hevcenc_status' read handler (fixed string). */
static ssize_t hevcenc_status_show(struct class *cla,
	struct class_attribute *attr, char *buf)
{
	return snprintf(buf, 40, "hevcenc_status_show\n");
}

/* Class attribute plumbing differs across the 4.13 sysfs API change. */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(4,13,1)
static struct class_attribute hevcenc_class_attrs[] = {
	__ATTR(encode_status,
	S_IRUGO | S_IWUSR,
	hevcenc_status_show,
	NULL),
	__ATTR_NULL
};

static struct class hevcenc_class = {
	.name = VPU_CLASS_NAME,
	.class_attrs = hevcenc_class_attrs,
};
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(4,13,1) */

static CLASS_ATTR_RO(hevcenc_status);

static struct attribute *hevcenc_class_attrs[] = {
	&class_attr_hevcenc_status.attr,
	NULL
};

ATTRIBUTE_GROUPS(hevcenc_class);

static struct class hevcenc_class = {
	.name = VPU_CLASS_NAME,
	.class_groups = hevcenc_class_groups,
};
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(4,13,1) */


/*
 * Register the char device, its class and the device node.
 * Returns the register_chrdev() result (<= 0 means failure), a negative
 * class_register() error, or -1 if device_create() fails.
 */
s32 init_HevcEnc_device(void)
{
	s32 r = 0;

	r = register_chrdev(0, VPU_DEV_NAME, &vpu_fops);
	if (r <= 0) {
		enc_pr(LOG_ERROR, "register hevcenc device error.\n");
		return r;
	}
	s_vpu_major = r;

	r =
class_register(&hevcenc_class); + if (r < 0) { + enc_pr(LOG_ERROR, "error create hevcenc class.\n"); + return r; + } + + hevcenc_dev = device_create(&hevcenc_class, NULL, + MKDEV(s_vpu_major, 0), NULL, + VPU_DEV_NAME); + + if (IS_ERR(hevcenc_dev)) { + enc_pr(LOG_ERROR, "create hevcenc device error.\n"); + class_unregister(&hevcenc_class); + return -1; + } + return r; +} + +s32 uninit_HevcEnc_device(void) +{ + if (hevcenc_dev) + device_destroy(&hevcenc_class, MKDEV(s_vpu_major, 0)); + + class_destroy(&hevcenc_class); + + unregister_chrdev(s_vpu_major, VPU_DEV_NAME); + return 0; +} + +static s32 hevc_mem_device_init( + struct reserved_mem *rmem, struct device *dev) +{ + s32 r; + + if (!rmem) { + enc_pr(LOG_ERROR, + "Can not obtain I/O memory, will allocate hevc buffer!\n"); + r = -EFAULT; + return r; + } + + if ((!rmem->base) || + (rmem->size < VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE)) { + enc_pr(LOG_ERROR, + "memory range error, 0x%lx - 0x%lx\n", + (ulong)rmem->base, (ulong)rmem->size); + r = -EFAULT; + return r; + } + r = 0; + s_video_memory.size = rmem->size; + s_video_memory.phys_addr = (ulong)rmem->base; + enc_pr(LOG_DEBUG, "hevc_mem_device_init %d, 0x%lx\n ",s_video_memory.size,s_video_memory.phys_addr); + + return r; +} + +static s32 vpu_probe(struct platform_device *pdev) +{ + s32 err = 0, irq, reg_count, idx; + struct resource res; + struct device_node *np, *child; + + enc_pr(LOG_DEBUG, "vpu_probe, clock_a: %d, clock_b: %d, clock_c: %d\n", + wave_clocka, wave_clockb, wave_clockc); + + s_vpu_major = 0; + use_reserve = false; + s_vpu_irq = -1; + cma_pool_size = 0; + s_vpu_irq_requested = false; + s_vpu_open_ref_count = 0; + hevcenc_dev = NULL; + hevc_pdev = NULL; + memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t)); + memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t)); + memset(&s_vmem, 0, sizeof(struct video_mm_t)); + memset(&s_bit_firmware_info[0], 0, sizeof(s_bit_firmware_info)); + memset(&res, 0, sizeof(struct resource)); + + idx = 
of_reserved_mem_device_init(&pdev->dev); + + if (idx != 0) { + enc_pr(LOG_DEBUG, + "HevcEnc reserved memory config fail.\n"); + } else if (s_video_memory.phys_addr) { + use_reserve = true; + } + + if (use_reserve == false) { +#ifndef CONFIG_CMA + enc_pr(LOG_ERROR, + "HevcEnc reserved memory is invaild, probe fail!\n"); + err = -EFAULT; + goto ERROR_PROVE_DEVICE; +#else + cma_pool_size = + (codec_mm_get_total_size() > + (VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE)) ? + (VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE) : + codec_mm_get_total_size(); + enc_pr(LOG_DEBUG, + "HevcEnc - cma memory pool size: %d MB\n", + (u32)cma_pool_size / SZ_1M); +#endif + } + + /* get interrupt resource */ + irq = platform_get_irq_byname(pdev, "wave420l_irq"); + if (irq < 0) { + enc_pr(LOG_ERROR, "get HevcEnc irq resource error\n"); + err = -ENXIO; + goto ERROR_PROVE_DEVICE; + } + s_vpu_irq = irq; + enc_pr(LOG_DEBUG, "HevcEnc - wave420l_irq: %d\n", s_vpu_irq); +#if 0 + rstc = devm_reset_control_get(&pdev->dev, "HevcEnc"); + if (IS_ERR(rstc)) { + enc_pr(LOG_ERROR, + "get HevcEnc rstc error: %lx\n", PTR_ERR(rstc)); + rstc = NULL; + err = -ENOENT; + goto ERROR_PROVE_DEVICE; + } + reset_control_assert(rstc); + s_vpu_rstc = rstc; + + clk = clk_get(&pdev->dev, "clk_HevcEnc"); + if (IS_ERR(clk)) { + enc_pr(LOG_ERROR, "cannot get clock\n"); + clk = NULL; + err = -ENOENT; + goto ERROR_PROVE_DEVICE; + } + s_vpu_clk = clk; +#endif + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) { + if (vpu_clk_prepare(&pdev->dev, &s_vpu_clks)) { + err = -ENOENT; + //goto ERROR_PROVE_DEVICE; + return err; + } + } + +#ifndef VPU_SUPPORT_CLOCK_CONTROL + vpu_clk_config(1); +#endif + + np = pdev->dev.of_node; + reg_count = 0; + for_each_child_of_node(np, child) { + if (of_address_to_resource(child, 0, &res) + || (reg_count > 1)) { + enc_pr(LOG_ERROR, + "no reg ranges or more reg ranges %d\n", + reg_count); + err = -ENXIO; + goto ERROR_PROVE_DEVICE; + } + /* if platform driver is implemented */ + if (res.start != 0) { + 
s_vpu_register.phys_addr = res.start; + s_vpu_register.virt_addr = + (ulong)ioremap_nocache( + res.start, resource_size(&res)); + s_vpu_register.size = res.end - res.start; + enc_pr(LOG_DEBUG, + "vpu base address get from platform driver "); + enc_pr(LOG_DEBUG, + "physical base addr=0x%lx, virtual base=0x%lx\n", + s_vpu_register.phys_addr, + s_vpu_register.virt_addr); + } else { + s_vpu_register.phys_addr = VPU_REG_BASE_ADDR; + s_vpu_register.virt_addr = + (ulong)ioremap_nocache( + s_vpu_register.phys_addr, VPU_REG_SIZE); + s_vpu_register.size = VPU_REG_SIZE; + enc_pr(LOG_DEBUG, + "vpu base address get from defined value "); + enc_pr(LOG_DEBUG, + "physical base addr=0x%lx, virtual base=0x%lx\n", + s_vpu_register.phys_addr, + s_vpu_register.virt_addr); + } + reg_count++; + } + + /* get the major number of the character device */ + if (init_HevcEnc_device()) { + err = -EBUSY; + enc_pr(LOG_ERROR, "could not allocate major number\n"); + goto ERROR_PROVE_DEVICE; + } + enc_pr(LOG_DEBUG, "SUCCESS alloc_chrdev_region\n"); + + init_waitqueue_head(&s_interrupt_wait_q); + tasklet_init(&hevc_tasklet, + hevcenc_isr_tasklet, + (ulong)&s_vpu_drv_context); + s_common_memory.base = 0; + s_instance_pool.base = 0; + + if (use_reserve == true) { + if (vmem_init(&s_vmem, s_video_memory.phys_addr, + s_video_memory.size) < 0) { + enc_pr(LOG_ERROR, "fail to init vmem system\n"); + goto ERROR_PROVE_DEVICE; + } + enc_pr(LOG_DEBUG, + "success to probe vpu device with video memory "); + enc_pr(LOG_DEBUG, + "phys_addr=0x%lx, base = 0x%lx\n", + (ulong)s_video_memory.phys_addr, + (ulong)s_video_memory.base); + } else + enc_pr(LOG_DEBUG, + "success to probe vpu device with video memory from cma\n"); + hevc_pdev = pdev; + return 0; + +ERROR_PROVE_DEVICE: + if (s_vpu_register.virt_addr) { + iounmap((void *)s_vpu_register.virt_addr); + memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t)); + } + + if (s_video_memory.phys_addr) { + vmem_exit(&s_vmem); + memset(&s_video_memory, 0, sizeof(struct 
vpudrv_buffer_t)); + memset(&s_vmem, 0, sizeof(struct video_mm_t)); + } + +#ifndef VPU_SUPPORT_CLOCK_CONTROL + vpu_clk_config(0); +#endif + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) + vpu_clk_unprepare(&pdev->dev, &s_vpu_clks); + + if (s_vpu_irq_requested == true) { + if (s_vpu_irq >= 0) { + free_irq(s_vpu_irq, &s_vpu_drv_context); + s_vpu_irq = -1; + } + s_vpu_irq_requested = false; + } + uninit_HevcEnc_device(); + return err; +} + +static s32 vpu_remove(struct platform_device *pdev) +{ + enc_pr(LOG_DEBUG, "vpu_remove\n"); + + if (s_instance_pool.base) { + vfree((const void *)s_instance_pool.base); + s_instance_pool.base = 0; + } + + if (s_common_memory.phys_addr) { + vpu_free_dma_buffer(&s_common_memory); + s_common_memory.phys_addr = 0; + } + + if (s_video_memory.phys_addr) { + if (!use_reserve) { + codec_mm_free_for_dma( + VPU_DEV_NAME, + (u32)s_video_memory.phys_addr); + } + vmem_exit(&s_vmem); + memset(&s_video_memory, + 0, sizeof(struct vpudrv_buffer_t)); + memset(&s_vmem, + 0, sizeof(struct video_mm_t)); + } + + if (s_vpu_irq_requested == true) { + if (s_vpu_irq >= 0) { + free_irq(s_vpu_irq, &s_vpu_drv_context); + s_vpu_irq = -1; + } + s_vpu_irq_requested = false; + } + + if (s_vpu_register.virt_addr) { + iounmap((void *)s_vpu_register.virt_addr); + memset(&s_vpu_register, + 0, sizeof(struct vpudrv_buffer_t)); + } + hevc_pdev = NULL; +#ifndef VPU_SUPPORT_CLOCK_CONTROL + vpu_clk_config(0); +#endif + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_SC2) + vpu_clk_unprepare(&pdev->dev, &s_vpu_clks); + uninit_HevcEnc_device(); + return 0; +} + +#ifdef CONFIG_PM +static void Wave4BitIssueCommand(u32 core, u32 cmd) +{ + WriteVpuRegister(W4_VPU_BUSY_STATUS, 1); + WriteVpuRegister(W4_CORE_INDEX, 0); + /* coreIdx = ReadVpuRegister(W4_VPU_BUSY_STATUS); */ + /* coreIdx = 0; */ + /* WriteVpuRegister(W4_INST_INDEX, + * (instanceIndex & 0xffff) | (codecMode << 16)); + */ + WriteVpuRegister(W4_COMMAND, cmd); + WriteVpuRegister(W4_VPU_HOST_INT_REQ, 1); +} + +static s32 
vpu_suspend(struct platform_device *pdev, pm_message_t state) +{ + u32 core; + ulong timeout = jiffies + HZ; /* vpu wait timeout to 1sec */ + + enc_pr(LOG_DEBUG, "vpu_suspend\n"); + + vpu_clk_config(1); + + if (s_vpu_open_ref_count > 0) { + for (core = 0; core < MAX_NUM_VPU_CORE; core++) { + if (s_bit_firmware_info[core].size == 0) + continue; + while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) { + if (time_after(jiffies, timeout)) { + enc_pr(LOG_ERROR, + "SLEEP_VPU BUSY timeout"); + goto DONE_SUSPEND; + } + } + Wave4BitIssueCommand(core, W4_CMD_SLEEP_VPU); + + while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) { + if (time_after(jiffies, timeout)) { + enc_pr(LOG_ERROR, + "SLEEP_VPU BUSY timeout"); + goto DONE_SUSPEND; + } + } + if (ReadVpuRegister(W4_RET_SUCCESS) == 0) { + enc_pr(LOG_ERROR, + "SLEEP_VPU failed [0x%x]", + ReadVpuRegister(W4_RET_FAIL_REASON)); + goto DONE_SUSPEND; + } + } + } + + vpu_clk_config(0); + return 0; + +DONE_SUSPEND: + vpu_clk_config(0); + return -EAGAIN; +} +static s32 vpu_resume(struct platform_device *pdev) +{ + u32 i; + u32 core; + u32 val; + ulong timeout = jiffies + HZ; /* vpu wait timeout to 1sec */ + ulong code_base; + u32 code_size; + u32 remap_size; + u32 regVal; + u32 hwOption = 0; + + enc_pr(LOG_DEBUG, "vpu_resume\n"); + + vpu_clk_config(1); + if (s_vpu_open_ref_count > 0) { + for (core = 0; core < MAX_NUM_VPU_CORE; core++) { + if (s_bit_firmware_info[core].size == 0) + continue; + code_base = s_common_memory.phys_addr; + /* ALIGN TO 4KB */ + code_size = (s_common_memory.size & ~0xfff); + if (code_size < s_bit_firmware_info[core].size * 2) + goto DONE_WAKEUP; + + /*---- LOAD BOOT CODE */ + for (i = 0; i < 512; i += 2) { + val = s_bit_firmware_info[core].bit_code[i]; + val |= (s_bit_firmware_info[core].bit_code[i+1] << 16); + WriteVpu(code_base+(i*2), val); + } + + regVal = 0; + WriteVpuRegister(W4_PO_CONF, regVal); + + /* Reset All blocks */ + regVal = 0x7ffffff; + WriteVpuRegister(W4_VPU_RESET_REQ, regVal); + + /* Waiting reset done */ + 
while (ReadVpuRegister(W4_VPU_RESET_STATUS)) { + if (time_after(jiffies, timeout)) + goto DONE_WAKEUP; + } + + WriteVpuRegister(W4_VPU_RESET_REQ, 0); + + /* remap page size */ + remap_size = (code_size >> 12) & 0x1ff; + regVal = 0x80000000 | (W4_REMAP_CODE_INDEX<<12) + | (0 << 16) | (1<<11) | remap_size; + WriteVpuRegister(W4_VPU_REMAP_CTRL, regVal); + /* DO NOT CHANGE! */ + WriteVpuRegister(W4_VPU_REMAP_VADDR, 0x00000000); + WriteVpuRegister(W4_VPU_REMAP_PADDR, code_base); + WriteVpuRegister(W4_ADDR_CODE_BASE, code_base); + WriteVpuRegister(W4_CODE_SIZE, code_size); + WriteVpuRegister(W4_CODE_PARAM, 0); + WriteVpuRegister(W4_INIT_VPU_TIME_OUT_CNT, timeout); + WriteVpuRegister(W4_HW_OPTION, hwOption); + + /* Interrupt */ +#if 0 + regVal = (1 << W4_INT_DEC_PIC_HDR); + regVal |= (1 << W4_INT_DEC_PIC); + regVal |= (1 << W4_INT_QUERY_DEC); + regVal |= (1 << W4_INT_SLEEP_VPU); + regVal |= (1 << W4_INT_BSBUF_EMPTY); +#endif + regVal = 0xfffffefe; + WriteVpuRegister(W4_VPU_VINT_ENABLE, regVal); + Wave4BitIssueCommand(core, W4_CMD_INIT_VPU); + WriteVpuRegister(W4_VPU_REMAP_CORE_START, 1); + while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) { + if (time_after(jiffies, timeout)) + goto DONE_WAKEUP; + } + + if (ReadVpuRegister(W4_RET_SUCCESS) == 0) { + enc_pr(LOG_ERROR, + "WAKEUP_VPU failed [0x%x]", + ReadVpuRegister(W4_RET_FAIL_REASON)); + goto DONE_WAKEUP; + } + } + } + + if (s_vpu_open_ref_count == 0) + vpu_clk_config(0); +DONE_WAKEUP: + if (s_vpu_open_ref_count > 0) + vpu_clk_config(1); + return 0; +} +#else +#define vpu_suspend NULL +#define vpu_resume NULL +#endif /* !CONFIG_PM */ + +static const struct of_device_id cnm_hevcenc_dt_match[] = { + { + .compatible = "cnm, HevcEnc", + }, + {}, +}; + +static struct platform_driver vpu_driver = { + .driver = { + .name = VPU_PLATFORM_DEVICE_NAME, + .of_match_table = cnm_hevcenc_dt_match, + }, + .probe = vpu_probe, + .remove = vpu_remove, + .suspend = vpu_suspend, + .resume = vpu_resume, +}; + +static s32 __init vpu_init(void) +{ + 
s32 res; + + enc_pr(LOG_DEBUG, "vpu_init\n"); + + if ((get_cpu_type() != MESON_CPU_MAJOR_ID_GXM) + && (get_cpu_type() != MESON_CPU_MAJOR_ID_G12A) + && (get_cpu_type() != MESON_CPU_MAJOR_ID_GXLX) + && (get_cpu_type() != MESON_CPU_MAJOR_ID_G12B) + && (get_cpu_type() != MESON_CPU_MAJOR_ID_SM1) + && (get_cpu_type() != MESON_CPU_MAJOR_ID_SC2)) { + enc_pr(LOG_DEBUG, + "The chip is not support hevc encoder\n"); + return -1; + } + if (get_cpu_type() == MESON_CPU_MAJOR_ID_G12A) { + if ((READ_EFUSE_REG(EFUSE_LIC2) >> 12) & 1) { + enc_pr(LOG_DEBUG, + "Chip efuse disabled H265\n"); + return -1; + } + } + + res = platform_driver_register(&vpu_driver); + enc_pr(LOG_INFO, + "end vpu_init result=0x%x\n", res); + return res; +} + +static void __exit vpu_exit(void) +{ + enc_pr(LOG_DEBUG, "vpu_exit\n"); + if ((get_cpu_type() != MESON_CPU_MAJOR_ID_GXM) && + (get_cpu_type() != MESON_CPU_MAJOR_ID_G12A) && + (get_cpu_type() != MESON_CPU_MAJOR_ID_GXLX) && + (get_cpu_type() != MESON_CPU_MAJOR_ID_G12B) && + (get_cpu_type() != MESON_CPU_MAJOR_ID_SC2) && + (get_cpu_type() != MESON_CPU_MAJOR_ID_SM1)) { + enc_pr(LOG_INFO, + "The chip is not support hevc encoder\n"); + return; + } + platform_driver_unregister(&vpu_driver); +} + +static const struct reserved_mem_ops rmem_hevc_ops = { + .device_init = hevc_mem_device_init, +}; + +static s32 __init hevc_mem_setup(struct reserved_mem *rmem) +{ + rmem->ops = &rmem_hevc_ops; + enc_pr(LOG_DEBUG, "HevcEnc reserved mem setup.\n"); + return 0; +} + +module_param(print_level, uint, 0664); +MODULE_PARM_DESC(print_level, "\n print_level\n"); + +module_param(clock_level, uint, 0664); +MODULE_PARM_DESC(clock_level, "\n clock_level\n"); + +module_param(wave_clocka, uint, 0664); +MODULE_PARM_DESC(wave_clocka, "\n wave_clocka\n"); + +module_param(wave_clockb, uint, 0664); +MODULE_PARM_DESC(wave_clockb, "\n wave_clockb\n"); + +module_param(wave_clockc, uint, 0664); +MODULE_PARM_DESC(wave_clockc, "\n wave_clockc\n"); + +MODULE_AUTHOR("Amlogic using C&M VPU, Inc."); 
MODULE_DESCRIPTION("VPU linux driver");
MODULE_LICENSE("GPL");

/* Module entry/exit: vpu_init registers the platform driver (after the
 * chip-type / efuse gating it performs), vpu_exit unregisters it.
 */
module_init(vpu_init);
module_exit(vpu_exit);
/* Bind the "amlogic, HevcEnc-memory" reserved-memory DT node to
 * hevc_mem_setup so hevc_mem_device_init can claim the carve-out for
 * s_video_memory at boot.
 * NOTE(review): the compatible string contains a space after the comma
 * ("amlogic, HevcEnc-memory"); the device tree must match it byte-for-byte —
 * confirm this is intentional before "fixing" it.
 */
RESERVEDMEM_OF_DECLARE(amlogic, "amlogic, HevcEnc-memory", hevc_mem_setup);
diff --git a/drivers/frame_sink/encoder/h265/vpu.h b/drivers/frame_sink/encoder/h265/vpu.h new file mode 100644 index 0000000..b89744f --- /dev/null +++ b/drivers/frame_sink/encoder/h265/vpu.h
/*
 * vpu.h
 *
 * linux device driver for VPU.
 *
 * Copyright (C) 2006 - 2013 CHIPS&MEDIA INC.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#ifndef __VPU_DRV_H__
#define __VPU_DRV_H__

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/dma-buf.h>

/* Sizing of the shared instance pool exposed to userspace via
 * VDI_IOCTL_GET_INSTANCE_POOL (see struct vpudrv_instance_pool_t below).
 */
#define MAX_INST_HANDLE_SIZE (32*1024)
#define MAX_NUM_INSTANCE 4
#define MAX_NUM_VPU_CORE 1

/* Host->firmware command codes written to W4_COMMAND
 * (issued by Wave4BitIssueCommand in vpu.c).
 */
#define W4_CMD_INIT_VPU (0x0001)
#define W4_CMD_SLEEP_VPU (0x0400)
#define W4_CMD_WAKEUP_VPU (0x0800)

/*
 * Clock-level helpers: each HevcEnc_Lx() programs HHI_WAVE420L_CLK_CNTL
 * with a divider/mux pair for both clock branches (fields at bits 25/16
 * and 9/0). The MHz figures in the comments are for GXM (2 GHz source).
 */
/* GXM: 2000/10 = 200M */
#define HevcEnc_L0() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
	(3 << 25) | (1 << 16) | (3 << 9) | (1 << 0))
/* GXM: 2000/8 = 250M */
#define HevcEnc_L1() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
	(1 << 25) | (1 << 16) | (1 << 9) | (1 << 0))
/* GXM: 2000/7 = 285M */
#define HevcEnc_L2() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
	(4 << 25) | (0 << 16) | (4 << 9) | (0 << 0))
/*GXM: 2000/6 = 333M */
#define HevcEnc_L3() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
	(2 << 25) | (1 << 16) | (2 << 9) | (1 << 0))
/* GXM: 2000/5 = 400M */
#define HevcEnc_L4() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
	(3 << 25) | (0 << 16) | (3 << 9) | (0 << 0))
/* GXM: 2000/4 = 500M */
#define HevcEnc_L5() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
	(1 << 25) | (0 << 16) | (1 << 9) | (0 << 0))
/* GXM: 2000/3 = 667M */
#define HevcEnc_L6() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
	(2 << 25) | (0 << 16) | (2 << 9) | (0 << 0))

/* Gate the clocks off (clear enable bits 8 and 24), select the divider for
 * the requested level (0..6; out-of-range levels leave the divider
 * unchanged), then gate the clocks back on.
 */
#define HevcEnc_clock_enable(level) \
	do { \
		WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
			READ_HHI_REG(HHI_WAVE420L_CLK_CNTL) \
			& (~(1 << 8)) & (~(1 << 24))); \
		if (level == 0) \
			HevcEnc_L0(); \
		else if (level == 1) \
			HevcEnc_L1(); \
		else if (level == 2) \
			HevcEnc_L2(); \
		else if (level == 3) \
			HevcEnc_L3(); \
		else if (level == 4) \
			HevcEnc_L4(); \
		else if (level == 5) \
			HevcEnc_L5(); \
		else if (level == 6) \
			HevcEnc_L6(); \
		WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
			READ_HHI_REG(HHI_WAVE420L_CLK_CNTL) \
			| (1 << 8) | (1 << 24)); \
	} while (0)

/* Clear both clock-enable bits (8 and 24) without touching the dividers. */
#define HevcEnc_clock_disable() \
	WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
		READ_HHI_REG(HHI_WAVE420L_CLK_CNTL) \
		& (~(1 << 8)) & (~(1 << 24)))

/* ACLK 667MHZ */
/* NOTE(review): the second write stores (2 << 9) | (0 << 0) directly,
 * discarding all other bits of HHI_WAVE420L_CLK_CNTL2 instead of
 * read-modify-writing them — confirm against the SoC datasheet that the
 * remaining fields are don't-care here.
 */
#define HevcEnc_MoreClock_enable() \
	do { \
		WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL2, \
			READ_HHI_REG(HHI_WAVE420L_CLK_CNTL2) \
			& (~(1 << 8))); \
		WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL2, \
			(2 << 9) | (0 << 0)); \
		WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL2, \
			READ_HHI_REG(HHI_WAVE420L_CLK_CNTL2) \
			| (1 << 8)); \
	} while (0)

#define HevcEnc_MoreClock_disable() \
	WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL2, \
		READ_HHI_REG(HHI_WAVE420L_CLK_CNTL2) \
		& (~(1 << 8)))

/* Input frame pixel formats accepted by the encoder (also used in the
 * userspace ioctl struct vpu_dma_buf_info_t, so the order/values are ABI).
 */
typedef enum
{
	AMVENC_YUV422_SINGLE = 0,
	AMVENC_YUV444_SINGLE,
	AMVENC_NV21,
	AMVENC_NV12,
	AMVENC_YUV420,
	AMVENC_YUV444_PLANE,
	AMVENC_RGB888,
	AMVENC_RGB888_PLANE,
	AMVENC_RGB565,
	AMVENC_RGBA8888,
	AMVENC_FRAME_FMT
} AMVEncFrameFmt;

#ifdef CONFIG_COMPAT
/* 32-bit userspace layout of vpudrv_buffer_t for compat ioctls. */
struct compat_vpudrv_buffer_t {
	u32 size;
	u32 cached;
	compat_ulong_t phys_addr;
	compat_ulong_t base; /* kernel logical address in use kernel */
	compat_ulong_t virt_addr; /* virtual user space address */
};
#endif

/* Generic buffer descriptor exchanged with userspace and used internally
 * for the register window, reserved video memory, etc.
 */
struct vpudrv_buffer_t {
	u32 size;
	u32 cached;
	ulong phys_addr;
	ulong base; /* kernel logical address in use kernel */
	ulong virt_addr; /* virtual user space address */
};

/* Per-core firmware image; bit_code is re-loaded as boot code on resume. */
struct vpu_bit_firmware_info_t {
	u32 size; /* size of this structure*/
	u32 core_idx;
	u32 reg_base_offset;
	u16 bit_code[512];
};

struct vpudrv_inst_info_t {
	u32 core_idx;
	u32 inst_idx;
	s32 inst_open_count; /* for output only*/
};

struct vpudrv_intr_info_t {
	u32 timeout;
	s32 intr_reason;
};

struct vpu_drv_context_t {
	struct fasync_struct *async_queue;
	ulong interrupt_reason;
	u32 open_count; /*!<< device reference count. Not instance count */
};

/* To track the allocated memory buffer */
struct vpudrv_buffer_pool_t {
	struct list_head list;
	struct vpudrv_buffer_t vb;
	struct file *filp;
};

/* To track the instance index and buffer in instance pool */
struct vpudrv_instanace_list_t {
	struct list_head list;
	ulong inst_idx;
	ulong core_idx;
	struct file *filp;
};

struct vpudrv_instance_pool_t {
	u8 codecInstPool[MAX_NUM_INSTANCE][MAX_INST_HANDLE_SIZE];
};

/* Userspace description of an imported dma-buf frame (one fd per plane). */
struct vpu_dma_buf_info_t {
	u32 width;
	u32 height;
	AMVEncFrameFmt fmt;
	u32 num_planes;
	s32 fd[3];
};

/* Kernel-side bookkeeping for one attached/mapped dma-buf. */
struct vpu_dma_cfg {
	int fd;
	void *dev;
	void *vaddr;
	void *paddr;
	struct dma_buf *dbuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	enum dma_data_direction dir;
};

/* Shorthands for the ioctl size arguments below. */
#define VPUDRV_BUF_LEN struct vpudrv_buffer_t
#define VPUDRV_BUF_LEN32 struct compat_vpudrv_buffer_t
#define VPUDRV_INST_LEN struct vpudrv_inst_info_t

/* ioctl interface ('V' magic). Note command number 7 is unused and the
 * *32 variants below reuse the same numbers with the compat struct size.
 */
#define VDI_MAGIC 'V'
#define VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY \
	_IOW(VDI_MAGIC, 0, VPUDRV_BUF_LEN)

#define VDI_IOCTL_FREE_PHYSICALMEMORY \
	_IOW(VDI_MAGIC, 1, VPUDRV_BUF_LEN)

#define VDI_IOCTL_WAIT_INTERRUPT \
	_IOW(VDI_MAGIC, 2, struct vpudrv_intr_info_t)

#define VDI_IOCTL_SET_CLOCK_GATE \
	_IOW(VDI_MAGIC, 3, u32)

#define VDI_IOCTL_RESET \
	_IOW(VDI_MAGIC, 4, u32)

#define VDI_IOCTL_GET_INSTANCE_POOL \
	_IOW(VDI_MAGIC, 5, VPUDRV_BUF_LEN)

#define VDI_IOCTL_GET_COMMON_MEMORY \
	_IOW(VDI_MAGIC, 6, VPUDRV_BUF_LEN)

#define VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO \
	_IOW(VDI_MAGIC, 8, VPUDRV_BUF_LEN)

#define VDI_IOCTL_OPEN_INSTANCE \
	_IOW(VDI_MAGIC, 9, VPUDRV_INST_LEN)

#define VDI_IOCTL_CLOSE_INSTANCE \
	_IOW(VDI_MAGIC, 10, VPUDRV_INST_LEN)

#define VDI_IOCTL_GET_INSTANCE_NUM \
	_IOW(VDI_MAGIC, 11, VPUDRV_INST_LEN)

#define VDI_IOCTL_GET_REGISTER_INFO \
	_IOW(VDI_MAGIC, 12, VPUDRV_BUF_LEN)

#define VDI_IOCTL_FLUSH_BUFFER \
	_IOW(VDI_MAGIC, 13, VPUDRV_BUF_LEN)

#define VDI_IOCTL_CONFIG_DMA \
	_IOW(VDI_MAGIC, 14, struct vpu_dma_buf_info_t)

#define VDI_IOCTL_UNMAP_DMA \
	_IOW(VDI_MAGIC, 15, u32)

#ifdef CONFIG_COMPAT
#define VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32 \
	_IOW(VDI_MAGIC, 0, VPUDRV_BUF_LEN32)

#define VDI_IOCTL_FREE_PHYSICALMEMORY32 \
	_IOW(VDI_MAGIC, 1, VPUDRV_BUF_LEN32)

#define VDI_IOCTL_GET_INSTANCE_POOL32 \
	_IOW(VDI_MAGIC, 5, VPUDRV_BUF_LEN32)

#define VDI_IOCTL_GET_COMMON_MEMORY32 \
	_IOW(VDI_MAGIC, 6, VPUDRV_BUF_LEN32)

#define VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32 \
	_IOW(VDI_MAGIC, 8, VPUDRV_BUF_LEN32)

#define VDI_IOCTL_GET_REGISTER_INFO32 \
	_IOW(VDI_MAGIC, 12, VPUDRV_BUF_LEN32)

#define VDI_IOCTL_FLUSH_BUFFER32 \
	_IOW(VDI_MAGIC, 13, VPUDRV_BUF_LEN32)
#endif

/* Interrupt reason bits reported by the WAVE4 firmware; several values are
 * intentionally shared between decode and encode meanings.
 */
enum {
	W4_INT_INIT_VPU = 0,
	W4_INT_DEC_PIC_HDR = 1,
	W4_INT_SET_PARAM = 1,
	W4_INT_ENC_INIT_SEQ = 1,
	W4_INT_FINI_SEQ = 2,
	W4_INT_DEC_PIC = 3,
	W4_INT_ENC_PIC = 3,
	W4_INT_SET_FRAMEBUF = 4,
	W4_INT_FLUSH_DEC = 5,
	W4_INT_ENC_SLICE_INT = 7,
	W4_INT_GET_FW_VERSION = 8,
	W4_INT_QUERY_DEC = 9,
	W4_INT_SLEEP_VPU = 10,
	W4_INT_WAKEUP_VPU = 11,
	W4_INT_CHANGE_INT = 12,
	W4_INT_CREATE_INSTANCE = 14,
	W4_INT_BSBUF_EMPTY = 15,
	/*!<< Bitstream buffer empty[dec]/full[enc] */
};

/* WAVE4 registers */
/* Fallback MMIO base/size used when the DT child node carries no "reg"
 * range (see vpu_probe).
 */
#define VPU_REG_BASE_ADDR 0xc8810000
#define VPU_REG_SIZE (0x4000 * MAX_NUM_VPU_CORE)

#define W4_REG_BASE 0x0000
#define W4_VPU_BUSY_STATUS (W4_REG_BASE + 0x0070)
#define W4_VPU_INT_REASON_CLEAR (W4_REG_BASE + 0x0034)
#define W4_VPU_VINT_CLEAR (W4_REG_BASE + 0x003C)
#define W4_VPU_VPU_INT_STS (W4_REG_BASE + 0x0044)
#define W4_VPU_INT_REASON (W4_REG_BASE + 0x004c)

#define W4_RET_SUCCESS (W4_REG_BASE + 0x0110)
#define W4_RET_FAIL_REASON (W4_REG_BASE + 0x0114)

/* WAVE4 INIT, WAKEUP */
#define W4_PO_CONF (W4_REG_BASE + 0x0000)
#define W4_VCPU_CUR_PC (W4_REG_BASE + 0x0004)

#define W4_VPU_VINT_ENABLE (W4_REG_BASE + 0x0048)

#define W4_VPU_RESET_REQ (W4_REG_BASE + 0x0050)
#define W4_VPU_RESET_STATUS (W4_REG_BASE + 0x0054)

#define W4_VPU_REMAP_CTRL (W4_REG_BASE + 0x0060)
#define W4_VPU_REMAP_VADDR (W4_REG_BASE + 0x0064)
#define W4_VPU_REMAP_PADDR (W4_REG_BASE + 0x0068)
#define W4_VPU_REMAP_CORE_START (W4_REG_BASE + 0x006C)
/* NOTE(review): W4_VPU_BUSY_STATUS is defined a second time here with the
 * same value (0x0070) as above — harmless, but one should be removed.
 */
#define W4_VPU_BUSY_STATUS (W4_REG_BASE + 0x0070)

#define W4_HW_OPTION (W4_REG_BASE + 0x0124)
#define W4_CODE_SIZE (W4_REG_BASE + 0x011C)
/* Note: W4_INIT_CODE_BASE_ADDR should be aligned to 4KB */
#define W4_ADDR_CODE_BASE (W4_REG_BASE + 0x0118)
#define W4_CODE_PARAM (W4_REG_BASE + 0x0120)
/* NOTE(review): offset 0x0134 is also W4_BS_WR_PTR below — confirm against
 * the WAVE4 register map whether this aliasing is intentional.
 */
#define W4_INIT_VPU_TIME_OUT_CNT (W4_REG_BASE + 0x0134)

/* WAVE4 Wave4BitIssueCommand */
#define W4_CORE_INDEX (W4_REG_BASE + 0x0104)
#define W4_INST_INDEX (W4_REG_BASE + 0x0108)
#define W4_COMMAND (W4_REG_BASE + 0x0100)
#define W4_VPU_HOST_INT_REQ (W4_REG_BASE + 0x0038)

#define W4_BS_RD_PTR (W4_REG_BASE + 0x0130)
#define W4_BS_WR_PTR (W4_REG_BASE + 0x0134)
#define W4_SRC_ADDR_Y (W4_REG_BASE + 0x0174)
#define W4_SRC_ADDR_U (W4_REG_BASE + 0x0178)
#define W4_SRC_ADDR_V (W4_REG_BASE + 0x017C)

#define W4_RET_ENC_PIC_BYTE (W4_REG_BASE + 0x01C8)

#define W4_REMAP_CODE_INDEX 0

/* MMIO accessors. Both rely on `core` and the globals s_vpu_register /
 * s_bit_firmware_info being in scope at the expansion site (vpu.c).
 */
#define ReadVpuRegister(addr) \
	readl((void __iomem *)(s_vpu_register.virt_addr \
		+ s_bit_firmware_info[core].reg_base_offset + addr))

#define WriteVpuRegister(addr, val) \
	writel((u32)val, (void __iomem *)(s_vpu_register.virt_addr \
		+ s_bit_firmware_info[core].reg_base_offset + addr))

#define WriteVpu(addr, val) writel((u32)val, (void __iomem *)addr)
#endif
diff --git a/drivers/frame_sink/encoder/jpeg/Makefile b/drivers/frame_sink/encoder/jpeg/Makefile new file mode 100644 index 0000000..4f6c498 --- /dev/null +++ b/drivers/frame_sink/encoder/jpeg/Makefile
# Kbuild rule: build jpegenc.o into the kernel (=y) or as jpegenc.ko (=m)
# when CONFIG_AMLOGIC_MEDIA_VENC_JPEG is set (the top-level Makefile passes
# it as =m).
obj-$(CONFIG_AMLOGIC_MEDIA_VENC_JPEG) += jpegenc.o
# NOTE(review): dead commented-out composite-object rule from the original
# import; remove once the single-object build is confirmed final.
#amvenc_jpeg-objs += jpegenc.o
diff --git a/drivers/frame_sink/encoder/jpeg/jpegenc.c b/drivers/frame_sink/encoder/jpeg/jpegenc.c new file mode 100644 index 0000000..4416378 --- /dev/null +++ b/drivers/frame_sink/encoder/jpeg/jpegenc.c
@@ -0,0 +1,4870 @@ +/* + * drivers/amlogic/amports/jpegenc.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * +*/ +#define LOG_LINE() +//pr_err("[%s:%d]\n", __FUNCTION__, __LINE__); +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/compat.h> +#include <linux/ktime.h> +#include <linux/timekeeping.h> + +#include <linux/delay.h> +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/ctype.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/canvas/canvas.h> +#include <linux/amlogic/media/canvas/canvas_mgr.h> +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../../frame_provider/decoder/utils/vdec_canvas_utils.h" +#include <linux/delay.h> +#include <linux/poll.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/dma-contiguous.h> +#include "../../../common/chips/decoder_cpu_ver_info.h" +#include "../../../frame_provider/decoder/utils/amvdec.h" +#include "../../../stream_input/amports/amports_priv.h" +#include "../../../frame_provider/decoder/utils/firmware.h" +#include "../../../frame_provider/decoder/utils/vdec.h" +#include "../../../frame_provider/decoder/utils/vdec_power_ctrl.h" 
+#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "jpegenc.h" +#include <linux/of_reserved_mem.h> + +#include <linux/amlogic/power_ctrl.h> +#include <dt-bindings/power/t7-pd.h> +#include <linux/amlogic/power_domain.h> + +#include <linux/clk.h> +#define HCODEC_MFDIN_REG17 0x101f +#define HCODEC_MFDIN_REG18 0x1020 +#define HCODEC_MFDIN_REG19 0x1021 + +#ifdef CONFIG_AM_ENCODER +#include "encoder.h" +#endif + +#define JPEGENC_CANVAS_INDEX 0x64 +#define JPEGENC_CANVAS_MAX_INDEX 0x67 + +#define ENC_CANVAS_OFFSET JPEGENC_CANVAS_INDEX + +#define LOG_ALL 0 +#define LOG_INFO 1 +#define LOG_DEBUG 2 +#define LOG_ERROR 3 + +#define jenc_pr(level, x...) \ + do { \ + if (level >= jpegenc_print_level) \ + printk(x); \ + } while (0) + +#define DRIVER_NAME "jpegenc" +#define CLASS_NAME "jpegenc" +#define DEVICE_NAME "jpegenc" + +/* #define EXTEAN_QUANT_TABLE */ + +/*######### DEBUG-BRINGUP#########*/ +static u32 manual_clock; +static u32 manual_irq_num = 2; +static u32 manual_interrupt = 0; +/*################################*/ + +static s32 jpegenc_device_major; +static struct device *jpegenc_dev; +static u32 jpegenc_print_level = LOG_ERROR; + +static s32 reg_offset; + +static u32 use_dma_io = 1; + +static u32 use_quality=1; +static u32 legacy_load=0; + +static u32 dumpmem_line = 0; +static u32 pointer = 0; + +static u32 clock_level = 1; +static u16 gQuantTable[2][DCTSIZE2]; +#ifdef EXTEAN_QUANT_TABLE +static u16 *gExternalQuantTablePtr; +static bool external_quant_table_available; +#endif + +static u32 simulation_enable; +static u32 g_block_mode; +static u32 g_canv0_stride; +static u32 g_canv1_stride; +static u32 g_canv2_stride; +static u32 g_canvas_height; + +static u32 jpeg_in_full_hcodec; +static u32 mfdin_ambus_canv_conv; + +#define MHz (1000000) + +//static DEFINE_SPINLOCK(lock); + +#define JPEGENC_BUFFER_LEVEL_VGA 0 +#define JPEGENC_BUFFER_LEVEL_2M 1 +#define JPEGENC_BUFFER_LEVEL_3M 2 +#define JPEGENC_BUFFER_LEVEL_5M 3 +#define JPEGENC_BUFFER_LEVEL_8M 4 +#define 
JPEGENC_BUFFER_LEVEL_13M 5 +#define JPEGENC_BUFFER_LEVEL_HD 6 + +const s8 *glevel_str[] = { + "VGA", + "2M", + "3M", + "5M", + "8M", + "13M", + "HD", +}; + +const struct Jpegenc_BuffInfo_s jpegenc_buffspec[] = { + { + .lev_id = JPEGENC_BUFFER_LEVEL_VGA, + .max_width = 640, + .max_height = 640, + .min_buffsize = 0x330000, + .input = { + .buf_start = 0, + .buf_size = 0x12c000, + }, + .assit = { + .buf_start = 0x12d000, + .buf_size = 0x2000, + }, + .bitstream = { + .buf_start = 0x130000, + .buf_size = 0x200000, + } + }, { + .lev_id = JPEGENC_BUFFER_LEVEL_2M, + .max_width = 1600, + .max_height = 1600, + .min_buffsize = 0x960000, + .input = { + .buf_start = 0, + .buf_size = 0x753000, + }, + .assit = { + .buf_start = 0x754000, + .buf_size = 0x2000, + }, + .bitstream = { + .buf_start = 0x760000, + .buf_size = 0x200000, + } + }, { + .lev_id = JPEGENC_BUFFER_LEVEL_3M, + .max_width = 2048, + .max_height = 2048, + .min_buffsize = 0xe10000, + .input = { + .buf_start = 0, + .buf_size = 0xc00000, + }, + .assit = { + .buf_start = 0xc01000, + .buf_size = 0x2000, + }, + .bitstream = { + .buf_start = 0xc10000, + .buf_size = 0x200000, + } + }, { + .lev_id = JPEGENC_BUFFER_LEVEL_5M, + .max_width = 2624, + .max_height = 2624, + .min_buffsize = 0x1800000, + .input = { + .buf_start = 0, + .buf_size = 0x13B3000, + }, + .assit = { + .buf_start = 0x13B4000, + .buf_size = 0x2000, + }, + .bitstream = { + .buf_start = 0x1400000, + .buf_size = 0x400000, + } + }, { + .lev_id = JPEGENC_BUFFER_LEVEL_8M, + .max_width = 3264, + .max_height = 3264, + .min_buffsize = 0x2300000, + .input = { + .buf_start = 0, + .buf_size = 0x1e7b000, + }, + .assit = { + .buf_start = 0x1e7c000, + .buf_size = 0x2000, + }, + .bitstream = { + .buf_start = 0x1f00000, + .buf_size = 0x400000, + } + }, { + .lev_id = JPEGENC_BUFFER_LEVEL_13M, + .max_width = 8192, + .max_height = 8192, + .min_buffsize = 0xc400000, + .input = { + .buf_start = 0, + .buf_size = 0xc000000, + }, + .assit = { + .buf_start = 0xc001000, + .buf_size = 
0x2000, + }, + .bitstream = { + .buf_start = 0xc010000, + .buf_size = 0x3f0000, + } + }, { + .lev_id = JPEGENC_BUFFER_LEVEL_HD, + .max_width = 8192, + .max_height = 8192, + .min_buffsize = 0xc400000, + .input = { + .buf_start = 0, + .buf_size = 0xc000000, + }, + .assit = { + .buf_start = 0xc001000, + .buf_size = 0x2000, + }, + .bitstream = { + .buf_start = 0xc010000, + .buf_size = 0x3f0000, + } + } +}; + +const char *jpegenc_ucode[] = { + "jpegenc_mc", +}; + +static struct jpegenc_manager_s gJpegenc; + +static const u16 jpeg_quant[7][DCTSIZE2] = { + { /* jpeg_quant[0][] : Luma, Canon */ + 0x06, 0x06, 0x08, 0x0A, 0x0A, 0x10, 0x15, 0x19, + 0x06, 0x0A, 0x0A, 0x0E, 0x12, 0x1F, 0x29, 0x29, + 0x08, 0x0A, 0x0E, 0x12, 0x21, 0x29, 0x29, 0x29, + 0x0A, 0x0E, 0x12, 0x14, 0x23, 0x29, 0x29, 0x29, + 0x0A, 0x12, 0x21, 0x23, 0x27, 0x29, 0x29, 0x29, + 0x10, 0x1F, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, + 0x15, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, + 0x19, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29 + }, + { /* jpeg_quant[1][] : Chroma, Canon */ + 0x0A, 0x0E, 0x10, 0x14, 0x15, 0x1D, 0x2B, 0x35, + 0x0E, 0x12, 0x14, 0x1D, 0x25, 0x3E, 0x54, 0x54, + 0x10, 0x14, 0x19, 0x25, 0x40, 0x54, 0x54, 0x54, + 0x14, 0x1D, 0x25, 0x27, 0x48, 0x54, 0x54, 0x54, + 0x15, 0x25, 0x40, 0x48, 0x4E, 0x54, 0x54, 0x54, + 0x1D, 0x3E, 0x54, 0x54, 0x54, 0x54, 0x54, 0x54, + 0x2B, 0x54, 0x54, 0x54, 0x54, 0x54, 0x54, 0x54, + 0x35, 0x54, 0x54, 0x54, 0x54, 0x54, 0x54, 0x54 + }, + { /* jpeg_quant[2][] : Luma, spec example Table K.1 */ + 16, 11, 10, 16, 24, 40, 51, 61, + 12, 12, 14, 19, 26, 58, 60, 55, + 14, 13, 16, 24, 40, 57, 69, 56, + 14, 17, 22, 29, 51, 87, 80, 62, + 18, 22, 37, 56, 68, 109, 103, 77, + 24, 35, 55, 64, 81, 104, 113, 92, + 49, 64, 78, 87, 103, 121, 120, 101, + 72, 92, 95, 98, 112, 100, 103, 99 + }, + { /* jpeg_quant[3][] : Chroma, spec example Table K.2 */ + 17, 18, 24, 47, 99, 99, 99, 99, + 18, 21, 26, 66, 99, 99, 99, 99, + 24, 26, 56, 99, 99, 99, 99, 99, + 47, 66, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 
99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99 + }, + { /* jpeg_quant[4][] : Luma, spec example Table K.1, + modified to create long ZRL */ + 16, 11, 10, 16, 24, 40, 51, 61, + 12, 12, 14, 19, 26, 58, 60, 55, + 14, 13, 16, 24, 40, 57, 69, 56, + 14, 17, 22, 29, 51, 87, 80, 62, + 18, 22, 37, 56, 68, 109, 103, 77, + 24, 35, 55, 64, 81, 104, 113, 92, + 49, 64, 78, 87, 103, 121, 120, 101, + 72, 92, 95, 98, 112, 100, 103, 16 + }, + { /* jpeg_quant[5][] : Chroma, spec example Table K.2, + modified to create long ZRL */ + 17, 18, 24, 47, 99, 99, 99, 99, + 18, 21, 26, 66, 99, 99, 99, 99, + 24, 26, 56, 99, 99, 99, 99, 99, + 47, 66, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 17 + }, + { /* jpeg_quant[6][] : no compression */ + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1 + } +}; /* jpeg_quant */ + +static const u8 jpeg_huffman_dc[2][16 + 12] = { + { /* jpeg_huffman_dc[0][] */ + 0x00, /* number of code length=1 */ + 0x01, + 0x05, + 0x01, + 0x01, + 0x01, + 0x01, + 0x01, + 0x01, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, /* number of code length=16 */ + + /* Entry index for code with + minimum code length (=2 in this case) */ + 0x00, + 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, + 0x07, + 0x08, + 0x09, + 0x0A, + 0x0B + }, + { /* jpeg_huffman_dc[1][] */ + 0x00, /* number of code length=1 */ + 0x03, + 0x01, + 0x01, + 0x01, + 0x01, + 0x01, + 0x01, + 0x01, + 0x01, + 0x01, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, /* number of code length=16 */ + + /* Entry index for code with + minimum code length (=2 in this case) */ + 0x00, 0x01, 0x02, + 0x03, + 0x04, + 0x05, + 0x06, + 0x07, + 0x08, + 0x09, + 0x0A, + 0x0B + } +}; /* jpeg_huffman_dc */ + 
/*
 * AC Huffman tables in JPEG DHT segment order: 16 BITS counts followed
 * by 162 HUFFVAL symbols, each encoding {run << 4 | size}.  The counts
 * in each table sum to 162.  [0]/[1] appear to be the ITU-T T.81
 * Annex K default luma/chroma AC tables.  prepare_jpeg_header() emits
 * them into the DHT marker; write_jpeg_huffman_lut_ac() expands them
 * for the hardware VLC LUT.
 */
static const u8 jpeg_huffman_ac[2][16 + 162] = {
	{ /* jpeg_huffman_ac[0][] */
		0x00, /* number of code length=1 */
		0x02,
		0x01,
		0x03,
		0x03,
		0x02,
		0x04,
		0x03,
		0x05,
		0x05,
		0x04,
		0x04,
		0x00,
		0x00,
		0x01,
		0x7D, /* number of code length=16 */

		/* Entry index for code with
			minimum code length (=2 in this case) */
		0x01, 0x02,
		0x03,
		0x00, 0x04, 0x11,
		0x05, 0x12, 0x21,
		0x31, 0x41,
		0x06, 0x13, 0x51, 0x61,
		0x07, 0x22, 0x71,
		0x14, 0x32, 0x81, 0x91, 0xA1,
		0x08, 0x23, 0x42, 0xB1, 0xC1,
		0x15, 0x52, 0xD1, 0xF0,
		0x24, 0x33, 0x62, 0x72,
		0x82,
		0x09, 0x0A, 0x16, 0x17, 0x18, 0x19,
		0x1A, 0x25, 0x26, 0x27, 0x28, 0x29,
		0x2A, 0x34, 0x35, 0x36,
		0x37, 0x38, 0x39, 0x3A, 0x43, 0x44,
		0x45, 0x46, 0x47, 0x48, 0x49, 0x4A,
		0x53, 0x54, 0x55, 0x56,
		0x57, 0x58, 0x59, 0x5A, 0x63, 0x64,
		0x65, 0x66, 0x67, 0x68, 0x69, 0x6A,
		0x73, 0x74, 0x75, 0x76,
		0x77, 0x78, 0x79, 0x7A, 0x83, 0x84,
		0x85, 0x86, 0x87, 0x88, 0x89, 0x8A,
		0x92, 0x93, 0x94, 0x95,
		0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2,
		0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
		0xA9, 0xAA, 0xB2, 0xB3,
		0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9,
		0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6,
		0xC7, 0xC8, 0xC9, 0xCA,
		0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
		0xD8, 0xD9, 0xDA, 0xE1, 0xE2, 0xE3,
		0xE4, 0xE5, 0xE6, 0xE7,
		0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3,
		0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9,
		0xFA
	},
	{ /* jpeg_huffman_ac[1][] */
		0x00, /* number of code length=1 */
		0x02,
		0x01,
		0x02,
		0x04,
		0x04,
		0x03,
		0x04,
		0x07,
		0x05,
		0x04,
		0x04,
		0x00,
		0x01,
		0x02,
		0x77, /* number of code length=16 */

		/* Entry index for code with
			minimum code length (=2 in this case) */
		0x00, 0x01,
		0x02,
		0x03, 0x11,
		0x04, 0x05, 0x21, 0x31,
		0x06, 0x12, 0x41, 0x51,
		0x07, 0x61, 0x71,
		0x13, 0x22, 0x32, 0x81,
		0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1,
		0x09, 0x23, 0x33, 0x52, 0xF0,
		0x15, 0x62, 0x72, 0xD1,
		0x0A, 0x16, 0x24, 0x34,
		0xE1,
		0x25, 0xF1,
		0x17, 0x18, 0x19, 0x1A, 0x26, 0x27,
		0x28, 0x29, 0x2A, 0x35, 0x36, 0x37,
		0x38, 0x39, 0x3A, 0x43,
		0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
		0x4A, 0x53, 0x54, 0x55, 0x56, 0x57,
		0x58, 0x59, 0x5A, 0x63,
		0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
		0x6A, 0x73, 0x74, 0x75, 0x76, 0x77,
		0x78, 0x79, 0x7A, 0x82,
		0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
		0x89, 0x8A, 0x92, 0x93, 0x94, 0x95,
		0x96, 0x97, 0x98, 0x99,
		0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
		0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3,
		0xB4, 0xB5, 0xB6, 0xB7,
		0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4,
		0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA,
		0xD2, 0xD3, 0xD4, 0xD5,
		0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2,
		0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8,
		0xE9, 0xEA, 0xF2, 0xF3,
		0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA
	}
}; /* jpeg_huffman_ac */

/* Encode timing bookkeeping; see the (currently stubbed) helpers below. */
static u64 time_cnt = 0;

static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr);
static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg);

/* Log every field of a userspace encode request at LOG_DEBUG ("requst" sic). */
static void dump_requst(struct jpegenc_request_s *request) {
	jenc_pr(LOG_DEBUG, "jpegenc: dump request start\n");
	jenc_pr(LOG_DEBUG, "src=%u\n", request->src);
	jenc_pr(LOG_DEBUG, "encoder_width=%u\n", request->encoder_width);
	jenc_pr(LOG_DEBUG, "encoder_height=%u\n", request->encoder_height);
	jenc_pr(LOG_DEBUG, "framesize=%u\n", request->framesize);
	jenc_pr(LOG_DEBUG, "jpeg_quality=%u\n", request->jpeg_quality);
	jenc_pr(LOG_DEBUG, "QuantTable_id=%u\n", request->QuantTable_id);
	jenc_pr(LOG_DEBUG, "flush_flag=%u\n", request->flush_flag);
	jenc_pr(LOG_DEBUG, "block_mode=%u\n", request->block_mode);
	jenc_pr(LOG_DEBUG, "type=%d\n", request->type);
	jenc_pr(LOG_DEBUG, "input_fmt=%d\n", request->input_fmt);
	jenc_pr(LOG_DEBUG, "output_fmt=%d\n", request->output_fmt);

	jenc_pr(LOG_DEBUG, "y_off=%u\n", request->y_off);
	jenc_pr(LOG_DEBUG, "u_off=%u\n", request->u_off);
	jenc_pr(LOG_DEBUG, "v_off=%u\n", request->v_off);
	jenc_pr(LOG_DEBUG, "y_stride=%u\n", request->y_stride);
	jenc_pr(LOG_DEBUG, "u_stride=%u\n", request->u_stride);
	jenc_pr(LOG_DEBUG, "v_stride=%u\n", request->v_stride);
	jenc_pr(LOG_DEBUG, "h_stride=%u\n", request->h_stride);
	jenc_pr(LOG_DEBUG, "jpegenc: dump request end\n");
}

/*
 * Program one canvas LUT entry for the encoder.  On SoCs where the vdec
 * core carries its own canvas LUT (is_support_vdec_canvas()), the
 * descriptor is packed by hand and written straight to the HCODEC MDEC
 * CAV registers; otherwise the common canvas_config() helper is used.
 */
static void canvas_config_proxy(u32 index, ulong addr, u32 width, u32 height,
	u32 wrap, u32 blkmode) {
	unsigned long datah_temp, datal_temp;

	if (!is_support_vdec_canvas()) {
		canvas_config(index, addr, width, height, wrap, blkmode);
	} else {
#if 1
		/* Address stored in 8-byte units. */
		ulong start_addr = addr >> 3;
		/* Width in 8-byte units, rounded up to a 32-byte multiple. */
		u32 cav_width = (((width + 31)>>5)<<2);
		u32 cav_height = height;
		u32 x_wrap_en = 0;
		u32 y_wrap_en = 0;
		u32 blk_mode = 0;//blkmode;
		u32 cav_endian = 0;

		/* DATAL: addr[28:0] | width[2:0] << 29 */
		datal_temp = (start_addr & 0x1fffffff) |
			((cav_width & 0x7 ) << 29 );

		/* DATAH: width[11:3] | height[12:0]<<9 | wrap/blk/endian bits */
		datah_temp = ((cav_width >> 3) & 0x1ff) |
			((cav_height & 0x1fff) <<9 ) |
			((x_wrap_en & 1) << 22 ) |
			((y_wrap_en & 1) << 23) |
			((blk_mode & 0x3) << 24) |
			( cav_endian << 26);

#else
		u32 endian = 0;
		u32 addr_bits_l = ((((addr + 7) >> 3) & CANVAS_ADDR_LMASK) << CAV_WADDR_LBIT);
		u32 width_l = ((((width + 7) >> 3) & CANVAS_WIDTH_LMASK) << CAV_WIDTH_LBIT);
		u32 width_h = ((((width + 7) >> 3) >> CANVAS_WIDTH_LWID) << CAV_WIDTH_HBIT);
		u32 height_h = (height & CANVAS_HEIGHT_MASK) << CAV_HEIGHT_HBIT;
		u32 blkmod_h = (blkmode & CANVAS_BLKMODE_MASK) << CAV_BLKMODE_HBIT;
		u32 switch_bits_ctl = (endian & 0xf) << CAV_ENDIAN_HBIT;
		u32 wrap_h = (0 << 23);
		datal_temp = addr_bits_l | width_l;
		datah_temp = width_h | height_h | wrap_h | blkmod_h | switch_bits_ctl;
#endif
		/*
		if (core == VDEC_1) {
			WRITE_VREG(MDEC_CAV_CFG0, 0); //[0]canv_mode, by default is non-canv-mode
			WRITE_VREG(MDEC_CAV_LUT_DATAL, datal_temp);
			WRITE_VREG(MDEC_CAV_LUT_DATAH, datah_temp);
			WRITE_VREG(MDEC_CAV_LUT_ADDR, index);
		} else if (core == VDEC_HCODEC) */ {
			WRITE_HREG(HCODEC_MDEC_CAV_CFG0, 0); //[0]canv_mode, by default is non-canv-mode
			WRITE_HREG(HCODEC_MDEC_CAV_LUT_DATAL, datal_temp);
			WRITE_HREG(HCODEC_MDEC_CAV_LUT_DATAH, datah_temp);
			WRITE_HREG(HCODEC_MDEC_CAV_LUT_ADDR, index);
		}

		/*
		cav_lut_info_store(index, addr, width, height, wrap, blkmode, 0);

		if (vdec_get_debug() & 0x40000000) {
			pr_info("(%s %2d) addr: %lx, width: %d, height: %d, blkm: %d, endian: %d\n",
				__func__, index, addr, width, height, blkmode, 0);
			pr_info("data(h,l): 0x%8lx, 0x%8lx\n", datah_temp, datal_temp);
		}
		*/
	}
}

/*
 * Timing helpers, currently stubbed: the gettimeofday-based code is
 * commented away, so this always returns 0 and the duration logged by
 * jpegenc_time_count_end() is not meaningful until it is restored.
 */
static u64 jpegenc_time_count_start(void)
{
	//struct timeval tv;

	//do_gettimeofday(&tv);
	//efi_gettimeofday(&tv);
	//return div64_u64(timeval_to_ns(&tv), 1000);
	return 0;
}

/* Log elapsed encode time (us) since *time and reset it; see stub above. */
static void jpegenc_time_count_end(u64 *time)
{
	jenc_pr(LOG_INFO, "the encoder takes time %lld us.\n",
		jpegenc_time_count_start() - *time);
	*time = 0;
}

/*
 * True if a w x h frame exceeds @max pixels.  The divide form avoids
 * overflowing w * h; negative dimensions always count as oversize.
 */
static int is_oversize(int w, int h, int max)
{
	if (w < 0 || h < 0)
		return true;

	if (h != 0 && (w > max / h))
		return true;

	return false;
}

/* Clocks the encoder needs: DOS core, DOS APB bus and the jpeg_enc gate. */
struct jpeg_enc_clks {
	struct clk *dos_clk;
	struct clk *dos_apb_clk;
	struct clk *jpeg_enc_clk;

};

static struct jpeg_enc_clks g_jpeg_enc_clks;

/* Release clocks in reverse acquisition order; tolerates NULL/IS_ERR holes. */
static void jpeg_enc_clk_put(struct device *dev, struct jpeg_enc_clks *clks)
{
	if (!(clks->jpeg_enc_clk == NULL || IS_ERR(clks->jpeg_enc_clk)))
		devm_clk_put(dev, clks->jpeg_enc_clk);

	if (!(clks->dos_apb_clk == NULL || IS_ERR(clks->dos_apb_clk)))
		devm_clk_put(dev, clks->dos_apb_clk);

	if (!(clks->dos_clk == NULL || IS_ERR(clks->dos_clk)))
		devm_clk_put(dev, clks->dos_clk);
}

/*
 * Look up the encoder clocks from DT.  Best-effort: a missing clock is
 * logged and left NULL rather than failing (the -ENOENT error path is
 * commented out), so the function always returns 0.
 */
static int jpeg_enc_clk_get(struct device *dev, struct jpeg_enc_clks *clks)
{
	//int ret = 0;

	clks->dos_clk = devm_clk_get(dev, "clk_dos");
	if (IS_ERR(clks->dos_clk)) {
		jenc_pr(LOG_ERROR, "cannot get clk_dos clock\n");
		clks->dos_clk = NULL;
		//ret = -ENOENT;
		//goto err;
	} else
		pr_err("jpeg_enc_clk_get: get clk_dos OK\n");

	clks->dos_apb_clk = devm_clk_get(dev, "clk_apb_dos");
	if (IS_ERR(clks->dos_apb_clk)) {
		jenc_pr(LOG_ERROR, "cannot get clk_apb_dos clock\n");
		clks->dos_apb_clk = NULL;
		//ret = -ENOENT;
		//goto err;
+ } else + pr_err("jpeg_enc_clk_get: get clk_apb_dos OK\n"); + + clks->jpeg_enc_clk = devm_clk_get(dev, "clk_jpeg_enc"); + if (IS_ERR(clks->jpeg_enc_clk)) { + jenc_pr(LOG_ERROR, "cannot get clk_jpeg_enc clock\n"); + clks->jpeg_enc_clk = NULL; + //ret = -ENOENT; + //goto err; + } else + pr_err("jpeg_enc_clk_get: get clk_jpeg_enc OK\n"); + + return 0; +//err: +// jpeg_enc_clk_put(dev, clks); + +// return ret; +} + +static void jpeg_enc_clk_enable(struct jpeg_enc_clks *clks, u32 frq) +{ + if (clks->dos_clk != NULL) { + clk_set_rate(clks->dos_clk, 400 * MHz); + clk_prepare_enable(clks->dos_clk); + pr_err("dos clk: %ld\n", clk_get_rate(clks->dos_clk)); + } + + if (clks->dos_apb_clk != NULL) { + clk_set_rate(clks->dos_apb_clk, 400 * MHz); + clk_prepare_enable(clks->dos_apb_clk); + pr_err("apb clk: %ld\n", clk_get_rate(clks->dos_apb_clk)); + } + + if (clks->jpeg_enc_clk != NULL) { + clk_set_rate(clks->jpeg_enc_clk, 666666666); + clk_prepare_enable(clks->jpeg_enc_clk); + pr_err("jpegenc clk: %ld\n", clk_get_rate(clks->jpeg_enc_clk)); + } + + /* + clk_prepare_enable(clks->dos_clk); + clk_prepare_enable(clks->dos_apb_clk); + clk_prepare_enable(clks->jpeg_enc_clk); + */ + pr_err("dos: %ld, dos_apb: %ld, jpeg clk: %ld\n", + clk_get_rate(clks->dos_clk), + clk_get_rate(clks->dos_apb_clk), + clk_get_rate(clks->jpeg_enc_clk)); + +} + +static void jpeg_enc_clk_disable(struct jpeg_enc_clks *clks) +{ + pr_err("set jpeg_enc_clk rate to 0\n"); + clk_set_rate(clks->jpeg_enc_clk, 0); + clk_disable_unprepare(clks->jpeg_enc_clk); + + //clk_disable_unprepare(clks->dos_apb_clk); + //clk_disable_unprepare(clks->dos_clk); +} + +static void dma_flush(u32 buf_start, u32 buf_size); + +static s32 zigzag(s32 i) +{ + s32 zigzag_i; + switch (i) { + case 0: + zigzag_i = 0; + break; + case 1: + zigzag_i = 1; + break; + case 2: + zigzag_i = 8; + break; + case 3: + zigzag_i = 16; + break; + case 4: + zigzag_i = 9; + break; + case 5: + zigzag_i = 2; + break; + case 6: + zigzag_i = 3; + break; + case 7: + 
zigzag_i = 10; + break; + case 8: + zigzag_i = 17; + break; + case 9: + zigzag_i = 24; + break; + case 10: + zigzag_i = 32; + break; + case 11: + zigzag_i = 25; + break; + case 12: + zigzag_i = 18; + break; + case 13: + zigzag_i = 11; + break; + case 14: + zigzag_i = 4; + break; + case 15: + zigzag_i = 5; + break; + case 16: + zigzag_i = 12; + break; + case 17: + zigzag_i = 19; + break; + case 18: + zigzag_i = 26; + break; + case 19: + zigzag_i = 33; + break; + case 20: + zigzag_i = 40; + break; + case 21: + zigzag_i = 48; + break; + case 22: + zigzag_i = 41; + break; + case 23: + zigzag_i = 34; + break; + case 24: + zigzag_i = 27; + break; + case 25: + zigzag_i = 20; + break; + case 26: + zigzag_i = 13; + break; + case 27: + zigzag_i = 6; + break; + case 28: + zigzag_i = 7; + break; + case 29: + zigzag_i = 14; + break; + case 30: + zigzag_i = 21; + break; + case 31: + zigzag_i = 28; + break; + case 32: + zigzag_i = 35; + break; + case 33: + zigzag_i = 42; + break; + case 34: + zigzag_i = 49; + break; + case 35: + zigzag_i = 56; + break; + case 36: + zigzag_i = 57; + break; + case 37: + zigzag_i = 50; + break; + case 38: + zigzag_i = 43; + break; + case 39: + zigzag_i = 36; + break; + case 40: + zigzag_i = 29; + break; + case 41: + zigzag_i = 22; + break; + case 42: + zigzag_i = 15; + break; + case 43: + zigzag_i = 23; + break; + case 44: + zigzag_i = 30; + break; + case 45: + zigzag_i = 37; + break; + case 46: + zigzag_i = 44; + break; + case 47: + zigzag_i = 51; + break; + case 48: + zigzag_i = 58; + break; + case 49: + zigzag_i = 59; + break; + case 50: + zigzag_i = 52; + break; + case 51: + zigzag_i = 45; + break; + case 52: + zigzag_i = 38; + break; + case 53: + zigzag_i = 31; + break; + case 54: + zigzag_i = 39; + break; + case 55: + zigzag_i = 46; + break; + case 56: + zigzag_i = 53; + break; + case 57: + zigzag_i = 60; + break; + case 58: + zigzag_i = 61; + break; + case 59: + zigzag_i = 54; + break; + case 60: + zigzag_i = 47; + break; + case 61: + 
zigzag_i = 55; + break; + case 62: + zigzag_i = 62; + break; + default: + zigzag_i = 63; + break; + } + return zigzag_i; +} + +/* Perform convertion from Q to 1/Q */ +u32 reciprocal(u32 q) +{ + u32 q_recip; + + /* 65535 * (1/q) */ + switch (q) { + case 0: + q_recip = 0; + break; + case 1: + q_recip = 65535; + break; + case 2: + q_recip = 32768; + break; + case 3: + q_recip = 21845; + break; + case 4: + q_recip = 16384; + break; + case 5: + q_recip = 13107; + break; + case 6: + q_recip = 10923; + break; + case 7: + q_recip = 9362; + break; + case 8: + q_recip = 8192; + break; + case 9: + q_recip = 7282; + break; + case 10: + q_recip = 6554; + break; + case 11: + q_recip = 5958; + break; + case 12: + q_recip = 5461; + break; + case 13: + q_recip = 5041; + break; + case 14: + q_recip = 4681; + break; + case 15: + q_recip = 4369; + break; + case 16: + q_recip = 4096; + break; + case 17: + q_recip = 3855; + break; + case 18: + q_recip = 3641; + break; + case 19: + q_recip = 3449; + break; + case 20: + q_recip = 3277; + break; + case 21: + q_recip = 3121; + break; + case 22: + q_recip = 2979; + break; + case 23: + q_recip = 2849; + break; + case 24: + q_recip = 2731; + break; + case 25: + q_recip = 2621; + break; + case 26: + q_recip = 2521; + break; + case 27: + q_recip = 2427; + break; + case 28: + q_recip = 2341; + break; + case 29: + q_recip = 2260; + break; + case 30: + q_recip = 2185; + break; + case 31: + q_recip = 2114; + break; + case 32: + q_recip = 2048; + break; + case 33: + q_recip = 1986; + break; + case 34: + q_recip = 1928; + break; + case 35: + q_recip = 1872; + break; + case 36: + q_recip = 1820; + break; + case 37: + q_recip = 1771; + break; + case 38: + q_recip = 1725; + break; + case 39: + q_recip = 1680; + break; + case 40: + q_recip = 1638; + break; + case 41: + q_recip = 1598; + break; + case 42: + q_recip = 1560; + break; + case 43: + q_recip = 1524; + break; + case 44: + q_recip = 1489; + break; + case 45: + q_recip = 1456; + break; + case 46: + 
q_recip = 1425; + break; + case 47: + q_recip = 1394; + break; + case 48: + q_recip = 1365; + break; + case 49: + q_recip = 1337; + break; + case 50: + q_recip = 1311; + break; + case 51: + q_recip = 1285; + break; + case 52: + q_recip = 1260; + break; + case 53: + q_recip = 1237; + break; + case 54: + q_recip = 1214; + break; + case 55: + q_recip = 1192; + break; + case 56: + q_recip = 1170; + break; + case 57: + q_recip = 1150; + break; + case 58: + q_recip = 1130; + break; + case 59: + q_recip = 1111; + break; + case 60: + q_recip = 1092; + break; + case 61: + q_recip = 1074; + break; + case 62: + q_recip = 1057; + break; + case 63: + q_recip = 1040; + break; + case 64: + q_recip = 1024; + break; + case 65: + q_recip = 1008; + break; + case 66: + q_recip = 993; + break; + case 67: + q_recip = 978; + break; + case 68: + q_recip = 964; + break; + case 69: + q_recip = 950; + break; + case 70: + q_recip = 936; + break; + case 71: + q_recip = 923; + break; + case 72: + q_recip = 910; + break; + case 73: + q_recip = 898; + break; + case 74: + q_recip = 886; + break; + case 75: + q_recip = 874; + break; + case 76: + q_recip = 862; + break; + case 77: + q_recip = 851; + break; + case 78: + q_recip = 840; + break; + case 79: + q_recip = 830; + break; + case 80: + q_recip = 819; + break; + case 81: + q_recip = 809; + break; + case 82: + q_recip = 799; + break; + case 83: + q_recip = 790; + break; + case 84: + q_recip = 780; + break; + case 85: + q_recip = 771; + break; + case 86: + q_recip = 762; + break; + case 87: + q_recip = 753; + break; + case 88: + q_recip = 745; + break; + case 89: + q_recip = 736; + break; + case 90: + q_recip = 728; + break; + case 91: + q_recip = 720; + break; + case 92: + q_recip = 712; + break; + case 93: + q_recip = 705; + break; + case 94: + q_recip = 697; + break; + case 95: + q_recip = 690; + break; + case 96: + q_recip = 683; + break; + case 97: + q_recip = 676; + break; + case 98: + q_recip = 669; + break; + case 99: + q_recip = 662; + 
break; + case 100: + q_recip = 655; + break; + case 101: + q_recip = 649; + break; + case 102: + q_recip = 643; + break; + case 103: + q_recip = 636; + break; + case 104: + q_recip = 630; + break; + case 105: + q_recip = 624; + break; + case 106: + q_recip = 618; + break; + case 107: + q_recip = 612; + break; + case 108: + q_recip = 607; + break; + case 109: + q_recip = 601; + break; + case 110: + q_recip = 596; + break; + case 111: + q_recip = 590; + break; + case 112: + q_recip = 585; + break; + case 113: + q_recip = 580; + break; + case 114: + q_recip = 575; + break; + case 115: + q_recip = 570; + break; + case 116: + q_recip = 565; + break; + case 117: + q_recip = 560; + break; + case 118: + q_recip = 555; + break; + case 119: + q_recip = 551; + break; + case 120: + q_recip = 546; + break; + case 121: + q_recip = 542; + break; + case 122: + q_recip = 537; + break; + case 123: + q_recip = 533; + break; + case 124: + q_recip = 529; + break; + case 125: + q_recip = 524; + break; + case 126: + q_recip = 520; + break; + case 127: + q_recip = 516; + break; + case 128: + q_recip = 512; + break; + case 129: + q_recip = 508; + break; + case 130: + q_recip = 504; + break; + case 131: + q_recip = 500; + break; + case 132: + q_recip = 496; + break; + case 133: + q_recip = 493; + break; + case 134: + q_recip = 489; + break; + case 135: + q_recip = 485; + break; + case 136: + q_recip = 482; + break; + case 137: + q_recip = 478; + break; + case 138: + q_recip = 475; + break; + case 139: + q_recip = 471; + break; + case 140: + q_recip = 468; + break; + case 141: + q_recip = 465; + break; + case 142: + q_recip = 462; + break; + case 143: + q_recip = 458; + break; + case 144: + q_recip = 455; + break; + case 145: + q_recip = 452; + break; + case 146: + q_recip = 449; + break; + case 147: + q_recip = 446; + break; + case 148: + q_recip = 443; + break; + case 149: + q_recip = 440; + break; + case 150: + q_recip = 437; + break; + case 151: + q_recip = 434; + break; + case 152: + 
q_recip = 431; + break; + case 153: + q_recip = 428; + break; + case 154: + q_recip = 426; + break; + case 155: + q_recip = 423; + break; + case 156: + q_recip = 420; + break; + case 157: + q_recip = 417; + break; + case 158: + q_recip = 415; + break; + case 159: + q_recip = 412; + break; + case 160: + q_recip = 410; + break; + case 161: + q_recip = 407; + break; + case 162: + q_recip = 405; + break; + case 163: + q_recip = 402; + break; + case 164: + q_recip = 400; + break; + case 165: + q_recip = 397; + break; + case 166: + q_recip = 395; + break; + case 167: + q_recip = 392; + break; + case 168: + q_recip = 390; + break; + case 169: + q_recip = 388; + break; + case 170: + q_recip = 386; + break; + case 171: + q_recip = 383; + break; + case 172: + q_recip = 381; + break; + case 173: + q_recip = 379; + break; + case 174: + q_recip = 377; + break; + case 175: + q_recip = 374; + break; + case 176: + q_recip = 372; + break; + case 177: + q_recip = 370; + break; + case 178: + q_recip = 368; + break; + case 179: + q_recip = 366; + break; + case 180: + q_recip = 364; + break; + case 181: + q_recip = 362; + break; + case 182: + q_recip = 360; + break; + case 183: + q_recip = 358; + break; + case 184: + q_recip = 356; + break; + case 185: + q_recip = 354; + break; + case 186: + q_recip = 352; + break; + case 187: + q_recip = 350; + break; + case 188: + q_recip = 349; + break; + case 189: + q_recip = 347; + break; + case 190: + q_recip = 345; + break; + case 191: + q_recip = 343; + break; + case 192: + q_recip = 341; + break; + case 193: + q_recip = 340; + break; + case 194: + q_recip = 338; + break; + case 195: + q_recip = 336; + break; + case 196: + q_recip = 334; + break; + case 197: + q_recip = 333; + break; + case 198: + q_recip = 331; + break; + case 199: + q_recip = 329; + break; + case 200: + q_recip = 328; + break; + case 201: + q_recip = 326; + break; + case 202: + q_recip = 324; + break; + case 203: + q_recip = 323; + break; + case 204: + q_recip = 321; + break; 
+ case 205: + q_recip = 320; + break; + case 206: + q_recip = 318; + break; + case 207: + q_recip = 317; + break; + case 208: + q_recip = 315; + break; + case 209: + q_recip = 314; + break; + case 210: + q_recip = 312; + break; + case 211: + q_recip = 311; + break; + case 212: + q_recip = 309; + break; + case 213: + q_recip = 308; + break; + case 214: + q_recip = 306; + break; + case 215: + q_recip = 305; + break; + case 216: + q_recip = 303; + break; + case 217: + q_recip = 302; + break; + case 218: + q_recip = 301; + break; + case 219: + q_recip = 299; + break; + case 220: + q_recip = 298; + break; + case 221: + q_recip = 297; + break; + case 222: + q_recip = 295; + break; + case 223: + q_recip = 294; + break; + case 224: + q_recip = 293; + break; + case 225: + q_recip = 291; + break; + case 226: + q_recip = 290; + break; + case 227: + q_recip = 289; + break; + case 228: + q_recip = 287; + break; + case 229: + q_recip = 286; + break; + case 230: + q_recip = 285; + break; + case 231: + q_recip = 284; + break; + case 232: + q_recip = 282; + break; + case 233: + q_recip = 281; + break; + case 234: + q_recip = 280; + break; + case 235: + q_recip = 279; + break; + case 236: + q_recip = 278; + break; + case 237: + q_recip = 277; + break; + case 238: + q_recip = 275; + break; + case 239: + q_recip = 274; + break; + case 240: + q_recip = 273; + break; + case 241: + q_recip = 272; + break; + case 242: + q_recip = 271; + break; + case 243: + q_recip = 270; + break; + case 244: + q_recip = 269; + break; + case 245: + q_recip = 267; + break; + case 246: + q_recip = 266; + break; + case 247: + q_recip = 265; + break; + case 248: + q_recip = 264; + break; + case 249: + q_recip = 263; + break; + case 250: + q_recip = 262; + break; + case 251: + q_recip = 261; + break; + case 252: + q_recip = 260; + break; + case 253: + q_recip = 259; + break; + case 254: + q_recip = 258; + break; + default: + q_recip = 257; + break; + } + return q_recip; +} /* reciprocal */ + +static void 
push_word(u8 *base, s32 *offset, u32 word) +{ + u8 *ptr; + s32 i; + s32 bytes = (word >> 24) & 0xff; + for (i = bytes - 1; i >= 0; i--) { + ptr = base + *offset; + (*offset)++; + if (i == 0) + *ptr = word & 0xff; + else if (i == 1) + *ptr = (word >> 8) & 0xff; + else if (i == 2) + *ptr = (word >> 16) & 0xff; + } +} + +static s32 jpeg_quality_scaling(s32 quality) +{ + if (quality <= 0) + quality = 1; + if (quality > 100) + quality = 100; + + if (quality < 50) + quality = 5000 / quality; + else + quality = 200 - quality * 2; + return quality; +} + +static void _convert_quant_table(u16 *qtable, u16 *basic_table, + s32 scale_factor, bool force_baseline) +{ + s32 i = 0; + s32 temp; + for (i = 0; i < DCTSIZE2; i++) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) { + if (use_quality) { + //jenc_pr(LOG_ALL, "c1 using quality factor %d\n", scale_factor); + temp = ((s32)basic_table[i] * scale_factor + 50) / 100; + } else { + //jenc_pr(LOG_INFO, "c1 ignore quality factor\n"); + temp = (s32)basic_table[i]; + } + }else + temp = ((s32)basic_table[i] * scale_factor + 50) / 100; + /* limit the values to the valid range */ + if (temp <= 0) + temp = 1; + /* max quantizer needed for 12 bits */ + if (temp > 32767) + temp = 32767; + /* limit to baseline range if requested */ + if (force_baseline && temp > 255) + temp = 255; + qtable[i] = (u16)temp; + } +} + +static void convert_quant_table(u16 *qtable, u16 *basic_table, + s32 scale_factor) +{ + _convert_quant_table(qtable, basic_table, scale_factor, true); +} + +static void write_jpeg_quant_lut(s32 table_num) +{ + s32 i; + u32 data32; + + for (i = 0; i < DCTSIZE2; i += 2) { + data32 = reciprocal(gQuantTable[table_num][i]); + data32 |= reciprocal(gQuantTable[table_num][i + 1]) << 16; + WRITE_HREG(HCODEC_QDCT_JPEG_QUANT_DATA, data32); + } +} + +static void write_jpeg_huffman_lut_dc(s32 table_num) +{ + u32 code_len, code_word, pos, addr; + u32 num_code_len; + u32 lut[12]; + u32 i, j; + + code_len = 1; + code_word = 1; + pos = 16; 

	/* Construct DC Huffman table: walk the 16 BITS counts, generating
	 * canonical codes of increasing length; each symbol indexes the LUT. */
	for (i = 0; i < 16; i++) {
		num_code_len = jpeg_huffman_dc[table_num][i];
		for (j = 0; j < num_code_len; j++) {
			code_word = (code_word + 1) & ((1 << code_len) - 1);
			if (code_len < i + 1) {
				code_word <<= (i + 1 - code_len);
				code_len = i + 1;
			}
			addr = jpeg_huffman_dc[table_num][pos];
			lut[addr] = ((code_len - 1) << 16) | (code_word);
			pos++;
		}
	}

	/* Write DC Huffman table to HW */
	for (i = 0; i < 12; i++)
		WRITE_HREG(HCODEC_VLC_HUFFMAN_DATA, lut[i]);
}

/*
 * Expand an AC Huffman table (16 BITS counts + 162 {run,size} symbols)
 * into the 162-entry HW VLC LUT.  LUT slots: 0 = EOB, 161 = ZRL,
 * otherwise 1 + 16*(size-1) + run.  NOTE(review): on a malformed table
 * the error is only logged and the previous 'addr' is overwritten --
 * the LUT may then be partially stale.
 */
static void write_jpeg_huffman_lut_ac(s32 table_num)
{
	u32 code_len, code_word, pos;
	u32 num_code_len;
	u32 run, size;
	u32 data, addr = 0;
	u32 *lut = NULL;
	u32 i, j;

	code_len = 1;
	code_word = 1;
	pos = 16;

	lut = (u32 *)vmalloc(162 * sizeof(u32));
	if (!lut) {
		pr_err("alloc lut failed.\n");
		return;
	}

	/* Construct AC Huffman table */
	for (i = 0; i < 16; i++) {
		num_code_len = jpeg_huffman_ac[table_num][i];
		for (j = 0; j < num_code_len; j++) {
			code_word = (code_word + 1) & ((1 << code_len) - 1);
			if (code_len < i + 1) {
				code_word <<= (i + 1 - code_len);
				code_len = i + 1;
			}
			run = jpeg_huffman_ac[table_num][pos] >> 4;
			size = jpeg_huffman_ac[table_num][pos] & 0xf;
			data = ((code_len - 1) << 16) | (code_word);
			if (size == 0) {
				if (run == 0)
					addr = 0; /* EOB */
				else if (run == 0xf)
					addr = 161; /* ZRL */
				else
					jenc_pr(LOG_ERROR,
						"Error: Illegal AC Huffman table format!\n");
			} else if (size <= 0xa)
				addr = 1 + 16 * (size - 1) + run;
			else
				jenc_pr(LOG_ERROR,
					"Error: Illegal AC Huffman table format!\n");
			lut[addr] = data;
			pos++;
		}
	}

	/* Write AC Huffman table to HW */
	for (i = 0; i < 162; i++)
		WRITE_HREG(HCODEC_VLC_HUFFMAN_DATA, lut[i]);

	vfree(lut);
}

/*
 * Build the JPEG header (SOI, DQT, DHT, SOF0, SOS) into the assist
 * stream buffer and record its length in wq->headbytes.  0xff fill
 * bytes are inserted before the SOS marker so the total header length
 * is a multiple of 8 bytes.  NOTE(review): &header_bytes is u32* while
 * push_word() takes s32* -- harmless in practice but a sign mismatch
 * worth tidying.
 */
static void prepare_jpeg_header(struct jpegenc_wq_s *wq)
{
	s32 pic_format;
	s32 pic_width, pic_height;
	s32 q_sel_comp0, q_sel_comp1, q_sel_comp2;
	s32 dc_huff_sel_comp0, dc_huff_sel_comp1, dc_huff_sel_comp2;
	s32 ac_huff_sel_comp0, ac_huff_sel_comp1, ac_huff_sel_comp2;
	s32 lastcoeff_sel;
	s32 jdct_intr_sel;
	s32 h_factor_comp0, v_factor_comp0;
	s32 h_factor_comp1, v_factor_comp1;
	s32 h_factor_comp2, v_factor_comp2;
	s32 q_num;
	s32 tq[2];
	s32 dc_huff_num, ac_huff_num;
	s32 dc_th[2], ac_th[2];
	u32 header_bytes = 0;
	u32 bak_header_bytes = 0;
	s32 i, j;
	u8 *assitbuf = (u8 *)wq->AssitstreamStartVirtAddr;

	if (wq->cmd.output_fmt >= JPEGENC_MAX_FRAME_FMT)
		jenc_pr(LOG_ERROR, "Input format is wrong!\n");
	/* Map the output format to the internal pic_format code:
	 * 0=RGB/other, 1=YUV444, 2=YUV422, 3=YUV420. */
	switch (wq->cmd.output_fmt) {
	case JPEGENC_FMT_NV21:
	case JPEGENC_FMT_NV12:
	case JPEGENC_FMT_YUV420:
		pic_format = 3;
		break;
	case JPEGENC_FMT_YUV422_SINGLE:
		pic_format = 2;
		break;
	case JPEGENC_FMT_YUV444_SINGLE:
	case JPEGENC_FMT_YUV444_PLANE:
		pic_format = 1;
		break;
	default:
		pic_format = 0;
		break;
	}

	pic_width = wq->cmd.encoder_width;
	pic_height = wq->cmd.encoder_height;

	q_sel_comp0 = QUANT_SEL_COMP0;
	q_sel_comp1 = QUANT_SEL_COMP1;
	q_sel_comp2 = QUANT_SEL_COMP2;

	dc_huff_sel_comp0 = DC_HUFF_SEL_COMP0;
	dc_huff_sel_comp1 = DC_HUFF_SEL_COMP1;
	dc_huff_sel_comp2 = DC_HUFF_SEL_COMP2;
	ac_huff_sel_comp0 = AC_HUFF_SEL_COMP0;
	ac_huff_sel_comp1 = AC_HUFF_SEL_COMP1;
	ac_huff_sel_comp2 = AC_HUFF_SEL_COMP2;
	lastcoeff_sel = JDCT_LASTCOEFF_SEL;
	jdct_intr_sel = JDCT_INTR_SEL;

	/* Sampling factors: stored minus one (the +1 is added on emit). */
	if (pic_format == 2) {
		/* YUV422 */
		h_factor_comp0 = 1;
		v_factor_comp0 = 0;
		h_factor_comp1 = 0;
		v_factor_comp1 = 0;
		h_factor_comp2 = 0;
		v_factor_comp2 = 0;
	} else if (pic_format == 3) {
		/* YUV420 */
		h_factor_comp0 = 1;
		v_factor_comp0 = 1;
		h_factor_comp1 = 0;
		v_factor_comp1 = 0;
		h_factor_comp2 = 0;
		v_factor_comp2 = 0;
	} else {
		/* RGB or YUV */
		h_factor_comp0 = 0;
		v_factor_comp0 = 0;
		h_factor_comp1 = 0;
		v_factor_comp1 = 0;
		h_factor_comp2 = 0;
		v_factor_comp2 = 0;
	}

	/* SOI marker */
	push_word(assitbuf, &header_bytes,
		(2 << 24) | /* Number of bytes */
		(0xffd8 << 0)); /* data: SOI marker */

	/* Define quantization tables */
	q_num = 1;
#if 0
	if ((q_sel_comp0 != q_sel_comp1) ||
		(q_sel_comp0 != q_sel_comp2) ||
		(q_sel_comp1 != q_sel_comp2))
#endif
		q_num++; /* with the #if 0 above, q_num is always 2 */
#if 0
	tq[0] = q_sel_comp0;
	tq[1] = (q_sel_comp0 != q_sel_comp1) ? q_sel_comp1 :
		(q_sel_comp0 != q_sel_comp2) ? q_sel_comp2 :
		q_sel_comp0;
#endif
	tq[0] = 0;
	tq[1] = q_num - 1;

	/* data: DQT marker */
	push_word(assitbuf, &header_bytes,
		(2 << 24) | (0xffdb << 0));
	/* data: Lq */
	push_word(assitbuf, &header_bytes,
		(2 << 24) | ((2 + 65 * q_num) << 0));

	/* Add Quantization table bytes */
	/* header_bytes += (2 + (2 + 65 * q_num)); */
	for (i = 0; i < q_num; i++) {
		/* data: {Pq,Tq} */
		push_word(assitbuf, &header_bytes,
			(1 << 24) | (i << 0));
		for (j = 0; j < DCTSIZE2; j++) {
			/* data: Qk -- emitted in zig-zag order per the spec */
			push_word(assitbuf, &header_bytes,
				(1 << 24) |
				((gQuantTable[tq[i]][zigzag(j)]) << 0));
		}
	}

	/* Define Huffman tables: one shared table unless the per-component
	 * selectors differ, in which case two are emitted. */
	dc_huff_num = 1;
	if ((dc_huff_sel_comp0 != dc_huff_sel_comp1) ||
		(dc_huff_sel_comp0 != dc_huff_sel_comp2) ||
		(dc_huff_sel_comp1 != dc_huff_sel_comp2))
		dc_huff_num++;

	ac_huff_num = 1;
	if ((ac_huff_sel_comp0 != ac_huff_sel_comp1) ||
		(ac_huff_sel_comp0 != ac_huff_sel_comp2) ||
		(ac_huff_sel_comp1 != ac_huff_sel_comp2))
		ac_huff_num++;

	dc_th[0] = dc_huff_sel_comp0;
	dc_th[1] = (dc_huff_sel_comp0 != dc_huff_sel_comp1) ?
		dc_huff_sel_comp1 : (dc_huff_sel_comp0 != dc_huff_sel_comp2) ?
		dc_huff_sel_comp2 : dc_huff_sel_comp0;

	ac_th[0] = ac_huff_sel_comp0;
	ac_th[1] = (ac_huff_sel_comp0 != ac_huff_sel_comp1) ?
		ac_huff_sel_comp1 : (ac_huff_sel_comp0 != ac_huff_sel_comp2) ?
		ac_huff_sel_comp2 : ac_huff_sel_comp0;

	/* data: DHT marker */
	push_word(assitbuf, &header_bytes,
		(2 << 24) | (0xffc4 << 0));
	/* data: Lh */
	push_word(assitbuf, &header_bytes,
		(2 << 24) |
		((2 + (1 + 16 + 12) * dc_huff_num +
		(1 + 16 + 162) * ac_huff_num) << 0));

	/* Add Huffman table bytes */
	/* data: {Tc,Th} */
	for (i = 0; i < dc_huff_num; i++) {
		push_word(assitbuf, &header_bytes,
			(1 << 24) | (i << 0));
		for (j = 0; j < 16 + 12; j++) {
			/* data: Li then Vi,j */
			push_word(assitbuf, &header_bytes,
				(1 << 24) |
				((jpeg_huffman_dc[dc_th[i]][j]) << 0));
		}
	}
	for (i = 0; i < ac_huff_num; i++) {
		push_word(assitbuf, &header_bytes,
			(1 << 24) |
			(1 << 4) | /* data: Tc */
			(i << 0)); /* data: Th */
		for (j = 0; j < 16 + 162; j++) {
			/* data: Li then Vi,j */
			push_word(assitbuf, &header_bytes,
				(1 << 24) |
				((jpeg_huffman_ac[ac_th[i]][j]) << 0));
		}
	}

	/* Frame header */
	/* Add Frame header bytes */
	/* header_bytes += (2 + (8 + 3 * 3)); */
	push_word(assitbuf, &header_bytes,
		(2 << 24) | /* Number of bytes */
		(0xffc0 << 0)); /* data: SOF_0 marker */
	/* data: Lf */
	push_word(assitbuf, &header_bytes,
		(2 << 24) | ((8 + 3 * 3) << 0));
	/* data: P -- Sample precision */
	push_word(assitbuf,
		&header_bytes, (1 << 24) | (8 << 0));
	/* data: Y -- Number of lines */
	push_word(assitbuf,
		&header_bytes, (2 << 24) | (pic_height << 0));
	/* data: X -- Number of samples per line */
	push_word(assitbuf,
		&header_bytes, (2 << 24) | (pic_width << 0));
	/* data: Nf -- Number of components in a frame */
	push_word(assitbuf,
		&header_bytes, (1 << 24) | (3 << 0));
	/* data: C0 -- Comp0 identifier */
	push_word(assitbuf,
		&header_bytes, (1 << 24) | (0 << 0));
	push_word(assitbuf,
		&header_bytes, (1 << 24) |
		/* data: H0 -- Comp0 horizontal sampling factor */
		((h_factor_comp0 + 1) << 4) |
		/* data: V0 -- Comp0 vertical sampling factor */
		((v_factor_comp0 + 1) << 0));

	/* data: Tq0 -- Comp0 quantization table selector */
	push_word(assitbuf,
		&header_bytes, (1 << 24) | (0 << 0));
	/* data: C1 -- Comp1 identifier */
	push_word(assitbuf,
		&header_bytes, (1 << 24) | (1 << 0));
	push_word(assitbuf,
		&header_bytes, (1 << 24) |
		/* data: H1 -- Comp1 horizontal sampling factor */
		((h_factor_comp1 + 1) << 4) |
		/* data: V1 -- Comp1 vertical sampling factor */
		((v_factor_comp1 + 1) << 0));
	/* data: Tq1 -- Comp1 quantization table selector */
	push_word(assitbuf,
		&header_bytes, (1 << 24) |
		(((q_sel_comp0 != q_sel_comp1) ? 1 : 0) << 0));
	/* data: C2 -- Comp2 identifier */
	push_word(assitbuf,
		&header_bytes, (1 << 24) | (2 << 0));
	push_word(assitbuf,
		&header_bytes, (1 << 24) |
		/* data: H2 -- Comp2 horizontal sampling factor */
		((h_factor_comp2 + 1) << 4) |
		/* data: V2 -- Comp2 vertical sampling factor */
		((v_factor_comp2 + 1) << 0));
	/* data: Tq2 -- Comp2 quantization table selector */
	push_word(assitbuf,
		&header_bytes, (1 << 24) |
		(((q_sel_comp0 != q_sel_comp2) ? 1 : 0) << 0));

	/* Scan header */
	bak_header_bytes = header_bytes + (2 + (6 + 2 * 3));

	/* Add Scan header bytes */
	/* header_bytes += (2 + (6+2*3)); */
	/* If total header bytes is not multiple of 8,
		then fill 0xff byte between Frame header segment
		and the Scan header segment. */
	/* header_bytes = ((header_bytes + 7)/8)*8; */
	bak_header_bytes = ((bak_header_bytes + 7) / 8) * 8 - bak_header_bytes;
	for (i = 0; i < bak_header_bytes; i++)
		push_word(assitbuf,
			&header_bytes,
			(1 << 24) | (0xff << 0)); /* 0xff filler */

	push_word(assitbuf,
		&header_bytes,
		(2 << 24) | /* Number of bytes */
		(0xffda << 0)); /* data: SOS marker */

	/* data: Ls */
	push_word(assitbuf,
		&header_bytes, (2 << 24) | ((6 + 2 * 3) << 0));
	/* data: Ns -- Number of components in a scan */
	push_word(assitbuf,
		&header_bytes, (1 << 24) | (3 << 0));
	/* data: Cs0 -- Comp0 identifier */
	push_word(assitbuf,
		&header_bytes, (1 << 24) | (0 << 0));
	push_word(assitbuf,
		&header_bytes, (1 << 24) |
		(0 << 4) | /* data: Td0 -- Comp0 DC Huffman table selector */
		(0 << 0)); /* data: Ta0 -- Comp0 AC Huffman table selector */
	/* data: Cs1 -- Comp1 identifier */
	push_word(assitbuf,
		&header_bytes, (1 << 24) | (1 << 0));
	push_word(assitbuf,
		&header_bytes, (1 << 24) |
		/* data: Td1 -- Comp1 DC Huffman table selector */
		(((dc_huff_sel_comp0 != dc_huff_sel_comp1) ? 1 : 0) << 4) |
		/* data: Ta1 -- Comp1 AC Huffman table selector */
		(((ac_huff_sel_comp0 != ac_huff_sel_comp1) ? 1 : 0) << 0));
	/* data: Cs2 -- Comp2 identifier */
	push_word(assitbuf,
		&header_bytes, (1 << 24) | (2 << 0));
	push_word(assitbuf,
		&header_bytes, (1 << 24) |
		/* data: Td2 -- Comp2 DC Huffman table selector */
		(((dc_huff_sel_comp0 != dc_huff_sel_comp2) ? 1 : 0) << 4) |
		/* data: Ta2 -- Comp2 AC Huffman table selector */
		(((ac_huff_sel_comp0 != ac_huff_sel_comp2) ? 1 : 0) << 0));
	push_word(assitbuf, &header_bytes,
		(3 << 24) |
		(0 << 16) | /* data: Ss = 0 */
		(63 << 8) | /* data: Se = 63 */
		(0 << 4) | /* data: Ah = 0 */
		(0 << 0)); /* data: Al = 0 */
	jenc_pr(LOG_INFO, "jpeg header bytes is %d\n", header_bytes);
	wq->headbytes = header_bytes;
}

/*
 * Program the encoder hardware for one frame: picture geometry/format
 * plus the quantization and Huffman LUTs.
 */
static void init_jpeg_encoder(struct jpegenc_wq_s *wq)
{
	u32 data32;
	s32 pic_format; /* 0=RGB; 1=YUV; 2=YUV422; 3=YUV420 */
	s32 pic_x_start, pic_x_end, pic_y_start, pic_y_end;
	s32 pic_width, pic_height;
	u32 q_sel_comp0, q_sel_comp1, q_sel_comp2;
	s32 dc_huff_sel_comp0, dc_huff_sel_comp1, dc_huff_sel_comp2;
	s32 ac_huff_sel_comp0, ac_huff_sel_comp1, ac_huff_sel_comp2;
	s32 lastcoeff_sel;
	s32 jdct_intr_sel;
	s32 h_factor_comp0, v_factor_comp0;
	s32 h_factor_comp1, v_factor_comp1;
	s32 h_factor_comp2, v_factor_comp2;

	jenc_pr(LOG_INFO, "Initialize JPEG Encoder ....\n");
	if (wq->cmd.output_fmt >= JPEGENC_MAX_FRAME_FMT)
		jenc_pr(LOG_ERROR, "Input format is wrong!\n");
	switch (wq->cmd.output_fmt) {
	case JPEGENC_FMT_NV21:
	case JPEGENC_FMT_NV12:
	case JPEGENC_FMT_YUV420:
		pic_format = 3;
		break;
	case JPEGENC_FMT_YUV422_SINGLE:
		pic_format = 2;
		break;
	case JPEGENC_FMT_YUV444_SINGLE:
	case JPEGENC_FMT_YUV444_PLANE:
		pic_format = 1;
		break;
	default:
		pic_format = 0;
		break;
	}

	pic_x_start = 0;
	pic_x_end = wq->cmd.encoder_width - 1;

	pic_y_start = 0;
	pic_y_end = wq->cmd.encoder_height - 1;

	pic_width = wq->cmd.encoder_width;
	pic_height = wq->cmd.encoder_height;

	/* Quant table pair: on pre-C1 parts QuantTable_id selects a
	 * (luma, chroma) pair (0,1)/(2,3)/(4,5) from gQuantTable. */
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) {
		q_sel_comp0 = QUANT_SEL_COMP0 & 0xff;
		q_sel_comp1 = QUANT_SEL_COMP1 & 0xff;
		q_sel_comp2 = QUANT_SEL_COMP2 & 0xff;
	} else {
		q_sel_comp0 = wq->cmd.QuantTable_id * 2;
		q_sel_comp1 = q_sel_comp0 + 1;
		q_sel_comp2 = q_sel_comp1;
	}
	/* Reject out-of-range selectors (valid indices here are 0..5). */
	if (q_sel_comp0 >= 6 || q_sel_comp1 >= 6)
	{
		jenc_pr(LOG_ERROR, "error, q_sel_comp0, q_sel_comp1 is invalid %d,%d\n",
			q_sel_comp0, q_sel_comp1);
		return;
	}
dc_huff_sel_comp0 = DC_HUFF_SEL_COMP0; + dc_huff_sel_comp1 = DC_HUFF_SEL_COMP1; + dc_huff_sel_comp2 = DC_HUFF_SEL_COMP2; + ac_huff_sel_comp0 = AC_HUFF_SEL_COMP0; + ac_huff_sel_comp1 = AC_HUFF_SEL_COMP1; + ac_huff_sel_comp2 = AC_HUFF_SEL_COMP2; + lastcoeff_sel = JDCT_LASTCOEFF_SEL; + jdct_intr_sel = JDCT_INTR_SEL; + + if (pic_format == 2) { + /* YUV422 */ + h_factor_comp0 = 1; + v_factor_comp0 = 0; + h_factor_comp1 = 0; + v_factor_comp1 = 0; + h_factor_comp2 = 0; + v_factor_comp2 = 0; + } else if (pic_format == 3) { + /* YUV420 */ + h_factor_comp0 = 1; + v_factor_comp0 = 1; + h_factor_comp1 = 0; + v_factor_comp1 = 0; + h_factor_comp2 = 0; + v_factor_comp2 = 0; + } else { + /* RGB or YUV */ + h_factor_comp0 = 0; + v_factor_comp0 = 0; + h_factor_comp1 = 0; + v_factor_comp1 = 0; + h_factor_comp2 = 0; + v_factor_comp2 = 0; + } + + /* Configure picture size and format */ + WRITE_HREG(HCODEC_VLC_PIC_SIZE, pic_width | (pic_height << 16)); + WRITE_HREG(HCODEC_VLC_PIC_POSITION, pic_format | (lastcoeff_sel << 4)); + WRITE_HREG(HCODEC_QDCT_JPEG_X_START_END, + ((pic_x_end << 16) | (pic_x_start << 0))); + WRITE_HREG(HCODEC_QDCT_JPEG_Y_START_END, + ((pic_y_end << 16) | (pic_y_start << 0))); + + /* Configure quantization tables */ +#ifdef EXTEAN_QUANT_TABLE + if (external_quant_table_available) { + convert_quant_table(&gQuantTable[0][0], + &gExternalQuantTablePtr[0], + wq->cmd.jpeg_quality); + convert_quant_table(&gQuantTable[1][0], + &gExternalQuantTablePtr[DCTSIZE2], + wq->cmd.jpeg_quality); + q_sel_comp0 = 0; + q_sel_comp1 = 1; + q_sel_comp2 = 1; + } else +#endif + { + s32 tq[2]; + tq[0] = q_sel_comp0; + tq[1] = (q_sel_comp0 != q_sel_comp1) ? + q_sel_comp1 : (q_sel_comp0 != q_sel_comp2) ? 
+ q_sel_comp2 : q_sel_comp0; + convert_quant_table(&gQuantTable[0][0], + (u16 *)&jpeg_quant[tq[0]], + wq->cmd.jpeg_quality); + if (tq[0] != tq[1]) + convert_quant_table(&gQuantTable[1][0], + (u16 *)&jpeg_quant[tq[1]], + wq->cmd.jpeg_quality); + q_sel_comp0 = tq[0]; + q_sel_comp1 = tq[1]; + q_sel_comp2 = tq[1]; + } + + /* Set Quantization LUT start address */ + data32 = 0; + data32 |= 0 << 8; /* [8] 0=Write LUT, 1=Read */ + data32 |= 0 << 0; /* [5:0] Start addr = 0 */ + + WRITE_HREG(HCODEC_QDCT_JPEG_QUANT_ADDR, data32); + + /* Burst-write Quantization LUT data */ + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_C1) { + write_jpeg_quant_lut(0); + if (q_sel_comp0 != q_sel_comp1) + write_jpeg_quant_lut(1); + } else { + write_jpeg_quant_lut(q_sel_comp0); + if (q_sel_comp1 != q_sel_comp0) + write_jpeg_quant_lut(q_sel_comp1); + if ((q_sel_comp2 != q_sel_comp0) && (q_sel_comp2 != q_sel_comp1)) + write_jpeg_quant_lut(q_sel_comp2); + } + + /* Configure Huffman tables */ + + /* Set DC Huffman LUT start address */ + data32 = 0; + data32 |= 0 << 16; /* [16] 0=Write LUT, 1=Read */ + data32 |= 0 << 0; /* [8:0] Start addr = 0 */ + WRITE_HREG(HCODEC_VLC_HUFFMAN_ADDR, data32); + + /* Burst-write DC Huffman LUT data */ + write_jpeg_huffman_lut_dc(dc_huff_sel_comp0); + if (dc_huff_sel_comp1 != dc_huff_sel_comp0) + write_jpeg_huffman_lut_dc(dc_huff_sel_comp1); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) { + if ((dc_huff_sel_comp2 != dc_huff_sel_comp0) + && (dc_huff_sel_comp2 != dc_huff_sel_comp1)) + write_jpeg_huffman_lut_dc(dc_huff_sel_comp2); + } + + /* Set AC Huffman LUT start address */ + data32 = 0; + data32 |= 0 << 16; /* [16] 0=Write LUT, 1=Read */ + data32 |= 24 << 0; /* [8:0] Start addr = 0 */ + WRITE_HREG(HCODEC_VLC_HUFFMAN_ADDR, data32); + + /* Burst-write AC Huffman LUT data */ + write_jpeg_huffman_lut_ac(ac_huff_sel_comp0); + if (ac_huff_sel_comp1 != ac_huff_sel_comp0) + write_jpeg_huffman_lut_ac(ac_huff_sel_comp1); + + if (get_cpu_major_id() >= 
AM_MESON_CPU_MAJOR_ID_C1) { + if ((ac_huff_sel_comp2 != ac_huff_sel_comp0) + && (ac_huff_sel_comp2 != ac_huff_sel_comp1)) + write_jpeg_huffman_lut_ac(ac_huff_sel_comp2); + } + + /* Configure general control registers */ + data32 = 0; + /* [19:18] dct_inflow_ctrl: 0=No halt; */ + /* 1=DCT halts request at end of each 8x8 block; */ + /* 2=DCT halts request at end of each MCU. */ + data32 |= 0 << 18; + /* [17:16] jpeg_coeff_last_sel: */ + /* 0=Mark last coeff at the end of an 8x8 block, */ + /* 1=Mark last coeff at the end of an MCU */ + /* 2=Mark last coeff at the end of a scan */ + data32 |= lastcoeff_sel << 16; + /* [15] jpeg_quant_sel_comp2 */ + data32 |= ((q_sel_comp2 == q_sel_comp0) ? 0 : 1) << 15; + /* [14] jpeg_v_factor_comp2 */ + data32 |= v_factor_comp2 << 14; + /* [13] jpeg_h_factor_comp2 */ + data32 |= h_factor_comp2 << 13; + /* [12] jpeg_comp2_en */ + data32 |= 1 << 12; + /* [11] jpeg_quant_sel_comp1 */ + data32 |= ((q_sel_comp1 == q_sel_comp0) ? 0 : 1) << 11; + /* [10] jpeg_v_factor_comp1 */ + data32 |= v_factor_comp1 << 10; + /* [9] jpeg_h_factor_comp1 */ + data32 |= h_factor_comp1 << 9; + /* [8] jpeg_comp1_en */ + data32 |= 1 << 8; + /* [7] jpeg_quant_sel_comp0 */ + data32 |= 0 << 7; + /* [6] jpeg_v_factor_comp0 */ + data32 |= v_factor_comp0 << 6; + /* [5] jpeg_h_factor_comp0 */ + data32 |= h_factor_comp0 << 5; + /* [4] jpeg_comp0_en */ + data32 |= 1 << 4; + /* [3:1] jdct_intr_sel:0=Disable intr; */ + /* 1=Intr at end of each 8x8 block of DCT input; */ + /* 2=Intr at end of each MCU of DCT input; */ + /* 3=Intr at end of a scan of DCT input; */ + /* 4=Intr at end of each 8x8 block of DCT output; */ + /* 5=Intr at end of each MCU of DCT output; */ + /* 6=Intr at end of a scan of DCT output. */ + data32 |= jdct_intr_sel << 1; + /* [0] jpeg_en */ + data32 |= 1 << 0; + WRITE_HREG(HCODEC_QDCT_JPEG_CTRL, data32); + + data32 = 0; + data32 |= ((ac_huff_sel_comp2 == ac_huff_sel_comp0)? 
0 : 1) << 29; // [ 29] jpeg_comp2_ac_table_sel + data32 |= ((dc_huff_sel_comp2 == dc_huff_sel_comp0)? 0 : 1) << 28; // [ 28] jpeg_comp2_dc_table_sel + /* [26:25] jpeg_comp2_cnt_max */ + data32 |= ((h_factor_comp2 + 1) * (v_factor_comp2 + 1) - 1) << 25; + /* [24] jpeg_comp2_en */ + data32 |= 1 << 24; + data32 |= ((ac_huff_sel_comp1 == ac_huff_sel_comp0)? 0 : 1) << 21; // [ 21] jpeg_comp1_ac_table_sel + data32 |= ((dc_huff_sel_comp1 == dc_huff_sel_comp0)? 0 : 1) << 20; // [ 20] jpeg_comp1_dc_table_sel + /* [18:17] jpeg_comp1_cnt_max */ + data32 |= ((h_factor_comp1 + 1) * (v_factor_comp1 + 1) - 1) << 17; + /* [16] jpeg_comp1_en */ + data32 |= 1 << 16; + /* [13] jpeg_comp0_ac_table_sel */ + data32 |= 0 << 13; + /* [12] jpeg_comp0_dc_table_sel */ + data32 |= 0 << 12; + /* [10:9] jpeg_comp0_cnt_max */ + data32 |= ((h_factor_comp0 + 1) * (v_factor_comp0 + 1) - 1) << 9; + /* [8] jpeg_comp0_en */ + data32 |= 1 << 8; + /* [0] jpeg_en, will be enbled by amrisc */ + data32 |= 0 << 0; + WRITE_HREG(HCODEC_VLC_JPEG_CTRL, data32); + + WRITE_HREG(HCODEC_QDCT_MB_CONTROL, + (1 << 9) | /* mb_info_soft_reset */ + (1 << 0)); /* mb read buffer soft reset */ + + WRITE_HREG(HCODEC_QDCT_MB_CONTROL, + (0 << 28) | /* ignore_t_p8x8 */ + (0 << 27) | /* zero_mc_out_null_non_skipped_mb */ + (0 << 26) | /* no_mc_out_null_non_skipped_mb */ + (0 << 25) | /* mc_out_even_skipped_mb */ + (0 << 24) | /* mc_out_wait_cbp_ready */ + (0 << 23) | /* mc_out_wait_mb_type_ready */ + (0 << 29) | /* ie_start_int_enable */ + (0 << 19) | /* i_pred_enable */ + (0 << 20) | /* ie_sub_enable */ + (0 << 18) | /* iq_enable */ + (0 << 17) | /* idct_enable */ + (0 << 14) | /* mb_pause_enable */ + (1 << 13) | /* q_enable */ + (1 << 12) | /* dct_enable */ + (0 << 10) | /* mb_info_en */ + (0 << 3) | /* endian */ + (0 << 1) | /* mb_read_en */ + (0 << 0)); /* soft reset */ + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) { + // INIT_ENCODER + WRITE_HREG(HCODEC_VLC_TOTAL_BYTES, 0); + WRITE_HREG(HCODEC_VLC_INT_CONTROL, 
0);// disable vlc interrupt + + WRITE_HREG(HCODEC_HENC_SCRATCH_0, 0);// mtspi ENCODER_IDLE = ( ENCODER_STATUS + WRITE_HREG(HCODEC_HENC_SCRATCH_1, 0xffffffff);// reset MCU_XY_REG + + WRITE_HREG(HCODEC_ASSIST_AMR1_INT0, 0x15);// vb_full_isr + WRITE_HREG(HCODEC_ASSIST_AMR1_INT1, 8);// vlc_isr + WRITE_HREG(HCODEC_ASSIST_AMR1_INT3, 0x14);// qdct_isr + } + + /* Assember JPEG file header */ + prepare_jpeg_header(wq); +} + +static void jpegenc_init_output_buffer(struct jpegenc_wq_s *wq) +{ + WRITE_HREG(HCODEC_VLC_VB_MEM_CTL, + ((1 << 31) | (0x3f << 24) | + (0x20 << 16) | (2 << 0))); + + WRITE_HREG(HCODEC_VLC_VB_START_PTR, wq->BitstreamStart); + WRITE_HREG(HCODEC_VLC_VB_WR_PTR, wq->BitstreamStart); + WRITE_HREG(HCODEC_VLC_VB_SW_RD_PTR, wq->BitstreamStart); + WRITE_HREG(HCODEC_VLC_VB_END_PTR, wq->BitstreamEnd); + WRITE_HREG(HCODEC_VLC_VB_CONTROL, 1); + WRITE_HREG(HCODEC_VLC_VB_CONTROL, + ((0 << 14) | (7 << 3) | + (1 << 1) | (0 << 0))); +} + +static void jpegenc_buffspec_init(struct jpegenc_wq_s *wq) +{ + /* input dct buffer config */ + wq->InputBuffStart = wq->buf_start + gJpegenc.mem.bufspec->input.buf_start; + wq->InputBuffEnd = wq->InputBuffStart + gJpegenc.mem.bufspec->input.buf_size - 1; + jenc_pr(LOG_INFO, "InputBuffStart is 0x%x\n", wq->InputBuffStart); + + /* assit stream buffer config */ + wq->AssitStart = wq->buf_start + gJpegenc.mem.bufspec->assit.buf_start; + wq->AssitEnd = wq->AssitStart + gJpegenc.mem.bufspec->assit.buf_size - 1; + /* output stream buffer config */ + wq->BitstreamStart = wq->buf_start + gJpegenc.mem.bufspec->bitstream.buf_start; + wq->BitstreamEnd = wq->BitstreamStart + gJpegenc.mem.bufspec->bitstream.buf_size - 1; + jenc_pr(LOG_INFO, "BitstreamStart is 0x%x\n", wq->BitstreamStart); + + wq->AssitstreamStartVirtAddr = phys_to_virt(wq->AssitStart); + jenc_pr(LOG_INFO, "AssitstreamStartVirtAddr is %p\n", wq->AssitstreamStartVirtAddr); +} + +/* for temp */ +#define HCODEC_MFDIN_REGC_MBLP (HCODEC_MFDIN_REGB_AMPC + 0x1) +#define HCODEC_MFDIN_REG0D 
(HCODEC_MFDIN_REGB_AMPC + 0x2) +#define HCODEC_MFDIN_REG0E (HCODEC_MFDIN_REGB_AMPC + 0x3) +#define HCODEC_MFDIN_REG0F (HCODEC_MFDIN_REGB_AMPC + 0x4) +#define HCODEC_MFDIN_REG10 (HCODEC_MFDIN_REGB_AMPC + 0x5) +#define HCODEC_MFDIN_REG11 (HCODEC_MFDIN_REGB_AMPC + 0x6) +#define HCODEC_MFDIN_REG12 (HCODEC_MFDIN_REGB_AMPC + 0x7) +#define HCODEC_MFDIN_REG13 (HCODEC_MFDIN_REGB_AMPC + 0x8) +#define HCODEC_MFDIN_REG14 (HCODEC_MFDIN_REGB_AMPC + 0x9) +#define HCODEC_MFDIN_REG15 (HCODEC_MFDIN_REGB_AMPC + 0xa) +#define HCODEC_MFDIN_REG16 (HCODEC_MFDIN_REGB_AMPC + 0xb) + +static void mfdin_basic_jpeg( + u32 input, u8 iformat, u8 oformat, u32 picsize_x, + u32 picsize_y, u8 r2y_en, u8 ifmt_extra, + int mfdin_canvas0_stride, + int mfdin_canvas1_stride, + int mfdin_canvas2_stride, + int mfdin_canvas0_blkmode, + int mfdin_canvas1_blkmode, + int mfdin_canvas2_blkmode, + int mfdin_canvas0_addr, + int mfdin_canvas1_addr, + int mfdin_canvas2_addr, + int mfdin_canvas_bias, + bool mfdin_big_endian) +{ + u8 dsample_en; /* Downsample Enable */ + u8 interp_en; /* Interpolation Enable */ + u8 y_size; /* 0:16 Pixels for y direction pickup; 1:8 pixels */ + u8 r2y_mode; /* RGB2YUV Mode, range(0~3) */ + /* mfdin_reg3_canv[25:24]; */ + /* bytes per pixel in x direction for index0, 0:half 1:1 2:2 3:3 */ + u8 canv_idx0_bppx; + /* mfdin_reg3_canv[27:26]; */ + /* bytes per pixel in x direction for index1-2, 0:half 1:1 2:2 3:3 */ + u8 canv_idx1_bppx; + /* mfdin_reg3_canv[29:28]; */ + /* bytes per pixel in y direction for index0, 0:half 1:1 2:2 3:3 */ + u8 canv_idx0_bppy; + /* mfdin_reg3_canv[31:30]; */ + /* bytes per pixel in y direction for index1-2, 0:half 1:1 2:2 3:3 */ + u8 canv_idx1_bppy; + u8 ifmt444, ifmt422, ifmt420, linear_bytes4p; + u32 linear_bytesperline; + int mfdin_input_mode = 0; + //s32 reg_offset; + bool format_err = false; + u32 data32; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TXL) { + if ((iformat == 7) && (ifmt_extra > 2)) + format_err = true; + } else if (iformat == 7) + 
format_err = true; + + if (format_err) { + jenc_pr(LOG_ERROR, + "mfdin format err, iformat:%d, ifmt_extra:%d\n", + iformat, ifmt_extra); + return; + } + if (iformat != 7) + ifmt_extra = 0; + + ifmt444 = ((iformat == 1) || (iformat == 5) || (iformat == 8) + || (iformat == 9) || (iformat == 12)) ? 1 : 0; + if (iformat == 7 && ifmt_extra == 1) + ifmt444 = 1; + ifmt422 = ((iformat == 0) || (iformat == 10)) ? 1 : 0; + if (iformat == 7 && ifmt_extra != 1) + ifmt422 = 1; + ifmt420 = ((iformat == 2) || (iformat == 3) || (iformat == 4) + || (iformat == 11)) ? 1 : 0; + dsample_en = ((ifmt444 && (oformat != 2)) + || (ifmt422 && (oformat == 0))) ? 1 : 0; + interp_en = ((ifmt422 && (oformat == 2)) + || (ifmt420 && (oformat != 0))) ? 1 : 0; + y_size = (oformat != 0) ? 1 : 0; + /* r2y_mode = (r2y_en == 1) ? 1 : 0; */ + r2y_mode = 1; + canv_idx0_bppx = (iformat == 1) ? 3 : (iformat == 0) ? 2 : 1; + canv_idx1_bppx = (iformat == 4) ? 0 : 1; + canv_idx0_bppy = 1; + canv_idx1_bppy = (iformat == 5) ? 1 : 0; + + if ((iformat == 8) || (iformat == 9) || (iformat == 12)) + linear_bytes4p = 3; + else if (iformat == 10) + linear_bytes4p = 2; + else if (iformat == 11) + linear_bytes4p = 1; + else + linear_bytes4p = 0; + linear_bytesperline = picsize_x * linear_bytes4p; + + if (iformat < 8) + mfdin_input_mode = 0; + else + mfdin_input_mode = 1; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) + mfdin_input_mode = 2; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB) { + reg_offset = -8; + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) + WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset), + (picsize_x << 16) | (picsize_y << 0)); + else + WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset), + (picsize_x << 14) | (picsize_y << 0)); + } else { + reg_offset = 0; + WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset), + (picsize_x << 12) | (picsize_y << 0)); + } + + WRITE_HREG((HCODEC_MFDIN_REG1_CTRL + reg_offset), + (iformat << 0) | + (oformat << 4) | + (dsample_en << 6) | + (y_size << 8) 
| + (interp_en << 9) | + (r2y_en << 12) | + (r2y_mode << 13) | + (ifmt_extra << 16) | + (0 <<19) | // 0:NR Not Enabled + (2 <<29) | // 0:H264_I_PIC_ALL_4x4, 1:H264_P_PIC_Y_16x16_C_8x8, 2:JPEG_ALL_8x8, 3:Reserved + (0 <<31)); // 0:YC interleaved 1:YC non-interleaved(for JPEG) + + if (mfdin_input_mode == 0) { + WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset), + (input & 0xffffff) | + (canv_idx1_bppy << 30) | + (canv_idx0_bppy << 28) | + (canv_idx1_bppx << 26) | + (canv_idx0_bppx << 24)); + WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset), + (0 << 16) | (0 << 0)); + WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), 0); + } else if (mfdin_input_mode == 1) { + WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset), + (canv_idx1_bppy << 30) | + (canv_idx0_bppy << 28) | + (canv_idx1_bppx << 26) | + (canv_idx0_bppx << 24)); + WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset), + (linear_bytes4p << 16) | (linear_bytesperline << 0)); + WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), input); + } else if (mfdin_input_mode == 2) { + WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset), + (canv_idx1_bppy << 30) | + (canv_idx0_bppy << 28) | + (canv_idx1_bppx << 26) | + (canv_idx0_bppx << 24)); + WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset), + mfdin_canvas0_stride << 0); + WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), mfdin_canvas0_addr); + + WRITE_HREG(HCODEC_MFDIN_REG17, mfdin_canvas1_addr); // U canvas initial address + WRITE_HREG(HCODEC_MFDIN_REG18, mfdin_canvas2_addr); // V canvas initial address + WRITE_HREG(HCODEC_MFDIN_REG19, (mfdin_canvas1_stride << 16) | // U canvas stride + (mfdin_canvas2_stride << 0)); + + data32 = READ_HREG(HCODEC_MFDIN_REG6_DCFG + reg_offset); + data32 = data32 & 0x3ff; + + if (jpeg_in_full_hcodec) { + pr_err("JPEG_IN_FULL_HCODEC\n"); + data32 |= (0<<16); + + if(mfdin_ambus_canv_conv) { + data32 |= (1<<17); // AMBUS + } + } else { + data32 |= (1 << 16); // AXI Enable + } + + data32 |= (mfdin_canvas0_blkmode << 14) | // V canvas block mode + 
(mfdin_canvas1_blkmode << 12) | // U canvas block mode + (mfdin_canvas2_blkmode << 10); // Y canvas block mode + + WRITE_HREG(HCODEC_MFDIN_REG6_DCFG + reg_offset, data32); + + if (mfdin_canvas_bias) + WRITE_HREG(HCODEC_MFDIN_REGA_CAV1 + reg_offset, mfdin_canvas_bias); + } + + if (!mfdin_big_endian) { + WRITE_HREG((HCODEC_MFDIN_REG9_ENDN + reg_offset), + (7 << 0) | (6 << 3) | (5 << 6) | + (4 << 9) | (3 << 12) | (2 << 15) | + (1 << 18) | (0 << 21)); + } + + if (jpeg_in_full_hcodec) {//#ifdef JPEG_IN_FULL_HCODEC + data32 = READ_HREG(HCODEC_MFDIN_REG3_CANV + reg_offset); + WRITE_HREG(HCODEC_MFDIN_REG3_CANV + reg_offset, data32|(0x1 << 8)|(0x2 << 16)); + } + + data32 = READ_HREG(HCODEC_MFDIN_REG7_SCMD + reg_offset); + WRITE_HREG(HCODEC_MFDIN_REG7_SCMD + reg_offset, data32 | (0x1 << 28)); // MFDIN Enabled + + jenc_pr(LOG_INFO, "MFDIN Enabled\n"); + + return; +} + +//#define CONFIG_AMLOGIC_MEDIA_CANVAS + +static s32 set_jpeg_input_format(struct jpegenc_wq_s *wq, + struct jpegenc_request_s *cmd) +{ + s32 ret = 0; + u8 iformat = JPEGENC_MAX_FRAME_FMT; + u8 oformat = JPEGENC_MAX_FRAME_FMT; + u8 r2y_en = 0; + u32 picsize_x = 0, picsize_y = 0; + u32 input = cmd->src; + u8 ifmt_extra = 0; + int mfdin_canvas0_stride = 0, mfdin_canvas1_stride = 0, mfdin_canvas2_stride = 0; + int mfdin_canvas0_blkmode = 0, mfdin_canvas1_blkmode = 0, mfdin_canvas2_blkmode = 0; + int mfdin_canvas0_addr = 0, mfdin_canvas1_addr = 0, mfdin_canvas2_addr = 0; + int mfdin_canvas_height = 0; + int mfdin_canvas_bias = 0; + bool mfdin_big_endian = false; + u32 block_mode = 0; + +#ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + u32 canvas_w = 0; +#endif + + jenc_pr(LOG_INFO, "************begin set input format**************\n"); + jenc_pr(LOG_INFO, "type is %d\n", cmd->type); + jenc_pr(LOG_INFO, "input_fmt is %d\n", cmd->input_fmt); + jenc_pr(LOG_INFO, "output_fmt is %d\n", cmd->output_fmt); + jenc_pr(LOG_INFO, "input is 0x%x\n", cmd->src); + jenc_pr(LOG_INFO, "size is %d\n", cmd->framesize); + jenc_pr(LOG_INFO, "quality 
is %d\n", cmd->jpeg_quality); + jenc_pr(LOG_INFO, "quant tbl_id is %d\n", cmd->QuantTable_id); + jenc_pr(LOG_INFO, "flush flag is %d\n", cmd->flush_flag); + jenc_pr(LOG_INFO, "block mode is %d\n", cmd->block_mode); + jenc_pr(LOG_INFO, "************end set input format**************\n"); + + if ((cmd->type == JPEGENC_LOCAL_BUFF) || + (cmd->type == JPEGENC_DMA_BUFF) || + (cmd->type == JPEGENC_PHYSICAL_BUFF)) { + + if (cmd->type == JPEGENC_LOCAL_BUFF) { + if (cmd->flush_flag & JPEGENC_FLUSH_FLAG_INPUT) + dma_flush(wq->InputBuffStart, cmd->framesize); + } + + if (cmd->type == JPEGENC_LOCAL_BUFF || cmd->type == JPEGENC_DMA_BUFF) + input = wq->InputBuffStart; + + picsize_x = ((cmd->encoder_width + 15) >> 4) << 4; + picsize_y = ((cmd->encoder_height + 15) >> 4) << 4; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) { + /* + * mfdin in fmt: 0:422 single, 1:444 single, 2:NV21, 3:NV12, 4:420 plane, 5:444 plane + * mfdin out fmt: 0:420, 1:422, 2:444 + * picture fmt: 0:RGB, 1:444, 2:422, 3:420 + * (out, pic) : (0,3), (1,2), (2,1) + */ + if (cmd->input_fmt == JPEGENC_FMT_YUV422_SINGLE) + iformat = 0; + + else if (cmd->input_fmt == JPEGENC_FMT_YUV444_SINGLE) + iformat = 1; + + else if (cmd->input_fmt == JPEGENC_FMT_NV21) + iformat = 2; + + else if (cmd->input_fmt == JPEGENC_FMT_NV12) + iformat = 3; + + else if (cmd->input_fmt == JPEGENC_FMT_YUV420) + iformat = 4; + + else if (cmd->input_fmt == JPEGENC_FMT_YUV444_PLANE) + iformat = 5; + + if (cmd->output_fmt == JPEGENC_FMT_YUV420) + oformat = 0; + else if (cmd->output_fmt == JPEGENC_FMT_YUV422_SINGLE) + oformat = 1; + else if (cmd->output_fmt == JPEGENC_FMT_YUV444_SINGLE) + oformat = 2; + + block_mode = cmd->block_mode; + mfdin_canvas0_stride = cmd->y_stride; + mfdin_canvas1_stride = cmd->u_stride; + mfdin_canvas2_stride = cmd->v_stride; + mfdin_canvas_height = cmd->h_stride; + + if (simulation_enable) { + if (g_block_mode) + block_mode = g_block_mode; + if (g_canv0_stride) + mfdin_canvas0_stride = g_canv0_stride; + if 
(g_canv1_stride) + mfdin_canvas1_stride = g_canv1_stride; + if (g_canv2_stride) + mfdin_canvas2_stride = g_canv2_stride; + if (g_canvas_height) + mfdin_canvas_height = g_canvas_height; + } + + if (block_mode) { + mfdin_canvas0_blkmode = 1; + mfdin_canvas1_blkmode = 1; + mfdin_canvas2_blkmode = 1; + } else { + mfdin_canvas0_blkmode = 0; + mfdin_canvas1_blkmode = 0; + mfdin_canvas2_blkmode = 0; + } + + if ((iformat == 0 && oformat == 0) || /*case1013, 422 single -> 420*/ + (iformat == 0 && oformat == 1) || /*case1002, 422 single -> 422*/ + (iformat == 0 && oformat == 2) || /*case1004, 422 single -> 444*/ + (iformat == 1 && oformat == 1) || /*case1003, 444 single -> 444*/ + (iformat == 1 && oformat == 0) || + (iformat == 1 && oformat == 2)) { /*case1005, 444 single -> 444*/ + mfdin_canvas0_addr = input; + mfdin_canvas1_addr = input; + mfdin_canvas2_addr = input; + + if (iformat == 0) { + mfdin_canvas0_stride = cmd->y_stride * 2; + mfdin_canvas1_stride = cmd->u_stride * 2; + mfdin_canvas2_stride = cmd->v_stride * 3; + } else if (iformat == 1) { + mfdin_canvas0_stride = cmd->y_stride * 3; + mfdin_canvas1_stride = cmd->u_stride * 3; + mfdin_canvas2_stride = cmd->v_stride * 3; + } + } else if ((iformat == 2 && oformat == 0) || + (iformat == 3 && oformat == 0) || /*case1001, NV21 -> 420*/ + (iformat == 3 && oformat == 1) || /*case1000, NV21 -> 422*/ + (iformat == 3 && oformat == 2) || /*case1006, NV21 -> 444*/ + (iformat == 2 && oformat == 2) || + (iformat == 2 && oformat == 1)) { /*case1006, NV12 -> 444, linear*/ + mfdin_canvas0_addr = input; + mfdin_canvas1_addr = input + mfdin_canvas0_stride * mfdin_canvas_height; + mfdin_canvas2_addr = mfdin_canvas1_addr; + } else if ((iformat == 4 && oformat == 0) || /*case1010, 420 plane -> 420*/ + (iformat == 4 && oformat == 2) || + (iformat == 4 && oformat == 1)) { /*case1008, case1011, case1012, 420 plane -> 444*/ + if (!simulation_enable) { + mfdin_canvas1_stride = mfdin_canvas0_stride / 2; + mfdin_canvas2_stride = 
mfdin_canvas0_stride / 2; + } + mfdin_canvas0_addr = input; + mfdin_canvas1_addr = input + mfdin_canvas0_stride * mfdin_canvas_height; + + mfdin_canvas2_addr = input + mfdin_canvas0_stride * mfdin_canvas_height + + mfdin_canvas0_stride * mfdin_canvas_height / 4; + + jenc_pr(LOG_INFO, "%x:%x:%x, mfdin_canvas0_stride=%d, mfdin_canvas1_stride=%d, mfdin_canvas2_stride=%d, mfdin_canvas_height=%d\n", + mfdin_canvas0_addr, mfdin_canvas1_addr, mfdin_canvas2_addr, + mfdin_canvas0_stride, + mfdin_canvas1_stride, + mfdin_canvas2_stride, + mfdin_canvas_height); + + jenc_pr(LOG_INFO, "process yuv420p input, uoff=%d, voff=%d\n", + mfdin_canvas0_stride * mfdin_canvas_height, + mfdin_canvas0_stride * mfdin_canvas_height + + mfdin_canvas0_stride * mfdin_canvas_height / 4); + } else if (iformat == 4 && oformat == 1) { /*case1009, 420 plane -> 422*/ + if (!simulation_enable) { + mfdin_canvas1_stride = mfdin_canvas0_stride / 2; + mfdin_canvas2_stride = mfdin_canvas0_stride / 2; + } + mfdin_canvas0_addr = input; + mfdin_canvas1_addr = input + mfdin_canvas0_stride * mfdin_canvas_height; + mfdin_canvas2_addr = input + mfdin_canvas0_stride * mfdin_canvas_height + + mfdin_canvas0_stride * mfdin_canvas_height / 4; + + //mfdin_canvas_bias = mfdin_canvas1_stride << 16; + } else if (iformat == 5 /*&& oformat == 0*/) { /*case1007, 444 plane -> 420*/ + mfdin_canvas1_stride = mfdin_canvas0_stride; + mfdin_canvas2_stride = mfdin_canvas0_stride; + mfdin_canvas0_addr = input; + mfdin_canvas1_addr = input + mfdin_canvas0_stride * mfdin_canvas_height; + mfdin_canvas2_addr = mfdin_canvas1_addr + mfdin_canvas1_stride * mfdin_canvas_height; + //mfdin_big_endian = true; + } else { + pr_err("config input or output format err!\n"); + return -1; + } + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) { + if ((cmd->input_fmt == JPEGENC_FMT_RGB565) + || (cmd->input_fmt >= JPEGENC_MAX_FRAME_FMT)) + return -1; + + if (cmd->output_fmt == JPEGENC_FMT_YUV420) { + oformat = 0; + } else if (cmd->output_fmt == 
JPEGENC_FMT_YUV422_SINGLE) { + oformat = 1; + } else if (cmd->output_fmt == JPEGENC_FMT_YUV444_SINGLE) { + oformat = 2; + } + + if ((cmd->input_fmt <= JPEGENC_FMT_YUV444_PLANE) || + (cmd->input_fmt >= JPEGENC_FMT_YUV422_12BIT)) + r2y_en = 0; + else + r2y_en = 1; + + if (cmd->input_fmt >= JPEGENC_FMT_YUV422_12BIT) { + iformat = 7; + ifmt_extra = + cmd->input_fmt - JPEGENC_FMT_YUV422_12BIT; + + #ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + + if (cmd->input_fmt == JPEGENC_FMT_YUV422_12BIT) + canvas_w = picsize_x * 24 / 8; + else if (cmd->input_fmt == JPEGENC_FMT_YUV444_10BIT) + canvas_w = picsize_x * 32 / 8; + else + canvas_w = (picsize_x * 20 + 7) / 8; + canvas_w = ((canvas_w + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ENC_CANVAS_OFFSET; + input = input & 0xff; + #endif + } else if (cmd->input_fmt == JPEGENC_FMT_YUV422_SINGLE) { + iformat = 0; + + #ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + canvas_w = picsize_x * 2; + canvas_w = ((canvas_w + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ENC_CANVAS_OFFSET; + #endif + } else if ((cmd->input_fmt == JPEGENC_FMT_YUV444_SINGLE) + || (cmd->input_fmt == JPEGENC_FMT_RGB888)) { + iformat = 1; + if (cmd->input_fmt == JPEGENC_FMT_RGB888) + r2y_en = 1; + + #ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + canvas_w = picsize_x * 3; + canvas_w = ((canvas_w + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ENC_CANVAS_OFFSET; + #endif + } else if ((cmd->input_fmt == JPEGENC_FMT_NV21) + || (cmd->input_fmt == JPEGENC_FMT_NV12)) { + iformat = (cmd->input_fmt == JPEGENC_FMT_NV21) ? 
2 : 3; + #ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + canvas_w = ((cmd->encoder_width + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 1, + input + canvas_w * picsize_y, canvas_w, + picsize_y / 2, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ((ENC_CANVAS_OFFSET + 1) << 8) | + ENC_CANVAS_OFFSET; + #endif + } else if (cmd->input_fmt == JPEGENC_FMT_YUV420) { + iformat = 4; + + #ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + canvas_w = ((cmd->encoder_width + 63) >> 6) << 6; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 2, + input + canvas_w * picsize_y, + canvas_w / 2, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 2, + input + canvas_w * picsize_y * 5 / 4, + canvas_w / 2, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ((ENC_CANVAS_OFFSET + 2) << 16) | + ((ENC_CANVAS_OFFSET + 1) << 8) | + ENC_CANVAS_OFFSET; + #endif + } else if ((cmd->input_fmt == JPEGENC_FMT_YUV444_PLANE) + || (cmd->input_fmt == JPEGENC_FMT_RGB888_PLANE)) { + iformat = 5; + if (cmd->input_fmt == JPEGENC_FMT_RGB888_PLANE) + r2y_en = 1; + + #ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + canvas_w = ((cmd->encoder_width + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 1, + input + canvas_w * picsize_y, canvas_w, + picsize_y, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 2, + input + canvas_w * picsize_y * 2, + canvas_w, picsize_y, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ((ENC_CANVAS_OFFSET + 2) << 16) | + ((ENC_CANVAS_OFFSET + 1) << 8) | + ENC_CANVAS_OFFSET; + #endif + } else if (cmd->input_fmt == 
JPEGENC_FMT_RGBA8888) { + iformat = 12; + r2y_en = 1; + } + ret = 0; + } + + } else { + if ((cmd->input_fmt == JPEGENC_FMT_RGB565) + || (cmd->input_fmt >= JPEGENC_MAX_FRAME_FMT)) + return -1; + + if (cmd->output_fmt == JPEGENC_FMT_YUV422_SINGLE) + oformat = 1; + else + oformat = 0; + + if ((cmd->input_fmt <= JPEGENC_FMT_YUV444_PLANE) || + (cmd->input_fmt >= JPEGENC_FMT_YUV422_12BIT)) + r2y_en = 0; + else + r2y_en = 1; + + if (cmd->input_fmt >= JPEGENC_FMT_YUV422_12BIT) { + iformat = 7; + ifmt_extra = + cmd->input_fmt - JPEGENC_FMT_YUV422_12BIT; + +#ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + if (cmd->input_fmt == JPEGENC_FMT_YUV422_12BIT) + canvas_w = picsize_x * 24 / 8; + else if (cmd->input_fmt == JPEGENC_FMT_YUV444_10BIT) + canvas_w = picsize_x * 32 / 8; + else + canvas_w = (picsize_x * 20 + 7) / 8; + canvas_w = ((canvas_w + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ENC_CANVAS_OFFSET; + input = input & 0xff; +#endif + } else if (cmd->input_fmt == JPEGENC_FMT_YUV422_SINGLE) { + iformat = 0; + +#ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + canvas_w = picsize_x * 2; + canvas_w = ((canvas_w + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ENC_CANVAS_OFFSET; +#endif + } else if ((cmd->input_fmt == JPEGENC_FMT_YUV444_SINGLE) + || (cmd->input_fmt == JPEGENC_FMT_RGB888)) { + iformat = 1; + if (cmd->input_fmt == JPEGENC_FMT_RGB888) + r2y_en = 1; + +#ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + canvas_w = picsize_x * 3; + canvas_w = ((canvas_w + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ENC_CANVAS_OFFSET; +#endif + } else if ((cmd->input_fmt == JPEGENC_FMT_NV21) + || (cmd->input_fmt == JPEGENC_FMT_NV12)) { + iformat = (cmd->input_fmt == JPEGENC_FMT_NV21) ? 
2 : 3; + +#ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + canvas_w = ((cmd->encoder_width + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 1, + input + canvas_w * picsize_y, canvas_w, + picsize_y / 2, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ((ENC_CANVAS_OFFSET + 1) << 8) | + ENC_CANVAS_OFFSET; +#endif + } else if (cmd->input_fmt == JPEGENC_FMT_YUV420) { + iformat = 4; + +#ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + canvas_w = ((cmd->encoder_width + 63) >> 6) << 6; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 2, + input + canvas_w * picsize_y, + canvas_w / 2, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 2, + input + canvas_w * picsize_y * 5 / 4, + canvas_w / 2, picsize_y / 2, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ((ENC_CANVAS_OFFSET + 2) << 16) | + ((ENC_CANVAS_OFFSET + 1) << 8) | + ENC_CANVAS_OFFSET; +#endif + } else if ((cmd->input_fmt == JPEGENC_FMT_YUV444_PLANE) + || (cmd->input_fmt == JPEGENC_FMT_RGB888_PLANE)) { + iformat = 5; + if (cmd->input_fmt == JPEGENC_FMT_RGB888_PLANE) + r2y_en = 1; + +#ifdef CONFIG_AMLOGIC_MEDIA_CANVAS + canvas_w = ((cmd->encoder_width + 31) >> 5) << 5; + canvas_config_proxy(ENC_CANVAS_OFFSET, + input, + canvas_w, picsize_y, + CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 1, + input + canvas_w * picsize_y, canvas_w, + picsize_y, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + canvas_config_proxy(ENC_CANVAS_OFFSET + 2, + input + canvas_w * picsize_y * 2, + canvas_w, picsize_y, CANVAS_ADDR_NOWRAP, + CANVAS_BLKMODE_LINEAR); + input = ((ENC_CANVAS_OFFSET + 2) << 16) | + ((ENC_CANVAS_OFFSET + 1) << 8) | + ENC_CANVAS_OFFSET; +#endif + } else if (cmd->input_fmt == 
JPEGENC_FMT_RGBA8888) { + iformat = 12; + r2y_en = 1; + } + } + ret = 0; + } else if (cmd->type == JPEGENC_CANVAS_BUFF) { + r2y_en = 0; + if (cmd->input_fmt == JPEGENC_FMT_YUV422_SINGLE) { + iformat = 0; + input = input & 0xff; + } else if (cmd->input_fmt == JPEGENC_FMT_YUV444_SINGLE) { + iformat = 1; + input = input & 0xff; + } else if ((cmd->input_fmt == JPEGENC_FMT_NV21) + || (cmd->input_fmt == JPEGENC_FMT_NV12)) { + iformat = (cmd->input_fmt == JPEGENC_FMT_NV21) ? 2 : 3; + input = input & 0xffff; + } else if (cmd->input_fmt == JPEGENC_FMT_YUV420) { + iformat = 4; + input = input & 0xffffff; + } else if ((cmd->input_fmt == JPEGENC_FMT_YUV444_PLANE) + || (cmd->input_fmt == JPEGENC_FMT_RGB888_PLANE)) { + if (cmd->input_fmt == JPEGENC_FMT_RGB888_PLANE) + r2y_en = 1; + iformat = 5; + input = input & 0xffffff; + } else if ((cmd->input_fmt == JPEGENC_FMT_YUV422_12BIT) + || (cmd->input_fmt == JPEGENC_FMT_YUV444_10BIT) + || (cmd->input_fmt == JPEGENC_FMT_YUV422_10BIT)) { + iformat = 7; + ifmt_extra = cmd->input_fmt - JPEGENC_FMT_YUV422_12BIT; + input = input & 0xff; + } else + ret = -1; + } + + if (ret == 0) + mfdin_basic_jpeg(input, iformat, oformat, + picsize_x, picsize_y, r2y_en, ifmt_extra, + mfdin_canvas0_stride, + mfdin_canvas1_stride, + mfdin_canvas2_stride, + mfdin_canvas0_blkmode, + mfdin_canvas1_blkmode, + mfdin_canvas2_blkmode, + mfdin_canvas0_addr, + mfdin_canvas1_addr, + mfdin_canvas2_addr, + mfdin_canvas_bias, + mfdin_big_endian); + return ret; +} + +static void jpegenc_isr_tasklet(ulong data) +{ + struct jpegenc_manager_s *manager = (struct jpegenc_manager_s *)data; + + jenc_pr(LOG_INFO, "encoder is done %d\n", manager->encode_hw_status); + + if ((manager->encode_hw_status == JPEGENC_ENCODER_DONE) + && (manager->process_irq)) { + manager->wq.hw_status = manager->encode_hw_status; + manager->wq.output_size = READ_HREG(HCODEC_VLC_TOTAL_BYTES); + jenc_pr(LOG_INFO, "encoder size %d\n", manager->wq.output_size); + atomic_inc(&manager->wq.ready); + 
wake_up_interruptible(&manager->wq.complete); + } +} + +static irqreturn_t jpegenc_isr(s32 irq_number, void *para) +{ + struct jpegenc_manager_s *manager = (struct jpegenc_manager_s *)para; + jenc_pr(LOG_ALL, "jpegenc intr is fired\n"); + + if (manager->irq_requested == false) + return IRQ_NONE; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) + WRITE_HREG(HCODEC_ASSIST_MBOX2_CLR_REG, 1); + else + WRITE_HREG(HCODEC_ASSIST_MBOX0_CLR_REG, 1); + } else + WRITE_HREG(HCODEC_ASSIST_MBOX2_CLR_REG, 1); + + manager->encode_hw_status = READ_HREG(JPEGENC_ENCODER_STATUS); + + if (manager->encode_hw_status == JPEGENC_ENCODER_DONE) { + jpegenc_time_count_end(&time_cnt); + manager->process_irq = true; + tasklet_schedule(&manager->tasklet); + } + + return IRQ_HANDLED; +} + +static void jpegenc_start(void) +{ + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + WRITE_VREG(DOS_SW_RESET1, (1 << 12) | (1 << 11)); + WRITE_VREG(DOS_SW_RESET1, 0); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + WRITE_HREG(HCODEC_MPSR, 0x0001); +} + +static void _jpegenc_stop(void) +{ + ulong timeout = jiffies + HZ; + + WRITE_HREG(HCODEC_MPSR, 0); + WRITE_HREG(HCODEC_CPSR, 0); + + while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) { + if (time_after(jiffies, timeout)) + break; + } + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB) + WRITE_VREG(DOS_SW_RESET1, + (1 << 12) | (1 << 11) | + (1 << 2) | (1 << 6) | + (1 << 7) | (1 << 8) | + (1 << 14) | (1 << 16) | + (1 << 17)); + else + WRITE_VREG(DOS_SW_RESET1, + (1 << 12) | (1 << 11) | + (1 << 2) | (1 << 6) | + (1 << 7) | (1 << 8) | + (1 << 16) | (1 << 17)); + + WRITE_VREG(DOS_SW_RESET1, 0); + + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); +} + +static void dump_mem(u8 *addr) { + int i; + u8 *offset = 
addr; + + for (i=0;i<dumpmem_line;i++) { + offset += i * 8; + pr_err("%#x\t%#x\t%#x\t%#x\t%#x\t%#x\t%#x\t%#x\n", + *(offset+0), *(offset+1), *(offset+2), *(offset+3), + *(offset+4), *(offset+5), *(offset+6), *(offset+7)); + } +} + +static void __iomem *mc_addr; +static u32 mc_addr_map; +#define MC_SIZE (4096 * 4) +s32 jpegenc_loadmc(const char *p) +{ + ulong timeout; + s32 ret = 0; + //int i=0; + + /* use static mempry*/ + if (mc_addr == NULL) { + mc_addr = kmalloc(MC_SIZE, GFP_KERNEL); + + if (!mc_addr) { + jenc_pr(LOG_ERROR, "jpegenc loadmc iomap mc addr error.\n"); + return -ENOMEM; + } + memset(mc_addr, 0, MC_SIZE); + } + + //ret = get_data_from_name("c1_jpeg_enc", (u8 *)mc_addr); + ret = get_firmware_data(VIDEO_ENC_JPEG, (u8 *)mc_addr); + + dump_mem(mc_addr); + + if (ret < 0) + jenc_pr(LOG_ERROR, "jpegenc microcode fail ret=%d, name: %s.\n", ret, p); + + mc_addr_map = dma_map_single(&gJpegenc.this_pdev->dev, mc_addr, MC_SIZE, DMA_TO_DEVICE); + + WRITE_HREG(HCODEC_MPSR, 0); + WRITE_HREG(HCODEC_CPSR, 0); + + /* Read CBUS register for timing */ + //timeout = READ_HREG(HCODEC_MPSR); + //timeout = READ_HREG(HCODEC_MPSR); + timeout = jiffies + HZ; + WRITE_HREG(HCODEC_IMEM_DMA_ADR, mc_addr_map); + WRITE_HREG(HCODEC_IMEM_DMA_COUNT, 0x1000); + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) + WRITE_HREG(HCODEC_IMEM_DMA_CTRL, (0x8000 | (0xf << 16))); // ucode test c is 0x8000 | (0xf << 16) + else if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) { + pr_err("t3 HCODEC_IMEM_DMA_CTRL (0x8000 | (0 & 0xffff))\n"); + WRITE_HREG(HCODEC_IMEM_DMA_CTRL, (0x8000 | (0 & 0xffff))); // Endian : 4'b1000); + } else + WRITE_HREG(HCODEC_IMEM_DMA_CTRL, (0x8000 | (0 & 0xffff))); // Endian : 4'b1000); + + while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) { + if (time_before(jiffies, timeout)) { + schedule(); + } else { + jenc_pr(LOG_ERROR, "hcodec load mc error\n"); + ret = -EBUSY; + break; + } + } + + dma_unmap_single(&gJpegenc.this_pdev->dev, mc_addr_map, MC_SIZE, DMA_TO_DEVICE); + 
return ret; +} + +static s32 jpegenc_poweron_ex(u32 clock) +{ + u32 frq; + + if (clock == 1) + frq = 200; + else if (clock == 3) + frq = 300; + else + frq = 400; + + jpeg_enc_clk_enable(&g_jpeg_enc_clks, frq); + + /* Powerup HCODEC memories */ + WRITE_VREG(DOS_MEM_PD_HCODEC, 0x0); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) { + pr_err("powering on hcodec\n"); + vdec_poweron(VDEC_HCODEC); + pr_err("hcodec power status after poweron:%d\n", vdec_on(VDEC_HCODEC)); + } else { + pwr_ctrl_psci_smc(PDID_T7_DOS_HCODEC, true); + } + + /* + * [21] hcodec clk_en for henc qdct + * [20] hcodec clk_en for henc vlc + * [19] hcodec clk_en for assist and cbus + * [18] hcodec clk_en for ddr + * [17] hcodec clk_en for vcpu + * [16] hcodec clk_en for hdec assist + * [15] hcodec clk_en for hdec dblk + * [14] reserved + * [13] hcodec clk_en for hdec mc + * [12] hcodec clk_en for hdec pic_dc + */ + WRITE_VREG_BITS(DOS_GCLK_EN0, 0x7fff, 12, 15); + + return 0; +} + +static s32 jpegenc_poweroff_ex(void) +{ + WRITE_VREG_BITS(DOS_GCLK_EN0, 0, 12, 15); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) { + pr_err("powering off hcodec for t7\n"); + vdec_poweroff(VDEC_HCODEC); + pr_err("hcodec power status after poweroff:%d\n", vdec_on(VDEC_HCODEC)); + } else + pwr_ctrl_psci_smc(PDID_T7_DOS_HCODEC, false); + + /* power off HCODEC memories */ + WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL); + + jpeg_enc_clk_disable(&g_jpeg_enc_clks); + + return 0; +} + +#if 0 +bool jpegenc_on(void) +{ + bool hcodec_on; + ulong flags; + + spin_lock_irqsave(&lock, flags); + + hcodec_on = vdec_on(VDEC_HCODEC); + hcodec_on &= (gJpegenc.opened > 0); + + spin_unlock_irqrestore(&lock, flags); + return hcodec_on; +} +#endif + +static s32 jpegenc_poweron(u32 clock) +{ + //ulong flags; + u32 data32; + + //spin_lock_irqsave(&lock, flags); + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_C1) { + data32 = 0; + amports_switch_gate("vdec", 1); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) { + 
WRITE_AOREG(AO_RTI_PWR_CNTL_REG0, + (READ_AOREG(AO_RTI_PWR_CNTL_REG0) & (~0x18))); + udelay(10); + /* Powerup HCODEC */ + /* [1:0] HCODEC */ + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + (READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & (~0x3))); + udelay(10); + } + + WRITE_VREG(DOS_SW_RESET1, 0xffffffff); + WRITE_VREG(DOS_SW_RESET1, 0); + + /* Enable Dos internal clock gating */ + jpegenc_clock_enable(clock); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) { + /* Powerup HCODEC memories */ + WRITE_VREG(DOS_MEM_PD_HCODEC, 0x0); + + /* Remove HCODEC ISO */ + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + (READ_AOREG(AO_RTI_GEN_PWR_ISO0) & (~0x30))); + udelay(10); + } + /* Disable auto-clock gate */ + WRITE_VREG(DOS_GEN_CTRL0, (READ_VREG(DOS_GEN_CTRL0) | 0x1)); + WRITE_VREG(DOS_GEN_CTRL0, (READ_VREG(DOS_GEN_CTRL0) & 0xFFFFFFFE)); + } else { + jpegenc_poweron_ex(clock); + } + //spin_unlock_irqrestore(&lock, flags); + + mdelay(10); + return 0; +} + +static s32 jpegenc_poweroff(void) +{ + //ulong flags; + //spin_lock_irqsave(&lock, flags); + + if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_C1) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) { + /* enable HCODEC isolation */ + WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, + READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x30); + /* power off HCODEC memories */ + WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL); + } + /* disable HCODEC clock */ + jpegenc_clock_disable(); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) { + /* HCODEC power off */ + WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, + READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0x3); + } + + /* release DOS clk81 clock gating */ + amports_switch_gate("vdec", 0); + } else { + jpegenc_poweroff_ex(); + } + //spin_unlock_irqrestore(&lock, flags); + + return 0; +} + +void jpegenc_reset(void) +{ + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB) + WRITE_VREG(DOS_SW_RESET1, + (1 << 2) | (1 << 6) | + (1 << 7) | (1 << 8) | + (1 << 14) | (1 << 
16) | + (1 << 17)); + else + WRITE_VREG(DOS_SW_RESET1, + (1 << 2) | (1 << 6) | (1 << 7) | + (1 << 8) | (1 << 16) | (1 << 17)); + WRITE_VREG(DOS_SW_RESET1, 0); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); + READ_VREG(DOS_SW_RESET1); +} + +static s32 jpegenc_init(void) +{ + jpegenc_poweron(clock_level); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_MG9TV) + WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x32); + else + WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x2); + + jenc_pr(LOG_ALL, "start to load microcode\n"); + + if (!legacy_load && (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7 )) { + char *buf = vmalloc(0x1000 * 16); + int ret = -1; + pr_err("load ucode\n"); + if (get_firmware_data(VIDEO_ENC_JPEG, buf) < 0) { + //amvdec_disable(); + pr_err("get firmware for jpeg enc fail!\n"); + vfree(buf); + return -1; + } + WRITE_HREG(HCODEC_MPSR, 0); + WRITE_HREG(HCODEC_CPSR, 0); + ret = amvdec_loadmc_ex(VFORMAT_JPEG_ENC, NULL, buf); + + if (ret < 0) { + //amvdec_disable(); + vfree(buf); + pr_err("jpegenc: the %s fw loading failed, err: %x\n", + tee_enabled() ? 
"TEE" : "local", ret); + return -EBUSY; + } + vfree(buf); + } else { + if (jpegenc_loadmc(jpegenc_ucode[0]) < 0) + return -EBUSY; + } + + jenc_pr(LOG_ALL, "jpegenc load microcode success.\n"); + gJpegenc.process_irq = false; + + if (request_irq(gJpegenc.irq_num, jpegenc_isr, IRQF_SHARED, + "jpegenc-irq", (void *)&gJpegenc) == 0) + gJpegenc.irq_requested = true; + else + gJpegenc.irq_requested = false; + + WRITE_HREG(JPEGENC_ENCODER_STATUS, JPEGENC_ENCODER_IDLE); + gJpegenc.inited = true; + return 0; +} + +static s32 convert_cmd(struct jpegenc_wq_s *wq, u32 *cmd_info) +{ + if (!wq) { + jenc_pr(LOG_ERROR, "jpegenc convert_cmd error\n"); + return -1; + } + + memset(&wq->cmd, 0, sizeof(struct jpegenc_request_s)); + wq->cmd.type = cmd_info[0]; + wq->cmd.input_fmt = cmd_info[1]; + wq->cmd.output_fmt = cmd_info[2]; + wq->cmd.encoder_width = cmd_info[3]; + wq->cmd.encoder_height = cmd_info[4]; + wq->cmd.framesize = cmd_info[5]; + wq->cmd.src = cmd_info[6]; + wq->cmd.jpeg_quality = cmd_info[7]; + wq->cmd.QuantTable_id = cmd_info[8]; + wq->cmd.flush_flag = cmd_info[9]; + wq->cmd.block_mode = cmd_info[10]; + + wq->cmd.y_stride = cmd_info[11]; + wq->cmd.u_stride = cmd_info[12]; + wq->cmd.v_stride = cmd_info[13]; + wq->cmd.h_stride = cmd_info[14]; + jenc_pr(LOG_DEBUG, "convert_cmd: ystride:%d, h_stride:%d\n", + cmd_info[11], cmd_info[14]); + + if (is_oversize(wq->cmd.encoder_width, + wq->cmd.encoder_height, + wq->max_width * wq->max_height)) { + jenc_pr(LOG_ERROR, + "set encode size %dx%d is larger than supported (%dx%d).\n", + wq->cmd.encoder_width, + wq->cmd.encoder_height, + wq->max_width, + wq->max_height); + return -1; + } + + wq->cmd.jpeg_quality = jpeg_quality_scaling(wq->cmd.jpeg_quality); + if (wq->cmd.QuantTable_id < 4) { + jenc_pr(LOG_INFO, + "JPEGENC_SEL_QUANT_TABLE : %d.\n", + wq->cmd.QuantTable_id); + } else { + wq->cmd.QuantTable_id = 0; + jenc_pr(LOG_ERROR, + "JPEGENC_SEL_QUANT_TABLE invaild. 
target value: %d.\n", + cmd_info[8]); + } + jenc_pr(LOG_INFO, + "target quality : %d, jpeg_quality value: %d.\n", + cmd_info[7], wq->cmd.jpeg_quality); + return 0; +} + +static void jpegenc_start_cmd(struct jpegenc_wq_s *wq) +{ + gJpegenc.process_irq = false; + gJpegenc.encode_hw_status = JPEGENC_ENCODER_IDLE; + + jpegenc_reset(); + + set_jpeg_input_format(wq, &wq->cmd); + + init_jpeg_encoder(wq); + + jpegenc_init_output_buffer(wq); + + /* clear mailbox interrupt */ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) + WRITE_HREG(HCODEC_ASSIST_MBOX2_CLR_REG, 1); + else + WRITE_HREG(HCODEC_ASSIST_MBOX0_CLR_REG, 1); + } else + WRITE_HREG(HCODEC_ASSIST_MBOX2_CLR_REG, 1); + + /* enable mailbox interrupt */ + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_T7) + WRITE_HREG(HCODEC_ASSIST_MBOX2_MASK, 0xffffffff); + else + WRITE_HREG(HCODEC_ASSIST_MBOX0_MASK, 0xffffffff); + } else + WRITE_HREG(HCODEC_ASSIST_MBOX2_MASK, 1); + + gJpegenc.encode_hw_status = JPEGENC_ENCODER_IDLE; + WRITE_HREG(JPEGENC_ENCODER_STATUS, JPEGENC_ENCODER_IDLE); + gJpegenc.process_irq = false; + + time_cnt = jpegenc_time_count_start(); + + jpegenc_start(); + jenc_pr(LOG_INFO, "jpegenc_start\n"); +} + +static void jpegenc_stop(void) +{ + if ((gJpegenc.irq_num >= 0) && + (gJpegenc.irq_requested == true)) { + gJpegenc.irq_requested = false; + free_irq(gJpegenc.irq_num, &gJpegenc); + } + _jpegenc_stop(); + jpegenc_poweroff(); + jenc_pr(LOG_INFO, "jpegenc_stop\n"); +} + +static void dma_flush(u32 buf_start, u32 buf_size) +{ + dma_sync_single_for_device(&gJpegenc.this_pdev->dev, + buf_start, buf_size, DMA_TO_DEVICE); +} + +static void cache_flush(u32 buf_start, u32 buf_size) +{ + dma_sync_single_for_cpu(&gJpegenc.this_pdev->dev, + buf_start, buf_size, DMA_FROM_DEVICE); +} + +static s32 jpegenc_open(struct inode *inode, struct file *file) +{ + struct jpegenc_wq_s *wq; + s32 r; + pr_err("jpegenc 
open, filp=%lu\n", (unsigned long)file); +#ifdef CONFIG_AM_ENCODER + if (amvenc_avc_on() == true) { + jenc_pr(LOG_ERROR, "hcodec in use for AVC Encode now.\n"); + return -EBUSY; + } +#endif + file->private_data = NULL; + + spin_lock(&gJpegenc.sem_lock); + if (gJpegenc.opened > 0) { + spin_unlock(&gJpegenc.sem_lock); + jenc_pr(LOG_ERROR, "jpegenc open busy.\n"); + return -EBUSY; + } + wq = &gJpegenc.wq; + wq->buf_start = gJpegenc.mem.buf_start; + wq->buf_size = gJpegenc.mem.buf_size; + gJpegenc.opened++; + spin_unlock(&gJpegenc.sem_lock); + +#ifdef CONFIG_CMA + if (gJpegenc.use_reserve == false) { + if (gJpegenc.use_cma) { + struct page *cma_pages; + cma_pages = dma_alloc_from_contiguous(&gJpegenc.this_pdev->dev, + gJpegenc.mem.buf_size >> PAGE_SHIFT, 0, true); + if (cma_pages) { + wq->buf_start = page_to_phys(cma_pages); + wq->buf_size = gJpegenc.mem.buf_size; + } else { + jenc_pr(LOG_ERROR, "jpegenc - cma allocation failed\n"); + } + } + + if (!wq->buf_start) { + gJpegenc.use_cma = false; + wq->buf_start = codec_mm_alloc_for_dma(DRIVER_NAME, + gJpegenc.mem.buf_size >> PAGE_SHIFT, 0, 0); + if (wq->buf_start) { + wq->buf_size = gJpegenc.mem.buf_size; + } else { + jenc_pr(LOG_ERROR, + "jpegenc - codec_mm allocation failed\n"); + spin_lock(&gJpegenc.sem_lock); + gJpegenc.opened--; + spin_unlock(&gJpegenc.sem_lock); + return -ENOMEM; + } + } + } +#endif + + jenc_pr(LOG_DEBUG, + "jpegenc - allocated from %s: start:0x%x, %d MB.\n", + gJpegenc.use_reserve ? "reserved" : + gJpegenc.use_cma ? 
"cma" : "codec_mm", + wq->buf_start, gJpegenc.mem.buf_size / SZ_1M); + + spin_lock(&gJpegenc.sem_lock); + init_waitqueue_head(&wq->complete); + atomic_set(&wq->ready, 0); + wq->AssitstreamStartVirtAddr = NULL; + memset(gQuantTable, 0, sizeof(gQuantTable)); + wq->cmd.QuantTable_id = 0; + wq->cmd.jpeg_quality = 90; + wq->max_width = gJpegenc.mem.bufspec->max_width; + wq->max_height = gJpegenc.mem.bufspec->max_height; + wq->headbytes = 0; + file->private_data = (void *)wq; +#ifdef EXTEAN_QUANT_TABLE + gExternalQuantTablePtr = NULL; + external_quant_table_available = false; +#endif + spin_unlock(&gJpegenc.sem_lock); + r = 0; + + return r; +} + +static s32 jpegenc_release(struct inode *inode, struct file *file) +{ + struct jpegenc_wq_s *wq = (struct jpegenc_wq_s *)file->private_data; + + if (wq != &gJpegenc.wq) { + jenc_pr(LOG_ERROR, "jpegenc release error\n"); + return -1; + } + if (gJpegenc.inited) { + jpegenc_stop(); + gJpegenc.inited = false; + } + if (wq->dma_input != NULL) { + enc_dma_buf_unmap(wq->dma_input); + kfree(wq->dma_input); + wq->dma_input = NULL; + } + memset(gQuantTable, 0, sizeof(gQuantTable)); + + if (wq->AssitstreamStartVirtAddr) + wq->AssitstreamStartVirtAddr = NULL; + +#ifdef CONFIG_CMA + if (wq->buf_start) { + if (gJpegenc.use_cma) { + struct page *cma_pages; + cma_pages = phys_to_page(wq->buf_start); + if (!dma_release_from_contiguous(&gJpegenc.this_pdev->dev, cma_pages, + wq->buf_size >> PAGE_SHIFT)) { + jenc_pr(LOG_ERROR, "[%s] failed to release cma buffer\n", __FUNCTION__); + } + } else { + codec_mm_free_for_dma(DRIVER_NAME, wq->buf_start); + } + } +#endif + wq->buf_start = 0; + wq->buf_size = 0; +#ifdef EXTEAN_QUANT_TABLE + kfree(gExternalQuantTablePtr); + gExternalQuantTablePtr = NULL; + external_quant_table_available = false; +#endif + spin_lock(&gJpegenc.sem_lock); + if (gJpegenc.opened > 0) + gJpegenc.opened--; + spin_unlock(&gJpegenc.sem_lock); + jenc_pr(LOG_DEBUG, "jpegenc release\n"); + return 0; +} + +static void 
jpegenc_reconfig_input(struct jpegenc_wq_s *wq, u32 new_addr, u32 new_size) { + wq->InputBuffStart = new_addr; + wq->InputBuffEnd = wq->InputBuffStart + new_size - 1; +} + +static void jpegenc_restore_input(struct jpegenc_wq_s *wq) { + wq->InputBuffStart = wq->buf_start + gJpegenc.mem.bufspec->input.buf_start; + wq->InputBuffEnd = wq->InputBuffStart + gJpegenc.mem.bufspec->input.buf_size - 1; +} +static long jpegenc_ioctl(struct file *file, u32 cmd, ulong arg) +{ + long r = 0; + struct jpegenc_wq_s *wq = (struct jpegenc_wq_s *)file->private_data; +#define MAX_ADDR_INFO_SIZE 30 + u32 addr_info[MAX_ADDR_INFO_SIZE + 4]; + int shared_fd = -1; + //struct jpegenc_frame_params *frm_params; + + struct platform_device *pdev; + unsigned long paddr = 0; + + switch (cmd) { + case JPEGENC_IOC_QUERY_DMA_SUPPORT: + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_QUERY_DMA_SUPPORT\n"); + jenc_pr(LOG_INFO, "use_dma_io=%u\n", use_dma_io); + put_user(use_dma_io, (u32 *)arg); + break; + case JPEGENC_IOC_CONFIG_DMA_INPUT: + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_CONFIG_DMA_INPUT\n"); + if (copy_from_user(&shared_fd, (void *)arg, sizeof(s32))) { + jenc_pr(LOG_ERROR, + "jpegenc JPEGENC_IOC_CONFIG_DMA_INPUT error.\n"); + return -1; + } + + jenc_pr(LOG_INFO, "JPEGENC_IOC_CONFIG_DMA_INPUT, shared_fd:%d\n", + shared_fd); + + if (wq->dma_input != NULL) { + jenc_pr(LOG_INFO, "[%d] release old stale dma buffer\n", __LINE__); + enc_dma_buf_unmap(wq->dma_input); + wq->dma_input->fd = -1; + } + + if (wq->dma_input == NULL) + wq->dma_input = kmalloc(sizeof(struct enc_dma_cfg), GFP_KERNEL); + + if (wq->dma_input != NULL) { + wq->dma_input->dir = DMA_TO_DEVICE; + wq->dma_input->fd = shared_fd; + pdev = gJpegenc.this_pdev; + wq->dma_input->dev = &(pdev->dev); + + r = enc_dma_buf_get_phys(wq->dma_input, &paddr); + + if (r < 0) { + jenc_pr(LOG_ERROR, + "import fd %d failed\n", + wq->dma_input->fd); + wq->dma_input->paddr = NULL; + wq->dma_input->vaddr = NULL; + enc_dma_buf_unmap(wq->dma_input); + 
kfree(wq->dma_input); + return -1; + } + + wq->dma_input->paddr = (void *)paddr; + jenc_pr(LOG_INFO, "paddr %p\n", + wq->dma_input->paddr); + } else { + jenc_pr(LOG_ERROR, "kmalloc struct enc_dma_cfg failed\n"); + return -1; + } + + jpegenc_reconfig_input(wq, paddr, wq->dma_input->size); + break; + case JPEGENC_IOC_RELEASE_DMA_INPUT: + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_RELEASE_DMA_INPUT\n"); + if (wq->dma_input != NULL) { + jenc_pr(LOG_INFO, "[%d] release dma buffer\n", __LINE__); + enc_dma_buf_unmap(wq->dma_input); + wq->dma_input->fd = -1; + // restore to original input buffer config if dma buffer is revoked + jpegenc_restore_input(wq); + } + break; + case JPEGENC_IOC_NEW_CMD: + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_NEW_CMD\n"); + if (copy_from_user(addr_info, (void *)arg, + MAX_ADDR_INFO_SIZE * sizeof(u32))) { + jenc_pr(LOG_ERROR, + "jpegenc get new cmd error.\n"); + return -1; + } + if (!convert_cmd(wq, addr_info)) + jpegenc_start_cmd(wq); + break; + case JPEGENC_IOC_NEW_CMD2: + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_NEW_CMD2\n"); + if (copy_from_user(&(wq->cmd), (void *)arg, (unsigned long) sizeof(struct jpegenc_request_s))) { + jenc_pr(LOG_ERROR, + "jpegenc get new cmd2 error.\n"); + return -1; + } + + dump_requst(&(wq->cmd)); + + if (is_oversize(wq->cmd.encoder_width, + wq->cmd.encoder_height, + wq->max_width * wq->max_height)) { + jenc_pr(LOG_ERROR, + "set encode size %dx%d is larger than supported (%dx%d).\n", + wq->cmd.encoder_width, + wq->cmd.encoder_height, + wq->max_width, + wq->max_height); + return -1; + } + + wq->cmd.jpeg_quality = jpeg_quality_scaling(wq->cmd.jpeg_quality); + + if (wq->cmd.QuantTable_id < 4) { + jenc_pr(LOG_INFO, "JPEGENC_SEL_QUANT_TABLE: %d\n", wq->cmd.QuantTable_id); + } else { + wq->cmd.QuantTable_id = 0; + jenc_pr(LOG_ERROR, "JPEGENC_SEL_QUANT_TABLE invaild, use 0 instead\n"); + } + + jenc_pr(LOG_INFO, "scaled jpeg_quality: %d\n", wq->cmd.jpeg_quality); + + //if (!convert_cmd(wq, addr_info)) + jpegenc_start_cmd(wq); + + 
break; + case JPEGENC_IOC_GET_STAGE: + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_GET_STAGE\n"); + put_user(wq->hw_status, (u32 *)arg); + break; + case JPEGENC_IOC_GET_OUTPUT_SIZE: + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_GET_OUTPUT_SIZE\n"); + cache_flush(wq->BitstreamStart, wq->output_size); + addr_info[0] = wq->headbytes; + addr_info[1] = wq->output_size; + r = copy_to_user((u32 *)arg, addr_info , 2 * sizeof(u32)); + break; + case JPEGENC_IOC_CONFIG_INIT: + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_CONFIG_INIT\n"); + jpegenc_init(); + jpegenc_buffspec_init(wq); + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_CONFIG_INIT end\n"); + break; + case JPEGENC_IOC_GET_BUFFINFO: + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_GET_BUFFINFO\n"); + addr_info[0] = gJpegenc.mem.buf_size; + addr_info[1] = gJpegenc.mem.bufspec->input.buf_start; + addr_info[2] = gJpegenc.mem.bufspec->input.buf_size; + addr_info[3] = gJpegenc.mem.bufspec->assit.buf_start; + addr_info[4] = gJpegenc.mem.bufspec->assit.buf_size; + addr_info[5] = gJpegenc.mem.bufspec->bitstream.buf_start; + addr_info[6] = gJpegenc.mem.bufspec->bitstream.buf_size; + r = copy_to_user((u32 *)arg, addr_info , 7 * sizeof(u32)); + break; + case JPEGENC_IOC_GET_DEVINFO: + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_GET_DEVINFO\n"); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXL) { + /* GXL send same id of GXTVBB to upper*/ + r = copy_to_user((s8 *)arg, JPEGENC_DEVINFO_GXTVBB, + strlen(JPEGENC_DEVINFO_GXTVBB)); + } else if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXTVBB) { + r = copy_to_user((s8 *)arg, JPEGENC_DEVINFO_GXTVBB, + strlen(JPEGENC_DEVINFO_GXTVBB)); + } else if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) { + r = copy_to_user((s8 *)arg, JPEGENC_DEVINFO_GXBB, + strlen(JPEGENC_DEVINFO_GXBB)); + } else if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_MG9TV) { + r = copy_to_user((s8 *)arg, JPEGENC_DEVINFO_G9, + strlen(JPEGENC_DEVINFO_G9)); + } else { + r = copy_to_user((s8 *)arg, JPEGENC_DEVINFO_M8, + strlen(JPEGENC_DEVINFO_M8)); + } 
+ break; + case JPEGENC_IOC_SET_EXT_QUANT_TABLE: + jenc_pr(LOG_DEBUG, "ioctl JPEGENC_IOC_SET_EXT_QUANT_TABLE\n"); +#ifdef EXTEAN_QUANT_TABLE + if (arg == 0) { + kfree(gExternalQuantTablePtr); + gExternalQuantTablePtr = NULL; + external_quant_table_available = false; + } else { + void __user *argp = (void __user *)arg; + gExternalQuantTablePtr = + kmalloc(sizeof(u16) * DCTSIZE2 * 2, + GFP_KERNEL); + if (gExternalQuantTablePtr) { + if (copy_from_user + (gExternalQuantTablePtr, argp, + sizeof(u16) * DCTSIZE2 * 2)) { + r = -1; + break; + } + external_quant_table_available = true; + r = 0; + } else { + jenc_pr(LOG_ERROR, + "gExternalQuantTablePtr malloc fail\n"); + r = -1; + } + } +#else + r = 0; +#endif + break; + default: + jenc_pr(LOG_DEBUG, "ioctl BAD cmd\n"); + r = -1; + break; + } + return r; +} + +#ifdef CONFIG_COMPAT +static long jpegenc_compat_ioctl(struct file *filp, + unsigned int cmd, unsigned long args) +{ + unsigned long ret; + + args = (unsigned long)compat_ptr(args); + ret = jpegenc_ioctl(filp, cmd, args); + return ret; +} +#endif + +static s32 jpegenc_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct jpegenc_wq_s *wq = (struct jpegenc_wq_s *)filp->private_data; + ulong off = vma->vm_pgoff << PAGE_SHIFT; + ulong vma_size = vma->vm_end - vma->vm_start; + + if (vma_size == 0) { + jenc_pr(LOG_ERROR, "vma_size is 0\n"); + return -EAGAIN; + } + off += wq->buf_start; + jenc_pr(LOG_INFO, "vma_size is %ld, off is %ld\n", vma_size, off); + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; + /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */ + if (remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) { + jenc_pr(LOG_ERROR, "set_cached: failed remap_pfn_range\n"); + return -EAGAIN; + } + return 0; +} + +static u32 jpegenc_poll(struct file *file, poll_table *wait_table) +{ + struct jpegenc_wq_s *wq = (struct jpegenc_wq_s *)file->private_data; + poll_wait(file, &wq->complete, 
wait_table); + + if (atomic_read(&wq->ready)) { + atomic_dec(&wq->ready); + return POLLIN | POLLRDNORM; + } + return 0; +} + +static const struct file_operations jpegenc_fops = { + .owner = THIS_MODULE, + .open = jpegenc_open, + .mmap = jpegenc_mmap, + .release = jpegenc_release, + .unlocked_ioctl = jpegenc_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = jpegenc_compat_ioctl, +#endif + .poll = jpegenc_poll, +}; + +static s32 jpegenc_wq_init(void) +{ + jenc_pr(LOG_DEBUG, "jpegenc_wq_init.\n"); + spin_lock_init(&gJpegenc.sem_lock); + spin_lock(&gJpegenc.sem_lock); + gJpegenc.irq_requested = false; + gJpegenc.process_irq = false; + gJpegenc.inited = false; + gJpegenc.opened = 0; + gJpegenc.encode_hw_status = JPEGENC_ENCODER_IDLE; + + tasklet_init(&gJpegenc.tasklet, + jpegenc_isr_tasklet, + (ulong)&gJpegenc); + spin_unlock(&gJpegenc.sem_lock); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_GXBB) + clock_level = 5; + else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8M2) + clock_level = 3; + else + clock_level = 1; + return 0; +} + +static s32 jpegenc_wq_uninit(void) +{ + s32 r = -1; + jenc_pr(LOG_DEBUG, "uninit encode wq.\n"); + if (gJpegenc.encode_hw_status == JPEGENC_ENCODER_IDLE) { + if ((gJpegenc.irq_num >= 0) && + (gJpegenc.irq_requested == true)) { + free_irq(gJpegenc.irq_num, &gJpegenc); + gJpegenc.irq_requested = false; + } + r = 0; + } + return r; +} + +static ssize_t encode_status_show(struct class *cla, + struct class_attribute *attr, char *buf) +{ + s32 irq_num; + u32 hw_status, width, height; + bool process_irq; + bool inited; + bool use_reserve; + u32 cma_size, max_w, max_h; + u32 buffer_start, buffer_size; + u8 lev, opened; + struct Jpegenc_Buff_s res; + + spin_lock(&gJpegenc.sem_lock); + + irq_num = gJpegenc.irq_num; + hw_status = gJpegenc.encode_hw_status; + process_irq = gJpegenc.process_irq; + inited = gJpegenc.inited; + opened = gJpegenc.opened; + use_reserve = gJpegenc.use_reserve; + res.buf_start = gJpegenc.mem.reserve_mem.buf_start; + 
res.buf_size = gJpegenc.mem.reserve_mem.buf_size; + buffer_start = gJpegenc.mem.buf_start; + buffer_size = gJpegenc.mem.buf_size; + lev = gJpegenc.mem.cur_buf_lev; + max_w = gJpegenc.mem.bufspec->max_width; + max_h = gJpegenc.mem.bufspec->max_height; + width = gJpegenc.wq.cmd.encoder_width; + height = gJpegenc.wq.cmd.encoder_height; +#ifdef CONFIG_CMA + cma_size = gJpegenc.mem.cma_pool_size / SZ_1M; +#endif + spin_unlock(&gJpegenc.sem_lock); + + jenc_pr(LOG_DEBUG, + "jpegenc width: %d, encode height: %d.\n", + width, height); + jenc_pr(LOG_DEBUG, + "jpegenc hw_status: %d, process_irq: %s.\n", + hw_status, process_irq ? "true" : "false"); + jenc_pr(LOG_DEBUG, + "jpegenc irq num: %d, inited: %s, opened: %d\n", + irq_num, inited ? "true" : "false", opened); + if (use_reserve) { + jenc_pr(LOG_DEBUG, + "jpegenc reserve memory, buffer start: 0x%x, size: %d MB.\n", + res.buf_start, res.buf_size / SZ_1M); + } else { +#ifdef CONFIG_CMA + jenc_pr(LOG_DEBUG, "jpegenc cma pool size: %d.\n", cma_size); +#endif + } + jenc_pr(LOG_DEBUG, "jpegenc buffer start: 0x%x, size: 0x%x\n", + buffer_start, buffer_size); + jenc_pr(LOG_DEBUG, "buffer level: %s\n", glevel_str[lev]); + return snprintf(buf, 40, "max size: %dx%d\n", max_w, max_h); +} + +static int enc_dma_buf_map(struct enc_dma_cfg *cfg) +{ + long ret = -1; + int fd = -1; + struct dma_buf *dbuf = NULL; + struct dma_buf_attachment *d_att = NULL; + struct sg_table *sg = NULL; + void *vaddr = NULL; + struct device *dev = NULL; + enum dma_data_direction dir; + + if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) { + jenc_pr(LOG_ERROR, "error input param\n"); + return -EINVAL; + } + + jenc_pr(LOG_INFO, "enc_dma_buf_map, fd %d\n", cfg->fd); + + fd = cfg->fd; + dev = cfg->dev; + dir = cfg->dir; + jenc_pr(LOG_INFO, "enc_dma_buffer_map fd %d\n", fd); + + dbuf = dma_buf_get(fd); + + if (dbuf == NULL) { + jenc_pr(LOG_ERROR, "failed to get dma buffer,fd %d\n",fd); + return -EINVAL; + } + + d_att = dma_buf_attach(dbuf, dev); + + if (d_att 
/*
 * Tail of enc_dma_buf_map() -- the head of this function (fd lookup and
 * dma_buf_attach()) lies above this chunk, so it is not re-documented here.
 * From this point on: map the attachment, begin CPU access, vmap, then
 * publish every handle into *cfg.  Error paths unwind in strict reverse
 * order of acquisition via the goto ladder below.
 */
== NULL) {
		jenc_pr(LOG_ERROR, "failed to set dma attach\n");
		goto attach_err;
	}

	sg = dma_buf_map_attachment(d_att, dir);

	if (sg == NULL) {
		jenc_pr(LOG_ERROR, "failed to get dma sg\n");
		goto map_attach_err;
	}

	ret = dma_buf_begin_cpu_access(dbuf, dir);

	if (ret != 0) {
		jenc_pr(LOG_ERROR, "failed to access dma buff\n");
		goto access_err;
	}

	vaddr = dma_buf_vmap(dbuf);

	if (vaddr == NULL) {
		jenc_pr(LOG_ERROR, "failed to vmap dma buf\n");
		goto vmap_err;
	}

	/* Success: hand all mapping handles to the caller through cfg so
	 * enc_dma_buf_unmap() can later release them. */
	cfg->dbuf = dbuf;
	cfg->attach = d_att;
	cfg->vaddr = vaddr;
	cfg->sg = sg;
	cfg->size = dbuf->size;
	jenc_pr(LOG_INFO, "dmabuf size is %zu\n", cfg->size);

	return ret;

vmap_err:
	dma_buf_end_cpu_access(dbuf, dir);

access_err:
	dma_buf_unmap_attachment(d_att, sg, dir);

map_attach_err:
	dma_buf_detach(dbuf, d_att);

attach_err:
	dma_buf_put(dbuf);

	return ret;
}

/*
 * enc_dma_buf_get_phys() - map cfg's dma-buf and return its physical base
 * address through @addr.
 *
 * Returns 0 on success or the negative value from enc_dma_buf_map().
 * NOTE(review): only the first scatterlist entry is consulted, i.e. the
 * buffer is assumed physically contiguous -- confirm the allocator used by
 * callers guarantees this.  If cfg->sg is NULL after a successful map,
 * *addr is left unwritten; the log line below would then print stale data.
 */
static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr)
{
	struct sg_table *sg_table;
	struct page *page;
	int ret;
	jenc_pr(LOG_INFO, "jpegenc_dma_buf_get_phys in\n");

	ret = enc_dma_buf_map(cfg);

	if (ret < 0) {
		jenc_pr(LOG_ERROR, "jpegenc_dma_buf_get_phys failed\n");
		return ret;
	}

	if (cfg->sg) {
		sg_table = cfg->sg;
		page = sg_page(sg_table->sgl);
		*addr = PFN_PHYS(page_to_pfn(page));
		ret = 0;
	}

	jenc_pr(LOG_INFO, "jpegenc_dma_buf_get_phys 0x%lx\n", *addr);
	return ret;
}

/*
 * enc_dma_buf_unmap() - undo everything enc_dma_buf_map() acquired, in
 * exact reverse order: vunmap, end CPU access, unmap attachment, detach,
 * then drop the dma_buf reference.  All handles are validated up front so
 * a partially-populated cfg is rejected rather than half-released.
 */
static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg)
{
	int fd = -1;
	struct dma_buf *dbuf = NULL;
	struct dma_buf_attachment *d_att = NULL;
	struct sg_table *sg = NULL;
	void *vaddr = NULL;
	struct device *dev = NULL;
	enum dma_data_direction dir;

	if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL
		|| cfg->dbuf == NULL || cfg->vaddr == NULL
		|| cfg->attach == NULL || cfg->sg == NULL) {
		jenc_pr(LOG_ERROR, "Error input param\n");
		return;
	}

	fd = cfg->fd;
	dev = cfg->dev;
	dir = cfg->dir;
	dbuf = cfg->dbuf;
	vaddr = cfg->vaddr;

	d_att = cfg->attach;
	sg = cfg->sg;

	dma_buf_vunmap(dbuf, vaddr);

	dma_buf_end_cpu_access(dbuf, dir);

	dma_buf_unmap_attachment(d_att, sg, dir);

	dma_buf_detach(dbuf, d_att);

	dma_buf_put(dbuf);
	jenc_pr(LOG_DEBUG, "enc_dma_buffer_unmap vaddr %p\n",(unsigned *)vaddr);
}

/* sysfs 'power_ctrl' show: bring-up debug dump of HCODEC power-domain
 * status and the current jpeg encoder clock rate.  Deliberately returns 1
 * instead of filling @buf (see commented-out snprintf). */
static ssize_t power_ctrl_show(struct class *cla, struct class_attribute *attr, char *buf) {
	pr_err("power status: %lu\n", pwr_ctrl_status_psci_smc(PDID_T7_DOS_HCODEC));
	pr_err("jpeg clk: %ld\n", clk_get_rate(g_jpeg_enc_clks.jpeg_enc_clk));
	return 1;//snprintf(buf, PAGE_SIZE, "power control show done\n");
}

/* sysfs 'power_ctrl' store: bring-up debug hooks.  Accepts "poweron",
 * "poweroff", or "mbox0".."mbox2" to power-cycle the encoder or to fire a
 * HCODEC mailbox interrupt by hand.  Returns 1 regardless of @count. */
static ssize_t power_ctrl_store(struct class *class,struct class_attribute *attr,
	const char *buf, size_t count) {
	if (strncmp(buf, "poweron", 7) == 0) {
		pr_err("now powering on:\n");
		//pwr_ctrl_psci_smc(PM_HCODEC, true);
		//jpegenc_poweron_ex(6);
		jpegenc_init();
	} else if (strncmp(buf, "poweroff", 8) == 0) {
		pr_err("now powering off:\n");
		//pwr_ctrl_psci_smc(PM_HCODEC, false);
		jpegenc_poweroff_ex();
	} else if (strncmp(buf, "mbox0", 5) == 0) {
		pr_err("trigger mbox0:\n");

		WRITE_HREG(HCODEC_ASSIST_MBOX0_MASK, 1); //enable irq
		WRITE_VREG(HCODEC_ASSIST_MBOX0_IRQ_REG, 0x1); // set irq
	} else if (strncmp(buf, "mbox1", 5) == 0) {
		pr_err("trigger mbox1:\n");

		WRITE_HREG(HCODEC_ASSIST_MBOX1_MASK, 1); //enable irq
		WRITE_VREG(HCODEC_ASSIST_MBOX1_IRQ_REG, 0x1); // set irq
	} else if (strncmp(buf, "mbox2", 5) == 0) {
		pr_err("trigger mbox2:\n");

		WRITE_HREG(HCODEC_ASSIST_MBOX2_MASK, 1); //enable irq
		WRITE_VREG(HCODEC_ASSIST_MBOX2_IRQ_REG, 0x1); // set irq
	}

	return 1;//snprintf(buf, PAGE_SIZE, "policy read,just for test\n");
}

static CLASS_ATTR_RO(encode_status);
static CLASS_ATTR_RW(power_ctrl);
//static CLASS_ATTR(clock_ctrl, 0664, clock_ctrl_show, clock_ctrl_store);

/* Attributes exposed under /sys/class/<CLASS_NAME>/ (array is closed in
 * the lines that follow this chunk boundary). */
static struct attribute *jpegenc_class_attrs[] = {
	&class_attr_encode_status.attr,
	&class_attr_power_ctrl.attr,
	//&class_attr_clock_ctrl.attr,
NULL +}; + +ATTRIBUTE_GROUPS(jpegenc_class); + +static struct class jpegenc_class = { + .name = CLASS_NAME, + .class_groups = jpegenc_class_groups, +}; + +s32 init_jpegenc_device(void) +{ + s32 r = 0; + r = register_chrdev(0, DEVICE_NAME, &jpegenc_fops); + if (r <= 0) { + jenc_pr(LOG_ERROR, "register jpegenc device error\n"); + return r; + } + jpegenc_device_major = r; + + r = class_register(&jpegenc_class); + if (r < 0) { + jenc_pr(LOG_ERROR, "error create jpegenc class.\n"); + return r; + } + + jpegenc_dev = device_create(&jpegenc_class, NULL, + MKDEV(jpegenc_device_major, 0), NULL, + DEVICE_NAME); + + if (IS_ERR(jpegenc_dev)) { + jenc_pr(LOG_ERROR, "create jpegenc device error.\n"); + class_unregister(&jpegenc_class); + return -1; + } + return r; +} + +s32 uninit_jpegenc_device(void) +{ + if (jpegenc_dev) + device_destroy(&jpegenc_class, MKDEV(jpegenc_device_major, 0)); + + class_destroy(&jpegenc_class); + + unregister_chrdev(jpegenc_device_major, DEVICE_NAME); + return 0; +} + +static s32 jpegenc_mem_device_init(struct reserved_mem *rmem, + struct device *dev) +{ + s32 r = 0; + struct resource res; + if (!rmem) { + jenc_pr(LOG_ERROR, + "Can't obtain I/O memory, will allocate jpegenc buffer!\n"); + r = -EFAULT; + return r; + } + res.start = (phys_addr_t) rmem->base; + res.end = res.start + (phys_addr_t) rmem->size - 1; + gJpegenc.mem.reserve_mem.buf_start = res.start; + gJpegenc.mem.reserve_mem.buf_size = res.end - res.start + 1; + jenc_pr(LOG_DEBUG, "found reserved memory device(start:0x%llux, size:0x%llux)\n", + rmem->base, rmem->size); + if (gJpegenc.mem.reserve_mem.buf_size >= + jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_VGA].min_buffsize) + gJpegenc.use_reserve = true; + else { + jenc_pr(LOG_ERROR, + "jpegenc reserve_mem too small, size is %d.\n", + gJpegenc.mem.reserve_mem.buf_size); + gJpegenc.mem.reserve_mem.buf_start = 0; + gJpegenc.mem.reserve_mem.buf_size = 0; + return -EFAULT; + } + return r; +} + +static s32 jpegenc_probe(struct platform_device *pdev) +{ 
+ s32 res_irq; + s32 idx; + + jenc_pr(LOG_DEBUG, "jpegenc probe start.\n"); + + gJpegenc.this_pdev = pdev; + gJpegenc.use_reserve = false; + gJpegenc.use_cma = false; + + jpeg_in_full_hcodec = 0; + mfdin_ambus_canv_conv = 0; + + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T3) { + pr_err("jpegenc_probe: jpeg_in_full_hcodec\n"); + jpeg_in_full_hcodec = 1; + mfdin_ambus_canv_conv = 1; + } + + memset(&gJpegenc.mem, 0, sizeof(struct jpegenc_meminfo_s)); + + idx = of_reserved_mem_device_init(&pdev->dev); + if (idx != 0) { + jenc_pr(LOG_DEBUG, + "jpegenc memory resource undefined. err=%d\n", idx); + } + + if (gJpegenc.use_reserve == false) { +#ifndef CONFIG_CMA + jenc_pr(LOG_ERROR, + "jpegenc memory is invaild, probe fail!\n"); + return -EFAULT; +#else + struct device_node *mem_node; + struct reserved_mem *rmem = NULL; + + mem_node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0); + if (mem_node) + rmem = of_reserved_mem_lookup(mem_node); + of_node_put(mem_node); + + if (rmem) { + jenc_pr(LOG_DEBUG, + "jpegenc - reserved cma node found: %s.\n", rmem->name); + gJpegenc.mem.cma_pool_size = rmem->size; + gJpegenc.use_cma = true; + jenc_pr(LOG_DEBUG, + "jpegenc - codec mm pool size: %d MB.\n", codec_mm_get_free_size() / SZ_1M); + + } else { + jenc_pr(LOG_DEBUG, + "jpegenc - reserved cma node not found, using codec mm pool size.\n"); + gJpegenc.mem.cma_pool_size = codec_mm_get_free_size(); + } + + jenc_pr(LOG_DEBUG, + "jpegenc - cma memory pool size: %d MB\n", + (u32)gJpegenc.mem.cma_pool_size / SZ_1M); + gJpegenc.mem.buf_size = gJpegenc.mem.cma_pool_size; +#endif + } else { + + jenc_pr(LOG_DEBUG, "using reserved memory(start:0x%x, size:%d MB)\n", + gJpegenc.mem.reserve_mem.buf_start, + (u32)gJpegenc.mem.reserve_mem.buf_size / SZ_1M); + gJpegenc.mem.buf_start = gJpegenc.mem.reserve_mem.buf_start; + gJpegenc.mem.buf_size = gJpegenc.mem.reserve_mem.buf_size; + } + + // when use_cma is false, choose JPEGENC_BUFFER_LEVEL_8M as default + if (gJpegenc.use_cma == false) 
{ + jenc_pr(LOG_ERROR, "set mem spec to JPEGENC_BUFFER_LEVEL_8M\n"); + gJpegenc.mem.buf_size = jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_8M].min_buffsize; + } + + if (gJpegenc.mem.buf_size >= + jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_HD].min_buffsize) { + gJpegenc.mem.cur_buf_lev = JPEGENC_BUFFER_LEVEL_HD; + gJpegenc.mem.bufspec = (struct Jpegenc_BuffInfo_s *) + &jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_HD]; + } else if (gJpegenc.mem.buf_size >= + jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_13M].min_buffsize) { + gJpegenc.mem.cur_buf_lev = JPEGENC_BUFFER_LEVEL_13M; + gJpegenc.mem.bufspec = (struct Jpegenc_BuffInfo_s *) + &jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_13M]; + } else if (gJpegenc.mem.buf_size >= + jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_8M].min_buffsize) { + gJpegenc.mem.cur_buf_lev = JPEGENC_BUFFER_LEVEL_8M; + gJpegenc.mem.bufspec = (struct Jpegenc_BuffInfo_s *) + &jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_8M]; + } else if (gJpegenc.mem.buf_size >= + jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_5M].min_buffsize) { + gJpegenc.mem.cur_buf_lev = JPEGENC_BUFFER_LEVEL_5M; + gJpegenc.mem.bufspec = (struct Jpegenc_BuffInfo_s *) + &jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_5M]; + } else if (gJpegenc.mem.buf_size >= + jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_3M].min_buffsize) { + gJpegenc.mem.cur_buf_lev = JPEGENC_BUFFER_LEVEL_3M; + gJpegenc.mem.bufspec = (struct Jpegenc_BuffInfo_s *) + &jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_3M]; + } else if (gJpegenc.mem.buf_size >= + jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_2M].min_buffsize) { + gJpegenc.mem.cur_buf_lev = JPEGENC_BUFFER_LEVEL_2M; + gJpegenc.mem.bufspec = (struct Jpegenc_BuffInfo_s *) + &jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_2M]; + } else if (gJpegenc.mem.buf_size >= + jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_VGA].min_buffsize) { + gJpegenc.mem.cur_buf_lev = JPEGENC_BUFFER_LEVEL_VGA; + gJpegenc.mem.bufspec = (struct Jpegenc_BuffInfo_s *) + &jpegenc_buffspec[JPEGENC_BUFFER_LEVEL_VGA]; + } else { + jenc_pr(LOG_ERROR, + "jpegenc probe memory too small, size 
is %d.\n", + gJpegenc.mem.buf_size); + gJpegenc.mem.buf_start = 0; + gJpegenc.mem.buf_size = 0; + return -EFAULT; + } +#if 1 + res_irq = platform_get_irq(pdev, 0); +#else + switch (manual_irq_num) { + case 0: + res_irq = platform_get_irq_byname(pdev, "dos_mbox_slow_irq0"); + pr_err("[%s:%d] get irq dos_mbox_slow_irq0, res_irq=%d\n", __FUNCTION__, __LINE__, res_irq); + break; + case 1: + res_irq = platform_get_irq_byname(pdev, "dos_mbox_slow_irq1"); + pr_err("[%s:%d] get irq dos_mbox_slow_irq1, res_irq=%d\n", __FUNCTION__, __LINE__, res_irq); + break; + case 2: + res_irq = platform_get_irq_byname(pdev, "dos_mbox_slow_irq2"); + pr_err("[%s:%d] get irq dos_mbox_slow_irq2, res_irq=%d\n", __FUNCTION__, __LINE__, res_irq); + break; + default: + + res_irq = platform_get_irq_byname(pdev, "dos_mbox_slow_irq0"); + pr_err("[%s:%d] get irq dos_mbox_slow_irq0, res_irq=%d\n", __FUNCTION__, __LINE__, res_irq); + break; + } +#endif + + if (res_irq < 0) { + jenc_pr(LOG_ERROR, "[%s] get irq error!", __func__); + return -EINVAL; + } else + jenc_pr(LOG_ERROR, "[%s] get irq success: %d!, manual_irq_num=%d\n", __func__, res_irq, manual_irq_num); + + gJpegenc.irq_num = res_irq; + + jenc_pr(LOG_DEBUG, + "jpegenc memory config sucess, buff size is 0x%x, level: %s\n", + gJpegenc.mem.buf_size, + glevel_str[gJpegenc.mem.cur_buf_lev]); + + jpegenc_wq_init(); + init_jpegenc_device(); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) { + if (jpeg_enc_clk_get(&pdev->dev, &g_jpeg_enc_clks) != 0) { + pr_err("jpeg_enc_clk_get failed\n"); + return -1; + } + } + + jenc_pr(LOG_DEBUG, "jpegenc probe end.\n"); + return 0; +} + +static s32 jpegenc_remove(struct platform_device *pdev) +{ + if (jpegenc_wq_uninit()) + jenc_pr(LOG_ERROR, "jpegenc_wq_uninit error.\n"); + + of_reserved_mem_device_release(&pdev->dev); + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_C1) + jpeg_enc_clk_put(&pdev->dev, &g_jpeg_enc_clks); + + uninit_jpegenc_device(); + jenc_pr(LOG_DEBUG, "jpegenc remove.\n"); + return 0; 
+} + +static const struct of_device_id amlogic_jpegenc_dt_match[] = { + { + .compatible = "amlogic, jpegenc", + }, + {}, +}; + +static struct platform_driver jpegenc_driver = { + .probe = jpegenc_probe, + .remove = jpegenc_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = amlogic_jpegenc_dt_match, + } +}; + +#if 0 +static struct codec_profile_t jpegenc_profile = { + .name = "jpegenc", + .profile = "" +}; +#endif + +static s32 __init jpegenc_driver_init_module(void) +{ + jenc_pr(LOG_DEBUG, "jpegenc module init\n"); + + if (platform_driver_register(&jpegenc_driver)) { + jenc_pr(LOG_ERROR, "failed to register jpegenc driver\n"); + return -ENODEV; + } + +#if 0 + vcodec_profile_register(&jpegenc_profile); +#endif + return 0; +} + +static void __exit jpegenc_driver_remove_module(void) +{ + jenc_pr(LOG_DEBUG, "jpegenc module remove.\n"); + platform_driver_unregister(&jpegenc_driver); +} + +static const struct reserved_mem_ops rmem_jpegenc_ops = { + .device_init = jpegenc_mem_device_init, +}; + +static s32 __init jpegenc_mem_setup(struct reserved_mem *rmem) +{ + rmem->ops = &rmem_jpegenc_ops; + jenc_pr(LOG_DEBUG, "jpegenc reserved mem setup.\n"); + return 0; +} + +module_param(simulation_enable, uint, 0664); +MODULE_PARM_DESC(simulation_enable, "\n simulation_enable\n"); + +module_param(g_block_mode, uint, 0664); +MODULE_PARM_DESC(g_block_mode, "\n g_block_mode\n"); + +module_param(g_canv0_stride, uint, 0664); +MODULE_PARM_DESC(g_canv0_stride, "\n g_canv0_stride\n"); + +module_param(g_canv1_stride, uint, 0664); +MODULE_PARM_DESC(g_canv1_stride, "\n g_canv1_stride\n"); + +module_param(g_canv2_stride, uint, 0664); +MODULE_PARM_DESC(g_canv2_stride, "\n g_canv2_stride\n"); + +module_param(g_canvas_height, uint, 0664); +MODULE_PARM_DESC(g_canvas_height, "\n g_canvas_height\n"); + +module_param(clock_level, uint, 0664); +MODULE_PARM_DESC(clock_level, "\n clock_level\n"); + +module_param(jpegenc_print_level, uint, 0664); +MODULE_PARM_DESC(jpegenc_print_level, "\n 
jpegenc_print_level\n"); + +module_param(reg_offset, int, 0664); +MODULE_PARM_DESC(reg_offset, "\n reg_offset\n"); + +module_param(use_dma_io, uint, 0664); +MODULE_PARM_DESC(use_dma_io, "\n use dma io or not\n"); + +module_param(use_quality, uint, 0664); +MODULE_PARM_DESC(use_quality, "\n use_quality\n"); + +module_param(legacy_load, uint, 0664); +MODULE_PARM_DESC(legacy_load, "\n legacy_load\n"); + +module_param(dumpmem_line, uint, 0664); +MODULE_PARM_DESC(dumpmem_line, "\n dumpmem_line\n"); + +module_param(pointer, uint, 0664); +MODULE_PARM_DESC(pointer, "\n pointer\n"); + +/*######### DEBUG-BRINGUP#########*/ +module_param(manual_clock, uint, 0664); +MODULE_PARM_DESC(manual_clock, "\n manual_clock\n"); + +module_param(manual_irq_num, uint, 0664); +MODULE_PARM_DESC(manual_irq_num, "\n manual_irq_num\n"); + +module_param(manual_interrupt, uint, 0664); +MODULE_PARM_DESC(manual_interrupt, "\n manual_interrupt\n"); +/*################################*/ + +module_init(jpegenc_driver_init_module); +module_exit(jpegenc_driver_remove_module); +RESERVEDMEM_OF_DECLARE(jpegenc, "amlogic, jpegenc-memory", jpegenc_mem_setup); + +MODULE_DESCRIPTION("AMLOGIC JPEG Encoder Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("simon.zheng <simon.zheng@amlogic.com>");
diff --git a/drivers/frame_sink/encoder/jpeg/jpegenc.h b/drivers/frame_sink/encoder/jpeg/jpegenc.h new file mode 100644 index 0000000..d8dcf84 --- /dev/null +++ b/drivers/frame_sink/encoder/jpeg/jpegenc.h
/*
 * jpegenc.h - userspace ABI and hardware clock/quantization definitions for
 * the Amlogic JPEG hardware encoder.  Ioctl numbers and struct layouts here
 * are consumed by userspace; do not renumber or reorder them.
 */
#ifndef __JPEG_ENC_H_
#define __JPEG_ENC_H_

#include <linux/dma-buf.h>
#define JPEGENC_DEVINFO_M8 "AML-M8"
#define JPEGENC_DEVINFO_G9 "AML-G9"
#define JPEGENC_DEVINFO_GXBB "AML-GXBB"
#define JPEGENC_DEVINFO_GXTVBB "AML-GXTVBB"
#define JPEGENC_DEVINFO_GXL "AML-GXL"

/* Clock-level macros: program HHI_VDEC_CLK_CNTL mux/divider bits (keeping
 * the low 16 bits), selecting increasing HCODEC clock rates L0..L6. */
/* M8: 2550/10 = 255M GX: 2000/10 = 200M */
#define JPEGENC_HDEC_L0() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \
	(2 << 25) | (1 << 16) | (1 << 24) | \
	(0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL)))
/* M8: 2550/8 = 319M GX: 2000/8 = 250M */
#define JPEGENC_HDEC_L1() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \
	(0 << 25) | (1 << 16) | (1 << 24) | \
	(0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL)))
/* M8: 2550/7 = 364M GX: 2000/7 = 285M */
#define JPEGENC_HDEC_L2() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \
	(3 << 25) | (0 << 16) | (1 << 24) | \
	(0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL)))
/* M8: 2550/6 = 425M GX: 2000/6 = 333M */
#define JPEGENC_HDEC_L3() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \
	(1 << 25) | (1 << 16) | (1 << 24) | \
	(0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL)))
/* M8: 2550/5 = 510M GX: 2000/5 = 400M */
#define JPEGENC_HDEC_L4() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \
	(2 << 25) | (0 << 16) | (1 << 24) | \
	(0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL)))
/* M8: 2550/4 = 638M GX: 2000/4 = 500M */
#define JPEGENC_HDEC_L5() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \
	(0 << 25) | (0 << 16) | (1 << 24) | \
	(0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL)))
/* M8: 2550/3 = 850M GX: 2000/3 = 667M */
#define JPEGENC_HDEC_L6() WRITE_HHI_REG(HHI_VDEC_CLK_CNTL, \
	(1 << 25) | (0 << 16) | (1 << 24) | \
	(0xffff & READ_HHI_REG(HHI_VDEC_CLK_CNTL)))

/* Select clock level then enable the HCODEC gates in DOS_GCLK_EN0. */
#define jpegenc_clock_enable(level) \
	do { \
		if (level == 0) \
			JPEGENC_HDEC_L0(); \
		else if (level == 1) \
			JPEGENC_HDEC_L1(); \
		else if (level == 2) \
			JPEGENC_HDEC_L2(); \
		else if (level == 3) \
			JPEGENC_HDEC_L3(); \
		else if (level == 4) \
			JPEGENC_HDEC_L4(); \
		else if (level == 5) \
			JPEGENC_HDEC_L5(); \
		else if (level == 6) \
			JPEGENC_HDEC_L6(); \
		WRITE_VREG_BITS(DOS_GCLK_EN0, 0x7fff, 12, 15); \
	} while (0)

#define jpegenc_clock_disable() \
	do { \
		WRITE_VREG_BITS(DOS_GCLK_EN0, 0, 12, 15); \
		WRITE_HHI_REG_BITS(HHI_VDEC_CLK_CNTL, 0, 24, 1); \
	} while (0)

/* Ioctl ABI.  NOTE(review): the GET_* ioctls are declared _IOW although
 * they read data back -- this matches what userspace already uses, so it
 * must not be "corrected" without a coordinated ABI change. */
#define JPEGENC_IOC_MAGIC 'J'

#define JPEGENC_IOC_GET_DEVINFO _IOW(JPEGENC_IOC_MAGIC, 0xf0, u32)

#define JPEGENC_IOC_GET_BUFFINFO _IOW(JPEGENC_IOC_MAGIC, 0x00, u32)
#define JPEGENC_IOC_CONFIG_INIT _IOW(JPEGENC_IOC_MAGIC, 0x01, u32)
#define JPEGENC_IOC_NEW_CMD _IOW(JPEGENC_IOC_MAGIC, 0x02, u32)
#define JPEGENC_IOC_GET_STAGE _IOW(JPEGENC_IOC_MAGIC, 0x03, u32)
#define JPEGENC_IOC_GET_OUTPUT_SIZE _IOW(JPEGENC_IOC_MAGIC, 0x04, u32)
#define JPEGENC_IOC_SET_EXT_QUANT_TABLE _IOW(JPEGENC_IOC_MAGIC, 0x05, u32)
#define JPEGENC_IOC_QUERY_DMA_SUPPORT _IOW(JPEGENC_IOC_MAGIC, 0x06, u32)
#define JPEGENC_IOC_CONFIG_DMA_INPUT _IOW(JPEGENC_IOC_MAGIC, 0x07, s32)
#define JPEGENC_IOC_RELEASE_DMA_INPUT _IOW(JPEGENC_IOC_MAGIC, 0x08, u32)

#define DCTSIZE2 64

#define JPEGENC_FLUSH_FLAG_INPUT 0x1
#define JPEGENC_FLUSH_FLAG_OUTPUT 0x2

/* Define Quantization table: Max two tables */
#define QUANT_SEL_COMP0 0
#define QUANT_SEL_COMP1 1
#define QUANT_SEL_COMP2 1

/* Define Huffman table selection: Max two tables per DC/AC */
#define DC_HUFF_SEL_COMP0 0
#define DC_HUFF_SEL_COMP1 1
#define DC_HUFF_SEL_COMP2 1
#define AC_HUFF_SEL_COMP0 0
#define AC_HUFF_SEL_COMP1 1
#define AC_HUFF_SEL_COMP2 1

/* DCT interrupt select:0=Disable intr;
   1=Intr at end of each 8x8 block of DCT input;
   2=Intr at end of each MCU of DCT input;
   3=Intr at end of a scan of DCT input;
   4=Intr at end of each 8x8 block of DCT output;
   5=Intr at end of each MCU of DCT output;
   6=Intr at end of a scan of DCT output; */
#define JDCT_INTR_SEL 0

/* 0=Mark last coeff at the end of an 8x8 block,
   1=Mark last coeff at the end of an MCU
   2=Mark last coeff at the end of a scan */
#define JDCT_LASTCOEFF_SEL 1

/* How the input frame is handed to the encoder. */
enum jpegenc_mem_type_e {
	JPEGENC_LOCAL_BUFF = 0,
	JPEGENC_CANVAS_BUFF,
	JPEGENC_PHYSICAL_BUFF,
	JPEGENC_DMA_BUFF,
	JPEGENC_MAX_BUFF_TYPE
};

/* Pixel formats accepted/produced by the encoder front end. */
enum jpegenc_frame_fmt_e {
	JPEGENC_FMT_YUV422_SINGLE = 0,
	JPEGENC_FMT_YUV444_SINGLE,
	JPEGENC_FMT_NV21,
	JPEGENC_FMT_NV12,
	JPEGENC_FMT_YUV420,
	JPEGENC_FMT_YUV444_PLANE,
	JPEGENC_FMT_RGB888,
	JPEGENC_FMT_RGB888_PLANE,
	JPEGENC_FMT_RGB565,
	JPEGENC_FMT_RGBA8888,
	JPEGENC_FMT_YUV422_12BIT,
	JPEGENC_FMT_YUV444_10BIT,
	JPEGENC_FMT_YUV422_10BIT,
	JPEGENC_MAX_FRAME_FMT
};

/* A simple [start, start+size) physical buffer range. */
struct Jpegenc_Buff_s {
	u32 buf_start;
	u32 buf_size;
};

/* Per-level working-buffer layout (input / assist / bitstream regions). */
struct Jpegenc_BuffInfo_s {
	u32 lev_id;
	u32 max_width;
	u32 max_height;
	u32 min_buffsize;
	struct Jpegenc_Buff_s input;
	struct Jpegenc_Buff_s assit;
	struct Jpegenc_Buff_s bitstream;
};

/* Encode command from userspace (JPEGENC_IOC_NEW_CMD / NEW_CMD2). */
struct jpegenc_request_s {
	u32 src;
	u32 encoder_width;
	u32 encoder_height;
	u32 framesize;
	u32 jpeg_quality;
	u32 QuantTable_id;
	u32 flush_flag;
	u32 block_mode;
	enum jpegenc_mem_type_e type;
	enum jpegenc_frame_fmt_e input_fmt;
	enum jpegenc_frame_fmt_e output_fmt;

	u32 y_off;
	u32 u_off;
	u32 v_off;

	u32 y_stride;
	u32 u_stride;
	u32 v_stride;

	u32 h_stride;
};

/* Driver-side memory bookkeeping (reserved mem or CMA pool). */
struct jpegenc_meminfo_s {
	u32 buf_start;
	u32 buf_size;
	u8 cur_buf_lev;

#ifdef CONFIG_CMA
	ulong cma_pool_size;
#endif

	struct Jpegenc_Buff_s reserve_mem;
	struct Jpegenc_BuffInfo_s *bufspec;
};

/* Per-open encode work-queue state. */
struct jpegenc_wq_s {
	u32 hw_status;
	u32 headbytes;
	u32 output_size;

	u32 buf_start;
	u32 buf_size;

	u32 InputBuffStart;
	u32 InputBuffEnd;

	u32 AssitStart;
	u32 AssitEnd;

	u32 BitstreamStart;
	u32 BitstreamEnd;
	void __iomem *AssitstreamStartVirtAddr;

	u32 max_width;
	u32 max_height;

	struct jpegenc_request_s cmd;
	atomic_t ready;
	wait_queue_head_t complete;
#ifdef CONFIG_CMA
	struct page *venc_pages;
#endif
	struct enc_dma_cfg *dma_input;
};

/* Global driver state (single hardware instance). */
struct jpegenc_manager_s {
	u32 encode_hw_status;
	s32 irq_num;

	bool irq_requested;
	bool process_irq;
	bool inited;
	bool use_reserve;
	bool use_cma;

	u8 opened;

	spinlock_t sem_lock;
	struct platform_device *this_pdev;
	struct jpegenc_meminfo_s mem;
	struct jpegenc_wq_s wq;
	struct tasklet_struct tasklet;
};

/* Book-keeping for one imported dma-buf input (see enc_dma_buf_map()). */
struct enc_dma_cfg {
	int fd;
	size_t size;
	void *dev;
	void *vaddr;
	void *paddr;
	struct dma_buf *dbuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	enum dma_data_direction dir;
};

/********************************************
 * AV Scratch Register Re-Define
********************************************/
#define JPEGENC_ENCODER_STATUS HCODEC_HENC_SCRATCH_0
#define JPEGENC_BITSTREAM_OFFSET HCODEC_HENC_SCRATCH_1

/*********************************************
 * ENCODER_STATUS define
********************************************/
#define JPEGENC_ENCODER_IDLE 0
#define JPEGENC_ENCODER_START 1
/* #define JPEGENC_ENCODER_SOS_HEADER 2 */
#define JPEGENC_ENCODER_MCU 3
#define JPEGENC_ENCODER_DONE 4

extern bool jpegenc_on(void);
#define JPEGENC_IOC_NEW_CMD2 _IOW(JPEGENC_IOC_MAGIC, 0x09, struct jpegenc_request_s)
#endif
diff --git a/drivers/frame_sink/encoder/multi/Makefile b/drivers/frame_sink/encoder/multi/Makefile new file mode 100644 index 0000000..711ffe7 --- /dev/null +++ b/drivers/frame_sink/encoder/multi/Makefile
@@ -0,0 +1,2 @@ +obj-$(CONFIG_AMLOGIC_MEDIA_VENC_MULTI) += amvenc_multi.o +amvenc_multi-objs += vpu_multi.o
diff --git a/drivers/frame_sink/encoder/multi/vmm_multi.h b/drivers/frame_sink/encoder/multi/vmm_multi.h new file mode 100644 index 0000000..0c5cbae --- /dev/null +++ b/drivers/frame_sink/encoder/multi/vmm_multi.h
@@ -0,0 +1,663 @@ +/* + * + * Copyright (C) 2019 by Amlogic, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#ifndef __CNM_VIDEO_MEMORY_MANAGEMENT_H__ +#define __CNM_VIDEO_MEMORY_MANAGEMENT_H__ + +#define VMEM_PAGE_SIZE (16 * 1024) +#define MAKE_KEY(_a, _b) (((vmem_key_t)_a) << 32 | _b) +#define KEY_TO_VALUE(_key) (_key >> 32) + +#define VMEM_P_ALLOC(_x) vmalloc(_x) +#define VMEM_P_FREE(_x) vfree(_x) + +#define VMEM_ASSERT \ + pr_info("VMEM_ASSERT at %s:%d\n", __FILE__, __LINE__) + + +#define VMEM_HEIGHT(_tree) (_tree == NULL ? -1 : _tree->height) + +#define MAX(_a, _b) (_a >= _b ? 
_a : _b) + +struct avl_node_t; +#define vmem_key_t unsigned long long + +struct vmem_info_t { + ulong total_pages; + ulong alloc_pages; + ulong free_pages; + ulong page_size; +}; + +struct page_t { + s32 pageno; + ulong addr; + s32 used; + s32 alloc_pages; + s32 first_pageno; +}; + +struct avl_node_t { + vmem_key_t key; + s32 height; + struct page_t *page; + struct avl_node_t *left; + struct avl_node_t *right; +}; + +struct video_mm_t { + struct avl_node_t *free_tree; + struct avl_node_t *alloc_tree; + struct page_t *page_list; + s32 num_pages; + ulong base_addr; + ulong mem_size; + s32 free_page_count; + s32 alloc_page_count; +}; + +enum rotation_dir_t { + LEFT, + RIGHT +}; + +struct avl_node_data_t { + s32 key; + struct page_t *page; +}; + +static struct avl_node_t *make_avl_node( + vmem_key_t key, + struct page_t *page) +{ + struct avl_node_t *node = + (struct avl_node_t *)VMEM_P_ALLOC(sizeof(struct avl_node_t)); + node->key = key; + node->page = page; + node->height = 0; + node->left = NULL; + node->right = NULL; + return node; +} + +static s32 get_balance_factor(struct avl_node_t *tree) +{ + s32 factor = 0; + + if (tree) + factor = VMEM_HEIGHT(tree->right) - VMEM_HEIGHT(tree->left); + return factor; +} + +/* + * Left Rotation + * + * A B + * \ / \ + * B => A C + * / \ \ + * D C D + * + */ +static struct avl_node_t *rotation_left(struct avl_node_t *tree) +{ + struct avl_node_t *rchild; + struct avl_node_t *lchild; + + if (tree == NULL) + return NULL; + + rchild = tree->right; + if (rchild == NULL) + return tree; + + lchild = rchild->left; + rchild->left = tree; + tree->right = lchild; + + tree->height = + MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1; + rchild->height = + MAX(VMEM_HEIGHT(rchild->left), VMEM_HEIGHT(rchild->right)) + 1; + return rchild; +} + + +/* + * Reft Rotation + * + * A B + * \ / \ + * B => D A + * / \ / + * D C C + * + */ +static struct avl_node_t *rotation_right(struct avl_node_t *tree) +{ + struct avl_node_t *rchild; + struct 
avl_node_t *lchild; + + if (tree == NULL) + return NULL; + + lchild = tree->left; + if (lchild == NULL) + return NULL; + + rchild = lchild->right; + lchild->right = tree; + tree->left = rchild; + + tree->height = + MAX(VMEM_HEIGHT(tree->left), + VMEM_HEIGHT(tree->right)) + 1; + lchild->height = + MAX(VMEM_HEIGHT(lchild->left), + VMEM_HEIGHT(lchild->right)) + 1; + return lchild; +} + +static struct avl_node_t *do_balance(struct avl_node_t *tree) +{ + s32 bfactor = 0, child_bfactor; + + bfactor = get_balance_factor(tree); + if (bfactor >= 2) { + child_bfactor = get_balance_factor(tree->right); + if (child_bfactor == 1 || child_bfactor == 0) { + tree = rotation_left(tree); + } else if (child_bfactor == -1) { + tree->right = rotation_right(tree->right); + tree = rotation_left(tree); + } else { + pr_info( + "invalid balancing factor: %d\n", + child_bfactor); + VMEM_ASSERT; + return NULL; + } + } else if (bfactor <= -2) { + child_bfactor = get_balance_factor(tree->left); + if (child_bfactor == -1 || child_bfactor == 0) { + tree = rotation_right(tree); + } else if (child_bfactor == 1) { + tree->left = rotation_left(tree->left); + tree = rotation_right(tree); + } else { + pr_info( + "invalid balancing factor: %d\n", + child_bfactor); + VMEM_ASSERT; + return NULL; + } + } + return tree; +} + +static struct avl_node_t *unlink_end_node( + struct avl_node_t *tree, + s32 dir, + struct avl_node_t **found_node) +{ + struct avl_node_t *node; + *found_node = NULL; + + if (tree == NULL) + return NULL; + + if (dir == LEFT) { + if (tree->left == NULL) { + *found_node = tree; + return NULL; + } + } else { + if (tree->right == NULL) { + *found_node = tree; + return NULL; + } + } + + if (dir == LEFT) { + node = tree->left; + tree->left = unlink_end_node(tree->left, LEFT, found_node); + if (tree->left == NULL) { + tree->left = (*found_node)->right; + (*found_node)->left = NULL; + (*found_node)->right = NULL; + } + } else { + node = tree->right; + tree->right = unlink_end_node(tree->right, 
RIGHT, found_node); + if (tree->right == NULL) { + tree->right = (*found_node)->left; + (*found_node)->left = NULL; + (*found_node)->right = NULL; + } + } + tree->height = + MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1; + return do_balance(tree); +} + + +static struct avl_node_t *avltree_insert( + struct avl_node_t *tree, + vmem_key_t key, + struct page_t *page) +{ + if (tree == NULL) { + tree = make_avl_node(key, page); + } else { + if (key >= tree->key) + tree->right = + avltree_insert(tree->right, key, page); + else + tree->left = + avltree_insert(tree->left, key, page); + } + tree = do_balance(tree); + tree->height = + MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1; + return tree; +} + +static struct avl_node_t *do_unlink(struct avl_node_t *tree) +{ + struct avl_node_t *node; + struct avl_node_t *end_node; + + node = unlink_end_node(tree->right, LEFT, &end_node); + if (node) { + tree->right = node; + } else { + node = + unlink_end_node(tree->left, RIGHT, &end_node); + if (node) + tree->left = node; + } + + if (node == NULL) { + node = tree->right ? tree->right : tree->left; + end_node = node; + } + + if (end_node) { + end_node->left = + (tree->left != end_node) ? + tree->left : end_node->left; + end_node->right = + (tree->right != end_node) ? 
+ tree->right : end_node->right; + end_node->height = + MAX(VMEM_HEIGHT(end_node->left), + VMEM_HEIGHT(end_node->right)) + 1; + } + tree = end_node; + return tree; +} + +static struct avl_node_t *avltree_remove( + struct avl_node_t *tree, + struct avl_node_t **found_node, + vmem_key_t key) +{ + *found_node = NULL; + if (tree == NULL) { + pr_info("failed to find key %d\n", (int)key); + return NULL; + } + + if (key == tree->key) { + *found_node = tree; + tree = do_unlink(tree); + } else if (key > tree->key) { + tree->right = + avltree_remove(tree->right, found_node, key); + } else { + tree->left = + avltree_remove(tree->left, found_node, key); + } + + if (tree) + tree->height = + MAX(VMEM_HEIGHT(tree->left), + VMEM_HEIGHT(tree->right)) + 1; + + tree = do_balance(tree); + return tree; +} + +void avltree_free(struct avl_node_t *tree) +{ + if (tree == NULL) + return; + if (tree->left == NULL && tree->right == NULL) { + VMEM_P_FREE(tree); + return; + } + + avltree_free(tree->left); + tree->left = NULL; + avltree_free(tree->right); + tree->right = NULL; + VMEM_P_FREE(tree); +} + +static struct avl_node_t *remove_approx_value( + struct avl_node_t *tree, + struct avl_node_t **found, + vmem_key_t key) +{ + *found = NULL; + if (tree == NULL) + return NULL; + + if (key == tree->key) { + *found = tree; + tree = do_unlink(tree); + } else if (key > tree->key) { + tree->right = remove_approx_value(tree->right, found, key); + } else { + tree->left = remove_approx_value(tree->left, found, key); + if (*found == NULL) { + *found = tree; + tree = do_unlink(tree); + } + } + if (tree) + tree->height = + MAX(VMEM_HEIGHT(tree->left), + VMEM_HEIGHT(tree->right)) + 1; + tree = do_balance(tree); + return tree; +} + +static void set_blocks_free( + struct video_mm_t *mm, + s32 pageno, + s32 npages) +{ + s32 last_pageno = pageno + npages - 1; + s32 i; + struct page_t *page; + struct page_t *last_page; + + if (npages == 0) + VMEM_ASSERT; + + if (last_pageno >= mm->num_pages) { + pr_info( + 
"set_blocks_free: invalid last page number: %d\n", + last_pageno); + VMEM_ASSERT; + return; + } + + for (i = pageno; i <= last_pageno; i++) { + mm->page_list[i].used = 0; + mm->page_list[i].alloc_pages = 0; + mm->page_list[i].first_pageno = -1; + } + + page = &mm->page_list[pageno]; + page->alloc_pages = npages; + last_page = &mm->page_list[last_pageno]; + last_page->first_pageno = pageno; + mm->free_tree = + avltree_insert(mm->free_tree, MAKE_KEY(npages, pageno), page); +} + +static void set_blocks_alloc( + struct video_mm_t *mm, + s32 pageno, + s32 npages) +{ + s32 last_pageno = pageno + npages - 1; + s32 i; + struct page_t *page; + struct page_t *last_page; + + if (last_pageno >= mm->num_pages) { + pr_info( + "set_blocks_free: invalid last page number: %d\n", + last_pageno); + VMEM_ASSERT; + return; + } + + for (i = pageno; i <= last_pageno; i++) { + mm->page_list[i].used = 1; + mm->page_list[i].alloc_pages = 0; + mm->page_list[i].first_pageno = -1; + } + + page = &mm->page_list[pageno]; + page->alloc_pages = npages; + last_page = &mm->page_list[last_pageno]; + last_page->first_pageno = pageno; + mm->alloc_tree = + avltree_insert(mm->alloc_tree, MAKE_KEY(page->addr, 0), page); +} + + +s32 vmem_init(struct video_mm_t *mm, ulong addr, ulong size) +{ + s32 i; + + if (mm == NULL) + return -1; + + mm->base_addr = (addr + (VMEM_PAGE_SIZE - 1)) + & ~(VMEM_PAGE_SIZE - 1); + mm->mem_size = size & ~VMEM_PAGE_SIZE; + mm->num_pages = mm->mem_size / VMEM_PAGE_SIZE; + mm->free_tree = NULL; + mm->alloc_tree = NULL; + mm->free_page_count = mm->num_pages; + mm->alloc_page_count = 0; + mm->page_list = + (struct page_t *)VMEM_P_ALLOC( + mm->num_pages * sizeof(struct page_t)); + if (mm->page_list == NULL) { + pr_err("%s:%d failed to kmalloc(%ld)\n", + __func__, __LINE__, + (long)mm->num_pages * sizeof(struct page_t)); + return -1; + } + + for (i = 0; i < mm->num_pages; i++) { + mm->page_list[i].pageno = i; + mm->page_list[i].addr = + mm->base_addr + i * VMEM_PAGE_SIZE; + 
mm->page_list[i].alloc_pages = 0; + mm->page_list[i].used = 0; + mm->page_list[i].first_pageno = -1; + } + set_blocks_free(mm, 0, mm->num_pages); + return 0; +} + +s32 vmem_exit(struct video_mm_t *mm) +{ + if (mm == NULL) { + pr_info("vmem_exit: invalid handle\n"); + return -1; + } + + if (mm->free_tree) + avltree_free(mm->free_tree); + if (mm->alloc_tree) + avltree_free(mm->alloc_tree); + + if (mm->page_list) { + VMEM_P_FREE(mm->page_list); + mm->page_list = NULL; + } + + mm->base_addr = 0; + mm->mem_size = 0; + mm->num_pages = 0; + mm->page_list = NULL; + mm->free_tree = NULL; + mm->alloc_tree = NULL; + mm->free_page_count = 0; + mm->alloc_page_count = 0; + return 0; +} + +ulong vmem_alloc(struct video_mm_t *mm, s32 size, ulong pid) +{ + struct avl_node_t *node; + struct page_t *free_page; + s32 npages, free_size; + s32 alloc_pageno; + ulong ptr; + + if (mm == NULL) { + pr_info("vmem_alloc: invalid handle\n"); + return -1; + } + + if (size <= 0) + return -1; + + npages = (size + VMEM_PAGE_SIZE - 1) / VMEM_PAGE_SIZE; + mm->free_tree = remove_approx_value(mm->free_tree, + &node, MAKE_KEY(npages, 0)); + + if (node == NULL) + return -1; + + free_page = node->page; + free_size = KEY_TO_VALUE(node->key); + alloc_pageno = free_page->pageno; + set_blocks_alloc(mm, alloc_pageno, npages); + if (npages != free_size) { + s32 free_pageno = alloc_pageno + npages; + + set_blocks_free(mm, free_pageno, (free_size-npages)); + } + VMEM_P_FREE(node); + + ptr = mm->page_list[alloc_pageno].addr; + mm->alloc_page_count += npages; + mm->free_page_count -= npages; + return ptr; +} + +s32 vmem_free(struct video_mm_t *mm, ulong ptr, ulong pid) +{ + ulong addr; + struct avl_node_t *found; + struct page_t *page; + s32 pageno, prev_free_pageno, next_free_pageno; + s32 prev_size, next_size; + s32 merge_page_no, merge_page_size, free_page_size; + + if (mm == NULL) { + pr_info("vmem_free: invalid handle\n"); + return -1; + } + + addr = ptr; + mm->alloc_tree = avltree_remove(mm->alloc_tree, 
&found, + MAKE_KEY(addr, 0)); + + if (found == NULL) { + pr_info("vmem_free: 0x%08x not found\n", (s32)addr); + VMEM_ASSERT; + return -1; + } + + /* find previous free block */ + page = found->page; + pageno = page->pageno; + free_page_size = page->alloc_pages; + prev_free_pageno = pageno - 1; + prev_size = -1; + if (prev_free_pageno >= 0) { + if (mm->page_list[prev_free_pageno].used == 0) { + prev_free_pageno = + mm->page_list[prev_free_pageno].first_pageno; + prev_size = + mm->page_list[prev_free_pageno].alloc_pages; + } + } + + /* find next free block */ + next_free_pageno = pageno + page->alloc_pages; + next_free_pageno = + (next_free_pageno == mm->num_pages) ? -1 : next_free_pageno; + next_size = -1; + if (next_free_pageno >= 0) { + if (mm->page_list[next_free_pageno].used == 0) { + next_size = + mm->page_list[next_free_pageno].alloc_pages; + } + } + VMEM_P_FREE(found); + + /* merge */ + merge_page_no = page->pageno; + merge_page_size = page->alloc_pages; + if (prev_size >= 0) { + mm->free_tree = avltree_remove(mm->free_tree, &found, + MAKE_KEY(prev_size, prev_free_pageno)); + if (found == NULL) { + VMEM_ASSERT; + return -1; + } + merge_page_no = found->page->pageno; + merge_page_size += found->page->alloc_pages; + VMEM_P_FREE(found); + } + if (next_size >= 0) { + mm->free_tree = avltree_remove(mm->free_tree, &found, + MAKE_KEY(next_size, next_free_pageno)); + if (found == NULL) { + VMEM_ASSERT; + return -1; + } + merge_page_size += found->page->alloc_pages; + VMEM_P_FREE(found); + } + page->alloc_pages = 0; + page->first_pageno = -1; + set_blocks_free(mm, merge_page_no, merge_page_size); + mm->alloc_page_count -= free_page_size; + mm->free_page_count += free_page_size; + return 0; +} + +s32 vmem_get_info(struct video_mm_t *mm, struct vmem_info_t *info) +{ + if (mm == NULL) { + pr_info("vmem_get_info: invalid handle\n"); + return -1; + } + + if (info == NULL) + return -1; + + info->total_pages = mm->num_pages; + info->alloc_pages = mm->alloc_page_count; + 
info->free_pages = mm->free_page_count; + info->page_size = VMEM_PAGE_SIZE; + return 0; +} +#endif /* __CNM_VIDEO_MEMORY_MANAGEMENT_H__ */
diff --git a/drivers/frame_sink/encoder/multi/vpu_multi.c b/drivers/frame_sink/encoder/multi/vpu_multi.c new file mode 100644 index 0000000..fda8c28 --- /dev/null +++ b/drivers/frame_sink/encoder/multi/vpu_multi.c
/*
 *
 * Copyright (C) 2019 by Amlogic, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/* Debug trace helper.
 * NOTE(review): the macro body already ends with a ';', so call sites
 * must invoke it as "LOG_LINE()" without appending another semicolon.
 */
#define LOG_LINE() pr_err("[%s:%d]\n", __FUNCTION__, __LINE__);
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/compat.h>
#include <linux/of_reserved_mem.h>
#include <linux/of_address.h>
#include <linux/amlogic/media/codec_mm/codec_mm.h>
#include <linux/kfifo.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/amlogic/cpu_version.h>
//#include <linux/amlogic/pwr_ctrl.h>

#include <linux/amlogic/media/canvas/canvas.h>
#include <linux/amlogic/media/canvas/canvas_mgr.h>

#include <linux/amlogic/power_ctrl.h>
#include <dt-bindings/power/t7-pd.h>
#include <linux/amlogic/power_domain.h>
#include "../../../common/chips/decoder_cpu_ver_info.h"
#include "../../../frame_provider/decoder/utils/vdec.h"
#include "../../../frame_provider/decoder/utils/vdec_power_ctrl.h"
#include "vpu_multi.h"
#include "vmm_multi.h"

/* Interrupt-reason FIFO depth: 16 pending reasons per encoder instance. */
#define MAX_INTERRUPT_QUEUE (16*MAX_NUM_INSTANCE)

/* definitions to be changed as customer configuration */
/* if you want to have clock gating scheme frame by frame */
#define VPU_SUPPORT_CLOCK_CONTROL

#define VPU_PLATFORM_DEVICE_NAME "amvenc_multi"
#define VPU_DEV_NAME "amvenc_multi"
#define VPU_CLASS_NAME "amvenc_multi"

#ifndef VM_RESERVED /*for kernel up to 3.7.0 version*/
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif

#define MHz (1000000)

/* Default size of the driver-managed video memory pool (256 MiB). */
#define VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE (256 * SZ_1M)

/* Log verbosity levels for enc_pr(); messages print only when
 * level >= print_level, so LOG_ALL is the most verbose setting.
 */
#define LOG_ALL 0
#define LOG_INFO 1
#define LOG_DEBUG 2
#define LOG_ERROR 3

#define enc_pr(level, x...) \
	do { \
		if (level >= print_level) \
			printk(x); \
	} while (0)

/* Module-tunable state (presumably exposed as module params elsewhere
 * in the file — TODO confirm, the param declarations are not visible here).
 */
static s32 print_level = LOG_DEBUG;
static s32 clock_level = 4;
/* Balance counter for frame-by-frame clock gating; >0 means clocks on. */
static s32 clock_gate_count = 0;
/* Optional override for the DOS clock frequency in MHz (0 = use default). */
static u32 set_clock_freq = 0;

/* Allocator and backing region for the encoder's video memory pool;
 * populated either from a reserved region or from CMA on first open.
 */
static struct video_mm_t s_vmem;
static struct vpudrv_buffer_t s_video_memory = {0};
static bool use_reserve;
static ulong cma_pool_size;
static u32 cma_cfg_size;

/* Optional overrides for the three MultiEnc core clocks, in Hz. */
static u32 clock_a, clock_b, clock_c;
/* Debug switches: dump raw input frames / encoded ES to /data files. */
static int dump_input;
static int dump_es;
static int vpu_hw_reset(void);
static void hw_reset(bool reset);

/* Clock handles obtained from the device tree for the DOS block and the
 * three MultiEnc core clocks (A/B/C).
 */
struct vpu_clks {
	struct clk *dos_clk;
	struct clk *dos_apb_clk;
	struct clk *a_clk;
	struct clk *b_clk;
	struct clk *c_clk;
};

static struct vpu_clks s_vpu_clks;

#ifdef CONFIG_COMPAT
/*
 * Open a file from kernel context for debug dumping.
 * Temporarily widens the address-space limit (set_fs(KERNEL_DS)) so
 * filp_open() accepts a kernel-space path pointer, then restores it.
 * NOTE(review): get_fs()/set_fs() are deprecated and removed in recent
 * kernels — this only builds against kernels that still provide them.
 * Returns the open struct file *, or NULL on failure (error is logged).
 */
static struct file *file_open(const char *path, int flags, int rights)
{
	struct file *filp = NULL;
	mm_segment_t oldfs;
	long err1 = 0;
	void *err2 = NULL;

	oldfs = get_fs();
	//set_fs(get_ds());
	set_fs(KERNEL_DS);
	filp = filp_open(path, flags, rights);
	set_fs(oldfs);

	if (IS_ERR(filp)) {
		err1 = PTR_ERR(filp);
		err2 = ERR_PTR(err1);
		pr_err("filp_open return %p, %ld, %p\n", filp, err1, err2);
		return NULL;
	}

	return filp;
}

/* Close a file previously opened with file_open(). */
static void file_close(struct file *file)
{
	filp_close(file, NULL);
}
/*
static int file_read(struct file *file, unsigned long long offset, unsigned char *data, unsigned int size)
{
	mm_segment_t oldfs;
	int ret;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	ret = vfs_read(file, data, size, &offset);

	set_fs(oldfs);
	return ret;
}*/
/*
 * Write @size bytes from @data to @file at @offset from kernel context.
 * Uses the same KERNEL_DS address-space dance as file_open().
 * Returns the vfs_write() result (bytes written or negative errno).
 * NOTE(review): @offset is passed by value, so the caller's notion of
 * the file position is never advanced; with O_APPEND opens this still
 * appends, but offset 0 is otherwise ignored semantics — confirm intent.
 */
static int file_write(struct file *file, unsigned long long offset, unsigned char *data, unsigned int size)
{
	mm_segment_t oldfs;
	int ret;
	//loff_t pos;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	//pos = file->f_pos;
	ret = vfs_write(file, data, size, &offset);
	//file->f_pos = pos;

	set_fs(oldfs);
	return ret;
}

/* Flush @file's data to storage. Always returns 0. */
static int file_sync(struct file *file)
{
	vfs_fsync(file, 0);
	return 0;
}
/*
 * Debug helper: append one raw input frame (described by canvas @cs0)
 * to /data/multienc.yuv. Always returns 0, even on write failure.
 * NOTE(review): assumes the canvas memory is in the kernel linear map
 * so phys_to_virt() yields a usable pointer — TODO confirm for CMA.
 * NOTE(review): only cs0 (first plane) is dumped, and the failure log
 * says "encoder.yuv" while the path is /data/multienc.yuv.
 */
static s32 dump_raw_input(struct canvas_s *cs0) {
	u8 *data;

	u32 addr, canvas_w, picsize_y;
	//u32 input = request->src;
	//u8 iformat = MAX_FRAME_FMT;
	struct file *filp;
	int ret;

	addr = cs0->addr;

	/* round the canvas width up to a multiple of 32 (canvas stride) */
	canvas_w = ((cs0->width + 31) >> 5) << 5;
	picsize_y = cs0->height;

	filp = file_open("/data/multienc.yuv", O_CREAT | O_RDWR | O_APPEND, 0777);

	if (filp) {
		data = (u8*)phys_to_virt(addr);
		ret = file_write(filp, 0, data, canvas_w * picsize_y);

		file_sync(filp);
		file_close(filp);
	} else
		pr_err("open encoder.yuv failed\n");

	return 0;
}

/*
 * Debug helper: append @size bytes of encoded bitstream starting at
 * physical address @phy_addr to /data/multienc.es. Always returns 0.
 * NOTE(review): same phys_to_virt() linear-map assumption as above,
 * and the write result in @ret is not checked.
 */
static s32 dump_data(u32 phy_addr, u32 size) {
	u8 *data ;

	struct file *filp;
	int ret;

	filp = file_open("/data/multienc.es", O_CREAT | O_RDWR | O_APPEND, 0777);

	if (filp) {
		data = (u8*)phys_to_virt(phy_addr);
		ret = file_write(filp, 0, data, size);

		file_sync(filp);
		file_close(filp);
	} else
		pr_err("open encoder.es failed\n");

	return 0;
}
#endif

/*static s32 dump_encoded_data(u8 *data, u32 size) {
	struct file *filp;

	filp = file_open("/data/multienc_output.es", O_APPEND | O_RDWR, 0644);

	if (filp) {
		file_write(filp, 0, data, size);
		file_sync(filp);
		file_close(filp);
	} else
		pr_err("/data/multienc_output.es failed\n");

	return 0;
}*/
+static void vpu_clk_put(struct device *dev, struct vpu_clks *clks) +{ + if (!(clks->c_clk == NULL || IS_ERR(clks->c_clk))) + devm_clk_put(dev, clks->c_clk); + if (!(clks->b_clk == NULL || IS_ERR(clks->b_clk))) + devm_clk_put(dev, clks->b_clk); + if (!(clks->a_clk == NULL || IS_ERR(clks->a_clk))) + devm_clk_put(dev, clks->a_clk); + if (!(clks->dos_apb_clk == NULL || IS_ERR(clks->dos_apb_clk))) + devm_clk_put(dev, clks->dos_apb_clk); + if (!(clks->dos_clk == NULL || IS_ERR(clks->dos_clk))) + devm_clk_put(dev, clks->dos_clk); +} + +static int vpu_clk_get(struct device *dev, struct vpu_clks *clks) +{ + int ret = 0; + + clks->dos_clk = devm_clk_get(dev, "clk_dos"); + + if (IS_ERR(clks->dos_clk)) { + enc_pr(LOG_ERROR, "cannot get clk_dos clock\n"); + clks->dos_clk = NULL; + ret = -ENOENT; + goto err; + } + + clks->dos_apb_clk = devm_clk_get(dev, "clk_apb_dos"); + + if (IS_ERR(clks->dos_apb_clk)) { + enc_pr(LOG_ERROR, "cannot get clk_apb_dos clock\n"); + clks->dos_apb_clk = NULL; + ret = -ENOENT; + goto err; + } + + clks->a_clk = devm_clk_get(dev, "clk_MultiEnc_A"); + + if (IS_ERR(clks->a_clk)) { + enc_pr(LOG_ERROR, "cannot get clock\n"); + clks->a_clk = NULL; + ret = -ENOENT; + goto err; + } + + clks->b_clk = devm_clk_get(dev, "clk_MultiEnc_B"); + + if (IS_ERR(clks->b_clk)) { + enc_pr(LOG_ERROR, "cannot get clk_MultiEnc_B clock\n"); + clks->b_clk = NULL; + ret = -ENOENT; + goto err; + } + + clks->c_clk = devm_clk_get(dev, "clk_MultiEnc_C"); + + if (IS_ERR(clks->c_clk)) { + enc_pr(LOG_ERROR, "cannot get clk_MultiEnc_C clock\n"); + clks->c_clk = NULL; + ret = -ENOENT; + goto err; + } + + return 0; +err: + vpu_clk_put(dev, clks); + + return ret; +} + +static void vpu_clk_enable(struct vpu_clks *clks) +{ + u32 freq = 400; + + if (set_clock_freq && set_clock_freq <= 400) + freq = set_clock_freq; + + clk_set_rate(clks->dos_clk, freq * MHz); + clk_set_rate(clks->dos_apb_clk, freq * MHz); + + if (clock_a > 0) { + pr_info("vpu_multi: desired clock_a freq %u\n", clock_a); + 
clk_set_rate(clks->a_clk, clock_a); + } else + clk_set_rate(clks->a_clk, 666666666); + + if (clock_b > 0) { + pr_info("vpu_multi: desired clock_b freq %u\n", clock_b); + clk_set_rate(clks->b_clk, clock_b); + } else + clk_set_rate(clks->b_clk, 500 * MHz); + + if (clock_c > 0) { + pr_info("vpu_multi: desired clock_c freq %u\n", clock_c); + clk_set_rate(clks->c_clk, clock_c); + } else + clk_set_rate(clks->c_clk, 500 * MHz); + + clk_prepare_enable(clks->dos_clk); + clk_prepare_enable(clks->dos_apb_clk); + clk_prepare_enable(clks->a_clk); + clk_prepare_enable(clks->b_clk); + clk_prepare_enable(clks->c_clk); + + enc_pr(LOG_DEBUG, "dos: %ld, dos_apb: %ld, a: %ld, b: %ld, c: %ld\n", + clk_get_rate(clks->dos_clk), clk_get_rate(clks->dos_apb_clk), + clk_get_rate(clks->a_clk), clk_get_rate(clks->b_clk), + clk_get_rate(clks->c_clk)); + + /* the power on */ + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) { + pr_err("powering on wave521 for t7\n"); + vdec_poweron(VDEC_WAVE); + mdelay(5); + pr_err("wave power stauts after poweron: %d\n", vdec_on(VDEC_WAVE)); + } else { + pwr_ctrl_psci_smc(PDID_T7_DOS_WAVE, true); + mdelay(5); + pr_err("wave power stauts after poweron: %lu\n", pwr_ctrl_status_psci_smc(PDID_T7_DOS_WAVE)); + } + + /* reset */ + hw_reset(true); + mdelay(5); + hw_reset(false); + /* gate the clocks */ +#ifdef VPU_SUPPORT_CLOCK_CONTROL + pr_err("vpu_clk_enable, now gate off the clock\n"); + clk_disable(clks->c_clk); + clk_disable(clks->b_clk); + clk_disable(clks->a_clk); +#endif +} + +static void vpu_clk_disable(struct vpu_clks *clks) +{ +#ifdef VPU_SUPPORT_CLOCK_CONTROL + if (clock_gate_count > 0) +#endif + { + enc_pr(LOG_INFO, "vpu unclosed clock %d\n", clock_gate_count); + clk_disable(clks->c_clk); + clk_disable(clks->b_clk); + clk_disable(clks->a_clk); + clock_gate_count = 0; + } + clk_unprepare(clks->c_clk); + clk_unprepare(clks->b_clk); + clk_unprepare(clks->a_clk); + clk_disable_unprepare(clks->dos_apb_clk); + clk_disable_unprepare(clks->dos_clk); + /* the 
power off */ + /* the power on */ + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T7) { + pr_err("powering off wave521 for t7\n"); + vdec_poweron(VDEC_WAVE); + mdelay(5); + pr_err("wave power stauts after poweroff: %d\n", vdec_on(VDEC_WAVE)); + } else { + pwr_ctrl_psci_smc(PDID_T7_DOS_WAVE, false); + mdelay(5); + pr_err("wave power stauts after poweroff: %lu\n", pwr_ctrl_status_psci_smc(PDID_T7_DOS_WAVE)); + } +} + +/* end customer definition */ +static struct vpudrv_buffer_t s_instance_pool = {0}; +static struct vpudrv_buffer_t s_common_memory = {0}; +static struct vpu_drv_context_t s_vpu_drv_context; +static s32 s_vpu_major; +static s32 s_register_flag; +static struct device *multienc_dev; + +static s32 s_vpu_open_ref_count; +static s32 s_vpu_irq; +static bool s_vpu_irq_requested; + +static struct vpudrv_buffer_t s_vpu_register = {0}; + +static s32 s_interrupt_flag[MAX_NUM_INSTANCE]; +static wait_queue_head_t s_interrupt_wait_q[MAX_NUM_INSTANCE]; +static struct kfifo s_interrupt_pending_q[MAX_NUM_INSTANCE]; +static s32 s_fifo_alloc_flag[MAX_NUM_INSTANCE]; +static spinlock_t s_kfifo_lock = __SPIN_LOCK_UNLOCKED(s_kfifo_lock); + +static spinlock_t s_vpu_lock = __SPIN_LOCK_UNLOCKED(s_vpu_lock); +static DEFINE_SEMAPHORE(s_vpu_sem); +static struct list_head s_vbp_head = LIST_HEAD_INIT(s_vbp_head); +static struct list_head s_inst_list_head = LIST_HEAD_INIT(s_inst_list_head); +static struct tasklet_struct multienc_tasklet; +static struct platform_device *multienc_pdev; + +static struct vpu_bit_firmware_info_t s_bit_firmware_info[MAX_NUM_VPU_CORE]; + +static spinlock_t s_dma_buf_lock = __SPIN_LOCK_UNLOCKED(s_dma_buf_lock); +static struct list_head s_dma_bufp_head = LIST_HEAD_INIT(s_dma_bufp_head); + +static s32 vpu_src_addr_config(struct vpudrv_dma_buf_info_t *dma_info, + struct file *filp); +static s32 vpu_src_addr_unmap(struct vpudrv_dma_buf_info_t *dma_info, + struct file *filp); +static void vpu_dma_buffer_unmap(struct vpu_dma_cfg *cfg); + +static void dma_flush(u32 
buf_start, u32 buf_size) +{ + if (multienc_pdev) + dma_sync_single_for_device( + &multienc_pdev->dev, buf_start, + buf_size, DMA_TO_DEVICE); +} + +static void cache_flush(u32 buf_start, u32 buf_size) +{ + if (multienc_pdev) + dma_sync_single_for_cpu( + &multienc_pdev->dev, buf_start, + buf_size, DMA_FROM_DEVICE); +} + +s32 vpu_hw_reset(void) +{ + enc_pr(LOG_DEBUG, "request vpu reset from application\n"); + return 0; +} + +static void vpu_clk_config(int enable) +{ + struct vpu_clks *clks = &s_vpu_clks; + + enc_pr(LOG_INFO, " vpu clock config %d\n", enable); + + if (enable == 0) { + clock_gate_count --; + if (clock_gate_count == 0) { + clk_disable(clks->c_clk); + clk_disable(clks->b_clk); + clk_disable(clks->a_clk); + } else if (clock_gate_count < 0) + enc_pr(LOG_ERROR, "vpu clock alredy closed %d\n", + clock_gate_count); + } else { + clock_gate_count ++; + if (clock_gate_count == 1) { + clk_enable(clks->a_clk); + clk_enable(clks->b_clk); + clk_enable(clks->c_clk); + } + } +} + +static s32 vpu_alloc_dma_buffer(struct vpudrv_buffer_t *vb) +{ + if (!vb) + return -1; + + vb->phys_addr = (ulong)vmem_alloc(&s_vmem, vb->size, 0); + if ((ulong)vb->phys_addr == (ulong)-1) { + enc_pr(LOG_ERROR, + "Physical memory allocation error size=%d\n", + vb->size); + return -1; + } + + enc_pr(LOG_INFO, + "%s: vb->phys_addr 0x%lx\n", __func__, + vb->phys_addr); + return 0; +} + +static void vpu_free_dma_buffer(struct vpudrv_buffer_t *vb) +{ + if (!vb) + return; + enc_pr(LOG_INFO, "vpu_free_dma_buffer 0x%lx\n",vb->phys_addr); + + if (vb->phys_addr) + vmem_free(&s_vmem, vb->phys_addr, 0); +} + +static s32 vpu_free_instances(struct file *filp) +{ + struct vpudrv_instanace_list_t *vil, *n; + struct vpudrv_instance_pool_t *vip; + void *vip_base; + s32 instance_pool_size_per_core; + void *vdi_mutexes_base; + const s32 PTHREAD_MUTEX_T_DESTROY_VALUE = 0xdead10cc; + + enc_pr(LOG_DEBUG, "[VPUDRV] vpu_free_instances\n"); + + /* s_instance_pool.size was assigned to the size of all core once + call 
VDI_IOCTL_GET_INSTANCE_POOL by user. */ + instance_pool_size_per_core = (s_instance_pool.size/MAX_NUM_VPU_CORE); + + list_for_each_entry_safe(vil, n, &s_inst_list_head, list) { + if (vil->filp == filp) { + vip_base = (void *)(s_instance_pool.base + + (instance_pool_size_per_core*vil->core_idx)); + enc_pr(LOG_INFO, + "free_ins Idx=%d, core=%d, base=%p, sz=%d\n", + (s32)vil->inst_idx, (s32)vil->core_idx, + vip_base, + (s32)instance_pool_size_per_core); + vip = (struct vpudrv_instance_pool_t *)vip_base; + if (vip) { + /* only first 4 byte is key point + * (inUse of CodecInst in vpuapi) + * to free the corresponding instance. + */ + memset(&vip->codecInstPool[vil->inst_idx], + 0x00, 4); + +#define PTHREAD_MUTEX_T_HANDLE_SIZE 4 + vdi_mutexes_base = (vip_base + + (instance_pool_size_per_core - + PTHREAD_MUTEX_T_HANDLE_SIZE*4)); + enc_pr(LOG_INFO, + "Force destroy in user space "); + enc_pr(LOG_INFO," vdi_mutex_base=%p \n", + vdi_mutexes_base); + if (vdi_mutexes_base) { + s32 i; + for (i = 0; i < 4; i++) { + memcpy(vdi_mutexes_base, + &PTHREAD_MUTEX_T_DESTROY_VALUE, + PTHREAD_MUTEX_T_HANDLE_SIZE); + vdi_mutexes_base += + PTHREAD_MUTEX_T_HANDLE_SIZE; + } + } + } + spin_lock(&s_vpu_lock); + s_vpu_open_ref_count--; + list_del(&vil->list); + spin_unlock(&s_vpu_lock); + kfree(vil); + } + } + return 1; +} + +static s32 vpu_free_buffers(struct file *filp) +{ + struct vpudrv_buffer_pool_t *pool, *n; + struct vpudrv_buffer_t vb; + + enc_pr(LOG_DEBUG, "vpu_free_buffers\n"); + list_for_each_entry_safe(pool, n, &s_vbp_head, list) { + if (pool->filp == filp) { + vb = pool->vb; + if (vb.phys_addr) { + vpu_free_dma_buffer(&vb); + spin_lock(&s_vpu_lock); + list_del(&pool->list); + spin_unlock(&s_vpu_lock); + kfree(pool); + } + } + } + return 0; +} + +static u32 vpu_is_buffer_cached(struct file *filp, ulong vm_pgoff) +{ + struct vpudrv_buffer_pool_t *pool, *n; + struct vpudrv_buffer_t vb; + bool find = false; + u32 cached = 0; + + enc_pr(LOG_ALL, "[+]vpu_is_buffer_cached\n"); + 
spin_lock(&s_vpu_lock); + list_for_each_entry_safe(pool, n, &s_vbp_head, list) { + if (pool->filp == filp) { + vb = pool->vb; + if (((vb.phys_addr >> PAGE_SHIFT) == vm_pgoff) + && find == false){ + cached = vb.cached; + find = true; + break; + } + } + } + spin_unlock(&s_vpu_lock); + enc_pr(LOG_ALL, "[-]vpu_is_buffer_cached, ret:%d\n", cached); + return cached; +} + +static s32 vpu_dma_buf_release(struct file *filp) +{ + struct vpudrv_dma_buf_pool_t *pool, *n; + struct vpu_dma_cfg vb; + + enc_pr(LOG_DEBUG, "vpu_release_dma_buffers\n"); + list_for_each_entry_safe(pool, n, &s_dma_bufp_head, list) { + if (pool->filp == filp) { + vb = pool->dma_cfg; + if (vb.attach) { + vpu_dma_buffer_unmap(&vb); + spin_lock(&s_dma_buf_lock); + list_del(&pool->list); + spin_unlock(&s_dma_buf_lock); + kfree(pool); + } + } + } + return 0; +} + +static inline u32 get_inst_idx(u32 reg_val) +{ + u32 inst_idx; + int i; + for (i=0; i < MAX_NUM_INSTANCE; i++) + { + if (((reg_val >> i)&0x01) == 1) + break; + } + inst_idx = i; + return inst_idx; +} + +static s32 get_vpu_inst_idx(struct vpu_drv_context_t *dev, u32 *reason, + u32 empty_inst, u32 done_inst, u32 seq_inst) +{ + s32 inst_idx; + u32 reg_val; + u32 int_reason; + + int_reason = *reason; + enc_pr(LOG_INFO, + "[+]%s, reason=0x%x, empty_inst=0x%x, done_inst=0x%x\n", + __func__, int_reason, empty_inst, done_inst); + + if (int_reason & (1 << INT_BSBUF_EMPTY)) + { + reg_val = (empty_inst & 0xffff); + inst_idx = get_inst_idx(reg_val); + *reason = (1 << INT_BSBUF_EMPTY); + enc_pr(LOG_DEBUG, + "%s, RET_BS_EMPTY_INST reg_val=0x%x, inst_idx=%d\n", + __func__, reg_val, inst_idx); + goto GET_VPU_INST_IDX_HANDLED; + } + + if (int_reason & (1 << INT_INIT_SEQ)) + { + reg_val = (seq_inst & 0xffff); + inst_idx = get_inst_idx(reg_val); + *reason = (1 << INT_INIT_SEQ); + enc_pr(LOG_DEBUG, + "%s, RET_QUEUE_CMD_DONE INIT_SEQ val=0x%x, idx=%d\n", + __func__, reg_val, inst_idx); + goto GET_VPU_INST_IDX_HANDLED; + } + + if (int_reason & (1 << INT_DEC_PIC)) + { + 
reg_val = (done_inst & 0xffff); + inst_idx = get_inst_idx(reg_val); + *reason = (1 << INT_DEC_PIC); + enc_pr(LOG_INFO, + "%s, RET_QUEUE_CMD_DONE DEC_PIC val=0x%x, idx=%d\n", + __func__, reg_val, inst_idx); + + if (int_reason & (1 << INT_ENC_LOW_LATENCY)) + { + u32 ll_inst_idx; + reg_val = (done_inst >> 16); + ll_inst_idx = get_inst_idx(reg_val); + if (ll_inst_idx == inst_idx) + *reason = ((1 << INT_DEC_PIC) + | (1 << INT_ENC_LOW_LATENCY)); + enc_pr(LOG_DEBUG, "%s, LOW_LATENCY ", __func__); + enc_pr(LOG_DEBUG, "val=0x%x, idx=%d, ll_idx=%d\n", + reg_val, inst_idx, ll_inst_idx); + } + goto GET_VPU_INST_IDX_HANDLED; + } + + if (int_reason & (1 << INT_ENC_SET_PARAM)) + { + reg_val = (seq_inst & 0xffff); + inst_idx = get_inst_idx(reg_val); + *reason = (1 << INT_ENC_SET_PARAM); + enc_pr(LOG_DEBUG, + "%s, RET_QUEUE_CMD_DONE SET_PARAM val=0x%x, idx=%d\n", + __func__, reg_val, inst_idx); + goto GET_VPU_INST_IDX_HANDLED; + } + +#ifdef SUPPORT_SOURCE_RELEASE_INTERRUPT + if (int_reason & (1 << INT_ENC_SRC_RELEASE)) + { + reg_val = (done_inst & 0xffff); + inst_idx = get_inst_idx(reg_val); + *reason = (1 << INT_ENC_SRC_RELEASE); + enc_pr(LOG_DEBUG, + "%s, RET_QUEUE_CMD_DONE SRC_RELEASE ", + __func__); + enc_pr(LOG_DEBUG, + "val=0x%x, idx=%d\n", reg_val, inst_idx); + goto GET_VPU_INST_IDX_HANDLED; + } +#endif + + if (int_reason & (1 << INT_ENC_LOW_LATENCY)) + { + reg_val = (done_inst >> 16); + inst_idx = get_inst_idx(reg_val); + *reason = (1 << INT_ENC_LOW_LATENCY); + enc_pr(LOG_DEBUG, + "%s, RET_QUEUE_CMD_DONE LOW_LATENCY ", + __func__); + enc_pr(LOG_DEBUG, + "val=0x%x, idx=%d\n", reg_val, inst_idx); + goto GET_VPU_INST_IDX_HANDLED; + } + + inst_idx = -1; + *reason = 0; + enc_pr(LOG_DEBUG, + "%s, UNKNOWN INTERRUPT REASON: 0x%08x\n", + __func__, int_reason); + +GET_VPU_INST_IDX_HANDLED: + + enc_pr(LOG_INFO, "[-]%s, inst_idx=%d. 
*reason=0x%x\n", __func__, + inst_idx, *reason); + + return inst_idx; +} + +static void multienc_isr_tasklet(ulong data) +{ + u32 intr_reason; + u32 inst_index; + struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)data; + + /* notify the interrupt to user space */ + if (dev->async_queue) { + enc_pr(LOG_ALL, "kill_fasync e %s\n", __func__); + kill_fasync(&dev->async_queue, SIGIO, POLL_IN); + } + + for (inst_index=0; inst_index < MAX_NUM_INSTANCE; inst_index++) { + intr_reason = dev->interrupt_flag[inst_index]; + if (intr_reason) { + dev->interrupt_flag[inst_index] = 0; + enc_pr(LOG_INFO, + "isr_tasklet intr:0x%08x ins_index %d\n", + intr_reason, inst_index); + s_interrupt_flag[inst_index] = 1; + wake_up_interruptible(&s_interrupt_wait_q[inst_index]); + } + } + enc_pr(LOG_ALL, "[-]%s\n", __func__); +} + +static irqreturn_t vpu_irq_handler(s32 irq, void *dev_id) +{ + struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)dev_id; + /* this can be removed. + * it also work in VPU_WaitInterrupt of API function + */ + u32 core; + u32 intr_reason; + u32 intr_inst_index; + + enc_pr(LOG_ALL, "[+]%s\n", __func__); + +#ifdef VPU_IRQ_CONTROL + disable_irq_nosync(s_vpu_irq); +#endif + + intr_inst_index = 0; + intr_reason = 0; + + for (core = 0; core < MAX_NUM_VPU_CORE; core++) { + u32 empty_inst; + u32 done_inst; + u32 seq_inst; + u32 i, reason, reason_clr; + if (s_bit_firmware_info[core].size == 0) { + /* it means that we didn't get an information + * the current core from API layer. + * No core activated. 
+ */ + enc_pr(LOG_ERROR, + "s_bit_firmware_info[core].size is zero\n"); + continue; + } + if (ReadVpuRegister(VP5_VPU_VPU_INT_STS) == 0) + continue; + reason = ReadVpuRegister(VP5_VPU_INT_REASON); + empty_inst = ReadVpuRegister(VP5_RET_BS_EMPTY_INST); + done_inst = ReadVpuRegister(VP5_RET_QUEUE_CMD_DONE_INST); + seq_inst = ReadVpuRegister(VP5_RET_SEQ_DONE_INSTANCE_INFO); + reason_clr = reason; + enc_pr(LOG_INFO, "irq reason=0x%x, inst: empty=0x%x,", + reason, empty_inst); + enc_pr(LOG_INFO, " done=0x%x, seq_inst=0x%x\n", + done_inst, seq_inst); + for (i = 0; i < MAX_NUM_INSTANCE; i++) { + if (empty_inst == 0 && done_inst == 0 && seq_inst == 0) + break; + + intr_reason = reason; + intr_inst_index = get_vpu_inst_idx(dev, + &intr_reason, + empty_inst, + done_inst, + seq_inst); + + enc_pr(LOG_INFO, "irq instance: %d", intr_inst_index); + enc_pr(LOG_INFO, " reason: %08x", intr_reason); + enc_pr(LOG_INFO, " empty_inst: %08x", empty_inst); + enc_pr(LOG_INFO, " done_inst: %08x", done_inst); + enc_pr(LOG_INFO, " seq_inst: %08x\n", seq_inst); + + if (intr_inst_index < MAX_NUM_INSTANCE) { + if (intr_reason == (1 << INT_BSBUF_EMPTY)) { + empty_inst = empty_inst & + ~(1 << intr_inst_index); + WriteVpuRegister(VP5_RET_BS_EMPTY_INST, + empty_inst); + if (empty_inst == 0) + reason &= + ~(1 << INT_BSBUF_EMPTY); + enc_pr(LOG_INFO, + "%s, RET_BS_EMPTY_INST Clear ", + __func__); + enc_pr(LOG_INFO, + "inst=0x%x, index=%d\n", + empty_inst, intr_inst_index); + } + if (intr_reason == (1 << INT_DEC_PIC)) { + done_inst = done_inst & + ~(1 << intr_inst_index); + WriteVpuRegister( + VP5_RET_QUEUE_CMD_DONE_INST, + done_inst); + if (done_inst == 0) + reason &= ~(1 << INT_DEC_PIC); + enc_pr(LOG_INFO, + "%s, RET_QUEUE_CMD_DONE ", + __func__); + enc_pr(LOG_INFO, + "inst=0x%x, index=%d\n", + done_inst, intr_inst_index); + } + if ((intr_reason == (1 << INT_INIT_SEQ)) || + (intr_reason == + (1 << INT_ENC_SET_PARAM))) { + seq_inst = seq_inst & + ~(1 << intr_inst_index); + WriteVpuRegister + 
(VP5_RET_SEQ_DONE_INSTANCE_INFO, + seq_inst); + if (seq_inst == 0) + reason &= ~(1 << INT_INIT_SEQ | + 1 << INT_ENC_SET_PARAM); + + enc_pr(LOG_INFO, "%s, RET_", __func__); + enc_pr(LOG_INFO, + "SEQ_DONE_INSTANCE_INFO inst"); + enc_pr(LOG_INFO, + "=0x%x, intr_inst_index=%d\n", + done_inst, intr_inst_index); + } + if (intr_reason == (1 << INT_ENC_LOW_LATENCY)) { + done_inst = (done_inst >> 16); + done_inst = done_inst + & ~(1 << intr_inst_index); + done_inst = (done_inst << 16); + WriteVpuRegister( + VP5_RET_QUEUE_CMD_DONE_INST, + done_inst); + if (done_inst == 0) + reason &= + ~(1 << INT_ENC_LOW_LATENCY); + + enc_pr(LOG_INFO, + "%s, LOW_LATENCY Clear ", + __func__); + enc_pr(LOG_INFO, + "inst=0x%x, index=%d\n", + done_inst, intr_inst_index); + } + if (!kfifo_is_full( + &s_interrupt_pending_q[intr_inst_index] + )) { + if (intr_reason == + ((1 << INT_ENC_PIC) | + (1 << + INT_ENC_LOW_LATENCY))) { + u32 ll_intr_reason = + (1 << + INT_ENC_PIC); + kfifo_in_spinlocked( + &s_interrupt_pending_q[ + intr_inst_index], + &ll_intr_reason, + sizeof(u32), + &s_kfifo_lock); + } else + kfifo_in_spinlocked( + &s_interrupt_pending_q[ + intr_inst_index], + &intr_reason, + sizeof(u32), + &s_kfifo_lock); + } + else { + enc_pr(LOG_ERROR, "kfifo_is_full "); + enc_pr(LOG_ERROR, + "kfifo_count %d index %d\n", + kfifo_len( + &s_interrupt_pending_q[ + intr_inst_index]), + intr_inst_index); + } + dev->interrupt_flag[intr_inst_index] = + intr_reason; + } + else { + enc_pr(LOG_ERROR, + "intr_inst_index (%d) is wrong \n", + intr_inst_index); + } + enc_pr(LOG_INFO, + "intr_reason: 0x%08x\n", intr_reason); + } + if (reason != 0) { + enc_pr(LOG_ERROR, "INTERRUPT REASON REMAINED: %08x\n", + reason); + } + WriteVpuRegister(VP5_VPU_INT_REASON_CLEAR, reason_clr); + WriteVpuRegister(VP5_VPU_VINT_CLEAR, 0x1); + } + + tasklet_schedule(&multienc_tasklet); + enc_pr(LOG_ALL, "[-]%s\n", __func__); + return IRQ_HANDLED; +} + +#define RESETCTRL_RESET1_LEVEL (0xfe000044) + +static void hw_reset(bool reset) +{ + 
void __iomem *reset_addr; + uint32_t val; + + reset_addr = ioremap_nocache(RESETCTRL_RESET1_LEVEL, 8); + if (reset_addr == NULL) { + enc_pr(LOG_ERROR, "%s: Failed to ioremap\n", __func__); + return; + } + + val = __raw_readl(reset_addr); + if (reset) + val &= ~(1 << 28); + else + val |= (1 << 28); + __raw_writel(val, reset_addr); + + mdelay(5); + + iounmap(reset_addr); + if (reset) + enc_pr(LOG_INFO, "%s:reset\n", __func__); + else + enc_pr(LOG_INFO, "%s:release reset\n", __func__); + +} + +static s32 vpu_open(struct inode *inode, struct file *filp) +{ + bool first_open = false; + s32 r = 0; + + //enc_pr(LOG_DEBUG, "[+] %s, filp=%lu, %lu, f_count=%lld\n", __func__, + //(unsigned long)filp, ( ((unsigned long)filp)%8), filp->f_count.counter); + spin_lock(&s_vpu_lock); + s_vpu_drv_context.open_count++; + if (s_vpu_drv_context.open_count == 1) { + first_open = true; + } /*else { + r = -EBUSY; + s_vpu_drv_context.open_count--; + spin_unlock(&s_vpu_lock); + return r; + }*/ + filp->private_data = (void *)(&s_vpu_drv_context); + spin_unlock(&s_vpu_lock); + if (first_open && !use_reserve) { +#ifdef CONFIG_CMA + s_video_memory.size = cma_cfg_size; + s_video_memory.phys_addr = (ulong)codec_mm_alloc_for_dma(VPU_DEV_NAME, cma_cfg_size >> PAGE_SHIFT, 0, 0); + + if (s_video_memory.phys_addr) { + enc_pr(LOG_DEBUG, "allocating phys 0x%lx, ", s_video_memory.phys_addr); + enc_pr(LOG_DEBUG, "virt addr 0x%lx, size %dk\n", s_video_memory.base, s_video_memory.size >> 10); + + if (vmem_init(&s_vmem, s_video_memory.phys_addr, s_video_memory.size) < 0) { + enc_pr(LOG_ERROR, "fail to init vmem system\n"); + r = -ENOMEM; + + codec_mm_free_for_dma(VPU_DEV_NAME, (u32)s_video_memory.phys_addr); + vmem_exit(&s_vmem); + memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t)); + memset(&s_vmem, 0, sizeof(struct video_mm_t)); + } + } else { + enc_pr(LOG_ERROR, "Failed to alloc dma buffer %s, phys:0x%lx\n", VPU_DEV_NAME, s_video_memory.phys_addr); + + if (s_video_memory.phys_addr) + 
codec_mm_free_for_dma(VPU_DEV_NAME, (u32)s_video_memory.phys_addr); + + s_video_memory.phys_addr = 0; + r = -ENOMEM; + } +#else + enc_pr(LOG_ERROR, "No CMA and reserved memory for MultiEnc!!!\n"); + r = -ENOMEM; +#endif + } else if (!s_video_memory.phys_addr) { + enc_pr(LOG_ERROR, "MultiEnc memory is not malloced yet wait & retry!\n"); + r = -EBUSY; + } + + if (first_open) { + if ((s_vpu_irq >= 0) && (s_vpu_irq_requested == false)) { + s32 err; + + err = request_irq(s_vpu_irq, vpu_irq_handler, 0, "MultiEnc-irq", (void *)(&s_vpu_drv_context)); + + if (err) { + enc_pr(LOG_ERROR, "Failed to register irq handler\n"); + spin_lock(&s_vpu_lock); + s_vpu_drv_context.open_count--; + spin_unlock(&s_vpu_lock); + return -EFAULT; + } + + s_vpu_irq_requested = true; + } + + /* enable vpu clks and power*/ + vpu_clk_enable(&s_vpu_clks); + } + + if (r != 0) { + spin_lock(&s_vpu_lock); + s_vpu_drv_context.open_count--; + spin_unlock(&s_vpu_lock); + } + + enc_pr(LOG_DEBUG, "[-] %s, ret: %d\n", __func__, r); + return r; +} +ulong phys_addrY; +ulong phys_addrU; +ulong phys_addrV; + +static long vpu_ioctl(struct file *filp, u32 cmd, ulong arg) +{ + s32 ret = 0; + struct vpu_drv_context_t *dev = + (struct vpu_drv_context_t *)filp->private_data; + + switch (cmd) { + case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY: + { + struct vpudrv_buffer_pool_t *vbp; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n"); + ret = down_interruptible(&s_vpu_sem); + if (ret == 0) { + vbp = kzalloc(sizeof(*vbp), GFP_KERNEL); + if (!vbp) { + up(&s_vpu_sem); + return -ENOMEM; + } + + ret = copy_from_user(&(vbp->vb), + (struct vpudrv_buffer_t *)arg, + sizeof(struct vpudrv_buffer_t)); + if (ret) { + kfree(vbp); + up(&s_vpu_sem); + return -EFAULT; + } + + ret = vpu_alloc_dma_buffer(&(vbp->vb)); + if (ret == -1) { + ret = -ENOMEM; + kfree(vbp); + up(&s_vpu_sem); + break; + } + ret = copy_to_user((void __user *)arg, + &(vbp->vb), + sizeof(struct vpudrv_buffer_t)); + if (ret) { + kfree(vbp); + ret = -EFAULT; + 
up(&s_vpu_sem); + break; + } + + vbp->filp = filp; + spin_lock(&s_vpu_lock); + list_add(&vbp->list, &s_vbp_head); + spin_unlock(&s_vpu_lock); + + up(&s_vpu_sem); + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n"); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32: + { + struct vpudrv_buffer_pool_t *vbp; + struct compat_vpudrv_buffer_t buf32; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n"); + ret = down_interruptible(&s_vpu_sem); + if (ret == 0) { + vbp = kzalloc(sizeof(*vbp), GFP_KERNEL); + if (!vbp) { + up(&s_vpu_sem); + return -ENOMEM; + } + + ret = copy_from_user(&buf32, + (struct compat_vpudrv_buffer_t *)arg, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret) { + kfree(vbp); + up(&s_vpu_sem); + return -EFAULT; + } + + vbp->vb.size = buf32.size; + vbp->vb.cached = buf32.cached; + vbp->vb.phys_addr = + (ulong)buf32.phys_addr; + vbp->vb.virt_addr = + (ulong)buf32.virt_addr; + ret = vpu_alloc_dma_buffer(&(vbp->vb)); + if (ret == -1) { + ret = -ENOMEM; + kfree(vbp); + up(&s_vpu_sem); + break; + } + + buf32.size = vbp->vb.size; + buf32.phys_addr = + (compat_ulong_t)vbp->vb.phys_addr; + buf32.virt_addr = + (compat_ulong_t)vbp->vb.virt_addr; + + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret) { + kfree(vbp); + ret = -EFAULT; + up(&s_vpu_sem); + break; + } + + vbp->filp = filp; + spin_lock(&s_vpu_lock); + list_add(&vbp->list, &s_vbp_head); + spin_unlock(&s_vpu_lock); + + up(&s_vpu_sem); + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n"); + } + break; +#endif + case VDI_IOCTL_FREE_PHYSICALMEMORY: + { + struct vpudrv_buffer_pool_t *vbp, *n; + struct vpudrv_buffer_t vb; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_FREE_PHYSICALMEMORY\n"); + ret = down_interruptible(&s_vpu_sem); + if (ret == 0) { + ret = copy_from_user(&vb, + (struct vpudrv_buffer_t *)arg, + sizeof(struct vpudrv_buffer_t)); + if (ret) { + up(&s_vpu_sem); + return -EACCES; + } + + 
if (vb.phys_addr) + vpu_free_dma_buffer(&vb); + + list_for_each_entry_safe(vbp, n, + &s_vbp_head, list) { + if (vbp->vb.phys_addr + == vb.phys_addr) { + spin_lock(&s_vpu_lock); + list_del(&vbp->list); + spin_unlock(&s_vpu_lock); + kfree(vbp); + break; + } + } + up(&s_vpu_sem); + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_FREE_PHYSICALMEMORY\n"); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_FREE_PHYSICALMEMORY32: + { + struct vpudrv_buffer_pool_t *vbp, *n; + struct compat_vpudrv_buffer_t buf32; + struct vpudrv_buffer_t vb; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_FREE_PHYSICALMEMORY32\n"); + ret = down_interruptible(&s_vpu_sem); + if (ret == 0) { + ret = copy_from_user(&buf32, + (struct compat_vpudrv_buffer_t *)arg, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret) { + up(&s_vpu_sem); + return -EACCES; + } + + vb.size = buf32.size; + vb.phys_addr = + (ulong)buf32.phys_addr; + vb.virt_addr = + (ulong)buf32.virt_addr; + + if (vb.phys_addr) + vpu_free_dma_buffer(&vb); + /*TODO check equal condition*/ + list_for_each_entry_safe(vbp, n, + &s_vbp_head, list) { + if ((compat_ulong_t)vbp->vb.phys_addr + == buf32.phys_addr) { + spin_lock(&s_vpu_lock); + list_del(&vbp->list); + spin_unlock(&s_vpu_lock); + kfree(vbp); + break; + } + } + up(&s_vpu_sem); + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_FREE_PHYSICALMEMORY32\n"); + } + break; +#endif + case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO: + { + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY\n"); + if (s_video_memory.phys_addr != 0) { + ret = copy_to_user((void __user *)arg, + &s_video_memory, + sizeof(struct vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else { + ret = -EFAULT; + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY\n"); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32: + { + struct compat_vpudrv_buffer_t buf32; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY32\n"); + memset(&buf32, 0, sizeof(struct compat_vpudrv_buffer_t)); + buf32.size = 
s_video_memory.size; + buf32.phys_addr = + (compat_ulong_t)s_video_memory.phys_addr; + buf32.virt_addr = + (compat_ulong_t)s_video_memory.virt_addr; + if (s_video_memory.phys_addr != 0) { + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else { + ret = -EFAULT; + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY32\n"); + } + break; +#endif + case VDI_IOCTL_WAIT_INTERRUPT: + { + struct vpudrv_intr_info_t info; + u32 intr_inst_index; + u32 intr_reason_in_q; + u32 interrupt_flag_in_q; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_WAIT_INTERRUPT\n"); + ret = copy_from_user(&info, + (struct vpudrv_intr_info_t *)arg, + sizeof(struct vpudrv_intr_info_t)); + if (ret != 0) + return -EFAULT; + intr_inst_index = info.intr_inst_index; + + intr_reason_in_q = 0; + if (intr_inst_index >= MAX_NUM_INSTANCE) + { + enc_pr(LOG_ALL, + "error, intr_inst_index is invalid !\n"); + return -EFAULT; + } + interrupt_flag_in_q = kfifo_out_spinlocked( + &s_interrupt_pending_q[intr_inst_index], + &intr_reason_in_q, sizeof(u32), + &s_kfifo_lock); + if (interrupt_flag_in_q > 0) + { + dev->interrupt_reason[intr_inst_index] = + intr_reason_in_q; + enc_pr(LOG_ALL, + "Intr Remain in Q: inst_index= %d, ", + intr_inst_index); + enc_pr(LOG_ALL, "reason= 0x%x, flag= %d\n", + intr_reason_in_q, + interrupt_flag_in_q); + goto INTERRUPT_REMAIN_IN_QUEUE; + } + + ret = wait_event_interruptible_timeout( + s_interrupt_wait_q[intr_inst_index], + s_interrupt_flag[intr_inst_index] != 0, + msecs_to_jiffies(info.timeout)); + if (!ret) { + ret = -ETIME; + break; + } + + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + + intr_reason_in_q = 0; + interrupt_flag_in_q = kfifo_out_spinlocked( + &s_interrupt_pending_q[intr_inst_index], + &intr_reason_in_q, sizeof(u32), &s_kfifo_lock); + if (interrupt_flag_in_q > 0) { + dev->interrupt_reason[intr_inst_index] = + intr_reason_in_q; + } + else { + 
dev->interrupt_reason[intr_inst_index] = 0; + } + enc_pr(LOG_INFO, + "inst_index(%d),s_interrupt_flag(%d), ", + intr_inst_index, + s_interrupt_flag[intr_inst_index]); + enc_pr(LOG_INFO, + "reason(0x%08lx)\n", + dev->interrupt_reason[intr_inst_index]); + +INTERRUPT_REMAIN_IN_QUEUE: + info.intr_reason = + dev->interrupt_reason[intr_inst_index]; + s_interrupt_flag[intr_inst_index] = 0; + dev->interrupt_reason[intr_inst_index] = 0; + +#ifdef VPU_IRQ_CONTROL + enable_irq(s_vpu_irq); +#endif + ret = copy_to_user((void __user *)arg, + &info, sizeof(struct vpudrv_intr_info_t)); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_WAIT_INTERRUPT\n"); + if (ret != 0) + return -EFAULT; + } + break; + case VDI_IOCTL_SET_CLOCK_GATE: + { + u32 clkgate; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_SET_CLOCK_GATE\n"); + if (get_user(clkgate, (u32 __user *) arg)) + return -EFAULT; +#ifdef VPU_SUPPORT_CLOCK_CONTROL + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + return -EFAULT; + vpu_clk_config(clkgate); + up(&s_vpu_sem); +#endif + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_SET_CLOCK_GATE\n"); + } + break; + case VDI_IOCTL_GET_INSTANCE_POOL: + { + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_INSTANCE_POOL\n"); + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + break; + + if (s_instance_pool.base != 0) { + ret = copy_to_user((void __user *)arg, + &s_instance_pool, + sizeof(struct vpudrv_buffer_t)); + ret = (ret != 0) ? 
-EFAULT : 0; + } else { + ret = copy_from_user(&s_instance_pool, + (struct vpudrv_buffer_t *)arg, + sizeof(struct vpudrv_buffer_t)); + if (ret == 0) { + s_instance_pool.size = + PAGE_ALIGN( + s_instance_pool.size); + s_instance_pool.base = + (ulong)vmalloc( + s_instance_pool.size); + s_instance_pool.phys_addr = + s_instance_pool.base; + if (s_instance_pool.base == 0) { + ret = -EFAULT; + up(&s_vpu_sem); + break; + } + /*clearing memory*/ + memset((void *)s_instance_pool.base, + 0, s_instance_pool.size); + ret = copy_to_user((void __user *)arg, + &s_instance_pool, + sizeof(struct + vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else + ret = -EFAULT; + } + up(&s_vpu_sem); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_INSTANCE_POOL\n"); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_GET_INSTANCE_POOL32: + { + struct compat_vpudrv_buffer_t buf32; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_INSTANCE_POOL32\n"); + memset(&buf32, 0, sizeof(struct compat_vpudrv_buffer_t)); + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + break; + if (s_instance_pool.base != 0) { + buf32.size = s_instance_pool.size; + buf32.phys_addr = + (compat_ulong_t) + s_instance_pool.phys_addr; + buf32.virt_addr = + (compat_ulong_t) + s_instance_pool.virt_addr; + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof(struct + compat_vpudrv_buffer_t)); + ret = (ret != 0) ? 
-EFAULT : 0; + } else { + ret = copy_from_user(&buf32, + (struct compat_vpudrv_buffer_t *)arg, + sizeof(struct + compat_vpudrv_buffer_t)); + if (ret == 0) { + s_instance_pool.size = buf32.size; + s_instance_pool.size = + PAGE_ALIGN( + s_instance_pool.size); + s_instance_pool.base = + (ulong)vmalloc( + s_instance_pool.size); + s_instance_pool.phys_addr = + s_instance_pool.base; + buf32.size = + s_instance_pool.size; + buf32.phys_addr = + (compat_ulong_t) + s_instance_pool.phys_addr; + buf32.base = + (compat_ulong_t) + s_instance_pool.base; + buf32.virt_addr = + (compat_ulong_t) + s_instance_pool.virt_addr; + if (s_instance_pool.base == 0) { + ret = -EFAULT; + up(&s_vpu_sem); + break; + } + /*clearing memory*/ + memset((void *)s_instance_pool.base, + 0x0, s_instance_pool.size); + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof(struct + compat_vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else + ret = -EFAULT; + } + up(&s_vpu_sem); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_INSTANCE_POOL32\n"); + } + break; +#endif + case VDI_IOCTL_GET_COMMON_MEMORY: + { + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_COMMON_MEMORY\n"); + if (s_common_memory.phys_addr != 0) { + ret = copy_to_user((void __user *)arg, + &s_common_memory, + sizeof(struct vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else { + ret = copy_from_user(&s_common_memory, + (struct vpudrv_buffer_t *)arg, + sizeof(struct vpudrv_buffer_t)); + if (ret != 0) { + ret = -EFAULT; + break; + } + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + break; + if (vpu_alloc_dma_buffer( + &s_common_memory) != -1) { + ret = copy_to_user((void __user *)arg, + &s_common_memory, + sizeof(struct + vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else + ret = -EFAULT; + up(&s_vpu_sem); + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_COMMON_MEMORY\n"); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_GET_COMMON_MEMORY32: + { + struct compat_vpudrv_buffer_t buf32; + + enc_pr(LOG_ALL, + 
"[+]VDI_IOCTL_GET_COMMON_MEMORY32\n"); + + memset(&buf32, 0, sizeof(struct compat_vpudrv_buffer_t)); + buf32.size = s_common_memory.size; + buf32.phys_addr = + (compat_ulong_t) + s_common_memory.phys_addr; + buf32.virt_addr = + (compat_ulong_t) + s_common_memory.virt_addr; + if (s_common_memory.phys_addr != 0) { + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else { + ret = copy_from_user(&buf32, + (struct compat_vpudrv_buffer_t *)arg, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret != 0) { + ret = -EFAULT; + break; + } + s_common_memory.size = buf32.size; + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + break; + if (vpu_alloc_dma_buffer( + &s_common_memory) != -1) { + buf32.size = + s_common_memory.size; + buf32.phys_addr = + (compat_ulong_t) + s_common_memory.phys_addr; + buf32.virt_addr = + (compat_ulong_t) + s_common_memory.virt_addr; + ret = copy_to_user((void __user *)arg, + &buf32, sizeof(struct + compat_vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + } else + ret = -EFAULT; + up(&s_vpu_sem); + } + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_COMMON_MEMORY32\n"); + } + break; +#endif + case VDI_IOCTL_OPEN_INSTANCE: + { + struct vpudrv_inst_info_t inst_info; + struct vpudrv_instanace_list_t *vil, *n; + + enc_pr(LOG_DEBUG, + "[+]VDI_IOCTL_OPEN_INSTANCE\n"); + + vil = kzalloc(sizeof(*vil), GFP_KERNEL); + if (!vil) + return -ENOMEM; + + if (copy_from_user(&inst_info, + (struct vpudrv_inst_info_t *)arg, + sizeof(struct vpudrv_inst_info_t))) + { + kfree(vil); + return -EFAULT; + } + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + { + kfree(vil); + break; + } + vil->inst_idx = inst_info.inst_idx; + vil->core_idx = inst_info.core_idx; + vil->filp = filp; + /* counting the current open instance number */ + inst_info.inst_open_count = 0; + spin_lock(&s_vpu_lock); + list_add(&vil->list, &s_inst_list_head); + list_for_each_entry_safe(vil, n, + &s_inst_list_head, list) { + 
if (vil->core_idx == inst_info.core_idx) + inst_info.inst_open_count++; + } + if (inst_info.inst_idx >= MAX_NUM_INSTANCE) + { + enc_pr(LOG_ALL, + "error, inst_info.inst_idx is invalid !\n"); + kfree(vil); + spin_unlock(&s_vpu_lock); + return -EFAULT; + } + kfifo_reset( + &s_interrupt_pending_q[inst_info.inst_idx]); + + /* flag just for that vpu is in opened or closed */ + s_vpu_open_ref_count++; + spin_unlock(&s_vpu_lock); + up(&s_vpu_sem); + if (copy_to_user((void __user *)arg, + &inst_info, + sizeof(struct vpudrv_inst_info_t))) { + kfree(vil); + return -EFAULT; + } + + enc_pr(LOG_DEBUG, + "[-]VDI_IOCTL_OPEN_INSTANCE "); + enc_pr(LOG_DEBUG, + "core_idx = %d, inst_idx = %d, ", + (u32)inst_info.core_idx, + (u32)inst_info.inst_idx); + enc_pr(LOG_DEBUG, + "s_vpu_open_ref_count = %d, ", + s_vpu_open_ref_count); + enc_pr(LOG_DEBUG, + "inst_open_count = %d\n", + inst_info.inst_open_count); + } + break; + case VDI_IOCTL_CLOSE_INSTANCE: + { + struct vpudrv_inst_info_t inst_info; + struct vpudrv_instanace_list_t *vil, *n; + u32 found = 0; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_CLOSE_INSTANCE\n"); + if (copy_from_user(&inst_info, + (struct vpudrv_inst_info_t *)arg, + sizeof(struct vpudrv_inst_info_t))) + return -EFAULT; + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + break; + + list_for_each_entry_safe(vil, n, + &s_inst_list_head, list) { + if (vil->inst_idx == inst_info.inst_idx && + vil->core_idx == inst_info.core_idx) { + spin_lock(&s_vpu_lock); + list_del(&vil->list); + spin_unlock(&s_vpu_lock); + kfree(vil); + found = 1; + break; + } + } + + if (found == 0) { + up(&s_vpu_sem); + return -EINVAL; + } + + /* counting the current open instance number */ + inst_info.inst_open_count = 0; + list_for_each_entry_safe(vil, n, + &s_inst_list_head, list) { + if (vil->core_idx == inst_info.core_idx) + inst_info.inst_open_count++; + } + + kfifo_reset( + &s_interrupt_pending_q[inst_info.inst_idx]); + /* flag just for that vpu is in opened or closed */ + spin_lock(&s_vpu_lock); 
+ s_vpu_open_ref_count--; + spin_unlock(&s_vpu_lock); + up(&s_vpu_sem); + if (copy_to_user((void __user *)arg, + &inst_info, + sizeof(struct vpudrv_inst_info_t))) + return -EFAULT; + + enc_pr(LOG_DEBUG, + "[-]VDI_IOCTL_CLOSE_INSTANCE "); + enc_pr(LOG_DEBUG, + "core_idx= %d, inst_idx= %d, ", + (u32)inst_info.core_idx, + (u32)inst_info.inst_idx); + enc_pr(LOG_DEBUG, + "s_vpu_open_ref_count= %d, ", + s_vpu_open_ref_count); + enc_pr(LOG_DEBUG, + "inst_open_count= %d\n", + inst_info.inst_open_count); + } + break; + case VDI_IOCTL_GET_INSTANCE_NUM: + { + struct vpudrv_inst_info_t inst_info; + struct vpudrv_instanace_list_t *vil, *n; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_INSTANCE_NUM\n"); + + ret = copy_from_user(&inst_info, + (struct vpudrv_inst_info_t *)arg, + sizeof(struct vpudrv_inst_info_t)); + if (ret != 0) + break; + + inst_info.inst_open_count = 0; + + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(vil, n, + &s_inst_list_head, list) { + if (vil->core_idx == inst_info.core_idx) + inst_info.inst_open_count++; + } + spin_unlock(&s_vpu_lock); + + ret = copy_to_user((void __user *)arg, + &inst_info, + sizeof(struct vpudrv_inst_info_t)); + + enc_pr(LOG_DEBUG, + "[-]VDI_IOCTL_GET_INSTANCE_NUM "); + enc_pr(LOG_DEBUG, + "core_idx=%d, inst_idx=%d, open_count=%d\n", + (u32)inst_info.core_idx, + (u32)inst_info.inst_idx, + inst_info.inst_open_count); + } + break; + case VDI_IOCTL_RESET: + { + vpu_hw_reset(); + } + break; + case VDI_IOCTL_GET_REGISTER_INFO: + { + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_REGISTER_INFO\n"); + ret = copy_to_user((void __user *)arg, + &s_vpu_register, + sizeof(struct vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_REGISTER_INFO "); + enc_pr(LOG_ALL, + "s_vpu_register.phys_addr=0x%lx, ", + s_vpu_register.phys_addr); + enc_pr(LOG_ALL, + "s_vpu_register.virt_addr=0x%lx, ", + s_vpu_register.virt_addr); + enc_pr(LOG_ALL, + "s_vpu_register.size=0x%x\n", + s_vpu_register.size); + } + break; +#ifdef CONFIG_COMPAT 
+ case VDI_IOCTL_GET_REGISTER_INFO32: + { + struct compat_vpudrv_buffer_t buf32; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_GET_REGISTER_INFO32\n"); + + memset(&buf32, 0, sizeof(struct compat_vpudrv_buffer_t)); + buf32.size = s_vpu_register.size; + buf32.phys_addr = + (compat_ulong_t) + s_vpu_register.phys_addr; + buf32.virt_addr = + (compat_ulong_t) + s_vpu_register.virt_addr; + ret = copy_to_user((void __user *)arg, + &buf32, + sizeof( + struct compat_vpudrv_buffer_t)); + if (ret != 0) + ret = -EFAULT; + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_GET_REGISTER_INFO32 "); + enc_pr(LOG_ALL, + "s_vpu_register.phys_addr=0x%lx, ", + s_vpu_register.phys_addr); + enc_pr(LOG_ALL, + "s_vpu_register.virt_addr=0x%lx, ", + s_vpu_register.virt_addr); + enc_pr(LOG_ALL, + "s_vpu_register.size=0x%x\n", + s_vpu_register.size); + } + break; + case VDI_IOCTL_FLUSH_BUFFER32: + { + struct vpudrv_buffer_pool_t *pool, *n; + struct compat_vpudrv_buffer_t buf32; + struct vpudrv_buffer_t vb; + bool find = false; + u32 cached = 0; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_FLUSH_BUFFER32\n"); + + ret = copy_from_user(&buf32, + (struct compat_vpudrv_buffer_t *)arg, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret) + return -EFAULT; + + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(pool, n, + &s_vbp_head, list) { + if (pool->filp == filp) { + vb = pool->vb; + if (((compat_ulong_t)vb.phys_addr <= + buf32.phys_addr) && + (((compat_ulong_t)vb.phys_addr + + vb.size) + > buf32.phys_addr) + && find == false){ + cached = vb.cached; + find = true; + break; + } + } + } + spin_unlock(&s_vpu_lock); + + if (find && cached) + dma_flush( + (u32)buf32.phys_addr, + (u32)buf32.size); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_FLUSH_BUFFER32\n"); + } + break; +#endif + case VDI_IOCTL_FLUSH_BUFFER: + { + struct vpudrv_buffer_pool_t *pool, *n; + struct vpudrv_buffer_t vb, buf; + bool find = false; + u32 cached = 0; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_FLUSH_BUFFER\n"); + + ret = copy_from_user(&buf, + (struct vpudrv_buffer_t *)arg, + 
sizeof(struct vpudrv_buffer_t)); + if (ret) + return -EFAULT; + + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(pool, n, + &s_vbp_head, list) { + if (pool->filp == filp) { + vb = pool->vb; + if ((vb.phys_addr <= buf.phys_addr) + && ((vb.phys_addr + vb.size) + > buf.phys_addr) + && find == false){ + cached = vb.cached; + find = true; + break; + } + } + } + spin_unlock(&s_vpu_lock); + if (find && cached) + dma_flush( + (u32)buf.phys_addr, + (u32)buf.size); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_FLUSH_BUFFER\n"); + } + break; + case VDI_IOCTL_CACHE_INV_BUFFER: + { + struct vpudrv_buffer_t buf; + struct vpudrv_buffer_pool_t *pool, *n; + struct vpudrv_buffer_t vb; + bool find = false; + u32 cached = 0; + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_CACHE_INV_BUFFER\n"); + + ret = copy_from_user(&buf, + (struct vpudrv_buffer_t *)arg, + sizeof(struct vpudrv_buffer_t)); + if (ret) + return -EFAULT; + + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(pool, n, &s_vbp_head, list) { + if (pool->filp == filp) { + vb = pool->vb; + if ((vb.phys_addr <= buf.phys_addr) && ((vb.phys_addr + vb.size) > buf.phys_addr) && find == false) { + cached = vb.cached; + find = true; + break; + } + } + } + spin_unlock(&s_vpu_lock); + if (find && cached) { + //pr_err("[%d]doing cache flush for %p~%p\n", __LINE__, (long)(buf.phys_addr), (long)(buf.phys_addr+buf.size)); + cache_flush((u32)buf.phys_addr,(u32)buf.size); + } + + enc_pr(LOG_ALL,"[-]VDI_IOCTL_CACHE_INV_BUFFER\n"); + } + break; +#ifdef CONFIG_COMPAT + case VDI_IOCTL_CACHE_INV_BUFFER32: + { + struct compat_vpudrv_buffer_t buf32; + struct vpudrv_buffer_pool_t *pool, *n; + struct vpudrv_buffer_t vb; + bool find = false; + u32 cached = 0; + enc_pr(LOG_ALL, "[+]VDI_IOCTL_CACHE_INV_BUFFER32\n"); + + ret = copy_from_user(&buf32, + (struct compat_vpudrv_buffer_t *)arg, + sizeof(struct compat_vpudrv_buffer_t)); + if (ret) + return -EFAULT; + + spin_lock(&s_vpu_lock); + list_for_each_entry_safe(pool, n, + &s_vbp_head, list) { + if (pool->filp == filp) { + 
vb = pool->vb; + if (((compat_ulong_t)vb.phys_addr + <= buf32.phys_addr) + && (((compat_ulong_t)vb.phys_addr + + vb.size) > buf32.phys_addr) + && find == false){ + cached = vb.cached; + find = true; + break; + } + } + } + spin_unlock(&s_vpu_lock); + + if (find && cached) { + cache_flush((u32)buf32.phys_addr, (u32)buf32.size); + + if (dump_es) { + pr_err("dump es frame, size=%u\n", (u32)buf32.size); + dump_data((u32)buf32.phys_addr, (u32)buf32.size); + } + } + enc_pr(LOG_INFO, "[-]VVDI_IOCTL_CACHE_INV_BUFFER32\n"); + } + break; +#endif + case VDI_IOCTL_CONFIG_DMA: + { + struct vpudrv_dma_buf_info_t dma_info; + enc_pr(LOG_DEBUG, + "[+]VDI_IOCTL_CONFIG_DMA_BUF\n"); + + if (copy_from_user(&dma_info, + (struct vpudrv_dma_buf_info_t *)arg, + sizeof(struct vpudrv_dma_buf_info_t))) + return -EFAULT; + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + break; + if (vpu_src_addr_config(&dma_info, filp)) { + up(&s_vpu_sem); + enc_pr(LOG_ERROR, + "src addr config error\n"); + ret = -EFAULT; + break; + } + up(&s_vpu_sem); + ret = copy_to_user((void __user *)arg, + &dma_info, + sizeof(struct vpudrv_dma_buf_info_t)); + if (ret) { + ret = -EFAULT; + break; + } + enc_pr(LOG_DEBUG, + "[-]VDI_IOCTL_CONFIG_DMA_BUF %d, %d, %d\n", + dma_info.fd[0], + dma_info.fd[1], + dma_info.fd[2]); + } + break; + + //hoan add for canvas + case VDI_IOCTL_READ_CANVAS: + { + struct vpudrv_dma_buf_canvas_info_t dma_info; + + struct canvas_s dst ; + u32 canvas = 0; + + if (copy_from_user(&dma_info, + (struct vpudrv_dma_buf_canvas_info_t *)arg, + sizeof(struct vpudrv_dma_buf_canvas_info_t))) + { + return -EFAULT; + } + + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + { + up(&s_vpu_sem); + break; + } + + canvas = dma_info.canvas_index; + enc_pr(LOG_DEBUG,"[+]VDI_IOCTL_READ_CANVAS,canvas = 0x%x\n",canvas); + if (canvas & 0xff) + { + canvas_read(canvas & 0xff, &dst); + dma_info.phys_addr[0] = dst.addr; + + if ((canvas & 0xff00) >> 8) + { + canvas_read((canvas & 0xff00) >> 8, &dst); + 
dma_info.phys_addr[1] = dst.addr; + + } + + if ((canvas & 0xff0000) >> 16) + { + canvas_read((canvas & 0xff0000) >> 16, &dst); + dma_info.phys_addr[2] = dst.addr; + } + + enc_pr(LOG_DEBUG,"[+]VDI_IOCTL_READ_CANVAS,phys_addr[0] = 0x%lx,phys_addr[1] = 0x%lx,phys_addr[2] = 0x%lx\n",dma_info.phys_addr[0],dma_info.phys_addr[1],dma_info.phys_addr[2]); + + } + else + { + dma_info.phys_addr[0] = 0; + dma_info.phys_addr[1] = 0; + dma_info.phys_addr[2] = 0; + } + up(&s_vpu_sem); + #if 0 + dma_info.phys_addr[0] = phys_addrY; + dma_info.phys_addr[1] = phys_addrU; + dma_info.phys_addr[2] = phys_addrV; + #endif + + ret = copy_to_user((void __user *)arg, + &dma_info, + sizeof(struct vpudrv_dma_buf_canvas_info_t)); + + enc_pr(LOG_DEBUG,"[-]VDI_IOCTL_READ_CANVAS,copy_to_user End\n"); + if (ret) + { + ret = -EFAULT; + break; + } + + } + break; + //end + +#ifdef CONFIG_COMPAT + case VDI_IOCTL_CONFIG_DMA32: + { + struct vpudrv_dma_buf_info_t dma_info; + struct compat_vpudrv_dma_buf_info_t dma_info32; + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_CONFIG_DMA_BUF32\n"); + + if (copy_from_user(&dma_info32, + (struct compat_vpudrv_dma_buf_info_t *)arg, + sizeof(struct compat_vpudrv_dma_buf_info_t))) + return -EFAULT; + dma_info.num_planes = dma_info32.num_planes; + dma_info.fd[0] = (int) dma_info32.fd[0]; + dma_info.fd[1] = (int) dma_info32.fd[1]; + dma_info.fd[2] = (int) dma_info32.fd[2]; + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + break; + if (vpu_src_addr_config(&dma_info, filp)) { + up(&s_vpu_sem); + enc_pr(LOG_ERROR, + "src addr config error\n"); + ret = -EFAULT; + break; + } + up(&s_vpu_sem); + dma_info32.phys_addr[0] = + (compat_ulong_t) dma_info.phys_addr[0]; + dma_info32.phys_addr[1] = + (compat_ulong_t) dma_info.phys_addr[1]; + dma_info32.phys_addr[2] = + (compat_ulong_t) dma_info.phys_addr[2]; + ret = copy_to_user((void __user *)arg, + &dma_info32, + sizeof(struct compat_vpudrv_dma_buf_info_t)); + if (ret) { + ret = -EFAULT; + break; + } + enc_pr(LOG_ALL, + 
"[-]VDI_IOCTL_CONFIG_DMA_BUF32 %d, %d, %d\n", + dma_info.fd[0], + dma_info.fd[1], + dma_info.fd[2]); + } + break; + +#if 1 + + //hoan add for canvas + case VDI_IOCTL_READ_CANVAS32: + { + struct vpudrv_dma_buf_canvas_info_t dma_info; + struct compat_vpudrv_dma_buf_canvas_info_t dma_info32; + struct canvas_s dst; + u32 canvas = 0; + + if (copy_from_user(&dma_info32, + (struct compat_vpudrv_dma_buf_canvas_info_t *)arg, + sizeof(struct compat_vpudrv_dma_buf_canvas_info_t))) + { + return -EFAULT; + } + + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + { + up(&s_vpu_sem); + break; + } + + canvas = dma_info32.canvas_index; + enc_pr(LOG_INFO,"[+]VDI_IOCTL_READ_CANVAS32,canvas = 0x%x\n",dma_info32.canvas_index); + if (canvas & 0xff) + { + canvas_read(canvas & 0xff, &dst); + dma_info.phys_addr[0] = dst.addr; + + if (dump_input) { + dump_raw_input(&dst); + } + + if ((canvas & 0xff00) >> 8) + { + canvas_read((canvas & 0xff00) >> 8, &dst); + dma_info.phys_addr[1] = dst.addr; + if (dump_input) { + dump_raw_input(&dst); + } + } + + if ((canvas & 0xff0000) >> 16) + { + canvas_read((canvas & 0xff0000) >> 16, &dst); + dma_info.phys_addr[2] = dst.addr; + if (dump_input) { + dump_raw_input(&dst); + } + } + + enc_pr(LOG_INFO,"VDI_IOCTL_READ_CANVAS32_1,phys_addr[0] = 0x%lx,phys_addr[1] = 0x%lx,phys_addr[2] = 0x%lx\n", + dma_info.phys_addr[0],dma_info.phys_addr[1],dma_info.phys_addr[2]); + } + else + { + dma_info.phys_addr[0] = 0; + dma_info.phys_addr[1] = 0; + dma_info.phys_addr[2] = 0; + } + + up(&s_vpu_sem); + + dma_info32.phys_addr[0] = (compat_ulong_t)dma_info.phys_addr[0]; + dma_info32.phys_addr[1] = (compat_ulong_t)dma_info.phys_addr[1]; + dma_info32.phys_addr[2] = (compat_ulong_t)dma_info.phys_addr[2]; + + enc_pr(LOG_INFO,"VDI_IOCTL_READ_CANVAS32_2,phys_addr[0] = 0x%lx,phys_addr[1] = 0x%lx,phys_addr[2] = 0x%lx\n",dma_info.phys_addr[0], dma_info.phys_addr[1], dma_info.phys_addr[2]); + enc_pr(LOG_INFO,"VDI_IOCTL_READ_CANVAS32_3,phys_addr[0] = 0x%x,phys_addr[1] = 
0x%x,phys_addr[2] = 0x%x\n", dma_info32.phys_addr[0],dma_info32.phys_addr[1],dma_info32.phys_addr[2]); + + ret = copy_to_user((void __user *)arg, + &dma_info32, + sizeof(struct compat_vpudrv_dma_buf_canvas_info_t)); + + enc_pr(LOG_INFO,"[-]VDI_IOCTL_READ_CANVAS,copy_to_user End\n"); + if (ret) + { + ret = -EFAULT; + break; + } + + } + break; + //end +#endif + + case VDI_IOCTL_UNMAP_DMA32: + { + struct vpudrv_dma_buf_info_t dma_info; + struct compat_vpudrv_dma_buf_info_t dma_info32; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_UNMAP_DMA32\n"); + + if (copy_from_user(&dma_info32, + (struct compat_vpudrv_dma_buf_info_t *)arg, + sizeof(struct compat_vpudrv_dma_buf_info_t))) + return -EFAULT; + dma_info.num_planes = dma_info32.num_planes; + dma_info.fd[0] = (int) dma_info32.fd[0]; + dma_info.fd[1] = (int) dma_info32.fd[1]; + dma_info.fd[2] = (int) dma_info32.fd[2]; + dma_info.phys_addr[0] = + (ulong) dma_info32.phys_addr[0]; + dma_info.phys_addr[1] = + (ulong) dma_info32.phys_addr[1]; + dma_info.phys_addr[2] = + (ulong) dma_info32.phys_addr[2]; + + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + break; + if (vpu_src_addr_unmap(&dma_info, filp)) { + up(&s_vpu_sem); + enc_pr(LOG_ERROR, + "dma addr unmap config error\n"); + ret = -EFAULT; + break; + } + up(&s_vpu_sem); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_UNMAP_DMA32\n"); + } + break; +#endif + case VDI_IOCTL_UNMAP_DMA: + { + struct vpudrv_dma_buf_info_t dma_info; + + enc_pr(LOG_ALL, + "[+]VDI_IOCTL_UNMAP_DMA\n"); + + if (copy_from_user(&dma_info, + (struct vpudrv_dma_buf_info_t *)arg, + sizeof(struct vpudrv_dma_buf_info_t))) + return -EFAULT; + ret = down_interruptible(&s_vpu_sem); + if (ret != 0) + break; + if (vpu_src_addr_unmap(&dma_info, filp)) { + up(&s_vpu_sem); + enc_pr(LOG_ERROR, + "dma addr unmap config error\n"); + ret = -EFAULT; + break; + } + up(&s_vpu_sem); + enc_pr(LOG_ALL, + "[-]VDI_IOCTL_UNMAP_DMA\n"); + } + break; + default: + { + enc_pr(LOG_ERROR, + "No such IOCTL, cmd is 0x%x\n", cmd); + ret = -EFAULT; + } 
+ break; + } + return ret; +} + +#ifdef CONFIG_COMPAT +static long vpu_compat_ioctl(struct file *filp, u32 cmd, ulong arg) +{ + long ret; + + arg = (ulong)compat_ptr(arg); + ret = vpu_ioctl(filp, cmd, arg); + return ret; +} +#endif + +static ssize_t vpu_write(struct file *filp, + const char *buf, + size_t len, + loff_t *ppos) +{ + enc_pr(LOG_INFO, "vpu_write len=%d\n", (int)len); + + if (!buf) { + enc_pr(LOG_ERROR, "vpu_write buf = NULL error\n"); + return -EFAULT; + } + + if (len == sizeof(struct vpu_bit_firmware_info_t)) { + struct vpu_bit_firmware_info_t *bit_firmware_info; + + bit_firmware_info = + kmalloc(sizeof(struct vpu_bit_firmware_info_t), + GFP_KERNEL); + if (!bit_firmware_info) { + enc_pr(LOG_ERROR, + "bit_firmware_info allocation error\n"); + return -EFAULT; + } + + if (copy_from_user(bit_firmware_info, buf, len)) { + enc_pr(LOG_ERROR, + "copy_from_user error for firmware_info\n"); + kfree(bit_firmware_info); + return -EFAULT; + } + + if (bit_firmware_info->size == + sizeof(struct vpu_bit_firmware_info_t)) { + enc_pr(LOG_INFO, + "set bit_firmware_info coreIdx= 0x%x, ", + bit_firmware_info->core_idx); + + enc_pr(LOG_INFO, + "base_offset = 0x%x, size = 0x%x, ", + bit_firmware_info->reg_base_offset, + bit_firmware_info->size); + + enc_pr(LOG_INFO,"bit_code[0] = 0x%x\n", + bit_firmware_info->bit_code[0]); + + if (bit_firmware_info->core_idx + > MAX_NUM_VPU_CORE) { + enc_pr(LOG_ERROR, + "vpu_write coreIdx[%d] is ", + bit_firmware_info->core_idx); + enc_pr(LOG_ERROR, + "exceeded than MAX_NUM_VPU_CORE[%d]\n", + MAX_NUM_VPU_CORE); + kfree(bit_firmware_info); + return -ENODEV; + } + if (bit_firmware_info->core_idx >= MAX_NUM_VPU_CORE) + { + enc_pr(LOG_ERROR, + "bit_firmware_info->core_idx invalid\n"); + kfree(bit_firmware_info); + return -ENODEV; + + } + memcpy((void *)&s_bit_firmware_info[bit_firmware_info->core_idx], bit_firmware_info, sizeof(struct vpu_bit_firmware_info_t)); + kfree(bit_firmware_info); + return len; + } + + kfree(bit_firmware_info); + } + 
return -1; +} + +static s32 vpu_release(struct inode *inode, struct file *filp) +{ + s32 ret = 0; + u32 open_count; + s32 i; + + //enc_pr(LOG_DEBUG, "vpu_release filp=%lu, f_counter=%lld\n", + //(unsigned long)filp, filp->f_count.counter); + ret = down_interruptible(&s_vpu_sem); + + if (ret == 0) { + /* found and free the not handled + buffer by user applications */ + vpu_free_buffers(filp); + vpu_dma_buf_release(filp); + /* found and free the not closed + instance by user applications */ + vpu_free_instances(filp); + + spin_lock(&s_vpu_lock); + s_vpu_drv_context.open_count--; + open_count = s_vpu_drv_context.open_count; + spin_unlock(&s_vpu_lock); + + pr_err("open_count=%u\n", open_count); + + if (open_count == 0) { + for (i=0; i<MAX_NUM_INSTANCE; i++) { + kfifo_reset(&s_interrupt_pending_q[i]); + s_interrupt_flag[i] = 0; + s_vpu_drv_context.interrupt_reason[i] = 0; + s_vpu_drv_context.interrupt_flag[i] = 0; + } + if (s_instance_pool.base) { + enc_pr(LOG_DEBUG, "free instance pool\n"); + vfree((const void *)s_instance_pool.base); + s_instance_pool.base = 0; + } + if (s_common_memory.phys_addr) { + enc_pr(LOG_INFO, + "vpu_release, s_common_memory 0x%lx\n", + s_common_memory.phys_addr); + vpu_free_dma_buffer(&s_common_memory); + s_common_memory.phys_addr = 0; + } + + if (s_video_memory.phys_addr && !use_reserve) { + enc_pr(LOG_DEBUG, + "vpu_release, s_video_memory 0x%lx\n", + s_video_memory.phys_addr); + codec_mm_free_for_dma( + VPU_DEV_NAME, + (u32)s_video_memory.phys_addr); + vmem_exit(&s_vmem); + memset(&s_video_memory, + 0, sizeof(struct vpudrv_buffer_t)); + memset(&s_vmem, + 0, sizeof(struct video_mm_t)); + } + if ((s_vpu_irq >= 0) + && (s_vpu_irq_requested == true)) { + free_irq(s_vpu_irq, &s_vpu_drv_context); + s_vpu_irq_requested = false; + } + + /* disable vpu clks.*/ + vpu_clk_disable(&s_vpu_clks); + } + } + up(&s_vpu_sem); + return 0; +} + +static s32 vpu_fasync(s32 fd, struct file *filp, s32 mode) +{ + struct vpu_drv_context_t *dev = + (struct 
vpu_drv_context_t *)filp->private_data; + return fasync_helper(fd, filp, mode, &dev->async_queue); +} + +static s32 vpu_map_to_register(struct file *fp, struct vm_area_struct *vm) +{ + ulong pfn; + + vm->vm_flags |= VM_IO | VM_RESERVED; + vm->vm_page_prot = + pgprot_noncached(vm->vm_page_prot); + pfn = s_vpu_register.phys_addr >> PAGE_SHIFT; + return remap_pfn_range(vm, vm->vm_start, pfn, + vm->vm_end - vm->vm_start, + vm->vm_page_prot) ? -EAGAIN : 0; +} + +static s32 vpu_map_to_physical_memory( + struct file *fp, struct vm_area_struct *vm) +{ + vm->vm_flags |= VM_IO | VM_RESERVED; + if (vm->vm_pgoff == + (s_common_memory.phys_addr >> PAGE_SHIFT)) { + vm->vm_page_prot = + pgprot_noncached(vm->vm_page_prot); + } else { + if (vpu_is_buffer_cached(fp, vm->vm_pgoff) == 0) + vm->vm_page_prot = + pgprot_noncached(vm->vm_page_prot); + } + /* vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot); */ + return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff, + vm->vm_end - vm->vm_start, vm->vm_page_prot) ? 
-EAGAIN : 0; +} + +static s32 vpu_map_to_instance_pool_memory( + struct file *fp, struct vm_area_struct *vm) +{ + s32 ret; + long length = vm->vm_end - vm->vm_start; + ulong start = vm->vm_start; + s8 *vmalloc_area_ptr = (s8 *)s_instance_pool.base; + ulong pfn; + + vm->vm_flags |= VM_RESERVED; + + /* loop over all pages, map it page individually */ + while (length > 0) { + pfn = vmalloc_to_pfn(vmalloc_area_ptr); + ret = remap_pfn_range(vm, start, pfn, + PAGE_SIZE, PAGE_SHARED); + if (ret < 0) + return ret; + start += PAGE_SIZE; + vmalloc_area_ptr += PAGE_SIZE; + length -= PAGE_SIZE; + } + return 0; +} + +/* + * @brief memory map interface for vpu file operation + * @return 0 on success or negative error code on error + */ +static s32 vpu_mmap(struct file *fp, struct vm_area_struct *vm) +{ + /* if (vm->vm_pgoff == (s_vpu_register.phys_addr >> PAGE_SHIFT)) */ + if ((vm->vm_end - vm->vm_start) == (s_vpu_register.size + 1) && + (vm->vm_pgoff == 0)) { + vm->vm_pgoff = (s_vpu_register.phys_addr >> PAGE_SHIFT); + return vpu_map_to_register(fp, vm); + } + + if (vm->vm_pgoff == 0) + return vpu_map_to_instance_pool_memory(fp, vm); + + return vpu_map_to_physical_memory(fp, vm); +} +static int vpu_dma_buffer_map(struct vpu_dma_cfg *cfg) +{ + int ret = -1; + int fd = -1; + struct page *page = NULL; + struct dma_buf *dbuf = NULL; + struct dma_buf_attachment *d_att = NULL; + struct sg_table *sg = NULL; + void *vaddr = NULL; + struct device *dev = NULL; + enum dma_data_direction dir; + + if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) { + enc_pr(LOG_ERROR, "error dma param\n"); + return -EINVAL; + } + fd = cfg->fd; + dev = cfg->dev; + dir = cfg->dir; + + dbuf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(dbuf)) { + enc_pr(LOG_ERROR, "failed to get dma buffer,fd %d\n",fd); + return -EINVAL; + } + + d_att = dma_buf_attach(dbuf, dev); + if (IS_ERR(d_att)) { + enc_pr(LOG_ERROR, "failed to set dma attach\n"); + goto attach_err; + } + + sg = dma_buf_map_attachment(d_att, dir); + if 
(IS_ERR(sg)) { + enc_pr(LOG_ERROR, "failed to get dma sg\n"); + goto map_attach_err; + } + + page = sg_page(sg->sgl); + cfg->paddr = PFN_PHYS(page_to_pfn(page)); + cfg->dbuf = dbuf; + cfg->attach = d_att; + cfg->vaddr = vaddr; + cfg->sg = sg; + + return 0; + +map_attach_err: + dma_buf_detach(dbuf, d_att); +attach_err: + dma_buf_put(dbuf); + + return ret; +} + +static void vpu_dma_buffer_unmap(struct vpu_dma_cfg *cfg) +{ + int fd = -1; + struct dma_buf *dbuf = NULL; + struct dma_buf_attachment *d_att = NULL; + struct sg_table *sg = NULL; + /*void *vaddr = NULL;*/ + struct device *dev = NULL; + enum dma_data_direction dir; + + if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL + || cfg->dbuf == NULL /*|| cfg->vaddr == NULL*/ + || cfg->attach == NULL || cfg->sg == NULL) { + enc_pr(LOG_ERROR, "unmap: Error dma param\n"); + return; + } + + fd = cfg->fd; + dev = cfg->dev; + dir = cfg->dir; + dbuf = cfg->dbuf; + d_att = cfg->attach; + sg = cfg->sg; + + dma_buf_unmap_attachment(d_att, sg, dir); + dma_buf_detach(dbuf, d_att); + dma_buf_put(dbuf); + + enc_pr(LOG_INFO, "vpu_dma_buffer_unmap fd %d\n",fd); +} + +static s32 vpu_dma_buffer_get_phys(struct vpu_dma_cfg *cfg, + unsigned long *addr) +{ + int ret = 0; + if (cfg->paddr == 0) + { /* only mapp once */ + ret = vpu_dma_buffer_map(cfg); + if (ret < 0) { + enc_pr(LOG_ERROR, "vpu_dma_buffer_map failed\n"); + return ret; + } + } + if (cfg->paddr) *addr = cfg->paddr; + enc_pr(LOG_INFO,"vpu_dma_buffer_get_phys 0x%lx\n", cfg->paddr); + return ret; +} + +static s32 vpu_src_addr_config(struct vpudrv_dma_buf_info_t *pinfo, + struct file *filp) +{ + struct vpudrv_dma_buf_pool_t *vbp; + unsigned long phy_addr; + struct vpu_dma_cfg *cfg; + s32 idx, ret = 0; + if (pinfo->num_planes == 0 || pinfo->num_planes > 3) + return -EFAULT; + + for (idx = 0; idx < pinfo->num_planes; idx++) + pinfo->phys_addr[idx] = 0; + for (idx = 0; idx < pinfo->num_planes; idx++) { + vbp = kzalloc(sizeof(*vbp), GFP_KERNEL); + if (!vbp) { + ret = -ENOMEM; + 
break; + } + memset(vbp, 0, sizeof(struct vpudrv_dma_buf_pool_t)); + cfg = &vbp->dma_cfg; + cfg->dir = DMA_TO_DEVICE; + cfg->fd = pinfo->fd[idx]; + cfg->dev = &(multienc_pdev->dev); + phy_addr = 0; + ret = vpu_dma_buffer_get_phys(cfg, &phy_addr); + if (ret < 0) { + enc_pr(LOG_ERROR, "import fd %d failed\n", cfg->fd); + kfree(vbp); + ret = -1; + break; + } + pinfo->phys_addr[idx] = (ulong) phy_addr; + vbp->filp = filp; + spin_lock(&s_dma_buf_lock); + list_add(&vbp->list, &s_dma_bufp_head); + spin_unlock(&s_dma_buf_lock); + } + enc_pr(LOG_INFO, "vpu_src_addr_config phy_addr 0x%lx, 0x%lx, 0x%lx\n", + pinfo->phys_addr[0], pinfo->phys_addr[1], pinfo->phys_addr[2]); + //hoan add for canvas test + phys_addrY = pinfo->phys_addr[0]; + phys_addrU = pinfo->phys_addr[1]; + phys_addrV = pinfo->phys_addr[2]; + + //end + return ret; +} + +static s32 vpu_src_addr_unmap(struct vpudrv_dma_buf_info_t *pinfo, + struct file *filp) +{ + struct vpudrv_dma_buf_pool_t *pool, *n; + struct vpu_dma_cfg vb; + ulong phys_addr; + s32 plane_idx = 0; + s32 ret = 0; + s32 found; + + if (pinfo->num_planes == 0 || pinfo->num_planes > 3) + return -EFAULT; + + enc_pr(LOG_INFO, + "dma_unmap planes %d fd: %d-%d-%d, phy_add: 0x%lx-%lx-%lx\n", + pinfo->num_planes, pinfo->fd[0],pinfo->fd[1], pinfo->fd[2], + pinfo->phys_addr[0], pinfo->phys_addr[1], pinfo->phys_addr[2]); + + list_for_each_entry_safe(pool, n, &s_dma_bufp_head, list) { + found = 0; + if (pool->filp == filp) { + vb = pool->dma_cfg; + phys_addr = vb.paddr; + if (vb.fd == pinfo->fd[0]) + { + if (phys_addr != pinfo->phys_addr[0]) { + enc_pr(LOG_ERROR, "dma_unmap plane 0"); + enc_pr(LOG_ERROR, " no match "); + enc_pr(LOG_ERROR, "0x%lx %lx\n", + phys_addr, pinfo->phys_addr[0]); + } + found = 1; + plane_idx++; + } + else if (vb.fd == pinfo->fd[1] + && pinfo->num_planes > 1) { + if (phys_addr != pinfo->phys_addr[1]) { + enc_pr(LOG_ERROR, "dma_unmap plane 1"); + enc_pr(LOG_ERROR, " no match "); + enc_pr(LOG_ERROR, "0x%lx %lx\n", + phys_addr, 
pinfo->phys_addr[1]); + } + plane_idx++; + found = 1; + } + else if (vb.fd == pinfo->fd[2] + && pinfo->num_planes > 2) { + if (phys_addr != pinfo->phys_addr[2]) { + enc_pr(LOG_ERROR, "dma_unmap plane 2"); + enc_pr(LOG_ERROR, " no match "); + enc_pr(LOG_ERROR, "0x%lx %lx\n", + phys_addr, pinfo->phys_addr[2]); + } + plane_idx++; + found = 1; + } + if (found && vb.attach) { + vpu_dma_buffer_unmap(&vb); + spin_lock(&s_dma_buf_lock); + list_del(&pool->list); + spin_unlock(&s_dma_buf_lock); + kfree(pool); + } + } + } + + if (plane_idx != pinfo->num_planes) { + enc_pr(LOG_DEBUG, "dma_unmap fd planes not match\n"); + enc_pr(LOG_DEBUG, " found %d need %d\n", + plane_idx, pinfo->num_planes); + } + return ret; +} + +static const struct file_operations vpu_fops = { + .owner = THIS_MODULE, + .open = vpu_open, + .release = vpu_release, + .write = vpu_write, + .unlocked_ioctl = vpu_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vpu_compat_ioctl, +#endif + .fasync = vpu_fasync, + .mmap = vpu_mmap, +}; + +static ssize_t encode_status_show(struct class *cla, + struct class_attribute *attr, char *buf) +{ + struct vmem_info_t info; + char *pbuf = buf; + s32 open_count; + + spin_lock(&s_vpu_lock); + open_count = s_vpu_drv_context.open_count; + spin_unlock(&s_vpu_lock); + + vmem_get_info(&s_vmem, &info); + pbuf += snprintf(buf, 40, "\nmultienc memory usage info:\n"); + pbuf += snprintf(buf, 120, "Total %ld used %ld free %ld page sz %ld, open_count=%u, in_interrupt=%lu, flag=%u\n", + info.total_pages*info.page_size, + info.alloc_pages*info.page_size, + info.free_pages*info.page_size, + info.page_size, + open_count, + in_interrupt(), + current->flags & PF_KTHREAD); + return pbuf - buf; +} + +static CLASS_ATTR_RO(encode_status); +static struct attribute *multienc_class_attrs[] = { + &class_attr_encode_status.attr, + NULL +}; + +ATTRIBUTE_GROUPS(multienc_class); + +static struct class multienc_class = { + .name = VPU_CLASS_NAME, + .class_groups = multienc_class_groups, +}; + +s32 
init_MultiEnc_device(void) +{ + s32 r = 0; + + r = register_chrdev(0, VPU_DEV_NAME, &vpu_fops); + if (r <= 0) { + enc_pr(LOG_ERROR, "register multienc device error.\n"); + return r; + } + s_vpu_major = r; + + r = class_register(&multienc_class); + if (r < 0) { + enc_pr(LOG_ERROR, "error create multienc class.\n"); + return r; + } + s_register_flag = 1; + multienc_dev = device_create(&multienc_class, NULL, + MKDEV(s_vpu_major, 0), NULL, + VPU_DEV_NAME); + + if (IS_ERR(multienc_dev)) { + enc_pr(LOG_ERROR, "create multienc device error.\n"); + class_unregister(&multienc_class); + return -1; + } + return r; +} + +s32 uninit_MultiEnc_device(void) +{ + if (multienc_dev) + device_destroy(&multienc_class, MKDEV(s_vpu_major, 0)); + + if (s_register_flag) + class_destroy(&multienc_class); + s_register_flag = 0; + + if (s_vpu_major) + unregister_chrdev(s_vpu_major, VPU_DEV_NAME); + s_vpu_major = 0; + return 0; +} + +static s32 multienc_mem_device_init( + struct reserved_mem *rmem, struct device *dev) +{ + s32 r; + + if (!rmem) { + enc_pr(LOG_ERROR, "Can not obtain I/O memory, "); + enc_pr(LOG_ERROR, "will allocate multienc buffer!\n"); + + r = -EFAULT; + return r; + } + + if ((!rmem->base) || + (rmem->size < cma_cfg_size)) { + enc_pr(LOG_ERROR, + "memory range error, 0x%lx - 0x%lx\n", + (ulong)rmem->base, (ulong)rmem->size); + r = -EFAULT; + return r; + } + r = 0; + s_video_memory.size = rmem->size; + s_video_memory.phys_addr = (ulong)rmem->base; + enc_pr(LOG_DEBUG, "multienc_mem_device_init %d, 0x%lx\n", + s_video_memory.size,s_video_memory.phys_addr); + + return r; +} + +static s32 vpu_probe(struct platform_device *pdev) +{ + s32 err = 0, irq, reg_count, idx; + struct resource res; + struct device_node *np, *child; + + enc_pr(LOG_DEBUG, "vpu_probe\n"); + + s_vpu_major = 0; + use_reserve = false; + s_vpu_irq = -1; + cma_pool_size = 0; + s_vpu_irq_requested = false; + spin_lock(&s_vpu_lock); + s_vpu_open_ref_count = 0; + spin_unlock(&s_vpu_lock); + multienc_dev = NULL; + 
multienc_pdev = NULL; + s_register_flag = 0; + memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t)); + memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t)); + memset(&s_vmem, 0, sizeof(struct video_mm_t)); + memset(&s_bit_firmware_info[0], 0, sizeof(s_bit_firmware_info)); + memset(&res, 0, sizeof(struct resource)); + memset(&s_fifo_alloc_flag, 0, sizeof(s_fifo_alloc_flag)); + np = pdev->dev.of_node; + err = of_property_read_u32(np, "config_mm_sz_mb", &cma_cfg_size); + + cma_cfg_size = 100; + enc_pr(LOG_ERROR, "reset cma_cfg_size to 200"); + + if (err) { + enc_pr(LOG_ERROR, "failed to get config_mm_sz_mb node, use default\n"); + cma_cfg_size = VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE; + err = 0; + } else + cma_cfg_size = cma_cfg_size*SZ_1M; + + enc_pr(LOG_INFO, "cma cfg size %d\n", cma_cfg_size); + idx = of_reserved_mem_device_init(&pdev->dev); + + if (idx != 0) + enc_pr(LOG_DEBUG, "MultiEnc reserved memory config fail.\n"); + else if (s_video_memory.phys_addr) + use_reserve = true; + + if (use_reserve == false) { +#ifndef CONFIG_CMA + enc_pr(LOG_ERROR, "MultiEnc reserved memory is invaild, probe fail!\n"); + err = -EFAULT; + goto ERROR_PROVE_DEVICE; +#else + cma_pool_size = + (codec_mm_get_total_size() > (cma_cfg_size)) ? + (cma_cfg_size) : + codec_mm_get_total_size(); + + enc_pr(LOG_DEBUG, "MultiEnc - cma memory pool size: %d MB\n", (u32)cma_pool_size / SZ_1M); +#endif + } + + /* get interrupt resource */ + irq = platform_get_irq_byname(pdev, "multienc_irq"); + + if (irq < 0) { + enc_pr(LOG_ERROR, "get MultiEnc irq resource error\n"); + err = -EFAULT; + + goto ERROR_PROVE_DEVICE; + } + + s_vpu_irq = irq; + enc_pr(LOG_DEBUG, "MultiEnc -irq: %d\n", s_vpu_irq); + + /* get vpu clks */ + if (vpu_clk_get(&pdev->dev, &s_vpu_clks)) { + enc_pr(LOG_DEBUG, "get vpu clks fail.\n"); + goto ERROR_PROVE_DEVICE; + } else + enc_pr(LOG_DEBUG, "MultiEnc. 
clock get success\n"); + + reg_count = 0; + np = pdev->dev.of_node; + + for_each_child_of_node(np, child) { + if (of_address_to_resource(child, 0, &res) || (reg_count > 1)) { + enc_pr(LOG_ERROR, "no reg ranges or more reg ranges %d\n", reg_count); + err = -ENXIO; + goto ERROR_PROVE_DEVICE; + } + + /* if platform driver is implemented */ + if (res.start != 0) { + s_vpu_register.phys_addr = res.start; + s_vpu_register.virt_addr = (ulong)ioremap_nocache(res.start, resource_size(&res)); + s_vpu_register.size = res.end - res.start; + + enc_pr(LOG_DEBUG, "vpu base address get from platform driver "); + enc_pr(LOG_DEBUG, "physical addr=0x%lx, virtual addr=0x%lx\n", + s_vpu_register.phys_addr, s_vpu_register.virt_addr); + } else { + s_vpu_register.phys_addr = VPU_REG_BASE_ADDR; + s_vpu_register.virt_addr = (ulong)ioremap_nocache(s_vpu_register.phys_addr, VPU_REG_SIZE); + s_vpu_register.size = VPU_REG_SIZE; + + enc_pr(LOG_DEBUG, "vpu base address get from defined value "); + enc_pr(LOG_DEBUG, "physical addr=0x%lx, virtual addr=0x%lx\n", + s_vpu_register.phys_addr, s_vpu_register.virt_addr); + } + + reg_count++; + } + + /* get the major number of the character device */ + if (init_MultiEnc_device()) { + err = -EBUSY; + enc_pr(LOG_ERROR, "could not allocate major number\n"); + + goto ERROR_PROVE_DEVICE; + } + + enc_pr(LOG_INFO, "SUCCESS alloc_chrdev_region\n"); + + for (idx = 0; idx < MAX_NUM_INSTANCE; idx ++) + init_waitqueue_head(&s_interrupt_wait_q[idx]); + + for (idx = 0; idx < MAX_NUM_INSTANCE; idx ++) { + err = kfifo_alloc(&s_interrupt_pending_q[idx], MAX_INTERRUPT_QUEUE*sizeof(u32), GFP_KERNEL); + + if (err) { + enc_pr(LOG_ERROR,"kfifo_alloc failed 0x%x\n", err); + goto ERROR_PROVE_DEVICE; + } + + s_fifo_alloc_flag[idx] = 1; + } + + tasklet_init(&multienc_tasklet, multienc_isr_tasklet, (ulong)&s_vpu_drv_context); + + s_common_memory.base = 0; + s_instance_pool.base = 0; + + if (use_reserve == true) { + if (vmem_init(&s_vmem, s_video_memory.phys_addr, 
s_video_memory.size) < 0) { + enc_pr(LOG_ERROR, "fail to init vmem system\n"); + goto ERROR_PROVE_DEVICE; + } + + enc_pr(LOG_DEBUG, "success to probe vpu device with video memory"); + enc_pr(LOG_DEBUG, "phys_addr=0x%lx, base = 0x%lx\n", + (ulong)s_video_memory.phys_addr, (ulong)s_video_memory.base); + } else { + enc_pr(LOG_DEBUG, "success to probe vpu device with video memory \n"); + } + + enc_pr(LOG_DEBUG, "to be allocate from CMA pool_size 0x%lx\n", cma_pool_size); + multienc_pdev = pdev; + + return 0; + +ERROR_PROVE_DEVICE: + for (idx = 0; idx < MAX_NUM_INSTANCE; idx++) { + if (s_fifo_alloc_flag[idx]) + kfifo_free(&s_interrupt_pending_q[idx]); + s_fifo_alloc_flag[idx] = 0; + } + + if (s_vpu_register.virt_addr) { + iounmap((void *)s_vpu_register.virt_addr); + memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t)); + } + + if (s_video_memory.phys_addr) { + vmem_exit(&s_vmem); + memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t)); + memset(&s_vmem, 0, sizeof(struct video_mm_t)); + } + + if (s_vpu_irq_requested == true) { + if (s_vpu_irq >= 0) { + free_irq(s_vpu_irq, &s_vpu_drv_context); + s_vpu_irq = -1; + } + s_vpu_irq_requested = false; + } + uninit_MultiEnc_device(); + return err; +} + +static s32 vpu_remove(struct platform_device *pdev) +{ + s32 idx; + enc_pr(LOG_DEBUG, "vpu_remove\n"); + + if (s_instance_pool.base) { + vfree((const void *)s_instance_pool.base); + s_instance_pool.base = 0; + } + + if (s_common_memory.phys_addr) { + vpu_free_dma_buffer(&s_common_memory); + s_common_memory.phys_addr = 0; + } + + if (s_video_memory.phys_addr) { + if (!use_reserve) { + codec_mm_free_for_dma( + VPU_DEV_NAME, + (u32)s_video_memory.phys_addr); + } + vmem_exit(&s_vmem); + memset(&s_video_memory, + 0, sizeof(struct vpudrv_buffer_t)); + memset(&s_vmem, + 0, sizeof(struct video_mm_t)); + } + + if (s_vpu_irq_requested == true) { + if (s_vpu_irq >= 0) { + free_irq(s_vpu_irq, &s_vpu_drv_context); + s_vpu_irq = -1; + } + s_vpu_irq_requested = false; + } + for (idx 
= 0; idx < MAX_NUM_INSTANCE; idx++) + kfifo_free(&s_interrupt_pending_q[idx]); + + if (s_vpu_register.virt_addr) { + iounmap((void *)s_vpu_register.virt_addr); + memset(&s_vpu_register, + 0, sizeof(struct vpudrv_buffer_t)); + } + if (clock_gate_count > 0) + { + vpu_clk_disable(&s_vpu_clks); + } + vpu_clk_put(&multienc_pdev->dev, &s_vpu_clks); + multienc_pdev = NULL; + uninit_MultiEnc_device(); + return 0; +} + +#ifdef CONFIG_PM +#define VP5_CMD_INIT_VPU (0x0001) +#define VP5_CMD_SLEEP_VPU (0x0004) +#define VP5_CMD_WAKEUP_VPU (0x0002) + +static void Vp5BitIssueCommand(int core, u32 cmd) +{ + WriteVpuRegister(VP5_VPU_BUSY_STATUS, 1); + WriteVpuRegister(VP5_COMMAND, cmd); + WriteVpuRegister(VP5_VPU_HOST_INT_REQ, 1); + + return; +} + +static s32 vpu_suspend(struct platform_device *pdev, pm_message_t state) +{ + u32 core; + ulong timeout = jiffies + HZ; /* vpu wait timeout to 1sec */ + + enc_pr(LOG_DEBUG, "vpu_suspend\n"); + + if (s_vpu_open_ref_count > 0) { +#ifdef VPU_SUPPORT_CLOCK_CONTROL + vpu_clk_config(1); +#endif + for (core = 0; core < MAX_NUM_VPU_CORE; core++) { + if (s_bit_firmware_info[core].size == 0) + continue; + while (ReadVpuRegister(VP5_VPU_BUSY_STATUS)) { + if (time_after(jiffies, timeout)) { + enc_pr(LOG_ERROR, + "SLEEP_VPU BUSY timeout"); + goto DONE_SUSPEND; + } + } + Vp5BitIssueCommand(core, VP5_CMD_SLEEP_VPU); + while (ReadVpuRegister(VP5_VPU_BUSY_STATUS)) { + if (time_after(jiffies, timeout)) { + enc_pr(LOG_ERROR, + "SLEEP_VPU BUSY timeout"); + goto DONE_SUSPEND; + } + } + if (ReadVpuRegister(VP5_RET_SUCCESS) == 0) { + enc_pr(LOG_ERROR, + "SLEEP_VPU failed [0x%x]", + ReadVpuRegister(VP5_RET_FAIL_REASON)); + goto DONE_SUSPEND; + } + } +#ifdef VPU_SUPPORT_CLOCK_CONTROL + vpu_clk_config(0); + if (clock_gate_count > 0) +#endif + { + clk_disable(s_vpu_clks.c_clk); + clk_disable(s_vpu_clks.b_clk); + clk_disable(s_vpu_clks.a_clk); + } + /* the power off */ + pwr_ctrl_psci_smc(PDID_T7_DOS_WAVE, false); + } + return 0; + +DONE_SUSPEND: +#ifdef 
VPU_SUPPORT_CLOCK_CONTROL + vpu_clk_config(0); +#endif + return -EAGAIN; +} +static s32 vpu_resume(struct platform_device *pdev) +{ + u32 core; + ulong timeout = jiffies + HZ; /* vpu wait timeout to 1sec */ + ulong code_base; + u32 code_size; + u32 remap_size; + u32 regVal; + u32 hwOption = 0; + + enc_pr(LOG_DEBUG, "vpu_resume\n"); + + if (s_vpu_open_ref_count > 0) { +#ifdef VPU_SUPPORT_CLOCK_CONTROL + if (clock_gate_count > 0) +#endif + { + clk_enable(s_vpu_clks.a_clk); + clk_enable(s_vpu_clks.b_clk); + clk_enable(s_vpu_clks.c_clk); + } + vpu_clk_config(1); + /* the power on */ + pwr_ctrl_psci_smc(PDID_T7_DOS_WAVE, true); + for (core = 0; core < MAX_NUM_VPU_CORE; core++) { + if (s_bit_firmware_info[core].size == 0) + continue; + code_base = s_common_memory.phys_addr; + /* ALIGN TO 4KB */ + code_size = (s_common_memory.size & ~0xfff); + if (code_size < s_bit_firmware_info[core].size * 2) + goto DONE_WAKEUP; + regVal = 0; + WriteVpuRegister(VP5_PO_CONF, regVal); + + /* Reset All blocks */ + regVal = 0x7ffffff; + WriteVpuRegister(VP5_VPU_RESET_REQ, regVal); + /* Waiting reset done */ + while (ReadVpuRegister(VP5_VPU_RESET_STATUS)) { + if (time_after(jiffies, timeout)) + goto DONE_WAKEUP; + } + WriteVpuRegister(VP5_VPU_RESET_REQ, 0); + + /* remap page size */ + remap_size = (code_size >> 12) & 0x1ff; + regVal = 0x80000000 | (VP5_REMAP_CODE_INDEX<<12) + | (0 << 16) | (1<<11) | remap_size; + WriteVpuRegister(VP5_VPU_REMAP_CTRL, regVal); + /* DO NOT CHANGE! 
*/ + WriteVpuRegister(VP5_VPU_REMAP_VADDR, 0x00000000); + WriteVpuRegister(VP5_VPU_REMAP_PADDR, code_base); + WriteVpuRegister(VP5_ADDR_CODE_BASE, code_base); + WriteVpuRegister(VP5_CODE_SIZE, code_size); + WriteVpuRegister(VP5_CODE_PARAM, 0); + WriteVpuRegister(VP5_INIT_VPU_TIME_OUT_CNT, timeout); + WriteVpuRegister(VP5_HW_OPTION, hwOption); + + /* Interrupt */ + regVal = (1 << INT_ENC_SET_PARAM); + regVal |= (1 << INT_ENC_PIC); + regVal |= (1 << INT_INIT_SEQ); + regVal |= (1 << INT_DEC_PIC); + regVal |= (1 << INT_BSBUF_EMPTY); + WriteVpuRegister(VP5_VPU_VINT_ENABLE, regVal); + Vp5BitIssueCommand(core, VP5_CMD_INIT_VPU); + WriteVpuRegister(VP5_VPU_REMAP_CORE_START, 1); + while (ReadVpuRegister(VP5_VPU_BUSY_STATUS)) { + if (time_after(jiffies, timeout)) + goto DONE_WAKEUP; + } + + if (ReadVpuRegister(VP5_RET_SUCCESS) == 0) { + enc_pr(LOG_ERROR, + "WAKEUP_VPU failed [0x%x]", + ReadVpuRegister(VP5_RET_FAIL_REASON)); + goto DONE_WAKEUP; + } + } + } +DONE_WAKEUP: + if (s_vpu_open_ref_count > 0) + vpu_clk_config(0); + return 0; +} +#else +#define vpu_suspend NULL +#define vpu_resume NULL +#endif /* !CONFIG_PM */ + +static const struct of_device_id cnm_multienc_dt_match[] = { + { + .compatible = "cnm, MultiEnc", + }, + {}, +}; + +static struct platform_driver vpu_driver = { + .driver = { + .name = VPU_PLATFORM_DEVICE_NAME, + .of_match_table = cnm_multienc_dt_match, + }, + .probe = vpu_probe, + .remove = vpu_remove, + .suspend = vpu_suspend, + .resume = vpu_resume, +}; + +static s32 __init vpu_init(void) +{ + s32 res; + + enc_pr(LOG_DEBUG, "vpu_init\n"); + + if (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_T7) { + pr_err("The chip is not support multi encoder!!\n"); + return -1; + } + + res = platform_driver_register(&vpu_driver); + enc_pr(LOG_INFO, + "end vpu_init result=0x%x\n", res); + return res; +} + +static void __exit vpu_exit(void) +{ + enc_pr(LOG_DEBUG, "vpu_exit\n"); + platform_driver_unregister(&vpu_driver); +} + +static const struct reserved_mem_ops 
rmem_multienc_ops = { + .device_init = multienc_mem_device_init, +}; + +static s32 __init multienc_mem_setup(struct reserved_mem *rmem) +{ + rmem->ops = &rmem_multienc_ops; + enc_pr(LOG_DEBUG, "MultiEnc reserved mem setup.\n"); + return 0; +} + +module_param(print_level, uint, 0664); +MODULE_PARM_DESC(print_level, "\n print_level\n"); + +module_param(clock_level, uint, 0664); +MODULE_PARM_DESC(clock_level, "\n clock_level\n"); + +module_param(clock_gate_count, uint, 0664); +MODULE_PARM_DESC(clock_gate_count, "\n clock_gate_count\n"); + +module_param(set_clock_freq, uint, 0664); +MODULE_PARM_DESC(set_clock_freq, "\n set clk freq\n"); + +module_param(clock_a, uint, 0664); +MODULE_PARM_DESC(clock_a, "\n clock_a\n"); + +module_param(clock_b, uint, 0664); +MODULE_PARM_DESC(clock_b, "\n clock_b\n"); + +module_param(clock_c, uint, 0664); +MODULE_PARM_DESC(clock_c, "\n clock_c\n"); + +module_param(dump_input, uint, 0664); +MODULE_PARM_DESC(dump_input, "\n dump_input\n"); + +module_param(dump_es, uint, 0664); +MODULE_PARM_DESC(dump_es, "\n dump_es\n"); + +MODULE_AUTHOR("Amlogic Inc."); +MODULE_DESCRIPTION("VPU linux driver"); +MODULE_LICENSE("GPL"); + +module_init(vpu_init); +module_exit(vpu_exit); +RESERVEDMEM_OF_DECLARE(cnm_multienc, "cnm, MultiEnc-mem", multienc_mem_setup);
diff --git a/drivers/frame_sink/encoder/multi/vpu_multi.h b/drivers/frame_sink/encoder/multi/vpu_multi.h new file mode 100644 index 0000000..912df01 --- /dev/null +++ b/drivers/frame_sink/encoder/multi/vpu_multi.h
@@ -0,0 +1,352 @@ +/* + * + * Copyright (C) 2019 by Amlogic, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#ifndef __VPU_MULTI_DRV_H__ +#define __VPU_MULTI_DRV_H__ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/compat.h> +#include <linux/dma-buf.h> + +#define SUPPORT_SOURCE_RELEASE_INTERRUPT + +/* DO NOT CHANGE THIS VALUE */ +#define MAX_INST_HANDLE_SIZE 48 +#define MAX_NUM_INSTANCE 6 +#define MAX_NUM_VPU_CORE 1 + +#ifdef CONFIG_COMPAT +struct compat_vpudrv_buffer_t { + u32 size; + u32 cached; + compat_ulong_t phys_addr; + compat_ulong_t base; /* kernel logical address in use kernel */ + compat_ulong_t virt_addr; /* virtual user space address */ +}; +#endif + +struct vpudrv_buffer_t { + u32 size; + u32 cached; + ulong phys_addr; + ulong base; /* kernel logical address in use kernel */ + ulong virt_addr; /* virtual user space address */ +}; + +struct vpu_bit_firmware_info_t { + u32 size; /* size of this structure*/ + u32 core_idx; + u32 reg_base_offset; + u16 bit_code[512]; +}; + +struct vpudrv_inst_info_t { + u32 core_idx; + u32 inst_idx; + s32 inst_open_count; /* for output only*/ +}; + +struct vpudrv_intr_info_t { + u32 timeout; + s32 intr_reason; + s32 intr_inst_index; +}; + +struct vpu_drv_context_t { + struct fasync_struct *async_queue; + ulong interrupt_reason[MAX_NUM_INSTANCE]; + u32 interrupt_flag[MAX_NUM_INSTANCE]; + u32 
open_count; /*!<< device reference count. Not instance count */ +}; + +/* To track the allocated memory buffer */ +struct vpudrv_buffer_pool_t { + struct list_head list; + struct vpudrv_buffer_t vb; + struct file *filp; +}; + +/* To track the instance index and buffer in instance pool */ +struct vpudrv_instanace_list_t { + struct list_head list; + ulong inst_idx; + ulong core_idx; + struct file *filp; +}; + +struct vpudrv_instance_pool_t { + u8 codecInstPool[MAX_NUM_INSTANCE][MAX_INST_HANDLE_SIZE]; +}; + +struct vpudrv_dma_buf_info_t { + u32 num_planes; + int fd[3]; + ulong phys_addr[3]; /* phys address for DMA buffer */ +}; + +//hoan add for canvas +struct vpudrv_dma_buf_canvas_info_t { + u32 num_planes; + u32 canvas_index; + int fd[3]; + ulong phys_addr[3]; /* phys address for DMA buffer */ +}; + + + +//end + + + +#ifdef CONFIG_COMPAT +struct compat_vpudrv_dma_buf_info_t { + u32 num_planes; + compat_int_t fd[3]; + compat_ulong_t phys_addr[3]; /* phys address for DMA buffer */ +}; + + +struct compat_vpudrv_dma_buf_canvas_info_t { + u32 num_planes; + u32 canvas_index; + compat_int_t fd[3]; + compat_ulong_t phys_addr[3]; /* phys address for DMA buffer */ +}; + + +#endif + +struct vpu_dma_cfg { + int fd; + void *dev; + void *vaddr; + unsigned long paddr; + struct dma_buf *dbuf; + struct dma_buf_attachment *attach; + struct sg_table *sg; + enum dma_data_direction dir; +}; + +/* To track the occupied dma_buf */ +struct vpudrv_dma_buf_pool_t { + struct list_head list; + struct vpu_dma_cfg dma_cfg; + struct file *filp; +}; + +#define VPUDRV_BUF_LEN struct vpudrv_buffer_t +#define VPUDRV_INST_LEN struct vpudrv_inst_info_t +#define VPUDRV_DMABUF_LEN struct vpudrv_dma_buf_info_t + //hoan add for canvas +#define VPUDRV_DMABUF_CANVAS_LEN struct vpudrv_dma_buf_canvas_info_t + //end + +#ifdef CONFIG_COMPAT +#define VPUDRV_BUF_LEN32 struct compat_vpudrv_buffer_t +#define VPUDRV_DMABUF_LEN32 struct compat_vpudrv_dma_buf_info_t + //hoan add for canvas +#define 
VPUDRV_DMABUF_CANVAS_LEN32 struct compat_vpudrv_dma_buf_canvas_info_t + //end + +#endif + +#define VDI_MAGIC 'V' +#define VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY \ + _IOW(VDI_MAGIC, 0, VPUDRV_BUF_LEN) + +#define VDI_IOCTL_FREE_PHYSICALMEMORY \ + _IOW(VDI_MAGIC, 1, VPUDRV_BUF_LEN) + +#define VDI_IOCTL_WAIT_INTERRUPT \ + _IOW(VDI_MAGIC, 2, struct vpudrv_intr_info_t) + +#define VDI_IOCTL_SET_CLOCK_GATE \ + _IOW(VDI_MAGIC, 3, u32) + +#define VDI_IOCTL_RESET \ + _IOW(VDI_MAGIC, 4, u32) + +#define VDI_IOCTL_GET_INSTANCE_POOL \ + _IOW(VDI_MAGIC, 5, VPUDRV_BUF_LEN) + +#define VDI_IOCTL_GET_COMMON_MEMORY \ + _IOW(VDI_MAGIC, 6, VPUDRV_BUF_LEN) + +#define VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO \ + _IOW(VDI_MAGIC, 8, VPUDRV_BUF_LEN) + +#define VDI_IOCTL_OPEN_INSTANCE \ + _IOW(VDI_MAGIC, 9, VPUDRV_INST_LEN) + +#define VDI_IOCTL_CLOSE_INSTANCE \ + _IOW(VDI_MAGIC, 10, VPUDRV_INST_LEN) + +#define VDI_IOCTL_GET_INSTANCE_NUM \ + _IOW(VDI_MAGIC, 11, VPUDRV_INST_LEN) + +#define VDI_IOCTL_GET_REGISTER_INFO \ + _IOW(VDI_MAGIC, 12, VPUDRV_BUF_LEN) + +#define VDI_IOCTL_GET_FREE_MEM_SIZE \ + _IOW(VDI_MAGIC, 13, u32) + +#define VDI_IOCTL_FLUSH_BUFFER \ + _IOW(VDI_MAGIC, 14, VPUDRV_BUF_LEN) + +#define VDI_IOCTL_CACHE_INV_BUFFER \ + _IOW(VDI_MAGIC, 15, VPUDRV_BUF_LEN) + +#define VDI_IOCTL_CONFIG_DMA \ + _IOW(VDI_MAGIC, 16, VPUDRV_DMABUF_LEN) + +#define VDI_IOCTL_UNMAP_DMA \ + _IOW(VDI_MAGIC, 17, VPUDRV_DMABUF_LEN) + + +//hoan add for canvas +#define VDI_IOCTL_READ_CANVAS \ + _IOW(VDI_MAGIC, 20, VPUDRV_DMABUF_CANVAS_LEN) +//end + +#ifdef CONFIG_COMPAT +#define VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32 \ + _IOW(VDI_MAGIC, 0, VPUDRV_BUF_LEN32) + +#define VDI_IOCTL_FREE_PHYSICALMEMORY32 \ + _IOW(VDI_MAGIC, 1, VPUDRV_BUF_LEN32) + +#define VDI_IOCTL_GET_INSTANCE_POOL32 \ + _IOW(VDI_MAGIC, 5, VPUDRV_BUF_LEN32) + +#define VDI_IOCTL_GET_COMMON_MEMORY32 \ + _IOW(VDI_MAGIC, 6, VPUDRV_BUF_LEN32) + +#define VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32 \ + _IOW(VDI_MAGIC, 8, VPUDRV_BUF_LEN32) + +#define 
VDI_IOCTL_GET_REGISTER_INFO32 \ + _IOW(VDI_MAGIC, 12, VPUDRV_BUF_LEN32) + +#define VDI_IOCTL_FLUSH_BUFFER32 \ + _IOW(VDI_MAGIC, 14, VPUDRV_BUF_LEN32) + +#define VDI_IOCTL_CACHE_INV_BUFFER32 \ + _IOW(VDI_MAGIC, 15, VPUDRV_BUF_LEN32) + +#define VDI_IOCTL_CONFIG_DMA32 \ + _IOW(VDI_MAGIC, 16, VPUDRV_DMABUF_LEN32) + +#define VDI_IOCTL_UNMAP_DMA32 \ + _IOW(VDI_MAGIC, 17, VPUDRV_DMABUF_LEN32) + +//hoan add for canvas +#define VDI_IOCTL_READ_CANVAS32 \ + _IOW(VDI_MAGIC, 20, VPUDRV_DMABUF_CANVAS_LEN32) +//end + + + +#endif +/* implement to power management functions */ +#define BIT_BASE 0x0000 +#define BIT_CODE_RUN (BIT_BASE + 0x000) +#define BIT_CODE_DOWN (BIT_BASE + 0x004) +#define BIT_INT_CLEAR (BIT_BASE + 0x00C) +#define BIT_INT_STS (BIT_BASE + 0x010) +#define BIT_CODE_RESET (BIT_BASE + 0x014) +#define BIT_INT_REASON (BIT_BASE + 0x174) +#define BIT_BUSY_FLAG (BIT_BASE + 0x160) +#define BIT_RUN_COMMAND (BIT_BASE + 0x164) +#define BIT_RUN_INDEX (BIT_BASE + 0x168) +#define BIT_RUN_COD_STD (BIT_BASE + 0x16C) + + +#define VPU_REG_BASE_ADDR 0xFE070000 +#define VPU_REG_SIZE (0x4000*MAX_NUM_VPU_CORE) + +/* registers */ +#define VP5_REG_BASE 0x0000 +#define VP5_VPU_BUSY_STATUS (VP5_REG_BASE + 0x0070) +#define VP5_VPU_INT_REASON_CLEAR (VP5_REG_BASE + 0x0034) +#define VP5_VPU_VINT_CLEAR (VP5_REG_BASE + 0x003C) +#define VP5_VPU_VPU_INT_STS (VP5_REG_BASE + 0x0044) +#define VP5_VPU_INT_REASON (VP5_REG_BASE + 0x004c) +#define VP5_RET_FAIL_REASON (VP5_REG_BASE + 0x010C) + +#define VP5_RET_BS_EMPTY_INST (VP5_REG_BASE + 0x01E4) +#define VP5_RET_QUEUE_CMD_DONE_INST (VP5_REG_BASE + 0x01E8) +#define VP5_RET_SEQ_DONE_INSTANCE_INFO (VP5_REG_BASE + 0x01FC) + +/* interrrupt bits */ +enum { + INT_INIT_VPU = 0, + INT_WAKEUP_VPU = 1, + INT_SLEEP_VPU = 2, + INT_CREATE_INSTANCE = 3, + INT_FLUSH_INSTANCE = 4, + INT_DESTORY_INSTANCE = 5, + INT_INIT_SEQ = 6, + INT_SET_FRAMEBUF = 7, + INT_DEC_PIC = 8, + INT_ENC_PIC = 8, + INT_ENC_SET_PARAM = 9, +#ifdef SUPPORT_SOURCE_RELEASE_INTERRUPT + 
INT_ENC_SRC_RELEASE = 10, +#endif + INT_ENC_LOW_LATENCY = 13, + INT_DEC_QUERY = 14, + INT_BSBUF_EMPTY = 15, + INT_BSBUF_FULL = 15, +}; + +/* INIT, WAKEUP */ +#define VP5_PO_CONF (VP5_REG_BASE + 0x0000) +#define VP5_VPU_VINT_ENABLE (VP5_REG_BASE + 0x0048) + +#define VP5_VPU_RESET_REQ (VP5_REG_BASE + 0x0050) +#define VP5_VPU_RESET_STATUS (VP5_REG_BASE + 0x0054) + +#define VP5_VPU_REMAP_CTRL (VP5_REG_BASE + 0x0060) +#define VP5_VPU_REMAP_VADDR (VP5_REG_BASE + 0x0064) +#define VP5_VPU_REMAP_PADDR (VP5_REG_BASE + 0x0068) +#define VP5_VPU_REMAP_CORE_START (VP5_REG_BASE + 0x006C) + +#define VP5_REMAP_CODE_INDEX 0 + +/*VPU registers */ +#define VP5_ADDR_CODE_BASE (VP5_REG_BASE + 0x0110) +#define VP5_CODE_SIZE (VP5_REG_BASE + 0x0114) +#define VP5_CODE_PARAM (VP5_REG_BASE + 0x0118) +#define VP5_INIT_VPU_TIME_OUT_CNT (VP5_REG_BASE + 0x0130) +#define VP5_HW_OPTION (VP5_REG_BASE + 0x012C) +#define VP5_RET_SUCCESS (VP5_REG_BASE + 0x0108) +#define VP5_COMMAND (VP5_REG_BASE + 0x0100) +#define VP5_VPU_HOST_INT_REQ (VP5_REG_BASE + 0x0038) +/* Product register */ +#define VPU_PRODUCT_CODE_REGISTER (BIT_BASE + 0x1044) + +#define ReadVpuRegister(addr) \ + readl((void __iomem *)(s_vpu_register.virt_addr \ + + s_bit_firmware_info[core].reg_base_offset + addr)) + +#define WriteVpuRegister(addr, val) \ + writel((u32)val, (void __iomem *)(s_vpu_register.virt_addr \ + + s_bit_firmware_info[core].reg_base_offset + addr)) + +#define WriteVpu(addr, val) writel((u32)val, (void __iomem *)addr) +#endif
diff --git a/drivers/framerate_adapter/Makefile b/drivers/framerate_adapter/Makefile new file mode 100644 index 0000000..fc1998b --- /dev/null +++ b/drivers/framerate_adapter/Makefile
@@ -0,0 +1 @@ +obj-m += video_framerate_adapter.o
diff --git a/drivers/framerate_adapter/video_framerate_adapter.c b/drivers/framerate_adapter/video_framerate_adapter.c new file mode 100644 index 0000000..e59bd65 --- /dev/null +++ b/drivers/framerate_adapter/video_framerate_adapter.c
@@ -0,0 +1,133 @@ +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <uapi/linux/major.h> +#include "video_framerate_adapter.h" + +#define CLASS_NAME "framerate_adapter" +#define DEV_NAME "framerate_dev" + +#ifndef VIDEOFRAME_MAJOR +#define VIDEOFRAME_MAJOR AMSTREAM_MAJOR +#endif + +struct frame_rate_dev_s* frame_rate_dev; + + void vframe_rate_uevent(int duration) +{ + char *configured[2]; + char framerate[40] = {0}; + + sprintf(framerate, "FRAME_RATE_HINT=%lu", + (unsigned long)duration); + configured[0] = framerate; + configured[1] = NULL; + kobject_uevent_env(&frame_rate_dev->dev->kobj, + KOBJ_CHANGE, configured); + + pr_info("%s: sent uevent %s\n", __func__, configured[0]); +} + +EXPORT_SYMBOL(vframe_rate_uevent); + +static const struct file_operations frame_rate_fops = { + .owner = THIS_MODULE +}; + +static struct attribute *frame_rate_class_attrs[] = { + NULL +}; + +ATTRIBUTE_GROUPS(frame_rate_class); + +static struct class frame_rate_class = { + .name = CLASS_NAME, + .class_groups = frame_rate_class_groups, +}; + +static int frame_rate_driver_init(void) +{ + int ret = -1; + + frame_rate_dev = kzalloc(sizeof(struct frame_rate_dev_s), GFP_KERNEL); + if (IS_ERR_OR_NULL(frame_rate_dev)) + return -ENOMEM; + + frame_rate_dev->dev_no = MKDEV(VIDEOFRAME_MAJOR, 101); + + ret = register_chrdev_region(frame_rate_dev->dev_no, 1, DEV_NAME); + if (ret < 0) { + pr_err("Can't get major number %d.\n", VIDEOFRAME_MAJOR); + goto err_4; + } + + cdev_init(&frame_rate_dev->cdev, &frame_rate_fops); + frame_rate_dev->cdev.owner = THIS_MODULE; + + ret = cdev_add(&frame_rate_dev->cdev, frame_rate_dev->dev_no, 1); + if (ret) { + pr_err("Error %d adding cdev fail.\n", ret); + goto err_3; + } + + ret = class_register(&frame_rate_class); + if (ret < 0) { + pr_err("Failed in creating class.\n"); + goto err_2; + } + + frame_rate_dev->dev = device_create(&frame_rate_class, NULL, + frame_rate_dev->dev_no, NULL, DEV_NAME); + if 
(IS_ERR_OR_NULL(frame_rate_dev->dev)) { + pr_err("Create device failed.\n"); + ret = -ENODEV; + goto err_1; + } + pr_info("Registered frame rate driver success.\n"); + return 0; + +err_1: + device_destroy(&frame_rate_class, frame_rate_dev->dev_no); +err_2: + class_unregister(&frame_rate_class); +err_3: + cdev_del(&frame_rate_dev->cdev); +err_4: + unregister_chrdev_region(frame_rate_dev->dev_no, 1); + kfree(frame_rate_dev); + return ret; +} + +static void frame_rate_driver_exit(void) +{ + device_destroy(&frame_rate_class, frame_rate_dev->dev_no); + class_unregister(&frame_rate_class); + cdev_del(&frame_rate_dev->cdev); + unregister_chrdev_region(frame_rate_dev->dev_no, 1); + kfree(frame_rate_dev); +} + +static int __init frame_rate_module_init(void) +{ + int ret = -1; + + ret = frame_rate_driver_init(); + if (ret) { + pr_info("Error %d frame_rate_module_init init fail.\n", ret); + } + return ret; +} + +static void __exit frame_rate_module_exit(void) +{ + frame_rate_driver_exit(); + pr_info("frame_rate_module_exit\n"); +} + +module_init(frame_rate_module_init); +module_exit(frame_rate_module_exit); + +MODULE_AUTHOR("<shilong.yang@amlogic.com>"); +MODULE_DESCRIPTION("framerate adapter"); +MODULE_LICENSE("GPL");
diff --git a/drivers/framerate_adapter/video_framerate_adapter.h b/drivers/framerate_adapter/video_framerate_adapter.h new file mode 100644 index 0000000..1748843 --- /dev/null +++ b/drivers/framerate_adapter/video_framerate_adapter.h
@@ -0,0 +1,34 @@ +/* + * drivers/framerate_adapter/video_framerate_adaper.h + * + * Copyright (C) 2020 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * +*/ + +#ifndef VIDEOFRAMERATEADAPTER_H +#define VIDEOFRAMERATEADAPTER_H + +#include <linux/types.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/cdev.h> + + +struct frame_rate_dev_s { + struct cdev cdev; + struct device *dev; + dev_t dev_no; +}; + +#endif +
diff --git a/drivers/include/dummy-for-git-empty-dir b/drivers/include/dummy-for-git-empty-dir new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/drivers/include/dummy-for-git-empty-dir
diff --git a/drivers/media_sync/Makefile b/drivers/media_sync/Makefile new file mode 100644 index 0000000..7ed5b83 --- /dev/null +++ b/drivers/media_sync/Makefile
# Kbuild fragment: build one media_sync.ko from the two objects below.
obj-m += media_sync.o

media_sync-objs := media_sync_dev.o media_sync_core.o
diff --git a/drivers/media_sync/media_sync_core.c b/drivers/media_sync/media_sync_core.c new file mode 100644 index 0000000..abc016f --- /dev/null +++ b/drivers/media_sync/media_sync_core.c
@@ -0,0 +1,1256 @@ +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/amlogic/cpu_version.h> +#include <linux/syscalls.h> +#include <linux/times.h> +#include <linux/time.h> +#include <linux/time64.h> +#include "media_sync_core.h" + +#define MAX_INSTANCE_NUM 10 +mediasync_ins* vMediaSyncInsList[MAX_INSTANCE_NUM] = {0}; +u64 last_system; +u64 last_pcr; +typedef int (*pfun_amldemux_pcrscr_get)(int demux_device_index, int index, + u64 *stc); +static pfun_amldemux_pcrscr_get amldemux_pcrscr_get = NULL; +//extern int demux_get_stc(int demux_device_index, int index, +// u64 *stc, unsigned int *base); +extern int demux_get_pcr(int demux_device_index, int index, u64 *pcr); + +static u64 get_llabs(s64 value){ + u64 llvalue; + if (value > 0) { + return value; + } else { + llvalue = (u64)(0-value); + return llvalue; + } +} + +static u64 get_stc_time_us(s32 sSyncInsId) +{ + /*mediasync_ins* pInstance = NULL; + u64 stc; + unsigned int base; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + demux_get_stc(pInstance->mDemuxId, 0, &stc, &base);*/ + mediasync_ins* pInstance = NULL; + int ret = -1; + u64 stc; + u64 timeus; + u64 pcr; + s64 pcr_diff; + s64 time_diff; + s32 index = sSyncInsId; + struct timespec64 ts_monotonic; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + pInstance = vMediaSyncInsList[index]; + if (pInstance->mSyncMode != MEDIA_SYNC_PCRMASTER) + return 0; + if (!amldemux_pcrscr_get) + amldemux_pcrscr_get = symbol_request(demux_get_pcr); + if (!amldemux_pcrscr_get) + return 0; + ktime_get_ts64(&ts_monotonic); + timeus = ts_monotonic.tv_sec * 1000000LL + div_u64(ts_monotonic.tv_nsec , 1000); + if (pInstance->mDemuxId < 0) + return timeus; + + + ret = amldemux_pcrscr_get(pInstance->mDemuxId, 0, &pcr); + + if (ret != 0) { + 
stc = timeus; + } else { + if (last_pcr == 0) { + stc = timeus; + last_pcr = div_u64(pcr * 100 , 9); + last_system = timeus; + } else { + pcr_diff = div_u64(pcr * 100 , 9) - last_pcr; + time_diff = timeus - last_system; + if (time_diff && (div_u64(get_llabs(pcr_diff) , (s32)time_diff) + > 100)) { + last_pcr = div_u64(pcr * 100 , 9); + last_system = timeus; + stc = timeus; + } else { + if (time_diff) + stc = last_system + pcr_diff; + else + stc = timeus; + + last_pcr = div_u64(pcr * 100 , 9); + last_system = stc; + } + } + } + pr_debug("get_stc_time_us stc:%lld pcr:%lld system_time:%lld\n", stc, div_u64(pcr * 100 , 9), timeus); + return stc; +} + +static s64 get_system_time_us(void) { + s64 TimeUs; + struct timespec64 ts_monotonic; + ktime_get_ts64(&ts_monotonic); + TimeUs = ts_monotonic.tv_sec * 1000000LL + div_u64(ts_monotonic.tv_nsec , 1000); + pr_debug("get_system_time_us %lld\n", TimeUs); + return TimeUs; +} + +long mediasync_ins_alloc(s32 sDemuxId, + s32 sPcrPid, + s32 *sSyncInsId, + mediasync_ins **pIns){ + s32 index = 0; + mediasync_ins* pInstance = NULL; + pInstance = kzalloc(sizeof(mediasync_ins), GFP_KERNEL); + if (pInstance == NULL) { + return -1; + } + + for (index = 0; index < MAX_INSTANCE_NUM - 1; index++) { + if (vMediaSyncInsList[index] == NULL) { + vMediaSyncInsList[index] = pInstance; + pInstance->mSyncInsId = index; + *sSyncInsId = index; + pr_info("mediasync_ins_alloc index:%d\n", index); + break; + } + } + + if (index == MAX_INSTANCE_NUM) { + kzfree(pInstance); + return -1; + } + + pInstance->mDemuxId = sDemuxId; + pInstance->mPcrPid = sPcrPid; + mediasync_ins_init_syncinfo(pInstance->mSyncInsId); + pInstance->mHasAudio = -1; + pInstance->mHasVideo = -1; + pInstance->mVideoWorkMode = 0; + pInstance->mFccEnable = 0; + pInstance->mSourceClockType = UNKNOWN_CLOCK; + pInstance->mSyncInfo.state = MEDIASYNC_INIT; + pInstance->mSourceClockState = CLOCK_PROVIDER_NORMAL; + pInstance->mute_flag = false; + pInstance->mSourceType = TS_DEMOD; + 
pInstance->mUpdateTimeThreshold = MIN_UPDATETIME_THRESHOLD_US; + *pIns = pInstance; + return 0; +} + + +long mediasync_ins_delete(s32 sSyncInsId) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + kzfree(pInstance); + vMediaSyncInsList[index] = NULL; + return 0; +} + +long mediasync_ins_binder(s32 sSyncInsId, + mediasync_ins **pIns) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mRef++; + *pIns = pInstance; + return 0; +} + +long mediasync_ins_unbinder(s32 sSyncInsId) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mRef--; + + if (pInstance->mRef <= 0) + mediasync_ins_delete(sSyncInsId); + + return 0; +} + +long mediasync_ins_init_syncinfo(s32 sSyncInsId) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSyncInfo.state = MEDIASYNC_INIT; + pInstance->mSyncInfo.firstAframeInfo.framePts = -1; + pInstance->mSyncInfo.firstAframeInfo.frameSystemTime = -1; + pInstance->mSyncInfo.firstVframeInfo.framePts = -1; + pInstance->mSyncInfo.firstVframeInfo.frameSystemTime = -1; + pInstance->mSyncInfo.firstDmxPcrInfo.framePts = -1; + pInstance->mSyncInfo.firstDmxPcrInfo.frameSystemTime = -1; + pInstance->mSyncInfo.refClockInfo.framePts = -1; + pInstance->mSyncInfo.refClockInfo.frameSystemTime = -1; + pInstance->mSyncInfo.curAudioInfo.framePts = -1; + pInstance->mSyncInfo.curAudioInfo.frameSystemTime = -1; + 
pInstance->mSyncInfo.curVideoInfo.framePts = -1; + pInstance->mSyncInfo.curVideoInfo.frameSystemTime = -1; + pInstance->mSyncInfo.curDmxPcrInfo.framePts = -1; + pInstance->mSyncInfo.curDmxPcrInfo.frameSystemTime = -1; + pInstance->mAudioInfo.cacheSize = -1; + pInstance->mAudioInfo.cacheDuration = -1; + pInstance->mVideoInfo.cacheSize = -1; + pInstance->mVideoInfo.cacheDuration = -1; + pInstance->mPauseResumeFlag = 0; + + return 0; +} + +long mediasync_ins_update_mediatime(s32 sSyncInsId, + s64 lMediaTime, + s64 lSystemTime, bool forceUpdate) { + mediasync_ins* pInstance = NULL; + u64 current_stc = 0; + s64 current_systemtime = 0; + s64 diff_system_time = 0; + s64 diff_mediatime = 0; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + current_stc = get_stc_time_us(sSyncInsId); + current_systemtime = get_system_time_us(); +#if 0 + pInstance->mSyncMode = MEDIA_SYNC_PCRMASTER; +#endif + if (pInstance->mSyncMode == MEDIA_SYNC_PCRMASTER) { + if (lSystemTime == 0) { + if (current_stc != 0) { + diff_system_time = current_stc - pInstance->mLastStc; + diff_mediatime = lMediaTime - pInstance->mLastMediaTime; + } else { + diff_system_time = current_systemtime - pInstance->mLastRealTime; + diff_mediatime = lMediaTime - pInstance->mLastMediaTime; + } + if (pInstance->mSyncModeChange == 1 + || diff_mediatime < 0 + || ((diff_mediatime > 0) + && (get_llabs(diff_system_time - diff_mediatime) > pInstance->mUpdateTimeThreshold))) { + pr_info("MEDIA_SYNC_PCRMASTER update time\n"); + pInstance->mLastMediaTime = lMediaTime; + pInstance->mLastRealTime = current_systemtime; + pInstance->mLastStc = current_stc; + pInstance->mSyncModeChange = 0; + } + } else { + if (current_stc != 0) { + diff_system_time = lSystemTime - pInstance->mLastRealTime; + diff_mediatime = lMediaTime - pInstance->mLastMediaTime; + } else { + diff_system_time = lSystemTime - pInstance->mLastRealTime; + 
diff_mediatime = lMediaTime - pInstance->mLastMediaTime; + } + + if (pInstance->mSyncModeChange == 1 + || diff_mediatime < 0 + || ((diff_mediatime > 0) + && (get_llabs(diff_system_time - diff_mediatime) > pInstance->mUpdateTimeThreshold))) { + pInstance->mLastMediaTime = lMediaTime; + pInstance->mLastRealTime = lSystemTime; + pInstance->mLastStc = current_stc + lSystemTime - current_systemtime; + pInstance->mSyncModeChange = 0; + } + } + } else { + if (lSystemTime == 0) { + diff_system_time = current_systemtime - pInstance->mLastRealTime; + diff_mediatime = lMediaTime - pInstance->mLastMediaTime; + + if (pInstance->mSyncModeChange == 1 + || forceUpdate + || diff_mediatime < 0 + || ((diff_mediatime > 0) + && (get_llabs(diff_system_time - diff_mediatime) > pInstance->mUpdateTimeThreshold))) { + pr_info("mSyncMode:%d update time system diff:%lld media diff:%lld current:%lld\n", + pInstance->mSyncMode, + diff_system_time, + diff_mediatime, + current_systemtime); + pInstance->mLastMediaTime = lMediaTime; + pInstance->mLastRealTime = current_systemtime; + pInstance->mLastStc = current_stc; + pInstance->mSyncModeChange = 0; + } + } else { + diff_system_time = lSystemTime - pInstance->mLastRealTime; + diff_mediatime = lMediaTime - pInstance->mLastMediaTime; + if (pInstance->mSyncModeChange == 1 + || forceUpdate + || diff_mediatime < 0 + || ((diff_mediatime > 0) + && (get_llabs(diff_system_time - diff_mediatime) > pInstance->mUpdateTimeThreshold))) { + pr_info("mSyncMode:%d update time stc diff:%lld media diff:%lld lSystemTime:%lld lMediaTime:%lld\n", + pInstance->mSyncMode, + diff_system_time, + diff_mediatime, + lSystemTime, + lMediaTime); + pInstance->mLastMediaTime = lMediaTime; + pInstance->mLastRealTime = lSystemTime; + pInstance->mLastStc = current_stc + lSystemTime - current_systemtime; + pInstance->mSyncModeChange = 0; + } + } + } + pInstance->mTrackMediaTime = lMediaTime; + return 0; +} + +long mediasync_ins_set_mediatime_speed(s32 sSyncInsId, + mediasync_speed 
fSpeed) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSpeed.mNumerator = fSpeed.mNumerator; + pInstance->mSpeed.mDenominator = fSpeed.mDenominator; + return 0; +} + +long mediasync_ins_set_paused(s32 sSyncInsId, s32 sPaused) { + + mediasync_ins* pInstance = NULL; + u64 current_stc = 0; + s64 current_systemtime = 0; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + if (sPaused != 0 && sPaused != 1) + return -1; + if (sPaused == pInstance->mPaused) + return 0; + + current_stc = get_stc_time_us(sSyncInsId); + current_systemtime = get_system_time_us(); + + pInstance->mPaused = sPaused; + + if (pInstance->mSyncMode == MEDIA_SYNC_AMASTER) + pInstance->mLastMediaTime = pInstance->mLastMediaTime + + (current_systemtime - pInstance->mLastRealTime); + + pInstance->mLastRealTime = current_systemtime; + pInstance->mLastStc = current_stc; + + return 0; +} + +long mediasync_ins_get_paused(s32 sSyncInsId, s32* spPaused) { + + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + *spPaused = pInstance->mPaused ; + return 0; +} + +long mediasync_ins_set_syncmode(s32 sSyncInsId, s32 sSyncMode){ + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSyncMode = sSyncMode; + pInstance->mSyncModeChange = 1; + return 0; +} + +long mediasync_ins_get_syncmode(s32 sSyncInsId, s32 *sSyncMode) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + 
return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + *sSyncMode = pInstance->mSyncMode; + return 0; +} + +long mediasync_ins_get_mediatime_speed(s32 sSyncInsId, mediasync_speed *fpSpeed) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + fpSpeed->mNumerator = pInstance->mSpeed.mNumerator; + fpSpeed->mDenominator = pInstance->mSpeed.mDenominator; + return 0; +} + +long mediasync_ins_get_anchor_time(s32 sSyncInsId, + s64* lpMediaTime, + s64* lpSTCTime, + s64* lpSystemTime) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *lpMediaTime = pInstance->mLastMediaTime; + *lpSTCTime = pInstance->mLastStc; + *lpSystemTime = pInstance->mLastRealTime; + return 0; +} + +long mediasync_ins_get_systemtime(s32 sSyncInsId, s64* lpSTC, s64* lpSystemTime){ + mediasync_ins* pInstance = NULL; + u64 current_stc = 0; + s64 current_systemtime = 0; + s32 index = sSyncInsId; + + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + current_stc = get_stc_time_us(sSyncInsId); + current_systemtime = get_system_time_us(); + + *lpSTC = current_stc; + *lpSystemTime = current_systemtime; + + return 0; +} + +long mediasync_ins_get_nextvsync_systemtime(s32 sSyncInsId, s64* lpSystemTime) { + + return 0; +} + +long mediasync_ins_set_updatetime_threshold(s32 sSyncInsId, s64 lTimeThreshold) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + pInstance->mUpdateTimeThreshold = lTimeThreshold; + return 0; +} + +long 
mediasync_ins_get_updatetime_threshold(s32 sSyncInsId, s64* lpTimeThreshold) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + *lpTimeThreshold = pInstance->mUpdateTimeThreshold; + return 0; +} + +long mediasync_ins_get_trackmediatime(s32 sSyncInsId, s64* lpTrackMediaTime) { + + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + *lpTrackMediaTime = pInstance->mTrackMediaTime; + return 0; +} +long mediasync_ins_set_clocktype(s32 sSyncInsId, mediasync_clocktype type) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSourceClockType = type; + return 0; +} + +long mediasync_ins_get_clocktype(s32 sSyncInsId, mediasync_clocktype* type) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *type = pInstance->mSourceClockType; + return 0; +} + +long mediasync_ins_set_clockstate(s32 sSyncInsId, mediasync_clockprovider_state state) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSourceClockState = state; + return 0; +} + +long mediasync_ins_get_clockstate(s32 sSyncInsId, mediasync_clockprovider_state* state) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + 
return -1; + + *state = pInstance->mSourceClockState; + return 0; +} + +long mediasync_ins_set_hasaudio(s32 sSyncInsId, int hasaudio) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mHasAudio = hasaudio; + return 0; +} + +long mediasync_ins_get_hasaudio(s32 sSyncInsId, int* hasaudio) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *hasaudio = pInstance->mHasAudio; + return 0; +} +long mediasync_ins_set_hasvideo(s32 sSyncInsId, int hasvideo) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mHasVideo = hasvideo; + return 0; +} + +long mediasync_ins_get_hasvideo(s32 sSyncInsId, int* hasvideo) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *hasvideo = pInstance->mHasVideo; + return 0; +} + +long mediasync_ins_set_firstaudioframeinfo(s32 sSyncInsId, mediasync_frameinfo info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSyncInfo.firstAframeInfo.framePts = info.framePts; + pInstance->mSyncInfo.firstAframeInfo.frameSystemTime = info.frameSystemTime; + return 0; +} + +long mediasync_ins_get_firstaudioframeinfo(s32 sSyncInsId, mediasync_frameinfo* info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + 
return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + info->framePts = pInstance->mSyncInfo.firstAframeInfo.framePts; + info->frameSystemTime = pInstance->mSyncInfo.firstAframeInfo.frameSystemTime; + return 0; +} + +long mediasync_ins_set_firstvideoframeinfo(s32 sSyncInsId, mediasync_frameinfo info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSyncInfo.firstVframeInfo.framePts = info.framePts; + pInstance->mSyncInfo.firstVframeInfo.frameSystemTime = info.frameSystemTime; + return 0; +} + +long mediasync_ins_get_firstvideoframeinfo(s32 sSyncInsId, mediasync_frameinfo* info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + info->framePts = pInstance->mSyncInfo.firstVframeInfo.framePts; + info->frameSystemTime = pInstance->mSyncInfo.firstVframeInfo.frameSystemTime; + return 0; +} + +long mediasync_ins_set_firstdmxpcrinfo(s32 sSyncInsId, mediasync_frameinfo info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSyncInfo.firstDmxPcrInfo.framePts = info.framePts; + pInstance->mSyncInfo.firstDmxPcrInfo.frameSystemTime = info.frameSystemTime; + return 0; +} + +long mediasync_ins_get_firstdmxpcrinfo(s32 sSyncInsId, mediasync_frameinfo* info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + int64_t pcr = -1; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + if (pInstance->mSyncInfo.firstDmxPcrInfo.framePts == -1) { + if 
(amldemux_pcrscr_get) { + amldemux_pcrscr_get(pInstance->mDemuxId, 0, &pcr); + pInstance->mSyncInfo.firstDmxPcrInfo.framePts = pcr; + pInstance->mSyncInfo.firstDmxPcrInfo.frameSystemTime = get_system_time_us(); + } + } + + info->framePts = pInstance->mSyncInfo.firstDmxPcrInfo.framePts; + info->frameSystemTime = pInstance->mSyncInfo.firstDmxPcrInfo.frameSystemTime; + return 0; +} + +long mediasync_ins_set_refclockinfo(s32 sSyncInsId, mediasync_frameinfo info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSyncInfo.refClockInfo.framePts = info.framePts; + pInstance->mSyncInfo.refClockInfo.frameSystemTime = info.frameSystemTime; + return 0; +} + +long mediasync_ins_get_refclockinfo(s32 sSyncInsId, mediasync_frameinfo* info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + info->framePts = pInstance->mSyncInfo.refClockInfo.framePts; + info->frameSystemTime = pInstance->mSyncInfo.refClockInfo.frameSystemTime; + return 0; +} + +long mediasync_ins_set_curaudioframeinfo(s32 sSyncInsId, mediasync_frameinfo info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSyncInfo.curAudioInfo.framePts = info.framePts; + pInstance->mSyncInfo.curAudioInfo.frameSystemTime = info.frameSystemTime; + return 0; +} + +long mediasync_ins_get_curaudioframeinfo(s32 sSyncInsId, mediasync_frameinfo* info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + 
info->framePts = pInstance->mSyncInfo.curAudioInfo.framePts; + info->frameSystemTime = pInstance->mSyncInfo.curAudioInfo.frameSystemTime; + return 0; +} + +long mediasync_ins_set_curvideoframeinfo(s32 sSyncInsId, mediasync_frameinfo info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSyncInfo.curVideoInfo.framePts = info.framePts; + pInstance->mSyncInfo.curVideoInfo.frameSystemTime = info.frameSystemTime; + pInstance->mTrackMediaTime = div_u64(info.framePts * 100 , 9); + + return 0; +} + +long mediasync_ins_get_curvideoframeinfo(s32 sSyncInsId, mediasync_frameinfo* info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + info->framePts = pInstance->mSyncInfo.curVideoInfo.framePts; + info->frameSystemTime = pInstance->mSyncInfo.curVideoInfo.frameSystemTime; + return 0; +} + +long mediasync_ins_set_curdmxpcrinfo(s32 sSyncInsId, mediasync_frameinfo info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSyncInfo.curDmxPcrInfo.framePts = info.framePts; + pInstance->mSyncInfo.curDmxPcrInfo.frameSystemTime = info.frameSystemTime; + return 0; +} + +long mediasync_ins_get_curdmxpcrinfo(s32 sSyncInsId, mediasync_frameinfo* info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + int64_t pcr = -1; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + if (amldemux_pcrscr_get) { + amldemux_pcrscr_get(pInstance->mDemuxId, 0, &pcr); + pInstance->mSyncInfo.curDmxPcrInfo.framePts = pcr; + 
pInstance->mSyncInfo.curDmxPcrInfo.frameSystemTime = get_system_time_us(); + } + + info->framePts = pInstance->mSyncInfo.curDmxPcrInfo.framePts; + info->frameSystemTime = pInstance->mSyncInfo.curDmxPcrInfo.frameSystemTime; + return 0; +} + +long mediasync_ins_set_audiomute(s32 sSyncInsId, int mute_flag) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mute_flag = mute_flag; + + return 0; +} + +long mediasync_ins_get_audiomute(s32 sSyncInsId, int* mute_flag) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *mute_flag = pInstance->mute_flag; + return 0; +} + +long mediasync_ins_set_audioinfo(s32 sSyncInsId, mediasync_audioinfo info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mAudioInfo.cacheDuration = info.cacheDuration; + pInstance->mAudioInfo.cacheSize = info.cacheSize; + return 0; +} + +long mediasync_ins_get_audioinfo(s32 sSyncInsId, mediasync_audioinfo* info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + info->cacheDuration = pInstance->mAudioInfo.cacheDuration; + info->cacheDuration = pInstance->mAudioInfo.cacheDuration; + return 0; +} + +long mediasync_ins_set_videoinfo(s32 sSyncInsId, mediasync_videoinfo info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + 
pInstance->mVideoInfo.cacheDuration = info.cacheDuration; + pInstance->mVideoInfo.cacheSize = info.cacheSize; + pInstance->mVideoInfo.specialSizeCount = info.specialSizeCount; + return 0; + +} + +long mediasync_ins_get_videoinfo(s32 sSyncInsId, mediasync_videoinfo* info) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + info->cacheDuration = pInstance->mVideoInfo.cacheDuration; + info->cacheSize = pInstance->mVideoInfo.cacheSize; + info->specialSizeCount = pInstance->mVideoInfo.specialSizeCount; + return 0; +} + +long mediasync_ins_set_avsyncstate(s32 sSyncInsId, s32 state) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSyncInfo.state = state; + return 0; +} + +long mediasync_ins_get_avsyncstate(s32 sSyncInsId, s32* state) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *state = pInstance->mSyncInfo.state; + return 0; +} + +long mediasync_ins_set_startthreshold(s32 sSyncInsId, s32 threshold) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mStartThreshold = threshold; + return 0; +} + +long mediasync_ins_get_startthreshold(s32 sSyncInsId, s32* threshold) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *threshold = pInstance->mStartThreshold; + return 0; +} + +long 
mediasync_ins_set_ptsadjust(s32 sSyncInsId, s32 adujstpts) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mPtsAdjust = adujstpts; + return 0; +} + +long mediasync_ins_get_ptsadjust(s32 sSyncInsId, s32* adujstpts) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *adujstpts = pInstance->mPtsAdjust; + return 0; +} + +long mediasync_ins_set_videoworkmode(s32 sSyncInsId, s64 mode) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mVideoWorkMode = mode; + return 0; +} + +long mediasync_ins_get_videoworkmode(s32 sSyncInsId, s64* mode) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *mode = pInstance->mVideoWorkMode; + return 0; +} + +long mediasync_ins_set_fccenable(s32 sSyncInsId, s64 enable) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mFccEnable = enable; + return 0; +} + +long mediasync_ins_get_fccenable(s32 sSyncInsId, s64* enable) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *enable = pInstance->mFccEnable; + return 0; +} + + +long mediasync_ins_set_source_type(s32 sSyncInsId, aml_Source_Type 
sourceType) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mSourceType = sourceType; + return 0; +} + +long mediasync_ins_get_source_type(s32 sSyncInsId, aml_Source_Type* sourceType) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *sourceType = pInstance->mSourceType; + return 0; +} + +long mediasync_ins_set_start_media_time(s32 sSyncInsId, s64 startime) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + pInstance->mStartMediaTime = startime; + return 0; +} + +long mediasync_ins_get_start_media_time(s32 sSyncInsId, s64* starttime) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + *starttime = pInstance->mStartMediaTime; + return 0; +} + +long mediasync_ins_set_audioformat(s32 sSyncInsId, mediasync_audio_format format) { + + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + pInstance->mAudioFormat.channels = format.channels; + pInstance->mAudioFormat.datawidth = format.datawidth; + pInstance->mAudioFormat.format = format.format; + pInstance->mAudioFormat.samplerate = format.samplerate; + return 0; + +} + +long mediasync_ins_get_audioformat(s32 sSyncInsId, mediasync_audio_format* format) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + 
return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + format->channels = pInstance->mAudioFormat.channels; + format->datawidth = pInstance->mAudioFormat.datawidth; + format->format = pInstance->mAudioFormat.format; + format->samplerate = pInstance->mAudioFormat.samplerate; + + return 0; +} + +long mediasync_ins_set_pauseresume(s32 sSyncInsId, int flag) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mPauseResumeFlag = flag; + + return 0; +} + +long mediasync_ins_get_pauseresume(s32 sSyncInsId, int* flag) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + *flag = pInstance->mPauseResumeFlag; + return 0; +} + +long mediasync_ins_set_pcrslope(s32 sSyncInsId, mediasync_speed pcrslope) { + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pInstance->mPcrSlope.mNumerator = pcrslope.mNumerator; + pInstance->mPcrSlope.mDenominator = pcrslope.mDenominator; + return 0; + +} + +long mediasync_ins_get_pcrslope(s32 sSyncInsId, mediasync_speed *pcrslope){ + mediasync_ins* pInstance = NULL; + s32 index = sSyncInsId; + if (index < 0 || index >= MAX_INSTANCE_NUM) + return -1; + + pInstance = vMediaSyncInsList[index]; + if (pInstance == NULL) + return -1; + + pcrslope->mNumerator = pInstance->mPcrSlope.mNumerator; + pcrslope->mDenominator = pInstance->mPcrSlope.mDenominator; + return 0; +} + +
diff --git a/drivers/media_sync/media_sync_core.h b/drivers/media_sync/media_sync_core.h new file mode 100644 index 0000000..4e05e89 --- /dev/null +++ b/drivers/media_sync/media_sync_core.h
@@ -0,0 +1,208 @@ +#ifndef MEDIA_SYNC_HEAD_HH +#define MEDIA_SYNC_HEAD_HH + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/amlogic/cpu_version.h> + + +#define MIN_UPDATETIME_THRESHOLD_US 50000 +typedef enum { + MEDIA_SYNC_VMASTER = 0, + MEDIA_SYNC_AMASTER = 1, + MEDIA_SYNC_PCRMASTER = 2, + MEDIA_SYNC_MODE_MAX = 255, +}sync_mode; + +typedef struct speed{ + u32 mNumerator; + u32 mDenominator; +}mediasync_speed; + +typedef enum { + MEDIASYNC_INIT = 0, + MEDIASYNC_AUDIO_ARRIVED, + MEDIASYNC_VIDEO_ARRIVED, + MEDIASYNC_AV_ARRIVED, + MEDIASYNC_AV_SYNCED, + MEDIASYNC_RUNNING, + MEDIASYNC_LOST_SYNC, + MEDIASYNC_EXIT, +} avsync_state; + +typedef enum { + UNKNOWN_CLOCK = 0, + AUDIO_CLOCK, + VIDEO_CLOCK, + PCR_CLOCK, + REF_CLOCK, +} mediasync_clocktype; + +typedef struct frameinfo{ + int64_t framePts; + int64_t frameSystemTime; +}mediasync_frameinfo; + +typedef struct syncinfo { + avsync_state state; + mediasync_frameinfo firstAframeInfo; + mediasync_frameinfo firstVframeInfo; + mediasync_frameinfo firstDmxPcrInfo; + mediasync_frameinfo refClockInfo; + mediasync_frameinfo curAudioInfo; + mediasync_frameinfo curVideoInfo; + mediasync_frameinfo curDmxPcrInfo; +}mediasync_syncinfo; + +typedef struct audioinfo{ + int cacheSize; + int cacheDuration; +}mediasync_audioinfo; + +typedef struct videoinfo{ + int cacheSize; + int specialSizeCount; + int cacheDuration; +}mediasync_videoinfo; + +typedef struct audioforamt{ + int samplerate; + int datawidth; + int channels; + int format; +}mediasync_audio_format; + +typedef enum +{ + TS_DEMOD = 0, // TS Data input from demod + TS_MEMORY = 1, // TS Data input from memory + ES_MEMORY = 2, // ES Data input from memory +} aml_Source_Type; + +typedef enum { + CLOCK_PROVIDER_NONE = 0, + CLOCK_PROVIDER_DISCONTINUE, + CLOCK_PROVIDER_NORMAL, + CLOCK_PROVIDER_LOST, + CLOCK_PROVIDER_RECOVERING, +} mediasync_clockprovider_state; + +typedef struct instance{ + s32 
mSyncInsId; + s32 mDemuxId; + s32 mPcrPid; + s32 mPaused; + s32 mRef; + s32 mSyncMode; + s64 mLastStc; + s64 mLastRealTime; + s64 mLastMediaTime; + s64 mTrackMediaTime; + s64 mStartMediaTime; + mediasync_speed mSpeed; + mediasync_speed mPcrSlope; + s32 mSyncModeChange; + s64 mUpdateTimeThreshold; + + int mHasAudio; + int mHasVideo; + int mute_flag; + int mStartThreshold; + int mPtsAdjust; + int mVideoWorkMode; + int mFccEnable; + int mPauseResumeFlag; + mediasync_clocktype mSourceClockType; + mediasync_clockprovider_state mSourceClockState; + mediasync_audioinfo mAudioInfo; + mediasync_videoinfo mVideoInfo; + mediasync_syncinfo mSyncInfo; + aml_Source_Type mSourceType; + mediasync_audio_format mAudioFormat; +}mediasync_ins; + +long mediasync_ins_alloc(s32 sDemuxId, + s32 sPcrPid, + s32 *sSyncInsId, + mediasync_ins **pIns); + +long mediasync_ins_delete(s32 sSyncInsId); +long mediasync_ins_binder(s32 sSyncInsId, + mediasync_ins **pIns); +long mediasync_ins_unbinder(s32 sSyncInsId); +long mediasync_ins_update_mediatime(s32 sSyncInsId, + s64 lMediaTime, + s64 lSystemTime, bool forceUpdate); +long mediasync_ins_set_mediatime_speed(s32 sSyncInsId, mediasync_speed fSpeed); +long mediasync_ins_set_paused(s32 sSyncInsId, s32 sPaused); +long mediasync_ins_get_paused(s32 sSyncInsId, s32* spPaused); +long mediasync_ins_get_trackmediatime(s32 sSyncInsId, s64* lpTrackMediaTime); +long mediasync_ins_set_syncmode(s32 sSyncInsId, s32 sSyncMode); +long mediasync_ins_get_syncmode(s32 sSyncInsId, s32 *sSyncMode); +long mediasync_ins_get_mediatime_speed(s32 sSyncInsId, mediasync_speed *fpSpeed); +long mediasync_ins_get_anchor_time(s32 sSyncInsId, + s64* lpMediaTime, + s64* lpSTCTime, + s64* lpSystemTime); +long mediasync_ins_get_systemtime(s32 sSyncInsId, + s64* lpSTC, + s64* lpSystemTime); +long mediasync_ins_get_nextvsync_systemtime(s32 sSyncInsId, s64* lpSystemTime); +long mediasync_ins_set_updatetime_threshold(s32 sSyncInsId, s64 lTimeThreshold); +long 
mediasync_ins_get_updatetime_threshold(s32 sSyncInsId, s64* lpTimeThreshold); + +long mediasync_ins_init_syncinfo(s32 sSyncInsId); +long mediasync_ins_set_clocktype(s32 sSyncInsId, mediasync_clocktype type); +long mediasync_ins_get_clocktype(s32 sSyncInsId, mediasync_clocktype* type); +long mediasync_ins_set_avsyncstate(s32 sSyncInsId, s32 state); +long mediasync_ins_get_avsyncstate(s32 sSyncInsId, s32* state); +long mediasync_ins_set_hasaudio(s32 sSyncInsId, int hasaudio); +long mediasync_ins_get_hasaudio(s32 sSyncInsId, int* hasaudio); +long mediasync_ins_set_hasvideo(s32 sSyncInsId, int hasvideo); +long mediasync_ins_get_hasvideo(s32 sSyncInsId, int* hasvideo); +long mediasync_ins_set_audioinfo(s32 sSyncInsId, mediasync_audioinfo info); +long mediasync_ins_get_audioinfo(s32 sSyncInsId, mediasync_audioinfo* info); +long mediasync_ins_set_videoinfo(s32 sSyncInsId, mediasync_videoinfo info); +long mediasync_ins_set_audiomute(s32 sSyncInsId, int mute_flag); +long mediasync_ins_get_audiomute(s32 sSyncInsId, int* mute_flag); +long mediasync_ins_get_videoinfo(s32 sSyncInsId, mediasync_videoinfo* info); +long mediasync_ins_set_firstaudioframeinfo(s32 sSyncInsId, mediasync_frameinfo info); +long mediasync_ins_get_firstaudioframeinfo(s32 sSyncInsId, mediasync_frameinfo* info); +long mediasync_ins_set_firstvideoframeinfo(s32 sSyncInsId, mediasync_frameinfo info); +long mediasync_ins_get_firstvideoframeinfo(s32 sSyncInsId, mediasync_frameinfo* info); +long mediasync_ins_set_firstdmxpcrinfo(s32 sSyncInsId, mediasync_frameinfo info); +long mediasync_ins_get_firstdmxpcrinfo(s32 sSyncInsId, mediasync_frameinfo* info); +long mediasync_ins_set_refclockinfo(s32 sSyncInsId, mediasync_frameinfo info); +long mediasync_ins_get_refclockinfo(s32 sSyncInsId, mediasync_frameinfo* info); +long mediasync_ins_set_curaudioframeinfo(s32 sSyncInsId, mediasync_frameinfo info); +long mediasync_ins_get_curaudioframeinfo(s32 sSyncInsId, mediasync_frameinfo* info); +long 
mediasync_ins_set_curvideoframeinfo(s32 sSyncInsId, mediasync_frameinfo info); +long mediasync_ins_get_curvideoframeinfo(s32 sSyncInsId, mediasync_frameinfo* info); +long mediasync_ins_set_curdmxpcrinfo(s32 sSyncInsId, mediasync_frameinfo info); +long mediasync_ins_get_curdmxpcrinfo(s32 sSyncInsId, mediasync_frameinfo* info); +long mediasync_ins_set_clockstate(s32 sSyncInsId, mediasync_clockprovider_state state); +long mediasync_ins_get_clockstate(s32 sSyncInsId, mediasync_clockprovider_state* state); +long mediasync_ins_set_startthreshold(s32 sSyncInsId, s32 threshold); +long mediasync_ins_get_startthreshold(s32 sSyncInsId, s32* threshold); +long mediasync_ins_set_ptsadjust(s32 sSyncInsId, s32 adujstpts); +long mediasync_ins_get_ptsadjust(s32 sSyncInsId, s32* adujstpts); +long mediasync_ins_set_videoworkmode(s32 sSyncInsId, s64 mode); +long mediasync_ins_get_videoworkmode(s32 sSyncInsId, s64* mode); +long mediasync_ins_set_fccenable(s32 sSyncInsId, s64 enable); +long mediasync_ins_get_fccenable(s32 sSyncInsId, s64* enable); +long mediasync_ins_set_source_type(s32 sSyncInsId, aml_Source_Type sourceType); +long mediasync_ins_get_source_type(s32 sSyncInsId, aml_Source_Type* sourceType); +long mediasync_ins_set_start_media_time(s32 sSyncInsId, s64 startime); +long mediasync_ins_get_start_media_time(s32 sSyncInsId, s64* starttime); +long mediasync_ins_set_audioformat(s32 sSyncInsId, mediasync_audio_format format); +long mediasync_ins_get_audioformat(s32 sSyncInsId, mediasync_audio_format* format); +long mediasync_ins_set_pauseresume(s32 sSyncInsId, int flag); +long mediasync_ins_get_pauseresume(s32 sSyncInsId, int* flag); +long mediasync_ins_set_pcrslope(s32 sSyncInsId, mediasync_speed pcrslope); +long mediasync_ins_get_pcrslope(s32 sSyncInsId, mediasync_speed *pcrslope); + + + +#endif
diff --git a/drivers/media_sync/media_sync_debug.c b/drivers/media_sync/media_sync_debug.c new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/drivers/media_sync/media_sync_debug.c
diff --git a/drivers/media_sync/media_sync_dev.c b/drivers/media_sync/media_sync_dev.c new file mode 100644 index 0000000..0e0df27 --- /dev/null +++ b/drivers/media_sync/media_sync_dev.c
@@ -0,0 +1,1165 @@ +/* + * drivers/amlogic/media/frame_sync/tsync_pcr.c + * + * Copyright (C) 2017 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/vmalloc.h> +#include <linux/uaccess.h> +#include <linux/slab.h> + +#include <linux/platform_device.h> +#include <linux/amlogic/cpu_version.h> +#include <linux/amlogic/major.h> +#include "media_sync_core.h" +#include "media_sync_dev.h" + +#define MEDIASYNC_DEVICE_NAME "mediasync" +static struct device *mediasync_dev; + +typedef struct alloc_para { + s32 mDemuxId; + s32 mPcrPid; +} mediasync_alloc_para; + +typedef struct systime_para { + s64 mStcUs; + s64 mSystemTimeUs; +}mediasync_systime_para; + +typedef struct updatetime_para { + int64_t mMediaTimeUs; + int64_t mSystemTimeUs; + bool mForceUpdate; +}mediasync_updatetime_para; + +typedef struct arthortime_para { + int64_t mMediaTimeUs; + int64_t mSystemTimeUs; + int64_t mStcTimeUs; +}mediasync_arthortime_para; + +typedef struct priv_s { + s32 mSyncInsId; + mediasync_ins *mSyncIns; +}mediasync_priv_s; + +static int mediasync_open(struct inode *inode, struct file *file) +{ + mediasync_priv_s *priv = {0}; + priv = kzalloc(sizeof(mediasync_priv_s), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + priv->mSyncInsId = -1; + priv->mSyncIns = NULL; + file->private_data = priv; + return 0; +} + +static int mediasync_release(struct inode *inode, struct file *file) +{ + long ret = 0; 
+ mediasync_priv_s *priv = (mediasync_priv_s *)file->private_data; + if (priv == NULL) { + return -ENOMEM; + } + + if (priv->mSyncInsId != -1) { + ret = mediasync_ins_unbinder(priv->mSyncInsId); + priv->mSyncInsId = -1; + priv->mSyncIns = NULL; + } + kfree(priv); + return 0; +} + +static long mediasync_ioctl(struct file *file, unsigned int cmd, ulong arg) +{ + long ret = 0; + mediasync_speed SyncSpeed = {0}; + mediasync_speed PcrSlope = {0}; + mediasync_frameinfo FrameInfo = {-1, -1}; + mediasync_audioinfo AudioInfo = {0, 0}; + mediasync_videoinfo VideoInfo = {0, 0}; + mediasync_audio_format AudioFormat; + mediasync_clocktype ClockType = UNKNOWN_CLOCK; + mediasync_clockprovider_state state; + s32 SyncInsId = -1; + s32 SyncPaused = 0; + s32 SyncMode = -1; + s32 SyncState = 0; + s64 NextVsyncSystemTime = 0; + s64 TrackMediaTime = 0; + int HasAudio = -1; + int HasVideo = -1; + s32 StartThreshold = 0; + s32 PtsAdjust = 0; + s64 VideoWorkMode = 0; + s64 FccEnable = 0; + int mute_flag = 0; + int PauseResumeFlag = 0; + mediasync_priv_s *priv = (mediasync_priv_s *)file->private_data; + mediasync_ins *SyncIns = NULL; + mediasync_alloc_para parm = {0}; + mediasync_arthortime_para ArthorTime = {0}; + mediasync_updatetime_para UpdateTime = {0}; + mediasync_systime_para SystemTime = {0}; + aml_Source_Type sourceType = TS_DEMOD; + s64 UpdateTimeThreshold = 0; + s64 StartMediaTime = -1; + + + switch (cmd) { + case MEDIASYNC_IOC_INSTANCE_ALLOC: + if (copy_from_user ((void *)&parm, + (void *)arg, + sizeof(parm))) + return -EFAULT; + if (mediasync_ins_alloc(parm.mDemuxId, + parm.mPcrPid, + &SyncInsId, + &SyncIns) < 0) { + return -EFAULT; + } + if (SyncIns == NULL) { + return -EFAULT; + } + if (priv != NULL) { + priv->mSyncInsId = SyncInsId; + priv->mSyncIns = SyncIns; + priv->mSyncIns->mRef++; + } + + break; + case MEDIASYNC_IOC_INSTANCE_GET: + if (priv->mSyncIns == NULL) { + return -EFAULT; + } + + SyncInsId = priv->mSyncInsId; + if (copy_to_user((void *)arg, + &SyncInsId, + 
sizeof(SyncInsId))) { + return -EFAULT; + } + break; + case MEDIASYNC_IOC_INSTANCE_BINDER: + if (copy_from_user((void *)&SyncInsId, + (void *)arg, + sizeof(SyncInsId))) { + return -EFAULT; + } + ret = mediasync_ins_binder(SyncInsId, &SyncIns); + if (SyncIns == NULL) { + return -EFAULT; + } + + priv->mSyncInsId = SyncInsId; + priv->mSyncIns = SyncIns; + break; + + case MEDIASYNC_IOC_UPDATE_MEDIATIME: + if (copy_from_user((void *)&UpdateTime, + (void *)arg, + sizeof(UpdateTime))) { + return -EFAULT; + } + if (priv->mSyncIns == NULL) { + return -EFAULT; + } + + ret = mediasync_ins_update_mediatime(priv->mSyncInsId, + UpdateTime.mMediaTimeUs, + UpdateTime.mSystemTimeUs, + UpdateTime.mForceUpdate); + break; + + case MEDIASYNC_IOC_GET_MEDIATIME: + if (priv->mSyncIns == NULL) { + return -EFAULT; + } + ret = mediasync_ins_get_anchor_time(priv->mSyncInsId, + &(ArthorTime.mMediaTimeUs), + &(ArthorTime.mStcTimeUs), + &(ArthorTime.mSystemTimeUs)); + if (ret == 0) { + if (copy_to_user((void *)arg, + &ArthorTime, + sizeof(ArthorTime))) { + return -EFAULT; + } + } + break; + + case MEDIASYNC_IOC_GET_SYSTEMTIME: + + if (priv->mSyncIns == NULL) { + return -EFAULT; + } + + ret = mediasync_ins_get_systemtime(priv->mSyncInsId, + &(SystemTime.mStcUs), + &(SystemTime.mSystemTimeUs)); + if (ret == 0) { + if (copy_to_user((void *)arg, + &SystemTime, + sizeof(SystemTime))) { + return -EFAULT; + } + } + break; + + case MEDIASYNC_IOC_GET_NEXTVSYNC_TIME: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_nextvsync_systemtime(priv->mSyncInsId, + &NextVsyncSystemTime); + if (ret == 0) { + if (copy_to_user((void *)arg, + &NextVsyncSystemTime, + sizeof(NextVsyncSystemTime))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_SPEED: + if (copy_from_user((void *)&SyncSpeed, + (void *)arg, + sizeof(SyncSpeed))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_mediatime_speed(priv->mSyncInsId, + SyncSpeed); + break; + + 
case MEDIASYNC_IOC_GET_SPEED: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_mediatime_speed(priv->mSyncInsId, + &SyncSpeed); + if (ret == 0) { + if (copy_to_user((void *)arg, + &SyncSpeed, + sizeof(SyncSpeed))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_PAUSE: + if (copy_from_user((void *)&SyncPaused, + (void *)arg, + sizeof(SyncPaused))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_paused(priv->mSyncInsId, + SyncPaused); + break; + + case MEDIASYNC_IOC_GET_PAUSE: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_paused(priv->mSyncInsId, + &SyncPaused); + if (ret == 0) { + if (copy_to_user((void *)arg, + &SyncPaused, + sizeof(SyncPaused))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_SYNCMODE: + if (copy_from_user((void *)&SyncMode, + (void *)arg, + sizeof(SyncMode))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_syncmode(priv->mSyncInsId, + SyncMode); + break; + + case MEDIASYNC_IOC_GET_SYNCMODE: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_syncmode(priv->mSyncInsId, + &SyncMode); + if (ret == 0) { + if (copy_to_user((void *)arg, + &SyncMode, + sizeof(SyncMode))) + return -EFAULT; + } + break; + case MEDIASYNC_IOC_GET_TRACKMEDIATIME: + if (priv->mSyncIns == NULL) { + return -EFAULT; + } + + ret = mediasync_ins_get_trackmediatime(priv->mSyncInsId, + &TrackMediaTime); + if (ret == 0) { + if (copy_to_user((void *)arg, + &TrackMediaTime, + sizeof(TrackMediaTime))) { + return -EFAULT; + } + } + break; + + case MEDIASYNC_IOC_SET_FIRST_AFRAME_INFO: + if (copy_from_user((void *)&FrameInfo, + (void *)arg, + sizeof(FrameInfo))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_firstaudioframeinfo(priv->mSyncInsId, + FrameInfo); + break; + + case MEDIASYNC_IOC_GET_FIRST_AFRAME_INFO: + if (priv->mSyncIns == NULL) + 
return -EFAULT; + + ret = mediasync_ins_get_firstaudioframeinfo(priv->mSyncInsId, + &FrameInfo); + if (ret == 0) { + if (copy_to_user((void *)arg, + &FrameInfo, + sizeof(FrameInfo))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_FIRST_VFRAME_INFO: + if (copy_from_user((void *)&FrameInfo, + (void *)arg, + sizeof(FrameInfo))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_firstvideoframeinfo(priv->mSyncInsId, + FrameInfo); + break; + + case MEDIASYNC_IOC_GET_FIRST_VFRAME_INFO: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_firstvideoframeinfo(priv->mSyncInsId, + &FrameInfo); + if (ret == 0) { + if (copy_to_user((void *)arg, + &FrameInfo, + sizeof(FrameInfo))) + return -EFAULT; + } + break; + + + case MEDIASYNC_IOC_SET_FIRST_DMXPCR_INFO: + if (copy_from_user((void *)&FrameInfo, + (void *)arg, + sizeof(FrameInfo))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_firstdmxpcrinfo(priv->mSyncInsId, + FrameInfo); + break; + + case MEDIASYNC_IOC_GET_FIRST_DMXPCR_INFO: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_firstdmxpcrinfo(priv->mSyncInsId, + &FrameInfo); + if (ret == 0) { + if (copy_to_user((void *)arg, + &FrameInfo, + sizeof(FrameInfo))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_REFCLOCK_INFO: + if (copy_from_user((void *)&FrameInfo, + (void *)arg, + sizeof(FrameInfo))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_refclockinfo(priv->mSyncInsId, + FrameInfo); + break; + + case MEDIASYNC_IOC_GET_REFCLOCK_INFO: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_refclockinfo(priv->mSyncInsId, + &FrameInfo); + if (ret == 0) { + if (copy_to_user((void *)arg, + &FrameInfo, + sizeof(FrameInfo))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_CUR_AFRAME_INFO: + if (copy_from_user((void *)&FrameInfo, + (void 
*)arg, + sizeof(FrameInfo))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_curaudioframeinfo(priv->mSyncInsId, + FrameInfo); + break; + + case MEDIASYNC_IOC_GET_CUR_AFRAME_INFO: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_curaudioframeinfo(priv->mSyncInsId, + &FrameInfo); + if (ret == 0) { + if (copy_to_user((void *)arg, + &FrameInfo, + sizeof(FrameInfo))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_CUR_VFRAME_INFO: + if (copy_from_user((void *)&FrameInfo, + (void *)arg, + sizeof(FrameInfo))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_curvideoframeinfo(priv->mSyncInsId, + FrameInfo); + break; + + case MEDIASYNC_IOC_GET_CUR_VFRAME_INFO: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_curvideoframeinfo(priv->mSyncInsId, + &FrameInfo); + if (ret == 0) { + if (copy_to_user((void *)arg, + &FrameInfo, + sizeof(FrameInfo))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_CUR_DMXPCR_INFO: + if (copy_from_user((void *)&FrameInfo, + (void *)arg, + sizeof(FrameInfo))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_curdmxpcrinfo(priv->mSyncInsId, + FrameInfo); + break; + + case MEDIASYNC_IOC_GET_CUR_DMXPCR_INFO: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_curdmxpcrinfo(priv->mSyncInsId, + &FrameInfo); + if (ret == 0) { + if (copy_to_user((void *)arg, + &FrameInfo, + sizeof(FrameInfo))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_AUDIO_INFO: + if (copy_from_user((void *)&AudioInfo, + (void *)arg, + sizeof(AudioInfo))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_audioinfo(priv->mSyncInsId, + AudioInfo); + break; + + case MEDIASYNC_IOC_GET_AUDIO_INFO: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = 
mediasync_ins_get_audioinfo(priv->mSyncInsId, + &AudioInfo); + if (ret == 0) { + if (copy_to_user((void *)arg, + &AudioInfo, + sizeof(AudioInfo))) + return -EFAULT; + } + break; + + + case MEDIASYNC_IOC_SET_AUDIO_MUTEFLAG: + if (copy_from_user((void *)&mute_flag, + (void *)arg, + sizeof(mute_flag))) { + return -EFAULT; + } + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_audiomute(priv->mSyncInsId, + mute_flag); + break; + + case MEDIASYNC_IOC_GET_AUDIO_MUTEFLAG: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_audiomute(priv->mSyncInsId, + &mute_flag); + if (ret == 0) { + if (copy_to_user((void *)arg, + &mute_flag, + sizeof(mute_flag))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_VIDEO_INFO: + if (copy_from_user((void *)&VideoInfo, + (void *)arg, + sizeof(VideoInfo))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_videoinfo(priv->mSyncInsId, + VideoInfo); + break; + + case MEDIASYNC_IOC_GET_VIDEO_INFO: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_videoinfo(priv->mSyncInsId, + &VideoInfo); + if (ret == 0) { + if (copy_to_user((void *)arg, + &VideoInfo, + sizeof(VideoInfo))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_HASAUDIO: + if (copy_from_user((void *)&HasAudio, + (void *)arg, + sizeof(HasAudio))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_hasaudio(priv->mSyncInsId, + HasAudio); + break; + + case MEDIASYNC_IOC_GET_HASAUDIO: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_hasaudio(priv->mSyncInsId, + &HasAudio); + if (ret == 0) { + if (copy_to_user((void *)arg, + &HasAudio, + sizeof(HasAudio))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_HASVIDEO: + if (copy_from_user((void *)&HasVideo, + (void *)arg, + sizeof(HasVideo))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = 
mediasync_ins_set_hasvideo(priv->mSyncInsId, + HasVideo); + break; + + case MEDIASYNC_IOC_GET_HASVIDEO: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_hasvideo(priv->mSyncInsId, + &HasVideo); + if (ret == 0) { + if (copy_to_user((void *)arg, + &HasVideo, + sizeof(HasVideo))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_AVSTATE: + if (copy_from_user((void *)&SyncState, + (void *)arg, + sizeof(SyncState))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_avsyncstate(priv->mSyncInsId, + SyncState); + break; + + case MEDIASYNC_IOC_GET_AVSTATE: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_avsyncstate(priv->mSyncInsId, + &SyncState); + if (ret == 0) { + if (copy_to_user((void *)arg, + &SyncState, + sizeof(SyncState))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_CLOCKTYPE: + if (copy_from_user((void *)&ClockType, + (void *)arg, + sizeof(ClockType))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_clocktype(priv->mSyncInsId, + ClockType); + break; + + case MEDIASYNC_IOC_GET_CLOCKTYPE: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_clocktype(priv->mSyncInsId, + &ClockType); + if (ret == 0) { + if (copy_to_user((void *)arg, + &ClockType, + sizeof(ClockType))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_CLOCKSTATE: + if (copy_from_user((void *)&state, + (void *)arg, + sizeof(state))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_clockstate(priv->mSyncInsId, + state); + break; + + case MEDIASYNC_IOC_GET_CLOCKSTATE: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_clockstate(priv->mSyncInsId, + &state); + if (ret == 0) { + if (copy_to_user((void *)arg, + &state, + sizeof(state))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_STARTTHRESHOLD: + if (copy_from_user((void 
*)&StartThreshold, + (void *)arg, + sizeof(StartThreshold))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_startthreshold(priv->mSyncInsId, + StartThreshold); + break; + + case MEDIASYNC_IOC_GET_STARTTHRESHOLD: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_startthreshold(priv->mSyncInsId, + &StartThreshold); + if (ret == 0) { + if (copy_to_user((void *)arg, + &StartThreshold, + sizeof(StartThreshold))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_PTSADJUST: + if (copy_from_user((void *)&PtsAdjust, + (void *)arg, + sizeof(PtsAdjust))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_ptsadjust(priv->mSyncInsId, + PtsAdjust); + break; + + case MEDIASYNC_IOC_GET_PTSADJUST: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_ptsadjust(priv->mSyncInsId, + &PtsAdjust); + if (ret == 0) { + if (copy_to_user((void *)arg, + &PtsAdjust, + sizeof(PtsAdjust))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_VIDEOWORKMODE: + if (copy_from_user((void *)&VideoWorkMode, + (void *)arg, + sizeof(VideoWorkMode))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_videoworkmode(priv->mSyncInsId, + VideoWorkMode); + break; + + case MEDIASYNC_IOC_GET_VIDEOWORKMODE: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_videoworkmode(priv->mSyncInsId, + &VideoWorkMode); + if (ret == 0) { + if (copy_to_user((void *)arg, + &VideoWorkMode, + sizeof(VideoWorkMode))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_FCCENABLE: + if (copy_from_user((void *)&FccEnable, + (void *)arg, + sizeof(FccEnable))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_fccenable(priv->mSyncInsId, + FccEnable); + break; + + case MEDIASYNC_IOC_GET_FCCENABLE: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = 
mediasync_ins_get_fccenable(priv->mSyncInsId, + &FccEnable); + if (ret == 0) { + if (copy_to_user((void *)arg, + &FccEnable, + sizeof(FccEnable))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_SOURCE_TYPE: + if (copy_from_user((void *)&sourceType, + (void *)arg, + sizeof(sourceType))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_source_type(priv->mSyncInsId, + sourceType); + break; + + case MEDIASYNC_IOC_GET_SOURCE_TYPE: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_source_type(priv->mSyncInsId, + &sourceType); + if (ret == 0) { + if (copy_to_user((void *)arg, + &sourceType, + sizeof(sourceType))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_UPDATETIME_THRESHOLD: + if (copy_from_user((void *)&UpdateTimeThreshold, + (void *)arg, + sizeof(UpdateTimeThreshold))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_updatetime_threshold(priv->mSyncInsId, + UpdateTimeThreshold); + break; + + case MEDIASYNC_IOC_GET_UPDATETIME_THRESHOLD: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_updatetime_threshold(priv->mSyncInsId, + &UpdateTimeThreshold); + if (ret == 0) { + if (copy_to_user((void *)arg, + &UpdateTimeThreshold, + sizeof(UpdateTimeThreshold))) + return -EFAULT; + } + break; + case MEDIASYNC_IOC_SET_START_MEDIA_TIME: + if (priv->mSyncIns == NULL) + return -EFAULT; + + if (copy_from_user((void *)&StartMediaTime, + (void *)arg, + sizeof(StartMediaTime))) + return -EFAULT; + ret = mediasync_ins_set_start_media_time(priv->mSyncInsId, StartMediaTime); + break; + + case MEDIASYNC_IOC_GET_START_MEDIA_TIME: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_start_media_time(priv->mSyncInsId, &StartMediaTime); + if (ret == 0) { + if (copy_to_user((void *)arg, + &StartMediaTime, + sizeof(StartMediaTime))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_AUDIO_FORMAT: + 
if (copy_from_user((void *)&AudioFormat, + (void *)arg, + sizeof(AudioFormat))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_audioformat(priv->mSyncInsId, + AudioFormat); + break; + + case MEDIASYNC_IOC_GET_AUDIO_FORMAT: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_audioformat(priv->mSyncInsId, + &AudioFormat); + if (ret == 0) { + if (copy_to_user((void *)arg, + &AudioFormat, + sizeof(AudioFormat))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_PAUSERESUME_FLAG: + if (copy_from_user((void *)&PauseResumeFlag, + (void *)arg, + sizeof(PauseResumeFlag))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_pauseresume(priv->mSyncInsId, + PauseResumeFlag); + break; + + case MEDIASYNC_IOC_GET_PAUSERESUME_FLAG: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_pauseresume(priv->mSyncInsId, + &PauseResumeFlag); + if (ret == 0) { + if (copy_to_user((void *)arg, + &PauseResumeFlag, + sizeof(PauseResumeFlag))) + return -EFAULT; + } + break; + + case MEDIASYNC_IOC_SET_PCRSLOPE: + if (copy_from_user((void *)&PcrSlope, + (void *)arg, + sizeof(PcrSlope))) + return -EFAULT; + + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_set_pcrslope(priv->mSyncInsId, + PcrSlope); + break; + + case MEDIASYNC_IOC_GET_PCRSLOPE: + if (priv->mSyncIns == NULL) + return -EFAULT; + + ret = mediasync_ins_get_pcrslope(priv->mSyncInsId, + &PcrSlope); + if (ret == 0) { + if (copy_to_user((void *)arg, + &PcrSlope, + sizeof(PcrSlope))) + return -EFAULT; + } + break; + + default: + pr_info("invalid cmd:%d\n", cmd); + break; + } + + return ret; +} + +#ifdef CONFIG_COMPAT +static long mediasync_compat_ioctl(struct file *file, unsigned int cmd, ulong arg) +{ + long ret = 0; + switch (cmd) { + case MEDIASYNC_IOC_INSTANCE_ALLOC: + case MEDIASYNC_IOC_INSTANCE_GET: + case MEDIASYNC_IOC_INSTANCE_BINDER: + case 
MEDIASYNC_IOC_UPDATE_MEDIATIME: + case MEDIASYNC_IOC_GET_MEDIATIME: + case MEDIASYNC_IOC_GET_SYSTEMTIME: + case MEDIASYNC_IOC_GET_NEXTVSYNC_TIME: + case MEDIASYNC_IOC_SET_SPEED: + case MEDIASYNC_IOC_GET_SPEED: + case MEDIASYNC_IOC_SET_PAUSE: + case MEDIASYNC_IOC_GET_PAUSE: + case MEDIASYNC_IOC_SET_SYNCMODE: + case MEDIASYNC_IOC_GET_SYNCMODE: + case MEDIASYNC_IOC_GET_TRACKMEDIATIME: + case MEDIASYNC_IOC_SET_FIRST_AFRAME_INFO: + case MEDIASYNC_IOC_GET_FIRST_AFRAME_INFO: + case MEDIASYNC_IOC_SET_FIRST_VFRAME_INFO: + case MEDIASYNC_IOC_GET_FIRST_VFRAME_INFO: + case MEDIASYNC_IOC_SET_FIRST_DMXPCR_INFO: + case MEDIASYNC_IOC_GET_FIRST_DMXPCR_INFO: + case MEDIASYNC_IOC_SET_REFCLOCK_INFO: + case MEDIASYNC_IOC_GET_REFCLOCK_INFO: + case MEDIASYNC_IOC_SET_CUR_AFRAME_INFO: + case MEDIASYNC_IOC_GET_CUR_AFRAME_INFO: + case MEDIASYNC_IOC_SET_CUR_VFRAME_INFO: + case MEDIASYNC_IOC_GET_CUR_VFRAME_INFO: + case MEDIASYNC_IOC_SET_CUR_DMXPCR_INFO: + case MEDIASYNC_IOC_GET_CUR_DMXPCR_INFO: + case MEDIASYNC_IOC_SET_AUDIO_INFO: + case MEDIASYNC_IOC_GET_AUDIO_INFO: + case MEDIASYNC_IOC_SET_VIDEO_INFO: + case MEDIASYNC_IOC_GET_VIDEO_INFO: + case MEDIASYNC_IOC_SET_AVSTATE: + case MEDIASYNC_IOC_GET_AVSTATE: + case MEDIASYNC_IOC_SET_HASAUDIO: + case MEDIASYNC_IOC_GET_HASAUDIO: + case MEDIASYNC_IOC_SET_HASVIDEO: + case MEDIASYNC_IOC_GET_HASVIDEO: + case MEDIASYNC_IOC_GET_CLOCKTYPE: + case MEDIASYNC_IOC_SET_CLOCKTYPE: + case MEDIASYNC_IOC_GET_CLOCKSTATE: + case MEDIASYNC_IOC_SET_CLOCKSTATE: + case MEDIASYNC_IOC_SET_STARTTHRESHOLD: + case MEDIASYNC_IOC_GET_STARTTHRESHOLD: + case MEDIASYNC_IOC_SET_PTSADJUST: + case MEDIASYNC_IOC_GET_PTSADJUST: + case MEDIASYNC_IOC_SET_VIDEOWORKMODE: + case MEDIASYNC_IOC_GET_VIDEOWORKMODE: + case MEDIASYNC_IOC_SET_FCCENABLE: + case MEDIASYNC_IOC_GET_FCCENABLE: + case MEDIASYNC_IOC_SET_AUDIO_MUTEFLAG: + case MEDIASYNC_IOC_GET_AUDIO_MUTEFLAG: + case MEDIASYNC_IOC_SET_SOURCE_TYPE: + case MEDIASYNC_IOC_GET_SOURCE_TYPE: + case MEDIASYNC_IOC_SET_UPDATETIME_THRESHOLD: + 
case MEDIASYNC_IOC_GET_UPDATETIME_THRESHOLD: + case MEDIASYNC_IOC_SET_START_MEDIA_TIME: + case MEDIASYNC_IOC_GET_START_MEDIA_TIME: + case MEDIASYNC_IOC_SET_AUDIO_FORMAT: + case MEDIASYNC_IOC_GET_AUDIO_FORMAT: + case MEDIASYNC_IOC_SET_PAUSERESUME_FLAG: + case MEDIASYNC_IOC_GET_PAUSERESUME_FLAG: + case MEDIASYNC_IOC_SET_PCRSLOPE: + case MEDIASYNC_IOC_GET_PCRSLOPE: + + return mediasync_ioctl(file, cmd, arg); + default: + return -EINVAL; + } + return ret; +} +#endif + +static const struct file_operations mediasync_fops = { + .owner = THIS_MODULE, + .open = mediasync_open, + .release = mediasync_release, + .unlocked_ioctl = mediasync_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = mediasync_compat_ioctl, +#endif +}; + +static struct attribute *mediasync_class_attrs[] = { + NULL +}; + +ATTRIBUTE_GROUPS(mediasync_class); + +static struct class mediasync_class = { + .name = "mediasync", + .class_groups = mediasync_class_groups, +}; + +static int __init mediasync_module_init(void) +{ + int r; + + r = class_register(&mediasync_class); + + if (r) { + pr_err("mediasync class create fail.\n"); + return r; + } + + /* create tsync device */ + r = register_chrdev(MEDIASYNC_MAJOR, "mediasync", &mediasync_fops); + if (r < 0) { + pr_info("Can't register major for tsync\n"); + goto err2; + } + + mediasync_dev = device_create(&mediasync_class, NULL, + MKDEV(MEDIASYNC_MAJOR, 0), NULL, MEDIASYNC_DEVICE_NAME); + + if (IS_ERR(mediasync_dev)) { + pr_err("Can't create mediasync_dev device\n"); + goto err1; + } + return 0; + +err1: + unregister_chrdev(MEDIASYNC_MAJOR, "mediasync"); +err2: + class_unregister(&mediasync_class); + + return 0; +} + +static void __exit mediasync_module_exit(void) +{ + device_destroy(&mediasync_class, MKDEV(MEDIASYNC_MAJOR, 0)); + unregister_chrdev(MEDIASYNC_MAJOR, "mediasync"); + class_unregister(&mediasync_class); +} + +module_init(mediasync_module_init); +module_exit(mediasync_module_exit); + +MODULE_DESCRIPTION("AMLOGIC media sync management driver"); 
+MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lifeng Cao <lifeng.cao@amlogic.com>"); +
diff --git a/drivers/media_sync/media_sync_dev.h b/drivers/media_sync/media_sync_dev.h new file mode 100644 index 0000000..52c28e9 --- /dev/null +++ b/drivers/media_sync/media_sync_dev.h
@@ -0,0 +1,72 @@ +#ifndef MEDIA_SYNC_DEV_HEAD_HH +#define MEDIA_SYNC_DEV_HEAD_HH + +#define MEDIASYNC_IOC_MAGIC 'M' + +#define MEDIASYNC_IOC_INSTANCE_ALLOC _IOW(MEDIASYNC_IOC_MAGIC, 0x01, int) +#define MEDIASYNC_IOC_INSTANCE_GET _IOW(MEDIASYNC_IOC_MAGIC, 0x02, int) +#define MEDIASYNC_IOC_INSTANCE_BINDER _IOW(MEDIASYNC_IOC_MAGIC, 0x03, int) +#define MEDIASYNC_IOC_UPDATE_MEDIATIME _IOW(MEDIASYNC_IOC_MAGIC, 0x04, int) +#define MEDIASYNC_IOC_GET_MEDIATIME _IOW(MEDIASYNC_IOC_MAGIC, 0x05, int) +#define MEDIASYNC_IOC_GET_SYSTEMTIME _IOW(MEDIASYNC_IOC_MAGIC, 0x06, int) +#define MEDIASYNC_IOC_GET_NEXTVSYNC_TIME _IOW(MEDIASYNC_IOC_MAGIC, 0x07, int) +#define MEDIASYNC_IOC_SET_SPEED _IOW(MEDIASYNC_IOC_MAGIC, 0x08, int) +#define MEDIASYNC_IOC_GET_SPEED _IOW(MEDIASYNC_IOC_MAGIC, 0x09, int) +#define MEDIASYNC_IOC_SET_PAUSE _IOW(MEDIASYNC_IOC_MAGIC, 0x0A, int) +#define MEDIASYNC_IOC_GET_PAUSE _IOW(MEDIASYNC_IOC_MAGIC, 0x0B, int) +#define MEDIASYNC_IOC_SET_SYNCMODE _IOW(MEDIASYNC_IOC_MAGIC, 0x0C, int) +#define MEDIASYNC_IOC_GET_SYNCMODE _IOW(MEDIASYNC_IOC_MAGIC, 0x0D, int) +#define MEDIASYNC_IOC_GET_TRACKMEDIATIME _IOW(MEDIASYNC_IOC_MAGIC, 0x0E, int) + +#define MEDIASYNC_IOC_SET_FIRST_AFRAME_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x0F, int) +#define MEDIASYNC_IOC_GET_FIRST_AFRAME_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x10, int) +#define MEDIASYNC_IOC_SET_FIRST_VFRAME_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x11, int) +#define MEDIASYNC_IOC_GET_FIRST_VFRAME_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x12, int) +#define MEDIASYNC_IOC_SET_FIRST_DMXPCR_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x13, int) +#define MEDIASYNC_IOC_GET_FIRST_DMXPCR_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x14, int) +#define MEDIASYNC_IOC_SET_REFCLOCK_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x15, int) +#define MEDIASYNC_IOC_GET_REFCLOCK_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x16, int) +#define MEDIASYNC_IOC_SET_CUR_AFRAME_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x17, int) +#define MEDIASYNC_IOC_GET_CUR_AFRAME_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x18, int) +#define 
MEDIASYNC_IOC_SET_CUR_VFRAME_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x19, int) +#define MEDIASYNC_IOC_GET_CUR_VFRAME_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x1A, int) +#define MEDIASYNC_IOC_SET_CUR_DMXPCR_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x1B, int) +#define MEDIASYNC_IOC_GET_CUR_DMXPCR_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x1C, int) +#define MEDIASYNC_IOC_SET_AUDIO_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x1D, int) +#define MEDIASYNC_IOC_GET_AUDIO_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x1E, int) +#define MEDIASYNC_IOC_SET_VIDEO_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x1F, int) +#define MEDIASYNC_IOC_GET_VIDEO_INFO _IOW(MEDIASYNC_IOC_MAGIC, 0x20, int) +#define MEDIASYNC_IOC_SET_AVSTATE _IOW(MEDIASYNC_IOC_MAGIC, 0x21, int) +#define MEDIASYNC_IOC_GET_AVSTATE _IOW(MEDIASYNC_IOC_MAGIC, 0x22, int) +#define MEDIASYNC_IOC_SET_HASAUDIO _IOW(MEDIASYNC_IOC_MAGIC, 0x23, int) +#define MEDIASYNC_IOC_GET_HASAUDIO _IOW(MEDIASYNC_IOC_MAGIC, 0x24, int) +#define MEDIASYNC_IOC_SET_HASVIDEO _IOW(MEDIASYNC_IOC_MAGIC, 0x25, int) +#define MEDIASYNC_IOC_GET_HASVIDEO _IOW(MEDIASYNC_IOC_MAGIC, 0x26, int) +#define MEDIASYNC_IOC_SET_CLOCKTYPE _IOW(MEDIASYNC_IOC_MAGIC, 0x27, int) +#define MEDIASYNC_IOC_GET_CLOCKTYPE _IOW(MEDIASYNC_IOC_MAGIC, 0x28, int) +#define MEDIASYNC_IOC_SET_CLOCKSTATE _IOW(MEDIASYNC_IOC_MAGIC, 0x29, int) +#define MEDIASYNC_IOC_GET_CLOCKSTATE _IOW(MEDIASYNC_IOC_MAGIC, 0x2A, int) +#define MEDIASYNC_IOC_SET_STARTTHRESHOLD _IOW(MEDIASYNC_IOC_MAGIC, 0x2B, int) +#define MEDIASYNC_IOC_GET_STARTTHRESHOLD _IOW(MEDIASYNC_IOC_MAGIC, 0x2C, int) +#define MEDIASYNC_IOC_SET_PTSADJUST _IOW(MEDIASYNC_IOC_MAGIC, 0x2D, int) +#define MEDIASYNC_IOC_GET_PTSADJUST _IOW(MEDIASYNC_IOC_MAGIC, 0x2E, int) +#define MEDIASYNC_IOC_SET_VIDEOWORKMODE _IOW(MEDIASYNC_IOC_MAGIC, 0x2F, int) +#define MEDIASYNC_IOC_GET_VIDEOWORKMODE _IOW(MEDIASYNC_IOC_MAGIC, 0x30, int) +#define MEDIASYNC_IOC_SET_FCCENABLE _IOW(MEDIASYNC_IOC_MAGIC, 0x31, int) +#define MEDIASYNC_IOC_GET_FCCENABLE _IOW(MEDIASYNC_IOC_MAGIC, 0x32, int) +#define MEDIASYNC_IOC_SET_AUDIO_MUTEFLAG 
_IOW(MEDIASYNC_IOC_MAGIC, 0x33, int) +#define MEDIASYNC_IOC_GET_AUDIO_MUTEFLAG _IOW(MEDIASYNC_IOC_MAGIC, 0x34, int) +#define MEDIASYNC_IOC_SET_SOURCE_TYPE _IOW(MEDIASYNC_IOC_MAGIC, 0x35, int) +#define MEDIASYNC_IOC_GET_SOURCE_TYPE _IOW(MEDIASYNC_IOC_MAGIC, 0x36, int) +#define MEDIASYNC_IOC_SET_UPDATETIME_THRESHOLD _IOW(MEDIASYNC_IOC_MAGIC, 0x37, int) +#define MEDIASYNC_IOC_GET_UPDATETIME_THRESHOLD _IOW(MEDIASYNC_IOC_MAGIC, 0x38, int) +#define MEDIASYNC_IOC_SET_START_MEDIA_TIME _IOW(MEDIASYNC_IOC_MAGIC, 0x39, int) +#define MEDIASYNC_IOC_GET_START_MEDIA_TIME _IOW(MEDIASYNC_IOC_MAGIC, 0x3A, int) +#define MEDIASYNC_IOC_SET_AUDIO_FORMAT _IOW(MEDIASYNC_IOC_MAGIC, 0x3B, int) +#define MEDIASYNC_IOC_GET_AUDIO_FORMAT _IOW(MEDIASYNC_IOC_MAGIC, 0x3C, int) +#define MEDIASYNC_IOC_SET_PAUSERESUME_FLAG _IOW(MEDIASYNC_IOC_MAGIC, 0x3D, int) +#define MEDIASYNC_IOC_GET_PAUSERESUME_FLAG _IOW(MEDIASYNC_IOC_MAGIC, 0x3E, int) +#define MEDIASYNC_IOC_SET_PCRSLOPE _IOW(MEDIASYNC_IOC_MAGIC, 0x3F, int) +#define MEDIASYNC_IOC_GET_PCRSLOPE _IOW(MEDIASYNC_IOC_MAGIC, 0x40, int) + +#endif
diff --git a/drivers/stream_input/Makefile b/drivers/stream_input/Makefile new file mode 100644 index 0000000..2e7d5d9 --- /dev/null +++ b/drivers/stream_input/Makefile
@@ -0,0 +1,26 @@ +obj-m += stream_input.o + +stream_input-objs += amports/amstream.o +stream_input-objs += amports/adec.o +stream_input-objs += amports/thread_rw.o +stream_input-objs += amports/streambuf.o +stream_input-objs += amports/stream_buffer_base.o +stream_input-objs += amports/stream_buffer_interface.o + +stream_input-objs += parser/esparser.o +stream_input-objs += parser/tsdemux.o +stream_input-objs += parser/psparser.o +stream_input-objs += parser/rmparser.o +stream_input-objs += subtitle/subtitle.o + +stream_input-objs += parser/dvb_common.o +obj-$(CONFIG_AMLOGIC_DVB) += parser/hw_demux/ +obj-$(CONFIG_AMLOGIC_DVB) += parser/dvb_ci/ + +ccflags-y += -I. +ccflags-y += -I$(srctree)/include/media + +#obj-y += tv_frontend/ +# obj-y += box-frontend/avl6211/ +# obj-y += box-frontend/atbm8881/ +# obj-y += box-frontend/avl68xx/
diff --git a/drivers/stream_input/amports/Makefile b/drivers/stream_input/amports/Makefile new file mode 100644 index 0000000..82f5934 --- /dev/null +++ b/drivers/stream_input/amports/Makefile
@@ -0,0 +1,2 @@ +obj-y += amports.o +amports-objs += amstream.o adec.o
diff --git a/drivers/stream_input/amports/adec.c b/drivers/stream_input/amports/adec.c new file mode 100644 index 0000000..3ab5e5a --- /dev/null +++ b/drivers/stream_input/amports/adec.c
@@ -0,0 +1,427 @@ +/* + * drivers/amlogic/media/stream_input/amports/adec.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/platform_device.h> +#include <linux/of_platform.h> +#include <linux/slab.h> +#include <linux/uio_driver.h> +#include <linux/amlogic/media/utils/aformat.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/registers/register.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../amports/streambuf.h" +#include <linux/module.h> +#include <linux/of.h> +#include "amports_priv.h" +#include "../../common/chips/decoder_cpu_ver_info.h" +#define INFO_VALID ((astream_dev) && (astream_dev->format)) + +struct astream_device_s { + char *name; + char *format; + s32 channum; + s32 samplerate; + s32 datawidth; + int offset; + + struct device dev; +}; + +static char *astream_format[] = { + "amadec_mpeg", + "amadec_pcm_s16le", + "amadec_aac", + "amadec_ac3", + "amadec_alaw", + "amadec_mulaw", + "amadec_dts", + "amadec_pcm_s16be", + "amadec_flac", + "amadec_cook", + "amadec_pcm_u8", + "amadec_adpcm", + "amadec_amr", + "amadec_raac", + "amadec_wma", + "amadec_wmapro", + "amadec_pcm_bluray", + "amadec_alac", + "amadec_vorbis", + "amadec_aac_latm", + "amadec_ape", + "amadec_eac3", + "amadec_pcm_widi", + "amadec_dra", + "amadec_sipr", + "amadec_truehd", + "amadec_mpeg1", + "amadec_mpeg2", + 
"amadec_wmavoi", + "amadec_wmalossless", + "amadec_pcm_s24le", + "adec_max" +}; + +static const char *na_string = "NA"; +static struct astream_device_s *astream_dev; + +static ssize_t format_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + if (INFO_VALID && astream_dev->format) + return sprintf(buf, "%s\n", astream_dev->format); + else + return sprintf(buf, "%s\n", na_string); +} + +static ssize_t channum_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + if (INFO_VALID) + return sprintf(buf, "%u\n", astream_dev->channum); + else + return sprintf(buf, "%s\n", na_string); +} + +static ssize_t samplerate_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + if (INFO_VALID) + return sprintf(buf, "%u\n", astream_dev->samplerate); + else + return sprintf(buf, "%s\n", na_string); +} + +static ssize_t datawidth_show(struct class *class, + struct class_attribute *attr, + char *buf) +{ + if (INFO_VALID) + return sprintf(buf, "%u\n", astream_dev->datawidth); + else + return sprintf(buf, "%s\n", na_string); +} + +static ssize_t pts_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + u32 pts, frame_size; + u32 pts_margin = 0; + + if (astream_dev->samplerate <= 12000) + pts_margin = 512; + + if (INFO_VALID && (pts_lookup(PTS_TYPE_AUDIO, &pts, + &frame_size, pts_margin) >= 0)) + return sprintf(buf, "0x%x\n", pts); + else + return sprintf(buf, "%s\n", na_string); +} + +static ssize_t addr_offset_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", astream_dev->offset); +} + +static CLASS_ATTR_RO(format); +static CLASS_ATTR_RO(samplerate); +static CLASS_ATTR_RO(channum); +static CLASS_ATTR_RO(datawidth); +static CLASS_ATTR_RO(pts); +static CLASS_ATTR_RO(addr_offset); + +static struct attribute *astream_class_attrs[] = { + &class_attr_format.attr, + &class_attr_samplerate.attr, + &class_attr_channum.attr, + &class_attr_datawidth.attr, + 
&class_attr_pts.attr, + &class_attr_addr_offset.attr, + NULL +}; + +ATTRIBUTE_GROUPS(astream_class); + +static struct class astream_class = { + .name = "astream", + .class_groups = astream_class_groups, +}; + +#if 1 +#define IO_CBUS_PHY_BASE 0xc1100000ULL +#define IO_AOBUS_PHY_BASE 0xc8100000ULL +#define CBUS_REG_OFFSET(reg) ((reg) << 2) +#define IO_SECBUS_PHY_BASE 0xda000000ULL + + +#define IO_AOBUS_PHY_BASE_AFTER_G12A 0xff800000ULL + +static struct uio_info astream_uio_info = { + .name = "astream_uio", + .version = "0.1", + .irq = UIO_IRQ_NONE, + + .mem = { + [0] = { + .name = "AIFIFO", + .memtype = UIO_MEM_PHYS, + .addr = + (IO_CBUS_PHY_BASE + CBUS_REG_OFFSET(AIU_AIFIFO_CTRL)) + &(PAGE_MASK), + .size = PAGE_SIZE, + }, + [1] = { + .memtype = UIO_MEM_PHYS, + .addr = + (IO_CBUS_PHY_BASE + CBUS_REG_OFFSET(VCOP_CTRL_REG)), + .size = PAGE_SIZE, + }, +/* + [2] = { + .name = "SECBUS", + .memtype = UIO_MEM_PHYS, + .addr = (IO_SECBUS_PHY_BASE), + .size = PAGE_SIZE, + }, +*/ + [2] = { + .name = "CBUS", + .memtype = UIO_MEM_PHYS, + .addr = + (IO_CBUS_PHY_BASE + CBUS_REG_OFFSET(ASSIST_HW_REV)) + &(PAGE_MASK), + .size = PAGE_SIZE, + }, + [3] = { + .name = "CBUS-START", + .memtype = UIO_MEM_PHYS, + .addr = (IO_CBUS_PHY_BASE + CBUS_REG_OFFSET(0x1000)), + .size = PAGE_SIZE, + }, + [4] = { + .name = "AOBUS-START", + .memtype = UIO_MEM_PHYS, + .addr = (IO_AOBUS_PHY_BASE), + .size = PAGE_SIZE, + }, + }, +}; +#endif + +static void astream_release(struct device *dev) +{ + kfree(astream_dev); + + astream_dev = NULL; +} + +s32 adec_init(struct stream_port_s *port) +{ + enum aformat_e af; + + if (!astream_dev) + return -ENODEV; + + af = port->aformat; + + astream_dev->channum = port->achanl; + astream_dev->samplerate = port->asamprate; + astream_dev->datawidth = port->adatawidth; + + /*wmb();don't need it...*/ + if (af < ARRAY_SIZE(astream_format)) + astream_dev->format = astream_format[af]; + else + astream_dev->format = NULL; + return 0; +} +EXPORT_SYMBOL(adec_init); + +s32 
adec_release(enum aformat_e vf) +{ + pr_info("adec_release\n"); + + if (!astream_dev) + return -ENODEV; + + astream_dev->format = NULL; + + return 0; +} +EXPORT_SYMBOL(adec_release); + +int amstream_adec_show_fun(const char *trigger, int id, char *sbuf, int size) +{ + int ret = -1; + void *buf, *getbuf = NULL; + if (size < PAGE_SIZE) { + getbuf = (void *)__get_free_page(GFP_KERNEL); + if (!getbuf) + return -ENOMEM; + buf = getbuf; + } else { + buf = sbuf; + } + switch (trigger[0]) { + case 'f': + ret = format_show(NULL, NULL, buf); + break; + case 's': + ret = samplerate_show(NULL, NULL, buf); + break; + case 'c': + ret = channum_show(NULL, NULL, buf); + break; + case 'd': + ret = datawidth_show(NULL, NULL, buf); + break; + case 'p': + ret = pts_show(NULL, NULL, buf); + break; + default: + ret = -1; + } + if (ret > 0 && getbuf != NULL) { + ret = min_t(int, ret, size); + strncpy(sbuf, buf, ret); + } + if (getbuf != NULL) + free_page((unsigned long)getbuf); + return ret; +} + +static struct mconfig adec_configs[] = { + MC_FUN("format", &amstream_adec_show_fun, NULL), + MC_FUN("samplerate", &amstream_adec_show_fun, NULL), + MC_FUN("channum", &amstream_adec_show_fun, NULL), + MC_FUN("datawidth", &amstream_adec_show_fun, NULL), + MC_FUN("pts", &amstream_adec_show_fun, NULL), +}; +static struct mconfig_node adec_node; + + +s32 astream_dev_register(void) +{ + s32 r; + struct device_node *node; + unsigned int cbus_base = 0xffd00000; + + r = class_register(&astream_class); + if (r) { + pr_info("astream class create fail.\n"); + return r; + } + + astream_dev = kzalloc(sizeof(struct astream_device_s), GFP_KERNEL); + + if (!astream_dev) { + pr_info("astream device create fail.\n"); + r = -ENOMEM; + goto err_3; + } + + astream_dev->dev.class = &astream_class; + astream_dev->dev.release = astream_release; + astream_dev->offset = 0; + dev_set_name(&astream_dev->dev, "astream-dev"); + + dev_set_drvdata(&astream_dev->dev, astream_dev); + + r = device_register(&astream_dev->dev); + 
if (r) { + pr_info("astream device register fail.\n"); + goto err_2; + } + + if (AM_MESON_CPU_MAJOR_ID_TXL < get_cpu_major_id()) { + struct resource *res_mem; + struct platform_device *pdev; + + node = of_find_node_by_path("/codec_io"); + if (!node) { + pr_info("No io_cbus_base node found."); + goto err_1; + } + + pdev = of_find_device_by_node(node); + res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cbus"); + if (!res_mem) { + pr_info("No find node.\n"); + goto err_1; + } + cbus_base = res_mem->start; + of_node_put(node); + + /*need to offset -0x100 in txlx.*/ + astream_dev->offset = -0x100; + + /*need to offset -0x180 in g12a.*/ + if (AM_MESON_CPU_MAJOR_ID_G12A <= get_cpu_major_id()) { + astream_dev->offset = -0x180; + /* after G12A chip, the aobus base addr changed */ + astream_uio_info.mem[4].addr = IO_AOBUS_PHY_BASE_AFTER_G12A; + } + astream_uio_info.mem[0].addr = + (cbus_base + CBUS_REG_OFFSET(AIU_AIFIFO_CTRL + + astream_dev->offset)) & (PAGE_MASK); + + astream_uio_info.mem[3].addr = + (cbus_base + CBUS_REG_OFFSET(ASSIST_HW_REV + + 0x100)) & (PAGE_MASK); + } + +#if 1 + if (uio_register_device(&astream_dev->dev, &astream_uio_info)) { + pr_info("astream UIO device register fail.\n"); + r = -ENODEV; + goto err_1; + } +#endif + INIT_REG_NODE_CONFIGS("media", &adec_node, + "adec", adec_configs, CONFIG_FOR_R); + return 0; + +err_1: + device_unregister(&astream_dev->dev); + +err_2: + kfree(astream_dev); + astream_dev = NULL; + +err_3: + class_unregister(&astream_class); + + return r; +} + +void astream_dev_unregister(void) +{ + if (astream_dev) { +#if 1 + uio_unregister_device(&astream_uio_info); +#endif + + device_unregister(&astream_dev->dev); + + class_unregister(&astream_class); + } +} +
diff --git a/drivers/stream_input/amports/adec.h b/drivers/stream_input/amports/adec.h new file mode 100644 index 0000000..1ad276f --- /dev/null +++ b/drivers/stream_input/amports/adec.h
@@ -0,0 +1,32 @@ +/* + * drivers/amlogic/media/stream_input/amports/adec.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef ADEC_H +#define ADEC_H + +#include "../amports/streambuf.h" +#include <linux/amlogic/media/utils/aformat.h> + +extern s32 adec_init(struct stream_port_s *port); + +extern s32 adec_release(enum aformat_e af); + +extern s32 astream_dev_register(void); + +extern s32 astream_dev_unregister(void); + +#endif /* ADEC_H */
diff --git a/drivers/stream_input/amports/amports_priv.h b/drivers/stream_input/amports/amports_priv.h new file mode 100644 index 0000000..ffead01 --- /dev/null +++ b/drivers/stream_input/amports/amports_priv.h
@@ -0,0 +1,54 @@ +/* + * drivers/amlogic/media/stream_input/amports/amports_priv.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef AMPORTS_PRIV_HEAD_HH +#define AMPORTS_PRIV_HEAD_HH +#include "../amports/streambuf.h" +#include "../../common/media_clock/switch/amports_gate.h" +#include <linux/amlogic/media/vfm/vframe.h> +#include <linux/amlogic/media/registers/register.h> +#include <linux/amlogic/media/utils/log.h> + +struct port_priv_s { + struct vdec_s *vdec; + struct stream_port_s *port; + struct mutex mutex; +}; + +struct stream_buf_s *get_buf_by_type(u32 type); + +/*video.c provide*/ +extern u32 trickmode_i; +struct amvideocap_req; +extern u32 set_blackout_policy(int policy); +extern u32 get_blackout_policy(void); +int calculation_stream_ext_delayed_ms(u8 type); +int ext_get_cur_video_frame(struct vframe_s **vf, int *canvas_index); +int ext_put_video_frame(struct vframe_s *vf); +int ext_register_end_frame_callback(struct amvideocap_req *req); +int amstream_request_firmware_from_sys(const char *file_name, + char *buf, int size); +void set_vsync_pts_inc_mode(int inc); + +void set_real_audio_info(void *arg); +void amstream_wakeup_userdata_poll(struct vdec_s *vdec); +#define dbg() pr_info("on %s,line %d\n", __func__, __LINE__); + +struct device *amports_get_dma_device(void); +struct device *get_codec_cma_device(void); + +#endif
diff --git a/drivers/stream_input/amports/amstream.c b/drivers/stream_input/amports/amstream.c new file mode 100644 index 0000000..3b217f2 --- /dev/null +++ b/drivers/stream_input/amports/amstream.c
@@ -0,0 +1,4662 @@ +/* + * drivers/amlogic/media/stream_input/amports/amstream.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <uapi/linux/major.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/media/utils/aformat.h> +#include <linux/amlogic/media/frame_sync/tsync.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/frame_sync/timestamp.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/mutex.h> +#include <linux/poll.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/uaccess.h> +#include <linux/clk.h> +#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +/* #include <mach/mod_gate.h> */ +/* #include <mach/power_gate.h> */ +#endif +#include "../amports/streambuf.h" +#include "../amports/streambuf_reg.h" +#include "../parser/tsdemux.h" +#include "../parser/psparser.h" +#include "../parser/esparser.h" +#include "../../frame_provider/decoder/utils/vdec.h" 
+#include "adec.h" +#include "../parser/rmparser.h" +#include "amports_priv.h" +#include <linux/amlogic/media/utils/amports_config.h> +#include <linux/amlogic/media/frame_sync/tsync_pcr.h> +#include "../amports/thread_rw.h" +#include <linux/firmware.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/libfdt_env.h> +#include <linux/of_reserved_mem.h> +#include <linux/reset.h> +#ifdef CONFIG_COMPAT +#include <linux/compat.h> +#endif +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../../frame_provider/decoder/utils/firmware.h" +#include "../../common/chips/chips.h" +#include "../../common/chips/decoder_cpu_ver_info.h" +#include "../subtitle/subtitle.h" +#include "stream_buffer_base.h" +#include "../../frame_provider/decoder/utils/vdec_feature.h" + +//#define G12A_BRINGUP_DEBUG + +#define CONFIG_AM_VDEC_REAL //DEBUG_TMP + +#define DEVICE_NAME "amstream-dev" +#define DRIVER_NAME "amstream" +#define MODULE_NAME "amstream" + +#define MAX_AMSTREAM_PORT_NUM ARRAY_SIZE(ports) +u32 amstream_port_num; +u32 amstream_buf_num; + +u32 amstream_audio_reset = 0; + +#if 0 +#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESONG9TV +#define NO_VDEC2_INIT 1 +#elif MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD +#define NO_VDEC2_INIT IS_MESON_M8M2_CPU +#endif +#endif +#define NO_VDEC2_INIT 1 + +#define DEFAULT_VIDEO_BUFFER_SIZE (1024 * 1024 * 3) +#define DEFAULT_VIDEO_BUFFER_SIZE_4K (1024 * 1024 * 6) +#define DEFAULT_VIDEO_BUFFER_SIZE_TVP (1024 * 1024 * 10) +#define DEFAULT_VIDEO_BUFFER_SIZE_4K_TVP (1024 * 1024 * 15) + + +#define DEFAULT_AUDIO_BUFFER_SIZE (1024*768*2) +#define DEFAULT_SUBTITLE_BUFFER_SIZE (1024*256) + +static int def_4k_vstreambuf_sizeM = + (DEFAULT_VIDEO_BUFFER_SIZE_4K >> 20); +static int def_vstreambuf_sizeM = + (DEFAULT_VIDEO_BUFFER_SIZE >> 20); +static int slow_input; + +/* #define DATA_DEBUG */ +static int use_bufferlevelx10000 = 10000; +static int reset_canuse_buferlevel(int level); + +static 
struct platform_device *amstream_pdev; +struct device *amports_get_dma_device(void) +{ + return &amstream_pdev->dev; +} +EXPORT_SYMBOL(amports_get_dma_device); + +#ifdef DATA_DEBUG +#include <linux/fs.h> + +#define DEBUG_FILE_NAME "/sdcard/debug.tmp" +static struct file *debug_filp; +static loff_t debug_file_pos; + +void debug_file_write(const char __user *buf, size_t count) +{ + mm_segment_t old_fs; + + if (!debug_filp) + return; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + + if (count != vfs_write(debug_filp, buf, count, &debug_file_pos)) + pr_err("Failed to write debug file\n"); + + set_fs(old_fs); +} +#endif + + + +static int amstream_open(struct inode *inode, struct file *file); +static int amstream_release(struct inode *inode, struct file *file); +static long amstream_ioctl(struct file *file, unsigned int cmd, ulong arg); +#ifdef CONFIG_COMPAT +static long amstream_compat_ioctl + (struct file *file, unsigned int cmd, ulong arg); +#endif +static ssize_t amstream_vbuf_write +(struct file *file, const char *buf, size_t count, loff_t *ppos); +static ssize_t amstream_vframe_write +(struct file *file, const char *buf, size_t count, loff_t *ppos); +static ssize_t amstream_abuf_write +(struct file *file, const char *buf, size_t count, loff_t *ppos); +static ssize_t amstream_mpts_write +(struct file *file, const char *buf, size_t count, loff_t *ppos); +static ssize_t amstream_mpps_write +(struct file *file, const char *buf, size_t count, loff_t *ppos); +static ssize_t amstream_sub_read +(struct file *file, char *buf, size_t count, loff_t *ppos); +static ssize_t amstream_sub_write +(struct file *file, const char *buf, size_t count, loff_t *ppos); +static unsigned int amstream_sub_poll +(struct file *file, poll_table *wait_table); +static unsigned int amstream_userdata_poll +(struct file *file, poll_table *wait_table); +static int (*amstream_adec_status) +(struct adec_status *astatus); +#ifdef CONFIG_AM_VDEC_REAL +static ssize_t amstream_mprm_write +(struct file *file, 
const char *buf, size_t count, loff_t *ppos); +#endif + +static const struct file_operations vbuf_fops = { + .owner = THIS_MODULE, + .open = amstream_open, + .release = amstream_release, + .write = amstream_vbuf_write, + .unlocked_ioctl = amstream_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = amstream_compat_ioctl, +#endif +}; + +static const struct file_operations vframe_fops = { + .owner = THIS_MODULE, + .open = amstream_open, + .release = amstream_release, + .write = amstream_vframe_write, + .unlocked_ioctl = amstream_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = amstream_compat_ioctl, +#endif +}; + +static const struct file_operations abuf_fops = { + .owner = THIS_MODULE, + .open = amstream_open, + .release = amstream_release, + .write = amstream_abuf_write, + .unlocked_ioctl = amstream_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = amstream_compat_ioctl, +#endif +}; + +static const struct file_operations mpts_fops = { + .owner = THIS_MODULE, + .open = amstream_open, + .release = amstream_release, + .write = amstream_mpts_write, + .unlocked_ioctl = amstream_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = amstream_compat_ioctl, +#endif +}; + +static const struct file_operations mpps_fops = { + .owner = THIS_MODULE, + .open = amstream_open, + .release = amstream_release, + .write = amstream_mpps_write, + .unlocked_ioctl = amstream_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = amstream_compat_ioctl, +#endif +}; + +static const struct file_operations mprm_fops = { + .owner = THIS_MODULE, + .open = amstream_open, + .release = amstream_release, +#ifdef CONFIG_AM_VDEC_REAL + .write = amstream_mprm_write, +#endif + .unlocked_ioctl = amstream_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = amstream_compat_ioctl, +#endif +}; + +static const struct file_operations sub_fops = { + .owner = THIS_MODULE, + .open = amstream_open, + .release = amstream_release, + .write = amstream_sub_write, + .unlocked_ioctl = amstream_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = 
amstream_compat_ioctl, +#endif +}; + +static const struct file_operations sub_read_fops = { + .owner = THIS_MODULE, + .open = amstream_open, + .release = amstream_release, + .read = amstream_sub_read, + .poll = amstream_sub_poll, + .unlocked_ioctl = amstream_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = amstream_compat_ioctl, +#endif +}; + +static const struct file_operations userdata_fops = { + .owner = THIS_MODULE, + .open = amstream_open, + .release = amstream_release, + .poll = amstream_userdata_poll, + .unlocked_ioctl = amstream_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = amstream_compat_ioctl, +#endif +}; + +static const struct file_operations amstream_fops = { + .owner = THIS_MODULE, + .open = amstream_open, + .release = amstream_release, + .unlocked_ioctl = amstream_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = amstream_compat_ioctl, +#endif +}; + +/**************************************************/ +static struct audio_info audio_dec_info; +static struct class *amstream_dev_class; +static DEFINE_MUTEX(amstream_mutex); + +atomic_t subdata_ready = ATOMIC_INIT(0); +static int sub_type; +static int sub_port_inited; +/* wait queue for poll */ +static wait_queue_head_t amstream_sub_wait; +atomic_t userdata_ready = ATOMIC_INIT(0); +static int userdata_length; +static wait_queue_head_t amstream_userdata_wait; +#define USERDATA_FIFO_NUM 1024 +static struct userdata_poc_info_t *userdata_poc_info; +static int userdata_poc_ri, userdata_poc_wi; +static int last_read_wi; + +/*bit 1 force dual layer + *bit 2 force frame mode + */ +static u32 force_dv_mode; + +static DEFINE_MUTEX(userdata_mutex); + +static struct stream_port_s ports[] = { + { + .name = "amstream_vbuf", + .type = PORT_TYPE_ES | PORT_TYPE_VIDEO, + .fops = &vbuf_fops, + }, +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + { + .name = "amstream_vbuf_sched", + .type = PORT_TYPE_ES | PORT_TYPE_VIDEO | + PORT_TYPE_DECODER_SCHED, + .fops = &vbuf_fops, + }, + { + .name = "amstream_vframe", + .type = PORT_TYPE_ES | 
PORT_TYPE_VIDEO | + PORT_TYPE_FRAME | PORT_TYPE_DECODER_SCHED, + .fops = &vframe_fops, + }, +#endif + { + .name = "amstream_abuf", + .type = PORT_TYPE_ES | PORT_TYPE_AUDIO, + .fops = &abuf_fops, + }, + { + .name = "amstream_mpts", + .type = PORT_TYPE_MPTS | PORT_TYPE_VIDEO | + PORT_TYPE_AUDIO | PORT_TYPE_SUB, + .fops = &mpts_fops, + }, +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + { + .name = "amstream_mpts_sched", + .type = PORT_TYPE_MPTS | PORT_TYPE_VIDEO | + PORT_TYPE_AUDIO | PORT_TYPE_SUB | + PORT_TYPE_DECODER_SCHED, + .fops = &mpts_fops, + }, +#endif + { + .name = "amstream_mpps", + .type = PORT_TYPE_MPPS | PORT_TYPE_VIDEO | + PORT_TYPE_AUDIO | PORT_TYPE_SUB, + .fops = &mpps_fops, + }, + { + .name = "amstream_rm", + .type = PORT_TYPE_RM | PORT_TYPE_VIDEO | PORT_TYPE_AUDIO, + .fops = &mprm_fops, + }, + { + .name = "amstream_sub", + .type = PORT_TYPE_SUB, + .fops = &sub_fops, + }, + { + .name = "amstream_sub_read", + .type = PORT_TYPE_SUB_RD, + .fops = &sub_read_fops, + }, + { + .name = "amstream_userdata", + .type = PORT_TYPE_USERDATA, + .fops = &userdata_fops, + }, + { + .name = "amstream_hevc", + .type = PORT_TYPE_ES | PORT_TYPE_VIDEO | PORT_TYPE_HEVC, + .fops = &vbuf_fops, + .vformat = VFORMAT_HEVC, + }, +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + { + .name = "amstream_hevc_frame", + .type = PORT_TYPE_ES | PORT_TYPE_VIDEO | PORT_TYPE_HEVC | + PORT_TYPE_FRAME | PORT_TYPE_DECODER_SCHED, + .fops = &vframe_fops, + .vformat = VFORMAT_HEVC, + }, + { + .name = "amstream_hevc_sched", + .type = PORT_TYPE_ES | PORT_TYPE_VIDEO | PORT_TYPE_HEVC | + PORT_TYPE_DECODER_SCHED, + .fops = &vbuf_fops, + .vformat = VFORMAT_HEVC, + }, +#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION + { + .name = "amstream_dves_avc", + .type = PORT_TYPE_ES | PORT_TYPE_VIDEO | + PORT_TYPE_DECODER_SCHED | PORT_TYPE_DUALDEC, + .fops = &vbuf_fops, + }, + { + .name = "amstream_dves_hevc", + .type = PORT_TYPE_ES | PORT_TYPE_VIDEO | PORT_TYPE_HEVC | + PORT_TYPE_DECODER_SCHED | PORT_TYPE_DUALDEC, + .fops = 
&vbuf_fops, + .vformat = VFORMAT_HEVC, + }, + { + .name = "amstream_dves_avc_frame", + .type = PORT_TYPE_ES | PORT_TYPE_VIDEO | PORT_TYPE_FRAME | + PORT_TYPE_DECODER_SCHED | PORT_TYPE_DUALDEC, + .fops = &vframe_fops, + }, + { + .name = "amstream_dves_hevc_frame", + .type = PORT_TYPE_ES | PORT_TYPE_VIDEO | PORT_TYPE_HEVC | PORT_TYPE_FRAME | + PORT_TYPE_DECODER_SCHED | PORT_TYPE_DUALDEC, + .fops = &vframe_fops, + .vformat = VFORMAT_HEVC, + }, + { + .name = "amstream_dves_av1", + .type = PORT_TYPE_ES | PORT_TYPE_VIDEO | PORT_TYPE_HEVC | PORT_TYPE_FRAME | + PORT_TYPE_DECODER_SCHED | PORT_TYPE_DUALDEC, + .fops = &vframe_fops, + .vformat = VFORMAT_AV1, + }, +#endif +#endif +}; + +static struct stream_buf_s bufs[BUF_MAX_NUM] = { + { + .reg_base = VLD_MEM_VIFIFO_REG_BASE, + .type = BUF_TYPE_VIDEO, + .buf_start = 0, + .buf_size = DEFAULT_VIDEO_BUFFER_SIZE, + .default_buf_size = DEFAULT_VIDEO_BUFFER_SIZE, + .first_tstamp = INVALID_PTS + }, + { + .reg_base = AIU_MEM_AIFIFO_REG_BASE, + .type = BUF_TYPE_AUDIO, + .buf_start = 0, + .buf_size = DEFAULT_AUDIO_BUFFER_SIZE, + .default_buf_size = DEFAULT_AUDIO_BUFFER_SIZE, + .first_tstamp = INVALID_PTS + }, + { + .reg_base = 0, + .type = BUF_TYPE_SUBTITLE, + .buf_start = 0, + .buf_size = DEFAULT_SUBTITLE_BUFFER_SIZE, + .default_buf_size = DEFAULT_SUBTITLE_BUFFER_SIZE, + .first_tstamp = INVALID_PTS + }, + { + .reg_base = 0, + .type = BUF_TYPE_USERDATA, + .buf_start = 0, + .buf_size = 0, + .first_tstamp = INVALID_PTS + }, + { + .reg_base = HEVC_STREAM_REG_BASE, + .type = BUF_TYPE_HEVC, + .buf_start = 0, + .buf_size = DEFAULT_VIDEO_BUFFER_SIZE_4K, + .default_buf_size = DEFAULT_VIDEO_BUFFER_SIZE_4K, + .first_tstamp = INVALID_PTS + }, +}; + +struct stream_buf_s *get_buf_by_type(u32 type) +{ + if (PTS_TYPE_VIDEO == type) + return &bufs[BUF_TYPE_VIDEO]; + if (PTS_TYPE_AUDIO == type) + return &bufs[BUF_TYPE_AUDIO]; + if (has_hevc_vdec()) { + if (PTS_TYPE_HEVC == type) + return &bufs[BUF_TYPE_HEVC]; + } + + return NULL; +} + +void 
set_sample_rate_info(int arg)
{
	/* Record the decoded audio sample rate and mark audio_dec_info valid. */
	audio_dec_info.sample_rate = arg;
	audio_dec_info.valid = 1;
}

void set_ch_num_info(int arg)
{
	/* Record the decoded audio channel count. */
	audio_dec_info.channels = arg;
}

/* Accessor for the shared audio_dec_info filled in by the setters above. */
struct audio_info *get_audio_info(void)
{
	return &audio_dec_info;
}
EXPORT_SYMBOL(get_audio_info);

/*
 * Choose the video stream-buffer size before allocation, based on the
 * port's 4K flag and the DRM (secure/TVP) flag.  Does nothing when a
 * buffer (or an external buffer) has already been set up.
 */
static void amstream_change_vbufsize(struct port_priv_s *priv,
	struct stream_buf_s *pvbuf)
{
	if (pvbuf->buf_start != 0 || pvbuf->ext_buf_addr != 0) {
		pr_info("streambuf is alloced, buf_start 0x%lx, extbuf 0x%lx\n",
			pvbuf->buf_start, pvbuf->ext_buf_addr);
		return;
	}
	if (priv->port->is_4k) {
		pvbuf->buf_size = def_4k_vstreambuf_sizeM * SZ_1M;
		if (priv->vdec->port_flag & PORT_FLAG_DRM)
			pvbuf->buf_size = DEFAULT_VIDEO_BUFFER_SIZE_4K_TVP;
		/* Halve a large 4K buffer on systems with little codec memory. */
		if ((pvbuf->buf_size > 30 * SZ_1M) &&
		(codec_mm_get_total_size() < 220 * SZ_1M)) {
			/*if less than 250M, used 20M for 4K & 265*/
			pvbuf->buf_size = pvbuf->buf_size >> 1;
		}
	} else if (pvbuf->buf_size > def_vstreambuf_sizeM * SZ_1M) {
		if (priv->vdec->port_flag & PORT_FLAG_DRM)
			pvbuf->buf_size = DEFAULT_VIDEO_BUFFER_SIZE_TVP;
	} else {
		pvbuf->buf_size = def_vstreambuf_sizeM * SZ_1M;
		if (priv->vdec->port_flag & PORT_FLAG_DRM)
			pvbuf->buf_size = DEFAULT_VIDEO_BUFFER_SIZE_TVP;
	}
	reset_canuse_buferlevel(10000);
}

/*
 * Whether this port already went through amstream_port_init().  Video
 * ports track the flag on the vdec instance; other ports use the port's
 * own flag word.
 */
static bool port_get_inited(struct port_priv_s *priv)
{
	struct stream_port_s *port = priv->port;

	if (port->type & PORT_TYPE_VIDEO) {
		struct vdec_s *vdec = priv->vdec;

		return vdec ?
vdec->port_flag & PORT_FLAG_INITED : 0;
	}

	return port->flag & PORT_FLAG_INITED;
}

/*
 * Mark this port initialized.  For video ports the flag is set on both
 * the vdec instance and the port itself.
 */
static void port_set_inited(struct port_priv_s *priv)
{
	struct stream_port_s *port = priv->port;

	if (port->type & PORT_TYPE_VIDEO) {
		struct vdec_s *vdec = priv->vdec;

		vdec->port_flag |= PORT_FLAG_INITED;
		port->flag |= PORT_FLAG_INITED;
		pr_info("vdec->port_flag=0x%x, port_flag=0x%x\n",
			vdec->port_flag, port->flag);
	} else
		port->flag |= PORT_FLAG_INITED;
}

/*
 * Release the vdec (and its dual-decode slave, if any) bound to a video
 * port.  release_num selects the teardown stage; 0 releases everything.
 */
static void video_port_release(struct port_priv_s *priv,
	  struct stream_buf_s *pbuf, int release_num)
{
	struct vdec_s *vdec = priv->vdec;
	struct vdec_s *slave = NULL;

	if (!vdec)
		return;

	switch (release_num) {
	default:
	/*fallthrough*/
	case 0:		/*release all */
	case 3:
		if (vdec->slave)
			slave = vdec->slave;
		vdec_release(vdec);
		if (slave)
			vdec_release(slave);
		priv->vdec = NULL;
	/*fallthrough*/
	case 1:
		;
	}
}

/*
 * Initialize the decoder for a video port: validate vformat, detect 4K
 * (anything above 1920x1088), power the HEVC core for 4K H.264 on TXLX+
 * parts, size the stream buffer, and vdec_init() the master (plus the
 * slave for dual decode).
 */
static int video_port_init(struct port_priv_s *priv,
			  struct stream_buf_s *pbuf)
{
	int r;
	struct stream_port_s *port = priv->port;
	struct vdec_s *vdec = priv->vdec;

	if ((vdec->port_flag & PORT_FLAG_VFORMAT) == 0) {
		pr_err("vformat not set\n");
		return -EPERM;
	}
	if (vdec_dual(vdec) && vdec_secure(vdec) && (vdec->slave)) {
		/*copy drm flags for slave dec.*/
		vdec->slave->port_flag |= PORT_FLAG_DRM;
	}
	if (port->vformat == VFORMAT_H264_4K2K ||
		(priv->vdec->sys_info->height *
			priv->vdec->sys_info->width) > 1920*1088) {
		port->is_4k = true;
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TXLX
			&& port->vformat == VFORMAT_H264) {
			vdec_poweron(VDEC_HEVC);
		}
	} else {
		port->is_4k = false;
	}

	/* Frame-mode ports only need the vdec itself, no stream buffer. */
	if (port->type & PORT_TYPE_FRAME) {
		r = vdec_init(vdec,
			(priv->vdec->sys_info->height *
			priv->vdec->sys_info->width) > 1920*1088, false);
		if (r < 0) {
			pr_err("video_port_init %d, vdec_init failed\n",
				__LINE__);
			return r;
		}
#if 0
		if (vdec_dual(vdec)) {
			if (port->vformat == VFORMAT_AV1)	/* av1 dv only single
layer */
				return 0;
			r = vdec_init(vdec->slave,
				(priv->vdec->sys_info->height *
				priv->vdec->sys_info->width) > 1920*1088);
			if (r < 0) {
				vdec_release(vdec);
				pr_err("video_port_init %d, vdec_init failed\n",
					__LINE__);
				return r;
			}
		}
#endif
		return 0;
	}

	amstream_change_vbufsize(priv, pbuf);

	/* TS streams: power off whichever decoder core will not be used. */
	if (has_hevc_vdec()) {
		if (port->type & PORT_TYPE_MPTS) {
			if (pbuf->type == BUF_TYPE_HEVC)
				vdec_poweroff(VDEC_1);
			else
				vdec_poweroff(VDEC_HEVC);
		}
	}

	/* todo: set path based on port flag */
	r = vdec_init(vdec,
		(priv->vdec->sys_info->height *
		priv->vdec->sys_info->width) > 1920*1088, false);

	if (r < 0) {
		pr_err("video_port_init %d, vdec_init failed\n", __LINE__);
		goto err;
	}

	if (vdec_dual(vdec)) {
		/* Dual decode (e.g. Dolby Vision): init the slave too. */
		r = vdec_init(vdec->slave,
			(priv->vdec->sys_info->height *
			priv->vdec->sys_info->width) > 1920*1088, false);
		if (r < 0) {
			pr_err("video_port_init %d, vdec_init failed\n", __LINE__);
			goto err;
		}
	}

	return 0;
err:
	if (vdec->slave)
		vdec_release(vdec->slave);
	if (vdec)
		vdec_release(vdec);
	priv->vdec = NULL;

	return r;
}

/*
 * Tear down an audio port in stages; release_num selects the first stage
 * to run (0 = everything) and execution falls through the later stages.
 */
static void audio_port_release(struct stream_port_s *port,
	  struct stream_buf_s *pbuf, int release_num)
{
	switch (release_num) {
	default:
	/*fallthrough*/
	case 0:		/*release all */
	/*fallthrough*/
	case 4:
		esparser_release(pbuf);
	/*fallthrough*/
	case 3:
		adec_release(port->vformat);
	/*fallthrough*/
	case 2:
		stbuf_release(pbuf);
	/*fallthrough*/
	case 1:
		;
	}
	amstream_audio_reset = 0;
	return;
}

/*
 * Restart audio decode on an already-inited port: stop PTS, rebuild the
 * stream buffer and decoder, then reset whichever parser/demux path the
 * port type uses.
 */
static int audio_port_reset(struct stream_port_s *port,
			  struct stream_buf_s *pbuf)
{
	int r;
	if ((port->flag & PORT_FLAG_AFORMAT) == 0) {
		pr_err("aformat not set\n");
		return 0;
	}

	pr_info("audio port reset, flag:0x%x\n", port->flag);
	if ((port->flag & PORT_FLAG_INITED) == 0) {
		pr_info("audio port not inited,return\n");
		return 0;
	}

	pr_info("audio_port_reset begin\n");
	pts_stop(PTS_TYPE_AUDIO);

	stbuf_release(pbuf);

r = stbuf_init(pbuf, NULL);
	if (r < 0) {
		return r;
	}

	r = adec_init(port);
	if (r < 0) {
		/* Decoder failed: unwind the stream buffer set up above. */
		audio_port_release(port, pbuf, 2);
		return r;
	}

	if (port->type & PORT_TYPE_ES)
		esparser_audio_reset_s(pbuf);

	if (port->type & PORT_TYPE_MPTS)
		tsdemux_audio_reset();

	if (port->type & PORT_TYPE_MPPS)
		psparser_audio_reset();

#ifdef CONFIG_AM_VDEC_REAL
	if (port->type & PORT_TYPE_RM)
		rm_audio_reset();
#endif

	pbuf->flag |= BUF_FLAG_IN_USE;
	amstream_audio_reset = 1;

	r = pts_start(PTS_TYPE_AUDIO);

	//clear audio break flag after reset
	//tsync_audio_break(0);

	pr_info("audio_port_reset done\n");
	return r;
}

/*
 * Re-initialize a subtitle port: rebuild the stream buffer, reset the
 * matching demux/parser path, then mark the port inited again.
 */
static int sub_port_reset(struct stream_port_s *port,
			  struct stream_buf_s *pbuf)
{
	int r;

	port->flag &= (~PORT_FLAG_INITED);

	stbuf_release(pbuf);

	r = stbuf_init(pbuf, NULL);
	if (r < 0)
		return r;

	if (port->type & PORT_TYPE_MPTS)
		tsdemux_sub_reset();

	if (port->type & PORT_TYPE_MPPS)
		psparser_sub_reset();

	if (port->sid == 0xffff) {	/* es sub */
		esparser_sub_reset();
		pbuf->flag |= BUF_FLAG_PARSER;
	}

	pbuf->flag |= BUF_FLAG_IN_USE;

	port->flag |= PORT_FLAG_INITED;

	return 0;
}

/*
 * First-time setup of an audio port: stream buffer, audio decoder, and
 * (for ES ports) the ES parser.  Partial failures unwind through
 * audio_port_release() with the matching stage number.
 */
static int audio_port_init(struct stream_port_s *port,
			  struct stream_buf_s *pbuf)
{
	int r;

	if ((port->flag & PORT_FLAG_AFORMAT) == 0) {
		pr_err("aformat not set\n");
		return 0;
	}

	r = stbuf_init(pbuf, NULL);
	if (r < 0)
		return r;
	r = adec_init(port);
	if (r < 0) {
		audio_port_release(port, pbuf, 2);
		return r;
	}
	if (port->type & PORT_TYPE_ES) {
		r = esparser_init(pbuf, NULL);
		if (r < 0) {
			audio_port_release(port, pbuf, 3);
			return r;
		}
	}
	pbuf->flag |= BUF_FLAG_IN_USE;
	return 0;
}

/*
 * Tear down a subtitle port.  Pure-ES subtitle ports (sid == 0xffff and
 * neither TS nor PS) also own an ES parser instance to release.
 */
static void sub_port_release(struct stream_port_s *port,
			  struct stream_buf_s *pbuf)
{
	if ((port->sid == 0xffff) &&
		((port->type & (PORT_TYPE_MPPS | PORT_TYPE_MPTS)) == 0)) {
		/* this is es sub */
		esparser_release(pbuf);
	}
	stbuf_release(pbuf);
	sub_port_inited =
0;
}

/*
 * Set up a subtitle port: stream buffer first, then (for pure-ES subtitle,
 * sid == 0xffff and neither TS nor PS) an ES parser.  Sets
 * sub_port_inited on success so amstream_sub_read() starts returning data.
 */
static int sub_port_init(struct stream_port_s *port, struct stream_buf_s *pbuf)
{
	int r;
	r = stbuf_init(pbuf, NULL);
	if (r < 0)
		return r;
	if ((port->flag & PORT_FLAG_SID) == 0) {
		pr_err("subtitle id not set\n");
		return 0;
	}

	if ((port->sid == 0xffff) &&
		((port->type & (PORT_TYPE_MPPS | PORT_TYPE_MPTS)) == 0)) {
		/* es sub */
		r = esparser_init(pbuf, NULL);
		if (r < 0) {
			sub_port_release(port, pbuf);
			return r;
		}
	}

	sub_port_inited = 1;
	return 0;
}

/* Reset the userdata buffer bookkeeping (no backing memory is allocated). */
static void amstream_user_buffer_init(void)
{
	struct stream_buf_s *pubuf = &bufs[BUF_TYPE_USERDATA];

	pubuf->buf_size = 0;
	pubuf->buf_start = 0;
	pubuf->buf_wp = 0;
	pubuf->buf_rp = 0;
}

#if 1
/*DDD*/
/* Accessor for the global video stream buffer. */
struct stream_buf_s *get_vbuf(void)
{
	return &bufs[BUF_TYPE_VIDEO];
}

EXPORT_SYMBOL(get_vbuf);
#endif

/*
 * One-shot port initialization, run lazily on the first write: reload
 * firmware, set up audio/video/subtitle sub-ports and the TS demux as the
 * port type requires, then connect the vdec and mark the port inited.
 * Errors unwind in reverse order through the error4..error1 labels.
 */
static int amstream_port_init(struct port_priv_s *priv)
{
	int r = 0;
	struct stream_buf_s *pvbuf = &bufs[BUF_TYPE_VIDEO];
	struct stream_buf_s *pabuf = &bufs[BUF_TYPE_AUDIO];
	struct stream_buf_s *psbuf = &bufs[BUF_TYPE_SUBTITLE];
	struct stream_port_s *port = priv->port;
	struct vdec_s *vdec = priv->vdec;

	r = vdec_resource_checking(vdec);
	if (r < 0)
		return r;

	mutex_lock(&amstream_mutex);

	/* Efuse may forbid some formats on G12A..pre-SC2 parts. */
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) &&
		(get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_SC2)) {
		r = check_efuse_chip(port->vformat);
		if (r) {
			pr_info("No support video format %d.\n", port->vformat);
			mutex_unlock(&amstream_mutex);
			return 0;
		}
	}

	/* try to reload the fw.*/
	r = video_fw_reload(FW_LOAD_TRY);
	if (r)
		pr_err("the firmware reload fail.\n");

	stbuf_fetch_init();

	amstream_user_buffer_init();

	if (port_get_inited(priv)) {
		mutex_unlock(&amstream_mutex);
		return 0;
	}

	if ((port->type & PORT_TYPE_AUDIO) &&
		(port->flag & PORT_FLAG_AFORMAT)) {
		r = audio_port_init(port, pabuf);
		if (r < 0) {
			pr_err("audio_port_init failed\n");
			goto error1;
		}
	}

	if ((port->type &
PORT_TYPE_VIDEO) && + (port->flag & PORT_FLAG_VFORMAT)) { + if (vdec_stream_based(vdec)) { + struct stream_buf_ops *ops = NULL; + struct parser_args pars = { + .vid = (port->flag & PORT_FLAG_VID) ? port->vid : 0xffff, + .aid = (port->flag & PORT_FLAG_AID) ? port->aid : 0xffff, + .sid = (port->flag & PORT_FLAG_SID) ? port->sid : 0xffff, + .pcrid = (port->pcr_inited == 1) ? port->pcrid : 0xffff, + }; + + if (port->type & PORT_TYPE_MPTS) { + ops = get_tsparser_stbuf_ops(); + } else if (port->type & PORT_TYPE_MPPS) { + ops = get_psparser_stbuf_ops(); + } else { + ops = !vdec_single(vdec) ? + get_stbuf_ops() : + get_esparser_stbuf_ops(); + + /* def used stbuf with parser if the feature disable. */ + if (!is_support_no_parser()) + ops = get_esparser_stbuf_ops(); + else if (vdec->format == VFORMAT_H264MVC || + vdec->format == VFORMAT_VC1) + ops = get_stbuf_ops(); + } + + r = stream_buffer_base_init(&vdec->vbuf, ops, &pars); + if (r) { + mutex_unlock(&priv->mutex); + pr_err("stream buffer base init failed\n"); + goto error2; + } + } + + mutex_lock(&priv->mutex); + r = video_port_init(priv, &vdec->vbuf); + if (r < 0) { + mutex_unlock(&priv->mutex); + pr_err("video_port_init failed\n"); + goto error2; + } + mutex_unlock(&priv->mutex); + } + + if ((port->type & PORT_TYPE_MPTS) && + !(port->flag & PORT_FLAG_VFORMAT)) { + r = tsdemux_init(0xffff, + (port->flag & PORT_FLAG_AID) ? port->aid : 0xffff, + (port->flag & PORT_FLAG_SID) ? port->sid : 0xffff, + (port->pcr_inited == 1) ? port->pcrid : 0xffff, + 0, vdec); + if (r < 0) { + pr_err("tsdemux_init failed\n"); + goto error3; + } + tsync_pcr_start(); + } + + if ((port->type & PORT_TYPE_SUB) && (port->flag & PORT_FLAG_SID)) { + r = sub_port_init(port, psbuf); + if (r < 0) { + pr_err("sub_port_init failed\n"); + goto error4; + } + } + +#ifdef CONFIG_AM_VDEC_REAL + if (port->type & PORT_TYPE_RM) { + rm_set_vasid( + (port->flag & PORT_FLAG_VID) ? port->vid : 0xffff, + (port->flag & PORT_FLAG_AID) ? 
port->aid : 0xffff);
	}
#endif
#if 1				/* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */
	if (!NO_VDEC2_INIT) {
		if ((port->type & PORT_TYPE_VIDEO)
			&& (port->vformat == VFORMAT_H264_4K2K))
			stbuf_vdec2_init(pvbuf);
	}
#endif

	if ((port->type & PORT_TYPE_VIDEO) &&
		(vdec->port_flag & PORT_FLAG_VFORMAT))
		/* connect vdec at the end after all HW initialization */
		vdec_connect(vdec);

	tsync_audio_break(0);	/* clear audio break */
	set_vsync_pts_inc_mode(0);	/* clear video inc */

	port_set_inited(priv);

	mutex_unlock(&amstream_mutex);
	return 0;
	/*errors follow here */

error4:
	if ((port->type & PORT_TYPE_MPTS) &&
		!(port->flag & PORT_FLAG_VFORMAT))
		tsdemux_release();
error3:
	if ((port->type & PORT_TYPE_VIDEO) &&
		(port->flag & PORT_FLAG_VFORMAT))
		video_port_release(priv, &priv->vdec->vbuf, 0);
error2:
	if ((port->type & PORT_TYPE_AUDIO) &&
		(port->flag & PORT_FLAG_AFORMAT))
		audio_port_release(port, pabuf, 0);
error1:
	mutex_unlock(&amstream_mutex);
	return r;
}

/*
 * Undo amstream_port_init(): stop/release demux or parser, then the
 * video, audio and subtitle sub-ports as the port type requires, and
 * clear per-port state.
 */
static int amstream_port_release(struct port_priv_s *priv)
{
	struct stream_port_s *port = priv->port;
	struct stream_buf_s *pvbuf = &priv->vdec->vbuf;
	struct stream_buf_s *pabuf = &bufs[BUF_TYPE_AUDIO];
	struct stream_buf_s *psbuf = &bufs[BUF_TYPE_SUBTITLE];

	if ((port->type & PORT_TYPE_MPTS) &&
		!(port->flag & PORT_FLAG_VFORMAT)) {
		tsync_pcr_stop();
		tsdemux_release();
	}

	if ((port->type & PORT_TYPE_MPPS) &&
		!(port->flag & PORT_FLAG_VFORMAT)) {
		psparser_release();
	}

	if (port->type & PORT_TYPE_VIDEO) {
		video_port_release(priv, pvbuf, 0);
	}

	if (port->type & PORT_TYPE_AUDIO)
		audio_port_release(port, pabuf, 0);

	if (port->type & PORT_TYPE_SUB)
		sub_port_release(port, psbuf);

	port->pcr_inited = 0;

	/* Multi-instance ports keep their flags when no-parser is supported. */
	if (!is_mult_inc(port->type) ||
		(is_mult_inc(port->type) &&
		!is_support_no_parser()))
		port->flag = 0;

	return 0;
}

/* Propagate new video/audio ids to whichever demux/parser the port uses. */
static void amstream_change_avid(struct stream_port_s *port)
{
	if (port->type &
PORT_TYPE_MPTS) {
		tsdemux_change_avid(
		(port->flag & PORT_FLAG_VID) ? port->vid : 0xffff,
		(port->flag & PORT_FLAG_AID) ? port->aid : 0xffff);
	}

	if (port->type & PORT_TYPE_MPPS) {
		psparser_change_avid(
		(port->flag & PORT_FLAG_VID) ? port->vid : 0xffff,
		(port->flag & PORT_FLAG_AID) ? port->aid : 0xffff);
	}

#ifdef CONFIG_AM_VDEC_REAL
	if (port->type & PORT_TYPE_RM) {
		rm_set_vasid(
		(port->flag & PORT_FLAG_VID) ? port->vid : 0xffff,
		(port->flag & PORT_FLAG_AID) ? port->aid : 0xffff);
	}
#endif
}

/* Propagate a new subtitle id to whichever parser this port uses. */
static void amstream_change_sid(struct stream_port_s *port)
{
	if (port->type & PORT_TYPE_MPTS) {
		tsdemux_change_sid(
		(port->flag & PORT_FLAG_SID) ? port->sid : 0xffff);
	}

	if (port->type & PORT_TYPE_MPPS) {
		psparser_change_sid(
		(port->flag & PORT_FLAG_SID) ? port->sid : 0xffff);
	}
}

/**************************************************/
/*
 * write() for ES video ports: lazily runs amstream_port_init() on first
 * use, then hands data to the DRM or plain stream-buffer writer.
 */
static ssize_t amstream_vbuf_write(struct file *file, const char *buf,
				  size_t count, loff_t *ppos)
{
	struct port_priv_s *priv = (struct port_priv_s *)file->private_data;
	struct stream_buf_s *pbuf = &priv->vdec->vbuf;
	int r;

	if (!(port_get_inited(priv))) {
		r = amstream_port_init(priv);
		if (r < 0)
			return r;
	}

	if (priv->vdec->port_flag & PORT_FLAG_DRM)
		r = drm_write(file, pbuf, buf, count);
	else
		r = stream_buffer_write(file, pbuf, buf, count);
	if (slow_input) {
		pr_info("slow_input: es codec write size %x\n", r);
		msleep(3000);
	}
#ifdef DATA_DEBUG
	debug_file_write(buf, r);
#endif

	return r;
}

/*
 * write() for frame-mode video ports: feed a whole frame to the vdec,
 * retrying on -EAGAIN (up to ~5 x 20ms) unless the fd is non-blocking.
 */
static ssize_t amstream_vframe_write(struct file *file, const char *buf,
				  size_t count, loff_t *ppos)
{
	struct port_priv_s *priv = (struct port_priv_s *)file->private_data;
	ssize_t ret;
	int wait_max_cnt = 5;
#ifdef DATA_DEBUG
	debug_file_write(buf, count);
#endif
	do {
		ret = vdec_write_vframe(priv->vdec, buf, count);
		if (file->f_flags & O_NONBLOCK) {
			break;/*alway return for no block mode.*/
		} else if (ret == -EAGAIN) {
			int level;
level = vdec_input_level(&priv->vdec->input);
			if (wait_max_cnt-- < 0)
				break;
			msleep(20);
		}
	} while (ret == -EAGAIN);
	return ret;
}

/*
 * write() for ES audio ports: lazy port init, then DRM or ES-parser write
 * into the shared audio stream buffer.
 */
static ssize_t amstream_abuf_write(struct file *file, const char *buf,
				  size_t count, loff_t *ppos)
{
	struct port_priv_s *priv = (struct port_priv_s *)file->private_data;
	struct stream_port_s *port = priv->port;
	struct stream_buf_s *pbuf = &bufs[BUF_TYPE_AUDIO];
	int r;

	if (!(port_get_inited(priv))) {
		r = amstream_port_init(priv);
		if (r < 0)
			return r;
	}

	if (port->flag & PORT_FLAG_DRM)
		r = drm_write(file, pbuf, buf, count);
	else
		r = esparser_write(file, pbuf, buf, count);

	return r;
}

/*
 * write() for MPEG-TS ports: lazy port init, then feed the TS demux (or
 * the DRM TS path) which splits data into the video/audio buffers.
 */
static ssize_t amstream_mpts_write(struct file *file, const char *buf,
				  size_t count, loff_t *ppos)
{
	struct port_priv_s *priv = (struct port_priv_s *)file->private_data;
	struct stream_port_s *port = priv->port;
	struct stream_buf_s *pabuf = &bufs[BUF_TYPE_AUDIO];
	struct stream_buf_s *pvbuf = &priv->vdec->vbuf;
	int r = 0;

	if (!(port_get_inited(priv))) {
		r = amstream_port_init(priv);
		if (r < 0)
			return r;
	}
#ifdef DATA_DEBUG
	debug_file_write(buf, count);
#endif
	if (port->flag & PORT_FLAG_DRM)
		r = drm_tswrite(file, pvbuf, pabuf, buf, count);
	else
		r = tsdemux_write(file, pvbuf, pabuf, buf, count);
	if (slow_input) {
		pr_info("slow_input: ts codec write size %x\n", r);
		msleep(3000);
	}
	return r;
}

/* write() for MPEG-PS ports: lazy port init, then feed the PS parser. */
static ssize_t amstream_mpps_write(struct file *file, const char *buf,
				  size_t count, loff_t *ppos)
{
	struct port_priv_s *priv = (struct port_priv_s *)file->private_data;
	struct stream_buf_s *pvbuf = &bufs[BUF_TYPE_VIDEO];
	struct stream_buf_s *pabuf = &bufs[BUF_TYPE_AUDIO];
	int r;

	if (!(port_get_inited(priv))) {
		r = amstream_port_init(priv);
		if (r < 0)
			return r;
	}
	return psparser_write(file, pvbuf, pabuf, buf, count);
}

#ifdef CONFIG_AM_VDEC_REAL
/* write() for RealMedia ports: lazy port init, then the RM parser. */
static ssize_t amstream_mprm_write(struct file *file, const char *buf,
				  size_t count, loff_t
*ppos) +{ + struct port_priv_s *priv = (struct port_priv_s *)file->private_data; + struct stream_buf_s *pvbuf = &bufs[BUF_TYPE_VIDEO]; + struct stream_buf_s *pabuf = &bufs[BUF_TYPE_AUDIO]; + int r; + + if (!(port_get_inited(priv))) { + r = amstream_port_init(priv); + if (r < 0) + return r; + } + return rmparser_write(file, pvbuf, pabuf, buf, count); +} +#endif + +static ssize_t amstream_sub_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + u32 sub_rp, sub_wp, sub_start, data_size, res; + struct stream_buf_s *s_buf = &bufs[BUF_TYPE_SUBTITLE]; + + if (sub_port_inited == 0) + return 0; + + sub_rp = stbuf_sub_rp_get(); + sub_wp = stbuf_sub_wp_get(); + sub_start = stbuf_sub_start_get(); + + if (sub_wp == sub_rp || sub_rp == 0) + return 0; + /*flush sub buf before read*/ + codec_mm_dma_flush( + (void*)codec_mm_phys_to_virt(sub_start), + stbuf_size(s_buf), + DMA_FROM_DEVICE); + if (sub_wp > sub_rp) + data_size = sub_wp - sub_rp; + else + data_size = s_buf->buf_size - sub_rp + sub_wp; + + if (data_size > count) + data_size = count; + + if (sub_wp < sub_rp) { + int first_num = s_buf->buf_size - (sub_rp - sub_start); + + if (data_size <= first_num) { + res = copy_to_user((void *)buf, + (void *)(codec_mm_phys_to_virt(sub_rp)), + data_size); + stbuf_sub_rp_set(sub_rp + data_size - res); + + return data_size - res; + } else { + if (first_num > 0) { + res = copy_to_user((void *)buf, + (void *)(codec_mm_phys_to_virt(sub_rp)), + first_num); + stbuf_sub_rp_set(sub_rp + first_num - + res); + + return first_num - res; + } + + res = copy_to_user((void *)buf, + (void *)(codec_mm_phys_to_virt(sub_start)), + data_size - first_num); + + stbuf_sub_rp_set(sub_start + data_size - + first_num - res); + + return data_size - first_num - res; + } + } else { + res = + copy_to_user((void *)buf, + (void *)(codec_mm_phys_to_virt(sub_rp)), + data_size); + + stbuf_sub_rp_set(sub_rp + data_size - res); + + return data_size - res; + } +} + +static ssize_t 
amstream_sub_write(struct file *file, const char *buf,
				  size_t count, loff_t *ppos)
{
	struct port_priv_s *priv = (struct port_priv_s *)file->private_data;
	struct stream_buf_s *pbuf = &bufs[BUF_TYPE_SUBTITLE];
	int r;

	if (!(port_get_inited(priv))) {
		r = amstream_port_init(priv);
		if (r < 0)
			return r;
	}
	r = esparser_write(file, pbuf, buf, count);
	if (r < 0)
		return r;

	/* Notify subtitle pollers that new data arrived. */
	wakeup_sub_poll();

	return r;
}

/*
 * poll() for the subtitle device: reports writability once subdata_ready
 * has been flagged; the flag is consumed one poll at a time.
 */
static unsigned int amstream_sub_poll(struct file *file,
	poll_table *wait_table)
{
	poll_wait(file, &amstream_sub_wait, wait_table);

	if (atomic_read(&subdata_ready)) {
		atomic_dec(&subdata_ready);
		return POLLOUT | POLLWRNORM;
	}

	return 0;
}

/* Push a POC entry into the userdata FIFO (write index wraps around). */
static void set_userdata_poc(struct userdata_poc_info_t poc)
{
	userdata_poc_info[userdata_poc_wi] = poc;
	userdata_poc_wi++;
	if (userdata_poc_wi == USERDATA_FIFO_NUM)
		userdata_poc_wi = 0;
}

/* Reset the userdata POC FIFO indices and accumulated length. */
void init_userdata_fifo(void)
{
	userdata_poc_ri = 0;
	userdata_poc_wi = 0;
	userdata_length = 0;
}
EXPORT_SYMBOL(init_userdata_fifo);

/*
 * Reset the userdata ring.  bInit != 0 zeroes the buffer pointers and
 * FIFO indices (decoder reset); otherwise the read side is just
 * fast-forwarded to the write side, discarding pending data.
 */
void reset_userdata_fifo(int bInit)
{
	struct stream_buf_s *userdata_buf;
	int wi, ri;
	u32 rp, wp;

	mutex_lock(&userdata_mutex);

	wi = userdata_poc_wi;
	ri = userdata_poc_ri;

	userdata_buf = &bufs[BUF_TYPE_USERDATA];
	rp = userdata_buf->buf_rp;
	wp = userdata_buf->buf_wp;
	if (bInit) {
		/* decoder reset */
		userdata_buf->buf_rp = 0;
		userdata_buf->buf_wp = 0;
		userdata_poc_ri = 0;
		userdata_poc_wi = 0;
	} else {
		/* just clean fifo buffer */
		userdata_buf->buf_rp = userdata_buf->buf_wp;
		userdata_poc_ri = userdata_poc_wi;
	}
	userdata_length = 0;
	last_read_wi = userdata_poc_wi;

	mutex_unlock(&userdata_mutex);
	pr_debug("reset_userdata_fifo, bInit=%d, wi=%d, ri=%d, rp=%d, wp=%d\n",
		bInit, wi, ri, rp, wp);
}
EXPORT_SYMBOL(reset_userdata_fifo);

/*
 * Publish new userdata from a decoder and wake pollers.  Returns the
 * current read pointer so the caller can track consumption.
 */
int wakeup_userdata_poll(struct userdata_poc_info_t poc,
			 int wp,
			 unsigned long start_phyaddr,
			 int buf_size,
			 int data_length)
{
	struct stream_buf_s
*userdata_buf = &bufs[BUF_TYPE_USERDATA]; + mutex_lock(&userdata_mutex); + + if (data_length & 0x7) + data_length = (((data_length + 8) >> 3) << 3); + set_userdata_poc(poc); + userdata_buf->buf_start = start_phyaddr; + userdata_buf->buf_wp = wp; + userdata_buf->buf_size = buf_size; + atomic_set(&userdata_ready, 1); + userdata_length += data_length; + mutex_unlock(&userdata_mutex); + + wake_up_interruptible(&amstream_userdata_wait); + return userdata_buf->buf_rp; +} +EXPORT_SYMBOL(wakeup_userdata_poll); + + +void amstream_wakeup_userdata_poll(struct vdec_s *vdec) +{ + int i; + st_userdata *userdata = get_vdec_userdata_ctx(); + + if (vdec == NULL) { + pr_info("Error, invalid vdec instance!\n"); + return; + } + + mutex_lock(&userdata->mutex); + + for (i = 0; i < MAX_USERDATA_CHANNEL_NUM; i++) { + if (userdata->set_id_flag && (userdata->id[i] == vdec->video_id)) { + userdata->ready_flag[i] = 1; + if (vdec_get_debug_flags() & 0x10000000) + pr_info("%s, wakeup! id = %d\n", __func__, vdec->video_id); + break; + } else if (!userdata->set_id_flag) { + if (!userdata->used[0]) { + vdec->video_id = vdec->id; + userdata->id[0] = vdec->id; + userdata->used[0] = 1; + } + + if (vdec_get_debug_flags() & 0x10000000) + pr_info("%s[%d] userdata instance %d ready!\n", + __func__, i, userdata->id[i]); + + userdata->ready_flag[i] = 1; + break; + } + } + + mutex_unlock(&userdata->mutex); + + wake_up_interruptible(&userdata->userdata_wait); +} +EXPORT_SYMBOL(amstream_wakeup_userdata_poll); + +static unsigned int amstream_userdata_poll(struct file *file, + poll_table *wait_table) +{ + int fd_match = 0; + int i; + st_userdata *userdata = get_vdec_userdata_ctx(); + + poll_wait(file, &userdata->userdata_wait, wait_table); + mutex_lock(&userdata->mutex); + for (i = 0; i < MAX_USERDATA_CHANNEL_NUM; i++) { + if (userdata->id[i] == userdata->video_id && userdata->ready_flag[i] == 1) { + fd_match = 1; + if (vdec_get_debug_flags() & 0x10000000) + pr_info("%s, success! 
id = %d\n", __func__, userdata->video_id); + break; + } + } + + if (fd_match) { + mutex_unlock(&userdata->mutex); + return POLLIN | POLLRDNORM; + } + + mutex_unlock(&userdata->mutex); + return 0; +} + +static void amstream_userdata_init(void) +{ + int i; + st_userdata *userdata = get_vdec_userdata_ctx(); + + init_waitqueue_head(&userdata->userdata_wait); + mutex_init(&userdata->mutex); + userdata->set_id_flag = 0; + + for (i = 0; i < MAX_USERDATA_CHANNEL_NUM; i++) { + userdata->ready_flag[i] = 0; + userdata->id[i] = -1; + userdata->used[i] = 0; + } + + return; +} + +static int amstream_open(struct inode *inode, struct file *file) +{ + s32 i; + struct stream_port_s *s; + struct stream_port_s *port = &ports[iminor(inode)]; + struct port_priv_s *priv; + + VDEC_PRINT_FUN_LINENO(__func__, __LINE__); + if (vdec_get_debug_flags() & 0x10000000) + pr_info("%s, port type %d\n", __func__, port->type); +#ifdef G12A_BRINGUP_DEBUG + if (vdec_get_debug_flags() & 0xff0000) { + pr_info("%s force open port %d\n", + __func__, + ((vdec_get_debug_flags() >> 16) & 0xff) - 1); + port = &ports[((vdec_get_debug_flags() >> 16) & 0xff) - 1]; + } + pr_info("%s, port name %s\n", __func__, port->name); +#endif + if (iminor(inode) >= amstream_port_num) + return -ENODEV; + + mutex_lock(&amstream_mutex); + + if (port->type & PORT_TYPE_VIDEO) { + for (s = &ports[0], i = 0; i < amstream_port_num; i++, s++) { + if ((!is_mult_inc(s->type)) && + (s->type & PORT_TYPE_VIDEO) && + (s->flag & PORT_FLAG_IN_USE)) { + mutex_unlock(&amstream_mutex); + return -EBUSY; + } + } + } + + if (!is_support_no_parser()) { + if ((port->flag & PORT_FLAG_IN_USE) && + ((port->type & PORT_TYPE_FRAME) == 0)) { + mutex_unlock(&amstream_mutex); + return -EBUSY; + } + } + /* force dv frame mode */ + if (force_dv_mode & 0x2) { + port->type |= PORT_TYPE_FRAME; + port->fops = &vframe_fops; + pr_debug("%s, dobly vision force frame mode.\n", __func__); + } + + /* esplayer stream mode force dv */ + if (force_dv_mode & 0x1) + 
port->type |= PORT_TYPE_DUALDEC; + + /* check other ports conflicts for audio */ + for (s = &ports[0], i = 0; i < amstream_port_num; i++, s++) { + if ((s->flag & PORT_FLAG_IN_USE) && + ((port->type) & (s->type) & PORT_TYPE_AUDIO)) { + mutex_unlock(&amstream_mutex); + return -EBUSY; + } + } + + priv = kzalloc(sizeof(struct port_priv_s), GFP_KERNEL); + if (priv == NULL) { + mutex_unlock(&amstream_mutex); + return -ENOMEM; + } + + mutex_init(&priv->mutex); + + priv->port = port; + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + /* TODO: mod gate */ + /* switch_mod_gate_by_name("demux", 1); */ + amports_switch_gate("demux", 1); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) { + /* TODO: clc gate */ + /* CLK_GATE_ON(HIU_PARSER_TOP); */ + amports_switch_gate("parser_top", 1); + } + + if (port->type & PORT_TYPE_VIDEO) { + /* TODO: mod gate */ + /* switch_mod_gate_by_name("vdec", 1); */ + amports_switch_gate("vdec", 1); + + if (has_hevc_vdec()) { + if (port->type & + (PORT_TYPE_MPTS | PORT_TYPE_HEVC)) + vdec_poweron(VDEC_HEVC); + + if ((port->type & PORT_TYPE_HEVC) == 0) + vdec_poweron(VDEC_1); + } else { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) + vdec_poweron(VDEC_1); + } + } + + if (port->type & PORT_TYPE_AUDIO) { + /* TODO: mod gate */ + /* switch_mod_gate_by_name("audio", 1); */ + amports_switch_gate("audio", 1); + } + } + + port->vid = 0; + port->aid = 0; + port->sid = 0; + port->pcrid = 0xffff; + file->f_op = port->fops; + file->private_data = priv; + + port->flag = PORT_FLAG_IN_USE; + port->pcr_inited = 0; +#ifdef DATA_DEBUG + debug_filp = filp_open(DEBUG_FILE_NAME, O_WRONLY, 0); + if (IS_ERR(debug_filp)) { + pr_err("amstream: open debug file failed\n"); + debug_filp = NULL; + } +#endif + mutex_unlock(&amstream_mutex); + + if (port->type & PORT_TYPE_VIDEO) { + priv->vdec = vdec_create(port, NULL); + + if (priv->vdec == NULL) { + port->flag = 0; + kfree(priv); + pr_err("amstream: vdec creation failed\n"); + return -ENOMEM; + } + if 
(!(port->type & PORT_TYPE_FRAME)) { + if ((port->type & PORT_TYPE_DUALDEC) || + (vdec_get_debug_flags() & 0x100)) { + priv->vdec->slave = vdec_create(port, priv->vdec); + + if (priv->vdec->slave == NULL) { + vdec_release(priv->vdec); + port->flag = 0; + kfree(priv); + pr_err("amstream: sub vdec creation failed\n"); + return -ENOMEM; + } + } + } + } + + return 0; +} + +static int amstream_release(struct inode *inode, struct file *file) +{ + struct port_priv_s *priv = file->private_data; + struct stream_port_s *port = priv->port; + struct vdec_s *slave = NULL; +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + u32 port_flag = 0; +#endif + if (vdec_get_debug_flags() & 0x10000000) + pr_info("%s, port type %d\n", __func__, port->type); + + if (iminor(inode) >= amstream_port_num) + return -ENODEV; + + mutex_lock(&amstream_mutex); + + if (port_get_inited(priv)) + amstream_port_release(priv); + + if (priv->vdec) { +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + port_flag = priv->vdec->port_flag; +#endif + if (priv->vdec->slave) + slave = priv->vdec->slave; + vdec_release(priv->vdec); + if (slave) + vdec_release(slave); + priv->vdec = NULL; + } + + if ((port->type & (PORT_TYPE_AUDIO | PORT_TYPE_VIDEO)) == + PORT_TYPE_AUDIO) { + s32 i; + struct stream_port_s *s; + + for (s = &ports[0], i = 0; i < amstream_port_num; i++, s++) { + if ((s->flag & PORT_FLAG_IN_USE) + && (s->type & PORT_TYPE_VIDEO)) + break; + } + if (i == amstream_port_num) + timestamp_firstvpts_set(0); + } + + if (!is_mult_inc(port->type) || + (is_mult_inc(port->type) && + !is_support_no_parser())) + port->flag = 0; + + /* timestamp_pcrscr_set(0); */ + +#ifdef DATA_DEBUG + if (debug_filp) { + filp_close(debug_filp, current->files); + debug_filp = NULL; + debug_file_pos = 0; + } +#endif + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + if (port->type & PORT_TYPE_VIDEO) { + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) { +#ifndef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + if (has_hevc_vdec()) + vdec_poweroff(VDEC_HEVC); + + 
vdec_poweroff(VDEC_1); +#else + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TXLX + && port->vformat == VFORMAT_H264 + && port->is_4k) { + vdec_poweroff(VDEC_HEVC); + } + + if ((port->vformat == VFORMAT_HEVC + || port->vformat == VFORMAT_AVS2 + || port->vformat == VFORMAT_AV1 + || port->vformat == VFORMAT_VP9)) { + vdec_poweroff(VDEC_HEVC); + } else { + vdec_poweroff(VDEC_1); + } +#endif + } + /* TODO: mod gate */ + /* switch_mod_gate_by_name("vdec", 0); */ + amports_switch_gate("vdec", 0); + } + + if (port->type & PORT_TYPE_AUDIO) { + /* TODO: mod gate */ + /* switch_mod_gate_by_name("audio", 0); */ + /* amports_switch_gate("audio", 0); */ + } + + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) { + /* TODO: clc gate */ + /* CLK_GATE_OFF(HIU_PARSER_TOP); */ + amports_switch_gate("parser_top", 0); + } + /* TODO: mod gate */ + /* switch_mod_gate_by_name("demux", 0); */ + amports_switch_gate("demux", 0); + } + + mutex_destroy(&priv->mutex); + + kfree(priv); + + mutex_unlock(&amstream_mutex); + return 0; +} + +static long amstream_ioctl_get_version(struct port_priv_s *priv, + ulong arg) +{ + int version = (AMSTREAM_IOC_VERSION_FIRST & 0xffff) << 16 + | (AMSTREAM_IOC_VERSION_SECOND & 0xffff); + put_user(version, (u32 __user *)arg); + + return 0; +} +static long amstream_ioctl_get(struct port_priv_s *priv, ulong arg) +{ + struct stream_port_s *this = priv->port; + long r = 0; + + struct am_ioctl_parm parm; + + if (copy_from_user + ((void *)&parm, (void *)arg, + sizeof(parm))) + r = -EFAULT; + + switch (parm.cmd) { + case AMSTREAM_GET_SUB_LENGTH: + if ((this->type & PORT_TYPE_SUB) || + (this->type & PORT_TYPE_SUB_RD)) { + u32 sub_wp, sub_rp; + struct stream_buf_s *psbuf = &bufs[BUF_TYPE_SUBTITLE]; + int val; + + sub_wp = stbuf_sub_wp_get(); + sub_rp = stbuf_sub_rp_get(); + + if (sub_wp == sub_rp) + val = 0; + else if (sub_wp > sub_rp) + val = sub_wp - sub_rp; + else + val = psbuf->buf_size - (sub_rp - sub_wp); + parm.data_32 = val; + } else + r = -EINVAL; + break; 
+ case AMSTREAM_GET_UD_LENGTH: + if (this->type & PORT_TYPE_USERDATA) { + parm.data_32 = userdata_length; + userdata_length = 0; + } else + r = -EINVAL; + break; + case AMSTREAM_GET_APTS_LOOKUP: + if (this->type & PORT_TYPE_AUDIO) { + u32 pts = 0, frame_size, offset; + + offset = parm.data_32; + pts_lookup_offset(PTS_TYPE_AUDIO, offset, &pts, + &frame_size, 300); + parm.data_32 = pts; + } + break; + case AMSTREAM_GET_FIRST_APTS_FLAG: + if (this->type & PORT_TYPE_AUDIO) { + parm.data_32 = first_pts_checkin_complete( + PTS_TYPE_AUDIO); + } + break; + case AMSTREAM_GET_APTS: + parm.data_32 = timestamp_apts_get(); + break; + case AMSTREAM_GET_VPTS: + parm.data_32 = timestamp_vpts_get(); + break; + case AMSTREAM_GET_VPTS_U64: + parm.data_64 = timestamp_vpts_get_u64(); + break; + case AMSTREAM_GET_APTS_U64: + parm.data_64 = timestamp_apts_get_u64(); + break; + case AMSTREAM_GET_PCRSCR: + //parm.data_32 = timestamp_pcrscr_get(); + break; + case AMSTREAM_GET_LAST_CHECKIN_APTS: + parm.data_32 = get_last_checkin_pts(PTS_TYPE_AUDIO); + break; + case AMSTREAM_GET_LAST_CHECKIN_VPTS: + parm.data_32 = get_last_checkin_pts(PTS_TYPE_VIDEO); + break; + case AMSTREAM_GET_LAST_CHECKOUT_APTS: + parm.data_32 = get_last_checkout_pts(PTS_TYPE_AUDIO); + break; + case AMSTREAM_GET_LAST_CHECKOUT_VPTS: + parm.data_32 = get_last_checkout_pts(PTS_TYPE_VIDEO); + break; + case AMSTREAM_GET_SUB_NUM: + parm.data_32 = psparser_get_sub_found_num(); + break; + case AMSTREAM_GET_VIDEO_DELAY_LIMIT_MS: + parm.data_32 = bufs[BUF_TYPE_VIDEO].max_buffer_delay_ms; + break; + case AMSTREAM_GET_AUDIO_DELAY_LIMIT_MS: + parm.data_32 = bufs[BUF_TYPE_AUDIO].max_buffer_delay_ms; + break; + case AMSTREAM_GET_VIDEO_CUR_DELAY_MS: { + int delay; + + delay = calculation_stream_delayed_ms( + PTS_TYPE_VIDEO, NULL, NULL); + if (delay >= 0) + parm.data_32 = delay; + else + parm.data_32 = 0; + } + break; + + case AMSTREAM_GET_AUDIO_CUR_DELAY_MS: { + int delay; + + delay = calculation_stream_delayed_ms( + PTS_TYPE_AUDIO, 
NULL, NULL); + if (delay >= 0) + parm.data_32 = delay; + else + parm.data_32 = 0; + } + break; + case AMSTREAM_GET_AUDIO_AVG_BITRATE_BPS: { + int delay; + u32 avgbps; + + delay = calculation_stream_delayed_ms( + PTS_TYPE_AUDIO, NULL, &avgbps); + if (delay >= 0) + parm.data_32 = avgbps; + else + parm.data_32 = 0; + } + break; + case AMSTREAM_GET_VIDEO_AVG_BITRATE_BPS: { + int delay; + u32 avgbps; + + delay = calculation_stream_delayed_ms( + PTS_TYPE_VIDEO, NULL, &avgbps); + if (delay >= 0) + parm.data_32 = avgbps; + else + parm.data_32 = 0; + } + break; + case AMSTREAM_GET_ION_ID: + parm.data_32 = priv->vdec->vf_receiver_inst; + break; + case AMSTREAM_GET_NEED_MORE_DATA: + parm.data_32 = vdec_need_more_data(priv->vdec); + break; + case AMSTREAM_GET_FREED_HANDLE: + parm.data_32 = vdec_input_get_freed_handle(priv->vdec); + break; + default: + r = -ENOIOCTLCMD; + break; + } + /* pr_info("parm size:%d\n", sizeof(parm)); */ + if (r == 0) { + if (copy_to_user((void *)arg, &parm, sizeof(parm))) + r = -EFAULT; + } + + return r; + +} +static long amstream_ioctl_set(struct port_priv_s *priv, ulong arg) +{ + struct stream_port_s *this = priv->port; + struct am_ioctl_parm parm; + long r = 0; + int i; + st_userdata *userdata = get_vdec_userdata_ctx(); + + if (copy_from_user + ((void *)&parm, (void *)arg, + sizeof(parm))) + r = -EFAULT; + + switch (parm.cmd) { + case AMSTREAM_SET_VB_START: + if ((this->type & PORT_TYPE_VIDEO) && + ((priv->vdec->vbuf.flag & BUF_FLAG_IN_USE) == 0)) { + priv->vdec->vbuf.buf_start = parm.data_32; + } else + r = -EINVAL; + break; + case AMSTREAM_SET_VB_SIZE: + if ((this->type & PORT_TYPE_VIDEO) && + ((priv->vdec->vbuf.flag & BUF_FLAG_IN_USE) == 0)) { + if (priv->vdec->vbuf.flag & BUF_FLAG_ALLOC) { + r += stbuf_change_size( + &priv->vdec->vbuf, + parm.data_32, + false); + } + } else if (this->type & PORT_TYPE_FRAME) { + /* todo: frame based set max buffer size */ + r = 0; + } else + r = -EINVAL; + break; + case AMSTREAM_SET_AB_START: + if ((this->type 
& PORT_TYPE_AUDIO) && + ((bufs[BUF_TYPE_AUDIO].flag & BUF_FLAG_IN_USE) == 0)) + bufs[BUF_TYPE_AUDIO].buf_start = parm.data_32; + else + r = -EINVAL; + break; + case AMSTREAM_SET_AB_SIZE: + if ((this->type & PORT_TYPE_AUDIO) && + ((bufs[BUF_TYPE_AUDIO].flag & BUF_FLAG_IN_USE) == 0)) { + if (bufs[BUF_TYPE_AUDIO].flag & BUF_FLAG_ALLOC) { + r = stbuf_change_size( + &bufs[BUF_TYPE_AUDIO], + parm.data_32, + false); + } + } else + r = -EINVAL; + break; + case AMSTREAM_SET_VFORMAT: + if ((this->type & PORT_TYPE_VIDEO) && + (parm.data_vformat < VFORMAT_MAX)) { + this->vformat = parm.data_vformat; + this->flag |= PORT_FLAG_VFORMAT; + + vdec_set_format(priv->vdec, this->vformat); + } else + r = -EINVAL; + break; + case AMSTREAM_SET_AFORMAT: + if ((this->type & PORT_TYPE_AUDIO) && + (parm.data_aformat < AFORMAT_MAX)) { + memset(&audio_dec_info, 0, + sizeof(struct audio_info)); + /* for new format,reset the audio info. */ + this->aformat = parm.data_aformat; + this->flag |= PORT_FLAG_AFORMAT; + } else + r = -EINVAL; + break; + case AMSTREAM_SET_VID: + if (this->type & PORT_TYPE_VIDEO) { + this->vid = parm.data_32; + this->flag |= PORT_FLAG_VID; + } else + r = -EINVAL; + + break; + case AMSTREAM_SET_AID: + if (this->type & PORT_TYPE_AUDIO) { + this->aid = parm.data_32; + this->flag |= PORT_FLAG_AID; + + if (port_get_inited(priv)) { + //tsync_audio_break(1); + amstream_change_avid(this); + } + } else + r = -EINVAL; + break; + case AMSTREAM_SET_SID: + if (this->type & PORT_TYPE_SUB) { + this->sid = parm.data_32; + this->flag |= PORT_FLAG_SID; + + if (port_get_inited(priv)) + amstream_change_sid(this); + } else + r = -EINVAL; + + break; + case AMSTREAM_IOC_PCRID: + this->pcrid = parm.data_32; + this->pcr_inited = 1; + pr_err("set pcrid = 0x%x\n", this->pcrid); + break; + case AMSTREAM_SET_ACHANNEL: + if (this->type & PORT_TYPE_AUDIO) { + this->achanl = parm.data_32; + set_ch_num_info(parm.data_32); + } else + r = -EINVAL; + break; + case AMSTREAM_SET_SAMPLERATE: + if (this->type & 
PORT_TYPE_AUDIO) { + this->asamprate = parm.data_32; + set_sample_rate_info(parm.data_32); + } else + r = -EINVAL; + break; + case AMSTREAM_SET_DATAWIDTH: + if (this->type & PORT_TYPE_AUDIO) + this->adatawidth = parm.data_32; + else + r = -EINVAL; + break; + case AMSTREAM_SET_TSTAMP: + if ((this->type & (PORT_TYPE_AUDIO | PORT_TYPE_VIDEO)) == + ((PORT_TYPE_AUDIO | PORT_TYPE_VIDEO))) + r = -EINVAL; + else if (this->type & PORT_TYPE_FRAME) + r = vdec_set_pts(priv->vdec, parm.data_32); + else if ((this->type & PORT_TYPE_VIDEO) || + (this->type & PORT_TYPE_HEVC)) { + struct stream_buf_s *vbuf = &priv->vdec->vbuf; + if (vbuf->no_parser) { + pts_checkin_offset(PTS_TYPE_VIDEO, + vbuf->stream_offset, parm.data_32); + } else { + r = es_vpts_checkin(vbuf, parm.data_32); + } + } else if (this->type & PORT_TYPE_AUDIO) + r = es_apts_checkin(&bufs[BUF_TYPE_AUDIO], + parm.data_32); + break; + case AMSTREAM_SET_TSTAMP_US64: + if ((this->type & (PORT_TYPE_AUDIO | PORT_TYPE_VIDEO)) == + ((PORT_TYPE_AUDIO | PORT_TYPE_VIDEO))) + r = -EINVAL; + else { + u64 pts = parm.data_64; + + if (this->type & PORT_TYPE_FRAME) { + /* + *todo: check upper layer for decoder handler + * life sequence or multi-tasking management + */ + r = vdec_set_pts64(priv->vdec, pts); + } else if ((this->type & PORT_TYPE_HEVC) || + (this->type & PORT_TYPE_VIDEO)) { + r = es_vpts_checkin_us64( + &priv->vdec->vbuf, pts); + } else if (this->type & PORT_TYPE_AUDIO) { + r = es_vpts_checkin_us64( + &bufs[BUF_TYPE_AUDIO], pts); + } + } + break; + case AMSTREAM_PORT_INIT: + r = amstream_port_init(priv); + break; + case AMSTREAM_SET_TRICKMODE: + if ((this->type & PORT_TYPE_VIDEO) == 0) + return -EINVAL; + r = vdec_set_trickmode(priv->vdec, parm.data_32); + if (r == -1) + return -ENODEV; + break; + + case AMSTREAM_AUDIO_RESET: + if (this->type & PORT_TYPE_AUDIO) { + struct stream_buf_s *pabuf = &bufs[BUF_TYPE_AUDIO]; + + mutex_lock(&amstream_mutex); + r = audio_port_reset(this, pabuf); + mutex_unlock(&amstream_mutex); + } 
else + r = -EINVAL; + + break; + case AMSTREAM_SUB_RESET: + if (this->type & PORT_TYPE_SUB) { + struct stream_buf_s *psbuf = &bufs[BUF_TYPE_SUBTITLE]; + + r = sub_port_reset(this, psbuf); + } else + r = -EINVAL; + break; + case AMSTREAM_DEC_RESET: + tsync_set_dec_reset(); + break; + case AMSTREAM_SET_TS_SKIPBYTE: + tsdemux_set_skipbyte(parm.data_32); + break; + case AMSTREAM_SET_SUB_TYPE: + sub_type = parm.data_32; + break; + case AMSTREAM_SET_PCRSCR: + timestamp_pcrscr_set(parm.data_32); + break; + case AMSTREAM_SET_DEMUX: + tsdemux_set_demux(parm.data_32); + break; + case AMSTREAM_SET_VIDEO_DELAY_LIMIT_MS: + priv->vdec->vbuf.max_buffer_delay_ms = parm.data_32; + break; + case AMSTREAM_SET_AUDIO_DELAY_LIMIT_MS: + bufs[BUF_TYPE_AUDIO].max_buffer_delay_ms = parm.data_32; + break; + case AMSTREAM_SET_DRMMODE: + if (parm.data_32 == 1) { + pr_debug("set drmmode\n"); + this->flag |= PORT_FLAG_DRM; + if ((this->type & PORT_TYPE_VIDEO) && + (priv->vdec)) + priv->vdec->port_flag |= PORT_FLAG_DRM; + } else { + this->flag &= (~PORT_FLAG_DRM); + pr_debug("no drmmode\n"); + } + break; + case AMSTREAM_SET_APTS: { + unsigned int pts; + + pts = parm.data_32; + if (tsync_get_mode() == TSYNC_MODE_PCRMASTER) + tsync_pcr_set_apts(pts); + else + tsync_set_apts(pts); + break; + } + case AMSTREAM_SET_FRAME_BASE_PATH: + if (is_mult_inc(this->type) && + (parm.frame_base_video_path < FRAME_BASE_PATH_MAX)) { + vdec_set_video_path(priv->vdec, parm.data_32); + } else + r = -EINVAL; + break; + case AMSTREAM_SET_EOS: + if (priv->vdec) + vdec_set_eos(priv->vdec, parm.data_32); + break; + case AMSTREAM_SET_RECEIVE_ID: + if (is_mult_inc(this->type)) + vdec_set_receive_id(priv->vdec, parm.data_32); + else + r = -EINVAL; + break; + case AMSTREAM_SET_IS_RESET: + if (priv->vdec) + vdec_set_isreset(priv->vdec, parm.data_32); + break; + case AMSTREAM_SET_DV_META_WITH_EL: + if (priv->vdec) { + vdec_set_dv_metawithel(priv->vdec, parm.data_32); + if (vdec_dual(priv->vdec) && priv->vdec->slave) + 
vdec_set_dv_metawithel(priv->vdec->slave, + parm.data_32); + } + break; + case AMSTREAM_SET_NO_POWERDOWN: + vdec_set_no_powerdown(parm.data_32); + break; + case AMSTREAM_SET_VIDEO_ID: + priv->vdec->video_id = parm.data_32; + mutex_lock(&userdata->mutex); + for (i = 0;i < MAX_USERDATA_CHANNEL_NUM; i++) { + if (userdata->used[i] == 0) { + userdata->id[i] = priv->vdec->video_id; + userdata->used[i] = 1; + userdata->video_id = priv->vdec->video_id; + userdata->set_id_flag = 1; + break; + } + } + mutex_unlock(&userdata->mutex); + + pr_info("AMSTREAM_SET_VIDEO_ID video_id: %d\n", parm.data_32); + break; + default: + r = -ENOIOCTLCMD; + break; + } + return r; +} + +static enum E_ASPECT_RATIO get_normalized_aspect_ratio(u32 ratio_control) +{ + enum E_ASPECT_RATIO euAspectRatio; + + ratio_control = ratio_control >> DISP_RATIO_ASPECT_RATIO_BIT; + + switch (ratio_control) { + case 0x8c: + case 0x90: + euAspectRatio = ASPECT_RATIO_16_9; + /*pr_info("ASPECT_RATIO_16_9\n");*/ + break; + case 0xbb: + case 0xc0: + euAspectRatio = ASPECT_RATIO_4_3; + /*pr_info("ASPECT_RATIO_4_3\n");*/ + break; + default: + euAspectRatio = ASPECT_UNDEFINED; + /*pr_info("ASPECT_UNDEFINED and ratio_control = 0x%x\n", + ratio_control);*/ + break; + } + + return euAspectRatio; +} + +static long amstream_ioctl_get_ex(struct port_priv_s *priv, ulong arg) +{ + struct stream_port_s *this = priv->port; + long r = 0; + struct am_ioctl_parm_ex parm; + + if (copy_from_user + ((void *)&parm, (void *)arg, + sizeof(parm))) + r = -EFAULT; + + switch (parm.cmd) { + case AMSTREAM_GET_EX_VB_STATUS: + if (this->type & PORT_TYPE_VIDEO) { + struct am_ioctl_parm_ex *p = &parm; + struct stream_buf_s *buf = NULL; + + mutex_lock(&amstream_mutex); + + /* + *todo: check upper layer for decoder + * handler lifecycle + */ + if (priv->vdec == NULL) { + r = -EINVAL; + mutex_unlock(&amstream_mutex); + break; + } + + if (this->type & PORT_TYPE_FRAME) { + struct vdec_input_status_s status; + + r = 
vdec_input_get_status(&priv->vdec->input, + &status); + if (r == 0) { + p->status.size = status.size; + p->status.data_len = status.data_len; + p->status.free_len = status.free_len; + p->status.read_pointer = + status.read_pointer; + } + mutex_unlock(&amstream_mutex); + break; + } + + buf = &priv->vdec->vbuf; + p->status.size = stbuf_canusesize(buf); + p->status.data_len = stbuf_level(buf); + p->status.free_len = stbuf_space(buf); + p->status.read_pointer = stbuf_rp(buf); + mutex_unlock(&amstream_mutex); + } else + r = -EINVAL; + break; + case AMSTREAM_GET_EX_AB_STATUS: + if (this->type & PORT_TYPE_AUDIO) { + struct am_ioctl_parm_ex *p = &parm; + struct stream_buf_s *buf = &bufs[BUF_TYPE_AUDIO]; + + + p->status.size = stbuf_canusesize(buf); + p->status.data_len = stbuf_level(buf); + p->status.free_len = stbuf_space(buf); + p->status.read_pointer = stbuf_rp(buf); + + } else + r = -EINVAL; + break; + case AMSTREAM_GET_EX_VDECSTAT: + if ((this->type & PORT_TYPE_VIDEO) == 0) { + pr_err("no video\n"); + return -EINVAL; + } else { + struct vdec_info vstatus; + struct am_ioctl_parm_ex *p = &parm; + + memset(&vstatus, 0, sizeof(vstatus)); + + mutex_lock(&priv->mutex); + if (vdec_status(priv->vdec, &vstatus) == -1) { + mutex_unlock(&priv->mutex); + return -ENODEV; + } + mutex_unlock(&priv->mutex); + + p->vstatus.width = vstatus.frame_width; + p->vstatus.height = vstatus.frame_height; + p->vstatus.fps = vstatus.frame_rate; + p->vstatus.error_count = vstatus.error_count; + p->vstatus.status = vstatus.status; + p->vstatus.euAspectRatio = + get_normalized_aspect_ratio( + vstatus.ratio_control); + + } + break; + case AMSTREAM_GET_EX_ADECSTAT: + if ((this->type & PORT_TYPE_AUDIO) == 0) { + pr_err("no audio\n"); + return -EINVAL; + } + if (amstream_adec_status == NULL) { + /* + *pr_err("no amstream_adec_status\n"); + *return -ENODEV; + */ + memset(&parm.astatus, 0, sizeof(parm.astatus)); + } else { + struct adec_status astatus; + struct am_ioctl_parm_ex *p = &parm; + + 
amstream_adec_status(&astatus); + p->astatus.channels = astatus.channels; + p->astatus.sample_rate = astatus.sample_rate; + p->astatus.resolution = astatus.resolution; + p->astatus.error_count = astatus.error_count; + p->astatus.status = astatus.status; + } + break; + + case AMSTREAM_GET_EX_UD_POC: + if (this->type & PORT_TYPE_USERDATA) { + struct userdata_poc_info_t userdata_poc = + userdata_poc_info[userdata_poc_ri]; + memcpy(&parm.data_userdata_info, + &userdata_poc, + sizeof(struct userdata_poc_info_t)); + + userdata_poc_ri++; + if (userdata_poc_ri == USERDATA_FIFO_NUM) + userdata_poc_ri = 0; + } else + r = -EINVAL; + break; + case AMSTREAM_GET_EX_WR_COUNT: + { + struct am_ioctl_parm_ex *p = &parm; + struct vdec_s *vdec = priv->vdec; + + mutex_lock(&amstream_mutex); + if (!vdec) + vdec = vdec_get_vdec_by_id(0); //Use id 0 as default + if (vdec && vdec->mvfrm) + p->wr_count = vdec->mvfrm->wr; + else + p->wr_count = 0; + mutex_unlock(&amstream_mutex); + r = 0; + } + break; + default: + r = -ENOIOCTLCMD; + break; + } + /* pr_info("parm size:%zx\n", sizeof(parm)); */ + if (r == 0) { + if (copy_to_user((void *)arg, &parm, sizeof(parm))) + r = -EFAULT; + } + return r; + +} +static long amstream_ioctl_set_ex(struct port_priv_s *priv, ulong arg) +{ + long r = 0; + return r; +} +static long amstream_ioctl_get_ptr(struct port_priv_s *priv, ulong arg) +{ + long r = 0; + + struct am_ioctl_parm_ptr parm; + + if (copy_from_user + ((void *)&parm, (void *)arg, + sizeof(parm))) + return -EFAULT; + + switch (parm.cmd) { + case AMSTREAM_GET_PTR_SUB_INFO: + { + struct subtitle_info msub_info[MAX_SUB_NUM]; + struct subtitle_info *psub_info[MAX_SUB_NUM]; + int i; + + for (i = 0; i < MAX_SUB_NUM; i++) + psub_info[i] = &msub_info[i]; + + r = psparser_get_sub_info(psub_info); + + if (r == 0) { + memcpy(parm.pdata_sub_info, msub_info, + sizeof(struct subtitle_info) + * MAX_SUB_NUM); + } + } + break; + default: + r = -ENOIOCTLCMD; + break; + } + /* pr_info("parm size:%d\n", 
sizeof(parm)); */ + if (r == 0) { + if (copy_to_user((void *)arg, &parm, sizeof(parm))) + r = -EFAULT; + } + + return r; + +} +static long amstream_ioctl_set_ptr(struct port_priv_s *priv, ulong arg) +{ + struct stream_port_s *this = priv->port; + struct am_ioctl_parm_ptr parm; + long r = 0; + + if (copy_from_user + ((void *)&parm, (void *)arg, + sizeof(parm))) { + pr_err("[%s]%d, arg err\n", __func__, __LINE__); + r = -EFAULT; + } + switch (parm.cmd) { + case AMSTREAM_SET_PTR_AUDIO_INFO: + if ((this->type & PORT_TYPE_VIDEO) + || (this->type & PORT_TYPE_AUDIO)) { + if (parm.pdata_audio_info != NULL) { + if (copy_from_user + ((void *)&audio_dec_info, (void *)parm.pdata_audio_info, + sizeof(audio_dec_info))) { + pr_err("[%s]%d, arg err\n", __func__, __LINE__); + r = -EFAULT; + } + } + } else + r = -EINVAL; + break; + case AMSTREAM_SET_PTR_CONFIGS: + if (this->type & PORT_TYPE_VIDEO) { + if (!parm.pointer || (parm.len <= 0) || + (parm.len > PAGE_SIZE)) { + r = -EINVAL; + } else { + r = copy_from_user(priv->vdec->config, + parm.pointer, parm.len); + if (r) + r = -EINVAL; + else + priv->vdec->config_len = parm.len; + } + } else + r = -EINVAL; + break; + case AMSTREAM_SET_PTR_HDR10P_DATA: + if ((this->type & PORT_TYPE_VIDEO) && (this->type & PORT_TYPE_FRAME)) { + if (!parm.pointer || (parm.len <= 0) || + (parm.len > PAGE_SIZE)) { + r = -EINVAL; + } else { + r = copy_from_user(priv->vdec->hdr10p_data_buf, + parm.pointer, parm.len); + if (r) { + priv->vdec->hdr10p_data_size = 0; + priv->vdec->hdr10p_data_valid = false; + r = -EINVAL; + } else { + priv->vdec->hdr10p_data_size = parm.len; + priv->vdec->hdr10p_data_valid = true; + } + } + } else + r = -EINVAL; + break; + default: + r = -ENOIOCTLCMD; + break; + } + return r; +} + +static long amstream_do_ioctl_new(struct port_priv_s *priv, + unsigned int cmd, ulong arg) +{ + long r = 0; + struct stream_port_s *this = priv->port; + + switch (cmd) { + case AMSTREAM_IOC_GET_VERSION: + r = amstream_ioctl_get_version(priv, arg); + 
break; + case AMSTREAM_IOC_GET: + r = amstream_ioctl_get(priv, arg); + break; + case AMSTREAM_IOC_SET: + r = amstream_ioctl_set(priv, arg); + break; + case AMSTREAM_IOC_GET_EX: + r = amstream_ioctl_get_ex(priv, arg); + break; + case AMSTREAM_IOC_SET_EX: + r = amstream_ioctl_set_ex(priv, arg); + break; + case AMSTREAM_IOC_GET_PTR: + r = amstream_ioctl_get_ptr(priv, arg); + break; + case AMSTREAM_IOC_SET_PTR: + r = amstream_ioctl_set_ptr(priv, arg); + break; + case AMSTREAM_IOC_SYSINFO: + if (this->type & PORT_TYPE_VIDEO) + r = vdec_set_decinfo(priv->vdec, (void *)arg); + else + r = -EINVAL; + break; + case AMSTREAM_IOC_GET_QOSINFO: + case AMSTREAM_IOC_GET_MVDECINFO: + { + u32 slots = 0; + u32 struct_size = 0; + int vdec_id = 0; + struct vdec_s *vdec = priv->vdec; + struct vframe_counter_s *tmpbuf = kmalloc(QOS_FRAME_NUM * + sizeof(struct vframe_counter_s),GFP_KERNEL); + struct av_param_mvdec_t __user *uarg = (void *)arg; + + mutex_lock(&amstream_mutex); + if (!tmpbuf) { + r = -EFAULT; + pr_err("kmalloc vframe_counter_s failed!\n"); + mutex_unlock(&amstream_mutex); + break; + } + + if (get_user(vdec_id, &uarg->vdec_id) < 0 || + get_user(struct_size, &uarg->struct_size) < 0) { + r = -EFAULT; + kfree(tmpbuf); + mutex_unlock(&amstream_mutex); + break; + } + + if (vdec && !vdec_id) //If vdec_id is > 0, it means user require to use it. + r = 0;//vdec =priv->vdec;//Nothing to do. 
+ else + vdec = vdec_get_vdec_by_id(vdec_id); + if (!vdec) { + r = 0; + kfree(tmpbuf); + mutex_unlock(&amstream_mutex); + break; + } + + slots = vdec_get_frame_vdec(vdec, tmpbuf); + if (AMSTREAM_IOC_GET_MVDECINFO == cmd) + put_user(slots, &uarg->slots); + if (slots) { + if (AMSTREAM_IOC_GET_MVDECINFO == cmd) { + if (vdec->mvfrm && copy_to_user((void *)&uarg->comm, + &vdec->mvfrm->comm, + sizeof(struct vframe_comm_s))) { + r = -EFAULT; + kfree(tmpbuf); + mutex_unlock(&amstream_mutex); + break; + } + if (struct_size == sizeof(struct av_param_mvdec_t_old)) {//old struct + struct av_param_mvdec_t_old __user *uarg_old = (void *)arg; + int m; + for (m=0; m<slots; m++) + if (copy_to_user((void *)&uarg_old->minfo[m], + &tmpbuf[m], + sizeof(struct vframe_counter_s_old))) { + r = -EFAULT; + kfree(tmpbuf); + mutex_unlock(&amstream_mutex); + break; + } + } else if (struct_size == sizeof(struct av_param_mvdec_t)) {//new struct + if (copy_to_user((void *)&uarg->minfo[0], + tmpbuf, + slots*sizeof(struct vframe_counter_s))) { + r = -EFAULT; + kfree(tmpbuf); + mutex_unlock(&amstream_mutex); + break; + } + } else { + pr_err("pass in size %u,old struct size %u,current struct size %u\n", + struct_size, (u32)sizeof(struct av_param_mvdec_t_old),(u32)sizeof(struct av_param_mvdec_t)); + pr_err("App use another picture size,we haven't support it.\n"); + } + }else { //For compatibility, only copy the qos + struct av_param_qosinfo_t __user *uarg = (void *)arg; + int i; + for (i=0; i<slots; i++) + if (copy_to_user((void *)&uarg->vframe_qos[i], + &tmpbuf[i].qos, + sizeof(struct vframe_qos_s))) { + r = -EFAULT; + kfree(tmpbuf); + mutex_unlock(&amstream_mutex); + break; + } + } + } else { + /*Vdec didn't produce item,wait for 10 ms to avoid user application + infinitely calling*/ + //msleep(10); let user app handle it. 
+ } + kfree(tmpbuf); + } + mutex_unlock(&amstream_mutex); + break; + case AMSTREAM_IOC_GET_AVINFO: + { + struct av_param_info_t __user *uarg = (void *)arg; + struct av_info_t av_info; + int delay; + u32 avgbps; + if (this->type & PORT_TYPE_VIDEO) { + av_info.first_pic_coming = get_first_pic_coming(); + av_info.current_fps = -1; + av_info.vpts = timestamp_vpts_get(); + //av_info.vpts_err = tsync_get_vpts_error_num(); + av_info.apts = timestamp_apts_get(); + //av_info.apts_err = tsync_get_apts_error_num(); + av_info.ts_error = get_discontinue_counter(); + av_info.first_vpts = timestamp_firstvpts_get(); + av_info.toggle_frame_count = get_toggle_frame_count(); + delay = calculation_stream_delayed_ms( + PTS_TYPE_VIDEO, NULL, &avgbps); + if (delay >= 0) + av_info.dec_video_bps = avgbps; + else + av_info.dec_video_bps = 0; + } + if (copy_to_user((void *)&uarg->av_info, (void *)&av_info, + sizeof(struct av_info_t))) + r = -EFAULT; + } + break; + default: + r = -ENOIOCTLCMD; + break; + } + + return r; +} + +static long amstream_do_ioctl_old(struct port_priv_s *priv, + unsigned int cmd, ulong arg) +{ + struct stream_port_s *this = priv->port; + long r = 0; + int i; + + switch (cmd) { + + case AMSTREAM_IOC_VB_START: + if ((this->type & PORT_TYPE_VIDEO) && + ((priv->vdec->vbuf.flag & BUF_FLAG_IN_USE) == 0)) { + priv->vdec->vbuf.buf_start = arg; + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_VB_SIZE: + if ((this->type & PORT_TYPE_VIDEO) && + ((priv->vdec->vbuf.flag & BUF_FLAG_IN_USE) == 0)) { + if (priv->vdec->vbuf.flag & BUF_FLAG_ALLOC) { + r += stbuf_change_size( + &priv->vdec->vbuf, + arg, false); + } + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_AB_START: + if ((this->type & PORT_TYPE_AUDIO) && + ((bufs[BUF_TYPE_AUDIO].flag & BUF_FLAG_IN_USE) == 0)) + bufs[BUF_TYPE_AUDIO].buf_start = arg; + else + r = -EINVAL; + break; + + case AMSTREAM_IOC_AB_SIZE: + if ((this->type & PORT_TYPE_AUDIO) && + ((bufs[BUF_TYPE_AUDIO].flag & BUF_FLAG_IN_USE) == 0)) { + if 
(bufs[BUF_TYPE_AUDIO].flag & BUF_FLAG_ALLOC) { + r = stbuf_change_size( + &bufs[BUF_TYPE_AUDIO], arg, false); + } + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_VFORMAT: + if ((this->type & PORT_TYPE_VIDEO) && (arg < VFORMAT_MAX)) { + this->vformat = (enum vformat_e)arg; + this->flag |= PORT_FLAG_VFORMAT; + + vdec_set_format(priv->vdec, this->vformat); + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_AFORMAT: + if ((this->type & PORT_TYPE_AUDIO) && (arg < AFORMAT_MAX)) { + memset(&audio_dec_info, 0, + sizeof(struct audio_info)); + /* for new format,reset the audio info. */ + this->aformat = (enum aformat_e)arg; + this->flag |= PORT_FLAG_AFORMAT; + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_VID: + if (this->type & PORT_TYPE_VIDEO) { + this->vid = (u32) arg; + this->flag |= PORT_FLAG_VID; + } else + r = -EINVAL; + + break; + + case AMSTREAM_IOC_AID: + if (this->type & PORT_TYPE_AUDIO) { + this->aid = (u32) arg; + this->flag |= PORT_FLAG_AID; + + if (port_get_inited(priv)) { + //tsync_audio_break(1); + amstream_change_avid(this); + } + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_SID: + if (this->type & PORT_TYPE_SUB) { + this->sid = (u32) arg; + this->flag |= PORT_FLAG_SID; + + if (port_get_inited(priv)) + amstream_change_sid(this); + } else + r = -EINVAL; + + break; + + case AMSTREAM_IOC_PCRID: + this->pcrid = (u32) arg; + this->pcr_inited = 1; + pr_err("set pcrid = 0x%x\n", this->pcrid); + break; + + case AMSTREAM_IOC_VB_STATUS: + if (this->type & PORT_TYPE_VIDEO) { + struct am_io_param para; + struct am_io_param *p = ¶ + struct stream_buf_s *buf = NULL; + + mutex_lock(&amstream_mutex); + + /* + *todo: check upper layer for decoder + * handler lifecycle + */ + if (priv->vdec == NULL) { + r = -EINVAL; + mutex_unlock(&amstream_mutex); + break; + } + + if (this->type & PORT_TYPE_FRAME) { + struct vdec_input_status_s status; + + r = vdec_input_get_status(&priv->vdec->input, + &status); + if (r == 0) { + p->status.size = status.size; + 
p->status.data_len = status.data_len; + p->status.free_len = status.free_len; + p->status.read_pointer = + status.read_pointer; + if (copy_to_user((void *)arg, p, + sizeof(para))) + r = -EFAULT; + } + mutex_unlock(&amstream_mutex); + break; + } + + buf = &priv->vdec->vbuf; + p->status.size = stbuf_canusesize(buf); + p->status.data_len = stbuf_level(buf); + p->status.free_len = stbuf_space(buf); + p->status.read_pointer = stbuf_rp(buf); + if (copy_to_user((void *)arg, p, sizeof(para))) + r = -EFAULT; + + mutex_unlock(&amstream_mutex); + return r; + } + r = -EINVAL; + break; + + case AMSTREAM_IOC_AB_STATUS: + if (this->type & PORT_TYPE_AUDIO) { + struct am_io_param para; + struct am_io_param *p = ¶ + struct stream_buf_s *buf = &bufs[BUF_TYPE_AUDIO]; + + p->status.size = stbuf_canusesize(buf); + p->status.data_len = stbuf_level(buf); + p->status.free_len = stbuf_space(buf); + p->status.read_pointer = stbuf_rp(buf); + if (copy_to_user((void *)arg, p, sizeof(para))) + r = -EFAULT; + return r; + } + r = -EINVAL; + break; + + case AMSTREAM_IOC_SYSINFO: + if (this->type & PORT_TYPE_VIDEO) + r = vdec_set_decinfo(priv->vdec, (void *)arg); + else + r = -EINVAL; + break; + + case AMSTREAM_IOC_ACHANNEL: + if (this->type & PORT_TYPE_AUDIO) { + this->achanl = (u32) arg; + set_ch_num_info((u32) arg); + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_SAMPLERATE: + if (this->type & PORT_TYPE_AUDIO) { + this->asamprate = (u32) arg; + set_sample_rate_info((u32) arg); + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_DATAWIDTH: + if (this->type & PORT_TYPE_AUDIO) + this->adatawidth = (u32) arg; + else + r = -EINVAL; + break; + + case AMSTREAM_IOC_TSTAMP: + if ((this->type & (PORT_TYPE_AUDIO | PORT_TYPE_VIDEO)) == + ((PORT_TYPE_AUDIO | PORT_TYPE_VIDEO))) + r = -EINVAL; + else if (this->type & PORT_TYPE_FRAME) + r = vdec_set_pts(priv->vdec, arg); + else if ((this->type & PORT_TYPE_VIDEO) || + (this->type & PORT_TYPE_HEVC)) { + struct stream_buf_s *vbuf = &priv->vdec->vbuf; + if 
(vbuf->no_parser) { + pts_checkin_offset(PTS_TYPE_VIDEO, + vbuf->stream_offset, arg); + } else { + r = es_vpts_checkin(vbuf, arg); + } + } else if (this->type & PORT_TYPE_AUDIO) + r = es_apts_checkin(&bufs[BUF_TYPE_AUDIO], arg); + break; + + case AMSTREAM_IOC_TSTAMP_uS64: + if ((this->type & (PORT_TYPE_AUDIO | PORT_TYPE_VIDEO)) == + ((PORT_TYPE_AUDIO | PORT_TYPE_VIDEO))) + r = -EINVAL; + else { + u64 pts; + + if (copy_from_user + ((void *)&pts, (void *)arg, sizeof(u64))) + return -EFAULT; + if (this->type & PORT_TYPE_FRAME) { + /* + *todo: check upper layer for decoder handler + * life sequence or multi-tasking management + */ + if (priv->vdec) + r = vdec_set_pts64(priv->vdec, pts); + } else if ((this->type & PORT_TYPE_HEVC) || + (this->type & PORT_TYPE_VIDEO)) { + struct stream_buf_s *vbuf = &priv->vdec->vbuf; + if (vbuf->no_parser && !vdec_single(priv->vdec)) { + pts_checkin_offset_us64(PTS_TYPE_VIDEO, + vbuf->stream_offset, pts); + } else { + r = es_vpts_checkin_us64( + &priv->vdec->vbuf, pts); + } + } else if (this->type & PORT_TYPE_AUDIO) { + r = es_vpts_checkin_us64( + &bufs[BUF_TYPE_AUDIO], pts); + } + } + break; + + case AMSTREAM_IOC_VDECSTAT: + if ((this->type & PORT_TYPE_VIDEO) == 0) + return -EINVAL; + { + struct vdec_info vstatus; + struct am_io_param para; + struct am_io_param *p = ¶ + + memset(&vstatus, 0, sizeof(vstatus)); + + mutex_lock(&priv->mutex); + if (vdec_status(priv->vdec, &vstatus) == -1) { + mutex_unlock(&priv->mutex); + return -ENODEV; + } + mutex_unlock(&priv->mutex); + + p->vstatus.width = vstatus.frame_width; + p->vstatus.height = vstatus.frame_height; + p->vstatus.fps = vstatus.frame_rate; + p->vstatus.error_count = vstatus.error_count; + p->vstatus.status = vstatus.status; + p->vstatus.euAspectRatio = + get_normalized_aspect_ratio( + vstatus.ratio_control); + + if (copy_to_user((void *)arg, p, sizeof(para))) + r = -EFAULT; + return r; + } + + case AMSTREAM_IOC_VDECINFO: + if ((this->type & PORT_TYPE_VIDEO) == 0) + return -EINVAL; + { 
+ struct vdec_info vinfo; + struct am_io_info para; + + memset(¶, 0x0, sizeof(struct am_io_info)); + + mutex_lock(&priv->mutex); + if (vdec_status(priv->vdec, &vinfo) == -1) { + mutex_unlock(&priv->mutex); + return -ENODEV; + } + mutex_unlock(&priv->mutex); + + memcpy(¶.vinfo, &vinfo, sizeof(struct vdec_info)); + if (copy_to_user((void *)arg, ¶, sizeof(para))) + r = -EFAULT; + return r; + } + + case AMSTREAM_IOC_ADECSTAT: + if ((this->type & PORT_TYPE_AUDIO) == 0) + return -EINVAL; + if (amstream_adec_status == NULL) + return -ENODEV; + else { + struct adec_status astatus; + struct am_io_param para; + struct am_io_param *p = ¶ + + amstream_adec_status(&astatus); + p->astatus.channels = astatus.channels; + p->astatus.sample_rate = astatus.sample_rate; + p->astatus.resolution = astatus.resolution; + p->astatus.error_count = astatus.error_count; + p->astatus.status = astatus.status; + if (copy_to_user((void *)arg, p, sizeof(para))) + r = -EFAULT; + return r; + } + case AMSTREAM_IOC_PORT_INIT: + r = amstream_port_init(priv); + break; + + case AMSTREAM_IOC_VDEC_RESET: + if ((this->type & PORT_TYPE_VIDEO) == 0) + return -EINVAL; + + if (priv->vdec == NULL) + return -ENODEV; + + r = vdec_reset(priv->vdec); + break; + + case AMSTREAM_IOC_TRICKMODE: + if ((this->type & PORT_TYPE_VIDEO) == 0) + return -EINVAL; + r = vdec_set_trickmode(priv->vdec, arg); + if (r == -1) + return -ENODEV; + break; + + case AMSTREAM_IOC_AUDIO_INFO: + if ((this->type & PORT_TYPE_VIDEO) + || (this->type & PORT_TYPE_AUDIO)) { + if (copy_from_user + (&audio_dec_info, (void __user *)arg, + sizeof(audio_dec_info))) + r = -EFAULT; + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_AUDIO_RESET: + if (this->type & PORT_TYPE_AUDIO) { + struct stream_buf_s *pabuf = &bufs[BUF_TYPE_AUDIO]; + + mutex_lock(&amstream_mutex); + r = audio_port_reset(this, pabuf); + mutex_unlock(&amstream_mutex); + } else + r = -EINVAL; + + break; + + case AMSTREAM_IOC_SUB_RESET: + if (this->type & PORT_TYPE_SUB) { + struct 
stream_buf_s *psbuf = &bufs[BUF_TYPE_SUBTITLE]; + + r = sub_port_reset(this, psbuf); + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_SUB_LENGTH: + if ((this->type & PORT_TYPE_SUB) || + (this->type & PORT_TYPE_SUB_RD)) { + u32 sub_wp, sub_rp; + struct stream_buf_s *psbuf = &bufs[BUF_TYPE_SUBTITLE]; + int val; + + sub_wp = stbuf_sub_wp_get(); + sub_rp = stbuf_sub_rp_get(); + + if (sub_wp == sub_rp) + val = 0; + else if (sub_wp > sub_rp) + val = sub_wp - sub_rp; + else + val = psbuf->buf_size - (sub_rp - sub_wp); + put_user(val, (int __user *)arg); + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_UD_LENGTH: + if (this->type & PORT_TYPE_USERDATA) { + /* *((u32 *)arg) = userdata_length; */ + put_user(userdata_length, (unsigned long __user *)arg); + userdata_length = 0; + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_UD_POC: + if (this->type & PORT_TYPE_USERDATA) { + /* *((u32 *)arg) = userdata_length; */ + int ri; +#ifdef DEBUG_USER_DATA + int wi; +#endif + int bDataAvail = 0; + + mutex_lock(&userdata_mutex); + if (userdata_poc_wi != userdata_poc_ri) { + bDataAvail = 1; + ri = userdata_poc_ri; +#ifdef DEBUG_USER_DATA + wi = userdata_poc_wi; +#endif + userdata_poc_ri++; + if (userdata_poc_ri >= USERDATA_FIFO_NUM) + userdata_poc_ri = 0; + } + mutex_unlock(&userdata_mutex); + if (bDataAvail) { + int res; + struct userdata_poc_info_t userdata_poc = + userdata_poc_info[ri]; +#ifdef DEBUG_USER_DATA + pr_info("read poc: ri=%d, wi=%d, poc=%d, last_wi=%d\n", + ri, wi, + userdata_poc.poc_number, + last_read_wi); +#endif + res = + copy_to_user((unsigned long __user *)arg, + &userdata_poc, + sizeof(struct userdata_poc_info_t)); + if (res < 0) + r = -EFAULT; + } else { + r = -EFAULT; + } + } else { + r = -EINVAL; + } + break; + + case AMSTREAM_IOC_UD_BUF_READ: + { + if (this->type & PORT_TYPE_USERDATA) { + struct userdata_param_t param; + struct userdata_param_t *p_userdata_param; + struct vdec_s *vdec; + + p_userdata_param = ¶m; + if 
(copy_from_user(p_userdata_param, + (void __user *)arg, + sizeof(struct userdata_param_t))) { + r = -EFAULT; + break; + } + mutex_lock(&amstream_mutex); + if (vdec_get_debug_flags() & 0x10000000) + pr_info("%s, instance_id = %d\n", __func__, p_userdata_param->instance_id); + vdec = vdec_get_vdec_by_video_id(p_userdata_param->instance_id); + if (vdec) { + if (vdec_read_user_data(vdec, + p_userdata_param) == 0) { + r = -EFAULT; + mutex_unlock(&amstream_mutex); + break; + } + + if (copy_to_user((void *)arg, + p_userdata_param, + sizeof(struct userdata_param_t))) + r = -EFAULT; + } else + r = -EINVAL; + mutex_unlock(&amstream_mutex); + } + } + break; + + case AMSTREAM_IOC_UD_AVAILABLE_VDEC: + { + unsigned int ready_vdec = 0; + u32 ready_flag = 0; + st_userdata *userdata = get_vdec_userdata_ctx(); + + mutex_lock(&userdata->mutex); + for (i = 0; i < MAX_USERDATA_CHANNEL_NUM; i++) { + if (userdata->video_id == userdata->id[i] && + userdata->ready_flag[i] == 1) { + ready_vdec = userdata->id[i]; + userdata->ready_flag[i] = 0; + ready_flag = 1; + break; + } + } + if (!ready_flag) { + pr_info("instance %d not ready!\n", userdata->video_id); + r = -EINVAL; + } + mutex_unlock(&userdata->mutex); + + put_user(ready_vdec, (uint32_t __user *)arg); + if (vdec_get_debug_flags() & 0x10000000) + pr_info("%s, ready_vdec = %u\n", __func__, ready_vdec); + } + break; + + case AMSTREAM_IOC_GET_VDEC_ID: + if (this->type & PORT_TYPE_VIDEO && priv->vdec) { + put_user(priv->vdec->id, (int32_t __user *)arg); + } else + r = -EINVAL; + break; + + + case AMSTREAM_IOC_UD_FLUSH_USERDATA: + if (this->type & PORT_TYPE_USERDATA) { + struct vdec_s *vdec; + int video_id; + + mutex_lock(&amstream_mutex); + get_user(video_id, (int __user *)arg); + pr_info("userdata flush id: %d\n", video_id); + vdec = vdec_get_vdec_by_video_id(video_id); + if (vdec) { + vdec_reset_userdata_fifo(vdec, 0); + pr_info("reset_userdata_fifo for vdec_id: %d video_id: %d\\n", + vdec->id, vdec->video_id); + } + 
mutex_unlock(&amstream_mutex); + } else + r = -EINVAL; + break; + + case AMSTREAM_IOC_SET_DEC_RESET: + tsync_set_dec_reset(); + break; + + case AMSTREAM_IOC_TS_SKIPBYTE: + if ((int)arg >= 0) + tsdemux_set_skipbyte(arg); + else + r = -EINVAL; + break; + + case AMSTREAM_IOC_SUB_TYPE: + sub_type = (int)arg; + break; + + case AMSTREAM_IOC_APTS_LOOKUP: + if (this->type & PORT_TYPE_AUDIO) { + u32 pts = 0, frame_size, offset; + + get_user(offset, (unsigned long __user *)arg); + pts_lookup_offset(PTS_TYPE_AUDIO, offset, &pts, + &frame_size, 300); + put_user(pts, (int __user *)arg); + } + return 0; + case GET_FIRST_APTS_FLAG: + if (this->type & PORT_TYPE_AUDIO) { + put_user(first_pts_checkin_complete(PTS_TYPE_AUDIO), + (int __user *)arg); + } + break; + + case AMSTREAM_IOC_APTS: + put_user(timestamp_apts_get(), (int __user *)arg); + break; + + case AMSTREAM_IOC_VPTS: + put_user(timestamp_vpts_get(), (int __user *)arg); + break; + + case AMSTREAM_IOC_PCRSCR: + //put_user(timestamp_pcrscr_get(), (int __user *)arg); + break; + + case AMSTREAM_IOC_SET_PCRSCR: + timestamp_pcrscr_set(arg); + break; + case AMSTREAM_IOC_GET_LAST_CHECKIN_APTS: + put_user(get_last_checkin_pts(PTS_TYPE_AUDIO), (int *)arg); + break; + case AMSTREAM_IOC_GET_LAST_CHECKIN_VPTS: + put_user(get_last_checkin_pts(PTS_TYPE_VIDEO), (int *)arg); + break; + case AMSTREAM_IOC_GET_LAST_CHECKOUT_APTS: + put_user(get_last_checkout_pts(PTS_TYPE_AUDIO), (int *)arg); + break; + case AMSTREAM_IOC_GET_LAST_CHECKOUT_VPTS: + put_user(get_last_checkout_pts(PTS_TYPE_VIDEO), (int *)arg); + break; + case AMSTREAM_IOC_SUB_NUM: + put_user(psparser_get_sub_found_num(), (int *)arg); + break; + + case AMSTREAM_IOC_SUB_INFO: + if (arg > 0) { + struct subtitle_info *msub_info = + vzalloc(sizeof(struct subtitle_info) * MAX_SUB_NUM); + struct subtitle_info **psub_info = + vzalloc(sizeof(struct subtitle_info) * MAX_SUB_NUM); + int i; + + if (!msub_info || !psub_info) { + r = -ENOMEM; + break; + } + + for (i = 0; i < MAX_SUB_NUM; i++) + 
psub_info[i] = &msub_info[i]; + + r = psparser_get_sub_info(psub_info); + + if (r == 0) { + if (copy_to_user((void __user *)arg, msub_info, + sizeof(struct subtitle_info) * MAX_SUB_NUM)) + r = -EFAULT; + } + vfree(msub_info); + vfree(psub_info); + } + break; + case AMSTREAM_IOC_SET_DEMUX: + tsdemux_set_demux((int)arg); + break; + case AMSTREAM_IOC_SET_VIDEO_DELAY_LIMIT_MS: + priv->vdec->vbuf.max_buffer_delay_ms = (int)arg; + break; + case AMSTREAM_IOC_SET_AUDIO_DELAY_LIMIT_MS: + bufs[BUF_TYPE_AUDIO].max_buffer_delay_ms = (int)arg; + break; + case AMSTREAM_IOC_GET_VIDEO_DELAY_LIMIT_MS: + put_user(priv->vdec->vbuf.max_buffer_delay_ms, (int *)arg); + break; + case AMSTREAM_IOC_GET_AUDIO_DELAY_LIMIT_MS: + put_user(bufs[BUF_TYPE_AUDIO].max_buffer_delay_ms, (int *)arg); + break; + case AMSTREAM_IOC_GET_VIDEO_CUR_DELAY_MS: { + int delay; + + delay = calculation_stream_delayed_ms( + PTS_TYPE_VIDEO, NULL, NULL); + if (delay >= 0) + put_user(delay, (int *)arg); + else + put_user(0, (int *)arg); + } + break; + + case AMSTREAM_IOC_GET_AUDIO_CUR_DELAY_MS: { + int delay; + + delay = calculation_stream_delayed_ms(PTS_TYPE_AUDIO, NULL, + NULL); + if (delay >= 0) + put_user(delay, (int *)arg); + else + put_user(0, (int *)arg); + } + break; + case AMSTREAM_IOC_GET_AUDIO_AVG_BITRATE_BPS: { + int delay; + u32 avgbps; + + delay = calculation_stream_delayed_ms(PTS_TYPE_AUDIO, NULL, + &avgbps); + if (delay >= 0) + put_user(avgbps, (int *)arg); + else + put_user(0, (int *)arg); + break; + } + case AMSTREAM_IOC_GET_VIDEO_AVG_BITRATE_BPS: { + int delay; + u32 avgbps; + + delay = calculation_stream_delayed_ms(PTS_TYPE_VIDEO, NULL, + &avgbps); + if (delay >= 0) + put_user(avgbps, (int *)arg); + else + put_user(0, (int *)arg); + break; + } + case AMSTREAM_IOC_SET_DRMMODE: + if ((u32) arg == 1) { + pr_err("set drmmode, input must be secure buffer\n"); + this->flag |= PORT_FLAG_DRM; + if ((this->type & PORT_TYPE_VIDEO) && + (priv->vdec)) + priv->vdec->port_flag |= PORT_FLAG_DRM; + } else if 
((u32)arg == 2) { + pr_err("set drmmode, input must be normal buffer\n"); + if ((this->type & PORT_TYPE_VIDEO) && + (priv->vdec)) { + pr_err("vdec port_flag with drmmode\n"); + priv->vdec->port_flag |= PORT_FLAG_DRM; + } + } else { + this->flag &= (~PORT_FLAG_DRM); + pr_err("no drmmode\n"); + } + break; + case AMSTREAM_IOC_SET_APTS: { + unsigned long pts; + + if (get_user(pts, (unsigned long __user *)arg)) { + pr_err + ("Get audio pts from user space fault!\n"); + return -EFAULT; + } + if (tsync_get_mode() == TSYNC_MODE_PCRMASTER) + tsync_pcr_set_apts(pts); + else + tsync_set_apts(pts); + break; + } + case AMSTREAM_IOC_SET_CRC: { + struct usr_crc_info_t crc_info; + struct vdec_s *vdec; + + if (copy_from_user(&crc_info, (void __user *)arg, + sizeof(struct usr_crc_info_t))) { + return -EFAULT; + } + /* + pr_info("id %d, frame %d, y_crc: %08x, uv_crc: %08x\n", crc_info.id, + crc_info.pic_num, crc_info.y_crc, crc_info.uv_crc); + */ + vdec = vdec_get_vdec_by_id(crc_info.id); + if (vdec == NULL) + return -ENODEV; + if (vdec->vfc.cmp_pool == NULL) { + vdec->vfc.cmp_pool = + vmalloc(USER_CMP_POOL_MAX_SIZE * + sizeof(struct usr_crc_info_t)); + if (vdec->vfc.cmp_pool == NULL) + return -ENOMEM; + } + if (vdec->vfc.usr_cmp_num >= USER_CMP_POOL_MAX_SIZE) { + pr_info("warn: could not write any more, max %d", + USER_CMP_POOL_MAX_SIZE); + return -EFAULT; + } + memcpy(&vdec->vfc.cmp_pool[vdec->vfc.usr_cmp_num], &crc_info, + sizeof(struct usr_crc_info_t)); + vdec->vfc.usr_cmp_num++; + break; + } + case AMSTREAM_IOC_GET_CRC_CMP_RESULT: { + int val, vdec_id; + struct vdec_s *vdec; + + if (get_user(val, (int __user *)arg)) { + return -EFAULT; + } + vdec_id = val & 0x00ff; + vdec = vdec_get_vdec_by_id(vdec_id); + if (vdec == NULL) + return -ENODEV; + if (val & 0xff00) + put_user(vdec->vfc.usr_cmp_num, (int *)arg); + else + put_user(vdec->vfc.usr_cmp_result, (int *)arg); + /* + pr_info("amstream get crc32 cmpare num %d result: %d\n", + vdec->vfc.usr_cmp_num, vdec->vfc.usr_cmp_result); + 
*/ + break; + } + case AMSTREAM_IOC_INIT_EX_STBUF: { + struct stream_buffer_metainfo parm; + struct stream_buf_s *vbuf = NULL; + + if (priv->vdec == NULL) { + pr_err("init %s, no vdec.\n", __func__); + return -EFAULT; + } + + vbuf = &priv->vdec->vbuf; + if (vbuf == NULL) { + pr_err("init %s, no stbuf.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(&parm, (void __user *)arg, + sizeof(struct stream_buffer_metainfo))) { + return -EFAULT; + } + stream_buffer_set_ext_buf(vbuf, parm.stbuf_start, + parm.stbuf_size, parm.stbuf_flag); + break; + } + case AMSTREAM_IOC_WR_STBUF_META: { + struct stream_buffer_metainfo meta; + struct stream_buf_s *vbuf = NULL; + + if (priv->vdec == NULL) { + pr_err("write %s, no vdec.\n", __func__); + return -EFAULT; + } + + vbuf = &priv->vdec->vbuf; + if (vbuf == NULL) { + pr_err("write %s, no stbuf.\n", __func__); + return -EFAULT; + } + + if (vbuf->ops == NULL) { + pr_err("write %s, no ops.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(&meta, (void __user *)arg, + sizeof(struct stream_buffer_metainfo))) { + return -EFAULT; + } + if (!vbuf->ext_buf_addr) + return -ENODEV; + + stream_buffer_meta_write(vbuf, &meta); + break; + } + case AMSTREAM_IOC_GET_STBUF_STATUS: { + struct stream_buffer_status st; + struct stream_buf_s *pbuf = NULL; + + if (priv->vdec == NULL) { + pr_err("get status %s, no vdec.\n", __func__); + return -EFAULT; + } + + pbuf = &priv->vdec->vbuf; + if (pbuf == NULL) { + pr_err("get status %s, no stbuf.\n", __func__); + return -EFAULT; + } + + if (pbuf->ops == NULL) { + pr_err("get status %s, no ops.\n", __func__); + return -EFAULT; + } + + st.stbuf_start = pbuf->ext_buf_addr; + st.stbuf_size = pbuf->buf_size; + st.stbuf_rp = pbuf->ops->get_rp(pbuf); + st.stbuf_wp = pbuf->ops->get_wp(pbuf); + if (copy_to_user((void __user *)arg, &st, + sizeof(struct stream_buffer_status))) { + return -EFAULT; + } + break; + } + default: + r = -ENOIOCTLCMD; + break; + } + + return r; +} + +static long 
/*
 * amstream_do_ioctl() - route an ioctl to the v2 or legacy handler.
 *
 * The listed "structured" commands (GET/SET families, SYSINFO, QoS and
 * decoder-info queries) are served by amstream_do_ioctl_new(); every
 * other command falls through to amstream_do_ioctl_old().  Non-zero
 * results are only traced via pr_debug.
 */
amstream_do_ioctl(struct port_priv_s *priv,
	unsigned int cmd, ulong arg)
{
	long r = 0;

	switch (cmd) {
	case AMSTREAM_IOC_GET_VERSION:
	case AMSTREAM_IOC_GET:
	case AMSTREAM_IOC_SET:
	case AMSTREAM_IOC_GET_EX:
	case AMSTREAM_IOC_SET_EX:
	case AMSTREAM_IOC_GET_PTR:
	case AMSTREAM_IOC_SET_PTR:
	case AMSTREAM_IOC_SYSINFO:
	case AMSTREAM_IOC_GET_QOSINFO:
	case AMSTREAM_IOC_GET_MVDECINFO:
	case AMSTREAM_IOC_GET_AVINFO:
		r = amstream_do_ioctl_new(priv, cmd, arg);
		break;
	default:
		r = amstream_do_ioctl_old(priv, cmd, arg);
		break;
	}
	if (r != 0)
		pr_debug("amstream_do_ioctl error :%lx, %x\n", r, cmd);

	return r;
}

/* unlocked_ioctl entry point: validate the port, then dispatch. */
static long amstream_ioctl(struct file *file, unsigned int cmd, ulong arg)
{
	struct port_priv_s *priv = (struct port_priv_s *)file->private_data;
	struct stream_port_s *this = priv->port;

	if (!this)
		return -ENODEV;

	return amstream_do_ioctl(priv, cmd, arg);
}

#ifdef CONFIG_COMPAT
/*
 * 32-bit userspace layout of struct dec_sysinfo: seven leading u32
 * fields, a 32-bit pointer (compat_uptr_t) and a trailing u64.  The
 * "7 * sizeof(u32)" copies in amstream_set_sysinfo() depend on this
 * field order.
 */
struct dec_sysinfo32 {

	u32 format;

	u32 width;

	u32 height;

	u32 rate;

	u32 extra;

	u32 status;

	u32 ratio;

	compat_uptr_t param;

	u64 ratio64;
};

/* 32-bit userspace layout of struct am_ioctl_parm_ptr. */
struct am_ioctl_parm_ptr32 {
	union {
		compat_uptr_t pdata_audio_info;
		compat_uptr_t pdata_sub_info;
		compat_uptr_t pointer;
		char data[8];
	};
	u32 cmd;
	u32 len;
};

/*
 * Compat thunk for AMSTREAM_IOC_GET_PTR/SET_PTR: copy the 32-bit
 * parameter block in, rebuild a native struct am_ioctl_parm_ptr in
 * compat user space, and re-dispatch.  Note that any positive return
 * value from amstream_do_ioctl() is squashed to 0 here.
 */
static long amstream_ioc_setget_ptr(struct port_priv_s *priv,
	unsigned int cmd, struct am_ioctl_parm_ptr32 __user *arg)
{
	struct am_ioctl_parm_ptr __user *data;
	struct am_ioctl_parm_ptr32 param;
	int ret;

	if (copy_from_user(&param,
		(void __user *)arg,
		sizeof(struct am_ioctl_parm_ptr32)))
		return -EFAULT;

	data = compat_alloc_user_space(sizeof(*data));
	if (!access_ok(data, sizeof(*data)))
		return -EFAULT;

	/* widen the 32-bit user pointer with compat_ptr() */
	if (put_user(param.cmd, &data->cmd) ||
		put_user(compat_ptr(param.pointer), &data->pointer) ||
		put_user(param.len, &data->len))
		return -EFAULT;

	ret = amstream_do_ioctl(priv, cmd, (unsigned long)data);
	if (ret < 0)
		return ret;
	return 0;

}
/*
 * Compat thunk for AMSTREAM_IOC_SYSINFO: convert a 32-bit
 * struct dec_sysinfo32 into a native struct dec_sysinfo in compat user
 * space, run the ioctl, then copy the (possibly updated) fields back
 * to the 32-bit caller.  "7 * sizeof(u32)" spans the seven leading u32
 * members; only the embedded pointer needs widening.
 */
static long amstream_set_sysinfo(struct port_priv_s *priv,
	struct dec_sysinfo32 __user *arg)
{
	struct dec_sysinfo __user *data;
	struct dec_sysinfo32 __user *data32 = arg;
	int ret;
	struct dec_sysinfo32 param;

	if (copy_from_user(&param,
		(void __user *)arg,
		sizeof(struct dec_sysinfo32)))
		return -EFAULT;

	data = compat_alloc_user_space(sizeof(*data));
	if (!access_ok(data, sizeof(*data)))
		return -EFAULT;
	if (copy_in_user(data, data32, 7 * sizeof(u32)))
		return -EFAULT;
	if (put_user(compat_ptr(param.param), &data->param))
		return -EFAULT;
	if (copy_in_user(&data->ratio64, &data32->ratio64,
		sizeof(data->ratio64)))
		return -EFAULT;

	ret = amstream_do_ioctl(priv, AMSTREAM_IOC_SYSINFO,
		(unsigned long)data);
	if (ret < 0)
		return ret;

	/* mirror the results back into the 32-bit layout */
	if (copy_in_user(&arg->format, &data->format, 7 * sizeof(u32)) ||
		copy_in_user(&arg->ratio64, &data->ratio64,
			sizeof(arg->ratio64)))
		return -EFAULT;

	return 0;
}


/* 32-bit userspace layout of struct userdata_param_t. */
struct userdata_param32_t {
	uint32_t version;
	uint32_t instance_id; /*input, 0~9*/
	uint32_t buf_len; /*input*/
	uint32_t data_size; /*output*/
	compat_uptr_t pbuf_addr; /*input*/
	struct userdata_meta_info_t meta_info; /*output*/
};


/*
 * Compat thunk for AMSTREAM_IOC_UD_BUF_READ: same in/out conversion
 * pattern as amstream_set_sysinfo(), with the first four u32 fields
 * and meta_info copied both ways and pbuf_addr widened on the way in.
 */
static long amstream_ioc_get_userdata(struct port_priv_s *priv,
	struct userdata_param32_t __user *arg)
{
	struct userdata_param_t __user *data;
	struct userdata_param32_t __user *data32 = arg;
	int ret;
	struct userdata_param32_t param;


	if (copy_from_user(&param,
		(void __user *)arg,
		sizeof(struct userdata_param32_t)))
		return -EFAULT;

	data = compat_alloc_user_space(sizeof(*data));
	if (!access_ok(data, sizeof(*data)))
		return -EFAULT;

	if (copy_in_user(data, data32, 4 * sizeof(u32)))
		return -EFAULT;

	if (copy_in_user(&data->meta_info, &data32->meta_info,
		sizeof(data->meta_info)))
		return -EFAULT;

	if (put_user(compat_ptr(param.pbuf_addr), &data->pbuf_addr))
		return -EFAULT;

	ret = amstream_do_ioctl(priv, AMSTREAM_IOC_UD_BUF_READ,
		(unsigned long)data);
	if (ret < 0)
		return ret;

	if (copy_in_user(&data32->version, &data->version, 4 * sizeof(u32)) ||
		copy_in_user(&data32->meta_info, &data->meta_info,
			sizeof(data32->meta_info)))
		return -EFAULT;

	return 0;
}


/*
 * compat_ioctl entry: commands whose argument is a plain value or a
 * layout-compatible struct are re-dispatched directly; the three
 * pointer-carrying commands go through their conversion thunks.
 * NOTE(review): every switch arm returns (including default), so the
 * trailing "return r" and the local r are dead code.
 */
static long amstream_compat_ioctl(struct file *file,
	unsigned int cmd, ulong arg)
{
	s32 r = 0;
	struct port_priv_s *priv = (struct port_priv_s *)file->private_data;

	switch (cmd) {
	case AMSTREAM_IOC_GET_VERSION:
	case AMSTREAM_IOC_GET:
	case AMSTREAM_IOC_SET:
	case AMSTREAM_IOC_GET_EX:
	case AMSTREAM_IOC_SET_EX:
		return amstream_do_ioctl(priv, cmd, (ulong)compat_ptr(arg));
	case AMSTREAM_IOC_GET_PTR:
	case AMSTREAM_IOC_SET_PTR:
		return amstream_ioc_setget_ptr(priv, cmd, compat_ptr(arg));
	case AMSTREAM_IOC_SYSINFO:
		return amstream_set_sysinfo(priv, compat_ptr(arg));
	case AMSTREAM_IOC_UD_BUF_READ:
		return amstream_ioc_get_userdata(priv, compat_ptr(arg));
	default:
		return amstream_do_ioctl(priv, cmd, (ulong)compat_ptr(arg));
	}

	return r;
}
#endif

/*
 * sysfs "ports" attribute: dump every stream port's type and flag bits
 * plus the currently selected formats and stream ids, human-readable.
 */
static ssize_t ports_show(struct class *class, struct class_attribute *attr,
	char *buf)
{
	int i;
	char *pbuf = buf;
	struct stream_port_s *p = NULL;

	for (i = 0; i < amstream_port_num; i++) {
		p = &ports[i];
		/*name */
		pbuf += sprintf(pbuf, "%s\t:\n", p->name);
		/*type */
		pbuf += sprintf(pbuf, "\ttype:%d( ", p->type);
		if (p->type & PORT_TYPE_VIDEO)
			pbuf += sprintf(pbuf, "%s ", "Video");
		if (p->type & PORT_TYPE_AUDIO)
			pbuf += sprintf(pbuf, "%s ", "Audio");
		if (p->type & PORT_TYPE_MPTS)
			pbuf += sprintf(pbuf, "%s ", "TS");
		if (p->type & PORT_TYPE_MPPS)
			pbuf += sprintf(pbuf, "%s ", "PS");
		if (p->type & PORT_TYPE_ES)
			pbuf += sprintf(pbuf, "%s ", "ES");
		if (p->type & PORT_TYPE_RM)
			pbuf += sprintf(pbuf, "%s ", "RM");
		if (p->type & PORT_TYPE_SUB)
			pbuf += sprintf(pbuf, "%s ", "Subtitle");
		if (p->type & PORT_TYPE_SUB_RD)
			pbuf += sprintf(pbuf, "%s ", "Subtitle_Read");
		if (p->type & PORT_TYPE_USERDATA)
			pbuf +=
sprintf(pbuf, "%s ", "userdata"); + pbuf += sprintf(pbuf, ")\n"); + /*flag */ + pbuf += sprintf(pbuf, "\tflag:%d( ", p->flag); + if (p->flag & PORT_FLAG_IN_USE) + pbuf += sprintf(pbuf, "%s ", "Used"); + else + pbuf += sprintf(pbuf, "%s ", "Unused"); + if ((p->type & PORT_TYPE_VIDEO) == 0) { + if (p->flag & PORT_FLAG_INITED) + pbuf += sprintf(pbuf, "%s ", "inited"); + else + pbuf += sprintf(pbuf, "%s ", "uninited"); + } + pbuf += sprintf(pbuf, ")\n"); + /*others */ + pbuf += sprintf(pbuf, "\tVformat:%d\n", + (p->flag & PORT_FLAG_VFORMAT) ? p->vformat : -1); + pbuf += sprintf(pbuf, "\tAformat:%d\n", + (p->flag & PORT_FLAG_AFORMAT) ? p->aformat : -1); + pbuf += sprintf(pbuf, "\tVid:%d\n", + (p->flag & PORT_FLAG_VID) ? p->vid : -1); + pbuf += sprintf(pbuf, "\tAid:%d\n", + (p->flag & PORT_FLAG_AID) ? p->aid : -1); + pbuf += sprintf(pbuf, "\tSid:%d\n", + (p->flag & PORT_FLAG_SID) ? p->sid : -1); + pbuf += sprintf(pbuf, "\tPCRid:%d\n", + (p->pcr_inited == 1) ? p->pcrid : -1); + pbuf += sprintf(pbuf, "\tachannel:%d\n", p->achanl); + pbuf += sprintf(pbuf, "\tasamprate:%d\n", p->asamprate); + pbuf += sprintf(pbuf, "\tadatawidth:%d\n\n", p->adatawidth); + } + return pbuf - buf; +} + +static int show_vbuf_status_cb(struct stream_buf_s *p, char *buf) +{ + char *pbuf = buf; + + if (!p->buf_start) + return 0; + /*type */ + pbuf += sprintf(pbuf, "Video-%d buffer:", p->id); + /*flag */ + pbuf += sprintf(pbuf, "\tflag:%d( ", p->flag); + if (p->flag & BUF_FLAG_ALLOC) + pbuf += sprintf(pbuf, "%s ", "Alloc"); + else + pbuf += sprintf(pbuf, "%s ", "Unalloc"); + if (p->flag & BUF_FLAG_IN_USE) + pbuf += sprintf(pbuf, "%s ", "Used"); + else + pbuf += sprintf(pbuf, "%s ", "Noused"); + if (p->flag & BUF_FLAG_PARSER) + pbuf += sprintf(pbuf, "%s ", "Parser"); + else + pbuf += sprintf(pbuf, "%s ", "noParser"); + if (p->flag & BUF_FLAG_FIRST_TSTAMP) + pbuf += sprintf(pbuf, "%s ", "firststamp"); + else + pbuf += sprintf(pbuf, "%s ", "nofirststamp"); + pbuf += sprintf(pbuf, ")\n"); + + /*buf stats 
*/ + pbuf += sprintf(pbuf, "\tbuf addr:%p\n", (void *)p->buf_start); + pbuf += sprintf(pbuf, "\tbuf size:%#x\n", p->buf_size); + pbuf += sprintf(pbuf, "\tbuf canusesize:%#x\n", p->canusebuf_size); + pbuf += sprintf(pbuf, "\tbuf regbase:%#lx\n", p->reg_base); + + if (p->reg_base && p->flag & BUF_FLAG_IN_USE) { + pbuf += sprintf(pbuf, "\tbuf level:%#x\n", + stbuf_level(p)); + pbuf += sprintf(pbuf, "\tbuf space:%#x\n", + stbuf_space(p)); + pbuf += sprintf(pbuf, "\tbuf read pointer:%#x\n", + stbuf_rp(p)); + } else + pbuf += sprintf(pbuf, "\tbuf no used.\n"); + + return pbuf - buf; +} + +static ssize_t bufs_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + int i; + char *pbuf = buf; + struct stream_buf_s *p = NULL; + char buf_type[][12] = { "Video", "Audio", "Subtitle", + "UserData", "HEVC" }; + + for (i = 0; i < amstream_buf_num; i++) { + p = &bufs[i]; + + if (!p->buf_start) + continue; + + /*type */ + pbuf += sprintf(pbuf, "%s buffer:", buf_type[p->type]); + /*flag */ + pbuf += sprintf(pbuf, "\tflag:%d( ", p->flag); + if (p->flag & BUF_FLAG_ALLOC) + pbuf += sprintf(pbuf, "%s ", "Alloc"); + else + pbuf += sprintf(pbuf, "%s ", "Unalloc"); + if (p->flag & BUF_FLAG_IN_USE) + pbuf += sprintf(pbuf, "%s ", "Used"); + else + pbuf += sprintf(pbuf, "%s ", "Noused"); + if (p->flag & BUF_FLAG_PARSER) + pbuf += sprintf(pbuf, "%s ", "Parser"); + else + pbuf += sprintf(pbuf, "%s ", "noParser"); + if (p->flag & BUF_FLAG_FIRST_TSTAMP) + pbuf += sprintf(pbuf, "%s ", "firststamp"); + else + pbuf += sprintf(pbuf, "%s ", "nofirststamp"); + pbuf += sprintf(pbuf, ")\n"); + /*buf stats */ + + pbuf += sprintf(pbuf, "\tbuf addr:%p\n", (void *)p->buf_start); + + if (p->type != BUF_TYPE_SUBTITLE) { + pbuf += sprintf(pbuf, "\tbuf size:%#x\n", p->buf_size); + pbuf += sprintf(pbuf, + "\tbuf canusesize:%#x\n", + p->canusebuf_size); + pbuf += sprintf(pbuf, + "\tbuf regbase:%#lx\n", p->reg_base); + + if (p->reg_base && p->flag & BUF_FLAG_IN_USE) { + if (get_cpu_major_id() >= 
AM_MESON_CPU_MAJOR_ID_M6) { + /* TODO: mod gate */ + /* switch_mod_gate_by_name("vdec", 1);*/ + amports_switch_gate("vdec", 1); + } + pbuf += sprintf(pbuf, "\tbuf level:%#x\n", + stbuf_level(p)); + pbuf += sprintf(pbuf, "\tbuf space:%#x\n", + stbuf_space(p)); + pbuf += sprintf(pbuf, + "\tbuf read pointer:%#x\n", + stbuf_rp(p)); + if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) { + /* TODO: mod gate */ + /* switch_mod_gate_by_name("vdec", 0);*/ + amports_switch_gate("vdec", 0); + } + } else + pbuf += sprintf(pbuf, "\tbuf no used.\n"); + + if (p->type == BUF_TYPE_USERDATA) { + pbuf += sprintf(pbuf, + "\tbuf write pointer:%#x\n", + p->buf_wp); + pbuf += sprintf(pbuf, + "\tbuf read pointer:%#x\n", + p->buf_rp); + } + } else { + u32 sub_wp, sub_rp, data_size; + + sub_wp = stbuf_sub_wp_get(); + sub_rp = stbuf_sub_rp_get(); + if (sub_wp >= sub_rp) + data_size = sub_wp - sub_rp; + else + data_size = p->buf_size - sub_rp + sub_wp; + pbuf += sprintf(pbuf, "\tbuf size:%#x\n", p->buf_size); + pbuf += + sprintf(pbuf, "\tbuf canusesize:%#x\n", + p->canusebuf_size); + pbuf += + sprintf(pbuf, "\tbuf start:%#x\n", + stbuf_sub_start_get()); + pbuf += sprintf(pbuf, + "\tbuf write pointer:%#x\n", sub_wp); + pbuf += sprintf(pbuf, + "\tbuf read pointer:%#x\n", sub_rp); + pbuf += sprintf(pbuf, "\tbuf level:%#x\n", data_size); + } + + pbuf += sprintf(pbuf, "\tbuf first_stamp:%#x\n", + p->first_tstamp); + pbuf += sprintf(pbuf, "\tbuf wcnt:%#x\n\n", p->wcnt); + pbuf += sprintf(pbuf, "\tbuf max_buffer_delay_ms:%dms\n", + p->max_buffer_delay_ms); + + if (p->reg_base && p->flag & BUF_FLAG_IN_USE) { + int calc_delayms = 0; + u32 bitrate = 0, avg_bitrate = 0; + + calc_delayms = calculation_stream_delayed_ms( + (p->type == BUF_TYPE_AUDIO) ? 
				PTS_TYPE_AUDIO :
				PTS_TYPE_VIDEO,
				&bitrate,
				&avg_bitrate);

			if (calc_delayms >= 0) {
				pbuf += sprintf(pbuf,
					"\tbuf current delay:%dms\n",
					calc_delayms);
				pbuf += sprintf(pbuf,
					"\tbuf bitrate latest:%dbps,avg:%dbps\n",
					bitrate, avg_bitrate);
				pbuf += sprintf(pbuf,
					"\tbuf time after last pts:%d ms\n",
					calculation_stream_ext_delayed_ms
					((p->type == BUF_TYPE_AUDIO) ? PTS_TYPE_AUDIO :
					PTS_TYPE_VIDEO));

				pbuf += sprintf(pbuf,
					"\tbuf time after last write data :%d ms\n",
					(int)(jiffies_64 -
					p->last_write_jiffies64) * 1000 / HZ);
			}
		}
		/* thread-rw statistics, only when a write thread is attached */
		if (p->write_thread) {
			pbuf += sprintf(pbuf,
				"\twrite thread:%d/%d,fifo %d:%d,passed:%d\n",
				threadrw_buffer_level(p),
				threadrw_buffer_size(p),
				threadrw_datafifo_len(p),
				threadrw_freefifo_len(p),
				threadrw_passed_len(p)
				);
		}
	}

	pbuf += show_stream_buffer_status(pbuf, show_vbuf_status_cb);

	return pbuf - buf;
}

/*
 * sysfs "videobufused": report whether the first stream buffer is in
 * use.  NOTE(review): sprintf() writes two bytes ("0 " or "1 ") but
 * the function returns 1, so only the digit reaches userspace --
 * confirm whether "return pbuf - buf" was intended.
 */
static ssize_t videobufused_show(struct class *class,
	struct class_attribute *attr, char *buf)
{
	char *pbuf = buf;
	struct stream_buf_s *p = NULL;

	p = &bufs[0];

	if (p->flag & BUF_FLAG_IN_USE)
		pbuf += sprintf(pbuf, "%d ", 1);
	else
		pbuf += sprintf(pbuf, "%d ", 0);
	return 1;
}

/* sysfs pass-through to the decoder profile table. */
static ssize_t vcodec_profile_show(struct class *class,
	struct class_attribute *attr, char *buf)
{
	return vcodec_profile_read(buf);
}

/* sysfs pass-through to the decoder feature table. */
static ssize_t vcodec_feature_show(struct class *class,
	struct class_attribute *attr, char *buf)
{
	return vcodec_feature_read(buf);
}

/*
 * Recompute every stream buffer's usable size as levelx10000/10000 of
 * its total size, rounded up to a 1 KiB multiple and capped at the
 * full buffer size.  Out-of-range inputs fall back to 10000 (use all).
 */
static int reset_canuse_buferlevel(int levelx10000)
{
	int i;
	struct stream_buf_s *p = NULL;

	if (levelx10000 >= 0 && levelx10000 <= 10000)
		use_bufferlevelx10000 = levelx10000;
	else
		use_bufferlevelx10000 = 10000;
	for (i = 0; i < amstream_buf_num; i++) {
		p = &bufs[i];
		p->canusebuf_size = ((p->buf_size / 1024) *
			use_bufferlevelx10000 / 10000) * 1024;
		p->canusebuf_size += 1023;
		p->canusebuf_size &= ~1023;
		if (p->canusebuf_size > p->buf_size)
			p->canusebuf_size =
p->buf_size;
	}
	return 0;
}

/* sysfs "canuse_buferlevel" read: report the current ratio. */
static ssize_t canuse_buferlevel_show(struct class *class,
	struct class_attribute *attr, char *buf)
{
	ssize_t size = sprintf(buf,
		"use_bufferlevel=%d/10000[=(set range[ 0~10000])=\n",
		use_bufferlevelx10000);
	return size;
}

/*
 * sysfs "canuse_buferlevel" write: parse an integer and apply it via
 * reset_canuse_buferlevel().
 * NOTE(review): val is declared unsigned int but passed to kstrtoint()
 * (which takes an int *) -- kstrtouint() would match the type exactly.
 * The "(void)val;" is a no-op since val is used right after it.
 */
static ssize_t canuse_buferlevel_store(struct class *class,
	struct class_attribute *attr,
	const char *buf, size_t size)
{
	unsigned int val;
	ssize_t ret;

	/*ret = sscanf(buf, "%d", &val);*/
	ret = kstrtoint(buf, 0, &val);

	if (ret != 0)
		return -EINVAL;
	(void)val;
	reset_canuse_buferlevel(val);
	return size;
}

/* sysfs write: apply one max-delay value (ms) to every stream buffer. */
static ssize_t max_buffer_delay_ms_store(struct class *class,
	struct class_attribute *attr,
	const char *buf, size_t size)
{
	unsigned int val;
	ssize_t ret;
	int i;

	/*ret = sscanf(buf, "%d", &val);*/
	ret = kstrtoint(buf, 0, &val);
	if (ret != 0)
		return -EINVAL;
	for (i = 0; i < amstream_buf_num; i++)
		bufs[i].max_buffer_delay_ms = val;
	return size;
}

/*
 * sysfs read of the video/audio max buffered delay.
 * NOTE(review): both sprintf() calls write to buf (not buf + size), so
 * the audio line overwrites the video line while the returned size
 * still counts both -- confirm whether appending was intended.
 */
static ssize_t max_buffer_delay_ms_show(struct class *class,
	struct class_attribute *attr,
	char *buf)
{
	ssize_t size = 0;

	size += sprintf(buf, "%dms video max buffered data delay ms\n",
		bufs[0].max_buffer_delay_ms);
	size += sprintf(buf, "%dms audio max buffered data delay ms\n",
		bufs[1].max_buffer_delay_ms);
	return size;
}

/*
 * sysfs write "1": under amstream_mutex, reset the audio path of any
 * MPTS port that currently has an audio format configured.  Any value
 * other than 1 is rejected.
 */
static ssize_t reset_audio_port_store(struct class *class,
	struct class_attribute *attr,
	const char *buf, size_t size)
{
	unsigned int val = 0;
	int i;
	ssize_t ret;
	struct stream_buf_s *pabuf = &bufs[BUF_TYPE_AUDIO];
	struct stream_port_s *this;

	ret = kstrtoint(buf, 0, &val);
	if (ret != 0)
		return -EINVAL;
	if (val != 1)
		return -EINVAL;
	mutex_lock(&amstream_mutex);
	for (i = 0; i < MAX_AMSTREAM_PORT_NUM; i++) {
		if (strcmp(ports[i].name, "amstream_mpts") == 0 ||
			strcmp(ports[i].name, "amstream_mpts_sched") == 0) {
			this = &ports[i];
			if ((this->flag & PORT_FLAG_AFORMAT) != 0) {
				pr_info("audio_port_reset %s\n",
ports[i].name); + audio_port_reset(this, pabuf); + } + } + } + mutex_unlock(&amstream_mutex); + return size; +} + +ssize_t dump_stream_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + char *p_buf = buf; + + p_buf += sprintf(p_buf, "\nmdkir -p /data/tmp -m 777;setenforce 0;\n\n"); + p_buf += sprintf(p_buf, "video:\n\t echo 0 > /sys/class/amstream/dump_stream;\n"); + p_buf += sprintf(p_buf, "hevc :\n\t echo 4 > /sys/class/amstream/dump_stream;\n"); + + return p_buf - buf; +} + +#define DUMP_STREAM_FILE "/data/tmp/dump_stream.h264" +ssize_t dump_stream_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + struct stream_buf_s *p_buf; + int ret = 0, id = 0; + unsigned int stride, remain, level, vmap_size; + int write_size; + void *stbuf_vaddr; + unsigned long offset; + struct file *fp; + mm_segment_t old_fs; + loff_t fpos; + + ret = sscanf(buf, "%d", &id); + if (ret < 0) { + pr_info("paser buf id fail, default id = 0\n"); + id = 0; + } + if (id != BUF_TYPE_VIDEO && id != BUF_TYPE_HEVC) { + pr_info("buf id out of range, max %d, id %d, set default id 0\n", BUF_MAX_NUM - 1, id); + id = 0; + } + p_buf = get_stream_buffer(id); + if (!p_buf) { + pr_info("get buf fail, id %d\n", id); + return size; + } + if ((!p_buf->buf_size) || (p_buf->is_secure) || (!(p_buf->flag & BUF_FLAG_IN_USE))) { + pr_info("buf size %d, is_secure %d, in_use %d, it can not dump\n", + p_buf->buf_size, p_buf->is_secure, (p_buf->flag & BUF_FLAG_IN_USE)); + return size; + } + + level = stbuf_level(p_buf); + if (!level || level > p_buf->buf_size) { + pr_info("stream buf level %d, buf size %d, error return\n", level, p_buf->buf_size); + return size; + } + + fp = filp_open(DUMP_STREAM_FILE, O_CREAT | O_RDWR, 0666); + if (IS_ERR(fp)) { + fp = NULL; + pr_info("create dump stream file failed\n"); + return size; + } + + offset = p_buf->buf_start; + remain = level; + stride = SZ_1M; + vmap_size = 0; + fpos = 0; + pr_info("create file success, it 
will dump from addr 0x%lx, size 0x%x\n", offset, remain); + while (remain > 0) { + if (remain > stride) + vmap_size = stride; + else { + stride = remain; + vmap_size = stride; + } + + stbuf_vaddr = codec_mm_vmap(offset, vmap_size); + if (stbuf_vaddr == NULL) { + stride >>= 1; + pr_info("vmap fail change vmap stide size 0x%x\n", stride); + continue; + } + codec_mm_dma_flush(stbuf_vaddr, vmap_size, DMA_FROM_DEVICE); + + old_fs = get_fs(); + set_fs(KERNEL_DS); + write_size = vfs_write(fp, stbuf_vaddr, vmap_size, &fpos); + if (write_size < vmap_size) { + write_size += vfs_write(fp, stbuf_vaddr + write_size, vmap_size - write_size, &fpos); + pr_info("fail write retry, total %d, write %d\n", vmap_size, write_size); + if (write_size < vmap_size) { + pr_info("retry fail, interrupt dump stream, break\n"); + set_fs(old_fs); + break; + } + } + set_fs(old_fs); + vfs_fsync(fp, 0); + pr_info("vmap_size 0x%x dump size 0x%x\n", vmap_size, write_size); + + offset += vmap_size; + remain -= vmap_size; + codec_mm_unmap_phyaddr(stbuf_vaddr); + } + + filp_close(fp, current->files); + pr_info("dump stream buf end\n"); + + return size; +} + +static CLASS_ATTR_RO(ports); +static CLASS_ATTR_RO(bufs); +static CLASS_ATTR_RO(vcodec_profile); +static CLASS_ATTR_RO(vcodec_feature); +static CLASS_ATTR_RO(videobufused); +static CLASS_ATTR_RW(canuse_buferlevel); +static CLASS_ATTR_RW(max_buffer_delay_ms); +static CLASS_ATTR_WO(reset_audio_port); + +static struct attribute *amstream_class_attrs[] = { + &class_attr_ports.attr, + &class_attr_bufs.attr, + &class_attr_vcodec_profile.attr, + &class_attr_vcodec_feature.attr, + &class_attr_videobufused.attr, + &class_attr_canuse_buferlevel.attr, + &class_attr_max_buffer_delay_ms.attr, + &class_attr_reset_audio_port.attr, + NULL +}; + +ATTRIBUTE_GROUPS(amstream_class); + +static struct class amstream_class = { + .name = "amstream", + .class_groups = amstream_class_groups, +}; + +int amstream_request_firmware_from_sys(const char *file_name, + char *buf, int 
size) +{ + const struct firmware *firmware; + int err = 0; + struct device *micro_dev; + + pr_info("try load %s ...", file_name); + micro_dev = device_create(&amstream_class, + NULL, MKDEV(AMSTREAM_MAJOR, 100), + NULL, "videodec"); + if (micro_dev == NULL) { + pr_err("device_create failed =%d\n", err); + return -1; + } + err = request_firmware(&firmware, file_name, micro_dev); + if (err < 0) { + pr_err("can't load the %s,err=%d\n", file_name, err); + goto error1; + } + if (firmware->size > size) { + pr_err("not enough memory size for audiodsp code\n"); + err = -ENOMEM; + goto release; + } + + memcpy(buf, (char *)firmware->data, firmware->size); + /*mb(); don't need it*/ + pr_err("load mcode size=%zd\n mcode name %s\n", firmware->size, + file_name); + err = firmware->size; +release: + release_firmware(firmware); +error1: + device_destroy(&amstream_class, MKDEV(AMSTREAM_MAJOR, 100)); + return err; +} + +int videobufused_show_fun(const char *trigger, int id, char *sbuf, int size) +{ + int ret = -1; + void *buf, *getbuf = NULL; + if (size < PAGE_SIZE) { + getbuf = (void *)__get_free_page(GFP_KERNEL); + if (!getbuf) + return -ENOMEM; + buf = getbuf; + } else { + buf = sbuf; + } + + switch (id) { + case 0: + ret = videobufused_show(NULL, NULL , buf); + break; + default: + ret = -1; + } + if (ret > 0 && getbuf != NULL) { + ret = min_t(int, ret, size); + strncpy(sbuf, buf, ret); + } + if (getbuf != NULL) + free_page((unsigned long)getbuf); + return ret; +} + +static struct mconfig amports_configs[] = { + MC_PI32("def_4k_vstreambuf_sizeM", &def_4k_vstreambuf_sizeM), + MC_PI32("def_vstreambuf_sizeM", &def_vstreambuf_sizeM), + MC_PI32("slow_input", &slow_input), + MC_FUN_ID("videobufused", videobufused_show_fun, NULL, 0), +}; + + + +/*static struct resource memobj;*/ +static int amstream_probe(struct platform_device *pdev) +{ + int i; + int r; + struct stream_port_s *st; + + pr_err("Amlogic A/V streaming port init\n"); + + amstream_port_num = MAX_AMSTREAM_PORT_NUM; + 
amstream_buf_num = BUF_MAX_NUM; +/* + * r = of_reserved_mem_device_init(&pdev->dev); + * if (r == 0) + * pr_info("of probe done"); + * else { + * r = -ENOMEM; + * return r; + * } + */ + r = class_register(&amstream_class); + if (r) { + pr_err("amstream class create fail.\n"); + return r; + } + + r = astream_dev_register(); + if (r) + return r; + + r = register_chrdev(AMSTREAM_MAJOR, "amstream", &amstream_fops); + if (r < 0) { + pr_err("Can't allocate major for amstreaming device\n"); + + goto error2; + } + + amstream_dev_class = class_create(THIS_MODULE, DEVICE_NAME); + + for (st = &ports[0], i = 0; i < amstream_port_num; i++, st++) { + st->class_dev = device_create(amstream_dev_class, NULL, + MKDEV(AMSTREAM_MAJOR, i), NULL, + ports[i].name); + } + + amstream_adec_status = NULL; + if (tsdemux_class_register() != 0) { + r = (-EIO); + goto error3; + } + tsdemux_tsync_func_init(); + init_waitqueue_head(&amstream_sub_wait); + init_waitqueue_head(&amstream_userdata_wait); + reset_canuse_buferlevel(10000); + amstream_pdev = pdev; + amports_clock_gate_init(&amstream_pdev->dev); + + /*prealloc fetch buf to avoid no continue buffer later...*/ + stbuf_fetch_init(); + REG_PATH_CONFIGS("media.amports", amports_configs); + + amstream_userdata_init(); + /* poweroff the decode core because dos can not be reset when reboot */ + if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_G12A) + vdec_power_reset(); + + return 0; + + /* + * error4: + * tsdemux_class_unregister(); + */ +error3: + for (st = &ports[0], i = 0; i < amstream_port_num; i++, st++) + device_destroy(amstream_dev_class, MKDEV(AMSTREAM_MAJOR, i)); + class_destroy(amstream_dev_class); +error2: + unregister_chrdev(AMSTREAM_MAJOR, "amstream"); + /* error1: */ + astream_dev_unregister(); + return r; +} + +static int amstream_remove(struct platform_device *pdev) +{ + int i; + struct stream_port_s *st; + + if (bufs[BUF_TYPE_AUDIO].flag & BUF_FLAG_ALLOC) + stbuf_change_size(&bufs[BUF_TYPE_AUDIO], 0, false); + 
stbuf_fetch_release();
	tsdemux_class_unregister();
	for (st = &ports[0], i = 0; i < amstream_port_num; i++, st++)
		device_destroy(amstream_dev_class, MKDEV(AMSTREAM_MAJOR, i));

	class_destroy(amstream_dev_class);

	unregister_chrdev(AMSTREAM_MAJOR, "amstream");

	class_unregister(&amstream_class);

	astream_dev_unregister();

	amstream_adec_status = NULL;

	pr_err("Amlogic A/V streaming port release\n");

	return 0;
}

/* Install the audio-decoder status callback used by this driver. */
void set_adec_func(int (*adec_func)(struct adec_status *))
{
	amstream_adec_status = adec_func;
}

/* Flag newly arrived subtitle data and wake any poll() waiters. */
void wakeup_sub_poll(void)
{
	atomic_inc(&subdata_ready);
	wake_up_interruptible(&amstream_sub_wait);
}

/* Return the subtitle type last stored in sub_type. */
int get_sub_type(void)
{
	return sub_type;
}

/* Return the audio-reset counter/state kept by this driver. */
u32 get_audio_reset(void)
{
	return amstream_audio_reset;
}

/*get pes buffers */

/*
 * Look up a stream buffer by id; returns NULL for out-of-range ids.
 * NOTE(review): only the upper bound is checked -- a negative id would
 * index before the array; confirm callers never pass one.
 */
struct stream_buf_s *get_stream_buffer(int id)
{
	if (id >= BUF_MAX_NUM)
		return 0;
	return &bufs[id];
}
EXPORT_SYMBOL(get_stream_buffer);
static const struct of_device_id amlogic_mesonstream_dt_match[] = {
	{
		.compatible = "amlogic, codec, streambuf",
	},
	{},
};

static struct platform_driver amstream_driver = {
	.probe = amstream_probe,
	.remove = amstream_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "mesonstream",
		.of_match_table = amlogic_mesonstream_dt_match,
	}
};

/*
 * Module entry: register the platform driver, then the subtitle layer.
 * NOTE(review): if subtitle_init() fails, the platform driver is left
 * registered on the error return -- confirm whether it should be
 * unregistered on that path.
 */
static int __init amstream_module_init(void)
{
	if (platform_driver_register(&amstream_driver)) {
		pr_err("failed to register amstream module\n");
		return -ENODEV;
	}

	if (subtitle_init()) {
		pr_err("failed to init subtitle\n");
		return -ENODEV;
	}

	return 0;
}

static void __exit amstream_module_exit(void)
{
	platform_driver_unregister(&amstream_driver);
	subtitle_exit();
}

module_init(amstream_module_init);
module_exit(amstream_module_exit);

module_param(force_dv_mode, uint, 0664);
MODULE_PARM_DESC(force_dv_mode,
	"\n force_dv_mode \n");

module_param(def_4k_vstreambuf_sizeM, uint, 0664);
MODULE_PARM_DESC(def_4k_vstreambuf_sizeM,
	"\nDefault 
video Stream buf size for 4K MByptes\n"); + +module_param(def_vstreambuf_sizeM, uint, 0664); +MODULE_PARM_DESC(def_vstreambuf_sizeM, + "\nDefault video Stream buf size for < 1080p MByptes\n"); + +module_param(slow_input, uint, 0664); +MODULE_PARM_DESC(slow_input, "\n amstream slow_input\n"); + + +MODULE_DESCRIPTION("AMLOGIC streaming port driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
diff --git a/drivers/stream_input/amports/stream_buffer_base.c b/drivers/stream_input/amports/stream_buffer_base.c new file mode 100644 index 0000000..71df8e4 --- /dev/null +++ b/drivers/stream_input/amports/stream_buffer_base.c
@@ -0,0 +1,235 @@ +/* + * drivers/amlogic/media/stream_input/parser/stream_buffer_base.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/uaccess.h> +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include "../../frame_provider/decoder/utils/vdec.h" +#include "amports_priv.h" +#include "stream_buffer_base.h" +#include "thread_rw.h" + +#define DEFAULT_VIDEO_BUFFER_SIZE (1024 * 1024 * 3) +#define DEFAULT_VIDEO_BUFFER_SIZE_4K (1024 * 1024 * 6) +#define DEFAULT_VIDEO_BUFFER_SIZE_TVP (1024 * 1024 * 10) +#define DEFAULT_VIDEO_BUFFER_SIZE_4K_TVP (1024 * 1024 * 15) + +static struct stream_buf_s vdec_buf_def = { + .reg_base = VLD_MEM_VIFIFO_REG_BASE, + .type = BUF_TYPE_VIDEO, + .buf_start = 0, + .buf_size = DEFAULT_VIDEO_BUFFER_SIZE, + .default_buf_size = DEFAULT_VIDEO_BUFFER_SIZE, + .first_tstamp = INVALID_PTS +}; + +static struct stream_buf_s hevc_buf_def = { + .reg_base = HEVC_STREAM_REG_BASE, + .type = BUF_TYPE_HEVC, + .buf_start = 0, + .buf_size = DEFAULT_VIDEO_BUFFER_SIZE_4K, + .default_buf_size = DEFAULT_VIDEO_BUFFER_SIZE_4K, + .first_tstamp = INVALID_PTS +}; + +static struct stream_buf_s 
*get_def_parms(int f) +{ + switch (f) { + case VFORMAT_HEVC: + case VFORMAT_AVS2: + case VFORMAT_AV1: + case VFORMAT_VP9: + return &hevc_buf_def; + default: + return &vdec_buf_def; + } +} + +int stream_buffer_base_init(struct stream_buf_s *stbuf, + struct stream_buf_ops *ops, + struct parser_args *pars) +{ + struct vdec_s *vdec = + container_of(stbuf, struct vdec_s, vbuf); + struct stream_port_s *port = NULL; + u32 format, width, height; + + /* sanity check. */ + if (WARN_ON(!stbuf) || WARN_ON(!ops)) + return -EINVAL; + + port = vdec->port; + format = vdec->port->vformat; + width = vdec->sys_info->width; + height = vdec->sys_info->height; + + if (!stbuf->ext_buf_addr) { + memcpy(stbuf, get_def_parms(format), + sizeof(*stbuf)); + } + + stbuf->id = vdec->id; + stbuf->is_hevc = ((format == VFORMAT_HEVC) || + (format == VFORMAT_AVS2) || + (format == VFORMAT_AV1) || + (format == VFORMAT_VP9)); + stbuf->for_4k = ((width * height) > + (1920 * 1088)) ? 1 : 0; + stbuf->is_multi_inst = !vdec_single(vdec); + memcpy(&stbuf->pars, pars, sizeof(*pars)); + + /* register ops func. 
*/ + stbuf->ops = ops; + + return 0; +} +EXPORT_SYMBOL(stream_buffer_base_init); + +void stream_buffer_set_ext_buf(struct stream_buf_s *stbuf, + ulong addr, + u32 size, + u32 flag) +{ + stbuf->ext_buf_addr = addr; + stbuf->buf_size = size; + stbuf->is_secure = ((flag & STBUF_META_FLAG_SECURE) != 0); + stbuf->use_ptsserv = ((flag & STBUF_META_FLAG_PTS_SERV) != 0); + /* + pr_debug("%s, addr %lx, size 0x%x, secure %d\n", __func__, + stbuf->ext_buf_addr, stbuf->buf_size, stbuf->is_secure); + */ +} +EXPORT_SYMBOL(stream_buffer_set_ext_buf); + +void stream_buffer_meta_write(struct stream_buf_s *stbuf, + struct stream_buffer_metainfo *meta) +{ + u32 wp = stbuf->ops->get_wp(stbuf); + + if ((stbuf->stream_offset == 0) && + (wp == stbuf->ext_buf_addr) && + (meta->stbuf_pktaddr > stbuf->ext_buf_addr)) { + struct vdec_s *vdec = container_of(stbuf, struct vdec_s, vbuf); + u32 first_ptr; + u32 round_down_size = 0; + + /*RP max alignment requirement*/ + if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) + round_down_size = 0x80; + else if (vdec->input.target == VDEC_INPUT_TARGET_VLD) + round_down_size = 0x100; + + if (stbuf->ext_buf_addr > (meta->stbuf_pktaddr - round_down_size)) + first_ptr = stbuf->ext_buf_addr; + else { + first_ptr = round_down(meta->stbuf_pktaddr, round_down_size); + pr_info("warn: first packet_wp(%x round_down %x) is not stbuf start addr(%lx)\n", + meta->stbuf_pktaddr, first_ptr, stbuf->ext_buf_addr); + } + + stbuf->ops->set_wp(stbuf, first_ptr); + stbuf->ops->set_rp(stbuf, first_ptr); + vdec->input.swap_rp = first_ptr; + if (vdec->slave) + vdec->slave->input.swap_rp = first_ptr; + if (vdec->input.target != VDEC_INPUT_TARGET_HEVC) + stbuf->stream_offset += (meta->stbuf_pktaddr - stbuf->ext_buf_addr); + else + stbuf->stream_offset += (meta->stbuf_pktaddr - first_ptr); + + } + + if (meta->stbuf_pktaddr + meta->stbuf_pktsize < stbuf->buf_start + stbuf->buf_size) + wp = meta->stbuf_pktaddr + meta->stbuf_pktsize; + else + wp = meta->stbuf_pktaddr + 
meta->stbuf_pktsize - stbuf->buf_size; + + stbuf->ops->set_wp(stbuf, wp); + + stbuf->stream_offset += meta->stbuf_pktsize; + stbuf->last_offset[stbuf->write_count % 2] = stbuf->stream_offset; + stbuf->write_count++; + /* + pr_debug("%s, update wp 0x%x + sz 0x%x --> 0x%x, stream_offset 0x%x\n", + __func__, meta->stbuf_pktaddr, meta->stbuf_pktsize, wp, stbuf->stream_offset); + */ +} +EXPORT_SYMBOL(stream_buffer_meta_write); + +ssize_t stream_buffer_write_ex(struct file *file, + struct stream_buf_s *stbuf, + const char __user *buf, + size_t count, int flags) +{ + int r; + u32 len = count; + + if (buf == NULL || count == 0) + return -EINVAL; + + if (stbuf_space(stbuf) < count) { + if ((flags & 2) || ((file != NULL) && + (file->f_flags & O_NONBLOCK))) { + len = stbuf_space(stbuf); + if (len < 256) /* <1k.do eagain, */ + return -EAGAIN; + } else { + len = min(stbuf_canusesize(stbuf) / 8, len); + if (stbuf_space(stbuf) < len) { + r = stbuf_wait_space(stbuf, len); + if (r < 0) + return r; + } + } + } + + stbuf->last_write_jiffies64 = jiffies_64; + stbuf->is_phybuf = (flags & 1); + + len = min_t(u32, len, count); + + r = stbuf->ops->write(stbuf, buf, len); + if (r > 0) + stbuf->stream_offset += r; + + return r; +} +EXPORT_SYMBOL(stream_buffer_write_ex); + +int stream_buffer_write(struct file *file, + struct stream_buf_s *stbuf, + const char *buf, + size_t count) +{ + if (stbuf->write_thread) + return threadrw_write(file, stbuf, buf, count); + else + return stream_buffer_write_ex(file, stbuf, buf, count, 0); +} +EXPORT_SYMBOL(stream_buffer_write); +
diff --git a/drivers/stream_input/amports/stream_buffer_base.h b/drivers/stream_input/amports/stream_buffer_base.h new file mode 100644 index 0000000..3360e64 --- /dev/null +++ b/drivers/stream_input/amports/stream_buffer_base.h
@@ -0,0 +1,66 @@ +/* + * drivers/amlogic/media/stream_input/parser/stream_buffer_base.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef STREAM_BUFFER_INTERFACE_H +#define STREAM_BUFFER_INTERFACE_H +#include "streambuf.h" +#include "streambuf_reg.h" + +#define STBUF_READ(s, func, args...) \ +({ \ + u32 ret = 0; \ + if ((s) && (s)->ops) \ + ret = (s)->ops->func((s), ##args); \ + ret; \ +}) + +#define STBUF_WRITE(s, func, args...) \ +({ \ + if ((s) && (s)->ops) \ + (s)->ops->func((s), ##args); \ +}) + +extern struct stream_buf_ops *get_stbuf_ops(void); +extern struct stream_buf_ops *get_esparser_stbuf_ops(void); +extern struct stream_buf_ops *get_tsparser_stbuf_ops(void); +extern struct stream_buf_ops *get_psparser_stbuf_ops(void); + +int stream_buffer_base_init(struct stream_buf_s *stbuf, + struct stream_buf_ops *ops, + struct parser_args *pars); + +void stream_buffer_set_ext_buf(struct stream_buf_s *stbuf, + ulong addr, + u32 size, + u32 flag); + +int stream_buffer_write(struct file *file, + struct stream_buf_s *stbuf, + const char *buf, + size_t count); + +ssize_t stream_buffer_write_ex(struct file *file, + struct stream_buf_s *stbuf, + const char __user *buf, + size_t count, + int flags); + +void stream_buffer_meta_write(struct stream_buf_s *stbuf, + struct stream_buffer_metainfo *meta); + +#endif /* STREAM_BUFFER_INTERFACE_H */ +
diff --git a/drivers/stream_input/amports/stream_buffer_interface.c b/drivers/stream_input/amports/stream_buffer_interface.c new file mode 100644 index 0000000..64a14ee --- /dev/null +++ b/drivers/stream_input/amports/stream_buffer_interface.c
@@ -0,0 +1,326 @@ +/* + * drivers/amlogic/media/stream_input/parser/stream_bufffer_interface.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/uaccess.h> +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/frame_sync/tsync_pcr.h> +#include "../../frame_provider/decoder/utils/vdec.h" +#include "../../common/chips/decoder_cpu_ver_info.h" +#include "stream_buffer_base.h" +#include "amports_priv.h" +#include "thread_rw.h" + +#define MEM_NAME "stbuf" +#define MAP_RANGE (SZ_1M) + +static void stream_buffer_release(struct stream_buf_s *stbuf); + +static const char *type_to_str(int t) +{ + switch (t) { + case BUF_TYPE_VIDEO: + return "VIDEO"; + case BUF_TYPE_AUDIO: + return "AUDIO"; + case BUF_TYPE_SUBTITLE: + return "SUB"; + case BUF_TYPE_USERDATA: + return "USER"; + case BUF_TYPE_HEVC: + return "HEVC"; + default: + return "ERR"; + } +} + +static int stream_buffer_init(struct stream_buf_s *stbuf, struct vdec_s *vdec) +{ + int ret = 0; + u32 flags = CODEC_MM_FLAGS_DMA; + bool is_secure = 0; + u32 addr = 0; + int pages = 
0; + u32 size; + + if (stbuf->buf_start) + return 0; + + snprintf(stbuf->name, sizeof(stbuf->name), + "%s-%d", MEM_NAME, vdec->id); + + if (stbuf->ext_buf_addr) { + addr = stbuf->ext_buf_addr; + size = stbuf->buf_size; + is_secure = stbuf->is_secure; + pages = (size >> PAGE_SHIFT); + } else { + flags |= CODEC_MM_FLAGS_FOR_VDECODER; + if (vdec->port_flag & PORT_FLAG_DRM) { + flags |= CODEC_MM_FLAGS_TVP; + is_secure = true; + } + + size = PAGE_ALIGN(stbuf->buf_size); + pages = (size >> PAGE_SHIFT); + addr = codec_mm_alloc_for_dma(stbuf->name, + pages, PAGE_SHIFT + 4, flags); + if (!addr) { + ret = -ENOMEM; + goto err; + } + stbuf->use_ptsserv = 1; + } + vdec_config_vld_reg(vdec, addr, size); + + ret = vdec_set_input_buffer(vdec, addr, size); + if (ret) { + pr_err("[%d]: set input buffer err.\n", stbuf->id); + goto err; + } + + atomic_set(&stbuf->payload, 0); + init_waitqueue_head(&stbuf->wq); + + stbuf->buf_start = addr; + stbuf->buf_wp = addr; + stbuf->buf_rp = addr; + stbuf->buf_size = size; + stbuf->is_secure = is_secure; + stbuf->no_parser = true; + stbuf->buf_page_num = pages; + stbuf->canusebuf_size = size; + stbuf->stream_offset = 0; + + /* init thread write. 
*/ + if (!(vdec_get_debug_flags() & 1) && + !codec_mm_video_tvp_enabled() && + (!stbuf->ext_buf_addr)) { + int block_size = PAGE_SIZE << 4; + int buf_num = (2 * SZ_1M) / (PAGE_SIZE << 4); + + stbuf->write_thread = + threadrw_alloc(buf_num, block_size, + stream_buffer_write_ex, 0); + } + + stbuf->flag |= BUF_FLAG_ALLOC; + stbuf->flag |= BUF_FLAG_IN_USE; + if (vdec_single(vdec)) + pts_start(stbuf->type); + pr_info("[%d]: [%s-%s] addr: %lx, size: %x, thrRW: %d, extbuf: %d, secure: %d\n", + stbuf->id, type_to_str(stbuf->type), stbuf->name, + stbuf->buf_start, stbuf->buf_size, + !!stbuf->write_thread, + !!stbuf->ext_buf_addr, + stbuf->is_secure); + + return 0; +err: + stream_buffer_release(stbuf); + + return ret; +} + +static void stream_buffer_release(struct stream_buf_s *stbuf) +{ + if (stbuf->write_thread) + threadrw_release(stbuf); + if (vdec_single(container_of(stbuf, struct vdec_s, vbuf))) + pts_stop(stbuf->type); + + if (stbuf->flag & BUF_FLAG_ALLOC && stbuf->buf_start) { + if (!stbuf->ext_buf_addr) + codec_mm_free_for_dma(MEM_NAME, stbuf->buf_start); + + stbuf->flag &= ~BUF_FLAG_ALLOC; + stbuf->ext_buf_addr = 0; + stbuf->buf_start = 0; + stbuf->is_secure = false; + } + stbuf->flag &= ~BUF_FLAG_IN_USE; +} + +static int get_free_space(struct stream_buf_s *stbuf) +{ + u32 len = stbuf->buf_size; + int idle = 0; + + if (!atomic_read(&stbuf->payload) && (stbuf->buf_rp == stbuf->buf_wp)) + idle = len; + else if (stbuf->buf_wp > stbuf->buf_rp) + idle = len - (stbuf->buf_wp - stbuf->buf_rp); + else if (stbuf->buf_wp < stbuf->buf_rp) + idle = stbuf->buf_rp - stbuf->buf_wp; + + /*pr_info("[%d]: wp: %x, rp: %x, payload: %d, free space: %d\n", + stbuf->id, stbuf->buf_wp, stbuf->buf_rp, + atomic_read(&stbuf->payload), idle);*/ + + return idle; +} + +static int aml_copy_from_user(void *to, const void *from, ulong n) +{ + int ret =0; + + if (likely(access_ok(from, n))) + ret = copy_from_user(to, from, n); + else + memcpy(to, from, n); + + return ret; +} + +static int 
stream_buffer_copy(struct stream_buf_s *stbuf, const u8 *buf, u32 size) +{ + int ret = 0; + void *src = NULL, *dst = NULL; + int i, len; + + for (i = 0; i < size; i += MAP_RANGE) { + len = ((size - i) > MAP_RANGE) ? MAP_RANGE : size - i; + src = stbuf->is_phybuf ? + codec_mm_vmap((ulong) buf + i, len) : + (void *) buf; + dst = codec_mm_vmap(stbuf->buf_wp + i, len); + if (!src || !dst) { + ret = -EFAULT; + pr_err("[%d]: %s, src or dst is invalid.\n", + stbuf->id, __func__); + goto err; + } + + if (aml_copy_from_user(dst, src, len)) { + ret = -EAGAIN; + goto err; + } + + codec_mm_dma_flush(dst, len, DMA_TO_DEVICE); + codec_mm_unmap_phyaddr(dst); + + if (stbuf->is_phybuf) + codec_mm_unmap_phyaddr(src); + } + + return 0; +err: + if (stbuf->is_phybuf && src) + codec_mm_unmap_phyaddr(src); + if (dst) + codec_mm_unmap_phyaddr(dst); + return ret; +} + +static int rb_push_data(struct stream_buf_s *stbuf, const u8 *in, u32 size) +{ + int ret, len; + u32 wp = stbuf->buf_wp; + u32 sp = (stbuf->buf_wp + size); + u32 ep = (stbuf->buf_start + stbuf->buf_size); + + len = sp > ep ? ep - wp : size; + + if (!stbuf->ext_buf_addr) { + ret = stream_buffer_copy(stbuf, in, len); + if (ret) + return ret; + } + + stbuf->ops->set_wp(stbuf, (wp + len >= ep) ? + stbuf->buf_start : (wp + len)); + + if (stbuf->buf_wp == stbuf->buf_rp) { + pr_debug("[%d]: stream buffer is full, payload: %d\n", + stbuf->id, atomic_read(&stbuf->payload)); + } + + return len; +} + +static int stream_buffer_write_inner(struct stream_buf_s *stbuf, + const u8 *in, u32 size) +{ + if (in == NULL || size > stbuf->buf_size) { + pr_err("[%d]: params are not valid.\n", stbuf->id); + return -1; + } + + if (get_free_space(stbuf) < size) + return -EAGAIN; + + return rb_push_data(stbuf, in, size); +} + +static u32 stream_buffer_get_wp(struct stream_buf_s *stbuf) +{ + return stbuf->buf_wp; +} + +static void stream_buffer_set_wp(struct stream_buf_s *stbuf, u32 val) +{ + int len = (val >= stbuf->buf_wp) ? 
(val - stbuf->buf_wp) : + (stbuf->buf_size - stbuf->buf_wp + val); + + stbuf->buf_wp = val; + vdec_set_vld_wp(container_of(stbuf, struct vdec_s, vbuf), stbuf->buf_wp); + + atomic_add(len, &stbuf->payload); +} + +static u32 stream_buffer_get_rp(struct stream_buf_s *stbuf) +{ + return stbuf->buf_rp; +} + +static void stream_buffer_set_rp(struct stream_buf_s *stbuf, u32 val) +{ + int len = (val >= stbuf->buf_rp) ? (val - stbuf->buf_rp) : + (stbuf->buf_size - stbuf->buf_rp + val); + + stbuf->buf_rp = val; + atomic_sub(len, &stbuf->payload); +} + +static struct stream_buf_ops stream_buffer_ops = { + .init = stream_buffer_init, + .release = stream_buffer_release, + .write = stream_buffer_write_inner, + .get_wp = stream_buffer_get_wp, + .set_wp = stream_buffer_set_wp, + .get_rp = stream_buffer_get_rp, + .set_rp = stream_buffer_set_rp, +}; + +struct stream_buf_ops *get_stbuf_ops(void) +{ + return &stream_buffer_ops; +} +EXPORT_SYMBOL(get_stbuf_ops); +
diff --git a/drivers/stream_input/amports/streambuf.c b/drivers/stream_input/amports/streambuf.c new file mode 100644 index 0000000..c267879 --- /dev/null +++ b/drivers/stream_input/amports/streambuf.c
@@ -0,0 +1,496 @@ +/* + * drivers/amlogic/media/stream_input/parser/streambuf.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define DEBUG +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/timer.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/utils/vformat.h> +#include <linux/amlogic/iomap.h> +#include <asm/cacheflush.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> +/* #include <mach/am_regs.h> */ + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../../frame_provider/decoder/utils/vdec.h" +#include "streambuf_reg.h" +#include "streambuf.h" +#include <linux/amlogic/media/utils/amports_config.h> +#include "../amports/amports_priv.h" +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> + +#define STBUF_SIZE (64*1024) +#define STBUF_WAIT_INTERVAL (HZ/100) +#define MEM_NAME "streambuf" + +void *fetchbuf = 0; + +static s32 _stbuf_alloc(struct stream_buf_s *buf, bool is_secure) +{ + if (buf->buf_size == 0) + return -ENOBUFS; + + while (buf->buf_start == 0) { + int flags = CODEC_MM_FLAGS_DMA; + + buf->buf_page_num = PAGE_ALIGN(buf->buf_size) / PAGE_SIZE; + if (buf->type == BUF_TYPE_SUBTITLE) + flags = CODEC_MM_FLAGS_DMA_CPU; + + /* + *if 4k, + *used cma first,for less mem fragments. 
+ */ + if (((buf->type == BUF_TYPE_HEVC) || + (buf->type == BUF_TYPE_VIDEO)) && + buf->for_4k) + flags |= CODEC_MM_FLAGS_CMA_FIRST; + if (buf->buf_size > 20 * 1024 * 1024) + flags |= CODEC_MM_FLAGS_CMA_FIRST; + + if ((buf->type == BUF_TYPE_HEVC) || + (buf->type == BUF_TYPE_VIDEO)) { + flags |= CODEC_MM_FLAGS_FOR_VDECODER; + } else if (buf->type == BUF_TYPE_AUDIO) { + flags |= CODEC_MM_FLAGS_FOR_ADECODER; + flags |= CODEC_MM_FLAGS_DMA_CPU; + } + + if (is_secure) + flags |= CODEC_MM_FLAGS_TVP; + + buf->buf_start = codec_mm_alloc_for_dma(MEM_NAME, + buf->buf_page_num, 4+PAGE_SHIFT, flags); + if (!buf->buf_start) { + int is_video = (buf->type == BUF_TYPE_HEVC) || + (buf->type == BUF_TYPE_VIDEO); + if (is_video && buf->buf_size >= 9 * SZ_1M) {/*min 6M*/ + int old_size = buf->buf_size; + + buf->buf_size = + PAGE_ALIGN(buf->buf_size * 2/3); + pr_info("%s stbuf alloced size = %d failed try small %d size\n", + (buf->type == BUF_TYPE_HEVC) ? "HEVC" : + (buf->type == BUF_TYPE_VIDEO) ? "Video" : + (buf->type == BUF_TYPE_AUDIO) ? "Audio" : + "Subtitle", old_size, buf->buf_size); + continue; + } + pr_info("%s stbuf alloced size = %d failed\n", + (buf->type == BUF_TYPE_HEVC) ? "HEVC" : + (buf->type == BUF_TYPE_VIDEO) ? "Video" : + (buf->type == BUF_TYPE_AUDIO) ? "Audio" : + "Subtitle", buf->buf_size); + return -ENOMEM; + } + + buf->is_secure = is_secure; + + pr_debug("%s stbuf alloced at %p, secure = %d, size = %d\n", + (buf->type == BUF_TYPE_HEVC) ? "HEVC" : + (buf->type == BUF_TYPE_VIDEO) ? "Video" : + (buf->type == BUF_TYPE_AUDIO) ? 
"Audio" : + "Subtitle", (void *)buf->buf_start, + buf->is_secure, + buf->buf_size); + } + + buf->canusebuf_size = buf->buf_size; + buf->flag |= BUF_FLAG_ALLOC; + + return 0; +} + +int stbuf_change_size(struct stream_buf_s *buf, int size, bool is_secure) +{ + unsigned long old_buf; + int old_size, old_pagenum; + int ret; + + pr_info("buffersize=%d,%d,start=%p, secure=%d\n", size, buf->buf_size, + (void *)buf->buf_start, is_secure); + + if (buf->buf_size == size && buf->buf_start != 0) + return 0; + + old_buf = buf->buf_start; + old_size = buf->buf_size; + old_pagenum = buf->buf_page_num; + buf->buf_start = 0; + buf->buf_size = size; + ret = size; + + if (size == 0 || + _stbuf_alloc(buf, is_secure) == 0) { + /* + * size=0:We only free the old memory; + * alloc ok,changed to new buffer + */ + if (old_buf != 0) { + codec_mm_free_for_dma(MEM_NAME, old_buf); + } + + if (size == 0) + buf->is_secure = false; + + pr_info("changed the (%d) buffer size from %d to %d\n", + buf->type, old_size, size); + return 0; + } else { + /* alloc failed */ + buf->buf_start = old_buf; + buf->buf_size = old_size; + buf->buf_page_num = old_pagenum; + pr_info("changed the (%d) buffer size from %d to %d,failed\n", + buf->type, old_size, size); + } + + return ret; +} + +int stbuf_fetch_init(void) +{ + if (NULL != fetchbuf) + return 0; + + fetchbuf = (void *)__get_free_pages(GFP_KERNEL, + get_order(FETCHBUF_SIZE)); + + if (!fetchbuf) { + pr_info("%s: Can not allocate fetch working buffer\n", + __func__); + return -ENOMEM; + } + return 0; +} +EXPORT_SYMBOL(stbuf_fetch_init); + +void stbuf_fetch_release(void) +{ + if (0 && fetchbuf) { + /* always don't free.for safe alloc/free*/ + free_pages((unsigned long)fetchbuf, get_order(FETCHBUF_SIZE)); + fetchbuf = 0; + } +} + +static void _stbuf_timer_func(struct timer_list *arg) +{ + struct stream_buf_s *p = (struct stream_buf_s *)arg; + + if (stbuf_space(p) < p->wcnt) { + p->timer.expires = jiffies + STBUF_WAIT_INTERVAL; + + add_timer(&p->timer); + } else 
+ wake_up_interruptible(&p->wq); + +} + +u32 stbuf_level(struct stream_buf_s *buf) +{ + if ((buf->type == BUF_TYPE_HEVC) || (buf->type == BUF_TYPE_VIDEO)) { + if (buf->no_parser) { + int level = buf->buf_wp - buf->buf_rp; + if (level < 0) + level += buf->buf_size; + return level; + } else { + if (READ_PARSER_REG(PARSER_ES_CONTROL) & 1) { + int level = READ_PARSER_REG(PARSER_VIDEO_WP) - + READ_PARSER_REG(PARSER_VIDEO_RP); + if (level < 0) + level += READ_PARSER_REG(PARSER_VIDEO_END_PTR) - + READ_PARSER_REG(PARSER_VIDEO_START_PTR) + 8; + return (u32)level; + } else + return (buf->type == BUF_TYPE_HEVC) ? + READ_VREG(HEVC_STREAM_LEVEL) : + _READ_ST_REG(LEVEL); + } + } + + return _READ_ST_REG(LEVEL); +} + +u32 stbuf_rp(struct stream_buf_s *buf) +{ + if ((buf->type == BUF_TYPE_HEVC) || (buf->type == BUF_TYPE_VIDEO)) { + if (buf->no_parser) + return buf->buf_rp; + else { + if (READ_PARSER_REG(PARSER_ES_CONTROL) & 1) + return READ_PARSER_REG(PARSER_VIDEO_RP); + else + return (buf->type == BUF_TYPE_HEVC) ? + READ_VREG(HEVC_STREAM_RD_PTR) : + _READ_ST_REG(RP); + } + } + + return _READ_ST_REG(RP); +} + +u32 stbuf_space(struct stream_buf_s *buf) +{ + /* reserved space for safe write, + * the parser fifo size is 1024byts, so reserve it + */ + int size; + + size = buf->canusebuf_size - stbuf_level(buf); + + if (buf->canusebuf_size >= buf->buf_size / 2) { + /* old reversed value,tobe full, reversed only... */ + size = size - 6 * 1024; + } + + if (!buf->no_parser) { + if ((buf->type == BUF_TYPE_VIDEO) + || (has_hevc_vdec() && buf->type == BUF_TYPE_HEVC)) + size -= READ_PARSER_REG(PARSER_VIDEO_HOLE); + } + return size > 0 ? 
size : 0; +} + +u32 stbuf_size(struct stream_buf_s *buf) +{ + return buf->buf_size; +} + +u32 stbuf_canusesize(struct stream_buf_s *buf) +{ + return buf->canusebuf_size; +} + +s32 stbuf_init(struct stream_buf_s *buf, struct vdec_s *vdec) +{ + s32 r; + u32 dummy; + u32 addr32; + + VDEC_PRINT_FUN_LINENO(__func__, __LINE__); + + if (!buf->buf_start) { + r = _stbuf_alloc(buf, (vdec) ? + vdec->port_flag & PORT_FLAG_DRM : 0); + if (r < 0) + return r; + } + addr32 = buf->buf_start & 0xffffffff; + buf->use_ptsserv = true; + init_waitqueue_head(&buf->wq); + + /* + * For multidec, do not touch HW stream buffers during port + * init and release. + */ + if ((buf->type == BUF_TYPE_VIDEO) || (buf->type == BUF_TYPE_HEVC)) { + if (vdec) { + if (vdec_stream_based(vdec)) + vdec_set_input_buffer(vdec, addr32, + buf->buf_size); + else + return vdec_set_input_buffer(vdec, addr32, + buf->buf_size); + } + } + + buf->write_thread = 0; + if (((vdec && !vdec_single(vdec)) || (buf->is_multi_inst)) && + (vdec_get_debug_flags() & 0x2) == 0) + return 0; + if (has_hevc_vdec() && buf->type == BUF_TYPE_HEVC) { + CLEAR_VREG_MASK(HEVC_STREAM_CONTROL, 1); + WRITE_VREG(HEVC_STREAM_START_ADDR, addr32); + WRITE_VREG(HEVC_STREAM_END_ADDR, addr32 + buf->buf_size); + WRITE_VREG(HEVC_STREAM_RD_PTR, addr32); + WRITE_VREG(HEVC_STREAM_WR_PTR, addr32); + + return 0; + } + + if (buf->type == BUF_TYPE_VIDEO) { + VDEC_PRINT_FUN_LINENO(__func__, __LINE__); + + _WRITE_ST_REG(CONTROL, 0); + /* reset VLD before setting all pointers */ + WRITE_VREG(VLD_MEM_VIFIFO_WRAP_COUNT, 0); + /*TODO: only > m6*/ +#if 1/* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + WRITE_VREG(DOS_SW_RESET0, (1 << 4)); + WRITE_VREG(DOS_SW_RESET0, 0); +#else + WRITE_RESET_REG(RESET0_REGISTER, RESET_VLD); +#endif + + dummy = READ_RESET_REG(RESET0_REGISTER); + WRITE_VREG(POWER_CTL_VLD, 1 << 4); + } else if (buf->type == BUF_TYPE_AUDIO) { + _WRITE_ST_REG(CONTROL, 0); + + WRITE_AIU_REG(AIU_AIFIFO_GBIT, 0x80); + } + + if (buf->type == BUF_TYPE_SUBTITLE) 
{ + WRITE_PARSER_REG(PARSER_SUB_RP, addr32); + WRITE_PARSER_REG(PARSER_SUB_START_PTR, addr32); + WRITE_PARSER_REG(PARSER_SUB_END_PTR, + addr32 + buf->buf_size - 8); + + return 0; + } + + _WRITE_ST_REG(START_PTR, addr32); + _WRITE_ST_REG(CURR_PTR, addr32); + _WRITE_ST_REG(END_PTR, addr32 + buf->buf_size - 8); + + _SET_ST_REG_MASK(CONTROL, MEM_BUFCTRL_INIT); + _CLR_ST_REG_MASK(CONTROL, MEM_BUFCTRL_INIT); + + _WRITE_ST_REG(BUF_CTRL, MEM_BUFCTRL_MANUAL); + _WRITE_ST_REG(WP, addr32); + + _SET_ST_REG_MASK(BUF_CTRL, MEM_BUFCTRL_INIT); + _CLR_ST_REG_MASK(BUF_CTRL, MEM_BUFCTRL_INIT); + + _SET_ST_REG_MASK(CONTROL, + (0x11 << 16) | MEM_FILL_ON_LEVEL | MEM_CTRL_FILL_EN | + MEM_CTRL_EMPTY_EN); + + if (buf->no_parser) + _SET_ST_REG_MASK(CONTROL, 7 << 3); + + return 0; +} +EXPORT_SYMBOL(stbuf_init); + +void stbuf_vdec2_init(struct stream_buf_s *buf) +{ + + _WRITE_VDEC2_ST_REG(CONTROL, 0); + + _WRITE_VDEC2_ST_REG(START_PTR, _READ_ST_REG(START_PTR)); + _WRITE_VDEC2_ST_REG(END_PTR, _READ_ST_REG(END_PTR)); + _WRITE_VDEC2_ST_REG(CURR_PTR, _READ_ST_REG(CURR_PTR)); + + _WRITE_VDEC2_ST_REG(CONTROL, MEM_FILL_ON_LEVEL | MEM_BUFCTRL_INIT); + _WRITE_VDEC2_ST_REG(CONTROL, MEM_FILL_ON_LEVEL); + + _WRITE_VDEC2_ST_REG(BUF_CTRL, MEM_BUFCTRL_INIT); + _WRITE_VDEC2_ST_REG(BUF_CTRL, 0); + + _WRITE_VDEC2_ST_REG(CONTROL, + (0x11 << 16) | MEM_FILL_ON_LEVEL | MEM_CTRL_FILL_EN + | MEM_CTRL_EMPTY_EN); +} + +s32 stbuf_wait_space(struct stream_buf_s *stream_buf, size_t count) +{ + struct stream_buf_s *p = stream_buf; + long time_out = 200; + + p->wcnt = count; + + timer_setup(&p->timer, _stbuf_timer_func, (ulong) p); + + mod_timer(&p->timer, jiffies + STBUF_WAIT_INTERVAL); + + if (wait_event_interruptible_timeout + (p->wq, stbuf_space(p) >= count, + msecs_to_jiffies(time_out)) == 0) { + del_timer_sync(&p->timer); + + return -EAGAIN; + } + + del_timer_sync(&p->timer); + + return 0; +} + +void stbuf_release(struct stream_buf_s *buf) +{ + int r; + + buf->first_tstamp = INVALID_PTS; + if (!buf->ext_buf_addr) { + 
r = stbuf_init(buf, NULL);/* reinit buffer */ + if (r < 0) + pr_err("stbuf_release %d, stbuf_init failed\n", __LINE__); + } + if (buf->flag & BUF_FLAG_ALLOC && buf->buf_start) { + codec_mm_free_for_dma(MEM_NAME, buf->buf_start); + buf->flag &= ~BUF_FLAG_ALLOC; + buf->buf_start = 0; + buf->is_secure = false; + } + buf->flag &= ~BUF_FLAG_IN_USE; +} +EXPORT_SYMBOL(stbuf_release); + +u32 stbuf_sub_rp_get(void) +{ + return READ_PARSER_REG(PARSER_SUB_RP); +} + +void stbuf_sub_rp_set(unsigned int sub_rp) +{ + WRITE_PARSER_REG(PARSER_SUB_RP, sub_rp); + return; +} + +u32 stbuf_sub_wp_get(void) +{ + return READ_PARSER_REG(PARSER_SUB_WP); +} + +u32 stbuf_sub_start_get(void) +{ + return READ_PARSER_REG(PARSER_SUB_START_PTR); +} + +u32 parser_get_wp(struct stream_buf_s *vb) +{ + return READ_PARSER_REG(PARSER_VIDEO_WP); +} +EXPORT_SYMBOL(parser_get_wp); + +void parser_set_wp(struct stream_buf_s *vb, u32 val) +{ + WRITE_PARSER_REG(PARSER_VIDEO_WP, val); +} +EXPORT_SYMBOL(parser_set_wp); + +u32 parser_get_rp(struct stream_buf_s *vb) +{ + return READ_PARSER_REG(PARSER_VIDEO_RP); +} +EXPORT_SYMBOL(parser_get_rp); + +void parser_set_rp(struct stream_buf_s *vb, u32 val) +{ + WRITE_PARSER_REG(PARSER_VIDEO_RP, val); +} +EXPORT_SYMBOL(parser_set_rp); +
diff --git a/drivers/stream_input/amports/streambuf.h b/drivers/stream_input/amports/streambuf.h new file mode 100644 index 0000000..88f0926 --- /dev/null +++ b/drivers/stream_input/amports/streambuf.h
/*
 * drivers/amlogic/media/stream_input/parser/streambuf.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

/*
 * Stream-buffer definitions for the amports stream input layer:
 * the stream_buf_s descriptor, its per-type ops table, DRM metadata
 * structures, and the public stbuf_* / parser_* accessor API.
 */

#ifndef STREAMBUF_H
#define STREAMBUF_H
#include <linux/amlogic/media/utils/amports_config.h>

/* stream_buf_s.flag bits */
#define BUF_FLAG_ALLOC 0x01
#define BUF_FLAG_IN_USE 0x02
#define BUF_FLAG_PARSER 0x04
#define BUF_FLAG_FIRST_TSTAMP 0x08
#define BUF_FLAG_IOMEM 0x10

/* stream_buf_s.type values; also index into per-type buffer tables */
#define BUF_TYPE_VIDEO 0
#define BUF_TYPE_AUDIO 1
#define BUF_TYPE_SUBTITLE 2
#define BUF_TYPE_USERDATA 3
#define BUF_TYPE_HEVC 4
#define BUF_MAX_NUM 5

#define INVALID_PTS 0xffffffff

#define FETCHBUF_SIZE (64*1024)
#define USER_DATA_SIZE (8*1024)

/* stream_buffer_metainfo stbuf_flag */
#define STBUF_META_FLAG_SECURE (1 << 0)
#define STBUF_META_FLAG_PTS_SERV (1 << 1) /* use pts server flag */
#define STBUF_META_FLAG_XXX1 (1 << 2)

struct vdec_s;
struct stream_buf_s;

/* Demux PIDs handed to the TS parser for this stream. */
struct parser_args {
	u32 vid;
	u32 aid;
	u32 sid;
	u32 pcrid;
};

/* Per-buffer-type operations (filled in by the owning input backend). */
struct stream_buf_ops {
	int (*init) (struct stream_buf_s *, struct vdec_s *);
	void (*release) (struct stream_buf_s *);
	int (*write) (struct stream_buf_s *, const u8 *, u32);
	u32 (*get_wp) (struct stream_buf_s *);
	void (*set_wp) (struct stream_buf_s *, u32);
	u32 (*get_rp) (struct stream_buf_s *);
	void (*set_rp) (struct stream_buf_s *, u32);
};

/* One hardware/software stream FIFO (video, audio, subtitle, ...). */
struct stream_buf_s {
	int id;
	u8 name[16];
	s32 flag;			/* BUF_FLAG_* bits */
	u32 type;			/* BUF_TYPE_* */
	unsigned long buf_start;	/* physical base when allocated */
	struct page *buf_pages;
	int buf_page_num;
	u32 buf_size;
	u32 default_buf_size;
	u32 canusebuf_size;
	u32 first_tstamp;		/* INVALID_PTS when unset */
	const ulong reg_base;		/* FIFO register block base */
	wait_queue_head_t wq;		/* writers waiting for space */
	struct timer_list timer;
	u32 wcnt;
	u32 buf_wp;
	u32 buf_rp;
	u32 max_buffer_delay_ms;
	u64 last_write_jiffies64;
	void *write_thread;		/* threadrw_write_task, or NULL */
	int for_4k;
	bool is_secure;
	bool is_multi_inst;
	bool no_parser;			/* data bypasses the TS parser */
	bool is_phybuf;
	bool is_hevc;
	bool use_ptsserv;
	u32 drm_flag;
	ulong ext_buf_addr;		/* externally-provided buffer */
	atomic_t payload;
	u32 stream_offset;
	struct parser_args pars;
	struct stream_buf_ops *ops;
	u32 last_offset[2];
	u32 write_count;
} /*stream_buf_t */;

/* Character-device port exposed to user space (amstream node). */
struct stream_port_s {
	/* driver info */
	const char *name;
	struct device *class_dev;
	const struct file_operations *fops;

	/* ports control */
	s32 type;
	s32 flag;
	s32 pcr_inited;

	/* decoder info */
	s32 vformat;
	s32 aformat;
	s32 achanl;
	s32 asamprate;
	s32 adatawidth;

	/* parser info */
	u32 vid;
	u32 aid;
	u32 sid;
	u32 pcrid;
	bool is_4k;
} /*stream_port_t */;

enum drm_level_e {
	DRM_LEVEL1 = 1,
	DRM_LEVEL2 = 2,
	DRM_LEVEL3 = 3,
	DRM_NONE = 4,
};

/* DRM packet descriptor passed alongside encrypted ES data. */
struct drm_info {
	enum drm_level_e drm_level;
	u32 drm_flag;
	u32 drm_hasesdata;
	u32 drm_priv;
	u32 drm_pktsize;
	u32 drm_pktpts;
	u32 drm_phy;
	u32 drm_vir;
	u32 drm_remap;
	u32 data_offset;
	u32 handle;
	u32 extpad[7];
} /*drminfo_t */;

/* Metadata exchanged with user space describing the stream buffer. */
struct stream_buffer_metainfo {
	union {
		u32 stbuf_start;
		u32 stbuf_pktaddr; //stbuf_pktaddr + stbuf_pktsize = wp
	};
	union {
		u32 stbuf_size;
		u32 stbuf_pktsize;
	};
	u32 stbuf_flag;		/* STBUF_META_FLAG_* bits */
	u32 stbuf_private;
	u32 reserved[16];
};

/* Snapshot of the FIFO pointers reported to user space. */
struct stream_buffer_status {
	u32 stbuf_wp;
	u32 stbuf_rp;
	u32 stbuf_start;
	u32 stbuf_size;
	u32 reserved[16];
};


#define TYPE_DRMINFO_V2 0x100
#define TYPE_DRMINFO 0x80
#define TYPE_PATTERN 0x40

struct vdec_s;

extern void *fetchbuf;

extern u32 stbuf_level(struct stream_buf_s *buf);
extern u32 stbuf_rp(struct stream_buf_s *buf);
extern u32 stbuf_space(struct stream_buf_s *buf);
extern u32 stbuf_size(struct stream_buf_s *buf);
extern u32 stbuf_canusesize(struct stream_buf_s *buf);
extern s32 stbuf_init(struct stream_buf_s *buf, struct vdec_s *vdec);
extern s32 stbuf_wait_space(struct stream_buf_s *stream_buf, size_t count);
extern void stbuf_release(struct stream_buf_s *buf);
extern int stbuf_change_size(struct stream_buf_s *buf, int size,
	bool is_secure);
extern int stbuf_fetch_init(void);
extern void stbuf_fetch_release(void);
extern u32 stbuf_sub_rp_get(void);
extern void stbuf_sub_rp_set(unsigned int sub_rp);
extern u32 stbuf_sub_wp_get(void);
extern u32 stbuf_sub_start_get(void);
extern u32 stbuf_userdata_start_get(void);
extern struct stream_buf_s *get_stream_buffer(int id);

extern void stbuf_vdec2_init(struct stream_buf_s *buf);

/* Parser-side video FIFO pointer accessors (PARSER_VIDEO_WP/RP regs). */
u32 parser_get_wp(struct stream_buf_s *vb);
void parser_set_wp(struct stream_buf_s *vb, u32 val);
u32 parser_get_rp(struct stream_buf_s *vb);
void parser_set_rp(struct stream_buf_s *vb, u32 val);

#endif /* STREAMBUF_H */
diff --git a/drivers/stream_input/amports/streambuf_reg.h b/drivers/stream_input/amports/streambuf_reg.h new file mode 100644 index 0000000..5f0c8ca --- /dev/null +++ b/drivers/stream_input/amports/streambuf_reg.h
/*
 * drivers/amlogic/media/stream_input/parser/streambuf_reg.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

/*
 * Stream-FIFO register access helpers.  Each stream buffer's FIFO
 * register block is addressed as reg_base + word-offset; the macros
 * below route accesses to the DOS bus (video VIFIFO) or the AIU bus
 * (audio AIFIFO) based on which base the buffer carries.  All macros
 * expect a local variable `buf` (struct stream_buf_s *) in scope.
 */

#ifndef STREAMBUF_REG_H
#define STREAMBUF_REG_H

#define HEVC_STREAM_REG_BASE HEVC_STREAM_START_ADDR

#define VLD_MEM_VIFIFO_REG_BASE VLD_MEM_VIFIFO_START_PTR
#define AIU_MEM_AIFIFO_REG_BASE AIU_MEM_AIFIFO_START_PTR

/* Word offsets of the FIFO registers relative to reg_base. */
#define START_PTR 0
#define CURR_PTR 1
#define END_PTR 2
#define BYTES_AVAIL 3
#define CONTROL 4
#define WP 5
#define RP 6
#define LEVEL 7
#define BUF_CTRL 8

/*
 * NOTE(review): this header originally carried a large commented-out
 * block of MESON6/MESON6TVD-era accessor macros based on raw
 * __raw_writel()/READ_MPEG_REG() access.  It was dead text kept for
 * reference; condensed away here — recover it from git history if the
 * legacy register paths are ever needed again.
 */

 /*TODO*/
#define _WRITE_ST_REG(r, val) do { \
		if (buf->reg_base == VLD_MEM_VIFIFO_REG_BASE) \
			codec_dosbus_write((buf->reg_base+(r)), (val)); \
		else \
			codec_aiubus_write((buf->reg_base+(r)), (val)); \
	} while (0)
#define _READ_ST_REG(r) \
		((buf->reg_base == VLD_MEM_VIFIFO_REG_BASE) ? \
		codec_dosbus_read(buf->reg_base+(r)) : \
		codec_aiubus_read(buf->reg_base+(r)))

/* Read-modify-write helpers built on the two accessors above. */
#define _SET_ST_REG_MASK(r, val) _WRITE_ST_REG(r, _READ_ST_REG(r) | (val))
#define _CLR_ST_REG_MASK(r, val) _WRITE_ST_REG(r, _READ_ST_REG(r)&~(val))
/* VDEC2 has its own VIFIFO block at a fixed base; always on the DOS bus. */
#define _READ_VDEC2_ST_REG(r) (codec_dosbus_read(\
		(VDEC2_VLD_MEM_VIFIFO_START_PTR+(r))))
#define _WRITE_VDEC2_ST_REG(r, val) codec_dosbus_write(\
		(VDEC2_VLD_MEM_VIFIFO_START_PTR+r), val)

/* CONTROL / BUF_CTRL register bit definitions. */
#define MEM_BUFCTRL_MANUAL (1<<1)
#define MEM_BUFCTRL_INIT (1<<0)
#define MEM_LEVEL_CNT_BIT 18
#define MEM_FIFO_CNT_BIT 16
#define MEM_FILL_ON_LEVEL (1<<10)
#define MEM_CTRL_EMPTY_EN (1<<2)
#define MEM_CTRL_FILL_EN (1<<1)
#define MEM_CTRL_INIT (1<<0)
#endif /* STREAMBUF_REG_H */
diff --git a/drivers/stream_input/amports/thread_rw.c b/drivers/stream_input/amports/thread_rw.c new file mode 100644 index 0000000..0d386ce --- /dev/null +++ b/drivers/stream_input/amports/thread_rw.c
@@ -0,0 +1,630 @@ +/* + * drivers/amlogic/media/stream_input/parser/thread_rw.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/kfifo.h> +#include <linux/workqueue.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/uaccess.h> +#include <linux/fs.h> +#include <linux/vmalloc.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> + +/* #include <mach/am_regs.h> */ +#include <linux/delay.h> + +#include "streambuf.h" +#include "amports_priv.h" +#include "thread_rw.h" + +#define BUF_NAME "fetchbuf" + +#define DEFAULT_BLOCK_SIZE (64*1024) + +struct threadrw_buf { + void *vbuffer; + dma_addr_t dma_handle; + int write_off; + int data_size; + int buffer_size; + int from_cma; +}; + +#define MAX_MM_BUFFER_NUM 16 +struct threadrw_write_task { + struct file *file; + struct delayed_work write_work; + DECLARE_KFIFO_PTR(datafifo, void *); + DECLARE_KFIFO_PTR(freefifo, void *); + int bufs_num; + int max_bufs; + int errors; + spinlock_t lock; + struct mutex mutex; + struct stream_buf_s *sbuf; + int buffered_data_size; + int passed_data_len; + int buffer_size; + int def_block_size; + int data_offset; + int writework_on; + unsigned long codec_mm_buffer[MAX_MM_BUFFER_NUM]; + int manual_write; + int failed_onmore; + wait_queue_head_t wq; + ssize_t 
(*write)(struct file *, + struct stream_buf_s *, + const char __user *, + size_t, int); + struct threadrw_buf buf[1]; + /*don't add any after buf[] define */ +}; + +static int free_task_buffers(struct threadrw_write_task *task); + +static struct workqueue_struct *threadrw_wq_get(void) +{ + static struct workqueue_struct *threadrw_wq; + + if (!threadrw_wq) + threadrw_wq = create_singlethread_workqueue("threadrw"); + return threadrw_wq; +} + +static int threadrw_schedule_delayed_work( + struct threadrw_write_task *task, + unsigned long delay) +{ + bool ret; + + if (threadrw_wq_get()) { + ret = queue_delayed_work(threadrw_wq_get(), + &task->write_work, delay); + } else + ret = schedule_delayed_work(&task->write_work, delay); + if (!ret) { + cancel_delayed_work(&task->write_work); + if (threadrw_wq_get()) + ret = queue_delayed_work(threadrw_wq_get(), + &task->write_work, 0); + else + ret = schedule_delayed_work(&task->write_work, 0); + } + return 0; +} + +static ssize_t threadrw_write_onece( + struct threadrw_write_task *task, + struct file *file, + struct stream_buf_s *stbuf, + const char __user *buf, size_t count) +{ + struct threadrw_buf *rwbuf = NULL; + int ret = 0; + int to_write; + + if (!kfifo_get(&task->freefifo, (void *)&rwbuf)) { + if (task->errors) + return task->errors; + return -EAGAIN; + } + + to_write = min_t(u32, rwbuf->buffer_size, count); + if (copy_from_user(rwbuf->vbuffer, buf, to_write)) { + kfifo_put(&task->freefifo, (const void *)buf); + ret = -EFAULT; + goto err; + } + rwbuf->data_size = to_write; + rwbuf->write_off = 0; + kfifo_put(&task->datafifo, (const void *)rwbuf); + threadrw_schedule_delayed_work(task, 0); + return to_write; +err: + return ret; +} + +static ssize_t threadrw_write_in( + struct threadrw_write_task *task, + struct stream_buf_s *stbuf, + const char __user *buf, size_t count) +{ + int ret = 0; + int off = 0; + /* int change to size_t for buffer overflow on OTT-5057 */ + size_t left = count; + int wait_num = 0; + unsigned long 
flags; + + while (left > 0) { + ret = threadrw_write_onece(task, + task->file, + stbuf, buf + off, left); + + /* firstly check ret < 0, avoid the risk of -EAGAIN in ret + * implicit convert to size_t when compare with "size_t left". + */ + if (ret < 0) { + if (off > 0) { + break; /*have write ok some data. */ + } else if (ret == -EAGAIN) { + if (!(task->file->f_flags & O_NONBLOCK) && + (++wait_num < 10)) { + wait_event_interruptible_timeout( + task->wq, + !kfifo_is_empty( + &task->freefifo), + HZ / 100); + continue; /* write again. */ + } + ret = -EAGAIN; + break; + } + break; /*to end */ + } else if (ret >= left) { + off = count; + left = 0; + } else if (ret > 0) { + off += ret; + left -= ret; + } + } + + /*end: */ + spin_lock_irqsave(&task->lock, flags); + if (off > 0) { + task->buffered_data_size += off; + task->data_offset += off; + } + spin_unlock_irqrestore(&task->lock, flags); + if (off > 0) + return off; + else + return ret; +} + +static int do_write_work_in(struct threadrw_write_task *task) +{ + struct threadrw_buf *rwbuf = NULL; + int ret; + int need_re_write = 0; + int write_len = 0; + unsigned long flags; + + if (kfifo_is_empty(&task->datafifo)) + return 0; + if (!kfifo_peek(&task->datafifo, (void *)&rwbuf)) + return 0; + if (!task->manual_write && + rwbuf->from_cma && + !rwbuf->write_off) + codec_mm_dma_flush(rwbuf->vbuffer, + rwbuf->buffer_size, + DMA_TO_DEVICE); + if (task->manual_write) { + ret = task->write(task->file, task->sbuf, + (const char __user *)rwbuf->vbuffer + rwbuf->write_off, + rwbuf->data_size, + 2); /* noblock,virtual addr */ + } else { + ret = task->write(task->file, task->sbuf, + (const char __user *)rwbuf->dma_handle + rwbuf->write_off, + rwbuf->data_size, + 3); /* noblock,phy addr */ + } + if (ret == -EAGAIN) { + need_re_write = 0; + /*do later retry. 
*/ + } else if (ret >= rwbuf->data_size) { + write_len += rwbuf->data_size; + if (kfifo_get(&task->datafifo, (void *)&rwbuf)) { + rwbuf->data_size = 0; + kfifo_put(&task->freefifo, (const void *)rwbuf); + /*wakeup write thread. */ + wake_up_interruptible(&task->wq); + } else + pr_err("write ok,but kfifo_get data failed.!!!\n"); + need_re_write = 1; + } else if (ret > 0) { + rwbuf->data_size -= ret; /* half data write */ + rwbuf->write_off += ret; + write_len += ret; + need_re_write = 1; + } else { /*ret <=0 */ + pr_err("get errors ret=%d size=%d\n", ret, + rwbuf->data_size); + task->errors = ret; + } + if (write_len > 0) { + spin_lock_irqsave(&task->lock, flags); + task->passed_data_len += write_len; + spin_unlock_irqrestore(&task->lock, flags); + } + return need_re_write; + +} + +static void do_write_work(struct work_struct *work) +{ + struct threadrw_write_task *task = container_of(work, + struct threadrw_write_task, + write_work.work); + int need_retry = 1; + + task->writework_on = 1; + while (need_retry) { + mutex_lock(&task->mutex); + need_retry = do_write_work_in(task); + mutex_unlock(&task->mutex); + } + threadrw_schedule_delayed_work(task, HZ / 10); + task->writework_on = 0; +} + +static int alloc_task_buffers_inlock(struct threadrw_write_task *task, + int new_bubffers, + int block_size) +{ + struct threadrw_buf *rwbuf; + int i; + int used_codec_mm = task->manual_write ? 
0 : 1; + int new_num = new_bubffers; + int mm_slot = -1; + int start_idx = task->bufs_num; + int total_mm = 0; + unsigned long addr; + + if (codec_mm_get_total_size() < 80 || + codec_mm_get_free_size() < 40) + used_codec_mm = 0; + if (task->bufs_num + new_num > task->max_bufs) + new_num = task->max_bufs - task->bufs_num; + for (i = 0; i < MAX_MM_BUFFER_NUM; i++) { + if (task->codec_mm_buffer[i] == 0) { + mm_slot = i; + break; + } + } + if (mm_slot < 0) + used_codec_mm = 0; + if (block_size <= 0) + block_size = DEFAULT_BLOCK_SIZE; + + if (used_codec_mm && (block_size * new_num) >= 128 * 1024) { + total_mm = ALIGN(block_size * new_num, (1 << 17)); + addr = + codec_mm_alloc_for_dma(BUF_NAME, + total_mm / PAGE_SIZE, 0, + CODEC_MM_FLAGS_DMA_CPU); + if (addr != 0) { + task->codec_mm_buffer[mm_slot] = addr; + task->buffer_size += total_mm; + } else { + used_codec_mm = 0; + } + } + for (i = 0; i < new_num; i++) { + int bufidx = start_idx + i; + + rwbuf = &task->buf[bufidx]; + rwbuf->buffer_size = block_size; + if (used_codec_mm) { + unsigned long start_addr = + task->codec_mm_buffer[mm_slot]; + if (i == new_num - 1) + rwbuf->buffer_size = total_mm - + block_size * i; + rwbuf->dma_handle = (dma_addr_t) start_addr + + block_size * i; + rwbuf->vbuffer = codec_mm_phys_to_virt( + rwbuf->dma_handle); + rwbuf->from_cma = 1; + + } else { + rwbuf->vbuffer = dma_alloc_coherent( + amports_get_dma_device(), + rwbuf->buffer_size, + &rwbuf->dma_handle, GFP_KERNEL); + if (!rwbuf->vbuffer) { + rwbuf->buffer_size = 0; + rwbuf->dma_handle = 0; + task->bufs_num = bufidx; + break; + } + rwbuf->from_cma = 0; + task->buffer_size += rwbuf->buffer_size; + } + + kfifo_put(&task->freefifo, (const void *)rwbuf); + task->bufs_num = bufidx + 1; + } + if (start_idx > 0 ||/*have buffers before*/ + task->bufs_num >= 3 || + task->bufs_num == new_num) { + if (!task->def_block_size) + task->def_block_size = task->buf[0].buffer_size; + return 0; /*must >=3 for swap buffers. 
*/ + } + if (task->bufs_num > 0) + free_task_buffers(task); + return -1; +} + +static int free_task_buffers(struct threadrw_write_task *task) +{ + int i; + + for (i = 0; i < MAX_MM_BUFFER_NUM; i++) { + if (task->codec_mm_buffer[i]) + codec_mm_free_for_dma(BUF_NAME, + task->codec_mm_buffer[i]); + } + for (i = 0; i < task->bufs_num; i++) { + if (task->buf[i].vbuffer && task->buf[i].from_cma == 0) + dma_free_coherent(amports_get_dma_device(), + task->buf[i].buffer_size, + task->buf[i].vbuffer, + task->buf[i].dma_handle); + } + return 0; +} + +static struct threadrw_write_task *threadrw_alloc_in(int num, + int block_size, + ssize_t (*write)(struct file *, + struct stream_buf_s *, + const char __user *, size_t, int), + int flags) +{ + int max_bufs = num; + int task_buffer_size; + struct threadrw_write_task *task; + int ret; + + if (!(flags & 1)) /*not audio*/ + max_bufs = 300; /*can great for video bufs.*/ + task_buffer_size = sizeof(struct threadrw_write_task) + + sizeof(struct threadrw_buf) * max_bufs; + task = vmalloc(task_buffer_size); + + if (!task) + return NULL; + memset(task, 0, task_buffer_size); + + spin_lock_init(&task->lock); + mutex_init(&task->mutex); + INIT_DELAYED_WORK(&task->write_work, do_write_work); + init_waitqueue_head(&task->wq); + ret = kfifo_alloc(&task->datafifo, max_bufs, GFP_KERNEL); + if (ret) + goto err1; + ret = kfifo_alloc(&task->freefifo, max_bufs, GFP_KERNEL); + if (ret) + goto err2; + task->write = write; + task->file = NULL; + task->buffer_size = 0; + task->manual_write = flags & 1; + task->max_bufs = max_bufs; + mutex_lock(&task->mutex); + ret = alloc_task_buffers_inlock(task, num, block_size); + mutex_unlock(&task->mutex); + if (ret < 0) + goto err3; + threadrw_wq_get(); /*start thread. 
*/ + return task; + +err3: + kfifo_free(&task->freefifo); +err2: + kfifo_free(&task->datafifo); +err1: + vfree(task); + pr_err("alloc threadrw failed num:%d,block:%d\n", num, block_size); + return NULL; +} + +/* + *fifo data size; + */ + +void threadrw_update_buffer_level(struct stream_buf_s *stbuf, + int parsed_size) +{ + struct threadrw_write_task *task = stbuf->write_thread; + unsigned long flags; + + if (task) + { + spin_lock_irqsave(&task->lock, flags); + task->buffered_data_size -= parsed_size; + spin_unlock_irqrestore(&task->lock, flags); + } + +} +EXPORT_SYMBOL(threadrw_update_buffer_level); + +int threadrw_buffer_level(struct stream_buf_s *stbuf) +{ + struct threadrw_write_task *task = stbuf->write_thread; + + if (task) + return task->buffered_data_size; + return 0; +} + +int threadrw_buffer_size(struct stream_buf_s *stbuf) +{ + struct threadrw_write_task *task = stbuf->write_thread; + + if (task) + return task->buffer_size; + return 0; +} + +int threadrw_datafifo_len(struct stream_buf_s *stbuf) +{ + struct threadrw_write_task *task = stbuf->write_thread; + + if (task) + return kfifo_len(&task->datafifo); + return 0; +} + +int threadrw_freefifo_len(struct stream_buf_s *stbuf) +{ + struct threadrw_write_task *task = stbuf->write_thread; + + if (task) + return kfifo_len(&task->freefifo); + return 0; +} +int threadrw_support_more_buffers(struct stream_buf_s *stbuf) +{ + struct threadrw_write_task *task = stbuf->write_thread; + + if (!task) + return 0; + if (task->failed_onmore) + return 0; + return task->max_bufs - task->bufs_num; +} + +/* + *data len out fifo; + */ +int threadrw_passed_len(struct stream_buf_s *stbuf) +{ + struct threadrw_write_task *task = stbuf->write_thread; + + if (task) + return task->passed_data_len; + return 0; + +} +/* + *all data writed.; + */ +int threadrw_dataoffset(struct stream_buf_s *stbuf) +{ + struct threadrw_write_task *task = stbuf->write_thread; + int offset = 0; + + if (task) + return task->data_offset; + return offset; + 
+} + +ssize_t threadrw_write(struct file *file, struct stream_buf_s *stbuf, + const char __user *buf, size_t count) +{ + struct threadrw_write_task *task = stbuf->write_thread; + ssize_t size; + + if (!task->file) { + task->file = file; + task->sbuf = stbuf; + } + mutex_lock(&task->mutex); + size = threadrw_write_in(task, stbuf, buf, count); + mutex_unlock(&task->mutex); + return size; +} + +int threadrw_flush_buffers(struct stream_buf_s *stbuf) +{ + struct threadrw_write_task *task = stbuf->write_thread; + int max_retry = 20; + + if (!task) + return 0; + while (!kfifo_is_empty(&task->datafifo) && max_retry-- > 0) { + threadrw_schedule_delayed_work(task, 0); + msleep(20); + } + if (!kfifo_is_empty(&task->datafifo)) + return -1;/*data not flushed*/ + return 0; +} +int threadrw_alloc_more_buffer_size( + struct stream_buf_s *stbuf, + int size) +{ + struct threadrw_write_task *task = stbuf->write_thread; + int block_size; + int new_num; + int ret = -1; + int old_num; + + if (!task) + return -1; + mutex_lock(&task->mutex); + block_size = task->def_block_size; + if (block_size == 0) + block_size = 32 * 1024; + new_num = size / block_size; + old_num = task->bufs_num; + if (new_num == 0) + new_num = 1; + else if (new_num > task->max_bufs - task->bufs_num) + new_num = task->max_bufs - task->bufs_num; + if (new_num != 0) + ret = alloc_task_buffers_inlock(task, new_num, + block_size); + mutex_unlock(&task->mutex); + pr_info("threadrw add more buffer from %d -> %d for size %d\n", + old_num, task->bufs_num, + size); + if (ret < 0 || old_num == task->bufs_num) + task->failed_onmore = 1; + return ret; +} + +void *threadrw_alloc(int num, + int block_size, + ssize_t (*write)(struct file *, + struct stream_buf_s *, + const char __user *, + size_t, int), + int flags) +{ + return threadrw_alloc_in(num, block_size, write, flags); +} + +void threadrw_release(struct stream_buf_s *stbuf) +{ + struct threadrw_write_task *task = stbuf->write_thread; + + if (task) { + 
wake_up_interruptible(&task->wq); + cancel_delayed_work_sync(&task->write_work); + mutex_lock(&task->mutex); + free_task_buffers(task); + mutex_unlock(&task->mutex); + kfifo_free(&task->freefifo); + kfifo_free(&task->datafifo); + vfree(task); + } + stbuf->write_thread = NULL; +}
diff --git a/drivers/stream_input/amports/thread_rw.h b/drivers/stream_input/amports/thread_rw.h new file mode 100644 index 0000000..4a93dee --- /dev/null +++ b/drivers/stream_input/amports/thread_rw.h
/*
 * drivers/amlogic/media/stream_input/parser/thread_rw.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

/*
 * Public API of the buffered stream writer (thread_rw.c): user writes
 * are staged in DMA bounce buffers and drained into the stream buffer
 * by a work item.  The task handle lives in stream_buf_s.write_thread.
 */

#ifndef THREAD_RW_H
#define THREAD_RW_H
#include "../../stream_input/amports/streambuf_reg.h"
#include "../../stream_input/amports/streambuf.h"
#include "../../stream_input/parser/esparser.h"
#include "../../stream_input/amports/amports_priv.h"

/* Buffer @count bytes from user space; returns bytes accepted or <0. */
ssize_t threadrw_write(struct file *file,
	struct stream_buf_s *stbuf,
	const char __user *buf,
	size_t count);

/* Create a task with @num buffers of @block_size; low-level writer in
 * @write.  Returns an opaque task pointer or NULL.
 */
void *threadrw_alloc(int num,
	int block_size,
	ssize_t (*write)(struct file *,
		struct stream_buf_s *,
		const char __user *,
		size_t, int),
	int flags);/*flags &1: manual mode*/

/* Stop the drain work and free all task resources. */
void threadrw_release(struct stream_buf_s *stbuf);

/* Byte/buffer accounting queries; all return 0 when no task exists. */
int threadrw_buffer_level(struct stream_buf_s *stbuf);
int threadrw_buffer_size(struct stream_buf_s *stbuf);
int threadrw_datafifo_len(struct stream_buf_s *stbuf);
int threadrw_freefifo_len(struct stream_buf_s *stbuf);
int threadrw_passed_len(struct stream_buf_s *stbuf);
/* Drain pending data (bounded wait); -1 if data remained. */
int threadrw_flush_buffers(struct stream_buf_s *stbuf);
int threadrw_dataoffset(struct stream_buf_s *stbuf);
/* Grow the buffer pool by roughly @size bytes. */
int threadrw_alloc_more_buffer_size(
	struct stream_buf_s *stbuf,
	int size);
int threadrw_support_more_buffers(struct stream_buf_s *stbuf);
/* Credit @parsed_size bytes as consumed by the parser. */
void threadrw_update_buffer_level(struct stream_buf_s *stbuf,
	int parsed_size);
#endif
diff --git a/drivers/stream_input/parser/dvb_ci/Makefile b/drivers/stream_input/parser/dvb_ci/Makefile new file mode 100644 index 0000000..47e295d --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/Makefile
# Kbuild makefile for the Amlogic DVB-CI (common interface) module.
# Builds a single module ci.ko from the PCMCIA/CI-bus sources plus the
# CIMCU EN50221 stack.  CIMAX / CIMAX-USB variants are kept disabled.
obj-m += ci.o
#cimax-usb.o

# NOTE: lazy '=' is required here — amlci-objs/cimcu-objs are defined
# *below*; ':=' would expand to empty at this point.
ci-objs = $(amlci-objs) $(cimcu-objs)
#$(cimax-objs)

amlci-objs = aml_pcmcia.o aml_ci.o aml_ci_bus.o
#aml_spi.o
cimcu-objs = cimax/dvb_ringbuffer.o cimcu/dvb_ca_en50221_cimcu.o
#cimax-objs = cimax/dvb_ca_en50221_cimax.o cimax/aml_cimax.o cimax/dvb_ringbuffer.o
# Silence implicit-fallthrough warnings in the imported EN50221 code.
KBUILD_CFLAGS += -Wno-implicit-fallthrough
#cimax-usb-objs += cimax/usb/SRC/cimax+usb-driver.o cimax/usb/SRC/cimax+usb_fw.o
#cimax-usb-objs += cimax/usb/SRC/cimax+usb_config.o
#cimax-objs += cimax/aml_cimax_usb.o

# Extra include paths into the kernel tree (dvb-core headers etc.).
ccflags-y += -I$(srctree)/
ccflags-y += -I$(srctree)/include
ccflags-y += -I$(srctree)/drivers/gpio
ccflags-y += -I$(srctree)/drivers/media/dvb-core
ccflags-y += -I$(srctree)/include/media
ccflags-y += -I$(srctree)/drivers/media/pci/ttpci
diff --git a/drivers/stream_input/parser/dvb_ci/aml_ci.c b/drivers/stream_input/parser/dvb_ci/aml_ci.c new file mode 100644 index 0000000..f6c27fd --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/aml_ci.c
@@ -0,0 +1,828 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/version.h> +#include <linux/kernel.h> +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/sysfs.h> +#include <linux/of.h> +#include "aml_ci.h" +//#include "aml_spi.h" +#include "aml_ci_bus.h" +//#include "cimax/aml_cimax.h" + +//#include "dvb_ca_en50221.h" +#include <dvbdev.h> + +MODULE_PARM_DESC(aml_ci_debug, "\n\t\t dvb ci debug"); +static int aml_ci_debug = 1; +module_param(aml_ci_debug, int, S_IRUGO); + +#define pr_dbg(args...)\ + do {\ + if (aml_ci_debug)\ + printk(args);\ + } while (0) +#define pr_error(fmt, args...) 
printk("DVBCI: " fmt, ## args) + + +extern struct dvb_adapter *aml_get_dvb_adapter(void); +/**\brief aml_ci_mem_read:mem read from cam + * \param en50221: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \param addr: read addr + * \return + * - read value:ok + * - -EINVAL : error + */ +static int aml_ci_mem_read(struct dvb_ca_en50221_cimcu *en50221, int slot, int addr) +{ + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot !=0 %s :%d\r\n", __func__, slot); + return -EINVAL; + } + + if (ci->ci_mem_read != NULL) + return ci->ci_mem_read(ci, slot, addr); + + pr_error("ci_mem_read is null %s\r\n", __func__); + return -EINVAL; +} +/**\brief aml_ci_mem_write:mem write to cam + * \param en50221: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \param addr: write addr + * \param addr: write value + * \return + * - 0:ok + * - -EINVAL : error + */ +static int aml_ci_mem_write(struct dvb_ca_en50221_cimcu *en50221, + int slot, int addr, u8 data) +{ + + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot not 0 %s :%d\r\n", __func__, slot); + return -EINVAL; + } + + if (ci->ci_mem_write != NULL) + return ci->ci_mem_write(ci, slot, addr, data); + pr_error("ci_mem_write is null %s\r\n", __func__); + return -EINVAL; +} +/**\brief aml_ci_io_read:io read from cam + * \param en50221: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \param addr: read addr + * \return + * - read value:ok + * - -EINVAL : error + */ +static int aml_ci_io_read(struct dvb_ca_en50221_cimcu *en50221, int slot, u8 addr) +{ + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot !=0 %s :%d\r\n", __func__, slot); + return -EINVAL; + } + + if (ci->ci_io_read != NULL) + return ci->ci_io_read(ci, slot, addr); + + pr_error("ci_io_read is null %s\r\n", __func__); + return -EINVAL; +} +/**\brief aml_ci_io_write:io write to cam + * \param en50221: en50221 obj,used this data 
to get dvb_ci obj + * \param slot: slot index + * \param addr: write addr + * \param addr: write value + * \return + * - 0:ok + * - -EINVAL : error + */ +static int aml_ci_io_write(struct dvb_ca_en50221_cimcu *en50221, + int slot, u8 addr, u8 data) +{ + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot !=0 %s :%d\r\n", __func__, slot); + return -EINVAL; + } + + if (ci->ci_mem_write != NULL) + return ci->ci_io_write(ci, slot, addr, data); + + pr_error("ci_io_write is null %s\r\n", __func__); + return -EINVAL; +} +/**\brief aml_ci_slot_reset:reset slot + * \param en50221: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \return + * - 0:ok + * - -EINVAL : error + */ +static int aml_ci_slot_reset(struct dvb_ca_en50221_cimcu *en50221, int slot) +{ + struct aml_ci *ci = en50221->data; + pr_dbg("Slot(%d): Slot RESET\n", slot); + if (ci->ci_slot_reset != NULL) { + ci->ci_slot_reset(ci, slot); + } else { + pr_error("ci_slot_reset is null %s\r\n", __func__); + return -EINVAL; + } + return 0; +} +/**\brief aml_ci_slot_shutdown:show slot + * \param en50221: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \return + * - 0:ok + * - -EINVAL : error + */ +static int aml_ci_slot_shutdown(struct dvb_ca_en50221_cimcu *en50221, int slot) +{ + struct aml_ci *ci = en50221->data; + pr_dbg("Slot(%d): Slot shutdown\n", slot); + if (ci->ci_slot_shutdown != NULL) { + ci->ci_slot_shutdown(ci, slot); + } else { + pr_error("aml_ci_slot_shutdown is null %s\r\n", __func__); + return -EINVAL; + } + return 0; +} +/**\brief aml_ci_ts_control:control slot ts + * \param en50221: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \return + * - 0:ok + * - -EINVAL : error + */ +static int aml_ci_ts_control(struct dvb_ca_en50221_cimcu *en50221, int slot) +{ + + struct aml_ci *ci = en50221->data; + pr_dbg("Slot(%d): TS control\n", slot); + if (ci->ci_slot_ts_enable != NULL) { + ci->ci_slot_ts_enable(ci, 
slot); + } else { + pr_error("aml_ci_ts_control is null %s\r\n", __func__); + return -EINVAL; + } + return 0; +} +/**\brief aml_ci_slot_status:get slot status + * \param en50221: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \param open: no used + * \return + * - cam status + * - -EINVAL : error + */ +static int aml_ci_slot_status(struct dvb_ca_en50221_cimcu *en50221, + int slot, int open) +{ + struct aml_ci *ci = en50221->data; + + //pr_dbg("Slot(%d): Poll Slot status\n", slot); + + if (ci->ci_poll_slot_status != NULL) { + return ci->ci_poll_slot_status(ci, slot, open); + } else { + /*pr_error("ci_poll_slot_status is null %s\r\n", __func__);*/ + } + + return 0; +} +/**\brief aml_ci_slot_status:get slot status + * \param en50221: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \param open: no used + * \return + * - cam status + * - -EINVAL : error + */ +static int aml_ci_slot_wakeup(struct dvb_ca_en50221_cimcu *en50221, + int slot) +{ + struct aml_ci *ci = en50221->data; + + if (ci->ci_get_slot_wakeup != NULL) { + return ci->ci_get_slot_wakeup(ci, slot); + } else { + /*pr_error("aml_ci_slot_wakeup is null %s\r\n", __func__);*/ + } + + return 1; +} + +#if 0 +static int aml_ci_cimax_slot_reset(struct dvb_ca_en50221_cimax *en50221, + int slot) +{ + struct aml_ci *ci = en50221->data; + pr_dbg("Slot(%d): Slot RESET\n", slot); + if (ci->ci_slot_reset != NULL) { + ci->ci_slot_reset(ci, slot); + } else { + pr_error("ci_slot_reset is null %s\r\n", __func__); + return -EINVAL; + } + return 0; +} +/**\brief aml_ci_slot_shutdown:show slot + * \param en50221: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \return + * - 0:ok + * - -EINVAL : error + */ +static int aml_ci_cimax_slot_shutdown(struct dvb_ca_en50221_cimax *en50221, + int slot) +{ + struct aml_ci *ci = en50221->data; + pr_dbg("Slot(%d): Slot shutdown\n", slot); + if (ci->ci_slot_shutdown != NULL) { + ci->ci_slot_shutdown(ci, 
slot); + } else { + pr_error("aml_ci_slot_shutdown is null %s\r\n", __func__); + return -EINVAL; + } + return 0; +} +/**\brief aml_ci_ts_control:control slot ts + * \param en50221: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \return + * - 0:ok + * - -EINVAL : error + */ +static int aml_ci_cimax_ts_control(struct dvb_ca_en50221_cimax *en50221, + int slot) +{ + + struct aml_ci *ci = en50221->data; + pr_dbg("Slot(%d): TS control\n", slot); + if (ci->ci_slot_ts_enable != NULL) { + ci->ci_slot_ts_enable(ci, slot); + } else { + pr_error("aml_ci_ts_control is null %s\r\n", __func__); + return -EINVAL; + } + return 0; +} +/**\brief aml_ci_cimax_slot_status:get slot status + * \param en50221: en50221_cimax obj,used this data to get dvb_ci obj + * \param slot: slot index + * \param open: no used + * \return + * - cam status + * - -EINVAL : error + */ +static int aml_ci_cimax_slot_status( + struct dvb_ca_en50221_cimax *en50221, int slot, int open) +{ + struct aml_ci *ci = en50221->data; + + /*pr_dbg("Slot(%d): Poll Slot status\n", slot);*/ + + if (ci->ci_poll_slot_status != NULL) { + return ci->ci_poll_slot_status(ci, slot, open); + } else { + /*pr_error("ci_poll_slot_status is null %s\r\n", __func__);*/ + } + + return 0; +} + +/**\brief aml_ci_read_cis: read cis + * \param en50221_max: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \param buf: buf for cis data + * \param size: buf size + * \return + * --EINVAL : error + * - : actual size read + */ +static int aml_ci_read_cis(struct dvb_ca_en50221_cimax *en50221, + int slot, u8 *buf, int size) +{ + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot !=0 %s :%d\r\n", __func__, slot); + return -EINVAL; + } + + if (ci->ci_read_cis != NULL) + return ci->ci_read_cis(ci, slot, buf, size); + + pr_error("ci_read_cis is null %s\r\n", __func__); + return -EINVAL; +} +/**\brief aml_ci_write_cor: write cor + * \param en50221_max: en50221 obj,used this 
data to get dvb_ci obj + * \param slot: slot index + * \param addr: + * \param buf: + * \return + * --EINVAL : error + * -0 : ok + */ +static int aml_ci_write_cor(struct dvb_ca_en50221_cimax *en50221, + int slot, int address, u8 *buf) +{ + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot !=0 %s :%d\r\n", __func__, slot); + return -EINVAL; + } + + if (ci->ci_write_cor != NULL) + return ci->ci_write_cor(ci, slot, address, buf); + + pr_error("ci_write_cor is null %s\r\n", __func__); + return -EINVAL; +} +/**\brief aml_ci_negociate: negotiate + * \param en50221_max: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \param size: suggested size + * \return + * --EINVAL : error + * - : size negotiated + */ +static int aml_ci_negotiate(struct dvb_ca_en50221_cimax *en50221, + int slot, int size) +{ + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot !=0 %s :%d\r\n", __func__, slot); + return -EINVAL; + } + + if (ci->ci_negotiate != NULL) + return ci->ci_negotiate(ci, slot, size); + + pr_error("ci_negotiate is null %s\r\n", __func__); + return -EINVAL; +} +/**\brief aml_ci_read_lpdu: read lpdu + * \param en50221_max: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \param buf: buf + * \param size: buf size + * \return + * --EINVAL : error + * - : size read + */ +static int aml_ci_read_lpdu(struct dvb_ca_en50221_cimax *en50221, + int slot, u8 *buf, int size) +{ + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot !=0 %s :%d\r\n", __func__, slot); + return -EINVAL; + } + + if (ci->ci_read_lpdu != NULL) + return ci->ci_read_lpdu(ci, slot, buf, size); + + pr_error("ci_read_lpdu is null %s\r\n", __func__); + return -EINVAL; +} + +/**\brief aml_ci_write_lpdu: write lpdu + * \param en50221_max: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \param buf: buf + * \param size: write size + * \return + * --EINVAL : error + * - : size 
written + */ +static int aml_ci_write_lpdu(struct dvb_ca_en50221_cimax *en50221, + int slot, u8 *buf, int size) +{ + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot !=0 %s :%d\r\n", __func__, slot); + return -EINVAL; + } + + if (ci->ci_write_lpdu != NULL) + return ci->ci_write_lpdu(ci, slot, buf, size); + + pr_error("ci_write_lpdu is null %s\r\n", __func__); + return -EINVAL; +} + +static int aml_ci_read_cam_status(struct dvb_ca_en50221_cimax *en50221, + int slot) +{ + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot !=0 %s :%d\r\n", __func__, slot); + return -EINVAL; + } + + if (ci->ci_read_cam_status != NULL) + return ci->ci_read_cam_status(ci, slot); + + pr_error("ci_read_cam_status is null %s\r\n", __func__); + return -EINVAL; +} + +static int aml_ci_cam_reset(struct dvb_ca_en50221_cimax *en50221, int slot) +{ + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot !=0 %s :%d\r\n", __func__, slot); + return -EINVAL; + } + + if (ci->ci_cam_reset != NULL) + return ci->ci_cam_reset(ci, slot); + + pr_error("ci_cam_reset is null %s\r\n", __func__); + return -EINVAL; +} + +/**\brief aml_ci_get_capbility + * \param en50221_max: en50221 obj,used this data to get dvb_ci obj + * \param slot: slot index + * \return + * - : capbilities + */ +static int aml_ci_get_capbility(struct dvb_ca_en50221_cimax *en50221, int slot) +{ + struct aml_ci *ci = en50221->data; + + if (slot != 0) { + pr_error("slot !=0 %s :%d\r\n", __func__, slot); + return 0; + } + + if (ci->ci_get_capbility != NULL) + return ci->ci_get_capbility(ci, slot); + + pr_error("ci_get_capbility is null %s\r\n", __func__); + return 0; +} +#endif + +/**\brief get ci config from dts + * \param np: device node + * \return + * - 0 成功 + * - 其他值 : + */ +static int aml_ci_get_config_from_dts(struct platform_device *pdev, + struct aml_ci *ci) +{ + char buf[32]; + int ret = 0; + int value; + + snprintf(buf, sizeof(buf), "%s", "io_type"); + ret = 
of_property_read_u32(pdev->dev.of_node, buf, &value); + if (!ret) { + pr_dbg("%s: 0x%x\n", buf, value); + ci->io_type = value; + } + return 0; +} + +/**\brief aml_ci_init:ci dev init + * \param pdev: platform_device device node,used to get dts info + * \param dvb: aml_dvb obj,used to get dvb_adapter for en0211 to use + * \param cip: ci_dev pp + * \return + * - 0 成功 + * - 其他值 : + */ +int aml_ci_init(struct platform_device *pdev, + struct dvb_adapter *dvb_adapter, struct aml_ci **cip) +{ + struct aml_ci *ci = NULL; + int ca_flags = 0, result; + + ci = kzalloc(sizeof(struct aml_ci), GFP_KERNEL); + if (!ci) { + pr_error("Out of memory!, exiting ..\n"); + result = -ENOMEM; + goto err; + } + ci->id = 0; + aml_ci_get_config_from_dts(pdev, ci); + +// ci->priv = dvb; + /* register CA interface */ +#if 0 + if (ci->io_type == AML_DVB_IO_TYPE_CIMAX) { + ci->en50221_cimax.owner = THIS_MODULE; + ci->en50221_cimax.read_cis = aml_ci_read_cis; + ci->en50221_cimax.write_cor = aml_ci_write_cor; + ci->en50221_cimax.negotiate = aml_ci_negotiate; + ci->en50221_cimax.read_lpdu = aml_ci_read_lpdu; + ci->en50221_cimax.write_lpdu = aml_ci_write_lpdu; + ci->en50221_cimax.read_cam_status = aml_ci_read_cam_status; + ci->en50221_cimax.cam_reset = aml_ci_cam_reset; + ci->en50221_cimax.get_capbility = aml_ci_get_capbility; + ci->en50221_cimax.slot_reset = aml_ci_cimax_slot_reset; + ci->en50221_cimax.slot_shutdown = aml_ci_cimax_slot_shutdown; + ci->en50221_cimax.slot_ts_enable = aml_ci_cimax_ts_control; + ci->en50221_cimax.poll_slot_status = aml_ci_cimax_slot_status; + ci->en50221_cimax.data = ci; + + pr_dbg("Registering EN50221 CIMAX device\n"); + result = dvb_ca_en50221_cimax_init(dvb_adapter, + &ci->en50221_cimax, ca_flags, 1); + if (result != 0) { + pr_error("EN50221 CIMAX: Initialization failed <%d>\n", + result); + goto err; + } + } else +#endif + { + ca_flags = ~DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE; + /* register CA interface */ + ci->en50221_cimcu.owner = THIS_MODULE; + 
ci->en50221_cimcu.read_attribute_mem = aml_ci_mem_read; + ci->en50221_cimcu.write_attribute_mem = aml_ci_mem_write; + ci->en50221_cimcu.read_cam_control = aml_ci_io_read; + ci->en50221_cimcu.write_cam_control = aml_ci_io_write; + ci->en50221_cimcu.slot_reset = aml_ci_slot_reset; + ci->en50221_cimcu.slot_shutdown = aml_ci_slot_shutdown; + ci->en50221_cimcu.slot_ts_enable = aml_ci_ts_control; + ci->en50221_cimcu.poll_slot_status = aml_ci_slot_status; + ci->en50221_cimcu.get_slot_wakeup = aml_ci_slot_wakeup; + + ci->en50221_cimcu.data = ci; + + + pr_dbg("Registering EN50221 device\n"); + result = dvb_ca_en50221_cimcu_init(dvb_adapter, + &ci->en50221_cimcu, ca_flags, 1); + if (result != 0) { + pr_error("EN50221_cimcu: Initialization failed <%d>\n", + result); + goto err; + } + } + *cip = ci; + pr_dbg("Registered EN50221 device\n"); +#if 0 + if (ci->io_type == AML_DVB_IO_TYPE_SPI || ci->io_type == AML_DVB_IO_TYPE_SPI_T312) { + /* spi init */ + ci->ci_init = aml_spi_init; + ci->ci_exit = aml_spi_exit; + } else if (ci->io_type == AML_DVB_IO_TYPE_CIMAX) { + ci->ci_init = aml_cimax_init; + ci->ci_exit = aml_cimax_exit; + } else +#endif + if (ci->io_type == AML_DVB_IO_TYPE_CIBUS) { + ci->ci_init = aml_ci_bus_init; + ci->ci_exit = aml_ci_bus_exit; + } else { + /* no io dev init,is error */ + pr_dbg("unknown io type, please check io_type in dts file\r\n"); + } + + if (ci->ci_init) + result = ci->ci_init(pdev, ci); +#if 0 + if (ci->io_type == AML_DVB_IO_TYPE_CIMAX) { + if (result) + dvb_ca_en50221_cimax_release(&ci->en50221_cimax); + } +#endif + return result; +err: + kfree(ci); + return result; +} + +void aml_ci_exit(struct aml_ci *ci) +{ + pr_dbg("Unregistering EN50221 device\n"); + if (ci) { +#if 0 + if (ci->io_type == AML_DVB_IO_TYPE_CIMAX) + dvb_ca_en50221_cimax_release(&ci->en50221_cimax); + else +#endif + dvb_ca_en50221_cimcu_release(&ci->en50221_cimcu); + if (ci->ci_exit) + ci->ci_exit(ci); + kfree(ci); + } +} + +static struct aml_ci *ci_dev; + +static ssize_t 
ts_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret; + ret = sprintf(buf, "ts%d\n", 1); + return ret; +} +static CLASS_ATTR_RO(ts); + +static struct attribute *aml_ci_attrs[] = { + &class_attr_ts.attr, + NULL +}; + +ATTRIBUTE_GROUPS(aml_ci); + +static int aml_ci_register_class(struct aml_ci *ci) +{ + #define CLASS_NAME_LEN 48 + int ret; + struct class *clp; + + clp = &(ci->class); + + clp->name = kzalloc(CLASS_NAME_LEN, GFP_KERNEL); + if (!clp->name) + return -ENOMEM; + + snprintf((char *)clp->name, CLASS_NAME_LEN, "amlci-%d", ci->id); + clp->owner = THIS_MODULE; + clp->class_groups = aml_ci_groups; + ret = class_register(clp); + if (ret) + kfree(clp->name); + + return 0; +} + +static int aml_ci_unregister_class(struct aml_ci *ci) +{ + class_unregister(&ci->class); + kzfree(ci->class.name); + return 0; +} + + +static int aml_ci_probe(struct platform_device *pdev) +{ + struct dvb_adapter *dvb_adapter = aml_get_dvb_adapter(); + int err = 0; + pr_dbg("---Amlogic CI Init---[%p]\n", dvb_adapter); + + err = aml_ci_init(pdev, dvb_adapter, &ci_dev); + if (err < 0) + return err; + platform_set_drvdata(pdev, ci_dev); + aml_ci_register_class(ci_dev); +#if 0 + if (ci_dev->io_type == AML_DVB_IO_TYPE_SPI || + ci_dev->io_type == AML_DVB_IO_TYPE_SPI_T312) + aml_spi_mod_init(); +#endif + return 0; +} + +static int aml_ci_remove(struct platform_device *pdev) +{ + aml_ci_unregister_class(ci_dev); + platform_set_drvdata(pdev, NULL); +#if 0 + if (ci_dev->io_type == AML_DVB_IO_TYPE_SPI || + ci_dev->io_type == AML_DVB_IO_TYPE_SPI_T312) { + aml_spi_exit(ci_dev); + aml_spi_mod_exit(); + } + else if (ci_dev->io_type == AML_DVB_IO_TYPE_CIMAX) + aml_cimax_exit(ci_dev); + else +#endif + if (ci_dev->io_type == AML_DVB_IO_TYPE_CIBUS) + aml_ci_bus_exit(ci_dev); + else + pr_dbg("---Amlogic CI remove unkown io type---\n"); + + aml_ci_exit(ci_dev); + return 0; +} + +static int aml_ci_suspend(struct platform_device *pdev, pm_message_t state) +{ + pr_dbg("Amlogic CI 
Suspend!\n"); +#if 0 + if (ci_dev->io_type == AML_DVB_IO_TYPE_SPI || + ci_dev->io_type == AML_DVB_IO_TYPE_SPI_T312) { + aml_spi_exit(ci_dev); + } + else if (ci_dev->io_type == AML_DVB_IO_TYPE_CIMAX) + aml_cimax_exit(ci_dev); + else +#endif + if (ci_dev->io_type == AML_DVB_IO_TYPE_CIBUS) + aml_ci_bus_exit(ci_dev); + else + pr_dbg("---Amlogic CI remove unkown io type---\n"); + + return 0; +} + +static int aml_ci_resume(struct platform_device *pdev) +{ + int err = 0; + pr_dbg("Amlogic CI Resume!\n"); +#if 0 + if (ci_dev->io_type == AML_DVB_IO_TYPE_SPI || + ci_dev->io_type == AML_DVB_IO_TYPE_SPI_T312) { + aml_spi_init(pdev, ci_dev); + } + else if (ci_dev->io_type == AML_DVB_IO_TYPE_CIMAX) + aml_cimax_init(pdev, ci_dev); + else +#endif + if (ci_dev->io_type == AML_DVB_IO_TYPE_CIBUS) + aml_ci_bus_init(pdev, ci_dev); + else + pr_dbg("---Amlogic CI remove unkown io type---\n"); + return err; +} + +static const struct of_device_id dvbci_dev_dt_match[] = { + { + .compatible = "amlogic, dvbci", + }, + {}, +}; + + + +static struct platform_driver aml_ci_driver = { + .probe = aml_ci_probe, + .remove = aml_ci_remove, + .suspend = aml_ci_suspend, + .resume = aml_ci_resume, + .driver = { + .name = "dvbci", + .of_match_table = dvbci_dev_dt_match, + .owner = THIS_MODULE, + } +}; + +static int aml_ci_mod_init(void) +{ + pr_dbg("Amlogic CI mode init\n"); + return platform_driver_register(&aml_ci_driver); +} + +static void aml_ci_mod_exit(void) +{ + pr_dbg("Amlogic CI mode Exit\n"); + platform_driver_unregister(&aml_ci_driver); +} + +module_init(aml_ci_mod_init); +module_exit(aml_ci_mod_exit); +MODULE_LICENSE("GPL"); +
diff --git a/drivers/stream_input/parser/dvb_ci/aml_ci.h b/drivers/stream_input/parser/dvb_ci/aml_ci.h new file mode 100644 index 0000000..d534557 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/aml_ci.h
@@ -0,0 +1,124 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ + +#ifndef __AML_CI_H_ +#define __AML_CI_H_ + +//#include "drivers/media/dvb-core/dvb_ca_en50221.h" +#include "cimcu/dvb_ca_en50221_cimcu.h" +//#include "cimax/dvb_ca_en50221_cimax.h" +#include <linux/platform_device.h> + +enum aml_dvb_io_type_e { + AML_DVB_IO_TYPE_IOBUS = 0, + AML_DVB_IO_TYPE_SPI, + AML_DVB_IO_TYPE_CIMAX, + AML_DVB_IO_TYPE_SPI_T312, + AML_DVB_IO_TYPE_CIBUS, + AML_DVB_IO_TYPE_MAX, +}; + +enum aml_gpio_level_e { + AML_GPIO_LOW = 0, + AML_GPIO_HIGH +}; +enum AM_CI_CMD { + AM_CI_CMD_IOR = 0, + AM_CI_CMD_IOW, + AM_CI_CMD_MEMR, + AM_CI_CMD_MEMW, + AM_CI_CMD_FULLTEST, + AM_CI_CMD_CISTEST, + AM_CI_CMD_GETCD12, + AM_CI_CMD_POWER, + AM_CI_CMD_RESET, + AM_CI_CMD_CONGPIO, +}; + +struct aml_ci { +// struct dvb_ca_en50221 en50221; + struct dvb_ca_en50221_cimcu en50221_cimcu; + struct mutex ci_lock; + int io_type; + void *priv; + int id; + struct class class; + + int (*ci_init)(struct platform_device *pdev, struct aml_ci *ci); + int (*ci_exit)(struct aml_ci *ci); + + /* NOTE: the read_*, write_* and poll_slot_status functions will be + * called for different slots concurrently and need to use locks where + * and if appropriate. 
There will be no concurrent access to one slot. + */ + + /* functions for accessing attribute memory on the CAM */ + int (*ci_mem_read)(struct aml_ci *ca, int slot, int address); + int (*ci_mem_write)(struct aml_ci *ca, int slot, int address, u8 value); + + /* functions for accessing the control interface on the CAM */ + int (*ci_io_read)(struct aml_ci *ca, int slot, int address); + int (*ci_io_write)(struct aml_ci *ca, int slot, int address, u8 value); + + /* Functions for controlling slots */ + int (*ci_slot_reset)(struct aml_ci *ca, int slot); + int (*ci_slot_shutdown)(struct aml_ci *ca, int slot); + int (*ci_slot_ts_enable)(struct aml_ci *ca, int slot); + + /* + * Poll slot status. + * Only necessary if DVB_CA_FLAG_EN50221_IRQ_CAMCHANGE is not set + */ + int (*ci_poll_slot_status)(struct aml_ci *ca, int slot, int open); + + int (*ci_get_slot_wakeup)(struct aml_ci *ca, int slot); + + //struct dvb_ca_en50221_cimax en50221_cimax; + + //int (*ci_read_cis)(struct aml_ci *ca, int slot, u8 *buf, int size); + //int (*ci_write_cor)(struct aml_ci *ca, int slot, int address, u8 *buf); + /*return the final size or -1 for error*/ + //int (*ci_negotiate)(struct aml_ci *ca, int slot, int size); + + /* functions for accessing the control interface on the CAM */ + //int (*ci_read_lpdu)(struct aml_ci *ca, int slot, u8 *buf, int size); + //int (*ci_write_lpdu)(struct aml_ci *ca, int slot, u8 *buf, int size); + + //int (*ci_get_capbility)(struct aml_ci *ca, int slot); + + //int (*ci_cam_reset)(struct aml_ci *ca, int slot); + //int (*ci_read_cam_status)(struct aml_ci *ca, int slot); + + /* private data, used by caller */ + void *data; +}; + +struct ci_dev_config_s { + char name[20]; + unsigned char type; + int cs_hold_delay; + int cs_clk_delay; +}; +extern int aml_ci_init(struct platform_device *pdev, + struct dvb_adapter *dvb_adapter, struct aml_ci **cip); +extern void aml_ci_exit(struct aml_ci *ci); + +#endif /* __AML_CI_H_ */ +
diff --git a/drivers/stream_input/parser/dvb_ci/aml_ci_bus.c b/drivers/stream_input/parser/dvb_ci/aml_ci_bus.c new file mode 100644 index 0000000..7138dfd --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/aml_ci_bus.c
@@ -0,0 +1,1903 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/amlogic/aml_gpio_consumer.h> +#include <linux/gpio/consumer.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/of_irq.h> +#include <linux/irq.h> +#include <linux/compat.h> +#include "aml_ci_bus.h" +#include "aml_ci.h" + + + +//can see jtag dts and driver to select gpio function. +//write dts config for cam/tsin/out +//gpio irq is can used. 
+// +static struct aml_ci_bus ci_bus; +static int aml_ci_bus_debug = 1; +static int aml_ci_bus_time = 500; +static int aml_ci_bus_set_delay = 0; + +static DECLARE_WAIT_QUEUE_HEAD(wq); +static u32 fetch_done; + +module_param_named(ci_bus_debug, aml_ci_bus_debug, int, 0644); +MODULE_PARM_DESC(ci_bus_debug, "enable verbose debug messages"); + +module_param_named(ci_bus_set_delay, aml_ci_bus_set_delay, int, 0644); +MODULE_PARM_DESC(ci_bus_set_delay, "enable ci bus delay set"); + +module_param_named(ci_bus_time, aml_ci_bus_time, int, 0644); +MODULE_PARM_DESC(ci_bus_time, "set ci bus time"); + + +#define pr_dbg(args...)\ + do {\ + if (aml_ci_bus_debug)\ + printk(args);\ + } while (0) +#define pr_error(fmt, args...) printk("AML_CI_BUS: " fmt, ## args) + + +#define INPUT 0 +#define OUTPUT 1 +#define OUTLEVEL_LOW 0 +#define OUTLEVEL_HIGH 1 +#define PULLLOW 1 +#define PULLHIGH 0 + +#define AML_MODE_NAME "aml_dvbci_bus" + +int aml_ci_bus_mod_init(void); +void aml_ci_bus_mod_exit(void); +static int aml_read_self(unsigned int reg); +static void aml_write_self(unsigned int reg, unsigned int val); +static int aml_set_gpio_out(struct gpio_desc *gpio, int val); +static int aml_set_gpio_in(struct gpio_desc *gpio); + +#define WRITE_CIBUS_REG(_r, _v) aml_write_self(_r, _v) +#define READ_CIBUS_REG(_r) aml_read_self(_r) + +#define USED_IRQ 0 + +static void *p_hw_base; +//write reg +static void aml_write_self(unsigned int reg, unsigned int val) +{ + void *ptr = (void *)(p_hw_base + reg); + writel(val, ptr); +} +//read reg +static int aml_read_self(unsigned int reg) +{ + void *addr = p_hw_base + reg; + int ret = readl(addr); + return ret; +} + +/**\brief init_ci_addr:ci bus init mem addr +* \param pdev: +* \return +* - read value:ok +* - -EINVAL : error +*/ +int init_ci_addr(struct platform_device *pdev) +{ + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + pr_dbg("%s fail\n", __func__); + return -1; + } + + p_hw_base = 
devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (p_hw_base) { + pr_dbg("%s base addr = %lx\n", __func__, + (unsigned long)p_hw_base); + } else { + pr_dbg("%s base addr error\n", __func__); + } + return 0; +} + + +/**\brief aml_ci_bus_io:ci bus read or write api with bus +* \param ci_bus_dev: ci_bus_dev obj,used this data to ctl +* \param val: read or write value +* \param addr: rw addr +* \param mode: cmd +* \return +* - read value:ok +* - -EINVAL : error +*/ +static int aml_ci_bus_select_gpio(struct aml_ci_bus *ci_bus_dev, + int select/*, int enable*/) +{ + //SELECT GPIO FUNCTION + unsigned int old_select = ci_bus_dev->select; + struct pinctrl_state *s; + int ret = 0; + + if (old_select == select) + return 0; + + if (ci_bus_dev->addr_ts_mode_multiplex == 0) { + //not multiplex ts and ci addr, so no need to + //change ts and addr + if (select == AML_GPIO_ADDR) { + return 0; + } + } + if (!ci_bus_dev->pinctrl) { + ci_bus_dev->pinctrl = devm_pinctrl_get(&ci_bus_dev->pdev->dev); + if (IS_ERR_OR_NULL(ci_bus_dev->pinctrl)) { + dev_err(&ci_bus_dev->pdev->dev, "could not get pinctrl handle\n"); + return -EINVAL; + } + } + if (IS_ERR_OR_NULL(ci_bus_dev->pinctrl)) { + dev_err(&ci_bus_dev->pdev->dev, "return, could not get pinctrl handle\n"); + return -EINVAL; + } + /* set pinmux */ + switch (select) { + case AML_GPIO_ADDR: + s = pinctrl_lookup_state(ci_bus_dev->pinctrl, "ci_addr_pins"); + if (IS_ERR_OR_NULL(s)) { + dev_err(&ci_bus_dev->pdev->dev, + "could not get ci_addr_pins state\n"); + return -EINVAL; + } + ret = pinctrl_select_state(ci_bus_dev->pinctrl, s); + if (ret) { + dev_err(&ci_bus_dev->pdev->dev, "failed to set ci_addr_pins pinctrl\n"); + return -EINVAL; + } + if (ci_bus_dev->le_pin) { + aml_set_gpio_out(ci_bus_dev->le_pin, ci_bus_dev->le_enable_level); + pr_dbg("set le pin to low"); + } + break; + case AML_GPIO_TS: + if (ci_bus_dev->le_pin) { + aml_set_gpio_out(ci_bus_dev->le_pin, !ci_bus_dev->le_enable_level); + pr_dbg("set le pin to 
high"); + } + s = pinctrl_lookup_state(ci_bus_dev->pinctrl, "ci_ts_pins"); + if (IS_ERR_OR_NULL(s)) { + dev_err(&ci_bus_dev->pdev->dev, + "could not get ci_ts_pins state\n"); + return -EINVAL; + } + ret = pinctrl_select_state(ci_bus_dev->pinctrl, s); + if (ret) { + dev_err(&ci_bus_dev->pdev->dev, "failed to set ci_ts_pins pinctrl\n"); + return -EINVAL; + } + break; + default: + break; + } + ci_bus_dev->select = select; + return 0; +} + +/**\brief aml_ci_bus_set_delay_time:set ci bus delay time +* \param mode: cmd,io rd/wr or mem rd/wr +* \return +* - read value:ok +* - -EINVAL : error +*/ +static int aml_ci_bus_set_delay_time(int mode) +{ + u32 delay0 = 0, delay1 = 0; + //set cmd delay + if (mode == AM_CI_CMD_IOR) { + //delay0 + delay0 = delay0 | (DELAY_RIO_INIT_ADDR << 0); + delay0 = delay0 | (DELAY_RIO_ADDR_CE << 8); + delay0 = delay0 | (DELAY_RIO_CE_RD << 16); + delay0 = delay0 | (DELAY_RIO_RD_RWAIT << 24); + //delay1 + delay1 = delay1 | (DELAY_RIO_RWAIT_DATA << 0); + delay1 = delay1 | (DELAY_RIO_DATA_DRD << 8); + delay1 = delay1 | (DELAY_RIO_DRD_DCE << 16); + delay1 = delay1 | (DELAY_RIO_DCE_INIT << 24); + } else if (mode == AM_CI_CMD_IOW) { + //delay0 + delay0 = delay0 | (DELAY_WIO_INIT_ADDR << 0); + delay0 = delay0 | (DELAY_WIO_ADDR_CE << 8); + delay0 = delay0 | (DELAY_WIO_CE_WR << 16); + delay0 = delay0 | (DELAY_WIO_WR_RWAIT << 24); + //delay1 + delay1 = delay1 | (DELAY_WIO_RWAIT_DATA << 0); + delay1 = delay1 | (DELAY_WIO_DATA_DWR << 8); + delay1 = delay1 | (DELAY_WIO_DWR_DCE << 16); + delay1 = delay1 | (DELAY_WIO_DCE_INIT << 24); + } else if (mode == AM_CI_CMD_MEMR) { + //delay0 + delay0 = delay0 | (DELAY_RMEM_INIT_ADDR << 0); + delay0 = delay0 | (DELAY_RMEM_ADDR_CE << 8); + delay0 = delay0 | (DELAY_RMEM_CE_RD << 16); + delay0 = delay0 | (DELAY_RMEM_RD_RWAIT << 24); + //delay1 + delay1 = delay1 | (DELAY_RMEM_RWAIT_DATA << 0); + delay1 = delay1 | (DELAY_RMEM_DATA_DRD << 8); + delay1 = delay1 | (DELAY_RMEM_DRD_DCE << 16); + delay1 = delay1 | 
(DELAY_RMEM_DCE_INIT << 24); + } else if (mode == AM_CI_CMD_MEMW) { + //delay0 + delay0 = delay0 | (DELAY_WMEM_INIT_ADDR << 0); + delay0 = delay0 | (DELAY_WMEM_ADDR_CE << 8); + delay0 = delay0 | (DELAY_WMEM_CE_WR << 16); + delay0 = delay0 | (DELAY_WMEM_WR_RWAIT << 24); + //delay1 + delay1 = delay1 | (DELAY_WMEM_RWAIT_DATA << 0); + delay1 = delay1 | (DELAY_WMEM_DATA_DWR << 8); + delay1 = delay1 | (DELAY_WMEM_DWR_DCE << 16); + delay1 = delay1 | (DELAY_WMEM_DCE_INIT << 24); + } + //Wwrite cmd + WRITE_CIBUS_REG(CIPLUS_DELAY_CTRL0, delay0); + WRITE_CIBUS_REG(CIPLUS_DELAY_CTRL1, delay1); + return 0; +} + + +/**\brief aml_ci_bus_io:ci bus read or write api with bus +* \param ci_bus_dev: ci_bus_dev obj,used this data to ctl +* \param val: read or write value +* \param addr: rw addr +* \param mode: cmd +* \return +* - read value:ok +* - -EINVAL : error +*/ +static int aml_ci_bus_io(struct aml_ci_bus *ci_bus_dev, + u8 val, u16 addr, int mode) +{ + int rd; + int ret = -1; + u32 address = addr; + u32 data = val; + u32 reg = 0; + u32 ctrl = 0; + int enable = 0; + int count = 0; + u32 int_status; + //only used hi addr. we to change tsout to addr + if (addr >= 4) { + enable = 1; + } + //clear irq + ctrl = READ_CIBUS_REG(CIPLUS_CTRL_REG); + ctrl = ctrl | (1 << CLEAR_CMP_IRQ); + ctrl = ctrl | (1 << CLEAR_TIMEOUT_IRQ); + //Wwrite cmd crtl + WRITE_CIBUS_REG(CIPLUS_CTRL_REG, ctrl); + fetch_done = 0; + //gpio select gpio func + //aml_ci_bus_select_gpio(ci_bus_dev, enable ? 
AML_GPIO_ADDR : AML_GPIO_TS); + while (1) { + count++; + if (count < aml_ci_bus_time) + break; + } + //enable delay reg,defalue is disable + if (aml_ci_bus_set_delay) + aml_ci_bus_set_delay_time(mode); + //cmd vilad + reg = reg | (1 << CI_CMD_VALID); + //set addr + reg = reg | ((address & 0xeFFF) << CI_CMD_ADDR); + //set cmd and write data + if (mode == AM_CI_CMD_IOR) { + reg = reg | (IORD << CI_CMD_TYPE); + } else if (mode == AM_CI_CMD_IOW) { + reg = reg | (data << CI_CMD_WDATA); + reg = reg | (IOWR << CI_CMD_TYPE); + } else if (mode == AM_CI_CMD_MEMR) { + reg = reg | (MEMRD << CI_CMD_TYPE); + } else if (mode == AM_CI_CMD_MEMW) { + reg = reg | (data << CI_CMD_WDATA); + reg = reg | (MEMWR << CI_CMD_TYPE); + } + //clear irq + ctrl = READ_CIBUS_REG(CIPLUS_CTRL_REG); + ctrl = ctrl | (1 << CLEAR_CMP_IRQ); + ctrl = ctrl | (1 << CLEAR_TIMEOUT_IRQ); + //Wwrite cmd crtl + WRITE_CIBUS_REG(CIPLUS_CTRL_REG, ctrl); + //Wwrite cmd reg + WRITE_CIBUS_REG(CIPLUS_CMD_REG, reg); + //wait cmp irq or timwout irq + if (USED_IRQ == 1) { + ret = + wait_event_interruptible_timeout(wq, fetch_done != 0, + HZ / 100);//10ms + } else { + count = 0; + while(1) { + count++; + int_status = READ_CIBUS_REG(CIPLUS_STATUS_REG); + if ((int_status&(1 << COMPLETE_IRQ_STATE)) == (1 << COMPLETE_IRQ_STATE)) { + break; + } + if (count > 50) { + printk("count timeout:%d\r\n", count); + break; + } + } + } + rd = READ_CIBUS_REG(CIPLUS_RDATA_REG); + //gpio select tsout func + //aml_ci_bus_select_gpio(ci_bus_dev, AML_GPIO_TS, enable); + return rd; +} +/**\brief aml_ci_bus_init_reg:ci bus init reg,enable ci bus +* \param ci_bus_dev: ci_bus_dev obj,used this data to ctl +* \return +* - 0:ok +*/ +static int aml_ci_bus_init_reg(struct aml_ci_bus *ci_bus_dev) +{ + u32 ctrl = 0; + + if (ci_bus_dev->addr_ts_mode_multiplex == 0) { + aml_ci_bus_select_gpio(ci_bus_dev,AML_GPIO_TS); + } else { + aml_ci_bus_select_gpio(ci_bus_dev,AML_GPIO_ADDR); + } + + //init ci bus reg + pr_dbg("aml_ci_bus_init_reg---\r\n"); + ctrl = 
READ_CIBUS_REG(CIPLUS_CTRL_REG); + ctrl = ctrl | (1 << CI_ENABLE); + ctrl = ctrl | (1 << ENABLE_CMP_IRQ); + WRITE_CIBUS_REG(CIPLUS_CTRL_REG, ctrl); + + ctrl = 0; + ctrl = ctrl | (1 << ENABEL_TIMEOUT_IRQ); + ctrl = ctrl | (TIMEOUT_IRQ_HOLD_TIME << WATT_TIMEOUT_TIME); + //timeout hold time + //WRITE_CIBUS_REG(CIPLUS_WAIT_TIMEOUT, ctrl); + //aml_ci_bus_select_gpio(ci_bus_dev,AML_GPIO_TS); + return 0; +} +/********************************************************/ +/********************************************************/ +/******* gpio api *************/ +/********************************************************/ +/********************************************************/ +/**\brief aml_set_gpio_out:set gio out and set val value +* \param gpio: gpio_desc obj, +* \param val: set val +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int aml_set_gpio_out(struct gpio_desc *gpio, int val) +{ + int ret = 0; + if (val < 0) { + pr_dbg("gpio out val = -1.\n"); + return -1; + } + if (val != 0) + val = 1; + ret = gpiod_direction_output(gpio, val); + pr_dbg("dvb ci gpio out ret %d set val:%d\n", ret, val); + return ret; +} + +/**\brief aml_set_gpio_in:set gio in +* \param gpio: gpio_desc obj, +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int aml_set_gpio_in(struct gpio_desc *gpio) +{ + gpiod_direction_input(gpio); + return 0; +} + +/**\brief aml_get_gpio_value:get gio value +* \param gpio: gpio_desc obj, +* \return +* - gpio value:ok +* - -EINVAL : error +*/ +static int aml_get_gpio_value(struct gpio_desc *gpio) +{ + int ret = 0; + ret = gpiod_get_value(gpio); + return ret; +} +/**\brief aml_gpio_free:free gio +* \param gpio: gpio_desc obj, +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int aml_gpio_free(struct gpio_desc *gpio) +{ + gpiod_put(gpio); + return 0; +} +/**\brief ci_bus_get_gpio_by_name:get gpio desc from dts file +* \param ci_bus_dev: aml_ci_bus obj +* \param gpiod: gpio_desc * obj +* \param str: gpio name at dts file +* \param input_output: gpio 
input or output type +* \param output_value: gpio out put value +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int ci_bus_get_gpio_by_name(struct aml_ci_bus *ci_bus_dev, +struct gpio_desc **gpiod, int *pin_value, +char *str, int input_output, int output_level) +{ + int ret = 0; + struct device_node *child = NULL; + struct platform_device *pdev = ci_bus_dev->pdev; + struct device_node *np = pdev->dev.of_node; + + /*get gpio config from dts*/ + /* get device config for dvbci_io*/ + child = of_get_child_by_name(np, "dvbci_io"); + if (IS_ERR(*gpiod)) { + pr_dbg("dvb ci bus %s request failed\n", str); + return -1; + } + + *pin_value = of_get_named_gpio_flags(child, str, 0, NULL); + *gpiod = gpio_to_desc(*pin_value); + if (IS_ERR(*gpiod)) { + pr_dbg("ci bus %s request failed\n", str); + return -1; + } + pr_dbg("ci bus get_gpio %s %p %d\n", str, *gpiod, *pin_value); + gpio_request(*pin_value, AML_MODE_NAME); + + if (input_output == OUTPUT) { + ret = gpiod_direction_output(*gpiod, output_level); + } else if (input_output == INPUT) { + ret = gpiod_direction_input(*gpiod); + /*ret |= gpiod_set_pullup(*gpiod, 1);*/ + } else { + pr_error("ci bus Request gpio direction invalid\n"); + } + return ret; +} +/********************************************************/ +/********************************************************/ +/******* gpio ci bus api end *************/ +/********************************************************/ +/********************************************************/ +/**\brief aml_ci_bus_mem_read:io read from cam +* \param ci_dev: aml_ci obj,used this data to get ci_bus_dev obj +* \param slot: slot index +* \param addr: read addr +* \return +* - read value:ok +* - -EINVAL : error +*/ +static int aml_ci_bus_mem_read( + struct aml_ci *ci_dev, int slot, int addr) +{ + u8 data = 0; + u16 addres = addr; + int value = 0; + struct aml_ci_bus *ci_bus_dev = ci_dev->data; + mutex_lock(&(ci_bus_dev->mutex)); + aml_ci_bus_select_gpio(ci_bus_dev, AML_GPIO_ADDR); + 
value = aml_ci_bus_io(ci_bus_dev, data, addres, AM_CI_CMD_MEMR);
	mutex_unlock(&(ci_bus_dev->mutex));
	return value;
}
/**\brief aml_ci_bus_mem_write:io write to cam by bus api
* \param ci_dev: aml_ci obj,used this data to get ci_bus_dev obj
* \param slot: slot index
* \param addr: write addr
* \param val: write value
* \return
* - 0:ok
* - -EINVAL : error
*/
static int aml_ci_bus_mem_write(
	struct aml_ci *ci_dev, int slot, int addr, u8 val)
{
	u8 data = val;
	u16 addres = addr;
	int value = 0;
	struct aml_ci_bus *ci_bus_dev = ci_dev->data;
	/* serialize bus commands and pinmux switches */
	mutex_lock(&(ci_bus_dev->mutex));
	aml_ci_bus_select_gpio(ci_bus_dev, AML_GPIO_ADDR);
	value = aml_ci_bus_io(ci_bus_dev, data, addres, AM_CI_CMD_MEMW);
	mutex_unlock(&(ci_bus_dev->mutex));
	return value;
}
/**\brief aml_ci_bus_io_read:io read from cam by bus api
* \param ci_dev: aml_ci obj,used this data to get ci_bus_dev obj
* \param slot: slot index
* \param addr: read addr
* \return
* - read value:ok
* - -EINVAL : error
*/
static int aml_ci_bus_io_read(
	struct aml_ci *ci_dev, int slot, int addr)
{
	u8 data = 0;
	u16 addres = addr;
	int value = 0;
	struct aml_ci_bus *ci_bus_dev = ci_dev->data;
	mutex_lock(&(ci_bus_dev->mutex));
	/* IO-space access keeps the TS pinmux (low registers only) */
	aml_ci_bus_select_gpio(ci_bus_dev, AML_GPIO_TS);
	value = aml_ci_bus_io(ci_bus_dev, data, addres, AM_CI_CMD_IOR);
	mutex_unlock(&(ci_bus_dev->mutex));
	return value;
}
/**\brief aml_ci_bus_io_write:io write to cam
* \param ci_dev: aml_ci obj,used this data to get ci_bus_dev obj
* \param slot: slot index
* \param addr: write addr
* \param val: write value
* \return
* - 0:ok
* - -EINVAL : error
*/
static int aml_ci_bus_io_write(
	struct aml_ci *ci_dev, int slot, int addr, u8 val)
{
	u8 data = val;
	u16 addres = addr;
	int value = 0;
	struct aml_ci_bus *ci_bus_dev = ci_dev->data;
	mutex_lock(&(ci_bus_dev->mutex));
	aml_ci_bus_select_gpio(ci_bus_dev, AML_GPIO_TS);
	value = aml_ci_bus_io(ci_bus_dev, data, addres, AM_CI_CMD_IOW);

	mutex_unlock(&(ci_bus_dev->mutex));
	return value;
}


/**\brief aml_ci_bus_rst:reset cam by ci bus
* Drives the CAM_RESET bit in the control register high or low.
* \param ci_dev: aml_ci obj,used this data to get ci_bus_dev obj
* \param slot: slot index
* \param level: AML_H asserts reset, anything else de-asserts it
* \return
* - 0:ok
* - -EINVAL : error
*/
static int aml_ci_bus_rst(
	struct aml_ci *ci_dev, int slot, int level)
{
	int value = 0;
	u32 ctrl = 0;
	struct aml_ci_bus *ci_bus_dev = ci_dev->data;
	mutex_lock(&(ci_bus_dev->mutex));
	ctrl = READ_CIBUS_REG(CIPLUS_CTRL_REG);
	if (level == AML_H)
		ctrl = ctrl | (1 << CAM_RESET);
	else
		ctrl = ctrl & (~(1 << CAM_RESET));
	//write cmd ctrl
	WRITE_CIBUS_REG(CIPLUS_CTRL_REG, ctrl);
	mutex_unlock(&(ci_bus_dev->mutex));
	return value;
}

/**\brief aml_ci_slot_reset:reset slot
* \param ci_dev: aml_ci obj,used this data to get ci_bus_dev obj
* \param slot: slot index
* \return
* - 0:ok
* - -EINVAL : error
*/
static int aml_ci_slot_reset(struct aml_ci *ci_dev, int slot)
{
	struct aml_ci_bus *ci_bus_dev = ci_dev->data;
	pr_dbg("Slot(%d): Slot RESET CAM\n", slot);
	aml_pcmcia_reset(&ci_bus_dev->pc);
	/* tell the en50221 core the CAM is no longer ready */
	dvb_ca_en50221_cimcu_camready_irq(&ci_dev->en50221_cimcu, 0);
	return 0;
}
/**\brief aml_ci_slot_shutdown:shut down slot
* \param ci_dev: aml_ci obj,used this data to get ci_bus_dev obj
* \param slot: slot index
* \return
* - 0:ok
* - -EINVAL : error
* readme:no use this api
*/
static int aml_ci_slot_shutdown(struct aml_ci *ci_dev, int slot)
{
	pr_dbg("Slot(%d): Slot shutdown\n", slot);
	return 0;
}
/**\brief aml_ci_ts_control:control slot ts
* \param ci_dev: aml_ci obj,used this data to get ci_bus_dev obj
* \param slot: slot index
* \return
* - 0:ok
* - -EINVAL : error
* readme:no use this api
*/
static int aml_ci_ts_control(struct aml_ci *ci_dev, int slot)
{
	pr_dbg("Slot(%d): TS control\n", slot);
	return 0;
}
/**\brief aml_ci_slot_status:get slot status
* Builds the DVB_CA_EN50221_POLL_* bitmask from the pcmcia state and
* flags a change when the cached slot state differs.
* \param ci_dev: aml_ci obj,used this data to get ci_bus_dev obj
* \param slot: slot index
* \param open: no used
* \return
* - cam status
* - -EINVAL : error
*/
static int aml_ci_slot_status(struct aml_ci *ci_dev, int slot, int open)
{
	struct aml_ci_bus *ci_bus_dev = ci_dev->data;
	int state = 0;
	/* pcmcia layer not started yet: report nothing present */
	if (ci_bus_dev->pc.start_work == 0) {
		return 0;
	}
	if (ci_bus_dev->pc.slot_state == MODULE_INSERTED) {
		state = DVB_CA_EN50221_POLL_CAM_PRESENT |
			DVB_CA_EN50221_POLL_CAM_READY;
	}
	/* edge-detect insert/extract against the cached state */
	if (ci_bus_dev->slot_state != ci_bus_dev->pc.slot_state)
	{
		printk("cam crad change-----\r\n");
		ci_bus_dev->slot_state = ci_bus_dev->pc.slot_state;
		state = state | DVB_CA_EN50221_POLL_CAM_CHANGED;
	}
	return state;
}
/**\brief aml_ci_slot_wakeup:get slot wake up thread flag
* \param ci_dev: aml_ci obj,used this data to get ci_bus_dev obj
* \param slot: slot index
* \return
* - cam wake up flag
* - -EINVAL : error
*/
static int aml_ci_slot_wakeup(struct aml_ci *ci_dev, int slot)
{
	struct aml_ci_bus *ci_bus_dev = ci_dev->data;
	if (ci_bus_dev) {
		return ci_bus_dev->wakeup_thread;
	}
	return 1;
}

/**\brief aml_ci_gio_get_irq:get gpio cd1 irq pin value
* \return
* - irq pin value
* - -EINVAL : error
*/
static int aml_ci_gio_get_irq(void)
{
	int ret = 0;
	ret = aml_get_gpio_value(ci_bus.cd_pin1);
	return ret;
}

/********************************************************/
/********************************************************/
/*******        for pcmcid api                 *************/
/********************************************************/
/********************************************************/
/**\brief aml_gio_power:set power gpio hi or low
* \param pc: aml_pcmcia obj,used this priv to get ci_bus_dev obj
* \param enable: power pin hi or low
* \return
* - 0
* - -EINVAL : error
*/
static int aml_gio_power(struct aml_pcmcia *pc, int enable)
{
	int ret = 0;
	struct aml_ci_bus *ci_bus_dev = pc->priv;
	if (ci_bus_dev == NULL) {
		pr_dbg("ci bus dev is null %s : %d\r\n", __func__, enable);
		return -1;
	}
	pr_dbg("%s : %d\r\n", __func__, enable);

	if (enable
== AML_PWR_OPEN) {
		/*hi level ,open power*/
		/* NOTE(review): comment says "hi level" but the pin is driven
		 * low here — presumably the power switch is active-low; confirm
		 * against the board schematic. */
		ret = aml_set_gpio_out(ci_bus_dev->pwr_pin, AML_GPIO_LOW);
	} else {
		/*low level ,close power*/
		/* power off by releasing the pin to input (external pull) */
		ret = aml_set_gpio_in(ci_bus_dev->pwr_pin);
	}

	return ret;
}
/**\brief aml_gio_reset:set reset gpio hi or low
* \param pc: aml_pcmcia obj,used this priv to get ci_bus_dev obj
* \param enable: reset pin hi or low
* \return
* - 0
* - -EINVAL : error
*/
static int aml_gio_reset(struct aml_pcmcia *pc, int enable)
{
	/*need set hi and sleep set low*/
	int ret = 0;
	struct aml_ci_bus *ci_bus_dev = pc->priv;

	if (ci_bus_dev == NULL) {
		pr_dbg("ci bus dev is null %s : %d\r\n", __func__, enable);
		return -1;
	}

	pr_dbg("%s : %d type: %d\r\n", __func__, enable, ci_bus_dev->io_device_type);
	if (ci_bus_dev == NULL || ci_bus_dev->priv == NULL) {
		pr_dbg("rst by ci bus- ci bus dev-null-\r\n");
		return -1;
	}
	/*if (enable == AML_H) {*/
	aml_ci_bus_select_gpio(ci_bus_dev, AML_GPIO_ADDR);
	/*ci_bus_dev->select = AML_GPIO_TS;
	}*/

	/* reset is driven through the ciplus control register, not a gpio */
	aml_ci_bus_rst((struct aml_ci *)ci_bus_dev->priv, 0, enable);
	pr_dbg("rst by ci bus- ci bus [%d]-\r\n", ci_bus_dev->select);
	/*if (enable == AML_L)
	aml_ci_bus_select_gpio(ci_bus_dev, AML_GPIO_TS);*/
	return ret;
}

/**\brief aml_gio_init_irq:set gpio irq
* \param pc: aml_pcmcia obj,used this priv to get ci_bus_dev obj
* \param flag: rising or falling or hi or low
* \return
* - 0
* - -EINVAL : error
*/
/*need change*/
static int aml_gio_init_irq(struct aml_pcmcia *pc, int flag)
{
	struct aml_ci_bus *ci_bus_dev = (struct aml_ci_bus *)pc->priv;
	/* NOTE(review): the mapped irq number and the flag argument are
	 * discarded; the card-detect irq is actually obtained during dts
	 * parsing. */
	gpiod_to_irq(ci_bus_dev->cd_pin1);
	return 0;
}

/**\brief aml_gio_get_cd1:get gpio cd1 pin value
* \param pc: aml_pcmcia obj,used this priv to get ci_bus_dev obj
* \return
* - cd1 pin value
* - -EINVAL : error
*/
static int aml_gio_get_cd1(struct aml_pcmcia *pc)
{
	int ret = 1;
	struct aml_ci_bus *ci_bus_dev = pc->priv;
	ret = aml_get_gpio_value(ci_bus_dev->cd_pin1);
	pr_dbg("%s :cd: %d\r\n",
		__func__, ret);
	return ret;
}
/**\brief aml_gio_get_cd2:get gpio cd2 pin value
* \param pc: aml_pcmcia obj,used this priv to get ci_bus_dev obj
* \return
* - cd2 pin value
* - -EINVAL : error
*/
static int aml_gio_get_cd2(struct aml_pcmcia *pc)
{
	int ret = 0;
	struct aml_ci_bus *ci_bus_dev = pc->priv;
	/* reads cd_pin1: the hardware has a single detect line and
	 * cd_pin2 is aliased to cd_pin1 in the dts parsing code */
	ret = aml_get_gpio_value(ci_bus_dev->cd_pin1);
	pr_dbg("%s :cd: %d\r\n", __func__, ret);
	return ret;
}
/**\brief aml_cam_plugin:notify en50221 cam card in or out
* \param pc: aml_pcmcia obj,used this priv to get ci_bus_dev obj
* \param plugin: 0:remove;1:in
* \return
* - 0
* - -EINVAL : error
*/
static int aml_cam_plugin(struct aml_pcmcia *pc, int plugin)
{
	struct aml_ci *ci = (struct aml_ci *)
		((struct aml_ci_bus *)(pc->priv))->priv;
	pr_dbg("%s : %d\r\n", __func__, plugin);
	if (plugin) {
		aml_ci_bus_select_gpio((struct aml_ci_bus *)(pc->priv), AML_GPIO_TS);
		dvb_ca_en50221_cimcu_camchange_irq(&ci->en50221_cimcu,
			0, DVB_CA_EN50221_CAMCHANGE_INSERTED);
	} else {
		aml_ci_bus_select_gpio((struct aml_ci_bus *)(pc->priv), AML_GPIO_ADDR);
		dvb_ca_en50221_cimcu_camchange_irq(&ci->en50221_cimcu,
			0, DVB_CA_EN50221_CAMCHANGE_REMOVED);
	}
	return 0;
}
/**\brief aml_pcmcia_alloc:alloc and init pcmcia obj
* Wires the pcmcia callback table to the gio/ci-bus helpers above.
* \param ci_bus_dev: aml_ci_bus obj,
* \param pcmcia: aml_pcmcia * obj (output, points into ci_bus_dev)
* \return
* - 0
* - -EINVAL : error
*/
static void aml_pcmcia_alloc(struct aml_ci_bus *ci_bus_dev,
	struct aml_pcmcia **pcmcia)
{
	pr_dbg("aml_pcmcia_alloc----\n");
	/* no allocation: the pcmcia object is embedded in ci_bus_dev */
	*pcmcia = &ci_bus_dev->pc;
	(*pcmcia)->irq = ci_bus_dev->irq;
	(*pcmcia)->init_irq = aml_gio_init_irq;
	(*pcmcia)->get_cd1 = aml_gio_get_cd1;
	(*pcmcia)->get_cd2 = aml_gio_get_cd2;
	(*pcmcia)->pwr = aml_gio_power;
	(*pcmcia)->rst = aml_gio_reset;
	(*pcmcia)->pcmcia_plugin = aml_cam_plugin;
	(*pcmcia)->slot_state = MODULE_XTRACTED;
	(*pcmcia)->priv = ci_bus_dev;
	(*pcmcia)->run_type = 0;/*0:irq;1:poll*/
	/* placeholder type; the caller overwrites it right after alloc */
	(*pcmcia)->io_device_type = AML_DVB_IO_TYPE_CIMAX;
	(*pcmcia)->start_work = 0;
}

/**\brief aml_ci_bus_get_config_from_dts:
 * get gpio config from dts
* Reads the irq resources, pinctrl states, card-detect / power /
* latch-enable gpios and the addr/ts multiplex mode from the
* "dvbci_io" device-tree node.
* \param ci_bus_dev: aml_ci_bus obj,
* \return
* - 0
* - -EINVAL : error
*/
static int aml_ci_bus_get_config_from_dts(struct aml_ci_bus *ci_bus_dev)
{
	struct device_node *child = NULL;
	struct platform_device *pdev = ci_bus_dev->pdev;
	struct device_node *np = pdev->dev.of_node;
	int ret = 0;
	pr_dbg("into get ci bus dts -----\r\n");
	/*get gpio config from dts*/
	/* get device config for dvbci_io*/
	child = of_get_child_by_name(np, "dvbci_io");
	if (child == NULL) {
		pr_error("failed to get dvbci_io\n");
		return -1;
	}
	//below is get cd1 cd2 pwr irq reset gpio info
	if (ci_bus_dev->io_device_type == AML_DVB_IO_TYPE_CIBUS) {
		struct resource *res;
		char buf[32];
		int ival;

		/*get irq value; 186/187 are fallback defaults when the
		 *platform resource is missing*/
		ci_bus_dev->irq_cmp = 186;
		memset(buf, 0, 32);
		snprintf(buf, sizeof(buf), "%s", "irq_cmp");
		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, buf);
		if (res)
			ci_bus_dev->irq_cmp = res->start;
		else
			pr_err("get irq cmp error\r\n");

		ci_bus_dev->irq_timeout = 187;
		memset(buf, 0, 32);
		snprintf(buf, sizeof(buf), "%s", "irq_timeout");
		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, buf);
		if (res)
			ci_bus_dev->irq_timeout = res->start;
		else
			pr_err("get irq irq_timeout error\r\n");
		//pin config
		pr_dbg("ci bus irq[%d]cmp[%d] \r\n",ci_bus_dev->irq_cmp, ci_bus_dev->irq_timeout);
		if (!ci_bus_dev->pinctrl) {
			ci_bus_dev->pinctrl = devm_pinctrl_get(&pdev->dev);
			if (IS_ERR_OR_NULL(ci_bus_dev->pinctrl)) {
				pr_error("get pinctl could not get pinctrl handle\n");
				return -EINVAL;
			}
		}

		if (ci_bus_dev->pinctrl) {
			struct pinctrl_state *s;
			pr_dbg("ci bus get ts pin\r\n");
			/* start in TS mode */
			s = pinctrl_lookup_state(ci_bus_dev->pinctrl, "ci_ts_pins");
			if (IS_ERR_OR_NULL(s)) {
				pr_error("could not get jtag_apee_pins state\n");
				return -1;
			}
			pr_dbg("ci bus select ts pin\r\n");
			ret = pinctrl_select_state(ci_bus_dev->pinctrl, s);
			if (ret)
of_property_read_u32(pdev->dev.of_node, buf, &ival); + if (ret) { + pr_error("dvb ci addr_ts_mode_multiplex request failed\n"); + } else { + ci_bus_dev->addr_ts_mode_multiplex = ival; + pr_dbg("ci_bus_dev->addr_ts_mode_multiplex %d ******\n", ci_bus_dev->addr_ts_mode_multiplex); + } + + + } + return 0; +} +/**\brief aml_ci_free_gpio:free ci gpio +* \param ci_bus_dev: aml_ci_bus obj, +* \return +* - 0 +* - -EINVAL : error +*/ +static void aml_ci_free_gpio(struct aml_ci_bus *ci_bus_dev) +{ + if (ci_bus_dev == NULL) { + pr_error("ci_bus_dev is NULL,no need free gpio res\r\n"); + return; + } + + if (ci_bus_dev->pwr_pin) { + aml_gpio_free(ci_bus_dev->pwr_pin); + ci_bus_dev->pwr_pin = NULL; + } + if (ci_bus_dev->cd_pin1) { + aml_gpio_free(ci_bus_dev->cd_pin1); + ci_bus_dev->cd_pin1 = NULL; + ci_bus_dev->cd_pin2 = NULL; + } + if (ci_bus_dev->le_pin) { + aml_gpio_free(ci_bus_dev->le_pin); + ci_bus_dev->le_pin = NULL; + } + return; +} + +static irqreturn_t timeout_isr(int irq, void *dev_id) +{ + u32 int_status = READ_CIBUS_REG(CIPLUS_STATUS_REG); + + if ((int_status & (1 << TIMEOUT_IRQ_STATE)) == (1 << TIMEOUT_IRQ_STATE)) { + fetch_done = 1; + wake_up_interruptible(&wq); + } + return IRQ_HANDLED; +} + +static irqreturn_t cmp_isr(int irq, void *dev_id) +{ + u32 int_status = READ_CIBUS_REG(CIPLUS_STATUS_REG); + + if ((int_status&(1 << COMPLETE_IRQ_STATE)) == (1 << COMPLETE_IRQ_STATE)) { + fetch_done = 1; + wake_up_interruptible(&wq); + } + return IRQ_HANDLED; +} + +/**\brief aml_ci_bus_init:ci_bus_dev init +* \param ci_dev: aml_ci obj, +* \param pdev: platform_device obj,used to get dts info +* \return +* - 0 +* - -EINVAL : error +*/ +int aml_ci_bus_init(struct platform_device *pdev, struct aml_ci *ci_dev) +{ + struct aml_ci_bus *ci_bus_dev = NULL; + struct aml_pcmcia *pc; + int result,irq; + + ci_bus_dev = &ci_bus; + ci_bus_dev->pdev = pdev; + ci_bus_dev->priv = ci_dev; + ci_bus_dev->bus_pinctrl = NULL; + ci_bus_dev->pinctrl = NULL; + /*default mode is wake up,when trans a 
lot data,used sleep mode*/ + ci_bus_dev->wakeup_thread = 1; + mutex_init(&(ci_bus_dev->mutex)); + /*init io device type*/ + ci_bus_dev->io_device_type = ci_dev->io_type; + pr_dbg("*********ci bus Dev type [%d]\n", ci_dev->io_type); + /*get config from dts*/ + aml_ci_bus_get_config_from_dts(ci_bus_dev); + //iomap ci reg + init_ci_addr(pdev); + /*Register irq handlers */ + if (ci_bus_dev->irq_cmp != -1) { + if (USED_IRQ) { + irq = request_irq(ci_bus_dev->irq_cmp, + cmp_isr, + IRQF_SHARED|IRQF_TRIGGER_RISING, + "ciplus cmp irq", ci_bus_dev); + if (irq == 0) + pr_dbg("request cmp irq sucess\r\n"); + else if (irq == -EBUSY) + pr_err("request cmp irq busy\r\n"); + else + pr_err("request cmp irq error [%d]\r\n", irq); + } else { + disable_irq(ci_bus_dev->irq_cmp); + } + } + /*Register irq handlers */ + if (ci_bus_dev->irq_timeout != -1) { + if (USED_IRQ) { + pr_dbg("request timeout irq\n"); + irq = request_irq(ci_bus_dev->irq_timeout, + timeout_isr, + IRQF_SHARED|IRQF_TRIGGER_RISING, + "ciplus timeout irq", ci_bus_dev); + if (irq == 0) + pr_err("request timeout irq sucess\r\n"); + else if (irq == -EBUSY) + pr_err("request timeout irq busy\r\n"); + else + pr_err("request timeout irq error [%d]\r\n", irq); + } else { + disable_irq(ci_bus_dev->irq_timeout); + } + } + pr_dbg("*********ci bus init bus reg\n"); + aml_ci_bus_init_reg(ci_bus_dev); + /*init ci_dev used api.*/ + ci_dev->ci_mem_read = aml_ci_bus_mem_read; + ci_dev->ci_mem_write = aml_ci_bus_mem_write; + ci_dev->ci_io_read = aml_ci_bus_io_read; + ci_dev->ci_io_write = aml_ci_bus_io_write; + ci_dev->ci_slot_reset = aml_ci_slot_reset; + ci_dev->ci_slot_shutdown = aml_ci_slot_shutdown; + ci_dev->ci_slot_ts_enable = aml_ci_ts_control; + ci_dev->ci_poll_slot_status = aml_ci_slot_status; + ci_dev->ci_get_slot_wakeup = aml_ci_slot_wakeup; + ci_dev->data = ci_bus_dev; + + aml_pcmcia_alloc(ci_bus_dev, &pc); + pc->io_device_type = ci_bus_dev->io_device_type; + pr_dbg("*********ci bus aml_pcmcia_init start_work:%d\n", 
pc->start_work); + result = aml_pcmcia_init(pc); + if (result < 0) { + pr_error("aml_pcmcia_init failed\n"); + goto fail1; + } + pr_dbg("*********ci bus aml_ci_bus_mod_init---\n"); + aml_ci_bus_mod_init(); + return 0; +fail1: + kfree(ci_bus_dev); + ci_bus_dev = NULL; + return 0; +} +EXPORT_SYMBOL(aml_ci_bus_init); +/**\brief aml_ci_bus_exit:ci_bus exit +* \return +* - 0 +* - -EINVAL : error +*/ +int aml_ci_bus_exit(struct aml_ci *ci) +{ + aml_ci_bus_mod_exit(); + /*exit pc card*/ + aml_pcmcia_exit(&ci_bus.pc); + /*free gpio*/ + aml_ci_free_gpio(&ci_bus); + + return 0; +} +EXPORT_SYMBOL(aml_ci_bus_exit); + +#if 1 +/********************************************************/ +/********************************************************/ +/******* for ci bus test api *************/ +/********************************************************/ +/********************************************************/ + +/*cam difines*/ +#define DA 0x80 +#define FR 0x40 +#define WE 0x02 +#define RE 0x01 + +#define RS 0x08 +#define SR 0x04 +#define SW 0x02 +#define HC 0x01 +#define DATA_REG 0 +#define COM_STA_REG 1 +#define SIZE_REG_L 2 +#define SIZE_REG_M 3 + +static void aml_ci_bus_full_test(struct aml_ci *ci_dev) +{ + unsigned int BUF_SIZE = 0; + unsigned int i = 0; + unsigned char cc = 0; + unsigned char reg; + unsigned int bsize = 0; + int cnt = 0; + unsigned char buf[10]; + int count = 1000; + mdelay(1000); + pr_dbg("READ CIS START\r\n"); + for (i = 0; i < 200; i++) { + mdelay(100); + cc = aml_ci_bus_mem_read(ci_dev, 0, i); + pr_dbg("0x%x ", cc); + if ((i + 1) % 16 == 0) + pr_dbg(" \r\n"); + } + pr_dbg("READ CIS OVER\r\n"); + mdelay(1000); + pr_dbg("SW rst CAM...\r\n"); + aml_ci_bus_io_write(ci_dev, 0, COM_STA_REG, RS); + pr_dbg("SW rst over.\r\n"); + pr_dbg("-----------------------------------\r\n"); + pr_dbg("TO delay 2000ms\r\n"); + mdelay(2000); + pr_dbg("\r\n"); + pr_dbg("--------------clear rs--!!!-YOU MUST CLEAR RS BIT--no sleep--------\r\n"); + aml_ci_bus_io_write(ci_dev, 0, 
COM_STA_REG, 0);
	pr_dbg("--------------sleep---------------------\r\n");
	mdelay(2000);
	pr_dbg("TO check sw-rst is OK\r\n");
	pr_dbg("start read fr \r\n");
	if (1) {
		/* inner reg/reg1 shadow the outer reg on purpose for this
		 * self-contained check */
		unsigned char reg;
		unsigned char reg1;
		int count1 = 4000;
		while (1) {
			mdelay(20);
			count1--;
			reg1 = aml_ci_bus_io_read(
				ci_dev, 0, COM_STA_REG);
			if (FR != (FR & reg1)) {
				continue;
			} else {
				pr_dbg("CAM Reset Ok\r\n");
				break;
			}
		}
		reg = aml_ci_bus_io_read(ci_dev, 0, COM_STA_REG);
		pr_dbg("STA_REG = 0x%2.2x\r\n", reg);
		if (FR & reg) {
			pr_dbg("SW-RST is OK!\r\n");
		} else {
			pr_dbg("SW-RST is ERR!\r\n");
			goto end;
		}
	}
end:
	pr_dbg("TO check sw-rst over.\r\n");
	pr_dbg("\r\n");
	pr_dbg("-----------------------------------\r\n");
	pr_dbg("TO buffer size negotiation protocol...\r\n");
	pr_dbg("Get which buf size CAM can support\r\n");
	aml_ci_bus_io_write(ci_dev, 0, COM_STA_REG, SR);
	mdelay(1000);
	/* wait for DA: the CAM has buffer-size data for us */
	while (1) {

		reg = aml_ci_bus_io_read(ci_dev, 0, COM_STA_REG);
		if ((reg & DA) == DA) {
			pr_dbg("Buffer negotiate size date avalible.\r\n");
			break;
		} else {
			/*pr_dbg("Buffer negotiate
			size date NOT avalible\r\n");*/
			continue;
		}
		mdelay(100);
	}
	cnt = (aml_ci_bus_io_read(ci_dev, 0, SIZE_REG_L)) +
		((aml_ci_bus_io_read(ci_dev, 0, SIZE_REG_M)) * 256);
	pr_dbg("Moudle have <%d> Bytes send to host.\r\n", cnt);
	if (cnt != 2) {
		pr_dbg("The Bytes will be tx is ERR!\r\n");
		return;
	}
	for (i = 0; i < cnt; i++)
		buf[i] = aml_ci_bus_io_read(ci_dev, 0, DATA_REG);

	reg = aml_ci_bus_io_read(ci_dev, 0, COM_STA_REG);
	if (RE == (RE & reg)) {
		pr_dbg("(1)Read CAM buf size ERR!\r\n");
		return;
	}
	aml_ci_bus_io_write(ci_dev, 0, (COM_STA_REG), 0);

	mdelay(1000);

	/* bounded wait for FR (free) before continuing */
	while (count--) {
		reg = aml_ci_bus_io_read(ci_dev, 0, COM_STA_REG);
		if (FR != (FR & reg)) {
			pr_dbg("CAM is busy 2, waiting...\r\n");
			continue;
		} else {
			pr_dbg("CAM is OK 2.\r\n");
			break;
		}
	}
	reg = aml_ci_bus_io_read(ci_dev, 0, COM_STA_REG);
	if (FR != (FR & reg)) {
		pr_dbg("(2)Read CAM buf size ERR!-\r\n");
		return;
	}
	/* CAM-reported maximum buffer size, big-endian */
	bsize = (buf[0] * 256) + buf[1];

	pr_dbg("CAM can support buf size is: <%d>B\r\n", bsize);

	pr_dbg("Tell CAM which size buf is be used\r\n");
	reg = aml_ci_bus_io_read(ci_dev, 0, COM_STA_REG);
	if (FR != (FR & reg))
		pr_dbg("CAM is busy, waiting free\r\n");
	while (1) {
		reg = aml_ci_bus_io_read(ci_dev, 0, COM_STA_REG);
		if (FR != (FR & reg)) {
			pr_dbg("CAM is busy 3, waiting\r\n");
			continue;
		} else {
			pr_dbg("CAM is OK 3\r\n");
			break;
		}
	}

	bsize = bsize - 0;
	BUF_SIZE = bsize;
	pr_dbg("We will use this buf size: <%d>B\r\n", bsize);
	aml_ci_bus_io_write(ci_dev, 0, COM_STA_REG, SW);
	reg = aml_ci_bus_io_read(ci_dev, 0, COM_STA_REG);
	if (FR != (FR & reg))
		pr_dbg("CAM is busy, waiting\r\n");

	while (1) {
		reg = aml_ci_bus_io_read(ci_dev, 0, COM_STA_REG);
		if (FR != (FR & reg)) {
			pr_dbg("CAM is busy 4, waiting\r\n");
			continue;
		} else {
			pr_dbg("CAM is OK 4\r\n");
			break;
		}
	}
	/*SHOULD CHECK DA!!!!!*/
	/*PLS ADD THIS CHECK CODE:*/
	pr_dbg("PRIOR to check CAM'S DA\r\n");
	reg = aml_ci_bus_io_read(ci_dev, 0, COM_STA_REG);
	if ((reg & DA) == DA) {
		pr_dbg("CAM have data send to HOST\r\n");
		return;
	}


	buf[0] = (unsigned char)((bsize >> 8) & 0xff);
	buf[1] = (unsigned char)(bsize & 0xff);

	while (1) {
		mdelay(10);
		aml_ci_bus_io_write(ci_dev,
			0, COM_STA_REG, HC | SW);
		mdelay(100);
		reg = aml_ci_bus_io_read(ci_dev,
			0, COM_STA_REG);
		if (FR != (FR & reg)) {
			pr_dbg("CAM is busy 5, waiting\r\n");
			aml_ci_bus_io_write(ci_dev,
				0, COM_STA_REG, SW);
			continue;
		} else {
			pr_dbg("CAM is OK 5\r\n");
			break;
		}
	}
	pr_dbg("<2> Bytes send to CAM\r\n");
	aml_ci_bus_io_write(ci_dev, 0, SIZE_REG_M, 0);
	aml_ci_bus_io_write(ci_dev, 0, SIZE_REG_L, 2);
	for (i = 0; i < 2; i++)
		aml_ci_bus_io_write(ci_dev, 0, DATA_REG, buf[i]);

	reg = aml_ci_bus_io_read(ci_dev, 0, COM_STA_REG);
	if (WE == (WE & reg)) {
		pr_dbg("Write CAM ERR!\r\n");
		return;
	} else {
		aml_ci_bus_io_write(ci_dev, 0, COM_STA_REG, SW);
		mdelay(100);
		aml_ci_bus_io_write(ci_dev, 0, COM_STA_REG, 0);
		pr_dbg("Buffer size negotiation over!\r\n");
		pr_dbg("NOW, HOST can communicates with CAM\r\n");
		pr_dbg("NOW, TEST END\r\n");
	}
}

/**
* Read a tuple from attribute memory.
* Attribute memory is byte-wide on even addresses only, hence every
* byte access steps the address by 2.
*
* @param address Address to read from. Updated.
* @param tupleType Tuple id byte. Updated.
* @param tupleLength Tuple length. Updated.
* @param tuple Dest buffer for tuple (must be 256 bytes). Updated.
*
* @return 0 on success, nonzero on error.
*/
static int dvb_ca_en50221_read_tuple(
int *address, int *tupleType, int *tupleLength, u8 *tuple)
{
	int i;
	int _tupleType;
	int _tupleLength;
	int _address = *address;

	/* grab the next tuple length and type */
	_tupleType = aml_ci_bus_mem_read((struct aml_ci *)
		ci_bus.priv, 0, _address);
	if (_tupleType < 0)
		return _tupleType;
	if (_tupleType == 0xff) {
		pr_dbg("END OF CHAIN TUPLE type:0x%x\n", _tupleType);
		*address += 2;
		*tupleType = _tupleType;
		*tupleLength = 0;
		return 0;
	}
	_tupleLength = aml_ci_bus_mem_read((struct aml_ci *)
		ci_bus.priv, 0, _address + 2);
	if (_tupleLength < 0)
		return _tupleLength;
	_address += 4;

	pr_dbg("TUPLE type:0x%x length:%i\n", _tupleType, _tupleLength);

	/* read in the whole tuple */
	for (i = 0; i < _tupleLength; i++) {
		tuple[i] = aml_ci_bus_mem_read((struct aml_ci *)
			ci_bus.priv, 0, _address + (i * 2));
		pr_dbg("  0x%02x: 0x%02x %c\n",
			i, tuple[i] & 0xff,
			((tuple[i] > 31) && (tuple[i] < 127)) ?
			tuple[i] : '.');
	}
	_address += (_tupleLength * 2);

	/* success */
	*tupleType = _tupleType;
	*tupleLength = _tupleLength;
	*address = _address;
	return 0;
}
/* naive substring search over possibly non-terminated data;
 * returns pointer to the first match or NULL */
static char *findstr(char *haystack, int hlen, char *needle, int nlen)
{
	int i;

	if (hlen < nlen)
		return NULL;

	for (i = 0; i <= hlen - nlen; i++) {
		if (!strncmp(haystack + i, needle, nlen))
			return haystack + i;
	}

	return NULL;
}

/**
* Parse attribute memory of a CAM module, extracting Config register, and checking
* it is a DVB CAM module.
*
* Walks the CIS tuple chain in the fixed order required by en50221:
* CISTPL_DEVICE_0A (0x1D), CISTPL_DEVICE_0C (0x1C), CISTPL_VERS_1
* (0x15), CISTPL_MANFID (0x20), ...
*
* @return 0 on success, <0 on failure.
*/
static int dvb_ca_en50221_parse_attributes(void)
{
	int address = 0;
	int tupleLength;
	int tupleType;
	u8 tuple[257];
	char *dvb_str;
	int rasz;
	int status;
	int got_cftableentry = 0;
	int end_chain = 0;
	int i;
	u16 manfid = 0;
	u16 devid = 0;
	int config_base = 0;
	int config_option;

	/* CISTPL_DEVICE_0A */
	status = dvb_ca_en50221_read_tuple(&address,
		&tupleType, &tupleLength, tuple);
	if (status < 0) {
		pr_error("read status error\r\n");
		return status;
	}
	if (tupleType != 0x1D) {
		pr_error("read tupleType error [0x%x]\r\n", tupleType);
		return -EINVAL;
	}

	/* CISTPL_DEVICE_0C */
	status = dvb_ca_en50221_read_tuple(&address,
		&tupleType, &tupleLength, tuple);
	if (status < 0) {
		pr_error("read read cis error\r\n");
		return status;
	}
	if (tupleType != 0x1C) {
		pr_error("read read cis type error\r\n");
		return -EINVAL;
	}

	/* CISTPL_VERS_1 */
	status = dvb_ca_en50221_read_tuple(&address,
		&tupleType, &tupleLength, tuple);
	if (status < 0) {
		pr_error("read read cis version error\r\n");
		return status;
	}
	if (tupleType != 0x15) {
		pr_error("read read cis version type error\r\n");
		return -EINVAL;
	}

	/* CISTPL_MANFID */
	status = dvb_ca_en50221_read_tuple(&address, &tupleType,
		&tupleLength, tuple);
	if (status < 0) {
		pr_error("read read cis manfid error\r\n");
		return
status; + } + if (tupleType != 0x20) { + pr_error("read read cis manfid type error\r\n"); + return -EINVAL; + } + if (tupleLength != 4) { + pr_error("read read cis manfid len error\r\n"); + return -EINVAL; + } + manfid = (tuple[1] << 8) | tuple[0]; + devid = (tuple[3] << 8) | tuple[2]; + + /* CISTPL_CONFIG */ + status = dvb_ca_en50221_read_tuple(&address, &tupleType, + &tupleLength, tuple); + if (status < 0) { + pr_error("read read cis config error\r\n"); + return status; + } + if (tupleType != 0x1A) { + pr_error("read read cis config type error\r\n"); + return -EINVAL; + } + if (tupleLength < 3) { + pr_error("read read cis config len error\r\n"); + return -EINVAL; + } + + /* extract the configbase */ + rasz = tuple[0] & 3; + if (tupleLength < (3 + rasz + 14)) { + pr_error("read extract the configbase error\r\n"); + return -EINVAL; + } + + for (i = 0; i < rasz + 1; i++) + config_base |= (tuple[2 + i] << (8 * i)); + + /* check it contains the correct DVB string */ + dvb_str = findstr((char *)tuple, tupleLength, "DVB_CI_V", 8); + if (dvb_str == NULL) { + pr_error("find dvb str DVB_CI_V error\r\n"); + return -EINVAL; + } + if (tupleLength < ((dvb_str - (char *) tuple) + 12)) { + pr_error("find dvb str DVB_CI_V len error\r\n"); + return -EINVAL; + } + + /* is it a version we support? 
*/ + if (strncmp(dvb_str + 8, "1.00", 4)) { + pr_error(" Unsupported DVB CAM module version %c%c%c%c\n", + dvb_str[8], dvb_str[9], dvb_str[10], dvb_str[11]); + return -EINVAL; + } + + /* process the CFTABLE_ENTRY tuples, and any after those */ + while ((!end_chain) && (address < 0x1000)) { + status = dvb_ca_en50221_read_tuple(&address, &tupleType, + &tupleLength, tuple); + if (status < 0) { + pr_error("process the CFTABLE_ENTRY tuples error\r\n"); + return status; + } + + switch (tupleType) { + case 0x1B: /* CISTPL_CFTABLE_ENTRY */ + if (tupleLength < (2 + 11 + 17)) + break; + + /* if we've already parsed one, just use it */ + if (got_cftableentry) + break; + + /* get the config option */ + config_option = tuple[0] & 0x3f; + + /* OK, check it contains the correct strings */ + if ((findstr((char *)tuple, + tupleLength, "DVB_HOST", 8) == NULL) || + (findstr((char *)tuple, + tupleLength, "DVB_CI_MODULE", 13) == NULL)) + break; + + got_cftableentry = 1; + break; + + case 0x14: /* CISTPL_NO_LINK*/ + break; + + case 0xFF: /* CISTPL_END */ + end_chain = 1; + break; + + default: + /* Unknown tuple type - just skip + *this tuple and move to the next one + */ + pr_error("Skipping unknown tupletype:0x%x L:0x%x\n", + tupleType, tupleLength); + break; + } + } + + if ((address > 0x1000) || (!got_cftableentry)) { + pr_error("got_cftableentry :%d\r\n", got_cftableentry); + return -EINVAL; + } + + pr_error("----------ci cis ok------\r\n"); + return 0; +} + +static ssize_t reset_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int ret; + ret = sprintf(buf, "echo 1 > %s\n\t", attr->attr.name); + return ret; +} + +static ssize_t reset_store(struct class *class, +struct class_attribute *attr, const char *buf, size_t size) +{ + int ret; + struct aml_ci *ci = (struct aml_ci *)ci_bus.priv; + ret = aml_ci_slot_reset(ci, 0); + return size; +} +static CLASS_ATTR_RW(reset); +static ssize_t pwr_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int 
ret; + ret = sprintf(buf, "echo 1|0> %s\n\t", attr->attr.name); + return ret; +} + +static ssize_t pwr_store(struct class *class, +struct class_attribute *attr, const char *buf, size_t size) +{ + int ret = 0; + int enable = 0; + long value; + if (kstrtol(buf, 0, &value) == 0) + enable = (int)value; + ret = aml_gio_power(&ci_bus.pc, enable); + return size; +} + +static CLASS_ATTR_RW(pwr); + +static ssize_t start_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int ret; + ret = sprintf(buf, "start:%d\n", ci_bus.pc.start_work); + return ret; +} + +static ssize_t start_store(struct class *class, +struct class_attribute *attr, const char *buf, size_t size) +{ + int enable = 0; + long value; + if (kstrtol(buf, 0, &value) == 0) { + enable = (int)value; + ci_bus.pc.start_work = enable; + printk("start set start\n"); + aml_pcmcia_detect_cam(&ci_bus.pc); + } + return size; +} + +static CLASS_ATTR_RW(start); + +static ssize_t wakeup_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int ret; + ret = sprintf(buf, "wakeup:%d\n", ci_bus.wakeup_thread); + return ret; +} + +static ssize_t wakeup_store(struct class *class, +struct class_attribute *attr, const char *buf, size_t size) +{ + int enable = 0; + long value; + if (kstrtol(buf, 0, &value) == 0) { + enable = (int)value; + ci_bus.wakeup_thread = enable; + printk("wakeup is set\n"); + } + return size; +} + +static CLASS_ATTR_RW(wakeup); + +static ssize_t status_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int ret; + struct aml_ci *ci = (struct aml_ci *)ci_bus.priv; + ret = aml_ci_slot_status(ci, 0, 0); + ret = sprintf(buf, "%s: %d;\n\t", attr->attr.name, ret); + return ret; +} +static CLASS_ATTR_RO(status); + +static ssize_t irq_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int ret; + ret = aml_ci_gio_get_irq(); + ret = sprintf(buf, "%s irq: %d\n\t", attr->attr.name, ret); + return ret; +} +static CLASS_ATTR_RO(irq); + +static 
ssize_t iotest_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int ret; + ret = sprintf(buf, "echo (r|w|f|c)(i|a) addr data > %s\n", + attr->attr.name); + return ret; +} + +static ssize_t iotest_store(struct class *class, +struct class_attribute *attr, const char *buf, size_t size) +{ + int n = 0; + int i = 0; + char *buf_orig, *ps, *token; + char *parm[3]; + unsigned int addr = 0, val = 0, retval = 0; + long value = 0; + struct aml_ci *ci = (struct aml_ci *)ci_bus.priv; + buf_orig = kstrdup(buf, GFP_KERNEL); + ps = buf_orig; + while (1) { + /*need set '\n' to ' \n'*/ + token = strsep(&ps, " "); + if (token == NULL) + break; + if (*token == '\0') + continue; + parm[n++] = token; + } + + if (!n || ((n > 0) && (strlen(parm[0]) != 2))) { + pr_err("invalid command n[%x]p[%x][%s]\n", n,(int)strlen(parm[0]),parm[0]); + kfree(buf_orig); + return size; + } + + if ((parm[0][0] == 'r')) { + if (n > 2) { + pr_err("read: invalid parameter\n"); + kfree(buf_orig); + return size; + } + if (kstrtol(parm[1], 0, &value) == 0) + addr = (int)value; + pr_err("%s 0x%x\n", parm[0], addr); + switch ((char)parm[0][1]) { + case 'i': + for (i = 0; i < 1000; i++) + retval = aml_ci_bus_io_read(ci, 0, addr); + break; + case 'a': + for (i = 0; i < 1000; i++) + retval = aml_ci_bus_mem_read(ci, 0, addr); + break; + default: + break; + } + pr_dbg("%s: 0x%x --> 0x%x\n", parm[0], addr, retval); + } else if ((parm[0][0] == 'w')) { + if (n != 3) { + pr_err("write: invalid parameter\n"); + kfree(buf_orig); + return size; + } + if (kstrtol(parm[1], 0, &value) == 0) + addr = (int)value; + if (kstrtol(parm[2], 0, &value) == 0) + val = (int)value; + + pr_err("%s 0x%x 0x%x", parm[0], addr, val); + switch ((char)parm[0][1]) { + case 'i': + retval = aml_ci_bus_io_write(ci, 0, addr, val); + break; + case 'a': + retval = aml_ci_bus_mem_write(ci, 0, addr, val); + break; + default: + break; + } + pr_dbg("%s: 0x%x <-- 0x%x\n", parm[0], addr, retval); + } else if ((parm[0][0] == 'f')) { + 
pr_dbg("full test----\r\n"); + aml_ci_bus_full_test(ci); + } else if ((parm[0][0] == 'p')) { + pr_dbg("cis dvb_ca_en50221_parse_attributes----\r\n"); + dvb_ca_en50221_parse_attributes(); + } + + kfree(buf_orig); + return size; +} + +static CLASS_ATTR_RW(iotest); + +static struct attribute *aml_ci_bus_attrs[] = { + &class_attr_iotest.attr, + &class_attr_status.attr, + &class_attr_irq.attr, + &class_attr_reset.attr, + &class_attr_pwr.attr, + &class_attr_start.attr, + &class_attr_wakeup.attr, + NULL +}; + +ATTRIBUTE_GROUPS(aml_ci_bus); + + +int aml_ci_bus_mod_init(void) +{ + int ret; + struct class *clp; + #define CLASS_NAME_LEN 48 + pr_dbg("Amlogic DVB CI BUS Init\n"); + + clp = &(ci_bus.cls); + + clp->name = kzalloc(CLASS_NAME_LEN, GFP_KERNEL); + if (!clp->name) + return -ENOMEM; + + snprintf((char *)clp->name, CLASS_NAME_LEN, "aml_ci_bus_%s", "test"); + clp->owner = THIS_MODULE; + clp->class_groups = aml_ci_bus_groups; + ret = class_register(clp); + if (ret) + kfree(clp->name); + return 0; +} + +void aml_ci_bus_mod_exit(void) +{ + pr_dbg("Amlogic DVB CI BUS Exit\n"); + class_unregister(&(ci_bus.cls)); +} + +#endif +#if 0 +module_init(aml_ci_bus_mod_init); +module_exit(aml_ci_bus_mod_exit); + +MODULE_LICENSE("GPL"); +#endif
diff --git a/drivers/stream_input/parser/dvb_ci/aml_ci_bus.h b/drivers/stream_input/parser/dvb_ci/aml_ci_bus.h new file mode 100644 index 0000000..e238253 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/aml_ci_bus.h
@@ -0,0 +1,210 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef __AML_CI_BUS_H_ +#define __AML_CI_BUS_H_ + +#include <linux/amlogic/iomap.h> +#include <linux/amlogic/aml_gpio_consumer.h> +#include <linux/gpio/consumer.h> +#include "aml_pcmcia.h" +#include "aml_ci.h" +#include "dvb_ca_en50221.h" + +/* +aml spi dev +*/ +struct aml_ci_bus { + struct mutex mutex; + struct class cls; + /* add gpio pin */ + struct gpio_desc *cd_pin1; + int cd_pin1_value; + struct gpio_desc *cd_pin2; + int cd_pin2_value; + struct gpio_desc *pwr_pin; + int pwr_pin_value; + struct gpio_desc *le_pin; + int le_pin_value; + int le_enable_level; + int addr_ts_mode_multiplex; + /* cam irq */ + int irq; + int irq_cmp; + int irq_timeout; + struct aml_pcmcia pc; + void *priv; + /*device type*/ + int io_device_type; + /*select gpio group*/ + int select; + struct pinctrl *pinctrl; + struct pinctrl *bus_pinctrl; + struct platform_device *pdev; + /*save cam cur state*/ + enum aml_slot_state slot_state; + /*wake up thread mode at cimcu thread*/ + int wakeup_thread; +}; + +enum aml_gpio_select_e { + AML_GPIO_ADDR = 0, + AML_GPIO_TS, +}; + +#define IORD 0x0 +#define IOWR 0x1 +#define MEMRD 0x2 +#define MEMWR 0x3 + +//24M cyccles, 42 ns +#define CYCLES (42) 
+//mem rd +#define DELAY_RMEM_INIT_ADDR (0) +#define DELAY_RMEM_ADDR_CE (0) +#define DELAY_RMEM_CE_RD (3) +#define DELAY_RMEM_RD_RWAIT (0) +#define DELAY_RMEM_RWAIT_DATA (0) +#define DELAY_RMEM_DATA_DRD (0) +#define DELAY_RMEM_DRD_DCE (0) +#define DELAY_RMEM_DCE_INIT (0) +//mem wr +#define DELAY_WMEM_INIT_ADDR (0) +#define DELAY_WMEM_ADDR_CE (1) +#define DELAY_WMEM_CE_WR (1) +#define DELAY_WMEM_WR_RWAIT (0) +#define DELAY_WMEM_RWAIT_DATA (0) +#define DELAY_WMEM_DATA_DWR (0) +#define DELAY_WMEM_DWR_DCE (1) +#define DELAY_WMEM_DCE_INIT (1) +//iord +#define DELAY_RIO_INIT_ADDR (0) +#define DELAY_RIO_ADDR_CE (0) +#define DELAY_RIO_CE_RD (2) +#define DELAY_RIO_RD_RWAIT (0) +#define DELAY_RIO_RWAIT_DATA (0) +#define DELAY_RIO_DATA_DRD (0) +#define DELAY_RIO_DRD_DCE (0) +#define DELAY_RIO_DCE_INIT (0) +//iowr +#define DELAY_WIO_INIT_ADDR (0) +#define DELAY_WIO_ADDR_CE (1) +#define DELAY_WIO_CE_WR (1) +#define DELAY_WIO_WR_RWAIT (0) +#define DELAY_WIO_RWAIT_DATA (0) +#define DELAY_WIO_DATA_DWR (0) +#define DELAY_WIO_DWR_DCE (1) +#define DELAY_WIO_DCE_INIT (1) + +//timeout irq hold time,10MS +#define TIMEOUT_IRQ_HOLD_TIME (200) + +//reg define +#define CIPLUS_CMD_REG 0x0 +/* +31:24 rw, wdata:the data write in cam +23 reserved +22:8 addr A0-A14 +7:6 reserved +5:4 cmd type:00: iord,01:iowr 10: oe 11:we +3:1 reserved +0 cmd valid,1:cmd is valid.need be executed. 
+ 0: cmd is invalid +*/ +#define CI_CMD_WDATA 24 +#define CI_CMD_ADDR 8 +#define CI_CMD_TYPE 4 +#define CI_CMD_VALID 0 + + +#define CIPLUS_RDATA_REG 0x4 +/* +31:8 reserved +7:0 data read from cam +*/ +#define CIPLUS_CTRL_REG 0x8 +/* +31:21 reserved +10 ctrl clk 1:disable clk gated; + 0:enable clk gated +9 completion irq en,1: disable;0:able +8 inv_cam_iowrn +7 inv_cam_iordn +6 inv_cam_wen +5 inv_cam_oen +4 inv_cam_cen +3 clear transfer complete irq;1:clear +2 clear timeout ieq, 1:clear +1 cam reset: =1 assert =0 deassert +0 control enable: 1:enable ciplus ctrl,0:disable +*/ +#define ENABLE_CMP_IRQ 9 +#define INV_CAM_IOWR 8 +#define INV_CAM_IORD 7 +#define INV_CAM_WE 6 +#define INV_CAM_OE 5 +#define INV_CAM_CE 4 +#define CLEAR_CMP_IRQ 3 +#define CLEAR_TIMEOUT_IRQ 2 +#define CAM_RESET 1 +#define CI_ENABLE 0 + + + +#define CIPLUS_DELAY_CTRL0 0xc +/* +31:24 0xf,delay cycles between assert IORD/IOWR/MEMRD/MEMWR + and WAIT# RELEASED +23:16 0xf,delay cycles between assert CE and assert IORD/IOWR/MEMRD/MEMWR +15:8 0x8,delay cycles between send addr and assert CE +7:0 0x4,delay cycles between INIT and send ADDR/DATA +*/ +#define CIPLUS_DELAY_CTRL1 0x10 +/* +31:24 0xf,delay cycles between DEassert CE and IDLE +23:16 0xf,delay cycles between DEassert IORD/IOWR/MEMRD/MEMWR and deassert CE +15:8 0x8,delay cycles between SAMPLE DATA and DEassert IORD/IOWR/MEMRD/MEMWR +7:0 0xF,delay cycles between WAIT# RELEASE and (SAMPLEDATA OR DEASSERT IORD + IOWR/MEMRD/MEMWR) +*/ +#define CIPLUS_WAIT_TIMEOUT 0x14 +/* +31 wait timeout irq enable,defalut 1; +30:0 data read from cam;time out threshold + if wait time > threshold,than timeout irq. 
+*/ +#define ENABEL_TIMEOUT_IRQ 31 +#define WATT_TIMEOUT_TIME 0 + + +#define CIPLUS_STATUS_REG 0x18 +/* +31:6 reserved +5:2 fsm state +1 timeout irq +0 complete irq +*/ +#define TIMEOUT_IRQ_STATE 1 +#define COMPLETE_IRQ_STATE 0 + + +extern void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot); +extern int aml_ci_bus_init(struct platform_device *pdev, struct aml_ci *ci_dev); +extern int aml_ci_bus_exit(struct aml_ci *ci); +#endif /* __AML_CI_BUS_H_ */
diff --git a/drivers/stream_input/parser/dvb_ci/aml_pcmcia.c b/drivers/stream_input/parser/dvb_ci/aml_pcmcia.c new file mode 100644 index 0000000..2961d73 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/aml_pcmcia.c
@@ -0,0 +1,267 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/device.h> + +#include "aml_pcmcia.h" +#include "aml_ci.h" + +static int aml_pcmcia_debug = 1; + +module_param_named(pcmcia_debug, aml_pcmcia_debug, int, 0644); +MODULE_PARM_DESC(pcmcia_debug, "enable verbose debug messages"); + +#define pr_dbg(args...)\ + do {\ + if (aml_pcmcia_debug)\ + printk(args);\ + } while (0) +#define pr_error(fmt, args...) 
printk("PCMCIA: " fmt, ## args) + + +static int pcmcia_plugin(struct aml_pcmcia *pc, int reset) +{ + if (pc->slot_state == MODULE_XTRACTED) { + pc->pwr(pc, AML_PWR_OPEN);/*hi is open power*/ + pr_dbg(" CAM Plugged IN: Adapter(%d) Slot(0)\n", 0); + udelay(50); + //if (pc->io_device_type != AML_DVB_IO_TYPE_CIBUS) + aml_pcmcia_reset(pc); + /*wait unplug*/ + pc->init_irq(pc, IRQF_TRIGGER_RISING); + udelay(500); + pc->slot_state = MODULE_INSERTED; + + } else { + pr_error("repeat into pcmcia insert \r\n"); + if (reset) + aml_pcmcia_reset(pc); + } + + msleep(1); + pc->pcmcia_plugin(pc, 1); + + return 0; +} + +static int pcmcia_unplug(struct aml_pcmcia *pc) +{ + if (pc->slot_state == MODULE_INSERTED) { + pr_dbg(" CAM Unplugged: Adapter(%d) Slot(0)\n", 0); + /*udelay(50);*/ + /*aml_pcmcia_reset(pc);*/ + /*wait plugin*/ + pc->init_irq(pc, IRQF_TRIGGER_FALLING); + udelay(500); + pc->pwr(pc, AML_PWR_CLOSE);/*hi is open power*/ + + pc->slot_state = MODULE_XTRACTED; + } + msleep(1); + pc->pcmcia_plugin(pc, 0); + + return 0; +} + +static irqreturn_t pcmcia_irq_handler(int irq, void *dev_id) +{ + struct aml_pcmcia *pc = (struct aml_pcmcia *)dev_id; + pr_dbg("pcmcia_irq_handler--into--\r\n"); + disable_irq_nosync(pc->irq); + schedule_work(&pc->pcmcia_work); + enable_irq(pc->irq); + return IRQ_HANDLED; +} + +static void aml_pcmcia_work(struct work_struct *work) +{ + int cd1, cd2; + struct aml_pcmcia *pc = container_of( + work, struct aml_pcmcia, pcmcia_work); + + if (pc->start_work == 0) { + return; + } + cd1 = pc->get_cd1(pc); + cd2 = pc->get_cd2(pc); + + if (cd1 != cd2) + pr_error("work CAM card not inerted.\n"); + else { + if (!cd1) { + pr_error("work Adapter(%d) Slot(0): CAM Plugin\n", 0); + pcmcia_plugin(pc, 0); + } else { + pr_error("work Adapter(%d) Slot(0): CAM Unplug\n", 0); + pcmcia_unplug(pc); + } + } +} + +void aml_pcmcia_detect_cam(struct aml_pcmcia *pc) +{ + int cd1, cd2; + + if (pc == NULL) { + pr_error("pc is null\n"); + return; + } + if (pc->start_work == 0) { + 
pr_error("pc start work is 0\n"); + return; + } + cd1 = pc->get_cd1(pc); + cd2 = pc->get_cd2(pc); + + if (cd1 != cd2) + pr_error("CAM card not inerted. check end\n"); + else { + if (!cd1) { + pr_error("Adapter(%d) Slot(0): CAM Plugin\n", 0); + pcmcia_plugin(pc, 1); + } else { + pr_error("Adapter(%d) Slot(0): CAM Unplug\n", 0); + pcmcia_unplug(pc); + } + } +} +EXPORT_SYMBOL(aml_pcmcia_detect_cam); +static struct aml_pcmcia *pc_cur; + +int aml_pcmcia_init(struct aml_pcmcia *pc) +{ + int err = 0; + unsigned long mode; + pr_dbg("aml_pcmcia_init start pc->irq=%d\r\n", pc->irq); + pc->rst(pc, AML_L); + /*power on*/ + if (pc->io_device_type != AML_DVB_IO_TYPE_CIBUS) + pc->pwr(pc, AML_PWR_OPEN);/*hi is open power*/ + /*assuming cam unpluged, config the INT to waiting-for-plugin mode*/ + pc->init_irq(pc, IRQF_TRIGGER_LOW); + + INIT_WORK(&pc->pcmcia_work, aml_pcmcia_work); + + mode = IRQF_ONESHOT; + if (pc->io_device_type == AML_DVB_IO_TYPE_SPI_T312 || pc->io_device_type == AML_DVB_IO_TYPE_CIBUS) { + mode = mode | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; + } + + err = request_irq(pc->irq, + pcmcia_irq_handler, + mode, "aml-pcmcia", pc); + if (err != 0) { + pr_error("ERROR: IRQ registration failed ! <%d>", err); + return -ENODEV; + } + + pc_cur = pc; + pr_dbg("aml_pcmcia_init ok\r\n"); + if (pc->io_device_type == AML_DVB_IO_TYPE_SPI_T312 || pc->io_device_type == AML_DVB_IO_TYPE_CIBUS) { + //mcu start very fast,so she can detect cam before soc init end. + //so we need add detect cam fun for first time. 
+ aml_pcmcia_detect_cam(pc); + } + return 0; +} +EXPORT_SYMBOL(aml_pcmcia_init); + +int aml_pcmcia_exit(struct aml_pcmcia *pc) +{ + pc->pwr(pc, AML_PWR_CLOSE);/*hi is open power*/ + free_irq(pc->irq, pc); + return 0; +} +EXPORT_SYMBOL(aml_pcmcia_exit); + +int aml_pcmcia_reset(struct aml_pcmcia *pc) +{ + pr_dbg("CAM RESET-->start\n"); + /* viaccess neotion cam need delay 2000 and 3000 */ + /* smit cam need delay 1000 and 1500 */ + /* need change delay according cam vendor */ + pc->rst(pc, AML_H);/*HI is reset*/ + msleep(2000); + pc->rst(pc, AML_L);/*defaule LOW*/ + msleep(2500); + pr_dbg("CAM RESET--end\n"); + return 0; +} +EXPORT_SYMBOL(aml_pcmcia_reset); + + +#if 0 +static ssize_t aml_pcmcia_test_cmd(struct class *class, +struct class_attribute *attr, const char *buf, size_t size) +{ + pr_dbg("pcmcia cmd: %s\n", buf); + if (pc_cur) { + if (memcmp(buf, "reset", 5) == 0) + aml_pcmcia_reset(pc_cur); + else if (memcmp(buf, "on", 2) == 0) + pc_cur->pwr(pc_cur, AML_PWR_OPEN); + else if (memcmp(buf, "off", 3) == 0) + pc_cur->pwr(pc_cur, AML_PWR_CLOSE); + else if (memcmp(buf, "poll", 4) == 0) + schedule_work(&pc_cur->pcmcia_work); + else if (memcmp(buf, "intr", 4) == 0) + pc_cur->init_irq(pc_cur, IRQF_TRIGGER_RISING); + else if (memcmp(buf, "intf", 4) == 0) + pc_cur->init_irq(pc_cur, IRQF_TRIGGER_FALLING); + } + return size; +} + +static struct class_attribute aml_pcmcia_class_attrs[] = { + __ATTR(cmd, S_IRUGO | S_IWUSR, NULL, aml_pcmcia_test_cmd), + __ATTR_NULL +}; + +static struct class aml_pcmcia_class = { + .name = "aml_pcmcia_test", + .class_attrs = aml_pcmcia_class_attrs, +}; + +static int __init aml_pcmcia_mod_init(void) +{ + pr_dbg("Amlogic PCMCIA Init\n"); + + class_register(&aml_pcmcia_class); + + return 0; +} + +static void __exit aml_pcmcia_mod_exit(void) +{ + pr_dbg("Amlogic PCMCIA Exit\n"); + + class_unregister(&aml_pcmcia_class); +} + + +module_init(aml_pcmcia_mod_init); +module_exit(aml_pcmcia_mod_exit); + +MODULE_LICENSE("GPL"); +#endif
diff --git a/drivers/stream_input/parser/dvb_ci/aml_pcmcia.h b/drivers/stream_input/parser/dvb_ci/aml_pcmcia.h new file mode 100644 index 0000000..2119358 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/aml_pcmcia.h
@@ -0,0 +1,63 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ + +#ifndef _AML_PCMCIA_ +#define _AML_PCMCIA_ + +enum aml_slot_state { + MODULE_INSERTED = 3, + MODULE_XTRACTED = 4 +}; + +enum aml_pwr_cmd { + AML_PWR_OPEN = 0, + AML_PWR_CLOSE = 1 +}; +enum aml_reset_cmd { + AML_L = 0, + AML_H = 1 +}; +struct aml_pcmcia { + enum aml_slot_state slot_state; + struct work_struct pcmcia_work; + int run_type;/*0:irq;1:poll*/ + int irq; + int (*init_irq)(struct aml_pcmcia *pc, int flag); + int (*get_cd1)(struct aml_pcmcia *pc); + int (*get_cd2)(struct aml_pcmcia *pc); + int (*pwr)(struct aml_pcmcia *pc, int enable); + int (*rst)(struct aml_pcmcia *pc, int enable); + + int (*pcmcia_plugin)(struct aml_pcmcia *pc, int plugin); + + void *priv; + /*device type*/ + int io_device_type; + /*start detect card and work*/ + int start_work; +}; + +int aml_pcmcia_init(struct aml_pcmcia *pc); +int aml_pcmcia_exit(struct aml_pcmcia *pc); +int aml_pcmcia_reset(struct aml_pcmcia *pc); +void aml_pcmcia_detect_cam(struct aml_pcmcia *pc); + +#endif /*_AML_PCMCIA_*/ +
diff --git a/drivers/stream_input/parser/dvb_ci/aml_spi.c b/drivers/stream_input/parser/dvb_ci/aml_spi.c new file mode 100644 index 0000000..97fef8f --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/aml_spi.c
@@ -0,0 +1,1865 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/amlogic/aml_gpio_consumer.h> +#include <linux/gpio/consumer.h> +#include <linux/device.h> +#include <linux/slab.h> +//#include <linux/amlogic/sd.h> +#include <linux/mmc/sd.h> +#include <linux/of_irq.h> +#include <linux/irq.h> +#include "aml_spi.h" +#include "aml_ci.h" + +#define AML_MODE_NAME "aml_dvbci_spi" + +#define AML_SPI_READ_LEN 16 + +static int AML_CI_GPIO_IRQ_BASE = 251; +static struct aml_spi *g_spi_dev; +static int aml_spi_debug = 1; +static int G_rec_flag = AM_SPI_STEP_INIT; + + +module_param_named(spi_debug, aml_spi_debug, int, 0644); +MODULE_PARM_DESC(spi_debug, "enable verbose debug messages"); + + +#define pr_dbg(args...)\ + do {\ + if (aml_spi_debug)\ + printk(args);\ + } while (0) +#define pr_error(fmt, args...) 
printk("AML_CI_SPI: " fmt, ## args) + +struct spi_board_info aml_ci_spi_bdinfo = { + .modalias = "ci_spi_dev", + .mode = SPI_MODE_0, + .max_speed_hz = 1000000, /* 1MHz */ + .bus_num = 0, /* SPI bus No. */ + .chip_select = 0, /* the device index on the spi bus */ + .controller_data = NULL, +}; + +#define NORMAL_MSG (0<<7) +#define BROADCAST_MSG (1<<7) +#define BLOCK_DATA (0<<6) +#define SINGLE_DATA (1<<6) +#define CISPI_DEV_ADDR 1 + +#define INPUT 0 +#define OUTPUT 1 +#define OUTLEVEL_LOW 0 +#define OUTLEVEL_HIGH 1 +#define PULLLOW 1 +#define PULLHIGH 0 + +/* +sendbuf data struct +---------------------------------------------------- +|start flag| cmd | data | addr |end flag | +---------------------------------------------------- +| 2 byte | 1byte | 1byte | 2 byte| 2 byte | +---------------------------------------------------- +*/ + +#define SENDBUFLEN 8 +static u8 sendbuf[SENDBUFLEN];/* send data */ +static u8 rbuf[SENDBUFLEN];/*save get data */ +/**\brief aml_init_send_buf:init spi send buf +* \param cmd: ci cmd +* \param data: write value +* \param addr: read or write addr +* \return +* - read value:ok +* - -EINVAL : error +*/ +static int aml_init_send_buf(u8 cmd, u8 data, u16 addr) +{ + /* start flag */ + sendbuf[0] = DATASTART; + sendbuf[1] = DATASTART; + /* cmd */ + sendbuf[2] = cmd; + /* data */ + sendbuf[3] = data; + /* addr senf low 8 bit first,and then send hi 8bit */ + sendbuf[4] = addr & 0x00ff; + sendbuf[5] = (addr>>8) & 0xff; + /* end flag */ + sendbuf[6] = DATAEND; + sendbuf[7] = DATAEND; + return 0; +} +/**\brief aml_ci_spi_reciver +* \param[out] None +* \param[in] value,get from spi +* \return +* - 0:reciver end,-1:reciver +* - +*/ +/* +data strouct +---------------------------------------------------- +|start flag| cmd | data | addr |end flag | +---------------------------------------------------- +| 2 byte | 1byte | 1byte | 2 byte| 2 byte | +---------------------------------------------------- +*/ +int aml_ci_spi_paser_bit(uint8_t value) +{ + /* 
read spi data from slave */ + if (G_rec_flag == AM_SPI_STEP_INIT) { + /* start type first */ + if (value == DATASTART) { + rbuf[0] = value; + G_rec_flag = AM_SPI_STEP_START1; + } + } else if (G_rec_flag == AM_SPI_STEP_START1) { + /* start2 type seccond */ + if (value == DATASTART) { + rbuf[1] = value; + G_rec_flag = AM_SPI_STEP_START2; + } + } else if (G_rec_flag == AM_SPI_STEP_START2) { + /* cmd type */ + /* pr_dbg("spi value=%d\r\n",value); */ + rbuf[2] = value; + G_rec_flag = AM_SPI_STEP_CMD; + } else if (G_rec_flag == AM_SPI_STEP_CMD) { + /* data */ + rbuf[3] = value; + G_rec_flag = AM_SPI_STEP_DATA; + } else if (G_rec_flag == AM_SPI_STEP_DATA) { + /* ADDR1 */ + rbuf[4] = value; + G_rec_flag = AM_SPI_STEP_ADDR1; + } else if (G_rec_flag == AM_SPI_STEP_ADDR1) { + /* ADDR2 type */ + rbuf[5] = value; + G_rec_flag = AM_SPI_STEP_ADDR2; + } else if (G_rec_flag == AM_SPI_STEP_ADDR2) { + /* END1 type */ + if (value == DATAEND) { + rbuf[6] = value; + G_rec_flag = AM_SPI_STEP_END1; + } + } else if (G_rec_flag == AM_SPI_STEP_END1) { + /* END2 type */ + if (value == DATAEND) { + rbuf[7] = value; + G_rec_flag = AM_SPI_STEP_END2; + /* pr_dbg("spi read value ok end\r\n"); */ + return 0; + } + } + return -1; +} + +/**\brief aml_spi_io_api:spi read or write api with mcu +* \param spi_dev: aml_spi obj,used this data to get spi obj +* \param val: write value +* \param len: write value len +* \param mode: cmd +* \return +* - read value:ok +* - -EINVAL : error +*/ +static int aml_spi_io_api(struct aml_spi *spi_dev, u8 *val, int len, int mode) +{ + u8 rb[32] = {0}; + int ret = 0; + int i = 0; + u8 rd = 0; + int j = 0; + int is_retry = 0; + if (spi_dev == NULL ) { + pr_error("%s spi_dev is null\r\n", __func__); + return -EINVAL; + } + if (spi_dev->spi == NULL) { + pr_error("%s spi is null\r\n", __func__); + return -EINVAL; + } + spin_lock(&spi_dev->spi_lock); + if (spi_dev->cs_hold_delay) + udelay(spi_dev->cs_hold_delay); +restart: + dirspi_start(spi_dev->spi); + if 
(spi_dev->cs_clk_delay) + udelay(spi_dev->cs_clk_delay); + + ret = dirspi_xfer(spi_dev->spi, val, rb, len); + if (ret != 0) + pr_dbg("spi xfer value errro ret %d\r\n", ret); + /* wait mcu io 1ms */ + udelay(1000); + /* init rec flag */ + G_rec_flag = AM_SPI_STEP_INIT; + memset(rbuf, 0, 8); + + for (i = 0; i < 4 * len; i++) { + udelay(50); + memset(rb, 0, 32); + ret = dirspi_read(spi_dev->spi, rb, AML_SPI_READ_LEN); + if (ret != 0) { + pr_dbg("spi read value timeout:%x ret %d\r\n", rd, ret); + } + for (j = 0; j < AML_SPI_READ_LEN; j++) { + /*pr_dbg("spi read value rb[%d]: 0x%2x\r\n", j, rb[j]);*/ + ret = aml_ci_spi_paser_bit(rb[j]); + if (ret == 0) + break; + } + if (ret == 0) + break; + } + if (ret == 0) { + rd = rbuf[3];/* data */ + } else { + pr_dbg("*spi rec flag[%d]index [%d] read error[0x%x] mode[%d]addr[%d]****\r\n", + G_rec_flag, i,rd, mode, (val[5] << 8 | val[4]) & 0xffff); + dirspi_stop(spi_dev->spi); + //only retry once + if (is_retry == 0) { + is_retry = 1; + goto restart; + } + } + if (spi_dev->cs_clk_delay) + udelay(spi_dev->cs_clk_delay); + + /* pr_error("ci spi is stop in %s rd=%d\r\n",__func__,rd);*/ + dirspi_stop(spi_dev->spi); + + spin_unlock(&spi_dev->spi_lock); + + return rd; +} + +/********************************************************/ +/********************************************************/ +/******* gpio api *************/ +/********************************************************/ +/********************************************************/ +/**\brief aml_set_gpio_out:set gio out and set val value +* \param gpio: gpio_desc obj, +* \param val: set val +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int aml_set_gpio_out(struct gpio_desc *gpio, int val) +{ + int ret = 0; + if (val < 0) { + pr_dbg("gpio out val = -1.\n"); + return -1; + } + if (val != 0) + val = 1; + ret = gpiod_direction_output(gpio, val); + pr_dbg("dvb ci gpio out ret %d set val:%d\n", ret, val); + return ret; +} +#if 0//no used +/**\brief aml_set_gpio_in:set gio in 
+* \param gpio: gpio_desc obj, +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int aml_set_gpio_in(struct gpio_desc *gpio) +{ + gpiod_direction_input(gpio); + return 0; +} +#endif + +/**\brief aml_get_gpio_value:get gio value +* \param gpio: gpio_desc obj, +* \return +* - gpio value:ok +* - -EINVAL : error +*/ +static int aml_get_gpio_value(struct gpio_desc *gpio) +{ + int ret = 0; + ret = gpiod_get_value(gpio); + return ret; +} +/**\brief aml_gpio_free:free gio +* \param gpio: gpio_desc obj, +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int aml_gpio_free(struct gpio_desc *gpio) +{ + gpiod_put(gpio); + return 0; +} +/**\brief spi_get_gpio_by_name:get gpio desc from dts file +* \param spi_dev: aml_spi obj +* \param gpiod: gpio_desc * obj +* \param str: gpio name at dts file +* \param input_output: gpio input or output type +* \param output_value: gpio out put value +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int spi_get_gpio_by_name(struct aml_spi *spi_dev, +struct gpio_desc **gpiod, int *pin_value, +char *str, int input_output, int output_level) +{ + int ret = 0; + struct device_node *child = NULL; + struct platform_device *pdev = spi_dev->pdev; + struct device_node *np = pdev->dev.of_node; + + /*get spi and gpio config from dts*/ + /* get device config for dvbci_io*/ + child = of_get_child_by_name(np, "dvbci_io"); + if (IS_ERR(*gpiod)) { + pr_dbg("dvb ci spi %s request failed\n", str); + return -1; + } + + *pin_value = of_get_named_gpio_flags(child, str, 0, NULL); + *gpiod = gpio_to_desc(*pin_value); + if (IS_ERR(*gpiod)) { + pr_dbg("spi %s request failed\n", str); + return -1; + } + pr_dbg("spi get_gpio %s %p %d\n", str, *gpiod, *pin_value); + gpio_request(*pin_value, AML_MODE_NAME); + + if (input_output == OUTPUT) { + ret = gpiod_direction_output(*gpiod, output_level); + } else if (input_output == INPUT) { + ret = gpiod_direction_input(*gpiod); + /*ret |= gpiod_set_pullup(*gpiod, 1);*/ + } else { + pr_error("spi Request gpio direction 
invalid\n"); + } + return ret; +} +/********************************************************/ +/********************************************************/ +/******* gpio api end *************/ +/********************************************************/ +/********************************************************/ +#if 1 +/**\brief aml_ci_cis_test_by_spi:test cis +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \param addr: read addr +* \return +* - test :ok +* - -EINVAL : error +*/ +/**\brief aml_ci_full_test_by_spi:ci full test +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \param addr: read addr +* \return +* - read value:ok +* - -EINVAL : error +*/ +static int aml_ci_full_test_by_spi( + struct aml_ci *ci_dev, int slot, int addr) +{ + u8 data = 0; + u16 addres = addr; + int value = 0; + struct aml_spi *spi_dev = ci_dev->data; + aml_init_send_buf(AM_CI_CMD_FULLTEST, data, addres); + value = aml_spi_io_api(spi_dev, + sendbuf, SENDBUFLEN, AM_CI_CMD_FULLTEST); + pr_dbg("FULL : TEST END \r\n"); + return value; +} +#endif +/**\brief aml_ci_mem_read_by_spi:io read from cam +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \param addr: read addr +* \return +* - read value:ok +* - -EINVAL : error +*/ +static int aml_ci_mem_read_by_spi( + struct aml_ci *ci_dev, int slot, int addr) +{ + u8 data = 0; + u16 addres = addr; + int value = 0; + struct aml_spi *spi_dev = ci_dev->data; + aml_init_send_buf(AM_CI_CMD_MEMR, data, addres); + value = aml_spi_io_api(spi_dev, sendbuf, SENDBUFLEN, AM_CI_CMD_MEMR); + /*pr_dbg("Read : mem[%d] = 0x%x\n", addr, value);*/ + return value; +} +/**\brief aml_ci_mem_write_by_spi:io write to cam by spi api +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \param addr: write addr +* \param addr: write value +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int 
aml_ci_mem_write_by_spi( + struct aml_ci *ci_dev, int slot, int addr, u8 val) +{ + u8 data = val; + u16 addres = addr; + int value = 0; + struct aml_spi *spi_dev = ci_dev->data; + aml_init_send_buf(AM_CI_CMD_MEMW, data, addres); + value = aml_spi_io_api(spi_dev, sendbuf, SENDBUFLEN, AM_CI_CMD_MEMW); + /*pr_dbg("write : mem[%d] = 0x%x\n", addr, data);*/ +return value; +} +/**\brief aml_ci_io_read_by_spi:io read from cam by spi api +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \param addr: read addr +* \return +* - read value:ok +* - -EINVAL : error +*/ +static int aml_ci_io_read_by_spi( + struct aml_ci *ci_dev, int slot, int addr) +{ + u8 data = 0; + u16 addres = addr; + int value = 0; + struct aml_spi *spi_dev = ci_dev->data; + aml_init_send_buf(AM_CI_CMD_IOR, data, addres); + value = aml_spi_io_api(spi_dev, sendbuf, SENDBUFLEN, AM_CI_CMD_IOR); + /*pr_dbg("read : io[%d] = 0x%x\n", addr, value);*/ + return value; +} +/**\brief aml_ci_io_write_by_spi:io write to cam +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \param addr: write addr +* \param addr: write value +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int aml_ci_io_write_by_spi( + struct aml_ci *ci_dev, int slot, int addr, u8 val) +{ + u8 data = val; + u16 addres = addr; + int value = 0; + struct aml_spi *spi_dev = ci_dev->data; + /*add by chl,need add time delay*/ + mdelay(10); + aml_init_send_buf(AM_CI_CMD_IOW, data, addres); + value = aml_spi_io_api(spi_dev, sendbuf, SENDBUFLEN, AM_CI_CMD_IOW); + /*pr_dbg("write : ATTR[%d] = 0x%x\n", addr, data);*/ + return value; +} + + +/**\brief aml_ci_rst_by_spi:reset cam by spi +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int aml_ci_rst_by_spi( + struct aml_ci *ci_dev, int slot, int level) +{ + u8 data = (u8)level; + u16 addres = 0; + int value = 0; + struct aml_spi 
*spi_dev = ci_dev->data; + /*add by chl,need add time delay*/ + mdelay(10); + aml_init_send_buf(AM_CI_CMD_RESET, data, addres); + value = aml_spi_io_api(spi_dev, sendbuf, SENDBUFLEN, AM_CI_CMD_RESET); + return value; +} + +/**\brief aml_ci_power_by_spi:power cam by spi +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \param enable: enable or disable cam +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int aml_ci_power_by_spi( + struct aml_ci *ci_dev, int slot, int enable) +{ + u8 data = (u8)enable; + u16 addres = 0; + int value = 0; + struct aml_spi *spi_dev = ci_dev->data; + /*add by chl,need add time delay*/ + /*power is controled by mcu*/ + if (0) { + mdelay(10); + aml_init_send_buf(AM_CI_CMD_POWER, data, addres); + value = aml_spi_io_api(spi_dev, sendbuf, SENDBUFLEN, AM_CI_CMD_POWER); + } + return value; +} + +/**\brief aml_ci_getcd12_by_spi:get cd12 cam by spi +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \param cd12: cd1 or cd2 value +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int aml_ci_getcd12_by_spi( + struct aml_ci *ci_dev, int slot, int cd12) +{ + u8 data = (u8)cd12; + u16 addres = 0; + int value = 0; + struct aml_spi *spi_dev = ci_dev->data; + /*add by chl,need add time delay*/ + mdelay(10); + aml_init_send_buf(AM_CI_CMD_GETCD12, data, addres); + value = aml_spi_io_api(spi_dev, sendbuf, SENDBUFLEN, AM_CI_CMD_GETCD12); + return value; +} + + + +/**\brief aml_ci_slot_reset:reset slot +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \return +* - 0:ok +* - -EINVAL : error +*/ +static int aml_ci_slot_reset(struct aml_ci *ci_dev, int slot) +{ + struct aml_spi *spi_dev = ci_dev->data; + pr_dbg("Slot(%d): Slot RESET\n", slot); + aml_pcmcia_reset(&spi_dev->pc); + dvb_ca_en50221_cimcu_camready_irq(&ci_dev->en50221_cimcu, 0); + return 0; +} +/**\brief aml_ci_slot_shutdown:show slot +* \param ci_dev: aml_ci obj,used 
this data to get spi_dev obj +* \param slot: slot index +* \return +* - 0:ok +* - -EINVAL : error +* readme:no use this api +*/ +static int aml_ci_slot_shutdown(struct aml_ci *ci_dev, int slot) +{ + pr_dbg("Slot(%d): Slot shutdown\n", slot); + return 0; +} +/**\brief aml_ci_ts_control:control slot ts +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \return +* - 0:ok +* - -EINVAL : error +* readme:no use this api +*/ +static int aml_ci_ts_control(struct aml_ci *ci_dev, int slot) +{ + pr_dbg("Slot(%d): TS control\n", slot); + return 0; +} +/**\brief aml_ci_slot_status:get slot status +* \param ci_dev: aml_ci obj,used this data to get spi_dev obj +* \param slot: slot index +* \param open: no used +* \return +* - cam status +* - -EINVAL : error +*/ +static int aml_ci_slot_status(struct aml_ci *ci_dev, int slot, int open) +{ + struct aml_spi *spi_dev = ci_dev->data; + + pr_dbg("Slot(%d): Poll Slot status\n", slot); + + if (spi_dev->pc.slot_state == MODULE_INSERTED) { + pr_dbg("CA Module present and ready\n"); + return DVB_CA_EN50221_POLL_CAM_PRESENT | + DVB_CA_EN50221_POLL_CAM_READY; + } else { + pr_error("CA Module not present or not ready\n"); + } + return -EINVAL; +} +#if 1 +/**\brief aml_ci_gio_get_irq:get gpio cam irq pin value +* \return +* - irq pin value +* - -EINVAL : error +*/ +static int aml_ci_gio_get_irq(void) +{ + int ret = 0; + if (g_spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI) + ret = aml_get_gpio_value(g_spi_dev->irq_cam_pin); + else if (g_spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI) + ret = aml_get_gpio_value(g_spi_dev->mcu_irq_pin); + else + pr_error("aml_ci_gio_get_irq io type not surport\n"); + return ret; +} +#endif + +/********************************************************/ +/********************************************************/ +/******* for pcmcid api *************/ +/********************************************************/ +/********************************************************/ 
+/**\brief aml_gio_power:set power gpio hi or low +* \param pc: aml_pcmcia obj,used this priv to get spi_dev obj +* \param enable: power pin hi or low +* \return +* - 0 +* - -EINVAL : error +*/ +static int aml_gio_power(struct aml_pcmcia *pc, int enable) +{ + int ret = 0; + struct aml_spi *spi_dev = pc->priv; + if (spi_dev == NULL) { + pr_dbg("spi dev is null %s : %d\r\n", __func__, enable); + return -1; + } + pr_dbg("%s : %d\r\n", __func__, enable); + if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI) { + if (enable == AML_PWR_OPEN) { + /*hi level ,open power*/ + ret = aml_set_gpio_out(spi_dev->pwr_pin, AML_GPIO_HIGH); + } else { + /*low level ,close power*/ + ret = aml_set_gpio_out(spi_dev->pwr_pin, AML_GPIO_LOW); + } + } else if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI_T312) { + //no need power cam,we power cam card on MCU. + aml_ci_power_by_spi((struct aml_ci *)spi_dev->priv, 0, enable); + } else { + pr_dbg("aml_gio_power type [%d] enable: %d\r\n", spi_dev->io_device_type, enable); + } + return ret; +} +/**\brief aml_gio_reset:set reset gpio hi or low +* \param pc: aml_pcmcia obj,used this priv to get spi_dev obj +* \param enable: reset pin hi or low +* \return +* - 0 +* - -EINVAL : error +*/ +static int aml_gio_reset(struct aml_pcmcia *pc, int enable) +{ + /*need set hi and sleep set low*/ + int ret = 0; + struct aml_spi *spi_dev = pc->priv; + + if (spi_dev != NULL) + pr_dbg("%s : %d \r\n", __func__, enable); + + pr_dbg("%s : %d type: %d\r\n", __func__, enable, spi_dev->io_device_type); + if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI) { + if (enable == AML_L) + ret = aml_set_gpio_out(spi_dev->reset_pin, AML_GPIO_LOW); + else + ret = aml_set_gpio_out(spi_dev->reset_pin, AML_GPIO_HIGH); + } else if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI_T312) { + if (spi_dev == NULL || spi_dev->priv == NULL) { + pr_dbg("rst by spi-spidev-null-\r\n"); + return -1; + } + aml_ci_rst_by_spi((struct aml_ci *)spi_dev->priv, 0, enable); + } else { + 
pr_dbg("aml_gio_power type [%d] enable: %d\r\n", spi_dev->io_device_type, enable); + } + return ret; +} + +/**\brief aml_gio_init_irq:set gpio irq +* \param pc: aml_pcmcia obj,used this priv to get spi_dev obj +* \param flag: rising or falling or hi or low +* \return +* - 0 +* - -EINVAL : error +*/ +/*need change*/ +static int aml_gio_init_irq(struct aml_pcmcia *pc, int flag) +{ + struct aml_spi *spi_dev = (struct aml_spi *)pc->priv; + +#if 0 + int cd1_pin = desc_to_gpio(spi_dev->cd_pin1); + + int irq = pc->irq-AML_CI_GPIO_IRQ_BASE; + + printk("----cd1_pin=%d irq=%d\r\n", cd1_pin, irq); + aml_set_gpio_in(spi_dev->cd_pin1); + + if (flag == IRQF_TRIGGER_RISING) + gpio_for_irq(cd1_pin, + AML_GPIO_IRQ(irq, FILTER_NUM7, GPIO_IRQ_RISING)); + else if (flag == IRQF_TRIGGER_FALLING) + gpio_for_irq(cd1_pin, + AML_GPIO_IRQ(irq, FILTER_NUM7, GPIO_IRQ_FALLING)); + else if (flag == IRQF_TRIGGER_HIGH) + gpio_for_irq(cd1_pin, + AML_GPIO_IRQ(irq, FILTER_NUM7, GPIO_IRQ_HIGH)); + else if (flag == IRQF_TRIGGER_LOW) + gpio_for_irq(cd1_pin, + AML_GPIO_IRQ(irq, FILTER_NUM7, GPIO_IRQ_LOW)); + else + return -1; +#endif + if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI) { + gpiod_to_irq(spi_dev->cd_pin1); + } else if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI_T312) { + gpiod_to_irq(spi_dev->mcu_irq_pin); + } else { + pr_dbg("aml_gio_init_irq type [%d] \r\n", spi_dev->io_device_type); + } + return 0; +} + +/**\brief aml_gio_get_cd1:get gpio cd1 pin value +* \param pc: aml_pcmcia obj,used this priv to get spi_dev obj +* \return +* - cd1 pin value +* - -EINVAL : error +*/ +static int aml_gio_get_cd1(struct aml_pcmcia *pc) +{ + int ret = 1; + struct aml_spi *spi_dev = pc->priv; + if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI) { + ret = aml_get_gpio_value(spi_dev->cd_pin1); + } else if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI_T312) { + ret = aml_ci_getcd12_by_spi((struct aml_ci *)spi_dev->priv, 0, 0); + } else { + pr_dbg("aml_gio_get_cd1 not surport type [%d] \r\n", 
spi_dev->io_device_type); + } + return ret; +} +/**\brief aml_gio_get_cd2:get gpio cd2 pin value +* \param pc: aml_pcmcia obj,used this priv to get spi_dev obj +* \return +* - cd2 pin value +* - -EINVAL : error +*/ +static int aml_gio_get_cd2(struct aml_pcmcia *pc) +{ + int ret = 0; + struct aml_spi *spi_dev = pc->priv; + if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI) { + ret = aml_get_gpio_value(spi_dev->cd_pin2); + } else if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI_T312) { + ret = aml_ci_getcd12_by_spi((struct aml_ci *)spi_dev->priv, 0, 1); + } else { + pr_dbg("aml_gio_get_cd2 not surport type [%d] \r\n", spi_dev->io_device_type); + } + return ret; +} +/**\brief aml_cam_plugin:notify en50221 cam card in or out +* \param pc: aml_pcmcia obj,used this priv to get spi_dev obj +* \plugin: 0:remove;1:in +* \return +* - 0 +* - -EINVAL : error +*/ +static int aml_cam_plugin(struct aml_pcmcia *pc, int plugin) +{ + struct aml_ci *ci = (struct aml_ci *) + ((struct aml_spi *)(pc->priv))->priv; + pr_dbg("%s : %d\r\n", __func__, plugin); + if (plugin) { + dvb_ca_en50221_cimcu_camchange_irq(&ci->en50221_cimcu, + 0, DVB_CA_EN50221_CAMCHANGE_INSERTED); + } else { + dvb_ca_en50221_cimcu_camchange_irq(&ci->en50221_cimcu, + 0, DVB_CA_EN50221_CAMCHANGE_REMOVED); + } + return 0; +} +/**\brief aml_pcmcia_alloc:alloc nad init pcmcia obj +* \param spi_dev: aml_spi obj, +* \param pcmcia: aml_pcmcia * obj, +* \return +* - 0 +* - -EINVAL : error +*/ +static void aml_pcmcia_alloc(struct aml_spi *spi_dev, + struct aml_pcmcia **pcmcia) +{ + pr_dbg("aml_pcmcia_alloc----\n"); + *pcmcia = &spi_dev->pc; + (*pcmcia)->irq = spi_dev->irq; + (*pcmcia)->init_irq = aml_gio_init_irq; + (*pcmcia)->get_cd1 = aml_gio_get_cd1; + (*pcmcia)->get_cd2 = aml_gio_get_cd2; + (*pcmcia)->pwr = aml_gio_power; + (*pcmcia)->rst = aml_gio_reset; + (*pcmcia)->pcmcia_plugin = aml_cam_plugin; + (*pcmcia)->slot_state = MODULE_XTRACTED; + (*pcmcia)->priv = spi_dev; + (*pcmcia)->run_type = 0;/*0:irq;1:poll*/ + 
(*pcmcia)->io_device_type = AML_DVB_IO_TYPE_CIMAX; +} + +/**\brief aml_spi_get_config_from_dts:get spi config and gpio config from dts +* \param spi_dev: aml_spi obj, +* \return +* - 0 +* - -EINVAL : error +*/ +static int aml_spi_get_config_from_dts(struct aml_spi *spi_dev) +{ + struct device_node *child = NULL; + struct platform_device *pdev = spi_dev->pdev; + struct device_node *np = pdev->dev.of_node; + unsigned int temp[5], val; + int ret = 0; + pr_dbg("into get spi dts \r\n"); + + /*get spi and gpio config from dts*/ + /* get device config for dvbci_io*/ + child = of_get_child_by_name(np, "dvbci_io"); + if (child == NULL) { + pr_error("failed to get dvbci_io\n"); + return -1; + } + spi_dev->spi_bdinfo = &aml_ci_spi_bdinfo; + /* get spi config */ + ret = of_property_read_u32_array(child, "spi_bus_num", temp, 1); + if (ret) { + pr_error("failed to get spi_bus_num\n"); + } else { + aml_ci_spi_bdinfo.bus_num = temp[0]; + pr_dbg("bus_num: %d\n", aml_ci_spi_bdinfo.bus_num); + } + ret = of_property_read_u32_array(child, "spi_chip_select", + temp, 1); + if (ret) { + pr_error("failed to get spi_chip_select\n"); + } else { + aml_ci_spi_bdinfo.chip_select = temp[0]; + pr_dbg("chip_select: %d\n", aml_ci_spi_bdinfo.chip_select); + } + ret = of_property_read_u32_array(child, "spi_max_frequency", + temp, 1); + if (ret) { + pr_error("failed to get spi_chip_select\n"); + } else { + aml_ci_spi_bdinfo.max_speed_hz = temp[0]; + pr_dbg("max_speed_hz: %d\n", aml_ci_spi_bdinfo.max_speed_hz); + } + ret = of_property_read_u32_array(child, "spi_mode", temp, 1); + if (ret) { + pr_error("failed to get spi_mode\n"); + } else { + aml_ci_spi_bdinfo.mode = temp[0]; + pr_dbg("mode: %d\n", aml_ci_spi_bdinfo.mode); + } + ret = of_property_read_u32_array(child, "spi_cs_delay", + &temp[0], 2); + if (ret) { + spi_dev->cs_hold_delay = 0; + spi_dev->cs_clk_delay = 0; + } else { + spi_dev->cs_hold_delay = temp[0]; + spi_dev->cs_clk_delay = temp[1]; + } + ret = of_property_read_u32(child, 
"spi_write_check", &val); + if (ret) + spi_dev->write_check = 0; + else + spi_dev->write_check = (unsigned char)val; + + //below is get cd1 cd2 pwr irq reset gpio info + if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI) { + /*get cd1 irq num*/ + ret = of_property_read_u32(child, "irq_cd1", &val); + if (ret) { + spi_dev->irq = 5; + } else { + /*set irq value need add + AML_CI_GPIO_IRQ_BASE,but + we need minus + AML_CI_GPIO_IRQ_BASE + when gpio request irq */ + spi_dev->irq = val+AML_CI_GPIO_IRQ_BASE; + } + + spi_dev->irq = irq_of_parse_and_map( + pdev->dev.of_node, 0); + AML_CI_GPIO_IRQ_BASE = spi_dev->irq - val; + pr_dbg("get spi irq : %d USEDBASE:%d val:%d\r\n", + spi_dev->irq, AML_CI_GPIO_IRQ_BASE, val); + /*get reset pwd cd1 cd2 gpio pin*/ + spi_dev->reset_pin = NULL; + ret = spi_get_gpio_by_name(spi_dev, &spi_dev->reset_pin, + &spi_dev->reset_pin_value, "reset_pin", + OUTPUT, OUTLEVEL_HIGH); + if (ret) { + pr_error("dvb ci reset pin request failed\n"); + return -1; + } + spi_dev->cd_pin1 = NULL; + ret = spi_get_gpio_by_name(spi_dev, + &spi_dev->cd_pin1, + &spi_dev->cd_pin1_value, "cd_pin1", + INPUT, OUTLEVEL_HIGH); + if (ret) { + pr_error("dvb ci cd_pin1 pin request failed\n"); + return -1; + } + spi_dev->cd_pin2 = spi_dev->cd_pin1; + spi_dev->cd_pin2_value = spi_dev->cd_pin1_value; + spi_dev->pwr_pin = NULL; + pr_dbg("spi_dev->cd_pin1_value==%d\r\n", spi_dev->cd_pin1_value); + ret = spi_get_gpio_by_name(spi_dev, + &spi_dev->pwr_pin, &spi_dev->pwr_pin_value, + "pwr_pin", OUTPUT, OUTLEVEL_HIGH); + if (ret) { + pr_error("dvb ci pwr_pin pin request failed\n"); + return -1; + } + spi_dev->irq_cam_pin = NULL; + ret = spi_get_gpio_by_name(spi_dev, + &spi_dev->irq_cam_pin, &spi_dev->irq_cam_pin_value, + "irq_cam_pin", INPUT, OUTLEVEL_HIGH); + if (ret) { + pr_error("dvbci irq_cam_pin pin request failed\n"); + return -1; + } + } else if (spi_dev->io_device_type == AML_DVB_IO_TYPE_SPI_T312) { + //get mcu irq gpio + spi_dev->mcu_irq_pin = NULL; + ret = 
spi_get_gpio_by_name(spi_dev, + &spi_dev->mcu_irq_pin, + &spi_dev->mcu_irq_pin_value, "mcu_irq_pin", + INPUT, OUTLEVEL_HIGH); + if (ret) { + pr_error("dvb ci mcu_irq_pin pin request failed\n"); + return -1; + } + spi_dev->irq = gpiod_to_irq(spi_dev->mcu_irq_pin) ; + } else { + pr_error("dvbci io device type error [%d]\n", spi_dev->io_device_type); + } + return 0; +} +/**\brief aml_ci_free_gpio:free ci gpio +* \param spi_dev: aml_spi obj, +* \return +* - 0 +* - -EINVAL : error +*/ +static void aml_ci_free_gpio(struct aml_spi *spi_dev) +{ + if (spi_dev == NULL) { + pr_error("spi_dev is NULL,no need free gpio res\r\n"); + return; + } + + if (spi_dev->pwr_pin) { + aml_gpio_free(spi_dev->pwr_pin); + spi_dev->pwr_pin = NULL; + } + if (spi_dev->cd_pin1) { + aml_gpio_free(spi_dev->cd_pin1); + spi_dev->cd_pin1 = NULL; + spi_dev->cd_pin2 = NULL; + } + if (spi_dev->reset_pin) { + aml_gpio_free(spi_dev->reset_pin); + spi_dev->reset_pin = NULL; + } + if (spi_dev->irq_cam_pin) { + aml_gpio_free(spi_dev->irq_cam_pin); + spi_dev->irq_cam_pin = NULL; + } + return; +} + + +/**\brief ci_spi_dev_remove:spi probe api +* \param spi: spi obj, +* \return +* - 0 +* - -EINVAL : error +*/ +static int ci_spi_dev_probe(struct spi_device *spi) +{ + int ret; + pr_dbg("spi Dev probe--\r\n"); + spin_lock(&(g_spi_dev->spi_lock)); + if (g_spi_dev) + g_spi_dev->spi = spi; + else + pr_dbg("spi Dev probe-error-\n"); + spi->bits_per_word = 8; + ret = spi_setup(spi); + if (ret) + pr_dbg("spi setup failed\n"); + spin_unlock(&(g_spi_dev->spi_lock)); + return ret; +} +/**\brief ci_spi_dev_remove:spi remove api +* \param spi: spi obj, +* \return +* - 0 +* - -EINVAL : error +*/ +static int ci_spi_dev_remove(struct spi_device *spi) +{ + pr_dbg("spi Dev remove--\n"); + if (g_spi_dev) + g_spi_dev->spi = NULL; + + return 0; +} + +static struct spi_driver ci_spi_dev_driver = { + .probe = ci_spi_dev_probe, + .remove = ci_spi_dev_remove, + .driver = { + .name = "ci_spi_dev",/*set same with board info modalias*/ + 
.owner = THIS_MODULE, + }, +}; +/**\brief aml_spi_init:spi_dev init +* \param ci_dev: aml_ci obj, +* \param pdev: platform_device obj,used to get dts info +* \return +* - 0 +* - -EINVAL : error +*/ +int aml_spi_init(struct platform_device *pdev, struct aml_ci *ci_dev) +{ + struct aml_spi *spi_dev = NULL; + struct aml_pcmcia *pc; + int result; + + spi_dev = kmalloc(sizeof(struct aml_spi), GFP_KERNEL); + if (!spi_dev) { + pr_error("Out of memory!, exiting ..\n"); + result = -ENOMEM; + goto err; + } + g_spi_dev = spi_dev; + spi_dev->pdev = pdev; + spi_dev->priv = ci_dev; + spi_dev->spi = NULL; + /*init io device type*/ + spi_dev->io_device_type = ci_dev->io_type; + pr_dbg("*********spi Dev type [%d]\n", ci_dev->io_type); + /*get config from dts*/ + aml_spi_get_config_from_dts(spi_dev); + + /*init spi_lock*/ + spin_lock_init(&(spi_dev->spi_lock)); + /*regist api dev*/ + pr_dbg("*********spi Dev regist**********\r\n"); + //not support mcu mode on kernel 5.4 + //result = dirspi_register_board_info(spi_dev->spi_bdinfo, 1); + if (result) { + pr_error("register amlspi_dev spi boardinfo failed\n"); + goto fail1; + } + result = spi_register_driver(&ci_spi_dev_driver); + if (result) { + pr_error("register amlspi_dev spi driver failed\n"); + goto fail1; + } + + /*init ci_dev used api.*/ + ci_dev->ci_mem_read = aml_ci_mem_read_by_spi; + ci_dev->ci_mem_write = aml_ci_mem_write_by_spi; + ci_dev->ci_io_read = aml_ci_io_read_by_spi; + ci_dev->ci_io_write = aml_ci_io_write_by_spi; + ci_dev->ci_slot_reset = aml_ci_slot_reset; + ci_dev->ci_slot_shutdown = aml_ci_slot_shutdown; + ci_dev->ci_slot_ts_enable = aml_ci_ts_control; + ci_dev->ci_poll_slot_status = aml_ci_slot_status; + ci_dev->data = spi_dev; + + aml_pcmcia_alloc(spi_dev, &pc); + pc->io_device_type = spi_dev->io_device_type; + result = aml_pcmcia_init(pc); + if (result < 0) { + pr_error("aml_pcmcia_init failed\n"); + goto fail2; + } + return 0; +fail2: + spi_unregister_driver(&ci_spi_dev_driver); +fail1: + kfree(spi_dev); + 
spi_dev = NULL; +err: + return -1; +} +EXPORT_SYMBOL(aml_spi_init); +/**\brief aml_spi_exit:spi exit +* \return +* - 0 +* - -EINVAL : error +*/ +int aml_spi_exit(struct aml_ci *ci) +{ + /*exit pc card*/ + aml_pcmcia_exit(&g_spi_dev->pc); + /*un regist spi driver*/ + spi_unregister_driver(&ci_spi_dev_driver); + /*free gpio*/ + aml_ci_free_gpio(g_spi_dev); + /*free spi dev*/ + kfree(g_spi_dev); + g_spi_dev = NULL; + + return 0; +} +EXPORT_SYMBOL(aml_spi_exit); + + +#if 1 +/********************************************************/ +/********************************************************/ +/******* for spi test api *************/ +/********************************************************/ +/********************************************************/ + +/*cam difines*/ +#define DA 0x80 +#define FR 0x40 +#define WE 0x02 +#define RE 0x01 + +#define RS 0x08 +#define SR 0x04 +#define SW 0x02 +#define HC 0x01 +#define DATA_REG 0 +#define COM_STA_REG 1 +#define SIZE_REG_L 2 +#define SIZE_REG_M 3 +static void aml_spi_ca_full_test(struct aml_ci *ci_dev) +{ + unsigned int BUF_SIZE = 0; + unsigned int i = 0; + unsigned char cc = 0; + unsigned char reg; + unsigned int bsize = 0; + int cnt = 0; + unsigned char buf[10]; + int count = 1000; + mdelay(1000); + pr_dbg("READ CIS START\r\n"); + for (i = 0; i < 267; i++) { + mdelay(100); + cc = aml_ci_mem_read_by_spi(ci_dev, 0, i); + pr_dbg("0x%x ", cc); + if ((i + 1) % 16 == 0) + pr_dbg(" \r\n"); + } + pr_dbg("READ CIS OVER\r\n"); + mdelay(1000); + pr_dbg("SW rst CAM...\r\n"); + aml_ci_io_write_by_spi(ci_dev, 0, COM_STA_REG, RS); + pr_dbg("SW rst over.\r\n"); + pr_dbg("-----------------------------------\r\n"); + pr_dbg("TO delay 2000ms\r\n"); + mdelay(2000); + pr_dbg("\r\n"); + pr_dbg("--------------clear rs--!!!-YOU MUST CLEAR RS BIT--no sleep--------\r\n"); + aml_ci_io_write_by_spi(ci_dev, 0, COM_STA_REG, 0); + pr_dbg("--------------sleep---------------------\r\n"); + mdelay(2000); + pr_dbg("TO check sw-rst is OK\r\n"); + pr_dbg("start 
read fr \r\n"); + if (1) { + unsigned char reg; + unsigned char reg1; + int count1 = 4000; + while (1) { + mdelay(20); + count1--; + reg1 = aml_ci_io_read_by_spi( + ci_dev, 0, COM_STA_REG); + if (FR != (FR & reg1)) { + continue; + } else { + pr_dbg("CAM Reset Ok\r\n"); + break; + } + } + reg = aml_ci_io_read_by_spi(ci_dev, 0, COM_STA_REG); + pr_dbg("STA_REG = 0x%2.2x\r\n", reg); + if (FR & reg) { + pr_dbg("SW-RST is OK!\r\n"); + } else { + pr_dbg("SW-RST is ERR!\r\n"); + goto end; + } + } +end: + pr_dbg("TO check sw-rst over.\r\n"); + pr_dbg("\r\n"); + pr_dbg("-----------------------------------\r\n"); + pr_dbg("TO buffer size negotiation protocol...\r\n"); + pr_dbg("Get which buf size CAM can support\r\n"); + aml_ci_io_write_by_spi(ci_dev, 0, COM_STA_REG, SR); + mdelay(1000); + while (1) { + + reg = aml_ci_io_read_by_spi(ci_dev, 0, COM_STA_REG); + if ((reg & DA) == DA) { + pr_dbg("Buffer negotiate size date avalible.\r\n"); + break; + } else { + /*pr_dbg("Buffer negotiate + size date NOT avalible\r\n");*/ + continue; + } + mdelay(100); + } + cnt = (aml_ci_io_read_by_spi(ci_dev, 0, SIZE_REG_L)) + + ((aml_ci_io_read_by_spi(ci_dev, 0, SIZE_REG_M)) * 256); + pr_dbg("Moudle have <%d> Bytes send to host.\r\n", cnt); + if (cnt != 2) { + pr_dbg("The Bytes will be tx is ERR!\r\n"); + return; + } + for (i = 0; i < cnt; i++) + buf[i] = aml_ci_io_read_by_spi(ci_dev, 0, DATA_REG); + + reg = aml_ci_io_read_by_spi(ci_dev, 0, COM_STA_REG); + if (RE == (RE & reg)) { + pr_dbg("(1)Read CAM buf size ERR!\r\n"); + return; + } + aml_ci_io_write_by_spi(ci_dev, 0, (COM_STA_REG), 0); + + mdelay(1000); + + while (count--) { + reg = aml_ci_io_read_by_spi(ci_dev, 0, COM_STA_REG); + if (FR != (FR & reg)) { + pr_dbg("CAM is busy 2, waiting...\r\n"); + continue; + } else { + pr_dbg("CAM is OK 2.\r\n"); + break; + } + } + reg = aml_ci_io_read_by_spi(ci_dev, 0, COM_STA_REG); + if (FR != (FR & reg)) { + pr_dbg("(2)Read CAM buf size ERR!-\r\n"); + return; + } + bsize = (buf[0] * 256) + buf[1]; + + 
pr_dbg("CAM can support buf size is: <%d>B\r\n", bsize); + + pr_dbg("Tell CAM which size buf is be used\r\n"); + reg = aml_ci_io_read_by_spi(ci_dev, 0, COM_STA_REG); + if (FR != (FR & reg)) + pr_dbg("CAM is busy, waiting free\r\n"); + while (1) { + reg = aml_ci_io_read_by_spi(ci_dev, 0, COM_STA_REG); + if (FR != (FR & reg)) { + pr_dbg("CAM is busy 3, waiting\r\n"); + continue; + } else { + pr_dbg("CAM is OK 3\r\n"); + break; + } + } + + bsize = bsize - 0; + BUF_SIZE = bsize; + pr_dbg("We will use this buf size: <%d>B\r\n", bsize); + aml_ci_io_write_by_spi(ci_dev, 0, COM_STA_REG, SW); + reg = aml_ci_io_read_by_spi(ci_dev, 0, COM_STA_REG); + if (FR != (FR & reg)) + pr_dbg("CAM is busy, waiting\r\n"); + + while (1) { + reg = aml_ci_io_read_by_spi(ci_dev, 0, COM_STA_REG); + if (FR != (FR & reg)) { + pr_dbg("CAM is busy 4, waiting\r\n"); + continue; + } else { + pr_dbg("CAM is OK 4\r\n"); + break; + } + } + /*SHOULD CHECK DA!!!!!*/ + /*PLS ADD THIS CHECK CODE:*/ + pr_dbg("PRIOR to check CAM'S DA\r\n"); + reg = aml_ci_io_read_by_spi(ci_dev, 0, COM_STA_REG); + if ((reg & DA) == DA) { + pr_dbg("CAM have data send to HOST\r\n"); + return; + } + + + buf[0] = (unsigned char)((bsize >> 8) & 0xff); + buf[1] = (unsigned char)(bsize & 0xff); + + while (1) { + mdelay(10); + aml_ci_io_write_by_spi(ci_dev, + 0, COM_STA_REG, HC | SW); + mdelay(100); + reg = aml_ci_io_read_by_spi(ci_dev, + 0, COM_STA_REG); + if (FR != (FR & reg)) { + pr_dbg("CAM is busy 5, waiting\r\n"); + aml_ci_io_write_by_spi(ci_dev, + 0, COM_STA_REG, SW); + continue; + } else { + pr_dbg("CAM is OK 5\r\n"); + break; + } + } + pr_dbg("<2> Bytes send to CAM\r\n"); + aml_ci_io_write_by_spi(ci_dev, 0, SIZE_REG_M, 0); + aml_ci_io_write_by_spi(ci_dev, 0, SIZE_REG_L, 2); + for (i = 0; i < 2; i++) + aml_ci_io_write_by_spi(ci_dev, 0, DATA_REG, buf[i]); + + reg = aml_ci_io_read_by_spi(ci_dev, 0, COM_STA_REG); + if (WE == (WE & reg)) { + pr_dbg("Write CAM ERR!\r\n"); + return; + } else { + aml_ci_io_write_by_spi(ci_dev, 0, 
COM_STA_REG, SW); + mdelay(100); + aml_ci_io_write_by_spi(ci_dev, 0, COM_STA_REG, 0); + pr_dbg("Buffer size negotiation over!\r\n"); + pr_dbg("NOW, HOST can communicates with CAM\r\n"); + pr_dbg("NOW, TEST END\r\n"); + } +} + +/** +* Read a tuple from attribute memory. +* +* @param ca CA instance. +* @param slot Slot id. +* @param address Address to read from. Updated. +* @param tupleType Tuple id byte. Updated. +* @param tupleLength Tuple length. Updated. +* @param tuple Dest buffer for tuple (must be 256 bytes). Updated. +* +* @return 0 on success, nonzero on error. +*/ +static int dvb_ca_en50221_read_tuple( +int *address, int *tupleType, int *tupleLength, u8 *tuple) +{ + int i; + int _tupleType; + int _tupleLength; + int _address = *address; + + /* grab the next tuple length and type */ + _tupleType = aml_ci_mem_read_by_spi((struct aml_ci *) + g_spi_dev->priv, 0, _address); + if (_tupleType < 0) + return _tupleType; + if (_tupleType == 0xff) { + pr_dbg("END OF CHAIN TUPLE type:0x%x\n", _tupleType); + *address += 2; + *tupleType = _tupleType; + *tupleLength = 0; + return 0; + } + _tupleLength = aml_ci_mem_read_by_spi((struct aml_ci *) + g_spi_dev->priv, 0, _address + 2); + if (_tupleLength < 0) + return _tupleLength; + _address += 4; + + pr_dbg("TUPLE type:0x%x length:%i\n", _tupleType, _tupleLength); + + /* read in the whole tuple */ + for (i = 0; i < _tupleLength; i++) { + tuple[i] = aml_ci_mem_read_by_spi((struct aml_ci *) + g_spi_dev->priv, 0, _address + (i * 2)); + pr_dbg(" 0x%02x: 0x%02x %c\n", + i, tuple[i] & 0xff, + ((tuple[i] > 31) && (tuple[i] < 127)) ? 
tuple[i] : '.'); + } + _address += (_tupleLength * 2); + + /* success */ + *tupleType = _tupleType; + *tupleLength = _tupleLength; + *address = _address; + return 0; +} +static char *findstr(char *haystack, int hlen, char *needle, int nlen) +{ + int i; + + if (hlen < nlen) + return NULL; + + for (i = 0; i <= hlen - nlen; i++) { + if (!strncmp(haystack + i, needle, nlen)) + return haystack + i; + } + + return NULL; +} + +/** +* Parse attribute memory of a CAM module, extracting Config register, and checking +* it is a DVB CAM module. +* +* @param ca CA instance. +* @param slot Slot id. +* +* @return 0 on success, <0 on failure. +*/ +static int dvb_ca_en50221_parse_attributes(void) +{ + int address = 0; + int tupleLength; + int tupleType; + u8 tuple[257]; + char *dvb_str; + int rasz; + int status; + int got_cftableentry = 0; + int end_chain = 0; + int i; + u16 manfid = 0; + u16 devid = 0; + int config_base = 0; + int config_option; + + /* CISTPL_DEVICE_0A */ + status = dvb_ca_en50221_read_tuple(&address, + &tupleType, &tupleLength, tuple); + if (status < 0) { + pr_error("read status error\r\n"); + return status; + } + if (tupleType != 0x1D) { + pr_error("read tupleType error [0x%x]\r\n", tupleType); + return -EINVAL; + } + + + + /* CISTPL_DEVICE_0C */ + status = dvb_ca_en50221_read_tuple(&address, + &tupleType, &tupleLength, tuple); + if (status < 0) { + pr_error("read read cis error\r\n"); + return status; + } + if (tupleType != 0x1C) { + pr_error("read read cis type error\r\n"); + return -EINVAL; + } + + + + /* CISTPL_VERS_1 */ + status = dvb_ca_en50221_read_tuple(&address, + &tupleType, &tupleLength, tuple); + if (status < 0) { + pr_error("read read cis version error\r\n"); + return status; + } + if (tupleType != 0x15) { + pr_error("read read cis version type error\r\n"); + return -EINVAL; + } + + + + /* CISTPL_MANFID */ + status = dvb_ca_en50221_read_tuple(&address, &tupleType, + &tupleLength, tuple); + if (status < 0) { + pr_error("read read cis manfid 
error\r\n"); + return status; + } + if (tupleType != 0x20) { + pr_error("read read cis manfid type error\r\n"); + return -EINVAL; + } + if (tupleLength != 4) { + pr_error("read read cis manfid len error\r\n"); + return -EINVAL; + } + manfid = (tuple[1] << 8) | tuple[0]; + devid = (tuple[3] << 8) | tuple[2]; + + + + /* CISTPL_CONFIG */ + status = dvb_ca_en50221_read_tuple(&address, &tupleType, + &tupleLength, tuple); + if (status < 0) { + pr_error("read read cis config error\r\n"); + return status; + } + if (tupleType != 0x1A) { + pr_error("read read cis config type error\r\n"); + return -EINVAL; + } + if (tupleLength < 3) { + pr_error("read read cis config len error\r\n"); + return -EINVAL; + } + + /* extract the configbase */ + rasz = tuple[0] & 3; + if (tupleLength < (3 + rasz + 14)) { + pr_error("read extract the configbase error\r\n"); + return -EINVAL; + } + + for (i = 0; i < rasz + 1; i++) + config_base |= (tuple[2 + i] << (8 * i)); + + + /* check it contains the correct DVB string */ + dvb_str = findstr((char *)tuple, tupleLength, "DVB_CI_V", 8); + if (dvb_str == NULL) { + pr_error("find dvb str DVB_CI_V error\r\n"); + return -EINVAL; + } + if (tupleLength < ((dvb_str - (char *) tuple) + 12)) { + pr_error("find dvb str DVB_CI_V len error\r\n"); + return -EINVAL; + } + + /* is it a version we support? 
*/ + if (strncmp(dvb_str + 8, "1.00", 4)) { + pr_error(" Unsupported DVB CAM module version %c%c%c%c\n", + dvb_str[8], dvb_str[9], dvb_str[10], dvb_str[11]); + return -EINVAL; + } + +/* process the CFTABLE_ENTRY tuples, and any after those */ +while ((!end_chain) && (address < 0x1000)) { + status = dvb_ca_en50221_read_tuple(&address, &tupleType, + &tupleLength, tuple); + if (status < 0) { + pr_error("process the CFTABLE_ENTRY tuples error\r\n"); + return status; + } + + switch (tupleType) { + case 0x1B: /* CISTPL_CFTABLE_ENTRY */ + if (tupleLength < (2 + 11 + 17)) + break; + + /* if we've already parsed one, just use it */ + if (got_cftableentry) + break; + + /* get the config option */ + config_option = tuple[0] & 0x3f; + + /* OK, check it contains the correct strings */ + if ((findstr((char *)tuple, + tupleLength, "DVB_HOST", 8) == NULL) || + (findstr((char *)tuple, + tupleLength, "DVB_CI_MODULE", 13) == NULL)) + break; + + + got_cftableentry = 1; + break; + + case 0x14: /* CISTPL_NO_LINK*/ + break; + + case 0xFF: /* CISTPL_END */ + end_chain = 1; + break; + + default: + /* Unknown tuple type - just skip + *this tuple and move to the next one + */ +pr_error("Skipping unknown tupletype:0x%x L:0x%x\n", + tupleType, tupleLength); + break; + } + } + + if ((address > 0x1000) || (!got_cftableentry)) { + pr_error("got_cftableentry :%d\r\n", got_cftableentry); + return -EINVAL; + } + + pr_error("----------ci cis ok-----\r\n"); + return 0; +} + +static ssize_t reset_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int ret; + ret = sprintf(buf, "echo 1 > %s\n\t", attr->attr.name); + return ret; +} + +static ssize_t reset_store(struct class *class, +struct class_attribute *attr, const char *buf, size_t size) +{ + int ret; + struct aml_ci *ci = (struct aml_ci *)g_spi_dev->priv; + ret = aml_ci_slot_reset(ci, 0); + return size; +} + +static ssize_t pwr_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int ret; + ret = sprintf(buf, 
"echo 1|0> %s\n\t", attr->attr.name); + return ret; +} + +static ssize_t pwr_store(struct class *class, +struct class_attribute *attr, const char *buf, size_t size) +{ + int ret = 0; + int enable = 0; + long value; + if (kstrtol(buf, 0, &value) == 0) + enable = (int)value; + ret = aml_gio_power(&g_spi_dev->pc, enable); + return size; +} +static ssize_t status_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int ret; + struct aml_ci *ci = (struct aml_ci *)g_spi_dev->priv; + ret = aml_ci_slot_status(ci, 0, 0); + ret = sprintf(buf, "%s: %d;\n\t", attr->attr.name, ret); + return ret; +} + +static ssize_t irq_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int ret; + ret = aml_ci_gio_get_irq(); + ret = sprintf(buf, "%s irq: %d\n\t", attr->attr.name, ret); + return ret; +} + +static ssize_t iotest_show(struct class *class, +struct class_attribute *attr, char *buf) +{ + int ret; + ret = sprintf(buf, "echo (r|w|f|c)(i|a) addr data > %s\n", + attr->attr.name); + return ret; +} + +static ssize_t iotest_store(struct class *class, +struct class_attribute *attr, const char *buf, size_t size) +{ + int n = 0; + int i = 0; + char *buf_orig, *ps, *token; + char *parm[3]; + unsigned int addr = 0, val = 0, retval = 0; + long value = 0; + struct aml_ci *ci = (struct aml_ci *)g_spi_dev->priv; + buf_orig = kstrdup(buf, GFP_KERNEL); + ps = buf_orig; + while (1) { + /*need set '\n' to ' \n'*/ + token = strsep(&ps, " "); + if (token == NULL) + break; + if (*token == '\0') + continue; + parm[n++] = token; + } + + if (!n || ((n > 0) && (strlen(parm[0]) != 2))) { + pr_err("invalid command n[%x]p[%x][%s]\n", n,(int)strlen(parm[0]),parm[0]); + kfree(buf_orig); + return size; + } + + if ((parm[0][0] == 'r')) { + if (n > 2) { + pr_err("read: invalid parameter\n"); + kfree(buf_orig); + return size; + } + if (kstrtol(parm[1], 0, &value) == 0) + addr = (int)value; + pr_err("%s 0x%x\n", parm[0], addr); + switch ((char)parm[0][1]) { + case 'i': + for (i 
= 0; i < 1000; i++) + retval = aml_ci_io_read_by_spi(ci, 0, addr); + break; + case 'a': + for (i = 0; i < 1000; i++) + retval = aml_ci_mem_read_by_spi(ci, 0, addr); + break; + default: + break; + } + pr_dbg("%s: 0x%x --> 0x%x\n", parm[0], addr, retval); + } else if ((parm[0][0] == 'w')) { + if (n != 3) { + pr_err("write: invalid parameter\n"); + kfree(buf_orig); + return size; + } + if (kstrtol(parm[1], 0, &value) == 0) + addr = (int)value; + if (kstrtol(parm[2], 0, &value) == 0) + val = (int)value; + + pr_err("%s 0x%x 0x%x", parm[0], addr, val); + /*switch ((char)parm[0][1]) { + case 'i': +retval = aml_ci_io_write_by_spi(ci, 0, addr, val); + break; + case 'a': +retval = aml_ci_mem_write_by_spi(ci, 0, addr, val); + break; + default: + break; + }*/ + pr_dbg("%s: 0x%x <-- 0x%x\n", parm[0], addr, retval); + } else if ((parm[0][0] == 'f')) { + pr_dbg("full test----\r\n"); + aml_spi_ca_full_test(ci); + } else if ((parm[0][0] == 'c')) { + pr_dbg("cis test----\r\n"); + aml_ci_full_test_by_spi(ci, 0, addr); + } else if ((parm[0][0] == 'p')) { + pr_dbg("cis dvb_ca_en50221_parse_attributes----\r\n"); + dvb_ca_en50221_parse_attributes(); + } + + kfree(buf_orig); + return size; +} +static CLASS_ATTR_RW(reset); +static CLASS_ATTR_RW(pwr); +static CLASS_ATTR_RO(irq); +static CLASS_ATTR_RO(status); +static CLASS_ATTR_RW(iotest); + +static struct attribute *aml_spi_class_attrs[] = { + &class_attr_reset.attr, + &class_attr_pwr.attr, + &class_attr_irq.attr, + &class_attr_status.attr, + &class_attr_iotest.attr, + NULL +}; + +ATTRIBUTE_GROUPS(aml_spi_class); + +static struct class aml_spi_class = { + .name = "aml_dvb_spi_test", + .class_groups = aml_spi_class_groups, +}; + +/**\brief aml_con_gpio_by_spi:control gpio by spi +* \param gpio: the value is from AM_CON_GPIO def +* \param level: 0: set low,1:set hight +* \return +* - 0:ok +* - -EINVAL : error +*/ +int aml_con_gpio_by_spi(int gpio, int level) +{ + u8 data = gpio; + u16 addres = level; + int value = 0; + struct aml_spi 
*spi_dev = g_spi_dev; + /*add by chl,need add time delay*/ + mdelay(10); + aml_init_send_buf(AM_CI_CMD_CONGPIO, data, addres); + value = aml_spi_io_api(spi_dev, sendbuf, SENDBUFLEN, AM_CI_CMD_CONGPIO); + return value; +} +EXPORT_SYMBOL(aml_con_gpio_by_spi); + +int aml_spi_mod_init(void) +{ + int ret; + pr_dbg("Amlogic DVB SPI Init\n"); + ret = class_register(&aml_spi_class); + return 0; +} +//EXPORT_SYMBOL(aml_spi_mod_init); +void aml_spi_mod_exit(void) +{ + pr_dbg("Amlogic DVB SPI Exit\n"); + class_unregister(&aml_spi_class); +} +EXPORT_SYMBOL(aml_spi_mod_exit); +#endif +#if 0 +module_init(aml_spi_mod_init); +module_exit(aml_spi_mod_exit); + +MODULE_LICENSE("GPL"); +#endif
diff --git a/drivers/stream_input/parser/dvb_ci/aml_spi.h b/drivers/stream_input/parser/dvb_ci/aml_spi.h new file mode 100644 index 0000000..9c2dc1e --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/aml_spi.h
@@ -0,0 +1,108 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef __AML_SPI_H_ +#define __AML_SPI_H_ + +//#include <linux/amlogic/aml_gpio_consumer.h> +#include <linux/gpio/consumer.h> +#include "aml_pcmcia.h" +#include "aml_ci.h" +#include "dvb_ca_en50221.h" + +/* +aml spi dev +*/ +struct aml_spi { + spinlock_t spi_lock; + + /* add SPI DEV */ + struct spi_board_info *spi_bdinfo; + struct spi_device *spi; + struct platform_device *pdev; + struct device *dev; + + /* spi otherconfig */ + int cs_hold_delay; + int cs_clk_delay; + int write_check; + + /* add gpio pin */ + struct gpio_desc *reset_pin; + int reset_pin_value; + struct gpio_desc *cd_pin1; + int cd_pin1_value; + struct gpio_desc *cd_pin2; + int cd_pin2_value; + struct gpio_desc *pwr_pin; + int pwr_pin_value; + + /* cam and mcu irq */ + struct gpio_desc *irq_cam_pin; + int irq_cam_pin_value; + int irq; + struct aml_pcmcia pc; + void *priv; + /*for AML_DVB_IO_TYPE_SPI_T312 device*/ + struct gpio_desc *mcu_irq_pin; + int mcu_irq_pin_value; + /*device type*/ + int io_device_type; +}; + + +/* used to mcu */ +#define DATASTART 0xef +#define DATAEND 0xfe + +enum AM_SPI_RECIVERSTEP { + AM_SPI_STEP_INIT = 0, + AM_SPI_STEP_START1, + AM_SPI_STEP_START2, + AM_SPI_STEP_CMD, 
+ AM_SPI_STEP_DATA, + AM_SPI_STEP_ADDR1, + AM_SPI_STEP_ADDR2, + AM_SPI_STEP_END1, + AM_SPI_STEP_END2 +}; + +enum AM_CON_GPIO +{ + AM_CONGPIO_SEL_LVDS = 0, + AM_CONGPIO_SCN_EN, + AM_CONGPIO_LD_EN2, + AM_CONGPIO_2D3D, + AM_CONGPIO_AMP_RST, +}; + +extern int dirspi_xfer(struct spi_device *spi, u8 *tx_buf, u8 *rx_buf, + int len); +extern int dirspi_write(struct spi_device *spi, u8 *buf, int len); +extern int dirspi_read(struct spi_device *spi, u8 *buf, int len); +extern void dirspi_start(struct spi_device *spi); +extern void dirspi_stop(struct spi_device *spi); +extern int dirspi_register_board_info(struct spi_board_info const *info, unsigned n); +extern void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot); +extern int aml_spi_init(struct platform_device *pdev, struct aml_ci *ci_dev); +extern int aml_spi_exit(struct aml_ci *ci_dev); +extern int aml_spi_mod_init(void); +extern void aml_spi_mod_exit(void); + +#endif /* __AML_SPI_H_ */
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax.c b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax.c new file mode 100644 index 0000000..a462d88 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax.c
@@ -0,0 +1,293 @@ + +/*************************************************************************** + * Copyright (c) 2014 Amlogic, Inc. All rights reserved. + * + * This source code is subject to the terms and conditions defined in the + * file 'LICENSE' which is part of this source code package. + * + * Description: + * +***************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/of.h> +#include "../aml_ci.h" +#include "aml_cimax.h" +#include "aml_cimax_spi.h" +#include "aml_cimax_usb.h" + +#define MODUDLE_NAME "aml_cimax" + +MODULE_PARM_DESC(cimax_debug, "enable verbose debug messages"); +static int aml_cimax_debug = 1; +module_param_named(cimax_debug, aml_cimax_debug, int, 0644); + +//static struct switch_dev slot_state = { +// .name = "ci_slot", +//}; + +#define pr_dbg(fmt...)\ + do {\ + if (aml_cimax_debug)\ + pr_info("AML_CIMAX: " fmt);\ + } while (0) +#define pr_error(fmt...) 
pr_err("AML_CIMAX: " fmt) + +static int aml_cimax_slot_reset(struct aml_ci *ci, int slot) +{ + int ret = 0; + struct aml_cimax *cimax = ci->data; + pr_dbg("cimax: slot(%d) reset\n", slot); + if (cimax->ops.slot_reset) + ret = cimax->ops.slot_reset(cimax, slot); + return ret; +} + +static int aml_cimax_slot_shutdown(struct aml_ci *ci, int slot) +{ + pr_dbg("slot(%d) shutdown\n", slot); + return 0; +} + +static int aml_cimax_slot_ts_enable(struct aml_ci *ci, int slot) +{ + pr_dbg("slot(%d) ts control\n", slot); + return 0; +} + +static int aml_cimax_slot_status(struct aml_ci *ci, int slot, int open) +{ + int ret = 0; + struct aml_cimax *cimax = ci->data; + + /*pr_dbg("cimax: slot(%d) poll\n", slot);*/ + if (cimax->ops.slot_status) + ret = cimax->ops.slot_status(cimax, slot); + return ret; +} + +#define DEF_FUNC_WRAPPER3(_pre, _fn, _S, _P1, _P2, _P3) \ +static int _pre##_fn(_S s, _P1 p1, _P2 p2, _P3 p3)\ +{\ + struct aml_cimax *cimax = s->data;\ + /*pr_dbg("%s\n", #_fn);*/\ + if (cimax->ops._fn)\ + return cimax->ops._fn(cimax, p1, p2, p3);\ + return 0;\ +} + +/*DEF_FUNC_WRAPPER3(aml_cimax_, read_reg, struct aml_ci*, int, u8*, int)*/ +/*DEF_FUNC_WRAPPER3(aml_cimax_, write_reg, struct aml_ci*, int, u8*, int)*/ +DEF_FUNC_WRAPPER3(aml_cimax_, read_cis, struct aml_ci*, int, u8*, int) +DEF_FUNC_WRAPPER3(aml_cimax_, read_lpdu, struct aml_ci*, int, u8*, int) +DEF_FUNC_WRAPPER3(aml_cimax_, write_lpdu, struct aml_ci*, int, u8*, int) + +static int aml_cimax_write_cor(struct aml_ci *ci, int slot, int addr, u8 *buf) +{ + struct aml_cimax *cimax = ci->data; + pr_dbg("write_cor\n"); + if (cimax->ops.write_cor) + return cimax->ops.write_cor(cimax, slot, addr, buf); + return 0; +} + +static int aml_cimax_negotiate(struct aml_ci *ci, int slot, int size) +{ + struct aml_cimax *cimax = ci->data; + pr_dbg("negotiate\n"); + if (cimax->ops.negotiate) + return cimax->ops.negotiate(cimax, slot, size); + return 0; +} + +static int aml_cimax_read_cam_status(struct aml_ci *ci, int slot) +{ + 
struct aml_cimax *cimax = ci->data; + if (cimax->ops.read_cam_status) + return cimax->ops.read_cam_status(cimax, slot); + return 0; +} + +static int aml_cimax_cam_reset(struct aml_ci *ci, int slot) +{ + struct aml_cimax *cimax = ci->data; + if (cimax->ops.cam_reset) + return cimax->ops.cam_reset(cimax, slot); + return 0; +} + +static int aml_cimax_get_capbility(struct aml_ci *ci, int slot) +{ + return 0; +} + +int aml_cimax_camchanged(struct aml_cimax *cimax, int slot, int plugin) +{ + struct aml_ci *ci = cimax->ci; + if (plugin) { + dvb_ca_en50221_cimax_camchange_irq(&ci->en50221_cimax, + slot, DVB_CA_EN50221_CAMCHANGE_INSERTED); + } else { + dvb_ca_en50221_cimax_camchange_irq(&ci->en50221_cimax, + slot, DVB_CA_EN50221_CAMCHANGE_REMOVED); + } + return 0; +} + +static int aml_cimax_start(struct aml_cimax *cimax) +{ + int ret = 0; + if (cimax->ops.start) + ret = cimax->ops.start(cimax); + return ret; +} + +static int aml_cimax_stop(struct aml_cimax *cimax) +{ + int ret = 0; + if (cimax->ops.stop) + ret = cimax->ops.stop(cimax); + return ret; +} + +static int aml_cimax_get_config_from_dts(struct aml_cimax *cimax) +{ + struct device_node *child = NULL; + struct platform_device *pdev = cimax->pdev; + struct device_node *np = pdev->dev.of_node; + unsigned int val; + int ret = 0; + pr_dbg("get cimax dts\n"); + + child = of_get_child_by_name(np, "cimax"); + if (child == NULL) { + pr_error("failed to get cimax\n"); + return -1; + } + ret = of_property_read_u32(child, "io_type", &val); + if (ret) + cimax->io_type = IO_TYPE_SPI; + else + cimax->io_type = val; + + return 0; +} + +int aml_cimax_init(struct platform_device *pdev, struct aml_ci *ci) +{ + struct aml_cimax *cimax = NULL; + int ret = 0; + + cimax = kzalloc(sizeof(struct aml_cimax), GFP_KERNEL); + if (!cimax) { + pr_error("Out of memory!, exiting ..\n"); + return -ENOMEM; + } + cimax->pdev = pdev; + cimax->ci = ci; + + aml_cimax_get_config_from_dts(cimax); + + if (cimax->io_type == IO_TYPE_SPI) { + //ret = 
aml_cimax_spi_init(pdev, cimax); + } + else { + ret = aml_cimax_usb_init(pdev, cimax); + } + + if (ret != 0) { + kfree(cimax); + cimax = NULL; + return -EIO; + } + + ret = aml_cimax_start(cimax); + if (ret != 0) + return ret; + + ci->data = cimax; + + ci->ci_read_cis = aml_cimax_read_cis; + ci->ci_write_cor = aml_cimax_write_cor; + ci->ci_negotiate = aml_cimax_negotiate; + ci->ci_read_lpdu = aml_cimax_read_lpdu; + ci->ci_write_lpdu = aml_cimax_write_lpdu; + ci->ci_read_cam_status = aml_cimax_read_cam_status; + ci->ci_cam_reset = aml_cimax_cam_reset; + ci->ci_get_capbility = aml_cimax_get_capbility; + ci->ci_slot_reset = aml_cimax_slot_reset; + ci->ci_slot_shutdown = aml_cimax_slot_shutdown; + ci->ci_slot_ts_enable = aml_cimax_slot_ts_enable; + ci->ci_poll_slot_status = aml_cimax_slot_status; + + return 0; +} +EXPORT_SYMBOL(aml_cimax_init); + +int aml_cimax_exit(struct aml_ci *ci) +{ + struct aml_cimax *cimax = ci->data; + + ci->ci_read_cis = NULL; + ci->ci_write_cor = NULL; + ci->ci_negotiate = NULL; + ci->ci_read_lpdu = NULL; + ci->ci_write_lpdu = NULL; + ci->ci_read_cam_status = NULL; + ci->ci_cam_reset = NULL; + ci->ci_get_capbility = NULL; + ci->ci_slot_reset = NULL; + ci->ci_slot_shutdown = NULL; + ci->ci_slot_ts_enable = NULL; + ci->ci_poll_slot_status = NULL; + + aml_cimax_stop(cimax); + + if (cimax->io_type == IO_TYPE_SPI) { + //aml_cimax_spi_exit(cimax); + } + else { + aml_cimax_usb_exit(cimax); + } + kfree(cimax); + ci->data = NULL; + + return 0; +} +EXPORT_SYMBOL(aml_cimax_exit); + +int aml_cimax_slot_state_changed(struct aml_cimax *cimax, int slot, int state) +{ + //if (slot == 0) + // switch_set_state(&slot_state, state); + return 0; +} +EXPORT_SYMBOL(aml_cimax_slot_state_changed); +#if 0 +static int __init aml_cimax_mod_init(void) +{ + pr_dbg("Amlogic DVB CIMAX Init\n"); + //switch_dev_register(&slot_state); + //switch_set_state(&slot_state, 0); + return 0; +} + +static void __exit aml_cimax_mod_exit(void) +{ + pr_dbg("Amlogic DVB CIMAX Exit\n"); + 
//switch_dev_unregister(&slot_state); +} + +module_init(aml_cimax_mod_init); +module_exit(aml_cimax_mod_exit); + +MODULE_LICENSE("GPL"); +#endif +
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax.h b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax.h new file mode 100644 index 0000000..17c62fa --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax.h
@@ -0,0 +1,61 @@ +/*************************************************************************** + * Copyright (c) 2014 Amlogic, Inc. All rights reserved. + * + * This source code is subject to the terms and conditions defined in the + * file 'LICENSE' which is part of this source code package. + * + * Description: + * +***************************************************************************/ + +#ifndef __AML_CIMAX_H_ +#define __AML_CIMAX_H_ + +#include "../aml_ci.h" +#include "dvb_ca_en50221_cimax.h" + +struct aml_cimax; + +struct aml_cimax_ops { + int (*read_cis)(struct aml_cimax *cimax, int slot, u8 *buf, int size); + int (*write_cor)(struct aml_cimax *cimax, + int slot, int address, u8 *buf); + int (*negotiate)(struct aml_cimax *cimax, int slot, int size); + int (*read_lpdu)(struct aml_cimax *cimax, int slot, u8 *buf, int size); + int (*write_lpdu)(struct aml_cimax *cimax, int slot, u8 *buf, int size); + int (*read_cam_status)(struct aml_cimax *cimax, int slot); + int (*cam_reset)(struct aml_cimax *cimax, int slot); + int (*get_capblility)(struct aml_cimax *cimax, int slot); + + int (*slot_reset)(struct aml_cimax *cimax, int slot); + int (*slot_shutdown)(struct aml_cimax *cimax, int slot); + int (*slot_ts_enable)(struct aml_cimax *cimax, int slot); + int (*slot_status)(struct aml_cimax *cimax, int slot); + + /*load fw etc.*/ + int (*start)(struct aml_cimax *cimax); + int (*stop)(struct aml_cimax *cimax); + + /*cimax reg*/ + int (*read_reg)(struct aml_cimax *cimax, int addr, u8 *buf, int size); + int (*write_reg)(struct aml_cimax *cimax, int addr, u8 *buf, int size); +}; + +struct aml_cimax { + struct platform_device *pdev; + struct device *dev; + struct aml_ci *ci; + + int io_type; +#define IO_TYPE_SPI 0 +#define IO_TYPE_USB 1 + struct aml_cimax_ops ops; + void *priv; +}; + +int aml_cimax_init(struct platform_device *pdev, struct aml_ci *ci_dev); +int aml_cimax_exit(struct aml_ci *ci_dev); + +int aml_cimax_camchanged(struct aml_cimax *cimax, int slot, int 
plugin); +int aml_cimax_slot_state_changed(struct aml_cimax *cimax, int slot, int state); +#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_spi.c b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_spi.c new file mode 100644 index 0000000..a4aadda --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_spi.c
@@ -0,0 +1,2139 @@ +/*************************************************************************** + * Copyright (c) 2014 Amlogic, Inc. All rights reserved. + * + * This source code is subject to the terms and conditions defined in the + * file 'LICENSE' which is part of this source code package. + * + * Description: + * +***************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/amlogic/aml_gpio_consumer.h> +#include <linux/gpio/consumer.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/amlogic/sd.h> +#include <linux/of_irq.h> +#include <linux/irq.h> +#include <linux/mutex.h> +#include <linux/firmware.h> +#include <linux/delay.h> + +#include "aml_cimax.h" + +#define MOD_NAME "aml_cimax_spi" + +#define pr_dbg(fmt...)\ + do {\ + if (cimax_spi_debug)\ + pr_info("cimax_spi: "fmt);\ + } while (0) +#define pr_error(fmt...) 
pr_err("AML_CIMAX_SPI: " fmt) + +#define BUFFIN_CFG 0x0000 +#define BUFFIN_ADDR_LSB 0x0001 +#define BUFFIN_ADDR_MSB 0x0002 +#define BUFFIN_DATA 0x0003 +#define BUFFOUT_CFG 0x0004 +#define BUFFOUT_ADDR_LSB 0x0005 +#define BUFFOUT_ADDR_MSB 0x0006 +#define BUFFOUT_DATA 0x0007 +#define BOOT_Key 0x0008 +#define BOOT_Status 0x0009 +#define BOOT_Test 0x000A +#define usb2_0_irq_mask 0x0010 +#define usb2_0_status 0x0011 +#define usb2_0_rx 0x0012 +#define usb2_0_tx 0x0013 +#define SPI_Slave_Ctrl 0x0018 +#define SPI_Slave_Status 0x0019 +#define SPI_Slave_Rx 0x001A +#define SPI_Slave_Tx 0x001B +#define SPI_Slave_Mask 0x001C +#define UCSG_Ctrl 0x0020 +#define UCSG_Status 0x0021 +#define UCSG_RxData 0x0022 +#define UCSG_TxData 0x0023 +#define PCtrl_Ctrl 0x0028 +#define PCtrl_Status 0x0029 +#define PCtrl_NbByte_LSB 0x002A +#define PCtrl_NbByte_MSB 0x002B +#define SPI_Master_Ctl 0x0030 +#define SPI_Master_NCS 0x0031 +#define SPI_Master_Status 0x0032 +#define SPI_Master_TxBuf 0x0033 +#define SPI_Master_RxBuf 0x0034 +#define BISTRAM_Ctl 0x0038 +#define BISTRAM_Bank 0x0039 +#define BISTRAM_Pat 0x003A +#define BISTRAM_SM 0x003B +#define BISTRAM_AddrLSB 0x003C +#define BISTROM_Config 0x0040 +#define BISTROM_SignatureLSB 0x0041 +#define BISTROM_SignatureMSB 0x0042 +#define BISTROM_StartAddrLSB 0x0043 +#define BISTROM_StartAddrMSB 0x0043 +#define BISTROM_StopAddrLSB 0x0043 +#define BISTROM_StopAddrMSB 0x0043 +#define CkMan_Config 0x0048 +#define CkMan_Select 0x0049 +#define CkMan_Test 0x004A +#define Revision_Number 0x004B +#define ResMan_Config 0x0050 +#define ResMan_Status 0x0051 +#define ResMan_WD 0x0052 +#define ResMan_WD_MSB 0x0053 +#define CPU_Test 0x0060 +#define IrqMan_Config0 0x0068 +#define IrqMan_Config1 0x0069 +#define IrqMan_Irq0 0x006A +#define IrqMan_NMI 0x006B +#define IrqMan_SleepKey 0x006C +#define Tim_Config 0x0070 +#define Tim_Value_LSB 0x0071 +#define Tim_Value_MSB 0x0072 +#define Tim_Comp_LSB 0x0073 +#define Tim_Comp_MSB 0x0074 +#define TI_Config 0x0076 +#define 
TI_Data 0x0077 +#define TI_Reg0 0x0078 +#define TI_Reg1 0x0079 +#define TI_Reg2 0x007A +#define TI_Reg3 0x007B +#define TI_Reg4 0x007C +#define TI_ROM1 0x007D +#define TI_ROM2 0x007E +#define TI_ROM3 0x007F +#define DVBCI_START_ADDR 0x0100 +#define DVBCI_END_ADDR 0x017F +#define DATA 0x0180 +/*#define CTRL 0x0181*/ +#define QB_HOST 0x0182 +#define LEN_HOST_LSB 0x0183 +#define LEN_HOST_MSB 0x0184 +#define FIFO_TX_TH_LSB 0x0185 +#define FIFO_TX_TH_MSB 0x0186 +#define FIFO_TX_D_NB_LSB 0x0187 +#define FIFO_TX_D_NB_MSB 0x0188 +#define QB_MOD_CURR 0x0189 +#define LEN_MOD_CURR_LSB 0x018A +#define LEN_MOD_CURR_MSB 0x018B +#define QB_MOD 0x018C +#define LEN_MOD_LSB 0x018D +#define LEN_MOD_MSB 0x018E +#define FIFO_RX_TH_LSB 0x018F +#define FIFO_RX_TH_MSB 0x0190 +#define FIFO_RX_D_NB_LSB 0x0191 +#define FIFO_RX_D_NB_MSB 0x0192 +#define IT_STATUS_0 0x0193 +#define IT_STATUS_1 0x0194 +#define IT_MASK_0 0x0195 +#define IT_MASK_1 0x0196 +#define IT_HOST_PIN_CFG 0x0200 +#define CFG_0 0x0201 +#define CFG_1 0x0202 +#define CFG_2 0x0203 +#define IT_HOST 0x0204 +#define MOD_IT_STATUS 0x0205 +#define MOD_IT_MASK 0x0206 +#define MOD_CTRL_A 0x0207 +#define MOD_CTRL_B 0x0208 +#define DEST_SEL 0x0209 +#define CAM_MSB_ADD 0x020A +#define GPIO0_DIR 0x020B +#define GPIO0_DATA_IN 0x020C +#define GPIO0_DATA_OUT 0x020D +#define GPIO0_STATUS 0x020E +#define GPIO0_IT_MASK 0x020F +#define GPIO0_DFT 0x0210 +#define GPIO0_MASK_DATA 0x0211 +#define GPIO1_DIR 0x0212 +#define GPIO1_DATA_IN 0x0213 +#define GPIO1_DATA_OUT 0x0214 +#define GPIO1_STATUS 0x0215 +#define GPIO1_IT_MASK 0x0216 +#define MEM_ACC_TIME_A 0x0217 +#define MEM_ACC_TIME_B 0x0218 +#define IO_ACC_TIME_A 0x0219 +#define IO_ACC_TIME_B 0x021A +#define EXT_CH_ACC_TIME_A 0x021B +#define EXT_CH_ACC_TIME_B 0x021C +#define PAR_IF_0 0x021D +#define PAR_IF_1 0x021E +#define PAR_IF_CTRL 0x021F +#define PCK_LENGTH 0x0220 +#define USB2TS_CTRL 0x0221 +#define USB2TS0_RDL 0x0222 +#define USB2TS1_RDL 0x0223 +#define TS2USB_CTRL 0x0224 +#define 
TSOUT_PAR_CTRL 0x0225 +#define TSOUT_PAR_CLK_SEL 0x0226 +#define S2P_CH0_CTRL 0x0227 +#define S2P_CH1_CTRL 0x0228 +#define P2S_CH0_CTRL 0x0229 +#define P2S_CH1_CTRL 0x022A +#define TS_IT_STATUS 0x022B +#define TS_IT_MASK 0x022C +#define IN_SEL 0x022D +#define OUT_SEL 0x022E +#define ROUTER_CAM_CH 0x022F +#define ROUTER_CAM_MOD 0x0230 +#define FIFO_CTRL 0x0231 +#define FIFO1_2_STATUS 0x0232 +#define FIFO3_4_STATUS 0x0233 +#define GAP_REMOVER_CH0_CTRL 0x0234 +#define GAP_REMOVER_CH1_CTRL 0x0235 +#define SYNC_RTV_CTRL 0x0236 +#define SYNC_RTV_CH0_SYNC_NB 0x0237 +#define SYNC_RTV_CH0_PATTERN 0x0238 +#define SYNC_RTV_CH1_SYNC_NB 0x0239 +#define SYNC_RTV_CH1_PATTERN 0x023A +#define SYNC_RTV_OFFSET_PATT 0x023B +#define CTRL_FILTER 0x023D +#define PID_EN_FILTER_CH0 0x023E +#define PID_EN_FILTER_CH1 0x023F +#define PID_LSB_FILTER_CH0_0 0x0240 +#define PID_MSB_FILTER_CH0_0 0x0241 +#define PID_LSB_FILTER_CH0_1 0x0242 +#define PID_MSB_FILTER_CH0_1 0x0243 +#define PID_LSB_FILTER_CH0_2 0x0244 +#define PID_MSB_FILTER_CH0_2 0x0245 +#define PID_LSB_FILTER_CH0_3 0x0246 +#define PID_MSB_FILTER_CH0_3 0x0247 +#define PID_LSB_FILTER_CH0_4 0x0248 +#define PID_MSB_FILTER_CH0_4 0x0249 +#define PID_LSB_FILTER_CH0_5 0x024A +#define PID_MSB_FILTER_CH0_5 0x024B +#define PID_LSB_FILTER_CH0_6 0x024C +#define PID_MSB_FILTER_CH0_6 0x024D +#define PID_LSB_FILTER_CH0_7 0x024E +#define PID_MSB_FILTER_CH0_7 0x024F +#define PID_LSB_FILTER_CH1_0 0x0260 +#define PID_MSB_FILTER_CH1_0 0x0261 +#define PID_LSB_FILTER_CH1_1 0x0262 +#define PID_MSB_FILTER_CH1_1 0x0263 +#define PID_LSB_FILTER_CH1_2 0x0264 +#define PID_MSB_FILTER_CH1_2 0x0265 +#define PID_LSB_FILTER_CH1_3 0x0266 +#define PID_MSB_FILTER_CH1_3 0x0267 +#define PID_LSB_FILTER_CH1_4 0x0268 +#define PID_MSB_FILTER_CH1_4 0x0269 +#define PID_LSB_FILTER_CH1_5 0x026A +#define PID_MSB_FILTER_CH1_5 0x026B +#define PID_LSB_FILTER_CH1_6 0x026C +#define PID_MSB_FILTER_CH1_6 0x026D +#define PID_LSB_FILTER_CH1_7 0x026E +#define PID_MSB_FILTER_CH1_7 0x026F 
+#define PID_OLD_LSB_REMAPPER_0 0x0280 +#define PID_OLD_MSB_REMAPPER_0 0x0281 +#define PID_OLD_LSB_REMAPPER_1 0x0282 +#define PID_OLD_MSB_REMAPPER_1 0x0283 +#define PID_OLD_LSB_REMAPPER_2 0x0284 +#define PID_OLD_MSB_REMAPPER_2 0x0285 +#define PID_OLD_LSB_REMAPPER_3 0x0286 +#define PID_OLD_MSB_REMAPPER_3 0x0287 +#define PID_OLD_LSB_REMAPPER_4 0x0288 +#define PID_OLD_MSB_REMAPPER_4 0x0289 +#define PID_OLD_LSB_REMAPPER_5 0x028A +#define PID_OLD_MSB_REMAPPER_5 0x028B +#define PID_OLD_LSB_REMAPPER_6 0x028C +#define PID_OLD_MSB_REMAPPER_6 0x028D +#define PID_OLD_LSB_REMAPPER_7 0x028E +#define PID_OLD_MSB_REMAPPER_7 0x028F +#define PID_NEW_LSB_REMAPPER_0 0x02A0 +#define PID_NEW_MSB_REMAPPER_0 0x02A1 +#define PID_NEW_LSB_REMAPPER_1 0x02A2 +#define PID_NEW_MSB_REMAPPER_1 0x02A3 +#define PID_NEW_LSB_REMAPPER_2 0x02A4 +#define PID_NEW_MSB_REMAPPER_2 0x02A5 +#define PID_NEW_LSB_REMAPPER_3 0x02A6 +#define PID_NEW_MSB_REMAPPER_3 0x02A7 +#define PID_NEW_LSB_REMAPPER_4 0x02A8 +#define PID_NEW_MSB_REMAPPER_4 0x02A9 +#define PID_NEW_LSB_REMAPPER_5 0x02AA +#define PID_NEW_MSB_REMAPPER_5 0x02AB +#define PID_NEW_LSB_REMAPPER_6 0x02AC +#define PID_NEW_MSB_REMAPPER_6 0x02AD +#define PID_NEW_LSB_REMAPPER_7 0x02AE +#define PID_NEW_MSB_REMAPPER_7 0x02AF +#define MERGER_DIV_MICLK 0x02C0 +#define PID_AND_SYNC_REMAPPER_CTRL 0x02C1 +#define PID_EN_REMAPPER 0x02C2 +#define SYNC_SYMBOL 0x02C3 +#define PID_AND_SYNC_REMAPPER_INV_CTRL 0x02C4 +#define BITRATE_CH0_LSB 0x02C5 +#define BITRATE_CH0_MSB 0x02C6 +#define BITRATE_CH1_LSB 0x02C7 +#define BITRATE_CH1_MSB 0x02C8 +#define STATUS_CLK_SWITCH_0 0x02C9 +#define STATUS_CLK_SWITCH_1 0x02CA +#define RESET_CLK_SWITCH_0 0x02CB +#define RESET_CLK_SWITCH_1 0x02CC +#define PAD_DRVSTR_CTRL 0x02CD +#define PAD_PUPD_CTRL 0x02CE +#define PRE_HEADER_ADDER_CH0_0 0x02D0 +#define PRE_HEADER_ADDER_CH0_1 0x02D1 +#define PRE_HEADER_ADDER_CH0_2 0x02D2 +#define PRE_HEADER_ADDER_CH0_3 0x02D3 +#define PRE_HEADER_ADDER_CH0_4 0x02D4 +#define PRE_HEADER_ADDER_CH0_5 0x02D5 
/* Pre-header adder, channel 0 (bytes 6-11) and channel 1 (bytes 0-11). */
#define PRE_HEADER_ADDER_CH0_6 0x02D6
#define PRE_HEADER_ADDER_CH0_7 0x02D7
#define PRE_HEADER_ADDER_CH0_8 0x02D8
#define PRE_HEADER_ADDER_CH0_9 0x02D9
#define PRE_HEADER_ADDER_CH0_10 0x02DA
#define PRE_HEADER_ADDER_CH0_11 0x02DB
#define PRE_HEADER_ADDER_CH1_0 0x02E0
#define PRE_HEADER_ADDER_CH1_1 0x02E1
#define PRE_HEADER_ADDER_CH1_2 0x02E2
#define PRE_HEADER_ADDER_CH1_3 0x02E3
#define PRE_HEADER_ADDER_CH1_4 0x02E4
#define PRE_HEADER_ADDER_CH1_5 0x02E5
#define PRE_HEADER_ADDER_CH1_6 0x02E6
#define PRE_HEADER_ADDER_CH1_7 0x02E7
#define PRE_HEADER_ADDER_CH1_8 0x02E8
#define PRE_HEADER_ADDER_CH1_9 0x02E9
#define PRE_HEADER_ADDER_CH1_10 0x02EA
#define PRE_HEADER_ADDER_CH1_11 0x02EB
#define PRE_HEADER_ADDER_CTRL 0x02EC
#define PRE_HEADER_ADDER_LEN 0x02ED
#define PRE_HEADER_REMOVER_CTRL 0x02EE
/* FSM debug/state registers. */
#define FSM_DVB 0x02F0
#define TS2USB_FSM_DEBUG 0x02F2
#define TSOUT_PAR_FSM_DEBUG 0x02F3
#define GAP_REMOVER_FSM_DEBUG 0x02F4
#define PID_AND_SYNC_REMAPPER_FSM_DEBUG 0x02F5
#define PRE_HEADER_ADDER_FSM_DEBUG 0x02F6
#define SYNC_RTV_FSM_DEBUG 0x02F7
/* USB/PHY related addresses and buffer windows. */
#define CHECK_PHY_CLK 0x0E00
#define USB_CTRL1 0x0E01
#define USB_ISO2_out 0x0800
#define USB_ISO1_out 0x1000
#define USB_Interrupt_out 0x1E00
#define USB_Bulk_in 0x1F00
#define CC2_Buffer_out 0x2000
#define USB_EP0 0x30C0
#define CC2_Buffer_in 0x4000
#define USB_ISO2_in 0x5800
#define USB_ISO1_in 0x6000
/* 8051-style interrupt/reset vector locations at the top of the map. */
#define nmb_vector_address_lsb 0xFFFA
#define nmb_vector_address_msb 0xFFFB
#define reset_vector_address_lsb 0xFFFC
#define reset_vector_address_msb 0xFFFD
#define irb_vector_address_lsb 0xFFFE
#define irb_vector_address_msb 0xFFFF


/*
 * SPI message framing: both register and CAM transactions use a 4-byte
 * header.  Register payloads are limited to 255 bytes (8-bit length
 * field); CAM payloads to 65535 bytes (16-bit length field).
 */
#define CIMAX_REG_HDR_SIZE 4
#define CIMAX_REG_PLD_SIZE 255
#define CIMAX_CAM_HDR_SIZE 4
#define CIMAX_CAM_PLD_SIZE 65535

/* Declares the per-device mutex field (paired with lock_* macros below). */
#define DEF_LOCK(_l_) struct mutex _l_

/*
 * Per-device state for the CIMaX+ SPI transport.
 * One instance is allocated in aml_cimax_spi_init() and stored both in
 * aml_cimax->priv and in the spi_device drvdata.
 */
struct cimax_spi {
	struct platform_device *pdev;
	struct spi_device *dev;

	struct aml_cimax *cimax;

	/* Shared rx/tx scratch buffer: header + largest possible payload. */
	u8 buf[CIMAX_REG_HDR_SIZE + CIMAX_CAM_HDR_SIZE + CIMAX_CAM_PLD_SIZE];

	/* Per-slot state: 0 = absent, 1 = inserted, 2 = ready. */
	int cam_inserted[2];
	int cam_data_ready[2];

	int poll_mode;
#define STOP_MODE 0
#define POLL_MODE 1
#define INT_MODE 2

	int irq;
	int irq_io;
	int rst_io;

	/* Delayed-work based polling of the CIMaX+ event register. */
	struct workqueue_struct *workq;
	struct delayed_work work;
	int work_cnt;

	DEF_LOCK(lock);
#define lock_init(_spi) mutex_init(&(_spi)->lock)
/*
 * NOTE: lock_lock() contains a hidden `return err;` — it returns from
 * the *calling* function with -EINTR when the wait is interrupted, so
 * it may only be used in functions returning int, before any resource
 * that would need unwinding is acquired.
 */
#define lock_lock(_spi) do {\
	int err = mutex_lock_interruptible(&(_spi)->lock);\
	if (err)\
		return err;\
} while (0)
#define lock_unlock(_spi) mutex_unlock(&(_spi)->lock)

	/* Cached copy of the last CIS read from the CAM (kzalloc'd lazily). */
	u8 *cis;
#define CIS_MAX 512
};

static struct cimax_spi *g_spi;

/* Module parameters (runtime-writable via sysfs, mode 0644). */
MODULE_PARM_DESC(debug, "enable verbose debug messages");
static int cimax_spi_debug = 1;
module_param_named(debug, cimax_spi_debug, int, 0644);

MODULE_PARM_DESC(poll_interval, "interval for spi poll");
static int spi_poll_interval = 100;
module_param_named(poll_interval, spi_poll_interval, int, 0644);

MODULE_PARM_DESC(poll_mode, "set cimax poll mode, need reset");
static int cimax_poll_mode = 1;
module_param_named(poll_mode, cimax_poll_mode, int, 0644);

MODULE_PARM_DESC(cam_irq_mode, "set cam irq mode, need reset");
static int cam_irq_mode;
module_param_named(cam_irq_mode, cam_irq_mode, int, 0644);

/* Register-access command bytes and their response codes. */
#define CIMAX_REG_READ 0xff
#define CIMAX_REG_READ_OK 0x4c
#define CIMAX_REG_WRITE 0x7f
#define CIMAX_REG_WRITE_OK 0x4d
#define CIMAX_REG_INIT 0x00
#define CIMAX_REG_INIT_OK 0x4b
#define CIMAX_REG_CMD_ERROR 0x51

/* CAM command bytes and their response codes. */
#define CIMAX_CAM_RESET 0x01
#define CIMAX_CAM_RESET_OK 0x40
#define CIMAX_CAM_CIS 0x02
#define CIMAX_CAM_CIS_OK 0x41
#define CIMAX_CAM_COR 0x03
#define CIMAX_CAM_COR_OK 0x42
#define CIMAX_CAM_NEG 0x04
#define CIMAX_CAM_NEG_OK 0x43
#define CIMAX_CAM_WLPDU 0x05
#define CIMAX_CAM_WLPDU_OK 0x44
#define CIMAX_CAM_RLPDU 0x06
#define CIMAX_CAM_RLPDU_OK 0x46
#define CIMAX_CAM_EVT 0x0d
#define CIMAX_CAM_DET_OK 0x45
#define CIMAX_CAM_NOCAM 0x49
#define CIMAX_CAM_ERROR 0x4a
#define CIMAX_CAM_NOEVT 0x55
#define CIMAX_CAM_DATA_READY 0x4e
#define CIMAX_CAM_WBUSY 0x54
#define CIMAX_CAM_PENDING 0x56
#define CIMAX_CAM_REGSTAT 0x0e
#define CIMAX_CAM_REGSTAT_OK 0x57


/* Packet-count value expected in every CAM header. */
#define CIMAX_CAM_PKT_CNT_VAL 1

#define CIMAX_SLOT_A 0
#define CIMAX_SLOT_B 1

/* Bit 7 of the response byte carries the slot; mask it off for the code. */
#define CIMAX_CMD_RESP_MASK 0x7f

#define cimax_to_spi(_c) ((struct cimax_spi *)((_c)->priv))
#define dev_to_spi(_d) ((struct cimax_spi *)spi_get_drvdata(_d))

#define byte_to_u16(_b1, _b2) (((_b1)<<8) | (_b2))

#define hdr_cmd_resp(_s) ((_s)->buf[0] & CIMAX_CMD_RESP_MASK)

/* Register-frame accessors: [cmd][addr_hi][addr_lo][len] + payload. */
#define reg_hdr(_s) ((_s)->buf)
#define reg_addr(_s) byte_to_u16((_s)->buf[1], (_s)->buf[2])
#define reg_hdr_dat_size(_s) ((_s)->buf[3])
#define reg_dat(_s) (&((_s)->buf[CIMAX_REG_HDR_SIZE]))

/* CAM-frame accessors: [cmd|slot<<7][pkt_cnt][len_hi][len_lo] + payload. */
#define cam_hdr(_s) ((_s)->buf)
#define cam_hdr_slot(_s) (((_s)->buf[0] & 0x80) ? 1 : 0)
#define cam_hdr_pkt_cnt(_s) ((_s)->buf[1])
#define cam_hdr_dat_size(_s) byte_to_u16((_s)->buf[2], (_s)->buf[3])
#define cam_dat(_s) (&((_s)->buf[CIMAX_CAM_HDR_SIZE]))

/* Response timeouts in milliseconds. */
#define REG_TIMEOUT 500
#define CAM_TIMEOUT 5000

/* When defined, response readiness is signalled by the IRQ GPIO level. */
#define USE_INT_PIO

static int aml_cimax_spi_mod_init(void);
static void aml_cimax_spi_mod_exit(void);

/*
 * Hex-dump @size bytes of @buf to the kernel log, 16 per row.
 * NOTE(review): each pr_info() here emits a separate log line, so the
 * "rows" do not actually concatenate — pr_cont() would be needed for
 * true continuation output; confirm intended log format before changing.
 */
static void dump(char *title, u8 *buf, int size)
{
	int i;
	pr_info("%s\n", title);
	for (i = 0; i < size; i++) {
		if (!(i & 0xf))
			pr_info("\n\t");
		pr_info("%02x ", *(buf+i));
	}
	pr_info("\n");
}

/* Log a protocol error together with the first 16 bytes of the rx buffer. */
static void perr(char *err, struct cimax_spi *spi)
{
	pr_error("error: %s\n", err);
	dump("dump:", spi->buf, 16);
}

/*
 * NOTE(review): despite the name this does NOT return jiffies — it
 * returns sched_clock() scaled to 10ms units.  Callers feed the delta
 * into jiffies_to_msecs(), which assumes jiffies; the resulting timeout
 * is only approximate and depends on CONFIG_HZ.  Confirm before reuse.
 */
static inline unsigned long get_jiffies(void)
{
	return (unsigned long)(sched_clock()/10000000);
}

/* Promote a slot from "inserted" (1) to "ready" (2) and notify the core. */
static inline void set_spi_cam_ready(struct cimax_spi *spi, int slot)
{
	if (spi->cam_inserted[slot] == 1) {
		spi->cam_inserted[slot] = 2;
		aml_cimax_slot_state_changed(spi->cimax,
			slot, spi->cam_inserted[slot]);
	}
}


/*
 * Wait for and read one response frame into spi->buf.
 *
 * Readiness detection: with USE_INT_PIO the IRQ GPIO is polled until it
 * goes low; otherwise the first byte is read repeatedly until non-zero.
 * Once the 4-byte header is in, the payload (if any) is read using the
 * length field appropriate to the response class (register vs CAM).
 *
 * @timeout is in milliseconds.  Returns 0 on success, -EIO on timeout,
 * -EINVAL on an unknown response code, or the spi_read() error.
 * Caller must hold spi->lock.
 */
static int cimax_spi_get_resp(struct cimax_spi *spi, int timeout)
{
	struct spi_device *dev = spi->dev;
	int ret = 0;
	unsigned long start = get_jiffies();
#ifndef USE_INT_PIO
	u8 t = 0;

	while (!t && jiffies_to_msecs(get_jiffies() - start) < timeout) {
		ret = spi_read(dev, &t, 1);
		if (ret)
			return ret;
		usleep_range(1000, 2000);
	}
	if (!t) {
		pr_error("resp timeout: %dms\n", timeout);
		return -EIO;
	}
	/* First header byte already consumed by the readiness poll. */
	cam_hdr(spi)[0] = t;
	ret = spi_read(dev, cam_hdr(spi)+1, CIMAX_CAM_HDR_SIZE-1);
	if (ret)
		return ret;
#else
	do {
		if (jiffies_to_msecs(get_jiffies() - start) >= timeout) {
			pr_error("resp timeout: %dms\n", timeout);
			return -EIO;
		}
		usleep_range(1000, 2000);
	} while (gpio_get_value(spi->irq_io));

	ret = spi_read(dev, cam_hdr(spi), CIMAX_CAM_HDR_SIZE);
	if (ret)
		return ret;
#endif
	/*pr_dbg("rp: %02x:%02x:%02x:%02x\n",
		cam_hdr(spi)[0], cam_hdr(spi)[1],
		cam_hdr(spi)[2], cam_hdr(spi)[3]);*/
	switch (hdr_cmd_resp(spi)) {
	/* Register responses: 8-bit payload length in header byte 3. */
	case CIMAX_REG_READ_OK:
	case CIMAX_REG_WRITE_OK:
	case CIMAX_REG_INIT_OK:
	case CIMAX_REG_CMD_ERROR: {
		int len = reg_hdr_dat_size(spi);
		if (len) {
			ret = spi_read(dev, reg_dat(spi), len);
			if (ret != 0)
				return ret;
		}
	} break;
	/* CAM responses: 16-bit payload length in header bytes 2-3. */
	case CIMAX_CAM_RESET_OK:
	case CIMAX_CAM_CIS_OK:
	case CIMAX_CAM_COR_OK:
	case CIMAX_CAM_NEG_OK:
	case CIMAX_CAM_WLPDU_OK:
	case CIMAX_CAM_RLPDU_OK:
	case CIMAX_CAM_NOCAM:
	case CIMAX_CAM_ERROR:
	case CIMAX_CAM_WBUSY:
	case CIMAX_CAM_DET_OK:
	case CIMAX_CAM_DATA_READY:
	case CIMAX_CAM_PENDING:
	case CIMAX_CAM_REGSTAT_OK:
	case CIMAX_CAM_NOEVT: {
		int len = cam_hdr_dat_size(spi);
		if (len) {
			ret = spi_read(dev, cam_dat(spi), len);
			if (ret != 0)
				return ret;
			/*if (len)
				dump("dat:", cam_dat(spi), len);*/
		}
	}
	break;
	default:
		pr_error("unknown resp:0x%02x\n", hdr_cmd_resp(spi));
		return -EINVAL;
	}
	return 0;
}

/* Fill a 4-byte register header: [tag][addr_hi][addr_lo][size]. */
static int init_reg_hdr(u8 *hdr, u8 tag, int addr, int size)
{
	hdr[0] = tag;
	hdr[1] = (addr>>8) & 0xff;
	hdr[2] = addr & 0xff;
	hdr[3] = size;
	return 0;
}

/* Return non-zero if the header does NOT match tag/addr/size. */
static int check_reg_hdr(u8 *hdr, u8 tag, int addr, int size)
{
	return hdr[0] != tag
		|| hdr[1] != ((addr>>8) & 0xff)
		|| hdr[2] != (addr & 0xff)
		|| hdr[3] != size;
}

/*
 * Read @size bytes from on-chip register @addr into @buf.
 * Serialized by spi->lock (lock_lock may return -EINTR directly).
 * Returns 0 on success or a negative errno.
 */
static int aml_cimax_spi_read_reg(struct aml_cimax *cimax, int addr,
	u8 *buf, int size)
{
	struct cimax_spi *spi = cimax_to_spi(cimax);
	struct spi_device *dev = spi->dev;
	int err = 0;

	init_reg_hdr(spi->buf, CIMAX_REG_READ, addr, size);

	lock_lock(spi);

	pr_dbg("rd %02x:%02x:%02x:%02x\n",
		reg_hdr(spi)[0], reg_hdr(spi)[1],
		reg_hdr(spi)[2], reg_hdr(spi)[3]);
	err = spi_write(dev, spi->buf, CIMAX_REG_HDR_SIZE);
	if (err)
		goto end;
	err = cimax_spi_get_resp(spi, REG_TIMEOUT);
	if (err)
		goto end;
	/* Response must echo READ_OK with the same address and size. */
	if (check_reg_hdr(reg_hdr(spi), CIMAX_REG_READ_OK, addr, size) != 0) {
		perr("read reg fail.", spi);
		err = -EINVAL;
		goto end;
	}
	memcpy(buf, reg_dat(spi), size);
end:
	lock_unlock(spi);
	return err;
}

/*
 * Write @size bytes from @buf to on-chip register @addr.
 * Serialized by spi->lock.  Returns 0 on success or a negative errno.
 * The WRITE_OK response carries a zero length, hence size 0 in the check.
 */
static int aml_cimax_spi_write_reg(struct aml_cimax *cimax, int addr,
	u8 *buf, int size)
{
	struct cimax_spi *spi = cimax_to_spi(cimax);
	struct spi_device *dev = spi->dev;
	int err = 0;

	init_reg_hdr(spi->buf, CIMAX_REG_WRITE, addr, size);
	memcpy(&spi->buf[CIMAX_REG_HDR_SIZE], buf, size);

	lock_lock(spi);

	pr_dbg("wr %02x:%02x:%02x:%02x\n",
		reg_hdr(spi)[0], reg_hdr(spi)[1],
		reg_hdr(spi)[2], reg_hdr(spi)[3]);
	err = spi_write(dev, spi->buf, CIMAX_REG_HDR_SIZE + size);
	if (err)
		goto end;
	err = cimax_spi_get_resp(spi, REG_TIMEOUT);
	if (err)
		goto end;
	if (check_reg_hdr(reg_hdr(spi), CIMAX_REG_WRITE_OK, addr, 0) != 0) {
		perr("write reg fail.", spi);
		err = -EINVAL;
		goto end;
	}
end:
	lock_unlock(spi);
	return err;
}

/* Fill a 4-byte CAM header: [cmd][pkt_cnt][len_hi][len_lo]. */
static inline int init_cam_hdr(u8 *hdr, int cmd, int size)
{
	hdr[0] = cmd;
	hdr[1] = CIMAX_CAM_PKT_CNT_VAL;
	hdr[2] = (size>>8) & 0xff;
	hdr[3] = size & 0xff;
	return 0;
}

/*
 * Extract the 16-bit error code from a CIMAX_CAM_ERROR response,
 * or 0 if the buffered frame is not a well-formed error response.
 */
static inline int cam_err(struct cimax_spi *spi)
{
	if (hdr_cmd_resp(spi) != CIMAX_CAM_ERROR
		|| cam_hdr_pkt_cnt(spi) != CIMAX_CAM_PKT_CNT_VAL
		|| cam_hdr_dat_size(spi) != 2)
		return 0;
	return byte_to_u16(cam_dat(spi)[0], cam_dat(spi)[1]);
}

/* Map a CAM error code to a human-readable description for logging. */
static inline char *cam_err_str(int err)
{
#define CAMERROR_RESET 0x0101
#define CAMERROR_CIS_BUF 0x0201
#define CAMERROR_CIS_SIZE 0x0202
#define CAMERROR_CAM_NOT_ACT 0x0203
#define CAMERROR_COR_NOT_READY 0x0301
#define CAMERROR_COR_VAL_CHK 0x0302
#define CAMERROR_NEG_NO_RESP 0x0401
#define CAMERROR_NEG_BAD_SIZE 0x0402
#define CAMERROR_NEG_NOT_READY 0x0403
#define CAMERROR_LPDU_NOT_AVAIL 0x0601
	struct { int err; char *str; } cam_err_strings[] = {
		{CAMERROR_RESET, "reset error, not ready."},
		{CAMERROR_CIS_BUF, "cis error, buffer not allocated."},
		{CAMERROR_CIS_SIZE, "cis error, bad cis size."},
		{CAMERROR_CAM_NOT_ACT, "cam not activated."},
		{CAMERROR_COR_NOT_READY, "cam not ready during write COR."},
		{CAMERROR_COR_VAL_CHK, "COR value check failed."},
		{CAMERROR_NEG_NO_RESP, "cam not responding when negotiation."},
		{CAMERROR_NEG_BAD_SIZE, "cam buf size length != 2."},
		{CAMERROR_NEG_NOT_READY, "cam not ready during negotiation."},
		{CAMERROR_LPDU_NOT_AVAIL, "lpdu not available."}
	};
	int i;
	for (i = 0;
		i < sizeof(cam_err_strings)/sizeof(cam_err_strings[0]); i++) {
		if (cam_err_strings[i].err == err)
			return cam_err_strings[i].str;
	}
	return "err unknown.";
}

/*
 * Send one CAM command for @slot with optional payload, read the
 * response, and map generic failure responses onto errnos:
 * NOCAM/ERROR -> -ENODEV, WBUSY -> -EBUSY, PENDING -> -EAGAIN.
 * On success the response remains in spi->buf for the caller to parse.
 * Caller must hold spi->lock.
 */
static int cimax_spi_access_cam(struct cimax_spi *spi, int slot,
	int cmd, u8 *buf, int size)
{
	struct spi_device *dev = spi->dev;
	int err = 0;

	/* Slot B is selected via bit 7 of the command byte. */
	cmd |= slot ? 0x80 : 0;
	init_cam_hdr(cam_hdr(spi), cmd, size);
	memcpy(cam_dat(spi), buf, size);
	/*dump("access cam:", cam_hdr(spi), CIMAX_CAM_HDR_SIZE+size);*/
	err = spi_write(dev, cam_hdr(spi), CIMAX_CAM_HDR_SIZE + size);
	if (err)
		return err;
	err = cimax_spi_get_resp(spi, CAM_TIMEOUT);
	if (err)
		return err;
	if (cam_hdr_slot(spi) != slot) {
		pr_error("expect slot(%d), but slot(%d)\n",
			slot, cam_hdr_slot(spi));
		return -EINVAL;
	}
	switch (hdr_cmd_resp(spi)) {
	case CIMAX_CAM_NOCAM:
		pr_dbg("no cam\n");
		err = -ENODEV;
		break;
	case CIMAX_CAM_ERROR:
		pr_error("cam error\n");
		pr_error("err code: 0x%04x(%s)\n", cam_err(spi),
			cam_err_str(cam_err(spi)));
		err = -ENODEV;
		break;
	case CIMAX_CAM_WBUSY:
		pr_dbg("cam busy\n");
		err = -EBUSY;
		break;
	case CIMAX_CAM_PENDING:
		pr_dbg("cam pending\n");
		err = -EAGAIN;
		break;
	}
	return err;
}

/*
 * Read the CAM's CIS (Card Information Structure) into @buf (capacity
 * @size).  A copy is also cached in spi->cis (allocated lazily, at
 * least 512 bytes).  Returns 0 on success or a negative errno.
 */
static int aml_cimax_spi_read_cis(struct aml_cimax *cimax, int slot,
	u8 *buf, int size)
{
	struct cimax_spi *spi = cimax_to_spi(cimax);
	int err = 0;
	int len;

	lock_lock(spi);

	err = cimax_spi_access_cam(spi, slot, CIMAX_CAM_CIS, NULL, 0);
	if (err)
		goto end;
	if (hdr_cmd_resp(spi) != CIMAX_CAM_CIS_OK
		|| cam_hdr_pkt_cnt(spi) != CIMAX_CAM_PKT_CNT_VAL) {
		perr("read cis fail.", spi);
		err = -EINVAL;
		goto end;
	}
	len = cam_hdr_dat_size(spi);
	if (size < len) {
		pr_error("cis size too large, expect<%d, but:%d\n", size, len);
		perr("cis fail.", spi);
		err = -EINVAL;
		goto end;
	}
	memcpy(buf, cam_dat(spi), len);

	if (!spi->cis)
		spi->cis = kzalloc((len < 512) ? 512 : len, GFP_KERNEL);
	if (spi->cis)
		memcpy(spi->cis, cam_dat(spi), len);

end:
	lock_unlock(spi);
	return err;
}
#define CIMAX_CAM_COR_PLD_SIZE 5
/*
 * Write the Configuration Option Register: payload is
 * [addr_hi][addr_lo][value][0][0], plus a trailing 0x40 mode byte when
 * the module is configured for CAM poll mode (cam_irq_mode == 0).
 */
static int aml_cimax_spi_write_cor(struct aml_cimax *cimax, int slot,
	int addr, u8 *buf)
{
	struct cimax_spi *spi = cimax_to_spi(cimax);
	int err = 0;
	u8 out[CIMAX_CAM_COR_PLD_SIZE + 8];
	int sz = CIMAX_CAM_COR_PLD_SIZE;

	out[0] = addr>>8 & 0xff;
	out[1] = addr & 0xff;
	out[2] = buf[0];
	out[3] = 0;
	out[4] = 0;

	if (!cam_irq_mode) {
		out[5] = 0x40;/*cam poll mode*/
		sz++;
	}

	lock_lock(spi);

	err = cimax_spi_access_cam(spi, slot, CIMAX_CAM_COR, out, sz);
	if (err)
		goto end;
	if (hdr_cmd_resp(spi) != CIMAX_CAM_COR_OK
		|| cam_hdr_pkt_cnt(spi) != CIMAX_CAM_PKT_CNT_VAL
		|| cam_hdr_dat_size(spi) != 0) {
		perr("write cor fail.", spi);
		err = -EINVAL;
		goto end;
	}
end:
	lock_unlock(spi);
	return err;
}
#define CIMAX_CAM_NEG_PLD_SIZE 2
/*
 * Negotiate the link-layer buffer size with the CAM.  On success
 * returns the agreed size (positive) and marks the slot ready;
 * on failure returns a negative errno.
 */
static int aml_cimax_spi_negotiate(struct aml_cimax *cimax, int slot, int size)
{
	struct cimax_spi *spi = cimax_to_spi(cimax);
	int ret = 0;
	u8 out[CIMAX_CAM_NEG_PLD_SIZE];

	out[0] = (size>>8) & 0xff;
	out[1] = size & 0xff;

	lock_lock(spi);

	ret = cimax_spi_access_cam(spi, slot, CIMAX_CAM_NEG,
		out, CIMAX_CAM_NEG_PLD_SIZE);
	if (ret)
		goto end;
	if (hdr_cmd_resp(spi) != CIMAX_CAM_NEG_OK
		|| cam_hdr_pkt_cnt(spi) != CIMAX_CAM_PKT_CNT_VAL
		|| cam_hdr_dat_size(spi) != 2) {
		perr("negotiate fail.", spi);
		ret = -EINVAL;
		goto end;
	}
	ret = byte_to_u16(cam_dat(spi)[0], cam_dat(spi)[1]);

	set_spi_cam_ready(spi, slot);
end:
	lock_unlock(spi);
	return ret;
}

/*
 * Write one link-layer PDU to the CAM.
 * Returns the number of bytes written (@size) or a negative errno.
 */
static int aml_cimax_spi_write_lpdu(struct aml_cimax *cimax, int slot,
	u8 *buf, int size)
{
	struct cimax_spi *spi = cimax_to_spi(cimax);
	int ret = 0;

	lock_lock(spi);

	/*dump("lpdu ->", buf, size);*/
	ret = cimax_spi_access_cam(spi, slot, CIMAX_CAM_WLPDU, buf, size);
	if (ret)
		goto end;
	if (hdr_cmd_resp(spi) != CIMAX_CAM_WLPDU_OK
		|| cam_hdr_pkt_cnt(spi) != CIMAX_CAM_PKT_CNT_VAL
		|| cam_hdr_dat_size(spi) != 0) {
		perr("write lpdu fail.", spi);
		ret = -EINVAL;
		goto end;
	}
	ret = size;
end:
	lock_unlock(spi);
	return ret;
}

/*
 * Read one link-layer PDU from the CAM into @buf and clear the slot's
 * data-ready flag.  Returns the PDU length or a negative errno.
 * NOTE(review): @size is not checked against the received length before
 * the memcpy — confirm callers always pass a negotiated-size buffer.
 */
static int aml_cimax_spi_read_lpdu(struct aml_cimax *cimax, int slot,
	u8 *buf, int size)
{
	struct cimax_spi *spi = cimax_to_spi(cimax);
	int ret = 0;

	lock_lock(spi);

	ret = cimax_spi_access_cam(spi, slot, CIMAX_CAM_RLPDU, NULL, 0);
	if (ret)
		goto end;
	if (hdr_cmd_resp(spi) != CIMAX_CAM_RLPDU_OK
		|| cam_hdr_pkt_cnt(spi) != CIMAX_CAM_PKT_CNT_VAL) {
		perr("read lpdu fail.", spi);
		ret = -EINVAL;
		goto end;
	}
	ret = cam_hdr_dat_size(spi);
	memcpy(buf, cam_dat(spi), ret);

	/*dump("lpdu <-", buf, ret);*/

	spi->cam_data_ready[slot] = 0;
end:
	lock_unlock(spi);
	return ret;
}

/*
 * Read the CAM status register.  In IRQ mode a pending data-ready flag
 * short-circuits to 0x80 without touching the bus.
 * Returns the status byte or a negative errno.
 */
static int aml_cimax_spi_read_cam_status(struct aml_cimax *cimax, int slot)
{
	struct cimax_spi *spi = cimax_to_spi(cimax);
	int ret = 0;

	if (cam_irq_mode && spi->cam_data_ready[slot])
		return 0x80;

	lock_lock(spi);

	ret = cimax_spi_access_cam(spi, slot, CIMAX_CAM_REGSTAT, NULL, 0);
	if (ret)
		goto end;
	if (hdr_cmd_resp(spi) != CIMAX_CAM_REGSTAT_OK
		|| cam_hdr_pkt_cnt(spi) != CIMAX_CAM_PKT_CNT_VAL
		|| cam_hdr_dat_size(spi) != 1) {
		perr("read cam status fail.", spi);
		ret = -EINVAL;
		goto end;
	}

	ret = cam_dat(spi)[0];
end:
	lock_unlock(spi);
	return ret;
}

/* Reset the CI slot via the CIMaX+ and clear its data-ready flag. */
static int aml_cimax_spi_slot_reset(struct aml_cimax *cimax, int slot)
{
	struct cimax_spi *spi = cimax_to_spi(cimax);
	int ret = 0;

	spi->cam_data_ready[slot] = 0;

	lock_lock(spi);

	ret = cimax_spi_access_cam(spi, slot, CIMAX_CAM_RESET, NULL, 0);
	if (ret)
		goto end;
	if (hdr_cmd_resp(spi) != CIMAX_CAM_RESET_OK
		|| cam_hdr_pkt_cnt(spi) != CIMAX_CAM_PKT_CNT_VAL
		|| cam_hdr_dat_size(spi) != 0) {
		perr("slot reset fail.", spi);
		ret = -EINVAL;
		goto end;
	}
end:
	lock_unlock(spi);
	return ret;
}

/* CAM reset hook — no-op for the SPI transport; logs only. */
static int aml_cimax_spi_cam_reset(struct aml_cimax *cimax, int slot)
{
	pr_dbg("Slot(%d): camreset\n", slot);
	return 0;
}

/* Slot shutdown hook — no-op for the SPI transport; logs only. */
static int aml_cimax_spi_slot_shutdown(struct aml_cimax *cimax, int slot)
{
	pr_dbg("Slot(%d): shutdown\n", slot);
	return 0;
}
/* TS routing enable hook — no-op for the SPI transport; logs only. */
static int aml_cimax_spi_slot_ts_enable(struct aml_cimax *cimax, int slot)
{
	pr_dbg("Slot(%d): ts control\n", slot);
	return 0;
}
/*
 * Report slot status in dvb_ca_en50221 poll terms: PRESENT|READY when
 * a CAM has been detected on the slot, 0 otherwise.
 */
static int aml_cimax_spi_slot_status(struct aml_cimax *cimax, int slot)
{
	struct cimax_spi *spi = cimax_to_spi(cimax);
	if (spi->cam_inserted[slot]) {
		/*pr_dbg("CA Module present and ready\n");*/
		return DVB_CA_EN50221_POLL_CAM_PRESENT |
			DVB_CA_EN50221_POLL_CAM_READY;
	} else {
		/*pr_error("CA Module not present or not ready\n");*/
	}
	return 0;
}

/* Forward a CAM insertion/removal event to the cimax core. */
static int cimax_spi_cam_plugin(struct cimax_spi *spi, int slot, int plugin)
{
	pr_dbg("cam plug: slot(%d) %s\n",
		slot, plugin ? "plugged" : "unplugged");
	return aml_cimax_camchanged(spi->cimax, slot, plugin);
}

/*
 * Periodic event poll (runs from the delayed work): query the CIMaX+
 * event register and handle detection / data-ready events, then
 * re-queue itself.
 *
 * NOTE(review): if lock_lock() is interrupted it returns from this
 * function *before* the re-queue at "end:", which permanently stops the
 * polling loop.  Also, spi_poll_interval (documented as an interval,
 * presumably ms) is passed to queue_delayed_work() raw, where the delay
 * is in jiffies — msecs_to_jiffies() looks intended; confirm before
 * changing timing behavior.
 */
static int cimax_spi_poll(struct cimax_spi *spi)
{
	struct spi_device *dev = spi->dev;
	int err = 0;

	lock_lock(spi);

#ifdef USE_INT_PIO
	/* IRQ line idle (high) -> no pending event, skip the bus access. */
	if (gpio_get_value(spi->irq_io))
		goto end;
#endif

	init_reg_hdr(spi->buf, CIMAX_CAM_EVT, 0x100, 0);
	err = spi_write(dev, spi->buf, CIMAX_REG_HDR_SIZE);
	if (err)
		goto end;
	err = cimax_spi_get_resp(spi, CAM_TIMEOUT);
	if (err)
		goto end;
	switch (hdr_cmd_resp(spi)) {
	case CIMAX_CAM_DET_OK: {
		/* Insertion state changed: record and notify the core. */
		int slot = cam_hdr_slot(spi);
		int insert = cam_dat(spi)[0];
		if (!!spi->cam_inserted[slot] != insert) {
			spi->cam_inserted[slot] = insert;
			cimax_spi_cam_plugin(spi, slot, insert);
			aml_cimax_slot_state_changed(spi->cimax, slot,
				spi->cam_inserted[slot]);
		}
	} break;
	case CIMAX_CAM_DATA_READY: {
		int slot = cam_hdr_slot(spi);
		spi->cam_data_ready[slot] = 1;
	} break;
	case CIMAX_CAM_NOEVT:
		break;
	default:
		pr_error("unknown resp:%02x\n", hdr_cmd_resp(spi));
		break;
	}
end:
	queue_delayed_work(spi->workq, &spi->work, spi_poll_interval);
	lock_unlock(spi);
	return 0;
}
+ +static void cimax_spi_poll_work(struct work_struct *work) +{ + struct cimax_spi *spi = + container_of(to_delayed_work(work), struct cimax_spi, work); + spi->work_cnt++; + cimax_spi_poll(spi); +} + +static irqreturn_t cimax_irq_handler(int irq, void *para) +{ + return IRQ_HANDLED; +} + +#define CTRL_DISABLE -1 +#define CTRL_STOP 0 +#define CTRL_START 1 + +static inline int cimax_spi_poll_ctrl(struct cimax_spi *spi, int ctrl) +{ + if (ctrl == CTRL_START) { + spi->workq = create_singlethread_workqueue("cimax_spi"); + INIT_DELAYED_WORK(&spi->work, &cimax_spi_poll_work); + queue_delayed_work(spi->workq, + &spi->work, spi_poll_interval); + pr_dbg("poll started\n"); + } else { + if (!spi->workq) + return 0; + cancel_delayed_work_sync(&spi->work); + destroy_workqueue(spi->workq); + spi->workq = NULL; + pr_dbg("poll stopped\n"); + } + return 0; +} + +static inline int cimax_spi_intr_ctrl(struct cimax_spi *spi, int ctrl) +{ + if (ctrl == CTRL_START) { + int ret; + if (spi->irq == -1) { + pr_error("incorrect irq"); + return -1; + } + ret = request_irq(spi->irq, cimax_irq_handler, + IRQF_SHARED|IRQF_TRIGGER_RISING, + "cimax irq", spi); + enable_irq(spi->irq); + } else { + if (spi->irq == -1) + return 0; + disable_irq(spi->irq); + free_irq(spi->irq, spi); + } + return 0; +} + +static int cimax_spi_setup_poll(struct cimax_spi *spi, int poll_mode) +{ + if (poll_mode == spi->poll_mode) + return 0; + switch (poll_mode) { + case POLL_MODE: + if (spi->poll_mode == INT_MODE) + cimax_spi_intr_ctrl(spi, CTRL_DISABLE); + cimax_spi_poll_ctrl(spi, CTRL_START); + spi->poll_mode = POLL_MODE; + break; + case INT_MODE: + if (spi->poll_mode == POLL_MODE) + cimax_spi_poll_ctrl(spi, CTRL_DISABLE); + cimax_spi_intr_ctrl(spi, CTRL_START); + spi->poll_mode = INT_MODE; + break; + case STOP_MODE: + if (spi->poll_mode == POLL_MODE) + cimax_spi_poll_ctrl(spi, CTRL_DISABLE); + else if (spi->poll_mode == INT_MODE) + cimax_spi_intr_ctrl(spi, CTRL_DISABLE); + spi->poll_mode = STOP_MODE; + break; + 
default: + break; + } + return 0; +} + +static int cimax_spi_hw_reset(struct cimax_spi *spi, int reset_val) +{ + /*trigger reset io*/ + if (spi->rst_io) { + gpio_direction_output(spi->rst_io, reset_val ? 1 : 0); + msleep(50); + gpio_direction_output(spi->rst_io, reset_val ? 0 : 1); + } + return 0; +} + + +enum regOperation_e { + /** Read register. */ + REG_OP_READ, + /** Write register. */ + REG_OP_WRITE, + /** Read register until some bits are set. */ + REG_OP_WAIT_TO_BE_SET, + /** Read register until some bits are cleared. */ + REG_OP_WAIT_TO_BE_CLEARED, + /** Read register until it's value is not equal to defined. */ + REG_OP_WAIT_EQUAL, + /** Perform logical AND over register. */ + REG_OP_LOGICAL_AND, + /** Perform logical OR over register. */ + REG_OP_LOGICAL_OR, + /** Wait timeout in miliseconds. */ + REG_OP_WAIT +}; + +struct regSettings_s { + /** CIMaX+ register address. */ + u16 reg; + /** CIMaX+ register value. */ + u16 val; + /** CIMaX+ register operation. */ + enum regOperation_e op; +}; + +static struct regSettings_s spiRegSettings[] = { + /** TS interface init. */ + {IN_SEL, 0x00, REG_OP_WRITE}, /** Close TS input. */ + {OUT_SEL, 0x00, REG_OP_WRITE}, /** Close TS output. */ + {FIFO_CTRL, 0x0f, REG_OP_WRITE}, /** Reset TS FIFO. */ + {SYNC_RTV_CTRL, 0x0f, REG_OP_WRITE}, + + /** CAM power. */ + {GPIO0_DATA_OUT, 0x00, REG_OP_WRITE}, + /** Unlock CFG. */ + {CFG_2, 0x00, REG_OP_WRITE}, + /** 1) DVB/CI/CI+/SCARD 2slot. */ + {CFG_1, 0x00, REG_OP_WRITE}, + /** 2) Set the Default "power off" state + such as VCC_MODA=VCC_MODB=VPPx_MODA=VPPx_MODB='Z'. */ + {GPIO0_DFT, 0x00, REG_OP_WRITE}, + /** 3) Set GPIO3 as external power switch driver. */ + {GPIO0_MASK_DATA, 0x07, REG_OP_WRITE}, + /** 4) Set "power on" state (VCC=VPP1=VPP2= 5V). */ + {GPIO0_DATA_OUT, 0x03, REG_OP_WRITE}, + /** 5) Lock config. */ + {CFG_2, 0x01, REG_OP_WRITE}, + /** 6) Write in the GPIO0_DIR_REG: defines the GPIOs, + which are used to drive the external power switch, in output mode. 
*/ + {GPIO0_DIR, 0x07, REG_OP_WRITE}, + /** 7) Check VCCENable. */ + {CFG_1, 0x20, REG_OP_WAIT_TO_BE_SET}, + /** 8) Set & wait for PcmciaOutputEnable. */ + {CFG_1, 0x08, REG_OP_LOGICAL_OR}, + {CFG_1, 0x08, REG_OP_WAIT_TO_BE_SET}, + + /** Set router CAM. */ + /** CH0 & CH1 from CAM A & B, CAM A & B from CH0 & CH1. */ + {ROUTER_CAM_MOD, 0x21, REG_OP_WRITE}, + {ROUTER_CAM_CH, 0x00, REG_OP_WRITE}, + /** Wait 200 miliseconds. */ + {0x0000, 200, REG_OP_WAIT}, + + /** Set In/Out. */ + /** Route CAM Channel 0 to Channel 0, Channel 1 null. */ + {ROUTER_CAM_CH, 0x80, REG_OP_WRITE}, + +#ifdef PARALLEL_OUT +#else +#if 1 + /*72M internal clock source*/ + /*CLK Select SER0->72M*/ + {CkMan_Select, 0x20, REG_OP_WRITE}, + /*Enable SER0 clk source, Enable 72M clk source*/ + {CkMan_Config, 0x44, REG_OP_LOGICAL_OR}, +#else + /*108M internal clock source*/ + /*CLK Select SER0->108M*/ + {CkMan_Select, 0x30, REG_OP_WRITE}, + /*Enable SER0 clk source, Enable 108M clk source*/ + {CkMan_Config, 0x48, REG_OP_LOGICAL_OR}, +#endif + {P2S_CH0_CTRL, 0x19, REG_OP_WRITE}, /*Enable p2s*/ + {OUT_SEL, 0x02, REG_OP_WRITE}, /*Out1=p2s0*/ +#endif /*Parallel out*/ + + /** Input Ch0=Parallel, Ch1=null. */ + {IN_SEL, 0x01, REG_OP_WRITE}, +}; + +int downloadCfg(struct cimax_spi *spi) +{ + u32 cnt; + u8 buf[CIMAX_REG_PLD_SIZE]; + struct aml_cimax *cimax = spi->cimax; + + pr_info("Download CIMaX+ configuration(register settings):\n"); + + for (cnt = 0; cnt < sizeof(spiRegSettings)/sizeof(struct regSettings_s); + cnt++) { + pr_dbg("reg:%04x, val:%02x, op:%d\n", + spiRegSettings[cnt].reg, + spiRegSettings[cnt].val, + spiRegSettings[cnt].op); + switch (spiRegSettings[cnt].op) { + case REG_OP_READ: + /* Read register. */ + if (aml_cimax_spi_read_reg(cimax, + spiRegSettings[cnt].reg, buf, 1) < 0) { + /* CIMaX+ read error. */ + pr_error("FAILED at REG_OP_READ operation.\n"); + return -1; + } + break; + case REG_OP_WRITE: + /* Write register. 
*/ + if (aml_cimax_spi_write_reg(cimax, + spiRegSettings[cnt].reg, + (u8 *)&spiRegSettings[cnt].val, + 1) < 0) { + /* CIMaX+ write error. */ + pr_error("FAILED at REG_OP_WRITE operation.\n"); + return -1; + } + break; + case REG_OP_WAIT_TO_BE_SET: + do { + if (aml_cimax_spi_read_reg(cimax, + spiRegSettings[cnt].reg, buf, 1) < 0) { + /* CIMaX+ read error. */ + pr_error("E REG_OP_WAIT_TO_BE_SET\n"); + return -1; + } + } while ((buf[0] & spiRegSettings[cnt].val) + != spiRegSettings[cnt].val); + break; + case REG_OP_WAIT_TO_BE_CLEARED: + do { + if (aml_cimax_spi_read_reg(cimax, + spiRegSettings[cnt].reg, buf, 1) < 0) { + /* CIMaX+ read error. */ + pr_error("REG_OP_WAIT_TO_BE_CLEARED\n"); + return -1; + } + } while ((buf[0] & spiRegSettings[cnt].val) != 0); + break; + case REG_OP_WAIT_EQUAL: + do { + if (aml_cimax_spi_read_reg(cimax, + spiRegSettings[cnt].reg, buf, 1) < 0) { + /* CIMaX+ read error. */ + pr_error("REG_OP_WAIT_EQUAL.\n"); + return -1; + } + } while (buf[0] != spiRegSettings[cnt].val); + break; + case REG_OP_LOGICAL_AND: + if (aml_cimax_spi_read_reg(cimax, + spiRegSettings[cnt].reg, buf, 1) < 0) { + /* CIMaX+ read error. */ + pr_error("FAILED at REG_OP_LOGICAL_AND(r).\n"); + return -1; + } + buf[0] &= spiRegSettings[cnt].val; + if (aml_cimax_spi_write_reg(cimax, + spiRegSettings[cnt].reg, buf, 1) < 0) { + /* CIMaX+ write error. */ + pr_error("FAILED at REG_OP_LOGICAL_AND(w).\n"); + return -1; + } + break; + case REG_OP_LOGICAL_OR: + if (aml_cimax_spi_read_reg(cimax, + spiRegSettings[cnt].reg, buf, 1) < 0) { + /* CIMaX+ read error. */ + pr_error("FAILED at REG_OP_LOGICAL_OR(r).\n"); + return -1; + } + buf[0] |= spiRegSettings[cnt].val; + if (aml_cimax_spi_write_reg(cimax, + spiRegSettings[cnt].reg, buf, 1) < 0) { + /* CIMaX+ write error. 
*/ + pr_error("FAILED at REG_OP_LOGICAL_AND(w).\n"); + return -1; + } + break; + case REG_OP_WAIT: + msleep(spiRegSettings[cnt].val); + break; + default: + pr_error("\nInvalid operation 0x%02x!\n", + spiRegSettings[cnt].op); + } + } + pr_info("config OK.\n"); + return 0; +} + + +#define CIMAX_FW_PKT_SIZE 128 +#define CIMAX_FW_START_ADDR 0x8000 +#define CIMAX_FW_STOP_ADDR 0xcff9 +#define CIMAX_FW_VECT_ADDR 0xfffa +#define CIMAX_FW_VECT_SIZE 6 + +static u32 compute_bistrom(const u8 *ptr, int size, u32 sign) +{ + int k, i; + u16 s; + + for (k = 0; k < size; k++) { + s = ptr[k]&0x01; + for (i = 0; i < 16; i++) + if (0x88B7 & (1<<i)) + s ^= (sign>>i) & 0x01; + s |= ((sign<<1) ^ (ptr[k])) & 0x00FE; + s |= (sign<<1) & 0x00FF00; + sign = s; + } + return sign; +} + +static int cimax_spi_upload_firmware(struct cimax_spi *spi, + const u8 *fw_data, u32 *sign) +{ + struct aml_cimax *cimax = spi->cimax; + int err = 0; + int addr; + const u8 *ptr; + int size; + int debug = cimax_spi_debug; + u8 *ptmp = kzalloc(CIMAX_FW_PKT_SIZE + CIMAX_REG_HDR_SIZE, GFP_KERNEL); + + if (!ptmp) + return -ENOMEM; + + cimax_spi_debug = 0; + + addr = CIMAX_FW_START_ADDR; + ptr = fw_data + addr; + while (addr < CIMAX_FW_STOP_ADDR) { + size = (addr <= (CIMAX_FW_STOP_ADDR+1-CIMAX_FW_PKT_SIZE)) ? 
+ CIMAX_FW_PKT_SIZE : (CIMAX_FW_STOP_ADDR+1-addr); + + *sign = compute_bistrom(ptr, size, *sign); + + pr_dbg(">>%x@%x\n", size, addr); + + /*dump("w:", (u8*)ptr, size);*/ + err = aml_cimax_spi_write_reg(cimax, addr, (u8 *)ptr, size); + if (err) + break; + err = aml_cimax_spi_read_reg(cimax, addr, ptmp, size); + if (err) + break; + /*dump("r:", ptmp, size);*/ + if (memcmp(ptr, ptmp, size)) { + pr_error("fw write error.\n"); + err = -ENODEV; + break; + } + + addr += size; + ptr += size; + } + + if (!err) { + addr = CIMAX_FW_VECT_ADDR; + ptr = fw_data + addr; + size = CIMAX_FW_VECT_SIZE; + + *sign = compute_bistrom(ptr, size, *sign); + + err = aml_cimax_spi_write_reg(cimax, addr, (u8 *)ptr, size); + if (err) + goto end; + err = aml_cimax_spi_read_reg(cimax, addr, ptmp, size); + if (err) + goto end; + if (memcmp(ptr, ptmp, size)) { + pr_error("fw vect write error.\n"); + err = -ENODEV; + goto end; + } + } +end: + kfree(ptmp); + + cimax_spi_debug = debug; + return err; +} + +static int cimax_spi_check_bistrom(struct cimax_spi *spi, + int start, int end, u32 sign) +{ + struct aml_cimax *cimax = spi->cimax; + int err = 0; + u8 buf[2]; + + buf[0] = (0xd000-start) & 0xff; + buf[1] = (0xd000-start) >> 8; + err = aml_cimax_spi_write_reg(cimax, 0x8d, buf, 2); + if (err) + return err; + buf[0] = sign & 0xff; + buf[1] = sign >> 8; + err = aml_cimax_spi_write_reg(cimax, 0x80, buf, 2); + if (err) + return err; + buf[0] = 0xf; + err = aml_cimax_spi_write_reg(cimax, 0x82, buf, 1); + if (err) + return err; + err = aml_cimax_spi_read_reg(cimax, 0x41, buf, 2); + if (err) + return err; + pr_dbg("bist checked: 0x%04x\n", byte_to_u16(buf[0], buf[1])); + err = aml_cimax_spi_read_reg(cimax, 0x09, buf, 1); + if (err) + return err; + pr_dbg("rom status: 0x%02x\n", buf[0]); + return buf[0]; +} + +static int cimax_spi_init_firmware(struct cimax_spi *spi) +{ + struct spi_device *dev = spi->dev; + int err = 0; + + init_reg_hdr(spi->buf, CIMAX_REG_INIT, 0, 0); + err = spi_write(dev, spi->buf, 
CIMAX_REG_HDR_SIZE); + if (err) + return err; + err = cimax_spi_get_resp(spi, REG_TIMEOUT); + if (err) + return err; + if (check_reg_hdr(spi->buf, CIMAX_REG_INIT_OK, 0, 0)) { + perr("init fw fail.", spi); + return -EINVAL; + } + return err; +} + +static void request_fw_callback(const struct firmware *fw, void *context) +{ + u32 sign = 0; + int err = 0; + struct cimax_spi *spi = (struct cimax_spi *)context; + + if (!fw) + return; + + pr_dbg("got fw: %zd @ %p\n", fw->size, fw->data); + + /*cimax_spi_hw_reset(spi, 1);*/ + + err = cimax_spi_upload_firmware(spi, fw->data, &sign); + if (err) + goto end; + pr_dbg("upload fw done.\n"); + err = cimax_spi_check_bistrom(spi, + CIMAX_FW_START_ADDR, CIMAX_FW_STOP_ADDR, sign); + if (err != 0x2) + goto end; + pr_dbg("check bistrom done.\n"); + err = cimax_spi_init_firmware(spi); + if (err) + goto end; +end: + if (fw) + release_firmware(fw); + if (err) + return; + + if (downloadCfg(spi)) { + pr_error("download config fail.\n"); + return; + } + + cimax_spi_setup_poll(spi, cimax_poll_mode ? 
POLL_MODE : INT_MODE); + + return; +} + +static int cimax_spi_load_fw(struct cimax_spi *spi) +{ + char *name = "cimax_spidvb.bin"; + return request_firmware_nowait(THIS_MODULE, 1, name, + &spi->dev->dev, GFP_KERNEL, spi, request_fw_callback); +} + +static int cimax_spi_dev_probe(struct spi_device *spi) +{ + int ret; + struct cimax_spi *cimax_spi; + + pr_dbg("dev probe\n"); + /*setup again?*/ + spi->bits_per_word = 8; + ret = spi_setup(spi); + if (ret) + pr_dbg("spi setup failed\n"); + + cimax_spi = dev_get_platdata(&spi->dev); + cimax_spi->dev = spi; + + spi_set_drvdata(spi, cimax_spi); + + return cimax_spi_load_fw(cimax_spi); +} + +static int cimax_spi_dev_remove(struct spi_device *spi) +{ + struct cimax_spi *cimax_spi = dev_get_drvdata(&spi->dev); + + pr_dbg("dev remove\n"); + cimax_spi_setup_poll(cimax_spi, STOP_MODE); + return 0; +} + +static int cimax_spi_get_config_from_dts(struct cimax_spi *spi, + struct spi_board_info *bdinfo) +{ + struct device_node *child = NULL; + struct platform_device *pdev = spi->pdev; + struct device_node *np = pdev->dev.of_node; + unsigned int val; + int ret = 0; + pr_dbg("fetch cimax spi in dts\n"); + + child = of_get_child_by_name(np, "cimax"); + if (child == NULL) { + pr_error("cimax not found in dts\n"); + return -1; + } + child = of_get_child_by_name(child, "spi"); + if (!child) { + pr_error("spi not found in cimax"); + return -1; + } + + /* get spi config */ + ret = of_property_read_u32(child, "bus_num", &val); + if (ret) + pr_error("bus_num not found, use default.\n"); + else + bdinfo->bus_num = val; + pr_dbg("bus_num: %d\n", bdinfo->bus_num); + ret = of_property_read_u32(child, "chip_select", &val); + if (ret) + pr_error("chip_select not found, use default.\n"); + else + bdinfo->chip_select = val; + pr_dbg("chip_select: %d\n", bdinfo->chip_select); + ret = of_property_read_u32(child, "max_frequency", &val); + if (ret) + pr_error("max_frequency not found, use default.\n"); + else + bdinfo->max_speed_hz = val; + 
pr_dbg("max_speed_hz: %d\n", bdinfo->max_speed_hz); + ret = of_property_read_u32(child, "mode", &val); + if (ret) + pr_error("mode not found, use default.\n"); + else + bdinfo->mode = val; + pr_dbg("mode: %d\n", bdinfo->mode); +/* +dvbci { + compatible = "amlogic, dvbci"; + dev_name = "dvbci"; + io_type = <2>;//0:iobus,1:spi,2:cimax + cimax { + io_type = <0> //0:spi 1:usb + spi { + spi_bus_num = <0>; + spi_chip_select = <0>; + spi_max_frequency = <3000000>; + + rst_gpio = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>; + + irq_gpio = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>; + irq = <2>; + interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + +}; +*/ +#ifdef USE_INT_PIO + { + int ret = 0; + int gpio = -1; + gpio = of_get_named_gpio_flags(child, "irq-gpios", 0, NULL); + ret = gpio_request(gpio, "cimax-irq"); + if (ret < 0) { + pr_error("irq-gpios request fail.\n"); + return ret; + } + + ret = gpio_direction_input(gpio); + + spi->irq_io = gpio; + pr_dbg("irq_io: %d\n", spi->irq_io); + } +#ifdef CIMAX_IRQ + { + int irq; + unsigned int irqflag; + ret = of_property_read_u32(child, "irq", &gpio_irq); + irq = irq_of_parse_and_map(child, 0); + + gpio_for_irq(gpio, + AML_GPIO_IRQ(gpio_irq, FILTER_NUM7, GPIO_IRQ_FALLING)); + spi->irq = irq; + pr_dbg("irq: %d\n", spi->irq); + } +#else + spi->irq = -1; +#endif +#endif/*USE_INT_PIO*/ + { + int ret = 0; + int gpio = -1; + gpio = of_get_named_gpio_flags(child, "rst-gpios", 0, NULL); + if (gpio != -1) { + ret = gpio_request(gpio, "cimax"); + if (ret < 0) { + pr_error("rst-gpios request fail.\n"); + return ret; + } + + cimax_spi_hw_reset(spi, 1); + + spi->rst_io = gpio; + pr_dbg("rst: %d\n", spi->rst_io); + } else { + pr_error("rst io got fail, %d\n", gpio); + } + } + return 0; +} + +static struct spi_board_info cimax_spi_bdinfo = { + .modalias = "cimax_spi", + .mode = SPI_MODE_3, + .max_speed_hz = 1000000, /* 1MHz */ + .bus_num = 0, /* SPI bus No. 
*/ + .chip_select = 0, /* the device index on the spi bus */ + .controller_data = NULL, +}; + +static struct spi_driver cimax_spi_dev_driver = { + .probe = cimax_spi_dev_probe, + .remove = cimax_spi_dev_remove, + .driver = { + .name = "cimax_spi", + .owner = THIS_MODULE, + }, +}; + +int aml_cimax_spi_init(struct platform_device *pdev, struct aml_cimax *cimax) +{ + int ret; + struct cimax_spi *cimax_spi; + + cimax_spi = kzalloc(sizeof(struct cimax_spi), GFP_KERNEL); + if (!cimax_spi) + return -ENOMEM; + + cimax_spi->pdev = pdev; + cimax_spi->cimax = cimax; + cimax_spi_get_config_from_dts(cimax_spi, &cimax_spi_bdinfo); + + /*init spi_lock*/ + lock_init(cimax_spi); + + /*register device*/ + cimax_spi_bdinfo.platform_data = cimax_spi; + spi_register_board_info(&cimax_spi_bdinfo, 1); + + /*register driver*/ + ret = spi_register_driver(&cimax_spi_dev_driver); + if (ret) { + pr_error("register cimax spi driver failed\n"); + return ret; + } + + /*init cimax used api.*/ +#define WI(_f)\ + cimax->ops._f = aml_cimax_spi_##_f + WI(read_cis); + WI(write_cor); + WI(negotiate); + WI(read_lpdu); + WI(write_lpdu); + WI(read_cam_status); + WI(cam_reset); + WI(slot_reset); + WI(slot_shutdown); + WI(slot_ts_enable); + WI(slot_status); + /*WI(start);*/ + /*WI(stop);*/ + WI(read_reg); + WI(write_reg); + + cimax->priv = cimax_spi; + + g_spi = cimax_spi; + + aml_cimax_spi_mod_init(); + + return 0; +} +EXPORT_SYMBOL(aml_cimax_spi_init); + +int aml_cimax_spi_exit(struct aml_cimax *cimax) +{ + struct cimax_spi *spi = cimax_to_spi(cimax); + + if (!spi) + return -ENODEV; + + aml_cimax_spi_mod_exit(); + + /*unregister driver*/ + spi_unregister_driver(&cimax_spi_dev_driver); + /*unregister device*/ + spi_unregister_device(spi->dev); + + if (spi->irq_io) + gpio_free(spi->irq_io); + if (spi->rst_io) + gpio_free(spi->rst_io); + + kfree(spi->cis); + + kfree(spi); + cimax->priv = NULL; + + g_spi = NULL; + return 0; +} +EXPORT_SYMBOL(aml_cimax_spi_exit); + +static int cimax_spi_reset(struct cimax_spi 
*spi, int reset_val) +{ + pr_dbg("reset spi:%p, rst:%d\n", spi, spi ? spi->rst_io : -1); + if (!spi) + return -ENODEV; + + pr_dbg("cimax spi reset\n"); + + cimax_spi_setup_poll(spi, STOP_MODE); + + cimax_spi_hw_reset(spi, reset_val); + + /*notify unplugged*/ + aml_cimax_camchanged(spi->cimax, 0, 0); + aml_cimax_camchanged(spi->cimax, 1, 0); + + spi->cam_inserted[0] = spi->cam_inserted[1] = 0; + spi->cam_data_ready[0] = spi->cam_data_ready[1] = 0; + + /*async start fw*/ + cimax_spi_load_fw(spi); + + /*cimax_spi_setup_poll(spi, cimax_poll_mode? POLL_MODE : INT_MODE);*/ + return 0; +} + +static ssize_t reset_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret; + ret = sprintf(buf, "echo 1 > %s\n", attr->attr.name); + return ret; +} + +static ssize_t reset_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + int ret; + int val = 0; + if (!g_spi) + return size; + ret = sscanf(buf, "%i", &val); + if (ret == 1) + ret = cimax_spi_reset(g_spi, val); + return size; +} + +static ssize_t debug_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret = 0; + if (!g_spi) + return ret; + + ret = sprintf(buf, "poll mode: %d\n", g_spi->poll_mode); + ret += sprintf(buf+ret, "status slot[0]=[%d] slot[1]=[%d]\n", + g_spi->cam_inserted[0], g_spi->cam_inserted[1]); + ret += sprintf(buf+ret, "data slot[0]=[%d] slot[1]=[%d]\n", + g_spi->cam_data_ready[0], g_spi->cam_data_ready[1]); + ret += sprintf(buf+ret, "work cnt:%d\n", g_spi->work_cnt); + return ret; +} + +static int reg_addr; +static ssize_t addr_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret = 0; + ret = sprintf(buf, "addr = 0x%04x\n", reg_addr); + return ret; +} + +static ssize_t addr_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + if (!g_spi) + return size; + if (sscanf(buf, "%i", ®_addr) == 1) + return size; + return size; +} + +static ssize_t 
reg_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret = 0; + u8 reg_val = 0; + struct aml_cimax *cimax = NULL; + + if (!g_spi) + return ret; + + cimax = g_spi->cimax; + ret = aml_cimax_spi_read_reg(cimax, reg_addr, ®_val, 1); + if (ret) + ret = sprintf(buf, "read fail, err=%d\n", ret); + else + ret = sprintf(buf, "reg[0x%04x] = 0x%02x\n", reg_addr, reg_val); + return ret; +} + +static ssize_t reg_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + int ret = 0; + struct aml_cimax *cimax = NULL; + int val = 0; + u8 reg_val = 0; + + if (!g_spi) + return size; + + if (sscanf(buf, "%i", &val) != 1) + return size; + reg_val = val; + cimax = g_spi->cimax; + ret = aml_cimax_spi_write_reg(cimax, reg_addr, ®_val, 1); + if (ret) + return ret; + return size; +} + +static int cis_mode; /*0:hex 1:binary*/ +static ssize_t cis_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret = 0; + + if (!g_spi || !g_spi->cis) + return ret; + + if (cis_mode == 0) { + int i; + for (i = 0; i < CIS_MAX; i++) { + if (i && !(i & 0xf)) + ret += sprintf(buf+ret, "\n"); + ret += sprintf(buf+ret, "%02X ", g_spi->cis[i]); + } + ret += sprintf(buf+ret, "\n"); + return ret; + } else { + memcpy(buf, g_spi->cis, CIS_MAX); + return CIS_MAX; + } + return ret; +} + +static ssize_t cis_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + if (size >= 3 + && !memcmp(buf, "bin", 3)) + cis_mode = 1; + else + cis_mode = 0; + return size; +} + +static ssize_t ts_rate_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret = 0; + u8 lsb = 0, msb = 0, plen = 0; + struct aml_cimax *cimax = NULL; + int err = 0; + + if (!g_spi) + return ret; + + cimax = g_spi->cimax; + err = aml_cimax_spi_read_reg(cimax, PCK_LENGTH, &plen, 1); + err |= aml_cimax_spi_read_reg(cimax, BITRATE_CH0_LSB, &lsb, 1); + err |= aml_cimax_spi_read_reg(cimax, BITRATE_CH0_MSB, &msb, 
1); + if (err || !byte_to_u16(msb, lsb)) + ret += sprintf(buf+ret, "read fail, err=%d\n", err); + else + ret += sprintf(buf+ret, "rate[0] = %d Kbps\n", + 540*plen*8/byte_to_u16(msb, lsb)); + if (err) + return ret; + + err = aml_cimax_spi_read_reg(cimax, BITRATE_CH1_LSB, &lsb, 1); + err |= aml_cimax_spi_read_reg(cimax, BITRATE_CH1_MSB, &msb, 1); + if (err || !byte_to_u16(msb, lsb)) + ret += sprintf(buf+ret, "read fail, err=%d\n", err); + else + ret += sprintf(buf+ret, "rate[1] = %d Kbps\n", + 540*plen*8/byte_to_u16(msb, lsb)); + return ret; +} + +static ssize_t loop_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret = 0; + u8 ch = 0, mod = 0; + struct aml_cimax *cimax = NULL; + int err = 0; + + if (!g_spi) + return ret; + + cimax = g_spi->cimax; + err = aml_cimax_spi_read_reg(cimax, ROUTER_CAM_CH, &ch, 1); + err |= aml_cimax_spi_read_reg(cimax, ROUTER_CAM_MOD, &mod, 1); + if (err) { + ret = sprintf(buf, "read fail, err=%d\n", err); + return ret; + } + ret += sprintf(buf + ret, "OUT-0 <= "); + switch (ch & 0x0f) { + case 0x0: + ret += sprintf(buf + ret, "CAM-A"); break; + case 0x1: + ret += sprintf(buf + ret, "CH0-IN"); break; + case 0x2: + ret += sprintf(buf + ret, "CH1-IN"); break; + case 0x3: + ret += sprintf(buf + ret, "REMAPPER"); break; + case 0x4: + ret += sprintf(buf + ret, "PREHEADER"); break; + case 0x5: + ret += sprintf(buf + ret, "CAM-B"); break; + case 0x6: + ret += sprintf(buf + ret, "GAPREMOVER-0"); break; + case 0x7: + ret += sprintf(buf + ret, "GAPREMOVER-1"); break; + case 0x8: + ret += sprintf(buf + ret, "NONE"); break; + default: + ret += sprintf(buf + ret, "UNKNOWN"); break; + } + ret += sprintf(buf + ret, "\nCAM-A <= "); + switch (mod & 0x07) { + case 0x1: + ret += sprintf(buf + ret, "CH0-IN"); break; + case 0x2: + ret += sprintf(buf + ret, "CH1-IN"); break; + case 0x3: + ret += sprintf(buf + ret, "REMAPPER"); break; + case 0x4: + ret += sprintf(buf + ret, "PREHEADER"); break; + case 0x5: + ret += sprintf(buf + 
ret, "CAM-B"); break; + case 0x6: + ret += sprintf(buf + ret, "GAPREMOVER-0"); break; + case 0x7: + ret += sprintf(buf + ret, "GAPREMOVER-1"); break; + default: + ret += sprintf(buf + ret, "NONE"); break; + } + ret += sprintf(buf + ret, "\n"); + + return ret; +} + + +static ssize_t loop_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + int loop = 0; + int err = 0; + struct aml_cimax *cimax = NULL; + + if (!g_spi) + return size; + + if (sscanf(buf, "%i", &loop) == 1) { + int a = g_spi->cam_inserted[0]; + int b = g_spi->cam_inserted[1]; + u8 cm[2]; + cm[0] = loop ? (b ? 0x85 : 0x80) : 0x81;/*CH*/ + cm[1] = loop ? (a ? 0x51 : 0x11) : 0x00;/*MOD*/ + cimax = g_spi->cimax; + err = aml_cimax_spi_write_reg(cimax, ROUTER_CAM_CH, cm, 2); + } + return size; +} + +static ssize_t slot_reset_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + int err = 0; + int slot = 0; + struct aml_cimax *cimax = NULL; + + if (!g_spi) + return size; + + if (sscanf(buf, "%i", &slot) == 1) { + if (slot == 0 || slot == 1) { + pr_dbg("reset slot %d\n", slot); + cimax = g_spi->cimax; + err = aml_cimax_spi_slot_reset(cimax, slot); + } + } + return size; +} + +static ssize_t detect_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + int err = 0; + int slot = 0; + struct aml_cimax *cimax = NULL; + + if (!g_spi) + return size; + + if (sscanf(buf, "%i", &slot) == 1) { + if (slot == 0 || slot == 1) { + int addr = (!slot) ? 
MOD_CTRL_A : MOD_CTRL_B; + u8 reg = 0; + cimax = g_spi->cimax; + err = aml_cimax_spi_read_reg(cimax, addr, ®, 1); + g_spi->cam_inserted[slot] = reg & 1; + pr_dbg("detect slot(%d): %d\n", slot, reg & 1); + } + } + return size; +} + +static struct class_attribute cimax_spi_class_attrs[] = { + __ATTR_RW(reset), + __ATTR_RO(debug), + __ATTR_RW(addr), + __ATTR_RW(reg), + __ATTR_RW(cis), + __ATTR_RO(ts_rate), + __ATTR_RW(loop), + __ATTR_WO(slot_reset), + __ATTR_WO(detect), + __ATTR_NULL +}; + +static struct class cimax_spi_class = { + .name = "cimax_spi", + .class_attrs = cimax_spi_class_attrs, +}; + +static int aml_cimax_spi_mod_init(void) +{ + int ret; + pr_dbg("Amlogic CIMAX SPI Init\n"); + ret = class_register(&cimax_spi_class); + return 0; +} + +static void aml_cimax_spi_mod_exit(void) +{ + pr_dbg("Amlogic CIMAX SPI Exit\n"); + class_unregister(&cimax_spi_class); +} +
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_spi.h b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_spi.h new file mode 100644 index 0000000..3548de0 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_spi.h
@@ -0,0 +1,20 @@ +/*************************************************************************** + * Copyright (c) 2014 Amlogic, Inc. All rights reserved. + * + * This source code is subject to the terms and conditions defined in the + * file 'LICENSE' which is part of this source code package. + * + * Description: + * +***************************************************************************/ + +#ifndef _AML_CIMAX_SPI_H_ +#define _AML_CIMAX_SPI_H_ + +#include <linux/platform_device.h> +#include "aml_cimax.h" + +int aml_cimax_spi_init(struct platform_device *pdev, struct aml_cimax *ci); +int aml_cimax_spi_exit(struct aml_cimax *ci); + +#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_usb.c b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_usb.c new file mode 100644 index 0000000..bb0c71f --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_usb.c
@@ -0,0 +1,1733 @@ +/*************************************************************************** + * Copyright (c) 2014 Amlogic, Inc. All rights reserved. + * + * This source code is subject to the terms and conditions defined in the + * file 'LICENSE' which is part of this source code package. + * + * Description: + * +***************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/amlogic/aml_gpio_consumer.h> +#include <linux/gpio/consumer.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/of_irq.h> +#include <linux/irq.h> +#include <linux/mutex.h> +#include <linux/firmware.h> +#include <linux/delay.h> +#include <linux/of_gpio.h> +#include <linux/sched/clock.h> + +//#include <linux/switch.h> + +#include "aml_cimax.h" +#include "./usb/SRC/cimax+usb-driver.h" + +#define MOD_NAME "aml_cimax_usb" + +#define pr_dbg(fmt...)\ + do {\ + if (cimax_usb_debug)\ + pr_info("cimax_usb: "fmt);\ + } while (0) +#define pr_inf(fmt...) pr_info("cimax_usb: "fmt) +#define pr_error(fmt...) 
pr_err("AML_CIMAX_USB: " fmt) + +/* + Uncomment below and enable permanent power in cfg + to disable dynamic power control mechanism +*/ +/*#define DISABLE_POWER_PATCH*/ + +#define BUFFIN_CFG 0x0000 +#define BUFFIN_ADDR_LSB 0x0001 +#define BUFFIN_ADDR_MSB 0x0002 +#define BUFFIN_DATA 0x0003 +#define BUFFOUT_CFG 0x0004 +#define BUFFOUT_ADDR_LSB 0x0005 +#define BUFFOUT_ADDR_MSB 0x0006 +#define BUFFOUT_DATA 0x0007 +#define BOOT_Key 0x0008 +#define BOOT_Status 0x0009 +#define BOOT_Test 0x000A +#define usb2_0_irq_mask 0x0010 +#define usb2_0_status 0x0011 +#define usb2_0_rx 0x0012 +#define usb2_0_tx 0x0013 +#define SPI_Slave_Ctrl 0x0018 +#define SPI_Slave_Status 0x0019 +#define SPI_Slave_Rx 0x001A +#define SPI_Slave_Tx 0x001B +#define SPI_Slave_Mask 0x001C +#define UCSG_Ctrl 0x0020 +#define UCSG_Status 0x0021 +#define UCSG_RxData 0x0022 +#define UCSG_TxData 0x0023 +#define PCtrl_Ctrl 0x0028 +#define PCtrl_Status 0x0029 +#define PCtrl_NbByte_LSB 0x002A +#define PCtrl_NbByte_MSB 0x002B +#define SPI_Master_Ctl 0x0030 +#define SPI_Master_NCS 0x0031 +#define SPI_Master_Status 0x0032 +#define SPI_Master_TxBuf 0x0033 +#define SPI_Master_RxBuf 0x0034 +#define BISTRAM_Ctl 0x0038 +#define BISTRAM_Bank 0x0039 +#define BISTRAM_Pat 0x003A +#define BISTRAM_SM 0x003B +#define BISTRAM_AddrLSB 0x003C +#define BISTROM_Config 0x0040 +#define BISTROM_SignatureLSB 0x0041 +#define BISTROM_SignatureMSB 0x0042 +#define BISTROM_StartAddrLSB 0x0043 +#define BISTROM_StartAddrMSB 0x0043 +#define BISTROM_StopAddrLSB 0x0043 +#define BISTROM_StopAddrMSB 0x0043 +#define CkMan_Config 0x0048 +#define CkMan_Select 0x0049 +#define CkMan_Test 0x004A +#define Revision_Number 0x004B +#define ResMan_Config 0x0050 +#define ResMan_Status 0x0051 +#define ResMan_WD 0x0052 +#define ResMan_WD_MSB 0x0053 +#define CPU_Test 0x0060 +#define IrqMan_Config0 0x0068 +#define IrqMan_Config1 0x0069 +#define IrqMan_Irq0 0x006A +#define IrqMan_NMI 0x006B +#define IrqMan_SleepKey 0x006C +#define Tim_Config 0x0070 +#define 
Tim_Value_LSB 0x0071 +#define Tim_Value_MSB 0x0072 +#define Tim_Comp_LSB 0x0073 +#define Tim_Comp_MSB 0x0074 +#define TI_Config 0x0076 +#define TI_Data 0x0077 +#define TI_Reg0 0x0078 +#define TI_Reg1 0x0079 +#define TI_Reg2 0x007A +#define TI_Reg3 0x007B +#define TI_Reg4 0x007C +#define TI_ROM1 0x007D +#define TI_ROM2 0x007E +#define TI_ROM3 0x007F +#define DVBCI_START_ADDR 0x0100 +#define DVBCI_END_ADDR 0x017F +#define DATA 0x0180 +/*#define CTRL 0x0181*/ +#define QB_HOST 0x0182 +#define LEN_HOST_LSB 0x0183 +#define LEN_HOST_MSB 0x0184 +#define FIFO_TX_TH_LSB 0x0185 +#define FIFO_TX_TH_MSB 0x0186 +#define FIFO_TX_D_NB_LSB 0x0187 +#define FIFO_TX_D_NB_MSB 0x0188 +#define QB_MOD_CURR 0x0189 +#define LEN_MOD_CURR_LSB 0x018A +#define LEN_MOD_CURR_MSB 0x018B +#define QB_MOD 0x018C +#define LEN_MOD_LSB 0x018D +#define LEN_MOD_MSB 0x018E +#define FIFO_RX_TH_LSB 0x018F +#define FIFO_RX_TH_MSB 0x0190 +#define FIFO_RX_D_NB_LSB 0x0191 +#define FIFO_RX_D_NB_MSB 0x0192 +#define IT_STATUS_0 0x0193 +#define IT_STATUS_1 0x0194 +#define IT_MASK_0 0x0195 +#define IT_MASK_1 0x0196 +#define IT_HOST_PIN_CFG 0x0200 +#define CFG_0 0x0201 +#define CFG_1 0x0202 +#define CFG_2 0x0203 +#define IT_HOST 0x0204 +#define MOD_IT_STATUS 0x0205 +#define MOD_IT_MASK 0x0206 +#define MOD_CTRL_A 0x0207 +#define MOD_CTRL_B 0x0208 +#define DEST_SEL 0x0209 +#define CAM_MSB_ADD 0x020A +#define GPIO0_DIR 0x020B +#define GPIO0_DATA_IN 0x020C +#define GPIO0_DATA_OUT 0x020D +#define GPIO0_STATUS 0x020E +#define GPIO0_IT_MASK 0x020F +#define GPIO0_DFT 0x0210 +#define GPIO0_MASK_DATA 0x0211 +#define GPIO1_DIR 0x0212 +#define GPIO1_DATA_IN 0x0213 +#define GPIO1_DATA_OUT 0x0214 +#define GPIO1_STATUS 0x0215 +#define GPIO1_IT_MASK 0x0216 +#define MEM_ACC_TIME_A 0x0217 +#define MEM_ACC_TIME_B 0x0218 +#define IO_ACC_TIME_A 0x0219 +#define IO_ACC_TIME_B 0x021A +#define EXT_CH_ACC_TIME_A 0x021B +#define EXT_CH_ACC_TIME_B 0x021C +#define PAR_IF_0 0x021D +#define PAR_IF_1 0x021E +#define PAR_IF_CTRL 0x021F +#define 
PCK_LENGTH 0x0220 +#define USB2TS_CTRL 0x0221 +#define USB2TS0_RDL 0x0222 +#define USB2TS1_RDL 0x0223 +#define TS2USB_CTRL 0x0224 +#define TSOUT_PAR_CTRL 0x0225 +#define TSOUT_PAR_CLK_SEL 0x0226 +#define S2P_CH0_CTRL 0x0227 +#define S2P_CH1_CTRL 0x0228 +#define P2S_CH0_CTRL 0x0229 +#define P2S_CH1_CTRL 0x022A +#define TS_IT_STATUS 0x022B +#define TS_IT_MASK 0x022C +#define IN_SEL 0x022D +#define OUT_SEL 0x022E +#define ROUTER_CAM_CH 0x022F +#define ROUTER_CAM_MOD 0x0230 +#define FIFO_CTRL 0x0231 +#define FIFO1_2_STATUS 0x0232 +#define FIFO3_4_STATUS 0x0233 +#define GAP_REMOVER_CH0_CTRL 0x0234 +#define GAP_REMOVER_CH1_CTRL 0x0235 +#define SYNC_RTV_CTRL 0x0236 +#define SYNC_RTV_CH0_SYNC_NB 0x0237 +#define SYNC_RTV_CH0_PATTERN 0x0238 +#define SYNC_RTV_CH1_SYNC_NB 0x0239 +#define SYNC_RTV_CH1_PATTERN 0x023A +#define SYNC_RTV_OFFSET_PATT 0x023B +#define CTRL_FILTER 0x023D +#define PID_EN_FILTER_CH0 0x023E +#define PID_EN_FILTER_CH1 0x023F +#define PID_LSB_FILTER_CH0_0 0x0240 +#define PID_MSB_FILTER_CH0_0 0x0241 +#define PID_LSB_FILTER_CH0_1 0x0242 +#define PID_MSB_FILTER_CH0_1 0x0243 +#define PID_LSB_FILTER_CH0_2 0x0244 +#define PID_MSB_FILTER_CH0_2 0x0245 +#define PID_LSB_FILTER_CH0_3 0x0246 +#define PID_MSB_FILTER_CH0_3 0x0247 +#define PID_LSB_FILTER_CH0_4 0x0248 +#define PID_MSB_FILTER_CH0_4 0x0249 +#define PID_LSB_FILTER_CH0_5 0x024A +#define PID_MSB_FILTER_CH0_5 0x024B +#define PID_LSB_FILTER_CH0_6 0x024C +#define PID_MSB_FILTER_CH0_6 0x024D +#define PID_LSB_FILTER_CH0_7 0x024E +#define PID_MSB_FILTER_CH0_7 0x024F +#define PID_LSB_FILTER_CH1_0 0x0260 +#define PID_MSB_FILTER_CH1_0 0x0261 +#define PID_LSB_FILTER_CH1_1 0x0262 +#define PID_MSB_FILTER_CH1_1 0x0263 +#define PID_LSB_FILTER_CH1_2 0x0264 +#define PID_MSB_FILTER_CH1_2 0x0265 +#define PID_LSB_FILTER_CH1_3 0x0266 +#define PID_MSB_FILTER_CH1_3 0x0267 +#define PID_LSB_FILTER_CH1_4 0x0268 +#define PID_MSB_FILTER_CH1_4 0x0269 +#define PID_LSB_FILTER_CH1_5 0x026A +#define PID_MSB_FILTER_CH1_5 0x026B +#define 
PID_LSB_FILTER_CH1_6 0x026C +#define PID_MSB_FILTER_CH1_6 0x026D +#define PID_LSB_FILTER_CH1_7 0x026E +#define PID_MSB_FILTER_CH1_7 0x026F +#define PID_OLD_LSB_REMAPPER_0 0x0280 +#define PID_OLD_MSB_REMAPPER_0 0x0281 +#define PID_OLD_LSB_REMAPPER_1 0x0282 +#define PID_OLD_MSB_REMAPPER_1 0x0283 +#define PID_OLD_LSB_REMAPPER_2 0x0284 +#define PID_OLD_MSB_REMAPPER_2 0x0285 +#define PID_OLD_LSB_REMAPPER_3 0x0286 +#define PID_OLD_MSB_REMAPPER_3 0x0287 +#define PID_OLD_LSB_REMAPPER_4 0x0288 +#define PID_OLD_MSB_REMAPPER_4 0x0289 +#define PID_OLD_LSB_REMAPPER_5 0x028A +#define PID_OLD_MSB_REMAPPER_5 0x028B +#define PID_OLD_LSB_REMAPPER_6 0x028C +#define PID_OLD_MSB_REMAPPER_6 0x028D +#define PID_OLD_LSB_REMAPPER_7 0x028E +#define PID_OLD_MSB_REMAPPER_7 0x028F +#define PID_NEW_LSB_REMAPPER_0 0x02A0 +#define PID_NEW_MSB_REMAPPER_0 0x02A1 +#define PID_NEW_LSB_REMAPPER_1 0x02A2 +#define PID_NEW_MSB_REMAPPER_1 0x02A3 +#define PID_NEW_LSB_REMAPPER_2 0x02A4 +#define PID_NEW_MSB_REMAPPER_2 0x02A5 +#define PID_NEW_LSB_REMAPPER_3 0x02A6 +#define PID_NEW_MSB_REMAPPER_3 0x02A7 +#define PID_NEW_LSB_REMAPPER_4 0x02A8 +#define PID_NEW_MSB_REMAPPER_4 0x02A9 +#define PID_NEW_LSB_REMAPPER_5 0x02AA +#define PID_NEW_MSB_REMAPPER_5 0x02AB +#define PID_NEW_LSB_REMAPPER_6 0x02AC +#define PID_NEW_MSB_REMAPPER_6 0x02AD +#define PID_NEW_LSB_REMAPPER_7 0x02AE +#define PID_NEW_MSB_REMAPPER_7 0x02AF +#define MERGER_DIV_MICLK 0x02C0 +#define PID_AND_SYNC_REMAPPER_CTRL 0x02C1 +#define PID_EN_REMAPPER 0x02C2 +#define SYNC_SYMBOL 0x02C3 +#define PID_AND_SYNC_REMAPPER_INV_CTRL 0x02C4 +#define BITRATE_CH0_LSB 0x02C5 +#define BITRATE_CH0_MSB 0x02C6 +#define BITRATE_CH1_LSB 0x02C7 +#define BITRATE_CH1_MSB 0x02C8 +#define STATUS_CLK_SWITCH_0 0x02C9 +#define STATUS_CLK_SWITCH_1 0x02CA +#define RESET_CLK_SWITCH_0 0x02CB +#define RESET_CLK_SWITCH_1 0x02CC +#define PAD_DRVSTR_CTRL 0x02CD +#define PAD_PUPD_CTRL 0x02CE +#define PRE_HEADER_ADDER_CH0_0 0x02D0 +#define PRE_HEADER_ADDER_CH0_1 0x02D1 +#define 
PRE_HEADER_ADDER_CH0_2 0x02D2 +#define PRE_HEADER_ADDER_CH0_3 0x02D3 +#define PRE_HEADER_ADDER_CH0_4 0x02D4 +#define PRE_HEADER_ADDER_CH0_5 0x02D5 +#define PRE_HEADER_ADDER_CH0_6 0x02D6 +#define PRE_HEADER_ADDER_CH0_7 0x02D7 +#define PRE_HEADER_ADDER_CH0_8 0x02D8 +#define PRE_HEADER_ADDER_CH0_9 0x02D9 +#define PRE_HEADER_ADDER_CH0_10 0x02DA +#define PRE_HEADER_ADDER_CH0_11 0x02DB +#define PRE_HEADER_ADDER_CH1_0 0x02E0 +#define PRE_HEADER_ADDER_CH1_1 0x02E1 +#define PRE_HEADER_ADDER_CH1_2 0x02E2 +#define PRE_HEADER_ADDER_CH1_3 0x02E3 +#define PRE_HEADER_ADDER_CH1_4 0x02E4 +#define PRE_HEADER_ADDER_CH1_5 0x02E5 +#define PRE_HEADER_ADDER_CH1_6 0x02E6 +#define PRE_HEADER_ADDER_CH1_7 0x02E7 +#define PRE_HEADER_ADDER_CH1_8 0x02E8 +#define PRE_HEADER_ADDER_CH1_9 0x02E9 +#define PRE_HEADER_ADDER_CH1_10 0x02EA +#define PRE_HEADER_ADDER_CH1_11 0x02EB +#define PRE_HEADER_ADDER_CTRL 0x02EC +#define PRE_HEADER_ADDER_LEN 0x02ED +#define PRE_HEADER_REMOVER_CTRL 0x02EE +#define FSM_DVB 0x02F0 +#define TS2USB_FSM_DEBUG 0x02F2 +#define TSOUT_PAR_FSM_DEBUG 0x02F3 +#define GAP_REMOVER_FSM_DEBUG 0x02F4 +#define PID_AND_SYNC_REMAPPER_FSM_DEBUG 0x02F5 +#define PRE_HEADER_ADDER_FSM_DEBUG 0x02F6 +#define SYNC_RTV_FSM_DEBUG 0x02F7 +#define CHECK_PHY_CLK 0x0E00 +#define USB_CTRL1 0x0E01 +#define USB_ISO2_out 0x0800 +#define USB_ISO1_out 0x1000 +#define USB_Interrupt_out 0x1E00 +#define USB_Bulk_in 0x1F00 +#define CC2_Buffer_out 0x2000 +#define USB_EP0 0x30C0 +#define CC2_Buffer_in 0x4000 +#define USB_ISO2_in 0x5800 +#define USB_ISO1_in 0x6000 +#define nmb_vector_address_lsb 0xFFFA +#define nmb_vector_address_msb 0xFFFB +#define reset_vector_address_lsb 0xFFFC +#define reset_vector_address_msb 0xFFFD +#define irb_vector_address_lsb 0xFFFE +#define irb_vector_address_msb 0xFFFF + + +#define CIMAX_REG_HDR_SIZE 4 +#define CIMAX_REG_PLD_SIZE 255 +#define CIMAX_CAM_HDR_SIZE 4 +#define CIMAX_CAM_PLD_SIZE 65535 + +#define DEF_LOCK(_l_) struct mutex _l_ + +struct cimax_usb { + struct platform_device 
*pdev; + struct device_s *dev; + + struct aml_cimax *cimax; + + u8 buf[CIMAX_REG_HDR_SIZE + CIMAX_CAM_HDR_SIZE + CIMAX_CAM_PLD_SIZE]; + int buf_size; + + int cam_inserted[2]; +#define IN_INSERTED 0x01 +#define IN_POWERED 0x02 +#define IN_LINKED 0x04 + int cam_data_ready[2]; + + int poll_mode; +#define STOP_MODE 0 +#define POLL_MODE 1 +#define INT_MODE 2 + + int rst_io; + + struct workqueue_struct *workq; + struct delayed_work work; + int work_auto_restart; + int work_cnt; + + struct delayed_work power_work; + int power_work_cnt; + int cam_det; + + DEF_LOCK(lock); +#define lock_init(_usb) mutex_init(&(_usb)->lock) +#define lock_lock(_usb) do {\ + int err = mutex_lock_interruptible(&(_usb)->lock);\ + if (err)\ + return err;\ +} while (0) +#define lock_unlock(_usb) mutex_unlock(&(_usb)->lock) + + u8 *cis; +#define CIS_MAX 512 +}; + +static struct cimax_usb *g_usb; + +MODULE_PARM_DESC(usbdebug, "enable verbose debug messages"); +static int cimax_usb_debug = 1; +module_param_named(usbdebug, cimax_usb_debug, int, 0644); + +MODULE_PARM_DESC(usbpoll_interval, "interval for usb poll"); +static int usb_poll_interval = 100; +module_param_named(usbpoll_interval, usb_poll_interval, int, 0644); + +MODULE_PARM_DESC(usbpoll_mode, "set cimax poll mode, need reset"); +static int cimax_poll_mode = 1; +module_param_named(usbpoll_mode, cimax_poll_mode, int, 0644); + +MODULE_PARM_DESC(usbcam_irq_mode, "set cam irq mode, need reset"); +static int cam_irq_mode; +module_param_named(usbcam_irq_mode, cam_irq_mode, int, 0644); + + +#define CIMAX_REG_READ 0xff +#define CIMAX_REG_READ_OK 0x4c +#define CIMAX_REG_WRITE 0x7f +#define CIMAX_REG_WRITE_OK 0x4d +#define CIMAX_REG_INIT 0x00 +#define CIMAX_REG_INIT_OK 0x4b +#define CIMAX_REG_CMD_ERROR 0x51 + +#define CIMAX_CAM_RESET 0x01 +#define CIMAX_CAM_RESET_OK 0x40 +#define CIMAX_CAM_CIS 0x02 +#define CIMAX_CAM_CIS_OK 0x41 +#define CIMAX_CAM_COR 0x03 +#define CIMAX_CAM_COR_OK 0x42 +#define CIMAX_CAM_NEG 0x04 +#define CIMAX_CAM_NEG_OK 0x43 +#define 
CIMAX_CAM_WLPDU 0x05 +#define CIMAX_CAM_WLPDU_OK 0x44 +#define CIMAX_CAM_RLPDU 0x06 +#define CIMAX_CAM_RLPDU_OK 0x46 +#define CIMAX_CAM_EVT 0x0d +#define CIMAX_CAM_DET_OK 0x45 +#define CIMAX_CAM_NOCAM 0x49 +#define CIMAX_CAM_ERROR 0x4a +#define CIMAX_CAM_NOEVT 0x55 +#define CIMAX_CAM_DATA_READY 0x4e +#define CIMAX_CAM_WBUSY 0x54 +#define CIMAX_CAM_PENDING 0x56 +#define CIMAX_CAM_REGSTAT 0x0e +#define CIMAX_CAM_REGSTAT_OK 0x57 + + +#define CIMAX_CAM_PKT_CNT_VAL 1 + +#define CIMAX_SLOT_A 0 +#define CIMAX_SLOT_B 1 + +#define CIMAX_CMD_RESP_MASK 0x7f + +#define cimax_to_usb(_c) ((struct cimax_usb *)((_c)->priv)) +#define dev_to_usb(_d) ((struct cimax_usb *)usb_get_drvdata(_d)) + +#define byte_to_u16(_b1, _b2) (((_b1)<<8) | (_b2)) + +#define hdr_cmd_resp(_s) ((_s)->buf[0] & CIMAX_CMD_RESP_MASK) + +#define reg_hdr(_s) ((_s)->buf) +#define reg_addr(_s) byte_to_u16((_s)->buf[1], (_s)->buf[2]) +#define reg_hdr_dat_size(_s) ((_s)->buf[3]) +#define reg_dat(_s) (&((_s)->buf[CIMAX_REG_HDR_SIZE])) + +#define cam_hdr(_s) ((_s)->buf) +#define cam_hdr_slot(_s) (((_s)->buf[0] & 0x80) ? 
1 : 0) +#define cam_hdr_pkt_cnt(_s) ((_s)->buf[1]) +#define cam_hdr_dat_size(_s) byte_to_u16((_s)->buf[2], (_s)->buf[3]) +#define cam_dat(_s) (&((_s)->buf[CIMAX_CAM_HDR_SIZE])) + +#define REG_TIMEOUT 500 +#define CAM_TIMEOUT 5000 + +static int aml_cimax_usb_mod_init(struct platform_device *pdev); +static void aml_cimax_usb_mod_exit(struct platform_device *pdev); + +static int cimax_usb_set_loop(struct cimax_usb *usb, int loop); + +static void dump(char *title, u8 *buf, int size) +{ + int i; + pr_info("%s\n", title); + for (i = 0; i < size; i++) { + if (!(i & 0xf)) + pr_info("\n\t"); + pr_info("%02x ", *(buf+i)); + } + pr_info("\n"); +} + +static void perr(char *err, struct cimax_usb *usb) +{ + pr_error("error: %s\n", err); + dump("dump:", usb->buf, 16); +} + +static inline unsigned long get_jiffies(void) +{ + return (unsigned long)(sched_clock()/10000000); +} + +static int cam_usb_cam_detect(struct cimax_usb *usb, int slot, int flag) +{ + usb->cam_inserted[slot] = flag; + pr_inf("detect slot(%d): 0x%x(%s)\n", + slot, usb->cam_inserted[slot], + (!flag) ? "none" : + (flag & IN_LINKED) ? "linked" : + (flag & IN_POWERED) ? "powered" : + (flag & IN_INSERTED) ? 
"inserted" : + "unknown"); + aml_cimax_slot_state_changed(usb->cimax, slot, + usb->cam_inserted[slot]); + return 0; +} + +static inline void set_usb_cam_ready(struct cimax_usb *usb, int slot) +{ + if (usb->cam_inserted[slot] & IN_POWERED) { + cam_usb_cam_detect(usb, slot, + usb->cam_inserted[slot] | IN_LINKED); + cimax_usb_set_loop(usb, 1);/*set auto-loop*/ + } +} + +static int init_reg_hdr(u8 *hdr, u8 tag, int addr, int size) +{ + hdr[0] = tag; + hdr[1] = (addr>>8) & 0xff; + hdr[2] = addr & 0xff; + hdr[3] = size; + return 0; +} + +static int check_reg_hdr(u8 *hdr, u8 tag, int addr, int size) +{ + return hdr[0] != tag + || hdr[1] != ((addr>>8) & 0xff) + || hdr[2] != (addr & 0xff) + || hdr[3] != size; +} + +static int aml_cimax_usb_read_reg(struct aml_cimax *cimax, int addr, + u8 *buf, int size) +{ + struct cimax_usb *usb = cimax_to_usb(cimax); + struct device_s *dev = usb->dev; + u8 out[CIMAX_REG_HDR_SIZE]; + int err = 0; + + init_reg_hdr(out, CIMAX_REG_READ, addr, size); + + lock_lock(usb); + + /*pr_dbg("rd %02x:%02x:%02x:%02x\n", + out[0], out[1], + out[2], out[3]);*/ + err = cimax_usb_ci_write(dev, + out, CIMAX_REG_HDR_SIZE, usb->buf, sizeof(usb->buf)); + if (err) + goto end; + if (check_reg_hdr(reg_hdr(usb), CIMAX_REG_READ_OK, addr, size) != 0) { + pr_dbg("rd %02x:%02x:%02x:%02x\n", + out[0], out[1], + out[2], out[3]); + perr("read reg fail.", usb); + err = -EINVAL; + goto end; + } + memcpy(buf, reg_dat(usb), size); +end: + lock_unlock(usb); + return err; +} + +static int aml_cimax_usb_write_reg(struct aml_cimax *cimax, int addr, + u8 *buf, int size) +{ + struct cimax_usb *usb = cimax_to_usb(cimax); + struct device_s *dev = usb->dev; + u8 out[CIMAX_REG_HDR_SIZE + CIMAX_REG_PLD_SIZE]; + int err = 0; + + init_reg_hdr(out, CIMAX_REG_WRITE, addr, size); + memcpy(&out[CIMAX_REG_HDR_SIZE], buf, size); + + lock_lock(usb); + + pr_dbg("wr %02x:%02x:%02x:%02x\n", + out[0], out[1], + out[2], out[3]); + err = cimax_usb_ci_write(dev, + out, CIMAX_REG_HDR_SIZE + size, 
usb->buf, sizeof(usb->buf)); + if (err) + goto end; + if (check_reg_hdr(reg_hdr(usb), CIMAX_REG_WRITE_OK, addr, 0) != 0) { + perr("write reg fail.", usb); + err = -EINVAL; + goto end; + } +end: + lock_unlock(usb); + return err; +} + +static inline int init_cam_hdr(u8 *hdr, int cmd, int size) +{ + hdr[0] = cmd; + hdr[1] = CIMAX_CAM_PKT_CNT_VAL; + hdr[2] = (size>>8) & 0xff; + hdr[3] = size & 0xff; + return 0; +} + +static inline int cam_err(struct cimax_usb *usb) +{ + if (hdr_cmd_resp(usb) != CIMAX_CAM_ERROR + || cam_hdr_pkt_cnt(usb) != CIMAX_CAM_PKT_CNT_VAL + || cam_hdr_dat_size(usb) != 2) + return 0; + return byte_to_u16(cam_dat(usb)[0], cam_dat(usb)[1]); +} + +static inline char *cam_err_str(int err) +{ +#define CAMERROR_RESET 0x0101 +#define CAMERROR_CIS_BUF 0x0201 +#define CAMERROR_CIS_SIZE 0x0202 +#define CAMERROR_CAM_NOT_ACT 0x0203 +#define CAMERROR_COR_NOT_READY 0x0301 +#define CAMERROR_COR_VAL_CHK 0x0302 +#define CAMERROR_NEG_NO_RESP 0x0401 +#define CAMERROR_NEG_BAD_SIZE 0x0402 +#define CAMERROR_NEG_NOT_READY 0x0403 +#define CAMERROR_LPDU_NOT_AVAIL 0x0601 + struct { int err; char *str; } cam_err_strings[] = { + {CAMERROR_RESET, "reset error, not ready."}, + {CAMERROR_CIS_BUF, "cis error, buffer not allocated."}, + {CAMERROR_CIS_SIZE, "cis error, bad cis size."}, + {CAMERROR_CAM_NOT_ACT, "cam not activated."}, + {CAMERROR_COR_NOT_READY, "cam not ready during write COR."}, + {CAMERROR_COR_VAL_CHK, "COR value check failed."}, + {CAMERROR_NEG_NO_RESP, "cam not responding when negotiation."}, + {CAMERROR_NEG_BAD_SIZE, "cam buf size length != 2."}, + {CAMERROR_NEG_NOT_READY, "cam not ready during negotiation."}, + {CAMERROR_LPDU_NOT_AVAIL, "lpdu not available."} + }; + int i; + for (i = 0; + i < sizeof(cam_err_strings)/sizeof(cam_err_strings[0]); i++) { + if (cam_err_strings[i].err == err) + return cam_err_strings[i].str; + } + return "err unknown."; +} + +static int cimax_usb_access_cam(struct cimax_usb *usb, int slot, + int cmd, u8 *tx, int tx_size, u8 *rx, int 
rx_size) +{ + struct device_s *dev = usb->dev; + u8 *out = NULL; + int err = 0; + + out = kzalloc(CIMAX_CAM_HDR_SIZE + CIMAX_CAM_PLD_SIZE, GFP_KERNEL); + if (!out) { + pr_err("no mem for access cam.\n"); + return -ENOMEM; + } + + cmd |= slot ? 0x80 : 0; + init_cam_hdr(out, cmd, tx_size); + memcpy(&out[CIMAX_CAM_HDR_SIZE], tx, tx_size); + /*dump("access cam:", out, CIMAX_CAM_HDR_SIZE+size);*/ + + lock_lock(usb); + + err = cimax_usb_ci_write(dev, + out, CIMAX_CAM_HDR_SIZE + tx_size, rx, rx_size); + if (err) + goto end; + if (cam_hdr_slot(usb) != slot) { + pr_error("expect slot(%d), but slot(%d)\n", + slot, cam_hdr_slot(usb)); + err = -EINVAL; + goto end; + } + switch (hdr_cmd_resp(usb)) { + case CIMAX_CAM_NOCAM: + pr_dbg("no cam\n"); + err = -ENODEV; + break; + case CIMAX_CAM_ERROR: + pr_error("cam error\n"); + pr_error("err code: 0x%04x(%s)\n", cam_err(usb), + cam_err_str(cam_err(usb))); + err = -ENODEV; + break; + case CIMAX_CAM_WBUSY: + pr_dbg("cam busy\n"); + err = -EBUSY; + break; + case CIMAX_CAM_PENDING: + pr_dbg("cam pending\n"); + err = -EAGAIN; + break; + } +end: + kfree(out); + lock_unlock(usb); + return err; +} + +static int aml_cimax_usb_read_cis(struct aml_cimax *cimax, int slot, + u8 *buf, int size) +{ + struct cimax_usb *usb = cimax_to_usb(cimax); + int err = 0; + int len; + + err = cimax_usb_access_cam(usb, slot, CIMAX_CAM_CIS, + NULL, 0, usb->buf, sizeof(usb->buf)); + if (err) + goto end; + if (hdr_cmd_resp(usb) != CIMAX_CAM_CIS_OK + || cam_hdr_pkt_cnt(usb) != CIMAX_CAM_PKT_CNT_VAL) { + perr("read cis fail.", usb); + err = -EINVAL; + goto end; + } + len = cam_hdr_dat_size(usb); + if (size < len) { + pr_error("cis size too large, expect<%d, but:%d\n", size, len); + perr("cis fail.", usb); + err = -EINVAL; + goto end; + } + memcpy(buf, cam_dat(usb), len); + + if (!usb->cis) + usb->cis = kzalloc((len < 512) ? 
512 : len, GFP_KERNEL); + if (usb->cis) + memcpy(usb->cis, cam_dat(usb), len); + +end: + return err; +} +#define CIMAX_CAM_COR_PLD_SIZE 5 +static int aml_cimax_usb_write_cor(struct aml_cimax *cimax, int slot, + int addr, u8 *buf) +{ + struct cimax_usb *usb = cimax_to_usb(cimax); + int err = 0; + u8 out[CIMAX_CAM_COR_PLD_SIZE + 8]; + int sz = CIMAX_CAM_COR_PLD_SIZE; + + out[0] = addr>>8 & 0xff; + out[1] = addr & 0xff; + out[2] = buf[0]; + out[3] = 0; + out[4] = 0; + + if (!cam_irq_mode) { + out[5] = 0x40;/*cam poll mode*/ + sz++; + } + + err = cimax_usb_access_cam(usb, slot, CIMAX_CAM_COR, + out, sz, usb->buf, sizeof(usb->buf)); + if (err) + goto end; + if (hdr_cmd_resp(usb) != CIMAX_CAM_COR_OK + || cam_hdr_pkt_cnt(usb) != CIMAX_CAM_PKT_CNT_VAL + || cam_hdr_dat_size(usb) != 0) { + perr("write cor fail.", usb); + err = -EINVAL; + goto end; + } +end: + return err; +} +#define CIMAX_CAM_NEG_PLD_SIZE 2 +static int aml_cimax_usb_negotiate(struct aml_cimax *cimax, int slot, int size) +{ + struct cimax_usb *usb = cimax_to_usb(cimax); + int ret = 0; + u8 out[CIMAX_CAM_NEG_PLD_SIZE]; + + out[0] = (size>>8) & 0xff; + out[1] = size & 0xff; + + ret = cimax_usb_access_cam(usb, slot, CIMAX_CAM_NEG, + out, CIMAX_CAM_NEG_PLD_SIZE, + usb->buf, sizeof(usb->buf)); + if (ret) + goto end; + if (hdr_cmd_resp(usb) != CIMAX_CAM_NEG_OK + || cam_hdr_pkt_cnt(usb) != CIMAX_CAM_PKT_CNT_VAL + || cam_hdr_dat_size(usb) != 2) { + perr("negotiate fail.", usb); + ret = -EINVAL; + goto end; + } + ret = byte_to_u16(cam_dat(usb)[0], cam_dat(usb)[1]); + + set_usb_cam_ready(usb, slot); +end: + return ret; +} + +static int aml_cimax_usb_write_lpdu(struct aml_cimax *cimax, int slot, + u8 *buf, int size) +{ + struct cimax_usb *usb = cimax_to_usb(cimax); + int ret = 0; + + /*dump("lpdu ->", buf, size);*/ + ret = cimax_usb_access_cam(usb, slot, CIMAX_CAM_WLPDU, + buf, size, usb->buf, sizeof(usb->buf)); + if (ret) + goto end; + if (hdr_cmd_resp(usb) != CIMAX_CAM_WLPDU_OK + || cam_hdr_pkt_cnt(usb) != 
CIMAX_CAM_PKT_CNT_VAL + || cam_hdr_dat_size(usb) != 0) { + perr("write lpdu fail.", usb); + ret = -EINVAL; + goto end; + } + ret = size; +end: + return ret; +} + +static int aml_cimax_usb_read_lpdu(struct aml_cimax *cimax, int slot, + u8 *buf, int size) +{ + struct cimax_usb *usb = cimax_to_usb(cimax); + int ret = 0; + + ret = cimax_usb_access_cam(usb, slot, CIMAX_CAM_RLPDU, + NULL, 0, usb->buf, sizeof(usb->buf)); + if (ret) + goto end; + if (hdr_cmd_resp(usb) != CIMAX_CAM_RLPDU_OK + || cam_hdr_pkt_cnt(usb) != CIMAX_CAM_PKT_CNT_VAL) { + perr("read lpdu fail.", usb); + ret = -EINVAL; + goto end; + } + ret = cam_hdr_dat_size(usb); + memcpy(buf, cam_dat(usb), ret); + + /*dump("lpdu <-", buf, ret);*/ + + usb->cam_data_ready[slot] = 0; +end: + return ret; +} + +static int aml_cimax_usb_read_cam_status(struct aml_cimax *cimax, int slot) +{ + struct cimax_usb *usb = cimax_to_usb(cimax); + int ret = 0; + + if (cam_irq_mode && usb->cam_data_ready[slot]) + return 0x80; + + ret = cimax_usb_access_cam(usb, slot, CIMAX_CAM_REGSTAT, + NULL, 0, usb->buf, sizeof(usb->buf)); + if (ret) + goto end; + if (hdr_cmd_resp(usb) != CIMAX_CAM_REGSTAT_OK + || cam_hdr_pkt_cnt(usb) != CIMAX_CAM_PKT_CNT_VAL + || cam_hdr_dat_size(usb) != 1) { + perr("read cam status fail.", usb); + ret = -EINVAL; + goto end; + } + + ret = cam_dat(usb)[0]; +end: + return ret; +} + +static int aml_cimax_usb_slot_reset(struct aml_cimax *cimax, int slot) +{ + struct cimax_usb *usb = cimax_to_usb(cimax); + int ret = 0; + + usb->cam_data_ready[slot] = 0; + + ret = cimax_usb_access_cam(usb, slot, CIMAX_CAM_RESET, + NULL, 0, usb->buf, sizeof(usb->buf)); + if (ret) + goto end; + if (hdr_cmd_resp(usb) != CIMAX_CAM_RESET_OK + || cam_hdr_pkt_cnt(usb) != CIMAX_CAM_PKT_CNT_VAL + || cam_hdr_dat_size(usb) != 0) { + perr("slot reset fail.", usb); + ret = -EINVAL; + goto end; + } +end: + return ret; +} + +static int aml_cimax_usb_cam_reset(struct aml_cimax *cimax, int slot) +{ + pr_dbg("Slot(%d): camreset\n", slot); + return 0; 
+} + +static int aml_cimax_usb_slot_shutdown(struct aml_cimax *cimax, int slot) +{ + pr_dbg("Slot(%d): shutdown\n", slot); + return 0; +} +static int aml_cimax_usb_slot_ts_enable(struct aml_cimax *cimax, int slot) +{ + pr_dbg("Slot(%d): ts control\n", slot); + return 0; +} +static int aml_cimax_usb_slot_status(struct aml_cimax *cimax, int slot) +{ + struct cimax_usb *usb = cimax_to_usb(cimax); + if (usb->cam_inserted[slot] & IN_POWERED) { + /*pr_dbg("CA Module present and ready\n");*/ + return DVB_CA_EN50221_POLL_CAM_PRESENT | + DVB_CA_EN50221_POLL_CAM_READY; + } else { + /*pr_error("CA Module not present or not ready\n");*/ + } + return 0; +} + +static int cimax_usb_cam_plugin(struct cimax_usb *usb, int slot, int plugin) +{ + pr_dbg("cam plug: slot(%d) %s\n", + slot, plugin ? "plugged" : "unplugged"); + return aml_cimax_camchanged(usb->cimax, slot, plugin); +} + +static int cimax_usb_set_power(struct cimax_usb *usb, int on) +{ + u8 reg = 0; + int err = 0; + if (!on) { + reg = 0; + err = aml_cimax_usb_read_reg(usb->cimax, MOD_IT_MASK, ®, 1); + if (err) + return err; + reg |= 0x03; + reg &= 0xf3; + + err = aml_cimax_usb_write_reg(usb->cimax, MOD_IT_MASK, ®, 1); + if (err) + return err; + } + reg = on ? 
0x3 : 0x0; + return aml_cimax_usb_write_reg(usb->cimax, GPIO0_DATA_OUT, ®, 1); +} + +static int cimax_usb_check_poe(struct cimax_usb *usb, int *on) +{ + u8 reg = 0; + int err = 0; + + *on = 0; + + err = aml_cimax_usb_read_reg(usb->cimax, CFG_1, ®, 1); + if (err) + return err; + if (reg & 0x20) {/*if VCCEN*/ + reg |= 0x08;/*set POE*/ + err = aml_cimax_usb_write_reg(usb->cimax, CFG_1, ®, 1); + if (err) + return err; + err = aml_cimax_usb_read_reg(usb->cimax, CFG_1, ®, 1); + if (err) + return err; + if (reg & 0x08)/*if POE ok*/ + *on = 1; + } + return err; +} + +static void cimax_usb_power_work(struct work_struct *work) +{ + struct cimax_usb *usb = container_of(to_delayed_work(work), + struct cimax_usb, power_work); + int power = 0; + int err = 0; + + usb->power_work_cnt++; + err = cimax_usb_set_power(usb, 1); + if (err) + return; + + err = cimax_usb_check_poe(usb, &power); + if (err) + return; + + if (power) { + return; + } + + schedule_delayed_work(&usb->power_work, usb_poll_interval); +} + +static int cimax_usb_cam_powerctrl(struct cimax_usb *usb, + int slot, int power) +{ + if (slot != 0) + return 0; + +#ifdef DISABLE_POWER_PATCH + if (power) { + cam_usb_cam_detect(usb, slot, + usb->cam_inserted[slot] | IN_POWERED); + cimax_usb_cam_plugin(usb, slot, 1); + } + return 0; +#else + pr_inf("cancel power ctrl previous\n"); + cancel_delayed_work_sync(&usb->power_work); + + if (!power) { + int err = 0; + err = cimax_usb_set_power(usb, 0); + pr_inf("slot[%d] power off\n", slot); + return 0; + } + + INIT_DELAYED_WORK(&usb->power_work, &cimax_usb_power_work); + schedule_delayed_work(&usb->power_work, usb_poll_interval); + pr_inf("slot[%d] power ctrl started\n", slot); +#endif + return 0; +} + +static int cimax_usb_poll(struct cimax_usb *usb) +{ + struct device_s *dev = usb->dev; + int power = 0; + int err = 0; + int slot = 0; + + if (!usb->cam_det) { + for (slot = 0; slot < 2; slot++) { + int addr = (!slot) ? 
MOD_CTRL_A : MOD_CTRL_B; + u8 reg = 0; + err = aml_cimax_usb_read_reg(usb->cimax, + addr, ®, 1); + if (reg & 1) { + cam_usb_cam_detect(usb, slot, + (reg & 1) ? IN_INSERTED : 0); + msleep(200); + err = cimax_usb_set_power(usb, (reg & 1)); + err = cimax_usb_check_poe(usb, &power); + pr_inf("slot[%d] power on\n", slot); + msleep(200); + if (power) { + cam_usb_cam_detect(usb, slot, + usb->cam_inserted[slot] | IN_POWERED); + cimax_usb_cam_plugin(usb, slot, 1); + usb->cam_det = 1; + } + } + } + return 0; + } + err = cimax_usb_ci_read_evt(dev, CIMAX_SLOT_A, + usb->buf, sizeof(usb->buf)); + if (err) + goto end; + + switch (hdr_cmd_resp(usb)) { + case CIMAX_CAM_DET_OK: { + int slot = cam_hdr_slot(usb); + int insert = cam_dat(usb)[0]; + if ((!!usb->cam_inserted[slot]) != insert) { + cam_usb_cam_detect(usb, slot, + insert ? IN_INSERTED : 0); + cimax_usb_cam_powerctrl(usb, slot, insert); + } + if (!insert) + usb->cam_det = 0; + } break; + case CIMAX_CAM_DATA_READY: { + int slot = cam_hdr_slot(usb); + usb->cam_data_ready[slot] = 1; + } break; + case CIMAX_CAM_NOEVT: + break; + default: + pr_error("unknown resp:%02x\n", hdr_cmd_resp(usb)); + break; + } +end: + return 0; +} + +static void cimax_usb_poll_work(struct work_struct *work) +{ + struct cimax_usb *usb = + container_of(to_delayed_work(work), struct cimax_usb, work); + usb->work_cnt++; + cimax_usb_poll(usb); + if (usb->work_auto_restart) + queue_delayed_work(usb->workq, &usb->work, usb_poll_interval); +} + +#define CTRL_DISABLE -1 +#define CTRL_STOP 0 +#define CTRL_START 1 + +static inline int cimax_usb_poll_ctrl(struct cimax_usb *usb, int ctrl) +{ + if (ctrl == CTRL_START) { + if (usb->workq) + return 0; + usb->work_auto_restart = 1; + usb->workq = create_singlethread_workqueue("cimax_usb"); + INIT_DELAYED_WORK(&usb->work, &cimax_usb_poll_work); + queue_delayed_work(usb->workq, + &usb->work, usb_poll_interval); + pr_inf("poll started\n"); + } else { + if (!usb->workq) + return 0; + usb->work_auto_restart = 0; + 
cancel_delayed_work_sync(&usb->work); + destroy_workqueue(usb->workq); + usb->workq = NULL; + pr_inf("poll stopped\n"); + } + return 0; +} + +static int cimax_usb_setup_poll(struct cimax_usb *usb, int poll_mode) +{ + if (poll_mode == usb->poll_mode) + return 0; + switch (poll_mode) { + case POLL_MODE: + cimax_usb_poll_ctrl(usb, CTRL_START); + usb->poll_mode = POLL_MODE; + break; + case STOP_MODE: + if (usb->poll_mode == POLL_MODE) + cimax_usb_poll_ctrl(usb, CTRL_DISABLE); + usb->poll_mode = STOP_MODE; + break; + default: + break; + } + return 0; +} + +static int cimax_usb_hw_reset(struct cimax_usb *usb, int reset_val) +{ + /*trigger reset io*/ + if (usb->rst_io) { + gpio_direction_output(usb->rst_io, reset_val ? 1 : 0); + msleep(50); + gpio_direction_output(usb->rst_io, reset_val ? 0 : 1); + } + return 0; +} + +static int cimax_usb_set_loop(struct cimax_usb *usb, int loop) +{ + int a = usb->cam_inserted[0]; + int b = usb->cam_inserted[1]; + u8 cm[2]; + + pr_inf("set loop: %d\n", loop); + + cm[0] = loop ? (b ? 0x85 : 0x80) : 0x81;/*CH*/ + cm[1] = loop ? (a ? 0x51 : 0x11) : 0x00;/*MOD*/ + + return aml_cimax_usb_write_reg(usb->cimax, ROUTER_CAM_CH, cm, 2); +} + +int cimax_usb_dev_add(struct device_s *dev, int id) +{ + pr_inf("dev add\n"); + if (!g_usb) + return 0; + + (void)id; + + cimax_usb_device_open(dev); + cimax_usb_select_interface(dev, 3); + + lock_lock(g_usb); + g_usb->dev = dev; + lock_unlock(g_usb); + + if (0) + { + /* + the cimax's fw do not report cam status + when board power on with cam plugged, + have to check manually here. + */ + int slot = 0; + int err = 0; + for (slot = 0; slot < 2; slot++) { + int addr = (!slot) ? MOD_CTRL_A : MOD_CTRL_B; + u8 reg = 0; + err = aml_cimax_usb_read_reg(g_usb->cimax, + addr, ®, 1); + cam_usb_cam_detect(g_usb, slot, + (reg & 1) ? IN_INSERTED : 0); + cimax_usb_cam_powerctrl(g_usb, slot, (reg & 1)); + } + } + cimax_usb_set_power(g_usb, 0); + cimax_usb_setup_poll(g_usb, cimax_poll_mode ? 
POLL_MODE : INT_MODE); + return 0; +} +EXPORT_SYMBOL(cimax_usb_dev_add); + +int cimax_usb_dev_remove(struct device_s *dev, int id) +{ + pr_dbg("dev remove\n"); + if (!g_usb) + return 0; + (void)id; + pr_dbg("setup poll -> stop\n"); + cimax_usb_setup_poll(g_usb, STOP_MODE); + pr_dbg("setup poll end\n"); + lock_lock(g_usb); + g_usb->dev = NULL; + lock_unlock(g_usb); + return 0; +} +EXPORT_SYMBOL(cimax_usb_dev_remove); + +static int cimax_usb_get_config_from_dts(struct cimax_usb *usb) +{ + struct device_node *child = NULL; + struct platform_device *pdev = usb->pdev; + struct device_node *np = pdev->dev.of_node; + pr_dbg("fetch cimax usb in dts\n"); + + child = of_get_child_by_name(np, "cimax"); + if (child == NULL) { + pr_error("cimax not found in dts\n"); + return -1; + } + child = of_get_child_by_name(child, "usb"); + if (!child) { + pr_error("usb not found in cimax"); + return -1; + } +/* +dvbci { + compatible = "amlogic, dvbci"; + dev_name = "dvbci"; + io_type = <2>;//0:iobus,1:usb,2:cimax + cimax { + io_type = <1> //0:spi 1:usb + usb { + rst_gpio = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>; + }; + }; + +}; +*/ + { + int ret = 0; + int gpio = -1; + gpio = of_get_named_gpio_flags(child, "rst-gpios", 0, NULL); + if (gpio != -1) { + ret = gpio_request(gpio, "cimax"); + if (ret < 0) { + pr_error("rst-gpios request fail.\n"); + return ret; + } + usb->rst_io = gpio; + cimax_usb_hw_reset(usb, 1); + pr_dbg("rst: %d\n", usb->rst_io); + } else { + pr_error("rst io got fail, %d\n", gpio); + } + } + return 0; +} + +int aml_cimax_usb_init(struct platform_device *pdev, struct aml_cimax *cimax) +{ + struct cimax_usb *cimax_usb; + + cimax_usb = kzalloc(sizeof(struct cimax_usb), GFP_KERNEL); + if (!cimax_usb) + return -ENOMEM; + + cimax_usb->pdev = pdev; + cimax_usb->cimax = cimax; + cimax_usb_get_config_from_dts(cimax_usb); + + /*init usb_lock*/ + lock_init(cimax_usb); + + /*init cimax used api.*/ +#define WI(_f)\ + cimax->ops._f = aml_cimax_usb_##_f + WI(read_cis); + WI(write_cor); + 
WI(negotiate); + WI(read_lpdu); + WI(write_lpdu); + WI(read_cam_status); + WI(cam_reset); + WI(slot_reset); + WI(slot_shutdown); + WI(slot_ts_enable); + WI(slot_status); + WI(read_reg); + WI(write_reg); + + cimax->priv = cimax_usb; + + g_usb = cimax_usb; + + aml_cimax_usb_mod_init(pdev); + + cimax_usb_set_cb(cimax_usb_dev_add, cimax_usb_dev_remove); + + return 0; +} +EXPORT_SYMBOL(aml_cimax_usb_init); + +int aml_cimax_usb_exit(struct aml_cimax *cimax) +{ + struct cimax_usb *usb = cimax_to_usb(cimax); + + if (!usb) + return -ENODEV; + + aml_cimax_usb_mod_exit(usb->pdev); + + cimax_usb_device_close(usb->dev); + cimax_usb_setup_poll(usb, STOP_MODE); + + if (usb->rst_io) + gpio_free(usb->rst_io); + + kfree(usb->cis); + + kfree(usb); + cimax->priv = NULL; + + g_usb = NULL; + return 0; +} +EXPORT_SYMBOL(aml_cimax_usb_exit); + +static int cimax_usb_reset(struct cimax_usb *usb, int reset_val) +{ + pr_dbg("reset usb:%p, rst:%d\n", usb, usb ? usb->rst_io : -1); + if (!usb) + return -ENODEV; + + pr_inf("cimax usb reset\n"); + + /*notify unplugged*/ + aml_cimax_camchanged(usb->cimax, 0, 0); + aml_cimax_camchanged(usb->cimax, 1, 0); + + if (usb->dev) + cimax_usb_device_close(usb->dev); + + cimax_usb_setup_poll(usb, STOP_MODE); + + usb->cam_inserted[0] = usb->cam_inserted[1] = 0; + usb->cam_data_ready[0] = usb->cam_data_ready[1] = 0; + + cimax_usb_hw_reset(usb, reset_val); + + pr_inf("cimax usb reset end\n"); + return 0; +} + +static ssize_t reset_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret; + ret = sprintf(buf, "echo 1 > %s\n", attr->attr.name); + return ret; +} + +static ssize_t reset_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + int ret; + int val = 0; + if (!g_usb) + return size; + ret = sscanf(buf, "%i", &val); + if (ret == 1) + ret = cimax_usb_reset(g_usb, val); + return size; +} + +static ssize_t debug_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret = 0; 
+ if (!g_usb) + return ret; + + ret = sprintf(buf, "poll mode: %d\n", g_usb->poll_mode); + ret += sprintf(buf+ret, "status slot[0]=[%d] slot[1]=[%d]\n", + g_usb->cam_inserted[0], g_usb->cam_inserted[1]); + { + int power = 0; + int err = cimax_usb_check_poe(g_usb, &power); + ret += sprintf(buf+ret, "power slot[0]=[%d] slot[1]=[%d]\n", + err ? -1 : power, 0); + } + ret += sprintf(buf+ret, "data slot[0]=[%d] slot[1]=[%d]\n", + g_usb->cam_data_ready[0], g_usb->cam_data_ready[1]); + ret += sprintf(buf+ret, "work cnt:%d\n", g_usb->work_cnt); + ret += sprintf(buf+ret, "pwr work cnt:%d\n", g_usb->power_work_cnt); + return ret; +} + +static int reg_addr; +static ssize_t addr_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret = 0; + ret = sprintf(buf, "addr = 0x%04x\n", reg_addr); + return ret; +} + +static ssize_t addr_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + if (!g_usb) + return size; + if (sscanf(buf, "%i", ®_addr) != 1) + return size; + return size; +} + +static ssize_t reg_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret = 0; + u8 reg_val = 0; + struct aml_cimax *cimax = NULL; + + if (!g_usb) + return ret; + + cimax = g_usb->cimax; + ret = aml_cimax_usb_read_reg(cimax, reg_addr, ®_val, 1); + if (ret) + ret = sprintf(buf, "read fail, err=%d\n", ret); + else + ret = sprintf(buf, "reg[0x%04x] = 0x%02x\n", reg_addr, reg_val); + return ret; +} + +static ssize_t reg_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + int ret = 0; + struct aml_cimax *cimax = NULL; + int val = 0; + u8 reg_val = 0; + + if (!g_usb) + return size; + + if (sscanf(buf, "%i", &val) != 1) + return size; + reg_val = val; + cimax = g_usb->cimax; + ret = aml_cimax_usb_write_reg(cimax, reg_addr, ®_val, 1); + if (ret) + return ret; + return size; +} + +static int cis_mode; /*0:hex 1:binary*/ +static ssize_t cis_show(struct class *class, + struct 
class_attribute *attr, char *buf) +{ + int ret = 0; + + if (!g_usb || !g_usb->cis) + return ret; + + if (cis_mode == 0) { + int i; + for (i = 0; i < CIS_MAX; i++) { + if (i && !(i & 0xf)) + ret += sprintf(buf+ret, "\n"); + ret += sprintf(buf+ret, "%02X ", g_usb->cis[i]); + } + ret += sprintf(buf+ret, "\n"); + return ret; + } else { + memcpy(buf, g_usb->cis, CIS_MAX); + return CIS_MAX; + } + return ret; +} + +static ssize_t cis_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + if (size >= 3 + && !memcmp(buf, "bin", 3)) + cis_mode = 1; + else + cis_mode = 0; + return size; +} + +static ssize_t ts_rate_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret = 0; + u8 lsb = 0, msb = 0, plen = 0; + struct aml_cimax *cimax = NULL; + int err = 0; + + if (!g_usb) + return ret; + + cimax = g_usb->cimax; + err = aml_cimax_usb_read_reg(cimax, PCK_LENGTH, &plen, 1); + err |= aml_cimax_usb_read_reg(cimax, BITRATE_CH0_LSB, &lsb, 1); + err |= aml_cimax_usb_read_reg(cimax, BITRATE_CH0_MSB, &msb, 1); + if (err) + ret += sprintf(buf+ret, "read fail, err=%d\n", err); + else if (!byte_to_u16(msb, lsb)) + ret += sprintf(buf+ret, "rate[0] = 0 Kbps\n"); + else + ret += sprintf(buf+ret, "rate[0] = %d Kbps\n", + 540*plen*8/byte_to_u16(msb, lsb)); + if (err) + return ret; + + err = aml_cimax_usb_read_reg(cimax, BITRATE_CH1_LSB, &lsb, 1); + err |= aml_cimax_usb_read_reg(cimax, BITRATE_CH1_MSB, &msb, 1); + if (err) + ret += sprintf(buf+ret, "read fail, err=%d\n", err); + else if (!byte_to_u16(msb, lsb)) + ret += sprintf(buf+ret, "rate[1] = 0 Kbps\n"); + else + ret += sprintf(buf+ret, "rate[1] = %d Kbps\n", + 540*plen*8/byte_to_u16(msb, lsb)); + return ret; +} + +static ssize_t loop_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret = 0; + u8 ch = 0, mod = 0; + struct aml_cimax *cimax = NULL; + int err = 0; + + if (!g_usb) + return ret; + + cimax = g_usb->cimax; + err = 
aml_cimax_usb_read_reg(cimax, ROUTER_CAM_CH, &ch, 1); + err |= aml_cimax_usb_read_reg(cimax, ROUTER_CAM_MOD, &mod, 1); + if (err) { + ret = sprintf(buf, "read fail, err=%d\n", err); + return ret; + } + ret += sprintf(buf + ret, "OUT-0 <= "); + switch (ch & 0x0f) { + case 0x0: + ret += sprintf(buf + ret, "CAM-A"); break; + case 0x1: + ret += sprintf(buf + ret, "CH0-IN"); break; + case 0x2: + ret += sprintf(buf + ret, "CH1-IN"); break; + case 0x3: + ret += sprintf(buf + ret, "REMAPPER"); break; + case 0x4: + ret += sprintf(buf + ret, "PREHEADER"); break; + case 0x5: + ret += sprintf(buf + ret, "CAM-B"); break; + case 0x6: + ret += sprintf(buf + ret, "GAPREMOVER-0"); break; + case 0x7: + ret += sprintf(buf + ret, "GAPREMOVER-1"); break; + case 0x8: + ret += sprintf(buf + ret, "NONE"); break; + default: + ret += sprintf(buf + ret, "UNKNOWN"); break; + } + ret += sprintf(buf + ret, "\nCAM-A <= "); + switch (mod & 0x07) { + case 0x1: + ret += sprintf(buf + ret, "CH0-IN"); break; + case 0x2: + ret += sprintf(buf + ret, "CH1-IN"); break; + case 0x3: + ret += sprintf(buf + ret, "REMAPPER"); break; + case 0x4: + ret += sprintf(buf + ret, "PREHEADER"); break; + case 0x5: + ret += sprintf(buf + ret, "CAM-B"); break; + case 0x6: + ret += sprintf(buf + ret, "GAPREMOVER-0"); break; + case 0x7: + ret += sprintf(buf + ret, "GAPREMOVER-1"); break; + default: + ret += sprintf(buf + ret, "NONE"); break; + } + ret += sprintf(buf + ret, "\n"); + + return ret; +} + +static ssize_t loop_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + int loop = 0; + + if (!g_usb) + return size; + + if (sscanf(buf, "%i", &loop) == 1) + cimax_usb_set_loop(g_usb, loop); + return size; +} + +static ssize_t slot_reset_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + int err = 0; + int slot = 0; + struct aml_cimax *cimax = NULL; + + if (!g_usb) + return size; + + if (sscanf(buf, "%i", &slot) == 1) { + if (slot == 0 || 
slot == 1) { + pr_dbg("reset slot %d\n", slot); + cimax = g_usb->cimax; + err = aml_cimax_usb_slot_reset(cimax, slot); + } + } + return size; +} + +static ssize_t detect_store(struct class *class, + struct class_attribute *attr, const char *buf, size_t size) +{ + int err = 0; + int slot = 0; + struct aml_cimax *cimax = NULL; + + if (!g_usb) + return size; + + if (sscanf(buf, "%i", &slot) == 1) { + if (slot == 0 || slot == 1) { + int addr = (!slot) ? MOD_CTRL_A : MOD_CTRL_B; + u8 reg = 0; + cimax = g_usb->cimax; + err = aml_cimax_usb_read_reg(cimax, addr, ®, 1); + cam_usb_cam_detect(g_usb, slot, + (reg & 1) ? IN_INSERTED : 0); + cimax_usb_cam_powerctrl(g_usb, slot, (reg & 1)); + } + } + return size; +} + +static CLASS_ATTR_RW(reset); +static CLASS_ATTR_RO(debug); +static CLASS_ATTR_RW(addr); +static CLASS_ATTR_RW(reg); +static CLASS_ATTR_RW(cis); +static CLASS_ATTR_RO(ts_rate); +static CLASS_ATTR_RW(loop); +static CLASS_ATTR_WO(slot_reset); +static CLASS_ATTR_WO(detect); + +#define CLASS_ATTR(name) &class_attr_##name.attr + +static struct attribute *cimax_usb_class_attrs[] = { + CLASS_ATTR(reset), + CLASS_ATTR(debug), + CLASS_ATTR(addr), + CLASS_ATTR(reg), + CLASS_ATTR(cis), + CLASS_ATTR(ts_rate), + CLASS_ATTR(loop), + CLASS_ATTR(slot_reset), + CLASS_ATTR(detect), + NULL +}; + + +ATTRIBUTE_GROUPS(cimax_usb_class); + +static struct class cimax_usb_class = { + .name = "cimax_usb", + .class_groups = cimax_usb_class_groups, +}; + +static int aml_cimax_usb_mod_init(struct platform_device *pdev) +{ + int ret; + pr_dbg("Amlogic CIMAX USB Init\n"); + + ret = class_register(&cimax_usb_class); + + return 0; +} + +static void aml_cimax_usb_mod_exit(struct platform_device *pdev) +{ + pr_dbg("Amlogic CIMAX USB Exit\n"); + class_unregister(&cimax_usb_class); +} +
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_usb.h b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_usb.h new file mode 100644 index 0000000..471d2da --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_usb.h
/***************************************************************************
 * Copyright (c) 2014 Amlogic, Inc. All rights reserved.
 *
 * This source code is subject to the terms and conditions defined in the
 * file 'LICENSE' which is part of this source code package.
 *
 * Description:
 * Public interface of the CIMAX-over-USB backend for the Amlogic CI
 * (common interface) driver.
 *
***************************************************************************/

#ifndef _AML_CIMAX_USB_H_
#define _AML_CIMAX_USB_H_

#include <linux/platform_device.h>
#include "aml_cimax.h"

/*
 * Attach the USB backend to a CIMAX instance: allocates the private
 * cimax_usb state, reads the reset GPIO from the device tree, fills in
 * ci->ops with the aml_cimax_usb_* handlers (read_cis, write_cor,
 * negotiate, lpdu read/write, slot control, register access) and
 * registers the USB device add/remove callbacks.
 * Returns 0 on success or a negative errno (-ENOMEM on allocation failure).
 */
int aml_cimax_usb_init(struct platform_device *pdev, struct aml_cimax *ci);

/*
 * Tear down the USB backend previously set up by aml_cimax_usb_init():
 * stops polling, closes the USB device, frees the reset GPIO, the cached
 * CIS buffer and the private state, and clears ci->priv.
 * Returns 0 on success or -ENODEV if no backend is attached.
 */
int aml_cimax_usb_exit(struct aml_cimax *ci);

#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_usb_priv.h b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_usb_priv.h new file mode 100644 index 0000000..aec694c --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/aml_cimax_usb_priv.h
/***************************************************************************
 * Copyright (c) 2014 Amlogic, Inc. All rights reserved.
 *
 * This source code is subject to the terms and conditions defined in the
 * file 'LICENSE' which is part of this source code package.
 *
 * Description:
 * Private header for the CIMAX USB device hotplug hooks.
 *
***************************************************************************/

#ifndef _CIMAX_USB_DEV_H_
#define _CIMAX_USB_DEV_H_

/*
 * Dead code: weak-symbol prototypes for the USB device add/remove hooks,
 * compiled out with "#if 0".  The live definitions are exported from
 * aml_cimax_usb.c (cimax_usb_dev_add / cimax_usb_dev_remove) and are
 * registered via cimax_usb_set_cb() instead of being resolved as weak
 * symbols.  Kept here, disabled, presumably for reference — confirm
 * before removing.
 */
#if 0
__attribute__ ((weak))
int cimax_usb_dev_add(struct device_s *device, int id);
__attribute__ ((weak))
int cimax_usb_dev_remove(struct device_s *device, int id);
#endif

#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/dvb_ca_en50221_cimax.c b/drivers/stream_input/parser/dvb_ci/cimax/dvb_ca_en50221_cimax.c new file mode 100644 index 0000000..c3c6511 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/dvb_ca_en50221_cimax.c
@@ -0,0 +1,1987 @@ +/* + * dvb_ca.c: generic DVB functions for EN50221 CAM CIMAX interfaces + * + * Parts of this file were based on sources as follows: + * + * based on code: + * + * Copyright (C) 1999-2002 Ralph Metzler + * & Marcus Metzler for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/kthread.h> +#include <linux/compat.h> + +#include "dvb_ca_en50221_cimax.h" +#include "dvb_ringbuffer.h" + +#define READ_LPDU_PKT + +static int dvb_ca_en50221_debug = 1; + +module_param_named(cam_debug, dvb_ca_en50221_debug, int, 0644); +MODULE_PARM_DESC(cam_debug, "enable verbose debug messages"); + +#define HOST_LINK_BUF_SIZE 0x1000 + +static int dvb_ca_en50221_link_size = HOST_LINK_BUF_SIZE; +module_param_named(link_size, dvb_ca_en50221_link_size, int, 0644); +MODULE_PARM_DESC(link_size, "debug only, no more than 0x1000"); + +static int dvb_ca_en50221_buffer_free; +module_param_named(buffer_free, dvb_ca_en50221_buffer_free, int, 0644); +MODULE_PARM_DESC(buffer_free, "debug only"); + +#define dprintk(args...)\ + do {\ + if (dvb_ca_en50221_debug)\ + printk(args);\ + } while (0) +#define pr_error(fmt, args...) 
printk("CA EN50211: " fmt, ## args) + +#define INIT_TIMEOUT_SECS 40 + + +#define RX_BUFFER_SIZE 65535 + +#define MAX_RX_PACKETS_PER_ITERATION 10 + +#define CTRLIF_DATA 0 +#define CTRLIF_COMMAND 1 +#define CTRLIF_STATUS 1 +#define CTRLIF_SIZE_LOW 2 +#define CTRLIF_SIZE_HIGH 3 + +#define CMDREG_HC 1 /* Host control */ +#define CMDREG_SW 2 /* Size write */ +#define CMDREG_SR 4 /* Size read */ +#define CMDREG_RS 8 /* Reset interface */ +#define CMDREG_FRIE 0x40 /* Enable FR interrupt */ +#define CMDREG_DAIE 0x80 /* Enable DA interrupt */ +#define IRQEN (CMDREG_DAIE) + +#define STATUSREG_RE 1 /* read error */ +#define STATUSREG_WE 2 /* write error */ +#define STATUSREG_FR 0x40 /* module free */ +#define STATUSREG_DA 0x80 /* data available */ +#define STATUSREG_TXERR (STATUSREG_RE|STATUSREG_WE)/* general transfer error */ + + +#define DVB_CA_SLOTSTATE_NONE 0 +#define DVB_CA_SLOTSTATE_UNINITIALISED 1 +#define DVB_CA_SLOTSTATE_RUNNING 2 +#define DVB_CA_SLOTSTATE_INVALID 3 +#define DVB_CA_SLOTSTATE_WAITREADY 4 +#define DVB_CA_SLOTSTATE_VALIDATE 5 +#define DVB_CA_SLOTSTATE_WAITFR 6 +#define DVB_CA_SLOTSTATE_LINKINIT 7 +#define DVB_CA_SLOTSTATE_WAITLINKINIT 8 + +#define MAX_CIS_SIZE 512 + +/* Information on a CA slot */ +struct dvb_ca_slot { + + /* current state of the CAM */ + int slot_state; + + /* mutex used for serializing access to one CI slot */ + struct mutex slot_lock; + + /* Number of CAMCHANGES that have occurred since last processing */ + atomic_t camchange_count; + + /* Type of last CAMCHANGE */ + int camchange_type; + + /* base address of CAM config */ + u32 config_base; + + /* value to write into Config Control register */ + u8 config_option; + + /* if 1, the CAM supports DA IRQs */ + u8 da_irq_supported:1; + +#ifdef READ_LPDU_PKT + /* Offset into current ringbuffer when user buffer was not big enough + to return entire pkt */ + int rx_offset; +#endif + + /* size of the buffer to use when talking to the CAM */ + int link_buf_size; + + /* buffer for incoming 
packets */ + struct dvb_ringbuffer rx_buffer; + + /* timer used during various states of the slot */ + unsigned long timeout; +}; + +/* Private CA-interface information */ +struct dvb_ca_private { + + /* pointer back to the public data structure */ + struct dvb_ca_en50221_cimax *pub; + + /* the DVB device */ + struct dvb_device *dvbdev; + + /* Flags describing the interface (DVB_CA_FLAG_*) */ + u32 flags; + + /* number of slots supported by this CA interface */ + unsigned int slot_count; + + /* information on each slot */ + struct dvb_ca_slot *slot_info; + + /* wait queues for read() and write() operations */ + wait_queue_head_t wait_queue; + + /* PID of the monitoring thread */ + struct task_struct *thread; + + /* Flag indicating if the CA device is open */ + unsigned int open:1; + + /* Flag indicating the thread should wake up now */ + unsigned int wakeup:1; + + /* Delay the main thread should use */ + unsigned long delay; + + /* Slot to start looking for data + to read from in the next user-space read operation */ + int next_read_slot; + + /* mutex serializing ioctls */ + struct mutex ioctl_mutex; + + /*two bufs for read/write*/ + u8 *rbuf; + u8 *wbuf; +}; + +static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca); +static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, + int slot, u8 *ebuf, int ecount); +static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, + int slot, u8 *ebuf, int ecount); + + +/** + * Safely find needle in haystack. + * + * @param haystack Buffer to look in. + * @param hlen Number of bytes in haystack. + * @param needle Buffer to find. + * @param nlen Number of bytes in needle. + * @return Pointer into haystack needle was found at, or NULL if not found. 
+ */ +static char *findstr(char *haystack, int hlen, char *needle, int nlen) +{ + int i; + + if (hlen < nlen) + return NULL; + + for (i = 0; i <= hlen - nlen; i++) { + if (!strncmp(haystack + i, needle, nlen)) + return haystack + i; + } + + return NULL; +} + + + +/* ************************************************************************** */ +/* EN50221 physical interface functions */ + + +/** + * Check CAM status. + */ +static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot) +{ + int slot_status; + int cam_present; + int cam_changed; + + /* IRQ mode */ + if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE) + return atomic_read(&ca->slot_info[slot].camchange_count) != 0; + + /* poll mode */ + slot_status = ca->pub->poll_slot_status(ca->pub, slot, ca->open); + + cam_present = (slot_status & DVB_CA_EN50221_POLL_CAM_PRESENT) ? 1 : 0; + cam_changed = (slot_status & DVB_CA_EN50221_POLL_CAM_CHANGED) ? 1 : 0; + if (!cam_changed) { + int cam_present_old = + (ca->slot_info[slot].slot_state + != DVB_CA_SLOTSTATE_NONE); + cam_changed = (cam_present != cam_present_old); + } + + if (cam_changed) { + if (!cam_present) { + ca->slot_info[slot].camchange_type = + DVB_CA_EN50221_CAMCHANGE_REMOVED; + } else { + ca->slot_info[slot].camchange_type = + DVB_CA_EN50221_CAMCHANGE_INSERTED; + } + atomic_set(&ca->slot_info[slot].camchange_count, 1); + } else { + if ((ca->slot_info[slot].slot_state + == DVB_CA_SLOTSTATE_WAITREADY) && + (slot_status & DVB_CA_EN50221_POLL_CAM_READY)) { + /* move to validate state if reset is completed */ + ca->slot_info[slot].slot_state = + DVB_CA_SLOTSTATE_VALIDATE; + } + } + return cam_changed; +} + + +/** + * Initialise the link layer connection to a CAM. + * + * @param ca CA instance. + * @param slot Slot id. + * + * @return 0 on success, nonzero on failure. 
 */
static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
{
	int ret;
	dprintk("%s\n", __func__);

	/* we'll be determining these during this function */
	ca->slot_info[slot].da_irq_supported = 0;
#ifdef READ_LPDU_PKT
	ca->slot_info[slot].rx_offset = 0;
#endif
	/* set the host link buffer size temporarily.
	   it will be overwritten with the real negotiated size later. */
	ca->slot_info[slot].link_buf_size = dvb_ca_en50221_link_size;
	dprintk("negotiate: host(%i)\n", ca->slot_info[slot].link_buf_size);

	/* NOTE(review): a <= 0 result is returned verbatim, so a zero
	 * return from negotiate() reports success to the caller without
	 * updating link_buf_size -- confirm that is intended. */
	ret = ca->pub->negotiate(ca->pub,
				 slot, ca->slot_info[slot].link_buf_size);
	if (ret <= 0)
		return ret;

	/* remember the size the CAM actually agreed to */
	ca->slot_info[slot].link_buf_size = ret;
	dprintk("Chosen link buffer size of %i\n", ret);

	/* success */
	return 0;
}

/**
 * Read a tuple from attribute memory.
 *
 * @param ca CA instance.
 * @param slot Slot id.
 * @param cis CIS data
 * @param address Address to read from. Updated.
 * @param tupleType Tuple id byte. Updated.
 * @param tupleLength Tuple length. Updated.
 * @param tuple Dest buffer for tuple (must be 256 bytes). Updated.
 *
 * @return 0 on success, nonzero on error.
 */
static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca,
		int slot, u8 *cis,
		int *address, int *tupleType, int *tupleLength, u8 *tuple)
{
	int i;
	int _tupleType;
	int _tupleLength;
	int _address = *address;

	/* grab the next tuple length and type */
	_tupleType = cis[_address];
	/* NOTE(review): cis[] is u8, so _tupleType/_tupleLength are
	 * always 0..255 and the two '< 0' tests below can never fire --
	 * they look like leftovers from a variant that fetched bytes via
	 * an error-returning accessor. Also note there is no bounds check
	 * of _address against the size of cis[] here; the caller limits
	 * the scan to 0x1000 -- TODO confirm that is sufficient. */
	if (_tupleType < 0)
		return _tupleType;
	if (_tupleType == 0xff) {
		/* end-of-chain marker has no length/body */
		dprintk("END OF CHAIN TUPLE type:0x%x\n", _tupleType);
		*address += 1;
		*tupleType = _tupleType;
		*tupleLength = 0;
		return 0;
	}
	_tupleLength = cis[_address + 1];
	if (_tupleLength < 0)
		return _tupleLength;
	_address += 2;

	dprintk("TUPLE type:0x%x length:%i\n", _tupleType, _tupleLength);

	/* read in the whole tuple */
	for (i = 0; i < _tupleLength; i++) {
		tuple[i] = cis[_address + (i)];
		dprintk("  0x%02x: 0x%02x %c\n",
			i, tuple[i] & 0xff,
			((tuple[i] > 31) && (tuple[i] < 127)) ? tuple[i] : '.');
	}
	_address += (_tupleLength);

	/* success */
	*tupleType = _tupleType;
	*tupleLength = _tupleLength;
	*address = _address;
	return 0;
}


/**
 * Parse attribute memory of a CAM module, extracting Config register,
 * and checking it is a DVB CAM module.
 *
 * @param ca CA instance.
 * @param slot Slot id.
 *
 * @return 0 on success, <0 on failure.
 */
static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
{
	int address = 0;
	int tupleLength;
	int tupleType;
	u8 tuple[257];
	char *dvb_str;
	int rasz;
	int status;
	int got_cftableentry = 0;
	int end_chain = 0;
	int i;
	u16 manfid = 0;
	u16 devid = 0;
	/* NOTE(review): MAX_CIS_SIZE bytes on the kernel stack -- confirm
	 * the constant is small enough for this context. */
	u8 cis[MAX_CIS_SIZE];

	/* snapshot the whole CIS once, then walk tuples from memory */
	status = ca->pub->read_cis(ca->pub, slot, cis, MAX_CIS_SIZE);
	if (status != 0)
		return -EINVAL;

	/* CISTPL_DEVICE_0A */
	status =
	    dvb_ca_en50221_read_tuple(ca, slot, cis, &address,
				      &tupleType, &tupleLength, tuple);
	if (status < 0) {
		pr_error("read status error\r\n");
		return status;
	}
	if (tupleType != 0x1D) {
		pr_error("read tupleType error [0x%x]\r\n", tupleType);
		return -EINVAL;
	}



	/* CISTPL_DEVICE_0C */
	status = dvb_ca_en50221_read_tuple(ca, slot, cis, &address,
					   &tupleType, &tupleLength, tuple);

	if (status < 0) {
		pr_error("read read cis error\r\n");
		return -EINVAL;
	}
	if (tupleType != 0x1C) {
		pr_error("read read cis type error\r\n");
		return -EINVAL;
	}



	/* CISTPL_VERS_1 */
	status =
	    dvb_ca_en50221_read_tuple(ca, slot, cis,
				      &address, &tupleType, &tupleLength, tuple);
	if (status < 0) {
		pr_error("read read cis version error\r\n");
		return status;
	}
	if (tupleType != 0x15) {
		pr_error("read read cis version type error\r\n");
		return -EINVAL;
	}



	/* CISTPL_MANFID */
	status = dvb_ca_en50221_read_tuple(ca, slot, cis, &address, &tupleType,
					   &tupleLength, tuple);
	if (status < 0) {
		pr_error("read read cis manfid error\r\n");
		return status;
	}
	if (tupleType != 0x20) {
		pr_error("read read cis manfid type error\r\n");
		return -EINVAL;
	}
	if (tupleLength != 4) {
		pr_error("read read cis manfid len error\r\n");
		return -EINVAL;
	}
	/* little-endian 16-bit manufacturer/device ids */
	manfid = (tuple[1] << 8) | tuple[0];
	devid = (tuple[3] << 8) | tuple[2];



	/* CISTPL_CONFIG */
	status = dvb_ca_en50221_read_tuple(ca, slot, cis, &address, &tupleType,
					   &tupleLength, tuple);
	if (status < 0) {

		pr_error("read read cis config error\r\n");
		return status;
	}
	if (tupleType != 0x1A) {
		pr_error("read read cis config type error\r\n");
		return -EINVAL;
	}
	if (tupleLength < 3) {
		pr_error("read read cis config len error\r\n");
		return -EINVAL;
	}

	/* extract the configbase */
	/* rasz = number of config-base address bytes minus one */
	rasz = tuple[0] & 3;
	if (tupleLength < (3 + rasz + 14)) {
		pr_error("read extract the configbase error\r\n");
		return -EINVAL;
	}
	ca->slot_info[slot].config_base = 0;
	for (i = 0; i < rasz + 1; i++)
		ca->slot_info[slot].config_base |= (tuple[2 + i] << (8 * i));

	/* check it contains the correct DVB string */
	dvb_str = findstr((char *)tuple, tupleLength, "DVB_CI_V", 8);
	if (dvb_str == NULL) {
		pr_error("find dvb str DVB_CI_V error\r\n");
		return -EINVAL;
	}
	if (tupleLength < ((dvb_str - (char *) tuple) + 12)) {
		pr_error("find dvb str DVB_CI_V len error\r\n");
		return -EINVAL;
	}

	/* is it a version we support? */
	if (strncmp(dvb_str + 8, "1.00", 4)) {
		pr_info("dvb_ca adapter %d: ", ca->dvbdev->adapter->num);
		pr_info("Unsupported DVB CAM module version %c%c%c%c\n",
			dvb_str[8], dvb_str[9], dvb_str[10], dvb_str[11]);
		return -EINVAL;
	}

	/* process the CFTABLE_ENTRY tuples, and any after those */
	/* 0x1000 caps the scan so a corrupt CIS cannot loop forever */
	while ((!end_chain) && (address < 0x1000)) {
		status = dvb_ca_en50221_read_tuple(ca, slot, cis, &address,
						   &tupleType, &tupleLength, tuple);
		if (status < 0) {
			pr_error("process tuples error\r\n");
			return status;
		}

		switch (tupleType) {
		case 0x1B:	/* CISTPL_CFTABLE_ENTRY */
			if (tupleLength < (2 + 11 + 17))
				break;

			/* if we've already parsed one, just use it */
			if (got_cftableentry)
				break;

			/* get the config option */
			ca->slot_info[slot].config_option = tuple[0] & 0x3f;

			/* OK, check it contains the correct strings */
			if ((findstr((char *)tuple, tupleLength, "DVB_HOST", 8)
				== NULL)
				|| (findstr((char *)tuple,
					tupleLength, "DVB_CI_MODULE", 13)
					== NULL)) {
				break;
			}
			got_cftableentry = 1;
			break;


		case 0x14:	/* CISTPL_NO_LINK */
			break;

		case 0xFF:	/* CISTPL_END */
			end_chain = 1;
			break;

		default:	/* Unknown tuple type
				-just skip this tuple and move to the next one */
			dprintk("dvb_ca: Skipping unknown tuple type:0x%x",
				tupleType);
			dprintk(" length:0x%x\n", tupleLength);
			break;
		}
	}

	if ((address > 0x1000) || (!got_cftableentry)) {
		pr_error("got_cftableentry :%d\r\n", got_cftableentry);
		return -EINVAL;
	}

	dprintk("Valid DVB CAM detected MANID:%x DEVID:%x", manfid, devid);
	dprintk(" CONFIGBASE:0x%x CONFIGOPTION:0x%x\n",
		ca->slot_info[slot].config_base,
		ca->slot_info[slot].config_option);

	/* success! */
	return 0;
}


/**
 * Set CAM's configoption correctly.
 *
 * @param ca CA instance.
 * @param slot Slot containing the CAM.
 */
static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
{
	int configoption;

	dprintk("%s\n", __func__);

	/* set the config option */
	/* write the option parsed from the CIS into the CAM's COR */
	ca->pub->write_cor(ca->pub, slot,
			   ca->slot_info[slot].config_base,
			   &ca->slot_info[slot].config_option);

	configoption = ca->slot_info[slot].config_option;
	dprintk("Set configoption 0x%x, base 0x%x\n",
		ca->slot_info[slot].config_option,
		ca->slot_info[slot].config_base);

	/* fine! */
	return 0;

}


/**
 * This function talks to an EN50221 CAM control interface. It reads a buffer of
 * data from the CAM. The data can either be stored in a supplied buffer, or
 * automatically be added to the slot's rx_buffer.
 *
 * @param ca CA instance.
 * @param slot Slot to read from.
 * @param ebuf If non-NULL, the data will be written to this buffer. If NULL,
 * the data will be added into the buffering system as a normal fragment.
 * @param ecount Size of ebuf. Ignored if ebuf is NULL.
 *
 * @return Number of bytes read, or < 0 on error
 */
static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca,
		int slot, u8 *ebuf, int ecount)
{
	int bytes_read;
	int status;
	/* shared bounce buffer; callers hold the slot lock */
	u8 *buf = ca->rbuf;

	/* dprintk("%s\n", __func__); */

	/* check if we have space for a link buf in the rx_buffer */
	if (ebuf == NULL) {
		int buf_free;

		if (ca->slot_info[slot].rx_buffer.data == NULL) {
			status = -EIO;
			goto exit;
		}
		buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer);
		/* export the free count (module-level diagnostic) */
		dvb_ca_en50221_buffer_free = buf_free;

		if (buf_free < (ca->slot_info[slot].link_buf_size
				+ DVB_RINGBUFFER_PKTHDRSIZE)) {
			status = -EAGAIN;
			goto exit;
		}
	}

	/* check if there is data available */
	status = ca->pub->read_cam_status(ca->pub, slot);
	if (status < 0)
		goto exit;
	if (!(status & STATUSREG_DA)) {
		/* no data */
		status = 0;
		goto exit;
	}

	/* read the amount of data */
	status = ca->pub->read_lpdu(ca->pub,
				    slot, buf, dvb_ca_en50221_link_size);
	if (status < 0) {
		/* read failure: force link re-initialisation */
		ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
		status = -EIO;
		goto exit;
	}

	bytes_read = status;

	/* check it will fit */
	if (ebuf == NULL) {
		if (bytes_read > ca->slot_info[slot].link_buf_size) {
			pr_error("dvb_ca adapter %d:",
				 ca->dvbdev->adapter->num);
			pr_error(" CAM tried to send a buffer larger ");
			pr_error("than the link buffer size(%i > %i)!\n",
				 bytes_read, ca->slot_info[slot].link_buf_size);
			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_LINKINIT;
			status = -EIO;
			goto exit;
		}
		/* an LPDU always carries at least a 2-byte header */
		if (bytes_read < 2) {
			pr_error("dvb_ca adapter %d: ",
				 ca->dvbdev->adapter->num);
			pr_error("CAM sent a buffer");
			pr_error("that was less than 2B!\n");
			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_LINKINIT;
			status = -EIO;
			goto exit;
		}
	} else {
		if (bytes_read > ecount) {
			pr_error("dvb_ca adapter %d:",
				 ca->dvbdev->adapter->num);
			pr_error(" CAM tried to send a buffer larger");
			pr_error(" than the ecount size!\n");
			status = -EIO;
			goto exit;
		}
	}

	/* OK, add it to the receive buffer,
	   or copy into external buffer if supplied */
	if (ebuf == NULL) {
		if (ca->slot_info[slot].rx_buffer.data == NULL) {
			status = -EIO;
			goto exit;
		}
		dvb_ringbuffer_pkt_write(&ca->slot_info[slot].rx_buffer,
					 buf, bytes_read);
	} else {
		memcpy(ebuf, buf, bytes_read);
	}

#ifndef READ_LPDU_PKT
	/* wake up readers when a last_fragment is received */
	if ((buf[1] & 0x80) == 0x00)
#endif
		wake_up_interruptible(&ca->wait_queue);
	status = bytes_read;

exit:
	return status;
}


/**
 * This function talks to an EN50221 CAM control interface.
 * It writes a buffer of data
 * to a CAM.
 *
 * @param ca CA instance.
 * @param slot Slot to write to.
 * @param ebuf The data in this buffer
 * is treated as a complete link-level packet to be written.
 * @param count Size of ebuf.
 *
 * @return Number of bytes written, or < 0 on error.
 */
static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca,
		int slot, u8 *buf, int bytes_write)
{
	int status;

	/* dprintk("%s\n", __func__); */

	/* sanity check */
	if (bytes_write > ca->slot_info[slot].link_buf_size)
		return -EINVAL;

	/* it is possible we are dealing with a single buffer implementation,
	   thus if there is data available for read or if there is even a read
	   already in progress, we do nothing but awake the kernel thread to
	   process the data if necessary.
	 */
	status = ca->pub->read_cam_status(ca->pub, slot);
	if (status < 0)
		return status;
	if (status & (STATUSREG_DA | STATUSREG_RE)) {
		if (status & STATUSREG_DA)
			dvb_ca_en50221_thread_wakeup(ca);
		status = -EAGAIN;
		return status;
	}

	/* the CAM must signal Free before we may write */
	if (!(status & STATUSREG_FR)) {
		status = -EAGAIN;
		return status;
	}

	status = ca->pub->write_lpdu(ca->pub, slot, buf, bytes_write);
	if (status < 0) {
		if (status == -EBUSY) {
			/* transient: let the caller retry */
			status = -EAGAIN;
			return status;
		} else {
			/* hard failure: force link re-initialisation */
			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_LINKINIT;
			status = -EIO;
			return status;
		}
	}

	status = bytes_write;

	return status;
}
/* NOTE(review): this EXPORT_SYMBOL() (and the one after
 * slot_shutdown below) follows the WRONG function body -- each
 * exports a symbol defined further down in the file. Kernel
 * convention is to place EXPORT_SYMBOL() immediately after the
 * function it exports; consider reordering. */
EXPORT_SYMBOL(dvb_ca_en50221_cimax_camchange_irq);



/* ************************************************************************** */
/* EN50221 higher level functions */


/**
 * A CAM has been removed => shut it down.
 *
 * @param ca CA instance.
 * @param slot Slot to shut down.
 */
static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
{
	dprintk("%s\n", __func__);

	ca->pub->slot_shutdown(ca->pub, slot);
	ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;

	/* need to wake up all processes to check if they're now
	   trying to write to a defunct CAM */
	wake_up_interruptible(&ca->wait_queue);

	dprintk("Slot %i shutdown\n", slot);

	/* success */
	return 0;
}
/* NOTE(review): misplaced -- see comment at the camchange export above. */
EXPORT_SYMBOL(dvb_ca_en50221_cimax_camready_irq);


/**
 * A CAMCHANGE IRQ has occurred.
 *
 * @param ca CA instance.
 * @param slot Slot concerned.
 * @param change_type One of the DVB_CA_CAMCHANGE_* values.
 */
void dvb_ca_en50221_cimax_camchange_irq(struct dvb_ca_en50221_cimax *pubca,
		int slot, int change_type)
{
	struct dvb_ca_private *ca = pubca->private;

	dprintk("CAMCHANGE IRQ slot:%i change_type:%i\n", slot, change_type);

	/* reject anything but the two defined change types */
	switch (change_type) {
	case DVB_CA_EN50221_CAMCHANGE_REMOVED:
	case DVB_CA_EN50221_CAMCHANGE_INSERTED:
		break;

	default:
		return;
	}

	ca->slot_info[slot].camchange_type = change_type;
	atomic_inc(&ca->slot_info[slot].camchange_count);
	dvb_ca_en50221_thread_wakeup(ca);
}
/* NOTE(review): misplaced EXPORT_SYMBOL() -- it exports the frda_irq
 * function defined below, not the function above. */
EXPORT_SYMBOL(dvb_ca_en50221_cimax_frda_irq);


/**
 * A CAMREADY IRQ has occurred.
 *
 * @param ca CA instance.
 * @param slot Slot concerned.
 */
void dvb_ca_en50221_cimax_camready_irq(struct dvb_ca_en50221_cimax *pubca,
		int slot)
{
	struct dvb_ca_private *ca = pubca->private;

	dprintk("CAMREADY IRQ slot:%i\n", slot);

	/* only meaningful while we are waiting for reset to complete */
	if (ca->slot_info[slot].slot_state == DVB_CA_SLOTSTATE_WAITREADY) {
		ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_VALIDATE;
		dvb_ca_en50221_thread_wakeup(ca);
	}
}


/**
 * An FR or DA IRQ has occurred.
 *
 * @param ca CA instance.
 * @param slot Slot concerned.
 */
void dvb_ca_en50221_cimax_frda_irq(struct dvb_ca_en50221_cimax *pubca,
		int slot)
{
	struct dvb_ca_private *ca = pubca->private;
	int flags;

	dprintk("FR/DA IRQ slot:%i\n", slot);

	switch (ca->slot_info[slot].slot_state) {
	case DVB_CA_SLOTSTATE_LINKINIT:
		/* during link init, a DA IRQ tells us the CAM supports
		 * IRQ-driven data-available notification */
		flags = ca->pub->get_capbility(pubca, slot);
		if (flags & DVB_CA_EN50221_CAP_IRQ) {
			dprintk("CAM supports DA IRQ\n");
			ca->slot_info[slot].da_irq_supported = 1;
		}
		break;

	case DVB_CA_SLOTSTATE_RUNNING:
		if (ca->open)
			dvb_ca_en50221_thread_wakeup(ca);
		break;
	}
}



/* ************************************************************************** */
/* EN50221 thread functions */

/**
 * Wake up the DVB CA thread
 *
 * @param ca CA instance.
 */
static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca)
{

	dprintk("%s\n", __func__);

	ca->wakeup = 1;
	/* make the wakeup flag visible before waking the thread */
	mb(); /*original*/
	wake_up_process(ca->thread);
}

/**
 * Update the delay used by the thread.
 *
 * @param ca CA instance.
 */
static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
{
	int delay;
	int curdelay = 100000000;
	int slot;

	/* Beware of too high polling frequency, because one polling
	 * call might take several hundred milliseconds until timeout!
	 */
	for (slot = 0; slot < ca->slot_count; slot++) {
		switch (ca->slot_info[slot].slot_state) {
		default:
		case DVB_CA_SLOTSTATE_NONE:
			delay = HZ * 60;  /* 60s */
			if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
				delay = HZ * 5;  /* 5s */
			break;
		case DVB_CA_SLOTSTATE_INVALID:
			delay = HZ * 60;  /* 60s */
			if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
				delay = HZ / 10;  /* 100ms */
			break;

		case DVB_CA_SLOTSTATE_UNINITIALISED:
		case DVB_CA_SLOTSTATE_WAITREADY:
		case DVB_CA_SLOTSTATE_VALIDATE:
		case DVB_CA_SLOTSTATE_WAITFR:
		case DVB_CA_SLOTSTATE_LINKINIT:
			delay = HZ / 10;  /* 100ms */
			break;
		case DVB_CA_SLOTSTATE_WAITLINKINIT:
			delay = HZ * 2;
			break;

		case DVB_CA_SLOTSTATE_RUNNING:
			delay = HZ * 60;  /* 60s */
			if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
				delay = HZ / 10;  /* 100ms */
			if (ca->open) {
				/* poll fast unless DA IRQs cover us */
				if ((!ca->slot_info[slot].da_irq_supported) ||
					(!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_DA)))
					delay = HZ / 10;  /* 100ms */
			}
			break;
		}

		/* the thread sleeps for the shortest delay any slot needs */
		if (delay < curdelay)
			curdelay = delay;
	}

	ca->delay = curdelay;
}

/* Run one iteration of the CAM state machine for a single slot.
 * Called only from the monitor thread, under the slot lock. */
static int dvb_ca_en50221_slot_process(struct dvb_ca_private *ca, int slot)
{
	int flags;
	int status;
	int pktcount;
	void *rxbuf;

	mutex_lock(&ca->slot_info[slot].slot_lock);

	/*check the cam status
	deal with CAMCHANGEs*/
	while (dvb_ca_en50221_check_camstatus(ca, slot)) {
		/* clear down an old CI slot if necessary */
		if (ca->slot_info[slot].slot_state !=
						DVB_CA_SLOTSTATE_NONE)
			dvb_ca_en50221_slot_shutdown(ca, slot);

		/* if a CAM is NOW present, initialise it */
		if (ca->slot_info[slot].camchange_type
			== DVB_CA_EN50221_CAMCHANGE_INSERTED)
			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_UNINITIALISED;

		/* we've handled one CAMCHANGE */
		dvb_ca_en50221_thread_update_delay(ca);
		atomic_dec(&ca->slot_info[slot].camchange_count);
	}

	/* CAM state machine */
	switch (ca->slot_info[slot].slot_state) {
	case DVB_CA_SLOTSTATE_NONE:
	case DVB_CA_SLOTSTATE_INVALID:
		/* no action needed */
		break;

	case DVB_CA_SLOTSTATE_UNINITIALISED:
		ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_WAITREADY;
		ca->pub->slot_reset(ca->pub, slot);
		ca->slot_info[slot].timeout = jiffies +
			(INIT_TIMEOUT_SECS * HZ);
		break;

	case DVB_CA_SLOTSTATE_WAITREADY:
		if (time_after(jiffies, ca->slot_info[slot].timeout)) {
			dprintk("%d: PC card did not respond\n",
				ca->dvbdev->adapter->num);
			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_INVALID;
			dvb_ca_en50221_thread_update_delay(ca);
			break;
		}
		/* no other action needed; will automatically
		 * change state when ready
		 */
		break;

	case DVB_CA_SLOTSTATE_VALIDATE:
		if (dvb_ca_en50221_parse_attributes(ca, slot) != 0) {
			/* we need this extra check
			   for annoying interfaces like the budget-av */
			if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
				&& (ca->pub->poll_slot_status)) {
				status = ca->pub->poll_slot_status(ca->pub,
								   slot, 0);
				if (!(status
					& DVB_CA_EN50221_POLL_CAM_PRESENT)) {
					ca->slot_info[slot].slot_state =
						DVB_CA_SLOTSTATE_NONE;
					dvb_ca_en50221_thread_update_delay(ca);
					break;
				}
			}
			dprintk(" %d: Invalid PC card inserted :(\n",
				ca->dvbdev->adapter->num);
			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_INVALID;
			dvb_ca_en50221_thread_update_delay(ca);
			break;
		}
		if (dvb_ca_en50221_set_configoption(ca, slot) != 0) {
			dprintk("%d: Unable initialise CAM:(\n",
				ca->dvbdev->adapter->num);

			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_INVALID;
			dvb_ca_en50221_thread_update_delay(ca);
			break;
		}

		if (ca->pub->cam_reset(ca->pub, slot) != 0) {
			dprintk("%d: Unable to reset CAM IF\n",
				ca->dvbdev->adapter->num);
			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_INVALID;
			dvb_ca_en50221_thread_update_delay(ca);
			break;
		}

		dprintk("DVB CAM validated successfully\n");

		ca->slot_info[slot].timeout =
			jiffies + (INIT_TIMEOUT_SECS * HZ);
		ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_WAITFR;
		ca->wakeup = 1;
		break;

	case DVB_CA_SLOTSTATE_WAITFR:
		if (time_after(jiffies, ca->slot_info[slot].timeout)) {
			pr_error("dvb_ca adapter %d: ",
				 ca->dvbdev->adapter->num);
			pr_error("DVB CAM did not respond :(\n");
			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_INVALID;
			dvb_ca_en50221_thread_update_delay(ca);
			break;
		}

		/* NOTE(review): the real status read is commented out and
		 * FR is assumed set, so this state always advances -- TODO
		 * confirm the hardware cannot stall here. */
		/*flags = ca->pub->read_cam_status(ca->pub, slot);*/
		flags = STATUSREG_FR;
		if (flags & STATUSREG_FR) {
			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_LINKINIT;
			ca->wakeup = 1;
		}
		break;

	case DVB_CA_SLOTSTATE_WAITLINKINIT:
		if (time_after(jiffies, ca->slot_info[slot].timeout)) {
			pr_error("dvb_ca adapter %d: ",
				 ca->dvbdev->adapter->num);
			pr_error("DVB CAM link initialisation failed :(\n");
			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_INVALID;
			dvb_ca_en50221_thread_update_delay(ca);
			break;
		}
		/* fall through -- while not timed out, WAITLINKINIT keeps
		 * retrying the LINKINIT work below */

	case DVB_CA_SLOTSTATE_LINKINIT:
		if (dvb_ca_en50221_link_init(ca, slot) != 0) {
			/* we need this extra check for annoying interfaces
			   like the budget-av */
			if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
				&& (ca->pub->poll_slot_status)) {
				status = ca->pub->poll_slot_status(ca->pub,
								   slot, 0);
				if (!(status
					& DVB_CA_EN50221_POLL_CAM_PRESENT)) {
					ca->slot_info[slot].slot_state =
						DVB_CA_SLOTSTATE_NONE;
					dvb_ca_en50221_thread_update_delay(ca);
					break;
				}
			}

			ca->slot_info[slot].timeout =
				jiffies + (INIT_TIMEOUT_SECS * HZ);

			ca->slot_info[slot].slot_state =
				DVB_CA_SLOTSTATE_WAITLINKINIT;
			ca->wakeup = 1;
			break;
		}

		if (ca->slot_info[slot].rx_buffer.data == NULL) {
			rxbuf = vmalloc(RX_BUFFER_SIZE);
			if (rxbuf == NULL) {
				pr_error("dvb_ca adapter %d: ",
					 ca->dvbdev->adapter->num);
				pr_error("Unable to allocate CAM rx buffer\n");
				ca->slot_info[slot].slot_state =
					DVB_CA_SLOTSTATE_INVALID;
				dvb_ca_en50221_thread_update_delay(ca);
				break;
			}
			dvb_ringbuffer_init(&ca->slot_info[slot].rx_buffer,
					    rxbuf, RX_BUFFER_SIZE);
		}

		ca->pub->slot_ts_enable(ca->pub, slot);
		ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_RUNNING;
		dvb_ca_en50221_thread_update_delay(ca);
		dprintk("%d: DVB CAM Initialised successfully\n",
			ca->dvbdev->adapter->num);
		break;

	case DVB_CA_SLOTSTATE_RUNNING:
		if (!ca->open)
			break;

		pktcount = 0;
		while ((status = dvb_ca_en50221_read_data(ca, slot, NULL, 0))
			> 0) {
			if (!ca->open)
				break;

			/* if a CAMCHANGE occurred at some point,
			   do not do any more processing of this slot */
			if (dvb_ca_en50221_check_camstatus(ca, slot)) {
				/* we dont want to sleep on the next iteration
				   so we can handle the cam change*/
				ca->wakeup = 1;
				break;
			}

			/* check if we've hit our limit this time */
			if (++pktcount >= MAX_RX_PACKETS_PER_ITERATION) {
				/*dont sleep;
				there is likely to be more data to read*/
				ca->wakeup = 1;
				break;
			}
		}
		break;
	}

	mutex_unlock(&ca->slot_info[slot].slot_lock);
	return 0;
}


/**
 * Kernel thread which monitors CA slots for CAM changes,
 * and performs data transfers.
 */
static int dvb_ca_en50221_thread(void *data)
{
	struct dvb_ca_private *ca = data;
	int slot;

	dprintk(" %s\n", __func__);
	/* choose the correct initial delay */
	dvb_ca_en50221_thread_update_delay(ca);

	/* main loop */
	while (!kthread_should_stop()) {
		/* sleep for a bit */
		/* skip the sleep entirely when a wakeup was requested,
		 * so pending work is handled without delay */
		if (!ca->wakeup) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(ca->delay);
			if (kthread_should_stop())
				return 0;
		}
		ca->wakeup = 0;

		/* go through all the slots processing them */
		for (slot = 0; slot < ca->slot_count; slot++)
			dvb_ca_en50221_slot_process(ca, slot);
	}

	return 0;
}



/* ************************************************************************** */
/* EN50221 IO interface functions */

/**
 * Real ioctl implementation.
 * NOTE: CA_SEND_MSG/CA_GET_MSG ioctls have userspace buffers passed to them.
 *
 * @param inode Inode concerned.
 * @param file File concerned.
 * @param cmd IOCTL command.
 * @param arg Associated argument.
 *
 * @return 0 on success, <0 on error.
+ */ +static int dvb_ca_en50221_io_do_ioctl(struct file *file, + unsigned int cmd, void *parg) +{ + struct dvb_device *dvbdev = file->private_data; + struct dvb_ca_private *ca = dvbdev->priv; + int err = 0; + int slot; + + if (mutex_lock_interruptible(&ca->ioctl_mutex)) { + pr_error("ci lock interrupt error\r\n"); + return -ERESTARTSYS; + } + + switch (cmd) { + case CA_RESET: + dprintk("ci reset---\r\n"); + for (slot = 0; slot < ca->slot_count; slot++) { + mutex_lock(&ca->slot_info[slot].slot_lock); + if (ca->slot_info[slot].slot_state + == DVB_CA_SLOTSTATE_NONE) + goto next; + dvb_ca_en50221_slot_shutdown(ca, slot); + if (ca->flags + & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE) + dvb_ca_en50221_cimax_camchange_irq( + ca->pub, + slot, + DVB_CA_EN50221_CAMCHANGE_INSERTED); +next: + mutex_unlock(&ca->slot_info[slot].slot_lock); + } + ca->next_read_slot = 0; + dvb_ca_en50221_thread_wakeup(ca); + break; + + case CA_GET_CAP: { + struct ca_caps *caps = parg; + + caps->slot_num = ca->slot_count; + caps->slot_type = CA_CI_LINK; + caps->descr_num = 0; + caps->descr_type = 0; + break; + } + + case CA_GET_SLOT_INFO: { + struct ca_slot_info *info = parg; + + if ((info->num > ca->slot_count) || (info->num < 0)) { + err = -EINVAL; + pr_error("info num error :%d\r\n", info->num); + goto out_unlock; + } + + info->type = CA_CI_LINK; + info->flags = 0; + if ((ca->slot_info[info->num].slot_state + != DVB_CA_SLOTSTATE_NONE) + && (ca->slot_info[info->num].slot_state + != DVB_CA_SLOTSTATE_INVALID)) { + info->flags = CA_CI_MODULE_PRESENT; + } + if (ca->slot_info[info->num].slot_state + == DVB_CA_SLOTSTATE_RUNNING) { + info->flags |= CA_CI_MODULE_READY; + } + break; + } + + default: + pr_error("Invalid cmd :%d\r\n", cmd); + err = -EINVAL; + break; + } + +out_unlock: + mutex_unlock(&ca->ioctl_mutex); + return err; +} + + +static int dvb_usercopy__(struct file *file, + unsigned int cmd, unsigned long arg, + int (*func)(struct file *file, + unsigned int cmd, void *arg)) +{ + char sbuf[128]; + void 
*mbuf = NULL; + void *parg = NULL; + int err = -EINVAL; + + /* Copy arguments into temp kernel buffer */ + switch (_IOC_DIR(cmd)) { + case _IOC_NONE: + /* + * For this command, the pointer is actually an integer + * argument. + */ + parg = (void *) arg; + break; + case _IOC_READ: /* some v4l ioctls are marked wrong ... */ + case _IOC_WRITE: + case (_IOC_WRITE | _IOC_READ): + if (_IOC_SIZE(cmd) <= sizeof(sbuf)) { + parg = sbuf; + } else { + /* too big to allocate from stack */ + mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); + if (NULL == mbuf) + return -ENOMEM; + parg = mbuf; + } + + err = -EFAULT; + if (copy_from_user(parg, (void __user *)arg, _IOC_SIZE(cmd))) + goto out; + break; + } + + /* call driver */ + if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD) + err = -ENOTTY; + + if (err < 0) + goto out; + + /* Copy results into user buffer */ + switch (_IOC_DIR(cmd)) + { + case _IOC_READ: + case (_IOC_WRITE | _IOC_READ): + if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd))) + err = -EFAULT; + break; + } + +out: + kfree(mbuf); + return err; +} + + + +/** + * Wrapper for ioctl implementation. + * + * @param inode Inode concerned. + * @param file File concerned. + * @param cmd IOCTL command. + * @param arg Associated argument. + * + * @return 0 on success, <0 on error. + */ +static long dvb_ca_en50221_io_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + return dvb_usercopy__(file, cmd, arg, dvb_ca_en50221_io_do_ioctl); +} + + +/** + * Implementation of write() syscall. + * + * @param file File structure. + * @param buf Source buffer. + * @param count Size of source buffer. + * @param ppos Position in file (ignored). + * + * @return Number of bytes read, or <0 on error. 
 */
static ssize_t dvb_ca_en50221_io_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dvb_ca_private *ca = dvbdev->priv;
	u8 slot, connection_id;
	int status;
	/* shared write bounce buffer; used under the slot lock below */
	u8 *fragbuf = ca->wbuf;
	int fragpos = 0;
	int fraglen;
	unsigned long timeout;
	int written;

	/* dprintk("%s\n", __func__); */

	/* Incoming packet has a 2 byte header.
	   hdr[0] = slot_id, hdr[1] = connection_id */
	if (count < 2)
		return -EINVAL;

	/* extract slot & connection id */
	if (copy_from_user(&slot, buf, 1))
		return -EFAULT;
	if (copy_from_user(&connection_id, buf + 1, 1))
		return -EFAULT;
	buf += 2;
	count -= 2;

	if (slot >= ca->slot_count)
		return -EFAULT;
	/* check if the slot is actually running */
	if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING)
		return -EINVAL;

	/* fragment the packets & store in the buffer */
	while (fragpos < count) {
		/* each LPDU fragment carries a 2-byte header */
		fraglen = ca->slot_info[slot].link_buf_size - 2;
		if (fraglen < 0)
			break;
		if (fraglen > dvb_ca_en50221_link_size - 2)
			fraglen = dvb_ca_en50221_link_size - 2;
		if ((count - fragpos) < fraglen)
			fraglen = count - fragpos;

		fragbuf[0] = connection_id;
		/* 0x80 = "more fragments follow" */
		fragbuf[1] = ((fragpos + fraglen) < count) ? 0x80 : 0x00;
		status = copy_from_user(fragbuf + 2, buf + fragpos, fraglen);
		if (status) {
			status = -EFAULT;
			goto exit;
		}

		/* retry for up to half a second while the CAM is busy */
		timeout = jiffies + HZ / 2;
		written = 0;
		while (!time_after(jiffies, timeout)) {
			/* check the CAM hasn't been removed/reset
			   in the meantime */
			if (ca->slot_info[slot].slot_state
				!= DVB_CA_SLOTSTATE_RUNNING) {
				status = -EIO;
				goto exit;
			}

			mutex_lock(&ca->slot_info[slot].slot_lock);
			status = dvb_ca_en50221_write_data(ca,
					slot, fragbuf, fraglen + 2);
			mutex_unlock(&ca->slot_info[slot].slot_lock);
			if (status == (fraglen + 2)) {
				written = 1;
				break;
			}
			if (status != -EAGAIN)
				goto exit;

			msleep(20);
		}
		if (!written) {
			status = -EIO;
			goto exit;
		}

		fragpos += fraglen;
	}
	/* count was reduced by the 2 header bytes above, so this
	 * reports the full number of bytes the caller passed in */
	status = count + 2;

exit:
	return status;
}


/**
 * Condition for waking up in dvb_ca_en50221_io_read_condition
 *
 * Scans the slots round-robin starting at next_read_slot; returns
 * nonzero (and sets *_slot) when a complete packet is waiting.
 */
static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca,
		int *result, int *_slot)
{
	int slot;
	int slot_count = 0;
	int idx;
	size_t fraglen;
	int connection_id = -1;
	int found = 0;
	u8 hdr[2];

	slot = ca->next_read_slot;
	while ((slot_count < ca->slot_count) && (!found)) {
		if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING)
			goto nextslot;

		if (ca->slot_info[slot].rx_buffer.data == NULL)
			return 0;
#ifdef READ_LPDU_PKT
		/* a partially-consumed LPDU is always readable */
		if (ca->slot_info[slot].rx_offset != 0) {
			*_slot = slot;
			return 1;
		}
#endif
		idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer,
					      -1, &fraglen);
		while (idx != -1) {
			dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer,
						idx, 0, hdr, 2);
			if (connection_id == -1)
				connection_id = hdr[0];
			/* without READ_LPDU_PKT, only a final fragment
			 * (top bit of hdr[1] clear) makes data readable */
			if ((hdr[0] == connection_id)
#ifndef READ_LPDU_PKT
				&& ((hdr[1] & 0x80) == 0)
#endif
				) {
				*_slot = slot;
				found = 1;
				break;
			}

			idx = dvb_ringbuffer_pkt_next(
					&ca->slot_info[slot].rx_buffer,
					idx,
					&fraglen);
		}

nextslot:
		slot = (slot + 1) % ca->slot_count;
		slot_count++;
	}

	/* remember where to resume so all slots get served fairly */
	ca->next_read_slot = slot;
	return found;
}


/**
 * Implementation of read() syscall.
 *
 * @param file File structure.
 * @param buf Destination buffer.
 * @param count Size of destination buffer.
 * @param ppos Position in file (ignored).
 *
 * @return Number of bytes read, or <0 on error.
 */
static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dvb_ca_private *ca = dvbdev->priv;
	int status;
	int result = 0;
	u8 hdr[2];
	int slot;
	int connection_id = -1;
	/* NOTE(review): idx is size_t but dvb_ringbuffer_pkt_next()
	 * signals "no packet" with -1; the comparisons below rely on
	 * the implicit conversion -- upstream uses int here. */
	size_t idx, idx2;
	int last_fragment = 0;
	size_t fraglen;
	int pktlen;
	int dispose = 0;

#ifdef READ_LPDU_PKT
	int offset;
	u8 flag = 0;
#endif

	/* dprintk("%s\n", __func__); */

	/* Outgoing packet has a 2 byte header.
	   hdr[0] = slot_id, hdr[1] = connection_id */
	if (count < 2)
		return -EINVAL;

	/* wait for some data */
	status = dvb_ca_en50221_io_read_condition(ca, &result, &slot);
	if (status == 0) {
		/* if we're in nonblocking mode, exit immediately */
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		/* wait for some data */
		status = wait_event_interruptible(ca->wait_queue,
				dvb_ca_en50221_io_read_condition
				(ca, &result, &slot));
	}
	if ((status < 0) || (result < 0)) {
		if (result)
			return result;
		return status;
	}

	idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer,
				      -1, &fraglen);
	/* reserve 2 bytes for the slot/connection header written below */
	pktlen = 2;
	do {
		if (idx == -1) {
			pr_error("dvb_ca adapter %d: ",
				 ca->dvbdev->adapter->num);
			pr_error("BUG: read packet ended");
			pr_error("before last_fragment encountered\n");
			status = -EIO;
			goto exit;
		}
#ifdef READ_LPDU_PKT
		/* resume inside a fragment we only partially returned */
		offset = 2 + ca->slot_info[slot].rx_offset;
#endif
		dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer,
					idx, 0, hdr, 2);
		if (connection_id == -1)
			connection_id = hdr[0];
#ifdef READ_LPDU_PKT
		flag = hdr[1];
		if (hdr[0] == connection_id) {
			if (pktlen < count) {
				if ((pktlen + fraglen - offset) > (count - 2)) {
					fraglen = (count - 2) - pktlen;
					ca->slot_info[slot].rx_offset +=
						fraglen;
					/* more data for user,
					   but cannot send,
					   so force return to user,
					   rather than dispose of it */
					flag |= 0x80;
				} else {
					ca->slot_info[slot].rx_offset = 0;
					fraglen -= offset;
					dispose = 1;
				}

				status = dvb_ringbuffer_pkt_read_user(
						&ca->slot_info[slot].rx_buffer,
						idx,
						offset,
						buf + pktlen + 2,
						fraglen);
				if (status < 0)
					goto exit;
				pktlen += fraglen;
			}

			last_fragment = 1;
		}
#else
		if (hdr[0] == connection_id) {
			if (pktlen < count) {
				if ((pktlen + fraglen - 2) > count)
					fraglen = count - pktlen;
				else
					fraglen -= 2;

				status = dvb_ringbuffer_pkt_read_user(
						&ca->slot_info[slot].rx_buffer,
						idx,
						2,
						buf + pktlen,
						fraglen);
				if (status < 0)
					goto exit;
				pktlen += fraglen;
			}

			/* top bit clear = this was the final fragment */
			if ((hdr[1] & 0x80) == 0)
				last_fragment = 1;
			dispose = 1;
		}
#endif
		/* fetch the successor before disposing of the current one */
		idx2 = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer,
					       idx, &fraglen);
		if (dispose)
			dvb_ringbuffer_pkt_dispose(
				&ca->slot_info[slot].rx_buffer, idx);
		idx = idx2;
		dispose = 0;
	} while (!last_fragment);

	/* write the 2-byte header back to the start of the user buffer */
	hdr[0] = slot;
	hdr[1] = connection_id;
	status = copy_to_user(buf, hdr, 2);
	if (status) {
		status = -EFAULT;
		goto exit;
	}
	status = pktlen;

#ifdef READ_LPDU_PKT
	/* extra 2-byte trailer: fragment flag for the LPDU variant */
	hdr[0] = flag;
	hdr[1] = 0;

	status = copy_to_user(buf + 2, hdr, 2);
	if (status) {
		status = -EFAULT;
		goto exit;
	}
	status = pktlen + 2;
#endif
exit:
	return status;
}


/**
 * Implementation of file open syscall.
 *
 * @param inode Inode concerned.
 * @param file File concerned.
 *
 * @return 0 on success, <0 on failure.
 */
static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dvb_ca_private *ca = dvbdev->priv;
	int err;
	int i;

	dprintk("%s\n", __func__);

	/* pin the slot-driver module for the lifetime of this open fd */
	if (!try_module_get(ca->pub->owner))
		return -EIO;

	err = dvb_generic_open(inode, file);
	if (err < 0) {
		module_put(ca->pub->owner);
		return err;
	}

	/* drop any stale data received while the device was closed */
	for (i = 0; i < ca->slot_count; i++) {

		if (ca->slot_info[i].slot_state == DVB_CA_SLOTSTATE_RUNNING) {
			if (ca->slot_info[i].rx_buffer.data != NULL) {
				/* it is safe to call this here without locks
				because ca->open == 0.
				Data is not read in this case */
				dvb_ringbuffer_flush(
					&ca->slot_info[i].rx_buffer);
			}
		}
	}

	ca->open = 1;
	dvb_ca_en50221_thread_update_delay(ca);
	dvb_ca_en50221_thread_wakeup(ca);

	return 0;
}


/**
 * Implementation of file close syscall.
 *
 * @param inode Inode concerned.
 * @param file File concerned.
 *
 * @return 0 on success, <0 on failure.
 */
static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dvb_ca_private *ca = dvbdev->priv;
	int err;

	dprintk("%s\n", __func__);

	/* mark the CA device as closed */
	ca->open = 0;
	dvb_ca_en50221_thread_update_delay(ca);

	err = dvb_generic_release(inode, file);

	/* release the reference taken in dvb_ca_en50221_io_open() */
	module_put(ca->pub->owner);

	return err;
}


/**
 * Implementation of poll() syscall.
 *
 * @param file File concerned.
 * @param wait poll wait table.
 *
 * @return Standard poll mask.
 */
static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dvb_ca_private *ca = dvbdev->priv;
	unsigned int mask = 0;
	int slot;
	int result = 0;

	/* dprintk("%s\n", __func__); */

	if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1)
		mask |= POLLIN;

	/* if there is something, return now */
	if (mask)
		return mask;

	/* wait for something to happen */
	poll_wait(file, &ca->wait_queue, wait);

	/* re-check: data may have arrived between the first test and
	 * registering on the wait queue */
	if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1)
		mask |= POLLIN;

	return mask;
}
/* NOTE(review): exported here, far from its definition further below; this
 * relies on the extern declaration in dvb_ca_en50221_cimax.h being in scope.
 * The EXPORT_SYMBOL for _release is similarly displaced (after _init). */
EXPORT_SYMBOL(dvb_ca_en50221_cimax_init);

#ifdef CONFIG_COMPAT
/* 32-bit-userland ioctl shim: translate the compat pointer, then forward to
 * the regular unlocked_ioctl handler. */
static long dvb_ca_en50221_compat_ioctl(struct file *filp,
	unsigned int cmd, unsigned long args)
{
	unsigned long ret;

	args = (unsigned long)compat_ptr(args);
	ret = dvb_ca_en50221_io_ioctl(filp, cmd, args);
	return ret;
}
#endif
static const struct file_operations dvb_ca_fops = {
	.owner = THIS_MODULE,
	.read = dvb_ca_en50221_io_read,
	.write = dvb_ca_en50221_io_write,
	.unlocked_ioctl = dvb_ca_en50221_io_ioctl,
	.open = dvb_ca_en50221_io_open,
	.release = dvb_ca_en50221_io_release,
	.poll = dvb_ca_en50221_io_poll,
	.llseek = noop_llseek,
#ifdef CONFIG_COMPAT
	.compat_ioctl = dvb_ca_en50221_compat_ioctl,
#endif
};

/* Template copied by dvb_register_device(); single user, one reader/writer. */
static struct dvb_device dvbdev_ca = {
	.priv = NULL,
	.users = 1,
	.readers = 1,
	.writers = 1,
	.fops = &dvb_ca_fops,
};


/* ************************************************************************** */
/* Initialisation/shutdown functions */


/**
 * Initialise a new DVB CA EN50221 CIMAX interface device.
 *
 * @param dvb_adapter DVB adapter to attach the new CA device to.
 * @param pubca The dvb_ca_en50221_cimax instance.
 * @param flags Flags describing the CA device (DVB_CA_FLAG_*).
 * @param slot_count Number of slots supported.
 *
 * @return 0 on success, nonzero on failure
 */
int dvb_ca_en50221_cimax_init(struct dvb_adapter *dvb_adapter,
	struct dvb_ca_en50221_cimax *pubca,
	int flags, int slot_count)
{
	int ret;
	struct dvb_ca_private *ca = NULL;
	int i;

	dprintk("%s\n", __func__);

	if (slot_count < 1)
		return -EINVAL;

	/* initialise the system data */
	ca = kzalloc(sizeof(struct dvb_ca_private), GFP_KERNEL);
	if (ca == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ca->pub = pubca;
	ca->flags = flags;
	ca->slot_count = slot_count;
	ca->slot_info =
		kcalloc(slot_count, sizeof(struct dvb_ca_slot), GFP_KERNEL);
	if (ca->slot_info == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	init_waitqueue_head(&ca->wait_queue);
	ca->open = 0;
	ca->wakeup = 0;
	ca->next_read_slot = 0;
	ca->rbuf = kcalloc(HOST_LINK_BUF_SIZE, 1, GFP_KERNEL);
	ca->wbuf = kcalloc(HOST_LINK_BUF_SIZE, 1, GFP_KERNEL);
	if (ca->rbuf == NULL || ca->wbuf == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	/* publish the private state before registering the device node */
	pubca->private = ca;

	/* register the DVB device */
	ret = dvb_register_device(dvb_adapter,
		&ca->dvbdev, &dvbdev_ca, ca, DVB_DEVICE_CA, 0);
	if (ret)
		goto error;

	/* now initialise each slot */
	for (i = 0; i < slot_count; i++) {
		memset(&ca->slot_info[i], 0, sizeof(struct dvb_ca_slot));
		ca->slot_info[i].slot_state = DVB_CA_SLOTSTATE_NONE;
		atomic_set(&ca->slot_info[i].camchange_count, 0);
		ca->slot_info[i].camchange_type =
			DVB_CA_EN50221_CAMCHANGE_REMOVED;
		mutex_init(&ca->slot_info[i].slot_lock);
	}

	mutex_init(&ca->ioctl_mutex);

	if (signal_pending(current)) {
		ret = -EINTR;
		goto error;
	}
	/* full barrier kept from original vendor code; exact purpose is not
	 * evident from this file — presumably ensures all init stores are
	 * visible before the monitor thread starts (TODO confirm) */
	mb();/*original*/

	/* create a kthread for monitoring this CA device */
	ca->thread = kthread_run(dvb_ca_en50221_thread, ca, "kdvb-ca-%i:%i",
		ca->dvbdev->adapter->num, ca->dvbdev->id);
	if (IS_ERR(ca->thread)) {
		ret = PTR_ERR(ca->thread);
		printk("dvb_ca_init: failed to start kernel_thread (%d)\n",
			ret);
		goto error;
	}
	return 0;

error:
	/* unwind whatever was allocated/registered before the failure */
	if (ca != NULL) {
		if (ca->dvbdev != NULL)
			dvb_unregister_device(ca->dvbdev);
		kfree(ca->slot_info);
		kfree(ca->rbuf);
		kfree(ca->wbuf);
		kfree(ca);
	}
	pubca->private = NULL;
	return ret;
}
EXPORT_SYMBOL(dvb_ca_en50221_cimax_release);



/**
 * Release a DVB CA EN50221 CIMAX interface device.
 *
 * @param pubca The associated dvb_ca_en50221_cimax instance whose core-side
 *              state (pubca->private) is to be torn down and freed.
 */
void dvb_ca_en50221_cimax_release(struct dvb_ca_en50221_cimax *pubca)
{
	struct dvb_ca_private *ca = pubca->private;
	int i;

	dprintk("%s\n", __func__);

	/* shutdown the thread if there was one */
	kthread_stop(ca->thread);

	for (i = 0; i < ca->slot_count; i++) {
		dvb_ca_en50221_slot_shutdown(ca, i);
		vfree(ca->slot_info[i].rx_buffer.data);
	}
	kfree(ca->slot_info);
	kfree(ca->rbuf);
	kfree(ca->wbuf);
	dvb_unregister_device(ca->dvbdev);
	kfree(ca);
	pubca->private = NULL;
}
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/dvb_ca_en50221_cimax.h b/drivers/stream_input/parser/dvb_ci/cimax/dvb_ca_en50221_cimax.h new file mode 100644 index 0000000..e2ebde6 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/dvb_ca_en50221_cimax.h
/*
 * dvb_ca_cimax.h: generic DVB functions for EN50221 CA CIMAX interfaces
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _DVB_CA_EN50221_CIMAX_H_
#define _DVB_CA_EN50221_CIMAX_H_

#include <linux/list.h>
#include <linux/dvb/ca.h>

#include "dvbdev.h"

/* slot-status bits returned by poll_slot_status() */
#define DVB_CA_EN50221_POLL_CAM_PRESENT 1
#define DVB_CA_EN50221_POLL_CAM_CHANGED 2
#define DVB_CA_EN50221_POLL_CAM_READY 4

/* capability flags passed as 'flags' to dvb_ca_en50221_cimax_init() */
#define DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE 1
#define DVB_CA_EN50221_FLAG_IRQ_FR 2
#define DVB_CA_EN50221_FLAG_IRQ_DA 4

/* change_type values for dvb_ca_en50221_cimax_camchange_irq() */
#define DVB_CA_EN50221_CAMCHANGE_REMOVED 0
#define DVB_CA_EN50221_CAMCHANGE_INSERTED 1

#define DVB_CA_EN50221_CAP_IRQ 1

/* Structure describing a CA interface */
struct dvb_ca_en50221_cimax {

	/* the module owning this structure */
	struct module *owner;

	/* NOTE: the read_*, write_* and poll_slot_status functions will be
	 * called for different slots concurrently and need to use locks where
	 * and if appropriate. There will be no concurrent access to one slot.
	 */

	/* functions for accessing attribute memory on the CAM */
	int (*read_cis)(struct dvb_ca_en50221_cimax *ca,
		int slot, u8 *buf, int size);
	int (*write_cor)(struct dvb_ca_en50221_cimax *ca,
		int slot, int address, u8 *buf);
	/*return the final size or -1 for error*/
	int (*negotiate)(struct dvb_ca_en50221_cimax *ca, int slot, int size);

	/* functions for accessing the control interface on the CAM */
	int (*read_lpdu)(struct dvb_ca_en50221_cimax *ca,
		int slot, u8 *buf, int size);
	int (*write_lpdu)(struct dvb_ca_en50221_cimax *ca,
		int slot, u8 *buf, int size);

	/* reset the CAM (as opposed to slot_reset, which resets the slot) */
	int (*cam_reset)(struct dvb_ca_en50221_cimax *ca, int slot);
	int (*read_cam_status)(struct dvb_ca_en50221_cimax *ca, int slot);
	/* NOTE(review): member name is a typo of 'get_capability'; kept as-is
	 * because renaming would break every driver implementing this struct */
	int (*get_capbility)(struct dvb_ca_en50221_cimax *ca, int slot);

	/* Functions for controlling slots */
	int (*slot_reset)(struct dvb_ca_en50221_cimax *ca, int slot);
	int (*slot_shutdown)(struct dvb_ca_en50221_cimax *ca, int slot);
	int (*slot_ts_enable)(struct dvb_ca_en50221_cimax *ca, int slot);

	/*
	 * Poll slot status.
	 * Only necessary if DVB_CA_FLAG_EN50221_IRQ_CAMCHANGE is not set
	 */
	int (*poll_slot_status)(struct dvb_ca_en50221_cimax *ca,
		int slot, int open);

	/* private data, used by caller */
	void *data;

	/* Opaque data used by the dvb_ca core. Do not modify! */
	void *private;
};




/* ************************************************************************* */
/* Functions for reporting IRQ events */

/**
 * A CAMCHANGE IRQ has occurred.
 *
 * @param pubca CA instance.
 * @param slot Slot concerned.
 * @param change_type One of the DVB_CA_EN50221_CAMCHANGE_* values
 */
void dvb_ca_en50221_cimax_camchange_irq(struct dvb_ca_en50221_cimax *pubca,
	int slot, int change_type);

/**
 * A CAMREADY IRQ has occurred.
 *
 * @param pubca CA instance.
 * @param slot Slot concerned.
 */
void dvb_ca_en50221_cimax_camready_irq(struct dvb_ca_en50221_cimax *pubca,
	int slot);

/**
 * An FR or a DA IRQ has occurred.
 *
 * @param ca CA instance.
 * @param slot Slot concerned.
 */
void dvb_ca_en50221_cimax_frda_irq(struct dvb_ca_en50221_cimax *ca, int slot);



/* ************************************************************************** */
/* Initialisation/shutdown functions */

/**
 * Initialise a new DVB CA device.
 *
 * @param dvb_adapter DVB adapter to attach the new CA device to.
 * @param ca The dvb_ca instance.
 * @param flags Flags describing the CA device (DVB_CA_EN50221_FLAG_*).
 * @param slot_count Number of slots supported.
 *
 * @return 0 on success, nonzero on failure
 */
extern int dvb_ca_en50221_cimax_init(struct dvb_adapter *dvb_adapter,
	struct dvb_ca_en50221_cimax *ca, int flags, int slot_count);

/**
 * Release a DVB CA device.
 *
 * @param ca The associated dvb_ca instance.
 */
extern void dvb_ca_en50221_cimax_release(struct dvb_ca_en50221_cimax *ca);



#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/dvb_ringbuffer.c b/drivers/stream_input/parser/dvb_ci/cimax/dvb_ringbuffer.c new file mode 100644 index 0000000..40615d2 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/dvb_ringbuffer.c
/*
 *
 * dvb_ringbuffer.c: ring buffer implementation for the dvb driver
 *
 * Copyright (C) 2003 Oliver Endriss
 * Copyright (C) 2004 Andrew de Quincey
 *
 * based on code originally found in av7110.c & dvb_ci.c:
 * Copyright (C) 1999-2003 Ralph Metzler
 *                       & Marcus Metzler for convergence integrated media GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */



#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "dvb_ringbuffer.h"

/* per-packet status byte (3rd byte of the packet header) */
#define PKT_READY 0
#define PKT_DISPOSED 1


/* Initialise the ring buffer over caller-provided storage 'data' of 'len'
 * bytes; read/write pointers, error flag, wait queue and lock are reset. */
void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
{
	rbuf->pread=rbuf->pwrite=0;
	rbuf->data=data;
	rbuf->size=len;
	rbuf->error=0;

	init_waitqueue_head(&rbuf->queue);

	spin_lock_init(&(rbuf->lock));
}



/* Return non-zero when the buffer holds no data (pread == pwrite). */
int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf)
{
	/* smp_load_acquire() to load write pointer on reader side
	 * this pairs with smp_store_release() in dvb_ringbuffer_write(),
	 * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
	 *
	 * for memory barriers also see Documentation/circular-buffers.txt
	 */
	return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));
}



/* Number of bytes that may still be written; one byte is always kept free
 * so a full buffer can be distinguished from an empty one. */
ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
{
	ssize_t free;

	/* READ_ONCE() to load read pointer on writer side
	 * this pairs with smp_store_release() in dvb_ringbuffer_read(),
	 * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(),
	 * or dvb_ringbuffer_reset()
	 */
	free = READ_ONCE(rbuf->pread) - rbuf->pwrite;
	if (free <= 0)
		free += rbuf->size;
	return free-1;
}



/* Number of bytes currently available to read. */
ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
{
	ssize_t avail;

	/* smp_load_acquire() to load write pointer on reader side
	 * this pairs with smp_store_release() in dvb_ringbuffer_write(),
	 * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
	 */
	avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread;
	if (avail < 0)
		avail += rbuf->size;
	return avail;
}



/* Discard all unread data and clear the error flag. */
void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf)
{
	/* dvb_ringbuffer_flush() counts as read operation
	 * smp_load_acquire() to load write pointer
	 * smp_store_release() to update read pointer, this ensures that the
	 * correct pointer is visible for subsequent dvb_ringbuffer_free()
	 * calls on other cpu cores
	 */
	smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));
	rbuf->error = 0;
}
/* EXPORT_SYMBOL(dvb_ringbuffer_flush) intentionally disabled here */

/* Reset both pointers to zero and clear the error flag. */
void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf)
{
	/* dvb_ringbuffer_reset() counts as read and write operation
	 * smp_store_release() to update read pointer
	 */
	smp_store_release(&rbuf->pread, 0);
	/* smp_store_release() to update write pointer */
	smp_store_release(&rbuf->pwrite, 0);
	rbuf->error = 0;
}

/* Flush under the buffer spinlock (IRQ-safe) and wake any waiters. */
void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf)
{
	unsigned long flags;

	spin_lock_irqsave(&rbuf->lock, flags);
	dvb_ringbuffer_flush(rbuf);
	spin_unlock_irqrestore(&rbuf->lock, flags);

	wake_up(&rbuf->queue);
}

/* Copy 'len' bytes to userspace, handling wrap-around in (at most) two
 * copy_to_user() calls; advances the read pointer. Returns len or -EFAULT. */
ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, size_t len)
{
	size_t todo = len;
	size_t split;

	/* 'split' is the chunk up to the physical end of the buffer */
	split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0;
	if (split > 0) {
		if (copy_to_user(buf, rbuf->data+rbuf->pread, split))
			return -EFAULT;
		buf += split;
		todo -= split;
		/* smp_store_release() for read pointer update to ensure
		 * that buf is not overwritten until read is complete,
		 * this pairs with READ_ONCE() in dvb_ringbuffer_free()
		 */
		smp_store_release(&rbuf->pread, 0);
	}
	if (copy_to_user(buf, rbuf->data+rbuf->pread, todo))
		return -EFAULT;

	/* smp_store_release() to update read pointer, see above */
	smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);

	return len;
}

/* Kernel-space variant of the above; always copies 'len' bytes. */
void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
{
	size_t todo = len;
	size_t split;

	split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0;
	if (split > 0) {
		memcpy(buf, rbuf->data+rbuf->pread, split);
		buf += split;
		todo -= split;
		/* smp_store_release() for read pointer update to ensure
		 * that buf is not overwritten until read is complete,
		 * this pairs with READ_ONCE() in dvb_ringbuffer_free()
		 */
		smp_store_release(&rbuf->pread, 0);
	}
	memcpy(buf, rbuf->data+rbuf->pread, todo);

	/* smp_store_release() to update read pointer, see above */
	smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
}


/* Copy 'len' bytes from kernel memory into the buffer, handling wrap-around;
 * advances the write pointer. NOTE(review): does not check free space — the
 * caller is expected to have checked dvb_ringbuffer_free() first. Always
 * returns len. */
ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t len)
{
	size_t todo = len;
	size_t split;

	split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0;

	if (split > 0) {
		memcpy(rbuf->data+rbuf->pwrite, buf, split);
		buf += split;
		todo -= split;
		/* smp_store_release() for write pointer update to ensure that
		 * written data is visible on other cpu cores before the pointer
		 * update, this pairs with smp_load_acquire() in
		 * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
		 */
		smp_store_release(&rbuf->pwrite, 0);
	}
	memcpy(rbuf->data+rbuf->pwrite, buf, todo);
	/* smp_store_release() for write pointer update, see above */
	smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);

	return len;
}

/* Userspace variant of dvb_ringbuffer_write(). On a copy_from_user() fault
 * the number of bytes already committed is returned.
 * NOTE(review): a fault on the very first copy returns 0, not -EFAULT —
 * confirm callers handle a short (possibly zero) return. */
ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
	const u8 __user *buf, size_t len)
{
	int status;
	size_t todo = len;
	size_t split;

	split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0;

	if (split > 0) {
		status = copy_from_user(rbuf->data+rbuf->pwrite, buf, split);
		if (status)
			return len - todo;
		buf += split;
		todo -= split;
		/* smp_store_release() for write pointer update to ensure that
		 * written data is visible on other cpu cores before the pointer
		 * update, this pairs with smp_load_acquire() in
		 * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
		 */
		smp_store_release(&rbuf->pwrite, 0);
	}
	status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
	if (status)
		return len - todo;
	/* smp_store_release() for write pointer update, see above */
	smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);

	return len;
}

/* Write a framed packet: 3-byte header (16-bit big-endian length + status
 * byte, initially PKT_READY) followed by the payload.
 * NOTE(review): dvb_ringbuffer_write() as implemented above always returns
 * len, so the rollback branch below appears unreachable — confirm. */
ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf, size_t len)
{
	int status;
	ssize_t oldpwrite = rbuf->pwrite;

	DVB_RINGBUFFER_WRITE_BYTE(rbuf, len >> 8);
	DVB_RINGBUFFER_WRITE_BYTE(rbuf, len & 0xff);
	DVB_RINGBUFFER_WRITE_BYTE(rbuf, PKT_READY);
	status = dvb_ringbuffer_write(rbuf, buf, len);

	if (status < 0)
		rbuf->pwrite = oldpwrite;
	return status;
}

/* Copy up to 'len' payload bytes of the packet at 'idx' (starting at
 * 'offset') to userspace. Does NOT advance the read pointer or dispose of
 * the packet. Returns bytes copied, -EINVAL for a bad offset, or -EFAULT. */
ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
	int offset, u8 __user *buf, size_t len)
{
	size_t todo;
	size_t split;
	size_t pktlen;

	/* packet length is stored big-endian in the first two header bytes */
	pktlen = rbuf->data[idx] << 8;
	pktlen |= rbuf->data[(idx + 1) % rbuf->size];
	if (offset > pktlen)
		return -EINVAL;
	if ((offset + len) > pktlen)
		len = pktlen - offset;

	idx = (idx + DVB_RINGBUFFER_PKTHDRSIZE + offset) % rbuf->size;
	todo = len;
	split = ((idx + len) > rbuf->size) ? rbuf->size - idx : 0;
	if (split > 0) {
		if (copy_to_user(buf, rbuf->data+idx, split))
			return -EFAULT;
		buf += split;
		todo -= split;
		idx = 0;
	}
	if (copy_to_user(buf, rbuf->data+idx, todo))
		return -EFAULT;

	return len;
}

/* Kernel-space variant of dvb_ringbuffer_pkt_read_user(). Like it, this
 * does NOT advance the read pointer. Returns bytes copied or -EINVAL. */
ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
	int offset, u8* buf, size_t len)
{
	size_t todo;
	size_t split;
	size_t pktlen;

	pktlen = rbuf->data[idx] << 8;
	pktlen |= rbuf->data[(idx + 1) % rbuf->size];
	if (offset > pktlen)
		return -EINVAL;
	if ((offset + len) > pktlen)
		len = pktlen - offset;

	idx = (idx + DVB_RINGBUFFER_PKTHDRSIZE + offset) % rbuf->size;
	todo = len;
	split = ((idx + len) > rbuf->size) ? rbuf->size - idx : 0;
	if (split > 0) {
		memcpy(buf, rbuf->data+idx, split);
		buf += split;
		todo -= split;
		idx = 0;
	}
	memcpy(buf, rbuf->data+idx, todo);
	return len;
}

/* Mark the packet at 'idx' as disposed, then reclaim buffer space occupied
 * by any run of disposed packets at the head of the buffer. */
void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx)
{
	size_t pktlen;

	rbuf->data[(idx + 2) % rbuf->size] = PKT_DISPOSED;

	/* clean up disposed packets */
	while(dvb_ringbuffer_avail(rbuf) > DVB_RINGBUFFER_PKTHDRSIZE) {
		if (DVB_RINGBUFFER_PEEK(rbuf, 2) == PKT_DISPOSED) {
			pktlen = DVB_RINGBUFFER_PEEK(rbuf, 0) << 8;
			pktlen |= DVB_RINGBUFFER_PEEK(rbuf, 1);
			DVB_RINGBUFFER_SKIP(rbuf, pktlen + DVB_RINGBUFFER_PKTHDRSIZE);
		} else {
			/* first packet is not disposed, so we stop cleaning now */
			break;
		}
	}
}

/* Find the next PKT_READY packet after 'idx' (or the first packet when idx
 * is (size_t)-1 — the comparison below relies on -1 converting to SIZE_MAX
 * as the sentinel). On success stores the payload length in *pktlen and
 * returns the packet's index; returns -1 when no packet is available. */
ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen)
{
	int consumed;
	int curpktlen;
	int curpktstatus;

	if (idx == -1) {
		idx = rbuf->pread;
	} else {
		/* skip past the packet we were given */
		curpktlen = rbuf->data[idx] << 8;
		curpktlen |= rbuf->data[(idx + 1) % rbuf->size];
		idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
	}

	consumed = (idx - rbuf->pread) % rbuf->size;

	while((dvb_ringbuffer_avail(rbuf) - consumed) > DVB_RINGBUFFER_PKTHDRSIZE) {

		curpktlen = rbuf->data[idx] << 8;
		curpktlen |= rbuf->data[(idx + 1) % rbuf->size];
		curpktstatus = rbuf->data[(idx + 2) % rbuf->size];

		if (curpktstatus == PKT_READY) {
			*pktlen = curpktlen;
			return idx;
		}

		consumed += curpktlen + DVB_RINGBUFFER_PKTHDRSIZE;
		idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
	}

	/* no packets available */
	return -1;
}


#if 0
EXPORT_SYMBOL(dvb_ringbuffer_init);
EXPORT_SYMBOL(dvb_ringbuffer_empty);
EXPORT_SYMBOL(dvb_ringbuffer_free);
EXPORT_SYMBOL(dvb_ringbuffer_avail);
EXPORT_SYMBOL(dvb_ringbuffer_flush_spinlock_wakeup);
EXPORT_SYMBOL(dvb_ringbuffer_read_user);
EXPORT_SYMBOL(dvb_ringbuffer_read);
EXPORT_SYMBOL(dvb_ringbuffer_write);
EXPORT_SYMBOL(dvb_ringbuffer_write_user);

EXPORT_SYMBOL(dvb_ringbuffer_pkt_read);
EXPORT_SYMBOL(dvb_ringbuffer_pkt_write);
EXPORT_SYMBOL(dvb_ringbuffer_pkt_next);
EXPORT_SYMBOL(dvb_ringbuffer_pkt_dispose);
EXPORT_SYMBOL(dvb_ringbuffer_pkt_read_user);
#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/dvb_ringbuffer.h b/drivers/stream_input/parser/dvb_ci/cimax/dvb_ringbuffer.h new file mode 100644 index 0000000..bbe9487 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/dvb_ringbuffer.h
/*
 *
 * dvb_ringbuffer.h: ring buffer implementation for the dvb driver
 *
 * Copyright (C) 2003 Oliver Endriss
 * Copyright (C) 2004 Andrew de Quincey
 *
 * based on code originally found in av7110.c & dvb_ci.c:
 * Copyright (C) 1999-2003 Ralph Metzler & Marcus Metzler
 *                         for convergence integrated media GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#ifndef _DVB_RINGBUFFER_H_
#define _DVB_RINGBUFFER_H_

#include <linux/spinlock.h>
#include <linux/wait.h>

/**
 * struct dvb_ringbuffer - Describes a ring buffer used at DVB framework
 *
 * @data: Area where the ringbuffer data is written
 * @size: size of the ringbuffer
 * @pread: next position to read
 * @pwrite: next position to write
 * @error: used by ringbuffer clients to indicate that an error happened.
 * @queue: Wait queue used by ringbuffer clients to indicate when buffer
 *         was filled
 * @lock: Spinlock used to protect the ringbuffer
 */
struct dvb_ringbuffer {
	u8 *data;
	ssize_t size;
	ssize_t pread;
	ssize_t pwrite;
	int error;

	wait_queue_head_t queue;
	spinlock_t lock;
};

/* size of the per-packet header (16-bit length + 1 status byte) */
#define DVB_RINGBUFFER_PKTHDRSIZE 3

/**
 * dvb_ringbuffer_init - initialize ring buffer, lock and queue
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 * @data: pointer to the buffer where the data will be stored
 * @len: size in bytes of the @data buffer
 */
extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data,
	size_t len);

/**
 * dvb_ringbuffer_empty - test whether buffer is empty
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 */
extern int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf);

/**
 * dvb_ringbuffer_free - returns the number of free bytes in the buffer
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 *
 * Return: number of free bytes in the buffer
 */
extern ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf);

/**
 * dvb_ringbuffer_avail - returns the number of bytes waiting in the buffer
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 *
 * Return: number of bytes waiting in the buffer
 */
extern ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf);

/**
 * dvb_ringbuffer_reset - resets the ringbuffer to initial state
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 *
 * Resets the read and write pointers to zero and flush the buffer.
 *
 * This counts as a read and write operation
 */
extern void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf);

/*
 * read routines & macros
 */

/**
 * dvb_ringbuffer_flush - flush buffer
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 */
extern void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf);

/**
 * dvb_ringbuffer_flush_spinlock_wakeup- flush buffer protected by spinlock
 *      and wake-up waiting task(s)
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 */
extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf);

/**
 * DVB_RINGBUFFER_PEEK - peek at byte @offs in the buffer
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 * @offs: offset inside the ringbuffer
 */
#define DVB_RINGBUFFER_PEEK(rbuf, offs) \
	((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size])

/**
 * DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 * @num: number of bytes to advance
 */
#define DVB_RINGBUFFER_SKIP(rbuf, num) {\
	(rbuf)->pread = ((rbuf)->pread + (num)) % (rbuf)->size;\
}

/**
 * dvb_ringbuffer_read_user - Reads a buffer into an user pointer
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 * @buf: pointer to the buffer where the data will be stored
 * @len: bytes from ring buffer into @buf
 *
 * This variant assumes that the buffer is a memory at the userspace. So,
 * it will internally call copy_to_user().
 *
 * Return: number of bytes transferred or -EFAULT
 */
extern ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf,
	u8 __user *buf, size_t len);

/**
 * dvb_ringbuffer_read - Reads a buffer into a pointer
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 * @buf: pointer to the buffer where the data will be stored
 * @len: bytes from ring buffer into @buf
 *
 * This variant assumes that the buffer is a memory at the Kernel space
 *
 * This function returns no value; @len bytes are always copied (the
 * prototype below is void, unlike dvb_ringbuffer_read_user()).
 */
extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf,
	u8 *buf, size_t len);

/*
 * write routines & macros
 */

/**
 * DVB_RINGBUFFER_WRITE_BYTE - write single byte to ring buffer
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 * @byte: byte to write
 */
#define DVB_RINGBUFFER_WRITE_BYTE(rbuf, byte) \
	{ (rbuf)->data[(rbuf)->pwrite] = (byte); \
	(rbuf)->pwrite = ((rbuf)->pwrite + 1) % (rbuf)->size; }

/**
 * dvb_ringbuffer_write - Writes a buffer into the ringbuffer
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 * @buf: pointer to the buffer where the data will be read
 * @len: bytes from ring buffer into @buf
 *
 * This variant assumes that the buffer is a memory at the Kernel space
 *
 * Return: number of bytes transferred or -EFAULT
 */
extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf,
	size_t len);

/**
 * dvb_ringbuffer_write_user - Writes a buffer received via an user pointer
 *
 * @rbuf: pointer to struct dvb_ringbuffer
 * @buf: pointer to the buffer where the data will be read
 * @len: bytes from ring buffer into @buf
 *
 * This variant assumes that the buffer is a memory at the userspace. So,
 * it will internally call copy_from_user().
 *
 * Return: number of bytes transferred or -EFAULT
 */
extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
	const u8 __user *buf, size_t len);

/**
 * dvb_ringbuffer_pkt_write - Write a packet into the ringbuffer.
 *
 * @rbuf: Ringbuffer to write to.
 * @buf: Buffer to write.
 * @len: Length of buffer (currently limited to 65535 bytes max).
 *
 * Return: Number of bytes written, or -EFAULT, -ENOMEM, -EINVAL.
 */
extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf,
	size_t len);

/**
 * dvb_ringbuffer_pkt_read_user - Read from a packet in the ringbuffer.
 *
 * @rbuf: Ringbuffer concerned.
 * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
 * @offset: Offset into packet to read from.
 * @buf: Destination buffer for data.
 * @len: Size of destination buffer.
 *
 * Return: Number of bytes read, or -EINVAL / -EFAULT.
 *
 * .. note::
 *
 *    unlike dvb_ringbuffer_read(), this does **NOT** update the read pointer
 *    in the ringbuffer. You must use dvb_ringbuffer_pkt_dispose() to mark a
 *    packet as no longer required.
 */
extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf,
	size_t idx,
	int offset, u8 __user *buf,
	size_t len);

/**
 * dvb_ringbuffer_pkt_read - Read from a packet in the ringbuffer.
 * Note: like dvb_ringbuffer_pkt_read_user(), this does NOT update the read
 * pointer in the ringbuffer (the implementation never touches pread); use
 * dvb_ringbuffer_pkt_dispose() to release the packet.
 *
 * @rbuf: Ringbuffer concerned.
 * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
 * @offset: Offset into packet to read from.
 * @buf: Destination buffer for data.
 * @len: Size of destination buffer.
 *
 * Return: Number of bytes read, or -EINVAL.
 */
extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
	int offset, u8 *buf, size_t len);

/**
 * dvb_ringbuffer_pkt_dispose - Dispose of a packet in the ring buffer.
 *
 * @rbuf: Ring buffer concerned.
 * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
 */
extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx);

/**
 * dvb_ringbuffer_pkt_next - Get the index of the next packet in a ringbuffer.
 *
 * @rbuf: Ringbuffer concerned.
 * @idx: Previous packet index, or -1 to return the first packet index.
 * @pktlen: On success, will be updated to contain the length of the packet
 *          in bytes.
 * returns Packet index (if >=0), or -1 if no packets available.
 */
extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf,
	size_t idx, size_t *pktlen);

#endif /* _DVB_RINGBUFFER_H_ */
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/COPYING.LESSER b/drivers/stream_input/parser/dvb_ci/cimax/usb/COPYING.LESSER new file mode 100644 index 0000000..8f4a860 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/COPYING.LESSER
@@ -0,0 +1,499 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. 
+ + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. 
+ + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. 
+ + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. 
+ + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. 
+ + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. 
+Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. 
Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. 
However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. 
However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. 
If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. +We sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. 
You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + <one line to give the library's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library. + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + <signature of Ty Coon>, 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it!
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/README.txt b/drivers/stream_input/parser/dvb_ci/cimax/usb/README.txt new file mode 100644 index 0000000..1d6a01c --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/README.txt
@@ -0,0 +1,61 @@
+April 28, 2011
+
+Linux CIMaX+ Kernel driver.
+Release Code version:
+
+DISCLAIMER
+----------
+
+This is a SmarDTV Linux Kernel driver for use with
+the CIMaX+ USB reference board hardware.
+
+IMPORTANT NOTE
+--------------
+
+This package contains a Linux Kernel driver to be installed in the
+/lib/modules target directory in order to be loaded automatically.
+
+1. High level driver: cimax+usb_driver.ko
+ This driver provides the high level interface to control the CIMaX+ USB interface.
+
+2. Firmware
+ To have a functional hardware, the firmware must be uploaded to the device.
+
+ The firmware is delivered in 2 files for dedicated usage and must be
+ installed in the Linux firmware directory on the target board:
+
+ - /lib/firmware
+
+ A) CIMaX+ firmware binary file
+ Copy the following firmware file to the /lib/firmware directory:
+ - firmware/cimax+_usbdvb.bin
+
+ B) CIMaX+ configuration file
+ Copy the following firmware file to the /lib/firmware directory:
+ - firmware/cimax+usb.cfg
+
+
+BUILD AND INSTALLATION INSTRUCTIONS
+-----------------------------------
+
+2. Building LINUX USB DVB driver
+ On the target machine perform the following commands:
+
+ 1. cd src
+ 2. make
+ 3. make install
+
+3. Installing firmware files
+ On the target machine perform the following command:
+
+ 1. cp firmware/*.* /lib/firmware/
+
+4. Installing rules file
+ 1. cp src/99-cimax+usb.rules /etc/udev/rules.d/
+ 2. udevadm control --reload-rules
+
+5. Build modules dependencies
+ On the target machine perform the following command:
+
+ 1. depmod -a
+
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/99-cimax+usb.rules b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/99-cimax+usb.rules new file mode 100644 index 0000000..82af370 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/99-cimax+usb.rules
@@ -0,0 +1,4 @@ +BUS=="usb", ACTION=="add", SYSFS{idProduct}=="2F00", SYSFS{idVendor}=="1B0D" +KERNEL=="cimaxusb[0-9]", GROUP="users" + +
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/HOWTO.txt b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/HOWTO.txt new file mode 100644 index 0000000..23de038 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/HOWTO.txt
@@ -0,0 +1,85 @@
+The following examples show how to use the cimax+usb driver.
+All structures and macros are defined in file cimax+usb-driver.h.
+
+
+1.Open the device cimax+usb
+===========================
+/* open device */
+dev = open("/dev/cimaxusb0", O_RDWR);
+/* select USB alt_setting */
+ioctl(dev, DEVICE_IOC_SELECT_INTF, alt_setting);
+
+
+2.Close the device cimax+usb
+============================
+/* unlock read queue */
+ioctl(dev, DEVICE_IOC_UNLOCK_READ, 0);
+/* close device */
+close(dev);
+
+
+3.Send a CI message to a CAM through the device
+===============================================
+/* Send CAM A Reset command */
+uint8 command[5] = {0x01, /* Command */
+ 0x01, /* Counter */
+ 0x00, /* MSB data size */
+ 0x01, /* LSB data size */
+ 0x00}; /* data */
+uint8 response[4100];
+struct ioctl_data_s stData;
+stData.txData = command;
+stData.txSize = 5;
+stData.rxData = response;
+stData.rxSize = 4100;
+ioctl(dev, DEVICE_IOC_CI_WRITE, &stData);
+
+/* Send CAM B Get CIS command */
+uint8 command[4] = {0x82, /* Command */
+ 0x01, /* Counter */
+ 0x00, /* MSB data size */
+ 0x00}; /* LSB data size */
+uint8 response[4100];
+struct ioctl_data_s stData;
+stData.txData = command;
+stData.txSize = 4;
+stData.rxData = response;
+stData.rxSize = 4100;
+ioctl(dev, DEVICE_IOC_CI_WRITE, &stData);
+
+
+4.Send a Transport Stream to a CAM through the device
+=====================================================
+/* Send to CAM A */
+struct rw_data_s rwData;
+rwData.type = DEVICE_TYPE_TS_WRITE;
+rwData.moduleId = 0; /* CAM A */
+rwData.data = stream;
+rwData.size = size;
+rwData.copiedSize = 0;
+write(dev, &rwData, sizeof(struct rw_data_s));
+
+
+5.Read a Transport Stream from a CAM through the device
+=======================================================
+/* Read from CAM B */
+struct rw_data_s rwData;
+rwData.type = DEVICE_TYPE_TS_READ;
+rwData.moduleId = 1; /* CAM B */
+rwData.data = stream;
+rwData.size = size;
+rwData.copiedSize = 0;
+read(dev, &rwData, 
sizeof(struct rw_data_s)); + + +6.Read a CI message from a CAM through the device +================================================= +/* Read from CAM A */ +uint8 response[4096]; +struct rw_data_s rwData; +rwData.type = DEVICE_TYPE_CI_READ; +rwData.moduleId = 0; /* CAM A */ +rwData.data = response; +rwData.size = 4096; +rwData.copiedSize = 0; +read(dev, &rwData, sizeof(struct rw_data_s));
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/Makefile b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/Makefile new file mode 100644 index 0000000..bc79945 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/Makefile
@@ -0,0 +1,35 @@ +TARGET = cimax+usb_driver +OBJS = cimax+usb_driver.o +MDIR = drivers/misc + +ccflags-y = -DEXPORT_SYMTAB +#ccflags-y = -DEXPORT_SYMTAB -DDEBUG +CURRENT = $(shell uname -r) +KDIR = /lib/modules/$(CURRENT)/build +PWD = $(shell pwd) +DEST = /lib/modules/$(CURRENT)/kernel/$(MDIR) + +obj-m := $(TARGET).o +cimax+usb_driver-y := cimax+usb-driver.o cimax+usb_fw.o +cimax+usb_driver-y += cimax+usb_config.o +cimax+usb_driver-y += cimax+usb_time.o + +default: + make -C /usr/src/linux SUBDIRS=$(PWD) modules +# make -C /lib/modules/`uname -r`/build M=$(PWD) modules + +$(TARGET).o: $(OBJS) + $(LD) $(LD_RFLAG) -r -o $@ $(OBJS) + +ifneq (,$(findstring 2.4.,$(CURRENT))) +install: + su -c "cp -v $(TARGET).o $(DEST) && /sbin/depmod -a" +else +install: + su -c "cp -v $(TARGET).ko $(DEST) && /sbin/depmod -a" +endif + +clean: + -rm -f *.o *.ko .*.cmd .*.flags *.mod.c + +-include $(KDIR)/Rules.make
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/bodydef.h b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/bodydef.h new file mode 100644 index 0000000..f93fef5 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/bodydef.h
@@ -0,0 +1,424 @@ +/**************************************************************************//** + * @file bodydef.h + * + * @brief CIMaX+ USB Driver for linux based operating systems. + * + * Copyright (C) 2009-2011 Bruno Tonelli <bruno.tonelli@smardtv.com> + * & Franck Descours <franck.descours@smardtv.com> + * for SmarDTV France, La Ciotat + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + ******************************************************************************/ + +#ifndef __BODYDEF_H +#define __BODYDEF_H + +struct reg_s { + __u8 RegisterName[50]; + __u16 RegAddr; +}; + +/*======================================================================= + Input/Output Ports and Data Direction Registers +=======================================================================*/ +struct reg_s cimax_reg_map[] = { + {"BUFFIN_CFG" , 0x0000}, + {"BUFFIN_ADDR_LSB" , 0x0001}, + {"BUFFIN_ADDR_MSB" , 0x0002}, + {"BUFFIN_DATA" , 0x0003}, + {"BUFFOUT_CFG" , 0x0004}, + {"BUFFOUT_ADDR_LSB" , 0x0005}, + {"BUFFOUT_ADDR_MSB" , 0x0006}, + {"BUFFOUT_DATA" , 0x0007}, + {"BOOT_Key" , 0x0008}, + {"BOOT_Status" , 0x0009}, + {"BOOT_Test " , 0x000A}, + {"RxDMA_Ctrl" , 0x0010}, + {"RxDMA_Status" , 0x0011}, + {"RxDMA_DbgL" , 0x0012}, + {"RxDMA_DbgH" , 0x0013}, + {"SPI_Slave_Ctrl" , 0x0018}, + {"SPI_Slave_Status" , 0x0019}, + {"SPI_Slave_Rx" , 0x001A}, + {"SPI_Slave_Tx" , 0x001B}, + {"SPI_Slave_Mask" , 0x001C}, + {"UCSG_Ctrl" , 0x0020}, + {"UCSG_Status" , 0x0021}, + {"UCSG_RxData" , 0x0022}, + {"UCSG_TxData" , 
0x0023}, + {"PCtrl_Ctrl" , 0x0028}, + {"PCtrl_Status" , 0x0029}, + {"PCtrl_NbByte_LSB" , 0x002A}, + {"PCtrl_NbByte_MSB" , 0x002B}, + {"SPI_Master_Ctl" , 0x0030}, + {"SPI_Master_NCS" , 0x0031}, + {"SPI_Master_Status" , 0x0032}, + {"SPI_Master_TxBuf" , 0x0033}, + {"SPI_Master_RxBuf" , 0x0034}, + {"BISTRAM_Ctl" , 0x0038}, + {"BISTRAM_Bank" , 0x0039}, + {"BISTRAM_Pat" , 0x003A}, + {"BISTRAM_SM" , 0x003B}, + {"BISTRAM_AddrLSB" , 0x003C}, + {"BISTROM_Config" , 0x0040}, + {"BISTROM_SignatureLSB" , 0x0041}, + {"BISTROM_SignatureMSB" , 0x0042}, + {"BISTROM_StartAddrLSB" , 0x0043}, + {"BISTROM_StartAddrMSB" , 0x0044}, + {"BISTROM_StopAddrLSB" , 0x0045}, + {"BISTROM_StopAddrMSB" , 0x0046}, + {"CkMan_Config" , 0x0048}, + {"CkMan_Select" , 0x0049}, + {"CkMan_Test" , 0x004A}, + {"Revision_Number" , 0x004B}, + {"CkMan_PD_Key" , 0x004C}, + {"USB_Power_Mode" , 0x004D}, + {"ResMan_Config" , 0x0050}, + {"ResMan_Status" , 0x0051}, + {"ResMan_WD" , 0x0052}, + {"ResMan_WD_MSB" , 0x0053}, + {"TxDMA_Ctrl" , 0x0058}, + {"TxDMA_Status" , 0x0059}, + {"TxDMA_StartAddrL" , 0x005A}, + {"TxDMA_StartAddrH" , 0x005B}, + {"TxDMA_StopAddrL" , 0x005C}, + {"TxDMA_StopAddrH" , 0x005D}, + {"CPU_Test" , 0x0060}, + {"IrqMan_Config0" , 0x0068}, + {"IrqMan_Config1" , 0x0069}, + {"IrqMan_Irq0" , 0x006A}, + {"IrqMan_NMI" , 0x006B}, + {"IrqMan_SleepKey" , 0x006C}, + {"Tim_Config" , 0x0070}, + {"Tim_Value_LSB" , 0x0071}, + {"Tim_Value_MSB" , 0x0072}, + {"Tim_Comp_LSB" , 0x0073}, + {"Tim_Comp_MSB" , 0x0074}, + {"TI_Config" , 0x0076}, + {"TI_Data" , 0x0077}, + {"TI_Reg0" , 0x0078}, + {"TI_Reg1" , 0x0079}, + {"TI_Reg2" , 0x007A}, + {"TI_Reg3" , 0x007B}, + {"TI_Reg4" , 0x007C}, + {"TI_ROM1" , 0x007D}, + {"TI_ROM2" , 0x007E}, + {"TI_ROM3" , 0x007F}, + {"DVBCI_START_ADDR" , 0x0100}, + {"DVBCI_END_ADDR" , 0x017F}, + {"DATA" , 0x0180}, + {"CTRL" , 0x0181}, + {"QB_HOST" , 0x0182}, + {"LEN_HOST_LSB" , 0x0183}, + {"LEN_HOST_MSB" , 0x0184}, + {"FIFO_TX_TH_LSB" , 0x0185}, + {"FIFO_TX_TH_MSB" , 0x0186}, + {"FIFO_TX_D_NB_LSB" 
, 0x0187}, + {"FIFO_TX_D_NB_MSB" , 0x0188}, + {"QB_MOD_CURR" , 0x0189}, + {"LEN_MOD_CURR_LSB" , 0x018A}, + {"LEN_MOD_CURR_MSB" , 0x018B}, + {"QB_MOD" , 0x018C}, + {"LEN_MOD_LSB" , 0x018D}, + {"LEN_MOD_MSB" , 0x018E}, + {"FIFO_RX_TH_LSB" , 0x018F}, + {"FIFO_RX_TH_MSB" , 0x0190}, + {"FIFO_RX_D_NB_LSB" , 0x0191}, + {"FIFO_RX_D_NB_MSB" , 0x0192}, + {"IT_STATUS_0" , 0x0193}, + {"IT_STATUS_1" , 0x0194}, + {"IT_MASK_0" , 0x0195}, + {"IT_MASK_1" , 0x0196}, + {"IT_HOST_PIN_CFG" , 0x0200}, + {"CFG_0" , 0x0201}, + {"CFG_1" , 0x0202}, + {"CFG_2" , 0x0203}, + {"IT_HOST" , 0x0204}, + {"MOD_IT_STATUS" , 0x0205}, + {"MOD_IT_MASK" , 0x0206}, + {"MOD_CTRL_A" , 0x0207}, + {"MOD_CTRL_B" , 0x0208}, + {"DEST_SEL" , 0x0209}, + {"CAM_MSB_ADD" , 0x020A}, + {"GPIO0_DIR" , 0x020B}, + {"GPIO0_DATA_IN" , 0x020C}, + {"GPIO0_DATA_OUT" , 0x020D}, + {"GPIO0_STATUS" , 0x020E}, + {"GPIO0_IT_MASK" , 0x020F}, + {"GPIO0_DFT" , 0x0210}, + {"GPIO0_MASK_DATA" , 0x0211}, + {"GPIO1_DIR" , 0x0212}, + {"GPIO1_DATA_IN" , 0x0213}, + {"GPIO1_DATA_OUT" , 0x0214}, + {"GPIO1_STATUS" , 0x0215}, + {"GPIO1_IT_MASK" , 0x0216}, + {"MEM_ACC_TIME_A" , 0x0217}, + {"MEM_ACC_TIME_B" , 0x0218}, + {"IO_ACC_TIME_A" , 0x0219}, + {"IO_ACC_TIME_B" , 0x021A}, + {"EXT_CH_ACC_TIME_A" , 0x021B}, + {"EXT_CH_ACC_TIME_B" , 0x021C}, + {"PAR_IF_0" , 0x021D}, + {"PAR_IF_1" , 0x021E}, + {"PAR_IF_CTRL" , 0x021F}, + {"PCK_LENGTH" , 0x0220}, + {"USB2TS_CTRL" , 0x0221}, + {"USB2TS0_RDL" , 0x0222}, + {"USB2TS1_RDL" , 0x0223}, + {"TS2USB_CTRL" , 0x0224}, + {"TSOUT_PAR_CTRL" , 0x0225}, + {"TSOUT_PAR_CLK_SEL" , 0x0226}, + {"S2P_CH0_CTRL" , 0x0227}, + {"S2P_CH1_CTRL" , 0x0228}, + {"P2S_CH0_CTRL" , 0x0229}, + {"P2S_CH1_CTRL" , 0x022A}, + {"TS_IT_STATUS" , 0x022B}, + {"TS_IT_MASK" , 0x022C}, + {"IN_SEL" , 0x022D}, + {"OUT_SEL" , 0x022E}, + {"ROUTER_CAM_CH" , 0x022F}, + {"ROUTER_CAM_MOD" , 0x0230}, + {"FIFO_CTRL" , 0x0231}, + {"FIFO1_2_STATUS" , 0x0232}, + {"FIFO3_4_STATUS" , 0x0233}, + {"GAP_REMOVER_CH0_CTRL" , 0x0234}, + {"GAP_REMOVER_CH1_CTRL" , 
0x0235}, + {"SYNC_RTV_CTRL" , 0x0236}, + {"SYNC_RTV_CH0_SYNC_NB" , 0x0237}, + {"SYNC_RTV_CH0_PATTERN" , 0x0238}, + {"SYNC_RTV_CH1_SYNC_NB" , 0x0239}, + {"SYNC_RTV_CH1_PATTERN" , 0x023A}, + {"SYNC_RTV_OFFSET_PATT" , 0x023B}, + {"CTRL_FILTER" , 0x023D}, + {"PID_EN_FILTER_CH0" , 0x023E}, + {"PID_EN_FILTER_CH1" , 0x023F}, + {"PID_LSB_FILTER_CH0_0" , 0x0240}, + {"PID_MSB_FILTER_CH0_0" , 0x0241}, + {"PID_LSB_FILTER_CH0_1" , 0x0242}, + {"PID_MSB_FILTER_CH0_1" , 0x0243}, + {"PID_LSB_FILTER_CH0_2" , 0x0244}, + {"PID_MSB_FILTER_CH0_2" , 0x0245}, + {"PID_LSB_FILTER_CH0_3" , 0x0246}, + {"PID_MSB_FILTER_CH0_3" , 0x0247}, + {"PID_LSB_FILTER_CH0_4" , 0x0248}, + {"PID_MSB_FILTER_CH0_4" , 0x0249}, + {"PID_LSB_FILTER_CH0_5" , 0x024A}, + {"PID_MSB_FILTER_CH0_5" , 0x024B}, + {"PID_LSB_FILTER_CH0_6" , 0x024C}, + {"PID_MSB_FILTER_CH0_6" , 0x024D}, + {"PID_LSB_FILTER_CH0_7" , 0x024E}, + {"PID_MSB_FILTER_CH0_7" , 0x024F}, + {"PID_LSB_FILTER_CH1_0" , 0x0260}, + {"PID_MSB_FILTER_CH1_0" , 0x0261}, + {"PID_LSB_FILTER_CH1_1" , 0x0262}, + {"PID_MSB_FILTER_CH1_1" , 0x0263}, + {"PID_LSB_FILTER_CH1_2" , 0x0264}, + {"PID_MSB_FILTER_CH1_2" , 0x0265}, + {"PID_LSB_FILTER_CH1_3" , 0x0266}, + {"PID_MSB_FILTER_CH1_3" , 0x0267}, + {"PID_LSB_FILTER_CH1_4" , 0x0268}, + {"PID_MSB_FILTER_CH1_4" , 0x0269}, + {"PID_LSB_FILTER_CH1_5" , 0x026A}, + {"PID_MSB_FILTER_CH1_5" , 0x026B}, + {"PID_LSB_FILTER_CH1_6" , 0x026C}, + {"PID_MSB_FILTER_CH1_6" , 0x026D}, + {"PID_LSB_FILTER_CH1_7" , 0x026E}, + {"PID_MSB_FILTER_CH1_7" , 0x026F}, + {"PID_OLD_LSB_REMAPPER_0" , 0x0280}, + {"PID_OLD_MSB_REMAPPER_0" , 0x0281}, + {"PID_OLD_LSB_REMAPPER_1" , 0x0282}, + {"PID_OLD_MSB_REMAPPER_1" , 0x0283}, + {"PID_OLD_LSB_REMAPPER_2" , 0x0284}, + {"PID_OLD_MSB_REMAPPER_2" , 0x0285}, + {"PID_OLD_LSB_REMAPPER_3" , 0x0286}, + {"PID_OLD_MSB_REMAPPER_3" , 0x0287}, + {"PID_OLD_LSB_REMAPPER_4" , 0x0288}, + {"PID_OLD_MSB_REMAPPER_4" , 0x0289}, + {"PID_OLD_LSB_REMAPPER_5" , 0x028A}, + {"PID_OLD_MSB_REMAPPER_5" , 0x028B}, + {"PID_OLD_LSB_REMAPPER_6" 
, 0x028C}, + {"PID_OLD_MSB_REMAPPER_6" , 0x028D}, + {"PID_OLD_LSB_REMAPPER_7" , 0x028E}, + {"PID_OLD_MSB_REMAPPER_7" , 0x028F}, + {"PID_NEW_LSB_REMAPPER_0" , 0x02A0}, + {"PID_NEW_MSB_REMAPPER_0" , 0x02A1}, + {"PID_NEW_LSB_REMAPPER_1" , 0x02A2}, + {"PID_NEW_MSB_REMAPPER_1" , 0x02A3}, + {"PID_NEW_LSB_REMAPPER_2" , 0x02A4}, + {"PID_NEW_MSB_REMAPPER_2" , 0x02A5}, + {"PID_NEW_LSB_REMAPPER_3" , 0x02A6}, + {"PID_NEW_MSB_REMAPPER_3" , 0x02A7}, + {"PID_NEW_LSB_REMAPPER_4" , 0x02A8}, + {"PID_NEW_MSB_REMAPPER_4" , 0x02A9}, + {"PID_NEW_LSB_REMAPPER_5" , 0x02AA}, + {"PID_NEW_MSB_REMAPPER_5" , 0x02AB}, + {"PID_NEW_LSB_REMAPPER_6" , 0x02AC}, + {"PID_NEW_MSB_REMAPPER_6" , 0x02AD}, + {"PID_NEW_LSB_REMAPPER_7" , 0x02AE}, + {"PID_NEW_MSB_REMAPPER_7" , 0x02AF}, + {"MERGER_DIV_MICLK" , 0x02C0}, + {"PID_AND_SYNC_REMAPPER_CTRL" , 0x02C1}, + {"PID_EN_REMAPPER" , 0x02C2}, + {"SYNC_SYMBOL" , 0x02C3}, + {"PID_AND_SYNC_REMAPPER_INV_CTRL" , 0x02C4}, + {"BITRATE_CH0_LSB" , 0x02C5}, + {"BITRATE_CH0_MSB" , 0x02C6}, + {"BITRATE_CH1_LSB" , 0x02C7}, + {"BITRATE_CH1_MSB" , 0x02C8}, + {"STATUS_CLK_SWITCH_0" , 0x02C9}, + {"STATUS_CLK_SWITCH_1" , 0x02CA}, + {"RESET_CLK_SWITCH_0" , 0x02CB}, + {"RESET_CLK_SWITCH_1" , 0x02CC}, + {"PAD_DRVSTR_CTRL" , 0x02CD}, + {"PAD_PUPD_CTRL" , 0x02CE}, + {"PRE_HEADER_ADDER_CH0_0" , 0x02D0}, + {"PRE_HEADER_ADDER_CH0_1" , 0x02D1}, + {"PRE_HEADER_ADDER_CH0_2" , 0x02D2}, + {"PRE_HEADER_ADDER_CH0_3" , 0x02D3}, + {"PRE_HEADER_ADDER_CH0_4" , 0x02D4}, + {"PRE_HEADER_ADDER_CH0_5" , 0x02D5}, + {"PRE_HEADER_ADDER_CH0_6" , 0x02D6}, + {"PRE_HEADER_ADDER_CH0_7" , 0x02D7}, + {"PRE_HEADER_ADDER_CH0_8" , 0x02D8}, + {"PRE_HEADER_ADDER_CH0_9" , 0x02D9}, + {"PRE_HEADER_ADDER_CH0_10" , 0x02DA}, + {"PRE_HEADER_ADDER_CH0_11" , 0x02DB}, + {"PRE_HEADER_ADDER_CH1_0" , 0x02E0}, + {"PRE_HEADER_ADDER_CH1_1" , 0x02E1}, + {"PRE_HEADER_ADDER_CH1_2" , 0x02E2}, + {"PRE_HEADER_ADDER_CH1_3" , 0x02E3}, + {"PRE_HEADER_ADDER_CH1_4" , 0x02E4}, + {"PRE_HEADER_ADDER_CH1_5" , 0x02E5}, + {"PRE_HEADER_ADDER_CH1_6" 
, 0x02E6}, + {"PRE_HEADER_ADDER_CH1_7" , 0x02E7}, + {"PRE_HEADER_ADDER_CH1_8" , 0x02E8}, + {"PRE_HEADER_ADDER_CH1_9" , 0x02E9}, + {"PRE_HEADER_ADDER_CH1_10" , 0x02EA}, + {"PRE_HEADER_ADDER_CH1_11" , 0x02EB}, + {"PRE_HEADER_ADDER_CTRL" , 0x02EC}, + {"PRE_HEADER_ADDER_LEN" , 0x02ED}, + {"PRE_HEADER_REMOVER_CTRL" , 0x02EE}, + {"FSM_DVB" , 0x02F0}, + {"TS2USB_FSM_DEBUG" , 0x02F2}, + {"TSOUT_PAR_FSM_DEBUG" , 0x02F3}, + {"GAP_REMOVER_FSM_DEBUG" , 0x02F4}, + {"PID_AND_SYNC_REMAPPER_FSM_DEBUG" , 0x02F5}, + {"PRE_HEADER_ADDER_FSM_DEBUG" , 0x02F6}, + {"SYNC_RTV_FSM_DEBUG" , 0x02F7}, + {"CHECK_PHY_CLK" , 0x0E00}, + {"CONTROL1" , 0x0E01}, + {"WAKE_UP" , 0x0E02}, + {"CONTROL2" , 0x0E03}, + {"PHY_RELATED" , 0x0E04}, + {"EP_CFG" , 0x0E05}, + {"MAX_PKT_EP1L" , 0x0E06}, + {"MAX_PKT_EP1H" , 0x0E07}, + {"MAX_PKT_EP2L" , 0x0E08}, + {"MAX_PKT_EP2H" , 0x0E09}, + {"MAX_PKT_EP3L" , 0x0E0A}, + {"MAX_PKT_EP3H" , 0x0E0B}, + {"MAX_PKT_EP4L" , 0x0E0C}, + {"MAX_PKT_EP4H" , 0x0E0D}, + {"EPS_STALL_SET" , 0x0E10}, + {"EPS_STALL_CLR" , 0x0E11}, + {"EPS_ENABLE" , 0x0E12}, + {"DMA_ACC_EPS" , 0x0E13}, + {"CPU_ACC_EPS_EN" , 0x0E14}, + {"SETUP_BYTE0" , 0x0E15}, + {"SETUP_BYTE1" , 0x0E16}, + {"SETUP_BYTE2" , 0x0E17}, + {"SETUP_BYTE3" , 0x0E18}, + {"SETUP_BYTE4" , 0x0E19}, + {"SETUP_BYTE5" , 0x0E1A}, + {"SETUP_BYTE6" , 0x0E1B}, + {"SETUP_BYTE7" , 0x0E1C}, + {"SETUP_DT_VLD" , 0x0E1D}, + {"CLR_EPS_TOG" , 0x0E1E}, + {"EP0_CTRL" , 0x0E20}, + {"EP0_DATA_CNT" , 0x0E21}, + {"EP0_DATA" , 0x0E22}, + {"EP1_CTRL" , 0x0E30}, + {"EP1_DATA_CNTL" , 0x0E31}, + {"EP1_DATA_CNTH" , 0x0E32}, + {"EP1_DATA" , 0x0E33}, + {"EP1_HEADER" , 0x0E34}, + {"EP2_CTRL" , 0x0E40}, + {"EP2_DATA_CNTL" , 0x0E41}, + {"EP2_DATA_CNTH" , 0x0E42}, + {"EP2_DATA" , 0x0E43}, + {"EP2_HEADER" , 0x0E44}, + {"EP3_DATA_CNTL" , 0x0E50}, + {"EP3_DATA_CNTH" , 0x0E51}, + {"EP3_DATA" , 0x0E52}, + {"EP3_HEADER" , 0x0E53}, + {"EP3_HEADER_CNT" , 0x0E54}, + {"EP3_HEADER_DATA" , 0x0E55}, + {"EP4_DATA_CNTL" , 0x0E60}, + {"EP4_DATA_CNTH" , 0x0E61}, + {"EP4_DATA" , 
0x0E62}, + {"EP4_HEADER" , 0x0E63}, + {"EP4_HEADER_CNT" , 0x0E64}, + {"EP4_HEADER_DATA" , 0x0E65}, + {"EP5_CTRL" , 0x0E70}, + {"EP5_DATA_CNTL" , 0x0E71}, + {"EP5_DATA_CNTH" , 0x0E72}, + {"EP5_DATA" , 0x0E73}, + {"MAX_PKT_EP5L" , 0x0E74}, + {"MAX_PKT_EP5H" , 0x0E75}, + {"EP6_DATA_CNTL" , 0x0E80}, + {"EP6_DATA_CNTH" , 0x0E81}, + {"EP6_DATA" , 0x0E82}, + {"MAX_PKT_EP6L" , 0x0E83}, + {"MAX_PKT_EP6H" , 0x0E84}, + {"FRAME_NUML" , 0x0E90}, + {"FRAME_NUMH" , 0x0E91}, + {"FRAME_TIMEL" , 0x0E92}, + {"FRAME_TIMEH" , 0x0E93}, + {"STC_DIVL" , 0x0E94}, + {"STC_DIVM" , 0x0E95}, + {"STC_DIVH" , 0x0E96}, + {"USB_STATUS" , 0x0E97}, + {"DEV_STATE1" , 0x0E98}, + {"DEV_STATE2" , 0x0E99}, + {"DEV_STATE3" , 0x0E9A}, + {"DEV_STATE4" , 0x0E9B}, + {"INTR_EN1" , 0x0EA0}, + {"INTR_EN2" , 0x0EA1}, + {"INTR_EN3" , 0x0EA2}, + {"INTR_EN4" , 0x0EA3}, + {"INTR_SRC1" , 0x0EB0}, + {"INTR_SRC2" , 0x0EB1}, + {"INTR_SRC3" , 0x0EB2}, + {"INTR_SRC4" , 0x0EB3}, + {"INTR_FLAG1" , 0x0EC0}, + {"INTR_FLAG2" , 0x0EC1}, + {"INTR_FLAG3" , 0x0EC2}, + {"INTR_FLAG4" , 0x0EC3}, + {"EP0_INAK_CNT" , 0x0ED0}, + {"EP0_ONAK_CNT" , 0x0ED1}, + {"EP1_NAK_CNT" , 0x0ED2}, + {"EP2_NAK_CNT" , 0x0ED3}, + {"EP3_NAK_CNT" , 0x0ED4}, + {"EP4_NAK_CNT" , 0x0ED5}, + {"EP5_NAK_CNT" , 0x0ED6}, + {"EP6_NAK_CNT" , 0x0ED7}, + {"NAK_CNT_LEVEL" , 0x0ED8}, + {"CC2_Buffer_out" , 0x2000}, + {"CC2_Buffer_in" , 0x4000}, + {"nmb_vector_address_lsb" , 0xFFFA}, + {"nmb_vector_address_msb" , 0xFFFB}, + {"reset_vector_address_lsb" , 0xFFFC}, + {"reset_vector_address_msb" , 0xFFFD}, + {"irb_vector_address_lsb" , 0xFFFE}, + {"irb_vector_address_msb" , 0xFFFF} +}; +#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb-driver.c b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb-driver.c new file mode 100644 index 0000000..34bbbc7 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb-driver.c
@@ -0,0 +1,2537 @@ +/**************************************************************************//** + * @file cimax+usb-driver.c + * + * @brief CIMaX+ USB Driver for linux based operating systems. + * + * Copyright (C) 2009-2011 Bruno Tonelli <bruno.tonelli@smardtv.com> + * & Franck Descours <franck.descours@smardtv.com> + * for SmarDTV France, La Ciotat + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + ******************************************************************************/ + +#define FRBIT +/*#define DEBUG*/ +/*#define DEBUG_BITRATE*/ +/*#define DEBUG_ISOC_IN*/ +/*#define DEBUG_ISOC_OUT*/ +/*#define DEBUG_CONTINUITY*/ + +/****************************************************************************** + * Include + ******************************************************************************/ +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/poll.h> +#include <linux/usb.h> +#include <linux/uaccess.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/hrtimer.h> +#include <linux/ktime.h> +#include <linux/dvb/ca.h> +#include <linux/compat.h> + +#include "cimax+usb-driver.h" +#include "cimax+usb_fw.h" +#include "cimax+usb_config.h" +#ifdef TIMESTAMP +#include "cimax+usb_time.h" +#endif +#include "../../aml_cimax_usb_priv.h" + +/****************************************************************************** + * Defines + *****************************************************************************/ 
+#define DRIVER_VERSION "v1.1.2" +#define DRIVER_AUTHOR "Bruno Tonelli, tonelli@smardtv.com" +#define DRIVER_DESC "CIMaX+ USB Driver for Linux (c)2009-2011" + +#define DRIVER_MAX_NUMBER 1 + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_LICENSE("GPL"); + +/****************************************************************************** + * Structures + ******************************************************************************/ +/****************************************************************************** + * Globals + ******************************************************************************/ +#ifdef FRBIT +int CimaxCfg = 1; +module_param_named(CimaxCfg, CimaxCfg, int, 0644); +MODULE_PARM_DESC(CimaxCfg, "Turn on/off configuration of CIMaX+ (default: on)"); +int CimaxDwnl = 1; +module_param_named(CimaxDwnl, CimaxDwnl, int, 0644); +MODULE_PARM_DESC(CimaxDwnl, "Enable upload of FW in CIMaX+ chip (default: on)"); +#endif + +static struct device_s *gdevice; +static unsigned int gdeviceNumber; + +static struct usb_driver device_driver; +static struct timespec gStart; + +static __u8 nullHeader[] = { + 0x47, 0x1F, 0xFF, 0x1F, 0xFA, 0xDE, 0xBA, 0xBE +}; + +static struct bulk_timer_s gbulk_timer[DEVICE_NUM_CAM]; +int (*cimax_usb_dev_add)(struct device_s *device, int id); +int (*cimax_usb_dev_remove)(struct device_s *device, int id); + +#ifdef TIMESTAMP +static int bSetTimestamps; +#endif + +/****************************************************************************** + * Functions + ******************************************************************************/ +#ifdef DEBUG_CONTINUITY +#define TS_MAXPIDS 8192 /* max value of a PID */ +unsigned char tab_cc[TS_MAXPIDS]; + +__u16 get_ts_pid(unsigned char *pid) +{ + __u16 pp = 0; + + pp = (pid[0] & 0X1f)<<8; + pp |= pid[1]; + + return pp; +} + +static void init_tab_cc(void) +{ + memset(tab_cc, 0xff, TS_MAXPIDS); +} + +static int dbg_cc(unsigned char *buf) +{ + int pid; + unsigned char cc; + + 
if (buf[0] != DEVICE_MPEG2_SYNC_BYTE) { + err("Out Of Sync: "); + return -1; + } + + pid = get_ts_pid(buf + 1); + + if (!(buf[3] & 0x10)) /* no payload?*/ + return 0; + + if (buf[1] & 0x80) + err("Error in TS for PID: %d\n", pid); + + /* Check continuity count*/ + cc = tab_cc[pid]; + if (cc == 255) + cc = (buf[3] & 15); + else { + cc = ((cc) + 1) & 15; + if (cc != (buf[3] & 15)) { + /* Otherwise, this is a real corruption */ + err("pid %d cc %d expected cc %d actual\n", + pid, cc, buf[3] & 15); + cc = (buf[3] & 15); + } + } + return 0; +} +#endif + +/*-------------------------------------------------------------------*/ +#ifdef DEBUG +static void dbg_dump(char *hdr, unsigned char *data, int size) +{ + int i; + char line[40]; + char str[9]; + line[0] = 0; + for (i = 0; i < size; i++) { + sprintf(line, "%s%.2x ", line, data[i]); + if ((data[i] >= 32) && (data[i] <= 126)) + str[i%8] = data[i]; + else + str[i%8] = '.'; + if (!((i+1)%8)) { + str[i%8 + 1] = 0; + dbg_s("%s %s %s", hdr, line, str); + line[0] = 0; + } /* if */ + } /* for */ + if (i%8) { + int j; + str[i%8 + 1] = 0; + for (j = (i%8); j < 8; j++) + sprintf(line, "%s ", line); + dbg_s("%s %s %s", hdr, line, str); + line[0] = 0; + } /* if */ +} /* dbg_dump */ +#else +#define dbg_dump(format, arg...) 
do {} while (0) +#endif /* DEBUG */ + +static unsigned long copyDataFrom(int us, + void *to, const void *from, unsigned long n) +{ + if (us) + return copy_from_user(to, from, n); + memcpy(to, from, n); + return 0; +} + +static unsigned long copyDataTo(int us, + void *to, const void *from, unsigned long n) +{ + if (us) + return copy_to_user(to, from, n); + memcpy(to, from, n); + return 0; +} + +static void vb_init(struct video_buf_s *buf) +{ + buf->readOffset = 0; + buf->writeOffset = 0; + buf->isEmpty = 1; +} /* vb_init */ + +static int vb_get_write_size(struct video_buf_s *buf) +{ + int writeSize = 0; + + if (buf->writeOffset == buf->readOffset) { + if (buf->isEmpty) + writeSize = DEVICE_VB_LENGTH; + } else if (buf->writeOffset > buf->readOffset) + writeSize = + DEVICE_VB_LENGTH - (buf->writeOffset - buf->readOffset); + else + writeSize = buf->readOffset - buf->writeOffset; + return writeSize; +} /* vb_get_write_size */ + +static int vb_write(struct video_buf_s *buf, __u8 *data, int size) +{ + int writeSize = vb_get_write_size(buf); + int firstPart = DEVICE_VB_LENGTH - buf->writeOffset; + if (size > writeSize) + size = writeSize; + + if (size < firstPart) { + memcpy(&buf->data[buf->writeOffset], data, size); + buf->writeOffset += size; + } /* if */ else { + memcpy(&buf->data[buf->writeOffset], data, firstPart); + memcpy(buf->data, &data[firstPart], size - firstPart); + buf->writeOffset = size - firstPart; + } /* else */ + + if (size > 0) + buf->isEmpty = 0; + return size; +} /* vb_write */ + +static int vb_read_next(struct video_buf_s *buf, __u8 *data) +{ + int readSize; + int firstPart; + int nextOffset; + int isStuffing; + int ret; + + readSize = DEVICE_VB_LENGTH - vb_get_write_size(buf); + nextOffset = buf->readOffset + DEVICE_MPEG2_PACKET_SIZE; + if (nextOffset >= DEVICE_VB_LENGTH) + nextOffset -= DEVICE_VB_LENGTH; + while (readSize > DEVICE_MPEG2_PACKET_SIZE) { + if ((buf->data[buf->readOffset] == DEVICE_MPEG2_SYNC_BYTE) && + (buf->data[nextOffset] == 
DEVICE_MPEG2_SYNC_BYTE)) { + /* packet in sync */ + break; + } /* if */ + buf->readOffset++; + if (buf->readOffset == DEVICE_VB_LENGTH) + buf->readOffset = 0; + nextOffset++; + if (nextOffset == DEVICE_VB_LENGTH) + nextOffset = 0; + readSize--; + } /* while */ + if (readSize <= DEVICE_MPEG2_PACKET_SIZE) { + buf->isEmpty = 1; + return 0; + } /* if */ + + /* packet is in sync, check if it is a stuffing packet */ + isStuffing = 0; + firstPart = DEVICE_VB_LENGTH - buf->readOffset; + if (firstPart < DEVICE_NULL_HEADER_SIZE) { + if ((memcmp(nullHeader, &buf->data[buf->readOffset], firstPart) + == 0) && + (memcmp(&nullHeader[firstPart], buf->data, + DEVICE_NULL_HEADER_SIZE - firstPart) + == 0)) { + isStuffing = 1; + } /* if */ + } /* if */ + else { + if (memcmp(nullHeader, &buf->data[buf->readOffset], + DEVICE_NULL_HEADER_SIZE) == 0) { + isStuffing = 1; + } /* if */ + } /* else */ + readSize -= DEVICE_MPEG2_PACKET_SIZE; + if (readSize <= DEVICE_MPEG2_PACKET_SIZE) + buf->isEmpty = 1; + + /* skip stuffing packet */ + if (isStuffing) { + buf->readOffset = nextOffset; + return 0; + } /* if */ + + /* copy packet to user space */ + if (firstPart >= DEVICE_MPEG2_PACKET_SIZE) { + ret = copy_to_user(data, + &buf->data[buf->readOffset], DEVICE_MPEG2_PACKET_SIZE); + } /* if */ else { + ret = copy_to_user(data, + &buf->data[buf->readOffset], firstPart); + ret = copy_to_user(&data[firstPart], + buf->data, DEVICE_MPEG2_PACKET_SIZE - firstPart); + } /* else */ + buf->readOffset = nextOffset; + return DEVICE_MPEG2_PACKET_SIZE; +} /* vb_read_next */ + +/*-------------------------------------------------------------------*/ +#ifdef DEBUG_BITRATE +static void print_bitrate(struct ts_channel_s *channel, __u8 channel_number) +{ + int readSize; + ktime_t currentTime; + int diffTime_us; + int bitrate; + + currentTime = ktime_get_real(); + if (!(channel->bitrateTime.tv64)) { + channel->bitrateTime = currentTime; + } else { + readSize = DEVICE_VB_LENGTH - vb_get_write_size(&channel->inVb); + 
dbg("%d bytes received\n", readSize); + diffTime_us = (int)(ktime_us_delta(currentTime, + channel->bitrateTime)); + if (diffTime_us) { + bitrate = (int)((readSize * 8 * USEC_PER_SEC) + / diffTime_us); + } + channel->bitrateTime = currentTime; + dbg("received bitrate for channel[%d] = %dbps\n", + channel_number, bitrate); + } +} +#endif /* DEBUG_BITRATE */ +/*-------------------------------------------------------------------*/ + +static void device_cibulk_complete(struct urb *urb) +{ + dbg("start"); + kfree(urb->transfer_buffer); + usb_free_urb(urb); + dbg("end"); +} /* device_cibulk_complete */ + +static int device_cibulk_send(struct device_s *device, + struct ioctl_data_s *data, + int user_space) +{ + int res; + struct urb *urb; + int size; + int index = -1; + __u8 *ptr; + __u32 todo = data->txSize; + __u8 *userData = data->txData; + + dbg("start"); + + do { + /* get a free bulk message */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + err("alloc urb"); + return -ENOMEM; + } /* if */ + urb->dev = device->usbdev; + + /* allocate bulk data */ + size = device->ciBulk.outMaxPacketSize; + if (todo < size) + size = todo; + urb->transfer_buffer = kmalloc(size, GFP_KERNEL); + if (!urb->transfer_buffer) { + err("alloc transfer buffer"); + usb_free_urb(urb); + return -ENOMEM; + } /* if */ + + /* copy data */ + ptr = urb->transfer_buffer; + res = copyDataFrom(user_space, ptr, userData, size); + +#ifdef TIMESTAMP + if (bSetTimestamps) { + if (index == -1) { + SetTimestamp("urb %x, toSend %d, send %d", + urb, todo, size); + SetTimestamp("cmd 0x%02x", ptr[0]); + } else { + SetTimestamp("urb %x, toSend %d, send %d", + urb, todo, size); + } + } +#endif + + /* first packet, get index */ + if (index == -1) { + if ((ptr[DEVICE_COMMAND_OFFSET] == DEVICE_CMD_INIT) || + (ptr[DEVICE_COMMAND_OFFSET] + == DEVICE_CMD_WRITE_REG) || + (ptr[DEVICE_COMMAND_OFFSET] + == DEVICE_CMD_READ_REG)) { + index = 0; /* register command, no module */ + } else if (ptr[DEVICE_COMMAND_OFFSET] + & 
DEVICE_SEL_MASK) { + index = 1; /* module B */ + } else { + index = 0; /* module A */ + } /* else */ + device->ciBulk.ciData[index].syncDataSize = 0; + device->ciBulk.ciData[index].syncSignal = 0; + } /* if */ + + /* submit bulk */ + urb->pipe = usb_sndbulkpipe(device->usbdev, + DEVICE_BULK_OUT_PIPE); + urb->transfer_buffer_length = size; + urb->complete = device_cibulk_complete; + urb->context = NULL; + dbg_dump("txBuf", urb->transfer_buffer, + urb->transfer_buffer_length); + res = usb_submit_urb(urb, GFP_KERNEL); + if (res < 0) { + err("submit urb res = %d", res); + kfree(urb->transfer_buffer); + usb_free_urb(urb); + return -ENOMEM; + } /* if */ + todo -= size; + userData += size; + } while (todo); + + device->ciBulk.ciData[index].bPendingSend = 1; + dbg("end"); + return index; +} /* device_cibulk_send */ + +static void device_int_complete(struct urb *urb) +{ + unsigned long flags; + struct device_s *device = urb->context; + __u8 *dataToCopy; + int sizeToCopy, SizeReceived; + __u8 isFirstPacket = 0; + __u8 isLastPacket = 0; + __u8 index, i; + __u8 status; + struct message_node_s *message; + + dbg("start"); + + if (urb->status) { + dbg("urb status %d, not submitted again", urb->status); + kfree(urb->transfer_buffer); + usb_free_urb(urb); + for (i = 0; i < DEVICE_NUM_INT_IN_URBS; i++) { + if (device->ciBulk.intUrb[i] == urb) + device->ciBulk.intUrb[i] = NULL; + } + return; + } /* if */ + + spin_lock_irqsave(&device->ciBulk.intUrbLock, flags); + dbg("urb status %d, transfer_buffer_length %d actual_length %d", + urb->status, + urb->transfer_buffer_length, + urb->actual_length); + dataToCopy = urb->transfer_buffer; + SizeReceived = urb->actual_length; + dbg_dump("total rxBuf", dataToCopy, SizeReceived); + + + do { + if (device->ciBulk.intSizeToReceive == 0) { + if (!dataToCopy[DEVICE_STATUS_OFFSET] && + !dataToCopy[DEVICE_LENGTH_MSB_OFFSET] && + !dataToCopy[DEVICE_LENGTH_LSB_OFFSET] && + !dataToCopy[DEVICE_COUNTER_OFFSET]){ + dbg("no data receive"); + 
memset(urb->transfer_buffer, + 0, urb->transfer_buffer_length); + usb_submit_urb(urb, GFP_ATOMIC); + return; + } + /* first packet, read header */ + isFirstPacket = 1; + device->ciBulk.intCurrStatus = + dataToCopy[DEVICE_STATUS_OFFSET] & + DEVICE_CMD_MASK; + if (dataToCopy[DEVICE_STATUS_OFFSET] + & DEVICE_SEL_MASK) { + device->ciBulk.intCurrIndex = 1; /* module B */ + } else { + device->ciBulk.intCurrIndex = 0; /* module A */ + } + if ((device->ciBulk.intCurrStatus == DEVICE_READ_REGOK) + || (device->ciBulk.intCurrStatus + == DEVICE_WRITE_REGOK)) { + device->ciBulk.intSizeToReceive = + dataToCopy[DEVICE_LENGTH_LSB_OFFSET] + + DEVICE_DATA_OFFSET; + } else { + device->ciBulk.intSizeToReceive = + dataToCopy[DEVICE_LENGTH_MSB_OFFSET] * 256 + + dataToCopy[DEVICE_LENGTH_LSB_OFFSET] + + DEVICE_DATA_OFFSET; + } + } /* if */ + + /* get last packet state */ + status = device->ciBulk.intCurrStatus; + index = device->ciBulk.intCurrIndex; + sizeToCopy = device->ciBulk.intSizeToReceive; + if (sizeToCopy > urb->actual_length) { + /* limit size to received buffer size */ + sizeToCopy = urb->actual_length; + } /* if */ else + isLastPacket = 1; + dbg_dump("rxBuf", dataToCopy, sizeToCopy); + +#ifndef FRBIT + if (status == DEVICE_DATAREADY) { + if (device->ciBulk.ciData[index].bPendingSend) + status = DEVICE_DATAREADY_SYNC; + } +#endif + +#ifdef TIMESTAMP + if (device->ciBulk.intSizeToReceive > 2000) + bSetTimestamps = 1; + if (bSetTimestamps) { + SetTimestamp("urb %x,toReceive %d,received %d,toCopy%d", + urb, + device->ciBulk.intSizeToReceive, + SizeReceived, + sizeToCopy); + SetTimestamp("status 0x%02x, camIndex %d, isLast %d", + status, index, isLastPacket); + } +#endif + + switch (status) { + case DEVICE_INITOK: + case DEVICE_READ_REGOK: + case DEVICE_WRITE_REGOK: + index = 0; + case DEVICE_CAMRESETOK: + /*only for debug*/ + if (!dataToCopy[DEVICE_STATUS_OFFSET] && + !dataToCopy[DEVICE_LENGTH_MSB_OFFSET] && + !dataToCopy[DEVICE_LENGTH_LSB_OFFSET] && + 
!dataToCopy[DEVICE_COUNTER_OFFSET]){ + break; + } + case DEVICE_CISOK: + case DEVICE_WRITECOROK: + case DEVICE_NEGOTIATEOK: + case DEVICE_WRITELPDUOK: + case DEVICE_WRITELPDUBUSY: + case DEVICE_READLPDUOK: + case DEVICE_WRITEEXTOK: + case DEVICE_READEXTOK: + case DEVICE_NO_CAM: + case DEVICE_NOK: + case DEVICE_MCARD_WRITEOK: + case DEVICE_CAMPARSE_ERROR: + case DEVICE_CMDPENDING: + case DEVICE_REGSTATUSOK: + case DEVICE_DATAREADY_SYNC: + /* copy partial message */ + spin_lock_irqsave(&device->ciBulk.intLock, + flags); + memcpy(&device->ciBulk.ciData[index]. + syncData[device->ciBulk.ciData[index]. + syncDataSize], + dataToCopy, sizeToCopy); + device->ciBulk.intSizeToReceive -= sizeToCopy; + device->ciBulk.ciData[index].syncDataSize += + sizeToCopy; + spin_unlock_irqrestore(&device->ciBulk.intLock, + flags); + dbg("copied %d bytes at offset %d", sizeToCopy, + device->ciBulk.ciData[index]. + syncDataSize - sizeToCopy); + + if (isLastPacket) { + /* last packet received, sync message */ + device->ciBulk.ciData[index].syncSignal = 1; + wake_up_interruptible(&device->ciBulk. + ciData[index].syncWait); + device->ciBulk.ciData[index].bPendingSend = 0; + dbg("sync signal return %d %d ", + device->ciBulk.ciData[index]. + syncDataSize, index); + } /* if */ + break; + case DEVICE_CAMDET: + case DEVICE_DATAREADY: + case DEVICE_MCARD_READ: + case DEVICE_FRBit: + if (isFirstPacket) { + /* create new async message */ + message = kmalloc(sizeof(struct message_node_s), + GFP_ATOMIC); + if (!message) { + err("cannot allocate async message"); + break; + } + memset(message, + 0, sizeof(struct message_node_s)); + list_add_tail(&message->node, + &device->ciBulk.ciData[index]. + asyncDataList); + } /* if */ + else { + /* get tail message */ + message = list_entry((device->ciBulk. 
+ ciData[index].asyncDataList.prev), + struct message_node_s, node); + } /* else */ + + /* copy partial message */ + spin_lock_irqsave(&device->ciBulk.intLock, flags); + memcpy(&message->data[message->size], + dataToCopy, sizeToCopy); + device->ciBulk.intSizeToReceive -= sizeToCopy; + message->size += sizeToCopy; + spin_unlock_irqrestore(&device->ciBulk.intLock, flags); + dbg("async copied %d bytes at offset %d", sizeToCopy, + message->size - sizeToCopy); + + if (isLastPacket) { + /* last packet received, signal async message */ + wake_up_interruptible(&device->ciBulk. + ciData[index].asyncWait); + dbg("async signal %d", index); + } /* if */ + break; + case DEVICE_GPIOCHANGE: + info("GPIO change %x %x %x", + status, dataToCopy[4], dataToCopy[5]); + device->ciBulk.intSizeToReceive -= sizeToCopy; + break; + default: + err("unknown status 0x%2x", status); + break; + } /* switch */ + dataToCopy += sizeToCopy; + SizeReceived -= sizeToCopy; + + } while (SizeReceived > 0); + + memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); + usb_submit_urb(urb, GFP_ATOMIC); + +#ifdef TIMESTAMP + /*if (bSetTimestamps) { + SetTimestamp("urb %x submitted", urb); + }*/ +#endif + + spin_unlock_irqrestore(&device->ciBulk.intUrbLock, flags); + + dbg("end"); +} /* device_int_complete */ + +static int device_wait_sync_data(struct device_s *device, + __u8 index, + struct ioctl_data_s *data, + int user_space) +{ + unsigned long flags; + int ret; + + dbg("start %d", index); + + spin_lock_irqsave(&device->ciBulk.intLock, flags); + while (device->ciBulk.ciData[index].syncSignal == 0) { + /* nothing to copy */ + spin_unlock_irqrestore(&device->ciBulk.intLock, flags); + if (wait_event_interruptible(device->ciBulk. 
+ ciData[index].syncWait, + device->ciBulk.ciData[index].syncSignal)) { + device->ciBulk.ciData[index].bPendingSend = 0; + err("interrupt"); + return -ERESTARTSYS; + /* signal: tell the fs layer to handle it */ + } /* if */ + /* otherwise loop, but first reacquire the lock */ + spin_lock_irqsave(&device->ciBulk.intLock, flags); + } /* while */ + + /* copy packet to user space buffer */ + if (device->ciBulk.ciData[index].syncDataSize < data->rxSize) + /* truncate returned message against user buffer size */ + data->rxSize = device->ciBulk.ciData[index].syncDataSize; + spin_unlock_irqrestore(&device->ciBulk.intLock, flags); + /* release the lock */ + ret = copyDataTo(user_space, + data->rxData, + device->ciBulk.ciData[index].syncData, data->rxSize); + dbg_dump("userMsg", + device->ciBulk.ciData[index].syncData, data->rxSize); + dbg("userRet %d", data->rxSize); + device->ciBulk.ciData[index].syncDataSize = 0; + device->ciBulk.ciData[index].syncSignal = 0; + + dbg("end"); + return 0; +} /* device_wait_sync_data */ + +static int device_wait_async_data(struct device_s *device, + __u8 index, + struct rw_data_s *data, + int user_space) +{ + struct list_head *item; + struct message_node_s *message; + unsigned long flags; + int ret; + + dbg("start %d", index); + + if ((device->askToRelease) || (device->askToSuspend)) { + err("ask to release or ask to suspend"); + return -EINTR; /* device close interrupt */ + } /* if */ + + if (index >= DEVICE_NUM_CAM) { + err("bad index(%d)", index); + return -EINVAL; + } + + spin_lock_irqsave(&device->ciBulk.intLock, flags); + while (list_empty(&device->ciBulk.ciData[index].asyncDataList)) { + /* nothing to copy */ + spin_unlock_irqrestore(&device->ciBulk.intLock, flags); + /* release the lock */ + if (wait_event_interruptible(device->ciBulk. + ciData[index].asyncWait, + device->askToRelease || + device->askToSuspend || + (!list_empty(&device->ciBulk. 
+ ciData[index].asyncDataList)))) { + err("interrupt"); + return -ERESTARTSYS; + /* signal: tell the fs layer to handle it */ + } /* if */ + if ((device->askToRelease) || (device->askToSuspend)) { + err("ask to release or ask to suspend"); + return -EINTR; /* device close interrupt */ + } /* if */ + /* otherwise loop, but first reacquire the lock */ + spin_lock_irqsave(&device->ciBulk.intLock, flags); + } /* while */ + + /* ok, data is there, return first item */ + item = device->ciBulk.ciData[index].asyncDataList.next; + message = list_entry(item, struct message_node_s, node); + if (message->size < data->size) { + /* truncate returned message against user buffer size */ + data->size = message->size; + } /* if */ + spin_unlock_irqrestore(&device->ciBulk.intLock, flags); + /* release the lock */ + ret = copyDataTo(user_space, data->data, message->data, data->size); + dbg_dump("userMsg", message->data, data->size); + dbg("userRet %d", data->size); + list_del(item); + kfree(message); + + dbg("end"); + return 0; +} /* device_wait_async_data */ + +static int device_start_intr(struct device_s *device) +{ + __u8 i, j; + struct urb *urb; + + dbg("start"); + + for (i = 0; i < DEVICE_NUM_INT_IN_URBS; i++) { + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + err("alloc urb"); + return -ENOMEM; + } /* if */ + urb->transfer_buffer = + kmalloc(device->ciBulk.inMaxPacketSize, GFP_KERNEL); + if (!urb->transfer_buffer) { + err("alloc transfer buffer"); + usb_free_urb(urb); + urb = NULL; + return -ENOMEM; + } /* if */ + + urb->dev = device->usbdev; + urb->pipe = usb_rcvintpipe(device->usbdev, DEVICE_INT_IN_PIPE); + urb->transfer_buffer_length = device->ciBulk.inMaxPacketSize; + urb->complete = device_int_complete; + urb->context = device; + urb->interval = 1; + device->ciBulk.intUrb[i] = urb; + for (j = 0; j < DEVICE_NUM_CAM; j++) { + init_waitqueue_head(&device->ciBulk.ciData[j].syncWait); + init_waitqueue_head(&device->ciBulk. 
+ ciData[j].asyncWait); + } /* for */ + usb_submit_urb(device->ciBulk.intUrb[i], GFP_KERNEL); + } + + dbg("end"); + return 0; +} /* device_start_intr */ + +static void device_stop_intr(struct device_s *device) +{ + struct list_head *item; + struct list_head *tmp; + struct message_node_s *message; + int i, j; + + dbg("start"); + + for (i = 0; i < DEVICE_NUM_INT_IN_URBS; i++) { + if (!device->ciBulk.intUrb[i]) + break; + usb_unlink_urb(device->ciBulk.intUrb[i]); + device->ciBulk.intUrb[i] = NULL; + for (j = 0; j < DEVICE_NUM_CAM; j++) { + for (item = device->ciBulk.ciData[j].asyncDataList.next; + item != &device->ciBulk.ciData[j].asyncDataList; + ) { + message = list_entry(item, + struct message_node_s, node); + tmp = item->next; + list_del(item); + kfree(item); + item = tmp; + } /* for */ + } /* for */ + dbg("unlink urb"); + } + + dbg("end"); +} /* device_stop_intr */ + +static void device_iso_in_complete(struct urb *urb) +{ + unsigned long flags; + struct ts_channel_s *channel = urb->context; + __u8 i; + __u8 *data; + + /*dbg("start");*/ + + if (urb->status) { + dbg("urb status %d, not submitted again", urb->status); + kfree(urb->transfer_buffer); + usb_free_urb(urb); + return; + } /* if */ + + spin_lock_irqsave(&channel->inLock, flags); + for (i = 0; i < urb->number_of_packets; i++) { + data = urb->transfer_buffer + urb->iso_frame_desc[i].offset; + if (!urb->iso_frame_desc[i].status && + (urb->iso_frame_desc[i].actual_length > 0)) { + if (vb_get_write_size(&channel->inVb) + >= urb->iso_frame_desc[i].actual_length) { + vb_write(&channel->inVb, data, + urb->iso_frame_desc[i].actual_length); + } /* if */ + else { + err("video buffer is full, packet loss %d", + urb->iso_frame_desc[i].actual_length); + } /* else */ + } /* if */ + else { + err("frame rejected, status %x, actual_length %d bytes", + urb->iso_frame_desc[i].status, + urb->iso_frame_desc[i].actual_length); + } + } /* for */ + spin_unlock_irqrestore(&channel->inLock, flags); + + if (!channel->inVb.isEmpty) + 
wake_up_interruptible(&channel->inWait); + + memset(urb->transfer_buffer, + 0, DEVICE_ISOC_LENGTH(channel->maxPacketSize)); + urb->transfer_buffer_length = + DEVICE_ISOC_LENGTH(channel->maxPacketSize); + urb->number_of_packets = DEVICE_NUM_FRAMES_PER_URB; + urb->complete = device_iso_in_complete; + urb->context = channel; + urb->transfer_flags = URB_ISO_ASAP; + urb->interval = 1; + for (i = 0; i < DEVICE_NUM_FRAMES_PER_URB; i++) { + urb->iso_frame_desc[i].offset = i * channel->maxPacketSize; + urb->iso_frame_desc[i].length = channel->maxPacketSize; + } /* for */ + usb_submit_urb(urb, GFP_ATOMIC); + + /*dbg("end");*/ +} /* device_iso_in_complete */ + +static void device_tsbulk_in_complete(struct urb *urb) +{ + unsigned long flags; + struct ts_channel_s *channel = urb->context; + __u8 *data; +#ifdef DEBUG_CONTINUITY + unsigned int i; +#endif + + /*dbg("start");*/ + + if (urb->status) { + err("urb status %d(%x), not submitted again", + urb->status, urb->status); + kfree(urb->transfer_buffer); + usb_free_urb(urb); + return; + } /* if */ + + spin_lock_irqsave(&channel->inLock, flags); + data = urb->transfer_buffer; + +#ifdef DEBUG_CONTINUITY + i = 0; + /* check synchro byte*/ + while (i < urb->actual_length) { + if (!((data[i] == DEVICE_MPEG2_SYNC_BYTE) && + (data[i+DEVICE_MPEG2_PACKET_SIZE] + == DEVICE_MPEG2_SYNC_BYTE))) { + i++; + } else { + /* Synchro find*/ + break; + } + } + + /* Synchro Ok, check discontinuity*/ + while (i < urb->actual_length) { + if (dbg_cc(&data[i]) < 0) { + dbg("(actual_length= %d i=%d pkt=%d)", + urb->actual_length, + i, + i/DEVICE_MPEG2_PACKET_SIZE); + dbg("%02x %02x %02x %02x %02x %02x %02x %02x\n", + data[i-4], data[i-3], data[i-2], data[i-1], + data[i], data[i+1], data[i+2], data[i+3]); + } + i += DEVICE_MPEG2_PACKET_SIZE; + } +#endif + + if (urb->actual_length) { + channel->nbByteRead += urb->actual_length; + if (vb_get_write_size(&channel->inVb) >= urb->actual_length) + vb_write(&channel->inVb, data, urb->actual_length); + else + 
err("video buffer is full, packet loss %d", + urb->actual_length); + } else { + /*warn("receive size of 0\n");*/ + } + + spin_unlock_irqrestore(&channel->inLock, flags); + /* dbg("urb->actual_length=%d",urb->actual_length);*/ + /* info("urb->actual_length=%d\n",urb->actual_length);*/ + + if (!channel->inVb.isEmpty) + wake_up_interruptible(&channel->inWait); + kfree(urb->transfer_buffer); + usb_free_urb(urb); + /*dbg("end");*/ +} /* device_tsbulk_in_complete */ + +static int device_fill_ts(struct device_s *device, + __u8 index, + struct rw_data_s *data) +{ + unsigned long flags; + __u32 copiedSize; + struct ts_channel_s *channel = &device->channel[index]; + + /*dbg("start");*/ + + spin_lock_irqsave(&channel->inLock, flags); + do { + while (channel->inVb.isEmpty) { + /* nothing to copy */ + spin_unlock_irqrestore(&channel->inLock, flags); + /* release the lock */ + if (wait_event_interruptible(channel->inWait, + device->askToRelease || + device->askToSuspend || + (!channel->inVb.isEmpty))) { + err("interrupt"); + return -ERESTARTSYS; + /* signal: tell the fs layer to handle it */ + } /* if */ + if ((device->askToRelease) || (device->askToSuspend)) { + err("ask to release or ask to suspend"); + return -EINTR; /* device close interrupt */ + } /* if */ + /* otherwise loop, but first reacquire the lock */ + spin_lock_irqsave(&channel->inLock, flags); + } /* while */ + + spin_unlock_irqrestore(&channel->inLock, flags); + + copiedSize = vb_read_next(&channel->inVb, + &data->data[data->copiedSize]); + if (copiedSize) { + /*dbg("copied %d bytes in buffer 0x%p, offset %d", + copiedSize, data->data, data->copiedSize);*/ + data->copiedSize += copiedSize; + } /* if */ + spin_lock_irqsave(&channel->inLock, flags); + } while ((data->copiedSize+DEVICE_MPEG2_PACKET_SIZE) <= data->size); + /* buffer not full */ + + spin_unlock_irqrestore(&channel->inLock, flags); + +#ifdef DEBUG_BITRATE + print_bitrate(channel, index); +#endif + + /*dbg("end, buffer 0x%p", data->data);*/ + return 0; 
+} /* device_fill_ts */ + +static int device_start_iso_in(struct device_s *device, __u8 index) +{ + int i, j; + int ret = 0; + struct urb *urb; + + /*dbg("start");*/ + +#ifdef DEBUG_BITRATE + device->channel[index].bitrateTime = ktime_set(0, 0); +#endif + for (i = 0; i < DEVICE_NUM_ISOC_IN_URBS; i++) { + urb = usb_alloc_urb(DEVICE_NUM_FRAMES_PER_URB, GFP_KERNEL); + device->channel[index].isocInUrb[i] = urb; + if (urb) { + /*urb->transfer_buffer = + kmalloc(DEVICE_ISOC_LENGTH, GFP_KERNEL);*/ + urb->transfer_buffer = + kmalloc(DEVICE_ISOC_LENGTH( + device->channel[index].maxPacketSize), + GFP_KERNEL); + if (!urb->transfer_buffer) { + ret = -ENOMEM; + err("transfer_buffer allocation failed %d", i); + break; + } /* if */ + } /* if */ else { + ret = -ENOMEM; + err("usb_alloc_urb failed %d", i); + break; + } /* if */ + } /* for */ + + if (ret) { + /* Allocation error, must free already allocated data */ + for (i = 0; i < DEVICE_NUM_ISOC_IN_URBS; i++) { + urb = device->channel[index].isocInUrb[i]; + if (urb) { + kfree(urb->transfer_buffer); + if (urb->transfer_buffer) + urb->transfer_buffer = NULL; + usb_free_urb(urb); + device->channel[index].isocInUrb[i] = NULL; + } /* if */ + } /* for */ + return ret; + } /* if */ + + for (i = 0; i < DEVICE_NUM_ISOC_IN_URBS; i++) { + urb = device->channel[index].isocInUrb[i]; + memset(urb->transfer_buffer, + 0, + DEVICE_ISOC_LENGTH( + device->channel[index].maxPacketSize)); + urb->transfer_buffer_length = + DEVICE_ISOC_LENGTH( + device->channel[index].maxPacketSize); + urb->number_of_packets = DEVICE_NUM_FRAMES_PER_URB; + urb->complete = device_iso_in_complete; + urb->context = &device->channel[index]; + urb->dev = device->usbdev; + urb->pipe = usb_rcvisocpipe( + device->usbdev, DEVICE_TS_IN_PIPE + index); + urb->transfer_flags = URB_ISO_ASAP; + urb->interval = 1; + for (j = 0; j < DEVICE_NUM_FRAMES_PER_URB; j++) { + urb->iso_frame_desc[j].offset = + j * device->channel[index].maxPacketSize; + urb->iso_frame_desc[j].length = + 
device->channel[index].maxPacketSize; + } /* for */ + } /* for */ + for (i = 0; i < DEVICE_NUM_ISOC_IN_URBS; i++) + usb_submit_urb(device->channel[index].isocInUrb[i], GFP_KERNEL); + + /*dbg("end");*/ + return 0; +} /* device_start_iso_in */ + +static void device_stop_iso_in(struct device_s *device, __u8 index) +{ + int i; + + /*dbg("start");*/ + + for (i = 0; i < DEVICE_NUM_ISOC_IN_URBS; i++) { + if (device->channel[index].isocInUrb[i]) { + usb_unlink_urb(device->channel[index].isocInUrb[i]); + device->channel[index].isocInUrb[i] = NULL; + dbg("unlink urb %i", i); + } /* if */ + } /* for */ + + /*dbg("end");*/ +} /* device_stop_iso_in */ + +static int device_start_tsbulk_in(struct device_s *device, __u8 index) +{ + struct urb *urb; + + dbg("start"); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (urb) { + urb->transfer_buffer = kmalloc(3072, GFP_KERNEL); + if (!urb->transfer_buffer) { + err("transfer_buffer allocation failed"); + usb_free_urb(urb); + return -ENOMEM; + } /* if */ + } /* if */ else { + err("usb_alloc_urb failed"); + return -ENOMEM; + } /* if */ + device->channel[index].bulkInUrb = urb; + memset(urb->transfer_buffer, 0, 3072); + urb->transfer_buffer_length = 3072; + + urb->complete = device_tsbulk_in_complete; + urb->context = &device->channel[index]; + urb->dev = device->usbdev; + urb->pipe = usb_rcvbulkpipe(device->usbdev, DEVICE_TS_IN_PIPE + index); + usb_submit_urb(device->channel[index].bulkInUrb, GFP_KERNEL); + + dbg("end"); + return 0; +} /* device_start_tsbulk_in */ + +static void device_stop_tsbulk_in(struct device_s *device, __u8 index) +{ + dbg("start"); + + if (device->channel[index].bulkInUrb) { + usb_unlink_urb(device->channel[index].bulkInUrb); + device->channel[index].bulkInUrb = NULL; + dbg("unlink urb"); + } /* if */ + + dbg("end"); +} /* device_stop_tsbulk_in */ + +static void device_iso_out_complete(struct urb *urb) +{ + struct ts_channel_s *channel = urb->context; + struct urb *tmpUrb; + int i; + int ret = 0; + + /*dbg("start");*/ 
+ +/*dbg_dump("txBuf", urb->transfer_buffer, urb->transfer_buffer_length);*/ + + if (urb->status || channel->outStop) { + /* error, free all coming urbs */ + err("free urb"); + channel->outStop = 1; + atomic_dec(&channel->numOutUrbs); + kfree(urb->transfer_buffer); + usb_free_urb(urb); + return; + } /* if */ + + for (i = 0; i < DEVICE_NUM_ISOC_OUT_URBS; i++) { + if (urb == channel->isocOutUrb[i]) + break; + } /* for */ + if (i == DEVICE_NUM_ISOC_OUT_URBS) { + /* urb must be deleted */ + kfree(urb->transfer_buffer); + usb_free_urb(urb); + } /* if */ + + if (atomic_dec_and_test(&channel->numOutUrbs)) { + /* get next free urb */ + tmpUrb = channel->isocOutUrb[channel->nextFreeOutUrbIndex++]; + if (channel->nextFreeOutUrbIndex == DEVICE_NUM_ISOC_OUT_URBS) + channel->nextFreeOutUrbIndex = 0; + + /* reinitialize urb with null packets */ + memset(tmpUrb->transfer_buffer, + 0xCD, DEVICE_ISOC_LENGTH(channel->maxPacketSize)); + for (i = 0; + i < DEVICE_ISOC_LENGTH(channel->maxPacketSize); + i += DEVICE_MPEG2_PACKET_SIZE) { + memcpy(tmpUrb->transfer_buffer+i, + nullHeader, sizeof(nullHeader)); + } /* for */ + tmpUrb->transfer_buffer_length = + DEVICE_ISOC_LENGTH(channel->maxPacketSize); + tmpUrb->number_of_packets = DEVICE_NUM_FRAMES_PER_URB; + tmpUrb->complete = device_iso_out_complete; + tmpUrb->context = channel; + tmpUrb->transfer_flags = URB_ISO_ASAP; + tmpUrb->interval = 1; + for (i = 0; i < DEVICE_NUM_FRAMES_PER_URB; i++) { + tmpUrb->iso_frame_desc[i].offset = + i * channel->maxPacketSize; + tmpUrb->iso_frame_desc[i].length = + channel->maxPacketSize; + } /* for */ + + /* submit urb */ + ret = usb_submit_urb(tmpUrb, GFP_ATOMIC); + if (ret) + err("usb_submit_urb failed %d", ret); + + atomic_inc(&channel->numOutUrbs); + } /* if */ + + /*dbg("end");*/ +} /* device_iso_out_complete */ + +static int device_tsiso_send(struct device_s *device, + __u8 index, __u8 *data, int size) +{ + int i, j; + struct urb **urb; + __u32 numUrbs; + int ret = 0; + + /* dbg("start");*/ + + 
numUrbs = + size / DEVICE_ISOC_LENGTH(device->channel[index].maxPacketSize); + urb = kmalloc(numUrbs * sizeof(struct urb *), GFP_KERNEL); + if (!urb) { + err("urb array allocation failed %d", numUrbs); + return -ENOMEM; + + } /* if */ + memset(urb, 0, numUrbs * sizeof(struct urb *)); + + for (i = 0; i < numUrbs; i++) { + urb[i] = usb_alloc_urb(DEVICE_NUM_FRAMES_PER_URB, GFP_KERNEL); + if (urb[i]) { + urb[i]->transfer_buffer = + kmalloc(DEVICE_ISOC_LENGTH( + device->channel[index].maxPacketSize), + GFP_KERNEL); + if (!urb[i]->transfer_buffer) { + ret = -ENOMEM; + err("transfer_buffer allocation failed %d", i); + break; + } /* if */ + } /* if */ + else { + ret = -ENOMEM; + err("usb_alloc_urb failed %d", i); + break; + } /* if */ + } /* for */ + if (ret) { + /* Allocation error, must free already allocated data */ + for (i = 0; i < numUrbs; i++) { + if (urb[i]) { + kfree(urb[i]->transfer_buffer); + urb[i]->transfer_buffer = NULL; + usb_free_urb(urb[i]); + urb[i] = NULL; + } /* if */ + } /* for */ + kfree(urb); + return ret; + } /* if */ + + for (i = 0; i < numUrbs; i++) { + ret = copy_from_user(urb[i]->transfer_buffer, + &data[i*DEVICE_ISOC_LENGTH( + device->channel[index].maxPacketSize)], + DEVICE_ISOC_LENGTH( + device->channel[index].maxPacketSize)); + urb[i]->transfer_buffer_length = DEVICE_ISOC_LENGTH( + device->channel[index].maxPacketSize); + urb[i]->number_of_packets = DEVICE_NUM_FRAMES_PER_URB; + urb[i]->complete = device_iso_out_complete; + urb[i]->context = &device->channel[index]; + urb[i]->dev = device->usbdev; + urb[i]->pipe = usb_sndisocpipe(device->usbdev, + DEVICE_TS_OUT_PIPE + index); + urb[i]->transfer_flags = URB_ISO_ASAP; + urb[i]->interval = 1; + for (j = 0; j < DEVICE_NUM_FRAMES_PER_URB; j++) { + urb[i]->iso_frame_desc[j].offset = + j * device->channel[index].maxPacketSize; + urb[i]->iso_frame_desc[j].length = + device->channel[index].maxPacketSize; + } /* for */ + } /* for */ + + atomic_add(numUrbs, &device->channel[index].numOutUrbs); + for (i 
= 0; i < numUrbs; i++) { + ret = usb_submit_urb(urb[i], GFP_ATOMIC); + if (ret) + err("usb_submit_urb failed %d", ret); + } /* for */ + + kfree(urb); + + /* dbg("end");*/ + return size; +} /* device_tsiso_send */ + +static int device_start_iso_out(struct device_s *device, __u8 index) +{ + int i, j; + int ret = 0; + struct urb *urb; + + /*dbg("start");*/ + + device->channel[index].outStop = 0; + for (i = 0; i < DEVICE_NUM_ISOC_OUT_URBS; i++) { + urb = usb_alloc_urb(DEVICE_NUM_FRAMES_PER_URB, GFP_KERNEL); + device->channel[index].isocOutUrb[i] = urb; + if (urb) { + urb->transfer_buffer = kmalloc(DEVICE_ISOC_LENGTH( + device->channel[index].maxPacketSize), + GFP_KERNEL); + if (!urb->transfer_buffer) { + ret = -ENOMEM; + err("transfer_buffer allocation failed %d", i); + break; + } /* if */ + } /* if */ else { + ret = -ENOMEM; + err("usb_alloc_urb failed %d", i); + break; + } /* if */ + } /* for */ + + if (ret) { + /* Allocation error, must free already allocated data */ + for (i = 0; i < DEVICE_NUM_ISOC_OUT_URBS; i++) { + urb = device->channel[index].isocOutUrb[i]; + if (urb) { + kfree(urb->transfer_buffer); + urb->transfer_buffer = NULL; + usb_free_urb(urb); + device->channel[index].isocOutUrb[i] = NULL; + } /* if */ + } /* for */ + return ret; + } /* if */ + + for (i = 0; i < DEVICE_NUM_ISOC_OUT_URBS; i++) { + urb = device->channel[index].isocOutUrb[i]; + memset(urb->transfer_buffer, 0, DEVICE_ISOC_LENGTH( + device->channel[index].maxPacketSize)); + for (j = 0; + j < DEVICE_ISOC_LENGTH( + device->channel[index].maxPacketSize); + j += DEVICE_MPEG2_PACKET_SIZE) { + memcpy(urb->transfer_buffer+j, + nullHeader, sizeof(nullHeader)); + } /* for */ + urb->transfer_buffer_length = DEVICE_ISOC_LENGTH( + device->channel[index].maxPacketSize); + urb->number_of_packets = DEVICE_NUM_FRAMES_PER_URB; + urb->complete = device_iso_out_complete; + urb->context = &device->channel[index]; + urb->dev = device->usbdev; + urb->pipe = usb_sndisocpipe(device->usbdev, + DEVICE_TS_OUT_PIPE + 
index); + urb->transfer_flags = URB_ISO_ASAP; + urb->interval = 1; + for (j = 0; j < DEVICE_NUM_FRAMES_PER_URB; j++) { + urb->iso_frame_desc[j].offset = + j * device->channel[index].maxPacketSize; + urb->iso_frame_desc[j].length = + device->channel[index].maxPacketSize; + } /* for */ + } /* for */ + device->channel[index].nextFreeOutUrbIndex = DEVICE_NUM_ISOC_OUT_URBS-1; + atomic_set(&device->channel[index].numOutUrbs, 1); + for (i = 0; i < DEVICE_NUM_ISOC_OUT_URBS-1; i++) { + ret = usb_submit_urb( + device->channel[index].isocOutUrb[i], GFP_KERNEL); + if (ret) + err("usb_submit_urb failed %d", ret); + } /* for */ + + /*dbg("end");*/ + return 0; +} /* device_start_iso_out */ + +static void device_stop_iso_out(struct device_s *device, __u8 index) +{ + dbg("start"); + + device->channel[index].outStop = 1; + + dbg("end"); +} /* device_stop_iso_out */ + +static void device_tsbulk_complete(struct urb *urb) +{ + struct device_s *device = urb->context; + __u8 index = 0; + + /*dbg("start");*/ + if (!urb->status) { + if (usb_endpoint_num(&(urb->ep->desc)) != DEVICE_TS_OUT_PIPE) + index = 1; + device->channel[index].nbByteSend += urb->actual_length; + } + kfree(urb->transfer_buffer); + usb_free_urb(urb); + /*dbg("end");*/ +} /* device_tsbulk_complete */ + +static int device_tsbulk_send(struct device_s *device, + __u8 index, __u8 *data, int size) +{ + struct urb *urb; + /* int todo = size;*/ + int ret = 0; + + dbg("start"); + + /* get a free bulk message */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + err("alloc urb"); + return -ENOMEM; + } /* if */ + urb->dev = device->usbdev; + + /* allocate bulk data */ + urb->transfer_buffer = kmalloc(size, GFP_KERNEL); + if (!urb->transfer_buffer) { + err("alloc transfer buffer"); + usb_free_urb(urb); + return -ENOMEM; + } /* if */ + + /* copy data */ + ret = copy_from_user(urb->transfer_buffer, data, size); + + /* submit bulk */ + urb->pipe = usb_sndbulkpipe(device->usbdev, DEVICE_TS_OUT_PIPE + index); + 
urb->transfer_buffer_length = size; + urb->complete = device_tsbulk_complete; + urb->context = device; + /*dbg("Transmit %d bytes\n",urb->transfer_buffer_length);*/ + /*dbg_dump("txBuf", + urb->transfer_buffer, urb->transfer_buffer_length);*/ + mod_timer(&(gbulk_timer[index].StartBulkReadTimer), + usecs_to_jiffies(50)); + + if (usb_submit_urb(urb, GFP_KERNEL) < 0) { + err("submit urb"); + kfree(urb->transfer_buffer); + usb_free_urb(urb); + return -ENOMEM; + } /* if */ + + dbg("end"); + return 0; +} /* device_tsbulk_send */ + +/* ---------------------------------------------------------- */ +static void StartBulkRead_func(struct timer_list * timer) +{ + struct bulk_timer_s *bulk_time = from_timer(bulk_time,timer,StartBulkReadTimer) ; + + device_start_tsbulk_in(bulk_time->device, bulk_time->index); +} + +static int device_drv_open(struct device_s *device) +{ + int index; + mutex_lock(&device->lock); + + if (!device->usbdev) { + err("no dev, can not start dev"); + mutex_unlock(&device->lock); + return -ENODEV; + } + + if (device->opened) { + mutex_unlock(&device->lock); + device->opened++; + info("udev=%p opened=%d", (device->usbdev), device->opened); + return 0; + } /* while */ + + info("set interface 0"); + if (usb_set_interface(device->usbdev, 0, 0) < 0) { + mutex_unlock(&device->lock); + err("set_interface fail"); + return -EINVAL; + } /* if */ + + device->opened++; + device->askToRelease = 0; + mutex_unlock(&device->lock); + + for (index = 0; index < DEVICE_NUM_CAM; index++) { + device->channel[index].nbByteSend = -376; + device->channel[index].nbByteRead = 0; + device->channel[index].FirstTransfer = true; + gbulk_timer[index].device = device; + gbulk_timer[index].index = index; + timer_setup(&gbulk_timer[index].StartBulkReadTimer, + StartBulkRead_func,0); + } +#ifdef DEBUG_CONTINUITY + init_tab_cc(); +#endif + info("udev=%p opened=%d", (device->usbdev), device->opened); + return 0; +} + +static int device_ci_unlock_read(struct device_s *device) +{ + if 
(device->opened) { + /* release blocking functions */ + device->askToRelease = 1; + wake_up_interruptible(&device->ciBulk.ciData[0].asyncWait); + wake_up_interruptible(&device->ciBulk.ciData[1].asyncWait); + wake_up_interruptible(&device->channel[0].inWait); + wake_up_interruptible(&device->channel[1].inWait); + } + return 0; +} + +static int device_drv_close(struct device_s *device) +{ + int i; + mutex_lock(&device->lock); + if (device->opened && ((--device->opened) == 0)) { + device->askToRelease = 1; + device_stop_intr(device); + for (i = 0; i < DEVICE_NUM_CAM; i++) { + if (device->useIsoc) { + device_stop_iso_out(device, i); + device_stop_iso_in(device, i); + } else + device_stop_tsbulk_in(device, i); + } /* for */ + device->opened = 0; + } /* if */ + mutex_unlock(&device->lock); + return 0; +} + +static int device_open(struct inode *inode, struct file *file) +{ + int devnum = iminor(inode); +#ifdef DEBUG + int type = (MINOR(inode->i_rdev) >> 4); + int num = (MINOR(inode->i_rdev) & 0xf); +#endif + int ret = 0; + + struct device_s *device; + + dbg("start, devnum = %d type = %d num = %d", devnum, type, num); + + if (gdeviceNumber >= DRIVER_MAX_NUMBER) { + dbg("only support one device open"); + return -EINVAL; + } + device = &gdevice[gdeviceNumber]; + /*gdeviceNumber++;*/ + + ktime_get_ts(&gStart); + + ret = device_drv_open(device); + if (ret < 0) + return ret; + + file->f_pos = 0; + file->private_data = device; + + dbg("end"); + return nonseekable_open(inode, file); +} /* device_open */ + +static int device_release(struct inode *inode, struct file *file) +{ + struct device_s *device = (struct device_s *)file->private_data; + + dbg("start"); + device_drv_close(device); + dbg("end"); + return 0; +} /* device_release */ + +int cimax_usb_select_interface(struct device_s *device, unsigned long intf) +{ + int max = 0; + int mult = 0; + int ret = 0; + + info("set interface %ld", intf); + if (usb_set_interface(device->usbdev, 0, intf) < 0) { + err("set_interface failed 
interface 0, altSetting %ld", intf); + return -EINVAL; + } /* if */ + + /* check endpoints */ + /* CI bulk out */ + if (!usb_endpoint_is_bulk_out( + &device->usbdev->ep_out[DEVICE_BULK_OUT_PIPE]->desc)) { + err("unexpected endpoint %d", DEVICE_BULK_OUT_PIPE); + return -EINVAL; + } /* if */ + device->ciBulk.outMaxPacketSize = DEVICE_BULK_OUT_MAXPACKET; + dbg("CI bulk out (endpoint %d), packet size %d", DEVICE_BULK_OUT_PIPE, + device->ciBulk.outMaxPacketSize); + /* CI int in */ + if (!usb_endpoint_is_int_in( + &device->usbdev->ep_in[DEVICE_INT_IN_PIPE]->desc)) { + err("unexpected endpoint %d", DEVICE_INT_IN_PIPE); + return -EINVAL; + } /* if */ + device->ciBulk.inMaxPacketSize = + device->usbdev->ep_in[DEVICE_INT_IN_PIPE]->desc.wMaxPacketSize; + dbg("CI int in (endpoint %d), packet size %d", DEVICE_INT_IN_PIPE, + device->ciBulk.inMaxPacketSize); + /* TS out */ + if (device->usbdev->ep_out[DEVICE_TS_OUT_PIPE] == NULL) + dbg("no TS endpoint"); + else { + if (usb_endpoint_is_bulk_out( + &device->usbdev->ep_out[DEVICE_TS_OUT_PIPE]->desc)) { + device->useIsoc = 0; + dbg("TS is configured as bulk"); + } else if (usb_endpoint_is_isoc_out( + &device->usbdev->ep_out[DEVICE_TS_OUT_PIPE]->desc)) { + device->useIsoc = 1; + dbg("TS is configured as isochronous"); + } else { + err("unexpected endpoint %d", DEVICE_TS_OUT_PIPE); + return -EINVAL; + } /* if */ + max = device->usbdev-> + ep_out[DEVICE_TS_OUT_PIPE]->desc.wMaxPacketSize; + mult = 1 + ((max >> 11) & 0x03); + max &= 0x7ff; + device->channel[0].maxPacketSize = max * mult; + dbg("TS out (endpoint %d), packet size %d", DEVICE_TS_OUT_PIPE, + device->channel[0].maxPacketSize); + + max = device->usbdev-> + ep_out[DEVICE_TS_OUT_PIPE+1]->desc.wMaxPacketSize; + mult = 1 + ((max >> 11) & 0x03); + max &= 0x7ff; + device->channel[1].maxPacketSize = max * mult; + dbg("TS out (endpoint %d), packet size %d", + DEVICE_TS_OUT_PIPE + 1, + device->channel[1].maxPacketSize); + } + + /* start intr urb */ + if (device->ciBulk.intUrb[0] == 
NULL) { + ret = device_start_intr(device); + if (ret < 0) { + err("cannot start int urb"); + return ret; + } /* if */ + } /* if */ + + return ret; +} +EXPORT_SYMBOL(cimax_usb_select_interface); + +static int device_ci_write(struct device_s *device, + struct ioctl_data_s *data, int isIoctl) +{ + int ret = 0; + if (!device) + return -ENODEV; + ret = device_cibulk_send(device, data, isIoctl); + if (ret < 0) + return ret; + return device_wait_sync_data(device, ret, data, isIoctl); +} + +static int device_ci_write_ioctl(struct device_s *device, + struct ioctl_data_s *data) +{ + return device_ci_write(device, data, 1); +} + +int cimax_usb_ci_write(struct device_s *device, + u8 *txData, int txSize, u8 *rxData, int rxSize) +{ + struct ioctl_data_s data; + if (!device) + return -ENODEV; + memset(&data, 0, sizeof(data)); + data.txData = txData; + data.txSize = txSize; + data.rxData = rxData; + data.rxSize = rxSize; + return device_ci_write(device, &data, 0); +} +EXPORT_SYMBOL(cimax_usb_ci_write); + +int cimax_usb_ci_read_evt(struct device_s *device, + int moduleId, u8 *buf, int size) +{ + int ret = 0; + struct rw_data_s data; + if (!device || !device->opened) + return -ENODEV; + memset(&data, 0, sizeof(data)); + data.moduleId = moduleId; + data.data = buf; + data.size = size; + ret = device_wait_async_data(device, data.moduleId, &data, 0); + if (ret < 0) { + err("wait ci read failed"); + return ret; + } /* if */ + dbg("return CI, moduleId %d, data 0x%p, size %d", + data.moduleId, data.data, data.size); + return ret; +} +EXPORT_SYMBOL(cimax_usb_ci_read_evt); + +int cimax_usb_device_open(struct device_s *device) +{ + return device_drv_open(device); +} +EXPORT_SYMBOL(cimax_usb_device_open); + +int cimax_usb_device_unlock_read(struct device_s *device) +{ + int ret = 0; + if (!device) + return 0; + mutex_lock(&device->lock); + ret = device_ci_unlock_read(device); + mutex_unlock(&device->lock); + return ret; +} +EXPORT_SYMBOL(cimax_usb_device_unlock_read); + +int 
cimax_usb_device_close(struct device_s *device) +{ + if (!device) + return 0; + cimax_usb_device_unlock_read(device); + if (cimax_usb_dev_remove) + cimax_usb_dev_remove(device, gdeviceNumber); + return device_drv_close(device); +} +EXPORT_SYMBOL(cimax_usb_device_close); + +static long device_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct device_s *device = (struct device_s *)file->private_data; + int err = 0; + int ret = 0; + struct ioctl_data_s data; + void *transfer_buffer = NULL; + + dbg("start"); + + /* Don't decode wrong cmds: return ENOTTY (inappropriate ioctl) */ + if (_IOC_TYPE(cmd) != DEVICE_IOC_MAGIC) + return -ENOTTY; + if (_IOC_NR(cmd) > DEVICE_IOC_MAXNR) + return -ENOTTY; + + /* Verify direction (read/write) */ + if (_IOC_DIR(cmd) & _IOC_READ) +// err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd)); + err = !access_ok((void *)arg, _IOC_SIZE(cmd)); + else if (_IOC_DIR(cmd) & _IOC_WRITE) +// err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd)); + err = !access_ok((void *)arg, _IOC_SIZE(cmd)); + if (err) + return -EFAULT; + + if (mutex_lock_interruptible(&device->lock)) + return -ERESTARTSYS; + + switch (cmd) { + case DEVICE_IOC_SELECT_INTF: + dbg("DEVICE_IOC_SELECT_INTF start"); + ret = cimax_usb_select_interface(device, arg); + dbg("DEVICE_IOC_SELECT_INTF end"); + break; + + case DEVICE_IOC_CI_WRITE: + dbg("DEVICE_IOC_CI_WRITE start"); + + /* send CI message */ + ret = copy_from_user(&data, + (void __user *)arg, sizeof(struct ioctl_data_s)); + dbg("inMsg, rx 0x%p, rxSize %d, tx 0x%p, txSize %d", + data.rxData, + data.rxSize, + data.txData, + data.txSize); + ret = device_ci_write_ioctl(device, &data); + if (ret < 0) + break; + ret = copy_to_user((void __user *)arg, + &data, sizeof(struct ioctl_data_s)); + + dbg("DEVICE_IOC_CI_WRITE end"); + break; + + case DEVICE_IOC_UNLOCK_READ: + dbg("DEVICE_IOC_UNLOCK_READ start"); + + ret = device_ci_unlock_read(device); + + dbg("DEVICE_IOC_UNLOCK_READ end"); + break; + + 
case DEVICE_IOC_SET_CONFIG: + dbg("DEVICE_IOC_SET_CONFIG start"); + + /* send CI message */ + ret = copy_from_user(&data, + (void __user *)arg, sizeof(struct ioctl_data_s)); + dbg("inMsg, rx 0x%p, rxSize %d, tx 0x%p, txSize %d", + data.rxData, data.rxSize, + data.txData, data.txSize); + transfer_buffer = kmalloc(data.txSize, GFP_KERNEL); + memcpy(transfer_buffer, data.txData, data.txSize); + dbg_dump("New config", transfer_buffer, data.txSize); + err = usb_control_msg(device->usbdev, + usb_sndctrlpipe(device->usbdev, 0), + USB_REQ_SET_DESCRIPTOR, + USB_TYPE_STANDARD, + (USB_DT_CONFIG << 8), + 0, + transfer_buffer, + data.txSize, + 5000); + if (err < 0) { + err("set_config failed %d", err); + ret = -EINVAL; + } + kfree(transfer_buffer); + dbg("DEVICE_IOC_SET_CONFIG end"); + break; + + default: + ret = -ENOIOCTLCMD; + break; + } /* switch */ + mutex_unlock(&device->lock); + + dbg("end, ret %d", ret); + return ret; +} /* device_ioctl */ + +static ssize_t device_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int ret; + struct rw_data_s data; + struct device_s *device = (struct device_s *)file->private_data; + + dbg("start"); + + /* get transmission buffer */ + ret = copy_from_user(&data, buf, sizeof(struct rw_data_s)); + dbg("txBuffer, moduleId %u, data 0x%p, size %d", + data.moduleId, data.data, data.size); + if (data.moduleId >= DEVICE_NUM_CAM) { + err("bad moduleId"); + return 0; + } + + if (device->useIsoc) { + if (!data.size || (data.size % DEVICE_ISOC_LENGTH( + device->channel[data.moduleId].maxPacketSize))) { + err("transmission buffer size must be a multiple of %d", + DEVICE_ISOC_LENGTH( + device->channel[data.moduleId].maxPacketSize)); + return -EINVAL; + } /* if */ + } + + if (device->useIsoc) { + if (device->channel[data.moduleId].isocInUrb[0] == NULL) { + ret = device_start_iso_in(device, data.moduleId); + if (ret < 0) + return ret; + } /* if */ + + if (device->channel[data.moduleId].isocOutUrb[0] == NULL) { + ret = 
device_start_iso_out(device, data.moduleId); + if (ret < 0) + return ret; + } /* if */ + + dbg("call device_tsiso_send moduleId %d, data 0x%p, size %d", + data.moduleId, data.data, data.size); + ret = device_tsiso_send(device, + data.moduleId, data.data, data.size); + } /* if */ + else { + ret = device_tsbulk_send(device, + data.moduleId, data.data, data.size); + } /* else */ + + dbg("end, moduleId %d return %d", data.moduleId, ret); + return ret; +} /* device_write */ + +static ssize_t device_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + int res; + struct rw_data_s data; + struct device_s *device = (struct device_s *)file->private_data; + + dbg("start"); + + if (count != sizeof(struct rw_data_s)) { + err("try to read uncorrect size %zd", count); + return -EFAULT; + } /* if */ + res = copy_from_user(&data, buf, sizeof(struct rw_data_s)); + data.copiedSize = 0; + if (data.type == DEVICE_TYPE_TS_READ) { + res = device_fill_ts(device, data.moduleId, &data); + if (res < 0) { + err("fill ts buffer failed"); + return res; + } /* if */ + dbg("return TS, moduleId %d, data 0x%p, size %d, copiedSize %d", + data.moduleId, data.data, data.size, data.copiedSize); + /*res = count;*/ + res = data.copiedSize; + } /* if */ + else if (data.type == DEVICE_TYPE_CI_READ) { + res = device_wait_async_data(device, data.moduleId, &data, 1); + if (res < 0) { + err("wait ci read failed"); + return res; + } /* if */ + dbg("return CI, moduleId %d, data 0x%p, size %d", + data.moduleId, data.data, data.size); + res = data.size; + } /* else if */ + else { + err("unknown data type %d", data.type); + res = -EFAULT; + } /* else */ + + dbg("end, return %d", res); + return res; +} /* device_read */ + +/****************************************************************************** + * @brief + * write data on Control endpoint. + * + + * @param dev + * Pointer to usb device. + * + * @param addr + + * register address to write. + * + * @param data + * data to write. 
+ * + + * @param size + * size to write. + * + * @return + + * data writen or ENODEV error + ******************************************************************************/ +int write_ctrl_message(struct usb_device *dev, int addr, void *data, int size) +{ + int ret; + void *ptr = NULL; +#ifdef DEBUG + /* int i;*/ + /* unsigned char dump[500];*/ +#endif + + /* info("%s: . addr = %04x size=%d",DRIVER_NAME,addr,size);*/ + + if (size <= 0) + return 0; + + ptr = kmemdup(data, size, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + ret = usb_control_msg + (dev, usb_sndctrlpipe(dev, 0), 0xA0, 0x40, addr, 0x0001, + ptr, size, 300); + if (ret != size) { + err("Failed to write CIMaX+ register 0x%04x", addr); + ret = -ENODEV; + } + +#ifdef DEBUG + /* dump[0] =0; + for(i=0;i<size;i++) { + if((i !=0) && ((i%16) == 0)) { + dbg("cimax+usb: %s",dump); + dump[0] =0; + } + sprintf(dump,"%s%02x ",dump,((unsigned char *)ptr)[i]); + } + dbg("cimax+usb: %s",dump); + */ +#endif + kfree(ptr); + + return ret; +} + +/****************************************************************************** + * @brief + * read data from Control endpoint. + * + + * @param dev + * Pointer to usb device. + * + * @param addr + * firmware address to read. + + * + * @param data + * pointer to buffer to fill with register data. + * + + * @param size + * size to read. + * + * @return + * data writen or ENODEV error + + ******************************************************************************/ +int read_ctrl_message(struct usb_device *dev, int addr, void *data, int size) +{ + int ret; + ret = usb_control_msg + (dev, usb_rcvctrlpipe(dev, 0), 0xA0, 0xC0, addr, 0x0001, + (void *)data, size, 300); + if (ret != size) { + err("Failed to read CIMaX+ register 0x%04x return %d", + addr, ret); + ret = -ENODEV; + } + return ret; +} + +/****************************************************************************** + * @brief + * Start new Firmware. + * + * @param dev + * Pointer to usb device. + * + * @return + * None. 
+ ******************************************************************************/ +int init_fw(struct usb_device *dev) +{ + int len; + char *bootStatus = NULL; + bootStatus = kmalloc(sizeof(char), GFP_KERNEL); + if (!bootStatus) { + pr_err("%s: init_fw kmalloc failed\n", + DRIVER_NAME); + return 0; + } + + info("%s: .", DRIVER_NAME); + len = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), + 0xA0, 0xC0, 0x0000, 0x0000, bootStatus, 1, 100); + if (len == 1) { + info("--> Init Status = %02X", *bootStatus); + if (bootStatus) + kfree(bootStatus); + return 0; + } + if (bootStatus) + kfree(bootStatus); + + return len; +} + +/****************************************************************************** + * @brief + * Start new Firmware. + * + * @param dev + * Pointer to usb device. + + * + * @return + * None. + ******************************************************************************/ +int write_ep6_message(struct usb_device *dev, void *data, int size) +{ + int ret; + void * ptr = NULL; + ptr = kmemdup(data, size, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + if (usb_bulk_msg(dev, usb_sndbulkpipe(dev, 6), + ptr, size, &ret, 200) < 0) { + err("Failed to write cmd 0x%02x", ((unsigned char *)data)[0]); + ret = -ENODEV; + } + kfree(ptr); + return ret; +} + +/****************************************************************************** + * @brief + * Start new Firmware. + * + * @param dev + * Pointer to usb device. + * + * @return + * None. 
+ ******************************************************************************/ +int read_ep5_message(struct usb_device *dev, void *data, int size) +{ + int ret; + + if (usb_interrupt_msg(dev, usb_rcvintpipe(dev, 5), + data, size, &ret, 200) < 0) { + err("Failed read interrupt endpoint"); + ret = -ENODEV; + } + return ret; +} + +#ifdef CONFIG_COMPAT +static long device_compat_ioctl(struct file *filp, + unsigned int cmd, unsigned long args) +{ + unsigned long ret; + + args = (unsigned long)compat_ptr(args); + ret = device_ioctl(filp, cmd, args); + return ret; +} +#endif + +static const struct file_operations device_fops = { + .owner = THIS_MODULE, + .open = device_open, + .release = device_release, + .unlocked_ioctl = device_ioctl, + .write = device_write, + .read = device_read, + /* + .poll = device_poll, + */ +#ifdef CONFIG_COMPAT + .compat_ioctl = device_compat_ioctl, +#endif +}; + +struct cimaxusb_priv_ops_t cimaxusb_priv_ops = { + .write_ctrl_message = write_ctrl_message, + .read_ctrl_message = read_ctrl_message, + .init_fw = init_fw, + .write_ep6_message = write_ep6_message, + .read_ep5_message = read_ep5_message +}; + +static struct usb_class_driver device_class = { + .name = "cimaxusb%d", + .fops = &device_fops, + .minor_base = DEVICE_MINOR, +}; + +/* ---------------------------------------------------------- */ + + +void cimax_usb_set_cb(void *cb1, void *cb2) +{ + cimax_usb_dev_add = cb1; + cimax_usb_dev_remove = cb2; +} +EXPORT_SYMBOL(cimax_usb_set_cb); + +static int device_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *usbdev = interface_to_usbdev(intf); + struct device_s *device; + char cmd[] = { 0x0C, 0x01, 0x00, 0x00 }; + char *rsp; + + dbg("start vendor id 0x%x, product id 0x%x, Device id 0x%x minor 0x%x", + le16_to_cpu(usbdev->descriptor.idVendor), + le16_to_cpu(usbdev->descriptor.idProduct), + le16_to_cpu(usbdev->descriptor.bcdDevice), + intf->minor); + + /* device = &gdevice[intf->minor];*/ + device = 
&gdevice[gdeviceNumber]; + + mutex_lock(&device->lock); + /* device->usbdev = usbdev;*/ + device->usbdev = usb_get_dev(usbdev); + dbg("device->usbdev 0x%p", (device->usbdev)); + + /* set private callback functions */ + device->ops = &cimaxusb_priv_ops; + + device->askToSuspend = 0; + + usb_set_intfdata(intf, device); + mutex_unlock(&device->lock); + + if (usb_register_dev(intf, &device_class)) { + err("usb_register_dev"); + usb_set_intfdata(intf, NULL); + return -ENOMEM; + } /* if */ + + /* test if firmware loafing is needed */ +#ifdef FRBIT + if ((le16_to_cpu(usbdev->descriptor.bcdDevice) != 0) && + (CimaxDwnl == 1)) { +#else + if (le16_to_cpu(usbdev->descriptor.bcdDevice) != 0) { +#endif + info("start firmware download"); + /* load firmware*/ + cimaxusb_fw_upload(device); + info("end firmware download"); + } else { + info("set alternate setting 1"); + if (usb_set_interface(device->usbdev, 0, 1) < 0) { + err("set_interface failed intf 0, alt 1"); + } else { + info("check FW version"); + /* Get BOOT version */ + if (write_ep6_message(device->usbdev, + cmd, sizeof(cmd)) == sizeof(cmd)) { + rsp = kcalloc(256, + sizeof(unsigned char), + GFP_KERNEL); + if (!rsp) { + err("out of memory"); + return -ENOMEM; + } + if (read_ep5_message(device->usbdev, + rsp, 256) >= 0) { + info("=> ---- F.W. 
Version -------"); + info("=>= %02X.%02X.%02X.%02X.%02X%c", + rsp[4], rsp[5], rsp[6], + rsp[7], rsp[8], rsp[9]); + info("=> Boot Version = %d.%d", + rsp[10], rsp[11]); + info("=> --------------------"); + } + kfree(rsp); + } + } + info("start cfg download"); + if (cimaxusb_configuration_setting(device) < 0) + err(" Error : set CIMaX+ configuration"); + info("end cfg download"); + + if (cimax_usb_dev_add) + cimax_usb_dev_add(device, gdeviceNumber); + } + + dbg("end"); + return 0; +} /* device_probe */ + +static void device_disconnect(struct usb_interface *intf) +{ + struct device_s *device = usb_get_intfdata(intf); + int i; + + dbg("start"); + + if (!device) + return; + + mutex_lock(&device->lock); + if (device->opened) { + /* release blocking functions */ + device->askToRelease = 1; + wake_up_interruptible(&device->ciBulk.ciData[0].asyncWait); + wake_up_interruptible(&device->ciBulk.ciData[1].asyncWait); + wake_up_interruptible(&device->channel[0].inWait); + wake_up_interruptible(&device->channel[1].inWait); + device_stop_intr(device); + for (i = 0; i < DEVICE_NUM_CAM; i++) { + if (device->useIsoc) { + device_stop_iso_out(device, i); + device_stop_iso_in(device, i); + } /* if */ + else + device_stop_tsbulk_in(device, i); + } /* for */ + device->opened = 0; + + if (cimax_usb_dev_remove) + cimax_usb_dev_remove(device, gdeviceNumber); + } /* if */ + mutex_unlock(&device->lock); + usb_set_intfdata(intf, NULL); + if (device) { + usb_deregister_dev(intf, &device_class); + device->usbdev = NULL; + } /* if */ + dbg("end"); +} /* device_disconnect */ + +static int cimaxusb_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct device_s *device = usb_get_intfdata(intf); + int i; + + dbg("start"); + + if (!device) + return 0; + + mutex_lock(&device->lock); + if (device->opened) { + /* release blocking functions */ + device->askToSuspend = 1; + wake_up_interruptible(&device->ciBulk.ciData[0].asyncWait); + 
wake_up_interruptible(&device->ciBulk.ciData[1].asyncWait); + wake_up_interruptible(&device->channel[0].inWait); + wake_up_interruptible(&device->channel[1].inWait); + device_stop_intr(device); + for (i = 0; i < DEVICE_NUM_CAM; i++) { + if (device->useIsoc) { + device_stop_iso_out(device, i); + device_stop_iso_in(device, i); + } /* if */ + else + device_stop_tsbulk_in(device, i); + } /* for */ + device->opened = 0; + } /* if */ + mutex_unlock(&device->lock); + dbg("end"); + + return 0; +} + +static int cimaxusb_resume(struct usb_interface *intf) +{ + struct device_s *device = usb_get_intfdata(intf); + + dbg("start"); + + if (!device) + return 0; + + device->askToSuspend = 0; + dbg("end"); + return 0; +} + +static struct usb_device_id device_ids[] = { + { USB_DEVICE(0x1b0d, 0x2f00) }, + { USB_DEVICE(0x1b0d, 0x2f01) }, + { USB_DEVICE(0x1b0d, 0x2f02) }, + { USB_DEVICE(0x1b0d, 0x2f03) }, + { USB_DEVICE(0x1b0d, 0x2f04) }, + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, device_ids); + + +static struct usb_driver device_driver = { + .name = "cimaxusb", + .probe = device_probe, + .disconnect = device_disconnect, + .suspend = cimaxusb_suspend, + .resume = cimaxusb_resume, + .id_table = device_ids, +}; + +/* ---------------------------------------------------------- */ + +static int device_init_module(void) +{ + int ret = 0; + int i, j; + struct device_s *device; + + info("start"); + + if (!gdevice) + gdevice = kcalloc(DRIVER_MAX_NUMBER, + sizeof(struct device_s), GFP_KERNEL); + if (!gdevice) { + err("not enough memory"); + return -ENOMEM; + } + + for (i = 0; i < DRIVER_MAX_NUMBER; i++) { + device = &gdevice[i]; + /* initialize struct */ + memset(device, 0, sizeof(struct device_s)); + mutex_init(&device->lock); + + /* initialize ci bulk struct */ + device->ciBulk.counter = 1; + spin_lock_init(&device->ciBulk.intLock); + spin_lock_init(&device->ciBulk.intUrbLock); + for (j = 0; j < DEVICE_NUM_CAM; j++) { + init_waitqueue_head(&device->ciBulk.ciData[j].syncWait); 
+ init_waitqueue_head( + &device->ciBulk.ciData[j].asyncWait); + INIT_LIST_HEAD(&device->ciBulk.ciData[j].asyncDataList); + } /* for */ + + /* initialize channels */ + for (j = 0; j < DEVICE_NUM_CAM; j++) { + spin_lock_init(&device->channel[j].inLock); + init_waitqueue_head(&device->channel[j].inWait); + vb_init(&device->channel[j].inVb); + device->channel[j].syncOffset = -1; + spin_lock_init(&device->channel[j].outLock); + } /* for */ + } /* for */ + + /* register misc device */ + ret = usb_register(&device_driver); + +#ifdef TIMESTAMP + InitTimestamp(); +#endif + + if (ret) + info("end driver register failed"); + else + info("end driver registered"); + + info(DRIVER_VERSION ":" DRIVER_DESC); + + return ret; +} /* device_init_module */ + +static void device_exit_module(void) +{ + int i; + struct device_s *device; + + info("start"); + +#ifdef TIMESTAMP + ShowTimestamp(); +#endif + + for (i = 0; i < DRIVER_MAX_NUMBER; i++) { + device = &gdevice[i]; + device->askToRelease = 1; + /* destroy struct */ + mutex_destroy(&device->lock); + } /* for */ + usb_deregister(&device_driver); + gdeviceNumber = 0; + + kfree(gdevice); + gdevice = NULL; + info("end"); +} /* device_exit_module */ + +module_init(device_init_module); +module_exit(device_exit_module);
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb-driver.h b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb-driver.h new file mode 100644 index 0000000..774c50a --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb-driver.h
@@ -0,0 +1,363 @@ +/**************************************************************************//** + * @file cimax+usb-driver.h + * + * @brief CIMaX+ USB Driver for linux based operating systems. + * + * Copyright (C) 2009-2011 Bruno Tonelli <bruno.tonelli@smardtv.com> + * & Franck Descours <franck.descours@smardtv.com> + * for SmarDTV France, La Ciotat + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + ******************************************************************************/ + +#include <linux/version.h> + +#ifndef CIMAXPLUS_USB_DRIVER_H +#define CIMAXPLUS_USB_DRIVER_H + +/****************************************************************************** + * Includes + *****************************************************************************/ +/****************************************************************************** + * Defines + *****************************************************************************/ +/** + * @brief + * Driver Name + */ +#define DRIVER_NAME "cimax+usb" +/** + * @brief + * An unassigned USB minor. + */ +#define DEVICE_MINOR 240 + +/** + * @brief + * Driver version. + */ +#define DEVICE_VERSION 0x1000 + +/** + * @brief + * Number of CA module supported by the driver. + */ +#define DEVICE_NUM_CAM 2 + +/** + * @brief + * Buffer length. 
+ */ +#define DEVICE_MESSAGE_LENGTH 4100 + +/* Offset */ +#define DEVICE_COMMAND_OFFSET 0 +#define DEVICE_STATUS_OFFSET 0 +#define DEVICE_COUNTER_OFFSET 1 +#define DEVICE_LENGTH_MSB_OFFSET 2 +#define DEVICE_LENGTH_LSB_OFFSET 3 +#define DEVICE_DATA_OFFSET 4 + +/* Mask */ +#define DEVICE_SEL_MASK 0x80 +#define DEVICE_TYP_MASK 0x40 +#define DEVICE_CMD_MASK 0x3F + +/* Command tag */ +#define DEVICE_CMD_INIT 0x00 +#define DEVICE_CMD_WRITE_REG 0x7F +#define DEVICE_CMD_READ_REG 0xFF +#define DEVICE_CMD_CAMRESET 0x01 +#define DEVICE_CMD_GETCIS 0x02 +#define DEVICE_CMD_WRITECOR 0x03 +#define DEVICE_CMD_NEGOTIATE 0x04 +#define DEVICE_CMD_WRITELPDU 0x05 +#define DEVICE_CMD_READLPDU 0x06 +#define DEVICE_CMD_WRITEEXT 0x07 +#define DEVICE_CMD_READEXT 0x08 +#define DEVICE_CMD_CC1RESET 0x09 +#define DEVICE_CMD_MCARD_WRITE 0x0a + +/* Status field */ +#define DEVICE_CAMRESETOK 0x00 +#define DEVICE_CISOK 0x01 +#define DEVICE_WRITECOROK 0x02 +#define DEVICE_NEGOTIATEOK 0x03 +#define DEVICE_WRITELPDUOK 0x04 +#define DEVICE_CAMDET 0x05 +#define DEVICE_READLPDUOK 0x06 +#define DEVICE_WRITEEXTOK 0x07 +#define DEVICE_READEXTOK 0x08 +#define DEVICE_NO_CAM 0x09 +#define DEVICE_NOK 0x0a +#define DEVICE_INITOK 0x0b +#define DEVICE_READ_REGOK 0x0c +#define DEVICE_WRITE_REGOK 0x0d +#define DEVICE_DATAREADY 0x0e +#define DEVICE_MCARD_WRITEOK 0x0f +#define DEVICE_MCARD_READ 0x10 +#define DEVICE_CAMPARSE_ERROR 0x11 +#define DEVICE_WRITELPDUBUSY 0x14 +#define DEVICE_CMDPENDING 0x16 +#define DEVICE_REGSTATUSOK 0x17 +#define DEVICE_GPIOCHANGE 0x18 +#define DEVICE_FRBit 0x1A + + +#define DEVICE_DATAREADY_SYNC 0x3e + +/** + * @brief + * MPEG2 transport size,.isochronous size and number of frames per URB. + */ +#define DEVICE_MPEG2_PACKET_SIZE 188 +#define DEVICE_MPEG2_SYNC_BYTE 0x47 +#define DEVICE_NULL_HEADER_SIZE 8 +#define DEVICE_NUM_FRAMES_PER_URB 8 +#define DEVICE_ISOC_LENGTH(x) (DEVICE_NUM_FRAMES_PER_URB*x) +#define DEVICE_VB_LENGTH 902400 + +/** + * @brief + * Endpoint address. 
+ */ +#define DEVICE_TS_IN_PIPE 1 /* and 2 */ +#define DEVICE_TS_OUT_PIPE 3 /* and 4 */ +#define DEVICE_INT_IN_PIPE 5 +#define DEVICE_BULK_OUT_PIPE 6 +#define DEVICE_BULK_OUT_MAXPACKET 256 + +/** + * @brief + * Number of isochronous/int URBs in the driver. + */ +#define DEVICE_NUM_ISOC_OUT_URBS 3 +#define DEVICE_NUM_ISOC_IN_URBS 2 +#define DEVICE_NUM_INT_IN_URBS 2 + +/** + * @brief + * ioctl() calls definition. + */ +#define DEVICE_IOC_MAGIC 'a' +#define DEVICE_IOC_SELECT_INTF _IOWR(DEVICE_IOC_MAGIC, 0, signed long) +#define DEVICE_IOC_CI_WRITE _IOWR(DEVICE_IOC_MAGIC, 1, struct ioctl_data_s) +#define DEVICE_IOC_UNLOCK_READ _IOWR(DEVICE_IOC_MAGIC, 2, signed long) +#define DEVICE_IOC_SET_CONFIG _IOWR(DEVICE_IOC_MAGIC, 3, struct ioctl_data_s) +#define DEVICE_IOC_MAXNR 4 + +/****************************************************************************** + * Types + *****************************************************************************/ +#ifdef __KERNEL__ +#include <linux/list.h> +#include <linux/atomic.h> +#include <linux/printk.h> + +#undef dbg +#undef dbg_isoc_in +#undef dbg_isoc_out + +#undef err +#undef info +#undef warn + +#define DEBUG + +#ifdef DEBUG +#define dbg(format, arg...) pr_debug("cimax+usb: %s> " format "\n" , \ + __func__, ## arg) +#define dbg_s(format, arg...)\ + pr_debug("cimax+usb: " format "\n" , ## arg) +#else +#define dbg(format, arg...) do {} while (0) +#define dbg_s(format, arg...) do {} while (0) +#endif + +#ifdef DEBUG_ISOC_IN +#define dbg_isoc_in(format, arg...)\ + pr_debug("cimax+usb: %s> " format "\n" , \ + __func__, ## arg) +#else +#define dbg_isoc_in(format, arg...) do {} while (0) +#endif + +#ifdef DEBUG_ISOC_OUT +#define dbg_isoc_out(format, arg...)\ + pr_debug("cimax+usb: %s> " format "\n" , \ + __func__, ## arg) +#else +#define dbg_isoc_out(format, arg...) 
do {} while (0) +#endif + +#define err(format, arg...)\ + pr_err("cimax+usb: %s> ERROR " format "\n" , \ + __func__, ## arg) +#define info(format, arg...)\ + pr_info("cimax+usb: %s> " format "\n" , \ + __func__, ## arg) +#define warn(format, arg...)\ + pr_warn("cimax+usb: %s> WARN" format "\n" , \ + __func__, ## arg) + +/** + * @brief + * Video buffer structure. + */ +struct video_buf_s { + __u8 data[DEVICE_VB_LENGTH]; + int readOffset; + int writeOffset; + int isEmpty; +}; +#endif + +/** + * @brief + * Io control data structure exchanged between user and kernel space. + */ +struct ioctl_data_s { + __u8 *txData; + __u32 txSize; + __u8 *rxData; + __u32 rxSize; +}; + +/** + * @brief + * Read/write type exchanged between user and kernel space. + */ +enum rw_type_e { + DEVICE_TYPE_CI_READ, + DEVICE_TYPE_TS_WRITE, + DEVICE_TYPE_TS_READ +}; + +/** + * @brief + * Read/write data structure exchanged between user and kernel space. + */ +struct rw_data_s { + enum rw_type_e type; + __u8 moduleId; + __u8 *data; + __u32 size; + __u32 copiedSize; +}; +#ifdef __KERNEL__ +/** + * @brief + * Message node structure. Can be inserted in a list. + */ +struct message_node_s { + __u8 data[DEVICE_MESSAGE_LENGTH]; + __u32 size; + struct list_head node; +}; + +/** + * @brief + * Received CI data. + */ +struct ci_rx_data_s { + wait_queue_head_t syncWait; + __u8 syncSignal; + __u8 syncData[DEVICE_MESSAGE_LENGTH]; + __u32 syncDataSize; + wait_queue_head_t asyncWait; + struct list_head asyncDataList; + __u8 bPendingSend; +}; + +/** + * @brief + * CI bulk channel. + */ +struct ci_bulk_s { + __u8 counter; + __u16 inMaxPacketSize; + __u16 outMaxPacketSize; + struct urb *intUrb[DEVICE_NUM_INT_IN_URBS]; + spinlock_t intLock; + spinlock_t intUrbLock; + __u8 intCurrStatus; + __u8 intCurrIndex; + __u16 intSizeToReceive; + struct ci_rx_data_s ciData[DEVICE_NUM_CAM]; +}; + +/** + * @brief + * TS channel (can use isoc or bulk interface). 
+ * x + */ +struct ts_channel_s { + spinlock_t inLock; + wait_queue_head_t inWait; + struct video_buf_s inVb; + int syncOffset; + int prevOffset; + __u8 lastPacket[DEVICE_MPEG2_PACKET_SIZE]; + __u8 lastPacketSize; + spinlock_t outLock; + __u8 nextFreeOutUrbIndex; + atomic_t numOutUrbs; + __u8 outStop; + __u16 maxPacketSize; + /* isochronous urbs */ + struct urb *isocInUrb[DEVICE_NUM_ISOC_IN_URBS]; + struct urb *isocOutUrb[DEVICE_NUM_ISOC_OUT_URBS]; + /* bulk urbs */ + struct urb *bulkInUrb; + int nbByteSend; + int nbByteRead; + __u8 FirstTransfer; + +#ifdef DEBUG_BITRATE + ktime_t bitrateTime +#endif +}; + +struct device_s { + struct mutex lock; + struct usb_device *usbdev; + __u8 opened; + __u8 askToRelease; + __u8 askToSuspend; + struct ci_bulk_s ciBulk; + __u8 useIsoc; + struct ts_channel_s channel[DEVICE_NUM_CAM]; + /* bus adapter private ops callback */ + struct cimaxusb_priv_ops_t *ops; + int ref; +}; + +struct bulk_timer_s { + struct device_s *device; + __u8 index; + struct timer_list StartBulkReadTimer; +}; + +int cimax_usb_select_interface(struct device_s *device, unsigned long intf); +int cimax_usb_ci_write(struct device_s *device, + u8 *txData, int txSize, u8 *rxData, int rxSize); +int cimax_usb_ci_read_evt(struct device_s *device, + int moduleId, u8 *buf, int size); + +int cimax_usb_device_unlock_read(struct device_s *device); +int cimax_usb_device_open(struct device_s *device); +int cimax_usb_device_close(struct device_s *device); +void cimax_usb_set_cb(void *cb1, void *cb2); + + +#endif +#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_config.c b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_config.c new file mode 100644 index 0000000..4469b87 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_config.c
@@ -0,0 +1,684 @@ +/**************************************************************************//** + * @file cimax+usb_config.c + * + * @brief CIMaX+ USB Driver for linux based operating systems. + * + * Copyright (C) 2009-2011 Bruno Tonelli <bruno.tonelli@smardtv.com> + * & Franck Descours <franck.descours@smardtv.com> + * for SmarDTV France, La Ciotat + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + ******************************************************************************/ +/****************************************************************************** + * Include + ******************************************************************************/ + + +#include <linux/slab.h> +#include <linux/init.h> + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/poll.h> +#include <linux/usb.h> + +#include <linux/errno.h> +#include <linux/firmware.h> + +#include <linux/uaccess.h> +#include <linux/mutex.h> + +#include "cimax+usb-driver.h" +#include "cimax+usb_config.h" +#include "bodydef.h" + +#include <linux/ctype.h> + +#define DEBUG +#include <linux/printk.h> + +/****************************************************************************** + * Structures + ******************************************************************************/ +/****************************************************************************** + * Globals + ******************************************************************************/ +char *cimax_config_file[4] = { + "cimax+usb.cfg", + 
"cimax+usb_vcc0.cfg", + "cimax+usb_vcc3.cfg", + "cimax+usb_vcc5.cfg" +}; + +/****************************************************************************** + * Functions + ******************************************************************************/ +/****************************************************************************** + * @brief + * set CIMaX+ register value. + * + * @param bus_adap + * Pointer to CIMaX+ usb adapter. + * + * @param addr + * Addr of CIMaX+ register. + * + * @param val + * CIMaX+ register value to set. + * + * @return + * 0 if OK otherwise -1. + ******************************************************************************/ +int usb_setbyte(struct device_s *bus_adap, unsigned int addr, unsigned char val) +{ + unsigned char cmd[5]; + unsigned char *buf; + int len; + struct usb_device *dev = bus_adap->usbdev; + + cmd[0] = 0x7F; + cmd[1] = (unsigned char)((addr>>8)&0xff); + cmd[2] = (unsigned char)(addr&0xff); + cmd[3] = 0x01; + cmd[4] = val; + if (bus_adap->ops->write_ep6_message(dev, cmd, sizeof(cmd)) + == sizeof(cmd)) { + /*pr_debug("%s-%s: cmd=0x%x\n", + DRIVER_NAME, __func__,cmd[0]);*/ + buf = kcalloc(256, sizeof(unsigned char), GFP_KERNEL); + if (!buf) { + pr_err("%s-%s: out of memory.\n", + DRIVER_NAME, __func__); + return -ENOMEM; + } + + while ((len = bus_adap->ops->read_ep5_message(dev, buf, 256)) + >= 0) { + if (len == 0) + continue; + if ((len == 5) && ((buf[0] & 0x7f) == 0x45)) + continue; + if (len < 4) { + pr_err("%s-%s: failed to read addr 0x%x\n", + DRIVER_NAME, __func__, addr); + kfree(buf); + return -1; + } else { + break; + } + } + } else { + pr_err("%s-%s: failed to write addr 0x%x\n", + DRIVER_NAME, __func__, addr); + return -1; + } + kfree(buf); + return 0; +} + +/****************************************************************************** + * @brief + * get CIMaX+ register value. + * + * @param bus_adap + * Pointer to CIMaX+ usb adapter. + * + * @param addr + * Addr of CIMaX+ register. 
+ * + * @param val + * pointer to buffer to store CIMaX+ register value. + * + * @return + * 0 if OK otherwise -1. + ******************************************************************************/ +int usb_getbyte(struct device_s *bus_adap, + unsigned int addr, unsigned char *val) +{ + unsigned char cmd[4]; + unsigned char *buf; + int len; + struct usb_device *dev = bus_adap->usbdev; + + cmd[0] = 0xFF; + cmd[1] = (unsigned char)((addr>>8)&0xff); + cmd[2] = (unsigned char)(addr&0xff); + cmd[3] = 0x01; + if (bus_adap->ops->write_ep6_message(dev, cmd, sizeof(cmd)) + == sizeof(cmd)) { + /*pr_debug("%s-%s: cmd=0x%x\n", + DRIVER_NAME, __func__,cmd[0]);*/ + buf = kcalloc(256, sizeof(unsigned char), GFP_KERNEL); + if (!buf) { + pr_err("%s-%s: out of memory.\n", + DRIVER_NAME, __func__); + return -ENOMEM; + } + + while ((len = bus_adap->ops->read_ep5_message(dev, buf, 256)) + >= 0) { + if (len == 0) + continue; + /*pr_debug("%s-%s: 0x%x\n", + DRIVER_NAME, __func__,buf[0]);*/ + if ((len == 5) && ((buf[0] & 0x7f) == 0x45)) + continue; + if (len >= 5) { + *val = buf[4]; + break; + } else { + pr_err("%s-%s: failed to read addr 0x%x\n", + DRIVER_NAME, __func__, addr); + kfree(buf); + return -1; + } + } + } else { + pr_err("%s-%s: failed to read addr 0x%x\n", + DRIVER_NAME, __func__, addr); + return -1; + } + kfree(buf); + return 0; +} + +/****************************************************************************** + * @brief + * compute a logical Or between CIMaX+ register value and a param value. + * + * @param bus_adap + * Pointer to CIMaX+ usb adapter. + * + * @param addr + * Addr of CIMaX+ register. + * + * @param val + * value to compute. + * + * @param pval + * pointer to buffer to store CIMaX+ register value. + * + * @return + * 0 if OK otherwise -1. 
+ ******************************************************************************/ +int setLogicalOr(struct device_s *bus_adap, + unsigned int addr, unsigned char val, unsigned char *pval) +{ + *pval |= val; + + if (usb_setbyte(bus_adap, addr, *pval) < 0) + return -1; + + if (usb_getbyte(bus_adap, addr, pval) < 0) + return -1; + + dbg("=> Logical OR [%02X] => %02X\n", val, *pval); + + return 0; +} + +/****************************************************************************** + * @brief + * compute a logical And between CIMaX+ register value and a param value. + * + * @param bus_adap + * Pointer to CIMaX+ usb adapter. + * + * @param addr + * Addr of CIMaX+ register. + * + * @param val + * value to compute. + * + * @param pval + * pointer to buffer to store CIMaX+ register value. + * + * @return + * 0 if OK otherwise -1. + ******************************************************************************/ +int setLogicalAnd(struct device_s *bus_adap, + unsigned int addr, unsigned char val, unsigned char *pval) +{ + *pval &= val; + + if (usb_setbyte(bus_adap, addr, *pval) < 0) + return -1; + + if (usb_getbyte(bus_adap, addr, pval) < 0) + return -1; + + dbg("=> Logical AND [%02X] => %02X\n", val, *pval); + + return 0; +} + +/****************************************************************************** + * @brief + * wait CIMaX+ register value match a param value. + * + * @param bus_adap + * Pointer to CIMaX+ usb adapter. + * + * @param addr + * Addr of CIMaX+ register. + * + * @param val + * value to match. + * + * @param pval + * pointer to buffer to store CIMaX+ register value. + * + * @return + * 0 if OK otherwise -1. 
+ ******************************************************************************/ +int waitForValue(struct device_s *bus_adap, + unsigned int addr, unsigned char val, unsigned char *pval) +{ + dbg("=> Wait for Value [%02X]\n", val); + if (*pval == val) + return 0; + + while (1) { + if (usb_getbyte(bus_adap, addr, pval) < 0) + return -1; + + dbg("\r => %02X", *pval); + + if (*pval == val) + break; + } + + dbg("\n"); + + return 0; +} + +/****************************************************************************** + * @brief + * wait CIMaX+ register bits match a param value. + * + * @param bus_adap + * Pointer to CIMaX+ usb adapter. + * + * @param addr + * Addr of CIMaX+ register. + * + * @param val + * value to match. + * + * @param pval + * pointer to buffer to store CIMaX+ register value. + * + * @return + * 0 if OK otherwise -1. + ******************************************************************************/ +int waitForBitsSet(struct device_s *bus_adap, + unsigned int addr, unsigned char val, unsigned char *pval) +{ + dbg("=> Wait for Bits set [%02X]\n", val); + if ((*pval & val) == val) + return 0; + + while (1) { + if (usb_getbyte(bus_adap, addr, pval) < 0) + return -1; + + dbg("\r => %02X", *pval); + + if ((*pval & val) == val) + break; + } + + dbg("\n"); + + return 0; +} + +/****************************************************************************** + * @brief + * wait CIMaX+ register bits cleared. + * + * @param bus_adap + * Pointer to CIMaX+ usb adapter. + * + * @param addr + * Addr of CIMaX+ register. + * + * @param val + * bits to check. + * + * @param pval + * pointer to buffer to store CIMaX+ register value. + * + * @return + * 0 if OK otherwise -1. 
 ******************************************************************************/
int waitForBitsCleared(struct device_s *bus_adap,
	unsigned int addr, unsigned char val, unsigned char *pval)
{
	dbg("=> Wait for Bits cleared [%02X]\n", val);
	/* Fast path: the cached value already has all bits of val clear. */
	if ((*pval & val) == 0x00)
		return 0;

	/* Poll the register until the requested bits read back as 0.
	 * NOTE(review): no timeout — spins forever if the bits never
	 * clear; confirm callers accept that. */
	while (1) {
		if (usb_getbyte(bus_adap, addr, pval) < 0)
			return -1;

		dbg("\r => %02X", *pval);

		if ((*pval & val) == 0x00)
			break;
	}

	dbg("\n");

	return 0;
}

/******************************************************************************
 * @brief
 * retrieve addr of CIMaX+ register.
 *
 * @param str_addr
 * Pointer to CIMaX+ register Name.
 *
 * @return
 * Address of CIMaX+ register, or -1 if the name is unknown.
 *****************************************************************************/
int cimaxusb_rtr_reg_addr(char *str_addr)
{
	/* Linear scan of the static name->address map. */
	int i32NbItem = sizeof(cimax_reg_map) / sizeof(struct reg_s);
	int i32index;
	int i32ValAddr = -1;

	for (i32index = 0; i32index < i32NbItem; i32index++) {
		if (strcmp(str_addr, cimax_reg_map[i32index].RegisterName)
			== 0) {
			i32ValAddr = cimax_reg_map[i32index].RegAddr;
			break;
		}
	}
	return i32ValAddr;
}

/*
 * Parse one operator line of the config file ("<op>REGNAME <hexval>",
 * where <op> is one of = ? ! | &) and execute the matching register
 * operation.  *pptr / *plen are advanced past the consumed text.
 *
 * NOTE(review): the %s conversions below have no field-width limit; a
 * token longer than 255 characters overflows param1/param2/line.  The
 * config file is loaded via request_firmware(), so it is trusted root
 * data, but width limits (e.g. %255s) would still be safer — TODO.
 *
 * Returns 0 on success, -EFAULT on a malformed line, -ENODEV on an
 * unknown register/op or a failed register access.
 */
static int cimaxusb_parse_cfg_ops(struct device_s *bus_adap,
	unsigned char **pptr, size_t *plen)
{
	unsigned char op;
	int ret;
	char param1[256], param2[256];
	char line[256], *ptr_line;
	int val;
	unsigned char val2;
	unsigned int addr;
	unsigned char *ptr = *pptr;	/* local cursor into the config text */
	size_t len = *plen;		/* bytes consumed so far */

	ptr_line = line;
	op = *ptr;	/* operator character, dispatched on below */
	ret = sscanf(ptr, "%s %s", param1, param2);
	/* Skip the operator character itself. */
	ptr++;
	len++;
	/* Re-scan after the operator: register name + hex value. */
	if (sscanf(ptr, "%s %X", param1, &val) != 2)
		return -EFAULT;
	/*pr_debug("%s: param1=%s,param2=%s, val=%x\n",
		DRIVER_NAME, param1, param2, val);*/
	/* Rebuild the full line in `line` (for log output) while
	 * advancing the cursor past "name<sep>value". */
	strcpy(ptr_line, param1);
	len += strlen(param1);
	ptr += strlen(param1);
	strncat(ptr_line, ptr, 1);	/* keep the single separator char */
	len++;
	ptr++;
	strcat(ptr_line, param2);
	len += strlen(param2);
	ptr += strlen(param2);
	/*pr_debug("%s: len = %08d line: %s\n",
		DRIVER_NAME, len, line);*/
	addr = cimaxusb_rtr_reg_addr(param1);
	if (((signed)addr) < 0) {
		pr_err("%s: unknown register name: %s\n",
			DRIVER_NAME, param1);
		return -ENODEV;
	}
	/* Prime val2 with the current register value; the waitFor* helpers
	 * use it as their cached starting point. */
	if (usb_getbyte(bus_adap, addr, &val2) < 0) {
		pr_err("%s: CIMaX+ register reading problem: %s\n",
			DRIVER_NAME, param1);
		return -ENODEV;
	}

	switch (op) {
	case '=':	/* wait until register == val */
		if (waitForValue(bus_adap, addr, val, &val2) == -1) {
			pr_err("%s : waitForValue failed on : %s\n",
				DRIVER_NAME, param1);
			return -ENODEV;
		}
		pr_info("%s : wait for value ([%x] = %x = %x)\n",
			line, addr, val, val2);
		break;
	case '?':	/* wait until all bits of val are set */
		if (waitForBitsSet(bus_adap, addr, val, &val2) == -1) {
			pr_err("%s : waitForBitsSet failed on : %s\n",
				DRIVER_NAME, param1);
			return -ENODEV;
		}
		pr_info("%s : wait for bitsset ([%x] = %x = %x)\n",
			line, addr, val, val2);
		break;
	case '!':	/* wait until all bits of val are cleared */
		if (waitForBitsCleared(bus_adap, addr, val, &val2)
			== -1) {
			pr_err("%s : waitForBitsCleared failed on : %s\n",
				DRIVER_NAME, param1);
			return -ENODEV;
		}
		pr_info("%s : wait for bits cleared ([%x] = %x = %x)\n",
			line, addr, val, val2);
		break;
	case '|':	/* register |= val */
		if (setLogicalOr(bus_adap, addr, val, &val2) == -1) {
			pr_err("%s : setLogicalOr failed on : %s\n",
				DRIVER_NAME, param1);
			return -ENODEV;
		}
		pr_info("%s : setOr ([%x] = %x = %x)\n",
			line, addr, val, val2);
		break;
	case '&':	/* register &= val */
		if (setLogicalAnd(bus_adap, addr, val, &val2) == -1) {
			pr_err("%s : setLogicalAnd failed on : %s\n",
				DRIVER_NAME, param1);
			return -ENODEV;
		}
		pr_info("%s : setAnd ([%x] = %x = %x)\n",
			line, addr, val, val2);
		break;
	default:
		pr_err("Error: Marker Unknown <%c> !!!\n", op);
		return -ENODEV;
	}
	/* Publish the advanced cursor/length back to the caller. */
	*pptr = ptr;
	*plen = len;
	return 0;
}

/*
 * Parse one plain assignment line of the config file
 * ("REGNAME <hexval>") and write the value to the register, reading it
 * back afterwards as a check.  *pptr / *plen are advanced past the
 * consumed text.
 *
 * NOTE(review): same unbounded-%s caveat as cimaxusb_parse_cfg_ops().
 *
 * Returns 0 on success, -ENODEV on an unknown register or failed access.
 */
static int cimaxusb_parse_cfg_default(struct device_s *bus_adap,
	unsigned char **pptr, size_t *plen)
{
	unsigned char *ptr = *pptr;	/* local cursor into the config text */
	size_t len = *plen;		/* bytes consumed so far */
	int ret;
	char param1[256], param2[256];
	char line[256], *ptr_line;
	int val;
	unsigned char val2;
	unsigned int addr;

	ptr_line = line;

	/* Split "name value", then convert the value from hex. */
	ret = sscanf(ptr, "%s %s", param1, param2);
	ret = sscanf(param2, "%X", &val);
	/*pr_debug("%s: param1=%s,param2=%s,val=%x\n",
		DRIVER_NAME,param1,param2,val);*/
	/* Rebuild the full line in `line` (for log output) while
	 * advancing the cursor past "name<sep>value". */
	strcpy(ptr_line, param1);
	len += strlen(param1);
	ptr += strlen(param1);
	strncat(ptr_line, ptr, 1);	/* keep the single separator char */
	len++;
	ptr++;
	strcat(ptr_line, param2);
	len += strlen(param2);
	ptr += strlen(param2);
	/*pr_debug("%s: len = %08d line: %s\n",
		DRIVER_NAME, len, line);*/
	addr = cimaxusb_rtr_reg_addr(param1);
	if (((signed)addr) < 0) {
		pr_err("%s: unknown register name: %s\n",
			DRIVER_NAME, param1);
		return -ENODEV;
	}
	/* read (sanity) -> write -> read-back check */
	if (usb_getbyte(bus_adap, addr, &val2) < 0) {
		pr_err("%s: CIMaX+ register reading problem: %s\n",
			DRIVER_NAME, param1);
		return -ENODEV;
	}
	if (usb_setbyte(bus_adap, addr, val) < 0) {
		pr_err("%s: CIMaX+ register writing problem: %s\n",
			DRIVER_NAME, param1);
		return -ENODEV;
	}
	if (usb_getbyte(bus_adap, addr, &val2) < 0) {
		pr_err("%s: CIMaX+ register checking problem: %s\n",
			DRIVER_NAME, param1);
		return -ENODEV;
	}
	pr_info("set: %s(0x%04x) : 0x%02x\n", param1, addr, val);

	/* Publish the advanced cursor/length back to the caller. */
	*pptr = ptr;
	*plen = len;
	return 0;
}

/******************************************************************************
 * @brief
 * parse CIMaX+ config file.
 *
 * @param bus_adap
 * Pointer to usb device.
 *
 * @param cfg_data
 * Pointer to buffer with configuration data.
 *
 * @return
 * 0 if no error otherwise errno.
+ *****************************************************************************/ +int cimaxusb_parse_cfg(struct device_s *bus_adap, + const unsigned char *cfg_data, size_t size) +{ + char line[256], *ptr_line; + size_t len = 0; + unsigned char *ptr = (unsigned char *)cfg_data; + int ret = -EFAULT; + + dbg("%s: %s size = %zd\n", DRIVER_NAME, __func__, size); + do { + ptr_line = line; + switch (*ptr) { + case ';': + /* continue up to find \r character */ + while (*ptr != 0x0A) { + *ptr_line++ = *ptr; + ptr++; + len++; + } + *ptr_line = 0; + /*pr_debug("%s: len = %08d line: %s\n", + DRIVER_NAME, len, line);*/ + break; + case 0x0A: + case 0x0D: + ptr++; + len++; + break; + case '=': + case '?': + case '!': + case '|': + case '&': + ret = cimaxusb_parse_cfg_ops(bus_adap, &ptr, &len); + if (ret < 0) + return ret; + break; + default: + ret = cimaxusb_parse_cfg_default(bus_adap, &ptr, &len); + if (ret < 0) + return ret; + break; + } + } while (len < size); + + return (ret == -EFAULT) ? 0 : ret; +} + +/****************************************************************************** + * @brief + * read configuration file ( CIMAX_CONFIG_NAME) and set in CIMaX+ chip. + * + * @param bus_adap + * Pointer to usb device. + * + * @return + * None. 
+ ******************************************************************************/ +/* +opt: 0: init, 1: off, 2: Vcc3.3 3: Vcc5 +*/ +int cimaxusb_configuration_setting_opt(struct device_s *bus_adap, int opt) +{ + int errno = -EFAULT; + const struct firmware *config; + char *cfg = cimax_config_file[opt]; + + struct usb_device *dev = bus_adap->usbdev; + + dbg("request configuration file"); + /* request kernel to locate firmware file */ + errno = request_firmware(&config, cfg, &dev->dev); + if (errno < 0) { + pr_err("%s: unable to locate configuration file: %s\n", + DRIVER_NAME, cfg); + goto error; + } + + dbg("parse configuration file"); + errno = cimaxusb_parse_cfg(bus_adap, config->data, config->size); + if (errno < 0) { + pr_err("%s: unable to parse config file: %s\n", + DRIVER_NAME, cfg); + goto error; + } + +error: + /* release firmware if needed */ + if (config != NULL) + release_firmware(config); + return errno; +} + +int cimaxusb_configuration_setting(struct device_s *bus_adap) +{ + return cimaxusb_configuration_setting_opt(bus_adap, 0); +} + +int cimaxusb_configuration_setting_vcc(struct device_s *bus_adap, int vcc) +{ + return cimaxusb_configuration_setting_opt(bus_adap, vcc); +}
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_config.h b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_config.h new file mode 100644 index 0000000..5c55204 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_config.h
/**************************************************************************//**
 * @file cimax+usb_config.h
 *
 * @brief CIMaX+ USB Driver for linux based operating systems.
 *
 * Copyright (C) 2009-2011 Bruno Tonelli   <bruno.tonelli@smardtv.com>
 *                       & Franck Descours <franck.descours@smardtv.com>
 *                         for SmarDTV France, La Ciotat
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 ******************************************************************************/

#ifndef __CIMAXPLUS_USB_CFG_H
#define __CIMAXPLUS_USB_CFG_H

/******************************************************************************
 * Include
 ******************************************************************************/
#include "cimax+usb_handle.h"

/******************************************************************************
 * Defines
 ******************************************************************************/
/* Default configuration file name, loaded through request_firmware(). */
#define CIMAX_CONFIG_NAME	"cimax+usb.cfg"

/******************************************************************************
 * Enums
 ******************************************************************************/
/******************************************************************************
 * Structures
 ******************************************************************************/

/******************************************************************************
 * Functions
 ******************************************************************************/

/******************************************************************************
 * @brief
 * read configuration file ( CIMAX_CONFIG_NAME) and set in CIMaX+ chip.
 *
 * @param bus_adap
 * Pointer to usb device.
 *
 * @return
 * 0 if OK otherwise a negative errno.
 ******************************************************************************/
int cimaxusb_configuration_setting(struct device_s *bus_adap);

#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_fw.c b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_fw.c new file mode 100644 index 0000000..6bccf4b --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_fw.c
@@ -0,0 +1,325 @@ +/**************************************************************************//** + * @file cimax+usb_fw.c + * + * @brief CIMaX+ USB Driver for linux based operating systems. + * + * Copyright (C) 2009-2011 Bruno Tonelli <bruno.tonelli@smardtv.com> + * & Franck Descours <franck.descours@smardtv.com> + * for SmarDTV France, La Ciotat + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + ******************************************************************************/ +/****************************************************************************** + * Include + ******************************************************************************/ +#include <linux/slab.h> +#include <linux/init.h> + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/poll.h> +#include <linux/usb.h> + +#include <linux/errno.h> +#include <linux/firmware.h> + +#include <linux/uaccess.h> +#include <linux/mutex.h> + +#include <linux/printk.h> + +#include "cimax+usb-driver.h" +#include "cimax+usb_fw.h" + +/****************************************************************************** + * Structures + ******************************************************************************/ +/****************************************************************************** + * Globals + ******************************************************************************/ +char cimax_fw[] = CIMAX_FIRMWARE_NAME; + +/****************************************************************************** + * Functions + 
******************************************************************************/ +/****************************************************************************** + * @brief + * Start & check Bistrom. + * + * @param dev + * Pointer to usb device. + * + * @param startAddr + * start address of firmware to compute + * + * @param endAddr + * stop address of firmware to compute + * + * @param FWSign + * Current signature to check + * + * + * @return + * None. + ******************************************************************************/ +int checkBistRom(struct device_s *bus_adap, + int startAddr, int endAddr, int signature) +{ + unsigned char *Val; + int rv; + + struct usb_device *dev = bus_adap->usbdev; + + Val = kcalloc(2, sizeof(unsigned char), GFP_KERNEL); + if (!Val) { + pr_err("%s-%s: out of memory.\n", DRIVER_NAME, __func__); + return -ENOMEM; + } + + /* Write "Flash" Size + (!) BistRom is computed since D000-flashSize to D0000-6 + */ + Val[0] = (0xD000-startAddr)&0x00ff; + Val[1] = (0xD000-startAddr)>>8; + if (bus_adap->ops->write_ctrl_message(dev, 0x008D, Val, 2) != 2) { + kfree(Val); + return -1; + } + + /* Write Signature*/ + Val[0] = signature&0x00ff; + Val[1] = signature>>8; + if (bus_adap->ops->write_ctrl_message(dev, 0x0080, Val, 2) != 2) { + kfree(Val); + return -1; + } + + /* Launch BistRom [(D000-flashSize)..CFF9]+[FFFA..FFFF] computation*/ + Val[0] = 0x0F; + if (bus_adap->ops->write_ctrl_message(dev, 0x0082, Val, 1) != 1) { + kfree(Val); + return -1; + } + + pr_info("\n>>Read Signature\n"); + + /* Read Signature*/ + if (bus_adap->ops->read_ctrl_message(dev, 0x0041, Val, 2) != 2) { + kfree(Val); + return -1; + } + + pr_info("\n>> Bistrom computed = %04X\n", Val[0]|Val[1]<<8); + + /* Read Boot status*/ + if (bus_adap->ops->read_ctrl_message(dev, 0x0009, Val, 1) != 1) { + kfree(Val); + return -1; + } + + pr_info("\n>> BootRom Status = %02X\n", Val[0]); + rv = Val[0]; + kfree(Val); + + return rv; +} + 
/******************************************************************************
 * @brief
 * Compute BistRom.
 *
 * @param ptr
 * Pointer to buffer with register data
 *
 * @param size
 * Number of register to process
 *
 * @param FWSign
 * Current signature
 *
 * @return
 * The updated running signature.
 ******************************************************************************/
unsigned int MISR(const unsigned char *ptr, int size, unsigned int FWSign)
{
	int k, i;

	unsigned short mySign;

	/* Fold each byte into the 16-bit MISR signature.  The constant
	 * 0x88B7 selects the feedback taps for bit 0; the remaining bits
	 * are a shift of the previous signature XOR'ed with the data byte
	 * (bits 1..7) and a plain shift (bits 8..15). */
	for (k = 0; k < size; k++) {
		mySign = ptr[k]&0x01;

		for (i = 0; i < 16; i++) {
			if (0x88B7 & (1<<i))
				mySign ^= (FWSign>>i) & 0x01;
		}

		mySign |= ((FWSign<<1)^(ptr[k])) & 0x00FE;
		mySign |= (FWSign<<1) & 0x00FF00;

		FWSign = mySign;
	}

	return FWSign;
}


/******************************************************************************
 * @brief
 * upload firmware in CIMaX+ chip.
 *
 * @param bus_adap
 * Pointer to usb device.
 *
 * @param fw_data
 * Pointer to buffer with firmware data.
 *
 * @param FWSign
 * Pointer to store computed signature.
 *
 * @return
 * 0 on success, -ENOMEM on allocation failure, -ENODEV on a transfer
 * or verify error.
 ******************************************************************************/
int cimaxusb_firmware_upload(struct device_s *bus_adap,
	const unsigned char *fw_data, unsigned int *FWSign)
{
	/* errno stays at -EFAULT while everything succeeds; it is used as
	 * an "all good so far" sentinel and mapped to 0 on return. */
	int errno = -EFAULT;
	const unsigned char *ptr;
	int startAddr = START_FW_ADDR;
	unsigned char *recv;
	int size;

	struct usb_device *dev = bus_adap->usbdev;

	/* data firmware: the image is addressed by absolute chip address,
	 * so index fw_data with the start address itself. */
	ptr = fw_data;
	ptr += START_FW_ADDR;

	/* read-back buffer for the verify step */
	recv = kcalloc(300, sizeof(unsigned char), GFP_KERNEL);
	if (!recv) {
		pr_err("%s-%s: out of memory.\n", DRIVER_NAME, __func__);
		return -ENOMEM;
	}

	do {
		/* compute the size to send to CIMaX+: full packets of
		 * MAX_FW_PKT_SIZE, then the final partial packet up to
		 * STOP_FW_ADDR inclusive. */
		size = (startAddr <= (STOP_FW_ADDR + 1 - MAX_FW_PKT_SIZE)) ?
			(MAX_FW_PKT_SIZE) : (STOP_FW_ADDR + 1 - startAddr);
		/* compute Signature */
		*FWSign = MISR(ptr, size, *FWSign);

		pr_info("%s: firmware start address %08x size %d\n",
			DRIVER_NAME, startAddr, size);

		/* upload data firmware */
		if (bus_adap->ops->write_ctrl_message(
			dev, startAddr, (void *)ptr, size) != size) {
			pr_err("Failed to load CIMaX+ firmware\n");
			errno = -ENODEV;
			break;
		}
		/* read the chunk back and compare — verify the flash took it */
		if (bus_adap->ops->read_ctrl_message(
			dev, startAddr, (void *)recv, size) != size) {
			pr_err("Failed to load CIMaX+ firmware\n");
			errno = -ENODEV;
			break;
		}
		if (memcmp(ptr, recv, size)) {
			pr_err("Failed compare at Address 0x%04x\n",
				startAddr);
			errno = -ENODEV;
			break;
		}
		/* update size sent to CIMaX+ */
		startAddr += size;
		ptr += size;
		if (startAddr >= STOP_FW_ADDR)
			break;
	} while (1);

	kfree(recv);

	if (errno == -EFAULT) {
		/* upload interrupt vector: 6 bytes at FFFA..FFFF, only when
		 * the main image went through without error. */
		ptr = fw_data;
		ptr += START_INTVECT_ADDR;
		startAddr = START_INTVECT_ADDR;

		/* continue to compute Signature */
		*FWSign = MISR(ptr, 6, *FWSign);

		/* upload interrupt vector data*/
		if (bus_adap->ops->write_ctrl_message(
			dev, startAddr, (void *)ptr, 6) != 6) {
			pr_err("Failed to load CIMaX firmware(Int vector)\n");
			errno = -ENODEV;
		}
	}
	return (errno == -EFAULT) ? 0 : errno;
}

/******************************************************************************
 * @brief
 * upload and start firmware in CIMaX+ chip.
 *
 * @param bus_adap
 * Pointer to usb device.
 *
 * @return
 * 0 if OK otherwise a negative errno.
+ ******************************************************************************/ +int cimaxusb_fw_upload(struct device_s *bus_adap) +{ + int errno = -EFAULT; + const struct firmware *firmware; + char *fw = cimax_fw; + unsigned int FWSign = 0; + int ret; + + struct usb_device *dev = bus_adap->usbdev; + + /* request kernel to locate firmware file */ + errno = request_firmware(&firmware, fw, &dev->dev); + if (errno < 0) { + pr_err("%s: unable to locate firmware file: %s\n", + DRIVER_NAME, fw); + goto error; + } + + errno = cimaxusb_firmware_upload(bus_adap, firmware->data, &FWSign); + if (errno < 0) { + pr_err("%s: unable to upload firmware file: %s\n", + DRIVER_NAME, fw); + goto error; + } + + pr_info("%s: firmware: %s loaded with success. Current Bistrom %04X\n", + DRIVER_NAME, fw, FWSign); + + ret = checkBistRom(bus_adap, START_FW_ADDR, STOP_FW_ADDR, FWSign); + if (ret != 0x02) { + pr_err("\nError: Fail on compare BistRom (%02X) !\n", ret); + errno = -ENODEV; + goto error; + } + + if (bus_adap->ops->init_fw(dev) != 0) { + pr_err("\nError: Fail on INIT command !\n"); + errno = -ENODEV; + goto error; + } + +error: + /* release firmware if needed */ + if (firmware != NULL) + release_firmware(firmware); + return errno; +}
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_fw.h b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_fw.h new file mode 100644 index 0000000..bf3f64c --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_fw.h
/**************************************************************************//**
 * @file cimax+usb_fw.h
 *
 * @brief CIMaX+ USB Driver for linux based operating systems.
 *
 * Copyright (C) 2009-2011 Bruno Tonelli   <bruno.tonelli@smardtv.com>
 *                       & Franck Descours <franck.descours@smardtv.com>
 *                         for SmarDTV France, La Ciotat
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 ******************************************************************************/

#ifndef __CIMAXPLUS_USB_FW_H
#define __CIMAXPLUS_USB_FW_H

/******************************************************************************
 * Include
 ******************************************************************************/
#include "cimax+usb_handle.h"

/******************************************************************************
 * Defines
 ******************************************************************************/
/* Firmware image file name, loaded through request_firmware(). */
#define CIMAX_FIRMWARE_NAME	"cimax+_usbdvb.bin"
/* Absolute chip address range of the main firmware image ... */
#define START_FW_ADDR		0x8000
#define STOP_FW_ADDR		0xCFF9
/* ... and of the 6-byte interrupt vector block. */
#define START_INTVECT_ADDR	0xFFFA
/* Largest chunk written per control transfer during upload. */
#define MAX_FW_PKT_SIZE		256

/******************************************************************************
 * Enums
 ******************************************************************************/
/******************************************************************************
 * Structures
 ******************************************************************************/

/******************************************************************************
 * Functions
 ******************************************************************************/
/******************************************************************************
 * @brief
 * upload firmware in CIMaX+ chip.
 *
 * @param bus_adap
 * Pointer to usb device.
 *
 * @return
 * 0 if OK otherwise a negative errno.
 ******************************************************************************/
int cimaxusb_fw_upload(struct device_s *bus_adap);

#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_handle.h b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_handle.h new file mode 100644 index 0000000..936b9cf --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_handle.h
@@ -0,0 +1,46 @@ +/**************************************************************************//** + * @file cimax+usb_handle.h + * + * @brief CIMaX+ USB Driver for linux based operating systems. + * + * Copyright (C) 2009-2011 Bruno Tonelli <bruno.tonelli@smardtv.com> + * & Franck Descours <franck.descours@smardtv.com> + * for SmarDTV France, La Ciotat + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + ******************************************************************************/ + +#ifndef __CIMAXPLUS_USB_HDLE_H +#define __CIMAXPLUS_USB_HDLE_H + +#ifdef __KERNEL__ + +struct cimaxusb_priv_ops_t { + int (*write_ctrl_message)( + struct usb_device *dev, int addr, void *data, int size); + + int (*read_ctrl_message)( + struct usb_device *dev, int addr, void *data, int size); + + int (*init_fw)( + struct usb_device *dev); + + int (*write_ep6_message)( + struct usb_device *dev, void *data, int size); + + int (*read_ep5_message)( + struct usb_device *dev, void *data, int size); +}; + +#endif + +#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_time.c b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_time.c new file mode 100644 index 0000000..ea80b5e --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_time.c
@@ -0,0 +1,130 @@ +/**************************************************************************//** + * @file cimax+usb_fw.c + * + * @brief CIMaX+ USB Driver for linux based operating systems. + * + * Copyright (C) 2009-2011 Bruno Tonelli <bruno.tonelli@smardtv.com> + * & Franck Descours <franck.descours@smardtv.com> + * for SmarDTV France, La Ciotat + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + ******************************************************************************/ + +/*#define TIMESTAMP*/ + +/****************************************************************************** + * Include + ******************************************************************************/ +#include <linux/delay.h> + +#include "cimax+usb_time.h" + +/****************************************************************************** + * Structures + ******************************************************************************/ +#define err(format, arg...)\ + pr_err("cimax+usb_time: %s> ERROR " format "\n" , \ + __func__, ## arg) +#define info(format, arg...) 
pr_info("time> " format "\n", ## arg) +/****************************************************************************** + * Globals + ******************************************************************************/ +#ifdef TIMESTAMP +struct item_array_s gstArray; +#endif +/****************************************************************************** + * Functions + ******************************************************************************/ +/****************************************************************************** + * @brief + * Init timestamp. + * + * @param + * None + * + * @return + * None. + ******************************************************************************/ +void InitTimestamp(void) +{ +#ifdef TIMESTAMP + gstArray.count = 0; +#endif + return; +} + +/****************************************************************************** + * @brief + * Set timestamp. + * + * @param pcFormat + * Printf-like format + * + * @return + * None. + ******************************************************************************/ +void SetTimestamp(const char *pcFormat, ...) +{ +#ifdef TIMESTAMP + va_list stArgs; + + if (gstArray.count >= MAX_ITEMS) { + if (gstArray.count++ == MAX_ITEMS) + ShowTimestamp(); + return; + } + ktime_get_ts(&gstArray.stItem[gstArray.count].stTime); + va_start(stArgs, pcFormat); + vsprintf(gstArray.stItem[gstArray.count++].pcLine, pcFormat, stArgs); + va_end(stArgs); +#endif + return; +} + +/****************************************************************************** + * @brief + * Display all timestamps. + * + * @param + * None + * + * @return + * None. 
+ ******************************************************************************/ +void ShowTimestamp(void) +{ +#ifdef TIMESTAMP + int i; + + if (gstArray.count == 0) { + err("No timestamps available"); + return; + } + + info("==============================================================="); + info(" TIMESTAMPS"); + info("==============================================================="); + + for (i = 0; i < gstArray.count; i++) { + info("[%04d] [%03d.%09d] %s", + i, + gstArray.stItem[i].stTime.tv_sec, + gstArray.stItem[i].stTime.tv_nsec, + gstArray.stItem[i].pcLine); + if ((i % 100) == 0) + msleep(20); + } + info("==============================================================="); + gstArray.count = 0; + +#endif + return; +}
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_time.h b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_time.h new file mode 100644 index 0000000..0012c64 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/SRC/cimax+usb_time.h
@@ -0,0 +1,92 @@ +/**************************************************************************//** + * @file cimax+usb_time.h + * + * @brief CIMaX+ USB Driver for linux based operating systems. + * + * Copyright (C) 2009-2011 Bruno Tonelli <bruno.tonelli@smardtv.com> + * & Franck Descours <franck.descours@smardtv.com> + * for SmarDTV France, La Ciotat + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + ******************************************************************************/ + +#ifndef __CIMAXPLUS_USB_TIME_H +#define __CIMAXPLUS_USB_TIME_H + +/****************************************************************************** + * Include + ******************************************************************************/ +#include <linux/ktime.h> + +/****************************************************************************** + * Defines + ******************************************************************************/ +#define MAX_ITEMS 100000 +#define MAX_LINE_SIZE 128 + +/****************************************************************************** + * Enums + ******************************************************************************/ +/****************************************************************************** + * Structures + ******************************************************************************/ +struct item_s { + struct timespec stTime; + char pcLine[MAX_LINE_SIZE]; +}; + +struct item_array_s { + int count; + item_s stItem[MAX_ITEMS]; +}; + +extern struct item_array_s 
gstArray; + +/****************************************************************************** + * Functions + ******************************************************************************/ +/****************************************************************************** + * @brief + * Init timestamp. + * + * @param + * None + * + * @return + * None. + ******************************************************************************/ +void InitTimestamp(void); + +/****************************************************************************** + * @brief + * Set timestamp. + * + * @param pcFormat + * Printf-like format + * + * @return + * None. + ******************************************************************************/ +void SetTimestamp(const char *pcFormat, ...); + +/****************************************************************************** + * @brief + * Display all timestamps. + * + * @param + * None + * + * @return + * None. + ******************************************************************************/ +void ShowTimestamp(void); + +#endif
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/firmware/cimax+_usbdvb.bin b/drivers/stream_input/parser/dvb_ci/cimax/usb/firmware/cimax+_usbdvb.bin new file mode 100644 index 0000000..315fe5e --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/firmware/cimax+_usbdvb.bin Binary files differ
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/firmware/cimax+usb.cfg b/drivers/stream_input/parser/dvb_ci/cimax/usb/firmware/cimax+usb.cfg new file mode 100644 index 0000000..7e792d8 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/firmware/cimax+usb.cfg
@@ -0,0 +1,97 @@ +; +; ************************ +; * INIT_TS.TXT SCRIPT * +; ************************ +; +; *** CLOSE INPUT *** +; +IN_SEL 0x00 +; +; *** CLOSE OUPUT *** +; +OUT_SEL 0x00 +; +; *** RESET FIFO *** +; +FIFO_CTRL 0x0f +SYNC_RTV_CTRL 0x0f +; +; +; ************************* +; * INIT_USB.TXT SCRIPT * +; ************************* +; +; *** Endpoint Config *** +; +DMA_ACC_EPS 0x3f +EPS_ENABLE 0x7f +; +; *** FIFO Control *** +; +USB2TS_CTRL 0x0f +TS2USB_CTRL 0x0f +; +; *** FREQ Control *** +; +USB2TS0_RDL 0x80 +USB2TS1_RDL 0x80 +; +;----------------------------------------------------------------------------- +; Set CAM power +;----------------------------------------------------------------------------- +; +GPIO0_DATA_OUT 0x00 +; +; unlock CFG +CFG_2 0x00 +; +; 1) DVB/CI/CI+/SCARD 2slot +CFG_1 0x00 +; +; 2) Set the Default "power off" state such as VCC_MODA=VCC_MODB=VPPx_MODA=VPPx_MODB='Z' +GPIO0_DFT 0x00 +; +; 3) Set GPIO3 as external power switch driver +GPIO0_MASK_DATA 0x07 +; +; 4) Set "power on" state (VCC=VPP1=VPP2= 5V) +GPIO0_DATA_OUT 0x01 +; +; 5) Lock config +CFG_2 0x01 +; +; 6) Write in the GPIO0_DIR_REG: defines the GPIOs, which +; are used to drive the external power switch, in output mode. +GPIO0_DIR 0x07 +; +; 7) Check VCCENable +?CFG_1 0x20 +; +; 8) Set & wait for PcmciaOutputEnable +|CFG_1 0x08 +?CFG_1 0x08 +; +;--------------------------------------- +; Set Router CAM +;--------------------------------------- +; +; +GAP_REMOVER_CH0_CTRL 0x0C +GAP_REMOVER_CH1_CTRL 0x0C + +; 9) CH0 & CH1 from CAM A & B, CAM A & B from CH0 & CH1 +; +ROUTER_CAM_MOD 0x21 +ROUTER_CAM_CH 0x00 +; +;_Wait 200 +;--------------------------------------- +; ROUTER CAM +;--------------------------------------- +OUT_SEL 0x03 +; +USB2TS0_RDL 0x80 +USB2TS1_RDL 0x80 +; +IN_SEL 0x22 +
diff --git a/drivers/stream_input/parser/dvb_ci/cimax/usb/firmware/cimax+usb_ms.cfg b/drivers/stream_input/parser/dvb_ci/cimax/usb/firmware/cimax+usb_ms.cfg new file mode 100644 index 0000000..5c38429 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimax/usb/firmware/cimax+usb_ms.cfg
@@ -0,0 +1,136 @@ +; +; ************************ +; * INIT_TS.TXT SCRIPT * +; ************************ +; +; *** CLOSE INPUT *** +; +IN_SEL 0x00 +; +; *** CLOSE OUPUT *** +; +OUT_SEL 0x00 +; +; *** RESET FIFO *** +; +FIFO_CTRL 0x0f +SYNC_RTV_CTRL 0x0f +; +; +; ************************* +; * INIT_USB.TXT SCRIPT * +; ************************* +; +; *** Endpoint Config *** +; +DMA_ACC_EPS 0x3f +EPS_ENABLE 0x7f +; +; *** FIFO Control *** +; +USB2TS_CTRL 0x0f +TS2USB_CTRL 0x0f +; +; *** FREQ Control *** +; +USB2TS0_RDL 0x80 +USB2TS1_RDL 0x80 +; +;----------------------------------------------------------------------------- +; Set CAM power +;----------------------------------------------------------------------------- +; +GPIO0_DATA_OUT 0x00 +; +; unlock CFG +CFG_2 0x00 +; +; 1) DVB/CI/CI+/SCARD 2slot +CFG_1 0x00 +; +; 2) Set the Default "power off" state such as VCC_MODA=VCC_MODB=VPPx_MODA=VPPx_MODB='Z' +GPIO0_DFT 0x00 +; +; 3) Set GPIO3 as external power switch driver +GPIO0_MASK_DATA 0x07 +; +; 4) Set "power on" state (VCC=VPP1=VPP2= 5V) +GPIO0_DATA_OUT 0x01 +; +; 5) Lock config +CFG_2 0x01 +; +; 6) Write in the GPIO0_DIR_REG: defines the GPIOs, which +; are used to drive the external power switch, in output mode. 
+GPIO0_DIR 0x07 +; +; 7) Check VCCENable +?CFG_1 0x20 +; +; 8) Set & wait for PcmciaOutputEnable +|CFG_1 0x08 +?CFG_1 0x08 +; +;--------------------------------------- +; Set Router CAM +;--------------------------------------- +; +; +GAP_REMOVER_CH0_CTRL 0x0C +GAP_REMOVER_CH1_CTRL 0x0C + +; 9) CH0 & CH1 from CAM A & B, CAM A & B from CH0 & CH1 +; +ROUTER_CAM_MOD 0x21 +ROUTER_CAM_CH 0x00 +; +;_Wait 200 +;--------------------------------------- +; ROUTER CAM +;--------------------------------------- +OUT_SEL 0x03 +; +USB2TS0_RDL 0x80 +USB2TS1_RDL 0x80 +; +IN_SEL 0x22 + +; *************************** +; * MuliStream.TXT SCRIPT * +; *************************** +; +; *** CLOSE INPUT *** +; +IN_SEL 0x00 +; +; *** CLOSE OUPUT *** +; +OUT_SEL 0x00 +; +; *** RESET FIFO *** +; +FIFO_CTRL 0x0f +SYNC_RTV_CTRL 0x0f +; +; *** *** +; +CkMan_Config 0x1f +CkMan_Select 0x00 +MERGER_DIV_MICLK 0x02 +SYNC_SYMBOL 0x0d +PID_AND_SYNC_REMAPPER_INV_CTRL 0x01 + +FIFO_CTRL 0x0f +ROUTER_CAM_MOD 0x03 +ROUTER_CAM_CH 0x80 + +USB2TS_CTRL 0x0f +TS2USB_CTRL 0x0f +USB2TS0_RDL 0x20 +USB2TS1_RDL 0x20 + +OUT_SEL 0x03 +IN_SEL 0x22 +; +; ************************ +;
diff --git a/drivers/stream_input/parser/dvb_ci/cimcu/dvb_ca_en50221_cimcu.c b/drivers/stream_input/parser/dvb_ci/cimcu/dvb_ca_en50221_cimcu.c new file mode 100644 index 0000000..87bec4e --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimcu/dvb_ca_en50221_cimcu.c
@@ -0,0 +1,2210 @@ +/* + * dvb_ca.c: generic DVB functions for EN50221 CAM interfaces + * + * Copyright (C) 2004 Andrew de Quincey + * + * Parts of this file were based on sources as follows: + * + * Copyright (C) 2003 Ralph Metzler <rjkm@metzlerbros.de> + * + * based on code: + * + * Copyright (C) 1999-2002 Ralph Metzler + * & Marcus Metzler for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html + */ + +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/kthread.h> +#include <linux/sched/signal.h> +#include <linux/compat.h> + +#include "dvb_ca_en50221_cimcu.h" +#include "../cimax/dvb_ringbuffer.h" + +#define READ_LPDU_PKT + +static int dvb_ca_en50221_debug; + +module_param_named(cammcu_debug, dvb_ca_en50221_debug, int, 0644); +MODULE_PARM_DESC(cammcu_debug, "enable verbose debug messages"); + +static int dvb_ca_en50221_usleep = 800; + +module_param_named(cammcu_usleep, dvb_ca_en50221_usleep, int, 0644); +MODULE_PARM_DESC(cammcu_usleep, "enable sleep"); + +static int dvb_ca_ciplus_enable = 0; +module_param_named(ciplus_enable, dvb_ca_ciplus_enable, int, 0644); +MODULE_PARM_DESC(ciplus_enable, "get ci plus enable"); + +static unsigned int dvb_ca_ci_profire = 0; +module_param_named(ci_profire, dvb_ca_ci_profire, int, 0644); +MODULE_PARM_DESC(ci_profire, "get ci plus profire"); + +#define dprintk if (dvb_ca_en50221_debug) printk + +#define INIT_TIMEOUT_SECS 10 + +#define HOST_LINK_BUF_SIZE 0x200 + +#define RX_BUFFER_SIZE 65535 + +#define MAX_RX_PACKETS_PER_ITERATION 10 + +#define CTRLIF_DATA 0 +#define CTRLIF_COMMAND 1 +#define CTRLIF_STATUS 1 +#define CTRLIF_SIZE_LOW 2 +#define CTRLIF_SIZE_HIGH 3 + +#define CMDREG_HC 1 /* Host control */ +#define CMDREG_SW 2 /* Size write */ +#define CMDREG_SR 4 /* Size read */ +#define CMDREG_RS 8 /* Reset interface */ +#define CMDREG_FRIE 0x40 /* Enable FR interrupt */ +#define CMDREG_DAIE 0x80 /* Enable DA interrupt */ +#define IRQEN (CMDREG_DAIE) + +#define STATUSREG_RE 1 /* read error */ +#define STATUSREG_WE 2 /* write error */ +#define STATUSREG_FR 0x40 /* module free */ +#define STATUSREG_DA 0x80 /* data available */ +#define STATUSREG_TXERR (STATUSREG_RE|STATUSREG_WE) /* general 
transfer error */ + + +#define DVB_CA_SLOTSTATE_NONE 0 +#define DVB_CA_SLOTSTATE_UNINITIALISED 1 +#define DVB_CA_SLOTSTATE_RUNNING 2 +#define DVB_CA_SLOTSTATE_INVALID 3 +#define DVB_CA_SLOTSTATE_WAITREADY 4 +#define DVB_CA_SLOTSTATE_VALIDATE 5 +#define DVB_CA_SLOTSTATE_WAITFR 6 +#define DVB_CA_SLOTSTATE_LINKINIT 7 + + +/* Information on a CA slot */ +struct dvb_ca_slot { + + /* current state of the CAM */ + int slot_state; + + /* mutex used for serializing access to one CI slot */ + struct mutex slot_lock; + + /* Number of CAMCHANGES that have occurred since last processing */ + atomic_t camchange_count; + + /* Type of last CAMCHANGE */ + int camchange_type; + + /* base address of CAM config */ + u32 config_base; + + /* value to write into Config Control register */ + u8 config_option; + + /* if 1, the CAM supports DA IRQs */ + u8 da_irq_supported:1; + +#ifdef READ_LPDU_PKT + /* Offset into current ringbuffer when user buffer was not big enough + to return entire pkt */ + int rx_offset; +#endif + + /* size of the buffer to use when talking to the CAM */ + int link_buf_size; + + /* buffer for incoming packets */ + struct dvb_ringbuffer rx_buffer; + + /* timer used during various states of the slot */ + unsigned long timeout; +}; + +/* Private CA-interface information */ +struct dvb_ca_private { + struct kref refcount; + + /* pointer back to the public data structure */ + struct dvb_ca_en50221_cimcu *pub; + + /* the DVB device */ + struct dvb_device *dvbdev; + + /* Flags describing the interface (DVB_CA_FLAG_*) */ + u32 flags; + + /* number of slots supported by this CA interface */ + unsigned int slot_count; + + /* information on each slot */ + struct dvb_ca_slot *slot_info; + + /* wait queues for read() and write() operations */ + wait_queue_head_t wait_queue; + + /* PID of the monitoring thread */ + struct task_struct *thread; + + /* Flag indicating if the CA device is open */ + unsigned int open:1; + + /* Flag indicating the thread should wake up now */ + 
unsigned int wakeup:1; + + /* Delay the main thread should use */ + unsigned long delay; + + /* Slot to start looking for data to read from in the next user-space read operation */ + int next_read_slot; + + /* mutex serializing ioctls */ + struct mutex ioctl_mutex; +}; + +static void dvb_ca_private_free(struct dvb_ca_private *ca) +{ + unsigned int i; + + dvb_unregister_device(ca->dvbdev); + for (i = 0; i < ca->slot_count; i++) + vfree(ca->slot_info[i].rx_buffer.data); + + kfree(ca->slot_info); + kfree(ca); +} + +static void dvb_ca_private_release(struct kref *ref) +{ + struct dvb_ca_private *ca = container_of(ref, struct dvb_ca_private, refcount); + dvb_ca_private_free(ca); +} + +static void dvb_ca_private_get(struct dvb_ca_private *ca) +{ + kref_get(&ca->refcount); +} + +static void dvb_ca_private_put(struct dvb_ca_private *ca) +{ + kref_put(&ca->refcount, dvb_ca_private_release); +} + +static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca); +static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * ebuf, int ecount); +static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * ebuf, int ecount); + + +/** + * Safely find needle in haystack. + * + * @haystack: Buffer to look in. + * @hlen: Number of bytes in haystack. + * @needle: Buffer to find. + * @nlen: Number of bytes in needle. + * @return Pointer into haystack needle was found at, or NULL if not found. + */ +static char *findstr(char * haystack, int hlen, char * needle, int nlen) +{ + int i; + + if (hlen < nlen) + return NULL; + + for (i = 0; i <= hlen - nlen; i++) { + if (!strncmp(haystack + i, needle, nlen)) + return haystack + i; + } + + return NULL; +} + + + +/* ******************************************************************************** */ +/* EN50221 physical interface functions */ + + +/** + * dvb_ca_en50221_check_camstatus - Check CAM status. 
+ */ +static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot) +{ + int slot_status; + int cam_present_now; + int cam_changed; + + /* IRQ mode */ + if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE) { + return (atomic_read(&ca->slot_info[slot].camchange_count) != 0); + } + + /* poll mode */ + slot_status = ca->pub->poll_slot_status(ca->pub, slot, ca->open); + + cam_present_now = (slot_status & DVB_CA_EN50221_POLL_CAM_PRESENT) ? 1 : 0; + cam_changed = (slot_status & DVB_CA_EN50221_POLL_CAM_CHANGED) ? 1 : 0; + if (!cam_changed) { + int cam_present_old = (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE); + cam_changed = (cam_present_now != cam_present_old); + } + + if (cam_changed) { + if (!cam_present_now) { + ca->slot_info[slot].camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED; + } else { + ca->slot_info[slot].camchange_type = DVB_CA_EN50221_CAMCHANGE_INSERTED; + } + atomic_set(&ca->slot_info[slot].camchange_count, 1); + } else { + if ((ca->slot_info[slot].slot_state == DVB_CA_SLOTSTATE_WAITREADY) && + (slot_status & DVB_CA_EN50221_POLL_CAM_READY)) { + // move to validate state if reset is completed + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_VALIDATE; + } + } + + return cam_changed; +} + + +/** + * dvb_ca_en50221_wait_if_status - Wait for flags to become set on the STATUS + * register on a CAM interface, checking for errors and timeout. + * + * @ca: CA instance. + * @slot: Slot on interface. + * @waitfor: Flags to wait for. + * @timeout_ms: Timeout in milliseconds. + * + * @return 0 on success, nonzero on error. 
+ */ +static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot, + u8 waitfor, int timeout_hz) +{ + unsigned long timeout; + unsigned long start; + + dprintk("%s\n", __func__); + + /* loop until timeout elapsed */ + start = jiffies; + timeout = jiffies + timeout_hz; + while (1) { + /* read the status and check for error */ + int res = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); + if (res < 0) + return -EIO; + + /* if we got the flags, it was successful! */ + if (res & waitfor) { + dprintk("%s succeeded timeout:%lu\n", __func__, jiffies - start); + return 0; + } + + /* check for timeout */ + if (time_after(jiffies, timeout)) { + break; + } + + /* wait for a bit */ + msleep(1); + } + + printk("%s failed timeout:%lu\n", __func__, jiffies - start); + + /* if we get here, we've timed out */ + return -ETIMEDOUT; +} + + +/** + * dvb_ca_en50221_link_init - Initialise the link layer connection to a CAM. + * + * @ca: CA instance. + * @slot: Slot id. + * + * @return 0 on success, nonzero on failure. + */ +static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot) +{ + int ret; + int buf_size; + u8 buf[2]; + + dprintk("%s\n", __func__); + + /* we'll be determining these during this function */ + ca->slot_info[slot].da_irq_supported = 0; +#ifdef READ_LPDU_PKT + ca->slot_info[slot].rx_offset = 0; +#endif + /* set the host link buffer size temporarily. it will be overwritten with the + * real negotiated size later. 
*/ + ca->slot_info[slot].link_buf_size = 2; + + /* read the buffer size from the CAM */ + if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SR)) != 0) + return ret; + if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ / 10)) != 0) + return ret; + if ((ret = dvb_ca_en50221_read_data(ca, slot, buf, 2)) != 2) + return -EIO; + if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN)) != 0) + return ret; + + /* store it, and choose the minimum of our buffer and the CAM's buffer size */ + buf_size = (buf[0] << 8) | buf[1]; + if (buf_size > HOST_LINK_BUF_SIZE) + buf_size = HOST_LINK_BUF_SIZE; + ca->slot_info[slot].link_buf_size = buf_size; + buf[0] = buf_size >> 8; + buf[1] = buf_size & 0xff; + dprintk("Chosen link buffer size of %i\n", buf_size); + + /* write the buffer size to the CAM */ + if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SW)) != 0) + return ret; + if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10)) != 0) + return ret; + if ((ret = dvb_ca_en50221_write_data(ca, slot, buf, 2)) != 2) + return -EIO; + if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN)) != 0) + return ret; + + /* success */ + return 0; +} + +/** + * dvb_ca_en50221_read_tuple - Read a tuple from attribute memory. + * + * @ca: CA instance. + * @slot: Slot id. + * @address: Address to read from. Updated. + * @tupleType: Tuple id byte. Updated. + * @tupleLength: Tuple length. Updated. + * @tuple: Dest buffer for tuple (must be 256 bytes). Updated. + * + * @return 0 on success, nonzero on error. 
+ */ +static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot, + int *address, int *tupleType, int *tupleLength, u8 * tuple) +{ + int i; + int _tupleType; + int _tupleLength; + int _address = *address; + + /* grab the next tuple length and type */ + msleep(10); + if ((_tupleType = ca->pub->read_attribute_mem(ca->pub, slot, _address)) < 0) { + printk("read_attribute_mem error\r\n"); + return _tupleType; + } + + if (_tupleType == 0xff) { + printk("END OF CHAIN TUPLE type:0x%x\n", _tupleType); + *address += 2; + *tupleType = _tupleType; + *tupleLength = 0; + return 0; + } + msleep(10); + if ((_tupleLength = ca->pub->read_attribute_mem(ca->pub, slot, _address + 2)) < 0) + return _tupleLength; + _address += 4; + + dprintk("TUPLE type:0x%x length:%i\n", _tupleType, _tupleLength); + msleep(10); + + /* read in the whole tuple */ + for (i = 0; i < _tupleLength; i++) { + msleep(10); + tuple[i] = ca->pub->read_attribute_mem(ca->pub, slot, _address + (i * 2)); + printk(" 0x%02x: 0x%02x %c\n", + i, tuple[i] & 0xff, + ((tuple[i] > 31) && (tuple[i] < 127)) ? 
tuple[i] : '.'); + } + _address += (_tupleLength * 2); + + // success + *tupleType = _tupleType; + *tupleLength = _tupleLength; + *address = _address; + return 0; +} + +static void parseCiplusCompatibility(char *tuple, int len, int *ci_plus_enabled) +{ + char flag = '+'; + int number; + int i; + + number = 0; + if (tuple[0] == '0') { + flag = '0'; + } else if (tuple[0] == '*') { + flag = '*'; + } else { + flag = '+'; + } +// printk("--flag[%c]----\r\n", flag); +/* Expected one or more decimal digits */ + for (i = 0; i < len; i++) + { + if (tuple[i] >= '0' && tuple[i] <= '9') { + number *= 10; + number += tuple[i] - '0'; + printk("--tuple[%d]=[%c]----\r\n", i, tuple[i]); + } else if (tuple[i] == '*' || tuple[i] == '+' || tuple[i] == '-') { + continue; + } else { + break; + } + } + if ((flag == '-') && (number == 1)) + { + /* CI+ "v1" not supported */ + printk("--tuple[%s]=set 0----\r\n",tuple); + *ci_plus_enabled = 0; + } + else if ((flag == '*') && (number >= 1)) + { + /* CI+ "v1" is supported */ + *ci_plus_enabled = 1; + printk("--tuple[%s]=set 1----\r\n",tuple); + } + else if ((flag == '+') && (number == 1)) + { + /* CI+ "v1" is supported */ + *ci_plus_enabled = 1; + printk("--tuple[%s]=set 1----\r\n",tuple); + } +} + +/** + * @brief Handle "ciprof" compatibilty item in compatiblity string + * @param identity - item's identity + * @param flag - compatibility flag + * @param ci_plus_profile - the value of ciprof + */ +static void parseCiprofCompatibility(char *tuple, int len, unsigned int *ci_plus_profile) +{ + int valid; + int number; + int i; + + valid = 1; + number = 0; + + /* Expected decimal or hexadecimal number (note: string is lowercase) */ + if ((len >= 2) && + (tuple[0] == '0') && (tuple[1] == 'x')) + { + /* Hexadecimal prefix */ + for (i = 2; (valid) && (i < len); i++) + { + printk("--tuple[%d]=[%c]----\r\n", i, tuple[i]); + if (tuple[i] >= '0' && tuple[i] <= '9') + { + number *= 16; + number += tuple[i] - '0'; + } + else if (tuple[i] >= 'a' && tuple[i] 
<= 'f') + { + number *= 16; + number += tuple[i] + 10 - 'a'; + } + else + { + break; + } + } + } + else + { + /* Decimal expected */ + for (i = 0; (valid) && (i < len); i++) + { + printk("--tuple[%d]=[%c]----\r\n", i, tuple[i]); + if (tuple[i] >= '0' && tuple[i] <= '9') + { + number *= 10; + number += tuple[i] - '0'; + } + else + { + break; + } + } + } + + if (valid) + { + printk("--tuple[%s] number [%u]-[0x%x]---\r\n", tuple, number, number); + *ci_plus_profile = number; + } +} + +static int dvb_ca_parse_ciinfo(struct dvb_ca_private *ca, char *tuple, int len) { + + /* check it contains the correct ciplus= string */ + char * ciplus_str = findstr((char *)tuple, len, "ciplus=", 7); + + if (ciplus_str == NULL) + printk("ciplus_str error line[%d]\r\n", __LINE__); + else + parseCiplusCompatibility(ciplus_str + 7, len - (ciplus_str + 7 - tuple), &dvb_ca_ciplus_enable); + + ciplus_str = findstr((char *)tuple, len, "ciprof=", 7); + if (ciplus_str == NULL) + printk("ciprof str error line[%d]\r\n", __LINE__); + else + parseCiprofCompatibility(ciplus_str + 7, len - (ciplus_str + 7 - tuple), &dvb_ca_ci_profire); + + return 0; +} + +/** + * dvb_ca_en50221_parse_attributes - Parse attribute memory of a CAM module, + * extracting Config register, and checking it is a DVB CAM module. + * + * @ca: CA instance. + * @slot: Slot id. + * + * @return 0 on success, <0 on failure. 
+ */ +static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot) +{ + int address = 0; + int tupleLength; + int tupleType; + u8 tuple[257]; + char *dvb_str; + int rasz; + int status; + int got_cftableentry = 0; + int end_chain = 0; + int i; + u16 manfid = 0; + u16 devid = 0; + + + // CISTPL_DEVICE_0A + if ((status = + dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0) { + printk("read tuple error\r\n"); + return status; + } + if (tupleType != 0x1D) { + printk("read tupleType error\r\n"); + return -EINVAL; + } + + // CISTPL_DEVICE_0C + if ((status = + dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0) { + printk("read tupleType error line[%d]\r\n", __LINE__); + return status; + } + + if (tupleType != 0x1C) { + printk("read tupleType error line[%d]\r\n", __LINE__); + return -EINVAL; + } + + + + // CISTPL_VERS_1 + if ((status = + dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0){ + printk("dvb ci error line[%d]\r\n", __LINE__); + return status; + } + + if (tupleType != 0x15) { + printk("dvb ci error line[%d]\r\n", __LINE__); + return -EINVAL; + } + //parse cipls and prof info + printk("dvb ci parse ci line[%d]\r\n", __LINE__); + dvb_ca_parse_ciinfo(ca, (char *)tuple, tupleLength); + + // CISTPL_MANFID + if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, + &tupleLength, tuple)) < 0) { + printk("dvb ci error line[%d]\r\n", __LINE__); + return status; + } + + if (tupleType != 0x20){ + printk("dvb ci error line[%d]\r\n", __LINE__); + return -EINVAL; + } + + if (tupleLength != 4) { + printk("dvb ci error line[%d]\r\n", __LINE__); + return -EINVAL; + } + + manfid = (tuple[1] << 8) | tuple[0]; + devid = (tuple[3] << 8) | tuple[2]; + + + + // CISTPL_CONFIG + if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, + &tupleLength, tuple)) < 0) + { + printk("dvb ci error line[%d]\r\n", __LINE__); + return status; + } + + 
if (tupleType != 0x1A) { + printk("dvb ci error line[%d]\r\n", __LINE__); + return -EINVAL; + } + if (tupleLength < 3) { + printk("dvb ci error line[%d]\r\n", __LINE__); + return -EINVAL; + } + + /* extract the configbase */ + rasz = tuple[0] & 3; + if (tupleLength < (3 + rasz + 14)) + { + printk("dvb ci error line[%d]\r\n", __LINE__); + return -EINVAL; + } + + ca->slot_info[slot].config_base = 0; + for (i = 0; i < rasz + 1; i++) { + ca->slot_info[slot].config_base |= (tuple[2 + i] << (8 * i)); + } + + /* check it contains the correct DVB string */ + dvb_str = findstr((char *)tuple, tupleLength, "DVB_CI_V", 8); + if (dvb_str == NULL){ + printk("dvb ci error 5 line[%d]\r\n", __LINE__); + return -EINVAL; + } + + if (tupleLength < ((dvb_str - (char *) tuple) + 12)) + { + printk("dvb ci error line[%d]\r\n", __LINE__); + return -EINVAL; + } + + + /* is it a version we support? */ + if (strncmp(dvb_str + 8, "1.00", 4)) { + printk("dvb_ca adapter %d: Unsupported DVB CAM module version %c%c%c%c\n", + ca->dvbdev->adapter->num, dvb_str[8], dvb_str[9], dvb_str[10], dvb_str[11]); + return -EINVAL; + } + + /* process the CFTABLE_ENTRY tuples, and any after those */ + while ((!end_chain) && (address < 0x1000)) { + if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, + &tupleLength, tuple)) < 0) + { + printk("dvb ci error line[%d]\r\n", __LINE__); + return status; + } + + switch (tupleType) { + case 0x1B: // CISTPL_CFTABLE_ENTRY + if (tupleLength < (2 + 11 + 17)) { + printk("dvb ci error 0x1b error line[%d]\r\n", __LINE__); + break; + } + + /* if we've already parsed one, just use it */ + if (got_cftableentry) + break; + + /* get the config option */ + ca->slot_info[slot].config_option = tuple[0] & 0x3f; + + /* OK, check it contains the correct strings */ + if ((findstr((char *)tuple, tupleLength, "DVB_HOST", 8) == NULL) || + (findstr((char *)tuple, tupleLength, "DVB_CI_MODULE", 13) == NULL)) + break; + + got_cftableentry = 1; + break; + + case 0x14: // 
CISTPL_NO_LINK + printk("dvb ci no link"); + break; + + case 0xFF: // CISTPL_END + end_chain = 1; + break; + + default: /* Unknown tuple type - just skip this tuple and move to the next one */ + printk("dvb_ca: Skipping unknown tuple type:0x%x length:0x%x\n", tupleType, + tupleLength); + break; + } + } + + if ((address > 0x1000) || (!got_cftableentry)) + { + printk("dvb ci error line[%d]\r\n", __LINE__); + return -EINVAL; + } + + dprintk("Valid DVB CAM detected MANID:%x DEVID:%x CONFIGBASE:0x%x CONFIGOPTION:0x%x\n", + manfid, devid, ca->slot_info[slot].config_base, ca->slot_info[slot].config_option); + + // success! + return 0; +} + + +/** + * dvb_ca_en50221_set_configoption - Set CAM's configoption correctly. + * + * @ca: CA instance. + * @slot: Slot containing the CAM. + */ +static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot) +{ + int configoption; + + dprintk("%s\n", __func__); + + /* set the config option */ + ca->pub->write_attribute_mem(ca->pub, slot, + ca->slot_info[slot].config_base, + ca->slot_info[slot].config_option); + + /* check it */ + configoption = ca->pub->read_attribute_mem(ca->pub, slot, ca->slot_info[slot].config_base); + dprintk("Set configoption 0x%x, read configoption 0x%x\n", + ca->slot_info[slot].config_option, configoption & 0x3f); + + /* fine! */ + return 0; + +} + + +/** + * dvb_ca_en50221_read_data - This function talks to an EN50221 CAM control + * interface. It reads a buffer of data from the CAM. The data can either + * be stored in a supplied buffer, or automatically be added to the slot's + * rx_buffer. + * + * @ca: CA instance. + * @slot: Slot to read from. + * @ebuf: If non-NULL, the data will be written to this buffer. If NULL, + * the data will be added into the buffering system as a normal fragment. + * @ecount: Size of ebuf. Ignored if ebuf is NULL. 
+ * + * @return Number of bytes read, or < 0 on error + */ +static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * ebuf, int ecount) +{ + int bytes_read; + int status; + u8 buf[HOST_LINK_BUF_SIZE]; + int i; + + dprintk("%s\n", __func__); + + /* check if we have space for a link buf in the rx_buffer */ + if (ebuf == NULL) { + int buf_free; + + if (ca->slot_info[slot].rx_buffer.data == NULL) { + status = -EIO; + goto exit; + } + buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer); + + if (buf_free < (ca->slot_info[slot].link_buf_size + DVB_RINGBUFFER_PKTHDRSIZE)) { + status = -EAGAIN; + goto exit; + } + } + + /* check if there is data available */ + if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) + goto exit; + if (!(status & STATUSREG_DA)) { + /* no data */ + status = 0; + goto exit; + } + + /* read the amount of data */ + if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH)) < 0) + goto exit; + bytes_read = status << 8; + if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW)) < 0) + goto exit; + bytes_read |= status; + + /* check it will fit */ + if (ebuf == NULL) { + if (bytes_read > ca->slot_info[slot].link_buf_size) { + printk("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n", + ca->dvbdev->adapter->num, bytes_read, ca->slot_info[slot].link_buf_size); + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; + status = -EIO; + goto exit; + } + if (bytes_read < 2) { + printk("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n", + ca->dvbdev->adapter->num); + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; + status = -EIO; + goto exit; + } + } else { + if (bytes_read > ecount) { + printk("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n", + ca->dvbdev->adapter->num); + status = -EIO; + goto exit; + } + } + + /* fill the buffer */ + for (i = 0; i < bytes_read; i++) 
{ + /* read byte and check */ + if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_DATA)) < 0) + goto exit; + + /* OK, store it in the buffer */ + buf[i] = status; + } + + /* check for read error (RE should now be 0) */ + if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) + goto exit; + if (status & STATUSREG_RE) { + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; + status = -EIO; + goto exit; + } + + /* OK, add it to the receive buffer, or copy into external buffer if supplied */ + if (ebuf == NULL) { + if (ca->slot_info[slot].rx_buffer.data == NULL) { + status = -EIO; + goto exit; + } + dvb_ringbuffer_pkt_write(&ca->slot_info[slot].rx_buffer, buf, bytes_read); + } else { + memcpy(ebuf, buf, bytes_read); + } + + dprintk("Received CA packet for slot %i connection id 0x%x last_frag:%i size:0x%x\n", slot, + buf[0], (buf[1] & 0x80) == 0, bytes_read); +#ifndef READ_LPDU_PKT + /* wake up readers when a last_fragment is received */ + if ((buf[1] & 0x80) == 0x00) +#endif + wake_up_interruptible(&ca->wait_queue); + status = bytes_read; + +exit: + return status; +} + + +/** + * dvb_ca_en50221_write_data - This function talks to an EN50221 CAM control + * interface. It writes a buffer of data to a CAM. + * + * @ca: CA instance. + * @slot: Slot to write to. + * @ebuf: The data in this buffer is treated as a complete link-level packet to + * be written. + * @count: Size of ebuf. + * + * @return Number of bytes written, or < 0 on error. 
+ */ +static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * buf, int bytes_write) +{ + int status; + int i; + + dprintk("%s\n", __func__); + + + /* sanity check */ + if (bytes_write > ca->slot_info[slot].link_buf_size) + return -EINVAL; + + /* it is possible we are dealing with a single buffer implementation, + thus if there is data available for read or if there is even a read + already in progress, we do nothing but awake the kernel thread to + process the data if necessary. */ + if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) + goto exitnowrite; + if (status & (STATUSREG_DA | STATUSREG_RE)) { + if (status & STATUSREG_DA) + dvb_ca_en50221_thread_wakeup(ca); + + status = -EAGAIN; + goto exitnowrite; + } + + /* OK, set HC bit */ + if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, + IRQEN | CMDREG_HC)) != 0) + goto exit; + + /* check if interface is still free */ + if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) + goto exit; + if (!(status & STATUSREG_FR)) { + /* it wasn't free => try again later */ + status = -EAGAIN; + goto exit; + } + + /* + * It may need some time for the CAM to settle down, or there might + * be a race condition between the CAM, writing HC and our last + * check for DA. This happens, if the CAM asserts DA, just after + * checking DA before we are setting HC. In this case it might be + * a bug in the CAM to keep the FR bit, the lower layer/HW + * communication requires a longer timeout or the CAM needs more + * time internally. But this happens in reality! 
+ * We need to read the status from the HW again and do the same + * we did for the previous check for DA + */ + status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); + if (status < 0) + goto exit; + + if (status & (STATUSREG_DA | STATUSREG_RE)) { + if (status & STATUSREG_DA) + dvb_ca_en50221_thread_wakeup(ca); + + status = -EAGAIN; + goto exit; + } + + /* send the amount of data */ + if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0) + goto exit; + if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW, + bytes_write & 0xff)) != 0) + goto exit; + + /* send the buffer */ + for (i = 0; i < bytes_write; i++) { + if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_DATA, buf[i])) != 0) + goto exit; + } + + /* check for write error (WE should now be 0) */ + if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) + goto exit; + if (status & STATUSREG_WE) { + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; + status = -EIO; + goto exit; + } + status = bytes_write; + + dprintk("Wrote CA packet for slot %i, connection id 0x%x last_frag:%i size:0x%x\n", slot, + buf[0], (buf[1] & 0x80) == 0, bytes_write); + +exit: + ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN); + +exitnowrite: + return status; +} +EXPORT_SYMBOL(dvb_ca_en50221_cimcu_camchange_irq); + + + +/* ******************************************************************************** */ +/* EN50221 higher level functions */ + + +/** + * dvb_ca_en50221_camready_irq - A CAM has been removed => shut it down. + * + * @ca: CA instance. + * @slot: Slot to shut down. 
+ */ +static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot) +{ + dprintk("%s\n", __func__); + + ca->pub->slot_shutdown(ca->pub, slot); + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE; + + /* need to wake up all processes to check if they're now + trying to write to a defunct CAM */ + wake_up_interruptible(&ca->wait_queue); + + dprintk("Slot %i shutdown\n", slot); + + /* success */ + return 0; +} +EXPORT_SYMBOL(dvb_ca_en50221_cimcu_camready_irq); + + +/** + * dvb_ca_en50221_camready_irq - A CAMCHANGE IRQ has occurred. + * + * @ca: CA instance. + * @slot: Slot concerned. + * @change_type: One of the DVB_CA_CAMCHANGE_* values. + */ +void dvb_ca_en50221_cimcu_camchange_irq(struct dvb_ca_en50221_cimcu *pubca, int slot, int change_type) +{ + struct dvb_ca_private *ca = pubca->private; + + dprintk("CAMCHANGE IRQ slot:%i change_type:%i\n", slot, change_type); + + switch (change_type) { + case DVB_CA_EN50221_CAMCHANGE_REMOVED: + case DVB_CA_EN50221_CAMCHANGE_INSERTED: + break; + + default: + return; + } + + ca->slot_info[slot].camchange_type = change_type; + atomic_inc(&ca->slot_info[slot].camchange_count); + dvb_ca_en50221_thread_wakeup(ca); +} +EXPORT_SYMBOL(dvb_ca_en50221_cimcu_frda_irq); + + +/** + * dvb_ca_en50221_cimcu_camready_irq - A CAMREADY IRQ has occurred. + * + * @ca: CA instance. + * @slot: Slot concerned. + */ +void dvb_ca_en50221_cimcu_camready_irq(struct dvb_ca_en50221_cimcu *pubca, int slot) +{ + struct dvb_ca_private *ca = pubca->private; + + dprintk("CAMREADY IRQ slot:%i\n", slot); + + if (ca->slot_info[slot].slot_state == DVB_CA_SLOTSTATE_WAITREADY) { + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_VALIDATE; + dvb_ca_en50221_thread_wakeup(ca); + } +} + + +/** + * An FR or DA IRQ has occurred. + * + * @ca: CA instance. + * @slot: Slot concerned. 
+ */ +void dvb_ca_en50221_cimcu_frda_irq(struct dvb_ca_en50221_cimcu *pubca, int slot) +{ + struct dvb_ca_private *ca = pubca->private; + int flags; + + dprintk("FR/DA IRQ slot:%i\n", slot); + + switch (ca->slot_info[slot].slot_state) { + case DVB_CA_SLOTSTATE_LINKINIT: + flags = ca->pub->read_cam_control(pubca, slot, CTRLIF_STATUS); + if (flags & STATUSREG_DA) { + dprintk("CAM supports DA IRQ\n"); + ca->slot_info[slot].da_irq_supported = 1; + } + break; + + case DVB_CA_SLOTSTATE_RUNNING: + if (ca->open) + dvb_ca_en50221_thread_wakeup(ca); + break; + } +} + + + +/* ******************************************************************************** */ +/* EN50221 thread functions */ + +/** + * Wake up the DVB CA thread + * + * @ca: CA instance. + */ +static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca) +{ + + dprintk("%s\n", __func__); + + ca->wakeup = 1; + mb(); + wake_up_process(ca->thread); +} + +/** + * Update the delay used by the thread. + * + * @ca: CA instance. + */ +static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca) +{ + int delay; + int curdelay = 100000000; + int slot; + + /* Beware of too high polling frequency, because one polling + * call might take several hundred milliseconds until timeout! 
+ */ + for (slot = 0; slot < ca->slot_count; slot++) { + switch (ca->slot_info[slot].slot_state) { + default: + case DVB_CA_SLOTSTATE_NONE: + delay = HZ * 60; /* 60s */ + if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) + delay = HZ * 5; /* 5s */ + break; + case DVB_CA_SLOTSTATE_INVALID: + delay = HZ * 60; /* 60s */ + if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) + delay = HZ / 10; /* 100ms */ + break; + + case DVB_CA_SLOTSTATE_UNINITIALISED: + case DVB_CA_SLOTSTATE_WAITREADY: + case DVB_CA_SLOTSTATE_VALIDATE: + case DVB_CA_SLOTSTATE_WAITFR: + case DVB_CA_SLOTSTATE_LINKINIT: + delay = HZ / 10; /* 100ms */ + break; + + case DVB_CA_SLOTSTATE_RUNNING: + delay = HZ * 60; /* 60s */ + if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) + delay = HZ / 100; /* 100ms */ + if (ca->open) { + if ((!ca->slot_info[slot].da_irq_supported) || + (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_DA))) + delay = HZ / 100; /* 100ms */ + } + break; + } + + if (delay < curdelay) + curdelay = delay; + } + + ca->delay = curdelay; +} + + + +/** + * Kernel thread which monitors CA slots for CAM changes, and performs data transfers. 
+ */ +static int dvb_ca_en50221_thread(void *data) +{ + struct dvb_ca_private *ca = data; + int slot; + int flags; + int status; + int pktcount; + void *rxbuf; + + dprintk("%s\n", __func__); + + /* choose the correct initial delay */ + dvb_ca_en50221_thread_update_delay(ca); + + /* main loop */ + while (!kthread_should_stop()) { + /* sleep for a bit */ + if (!ca->wakeup) { + if (ca->slot_count > 0 + && ca->slot_info[0].slot_state == DVB_CA_SLOTSTATE_RUNNING + && ca->pub->get_slot_wakeup(ca->pub, 0) == 0) { + usleep_range(dvb_ca_en50221_usleep, dvb_ca_en50221_usleep + 100); + } else { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(ca->delay); + } + if (kthread_should_stop()) + return 0; + } else { + if (ca->pub->get_slot_wakeup(ca->pub, 0) == 0) + usleep_range(dvb_ca_en50221_usleep, dvb_ca_en50221_usleep + 100); + } + ca->wakeup = 0; + + /* go through all the slots processing them */ + for (slot = 0; slot < ca->slot_count; slot++) { + + mutex_lock(&ca->slot_info[slot].slot_lock); + + // check the cam status + deal with CAMCHANGEs + while (dvb_ca_en50221_check_camstatus(ca, slot)) { + /* clear down an old CI slot if necessary */ + if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE) + dvb_ca_en50221_slot_shutdown(ca, slot); + + /* if a CAM is NOW present, initialise it */ + if (ca->slot_info[slot].camchange_type == DVB_CA_EN50221_CAMCHANGE_INSERTED) { + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_UNINITIALISED; + } + + /* we've handled one CAMCHANGE */ + dvb_ca_en50221_thread_update_delay(ca); + atomic_dec(&ca->slot_info[slot].camchange_count); + } + + // CAM state machine + switch (ca->slot_info[slot].slot_state) { + case DVB_CA_SLOTSTATE_NONE: + case DVB_CA_SLOTSTATE_INVALID: + // no action needed + break; + + case DVB_CA_SLOTSTATE_UNINITIALISED: + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_WAITREADY; + ca->pub->slot_reset(ca->pub, slot); + ca->slot_info[slot].timeout = jiffies + (INIT_TIMEOUT_SECS * HZ); + break; + + case 
DVB_CA_SLOTSTATE_WAITREADY: + if (time_after(jiffies, ca->slot_info[slot].timeout)) { + printk("dvb_ca adaptor %d: PC card did not respond :(\n", + ca->dvbdev->adapter->num); + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + // no other action needed; will automatically change state when ready + break; + + case DVB_CA_SLOTSTATE_VALIDATE: + if (dvb_ca_en50221_parse_attributes(ca, slot) != 0) { + /* we need this extra check for annoying interfaces like the budget-av */ + if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) && + (ca->pub->poll_slot_status)) { + status = ca->pub->poll_slot_status(ca->pub, slot, 0); + if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) { + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + } + + printk("dvb_ca adapter %d: Invalid PC card inserted :(\n", + ca->dvbdev->adapter->num); + //ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_UNINITIALISED; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + if (dvb_ca_en50221_set_configoption(ca, slot) != 0) { + printk("dvb_ca adapter %d: Unable to initialise CAM :(\n", + ca->dvbdev->adapter->num); + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + if (ca->pub->write_cam_control(ca->pub, slot, + CTRLIF_COMMAND, CMDREG_RS) != 0) { + printk("dvb_ca adapter %d: Unable to reset CAM IF\n", + ca->dvbdev->adapter->num); + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + dprintk("DVB CAM validated successfully\n"); + + ca->slot_info[slot].timeout = jiffies + (INIT_TIMEOUT_SECS * HZ); + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_WAITFR; + ca->wakeup = 1; + break; + + case DVB_CA_SLOTSTATE_WAITFR: + if (time_after(jiffies, ca->slot_info[slot].timeout)) { + printk("dvb_ca 
adapter %d: DVB CAM did not respond :(\n", + ca->dvbdev->adapter->num); + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + + flags = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); + if (flags & STATUSREG_FR) { + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; + ca->wakeup = 1; + } + break; + + case DVB_CA_SLOTSTATE_LINKINIT: + if (dvb_ca_en50221_link_init(ca, slot) != 0) { + /* we need this extra check for annoying interfaces like the budget-av */ + if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) && + (ca->pub->poll_slot_status)) { + status = ca->pub->poll_slot_status(ca->pub, slot, 0); + if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) { + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + } + + printk("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", ca->dvbdev->adapter->num); + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + + if (ca->slot_info[slot].rx_buffer.data == NULL) { + rxbuf = vmalloc(RX_BUFFER_SIZE); + if (rxbuf == NULL) { + printk("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n", ca->dvbdev->adapter->num); + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; + dvb_ca_en50221_thread_update_delay(ca); + break; + } + dvb_ringbuffer_init(&ca->slot_info[slot].rx_buffer, rxbuf, RX_BUFFER_SIZE); + } + + ca->pub->slot_ts_enable(ca->pub, slot); + ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_RUNNING; + dvb_ca_en50221_thread_update_delay(ca); + printk("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n", ca->dvbdev->adapter->num); + break; + + case DVB_CA_SLOTSTATE_RUNNING: + if (!ca->open) + break; + + // poll slots for data + pktcount = 0; + while ((status = dvb_ca_en50221_read_data(ca, slot, NULL, 0)) > 0) { + if (!ca->open) + break; + + /* if a CAMCHANGE occurred at some point, do not 
do any more processing of this slot */ + if (dvb_ca_en50221_check_camstatus(ca, slot)) { + // we dont want to sleep on the next iteration so we can handle the cam change + ca->wakeup = 1; + break; + } + + /* check if we've hit our limit this time */ + if (++pktcount >= MAX_RX_PACKETS_PER_ITERATION) { + // dont sleep; there is likely to be more data to read + ca->wakeup = 1; + break; + } + } + break; + } + + mutex_unlock(&ca->slot_info[slot].slot_lock); + } + } + + return 0; +} + + +int cam_state = 0; +/* ******************************************************************************** */ +/* EN50221 IO interface functions */ + +/** + * Real ioctl implementation. + * NOTE: CA_SEND_MSG/CA_GET_MSG ioctls have userspace buffers passed to them. + * + * @inode: Inode concerned. + * @file: File concerned. + * @cmd: IOCTL command. + * @arg: Associated argument. + * + * @return 0 on success, <0 on error. + */ +static int dvb_ca_en50221_io_do_ioctl(struct file *file, + unsigned int cmd, void *parg) +{ + struct dvb_device *dvbdev = file->private_data; + struct dvb_ca_private *ca = dvbdev->priv; + int err = 0; + int slot; + u8 info = 0x80; + + if (mutex_lock_interruptible(&ca->ioctl_mutex)) { + printk("ci lock interrupt error\r\n"); + return -ERESTARTSYS; + } + switch (cmd) { + case CA_RESET: { + if (copy_from_user(&info, (void __user *)parg, 1)) + printk("ci reset-cp error--\r\n"); + if (info >> 7 == 1 ) { + for (slot = 0; slot < ca->slot_count; slot++) { + mutex_lock(&ca->slot_info[slot].slot_lock); + if (ca->pub->write_cam_control(ca->pub, slot, + CTRLIF_COMMAND, CMDREG_RS) != 0) { + printk("dvb_ca adapter %d: Unable to reset CAM IF\n", + ca->dvbdev->adapter->num); + } + mutex_unlock(&ca->slot_info[slot].slot_lock); + } + } else { + for (slot = 0; slot < ca->slot_count; slot++) { + mutex_lock(&ca->slot_info[slot].slot_lock); + if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE) { + //if reset camcard,need send all state to app + cam_state = 0; + 
dvb_ca_en50221_slot_shutdown(ca, slot); + if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE) + dvb_ca_en50221_cimcu_camchange_irq(ca->pub, + slot, + DVB_CA_EN50221_CAMCHANGE_INSERTED); + } + mutex_unlock(&ca->slot_info[slot].slot_lock); + } + } + ca->next_read_slot = 0; + dvb_ca_en50221_thread_wakeup(ca); + break; + } + case CA_GET_CAP: { + struct ca_caps *caps = parg; + + caps->slot_num = ca->slot_count; + caps->slot_type = CA_CI_LINK; + caps->descr_num = 0; + caps->descr_type = 0; + break; + } + + case CA_GET_SLOT_INFO: { + struct ca_slot_info *info = parg; + + if ((info->num > ca->slot_count) || (info->num < 0)) { + err = -EINVAL; + printk("info->num==%d\r\n",info->num); + goto out_unlock; + } + + info->type = CA_CI_LINK; + info->flags = 0; + if (cam_state != 0 && (ca->slot_info[info->num].slot_state != DVB_CA_SLOTSTATE_NONE) + && (ca->slot_info[info->num].slot_state != DVB_CA_SLOTSTATE_INVALID)) { + info->flags = CA_CI_MODULE_PRESENT; + } + + if (cam_state != 0 && cam_state != 1 && ca->slot_info[info->num].slot_state == DVB_CA_SLOTSTATE_RUNNING) { + info->flags |= CA_CI_MODULE_READY; + } + if (cam_state == 1 && ca->slot_info[info->num].slot_state == DVB_CA_SLOTSTATE_RUNNING) { + info->flags = CA_CI_MODULE_PRESENT; + cam_state++; + printk("info->flags==%d\r\n",info->flags); + } + if (cam_state == 0) { + cam_state++; + printk("info->flags==%d\r\n",info->flags); + } + break; + } + + default: + err = -EINVAL; + break; + } + +out_unlock: + mutex_unlock(&ca->ioctl_mutex); + return err; +} +static int dvb_usercopy__(struct file *file, + unsigned int cmd, unsigned long arg, + int (*func)(struct file *file, + unsigned int cmd, void *arg)) +{ + char sbuf[128]; + void *mbuf = NULL; + void *parg = NULL; + int err = -EINVAL; + + /* Copy arguments into temp kernel buffer */ + switch (_IOC_DIR(cmd)) { + case _IOC_NONE: + /* + * For this command, the pointer is actually an integer + * argument. 
+ */ + parg = (void *) arg; + break; + case _IOC_READ: /* some v4l ioctls are marked wrong ... */ + case _IOC_WRITE: + case (_IOC_WRITE | _IOC_READ): + if (_IOC_SIZE(cmd) <= sizeof(sbuf)) { + parg = sbuf; + } else { + /* too big to allocate from stack */ + mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); + if (NULL == mbuf) + return -ENOMEM; + parg = mbuf; + } + + err = -EFAULT; + if (copy_from_user(parg, (void __user *)arg, _IOC_SIZE(cmd))) + goto out; + break; + } + + /* call driver */ + if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD) + err = -ENOTTY; + + if (err < 0) + goto out; + + /* Copy results into user buffer */ + switch (_IOC_DIR(cmd)) + { + case _IOC_READ: + case (_IOC_WRITE | _IOC_READ): + if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd))) + err = -EFAULT; + break; + } + +out: + kfree(mbuf); + return err; +} + +/** + * Wrapper for ioctl implementation. + * + * @inode: Inode concerned. + * @file: File concerned. + * @cmd: IOCTL command. + * @arg: Associated argument. + * + * @return 0 on success, <0 on error. + */ +static long dvb_ca_en50221_io_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + return dvb_usercopy__(file, cmd, arg, dvb_ca_en50221_io_do_ioctl); +} + + +/** + * Implementation of write() syscall. + * + * @file: File structure. + * @buf: Source buffer. + * @count: Size of source buffer. + * @ppos: Position in file (ignored). + * + * @return Number of bytes read, or <0 on error. + */ +static ssize_t dvb_ca_en50221_io_write(struct file *file, + const char __user * buf, size_t count, loff_t * ppos) +{ + struct dvb_device *dvbdev = file->private_data; + struct dvb_ca_private *ca = dvbdev->priv; + u8 slot, connection_id; + int status; + u8 fragbuf[HOST_LINK_BUF_SIZE]; + int fragpos = 0; + int fraglen; + unsigned long timeout; + int written; + + dprintk("%s\n", __func__); + + /* Incoming packet has a 2 byte header. 
hdr[0] = slot_id, hdr[1] = connection_id */ + if (count < 2) + return -EINVAL; + + /* extract slot & connection id */ + if (copy_from_user(&slot, buf, 1)) + return -EFAULT; + if (copy_from_user(&connection_id, buf + 1, 1)) + return -EFAULT; + buf += 2; + count -= 2; + + /* check if the slot is actually running */ + if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING) + return -EINVAL; + + /* fragment the packets & store in the buffer */ + while (fragpos < count) { + fraglen = ca->slot_info[slot].link_buf_size - 2; + if (fraglen < 0) + break; + if (fraglen > HOST_LINK_BUF_SIZE - 2) + fraglen = HOST_LINK_BUF_SIZE - 2; + if ((count - fragpos) < fraglen) + fraglen = count - fragpos; + + fragbuf[0] = connection_id; + fragbuf[1] = ((fragpos + fraglen) < count) ? 0x80 : 0x00; + status = copy_from_user(fragbuf + 2, buf + fragpos, fraglen); + if (status) { + status = -EFAULT; + goto exit; + } + + timeout = jiffies + HZ / 2; + written = 0; + while (!time_after(jiffies, timeout)) { + /* check the CAM hasn't been removed/reset in the meantime */ + if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING) { + status = -EIO; + goto exit; + } + + mutex_lock(&ca->slot_info[slot].slot_lock); + status = dvb_ca_en50221_write_data(ca, slot, fragbuf, fraglen + 2); + mutex_unlock(&ca->slot_info[slot].slot_lock); + if (status == (fraglen + 2)) { + written = 1; + break; + } + if (status != -EAGAIN) + goto exit; + + //msleep(1); + usleep_range(dvb_ca_en50221_usleep, dvb_ca_en50221_usleep + 100); + } + if (!written) { + status = -EIO; + goto exit; + } + + fragpos += fraglen; + } + status = count + 2; + +exit: + return status; +} + + +/** + * Condition for waking up in dvb_ca_en50221_io_read_condition + */ +static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca, + int *result, int *_slot) +{ + int slot; + int slot_count = 0; + int idx; + size_t fraglen; + int connection_id = -1; + int found = 0; + u8 hdr[2]; + + slot = ca->next_read_slot; + while ((slot_count 
< ca->slot_count) && (!found)) { + if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING) + goto nextslot; + + if (ca->slot_info[slot].rx_buffer.data == NULL) + return 0; +#ifdef READ_LPDU_PKT + if (ca->slot_info[slot].rx_offset != 0) { + idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, -1, &fraglen); + if (idx == -1) { + //printk("no data to read----[%d]-\r\n", ca->slot_info[slot].rx_offset); + return 0; + } + *_slot = slot; + return 1; + } +#endif + idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, -1, &fraglen); + while (idx != -1) { + dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer, idx, 0, hdr, 2); + if (connection_id == -1) + connection_id = hdr[0]; + if ((hdr[0] == connection_id) +#ifndef READ_LPDU_PKT + && ((hdr[1] & 0x80) == 0) +#endif + ) { + *_slot = slot; + found = 1; + break; + } + + idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, idx, &fraglen); + } + +nextslot: + slot = (slot + 1) % ca->slot_count; + slot_count++; + } + + ca->next_read_slot = slot; + return found; +} + + +/** + * Implementation of read() syscall. + * + * @file: File structure. + * @buf: Destination buffer. + * @count: Size of destination buffer. + * @ppos: Position in file (ignored). + * + * @return Number of bytes read, or <0 on error. + */ +static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user * buf, + size_t count, loff_t * ppos) +{ + struct dvb_device *dvbdev = file->private_data; + struct dvb_ca_private *ca = dvbdev->priv; + int status; + int result = 0; + u8 hdr[2]; + int slot; + int connection_id = -1; + size_t idx, idx2; + int last_fragment = 0; + size_t fraglen; + int pktlen; + int dispose = 0; + +#ifdef READ_LPDU_PKT + int offset; + u8 flag = 0; +#endif + dprintk("%s\n", __func__); + + /* Outgoing packet has a 2 byte header. 
hdr[0] = slot_id, hdr[1] = connection_id */ + if (count < 2) + return -EINVAL; + + /* wait for some data */ + if ((status = dvb_ca_en50221_io_read_condition(ca, &result, &slot)) == 0) { + + /* if we're in nonblocking mode, exit immediately */ + if (file->f_flags & O_NONBLOCK) + return -EWOULDBLOCK; + + /* wait for some data */ + status = wait_event_interruptible(ca->wait_queue, + dvb_ca_en50221_io_read_condition + (ca, &result, &slot)); + } + if ((status < 0) || (result < 0)) { + if (result) + return result; + return status; + } + + idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, -1, &fraglen); + pktlen = 2; + do { + if (idx == -1) { + printk("dvb_ca adapter %d: BUG: read packet ended before last_fragment encountered\n", ca->dvbdev->adapter->num); + status = -EIO; + goto exit; + } +#ifdef READ_LPDU_PKT + offset = 2 + ca->slot_info[slot].rx_offset; +#endif + dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer, idx, 0, hdr, 2); + if (connection_id == -1) + connection_id = hdr[0]; +#ifdef READ_LPDU_PKT + flag = hdr[1]; + if (hdr[0] == connection_id) { + if (pktlen < count) { + if ((pktlen + fraglen - offset) > (count - 2)) { + fraglen = (count - 2) - pktlen; + ca->slot_info[slot].rx_offset += + fraglen; + /* more data for user, + but cannot send, + so force return to user, + rather than dispose of it */ + flag |= 0x80; + } else { + ca->slot_info[slot].rx_offset = 0; + fraglen -= offset; + dispose = 1; + } + + status = dvb_ringbuffer_pkt_read_user( + &ca->slot_info[slot].rx_buffer, + idx, + offset, + buf + pktlen + 2, + fraglen); + if (status < 0) + goto exit; + pktlen += fraglen; + } + + last_fragment = 1; + } +#else + if (hdr[0] == connection_id) { + if (pktlen < count) { + if ((pktlen + fraglen - 2) > count) { + fraglen = count - pktlen; + } else { + fraglen -= 2; + } + + if ((status = dvb_ringbuffer_pkt_read_user(&ca->slot_info[slot].rx_buffer, idx, 2, + buf + pktlen, fraglen)) < 0) { + goto exit; + } + pktlen += fraglen; + } + + if ((hdr[1] & 
0x80) == 0) + last_fragment = 1; + dispose = 1; + } +#endif + idx2 = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, + idx, &fraglen); + if (dispose) + dvb_ringbuffer_pkt_dispose(&ca->slot_info[slot].rx_buffer, idx); + idx = idx2; + dispose = 0; + } while (!last_fragment); + + hdr[0] = slot; + hdr[1] = connection_id; + status = copy_to_user(buf, hdr, 2); + if (status) { + status = -EFAULT; + goto exit; + } + status = pktlen; + +#ifdef READ_LPDU_PKT + hdr[0] = flag; + hdr[1] = 0; + + status = copy_to_user(buf + 2, hdr, 2); + if (status) { + status = -EFAULT; + goto exit; + } + status = pktlen + 2; +#endif +exit: + return status; +} + + +/** + * Implementation of file open syscall. + * + * @inode: Inode concerned. + * @file: File concerned. + * + * @return 0 on success, <0 on failure. + */ +static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file) +{ + struct dvb_device *dvbdev = file->private_data; + struct dvb_ca_private *ca = dvbdev->priv; + int err; + int i; + + printk("%s\n", __func__); + + if (!try_module_get(ca->pub->owner)) + return -EIO; + + err = dvb_generic_open(inode, file); + if (err < 0) { + module_put(ca->pub->owner); + return err; + } + + for (i = 0; i < ca->slot_count; i++) { + + if (ca->slot_info[i].slot_state == DVB_CA_SLOTSTATE_RUNNING) { + if (ca->slot_info[i].rx_buffer.data != NULL) { + /* it is safe to call this here without locks because + * ca->open == 0. Data is not read in this case */ + dvb_ringbuffer_flush(&ca->slot_info[i].rx_buffer); + } + } + } + + ca->open = 1; + dvb_ca_en50221_thread_update_delay(ca); + dvb_ca_en50221_thread_wakeup(ca); + + dvb_ca_private_get(ca); + + return 0; +} + + +/** + * Implementation of file close syscall. + * + * @inode: Inode concerned. + * @file: File concerned. + * + * @return 0 on success, <0 on failure. 
+ */ +static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file) +{ + struct dvb_device *dvbdev = file->private_data; + struct dvb_ca_private *ca = dvbdev->priv; + int err; + + dprintk("%s\n", __func__); + + /* mark the CA device as closed */ + ca->open = 0; + dvb_ca_en50221_thread_update_delay(ca); + + err = dvb_generic_release(inode, file); + + module_put(ca->pub->owner); + + dvb_ca_private_put(ca); + + return err; +} + + +/** + * Implementation of poll() syscall. + * + * @file: File concerned. + * @wait: poll wait table. + * + * @return Standard poll mask. + */ +static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table * wait) +{ + struct dvb_device *dvbdev = file->private_data; + struct dvb_ca_private *ca = dvbdev->priv; + unsigned int mask = 0; + int slot; + int result = 0; + + dprintk("%s\n", __func__); + + if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) { + mask |= POLLIN; + } + + /* if there is something, return now */ + if (mask) + return mask; + + /* wait for something to happen */ + poll_wait(file, &ca->wait_queue, wait); + + if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) { + mask |= POLLIN; + } + + return mask; +} +EXPORT_SYMBOL(dvb_ca_en50221_cimcu_init); + +#ifdef CONFIG_AMLOGIC_DVB_COMPAT +static long dvb_ca_en50221_compat_ioctl(struct file *filp, + unsigned int cmd, unsigned long args) +{ + unsigned long ret; +#ifdef CONFIG_COMPAT + args = (unsigned long)compat_ptr(args); +#endif + ret = dvb_ca_en50221_io_ioctl(filp, cmd, args); + return ret; +} +#endif + +static const struct file_operations dvb_ca_fops = { + .owner = THIS_MODULE, + .read = dvb_ca_en50221_io_read, + .write = dvb_ca_en50221_io_write, + .unlocked_ioctl = dvb_ca_en50221_io_ioctl, + .open = dvb_ca_en50221_io_open, + .release = dvb_ca_en50221_io_release, + .poll = dvb_ca_en50221_io_poll, + .llseek = noop_llseek, +#ifdef CONFIG_AMLOGIC_DVB_COMPAT + .compat_ioctl = dvb_ca_en50221_compat_ioctl, +#endif +}; + +static const 
struct dvb_device dvbdev_ca = { + .priv = NULL, + .users = 1, + .readers = 1, + .writers = 1, +#if defined(CONFIG_MEDIA_CONTROLLER_DVB) + .name = "dvb-ca-en50221", +#endif + .fops = &dvb_ca_fops, +}; + +/* ******************************************************************************** */ +/* Initialisation/shutdown functions */ + + +/** + * Initialise a new DVB CA EN50221 interface device. + * + * @dvb_adapter: DVB adapter to attach the new CA device to. + * @ca: The dvb_ca instance. + * @flags: Flags describing the CA device (DVB_CA_FLAG_*). + * @slot_count: Number of slots supported. + * + * @return 0 on success, nonzero on failure + */ +int dvb_ca_en50221_cimcu_init(struct dvb_adapter *dvb_adapter, + struct dvb_ca_en50221_cimcu *pubca, int flags, int slot_count) +{ + int ret; + struct dvb_ca_private *ca = NULL; + int i; + + dprintk("%s\n", __func__); + printk("---%s\n", __func__); + if (slot_count < 1) + return -EINVAL; + + /* initialise the system data */ + if ((ca = kzalloc(sizeof(struct dvb_ca_private), GFP_KERNEL)) == NULL) { + ret = -ENOMEM; + goto exit; + } + kref_init(&ca->refcount); + ca->pub = pubca; + ca->flags = flags; + ca->slot_count = slot_count; + if ((ca->slot_info = kcalloc(slot_count, sizeof(struct dvb_ca_slot), GFP_KERNEL)) == NULL) { + ret = -ENOMEM; + goto free_ca; + } + init_waitqueue_head(&ca->wait_queue); + ca->open = 0; + ca->wakeup = 0; + ca->next_read_slot = 0; + pubca->private = ca; + + /* register the DVB device */ + ret = dvb_register_device(dvb_adapter, &ca->dvbdev, &dvbdev_ca, ca, DVB_DEVICE_CA, 0); + if (ret) + goto free_slot_info; + + /* now initialise each slot */ + for (i = 0; i < slot_count; i++) { + memset(&ca->slot_info[i], 0, sizeof(struct dvb_ca_slot)); + ca->slot_info[i].slot_state = DVB_CA_SLOTSTATE_NONE; + atomic_set(&ca->slot_info[i].camchange_count, 0); + ca->slot_info[i].camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED; + mutex_init(&ca->slot_info[i].slot_lock); + } + + mutex_init(&ca->ioctl_mutex); + + if 
(signal_pending(current)) { + ret = -EINTR; + goto unregister_device; + } + mb(); + + /* create a kthread for monitoring this CA device */ + ca->thread = kthread_run(dvb_ca_en50221_thread, ca, "kdvb-ca-%i:%i", + ca->dvbdev->adapter->num, ca->dvbdev->id); + if (IS_ERR(ca->thread)) { + ret = PTR_ERR(ca->thread); + printk("dvb_ca_init: failed to start kernel_thread (%d)\n", + ret); + goto unregister_device; + } + return 0; + +unregister_device: + dvb_unregister_device(ca->dvbdev); +free_slot_info: + kfree(ca->slot_info); +free_ca: + kfree(ca); +exit: + pubca->private = NULL; + return ret; +} +EXPORT_SYMBOL(dvb_ca_en50221_cimcu_release); + + + +/** + * Release a DVB CA EN50221 interface device. + * + * @ca_dev: The dvb_device_t instance for the CA device. + * @ca: The associated dvb_ca instance. + */ +void dvb_ca_en50221_cimcu_release(struct dvb_ca_en50221_cimcu *pubca) +{ + struct dvb_ca_private *ca = pubca->private; + int i; + + dprintk("%s\n", __func__); + + /* shutdown the thread if there was one */ + kthread_stop(ca->thread); + + for (i = 0; i < ca->slot_count; i++) { + dvb_ca_en50221_slot_shutdown(ca, i); + } + dvb_ca_private_put(ca); + pubca->private = NULL; +}
diff --git a/drivers/stream_input/parser/dvb_ci/cimcu/dvb_ca_en50221_cimcu.h b/drivers/stream_input/parser/dvb_ci/cimcu/dvb_ca_en50221_cimcu.h new file mode 100644 index 0000000..3b14d17 --- /dev/null +++ b/drivers/stream_input/parser/dvb_ci/cimcu/dvb_ca_en50221_cimcu.h
@@ -0,0 +1,136 @@ +/* + * dvb_ca.h: generic DVB functions for EN50221 CA interfaces + * + * Copyright (C) 2004 Andrew de Quincey + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DVB_CA_EN50221_CIMCU_H_ +#define _DVB_CA_EN50221_CIMCU_H_ + +#include <linux/list.h> +#include <linux/dvb/ca.h> + +#include "dvbdev.h" + +#define DVB_CA_EN50221_POLL_CAM_PRESENT 1 +#define DVB_CA_EN50221_POLL_CAM_CHANGED 2 +#define DVB_CA_EN50221_POLL_CAM_READY 4 + +#define DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE 1 +#define DVB_CA_EN50221_FLAG_IRQ_FR 2 +#define DVB_CA_EN50221_FLAG_IRQ_DA 4 + +#define DVB_CA_EN50221_CAMCHANGE_REMOVED 0 +#define DVB_CA_EN50221_CAMCHANGE_INSERTED 1 + +/** + * struct dvb_ca_en50221- Structure describing a CA interface + * + * @owner: the module owning this structure + * @read_attribute_mem: function for reading attribute memory on the CAM + * @write_attribute_mem: function for writing attribute memory on the CAM + * @read_cam_control: function for reading the control interface on the CAM + * @write_cam_control: function for reading the control interface on the CAM + * @slot_reset: function to reset the CAM slot + * @slot_shutdown: function to shutdown a CAM slot + * @slot_ts_enable: function to enable the Transport Stream on a CAM slot + * @poll_slot_status: function to poll slot status. Only necessary if + * DVB_CA_FLAG_EN50221_IRQ_CAMCHANGE is not set. + * @data: private data, used by caller. + * @private: Opaque data used by the dvb_ca core. Do not modify! 
+ * + * NOTE: the read_*, write_* and poll_slot_status functions will be + * called for different slots concurrently and need to use locks where + * and if appropriate. There will be no concurrent access to one slot. + */ +struct dvb_ca_en50221_cimcu { + struct module *owner; + + int (*read_attribute_mem)(struct dvb_ca_en50221_cimcu *ca, + int slot, int address); + int (*write_attribute_mem)(struct dvb_ca_en50221_cimcu *ca, + int slot, int address, u8 value); + + int (*read_cam_control)(struct dvb_ca_en50221_cimcu *ca, + int slot, u8 address); + int (*write_cam_control)(struct dvb_ca_en50221_cimcu *ca, + int slot, u8 address, u8 value); + + int (*slot_reset)(struct dvb_ca_en50221_cimcu *ca, int slot); + int (*slot_shutdown)(struct dvb_ca_en50221_cimcu *ca, int slot); + int (*slot_ts_enable)(struct dvb_ca_en50221_cimcu *ca, int slot); + + int (*poll_slot_status)(struct dvb_ca_en50221_cimcu *ca, int slot, int open); + int (*get_slot_wakeup)(struct dvb_ca_en50221_cimcu *ca, int slot); + + void *data; + + void *private; +}; + +/* + * Functions for reporting IRQ events + */ + +/** + * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred. + * + * @pubca: CA instance. + * @slot: Slot concerned. + * @change_type: One of the DVB_CA_CAMCHANGE_* values + */ +void dvb_ca_en50221_cimcu_camchange_irq(struct dvb_ca_en50221_cimcu *pubca, int slot, + int change_type); + +/** + * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred. + * + * @pubca: CA instance. + * @slot: Slot concerned. + */ +void dvb_ca_en50221_cimcu_camready_irq(struct dvb_ca_en50221_cimcu *pubca, int slot); + +/** + * dvb_ca_en50221_frda_irq - An FR or a DA IRQ has occurred. + * + * @ca: CA instance. + * @slot: Slot concerned. + */ +void dvb_ca_en50221_cimcu_frda_irq(struct dvb_ca_en50221_cimcu *ca, int slot); + +/* + * Initialisation/shutdown functions + */ + +/** + * dvb_ca_en50221_init - Initialise a new DVB CA device. + * + * @dvb_adapter: DVB adapter to attach the new CA device to. 
+ * @ca: The dvb_ca instance. + * @flags: Flags describing the CA device (DVB_CA_EN50221_FLAG_*). + * @slot_count: Number of slots supported. + * + * @return 0 on success, nonzero on failure + */ +extern int dvb_ca_en50221_cimcu_init(struct dvb_adapter *dvb_adapter, + struct dvb_ca_en50221_cimcu *ca, int flags, + int slot_count); + +/** + * dvb_ca_en50221_release - Release a DVB CA device. + * + * @ca: The associated dvb_ca instance. + */ +extern void dvb_ca_en50221_cimcu_release(struct dvb_ca_en50221_cimcu *ca); + +#endif
diff --git a/drivers/stream_input/parser/dvb_common.c b/drivers/stream_input/parser/dvb_common.c new file mode 100644 index 0000000..ecea691 --- /dev/null +++ b/drivers/stream_input/parser/dvb_common.c
@@ -0,0 +1,17 @@ +/* + * ../hardware/amlogic/media_modules/drivers/stream_input/parser/dvb_common.c + * + * Copyright (C) 2017 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +
diff --git a/drivers/stream_input/parser/dvb_common.h b/drivers/stream_input/parser/dvb_common.h new file mode 100644 index 0000000..c4c597b --- /dev/null +++ b/drivers/stream_input/parser/dvb_common.h
@@ -0,0 +1,67 @@ +/* + * ../hardware/amlogic/media_modules/drivers/stream_input/parser/dvb_common.h + * + * Copyright (C) 2017 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef __DVB_COMMON_H__ +#define __DVB_COMMON_H__ + +#include <linux/amlogic/aml_dvb_extern.h> + +#include "dvb_frontend.h" + +typedef enum dmx_source { + DMX_SOURCE_FRONT0 = 0, + DMX_SOURCE_FRONT1, + DMX_SOURCE_FRONT2, + DMX_SOURCE_FRONT3, + DMX_SOURCE_DVR0 = 16, + DMX_SOURCE_DVR1, + DMX_SOURCE_DVR2, + DMX_SOURCE_DVR3, + +#ifdef CONFIG_AMLOGIC_DVB_COMPAT + DMX_SOURCE_FRONT0_OFFSET = 100, + DMX_SOURCE_FRONT1_OFFSET, + DMX_SOURCE_FRONT2_OFFSET +#endif +} dmx_source_t; + +enum aml_dmx_id_t { + AM_DMX_0 = 0, + AM_DMX_1, + AM_DMX_2, + AM_DMX_MAX, +}; + +enum aml_ts_source_t { + AM_TS_SRC_TS0, + AM_TS_SRC_TS1, + AM_TS_SRC_TS2, + AM_TS_SRC_TS3, + + AM_TS_SRC_S_TS0, + AM_TS_SRC_S_TS1, + AM_TS_SRC_S_TS2, + AM_TS_SRC_S_TS3, + + AM_TS_SRC_HIU, + AM_TS_SRC_HIU1, + AM_TS_SRC_DMX0, + AM_TS_SRC_DMX1, + AM_TS_SRC_DMX2 +}; + +#endif /* __DVB_COMMON_H__ */
diff --git a/drivers/stream_input/parser/esparser.c b/drivers/stream_input/parser/esparser.c new file mode 100644 index 0000000..2675551 --- /dev/null +++ b/drivers/stream_input/parser/esparser.c
@@ -0,0 +1,1043 @@ +/* + * drivers/amlogic/media/stream_input/parser/esparser.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> + +#include <linux/uaccess.h> +#include <linux/atomic.h> + +/* #include <mach/am_regs.h> */ +#include <linux/delay.h> + +#include "../../frame_provider/decoder/utils/vdec.h" +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../amports/streambuf_reg.h" +#include "../amports/streambuf.h" +#include "esparser.h" +#include "../amports/amports_priv.h" +#include "../amports/thread_rw.h" + +#include <linux/amlogic/media/codec_mm/codec_mm.h> + + + +#define SAVE_SCR 0 + +#define ES_START_CODE_PATTERN 0x00000100 +#define ES_START_CODE_MASK 0xffffff00 +#define SEARCH_PATTERN_LEN 512 +#define ES_PARSER_POP READ_PARSER_REG(PFIFO_DATA) + +#define PARSER_WRITE (ES_WRITE | ES_PARSER_START) +#define PARSER_VIDEO (ES_TYPE_VIDEO) +#define PARSER_AUDIO (ES_TYPE_AUDIO) +#define PARSER_SUBPIC (ES_TYPE_SUBTITLE) +#define PARSER_PASSTHROUGH (ES_PASSTHROUGH | ES_PARSER_START) +#define PARSER_AUTOSEARCH (ES_SEARCH | ES_PARSER_START) +#define PARSER_DISCARD (ES_DISCARD | ES_PARSER_START) +#define PARSER_BUSY 
(ES_PARSER_BUSY) + +#define MAX_DRM_PACKAGE_SIZE 0x500000 + + +static unsigned char *search_pattern; +static dma_addr_t search_pattern_map; +static u32 audio_real_wp; +static u32 audio_buf_start; +static u32 audio_buf_end; + +static const char esparser_id[] = "esparser-id"; + +static DECLARE_WAIT_QUEUE_HEAD(wq); + + +static u32 search_done; +static u32 video_data_parsed; +static u32 audio_data_parsed; +static atomic_t esparser_use_count = ATOMIC_INIT(0); +static DEFINE_MUTEX(esparser_mutex); + +static inline u32 get_buf_wp(u32 type) +{ + if (type == BUF_TYPE_AUDIO) + return audio_real_wp; + else + return 0; +} +static inline u32 get_buf_start(u32 type) +{ + if (type == BUF_TYPE_AUDIO) + return audio_buf_start; + else + return 0; +} +static inline u32 get_buf_end(u32 type) +{ + if (type == BUF_TYPE_AUDIO) + return audio_buf_end; + else + return 0; +} +static void set_buf_wp(u32 type, u32 wp) +{ + if (type == BUF_TYPE_AUDIO) { + audio_real_wp = wp; + WRITE_AIU_REG(AIU_MEM_AIFIFO_MAN_WP, wp/* & 0xffffff00*/); + } + return; +} + +static irqreturn_t esparser_isr(int irq, void *dev_id) +{ + u32 int_status = READ_PARSER_REG(PARSER_INT_STATUS); + + WRITE_PARSER_REG(PARSER_INT_STATUS, int_status); + + if (int_status & PARSER_INTSTAT_SC_FOUND) { + WRITE_PARSER_REG(PFIFO_RD_PTR, 0); + WRITE_PARSER_REG(PFIFO_WR_PTR, 0); + search_done = 1; + wake_up_interruptible(&wq); + } + return IRQ_HANDLED; +} + +static inline u32 buf_wp(u32 type) +{ + u32 wp; + + if ((READ_PARSER_REG(PARSER_ES_CONTROL) & ES_VID_MAN_RD_PTR) == 0) { + wp = +#if 1/* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + (type == BUF_TYPE_HEVC) ? READ_VREG(HEVC_STREAM_WR_PTR) : +#endif + (type == BUF_TYPE_VIDEO) ? READ_VREG(VLD_MEM_VIFIFO_WP) : + (type == BUF_TYPE_AUDIO) ? + READ_AIU_REG(AIU_MEM_AIFIFO_MAN_WP) : + READ_PARSER_REG(PARSER_SUB_START_PTR); + } else { + wp = +#if 1/* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + (type == BUF_TYPE_HEVC) ? READ_PARSER_REG(PARSER_VIDEO_WP) : +#endif + (type == BUF_TYPE_VIDEO) ? 
READ_PARSER_REG(PARSER_VIDEO_WP) : + (type == BUF_TYPE_AUDIO) ? + READ_AIU_REG(AIU_MEM_AIFIFO_MAN_WP) : + READ_PARSER_REG(PARSER_SUB_START_PTR); + } + + return wp; +} + +static int esparser_stbuf_write(struct stream_buf_s *stbuf, const u8 *buf, u32 count) +{ + size_t r = count; + const char __user *p = buf; + + u32 len = 0; + u32 parser_type; + int ret; + u32 wp; + dma_addr_t dma_addr = 0; + u32 type = stbuf->type; + + VDEC_PRINT_FUN_LINENO(__func__, __LINE__); + if (type == BUF_TYPE_HEVC) + parser_type = PARSER_VIDEO; + else if (type == BUF_TYPE_VIDEO) + parser_type = PARSER_VIDEO; + else if (type == BUF_TYPE_AUDIO) + parser_type = PARSER_AUDIO; + else + parser_type = PARSER_SUBPIC; + + wp = buf_wp(type); + + if (r > 0) { + if (stbuf->is_phybuf) + len = count; + else { + len = min_t(size_t, r, (size_t) FETCHBUF_SIZE); + + if (copy_from_user(fetchbuf, p, len)) + return -EFAULT; + dma_addr = dma_map_single( + amports_get_dma_device(), fetchbuf, + FETCHBUF_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(amports_get_dma_device(), + (dma_addr_t) dma_addr)) + return -EFAULT; + + } + + /* wmb(); don't need */ + /* reset the Write and read pointer to zero again */ + WRITE_PARSER_REG(PFIFO_RD_PTR, 0); + WRITE_PARSER_REG(PFIFO_WR_PTR, 0); + + WRITE_PARSER_REG_BITS(PARSER_CONTROL, len, ES_PACK_SIZE_BIT, + ES_PACK_SIZE_WID); + WRITE_PARSER_REG_BITS(PARSER_CONTROL, + parser_type | PARSER_WRITE | + PARSER_AUTOSEARCH, ES_CTRL_BIT, + ES_CTRL_WID); + + if (stbuf->is_phybuf) { + u32 buf_32 = (unsigned long)buf & 0xffffffff; + WRITE_PARSER_REG(PARSER_FETCH_ADDR, buf_32); + } else { + WRITE_PARSER_REG(PARSER_FETCH_ADDR, dma_addr); + dma_unmap_single(amports_get_dma_device(), dma_addr, + FETCHBUF_SIZE, DMA_TO_DEVICE); + } + + search_done = 0; + if (!(stbuf->drm_flag & TYPE_PATTERN)) { + WRITE_PARSER_REG(PARSER_FETCH_CMD, + (7 << FETCH_ENDIAN) | len); + WRITE_PARSER_REG(PARSER_FETCH_ADDR, search_pattern_map); + WRITE_PARSER_REG(PARSER_FETCH_CMD, + (7 << FETCH_ENDIAN) | 
SEARCH_PATTERN_LEN); + } else { + WRITE_PARSER_REG(PARSER_FETCH_CMD, + (7 << FETCH_ENDIAN) | (len + 512)); + } + + ret = wait_event_interruptible_timeout(wq, search_done != 0, + HZ / 5); + if (ret == 0) { + WRITE_PARSER_REG(PARSER_FETCH_CMD, 0); + + if (wp == buf_wp(type)) { + /*no data fetched */ + return -EAGAIN; + } else { + pr_info("W Timeout, but fetch ok,"); + pr_info("type %d len=%d,wpdiff=%d, isphy %x\n", + type, len, wp - buf_wp(type), stbuf->is_phybuf); + } + } else if (ret < 0) + return -ERESTARTSYS; + } + + if ((type == BUF_TYPE_VIDEO) + || (has_hevc_vdec() && (type == BUF_TYPE_HEVC))) + video_data_parsed += len; + else if (type == BUF_TYPE_AUDIO) + audio_data_parsed += len; + + threadrw_update_buffer_level(stbuf, len); + VDEC_PRINT_FUN_LINENO(__func__, __LINE__); + + return len; +} + +static ssize_t _esparser_write(const char __user *buf, + size_t count, struct stream_buf_s *stbuf, int isphybuf) +{ + return esparser_stbuf_write(stbuf, buf, count); +} + +static ssize_t _esparser_write_s(const char __user *buf, + size_t count, struct stream_buf_s *stbuf) +{ + size_t r = count; + const char __user *p = buf; + u32 len = 0; + int ret; + u32 wp, buf_start, buf_end; + u32 type = stbuf->type; + void *vaddr = NULL; + + if (type != BUF_TYPE_AUDIO) + BUG(); + wp = get_buf_wp(type); + buf_end = get_buf_end(type) + 8; + buf_start = get_buf_start(type); + /*pr_info("write wp 0x%x, count %d, start 0x%x, end 0x%x\n", + * wp, (u32)count, buf_start, buf_end);*/ + if (wp + count > buf_end) { + if (wp == buf_end) { + wp = buf_start; + set_buf_wp(type, wp); + return -EAGAIN; + } + vaddr = codec_mm_phys_to_virt(wp); + ret = copy_from_user(vaddr, p, buf_end - wp); + if (ret > 0) { + len += buf_end - wp - ret; + codec_mm_dma_flush(vaddr, len, DMA_TO_DEVICE); + wp += len; + pr_info("copy from user not finished\n"); + set_buf_wp(type, wp); + goto end_write; + } else if (ret == 0) { + len += buf_end - wp; + codec_mm_dma_flush(vaddr, len, DMA_TO_DEVICE); + wp = buf_start; + r = 
count - len; + set_buf_wp(type, wp); + } else { + pr_info("copy from user failed 1\n"); + pr_info("w wp 0x%x, count %d, start 0x%x end 0x%x\n", + wp, (u32)count, buf_start, buf_end); + return -EAGAIN; + } + } + + vaddr = codec_mm_phys_to_virt(wp); + ret = copy_from_user(vaddr, p + len, r); + if (ret >= 0) { + len += r - ret; + codec_mm_dma_flush(vaddr, r - ret, DMA_TO_DEVICE); + if (ret > 0) + pr_info("copy from user not finished 2\n"); + wp += r - ret; + set_buf_wp(type, wp); + } else { + pr_info("copy from user failed 2\n"); + return -EAGAIN; + } + +end_write: + if (type == BUF_TYPE_AUDIO) + { + audio_data_parsed += len; + threadrw_update_buffer_level(stbuf, len); + } + + return len; +} + +s32 es_vpts_checkin_us64(struct stream_buf_s *buf, u64 us64) +{ + u32 passed; + + if (buf->write_thread) + passed = threadrw_dataoffset(buf); + else + passed = video_data_parsed; + return pts_checkin_offset_us64(PTS_TYPE_VIDEO, passed, us64); + +} + +s32 es_apts_checkin_us64(struct stream_buf_s *buf, u64 us64) +{ + u32 passed; + + if (buf->write_thread) + passed = threadrw_dataoffset(buf); + else + passed = audio_data_parsed; + return pts_checkin_offset_us64(PTS_TYPE_AUDIO, passed, us64); +} + +s32 es_vpts_checkin(struct stream_buf_s *buf, u32 pts) +{ +#if 0 + if (buf->first_tstamp == INVALID_PTS) { + buf->flag |= BUF_FLAG_FIRST_TSTAMP; + buf->first_tstamp = pts; + return 0; + } +#endif + u32 passed = 0; + + mutex_lock(&esparser_mutex); + passed = video_data_parsed + threadrw_buffer_level(buf); + mutex_unlock(&esparser_mutex); + + return pts_checkin_offset(PTS_TYPE_VIDEO, passed, pts); + +} + +s32 es_apts_checkin(struct stream_buf_s *buf, u32 pts) +{ +#if 0 + if (buf->first_tstamp == INVALID_PTS) { + buf->flag |= BUF_FLAG_FIRST_TSTAMP; + buf->first_tstamp = pts; + + return 0; + } +#endif + u32 passed = 0; + mutex_lock(&esparser_mutex); + passed = audio_data_parsed + threadrw_buffer_level(buf); + mutex_unlock(&esparser_mutex); + + return pts_checkin_offset(PTS_TYPE_AUDIO, 
passed, pts); +} + +s32 esparser_init(struct stream_buf_s *buf, struct vdec_s *vdec) +{ + s32 r = 0; + u32 pts_type; + u32 parser_sub_start_ptr; + u32 parser_sub_end_ptr; + u32 parser_sub_rp; + bool first_use = false; + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + VDEC_PRINT_FUN_LINENO(__func__, __LINE__); + + if (has_hevc_vdec() && (buf->type == BUF_TYPE_HEVC)) + pts_type = PTS_TYPE_HEVC; + else + /* #endif */ + if (buf->type == BUF_TYPE_VIDEO) + pts_type = PTS_TYPE_VIDEO; + else if (buf->type == BUF_TYPE_AUDIO) + pts_type = PTS_TYPE_AUDIO; + else if (buf->type == BUF_TYPE_SUBTITLE) + pts_type = PTS_TYPE_MAX; + else + return -EINVAL; + mutex_lock(&esparser_mutex); + parser_sub_start_ptr = READ_PARSER_REG(PARSER_SUB_START_PTR); + parser_sub_end_ptr = READ_PARSER_REG(PARSER_SUB_END_PTR); + parser_sub_rp = READ_PARSER_REG(PARSER_SUB_RP); + + buf->flag |= BUF_FLAG_PARSER; + + if (atomic_add_return(1, &esparser_use_count) == 1) { + first_use = true; + + if (fetchbuf == 0) { + pr_info("%s: no fetchbuf\n", __func__); + r = -ENOMEM; + goto Err_1; + } + + if (search_pattern == NULL) { + search_pattern = kcalloc(1, + SEARCH_PATTERN_LEN, + GFP_KERNEL); + + if (search_pattern == NULL) { + pr_err("%s: no search_pattern\n", __func__); + r = -ENOMEM; + goto Err_1; + } + + /* build a fake start code to get parser interrupt */ + search_pattern[0] = 0x00; + search_pattern[1] = 0x00; + search_pattern[2] = 0x01; + search_pattern[3] = 0xff; + + search_pattern_map = dma_map_single( + amports_get_dma_device(), + search_pattern, + SEARCH_PATTERN_LEN, + DMA_TO_DEVICE); + } + + /* reset PARSER with first esparser_init() call */ + WRITE_RESET_REG(RESET1_REGISTER, RESET_PARSER); +/* for recorded file and local play, this can't change the input source*/ +/* TS data path */ +/* +#ifndef CONFIG_AM_DVB + WRITE_DEMUX_REG(FEC_INPUT_CONTROL, 0); +#else + tsdemux_set_reset_flag(); +#endif */ + + CLEAR_DEMUX_REG_MASK(TS_HIU_CTL, 1 << USE_HI_BSF_INTERFACE); + CLEAR_DEMUX_REG_MASK(TS_HIU_CTL_2, 
1 << USE_HI_BSF_INTERFACE); + CLEAR_DEMUX_REG_MASK(TS_HIU_CTL_3, 1 << USE_HI_BSF_INTERFACE); + + CLEAR_DEMUX_REG_MASK(TS_FILE_CONFIG, (1 << TS_HIU_ENABLE)); + + WRITE_PARSER_REG(PARSER_CONFIG, + (10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) | + (1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) | + (16 << PS_CFG_MAX_FETCH_CYCLE_BIT)); + + WRITE_PARSER_REG(PFIFO_RD_PTR, 0); + WRITE_PARSER_REG(PFIFO_WR_PTR, 0); + + WRITE_PARSER_REG(PARSER_SEARCH_PATTERN, ES_START_CODE_PATTERN); + WRITE_PARSER_REG(PARSER_SEARCH_MASK, ES_START_CODE_MASK); + + WRITE_PARSER_REG(PARSER_CONFIG, + (10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) | + (1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) | + PS_CFG_STARTCODE_WID_24 | + PS_CFG_PFIFO_ACCESS_WID_8 | + /* single byte pop */ + (16 << PS_CFG_MAX_FETCH_CYCLE_BIT)); + + WRITE_PARSER_REG(PARSER_CONTROL, PARSER_AUTOSEARCH); + + } + + /* hook stream buffer with PARSER */ + if (has_hevc_vdec() && (pts_type == PTS_TYPE_HEVC)) { + WRITE_PARSER_REG(PARSER_VIDEO_START_PTR, vdec->input.start); + WRITE_PARSER_REG(PARSER_VIDEO_END_PTR, vdec->input.start + + vdec->input.size - 8); + + if (vdec_single(vdec)) { + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, + ES_VID_MAN_RD_PTR); + + /* set vififo_vbuf_rp_sel=>hevc */ + WRITE_VREG(DOS_GEN_CTRL0, 3 << 1); + + /* set use_parser_vbuf_wp */ + SET_VREG_MASK(HEVC_STREAM_CONTROL, + (1 << 3) | (0 << 4)); + /* set stream_fetch_enable */ + SET_VREG_MASK(HEVC_STREAM_CONTROL, 1); + + if (buf->no_parser) { + /*set endian for non-parser mode */ + SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4); + } + + /* set stream_buffer_hole with 256 bytes */ + SET_VREG_MASK(HEVC_STREAM_FIFO_CTL, (1 << 29)); + } else { + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, + ES_VID_MAN_RD_PTR); + WRITE_PARSER_REG(PARSER_VIDEO_WP, vdec->input.start); + WRITE_PARSER_REG(PARSER_VIDEO_RP, vdec->input.start); + } + video_data_parsed = 0; + } else if (pts_type == PTS_TYPE_VIDEO) { + WRITE_PARSER_REG(PARSER_VIDEO_START_PTR, + vdec->input.start); + WRITE_PARSER_REG(PARSER_VIDEO_END_PTR, + vdec->input.start + 
vdec->input.size - 8); + + if (vdec_single(vdec) || (vdec_get_debug_flags() & 0x2)) { + if (vdec_get_debug_flags() & 0x2) + pr_info("%s %d\n", __func__, __LINE__); + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, + ES_VID_MAN_RD_PTR); + + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + CLEAR_VREG_MASK(VLD_MEM_VIFIFO_BUF_CNTL, + MEM_BUFCTRL_INIT); + + if (has_hevc_vdec()) { + /* set vififo_vbuf_rp_sel=>vdec */ + WRITE_VREG(DOS_GEN_CTRL0, 0); + } + } else { + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, + ES_VID_MAN_RD_PTR); + WRITE_PARSER_REG(PARSER_VIDEO_WP, + vdec->input.start); + WRITE_PARSER_REG(PARSER_VIDEO_RP, + vdec->input.start); + } + video_data_parsed = 0; + } else if (pts_type == PTS_TYPE_AUDIO) { + /* set wp as buffer start */ + SET_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, + MEM_BUFCTRL_MANUAL); + WRITE_AIU_REG(AIU_MEM_AIFIFO_MAN_RP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_AIU_REG_BITS(AIU_MEM_AIFIFO_CONTROL, 7, 3, 3); + SET_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, + MEM_BUFCTRL_INIT); + CLEAR_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, + MEM_BUFCTRL_INIT); + WRITE_AIU_REG(AIU_MEM_AIFIFO_MAN_WP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + audio_data_parsed = 0; + audio_buf_start = + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR); + audio_real_wp = audio_buf_start; + audio_buf_end = READ_AIU_REG(AIU_MEM_AIFIFO_END_PTR); + } else if (buf->type == BUF_TYPE_SUBTITLE) { + WRITE_PARSER_REG(PARSER_SUB_START_PTR, + parser_sub_start_ptr); + WRITE_PARSER_REG(PARSER_SUB_END_PTR, + parser_sub_end_ptr); + WRITE_PARSER_REG(PARSER_SUB_RP, parser_sub_rp); + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, + (7 << ES_SUB_WR_ENDIAN_BIT) | + ES_SUB_MAN_RD_PTR); + } + + if (pts_type < PTS_TYPE_MAX) { + r = pts_start(pts_type); + + if (r < 0) { + pr_info("esparser_init: pts_start failed\n"); + goto Err_1; + } + } +#if 0 + if (buf->flag & BUF_FLAG_FIRST_TSTAMP) { + if (buf->type == BUF_TYPE_VIDEO) + es_vpts_checkin(buf, buf->first_tstamp); + else if (buf->type == BUF_TYPE_AUDIO) + 
es_apts_checkin(buf, buf->first_tstamp); + + buf->flag &= ~BUF_FLAG_FIRST_TSTAMP; + } +#endif + + if (first_use) { + /*TODO irq */ + r = vdec_request_irq(PARSER_IRQ, esparser_isr, + "parser", (void *)esparser_id); + + if (r) { + pr_info("esparser_init: irq register failed.\n"); + goto Err_2; + } + VDEC_PRINT_FUN_LINENO(__func__, __LINE__); + + WRITE_PARSER_REG(PARSER_INT_STATUS, 0xffff); + WRITE_PARSER_REG(PARSER_INT_ENABLE, + PARSER_INTSTAT_SC_FOUND << + PARSER_INT_HOST_EN_BIT); + } + mutex_unlock(&esparser_mutex); + + if (!(vdec_get_debug_flags() & 1) && + !codec_mm_video_tvp_enabled()) { + int block_size = (buf->type == BUF_TYPE_AUDIO) ? + PAGE_SIZE : PAGE_SIZE << 4; + int buf_num = (buf->type == BUF_TYPE_AUDIO) ? + 20 : (2 * SZ_1M)/(PAGE_SIZE << 4); + if (!(buf->type == BUF_TYPE_SUBTITLE)) + buf->write_thread = threadrw_alloc(buf_num, + block_size, + esparser_write_ex, + (buf->type == BUF_TYPE_AUDIO) ? 1 : 0); + /*manul mode for audio*/ + } + + return 0; + +Err_2: + pts_stop(pts_type); + +Err_1: + atomic_dec(&esparser_use_count); + buf->flag &= ~BUF_FLAG_PARSER; + mutex_unlock(&esparser_mutex); + return r; +} +EXPORT_SYMBOL(esparser_init); + +void esparser_audio_reset_s(struct stream_buf_s *buf) +{ + ulong flags; + DEFINE_SPINLOCK(lock); + + spin_lock_irqsave(&lock, flags); + + SET_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_MANUAL); + WRITE_AIU_REG(AIU_MEM_AIFIFO_MAN_RP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_AIU_REG_BITS(AIU_MEM_AIFIFO_CONTROL, 7, 3, 3); + SET_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + CLEAR_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + WRITE_AIU_REG(AIU_MEM_AIFIFO_MAN_WP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + + buf->flag |= BUF_FLAG_PARSER; + + audio_data_parsed = 0; + audio_real_wp = READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR); + audio_buf_start = READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR); + audio_buf_end = READ_AIU_REG(AIU_MEM_AIFIFO_END_PTR); + spin_unlock_irqrestore(&lock, flags); + + return; 
+} + +void esparser_audio_reset(struct stream_buf_s *buf) +{ + ulong flags; + DEFINE_SPINLOCK(lock); + + spin_lock_irqsave(&lock, flags); + + WRITE_PARSER_REG(PARSER_AUDIO_WP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_PARSER_REG(PARSER_AUDIO_RP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + + WRITE_PARSER_REG(PARSER_AUDIO_START_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_PARSER_REG(PARSER_AUDIO_END_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_END_PTR)); + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_AUD_MAN_RD_PTR); + + WRITE_AIU_REG(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + CLEAR_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + + buf->flag |= BUF_FLAG_PARSER; + + audio_data_parsed = 0; + audio_real_wp = 0; + audio_buf_start = 0; + audio_buf_end = 0; + spin_unlock_irqrestore(&lock, flags); + +} + +void esparser_release(struct stream_buf_s *buf) +{ + u32 pts_type; + + /* check if esparser_init() is ever called */ + if ((buf->flag & BUF_FLAG_PARSER) == 0) + return; + + if (atomic_read(&esparser_use_count) == 0) { + pr_info + ("[%s:%d]###warning, esparser has been released already\n", + __func__, __LINE__); + return; + } + if (buf->write_thread) + threadrw_release(buf); + if (atomic_dec_and_test(&esparser_use_count)) { + WRITE_PARSER_REG(PARSER_INT_ENABLE, 0); + /*TODO irq */ + + vdec_free_irq(PARSER_IRQ, (void *)esparser_id); + + if (search_pattern) { + dma_unmap_single(amports_get_dma_device(), + search_pattern_map, + SEARCH_PATTERN_LEN, DMA_TO_DEVICE); + kfree(search_pattern); + search_pattern = NULL; + } + } + + if (has_hevc_vdec() && (buf->type == BUF_TYPE_HEVC)) + pts_type = PTS_TYPE_VIDEO; + else if (buf->type == BUF_TYPE_VIDEO) + pts_type = PTS_TYPE_VIDEO; + else if (buf->type == BUF_TYPE_AUDIO) + pts_type = PTS_TYPE_AUDIO; + else if (buf->type == BUF_TYPE_SUBTITLE) { + buf->flag &= ~BUF_FLAG_PARSER; + return; + } else + return; + + buf->flag &= ~BUF_FLAG_PARSER; + pts_stop(pts_type); +} +EXPORT_SYMBOL(esparser_release); + +ssize_t 
drm_write(struct file *file, struct stream_buf_s *stbuf, + const char __user *buf, size_t count) +{ + s32 r; + u32 len; + u32 realcount, totalcount; + u32 havewritebytes = 0; + u32 leftcount = 0; + + struct drm_info tmpmm; + struct drm_info *drm = &tmpmm; + u32 res = 0; + int drm_flag = 0; + unsigned long realbuf; + + if (buf == NULL || count == 0) + return -EINVAL; + if (stbuf->write_thread) { + r = threadrw_flush_buffers(stbuf); + if (r < 0) + pr_info("Warning. drm flush threadrw failed[%d]\n", r); + } + res = copy_from_user(drm, buf, sizeof(struct drm_info)); + if (res) { + pr_info("drm kmalloc failed res[%d]\n", res); + return -EFAULT; + } + + if ((drm->drm_flag & TYPE_DRMINFO) && (drm->drm_hasesdata == 0)) { + if (drm->drm_pktsize > MAX_DRM_PACKAGE_SIZE) { + pr_err("drm package size is error, size is %u\n", drm->drm_pktsize); + return -EINVAL; + } + /* buf only has drminfo not have esdata; */ + realbuf = drm->drm_phy; + realcount = drm->drm_pktsize; + drm_flag = drm->drm_flag; + /* DRM_PRNT("drm_get_rawdata + *onlydrminfo drm->drm_hasesdata[0x%x] + * stbuf->type %d buf[0x%x]\n", + *drm->drm_hasesdata,stbuf->type,buf); + */ + } else if (drm->drm_hasesdata == 1) { /* buf is drminfo+es; */ + if (drm->drm_pktsize > MAX_DRM_PACKAGE_SIZE) { + pr_err("drm package size is error, size is %u\n", drm->drm_pktsize); + return -EINVAL; + } + realcount = drm->drm_pktsize; + realbuf = (unsigned long)buf + sizeof(struct drm_info); + drm_flag = 0; + /* DRM_PRNT("drm_get_rawdata + * drminfo+es drm->drm_hasesdata[0x%x] + * stbuf->type %d\n",drm->drm_hasesdata,stbuf->type); + */ + } else { /* buf is hwhead; */ + realcount = count; + drm_flag = 0; + realbuf = (unsigned long)buf; + /* DRM_PRNT("drm_get_rawdata + * drm->drm_hasesdata[0x%x] + * len[%d] count[%d] realcout[%d]\n", + * drm->drm_hasesdata,len,count,realcount); + */ + } + + len = realcount; + count = realcount; + totalcount = realcount; + stbuf->drm_flag = drm_flag; + stbuf->is_phybuf = drm_flag ? 
1 : 0; + + while (len > 0) { + if (stbuf->type != BUF_TYPE_SUBTITLE + && stbuf_space(stbuf) < count) { + /*should not write partial data in drm mode*/ + r = stbuf_wait_space(stbuf, count); + if (r < 0) + return r; + if (stbuf_space(stbuf) < count) + return -EAGAIN; + } + len = min_t(u32, len, count); + + mutex_lock(&esparser_mutex); + + if (stbuf->type != BUF_TYPE_AUDIO) + r = _esparser_write((const char __user *)realbuf, len, + stbuf, drm_flag); + else + r = _esparser_write_s((const char __user *)realbuf, len, + stbuf); + if (r < 0) { + pr_info("drm_write _esparser_write failed [%d]\n", r); + mutex_unlock(&esparser_mutex); + return r; + } + havewritebytes += r; + leftcount = totalcount - havewritebytes; + if (havewritebytes == totalcount) { + + mutex_unlock(&esparser_mutex); + break; /* write ok; */ + } else if ((len > 0) && (havewritebytes < totalcount)) { + DRM_PRNT + ("d writebytes[%d] want[%d] total[%d] real[%d]\n", + havewritebytes, len, totalcount, realcount); + len = len - r; /* write again; */ + realbuf = realbuf + r; + } else { + pr_info + ("e writebytes[%d] want[%d] total[%d] real[%d]\n", + havewritebytes, len, totalcount, realcount); + } + mutex_unlock(&esparser_mutex); + } + + if ((drm->drm_flag & TYPE_DRMINFO) && (drm->drm_hasesdata == 0)) { + havewritebytes = sizeof(struct drm_info); + } else if (drm->drm_hasesdata == 1) { + havewritebytes += sizeof(struct drm_info); + } + return havewritebytes; +} +EXPORT_SYMBOL(drm_write); + +/* + *flags: + *1:phy + *2:noblock + */ +ssize_t esparser_write_ex(struct file *file, + struct stream_buf_s *stbuf, + const char __user *buf, size_t count, + int flags) +{ + + s32 r; + u32 len = count; + + if (buf == NULL || count == 0) + return -EINVAL; + + /*subtitle have no level to check, */ + if (stbuf->type != BUF_TYPE_SUBTITLE && stbuf_space(stbuf) < count) { + if ((flags & 2) || ((file != NULL) && + (file->f_flags & O_NONBLOCK))) { + len = stbuf_space(stbuf); + + if (len < 256) /* <1k.do eagain, */ + return -EAGAIN; + 
} else { + len = min(stbuf_canusesize(stbuf) / 8, len); + + if (stbuf_space(stbuf) < len) { + r = stbuf_wait_space(stbuf, len); + if (r < 0) + return r; + } + } + } + + stbuf->last_write_jiffies64 = jiffies_64; + + len = min_t(u32, len, count); + + mutex_lock(&esparser_mutex); + + if (flags & 1) + stbuf->is_phybuf = true; + + if (stbuf->type == BUF_TYPE_AUDIO) + r = _esparser_write_s(buf, len, stbuf); + else + r = _esparser_write(buf, len, stbuf, flags & 1); + + mutex_unlock(&esparser_mutex); + + return r; +} +ssize_t esparser_write(struct file *file, + struct stream_buf_s *stbuf, + const char __user *buf, size_t count) +{ + if (stbuf->write_thread) { + ssize_t ret; + + ret = threadrw_write(file, stbuf, buf, count); + if (ret == -EAGAIN) { + u32 a, b; + int vdelay, adelay; + + if ((stbuf->type != BUF_TYPE_VIDEO) && + (stbuf->type != BUF_TYPE_HEVC)) + return ret; + if (stbuf->buf_size > (SZ_1M * 30) || + (threadrw_buffer_size(stbuf) > SZ_1M * 10) || + !threadrw_support_more_buffers(stbuf)) + return ret; + /*only chang buffer for video.*/ + vdelay = calculation_stream_delayed_ms( + PTS_TYPE_VIDEO, &a, &b); + adelay = calculation_stream_delayed_ms( + PTS_TYPE_AUDIO, &a, &b); + if ((vdelay > 100 && vdelay < 2000) && /*vdelay valid.*/ + ((vdelay < 500) ||/*video delay is short!*/ + (adelay > 0 && adelay < 1000))/*audio is low.*/ + ) { + /*on buffer fulled. + *if delay is less than 100ms we think errors, + *And we add more buffer on delay < 2s. 
+ */ + int new_size = 2 * 1024 * 1024; + + threadrw_alloc_more_buffer_size( + stbuf, new_size); + } + } + return ret; + } + return esparser_write_ex(file, stbuf, buf, count, 0); +} +EXPORT_SYMBOL(esparser_write); + +void esparser_sub_reset(void) +{ + ulong flags; + DEFINE_SPINLOCK(lock); + u32 parser_sub_start_ptr; + u32 parser_sub_end_ptr; + + spin_lock_irqsave(&lock, flags); + + parser_sub_start_ptr = READ_PARSER_REG(PARSER_SUB_START_PTR); + parser_sub_end_ptr = READ_PARSER_REG(PARSER_SUB_END_PTR); + + WRITE_PARSER_REG(PARSER_SUB_START_PTR, parser_sub_start_ptr); + WRITE_PARSER_REG(PARSER_SUB_END_PTR, parser_sub_end_ptr); + WRITE_PARSER_REG(PARSER_SUB_RP, parser_sub_start_ptr); + WRITE_PARSER_REG(PARSER_SUB_WP, parser_sub_start_ptr); + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, + (7 << ES_SUB_WR_ENDIAN_BIT) | ES_SUB_MAN_RD_PTR); + + spin_unlock_irqrestore(&lock, flags); +} + +static int esparser_stbuf_init(struct stream_buf_s *stbuf, + struct vdec_s *vdec) +{ + int ret = -1; + + ret = stbuf_init(stbuf, vdec); + if (ret) + goto out; + + ret = esparser_init(stbuf, vdec); + if (!ret) + stbuf->flag |= BUF_FLAG_IN_USE; +out: + return ret; +} + +static void esparser_stbuf_release(struct stream_buf_s *stbuf) +{ + esparser_release(stbuf); + + stbuf_release(stbuf); +} + +static struct stream_buf_ops esparser_stbuf_ops = { + .init = esparser_stbuf_init, + .release = esparser_stbuf_release, + .write = esparser_stbuf_write, + .get_wp = parser_get_wp, + .set_wp = parser_set_wp, + .get_rp = parser_get_rp, + .set_rp = parser_set_rp, +}; + +struct stream_buf_ops *get_esparser_stbuf_ops(void) +{ + return &esparser_stbuf_ops; +} +EXPORT_SYMBOL(get_esparser_stbuf_ops); +
diff --git a/drivers/stream_input/parser/esparser.h b/drivers/stream_input/parser/esparser.h new file mode 100644 index 0000000..24e6926 --- /dev/null +++ b/drivers/stream_input/parser/esparser.h
@@ -0,0 +1,152 @@ +/* + * drivers/amlogic/media/stream_input/parser/esparser.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef ESPARSER_H +#define ESPARSER_H + +#include "../../frame_provider/decoder/utils/vdec.h" + +extern ssize_t drm_write(struct file *file, + struct stream_buf_s *stbuf, + const char __user *buf, size_t count); + +extern s32 esparser_init(struct stream_buf_s *buf, struct vdec_s *vdec); +extern s32 esparser_init_s(struct stream_buf_s *buf); +extern void esparser_release(struct stream_buf_s *buf); +extern ssize_t esparser_write(struct file *file, + struct stream_buf_s *stbuf, + const char __user *buf, size_t count); +extern ssize_t esparser_write_ex(struct file *file, + struct stream_buf_s *stbuf, + const char __user *buf, size_t count, + int is_phy); + +extern s32 es_vpts_checkin_us64(struct stream_buf_s *buf, u64 us64); + +extern s32 es_apts_checkin_us64(struct stream_buf_s *buf, u64 us64); + +extern int es_vpts_checkin(struct stream_buf_s *buf, u32 pts); + +extern int es_apts_checkin(struct stream_buf_s *buf, u32 pts); + +extern void esparser_audio_reset(struct stream_buf_s *buf); +extern void esparser_audio_reset_s(struct stream_buf_s *buf); + +extern void esparser_sub_reset(void); + +#ifdef CONFIG_AM_DVB +extern int tsdemux_set_reset_flag(void); +#endif + +/* TODO: move to register headers */ +#define ES_PACK_SIZE_BIT 8 +#define ES_PACK_SIZE_WID 24 + +#define ES_CTRL_WID 8 +#define ES_CTRL_BIT 0 +#define 
ES_TYPE_MASK (3 << 6) +#define ES_TYPE_VIDEO (0 << 6) +#define ES_TYPE_AUDIO (1 << 6) +#define ES_TYPE_SUBTITLE (2 << 6) + +#define ES_WRITE (1<<5) +#define ES_PASSTHROUGH (1<<4) +#define ES_INSERT_BEFORE_ES_WRITE (1<<3) +#define ES_DISCARD (1<<2) +#define ES_SEARCH (1<<1) +#define ES_PARSER_START (1<<0) +#define ES_PARSER_BUSY (1<<0) + +#define PARSER_INTSTAT_FETCH_CMD (1<<7) +#define PARSER_INTSTAT_PARSE (1<<4) +#define PARSER_INTSTAT_DISCARD (1<<3) +#define PARSER_INTSTAT_INSZERO (1<<2) +#define PARSER_INTSTAT_ACT_NOSSC (1<<1) +#define PARSER_INTSTAT_SC_FOUND (1<<0) + +#define FETCH_CIR_BUF (1<<31) +#define FETCH_CHK_BUF_STOP (1<<30) +#define FETCH_PASSTHROUGH (1<<29) +#define FETCH_ENDIAN 27 +#define FETCH_PASSTHROUGH_TYPE_MASK (0x3<<27) +#define FETCH_ENDIAN_MASK (0x7<<27) +#define FETCH_BUF_SIZE_MASK (0x7ffffff) +#define FETCH_CMD_PTR_MASK 3 +#define FETCH_CMD_RD_PTR_BIT 5 +#define FETCH_CMD_WR_PTR_BIT 3 +#define FETCH_CMD_NUM_MASK 3 +#define FETCH_CMD_NUM_BIT 0 + +#define ES_COUNT_MASK 0xfff +#define ES_COUNT_BIT 20 +#define ES_REQ_PENDING (1<<19) +#define ES_PASSTHROUGH_EN (1<<18) +#define ES_PASSTHROUGH_TYPE_MASK (3<<16) +#define ES_PASSTHROUGH_TYPE_VIDEO (0<<16) +#define ES_PASSTHROUGH_TYPE_AUDIO (1<<16) +#define ES_PASSTHROUGH_TYPE_SUBTITLE (2<<16) +#define ES_WR_ENDIAN_MASK (0x7) +#define ES_SUB_WR_ENDIAN_BIT 9 +#define ES_SUB_MAN_RD_PTR (1<<8) +#define ES_AUD_WR_ENDIAN_BIT 5 +#define ES_AUD_MAN_RD_PTR (1<<4) +#define ES_VID_WR_ENDIAN_BIT 1 +#define ES_VID_MAN_RD_PTR (1<<0) + +#define PS_CFG_FETCH_DMA_URGENT (1<<31) +#define PS_CFG_STREAM_DMA_URGENT (1<<30) +#define PS_CFG_FORCE_PFIFO_REN (1<<29) +#define PS_CFG_PFIFO_PEAK_EN (1<<28) +#define PS_CFG_SRC_SEL_BIT 24 +#define PS_CFG_SRC_SEL_MASK (3<<PS_CFG_SRC_SEL_BIT) +#define PS_CFG_SRC_SEL_FETCH (0<<PS_CFG_SRC_SEL_BIT) +#define PS_CFG_SRC_SEL_AUX1 (1<<PS_CFG_SRC_SEL_BIT) /*from NDMA */ +#define PS_CFG_SRC_SEL_AUX2 (2<<PS_CFG_SRC_SEL_BIT) +#define PS_CFG_SRC_SEL_AUX3 (3<<PS_CFG_SRC_SEL_BIT) +#define 
PS_CFG_PFIFO_EMPTY_CNT_BIT 16 +#define PS_CFG_PFIFO_EMPTY_CNT_MASK 0xff +#define PS_CFG_MAX_ES_WR_CYCLE_BIT 12 +#define PS_CFG_MAX_ES_WR_CYCLE_MASK 0xf +#define PS_CFG_STARTCODE_WID_MASK (0x3<<10) +#define PS_CFG_STARTCODE_WID_8 (0x0<<10) +#define PS_CFG_STARTCODE_WID_16 (0x1<<10) +#define PS_CFG_STARTCODE_WID_24 (0x2<<10) +#define PS_CFG_STARTCODE_WID_32 (0x3<<10) +#define PS_CFG_PFIFO_ACCESS_WID_MASK (0x3<<8) +#define PS_CFG_PFIFO_ACCESS_WID_8 (0x0<<8) +#define PS_CFG_PFIFO_ACCESS_WID_16 (0x1<<8) +#define PS_CFG_PFIFO_ACCESS_WID_24 (0x2<<8) +#define PS_CFG_PFIFO_ACCESS_WID_32 (0x3<<8) +#define PS_CFG_MAX_FETCH_CYCLE_BIT 0 +#define PS_CFG_MAX_FETCH_CYCLE_MASK 0xff + +#define PARSER_INT_DISABLE_CNT_MASK 0xffff +#define PARSER_INT_DISABLE_CNT_BIT 16 +#define PARSER_INT_HOST_EN_MASK 0xff +#define PARSER_INT_HOST_EN_BIT 8 +#define PARSER_INT_AMRISC_EN_MASK 0xff +#define PARSER_INT_AMRISC_EN_BIT 0 +#define PARSER_INT_ALL 0xff + +#define RESET_PARSER (1<<8) +#define TS_HIU_ENABLE 5 +#define USE_HI_BSF_INTERFACE 7 + +#define DRM_PRNT(fmt, args...) +#define TRACE() pr_info("drm--[%s::%d]\n", __func__, __LINE__) + +#endif /* ESPARSER_H */
diff --git a/drivers/stream_input/parser/hw_demux/Makefile b/drivers/stream_input/parser/hw_demux/Makefile new file mode 100644 index 0000000..93454ae --- /dev/null +++ b/drivers/stream_input/parser/hw_demux/Makefile
@@ -0,0 +1,8 @@ +obj-m += aml_hardware_dmx.o + +ccflags-y += -I$(srctree)/include/media -I$(srctree)/drivers/media/pci/ttpci -I$(srctree)/drivers/gpio -I$(srctree)/include -DENABLE_DEMUX_DRIVER + +aml_hardware_dmx-objs += aml_dvb.o +aml_hardware_dmx-objs += aml_dmx.o + +#obj-y += dvb_ci/
diff --git a/drivers/stream_input/parser/hw_demux/aml_demod_gt.h b/drivers/stream_input/parser/hw_demux/aml_demod_gt.h new file mode 100644 index 0000000..77658b0 --- /dev/null +++ b/drivers/stream_input/parser/hw_demux/aml_demod_gt.h
@@ -0,0 +1,26 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef __AML_DEMOD_GT_H__ +#define __AML_DEMOD_GT_H__ + +#include "../dvb_common.h" + + +#endif /*__AML_DEMOD_GT_H__*/
diff --git a/drivers/stream_input/parser/hw_demux/aml_dmx.c b/drivers/stream_input/parser/hw_demux/aml_dmx.c new file mode 100644 index 0000000..7d9d7de --- /dev/null +++ b/drivers/stream_input/parser/hw_demux/aml_dmx.c
@@ -0,0 +1,6379 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +/* + * AMLOGIC demux driver. + */ + +#include <linux/version.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/wait.h> +#include <linux/string.h> +#include <linux/interrupt.h> +#include <linux/fs.h> +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/fcntl.h> +#include <asm/irq.h> +#include <linux/uaccess.h> +#include <linux/poll.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <asm/cacheflush.h> +#include <linux/dma-mapping.h> +#include <linux/pinctrl/pinmux.h> +#include <linux/vmalloc.h> +#include <linux/amlogic/media/codec_mm/codec_mm.h> +#include <linux/amlogic/media/codec_mm/configs.h> +#include "../../amports/streambuf.h" +#include "c_stb_define.h" +#include "c_stb_regs_define.h" +#include "aml_dvb.h" +#include "aml_dvb_reg.h" +#include <linux/sched/signal.h> + + +#define ENABLE_SEC_BUFF_WATCHDOG +#define USE_AHB_MODE +#define PR_ERROR_SPEED_LIMIT + +#define pr_dbg_flag(_f, _args...)\ + do {\ + if (debug_dmx&(_f))\ + printk(_args);\ + } while (0) +#define pr_dbg_irq_flag(_f, _args...)\ + do {\ + if 
(debug_irq&(_f))\ + printk(_args);\ + } while (0) +#define pr_dbg(args...) pr_dbg_flag(0x1, args) +#define pr_dbg_irq(args...)pr_dbg_irq_flag(0x1, args) +#define pr_dbg_irq_dvr(args...)pr_dbg_irq_flag(0x2, args) +#define pr_dbg_sf(args...) pr_dbg_flag(0x4, args) +#define pr_dbg_irq_sf(args...) pr_dbg_irq_flag(0x4, args) +#define pr_dbg_ss(args...) pr_dbg_flag(0x8, args) +#define pr_dbg_irq_ss(args...) pr_dbg_irq_flag(0x8, args) +#define pr_dbg_irq_pes(args...) pr_dbg_irq_flag(0x10, args) +#define pr_dbg_irq_sub(args...) pr_dbg_irq_flag(0x20, args) + +#ifdef PR_ERROR_SPEED_LIMIT +static u32 last_pr_error_time; +#define pr_error(fmt, _args...)\ + do {\ + u32 diff = jiffies_to_msecs(jiffies - last_pr_error_time);\ + if (!last_pr_error_time || diff > 50) {\ + pr_err("DVB:" fmt, ## _args);\ + last_pr_error_time = jiffies;\ + } \ + } while (0) +#else +#define pr_error(fmt, args...) pr_err("DVB: " fmt, ## args) +#endif + +#define pr_inf(fmt, args...) printk("DVB: " fmt, ## args) + +#define dump(b, l) \ + do { \ + int i; \ + printk("dump: "); \ + for (i = 0; i < (l); i++) {\ + if (!(i&0xf)) \ + printk("\n\t"); \ + printk("%02x ", *(((unsigned char *)(b))+i)); \ + } \ + printk("\n"); \ + } while (0) + +MODULE_PARM_DESC(debug_dmx, "\n\t\t Enable demux debug information"); +static int debug_dmx; +module_param(debug_dmx, int, 0644); + +MODULE_PARM_DESC(debug_irq, "\n\t\t Enable demux IRQ debug information"); +static int debug_irq; +module_param(debug_irq, int, 0644); + +MODULE_PARM_DESC(disable_dsc, "\n\t\t Disable discrambler"); +static int disable_dsc; +module_param(disable_dsc, int, 0644); + +MODULE_PARM_DESC(enable_sec_monitor, "\n\t\t Enable sec monitor default is enable"); +static int enable_sec_monitor = 2; +module_param(enable_sec_monitor, int, 0644); +/*For old version kernel */ +#ifndef MESON_CPU_MAJOR_ID_GXL +#define MESON_CPU_MAJOR_ID_GXL 0x21 +#endif + +static int npidtypes = CHANNEL_COUNT; +#define MOD_PARAM_DECLARE_CHANPIDS_TYPES(_dmx) \ 
+MODULE_PARM_DESC(debug_dmx##_dmx##_chanpids_types, "\n\t\t pids types of dmx channels"); \ +static short debug_dmx##_dmx##_chanpids_types[CHANNEL_COUNT] = \ + {[0 ... (CHANNEL_COUNT - 1)] = -1}; \ +module_param_array(debug_dmx##_dmx##_chanpids_types, short, &npidtypes, 0444) + +MOD_PARAM_DECLARE_CHANPIDS_TYPES(0); +MOD_PARAM_DECLARE_CHANPIDS_TYPES(1); +MOD_PARAM_DECLARE_CHANPIDS_TYPES(2); + +#define set_debug_dmx_chanpids_types(_dmx, _idx, _type)\ + do { \ + if ((_dmx) == 0) \ + debug_dmx0_chanpids_types[(_idx)] = (_type); \ + else if ((_dmx) == 1) \ + debug_dmx1_chanpids_types[(_idx)] = (_type); \ + else if ((_dmx) == 2) \ + debug_dmx2_chanpids_types[(_idx)] = (_type); \ + } while (0) + + +static int npids = CHANNEL_COUNT; +#define MOD_PARAM_DECLARE_CHANPIDS(_dmx) \ +MODULE_PARM_DESC(debug_dmx##_dmx##_chanpids, "\n\t\t pids of dmx channels"); \ +static short debug_dmx##_dmx##_chanpids[CHANNEL_COUNT] = \ + {[0 ... (CHANNEL_COUNT - 1)] = -1}; \ +module_param_array(debug_dmx##_dmx##_chanpids, short, &npids, 0444) + +#define CIPLUS_OUTPUT_AUTO 8 +static int ciplus_out_sel = CIPLUS_OUTPUT_AUTO; +static int ciplus_out_auto_mode = 1; +static u32 ciplus = 0; +#define CIPLUS_OUT_SEL 28 +#define CIPLUS_IN_SEL 26 + +MOD_PARAM_DECLARE_CHANPIDS(0); +MOD_PARAM_DECLARE_CHANPIDS(1); +MOD_PARAM_DECLARE_CHANPIDS(2); + +#define set_debug_dmx_chanpids(_dmx, _idx, _pid)\ + do { \ + if ((_dmx) == 0) \ + debug_dmx0_chanpids[(_idx)] = (_pid); \ + else if ((_dmx) == 1) \ + debug_dmx1_chanpids[(_idx)] = (_pid); \ + else if ((_dmx) == 2) \ + debug_dmx2_chanpids[(_idx)] = (_pid); \ + if (_pid == -1) \ + set_debug_dmx_chanpids_types(_dmx, _idx, -1); \ + } while (0) + +MODULE_PARM_DESC(debug_sf_user, "\n\t\t only for sf mode check"); +static int debug_sf_user; +module_param(debug_sf_user, int, 0444); + +MODULE_PARM_DESC(force_sec_sf, "\n\t\t force sf mode for sec filter"); +static int force_sec_sf; +module_param(force_sec_sf, int, 0644); + +MODULE_PARM_DESC(force_pes_sf, "\n\t\t force sf mode 
for pes filter"); +static int force_pes_sf; +module_param(force_pes_sf, int, 0644); + +MODULE_PARM_DESC(use_of_sop, "\n\t\t Enable use of sop input"); +static int use_of_sop; +module_param(use_of_sop, int, 0644); + +/* + As the default value of unused channel's PID_TYPE is 0x7, + if we use PID_TYPE(RECORDER_STREAM:0x7) for recording channel, + the data with the pid which assigned in unused channel's setting will be captured also. + To avoid the high bitrate(exists) of this pid's data flood the buffers in the data path, + which will causes the record data corruption, and bad picture decoded. +*/ +MODULE_PARM_DESC(g_chan_def_pid, "\n\t\t default pid for unused channel"); +static int g_chan_def_pid = 0x1FFE; +module_param(g_chan_def_pid, int, 0644); + + +/*#define CIPLUS_KEY0 0x16f8 +#define CIPLUS_KEY1 0x16f9 +#define CIPLUS_KEY2 0x16fa +#define CIPLUS_KEY3 0x16fb +#define CIPLUS_KEY_WR 0x16fc +#define CIPLUS_CONFIG 0x16fd +#define CIPLUS_ENDIAN 0x16fe*/ + + +static u32 old_stb_top_config; +static u32 old_fec_input_control; +static int have_old_stb_top_config = 1; +static int have_old_fec_input_control = 1; + +static long pes_off_pre[DMX_DEV_COUNT]; + +static void +dmx_write_reg(int r, u32 v) +{ + u32 oldv, mask; + + if (disable_dsc) { + if (r == STB_TOP_CONFIG) { + if (have_old_stb_top_config) { + oldv = old_stb_top_config; + have_old_stb_top_config = 0; + } else { + oldv = READ_MPEG_REG(STB_TOP_CONFIG); + } + + mask = (1<<7)|(1<<15)|(3<<26)|(7<<28); + v &= ~mask; + v |= (oldv & mask); + } else if (r == FEC_INPUT_CONTROL) { + if (have_old_fec_input_control) { + oldv = old_fec_input_control; + have_old_fec_input_control = 0; + } else { + oldv = READ_MPEG_REG(FEC_INPUT_CONTROL); + } + + mask = (1<<15); + v &= ~mask; + v |= (oldv & mask); + } else if ((r == RESET1_REGISTER) || (r == RESET3_REGISTER)) { + if (!have_old_stb_top_config) { + have_old_stb_top_config = 1; + old_stb_top_config = + READ_MPEG_REG(STB_TOP_CONFIG); + } + if (!have_old_fec_input_control) { + 
have_old_fec_input_control = 1; + old_fec_input_control = + READ_MPEG_REG(FEC_INPUT_CONTROL); + } + } else if ((r == TS_PL_PID_INDEX) || (r == TS_PL_PID_DATA) + || (r == COMM_DESC_KEY0) + || (r == COMM_DESC_KEY1) + || (r == COMM_DESC_KEY_RW) + || (r == CIPLUS_KEY0) + || (r == CIPLUS_KEY1) + || (r == CIPLUS_KEY2) + || (r == CIPLUS_KEY3) + || (r == CIPLUS_KEY_WR) + || (r == CIPLUS_CONFIG) + || (r == CIPLUS_ENDIAN)) { + return; + } + } + WRITE_MPEG_REG(r, v); +} + +#undef WRITE_MPEG_REG +#define WRITE_MPEG_REG(r, v) dmx_write_reg(r, v) + +#define DMX_READ_REG(i, r)\ + ((i)?((i == 1)?READ_MPEG_REG(r##_2) :\ + READ_MPEG_REG(r##_3)) : READ_MPEG_REG(r)) + +#define DMX_WRITE_REG(i, r, d)\ + do {\ + if (i == 1) {\ + WRITE_MPEG_REG(r##_2, d);\ + } else if (i == 2) {\ + WRITE_MPEG_REG(r##_3, d);\ + } \ + else {\ + WRITE_MPEG_REG(r, d);\ + } \ + } while (0) + +#define READ_PERI_REG READ_CBUS_REG +#define WRITE_PERI_REG WRITE_CBUS_REG + +#define READ_ASYNC_FIFO_REG(i, r)\ + ((i) ? ((i-1)?READ_PERI_REG(ASYNC_FIFO1_##r):\ + READ_PERI_REG(ASYNC_FIFO2_##r)) : READ_PERI_REG(ASYNC_FIFO_##r)) + +#define WRITE_ASYNC_FIFO_REG(i, r, d)\ + do {\ + if (i == 2) {\ + WRITE_PERI_REG(ASYNC_FIFO1_##r, d);\ + } else if (i == 0) {\ + WRITE_PERI_REG(ASYNC_FIFO_##r, d);\ + } else {\ + WRITE_PERI_REG(ASYNC_FIFO2_##r, d);\ + } \ + } while (0) + +#define CLEAR_ASYNC_FIFO_REG_MASK(i, reg, mask) \ + WRITE_ASYNC_FIFO_REG(i, reg, \ + (READ_ASYNC_FIFO_REG(i, reg)&(~(mask)))) + +#define DVR_FEED(f) \ + ((f) && ((f)->type == DMX_TYPE_TS) && \ + (((f)->ts_type & (TS_PACKET | TS_DEMUX)) == TS_PACKET)) + +#define MOD_PARAM_DECLARE_CHANREC(_dmx) \ +MODULE_PARM_DESC(dmx##_dmx##_chanrec_enable, \ + "\n\t\t record by channel, one time use in the beginning"); \ +static int dmx##_dmx##_chanrec_enable; \ +module_param(dmx##_dmx##_chanrec_enable, int, 0644); \ +MODULE_PARM_DESC(dmx##_dmx##_chanrec, "\n\t\t record channels bits"); \ +static int dmx##_dmx##_chanrec; \ +module_param(dmx##_dmx##_chanrec, int, 0644) + 
+MOD_PARAM_DECLARE_CHANREC(0); +MOD_PARAM_DECLARE_CHANREC(1); +MOD_PARAM_DECLARE_CHANREC(2); + +#define MOD_PARAM_DECLARE_CHANPROC(_dmx) \ +MODULE_PARM_DESC(dmx##_dmx##_chanproc_enable, "channel further processing"); \ +static int dmx##_dmx##_chanproc_enable; \ +module_param(dmx##_dmx##_chanproc_enable, int, 0644); \ +MODULE_PARM_DESC(dmx##_dmx##_chanproc, "further process channels bits"); \ +static int dmx##_dmx##_chanproc; \ +module_param(dmx##_dmx##_chanproc, int, 0644) + +MOD_PARAM_DECLARE_CHANPROC(0); +MOD_PARAM_DECLARE_CHANPROC(1); +MOD_PARAM_DECLARE_CHANPROC(2); + +#define DMX_CH_OP_CHANREC 0 +#define DMX_CH_OP_CHANPROC 1 + +static inline int _setbit(int v, int b) { return v|(1<<b); } +static inline int _clrbit(int v, int b) { return v&~(1<<b); } +static inline int _set(int v, int b) { return b; } + +static int dsc_set_csa_key(struct aml_dsc_channel *ch, int flags, + enum ca_cw_type type, u8 *key); +static int dsc_set_aes_des_sm4_key(struct aml_dsc_channel *ch, int flags, + enum ca_cw_type type, u8 *key); +static void aml_ci_plus_disable(void); +static void am_ci_plus_set_output(struct aml_dsc_channel *ch); +static int set_subtitle_pes_buffer(struct aml_dmx *dmx); + +static void dmxn_op_chan(int dmx, int ch, int(*op)(int, int), int ch_op) +{ + int enable_0, enable_1, enable_2; + int *set_0, *set_1, *set_2; + int reg; + + if (ch_op == DMX_CH_OP_CHANREC) { + enable_0 = dmx0_chanrec_enable; + enable_1 = dmx1_chanrec_enable; + enable_2 = dmx2_chanrec_enable; + set_0 = &dmx0_chanrec; + set_1 = &dmx1_chanrec; + set_2 = &dmx2_chanrec; + reg = DEMUX_CHAN_RECORD_EN; + } else if (ch_op == DMX_CH_OP_CHANPROC) { + enable_0 = dmx0_chanproc_enable; + enable_1 = dmx1_chanproc_enable; + enable_2 = dmx2_chanproc_enable; + set_0 = &dmx0_chanproc; + set_1 = &dmx1_chanproc; + set_2 = &dmx2_chanproc; + reg = DEMUX_CHAN_PROCESS_EN; + } else { + return; + } + if (dmx == 0) { + if (enable_0) { + *set_0 = op(*set_0, ch); + WRITE_MPEG_REG(reg+DEMUX_1_OFFSET, *set_0); + } + } else if 
(dmx == 1) { + if (enable_1) { + *set_1 = op(*set_1, ch); + WRITE_MPEG_REG(reg+DEMUX_2_OFFSET, *set_1); + } + } else if (dmx == 2) { + if (enable_2) { + *set_2 = op(*set_2, ch); + WRITE_MPEG_REG(reg+DEMUX_3_OFFSET, *set_2); + } + } +} +#define dmx_add_recchan(_dmx, _chid) \ + do { \ + pr_dbg("dmx[%d]_add_recchan[%d]\n", _dmx, _chid); \ + dmxn_op_chan(_dmx, _chid, _setbit, DMX_CH_OP_CHANREC); \ + } while (0) +#define dmx_rm_recchan(_dmx, _chid) \ + do { \ + pr_dbg("dmx[%d]_rm_recchan[%ld]\n", _dmx, _chid); \ + dmxn_op_chan(_dmx, _chid, _clrbit, DMX_CH_OP_CHANREC); \ + } while (0) +#define dmx_set_recchan(_dmx, _chs) \ + do { \ + pr_dbg("dmx[%d]_set_recchan[%d]\n", _dmx, _chs); \ + dmxn_op_chan(_dmx, _chs, _set, DMX_CH_OP_CHANREC); \ + } while (0) + +#define dmx_add_procchan(_dmx, _chid) \ + do { \ + pr_dbg("dmx[%d]_add_procchan[%d]\n", _dmx, _chid); \ + dmxn_op_chan(_dmx, _chid, _setbit, DMX_CH_OP_CHANPROC); \ + } while (0) +#define dmx_rm_procchan(_dmx, _chid) \ + do { \ + pr_dbg("dmx[%d]_rm_procchan[%ld]\n", _dmx, _chid); \ + dmxn_op_chan(_dmx, _chid, _clrbit, DMX_CH_OP_CHANPROC); \ + } while (0) +#define dmx_set_procchan(_dmx, _chs) \ + do { \ + pr_dbg("dmx[%d]_set_procchan[%d]\n", _dmx, _chs); \ + dmxn_op_chan(_dmx, _chs, _set, DMX_CH_OP_CHANPROC); \ + } while (0) + +#define NO_SUB +#define SUB_BUF_DMX +#define SUB_PARSER + +#ifndef SUB_BUF_DMX +#undef SUB_PARSER +#endif + +#define SUB_BUF_SHARED +#define PES_BUF_SHARED + +#define SYS_CHAN_COUNT (4) +#define SEC_GRP_LEN_0 (0xc) +#define SEC_GRP_LEN_1 (0xc) +#define SEC_GRP_LEN_2 (0xc) +#define SEC_GRP_LEN_3 (0xc) +#define LARGE_SEC_BUFF_MASK 0xFFFFFFFF +#define LARGE_SEC_BUFF_COUNT 32 +#define WATCHDOG_TIMER 250 +#define ASYNCFIFO_BUFFER_SIZE_DEFAULT (512*1024) + +#define DEMUX_INT_MASK\ + ((0<<(AUDIO_SPLICING_POINT)) |\ + (0<<(VIDEO_SPLICING_POINT)) |\ + (1<<(OTHER_PES_READY)) |\ + (1<<(PCR_READY)) |\ + (1<<(SUB_PES_READY)) |\ + (1<<(SECTION_BUFFER_READY)) |\ + (0<<(OM_CMD_READ_PENDING)) |\ + 
(1<<(TS_ERROR_PIN)) |\ + (1<<(NEW_PDTS_READY)) |\ + (0<<(DUPLICATED_PACKET)) |\ + (0<<(DIS_CONTINUITY_PACKET))) + +#define TS_SRC_MAX 3 + +/*Reset the demux device*/ +#define RESET_DEMUX2 (1<<15) +#define RESET_DEMUX1 (1<<14) +#define RESET_DEMUX0 (1<<13) +#define RESET_S2P1 (1<<12) +#define RESET_S2P0 (1<<11) +#define RESET_DES (1<<10) +#define RESET_TOP (1<<9) + +static int dmx_remove_feed(struct aml_dmx *dmx, struct dvb_demux_feed *feed); +static void reset_async_fifos(struct aml_dvb *dvb); +static int dmx_add_feed(struct aml_dmx *dmx, struct dvb_demux_feed *feed); +static int dmx_smallsec_set(struct aml_smallsec *ss, int enable, int bufsize, + int force); +static int dmx_timeout_set(struct aml_dmxtimeout *dto, int enable, + int timeout, int ch_dis, int nomatch, + int force); + +/*Audio & Video PTS value*/ +static u32 video_pts = 0; +static u32 audio_pts = 0; +static u32 video_pts_bit32 = 0; +static u32 audio_pts_bit32 = 0; +static u32 first_video_pts = 0; +static u32 first_audio_pts = 0; +static int demux_skipbyte; +static int tsfile_clkdiv = 5; +static int asyncfifo_buf_len = ASYNCFIFO_BUFFER_SIZE_DEFAULT; + +#define SF_DMX_ID 2 +#define SF_AFIFO_ID 1 + +#define sf_dmx_sf(_dmx) \ + (((_dmx)->id == SF_DMX_ID) \ + && ((struct aml_dvb *)(_dmx)->demux.priv)->swfilter.user) +#define sf_afifo_sf(_afifo) \ + (((_afifo)->id == SF_AFIFO_ID) && (_afifo)->dvb->swfilter.user) +#define dmx_get_dev(dmx) (((struct aml_dvb *)((dmx)->demux.priv))->dev) +#define asyncfifo_get_dev(afifo) ((afifo)->dvb->dev) + + +int dmx_phyreg_access(unsigned int reg, unsigned int writeval, + unsigned int *readval) +{ + void __iomem *vaddr; + + reg = round_down(reg, 0x3); + vaddr = ioremap(reg, 0x4); + if (!vaddr) + return -ENOMEM; + + if (readval) + *readval = readl_relaxed(vaddr); + else + writel_relaxed(writeval, vaddr); + iounmap(vaddr); + return 0; +} + +/*Section buffer watchdog*/ +static void section_buffer_watchdog_func(struct timer_list * timer) +{ + struct aml_dvb *dvb = 
from_timer(dvb,timer,watchdog_timer); + struct aml_dmx *dmx; + u32 section_busy32 = 0, om_cmd_status32 = 0, + demux_channel_activity32 = 0; + u16 demux_int_status1 = 0; + u32 device_no = 0; + u32 filter_number = 0; + u32 i = 0; + unsigned long flags; + + spin_lock_irqsave(&dvb->slock, flags); + + for (device_no = 0; device_no < DMX_DEV_COUNT; device_no++) { + + dmx = &dvb->dmx[device_no]; + + if (dvb->dmx_watchdog_disable[device_no]) + continue; + + if (!dmx->init) + continue; + + om_cmd_status32 = + DMX_READ_REG(device_no, OM_CMD_STATUS); + demux_channel_activity32 = + DMX_READ_REG(device_no, DEMUX_CHANNEL_ACTIVITY); + section_busy32 = + DMX_READ_REG(device_no, SEC_BUFF_BUSY); + + if (om_cmd_status32 & 0x8fc2) { + /* bit 15:12 -- om_cmd_count (read only) */ + /* bit 11:9 -- overflow_count */ + /* bit 11:9 -- om_cmd_wr_ptr(read only) */ + /* bit 8:6 -- om_overwrite_count */ + /* bit 8:6 -- om_cmd_rd_ptr(read only) */ + /* bit 5:3 -- type_stb_om_w_rd(read only) */ + /* bit 2 -- unit_start_stb_om_w_rd(read only) */ + /* bit 1 -- om_cmd_overflow(read only) */ + /* bit 0 -- om_cmd_pending(read) */ + /* bit 0 -- om_cmd_read_finished(write) */ + /*BUG: If the recoder is running, return */ + if (!dmx->record) { + /* OM status is wrong */ + dmx->om_status_error_count++; + pr_dbg("demux om status \n" + "%04x\t%03x\t%03x\t%03x\t%01x\t%01x\t" + "%x\t%x\tdmx%d:status:0x%xerr_cnt:%d-%d\n", + (om_cmd_status32 >> 12) & 0xf, + (om_cmd_status32 >> 9) & 0x7, + (om_cmd_status32 >> 6) & 0x7, + (om_cmd_status32 >> 3) & 0x7, + (om_cmd_status32 >> 2) & 0x1, + (om_cmd_status32 >> 1) & 0x1, + demux_channel_activity32, section_busy32, + dmx->id, om_cmd_status32, dmx->om_status_error_count, enable_sec_monitor); + if (enable_sec_monitor && + dmx->om_status_error_count > enable_sec_monitor) { + /*Reset the demux */ + dmx_reset_dmx_hw_ex_unlock(dvb, dmx, 0); + /* Reset the error count */ + dmx->om_status_error_count = 0; + goto end; + } + } + } else { + /* OM status is correct, reset the error 
count */ + dmx->om_status_error_count = 0; + } + section_busy32 = + DMX_READ_REG(device_no, SEC_BUFF_BUSY); + if (LARGE_SEC_BUFF_MASK == + (section_busy32 & LARGE_SEC_BUFF_MASK)) { + /*All the largest section buffers occupied, + * clear buffers + */ + DMX_WRITE_REG(device_no, + SEC_BUFF_READY, section_busy32); + } else { + for (i = 0; i < SEC_BUF_COUNT; i++) { + if (!(section_busy32 & (1 << i))) + continue; + DMX_WRITE_REG(device_no, SEC_BUFF_NUMBER, i); + filter_number = DMX_READ_REG(device_no, + SEC_BUFF_NUMBER); + filter_number >>= 8; + if ((filter_number >= FILTER_COUNT) + /* >=31, do not handle this case */ + || ((filter_number < FILTER_COUNT) + && dmx->filter[filter_number].used)) + section_busy32 &= ~(1 << i); + } + if (section_busy32 & (dmx->smallsec.enable ? + 0x7FFFFFFF : + LARGE_SEC_BUFF_MASK)) { + /*Clear invalid buffers */ + DMX_WRITE_REG(device_no, + SEC_BUFF_READY, + section_busy32); + pr_error("clear invalid buffer 0x%x\n", + section_busy32); + } +#if 0 + section_busy32 = 0x7fffffff; + for (i = 0; i < SEC_BUF_BUSY_SIZE; i++) { + dmx->section_busy[i] = ( + (i == SEC_BUF_BUSY_SIZE - 1) ? 
+ DMX_READ_REG(device_no, SEC_BUFF_BUSY) : + dmx->section_busy[i + 1]); + section_busy32 &= dmx->section_busy[i]; + } + + /*count the number of '1' bits */ + i = section_busy32; + i = (i & 0x55555555) + ((i & 0xaaaaaaaa) >> 1); + i = (i & 0x33333333) + ((i & 0xcccccccc) >> 2); + i = (i & 0x0f0f0f0f) + ((i & 0xf0f0f0f0) >> 4); + i = (i & 0x00ff00ff) + ((i & 0xff00ff00) >> 8); + i = (i & 0x0000ffff) + ((i & 0xffff0000) >> 16); + if (i > LARGE_SEC_BUFF_COUNT) { + /*too long some of the section + * buffers are being processed + */ + DMX_WRITE_REG(device_no, SEC_BUFF_READY, + section_busy32); + } +#endif + } + demux_int_status1 = + DMX_READ_REG(device_no, STB_INT_STATUS) & 0xfff7; + if (demux_int_status1 & (1 << TS_ERROR_PIN)) { + DMX_WRITE_REG(device_no, + STB_INT_STATUS, + (1 << TS_ERROR_PIN)); + } + } + +end: + spin_unlock_irqrestore(&dvb->slock, flags); +#ifdef ENABLE_SEC_BUFF_WATCHDOG + mod_timer(&dvb->watchdog_timer, + jiffies + msecs_to_jiffies(WATCHDOG_TIMER)); +#endif +} + +static inline int sec_filter_match(struct aml_dmx *dmx, struct aml_filter *f, + u8 *p) +{ + int b; + u8 neq = 0; + + if (!f->used || !dmx->channel[f->chan_id].used) + return 0; + + for (b = 0; b < FILTER_LEN; b++) { + u8 xor = p[b] ^ f->value[b]; + + if (xor & f->maskandmode[b]) + return 0; + + if (xor & f->maskandnotmode[b]) + neq = 1; + } + + if (f->neq && !neq) + return 0; + + return 1; +} + +static void trigger_crc_monitor(struct aml_dmx *dmx) +{ + if (!dmx->crc_check_time) { + dmx->crc_check_time = jiffies; + dmx->crc_check_count = 0; + } + + if (dmx->crc_check_count > 100) { + if (jiffies_to_msecs(jiffies - dmx->crc_check_time) <= 1000) { + struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + + pr_error("Too many crc fail (%d crc fail in %d ms)!\n", + dmx->crc_check_count, + jiffies_to_msecs(jiffies - dmx->crc_check_time) + ); + dmx_reset_dmx_hw_ex_unlock(dvb, dmx, 0); + } + dmx->crc_check_time = 0; + } + + dmx->crc_check_count++; +} +static int section_crc(struct aml_dmx *dmx, 
struct aml_filter *f, u8 *p) +{ + int sec_len = (((p[1] & 0xF) << 8) | p[2]) + 3; + struct dvb_demux_feed *feed = dmx->channel[f->chan_id].feed; + + if (feed->feed.sec.check_crc) { + struct dvb_demux *demux = feed->demux; + struct dmx_section_feed *sec = &feed->feed.sec; + int section_syntax_indicator; + + section_syntax_indicator = ((p[1] & 0x80) != 0); + sec->seclen = sec_len; + sec->crc_val = ~0; + if (demux->check_crc32(feed, p, sec_len)) { + pr_error("section CRC check failed! pid[%d]\n", feed->pid); + +#if 0 +{ + int i; + + pr_error("sec:[%#lx:%#lx:%#lx-%#lx:%#lx:%#lx-%#lx:%#lx:%#lx]\n", + dmx->sec_cnt[0], dmx->sec_cnt_match[0], dmx->sec_cnt_crc_fail[0], + dmx->sec_cnt[1], dmx->sec_cnt_match[1], dmx->sec_cnt_crc_fail[1], + dmx->sec_cnt[2], dmx->sec_cnt_match[2], dmx->sec_cnt_crc_fail[2]); + pr_error("bad sec[%d]:\n", sec_len); + /* + * if (sec_len > 256) + * sec_len = 256; + * for (i = 0; i < sec_len; i++) { + * pr_err("%02x ", p[i]); + * if (!((i + 1) % 16)) + * pr_err("\n"); + * } + */ +} +#endif + trigger_crc_monitor(dmx); + return 0; + } +#if 0 + int i; + + for (i = 0; i < sec_len; i++) { + pr_dbg("%02x ", p[i]); + if (!((i + 1) % 16)) + pr_dbg("\n"); + } + pr_dbg("\nsection data\n"); +#endif + } + + return 1; +} + +static void section_notify(struct aml_dmx *dmx, struct aml_filter *f, u8 *p) +{ + int sec_len = (((p[1] & 0xF) << 8) | p[2]) + 3; + struct dvb_demux_feed *feed = dmx->channel[f->chan_id].feed; + + if (feed && feed->cb.sec) + feed->cb.sec(p, sec_len, NULL, 0, f->filter,0); +} + +static void hardware_match_section(struct aml_dmx *dmx, + u16 sec_num, u16 buf_num) +{ + u8 *p = (u8 *) dmx->sec_buf[buf_num].addr; + struct aml_filter *f; + int chid, i; + int need_crc = 1; + + if (sec_num >= FILTER_COUNT) { + pr_dbg("sec_num invalid: %d\n", sec_num); + return; + } + + dma_sync_single_for_cpu(dmx_get_dev(dmx), + dmx->sec_pages_map + (buf_num << 0x0c), + (1 << 0x0c), DMA_FROM_DEVICE); + + f = &dmx->filter[sec_num]; + chid = f->chan_id; + + 
dmx->sec_cnt[SEC_CNT_HW]++; + + for (i = 0; i < FILTER_COUNT; i++) { + f = &dmx->filter[i]; + if (f->chan_id != chid) + continue; + if (sec_filter_match(dmx, f, p)) { + if (need_crc) { + dmx->sec_cnt_match[SEC_CNT_HW]++; + if (!section_crc(dmx, f, p)) { + dmx->sec_cnt_crc_fail[SEC_CNT_HW]++; + return; + } + need_crc = 0; + } + section_notify(dmx, f, p); + } + } +} + +static void software_match_section(struct aml_dmx *dmx, u16 buf_num) +{ + u8 *p = (u8 *) dmx->sec_buf[buf_num].addr; + struct aml_filter *f, *fmatch = NULL; + int i, fid = -1; + + dma_sync_single_for_cpu(dmx_get_dev(dmx), + dmx->sec_pages_map + (buf_num << 0x0c), + (1 << 0x0c), DMA_FROM_DEVICE); + + dmx->sec_cnt[SEC_CNT_SW]++; + + for (i = 0; i < FILTER_COUNT; i++) { + f = &dmx->filter[i]; + + if (sec_filter_match(dmx, f, p)) { + pr_dbg("[software match]filter %d match, pid %d\n", + i, dmx->channel[f->chan_id].pid); + if (!fmatch) { + fmatch = f; + fid = i; + } else { + pr_error("[sw match]Muli-filter match this\n" + "section, will skip this section\n"); + return; + } + } + } + + if (fmatch) { + pr_dbg("[software match]dispatch\n" + "section to filter %d pid %d\n", + fid, dmx->channel[fmatch->chan_id].pid); + dmx->sec_cnt_match[SEC_CNT_SW]++; + if (section_crc(dmx, fmatch, p)) + section_notify(dmx, fmatch, p); + else + dmx->sec_cnt_crc_fail[SEC_CNT_SW]++; + } else { + pr_dbg("[software match]this section do not\n" + "match any filter!!!\n"); + } +} + + +static int _rbuf_write(struct dvb_ringbuffer *buf, const u8 *src, size_t len) +{ + ssize_t free; + + if (!len) + return 0; + if (!buf->data) + return 0; + + free = dvb_ringbuffer_free(buf); + if (len > free) { + pr_error("sf: buffer overflow\n"); + return -EOVERFLOW; + } + + return dvb_ringbuffer_write(buf, src, len); +} + +static int _rbuf_filter_pkts(struct dvb_ringbuffer *rb, + u8 *wrapbuf, + void (*swfilter_packets)(struct dvb_demux *demux, + const u8 *buf, + size_t count), + struct dvb_demux *demux) +{ + ssize_t len1 = 0; + ssize_t len2 = 0; + 
size_t off; + size_t count; + size_t size; + + if (debug_irq & 0x4) + dump(&rb->data[rb->pread], (debug_irq & 0xFFF00) >> 8); + + /* + * rb|====--------===[0x47]====| + * ^ ^ + * wr rd + */ + + len1 = rb->pwrite - rb->pread; + if (len1 < 0) { + len1 = rb->size - rb->pread; + len2 = rb->pwrite; + } + + for (off = 0; off < len1; off++) { + if (rb->data[rb->pread + off] == 0x47) + break; + } + + if (off) + pr_dbg_irq_sf("off ->|%zd\n", off); + + len1 -= off; + rb->pread = (rb->pread + off) % rb->size; + + count = len1 / 188; + if (count) { + pr_dbg_irq_sf("pkt >> 1[%zd<->%zd]\n", rb->pread, rb->pwrite); + swfilter_packets(demux, rb->data + rb->pread, count); + + size = count * 188; + len1 -= size; + rb->pread += size; + } + + if (len2 && len1 && ((len1 + len2) > 188)) { + pr_dbg_irq_sf("pkt >> 2[%zd<->%zd]\n", rb->pread, rb->pwrite); + size = 188 - len1; + memcpy(wrapbuf, rb->data + rb->pread, len1); + memcpy(wrapbuf + len1, rb->data, size); + swfilter_packets(demux, wrapbuf, 1); + rb->pread = size; + len2 -= size; + } + + if (len2) { + pr_dbg_irq_sf("pkt >> 3[%zd<->%zd]\n", rb->pread, rb->pwrite); + count = len2 / 188; + if (count) { + swfilter_packets(demux, rb->data + rb->pread, count); + rb->pread += count * 188; + } + } + return 0; +} + +static void smallsection_match_section(struct aml_dmx *dmx, u8 *p, u16 sec_num) +{ + struct aml_filter *f; + int chid, i; + int need_crc = 1; + + if (sec_num >= FILTER_COUNT) { + pr_dbg("sec_num invalid: %d\n", sec_num); + return; + } + + f = &dmx->filter[sec_num]; + chid = f->chan_id; + + dmx->sec_cnt[SEC_CNT_SS]++; + + for (i = 0; i < FILTER_COUNT; i++) { + f = &dmx->filter[i]; + if (f->chan_id != chid) + continue; + if (sec_filter_match(dmx, f, p)) { + if (need_crc) { + dmx->sec_cnt_match[SEC_CNT_SS]++; + if (!section_crc(dmx, f, p)) { + dmx->sec_cnt_crc_fail[SEC_CNT_SS]++; + return; + } + need_crc = 0; + } + section_notify(dmx, f, p); + } + } + +} +static void process_smallsection(struct aml_dmx *dmx) +{ + + u32 v, wr, rd; + 
u32 data32; + struct aml_smallsec *ss = &dmx->smallsec; + + v = DMX_READ_REG(dmx->id, DEMUX_SMALL_SEC_CTL); + wr = (v >> 8) & 0xff; + rd = (v >> 16) & 0xff; + + if (rd != wr) { + int n1 = wr - rd, + n2 = 0, + max = (ss->bufsize>>8); + int i; + u8 *p; + int sec_len; + + pr_dbg_irq_ss("secbuf[31] ctrl:0x%x\n", v); + + if (n1 < 0) { + n1 = max - rd; + n2 = wr; + } + if (n1) { + pr_dbg_irq_ss("n1:%d\n", n1); + dma_sync_single_for_cpu(dmx_get_dev(dmx), + ss->buf_map+(rd<<8), + n1<<8, + DMA_FROM_DEVICE); + for (i = 0; i < n1; i++) { + p = (u8 *)ss->buf+((rd+i)<<8); + sec_len = (((p[1] & 0xF) << 8) | p[2]) + 3; + smallsection_match_section(dmx, p, + *(p+sec_len+1)); + } + } + if (n2) { + pr_dbg_irq_ss("n2:%d\n", n2); + dma_sync_single_for_cpu(dmx_get_dev(dmx), + ss->buf_map, + n2<<8, + DMA_FROM_DEVICE); + for (i = 0; i < n2; i++) { + p = (u8 *)ss->buf+(i<<8); + sec_len = (((p[1] & 0xF) << 8) | p[2]) + 3; + smallsection_match_section(dmx, p, + *(p+sec_len+1)); + } + } + + rd = wr; + data32 = (DMX_READ_REG(dmx->id, DEMUX_SMALL_SEC_CTL) + & 0xff00ffff) + | (rd << 16); + DMX_WRITE_REG(dmx->id, DEMUX_SMALL_SEC_CTL, data32); + } +} + + +static void process_section(struct aml_dmx *dmx) +{ + u32 ready, i, sec_busy; + u16 sec_num; + + /*pr_dbg("section\n"); */ + ready = DMX_READ_REG(dmx->id, SEC_BUFF_READY); + if (ready) { +#ifdef USE_AHB_MODE + /* WRITE_ISA_REG(AHB_BRIDGE_CTRL1, + * READ_ISA_REG (AHB_BRIDGE_CTRL1) | (1 << 31)); + */ + /* WRITE_ISA_REG(AHB_BRIDGE_CTRL1, + * READ_ISA_REG (AHB_BRIDGE_CTRL1) & (~ (1 << 31))); + */ +#endif + + if ((ready & (1<<31)) && dmx->smallsec.enable) { + u32 v, wr, rd; + + v = DMX_READ_REG(dmx->id, DEMUX_SMALL_SEC_CTL); + wr = (v >> 8) & 0xff; + rd = (v >> 16) & 0xff; + if ((wr < rd) && (5 > (rd - wr))) + pr_error("warning: small ss buf [w%dr%d]\n", + wr, rd); + pr_dbg_irq_ss("ss>%x\n", + DMX_READ_REG(dmx->id, DEMUX_SMALL_SEC_CTL)); + process_smallsection(dmx); + /*tasklet_hi_schedule(&dmx->dmx_tasklet);*/ + 
/*tasklet_schedule(&dmx->dmx_tasklet);*/ + DMX_WRITE_REG(dmx->id, SEC_BUFF_READY, (1<<31)); + return; + } + + for (i = 0; i < SEC_BUF_COUNT; i++) { + + if (!(ready & (1 << i))) + continue; + + /* get section busy */ + sec_busy = DMX_READ_REG(dmx->id, SEC_BUFF_BUSY); + /* get filter number */ + DMX_WRITE_REG(dmx->id, SEC_BUFF_NUMBER, i); + sec_num = (DMX_READ_REG(dmx->id, SEC_BUFF_NUMBER) >> 8); + + /* + * sec_buf_watchdog_count dispatch: + * byte0 -- always busy=0 's watchdog count + * byte1 -- always busy=1 & filter_num=31 's + * watchdog count + */ + + /* sec_busy is not set, check busy=0 watchdog count */ + if (!(sec_busy & (1 << i))) { + /* clear other wd count of this buffer */ + dmx->sec_buf_watchdog_count[i] &= 0x000000ff; + dmx->sec_buf_watchdog_count[i] += 0x1; + pr_dbg("bit%d ready=1, busy=0,\n" + "sec_num=%d for %d times\n", + i, sec_num, + dmx->sec_buf_watchdog_count[i]); + if (dmx->sec_buf_watchdog_count[i] >= 5) { + pr_dbg("busy=0 reach the max count,\n" + "try software match.\n"); + software_match_section(dmx, i); + dmx->sec_buf_watchdog_count[i] = 0; + DMX_WRITE_REG(dmx->id, SEC_BUFF_READY, + (1 << i)); + } + continue; + } + + /* filter_num == 31 && busy == 1,check watchdog count */ + if (sec_num >= FILTER_COUNT) { + /* clear other wd count of this buffer */ + dmx->sec_buf_watchdog_count[i] &= 0x0000ff00; + dmx->sec_buf_watchdog_count[i] += 0x100; + pr_dbg("bit%d ready=1,busy=1,\n" + "sec_num=%d for %d times\n", + i, sec_num, + dmx->sec_buf_watchdog_count[i] >> 8); + if (dmx->sec_buf_watchdog_count[i] >= 0x500) { + pr_dbg("busy=1&filter_num=31\n" + " reach the max count, clear\n" + " the buf ready & busy!\n"); + software_match_section(dmx, i); + dmx->sec_buf_watchdog_count[i] = 0; + DMX_WRITE_REG(dmx->id, + SEC_BUFF_READY, + (1 << i)); + DMX_WRITE_REG(dmx->id, + SEC_BUFF_BUSY, + (1 << i)); + } + continue; + } + + /* now, ready & busy are both set and + * filter number is valid + */ + if (dmx->sec_buf_watchdog_count[i] != 0) + 
dmx->sec_buf_watchdog_count[i] = 0; + + /* process this section */ + hardware_match_section(dmx, sec_num, i); + + /* clear the ready & busy bit */ + DMX_WRITE_REG(dmx->id, SEC_BUFF_READY, (1 << i)); + DMX_WRITE_REG(dmx->id, SEC_BUFF_BUSY, (1 << i)); + } + } +} + +#ifdef NO_SUB +static void process_sub(struct aml_dmx *dmx) +{ + + u32 rd_ptr = 0; + + u32 wr_ptr = READ_MPEG_REG(PARSER_SUB_WP); + u32 start_ptr = READ_MPEG_REG(PARSER_SUB_START_PTR); + u32 end_ptr = READ_MPEG_REG(PARSER_SUB_END_PTR); + + u32 buffer1 = 0, buffer2 = 0; + u8 *buffer1_virt = 0, *buffer2_virt = 0; + u32 len1 = 0, len2 = 0; + + if (!dmx->sub_buf_base_virt) + return; + + rd_ptr = READ_MPEG_REG(PARSER_SUB_RP); + if (!rd_ptr) + return; + if (rd_ptr > wr_ptr) { + len1 = end_ptr - rd_ptr + 8; + buffer1 = rd_ptr; + + len2 = wr_ptr - start_ptr; + buffer2 = start_ptr; + + rd_ptr = start_ptr + len2; + } else if (rd_ptr < wr_ptr) { + len1 = wr_ptr - rd_ptr; + buffer1 = rd_ptr; + rd_ptr += len1; + len2 = 0; + } else if (rd_ptr == wr_ptr) { + pr_dbg("sub no data\n"); + } + + if (buffer1 && len1) +#ifdef SUB_BUF_DMX + buffer1_virt = (void *)dmx->sub_pages + (buffer1 - start_ptr); +#else + buffer1_virt = (void *)dmx->sub_buf_base_virt + (buffer1 - start_ptr); +#endif + + if (buffer2 && len2) +#ifdef SUB_BUF_DMX + buffer2_virt = (void *)dmx->sub_pages + (buffer2 - start_ptr); +#else + buffer2_virt = (void *)dmx->sub_buf_base_virt + (buffer2 - start_ptr); +#endif + + pr_dbg_irq_sub("sub: rd_ptr:%x buf1:%x len1:%d buf2:%x len2:%d\n", + rd_ptr, buffer1, len1, buffer2, len2); + pr_dbg_irq_sub("sub: buf1_virt:%p buf2_virt:%p\n", + buffer1_virt, buffer2_virt); + + if (len1) + dma_sync_single_for_cpu(dmx_get_dev(dmx), + (dma_addr_t) buffer1, len1, + DMA_FROM_DEVICE); + if (len2) + dma_sync_single_for_cpu(dmx_get_dev(dmx), + (dma_addr_t) buffer2, len2, + DMA_FROM_DEVICE); + + if (dmx->channel[2].used) { + if (dmx->channel[2].feed && dmx->channel[2].feed->cb.ts && + ((buffer1_virt != NULL && len1 !=0 ) || 
(buffer2_virt != NULL && len2 != 0))) + { + dmx->channel[2].feed->cb.ts(buffer1_virt, len1, + buffer2_virt, len2, + &dmx->channel[2].feed->feed.ts,0); + } + } + WRITE_MPEG_REG(PARSER_SUB_RP, rd_ptr); +} +#endif + +static void process_pes(struct aml_dmx *dmx) +{ + long off, off_pre = pes_off_pre[dmx->id]; + u8 *buffer1 = 0, *buffer2 = 0; + u8 *buffer1_phys = 0, *buffer2_phys = 0; + u32 len1 = 0, len2 = 0; + int i = 1; + + off = (DMX_READ_REG(dmx->id, OTHER_WR_PTR) << 3); + + pr_dbg_irq_pes("[%d]WR:0x%x PES WR:0x%x\n", dmx->id, + DMX_READ_REG(dmx->id, OTHER_WR_PTR), + DMX_READ_REG(dmx->id, OB_PES_WR_PTR)); + buffer1 = (u8 *)(dmx->pes_pages + off_pre); + pr_dbg_irq_pes("[%d]PES WR[%02x %02x %02x %02x %02x %02x %02x %02x", + dmx->id, + buffer1[0], buffer1[1], buffer1[2], buffer1[3], + buffer1[4], buffer1[5], buffer1[6], buffer1[7]); + pr_dbg_irq_pes(" %02x %02x %02x %02x %02x %02x %02x %02x]\n", + buffer1[8], buffer1[9], buffer1[10], buffer1[11], + buffer1[12], buffer1[13], buffer1[14], buffer1[15]); + + if (off > off_pre) { + len1 = off-off_pre; + buffer1 = (unsigned char *)(dmx->pes_pages + off_pre); + } else if (off < off_pre) { + len1 = dmx->pes_buf_len-off_pre; + buffer1 = (unsigned char *)(dmx->pes_pages + off_pre); + len2 = off; + buffer2 = (unsigned char *)dmx->pes_pages; + } else if (off == off_pre) { + pr_dbg("pes no data\n"); + } + pes_off_pre[dmx->id] = off; + if (len1) { + buffer1_phys = (unsigned char *)virt_to_phys(buffer1); + dma_sync_single_for_cpu(dmx_get_dev(dmx), + (dma_addr_t)buffer1_phys, len1, DMA_FROM_DEVICE); + } + if (len2) { + buffer2_phys = (unsigned char *)virt_to_phys(buffer2); + dma_sync_single_for_cpu(dmx_get_dev(dmx), + (dma_addr_t)buffer2_phys, len2, DMA_FROM_DEVICE); + } + if (len1 || len2) { + struct aml_channel *ch; + + for (i = 0; i < CHANNEL_COUNT; i++) { + ch = &dmx->channel[i]; + if (ch->used && ch->feed + && (ch->feed->type == DMX_TYPE_TS)) { + if (ch->feed->ts_type & TS_PAYLOAD_ONLY) { + ch->feed->cb.ts(buffer1, + len1, 
buffer2, len2, + &ch->feed->feed.ts,0); + } + } + } + } +} + +static void process_om_read(struct aml_dmx *dmx) +{ + unsigned int i; + unsigned short om_cmd_status_data_0 = 0; + unsigned short om_cmd_status_data_1 = 0; +/* unsigned short om_cmd_status_data_2 = 0;*/ + unsigned short om_cmd_data_out = 0; + + om_cmd_status_data_0 = DMX_READ_REG(dmx->id, OM_CMD_STATUS); + om_cmd_status_data_1 = DMX_READ_REG(dmx->id, OM_CMD_DATA); +/* om_cmd_status_data_2 = DMX_READ_REG(dmx->id, OM_CMD_DATA2);*/ + + if (om_cmd_status_data_0 & 1) { + DMX_WRITE_REG(dmx->id, OM_DATA_RD_ADDR, + (1 << 15) | ((om_cmd_status_data_1 & 0xff) << 2)); + for (i = 0; i < (((om_cmd_status_data_1 >> 7) & 0x1fc) >> 1); + i++) { + om_cmd_data_out = DMX_READ_REG(dmx->id, OM_DATA_RD); + } + + om_cmd_data_out = DMX_READ_REG(dmx->id, OM_DATA_RD_ADDR); + DMX_WRITE_REG(dmx->id, OM_DATA_RD_ADDR, 0); + DMX_WRITE_REG(dmx->id, OM_CMD_STATUS, 1); + } +} + +static void dmx_irq_bh_handler(unsigned long arg) +{ + struct aml_dmx *dmx = (struct aml_dmx *)arg; +#if 0 + u32 status; + + status = DMX_READ_REG(dmx->id, STB_INT_STATUS); + + if (status) + DMX_WRITE_REG(dmx->id, STB_INT_STATUS, status); +#endif + process_smallsection(dmx); +} + +static irqreturn_t dmx_irq_handler(int irq_number, void *para) +{ + struct aml_dmx *dmx = (struct aml_dmx *)para; + struct aml_dvb *dvb = aml_get_dvb_device(); + u32 status; + unsigned long flags; + + spin_lock_irqsave(&dvb->slock, flags); + status = DMX_READ_REG(dmx->id, STB_INT_STATUS); + if (!status) + goto irq_handled; + + pr_dbg_irq("demux %d irq status: 0x%08x\n", dmx->id, status); + + if (status & (1 << SECTION_BUFFER_READY)) + process_section(dmx); +#ifdef NO_SUB + if (status & (1 << SUB_PES_READY)) { + /*If the subtitle is set by tsdemux, + *do not parser in demux driver. 
+ */ + if (dmx->sub_chan == -1) + process_sub(dmx); + } +#endif + if (status & (1 << OTHER_PES_READY)) + process_pes(dmx); + if (status & (1 << OM_CMD_READ_PENDING)) + process_om_read(dmx); + /* + *if (status & (1 << DUPLICATED_PACKET)) { + *} + *if (status & (1 << DIS_CONTINUITY_PACKET)) { + *} + *if (status & (1 << VIDEO_SPLICING_POINT)) { + *} + *if (status & (1 << AUDIO_SPLICING_POINT)) { + *} + */ + if (status & (1 << TS_ERROR_PIN)) + pr_error("TS_ERROR_PIN\n"); + + if (status & (1 << NEW_PDTS_READY)) { + u32 pdts_status = DMX_READ_REG(dmx->id, STB_PTS_DTS_STATUS); + + if (pdts_status & (1 << VIDEO_PTS_READY)) { + video_pts = DMX_READ_REG(dmx->id, VIDEO_PTS_DEMUX); + video_pts_bit32 = + (pdts_status & (1 << VIDEO_PTS_BIT32)) ? 1 : 0; + if (!first_video_pts + || 0 > (int)(video_pts - first_video_pts)) + first_video_pts = video_pts; + } + + if (pdts_status & (1 << AUDIO_PTS_READY)) { + audio_pts = DMX_READ_REG(dmx->id, AUDIO_PTS_DEMUX); + audio_pts_bit32 = + (pdts_status & (1 << AUDIO_PTS_BIT32)) ? 
1 : 0; + if (!first_audio_pts + || 0 > (int)(audio_pts - first_audio_pts)) + first_audio_pts = audio_pts; + } + } + + if (dmx->irq_handler) + dmx->irq_handler(dmx->dmx_irq, (void *)(long)dmx->id); + + DMX_WRITE_REG(dmx->id, STB_INT_STATUS, status); + + /*tasklet_schedule(&dmx->dmx_tasklet);*/ + + { + if (!dmx->int_check_time) { + dmx->int_check_time = jiffies; + dmx->int_check_count = 0; + } + + if (jiffies_to_msecs(jiffies - dmx->int_check_time) >= 100 + || dmx->int_check_count > 1000) { + if (dmx->int_check_count > 1000) { + struct aml_dvb *dvb = + (struct aml_dvb *)dmx->demux.priv; + pr_error("Too many irq (%d irq in %d ms)!\n", + dmx->int_check_count, + jiffies_to_msecs(jiffies - + dmx->int_check_time)); + if (dmx->fe && !dmx->in_tune) + DMX_WRITE_REG(dmx->id, STB_INT_MASK, 0); + dmx_reset_hw_ex(dvb, 0); + } + dmx->int_check_time = 0; + } + + dmx->int_check_count++; + + if (dmx->in_tune) { + dmx->error_check++; + if (dmx->error_check > 200) + DMX_WRITE_REG(dmx->id, STB_INT_MASK, 0); + } + } + +irq_handled: + spin_unlock_irqrestore(&dvb->slock, flags); + return IRQ_HANDLED; +} + +static inline int dmx_get_order(unsigned long size) +{ + int order; + + order = -1; + do { + size >>= 1; + order++; + } while (size); + + return order; +} + +static inline int dmx_get_afifo_size(struct aml_asyncfifo *afifo) +{ + return afifo->secure_enable && afifo->blk.len ? 
afifo->blk.len : asyncfifo_buf_len; +} + +static void dvr_process_channel(struct aml_asyncfifo *afifo, + struct aml_channel *channel, + u32 total, u32 size, + struct aml_swfilter *sf) +{ + int cnt; + int ret = 0; + struct aml_dvr_block blk; + + if (afifo->buf_read > afifo->buf_toggle) { + cnt = total - afifo->buf_read; + if (!(afifo->secure_enable && afifo->blk.addr)) { + dma_sync_single_for_cpu(asyncfifo_get_dev(afifo), + afifo->pages_map+afifo->buf_read*size, + cnt*size, + DMA_FROM_DEVICE); + if (sf) + ret = _rbuf_write(&sf->rbuf, + (u8 *)afifo->pages+afifo->buf_read*size, + cnt*size); + else + channel->dvr_feed->cb.ts( + (u8 *)afifo->pages+afifo->buf_read*size, + cnt*size, NULL, 0, + &channel->dvr_feed->feed.ts,0); + } else { + blk.addr = afifo->blk.addr+afifo->buf_read*size; + blk.len = cnt*size; + if (sf) + ret = _rbuf_write(&sf->rbuf, + (u8 *)afifo->pages+afifo->buf_read*size, + cnt*size); + else { + channel->dvr_feed->cb.ts( + (u8 *)&blk, + sizeof(struct aml_dvr_block), + NULL, 0, + &channel->dvr_feed->feed.ts,0); + } + } + afifo->buf_read = 0; + } + + if (afifo->buf_toggle > afifo->buf_read) { + cnt = afifo->buf_toggle - afifo->buf_read; + if (!(afifo->secure_enable && afifo->blk.addr)) { + dma_sync_single_for_cpu(asyncfifo_get_dev(afifo), + afifo->pages_map+afifo->buf_read*size, + cnt*size, + DMA_FROM_DEVICE); + if (sf) { + if (ret >= 0) + ret = _rbuf_write(&sf->rbuf, + (u8 *)afifo->pages+afifo->buf_read*size, + cnt*size); + } else { + channel->dvr_feed->cb.ts( + (u8 *)afifo->pages+afifo->buf_read*size, + cnt*size, NULL, 0, + &channel->dvr_feed->feed.ts,0); + } + } else { + blk.addr = afifo->blk.addr+afifo->buf_read*size; + blk.len = cnt*size; + if (sf) + ret = _rbuf_write(&sf->rbuf, + (u8 *)afifo->pages+afifo->buf_read*size, + cnt*size); + else { + channel->dvr_feed->cb.ts( + (u8 *)&blk, + sizeof(struct aml_dvr_block), + NULL, 0, + &channel->dvr_feed->feed.ts,0); + } + } + afifo->buf_read = afifo->buf_toggle; + } + + if (sf && ret > 0) { + 
_rbuf_filter_pkts(&sf->rbuf, sf->wrapbuf, + dvb_dmx_swfilter_packets, + channel->dvr_feed->demux); + } else if (sf && ret <= 0) + pr_error("sf rbuf write error[%d]\n", ret); + else + pr_dbg_irq_dvr("write data to dvr\n"); +} + +static uint32_t last_afifo_time = 0; +static void dvr_irq_bh_handler(unsigned long arg) +{ + struct aml_asyncfifo *afifo = (struct aml_asyncfifo *)arg; + struct aml_dvb *dvb = afifo->dvb; + struct aml_dmx *dmx; + u32 size, total; + int i, factor; + unsigned long flags; + + pr_dbg_irq_dvr("async fifo %d irq, interval:%d ms, %d data\n", afifo->id, + jiffies_to_msecs(jiffies - last_afifo_time), afifo->flush_size); + + spin_lock_irqsave(&dvb->slock, flags); + + if (dvb && afifo->source >= AM_DMX_0 && afifo->source < AM_DMX_MAX) { + dmx = &dvb->dmx[afifo->source]; + // pr_inf("async fifo %d irq, source:%d\n", afifo->id,afifo->source); + if (dmx->init && dmx->record) { + struct aml_swfilter *sf = &dvb->swfilter; + int issf = 0; + + total = afifo->buf_len / afifo->flush_size; + factor = dmx_get_order(total); + size = afifo->buf_len >> factor; + + if (sf->user && (sf->afifo == afifo)) + issf = 1; + + for (i = 0; i < CHANNEL_COUNT; i++) { + if (dmx->channel[i].used + && dmx->channel[i].dvr_feed) { + dvr_process_channel(afifo, + &dmx->channel[i], + total, + size, + issf?sf:NULL); + break; + } + } + + } + } + spin_unlock_irqrestore(&dvb->slock, flags); + last_afifo_time = jiffies; +} + +static irqreturn_t dvr_irq_handler(int irq_number, void *para) +{ + struct aml_asyncfifo *afifo = (struct aml_asyncfifo *)para; + int factor = dmx_get_order(afifo->buf_len / afifo->flush_size); + + afifo->buf_toggle++; + afifo->buf_toggle %= (1 << factor); + tasklet_schedule(&afifo->asyncfifo_tasklet); + return IRQ_HANDLED; +} + +/*Enable the STB*/ +static void stb_enable(struct aml_dvb *dvb) +{ + int out_src, des_in, en_des, fec_clk, hiu, dec_clk_en; + int src, tso_src, i; + u32 fec_s0, fec_s1,fec_s2; + u32 invert0, invert1, invert2; + u32 data; + + switch 
(dvb->stb_source) { + case AM_TS_SRC_DMX0: + src = dvb->dmx[0].source; + break; + case AM_TS_SRC_DMX1: + src = dvb->dmx[1].source; + break; + case AM_TS_SRC_DMX2: + src = dvb->dmx[2].source; + break; + default: + src = dvb->stb_source; + break; + } + + switch (src) { + case AM_TS_SRC_TS0: + fec_clk = tsfile_clkdiv; + hiu = 0; + break; + case AM_TS_SRC_TS1: + fec_clk = tsfile_clkdiv; + hiu = 0; + break; + case AM_TS_SRC_TS2: + fec_clk = tsfile_clkdiv; + hiu = 0; + break; + case AM_TS_SRC_TS3: + fec_clk = tsfile_clkdiv; + hiu = 0; + break; + case AM_TS_SRC_S_TS0: + fec_clk = tsfile_clkdiv; + hiu = 0; + break; + case AM_TS_SRC_S_TS1: + fec_clk = tsfile_clkdiv; + hiu = 0; + break; + case AM_TS_SRC_S_TS2: + fec_clk = tsfile_clkdiv; + hiu = 0; + break; + case AM_TS_SRC_HIU: + fec_clk = tsfile_clkdiv; + hiu = 1; + break; + case AM_TS_SRC_HIU1: + fec_clk = tsfile_clkdiv; + hiu = 1; + break; + default: + fec_clk = 0; + hiu = 0; + break; + } + + switch (dvb->dsc[0].source) { + case AM_TS_SRC_DMX0: + des_in = 0; + en_des = 1; + dec_clk_en = 1; + break; + case AM_TS_SRC_DMX1: + des_in = 1; + en_des = 1; + dec_clk_en = 1; + break; + case AM_TS_SRC_DMX2: + des_in = 2; + en_des = 1; + dec_clk_en = 1; + break; + default: + des_in = 0; + en_des = 0; + dec_clk_en = 0; + break; + } + switch (dvb->tso_source) { + case AM_TS_SRC_DMX0: + tso_src = dvb->dmx[0].source; + break; + case AM_TS_SRC_DMX1: + tso_src = dvb->dmx[1].source; + break; + case AM_TS_SRC_DMX2: + tso_src = dvb->dmx[2].source; + break; + default: + tso_src = dvb->tso_source; + break; + } + + switch (tso_src) { + case AM_TS_SRC_TS0: + out_src = 0; + break; + case AM_TS_SRC_TS1: + out_src = 1; + break; + case AM_TS_SRC_TS2: + out_src = 2; + break; + case AM_TS_SRC_TS3: + out_src = 3; + break; + case AM_TS_SRC_S_TS0: + out_src = 6; + break; + case AM_TS_SRC_S_TS1: + out_src = 5; + break; + case AM_TS_SRC_S_TS2: + out_src = 4; + break; + case AM_TS_SRC_HIU: + out_src = 7; + break; + default: + out_src = 0; + break; + } + + 
pr_dbg("[stb]src: %d, dsc1in: %d, tso: %d\n", src, des_in, out_src); + + fec_s0 = 0; + fec_s1 = 0; + fec_s2 = 0; + invert0 = 0; + invert1 = 0; + invert2 = 0; + + for (i = 0; i < dvb->ts_in_total_count; i++) { + if (dvb->ts[i].s2p_id == 0) + fec_s0 = i; + else if (dvb->ts[i].s2p_id == 1) + fec_s1 = i; + else if (dvb->ts[i].s2p_id == 2) + fec_s2 = i; + } + + invert0 = dvb->s2p[0].invert; + invert1 = dvb->s2p[1].invert; + + WRITE_MPEG_REG(STB_TOP_CONFIG, + (invert1 << INVERT_S2P1_FEC_CLK) | + (fec_s1 << S2P1_FEC_SERIAL_SEL) | + (out_src << TS_OUTPUT_SOURCE) | + (des_in << DES_INPUT_SEL) | + (en_des << ENABLE_DES_PL) | + (dec_clk_en << ENABLE_DES_PL_CLK) | + (invert0 << INVERT_S2P0_FEC_CLK) | + (fec_s0 << S2P0_FEC_SERIAL_SEL)| + (ciplus)); + ciplus = 0; + + if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TL1) { + invert2 = dvb->s2p[2].invert; + + WRITE_MPEG_REG(STB_S2P2_CONFIG, + (invert2 << INVERT_S2P2_FEC_CLK) | + (fec_s2 << S2P2_FEC_SERIAL_SEL)); + } + + if (dvb->reset_flag) + hiu = 0; + /* invert ts out clk,add ci model need add this*/ + if (dvb->ts_out_invert) { + printk("ts out invert ---\r\n"); + data = READ_MPEG_REG(TS_TOP_CONFIG); + data |= 1 << TS_OUT_CLK_INVERT; + WRITE_MPEG_REG(TS_TOP_CONFIG, data); + } + + if (src == AM_TS_SRC_HIU1) { + WRITE_MPEG_REG(TS_HIU1_CONFIG, + (demux_skipbyte << FILE_M2TS_SKIP_BYTES_HIU1) | + (hiu << TS_HIU_ENABLE_HIU1) | + (fec_clk << FEC_CLK_DIV_HIU1) | + (0xBB << TS_PACKAGE_LENGTH_SUB_1_HIU1) | + (0x47 << FEC_SYNC_BYTE_HIU1)); + } else { + /* invert ts out clk end */ + WRITE_MPEG_REG(TS_FILE_CONFIG, + (demux_skipbyte << 16) | + (6 << DES_OUT_DLY) | + (3 << TRANSPORT_SCRAMBLING_CONTROL_ODD) | + (3 << TRANSPORT_SCRAMBLING_CONTROL_ODD_2) | + (hiu << TS_HIU_ENABLE) | (fec_clk << FEC_FILE_CLK_DIV)); + } +} + +int dsc_set_pid(struct aml_dsc_channel *ch, int pid) +{ + struct aml_dsc *dsc = ch->dsc; + int is_dsc2 = (dsc->id == 1) ? 1 : 0; + u32 data; + + WRITE_MPEG_REG(TS_PL_PID_INDEX, + ((ch->id & 0x0f) >> 1)+(is_dsc2 ? 
4 : 0)); + data = READ_MPEG_REG(TS_PL_PID_DATA); + if (ch->id & 1) { + data &= 0xFFFF0000; + data |= pid & 0x1fff; + if (!ch->used) + data |= 1 << PID_MATCH_DISABLE_LOW; + } else { + data &= 0xFFFF; + data |= (pid & 0x1fff) << 16; + if (!ch->used) + data |= 1 << PID_MATCH_DISABLE_HIGH; + } + WRITE_MPEG_REG(TS_PL_PID_INDEX, + ((ch->id & 0x0f) >> 1)+(is_dsc2 ? 4 : 0)); + WRITE_MPEG_REG(TS_PL_PID_DATA, data); + WRITE_MPEG_REG(TS_PL_PID_INDEX, 0); + + if (ch->used) + pr_dbg("set DSC %d ch %d PID %d\n", dsc->id, ch->id, pid); + else + pr_dbg("disable DSC %d ch %d\n", dsc->id, ch->id); + return 0; +} + +int dsc_get_pid(struct aml_dsc_channel *ch, int *pid) +{ + struct aml_dsc *dsc = ch->dsc; + int is_dsc2 = (dsc->id == 1) ? 1 : 0; + u32 data; + + WRITE_MPEG_REG(TS_PL_PID_INDEX, + ((ch->id & 0x0f) >> 1)+(is_dsc2 ? 4 : 0)); + data = READ_MPEG_REG(TS_PL_PID_DATA); + if (ch->id & 1) { + *pid = data & 0x1fff; + } else { + *pid = (data >> 16) & 0x1fff; + } + + /*pr_dbg("%s,get DSC %d ch %d PID %d\n", __FUNCTION__,dsc->id, ch->id, *pid);*/ + return 0; +} + +int dsc_set_key(struct aml_dsc_channel *ch, int flags, enum ca_cw_type type, + u8 *key) +{ + /*struct aml_dsc *dsc = ch->dsc;*/ + int ret = -1; + + switch (type) { + case CA_CW_DVB_CSA_EVEN: + case CA_CW_DVB_CSA_ODD: + aml_ci_plus_disable(); + ret = dsc_set_csa_key(ch, flags, type, key); + if (ret != 0) + goto END; + /* Different with old mode, do change */ + if (ch->work_mode == CIPLUS_MODE || ch->work_mode == -1) { + if (ch->work_mode == -1) + pr_inf("dsc[%d:%d] enable\n", + ch->dsc->id, ch->id); + else + pr_inf("dsc[%d:%d] enable (from ciplus)\n", + ch->dsc->id, ch->id); + ch->mode = ECB_MODE; + ch->work_mode = DVBCSA_MODE; + } + break; + case CA_CW_AES_EVEN: + case CA_CW_AES_ODD: + case CA_CW_AES_EVEN_IV: + case CA_CW_AES_ODD_IV: + case CA_CW_DES_EVEN: + case CA_CW_DES_ODD: + case CA_CW_SM4_EVEN: + case CA_CW_SM4_ODD: + case CA_CW_SM4_EVEN_IV: + case CA_CW_SM4_ODD_IV: + ret = dsc_set_aes_des_sm4_key(ch, flags, type, 
key);
		if (ret != 0)
			goto END;
		/* Route the CIPLUS core's output to the destination demux
		 * now that a key is in place.
		 */
		am_ci_plus_set_output(ch);
		/* Different with old mode, do change */
		if (ch->work_mode == DVBCSA_MODE || ch->work_mode == -1) {
			if (ch->work_mode == -1)
				pr_inf("dsc[%d:%d] ciplus enable\n",
					ch->dsc->id, ch->id);
			else
				pr_inf("dsc[%d:%d] ciplus enable (from dsc)\n",
					ch->dsc->id, ch->id);
			ch->work_mode = CIPLUS_MODE;
		}
		break;
	default:
		break;
	}
END:
	return ret;
}

/*
 * dsc_set_keys - replay every pending control word for a channel.
 * @ch: descrambler channel; ch->set encodes the pending work:
 *      bits 0..23  = bitmask of ca_cw_type values to (re)program,
 *      bits 24..31 = flags passed through to dsc_set_key().
 *
 * For each set bit the matching stored key buffer (even/odd key or IV) is
 * pushed to the hardware via dsc_set_key().
 *
 * NOTE(review): the per-key result is collected in `ret` but the function
 * unconditionally returns 0, so callers never see a failure. Probably
 * intended to be `return ret;` — confirm callers before changing.
 */
int dsc_set_keys(struct aml_dsc_channel *ch)
{
	int types = ch->set & 0xFFFFFF;		/* pending CW-type bitmask */
	int flag = (ch->set >> 24) & 0xFF;	/* key-source flags */
	int i;
	u8 *k;
	int ret = 0;

	for (i = 0; i < CA_CW_TYPE_MAX; i++) {
		if (types & (1 << i)) {
			k = NULL;
			/* Pick the stored buffer that matches this CW type. */
			switch (i) {
			case CA_CW_DVB_CSA_EVEN:
			case CA_CW_AES_EVEN:
			case CA_CW_DES_EVEN:
			case CA_CW_SM4_EVEN:
				k = ch->even;
				break;
			case CA_CW_DVB_CSA_ODD:
			case CA_CW_AES_ODD:
			case CA_CW_DES_ODD:
			case CA_CW_SM4_ODD:
				k = ch->odd;
				break;
			case CA_CW_AES_EVEN_IV:
			case CA_CW_SM4_EVEN_IV:
				k = ch->even_iv;
				break;
			case CA_CW_AES_ODD_IV:
			case CA_CW_SM4_ODD_IV:
				k = ch->odd_iv;
				break;
			default:
				break;
			}
			/*
			if (k)
				pr_inf("dsc ch:%d flag:%d type:%d\n", ch->id, flag, i);
			*/
			if (k)
				ret = dsc_set_key(ch, flag,
						i,
						k);
		}
	}
	return 0;
}

/*
 * dsc_set_csa_key - load a DVB-CSA control word into the legacy core.
 * @ch:    descrambler channel
 * @flags: DSC_FROM_KL means the key comes from the key ladder and the key
 *         registers get a dummy (zero) write only
 * @type:  even/odd CW selector (also offsets the key index by DSC_COUNT)
 * @key:   8 CSA key bytes (ignored for key-ladder keys)
 *
 * The key-register index written to COMM_DESC_KEY_RW differs per SoC
 * generation (GXBB vs GXL/GXM); dsc1 uses indices offset by 16.
 */
static int dsc_set_csa_key(struct aml_dsc_channel *ch, int flags,
	enum ca_cw_type type, u8 *key)
{
	struct aml_dsc *dsc = ch->dsc;
	int is_dsc2 = (dsc->id == 1) ? 1 : 0;	/* dsc1 -> index base 16 */
	u16 k0, k1, k2, k3;
	u32 key0, key1;
	int reg;

	if (flags & DSC_FROM_KL) {
		k0 = k1 = k2 = k3 = 0;
		/*dummy write to check if kl not working*/
		key0 = key1 = 0;
		WRITE_MPEG_REG(COMM_DESC_KEY0, key0);
		WRITE_MPEG_REG(COMM_DESC_KEY1, key1);

		/*tdes? :*/
		if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) {
			WRITE_MPEG_REG(COMM_DESC_KEY_RW,
/* (type ? (1 << 6) : (1 << 5)) | */
				((1 << 5)) |
				((ch->id + type * DSC_COUNT)+
				(is_dsc2 ?
16 : 0))); + } + if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXL || + get_cpu_type() == MESON_CPU_MAJOR_ID_GXM) { + pr_info("do kl..\n"); + WRITE_MPEG_REG(COMM_DESC_KEY_RW, + (type ? (1 << 6) : (1 << 5)) | (1<<7) | + ((ch->id + type * DSC_COUNT)+ + (is_dsc2 ? 16 : 0))); + } + reg = (type ? (1 << 6) : (1 << 5)) | + ((ch->id + type * DSC_COUNT)+ + (is_dsc2 ? 16 : 0)); + } else { + k0 = (key[0] << 8) | key[1]; + k1 = (key[2] << 8) | key[3]; + k2 = (key[4] << 8) | key[5]; + k3 = (key[6] << 8) | key[7]; + + key0 = (k0 << 16) | k1; + key1 = (k2 << 16) | k3; + WRITE_MPEG_REG(COMM_DESC_KEY0, key0); + WRITE_MPEG_REG(COMM_DESC_KEY1, key1); + + reg = (ch->id + type * DSC_COUNT)+(is_dsc2 ? 16 : 0); + WRITE_MPEG_REG(COMM_DESC_KEY_RW, reg); + } + + return 0; +} + +/************************* AES DESC************************************/ +/*#define STB_TOP_CONFIG 0x16f0 +#define CIPLUS_KEY0 0x16f8 +#define CIPLUS_KEY1 0x16f9 +#define CIPLUS_KEY2 0x16fa +#define CIPLUS_KEY3 0x16fb +#define CIPLUS_KEY_WR 0x16fc +#define CIPLUS_CONFIG 0x16fd +#define CIPLUS_ENDIAN 0x16fe*/ + +#define ENABLE_DEC_PL 7 +#define ENABLE_DES_PL_CLK 15 + +#define KEY_WR_AES_IV_B 5 +#define KEY_WR_AES_IV_A 4 +#define KEY_WR_AES_B 3 +#define KEY_WR_AES_A 2 +#define KEY_WR_DES_B 1 +#define KEY_WR_DES_A 0 + +#define IDSA_MODE_BIT 31 +#define SM4_MODE 30 +#define DES2_KEY_ENDIAN 25 +#define DES2_IN_ENDIAN 21 +#define DES2_CFG 6 +#define DES2_EN 5 +#define CNTL_ENABLE 3 +#define AES_CBC_DISABLE 2 +#define AES_EN 1 +#define DES_EN 0 + +#define AES_IV_ENDIAN 28 +#define AES_MSG_OUT_ENDIAN 24 +#define AES_MSG_IN_ENDIAN 20 +#define AES_KEY_ENDIAN 16 +#define DES_MSG_OUT_ENDIAN 8 +#define DES_MSG_IN_ENDIAN 4 +#define DES_KEY_ENDIAN 0 + +#define ALGO_AES 0 +#define ALGO_SM4 1 +#define ALGO_DES 2 + +#if 0 +static void aml_ci_plus_set_stb(void) +{ + unsigned int data; + /* data = READ_MPEG_REG(FEC_INPUT_CONTROL); */ + /* data |= (0<<FEC_SEL); */ + /* data |= (1<<FEC_CORE_SEL); */ + /* data |= (1<<FEC_INPUT_FEC_CLK); + * 
local playback will not work if set this + */ + /* WRITE_MPEG_REG(FEC_INPUT_CONTROL, data); */ + + data = READ_MPEG_REG(STB_TOP_CONFIG); + WRITE_MPEG_REG(STB_TOP_CONFIG, data | + (0 << CIPLUS_IN_SEL) | (0 << CIPLUS_OUT_SEL)); + data = READ_MPEG_REG(STB_TOP_CONFIG); + /* data |= (1<<ENABLE_DEC_PL); bit 7 -- + * enable_des_pl, this step was set in dsc_enable + */ + /*bit 15 -- enable_des_pl_clk*/ + /* data |= (1<<ENABLE_DES_PL_CLK); */ + data |= (1<<CIPLUS_OUT_SEL);/*bit 28 -- ciplus_out_sel from ciplus*/ + WRITE_MPEG_REG(STB_TOP_CONFIG, data); + data = READ_MPEG_REG(STB_TOP_CONFIG); +} +#endif + +/* + * param: + * key: + * 16bytes IV key + * type: + * AM_DSC_KEY_TYPE_AES_ODD IV odd key + * AM_DSC_KEY_TYPE_AES_EVEN IV even key + */ +void aml_ci_plus_set_iv(struct aml_dsc_channel *ch, enum ca_cw_type type, + u8 *key) +{ + unsigned int k0, k1, k2, k3; + + k3 = (key[0] << 24) | (key[1] << 16) | (key[2] << 8) | key[3]; + k2 = (key[4] << 24) | (key[5] << 16) | (key[6] << 8) | key[7]; + k1 = (key[8] << 24) | (key[9] << 16) | (key[10] << 8) | key[11]; + k0 = (key[12] << 24) | (key[13] << 16) | (key[14] << 8) | key[15]; + + if (type == CA_CW_AES_EVEN_IV || + type == CA_CW_SM4_EVEN_IV) { + WRITE_MPEG_REG(CIPLUS_KEY0, k0); + WRITE_MPEG_REG(CIPLUS_KEY1, k1); + WRITE_MPEG_REG(CIPLUS_KEY2, k2); + WRITE_MPEG_REG(CIPLUS_KEY3, k3); + WRITE_MPEG_REG(CIPLUS_KEY_WR, + (ch->id << 9) | (1<<KEY_WR_AES_IV_A)); + } else if (type == CA_CW_AES_ODD_IV || + type == CA_CW_SM4_ODD_IV) { + WRITE_MPEG_REG(CIPLUS_KEY0, k0); + WRITE_MPEG_REG(CIPLUS_KEY1, k1); + WRITE_MPEG_REG(CIPLUS_KEY2, k2); + WRITE_MPEG_REG(CIPLUS_KEY3, k3); + WRITE_MPEG_REG(CIPLUS_KEY_WR, + (ch->id << 9) | (1<<KEY_WR_AES_IV_B)); + } +} + +/* + * Param: + * key_endian + * S905D 7 for kl 0 for set key directly + * mode + * 0 for ebc + * 1 for cbc + */ +static void aml_ci_plus_config(int key_endian, int mode, int algo) +{ + unsigned int data; + unsigned int idsa_mode = 0; + unsigned int sm4_mode = 0; + unsigned int cbc_disable = 0; 
+ unsigned int des_enable = 0; + unsigned int aes_enable = 0; + unsigned int des2_key_endian = 0; + unsigned int des2_in_endian = 0; + unsigned int des2_cfg = 0; + unsigned int des2_enable = 0; + + pr_dbg("%s mode:%d,alog:%d\n",__FUNCTION__,mode,algo); + + if (get_cpu_type() < MESON_CPU_MAJOR_ID_SM1) { + WRITE_MPEG_REG(CIPLUS_ENDIAN, + (15 << AES_MSG_OUT_ENDIAN) + | (15 << AES_MSG_IN_ENDIAN) + | (key_endian << AES_KEY_ENDIAN) + | + (15 << DES_MSG_OUT_ENDIAN) + | (15 << DES_MSG_IN_ENDIAN) + | (key_endian << DES_KEY_ENDIAN) + ); + } else if (algo == ALGO_DES){ + WRITE_MPEG_REG(CIPLUS_ENDIAN, + (15 << AES_IV_ENDIAN) + | (7 << AES_MSG_OUT_ENDIAN) + | (15 << AES_MSG_IN_ENDIAN) + | (15 << AES_KEY_ENDIAN) + ); + pr_inf("CIPLUS_ENDIAN is 0x%x\n", READ_MPEG_REG(CIPLUS_ENDIAN)); + } else { + WRITE_MPEG_REG(CIPLUS_ENDIAN, 0); + } + + data = READ_MPEG_REG(CIPLUS_ENDIAN); + + if (algo == ALGO_SM4) { + sm4_mode = 1; + } else if (algo == ALGO_AES){ + aes_enable = 1; + } else { + if (get_cpu_type() < MESON_CPU_MAJOR_ID_SM1) { + des_enable = 1; + } else { + des2_key_endian = 8; + des2_in_endian = 8; + des2_cfg = 2; + des2_enable = 1; + aes_enable = 1; + } + } + + if (mode == IDSA_MODE) { + idsa_mode = 1; + cbc_disable = 0; + } else if (mode == CBC_MODE) { + cbc_disable = 0; + } else { + cbc_disable = 1; + } + pr_dbg("idsa_mode:%d sm4_mode:%d cbc_disable:%d aes_enable:%d des_enable:%d\n", \ + idsa_mode,sm4_mode,cbc_disable,aes_enable,des_enable); + + data = (idsa_mode << IDSA_MODE_BIT) | + (sm4_mode << SM4_MODE ) | + (des2_key_endian << DES2_KEY_ENDIAN) | + (des2_in_endian << DES2_IN_ENDIAN) | + (des2_cfg << DES2_CFG) | + (des2_enable << DES2_EN) | + (cbc_disable << AES_CBC_DISABLE) | + /*1 << AES_CBC_DISABLE : ECB + *0 << AES_CBC_DISABLE : CBC + */ + (1 << CNTL_ENABLE) | + (aes_enable << AES_EN) | + (des_enable << DES_EN); + + WRITE_MPEG_REG(CIPLUS_CONFIG, data); + data = READ_MPEG_REG(CIPLUS_CONFIG); + pr_dbg("CIPLUS_CONFIG is 0x%x\n",data); +} + +static void set_fec_core_sel 
(struct aml_dvb *dvb) +{ + int i; + + for (i = 0; i < DMX_DEV_COUNT; i ++) { + int set = 0; + u32 ctrl = DMX_READ_REG(i, FEC_INPUT_CONTROL); + + if ((dvb->dsc[0].dst != -1) && (dvb->dsc[0].dst - AM_TS_SRC_DMX0 == i)) { + set = 1; + } else if ((dvb->dsc[1].dst != -1) && (dvb->dsc[1].dst - AM_TS_SRC_DMX0 == i)) { + set = 1; + } else { + u32 cfg = READ_MPEG_REG(CIPLUS_CONFIG); + + if (cfg & (1 << CNTL_ENABLE)) { + if (!ciplus_out_auto_mode) { + if (ciplus_out_sel & (1 << i)) + set = 1; + } + } + } + + if (set) { + ctrl |= (1 << FEC_CORE_SEL); + } else { + ctrl &= ~(1 << FEC_CORE_SEL); + } + + DMX_WRITE_REG(i, FEC_INPUT_CONTROL, ctrl); + } +} + +/* + * Set output to demux set. + */ +static void am_ci_plus_set_output(struct aml_dsc_channel *ch) +{ + struct aml_dsc *dsc = ch->dsc; + struct aml_dvb *dvb = dsc->dvb; + u32 data; + u32 in = 0, out = 0; + int set = 0; + + if (dsc->id != 0) { + pr_error("Ciplus set output can only work at dsc0 device\n"); + return; + } + + switch (dsc->source) { + case AM_TS_SRC_DMX0: + in = 0; + break; + case AM_TS_SRC_DMX1: + in = 1; + break; + case AM_TS_SRC_DMX2: + in = 2; + break; + default: + break; + } + + if (ciplus_out_auto_mode == 1) { + switch (dsc->dst) { + case AM_TS_SRC_DMX0: + out = 1; + break; + case AM_TS_SRC_DMX1: + out = 2; + break; + case AM_TS_SRC_DMX2: + out = 4; + break; + default: + break; + } + set = 1; + ciplus_out_sel = out; + } else if (ciplus_out_sel >= 0 && ciplus_out_sel <= 7) { + set = 1; + out = ciplus_out_sel; + } else { + pr_error("dsc ciplus out config is invalid\n"); + } + + if (set) { + /* Set ciplus input source , + * output set 0 means no output. ---> need confirm. + * if output set 0 still affects dsc output, we need to disable + * ciplus module. 
+ */ + data = READ_MPEG_REG(STB_TOP_CONFIG); + data &= ~(3<<CIPLUS_IN_SEL); + data |= in << CIPLUS_IN_SEL; + data &= ~(7<<CIPLUS_OUT_SEL); + data |= out << CIPLUS_OUT_SEL; + WRITE_MPEG_REG(STB_TOP_CONFIG, data); + pr_inf("dsc ciplus in[%x] out[%x] %s\n", in, out, + (ciplus_out_auto_mode) ? "" : "force"); + + set_fec_core_sel(dvb); + } +} + +#if 0 +/* + * Ciplus output has high priority, + * disable it's output will let dsc output go. + */ +static void aml_ci_plus_disable_output(void) +{ + u32 data = 0; + + data = READ_MPEG_REG(STB_TOP_CONFIG); + WRITE_MPEG_REG(STB_TOP_CONFIG, data & + ~(7 << CIPLUS_OUT_SEL)); +} + +static void aml_ci_plus_enable(void) +{ + u32 data = 0; + + data = READ_MPEG_REG(STB_TOP_CONFIG); + WRITE_MPEG_REG(CIPLUS_CONFIG, + (1 << CNTL_ENABLE) + | (1 << AES_EN) + | (1 << DES_EN)); +} +#endif + +static void aml_ci_plus_disable(void) +{ + u32 data = 0; + + WRITE_MPEG_REG(CIPLUS_CONFIG, 0); + + data = READ_MPEG_REG(STB_TOP_CONFIG); + WRITE_MPEG_REG(STB_TOP_CONFIG, data & + ~((1 << CIPLUS_IN_SEL) | (7 << CIPLUS_OUT_SEL))); +} + +static int dsc_set_aes_des_sm4_key(struct aml_dsc_channel *ch, int flags, + enum ca_cw_type type, u8 *key) +{ + unsigned int k0, k1, k2, k3; + int iv = 0, aes = 0, des = 0; + int ab_iv = 0, ab_aes = 0, ab_des = 0; + int from_kl = flags & CA_CW_FROM_KL; + int algo = 0; + + if (!from_kl) { + if (get_cpu_type() < MESON_CPU_MAJOR_ID_SM1) { + k3 = (key[0] << 24) | (key[1] << 16) | (key[2] << 8) | key[3]; + k2 = (key[4] << 24) | (key[5] << 16) | (key[6] << 8) | key[7]; + k1 = (key[8] << 24) | (key[9] << 16) | (key[10] << 8) | key[11]; + k0 = (key[12] << 24) | (key[13] << 16) + | (key[14] << 8) | key[15]; + } else { + k0 = (key[0]) | (key[1] << 8) | (key[2] << 16) | (key[3] << 24); + k1 = (key[4]) | (key[5] << 8) | (key[6] << 16) | (key[7] << 24); + k2 = (key[8]) | (key[9] << 8) | (key[10] << 16)| (key[11] << 24); + k3 = (key[12])| (key[13] << 8)| (key[14] << 16)| (key[15] << 24); + } + } else + k0 = k1 = k2 = k3 = 0; + + switch 
(type) { + case CA_CW_AES_EVEN: + case CA_CW_SM4_EVEN: + ab_aes = (from_kl) ? 0x2 : 0x1; + if (ch->mode == -1) + ch->mode = ECB_MODE; + aes = 1; + if (type == CA_CW_AES_EVEN) + algo = ALGO_AES; + else + algo = ALGO_SM4; + break; + case CA_CW_AES_ODD: + case CA_CW_SM4_ODD: + ab_aes = (from_kl) ? 0x1 : 0x2; + if (ch->mode == -1) + ch->mode = ECB_MODE; + aes = 1; + if (type == CA_CW_AES_ODD) + algo = ALGO_AES; + else + algo = ALGO_SM4; + break; + case CA_CW_AES_EVEN_IV: + case CA_CW_SM4_EVEN_IV: + ab_iv = 0x1; + if (ch->mode == -1) + ch->mode = CBC_MODE; + iv = 1; + if (type == CA_CW_AES_EVEN_IV) + algo = ALGO_AES; + else + algo = ALGO_SM4; + break; + case CA_CW_AES_ODD_IV: + case CA_CW_SM4_ODD_IV: + ab_iv = 0x2; + if (ch->mode == -1) + ch->mode = CBC_MODE; + iv = 1; + if (type == CA_CW_AES_ODD_IV) + algo = ALGO_AES; + else + algo = ALGO_SM4; + break; + case CA_CW_DES_EVEN: + if (get_cpu_type() < MESON_CPU_MAJOR_ID_SM1) { + ab_des = 0x1; + } else { + ab_aes = 0x1; + } + ch->mode = ECB_MODE; + des = 1; + algo = ALGO_DES; + break; + case CA_CW_DES_ODD: + if (get_cpu_type() < MESON_CPU_MAJOR_ID_SM1) { + ab_des = 0x2; + } else { + ab_aes = 0x2; + } + ch->mode = ECB_MODE; + algo = ALGO_DES; + des = 1; + break; + default: + break; + } + + /* Set endian and cbc/ecb mode */ + if (from_kl) + aml_ci_plus_config(7, ch->mode, algo); + else + aml_ci_plus_config(0, ch->mode, algo); + + /* Write keys to work */ + if (iv || aes) { + WRITE_MPEG_REG(CIPLUS_KEY0, k0); + WRITE_MPEG_REG(CIPLUS_KEY1, k1); + WRITE_MPEG_REG(CIPLUS_KEY2, k2); + WRITE_MPEG_REG(CIPLUS_KEY3, k3); + } else {/*des*/ + WRITE_MPEG_REG(CIPLUS_KEY0, k2); + WRITE_MPEG_REG(CIPLUS_KEY1, k3); + WRITE_MPEG_REG(CIPLUS_KEY2, 0); + WRITE_MPEG_REG(CIPLUS_KEY3, 0); + } + WRITE_MPEG_REG(CIPLUS_KEY_WR, + (ch->id << 9) | + /* bit[11:9] the key of index, + need match PID index*/ + ((from_kl && des) ? (1 << 8) : 0) | + /* bit[8] des key use cw[127:64]*/ + (0 << 7) | /* bit[7] aes iv use cw*/ + ((from_kl && (aes || des)) ? 
(1 << 6) : 0) | + /* bit[6] aes/des key use cw*/ + /* bit[5] write AES IV B value*/ + (ab_iv << 4) | /* bit[4] write AES IV A value*/ + /* bit[3] write AES B key*/ + (ab_aes << 2) | /* bit[2] write AES A key*/ + /* bit[1] write DES B key*/ + (ab_des)); /* bit[0] write DES A key*/ + + /* + pr_inf("k:%08x:%08x:%08x:%08x kl:%d aes:%d des:%d ab_iv:%d ab_aes:%d ab_des:%d id:%d mod:%d\n", + k0, k1, k2, k3, + from_kl, aes, des, ab_iv, ab_aes, ab_des, ch->id, ch->aes_mode); + */ + return 0; +} + +void dsc_release(void) +{ + //aml_ci_plus_disable(); +} +/************************* AES DESC************************************/ +void set_ciplus_input_source(struct aml_dsc *dsc) +{ + u32 data; + u32 in = 0; + + if (dsc->id != 0) { + pr_error("Ciplus set output can only work at dsc0 device\n"); + return; + } + + switch (dsc->source) { + case AM_TS_SRC_DMX0: + in = 0; + break; + case AM_TS_SRC_DMX1: + in = 1; + break; + case AM_TS_SRC_DMX2: + in = 2; + break; + default: + break; + } + + if (ciplus_out_auto_mode == 1) { + /* Set ciplus input source */ + data = READ_MPEG_REG(STB_TOP_CONFIG); + data &= ~(3<<CIPLUS_IN_SEL); + data |= in << CIPLUS_IN_SEL; + WRITE_MPEG_REG(STB_TOP_CONFIG, data); + pr_inf("dsc ciplus in[%x]\n", in); + } +} + +int dsc_enable(struct aml_dsc *dsc, int enable) +{ + if (dsc->id == 0) { + WRITE_MPEG_REG(STB_TOP_CONFIG, + READ_MPEG_REG(STB_TOP_CONFIG) & + ~((0x11 << DES_INPUT_SEL)| + (1 << ENABLE_DES_PL)| + (1 << ENABLE_DES_PL_CLK))); + } else if (dsc->id == 1) { + WRITE_MPEG_REG(COMM_DESC_2_CTL, 0); + } + return 0; +} + +/*Set section buffer*/ +static int dmx_alloc_sec_buffer(struct aml_dmx *dmx) +{ + unsigned long base; + unsigned long grp_addr[SEC_BUF_GRP_COUNT]; + int grp_len[SEC_BUF_GRP_COUNT]; + int i; + + if (dmx->sec_pages) + return 0; + + grp_len[0] = (1 << SEC_GRP_LEN_0) * 8; + grp_len[1] = (1 << SEC_GRP_LEN_1) * 8; + grp_len[2] = (1 << SEC_GRP_LEN_2) * 8; + grp_len[3] = (1 << SEC_GRP_LEN_3) * 8; + + dmx->sec_total_len = grp_len[0] + grp_len[1] + 
grp_len[2] + grp_len[3]; + dmx->sec_pages = + __get_free_pages(GFP_KERNEL, get_order(dmx->sec_total_len)); + if (!dmx->sec_pages) { + pr_error("cannot allocate section buffer %d bytes %d order\n", + dmx->sec_total_len, get_order(dmx->sec_total_len)); + return -1; + } + dmx->sec_pages_map = + dma_map_single(dmx_get_dev(dmx), (void *)dmx->sec_pages, + dmx->sec_total_len, DMA_FROM_DEVICE); + + grp_addr[0] = dmx->sec_pages_map; + + grp_addr[1] = grp_addr[0] + grp_len[0]; + grp_addr[2] = grp_addr[1] + grp_len[1]; + grp_addr[3] = grp_addr[2] + grp_len[2]; + + dmx->sec_buf[0].addr = dmx->sec_pages; + dmx->sec_buf[0].len = grp_len[0] / 8; + + for (i = 1; i < SEC_BUF_COUNT; i++) { + dmx->sec_buf[i].addr = + dmx->sec_buf[i - 1].addr + dmx->sec_buf[i - 1].len; + dmx->sec_buf[i].len = grp_len[i / 8] / 8; + } + + base = grp_addr[0] & 0xFFFF0000; + DMX_WRITE_REG(dmx->id, SEC_BUFF_BASE, base >> 16); + DMX_WRITE_REG(dmx->id, SEC_BUFF_01_START, + (((grp_addr[0] - base) >> 8) << 16) | + ((grp_addr[1] - base) >> 8)); + DMX_WRITE_REG(dmx->id, SEC_BUFF_23_START, + (((grp_addr[2] - base) >> 8) << 16) | + ((grp_addr[3] - base) >> 8)); + DMX_WRITE_REG(dmx->id, SEC_BUFF_SIZE, + SEC_GRP_LEN_0 | + (SEC_GRP_LEN_1 << 4) | + (SEC_GRP_LEN_2 << 8) | + (SEC_GRP_LEN_3 << 12)); + + return 0; +} + +#ifdef NO_SUB +/*Set subtitle buffer*/ +static int dmx_alloc_sub_buffer(struct aml_dvb *dvb, struct aml_dmx *dmx) +{ +#ifdef SUB_BUF_DMX + unsigned long addr; + + if (dmx->sub_pages) + return 0; + + /*check if use shared buf*/ + if (dvb->sub_pages) { + dmx->sub_pages = dvb->sub_pages; + dmx->sub_buf_len = dvb->sub_buf_len; + dmx->sub_pages_map = dvb->sub_pages_map; + goto end_alloc; + } + + dmx->sub_buf_len = 64 * 1024; + dmx->sub_pages = + __get_free_pages(GFP_KERNEL, get_order(dmx->sub_buf_len)); + if (!dmx->sub_pages) { + pr_error("cannot allocate subtitle buffer\n"); + return -1; + } + dmx->sub_pages_map = + dma_map_single(dmx_get_dev(dmx), (void *)dmx->sub_pages, + dmx->sub_buf_len, DMA_FROM_DEVICE); 
+ +end_alloc: + addr = virt_to_phys((void *)dmx->sub_pages); +#ifndef SUB_PARSER + DMX_WRITE_REG(dmx->id, SB_START, addr >> 12); + DMX_WRITE_REG(dmx->id, SB_LAST_ADDR, (dmx->sub_buf_len >> 3) - 1); +#endif + if (dmx->sub_pages != dvb->sub_pages) { + pr_dbg("sub buff: (%d) %lx %x\n", + dmx->id, addr, dmx->sub_buf_len); + } +#endif + return 0; +} +#ifdef SUB_BUF_SHARED +static int dmx_alloc_sub_buffer_shared(struct aml_dvb *dvb) +{ +#ifdef SUB_BUF_DMX + if (dvb->sub_pages) + return 0; + + dvb->sub_buf_len = 64 * 1024; + dvb->sub_pages = + __get_free_pages(GFP_KERNEL, get_order(dvb->sub_buf_len)); + if (!dvb->sub_pages) { + pr_error("cannot allocate subtitle buffer\n"); + return -1; + } + dvb->sub_pages_map = + dma_map_single(dvb->dev, (void *)dvb->sub_pages, + dvb->sub_buf_len, DMA_FROM_DEVICE); + + pr_dbg("sub buff shared: %lx %x\n", + (unsigned long)virt_to_phys((void *)dvb->sub_pages), + dvb->sub_buf_len); +#endif + return 0; +} +#endif +#endif /*NO_SUB */ + +/*Set PES buffer*/ +static int dmx_alloc_pes_buffer(struct aml_dvb *dvb, struct aml_dmx *dmx) +{ + unsigned long addr; + + if (dmx->pes_pages) + return 0; + + /*check if use shared buf*/ + if (dvb->pes_pages) { + dmx->pes_pages = dvb->pes_pages; + dmx->pes_buf_len = dvb->pes_buf_len; + dmx->pes_pages_map = dvb->pes_pages_map; + goto end_alloc; + } + + dmx->pes_buf_len = 64 * 1024; + dmx->pes_pages = + __get_free_pages(GFP_KERNEL, get_order(dmx->pes_buf_len)); + if (!dmx->pes_pages) { + pr_error("cannot allocate pes buffer\n"); + return -1; + } + dmx->pes_pages_map = + dma_map_single(dmx_get_dev(dmx), (void *)dmx->pes_pages, + dmx->pes_buf_len, DMA_FROM_DEVICE); +end_alloc: + addr = virt_to_phys((void *)dmx->pes_pages); + DMX_WRITE_REG(dmx->id, OB_START, addr >> 12); + DMX_WRITE_REG(dmx->id, OB_LAST_ADDR, (dmx->pes_buf_len >> 3) - 1); + + if (dmx->pes_pages != dvb->pes_pages) { + pr_dbg("pes buff: (%d) %lx %x\n", + dmx->id, addr, dmx->pes_buf_len); + } + return 0; +} +#ifdef PES_BUF_SHARED +static int 
dmx_alloc_pes_buffer_shared(struct aml_dvb *dvb)
{
	/* Allocate the PES buffer shared by all demuxes (64 KiB) and map it
	 * for DMA; a no-op if it already exists. Returns 0/-1.
	 */
	if (dvb->pes_pages)
		return 0;

	dvb->pes_buf_len = 64 * 1024;
	dvb->pes_pages =
		__get_free_pages(GFP_KERNEL, get_order(dvb->pes_buf_len));
	if (!dvb->pes_pages) {
		pr_error("cannot allocate pes buffer\n");
		return -1;
	}
	dvb->pes_pages_map =
		dma_map_single(dvb->dev, (void *)dvb->pes_pages,
			dvb->pes_buf_len, DMA_FROM_DEVICE);

	pr_dbg("pes buff shared: %lx %x\n",
		(unsigned long)virt_to_phys((void *)dvb->pes_pages),
		dvb->pes_buf_len);
	return 0;
}
#endif

/*Allocate ASYNC FIFO Buffer*/
/*
 * Returns the (cached) page allocation for this async fifo, allocating it
 * on first use; 0 on allocation failure. The buffer is deliberately kept
 * in afifo->stored_pages across put/get cycles (see asyncfifo_free_buffer,
 * which intentionally does not free it) so it can be reused.
 */
static unsigned long asyncfifo_alloc_buffer(struct aml_asyncfifo *afifo, int len)
{
	if (!afifo->stored_pages) {
		afifo->stored_pages = __get_free_pages(GFP_KERNEL, get_order(len));
	}

	if (!afifo->stored_pages) {
		pr_error("cannot allocate async fifo buffer\n");
		return 0;
	}
	return afifo->stored_pages;
}
/* Intentionally a no-op: the pages stay cached in afifo->stored_pages for
 * reuse (see asyncfifo_alloc_buffer). The free_pages() call is disabled.
 */
static void asyncfifo_free_buffer(unsigned long buf, int len)
{
	//free_pages(buf, get_order(len));
}

/*
 * asyncfifo_set_buffer - attach a DMA buffer to an async fifo.
 * @afifo: fifo to configure (must not already have pages attached)
 * @len:   buffer length in bytes
 * @buf:   page allocation from asyncfifo_alloc_buffer()
 *
 * Resets the ring state, derives afifo->flush_size (clamped to [128,
 * len/2] and rounded up to a power of two in the general case), and maps
 * the buffer for DMA. Returns 0 on success, -1 if already set or buf is 0.
 */
static int asyncfifo_set_buffer(struct aml_asyncfifo *afifo,
	int len, unsigned long buf)
{
	if (afifo->pages)
		return -1;

	afifo->buf_toggle = 0;
	afifo->buf_read = 0;
	afifo->buf_len = dmx_get_afifo_size(afifo);
	pr_dbg("async fifo %d buf %lu buf size %d, flush size %d, secure_enable %d, blk.addr %u\n",
		afifo->id, buf, afifo->buf_len, afifo->flush_size, afifo->secure_enable, afifo->blk.addr);

	if ((afifo->flush_size <= 0)
		|| (afifo->flush_size > (len>>1))) {
		afifo->flush_size = len>>1;
	} else if (afifo->flush_size < 128) {
		afifo->flush_size = 128;
	} else {
		/* Round the requested flush size up to the next power of
		 * two, capped at len/2.
		 */
		int fsize;

		for (fsize = 128; fsize < (len>>1); fsize <<= 1) {
			if (fsize >= afifo->flush_size)
				break;
		}

		afifo->flush_size = fsize;
	}

	afifo->pages = buf;
	if (!afifo->pages)
		return -1;

	afifo->pages_map = dma_map_single(asyncfifo_get_dev(afifo),
		(void *)afifo->pages, len, DMA_FROM_DEVICE);

	return 0;
}
/* Detach and unmap the fifo's DMA buffer (pages themselves stay cached —
 * see asyncfifo_free_buffer above).
 */
static void asyncfifo_put_buffer(struct aml_asyncfifo *afifo)
{
	if
(afifo->pages) {
		dma_unmap_single(asyncfifo_get_dev(afifo),
			afifo->pages_map, asyncfifo_buf_len, DMA_FROM_DEVICE);
		asyncfifo_free_buffer(afifo->pages, asyncfifo_buf_len);
		afifo->pages_map = 0;
		afifo->pages = 0;
	}
}

/*
 * async_fifo_init - one-time setup of an async fifo (DVR path).
 * @afifo:   fifo to initialize (fails with -1 if already initialized)
 * @initirq: non-zero to request_irq() the fifo's interrupt, zero to just
 *           re-enable a previously requested one
 * @buf_len: DMA buffer length in bytes
 * @buf:     buffer from asyncfifo_alloc_buffer()
 *
 * Sets up the bottom-half tasklet, the interrupt, and the DMA buffer.
 * Returns asyncfifo_set_buffer()'s result, or -1 when no IRQ is wired.
 *
 * NOTE(review): the comment below says "Do not return error" yet the code
 * returns -1; and the request_irq() return value is stored in `irq` but
 * never checked — both look like latent bugs worth confirming.
 */
int async_fifo_init(struct aml_asyncfifo *afifo, int initirq,
			int buf_len, unsigned long buf)
{
	int ret = 0;
	int irq;

	if (afifo->init)
		return -1;

	afifo->source = AM_DMX_MAX;
	afifo->pages = 0;
	afifo->buf_toggle = 0;
	afifo->buf_read = 0;
	afifo->buf_len = 0;

	if (afifo->asyncfifo_irq == -1) {
		pr_error("no irq for ASYNC_FIFO%d\n", afifo->id);
		/*Do not return error*/
		return -1;
	}

	tasklet_init(&afifo->asyncfifo_tasklet,
		dvr_irq_bh_handler, (unsigned long)afifo);
	if (initirq)
		irq = request_irq(afifo->asyncfifo_irq, dvr_irq_handler,
				IRQF_SHARED|IRQF_TRIGGER_RISING,
				"dvr irq", afifo);
	else
		enable_irq(afifo->asyncfifo_irq);

	/*alloc buffer*/
	ret = asyncfifo_set_buffer(afifo, buf_len, buf);

	afifo->init = 1;

	return ret;
}

/*
 * async_fifo_deinit - tear down an async fifo.
 * @afifo:   fifo to tear down (no-op, returns 0, if not initialized)
 * @freeirq: non-zero to free_irq(), zero to merely disable_irq() so a later
 *           async_fifo_init(..., initirq=0, ...) can re-enable it
 *
 * Disables the hardware flush/fill bits under the dvb spinlock, releases
 * the DMA mapping, and kills the bottom-half tasklet.
 */
int async_fifo_deinit(struct aml_asyncfifo *afifo, int freeirq)
{
	struct aml_dvb *dvb = afifo->dvb;
	unsigned long flags;

	if (!afifo->init)
		return 0;

	spin_lock_irqsave(&dvb->slock, flags);
	CLEAR_ASYNC_FIFO_REG_MASK(afifo->id, REG1, 1 << ASYNC_FIFO_FLUSH_EN);
	CLEAR_ASYNC_FIFO_REG_MASK(afifo->id, REG2, 1 << ASYNC_FIFO_FILL_EN);
	spin_unlock_irqrestore(&dvb->slock, flags);

	asyncfifo_put_buffer(afifo);

	afifo->source = AM_DMX_MAX;
	afifo->buf_toggle = 0;
	afifo->buf_read = 0;
	afifo->buf_len = 0;

	if (afifo->asyncfifo_irq != -1) {
		if (freeirq)
			free_irq(afifo->asyncfifo_irq, afifo);
		else
			disable_irq(afifo->asyncfifo_irq);
	}
	tasklet_kill(&afifo->asyncfifo_tasklet);

	afifo->init = 0;

	return 0;
}

/*
 * _dmx_smallsec_enable - allocate (if needed) and enable the small-section
 * buffer for a demux.
 * @ss:      small-section state, ss->dmx identifies the demux
 * @bufsize: buffer size in bytes
 */
static int _dmx_smallsec_enable(struct aml_smallsec *ss, int bufsize)
{
	if (!ss->buf) {

		ss->buf = __get_free_pages(GFP_KERNEL,
					get_order(bufsize));
		if (!ss->buf) {
			pr_error("cannot allocate smallsec buffer\n"
"%d bytes %d order\n", + bufsize, get_order(bufsize)); + return -1; + } + ss->buf_map = dma_map_single(dmx_get_dev(ss->dmx), + (void *)ss->buf, + bufsize, DMA_FROM_DEVICE); + } + + DMX_WRITE_REG(ss->dmx->id, DEMUX_SMALL_SEC_ADDR, + ss->buf_map); + DMX_WRITE_REG(ss->dmx->id, DEMUX_SMALL_SEC_CTL, + ((((bufsize>>8)-1)&0xff)<<24) | + (1<<1) |/*enable reset the wr ptr*/ + (1<<0)); + + ss->bufsize = bufsize; + ss->enable = 1; + + pr_inf("demux%d smallsec buf start: %lx, size: %d\n", + ss->dmx->id, ss->buf, ss->bufsize); + return 0; +} + +static int _dmx_smallsec_disable(struct aml_smallsec *ss) +{ + DMX_WRITE_REG(ss->dmx->id, DEMUX_SMALL_SEC_CTL, 0); + if (ss->buf) { + dma_unmap_single(dmx_get_dev(ss->dmx), ss->buf_map, + ss->bufsize, DMA_FROM_DEVICE); + free_pages(ss->buf, get_order(ss->bufsize)); + ss->buf = 0; + ss->buf_map = 0; + } + ss->enable = 0; + pr_inf("demux%d smallsec buf disable\n", ss->dmx->id); + return 0; +} + +static int dmx_smallsec_set(struct aml_smallsec *ss, int enable, int bufsize, + int force) +{ + if (!enable) {/*disable*/ + + if (ss->enable || force) + _dmx_smallsec_disable(ss); + + } else {/*enable*/ + + if (bufsize < 0) + bufsize = SS_BUFSIZE_DEF; + else if (!bufsize) + bufsize = ss->bufsize; + else { + /*unit:FF max:FF00*/ + bufsize &= ~0xFF; + bufsize &= 0x1FF00; + } + + if ((ss->enable && (bufsize != ss->bufsize)) || force) + _dmx_smallsec_disable(ss); + + if (!ss->enable) + _dmx_smallsec_enable(ss, bufsize); + } + + return 0; +} + +static int _dmx_timeout_enable(struct aml_dmxtimeout *dto, int timeout, + int ch_dis, int match) +{ + + DMX_WRITE_REG(dto->dmx->id, DEMUX_INPUT_TIMEOUT_C, ch_dis); + DMX_WRITE_REG(dto->dmx->id, DEMUX_INPUT_TIMEOUT, + ((!!match)<<31) | + (timeout&0x7fffffff)); + + dto->ch_disable = ch_dis; + dto->match = match; + dto->timeout = timeout; + dto->trigger = 0; + dto->enable = 1; + + pr_inf("demux%d timeout enable:timeout(%d),ch(0x%x),match(%d)\n", + dto->dmx->id, dto->timeout, dto->ch_disable, dto->match); + + return 
0; +} +static int _dmx_timeout_disable(struct aml_dmxtimeout *dto) +{ + + DMX_WRITE_REG(dto->dmx->id, DEMUX_INPUT_TIMEOUT, 0); + dto->enable = 0; + dto->trigger = 0; + pr_inf("demux%d timeout disable\n", dto->dmx->id); + + return 0; +} + +static int dmx_timeout_set(struct aml_dmxtimeout *dto, int enable, + int timeout, int ch_dis, int match, + int force) +{ + + if (!enable) {/*disable*/ + + if (dto->enable || force) + _dmx_timeout_disable(dto); + + } else {/*enable*/ + + if (timeout < 0) { + timeout = DTO_TIMEOUT_DEF; + ch_dis = DTO_CHDIS_VAS; + match = dto->match; + } else if (!timeout) { + timeout = dto->timeout; + ch_dis = dto->ch_disable; + match = dto->match; + } + + if ((dto->enable && (timeout != dto->timeout)) + || force) + _dmx_timeout_disable(dto); + + if (!dto->enable) + _dmx_timeout_enable(dto, timeout, ch_dis, match); + } + + return 0; +} + +/*Initialize the registers*/ +static int dmx_init(struct aml_dmx *dmx) +{ + struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + int irq; + int ret = 0; + char buf[32]; + u32 value = 0; + + if (dmx->init) + return 0; + + pr_dbg("[dmx_kpi] %s Enter\n", __func__); + + memset(buf, 0, 32); + snprintf(buf, sizeof(buf), "asyncfifo_buf_len"); + ret = of_property_read_u32(dvb->pdev->dev.of_node, buf, &value); + if (!ret) { + pr_inf("%s: 0x%x\n", buf, value); + asyncfifo_buf_len = value; + } + /*Register irq handlers */ + if (dmx->dmx_irq != -1) { + pr_dbg("request irq\n"); + tasklet_init(&dmx->dmx_tasklet, + dmx_irq_bh_handler, + (unsigned long)dmx); + irq = request_irq(dmx->dmx_irq, dmx_irq_handler, + IRQF_SHARED|IRQF_TRIGGER_RISING, + "dmx irq", dmx); + } + + /*Allocate buffer */ + if (dmx_alloc_sec_buffer(dmx) < 0) + return -1; +#ifdef NO_SUB +#ifdef SUB_BUF_SHARED + if (dmx_alloc_sub_buffer_shared(dvb) < 0) + return -1; +#endif + if (dmx_alloc_sub_buffer(dvb, dmx) < 0) + return -1; +#endif +#ifdef PES_BUF_SHARED + if (dmx_alloc_pes_buffer_shared(dvb) < 0) + return -1; +#endif + if (dmx_alloc_pes_buffer(dvb, dmx) 
< 0) + return -1; + /*Reset the hardware */ + if (!dvb->dmx_init) { + timer_setup(&dvb->watchdog_timer, section_buffer_watchdog_func,0); +#ifdef ENABLE_SEC_BUFF_WATCHDOG + mod_timer(&dvb->watchdog_timer,jiffies + msecs_to_jiffies(WATCHDOG_TIMER)); +#endif + dmx_reset_hw(dvb); + } + + dvb->dmx_init++; + + memset(dmx->sec_buf_watchdog_count, 0, + sizeof(dmx->sec_buf_watchdog_count)); + + dmx->om_status_error_count = 0; + dmx->init = 1; + pr_dbg("[dmx_kpi] %s Exit\n", __func__); + return 0; +} + +/*Release the resource*/ +static int dmx_deinit(struct aml_dmx *dmx) +{ + struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + pr_dbg("[dmx_kpi] %s Enter\n", __func__); + if (!dmx->init) + return 0; + + DMX_WRITE_REG(dmx->id, DEMUX_CONTROL, 0); + + dvb->dmx_init--; + + /*Reset the hardware */ + if (!dvb->dmx_init) { + dmx_reset_hw(dvb); +#ifdef ENABLE_SEC_BUFF_WATCHDOG + del_timer_sync(&dvb->watchdog_timer); +#endif + } + + if (dmx->sec_pages) { + dma_unmap_single(dmx_get_dev(dmx), dmx->sec_pages_map, + dmx->sec_total_len, DMA_FROM_DEVICE); + free_pages(dmx->sec_pages, get_order(dmx->sec_total_len)); + dmx->sec_pages = 0; + dmx->sec_pages_map = 0; + } +#ifdef NO_SUB +#ifdef SUB_BUF_DMX +#ifdef SUB_BUF_SHARED + if (dvb->sub_pages) { + dma_unmap_single(dvb->dev, dvb->sub_pages_map, + dvb->sub_buf_len, DMA_FROM_DEVICE); + free_pages(dvb->sub_pages, get_order(dvb->sub_buf_len)); + dvb->sub_pages = 0; + } + dmx->sub_pages = 0; +#else + if (dmx->sub_pages) { + dma_unmap_single(dmx_get_dev(dmx), dmx->sub_pages_map, + dmx->sub_buf_len, DMA_FROM_DEVICE); + free_pages(dmx->sub_pages, get_order(dmx->sub_buf_len)); + dmx->sub_pages = 0; + } +#endif +#endif +#endif +#ifdef PES_BUF_SHARED + if (dvb->pes_pages) { + dma_unmap_single(dvb->dev, dvb->pes_pages_map, + dvb->pes_buf_len, DMA_FROM_DEVICE); + free_pages(dvb->pes_pages, get_order(dvb->pes_buf_len)); + dvb->pes_pages = 0; + } + dmx->pes_pages = 0; +#else + if (dmx->pes_pages) { + dma_unmap_single(dmx_get_dev(dmx), 
dmx->pes_pages_map, + dmx->pes_buf_len, DMA_FROM_DEVICE); + free_pages(dmx->pes_pages, get_order(dmx->pes_buf_len)); + dmx->pes_pages = 0; + } +#endif + if (dmx->dmx_irq != -1) { + free_irq(dmx->dmx_irq, dmx); + tasklet_kill(&dmx->dmx_tasklet); + } + + dmx->init = 0; + pr_dbg("[dmx_kpi] %s Exit\n", __func__); + return 0; +} + +/*Check the record flag*/ +static int dmx_get_record_flag(struct aml_dmx *dmx) +{ + int i, linked = 0, record_flag = 0; + struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + + /*Check whether a async fifo connected to this dmx */ + for (i = 0; i < dvb->async_fifo_total_count; i++) { + if (!dvb->asyncfifo[i].init) + continue; + if ((dvb->asyncfifo[i].source == dmx->id) + /*&& !(dvb->swfilter.user && (i==SF_AFIFO_ID)) */ + /*sf mode reserved */ + ) { + linked = 1; + break; + } + } + + for (i = 0; i < CHANNEL_COUNT; i++) { + if (dmx->channel[i].used && dmx->channel[i].dvr_feed) { + if (!dmx->record) { + pr_error("dmx_get_record_flag set record dmx->id: %d\n", dmx->id); + dmx->record = 1; + + if (linked) { + /*A new record will start, + * must reset the async fifos for + * linking the right demux + */ + reset_async_fifos(dvb); + } + } + if (linked) + record_flag = 1; + goto find_done; + } + } + + if (dmx->record) { + pr_error("dmx_get_record_flag clear record dmx->id: %d\n", dmx->id); + dmx->record = 0; + if (linked) { + /*A record will stop, reset the async fifos + *for linking the right demux + */ + reset_async_fifos(dvb); + } + } + +find_done: + return record_flag; +} + +static void dmx_cascade_set(int cur_dmx, int source) { + int fec_sel_demux = 0; + int data; + + switch (source) { + case AM_TS_SRC_DMX0: + case AM_TS_SRC_DMX1: + case AM_TS_SRC_DMX2: + fec_sel_demux = source -AM_TS_SRC_DMX0; + break; + default: + fec_sel_demux = cur_dmx; + break; + } + + data = READ_MPEG_REG(TS_TOP_CONFIG1); + data &= ~(0x3 << (cur_dmx*2)); + data |= (fec_sel_demux << (cur_dmx*2)); + WRITE_MPEG_REG(TS_TOP_CONFIG1,data); + + pr_dbg("%s id:%d, source:%d 
data:0x%0x\n",__FUNCTION__,cur_dmx,fec_sel_demux,data); +} + +/*Enable the demux device*/ +static int dmx_enable(struct aml_dmx *dmx) +{ + struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + int fec_sel, hi_bsf, fec_ctrl, record; + int fec_core_sel = 0; + int set_stb = 0, fec_s = 0; + int s2p_id; + u32 invert0 = 0, invert1 = 0, invert2 = 0, fec_s0 = 0, fec_s1 = 0, fec_s2 = 0; + u32 use_sop = 0; + int i = 0; + + record = dmx_get_record_flag(dmx); + if (use_of_sop == 1) { + use_sop = 1; + pr_inf("dmx use of sop input\r\n"); + } + switch (dmx->source) { + case AM_TS_SRC_TS0: + fec_sel = 0; + fec_ctrl = dvb->ts[0].control; + record = record ? 1 : 0; + break; + case AM_TS_SRC_TS1: + fec_sel = 1; + fec_ctrl = dvb->ts[1].control; + record = record ? 1 : 0; + break; + case AM_TS_SRC_TS2: + fec_sel = 2; + fec_ctrl = dvb->ts[2].control; + record = record ? 1 : 0; + break; + case AM_TS_SRC_TS3: + fec_sel = 3; + fec_ctrl = dvb->ts[3].control; + record = record ? 1 : 0; + break; + case AM_TS_SRC_S_TS0: + case AM_TS_SRC_S_TS1: + case AM_TS_SRC_S_TS2: + s2p_id = 0; + fec_ctrl = 0; + if (dmx->source == AM_TS_SRC_S_TS0) { + s2p_id = 0; + } else if (dmx->source == AM_TS_SRC_S_TS1) { + s2p_id = 1; + } else if (dmx->source == AM_TS_SRC_S_TS2) { + s2p_id = 2; + } + for (i = 0; i < dvb->s2p_total_count; i++) { + if (dvb->ts[i].s2p_id == s2p_id) { + fec_ctrl = dvb->ts[i].control; + } + } + //fec_sel = (s2p_id == 1) ? 5 : 6; + fec_sel = 6 - s2p_id; + record = record ? 1 : 0; + set_stb = 1; + fec_s = dmx->source - AM_TS_SRC_S_TS0; + break; + case AM_TS_SRC_HIU: + fec_sel = 7; + fec_ctrl = 0; + /* + support record in HIU mode + record = 0; + */ + break; + case AM_TS_SRC_HIU1: + fec_sel = 8; + fec_ctrl = 0; + /* + support record in HIU mode + record = 0; + */ + break; + case AM_TS_SRC_DMX0: + case AM_TS_SRC_DMX1: + case AM_TS_SRC_DMX2: + fec_sel = -1; + fec_ctrl = 0; + record = record ? 
1 : 0;
		break;
	default:
		fec_sel = 0;
		fec_ctrl = 0;
		record = 0;
		break;
	}

	/* HIU bus interface is needed when the video(0)/audio(1) channels are
	 * in use; value 2 selects the HIU1 interface (fec_sel == 8 above).
	 */
	if (dmx->channel[0].used || dmx->channel[1].used) {
		hi_bsf = 1;
		if (fec_sel == 8) {
			hi_bsf = 2; /*hi_bsf select hiu1*/
		}
	}else {
		hi_bsf = 0;
	}
	/* Route the descrambler core to this demux if dsc0 targets it. */
	if ((dvb->dsc[0].dst != -1)
	    && ((dvb->dsc[0].dst - AM_TS_SRC_DMX0) == dmx->id))
		fec_core_sel = 1;

	/* dsc1 uses the separate COMM_DESC_2 descrambler block. */
	if ((dvb->dsc[1].dst != -1)
	    && ((dvb->dsc[1].dst - AM_TS_SRC_DMX0) == dmx->id)) {
		int des_in, des_out, en_des = 0;

		switch (dvb->dsc[1].source) {
		case AM_TS_SRC_DMX0:
			des_in = 0;
			en_des = 1;
			break;
		case AM_TS_SRC_DMX1:
			des_in = 1;
			en_des = 1;
			break;
		case AM_TS_SRC_DMX2:
			des_in = 2;
			en_des = 1;
			break;
		default:
			des_in = 0;
			en_des = 0;
			break;
		}

		/* des_out is a one-hot per-demux enable mask. */
		switch (dvb->dsc[1].dst) {
		case AM_TS_SRC_DMX0:
			des_out = 1;
			break;
		case AM_TS_SRC_DMX1:
			des_out = 2;
			break;
		case AM_TS_SRC_DMX2:
			des_out = 4;
			break;
		default:
			des_out = 0;
			break;
		}

		if (!des_out)
			en_des = 0;

		WRITE_MPEG_REG(COMM_DESC_2_CTL,
			(6 << 8) |/*des_out_dly_2*/
			((!!en_des) << 6) |/* des_pl_clk_2*/
			((!!en_des) << 5) |/* des_pl_2*/
			(des_out << 2) |/*use_des_2*/
			(des_in)/*des_i_sel_2*/
			);
		fec_core_sel = 1;
		pr_dbg("dsc2 ctrl: 0x%x\n", READ_MPEG_REG(COMM_DESC_2_CTL));
	}

	pr_dbg("[dmx-%d]src: %d, rec: %d, hi_bsf: %d, dsc: %d\n",
	       dmx->id, dmx->source, record, hi_bsf, fec_core_sel);

	/* Only fully program the hardware while channels exist. */
	if (dmx->chan_count) {
		if (set_stb) {
			u32 v = READ_MPEG_REG(STB_TOP_CONFIG);
			int i;

			/* Map each s2p unit back to the ts input feeding it. */
			for (i = 0; i < dvb->ts_in_total_count; i++) {
				if (dvb->ts[i].s2p_id == 0)
					fec_s0 = i;
				else if (dvb->ts[i].s2p_id == 1)
					fec_s1 = i;
				else if (dvb->ts[i].s2p_id == 2)
					fec_s2 = i;
			}

			invert0 = dvb->s2p[0].invert;
			invert1 = dvb->s2p[1].invert;

			v &= ~((0x3 << S2P0_FEC_SERIAL_SEL) |
			       (0x1f << INVERT_S2P0_FEC_CLK) |
			       (0x3 << S2P1_FEC_SERIAL_SEL) |
			       (0x1f << INVERT_S2P1_FEC_CLK));

			v |= (fec_s0 << S2P0_FEC_SERIAL_SEL) |
			     (invert0 << INVERT_S2P0_FEC_CLK) |
			     (fec_s1 << 
S2P1_FEC_SERIAL_SEL) |
			     (invert1 << INVERT_S2P1_FEC_CLK);
			WRITE_MPEG_REG(STB_TOP_CONFIG, v);

			/* TL1 and later have a third s2p in its own register. */
			if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TL1) {
				invert2 = dvb->s2p[2].invert;

				//add s2p2 config
				v = READ_MPEG_REG(STB_S2P2_CONFIG);
				v &= ~((0x3 << S2P2_FEC_SERIAL_SEL) |
				       (0x1f << INVERT_S2P2_FEC_CLK));
				v |= (fec_s2 << S2P2_FEC_SERIAL_SEL) |
				     (invert2 << INVERT_S2P2_FEC_CLK);
				WRITE_MPEG_REG(STB_S2P2_CONFIG, v);
			}
		}

		/*Initialize the registers */
		DMX_WRITE_REG(dmx->id, STB_INT_MASK, DEMUX_INT_MASK);
		DMX_WRITE_REG(dmx->id, DEMUX_MEM_REQ_EN,
#ifdef USE_AHB_MODE
			(1 << SECTION_AHB_DMA_EN) |
			(0 << SUB_AHB_DMA_EN) |
			(1 << OTHER_PES_AHB_DMA_EN) |
#endif
			(1 << SECTION_PACKET) |
			(1 << VIDEO_PACKET) |
			(1 << AUDIO_PACKET) |
			(1 << SUB_PACKET) |
			(1 << SCR_ONLY_PACKET) |
			(1 << OTHER_PES_PACKET));
		DMX_WRITE_REG(dmx->id, PES_STRONG_SYNC, 0x1234);
		DMX_WRITE_REG(dmx->id, DEMUX_ENDIAN,
			(1<<SEPERATE_ENDIAN) |
			(0<<OTHER_PES_ENDIAN) |
			(7<<SCR_ENDIAN) |
			(7<<SUB_ENDIAN) |
			(7<<AUDIO_ENDIAN) |
			(7<<VIDEO_ENDIAN) |
			(7 << OTHER_ENDIAN) |
			(7 << BYPASS_ENDIAN) | (0 << SECTION_ENDIAN));
		/* fec_sel == 8 is the HIU1 path and needs PDTS_WR_SEL set. */
		if (fec_sel != 8) {
			DMX_WRITE_REG(dmx->id, TS_HIU_CTL,
//				      (0 << LAST_BURST_THRESHOLD) |
				      (hi_bsf << USE_HI_BSF_INTERFACE));
		} else {
			DMX_WRITE_REG(dmx->id, TS_HIU_CTL,
				      (1 << PDTS_WR_SEL) |
				      (hi_bsf << USE_HI_BSF_INTERFACE));
		}

		/* CI+ output can force the descrambler core onto this demux
		 * when auto mode is off and this demux is in ciplus_out_sel.
		 */
		if (!fec_core_sel) {
			u32 cfg = READ_MPEG_REG(CIPLUS_CONFIG);

			if (cfg & (1 << CNTL_ENABLE)) {
				if (!ciplus_out_auto_mode) {
					int mask = 1 << dmx->id;

					if (ciplus_out_sel & mask)
						fec_core_sel = 1;
				}
			}
		}

		/* fec_sel == -1: cascaded input, FEC_SEL field stays 0. */
		if (fec_sel == -1) {
			dmx_cascade_set(dmx->id,dmx->source);
			DMX_WRITE_REG(dmx->id, FEC_INPUT_CONTROL,
				      (fec_core_sel << FEC_CORE_SEL) |
				      (0 << FEC_SEL) | (fec_ctrl << 0));
		} else {
			dmx_cascade_set(dmx->id,dmx->source);
			if (fec_sel != 8) {
				DMX_WRITE_REG(dmx->id, FEC_INPUT_CONTROL,
					      (fec_core_sel << FEC_CORE_SEL) |
					      (fec_sel << FEC_SEL) | (fec_ctrl << 0));
			} else {
				DMX_WRITE_REG(dmx->id, FEC_INPUT_CONTROL,
					      (fec_core_sel << FEC_CORE_SEL) |
					      /* value 8 needs the extra
					       * FEC_SEL_3BIT high bit
					       */
					      (1 << FEC_SEL_3BIT) | (fec_ctrl << 0));
			}
		}
		DMX_WRITE_REG(dmx->id, STB_OM_CTL,
			      (0x40 << MAX_OM_DMA_COUNT) |
			      (0x7f << LAST_OM_ADDR));

		/*RECORDER_STREAM depends on video2*/
		/*VIDEO_STREAM_ID: video2_stream_id (bit[31:16])*/
		/*DEMUX_CONTROL:
		bit[25] video2_en
		bit[24:22] video2_type*/
		#define VIDEO2_FOR_RECORDER_STREAM (1 << 25 | 7 << 22)

		DMX_WRITE_REG(dmx->id, VIDEO_STREAM_ID,
			((record) ? 0xFFFF0000 : 0));

		DMX_WRITE_REG(dmx->id, DEMUX_CONTROL,
			(0 << BYPASS_USE_RECODER_PATH) |
			(0 << INSERT_AUDIO_PES_STRONG_SYNC) |
			(0 << INSERT_VIDEO_PES_STRONG_SYNC) |
			(0 << OTHER_INT_AT_PES_BEGINING) |
			(0 << DISCARD_AV_PACKAGE) |
			((!!dmx->dump_ts_select) << TS_RECORDER_SELECT) |
			(record << TS_RECORDER_ENABLE) |
			(1 << KEEP_DUPLICATE_PACKAGE) |
			(1 << SECTION_END_WITH_TABLE_ID) |
			(1 << ENABLE_FREE_CLK_FEC_DATA_VALID) |
			(1 << ENABLE_FREE_CLK_STB_REG) |
			(1 << STB_DEMUX_ENABLE) |
			(use_sop << NOT_USE_OF_SOP_INPUT) |
			((record)? VIDEO2_FOR_RECORDER_STREAM : 0));
		pr_dbg("dmx control[%#x]\n",
		       DMX_READ_REG(dmx->id, DEMUX_CONTROL));
	} else {
		/* No channels: mask interrupts and disable the demux, but
		 * keep its cascade routing alive for demuxes fed from it.
		 */
		DMX_WRITE_REG(dmx->id, STB_INT_MASK, 0);
		/* if disable FEC_INPUT_CONTROL, background and unattended record will fail */
		// DMX_WRITE_REG(dmx->id, FEC_INPUT_CONTROL, 0);
		DMX_WRITE_REG(dmx->id, DEMUX_CONTROL, 0);
		//dmx not used, but it can cascade for other dmx
		if ((dmx->source == AM_TS_SRC_DMX0 ||
		     dmx->source == AM_TS_SRC_DMX1 ||
		     dmx->source == AM_TS_SRC_DMX2 ) &&
		    (dmx->id != dmx->source-AM_TS_SRC_DMX0))
			dmx_cascade_set(dmx->id,dmx->source);
	}
	return 0;
}

/*
 * Tweak two independent bits without a full dmx_enable() pass.
 * hi_bsf / en_dsc: <0 leaves the bit untouched, 0 clears it, >0 sets it
 * (USE_HI_BSF_INTERFACE in TS_HIU_CTL, FEC_CORE_SEL in FEC_INPUT_CONTROL).
 * Always returns 0.
 */
static int dmx_set_misc(struct aml_dmx *dmx, int hi_bsf, int en_dsc)
{
	if (hi_bsf >= 0) {
		DMX_WRITE_REG(dmx->id, TS_HIU_CTL,
			      hi_bsf ? 
			      (DMX_READ_REG(dmx->id, TS_HIU_CTL) |
			       (1 << USE_HI_BSF_INTERFACE))
			      :
			      (DMX_READ_REG(dmx->id, TS_HIU_CTL) &
			       (~(1 << USE_HI_BSF_INTERFACE))));
	}

	if (en_dsc >= 0) {
		DMX_WRITE_REG(dmx->id, FEC_INPUT_CONTROL,
			      en_dsc ?
			      (DMX_READ_REG(dmx->id, FEC_INPUT_CONTROL) |
			       (1 << FEC_CORE_SEL))
			      :
			      (DMX_READ_REG(dmx->id, FEC_INPUT_CONTROL) &
			       (~(1 << FEC_CORE_SEL))));
	}

	return 0;
}

/* Convenience wrapper: dmx_set_misc() addressed by demux index. */
static int dmx_set_misc_id(struct aml_dvb *dvb, int id, int hi_bsf, int en_dsc)
{
	return dmx_set_misc(&dvb->dmx[id], hi_bsf, en_dsc);
}

/*Get the channel's ID by its PID*/
/* Returns the channel index carrying this PID, or -1 if none. */
static int dmx_get_chan(struct aml_dmx *dmx, int pid)
{
	int id;

	for (id = 0; id < CHANNEL_COUNT; id++) {
		if (dmx->channel[id].used && dmx->channel[id].pid == pid)
			return id;
	}

	return -1;
}

/*Get the channel's target*/
/*
 * Build the 16-bit filter-memory "target" word for a channel:
 * packet type in the PID_TYPE field, PID in the low bits.  Unused channels
 * get type 0x7 with the default PID.  Also caches the resolved packet type
 * in channel[cid].pkt_type for the debug interface.
 */
static u32 dmx_get_chan_target(struct aml_dmx *dmx, int cid)
{
	u32 type;

	if (!dmx->channel[cid].used)
		return (0x7 << PID_TYPE) | g_chan_def_pid;

	if (dmx->channel[cid].type == DMX_TYPE_SEC) {
		type = SECTION_PACKET;
	} else {
		switch (dmx->channel[cid].pes_type) {
		case DMX_PES_AUDIO:
			type = AUDIO_PACKET;
			break;
		case DMX_PES_VIDEO:
			type = VIDEO_PACKET;
			break;
		case DMX_PES_SUBTITLE:
		case DMX_PES_TELETEXT:
			type = SUB_PACKET;
			break;
		case DMX_PES_PCR:
			type = SCR_ONLY_PACKET;
			break;
		case DMX_PES_AUDIO3:
			type = OTHER_PES_PACKET;
			break;
		default:
			type = RECORDER_STREAM;
			break;
		}
	}
	dmx->channel[cid].pkt_type = type;

	pr_dbg("chan target: %x %x\n", type, dmx->channel[cid].pid);
	return (type << PID_TYPE) | dmx->channel[cid].pid;
}

/*Get the advance value of the channel*/
/* Advance bytes are not used for channels; always 0. */
static inline u32 dmx_get_chan_advance(struct aml_dmx *dmx, int cid)
{
	return 0;
}

/*Set the channel registers*/
/*
 * Write one channel pair into the demux filter memory (FM).  The FM is
 * addressed in pairs, so the even/odd sibling of cid is re-encoded in the
 * same 32-bit write; MAX_FM_COMP_ADDR is then refreshed with the highest
 * used channel.  Returns 0.
 */
static int dmx_set_chan_regs(struct aml_dmx *dmx, int cid)
{
	u32 data, addr, advance, max;

	pr_dbg("set channel (id:%d PID:0x%x) registers\n", cid,
	       dmx->channel[cid].pid);

	/* Busy-wait until the previous FM write command completes. */
	while (DMX_READ_REG(dmx->id, 
		FM_WR_ADDR) & 0x8000)
		udelay(1);

	/* Pack this channel and its even/odd sibling into one FM word. */
	if (cid & 1) {
		data =
		    (dmx_get_chan_target(dmx, cid - 1) << 16) |
		    dmx_get_chan_target(dmx, cid);
		advance =
		    (dmx_get_chan_advance(dmx, cid) << 8) |
		    dmx_get_chan_advance(dmx, cid - 1);

		if (dmx->channel[cid - 1].used)
			set_debug_dmx_chanpids_types(dmx->id, cid - 1,
				dmx->channel[cid - 1].pkt_type);
	} else {
		data =
		    (dmx_get_chan_target(dmx, cid) << 16) |
		    dmx_get_chan_target(dmx, cid + 1);
		advance =
		    (dmx_get_chan_advance(dmx, cid + 1) << 8) |
		    dmx_get_chan_advance(dmx, cid);

		if (dmx->channel[cid + 1].used)
			set_debug_dmx_chanpids_types(dmx->id, cid + 1,
				dmx->channel[cid + 1].pkt_type);
	}
	addr = cid >> 1;
	/* Bit 15 of FM_WR_ADDR triggers the write command. */
	DMX_WRITE_REG(dmx->id, FM_WR_DATA, data);
	DMX_WRITE_REG(dmx->id, FM_WR_ADDR, (advance << 16) | 0x8000 | addr);

	pr_dbg("write fm %x:%x\n", (advance << 16) | 0x8000 | addr, data);

	/* Highest used channel bounds the hardware compare range. */
	for (max = CHANNEL_COUNT - 1; max > 0; max--) {
		if (dmx->channel[max].used)
			break;
	}

	data = DMX_READ_REG(dmx->id, MAX_FM_COMP_ADDR) & 0xF0;
	DMX_WRITE_REG(dmx->id, MAX_FM_COMP_ADDR, data | (max >> 1));

	pr_dbg("write fm comp %x\n", data | (max >> 1));

	if (DMX_READ_REG(dmx->id, OM_CMD_STATUS) & 0x8e00) {
		pr_error("warning: send cmd %x\n",
			 DMX_READ_REG(dmx->id, OM_CMD_STATUS));
	}

	/* Channel 0/1 are the fixed video/audio channels; reset their PTS. */
	if (cid == 0) {
		video_pts = 0;
		first_video_pts = 0;
	}
	else if (cid == 1) {
		audio_pts = 0;
		first_audio_pts = 0;
	}

	if (dmx->channel[cid].used)
		set_debug_dmx_chanpids_types(dmx->id, cid,
			dmx->channel[cid].pkt_type);
	return 0;
}

/*Get the filter target*/
/*
 * Encode section filter fid into FILTER_LEN target words plus per-byte
 * advance masks for the filter memory.  Unused filters are encoded as
 * match-nothing entries.  Byte 0 is split into high/low nibble compares;
 * bytes 1-2 are never compared (section length field); bytes >= 3 support
 * equality and, when exactly one NEQ byte exists, not-equal matching.
 * Also refreshes f->value / f->maskandmode / f->maskandnotmode / f->neq
 * used by the software match path.  Returns 0.
 */
static int dmx_get_filter_target(struct aml_dmx *dmx, int fid, u32 *target,
				 u8 *advance)
{
	struct dmx_section_filter *filter;
	struct aml_filter *f;
	int i, cid, neq_bytes;

	fid = fid & 0xFFFF;
	f = &dmx->filter[fid];

	if (!f->used) {
		target[0] = 0x1fff;
		advance[0] = 0;
		for (i = 1; i < FILTER_LEN; i++) {
			target[i] = 0x9fff;
			advance[i] = 0;
		}
		return 0;
	}

	cid = f->chan_id;
	filter = f->filter;

	/* Count not-equal (mode != 0xFF) bytes; a NEQ on byte 0 counts as 2
	 * (both nibbles), which disables the single-NEQ hardware encoding.
	 */
	neq_bytes = 0;
	if (filter->filter_mode[0] != 0xFF) {
		neq_bytes = 2;
	} else {
		for (i = 3; i < FILTER_LEN; i++) {
			if (filter->filter_mode[i] != 0xFF)
				neq_bytes++;
		}
	}

	f->neq = 0;

	for (i = 0; i < FILTER_LEN; i++) {
		u8 value = filter->filter_value[i];
		u8 mask = filter->filter_mask[i];
		u8 mode = filter->filter_mode[i];
		u8 mb, mb1, nb, v, t, adv = 0;

		if (!i) {
			/* Byte 0 (table_id): nibble-wise compare.  mb/mb1
			 * are "mask off" flags for low/high nibble; adv holds
			 * the don't-care bits within a compared nibble.
			 */
			mb = 1;
			mb1 = 1;
			v = 0;
			if ((mode == 0xFF) && mask) {
				t = mask & 0xF0;
				if (t) {
					mb1 = 0;
					adv |= t^0xF0;
				}
				v |= (value & 0xF0) | adv;

				t = mask & 0x0F;
				if (t) {
					mb = 0;
					adv |= t^0x0F;
				}
				v |= (value & 0x0F) | adv;
			}

			target[i] = (mb << SECTION_FIRSTBYTE_MASKLOW) |
				(mb1 << SECTION_FIRSTBYTE_MASKHIGH) |
				(0 << SECTION_FIRSTBYTE_DISABLE_PID_CHECK) |
				(cid << SECTION_FIRSTBYTE_PID_INDEX) | v;
			advance[i] = adv;
		} else {
			/* Bytes 1-2 carry the section length: force them to
			 * "don't care" regardless of the caller's filter.
			 */
			if (i < 3) {
				value = 0;
				mask = 0;
				mode = 0xff;
			}
			mb = 1;
			nb = 0;
			v = 0;

			if ((i >= 3) && mask) {
				if (mode == 0xFF) {
					mb = 0;
					nb = 0;
					adv = mask ^ 0xFF;
					v = value | adv;
				} else {
					/* Hardware NEQ only works when this
					 * is the single NEQ byte; otherwise
					 * the software path (f->neq) handles
					 * it.
					 */
					if (neq_bytes == 1) {
						mb = 0;
						nb = 1;
						adv = mask ^ 0xFF;
						v = value & ~adv;
					}
				}
			}
			target[i] = (mb << SECTION_RESTBYTE_MASK) |
				(nb << SECTION_RESTBYTE_MASK_EQ) |
				(0 << SECTION_RESTBYTE_DISABLE_PID_CHECK) |
				(cid << SECTION_RESTBYTE_PID_INDEX) | v;
			advance[i] = adv;
		}

		/* Shadow copies for the software section-match fallback. */
		f->value[i] = value;
		f->maskandmode[i] = mask & mode;
		f->maskandnotmode[i] = mask & ~mode;

		if (f->maskandnotmode[i])
			f->neq = 1;
	}

	return 0;
}

/*Set the filter registers*/
/*
 * Program section filter fid (and its even/odd pair sibling) into the
 * demux filter memory, one FM word per filter byte, then refresh the
 * highest-used-filter bound in MAX_FM_COMP_ADDR.  Returns 0.
 */
static int dmx_set_filter_regs(struct aml_dmx *dmx, int fid)
{
	u32 t1[FILTER_LEN], t2[FILTER_LEN];
	u8 advance1[FILTER_LEN], advance2[FILTER_LEN];
	u32 addr, data, max, adv;
	int i;

	pr_dbg("set filter (id:%d) registers\n", fid);

	/* Filters are written in pairs, like channels. */
	if (fid & 1) {
		dmx_get_filter_target(dmx, fid - 1, t1, advance1);
		dmx_get_filter_target(dmx, fid, t2, advance2);
	} else {
		dmx_get_filter_target(dmx, fid, t1, advance1);
		dmx_get_filter_target(dmx, fid + 1, t2, advance2);
	}

	for (i = 0; i < FILTER_LEN; i++) {
		/* Wait for the previous FM command before issuing the next. */
		while (DMX_READ_REG(dmx->id, FM_WR_ADDR) & 0x8000)
			udelay(1);

		data = (t1[i] << 16) | t2[i];
		addr = (fid >> 1) | ((i + 1) << 4);
		adv = (advance1[i] << 8) | advance2[i];

		DMX_WRITE_REG(dmx->id, FM_WR_DATA, data);
		DMX_WRITE_REG(dmx->id, FM_WR_ADDR, (adv << 16) | 0x8000 | addr);

		pr_dbg("write fm %x:%x\n", (adv << 16) | 0x8000 | addr, data);
	}

	/* Highest used filter bounds the hardware compare range. */
	for (max = FILTER_COUNT - 1; max > 0; max--) {
		if (dmx->filter[max].used)
			break;
	}

	data = DMX_READ_REG(dmx->id, MAX_FM_COMP_ADDR) & 0xF;
	DMX_WRITE_REG(dmx->id, MAX_FM_COMP_ADDR, data | ((max >> 1) << 4));

	pr_dbg("write fm comp %x\n", data | ((max >> 1) << 4));

	if (DMX_READ_REG(dmx->id, OM_CMD_STATUS) & 0x8e00) {
		pr_error("error send cmd %x\n",
			 DMX_READ_REG(dmx->id, OM_CMD_STATUS));
	}

	return 0;
}

/*Clear the filter's buffer*/
/*
 * Release every pending section buffer owned by filter fid: probe each
 * busy buffer's owning filter via SEC_BUFF_NUMBER and write the remaining
 * mask back to SEC_BUFF_READY.
 */
static void dmx_clear_filter_buffer(struct aml_dmx *dmx, int fid)
{
	u32 section_busy32 = DMX_READ_REG(dmx->id, SEC_BUFF_READY);
	u32 filter_number;
	int i;

	if (!section_busy32)
		return;

	for (i = 0; i < SEC_BUF_COUNT; i++) {
		if (section_busy32 & (1 << i)) {
			DMX_WRITE_REG(dmx->id, SEC_BUFF_NUMBER, i);
			filter_number =
			    (DMX_READ_REG(dmx->id, SEC_BUFF_NUMBER) >> 8);
			/* Keep only the buffers belonging to fid. */
			if (filter_number != fid)
				section_busy32 &= ~(1 << i);
		}
	}

	if (section_busy32)
		DMX_WRITE_REG(dmx->id, SEC_BUFF_READY, section_busy32);
}

/*
 * Stop one async FIFO: clear its flush and fill enables, verify they
 * actually cleared, and reset the software buffer cursors.
 */
static void async_fifo_disable(struct aml_asyncfifo *afifo)
{
	pr_inf("AF(%d) disable asyncfifo\n", afifo->id);
	CLEAR_ASYNC_FIFO_REG_MASK(afifo->id, REG1, 1 << ASYNC_FIFO_FLUSH_EN);
	CLEAR_ASYNC_FIFO_REG_MASK(afifo->id, REG2, 1 << ASYNC_FIFO_FILL_EN);
	if (READ_ASYNC_FIFO_REG(afifo->id, REG2) & (1 << ASYNC_FIFO_FILL_EN)
	    || READ_ASYNC_FIFO_REG(afifo->id, REG1)
	    & (1 << ASYNC_FIFO_FLUSH_EN)) {
		pr_error("disable failed\n");
	} else
		pr_inf("disable ok\n");
	afifo->buf_toggle = 0;
	afifo->buf_read = 0;
}

/*
 * Fully (re)program one async FIFO: destination buffer, flush size, fill
 * path, flush-interrupt factor and demux source.  No-ops when already
 * enabled with identical settings; source-only changes avoid a full reset.
 */
static 
void async_fifo_set_regs(struct aml_asyncfifo *afifo, int source_val)
{
	/* Secure path uses the pre-allocated secure block's physical address;
	 * otherwise the FIFO writes into the driver's own pages.
	 */
	u32 start_addr = (afifo->secure_enable && afifo->blk.addr)?
		afifo->blk.addr : virt_to_phys((void *)afifo->pages);
	u32 size = afifo->buf_len;
	u32 flush_size = afifo->flush_size;
	int factor = dmx_get_order(size / flush_size);
	u32 old_size, new_size, old_factor, new_factor;
	int old_src, old_en;

	/* Snapshot the currently programmed state for change detection. */
	old_en = READ_ASYNC_FIFO_REG(afifo->id, REG2)
		& (1 << ASYNC_FIFO_FILL_EN);
	old_src =
		(READ_ASYNC_FIFO_REG(afifo->id, REG2) >> ASYNC_FIFO_SOURCE_LSB)
		& 3;

	/* Sizes are in 128-byte units (>> 7), 15 bits wide. */
	new_size = (size >> 7) & 0x7fff;
	old_size =
		(READ_ASYNC_FIFO_REG(afifo->id, REG1)
		 >> ASYNC_FIFO_FLUSH_CNT_LSB)
		& 0x7fff;

	old_factor =
		(READ_ASYNC_FIFO_REG(afifo->id, REG3)
		 >> ASYNC_FLUSH_SIZE_IRQ_LSB)
		& 0x7fff;
	new_factor = ((size >> (factor + 7)) - 1) & 0x7fff;

	pr_inf("AF(%d) [%s] src:0x%x->0x%x size:0x%x->0x%x factor:0x%x->0x%x\n",
	       afifo->id,
	       old_en? "on" : "off",
	       old_src, source_val,
	       old_size, new_size,
	       old_factor, new_factor);

	/* Already running with the same configuration: nothing to do. */
	if (old_en
	    && (old_src == source_val)
	    && (new_size == old_size)
	    && (old_factor == new_factor))
		return;

	if (old_en) {
		if ((old_size == new_size)
		    && (old_factor == new_factor)) {
			/*only source changed, do not reset all*/
			/* Connect the DEMUX to ASYNC_FIFO */
			WRITE_ASYNC_FIFO_REG(afifo->id, REG2,
				(READ_ASYNC_FIFO_REG(afifo->id, REG2)
				 & ~(0x3 << ASYNC_FIFO_SOURCE_LSB))
				| (source_val << ASYNC_FIFO_SOURCE_LSB));
			return;
		} else {
			/*Dynamic change setting is not supported,
			 *should disable it first.
			 *Data discontinue will be here. 
			 */
			async_fifo_disable(afifo);
		}
	}

	pr_inf("ASYNC FIFO id=%d, link to DMX%d, start_addr %x, buf_size %d,"
	       "source value 0x%x, factor %d\n",
	       afifo->id, afifo->source, start_addr, size, source_val, factor);

	/* Destination address */
	WRITE_ASYNC_FIFO_REG(afifo->id, REG0, start_addr);

	/* Setup flush parameters */
	WRITE_ASYNC_FIFO_REG(afifo->id, REG1,
		(0 << ASYNC_FIFO_TO_HIU) |
		(0 << ASYNC_FIFO_FLUSH) |
		/* don't flush the path */
		(1 << ASYNC_FIFO_RESET) |
		/* reset the path */
		(1 << ASYNC_FIFO_WRAP_EN) |
		/* wrap enable */
		(0 << ASYNC_FIFO_FLUSH_EN) |
		/* disable the flush path */
		/*(0x3 << ASYNC_FIFO_FLUSH_CNT_LSB);
		 * flush 3 x 32 32-bit words
		 */
		/*(0x7fff << ASYNC_FIFO_FLUSH_CNT_LSB);
		 * flush 4MBytes of data
		 */
		(((size >> 7) & 0x7fff) << ASYNC_FIFO_FLUSH_CNT_LSB));
	/* number of 128-byte blocks to flush */

	/* clear the reset signal */
	WRITE_ASYNC_FIFO_REG(afifo->id, REG1,
		READ_ASYNC_FIFO_REG(afifo->id,
			REG1) & ~(1 << ASYNC_FIFO_RESET));
	/* Enable flush */
	WRITE_ASYNC_FIFO_REG(afifo->id, REG1,
		READ_ASYNC_FIFO_REG(afifo->id,
			REG1) | (1 << ASYNC_FIFO_FLUSH_EN));

	/*Setup Fill parameters */
	WRITE_ASYNC_FIFO_REG(afifo->id, REG2,
		(1 << ASYNC_FIFO_ENDIAN_LSB) |
		(0 << ASYNC_FIFO_FILL_EN) |
		/* disable fill path to reset fill path */
		/*(96 << ASYNC_FIFO_FILL_CNT_LSB);
		 *3 x 32 32-bit words
		 */
		(0 << ASYNC_FIFO_FILL_CNT_LSB));
	/* forever FILL; */
	WRITE_ASYNC_FIFO_REG(afifo->id, REG2,
		READ_ASYNC_FIFO_REG(afifo->id, REG2) |
		(1 << ASYNC_FIFO_FILL_EN));/*Enable fill path*/

	/* generate flush interrupt */
	WRITE_ASYNC_FIFO_REG(afifo->id, REG3,
		(READ_ASYNC_FIFO_REG(afifo->id, REG3) & 0xffff0000) |
		((((size >> (factor + 7)) - 1) & 0x7fff) <<
		 ASYNC_FLUSH_SIZE_IRQ_LSB));

	/* Connect the STB DEMUX to ASYNC_FIFO */
	WRITE_ASYNC_FIFO_REG(afifo->id, REG2,
		READ_ASYNC_FIFO_REG(afifo->id, REG2) |
		(source_val << ASYNC_FIFO_SOURCE_LSB));
}

/*Reset the ASYNC FIFOS when a ASYNC 
FIFO connect to a different DMX*/
/*
 * Re-derive the recording topology: for every initialized demux, set/clear
 * its TS_RECORDER_ENABLE bit according to whether a recording async FIFO is
 * sourced from it, disable FIFOs that are no longer used, and reprogram the
 * survivors.  The three fifo pointers order the recording FIFOs by source
 * demux id (lowest/middle/highest) so each gets the matching hardware
 * source value (0x3/0x2/0x0).
 */
static void reset_async_fifos(struct aml_dvb *dvb)
{
	struct aml_asyncfifo *low_dmx_fifo = NULL;
	struct aml_asyncfifo *high_dmx_fifo = NULL;
	struct aml_asyncfifo *highest_dmx_fifo = NULL;
	int i, j;
	int record_enable;

	struct aml_asyncfifo *afifo = NULL;

	for (j = 0; j < DMX_DEV_COUNT; j++) {
		if (!dvb->dmx[j].init)
			continue;

		record_enable = 0;
		for (i = 0; i < dvb->async_fifo_total_count; i++) {
			afifo = &dvb->asyncfifo[i];

			if (!afifo->init)
				continue;

			if (!dvb->dmx[j].record
			    || !(dvb->dmx[j].id == afifo->source))
				continue;

			/*This dmx is linked to the async fifo,
			 *Enable the TS_RECORDER_ENABLE
			 */
			record_enable = 1;
			/* Insertion-sort afifo into the low/high/highest
			 * slots by source demux id.
			 */
			if (!low_dmx_fifo) {
				low_dmx_fifo = afifo;
			} else if (low_dmx_fifo->source >
				   afifo->source) {
				if (!high_dmx_fifo)
					high_dmx_fifo = low_dmx_fifo;
				else {
					highest_dmx_fifo = high_dmx_fifo;
					high_dmx_fifo = low_dmx_fifo;
				}
				low_dmx_fifo = afifo;
			} else if (low_dmx_fifo->source < afifo->source) {
				if (!high_dmx_fifo)
					high_dmx_fifo = afifo;
				else {
					if (high_dmx_fifo->source >
					    afifo->source) {
						highest_dmx_fifo =
							high_dmx_fifo;
						high_dmx_fifo = afifo;
					} else {
						highest_dmx_fifo = afifo;
					}
				}
			}
			break;
		}

		pr_inf("Set DMX%d TS_RECORDER_ENABLE to %d\n", dvb->dmx[j].id,
		       record_enable ? 
		       1 : 0);

		/* Toggle TS_RECORDER_ENABLE only when it actually changes. */
		if (record_enable) {
			int old_en =
				DMX_READ_REG(dvb->dmx[j].id, DEMUX_CONTROL)
				& (1 << TS_RECORDER_ENABLE);

			if (!old_en) {
				DMX_WRITE_REG(dvb->dmx[j].id, DEMUX_CONTROL,
					DMX_READ_REG(dvb->dmx[j].id,
						DEMUX_CONTROL)
					| (1 << TS_RECORDER_ENABLE));
			}
		} else {
			int old_en =
				DMX_READ_REG(dvb->dmx[j].id, DEMUX_CONTROL)
				& (1 << TS_RECORDER_ENABLE);

			if (old_en) {
				DMX_WRITE_REG(dvb->dmx[j].id, DEMUX_CONTROL,
					DMX_READ_REG(dvb->dmx[j].id,
						DEMUX_CONTROL)
					& (~(1 << TS_RECORDER_ENABLE)));
			}
		}
	}
	pr_inf("reset ASYNC FIFOs\n");
	/* Disable enabled FIFOs that are no longer part of the topology. */
	for (i = 0; i < dvb->async_fifo_total_count; i++) {
		int old;

		afifo = &dvb->asyncfifo[i];

		if (!afifo->init)
			continue;

		old = READ_ASYNC_FIFO_REG(afifo->id, REG2)
			& (1 << ASYNC_FIFO_FILL_EN);

		if (old
		    && (afifo != low_dmx_fifo)
		    && (afifo != high_dmx_fifo)
		    && (afifo != highest_dmx_fifo))
			async_fifo_disable(afifo);
	}

	/*Set the async fifo regs */
	if (low_dmx_fifo) {
		async_fifo_set_regs(low_dmx_fifo, 0x3);

		if (high_dmx_fifo) {
			async_fifo_set_regs(high_dmx_fifo, 0x2);

			if (highest_dmx_fifo)
				async_fifo_set_regs(highest_dmx_fifo, 0x0);
		}
	}
}

/*Reset the demux device*/
void dmx_reset_hw(struct aml_dvb *dvb)
{
	dmx_reset_hw_ex(dvb, 1);
}

/*Reset the demux device*/
/*
 * Hard-reset ALL demuxes: save PCR state, optionally quiesce IRQs and the
 * section watchdog, preserve descrambler PIDs across the reset, pulse the
 * reset register, then re-verify register access and replay the full
 * software state (channels, filters, smallsec, timeout, dsc keys).
 */
void dmx_reset_hw_ex(struct aml_dvb *dvb, int reset_irq)
{
	int id, times;
	u32 pcr_num[DMX_DEV_COUNT];
	u32 pcr_reg[DMX_DEV_COUNT];

	pr_dbg("[dmx_kpi] demux reset begin\n");

	/* Save PCR config and quiesce interrupts per initialized demux. */
	for (id = 0; id < DMX_DEV_COUNT; id++) {
		if (!dvb->dmx[id].init)
			continue;
		pcr_reg[id] = DMX_READ_REG(id, PCR90K_CTL);
		pcr_num[id] = DMX_READ_REG(id, ASSIGN_PID_NUMBER);
		pr_dbg("reset demux, pcr_regs[%d]:0x%x, pcr_num[%d]:0x%x\n", id, pcr_reg[id], id, pcr_num[id]);
		if (reset_irq) {
			if (dvb->dmx[id].dmx_irq != -1)
				disable_irq(dvb->dmx[id].dmx_irq);
			if (dvb->dmx[id].dvr_irq != -1)
				disable_irq(dvb->dmx[id].dvr_irq);
		}
	}
#ifdef ENABLE_SEC_BUFF_WATCHDOG
	if (reset_irq)
		del_timer_sync(&dvb->watchdog_timer);
#endif
	/*RESET_TOP will clear the dsc pid , save all dsc pid that setting in TA*/
	for (id = 0; id < DSC_DEV_COUNT; id++) {
		struct aml_dsc *dsc = &dvb->dsc[id];
		int n;

		for (n = 0; n < DSC_COUNT; n++) {
			struct aml_dsc_channel *ch = &dsc->channel[n];
			/*if(ch->used)*/
			{
				ch->id = n;
				dsc_get_pid(ch,&ch->pid);
			}
		}
	}

	/*{
	   u32 data;
	   data = READ_MPEG_REG(STB_TOP_CONFIG);
	   ciplus = 0x7C000000 & data;
	   }*/

	WRITE_MPEG_REG(RESET1_REGISTER, RESET_DEMUXSTB);
	/*WRITE_MPEG_REG(RESET3_REGISTER, RESET_DEMUX2|RESET_DEMUX1|RESET_DEMUX0|RESET_S2P1|RESET_S2P0|RESET_TOP);*/

	/* Bounded spin until each demux's output-module command queue idles. */
	for (id = 0; id < DMX_DEV_COUNT; id++) {
		times = 0;
		while (times++ < 1000000) {
			if (!(DMX_READ_REG(id, OM_CMD_STATUS) & 0x01))
				break;
		}
	}

	WRITE_MPEG_REG(STB_TOP_CONFIG, 0);
	WRITE_MPEG_REG(STB_S2P2_CONFIG, 0);

	for (id = 0; id < DMX_DEV_COUNT; id++) {
		u32 version, data;

		if (!dvb->dmx[id].init)
			continue;

		if (reset_irq) {
			if (dvb->dmx[id].dmx_irq != -1)
				enable_irq(dvb->dmx[id].dmx_irq);
			if (dvb->dmx[id].dvr_irq != -1)
				enable_irq(dvb->dmx[id].dvr_irq);
		}
		/* Sanity-check register access with two test patterns. */
		DMX_WRITE_REG(id, DEMUX_CONTROL, 0x0000);
		version = DMX_READ_REG(id, STB_VERSION);
		DMX_WRITE_REG(id, STB_TEST_REG, version);
		pr_dbg("STB %d hardware version : %d\n", id, version);
		DMX_WRITE_REG(id, STB_TEST_REG, 0x5550);
		data = DMX_READ_REG(id, STB_TEST_REG);
		if (data != 0x5550)
			pr_error("STB %d register access failed\n", id);
		DMX_WRITE_REG(id, STB_TEST_REG, 0xaaa0);
		data = DMX_READ_REG(id, STB_TEST_REG);
		if (data != 0xaaa0)
			pr_error("STB %d register access failed\n", id);
		DMX_WRITE_REG(id, MAX_FM_COMP_ADDR, 0x0000);
		DMX_WRITE_REG(id, STB_INT_MASK, 0);
		DMX_WRITE_REG(id, STB_INT_STATUS, 0xffff);
		DMX_WRITE_REG(id, FEC_INPUT_CONTROL, 0);
	}

	stb_enable(dvb);

	/* Replay software state (buffers, channels, filters) per demux. */
	for (id = 0; id < DMX_DEV_COUNT; id++) {
		struct aml_dmx *dmx = &dvb->dmx[id];
		int n;
		unsigned long addr;
		unsigned long base;
		unsigned long 
		    grp_addr[SEC_BUF_GRP_COUNT];
		int grp_len[SEC_BUF_GRP_COUNT];

		if (!dvb->dmx[id].init)
			continue;

		/* Re-point the 4 section buffer groups; lengths are 8 bytes
		 * per 2^SEC_GRP_LEN_n unit, addresses 256-byte aligned off a
		 * common 64K base.
		 */
		if (dmx->sec_pages) {
			grp_len[0] = (1 << SEC_GRP_LEN_0) * 8;
			grp_len[1] = (1 << SEC_GRP_LEN_1) * 8;
			grp_len[2] = (1 << SEC_GRP_LEN_2) * 8;
			grp_len[3] = (1 << SEC_GRP_LEN_3) * 8;

			grp_addr[0] = virt_to_phys((void *)dmx->sec_pages);
			grp_addr[1] = grp_addr[0] + grp_len[0];
			grp_addr[2] = grp_addr[1] + grp_len[1];
			grp_addr[3] = grp_addr[2] + grp_len[2];

			base = grp_addr[0] & 0xFFFF0000;
			DMX_WRITE_REG(dmx->id, SEC_BUFF_BASE, base >> 16);
			DMX_WRITE_REG(dmx->id, SEC_BUFF_01_START,
				      (((grp_addr[0] - base) >> 8) << 16) |
				      ((grp_addr[1] - base) >> 8));
			DMX_WRITE_REG(dmx->id, SEC_BUFF_23_START,
				      (((grp_addr[2] - base) >> 8) << 16) |
				      ((grp_addr[3] - base) >> 8));
			DMX_WRITE_REG(dmx->id, SEC_BUFF_SIZE,
				      SEC_GRP_LEN_0 |
				      (SEC_GRP_LEN_1 << 4) |
				      (SEC_GRP_LEN_2 << 8) |
				      (SEC_GRP_LEN_3 << 12));
		}
#ifdef NO_SUB
#ifndef SUB_PARSER
		if (dmx->sub_pages) {
			addr = virt_to_phys((void *)dmx->sub_pages);
			DMX_WRITE_REG(dmx->id, SB_START, addr >> 12);
			DMX_WRITE_REG(dmx->id, SB_LAST_ADDR,
				      (dmx->sub_buf_len >> 3) - 1);
		}
#endif
#endif
		if (dmx->pes_pages) {
			addr = virt_to_phys((void *)dmx->pes_pages);
			DMX_WRITE_REG(dmx->id, OB_START, addr >> 12);
			DMX_WRITE_REG(dmx->id, OB_LAST_ADDR,
				      (dmx->pes_buf_len >> 3) - 1);
		}

		/* Rewrite every channel's filter-memory entry. */
		for (n = 0; n < CHANNEL_COUNT; n++) {
			/*struct aml_channel *chan = &dmx->channel[n];*/

			/*if (chan->used)*/
			{
#ifdef NO_SUB
#ifdef SUB_PARSER
				/*
				   check if subtitle channel was running,
				   the parser will be used in amstream also,
				   take care of the buff ptr. 
				 */
				u32 v = dmx_get_chan_target(dmx, n);
				if (v != 0xFFFF &&
				    (v & (0x7 << PID_TYPE))
				    == (SUB_PACKET << PID_TYPE))
					set_subtitle_pes_buffer(dmx);
#endif
#endif
				{
					/* Reset the PES offset tracker for
					 * OTHER_PES (audio3) channels.
					 */
					u32 v = dmx_get_chan_target(dmx, n);
					if (v != 0xFFFF &&
					    (v & (0x7 << PID_TYPE))
					    ==
					    (OTHER_PES_PACKET << PID_TYPE))
						pes_off_pre[dmx->id] = 0;
				}
				dmx_set_chan_regs(dmx, n);
			}
		}

		for (n = 0; n < FILTER_COUNT; n++) {
			struct aml_filter *filter = &dmx->filter[n];

			if (filter->used)
				dmx_set_filter_regs(dmx, n);
		}
		dmx_enable(&dvb->dmx[id]);
		dmx_smallsec_set(&dmx->smallsec,
				 dmx->smallsec.enable,
				 dmx->smallsec.bufsize,
				 1);

		dmx_timeout_set(&dmx->timeout,
				dmx->timeout.enable,
				dmx->timeout.timeout,
				dmx->timeout.ch_disable,
				dmx->timeout.match,
				1);
		/* Restore the PCR configuration saved before the reset. */
		DMX_WRITE_REG(id, ASSIGN_PID_NUMBER, pcr_num[id]);
		DMX_WRITE_REG(id, PCR90K_CTL, pcr_reg[id]);
	}

	/* Re-apply descrambler PIDs/keys saved before RESET_TOP. */
	for (id = 0; id < DSC_DEV_COUNT; id++) {
		struct aml_dsc *dsc = &dvb->dsc[id];
		int n;

		for (n = 0; n < DSC_COUNT; n++) {
			int flag = 0;
			struct aml_dsc_channel *ch = &dsc->channel[n];
			/*if(ch->used)*/
			{
				ch->work_mode = -1;
				//if ta setting pid, used will 0
				if (ch->pid != 0x1fff && !ch->used) {
					flag = 1;
					ch->used = 1;
				}
				dsc_set_pid(ch, ch->pid);
				if (flag)
					ch->used = 0;
				dsc_set_keys(ch);
			}
		}
	}
#ifdef ENABLE_SEC_BUFF_WATCHDOG
	if (reset_irq) {
		mod_timer(&dvb->watchdog_timer,
			  jiffies + msecs_to_jiffies(WATCHDOG_TIMER));
	}
#endif

	pr_dbg("[dmx_kpi] demux reset end\n");
}

/*Reset the individual demux*/
/*
 * Same sequence as dmx_reset_hw_ex() but scoped to a single demux; caller
 * must hold dvb->slock (see dmx_reset_dmx_hw_ex for the locking wrapper).
 * NOTE(review): the repeated "if (!dmx->init) return;" guards can bail out
 * with this demux's IRQs still disabled and the watchdog flagged off —
 * looks intentional only for the very first guard; verify.
 */
void dmx_reset_dmx_hw_ex_unlock(struct aml_dvb *dvb, struct aml_dmx *dmx,
				int reset_irq)
{
	u32 pcr_num = 0;
	u32 pcr_regs = 0;
	{
		if (!dmx->init)
			return;
		/* Save PCR config so it survives the reset. */
		pcr_regs = DMX_READ_REG(dmx->id, PCR90K_CTL);
		pcr_num = DMX_READ_REG(dmx->id, ASSIGN_PID_NUMBER);
		pr_dbg("reset demux, pcr_regs:0x%x, pcr_num:0x%x\n", pcr_regs, pcr_num);
	}
	{
		if (!dmx->init)
			return;
		if (reset_irq) {
			if (dmx->dmx_irq != -1)
				disable_irq(dmx->dmx_irq);
			if 
			    (dmx->dvr_irq != -1)
				disable_irq(dmx->dvr_irq);
		}
	}
#ifdef ENABLE_SEC_BUFF_WATCHDOG
	if (reset_irq) {
		/*del_timer_sync(&dvb->watchdog_timer); */
		/* Per-demux watchdog mask instead of stopping the timer. */
		dvb->dmx_watchdog_disable[dmx->id] = 1;
	}
#endif

	/*{
	   u32 data;
	   data = READ_MPEG_REG(STB_TOP_CONFIG);
	   ciplus = 0x7C000000 & data;
	   }*/

	pr_error("dmx_reset_dmx_hw_ex_unlock into\n");
	/* Pulse only this demux's reset line, then the descrambler reset. */
	WRITE_MPEG_REG(RESET3_REGISTER,
		       (dmx->id) ? ((dmx->id ==
				     1) ? RESET_DEMUX1 : RESET_DEMUX2) :
		       RESET_DEMUX0);
	WRITE_MPEG_REG(RESET3_REGISTER, RESET_DES);

	{
		int times;

		/* Bounded spin until the output-module command queue idles. */
		times = 0;
		while (times++ < 1000000) {
			if (!(DMX_READ_REG(dmx->id, OM_CMD_STATUS) & 0x01))
				break;
		}
	}

	/*WRITE_MPEG_REG(STB_TOP_CONFIG, 0); */

	{
		u32 version, data;

		if (!dmx->init)
			return;

		if (reset_irq) {
			if (dmx->dmx_irq != -1)
				enable_irq(dmx->dmx_irq);
			if (dmx->dvr_irq != -1)
				enable_irq(dmx->dvr_irq);
		}
		/* Sanity-check register access with two test patterns. */
		DMX_WRITE_REG(dmx->id, DEMUX_CONTROL, 0x0000);
		version = DMX_READ_REG(dmx->id, STB_VERSION);
		DMX_WRITE_REG(dmx->id, STB_TEST_REG, version);
		pr_dbg("STB %d hardware version : %d\n", dmx->id, version);
		DMX_WRITE_REG(dmx->id, STB_TEST_REG, 0x5550);
		data = DMX_READ_REG(dmx->id, STB_TEST_REG);
		if (data != 0x5550)
			pr_error("STB %d register access failed\n", dmx->id);
		DMX_WRITE_REG(dmx->id, STB_TEST_REG, 0xaaa0);
		data = DMX_READ_REG(dmx->id, STB_TEST_REG);
		if (data != 0xaaa0)
			pr_error("STB %d register access failed\n", dmx->id);
		DMX_WRITE_REG(dmx->id, MAX_FM_COMP_ADDR, 0x0000);
		DMX_WRITE_REG(dmx->id, STB_INT_MASK, 0);
		DMX_WRITE_REG(dmx->id, STB_INT_STATUS, 0xffff);
		DMX_WRITE_REG(dmx->id, FEC_INPUT_CONTROL, 0);
	}

	stb_enable(dvb);

	{
		int n;
		unsigned long addr;
		unsigned long base;
		unsigned long grp_addr[SEC_BUF_GRP_COUNT];
		int grp_len[SEC_BUF_GRP_COUNT];

		if (!dmx->init)
			return;

		/* Re-point the 4 section buffer groups (see dmx_reset_hw_ex). */
		if (dmx->sec_pages) {
			grp_len[0] = (1 << SEC_GRP_LEN_0) * 8;
			grp_len[1] = (1 << SEC_GRP_LEN_1) * 8;
			grp_len[2] = (1 << SEC_GRP_LEN_2) * 8;
			grp_len[3] = (1 << 
			      SEC_GRP_LEN_3) * 8;

			grp_addr[0] = virt_to_phys((void *)dmx->sec_pages);
			grp_addr[1] = grp_addr[0] + grp_len[0];
			grp_addr[2] = grp_addr[1] + grp_len[1];
			grp_addr[3] = grp_addr[2] + grp_len[2];

			base = grp_addr[0] & 0xFFFF0000;
			DMX_WRITE_REG(dmx->id, SEC_BUFF_BASE, base >> 16);
			DMX_WRITE_REG(dmx->id, SEC_BUFF_01_START,
				      (((grp_addr[0] - base) >> 8) << 16) |
				      ((grp_addr[1] - base) >> 8));
			DMX_WRITE_REG(dmx->id, SEC_BUFF_23_START,
				      (((grp_addr[2] - base) >> 8) << 16) |
				      ((grp_addr[3] - base) >> 8));
			DMX_WRITE_REG(dmx->id, SEC_BUFF_SIZE,
				      SEC_GRP_LEN_0 |
				      (SEC_GRP_LEN_1 << 4) |
				      (SEC_GRP_LEN_2 << 8) |
				      (SEC_GRP_LEN_3 << 12));
		}
#ifdef NO_SUB
#ifndef SUB_PARSER
		if (dmx->sub_pages) {
			addr = virt_to_phys((void *)dmx->sub_pages);
			DMX_WRITE_REG(dmx->id, SB_START, addr >> 12);
			DMX_WRITE_REG(dmx->id, SB_LAST_ADDR,
				      (dmx->sub_buf_len >> 3) - 1);
		}
#endif
#endif
		if (dmx->pes_pages) {
			addr = virt_to_phys((void *)dmx->pes_pages);
			DMX_WRITE_REG(dmx->id, OB_START, addr >> 12);
			DMX_WRITE_REG(dmx->id, OB_LAST_ADDR,
				      (dmx->pes_buf_len >> 3) - 1);
		}

		/* Rewrite every channel's filter-memory entry. */
		for (n = 0; n < CHANNEL_COUNT; n++) {
			/*struct aml_channel *chan = &dmx->channel[n];*/

			/*if (chan->used)*/
			{
#ifdef NO_SUB
#ifdef SUB_PARSER
				/*
				   check if subtitle channel was running,
				   the parser will be used in amstream also,
				   take care of the buff ptr. 
				 */
				u32 v = dmx_get_chan_target(dmx, n);
				if (v != 0xFFFF &&
				    (v & (0x7 << PID_TYPE))
				    == (SUB_PACKET << PID_TYPE))
					set_subtitle_pes_buffer(dmx);
#endif
#endif
				{
					/* Reset the PES offset tracker for
					 * OTHER_PES (audio3) channels.
					 */
					u32 v = dmx_get_chan_target(dmx, n);
					if (v != 0xFFFF &&
					    (v & (0x7 << PID_TYPE))
					    ==
					    (OTHER_PES_PACKET << PID_TYPE))
						pes_off_pre[dmx->id] = 0;
				}
				dmx_set_chan_regs(dmx, n);
			}
		}

		for (n = 0; n < FILTER_COUNT; n++) {
			struct aml_filter *filter = &dmx->filter[n];

			if (filter->used)
				dmx_set_filter_regs(dmx, n);
		}

		/* Clear section debug counters for this demux. */
		for (n = 0; n < SEC_CNT_MAX; n++) {
			dmx->sec_cnt[n] = 0;
			dmx->sec_cnt_match[n] = 0;
			dmx->sec_cnt_crc_fail[n] = 0;
		}
		dmx_enable(dmx);
		dmx_smallsec_set(&dmx->smallsec,
				 dmx->smallsec.enable,
				 dmx->smallsec.bufsize,
				 1);

		dmx_timeout_set(&dmx->timeout,
				dmx->timeout.enable,
				dmx->timeout.timeout,
				dmx->timeout.ch_disable,
				dmx->timeout.match,
				1);
	}

	{
		/* NOTE(review): this inner "id" shadows nothing here but
		 * mirrors dmx_reset_hw_ex(); dsc state is global, so all
		 * descrambler devices are re-applied even for a single-demux
		 * reset.
		 */
		int id;

		for (id = 0; id < DSC_DEV_COUNT; id++) {
			struct aml_dsc *dsc = &dvb->dsc[id];
			int n;

			for (n = 0; n < DSC_COUNT; n++) {
				int flag = 0;
				struct aml_dsc_channel *ch = &dsc->channel[n];
				/*if(ch->used)*/ {
					ch->id = n;
					ch->work_mode = -1;
					dsc_get_pid(ch,&ch->pid);
					if (ch->pid != 0x1fff && !ch->used) {
						flag = 1;
						ch->used = 1;
					}
					dsc_set_pid(ch, ch->pid);
					if (flag)
						ch->used = 0;
					dsc_set_keys(ch);
				}
			}
		}
	}
#ifdef ENABLE_SEC_BUFF_WATCHDOG
	if (reset_irq) {
		/*mod_timer(&dvb->watchdog_timer,
		 *jiffies+msecs_to_jiffies(WATCHDOG_TIMER));
		 */
		dvb->dmx_watchdog_disable[dmx->id] = 0;
	}
#endif
	{
		/* Restore the PCR configuration saved before the reset. */
		DMX_WRITE_REG(dmx->id, ASSIGN_PID_NUMBER, pcr_num);
		DMX_WRITE_REG(dmx->id, PCR90K_CTL, pcr_regs);
	}
}

/* Index-based wrapper over dmx_reset_dmx_hw_ex_unlock(). */
void dmx_reset_dmx_id_hw_ex_unlock(struct aml_dvb *dvb, int id, int reset_irq)
{
	dmx_reset_dmx_hw_ex_unlock(dvb, &dvb->dmx[id], reset_irq);
}

/* Locking wrapper: single-demux reset under dvb->slock. */
void dmx_reset_dmx_hw_ex(struct aml_dvb *dvb, struct aml_dmx *dmx,
			 int reset_irq)
{
	unsigned long flags;
	spin_lock_irqsave(&dvb->slock, flags);
	dmx_reset_dmx_hw_ex_unlock(dvb, dmx, 
				   reset_irq);
	spin_unlock_irqrestore(&dvb->slock, flags);
}

/* Locking wrapper: single-demux reset by index under dvb->slock. */
void dmx_reset_dmx_id_hw_ex(struct aml_dvb *dvb, int id, int reset_irq)
{
	unsigned long flags;

	spin_lock_irqsave(&dvb->slock, flags);
	dmx_reset_dmx_id_hw_ex_unlock(dvb, id, reset_irq);
	spin_unlock_irqrestore(&dvb->slock, flags);
}

/* Default reset: always re-toggles IRQs (reset_irq = 1). */
void dmx_reset_dmx_hw(struct aml_dvb *dvb, int id)
{
	dmx_reset_dmx_id_hw_ex(dvb, id, 1);
}

/*Allocate subtitle pes buffer*/
#if 0
static int alloc_subtitle_pes_buffer(struct aml_dmx *dmx)
{
	int start_ptr = 0;
	struct stream_buf_s *sbuff = 0;
	u32 phy_addr;

	start_ptr = READ_MPEG_REG(PARSER_SUB_START_PTR);
	if (start_ptr) {
		WRITE_MPEG_REG(PARSER_SUB_RP, start_ptr);
		goto exit;
	}
	sbuff = get_stream_buffer(BUF_TYPE_SUBTITLE);
	if (sbuff) {
		if (sbuff->flag & BUF_FLAG_IOMEM)
			phy_addr = sbuff->buf_start;
		else
			phy_addr = virt_to_phys((void *)sbuff->buf_start);

		WRITE_MPEG_REG(PARSER_SUB_RP, phy_addr);
		WRITE_MPEG_REG(PARSER_SUB_START_PTR, phy_addr);
		WRITE_MPEG_REG(PARSER_SUB_END_PTR,
			       phy_addr + sbuff->buf_size - 8);

		pr_dbg("pes buff=:%x %x\n", phy_addr, sbuff->buf_size);
	} else
		pr_dbg("Error stream buffer\n");
exit:
	return 0;
}
#endif

/*
 * Point the parser's subtitle ring at this demux's sub_pages buffer, but
 * only while no subtitle channel is active (sub_chan == -1) so an active
 * stream's pointers are never clobbered.  Compiled in only with SUB_PARSER.
 * Returns 0.
 */
static int set_subtitle_pes_buffer(struct aml_dmx *dmx)
{
#ifdef SUB_PARSER
	if (dmx->sub_chan == -1) {
		unsigned long addr = virt_to_phys((void *)dmx->sub_pages);
		WRITE_MPEG_REG(PARSER_SUB_RP, addr);
		WRITE_MPEG_REG(PARSER_SUB_START_PTR, addr);
		WRITE_MPEG_REG(PARSER_SUB_END_PTR, addr + dmx->sub_buf_len - 8);
		pr_inf("set sub buff: (%d) %lx %x\n", dmx->id, addr, dmx->sub_buf_len);
	}
#endif
	return 0;
}

/*
 * Report the parser subtitle buffer: physical start in *base and its
 * kernel-virtual mapping in *virt (either pointer may be NULL).
 * No-op when SUB_BUF_DMX is defined.  Returns 0.
 */
int dmx_get_sub_buffer(unsigned long *base, unsigned long *virt)
{
#ifndef SUB_BUF_DMX
	unsigned long s = READ_MPEG_REG(PARSER_SUB_START_PTR);
	if (base)
		*base = s;
	if (virt)
		*virt = (unsigned long)codec_mm_phys_to_virt(s);
#endif
	return 0;
}

/*
 * Record the externally allocated subtitle buffer (physical base plus its
 * virtual mapping) on the demux.  Returns 0.
 */
int dmx_init_sub_buffer(struct aml_dmx *dmx, unsigned long base, unsigned long virt)
{
#ifndef 
SUB_BUF_DMX + dmx->sub_buf_base = base; + pr_inf("sub buf base: 0x%lx\n", dmx->sub_buf_base); + + dmx->sub_buf_base_virt = (u8 *)virt; + pr_inf("sub buf base virt: 0x%p\n", dmx->sub_buf_base_virt); +#endif + return 0; +} + +static int check_dvr_for_raw_channel(struct aml_dmx *dmx, int ch) +{ + switch (ch) { + case 0: + case 1: return 1; + case 2: return dmx->sub_chan != -1 ? 1 : 0; + case 3: return dmx->pcr_chan != -1 ? 1 : 0; + default: return 0; + } + return 0; +} + +/*Allocate a new channel*/ +int dmx_alloc_chan(struct aml_dmx *dmx, int type, int pes_type, int pid) +{ + int id = -1; + int ret; + + if (type == DMX_TYPE_TS) { + switch (pes_type) { + case DMX_PES_VIDEO: + if (!dmx->channel[0].used) + id = 0; + break; + case DMX_PES_AUDIO: + if (!dmx->channel[1].used) + id = 1; + break; + case DMX_PES_SUBTITLE: + case DMX_PES_TELETEXT: + if (!dmx->channel[2].used) + id = 2; + //alloc_subtitle_pes_buffer(dmx); + set_subtitle_pes_buffer(dmx); + break; + case DMX_PES_PCR: + if (!dmx->channel[3].used) + id = 3; + break; + case DMX_PES_AUDIO3: + pes_off_pre[dmx->id] = 0; + { + int i; + + for (i = SYS_CHAN_COUNT; + i < CHANNEL_COUNT; i++) { + if (!dmx->channel[i].used) { + id = i; + break; + } + } + } + break; + case DMX_PES_OTHER: + { + int i; + + for (i = SYS_CHAN_COUNT; + i < CHANNEL_COUNT; i++) { + if (!dmx->channel[i].used) { + id = i; + break; + } + } + } + break; + default: + break; + } + } else { + int i; + + for (i = SYS_CHAN_COUNT; i < CHANNEL_COUNT; i++) { + if (!dmx->channel[i].used) { + id = i; + break; + } + } + } + + if (id == -1) { + pr_error("too many channels\n"); + return -1; + } + + pr_dbg("allocate channel(id:%d-%d PID:0x%x)\n", dmx->id, id, pid); + + if (check_dvr_for_raw_channel(dmx, id)) { + ret = dmx_get_chan(dmx, pid); + if (ret >= 0 && DVR_FEED(dmx->channel[ret].feed)) { + pr_dbg("raw ch fix: dmx:%d: ch[%d(dvr)] -> ch[%d]\n", + dmx->id, ret, id); + dmx_remove_feed(dmx, dmx->channel[ret].feed); + dmx->channel[id].dvr_feed = 
dmx->channel[ret].feed; + dmx->channel[id].dvr_feed->priv = (void *)(long)id; + } else { + dmx->channel[id].dvr_feed = NULL; + } + } + + dmx->channel[id].type = type; + dmx->channel[id].pes_type = pes_type; + dmx->channel[id].pid = pid; + dmx->channel[id].used = 1; + dmx->channel[id].filter_count = 0; + + dmx_set_chan_regs(dmx, id); + + set_debug_dmx_chanpids(dmx->id, id, pid); + + dmx->chan_count++; + dmx_enable(dmx); + + return id; +} + +/*Free a channel*/ +void dmx_free_chan(struct aml_dmx *dmx, int cid) +{ + pr_dbg("free channel(id:%d-%d PID:0x%x)\n", dmx->id, cid, dmx->channel[cid].pid); + + dmx->channel[cid].used = 0; + dmx->channel[cid].pid = 0x1fff; + dmx_set_chan_regs(dmx, cid); + + if (cid == 2) { + u32 parser_sub_start_ptr; + + parser_sub_start_ptr = READ_MPEG_REG(PARSER_SUB_START_PTR); + WRITE_MPEG_REG(PARSER_SUB_RP, parser_sub_start_ptr); + WRITE_MPEG_REG(PARSER_SUB_WP, parser_sub_start_ptr); + } + + set_debug_dmx_chanpids(dmx->id, cid, -1); + dmx->chan_count--; + dmx_enable(dmx); + + /*Special pes type channel, check its dvr feed */ + if (check_dvr_for_raw_channel(dmx, cid) + && dmx->channel[cid].dvr_feed) { + /*start the dvr feed */ + pr_dbg("raw ch fix: dmx:%d: ch[%d] -> ch[(dvr)]\n", + dmx->id, cid); + dmx_add_feed(dmx, dmx->channel[cid].dvr_feed); + dmx->channel[cid].dvr_feed = NULL; + } +} + +/*Add a section*/ +static int dmx_chan_add_filter(struct aml_dmx *dmx, int cid, + struct dvb_demux_filter *filter) +{ + int id = -1; + int i; + + for (i = 0; i < FILTER_COUNT; i++) { + if (!dmx->filter[i].used) { + id = i; + break; + } + } + + if (id == -1) { + pr_error("too many filters\n"); + return -1; + } + + pr_dbg("channel(id:%d PID:0x%x) add filter(id:%d)\n", cid, + filter->feed->pid, id); + + dmx->filter[id].chan_id = cid; + dmx->filter[id].used = 1; + dmx->filter[id].filter = (struct dmx_section_filter *)filter; + dmx->channel[cid].filter_count++; + + dmx_set_filter_regs(dmx, id); + + return id; +} + +static void dmx_remove_filter(struct aml_dmx 
*dmx, int cid, int fid) +{ + pr_dbg("channel(id:%d PID:0x%x) remove filter(id:%d)\n", cid, + dmx->channel[cid].pid, fid); + + dmx->filter[fid].used = 0; + dmx->channel[cid].filter_count--; + + dmx_set_filter_regs(dmx, fid); + dmx_clear_filter_buffer(dmx, fid); +} + +static int sf_add_feed(struct aml_dmx *src_dmx, struct dvb_demux_feed *feed) +{ + int ret = 0; + + struct aml_dvb *dvb = (struct aml_dvb *)src_dmx->demux.priv; + struct aml_swfilter *sf = &dvb->swfilter; + + pr_dbg_sf("sf add pid[%d]\n", feed->pid); + + /*init sf */ + if (!sf->user) { + void *mem; + + mem = vmalloc(SF_BUFFER_SIZE); + if (!mem) { + ret = -ENOMEM; + goto fail; + } + dvb_ringbuffer_init(&sf->rbuf, mem, SF_BUFFER_SIZE); + + sf->dmx = &dvb->dmx[SF_DMX_ID]; + sf->afifo = &dvb->asyncfifo[SF_AFIFO_ID]; + + sf->dmx->source = src_dmx->source; + sf->afifo->source = sf->dmx->id; + sf->track_dmx = src_dmx->id; + /*sf->afifo->flush_size = 188*10; */ + + pr_dbg_sf("init sf mode.\n"); + + } else if (sf->dmx->source != src_dmx->source) { + pr_error(" pid=%d[src:%d] already used with sfdmx%d[src:%d]\n", + feed->pid, src_dmx->source, sf->dmx->id, + sf->dmx->source); + ret = -EBUSY; + goto fail; + } + + /*setup feed */ + ret = dmx_get_chan(sf->dmx, feed->pid); + if (ret >= 0) { + pr_error(" pid=%d[dmx:%d] already used [dmx:%d].\n", + feed->pid, src_dmx->id, + ((struct aml_dmx *)sf->dmx->channel[ret].feed-> + demux)->id); + ret = -EBUSY; + goto fail; + } + ret = + dmx_alloc_chan(sf->dmx, DMX_TYPE_TS, DMX_PES_OTHER, + feed->pid); + if (ret < 0) { + pr_error(" %s: alloc chan error, ret=%d\n", __func__, ret); + ret = -EBUSY; + goto fail; + } + sf->dmx->channel[ret].feed = feed; + feed->priv = (void *)(long)ret; + + sf->dmx->channel[ret].dvr_feed = feed; + + sf->user++; + debug_sf_user = sf->user; + dmx_enable(sf->dmx); + + return 0; + +fail: + feed->priv = (void *)-1; + return ret; +} + +static int sf_remove_feed(struct aml_dmx *src_dmx, struct dvb_demux_feed *feed) +{ + int ret; + + struct aml_dvb *dvb = 
(struct aml_dvb *)src_dmx->demux.priv; + struct aml_swfilter *sf = &dvb->swfilter; + + if (!sf->user || (sf->dmx->source != src_dmx->source)) + return 0; + + /*add fail, no need to remove*/ + if (((long)feed->priv) < 0) + return 0; + + ret = dmx_get_chan(sf->dmx, feed->pid); + if (ret < 0) + return 0; + + pr_dbg_sf("sf remove pid[%d]\n", feed->pid); + + dmx_free_chan(sf->dmx, (long)feed->priv); + + sf->dmx->channel[ret].feed = NULL; + sf->dmx->channel[ret].dvr_feed = NULL; + + sf->user--; + debug_sf_user = sf->user; + + if (!sf->user) { + sf->dmx->source = -1; + sf->afifo->source = AM_DMX_MAX; + sf->track_dmx = -1; + /*sf->afifo->flush_size = sf->afifo->buf_len>>1; */ + + if (sf->rbuf.data) { + void *mem = sf->rbuf.data; + + sf->rbuf.data = NULL; + vfree(mem); + } + pr_dbg_sf("exit sf mode.\n"); + } + + return 0; +} + +static int sf_feed_sf(struct aml_dmx *dmx, struct dvb_demux_feed *feed, + int add_not_remove) +{ + int sf = 0; + + if (sf_dmx_sf(dmx)) { + pr_error("%s: demux %d is in sf mode\n", __func__, dmx->id); + return -EINVAL; + } + + switch (feed->type) { + case DMX_TYPE_TS:{ + struct dmxdev_filter *dmxdevfilter = + feed->feed.ts.priv; + if (!DVR_FEED(feed)) { + if (dmxdevfilter->params.pes. + flags & DMX_USE_SWFILTER) + sf = 1; + if (force_pes_sf) + sf = 1; + } + } + break; + + case DMX_TYPE_SEC:{ + struct dvb_demux_filter *filter; + + for (filter = feed->filter; filter; + filter = filter->next) { + struct dmxdev_filter *dmxdevfilter = + filter->filter.priv; + if (dmxdevfilter->params.sec. + flags & DMX_USE_SWFILTER) + sf = 1; + if (add_not_remove) + filter->hw_handle = (u16)-1; + } + if (force_sec_sf) + sf = 1; + } + break; + } + + return sf ? 0 : 1; +} + +static int sf_check_feed(struct aml_dmx *dmx, struct dvb_demux_feed *feed, + int add_not_remove) +{ + int ret = 0; + + ret = sf_feed_sf(dmx, feed, add_not_remove); + if (ret) + return ret; + + pr_dbg_sf("%s [pid:%d] %s\n", + (feed->type == DMX_TYPE_TS) ? 
"DMX_TYPE_TS" : "DMX_TYPE_SEC", + feed->pid, add_not_remove ? "-> sf mode" : "sf mode ->"); + + if (add_not_remove) + ret = sf_add_feed(dmx, feed); + else + ret = sf_remove_feed(dmx, feed); + + if (ret < 0) { + pr_error("sf %s feed fail[%d]\n", + add_not_remove ? "add" : "remove", ret); + } + return ret; +} + +static int dmx_add_feed(struct aml_dmx *dmx, struct dvb_demux_feed *feed) +{ + int id, ret = 0; + struct dvb_demux_filter *filter; + struct dvb_demux_feed *dfeed = NULL; + int sf_ret = 0; /*<0:error, =0:sf_on, >0:sf_off */ + + sf_ret = sf_check_feed(dmx, feed, 1/*SF_FEED_OP_ADD */); + if (sf_ret < 0) + return sf_ret; + + switch (feed->type) { + case DMX_TYPE_TS: + pr_dbg("add feed ts: pid:%d-0x%x, (%p)\n", + dmx->id, feed->pid, feed); + ret = dmx_get_chan(dmx, feed->pid); + if (ret >= 0) { + if (DVR_FEED(dmx->channel[ret].feed)) { + if (DVR_FEED(feed)) { + /*dvr feed already work */ + pr_error("PID %d already used(DVR)\n", + feed->pid); + ret = -EBUSY; + goto fail; + } + if (sf_ret) { + /*if sf_on, we do not reset the + *previous dvr feed, just load the pes + *feed on the sf, a diffrent data path. + */ + dfeed = dmx->channel[ret].feed; + dmx_remove_feed(dmx, dfeed); + } + } else { + if (DVR_FEED(feed) + && (!dmx->channel[ret].dvr_feed)) { + /*just store the dvr_feed */ + dmx->channel[ret].dvr_feed = feed; + feed->priv = (void *)(long)ret; + if (!dmx->record) + dmx_enable(dmx); + dmx_add_recchan(dmx->id, ret); + return 0; + } + { + pr_error("PID %d already used\n", + feed->pid); + ret = -EBUSY; + goto fail; + } + } + } + + if (sf_ret) { /*not sf feed. 
*/ + ret = + dmx_alloc_chan(dmx, feed->type, + feed->pes_type, feed->pid); + if (ret < 0) { + pr_dbg("%s: alloc chan error, ret=%d\n", + __func__, ret); + ret = -EBUSY; + goto fail; + } + dmx->channel[ret].feed = feed; + feed->priv = (void *)(long)ret; + dmx->channel[ret].dvr_feed = NULL; + } + /*dvr */ + if (DVR_FEED(feed)) { + dmx->channel[ret].dvr_feed = feed; + feed->priv = (void *)(long)ret; + if (!dmx->record) + dmx_enable(dmx); + dmx_add_recchan(dmx->id, ret); + } else if (dfeed && sf_ret) { + dmx->channel[ret].dvr_feed = dfeed; + dfeed->priv = (void *)(long)ret; + if (!dmx->record) + dmx_enable(dmx); + dmx_add_recchan(dmx->id, ret); + } + + break; + case DMX_TYPE_SEC: + pr_dbg("add feed sec: pid:%d-0x%x, (%p)\n", + dmx->id, feed->pid, feed); + ret = dmx_get_chan(dmx, feed->pid); + if (ret >= 0) { + if (DVR_FEED(dmx->channel[ret].feed)) { + if (sf_ret) { + /*if sf_on, we do not reset the + *previous dvr feed, just load the pes + *feed on the sf,a diffrent data path. + */ + dfeed = dmx->channel[ret].feed; + dmx_remove_feed(dmx, dfeed); + } + } else { + pr_error("PID %d already used\n", feed->pid); + ret = -EBUSY; + goto fail; + } + } + if (sf_ret) { /*not sf feed. 
*/ + id = dmx_alloc_chan(dmx, feed->type, + feed->pes_type, feed->pid); + if (id < 0) { + pr_dbg("%s: alloc chan error, ret=%d\n", + __func__, id); + ret = -EBUSY; + goto fail; + } + for (filter = feed->filter; filter; + filter = filter->next) { + ret = dmx_chan_add_filter(dmx, id, filter); + if (ret >= 0) + filter->hw_handle = ret; + else + filter->hw_handle = (u16)-1; + } + dmx->channel[id].feed = feed; + feed->priv = (void *)(long)id; + dmx->channel[id].dvr_feed = NULL; + + if (dfeed) { + dmx->channel[id].dvr_feed = dfeed; + dfeed->priv = (void *)(long)id; + if (!dmx->record) + dmx_enable(dmx); + dmx_add_recchan(dmx->id, id); + } + } + break; + default: + return -EINVAL; + } + + dmx->feed_count++; + + return 0; + +fail: + feed->priv = (void *)-1; + return ret; +} + +static int dmx_remove_feed(struct aml_dmx *dmx, struct dvb_demux_feed *feed) +{ + struct dvb_demux_filter *filter; + struct dvb_demux_feed *dfeed = NULL; + + int sf_ret = 0; /*<0:error, =0:sf_on, >0:sf_off */ + + /*add fail, no need to remove*/ + if (((long)feed->priv) < 0) + return 0; + + sf_ret = sf_check_feed(dmx, feed, 0/*SF_FEED_OP_RM */); + if (sf_ret <= 0) + return sf_ret; + + switch (feed->type) { + case DMX_TYPE_TS: + pr_dbg("rm feed ts: pid:%d-0x%x, %p\n", + dmx->id, feed->pid, feed); + if (dmx->channel[(long)feed->priv].feed == + dmx->channel[(long)feed->priv].dvr_feed) { + dmx_rm_recchan(dmx->id, (long)feed->priv); + dmx_free_chan(dmx, (long)feed->priv); + } else { + if (feed == dmx->channel[(long)feed->priv].feed) { + dfeed = dmx->channel[(long)feed->priv].dvr_feed; + dmx_rm_recchan(dmx->id, (long)feed->priv); + dmx_free_chan(dmx, (long)feed->priv); + if (dfeed) { + /*start the dvr feed */ + dmx_add_feed(dmx, dfeed); + } + } else if (feed == + dmx->channel[(long)feed->priv].dvr_feed) { + /*just remove the dvr_feed */ + dmx->channel[(long)feed->priv].dvr_feed = NULL; + dmx_rm_recchan(dmx->id, (long)feed->priv); + if (dmx->record) + dmx_enable(dmx); + } else { + /*This must never happen */ 
+ pr_error("%s: unknown feed\n", __func__); + return -EINVAL; + } + } + + break; + case DMX_TYPE_SEC: + pr_dbg("rm feed sec: pid:%d-0x%x, %p\n", + dmx->id, feed->pid, feed); + for (filter = feed->filter; filter; filter = filter->next) { + if (filter->hw_handle != (u16)-1) + dmx_remove_filter(dmx, (long)feed->priv, + (int)filter->hw_handle); + } + + dfeed = dmx->channel[(long)feed->priv].dvr_feed; + dmx_rm_recchan(dmx->id, (long)feed->priv); + dmx_free_chan(dmx, (long)feed->priv); + if (dfeed) { + /*start the dvr feed */ + dmx_add_feed(dmx, dfeed); + } + break; + default: + return -EINVAL; + } + + dmx->feed_count--; + return 0; +} + +int aml_dmx_hw_init(struct aml_dmx *dmx) +{ + /* + *struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + *unsigned long flags; + */ + int ret; + + /*Demux initialize */ + /*spin_lock_irqsave(&dvb->slock, flags);*/ + ret = dmx_init(dmx); + /*spin_unlock_irqrestore(&dvb->slock, flags);*/ + + return ret; +} + +int aml_dmx_hw_deinit(struct aml_dmx *dmx) +{ + struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + unsigned long flags; + int ret; + + spin_lock_irqsave(&dvb->slock, flags); + ret = dmx_deinit(dmx); + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + +/*extern void afifo_reset(int v);*/ + +int aml_asyncfifo_hw_init(struct aml_asyncfifo *afifo) +{ + +/* + * struct aml_dvb *dvb = afifo->dvb; + * unsigned long flags; + */ + int ret; + + int len = asyncfifo_buf_len; + unsigned long buf = asyncfifo_alloc_buffer(afifo, len); + + if (!buf) + return -1; + + /*Async FIFO initialize*/ +/* + * spin_lock_irqsave(&dvb->slock, flags); + */ +/* + *#if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 + * CLK_GATE_ON(ASYNC_FIFO); + *#endif + */ + /*afifo_reset(0);*/ + + WRITE_MPEG_REG(RESET6_REGISTER, (1<<11)|(1<<12)); + + ret = async_fifo_init(afifo, 1, len, buf); +/* + * spin_unlock_irqrestore(&dvb->slock, flags); + */ + if (ret < 0) + asyncfifo_free_buffer(buf, len); + + return ret; +} + +int aml_asyncfifo_hw_deinit(struct 
aml_asyncfifo *afifo) +{ + int ret; + + ret = async_fifo_deinit(afifo, 1); +/* + *#if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 + * CLK_GATE_OFF(ASYNC_FIFO); + *#endif + */ + /*afifo_reset(1);*/ + + return ret; +} + +int aml_asyncfifo_hw_reset(struct aml_asyncfifo *afifo) +{ + struct aml_dvb *dvb = afifo->dvb; + unsigned long flags; + int ret, src = -1; + + unsigned long buf = 0; + int len = asyncfifo_buf_len; + buf = asyncfifo_alloc_buffer(afifo, len); + if (!buf) + return -1; + + if (afifo->init) { + src = afifo->source; + async_fifo_deinit(afifo, 0); + } + + spin_lock_irqsave(&dvb->slock, flags); + ret = async_fifo_init(afifo, 0, len, buf); + /* restore the source */ + if (src != -1) + afifo->source = src; + + if ((ret == 0) && afifo->dvb) + reset_async_fifos(afifo->dvb); + + spin_unlock_irqrestore(&dvb->slock, flags); + + if (ret < 0) + asyncfifo_free_buffer(buf, len); + + return ret; +} + +int aml_dmx_hw_start_feed(struct dvb_demux_feed *dvbdmxfeed) +{ + struct aml_dmx *dmx = (struct aml_dmx *)dvbdmxfeed->demux; + struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&dvb->slock, flags); + ret = dmx_add_feed(dmx, dvbdmxfeed); + spin_unlock_irqrestore(&dvb->slock, flags); + + /*handle errors silently*/ + if (ret != 0) + ret = 0; + + return ret; +} + +int aml_dmx_hw_stop_feed(struct dvb_demux_feed *dvbdmxfeed) +{ + struct aml_dmx *dmx = (struct aml_dmx *)dvbdmxfeed->demux; + struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + unsigned long flags; + + spin_lock_irqsave(&dvb->slock, flags); + dmx_remove_feed(dmx, dvbdmxfeed); + spin_unlock_irqrestore(&dvb->slock, flags); + + return 0; +} + +int sf_dmx_track_source(struct aml_dmx *dmx) +{ + struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + struct aml_swfilter *sf = &dvb->swfilter; + + if (sf->user && (dmx->id == sf->track_dmx)) { + pr_dbg_sf("tracking dmx src [%d -> %d]\n", + sf->dmx->source, dmx->source); + sf->dmx->source = dmx->source; 
+ dmx_reset_dmx_hw_ex_unlock(dvb, sf->dmx, 0); + } + return 0; +} + +int aml_dmx_hw_set_source(struct dmx_demux *demux, dmx_source_t src) +{ + struct aml_dmx *dmx = (struct aml_dmx *)demux; + struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + int ret = 0; + int hw_src; + unsigned long flags; + + if (sf_dmx_sf(dmx)) { + pr_error("%s: demux %d is in sf mode\n", __func__, dmx->id); + return -EINVAL; + } + + spin_lock_irqsave(&dvb->slock, flags); + + hw_src = dmx->source; + + switch (src) { + case DMX_SOURCE_FRONT0: + hw_src = + (dvb->ts[0].mode == + AM_TS_SERIAL) ? (dvb->ts[0].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS0; + break; + case DMX_SOURCE_FRONT1: + hw_src = + (dvb->ts[1].mode == + AM_TS_SERIAL) ? (dvb->ts[1].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS1; + break; + case DMX_SOURCE_FRONT2: + hw_src = + (dvb->ts[2].mode == + AM_TS_SERIAL) ? (dvb->ts[2].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS2; + break; + case DMX_SOURCE_FRONT3: + hw_src = + (dvb->ts[3].mode == + AM_TS_SERIAL) ? (dvb->ts[3].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS3; + break; + case DMX_SOURCE_DVR0: + hw_src = AM_TS_SRC_HIU; + break; + case DMX_SOURCE_DVR1: + hw_src = AM_TS_SRC_HIU1; + break; + case DMX_SOURCE_FRONT0_OFFSET: + hw_src = AM_TS_SRC_DMX0; + break; + case DMX_SOURCE_FRONT1_OFFSET: + hw_src = AM_TS_SRC_DMX1; + break; + case DMX_SOURCE_FRONT2_OFFSET: + hw_src = AM_TS_SRC_DMX2; + break; + default: + pr_error("illegal demux source %d\n", src); + ret = -EINVAL; + break; + } + + if (hw_src != dmx->source) { + dmx->source = hw_src; + dmx_reset_dmx_hw_ex_unlock(dvb, dmx, 0); + sf_dmx_track_source(dmx); + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + +#define IS_SRC_DMX(_src) ((_src) >= AM_TS_SRC_DMX0 && (_src) <= AM_TS_SRC_DMX2) + +int aml_stb_hw_set_source(struct aml_dvb *dvb, dmx_source_t src) +{ + unsigned long flags; + int hw_src; + int ret; + + ret = 0; + spin_lock_irqsave(&dvb->slock, flags); + + hw_src = dvb->stb_source; + + switch (src) { + case DMX_SOURCE_FRONT0: 
+ hw_src = + (dvb->ts[0].mode == + AM_TS_SERIAL) ? (dvb->ts[0].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS0; + break; + case DMX_SOURCE_FRONT1: + hw_src = + (dvb->ts[1].mode == + AM_TS_SERIAL) ? (dvb->ts[1].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS1; + break; + case DMX_SOURCE_FRONT2: + hw_src = + (dvb->ts[2].mode == + AM_TS_SERIAL) ? (dvb->ts[2].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS2; + break; + case DMX_SOURCE_FRONT3: + hw_src = + (dvb->ts[3].mode == + AM_TS_SERIAL) ? (dvb->ts[3].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS3; + break; + case DMX_SOURCE_DVR0: + hw_src = AM_TS_SRC_HIU; + break; + case DMX_SOURCE_DVR1: + hw_src = AM_TS_SRC_HIU1; + break; + case DMX_SOURCE_FRONT0_OFFSET: + hw_src = AM_TS_SRC_DMX0; + break; + case DMX_SOURCE_FRONT1_OFFSET: + hw_src = AM_TS_SRC_DMX1; + break; + case DMX_SOURCE_FRONT2_OFFSET: + hw_src = AM_TS_SRC_DMX2; + break; + default: + pr_error("illegal demux source %d\n", src); + ret = -EINVAL; + break; + } + + if (dvb->stb_source != hw_src) { + int old_source = dvb->stb_source; + + dvb->stb_source = hw_src; + + if (IS_SRC_DMX(old_source)) { + dmx_set_misc_id(dvb, + (old_source - AM_TS_SRC_DMX0), 0, -1); + } else { + /*which dmx for av-play is unknown, + *can't avoid reset-all + */ + dmx_reset_hw_ex(dvb, 0); + } + + if (IS_SRC_DMX(dvb->stb_source)) { + dmx_set_misc_id(dvb, + (dvb->stb_source - AM_TS_SRC_DMX0), 1, -1); + /*dmx_reset_dmx_id_hw_ex_unlock + * (dvb, (dvb->stb_source-AM_TS_SRC_DMX0), 0); + */ + } else { + /*which dmx for av-play is unknown, + *can't avoid reset-all + */ + dmx_reset_hw_ex(dvb, 0); + } + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + + + +int aml_dsc_hw_set_source(struct aml_dsc *dsc, + dmx_source_t src, dmx_source_t dst) +{ + struct aml_dvb *dvb = dsc->dvb; + int ret = 0; + unsigned long flags; + int hw_src = -1, hw_dst = -1, org_src = -1, org_dst = -1; + int src_reset = 0, dst_reset = 0; + + spin_lock_irqsave(&dvb->slock, flags); + + hw_src = dsc->source; + hw_dst = dsc->dst; + + switch (src) 
{ + case DMX_SOURCE_FRONT0_OFFSET: + hw_src = AM_TS_SRC_DMX0; + break; + case DMX_SOURCE_FRONT1_OFFSET: + hw_src = AM_TS_SRC_DMX1; + break; + case DMX_SOURCE_FRONT2_OFFSET: + hw_src = AM_TS_SRC_DMX2; + break; + default: + hw_src = -1; + break; + } + switch (dst) { + case DMX_SOURCE_FRONT0_OFFSET: + hw_dst = AM_TS_SRC_DMX0; + break; + case DMX_SOURCE_FRONT1_OFFSET: + hw_dst = AM_TS_SRC_DMX1; + break; + case DMX_SOURCE_FRONT2_OFFSET: + hw_dst = AM_TS_SRC_DMX2; + break; + default: + hw_dst = -1; + break; + } + + if (hw_src != dsc->source) { + org_src = dsc->source; + dsc->source = hw_src; + src_reset = 1; + } + if (hw_dst != dsc->dst) { + org_dst = dsc->dst; + dsc->dst = hw_dst; + dst_reset = 1; + } + + if (src_reset) { + pr_inf("dsc%d source changed: %d -> %d\n", + dsc->id, org_src, hw_src); + if (org_src != -1) { + pr_inf("reset dmx%d\n", (org_src - AM_TS_SRC_DMX0)); + dmx_reset_dmx_id_hw_ex_unlock(dvb, + (org_src - AM_TS_SRC_DMX0), 0); + } + if (hw_src != -1) { + pr_inf("reset dmx%d\n", (hw_src - AM_TS_SRC_DMX0)); + dmx_reset_dmx_id_hw_ex_unlock(dvb, + (hw_src - AM_TS_SRC_DMX0), 0); + } else + dsc_enable(dsc, 0); + } + if (dst_reset) { + pr_inf("dsc%d dest changed: %d -> %d\n", + dsc->id, org_dst, hw_dst); + if (((!src_reset) && (org_dst != -1)) || + (src_reset && (org_dst != -1) && + (org_dst != org_src) && (org_dst != hw_src))) { + pr_inf("reset dmx%d\n", (org_dst - AM_TS_SRC_DMX0)); + dmx_reset_dmx_id_hw_ex_unlock(dvb, + (org_dst - AM_TS_SRC_DMX0), 0); + } + if (((!src_reset) && (hw_dst != -1)) || + (src_reset && (hw_dst != -1) + && (hw_dst != org_src) && (hw_dst != hw_src))) { + pr_inf("reset dmx%d\n", (hw_dst - AM_TS_SRC_DMX0)); + dmx_reset_dmx_id_hw_ex_unlock(dvb, + (hw_dst - AM_TS_SRC_DMX0), 0); + } + if (hw_dst == -1) + dsc_enable(dsc, 0); + } + if (src_reset && dst_reset) { + set_ciplus_input_source(dsc); + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + +int aml_tso_hw_set_source(struct aml_dvb *dvb, dmx_source_t src) +{ + int ret = 
0; + unsigned long flags; + int hw_src; + + spin_lock_irqsave(&dvb->slock, flags); + + hw_src = dvb->tso_source; + + switch (src) { + case DMX_SOURCE_FRONT0: + hw_src = (dvb->ts[0].mode == AM_TS_SERIAL) + ? (dvb->ts[0].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS0; + break; + case DMX_SOURCE_FRONT1: + hw_src = (dvb->ts[1].mode == AM_TS_SERIAL) + ? (dvb->ts[1].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS1; + break; + case DMX_SOURCE_FRONT2: + hw_src = (dvb->ts[2].mode == AM_TS_SERIAL) + ? (dvb->ts[2].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS2; + break; + case DMX_SOURCE_FRONT3: + hw_src = (dvb->ts[3].mode == AM_TS_SERIAL) + ? (dvb->ts[3].s2p_id+AM_TS_SRC_S_TS0) : AM_TS_SRC_TS3; + break; + case DMX_SOURCE_DVR0: + hw_src = AM_TS_SRC_HIU; + break; + case DMX_SOURCE_FRONT0 + 100: + hw_src = AM_TS_SRC_DMX0; + break; + case DMX_SOURCE_FRONT1 + 100: + hw_src = AM_TS_SRC_DMX1; + break; + case DMX_SOURCE_FRONT2 + 100: + hw_src = AM_TS_SRC_DMX2; + break; + default: + hw_src = -1; + ret = -EINVAL; + break; + } + + if (hw_src != dvb->tso_source) { + dvb->tso_source = hw_src; + stb_enable(dvb); + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + +int aml_asyncfifo_hw_set_source(struct aml_asyncfifo *afifo, + enum aml_dmx_id_t src) +{ + struct aml_dvb *dvb = afifo->dvb; + int ret = -1; + unsigned long flags; + + if (sf_afifo_sf(afifo)) { + pr_error("%s: afifo %d is in sf mode\n", __func__, afifo->id); + return -EINVAL; + } + + spin_lock_irqsave(&dvb->slock, flags); + + pr_dbg("asyncfifo %d set source %d->%d", + afifo->id, afifo->source, src); + switch (src) { + case AM_DMX_0: + case AM_DMX_1: + case AM_DMX_2: + if (afifo->source != src) { + afifo->source = src; + ret = 0; + } + break; + default: + pr_error("illegal async fifo source %d\n", src); + ret = -EINVAL; + break; + } + + if (ret == 0 && afifo->dvb) + reset_async_fifos(afifo->dvb); + + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + +int aml_dmx_hw_set_dump_ts_select(struct dmx_demux *demux, int 
dump_ts_select) +{ + struct aml_dmx *dmx = (struct aml_dmx *)demux; + struct aml_dvb *dvb = (struct aml_dvb *)dmx->demux.priv; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&dvb->slock, flags); + dump_ts_select = !!dump_ts_select; + if (dmx->dump_ts_select != dump_ts_select) { + dmx->dump_ts_select = dump_ts_select; + dmx_reset_dmx_hw_ex_unlock(dvb, dmx, 0); + } + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + +u32 aml_dmx_get_video_pts(struct aml_dvb *dvb) +{ + unsigned long flags; + u32 pts; + + spin_lock_irqsave(&dvb->slock, flags); + pts = video_pts; + spin_unlock_irqrestore(&dvb->slock, flags); + + return pts; +} + +u32 aml_dmx_get_audio_pts(struct aml_dvb *dvb) +{ + unsigned long flags; + u32 pts; + + spin_lock_irqsave(&dvb->slock, flags); + pts = audio_pts; + spin_unlock_irqrestore(&dvb->slock, flags); + + return pts; +} + +u32 aml_dmx_get_video_pts_bit32(struct aml_dvb *dvb) +{ + unsigned long flags; + u32 bit32; + + spin_lock_irqsave(&dvb->slock, flags); + bit32 = video_pts_bit32; + spin_unlock_irqrestore(&dvb->slock, flags); + + return bit32; +} + +u32 aml_dmx_get_audio_pts_bit32(struct aml_dvb *dvb) +{ + unsigned long flags; + u32 bit32; + + spin_lock_irqsave(&dvb->slock, flags); + bit32 = audio_pts_bit32; + spin_unlock_irqrestore(&dvb->slock, flags); + + return bit32; +} +u32 aml_dmx_get_first_video_pts(struct aml_dvb *dvb) +{ + unsigned long flags; + u32 pts; + + spin_lock_irqsave(&dvb->slock, flags); + pts = first_video_pts; + spin_unlock_irqrestore(&dvb->slock, flags); + + return pts; +} + +u32 aml_dmx_get_first_audio_pts(struct aml_dvb *dvb) +{ + unsigned long flags; + u32 pts; + + spin_lock_irqsave(&dvb->slock, flags); + pts = first_audio_pts; + spin_unlock_irqrestore(&dvb->slock, flags); + + return pts; +} + +int aml_dmx_set_skipbyte(struct aml_dvb *dvb, int skipbyte) +{ + if (demux_skipbyte != skipbyte) { + pr_dbg("set skip byte %d\n", skipbyte); + demux_skipbyte = skipbyte; + dmx_reset_hw_ex(dvb, 0); + } + + 
return 0; +} + +int aml_dmx_set_demux(struct aml_dvb *dvb, int id) +{ + aml_stb_hw_set_source(dvb, DMX_SOURCE_DVR0); + if (id < DMX_DEV_COUNT) { + struct aml_dmx *dmx = &dvb->dmx[id]; + + aml_dmx_hw_set_source((struct dmx_demux *)dmx, + DMX_SOURCE_DVR0); + } + + return 0; +} + +int _set_tsfile_clkdiv(struct aml_dvb *dvb, int clkdiv) +{ + if (tsfile_clkdiv != clkdiv) { + pr_dbg("set ts file clock div %d\n", clkdiv); + tsfile_clkdiv = clkdiv; + dmx_reset_hw(dvb); + } + + return 0; +} + +static ssize_t tsfile_clkdiv_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + /*int div = (int)simple_strtol(buf, NULL, 10);*/ + long div; + + if (kstrtol(buf, 0, &div) == 0) + _set_tsfile_clkdiv(aml_get_dvb_device(), (int)div); + return size; +} + +static ssize_t tsfile_clkdiv_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + ssize_t ret; + + ret = sprintf(buf, "%d\n", tsfile_clkdiv); + return ret; +} + + +static int dmx_id; + +static ssize_t dmx_smallsec_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + ssize_t ret; + struct aml_dvb *dvb = aml_get_dvb_device(); + + ret = sprintf(buf, "%d:%d\n", dvb->dmx[dmx_id].smallsec.enable, + dvb->dmx[dmx_id].smallsec.bufsize); + return ret; +} +static ssize_t dmx_smallsec_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + int i, e, s = 0, f = 0; + struct aml_dvb *dvb = aml_get_dvb_device(); + + i = sscanf(buf, "%d:%i:%d", &e, &s, &f); + if (i <= 0) + return size; + + dmx_smallsec_set(&dvb->dmx[dmx_id].smallsec, e, s, f); + return size; +} + +static ssize_t dmx_timeout_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + ssize_t ret; + struct aml_dvb *dvb = aml_get_dvb_device(); + + ret = sprintf(buf, "%d:%d:0x%x:%d:%d\n", + dvb->dmx[dmx_id].timeout.enable, + dvb->dmx[dmx_id].timeout.timeout, + dvb->dmx[dmx_id].timeout.ch_disable, + dvb->dmx[dmx_id].timeout.match, + (DMX_READ_REG(dmx_id, 
STB_INT_STATUS)&(1<<INPUT_TIME_OUT)) ? + 1 : 0); + DMX_WRITE_REG(dmx_id, STB_INT_STATUS, (1<<INPUT_TIME_OUT)); + return ret; +} +static ssize_t dmx_timeout_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + int i, e, t = 0, c = 0, m = 0, f = 0; + struct aml_dvb *dvb = aml_get_dvb_device(); + + i = sscanf(buf, "%d:%i:%i:%d:%d", &e, &t, &c, &m, &f); + if (i <= 0) + return size; + + dmx_timeout_set(&dvb->dmx[dmx_id].timeout, e, t, c, m, f); + return size; +} + + +#define DEMUX_SCAMBLE_FUNC_DECL(i) \ +static ssize_t demux##i##_scramble_show(struct class *class, \ +struct class_attribute *attr, char *buf)\ +{\ + int data = 0;\ + int aflag = 0;\ + int vflag = 0;\ + ssize_t ret = 0;\ + data = DMX_READ_REG(i, DEMUX_SCRAMBLING_STATE);\ + if ((data & 0x01) == 0x01) \ + vflag = 1;\ + if ((data & 0x02) == 0x02) \ + aflag = 1;\ + ret = sprintf(buf, "%d %d\n", vflag, aflag);\ + return ret;\ +} + +#if DMX_DEV_COUNT > 0 +DEMUX_SCAMBLE_FUNC_DECL(0) +#endif +#if DMX_DEV_COUNT > 1 +DEMUX_SCAMBLE_FUNC_DECL(1) +#endif +#if DMX_DEV_COUNT > 2 +DEMUX_SCAMBLE_FUNC_DECL(2) +#endif +static ssize_t ciplus_output_ctrl_show(struct class *class, + struct class_attribute *attr, + char *buf) +{ + int ret; + char *out = "none"; + + pr_inf("output demux use 3 bit to indicate.\n"); + pr_inf("1bit:demux0 2bit:demux1 3bit:demux2\n"); + + switch (ciplus_out_sel) { + case 1: + out = "dmx0"; + break; + case 2: + out = "dmx1"; + break; + case 4: + out = "dmx2"; + break; + default: + break; + } + + ret = sprintf(buf, "%s 0x%x %s\n", + out, + ciplus_out_sel, + (ciplus_out_auto_mode) ? 
"" : "(force)"); + return ret; +} + +static ssize_t ciplus_output_ctrl_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + struct aml_dvb *dvb = aml_get_dvb_device(); + int i, tmp; + u32 top_cfg, ci_cfg; + + i = kstrtoint(buf, -1, &tmp); + if (tmp > 8 || tmp < 0) + pr_error("Invalid output set\n"); + else if (tmp == 8) { + ciplus_out_auto_mode = 1; + ciplus_out_sel = -1; + pr_error("Auto set output mode enable\n"); + } else { + ciplus_out_auto_mode = 0; + ciplus_out_sel = tmp; + pr_error("Auto set output mode disable\n"); + } + + top_cfg = READ_MPEG_REG(STB_TOP_CONFIG); + ci_cfg = READ_MPEG_REG(CIPLUS_CONFIG); + + if (ci_cfg & (1 << CNTL_ENABLE)) { + int out = 0; + + if (ciplus_out_auto_mode) { + if (dvb->dsc[0].source != -1) + out = 1 << (dvb->dsc[0].source - AM_TS_SRC_DMX0); + } else { + out = ciplus_out_sel; + } + + top_cfg &= ~(7<<CIPLUS_OUT_SEL); + top_cfg |= (out<<CIPLUS_OUT_SEL); + WRITE_MPEG_REG(STB_TOP_CONFIG, top_cfg); + + set_fec_core_sel(dvb); + } + + return size; +} +static ssize_t reset_fec_input_ctrl_show(struct class *class, + struct class_attribute *attr, + char *buf) +{ + return 0; +} + +static ssize_t reset_fec_input_ctrl_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + u32 v; + + v = READ_MPEG_REG(FEC_INPUT_CONTROL); + v &= ~(1<<11); + WRITE_MPEG_REG(FEC_INPUT_CONTROL, v); + + pr_dbg("reset FEC_INPUT_CONTROL to %x\n", v); + + return size; +} +static ssize_t register_addr_show(struct class *class, + struct class_attribute *attr, + char *buf); +static ssize_t register_addr_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size); +static ssize_t dmx_id_show(struct class *class, + struct class_attribute *attr, char *buf); +static ssize_t dmx_id_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size); +static ssize_t register_value_show(struct class *class, + struct class_attribute *attr, + 
char *buf); +static ssize_t register_value_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size); +static ssize_t dmx_sec_statistics_show(struct class *class, + struct class_attribute *attr, + char *buf); +static int reg_addr; + +static CLASS_ATTR_RW(dmx_id); +static CLASS_ATTR_RW(register_addr); +static CLASS_ATTR_RW(register_value); +static CLASS_ATTR_RW(tsfile_clkdiv); + +#define DEMUX_SCAMBLE_ATTR_DECL(i)\ + CLASS_ATTR_RO(demux##i##_scramble); +#if DMX_DEV_COUNT > 0 +DEMUX_SCAMBLE_ATTR_DECL(0); +#endif +#if DMX_DEV_COUNT > 1 +DEMUX_SCAMBLE_ATTR_DECL(1); +#endif +#if DMX_DEV_COUNT > 2 +DEMUX_SCAMBLE_ATTR_DECL(2); +#endif + +static CLASS_ATTR_RW(dmx_smallsec); +static CLASS_ATTR_RW(dmx_timeout); +static CLASS_ATTR_RW(reset_fec_input_ctrl); +static CLASS_ATTR_RW(ciplus_output_ctrl); +static CLASS_ATTR_RO(dmx_sec_statistics); + +#define DMX_ATTR(name) &class_attr_##name.attr + +static struct attribute *aml_dmx_class_attrs[] = { + DMX_ATTR(dmx_id), + DMX_ATTR(register_addr), + DMX_ATTR(register_value), + DMX_ATTR(tsfile_clkdiv), + DMX_ATTR(dmx_smallsec), + DMX_ATTR(dmx_timeout), + DMX_ATTR(reset_fec_input_ctrl), + DMX_ATTR(ciplus_output_ctrl), + DMX_ATTR(dmx_sec_statistics), +#define DEMUX_SCRAMBLE(i) \ + DMX_ATTR(demux##i##_scramble) +#if DMX_DEV_COUNT > 0 + DEMUX_SCRAMBLE(0), +#endif +#if DMX_DEV_COUNT > 1 + DEMUX_SCRAMBLE(1), +#endif +#if DMX_DEV_COUNT > 2 + DEMUX_SCRAMBLE(2), +#endif + NULL, +}; + +ATTRIBUTE_GROUPS(aml_dmx_class); + +static struct class aml_dmx_class = { + .name = "dmx", + .class_groups = aml_dmx_class_groups, +}; + +static ssize_t dmx_id_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int ret; + + ret = sprintf(buf, "%d\n", dmx_id); + return ret; +} + +static ssize_t dmx_id_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + int id = 0; + long value = 0; + + if (kstrtol(buf, 0, &value) == 0) + id = (int)value; + /*id = simple_strtol(buf, 
0, 16);*/ + + if (id < 0 || id > 2) + pr_dbg("dmx id must 0 ~2\n"); + else + dmx_id = id; + + return size; +} + +static ssize_t register_addr_show(struct class *class, + struct class_attribute *attr, + char *buf) +{ + int ret; + + ret = sprintf(buf, "%x\n", reg_addr); + return ret; +} + +static ssize_t register_addr_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + int addr = 0; + /*addr = simple_strtol(buf, 0, 16);*/ + long value = 0; + + if (kstrtol(buf, 0, &value) == 0) + addr = (int)value; + reg_addr = addr; + return size; +} + +static ssize_t register_value_show(struct class *class, + struct class_attribute *attr, + char *buf) +{ + int ret, value; + + value = READ_MPEG_REG(reg_addr); + ret = sprintf(buf, "%x\n", value); + return ret; +} + +static ssize_t register_value_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + int value = 0; + /*value = simple_strtol(buf, 0, 16);*/ + long val = 0; + + if (kstrtol(buf, 0, &val) == 0) + value = (int)val; + WRITE_MPEG_REG(reg_addr, value); + return size; +} + +static ssize_t dmx_sec_statistics_show(struct class *class, + struct class_attribute *attr, + char *buf) +{ + ssize_t ret; + char tmp[128]; + struct aml_dvb *dvb = aml_get_dvb_device(); + + ret = sprintf(tmp, "[hw]%#lx:%#lx:%#lx\n[sw]%#lx:%#lx:%#lx\n", + dvb->dmx[dmx_id].sec_cnt[SEC_CNT_HW], + dvb->dmx[dmx_id].sec_cnt_match[SEC_CNT_HW], + dvb->dmx[dmx_id].sec_cnt_crc_fail[SEC_CNT_HW], + dvb->dmx[dmx_id].sec_cnt[SEC_CNT_SW], + dvb->dmx[dmx_id].sec_cnt_match[SEC_CNT_SW], + dvb->dmx[dmx_id].sec_cnt_crc_fail[SEC_CNT_SW]); + ret = sprintf(buf, "%s[ss]%#lx:%#lx:%#lx\n", + tmp, + dvb->dmx[dmx_id].sec_cnt[SEC_CNT_SS], + dvb->dmx[dmx_id].sec_cnt_match[SEC_CNT_SS], + dvb->dmx[dmx_id].sec_cnt_crc_fail[SEC_CNT_SS]); + return ret; +} + +int aml_regist_dmx_class(void) +{ + + if (class_register(&aml_dmx_class) < 0) + pr_error("register class error\n"); + + return 0; +} + +int 
aml_unregist_dmx_class(void) +{ + + class_unregister(&aml_dmx_class); + return 0; +} + +static struct mconfig parser_configs[] = { + MC_PU32("video_pts", &video_pts), + MC_PU32("audio_pts", &audio_pts), + MC_PU32("video_pts_bit32", &video_pts_bit32), + MC_PU32("audio_pts_bit32", &audio_pts_bit32), + MC_PU32("first_video_pts", &first_video_pts), + MC_PU32("first_audio_pts", &first_audio_pts), +}; + +void aml_register_parser_mconfig(void) +{ + REG_PATH_CONFIGS("media.parser", parser_configs); +} +
diff --git a/drivers/stream_input/parser/hw_demux/aml_dvb.c b/drivers/stream_input/parser/hw_demux/aml_dvb.c new file mode 100644 index 0000000..6889e89 --- /dev/null +++ b/drivers/stream_input/parser/hw_demux/aml_dvb.c
@@ -0,0 +1,2934 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ + /* + * AMLOGIC DVB driver. + */ + +//move to define in Makefile +//#define ENABLE_DEMUX_DRIVER + +#include <linux/version.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/wait.h> +#include <linux/string.h> +#include <linux/interrupt.h> +#include <linux/fs.h> +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/fcntl.h> +#include <asm/irq.h> +#include <linux/uaccess.h> +#include <linux/poll.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/pinctrl/consumer.h> +#include <linux/reset.h> +#include <linux/of_gpio.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/registers/cpu_version.h> +#include <linux/clk.h> +#include <linux/of_irq.h> +#include <linux/compat.h> +#include "c_stb_define.h" +#include "c_stb_regs_define.h" +#include "aml_dvb.h" +#include "aml_dvb_reg.h" + +#include "aml_demod_gt.h" +#include "../../../common/media_clock/switch/amports_gate.h" + +#define pr_dbg(args...)\ + do {\ + if (debug_dvb)\ + printk(args);\ 
+ } while (0) +#define pr_error(fmt, args...) printk("DVB: " fmt, ## args) +#define pr_inf(fmt, args...) printk(fmt, ## args) + +MODULE_PARM_DESC(debug_dvb, "\n\t\t Enable dvb debug information"); +static int debug_dvb; +module_param(debug_dvb, int, 0644); + +#define CARD_NAME "amlogic-dvb-demux" + +#define DVB_VERSION "V2.02" + + +//echo 0xff646180 0x40 > /sys/kernel/debug/aml_reg/paddr +//echo 0xff634590 0x1 > /sys/kernel/debug/aml_reg/paddr +#define TSINB_DEGLITCH0 0xff646180 +#define TSINB_DEGLITCH1 0xff634590 + +DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); + +MODULE_PARM_DESC(dsc_max, "max number of dsc"); +static int dsc_max = DSC_DEV_COUNT; +module_param(dsc_max, int, 0644); + +static struct aml_dvb aml_dvb_device; +static struct class aml_stb_class; + +static int dmx_reset_all_flag = 0; +#if 0 +static struct reset_control *aml_dvb_demux_reset_ctl; +static struct reset_control *aml_dvb_afifo_reset_ctl; +static struct reset_control *aml_dvb_ahbarb0_reset_ctl; +static struct reset_control *aml_dvb_uparsertop_reset_ctl; +#else +/*no used reset ctl,need use clk in 4.9 kernel*/ +static struct clk *aml_dvb_demux_clk; +static struct clk *aml_dvb_afifo_clk; +static struct clk *aml_dvb_ahbarb0_clk; +static struct clk *aml_dvb_uparsertop_clk; +#endif + +static int aml_tsdemux_reset(void); +static int aml_tsdemux_set_reset_flag(void); +static int aml_tsdemux_request_irq(irq_handler_t handler, void *data); +static int aml_tsdemux_free_irq(void); +static int aml_tsdemux_set_vid(int vpid); +static int aml_tsdemux_set_aid(int apid); +static int aml_tsdemux_set_sid(int spid); +static int aml_tsdemux_set_pcrid(int pcrpid); +static int aml_tsdemux_set_skipbyte(int skipbyte); +static int aml_tsdemux_set_demux(int id); +static unsigned long aml_tsdemux_hwdmx_spin_lock(unsigned long flags); +static int aml_tsdemux_hwdmx_spin_unlock(unsigned long flags); + +static struct tsdemux_ops aml_tsdemux_ops = { + .reset = aml_tsdemux_reset, + .set_reset_flag = aml_tsdemux_set_reset_flag, + 
.request_irq = aml_tsdemux_request_irq, + .free_irq = aml_tsdemux_free_irq, + .set_vid = aml_tsdemux_set_vid, + .set_aid = aml_tsdemux_set_aid, + .set_sid = aml_tsdemux_set_sid, + .set_pcrid = aml_tsdemux_set_pcrid, + .set_skipbyte = aml_tsdemux_set_skipbyte, + .set_demux = aml_tsdemux_set_demux, + .hw_dmx_lock = aml_tsdemux_hwdmx_spin_lock, + .hw_dmx_unlock = aml_tsdemux_hwdmx_spin_unlock +}; + +long aml_stb_get_base(int id) +{ + int newbase = 0; + if (MESON_CPU_MAJOR_ID_TXL < get_cpu_type() + && MESON_CPU_MAJOR_ID_GXLX != get_cpu_type()) { + newbase = 1; + } + + switch (id) { + case ID_STB_CBUS_BASE: + return (newbase) ? 0x1800 : 0x1600; + case ID_SMARTCARD_REG_BASE: + return (newbase) ? 0x9400 : 0x2110; + case ID_ASYNC_FIFO_REG_BASE: + return (newbase) ? 0x2800 : 0x2310; + case ID_ASYNC_FIFO1_REG_BASE: + return 0x9800; + case ID_ASYNC_FIFO2_REG_BASE: + return (newbase) ? 0x2400 : 0x2314; + case ID_RESET_BASE: + return (newbase) ? 0x0400 : 0x1100; + case ID_PARSER_SUB_START_PTR_BASE: + return (newbase) ? 
0x3800 : 0x2900; + default: + return 0; + } + return 0; +} +static void aml_dvb_dmx_release(struct aml_dvb *advb, struct aml_dmx *dmx) +{ + int i; + pr_inf("[dmx_kpi] %s Enter.\n", __func__); + dvb_net_release(&dmx->dvb_net); + aml_dmx_hw_deinit(dmx); + dmx->demux.dmx.close(&dmx->demux.dmx); + dmx->demux.dmx.remove_frontend(&dmx->demux.dmx, &dmx->mem_fe); + + for (i = 0; i < DMX_DEV_COUNT; i++) + dmx->demux.dmx.remove_frontend(&dmx->demux.dmx, &dmx->hw_fe[i]); + + dvb_dmxdev_release(&dmx->dmxdev); + dvb_dmx_release(&dmx->demux); + pr_inf("[dmx_kpi] %s Exit.\n", __func__); +} + +static int aml_dvb_dmx_init(struct aml_dvb *advb, struct aml_dmx *dmx, int id) +{ + int i, ret; + struct device_node *node_dmx = NULL; + char buf[32]; + + switch (id) { + case 0: + dmx->dmx_irq = INT_DEMUX; + break; + case 1: + dmx->dmx_irq = INT_DEMUX_1; + break; + case 2: + dmx->dmx_irq = INT_DEMUX_2; + break; + } + memset(buf, 0, 32); + snprintf(buf, sizeof(buf), "dmx"); + node_dmx = of_parse_phandle(advb->pdev->dev.of_node, buf, 0); + if (node_dmx) { + memset(buf, 0, sizeof(buf)); + snprintf(buf, sizeof(buf), "demux%d_irq", id); + ret = of_irq_get_byname(node_dmx, buf); + if (ret > 0) + dmx->dmx_irq = ret; + printk("get irq num demux%d_irq:%d\n",id,dmx->dmx_irq); + } else { + printk("get default demux%d_irq:%d\n",id,dmx->dmx_irq); + } + + dmx->source = -1; + dmx->dump_ts_select = 0; + dmx->dvr_irq = -1; + + dmx->demux.dmx.capabilities = + (DMX_TS_FILTERING | DMX_SECTION_FILTERING | + DMX_MEMORY_BASED_FILTERING); + dmx->demux.filternum = dmx->demux.feednum = FILTER_COUNT; + dmx->demux.priv = advb; + dmx->demux.start_feed = aml_dmx_hw_start_feed; + dmx->demux.stop_feed = aml_dmx_hw_stop_feed; + dmx->demux.write_to_decoder = NULL; + ret = dvb_dmx_init(&dmx->demux); + if (ret < 0) { + pr_error("dvb_dmx failed: error %d\n", ret); + goto error_dmx_init; + } + + dmx->dmxdev.filternum = dmx->demux.feednum; + dmx->dmxdev.demux = &dmx->demux.dmx; + dmx->dmxdev.capabilities = 0; + ret = 
dvb_dmxdev_init(&dmx->dmxdev, &advb->dvb_adapter); + if (ret < 0) { + pr_error("dvb_dmxdev_init failed: error %d\n", ret); + goto error_dmxdev_init; + } + + for (i = 0; i < DMX_DEV_COUNT; i++) { + int source = i + DMX_FRONTEND_0; + + dmx->hw_fe[i].source = source; + ret = + dmx->demux.dmx.add_frontend(&dmx->demux.dmx, + &dmx->hw_fe[i]); + if (ret < 0) { + pr_error("adding hw_frontend to dmx failed: error %d", + ret); + dmx->hw_fe[i].source = 0; + goto error_add_hw_fe; + } + } + + dmx->mem_fe.source = DMX_MEMORY_FE; + ret = dmx->demux.dmx.add_frontend(&dmx->demux.dmx, &dmx->mem_fe); + if (ret < 0) { + pr_error("adding mem_frontend to dmx failed: error %d", ret); + goto error_add_mem_fe; + } + ret = dmx->demux.dmx.connect_frontend(&dmx->demux.dmx, &dmx->hw_fe[1]); + if (ret < 0) { + pr_error("connect frontend failed: error %d", ret); + goto error_connect_fe; + } + + dmx->id = id; + dmx->aud_chan = -1; + dmx->vid_chan = -1; + dmx->sub_chan = -1; + dmx->pcr_chan = -1; + + /*smallsec*/ + dmx->smallsec.enable = 0; + dmx->smallsec.bufsize = SS_BUFSIZE_DEF; + dmx->smallsec.dmx = dmx; + + /*input timeout*/ + dmx->timeout.enable = 1; + dmx->timeout.timeout = DTO_TIMEOUT_DEF; + dmx->timeout.ch_disable = DTO_CHDIS_VAS; + dmx->timeout.match = 1; + dmx->timeout.trigger = 0; + dmx->timeout.dmx = dmx; + + /*CRC monitor*/ + dmx->crc_check_count = 0; + dmx->crc_check_time = 0; + + ret = aml_dmx_hw_init(dmx); + if (ret < 0) { + pr_error("demux hw init error %d", ret); + dmx->id = -1; + goto error_dmx_hw_init; + } + + dvb_net_init(&advb->dvb_adapter, &dmx->dvb_net, &dmx->demux.dmx); + + return 0; +error_dmx_hw_init: +error_connect_fe: + dmx->demux.dmx.remove_frontend(&dmx->demux.dmx, &dmx->mem_fe); +error_add_mem_fe: +error_add_hw_fe: + for (i = 0; i < DMX_DEV_COUNT; i++) { + if (dmx->hw_fe[i].source) + dmx->demux.dmx.remove_frontend(&dmx->demux.dmx, + &dmx->hw_fe[i]); + } + dvb_dmxdev_release(&dmx->dmxdev); +error_dmxdev_init: + dvb_dmx_release(&dmx->demux); +error_dmx_init: + return 
ret; +} + +struct aml_dvb *aml_get_dvb_device(void) +{ + return &aml_dvb_device; +} +EXPORT_SYMBOL(aml_get_dvb_device); + +struct dvb_adapter *aml_get_dvb_adapter(void) +{ + return &aml_dvb_device.dvb_adapter; +} +EXPORT_SYMBOL(aml_get_dvb_adapter); + +static int dvb_dsc_open(struct inode *inode, struct file *file) +{ + int err; + + err = dvb_generic_open(inode, file); + if (err < 0) + return err; + + return 0; +} + +static void dsc_channel_alloc(struct aml_dsc *dsc, int id, unsigned int pid) +{ + struct aml_dsc_channel *ch = &dsc->channel[id]; + + ch->used = 1; + ch->work_mode = -1; + ch->id = id; + ch->pid = pid; + ch->set = 0; + ch->dsc = dsc; + ch->mode = -1; + + dsc_set_pid(ch, ch->pid); +} + +static void dsc_channel_free(struct aml_dsc_channel *ch) +{ + if (!ch->used) + return; + + ch->used = 0; + dsc_set_pid(ch, 0x1fff); + dsc_release(); + + ch->pid = 0x1fff; + ch->set = 0; + ch->work_mode = -1; + ch->mode = -1; +} + +static void dsc_reset(struct aml_dsc *dsc) +{ + int i; + + for (i = 0; i < DSC_COUNT; i++) + dsc_channel_free(&dsc->channel[i]); +} + +static int get_dsc_key_work_mode(enum ca_cw_type cw_type) +{ + int work_mode = DVBCSA_MODE; + + switch (cw_type) { + case CA_CW_DVB_CSA_EVEN: + case CA_CW_DVB_CSA_ODD: + work_mode = DVBCSA_MODE; + break; + case CA_CW_AES_EVEN: + case CA_CW_AES_ODD: + case CA_CW_AES_ODD_IV: + case CA_CW_AES_EVEN_IV: + case CA_CW_DES_EVEN: + case CA_CW_DES_ODD: + case CA_CW_SM4_EVEN: + case CA_CW_SM4_ODD: + case CA_CW_SM4_ODD_IV: + case CA_CW_SM4_EVEN_IV: + work_mode = CIPLUS_MODE; + default: + break; + } + return work_mode; +} + +/* Check if there are channels run in previous mode(aes/dvbcsa) + * in dsc0/ciplus + */ +static void dsc_ciplus_switch_check(struct aml_dsc_channel *ch, + enum ca_cw_type cw_type) +{ + struct aml_dsc *dsc = ch->dsc; + int work_mode = 0; + struct aml_dsc_channel *pch = NULL; + int i; + + work_mode = get_dsc_key_work_mode(cw_type); + if (dsc->work_mode == work_mode) + return; + + dsc->work_mode = 
work_mode; + + for (i = 0; i < DSC_COUNT; i++) { + pch = &dsc->channel[i]; + if (pch->work_mode != work_mode && pch->work_mode != -1) { + pr_error("Dsc work mode changed,"); + pr_error("but there are still some channels"); + pr_error("run in different mode\n"); + pr_error("mod_pre[%d] -> mod[%d] ch[%d]\n", + pch->work_mode, work_mode, i); + } + } +} + +static int dsc_set_cw(struct aml_dsc *dsc, struct ca_descr_ex *d) +{ + struct aml_dsc_channel *ch; + + if (d->index >= DSC_COUNT) + return -EINVAL; + + ch = &dsc->channel[d->index]; + + switch (d->type) { + case CA_CW_DVB_CSA_EVEN: + case CA_CW_AES_EVEN: + case CA_CW_DES_EVEN: + case CA_CW_SM4_EVEN: + memcpy(ch->even, d->cw, DSC_KEY_SIZE_MAX); + break; + case CA_CW_DVB_CSA_ODD: + case CA_CW_AES_ODD: + case CA_CW_DES_ODD: + case CA_CW_SM4_ODD: + memcpy(ch->odd, d->cw, DSC_KEY_SIZE_MAX); + break; + case CA_CW_AES_EVEN_IV: + case CA_CW_SM4_EVEN_IV: + memcpy(ch->even_iv, d->cw, DSC_KEY_SIZE_MAX); + break; + case CA_CW_AES_ODD_IV: + case CA_CW_SM4_ODD_IV: + memcpy(ch->odd_iv, d->cw, DSC_KEY_SIZE_MAX); + break; + default: + break; + } + + ch->set |= (1 << d->type) | (d->flags << 24); + + if (d->mode == CA_DSC_IDSA) { + ch->mode = IDSA_MODE; + } + + /*do key set*/ + dsc_set_key(ch, d->flags, d->type, d->cw); + dsc_ciplus_switch_check(ch, d->type); + + return 0; +} + +static int dvb_dsc_do_ioctl(struct file *file, unsigned int cmd, + void *parg) +{ + struct dvb_device *dvbdev = file->private_data; + struct aml_dsc *dsc = dvbdev->priv; + struct aml_dvb *dvb = dsc->dvb; + struct aml_dsc_channel *ch; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&dvb->slock, flags); + + switch (cmd) { + case CA_RESET: + dsc_reset(dsc); + break; + case CA_GET_CAP: { + struct ca_caps *cap = parg; + + cap->slot_num = 1; + cap->slot_type = CA_DESCR; + cap->descr_num = DSC_COUNT; + cap->descr_type = 0; + break; + } + case CA_GET_SLOT_INFO: { + struct ca_slot_info *slot = parg; + + slot->num = 1; + slot->type = CA_DESCR; + slot->flags = 
0; + break; + } + case CA_GET_DESCR_INFO: { + struct ca_descr_info *descr = parg; + + descr->num = DSC_COUNT; + descr->type = 0; + break; + } + case CA_SET_DESCR: { + struct ca_descr *d = parg; + struct ca_descr_ex dex; + + dex.index = d->index; + dex.type = d->parity ? CA_CW_DVB_CSA_ODD : CA_CW_DVB_CSA_EVEN; + dex.mode = -1; + dex.flags = 0; + memcpy(dex.cw, d->cw, sizeof(d->cw)); + + ret = dsc_set_cw(dsc, &dex); + break; + } + case CA_SET_PID: { + struct ca_pid *pi = parg; + int i; + + if (pi->index == -1) { + for (i = 0; i < DSC_COUNT; i++) { + ch = &dsc->channel[i]; + + if (ch->used && (ch->pid == pi->pid)) { + dsc_channel_free(ch); + break; + } + } + } else if ((pi->index >= 0) && (pi->index < DSC_COUNT)) { + ch = &dsc->channel[pi->index]; + + if (pi->pid < 0x1fff) { + if (!ch->used) { + dsc_channel_alloc(dsc, + pi->index, pi->pid); + } + } else { + if (ch->used) + dsc_channel_free(ch); + } + } else { + ret = -EINVAL; + } + break; + } + case CA_SET_DESCR_EX: { + struct ca_descr_ex *d = parg; + + ret = dsc_set_cw(dsc, d); + break; + } + default: + ret = -EINVAL; + break; + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + +static int dvb_dsc_usercopy(struct file *file, + unsigned int cmd, unsigned long arg, + int (*func)(struct file *file, + unsigned int cmd, void *arg)) +{ + char sbuf[128]; + void *mbuf = NULL; + void *parg = NULL; + int err = -EINVAL; + + /* Copy arguments into temp kernel buffer */ + switch (_IOC_DIR(cmd)) { + case _IOC_NONE: + /* + * For this command, the pointer is actually an integer + * argument. + */ + parg = (void *) arg; + break; + case _IOC_READ: /* some v4l ioctls are marked wrong ... 
*/ + case _IOC_WRITE: + case (_IOC_WRITE | _IOC_READ): + if (_IOC_SIZE(cmd) <= sizeof(sbuf)) { + parg = sbuf; + } else { + /* too big to allocate from stack */ + mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); + if (mbuf == NULL) + return -ENOMEM; + parg = mbuf; + } + + err = -EFAULT; + if (copy_from_user(parg, (void __user *)arg, _IOC_SIZE(cmd))) + goto out; + break; + } + + /* call driver */ + err = func(file, cmd, parg); + if (err == -ENOIOCTLCMD) + err = -ENOTTY; + + if (err < 0) + goto out; + + /* Copy results into user buffer */ + switch (_IOC_DIR(cmd)) { + case _IOC_READ: + case (_IOC_WRITE | _IOC_READ): + if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd))) + err = -EFAULT; + break; + } + +out: + kfree(mbuf); + return err; +} + +static long dvb_dsc_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return dvb_dsc_usercopy(file, cmd, arg, dvb_dsc_do_ioctl); +} + +static int dvb_dsc_release(struct inode *inode, struct file *file) +{ + struct dvb_device *dvbdev = file->private_data; + struct aml_dsc *dsc = dvbdev->priv; + struct aml_dvb *dvb = dsc->dvb; + unsigned long flags; + + spin_lock_irqsave(&dvb->slock, flags); + + dsc_reset(dsc); + + spin_unlock_irqrestore(&dvb->slock, flags); + + dvb_generic_release(inode, file); + + return 0; +} + +#ifdef CONFIG_COMPAT +static long dvb_dsc_compat_ioctl(struct file *filp, + unsigned int cmd, unsigned long args) +{ + unsigned long ret; + + args = (unsigned long)compat_ptr(args); + ret = dvb_dsc_ioctl(filp, cmd, args); + return ret; +} +#endif + + +static const struct file_operations dvb_dsc_fops = { + .owner = THIS_MODULE, + .read = NULL, + .write = NULL, + .unlocked_ioctl = dvb_dsc_ioctl, + .open = dvb_dsc_open, + .release = dvb_dsc_release, + .poll = NULL, +#ifdef CONFIG_COMPAT + .compat_ioctl = dvb_dsc_compat_ioctl, +#endif +}; + +static struct dvb_device dvbdev_dsc = { + .priv = NULL, + .users = 1, + .readers = 1, + .writers = 1, + .fops = &dvb_dsc_fops, +}; + +static int 
aml_dvb_asyncfifo_init(struct aml_dvb *advb, + struct aml_asyncfifo *asyncfifo, int id) +{ + char buf[32]; + int ret = 0; + struct device_node *node_dmx = NULL; + + if (id == 0) + asyncfifo->asyncfifo_irq = INT_ASYNC_FIFO_FLUSH; + else if(id == 2) + asyncfifo->asyncfifo_irq = INT_ASYNC_FIFO3_FLUSH; + else + asyncfifo->asyncfifo_irq = INT_ASYNC_FIFO2_FLUSH; + + memset(buf, 0, 32); + snprintf(buf, sizeof(buf), "dmx"); + node_dmx = of_parse_phandle(advb->pdev->dev.of_node, buf, 0); + if (node_dmx) { + memset(buf, 0, sizeof(buf)); + snprintf(buf, sizeof(buf), "dvr%d_irq", id); + ret = of_irq_get_byname(node_dmx, buf); + if (ret > 0) + asyncfifo->asyncfifo_irq = ret; + printk("get async%d_irq:%d\n",id,asyncfifo->asyncfifo_irq); + } else { + printk("get default async%d_irq:%d\n",id,asyncfifo->asyncfifo_irq); + } + asyncfifo->dvb = advb; + asyncfifo->id = id; + asyncfifo->init = 0; + asyncfifo->flush_size = 256 * 1024; + asyncfifo->secure_enable = 0; + asyncfifo->blk.addr = 0; + asyncfifo->blk.len = 0; + asyncfifo->stored_pages = 0; + + return aml_asyncfifo_hw_init(asyncfifo); +} +static void aml_dvb_asyncfifo_release(struct aml_dvb *advb, + struct aml_asyncfifo *asyncfifo) +{ + aml_asyncfifo_hw_deinit(asyncfifo); +} + +static int aml_dvb_dsc_init(struct aml_dvb *advb, + struct aml_dsc *dsc, int id) +{ + int i; + + for (i = 0; i < DSC_COUNT; i++) { + dsc->channel[i].id = i; + dsc->channel[i].used = 0; + dsc->channel[i].set = 0; + dsc->channel[i].pid = 0x1fff; + dsc->channel[i].dsc = dsc; + } + dsc->dvb = advb; + dsc->id = id; + dsc->source = -1; + dsc->dst = -1; + + /*Register descrambler device */ + return dvb_register_device(&advb->dvb_adapter, &dsc->dev, + &dvbdev_dsc, dsc, DVB_DEVICE_CA, 0); +} +static void aml_dvb_dsc_release(struct aml_dvb *advb, + struct aml_dsc *dsc) +{ + if (dsc->dev) + dvb_unregister_device(dsc->dev); + dsc->dev = NULL; +} + + +/*Show the STB input source*/ +static ssize_t source_show(struct class *class, + struct class_attribute *attr, char 
*buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + ssize_t ret = 0; + char *src; + + switch (dvb->stb_source) { + case AM_TS_SRC_TS0: + case AM_TS_SRC_S_TS0: + src = "ts0"; + break; + case AM_TS_SRC_TS1: + case AM_TS_SRC_S_TS1: + src = "ts1"; + break; + case AM_TS_SRC_TS2: + case AM_TS_SRC_S_TS2: + src = "ts2"; + break; + case AM_TS_SRC_TS3: + src = "ts3"; + break; + case AM_TS_SRC_HIU: + src = "hiu"; + break; + case AM_TS_SRC_HIU1: + src = "hiu1"; + break; + case AM_TS_SRC_DMX0: + src = "dmx0"; + break; + case AM_TS_SRC_DMX1: + src = "dmx1"; + break; + case AM_TS_SRC_DMX2: + src = "dmx2"; + break; + default: + src = "disable"; + break; + } + + ret = sprintf(buf, "%s\n", src); + return ret; +} + +static ssize_t clear_av_store(struct class *class, + struct class_attribute *attr, const char *buf, + size_t size) +{ + if (!strncmp("1", buf, 1)) { + aml_tsdemux_set_vid(0x1fff); + aml_tsdemux_set_aid(0x1fff); + aml_tsdemux_set_sid(0x1fff); + aml_tsdemux_set_pcrid(0x1fff); + } + + return size; +} + +static int stb_check_source(const char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + int ret = 0; + char *src; + + switch (dvb->stb_source) { + case AM_TS_SRC_TS0: + case AM_TS_SRC_S_TS0: + src = "ts0"; + break; + case AM_TS_SRC_TS1: + case AM_TS_SRC_S_TS1: + src = "ts1"; + break; + case AM_TS_SRC_TS2: + case AM_TS_SRC_S_TS2: + src = "ts2"; + break; + case AM_TS_SRC_TS3: + src = "ts3"; + break; + case AM_TS_SRC_HIU: + src = "hiu"; + break; + case AM_TS_SRC_HIU1: + src = "hiu1"; + break; + case AM_TS_SRC_DMX0: + src = "dmx0"; + break; + case AM_TS_SRC_DMX1: + src = "dmx1"; + break; + case AM_TS_SRC_DMX2: + src = "dmx2"; + break; + default: + src = "disable"; + break; + } + pr_error("stb_check_source set buf:%s, src:%s\n", buf, src); + ret = strcmp(buf,src); + return ret; +} + +/*Set the STB input source*/ +static ssize_t source_store(struct class *class, + struct class_attribute *attr, const char *buf, + size_t size) +{ + dmx_source_t src = -1; + if 
(stb_check_source(buf) == 0) { + pr_error("stb_store_source same source \n"); + return size; + } + if (!strncmp("ts0", buf, 3)) + src = DMX_SOURCE_FRONT0; + else if (!strncmp("ts1", buf, 3)) + src = DMX_SOURCE_FRONT1; + else if (!strncmp("ts2", buf, 3)) + src = DMX_SOURCE_FRONT2; + else if (!strncmp("ts3", buf, 3)) + src = DMX_SOURCE_FRONT3; + else if (!strncmp("hiu1", buf, 4)) + src = DMX_SOURCE_DVR1; + else if (!strncmp("hiu", buf, 3)) + src = DMX_SOURCE_DVR0; + else if (!strncmp("dmx0", buf, 4)) + src = DMX_SOURCE_FRONT0 + 100; + else if (!strncmp("dmx1", buf, 4)) + src = DMX_SOURCE_FRONT1 + 100; + else if (!strncmp("dmx2", buf, 4)) + src = DMX_SOURCE_FRONT2 + 100; + if (src != -1) + aml_stb_hw_set_source(&aml_dvb_device, src); + return size; +} + +static ssize_t demux_reset_all_flag_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + ssize_t ret = 0; + char *src; + + if (dmx_reset_all_flag) + src = "1"; + else + src = "0"; + ret = sprintf(buf, "%s\n", src); + return ret; +} +static ssize_t demux_reset_all_flag_store(struct class *class, + struct class_attribute *attr, const char *buf, + size_t size) +{ + if (!strncmp("0", buf, 1)) + dmx_reset_all_flag = 0; + else if (!strncmp("1", buf, 1)) + dmx_reset_all_flag = 1; + + return size; +} +#define CASE_PREFIX + +/*Show the descrambler's input source*/ +#define DSC_SOURCE_FUNC_DECL(i) \ +static ssize_t dsc##i##_source_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_dsc *dsc = &dvb->dsc[i];\ + ssize_t ret = 0;\ + char *src, *dst;\ + switch (dsc->source) {\ + CASE_PREFIX case AM_TS_SRC_DMX0:\ + src = "dmx0";\ + break;\ + CASE_PREFIX case AM_TS_SRC_DMX1:\ + src = "dmx1";\ + break;\ + CASE_PREFIX case AM_TS_SRC_DMX2:\ + src = "dmx2";\ + break;\ + CASE_PREFIX default :\ + src = "bypass";\ + break;\ + } \ + switch (dsc->dst) {\ + CASE_PREFIX case AM_TS_SRC_DMX0:\ + dst = "dmx0";\ + break;\ + CASE_PREFIX case 
AM_TS_SRC_DMX1:\ + dst = "dmx1";\ + break;\ + CASE_PREFIX case AM_TS_SRC_DMX2:\ + dst = "dmx2";\ + break;\ + CASE_PREFIX default :\ + dst = "bypass";\ + break;\ + } \ + ret = sprintf(buf, "%s-%s\n", src, dst);\ + return ret;\ +} \ +static ssize_t dsc##i##_source_store(struct class *class, \ + struct class_attribute *attr, const char *buf, size_t size)\ +{\ + dmx_source_t src = -1, dst = -1;\ + \ + if (!strncmp("dmx0", buf, 4)) {\ + src = DMX_SOURCE_FRONT0 + 100;\ + } else if (!strncmp("dmx1", buf, 4)) {\ + src = DMX_SOURCE_FRONT1 + 100;\ + } else if (!strncmp("dmx2", buf, 4)) {\ + src = DMX_SOURCE_FRONT2 + 100;\ + } \ + if (buf[4] == '-') {\ + if (!strncmp("dmx0", buf+5, 4)) {\ + dst = DMX_SOURCE_FRONT0 + 100;\ + } else if (!strncmp("dmx1", buf+5, 4)) {\ + dst = DMX_SOURCE_FRONT1 + 100;\ + } else if (!strncmp("dmx2", buf+5, 4)) {\ + dst = DMX_SOURCE_FRONT2 + 100;\ + } \ + } \ + else \ + dst = src; \ + aml_dsc_hw_set_source(&aml_dvb_device.dsc[i], src, dst);\ + return size;\ +} + +/*Show free descramblers count*/ +#define DSC_FREE_FUNC_DECL(i) \ +static ssize_t dsc##i##_free_dscs_show(struct class *class, \ + struct class_attribute *attr, char *buf) \ +{ \ + struct aml_dvb *dvb = &aml_dvb_device; \ + int fid, count; \ + ssize_t ret = 0; \ + unsigned long flags;\ +\ + spin_lock_irqsave(&dvb->slock, flags); \ + count = 0; \ + for (fid = 0; fid < DSC_COUNT; fid++) { \ + if (!dvb->dsc[i].channel[fid].used) \ + count++; \ + } \ + spin_unlock_irqrestore(&dvb->slock, flags); \ +\ + ret = sprintf(buf, "%d\n", count); \ + return ret; \ +} + +#if DSC_DEV_COUNT > 0 + DSC_SOURCE_FUNC_DECL(0) + DSC_FREE_FUNC_DECL(0) +#endif +#if DSC_DEV_COUNT > 1 + DSC_SOURCE_FUNC_DECL(1) + DSC_FREE_FUNC_DECL(1) +#endif + +/*Show the TS output source*/ +static ssize_t tso_source_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + ssize_t ret = 0; + char *src; + + switch (dvb->tso_source) { + case AM_TS_SRC_TS0: + case 
AM_TS_SRC_S_TS0: + src = "ts0"; + break; + case AM_TS_SRC_TS1: + case AM_TS_SRC_S_TS1: + src = "ts1"; + break; + case AM_TS_SRC_TS2: + case AM_TS_SRC_S_TS2: + src = "ts2"; + break; + case AM_TS_SRC_TS3: + src = "ts3"; + break; + case AM_TS_SRC_HIU: + src = "hiu"; + break; + case AM_TS_SRC_DMX0: + src = "dmx0"; + break; + case AM_TS_SRC_DMX1: + src = "dmx1"; + break; + case AM_TS_SRC_DMX2: + src = "dmx2"; + break; + default: + src = "default"; + break; + } + + ret = sprintf(buf, "%s\n", src); + return ret; +} + +/*Set the TS output source*/ +static ssize_t tso_source_store(struct class *class, + struct class_attribute *attr, const char *buf, + size_t size) +{ + dmx_source_t src = -1; + + if (!strncmp("ts0", buf, 3)) + src = DMX_SOURCE_FRONT0; + else if (!strncmp("ts1", buf, 3)) + src = DMX_SOURCE_FRONT1; + else if (!strncmp("ts2", buf, 3)) + src = DMX_SOURCE_FRONT2; + else if (!strncmp("ts3", buf, 3)) + src = DMX_SOURCE_FRONT3; + else if (!strncmp("hiu", buf, 3)) + src = DMX_SOURCE_DVR0; + else if (!strncmp("dmx0", buf, 4)) + src = DMX_SOURCE_FRONT0 + 100; + else if (!strncmp("dmx1", buf, 4)) + src = DMX_SOURCE_FRONT1 + 100; + else if (!strncmp("dmx2", buf, 4)) + src = DMX_SOURCE_FRONT2 + 100; + + aml_tso_hw_set_source(&aml_dvb_device, src); + + return size; +} + +/*Show PCR*/ +#define DEMUX_PCR_FUNC_DECL(i) \ +static ssize_t demux##i##_pcr_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + int f = 0;\ + if (i == 0)\ + f = READ_MPEG_REG(PCR_DEMUX);\ + else if (i == 1)\ + f = READ_MPEG_REG(PCR_DEMUX_2);\ + else if (i == 2)\ + f = READ_MPEG_REG(PCR_DEMUX_3);\ + return sprintf(buf, "%08x\n", f);\ +} + +static int dmx_check_source(int i, const char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + struct aml_dmx *dmx = &dvb->dmx[i]; + ssize_t ret = 0; + char *src; + switch (dmx->source) { + case AM_TS_SRC_TS0: + case AM_TS_SRC_S_TS0: + src = "ts0"; + break; + case AM_TS_SRC_TS1: + case AM_TS_SRC_S_TS1: + src = "ts1"; + break; + case 
AM_TS_SRC_TS2: + case AM_TS_SRC_S_TS2: + src = "ts2"; + break; + case AM_TS_SRC_TS3: + src = "ts3"; + break; + case AM_TS_SRC_DMX0: + src = "dmx0"; + break; + case AM_TS_SRC_DMX1: + src = "dmx1"; + break; + case AM_TS_SRC_DMX2: + src = "dmx2"; + break; + case AM_TS_SRC_HIU: + src = "hiu"; + break; + case AM_TS_SRC_HIU1: + src = "hiu1"; + break; + default : + src = ""; + break; + } + pr_error("dmx_check_source:set[%s]src[%s]dmx[%d]dmx->source:%d\n", buf, src, i, dmx->source); + ret = strcmp(buf, src); + return ret; +} + + +/*Show the STB input source*/ +#define DEMUX_SOURCE_FUNC_DECL(i) \ +static ssize_t demux##i##_source_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_dmx *dmx = &dvb->dmx[i];\ + ssize_t ret = 0;\ + char *src;\ + switch (dmx->source) {\ + CASE_PREFIX case AM_TS_SRC_TS0:\ + CASE_PREFIX case AM_TS_SRC_S_TS0:\ + src = "ts0";\ + break;\ + CASE_PREFIX case AM_TS_SRC_TS1:\ + CASE_PREFIX case AM_TS_SRC_S_TS1:\ + src = "ts1";\ + break;\ + CASE_PREFIX case AM_TS_SRC_TS2:\ + CASE_PREFIX case AM_TS_SRC_S_TS2:\ + src = "ts2";\ + break;\ + CASE_PREFIX case AM_TS_SRC_TS3:\ + src = "ts3";\ + break;\ + CASE_PREFIX case AM_TS_SRC_DMX0:\ + src = "dmx0";\ + break;\ + CASE_PREFIX case AM_TS_SRC_DMX1:\ + src = "dmx1";\ + break;\ + CASE_PREFIX case AM_TS_SRC_DMX2:\ + src = "dmx2";\ + break;\ + CASE_PREFIX case AM_TS_SRC_HIU:\ + src = "hiu";\ + break;\ + CASE_PREFIX case AM_TS_SRC_HIU1:\ + src = "hiu1";\ + break;\ + CASE_PREFIX default :\ + src = "";\ + break;\ + } \ + ret = sprintf(buf, "%s\n", src);\ + return ret;\ +} \ +static ssize_t demux##i##_source_store(struct class *class, \ + struct class_attribute *attr, const char *buf, size_t size)\ +{\ + dmx_source_t src = -1;\ + if (dmx_check_source(i, buf) == 0) {\ + pr_error("dmx[%d] source is same [%s]\n", i, buf);\ + return size;\ + }\ + if (!strncmp("ts0", buf, 3)) {\ + src = DMX_SOURCE_FRONT0;\ + } else if (!strncmp("ts1", buf, 3)) {\ + 
src = DMX_SOURCE_FRONT1;\ + } else if (!strncmp("ts2", buf, 3)) {\ + src = DMX_SOURCE_FRONT2;\ + } else if (!strncmp("ts3", buf, 3)) {\ + src = DMX_SOURCE_FRONT3;\ + } else if (!strncmp("hiu1", buf, 4)) {\ + src = DMX_SOURCE_DVR1;\ + } else if (!strncmp("hiu", buf, 3)) {\ + src = DMX_SOURCE_DVR0;\ + } else if (!strncmp("dmx0", buf, 4)) {\ + src = DMX_SOURCE_FRONT0_OFFSET;\ + } else if (!strncmp("dmx1", buf, 4)) {\ + src = DMX_SOURCE_FRONT1_OFFSET;\ + } else if (!strncmp("dmx2", buf, 4)) {\ + src = DMX_SOURCE_FRONT2_OFFSET;\ + } \ + if (src != -1) {\ + aml_dmx_hw_set_source(aml_dvb_device.dmx[i].dmxdev.demux, src);\ + } \ + return size;\ +} + +/*Show free filters count*/ +#define DEMUX_FREE_FILTERS_FUNC_DECL(i) \ +static ssize_t demux##i##_free_filters_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct dvb_demux *dmx = &dvb->dmx[i].demux;\ + int fid, count;\ + ssize_t ret = 0;\ + if (mutex_lock_interruptible(&dmx->mutex)) \ + return -ERESTARTSYS; \ + count = 0;\ + for (fid = 0; fid < dmx->filternum; fid++) {\ + if (!dmx->filter[fid].state != DMX_STATE_FREE)\ + count++;\ + } \ + mutex_unlock(&dmx->mutex);\ + ret = sprintf(buf, "%d\n", count);\ + return ret;\ +} + +/*Show dmx dev open count*/ +#define DEMUX_DEV_USERS_FUNC_DECL(i) \ +static ssize_t demux##i##_dev_users_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct dvb_demux *dmx = &dvb->dmx[i].demux;\ + int count;\ + ssize_t ret = 0;\ + if (mutex_lock_interruptible(&dmx->mutex)) \ + return -ERESTARTSYS; \ + count = dvb->dmx[i].dmxdev.dvbdev->users -1;\ + mutex_unlock(&dmx->mutex);\ + ret = sprintf(buf, "%d\n", count);\ + return ret;\ +} + + +static ssize_t demux_state_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + int i = 0, r = 0, j = 0; + struct dvb_demux *dmx; + struct aml_dmx *dmx1; + int fid, 
count; + ssize_t ret = 0; + char *str = NULL; + + for (i = 0; i < 3; i++) { + r = sprintf(buf, "#####dmx%d#######\n", i); + buf += r; + ret += r; + dmx = &dvb->dmx[i].demux; + if (mutex_lock_interruptible(&dmx->mutex)) + return -ERESTARTSYS; + + r = sprintf(buf, "filter:\n"); + buf += r; + ret += r; + + count = 0; + for (fid = 0; fid < dmx->filternum; fid++) { + if (!dmx->filter[fid].state != DMX_STATE_FREE) + count++; + else { + r = sprintf(buf, "fid:%d, pid:0x%0x, state:%d\n", fid, dmx->filter[fid].feed->pid, + dmx->filter[fid].state); + buf += r; + ret += r; + } + } + r = sprintf(buf, "used filter:%d, free filter:%d\n", (dmx->filternum - count), count); + buf += r; + ret += r; + + r = sprintf(buf, "file users:%d\n", dvb->dmx[i].dmxdev.dvbdev->users); + buf += r; + ret += r; + + r = sprintf(buf, "chan:\n"); + buf += r; + ret += r; + + dmx1 = &dvb->dmx[i]; + count = 0; + for (j = 0; j < CHANNEL_COUNT; j++) { + if (dmx1->channel[j].used) { + if (dmx1->channel[j].type == DMX_TYPE_TS) { + if (dmx1->channel[j].pes_type == DMX_PES_VIDEO) { + str = "video"; + } else if (dmx1->channel[j].pes_type == DMX_PES_AUDIO){ + str = "audio"; + } else if (dmx1->channel[j].pes_type == DMX_PES_SUBTITLE) { + str = "sub"; + } else if (dmx1->channel[j].pes_type == DMX_PES_TELETEXT) { + str = "ttx"; + } else if (dmx1->channel[j].pes_type == DMX_PES_TELETEXT) { + str = "other"; + } + } else { + str = "sec"; + } + count ++; + r = sprintf(buf, "id:%d, type:%s, pid:0x%0x\n", j, str, dmx1->channel[j].pid); + buf += r; + ret += r; + } + } + r = sprintf(buf, "used chan:%d, free chan:%d\n", count, (CHANNEL_COUNT - count)); + buf += r; + ret += r; + mutex_unlock(&dmx->mutex); + } + return ret; +} + +/*Show filter users count*/ +#define DEMUX_FILTER_USERS_FUNC_DECL(i) \ +static ssize_t demux##i##_filter_users_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_dmx *dmx = &dvb->dmx[i];\ + int dmxdevfid, count;\ + ssize_t 
ret = 0;\ + unsigned long flags;\ + spin_lock_irqsave(&dvb->slock, flags);\ + count = 0;\ + for (dmxdevfid = 0; dmxdevfid < dmx->dmxdev.filternum; dmxdevfid++) {\ + if (dmx->dmxdev.filter[dmxdevfid].state >= \ + DMXDEV_STATE_ALLOCATED)\ + count++;\ + } \ + if (count > dmx->demux_filter_user) {\ + count = dmx->demux_filter_user;\ + } else{\ + dmx->demux_filter_user = count;\ + } \ + spin_unlock_irqrestore(&dvb->slock, flags);\ + ret = sprintf(buf, "%d\n", count);\ + return ret;\ +} \ +static ssize_t demux##i##_filter_users_store(struct class *class, \ + struct class_attribute *attr, const char *buf, size_t size)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_dmx *dmx = &dvb->dmx[i];\ + unsigned long filter_used;\ + unsigned long flags;/*char *endp;*/\ + /*filter_used = simple_strtol(buf, &endp, 0);*/\ + int ret = kstrtol(buf, 0, &filter_used);\ + spin_lock_irqsave(&dvb->slock, flags);\ + if (ret == 0 && filter_used) {\ + if (dmx->demux_filter_user < FILTER_COUNT)\ + dmx->demux_filter_user++;\ + } else {\ + if (dmx->demux_filter_user > 0)\ + dmx->demux_filter_user--;\ + } \ + spin_unlock_irqrestore(&dvb->slock, flags);\ + return size;\ +} + +/*Show ts header*/ +#define DEMUX_TS_HEADER_FUNC_DECL(i) \ +static ssize_t demux##i##_ts_header_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + int hdr = 0;\ + if (i == 0)\ + hdr = READ_MPEG_REG(TS_HEAD_1);\ + else if (i == 1)\ + hdr = READ_MPEG_REG(TS_HEAD_1_2);\ + else if (i == 2)\ + hdr = READ_MPEG_REG(TS_HEAD_1_3);\ + return sprintf(buf, "%08x\n", hdr);\ +} + +/*Show channel activity*/ +#define DEMUX_CHANNEL_ACTIVITY_FUNC_DECL(i) \ +static ssize_t demux##i##_channel_activity_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + int f = 0;\ + if (i == 0)\ + f = READ_MPEG_REG(DEMUX_CHANNEL_ACTIVITY);\ + else if (i == 1)\ + f = READ_MPEG_REG(DEMUX_CHANNEL_ACTIVITY_2);\ + else if (i == 2)\ + f = READ_MPEG_REG(DEMUX_CHANNEL_ACTIVITY_3);\ + return sprintf(buf, 
"%08x\n", f);\ +} + +#define DEMUX_RESET_FUNC_DECL(i) \ +static ssize_t demux##i##_reset_store(struct class *class, \ + struct class_attribute *attr, \ + const char *buf, size_t size)\ +{\ + if (!strncmp("1", buf, 1)) { \ + struct aml_dvb *dvb = &aml_dvb_device; \ + pr_inf("Reset demux["#i"], call dmx_reset_dmx_hw\n"); \ + dmx_reset_dmx_id_hw_ex(dvb, i, 0); \ + } \ + return size; \ +} + +/*DVR record mode*/ +#define DVR_MODE_FUNC_DECL(i) \ +static ssize_t dvr##i##_mode_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_dmx *dmx = &dvb->dmx[i];\ + ssize_t ret = 0;\ + char *mode;\ + if (dmx->dump_ts_select) {\ + mode = "ts";\ + } else {\ + mode = "pid";\ + } \ + ret = sprintf(buf, "%s\n", mode);\ + return ret;\ +} \ +static ssize_t dvr##i##_mode_store(struct class *class, \ + struct class_attribute *attr, const char *buf, size_t size)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_dmx *dmx = &dvb->dmx[i];\ + int dump_ts_select = -1;\ + \ + if (!strncmp("pid", buf, 3) && dmx->dump_ts_select) {\ + dump_ts_select = 0;\ + } else if (!strncmp("ts", buf, 2) && !dmx->dump_ts_select) {\ + dump_ts_select = 1;\ + } \ + if (dump_ts_select != -1) {\ + aml_dmx_hw_set_dump_ts_select(\ + aml_dvb_device.dmx[i].dmxdev.demux, dump_ts_select);\ + } \ + return size;\ +} + +#if DMX_DEV_COUNT > 0 + DEMUX_PCR_FUNC_DECL(0) + DEMUX_SOURCE_FUNC_DECL(0) + DEMUX_FREE_FILTERS_FUNC_DECL(0) + DEMUX_FILTER_USERS_FUNC_DECL(0) + DEMUX_DEV_USERS_FUNC_DECL(0) + DVR_MODE_FUNC_DECL(0) + DEMUX_TS_HEADER_FUNC_DECL(0) + DEMUX_CHANNEL_ACTIVITY_FUNC_DECL(0) + DEMUX_RESET_FUNC_DECL(0) +#endif +#if DMX_DEV_COUNT > 1 + DEMUX_PCR_FUNC_DECL(1) + DEMUX_SOURCE_FUNC_DECL(1) + DEMUX_FREE_FILTERS_FUNC_DECL(1) + DEMUX_FILTER_USERS_FUNC_DECL(1) + DEMUX_DEV_USERS_FUNC_DECL(1) + DVR_MODE_FUNC_DECL(1) + DEMUX_TS_HEADER_FUNC_DECL(1) + DEMUX_CHANNEL_ACTIVITY_FUNC_DECL(1) + DEMUX_RESET_FUNC_DECL(1) +#endif +#if DMX_DEV_COUNT > 2 + 
DEMUX_PCR_FUNC_DECL(2) + DEMUX_SOURCE_FUNC_DECL(2) + DEMUX_FREE_FILTERS_FUNC_DECL(2) + DEMUX_FILTER_USERS_FUNC_DECL(2) + DEMUX_DEV_USERS_FUNC_DECL(2) + DVR_MODE_FUNC_DECL(2) + DEMUX_TS_HEADER_FUNC_DECL(2) + DEMUX_CHANNEL_ACTIVITY_FUNC_DECL(2) + DEMUX_RESET_FUNC_DECL(2) +#endif + +/*Show the async fifo source*/ +#define ASYNCFIFO_SOURCE_FUNC_DECL(i) \ +static ssize_t asyncfifo##i##_source_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_asyncfifo *afifo = &dvb->asyncfifo[i];\ + ssize_t ret = 0;\ + char *src;\ + if (dvb->async_fifo_total_count <= i)\ + return ret;\ + switch (afifo->source) {\ + CASE_PREFIX case AM_DMX_0:\ + src = "dmx0";\ + break;\ + CASE_PREFIX case AM_DMX_1:\ + src = "dmx1";\ + break; \ + CASE_PREFIX case AM_DMX_2:\ + src = "dmx2";\ + break;\ + CASE_PREFIX default :\ + src = "";\ + break;\ + } \ + ret = sprintf(buf, "%s\n", src);\ + return ret;\ +} \ +static ssize_t asyncfifo##i##_source_store(struct class *class, \ + struct class_attribute *attr, const char *buf, size_t size)\ +{\ + enum aml_dmx_id_t src = -1;\ + \ + if (aml_dvb_device.async_fifo_total_count <= i)\ + return 0;\ + if (!strncmp("dmx0", buf, 4)) {\ + src = AM_DMX_0;\ + } else if (!strncmp("dmx1", buf, 4)) {\ + src = AM_DMX_1;\ + } else if (!strncmp("dmx2", buf, 4)) {\ + src = AM_DMX_2;\ + } \ + if (src != -1) {\ + aml_asyncfifo_hw_set_source(&aml_dvb_device.asyncfifo[i], src);\ + } \ + return size;\ +} + +#if ASYNCFIFO_COUNT > 0 +ASYNCFIFO_SOURCE_FUNC_DECL(0) +#endif +#if ASYNCFIFO_COUNT > 1 + ASYNCFIFO_SOURCE_FUNC_DECL(1) +#endif + +#if ASYNCFIFO_COUNT > 2 + ASYNCFIFO_SOURCE_FUNC_DECL(2) +#endif + +/*Show the async fifo flush size*/ +#define ASYNCFIFO_FLUSHSIZE_FUNC_DECL(i) \ +static ssize_t asyncfifo##i##_flush_size_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_asyncfifo *afifo = &dvb->asyncfifo[i];\ + ssize_t ret = 
0;\ + if (dvb->async_fifo_total_count <= i)\ + return ret;\ + ret = sprintf(buf, "%d\n", afifo->flush_size);\ + return ret;\ +} \ +static ssize_t asyncfifo##i##_flush_size_store(struct class *class, \ + struct class_attribute *attr, \ + const char *buf, size_t size)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_asyncfifo *afifo = &dvb->asyncfifo[i];\ + /*int fsize = simple_strtol(buf, NULL, 10);*/\ + int fsize = 0;\ + long value;\ + int ret =0;\ + if (dvb->async_fifo_total_count <= i)\ + return (size_t)0;\ + ret = kstrtol(buf, 0, &value);\ + if (ret == 0)\ + fsize = value;\ + if (fsize != afifo->flush_size) {\ + afifo->flush_size = fsize;\ + aml_asyncfifo_hw_reset(&aml_dvb_device.asyncfifo[i]);\ + } \ + return size;\ +} + +#if ASYNCFIFO_COUNT > 0 +ASYNCFIFO_FLUSHSIZE_FUNC_DECL(0) +#endif + +#if ASYNCFIFO_COUNT > 1 + ASYNCFIFO_FLUSHSIZE_FUNC_DECL(1) +#endif + +#if ASYNCFIFO_COUNT > 2 + ASYNCFIFO_FLUSHSIZE_FUNC_DECL(2) +#endif + +/*Show the async fifo secure buffer addr*/ +#define ASYNCFIFO_SECUREADDR_FUNC_DECL(i) \ +static ssize_t asyncfifo##i##_secure_addr_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_asyncfifo *afifo = &dvb->asyncfifo[i];\ + ssize_t ret = 0;\ + if (dvb->async_fifo_total_count <= i)\ + return ret;\ + ret = sprintf(buf, "0x%x\n", afifo->blk.addr);\ + return ret;\ +} \ +static ssize_t asyncfifo##i##_secure_addr_store(struct class *class, \ + struct class_attribute *attr, \ +const char *buf, size_t size)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_asyncfifo *afifo = &dvb->asyncfifo[i];\ + unsigned long value;\ + int ret=0;\ + if (dvb->async_fifo_total_count <= i)\ + return (size_t)0;\ + ret = kstrtol(buf, 0, &value);\ + if (ret == 0 && value != afifo->blk.addr) {\ + afifo->blk.addr = value;\ + aml_asyncfifo_hw_reset(&aml_dvb_device.asyncfifo[i]);\ + } \ + return size;\ +} + +#if ASYNCFIFO_COUNT > 0 + ASYNCFIFO_SECUREADDR_FUNC_DECL(0) 
+#endif + +#if ASYNCFIFO_COUNT > 1 + ASYNCFIFO_SECUREADDR_FUNC_DECL(1) +#endif + +#if ASYNCFIFO_COUNT > 2 + ASYNCFIFO_SECUREADDR_FUNC_DECL(2) +#endif + +/*Show the async fifo secure buffer size*/ +#define ASYNCFIFO_SECUREADDR_SIZE_FUNC_DECL(i) \ +static ssize_t asyncfifo##i##_secure_addr_size_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_asyncfifo *afifo = &dvb->asyncfifo[i];\ + ssize_t ret = 0;\ + if (dvb->async_fifo_total_count <= i)\ + return ret;\ + ret = sprintf(buf, "0x%x\n", afifo->blk.len);\ + return ret;\ +} \ +static ssize_t asyncfifo##i##_secure_addr_size_store(struct class *class, \ + struct class_attribute *attr, \ +const char *buf, size_t size)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_asyncfifo *afifo = &dvb->asyncfifo[i];\ + unsigned long value;\ + int ret=0;\ + if (dvb->async_fifo_total_count <= i)\ + return (size_t)0;\ + ret = kstrtol(buf, 0, &value);\ + if (ret == 0 && value != afifo->blk.len) {\ + afifo->blk.len = value;\ + aml_asyncfifo_hw_reset(&aml_dvb_device.asyncfifo[i]);\ + } \ + return size;\ +} + +#if ASYNCFIFO_COUNT > 0 + ASYNCFIFO_SECUREADDR_SIZE_FUNC_DECL(0) +#endif + +#if ASYNCFIFO_COUNT > 1 + ASYNCFIFO_SECUREADDR_SIZE_FUNC_DECL(1) +#endif + +#if ASYNCFIFO_COUNT > 2 + ASYNCFIFO_SECUREADDR_SIZE_FUNC_DECL(2) +#endif + + +/*Show the async fifo secure enable*/ +#define ASYNCFIFO_SECURENABLE_FUNC_DECL(i) \ +static ssize_t asyncfifo##i##_secure_enable_show(struct class *class, \ + struct class_attribute *attr, char *buf)\ +{\ + struct aml_dvb *dvb = &aml_dvb_device;\ + struct aml_asyncfifo *afifo = &dvb->asyncfifo[i];\ + ssize_t ret = 0;\ + if (dvb->async_fifo_total_count <= i)\ + return ret;\ + ret = sprintf(buf, "%d\n", afifo->secure_enable);\ + return ret;\ +} \ +static ssize_t asyncfifo##i##_secure_enable_store(struct class *class, \ + struct class_attribute *attr, \ + const char *buf, size_t size)\ +{\ + struct aml_dvb *dvb = 
&aml_dvb_device;\ + struct aml_asyncfifo *afifo = &dvb->asyncfifo[i];\ + int enable = 0;\ + long value;\ + int ret=0;\ + if (dvb->async_fifo_total_count <= i)\ + return (size_t)0;\ + ret = kstrtol(buf, 0, &value);\ + if (ret == 0)\ + enable = value;\ + if (enable != afifo->secure_enable) {\ + afifo->secure_enable = enable;\ + aml_asyncfifo_hw_reset(&aml_dvb_device.asyncfifo[i]);\ + } \ + return size;\ +} + +#if ASYNCFIFO_COUNT > 0 +ASYNCFIFO_SECURENABLE_FUNC_DECL(0) +#endif + +#if ASYNCFIFO_COUNT > 1 + ASYNCFIFO_SECURENABLE_FUNC_DECL(1) +#endif + +#if ASYNCFIFO_COUNT > 2 + ASYNCFIFO_SECURENABLE_FUNC_DECL(2) +#endif + +/*Reset the Demux*/ +static ssize_t demux_reset_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t size) +{ + if (!strncmp("1", buf, 1)) { + struct aml_dvb *dvb = &aml_dvb_device; + unsigned long flags; + + spin_lock_irqsave(&dvb->slock, flags); + pr_dbg("Reset demux, call dmx_reset_hw\n"); + dmx_reset_hw_ex(dvb, 0); + spin_unlock_irqrestore(&dvb->slock, flags); + } + + return size; +} + +/*Show the Video PTS value*/ +static ssize_t video_pts_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + ssize_t ret = 0; + + ret = sprintf(buf, "%u\n", aml_dmx_get_video_pts(dvb)); + + return ret; +} + +/*Show the Audio PTS value*/ +static ssize_t audio_pts_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + ssize_t ret = 0; + + ret = sprintf(buf, "%u\n", aml_dmx_get_audio_pts(dvb)); + + return ret; +} + +/*Show the Video PTS bit32 value*/ +static ssize_t video_pts_bit32_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + ssize_t ret = 0; + + ret = sprintf(buf, "%u\n", aml_dmx_get_video_pts_bit32(dvb)); + + return ret; +} + +/*Show the Audio PTS bit32 value*/ +static ssize_t audio_pts_bit32_show(struct class *class, + struct class_attribute 
*attr, char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + ssize_t ret = 0; + + ret = sprintf(buf, "%u\n", aml_dmx_get_audio_pts_bit32(dvb)); + + return ret; +} + +/*Show the 33bit Video PTS value*/ +static ssize_t video_pts_u64_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + ssize_t ret = 0; + + u64 pts_val = aml_dmx_get_video_pts(dvb); + pts_val &= 0x00000000FFFFFFFF; + + if (aml_dmx_get_video_pts_bit32(dvb)) { + pts_val = pts_val | (1LL<<32); + } + + ret = sprintf(buf, "%llu\n", pts_val); + + return ret; +} + +/*Show the 33bit Audio PTS value*/ +static ssize_t audio_pts_u64_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + ssize_t ret = 0; + + u64 pts_val = aml_dmx_get_audio_pts(dvb); + pts_val &= 0x00000000FFFFFFFF; + + if (aml_dmx_get_audio_pts_bit32(dvb)) { + pts_val = pts_val | (1LL<<32); + } + + ret = sprintf(buf, "%llu\n", pts_val); + + return ret; +} + +/*Show the First Video PTS value*/ +static ssize_t first_video_pts_show(struct class *class, + struct class_attribute *attr, + char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + ssize_t ret = 0; + + ret = sprintf(buf, "%u\n", aml_dmx_get_first_video_pts(dvb)); + + return ret; +} + +/*Show the First Audio PTS value*/ +static ssize_t first_audio_pts_show(struct class *class, + struct class_attribute *attr, + char *buf) +{ + struct aml_dvb *dvb = &aml_dvb_device; + ssize_t ret = 0; + + ret = sprintf(buf, "%u\n", aml_dmx_get_first_audio_pts(dvb)); + + return ret; +} + +static ssize_t hw_setting_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + int r, total = 0; + int i; + struct aml_dvb *dvb = &aml_dvb_device; + int invert, ctrl; + + for (i = 0; i < dvb->ts_in_total_count; i++) { + struct aml_ts_input *ts = &dvb->ts[i]; + + if (ts->s2p_id != -1) + invert = dvb->s2p[ts->s2p_id].invert; + else + invert = 0; + + ctrl = ts->control; + + r = 
sprintf(buf, "ts%d %s control: 0x%x invert: 0x%x\n", i, + ts->mode == AM_TS_DISABLE ? "disable" : + (ts->mode == AM_TS_SERIAL ? "serial" : + "parallel"), ctrl, invert); + buf += r; + total += r; + } + + return total; +} + +static ssize_t hw_setting_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t count) +{ + int id, ctrl, invert, r, mode; + char mname[32]; + char pname[32]; + unsigned long flags; + struct aml_ts_input *ts; + struct aml_dvb *dvb = &aml_dvb_device; + + r = sscanf(buf, "%d %s %x %x", &id, mname, &ctrl, &invert); + if (r != 4) + return -EINVAL; + + if (id < 0 || id >= dvb->ts_in_total_count) + return -EINVAL; + + if ((mname[0] == 's') || (mname[0] == 'S')) { + sprintf(pname, "s_ts%d", id); + mode = AM_TS_SERIAL; + } else if ((mname[0] == 'p') || (mname[0] == 'P')) { + sprintf(pname, "p_ts%d", id); + mode = AM_TS_PARALLEL; + } else + mode = AM_TS_DISABLE; + + spin_lock_irqsave(&dvb->slock, flags); + + ts = &dvb->ts[id]; + + if ((mode == AM_TS_SERIAL) && (ts->mode != AM_TS_SERIAL)) { + int i; + int scnt = 0; + + for (i = 0; i < dvb->ts_in_total_count; i++) { + if (dvb->ts[i].s2p_id != -1) + scnt++; + } + + if (scnt >= dvb->s2p_total_count) + pr_error("no free s2p\n"); + else + ts->s2p_id = scnt; + } + + if ((mode != AM_TS_SERIAL) || (ts->s2p_id != -1)) { + if (ts->pinctrl) { + devm_pinctrl_put(ts->pinctrl); + ts->pinctrl = NULL; + } + + ts->pinctrl = devm_pinctrl_get_select(&dvb->pdev->dev, pname); +/* if(IS_ERR_VALUE(ts->pinctrl))*/ +/* ts->pinctrl = NULL;*/ + ts->mode = mode; + ts->control = ctrl; + + if (mode == AM_TS_SERIAL) + dvb->s2p[ts->s2p_id].invert = invert; + else + ts->s2p_id = -1; + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return count; +} + + +static CLASS_ATTR_RW(hw_setting); +static CLASS_ATTR_RW(source); +static CLASS_ATTR_RW(demux_reset_all_flag); +static CLASS_ATTR_RW(tso_source); +#define DEMUX_SOURCE_ATTR_PCR(i)\ + static CLASS_ATTR_RO(demux##i##_pcr) +#define 
DEMUX_SOURCE_ATTR_DECL(i)\ + static CLASS_ATTR_RW(demux##i##_source) +#define DEMUX_FREE_FILTERS_ATTR_DECL(i)\ + static CLASS_ATTR_RO(demux##i##_free_filters) +#define DEMUX_FILTER_USERS_ATTR_DECL(i)\ + static CLASS_ATTR_RW(demux##i##_filter_users) +#define DEMUX_DEV_USERS_ATTR_DECL(i)\ + static CLASS_ATTR_RO(demux##i##_dev_users) +#define DVR_MODE_ATTR_DECL(i)\ + static CLASS_ATTR_RW(dvr##i##_mode) +#define DEMUX_TS_HEADER_ATTR_DECL(i)\ + static CLASS_ATTR_RO(demux##i##_ts_header) +#define DEMUX_CHANNEL_ACTIVITY_ATTR_DECL(i)\ + static CLASS_ATTR_RO(demux##i##_channel_activity) +#define DMX_RESET_ATTR_DECL(i)\ + static CLASS_ATTR_WO(demux##i##_reset) + +#if DMX_DEV_COUNT > 0 + DEMUX_SOURCE_ATTR_PCR(0); + DEMUX_SOURCE_ATTR_DECL(0); + DEMUX_FREE_FILTERS_ATTR_DECL(0); + DEMUX_FILTER_USERS_ATTR_DECL(0); + DEMUX_DEV_USERS_ATTR_DECL(0); + DVR_MODE_ATTR_DECL(0); + DEMUX_TS_HEADER_ATTR_DECL(0); + DEMUX_CHANNEL_ACTIVITY_ATTR_DECL(0); + DMX_RESET_ATTR_DECL(0); +#endif +#if DMX_DEV_COUNT > 1 + DEMUX_SOURCE_ATTR_PCR(1); + DEMUX_SOURCE_ATTR_DECL(1); + DEMUX_FREE_FILTERS_ATTR_DECL(1); + DEMUX_FILTER_USERS_ATTR_DECL(1); + DEMUX_DEV_USERS_ATTR_DECL(1); + DVR_MODE_ATTR_DECL(1); + DEMUX_TS_HEADER_ATTR_DECL(1); + DEMUX_CHANNEL_ACTIVITY_ATTR_DECL(1); + DMX_RESET_ATTR_DECL(1); +#endif +#if DMX_DEV_COUNT > 2 + DEMUX_SOURCE_ATTR_PCR(2); + DEMUX_SOURCE_ATTR_DECL(2); + DEMUX_FREE_FILTERS_ATTR_DECL(2); + DEMUX_FILTER_USERS_ATTR_DECL(2); + DEMUX_DEV_USERS_ATTR_DECL(2); + DVR_MODE_ATTR_DECL(2); + DEMUX_TS_HEADER_ATTR_DECL(2); + DEMUX_CHANNEL_ACTIVITY_ATTR_DECL(2); + DMX_RESET_ATTR_DECL(2); +#endif + +#define ASYNCFIFO_SOURCE_ATTR_DECL(i)\ + static CLASS_ATTR_RW(asyncfifo##i##_source) +#define ASYNCFIFO_FLUSHSIZE_ATTR_DECL(i)\ + static CLASS_ATTR_RW(asyncfifo##i##_flush_size) +#define ASYNCFIFO_SECUREADDR_ATTR_DECL(i)\ + static CLASS_ATTR_RW(asyncfifo##i##_secure_addr) +#define ASYNCFIFO_SECUREADDR_SIZE_ATTR_DECL(i)\ + static CLASS_ATTR_RW(asyncfifo##i##_secure_addr_size) +#define 
ASYNCFIFO_SECURENABLE_ATTR_DECL(i)\ + static CLASS_ATTR_RW(asyncfifo##i##_secure_enable) + +#if ASYNCFIFO_COUNT > 0 + ASYNCFIFO_SOURCE_ATTR_DECL(0); + ASYNCFIFO_FLUSHSIZE_ATTR_DECL(0); + ASYNCFIFO_SECUREADDR_ATTR_DECL(0); + ASYNCFIFO_SECUREADDR_SIZE_ATTR_DECL(0); + ASYNCFIFO_SECURENABLE_ATTR_DECL(0); +#endif +#if ASYNCFIFO_COUNT > 1 + ASYNCFIFO_SOURCE_ATTR_DECL(1); + ASYNCFIFO_FLUSHSIZE_ATTR_DECL(1); + ASYNCFIFO_SECUREADDR_ATTR_DECL(1); + ASYNCFIFO_SECUREADDR_SIZE_ATTR_DECL(1); + ASYNCFIFO_SECURENABLE_ATTR_DECL(1); +#endif + +#if ASYNCFIFO_COUNT > 2 + ASYNCFIFO_SOURCE_ATTR_DECL(2); + ASYNCFIFO_FLUSHSIZE_ATTR_DECL(2); + ASYNCFIFO_SECUREADDR_ATTR_DECL(2); + ASYNCFIFO_SECUREADDR_SIZE_ATTR_DECL(2); + ASYNCFIFO_SECURENABLE_ATTR_DECL(2); +#endif + + static CLASS_ATTR_WO(demux_reset); + static CLASS_ATTR_RO(video_pts); + static CLASS_ATTR_RO(audio_pts); + static CLASS_ATTR_RO(video_pts_bit32); + static CLASS_ATTR_RO(audio_pts_bit32); + static CLASS_ATTR_RO(video_pts_u64); + static CLASS_ATTR_RO(audio_pts_u64); + static CLASS_ATTR_RO(first_video_pts); + static CLASS_ATTR_RO(first_audio_pts); + static CLASS_ATTR_WO(clear_av); + static CLASS_ATTR_RO(demux_state); + +#define DSC_SOURCE_ATTR_DECL(i)\ + static CLASS_ATTR_RW(dsc##i##_source) +#define DSC_FREE_ATTR_DECL(i) \ + static CLASS_ATTR_RO(dsc##i##_free_dscs) + +#if DSC_DEV_COUNT > 0 + DSC_SOURCE_ATTR_DECL(0); + DSC_FREE_ATTR_DECL(0); +#endif +#if DSC_DEV_COUNT > 1 + DSC_SOURCE_ATTR_DECL(1); + DSC_FREE_ATTR_DECL(1); +#endif + +#define STB_ATTR(name) &class_attr_##name.attr + +static struct attribute *aml_stb_class_attrs[] = { + STB_ATTR(hw_setting), + STB_ATTR(source), + STB_ATTR(demux_reset_all_flag), + STB_ATTR(tso_source), + STB_ATTR(demux_reset), + STB_ATTR(video_pts), + STB_ATTR(audio_pts), + STB_ATTR(video_pts_bit32), + STB_ATTR(audio_pts_bit32), + STB_ATTR(video_pts_u64), + STB_ATTR(audio_pts_u64), + STB_ATTR(first_video_pts), + STB_ATTR(first_audio_pts), + STB_ATTR(clear_av), + STB_ATTR(demux_state), +#define 
DEMUX_PCR(i) \ + STB_ATTR(demux##i##_pcr) + DEMUX_PCR(0), + DEMUX_PCR(1), + DEMUX_PCR(2), +#define DEMUX_SOURCE(i) \ + STB_ATTR(demux##i##_source) + DEMUX_SOURCE(0), + DEMUX_SOURCE(1), + DEMUX_SOURCE(2), +#define DEMUX_FREE_FILTER(i) \ + STB_ATTR(demux##i##_free_filters) + DEMUX_FREE_FILTER(0), + DEMUX_FREE_FILTER(1), + DEMUX_FREE_FILTER(2), +#define DEMUX_FILTER_USERS(i) \ + STB_ATTR(demux##i##_filter_users) + DEMUX_FILTER_USERS(0), + DEMUX_FILTER_USERS(1), + DEMUX_FILTER_USERS(2), +#define DEMUX_DEV_USERS(i) \ + STB_ATTR(demux##i##_dev_users) + DEMUX_DEV_USERS(0), + DEMUX_DEV_USERS(1), + DEMUX_DEV_USERS(2), +#define DEMUX_TS_HEADER(i) \ + STB_ATTR(demux##i##_ts_header) + DEMUX_TS_HEADER(0), + DEMUX_TS_HEADER(1), + DEMUX_TS_HEADER(2), +#define DEMUX_CHANNEL_ACTIVITY_ATTR(i) \ + STB_ATTR(demux##i##_channel_activity) + DEMUX_CHANNEL_ACTIVITY_ATTR(0), + DEMUX_CHANNEL_ACTIVITY_ATTR(1), + DEMUX_CHANNEL_ACTIVITY_ATTR(2), +#define DEMUX_RESET(i) \ + STB_ATTR(demux##i##_reset) + DEMUX_RESET(0), + DEMUX_RESET(1), + DEMUX_RESET(2), +#define DVR_MODE(i)\ + STB_ATTR(dvr##i##_mode) + DVR_MODE(0), + DVR_MODE(1), + DVR_MODE(2), +#define ASYNCFIFO_SOURCE(i) \ + STB_ATTR(asyncfifo##i##_source) +#if ASYNCFIFO_COUNT > 0 + ASYNCFIFO_SOURCE(0), +#endif +#if ASYNCFIFO_COUNT > 1 + ASYNCFIFO_SOURCE(1), +#endif +#if ASYNCFIFO_COUNT > 2 + ASYNCFIFO_SOURCE(2), +#endif +#define ASYNCFIFO_FLUSH_SIZE(i) \ + STB_ATTR(asyncfifo##i##_flush_size) +#if ASYNCFIFO_COUNT > 0 + ASYNCFIFO_FLUSH_SIZE(0), +#endif +#if ASYNCFIFO_COUNT > 1 + ASYNCFIFO_FLUSH_SIZE(1), +#endif +#if ASYNCFIFO_COUNT > 2 + ASYNCFIFO_FLUSH_SIZE(2), +#endif + +#define ASYNCFIFO_SECURE_ADDR(i) \ + STB_ATTR(asyncfifo##i##_secure_addr) +#if ASYNCFIFO_COUNT > 0 + ASYNCFIFO_SECURE_ADDR(0), +#endif +#if ASYNCFIFO_COUNT > 1 + ASYNCFIFO_SECURE_ADDR(1), +#endif +#if ASYNCFIFO_COUNT > 2 + ASYNCFIFO_SECURE_ADDR(2), +#endif + +#define ASYNCFIFO_SECURE_ADDR_SIZE(i) \ + STB_ATTR(asyncfifo##i##_secure_addr_size) +#if ASYNCFIFO_COUNT > 0 + 
ASYNCFIFO_SECURE_ADDR_SIZE(0), +#endif +#if ASYNCFIFO_COUNT > 1 + ASYNCFIFO_SECURE_ADDR_SIZE(1), +#endif +#if ASYNCFIFO_COUNT > 2 + ASYNCFIFO_SECURE_ADDR_SIZE(2), +#endif + +#define ASYNCFIFO_SECURE_ENABLE(i)\ + STB_ATTR(asyncfifo##i##_secure_enable) +#if ASYNCFIFO_COUNT > 0 + ASYNCFIFO_SECURE_ENABLE(0), +#endif +#if ASYNCFIFO_COUNT > 1 + ASYNCFIFO_SECURE_ENABLE(1), +#endif +#if ASYNCFIFO_COUNT > 2 + ASYNCFIFO_SECURE_ENABLE(2), +#endif +#define DSC_SOURCE(i) \ + STB_ATTR(dsc##i##_source) +#if DSC_DEV_COUNT > 0 + DSC_SOURCE(0), +#endif +#if DSC_DEV_COUNT > 1 + DSC_SOURCE(1), +#endif +#define DSC_FREE_DSCS(i) \ + STB_ATTR(dsc##i##_free_dscs) +#if DSC_DEV_COUNT > 0 + DSC_FREE_DSCS(0), +#endif +#if DSC_DEV_COUNT > 1 + DSC_FREE_DSCS(1), +#endif + NULL +}; + + +ATTRIBUTE_GROUPS(aml_stb_class); + +static struct class aml_stb_class = { + .name = "stb", + .class_groups = aml_stb_class_groups, +}; + +/* + *extern int aml_regist_dmx_class(void); + *extern int aml_unregist_dmx_class(void); + */ +/* + *void afifo_reset(int v) + *{ + * if (v) + * reset_control_assert(aml_dvb_afifo_reset_ctl); + * else + * reset_control_deassert(aml_dvb_afifo_reset_ctl); + *} + */ + +static int aml_dvb_probe(struct platform_device *pdev) +{ + struct aml_dvb *advb; + int i, ret = 0; + struct devio_aml_platform_data *pd_dvb; + + pr_inf("probe amlogic dvb driver [%s]\n", DVB_VERSION); + + if (get_cpu_type() < MESON_CPU_MAJOR_ID_G12A) + { + aml_dvb_demux_clk = + devm_clk_get(&pdev->dev, "demux"); + if (IS_ERR_OR_NULL(aml_dvb_demux_clk)) { + dev_err(&pdev->dev, "get demux clk fail\n"); + return -1; + } + clk_prepare_enable(aml_dvb_demux_clk); + + aml_dvb_afifo_clk = + devm_clk_get(&pdev->dev, "asyncfifo"); + if (IS_ERR_OR_NULL(aml_dvb_afifo_clk)) { + dev_err(&pdev->dev, "get asyncfifo clk fail\n"); + return -1; + } + clk_prepare_enable(aml_dvb_afifo_clk); + + aml_dvb_ahbarb0_clk = + devm_clk_get(&pdev->dev, "ahbarb0"); + if (IS_ERR_OR_NULL(aml_dvb_ahbarb0_clk)) { + dev_err(&pdev->dev, "get ahbarb0 clk 
fail\n"); + return -1; + } + clk_prepare_enable(aml_dvb_ahbarb0_clk); + + aml_dvb_uparsertop_clk = + devm_clk_get(&pdev->dev, "uparsertop"); + if (IS_ERR_OR_NULL(aml_dvb_uparsertop_clk)) { + dev_err(&pdev->dev, "get uparsertop clk fail\n"); + return -1; + } + clk_prepare_enable(aml_dvb_uparsertop_clk); + } + else + { + amports_switch_gate("demux", 1); + amports_switch_gate("ahbarb0", 1); + amports_switch_gate("parser_top", 1); + if (get_cpu_type() == MESON_CPU_MAJOR_ID_TL1) + { + aml_dvb_afifo_clk = + devm_clk_get(&pdev->dev, "asyncfifo"); + if (IS_ERR_OR_NULL(aml_dvb_afifo_clk)) + dev_err(&pdev->dev, "get asyncfifo clk fail\n"); + else + clk_prepare_enable(aml_dvb_afifo_clk); + } + } + + advb = &aml_dvb_device; + memset(advb, 0, sizeof(aml_dvb_device)); + + spin_lock_init(&advb->slock); + + advb->dev = &pdev->dev; + advb->pdev = pdev; + advb->stb_source = -1; + advb->tso_source = -1; + + if (get_cpu_type() < MESON_CPU_MAJOR_ID_TL1) { + advb->ts_in_total_count = 3; + advb->s2p_total_count = 2; + advb->async_fifo_total_count = 2; + } else { + advb->ts_in_total_count = 4; + advb->s2p_total_count = 3; + advb->async_fifo_total_count = 3; + } + + for (i = 0; i < DMX_DEV_COUNT; i++) { + advb->dmx[i].dmx_irq = -1; + advb->dmx[i].dvr_irq = -1; + } + +#ifdef CONFIG_OF + if (pdev->dev.of_node) { + int s2p_id = 0; + char buf[32]; + const char *str; + u32 value; + + for (i = 0; i < advb->ts_in_total_count; i++) { + + advb->ts[i].mode = AM_TS_DISABLE; + advb->ts[i].s2p_id = -1; + advb->ts[i].pinctrl = NULL; + memset(buf, 0, 32); + snprintf(buf, sizeof(buf), "ts%d", i); + ret = + of_property_read_string(pdev->dev.of_node, buf, + &str); + if (!ret) { + if (!strcmp(str, "serial")) { + pr_inf("%s: serial\n", buf); + + if (s2p_id >= advb->s2p_total_count) + pr_error("no free s2p\n"); + else { + snprintf(buf, sizeof(buf), + "s_ts%d", i); + advb->ts[i].mode = AM_TS_SERIAL; + advb->ts[i].pinctrl = + devm_pinctrl_get_select + (&pdev->dev, buf); + advb->ts[i].s2p_id = s2p_id; + + 
s2p_id++; + } + } else if (!strcmp(str, "parallel")) { + pr_inf("%s: parallel\n", buf); + memset(buf, 0, 32); + snprintf(buf, sizeof(buf), "p_ts%d", i); + advb->ts[i].mode = AM_TS_PARALLEL; + advb->ts[i].pinctrl = + devm_pinctrl_get_select(&pdev->dev, + buf); + } else { + advb->ts[i].mode = AM_TS_DISABLE; + advb->ts[i].pinctrl = NULL; + } + + /* if(IS_ERR_VALUE(advb->ts[i].pinctrl)) */ + /* advb->ts[i].pinctrl = NULL; */ + } + memset(buf, 0, 32); + snprintf(buf, sizeof(buf), "ts%d_control", i); + ret = + of_property_read_u32(pdev->dev.of_node, buf, + &value); + if (!ret) { + pr_inf("%s: 0x%x\n", buf, value); + advb->ts[i].control = value; + } else { + pr_inf("read error:%s: 0x%x\n", buf, value); + } + + if (advb->ts[i].s2p_id != -1) { + memset(buf, 0, 32); + snprintf(buf, sizeof(buf), "ts%d_invert", i); + ret = + of_property_read_u32(pdev->dev.of_node, buf, + &value); + if (!ret) { + pr_inf("%s: 0x%x\n", buf, value); + advb->s2p[advb->ts[i].s2p_id].invert = + value; + } + } + } + memset(buf, 0, 32); + snprintf(buf, sizeof(buf), "ts_out_invert"); + ret = + of_property_read_u32(pdev->dev.of_node, buf, + &value); + if (!ret) { + pr_inf("%s: 0x%x\n", buf, value); + advb->ts_out_invert = value; + } + memset(buf, 0, 32); + snprintf(buf, sizeof(buf), "tsin_deglitch"); + ret = + of_property_read_u32(pdev->dev.of_node, buf, + &value); + if (!ret) { + pr_inf("%s: 0x%x\n", buf, value); + if (value) { + pr_inf("TSINB_DEGLITCH0 is set %s: 0x%x\n", buf, value); + dmx_phyreg_access(TSINB_DEGLITCH0, 0x40, NULL); + dmx_phyreg_access(TSINB_DEGLITCH1, 0x1, NULL); + } + } + } +#endif + + pd_dvb = (struct devio_aml_platform_data *)advb->dev->platform_data; + + ret = + dvb_register_adapter(&advb->dvb_adapter, CARD_NAME, THIS_MODULE, + advb->dev, adapter_nr); + if (ret < 0) + return ret; + + for (i = 0; i < DMX_DEV_COUNT; i++) + advb->dmx[i].id = -1; + + for (i = 0; i<DSC_DEV_COUNT; i++) + advb->dsc[i].id = -1; + + for (i = 0; i < advb->async_fifo_total_count; i++) + 
advb->asyncfifo[i].id = -1; + + advb->dvb_adapter.priv = advb; + dev_set_drvdata(advb->dev, advb); + + for (i = 0; i < DSC_DEV_COUNT; i++) { + ret = aml_dvb_dsc_init(advb, &advb->dsc[i], i); + if (ret < 0) + goto error; + } + + for (i = 0; i < DMX_DEV_COUNT; i++) { + ret = aml_dvb_dmx_init(advb, &advb->dmx[i], i); + if (ret < 0) + goto error; + } + + /*Init the async fifos */ + for (i = 0; i < advb->async_fifo_total_count; i++) { + ret = aml_dvb_asyncfifo_init(advb, &advb->asyncfifo[i], i); + if (ret < 0) + goto error; + } + + aml_regist_dmx_class(); + + if (class_register(&aml_stb_class) < 0) { + pr_error("dvb register class error\n"); + goto error; + } + + aml_register_parser_mconfig(); +#ifdef ENABLE_DEMUX_DRIVER + tsdemux_set_ops(&aml_tsdemux_ops); +#else + tsdemux_set_ops(NULL); +#endif + +#if (defined CONFIG_AMLOGIC_DVB_EXTERN ||\ + defined CONFIG_AMLOGIC_DVB_EXTERN_MODULE) + ret = dvb_extern_register_frontend(&advb->dvb_adapter); + if (ret) { + pr_error("aml register dvb frontend failed\n"); + goto error; + } +#endif + + return 0; + +error: + for (i = 0; i < advb->async_fifo_total_count; i++) { + if (advb->asyncfifo[i].id != -1) + aml_dvb_asyncfifo_release(advb, &advb->asyncfifo[i]); + } + + for (i = 0; i < DMX_DEV_COUNT; i++) { + if (advb->dmx[i].id != -1) + aml_dvb_dmx_release(advb, &advb->dmx[i]); + } + + for (i = 0; i < DSC_DEV_COUNT; i++) { + if (advb->dsc[i].id != -1) + aml_dvb_dsc_release(advb, &advb->dsc[i]); + } + + dvb_unregister_adapter(&advb->dvb_adapter); + + return ret; +} + +static int aml_dvb_remove(struct platform_device *pdev) +{ + struct aml_dvb *advb = (struct aml_dvb *)dev_get_drvdata(&pdev->dev); + int i; + + pr_inf("[dmx_kpi] %s Enter.\n", __func__); + +#if (defined CONFIG_AMLOGIC_DVB_EXTERN ||\ + defined CONFIG_AMLOGIC_DVB_EXTERN_MODULE) + dvb_extern_unregister_frontend(); +#endif + + tsdemux_set_ops(NULL); + + aml_unregist_dmx_class(); + class_unregister(&aml_stb_class); + + for (i = 0; i < advb->async_fifo_total_count; i++) { + if 
(advb->asyncfifo[i].id != -1) + aml_dvb_asyncfifo_release(advb, &advb->asyncfifo[i]); + } + + for (i = 0; i < DMX_DEV_COUNT; i++) { + pr_error("remove demx %d, id is %d\n",i,advb->dmx[i].id); + if (advb->dmx[i].id != -1) + aml_dvb_dmx_release(advb, &advb->dmx[i]); + } + + for (i = 0; i < DSC_DEV_COUNT; i++) { + if (advb->dsc[i].id != -1) + aml_dvb_dsc_release(advb, &advb->dsc[i]); + } + dvb_unregister_adapter(&advb->dvb_adapter); + + for (i = 0; i < advb->ts_in_total_count; i++) { + if (advb->ts[i].pinctrl && !IS_ERR_VALUE(advb->ts[i].pinctrl)) + devm_pinctrl_put(advb->ts[i].pinctrl); + } + + /*switch_mod_gate_by_name("demux", 0); */ +#if 0 + reset_control_assert(aml_dvb_uparsertop_reset_ctl); + reset_control_assert(aml_dvb_ahbarb0_reset_ctl); + reset_control_assert(aml_dvb_afifo_reset_ctl); + reset_control_assert(aml_dvb_demux_reset_ctl); +#else +#if 1 + if (get_cpu_type() < MESON_CPU_MAJOR_ID_G12A) + { + clk_disable_unprepare(aml_dvb_uparsertop_clk); + clk_disable_unprepare(aml_dvb_ahbarb0_clk); + clk_disable_unprepare(aml_dvb_afifo_clk); + clk_disable_unprepare(aml_dvb_demux_clk); + } + else + { + amports_switch_gate("demux", 0); + amports_switch_gate("ahbarb0", 0); + amports_switch_gate("parser_top", 0); + + if (!IS_ERR_OR_NULL(aml_dvb_afifo_clk)) { + clk_disable_unprepare(aml_dvb_afifo_clk); + } + } +#endif +#endif + + pr_inf("[dmx_kpi] %s Exit.\n", __func__); + return 0; +} + +static int aml_dvb_suspend(struct platform_device *dev, pm_message_t state) +{ + return 0; +} + +static int aml_dvb_resume(struct platform_device *dev) +{ + struct aml_dvb *dvb = &aml_dvb_device; + int i; + + for (i = 0; i < DMX_DEV_COUNT; i++) + dmx_reset_dmx_id_hw_ex(dvb, i, 0); + + pr_inf("dvb resume\n"); + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id aml_dvb_dt_match[] = { + { + .compatible = "amlogic, dvb-demux", + }, + {}, +}; +#endif /*CONFIG_OF */ + +static struct platform_driver aml_dvb_driver = { + .probe = aml_dvb_probe, + .remove = aml_dvb_remove, + 
.suspend = aml_dvb_suspend, + .resume = aml_dvb_resume, + .driver = { + .name = "amlogic-dvb-demux", + .owner = THIS_MODULE, +#ifdef CONFIG_OF + .of_match_table = aml_dvb_dt_match, +#endif + } +}; + +static int __init aml_dvb_init(void) +{ + return platform_driver_register(&aml_dvb_driver); +} + +static void __exit aml_dvb_exit(void) +{ + pr_inf("aml dvb exit\n"); + platform_driver_unregister(&aml_dvb_driver); +} + +/*Get the STB source demux*/ +static struct aml_dmx *get_stb_dmx(void) +{ + struct aml_dvb *dvb = &aml_dvb_device; + struct aml_dmx *dmx = NULL; + int i; + + switch (dvb->stb_source) { + case AM_TS_SRC_DMX0: + dmx = &dvb->dmx[0]; + break; + case AM_TS_SRC_DMX1: + dmx = &dvb->dmx[1]; + break; + case AM_TS_SRC_DMX2: + dmx = &dvb->dmx[2]; + break; + default: + for (i = 0; i < DMX_DEV_COUNT; i++) { + dmx = &dvb->dmx[i]; + if (dmx->source == dvb->stb_source) + return dmx; + } + break; + } + + return dmx; +} + +static int aml_tsdemux_reset(void) +{ + struct aml_dvb *dvb = &aml_dvb_device; + unsigned long flags; + pr_inf("[dmx_kpi] %s Enter\n", __func__); + + spin_lock_irqsave(&dvb->slock, flags); + if (dvb->reset_flag) { + struct aml_dmx *dmx = get_stb_dmx(); + + dvb->reset_flag = 0; + if (dmx) { + if (dmx_reset_all_flag) + dmx_reset_hw_ex(dvb, 0); + else + dmx_reset_dmx_hw_ex_unlock(dvb, dmx, 0); + } + } + spin_unlock_irqrestore(&dvb->slock, flags); + pr_inf("[dmx_kpi] %s Exit\n", __func__); + return 0; +} + +static int aml_tsdemux_set_reset_flag(void) +{ + struct aml_dvb *dvb = &aml_dvb_device; + unsigned long flags; + + spin_lock_irqsave(&dvb->slock, flags); + dvb->reset_flag = 1; + spin_unlock_irqrestore(&dvb->slock, flags); + + return 0; + +} + +/*Add the amstream irq handler*/ +static int aml_tsdemux_request_irq(irq_handler_t handler, void *data) +{ + struct aml_dvb *dvb = &aml_dvb_device; + struct aml_dmx *dmx; + unsigned long flags; + + spin_lock_irqsave(&dvb->slock, flags); + + dmx = get_stb_dmx(); + if (dmx) { + dmx->irq_handler = handler; + 
dmx->irq_data = data; + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return 0; +} + +/*Free the amstream irq handler*/ +static int aml_tsdemux_free_irq(void) +{ + struct aml_dvb *dvb = &aml_dvb_device; + struct aml_dmx *dmx; + unsigned long flags; + + spin_lock_irqsave(&dvb->slock, flags); + + dmx = get_stb_dmx(); + if (dmx) { + dmx->irq_handler = NULL; + dmx->irq_data = NULL; + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return 0; +} + +/*Reset the video PID*/ +static int aml_tsdemux_set_vid(int vpid) +{ + struct aml_dvb *dvb = &aml_dvb_device; + struct aml_dmx *dmx; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&dvb->slock, flags); + dmx = get_stb_dmx(); + if (dmx) { + if (dmx->vid_chan != -1) { + dmx_free_chan(dmx, dmx->vid_chan); + dmx->vid_chan = -1; + } + + if ((vpid >= 0) && (vpid < 0x1FFF)) { + dmx->vid_chan = + dmx_alloc_chan(dmx, DMX_TYPE_TS, + DMX_PES_VIDEO, vpid); + if (dmx->vid_chan == -1) + ret = -1; + } + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + +/*Reset the audio PID*/ +static int aml_tsdemux_set_aid(int apid) +{ + struct aml_dvb *dvb = &aml_dvb_device; + struct aml_dmx *dmx; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&dvb->slock, flags); + dmx = get_stb_dmx(); + if (dmx) { + if (dmx->aud_chan != -1) { + dmx_free_chan(dmx, dmx->aud_chan); + dmx->aud_chan = -1; + } + + if ((apid >= 0) && (apid < 0x1FFF)) { + dmx->aud_chan = + dmx_alloc_chan(dmx, DMX_TYPE_TS, + DMX_PES_AUDIO, apid); + if (dmx->aud_chan == -1) + ret = -1; + } + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + +/*Reset the subtitle PID*/ +static int aml_tsdemux_set_sid(int spid) +{ + struct aml_dvb *dvb = &aml_dvb_device; + struct aml_dmx *dmx; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&dvb->slock, flags); + + dmx = get_stb_dmx(); + if (dmx) { + if (dmx->sub_chan != -1) { + dmx_free_chan(dmx, dmx->sub_chan); + dmx->sub_chan = -1; + } + + if ((spid >= 0) && (spid < 
0x1FFF)) { + dmx->sub_chan = 3; + dmx->sub_chan = + dmx_alloc_chan(dmx, DMX_TYPE_TS, + DMX_PES_SUBTITLE, spid); + if (dmx->sub_chan == -1) + ret = -1; + } + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + +static int aml_tsdemux_set_pcrid(int pcrpid) +{ + struct aml_dvb *dvb = &aml_dvb_device; + struct aml_dmx *dmx; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&dvb->slock, flags); + + dmx = get_stb_dmx(); + if (dmx) { + if (dmx->pcr_chan != -1) { + dmx_free_chan(dmx, dmx->pcr_chan); + dmx->pcr_chan = -1; + } + + if ((pcrpid >= 0) && (pcrpid < 0x1FFF)) { + dmx->pcr_chan = + dmx_alloc_chan(dmx, DMX_TYPE_TS, + DMX_PES_PCR, pcrpid); + if (dmx->pcr_chan == -1) + ret = -1; + } + } + + spin_unlock_irqrestore(&dvb->slock, flags); + + return ret; +} + +static int aml_tsdemux_set_skipbyte(int skipbyte) +{ + struct aml_dvb *dvb = &aml_dvb_device; + unsigned long flags; + + spin_lock_irqsave(&dvb->slock, flags); + aml_dmx_set_skipbyte(dvb, skipbyte); + spin_unlock_irqrestore(&dvb->slock, flags); + + return 0; +} + +static int aml_tsdemux_set_demux(int id) +{ + struct aml_dvb *dvb = &aml_dvb_device; + + aml_dmx_set_demux(dvb, id); + return 0; +} + +static unsigned long aml_tsdemux_hwdmx_spin_lock(unsigned long flags) +{ + struct aml_dvb *dvb = &aml_dvb_device; + + spin_lock_irqsave(&dvb->slock, flags); + return flags; +} + +static int aml_tsdemux_hwdmx_spin_unlock(unsigned long flags) +{ + struct aml_dvb *dvb = &aml_dvb_device; + + spin_unlock_irqrestore(&dvb->slock, flags); + return 0; +} + +module_init(aml_dvb_init); +module_exit(aml_dvb_exit); + +MODULE_DESCRIPTION("driver for the AMLogic DVB card"); +MODULE_AUTHOR("AMLOGIC"); +MODULE_LICENSE("GPL");
diff --git a/drivers/stream_input/parser/hw_demux/aml_dvb.h b/drivers/stream_input/parser/hw_demux/aml_dvb.h new file mode 100644 index 0000000..7bfce38 --- /dev/null +++ b/drivers/stream_input/parser/hw_demux/aml_dvb.h
@@ -0,0 +1,404 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +#ifndef _AML_DVB_H_ +#define _AML_DVB_H_ + +#include <linux/interrupt.h> +#include <linux/socket.h> +#include <linux/netdevice.h> +#include <linux/i2c.h> + +#include <linux/dvb/video.h> +#include <linux/dvb/audio.h> +#include <linux/dvb/dmx.h> +#include <linux/dvb/ca.h> +#include <linux/dvb/osd.h> +#include <linux/dvb/net.h> +#include <linux/dvb/frontend.h> + +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> + +#ifdef CONFIG_HAS_EARLYSUSPEND +#include <linux/earlysuspend.h> +#endif + + +#include <dvbdev.h> +#include <demux.h> +#include <dvb_demux.h> +#include <dmxdev.h> +#include <dvb_filter.h> +#include <dvb_net.h> +#include <dvb_ringbuffer.h> + +#include <linux/of.h> +#include <linux/pinctrl/consumer.h> + +#include "aml_demod_gt.h" + +#define TS_IN_COUNT 4 +#define S2P_COUNT 3 +#define ASYNCFIFO_COUNT 3 +#if 0 +#define TS_IN_COUNT 3 +#define S2P_COUNT 2 +#define ASYNCFIFO_COUNT 2 +#endif + +#define DMX_DEV_COUNT 3 +#define FE_DEV_COUNT 2 +#define CHANNEL_COUNT 31 +#define FILTER_COUNT 31 +#define FILTER_LEN 15 +#define DSC_DEV_COUNT 2 +#define DSC_COUNT 8 +#define SEC_BUF_GRP_COUNT 4 +#define SEC_BUF_BUSY_SIZE 4 +#define 
SEC_BUF_COUNT (SEC_BUF_GRP_COUNT*8) + + +struct aml_sec_buf { + unsigned long addr; + int len; +}; + +struct aml_channel { + int type; + enum dmx_ts_pes pes_type; + int pid; + int used; + int filter_count; + struct dvb_demux_feed *feed; + struct dvb_demux_feed *dvr_feed; + int pkt_type; +}; + +struct aml_filter { + int chan_id; + int used; + struct dmx_section_filter *filter; + u8 value[FILTER_LEN]; + u8 maskandmode[FILTER_LEN]; + u8 maskandnotmode[FILTER_LEN]; + u8 neq; +}; + +#define DVBCSA_MODE 0 +#define CIPLUS_MODE 1 +#define CBC_MODE 0 +#define ECB_MODE 1 +#define IDSA_MODE 2 + +#define DSC_SET_EVEN 1 +#define DSC_SET_ODD 2 +#define DSC_SET_AES_EVEN 4 +#define DSC_SET_AES_ODD 8 +#define DSC_FROM_KL 16 +#define DSC_SET_SM4_EVEN 32 +#define DSC_SET_SM4_ODD 64 + +#define DSC_KEY_SIZE_MAX 16 + +struct aml_dsc_channel { + int pid; + u8 even[DSC_KEY_SIZE_MAX]; + u8 odd[DSC_KEY_SIZE_MAX]; + u8 even_iv[DSC_KEY_SIZE_MAX]; + u8 odd_iv[DSC_KEY_SIZE_MAX]; + int used; + int set; + int id; + struct aml_dsc *dsc; + int work_mode; + int mode; +}; + +struct aml_dsc { + struct dvb_device *dev; + struct aml_dsc_channel channel[DSC_COUNT]; + enum aml_ts_source_t source; + enum aml_ts_source_t dst; + struct aml_dvb *dvb; + int id; + int work_mode; +}; + +struct aml_smallsec { + struct aml_dmx *dmx; + + int enable; + int bufsize; +#define SS_BUFSIZE_DEF (16*4*256) /*16KB*/ + long buf; + long buf_map; +}; + +struct aml_dmxtimeout { + struct aml_dmx *dmx; + + int enable; + + int timeout; +#define DTO_TIMEOUT_DEF (9000) /*0.5s*/ + u32 ch_disable; +#define DTO_CHDIS_VAS (0xfffffff8) /*v/a/s only*/ + int match; + + int trigger; +}; + +struct aml_dmx { + struct dvb_demux demux; + struct dmxdev dmxdev; + int id; + int feed_count; + int chan_count; + enum aml_ts_source_t source; + int init; + int record; + struct dmx_frontend hw_fe[DMX_DEV_COUNT]; + struct dmx_frontend mem_fe; + struct dvb_net dvb_net; + int dmx_irq; + int dvr_irq; + struct tasklet_struct dmx_tasklet; + struct 
tasklet_struct dvr_tasklet; + unsigned long sec_pages; + unsigned long sec_pages_map; + int sec_total_len; + struct aml_sec_buf sec_buf[SEC_BUF_COUNT]; + unsigned long pes_pages; + unsigned long pes_pages_map; + int pes_buf_len; + union { + unsigned long sub_pages; + unsigned long sub_buf_base; + }; + union { + unsigned long sub_pages_map; + u8 *sub_buf_base_virt; + }; + int sub_buf_len; + + struct aml_channel channel[CHANNEL_COUNT+1]; + struct aml_filter filter[FILTER_COUNT+1]; + irq_handler_t irq_handler; + void *irq_data; + int aud_chan; + int vid_chan; + int sub_chan; + int pcr_chan; + u32 section_busy[SEC_BUF_BUSY_SIZE]; + struct dvb_frontend *fe; + int int_check_count; + u32 int_check_time; + int in_tune; + int error_check; + int dump_ts_select; + int sec_buf_watchdog_count[SEC_BUF_COUNT]; + + struct aml_smallsec smallsec; + struct aml_dmxtimeout timeout; + + int demux_filter_user; + + unsigned long sec_cnt[3]; + unsigned long sec_cnt_match[3]; + unsigned long sec_cnt_crc_fail[3]; + #define SEC_CNT_HW (0) + #define SEC_CNT_SW (1) + #define SEC_CNT_SS (2) + #define SEC_CNT_MAX (3) + + int crc_check_count; + u32 crc_check_time; + int om_status_error_count; +}; + +struct aml_dvr_block { + u32 addr; + u32 len; +}; + +struct aml_asyncfifo { + int id; + int init; + int asyncfifo_irq; + enum aml_dmx_id_t source; + unsigned long pages; + unsigned long pages_map; + int buf_len; + int buf_toggle; + int buf_read; + int flush_size; + int secure_enable; + struct tasklet_struct asyncfifo_tasklet; + struct aml_dvb *dvb; + struct aml_dvr_block blk; + unsigned long stored_pages; +}; + +enum{ + AM_TS_DISABLE, + AM_TS_PARALLEL, + AM_TS_SERIAL +}; + +struct aml_ts_input { + int mode; + struct pinctrl *pinctrl; + int control; + int s2p_id; +}; + +struct aml_s2p { + int invert; +}; + +struct aml_swfilter { + int user; + struct aml_dmx *dmx; + struct aml_asyncfifo *afifo; + + struct dvb_ringbuffer rbuf; +#define SF_BUFFER_SIZE (10*188*1024) + + u8 wrapbuf[188]; + int track_dmx; +}; 
+ +struct aml_dvb { + struct dvb_device dvb_dev; + int ts_in_total_count; + struct aml_ts_input ts[TS_IN_COUNT]; + int s2p_total_count; + struct aml_s2p s2p[S2P_COUNT]; + struct aml_dmx dmx[DMX_DEV_COUNT]; + struct aml_dsc dsc[DSC_DEV_COUNT]; + int async_fifo_total_count; + struct aml_asyncfifo asyncfifo[ASYNCFIFO_COUNT]; + struct dvb_adapter dvb_adapter; + struct device *dev; + struct platform_device *pdev; + enum aml_ts_source_t stb_source; + enum aml_ts_source_t tso_source; + int dmx_init; + int reset_flag; + spinlock_t slock; + struct timer_list watchdog_timer; + int dmx_watchdog_disable[DMX_DEV_COUNT]; + struct aml_swfilter swfilter; + int ts_out_invert; + + /*bufs for dmx shared*/ + unsigned long pes_pages; + unsigned long pes_pages_map; + int pes_buf_len; + unsigned long sub_pages; + unsigned long sub_pages_map; + int sub_buf_len; +}; + + +/*AMLogic demux interface*/ +extern int aml_dmx_hw_init(struct aml_dmx *dmx); +extern int aml_dmx_hw_deinit(struct aml_dmx *dmx); +extern int aml_dmx_hw_start_feed(struct dvb_demux_feed *dvbdmxfeed); +extern int aml_dmx_hw_stop_feed(struct dvb_demux_feed *dvbdmxfeed); +extern int aml_dmx_hw_set_source(struct dmx_demux *demux, + dmx_source_t src); +extern int aml_stb_hw_set_source(struct aml_dvb *dvb, dmx_source_t src); +extern int aml_dsc_hw_set_source(struct aml_dsc *dsc, + dmx_source_t src, dmx_source_t dst); +extern int aml_tso_hw_set_source(struct aml_dvb *dvb, dmx_source_t src); +extern int aml_dmx_set_skipbyte(struct aml_dvb *dvb, int skipbyte); +extern int aml_dmx_set_demux(struct aml_dvb *dvb, int id); +extern int aml_dmx_hw_set_dump_ts_select + (struct dmx_demux *demux, int dump_ts_select); + +extern int dmx_alloc_chan(struct aml_dmx *dmx, int type, + int pes_type, int pid); +extern void dmx_free_chan(struct aml_dmx *dmx, int cid); + +extern int dmx_get_ts_serial(enum aml_ts_source_t src); + +extern int dmx_get_sub_buffer(unsigned long *base, unsigned long *virt); +extern int dmx_init_sub_buffer(struct aml_dmx 
*dmx, unsigned long base, unsigned long virt); + +/*AMLogic dsc interface*/ +extern int dsc_set_pid(struct aml_dsc_channel *ch, int pid); +extern int dsc_set_key(struct aml_dsc_channel *ch, int flags, + enum ca_cw_type type, u8 *key); +extern void dsc_release(void); +extern int aml_ciplus_hw_set_source(int src); + +/*AMLogic ASYNC FIFO interface*/ +extern int aml_asyncfifo_hw_init(struct aml_asyncfifo *afifo); +extern int aml_asyncfifo_hw_deinit(struct aml_asyncfifo *afifo); +extern int aml_asyncfifo_hw_set_source(struct aml_asyncfifo *afifo, + enum aml_dmx_id_t src); +extern int aml_asyncfifo_hw_reset(struct aml_asyncfifo *afifo); + +/*Get the Audio & Video PTS*/ +extern u32 aml_dmx_get_video_pts(struct aml_dvb *dvb); +extern u32 aml_dmx_get_audio_pts(struct aml_dvb *dvb); +extern u32 aml_dmx_get_video_pts_bit32(struct aml_dvb *dvb); +extern u32 aml_dmx_get_audio_pts_bit32(struct aml_dvb *dvb); +extern u32 aml_dmx_get_first_video_pts(struct aml_dvb *dvb); +extern u32 aml_dmx_get_first_audio_pts(struct aml_dvb *dvb); + +/*Get the DVB device*/ +extern struct aml_dvb *aml_get_dvb_device(void); + +extern int aml_regist_dmx_class(void); +extern int aml_unregist_dmx_class(void); +extern void aml_register_parser_mconfig(void); + +extern int dmx_phyreg_access(unsigned int reg, unsigned int writeval, + unsigned int *readval); + +struct devio_aml_platform_data { + int (*io_setup)(void *); + int (*io_cleanup)(void *); + int (*io_power)(void *, int enable); + int (*io_reset)(void *, int enable); +}; + +void get_aml_dvb(struct aml_dvb *dvb_device); + +/*Reset the demux device*/ +void dmx_reset_hw(struct aml_dvb *dvb); +void dmx_reset_hw_ex(struct aml_dvb *dvb, int reset_irq); + +/*Reset the individual demux*/ +void dmx_reset_dmx_hw(struct aml_dvb *dvb, int id); +void dmx_reset_dmx_id_hw_ex(struct aml_dvb *dvb, int id, int reset_irq); +void dmx_reset_dmx_id_hw_ex_unlock(struct aml_dvb *dvb, int id, int reset_irq); +void dmx_reset_dmx_hw_ex(struct aml_dvb *dvb, + struct aml_dmx 
*dmx, + int reset_irq); +void dmx_reset_dmx_hw_ex_unlock(struct aml_dvb *dvb, + struct aml_dmx *dmx, + int reset_irq); + +#endif +
diff --git a/drivers/stream_input/parser/hw_demux/aml_dvb_reg.h b/drivers/stream_input/parser/hw_demux/aml_dvb_reg.h new file mode 100644 index 0000000..ac47688 --- /dev/null +++ b/drivers/stream_input/parser/hw_demux/aml_dvb_reg.h
@@ -0,0 +1,58 @@ +/* + * drivers/amlogic/dvb_tv/dvb_reg.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef _DVB_REG_H_ +#define _DVB_REG_H_ + +//#include <linux/amlogic/iomap.h> +#include <linux/amlogic/media/registers/register_map.h> +//#include <linux/amlogic/media/registers/cpu_version.h> + +#define ID_STB_CBUS_BASE 0 +#define ID_SMARTCARD_REG_BASE 1 +#define ID_ASYNC_FIFO_REG_BASE 2 +#define ID_ASYNC_FIFO1_REG_BASE 3 +#define ID_ASYNC_FIFO2_REG_BASE 4 +#define ID_RESET_BASE 5 +#define ID_PARSER_SUB_START_PTR_BASE 6 + +long aml_stb_get_base(int id); +#include "c_stb_define.h" +#include "c_stb_regs_define.h" + +#define WRITE_MPEG_REG(_r, _v) aml_write_cbus(_r, _v) +#define READ_MPEG_REG(_r) aml_read_cbus(_r) + +#define WRITE_CBUS_REG(_r, _v) aml_write_cbus(_r, _v) +#define READ_CBUS_REG(_r) aml_read_cbus(_r) + +#define WRITE_VCBUS_REG(_r, _v) aml_write_vcbus(_r, _v) +#define READ_VCBUS_REG(_r) aml_read_vcbus(_r) + +#define BASE_IRQ 32 +#define AM_IRQ(reg) (reg + BASE_IRQ) +#define INT_DEMUX AM_IRQ(23) +#define INT_DEMUX_1 AM_IRQ(5) +#define INT_DEMUX_2 AM_IRQ(21) //AM_IRQ(53) +#define INT_ASYNC_FIFO_FILL AM_IRQ(18) +#define INT_ASYNC_FIFO_FLUSH AM_IRQ(19) +#define INT_ASYNC_FIFO2_FILL AM_IRQ(24) +#define INT_ASYNC_FIFO2_FLUSH AM_IRQ(25) + +#define INT_ASYNC_FIFO3_FLUSH AM_IRQ(17) +#endif +
diff --git a/drivers/stream_input/parser/hw_demux/c_stb_define.h b/drivers/stream_input/parser/hw_demux/c_stb_define.h new file mode 100644 index 0000000..6b815b6 --- /dev/null +++ b/drivers/stream_input/parser/hw_demux/c_stb_define.h
@@ -0,0 +1,1217 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +/* ----------------------------------------------------------------------*/ +/* This file is automatically generated from the script:*/ +/**/ +/* ./create_stb_define_for_C_code.pl*/ +/**/ +/* and was applied to the file*/ +/**/ +/* ./stb_define.h*/ +/**/ +/* DO NOT EDIT!!!!!*/ +/* ----------------------------------------------------------------------*/ +/**/ +#ifdef C_STB_DEFINE_H +#else +#define C_STB_DEFINE_H + +/*=================================================*/ +/* STB Registers Start*/ +/*=================================================*/ +/* -----------------------------------------------*/ +/*#define STB_CBUS_BASE 0x1600*/ +/* -----------------------------------------------*/ +/* There are two instantiations under one CBUS slave. + * Each CBUS slave can support*/ +/* 256 registers. 
+ * Each demux is allocated 128 registers so set the offset in*/ +/* the middle*/ +/* Copy this define but don't add a base address*/ +/*#define DEMUX_1_OFFSET 0x00*/ +/*#define DEMUX_2_OFFSET 0x50*/ +/*#define DEMUX_3_OFFSET 0xa0*/ +/*======================================================*/ +/* STB TOP Registers (8'hf0 - 8'hf7)*/ +/*======================================================*/ +/* bit 30:28 -- ciplus_o_sel*/ +/* bit 27:26 -- ciplus_i_sel*/ +/* bit 25 -- use FAIL fro TS2*/ +/* bit 24 -- use FAIL fro TS1*/ +/* bit 23 -- use FAIL fro TS0*/ +/* bit 22 -- invert fec_error for S2P1*/ +/* bit 21 -- invert fec_data for S2P1*/ +/* bit 20 -- invert fec_sync for S2P1*/ +/* bit 19 -- invert fec_valid for S2P1*/ +/* bit 18 -- invert fec_clk for S2P1*/ +/* bit 17:16 -- fec_s_sel for S2P1 + * 00 - select TS0, 01 -- select TS1, 10 -- select TS2, 11 - TS3*/ +/* Bit 15 -- enable_des_pl_clk*/ +/* Bit 14 -- reserved*/ +/* Bit 13 -- use FAIL for TS3*/ +/* Bit 12:10 -- ts_out_select, + * 0-TS0, 1-TS1, 2-TS2, 3-TS3,4-S2P2, 5-S2P1, 6-S2P0, 7-File*/ +/* bit 9:8 -- des_i_sel 00 -- select demux0 as des input, +* 01 -- select_demux1, 10 -- select_demux2, 11 - reserved*/ +/* bit 7 -- enable_des_pl*/ +/* bit 6 -- invert fec_error for S2P0*/ +/* bit 5 -- invert fec_data for S2P0*/ +/* bit 4 -- invert fec_sync for S2P0*/ +/* bit 3 -- invert fec_valid for S2P0*/ +/* bit 2 -- invert fec_clk for S2P0*/ +/* bit 1:0 -- fec_s_sel for S2P0 + * 00 - select TS0, 01 -- select TS1, 10 -- select TS2, 11 - reserved*/ +/*#define STB_TOP_CONFIG (STB_CBUS_BASE + 0xf0) // 0x16f0*/ +/*----------- bit define -----------*/ +#define INVERT_S2P1_FEC_ERROR 22 +#define INVERT_S2P1_FEC_DATA 21 +#define INVERT_S2P1_FEC_SYNC 20 +#define INVERT_S2P1_FEC_VALID 19 +#define INVERT_S2P1_FEC_CLK 18 +#define S2P1_FEC_SERIAL_SEL 16 +#define ENABLE_DES_PL_CLK 15 +#define FAIL_TS3 13 +#define TS_OUTPUT_SOURCE 10 +#define DES_INPUT_SEL 8 +#define ENABLE_DES_PL 7 +#define INVERT_S2P0_FEC_ERROR 6 +#define 
INVERT_S2P0_FEC_DATA 5 +#define INVERT_S2P0_FEC_SYNC 4 +#define INVERT_S2P0_FEC_VALID 3 +#define INVERT_S2P0_FEC_CLK 2 +#define S2P0_FEC_SERIAL_SEL 0 + +//define STB_S2P2_CONFIG +#define S2P2_DISABLE 11 +#define S2P2_CLK_DIV 7 +#define INVERT_S2P2_FEC_ERROR 6 +#define INVERT_S2P2_FEC_DATA 5 +#define INVERT_S2P2_FEC_SYNC 4 +#define INVERT_S2P2_FEC_VALID 3 +#define INVERT_S2P2_FEC_CLK 2 +#define S2P2_FEC_SERIAL_SEL 0 + + +/* 31:24 -- file_m2ts_skip_bytes_hiu1*/ +/* 21 -- ts_hiu_enable_hiu1 */ +/*20:16 -- fec_clk_div_hiu1*/ +/*15:8 -- TS_package_length_sub_1_hiu1 */ +/*7:0 -- fec_sync_byte_hiu1*/ +/*#define TS_HIU1_CONFIG (STB_CBUS_BASE + 0x4e)*/ +#define FILE_M2TS_SKIP_BYTES_HIU1 24 +#define TS_HIU_ENABLE_HIU1 21 +#define FEC_CLK_DIV_HIU1 16 +#define TS_PACKAGE_LENGTH_SUB_1_HIU1 8 +#define FEC_SYNC_BYTE_HIU1 0 + +/*5:4 -- fec_sel_demux_2, default:2*/ +/*3:2 -- fec_sel_demux_1, default:1*/ +/*1:0 -- fec_sel_demux_0, default:0*/ +/*#define TS_TOP_CONFIG1 (STB_CBUS_BASE + 0x4f)*/ +#define FEC_SEL_DEMUX_2 4 +#define FEC_SEL_DEMUX_1 2 +#define FEC_SEL_DEMUX_0 0 + +/* 31:28 - s2p1_clk_div*/ +/* 27:24 - s2p0_clk_div*/ +/* 23 - s2p1_disable*/ +/* 22 - s2p0_disable*/ +/* 21 - Reserved*/ +/* 20 -- TS_OUT_error_INVERT*/ +/* 19 -- TS_OUT_data_INVERT*/ +/* 18 -- TS_OUT_sync_INVERT*/ +/* 17 -- TS_OUT_valid_INVERT*/ +/* 16 -- TS_OUT_clk_INVERT*/ +/* 15:8 -- TS_package_length_sub_1 (default : 187)*/ +/* 7:0 -- fec_sync_byte (default : 0x47)*/ +/*#define TS_TOP_CONFIG (STB_CBUS_BASE + 0xf1) // 0x16f1*/ +/*----------- bit define -----------*/ +#define TS_OUT_CLK_INVERT 16 +#define TS_PACKAGE_LENGTH_SUB_1 8 +#define FEC_DEFAULT_SYNC_BYTE 0 + +/* Bit 25:24 -- transport_scrambling_control_odd_2 // should be 3*/ +/* Bit 23:16 -- file_m2ts_skip_bytes*/ +/* Bit 15:8 -- des_out_dly*/ +/* Bit 7:6 -- transport_scrambling_control_odd // should be 3*/ +/* Bit 5 -- ts_hiu_enable*/ +/* Bit 4:0 -- fec_clk_div*/ +/*#define TS_FILE_CONFIG (STB_CBUS_BASE + 0xf2) // 0x16f2*/ +/*----------- bit define 
-----------*/ +#define TRANSPORT_SCRAMBLING_CONTROL_ODD_2 24 +#define FILE_M2TS_SKIP_BYTES 16 +#define DES_OUT_DLY 8 +#define TRANSPORT_SCRAMBLING_CONTROL_ODD 6 +#define TS_HIU_ENABLE 5 +#define FEC_FILE_CLK_DIV 0 + +/* Bit 19:14 -- des_2 ts pl state -- Read Only*/ +/* Bit 13:8 -- des ts pl state -- Read Only*/ +/* Bit 3:0 PID index to 8 PID to get key-set*/ +/* auto increse after TS_PL_PID_DATA read/write*/ +/*#define TS_PL_PID_INDEX (STB_CBUS_BASE + 0xf3) // 0x16f3*/ +/*----------- bit define -----------*/ +#define DES_TS_PL_STATE 8 +#define DES_2_TS_PL_STATE 14 + +/* Bit 13 -- PID match disble*/ +/* Bit 12:0 -- PID*/ +/*#define TS_PL_PID_DATA (STB_CBUS_BASE + 0xf4) // 0x16f4*/ +/*----------- bit define -----------*/ +#define PID_MATCH_DISABLE_HIGH 29 +#define PID_MATCH_HIGH 16 +#define PID_MATCH_DISABLE_LOW 13 +#define PID_MATCH_LOW 0 + +/*#define COMM_DESC_KEY0 + * (STB_CBUS_BASE + 0xf5) // 0x16f5 + Common descrambler key (key bits[63:32])*/ +/*#define COMM_DESC_KEY1 + * (STB_CBUS_BASE + 0xf6) // 0x16f6 + Common descrambler key (key bits[31:0])*/ +/*#define COMM_DESC_KEY_RW + * (STB_CBUS_BASE + 0xf7) // 0x16f7 // bits[3:0] + * point to the address to write the key + * {COMM_DESC_KEY3,...,COMM_DESC_KEY0}*/ +/* Writing this register writes the key to RAM*/ + +/* bit 15:8 - des_out_dly_2*/ +/* bit 7 - reserved*/ +/* Bit 6-- enable_des_pl_clk_2*/ +/* bit 5 - enable_des_pl_2*/ +/* bit 4:2 -- use_des_2 bit[2] -- demux0, bit[3] -- demux1, bit[4] -- demux2*/ +/* bit 1:0 -- des_i_sel_2 00 -- select_fec_0, 01 -- select_fec_1, + * 10 -- select_fec_2, 11 - reserved*/ +/*#define COMM_DESC_2_CTL (STB_CBUS_BASE + 0xff) *//*0x16ff*/ + +/*=======================================================*/ +/* Multiple STB Registers (8'h00 - 8'h45)*/ +/*=======================================================*/ +/* STB registers are 8'h0x*/ +/* Bit 15:0 -- version number : 0x0002 (v0.01)*/ +/*#define STB_VERSION + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x00) // 0x1600 // read only*/ 
+/*#define STB_VERSION_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x00) // 0x1650 // read only*/ +/*#define STB_VERSION_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x00) // 0x16a0 // read only*/ + +/*#define STB_TEST_REG + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x01) // 0x1601*/ +/*#define STB_TEST_REG_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x01) // 0x1651*/ +/*#define STB_TEST_REG_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x01) // 0x16a1*/ + +/* Bit 15 -- fec_core_select 1 - select descramble output*/ +/* Bit 14:12 - fec_select + * 0-TS0, 1-TS1, 2-TS2, 3,4-Reserved, 5-S2P1, 6-S2P0, 7-File*/ +/* Bit 11 -- FEC_CLK*/ +/* Bit 10 -- SOP*/ +/* Bit 9 -- D_VALID*/ +/* Bit 8 -- D_FAIL*/ +/* Bit 7:0 -- D_DATA 7:0*/ +/*#define FEC_INPUT_CONTROL + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x02) // 0x1602*/ +/*#define FEC_INPUT_CONTROL_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x02) // 0x1652*/ +/*#define FEC_INPUT_CONTROL_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x02) // 0x16a2*/ +/*----------- bit define -----------*/ +#define FEC_SEL_3BIT 16 +#define FEC_CORE_SEL 15 +#define FEC_SEL 12 +#define FEC_INPUT_FEC_CLK 11 +#define FEC_INPUT_SOP 10 +#define FEC_INPUT_D_VALID 9 +#define FEC_INPUT_D_FAIL 8 + +/*#define FEC_INPUT_DATA + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x03) // 0x1603 // read only*/ +/*#define FEC_INPUT_DATA_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x03) // 0x1653 // read only*/ +/*#define FEC_INPUT_DATA_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x03) // 0x16a3 // read only*/ + +/* bit 31 -- enable_free_clk_fec_data_valid*/ +/* bit 30 -- enable_free_clk_stb_reg*/ +/* bit 29 -- always_use_pes_package_length*/ +/* bit 28 -- disable_pre_incomplete_section_fix*/ +/* bit 27 -- pointer_field_multi_pre_en*/ +/* bit 26 -- ignore_pre_incomplete_section*/ +/* bit 25 -- video2_enable*/ +/* bit 24:22 -- video2_type*/ +/* bit 21 -- do_not_trust_pes_package_length*/ +/* bit 20 (bit 4) -- Bypass use recoder path*/ +/* bit 19 (bit 3) -- clear_PID_continuity_counter_valid*/ +/* bit 18 (bit 2) -- Disable 
Splicing*/ +/* bit 17 (bit 1) -- Insert PES_STRONG_SYNC in Audio PES*/ +/* bit 16 (bit 0) -- Insert PES_STRONG_SYNC in Video PES*/ +/* Bit 15 - do not trust section length*/ +/* Bit 14 - om cmd push even zero*/ +/* Bit 13 - reserved*/ +/* Bit 12 - SUB, OTHER PES interrupt at beginning of PES*/ +/* Bit 11 - discard_av_package -- for ts_recorder use only*/ +/* Bit 10 - ts_recorder_select 0:after PID filter 1:before PID filter*/ +/* Bit 9 - ts_recorder_enable*/ +/* Bit 8 - (table_id == 0xff) means section_end*/ +/* Bit 7 - do not send uncomplete section*/ +/* Bit 6 - do not discard duplicate package*/ +/* Bit 5 - search SOP when trasport_error_indicator*/ +/* Bit 4 - stb demux enable*/ +/* Bit 3 - do not reset state machine on SOP*/ +/* Bit 2 - search SOP when error happened + * ( when ignore_fail_n_sop, will have this case)*/ +/* Bit 1 - do not use SOP input ( check FEC sync byte instead )*/ +/* Bit 0 - ignore fec_error bit when non sop ( check error on SOP only)*/ +/*#define DEMUX_CONTROL + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x04) // 0x1604*/ +/*#define DEMUX_CONTROL_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x04) // 0x1654*/ +/*#define DEMUX_CONTROL_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x04) // 0x16a4*/ +/*----------- bit define -----------*/ +#define ENABLE_FREE_CLK_FEC_DATA_VALID 31 +#define ENABLE_FREE_CLK_STB_REG 30 +#define BYPASS_USE_RECODER_PATH 20 +#define CLEAR_PID_CONTINUITY_COUNTER_VALID 19 +#define DISABLE_SPLICING 18 +#define INSERT_AUDIO_PES_STRONG_SYNC 17 +#define INSERT_VIDEO_PES_STRONG_SYNC 16 +#define SECTION_LENGTH_UNTRUSTY 15 +#define OM_CMD_PUSH_EVEN_ZERO 14 +#define OTHER_INT_AT_PES_BEGINING 12 +#define DISCARD_AV_PACKAGE 11 +#define TS_RECORDER_SELECT 10 +#define TS_RECORDER_ENABLE 9 +#define SECTION_END_WITH_TABLE_ID 8 +#define SEND_COMPLETE_SECTION_ONLY 7 +#define KEEP_DUPLICATE_PACKAGE 6 +#define SEACH_SOP_ON_TRANSPORT_ERROR 5 +#define STB_DEMUX_ENABLE 4 +#define NO_RESET_ON_SOP 3 +#define SEARCH_SOP_ON_ERROR 2 +#define 
NOT_USE_OF_SOP_INPUT 1 +#define IGNORE_NONSOP_FEC_ERROR 0 + +/* bit 15:8 demux package length - 1 ( default : 187 )*/ +/* bit 7:0 default is 0x47*/ +/*#define FEC_SYNC_BYTE + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x05) // 0x1605*/ +/*#define FEC_SYNC_BYTE_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x05) // 0x1655*/ +/*#define FEC_SYNC_BYTE_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x05) // 0x16a5*/ + +/**************************************** + * FM Memory Usage : + * 0-15 (32 PID filter target) ---- 15:13-PID type 12:0-PID target or force data + * (force data : 1 will mask corespoding bit, + * 0 will disable this PID filter channel) + * advanced setting -- bit 7:0 + * bit 7 -- PID bit 12:11 compare result force + * bit 6 -- PID bit 10:9 compare result force + * bit 5 -- PID bit 8:7 compare result force + * bit 4 -- PID bit 6:5 compare result force + * bit 3 -- PID bit 4:3 compare result force + * bit 2 -- PID bit 2 compare result force + * bit 1 -- PID bit 1 compare result force + * bit 0 -- PID bit 0 compare result force + * 16-255(15x32 Section filter target) + * For first byte : Table_ID + * ---- 15-Mask High 4-bits + * 14-Mask Low 4-bits + * 13-disable_PID_check + * 12:8-PIDindex + * 7:0-section target (always EQ) + * For rest of bytes : + * ---- 15-Mask 14-EQ/NE 13-disable_PID_check + * ----12:8-PIDindex 7:0-section target (or force data) + * advanced setting -- bit 7:0 force compare result + **************************************************/ +/*----------- bit define -----------*/ +#define PID_TYPE 13 +#define PID_TARGET 0 + +#define SECTION_FIRSTBYTE_MASKHIGH 15 +#define SECTION_FIRSTBYTE_MASKLOW 14 +#define SECTION_FIRSTBYTE_DISABLE_PID_CHECK 13 +#define SECTION_FIRSTBYTE_PID_INDEX 8 +#define SECTION_TARGET 0 + +#define SECTION_RESTBYTE_MASK 15 +#define SECTION_RESTBYTE_MASK_EQ 14 +#define SECTION_RESTBYTE_DISABLE_PID_CHECK 13 +#define SECTION_RESTBYTE_PID_INDEX 8 + +/* bit 31:16 -- filter memory write data hi[31:16]*/ +/* bit 15:0 -- filter memory write data 
low [15:0]*/ +/*#define FM_WR_DATA + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x06) // 0x1606*/ +/*#define FM_WR_DATA_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x06) // 0x1656*/ +/*#define FM_WR_DATA_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x06) // 0x16a6*/ +/*----------- bit define -----------*/ +#define FM_WR_DATA_HI 16 + +/* bit 31:24 -- advanced setting hi*/ +/* bit 23:16 -- advanced setting low*/ +/* bit 15 -- filter memory write data request*/ +/* bit 7:0 -- filter memory write addr*/ +/*#define FM_WR_ADDR + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x07) // 0x1607*/ +/*#define FM_WR_ADDR_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x07) // 0x1657*/ +/*#define FM_WR_ADDR_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x07) // 0x16a7*/ +/*----------- bit define -----------*/ +#define FM_ADVANCED_SETTING_HI 24 +#define FM_ADVANCED_SETTING_LO 16 +#define FM_WR_DATA_REQUEST 15 + +/* bit 13:8 demux state -- read only*/ +/* bit 7:4 -- maxnum section filter compare address*/ +/* bit 3:0 -- maxnum PID filter compare address*/ +/*#define MAX_FM_COMP_ADDR + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x08) // 0x1608*/ +/*#define MAX_FM_COMP_ADDR_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x08) // 0x1658*/ +/*#define MAX_FM_COMP_ADDR_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x08) // 0x16a8*/ +/*----------- bit define -----------*/ +#define DEMUX_STATE 8 +#define MAX_FM_SECTION_FILTER_COMP_ADDR 4 + +/* bit 15 - transport_error_indicator*/ +/* bit 14 - payload_unit_start_indicator*/ +/* bit 13 - transport_priority*/ +/* bit 12:0 - PID*/ +/*#define TS_HEAD_0 + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x09) // 0x1609*/ +/*#define TS_HEAD_0_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x09) // 0x1659*/ +/*#define TS_HEAD_0_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x09) // 0x16a9*/ +/*----------- bit define -----------*/ +#define TRANSPORT_ERROR_INDICATOR 15 +#define PAYLOAD_UNIT_START_INDICATOR 14 +#define TRANSPORT_PRIORITY 13 + +/* bit 7:6 transport_scrambling_control*/ +/* bit 5:4 adaptation_field_control*/ +/* bit 
3:0 continuity_counter*/ +/*#define TS_HEAD_1 + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x0a) // 0x160a*/ +/*#define TS_HEAD_1_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x0a) // 0x165a*/ +/*#define TS_HEAD_1_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x0a) // 0x16aa*/ +/*----------- bit define -----------*/ +#define TRANSPORT_SCRAMBLING_CONTROL 6 +#define ADAPTATION_FIELD_CONTROL 4 + +/* bit 15:12 -- om_cmd_count (read only)*/ +/* bit 11:9 -- overflow_count // bit 11:9 -- om_cmd_wr_ptr (read only)*/ +/* bit 8:6 -- om_overwrite_count // bit 8:6 -- om_cmd_rd_ptr (read only)*/ +/* bit 5:3 -- type_stb_om_w_rd (read only)*/ +/* bit 2 -- unit_start_stb_om_w_rd (read only)*/ +/* bit 1 -- om_cmd_overflow (read only)*/ +/* bit 0 -- om_cmd_pending (read)*/ +/* bit 0 -- om_cmd_read_finished (write)*/ +/*#define OM_CMD_STATUS + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x0b) // 0x160b*/ +/*#define OM_CMD_STATUS_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x0b) // 0x165b*/ +/*#define OM_CMD_STATUS_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x0b) // 0x16ab*/ +/*----------- bit define -----------*/ +#define OM_CMD_COUNT 12 +#define OM_OVERFLOW_COUNT 9 +#define OM_OVERWRITE_COUNT 6 +#define TYPE_STB_OM_W_RD 3 +#define UNIT_START_STB_OM_W_RD 2 +#define OM_CMD_OVERFLOW 1 + +/* bit 15:9 -- count_stb_om_w_rd (read only)*/ +/* bit 8:0 -- start_stb_om_wa_rd (read only)*/ +/*#define OM_CMD_DATA + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x0c) // 0x160c*/ +/*#define OM_CMD_DATA_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x0c) // 0x165c*/ +/*#define OM_CMD_DATA_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x0c) // 0x16ac*/ +/*----------- bit define -----------*/ +#define COUNT_STB_OM_W_RD 9 + +/* bit 11:0 -- offset for section data*/ +/*#define OM_CMD_DATA2 + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x0d) // 0x160d*/ +/*#define OM_CMD_DATA2_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x0d) // 0x165d*/ +/*#define OM_CMD_DATA2_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x0d) // 0x16ad*/ + +/* bit 31:16 -- base address for section buffer 
group 0 + * (*0x400 to get real address)*/ +/* bit 15:0 -- base address for section buffer group 1 + * (*0x400 to get real address)*/ +/*#define SEC_BUFF_01_START + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x0e) // 0x160e*/ +/*#define SEC_BUFF_01_START_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x0e) // 0x165e*/ +/*#define SEC_BUFF_01_START_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x0e) // 0x16ae*/ +/*----------- bit define -----------*/ +#define SEC_BUFF_0_BASE_ADDR 16 + +/* bit 31:16 -- base address for section buffer group 2 + * (*0x400 to get real address)*/ +/* bit 15:0 -- base address for section buffer group 3 + * (*0x400 to get real address)*/ +/*#define SEC_BUFF_23_START + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x0f) // 0x160f*/ +/*#define SEC_BUFF_23_START_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x0f) // 0x165f*/ +/*#define SEC_BUFF_23_START_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x0f) // 0x16af*/ +/*----------- bit define -----------*/ +#define SEC_BUFF_2_BASE_ADDR 16 + +/* bit 15:12 -- section buffer size for group 3*/ +/* bit 11:8 -- section buffer size for group 2*/ +/* bit 7:4 -- section buffer size for group 1*/ +/* bit 3:0 -- section buffer size for group 0 + * (bit used, for example, 10 means 1K)*/ +/*#define SEC_BUFF_SIZE + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x10) // 0x1610*/ +/*#define SEC_BUFF_SIZE_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x10) // 0x1660*/ +/*#define SEC_BUFF_SIZE_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x10) // 0x16b0*/ +/*----------- bit define -----------*/ +#define SEC_BUFF_3_SIZE 12 +#define SEC_BUFF_2_SIZE 8 +#define SEC_BUFF_1_SIZE 4 + +/* section buffer busy status for buff 31:0 ( Read Only )*/ +/*#define SEC_BUFF_BUSY + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x11) // 0x1611*/ +/*#define SEC_BUFF_BUSY_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x11) // 0x1661*/ +/*#define SEC_BUFF_BUSY_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x11) // 0x16b1*/ + +/* section buffer write status for buff 31:0 -- Read*/ +/* clear buffer status ( buff READY 
and BUSY ) -- write*/ +/*#define SEC_BUFF_READY + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x12) // 0x1612*/ +/*#define SEC_BUFF_READY_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x12) // 0x1662*/ +/*#define SEC_BUFF_READY_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x12) // 0x16b2*/ + +/* bit 15 -- section_reset_busy (Read Only)*/ +/* bit 14 -- output_section_buffer_valid*/ +/* bit 12:8 -- SEC_BUFFER_NUMBER for the INDEX buffer Read_Only*/ +/* bit 4:0 -- SEC_BUFFER_INDEX RW*/ +/*#define SEC_BUFF_NUMBER + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x13) // 0x1613*/ +/*#define SEC_BUFF_NUMBER_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x13) // 0x1663*/ +/*#define SEC_BUFF_NUMBER_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x13) // 0x16b3*/ +/*----------- bit define -----------*/ +#define SECTION_RESET_BUSY 15 +#define OUTPUT_SECTION_BUFFER_VALID 14 +#define INDEXED_SEC_BUFF_NUMBER 8 + +/* bit 9:5 -- BYPASS PID number*/ +/* bit 4:0 -- PCR PID number*/ +/*#define ASSIGN_PID_NUMBER + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x14) // 0x1614*/ +/*#define ASSIGN_PID_NUMBER_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x14) // 0x1664*/ +/*#define ASSIGN_PID_NUMBER_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x14) // 0x16b4*/ +/*----------- bit define -----------*/ +#define BYPASS_PID_NUMBER 5 + +/* bit 15:0 -- stream_id filter bit enable*/ +/* bit 7:0 -- stream_id filter target*/ +/*#define VIDEO_STREAM_ID + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x15) // 0x1615*/ +/*#define VIDEO_STREAM_ID_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x15) // 0x1665*/ +/*#define VIDEO_STREAM_ID_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x15) // 0x16b5*/ + +/*#define AUDIO_STREAM_ID + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x16) // 0x1616*/ +/*#define AUDIO_STREAM_ID_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x16) // 0x1666*/ +/*#define AUDIO_STREAM_ID_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x16) // 0x16b6*/ + +/*#define SUB_STREAM_ID + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x17) // 0x1617*/ +/*#define SUB_STREAM_ID_2 + * (STB_CBUS_BASE + 
DEMUX_2_OFFSET + 0x17) // 0x1667*/ +/*#define SUB_STREAM_ID_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x17) // 0x16b7*/ + +/*#define OTHER_STREAM_ID + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x18) // 0x1618*/ +/*#define OTHER_STREAM_ID_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x18) // 0x1668*/ +/*#define OTHER_STREAM_ID_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x18) // 0x16b8*/ + +/* bit 12 -- PCR_EN*/ +/* bit 11:0 -- PCR90K_DIV*/ +/*#define PCR90K_CTL + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x19) // 0x1619*/ +/*#define PCR90K_CTL_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x19) // 0x1669*/ +/*#define PCR90K_CTL_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x19) // 0x16b9*/ +/*----------- bit define -----------*/ +#define PCR_EN 12 + +/* bit 15:0 -- PCR[31:0] R/W*/ +/*#define PCR_DEMUX + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x1a) // 0x161a*/ +/*#define PCR_DEMUX_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x1a) // 0x166a*/ +/*#define PCR_DEMUX_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x1a) // 0x16ba*/ + +/* bit 15:0 -- VPTS[31:0] R/W*/ +/*#define VIDEO_PTS_DEMUX + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x1b) // 0x161b*/ +/*#define VIDEO_PTS_DEMUX_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x1b) // 0x166b*/ +/*#define VIDEO_PTS_DEMUX_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x1b) // 0x16bb*/ + +/* bit 15:0 -- VDTS[31:0] R/W*/ +/*#define VIDEO_DTS_DEMUX + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x1c) // 0x161c*/ +/*#define VIDEO_DTS_DEMUX_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x1c) // 0x166c*/ +/*#define VIDEO_DTS_DEMUX_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x1c) // 0x16bc*/ + +/* bit 15:0 -- APTS[31:0] R/W*/ +/*#define AUDIO_PTS_DEMUX + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x1d) // 0x161d*/ +/*#define AUDIO_PTS_DEMUX_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x1d) // 0x166d*/ +/*#define AUDIO_PTS_DEMUX_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x1d) // 0x16bd*/ + +/* bit 15:0 -- SPTS[31:0] R/W*/ +/*#define SUB_PTS_DEMUX + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x1e) // 0x161e*/ +/*#define SUB_PTS_DEMUX_2 
+ * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x1e) // 0x166e*/ +/*#define SUB_PTS_DEMUX_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x1e) // 0x16be*/ + +/* read -- status, write 1 clear status*/ +/* bit 15 -- SUB_PTS[32]*/ +/* bit 14 -- AUDIO_PTS[32]*/ +/* bit 13 -- VIDEO_DTS[32]*/ +/* bit 12 -- VIDEO_PTS[32]*/ +/* bit 3 -- sub_pts_ready*/ +/* bit 2 -- audio_pts_ready*/ +/* bit 1 -- video_dts_ready*/ +/* bit 0 -- video_pts_ready*/ +/*#define STB_PTS_DTS_STATUS + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x1f) // 0x161f*/ +/*#define STB_PTS_DTS_STATUS_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x1f) // 0x166f*/ +/*#define STB_PTS_DTS_STATUS_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x1f) // 0x16bf*/ +/*----------- bit define -----------*/ +#define SUB_PTS_BIT32 15 +#define AUDIO_PTS_BIT32 14 +#define VIDEO_DTS_BIT32 13 +#define VIDEO_PTS_BIT32 12 +#define SUB_PTS_READY 3 +#define AUDIO_PTS_READY 2 +#define VIDEO_DTS_READY 1 +#define VIDEO_PTS_READY 0 + +/* bit 3:0 --*/ +/* 0 -- adaptation_field_length[7:0], adaption_field_byte_1[7:0]*/ +/* 1 -- stream_id[7:0], pes_header_bytes_left[7:0]*/ +/* 2 -- pes_package_bytes_left[15:0]*/ +/* 3 -- pes_ctr_byte[7:0], pes_flag_byte[7:0]*/ +/*#define STB_DEBUG_INDEX + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x20) // 0x1620*/ +/*#define STB_DEBUG_INDEX_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x20) // 0x1670*/ +/*#define STB_DEBUG_INDEX_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x20) // 0x16c0*/ + +/* read only*/ +/*#define STB_DEBUG_DATA_OUT + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x21) // 0x1621*/ +/*#define STB_DEBUG_DATA_OUT_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x21) // 0x1671*/ +/*#define STB_DEBUG_DATA_OUT_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x21) // 0x16c1*/ + +/* bit[31] -- no_match_record_en*/ +/* bit[30:16] - reserved*/ +/* default : 0x807f*/ +/* bit 15:9 -- MAX OM DMA COUNT (default: 0x40)*/ +/* bit 8:0 -- LAST ADDR OF OM ADDR (default: 127)*/ +/*#define STB_OM_CTL \ + (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x22) // 0x1622*/ +/*#define STB_OM_CTL_2 
\ + (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x22) // 0x1672*/ +/*#define STB_OM_CTL_3 \ + (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x22) // 0x16c2*/ +/*----------- bit define -----------*/ +#define MAX_OM_DMA_COUNT 9 +#define LAST_OM_ADDR 0 + +/* 15:0 WRITE 1 CLEAR to clear interrupt source*/ +/*12 -- INPUT_TIME_OUT*/ +/*11 -- PCR_ready*/ +/*10 -- audio_splicing_point*/ +/* 9 -- video_splicing_point*/ +/* 8 -- other_PES_int*/ +/* 7 -- sub_PES_int*/ +/* 6 -- discontinuity*/ +/* 5 -- duplicated_pack_found*/ +/* 4 -- New PDTS ready*/ +/* 3 -- om_cmd_buffer ready for access*/ +/* 2 -- section buffer ready*/ +/* 1 -- transport_error_indicator*/ +/* 0 -- TS ERROR PIN*/ +/*#define STB_INT_STATUS + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x23) // 0x1623*/ +/*#define STB_INT_STATUS_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x23) // 0x1673*/ +/*#define STB_INT_STATUS_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x23) // 0x16c3*/ +/*----------- bit define -----------*/ +#define INPUT_TIME_OUT 12 +#define PCR_READY 11 +#define AUDIO_SPLICING_POINT 10 +#define VIDEO_SPLICING_POINT 9 +#define OTHER_PES_READY 8 +#define SUB_PES_READY 7 +#define DIS_CONTINUITY_PACKET 6 +#define DUPLICATED_PACKET 5 +#define NEW_PDTS_READY 4 +#define OM_CMD_READ_PENDING 3 +#define SECTION_BUFFER_READY 2 +#define TS_ERROR_PACKAGE 1 +#define TS_ERROR_PIN 0 + +/* When Bit 31 - 1 write will indicate all type use sepertate endian + * (Write Only)*/ +/* When Bit 31 - 0 write will indicate all type else use Bit 8:6*/ +/* Bit 23:21 - demux om write endian control for OTHER_PES_PACKET*/ +/* Bit 20:18 - demux om write endian control for SCR_ONLY_PACKET*/ +/* Bit 17:15 - demux om write endian control for SUB_PACKET*/ +/* Bit 14:12 - demux om write endian control for AUDIO_PACKET*/ +/* Bit 11:9 - demux om write endian control for VIDEO_PACKET*/ +/* Bit 8:6 - demux om write endian control for else*/ +/* Bit 5:3 - demux om write endian control for bypass*/ +/* Bit 2:0 - demux om write endian control for section*/ +/*#define DEMUX_ENDIAN 
+ * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x24) // 0x1624*/ +/*#define DEMUX_ENDIAN_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x24) // 0x1674*/ +/*#define DEMUX_ENDIAN_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x24) // 0x16c4*/ +/*----------- bit define -----------*/ +#define SEPERATE_ENDIAN 31 +#define OTHER_PES_ENDIAN 21 +#define SCR_ENDIAN 18 +#define SUB_ENDIAN 15 +#define AUDIO_ENDIAN 12 +#define VIDEO_ENDIAN 9 +#define OTHER_ENDIAN 6 +#define BYPASS_ENDIAN 3 +#define SECTION_ENDIAN 0 + +/* Bit 10:9 -- PDTS_wr_sel: 0 select video_PDTS_wr_ptr; 1 select video_PDTS_wr_ptr_parser_B; */ +/* Bit 7:8 -- use hi_bsf interface*/ +/* Bit 6:2 - fec_clk_div*/ +/* Bit 1 ts_source_sel */ +/* Bit 0 - Hiu TS generate enable */ +/*#define TS_HIU_CTL + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x25) // 0x1625*/ +/*#define TS_HIU_CTL_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x25) // 0x1675*/ +/*#define TS_HIU_CTL_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x25) // 0x16c5*/ +/*----------- bit define -----------*/ +//#define LAST_BURST_THRESHOLD 8 +#define PDTS_WR_SEL 9 +#define USE_HI_BSF_INTERFACE 7 + +/* bit 15:0 -- base address for section buffer start + * (*0x10000 to get real base)*/ +/*#define SEC_BUFF_BASE + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x26) // 0x1626*/ +/*#define SEC_BUFF_BASE_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x26) // 0x1676*/ +/*#define SEC_BUFF_BASE_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x26) // 0x16c6*/ + +/* bit 11 -- mask bit for OTHER_PES_AHB_DMA_EN*/ +/* bit 10 -- mask bit for SUB_AHB_DMA_EN*/ +/* bit 9 -- mask bit for BYPASS_AHB_DMA_EN*/ +/* bit 8 -- mask bit for SECTION_AHB_DMA_EN*/ +/* bit 7 -- mask bit for recoder stream*/ +/* bit 6:0 -- mask bit for each type*/ +/*#define DEMUX_MEM_REQ_EN + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x27) // 0x1627*/ +/*#define DEMUX_MEM_REQ_EN_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x27) // 0x1677*/ +/*#define DEMUX_MEM_REQ_EN_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x27) // 0x16c7*/ +/*----------- bit define -----------*/ +#define 
VIDEO2_DMA_EN_BIT 12 +#define OTHER_PES_AHB_DMA_EN 11 +#define SUB_AHB_DMA_EN 10 +#define BYPASS_AHB_DMA_EN 9 +#define SECTION_AHB_DMA_EN 8 +#define RECORDER_STREAM 7 +#define OTHER_PES_PACKET 6 +#define SCR_ONLY_PACKET 5 /*will never be used*/ +#define BYPASS_PACKET 4 +#define SECTION_PACKET 3 +#define SUB_PACKET 2 +#define AUDIO_PACKET 1 +#define VIDEO_PACKET 0 + +/* bit 31:0 -- vb_wr_ptr for video PDTS*/ +/*#define VIDEO_PDTS_WR_PTR + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x28) // 0x1628*/ +/*#define VIDEO_PDTS_WR_PTR_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x28) // 0x1678*/ +/*#define VIDEO_PDTS_WR_PTR_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x28) // 0x16c8*/ + +/* bit 31:0 -- ab_wr_ptr for audio PDTS*/ +/*#define AUDIO_PDTS_WR_PTR + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x29) // 0x1629*/ +/*#define AUDIO_PDTS_WR_PTR_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x29) // 0x1679*/ +/*#define AUDIO_PDTS_WR_PTR_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x29) // 0x16c9*/ + +/* bit 20:0 -- SB_WRITE_PTR (sb_wr_ptr << 3 == byte write position)*/ +/*#define SUB_WR_PTR + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x2a) // 0x162a*/ +/*#define SUB_WR_PTR_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x2a) // 0x167a*/ +/*#define SUB_WR_PTR_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x2a) // 0x16ca*/ + +/* bit 19:0 -- SB_START (sb_start << 12 == byte address);*/ +/*#define SB_START + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x2b) // 0x162b*/ +/*#define SB_START_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x2b) // 0x167b*/ +/*#define SB_START_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x2b) // 0x16cb*/ + +/* bit 20:0 -- SB_SIZE (sb_size << 3 == byte size, 16M maximun)*/ +/*#define SB_LAST_ADDR + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x2c) // 0x162c*/ +/*#define SB_LAST_ADDR_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x2c) // 0x167c*/ +/*#define SB_LAST_ADDR_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x2c) // 0x16cc*/ + +/* bit 31:0 -- sb_wr_ptr for sub PES*/ +/*#define SB_PES_WRITE_PTR + * (STB_CBUS_BASE + DEMUX_1_OFFSET 
+ 0x2d) // 0x162d*/ +/*#define SB_PES_WRITE_PTR_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x2d) // 0x167d*/ +/*#define SB_PES_WRITE_PTR_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x2d) // 0x16cd*/ + +/* bit 31:16 -- ob_wr_ptr for other PES*/ +/* bit 20:0 -- OB_WRITE_PTR (ob_wr_ptr << 3 == byte write position)*/ +/*#define OTHER_WR_PTR + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x2e) // 0x162e*/ +/*#define OTHER_WR_PTR_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x2e) // 0x167e*/ +/*#define OTHER_WR_PTR_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x2e) // 0x16ce*/ + +/* bit 19:0 -- OB_START (ob_start << 12 == byte address);*/ +/*#define OB_START + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x2f) // 0x162f*/ +/*#define OB_START_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x2f) // 0x167f*/ +/*#define OB_START_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x2f) // 0x16cf*/ + +/* bit 20:0 -- OB_SIZE (ob_size << 3 == byte size, 16M maximun)*/ +/*#define OB_LAST_ADDR + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x30) // 0x1630*/ +/*#define OB_LAST_ADDR_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x30) // 0x1680*/ +/*#define OB_LAST_ADDR_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x30) // 0x16d0*/ + +/* bit 31:0 -- ob_wr_ptr for sub PES*/ +/*#define OB_PES_WRITE_PTR + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x31) // 0x1631*/ +/*#define OB_PES_WRITE_PTR_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x31) // 0x1681*/ +/*#define OB_PES_WRITE_PTR_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x31) // 0x16d1*/ + +/* 15:0 DEMUX interrupt MASK*/ +/* 11 -- PCR_READY*/ +/* 10 -- audio_splicing_point*/ +/* 9 -- video_splicing_point*/ +/* 8 -- other_PES_int*/ +/* 7 -- sub_PES_int*/ +/* 6 -- discontinuity*/ +/* 5 -- duplicated_pack_found*/ +/* 4 -- New PDTS ready*/ +/* 3 -- om_cmd_buffer ready for access*/ +/* 2 -- section buffer ready*/ +/* 1 -- transport_error_indicator*/ +/* 0 -- TS ERROR PIN*/ +/*#define STB_INT_MASK + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x32) // 0x1632*/ +/*#define STB_INT_MASK_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x32) // 
0x1682*/ +/*#define STB_INT_MASK_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x32) // 0x16d2*/ + +/* 31:16 VIDEO PID filter data*/ +/*15 -- splicing VIDEO PID change enable*/ +/*14:10 -- VIDEO PID FILTER ADDRESS*/ +/* 9 -- PES splicing active (Read Only)*/ +/* 8 -- splicing active (Read Only)*/ +/* 7:0 splicing countdown (Read Only)*/ +/*#define VIDEO_SPLICING_CTL + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x33) // 0x1633*/ +/*#define VIDEO_SPLICING_CTL_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x33) // 0x1683*/ +/*#define VIDEO_SPLICING_CTL_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x33) // 0x16d3*/ +/*----------- bit define -----------*/ +#define VIDEO_PID_FILTER_DATA 16 +#define VIDEO_SPLICING_PID_CHANGE_ENABLE 15 +#define VIDEO_PID_FILTER_ADDRESS 10 +#define VIDEO_PES_SPLICING_ACTIVE 9 +#define VIDEO_SPLICING_ACTIVE 8 + + +/* 31:16 AUDIO PID filter data*/ +/*15 -- splicing AUDIO PID change enable*/ +/*14:10 -- AUDIO PID FILTER ADDRESS*/ +/* 9 -- PES splicing active (Read Only)*/ +/* 8 -- splicing active (Read Only)*/ +/* 7:0 splicing countdown (Read Only)*/ +/*#define AUDIO_SPLICING_CTL + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x34) // 0x1634*/ +/*#define AUDIO_SPLICING_CTL_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x34) // 0x1684*/ +/*#define AUDIO_SPLICING_CTL_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x34) // 0x16d4*/ +/*----------- bit define -----------*/ +#define AUDIO_PID_FILTER_DATA 16 +#define AUDIO_SPLICING_PID_CHANGE_ENABLE 15 +#define AUDIO_PID_FILTER_ADDRESS 10 +#define AUDIO_PES_SPLICING_ACTIVE 9 +#define AUDIO_SPLICING_ACTIVE 8 + +/* 23:16 M2TS_SKIP_BYTES*/ +/* 15:8 LAST TS PACKAGE BYTE COUNT (Read Only)*/ +/* 7:0 PACKAGE BYTE COUNT (Read Only)*/ +/*#define TS_PACKAGE_BYTE_COUNT + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x35) // 0x1635*/ +/*#define TS_PACKAGE_BYTE_COUNT_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x35) // 0x1685*/ +/*#define TS_PACKAGE_BYTE_COUNT_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x35) // 0x16d5*/ +/*----------- bit define -----------*/ +#define 
M2TS_SKIP_BYTES 16 +#define LAST_TS_PACKAGE_BYTE_COUNT 8 + +/* 15:0 2 bytes strong sync add to PES*/ +/*#define PES_STRONG_SYNC + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x36) // 0x1636*/ +/*#define PES_STRONG_SYNC_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x36) // 0x1686*/ +/*#define PES_STRONG_SYNC_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x36) // 0x16d6*/ + +/* bit 15 -- stb_om_ren*/ +/* bit 14:11 -- reserved*/ +/* bit 10:0 -- OM_DATA_RD_ADDR*/ +/*#define OM_DATA_RD_ADDR + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x37) // 0x1637*/ +/*#define OM_DATA_RD_ADDR_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x37) // 0x1687*/ +/*#define OM_DATA_RD_ADDR_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x37) // 0x16d7*/ +/*----------- bit define -----------*/ +#define STB_OM_REN 15 + +/* bit 15:0 -- OM_DATA_RD*/ +/*#define OM_DATA_RD + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x38) // 0x1638*/ +/*#define OM_DATA_RD_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x38) // 0x1688*/ +/*#define OM_DATA_RD_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x38) // 0x16d8*/ + +/* AUTO STOP SETTING for 32 channels*/ +/* 4-bits per channel*/ +/* when write*/ +/* bit 3 -- set section active*/ +/* bit 2:0 -- auto stop after count (0 means never stop)*/ +/* when read*/ +/* bit 3 -- current active status (1 - active, 0 - stopped )*/ +/* bit 2:0 -- count down to auto stop*/ +/* section 31:24*/ +/*#define SECTION_AUTO_STOP_3 + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x39) // 0x1639*/ +/*#define SECTION_AUTO_STOP_3_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x39) // 0x1689*/ +/*#define SECTION_AUTO_STOP_3_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x39) // 0x16d9*/ +/* section 23:16*/ +/*#define SECTION_AUTO_STOP_2 + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x3a) // 0x163a*/ +/*#define SECTION_AUTO_STOP_2_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x3a) // 0x168a*/ +/*#define SECTION_AUTO_STOP_2_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x3a) // 0x16da*/ +/* section 15:8*/ +/*#define SECTION_AUTO_STOP_1 + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x3b) // 
0x163b*/ +/*#define SECTION_AUTO_STOP_1_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x3b) // 0x168b*/ +/*#define SECTION_AUTO_STOP_1_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x3b) // 0x16db*/ +/* section 7:0*/ +/*#define SECTION_AUTO_STOP_0 + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x3c) // 0x163c*/ +/*#define SECTION_AUTO_STOP_0_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x3c) // 0x168c*/ +/*#define SECTION_AUTO_STOP_0_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x3c) // 0x16dc*/ + +/* bit 31:0 reset channel status - each bit reset each channel*/ +/* read -- 32 channel status*/ +/*#define DEMUX_CHANNEL_RESET + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x3d) // 0x163d*/ +/*#define DEMUX_CHANNEL_RESET_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x3d) // 0x168d*/ +/*#define DEMUX_CHANNEL_RESET_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x3d) // 0x16dd*/ + +/*#define DEMUX_SCRAMBLING_STATE + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x3e) // 0x163e*/ +/*#define DEMUX_SCRAMBLING_STATE_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x3e) // 0x168e*/ +/*#define DEMUX_SCRAMBLING_STATE_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x3e) // 0x16de*/ + +/*#define DEMUX_CHANNEL_ACTIVITY + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x3f) // 0x163f*/ +/*#define DEMUX_CHANNEL_ACTIVITY_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x3f) // 0x168f*/ +/*#define DEMUX_CHANNEL_ACTIVITY_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x3f) // 0x16df*/ + +/* bit 4 -- video_stamp_use_dts*/ +/* bit 3 -- audio_stamp_sync_1_en*/ +/* bit 2 -- audio_stamp_insert_en*/ +/* bit 1 -- video_stamp_sync_1_en*/ +/* bit 0 -- video_stamp_insert_en*/ +/*#define DEMUX_STAMP_CTL + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x40) // 0x1640*/ +/*#define DEMUX_STAMP_CTL_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x40) // 0x1690*/ +/*#define DEMUX_STAMP_CTL_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x40) // 0x16e0*/ + +/*#define DEMUX_VIDEO_STAMP_SYNC_0 + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x41) // 0x1641*/ +/*#define DEMUX_VIDEO_STAMP_SYNC_0_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 
0x41) // 0x1691*/ +/*#define DEMUX_VIDEO_STAMP_SYNC_0_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x41) // 0x16e1*/ + +/*#define DEMUX_VIDEO_STAMP_SYNC_1 + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x42) // 0x1642*/ +/*#define DEMUX_VIDEO_STAMP_SYNC_1_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x42) // 0x1692*/ +/*#define DEMUX_VIDEO_STAMP_SYNC_1_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x42) // 0x16e2*/ + +/*#define DEMUX_AUDIO_STAMP_SYNC_0 + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x43) // 0x1643*/ +/*#define DEMUX_AUDIO_STAMP_SYNC_0_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x43) // 0x1693*/ +/*#define DEMUX_AUDIO_STAMP_SYNC_0_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x43) // 0x16e3*/ + +/*#define DEMUX_AUDIO_STAMP_SYNC_1 + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x44) // 0x1644*/ +/*#define DEMUX_AUDIO_STAMP_SYNC_1_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x44) // 0x1694*/ +/*#define DEMUX_AUDIO_STAMP_SYNC_1_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x44) // 0x16e4*/ + +/* Write : Bit[4:0] secter filter number for reset*/ +/* Read : select according to output_section_buffer_valid :*/ +/* per bit per section buffer valid status*/ +/* or section_buffer_ignore*/ +/*#define DEMUX_SECTION_RESET + * (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x45) // 0x1645*/ +/*#define DEMUX_SECTION_RESET_2 + * (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x45) // 0x1695*/ +/*#define DEMUX_SECTION_RESET_3 + * (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x45) // 0x16e5*/ + + +/* bit[31:0] - channel_reset_timeout_disable*/ +/*#define DEMUX_INPUT_TIMEOUT_C \ + (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x46) // 0x1646*/ +/*#define DEMUX_INPUT_TIMEOUT_C_2 \ + (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x46) // 0x1696*/ +/*#define DEMUX_INPUT_TIMEOUT_C_3 \ + (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x46) // 0x16e6*/ +/* bit[31] - no_match_reset_timeout_disable*/ +/* bit[30:0] input_time_out_int_cnt (0 -- means disable) Wr-setting, Rd-count*/ +/*#define DEMUX_INPUT_TIMEOUT \ + (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x47) // 0x1647*/ +/*#define DEMUX_INPUT_TIMEOUT_2 \ + 
(STB_CBUS_BASE + DEMUX_2_OFFSET + 0x47) // 0x1697*/ +/*#define DEMUX_INPUT_TIMEOUT_3 \ + (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x47) // 0x16e7*/ + +/* bit[31:0] - channel_packet_count_disable*/ +/*#define DEMUX_PACKET_COUNT_C \ + (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x48) // 0x1648*/ +/*#define DEMUX_PACKET_COUNT_C_2 \ + (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x48) // 0x1698*/ +/*#define DEMUX_PACKET_COUNT_C_3 \ + (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x48)*/ /* 0x16e8*/ +/* bit[31] - no_match_packet_count_disable*/ +/* bit[30:0] input_packet_count*/ +/*#define DEMUX_PACKET_COUNT \ + (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x49) // 0x1649*/ +/*#define DEMUX_PACKET_COUNT_2 \ + (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x49) // 0x1699*/ +/*#define DEMUX_PACKET_COUNT_3 \ + (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x49) // 0x16e9*/ + +/* bit[31:0] channel_record_enable*/ +/*#define DEMUX_CHAN_RECORD_EN \ + (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x4a) // 0x164a*/ +/*#define DEMUX_CHAN_RECORD_EN_2 \ + (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x4a) // 0x169a*/ +/*#define DEMUX_CHAN_RECORD_EN_3 \ + (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x4a) // 0x16ea*/ + +/* bit[31:0] channel_process_enable*/ +/*#define DEMUX_CHAN_PROCESS_EN \ + (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x4b) // 0x164b*/ +/*#define DEMUX_CHAN_PROCESS_EN_2 \ + (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x4b) */ /* 0x169b*/ +/*#define DEMUX_CHAN_PROCESS_EN_3 \ + (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x4b) // 0x16eb*/ + +/* bit[31:24] small_sec_size ((n+1) * 256 Bytes)*/ +/* bit[23:16] small_sec_rd_ptr */ +/* bit[15:8] small_sec_wr_ptr */ +/* bit[7:2] reserved*/ +/* bit[1] small_sec_wr_ptr_wr_enable*/ +/* bit[0] small_section_enable*/ +/*#define DEMUX_SMALL_SEC_CTL \ + (STB_CBUS_BASE + DEMUX_1_OFFSET + 0x4c)*/ /* 0x164c*/ +/*#define DEMUX_SMALL_SEC_CTL_2 \ + (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x4c) // 0x169c*/ +/*#define DEMUX_SMALL_SEC_CTL_3 \ + (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x4c) // 0x16ec*/ +/* bit[31:0] small_sec_start_addr*/ +/*#define DEMUX_SMALL_SEC_ADDR \ + 
(STB_CBUS_BASE + DEMUX_1_OFFSET + 0x4d) // 0x164d*/ +/*#define DEMUX_SMALL_SEC_ADDR_2 \ + (STB_CBUS_BASE + DEMUX_2_OFFSET + 0x4d) // 0x169d*/ +/*#define DEMUX_SMALL_SEC_ADDR_3 \ + (STB_CBUS_BASE + DEMUX_3_OFFSET + 0x4d) // 0x16ed*/ + + +/*======================================================*/ +/* STB Registers End*/ +/*====================================================*/ +/* ----------------------------*/ +/* ASYNC FIFO (4)*/ +/* ----------------------------*/ +/*#define ASYNC_FIFO_REG0 0x2310*/ +/*#define ASYNC_FIFO_REG1 0x2311*/ +#define ASYNC_FIFO_FLUSH_STATUS 31 +#define ASYNC_FIFO_ERR 30 +#define ASYNC_FIFO_FIFO_EMPTY 29 +#define ASYNC_FIFO_TO_HIU 24 +#define ASYNC_FIFO_FLUSH 23 +#define ASYNC_FIFO_RESET 22 +#define ASYNC_FIFO_WRAP_EN 21 +#define ASYNC_FIFO_FLUSH_EN 20 +#define ASYNC_FIFO_RESIDUAL_MSB 19 +#define ASYNC_FIFO_RESIDUAL_LSB 15 +#define ASYNC_FIFO_FLUSH_CNT_MSB 14 +#define ASYNC_FIFO_FLUSH_CNT_LSB 0 +/*#define ASYNC_FIFO_REG2 0x2312*/ +#define ASYNC_FIFO_FIFO_FULL 26 +#define ASYNC_FIFO_FILL_STATUS 25 +#define ASYNC_FIFO_SOURCE_MSB 24 +#define ASYNC_FIFO_SOURCE_LSB 23 +#define ASYNC_FIFO_ENDIAN_MSB 22 +#define ASYNC_FIFO_ENDIAN_LSB 21 +#define ASYNC_FIFO_FILL_EN 20 +#define ASYNC_FIFO_FILL_CNT_MSB 19 +#define ASYNC_FIFO_FILL_CNT_LSB 0 +/*#define ASYNC_FIFO_REG3 0x2313*/ +#define ASYNC_FLUSH_SIZE_IRQ_MSB 15 +#define ASYNC_FLUSH_SIZE_IRQ_LSB 0 +/* ----------------------------*/ +/* ASYNC FIFO (4)*/ +/* ----------------------------*/ +/*#define ASYNC_FIFO2_REG0 0x2314*/ +/*#define ASYNC_FIFO2_REG1 0x2315*/ +/*#define ASYNC_FIFO2_REG2 0x2316*/ +/*#define ASYNC_FIFO2_REG3 0x2317*/ + +#define RESET_DEMUXSTB (1 << 1) +#endif /* C_STB_DEFINE_H*/
diff --git a/drivers/stream_input/parser/hw_demux/c_stb_regs_define.h b/drivers/stream_input/parser/hw_demux/c_stb_regs_define.h new file mode 100644 index 0000000..e351771 --- /dev/null +++ b/drivers/stream_input/parser/hw_demux/c_stb_regs_define.h
@@ -0,0 +1,814 @@ +/* +* Copyright (C) 2017 Amlogic, Inc. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +* +* You should have received a copy of the GNU General Public License along +* with this program; if not, write to the Free Software Foundation, Inc., +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +* +* Description: +*/ +/* + * This file is automaticly generated by genregs.awk. Please do not edit it + * Base files are .. + * .. + * .. + * Tue Oct 22 15:28:48 CST 2013 + **/ + +#ifndef __MACH_MESON8_REG_ADDR_H_ +#define __MACH_MESON8_REG_ADDR_H_ +#include <linux/amlogic/media/registers/register_map.h> +#define CBUS_REG_ADDR(_r) aml_read_cbus(_r) + + +#define STB_CBUS_BASE aml_stb_get_base(ID_STB_CBUS_BASE) +#define SMARTCARD_REG_BASE aml_stb_get_base(ID_SMARTCARD_REG_BASE) +#define ASYNC_FIFO_REG_BASE aml_stb_get_base(ID_ASYNC_FIFO_REG_BASE) +#define ASYNC_FIFO1_REG_BASE aml_stb_get_base(ID_ASYNC_FIFO1_REG_BASE) +#define ASYNC_FIFO2_REG_BASE aml_stb_get_base(ID_ASYNC_FIFO2_REG_BASE) +#define RESET_BASE aml_stb_get_base(ID_RESET_BASE) +#define PARSER_SUB_START_PTR_BASE \ + aml_stb_get_base(ID_PARSER_SUB_START_PTR_BASE) + +#define HHI_CSI_PHY_CNTL_BASE 0x1000 + +#define DEMUX_1_OFFSET 0x00 +#define DEMUX_2_OFFSET 0x50 +#define DEMUX_3_OFFSET 0xa0 + + +#define TS_HIU1_CONFIG (STB_CBUS_BASE + 0x4e) +#define P_TS_HIU1_CONFIG CBUS_REG_ADDR(TS_HIU1_CONFIG) + +#define TS_TOP_CONFIG1 (STB_CBUS_BASE + 0x4f) +#define P_TS_TOP_CONFIG1 CBUS_REG_ADDR(TS_TOP_CONFIG1) + +#define STB_S2P2_CONFIG 
(STB_CBUS_BASE + 0xef) +#define P_STB_S2P2_CONFIG CBUS_REG_ADDR(STB_S2P2_CONFIG) + +#define STB_RECORDER2_CNTL (STB_CBUS_BASE + 0xee) +#define P_STB_RECORDER2_CNTL CBUS_REG_ADDR(STB_RECORDER2_CNTL) + +#define STB_TOP_CONFIG (STB_CBUS_BASE + 0xf0) +#define P_STB_TOP_CONFIG CBUS_REG_ADDR(STB_TOP_CONFIG) +#define TS_TOP_CONFIG (STB_CBUS_BASE + 0xf1) +#define P_TS_TOP_CONFIG CBUS_REG_ADDR(TS_TOP_CONFIG) +#define TS_FILE_CONFIG (STB_CBUS_BASE + 0xf2) +#define P_TS_FILE_CONFIG CBUS_REG_ADDR(TS_FILE_CONFIG) +#define TS_PL_PID_INDEX (STB_CBUS_BASE + 0xf3) +#define P_TS_PL_PID_INDEX CBUS_REG_ADDR(TS_PL_PID_INDEX) +#define TS_PL_PID_DATA (STB_CBUS_BASE + 0xf4) +#define P_TS_PL_PID_DATA CBUS_REG_ADDR(TS_PL_PID_DATA) +#define COMM_DESC_KEY0 (STB_CBUS_BASE + 0xf5) +#define P_COMM_DESC_KEY0 CBUS_REG_ADDR(COMM_DESC_KEY0) +#define COMM_DESC_KEY1 (STB_CBUS_BASE + 0xf6) +#define P_COMM_DESC_KEY1 CBUS_REG_ADDR(COMM_DESC_KEY1) +#define COMM_DESC_KEY_RW (STB_CBUS_BASE + 0xf7) +#define P_COMM_DESC_KEY_RW CBUS_REG_ADDR(COMM_DESC_KEY_RW) +#define CIPLUS_KEY0 (STB_CBUS_BASE + 0xf8) +#define P_CIPLUS_KEY0 CBUS_REG_ADDR(CIPLUS_KEY0) +#define CIPLUS_KEY1 (STB_CBUS_BASE + 0xf9) +#define P_CIPLUS_KEY1 CBUS_REG_ADDR(CIPLUS_KEY1) +#define CIPLUS_KEY2 (STB_CBUS_BASE + 0xfa) +#define P_CIPLUS_KEY2 CBUS_REG_ADDR(CIPLUS_KEY2) +#define CIPLUS_KEY3 (STB_CBUS_BASE + 0xfb) +#define P_CIPLUS_KEY3 CBUS_REG_ADDR(CIPLUS_KEY3) +#define CIPLUS_KEY_WR (STB_CBUS_BASE + 0xfc) +#define P_CIPLUS_KEY_WR CBUS_REG_ADDR(CIPLUS_KEY_WR) +#define CIPLUS_CONFIG (STB_CBUS_BASE + 0xfd) +#define P_CIPLUS_CONFIG CBUS_REG_ADDR(CIPLUS_CONFIG) +#define CIPLUS_ENDIAN (STB_CBUS_BASE + 0xfe) +#define P_CIPLUS_ENDIAN CBUS_REG_ADDR(CIPLUS_ENDIAN) + +#define SMARTCARD_REG0 (SMARTCARD_REG_BASE + 0x0) +#define P_SMARTCARD_REG0 CBUS_REG_ADDR(SMARTCARD_REG0) +#define SMARTCARD_REG1 (SMARTCARD_REG_BASE + 0x1) +#define P_SMARTCARD_REG1 CBUS_REG_ADDR(SMARTCARD_REG1) +#define SMARTCARD_REG2 (SMARTCARD_REG_BASE + 0x2) +#define P_SMARTCARD_REG2 
CBUS_REG_ADDR(SMARTCARD_REG2) +#define SMARTCARD_STATUS (SMARTCARD_REG_BASE + 0x3) +#define P_SMARTCARD_STATUS CBUS_REG_ADDR(SMARTCARD_STATUS) +#define SMARTCARD_INTR (SMARTCARD_REG_BASE + 0x4) +#define P_SMARTCARD_INTR CBUS_REG_ADDR(SMARTCARD_INTR) +#define SMARTCARD_REG5 (SMARTCARD_REG_BASE + 0x5) +#define P_SMARTCARD_REG5 CBUS_REG_ADDR(SMARTCARD_REG5) +#define SMARTCARD_REG6 (SMARTCARD_REG_BASE + 0x6) +#define P_SMARTCARD_REG6 CBUS_REG_ADDR(SMARTCARD_REG6) +#define SMARTCARD_FIFO (SMARTCARD_REG_BASE + 0x7) +#define P_SMARTCARD_FIFO CBUS_REG_ADDR(SMARTCARD_FIFO) +#define SMARTCARD_REG8 (SMARTCARD_REG_BASE + 0x8) +#define P_SMARTCARD_REG8 CBUS_REG_ADDR(SMARTCARD_REG8) + +#define ASYNC_FIFO_REG0 (ASYNC_FIFO_REG_BASE + 0x0) +#define P_ASYNC_FIFO_REG0 CBUS_REG_ADDR(ASYNC_FIFO_REG0) +#define ASYNC_FIFO_REG1 (ASYNC_FIFO_REG_BASE + 0x1) +#define P_ASYNC_FIFO_REG1 CBUS_REG_ADDR(ASYNC_FIFO_REG1) +#define ASYNC_FIFO_REG2 (ASYNC_FIFO_REG_BASE + 0x2) +#define P_ASYNC_FIFO_REG2 CBUS_REG_ADDR(ASYNC_FIFO_REG2) +#define ASYNC_FIFO_REG3 (ASYNC_FIFO_REG_BASE + 0x3) +#define P_ASYNC_FIFO_REG3 CBUS_REG_ADDR(ASYNC_FIFO_REG3) +#define ASYNC_FIFO_REG4 (ASYNC_FIFO_REG_BASE + 0x4) +#define P_ASYNC_FIFO_REG4 CBUS_REG_ADDR(ASYNC_FIFO_REG4) +#define ASYNC_FIFO_REG5 (ASYNC_FIFO_REG_BASE + 0x5) +#define P_ASYNC_FIFO_REG5 CBUS_REG_ADDR(ASYNC_FIFO_REG5) + +#define ASYNC_FIFO1_REG0 (ASYNC_FIFO1_REG_BASE + 0x0) +#define P_ASYNC_FIFO1_REG0 CBUS_REG_ADDR(ASYNC_FIFO1_REG0) +#define ASYNC_FIFO1_REG1 (ASYNC_FIFO1_REG_BASE + 0x1) +#define P_ASYNC_FIFO1_REG1 CBUS_REG_ADDR(ASYNC_FIFO1_REG1) +#define ASYNC_FIFO1_REG2 (ASYNC_FIFO1_REG_BASE + 0x2) +#define P_ASYNC_FIFO1_REG2 CBUS_REG_ADDR(ASYNC_FIFO1_REG2) +#define ASYNC_FIFO1_REG3 (ASYNC_FIFO1_REG_BASE + 0x3) +#define P_ASYNC_FIFO1_REG3 CBUS_REG_ADDR(ASYNC_FIFO1_REG3) +#define ASYNC_FIFO1_REG4 (ASYNC_FIFO1_REG_BASE + 0x4) +#define P_ASYNC_FIFO1_REG4 CBUS_REG_ADDR(ASYNC_FIFO1_REG4) +#define ASYNC_FIFO1_REG5 (ASYNC_FIFO1_REG_BASE + 0x5) +#define 
P_ASYNC_FIFO1_REG5 CBUS_REG_ADDR(ASYNC_FIFO1_REG5) + + +#define ASYNC_FIFO2_REG0 (ASYNC_FIFO2_REG_BASE + 0x0) +#define P_ASYNC_FIFO2_REG0 CBUS_REG_ADDR(ASYNC_FIFO2_REG0) +#define ASYNC_FIFO2_REG1 (ASYNC_FIFO2_REG_BASE + 0x1) +#define P_ASYNC_FIFO2_REG1 CBUS_REG_ADDR(ASYNC_FIFO2_REG1) +#define ASYNC_FIFO2_REG2 (ASYNC_FIFO2_REG_BASE + 0x2) +#define P_ASYNC_FIFO2_REG2 CBUS_REG_ADDR(ASYNC_FIFO2_REG2) +#define ASYNC_FIFO2_REG3 (ASYNC_FIFO2_REG_BASE + 0x3) +#define P_ASYNC_FIFO2_REG3 CBUS_REG_ADDR(ASYNC_FIFO2_REG3) +#define ASYNC_FIFO2_REG4 (ASYNC_FIFO2_REG_BASE + 0x4) +#define P_ASYNC_FIFO2_REG4 CBUS_REG_ADDR(ASYNC_FIFO2_REG4) +#define ASYNC_FIFO2_REG5 (ASYNC_FIFO2_REG_BASE + 0x5) +#define P_ASYNC_FIFO2_REG5 CBUS_REG_ADDR(ASYNC_FIFO2_REG5) + + +#define RESET0_REGISTER (RESET_BASE + 0x1) +#define P_RESET0_REGISTER CBUS_REG_ADDR(RESET0_REGISTER) +#define RESET1_REGISTER (RESET_BASE + 0x2) +#define P_RESET1_REGISTER CBUS_REG_ADDR(RESET1_REGISTER) +#define RESET2_REGISTER (RESET_BASE + 0x3) +#define P_RESET2_REGISTER CBUS_REG_ADDR(RESET2_REGISTER) +#define RESET3_REGISTER (RESET_BASE + 0x4) +#define P_RESET3_REGISTER CBUS_REG_ADDR(RESET3_REGISTER) +#define RESET4_REGISTER (RESET_BASE + 0x5) +#define P_RESET4_REGISTER CBUS_REG_ADDR(RESET4_REGISTER) +#define RESET5_REGISTER (RESET_BASE + 0x6) +#define P_RESET5_REGISTER CBUS_REG_ADDR(RESET5_REGISTER) +#define RESET6_REGISTER (RESET_BASE + 0x7) +#define P_RESET6_REGISTER CBUS_REG_ADDR(RESET6_REGISTER) +#define RESET7_REGISTER (RESET_BASE + 0x8) +#define P_RESET7_REGISTER CBUS_REG_ADDR(RESET7_REGISTER) +#define RESET0_MASK (RESET_BASE + 0x10) +#define P_RESET0_MASK CBUS_REG_ADDR(RESET0_MASK) +#define RESET1_MASK (RESET_BASE + 0x11) +#define P_RESET1_MASK CBUS_REG_ADDR(RESET1_MASK) +#define RESET2_MASK (RESET_BASE + 0x12) +#define P_RESET2_MASK CBUS_REG_ADDR(RESET2_MASK) +#define RESET3_MASK (RESET_BASE + 0x13) +#define P_RESET3_MASK CBUS_REG_ADDR(RESET3_MASK) +#define RESET4_MASK (RESET_BASE + 0x14) +#define P_RESET4_MASK 
CBUS_REG_ADDR(RESET4_MASK) +#define RESET5_MASK (RESET_BASE + 0x15) +#define P_RESET5_MASK CBUS_REG_ADDR(RESET5_MASK) +#define RESET6_MASK (RESET_BASE + 0x16) +#define P_RESET6_MASK CBUS_REG_ADDR(RESET6_MASK) +#define CRT_MASK (RESET_BASE + 0x17) +#define P_CRT_MASK CBUS_REG_ADDR(CRT_MASK) +#define RESET7_MASK (RESET_BASE + 0x18) +#define P_RESET7_MASK CBUS_REG_ADDR(RESET7_MASK) +/*add from M8M2*/ +#define P_RESET0_LEVEL CBUS_REG_ADDR(RESET0_LEVEL) +#define RESET1_LEVEL (RESET_BASE + 0x21) +#define P_RESET1_LEVEL CBUS_REG_ADDR(RESET1_LEVEL) +#define RESET2_LEVEL (RESET_BASE + 0x22) +#define P_RESET2_LEVEL CBUS_REG_ADDR(RESET2_LEVEL) +#define RESET3_LEVEL (RESET_BASE + 0x23) +#define P_RESET3_LEVEL CBUS_REG_ADDR(RESET3_LEVEL) +#define RESET4_LEVEL (RESET_BASE + 0x24) +#define P_RESET4_LEVEL CBUS_REG_ADDR(RESET4_LEVEL) +#define RESET5_LEVEL (RESET_BASE + 0x25) +#define P_RESET5_LEVEL CBUS_REG_ADDR(RESET5_LEVEL) +#define RESET6_LEVEL (RESET_BASE + 0x26) +#define P_RESET6_LEVEL CBUS_REG_ADDR(RESET6_LEVEL) +#define RESET7_LEVEL (RESET_BASE + 0x27) +#define P_RESET7_LEVEL CBUS_REG_ADDR(RESET7_LEVEL) + +/*no set*/ +#ifdef MESON_M8_CPU +#define HHI_CSI_PHY_CNTL0 (HHI_CSI_PHY_CNTL_BASE + 0xd3) +#define P_HHI_CSI_PHY_CNTL0 CBUS_REG_ADDR(HHI_CSI_PHY_CNTL0) +#define HHI_CSI_PHY_CNTL1 (HHI_CSI_PHY_CNTL_BASE + 0xd4) +#define P_HHI_CSI_PHY_CNTL1 CBUS_REG_ADDR(HHI_CSI_PHY_CNTL1) +#define HHI_CSI_PHY_CNTL2 (HHI_CSI_PHY_CNTL_BASE + 0xd5) +#define P_HHI_CSI_PHY_CNTL2 CBUS_REG_ADDR(HHI_CSI_PHY_CNTL2) +#define HHI_CSI_PHY_CNTL3 (HHI_CSI_PHY_CNTL_BASE + 0xd6) +#define P_HHI_CSI_PHY_CNTL3 CBUS_REG_ADDR(HHI_CSI_PHY_CNTL3) +#define HHI_CSI_PHY_CNTL4 (HHI_CSI_PHY_CNTL_BASE + 0xd7) +#define P_HHI_CSI_PHY_CNTL4 CBUS_REG_ADDR(HHI_CSI_PHY_CNTL4) +#endif + +#define PARSER_SUB_START_PTR (PARSER_SUB_START_PTR_BASE + 0x8a) +#define P_PARSER_SUB_START_PTR CBUS_REG_ADDR(PARSER_SUB_START_PTR) +#define PARSER_SUB_END_PTR (PARSER_SUB_START_PTR_BASE + 0x8b) +#define P_PARSER_SUB_END_PTR 
CBUS_REG_ADDR(PARSER_SUB_END_PTR) +#define PARSER_SUB_WP (PARSER_SUB_START_PTR_BASE + 0x8c) +#define P_PARSER_SUB_WP CBUS_REG_ADDR(PARSER_SUB_WP) +#define PARSER_SUB_RP (PARSER_SUB_START_PTR_BASE + 0x8d) +#define P_PARSER_SUB_RP CBUS_REG_ADDR(PARSER_SUB_RP) +#define PARSER_SUB_HOLE (PARSER_SUB_START_PTR_BASE + 0x8e) +#define P_PARSER_SUB_HOLE CBUS_REG_ADDR(PARSER_SUB_HOLE) + +/*no set*/ +#define AO_RTI_GEN_PWR_SLEEP0 ((0x00 << 10) | (0x3a << 2)) +#define P_AO_RTI_GEN_PWR_SLEEP0 \ + AOBUS_REG_ADDR(AO_RTI_GEN_PWR_SLEEP0) +#define AO_RTI_GEN_PWR_ISO0 ((0x00 << 10) | (0x3b << 2)) +#define P_AO_RTI_GEN_PWR_ISO0 AOBUS_REG_ADDR(AO_RTI_GEN_PWR_ISO0) + +/**/ +#define STB_VERSION (STB_CBUS_BASE + 0x00) +#define P_STB_VERSION CBUS_REG_ADDR(STB_VERSION) +#define STB_VERSION_2 (STB_CBUS_BASE + 0x50) +#define P_STB_VERSION_2 CBUS_REG_ADDR(STB_VERSION_2) +#define STB_VERSION_3 (STB_CBUS_BASE + 0xa0) +#define P_STB_VERSION_3 CBUS_REG_ADDR(STB_VERSION_3) +#define STB_TEST_REG (STB_CBUS_BASE + 0x01) +#define P_STB_TEST_REG CBUS_REG_ADDR(STB_TEST_REG) +#define STB_TEST_REG_2 (STB_CBUS_BASE + 0x51) +#define P_STB_TEST_REG_2 CBUS_REG_ADDR(STB_TEST_REG_2) +#define STB_TEST_REG_3 (STB_CBUS_BASE + 0xa1) +#define P_STB_TEST_REG_3 CBUS_REG_ADDR(STB_TEST_REG_3) + +#define FEC_INPUT_CONTROL (STB_CBUS_BASE + 0x2) +#define P_FEC_INPUT_CONTROL CBUS_REG_ADDR(FEC_INPUT_CONTROL) +#define FEC_INPUT_CONTROL_2 (STB_CBUS_BASE + 0x52) +#define P_FEC_INPUT_CONTROL_2 CBUS_REG_ADDR(FEC_INPUT_CONTROL_2) +#define FEC_INPUT_CONTROL_3 (STB_CBUS_BASE + 0xa2) +#define P_FEC_INPUT_CONTROL_3 CBUS_REG_ADDR(FEC_INPUT_CONTROL_3) +/*no used*/ +#define FEC_INPUT_DATA (STB_CBUS_BASE + 0x03) +#define P_FEC_INPUT_DATA CBUS_REG_ADDR(FEC_INPUT_DATA) +#define FEC_INPUT_DATA_2 (STB_CBUS_BASE + 0x53) +#define P_FEC_INPUT_DATA_2 CBUS_REG_ADDR(FEC_INPUT_DATA_2) +#define FEC_INPUT_DATA_3 (STB_CBUS_BASE + 0xa3) +#define P_FEC_INPUT_DATA_3 CBUS_REG_ADDR(FEC_INPUT_DATA_3) +/*no used end*/ +#define DEMUX_CONTROL (STB_CBUS_BASE + 
0x04) +#define P_DEMUX_CONTROL CBUS_REG_ADDR(DEMUX_CONTROL) +#define DEMUX_CONTROL_2 (STB_CBUS_BASE + 0x54) +#define P_DEMUX_CONTROL_2 CBUS_REG_ADDR(DEMUX_CONTROL_2) +#define DEMUX_CONTROL_3 (STB_CBUS_BASE + 0xa4) +#define P_DEMUX_CONTROL_3 CBUS_REG_ADDR(DEMUX_CONTROL_3) +/*no used*/ +#define FEC_SYNC_BYTE (STB_CBUS_BASE + 0x05) +#define P_FEC_SYNC_BYTE CBUS_REG_ADDR(FEC_SYNC_BYTE) +#define FEC_SYNC_BYTE_2 (STB_CBUS_BASE + 0x55) +#define P_FEC_SYNC_BYTE_2 CBUS_REG_ADDR(FEC_SYNC_BYTE_2) +#define FEC_SYNC_BYTE_3 (STB_CBUS_BASE + 0xa5) +#define P_FEC_SYNC_BYTE_3 CBUS_REG_ADDR(FEC_SYNC_BYTE_3) +/*no used end*/ + +#define FM_WR_DATA (STB_CBUS_BASE + 0x06) +#define P_FM_WR_DATA CBUS_REG_ADDR(FM_WR_DATA) +#define FM_WR_DATA_2 (STB_CBUS_BASE + 0x56) +#define P_FM_WR_DATA_2 CBUS_REG_ADDR(FM_WR_DATA_2) +#define FM_WR_DATA_3 (STB_CBUS_BASE + 0xa6) +#define P_FM_WR_DATA_3 CBUS_REG_ADDR(FM_WR_DATA_3) +#define FM_WR_ADDR (STB_CBUS_BASE + 0x07) +#define P_FM_WR_ADDR CBUS_REG_ADDR(FM_WR_ADDR) +#define FM_WR_ADDR_2 (STB_CBUS_BASE + 0x57) +#define P_FM_WR_ADDR_2 CBUS_REG_ADDR(FM_WR_ADDR_2) +#define FM_WR_ADDR_3 (STB_CBUS_BASE + 0xa7) +#define P_FM_WR_ADDR_3 CBUS_REG_ADDR(FM_WR_ADDR_3) +#define MAX_FM_COMP_ADDR (STB_CBUS_BASE + 0x08) +#define P_MAX_FM_COMP_ADDR CBUS_REG_ADDR(MAX_FM_COMP_ADDR) +#define MAX_FM_COMP_ADDR_2 (STB_CBUS_BASE + 0x58) +#define P_MAX_FM_COMP_ADDR_2 CBUS_REG_ADDR(MAX_FM_COMP_ADDR_2) +#define MAX_FM_COMP_ADDR_3 (STB_CBUS_BASE + 0xa8) +#define P_MAX_FM_COMP_ADDR_3 CBUS_REG_ADDR(MAX_FM_COMP_ADDR_3) + +#define TS_HEAD_0 (STB_CBUS_BASE + 0x09) +#define P_TS_HEAD_0 CBUS_REG_ADDR(TS_HEAD_0) +#define TS_HEAD_0_2 (STB_CBUS_BASE + 0x59) +#define P_TS_HEAD_0_2 CBUS_REG_ADDR(TS_HEAD_0_2) +#define TS_HEAD_0_3 (STB_CBUS_BASE + 0xa9) +#define P_TS_HEAD_0_3 CBUS_REG_ADDR(TS_HEAD_0_3) +#define TS_HEAD_1 (STB_CBUS_BASE + 0x0a) +#define P_TS_HEAD_1 CBUS_REG_ADDR(TS_HEAD_1) +#define TS_HEAD_1_2 (STB_CBUS_BASE + 0x5a) +#define P_TS_HEAD_1_2 CBUS_REG_ADDR(TS_HEAD_1_2) +#define 
TS_HEAD_1_3 (STB_CBUS_BASE + 0xaa) +#define P_TS_HEAD_1_3 CBUS_REG_ADDR(TS_HEAD_1_3) + +#define OM_CMD_STATUS (STB_CBUS_BASE + 0x0b) +#define P_OM_CMD_STATUS CBUS_REG_ADDR(OM_CMD_STATUS) +#define OM_CMD_STATUS_2 (STB_CBUS_BASE + 0x5b) +#define P_OM_CMD_STATUS_2 CBUS_REG_ADDR(OM_CMD_STATUS_2) +#define OM_CMD_STATUS_3 (STB_CBUS_BASE + 0xab) +#define P_OM_CMD_STATUS_3 CBUS_REG_ADDR(OM_CMD_STATUS_3) + +#define OM_CMD_DATA (STB_CBUS_BASE + 0x0c) +#define P_OM_CMD_DATA CBUS_REG_ADDR(OM_CMD_DATA) +#define OM_CMD_DATA_2 (STB_CBUS_BASE + 0x5c) +#define P_OM_CMD_DATA_2 CBUS_REG_ADDR(OM_CMD_DATA_2) +#define OM_CMD_DATA_3 (STB_CBUS_BASE + 0xac) +#define P_OM_CMD_DATA_3 CBUS_REG_ADDR(OM_CMD_DATA_3) +#define OM_CMD_DATA2 (STB_CBUS_BASE + 0x0d) +#define P_OM_CMD_DATA2 CBUS_REG_ADDR(OM_CMD_DATA2) +#define OM_CMD_DATA2_2 (STB_CBUS_BASE + 0x5d) +#define P_OM_CMD_DATA2_2 CBUS_REG_ADDR(OM_CMD_DATA2_2) +#define OM_CMD_DATA2_3 (STB_CBUS_BASE + 0xad) +#define P_OM_CMD_DATA2_3 CBUS_REG_ADDR(OM_CMD_DATA2_3) + +#define SEC_BUFF_01_START (STB_CBUS_BASE + 0x0e) +#define P_SEC_BUFF_01_START CBUS_REG_ADDR(SEC_BUFF_01_START) +#define SEC_BUFF_01_START_2 (STB_CBUS_BASE + 0x5e) +#define P_SEC_BUFF_01_START_2 CBUS_REG_ADDR(SEC_BUFF_01_START_2) +#define SEC_BUFF_01_START_3 (STB_CBUS_BASE + 0xae) +#define P_SEC_BUFF_01_START_3 CBUS_REG_ADDR(SEC_BUFF_01_START_3) +#define SEC_BUFF_23_START (STB_CBUS_BASE + 0x0f) +#define P_SEC_BUFF_23_START CBUS_REG_ADDR(SEC_BUFF_23_START) +#define SEC_BUFF_23_START_2 (STB_CBUS_BASE + 0x5f) +#define P_SEC_BUFF_23_START_2 CBUS_REG_ADDR(SEC_BUFF_23_START_2) +#define SEC_BUFF_23_START_3 (STB_CBUS_BASE + 0xaf) +#define P_SEC_BUFF_23_START_3 CBUS_REG_ADDR(SEC_BUFF_23_START_3) +#define SEC_BUFF_SIZE (STB_CBUS_BASE + 0x10) +#define P_SEC_BUFF_SIZE CBUS_REG_ADDR(SEC_BUFF_SIZE) +#define SEC_BUFF_SIZE_2 (STB_CBUS_BASE + 0x60) +#define P_SEC_BUFF_SIZE_2 CBUS_REG_ADDR(SEC_BUFF_SIZE_2) +#define SEC_BUFF_SIZE_3 (STB_CBUS_BASE + 0xb0) +#define P_SEC_BUFF_SIZE_3 
CBUS_REG_ADDR(SEC_BUFF_SIZE_3) +#define SEC_BUFF_BUSY (STB_CBUS_BASE + 0x11) +#define P_SEC_BUFF_BUSY CBUS_REG_ADDR(SEC_BUFF_BUSY) +#define SEC_BUFF_BUSY_2 (STB_CBUS_BASE + 0x61) +#define P_SEC_BUFF_BUSY_2 CBUS_REG_ADDR(SEC_BUFF_BUSY_2) +#define SEC_BUFF_BUSY_3 (STB_CBUS_BASE + 0xb1) +#define P_SEC_BUFF_BUSY_3 CBUS_REG_ADDR(SEC_BUFF_BUSY_3) +#define SEC_BUFF_READY (STB_CBUS_BASE + 0x12) +#define P_SEC_BUFF_READY CBUS_REG_ADDR(SEC_BUFF_READY) +#define SEC_BUFF_READY_2 (STB_CBUS_BASE + 0x62) +#define P_SEC_BUFF_READY_2 CBUS_REG_ADDR(SEC_BUFF_READY_2) +#define SEC_BUFF_READY_3 (STB_CBUS_BASE + 0xb2) +#define P_SEC_BUFF_READY_3 CBUS_REG_ADDR(SEC_BUFF_READY_3) +#define SEC_BUFF_NUMBER (STB_CBUS_BASE + 0x13) +#define P_SEC_BUFF_NUMBER CBUS_REG_ADDR(SEC_BUFF_NUMBER) +#define SEC_BUFF_NUMBER_2 (STB_CBUS_BASE + 0x63) +#define P_SEC_BUFF_NUMBER_2 CBUS_REG_ADDR(SEC_BUFF_NUMBER_2) +#define SEC_BUFF_NUMBER_3 (STB_CBUS_BASE + 0xb3) +#define P_SEC_BUFF_NUMBER_3 CBUS_REG_ADDR(SEC_BUFF_NUMBER_3) + + +/**no used*/ +#define ASSIGN_PID_NUMBER (STB_CBUS_BASE + 0x14) +#define P_ASSIGN_PID_NUMBER CBUS_REG_ADDR(ASSIGN_PID_NUMBER) +#define ASSIGN_PID_NUMBER_2 (STB_CBUS_BASE + 0x64) +#define P_ASSIGN_PID_NUMBER_2 CBUS_REG_ADDR(ASSIGN_PID_NUMBER_2) +#define ASSIGN_PID_NUMBER_3 (STB_CBUS_BASE + 0xb4) +#define P_ASSIGN_PID_NUMBER_3 CBUS_REG_ADDR(ASSIGN_PID_NUMBER_3) +#define VIDEO_STREAM_ID (STB_CBUS_BASE + 0x15) +#define P_VIDEO_STREAM_ID CBUS_REG_ADDR(VIDEO_STREAM_ID) +#define VIDEO_STREAM_ID_2 (STB_CBUS_BASE + 0x65) +#define P_VIDEO_STREAM_ID_2 CBUS_REG_ADDR(VIDEO_STREAM_ID_2) +#define VIDEO_STREAM_ID_3 (STB_CBUS_BASE + 0xb5) +#define P_VIDEO_STREAM_ID_3 CBUS_REG_ADDR(VIDEO_STREAM_ID_3) +#define AUDIO_STREAM_ID (STB_CBUS_BASE + 0x16) +#define P_AUDIO_STREAM_ID CBUS_REG_ADDR(AUDIO_STREAM_ID) +#define AUDIO_STREAM_ID_2 (STB_CBUS_BASE + 0x66) +#define P_AUDIO_STREAM_ID_2 CBUS_REG_ADDR(AUDIO_STREAM_ID_2) +#define AUDIO_STREAM_ID_3 (STB_CBUS_BASE + 0xb6) +#define P_AUDIO_STREAM_ID_3 
CBUS_REG_ADDR(AUDIO_STREAM_ID_3) +#define SUB_STREAM_ID (STB_CBUS_BASE + 0x17) +#define P_SUB_STREAM_ID CBUS_REG_ADDR(SUB_STREAM_ID) +#define SUB_STREAM_ID_2 (STB_CBUS_BASE + 0x67) +#define P_SUB_STREAM_ID_2 CBUS_REG_ADDR(SUB_STREAM_ID_2) +#define SUB_STREAM_ID_3 (STB_CBUS_BASE + 0xb7) +#define P_SUB_STREAM_ID_3 CBUS_REG_ADDR(SUB_STREAM_ID_3) +#define OTHER_STREAM_ID (STB_CBUS_BASE + 0x18) +#define P_OTHER_STREAM_ID CBUS_REG_ADDR(OTHER_STREAM_ID) +#define OTHER_STREAM_ID_2 (STB_CBUS_BASE + 0x68) +#define P_OTHER_STREAM_ID_2 CBUS_REG_ADDR(OTHER_STREAM_ID_2) +#define OTHER_STREAM_ID_3 (STB_CBUS_BASE + 0xb8) +#define P_OTHER_STREAM_ID_3 CBUS_REG_ADDR(OTHER_STREAM_ID_3) +#define PCR90K_CTL (STB_CBUS_BASE + 0x19) +#define P_PCR90K_CTL CBUS_REG_ADDR(PCR90K_CTL) +#define PCR90K_CTL_2 (STB_CBUS_BASE + 0x69) +#define P_PCR90K_CTL_2 CBUS_REG_ADDR(PCR90K_CTL_2) +#define PCR90K_CTL_3 (STB_CBUS_BASE + 0xb9) +#define P_PCR90K_CTL_3 CBUS_REG_ADDR(PCR90K_CTL_3) +/*no used end*/ +#define PCR_DEMUX (STB_CBUS_BASE + 0x1a) +#define P_PCR_DEMUX CBUS_REG_ADDR(PCR_DEMUX) +#define PCR_DEMUX_2 (STB_CBUS_BASE + 0x6a) +#define P_PCR_DEMUX_2 CBUS_REG_ADDR(PCR_DEMUX_2) +#define PCR_DEMUX_3 (STB_CBUS_BASE + 0xba) +#define P_PCR_DEMUX_3 CBUS_REG_ADDR(PCR_DEMUX_3) + +#define VIDEO_PTS_DEMUX (STB_CBUS_BASE + 0x1b) +#define P_VIDEO_PTS_DEMUX CBUS_REG_ADDR(VIDEO_PTS_DEMUX) +#define VIDEO_PTS_DEMUX_2 (STB_CBUS_BASE + 0x6b) +#define P_VIDEO_PTS_DEMUX_2 CBUS_REG_ADDR(VIDEO_PTS_DEMUX_2) +#define VIDEO_PTS_DEMUX_3 (STB_CBUS_BASE + 0xbb) +#define P_VIDEO_PTS_DEMUX_3 CBUS_REG_ADDR(VIDEO_PTS_DEMUX_3) +/*no used*/ +#define VIDEO_DTS_DEMUX (STB_CBUS_BASE + 0x1c) +#define P_VIDEO_DTS_DEMUX CBUS_REG_ADDR(VIDEO_DTS_DEMUX) +#define VIDEO_DTS_DEMUX_2 (STB_CBUS_BASE + 0x6c) +#define P_VIDEO_DTS_DEMUX_2 CBUS_REG_ADDR(VIDEO_DTS_DEMUX_2) +#define VIDEO_DTS_DEMUX_3 (STB_CBUS_BASE + 0xbc) +#define P_VIDEO_DTS_DEMUX_3 CBUS_REG_ADDR(VIDEO_DTS_DEMUX_3) +/*no used end*/ +#define AUDIO_PTS_DEMUX (STB_CBUS_BASE + 0x1d) 
+#define P_AUDIO_PTS_DEMUX CBUS_REG_ADDR(AUDIO_PTS_DEMUX) +#define AUDIO_PTS_DEMUX_2 (STB_CBUS_BASE + 0x6d) +#define P_AUDIO_PTS_DEMUX_2 CBUS_REG_ADDR(AUDIO_PTS_DEMUX_2) +#define AUDIO_PTS_DEMUX_3 (STB_CBUS_BASE + 0xbd) +#define P_AUDIO_PTS_DEMUX_3 CBUS_REG_ADDR(AUDIO_PTS_DEMUX_3) +/*no used */ +#define SUB_PTS_DEMUX (STB_CBUS_BASE + 0x1e) +#define P_SUB_PTS_DEMUX CBUS_REG_ADDR(SUB_PTS_DEMUX) +#define SUB_PTS_DEMUX_2 (STB_CBUS_BASE + 0x6e) +#define P_SUB_PTS_DEMUX_2 CBUS_REG_ADDR(SUB_PTS_DEMUX_2) +#define SUB_PTS_DEMUX_3 (STB_CBUS_BASE + 0xbe) +#define P_SUB_PTS_DEMUX_3 CBUS_REG_ADDR(SUB_PTS_DEMUX_3) +/*no used end*/ +#define STB_PTS_DTS_STATUS (STB_CBUS_BASE + 0x1f) +#define P_STB_PTS_DTS_STATUS CBUS_REG_ADDR(STB_PTS_DTS_STATUS) +#define STB_PTS_DTS_STATUS_2 (STB_CBUS_BASE + 0x6f) +#define P_STB_PTS_DTS_STATUS_2 CBUS_REG_ADDR(STB_PTS_DTS_STATUS_2) +#define STB_PTS_DTS_STATUS_3 (STB_CBUS_BASE + 0xbf) +#define P_STB_PTS_DTS_STATUS_3 CBUS_REG_ADDR(STB_PTS_DTS_STATUS_3) + +/*no use*/ +#define STB_DEBUG_INDEX (STB_CBUS_BASE + 0x20) +#define P_STB_DEBUG_INDEX CBUS_REG_ADDR(STB_DEBUG_INDEX) +#define STB_DEBUG_INDEX_2 (STB_CBUS_BASE + 0x70) +#define P_STB_DEBUG_INDEX_2 CBUS_REG_ADDR(STB_DEBUG_INDEX_2) +#define STB_DEBUG_INDEX_3 (STB_CBUS_BASE + 0xc0) +#define P_STB_DEBUG_INDEX_3 CBUS_REG_ADDR(STB_DEBUG_INDEX_3) +#define STB_DEBUG_DATAUT_O (STB_CBUS_BASE + 0x21) +#define P_STB_DEBUG_DATAUT_O CBUS_REG_ADDR(STB_DEBUG_DATAUT_O) +#define STB_DEBUG_DATAUT_O_2 (STB_CBUS_BASE + 0x71) +#define P_STB_DEBUG_DATAUT_O_2 CBUS_REG_ADDR(STB_DEBUG_DATAUT_O_2) +#define STB_DEBUG_DATAUT_O_3 (STB_CBUS_BASE + 0xc1) +#define P_STB_DEBUG_DATAUT_O_3 CBUS_REG_ADDR(STB_DEBUG_DATAUT_O_3) +/*no use end*/ + +#define STBM_CTL_O (STB_CBUS_BASE + 0x22) +#define P_STBM_CTL_O CBUS_REG_ADDR(STBM_CTL_O) +#define STBM_CTL_O_2 (STB_CBUS_BASE + 0x72) +#define P_STBM_CTL_O_2 CBUS_REG_ADDR(STBM_CTL_O_2) +#define STBM_CTL_O_3 (STB_CBUS_BASE + 0xc2) +#define P_STBM_CTL_O_3 CBUS_REG_ADDR(STBM_CTL_O_3) +#define 
STB_INT_STATUS (STB_CBUS_BASE + 0x23) +#define P_STB_INT_STATUS CBUS_REG_ADDR(STB_INT_STATUS) +#define STB_INT_STATUS_2 (STB_CBUS_BASE + 0x73) +#define P_STB_INT_STATUS_2 CBUS_REG_ADDR(STB_INT_STATUS_2) +#define STB_INT_STATUS_3 (STB_CBUS_BASE + 0xc3) +#define P_STB_INT_STATUS_3 CBUS_REG_ADDR(STB_INT_STATUS_3) +#define DEMUX_ENDIAN (STB_CBUS_BASE + 0x24) +#define P_DEMUX_ENDIAN CBUS_REG_ADDR(DEMUX_ENDIAN) +#define DEMUX_ENDIAN_2 (STB_CBUS_BASE + 0x74) +#define P_DEMUX_ENDIAN_2 CBUS_REG_ADDR(DEMUX_ENDIAN_2) +#define DEMUX_ENDIAN_3 (STB_CBUS_BASE + 0xc4) +#define P_DEMUX_ENDIAN_3 CBUS_REG_ADDR(DEMUX_ENDIAN_3) +#define TS_HIU_CTL (STB_CBUS_BASE + 0x25) +#define P_TS_HIU_CTL CBUS_REG_ADDR(TS_HIU_CTL) +#define TS_HIU_CTL_2 (STB_CBUS_BASE + 0x75) +#define P_TS_HIU_CTL_2 CBUS_REG_ADDR(TS_HIU_CTL_2) +#define TS_HIU_CTL_3 (STB_CBUS_BASE + 0xc5) +#define P_TS_HIU_CTL_3 CBUS_REG_ADDR(TS_HIU_CTL_3) + +#define SEC_BUFF_BASE (STB_CBUS_BASE + 0x26) +#define P_SEC_BUFF_BASE CBUS_REG_ADDR(SEC_BUFF_BASE) +#define SEC_BUFF_BASE_2 (STB_CBUS_BASE + 0x76) +#define P_SEC_BUFF_BASE_2 CBUS_REG_ADDR(SEC_BUFF_BASE_2) +#define SEC_BUFF_BASE_3 (STB_CBUS_BASE + 0xc6) +#define P_SEC_BUFF_BASE_3 CBUS_REG_ADDR(SEC_BUFF_BASE_3) +#define DEMUX_MEM_REQ_EN (STB_CBUS_BASE + 0x27) +#define P_DEMUX_MEM_REQ_EN CBUS_REG_ADDR(DEMUX_MEM_REQ_EN) +#define DEMUX_MEM_REQ_EN_2 (STB_CBUS_BASE + 0x77) +#define P_DEMUX_MEM_REQ_EN_2 CBUS_REG_ADDR(DEMUX_MEM_REQ_EN_2) +#define DEMUX_MEM_REQ_EN_3 (STB_CBUS_BASE + 0xc7) +#define P_DEMUX_MEM_REQ_EN_3 CBUS_REG_ADDR(DEMUX_MEM_REQ_EN_3) + + +/*no use*/ +#define VIDEO_PDTS_WR_PTR (STB_CBUS_BASE + 0x28) +#define P_VIDEO_PDTS_WR_PTR CBUS_REG_ADDR(VIDEO_PDTS_WR_PTR) +#define VIDEO_PDTS_WR_PTR_2 (STB_CBUS_BASE + 0x78) +#define P_VIDEO_PDTS_WR_PTR_2 CBUS_REG_ADDR(VIDEO_PDTS_WR_PTR_2) +#define VIDEO_PDTS_WR_PTR_3 (STB_CBUS_BASE + 0xc8) +#define P_VIDEO_PDTS_WR_PTR_3 CBUS_REG_ADDR(VIDEO_PDTS_WR_PTR_3) +#define AUDIO_PDTS_WR_PTR (STB_CBUS_BASE + 0x29) +#define P_AUDIO_PDTS_WR_PTR 
CBUS_REG_ADDR(AUDIO_PDTS_WR_PTR) +#define AUDIO_PDTS_WR_PTR_2 (STB_CBUS_BASE + 0x79) +#define P_AUDIO_PDTS_WR_PTR_2 CBUS_REG_ADDR(AUDIO_PDTS_WR_PTR_2) +#define AUDIO_PDTS_WR_PTR_3 (STB_CBUS_BASE + 0xc9) +#define P_AUDIO_PDTS_WR_PTR_3 CBUS_REG_ADDR(AUDIO_PDTS_WR_PTR_3) +#define SUB_WR_PTR (STB_CBUS_BASE + 0x2a) +#define P_SUB_WR_PTR CBUS_REG_ADDR(SUB_WR_PTR) +#define SUB_WR_PTR_2 (STB_CBUS_BASE + 0x7a) +#define P_SUB_WR_PTR_2 CBUS_REG_ADDR(SUB_WR_PTR_2) +#define SUB_WR_PTR_3 (STB_CBUS_BASE + 0xca) +#define P_SUB_WR_PTR_3 CBUS_REG_ADDR(SUB_WR_PTR_3) +/*no use*/ + +#define SB_START (STB_CBUS_BASE + 0x2b) +#define P_SB_START CBUS_REG_ADDR(SB_START) +#define SB_START_2 (STB_CBUS_BASE + 0x7b) +#define P_SB_START_2 CBUS_REG_ADDR(SB_START_2) +#define SB_START_3 (STB_CBUS_BASE + 0xcb) +#define P_SB_START_3 CBUS_REG_ADDR(SB_START_3) +#define SB_LAST_ADDR (STB_CBUS_BASE + 0x2c) +#define P_SB_LAST_ADDR CBUS_REG_ADDR(SB_LAST_ADDR) +#define SB_LAST_ADDR_2 (STB_CBUS_BASE + 0x7c) +#define P_SB_LAST_ADDR_2 CBUS_REG_ADDR(SB_LAST_ADDR_2) +#define SB_LAST_ADDR_3 (STB_CBUS_BASE + 0xcc) +#define P_SB_LAST_ADDR_3 CBUS_REG_ADDR(SB_LAST_ADDR_3) +#define SB_PES_WR_PTR (STB_CBUS_BASE + 0x2d) +#define P_SB_PES_WR_PTR CBUS_REG_ADDR(SB_PES_WR_PTR) +#define SB_PES_WR_PTR_2 (STB_CBUS_BASE + 0x7d) +#define P_SB_PES_WR_PTR_2 CBUS_REG_ADDR(SB_PES_WR_PTR_2) +#define SB_PES_WR_PTR_3 (STB_CBUS_BASE + 0xcd) +#define P_SB_PES_WR_PTR_3 CBUS_REG_ADDR(SB_PES_WR_PTR_3) +#define OTHER_WR_PTR (STB_CBUS_BASE + 0x2e) +#define P_OTHER_WR_PTR CBUS_REG_ADDR(OTHER_WR_PTR) +#define OTHER_WR_PTR_2 (STB_CBUS_BASE + 0x7e) +#define P_OTHER_WR_PTR_2 CBUS_REG_ADDR(OTHER_WR_PTR_2) +#define OTHER_WR_PTR_3 (STB_CBUS_BASE + 0xce) +#define P_OTHER_WR_PTR_3 CBUS_REG_ADDR(OTHER_WR_PTR_3) + +#define OB_START (STB_CBUS_BASE + 0x2f) +#define P_OB_START CBUS_REG_ADDR(OB_START) +#define OB_START_2 (STB_CBUS_BASE + 0x7f) +#define P_OB_START_2 CBUS_REG_ADDR(OB_START_2) +#define OB_START_3 (STB_CBUS_BASE + 0xcf) +#define P_OB_START_3 
CBUS_REG_ADDR(OB_START_3) +#define OB_LAST_ADDR (STB_CBUS_BASE + 0x30) +#define P_OB_LAST_ADDR CBUS_REG_ADDR(OB_LAST_ADDR) +#define OB_LAST_ADDR_2 (STB_CBUS_BASE + 0x80) +#define P_OB_LAST_ADDR_2 CBUS_REG_ADDR(OB_LAST_ADDR_2) +#define OB_LAST_ADDR_3 (STB_CBUS_BASE + 0xd0) +#define P_OB_LAST_ADDR_3 CBUS_REG_ADDR(OB_LAST_ADDR_3) +#define OB_PES_WR_PTR (STB_CBUS_BASE + 0x31) +#define P_OB_PES_WR_PTR CBUS_REG_ADDR(OB_PES_WR_PTR) +#define OB_PES_WR_PTR_2 (STB_CBUS_BASE + 0x81) +#define P_OB_PES_WR_PTR_2 CBUS_REG_ADDR(OB_PES_WR_PTR_2) +#define OB_PES_WR_PTR_3 (STB_CBUS_BASE + 0xd1) +#define P_OB_PES_WR_PTR_3 CBUS_REG_ADDR(OB_PES_WR_PTR_3) +#define STB_INT_MASK (STB_CBUS_BASE + 0x32) +#define P_STB_INT_MASK CBUS_REG_ADDR(STB_INT_MASK) +#define STB_INT_MASK_2 (STB_CBUS_BASE + 0x82) +#define P_STB_INT_MASK_2 CBUS_REG_ADDR(STB_INT_MASK_2) +#define STB_INT_MASK_3 (STB_CBUS_BASE + 0xd2) +#define P_STB_INT_MASK_3 CBUS_REG_ADDR(STB_INT_MASK_3) +/*no used */ +#define VIDEO_SPLICING_CTL (STB_CBUS_BASE + 0x33) +#define P_VIDEO_SPLICING_CTL CBUS_REG_ADDR(VIDEO_SPLICING_CTL) +#define VIDEO_SPLICING_CTL_2 (STB_CBUS_BASE + 0x83) +#define P_VIDEO_SPLICING_CTL_2 CBUS_REG_ADDR(VIDEO_SPLICING_CTL_2) +#define VIDEO_SPLICING_CTL_3 (STB_CBUS_BASE + 0xd3) +#define P_VIDEO_SPLICING_CTL_3 CBUS_REG_ADDR(VIDEO_SPLICING_CTL_3) +#define AUDIO_SPLICING_CTL (STB_CBUS_BASE + 0x34) +#define P_AUDIO_SPLICING_CTL CBUS_REG_ADDR(AUDIO_SPLICING_CTL) +#define AUDIO_SPLICING_CTL_2 (STB_CBUS_BASE + 0x84) +#define P_AUDIO_SPLICING_CTL_2 CBUS_REG_ADDR(AUDIO_SPLICING_CTL_2) +#define AUDIO_SPLICING_CTL_3 (STB_CBUS_BASE + 0xd4) +#define P_AUDIO_SPLICING_CTL_3 CBUS_REG_ADDR(AUDIO_SPLICING_CTL_3) +#define TS_PACKAGE_BYTE_COUNT (STB_CBUS_BASE + 0x35) +#define P_TS_PACKAGE_BYTE_COUNT \ + CBUS_REG_ADDR(TS_PACKAGE_BYTE_COUNT) +#define TS_PACKAGE_BYTE_COUNT_2 (STB_CBUS_BASE + 0x85) +#define P_TS_PACKAGE_BYTE_COUNT_2 \ + CBUS_REG_ADDR(TS_PACKAGE_BYTE_COUNT_2) +#define TS_PACKAGE_BYTE_COUNT_3 (STB_CBUS_BASE + 0xd5) +#define 
P_TS_PACKAGE_BYTE_COUNT_3 \ + CBUS_REG_ADDR(TS_PACKAGE_BYTE_COUNT_3) +/*no used end*/ + +#define PES_STRONG_SYNC (STB_CBUS_BASE + 0x36) +#define P_PES_STRONG_SYNC CBUS_REG_ADDR(PES_STRONG_SYNC) +#define PES_STRONG_SYNC_2 (STB_CBUS_BASE + 0x86) +#define P_PES_STRONG_SYNC_2 CBUS_REG_ADDR(PES_STRONG_SYNC_2) +#define PES_STRONG_SYNC_3 (STB_CBUS_BASE + 0xd6) +#define P_PES_STRONG_SYNC_3 CBUS_REG_ADDR(PES_STRONG_SYNC_3) + +#define OM_DATA_RD_ADDR (STB_CBUS_BASE + 0x37) +#define P_OM_DATA_RD_ADDR CBUS_REG_ADDR(OM_DATA_RD_ADDR) +#define OM_DATA_RD_ADDR_2 (STB_CBUS_BASE + 0x87) +#define P_OM_DATA_RD_ADDR_2 CBUS_REG_ADDR(OM_DATA_RD_ADDR_2) +#define OM_DATA_RD_ADDR_3 (STB_CBUS_BASE + 0xd7) +#define P_OM_DATA_RD_ADDR_3 CBUS_REG_ADDR(OM_DATA_RD_ADDR_3) +#define OM_DATA_RD (STB_CBUS_BASE + 0x38) +#define P_OM_DATA_RD CBUS_REG_ADDR(OM_DATA_RD) +#define OM_DATA_RD_2 (STB_CBUS_BASE + 0x88) +#define P_OM_DATA_RD_2 CBUS_REG_ADDR(OM_DATA_RD_2) +#define OM_DATA_RD_3 (STB_CBUS_BASE + 0xd8) +#define P_OM_DATA_RD_3 CBUS_REG_ADDR(OM_DATA_RD_3) + +/*no used*/ + +#define SECTION_AUTO_STOP_3 (STB_CBUS_BASE + 0x39) +#define P_SECTION_AUTO_STOP_3 CBUS_REG_ADDR(SECTION_AUTO_STOP_3) +#define SECTION_AUTO_STOP_3_2 (STB_CBUS_BASE + 0x89) +#define P_SECTION_AUTO_STOP_3_2 \ + CBUS_REG_ADDR(SECTION_AUTO_STOP_3_2) +#define SECTION_AUTO_STOP_3_3 (STB_CBUS_BASE + 0xd9) +#define P_SECTION_AUTO_STOP_3_3 \ + CBUS_REG_ADDR(SECTION_AUTO_STOP_3_3) +#define SECTION_AUTO_STOP_2 (STB_CBUS_BASE + 0x3a) +#define P_SECTION_AUTO_STOP_2 \ + CBUS_REG_ADDR(SECTION_AUTO_STOP_2) +#define SECTION_AUTO_STOP_2_2 (STB_CBUS_BASE + 0x8a) +#define P_SECTION_AUTO_STOP_2_2 \ + CBUS_REG_ADDR(SECTION_AUTO_STOP_2_2) +#define SECTION_AUTO_STOP_2_3 (STB_CBUS_BASE + 0xda) +#define P_SECTION_AUTO_STOP_2_3 \ + CBUS_REG_ADDR(SECTION_AUTO_STOP_2_3) +#define SECTION_AUTO_STOP_1 (STB_CBUS_BASE + 0x3b) +#define P_SECTION_AUTO_STOP_1 CBUS_REG_ADDR(SECTION_AUTO_STOP_1) +#define SECTION_AUTO_STOP_1_2 (STB_CBUS_BASE + 0x8b) +#define 
P_SECTION_AUTO_STOP_1_2 \ + CBUS_REG_ADDR(SECTION_AUTO_STOP_1_2) +#define SECTION_AUTO_STOP_1_3 (STB_CBUS_BASE + 0xdb) +#define P_SECTION_AUTO_STOP_1_3 \ + CBUS_REG_ADDR(SECTION_AUTO_STOP_1_3) +#define SECTION_AUTO_STOP_0 (STB_CBUS_BASE + 0x3c) +#define P_SECTION_AUTO_STOP_0 \ + CBUS_REG_ADDR(SECTION_AUTO_STOP_0) +#define SECTION_AUTO_STOP_0_2 (STB_CBUS_BASE + 0x8c) +#define P_SECTION_AUTO_STOP_0_2 \ + CBUS_REG_ADDR(SECTION_AUTO_STOP_0_2) +#define SECTION_AUTO_STOP_0_3 (STB_CBUS_BASE + 0xdc) +#define P_SECTION_AUTO_STOP_0_3 \ + CBUS_REG_ADDR(SECTION_AUTO_STOP_0_3) + +#define DEMUX_CHANNEL_RESET (STB_CBUS_BASE + 0x3d) +#define P_DEMUX_CHANNEL_RESET \ + CBUS_REG_ADDR(DEMUX_CHANNEL_RESET) +#define DEMUX_CHANNEL_RESET_2 (STB_CBUS_BASE + 0x8d) +#define P_DEMUX_CHANNEL_RESET_2 \ + CBUS_REG_ADDR(DEMUX_CHANNEL_RESET_2) +#define DEMUX_CHANNEL_RESET_3 (STB_CBUS_BASE + 0xdd) +#define P_DEMUX_CHANNEL_RESET_3 \ + CBUS_REG_ADDR(DEMUX_CHANNEL_RESET_3) +/*no use end*/ +#define DEMUX_SCRAMBLING_STATE (STB_CBUS_BASE + 0x3e) +#define DEMUX_SCRAMBLING_STATE_2 (STB_CBUS_BASE + 0x8e) +#define P_DEMUX_SCRAMBLING_STATE_2 \ + CBUS_REG_ADDR(DEMUX_SCRAMBLING_STATE_2) +#define DEMUX_SCRAMBLING_STATE_3 (STB_CBUS_BASE + 0xde) +#define P_DEMUX_SCRAMBLING_STATE_3 \ + CBUS_REG_ADDR(DEMUX_SCRAMBLING_STATE_3) +#define DEMUX_CHANNEL_ACTIVITY (STB_CBUS_BASE + 0x3f) +#define P_DEMUX_CHANNEL_ACTIVITY \ + CBUS_REG_ADDR(DEMUX_CHANNEL_ACTIVITY) +#define DEMUX_CHANNEL_ACTIVITY_2 (STB_CBUS_BASE + 0x8f) +#define P_DEMUX_CHANNEL_ACTIVITY_2 \ + CBUS_REG_ADDR(DEMUX_CHANNEL_ACTIVITY_2) +#define DEMUX_CHANNEL_ACTIVITY_3 (STB_CBUS_BASE + 0xdf) +#define P_DEMUX_CHANNEL_ACTIVITY_3 \ + CBUS_REG_ADDR(DEMUX_CHANNEL_ACTIVITY_3) + +/*no use*/ + +#define DEMUX_STAMP_CTL (STB_CBUS_BASE + 0x40) +#define P_DEMUX_STAMP_CTL CBUS_REG_ADDR(DEMUX_STAMP_CTL) +#define DEMUX_STAMP_CTL_2 (STB_CBUS_BASE + 0x90) +#define P_DEMUX_STAMP_CTL_2 \ + CBUS_REG_ADDR(DEMUX_STAMP_CTL_2) +#define DEMUX_STAMP_CTL_3 (STB_CBUS_BASE + 0xe0) +#define 
P_DEMUX_STAMP_CTL_3 \ + CBUS_REG_ADDR(DEMUX_STAMP_CTL_3) +#define DEMUX_VIDEO_STAMP_SYNC_0 (STB_CBUS_BASE + 0x41) +#define P_DEMUX_VIDEO_STAMP_SYNC_0 \ + CBUS_REG_ADDR(DEMUX_VIDEO_STAMP_SYNC_0) +#define DEMUX_VIDEO_STAMP_SYNC_0_2 (STB_CBUS_BASE + 0x91) +#define P_DEMUX_VIDEO_STAMP_SYNC_0_2 \ + CBUS_REG_ADDR(DEMUX_VIDEO_STAMP_SYNC_0_2) +#define DEMUX_VIDEO_STAMP_SYNC_0_3 (STB_CBUS_BASE + 0xe1) +#define P_DEMUX_VIDEO_STAMP_SYNC_0_3 \ + CBUS_REG_ADDR(DEMUX_VIDEO_STAMP_SYNC_0_3) +#define DEMUX_VIDEO_STAMP_SYNC_1 (STB_CBUS_BASE + 0x42) +#define P_DEMUX_VIDEO_STAMP_SYNC_1 \ + CBUS_REG_ADDR(DEMUX_VIDEO_STAMP_SYNC_1) +#define DEMUX_VIDEO_STAMP_SYNC_1_2 (STB_CBUS_BASE + 0x92) +#define P_DEMUX_VIDEO_STAMP_SYNC_1_2 \ + CBUS_REG_ADDR(DEMUX_VIDEO_STAMP_SYNC_1_2) +#define DEMUX_VIDEO_STAMP_SYNC_1_3 (STB_CBUS_BASE + 0xe2) +#define P_DEMUX_VIDEO_STAMP_SYNC_1_3 \ + CBUS_REG_ADDR(DEMUX_VIDEO_STAMP_SYNC_1_3) +#define DEMUX_AUDIO_STAMP_SYNC_0 (STB_CBUS_BASE + 0x43) +#define P_DEMUX_AUDIO_STAMP_SYNC_0 \ + CBUS_REG_ADDR(DEMUX_AUDIO_STAMP_SYNC_0) +#define DEMUX_AUDIO_STAMP_SYNC_0_2 (STB_CBUS_BASE + 0x93) +#define P_DEMUX_AUDIO_STAMP_SYNC_0_2 \ + CBUS_REG_ADDR(DEMUX_AUDIO_STAMP_SYNC_0_2) +#define DEMUX_AUDIO_STAMP_SYNC_0_3 (STB_CBUS_BASE + 0xe3) +#define P_DEMUX_AUDIO_STAMP_SYNC_0_3 \ + CBUS_REG_ADDR(DEMUX_AUDIO_STAMP_SYNC_0_3) +#define DEMUX_AUDIO_STAMP_SYNC_1 (STB_CBUS_BASE + 0x44) +#define P_DEMUX_AUDIO_STAMP_SYNC_1 \ + CBUS_REG_ADDR(DEMUX_AUDIO_STAMP_SYNC_1) +#define DEMUX_AUDIO_STAMP_SYNC_1_2 (STB_CBUS_BASE + 0x94) +#define P_DEMUX_AUDIO_STAMP_SYNC_1_2 \ + CBUS_REG_ADDR(DEMUX_AUDIO_STAMP_SYNC_1_2) +#define DEMUX_AUDIO_STAMP_SYNC_1_3 (STB_CBUS_BASE + 0xe4) +#define P_DEMUX_AUDIO_STAMP_SYNC_1_3 \ + CBUS_REG_ADDR(DEMUX_AUDIO_STAMP_SYNC_1_3) +#define DEMUX_SECTION_RESET (STB_CBUS_BASE + 0x45) +#define P_DEMUX_SECTION_RESET CBUS_REG_ADDR(DEMUX_SECTION_RESET) +#define DEMUX_SECTION_RESET_2 (STB_CBUS_BASE + 0x95) +#define P_DEMUX_SECTION_RESET_2 \ + CBUS_REG_ADDR(DEMUX_SECTION_RESET_2) 
#define DEMUX_SECTION_RESET_3 (STB_CBUS_BASE + 0xe5)
#define P_DEMUX_SECTION_RESET_3 \
	CBUS_REG_ADDR(DEMUX_SECTION_RESET_3)
/*no use end*/

/*
 * Per-demux register map: each register exists three times, once per demux
 * instance, at DEMUX_1_OFFSET / DEMUX_2_OFFSET / DEMUX_3_OFFSET from
 * STB_CBUS_BASE.  The trailing comment on each define is the resulting
 * absolute CBUS register number.
 */
/*from c_stb_define.h*/
#define COMM_DESC_2_CTL (STB_CBUS_BASE + 0xff) /*0x16ff*/

#define STB_OM_CTL \
	(STB_CBUS_BASE + DEMUX_1_OFFSET + 0x22) /* 0x1622*/
#define STB_OM_CTL_2 \
	(STB_CBUS_BASE + DEMUX_2_OFFSET + 0x22) /* 0x1672*/
#define STB_OM_CTL_3 \
	(STB_CBUS_BASE + DEMUX_3_OFFSET + 0x22) /* 0x16c2*/

#define DEMUX_INPUT_TIMEOUT_C \
	(STB_CBUS_BASE + DEMUX_1_OFFSET + 0x46) /* 0x1646*/
#define DEMUX_INPUT_TIMEOUT_C_2 \
	(STB_CBUS_BASE + DEMUX_2_OFFSET + 0x46) /* 0x1696*/
#define DEMUX_INPUT_TIMEOUT_C_3 \
	(STB_CBUS_BASE + DEMUX_3_OFFSET + 0x46) /* 0x16e6*/
/* bit[31] - no_match_reset_timeout_disable*/
/* bit[30:0] input_time_out_int_cnt (0 -- means disable) Wr-setting, Rd-count*/
#define DEMUX_INPUT_TIMEOUT \
	(STB_CBUS_BASE + DEMUX_1_OFFSET + 0x47) /* 0x1647*/
#define DEMUX_INPUT_TIMEOUT_2 \
	(STB_CBUS_BASE + DEMUX_2_OFFSET + 0x47) /* 0x1697*/
#define DEMUX_INPUT_TIMEOUT_3 \
	(STB_CBUS_BASE + DEMUX_3_OFFSET + 0x47) /* 0x16e7*/

/* bit[31:0] - channel_packet_count_disable*/
#define DEMUX_PACKET_COUNT_C \
	(STB_CBUS_BASE + DEMUX_1_OFFSET + 0x48) /* 0x1648*/
#define DEMUX_PACKET_COUNT_C_2 \
	(STB_CBUS_BASE + DEMUX_2_OFFSET + 0x48) /* 0x1698*/
#define DEMUX_PACKET_COUNT_C_3 \
	(STB_CBUS_BASE + DEMUX_3_OFFSET + 0x48) /* 0x16e8*/
/* bit[31] - no_match_packet_count_disable*/
/* bit[30:0] input_packet_count*/
#define DEMUX_PACKET_COUNT \
	(STB_CBUS_BASE + DEMUX_1_OFFSET + 0x49) /* 0x1649*/
#define DEMUX_PACKET_COUNT_2 \
	(STB_CBUS_BASE + DEMUX_2_OFFSET + 0x49) /* 0x1699*/
#define DEMUX_PACKET_COUNT_3 \
	(STB_CBUS_BASE + DEMUX_3_OFFSET + 0x49) /* 0x16e9*/

/* bit[31:0] channel_record_enable*/
#define DEMUX_CHAN_RECORD_EN \
	(STB_CBUS_BASE + DEMUX_1_OFFSET + 0x4a) /* 0x164a*/
#define DEMUX_CHAN_RECORD_EN_2 \
	(STB_CBUS_BASE + DEMUX_2_OFFSET + 0x4a) /* 0x169a*/
#define DEMUX_CHAN_RECORD_EN_3 \
	(STB_CBUS_BASE + DEMUX_3_OFFSET + 0x4a) /* 0x16ea*/

/* bit[31:0] channel_process_enable*/
#define DEMUX_CHAN_PROCESS_EN \
	(STB_CBUS_BASE + DEMUX_1_OFFSET + 0x4b) /* 0x164b*/
#define DEMUX_CHAN_PROCESS_EN_2 \
	(STB_CBUS_BASE + DEMUX_2_OFFSET + 0x4b) /* 0x169b*/
#define DEMUX_CHAN_PROCESS_EN_3 \
	(STB_CBUS_BASE + DEMUX_3_OFFSET + 0x4b) /* 0x16eb*/

/* bit[31:24] small_sec_size ((n+1) * 256 Bytes)*/
/* bit[23:16] small_sec_rd_ptr */
/* bit[15:8] small_sec_wr_ptr */
/* bit[7:2] reserved*/
/* bit[1] small_sec_wr_ptr_wr_enable*/
/* bit[0] small_section_enable*/
#define DEMUX_SMALL_SEC_CTL \
	(STB_CBUS_BASE + DEMUX_1_OFFSET + 0x4c) /* 0x164c*/
#define DEMUX_SMALL_SEC_CTL_2 \
	(STB_CBUS_BASE + DEMUX_2_OFFSET + 0x4c) /* 0x169c*/
#define DEMUX_SMALL_SEC_CTL_3 \
	(STB_CBUS_BASE + DEMUX_3_OFFSET + 0x4c) /* 0x16ec*/
/* bit[31:0] small_sec_start_addr*/
#define DEMUX_SMALL_SEC_ADDR \
	(STB_CBUS_BASE + DEMUX_1_OFFSET + 0x4d) /* 0x164d*/
#define DEMUX_SMALL_SEC_ADDR_2 \
	(STB_CBUS_BASE + DEMUX_2_OFFSET + 0x4d) /* 0x169d*/
#define DEMUX_SMALL_SEC_ADDR_3 \
	(STB_CBUS_BASE + DEMUX_3_OFFSET + 0x4d) /* 0x16ed*/

#endif
diff --git a/drivers/stream_input/parser/psparser.c b/drivers/stream_input/parser/psparser.c new file mode 100644 index 0000000..20c1d1a --- /dev/null +++ b/drivers/stream_input/parser/psparser.c
@@ -0,0 +1,1199 @@ +/* + * drivers/amlogic/media/stream_input/parser/psparser.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/utils/amstream.h> + +#include <linux/uaccess.h> +/* #include <mach/am_regs.h> */ +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../amports/streambuf_reg.h" +#include "../amports/streambuf.h" +#include "psparser.h" +#include "../amports/amports_priv.h" + + +#define TIMESTAMP_IONLY 1 +#define SAVE_SCR 0 + +#define MPEG_START_CODE_PATTERN (0x00000100L) +#define MPEG_START_CODE_MASK (0xffffff00L) +#define MAX_MPG_AUDIOPK_SIZE 0x1000 + +#define SUB_INSERT_START_CODE_HIGH 0x414d4c55 +#define SUB_INSERT_START_CODE_LOW 0xaa000000 + +#define PARSER_WRITE (ES_WRITE | ES_PARSER_START) +#define PARSER_VIDEO (ES_TYPE_VIDEO) +#define PARSER_AUDIO (ES_TYPE_AUDIO) +#define PARSER_SUBPIC (ES_TYPE_SUBTITLE) +#define PARSER_PASSTHROUGH (ES_PASSTHROUGH | ES_PARSER_START) +#define PARSER_AUTOSEARCH (ES_SEARCH | ES_PARSER_START) +#define PARSER_DISCARD (ES_DISCARD | ES_PARSER_START) +#define PARSER_BUSY (ES_PARSER_BUSY) + +#define PARSER_PARAMETER_LENGTH_BIT 16 +#define PARSER_PARAMETER_LOOP_BIT 24 + 
+#define PARSER_POP READ_PARSER_REG(PFIFO_DATA) +#define SET_BLOCK(size) \ +WRITE_PARSER_REG_BITS(PARSER_CONTROL, size, ES_PACK_SIZE_BIT, ES_PACK_SIZE_WID) +#define SET_DISCARD_SIZE(size) WRITE_PARSER_REG(PARSER_PARAMETER, size) + +#define VIDEO_AUTO_FLUSH +#ifdef VIDEO_AUTO_FLUSH +static u32 video_auto_flush_state; +#define VIDEO_AUTO_FLUSH_IDLE 0 +#define VIDEO_AUTO_FLUSH_MONITOR 1 +#define VIDEO_AUTO_FLUSH_TRIGGER 2 +#define VIDEO_AUTO_FLUSH_DONE 3 +#define VIDEO_AUTO_FLUSH_PTS_THRESHOLD 90000 +#define VIDEO_AUTO_FLUSH_BYTE_COUNT 1024 + +static s32 audio_last_pts; +static s32 audio_monitor_pts; +#endif + +enum { + SEARCH_START_CODE = 0, + SEND_VIDEO_SEARCH, + SEND_AUDIO_SEARCH, + SEND_SUBPIC_SEARCH, + DISCARD_SEARCH, + DISCARD_ONLY +#ifdef VIDEO_AUTO_FLUSH + , + SEARCH_START_CODE_VIDEO_FLUSH +#endif +}; + +enum { + AUDIO_FIRST_ACCESS_ARM = 0, + AUDIO_FIRST_ACCESS_POPING, + AUDIO_FIRST_ACCESS_DONE +}; + +static const char psparser_id[] = "psparser-id"; + +static DECLARE_WAIT_QUEUE_HEAD(wq); + +static struct tasklet_struct psparser_tasklet; +static u32 fetch_done; +static u8 audio_id, video_id, sub_id, sub_id_max; +static u32 audio_first_access; +static u32 packet_remaining; +static u32 video_data_parsed; +static u32 audio_data_parsed; +static u32 pts_equ_dts_flag; + +static unsigned int first_apts, first_vpts; +static unsigned int audio_got_first_pts, video_got_first_dts, sub_got_first_pts; +atomic_t sub_block_found = ATOMIC_INIT(0); + +#define DEBUG_VOB_SUB +#ifdef DEBUG_VOB_SUB +static u8 sub_found_num; +static struct subtitle_info *sub_info[MAX_SUB_NUM]; +#endif + +static bool ptsmgr_first_vpts_ready(void) +{ + return (video_got_first_dts != 0) ? true : false; +} + +static bool ptsmgr_first_apts_ready(void) +{ + return (audio_got_first_pts != 0) ? 
/* Check a video timestamp in against the current parsed byte offset. */
static void ptsmgr_vpts_checkin(u32 pts)
{
	if (video_got_first_dts == 0) {
		video_got_first_dts = 1;
		first_vpts = pts;
	}

	pts_checkin_offset(PTS_TYPE_VIDEO, video_data_parsed, pts);
}

/*
 * Check an audio timestamp in, and drive the video auto-flush state
 * machine: once the first vpts is known, remember the apts at that point;
 * if audio then advances more than 1 s (90000 ticks) without video,
 * arm the flush trigger (consumed in on_start_code_found()).
 */
static void ptsmgr_apts_checkin(u32 pts)
{
	if (audio_got_first_pts == 0) {
		audio_got_first_pts = 1;
		first_apts = pts;
	}
	/* apts_checkin(pts); */
	pts_checkin_offset(PTS_TYPE_AUDIO, audio_data_parsed, pts);

#ifdef VIDEO_AUTO_FLUSH
	audio_last_pts = pts;

	if ((video_auto_flush_state == VIDEO_AUTO_FLUSH_IDLE)
		&& ptsmgr_first_vpts_ready()) {
		video_auto_flush_state = VIDEO_AUTO_FLUSH_MONITOR;
		audio_monitor_pts = pts;
	}

	if (video_auto_flush_state == VIDEO_AUTO_FLUSH_MONITOR) {
		if ((audio_last_pts - audio_monitor_pts) >
			VIDEO_AUTO_FLUSH_PTS_THRESHOLD)
			video_auto_flush_state = VIDEO_AUTO_FLUSH_TRIGGER;
	}
#endif
}

/*
 * Parse one PES packet header out of the parser peek FIFO and decide what
 * the HW parser should do with the payload.
 *
 * @type:       0 = video stream, 1 = MPEG audio, 2 = private stream 1
 *              (LPCM/AC3/DTS audio or DVD sub-picture).
 * @packet_len: PES_packet_length as read from the stream.
 *
 * Returns one of the next_action enum values (SEND_*_SEARCH,
 * DISCARD_SEARCH, DISCARD_ONLY, SEARCH_START_CODE).
 *
 * Every PARSER_POP consumes exactly one byte from the FIFO; the byte
 * accounting (packet_len / header_len) must stay in lockstep with the
 * pops, so do not reorder anything here.
 */
static u32 parser_process(s32 type, s32 packet_len)
{
	s16 temp, header_len, misc_flags, i;
	u32 pts = 0, dts = 0;
	/* bit1: PTS present, bit0: DTS present */
	u32 pts_dts_flag = 0;
	u16 invalid_pts = 0;

	temp = PARSER_POP;
	packet_len--;

	if ((temp >> 6) == 0x02) {
		/* mpeg-2 system: '10' marker, flags byte, header length */
		misc_flags = PARSER_POP;
		header_len = PARSER_POP;
		packet_len -= 2;
		packet_len -= header_len;

		if ((misc_flags >> 6) > 1) {
			/* PTS exist: 33-bit value spread over 5 bytes */
			pts = ((PARSER_POP >> 1) & 7) << 30; /* bit 32-30 */
			pts |= PARSER_POP << 22; /* bit 29-22 */
			pts |= (PARSER_POP >> 1) << 15; /* bit 21-15 */
			pts |= (PARSER_POP << 7); /* bit 14-07 */
			pts |= (PARSER_POP >> 1); /* bit 06-00 */
			header_len -= 5;
			pts_dts_flag |= 2;
		}

		if ((misc_flags >> 6) > 2) {
			/* DTS exist */
			dts = ((PARSER_POP >> 1) & 7) << 30; /* bit 32-30 */
			dts |= PARSER_POP << 22; /* bit 29-22 */
			dts |= (PARSER_POP >> 1) << 15; /* bit 21-15 */
			dts |= (PARSER_POP << 7); /* bit 14-07 */
			dts |= (PARSER_POP >> 1); /* bit 06-00 */
			header_len -= 5;
			pts_dts_flag |= 1;
		}

		if (misc_flags & 0x20) {
			/*
			 * ESCR_flag.
			 * NOTE(review): 6 bytes are popped here but only 5
			 * are subtracted from header_len — looks like an
			 * off-by-one that would make the trailing
			 * header_len drain loop eat one payload byte when
			 * an ESCR is present.  Confirm against streams
			 * carrying ESCR before changing.
			 */
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
			header_len -= 5;
		}

		if (misc_flags & 0x10) {
			/* ES_rate_flag */
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
			header_len -= 3;
		}

		if (misc_flags & 0x08) {
			/* DSM_trick_mode_flag */
			PARSER_POP;
			header_len -= 1;
		}

		if (misc_flags & 0x04) {
			/* additional_copy_info_flag */
			PARSER_POP;
			header_len -= 1;
		}

		if (misc_flags & 0x02) {
			/* PES_CRC_flag */
			PARSER_POP;
			PARSER_POP;
			header_len -= 2;
		}

		if (misc_flags & 0x01) {
			/* PES_extension_flag */
			misc_flags = PARSER_POP;
			header_len--;

			if ((misc_flags & 0x80) && (header_len >= 128)) {
				/* PES_private_data_flag */
				for (i = 0; i < 128; i++)
					PARSER_POP;

				header_len -= 128;
			}
#if 0
			if (misc_flags & 0x40) {
				/* pack_header_field_flag */
				/* Invalid case */
			}
#endif
			if (misc_flags & 0x20) {
				/* program_packet_sequence_counter_flag */
				PARSER_POP;
				PARSER_POP;
				header_len -= 2;
			}

			if (misc_flags & 0x10) {
				/* PSTD_buffer_flag */
				PARSER_POP;
				PARSER_POP;
				header_len -= 2;
			}

			if (misc_flags & 1) {
				/*
				 * PES_extension_flag_2.
				 * NOTE(review): the length byte itself is
				 * popped without decrementing header_len;
				 * the drain loop below hides this unless
				 * header_len was already exact — verify.
				 */
				temp = PARSER_POP & 0x7f;

				while (temp) {
					PARSER_POP;
					temp--;
					header_len--;
				}
			}

			/* drain whatever remains of the extension area */
			while (header_len) {
				PARSER_POP;
				header_len--;
			}
		}

		/* drain remaining stuffing bytes of the PES header */
		while (header_len) {
			PARSER_POP;
			header_len--;
		}

	} else {
		/* mpeg-1 system: skip stuffing, optional STD buffer field */
		while (temp == 0xff) {
			temp = PARSER_POP;
			packet_len--;
		}

		if ((temp >> 6) == 1) {
			PARSER_POP; /* STD buffer size */
			temp = PARSER_POP;
			packet_len -= 2;
		}

		if (((temp >> 4) == 2) || ((temp >> 4) == 3)) {
			/* PTS present (first byte already in temp) */
			pts = ((temp >> 1) & 7) << 30; /* bit 32-30 */
			pts |= PARSER_POP << 22; /* bit 29-22 */
			pts |= (PARSER_POP >> 1) << 15; /* bit 21-15 */
			pts |= (PARSER_POP << 7); /* bit 14-07 */
			pts |= (PARSER_POP >> 1); /* bit 06-00 */
			packet_len -= 4;
			pts_dts_flag |= 2;
		}

		if ((temp >> 4) == 3) {
			/* DTS follows the PTS */
			dts = ((PARSER_POP >> 1) & 7) << 30; /* bit 32-30 */
			dts |= PARSER_POP << 22; /* bit 29-22 */
			dts |= (PARSER_POP >> 1) << 15; /* bit 21-15 */
			dts |= (PARSER_POP << 7); /* bit 14-07 */
			dts |= (PARSER_POP >> 1); /* bit 06-00 */
			packet_len -= 5;
			pts_dts_flag |= 1;
		}
	}

	/*
	 * NOTE(review): dts starts at 0 and at most 33 assembled bits are
	 * ORed in, so dts == 0xffffffff can only come from a stream of all
	 * 0xff header bytes; presumably a marker for corrupt input —
	 * confirm intent.
	 */
	if ((pts == 0) && (dts == 0xffffffff)) {
		invalid_pts = 1;
		pr_info("invalid pts\n");
	}

	if (!packet_len)
		return SEARCH_START_CODE;

	else if (type == 0) {
#ifdef VIDEO_AUTO_FLUSH
		/* video advanced: restart the audio-vs-video gap monitor */
		if (video_auto_flush_state == VIDEO_AUTO_FLUSH_MONITOR)
			audio_monitor_pts = audio_last_pts;
#endif

		if ((pts_dts_flag) && (!invalid_pts)) {
#if TIMESTAMP_IONLY
			/*
			 * After the first checkin only accept timestamps on
			 * packets carrying both PTS and DTS; once a packet
			 * with PTS == DTS has been seen (I-frame-only
			 * streams), require equality from then on.
			 */
			if (!ptsmgr_first_vpts_ready()) {
				if (pts_dts_flag & 2)
					ptsmgr_vpts_checkin(pts);
				else
					ptsmgr_vpts_checkin(dts);
			} else if ((pts_dts_flag & 3) == 3) {
				if (pts_equ_dts_flag) {
					if (dts == pts)
						ptsmgr_vpts_checkin(pts);
				} else {
					if (dts == pts)
						pts_equ_dts_flag = 1;
					ptsmgr_vpts_checkin(pts);
				}
			}
#else
			if (!ptsmgr_first_vpts_ready()) {
				if (pts_dts_flag & 2)
					ptsmgr_vpts_checkin(pts);
				else
					ptsmgr_vpts_checkin(dts);
			} else if (pts_dts_flag & 2)
				ptsmgr_vpts_checkin(pts);
#endif
		}

		if (ptsmgr_first_vpts_ready() || invalid_pts) {
			SET_BLOCK(packet_len);
			video_data_parsed += packet_len;
			return SEND_VIDEO_SEARCH;

		} else {
			/* no reference timestamp yet: drop the payload */
			SET_DISCARD_SIZE(packet_len);
			return DISCARD_SEARCH;
		}

	} else if (type == 1) {
		/* mpeg audio */
		if (pts_dts_flag & 2)
			ptsmgr_apts_checkin(pts);

		if (ptsmgr_first_apts_ready()) {
			SET_BLOCK(packet_len);
			audio_data_parsed += packet_len;
			return SEND_AUDIO_SEARCH;

		} else {
			SET_DISCARD_SIZE(packet_len);
			return DISCARD_SEARCH;
		}

	} else if (type == 2) {
		/* Private stream */
		temp = PARSER_POP; /* sub_stream_id */
		packet_len--;

		if (((temp & 0xf8) == 0xa0) && (temp == audio_id)) {
			/* DVD_VIDEO Audio LPCM data */
			PARSER_POP;
			/* first-access pointer into the LPCM payload */
			temp = (PARSER_POP << 8) | PARSER_POP;
			if (temp == 0)
				temp = 4;
			temp--;
			packet_len -= 3;

			if (audio_first_access == AUDIO_FIRST_ACCESS_ARM) {
				if (temp) {
					/*
					 * Discard up to the first access
					 * unit; the tasklet resumes with
					 * packet_remaining afterwards.
					 */
					packet_remaining = packet_len - temp;
					SET_DISCARD_SIZE(temp);
					audio_first_access =
						AUDIO_FIRST_ACCESS_POPING;
					return DISCARD_ONLY;
				}

				audio_first_access = AUDIO_FIRST_ACCESS_DONE;

				if (packet_len) {
					SET_BLOCK(packet_len);
					audio_data_parsed += packet_len;
					return SEND_AUDIO_SEARCH;

				} else
					return SEARCH_START_CODE;

			} else {
				/* skip the 3 LPCM header bytes */
				PARSER_POP;
				PARSER_POP;
				PARSER_POP;
				packet_len -= 3;
			}

			if (pts_dts_flag & 2)
				ptsmgr_apts_checkin(pts);

			if (ptsmgr_first_apts_ready()) {
				SET_BLOCK(packet_len);
				audio_data_parsed += packet_len;
				return SEND_AUDIO_SEARCH;

			} else {
				SET_DISCARD_SIZE(packet_len);
				return DISCARD_SEARCH;
			}

		} else if (((temp & 0xf8) == 0x80) && (temp == audio_id)) {
			/* Audio AC3 data */
			PARSER_POP;
			/* first-access pointer into the AC3 payload */
			temp = (PARSER_POP << 8) | PARSER_POP;
			packet_len -= 3;

			if (audio_first_access == AUDIO_FIRST_ACCESS_ARM) {
				if (pts_dts_flag & 2)
					ptsmgr_apts_checkin(pts);

				if ((temp > 2) && (packet_len > (temp - 2))) {
					temp -= 2;
					packet_remaining = packet_len - temp;
					SET_DISCARD_SIZE(temp);
					audio_first_access =
						AUDIO_FIRST_ACCESS_POPING;
					return DISCARD_ONLY;
				}

				audio_first_access = AUDIO_FIRST_ACCESS_DONE;

				if (packet_len) {
					SET_BLOCK(packet_len);
					audio_data_parsed += packet_len;
					return SEND_AUDIO_SEARCH;

				} else
					return SEARCH_START_CODE;
			}

			if (pts_dts_flag & 2)
				ptsmgr_apts_checkin(pts);

			if (ptsmgr_first_apts_ready()) {
				SET_BLOCK(packet_len);
				audio_data_parsed += packet_len;
				return SEND_AUDIO_SEARCH;

			} else {
				SET_DISCARD_SIZE(packet_len);
				return DISCARD_SEARCH;
			}

		} else if (((temp & 0xf8) == 0x88) && (temp == audio_id)) {
			/* Audio DTS data: skip the 3 substream header bytes */
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
			packet_len -= 3;

			if (audio_first_access == AUDIO_FIRST_ACCESS_ARM)
				audio_first_access = AUDIO_FIRST_ACCESS_DONE;

			if (pts_dts_flag & 2)
				ptsmgr_apts_checkin(pts);

			if (ptsmgr_first_apts_ready()) {
				SET_BLOCK(packet_len);
				audio_data_parsed += packet_len;
				return SEND_AUDIO_SEARCH;

			} else {
				SET_DISCARD_SIZE(packet_len);
				return DISCARD_SEARCH;
			}
		} else if ((temp & 0xe0) == 0x20) {
			/* DVD sub-picture substream (0x20-0x3f) */
			if (temp > sub_id_max)
				sub_id_max = temp;
#ifdef DEBUG_VOB_SUB
			/* record every distinct sub stream id seen */
			for (i = 0; i < sub_found_num; i++) {
				if (!sub_info[i])
					break;
				if (temp == sub_info[i]->id)
					break;
			}
			if (i == sub_found_num && i < MAX_SUB_NUM) {
				if (sub_info[sub_found_num]) {
					sub_info[sub_found_num]->id = temp;
					sub_found_num++;
					pr_info
					("[%s]found new sub_id=0x%x (num %d)\n",
					 __func__, temp, sub_found_num);
				} else {
					pr_info
					("[%s]sub info NULL!\n", __func__);
				}
			}
#endif

			if (temp == sub_id) {
				/* DVD sub-picture data */
				if (!packet_len)
					return SEARCH_START_CODE;

				else {
#if 0
					if (pts_dts_flag & 2)
						ptsmgr_spts_checkin(pts);

					if (ptsmgr_first_spts_ready()) {
						SET_BLOCK(packet_len);
						return SEND_SUBPIC_SEARCH;

					} else {
						SET_DISCARD_SIZE(packet_len);
						return DISCARD_SEARCH;
					}
#else
					if (pts_dts_flag & 2)
						sub_got_first_pts = 1;

					if (sub_got_first_pts) {
						/*
						 * Prepend a 16-byte AMLU
						 * header (magic, type, len,
						 * pts) ahead of the payload
						 * via the insert FIFO.
						 */
						pr_info
						("sub pts 0x%x, len %d\n",
						 pts, packet_len);
						SET_BLOCK(packet_len);
						WRITE_PARSER_REG
						(PARSER_PARAMETER,
						 16 <<
						 PARSER_PARAMETER_LENGTH_BIT);
						WRITE_PARSER_REG
						(PARSER_INSERT_DATA,
						 SUB_INSERT_START_CODE_HIGH);
						WRITE_PARSER_REG
						(PARSER_INSERT_DATA,
						 SUB_INSERT_START_CODE_LOW |
						 get_sub_type());
						WRITE_PARSER_REG
						(PARSER_INSERT_DATA,
						 packet_len);
						WRITE_PARSER_REG
						(PARSER_INSERT_DATA, pts);
						atomic_set(&sub_block_found, 1);
						return SEND_SUBPIC_SEARCH;
					}

					SET_DISCARD_SIZE(packet_len);
					return DISCARD_SEARCH;
#endif
				}
			} else {
				SET_DISCARD_SIZE(packet_len);
				return DISCARD_SEARCH;
			}
		} else {
			SET_DISCARD_SIZE(packet_len);
			return DISCARD_SEARCH;
		}
	}

	return SEARCH_START_CODE;
}
/*
 * Tasklet-context handler invoked for every start code (or discard
 * completion) reported by the HW parser.  Decodes pack headers itself,
 * delegates PES packets to parser_process(), then programs the parser's
 * next operation via PARSER_CONTROL.
 *
 * @start_code: the start-code byte popped from the FIFO (0 when called
 *              for a discard-complete interrupt).
 */
static void on_start_code_found(int start_code)
{
	unsigned short packet_len;
	unsigned short temp;
	unsigned int next_action;
#if SAVE_SCR
	unsigned int scr;
#endif

	/* a subtitle block queued last round: notify the poll waiters */
	if (atomic_read(&sub_block_found)) {
		wakeup_sub_poll();
		atomic_set(&sub_block_found, 0);
	}

	if (audio_first_access == AUDIO_FIRST_ACCESS_POPING) {
		/*
		 *we are in the procedure of poping data for audio first
		 * access, continue with last packet
		 */
		audio_first_access = AUDIO_FIRST_ACCESS_DONE;

		if (packet_remaining) {
			next_action = SEND_AUDIO_SEARCH;
			SET_BLOCK(packet_remaining);

		} else
			next_action = SEARCH_START_CODE;

	} else if (start_code == 0xba) { /* PACK_START_CODE */
		temp = PARSER_POP;

		if ((temp >> 6) == 0x01) {
			/* mpeg-2 pack header: 10-byte SCR/mux_rate field */
#if SAVE_SCR
			scr = ((temp >> 3) & 0x3) << 30; /* bit 31-30 */
			scr |= (temp & 0x3) << 28; /* bit 29-28 */
			scr |= (PARSER_POP) << 20; /* bit 27-20 */
			temp = PARSER_POP;
			scr |= (temp >> 4) << 16; /* bit 19-16 */
			scr |= (temp & 7) << 13; /* bit 15-13 */
			scr |= (PARSER_POP) << 5; /* bit 12-05 */
			scr |= (PARSER_POP) >> 3; /* bit 04-00 */
#else
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
#endif
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
			temp = PARSER_POP & 7;

			while (temp) { /* stuff byte */
				PARSER_POP;
				temp--;
			}

		} else {
			/* mpeg-1 Pack Header */
#if SAVE_SCR
			scr = ((temp >> 1) & 0x3) << 30; /* bit 31-30 */
			scr |= (PARSER_POP) << 22; /* bit 29-22 */
			scr |= (PARSER_POP >> 1) << 15; /* bit 21-15 */
			scr |= (PARSER_POP) << 7; /* bit 14-07 */
			scr |= (PARSER_POP >> 1); /* bit 06-00 */
#else
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
			PARSER_POP;
#endif
		}

#ifdef VIDEO_AUTO_FLUSH
		/* pack boundary is the safe point to inject video filler */
		if (video_auto_flush_state == VIDEO_AUTO_FLUSH_TRIGGER) {
			next_action = SEARCH_START_CODE_VIDEO_FLUSH;
			video_auto_flush_state = VIDEO_AUTO_FLUSH_DONE;
		} else
#endif

			next_action = SEARCH_START_CODE;

	} else {
		/* any other start code is a PES packet: length first */
		packet_len = (PARSER_POP << 8) | PARSER_POP;

		if (start_code == video_id)
			next_action = parser_process(0, packet_len);

		else if (start_code == audio_id) {
			/* add mpeg audio packet length check */
			if (packet_len > MAX_MPG_AUDIOPK_SIZE)
				next_action = SEARCH_START_CODE;

			else
				next_action = parser_process(1, packet_len);

		} else if (start_code == 0xbb) {
			/* system header: skip */
			SET_DISCARD_SIZE(packet_len);
			next_action = DISCARD_SEARCH;
		} else if (start_code == 0xbd)
			/* private stream 1 (LPCM/AC3/DTS/sub-picture) */
			next_action = parser_process(2, packet_len);

		else if (start_code == 0xbf) {
			/* private stream 2: skip */
			SET_DISCARD_SIZE(packet_len);
			next_action = DISCARD_SEARCH;
		} else if ((start_code < 0xc0) || (start_code > 0xc8))
			/*
			 * NOTE(review): MPEG audio stream ids run 0xc0-0xdf;
			 * only 0xc0-0xc8 are discarded as "other audio"
			 * here — confirm the narrower range is intended.
			 */
			next_action = SEARCH_START_CODE;

		else if (packet_len) {
			/* non-selected audio stream: skip its payload */
			SET_DISCARD_SIZE(packet_len);
			next_action = DISCARD_SEARCH;

		} else
			next_action = SEARCH_START_CODE;
	}

	/* program the HW parser with the decision */
	switch (next_action) {
	case SEARCH_START_CODE:
		WRITE_PARSER_REG(PARSER_CONTROL, PARSER_AUTOSEARCH);
		break;

	case SEND_VIDEO_SEARCH:
		WRITE_PARSER_REG_BITS(PARSER_CONTROL,
				PARSER_AUTOSEARCH | PARSER_VIDEO |
				PARSER_WRITE, ES_CTRL_BIT, ES_CTRL_WID);
		break;

	case SEND_AUDIO_SEARCH:
		WRITE_PARSER_REG_BITS(PARSER_CONTROL,
				PARSER_AUTOSEARCH | PARSER_AUDIO |
				PARSER_WRITE, ES_CTRL_BIT, ES_CTRL_WID);
		break;

	case SEND_SUBPIC_SEARCH:
		WRITE_PARSER_REG_BITS(PARSER_CONTROL,
				PARSER_AUTOSEARCH | PARSER_SUBPIC |
				PARSER_WRITE | ES_INSERT_BEFORE_ES_WRITE,
				ES_CTRL_BIT, ES_CTRL_WID);
		break;

	case DISCARD_SEARCH:
		WRITE_PARSER_REG_BITS(PARSER_CONTROL,
				PARSER_AUTOSEARCH | PARSER_DISCARD,
				ES_CTRL_BIT, ES_CTRL_WID);
		break;

	case DISCARD_ONLY:
		WRITE_PARSER_REG_BITS(PARSER_CONTROL,
				PARSER_DISCARD, ES_CTRL_BIT, ES_CTRL_WID);
		break;

#ifdef VIDEO_AUTO_FLUSH
	case SEARCH_START_CODE_VIDEO_FLUSH:
		/* inject VIDEO_AUTO_FLUSH_BYTE_COUNT bytes of 0xff filler */
		WRITE_PARSER_REG(PARSER_INSERT_DATA, 0xffffffff);
		WRITE_PARSER_REG(PARSER_INSERT_DATA, 0xffffffff);
		WRITE_PARSER_REG(PARSER_PARAMETER,
				((VIDEO_AUTO_FLUSH_BYTE_COUNT /
				  8) << PARSER_PARAMETER_LOOP_BIT) | (8 <<
				  PARSER_PARAMETER_LENGTH_BIT));
		WRITE_PARSER_REG(PARSER_CONTROL,
				PARSER_AUTOSEARCH | PARSER_VIDEO | PARSER_WRITE |
				ES_INSERT_BEFORE_ES_WRITE);
		break;
#endif
	}
}
/*
 * Bottom half of the parser interrupt: ack status, wake any writer
 * waiting on a finished fetch, and dispatch start-code / discard events.
 */
static void parser_tasklet(ulong data)
{
	s32 sc;
	u32 int_status = READ_PARSER_REG(PARSER_INT_STATUS);

	/* write-to-clear acknowledge */
	WRITE_PARSER_REG(PARSER_INT_STATUS, int_status);

	if (int_status & PARSER_INTSTAT_FETCH_CMD) {
		fetch_done = 1;

		wake_up_interruptible(&wq);
	}

	if (int_status & PARSER_INTSTAT_SC_FOUND) {
		sc = PARSER_POP;

		on_start_code_found(sc);

	} else if (int_status & PARSER_INTSTAT_DISCARD)
		/* discard finished; 0 never matches a stream id */
		on_start_code_found(0);
}

/* Hard IRQ handler: defer all work to the tasklet. */
static irqreturn_t parser_isr(int irq, void *dev_id)
{
	tasklet_schedule(&psparser_tasklet);

	return IRQ_HANDLED;
}

/*
 * Copy up to FETCHBUF_SIZE user bytes into the fetch bounce buffer and
 * hand them to the HW parser.  Returns the number of bytes consumed,
 * -EAGAIN on fetch timeout (caller retries), or a -errno.
 *
 * NOTE(review): dma_unmap_single() is issued right after the fetch
 * command is started, before waiting for PARSER_INTSTAT_FETCH_CMD — the
 * streaming mapping is torn down while the device may still be reading.
 * Presumably works on this coherent SoC, but it violates the DMA API;
 * confirm and consider unmapping after the wait.
 */
static ssize_t _psparser_write(const char __user *buf, size_t count)
{
	size_t r = count;
	const char __user *p = buf;
	u32 len;
	int ret;
	dma_addr_t dma_addr = 0;

	if (r > 0) {
		len = min_t(size_t, r, FETCHBUF_SIZE);
		if (copy_from_user(fetchbuf, p, len))
			return -EFAULT;

		dma_addr =
			dma_map_single(amports_get_dma_device(),
					fetchbuf, FETCHBUF_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(amports_get_dma_device(), dma_addr))
			return -EFAULT;


		fetch_done = 0;

		wmb(); /* Ensure fetchbuf contents visible */

		WRITE_PARSER_REG(PARSER_FETCH_ADDR, dma_addr);

		WRITE_PARSER_REG(PARSER_FETCH_CMD, (7 << FETCH_ENDIAN) | len);
		dma_unmap_single(amports_get_dma_device(), dma_addr,
				FETCHBUF_SIZE, DMA_TO_DEVICE);
		ret =
			wait_event_interruptible_timeout(wq, fetch_done != 0,
					HZ / 10);
		if (ret == 0) {
			/* cancel the stalled fetch before reporting EAGAIN */
			WRITE_PARSER_REG(PARSER_FETCH_CMD, 0);
			pr_info("write timeout, retry\n");
			return -EAGAIN;
		} else if (ret < 0)
			return -ERESTARTSYS;

		p += len;
		r -= len;
	}

	return count - r;
}

/*
 * Bring up the PS parser for the given stream ids.
 *
 * @vid/@aid/@sid: video / audio / subtitle stream ids to extract.
 * @vdec:          decoder instance providing the video stream buffer.
 *
 * Resets the parser block, wires the video/audio/subtitle FIFOs,
 * programs auto-search for MPEG start codes, and installs the IRQ.
 * Returns 0 or a -errno; on failure all pts sessions are unwound.
 */
s32 psparser_init(u32 vid, u32 aid, u32 sid, struct vdec_s *vdec)
{
	s32 r;
	u32 parser_sub_start_ptr;
	u32 parser_sub_end_ptr;
	u32 parser_sub_rp;

#ifdef DEBUG_VOB_SUB
	u8 i;

	/* pre-allocate slots for discovered subtitle stream ids */
	for (i = 0; i < MAX_SUB_NUM; i++) {
		sub_info[i] = kzalloc(sizeof(struct subtitle_info), GFP_KERNEL);
		if (!sub_info[i]) {
			pr_info
			("[psparser_init]alloc for subtitle info failed\n");
		} else
			sub_info[i]->id = -1;
	}
	sub_found_num = 0;
#endif

	/* preserve the subtitle ring set up before the parser reset */
	parser_sub_start_ptr = READ_PARSER_REG(PARSER_SUB_START_PTR);
	parser_sub_end_ptr = READ_PARSER_REG(PARSER_SUB_END_PTR);
	parser_sub_rp = READ_PARSER_REG(PARSER_SUB_RP);

	video_id = vid;
	audio_id = aid;
	sub_id = sid;
	audio_got_first_pts = 0;
	video_got_first_dts = 0;
	sub_got_first_pts = 0;
	first_apts = 0;
	first_vpts = 0;
	pts_equ_dts_flag = 0;

#ifdef VIDEO_AUTO_FLUSH
	video_auto_flush_state = VIDEO_AUTO_FLUSH_IDLE;
#endif

	pr_info("video 0x%x, audio 0x%x, sub 0x%x\n", video_id, audio_id,
			sub_id);
	if (fetchbuf == 0) {
		pr_info("%s: no fetchbuf\n", __func__);
		return -ENOMEM;
	}

	WRITE_RESET_REG(RESET1_REGISTER, RESET_PARSER);

/* for recorded file and local play, this can't change the input source*/
	/* TS data path */
/*
#ifndef CONFIG_AM_DVB
	WRITE_DEMUX_REG(FEC_INPUT_CONTROL, 0);
#else
	tsdemux_set_reset_flag();
#endif */

	/* detach all three demux instances from the HIU/file interface */
	CLEAR_DEMUX_REG_MASK(TS_HIU_CTL, 1 << USE_HI_BSF_INTERFACE);
	CLEAR_DEMUX_REG_MASK(TS_HIU_CTL_2, 1 << USE_HI_BSF_INTERFACE);
	CLEAR_DEMUX_REG_MASK(TS_HIU_CTL_3, 1 << USE_HI_BSF_INTERFACE);
	CLEAR_DEMUX_REG_MASK(TS_FILE_CONFIG, (1 << TS_HIU_ENABLE));

	/* hook stream buffer with PARSER */
	WRITE_PARSER_REG(PARSER_VIDEO_START_PTR, vdec->input.start);
	WRITE_PARSER_REG(PARSER_VIDEO_END_PTR,
			vdec->input.start + vdec->input.size - 8);

	if (vdec_single(vdec)) {
		/* HW-managed read pointer for the single-decoder path */
		CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_VID_MAN_RD_PTR);
		WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);
		CLEAR_VREG_MASK(VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);
	} else {
		/* manual read pointer: stream-frame path */
		SET_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_VID_MAN_RD_PTR);
		WRITE_PARSER_REG(PARSER_VIDEO_WP, vdec->input.start);
		WRITE_PARSER_REG(PARSER_VIDEO_RP, vdec->input.start);
	}

	WRITE_PARSER_REG(PARSER_AUDIO_START_PTR,
			READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR));
	WRITE_PARSER_REG(PARSER_AUDIO_END_PTR,
			READ_AIU_REG(AIU_MEM_AIFIFO_END_PTR));
	CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_AUD_MAN_RD_PTR);

	WRITE_PARSER_REG(PARSER_CONFIG,
			(10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
			(1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
			(16 << PS_CFG_MAX_FETCH_CYCLE_BIT));

	/*
	 * NOTE(review): this repeats the VLD_MEM_VIFIFO_BUF_CNTL init
	 * already done in the vdec_single() branch above — presumably
	 * harmless, but confirm whether the duplicate is required.
	 */
	if (vdec_single(vdec)) {
		WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);
		CLEAR_VREG_MASK(VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);
	}

	WRITE_AIU_REG(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);
	CLEAR_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);

	/* restore the subtitle ring; both pointers restart at the base */
	WRITE_PARSER_REG(PARSER_SUB_START_PTR, parser_sub_start_ptr);
	WRITE_PARSER_REG(PARSER_SUB_END_PTR, parser_sub_end_ptr);
	WRITE_PARSER_REG(PARSER_SUB_RP, parser_sub_start_ptr);
	WRITE_PARSER_REG(PARSER_SUB_WP, parser_sub_start_ptr);
	SET_PARSER_REG_MASK(PARSER_ES_CONTROL,
			(7 << ES_SUB_WR_ENDIAN_BIT) | ES_SUB_MAN_RD_PTR);

	WRITE_PARSER_REG(PFIFO_RD_PTR, 0);
	WRITE_PARSER_REG(PFIFO_WR_PTR, 0);

	WRITE_PARSER_REG(PARSER_SEARCH_PATTERN, MPEG_START_CODE_PATTERN);
	WRITE_PARSER_REG(PARSER_SEARCH_MASK, MPEG_START_CODE_MASK);

	WRITE_PARSER_REG(PARSER_CONFIG,
			(10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
			(1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
			PS_CFG_STARTCODE_WID_24 |
			PS_CFG_PFIFO_ACCESS_WID_8 | /* single byte pop */
			(16 << PS_CFG_MAX_FETCH_CYCLE_BIT));
	WRITE_PARSER_REG(PARSER_CONTROL, PARSER_AUTOSEARCH);

	tasklet_init(&psparser_tasklet, parser_tasklet, 0);
	r = pts_start(PTS_TYPE_VIDEO);
	if (r < 0)
		goto Err_1;
	r = pts_start(PTS_TYPE_AUDIO);
	if (r < 0)
		goto Err_2;

	video_data_parsed = 0;
	audio_data_parsed = 0;
	/*TODO irq */

	r = vdec_request_irq(PARSER_IRQ, parser_isr,
			"psparser", (void *)psparser_id);

	if (r) {
		pr_info("PS Demux irq register failed.\n");

		r = -ENOENT;
		goto Err_3;
	}

	/* clear any stale status, then enable all parser interrupts */
	WRITE_PARSER_REG(PARSER_INT_STATUS, 0xffff);
	WRITE_PARSER_REG(PARSER_INT_ENABLE,
			PARSER_INT_ALL << PARSER_INT_HOST_EN_BIT);

	return 0;

Err_3:
	pts_stop(PTS_TYPE_AUDIO);

Err_2:
	pts_stop(PTS_TYPE_VIDEO);

Err_1:
	return r;
}
WRITE_PARSER_REG(PARSER_INT_ENABLE, 0); + /*TODO irq */ + + vdec_free_irq(PARSER_IRQ, (void *)psparser_id); + + pts_stop(PTS_TYPE_VIDEO); + pts_stop(PTS_TYPE_AUDIO); +#ifdef DEBUG_VOB_SUB + for (i = 0; i < MAX_SUB_NUM; i++) + kfree(sub_info[i]); + pr_info("psparser release subtitle info\n"); +#endif +} +EXPORT_SYMBOL(psparser_release); + +ssize_t psparser_write(struct file *file, + struct stream_buf_s *vbuf, + struct stream_buf_s *abuf, + const char __user *buf, size_t count) +{ + s32 r; + + struct port_priv_s *priv = (struct port_priv_s *)file->private_data; + struct stream_port_s *port = priv->port; + + if ((stbuf_space(vbuf) < count) || (stbuf_space(abuf) < count)) { + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + if ((port->flag & PORT_FLAG_VID) + && (stbuf_space(vbuf) < count)) { + r = stbuf_wait_space(vbuf, count); + if (r < 0) + return r; + } + if ((port->flag & PORT_FLAG_AID) + && (stbuf_space(abuf) < count)) { + r = stbuf_wait_space(abuf, count); + if (r < 0) + return r; + } + } + + return _psparser_write(buf, count); +} + +void psparser_change_avid(unsigned int vid, unsigned int aid) +{ + video_id = vid; + audio_id = aid; +} + +void psparser_change_sid(unsigned int sid) +{ + sub_id = sid; +} + +void psparser_audio_reset(void) +{ + ulong flags; + + DEFINE_SPINLOCK(lock); + + spin_lock_irqsave(&lock, flags); + + WRITE_PARSER_REG(PARSER_AUDIO_WP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_PARSER_REG(PARSER_AUDIO_RP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + + WRITE_PARSER_REG(PARSER_AUDIO_START_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_PARSER_REG(PARSER_AUDIO_END_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_END_PTR)); + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_AUD_MAN_RD_PTR); + + WRITE_AIU_REG(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + CLEAR_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + + audio_data_parsed = 0; + + spin_unlock_irqrestore(&lock, flags); + +} + +void psparser_sub_reset(void) +{ + ulong flags; + + 
DEFINE_SPINLOCK(lock); + u32 parser_sub_start_ptr; + u32 parser_sub_end_ptr; + + spin_lock_irqsave(&lock, flags); + + parser_sub_start_ptr = READ_PARSER_REG(PARSER_SUB_START_PTR); + parser_sub_end_ptr = READ_PARSER_REG(PARSER_SUB_END_PTR); + + WRITE_PARSER_REG(PARSER_SUB_START_PTR, parser_sub_start_ptr); + WRITE_PARSER_REG(PARSER_SUB_END_PTR, parser_sub_end_ptr); + WRITE_PARSER_REG(PARSER_SUB_RP, parser_sub_start_ptr); + WRITE_PARSER_REG(PARSER_SUB_WP, parser_sub_start_ptr); + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, + (7 << ES_SUB_WR_ENDIAN_BIT) | ES_SUB_MAN_RD_PTR); + + spin_unlock_irqrestore(&lock, flags); + +} + +u8 psparser_get_sub_found_num(void) +{ +#ifdef DEBUG_VOB_SUB + return sub_found_num; +#else + return 0; +#endif +} + +u8 psparser_get_sub_info(struct subtitle_info **sub_infos) +{ +#ifdef DEBUG_VOB_SUB + u8 i = 0; + int ret = 0; + u8 size = sizeof(struct subtitle_info); + + for (i = 0; i < sub_found_num; i++) { + if (!sub_info[i]) { + pr_info + ("[psparser_get_sub_info:%d] sub_info[%d] NULL\n", + __LINE__, i); + ret = -1; + break; + } + if (!sub_infos[i]) { + pr_info + ("[psparser_get_sub_info:%d] sub_infos[%d] NULL\n", + __LINE__, i); + ret = -2; + break; + } + memcpy(sub_infos[i], sub_info[i], size); + } + return ret; +#else + return 0; +#endif +} + +static int psparser_stbuf_init(struct stream_buf_s *stbuf, + struct vdec_s *vdec) +{ + int ret = -1; + + ret = stbuf_init(stbuf, vdec); + if (ret) + goto out; + + ret = psparser_init(stbuf->pars.vid, + stbuf->pars.aid, + stbuf->pars.sid, + vdec); + if (ret) + goto out; + + stbuf->flag |= BUF_FLAG_IN_USE; +out: + return ret; +} + +static void psparser_stbuf_release(struct stream_buf_s *stbuf) +{ + psparser_release(); + + stbuf_release(stbuf); +} + +static struct stream_buf_ops psparser_stbuf_ops = { + .init = psparser_stbuf_init, + .release = psparser_stbuf_release, + .get_wp = parser_get_wp, + .set_wp = parser_set_wp, + .get_rp = parser_get_rp, + .set_rp = parser_set_rp, +}; + +struct stream_buf_ops 
*get_psparser_stbuf_ops(void) +{ + return &psparser_stbuf_ops; +} +EXPORT_SYMBOL(get_psparser_stbuf_ops); +
diff --git a/drivers/stream_input/parser/psparser.h b/drivers/stream_input/parser/psparser.h new file mode 100644 index 0000000..b83e342 --- /dev/null +++ b/drivers/stream_input/parser/psparser.h
@@ -0,0 +1,142 @@ +/* + * drivers/amlogic/media/stream_input/parser/psparser.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef PSPARSER_H +#define PSPARSER_H + +#include "../../frame_provider/decoder/utils/vdec.h" + +extern s32 psparser_init(u32 vid, u32 aid, u32 sid, struct vdec_s *vdec); + +extern void psparser_release(void); + +extern ssize_t psparser_write(struct file *file, + struct stream_buf_s *vbuf, + struct stream_buf_s *abuf, + const char __user *buf, size_t count); + +extern void psparser_change_avid(unsigned int vid, unsigned int aid); + +extern void psparser_change_sid(unsigned int sid); + +extern void psparser_audio_reset(void); + +extern void psparser_sub_reset(void); + +extern u8 psparser_get_sub_found_num(void); + +extern u8 psparser_get_sub_info(struct subtitle_info *sub_infos[]); + +#ifdef CONFIG_AM_DVB +extern int tsdemux_set_reset_flag(void); +#endif + +/* TODO: move to register headers */ +#define ES_PACK_SIZE_BIT 8 +#define ES_PACK_SIZE_WID 24 + +#define ES_CTRL_WID 8 +#define ES_CTRL_BIT 0 +#define ES_TYPE_MASK (3 << 6) +#define ES_TYPE_VIDEO (0 << 6) +#define ES_TYPE_AUDIO (1 << 6) +#define ES_TYPE_SUBTITLE (2 << 6) + +#define ES_WRITE (1<<5) +#define ES_PASSTHROUGH (1<<4) +#define ES_INSERT_BEFORE_ES_WRITE (1<<3) +#define ES_DISCARD (1<<2) +#define ES_SEARCH (1<<1) +#define ES_PARSER_START (1<<0) +#define ES_PARSER_BUSY (1<<0) + +#define PARSER_INTSTAT_FETCH_CMD (1<<7) +#define PARSER_INTSTAT_PARSE 
(1<<4) +#define PARSER_INTSTAT_DISCARD (1<<3) +#define PARSER_INTSTAT_INSZERO (1<<2) +#define PARSER_INTSTAT_ACT_NOSSC (1<<1) +#define PARSER_INTSTAT_SC_FOUND (1<<0) + +#define FETCH_CIR_BUF (1<<31) +#define FETCH_CHK_BUF_STOP (1<<30) +#define FETCH_PASSTHROUGH (1<<29) +#define FETCH_ENDIAN 27 +#define FETCH_PASSTHROUGH_TYPE_MASK (0x3<<27) +#define FETCH_ENDIAN_MASK (0x7<<27) +#define FETCH_BUF_SIZE_MASK (0x7ffffff) +#define FETCH_CMD_PTR_MASK 3 +#define FETCH_CMD_RD_PTR_BIT 5 +#define FETCH_CMD_WR_PTR_BIT 3 +#define FETCH_CMD_NUM_MASK 3 +#define FETCH_CMD_NUM_BIT 0 + +#define ES_COUNT_MASK 0xfff +#define ES_COUNT_BIT 20 +#define ES_REQ_PENDING (1<<19) +#define ES_PASSTHROUGH_EN (1<<18) +#define ES_PASSTHROUGH_TYPE_MASK (3<<16) +#define ES_PASSTHROUGH_TYPE_VIDEO (0<<16) +#define ES_PASSTHROUGH_TYPE_AUDIO (1<<16) +#define ES_PASSTHROUGH_TYPE_SUBTITLE (2<<16) +#define ES_WR_ENDIAN_MASK (0x7) +#define ES_SUB_WR_ENDIAN_BIT 9 +#define ES_SUB_MAN_RD_PTR (1<<8) +#define ES_AUD_WR_ENDIAN_BIT 5 +#define ES_AUD_MAN_RD_PTR (1<<4) +#define ES_VID_WR_ENDIAN_BIT 1 +#define ES_VID_MAN_RD_PTR (1<<0) + +#define PS_CFG_FETCH_DMA_URGENT (1<<31) +#define PS_CFG_STREAM_DMA_URGENT (1<<30) +#define PS_CFG_FORCE_PFIFO_REN (1<<29) +#define PS_CFG_PFIFO_PEAK_EN (1<<28) +#define PS_CFG_SRC_SEL_BIT 24 +#define PS_CFG_SRC_SEL_MASK (3<<PS_CFG_SRC_SEL_BIT) +#define PS_CFG_SRC_SEL_FETCH (0<<PS_CFG_SRC_SEL_BIT) +#define PS_CFG_SRC_SEL_AUX1 (1<<PS_CFG_SRC_SEL_BIT) /* from NDMA */ +#define PS_CFG_SRC_SEL_AUX2 (2<<PS_CFG_SRC_SEL_BIT) +#define PS_CFG_SRC_SEL_AUX3 (3<<PS_CFG_SRC_SEL_BIT) +#define PS_CFG_PFIFO_EMPTY_CNT_BIT 16 +#define PS_CFG_PFIFO_EMPTY_CNT_MASK 0xff +#define PS_CFG_MAX_ES_WR_CYCLE_BIT 12 +#define PS_CFG_MAX_ES_WR_CYCLE_MASK 0xf +#define PS_CFG_STARTCODE_WID_MASK (0x3<<10) +#define PS_CFG_STARTCODE_WID_8 (0x0<<10) +#define PS_CFG_STARTCODE_WID_16 (0x1<<10) +#define PS_CFG_STARTCODE_WID_24 (0x2<<10) +#define PS_CFG_STARTCODE_WID_32 (0x3<<10) +#define PS_CFG_PFIFO_ACCESS_WID_MASK 
(0x3<<8) +#define PS_CFG_PFIFO_ACCESS_WID_8 (0x0<<8) +#define PS_CFG_PFIFO_ACCESS_WID_16 (0x1<<8) +#define PS_CFG_PFIFO_ACCESS_WID_24 (0x2<<8) +#define PS_CFG_PFIFO_ACCESS_WID_32 (0x3<<8) +#define PS_CFG_MAX_FETCH_CYCLE_BIT 0 +#define PS_CFG_MAX_FETCH_CYCLE_MASK 0xff + +#define PARSER_INT_DISABLE_CNT_MASK 0xffff +#define PARSER_INT_DISABLE_CNT_BIT 16 +#define PARSER_INT_HOST_EN_MASK 0xff +#define PARSER_INT_HOST_EN_BIT 8 +#define PARSER_INT_AMRISC_EN_MASK 0xff +#define PARSER_INT_AMRISC_EN_BIT 0 +#define PARSER_INT_ALL 0xff + +#define RESET_PARSER (1<<8) +#define TS_HIU_ENABLE 5 +#define USE_HI_BSF_INTERFACE 7 + +#endif /* PSPARSER_H */
diff --git a/drivers/stream_input/parser/rmparser.c b/drivers/stream_input/parser/rmparser.c new file mode 100644 index 0000000..902dc5a --- /dev/null +++ b/drivers/stream_input/parser/rmparser.c
@@ -0,0 +1,340 @@ +/* + * drivers/amlogic/amports/rmparser.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/uaccess.h> + +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../amports/amports_priv.h" +#include "../amports/streambuf.h" +#include "../amports/streambuf_reg.h" +#include <linux/delay.h> +#include "rmparser.h" + +#define MANAGE_PTS + +static u32 fetch_done; +static u32 parse_halt; + +static DECLARE_WAIT_QUEUE_HEAD(rm_wq); +static const char rmparser_id[] = "rmparser-id"; + +static irqreturn_t rm_parser_isr(int irq, void *dev_id) +{ + u32 int_status = READ_PARSER_REG(PARSER_INT_STATUS); + + if (int_status & PARSER_INTSTAT_FETCH_CMD) { + WRITE_PARSER_REG(PARSER_INT_STATUS, PARSER_INTSTAT_FETCH_CMD); + fetch_done = 1; + + wake_up_interruptible(&rm_wq); + } + + return IRQ_HANDLED; +} + +s32 rmparser_init(struct vdec_s *vdec) +{ + s32 r; + + parse_halt = 0; + if (fetchbuf == 0) { + pr_info("%s: no fetchbuf\n", __func__); + return -ENOMEM; + } + + WRITE_RESET_REG(RESET1_REGISTER, RESET_PARSER); + +/* for recorded file and local play, this can't change the 
input source*/ + /* TS data path */ +/* +#ifndef CONFIG_AM_DVB + WRITE_DEMUX_REG(FEC_INPUT_CONTROL, 0); +#else + tsdemux_set_reset_flag(); +#endif */ + + CLEAR_DEMUX_REG_MASK(TS_HIU_CTL, 1 << USE_HI_BSF_INTERFACE); + CLEAR_DEMUX_REG_MASK(TS_HIU_CTL_2, 1 << USE_HI_BSF_INTERFACE); + CLEAR_DEMUX_REG_MASK(TS_HIU_CTL_3, 1 << USE_HI_BSF_INTERFACE); + + CLEAR_DEMUX_REG_MASK(TS_FILE_CONFIG, (1 << TS_HIU_ENABLE)); + + /* hook stream buffer with PARSER */ + WRITE_PARSER_REG(PARSER_VIDEO_START_PTR, vdec->input.start); + WRITE_PARSER_REG(PARSER_VIDEO_END_PTR, + vdec->input.start + vdec->input.size - 8); + + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_VID_MAN_RD_PTR); + + WRITE_PARSER_REG(PARSER_AUDIO_START_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_PARSER_REG(PARSER_AUDIO_END_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_END_PTR)); + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_AUD_MAN_RD_PTR); + + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + CLEAR_VREG_MASK(VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + + WRITE_AIU_REG(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + CLEAR_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + + WRITE_PARSER_REG(PFIFO_RD_PTR, 0); + WRITE_PARSER_REG(PFIFO_WR_PTR, 0); + + WRITE_PARSER_REG(PARSER_SEARCH_MASK, 0); + WRITE_PARSER_REG(PARSER_CONTROL, (ES_SEARCH | ES_PARSER_START)); + +#ifdef MANAGE_PTS + if (pts_start(PTS_TYPE_VIDEO) < 0) + goto Err_1; + + if (pts_start(PTS_TYPE_AUDIO) < 0) + goto Err_2; +#endif + /*TODO irq */ + + /* enable interrupt */ + + r = vdec_request_irq(PARSER_IRQ, rm_parser_isr, + "rmparser", (void *)rmparser_id); + + if (r) { + pr_info("RM parser irq register failed.\n"); + goto Err_3; + } + + WRITE_PARSER_REG(PARSER_INT_STATUS, 0xffff); + WRITE_PARSER_REG(PARSER_INT_ENABLE, + ((PARSER_INT_ALL & (~PARSER_INTSTAT_FETCH_CMD)) << + PARSER_INT_AMRISC_EN_BIT) + | (PARSER_INTSTAT_FETCH_CMD << PARSER_INT_HOST_EN_BIT)); + + return 0; + +Err_3: + pts_stop(PTS_TYPE_AUDIO); +Err_2: + pts_stop(PTS_TYPE_VIDEO); +Err_1: + 
return -ENOENT; +} +EXPORT_SYMBOL(rmparser_init); + +void rmparser_release(void) +{ + WRITE_PARSER_REG(PARSER_INT_ENABLE, 0); + /*TODO irq */ + + vdec_free_irq(PARSER_IRQ, (void *)rmparser_id); + +#ifdef MANAGE_PTS + pts_stop(PTS_TYPE_VIDEO); + pts_stop(PTS_TYPE_AUDIO); +#endif + +} +EXPORT_SYMBOL(rmparser_release); + +static inline u32 buf_wp(u32 type) +{ + return (type == BUF_TYPE_VIDEO) ? READ_VREG(VLD_MEM_VIFIFO_WP) : + (type == BUF_TYPE_AUDIO) ? + READ_AIU_REG(AIU_MEM_AIFIFO_MAN_WP) : + READ_PARSER_REG(PARSER_SUB_START_PTR); +} + +static ssize_t _rmparser_write(const char __user *buf, size_t count) +{ + size_t r = count; + const char __user *p = buf; + u32 len; + int ret; + static int halt_droped_len; + u32 vwp, awp; + dma_addr_t dma_addr = 0; + + if (r > 0) { + len = min_t(size_t, r, FETCHBUF_SIZE); + + if (copy_from_user(fetchbuf, p, len)) + return -EFAULT; + dma_addr = + dma_map_single(amports_get_dma_device(), + fetchbuf, FETCHBUF_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(amports_get_dma_device(), dma_addr)) + return -EFAULT; + + fetch_done = 0; + + wmb(); /* Ensure fetchbuf contents visible */ + vwp = buf_wp(BUF_TYPE_VIDEO); + awp = buf_wp(BUF_TYPE_AUDIO); + WRITE_PARSER_REG(PARSER_FETCH_ADDR, dma_addr); + + WRITE_PARSER_REG(PARSER_FETCH_CMD, (7 << FETCH_ENDIAN) | len); + dma_unmap_single(amports_get_dma_device(), dma_addr, + FETCHBUF_SIZE, DMA_TO_DEVICE); + ret = + wait_event_interruptible_timeout(rm_wq, fetch_done != 0, + HZ / 10); + if (ret == 0) { + WRITE_PARSER_REG(PARSER_FETCH_CMD, 0); + parse_halt++; + pr_info + ("write timeout,retry,halt_count=%d parse_control=%x\n", + parse_halt, READ_PARSER_REG(PARSER_CONTROL)); + + //vreal_set_fatal_flag(1);//DEBUG_TMP + + if (parse_halt > 10) { + WRITE_PARSER_REG(PARSER_CONTROL, + (ES_SEARCH | ES_PARSER_START)); + pr_info("reset parse_control=%x\n", + READ_PARSER_REG(PARSER_CONTROL)); + } + return -EAGAIN; + } else if (ret < 0) + return -ERESTARTSYS; + + if (vwp == buf_wp(BUF_TYPE_VIDEO) + && awp == 
buf_wp(BUF_TYPE_AUDIO)) { + struct stream_buf_s *v_buf_t = + get_buf_by_type(BUF_TYPE_VIDEO); + struct stream_buf_s *a_buf_t = + get_buf_by_type(BUF_TYPE_AUDIO); + int v_st_lv = stbuf_level(v_buf_t); + int a_st_lv = stbuf_level(a_buf_t); + + if ((parse_halt + 1) % 10 == 1) { + pr_info("V&A WP not changed after write"); + pr_info(",video %x->%x", vwp, + buf_wp(BUF_TYPE_VIDEO)); + pr_info(",Audio:%x-->%x,parse_halt=%d\n", + awp, buf_wp(BUF_TYPE_AUDIO), + parse_halt); + } + parse_halt++; + +/* wp not changed , + * we think have bugs on parser now. + */ + if (parse_halt > 10 && + (v_st_lv < 1000 || a_st_lv < 100)) { + /*reset while at least one is underflow. */ + WRITE_PARSER_REG(PARSER_CONTROL, + (ES_SEARCH | ES_PARSER_START)); + pr_info("reset parse_control=%x\n", + READ_PARSER_REG(PARSER_CONTROL)); + } + if (parse_halt <= 10 || + halt_droped_len < 100 * 1024) { + /*drops first 10 pkt , + * some times maybe no av data + */ + pr_info("drop this pkt=%d,len=%d\n", parse_halt, + len); + p += len; + r -= len; + halt_droped_len += len; + } else + return -EAGAIN; + } else { + halt_droped_len = 0; + parse_halt = 0; + p += len; + r -= len; + } + } + return count - r; +} + +ssize_t rmparser_write(struct file *file, + struct stream_buf_s *vbuf, + struct stream_buf_s *abuf, + const char __user *buf, size_t count) +{ + s32 r; + struct port_priv_s *priv = (struct port_priv_s *)file->private_data; + struct stream_port_s *port = priv->port; + size_t towrite = count; + + if ((stbuf_space(vbuf) < count) || (stbuf_space(abuf) < count)) { + if (file->f_flags & O_NONBLOCK) { + towrite = min(stbuf_space(vbuf), stbuf_space(abuf)); + if (towrite < 1024) /*? can write small? 
*/ + return -EAGAIN; + } else { + if ((port->flag & PORT_FLAG_VID) + && (stbuf_space(vbuf) < count)) { + r = stbuf_wait_space(vbuf, count); + if (r < 0) + return r; + } + if ((port->flag & PORT_FLAG_AID) + && (stbuf_space(abuf) < count)) { + r = stbuf_wait_space(abuf, count); + if (r < 0) + return r; + } + } + } + towrite = min(towrite, count); + return _rmparser_write(buf, towrite); +} + +void rm_set_vasid(u32 vid, u32 aid) +{ + pr_info("rm_set_vasid aid %d, vid %d\n", aid, vid); + WRITE_PARSER_REG(VAS_STREAM_ID, (aid << 8) | vid); +} + +void rm_audio_reset(void) +{ + ulong flags; + DEFINE_SPINLOCK(lock); + + spin_lock_irqsave(&lock, flags); + + WRITE_PARSER_REG(PARSER_AUDIO_WP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_PARSER_REG(PARSER_AUDIO_RP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + + WRITE_PARSER_REG(PARSER_AUDIO_START_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_PARSER_REG(PARSER_AUDIO_END_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_END_PTR)); + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_AUD_MAN_RD_PTR); + + WRITE_AIU_REG(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + CLEAR_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + + spin_unlock_irqrestore(&lock, flags); +} +EXPORT_SYMBOL(rm_audio_reset);
diff --git a/drivers/stream_input/parser/rmparser.h b/drivers/stream_input/parser/rmparser.h new file mode 100644 index 0000000..eb2023a --- /dev/null +++ b/drivers/stream_input/parser/rmparser.h
@@ -0,0 +1,136 @@ +/* + * drivers/amlogic/amports/rmparser.h + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef RMPARSER_H +#define RMPARSER_H + +#include "../../frame_provider/decoder/utils/vdec.h" + +extern void rm_set_vasid(u32 vid, u32 aid); + +extern ssize_t rmparser_write(struct file *file, + struct stream_buf_s *vbuf, + struct stream_buf_s *abuf, + const char __user *buf, size_t count); + +s32 rmparser_init(struct vdec_s *vdec); + +extern void rmparser_release(void); + +extern void rm_audio_reset(void); + +extern void vreal_set_fatal_flag(int flag); + +#ifdef CONFIG_AM_DVB +extern int tsdemux_set_reset_flag(void); +#endif + +/* TODO: move to register headers */ +#define ES_PACK_SIZE_BIT 8 +#define ES_PACK_SIZE_WID 24 + +#define ES_CTRL_WID 8 +#define ES_CTRL_BIT 0 +#define ES_TYPE_MASK (3 << 6) +#define ES_TYPE_VIDEO (0 << 6) +#define ES_TYPE_AUDIO (1 << 6) +#define ES_TYPE_SUBTITLE (2 << 6) + +#define ES_WRITE (1<<5) +#define ES_PASSTHROUGH (1<<4) +#define ES_INSERT_BEFORE_ES_WRITE (1<<3) +#define ES_DISCARD (1<<2) +#define ES_SEARCH (1<<1) +#define ES_PARSER_START (1<<0) +#define ES_PARSER_BUSY (1<<0) + +#define PARSER_INTSTAT_FETCH_CMD (1<<7) +#define PARSER_INTSTAT_PARSE (1<<4) +#define PARSER_INTSTAT_DISCARD (1<<3) +#define PARSER_INTSTAT_INSZERO (1<<2) +#define PARSER_INTSTAT_ACT_NOSSC (1<<1) +#define PARSER_INTSTAT_SC_FOUND (1<<0) + +#define FETCH_CIR_BUF (1<<31) +#define FETCH_CHK_BUF_STOP (1<<30) +#define 
FETCH_PASSTHROUGH (1<<29) +#define FETCH_ENDIAN 27 +#define FETCH_PASSTHROUGH_TYPE_MASK (0x3<<27) +#define FETCH_ENDIAN_MASK (0x7<<27) +#define FETCH_BUF_SIZE_MASK (0x7ffffff) +#define FETCH_CMD_PTR_MASK 3 +#define FETCH_CMD_RD_PTR_BIT 5 +#define FETCH_CMD_WR_PTR_BIT 3 +#define FETCH_CMD_NUM_MASK 3 +#define FETCH_CMD_NUM_BIT 0 + +#define ES_COUNT_MASK 0xfff +#define ES_COUNT_BIT 20 +#define ES_REQ_PENDING (1<<19) +#define ES_PASSTHROUGH_EN (1<<18) +#define ES_PASSTHROUGH_TYPE_MASK (3<<16) +#define ES_PASSTHROUGH_TYPE_VIDEO (0<<16) +#define ES_PASSTHROUGH_TYPE_AUDIO (1<<16) +#define ES_PASSTHROUGH_TYPE_SUBTITLE (2<<16) +#define ES_WR_ENDIAN_MASK (0x7) +#define ES_SUB_WR_ENDIAN_BIT 9 +#define ES_SUB_MAN_RD_PTR (1<<8) +#define ES_AUD_WR_ENDIAN_BIT 5 +#define ES_AUD_MAN_RD_PTR (1<<4) +#define ES_VID_WR_ENDIAN_BIT 1 +#define ES_VID_MAN_RD_PTR (1<<0) + +#define PS_CFG_FETCH_DMA_URGENT (1<<31) +#define PS_CFG_STREAM_DMA_URGENT (1<<30) +#define PS_CFG_FORCE_PFIFO_REN (1<<29) +#define PS_CFG_PFIFO_PEAK_EN (1<<28) +#define PS_CFG_SRC_SEL_BIT 24 +#define PS_CFG_SRC_SEL_MASK (3<<PS_CFG_SRC_SEL_BIT) +#define PS_CFG_SRC_SEL_FETCH (0<<PS_CFG_SRC_SEL_BIT) +#define PS_CFG_SRC_SEL_AUX1 (1<<PS_CFG_SRC_SEL_BIT) /* from NDMA */ +#define PS_CFG_SRC_SEL_AUX2 (2<<PS_CFG_SRC_SEL_BIT) +#define PS_CFG_SRC_SEL_AUX3 (3<<PS_CFG_SRC_SEL_BIT) +#define PS_CFG_PFIFO_EMPTY_CNT_BIT 16 +#define PS_CFG_PFIFO_EMPTY_CNT_MASK 0xff +#define PS_CFG_MAX_ES_WR_CYCLE_BIT 12 +#define PS_CFG_MAX_ES_WR_CYCLE_MASK 0xf +#define PS_CFG_STARTCODE_WID_MASK (0x3<<10) +#define PS_CFG_STARTCODE_WID_8 (0x0<<10) +#define PS_CFG_STARTCODE_WID_16 (0x1<<10) +#define PS_CFG_STARTCODE_WID_24 (0x2<<10) +#define PS_CFG_STARTCODE_WID_32 (0x3<<10) +#define PS_CFG_PFIFO_ACCESS_WID_MASK (0x3<<8) +#define PS_CFG_PFIFO_ACCESS_WID_8 (0x0<<8) +#define PS_CFG_PFIFO_ACCESS_WID_16 (0x1<<8) +#define PS_CFG_PFIFO_ACCESS_WID_24 (0x2<<8) +#define PS_CFG_PFIFO_ACCESS_WID_32 (0x3<<8) +#define PS_CFG_MAX_FETCH_CYCLE_BIT 0 +#define 
PS_CFG_MAX_FETCH_CYCLE_MASK 0xff + +#define PARSER_INT_DISABLE_CNT_MASK 0xffff +#define PARSER_INT_DISABLE_CNT_BIT 16 +#define PARSER_INT_HOST_EN_MASK 0xff +#define PARSER_INT_HOST_EN_BIT 8 +#define PARSER_INT_AMRISC_EN_MASK 0xff +#define PARSER_INT_AMRISC_EN_BIT 0 +#define PARSER_INT_ALL 0xff + +#define RESET_PARSER (1<<8) +#define TS_HIU_ENABLE 5 +#define USE_HI_BSF_INTERFACE 7 + +#endif /* RMPARSER_H */
diff --git a/drivers/stream_input/parser/tsdemux.c b/drivers/stream_input/parser/tsdemux.c new file mode 100644 index 0000000..81c94c0 --- /dev/null +++ b/drivers/stream_input/parser/tsdemux.c
@@ -0,0 +1,1324 @@ +/* + * drivers/amlogic/media/stream_input/parser/tsdemux.c + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/dma-mapping.h> +#include <linux/amlogic/media/frame_sync/ptsserv.h> +#include <linux/amlogic/media/frame_sync/tsync.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/amlogic/media/vfm/vframe_provider.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/module.h> + +#include <linux/uaccess.h> +/* #include <mach/am_regs.h> */ +#include <linux/clk.h> +/* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ +/* #include <mach/mod_gate.h> */ +/* #endif */ + +#include "../../frame_provider/decoder/utils/vdec.h" +#include <linux/amlogic/media/utils/vdec_reg.h> +#include "../amports/streambuf_reg.h" +#include "../amports/streambuf.h" +#include <linux/amlogic/media/utils/amports_config.h> +#include <linux/amlogic/media/frame_sync/tsync_pcr.h> + +#include "tsdemux.h" +#include <linux/reset.h> +#include "../amports/amports_priv.h" + +#define MAX_DRM_PACKAGE_SIZE 0x500000 + + +MODULE_PARM_DESC(reset_demux_enable, "\n\t\t Reset demux enable"); +static int reset_demux_enable = 0; +module_param(reset_demux_enable, int, 0644); + +static const char tsdemux_fetch_id[] = "tsdemux-fetch-id"; +static 
const char tsdemux_irq_id[] = "tsdemux-irq-id"; + +static u32 curr_pcr_num = 0xffff; +static u32 curr_vid_id = 0xffff; +static u32 curr_aud_id = 0xffff; +static u32 curr_sub_id = 0xffff; +static u32 curr_pcr_id = 0xffff; + +static DECLARE_WAIT_QUEUE_HEAD(wq); +static u32 fetch_done; +static u32 discontinued_counter; +static u32 first_pcr; +static u8 pcrscr_valid; +static u8 pcraudio_valid; +static u8 pcrvideo_valid; +static u8 pcr_init_flag; + +static int demux_skipbyte; + +static struct tsdemux_ops *demux_ops; +static DEFINE_SPINLOCK(demux_ops_lock); + +static int enable_demux_driver(void) +{ + return demux_ops ? 1 : 0; +} + +void tsdemux_set_ops(struct tsdemux_ops *ops) +{ + unsigned long flags; + + spin_lock_irqsave(&demux_ops_lock, flags); + demux_ops = ops; + spin_unlock_irqrestore(&demux_ops_lock, flags); +} +EXPORT_SYMBOL(tsdemux_set_ops); + +int tsdemux_set_reset_flag_ext(void) +{ + int r = 0; + + if (demux_ops && demux_ops->set_reset_flag) + r = demux_ops->set_reset_flag(); + + return r; +} + +int tsdemux_set_reset_flag(void) +{ + unsigned long flags; + int r; + + spin_lock_irqsave(&demux_ops_lock, flags); + r = tsdemux_set_reset_flag_ext(); + spin_unlock_irqrestore(&demux_ops_lock, flags); + + return r; +} + +static int tsdemux_reset(void) +{ + unsigned long flags; + int r = 0; + + spin_lock_irqsave(&demux_ops_lock, flags); + if (demux_ops && demux_ops->reset) { + tsdemux_set_reset_flag_ext(); + r = demux_ops->reset(); + } + spin_unlock_irqrestore(&demux_ops_lock, flags); + + return r; +} + +static int tsdemux_request_irq(irq_handler_t handler, void *data) +{ + unsigned long flags; + int r = 0; + + spin_lock_irqsave(&demux_ops_lock, flags); + if (demux_ops && demux_ops->request_irq) + r = demux_ops->request_irq(handler, data); + spin_unlock_irqrestore(&demux_ops_lock, flags); + + return r; +} + +static int tsdemux_free_irq(void) +{ + unsigned long flags; + int r = 0; + + spin_lock_irqsave(&demux_ops_lock, flags); + if (demux_ops && demux_ops->free_irq) + 
r = demux_ops->free_irq(); + spin_unlock_irqrestore(&demux_ops_lock, flags); + + return r; +} + +static int tsdemux_set_vid(int vpid) +{ + unsigned long flags; + int r = 0; + + spin_lock_irqsave(&demux_ops_lock, flags); + if (demux_ops && demux_ops->set_vid) + r = demux_ops->set_vid(vpid); + spin_unlock_irqrestore(&demux_ops_lock, flags); + + return r; +} + +static int tsdemux_set_aid(int apid) +{ + unsigned long flags; + int r = 0; + + spin_lock_irqsave(&demux_ops_lock, flags); + if (demux_ops && demux_ops->set_aid) + r = demux_ops->set_aid(apid); + spin_unlock_irqrestore(&demux_ops_lock, flags); + + return r; +} + +static int tsdemux_set_sid(int spid) +{ + unsigned long flags; + int r = 0; + + spin_lock_irqsave(&demux_ops_lock, flags); + if (demux_ops && demux_ops->set_sid) + r = demux_ops->set_sid(spid); + spin_unlock_irqrestore(&demux_ops_lock, flags); + + return r; +} + +static int tsdemux_set_pcrid(int pcrpid) +{ + unsigned long flags; + int r = 0; + + spin_lock_irqsave(&demux_ops_lock, flags); + if (demux_ops && demux_ops->set_pcrid) + r = demux_ops->set_pcrid(pcrpid); + spin_unlock_irqrestore(&demux_ops_lock, flags); + + return r; +} + +static int tsdemux_set_skip_byte(int skipbyte) +{ + unsigned long flags; + int r = 0; + + spin_lock_irqsave(&demux_ops_lock, flags); + if (demux_ops && demux_ops->set_skipbyte) + r = demux_ops->set_skipbyte(skipbyte); + spin_unlock_irqrestore(&demux_ops_lock, flags); + + return r; +} + +static int tsdemux_config(void) +{ + return 0; +} + +static void tsdemux_pcr_set(unsigned int pcr); +/*TODO irq*/ +/* bit 15 ---------------*/ +/* bit 12 --VIDEO_PTS[32]*/ +/* bit 0 ---------------*/ +/*Read the 13th bit of STB_PTS_DTS_STATUS register +correspond to the highest bit of video pts*/ +static irqreturn_t tsdemux_isr(int irq, void *dev_id) +{ + u32 int_status = 0; + int id = (long)dev_id; + + if (!enable_demux_driver()) { + int_status = READ_DEMUX_REG(STB_INT_STATUS); + } else { + if (id == 0) + int_status = 
READ_DEMUX_REG(STB_INT_STATUS); + else if (id == 1) + int_status = READ_DEMUX_REG(STB_INT_STATUS_2); + else if (id == 2) + int_status = READ_DEMUX_REG(STB_INT_STATUS_3); + } + + if (int_status & (1 << NEW_PDTS_READY)) { + if (!enable_demux_driver()) { + u32 pdts_status = READ_DEMUX_REG(STB_PTS_DTS_STATUS); + u64 vpts; + + vpts = READ_MPEG_REG(VIDEO_PTS_DEMUX); + vpts &= 0x00000000FFFFFFFF; + if (pdts_status & 0x1000) { + vpts = vpts | (1LL<<32); + } + + if (pdts_status & (1 << VIDEO_PTS_READY)) + pts_checkin_wrptr_pts33(PTS_TYPE_VIDEO, + READ_DEMUX_REG(VIDEO_PDTS_WR_PTR), + vpts); + + if (pdts_status & (1 << AUDIO_PTS_READY)) + pts_checkin_wrptr(PTS_TYPE_AUDIO, + READ_DEMUX_REG(AUDIO_PDTS_WR_PTR), + READ_DEMUX_REG(AUDIO_PTS_DEMUX)); + + WRITE_DEMUX_REG(STB_PTS_DTS_STATUS, pdts_status); + } else { +#define DMX_READ_REG(i, r)\ + ((i) ? ((i == 1) ? READ_DEMUX_REG(r##_2) : \ + READ_DEMUX_REG(r##_3)) : READ_DEMUX_REG(r)) + u64 vpts; + u32 pdts_status = DMX_READ_REG(id, STB_PTS_DTS_STATUS); + vpts = DMX_READ_REG(id, VIDEO_PTS_DEMUX); + vpts &= 0x00000000FFFFFFFF; + if (pdts_status & 0x1000) { + vpts = vpts | (1LL<<32); + } + + if (pdts_status & (1 << VIDEO_PTS_READY)) + pts_checkin_wrptr_pts33(PTS_TYPE_VIDEO, + DMX_READ_REG(id, VIDEO_PDTS_WR_PTR), + vpts); + + if (pdts_status & (1 << AUDIO_PTS_READY)) + pts_checkin_wrptr(PTS_TYPE_AUDIO, + DMX_READ_REG(id, AUDIO_PDTS_WR_PTR), + DMX_READ_REG(id, AUDIO_PTS_DEMUX)); + + if (id == 1) + WRITE_DEMUX_REG(STB_PTS_DTS_STATUS_2, + pdts_status); + else if (id == 2) + WRITE_DEMUX_REG(STB_PTS_DTS_STATUS_3, + pdts_status); + else + WRITE_DEMUX_REG(STB_PTS_DTS_STATUS, + pdts_status); + } + } + if (int_status & (1 << DIS_CONTINUITY_PACKET)) { + discontinued_counter++; + /* pr_info("discontinued counter=%d\n",discontinued_counter); */ + } + if (int_status & (1 << SUB_PES_READY)) { + /* TODO: put data to somewhere */ + /* pr_info("subtitle pes ready\n"); */ + wakeup_sub_poll(); + } + if (int_status & (1<<PCR_READY)) { + unsigned int 
pcr_pts = 0xffffffff; + pcr_pts = DMX_READ_REG(id, PCR_DEMUX); + tsdemux_pcr_set(pcr_pts); + } + + if (!enable_demux_driver()) + WRITE_DEMUX_REG(STB_INT_STATUS, int_status); + + return IRQ_HANDLED; +} + +static irqreturn_t parser_isr(int irq, void *dev_id) +{ + u32 int_status = READ_PARSER_REG(PARSER_INT_STATUS); + + WRITE_PARSER_REG(PARSER_INT_STATUS, int_status); + + if (int_status & PARSER_INTSTAT_FETCH_CMD) { + fetch_done = 1; + + wake_up_interruptible(&wq); + } + + return IRQ_HANDLED; +} + +static ssize_t _tsdemux_write(const char __user *buf, size_t count, + int isphybuf) +{ + size_t r = count; + const char __user *p = buf; + u32 len; + int ret; + dma_addr_t dma_addr = 0; + + if (r > 0) { + if (isphybuf) + len = count; + else { + len = min_t(size_t, r, FETCHBUF_SIZE); + if (copy_from_user(fetchbuf, p, len)) + return -EFAULT; + + dma_addr = + dma_map_single(amports_get_dma_device(), + fetchbuf, + FETCHBUF_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(amports_get_dma_device(), + dma_addr)) + return -EFAULT; + + + } + + fetch_done = 0; + + wmb(); /* Ensure fetchbuf contents visible */ + + if (isphybuf) { + u32 buf_32 = (unsigned long)buf & 0xffffffff; + WRITE_PARSER_REG(PARSER_FETCH_ADDR, buf_32); + } else { + WRITE_PARSER_REG(PARSER_FETCH_ADDR, dma_addr); + dma_unmap_single(amports_get_dma_device(), dma_addr, + FETCHBUF_SIZE, DMA_TO_DEVICE); + } + + WRITE_PARSER_REG(PARSER_FETCH_CMD, (7 << FETCH_ENDIAN) | len); + + + ret = + wait_event_interruptible_timeout(wq, fetch_done != 0, + HZ / 2); + if (ret == 0) { + WRITE_PARSER_REG(PARSER_FETCH_CMD, 0); + pr_info("write timeout, retry\n"); + return -EAGAIN; + } else if (ret < 0) + return -ERESTARTSYS; + + p += len; + r -= len; + } + + return count - r; +} + +#define PCR_EN 12 + +static int reset_pcr_regs(void) +{ + u32 pcr_num; + u32 pcr_regs = 0; + if (curr_pcr_id >= 0x1FFF) + return 0; + /* set paramater to fetch pcr */ + pcr_num = 0; + if (curr_pcr_id == curr_vid_id) + pcr_num = 0; + else if (curr_pcr_id == 
curr_aud_id) + pcr_num = 1; + else if (curr_pcr_id == curr_sub_id) + pcr_num = 2; + else + pcr_num = 3; + if (pcr_num != curr_pcr_num) { + u32 clk_unit = 0; + u32 clk_81 = 0; + struct clk *clk; + //clk = clk_get(NULL,"clk81"); + clk= devm_clk_get(amports_get_dma_device(),"clk_81"); + if (IS_ERR(clk) || clk == 0) { + pr_info("[%s:%d] error clock\n", __func__, __LINE__); + return 0; + } + clk_81 = clk_get_rate(clk); + clk_unit = clk_81 / 90000; + pr_info("[%s:%d] clk_81 = %x clk_unit =%x\n", __func__, + __LINE__, clk_81, clk_unit); + pcr_regs = 1 << PCR_EN | clk_unit; + pr_info("[tsdemux_init] the set pcr_regs =%x\n", pcr_regs); + if (READ_DEMUX_REG(TS_HIU_CTL_2) & 0x80) { + WRITE_DEMUX_REG(PCR90K_CTL_2, pcr_regs); + WRITE_DEMUX_REG(ASSIGN_PID_NUMBER_2, pcr_num); + pr_info("[tsdemux_init] To use device 2,pcr_num=%d\n", + pcr_num); + pr_info("tsdemux_init] the read pcr_regs= %x\n", + READ_DEMUX_REG(PCR90K_CTL_2)); + } else if (READ_DEMUX_REG(TS_HIU_CTL_3) & 0x80) { + WRITE_DEMUX_REG(PCR90K_CTL_3, pcr_regs); + WRITE_DEMUX_REG(ASSIGN_PID_NUMBER_3, pcr_num); + pr_info("[tsdemux_init] To use device 3,pcr_num=%d\n", + pcr_num); + pr_info("tsdemux_init] the read pcr_regs= %x\n", + READ_DEMUX_REG(PCR90K_CTL_3)); + } else { + WRITE_DEMUX_REG(PCR90K_CTL, pcr_regs); + WRITE_DEMUX_REG(ASSIGN_PID_NUMBER, pcr_num); + pr_info("[tsdemux_init] To use device 1,pcr_num=%d\n", + pcr_num); + pr_info("tsdemux_init] the read pcr_regs= %x\n", + READ_DEMUX_REG(PCR90K_CTL)); + } + curr_pcr_num = pcr_num; + } + return 1; +} + +s32 tsdemux_init(u32 vid, u32 aid, u32 sid, u32 pcrid, bool is_hevc, + struct vdec_s *vdec) +{ + s32 r; + u32 parser_sub_start_ptr; + u32 parser_sub_end_ptr; + u32 parser_sub_rp; + pcrvideo_valid = 0; + pcraudio_valid = 0; + pcr_init_flag = 0; + + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + /*TODO clk */ + /* + *switch_mod_gate_by_type(MOD_DEMUX, 1); + */ + /* #endif */ + + amports_switch_gate("demux", 1); + + parser_sub_start_ptr = 
READ_PARSER_REG(PARSER_SUB_START_PTR); + parser_sub_end_ptr = READ_PARSER_REG(PARSER_SUB_END_PTR); + parser_sub_rp = READ_PARSER_REG(PARSER_SUB_RP); + + WRITE_RESET_REG(RESET1_REGISTER, RESET_PARSER); + + if (enable_demux_driver()) { + tsdemux_reset(); + } else { + WRITE_RESET_REG(RESET1_REGISTER, RESET_PARSER | RESET_DEMUXSTB); + + WRITE_DEMUX_REG(STB_TOP_CONFIG, 0); + WRITE_DEMUX_REG(DEMUX_CONTROL, 0); + } + + /* set PID filter */ + pr_info + ("tsdemux video_pid = 0x%x, audio_pid = 0x%x,", + vid, aid); + pr_info + ("sub_pid = 0x%x, pcrid = 0x%x\n", + sid, pcrid); + + if (!enable_demux_driver()) { + WRITE_DEMUX_REG(FM_WR_DATA, + (((vid < 0x1fff) + ? (vid & 0x1fff) | (VIDEO_PACKET << 13) + : 0xffff) << 16) + | ((aid < 0x1fff) + ? (aid & 0x1fff) | (AUDIO_PACKET << 13) + : 0xffff)); + WRITE_DEMUX_REG(FM_WR_ADDR, 0x8000); + while (READ_DEMUX_REG(FM_WR_ADDR) & 0x8000) + ; + + WRITE_DEMUX_REG(FM_WR_DATA, + (((sid < 0x1fff) + ? (sid & 0x1fff) | (SUB_PACKET << 13) + : 0xffff) << 16) + | 0xffff); + WRITE_DEMUX_REG(FM_WR_ADDR, 0x8001); + while (READ_DEMUX_REG(FM_WR_ADDR) & 0x8000) + ; + + WRITE_DEMUX_REG(MAX_FM_COMP_ADDR, 1); + + WRITE_DEMUX_REG(STB_INT_MASK, 0); + WRITE_DEMUX_REG(STB_INT_STATUS, 0xffff); + + /* TS data path */ + WRITE_DEMUX_REG(FEC_INPUT_CONTROL, 0x7000); + WRITE_DEMUX_REG(DEMUX_MEM_REQ_EN, + (1 << VIDEO_PACKET) | + (1 << AUDIO_PACKET) | (1 << SUB_PACKET)); + WRITE_DEMUX_REG(DEMUX_ENDIAN, + (7 << OTHER_ENDIAN) | + (7 << BYPASS_ENDIAN) | (0 << SECTION_ENDIAN)); + WRITE_DEMUX_REG(TS_HIU_CTL, 1 << USE_HI_BSF_INTERFACE); + WRITE_DEMUX_REG(TS_FILE_CONFIG, + (demux_skipbyte << 16) | + (6 << DES_OUT_DLY) | + (3 << TRANSPORT_SCRAMBLING_CONTROL_ODD) | + (1 << TS_HIU_ENABLE) | (4 << FEC_FILE_CLK_DIV)); + + /* enable TS demux */ + WRITE_DEMUX_REG(DEMUX_CONTROL, + (1 << STB_DEMUX_ENABLE) | + (1 << KEEP_DUPLICATE_PACKAGE)); + } + + if (fetchbuf == 0) { + pr_info("%s: no fetchbuf\n", __func__); + return -ENOMEM; + } + + /* hook stream buffer with PARSER */ + if 
(has_hevc_vdec() && is_hevc) { + WRITE_PARSER_REG(PARSER_VIDEO_START_PTR, vdec->input.start); + WRITE_PARSER_REG(PARSER_VIDEO_END_PTR, vdec->input.start + + vdec->input.size - 8); + + if (vdec_single(vdec)) { + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, + ES_VID_MAN_RD_PTR); + /* set vififo_vbuf_rp_sel=>hevc */ + WRITE_VREG(DOS_GEN_CTRL0, 3 << 1); + /* set use_parser_vbuf_wp */ + SET_VREG_MASK(HEVC_STREAM_CONTROL, + (1 << 3) | (0 << 4)); + /* set stream_fetch_enable */ + SET_VREG_MASK(HEVC_STREAM_CONTROL, 1); + /* set stream_buffer_hole with 256 bytes */ + SET_VREG_MASK(HEVC_STREAM_FIFO_CTL, + (1 << 29)); + } else { + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, + ES_VID_MAN_RD_PTR); + WRITE_PARSER_REG(PARSER_VIDEO_WP, vdec->input.start); + WRITE_PARSER_REG(PARSER_VIDEO_RP, vdec->input.start); + } + } else { + WRITE_PARSER_REG(PARSER_VIDEO_START_PTR, vdec->input.start); + WRITE_PARSER_REG(PARSER_VIDEO_END_PTR, vdec->input.start + + vdec->input.size - 8); + + if (vdec_single(vdec)) { + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, + ES_VID_MAN_RD_PTR); + + WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + CLEAR_VREG_MASK(VLD_MEM_VIFIFO_BUF_CNTL, + MEM_BUFCTRL_INIT); + /* set vififo_vbuf_rp_sel=>vdec */ + if (has_hevc_vdec()) + WRITE_VREG(DOS_GEN_CTRL0, 0); + } else { + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, + ES_VID_MAN_RD_PTR); + WRITE_PARSER_REG(PARSER_VIDEO_WP, vdec->input.start); + WRITE_PARSER_REG(PARSER_VIDEO_RP, vdec->input.start); + } + } + + WRITE_PARSER_REG(PARSER_AUDIO_START_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_PARSER_REG(PARSER_AUDIO_END_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_END_PTR)); + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_AUD_MAN_RD_PTR); + + WRITE_PARSER_REG(PARSER_CONFIG, + (10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) | + (1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) | + (16 << PS_CFG_MAX_FETCH_CYCLE_BIT)); + + WRITE_AIU_REG(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + CLEAR_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + + if 
(!enable_demux_driver() || ((sid > 0) && (sid < 0x1fff))) { + WRITE_PARSER_REG(PARSER_SUB_START_PTR, parser_sub_start_ptr); + WRITE_PARSER_REG(PARSER_SUB_END_PTR, parser_sub_end_ptr); + WRITE_PARSER_REG(PARSER_SUB_RP, parser_sub_rp); + } + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, + (7 << ES_SUB_WR_ENDIAN_BIT) | ES_SUB_MAN_RD_PTR); + + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + if (vid != 0xffff) { + if (has_hevc_vdec()) + r = pts_start((is_hevc) ? PTS_TYPE_HEVC : PTS_TYPE_VIDEO); + else + /* #endif */ + r = pts_start(PTS_TYPE_VIDEO); + if ((r < 0) && (r != -EBUSY)) { + pr_info("Video pts start failed.(%d)\n", r); + goto err1; + } + } + + if (aid != 0xffff) { + r = pts_start(PTS_TYPE_AUDIO); + if ((r < 0) && (r != -EBUSY)) { + pr_info("Audio pts start failed.(%d)\n", r); + goto err2; + } + } + /*TODO irq */ + + r = vdec_request_irq(PARSER_IRQ, parser_isr, + "tsdemux-fetch", (void *)tsdemux_fetch_id); + + if (r) + goto err3; + + WRITE_PARSER_REG(PARSER_INT_STATUS, 0xffff); + WRITE_PARSER_REG(PARSER_INT_ENABLE, + PARSER_INTSTAT_FETCH_CMD << PARSER_INT_HOST_EN_BIT); + + WRITE_PARSER_REG(PARSER_VIDEO_HOLE, 0x400); + WRITE_PARSER_REG(PARSER_AUDIO_HOLE, 0x400); + + discontinued_counter = 0; + + if (!enable_demux_driver()) { + /*TODO irq */ + + r = vdec_request_irq(DEMUX_IRQ, tsdemux_isr, + "tsdemux-irq", (void *)tsdemux_irq_id); + + WRITE_DEMUX_REG(STB_INT_MASK, (1 << SUB_PES_READY) + | (1 << NEW_PDTS_READY) + | (1 << DIS_CONTINUITY_PACKET)); + if (r) + goto err4; + } else { + tsdemux_config(); + tsdemux_request_irq(tsdemux_isr, (void *)tsdemux_irq_id); + if (vid < 0x1FFF) { + curr_vid_id = vid; + tsdemux_set_vid(vid); + pcrvideo_valid = 1; + } + if (aid < 0x1FFF) { + curr_aud_id = aid; + tsdemux_set_aid(aid); + pcraudio_valid = 1; + } + if (sid < 0x1FFF) { + curr_sub_id = sid; + tsdemux_set_sid(sid); + } + + curr_pcr_id = pcrid; + pcrscr_valid = reset_pcr_regs(); + + if ((pcrid < 0x1FFF) && (pcrid != vid) && (pcrid != aid) + && (pcrid != sid)) + 
tsdemux_set_pcrid(pcrid); + } + + first_pcr = 0; + + return 0; + +err4: + /*TODO irq */ + + if (!enable_demux_driver()) + vdec_free_irq(PARSER_IRQ, (void *)tsdemux_fetch_id); + +err3: + pts_stop(PTS_TYPE_AUDIO); +err2: + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */ + if (has_hevc_vdec()) + pts_stop((is_hevc) ? PTS_TYPE_HEVC : PTS_TYPE_VIDEO); + else + /* #endif */ + pts_stop(PTS_TYPE_VIDEO); +err1: + pr_info("TS Demux init failed.\n"); + return -ENOENT; +} + +void tsdemux_release(void) +{ + pcrscr_valid = 0; + first_pcr = 0; + pcr_init_flag = 0; + + WRITE_PARSER_REG(PARSER_INT_ENABLE, 0); + WRITE_PARSER_REG(PARSER_VIDEO_HOLE, 0); + WRITE_PARSER_REG(PARSER_AUDIO_HOLE, 0); + + /*TODO irq */ + + vdec_free_irq(PARSER_IRQ, (void *)tsdemux_fetch_id); + + if (!enable_demux_driver()) { + WRITE_DEMUX_REG(STB_INT_MASK, 0); + /*TODO irq */ + + vdec_free_irq(DEMUX_IRQ, (void *)tsdemux_irq_id); + } else { + + tsdemux_set_aid(0xffff); + tsdemux_set_vid(0xffff); + tsdemux_set_sid(0xffff); + tsdemux_set_pcrid(0xffff); + tsdemux_free_irq(); + + curr_vid_id = 0xffff; + curr_aud_id = 0xffff; + curr_sub_id = 0xffff; + curr_pcr_id = 0xffff; + curr_pcr_num = 0xffff; + } + + pts_stop(PTS_TYPE_VIDEO); + pts_stop(PTS_TYPE_AUDIO); + + WRITE_RESET_REG(RESET1_REGISTER, RESET_PARSER); +#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_VID_MAN_RD_PTR); + WRITE_PARSER_REG(PARSER_VIDEO_WP, 0); + WRITE_PARSER_REG(PARSER_VIDEO_RP, 0); +#endif + + if (enable_demux_driver()) + tsdemux_reset(); + + /* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */ + /*TODO clk */ + /* + *switch_mod_gate_by_type(MOD_DEMUX, 0); + */ + /* #endif */ + amports_switch_gate("demux", 0); + +} +EXPORT_SYMBOL(tsdemux_release); + +static int limited_delay_check(struct file *file, + struct stream_buf_s *vbuf, + struct stream_buf_s *abuf, + const char __user *buf, size_t count) +{ + struct port_priv_s *priv = (struct port_priv_s *)file->private_data; + struct stream_port_s *port = priv->port; + 
int write_size; + + if (!((port->flag & PORT_FLAG_VID) && + (port->flag & PORT_FLAG_AID))) { + struct stream_buf_s *buf = + (port->flag & PORT_FLAG_VID) ? vbuf : abuf; + + return min_t(int, count, stbuf_space(buf)); + } + + if (vbuf->max_buffer_delay_ms > 0 && abuf->max_buffer_delay_ms > 0 && + stbuf_level(vbuf) > 1024 && stbuf_level(abuf) > 256) { + int vdelay = + calculation_stream_delayed_ms(PTS_TYPE_VIDEO, + NULL, NULL); + int adelay = + calculation_stream_delayed_ms(PTS_TYPE_AUDIO, + NULL, NULL); + /*max wait 100ms,if timeout,try again top level. */ + int maxretry = 10; + /*too big delay,do wait now. */ + /*if noblock mode,don't do wait. */ + if (!(file->f_flags & O_NONBLOCK)) { + while (vdelay > vbuf->max_buffer_delay_ms + && adelay > abuf->max_buffer_delay_ms + && maxretry-- > 0) { + msleep(20); + vdelay = + calculation_stream_delayed_ms + (PTS_TYPE_VIDEO, NULL, NULL); + adelay = + calculation_stream_delayed_ms + (PTS_TYPE_AUDIO, NULL, NULL); + } + } + if (vdelay > vbuf->max_buffer_delay_ms + && adelay > abuf->max_buffer_delay_ms) + return 0; + } + write_size = min(stbuf_space(vbuf), stbuf_space(abuf)); + write_size = min_t(int, count, write_size); + return write_size; +} + +ssize_t drm_tswrite(struct file *file, + struct stream_buf_s *vbuf, + struct stream_buf_s *abuf, + const char __user *buf, size_t count) +{ + s32 r; + u32 realcount = count; + u32 havewritebytes = 0; + + struct drm_info tmpmm; + struct drm_info *drm = &tmpmm; + u32 res = 0; + int isphybuf = 0; + unsigned long realbuf; + + struct port_priv_s *priv = (struct port_priv_s *)file->private_data; + struct stream_port_s *port = priv->port; + size_t wait_size, write_size; + + if (buf == NULL || count == 0) + return -EINVAL; + + res = copy_from_user(drm, buf, sizeof(struct drm_info)); + if (res) { + pr_info("drm kmalloc failed res[%d]\n", res); + return -EFAULT; + } + + if (drm->drm_flag == TYPE_DRMINFO && drm->drm_level == DRM_LEVEL1) { + /* buf only has drminfo not have esdata; */ + if 
(drm->drm_pktsize <= MAX_DRM_PACKAGE_SIZE) + realcount = drm->drm_pktsize; + else { + pr_err("drm package size is error, size is %u\n", drm->drm_pktsize); + return -EINVAL; + } + realbuf = drm->drm_phy; + isphybuf = 1; + } else + realbuf = (unsigned long)buf; + /* pr_info("drm->drm_flag = 0x%x,realcount = %d , buf = 0x%x ",*/ + /*drm->drm_flag,realcount, buf); */ + + count = realcount; + + while (count > 0) { + if ((stbuf_space(vbuf) < count) || + (stbuf_space(abuf) < count)) { + if (file->f_flags & O_NONBLOCK) { + int v_stbuf_space = stbuf_space(vbuf); + int a_stbuf_space = stbuf_space(abuf); + + write_size = min(v_stbuf_space, a_stbuf_space); + /*have 188 bytes,write now., */ + if (write_size <= 188) + return -EAGAIN; + } else { + wait_size = + min(stbuf_canusesize(vbuf) / 8, + stbuf_canusesize(abuf) / 4); + if ((port->flag & PORT_FLAG_VID) + && (stbuf_space(vbuf) < wait_size)) { + r = stbuf_wait_space(vbuf, wait_size); + + if (r < 0) { + if (r != -EAGAIN) + pr_info + ("write no space--- "); + if (r != -EAGAIN) + pr_info + ("no space,%d--%d,r-%d\n", + stbuf_space(vbuf), + stbuf_space(abuf), r); + return r; + } + } + + if ((port->flag & PORT_FLAG_AID) + && (stbuf_space(abuf) < wait_size)) { + r = stbuf_wait_space(abuf, wait_size); + + if (r < 0) { + pr_info + ("write no stbuf_wait_space--"); + pr_info + ("no space,%d--%d,r-%d\n", + stbuf_space(vbuf), + stbuf_space(abuf), r); + return r; + } + } + } + } + + if ((port->flag & PORT_FLAG_VID) && + (port->flag & PORT_FLAG_AID)) { + write_size = min(stbuf_space(vbuf), stbuf_space(abuf)); + write_size = min(count, write_size); + } else { + struct stream_buf_s *buf = + (port->flag & PORT_FLAG_VID) ? 
vbuf : abuf; + + write_size = min_t(int, count, stbuf_space(buf)); + } + /* pr_info("write_size = %d,count = %d,\n",*/ + /*write_size, count); */ + if (write_size > 0) { + r = _tsdemux_write((const char __user *)realbuf + havewritebytes, + write_size, isphybuf); + if (r < 0) { + if (r != -EAGAIN) + pr_info + ("vspace %d--aspace %d,r-%d\n", + stbuf_space(vbuf), + stbuf_space(abuf), r); + return r; + } + } + else + return -EAGAIN; + + havewritebytes += r; + + /* pr_info("havewritebytes = %d, r = %d,\n",*/ + /*havewritebytes, r); */ + if (havewritebytes == realcount) + break; /* write ok; */ + else if (havewritebytes > realcount) + pr_info(" error ! write too much havewritebytes = %u, r = %u\n", + (u32)havewritebytes,(u32)realcount); + + count -= r; + } + return havewritebytes; +} + +ssize_t tsdemux_write(struct file *file, + struct stream_buf_s *vbuf, + struct stream_buf_s *abuf, + const char __user *buf, size_t count) +{ + s32 r; + struct port_priv_s *priv = (struct port_priv_s *)file->private_data; + struct stream_port_s *port = priv->port; + size_t wait_size, write_size; + + if ((stbuf_space(vbuf) < count) || (stbuf_space(abuf) < count)) { + if (file->f_flags & O_NONBLOCK) { + write_size = min(stbuf_space(vbuf), stbuf_space(abuf)); + if (write_size <= 188) /*have 188 bytes,write now., */ + return -EAGAIN; + } else { + wait_size = + min(stbuf_canusesize(vbuf) / 8, + stbuf_canusesize(abuf) / 4); + if ((port->flag & PORT_FLAG_VID) + && (stbuf_space(vbuf) < wait_size)) { + r = stbuf_wait_space(vbuf, wait_size); + + if (r < 0) { + /* pr_info("write no space--- "); + * pr_info("no space,%d--%d,r-%d\n", + * stbuf_space(vbuf), + * stbuf_space(abuf),r); + */ + return r; + } + } + + if ((port->flag & PORT_FLAG_AID) + && (stbuf_space(abuf) < wait_size)) { + r = stbuf_wait_space(abuf, wait_size); + + if (r < 0) { + /* pr_info("write no stbuf_wait_space")' + * pr_info{"---no space,%d--%d,r-%d\n", + * stbuf_space(vbuf), + * stbuf_space(abuf),r); + */ + return r; + } + } + } + } 
+ vbuf->last_write_jiffies64 = jiffies_64; + abuf->last_write_jiffies64 = jiffies_64; + write_size = limited_delay_check(file, vbuf, abuf, buf, count); + if (write_size > 0) + return _tsdemux_write(buf, write_size, 0); + else + return -EAGAIN; +} + +int get_discontinue_counter(void) +{ + return discontinued_counter; +} +EXPORT_SYMBOL(get_discontinue_counter); + +static ssize_t discontinue_counter_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", discontinued_counter); +} + +static CLASS_ATTR_RO(discontinue_counter); + +static struct attribute *tsdemux_class_attrs[] = { + &class_attr_discontinue_counter.attr, + NULL +}; + +ATTRIBUTE_GROUPS(tsdemux_class); + +static struct class tsdemux_class = { + .name = "tsdemux", + .class_groups = tsdemux_class_groups, +}; + +int tsdemux_class_register(void) +{ + int r = class_register(&tsdemux_class); + + if (r < 0) + pr_info("register tsdemux class error!\n"); + discontinued_counter = 0; + return r; +} + +void tsdemux_class_unregister(void) +{ + class_unregister(&tsdemux_class); +} + +void tsdemux_change_avid(unsigned int vid, unsigned int aid) +{ + if (!enable_demux_driver()) { + WRITE_DEMUX_REG(FM_WR_DATA, + (((vid & 0x1fff) | (VIDEO_PACKET << 13)) << 16) + | ((aid & 0x1fff) | (AUDIO_PACKET << 13))); + WRITE_DEMUX_REG(FM_WR_ADDR, 0x8000); + while (READ_DEMUX_REG(FM_WR_ADDR) & 0x8000) + ; + } else { + if (curr_vid_id != vid) { + tsdemux_set_vid(vid); + curr_vid_id = vid; + } + if (curr_aud_id != aid) { + tsdemux_set_aid(aid); + curr_aud_id = aid; + } + reset_pcr_regs(); + } +} + +void tsdemux_change_sid(unsigned int sid) +{ + if (!enable_demux_driver()) { + WRITE_DEMUX_REG(FM_WR_DATA, + (((sid & 0x1fff) | (SUB_PACKET << 13)) << 16) + | 0xffff); + WRITE_DEMUX_REG(FM_WR_ADDR, 0x8001); + while (READ_DEMUX_REG(FM_WR_ADDR) & 0x8000) + ; + } else { + curr_sub_id = sid; + + tsdemux_set_sid(sid); + + reset_pcr_regs(); + } + +} + +void tsdemux_audio_reset(void) +{ + ulong flags; + 
unsigned long xflags = 0; + + spin_lock_irqsave(&demux_ops_lock, flags); + if (demux_ops && demux_ops->hw_dmx_lock) + xflags = demux_ops->hw_dmx_lock(xflags); + + WRITE_PARSER_REG(PARSER_AUDIO_WP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_PARSER_REG(PARSER_AUDIO_RP, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + + WRITE_PARSER_REG(PARSER_AUDIO_START_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_START_PTR)); + WRITE_PARSER_REG(PARSER_AUDIO_END_PTR, + READ_AIU_REG(AIU_MEM_AIFIFO_END_PTR)); + CLEAR_PARSER_REG_MASK(PARSER_ES_CONTROL, ES_AUD_MAN_RD_PTR); + + WRITE_AIU_REG(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + CLEAR_AIU_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT); + + if (demux_ops && demux_ops->hw_dmx_unlock) + demux_ops->hw_dmx_unlock(xflags); + spin_unlock_irqrestore(&demux_ops_lock, flags); + if (reset_demux_enable == 1) + tsdemux_reset(); +} + +void tsdemux_sub_reset(void) +{ + ulong flags; + u32 parser_sub_start_ptr; + u32 parser_sub_end_ptr; + unsigned long xflags = 0; + + spin_lock_irqsave(&demux_ops_lock, flags); + if (demux_ops && demux_ops->hw_dmx_lock) + xflags = demux_ops->hw_dmx_lock(xflags); + + parser_sub_start_ptr = READ_PARSER_REG(PARSER_SUB_START_PTR); + parser_sub_end_ptr = READ_PARSER_REG(PARSER_SUB_END_PTR); + + WRITE_PARSER_REG(PARSER_SUB_START_PTR, parser_sub_start_ptr); + WRITE_PARSER_REG(PARSER_SUB_END_PTR, parser_sub_end_ptr); + WRITE_PARSER_REG(PARSER_SUB_RP, parser_sub_start_ptr); + WRITE_PARSER_REG(PARSER_SUB_WP, parser_sub_start_ptr); + SET_PARSER_REG_MASK(PARSER_ES_CONTROL, + (7 << ES_SUB_WR_ENDIAN_BIT) | ES_SUB_MAN_RD_PTR); + + if (demux_ops && demux_ops->hw_dmx_unlock) + demux_ops->hw_dmx_unlock(xflags); + spin_unlock_irqrestore(&demux_ops_lock, flags); +} + +void tsdemux_set_skipbyte(int skipbyte) +{ + if (!enable_demux_driver()) + demux_skipbyte = skipbyte; + else + tsdemux_set_skip_byte(skipbyte); + +} + +void tsdemux_set_demux(int dev) +{ + if (enable_demux_driver()) { + unsigned long flags; + int r = 0; + + 
spin_lock_irqsave(&demux_ops_lock, flags); + if (demux_ops && demux_ops->set_demux) + r = demux_ops->set_demux(dev); + spin_unlock_irqrestore(&demux_ops_lock, flags); + } +} + +u32 tsdemux_pcrscr_get(void) +{ + u32 pcr = 0; + + if (pcrscr_valid == 0) + return 0; + + if (READ_DEMUX_REG(TS_HIU_CTL_2) & 0x80) + pcr = READ_DEMUX_REG(PCR_DEMUX_2); + else if (READ_DEMUX_REG(TS_HIU_CTL_3) & 0x80) + pcr = READ_DEMUX_REG(PCR_DEMUX_3); + else + pcr = READ_DEMUX_REG(PCR_DEMUX); + if (first_pcr == 0) + first_pcr = pcr; + return pcr; +} + +u32 tsdemux_first_pcrscr_get(void) +{ + if (pcrscr_valid == 0) + return 0; + + if (first_pcr == 0) { + u32 pcr; + if (READ_DEMUX_REG(TS_HIU_CTL_2) & 0x80) + pcr = READ_DEMUX_REG(PCR_DEMUX_2); + else if (READ_DEMUX_REG(TS_HIU_CTL_3) & 0x80) + pcr = READ_DEMUX_REG(PCR_DEMUX_3); + else + pcr = READ_DEMUX_REG(PCR_DEMUX); + first_pcr = pcr; + /* pr_info("set first_pcr = 0x%x\n", pcr); */ + } + + return first_pcr; +} + +u8 tsdemux_pcrscr_valid(void) +{ + return pcrscr_valid; +} + +u8 tsdemux_pcraudio_valid(void) +{ + return pcraudio_valid; +} + +u8 tsdemux_pcrvideo_valid(void) +{ + return pcrvideo_valid; +} + +void tsdemux_pcr_set(unsigned int pcr) +{ + if (pcr_init_flag == 0) { + /*timestamp_pcrscr_set(pcr); + timestamp_pcrscr_enable(1);*/ + pcr_init_flag = 1; + } +} + +void tsdemux_tsync_func_init(void) +{ + register_tsync_callbackfunc( + TSYNC_PCRSCR_VALID, (void *)(tsdemux_pcrscr_valid)); + register_tsync_callbackfunc( + TSYNC_PCRSCR_GET, (void *)(tsdemux_pcrscr_get)); + register_tsync_callbackfunc( + TSYNC_FIRST_PCRSCR_GET, (void *)(tsdemux_first_pcrscr_get)); + register_tsync_callbackfunc( + TSYNC_PCRAUDIO_VALID, (void *)(tsdemux_pcraudio_valid)); + register_tsync_callbackfunc( + TSYNC_PCRVIDEO_VALID, (void *)(tsdemux_pcrvideo_valid)); + register_tsync_callbackfunc( + TSYNC_BUF_BY_BYTE, (void *)(get_buf_by_type)); + register_tsync_callbackfunc( + TSYNC_STBUF_LEVEL, (void *)(stbuf_level)); + register_tsync_callbackfunc( + TSYNC_STBUF_SPACE, 
(void *)(stbuf_space)); + register_tsync_callbackfunc( + TSYNC_STBUF_SIZE, (void *)(stbuf_size)); +} + +static int tsparser_stbuf_init(struct stream_buf_s *stbuf, + struct vdec_s *vdec) +{ + int ret = -1; + + ret = stbuf_init(stbuf, vdec); + if (ret) + goto out; + + ret = tsdemux_init(stbuf->pars.vid, + stbuf->pars.aid, + stbuf->pars.sid, + stbuf->pars.pcrid, + stbuf->is_hevc, + vdec); + if (ret) + goto out; + + tsync_pcr_start(); + + stbuf->flag |= BUF_FLAG_IN_USE; +out: + return ret; +} + +static void tsparser_stbuf_release(struct stream_buf_s *stbuf) +{ + tsync_pcr_stop(); + + tsdemux_release(); + + stbuf_release(stbuf); +} + +static struct stream_buf_ops tsparser_stbuf_ops = { + .init = tsparser_stbuf_init, + .release = tsparser_stbuf_release, + .get_wp = parser_get_wp, + .set_wp = parser_set_wp, + .get_rp = parser_get_rp, + .set_rp = parser_set_rp, +}; + +struct stream_buf_ops *get_tsparser_stbuf_ops(void) +{ + return &tsparser_stbuf_ops; +} +EXPORT_SYMBOL(get_tsparser_stbuf_ops); + +
diff --git a/drivers/stream_input/parser/tsdemux.h b/drivers/stream_input/parser/tsdemux.h new file mode 100644 index 0000000..5f82309 --- /dev/null +++ b/drivers/stream_input/parser/tsdemux.h
@@ -0,0 +1,104 @@ +/* + * drivers/amlogic/media/stream_input/parser/tsdemux.h + * + * Copyright (C) 2016 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef TSDEMUX_H +#define TSDEMUX_H +#include <linux/amlogic/media/utils/amports_config.h> + +/* TODO: move to register headers */ +#define NEW_PDTS_READY 4 +#define AUDIO_PTS_READY 2 +#define VIDEO_PTS_READY 0 +#define DIS_CONTINUITY_PACKET 6 +#define SUB_PES_READY 7 +#define PCR_READY 11 + +#define PARSER_INTSTAT_FETCH_CMD (1<<7) + +#define FETCH_ENDIAN 27 +#define FETCH_ENDIAN_MASK (0x7<<27) + +#define RESET_DEMUXSTB (1<<1) +#define RESET_PARSER (1<<8) + +#define VIDEO_PACKET 0 +#define AUDIO_PACKET 1 +#define SUB_PACKET 2 + +#define OTHER_ENDIAN 6 +#define BYPASS_ENDIAN 3 +#define SECTION_ENDIAN 0 + +#define USE_HI_BSF_INTERFACE 7 +#define DES_OUT_DLY 8 +#define TRANSPORT_SCRAMBLING_CONTROL_ODD 6 +#define TS_HIU_ENABLE 5 +#define FEC_FILE_CLK_DIV 0 +#define STB_DEMUX_ENABLE 4 +#define KEEP_DUPLICATE_PACKAGE 6 + +#define ES_VID_MAN_RD_PTR (1<<0) +#define ES_AUD_MAN_RD_PTR (1<<4) + +#define PS_CFG_PFIFO_EMPTY_CNT_BIT 16 +#define PS_CFG_MAX_ES_WR_CYCLE_BIT 12 +#define PS_CFG_MAX_FETCH_CYCLE_BIT 0 + +#define ES_SUB_WR_ENDIAN_BIT 9 +#define ES_SUB_MAN_RD_PTR (1<<8) +#define PARSER_INTSTAT_FETCH_CMD (1<<7) + +#define PARSER_INT_HOST_EN_BIT 8 + +struct stream_buf_s; +struct vdec_s; + +extern s32 tsdemux_init(u32 vid, u32 aid, u32 sid, u32 pcrid, bool is_hevc, + struct vdec_s *vdec); + +extern void 
tsdemux_release(void); +extern ssize_t drm_tswrite(struct file *file, + struct stream_buf_s *vbuf, + struct stream_buf_s *abuf, + const char __user *buf, size_t count); + +extern ssize_t tsdemux_write(struct file *file, + struct stream_buf_s *vbuf, + struct stream_buf_s *abuf, + const char __user *buf, size_t count); + +extern u32 tsdemux_pcrscr_get(void); +extern u8 tsdemux_pcrscr_valid(void); +extern u8 tsdemux_pcraudio_valid(void); +extern u8 tsdemux_pcrvideo_valid(void); +extern u32 tsdemux_first_pcrscr_get(void); +extern void timestamp_pcrscr_enable(u32 enable); +extern void timestamp_pcrscr_set(u32 pts); +int get_discontinue_counter(void); + +int tsdemux_class_register(void); +void tsdemux_class_unregister(void); +void tsdemux_change_avid(unsigned int vid, unsigned int aid); +void tsdemux_change_sid(unsigned int sid); +void tsdemux_audio_reset(void); +void tsdemux_sub_reset(void); +void tsdemux_set_skipbyte(int skipbyte); +void tsdemux_set_demux(int dev); +void tsdemux_tsync_func_init(void); + + +#endif /* TSDEMUX_H */
diff --git a/drivers/stream_input/subtitle/subtitle.c b/drivers/stream_input/subtitle/subtitle.c new file mode 100644 index 0000000..915ee38 --- /dev/null +++ b/drivers/stream_input/subtitle/subtitle.c
@@ -0,0 +1,716 @@ +/* + * drivers/amlogic/media/subtitle/subtitle.c + * + * Copyright (C) 2017 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/vmalloc.h> +#include <linux/major.h> +#include <linux/slab.h> +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/interrupt.h> +#include <linux/amlogic/media/utils/amstream.h> +#include <linux/uaccess.h> +#include <linux/compat.h> +//#include "amports_priv.h" + +//#include "amlog.h" +//MODULE_AMLOG(AMLOG_DEFAULT_LEVEL, 0, LOG_DEFAULT_LEVEL_DESC, + //LOG_DEFAULT_MASK_DESC); +#define DEVICE_NAME "amsubtitle" +/* Dev name as it appears in /proc/devices */ +#define DEVICE_CLASS_NAME "subtitle" + +static int subdevice_open; + +#define MAX_SUBTITLE_PACKET 10 +static DEFINE_MUTEX(amsubtitle_mutex); + +struct subtitle_data_s { + int subtitle_size; + int subtitle_pts; + char *data; +}; +static struct subtitle_data_s subtitle_data[MAX_SUBTITLE_PACKET]; +static int subtitle_enable = 1; +static int subtitle_total; +static int subtitle_width; +static int subtitle_height; +static int subtitle_type = -1; +static int subtitle_current; /* no subtitle */ +/* sub_index node will be modified by libplayer; amlogicplayer will use */ +/* it to detect wheather libplayer switch sub finished or not */ +static int subtitle_index; /* no subtitle */ +/* static int subtitle_size = 0; */ +/* static int 
subtitle_read_pos = 0; */ +static int subtitle_write_pos; +static int subtitle_start_pts; +static int subtitle_fps; +static int subtitle_subtype; +static int subtitle_reset; +/* static int *subltitle_address[MAX_SUBTITLE_PACKET]; */ + +enum subinfo_para_e { + SUB_NULL = -1, + SUB_ENABLE = 0, + SUB_TOTAL, + SUB_WIDTH, + SUB_HEIGHT, + SUB_TYPE, + SUB_CURRENT, + SUB_INDEX, + SUB_WRITE_POS, + SUB_START_PTS, + SUB_FPS, + SUB_SUBTYPE, + SUB_RESET, + SUB_DATA_T_SIZE, + SUB_DATA_T_DATA +}; + +struct subinfo_para_s { + enum subinfo_para_e subinfo_type; + int subtitle_info; + char *data; +}; + +/* total */ +/* curr */ +/* bimap */ +/* text */ +/* type */ +/* info */ +/* pts */ +/* duration */ +/* color pallete */ +/* width/height */ + +static ssize_t show_curr(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d: current\n", subtitle_current); +} + +static ssize_t store_curr(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int curr; + ssize_t r; + + r = kstrtoint(buf, 0, &curr); + if (r < 0) + return -EINVAL; + + + subtitle_current = curr; + + return size; +} + +static ssize_t show_index(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d: current\n", subtitle_index); +} + +static ssize_t store_index(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int curr; + ssize_t r; + + r = kstrtoint(buf, 0, &curr); + if (r < 0) + return -EINVAL; + + subtitle_index = curr; + + return size; +} + +static ssize_t show_reset(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d: current\n", subtitle_reset); +} + +static ssize_t store_reset(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int reset; + ssize_t r; + + r = kstrtoint(buf, 0, &reset); + + pr_info("reset is %d\n", reset); + if (r < 0) + return -EINVAL; + + + 
subtitle_reset = reset; + + return size; +} + +static ssize_t show_type(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d: type\n", subtitle_type); +} + +static ssize_t store_type(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int type; + ssize_t r; + + r = kstrtoint(buf, 0, &type); + if (r < 0) + return -EINVAL; + + subtitle_type = type; + + return size; +} + +static ssize_t show_width(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d: width\n", subtitle_width); +} + +static ssize_t store_width(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int width; + ssize_t r; + + r = kstrtoint(buf, 0, &width); + if (r < 0) + return -EINVAL; + + subtitle_width = width; + + return size; +} + +static ssize_t show_height(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d: height\n", subtitle_height); +} + +static ssize_t store_height(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int height; + ssize_t r; + + r = kstrtoint(buf, 0, &height); + if (r < 0) + return -EINVAL; + + subtitle_height = height; + + return size; +} + +static ssize_t show_total(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d: num\n", subtitle_total); +} + +static ssize_t store_total(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int total; + ssize_t r; + + r = kstrtoint(buf, 0, &total); + if (r < 0) + return -EINVAL; + pr_info("subtitle num is %d\n", total); + subtitle_total = total; + + return size; +} + +static ssize_t show_enable(struct class *class, struct class_attribute *attr, + char *buf) +{ + if (subtitle_enable) + return sprintf(buf, "1: enabled\n"); + + return sprintf(buf, "0: disabled\n"); +} + +static ssize_t store_enable(struct 
class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int mode; + ssize_t r; + + r = kstrtoint(buf, 0, &mode); + if (r < 0) + return -EINVAL; + pr_info("subtitle enable is %d\n", mode); + subtitle_enable = mode ? 1 : 0; + + return size; +} + +static ssize_t show_size(struct class *class, struct class_attribute *attr, + char *buf) +{ + if (subtitle_enable) + return sprintf(buf, "1: size\n"); + + return sprintf(buf, "0: size\n"); +} + +static ssize_t store_size(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int ssize; + ssize_t r; + + r = kstrtoint(buf, 0, &ssize); + if (r < 0) + return -EINVAL; + pr_info("subtitle size is %d\n", ssize); + subtitle_data[subtitle_write_pos].subtitle_size = ssize; + + return size; +} + +static ssize_t show_startpts(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d: pts\n", subtitle_start_pts); +} + +static ssize_t store_startpts(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int spts; + ssize_t r; + + r = kstrtoint(buf, 0, &spts); + if (r < 0) + return -EINVAL; + pr_info("subtitle start pts is %x\n", spts); + subtitle_start_pts = spts; + + return size; +} + +static ssize_t show_data(struct class *class, struct class_attribute *attr, + char *buf) +{ +#if 0 + if (subtitle_data[subtitle_write_pos].data) + return sprintf(buf, "%lld\n", + (unsigned long)(subtitle_data[subtitle_write_pos].data)); +#endif + return sprintf(buf, "0: disabled\n"); +} + +static ssize_t store_data(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int address; + ssize_t r; + + r = kstrtoint(buf, 0, &address); + if (r == (ssize_t)0) + return -EINVAL; +#if 0 + if (subtitle_data[subtitle_write_pos].subtitle_size > 0) { + subtitle_data[subtitle_write_pos].data = vmalloc(( + subtitle_data[subtitle_write_pos].subtitle_size)); + if 
(subtitle_data[subtitle_write_pos].data) + memcpy(subtitle_data[subtitle_write_pos].data, + (unsigned long *)address, + subtitle_data[subtitle_write_pos].subtitle_size); + } + pr_info("subtitle data address is %x", + (unsigned int)(subtitle_data[subtitle_write_pos].data)); +#endif + subtitle_write_pos++; + if (subtitle_write_pos >= MAX_SUBTITLE_PACKET) + subtitle_write_pos = 0; + return 1; +} + +static ssize_t show_fps(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d: fps\n", subtitle_fps); +} + +static ssize_t store_fps(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int ssize; + ssize_t r; + + r = kstrtoint(buf, 0, &ssize); + if (r < 0) + return -EINVAL; + pr_info("subtitle fps is %d\n", ssize); + subtitle_fps = ssize; + + return size; +} + +static ssize_t show_subtype(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d: subtype\n", subtitle_subtype); +} + +static ssize_t store_subtype(struct class *class, struct class_attribute *attr, + const char *buf, size_t size) +{ + unsigned int ssize; + ssize_t r; + + r = kstrtoint(buf, 0, &ssize); + if (r < 0) + return -EINVAL; + pr_info("subtitle subtype is %d\n", ssize); + subtitle_subtype = ssize; + + return size; +} + +static struct class_attribute subtitle_class_attrs[] = { + __ATTR(enable, 0664, show_enable, store_enable), + __ATTR(total, 0664, show_total, store_total), + __ATTR(width, 0664, show_width, store_width), + __ATTR(height, 0664, show_height, store_height), + __ATTR(type, 0664, show_type, store_type), + __ATTR(curr, 0664, show_curr, store_curr), + __ATTR(index, 0664, show_index, store_index), + __ATTR(size, 0664, show_size, store_size), + __ATTR(data, 0664, show_data, store_data), + __ATTR(startpts, 0664, show_startpts, + store_startpts), + __ATTR(fps, 0664, show_fps, store_fps), + __ATTR(subtype, 0664, show_subtype, + store_subtype), + __ATTR(reset, 0644, show_reset, 
store_reset), + __ATTR_NULL +}; + + +/********************************************************* + * /dev/amvideo APIs + *********************************************************/ +static int amsubtitle_open(struct inode *inode, struct file *file) +{ + mutex_lock(&amsubtitle_mutex); + + if (subdevice_open) { + mutex_unlock(&amsubtitle_mutex); + return -EBUSY; + } + + subdevice_open = 1; + + try_module_get(THIS_MODULE); + + mutex_unlock(&amsubtitle_mutex); + + return 0; +} + +static int amsubtitle_release(struct inode *inode, struct file *file) +{ + mutex_lock(&amsubtitle_mutex); + + subdevice_open = 0; + + module_put(THIS_MODULE); + + mutex_unlock(&amsubtitle_mutex); + + return 0; +} + +static long amsubtitle_ioctl(struct file *file, unsigned int cmd, ulong arg) +{ + switch (cmd) { + case AMSTREAM_IOC_GET_SUBTITLE_INFO: { + struct subinfo_para_s Vstates; + struct subinfo_para_s *states = &Vstates; + + if (copy_from_user((void *)states, + (void *)arg, sizeof(Vstates))) + return -EFAULT; + switch (states->subinfo_type) { + case SUB_ENABLE: + states->subtitle_info = subtitle_enable; + break; + case SUB_TOTAL: + states->subtitle_info = subtitle_total; + break; + case SUB_WIDTH: + states->subtitle_info = subtitle_width; + break; + case SUB_HEIGHT: + states->subtitle_info = subtitle_height; + break; + case SUB_TYPE: + states->subtitle_info = subtitle_type; + break; + case SUB_CURRENT: + states->subtitle_info = subtitle_current; + break; + case SUB_INDEX: + states->subtitle_info = subtitle_index; + break; + case SUB_WRITE_POS: + states->subtitle_info = subtitle_write_pos; + break; + case SUB_START_PTS: + states->subtitle_info = subtitle_start_pts; + break; + case SUB_FPS: + states->subtitle_info = subtitle_fps; + break; + case SUB_SUBTYPE: + states->subtitle_info = subtitle_subtype; + break; + case SUB_RESET: + states->subtitle_info = subtitle_reset; + break; + case SUB_DATA_T_SIZE: + states->subtitle_info = + subtitle_data[subtitle_write_pos].subtitle_size; + break; + 
case SUB_DATA_T_DATA: { + if (states->subtitle_info > 0) + states->subtitle_info = + (long)subtitle_data[subtitle_write_pos].data; + } + break; + default: + break; + } + if (copy_to_user((void *)arg, (void *)states, sizeof(Vstates))) + return -EFAULT; + } + + break; + case AMSTREAM_IOC_SET_SUBTITLE_INFO: { + struct subinfo_para_s Vstates; + struct subinfo_para_s *states = &Vstates; + + if (copy_from_user((void *)states, + (void *)arg, sizeof(Vstates))) + return -EFAULT; + switch (states->subinfo_type) { + case SUB_ENABLE: + subtitle_enable = states->subtitle_info; + break; + case SUB_TOTAL: + subtitle_total = states->subtitle_info; + break; + case SUB_WIDTH: + subtitle_width = states->subtitle_info; + break; + case SUB_HEIGHT: + subtitle_height = states->subtitle_info; + break; + case SUB_TYPE: + subtitle_type = states->subtitle_info; + break; + case SUB_CURRENT: + subtitle_current = states->subtitle_info; + break; + case SUB_INDEX: + subtitle_index = states->subtitle_info; + break; + case SUB_WRITE_POS: + subtitle_write_pos = states->subtitle_info; + break; + case SUB_START_PTS: + subtitle_start_pts = states->subtitle_info; + break; + case SUB_FPS: + subtitle_fps = states->subtitle_info; + break; + case SUB_SUBTYPE: + subtitle_subtype = states->subtitle_info; + break; + case SUB_RESET: + subtitle_reset = states->subtitle_info; + break; + case SUB_DATA_T_SIZE: + subtitle_data[subtitle_write_pos].subtitle_size = + states->subtitle_info; + break; + case SUB_DATA_T_DATA: { + if (states->subtitle_info > 0) { + subtitle_data[subtitle_write_pos].data = + vmalloc((states->subtitle_info)); + if (subtitle_data[subtitle_write_pos].data) + memcpy( + subtitle_data[subtitle_write_pos].data, + (char *)states->data, + states->subtitle_info); + } + + subtitle_write_pos++; + if (subtitle_write_pos >= MAX_SUBTITLE_PACKET) + subtitle_write_pos = 0; + } + break; + default: + break; + } + + } + + break; + default: + break; + } + + return 0; +} + +#ifdef CONFIG_COMPAT +static long 
amsub_compat_ioctl(struct file *file, unsigned int cmd, ulong arg) +{ + long ret = 0; + + ret = amsubtitle_ioctl(file, cmd, (ulong)compat_ptr(arg)); + return ret; +} +#endif + +static const struct file_operations amsubtitle_fops = { + .owner = THIS_MODULE, + .open = amsubtitle_open, + .release = amsubtitle_release, + .unlocked_ioctl = amsubtitle_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = amsub_compat_ioctl, +#endif +}; + +static struct device *amsubtitle_dev; +static dev_t amsub_devno; +static struct class *amsub_clsp; +static struct cdev *amsub_cdevp; +#define AMSUBTITLE_DEVICE_COUNT 1 + +static void create_amsub_attrs(struct class *class) +{ + int i = 0; + + for (i = 0; subtitle_class_attrs[i].attr.name; i++) { + if (class_create_file(class, &subtitle_class_attrs[i]) < 0) + break; + } +} + +static void remove_amsub_attrs(struct class *class) +{ + int i = 0; + + for (i = 0; subtitle_class_attrs[i].attr.name; i++) + class_remove_file(class, &subtitle_class_attrs[i]); +} + +int subtitle_init(void) +{ + int ret = 0; + + ret = alloc_chrdev_region(&amsub_devno, 0, AMSUBTITLE_DEVICE_COUNT, + DEVICE_NAME); + if (ret < 0) { + pr_info("amsub: failed to alloc major number\n"); + ret = -ENODEV; + return ret; + } + + amsub_clsp = class_create(THIS_MODULE, DEVICE_CLASS_NAME); + if (IS_ERR(amsub_clsp)) { + ret = PTR_ERR(amsub_clsp); + goto err1; + } + + create_amsub_attrs(amsub_clsp); + + amsub_cdevp = kmalloc(sizeof(struct cdev), GFP_KERNEL); + if (!amsub_cdevp) { + /*pr_info("amsub: failed to allocate memory\n");*/ + ret = -ENOMEM; + goto err2; + } + + cdev_init(amsub_cdevp, &amsubtitle_fops); + amsub_cdevp->owner = THIS_MODULE; + /* connect the major/minor number to cdev */ + ret = cdev_add(amsub_cdevp, amsub_devno, AMSUBTITLE_DEVICE_COUNT); + if (ret) { + pr_info("amsub:failed to add cdev\n"); + goto err3; + } + + amsubtitle_dev = device_create(amsub_clsp, + NULL, MKDEV(MAJOR(amsub_devno), 0), + NULL, DEVICE_NAME); + + if (IS_ERR(amsubtitle_dev)) { + pr_err("## Can't 
create amsubtitle device\n"); + goto err4; + } + + return 0; + +err4: + cdev_del(amsub_cdevp); +err3: + kfree(amsub_cdevp); +err2: + remove_amsub_attrs(amsub_clsp); + class_destroy(amsub_clsp); +err1: + unregister_chrdev_region(amsub_devno, 1); + + return ret; +} +EXPORT_SYMBOL(subtitle_init); + +void subtitle_exit(void) +{ + unregister_chrdev_region(amsub_devno, 1); + device_destroy(amsub_clsp, MKDEV(MAJOR(amsub_devno), 0)); + cdev_del(amsub_cdevp); + kfree(amsub_cdevp); + remove_amsub_attrs(amsub_clsp); + class_destroy(amsub_clsp); +} +EXPORT_SYMBOL(subtitle_exit); +
diff --git a/drivers/stream_input/subtitle/subtitle.h b/drivers/stream_input/subtitle/subtitle.h new file mode 100644 index 0000000..375a31a --- /dev/null +++ b/drivers/stream_input/subtitle/subtitle.h
/*
 * drivers/amlogic/media/stream_input/amports/amports_priv.h
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#ifndef SUBTITLE_H
#define SUBTITLE_H

/* Register the amsubtitle char device, class and sysfs attributes. */
int subtitle_init(void);
/* Undo everything subtitle_init() set up. */
void subtitle_exit(void);

#endif
diff --git a/firmware/checkmsg b/firmware/checkmsg new file mode 100755 index 0000000..8ba42db --- /dev/null +++ b/firmware/checkmsg Binary files differ
diff --git a/firmware/h264_enc.bin b/firmware/h264_enc.bin new file mode 100644 index 0000000..d8bc73d --- /dev/null +++ b/firmware/h264_enc.bin Binary files differ
diff --git a/firmware/video_ucode.bin b/firmware/video_ucode.bin new file mode 100755 index 0000000..46d2ca3 --- /dev/null +++ b/firmware/video_ucode.bin Binary files differ
diff --git a/submit_tool.sh b/submit_tool.sh new file mode 100755 index 0000000..60e36e5 --- /dev/null +++ b/submit_tool.sh
#!/bin/sh
# Rewrite the HEAD commit message: strip generated trailers, re-append
# ucode version info (when video_ucode.bin changed) and the original
# Change-Id / Signed-off-by lines, then amend the commit.

ChangeID=$(git log -1 | grep "Change-Id:" | sed 's/^[ \t]*//g')
Signed=$(git log -1 | grep "Signed-off-by:" | sed 's/^[ \t]*//g')

# Keep the message body only: drop everything from the first generated
# trailer on, then strip leading blank lines and indentation.
git log -1 | sed '/Change-Id:/,$d' | sed '/Signed-off-by/,$d' | sed '/SourceCode:/,$d' | sed '/media_module version:/,$d' | sed -n '/^$/,$p' | sed '/^$/d' | sed 's/^[ \t]*//g' > tmp.txt

#echo "media_module version:" >> tmp.txt
#echo $(./version_control.sh) | awk '{print $1}' | awk -F [=] '{print $2}' >> tmp.txt

# Only regenerate version info when this commit touches the ucode blob.
if [ "0$(git log --stat -1 | grep "video_ucode.bin")" != "0" ]; then
echo "SourceCode:
ucode:" >> tmp.txt
	./firmware/checkmsg ./firmware/video_ucode.bin | grep ver | awk '{print $3}' | sed 's/v//g' > version.txt
	./firmware/checkmsg ./firmware/video_ucode.bin | grep -A 5 "change id history:" >> version.txt
	chmod 644 version.txt
	./firmware/checkmsg ./firmware/video_ucode.bin | grep ver | awk '{print $3}' | sed 's/v//g' >> tmp.txt
	cat version.txt | sed -n '3,7p' >> tmp.txt

	git add version.txt
fi

echo "" >> tmp.txt
# fix: quote the expansions so multi-word trailers are not word-split
# and re-joined with single spaces by the shell.
echo "${ChangeID}" >> tmp.txt
echo "${Signed}" >> tmp.txt

git status
# fix: read the message straight from the file; the old
# 'cat tmp.txt | xargs -0 git commit --amend -m' fed NUL-delimited input
# that was never NUL-delimited and broke on messages containing quotes.
git commit --amend -F tmp.txt

rm -f tmp.txt
diff --git a/version.txt b/version.txt new file mode 100644 index 0000000..a994c18 --- /dev/null +++ b/version.txt
@@ -0,0 +1,6 @@ +0.3.41-g645c87e +I9b5e8 +If13e3 +I234d5 +I3f003 +Ibb6f2
diff --git a/version_control.sh b/version_control.sh new file mode 100755 index 0000000..9069a58 --- /dev/null +++ b/version_control.sh
#!/bin/sh
# Emit -D compiler flags encoding the decoder/ucode version, derived
# from the VERSION file, git history and the ucode blob header.
# Output (stdout): DECODER_VERSION (when git history is available) or
# RELEASED_VERSION (tarball builds), plus UCODE_VERSION.

# fix: quote ${MEDIA_MODULE_PATH} everywhere - unquoted 'cd' breaks on
# checkout paths containing spaces.
MEDIA_MODULE_PATH=$(cd "$(dirname "$0")";pwd)
Major_V=$(cd "${MEDIA_MODULE_PATH}/"; grep "Major_V" VERSION | awk -F [=] '{print $2}')
Minor_V=$(cd "${MEDIA_MODULE_PATH}/"; grep "Minor_V" VERSION | awk -F [=] '{print $2}')
BASE_CHANGEID=$(cd "${MEDIA_MODULE_PATH}/"; grep "^BaseChangeId" VERSION | awk -F [=] '{print $2}' | cut -c1-6)
#MEDIAMODULE_CHANGEID=$(cd ${MEDIA_MODULE_PATH}; git log -1 ${MEDIA_MODULE_PATH} | grep "Change-Id: " | awk '{ print $2}' | cut -c1-6 | tail -1)
# Number of commits since the base Change-Id (empty when not a git tree).
COMMIT_COUNT=$(cd "${MEDIA_MODULE_PATH}/"; git log | grep "Change-Id: " | grep -n "${BASE_CHANGEID}" | awk -F ":" '{printf "%d", $1-1}' )
MEDIAMODULE_COMMITID=$(cd "${MEDIA_MODULE_PATH}/"; git rev-parse --short HEAD)
# Ucode version, both verbatim (detail) and packed numeric (MmmPPP).
UCODE_VERSION_DETAIL=$(cd "${MEDIA_MODULE_PATH}/"; ./firmware/checkmsg ./firmware/video_ucode.bin | grep "ver :" | awk '{print $3}' | sed 's/v//g')
UCODE_VERSION=$(cd "${MEDIA_MODULE_PATH}/"; ./firmware/checkmsg ./firmware/video_ucode.bin | grep "ver :" | awk -F '[v-]' '{print $3}' | awk -F [\.] '{printf "%d%02d%03d", $1,$2,$3}')
RELEASED_VERSION=$(cd "${MEDIA_MODULE_PATH}/"; grep "^#V" VERSION | head -1 | awk '{print $1}' | sed 's/#//g')

# Git tree: full Major.Minor.count-gSHA.ucode string; otherwise fall
# back to the released version recorded in VERSION.
if [ "x${COMMIT_COUNT}" != "x" ]; then
VERSION_CONTROL_CFLAGS="-DDECODER_VERSION=${Major_V}.${Minor_V}.${COMMIT_COUNT}-g${MEDIAMODULE_COMMITID}.${UCODE_VERSION}"
else
VERSION_CONTROL_CFLAGS="${VERSION_CONTROL_CFLAGS} -DRELEASED_VERSION=${RELEASED_VERSION}"
fi
VERSION_CONTROL_CFLAGS="${VERSION_CONTROL_CFLAGS} -DUCODE_VERSION=${UCODE_VERSION_DETAIL}"

# Intentionally unquoted: word splitting trims the leading space left
# by the append-style assignments above.
echo ${VERSION_CONTROL_CFLAGS}